Merge branch 'for-3.14-fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/tj/libata

Pull libata fixes from Tejun Heo:
 "Various device specific fixes.  Nothing too interesting"

* 'for-3.14-fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/tj/libata:
  ahci: disable NCQ on Samsung pci-e SSDs on macbooks
  ata: sata_mv: Cleanup only the initialized ports
  sata_sil: apply MOD15WRITE quirk to TOSHIBA MK2561GSYN
  ata: enable quirk from jmicron JMB350 for JMB394
  ATA: SATA_MV: Add missing Kconfig select statement
  ata: pata_imx: Check the return value from clk_prepare_enable()
diff --git a/.gitignore b/.gitignore
index 7e9932e..42fa0d5 100644
--- a/.gitignore
+++ b/.gitignore
@@ -92,3 +92,6 @@
 signing_key.priv
 signing_key.x509
 x509.genkey
+
+# Kconfig presets
+all.config
diff --git a/CREDITS b/CREDITS
index 4c7738f..e371c55 100644
--- a/CREDITS
+++ b/CREDITS
@@ -823,8 +823,8 @@
 S: Germany
 
 N: Jean Delvare
-E: khali@linux-fr.org
-W: http://khali.linux-fr.org/
+E: jdelvare@suse.de
+W: http://jdelvare.nerim.net/
 D: Several hardware monitoring drivers
 S: France
 
diff --git a/Documentation/00-INDEX b/Documentation/00-INDEX
index 38f8444..07de7e1 100644
--- a/Documentation/00-INDEX
+++ b/Documentation/00-INDEX
@@ -29,6 +29,8 @@
 	- How to do DMA with ISA (and LPC) devices.
 DMA-attributes.txt
 	- listing of the various possible attributes a DMA region can have
+dmatest.txt
+	- how to compile, configure and use the dmatest system.
 DocBook/
 	- directory with DocBook templates etc. for kernel documentation.
 EDID/
@@ -77,6 +79,8 @@
 	- directory with info about Linux on the ARM architecture.
 arm64/
 	- directory with info about Linux on the 64 bit ARM architecture.
+assoc_array.txt
+	- generic associative array intro.
 atomic_ops.txt
 	- semantics and behavior of atomic and bitmask operations.
 auxdisplay/
@@ -87,6 +91,8 @@
 	- how to use kernel parameters to exclude bad RAM regions.
 basic_profiling.txt
 	- basic instructions for those who wants to profile Linux kernel.
+bcache.txt
+	- Block-layer cache on fast SSDs to improve slow (raid) I/O performance.
 binfmt_misc.txt
 	- info on the kernel support for extra binary formats.
 blackfin/
@@ -171,6 +177,8 @@
 	- info about initramfs, klibc, and userspace early during boot.
 edac.txt
 	- information on EDAC - Error Detection And Correction
+efi-stub.txt
+	- How to use the EFI boot stub to bypass GRUB or elilo on EFI systems.
 eisa.txt
 	- info on EISA bus support.
 email-clients.txt
@@ -195,8 +203,8 @@
 	- info on requeueing of tasks from a non-PI futex to a PI futex
 gcov.txt
 	- use of GCC's coverage testing tool "gcov" with the Linux kernel
-gpio.txt
-	- overview of GPIO (General Purpose Input/Output) access conventions.
+gpio/
+	- gpio related documentation
 hid/
 	- directory with information on human interface devices
 highuid.txt
@@ -255,6 +263,8 @@
 	- listing of various WWW + books that document kernel internals.
 kernel-parameters.txt
 	- summary listing of command line / boot prompt args for the kernel.
+kernel-per-CPU-kthreads.txt
+	- List of all per-CPU kthreads and how they introduce jitter.
 kmemcheck.txt
 	- info on dynamic checker that detects uses of uninitialized memory.
 kmemleak.txt
@@ -299,8 +309,6 @@
 	- directory with info on parts like the Texas Instruments EMIF driver
 memory-hotplug.txt
 	- Hotpluggable memory support, how to use and current status.
-memory.txt
-	- info on typical Linux memory problems.
 metag/
 	- directory with info about Linux on Meta architecture.
 mips/
@@ -311,6 +319,8 @@
 	- directory with info about the MMC subsystem
 mn10300/
 	- directory with info about the mn10300 architecture port
+module-signing.txt
+	- Kernel module signing for increased security when loading modules.
 mtd/
 	- directory with info about memory technology devices (flash)
 mono.txt
@@ -343,6 +353,8 @@
 	- info on the Linux PCMCIA driver.
 percpu-rw-semaphore.txt
 	- RCU based read-write semaphore optimized for locking for reading
+phy.txt
+	- Description of the generic PHY framework.
 pi-futex.txt
 	- documentation on lightweight priority inheritance futexes.
 pinctrl.txt
@@ -431,6 +443,8 @@
 	- info on the magic SysRq key.
 target/
 	- directory with info on generating TCM v4 fabric .ko modules
+this_cpu_ops.txt
+	- The rationale behind this_cpu operations and how to use them.
 thermal/
 	- directory with information on managing thermal issues (CPU/temp)
 trace/
@@ -469,6 +483,8 @@
 	- directory with info about Intel Wireless Wimax Connections
 workqueue.txt
 	- information on the Concurrency Managed Workqueue implementation
+ww-mutex-design.txt
+	- Intro to Mutex wait/wound deadlock handling.
 x86/x86_64/
 	- directory with info on Linux support for AMD x86-64 (Hammer) machines.
 xtensa/
diff --git a/Documentation/ABI/testing/configfs-usb-gadget b/Documentation/ABI/testing/configfs-usb-gadget
index 01e769d..37559a0 100644
--- a/Documentation/ABI/testing/configfs-usb-gadget
+++ b/Documentation/ABI/testing/configfs-usb-gadget
@@ -1,13 +1,13 @@
 What:		/config/usb-gadget
 Date:		Jun 2013
-KenelVersion:	3.11
+KernelVersion:	3.11
 Description:
 		This group contains sub-groups corresponding to created
 		USB gadgets.
 
 What:		/config/usb-gadget/gadget
 Date:		Jun 2013
-KenelVersion:	3.11
+KernelVersion:	3.11
 Description:
 
 		The attributes of a gadget:
@@ -27,7 +27,7 @@
 
 What:		/config/usb-gadget/gadget/configs
 Date:		Jun 2013
-KenelVersion:	3.11
+KernelVersion:	3.11
 Description:
 		This group contains a USB gadget's configurations
 
@@ -58,20 +58,20 @@
 
 What:		/config/usb-gadget/gadget/functions
 Date:		Jun 2013
-KenelVersion:	3.11
+KernelVersion:	3.11
 Description:
 		This group contains functions available to this USB gadget.
 
 What:		/config/usb-gadget/gadget/strings
 Date:		Jun 2013
-KenelVersion:	3.11
+KernelVersion:	3.11
 Description:
 		This group contains subdirectories for language-specific
 		strings for this gadget.
 
 What:		/config/usb-gadget/gadget/strings/language
 Date:		Jun 2013
-KenelVersion:	3.11
+KernelVersion:	3.11
 Description:
 		The attributes:
 
diff --git a/Documentation/ABI/testing/configfs-usb-gadget-acm b/Documentation/ABI/testing/configfs-usb-gadget-acm
index 5708a56..d21092d 100644
--- a/Documentation/ABI/testing/configfs-usb-gadget-acm
+++ b/Documentation/ABI/testing/configfs-usb-gadget-acm
@@ -1,6 +1,6 @@
 What:		/config/usb-gadget/gadget/functions/acm.name
 Date:		Jun 2013
-KenelVersion:	3.11
+KernelVersion:	3.11
 Description:
 
 		This item contains just one readonly attribute: port_num.
diff --git a/Documentation/ABI/testing/configfs-usb-gadget-ecm b/Documentation/ABI/testing/configfs-usb-gadget-ecm
index 6b9a582..0addf77 100644
--- a/Documentation/ABI/testing/configfs-usb-gadget-ecm
+++ b/Documentation/ABI/testing/configfs-usb-gadget-ecm
@@ -1,6 +1,6 @@
 What:		/config/usb-gadget/gadget/functions/ecm.name
 Date:		Jun 2013
-KenelVersion:	3.11
+KernelVersion:	3.11
 Description:
 		The attributes:
 
diff --git a/Documentation/ABI/testing/configfs-usb-gadget-eem b/Documentation/ABI/testing/configfs-usb-gadget-eem
index dbddf36..a4c5715 100644
--- a/Documentation/ABI/testing/configfs-usb-gadget-eem
+++ b/Documentation/ABI/testing/configfs-usb-gadget-eem
@@ -1,6 +1,6 @@
 What:		/config/usb-gadget/gadget/functions/eem.name
 Date:		Jun 2013
-KenelVersion:	3.11
+KernelVersion:	3.11
 Description:
 		The attributes:
 
diff --git a/Documentation/ABI/testing/configfs-usb-gadget-ffs b/Documentation/ABI/testing/configfs-usb-gadget-ffs
index 14343e2..e39b276 100644
--- a/Documentation/ABI/testing/configfs-usb-gadget-ffs
+++ b/Documentation/ABI/testing/configfs-usb-gadget-ffs
@@ -1,6 +1,6 @@
 What:		/config/usb-gadget/gadget/functions/ffs.name
 Date:		Nov 2013
-KenelVersion:	3.13
+KernelVersion:	3.13
 Description:	The purpose of this directory is to create and remove it.
 
 		A corresponding USB function instance is created/removed.
diff --git a/Documentation/ABI/testing/configfs-usb-gadget-loopback b/Documentation/ABI/testing/configfs-usb-gadget-loopback
index 852b236..9aae5bf 100644
--- a/Documentation/ABI/testing/configfs-usb-gadget-loopback
+++ b/Documentation/ABI/testing/configfs-usb-gadget-loopback
@@ -1,6 +1,6 @@
 What:		/config/usb-gadget/gadget/functions/Loopback.name
 Date:		Nov 2013
-KenelVersion:	3.13
+KernelVersion:	3.13
 Description:
 		The attributes:
 
diff --git a/Documentation/ABI/testing/configfs-usb-gadget-mass-storage b/Documentation/ABI/testing/configfs-usb-gadget-mass-storage
index ad72a37..9931fb0 100644
--- a/Documentation/ABI/testing/configfs-usb-gadget-mass-storage
+++ b/Documentation/ABI/testing/configfs-usb-gadget-mass-storage
@@ -1,6 +1,6 @@
 What:		/config/usb-gadget/gadget/functions/mass_storage.name
 Date:		Oct 2013
-KenelVersion:	3.13
+KernelVersion:	3.13
 Description:
 		The attributes:
 
@@ -13,7 +13,7 @@
 
 What:		/config/usb-gadget/gadget/functions/mass_storage.name/lun.name
 Date:		Oct 2013
-KenelVersion:	3.13
+KernelVersion:	3.13
 Description:
 		The attributes:
 
diff --git a/Documentation/ABI/testing/configfs-usb-gadget-ncm b/Documentation/ABI/testing/configfs-usb-gadget-ncm
index bc309f423..6fe723e 100644
--- a/Documentation/ABI/testing/configfs-usb-gadget-ncm
+++ b/Documentation/ABI/testing/configfs-usb-gadget-ncm
@@ -1,6 +1,6 @@
 What:		/config/usb-gadget/gadget/functions/ncm.name
 Date:		Jun 2013
-KenelVersion:	3.11
+KernelVersion:	3.11
 Description:
 		The attributes:
 
diff --git a/Documentation/ABI/testing/configfs-usb-gadget-obex b/Documentation/ABI/testing/configfs-usb-gadget-obex
index aaa5c96..a6a9327 100644
--- a/Documentation/ABI/testing/configfs-usb-gadget-obex
+++ b/Documentation/ABI/testing/configfs-usb-gadget-obex
@@ -1,6 +1,6 @@
 What:		/config/usb-gadget/gadget/functions/obex.name
 Date:		Jun 2013
-KenelVersion:	3.11
+KernelVersion:	3.11
 Description:
 
 		This item contains just one readonly attribute: port_num.
diff --git a/Documentation/ABI/testing/configfs-usb-gadget-phonet b/Documentation/ABI/testing/configfs-usb-gadget-phonet
index 3e3b742..7037a35 100644
--- a/Documentation/ABI/testing/configfs-usb-gadget-phonet
+++ b/Documentation/ABI/testing/configfs-usb-gadget-phonet
@@ -1,6 +1,6 @@
 What:		/config/usb-gadget/gadget/functions/phonet.name
 Date:		Jun 2013
-KenelVersion:	3.11
+KernelVersion:	3.11
 Description:
 
 		This item contains just one readonly attribute: ifname.
diff --git a/Documentation/ABI/testing/configfs-usb-gadget-rndis b/Documentation/ABI/testing/configfs-usb-gadget-rndis
index 822e6da..e32879b 100644
--- a/Documentation/ABI/testing/configfs-usb-gadget-rndis
+++ b/Documentation/ABI/testing/configfs-usb-gadget-rndis
@@ -1,6 +1,6 @@
 What:		/config/usb-gadget/gadget/functions/rndis.name
 Date:		Jun 2013
-KenelVersion:	3.11
+KernelVersion:	3.11
 Description:
 		The attributes:
 
diff --git a/Documentation/ABI/testing/configfs-usb-gadget-serial b/Documentation/ABI/testing/configfs-usb-gadget-serial
index 16f130c..474d249 100644
--- a/Documentation/ABI/testing/configfs-usb-gadget-serial
+++ b/Documentation/ABI/testing/configfs-usb-gadget-serial
@@ -1,6 +1,6 @@
 What:		/config/usb-gadget/gadget/functions/gser.name
 Date:		Jun 2013
-KenelVersion:	3.11
+KernelVersion:	3.11
 Description:
 
 		This item contains just one readonly attribute: port_num.
diff --git a/Documentation/ABI/testing/configfs-usb-gadget-sourcesink b/Documentation/ABI/testing/configfs-usb-gadget-sourcesink
index a30f309..29477c3 100644
--- a/Documentation/ABI/testing/configfs-usb-gadget-sourcesink
+++ b/Documentation/ABI/testing/configfs-usb-gadget-sourcesink
@@ -1,6 +1,6 @@
 What:		/config/usb-gadget/gadget/functions/SourceSink.name
 Date:		Nov 2013
-KenelVersion:	3.13
+KernelVersion:	3.13
 Description:
 		The attributes:
 
diff --git a/Documentation/ABI/testing/configfs-usb-gadget-subset b/Documentation/ABI/testing/configfs-usb-gadget-subset
index 154ae59..9373e2c5 100644
--- a/Documentation/ABI/testing/configfs-usb-gadget-subset
+++ b/Documentation/ABI/testing/configfs-usb-gadget-subset
@@ -1,6 +1,6 @@
 What:		/config/usb-gadget/gadget/functions/geth.name
 Date:		Jun 2013
-KenelVersion:	3.11
+KernelVersion:	3.11
 Description:
 		The attributes:
 
diff --git a/Documentation/ABI/testing/sysfs-tty b/Documentation/ABI/testing/sysfs-tty
index ad22fb0..a2ccec3 100644
--- a/Documentation/ABI/testing/sysfs-tty
+++ b/Documentation/ABI/testing/sysfs-tty
@@ -3,7 +3,8 @@
 Contact:	Kay Sievers <kay.sievers@vrfy.org>
 Description:
 		 Shows the list of currently configured
-		 console devices, like 'tty1 ttyS0'.
+		 tty devices used for the console,
+		 like 'tty1 ttyS0'.
 		 The last entry in the file is the active
 		 device connected to /dev/console.
 		 The file supports poll() to detect virtual
diff --git a/Documentation/DocBook/media/v4l/compat.xml b/Documentation/DocBook/media/v4l/compat.xml
index 0c7195e..c4cac6d 100644
--- a/Documentation/DocBook/media/v4l/compat.xml
+++ b/Documentation/DocBook/media/v4l/compat.xml
@@ -2523,6 +2523,18 @@
       </orderedlist>
     </section>
 
+    <section>
+      <title>V4L2 in Linux 3.14</title>
+      <orderedlist>
+        <listitem>
+		<para> In struct <structname>v4l2_rect</structname>, the type
+of <structfield>width</structfield> and <structfield>height</structfield>
+fields changed from __s32 to __u32.
+	  </para>
+        </listitem>
+      </orderedlist>
+    </section>
+
     <section id="other">
       <title>Relation of V4L2 to other Linux multimedia APIs</title>
 
diff --git a/Documentation/DocBook/media/v4l/controls.xml b/Documentation/DocBook/media/v4l/controls.xml
index 7a3b49b..a5a3188 100644
--- a/Documentation/DocBook/media/v4l/controls.xml
+++ b/Documentation/DocBook/media/v4l/controls.xml
@@ -3161,6 +3161,47 @@
 		</entrytbl>
 	      </row>
 
+	      <row><entry></entry></row>
+	      <row>
+		<entry spanname="id"><constant>V4L2_CID_MPEG_VIDEO_VPX_MIN_QP</constant></entry>
+		<entry>integer</entry>
+	      </row>
+	      <row><entry spanname="descr">Minimum quantization parameter for VP8.</entry>
+	      </row>
+
+	      <row><entry></entry></row>
+	      <row>
+		<entry spanname="id"><constant>V4L2_CID_MPEG_VIDEO_VPX_MAX_QP</constant></entry>
+		<entry>integer</entry>
+	      </row>
+	      <row><entry spanname="descr">Maximum quantization parameter for VP8.</entry>
+	      </row>
+
+	      <row><entry></entry></row>
+	      <row>
+		<entry spanname="id"><constant>V4L2_CID_MPEG_VIDEO_VPX_I_FRAME_QP</constant>&nbsp;</entry>
+		<entry>integer</entry>
+	      </row>
+	      <row><entry spanname="descr">Quantization parameter for an I frame for VP8.</entry>
+	      </row>
+
+	      <row><entry></entry></row>
+	      <row>
+		<entry spanname="id"><constant>V4L2_CID_MPEG_VIDEO_VPX_P_FRAME_QP</constant>&nbsp;</entry>
+		<entry>integer</entry>
+	      </row>
+	      <row><entry spanname="descr">Quantization parameter for a P frame for VP8.</entry>
+	      </row>
+
+	      <row><entry></entry></row>
+	      <row>
+		<entry spanname="id"><constant>V4L2_CID_MPEG_VIDEO_VPX_PROFILE</constant>&nbsp;</entry>
+		<entry>integer</entry>
+	      </row>
+	      <row><entry spanname="descr">Select the desired profile for VPx encoder.
+Acceptable values are 0, 1, 2 and 3 corresponding to encoder profiles 0, 1, 2 and 3.</entry>
+	      </row>
+
           <row><entry></entry></row>
         </tbody>
       </tgroup>
diff --git a/Documentation/DocBook/media/v4l/dev-overlay.xml b/Documentation/DocBook/media/v4l/dev-overlay.xml
index 40d1d76..cc6e0c5 100644
--- a/Documentation/DocBook/media/v4l/dev-overlay.xml
+++ b/Documentation/DocBook/media/v4l/dev-overlay.xml
@@ -346,17 +346,14 @@
 rectangle, in pixels. Offsets increase to the right and down.</entry>
 	  </row>
 	  <row>
-	    <entry>__s32</entry>
+	    <entry>__u32</entry>
 	    <entry><structfield>width</structfield></entry>
 	    <entry>Width of the rectangle, in pixels.</entry>
 	  </row>
 	  <row>
-	    <entry>__s32</entry>
+	    <entry>__u32</entry>
 	    <entry><structfield>height</structfield></entry>
-	    <entry>Height of the rectangle, in pixels. Width and
-height cannot be negative, the fields are signed for hysterical
-reasons. <!-- video4linux-list@redhat.com on 22 Oct 2002 subject
-"Re:[V4L][patches!] Re:v4l2/kernel-2.5" --></entry>
+	    <entry>Height of the rectangle, in pixels.</entry>
 	  </row>
 	</tbody>
       </tgroup>
diff --git a/Documentation/DocBook/media/v4l/media-ioc-enum-links.xml b/Documentation/DocBook/media/v4l/media-ioc-enum-links.xml
index 355df43..cf85485 100644
--- a/Documentation/DocBook/media/v4l/media-ioc-enum-links.xml
+++ b/Documentation/DocBook/media/v4l/media-ioc-enum-links.xml
@@ -134,6 +134,15 @@
 	    <entry>Output pad, relative to the entity. Output pads source data
 	    and are origins of links.</entry>
 	  </row>
+	  <row>
+	    <entry><constant>MEDIA_PAD_FL_MUST_CONNECT</constant></entry>
+	    <entry>If this flag is set and the pad is linked to any other
+	    pad, then at least one of those links must be enabled for the
+	    entity to be able to stream. There could be temporary reasons
+	    (e.g. device configuration dependent) for the pad to need
+	    enabled links even when this flag isn't set; the absence of the
+	    flag doesn't imply there is none.</entry>
+	  </row>
 	</tbody>
       </tgroup>
     </table>
diff --git a/Documentation/DocBook/media/v4l/subdev-formats.xml b/Documentation/DocBook/media/v4l/subdev-formats.xml
index f72c1cc..7331ce1 100644
--- a/Documentation/DocBook/media/v4l/subdev-formats.xml
+++ b/Documentation/DocBook/media/v4l/subdev-formats.xml
@@ -89,7 +89,7 @@
       <constant>V4L2_MBUS_FMT_RGB555_2X8_PADHI_BE</constant>.
       </para>
 
-      <para>The following tables list existing packet RGB formats.</para>
+      <para>The following tables list existing packed RGB formats.</para>
 
       <table pgwide="0" frame="none" id="v4l2-mbus-pixelcode-rgb">
 	<title>RGB formats</title>
@@ -615,7 +615,7 @@
 	</mediaobject>
       </figure>
 
-      <para>The following table lists existing packet Bayer formats. The data
+      <para>The following table lists existing packed Bayer formats. The data
       organization is given as an example for the first pixel only.</para>
 
       <table pgwide="0" frame="none" id="v4l2-mbus-pixelcode-bayer">
@@ -1178,7 +1178,7 @@
       U, Y, V, Y order will be named <constant>V4L2_MBUS_FMT_UYVY8_2X8</constant>.
       </para>
 
-	<para><xref linkend="v4l2-mbus-pixelcode-yuv8"/> list existing packet YUV
+	<para><xref linkend="v4l2-mbus-pixelcode-yuv8"/> lists existing packed YUV
 	formats and describes the organization of each pixel data in each sample.
 	When a format pattern is split across multiple samples each of the samples
 	in the pattern is described.</para>
@@ -2492,6 +2492,163 @@
     </section>
 
     <section>
+      <title>HSV/HSL Formats</title>
+
+      <para>Those formats transfer pixel data as RGB values in a cylindrical-coordinate
+      system using Hue-Saturation-Value or Hue-Saturation-Lightness components. The
+      format code is made of the following information.
+      <itemizedlist>
+	<listitem><para>The hue, saturation, value or lightness and optional alpha
+	components order code, as encoded in a pixel sample. The only currently
+	supported value is AHSV.
+	</para></listitem>
+	<listitem><para>The number of bits per component, for each component. The values
+	can be different for all components. The only currently supported value is 8888.
+	</para></listitem>
+	<listitem><para>The number of bus samples per pixel. Pixels that are wider than
+	the bus width must be transferred in multiple samples. The only currently
+	supported value is 1.</para></listitem>
+	<listitem><para>The bus width.</para></listitem>
+	<listitem><para>For formats where the total number of bits per pixel is smaller
+	than the number of bus samples per pixel times the bus width, a padding
+	value stating if the bytes are padded in their most high order bits
+	(PADHI) or low order bits (PADLO).</para></listitem>
+	<listitem><para>For formats where the number of bus samples per pixel is larger
+	than 1, an endianness value stating if the pixel is transferred MSB first
+	(BE) or LSB first (LE).</para></listitem>
+      </itemizedlist>
+      </para>
+
+      <para>The following table lists existing HSV/HSL formats.</para>
+
+      <table pgwide="0" frame="none" id="v4l2-mbus-pixelcode-hsv">
+	<title>HSV/HSL formats</title>
+	<tgroup cols="27">
+	  <colspec colname="id" align="left" />
+	  <colspec colname="code" align="center"/>
+	  <colspec colname="bit" />
+	  <colspec colnum="4" colname="b31" align="center" />
+	  <colspec colnum="5" colname="b30" align="center" />
+	  <colspec colnum="6" colname="b29" align="center" />
+	  <colspec colnum="7" colname="b28" align="center" />
+	  <colspec colnum="8" colname="b27" align="center" />
+	  <colspec colnum="9" colname="b26" align="center" />
+	  <colspec colnum="10" colname="b25" align="center" />
+	  <colspec colnum="11" colname="b24" align="center" />
+	  <colspec colnum="12" colname="b23" align="center" />
+	  <colspec colnum="13" colname="b22" align="center" />
+	  <colspec colnum="14" colname="b21" align="center" />
+	  <colspec colnum="15" colname="b20" align="center" />
+	  <colspec colnum="16" colname="b19" align="center" />
+	  <colspec colnum="17" colname="b18" align="center" />
+	  <colspec colnum="18" colname="b17" align="center" />
+	  <colspec colnum="19" colname="b16" align="center" />
+	  <colspec colnum="20" colname="b15" align="center" />
+	  <colspec colnum="21" colname="b14" align="center" />
+	  <colspec colnum="22" colname="b13" align="center" />
+	  <colspec colnum="23" colname="b12" align="center" />
+	  <colspec colnum="24" colname="b11" align="center" />
+	  <colspec colnum="25" colname="b10" align="center" />
+	  <colspec colnum="26" colname="b09" align="center" />
+	  <colspec colnum="27" colname="b08" align="center" />
+	  <colspec colnum="28" colname="b07" align="center" />
+	  <colspec colnum="29" colname="b06" align="center" />
+	  <colspec colnum="30" colname="b05" align="center" />
+	  <colspec colnum="31" colname="b04" align="center" />
+	  <colspec colnum="32" colname="b03" align="center" />
+	  <colspec colnum="33" colname="b02" align="center" />
+	  <colspec colnum="34" colname="b01" align="center" />
+	  <colspec colnum="35" colname="b00" align="center" />
+	  <spanspec namest="b31" nameend="b00" spanname="b0" />
+	  <thead>
+	    <row>
+	      <entry>Identifier</entry>
+	      <entry>Code</entry>
+	      <entry></entry>
+	      <entry spanname="b0">Data organization</entry>
+	    </row>
+	    <row>
+	      <entry></entry>
+	      <entry></entry>
+	      <entry>Bit</entry>
+	      <entry>31</entry>
+	      <entry>30</entry>
+	      <entry>29</entry>
+	      <entry>28</entry>
+	      <entry>27</entry>
+	      <entry>26</entry>
+	      <entry>25</entry>
+	      <entry>24</entry>
+	      <entry>23</entry>
+	      <entry>22</entry>
+	      <entry>21</entry>
+	      <entry>20</entry>
+	      <entry>19</entry>
+	      <entry>18</entry>
+	      <entry>17</entry>
+	      <entry>16</entry>
+	      <entry>15</entry>
+	      <entry>14</entry>
+	      <entry>13</entry>
+	      <entry>12</entry>
+	      <entry>11</entry>
+	      <entry>10</entry>
+	      <entry>9</entry>
+	      <entry>8</entry>
+	      <entry>7</entry>
+	      <entry>6</entry>
+	      <entry>5</entry>
+	      <entry>4</entry>
+	      <entry>3</entry>
+	      <entry>2</entry>
+	      <entry>1</entry>
+	      <entry>0</entry>
+	    </row>
+	  </thead>
+	  <tbody valign="top">
+	    <row id="V4L2-MBUS-FMT-AHSV8888-1X32">
+	      <entry>V4L2_MBUS_FMT_AHSV8888_1X32</entry>
+	      <entry>0x6001</entry>
+	      <entry></entry>
+	      <entry>a<subscript>7</subscript></entry>
+	      <entry>a<subscript>6</subscript></entry>
+	      <entry>a<subscript>5</subscript></entry>
+	      <entry>a<subscript>4</subscript></entry>
+	      <entry>a<subscript>3</subscript></entry>
+	      <entry>a<subscript>2</subscript></entry>
+	      <entry>a<subscript>1</subscript></entry>
+	      <entry>a<subscript>0</subscript></entry>
+	      <entry>h<subscript>7</subscript></entry>
+	      <entry>h<subscript>6</subscript></entry>
+	      <entry>h<subscript>5</subscript></entry>
+	      <entry>h<subscript>4</subscript></entry>
+	      <entry>h<subscript>3</subscript></entry>
+	      <entry>h<subscript>2</subscript></entry>
+	      <entry>h<subscript>1</subscript></entry>
+	      <entry>h<subscript>0</subscript></entry>
+	      <entry>s<subscript>7</subscript></entry>
+	      <entry>s<subscript>6</subscript></entry>
+	      <entry>s<subscript>5</subscript></entry>
+	      <entry>s<subscript>4</subscript></entry>
+	      <entry>s<subscript>3</subscript></entry>
+	      <entry>s<subscript>2</subscript></entry>
+	      <entry>s<subscript>1</subscript></entry>
+	      <entry>s<subscript>0</subscript></entry>
+	      <entry>v<subscript>7</subscript></entry>
+	      <entry>v<subscript>6</subscript></entry>
+	      <entry>v<subscript>5</subscript></entry>
+	      <entry>v<subscript>4</subscript></entry>
+	      <entry>v<subscript>3</subscript></entry>
+	      <entry>v<subscript>2</subscript></entry>
+	      <entry>v<subscript>1</subscript></entry>
+	      <entry>v<subscript>0</subscript></entry>
+	    </row>
+	  </tbody>
+	</tgroup>
+      </table>
+    </section>
+
+    <section>
       <title>JPEG Compressed Formats</title>
 
       <para>Those data formats consist of an ordered sequence of 8-bit bytes
diff --git a/Documentation/DocBook/media/v4l/v4l2.xml b/Documentation/DocBook/media/v4l/v4l2.xml
index 8469fe1..74b7f27 100644
--- a/Documentation/DocBook/media/v4l/v4l2.xml
+++ b/Documentation/DocBook/media/v4l/v4l2.xml
@@ -141,6 +141,14 @@
 applications. -->
 
       <revision>
+	<revnumber>3.14</revnumber>
+	<date>2013-11-25</date>
+	<authorinitials>rr</authorinitials>
+	<revremark>Set width and height as unsigned on v4l2_rect.
+	</revremark>
+      </revision>
+
+      <revision>
 	<revnumber>3.11</revnumber>
 	<date>2013-05-26</date>
 	<authorinitials>hv</authorinitials>
@@ -501,7 +509,7 @@
 </partinfo>
 
 <title>Video for Linux Two API Specification</title>
- <subtitle>Revision 3.11</subtitle>
+ <subtitle>Revision 3.14</subtitle>
 
   <chapter id="common">
     &sub-common;
diff --git a/Documentation/DocBook/media/v4l/vidioc-cropcap.xml b/Documentation/DocBook/media/v4l/vidioc-cropcap.xml
index bf7cc97..1f5ed64 100644
--- a/Documentation/DocBook/media/v4l/vidioc-cropcap.xml
+++ b/Documentation/DocBook/media/v4l/vidioc-cropcap.xml
@@ -133,18 +133,14 @@
 rectangle, in pixels.</entry>
 	  </row>
 	  <row>
-	    <entry>__s32</entry>
+	    <entry>__u32</entry>
 	    <entry><structfield>width</structfield></entry>
 	    <entry>Width of the rectangle, in pixels.</entry>
 	  </row>
 	  <row>
-	    <entry>__s32</entry>
+	    <entry>__u32</entry>
 	    <entry><structfield>height</structfield></entry>
-	    <entry>Height of the rectangle, in pixels. Width
-and height cannot be negative, the fields are signed for
-hysterical reasons. <!-- video4linux-list@redhat.com
-on 22 Oct 2002 subject "Re:[V4L][patches!] Re:v4l2/kernel-2.5" -->
-</entry>
+	    <entry>Height of the rectangle, in pixels.</entry>
 	  </row>
 	</tbody>
       </tgroup>
diff --git a/Documentation/DocBook/media/v4l/vidioc-streamon.xml b/Documentation/DocBook/media/v4l/vidioc-streamon.xml
index 716ea15..65dff55 100644
--- a/Documentation/DocBook/media/v4l/vidioc-streamon.xml
+++ b/Documentation/DocBook/media/v4l/vidioc-streamon.xml
@@ -59,7 +59,7 @@
 queue) until <constant>VIDIOC_STREAMON</constant> has been called.
 Accordingly the output hardware is disabled, no video signal is
 produced until <constant>VIDIOC_STREAMON</constant> has been called.
-The ioctl will succeed only when at least one output buffer is in the
+The ioctl will succeed when at least one output buffer is in the
 incoming queue.</para>
 
     <para>The <constant>VIDIOC_STREAMOFF</constant> ioctl, apart of
diff --git a/Documentation/RCU/00-INDEX b/Documentation/RCU/00-INDEX
index 1d7a885..fa57139 100644
--- a/Documentation/RCU/00-INDEX
+++ b/Documentation/RCU/00-INDEX
@@ -8,6 +8,8 @@
 	- Using RCU to Protect Read-Mostly Linked Lists
 lockdep.txt
 	- RCU and lockdep checking
+lockdep-splat.txt
+	- RCU Lockdep splats explained.
 NMI-RCU.txt
 	- Using RCU to Protect Dynamic NMI Handlers
 rcubarrier.txt
diff --git a/Documentation/arm/00-INDEX b/Documentation/arm/00-INDEX
index 36420e1..a94090c 100644
--- a/Documentation/arm/00-INDEX
+++ b/Documentation/arm/00-INDEX
@@ -4,6 +4,8 @@
 	- requirements for booting
 Interrupts
 	- ARM Interrupt subsystem documentation
+IXP4xx
+	- Intel IXP4xx Network processor.
 msm
 	- MSM specific documentation
 Netwinder
@@ -24,8 +26,16 @@
 	- ST SPEAr platform Linux Overview
 VFP/
 	- Release notes for Linux Kernel Vector Floating Point support code
+cluster-pm-race-avoidance.txt
+	- Algorithm for CPU and Cluster setup/teardown
 empeg/
 	- Ltd's Empeg MP3 Car Audio Player
+firmware.txt
+	- Secure firmware registration and calling.
+kernel_mode_neon.txt
+	- How to use NEON instructions in kernel mode
+kernel_user_helpers.txt
+	- Helper functions in kernel space made available for userspace.
 mem_alignment
 	- alignment abort handler documentation
 memory.txt
@@ -34,3 +44,7 @@
 	- NWFPE floating point emulator documentation
 swp_emulation
 	- SWP/SWPB emulation handler/logging description
+tcm.txt
+	- ARM Tightly Coupled Memory
+vlocks.txt
+	- Voting locks, low-level mechanism relying on memory system atomic writes.
diff --git a/Documentation/blackfin/00-INDEX b/Documentation/blackfin/00-INDEX
index 2df0365..c54fcdd 100644
--- a/Documentation/blackfin/00-INDEX
+++ b/Documentation/blackfin/00-INDEX
@@ -1,8 +1,10 @@
 00-INDEX
 	- This file
-
+Makefile
+	- Makefile for gptimers example file.
 bfin-gpio-notes.txt
 	- Notes in developing/using bfin-gpio driver.
-
 bfin-spi-notes.txt
 	- Notes for using bfin spi bus driver.
+gptimers-example.c
+	- gptimers example
diff --git a/Documentation/block/00-INDEX b/Documentation/block/00-INDEX
index 929d990..e840b47 100644
--- a/Documentation/block/00-INDEX
+++ b/Documentation/block/00-INDEX
@@ -14,6 +14,8 @@
 	- Deadline IO scheduler tunables
 ioprio.txt
 	- Block io priorities (in CFQ scheduler)
+null_blk.txt
+	- Null block for block-layer benchmarking.
 queue-sysfs.txt
 	- Queue's sysfs entries
 request.txt
diff --git a/Documentation/block/biodoc.txt b/Documentation/block/biodoc.txt
index 8df5e8e..2101e71 100644
--- a/Documentation/block/biodoc.txt
+++ b/Documentation/block/biodoc.txt
@@ -447,14 +447,13 @@
  * main unit of I/O for the block layer and lower layers (ie drivers)
  */
 struct bio {
-       sector_t            bi_sector;
        struct bio          *bi_next;    /* request queue link */
        struct block_device *bi_bdev;	/* target device */
        unsigned long       bi_flags;    /* status, command, etc */
        unsigned long       bi_rw;       /* low bits: r/w, high: priority */
 
        unsigned int	bi_vcnt;     /* how may bio_vec's */
-       unsigned int	bi_idx;		/* current index into bio_vec array */
+       struct bvec_iter	bi_iter;	/* current index into bio_vec array */
 
        unsigned int	bi_size;     /* total size in bytes */
        unsigned short 	bi_phys_segments; /* segments after physaddr coalesce*/
@@ -480,7 +479,7 @@
 - Code that traverses the req list can find all the segments of a bio
   by using rq_for_each_segment.  This handles the fact that a request
   has multiple bios, each of which can have multiple segments.
-- Drivers which can't process a large bio in one shot can use the bi_idx
+- Drivers which can't process a large bio in one shot can use the bi_iter
   field to keep track of the next bio_vec entry to process.
   (e.g a 1MB bio_vec needs to be handled in max 128kB chunks for IDE)
   [TBD: Should preferably also have a bi_voffset and bi_vlen to avoid modifying
@@ -589,7 +588,7 @@
 nr_sectors and current_nr_sectors fields (based on the corresponding
 hard_xxx values and the number of bytes transferred) and updates it on
 every transfer that invokes end_that_request_first. It does the same for the
-buffer, bio, bio->bi_idx fields too.
+buffer, bio, bio->bi_iter fields too.
 
 The buffer field is just a virtual address mapping of the current segment
 of the i/o buffer in cases where the buffer resides in low-memory. For high
diff --git a/Documentation/block/biovecs.txt b/Documentation/block/biovecs.txt
new file mode 100644
index 0000000..74a32ad
--- /dev/null
+++ b/Documentation/block/biovecs.txt
@@ -0,0 +1,111 @@
+
+Immutable biovecs and biovec iterators:
+=======================================
+
+Kent Overstreet <kmo@daterainc.com>
+
+As of 3.13, biovecs should never be modified after a bio has been submitted.
+Instead, we have a new struct bvec_iter which represents a range of a biovec -
+the iterator will be modified as the bio is completed, not the biovec.
+
+More specifically, old code that needed to partially complete a bio would
+update bi_sector and bi_size, and advance bi_idx to the next biovec. If it
+ended up partway through a biovec, it would increment bv_offset and decrement
+bv_len by the number of bytes completed in that biovec.
+
+In the new scheme of things, everything that must be mutated in order to
+partially complete a bio is segregated into struct bvec_iter: bi_sector,
+bi_size and bi_idx have been moved there; and instead of modifying bv_offset
+and bv_len, struct bvec_iter has bi_bvec_done, which represents the number of
+bytes completed in the current bvec.
+
+There are a bunch of new helper macros for hiding the gory details - in
+particular, presenting the illusion of partially completed biovecs so that
+normal code doesn't have to deal with bi_bvec_done.
+
+ * Driver code should no longer refer to biovecs directly; we now have
+   bio_iovec() and bio_iter_iovec() macros that return literal struct biovecs,
+   constructed from the raw biovecs but taking into account bi_bvec_done and
+   bi_size.
+
+   bio_for_each_segment() has been updated to take a bvec_iter argument
+   instead of an integer (that corresponded to bi_idx); for a lot of code the
+   conversion just required changing the types of the arguments to
+   bio_for_each_segment().
+
+ * Advancing a bvec_iter is done with bio_advance_iter(); bio_advance() is a
+   wrapper around bio_advance_iter() that operates on bio->bi_iter, and also
+   advances the bio integrity's iter if present.
+
+   There is a lower level advance function - bvec_iter_advance() - which takes
+   a pointer to a biovec, not a bio; this is used by the bio integrity code.
+
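+A minimal sketch of the new iteration style (illustrative only; it assumes a
+struct bio *bio already in scope, and my_handle_segment() is a made-up
+placeholder rather than a real kernel API):
+
+	struct bio_vec bvec;
+	struct bvec_iter iter;
+
+	bio_for_each_segment(bvec, bio, iter) {
+		/* bvec is a literal struct bio_vec, built on the fly from
+		 * the raw biovec with bi_bvec_done and bi_size applied */
+		my_handle_segment(bvec.bv_page, bvec.bv_offset, bvec.bv_len);
+	}
+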
+What's all this get us?
+=======================
+
+Having a real iterator, and making biovecs immutable, has a number of
+advantages:
+
+ * Before, iterating over bios was very awkward when you weren't processing
+   exactly one bvec at a time - for example, bio_copy_data() in fs/bio.c,
+   which copies the contents of one bio into another. Because the biovecs
+   wouldn't necessarily be the same size, the old code was tricky and
+   convoluted - it had to walk two different bios at the same time, keeping
+   both bi_idx and an offset into the current biovec for each.
+
+   The new code is much more straightforward - have a look. This sort of
+   pattern comes up in a lot of places; a lot of drivers were essentially open
+   coding bvec iterators before, and having a common implementation considerably
+   simplifies a lot of code.
+
+ * Before, any code that might need to use the biovec after the bio had been
+   completed (perhaps to copy the data somewhere else, or perhaps to resubmit
+   it somewhere else if there was an error) had to save the entire bvec array
+   - again, this was being done in a fair number of places.
+
+ * Biovecs can be shared between multiple bios - a bvec iter can represent an
+   arbitrary range of an existing biovec, both starting and ending midway
+   through biovecs. This is what enables efficient splitting of arbitrary
+   bios. Note that this means we _only_ use bi_size to determine when we've
+   reached the end of a bio, not bi_vcnt - and the bio_iovec() macro takes
+   bi_size into account when constructing biovecs.
+
+ * Splitting bios is now much simpler. The old bio_split() didn't even work on
+   bios with more than a single bvec! Now, we can efficiently split arbitrary
+   size bios - because the new bio can share the old bio's biovec.
+
+   Care must be taken to ensure the biovec isn't freed while the split bio is
+   still using it, in case the original bio completes first, though. Using
+   bio_chain() when splitting bios helps with this.
+
+ * Submitting partially completed bios is now perfectly fine - this comes up
+   occasionally in stacking block drivers and various code (e.g. md and
+   bcache) had some ugly workarounds for this.
+
+   It used to be the case that submitting a partially completed bio would work
+   fine to _most_ devices, but since accessing the raw bvec array was the
+   norm, not all drivers would respect bi_idx and those would break. Now,
+   since all drivers _must_ go through the bvec iterator - and have been
+   audited to make sure they are - submitting partially completed bios is
+   perfectly fine.
+
+Other implications:
+===================
+
+ * Almost all usage of bi_idx is now incorrect and has been removed; instead,
+   where previously you would have used bi_idx you'd now use a bvec_iter,
+   probably passing it to one of the helper macros.
+
+   I.e. instead of using bio_iovec_idx() (or bio->bi_io_vec[bio->bi_idx]), you
+   now use bio_iter_iovec(), which takes a bvec_iter and returns a
+   literal struct bio_vec - constructed on the fly from the raw biovec but
+   taking into account bi_bvec_done (and bi_size). See the short sketch at
+   the end of this document.
+
+ * bi_vcnt can't be trusted or relied upon by driver code - i.e. anything that
+   doesn't actually own the bio. The reason is twofold: firstly, it's not
+   actually needed for iterating over the bio anymore - we only use bi_size.
+   Secondly, when cloning a bio and reusing (a portion of) the original bio's
+   biovec, in order to calculate bi_vcnt for the new bio we'd have to iterate
+   over all the biovecs in the new bio - which is silly as it's not needed.
+
+   So, don't use bi_vcnt anymore.
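+
+A short sketch of the bio_iter_iovec() usage mentioned above (illustrative
+only, not taken from any particular driver; it assumes a struct bio *bio in
+scope):
+
+	/* old style (pre-3.13), no longer valid: */
+	struct bio_vec *old_bv = &bio->bi_io_vec[bio->bi_idx];
+
+	/* new style: a struct bio_vec value built from the iterator */
+	struct bio_vec bv = bio_iter_iovec(bio, bio->bi_iter);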
diff --git a/drivers/staging/zram/zram.txt b/Documentation/blockdev/zram.txt
similarity index 90%
rename from drivers/staging/zram/zram.txt
rename to Documentation/blockdev/zram.txt
index 765d790..2eccddf 100644
--- a/drivers/staging/zram/zram.txt
+++ b/Documentation/blockdev/zram.txt
@@ -1,8 +1,6 @@
 zram: Compressed RAM based block devices
 ----------------------------------------
 
-Project home: http://compcache.googlecode.com/
-
 * Introduction
 
 The zram module creates RAM based block devices named /dev/zram<id>
@@ -69,9 +67,5 @@
 	resets the disksize to zero. You must set the disksize again
 	before reusing the device.
 
-Please report any problems at:
- - Mailing list: linux-mm-cc at laptop dot org
- - Issue tracker: http://code.google.com/p/compcache/issues/list
-
 Nitin Gupta
 ngupta@vflare.org
diff --git a/Documentation/cgroups/resource_counter.txt b/Documentation/cgroups/resource_counter.txt
index 52e1da1..5108afb 100644
--- a/Documentation/cgroups/resource_counter.txt
+++ b/Documentation/cgroups/resource_counter.txt
@@ -95,7 +95,7 @@
 
  f. u64 res_counter_uncharge_until
 		(struct res_counter *rc, struct res_counter *top,
-		 unsinged long val)
+		 unsigned long val)
 
 	Almost same as res_counter_uncharge() but propagation of uncharge
 	stops when rc == top. This is useful when kill a res_counter in
diff --git a/Documentation/devicetree/00-INDEX b/Documentation/devicetree/00-INDEX
index b78f691..8c4102c 100644
--- a/Documentation/devicetree/00-INDEX
+++ b/Documentation/devicetree/00-INDEX
@@ -8,3 +8,5 @@
 	- this file
 booting-without-of.txt
 	- Booting Linux without Open Firmware, describes history and format of device trees.
+usage-model.txt
+	- How Linux uses DT and what DT aims to solve.
\ No newline at end of file
diff --git a/Documentation/devicetree/bindings/arm/bcm/kona-timer.txt b/Documentation/devicetree/bindings/arm/bcm/kona-timer.txt
index 17d88b2..39adf54 100644
--- a/Documentation/devicetree/bindings/arm/bcm/kona-timer.txt
+++ b/Documentation/devicetree/bindings/arm/bcm/kona-timer.txt
@@ -8,13 +8,18 @@
 - DEPRECATED: compatible : "bcm,kona-timer"
 - reg : Register range for the timer
 - interrupts : interrupt for the timer
+- clocks: phandle + clock specifier pair of the external clock
 - clock-frequency: frequency that the clock operates
 
+Only one of clocks or clock-frequency should be specified.
+
+Refer to clocks/clock-bindings.txt for generic clock consumer properties.
+
 Example:
 	timer@35006000 {
 		compatible = "brcm,kona-timer";
 		reg = <0x35006000 0x1000>;
 		interrupts = <0x0 7 0x4>;
-		clock-frequency = <32768>;
+		clocks = <&hub_timer_clk>;
 	};
 
diff --git a/Documentation/devicetree/bindings/clock/bcm-kona-clock.txt b/Documentation/devicetree/bindings/clock/bcm-kona-clock.txt
new file mode 100644
index 0000000..56d1f49
--- /dev/null
+++ b/Documentation/devicetree/bindings/clock/bcm-kona-clock.txt
@@ -0,0 +1,93 @@
+Broadcom Kona Family Clocks
+
+This binding is associated with Broadcom SoCs having "Kona" style
+clock control units (CCUs).  A CCU is a clock provider that manages
+a set of clock signals.  Each CCU is represented by a node in the
+device tree.
+
+This binding uses the common clock binding:
+    Documentation/devicetree/bindings/clock/clock-bindings.txt
+
+Required properties:
+- compatible
+	Shall have one of the following values:
+	- "brcm,bcm11351-root-ccu"
+	- "brcm,bcm11351-aon-ccu"
+	- "brcm,bcm11351-hub-ccu"
+	- "brcm,bcm11351-master-ccu"
+	- "brcm,bcm11351-slave-ccu"
+- reg
+	Shall define the base and range of the address space
+	containing clock control registers
+- #clock-cells
+	Shall have value <1>.  The permitted clock-specifier values
+	are defined below.
+- clock-output-names
+	Shall be an ordered list of strings defining the names of
+	the clocks provided by the CCU.
+
+
+BCM281XX family SoCs use Kona CCUs.  The following table defines
+the set of CCUs and clock specifiers for BCM281XX clocks.  When
+a clock consumer references a clock, its symbolic specifier
+(rather than its numeric index value) should be used.  These
+specifiers are defined in "include/dt-bindings/clock/bcm281xx.h".
+
+    CCU     Clock           Type    Index   Specifier
+    ---     -----           ----    -----   ---------
+    root    frac_1m         peri      0     BCM281XX_ROOT_CCU_FRAC_1M
+
+    aon     hub_timer       peri      0     BCM281XX_AON_CCU_HUB_TIMER
+    aon     pmu_bsc         peri      1     BCM281XX_AON_CCU_PMU_BSC
+    aon     pmu_bsc_var     peri      2     BCM281XX_AON_CCU_PMU_BSC_VAR
+
+    hub     tmon_1m         peri      0     BCM281XX_HUB_CCU_TMON_1M
+
+    master  sdio1           peri      0     BCM281XX_MASTER_CCU_SDIO1
+    master  sdio2           peri      1     BCM281XX_MASTER_CCU_SDIO2
+    master  sdio3           peri      2     BCM281XX_MASTER_CCU_SDIO3
+    master  sdio4           peri      3     BCM281XX_MASTER_CCU_SDIO4
+    master  dmac            peri      4     BCM281XX_MASTER_CCU_DMAC
+    master  usb_ic          peri      5     BCM281XX_MASTER_CCU_USB_IC
+    master  hsic2_48m       peri      6     BCM281XX_MASTER_CCU_HSIC_48M
+    master  hsic2_12m       peri      7     BCM281XX_MASTER_CCU_HSIC_12M
+
+    slave   uartb           peri      0     BCM281XX_SLAVE_CCU_UARTB
+    slave   uartb2          peri      1     BCM281XX_SLAVE_CCU_UARTB2
+    slave   uartb3          peri      2     BCM281XX_SLAVE_CCU_UARTB3
+    slave   uartb4          peri      3     BCM281XX_SLAVE_CCU_UARTB4
+    slave   ssp0            peri      4     BCM281XX_SLAVE_CCU_SSP0
+    slave   ssp2            peri      5     BCM281XX_SLAVE_CCU_SSP2
+    slave   bsc1            peri      6     BCM281XX_SLAVE_CCU_BSC1
+    slave   bsc2            peri      7     BCM281XX_SLAVE_CCU_BSC2
+    slave   bsc3            peri      8     BCM281XX_SLAVE_CCU_BSC3
+    slave   pwm             peri      9     BCM281XX_SLAVE_CCU_PWM
+
+
+Device tree example:
+
+	slave_ccu: slave_ccu {
+		compatible = "brcm,bcm11351-slave-ccu";
+		reg = <0x3e011000 0x0f00>;
+		#clock-cells = <1>;
+		clock-output-names = "uartb",
+				     "uartb2",
+				     "uartb3",
+				     "uartb4";
+	};
+
+	ref_crystal_clk: ref_crystal {
+		#clock-cells = <0>;
+		compatible = "fixed-clock";
+		clock-frequency = <26000000>;
+	};
+
+	uart@3e002000 {
+		compatible = "brcm,bcm11351-dw-apb-uart", "snps,dw-apb-uart";
+		status = "disabled";
+		reg = <0x3e002000 0x1000>;
+		clocks = <&slave_ccu BCM281XX_SLAVE_CCU_UARTB3>;
+		interrupts = <GIC_SPI 65 IRQ_TYPE_LEVEL_HIGH>;
+		reg-shift = <2>;
+		reg-io-width = <4>;
+	};
diff --git a/Documentation/devicetree/bindings/clock/corenet-clock.txt b/Documentation/devicetree/bindings/clock/corenet-clock.txt
new file mode 100644
index 0000000..24711af
--- /dev/null
+++ b/Documentation/devicetree/bindings/clock/corenet-clock.txt
@@ -0,0 +1,134 @@
+* Clock Block on Freescale CoreNet Platforms
+
+Freescale CoreNet chips take primary clocking input from the external
+SYSCLK signal. The SYSCLK input (frequency) is multiplied using
+multiple phase locked loops (PLL) to create a variety of frequencies
+which can then be passed to a variety of internal logic, including
+cores and peripheral IP blocks.
+Please refer to the Reference Manual for details.
+
+1. Clock Block Binding
+
+Required properties:
+- compatible: Should contain a specific clock block compatible string
+	and a single chassis clock compatible string.
+	Clock block strings include, but are not limited to, one of the following:
+	* "fsl,p2041-clockgen"
+	* "fsl,p3041-clockgen"
+	* "fsl,p4080-clockgen"
+	* "fsl,p5020-clockgen"
+	* "fsl,p5040-clockgen"
+	* "fsl,t4240-clockgen"
+	* "fsl,b4420-clockgen"
+	* "fsl,b4860-clockgen"
+	Chassis clock strings include:
+	* "fsl,qoriq-clockgen-1.0": for chassis 1.0 clocks
+	* "fsl,qoriq-clockgen-2.0": for chassis 2.0 clocks
+- reg: Describes the address of the device's resources within the
+	address space defined by its parent bus, and resource zero
+	represents the clock register set
+- clock-frequency: Input system clock frequency
+
+Recommended properties:
+- ranges: Allows valid translation between child's address space and
+	parent's. Must be present if the device has sub-nodes.
+- #address-cells: Specifies the number of cells used to represent
+	physical base addresses.  Must be present if the device has
+	sub-nodes and set to 1 if present
+- #size-cells: Specifies the number of cells used to represent
+	the size of an address. Must be present if the device has
+	sub-nodes and set to 1 if present
+
+2. Clock Provider/Consumer Binding
+
+Most of the bindings are from the common clock binding[1].
+ [1] Documentation/devicetree/bindings/clock/clock-bindings.txt
+
+Required properties:
+- compatible : Should include one of the following:
+	* "fsl,qoriq-core-pll-1.0" for core PLL clocks (v1.0)
+	* "fsl,qoriq-core-pll-2.0" for core PLL clocks (v2.0)
+	* "fsl,qoriq-core-mux-1.0" for core mux clocks (v1.0)
+	* "fsl,qoriq-core-mux-2.0" for core mux clocks (v2.0)
+	* "fsl,qoriq-sysclk-1.0": for input system clock (v1.0).
+		It takes parent's clock-frequency as its clock.
+	* "fsl,qoriq-sysclk-2.0": for input system clock (v2.0).
+		It takes parent's clock-frequency as its clock.
+- #clock-cells: From common clock binding. The number of cells in a
+	clock-specifier. Should be <0> for "fsl,qoriq-sysclk-[1,2].0"
+	clocks, or <1> for "fsl,qoriq-core-pll-[1,2].0" clocks.
+	For "fsl,qoriq-core-pll-[1,2].0" clocks, the single
+	clock-specifier cell may take the following values:
+	* 0 - equal to the PLL frequency
+	* 1 - equal to the PLL frequency divided by 2
+	* 2 - equal to the PLL frequency divided by 4
+
+Recommended properties:
+- clocks: Should be the phandle of input parent clock
+- clock-names: From common clock binding, indicates the clock name
+- clock-output-names: From common clock binding, indicates the names of
+	output clocks
+- reg: Should be the offset and length of clock block base address.
+	The length should be 4.
+
+Example for clock block and clock provider:
+/ {
+	clockgen: global-utilities@e1000 {
+		compatible = "fsl,p5020-clockgen", "fsl,qoriq-clockgen-1.0";
+		ranges = <0x0 0xe1000 0x1000>;
+		clock-frequency = <133333333>;
+		reg = <0xe1000 0x1000>;
+		#address-cells = <1>;
+		#size-cells = <1>;
+
+		sysclk: sysclk {
+			#clock-cells = <0>;
+			compatible = "fsl,qoriq-sysclk-1.0";
+			clock-output-names = "sysclk";
+		};
+
+		pll0: pll0@800 {
+			#clock-cells = <1>;
+			reg = <0x800 0x4>;
+			compatible = "fsl,qoriq-core-pll-1.0";
+			clocks = <&sysclk>;
+			clock-output-names = "pll0", "pll0-div2";
+		};
+
+		pll1: pll1@820 {
+			#clock-cells = <1>;
+			reg = <0x820 0x4>;
+			compatible = "fsl,qoriq-core-pll-1.0";
+			clocks = <&sysclk>;
+			clock-output-names = "pll1", "pll1-div2";
+		};
+
+		mux0: mux0@0 {
+			#clock-cells = <0>;
+			reg = <0x0 0x4>;
+			compatible = "fsl,qoriq-core-mux-1.0";
+			clocks = <&pll0 0>, <&pll0 1>, <&pll1 0>, <&pll1 1>;
+			clock-names = "pll0", "pll0-div2", "pll1", "pll1-div2";
+			clock-output-names = "cmux0";
+		};
+
+		mux1: mux1@20 {
+			#clock-cells = <0>;
+			reg = <0x20 0x4>;
+			compatible = "fsl,qoriq-core-mux-1.0";
+			clocks = <&pll0 0>, <&pll0 1>, <&pll1 0>, <&pll1 1>;
+			clock-names = "pll0", "pll0-div2", "pll1", "pll1-div2";
+			clock-output-names = "cmux1";
+		};
+	};
+  };
+
+Example for clock consumer:
+
+/ {
+	cpu0: PowerPC,e5500@0 {
+		...
+		clocks = <&mux0>;
+		...
+	};
+  };
diff --git a/Documentation/devicetree/bindings/dma/bcm2835-dma.txt b/Documentation/devicetree/bindings/dma/bcm2835-dma.txt
new file mode 100644
index 0000000..1396078
--- /dev/null
+++ b/Documentation/devicetree/bindings/dma/bcm2835-dma.txt
@@ -0,0 +1,57 @@
+* BCM2835 DMA controller
+
+The BCM2835 DMA controller has 16 channels in total.
+Only the lower 13 channels have an associated IRQ.
+Some arbitrary channels are used by the firmware
+(1,3,6,7 in the current firmware version).
+The channels 0,2 and 3 have special functionality
+and should not be used by the driver.
+
+Required properties:
+- compatible: Should be "brcm,bcm2835-dma".
+- reg: Should contain DMA registers location and length.
+- interrupts: Should contain the DMA interrupts associated
+		to the DMA channels in ascending order.
+- #dma-cells: Must be <1>, the cell in the dmas property of the
+		client device represents the DREQ number.
+- brcm,dma-channel-mask: Bit mask representing the channels
+			 not used by the firmware in ascending order,
+			 i.e. first channel corresponds to LSB.
+
+Example:
+
+dma: dma@7e007000 {
+	compatible = "brcm,bcm2835-dma";
+	reg = <0x7e007000 0xf00>;
+	interrupts = <1 16>,
+		     <1 17>,
+		     <1 18>,
+		     <1 19>,
+		     <1 20>,
+		     <1 21>,
+		     <1 22>,
+		     <1 23>,
+		     <1 24>,
+		     <1 25>,
+		     <1 26>,
+		     <1 27>,
+		     <1 28>;
+
+	#dma-cells = <1>;
+	brcm,dma-channel-mask = <0x7f35>;
+};
+
+DMA clients connected to the BCM2835 DMA controller must use the format
+described in the dma.txt file, using a two-cell specifier for each channel.
+
+Example:
+
+bcm2835_i2s: i2s@7e203000 {
+	compatible = "brcm,bcm2835-i2s";
+	reg = <	0x7e203000 0x20>,
+	      < 0x7e101098 0x02>;
+
+	dmas = <&dma 2>,
+	       <&dma 3>;
+	dma-names = "tx", "rx";
+};
diff --git a/Documentation/devicetree/bindings/dma/fsl-imx-sdma.txt b/Documentation/devicetree/bindings/dma/fsl-imx-sdma.txt
index 4fa814d..68b83ec 100644
--- a/Documentation/devicetree/bindings/dma/fsl-imx-sdma.txt
+++ b/Documentation/devicetree/bindings/dma/fsl-imx-sdma.txt
@@ -42,6 +42,7 @@
 	19	IPU Memory
 	20	ASRC
 	21	ESAI
+	22	SSI Dual FIFO	(needs firmware ver >= 2)
 
 The third cell specifies the transfer priority as below.
 
diff --git a/Documentation/devicetree/bindings/dma/moxa,moxart-dma.txt b/Documentation/devicetree/bindings/dma/moxa,moxart-dma.txt
new file mode 100644
index 0000000..8a9f355
--- /dev/null
+++ b/Documentation/devicetree/bindings/dma/moxa,moxart-dma.txt
@@ -0,0 +1,45 @@
+MOXA ART DMA Controller
+
+See dma.txt first
+
+Required properties:
+
+- compatible :	Must be "moxa,moxart-dma"
+- reg :		Should contain registers location and length
+- interrupts :	Should contain an interrupt-specifier for the sole
+		interrupt generated by the device
+- #dma-cells :	Should be 1, a single cell holding a line request number
+
+Example:
+
+	dma: dma@90500000 {
+		compatible = "moxa,moxart-dma";
+		reg = <0x90500080 0x40>;
+		interrupts = <24 0>;
+		#dma-cells = <1>;
+	};
+
+
+Clients:
+
+DMA clients connected to the MOXA ART DMA controller must use the format
+described in the dma.txt file, using a two-cell specifier for each channel:
+a phandle plus one integer cell.
+The two cells in order are:
+
+1. A phandle pointing to the DMA controller.
+2. Peripheral identifier for the hardware handshaking interface.
+
+Example:
+Use the specific request line passed from the DMA controller.
+For example, the MMC request line is 5:
+
+	sdhci: sdhci@98e00000 {
+		compatible = "moxa,moxart-sdhci";
+		reg = <0x98e00000 0x5C>;
+		interrupts = <5 0>;
+		clocks = <&clk_apb>;
+		dmas =  <&dma 5>,
+			<&dma 5>;
+		dma-names = "tx", "rx";
+	};
diff --git a/Documentation/devicetree/bindings/gpu/nvidia,tegra20-host1x.txt b/Documentation/devicetree/bindings/gpu/nvidia,tegra20-host1x.txt
index ab45c02..efaeec8 100644
--- a/Documentation/devicetree/bindings/gpu/nvidia,tegra20-host1x.txt
+++ b/Documentation/devicetree/bindings/gpu/nvidia,tegra20-host1x.txt
@@ -118,6 +118,9 @@
     See ../reset/reset.txt for details.
   - reset-names: Must include the following entries:
     - dc
+  - nvidia,head: The number of the display controller head. This is used to
+    setup the various types of output to receive video data from the given
+    head.
 
   Each display controller node has a child node, named "rgb", that represents
   the RGB output associated with the controller. It can take the following
@@ -125,6 +128,7 @@
   - nvidia,ddc-i2c-bus: phandle of an I2C controller used for DDC EDID probing
   - nvidia,hpd-gpio: specifies a GPIO used for hotplug detection
   - nvidia,edid: supplies a binary EDID blob
+  - nvidia,panel: phandle of a display panel
 
 - hdmi: High Definition Multimedia Interface
 
@@ -149,6 +153,7 @@
   - nvidia,ddc-i2c-bus: phandle of an I2C controller used for DDC EDID probing
   - nvidia,hpd-gpio: specifies a GPIO used for hotplug detection
   - nvidia,edid: supplies a binary EDID blob
+  - nvidia,panel: phandle of a display panel
 
 - tvo: TV encoder output
 
@@ -169,11 +174,21 @@
   - clock-names: Must include the following entries:
     - dsi
       This MUST be the first entry.
+    - lp
     - parent
   - resets: Must contain an entry for each entry in reset-names.
     See ../reset/reset.txt for details.
   - reset-names: Must include the following entries:
     - dsi
+  - nvidia,mipi-calibrate: Should contain a phandle and a specifier specifying
+    which pads are used by this DSI output and need to be calibrated. See also
+    ../mipi/nvidia,tegra114-mipi.txt.
+
+  Optional properties:
+  - nvidia,ddc-i2c-bus: phandle of an I2C controller used for DDC EDID probing
+  - nvidia,hpd-gpio: specifies a GPIO used for hotplug detection
+  - nvidia,edid: supplies a binary EDID blob
+  - nvidia,panel: phandle of a display panel
 
 Example:
 
@@ -253,7 +268,7 @@
 			interrupts = <0 73 0x04>;
 			clocks = <&tegra_car TEGRA20_CLK_DISP1>,
 				 <&tegra_car TEGRA20_CLK_PLL_P>;
-			clock-names = "disp1", "parent";
+			clock-names = "dc", "parent";
 			resets = <&tegra_car 27>;
 			reset-names = "dc";
 
@@ -268,7 +283,7 @@
 			interrupts = <0 74 0x04>;
 			clocks = <&tegra_car TEGRA20_CLK_DISP2>,
 				 <&tegra_car TEGRA20_CLK_PLL_P>;
-			clock-names = "disp2", "parent";
+			clock-names = "dc", "parent";
 			resets = <&tegra_car 26>;
 			reset-names = "dc";
 
diff --git a/Documentation/devicetree/bindings/interrupt-controller/lsi,zevio-intc.txt b/Documentation/devicetree/bindings/interrupt-controller/lsi,zevio-intc.txt
new file mode 100644
index 0000000..aee38e7
--- /dev/null
+++ b/Documentation/devicetree/bindings/interrupt-controller/lsi,zevio-intc.txt
@@ -0,0 +1,18 @@
+TI-NSPIRE interrupt controller
+
+Required properties:
+- compatible: Compatible property value should be "lsi,zevio-intc".
+
+- reg: Physical base address of the controller and length of memory mapped
+	region.
+
+- interrupt-controller : Identifies the node as an interrupt controller
+
+Example:
+
+interrupt-controller {
+	compatible = "lsi,zevio-intc";
+	interrupt-controller;
+	reg = <0xDC000000 0x1000>;
+	#interrupt-cells = <1>;
+};
diff --git a/Documentation/devicetree/bindings/media/exynos-jpeg-codec.txt b/Documentation/devicetree/bindings/media/exynos-jpeg-codec.txt
new file mode 100644
index 0000000..937b755
--- /dev/null
+++ b/Documentation/devicetree/bindings/media/exynos-jpeg-codec.txt
@@ -0,0 +1,11 @@
+Samsung S5P/EXYNOS SoC series JPEG codec
+
+Required properties:
+
+- compatible	: should be one of:
+		  "samsung,s5pv210-jpeg", "samsung,exynos4210-jpeg";
+- reg		: address and length of the JPEG codec IP register set;
+- interrupts	: specifies the JPEG codec IP interrupt;
+- clocks	: should contain the JPEG codec IP gate clock specifier, from the
+		  common clock bindings;
+- clock-names	: should contain "jpeg" entry.
diff --git a/Documentation/devicetree/bindings/media/samsung-s5k5baf.txt b/Documentation/devicetree/bindings/media/samsung-s5k5baf.txt
new file mode 100644
index 0000000..1f51e04
--- /dev/null
+++ b/Documentation/devicetree/bindings/media/samsung-s5k5baf.txt
@@ -0,0 +1,58 @@
+Samsung S5K5BAF UXGA 1/5" 2M CMOS Image Sensor with embedded SoC ISP
+--------------------------------------------------------------------
+
+Required properties:
+
+- compatible	  : "samsung,s5k5baf";
+- reg		  : I2C slave address of the sensor;
+- vdda-supply	  : analog power supply 2.8V (2.6V to 3.0V);
+- vddreg-supply	  : regulator input power supply 1.8V (1.7V to 1.9V)
+		    or 2.8V (2.6V to 3.0V);
+- vddio-supply	  : I/O power supply 1.8V (1.65V to 1.95V)
+		    or 2.8V (2.5V to 3.1V);
+- stbyn-gpios	  : GPIO connected to STDBYN pin;
+- rstn-gpios	  : GPIO connected to RSTN pin;
+- clocks	  : list of phandle and clock specifier pairs
+		    according to common clock bindings for the
+		    clocks described in clock-names;
+- clock-names	  : should include "mclk" for the sensor's master clock;
+
+Optional properties:
+
+- clock-frequency : the frequency at which the "mclk" clock should be
+		    configured to operate, in Hz; if this property is not
+		    specified, the default 24 MHz value will be used.
+
+The device node should contain one 'port' child node with one child 'endpoint'
+node, according to the bindings defined in Documentation/devicetree/bindings/
+media/video-interfaces.txt. The following are properties specific to those
+nodes.
+
+endpoint node
+-------------
+
+- data-lanes : (optional) specifies MIPI CSI-2 data lanes as covered in
+	       video-interfaces.txt. If present it should be <1> - the device
+	       supports only one data lane without re-mapping.
+
+Example:
+
+s5k5bafx@2d {
+	compatible = "samsung,s5k5baf";
+	reg = <0x2d>;
+	vdda-supply = <&cam_io_en_reg>;
+	vddreg-supply = <&vt_core_15v_reg>;
+	vddio-supply = <&vtcam_reg>;
+	stbyn-gpios = <&gpl2 0 1>;
+	rstn-gpios = <&gpl2 1 1>;
+	clock-names = "mclk";
+	clocks = <&clock_cam 0>;
+	clock-frequency = <24000000>;
+
+	port {
+		s5k5bafx_ep: endpoint {
+			remote-endpoint = <&csis1_ep>;
+			data-lanes = <1>;
+		};
+	};
+};
diff --git a/Documentation/devicetree/bindings/mipi/dsi/mipi-dsi-bus.txt b/Documentation/devicetree/bindings/mipi/dsi/mipi-dsi-bus.txt
new file mode 100644
index 0000000..973c272
--- /dev/null
+++ b/Documentation/devicetree/bindings/mipi/dsi/mipi-dsi-bus.txt
@@ -0,0 +1,98 @@
+MIPI DSI (Display Serial Interface) busses
+==========================================
+
+The MIPI Display Serial Interface specifies a serial bus and a protocol for
+communication between a host and up to four peripherals. This document will
+define the syntax used to represent a DSI bus in a device tree.
+
+This document describes only DSI bus-specific properties, or defines existing
+standard properties in the context of the DSI bus.
+
+Each DSI host provides a DSI bus. The DSI host controller's node contains a
+set of properties that characterize the bus. Child nodes describe individual
+peripherals on that bus.
+
+The following assumes that only a single peripheral is connected to a DSI
+host. Experience shows that this is true for the large majority of setups.
+
+DSI host
+--------
+
+In addition to the standard properties and those defined by the parent bus of
+a DSI host, the following properties apply to a node representing a DSI host.
+
+Required properties:
+- #address-cells: The number of cells required to represent an address on the
+  bus. DSI peripherals are addressed using a 2-bit virtual channel number, so
+  a maximum of 4 devices can be addressed on a single bus. Hence the value of
+  this property should be 1.
+- #size-cells: Should be 0. There are cases where it makes sense to use a
+  different value here. See below.
+
+DSI peripheral
+--------------
+
+Peripherals are represented as child nodes of the DSI host's node. Properties
+described here apply to all DSI peripherals, but individual bindings may want
+to define additional, device-specific properties.
+
+Required properties:
+- reg: The virtual channel number of a DSI peripheral. Must be in the range
+  from 0 to 3.
+
+Some DSI peripherals respond to more than a single virtual channel. In that
+case two alternative representations can be chosen:
+- The reg property can take multiple entries, one for each virtual channel
+  that the peripheral responds to.
+- If the virtual channels that a peripheral responds to are consecutive, the
+  #size-cells can be set to 1. The first cell of each entry in the reg
+  property is the number of the first virtual channel and the second cell is
+  the number of consecutive virtual channels.
+
+Example
+-------
+
+	dsi-host {
+		...
+
+		#address-cells = <1>;
+		#size-cells = <0>;
+
+		/* peripheral responds to virtual channel 0 */
+		peripheral@0 {
+			compatible = "...";
+			reg = <0>;
+		};
+
+		...
+	};
+
+	dsi-host {
+		...
+
+		#address-cells = <1>;
+		#size-cells = <0>;
+
+		/* peripheral responds to virtual channels 0 and 2 */
+		peripheral@0 {
+			compatible = "...";
+			reg = <0>, <2>;
+		};
+
+		...
+	};
+
+	dsi-host {
+		...
+
+		#address-cells = <1>;
+		#size-cells = <1>;
+
+		/* peripheral responds to virtual channels 1, 2 and 3 */
+		peripheral@1 {
+			compatible = "...";
+			reg = <1 3>;
+		};
+
+		...
+	};
diff --git a/Documentation/devicetree/bindings/mipi/nvidia,tegra114-mipi.txt b/Documentation/devicetree/bindings/mipi/nvidia,tegra114-mipi.txt
new file mode 100644
index 0000000..e4a25ce
--- /dev/null
+++ b/Documentation/devicetree/bindings/mipi/nvidia,tegra114-mipi.txt
@@ -0,0 +1,41 @@
+NVIDIA Tegra MIPI pad calibration controller
+
+Required properties:
+- compatible: "nvidia,tegra<chip>-mipi"
+- reg: Physical base address and length of the controller's registers.
+- clocks: Must contain an entry for each entry in clock-names.
+  See ../clocks/clock-bindings.txt for details.
+- clock-names: Must include the following entries:
+  - mipi-cal
+- #nvidia,mipi-calibrate-cells: Should be 1. The cell is a bitmask of the pads
+  that need to be calibrated for a given device.
+
+User nodes need to contain an nvidia,mipi-calibrate property that has a
+phandle to refer to the calibration controller node and a bitmask of the pads
+that need to be calibrated.
+
+Example:
+
+	mipi: mipi@700e3000 {
+		compatible = "nvidia,tegra114-mipi";
+		reg = <0x700e3000 0x100>;
+		clocks = <&tegra_car TEGRA114_CLK_MIPI_CAL>;
+		clock-names = "mipi-cal";
+		#nvidia,mipi-calibrate-cells = <1>;
+	};
+
+	...
+
+	host1x@50000000 {
+		...
+
+		dsi@54300000 {
+			...
+
+			nvidia,mipi-calibrate = <&mipi 0x060>;
+
+			...
+		};
+
+		...
+	};
diff --git a/Documentation/devicetree/bindings/mmc/atmel-hsmci.txt b/Documentation/devicetree/bindings/mmc/atmel-hsmci.txt
index 0a85c70..07ad020 100644
--- a/Documentation/devicetree/bindings/mmc/atmel-hsmci.txt
+++ b/Documentation/devicetree/bindings/mmc/atmel-hsmci.txt
@@ -13,6 +13,9 @@
 - #address-cells: should be one. The cell is the slot id.
 - #size-cells: should be zero.
 - at least one slot node
+- clock-names: tuple listing input clock names.
+	Required elements: "mci_clk"
+- clocks: phandles to input clocks.
 
 The node contains child nodes for each slot that the platform uses
 
@@ -24,6 +27,8 @@
 	interrupts = <12 4>;
 	#address-cells = <1>;
 	#size-cells = <0>;
+	clock-names = "mci_clk";
+	clocks = <&mci0_clk>;
 
 	[ child node definitions...]
 };
diff --git a/Documentation/devicetree/bindings/mmc/kona-sdhci.txt b/Documentation/devicetree/bindings/mmc/kona-sdhci.txt
index 789fb07..aaba248 100644
--- a/Documentation/devicetree/bindings/mmc/kona-sdhci.txt
+++ b/Documentation/devicetree/bindings/mmc/kona-sdhci.txt
@@ -6,12 +6,16 @@
 Required properties:
 - compatible : Should be "brcm,kona-sdhci"
 - DEPRECATED: compatible : Should be "bcm,kona-sdhci"
+- clocks: phandle + clock specifier pair of the external clock
+
+Refer to clocks/clock-bindings.txt for generic clock consumer properties.
 
 Example:
 
 sdio2: sdio@0x3f1a0000 {
 	compatible = "brcm,kona-sdhci";
 	reg = <0x3f1a0000 0x10000>;
+	clocks = <&sdio3_clk>;
 	interrupts = <0x0 74 0x4>;
 };
 
diff --git a/Documentation/devicetree/bindings/net/allwinner,sun4i-emac.txt b/Documentation/devicetree/bindings/net/allwinner,sun4i-emac.txt
index b90bfcd..863d5b81 100644
--- a/Documentation/devicetree/bindings/net/allwinner,sun4i-emac.txt
+++ b/Documentation/devicetree/bindings/net/allwinner,sun4i-emac.txt
@@ -1,7 +1,8 @@
 * Allwinner EMAC ethernet controller
 
 Required properties:
-- compatible: should be "allwinner,sun4i-emac".
+- compatible: should be "allwinner,sun4i-a10-emac" (Deprecated:
+              "allwinner,sun4i-emac")
 - reg: address and length of the register set for the device.
 - interrupts: interrupt for the device
 - phy: A phandle to a phy node defining the PHY address (as the reg
@@ -14,7 +15,7 @@
 Example:
 
 emac: ethernet@01c0b000 {
-       compatible = "allwinner,sun4i-emac";
+       compatible = "allwinner,sun4i-a10-emac";
        reg = <0x01c0b000 0x1000>;
        interrupts = <55>;
        clocks = <&ahb_gates 17>;
diff --git a/Documentation/devicetree/bindings/net/allwinner,sun4i-mdio.txt b/Documentation/devicetree/bindings/net/allwinner,sun4i-mdio.txt
index 00b9f9a..4ec5641 100644
--- a/Documentation/devicetree/bindings/net/allwinner,sun4i-mdio.txt
+++ b/Documentation/devicetree/bindings/net/allwinner,sun4i-mdio.txt
@@ -1,7 +1,8 @@
 * Allwinner A10 MDIO Ethernet Controller interface
 
 Required properties:
-- compatible: should be "allwinner,sun4i-mdio".
+- compatible: should be "allwinner,sun4i-a10-mdio"
+              (Deprecated: "allwinner,sun4i-mdio").
 - reg: address and length of the register set for the device.
 
 Optional properties:
@@ -9,7 +10,7 @@
 
 Example at the SoC level:
 mdio@01c0b080 {
-	compatible = "allwinner,sun4i-mdio";
+	compatible = "allwinner,sun4i-a10-mdio";
 	reg = <0x01c0b080 0x14>;
 	#address-cells = <1>;
 	#size-cells = <0>;
diff --git a/Documentation/devicetree/bindings/net/davinci_emac.txt b/Documentation/devicetree/bindings/net/davinci_emac.txt
index ca0911a..6e356d1 100644
--- a/Documentation/devicetree/bindings/net/davinci_emac.txt
+++ b/Documentation/devicetree/bindings/net/davinci_emac.txt
@@ -10,8 +10,6 @@
 - ti,davinci-ctrl-mod-reg-offset: offset to control module register
 - ti,davinci-ctrl-ram-offset: offset to control module ram
 - ti,davinci-ctrl-ram-size: size of control module ram
-- ti,davinci-rmii-en: use RMII
-- ti,davinci-no-bd-ram: has the emac controller BD RAM
 - interrupts: interrupt mapping for the davinci emac interrupts sources:
               4 sources: <Receive Threshold Interrupt
 			  Receive Interrupt
@@ -22,6 +20,8 @@
 - phy-handle: Contains a phandle to an Ethernet PHY.
               If absent, davinci_emac driver defaults to 100/FULL.
 - local-mac-address : 6 bytes, mac address
+- ti,davinci-rmii-en: 1 byte, 1 means use RMII
+- ti,davinci-no-bd-ram: boolean, specifies whether the EMAC controller has BD RAM
 
 Example (enbw_cmc board):
 	eth0: emac@1e20000 {
diff --git a/Documentation/devicetree/bindings/net/sti-dwmac.txt b/Documentation/devicetree/bindings/net/sti-dwmac.txt
new file mode 100644
index 0000000..3dd3d0b
--- /dev/null
+++ b/Documentation/devicetree/bindings/net/sti-dwmac.txt
@@ -0,0 +1,58 @@
+STMicroelectronics SoC DWMAC glue layer controller
+
+The device node has following properties.
+
+Required properties:
+ - compatible	: Can be "st,stih415-dwmac", "st,stih416-dwmac" or
+   "st,stid127-dwmac".
+ - reg		: Offset and size of the glue configuration register map in the
+   system configuration regmap pointed to by the st,syscon property.
+
+ - reg-names	: Should be "sti-ethconf".
+
+ - st,syscon	: Should be a phandle to the system configuration node which
+   encompasses these glue registers.
+
+ - st,tx-retime-src: On STi parts, for Gigabit speeds the 125MHz clock can be
+   wired up from different sources: one via the TXCLK pin and the other via the
+   CLK_125 pin. This wiring is entirely board dependent; however, the retiming
+   glue logic should be configured accordingly. Possible values are:
+
+	   "txclk" - if the 125MHz clock is wired up via the txclk line.
+	   "clk_125" - if the 125MHz clock is wired up via the clk_125 line.
+
+   This property is only valid for Gigabit setups (GMII, RGMII), and it is
+   unused for non-Gigabit (MII and RMII) setups. Also note that the internal
+   clockgen cannot generate a stable 125MHz clock.
+
+ - st,ext-phyclk: This boolean property indicates which side generates the TX
+  and RX clock. This property is only valid in the RMII case, where the clock
+  can be generated by either the MAC or the PHY.
+
+ - clock-names: should be "sti-ethclk".
+ - clocks: Should point to ethernet clockgen which can generate phyclk.
+
+
+Example:
+
+ethernet0: dwmac@fe810000 {
+	device_type 	= "network";
+	compatible	= "st,stih416-dwmac", "snps,dwmac", "snps,dwmac-3.710";
+	reg 		= <0xfe810000 0x8000>, <0x8bc 0x4>;
+	reg-names	= "stmmaceth", "sti-ethconf";
+	interrupts	= <0 133 0>, <0 134 0>, <0 135 0>;
+	interrupt-names	= "macirq", "eth_wake_irq", "eth_lpi";
+	phy-mode	= "mii";
+
+	st,syscon	= <&syscfg_rear>;
+
+	snps,pbl 	= <32>;
+	snps,mixed-burst;
+
+	resets		= <&softreset STIH416_ETH0_SOFTRESET>;
+	reset-names	= "stmmaceth";
+	pinctrl-0	= <&pinctrl_mii0>;
+	pinctrl-names 	= "default";
+	clocks		= <&CLK_S_GMAC0_PHY>;
+	clock-names	= "stmmaceth";
+};
diff --git a/Documentation/devicetree/bindings/panel/auo,b101aw03.txt b/Documentation/devicetree/bindings/panel/auo,b101aw03.txt
new file mode 100644
index 0000000..72e088a
--- /dev/null
+++ b/Documentation/devicetree/bindings/panel/auo,b101aw03.txt
@@ -0,0 +1,7 @@
+AU Optronics Corporation 10.1" WSVGA TFT LCD panel
+
+Required properties:
+- compatible: should be "auo,b101aw03"
+
+This binding is compatible with the simple-panel binding, which is specified
+in simple-panel.txt in this directory.
diff --git a/Documentation/devicetree/bindings/panel/chunghwa,claa101wa01a.txt b/Documentation/devicetree/bindings/panel/chunghwa,claa101wa01a.txt
new file mode 100644
index 0000000..f24614e
--- /dev/null
+++ b/Documentation/devicetree/bindings/panel/chunghwa,claa101wa01a.txt
@@ -0,0 +1,7 @@
+Chunghwa Picture Tubes Ltd. 10.1" WXGA TFT LCD panel
+
+Required properties:
+- compatible: should be "chunghwa,claa101wa01a"
+
+This binding is compatible with the simple-panel binding, which is specified
+in simple-panel.txt in this directory.
diff --git a/Documentation/devicetree/bindings/panel/chunghwa,claa101wb03.txt b/Documentation/devicetree/bindings/panel/chunghwa,claa101wb03.txt
new file mode 100644
index 0000000..0ab2c05
--- /dev/null
+++ b/Documentation/devicetree/bindings/panel/chunghwa,claa101wb03.txt
@@ -0,0 +1,7 @@
+Chunghwa Picture Tubes Ltd. 10.1" WXGA TFT LCD panel
+
+Required properties:
+- compatible: should be "chunghwa,claa101wb03"
+
+This binding is compatible with the simple-panel binding, which is specified
+in simple-panel.txt in this directory.
diff --git a/Documentation/devicetree/bindings/panel/panasonic,vvx10f004b00.txt b/Documentation/devicetree/bindings/panel/panasonic,vvx10f004b00.txt
new file mode 100644
index 0000000..d328b03
--- /dev/null
+++ b/Documentation/devicetree/bindings/panel/panasonic,vvx10f004b00.txt
@@ -0,0 +1,7 @@
+Panasonic Corporation 10.1" WUXGA TFT LCD panel
+
+Required properties:
+- compatible: should be "panasonic,vvx10f004b00"
+
+This binding is compatible with the simple-panel binding, which is specified
+in simple-panel.txt in this directory.
diff --git a/Documentation/devicetree/bindings/panel/samsung,ltn101nt05.txt b/Documentation/devicetree/bindings/panel/samsung,ltn101nt05.txt
new file mode 100644
index 0000000..ef522c6
--- /dev/null
+++ b/Documentation/devicetree/bindings/panel/samsung,ltn101nt05.txt
@@ -0,0 +1,7 @@
+Samsung Electronics 10.1" WSVGA TFT LCD panel
+
+Required properties:
+- compatible: should be "samsung,ltn101nt05"
+
+This binding is compatible with the simple-panel binding, which is specified
+in simple-panel.txt in this directory.
diff --git a/Documentation/devicetree/bindings/panel/simple-panel.txt b/Documentation/devicetree/bindings/panel/simple-panel.txt
new file mode 100644
index 0000000..1341bbf
--- /dev/null
+++ b/Documentation/devicetree/bindings/panel/simple-panel.txt
@@ -0,0 +1,21 @@
+Simple display panel
+
+Required properties:
+- power-supply: regulator to provide the supply voltage
+
+Optional properties:
+- ddc-i2c-bus: phandle of an I2C controller used for DDC EDID probing
+- enable-gpios: GPIO pin to enable or disable the panel
+- backlight: phandle of the backlight device attached to the panel
+
+Example:
+
+	panel: panel {
+		compatible = "cptt,claa101wb01";
+		ddc-i2c-bus = <&panelddc>;
+
+		power-supply = <&vdd_pnl_reg>;
+		enable-gpios = <&gpio 90 0>;
+
+		backlight = <&backlight>;
+	};
diff --git a/Documentation/devicetree/bindings/power/bq2415x.txt b/Documentation/devicetree/bindings/power/bq2415x.txt
new file mode 100644
index 0000000..d0327f0
--- /dev/null
+++ b/Documentation/devicetree/bindings/power/bq2415x.txt
@@ -0,0 +1,46 @@
+Binding for TI bq2415x Li-Ion Charger
+
+Required properties:
+- compatible: Should contain one of the following:
+ * "ti,bq24150"
+ * "ti,bq24150a"
+ * "ti,bq24151"
+ * "ti,bq24151a"
+ * "ti,bq24152"
+ * "ti,bq24153"
+ * "ti,bq24153a"
+ * "ti,bq24155"
+ * "ti,bq24156"
+ * "ti,bq24156a"
+ * "ti,bq24158"
+- reg:			   integer, i2c address of the device.
+- ti,current-limit:	   integer, initial maximum current charger can pull
+			   from power supply in mA.
+- ti,weak-battery-voltage: integer, weak battery voltage threshold in mV.
+			   The chip will use slow precharge if battery voltage
+			   is below this value.
+- ti,battery-regulation-voltage: integer, maximum charging voltage in mV.
+- ti,charge-current:	   integer, maximum charging current in mA.
+- ti,termination-current:  integer, charge will be terminated when current in
+			   constant-voltage phase drops below this value (in mA).
+- ti,resistor-sense:	   integer, value of sensing resistor in milliohm.
+
+Optional properties:
+- ti,usb-charger-detection: phandle to usb charger detection device.
+			    (required for auto mode)
+
+Example from Nokia N900:
+
+bq24150a {
+	compatible = "ti,bq24150a";
+	reg = <0x6b>;
+
+	ti,current-limit = <100>;
+	ti,weak-battery-voltage = <3400>;
+	ti,battery-regulation-voltage = <4200>;
+	ti,charge-current = <650>;
+	ti,termination-current = <100>;
+	ti,resistor-sense = <68>;
+
+	ti,usb-charger-detection = <&isp1704>;
+};
diff --git a/Documentation/devicetree/bindings/sound/simple-card.txt b/Documentation/devicetree/bindings/sound/simple-card.txt
index e9e20ec..19c84df 100644
--- a/Documentation/devicetree/bindings/sound/simple-card.txt
+++ b/Documentation/devicetree/bindings/sound/simple-card.txt
@@ -43,7 +43,7 @@
 sound {
 	compatible = "simple-audio-card";
 	simple-audio-card,format = "left_j";
-	simple-audio-routing =
+	simple-audio-card,routing =
 		"MIC_IN", "Mic Jack",
 		"Headphone Jack", "HP_OUT",
 		"Ext Spk", "LINE_OUT";
diff --git a/Documentation/devicetree/bindings/spi/spi_atmel.txt b/Documentation/devicetree/bindings/spi/spi_atmel.txt
index 07e04cd..4f8184d 100644
--- a/Documentation/devicetree/bindings/spi/spi_atmel.txt
+++ b/Documentation/devicetree/bindings/spi/spi_atmel.txt
@@ -5,6 +5,9 @@
 - reg: Address and length of the register set for the device
 - interrupts: Should contain spi interrupt
 - cs-gpios: chipselects
+- clock-names: tuple listing input clock names.
+	Required elements: "spi_clk"
+- clocks: phandles to input clocks.
 
 Example:
 
@@ -14,6 +17,8 @@
 	interrupts = <13 4 5>;
 	#address-cells = <1>;
 	#size-cells = <0>;
+	clocks = <&spi1_clk>;
+	clock-names = "spi_clk";
 	cs-gpios = <&pioB 3 0>;
 	status = "okay";
 
diff --git a/Documentation/devicetree/bindings/vendor-prefixes.txt b/Documentation/devicetree/bindings/vendor-prefixes.txt
index 3f900cd..40ce2df 100644
--- a/Documentation/devicetree/bindings/vendor-prefixes.txt
+++ b/Documentation/devicetree/bindings/vendor-prefixes.txt
@@ -8,6 +8,7 @@
 adi	Analog Devices, Inc.
 aeroflexgaisler	Aeroflex Gaisler AB
 ak	Asahi Kasei Corp.
+allwinner	Allwinner Technology Co., Ltd.
 altr	Altera Corp.
 amcc	Applied Micro Circuits Corporation (APM, formally AMCC)
 amstaos	AMS-Taos Inc.
@@ -40,6 +41,7 @@
 gumstix	Gumstix, Inc.
 haoyu	Haoyu Microelectronic Co. Ltd.
 hisilicon	Hisilicon Limited.
+honeywell	Honeywell
 hp	Hewlett Packard
 ibm	International Business Machines (IBM)
 idt	Integrated Device Technologies, Inc.
@@ -55,6 +57,7 @@
 microchip	Microchip Technology Inc.
 mosaixtech	Mosaix Technologies, Inc.
 national	National Semiconductor
+neonode		Neonode Inc.
 nintendo	Nintendo
 nvidia	NVIDIA
 nxp	NXP Semiconductors
@@ -64,7 +67,7 @@
 picochip	Picochip Ltd
 powervr	PowerVR (deprecated, use img)
 qca	Qualcomm Atheros, Inc.
-qcom	Qualcomm, Inc.
+qcom	Qualcomm Technologies, Inc.
 ralink	Mediatek/Ralink Technology Corp.
 ramtron	Ramtron International
 realtek Realtek Semiconductor Corp.
@@ -78,6 +81,7 @@
 simtek
 sirf	SiRF Technology, Inc.
 snps 	Synopsys, Inc.
+spansion	Spansion Inc.
 st	STMicroelectronics
 ste	ST-Ericsson
 stericsson	ST-Ericsson
diff --git a/Documentation/devicetree/bindings/watchdog/atmel-wdt.txt b/Documentation/devicetree/bindings/watchdog/atmel-wdt.txt
index fcdd48f..f90e294 100644
--- a/Documentation/devicetree/bindings/watchdog/atmel-wdt.txt
+++ b/Documentation/devicetree/bindings/watchdog/atmel-wdt.txt
@@ -9,11 +9,37 @@
 
 Optional properties:
 - timeout-sec: contains the watchdog timeout in seconds.
+- interrupts : Should contain WDT interrupt.
+- atmel,max-heartbeat-sec : Should contain the maximum heartbeat value in
+	seconds. This value should be less than or equal to 16. It is used to
+	compute the WDV field.
+- atmel,min-heartbeat-sec : Should contain the minimum heartbeat value in
+	seconds. This value must be smaller than the max-heartbeat-sec value.
+	It is used to compute the WDD field.
+- atmel,watchdog-type : Should be "hardware" or "software". A hardware watchdog
+	uses the at91 watchdog reset. A software watchdog uses the watchdog
+	interrupt to trigger a software reset.
+- atmel,reset-type : Should be "proc" or "all".
+	"all" : assert peripherals and processor reset signals
+	"proc" : assert the processor reset signal
+	This is valid only when using the "hardware" watchdog type.
+- atmel,disable : Should be present if you want to disable the watchdog.
+- atmel,idle-halt : Should be present if you want to stop the watchdog when
+	entering idle state.
+- atmel,dbg-halt : Should be present if you want to stop the watchdog when
+	entering debug state.
 
 Example:
-
 	watchdog@fffffd40 {
 		compatible = "atmel,at91sam9260-wdt";
 		reg = <0xfffffd40 0x10>;
-		timeout-sec = <10>;
+		interrupts = <1 IRQ_TYPE_LEVEL_HIGH 7>;
+		timeout-sec = <15>;
+		atmel,watchdog-type = "hardware";
+		atmel,reset-type = "all";
+		atmel,dbg-halt;
+		atmel,idle-halt;
+		atmel,max-heartbeat-sec = <16>;
+		atmel,min-heartbeat-sec = <0>;
+		status = "okay";
 	};
diff --git a/Documentation/devicetree/bindings/watchdog/davinci-wdt.txt b/Documentation/devicetree/bindings/watchdog/davinci-wdt.txt
index 75558cc..e60b9a1 100644
--- a/Documentation/devicetree/bindings/watchdog/davinci-wdt.txt
+++ b/Documentation/devicetree/bindings/watchdog/davinci-wdt.txt
@@ -1,12 +1,24 @@
-DaVinci Watchdog Timer (WDT) Controller
+Texas Instruments DaVinci/Keystone Watchdog Timer (WDT) Controller
 
 Required properties:
-- compatible : Should be "ti,davinci-wdt"
+- compatible : Should be "ti,davinci-wdt" or "ti,keystone-wdt"
 - reg : Should contain WDT registers location and length
 
+Optional properties:
+- timeout-sec : Contains the watchdog timeout in seconds
+- clocks : the clock feeding the watchdog timer.
+	   Needed if platform uses clocks.
+	   See clock-bindings.txt
+
+Documentation:
+Davinci DM646x - http://www.ti.com/lit/ug/spruer5b/spruer5b.pdf
+Keystone - http://www.ti.com/lit/ug/sprugv5a/sprugv5a.pdf
+
 Examples:
 
 wdt: wdt@2320000 {
 	compatible = "ti,davinci-wdt";
 	reg = <0x02320000 0x80>;
+	timeout-sec = <30>;
+	clocks = <&clkwdtimer0>;
 };
diff --git a/Documentation/devicetree/bindings/watchdog/gpio-wdt.txt b/Documentation/devicetree/bindings/watchdog/gpio-wdt.txt
new file mode 100644
index 0000000..37afec1
--- /dev/null
+++ b/Documentation/devicetree/bindings/watchdog/gpio-wdt.txt
@@ -0,0 +1,23 @@
+* GPIO-controlled Watchdog
+
+Required Properties:
+- compatible: Should contain "linux,wdt-gpio".
+- gpios: From common gpio binding; gpio connection to WDT reset pin.
+- hw_algo: The algorithm used by the driver. Should be one of the
+  following values:
+  - toggle: Either a high-to-low or a low-to-high transition clears
+    the WDT counter. The watchdog timer is disabled when GPIO is
+    left floating or connected to a three-state buffer.
+  - level: Low or high level starts counting WDT timeout,
+    the opposite level disables the WDT. Active level is determined
+    by the GPIO flags.
+- hw_margin_ms: Maximum time to reset watchdog circuit (milliseconds).
+
+Example:
+	watchdog: watchdog {
+		/* ADM706 */
+		compatible = "linux,wdt-gpio";
+		gpios = <&gpio3 9 GPIO_ACTIVE_LOW>;
+		hw_algo = "toggle";
+		hw_margin_ms = <1600>;
+	};
diff --git a/Documentation/devicetree/bindings/watchdog/samsung-wdt.txt b/Documentation/devicetree/bindings/watchdog/samsung-wdt.txt
index 2aa486c..cfff375 100644
--- a/Documentation/devicetree/bindings/watchdog/samsung-wdt.txt
+++ b/Documentation/devicetree/bindings/watchdog/samsung-wdt.txt
@@ -5,10 +5,29 @@
 occurred.
 
 Required properties:
-- compatible : should be "samsung,s3c2410-wdt"
+- compatible : should be one of the following
+	(a) "samsung,s3c2410-wdt" for Exynos4 and previous SoCs
+	(b) "samsung,exynos5250-wdt" for Exynos5250
+	(c) "samsung,exynos5420-wdt" for Exynos5420
+
 - reg : base physical address of the controller and length of memory mapped
 	region.
 - interrupts : interrupt number to the cpu.
+- samsung,syscon-phandle : reference to the syscon node (this property is required
+	only when the compatible is "samsung,exynos5250-wdt" or "samsung,exynos5420-wdt";
+	on Exynos5250 and Exynos5420 it points to the syscon node holding the PMU
+	base address)
 
 Optional properties:
 - timeout-sec : contains the watchdog timeout in seconds.
+
+Example:
+
+watchdog@101D0000 {
+	compatible = "samsung,exynos5250-wdt";
+	reg = <0x101D0000 0x100>;
+	interrupts = <0 42 0>;
+	clocks = <&clock 336>;
+	clock-names = "watchdog";
+	samsung,syscon-phandle = <&pmu_syscon>;
+};
diff --git a/Documentation/dvb/contributors.txt b/Documentation/dvb/contributors.txt
index 47c3009..731a009 100644
--- a/Documentation/dvb/contributors.txt
+++ b/Documentation/dvb/contributors.txt
@@ -78,7 +78,7 @@
 Wilson Michaels <wilsonmichaels@earthlink.net>
   for the lgdt330x frontend driver, and various bugfixes
 
-Michael Krufky <mkrufky@m1k.net>
+Michael Krufky <mkrufky@linuxtv.org>
   for maintaining v4l/dvb inter-tree dependencies
 
 Taylor Jacob <rtjacob@earthlink.net>
diff --git a/Documentation/fb/00-INDEX b/Documentation/fb/00-INDEX
index 30a7054..fe85e7c 100644
--- a/Documentation/fb/00-INDEX
+++ b/Documentation/fb/00-INDEX
@@ -5,6 +5,8 @@
 
 00-INDEX
 	- this file.
+api.txt
+	- The frame buffer API between applications and buffer devices.
 arkfb.txt
 	- info on the fbdev driver for ARK Logic chips.
 aty128fb.txt
@@ -51,12 +53,16 @@
 	- info on the SH7760/SH7763 integrated LCDC Framebuffer driver.
 sisfb.txt
 	- info on the framebuffer device driver for various SiS chips.
+sm501.txt
+	- info on the framebuffer device driver for the SM501 video framebuffer.
 sstfb.txt
 	- info on the frame buffer driver for 3dfx' Voodoo Graphics boards.
 tgafb.txt
 	- info on the TGA (DECChip 21030) frame buffer driver.
 tridentfb.txt
 	info on the framebuffer driver for some Trident chip based cards.
+udlfb.txt
+	- Driver for DisplayLink USB 2.0 chips.
 uvesafb.txt
 	- info on the userspace VESA (VBE2+ compliant) frame buffer device.
 vesafb.txt
diff --git a/Documentation/filesystems/00-INDEX b/Documentation/filesystems/00-INDEX
index 632211c..ac28149 100644
--- a/Documentation/filesystems/00-INDEX
+++ b/Documentation/filesystems/00-INDEX
@@ -2,6 +2,8 @@
 	- this file (info on some of the filesystems supported by linux).
 Locking
 	- info on locking rules as they pertain to Linux VFS.
+Makefile
+	- Makefile for building the filesystems part of DocBook.
 9p.txt
 	- 9p (v9fs) is an implementation of the Plan 9 remote fs protocol.
 adfs.txt
diff --git a/Documentation/filesystems/btrfs.txt b/Documentation/filesystems/btrfs.txt
index 5dd282d..d11cc2f 100644
--- a/Documentation/filesystems/btrfs.txt
+++ b/Documentation/filesystems/btrfs.txt
@@ -38,7 +38,7 @@
 =============
 
 When mounting a btrfs filesystem, the following option are accepted.
-Unless otherwise specified, all options default to off.
+Options with (*) are default options and will not be shown in the mount options.
 
   alloc_start=<bytes>
 	Debugging option to force all block allocations above a certain
@@ -46,10 +46,12 @@
 	bytes, optionally with a K, M, or G suffix, case insensitive.
 	Default is 1MB.
 
+  noautodefrag(*)
   autodefrag
-	Detect small random writes into files and queue them up for the
-	defrag process.  Works best for small files; Not well suited for
-	large database workloads.
+	Disable/enable auto defragmentation.
+	Auto defragmentation detects small random writes into files and queues
+	them up for the defrag process.  Works best for small files;
+	not well suited for large database workloads.
 
   check_int
   check_int_data
@@ -96,21 +98,26 @@
 	can be avoided.  Especially useful when trying to mount a multi-device
 	setup as root.  May be specified multiple times for multiple devices.
 
+  nodiscard(*)
   discard
-	Issue frequent commands to let the block device reclaim space freed by
-	the filesystem.  This is useful for SSD devices, thinly provisioned
+	Disable/enable discard mount option.
+	Discard issues frequent commands to let the block device reclaim space
+	freed by the filesystem.
+	This is useful for SSD devices, thinly provisioned
 	LUNs and virtual machine images, but may have a significant
 	performance impact.  (The fstrim command is also available to
 	initiate batch trims from userspace).
 
+  noenospc_debug(*)
   enospc_debug
-	Debugging option to be more verbose in some ENOSPC conditions.
+	Disable/enable debugging option to be more verbose in some ENOSPC conditions.
 
   fatal_errors=<action>
 	Action to take when encountering a fatal error: 
 	  "bug" - BUG() on a fatal error.  This is the default.
 	  "panic" - panic() on a fatal error.
 
+  noflushoncommit(*)
   flushoncommit
 	The 'flushoncommit' mount option forces any data dirtied by a write in a
 	prior transaction to commit as part of the current commit.  This makes
@@ -134,26 +141,32 @@
 	Specify that 1 metadata chunk should be allocated after every <value>
 	data chunks.  Off by default.
 
+  acl(*)
   noacl
-	Disable support for Posix Access Control Lists (ACLs).  See the
+	Enable/disable support for Posix Access Control Lists (ACLs).  See the
 	acl(5) manual page for more information about ACLs.
 
+  barrier(*)
   nobarrier
-        Disables the use of block layer write barriers.  Write barriers ensure
-	that certain IOs make it through the device cache and are on persistent
-	storage.  If used on a device with a volatile (non-battery-backed)
-	write-back cache, this option will lead to filesystem corruption on a
-	system crash or power loss.
+        Enable/disable the use of block layer write barriers.  Write barriers
+	ensure that certain IOs make it through the device cache and are on
+	persistent storage. If disabled on a device with a volatile
+	(non-battery-backed) write-back cache, the nobarrier option will lead to
+	filesystem corruption on a system crash or power loss.
 
+  datacow(*)
   nodatacow
-	Disable data copy-on-write for newly created files.  Implies nodatasum,
-	and disables all compression.
+	Enable/disable data copy-on-write for newly created files.
+	Nodatacow implies nodatasum, and disables all compression.
 
+  datasum(*)
   nodatasum
-	Disable data checksumming for newly created files.
+	Enable/disable data checksumming for newly created files.
+	Datasum implies datacow.
 
+  treelog(*)
   notreelog
-	Disable the tree logging used for fsync and O_SYNC writes.
+	Enable/disable the tree logging used for fsync and O_SYNC writes.
 
   recovery
 	Enable autorecovery attempts if a bad tree root is found at mount time.
diff --git a/Documentation/filesystems/nfs/00-INDEX b/Documentation/filesystems/nfs/00-INDEX
index 66eb6c8..53f3b59 100644
--- a/Documentation/filesystems/nfs/00-INDEX
+++ b/Documentation/filesystems/nfs/00-INDEX
@@ -12,6 +12,8 @@
 	- info on the Linux server implementation of NFSv4 minor version 1.
 nfs-rdma.txt
 	- how to install and setup the Linux NFS/RDMA client and server software
+nfsd-admin-interfaces.txt
+	- Administrative interfaces for nfsd.
 nfsroot.txt
 	- short guide on setting up a diskless box with NFS root filesystem.
 pnfs.txt
@@ -20,5 +22,5 @@
 	- introduction to the caching mechanisms in the sunrpc layer.
 idmapper.txt
 	- information for configuring request-keys to be used by idmapper
-knfsd-rpcgss.txt
+rpc-server-gss.txt
 	- Information on GSS authentication support in the NFS Server
diff --git a/Documentation/filesystems/nfs/nfs41-server.txt b/Documentation/filesystems/nfs/nfs41-server.txt
index 01c2db7..b930ad0 100644
--- a/Documentation/filesystems/nfs/nfs41-server.txt
+++ b/Documentation/filesystems/nfs/nfs41-server.txt
@@ -5,11 +5,11 @@
 by reading this file will contain either "+4.1" or "-4.1"
 correspondingly.
 
-Currently, server support for minorversion 1 is disabled by default.
-It can be enabled at run time by writing the string "+4.1" to
+Currently, server support for minorversion 1 is enabled by default.
+It can be disabled at run time by writing the string "-4.1" to
 the /proc/fs/nfsd/versions control file.  Note that to write this
-control file, the nfsd service must be taken down.  Use your user-mode
-nfs-utils to set this up; see rpc.nfsd(8)
+control file, the nfsd service must be taken down.  You can use rpc.nfsd
+for this; see rpc.nfsd(8).
 
 (Warning: older servers will interpret "+4.1" and "-4.1" as "+4" and
 "-4", respectively.  Therefore, code meant to work on both new and old
@@ -29,29 +29,6 @@
 See http://wiki.linux-nfs.org/wiki/index.php/PNFS_prototype_design
 for more information.
 
-The current implementation is intended for developers only: while it
-does support ordinary file operations on clients we have tested against
-(including the linux client), it is incomplete in ways which may limit
-features unexpectedly, cause known bugs in rare cases, or cause
-interoperability problems with future clients.  Known issues:
-
-	- gss support is questionable: currently mounts with kerberos
-	  from a linux client are possible, but we aren't really
-	  conformant with the spec (for example, we don't use kerberos
-	  on the backchannel correctly).
-	- We do not support SSV, which provides security for shared
-	  client-server state (thus preventing unauthorized tampering
-	  with locks and opens, for example).  It is mandatory for
-	  servers to support this, though no clients use it yet.
-
-In addition, some limitations are inherited from the current NFSv4
-implementation:
-
-	- Incomplete delegation enforcement: if a file is renamed or
-	  unlinked by a local process, a client holding a delegation may
-	  continue to indefinitely allow opens of the file under the old
-	  name.
-
 The table below, taken from the NFSv4.1 document, lists
 the operations that are mandatory to implement (REQ), optional
 (OPT), and NFSv4.0 operations that are required not to implement (MNI)
@@ -169,6 +146,16 @@
 
 Implementation notes:
 
+SSV:
+* The spec claims this is mandatory, but we don't actually know of any
+  implementations, so we're ignoring it for now.  The server returns
+  NFS4ERR_ENCR_ALG_UNSUPP on EXCHANGE_ID, which should be future-proof.
+
+GSS on the backchannel:
+* Again, theoretically required but not widely implemented (in
+  particular, the current Linux client doesn't request it).  We return
+  NFS4ERR_ENCR_ALG_UNSUPP on CREATE_SESSION.
+
 DELEGPURGE:
 * mandatory only for servers that support CLAIM_DELEGATE_PREV and/or
   CLAIM_DELEG_PREV_FH (which allows clients to keep delegations that
@@ -176,7 +163,6 @@
   now.
 
 EXCHANGE_ID:
-* only SP4_NONE state protection supported
 * implementation ids are ignored
 
 CREATE_SESSION:
diff --git a/Documentation/filesystems/proc.txt b/Documentation/filesystems/proc.txt
index 31f7617..f00bee14 100644
--- a/Documentation/filesystems/proc.txt
+++ b/Documentation/filesystems/proc.txt
@@ -1386,8 +1386,8 @@
 For example, if a task is using all allowed memory, its badness score will be
 1000.  If it is using half of its allowed memory, its score will be 500.
 
-There is an additional factor included in the badness score: root
-processes are given 3% extra memory over other tasks.
+There is an additional factor included in the badness score: the current memory
+and swap usage is discounted by 3% for root processes.
 
 The amount of "allowed" memory depends on the context in which the oom killer
 was called.  If it is due to the memory assigned to the allocating task's cpuset
diff --git a/Documentation/filesystems/vfs.txt b/Documentation/filesystems/vfs.txt
index deb48b5..c53784c 100644
--- a/Documentation/filesystems/vfs.txt
+++ b/Documentation/filesystems/vfs.txt
@@ -782,7 +782,7 @@
 ----------------------
 
 This describes how the VFS can manipulate an open file. As of kernel
-3.5, the following members are defined:
+3.12, the following members are defined:
 
 struct file_operations {
 	struct module *owner;
@@ -803,9 +803,6 @@
 	int (*aio_fsync) (struct kiocb *, int datasync);
 	int (*fasync) (int, struct file *, int);
 	int (*lock) (struct file *, int, struct file_lock *);
-	ssize_t (*readv) (struct file *, const struct iovec *, unsigned long, loff_t *);
-	ssize_t (*writev) (struct file *, const struct iovec *, unsigned long, loff_t *);
-	ssize_t (*sendfile) (struct file *, loff_t *, size_t, read_actor_t, void *);
 	ssize_t (*sendpage) (struct file *, struct page *, int, size_t, loff_t *, int);
 	unsigned long (*get_unmapped_area)(struct file *, unsigned long, unsigned long, unsigned long, unsigned long);
 	int (*check_flags)(int);
@@ -814,6 +811,7 @@
 	ssize_t (*splice_read)(struct file *, struct pipe_inode_info *, size_t, unsigned int);
 	int (*setlease)(struct file *, long arg, struct file_lock **);
 	long (*fallocate)(struct file *, int mode, loff_t offset, loff_t len);
+	int (*show_fdinfo)(struct seq_file *m, struct file *f);
 };
 
 Again, all methods are called without any locks being held, unless
@@ -864,12 +862,6 @@
   lock: called by the fcntl(2) system call for F_GETLK, F_SETLK, and F_SETLKW
   	commands
 
-  readv: called by the readv(2) system call
-
-  writev: called by the writev(2) system call
-
-  sendfile: called by the sendfile(2) system call
-
   get_unmapped_area: called by the mmap(2) system call
 
   check_flags: called by the fcntl(2) system call for F_SETFL command
diff --git a/Documentation/hwmon/adm1025 b/Documentation/hwmon/adm1025
index 39d2b78..99f0504 100644
--- a/Documentation/hwmon/adm1025
+++ b/Documentation/hwmon/adm1025
@@ -18,7 +18,7 @@
 
 Authors:
         Chen-Yuan Wu <gwu@esoft.com>,
-        Jean Delvare <khali@linux-fr.org>
+        Jean Delvare <jdelvare@suse.de>
 
 Description
 -----------
diff --git a/Documentation/hwmon/adm1031 b/Documentation/hwmon/adm1031
index be92a77..a143117 100644
--- a/Documentation/hwmon/adm1031
+++ b/Documentation/hwmon/adm1031
@@ -16,7 +16,7 @@
 
 Authors:
         Alexandre d'Alton <alex@alexdalton.org>
-        Jean Delvare <khali@linux-fr.org>
+        Jean Delvare <jdelvare@suse.de>
 
 Description
 -----------
diff --git a/Documentation/hwmon/adm9240 b/Documentation/hwmon/adm9240
index 36e8ec6..9b174fc 100644
--- a/Documentation/hwmon/adm9240
+++ b/Documentation/hwmon/adm9240
@@ -25,7 +25,7 @@
     Philip Edelbrock <phil@netroedge.com>,
     Michiel Rook <michiel@grendelproject.nl>,
     Grant Coady <gcoady.lk@gmail.com> with guidance
-        from Jean Delvare <khali@linux-fr.org>
+        from Jean Delvare <jdelvare@suse.de>
 
 Interface
 ---------
diff --git a/Documentation/hwmon/ds1621 b/Documentation/hwmon/ds1621
index 896cdc9..f775e61 100644
--- a/Documentation/hwmon/ds1621
+++ b/Documentation/hwmon/ds1621
@@ -31,7 +31,7 @@
         Christian W. Zuckschwerdt <zany@triq.net>
         valuable contributions by Jan M. Sendler <sendler@sendler.de>
         ported to 2.6 by Aurelien Jarno <aurelien@aurel32.net>
-        with the help of Jean Delvare <khali@linux-fr.org>
+        with the help of Jean Delvare <jdelvare@suse.de>
 
 Module Parameters
 ------------------
diff --git a/Documentation/hwmon/emc6w201 b/Documentation/hwmon/emc6w201
index 32f355a..757629b 100644
--- a/Documentation/hwmon/emc6w201
+++ b/Documentation/hwmon/emc6w201
@@ -7,7 +7,7 @@
     Addresses scanned: I2C 0x2c, 0x2d, 0x2e
     Datasheet: Not public
 
-Author: Jean Delvare <khali@linux-fr.org>
+Author: Jean Delvare <jdelvare@suse.de>
 
 
 Description
diff --git a/Documentation/hwmon/f71805f b/Documentation/hwmon/f71805f
index f0d5597..48a3560 100644
--- a/Documentation/hwmon/f71805f
+++ b/Documentation/hwmon/f71805f
@@ -15,7 +15,7 @@
     Addresses scanned: none, address read from Super I/O config space
     Datasheet: Available from the Fintek website
 
-Author: Jean Delvare <khali@linux-fr.org>
+Author: Jean Delvare <jdelvare@suse.de>
 
 Thanks to Denis Kieft from Barracuda Networks for the donation of a
 test system (custom Jetway K8M8MS motherboard, with CPU and RAM) and
diff --git a/Documentation/hwmon/gl518sm b/Documentation/hwmon/gl518sm
index 26f9f3c..494bb55 100644
--- a/Documentation/hwmon/gl518sm
+++ b/Documentation/hwmon/gl518sm
@@ -14,7 +14,7 @@
         Frodo Looijaard <frodol@dds.nl>,
         Kyösti Mälkki <kmalkki@cc.hut.fi>
         Hong-Gunn Chew <hglinux@gunnet.org>
-        Jean Delvare <khali@linux-fr.org>
+        Jean Delvare <jdelvare@suse.de>
 
 Description
 -----------
diff --git a/Documentation/hwmon/it87 b/Documentation/hwmon/it87
index c263740..0c16350 100644
--- a/Documentation/hwmon/it87
+++ b/Documentation/hwmon/it87
@@ -2,6 +2,10 @@
 ==================
 
 Supported chips:
+  * IT8603E
+    Prefix: 'it8603'
+    Addresses scanned: from Super I/O config space (8 I/O ports)
+    Datasheet: Not publicly available
   * IT8705F
     Prefix: 'it87'
     Addresses scanned: from Super I/O config space (8 I/O ports)
@@ -53,7 +57,7 @@
 
 Authors:
     Christophe Gauthron
-    Jean Delvare <khali@linux-fr.org>
+    Jean Delvare <jdelvare@suse.de>
 
 
 Module Parameters
@@ -90,7 +94,7 @@
 Description
 -----------
 
-This driver implements support for the IT8705F, IT8712F, IT8716F,
+This driver implements support for the IT8603E, IT8705F, IT8712F, IT8716F,
 IT8718F, IT8720F, IT8721F, IT8726F, IT8728F, IT8758E, IT8771E, IT8772E,
 IT8782F, IT8783E/F, and SiS950 chips.
 
@@ -129,6 +133,10 @@
 The IT8728F, IT8771E, and IT8772E are considered compatible with the IT8721F,
 until a datasheet becomes available (hopefully.)
 
+The IT8603E is a custom design; its hardware monitoring part is similar to the
+IT8728F. It only supports 16-bit fan mode; the full speed mode of the
+fan is not supported (value 0 of pwmX_enable).
+
 Temperatures are measured in degrees Celsius. An alarm is triggered once
 when the Overtemperature Shutdown limit is crossed.
 
@@ -145,13 +153,16 @@
 maximum limit. Note that minimum in this case always means 'closest to
 zero'; this is important for negative voltage measurements. All voltage
 inputs can measure voltages between 0 and 4.08 volts, with a resolution of
-0.016 volt (except IT8721F/IT8758E and IT8728F: 0.012 volt.) The battery
-voltage in8 does not have limit registers.
+0.016 volt (except IT8603E, IT8721F/IT8758E and IT8728F: 0.012 volt.) The
+battery voltage in8 does not have limit registers.
 
-On the IT8721F/IT8758E, IT8782F, and IT8783E/F, some voltage inputs are
-internal and scaled inside the chip (in7 (optional for IT8782F and IT8783E/F),
-in8 and optionally in3). The driver handles this transparently so user-space
-doesn't have to care.
+On the IT8603E, IT8721F/IT8758E, IT8782F, and IT8783E/F, some voltage inputs
+are internal and scaled inside the chip:
+* in3 (optional)
+* in7 (optional for IT8782F and IT8783E/F)
+* in8 (always)
+* in9 (relevant for IT8603E only)
+The driver handles this transparently so user-space doesn't have to care.
 
 The VID lines (IT8712F/IT8716F/IT8718F/IT8720F) encode the core voltage value:
 the voltage level your processor should work with. This is hardcoded by
diff --git a/Documentation/hwmon/lm63 b/Documentation/hwmon/lm63
index 4d30d20..4a00461 100644
--- a/Documentation/hwmon/lm63
+++ b/Documentation/hwmon/lm63
@@ -18,7 +18,7 @@
     Datasheet: Publicly available at the National Semiconductor website
                http://www.national.com/pf/LM/LM96163.html
 
-Author: Jean Delvare <khali@linux-fr.org>
+Author: Jean Delvare <jdelvare@suse.de>
 
 Thanks go to Tyan and especially Alex Buckingham for setting up a remote
 access to their S4882 test platform for this driver.
diff --git a/Documentation/hwmon/lm70 b/Documentation/hwmon/lm70
index 86d1829..1bb2db4 100644
--- a/Documentation/hwmon/lm70
+++ b/Documentation/hwmon/lm70
@@ -43,5 +43,5 @@
 
 Thanks to
 ---------
-Jean Delvare <khali@linux-fr.org> for mentoring the hwmon-side driver
+Jean Delvare <jdelvare@suse.de> for mentoring the hwmon-side driver
 development.
diff --git a/Documentation/hwmon/lm78 b/Documentation/hwmon/lm78
index 2bdc881..4dd4773 100644
--- a/Documentation/hwmon/lm78
+++ b/Documentation/hwmon/lm78
@@ -14,7 +14,7 @@
                http://www.national.com/
 
 Authors: Frodo Looijaard <frodol@dds.nl>
-         Jean Delvare <khali@linux-fr.org>
+         Jean Delvare <jdelvare@suse.de>
 
 Description
 -----------
diff --git a/Documentation/hwmon/lm83 b/Documentation/hwmon/lm83
index a04d1fe..50be5cb 100644
--- a/Documentation/hwmon/lm83
+++ b/Documentation/hwmon/lm83
@@ -13,7 +13,7 @@
                http://www.national.com/pf/LM/LM82.html
 
 
-Author: Jean Delvare <khali@linux-fr.org>
+Author: Jean Delvare <jdelvare@suse.de>
 
 Description
 -----------
diff --git a/Documentation/hwmon/lm87 b/Documentation/hwmon/lm87
index 6b47b67..a2339fd 100644
--- a/Documentation/hwmon/lm87
+++ b/Documentation/hwmon/lm87
@@ -17,7 +17,7 @@
         Mark Studebaker <mdsxyz123@yahoo.com>,
         Stephen Rousset <stephen.rousset@rocketlogix.com>,
         Dan Eaton <dan.eaton@rocketlogix.com>,
-        Jean Delvare <khali@linux-fr.org>,
+        Jean Delvare <jdelvare@suse.de>,
         Original 2.6 port Jeff Oliver
 
 Description
diff --git a/Documentation/hwmon/lm90 b/Documentation/hwmon/lm90
index ab81013..8122675 100644
--- a/Documentation/hwmon/lm90
+++ b/Documentation/hwmon/lm90
@@ -129,7 +129,7 @@
                http://www.ti.com/litv/pdf/sbos686
 
 
-Author: Jean Delvare <khali@linux-fr.org>
+Author: Jean Delvare <jdelvare@suse.de>
 
 
 Description
diff --git a/Documentation/hwmon/lm92 b/Documentation/hwmon/lm92
index 7705bfa..22f68ad 100644
--- a/Documentation/hwmon/lm92
+++ b/Documentation/hwmon/lm92
@@ -19,7 +19,7 @@
 
 Authors:
         Abraham van der Merwe <abraham@2d3d.co.za>
-        Jean Delvare <khali@linux-fr.org>
+        Jean Delvare <jdelvare@suse.de>
 
 
 Description
diff --git a/Documentation/hwmon/max1619 b/Documentation/hwmon/max1619
index e6d8739..518bae3 100644
--- a/Documentation/hwmon/max1619
+++ b/Documentation/hwmon/max1619
@@ -10,7 +10,7 @@
 
 Authors:
         Oleksij Rempel <bug-track@fisher-privat.net>,
-        Jean Delvare <khali@linux-fr.org>
+        Jean Delvare <jdelvare@suse.de>
 
 Description
 -----------
diff --git a/Documentation/hwmon/pc87360 b/Documentation/hwmon/pc87360
index cbac32b..d5f5cf1 100644
--- a/Documentation/hwmon/pc87360
+++ b/Documentation/hwmon/pc87360
@@ -7,7 +7,7 @@
     Addresses scanned: none, address read from Super I/O config space
     Datasheets: No longer available
 
-Authors: Jean Delvare <khali@linux-fr.org>
+Authors: Jean Delvare <jdelvare@suse.de>
 
 Thanks to Sandeep Mehta, Tonko de Rooy and Daniel Ceregatti for testing.
 Thanks to Rudolf Marek for helping me investigate conversion issues.
diff --git a/Documentation/hwmon/pc87427 b/Documentation/hwmon/pc87427
index 8fdd08c..c313eb6 100644
--- a/Documentation/hwmon/pc87427
+++ b/Documentation/hwmon/pc87427
@@ -7,7 +7,7 @@
     Addresses scanned: none, address read from Super I/O config space
     Datasheet: No longer available
 
-Author: Jean Delvare <khali@linux-fr.org>
+Author: Jean Delvare <jdelvare@suse.de>
 
 Thanks to Amir Habibi at Candelis for setting up a test system, and to
 Michael Kress for testing several iterations of this driver.
diff --git a/Documentation/hwmon/pcf8591 b/Documentation/hwmon/pcf8591
index ac020b3..447c070 100644
--- a/Documentation/hwmon/pcf8591
+++ b/Documentation/hwmon/pcf8591
@@ -11,7 +11,7 @@
 Authors:
         Aurelien Jarno <aurelien@aurel32.net>
         valuable contributions by Jan M. Sendler <sendler@sendler.de>,
-        Jean Delvare <khali@linux-fr.org>
+        Jean Delvare <jdelvare@suse.de>
 
 
 Description
diff --git a/Documentation/hwmon/smsc47m1 b/Documentation/hwmon/smsc47m1
index 2a13378..10a24b4 100644
--- a/Documentation/hwmon/smsc47m1
+++ b/Documentation/hwmon/smsc47m1
@@ -25,7 +25,7 @@
         With assistance from Bruce Allen <ballen@uwm.edu>, and his
         fan.c program: http://www.lsc-group.phys.uwm.edu/%7Eballen/driver/
         Gabriele Gorla <gorlik@yahoo.com>,
-        Jean Delvare <khali@linux-fr.org>
+        Jean Delvare <jdelvare@suse.de>
 
 Description
 -----------
diff --git a/Documentation/hwmon/w83627ehf b/Documentation/hwmon/w83627ehf
index ceaf6f6..735c42a 100644
--- a/Documentation/hwmon/w83627ehf
+++ b/Documentation/hwmon/w83627ehf
@@ -36,7 +36,7 @@
     Datasheet: Available from Nuvoton upon request
 
 Authors:
-        Jean Delvare <khali@linux-fr.org>
+        Jean Delvare <jdelvare@suse.de>
         Yuan Mu (Winbond)
         Rudolf Marek <r.marek@assembler.cz>
         David Hubbard <david.c.hubbard@gmail.com>
diff --git a/Documentation/hwmon/w83795 b/Documentation/hwmon/w83795
index 9f16037..d3e6782 100644
--- a/Documentation/hwmon/w83795
+++ b/Documentation/hwmon/w83795
@@ -13,7 +13,7 @@
 
 Authors:
     Wei Song (Nuvoton)
-    Jean Delvare <khali@linux-fr.org>
+    Jean Delvare <jdelvare@suse.de>
 
 
 Pin mapping
diff --git a/Documentation/hwmon/w83l785ts b/Documentation/hwmon/w83l785ts
index bd1fa9d..c897847 100644
--- a/Documentation/hwmon/w83l785ts
+++ b/Documentation/hwmon/w83l785ts
@@ -9,7 +9,7 @@
                http://www.winbond-usa.com/products/winbond_products/pdfs/PCIC/W83L785TS-S.pdf
 
 Authors:
-        Jean Delvare <khali@linux-fr.org>
+        Jean Delvare <jdelvare@suse.de>
 
 Description
 -----------
diff --git a/Documentation/i2c/busses/i2c-i801 b/Documentation/i2c/busses/i2c-i801
index 7b0dcdb..aaaf0693 100644
--- a/Documentation/i2c/busses/i2c-i801
+++ b/Documentation/i2c/busses/i2c-i801
@@ -33,7 +33,7 @@
 
 Authors: 
 	Mark Studebaker <mdsxyz123@yahoo.com>
-	Jean Delvare <khali@linux-fr.org>
+	Jean Delvare <jdelvare@suse.de>
 
 
 Module Parameters
diff --git a/Documentation/i2c/busses/i2c-parport b/Documentation/i2c/busses/i2c-parport
index 2461c7b..0e2d17b 100644
--- a/Documentation/i2c/busses/i2c-parport
+++ b/Documentation/i2c/busses/i2c-parport
@@ -1,6 +1,6 @@
 Kernel driver i2c-parport
 
-Author: Jean Delvare <khali@linux-fr.org> 
+Author: Jean Delvare <jdelvare@suse.de>
 
 This is a unified driver for several i2c-over-parallel-port adapters,
 such as the ones made by Philips, Velleman or ELV. This driver is
diff --git a/Documentation/i2c/busses/i2c-parport-light b/Documentation/i2c/busses/i2c-parport-light
index c22ee06..7071b8b 100644
--- a/Documentation/i2c/busses/i2c-parport-light
+++ b/Documentation/i2c/busses/i2c-parport-light
@@ -1,6 +1,6 @@
 Kernel driver i2c-parport-light
 
-Author: Jean Delvare <khali@linux-fr.org> 
+Author: Jean Delvare <jdelvare@suse.de>
 
 This driver is a light version of i2c-parport. It doesn't depend        
 on the parport driver, and uses direct I/O access instead. This might be
diff --git a/Documentation/i2c/busses/i2c-piix4 b/Documentation/i2c/busses/i2c-piix4
index c097e0f..aa959fd 100644
--- a/Documentation/i2c/busses/i2c-piix4
+++ b/Documentation/i2c/busses/i2c-piix4
@@ -13,7 +13,7 @@
   * AMD SP5100 (SB700 derivative found on some server mainboards)
     Datasheet: Publicly available at the AMD website
     http://support.amd.com/us/Embedded_TechDocs/44413.pdf
-  * AMD Hudson-2, CZ
+  * AMD Hudson-2, ML, CZ
     Datasheet: Not publicly available
   * Standard Microsystems (SMSC) SLC90E66 (Victory66) southbridge
     Datasheet: Publicly available at the SMSC website http://www.smsc.com
diff --git a/Documentation/i2c/busses/i2c-taos-evm b/Documentation/i2c/busses/i2c-taos-evm
index 63f62bc..6029955 100644
--- a/Documentation/i2c/busses/i2c-taos-evm
+++ b/Documentation/i2c/busses/i2c-taos-evm
@@ -1,6 +1,6 @@
 Kernel driver i2c-taos-evm
 
-Author: Jean Delvare <khali@linux-fr.org>
+Author: Jean Delvare <jdelvare@suse.de>
 
 This is a driver for the evaluation modules for TAOS I2C/SMBus chips.
 The modules include an SMBus master with limited capabilities, which can
diff --git a/Documentation/i2c/busses/i2c-viapro b/Documentation/i2c/busses/i2c-viapro
index b88f91a..ab64ce2 100644
--- a/Documentation/i2c/busses/i2c-viapro
+++ b/Documentation/i2c/busses/i2c-viapro
@@ -28,7 +28,7 @@
 Authors:
 	Kyösti Mälkki <kmalkki@cc.hut.fi>,
 	Mark D. Studebaker <mdsxyz123@yahoo.com>,
-	Jean Delvare <khali@linux-fr.org>
+	Jean Delvare <jdelvare@suse.de>
 
 Module Parameters
 -----------------
diff --git a/Documentation/i2c/instantiating-devices b/Documentation/i2c/instantiating-devices
index c70e7a7..0d85ac19 100644
--- a/Documentation/i2c/instantiating-devices
+++ b/Documentation/i2c/instantiating-devices
@@ -8,8 +8,8 @@
 several ways to achieve this, depending on the context and requirements.
 
 
-Method 1: Declare the I2C devices by bus number
------------------------------------------------
+Method 1a: Declare the I2C devices by bus number
+------------------------------------------------
 
 This method is appropriate when the I2C bus is a system bus as is the case
 for many embedded systems. On such systems, each I2C bus has a number
@@ -51,6 +51,43 @@
 they sit on goes away (if ever.)
 
 
+Method 1b: Declare the I2C devices via devicetree
+-------------------------------------------------
+
+This method has the same implications as method 1a. Here, the I2C devices are
+declared via devicetree, as subnodes of the master controller.
+
+Example:
+
+	i2c1: i2c@400a0000 {
+		/* ... master properties skipped ... */
+		clock-frequency = <100000>;
+
+		flash@50 {
+			compatible = "atmel,24c256";
+			reg = <0x50>;
+		};
+
+		pca9532: gpio@60 {
+			compatible = "nxp,pca9532";
+			gpio-controller;
+			#gpio-cells = <2>;
+			reg = <0x60>;
+		};
+	};
+
+Here, two devices are attached to the bus using a speed of 100kHz. For
+additional properties which might be needed to set up the device, please refer
+to its devicetree documentation in Documentation/devicetree/bindings/.
+
+
+Method 1c: Declare the I2C devices via ACPI
+-------------------------------------------
+
+ACPI can also describe I2C devices. There is special documentation for this
+which is currently located at Documentation/acpi/enumeration.txt.
+
+
 Method 2: Instantiate the devices explicitly
 --------------------------------------------
 
diff --git a/Documentation/ide/00-INDEX b/Documentation/ide/00-INDEX
index d6b7788..22f98ca 100644
--- a/Documentation/ide/00-INDEX
+++ b/Documentation/ide/00-INDEX
@@ -10,3 +10,5 @@
 	- info on the IDE ATAPI streaming tape driver
 ide.txt
 	- important info for users of ATA devices (IDE/EIDE disks and CD-ROMS).
+warm-plug-howto.txt
+	- using sysfs to remove and add IDE devices.
\ No newline at end of file
diff --git a/Documentation/kernel-parameters.txt b/Documentation/kernel-parameters.txt
index 8f441da..7116fda 100644
--- a/Documentation/kernel-parameters.txt
+++ b/Documentation/kernel-parameters.txt
@@ -1726,16 +1726,16 @@
 			option description.
 
 	memmap=nn[KMG]@ss[KMG]
-			[KNL] Force usage of a specific region of memory
-			Region of memory to be used, from ss to ss+nn.
+			[KNL] Force usage of a specific region of memory.
+			Region of memory to be used is from ss to ss+nn.
 
 	memmap=nn[KMG]#ss[KMG]
 			[KNL,ACPI] Mark specific memory as ACPI data.
-			Region of memory to be used, from ss to ss+nn.
+			Region of memory to be marked is from ss to ss+nn.
 
 	memmap=nn[KMG]$ss[KMG]
 			[KNL,ACPI] Mark specific memory as reserved.
-			Region of memory to be used, from ss to ss+nn.
+			Region of memory to be reserved is from ss to ss+nn.
 			Example: Exclude memory from 0x18690000-0x1869ffff
 			         memmap=64K$0x18690000
 			         or
diff --git a/Documentation/laptops/00-INDEX b/Documentation/laptops/00-INDEX
index fa68853..d13b9a9 100644
--- a/Documentation/laptops/00-INDEX
+++ b/Documentation/laptops/00-INDEX
@@ -1,13 +1,15 @@
 00-INDEX
 	- This file
-acer-wmi.txt
-	- information on the Acer Laptop WMI Extras driver.
+Makefile
+	- Makefile for building dslm example program.
 asus-laptop.txt
 	- information on the Asus Laptop Extras driver.
 disk-shock-protection.txt
 	- information on hard disk shock protection.
 dslm.c
 	- Simple Disk Sleep Monitor program
+hpfall.c
+	- (HP) laptop accelerometer program for disk protection.
 laptop-mode.txt
 	- how to conserve battery power using laptop-mode.
 sony-laptop.txt
diff --git a/Documentation/leds/00-INDEX b/Documentation/leds/00-INDEX
index 1ecd159..b4ef1f3 100644
--- a/Documentation/leds/00-INDEX
+++ b/Documentation/leds/00-INDEX
@@ -1,3 +1,7 @@
+00-INDEX
+	- This file
+leds-blinkm.txt
+	- Driver for BlinkM LED-devices.
 leds-class.txt
 	- documents LED handling under Linux.
 leds-lp3944.txt
@@ -12,3 +16,7 @@
 	- description about lp55xx common driver.
 leds-lm3556.txt
 	- notes on how to use the leds-lm3556 driver.
+ledtrig-oneshot.txt
+	- One-shot LED trigger for both sporadic and dense events.
+ledtrig-transient.txt
+	- LED Transient Trigger, one shot timer activation.
diff --git a/Documentation/m68k/00-INDEX b/Documentation/m68k/00-INDEX
index a014e9f..2be8c6b 100644
--- a/Documentation/m68k/00-INDEX
+++ b/Documentation/m68k/00-INDEX
@@ -1,5 +1,7 @@
 00-INDEX
 	- this file
+README.buddha
+	- Amiga Buddha and Catweasel IDE Driver
 kernel-options.txt
 	- command line options for Linux/m68k
 
diff --git a/Documentation/misc-devices/eeprom b/Documentation/misc-devices/eeprom
index f7e8104..ba69201 100644
--- a/Documentation/misc-devices/eeprom
+++ b/Documentation/misc-devices/eeprom
@@ -38,7 +38,7 @@
 Authors:
         Frodo Looijaard <frodol@dds.nl>,
         Philip Edelbrock <phil@netroedge.com>,
-        Jean Delvare <khali@linux-fr.org>,
+        Jean Delvare <jdelvare@suse.de>,
         Greg Kroah-Hartman <greg@kroah.com>,
         IBM Corp.
 
diff --git a/Documentation/networking/00-INDEX b/Documentation/networking/00-INDEX
index f11580f..557b6ef 100644
--- a/Documentation/networking/00-INDEX
+++ b/Documentation/networking/00-INDEX
@@ -6,8 +6,14 @@
 	- information on the 3Com Etherlink III Series Ethernet cards.
 6pack.txt
 	- info on the 6pack protocol, an alternative to KISS for AX.25
-DLINK.txt
-	- info on the D-Link DE-600/DE-620 parallel port pocket adapters
+LICENSE.qla3xxx
+	- GPLv2 for QLogic Linux Networking HBA Driver
+LICENSE.qlge
+	- GPLv2 for QLogic Linux qlge NIC Driver
+LICENSE.qlcnic
+	- GPLv2 for QLogic Linux qlcnic NIC Driver
+Makefile
+	- Makefile for docsrc.
 PLIP.txt
 	- PLIP: The Parallel Line Internet Protocol device driver
 README.ipw2100
@@ -17,7 +23,7 @@
 README.sb1000
 	- info on General Instrument/NextLevel SURFboard1000 cable modem.
 alias.txt
-	- info on using alias network devices 
+	- info on using alias network devices.
 arcnet-hardware.txt
 	- tons of info on ARCnet, hubs, jumper settings for ARCnet cards, etc.
 arcnet.txt
@@ -80,7 +86,7 @@
 	- info on using Frame Relay/Data Link Connection Identifier (DLCI).
 gen_stats.txt
 	- Generic networking statistics for netlink users.
-generic_hdlc.txt
+generic-hdlc.txt
 	- The generic High Level Data Link Control (HDLC) layer.
 generic_netlink.txt
 	- info on Generic Netlink
@@ -88,6 +94,8 @@
 	- Gianfar Ethernet Driver.
 i40e.txt
 	- README for the Intel Ethernet Controller XL710 Driver (i40e).
+i40evf.txt
+	- Short note on the Driver for the Intel(R) XL710 X710 Virtual Function
 ieee802154.txt
 	- Linux IEEE 802.15.4 implementation, API and drivers
 igb.txt
@@ -102,6 +110,8 @@
 	- AppleTalk-IP Decapsulation and AppleTalk-IP Encapsulation
 iphase.txt
 	- Interphase PCI ATM (i)Chip IA Linux driver info.
+ipsec.txt
+	- Note on not compressing IPSec payload and resulting failed policy check.
 ipv6.txt
 	- Options to the ipv6 kernel module.
 ipvs-sysctl.txt
@@ -120,6 +130,8 @@
 	- programming information of the LAPB module.
 ltpc.txt
 	- the Apple or Farallon LocalTalk PC card driver
+mac80211-auth-assoc-deauth.txt
+	- authentication and association / deauth-disassoc with mac80211
 mac80211-injection.txt
 	- HOWTO use packet injection with mac80211
 multiqueue.txt
@@ -134,6 +146,10 @@
 	- info on network device driver functions exported to the kernel.
 netif-msg.txt
 	- Design of the network interface message level setting (NETIF_MSG_*).
+netlink_mmap.txt
+	- memory mapped I/O with netlink
+nf_conntrack-sysctl.txt
+	- list of netfilter-sysctl knobs.
 nfc.txt
 	- The Linux Near Field Communication (NFC) subsystem.
 openvswitch.txt
@@ -176,7 +192,7 @@
 	- SysKonnect FDDI (SK-5xxx, Compaq Netelligent) driver info.
 smc9.txt
 	- the driver for SMC's 9000 series of Ethernet cards
-spider-net.txt
+spider_net.txt
 	- README for the Spidernet Driver (as found in PS3 / Cell BE).
 stmmac.txt
 	- README for the STMicro Synopsys Ethernet driver.
@@ -188,6 +204,8 @@
 	- short blurb on how TCP output takes place.
 tcp-thin.txt
 	- kernel tuning options for low rate 'thin' TCP streams.
+team.txt
+	- pointer to information for ethernet teaming devices.
 tlan.txt
 	- ThunderLAN (Compaq Netelligent 10/100, Olicom OC-2xxx) driver info.
 tproxy.txt
@@ -200,6 +218,8 @@
 	- info on using 3Com Vortex (3c590, 3c592, 3c595, 3c597) Ethernet cards.
 vxge.txt
 	- README for the Neterion X3100 PCIe Server Adapter.
+vxlan.txt
+	- Virtual extensible LAN overview
 x25.txt
 	- general info on X.25 development.
 x25-iface.txt
diff --git a/Documentation/networking/3c505.txt b/Documentation/networking/3c505.txt
deleted file mode 100644
index 72f38b1..0000000
--- a/Documentation/networking/3c505.txt
+++ /dev/null
@@ -1,45 +0,0 @@
-The 3Com Etherlink Plus (3c505) driver.
-
-This driver now uses DMA.  There is currently no support for PIO operation.
-The default DMA channel is 6; this is _not_ autoprobed, so you must
-make sure you configure it correctly.  If loading the driver as a
-module, you can do this with "modprobe 3c505 dma=n".  If the driver is
-linked statically into the kernel, you must either use an "ether="
-statement on the command line, or change the definition of ELP_DMA in 3c505.h.
-
-The driver will warn you if it has to fall back on the compiled in
-default DMA channel. 
-
-If no base address is given at boot time, the driver will autoprobe
-ports 0x300, 0x280 and 0x310 (in that order).  If no IRQ is given, the driver
-will try to probe for it.
-
-The driver can be used as a loadable module.
-
-Theoretically, one instance of the driver can now run multiple cards,
-in the standard way (when loading a module, say "modprobe 3c505
-io=0x300,0x340 irq=10,11 dma=6,7" or whatever).  I have not tested
-this, though.
-
-The driver may now support revision 2 hardware; the dependency on
-being able to read the host control register has been removed.  This
-is also untested, since I don't have a suitable card.
-
-Known problems:
- I still see "DMA upload timed out" messages from time to time.  These
-seem to be fairly non-fatal though.
- The card is old and slow.
-
-To do:
- Improve probe/setup code
- Test multicast and promiscuous operation
-
-Authors:
- The driver is mainly written by Craig Southeren, email
- <craigs@ineluki.apana.org.au>.
- Parts of the driver (adapting the driver to 1.1.4+ kernels,
- IRQ/address detection, some changes) and this README by
- Juha Laiho <jlaiho@ichaos.nullnet.fi>.
- DMA mode, more fixes, etc, by Philip Blundell <pjb27@cam.ac.uk>
- Multicard support, Software configurable DMA, etc., by
- Christopher Collins <ccollins@pcug.org.au>
diff --git a/Documentation/networking/ip-sysctl.txt b/Documentation/networking/ip-sysctl.txt
index 5de0374..ab42c95 100644
--- a/Documentation/networking/ip-sysctl.txt
+++ b/Documentation/networking/ip-sysctl.txt
@@ -1088,6 +1088,12 @@
 	IGMPv3 report retransmit will take place.
 	Default: 1000 (1 seconds)
 
+promote_secondaries - BOOLEAN
+	When a primary IP address is removed from this interface,
+	promote a corresponding secondary IP address instead of
+	removing all the corresponding secondary IP addresses.
+
 tag - INTEGER
 	Allows you to write a number, which can be used as required.
 	Default value is 0.
diff --git a/Documentation/networking/packet_mmap.txt b/Documentation/networking/packet_mmap.txt
index 91ffe1d..1404674 100644
--- a/Documentation/networking/packet_mmap.txt
+++ b/Documentation/networking/packet_mmap.txt
@@ -583,6 +583,7 @@
   - PACKET_FANOUT_CPU: schedule to socket by CPU packet arrives on
   - PACKET_FANOUT_RND: schedule to socket by random selection
   - PACKET_FANOUT_ROLLOVER: if one socket is full, rollover to another
+  - PACKET_FANOUT_QM: schedule to socket by the skb's recorded queue_mapping
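
A minimal sketch of how this new mode might be selected from user space
(this snippet is not part of the original example code; it assumes kernel
headers recent enough to define PACKET_FANOUT_QM, the group id 42 is
arbitrary, and error handling is kept to a minimum):

#include <arpa/inet.h>
#include <linux/if_ether.h>
#include <linux/if_packet.h>
#include <stdio.h>
#include <sys/socket.h>

int main(void)
{
	int fd, err;
	/* low 16 bits: fanout group id, high 16 bits: fanout mode */
	int fanout_arg = 42 | (PACKET_FANOUT_QM << 16);

	fd = socket(AF_PACKET, SOCK_RAW, htons(ETH_P_ALL));
	if (fd < 0) {
		perror("socket");
		return 1;
	}

	err = setsockopt(fd, SOL_PACKET, PACKET_FANOUT,
			 &fanout_arg, sizeof(fanout_arg));
	if (err) {
		perror("PACKET_FANOUT");
		return 1;
	}

	/* sockets that joined group 42 now receive packets steered by the
	 * skb's recorded queue_mapping */
	return 0;
}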
 
 Minimal example code by David S. Miller (try things like "./test eth0 hash",
 "./test eth0 lb", etc.):
diff --git a/Documentation/phy.txt b/Documentation/phy.txt
index 0103e4b..ebff6ee 100644
--- a/Documentation/phy.txt
+++ b/Documentation/phy.txt
@@ -75,14 +75,26 @@
 it. This framework provides the following APIs to get a reference to the PHY.
 
 struct phy *phy_get(struct device *dev, const char *string);
+struct phy *phy_optional_get(struct device *dev, const char *string);
 struct phy *devm_phy_get(struct device *dev, const char *string);
+struct phy *devm_phy_optional_get(struct device *dev, const char *string);
 
-phy_get and devm_phy_get can be used to get the PHY. In the case of dt boot,
-the string arguments should contain the phy name as given in the dt data and
-in the case of non-dt boot, it should contain the label of the PHY.
-The only difference between the two APIs is that devm_phy_get associates the
-device with the PHY using devres on successful PHY get. On driver detach,
-release function is invoked on the the devres data and devres data is freed.
+phy_get, phy_optional_get, devm_phy_get and devm_phy_optional_get can
+be used to get the PHY. In the case of dt boot, the string arguments
+should contain the phy name as given in the dt data and in the case of
+non-dt boot, it should contain the label of the PHY.  The two
+devm_* variants additionally associate the device with the PHY using
+devres on a successful PHY get. On driver detach, the release function
+is invoked on the devres data and the devres data is freed.
+phy_optional_get and devm_phy_optional_get should be used when the phy
+is optional. These two functions will never return -ENODEV, but instead
+return NULL when the phy cannot be found.
+
+It should be noted that NULL is a valid phy reference. All phy
+consumer calls on the NULL phy become NOPs. That is, the release calls,
+the phy_init() and phy_exit() calls, and the phy_power_on() and
+phy_power_off() calls are all NOPs when applied to a NULL phy. The NULL
+phy is useful in devices for handling optional phy devices.
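
As an illustration only (the foo_probe() function and the "usb2-phy" lookup
string below are made up for this example), a consumer driver treating its
PHY as optional could look roughly like this; when the optional PHY is
absent, devm_phy_optional_get() returns NULL and the later phy_* calls
simply become NOPs:

static int foo_probe(struct platform_device *pdev)
{
	struct phy *phy;
	int ret;

	/* NULL (not an error) is returned if no "usb2-phy" is described */
	phy = devm_phy_optional_get(&pdev->dev, "usb2-phy");
	if (IS_ERR(phy))
		return PTR_ERR(phy);	/* real error, e.g. -EPROBE_DEFER */

	ret = phy_init(phy);		/* NOP if phy == NULL */
	if (ret)
		return ret;

	ret = phy_power_on(phy);	/* NOP if phy == NULL */
	if (ret) {
		phy_exit(phy);
		return ret;
	}

	/* ... rest of the probe ... */
	return 0;
}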
 
 5. Releasing a reference to the PHY
 
diff --git a/Documentation/power/00-INDEX b/Documentation/power/00-INDEX
index a4d682f..ad04cc8 100644
--- a/Documentation/power/00-INDEX
+++ b/Documentation/power/00-INDEX
@@ -4,6 +4,8 @@
 	- basic info about the APM and ACPI support.
 basic-pm-debugging.txt
 	- Debugging suspend and resume
+charger-manager.txt
+	- Battery charger management.
 devices.txt
 	- How drivers interact with system-wide power management
 drivers-testing.txt
@@ -22,6 +24,8 @@
 	- info on Linux PM Quality of Service interface
 power_supply_class.txt
 	- Tells userspace about battery, UPS, AC or DC power supply properties
+runtime_pm.txt
+	- Power management framework for I/O devices.
 s2ram.txt
 	- How to get suspend to ram working (and debug it when it isn't)
 states.txt
@@ -38,7 +42,5 @@
 	- How to trick software suspend (to disk) into working when it isn't
 userland-swsusp.txt
 	- Experimental implementation of software suspend in userspace
-video_extension.txt
-	- ACPI video extensions
 video.txt
 	- Video issues during resume from suspend
diff --git a/Documentation/ptp/testptp.c b/Documentation/ptp/testptp.c
index a74d0a8..4aba043 100644
--- a/Documentation/ptp/testptp.c
+++ b/Documentation/ptp/testptp.c
@@ -117,6 +117,7 @@
 		" -f val     adjust the ptp clock frequency by 'val' ppb\n"
 		" -g         get the ptp clock time\n"
 		" -h         prints this message\n"
+		" -i val     index for event/trigger\n"
 		" -k val     measure the time offset between system and phc clock\n"
 		"            for 'val' times (Maximum 25)\n"
 		" -p val     enable output with a period of 'val' nanoseconds\n"
@@ -154,6 +155,7 @@
 	int capabilities = 0;
 	int extts = 0;
 	int gettime = 0;
+	int index = 0;
 	int oneshot = 0;
 	int pct_offset = 0;
 	int n_samples = 0;
@@ -167,7 +169,7 @@
 
 	progname = strrchr(argv[0], '/');
 	progname = progname ? 1+progname : argv[0];
-	while (EOF != (c = getopt(argc, argv, "a:A:cd:e:f:ghk:p:P:sSt:v"))) {
+	while (EOF != (c = getopt(argc, argv, "a:A:cd:e:f:ghi:k:p:P:sSt:v"))) {
 		switch (c) {
 		case 'a':
 			oneshot = atoi(optarg);
@@ -190,6 +192,9 @@
 		case 'g':
 			gettime = 1;
 			break;
+		case 'i':
+			index = atoi(optarg);
+			break;
 		case 'k':
 			pct_offset = 1;
 			n_samples = atoi(optarg);
@@ -301,7 +306,7 @@
 
 	if (extts) {
 		memset(&extts_request, 0, sizeof(extts_request));
-		extts_request.index = 0;
+		extts_request.index = index;
 		extts_request.flags = PTP_ENABLE_FEATURE;
 		if (ioctl(fd, PTP_EXTTS_REQUEST, &extts_request)) {
 			perror("PTP_EXTTS_REQUEST");
@@ -375,7 +380,7 @@
 			return -1;
 		}
 		memset(&perout_request, 0, sizeof(perout_request));
-		perout_request.index = 0;
+		perout_request.index = index;
 		perout_request.start.sec = ts.tv_sec + 2;
 		perout_request.start.nsec = 0;
 		perout_request.period.sec = 0;
diff --git a/Documentation/s390/00-INDEX b/Documentation/s390/00-INDEX
index 3a2b963..10c874e 100644
--- a/Documentation/s390/00-INDEX
+++ b/Documentation/s390/00-INDEX
@@ -16,11 +16,13 @@
 	- hints for debugging on s390 systems.
 driver-model.txt
 	- information on s390 devices and the driver model.
+kvm.txt
+	- ioctl calls to /dev/kvm on s390.
 monreader.txt
 	- information on accessing the z/VM monitor stream from Linux.
+qeth.txt
+	- HiperSockets Bridge Port Support.
 s390dbf.txt
 	- information on using the s390 debug feature.
-TAPE
-	- information on the driver for channel-attached tapes.
-zfcpdump
+zfcpdump.txt
 	- information on the s390 SCSI dump tool.
diff --git a/Documentation/scheduler/00-INDEX b/Documentation/scheduler/00-INDEX
index d2651c4..eccf7ad 100644
--- a/Documentation/scheduler/00-INDEX
+++ b/Documentation/scheduler/00-INDEX
@@ -2,6 +2,8 @@
 	- this file.
 sched-arch.txt
 	- CPU Scheduler implementation hints for architecture specific code.
+sched-bwc.txt
+	- CFS bandwidth control overview.
 sched-design-CFS.txt
 	- goals, design and implementation of the Completely Fair Scheduler.
 sched-domains.txt
@@ -10,5 +12,7 @@
 	- How and why the scheduler's nice levels are implemented.
 sched-rt-group.txt
 	- real-time group scheduling.
+sched-deadline.txt
+	- deadline scheduling.
 sched-stats.txt
 	- information on schedstats (Linux Scheduler Statistics).
diff --git a/Documentation/scheduler/sched-deadline.txt b/Documentation/scheduler/sched-deadline.txt
new file mode 100644
index 0000000..18adc92
--- /dev/null
+++ b/Documentation/scheduler/sched-deadline.txt
@@ -0,0 +1,281 @@
+			  Deadline Task Scheduling
+			  ------------------------
+
+CONTENTS
+========
+
+ 0. WARNING
+ 1. Overview
+ 2. Scheduling algorithm
+ 3. Scheduling Real-Time Tasks
+ 4. Bandwidth management
+   4.1 System-wide settings
+   4.2 Task interface
+   4.3 Default behavior
+ 5. Tasks CPU affinity
+   5.1 SCHED_DEADLINE and cpusets HOWTO
+ 6. Future plans
+
+
+0. WARNING
+==========
+
+ Fiddling with these settings can result in unpredictable or even unstable
+ system behavior. As for -rt (group) scheduling, it is assumed that root users
+ know what they're doing.
+
+
+1. Overview
+===========
+
+ The SCHED_DEADLINE policy contained inside the sched_dl scheduling class is
+ basically an implementation of the Earliest Deadline First (EDF) scheduling
+ algorithm, augmented with a mechanism (called Constant Bandwidth Server, CBS)
+ that makes it possible to isolate the behavior of tasks between each other.
+
+
+2. Scheduling algorithm
+=======================
+
+ SCHED_DEADLINE uses three parameters, named "runtime", "period", and
+ "deadline" to schedule tasks. A SCHED_DEADLINE task is guaranteed to receive
+ "runtime" microseconds of execution time every "period" microseconds, and
+ these "runtime" microseconds are available within "deadline" microseconds
+ from the beginning of the period.  In order to implement this behaviour,
+ every time the task wakes up, the scheduler computes a "scheduling deadline"
+ consistent with the guarantee (using the CBS[2,3] algorithm). Tasks are then
+ scheduled using EDF[1] on these scheduling deadlines (the task with the
+ smallest scheduling deadline is selected for execution). Notice that this
+ guarantee is respected if a proper "admission control" strategy (see Section
+ "4. Bandwidth management") is used.
+
+ Summing up, the CBS[2,3] algorithm assigns scheduling deadlines to tasks so
+ that each task runs for at most its runtime every period, avoiding any
+ interference between different tasks (bandwidth isolation), while the EDF[1]
+ algorithm selects the task with the smallest scheduling deadline as the one
+ to be executed first.  Thanks to this feature, even tasks that do not
+ strictly comply with the "traditional" real-time task model (see Section 3)
+ can effectively use the new policy.
+
+ In more detail, the CBS algorithm assigns scheduling deadlines to
+ tasks in the following way:
+
+  - Each SCHED_DEADLINE task is characterised by the "runtime",
+    "deadline", and "period" parameters;
+
+  - The state of the task is described by a "scheduling deadline", and
+    a "current runtime". These two parameters are initially set to 0;
+
+  - When a SCHED_DEADLINE task wakes up (becomes ready for execution),
+    the scheduler checks if
+
+                    current runtime                runtime
+         ---------------------------------- > ----------------
+         scheduling deadline - current time         period
+
+    then, if the scheduling deadline is smaller than the current time, or if
+    this condition is verified (a worked example follows this list), the
+    scheduling deadline and the current runtime are re-initialised as
+
+         scheduling deadline = current time + deadline
+         current runtime = runtime
+
+    otherwise, the scheduling deadline and the current runtime are
+    left unchanged;
+
+  - When a SCHED_DEADLINE task executes for an amount of time t, its
+    current runtime is decreased as
+
+         current runtime = current runtime - t
+
+    (technically, the runtime is decreased at every tick, or when the
+    task is descheduled / preempted);
+
+  - When the current runtime becomes less than or equal to 0, the task is
+    said to be "throttled" (also known as "depleted" in real-time literature)
+    and cannot be scheduled until its scheduling deadline. The "replenishment
+    time" for this task (see next item) is set to be equal to the current
+    value of the scheduling deadline;
+
+  - When the current time is equal to the replenishment time of a
+    throttled task, the scheduling deadline and the current runtime are
+    updated as
+
+         scheduling deadline = scheduling deadline + period
+         current runtime = current runtime + runtime
+
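+ As a worked example of the wake-up rule above: consider a task with
+ runtime = 10ms and period = 100ms that wakes up 30ms before its current
+ scheduling deadline with 4ms of runtime left. Since 4 / 30 (about 0.13) is
+ greater than 10 / 100 = 0.1, the condition is verified, so the task gets a
+ new scheduling deadline equal to "current time + deadline" and its runtime
+ is replenished to 10ms; had the ratio been smaller, deadline and runtime
+ would have been left unchanged.
+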
+
+3. Scheduling Real-Time Tasks
+=============================
+
+ * BIG FAT WARNING ******************************************************
+ *
+ * This section contains a (not-thorough) summary on classical deadline
+ * scheduling theory, and how it applies to SCHED_DEADLINE.
+ * The reader can "safely" skip to Section 4 if only interested in seeing
+ * how the scheduling policy can be used. Anyway, we strongly recommend
+ * coming back here and continuing to read (once the urge for testing is
+ * satisfied :P) to be sure of fully understanding all technical details.
+ ************************************************************************
+
+ There are no limitations on what kind of task can exploit this new
+ scheduling discipline, even if it must be said that it is particularly
+ suited for periodic or sporadic real-time tasks that need guarantees on their
+ timing behavior, e.g., multimedia, streaming, control applications, etc.
+
+ A typical real-time task is composed of a repetition of computation phases
+ (task instances, or jobs) which are activated in a periodic or sporadic
+ fashion.
+ Each job J_j (where J_j is the j^th job of the task) is characterised by an
+ arrival time r_j (the time when the job starts), an amount of computation
+ time c_j needed to finish the job, and a job absolute deadline d_j, which
+ is the time within which the job should be finished. The maximum execution
+ time max_j{c_j} is called "Worst Case Execution Time" (WCET) for the task.
+ A real-time task can be periodic with period P if r_{j+1} = r_j + P, or
+ sporadic with minimum inter-arrival time P if r_{j+1} >= r_j + P. Finally,
+ d_j = r_j + D, where D is the task's relative deadline.
+
+ SCHED_DEADLINE can be used to schedule real-time tasks guaranteeing that
+ the jobs' deadlines of a task are respected. In order to do this, a task
+ must be scheduled by setting:
+
+  - runtime >= WCET
+  - deadline = D
+  - period <= P
+
+ IOW, if runtime >= WCET and if period is <= P, then the scheduling deadlines
+ and the absolute deadlines (d_j) coincide, so a proper admission control
+ allows the jobs' absolute deadlines for this task to be respected (this is
+ what is called the "hard schedulability property" and is an extension of
+ Lemma 1 of [2]).
+
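+ For example, a 25 frames-per-second video decoder whose worst case decoding
+ time is 10ms per frame (WCET = 10ms, P = D = 40ms) could be scheduled with
+ runtime = 10ms, deadline = 40ms and period = 40ms.
+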
+ References:
+  1 - C. L. Liu and J. W. Layland. Scheduling algorithms for multiprogram-
+      ming in a hard-real-time environment. Journal of the Association for
+      Computing Machinery, 20(1), 1973.
+  2 - L. Abeni , G. Buttazzo. Integrating Multimedia Applications in Hard
+      Real-Time Systems. Proceedings of the 19th IEEE Real-time Systems
+      Symposium, 1998. http://retis.sssup.it/~giorgio/paps/1998/rtss98-cbs.pdf
+  3 - L. Abeni. Server Mechanisms for Multimedia Applications. ReTiS Lab
+      Technical Report. http://xoomer.virgilio.it/lucabe72/pubs/tr-98-01.ps
+
+4. Bandwidth management
+=======================
+
+ In order for the -deadline scheduling to be effective and useful, it is
+ important to have some method to keep the allocation of the available CPU
+ bandwidth to the tasks under control.
+ This is usually called "admission control" and if it is not performed at all,
+ no guarantee can be given on the actual scheduling of the -deadline tasks.
+
+ Since RT-throttling was introduced, each task group has an associated
+ bandwidth, calculated as a certain amount of runtime over a period.
+ Moreover, to make it possible to manipulate such bandwidth, readable/writable
+ controls have been added to both procfs (for system wide settings) and cgroupfs
+ (for per-group settings).
+ Therefore, the same interface is being used for controlling the bandwidth
+ distribution to -deadline tasks.
+
+ However, more discussion is needed in order to figure out how we want to manage
+ SCHED_DEADLINE bandwidth at the task group level. Therefore, SCHED_DEADLINE
+ uses (for now) a less sophisticated, but actually very sensible, mechanism to
+ ensure that a certain utilization cap is not exceeded in each root_domain.
+
+ Another main difference between deadline bandwidth management and RT-throttling
+ is that -deadline tasks have bandwidth on their own (while -rt ones don't!),
+ and thus we don't need a higher level throttling mechanism to enforce the
+ desired bandwidth.
+
+4.1 System-wide settings
+------------------------
+
+ The system wide settings are configured under the /proc virtual file system.
+
+ For now the -rt knobs are used for dl admission control and the -deadline
+ runtime is accounted against the -rt runtime. We realise that this isn't
+ entirely desirable; however, it is better to have a small interface for now,
+ and be able to change it easily later. The ideal situation (see 5.) is to run
+ -rt tasks from a -deadline server; in which case the -rt bandwidth is a direct
+ subset of dl_bw.
+
+ This means that, for a root_domain comprising M CPUs, -deadline tasks
+ can be created while the sum of their bandwidths stays below:
+
+   M * (sched_rt_runtime_us / sched_rt_period_us)
+
+ It is also possible to disable this bandwidth management logic, and
+ thus be free to oversubscribe the system up to any arbitrary level.
+ This is done by writing -1 to /proc/sys/kernel/sched_rt_runtime_us.
+
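+ For example, on a root_domain spanning M = 4 CPUs with the default values
+ sched_rt_runtime_us = 950000 and sched_rt_period_us = 1000000, -deadline
+ tasks can be admitted as long as the sum of their bandwidths
+ (runtime / period) stays below 4 * (950000 / 1000000) = 3.8.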
+
+4.2 Task interface
+------------------
+
+ Specifying a periodic/sporadic task that executes for a given amount of
+ runtime at each instance, and that is scheduled according to the urgency of
+ its own timing constraints needs, in general, a way of declaring:
+  - a (maximum/typical) instance execution time,
+  - a minimum interval between consecutive instances,
+  - a time constraint by which each instance must be completed.
+
+ Therefore:
+  * a new struct sched_attr, containing all the necessary fields, is
+    provided;
+  * the new scheduling-related syscalls that manipulate it, i.e.,
+    sched_setattr() and sched_getattr(), are implemented (a minimal usage
+    sketch is appended after section 6 below).
+
+
+4.3 Default behavior
+---------------------
+
+ The default SCHED_DEADLINE bandwidth is given by rt_runtime equal to 950000
+ and rt_period equal to 1000000; by default, this means that -deadline tasks
+ can use at most 95% of the CPU bandwidth, multiplied by the number of CPUs
+ that compose the root_domain, for each root_domain.
+
+ A -deadline task cannot fork.
+
+5. Tasks CPU affinity
+=====================
+
+ -deadline tasks cannot have an affinity mask smaller than the entire
+ root_domain they are created on. However, affinities can be specified
+ through the cpuset facility (Documentation/cgroups/cpusets.txt).
+
+5.1 SCHED_DEADLINE and cpusets HOWTO
+------------------------------------
+
+ An example of a simple configuration (pin a -deadline task to CPU0)
+ follows (rt-app is used to create a -deadline task).
+
+ mkdir /dev/cpuset
+ mount -t cgroup -o cpuset cpuset /dev/cpuset
+ cd /dev/cpuset
+ mkdir cpu0
+ echo 0 > cpu0/cpuset.cpus
+ echo 0 > cpu0/cpuset.mems
+ echo 1 > cpuset.cpu_exclusive
+ echo 0 > cpuset.sched_load_balance
+ echo 1 > cpu0/cpuset.cpu_exclusive
+ echo 1 > cpu0/cpuset.mem_exclusive
+ echo $$ > cpu0/tasks
+ rt-app -t 100000:10000:d:0 -D5 (it is now actually superfluous to specify
+ task affinity)
+
+6. Future plans
+===============
+
+ Still missing:
+
+  - refinements to deadline inheritance, especially regarding the possibility
+    of retaining bandwidth isolation among non-interacting tasks. This is
+    being studied from both theoretical and practical points of view, and
+    hopefully we should be able to produce some demonstrative code soon;
+  - (c)group based bandwidth management, and maybe scheduling;
+  - access control for non-root users (and related security concerns to
+    address): what is the best way to allow unprivileged use of the
+    mechanisms, and how can non-root users be prevented from "cheating" the
+    system?
+
+ As already discussed, we are also planning to merge this work with the EDF
+ throttling patches [https://lkml.org/lkml/2010/2/23/239] but we still are in
+ the preliminary phases of the merge and we really seek feedback that would
+ help us decide on the direction it should take.
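
The struct and syscalls mentioned in section 4.2 can be exercised with a
small user space program. The sketch below is only illustrative and is not
part of the in-tree documentation: it assumes a libc that exposes
__NR_sched_setattr (kernel >= 3.14 headers) but no sched_setattr() wrapper,
so both the structure layout and the wrapper are defined locally, and it
needs sufficient privileges to succeed. The runtime/deadline/period values
model the 25 fps decoder example from section 3 and are given in
nanoseconds, which is what the syscall expects.

#define _GNU_SOURCE
#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <sys/syscall.h>
#include <sys/types.h>
#include <unistd.h>

#ifndef SCHED_DEADLINE
#define SCHED_DEADLINE	6
#endif

/* local copy of the kernel's struct sched_attr (see section 4.2) */
struct sched_attr {
	uint32_t size;
	uint32_t sched_policy;
	uint64_t sched_flags;
	int32_t  sched_nice;		/* used by SCHED_NORMAL/SCHED_BATCH */
	uint32_t sched_priority;	/* used by SCHED_FIFO/SCHED_RR */
	uint64_t sched_runtime;		/* SCHED_DEADLINE, nanoseconds */
	uint64_t sched_deadline;
	uint64_t sched_period;
};

static int sched_setattr(pid_t pid, const struct sched_attr *attr,
			 unsigned int flags)
{
	return syscall(__NR_sched_setattr, pid, attr, flags);
}

int main(void)
{
	struct sched_attr attr;

	memset(&attr, 0, sizeof(attr));
	attr.size = sizeof(attr);
	attr.sched_policy   = SCHED_DEADLINE;
	attr.sched_runtime  = 10 * 1000 * 1000;		/* 10 ms */
	attr.sched_deadline = 40 * 1000 * 1000;		/* 40 ms */
	attr.sched_period   = 40 * 1000 * 1000;		/* 40 ms */

	if (sched_setattr(0, &attr, 0)) {	/* pid 0 means "this task" */
		perror("sched_setattr");
		return 1;
	}

	/* ... periodic decoding loop would go here ... */
	return 0;
}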
diff --git a/Documentation/scsi/00-INDEX b/Documentation/scsi/00-INDEX
index 2044be5..c4b978a 100644
--- a/Documentation/scsi/00-INDEX
+++ b/Documentation/scsi/00-INDEX
@@ -36,6 +36,8 @@
 	- info on WorkBiT NinjaSCSI-32/32Bi driver
 aacraid.txt
 	- Driver supporting Adaptec RAID controllers
+advansys.txt
+	- List of Advansys Host Adapters
 aha152x.txt
 	- info on driver for Adaptec AHA152x based adapters
 aic79xx.txt
@@ -44,6 +46,12 @@
 	- info on driver for Adaptec controllers
 arcmsr_spec.txt
 	- ARECA FIRMWARE SPEC (for IOP331 adapter)
+bfa.txt
+	- Brocade FC/FCOE adapter driver.
+bnx2fc.txt
+	- FCoE hardware offload for Broadcom network interfaces.
+cxgb3i.txt
+	- Chelsio iSCSI Linux Driver
 dc395x.txt
 	- README file for the dc395x SCSI driver
 dpti.txt
@@ -52,18 +60,24 @@
 	- info on driver for DTC 2x80 based adapters
 g_NCR5380.txt
 	- info on driver for NCR5380 and NCR53c400 based adapters
+hpsa.txt
+	- HP Smart Array Controller SCSI driver.
 hptiop.txt
 	- HIGHPOINT ROCKETRAID 3xxx RAID DRIVER
 in2000.txt
 	- info on in2000 driver
 libsas.txt
 	- Serial Attached SCSI management layer.
+link_power_management_policy.txt
+	- Link power management options.
 lpfc.txt
 	- LPFC driver release notes
 megaraid.txt
 	- Common Management Module, shared code handling ioctls for LSI drivers
 ncr53c8xx.txt
 	- info on driver for NCR53c8xx based adapters
+osd.txt
+	- Object-Based Storage Device, command set introduction.
 osst.txt
 	- info on driver for OnStream SC-x0 SCSI tape
 ppa.txt
@@ -74,6 +88,8 @@
 	- README for the SCSI media changer driver
 scsi-generic.txt
 	- info on the sg driver for generic (non-disk/CD/tape) SCSI devices.
+scsi-parameters.txt
+	- List of SCSI-parameters to pass to the kernel at module load-time.
 scsi.txt
 	- short blurb on using SCSI support as a module.
 scsi_mid_low_api.txt
diff --git a/Documentation/serial/00-INDEX b/Documentation/serial/00-INDEX
index 1f1b22f..f9c6b5e 100644
--- a/Documentation/serial/00-INDEX
+++ b/Documentation/serial/00-INDEX
@@ -4,10 +4,12 @@
 	- info on Cyclades-Z firmware loading.
 digiepca.txt
 	- info on Digi Intl. {PC,PCI,EISA}Xx and Xem series cards.
-hayes-esp.txt
-	- info on using the Hayes ESP serial driver.
+driver
+	- intro to the low level serial driver.
 moxa-smartio
 	- file with info on installing/using Moxa multiport serial driver.
+n_gsm.txt
+	- GSM 0710 tty multiplexer howto.
 riscom8.txt
 	- notes on using the RISCom/8 multi-port serial driver.
 rocket.txt
diff --git a/Documentation/spi/00-INDEX b/Documentation/spi/00-INDEX
new file mode 100644
index 0000000..a128fa8
--- /dev/null
+++ b/Documentation/spi/00-INDEX
@@ -0,0 +1,22 @@
+00-INDEX
+	- this file.
+Makefile
+	- Makefile for the example sourcefiles.
+butterfly
+	- AVR Butterfly SPI driver overview and pin configuration.
+ep93xx_spi
+	- Basic EP93xx SPI driver configuration.
+pxa2xx
+	- PXA2xx SPI master controller built around a spi_message FIFO workqueue
+spidev
+	- Intro to the userspace API for spi devices
+spidev_fdx.c
+	- spidev example file
+spi-lm70llp
+	- Connecting an LM70-LLP sensor to the kernel via the SPI subsys.
+spi-sc18is602
+	- NXP SC18IS602/603 I2C-bus to SPI bridge
+spi-summary
+	- (Linux) SPI overview. If unsure about SPI or SPI in Linux, start here.
+spidev_test.c
+	- SPI testing utility.
diff --git a/Documentation/spi/spi-summary b/Documentation/spi/spi-summary
index f72e0d1..7982bcc 100644
--- a/Documentation/spi/spi-summary
+++ b/Documentation/spi/spi-summary
@@ -543,7 +543,22 @@
 	queuing transfers that arrive in the meantime. When the driver is
 	finished with this message, it must call
 	spi_finalize_current_message() so the subsystem can issue the next
-	transfer. This may sleep.
+	message. This may sleep.
+
+    master->transfer_one(struct spi_master *master, struct spi_device *spi,
+			 struct spi_transfer *transfer)
+	The subsystem calls the driver to transfer a single transfer while
+	queuing transfers that arrive in the meantime. When the driver is
+	finished with this transfer, it must call
+	spi_finalize_current_transfer() so the subsystem can issue the next
+	transfer. This may sleep. Note: transfer_one and transfer_one_message
+	are mutually exclusive; when both are set, the generic subsystem does
+	not call your transfer_one callback.
+
+	Return values:
+	negative errno: error
+	0: transfer is finished
+	1: transfer is still in progress
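
As a rough sketch only (foo_transfer_one(), foo_priv and foo_hw_start() are
imaginary names, and a real driver would also honour fields such as
transfer->speed_hz and transfer->bits_per_word), an asynchronous
transfer_one implementation could be shaped like this:

static int foo_transfer_one(struct spi_master *master, struct spi_device *spi,
			    struct spi_transfer *xfer)
{
	struct foo_priv *priv = spi_master_get_devdata(master);

	/* kick the hardware: program the FIFO/DMA with tx_buf/rx_buf/len */
	foo_hw_start(priv, xfer->tx_buf, xfer->rx_buf, xfer->len);

	/*
	 * Completion is signalled from the (imaginary) interrupt handler,
	 * which calls spi_finalize_current_transfer(master).  Returning 1
	 * tells the core this transfer is still in progress; returning 0
	 * would mean it already finished synchronously.
	 */
	return 1;
}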
 
     DEPRECATED METHODS
 
diff --git a/Documentation/sysctl/kernel.txt b/Documentation/sysctl/kernel.txt
index ee9a2f9..e55124e 100644
--- a/Documentation/sysctl/kernel.txt
+++ b/Documentation/sysctl/kernel.txt
@@ -33,6 +33,10 @@
 - domainname
 - hostname
 - hotplug
+- hung_task_panic
+- hung_task_check_count
+- hung_task_timeout_secs
+- hung_task_warnings
 - kexec_load_disabled
 - kptr_restrict
 - kstack_depth_to_print       [ X86 only ]
@@ -288,6 +292,44 @@
 
 ==============================================================
 
+hung_task_panic:
+
+Controls the kernel's behavior when a hung task is detected.
+This file shows up if CONFIG_DETECT_HUNG_TASK is enabled.
+
+0: continue operation. This is the default behavior.
+
+1: panic immediately.
+
+==============================================================
+
+hung_task_check_count:
+
+The upper bound on the number of tasks that are checked.
+This file shows up if CONFIG_DETECT_HUNG_TASK is enabled.
+
+==============================================================
+
+hung_task_timeout_secs:
+
+Check interval. When a task in D state did not get scheduled
+for more than this value, a warning is reported.
+This file shows up if CONFIG_DETECT_HUNG_TASK is enabled.
+
+0: means infinite timeout - no checking done.
+
+==============================================================
+
+hung_task_warnings:
+
+The maximum number of warnings to report during a check interval.
+When this value is reached, no more warnings will be reported.
+This file shows up if CONFIG_DETECT_HUNG_TASK is enabled.
+
+-1: report an infinite number of warnings.
+
+==============================================================
+
 kexec_load_disabled:
 
 A toggle indicating if the kexec_load syscall has been disabled. This
diff --git a/Documentation/sysctl/vm.txt b/Documentation/sysctl/vm.txt
index 9f5481b..d614a9b 100644
--- a/Documentation/sysctl/vm.txt
+++ b/Documentation/sysctl/vm.txt
@@ -696,7 +696,9 @@
 
 This control is used to define how aggressive the kernel will swap
 memory pages.  Higher values will increase aggressiveness, lower values
-decrease the amount of swap.
+decrease the amount of swap.  A value of 0 instructs the kernel not to
+initiate swap until the amount of free and file-backed pages is less
+than the high water mark in a zone.
 
 The default value is 60.
 
diff --git a/Documentation/timers/00-INDEX b/Documentation/timers/00-INDEX
index ef2ccbf..6d042dc 100644
--- a/Documentation/timers/00-INDEX
+++ b/Documentation/timers/00-INDEX
@@ -8,6 +8,8 @@
 	- sample hpet timer test program
 hrtimers.txt
 	- subsystem for high-resolution kernel timers
+Makefile
+	- Build and link hpet_example
 NO_HZ.txt
 	- Summary of the different methods for the scheduler clock-interrupts management.
 timers-howto.txt
diff --git a/Documentation/video4linux/omap4_camera.txt b/Documentation/video4linux/omap4_camera.txt
new file mode 100644
index 0000000..25d9b40
--- /dev/null
+++ b/Documentation/video4linux/omap4_camera.txt
@@ -0,0 +1,60 @@
+                              OMAP4 ISS Driver
+                              ================
+
+Introduction
+------------
+
+The OMAP44XX family of chips contains the Imaging SubSystem (a.k.a. ISS),
+which contains several components that can be categorized into 3 big groups:
+
+- Interfaces (2 Interfaces: CSI2-A & CSI2-B/CCP2)
+- ISP (Image Signal Processor)
+- SIMCOP (Still Image Coprocessor)
+
+For more information, please look in [1] for the latest version of:
+	"OMAP4430 Multimedia Device Silicon Revision 2.x"
+
+As of Revision AB, the ISS is described in detail in section 8.
+
+This driver supports _only_ the CSI2-A/B interfaces for now.
+
+It makes use of the Media Controller framework [2], and inherits most of its
+code from the OMAP3 ISP driver (found under drivers/media/platform/omap3isp/*),
+except that it doesn't need an IOMMU for ISS buffer memory mapping.
+
+Supports usage of MMAP buffers only (for now).
+
+Tested platforms
+----------------
+
+- OMAP4430SDP, w/ ES2.1 GP & SEVM4430-CAM-V1-0 (contains IMX060 & OV5640, of
+  which only the latter is supported, outputting YUV422 frames).
+
+- TI Blaze MDP, w/ OMAP4430 ES2.2 EMU (contains 1 IMX060 & 2 OV5650 sensors, of
+  which only the OV5650 sensors are supported, outputting RAW10 frames).
+
+- PandaBoard, Rev. A2, w/ OMAP4430 ES2.1 GP & OV adapter board, tested with
+  the following sensors:
+  * OV5640
+  * OV5650
+
+- Tested on mainline kernel:
+
+	http://git.kernel.org/?p=linux/kernel/git/torvalds/linux-2.6.git;a=summary
+
+  Tag: v3.3 (commit c16fa4f2ad19908a47c63d8fa436a1178438c7e7)
+
+File list
+---------
+drivers/staging/media/omap4iss/
+include/media/omap4iss.h
+
+References
+----------
+
+[1] http://focus.ti.com/general/docs/wtbu/wtbudocumentcenter.tsp?navigationId=12037&templateId=6123#62
+[2] http://lwn.net/Articles/420485/
+[3] http://www.spinics.net/lists/linux-media/msg44370.html
+--
+Author: Sergio Aguirre <sergio.a.aguirre@gmail.com>
+Copyright (C) 2012, Texas Instruments
diff --git a/Documentation/video4linux/si476x.txt b/Documentation/video4linux/si476x.txt
index 2f9b487..6166079 100644
--- a/Documentation/video4linux/si476x.txt
+++ b/Documentation/video4linux/si476x.txt
@@ -147,7 +147,7 @@
   --------------------------------------------------------------------
   0x12		| readfreq	| Current tuned frequency
   --------------------------------------------------------------------
-  0x14		| freqoff	| Singed frequency offset in units of
+  0x14		| freqoff	| Signed frequency offset in units of
   		| 		| 2ppm
   --------------------------------------------------------------------
   0x15		| rssi		| Signed value of RSSI in dBuV
diff --git a/Documentation/virtual/kvm/00-INDEX b/Documentation/virtual/kvm/00-INDEX
index 641ec92..fee9f2b 100644
--- a/Documentation/virtual/kvm/00-INDEX
+++ b/Documentation/virtual/kvm/00-INDEX
@@ -20,5 +20,7 @@
 	- the paravirtualization interface on PowerPC.
 review-checklist.txt
 	- review checklist for KVM patches.
+s390-diag.txt
+	- Diagnose hypercall description (for IBM S/390)
 timekeeping.txt
 	- timekeeping virtualization for x86-based architectures.
diff --git a/Documentation/virtual/kvm/api.txt b/Documentation/virtual/kvm/api.txt
index 366bf4b..6cd63a9 100644
--- a/Documentation/virtual/kvm/api.txt
+++ b/Documentation/virtual/kvm/api.txt
@@ -1838,6 +1838,7 @@
   PPC   | KVM_REG_PPC_LPCR	| 64
   PPC   | KVM_REG_PPC_PPR	| 64
  PPC   | KVM_REG_PPC_ARCH_COMPAT | 32
+  PPC   | KVM_REG_PPC_DABRX     | 32
   PPC   | KVM_REG_PPC_TM_GPR0	| 64
           ...
   PPC   | KVM_REG_PPC_TM_GPR31	| 64
diff --git a/Documentation/vm/00-INDEX b/Documentation/vm/00-INDEX
index a39d066..081c497 100644
--- a/Documentation/vm/00-INDEX
+++ b/Documentation/vm/00-INDEX
@@ -16,8 +16,6 @@
 	- explains what hwpoison is
 ksm.txt
 	- how to use the Kernel Samepage Merging feature.
-locking
-	- info on how locking and synchronization is done in the Linux vm code.
 numa
 	- information about NUMA specific code in the Linux vm.
 numa_memory_policy.txt
@@ -32,6 +30,8 @@
 	- a short users guide for SLUB.
 soft-dirty.txt
 	- short explanation for soft-dirty PTEs
+split_page_table_lock
+	- Separate per-table lock to improve scalability of the old page_table_lock.
 transhuge.txt
 	- Transparent Hugepage Support, alternative way of using hugepages.
 unevictable-lru.txt
diff --git a/Documentation/w1/masters/00-INDEX b/Documentation/w1/masters/00-INDEX
index d63fa02..8330cf9 100644
--- a/Documentation/w1/masters/00-INDEX
+++ b/Documentation/w1/masters/00-INDEX
@@ -4,7 +4,9 @@
 	- The Maxim/Dallas Semiconductor DS2482 provides 1-wire busses.
 ds2490
 	- The Maxim/Dallas Semiconductor DS2490 builds USB <-> W1 bridges.
-mxc_w1
+mxc-w1
 	- W1 master controller driver found on Freescale MX2/MX3 SoCs
+omap-hdq
+	- HDQ/1-wire module of TI OMAP 2430/3430.
 w1-gpio
 	- GPIO 1-wire bus master driver.
diff --git a/Documentation/w1/slaves/00-INDEX b/Documentation/w1/slaves/00-INDEX
index 75613c9..6e18c70 100644
--- a/Documentation/w1/slaves/00-INDEX
+++ b/Documentation/w1/slaves/00-INDEX
@@ -4,3 +4,5 @@
 	- The Maxim/Dallas Semiconductor ds18*20 temperature sensor.
 w1_ds2423
 	- The Maxim/Dallas Semiconductor ds2423 counter device.
+w1_ds28e04
+	- The Maxim/Dallas Semiconductor ds28e04 eeprom.
diff --git a/Documentation/x86/00-INDEX b/Documentation/x86/00-INDEX
index f37b46d..6922644 100644
--- a/Documentation/x86/00-INDEX
+++ b/Documentation/x86/00-INDEX
@@ -1,6 +1,20 @@
 00-INDEX
 	- this file
-mtrr.txt
-	- how to use x86 Memory Type Range Registers to increase performance
+boot.txt
+	- List of boot protocol versions
+early-microcode.txt
+	- How to load microcode from an initrd-CPIO archive early to fix CPU issues.
+earlyprintk.txt
+	- Using earlyprintk with a USB2 debug port key.
+entry_64.txt
+	- Describe (some of the) kernel entry points for x86.
 exception-tables.txt
 	- why and how Linux kernel uses exception tables on x86
+mtrr.txt
+	- how to use x86 Memory Type Range Registers to increase performance
+pat.txt
+	- Page Attribute Table intro and API
+usb-legacy-support.txt
+	- how to fix/avoid quirks when using emulated PS/2 mouse/keyboard.
+zero-page.txt
+	- layout of the first page of memory.
diff --git a/Documentation/zh_CN/arm64/booting.txt b/Documentation/zh_CN/arm64/booting.txt
index 28fa325..6f6d956 100644
--- a/Documentation/zh_CN/arm64/booting.txt
+++ b/Documentation/zh_CN/arm64/booting.txt
@@ -7,7 +7,7 @@
 or if there is a problem with the translation.
 
 Maintainer: Will Deacon <will.deacon@arm.com>
-Chinese maintainer: Fu Wei <tekkamanninja@gmail.com>
+Chinese maintainer: Fu Wei <wefu@redhat.com>
 ---------------------------------------------------------------------
 Documentation/arm64/booting.txt 的中文翻译
 
@@ -16,9 +16,9 @@
 译存在问题,请联系中文版维护者。
 
 英文版维护者: Will Deacon <will.deacon@arm.com>
-中文版维护者: 傅炜  Fu Wei <tekkamanninja@gmail.com>
-中文版翻译者: 傅炜  Fu Wei <tekkamanninja@gmail.com>
-中文版校译者: 傅炜  Fu Wei <tekkamanninja@gmail.com>
+中文版维护者: 傅炜  Fu Wei <wefu@redhat.com>
+中文版翻译者: 傅炜  Fu Wei <wefu@redhat.com>
+中文版校译者: 傅炜  Fu Wei <wefu@redhat.com>
 
 以下为正文
 ---------------------------------------------------------------------
@@ -64,8 +64,8 @@
 
 必要性: 强制
 
-设备树数据块(dtb)大小必须不大于 2 MB,且位于从内核映像起始算起第一个
-512MB 内的 2MB 边界上。这使得内核可以通过初始页表中的单个节描述符来
+设备树数据块(dtb)必须 8 字节对齐,并位于从内核映像起始算起第一个 512MB
+内,且不得跨越 2MB 对齐边界。这使得内核可以通过初始页表中的单个节描述符来
 映射此数据块。
 
 
@@ -84,13 +84,23 @@
 
 必要性: 强制
 
-已解压的内核映像包含一个 32 字节的头,内容如下:
+已解压的内核映像包含一个 64 字节的头,内容如下:
 
-  u32 magic	= 0x14000008;	/* 跳转到 stext, 小端 */
-  u32 res0	= 0;		/* 保留 */
+  u32 code0;			/* 可执行代码 */
+  u32 code1;			/* 可执行代码 */
   u64 text_offset;		/* 映像装载偏移 */
+  u64 res0	= 0;		/* 保留 */
   u64 res1	= 0;		/* 保留 */
   u64 res2	= 0;		/* 保留 */
+  u64 res3	= 0;		/* 保留 */
+  u64 res4	= 0;		/* 保留 */
+  u32 magic	= 0x644d5241;	/* 魔数, 小端, "ARM\x64" */
+  u32 res5 = 0;      		/* 保留 */
+
+
+映像头注释:
+
+- code0/code1 负责跳转到 stext.
 
 映像必须位于系统 RAM 起始处的特定偏移(当前是 0x80000)。系统 RAM
 的起始地址必须是以 2MB 对齐的。
@@ -118,9 +128,9 @@
   外部高速缓存(如果存在)必须配置并禁用。
 
 - 架构计时器
-  CNTFRQ 必须设定为计时器的频率。
-  如果在 EL1 模式下进入内核,则 CNTHCTL_EL2 中的 EL1PCTEN (bit 0)
-  必须置位。
+  CNTFRQ 必须设定为计时器的频率,且 CNTVOFF 必须设定为对所有 CPU
+  都一致的值。如果在 EL1 模式下进入内核,则 CNTHCTL_EL2 中的
+  EL1PCTEN (bit 0) 必须置位。
 
 - 一致性
   通过内核启动的所有 CPU 在内核入口地址上必须处于相同的一致性域中。
@@ -131,23 +141,40 @@
   在进入内核映像的异常级中,所有构架中可写的系统寄存器必须通过软件
   在一个更高的异常级别下初始化,以防止在 未知 状态下运行。
 
+以上对于 CPU 模式、高速缓存、MMU、架构计时器、一致性、系统寄存器的
+必要条件描述适用于所有 CPU。所有 CPU 必须在同一异常级别跳入内核。
+
 引导装载程序必须在每个 CPU 处于以下状态时跳入内核入口:
 
 - 主 CPU 必须直接跳入内核映像的第一条指令。通过此 CPU 传递的设备树
-  数据块必须在每个 CPU 节点中包含以下内容:
-
-    1、‘enable-method’属性。目前,此字段支持的值仅为字符串“spin-table”。
-
-    2、‘cpu-release-addr’标识一个 64-bit、初始化为零的内存位置。
+  数据块必须在每个 CPU 节点中包含一个 ‘enable-method’ 属性,所
+  支持的 enable-method 请见下文。
 
   引导装载程序必须生成这些设备树属性,并在跳入内核入口之前将其插入
   数据块。
 
-- 任何辅助 CPU 必须在内存保留区(通过设备树中的 /memreserve/ 域传递
+- enable-method 为 “spin-table” 的 CPU 必须在它们的 CPU
+  节点中包含一个 ‘cpu-release-addr’ 属性。这个属性标识了一个
+  64 位自然对齐且初始化为零的内存位置。
+
+  这些 CPU 必须在内存保留区(通过设备树中的 /memreserve/ 域传递
   给内核)中自旋于内核之外,轮询它们的 cpu-release-addr 位置(必须
   包含在保留区中)。可通过插入 wfe 指令来降低忙循环开销,而主 CPU 将
   发出 sev 指令。当对 cpu-release-addr 所指位置的读取操作返回非零值
-  时,CPU 必须直接跳入此值所指向的地址。
+  时,CPU 必须跳入此值所指向的地址。此值为一个单独的 64 位小端值,
+  因此 CPU 须在跳转前将所读取的值转换为其本身的端模式。
+
+- enable-method 为 “psci” 的 CPU 保持在内核外(比如,在
+  memory 节点中描述为内核空间的内存区外,或在通过设备树 /memreserve/
+  域中描述为内核保留区的空间中)。内核将会发起在 ARM 文档(编号
+  ARM DEN 0022A:用于 ARM 上的电源状态协调接口系统软件)中描述的
+  CPU_ON 调用来将 CPU 带入内核。
+
+  *译者注:到文档翻译时,此文档已更新为 ARM DEN 0022B。
+
+  设备树必须包含一个 ‘psci’ 节点,请参考以下文档:
+  Documentation/devicetree/bindings/arm/psci.txt
+
 
 - 辅助 CPU 通用寄存器设置
   x0 = 0 (保留,将来可能使用)
diff --git a/Documentation/zh_CN/arm64/memory.txt b/Documentation/zh_CN/arm64/memory.txt
index a5f6283..a782704 100644
--- a/Documentation/zh_CN/arm64/memory.txt
+++ b/Documentation/zh_CN/arm64/memory.txt
@@ -7,7 +7,7 @@
 or if there is a problem with the translation.
 
 Maintainer: Catalin Marinas <catalin.marinas@arm.com>
-Chinese maintainer: Fu Wei <tekkamanninja@gmail.com>
+Chinese maintainer: Fu Wei <wefu@redhat.com>
 ---------------------------------------------------------------------
 Documentation/arm64/memory.txt 的中文翻译
 
@@ -16,9 +16,9 @@
 译存在问题,请联系中文版维护者。
 
 英文版维护者: Catalin Marinas <catalin.marinas@arm.com>
-中文版维护者: 傅炜  Fu Wei <tekkamanninja@gmail.com>
-中文版翻译者: 傅炜  Fu Wei <tekkamanninja@gmail.com>
-中文版校译者: 傅炜  Fu Wei <tekkamanninja@gmail.com>
+中文版维护者: 傅炜  Fu Wei <wefu@redhat.com>
+中文版翻译者: 傅炜  Fu Wei <wefu@redhat.com>
+中文版校译者: 傅炜  Fu Wei <wefu@redhat.com>
 
 以下为正文
 ---------------------------------------------------------------------
@@ -41,7 +41,7 @@
 TTBR1 中,且从不写入 TTBR0。
 
 
-AArch64 Linux 内存布局:
+AArch64 Linux 在页大小为 4KB 时的内存布局:
 
 起始地址			结束地址			大小		用途
 -----------------------------------------------------------------------
@@ -55,15 +55,42 @@
 
 ffffffbe00000000	ffffffbffbbfffff	  ~8GB		[防护页,未来用于 vmmemap]
 
+ffffffbffbc00000	ffffffbffbdfffff	   2MB		earlyprintk 设备
+
 ffffffbffbe00000	ffffffbffbe0ffff	  64KB		PCI I/O 空间
 
-ffffffbbffff0000	ffffffbcffffffff	  ~2MB		[防护页]
+ffffffbffbe10000	ffffffbcffffffff	  ~2MB		[防护页]
 
 ffffffbffc000000	ffffffbfffffffff	  64MB		模块
 
 ffffffc000000000	ffffffffffffffff	 256GB		内核逻辑内存映射
 
 
+AArch64 Linux 在页大小为 64KB 时的内存布局:
+
+起始地址			结束地址			大小		用途
+-----------------------------------------------------------------------
+0000000000000000	000003ffffffffff	   4TB		用户空间
+
+fffffc0000000000	fffffdfbfffeffff	  ~2TB		vmalloc
+
+fffffdfbffff0000	fffffdfbffffffff	  64KB		[防护页]
+
+fffffdfc00000000	fffffdfdffffffff	   8GB		vmemmap
+
+fffffdfe00000000	fffffdfffbbfffff	  ~8GB		[防护页,未来用于 vmmemap]
+
+fffffdfffbc00000	fffffdfffbdfffff	   2MB		earlyprintk 设备
+
+fffffdfffbe00000	fffffdfffbe0ffff	  64KB		PCI I/O 空间
+
+fffffdfffbe10000	fffffdfffbffffff	  ~2MB		[防护页]
+
+fffffdfffc000000	fffffdffffffffff	  64MB		模块
+
+fffffe0000000000	ffffffffffffffff	   2TB		内核逻辑内存映射
+
+
 4KB 页大小的转换表查找:
 
 +--------+--------+--------+--------+--------+--------+--------+--------+
@@ -91,3 +118,10 @@
  |                 |    +--------------------------> [41:29] L2 索引 (仅使用 38:29 )
  |                 +-------------------------------> [47:42] L1 索引 (未使用)
  +-------------------------------------------------> [63] TTBR0/1
+
+当使用 KVM 时, 管理程序(hypervisor)在 EL2 中通过相对内核虚拟地址的
+一个固定偏移来映射内核页(内核虚拟地址的高 24 位设为零):
+
+起始地址			结束地址			大小		用途
+-----------------------------------------------------------------------
+0000004000000000	0000007fffffffff	 256GB		在 HYP 中映射的内核对象
diff --git a/Documentation/zh_CN/arm64/tagged-pointers.txt b/Documentation/zh_CN/arm64/tagged-pointers.txt
new file mode 100644
index 0000000..2664d1b
--- /dev/null
+++ b/Documentation/zh_CN/arm64/tagged-pointers.txt
@@ -0,0 +1,52 @@
+Chinese translated version of Documentation/arm64/tagged-pointers.txt
+
+If you have any comment or update to the content, please contact the
+original document maintainer directly.  However, if you have a problem
+communicating in English you can also ask the Chinese maintainer for
+help.  Contact the Chinese maintainer if this translation is outdated
+or if there is a problem with the translation.
+
+Maintainer: Will Deacon <will.deacon@arm.com>
+Chinese maintainer: Fu Wei <wefu@redhat.com>
+---------------------------------------------------------------------
+Documentation/arm64/tagged-pointers.txt 的中文翻译
+
+如果想评论或更新本文的内容,请直接联系原文档的维护者。如果你使用英文
+交流有困难的话,也可以向中文版维护者求助。如果本翻译更新不及时或者翻
+译存在问题,请联系中文版维护者。
+
+英文版维护者: Will Deacon <will.deacon@arm.com>
+中文版维护者: 傅炜  Fu Wei <wefu@redhat.com>
+中文版翻译者: 傅炜  Fu Wei <wefu@redhat.com>
+中文版校译者: 傅炜  Fu Wei <wefu@redhat.com>
+
+以下为正文
+---------------------------------------------------------------------
+		Linux 在 AArch64 中带标记的虚拟地址
+		=================================
+
+作者: Will Deacon <will.deacon@arm.com>
+日期: 2013 年 06 月 12 日
+
+本文档简述了在 AArch64 地址转换系统中提供的带标记的虚拟地址及其在
+AArch64 Linux 中的潜在用途。
+
+内核提供的地址转换表配置使通过 TTBR0 完成的虚拟地址转换(即用户空间
+映射),其虚拟地址的最高 8 位(63:56)会被转换硬件所忽略。这种机制
+让这些位可供应用程序自由使用,其注意事项如下:
+
+	(1) 内核要求所有传递到 EL1 的用户空间地址带有 0x00 标记。
+	    这意味着任何携带用户空间虚拟地址的系统调用(syscall)
+	    参数 *必须* 在陷入内核前使它们的最高字节被清零。
+
+	(2) 非零标记在传递信号时不被保存。这意味着在应用程序中利用了
+	    标记的信号处理函数无法依赖 siginfo_t 的用户空间虚拟
+	    地址所携带的包含其内部域信息的标记。此规则的一个例外是
+	    当信号是在调试观察点的异常处理程序中产生的,此时标记的
+	    信息将被保存。
+
+	(3) 当使用带标记的指针时需特别留心,因为仅对两个虚拟地址
+	    的高字节,C 编译器很可能无法判断它们是不同的。
+
+此构架会阻止对带标记的 PC 指针的利用,因此在异常返回时,其高字节
+将被设置成一个为 “55” 的扩展符。
diff --git a/MAINTAINERS b/MAINTAINERS
index 6d73831c..2225bb9 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -309,36 +309,36 @@
 
 AD525X ANALOG DEVICES DIGITAL POTENTIOMETERS DRIVER
 M:	Michael Hennerich <michael.hennerich@analog.com>
-L:	device-drivers-devel@blackfin.uclinux.org
 W:	http://wiki.analog.com/AD5254
+W:	http://ez.analog.com/community/linux-device-drivers
 S:	Supported
 F:	drivers/misc/ad525x_dpot.c
 
 AD5398 CURRENT REGULATOR DRIVER (AD5398/AD5821)
 M:	Michael Hennerich <michael.hennerich@analog.com>
-L:	device-drivers-devel@blackfin.uclinux.org
 W:	http://wiki.analog.com/AD5398
+W:	http://ez.analog.com/community/linux-device-drivers
 S:	Supported
 F:	drivers/regulator/ad5398.c
 
 AD714X CAPACITANCE TOUCH SENSOR DRIVER (AD7142/3/7/8/7A)
 M:	Michael Hennerich <michael.hennerich@analog.com>
-L:	device-drivers-devel@blackfin.uclinux.org
 W:	http://wiki.analog.com/AD7142
+W:	http://ez.analog.com/community/linux-device-drivers
 S:	Supported
 F:	drivers/input/misc/ad714x.c
 
 AD7877 TOUCHSCREEN DRIVER
 M:	Michael Hennerich <michael.hennerich@analog.com>
-L:	device-drivers-devel@blackfin.uclinux.org
 W:	http://wiki.analog.com/AD7877
+W:	http://ez.analog.com/community/linux-device-drivers
 S:	Supported
 F:	drivers/input/touchscreen/ad7877.c
 
 AD7879 TOUCHSCREEN DRIVER (AD7879/AD7889)
 M:	Michael Hennerich <michael.hennerich@analog.com>
-L:	device-drivers-devel@blackfin.uclinux.org
 W:	http://wiki.analog.com/AD7879
+W:	http://ez.analog.com/community/linux-device-drivers
 S:	Supported
 F:	drivers/input/touchscreen/ad7879.c
 
@@ -347,7 +347,7 @@
 S:	Maintained
 
 ADM1025 HARDWARE MONITOR DRIVER
-M:	Jean Delvare <khali@linux-fr.org>
+M:	Jean Delvare <jdelvare@suse.de>
 L:	lm-sensors@lm-sensors.org
 S:	Maintained
 F:	Documentation/hwmon/adm1025
@@ -374,8 +374,8 @@
 
 ADP5520 BACKLIGHT DRIVER WITH IO EXPANDER (ADP5520/ADP5501)
 M:	Michael Hennerich <michael.hennerich@analog.com>
-L:	device-drivers-devel@blackfin.uclinux.org
 W:	http://wiki.analog.com/ADP5520
+W:	http://ez.analog.com/community/linux-device-drivers
 S:	Supported
 F:	drivers/mfd/adp5520.c
 F:	drivers/video/backlight/adp5520_bl.c
@@ -385,16 +385,16 @@
 
 ADP5588 QWERTY KEYPAD AND IO EXPANDER DRIVER (ADP5588/ADP5587)
 M:	Michael Hennerich <michael.hennerich@analog.com>
-L:	device-drivers-devel@blackfin.uclinux.org
 W:	http://wiki.analog.com/ADP5588
+W:	http://ez.analog.com/community/linux-device-drivers
 S:	Supported
 F:	drivers/input/keyboard/adp5588-keys.c
 F:	drivers/gpio/gpio-adp5588.c
 
 ADP8860 BACKLIGHT DRIVER (ADP8860/ADP8861/ADP8863)
 M:	Michael Hennerich <michael.hennerich@analog.com>
-L:	device-drivers-devel@blackfin.uclinux.org
 W:	http://wiki.analog.com/ADP8860
+W:	http://ez.analog.com/community/linux-device-drivers
 S:	Supported
 F:	drivers/video/backlight/adp8860_bl.c
 
@@ -412,7 +412,7 @@
 F:	drivers/macintosh/therm_adt746x.c
 
 ADT7475 HARDWARE MONITOR DRIVER
-M:	Jean Delvare <khali@linux-fr.org>
+M:	Jean Delvare <jdelvare@suse.de>
 L:	lm-sensors@lm-sensors.org
 S:	Maintained
 F:	Documentation/hwmon/adt7475
@@ -420,8 +420,8 @@
 
 ADXL34X THREE-AXIS DIGITAL ACCELEROMETER DRIVER (ADXL345/ADXL346)
 M:	Michael Hennerich <michael.hennerich@analog.com>
-L:	device-drivers-devel@blackfin.uclinux.org
 W:	http://wiki.analog.com/ADXL345
+W:	http://ez.analog.com/community/linux-device-drivers
 S:	Supported
 F:	drivers/input/misc/adxl34x.c
 
@@ -627,9 +627,9 @@
 
 ANALOG DEVICES INC ASOC CODEC DRIVERS
 M:	Lars-Peter Clausen <lars@metafoo.de>
-L:	device-drivers-devel@blackfin.uclinux.org
 L:	alsa-devel@alsa-project.org (moderated for non-subscribers)
 W:	http://wiki.analog.com/
+W:	http://ez.analog.com/community/linux-device-drivers
 S:	Supported
 F:	sound/soc/codecs/adau*
 F:	sound/soc/codecs/adav*
@@ -639,7 +639,7 @@
 F:	sound/soc/codecs/sigmadsp.*
 
 ANALOG DEVICES INC ASOC DRIVERS
-L:	uclinux-dist-devel@blackfin.uclinux.org
+L:	adi-buildroot-devel@lists.sourceforge.net
 L:	alsa-devel@alsa-project.org (moderated for non-subscribers)
 W:	http://blackfin.uclinux.org/
 S:	Supported
@@ -868,14 +868,7 @@
 F:	drivers/clk/clk-prima2.c
 F:	drivers/clocksource/timer-prima2.c
 F:	drivers/clocksource/timer-marco.c
-F:	drivers/dma/sirf-dma.c
-F:	drivers/i2c/busses/i2c-sirf.c
-F:	drivers/input/misc/sirfsoc-onkey.c
-F:	drivers/irqchip/irq-sirfsoc.c
-F:	drivers/mmc/host/sdhci-sirf.c
-F:	drivers/pinctrl/sirf/
-F:	drivers/rtc/rtc-sirfsoc.c
-F:	drivers/spi/spi-sirf.c
+N:	[^a-z]sirf
 
 ARM/EBSA110 MACHINE SUPPORT
 M:	Russell King <linux@arm.linux.org.uk>
@@ -1742,56 +1735,54 @@
 F:	include/uapi/linux/bfs_fs.h
 
 BLACKFIN ARCHITECTURE
-M:	Mike Frysinger <vapier@gentoo.org>
-L:	uclinux-dist-devel@blackfin.uclinux.org
+M:	Steven Miao <realmz6@gmail.com>
+L:	adi-buildroot-devel@lists.sourceforge.net
 W:	http://blackfin.uclinux.org
 S:	Supported
 F:	arch/blackfin/
 
 BLACKFIN EMAC DRIVER
-L:	uclinux-dist-devel@blackfin.uclinux.org
+L:	adi-buildroot-devel@lists.sourceforge.net
 W:	http://blackfin.uclinux.org
 S:	Supported
 F:	drivers/net/ethernet/adi/
 
 BLACKFIN RTC DRIVER
-M:	Mike Frysinger <vapier.adi@gmail.com>
-L:	uclinux-dist-devel@blackfin.uclinux.org
+L:	adi-buildroot-devel@lists.sourceforge.net
 W:	http://blackfin.uclinux.org
 S:	Supported
 F:	drivers/rtc/rtc-bfin.c
 
 BLACKFIN SDH DRIVER
 M:	Sonic Zhang <sonic.zhang@analog.com>
-L:	uclinux-dist-devel@blackfin.uclinux.org
+L:	adi-buildroot-devel@lists.sourceforge.net
 W:	http://blackfin.uclinux.org
 S:	Supported
 F:	drivers/mmc/host/bfin_sdh.c
 
 BLACKFIN SERIAL DRIVER
 M:	Sonic Zhang <sonic.zhang@analog.com>
-L:	uclinux-dist-devel@blackfin.uclinux.org
+L:	adi-buildroot-devel@lists.sourceforge.net
 W:	http://blackfin.uclinux.org
 S:	Supported
 F:	drivers/tty/serial/bfin_uart.c
 
 BLACKFIN WATCHDOG DRIVER
-M:	Mike Frysinger <vapier.adi@gmail.com>
-L:	uclinux-dist-devel@blackfin.uclinux.org
+L:	adi-buildroot-devel@lists.sourceforge.net
 W:	http://blackfin.uclinux.org
 S:	Supported
 F:	drivers/watchdog/bfin_wdt.c
 
 BLACKFIN I2C TWI DRIVER
 M:	Sonic Zhang <sonic.zhang@analog.com>
-L:	uclinux-dist-devel@blackfin.uclinux.org
+L:	adi-buildroot-devel@lists.sourceforge.net
 W:	http://blackfin.uclinux.org/
 S:	Supported
 F:	drivers/i2c/busses/i2c-bfin-twi.c
 
 BLACKFIN MEDIA DRIVER
 M:	Scott Jiang <scott.jiang.linux@gmail.com>
-L:	uclinux-dist-devel@blackfin.uclinux.org
+L:	adi-buildroot-devel@lists.sourceforge.net
 W:	http://blackfin.uclinux.org/
 S:	Supported
 F:	drivers/media/platform/blackfin/
@@ -2224,6 +2215,7 @@
 CLOCKSOURCE, CLOCKEVENT DRIVERS
 M:	Daniel Lezcano <daniel.lezcano@linaro.org>
 M:	Thomas Gleixner <tglx@linutronix.de>
+L:	linux-kernel@vger.kernel.org
 T:	git git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip.git timers/core
 S:	Supported
 F:	drivers/clocksource
@@ -2375,7 +2367,7 @@
 
 CPU FREQUENCY DRIVERS - ARM BIG LITTLE
 M:	Viresh Kumar <viresh.kumar@linaro.org>
-M:	Sudeep KarkadaNagesha <sudeep.karkadanagesha@arm.com>
+M:	Sudeep Holla <sudeep.holla@arm.com>
 L:	cpufreq@vger.kernel.org
 L:	linux-pm@vger.kernel.org
 W:	http://www.arm.com/products/processors/technologies/biglittleprocessing.php
@@ -2416,8 +2408,10 @@
 
 CPUSETS
 M:	Li Zefan <lizefan@huawei.com>
+L:	cgroups@vger.kernel.org
 W:	http://www.bullopensource.org/cpuset/
 W:	http://oss.sgi.com/projects/cpusets/
+T:	git git://git.kernel.org/pub/scm/linux/kernel/git/tj/cgroup.git
 S:	Maintained
 F:	Documentation/cgroups/cpusets.txt
 F:	include/linux/cpuset.h
@@ -2865,7 +2859,7 @@
 L:	intel-gfx@lists.freedesktop.org
 L:	dri-devel@lists.freedesktop.org
 Q:	http://patchwork.freedesktop.org/project/intel-gfx/
-T:	git git://people.freedesktop.org/~danvet/drm-intel
+T:	git git://anongit.freedesktop.org/drm-intel
 S:	Supported
 F:	drivers/gpu/drm/i915/
 F:	include/drm/i915*
@@ -3332,6 +3326,17 @@
 F:	include/linux/netfilter_bridge/
 F:	net/bridge/
 
+ETHERNET PHY LIBRARY
+M:	Florian Fainelli <f.fainelli@gmail.com>
+L:	netdev@vger.kernel.org
+S:	Maintained
+F:	include/linux/phy.h
+F:	include/linux/phy_fixed.h
+F:	drivers/net/phy/
+F:	Documentation/networking/phy.txt
+F:	drivers/of/of_mdio.c
+F:	drivers/of/of_net.c
+
 EXT2 FILE SYSTEM
 M:	Jan Kara <jack@suse.cz>
 L:	linux-ext4@vger.kernel.org
@@ -3389,7 +3394,7 @@
 F:	include/video/exynos_mipi*
 
 F71805F HARDWARE MONITORING DRIVER
-M:	Jean Delvare <khali@linux-fr.org>
+M:	Jean Delvare <jdelvare@suse.de>
 L:	lm-sensors@lm-sensors.org
 S:	Maintained
 F:	Documentation/hwmon/f71805f
@@ -3916,7 +3921,7 @@
 F:	drivers/tty/hvc/
 
 HARDWARE MONITORING
-M:	Jean Delvare <khali@linux-fr.org>
+M:	Jean Delvare <jdelvare@suse.de>
 M:	Guenter Roeck <linux@roeck-us.net>
 L:	lm-sensors@lm-sensors.org
 W:	http://www.lm-sensors.org/
@@ -4025,6 +4030,7 @@
 
 HIGH-RESOLUTION TIMERS, CLOCKEVENTS, DYNTICKS
 M:	Thomas Gleixner <tglx@linutronix.de>
+L:	linux-kernel@vger.kernel.org
 T:	git git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip.git timers/core
 S:	Maintained
 F:	Documentation/timers/
@@ -4144,7 +4150,7 @@
 F:	tools/hv/
 
 I2C OVER PARALLEL PORT
-M:	Jean Delvare <khali@linux-fr.org>
+M:	Jean Delvare <jdelvare@suse.de>
 L:	linux-i2c@vger.kernel.org
 S:	Maintained
 F:	Documentation/i2c/busses/i2c-parport
@@ -4153,7 +4159,7 @@
 F:	drivers/i2c/busses/i2c-parport-light.c
 
 I2C/SMBUS CONTROLLER DRIVERS FOR PC
-M:	Jean Delvare <khali@linux-fr.org>
+M:	Jean Delvare <jdelvare@suse.de>
 L:	linux-i2c@vger.kernel.org
 S:	Maintained
 F:	Documentation/i2c/busses/i2c-ali1535
@@ -4194,7 +4200,7 @@
 F:	Documentation/i2c/busses/i2c-ismt
 
 I2C/SMBUS STUB DRIVER
-M:	Jean Delvare <khali@linux-fr.org>
+M:	Jean Delvare <jdelvare@suse.de>
 L:	linux-i2c@vger.kernel.org
 S:	Maintained
 F:	drivers/i2c/i2c-stub.c
@@ -4213,7 +4219,7 @@
 F:	include/uapi/linux/i2c-*.h
 
 I2C-TAOS-EVM DRIVER
-M:	Jean Delvare <khali@linux-fr.org>
+M:	Jean Delvare <jdelvare@suse.de>
 L:	linux-i2c@vger.kernel.org
 S:	Maintained
 F:	Documentation/i2c/busses/i2c-taos-evm
@@ -4696,6 +4702,7 @@
 
 IRQ SUBSYSTEM
 M:	Thomas Gleixner <tglx@linutronix.de>
+L:	linux-kernel@vger.kernel.org
 S:	Maintained
 T:	git git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip.git irq/core
 F:	kernel/irq/
@@ -4770,7 +4777,7 @@
 F:	drivers/isdn/hardware/eicon/
 
 IT87 HARDWARE MONITORING DRIVER
-M:	Jean Delvare <khali@linux-fr.org>
+M:	Jean Delvare <jdelvare@suse.de>
 L:	lm-sensors@lm-sensors.org
 S:	Maintained
 F:	Documentation/hwmon/it87
@@ -5140,7 +5147,7 @@
 F:	include/linux/leds.h
 
 LEGACY EEPROM DRIVER
-M:	Jean Delvare <khali@linux-fr.org>
+M:	Jean Delvare <jdelvare@suse.de>
 S:	Maintained
 F:	Documentation/misc-devices/eeprom
 F:	drivers/misc/eeprom/eeprom.c
@@ -5288,21 +5295,21 @@
 F:	drivers/hwmon/lm73.c
 
 LM78 HARDWARE MONITOR DRIVER
-M:	Jean Delvare <khali@linux-fr.org>
+M:	Jean Delvare <jdelvare@suse.de>
 L:	lm-sensors@lm-sensors.org
 S:	Maintained
 F:	Documentation/hwmon/lm78
 F:	drivers/hwmon/lm78.c
 
 LM83 HARDWARE MONITOR DRIVER
-M:	Jean Delvare <khali@linux-fr.org>
+M:	Jean Delvare <jdelvare@suse.de>
 L:	lm-sensors@lm-sensors.org
 S:	Maintained
 F:	Documentation/hwmon/lm83
 F:	drivers/hwmon/lm83.c
 
 LM90 HARDWARE MONITOR DRIVER
-M:	Jean Delvare <khali@linux-fr.org>
+M:	Jean Delvare <jdelvare@suse.de>
 L:	lm-sensors@lm-sensors.org
 S:	Maintained
 F:	Documentation/hwmon/lm90
@@ -5327,6 +5334,7 @@
 LOCKDEP AND LOCKSTAT
 M:	Peter Zijlstra <peterz@infradead.org>
 M:	Ingo Molnar <mingo@redhat.com>
+L:	linux-kernel@vger.kernel.org
 T:	git git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip.git core/locking
 S:	Maintained
 F:	Documentation/lockdep*.txt
@@ -5423,6 +5431,16 @@
 S:	Maintained
 F:	arch/m68k/hp300/
 
+M88DS3103 MEDIA DRIVER
+M:	Antti Palosaari <crope@iki.fi>
+L:	linux-media@vger.kernel.org
+W:	http://linuxtv.org/
+W:	http://palosaari.fi/linux/
+Q:	http://patchwork.linuxtv.org/project/linux-media/list/
+T:	git git://linuxtv.org/anttip/media_tree.git
+S:	Maintained
+F:	drivers/media/dvb-frontends/m88ds3103*
+
 M88RS2000 MEDIA DRIVER
 M:	Malcolm Priestley <tvboxspy@gmail.com>
 L:	linux-media@vger.kernel.org
@@ -5431,6 +5449,16 @@
 S:	Maintained
 F:	drivers/media/dvb-frontends/m88rs2000*
 
+M88TS2022 MEDIA DRIVER
+M:	Antti Palosaari <crope@iki.fi>
+L:	linux-media@vger.kernel.org
+W:	http://linuxtv.org/
+W:	http://palosaari.fi/linux/
+Q:	http://patchwork.linuxtv.org/project/linux-media/list/
+T:	git git://linuxtv.org/anttip/media_tree.git
+S:	Maintained
+F:	drivers/media/tuners/m88ts2022*
+
 MA901 MASTERKIT USB FM RADIO DRIVER
 M:      Alexey Klimov <klimov.linux@gmail.com>
 L:      linux-media@vger.kernel.org
@@ -6495,7 +6523,7 @@
 F:	drivers/char/pc8736x_gpio.c
 
 PC87427 HARDWARE MONITORING DRIVER
-M:	Jean Delvare <khali@linux-fr.org>
+M:	Jean Delvare <jdelvare@suse.de>
 L:	lm-sensors@lm-sensors.org
 S:	Maintained
 F:	Documentation/hwmon/pc87427
@@ -6624,6 +6652,7 @@
 M:	Paul Mackerras <paulus@samba.org>
 M:	Ingo Molnar <mingo@redhat.com>
 M:	Arnaldo Carvalho de Melo <acme@ghostprotocols.net>
+L:	linux-kernel@vger.kernel.org
 T:	git git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip.git perf/core
 S:	Supported
 F:	kernel/events/*
@@ -6747,6 +6776,7 @@
 
 POSIX CLOCKS and TIMERS
 M:	Thomas Gleixner <tglx@linutronix.de>
+L:	linux-kernel@vger.kernel.org
 T:	git git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip.git timers/core
 S:	Supported
 F:	fs/timerfd.c
@@ -7155,6 +7185,7 @@
 RCUTORTURE MODULE
 M:	Josh Triplett <josh@freedesktop.org>
 M:	"Paul E. McKenney" <paulmck@linux.vnet.ibm.com>
+L:	linux-kernel@vger.kernel.org
 S:	Supported
 T:	git git://git.kernel.org/pub/scm/linux/kernel/git/paulmck/linux-rcu.git
 F:	Documentation/RCU/torture.txt
@@ -7162,6 +7193,7 @@
 
 RCUTORTURE TEST FRAMEWORK
 M:	"Paul E. McKenney" <paulmck@linux.vnet.ibm.com>
+L:	linux-kernel@vger.kernel.org
 S:	Supported
 T:	git git://git.kernel.org/pub/scm/linux/kernel/git/paulmck/linux-rcu.git
 F:	tools/testing/selftests/rcutorture
@@ -7177,7 +7209,7 @@
 F:	drivers/net/ethernet/rdc/r6040.c
 
 RDS - RELIABLE DATAGRAM SOCKETS
-M:	Venkat Venkatsubra <venkat.x.venkatsubra@oracle.com>
+M:	Chien Yen <chien.yen@oracle.com>
 L:	rds-devel@oss.oracle.com (moderated for non-subscribers)
 S:	Supported
 F:	net/rds/
@@ -7185,6 +7217,7 @@
 READ-COPY UPDATE (RCU)
 M:	Dipankar Sarma <dipankar@in.ibm.com>
 M:	"Paul E. McKenney" <paulmck@linux.vnet.ibm.com>
+L:	linux-kernel@vger.kernel.org
 W:	http://www.rdrop.com/users/paulmck/RCU/
 S:	Supported
 T:	git git://git.kernel.org/pub/scm/linux/kernel/git/paulmck/linux-rcu.git
@@ -7464,6 +7497,13 @@
 S:	Supported
 F:	drivers/media/i2c/s5c73m3/*
 
+SAMSUNG S5K5BAF CAMERA DRIVER
+M:	Kyungmin Park <kyungmin.park@samsung.com>
+M:	Andrzej Hajda <a.hajda@samsung.com>
+L:	linux-media@vger.kernel.org
+S:	Supported
+F:	drivers/media/i2c/s5k5baf.c
+
 SAMSUNG SOC CLOCK DRIVERS
 M:	Tomasz Figa <t.figa@samsung.com>
 S:	Supported
@@ -7494,6 +7534,7 @@
 TIMEKEEPING, CLOCKSOURCE CORE, NTP
 M:	John Stultz <john.stultz@linaro.org>
 M:	Thomas Gleixner <tglx@linutronix.de>
+L:	linux-kernel@vger.kernel.org
 T:	git git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip.git timers/core
 S:	Supported
 F:	include/linux/clocksource.h
@@ -7519,6 +7560,7 @@
 SCHEDULER
 M:	Ingo Molnar <mingo@redhat.com>
 M:	Peter Zijlstra <peterz@infradead.org>
+L:	linux-kernel@vger.kernel.org
 T:	git git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip.git sched/core
 S:	Maintained
 F:	kernel/sched/
@@ -7765,7 +7807,7 @@
 T:	git git://linuxtv.org/media_tree.git
 W:	http://linuxtv.org
 S:	Odd Fixes
-F:	drivers/media/radio/si4713-i2c.?
+F:	drivers/media/radio/si4713/si4713.?
 
 SI4713 FM RADIO TRANSMITTER PLATFORM DRIVER
 M:	Eduardo Valentin <edubezval@gmail.com>
@@ -7773,7 +7815,15 @@
 T:	git git://linuxtv.org/media_tree.git
 W:	http://linuxtv.org
 S:	Odd Fixes
-F:	drivers/media/radio/radio-si4713.c
+F:	drivers/media/radio/si4713/radio-platform-si4713.c
+
+SI4713 FM RADIO TRANSMITTER USB DRIVER
+M:	Hans Verkuil <hverkuil@xs4all.nl>
+L:	linux-media@vger.kernel.org
+T:	git git://linuxtv.org/media_tree.git
+W:	http://linuxtv.org
+S:	Maintained
+F:	drivers/media/radio/si4713/radio-usb-si4713.c
 
 SIANO DVB DRIVER
 M:	Mauro Carvalho Chehab <m.chehab@samsung.com>
@@ -7886,6 +7936,7 @@
 SLEEPABLE READ-COPY UPDATE (SRCU)
 M:	Lai Jiangshan <laijs@cn.fujitsu.com>
 M:	"Paul E. McKenney" <paulmck@linux.vnet.ibm.com>
+L:	linux-kernel@vger.kernel.org
 W:	http://www.rdrop.com/users/paulmck/RCU/
 S:	Supported
 T:	git git://git.kernel.org/pub/scm/linux/kernel/git/paulmck/linux-rcu.git
@@ -7945,7 +7996,7 @@
 F:	drivers/hwmon/sch5627.c
 
 SMSC47B397 HARDWARE MONITOR DRIVER
-M:	Jean Delvare <khali@linux-fr.org>
+M:	Jean Delvare <jdelvare@suse.de>
 L:	lm-sensors@lm-sensors.org
 S:	Maintained
 F:	Documentation/hwmon/smsc47b397
@@ -8624,6 +8675,14 @@
 S:	Maintained
 F:	arch/xtensa/
 
+THANKO'S RAREMONO AM/FM/SW RADIO RECEIVER USB DRIVER
+M:	Hans Verkuil <hverkuil@xs4all.nl>
+L:	linux-media@vger.kernel.org
+T:	git git://linuxtv.org/media_tree.git
+W:	http://linuxtv.org
+S:	Maintained
+F:	drivers/media/radio/radio-raremono.c
+
 THERMAL
 M:      Zhang Rui <rui.zhang@intel.com>
 M:      Eduardo Valentin <eduardo.valentin@ti.com>
@@ -8962,7 +9021,7 @@
 M:	Artem Bityutskiy <dedekind1@gmail.com>
 W:	http://www.linux-mtd.infradead.org/
 L:	linux-mtd@lists.infradead.org
-T:	git git://git.infradead.org/ubi-2.6.git
+T:	git git://git.infradead.org/ubifs-2.6.git
 S:	Maintained
 F:	drivers/mtd/ubi/
 F:	include/linux/mtd/ubi.h
@@ -9156,8 +9215,7 @@
 T:	git git://linuxtv.org/media_tree.git
 W:	http://www.linux-projects.org
 S:	Maintained
-F:	Documentation/video4linux/sn9c102.txt
-F:	drivers/media/usb/sn9c102/
+F:	drivers/staging/media/sn9c102/
 
 USB SUBSYSTEM
 M:	Greg Kroah-Hartman <gregkh@linuxfoundation.org>
@@ -9463,7 +9521,7 @@
 F:	drivers/hwmon/w83793.c
 
 W83795 HARDWARE MONITORING DRIVER
-M:	Jean Delvare <khali@linux-fr.org>
+M:	Jean Delvare <jdelvare@suse.de>
 L:	lm-sensors@lm-sensors.org
 S:	Maintained
 F:	drivers/hwmon/w83795.c
@@ -9589,6 +9647,7 @@
 M:	Ingo Molnar <mingo@redhat.com>
 M:	"H. Peter Anvin" <hpa@zytor.com>
 M:	x86@kernel.org
+L:	linux-kernel@vger.kernel.org
 T:	git git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip.git x86/core
 S:	Maintained
 F:	Documentation/x86/
@@ -9742,11 +9801,27 @@
 S:	Odd Fixes
 F:	drivers/media/pci/zoran/
 
+ZRAM COMPRESSED RAM BLOCK DEVICE DRIVER
+M:	Minchan Kim <minchan@kernel.org>
+M:	Nitin Gupta <ngupta@vflare.org>
+L:	linux-kernel@vger.kernel.org
+S:	Maintained
+F:	drivers/block/zram/
+F:	Documentation/blockdev/zram.txt
+
 ZS DECSTATION Z85C30 SERIAL DRIVER
 M:	"Maciej W. Rozycki" <macro@linux-mips.org>
 S:	Maintained
 F:	drivers/tty/serial/zs.*
 
+ZSMALLOC COMPRESSED SLAB MEMORY ALLOCATOR
+M:	Minchan Kim <minchan@kernel.org>
+M:	Nitin Gupta <ngupta@vflare.org>
+L:	linux-mm@kvack.org
+S:	Maintained
+F:	mm/zsmalloc.c
+F:	include/linux/zsmalloc.h
+
 ZSWAP COMPRESSED SWAP CACHING
 M:	Seth Jennings <sjenning@linux.vnet.ibm.com>
 L:	linux-mm@kvack.org
diff --git a/Makefile b/Makefile
index 455fd48..893d6f0 100644
--- a/Makefile
+++ b/Makefile
@@ -1,8 +1,8 @@
 VERSION = 3
-PATCHLEVEL = 13
+PATCHLEVEL = 14
 SUBLEVEL = 0
-EXTRAVERSION =
-NAME = One Giant Leap for Frogkind
+EXTRAVERSION = -rc3
+NAME = Shuffling Zombie Juror
 
 # *DOCUMENTATION*
 # To see a list of typical targets execute "make help"
@@ -311,9 +311,15 @@
 # If the user is running make -s (silent mode), suppress echoing of
 # commands
 
+ifneq ($(filter 4.%,$(MAKE_VERSION)),)	# make-4
+ifneq ($(filter %s ,$(firstword x$(MAKEFLAGS))),)
+  quiet=silent_
+endif
+else					# make-3.8x
 ifneq ($(filter s% -s%,$(MAKEFLAGS)),)
   quiet=silent_
 endif
+endif
 
 export quiet Q KBUILD_VERBOSE
 
@@ -633,7 +639,7 @@
 
 ifdef CONFIG_DEBUG_INFO
 KBUILD_CFLAGS	+= -g
-KBUILD_AFLAGS	+= -gdwarf-2
+KBUILD_AFLAGS	+= -Wa,--gdwarf-2
 endif
 
 ifdef CONFIG_DEBUG_INFO_REDUCED
@@ -682,6 +688,9 @@
 # require functions to have arguments in prototypes, not empty 'int foo()'
 KBUILD_CFLAGS   += $(call cc-option,-Werror=strict-prototypes)
 
+# Prohibit date/time macros, which would make the build non-deterministic
+KBUILD_CFLAGS   += $(call cc-option,-Werror=date-time)
+
 # use the deterministic mode of AR if available
 KBUILD_ARFLAGS := $(call ar-option,D)
 
diff --git a/arch/alpha/Kconfig b/arch/alpha/Kconfig
index 97a2d9a..f6c6b34 100644
--- a/arch/alpha/Kconfig
+++ b/arch/alpha/Kconfig
@@ -17,6 +17,7 @@
 	select ARCH_WANT_IPC_PARSE_VERSION
 	select ARCH_HAVE_NMI_SAFE_CMPXCHG
 	select ARCH_HAS_ATOMIC64_DEC_IF_POSITIVE
+	select AUDIT_ARCH
 	select GENERIC_CLOCKEVENTS
 	select GENERIC_SMP_IDLE_THREAD
 	select GENERIC_STRNCPY_FROM_USER
@@ -77,6 +78,8 @@
 source "init/Kconfig"
 source "kernel/Kconfig.freezer"
 
+config AUDIT_ARCH
+	bool
 
 menu "System setup"
 
diff --git a/arch/alpha/include/asm/ptrace.h b/arch/alpha/include/asm/ptrace.h
index 2112850..9047c2f 100644
--- a/arch/alpha/include/asm/ptrace.h
+++ b/arch/alpha/include/asm/ptrace.h
@@ -19,4 +19,9 @@
 
 #define force_successful_syscall_return() (current_pt_regs()->r0 = 0)
 
+static inline unsigned long regs_return_value(struct pt_regs *regs)
+{
+	return regs->r0;
+}
+
 #endif
diff --git a/arch/alpha/include/asm/thread_info.h b/arch/alpha/include/asm/thread_info.h
index 453597b..3d6ce6d 100644
--- a/arch/alpha/include/asm/thread_info.h
+++ b/arch/alpha/include/asm/thread_info.h
@@ -70,6 +70,7 @@
 #define TIF_NOTIFY_RESUME	1	/* callback before returning to user */
 #define TIF_SIGPENDING		2	/* signal pending */
 #define TIF_NEED_RESCHED	3	/* rescheduling necessary */
+#define TIF_SYSCALL_AUDIT	4	/* syscall audit active */
 #define TIF_DIE_IF_KERNEL	9	/* dik recursion lock */
 #define TIF_MEMDIE		13	/* is terminating due to OOM killer */
 
@@ -77,6 +78,7 @@
 #define _TIF_SIGPENDING		(1<<TIF_SIGPENDING)
 #define _TIF_NEED_RESCHED	(1<<TIF_NEED_RESCHED)
 #define _TIF_NOTIFY_RESUME	(1<<TIF_NOTIFY_RESUME)
+#define _TIF_SYSCALL_AUDIT	(1<<TIF_SYSCALL_AUDIT)
 
 /* Work to do on interrupt/exception return.  */
 #define _TIF_WORK_MASK		(_TIF_SIGPENDING | _TIF_NEED_RESCHED | \
diff --git a/arch/alpha/kernel/Makefile b/arch/alpha/kernel/Makefile
index 0d54650..3ecac01 100644
--- a/arch/alpha/kernel/Makefile
+++ b/arch/alpha/kernel/Makefile
@@ -17,6 +17,7 @@
 obj-$(CONFIG_MODULES)	+= module.o
 obj-$(CONFIG_PERF_EVENTS) += perf_event.o
 obj-$(CONFIG_RTC_DRV_ALPHA) += rtc.o
+obj-$(CONFIG_AUDIT)	+= audit.o
 
 ifdef CONFIG_ALPHA_GENERIC
 
diff --git a/arch/alpha/kernel/audit.c b/arch/alpha/kernel/audit.c
new file mode 100644
index 0000000..96a9d18
--- /dev/null
+++ b/arch/alpha/kernel/audit.c
@@ -0,0 +1,60 @@
+#include <linux/init.h>
+#include <linux/types.h>
+#include <linux/audit.h>
+#include <asm/unistd.h>
+
+static unsigned dir_class[] = {
+#include <asm-generic/audit_dir_write.h>
+~0U
+};
+
+static unsigned read_class[] = {
+#include <asm-generic/audit_read.h>
+~0U
+};
+
+static unsigned write_class[] = {
+#include <asm-generic/audit_write.h>
+~0U
+};
+
+static unsigned chattr_class[] = {
+#include <asm-generic/audit_change_attr.h>
+~0U
+};
+
+static unsigned signal_class[] = {
+#include <asm-generic/audit_signal.h>
+~0U
+};
+
+int audit_classify_arch(int arch)
+{
+	return 0;
+}
+
+int audit_classify_syscall(int abi, unsigned syscall)
+{
+	switch(syscall) {
+	case __NR_open:
+		return 2;
+	case __NR_openat:
+		return 3;
+	case __NR_execve:
+		return 5;
+	default:
+		return 0;
+	}
+}
+
+static int __init audit_classes_init(void)
+{
+	audit_register_class(AUDIT_CLASS_WRITE, write_class);
+	audit_register_class(AUDIT_CLASS_READ, read_class);
+	audit_register_class(AUDIT_CLASS_DIR_WRITE, dir_class);
+	audit_register_class(AUDIT_CLASS_CHATTR, chattr_class);
+	audit_register_class(AUDIT_CLASS_SIGNAL, signal_class);
+	return 0;
+}
+
+__initcall(audit_classes_init);
diff --git a/arch/alpha/kernel/entry.S b/arch/alpha/kernel/entry.S
index a969b95..98703d9 100644
--- a/arch/alpha/kernel/entry.S
+++ b/arch/alpha/kernel/entry.S
@@ -465,7 +465,11 @@
 	.cfi_rel_offset	$16, SP_OFF+24
 	.cfi_rel_offset	$17, SP_OFF+32
 	.cfi_rel_offset	$18, SP_OFF+40
-	blbs	$3, strace
+#ifdef CONFIG_AUDITSYSCALL
+	lda     $6, _TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT
+	and     $3, $6, $3
+#endif
+	bne     $3, strace
 	beq	$4, 1f
 	ldq	$27, 0($5)
 1:	jsr	$26, ($27), alpha_ni_syscall
diff --git a/arch/alpha/kernel/ptrace.c b/arch/alpha/kernel/ptrace.c
index 2a4a80f..86d8351 100644
--- a/arch/alpha/kernel/ptrace.c
+++ b/arch/alpha/kernel/ptrace.c
@@ -14,6 +14,7 @@
 #include <linux/security.h>
 #include <linux/signal.h>
 #include <linux/tracehook.h>
+#include <linux/audit.h>
 
 #include <asm/uaccess.h>
 #include <asm/pgtable.h>
@@ -316,15 +317,18 @@
 asmlinkage unsigned long syscall_trace_enter(void)
 {
 	unsigned long ret = 0;
+	struct pt_regs *regs = current_pt_regs();
 	if (test_thread_flag(TIF_SYSCALL_TRACE) &&
 	    tracehook_report_syscall_entry(current_pt_regs()))
 		ret = -1UL;
+	audit_syscall_entry(AUDIT_ARCH_ALPHA, regs->r0, regs->r16, regs->r17, regs->r18, regs->r19);
 	return ret ?: current_pt_regs()->r0;
 }
 
 asmlinkage void
 syscall_trace_leave(void)
 {
+	audit_syscall_exit(current_pt_regs());
 	if (test_thread_flag(TIF_SYSCALL_TRACE))
 		tracehook_report_syscall_exit(current_pt_regs(), 0);
 }
diff --git a/arch/alpha/lib/csum_partial_copy.c b/arch/alpha/lib/csum_partial_copy.c
index ff3c107..5675dca 100644
--- a/arch/alpha/lib/csum_partial_copy.c
+++ b/arch/alpha/lib/csum_partial_copy.c
@@ -378,6 +378,11 @@
 __wsum
 csum_partial_copy_nocheck(const void *src, void *dst, int len, __wsum sum)
 {
-	return csum_partial_copy_from_user((__force const void __user *)src,
-			dst, len, sum, NULL);
+	__wsum checksum;
+	mm_segment_t oldfs = get_fs();
+	set_fs(KERNEL_DS);
+	checksum = csum_partial_copy_from_user((__force const void __user *)src,
+						dst, len, sum, NULL);
+	set_fs(oldfs);
+	return checksum;
 }
diff --git a/arch/arc/include/asm/linkage.h b/arch/arc/include/asm/linkage.h
index 0283e9e..66ee552 100644
--- a/arch/arc/include/asm/linkage.h
+++ b/arch/arc/include/asm/linkage.h
@@ -11,6 +11,8 @@
 
 #ifdef __ASSEMBLY__
 
+#define ASM_NL		 `	/* use '`' to mark new line in macro */
+
 /* Can't use the ENTRY macro in linux/linkage.h
  * gas considers ';' as comment vs. newline
  */
diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig
index dc6ef9a..e254198 100644
--- a/arch/arm/Kconfig
+++ b/arch/arm/Kconfig
@@ -1905,6 +1905,7 @@
 	depends on !GENERIC_ATOMIC64
 	select ARM_PSCI
 	select SWIOTLB_XEN
+	select ARCH_DMA_ADDR_T_64BIT
 	help
 	  Say Y if you want to run Linux in a Virtual Machine on Xen on ARM.
 
diff --git a/arch/arm/Makefile b/arch/arm/Makefile
index 23d5e39..08a9ef5 100644
--- a/arch/arm/Makefile
+++ b/arch/arm/Makefile
@@ -96,7 +96,7 @@
 tune-y := $(tune-y)
 
 ifeq ($(CONFIG_AEABI),y)
-CFLAGS_ABI	:=-mabi=aapcs-linux -mno-thumb-interwork
+CFLAGS_ABI	:=-mabi=aapcs-linux -mno-thumb-interwork -mfpu=vfp
 else
 CFLAGS_ABI	:=$(call cc-option,-mapcs-32,-mabi=apcs-gnu) $(call cc-option,-mno-thumb-interwork,)
 endif
diff --git a/arch/arm/boot/dts/Makefile b/arch/arm/boot/dts/Makefile
index ede21c1..6d1e43d 100644
--- a/arch/arm/boot/dts/Makefile
+++ b/arch/arm/boot/dts/Makefile
@@ -38,6 +38,7 @@
 dtb-$(CONFIG_ARCH_AT91) += at91sam9x25ek.dtb
 dtb-$(CONFIG_ARCH_AT91) += at91sam9x35ek.dtb
 # sama5d3
+dtb-$(CONFIG_ARCH_AT91)	+= at91-sama5d3_xplained.dtb
 dtb-$(CONFIG_ARCH_AT91)	+= sama5d31ek.dtb
 dtb-$(CONFIG_ARCH_AT91)	+= sama5d33ek.dtb
 dtb-$(CONFIG_ARCH_AT91)	+= sama5d34ek.dtb
@@ -152,10 +153,13 @@
 	imx53-mba53.dtb \
 	imx53-qsb.dtb \
 	imx53-smd.dtb \
+	imx6dl-cubox-i.dtb \
+	imx6dl-hummingboard.dtb \
 	imx6dl-sabreauto.dtb \
 	imx6dl-sabresd.dtb \
 	imx6dl-wandboard.dtb \
 	imx6q-arm2.dtb \
+	imx6q-cubox-i.dtb \
 	imx6q-phytec-pbab01.dtb \
 	imx6q-sabreauto.dtb \
 	imx6q-sabrelite.dtb \
diff --git a/arch/arm/boot/dts/at91-sama5d3_xplained.dts b/arch/arm/boot/dts/at91-sama5d3_xplained.dts
new file mode 100644
index 0000000..ce13755
--- /dev/null
+++ b/arch/arm/boot/dts/at91-sama5d3_xplained.dts
@@ -0,0 +1,229 @@
+/*
+ * at91-sama5d3_xplained.dts - Device Tree file for the SAMA5D3 Xplained board
+ *
+ *  Copyright (C) 2014 Atmel,
+ *		  2014 Nicolas Ferre <nicolas.ferre@atmel.com>
+ *
+ * Licensed under GPLv2 or later.
+ */
+/dts-v1/;
+#include "sama5d36.dtsi"
+
+/ {
+	model = "SAMA5D3 Xplained";
+	compatible = "atmel,sama5d3-xplained", "atmel,sama5d3", "atmel,sama5";
+
+	chosen {
+		bootargs = "console=ttyS0,115200";
+	};
+
+	memory {
+		reg = <0x20000000 0x10000000>;
+	};
+
+	ahb {
+		apb {
+			mmc0: mmc@f0000000 {
+				pinctrl-0 = <&pinctrl_mmc0_clk_cmd_dat0 &pinctrl_mmc0_dat1_3 &pinctrl_mmc0_dat4_7 &pinctrl_mmc0_cd>;
+				status = "okay";
+				slot@0 {
+					reg = <0>;
+					bus-width = <8>;
+					cd-gpios = <&pioE 0 GPIO_ACTIVE_LOW>;
+				};
+			};
+
+			spi0: spi@f0004000 {
+				cs-gpios = <&pioD 13 0>;
+				status = "okay";
+			};
+
+			can0: can@f000c000 {
+				status = "okay";
+			};
+
+			i2c0: i2c@f0014000 {
+				status = "okay";
+			};
+
+			i2c1: i2c@f0018000 {
+				status = "okay";
+			};
+
+			macb0: ethernet@f0028000 {
+				phy-mode = "rgmii";
+				status = "okay";
+			};
+
+			usart0: serial@f001c000 {
+				status = "okay";
+			};
+
+			usart1: serial@f0020000 {
+				pinctrl-0 = <&pinctrl_usart1 &pinctrl_usart1_rts_cts>;
+				status = "okay";
+			};
+
+			uart0: serial@f0024000 {
+				status = "okay";
+			};
+
+			mmc1: mmc@f8000000 {
+				pinctrl-0 = <&pinctrl_mmc1_clk_cmd_dat0 &pinctrl_mmc1_dat1_3 &pinctrl_mmc1_cd>;
+				status = "okay";
+				slot@0 {
+					reg = <0>;
+					bus-width = <4>;
+					cd-gpios = <&pioE 1 GPIO_ACTIVE_HIGH>;
+				};
+			};
+
+			spi1: spi@f8008000 {
+				cs-gpios = <&pioC 25 0>, <0>, <0>, <&pioD 16 0>;
+				status = "okay";
+			};
+
+			adc0: adc@f8018000 {
+				pinctrl-0 = <
+					&pinctrl_adc0_adtrg
+					&pinctrl_adc0_ad0
+					&pinctrl_adc0_ad1
+					&pinctrl_adc0_ad2
+					&pinctrl_adc0_ad3
+					&pinctrl_adc0_ad4
+					&pinctrl_adc0_ad5
+					&pinctrl_adc0_ad6
+					&pinctrl_adc0_ad7
+					&pinctrl_adc0_ad8
+					&pinctrl_adc0_ad9
+					>;
+				status = "okay";
+			};
+
+			i2c2: i2c@f801c000 {
+				dmas = <0>, <0>;	/* Do not use DMA for i2c2 */
+				status = "okay";
+			};
+
+			macb1: ethernet@f802c000 {
+				phy-mode = "rmii";
+				status = "okay";
+			};
+
+			dbgu: serial@ffffee00 {
+				status = "okay";
+			};
+
+			pinctrl@fffff200 {
+				board {
+					pinctrl_mmc0_cd: mmc0_cd {
+						atmel,pins =
+							<AT91_PIOE 0 AT91_PERIPH_GPIO AT91_PINCTRL_PULL_UP_DEGLITCH>;
+					};
+
+					pinctrl_mmc1_cd: mmc1_cd {
+						atmel,pins =
+							<AT91_PIOE 1 AT91_PERIPH_GPIO AT91_PINCTRL_PULL_UP_DEGLITCH>;
+					};
+
+					pinctrl_usba_vbus: usba_vbus {
+						atmel,pins =
+							<AT91_PIOE 9 AT91_PERIPH_GPIO AT91_PINCTRL_DEGLITCH>;	/* PE9, conflicts with A9 */
+					};
+				};
+			};
+
+			pmc: pmc@fffffc00 {
+				main: mainck {
+					clock-frequency = <12000000>;
+				};
+			};
+		};
+
+		nand0: nand@60000000 {
+			nand-bus-width = <8>;
+			nand-ecc-mode = "hw";
+			atmel,has-pmecc;
+			atmel,pmecc-cap = <4>;
+			atmel,pmecc-sector-size = <512>;
+			nand-on-flash-bbt;
+			status = "okay";
+
+			at91bootstrap@0 {
+				label = "at91bootstrap";
+				reg = <0x0 0x40000>;
+			};
+
+			bootloader@40000 {
+				label = "bootloader";
+				reg = <0x40000 0x80000>;
+			};
+
+			bootloaderenv@c0000 {
+				label = "bootloader env";
+				reg = <0xc0000 0xc0000>;
+			};
+
+			dtb@180000 {
+				label = "device tree";
+				reg = <0x180000 0x80000>;
+			};
+
+			kernel@200000 {
+				label = "kernel";
+				reg = <0x200000 0x600000>;
+			};
+
+			rootfs@800000 {
+				label = "rootfs";
+				reg = <0x800000 0x0f800000>;
+			};
+		};
+
+		usb0: gadget@00500000 {
+			atmel,vbus-gpio = <&pioE 9 GPIO_ACTIVE_HIGH>;	/* PE9, conflicts with A9 */
+			pinctrl-names = "default";
+			pinctrl-0 = <&pinctrl_usba_vbus>;
+			status = "okay";
+		};
+
+		usb1: ohci@00600000 {
+			num-ports = <3>;
+			atmel,vbus-gpio = <0
+					   &pioE 3 GPIO_ACTIVE_LOW
+					   &pioE 4 GPIO_ACTIVE_LOW
+					  >;
+			status = "okay";
+		};
+
+		usb2: ehci@00700000 {
+			status = "okay";
+		};
+	};
+
+	gpio_keys {
+		compatible = "gpio-keys";
+
+		bp3 {
+			label = "PB_USER";
+			gpios = <&pioE 29 GPIO_ACTIVE_LOW>;
+			linux,code = <0x104>;
+			gpio-key,wakeup;
+		};
+	};
+
+	leds {
+		compatible = "gpio-leds";
+
+		d2 {
+			label = "d2";
+			gpios = <&pioE 23 GPIO_ACTIVE_LOW>;	/* PE23, conflicts with A23, CTS2 */
+			linux,default-trigger = "heartbeat";
+		};
+
+		d3 {
+			label = "d3";
+			gpios = <&pioE 24 GPIO_ACTIVE_HIGH>;
+		};
+	};
+};
diff --git a/arch/arm/boot/dts/at91sam9260.dtsi b/arch/arm/boot/dts/at91sam9260.dtsi
index 56ee828..997901f 100644
--- a/arch/arm/boot/dts/at91sam9260.dtsi
+++ b/arch/arm/boot/dts/at91sam9260.dtsi
@@ -648,6 +648,11 @@
 			watchdog@fffffd40 {
 				compatible = "atmel,at91sam9260-wdt";
 				reg = <0xfffffd40 0x10>;
+				interrupts = <1 IRQ_TYPE_LEVEL_HIGH 7>;
+				atmel,watchdog-type = "hardware";
+				atmel,reset-type = "all";
+				atmel,dbg-halt;
+				atmel,idle-halt;
 				status = "disabled";
 			};
 		};
diff --git a/arch/arm/boot/dts/at91sam9263.dtsi b/arch/arm/boot/dts/at91sam9263.dtsi
index c8fa9b9..fece866 100644
--- a/arch/arm/boot/dts/at91sam9263.dtsi
+++ b/arch/arm/boot/dts/at91sam9263.dtsi
@@ -523,7 +523,7 @@
 			};
 
 			i2c0: i2c@fff88000 {
-				compatible = "atmel,at91sam9263-i2c";
+				compatible = "atmel,at91sam9260-i2c";
 				reg = <0xfff88000 0x100>;
 				interrupts = <13 IRQ_TYPE_LEVEL_HIGH 6>;
 				#address-cells = <1>;
@@ -552,6 +552,11 @@
 			watchdog@fffffd40 {
 				compatible = "atmel,at91sam9260-wdt";
 				reg = <0xfffffd40 0x10>;
+				interrupts = <1 IRQ_TYPE_LEVEL_HIGH 7>;
+				atmel,watchdog-type = "hardware";
+				atmel,reset-type = "all";
+				atmel,dbg-halt;
+				atmel,idle-halt;
 				status = "disabled";
 			};
 
diff --git a/arch/arm/boot/dts/at91sam9g45.dtsi b/arch/arm/boot/dts/at91sam9g45.dtsi
index ef0857c..cbcc058 100644
--- a/arch/arm/boot/dts/at91sam9g45.dtsi
+++ b/arch/arm/boot/dts/at91sam9g45.dtsi
@@ -706,6 +706,11 @@
 			watchdog@fffffd40 {
 				compatible = "atmel,at91sam9260-wdt";
 				reg = <0xfffffd40 0x10>;
+				interrupts = <1 IRQ_TYPE_LEVEL_HIGH 7>;
+				atmel,watchdog-type = "hardware";
+				atmel,reset-type = "all";
+				atmel,dbg-halt;
+				atmel,idle-halt;
 				status = "disabled";
 			};
 
diff --git a/arch/arm/boot/dts/at91sam9n12.dtsi b/arch/arm/boot/dts/at91sam9n12.dtsi
index 7248270..394e6ce 100644
--- a/arch/arm/boot/dts/at91sam9n12.dtsi
+++ b/arch/arm/boot/dts/at91sam9n12.dtsi
@@ -541,6 +541,11 @@
 			watchdog@fffffe40 {
 				compatible = "atmel,at91sam9260-wdt";
 				reg = <0xfffffe40 0x10>;
+				interrupts = <1 IRQ_TYPE_LEVEL_HIGH 7>;
+				atmel,watchdog-type = "hardware";
+				atmel,reset-type = "all";
+				atmel,dbg-halt;
+				atmel,idle-halt;
 				status = "disabled";
 			};
 
diff --git a/arch/arm/boot/dts/at91sam9n12ek.dts b/arch/arm/boot/dts/at91sam9n12ek.dts
index e9487f6..924a6a6 100644
--- a/arch/arm/boot/dts/at91sam9n12ek.dts
+++ b/arch/arm/boot/dts/at91sam9n12ek.dts
@@ -124,6 +124,10 @@
 			nand-on-flash-bbt;
 			status = "okay";
 		};
+
+		usb0: ohci@00500000 {
+			status = "okay";
+		};
 	};
 
 	leds {
diff --git a/arch/arm/boot/dts/at91sam9x5.dtsi b/arch/arm/boot/dts/at91sam9x5.dtsi
index 6e5e9cf..174219d 100644
--- a/arch/arm/boot/dts/at91sam9x5.dtsi
+++ b/arch/arm/boot/dts/at91sam9x5.dtsi
@@ -754,6 +754,11 @@
 			watchdog@fffffe40 {
 				compatible = "atmel,at91sam9260-wdt";
 				reg = <0xfffffe40 0x10>;
+				interrupts = <1 IRQ_TYPE_LEVEL_HIGH 7>;
+				atmel,watchdog-type = "hardware";
+				atmel,reset-type = "all";
+				atmel,dbg-halt;
+				atmel,idle-halt;
 				status = "disabled";
 			};
 
diff --git a/arch/arm/boot/dts/bcm11351-brt.dts b/arch/arm/boot/dts/bcm11351-brt.dts
index 23cd16d..396b704 100644
--- a/arch/arm/boot/dts/bcm11351-brt.dts
+++ b/arch/arm/boot/dts/bcm11351-brt.dts
@@ -44,5 +44,11 @@
 		status = "okay";
 	};
 
+	usbotg: usb@3f120000 {
+		status = "okay";
+	};
 
+	usbphy: usb-phy@3f130000 {
+		status = "okay";
+	};
 };
diff --git a/arch/arm/boot/dts/bcm11351.dtsi b/arch/arm/boot/dts/bcm11351.dtsi
index dd8e878..e491b82 100644
--- a/arch/arm/boot/dts/bcm11351.dtsi
+++ b/arch/arm/boot/dts/bcm11351.dtsi
@@ -43,7 +43,7 @@
 		compatible = "brcm,bcm11351-dw-apb-uart", "snps,dw-apb-uart";
 		status = "disabled";
 		reg = <0x3e000000 0x1000>;
-		clock-frequency = <13000000>;
+		clocks = <&uartb_clk>;
 		interrupts = <GIC_SPI 67 IRQ_TYPE_LEVEL_HIGH>;
 		reg-shift = <2>;
 		reg-io-width = <4>;
@@ -53,7 +53,7 @@
 		compatible = "brcm,bcm11351-dw-apb-uart", "snps,dw-apb-uart";
 		status = "disabled";
 		reg = <0x3e001000 0x1000>;
-		clock-frequency = <13000000>;
+		clocks = <&uartb2_clk>;
 		interrupts = <GIC_SPI 66 IRQ_TYPE_LEVEL_HIGH>;
 		reg-shift = <2>;
 		reg-io-width = <4>;
@@ -63,7 +63,7 @@
 		compatible = "brcm,bcm11351-dw-apb-uart", "snps,dw-apb-uart";
 		status = "disabled";
 		reg = <0x3e002000 0x1000>;
-		clock-frequency = <13000000>;
+		clocks = <&uartb3_clk>;
 		interrupts = <GIC_SPI 65 IRQ_TYPE_LEVEL_HIGH>;
 		reg-shift = <2>;
 		reg-io-width = <4>;
@@ -73,7 +73,7 @@
 		compatible = "brcm,bcm11351-dw-apb-uart", "snps,dw-apb-uart";
 		status = "disabled";
 		reg = <0x3e003000 0x1000>;
-		clock-frequency = <13000000>;
+		clocks = <&uartb4_clk>;
 		interrupts = <GIC_SPI 64 IRQ_TYPE_LEVEL_HIGH>;
 		reg-shift = <2>;
 		reg-io-width = <4>;
@@ -95,7 +95,7 @@
 		compatible = "brcm,kona-timer";
 		reg = <0x35006000 0x1000>;
 		interrupts = <GIC_SPI 7 IRQ_TYPE_LEVEL_HIGH>;
-		clock-frequency = <32768>;
+		clocks = <&hub_timer_clk>;
 	};
 
 	gpio: gpio@35003000 {
@@ -118,6 +118,7 @@
 		compatible = "brcm,kona-sdhci";
 		reg = <0x3f180000 0x10000>;
 		interrupts = <GIC_SPI 77 IRQ_TYPE_LEVEL_HIGH>;
+		clocks = <&sdio1_clk>;
 		status = "disabled";
 	};
 
@@ -125,6 +126,7 @@
 		compatible = "brcm,kona-sdhci";
 		reg = <0x3f190000 0x10000>;
 		interrupts = <GIC_SPI 76 IRQ_TYPE_LEVEL_HIGH>;
+		clocks = <&sdio2_clk>;
 		status = "disabled";
 	};
 
@@ -132,6 +134,7 @@
 		compatible = "brcm,kona-sdhci";
 		reg = <0x3f1a0000 0x10000>;
 		interrupts = <GIC_SPI 74 IRQ_TYPE_LEVEL_HIGH>;
+		clocks = <&sdio3_clk>;
 		status = "disabled";
 	};
 
@@ -139,6 +142,7 @@
 		compatible = "brcm,kona-sdhci";
 		reg = <0x3f1b0000 0x10000>;
 		interrupts = <GIC_SPI 73 IRQ_TYPE_LEVEL_HIGH>;
+		clocks = <&sdio4_clk>;
 		status = "disabled";
 	};
 
@@ -146,4 +150,160 @@
 		compatible = "brcm,capri-pinctrl";
 		reg = <0x35004800 0x430>;
 	};
+
+	i2c@3e016000 {
+		compatible = "brcm,bcm11351-i2c", "brcm,kona-i2c";
+		reg = <0x3e016000 0x80>;
+		interrupts = <GIC_SPI 103 IRQ_TYPE_LEVEL_HIGH>;
+		#address-cells = <1>;
+		#size-cells = <0>;
+		clocks = <&bsc1_clk>;
+		status = "disabled";
+	};
+
+	i2c@3e017000 {
+		compatible = "brcm,bcm11351-i2c", "brcm,kona-i2c";
+		reg = <0x3e017000 0x80>;
+		interrupts = <GIC_SPI 102 IRQ_TYPE_LEVEL_HIGH>;
+		#address-cells = <1>;
+		#size-cells = <0>;
+		clocks = <&bsc2_clk>;
+		status = "disabled";
+	};
+
+	i2c@3e018000 {
+		compatible = "brcm,bcm11351-i2c", "brcm,kona-i2c";
+		reg = <0x3e018000 0x80>;
+		interrupts = <GIC_SPI 169 IRQ_TYPE_LEVEL_HIGH>;
+		#address-cells = <1>;
+		#size-cells = <0>;
+		clocks = <&bsc3_clk>;
+		status = "disabled";
+	};
+
+	i2c@3500d000 {
+		compatible = "brcm,bcm11351-i2c", "brcm,kona-i2c";
+		reg = <0x3500d000 0x80>;
+		interrupts = <GIC_SPI 14 IRQ_TYPE_LEVEL_HIGH>;
+		#address-cells = <1>;
+		#size-cells = <0>;
+		clocks = <&pmu_bsc_clk>;
+		status = "disabled";
+	};
+
+	clocks {
+		bsc1_clk: bsc1 {
+			compatible = "fixed-clock";
+			clock-frequency = <13000000>;
+			#clock-cells = <0>;
+		};
+
+		bsc2_clk: bsc2 {
+			compatible = "fixed-clock";
+			clock-frequency = <13000000>;
+			#clock-cells = <0>;
+		};
+
+		bsc3_clk: bsc3 {
+			compatible = "fixed-clock";
+			clock-frequency = <13000000>;
+			#clock-cells = <0>;
+		};
+
+		pmu_bsc_clk: pmu_bsc {
+			compatible = "fixed-clock";
+			clock-frequency = <13000000>;
+			#clock-cells = <0>;
+		};
+
+		hub_timer_clk: hub_timer {
+			compatible = "fixed-clock";
+			clock-frequency = <32768>;
+			#clock-cells = <0>;
+		};
+
+		pwm_clk: pwm {
+			compatible = "fixed-clock";
+			clock-frequency = <26000000>;
+			#clock-cells = <0>;
+		};
+
+		sdio1_clk: sdio1 {
+			compatible = "fixed-clock";
+			clock-frequency = <48000000>;
+			#clock-cells = <0>;
+		};
+
+		sdio2_clk: sdio2 {
+			compatible = "fixed-clock";
+			clock-frequency = <48000000>;
+			#clock-cells = <0>;
+		};
+
+		sdio3_clk: sdio3 {
+			compatible = "fixed-clock";
+			clock-frequency = <48000000>;
+			#clock-cells = <0>;
+		};
+
+		sdio4_clk: sdio4 {
+			compatible = "fixed-clock";
+			clock-frequency = <48000000>;
+			#clock-cells = <0>;
+		};
+
+		tmon_1m_clk: tmon_1m {
+			compatible = "fixed-clock";
+			clock-frequency = <1000000>;
+			#clock-cells = <0>;
+		};
+
+		uartb_clk: uartb {
+			compatible = "fixed-clock";
+			clock-frequency = <13000000>;
+			#clock-cells = <0>;
+		};
+
+		uartb2_clk: uartb2 {
+			compatible = "fixed-clock";
+			clock-frequency = <13000000>;
+			#clock-cells = <0>;
+		};
+
+		uartb3_clk: uartb3 {
+			compatible = "fixed-clock";
+			clock-frequency = <13000000>;
+			#clock-cells = <0>;
+		};
+
+		uartb4_clk: uartb4 {
+			compatible = "fixed-clock";
+			clock-frequency = <13000000>;
+			#clock-cells = <0>;
+		};
+
+		usb_otg_ahb_clk: usb_otg_ahb {
+			compatible = "fixed-clock";
+			clock-frequency = <52000000>;
+			#clock-cells = <0>;
+		};
+	};
+
+	usbotg: usb@3f120000 {
+		compatible = "snps,dwc2";
+		reg = <0x3f120000 0x10000>;
+		interrupts = <GIC_SPI 47 IRQ_TYPE_LEVEL_HIGH>;
+		clocks = <&usb_otg_ahb_clk>;
+		clock-names = "otg";
+		phys = <&usbphy>;
+		phy-names = "usb2-phy";
+		status = "disabled";
+	};
+
+	usbphy: usb-phy@3f130000 {
+		compatible = "brcm,kona-usb2-phy";
+		reg = <0x3f130000 0x28>;
+		#phy-cells = <0>;
+		status = "disabled";
+	};
 };
diff --git a/arch/arm/boot/dts/bcm28155-ap.dts b/arch/arm/boot/dts/bcm28155-ap.dts
index 08e47c2..5ff2382 100644
--- a/arch/arm/boot/dts/bcm28155-ap.dts
+++ b/arch/arm/boot/dts/bcm28155-ap.dts
@@ -13,6 +13,8 @@
 
 /dts-v1/;
 
+#include <dt-bindings/gpio/gpio.h>
+
 #include "bcm11351.dtsi"
 
 / {
@@ -27,6 +29,26 @@
 		status = "okay";
 	};
 
+	i2c@3e016000 {
+		status="okay";
+		clock-frequency = <400000>;
+	};
+
+	i2c@3e017000 {
+		status="okay";
+		clock-frequency = <400000>;
+	};
+
+	i2c@3e018000 {
+		status="okay";
+		clock-frequency = <400000>;
+	};
+
+	i2c@3500d000 {
+		status="okay";
+		clock-frequency = <400000>;
+	};
+
 	sdio1: sdio@3f180000 {
 		max-frequency = <48000000>;
 		status = "okay";
@@ -40,7 +62,15 @@
 
 	sdio4: sdio@3f1b0000 {
 		max-frequency = <48000000>;
-		cd-gpios = <&gpio 14 0>;
+		cd-gpios = <&gpio 14 GPIO_ACTIVE_LOW>;
+		status = "okay";
+	};
+
+	usbotg: usb@3f120000 {
+		status = "okay";
+	};
+
+	usbphy: usb-phy@3f130000 {
 		status = "okay";
 	};
 };
diff --git a/arch/arm/boot/dts/imx6dl-cubox-i.dts b/arch/arm/boot/dts/imx6dl-cubox-i.dts
new file mode 100644
index 0000000..58aa8f2
--- /dev/null
+++ b/arch/arm/boot/dts/imx6dl-cubox-i.dts
@@ -0,0 +1,12 @@
+/*
+ * Copyright (C) 2014 Russell King
+ */
+/dts-v1/;
+
+#include "imx6dl.dtsi"
+#include "imx6qdl-cubox-i.dtsi"
+
+/ {
+	model = "SolidRun Cubox-i Solo/DualLite";
+	compatible = "solidrun,cubox-i/dl", "fsl,imx6dl";
+};
diff --git a/arch/arm/boot/dts/imx6dl-hummingboard.dts b/arch/arm/boot/dts/imx6dl-hummingboard.dts
new file mode 100644
index 0000000..5bfae54
--- /dev/null
+++ b/arch/arm/boot/dts/imx6dl-hummingboard.dts
@@ -0,0 +1,163 @@
+/*
+ * Copyright (C) 2013,2014 Russell King
+ */
+/dts-v1/;
+
+#include "imx6dl.dtsi"
+#include "imx6qdl-microsom.dtsi"
+#include "imx6qdl-microsom-ar8035.dtsi"
+
+/ {
+	model = "SolidRun HummingBoard DL/Solo";
+	compatible = "solidrun,hummingboard", "fsl,imx6dl";
+
+	ir_recv: ir-receiver {
+		compatible = "gpio-ir-receiver";
+		gpios = <&gpio1 2 1>;
+		pinctrl-names = "default";
+		pinctrl-0 = <&pinctrl_hummingboard_gpio1_2>;
+	};
+
+	regulators {
+		compatible = "simple-bus";
+
+		reg_3p3v: 3p3v {
+			compatible = "regulator-fixed";
+			regulator-name = "3P3V";
+			regulator-min-microvolt = <3300000>;
+			regulator-max-microvolt = <3300000>;
+			regulator-always-on;
+		};
+
+		reg_usbh1_vbus: usb-h1-vbus {
+			compatible = "regulator-fixed";
+			enable-active-high;
+			gpio = <&gpio1 0 0>;
+			pinctrl-names = "default";
+			pinctrl-0 = <&pinctrl_hummingboard_usbh1_vbus>;
+			regulator-name = "usb_h1_vbus";
+			regulator-min-microvolt = <5000000>;
+			regulator-max-microvolt = <5000000>;
+		};
+
+		reg_usbotg_vbus: usb-otg-vbus {
+			compatible = "regulator-fixed";
+			enable-active-high;
+			gpio = <&gpio3 22 0>;
+			pinctrl-names = "default";
+			pinctrl-0 = <&pinctrl_hummingboard_usbotg_vbus>;
+			regulator-name = "usb_otg_vbus";
+			regulator-min-microvolt = <5000000>;
+			regulator-max-microvolt = <5000000>;
+		};
+	};
+
+	sound-spdif {
+		compatible = "fsl,imx-audio-spdif";
+		model = "imx-spdif";
+		/* IMX6 doesn't implement this yet */
+		spdif-controller = <&spdif>;
+		spdif-out;
+	};
+};
+
+&can1 {
+	pinctrl-names = "default";
+	pinctrl-0 = <&pinctrl_hummingboard_flexcan1>;
+	status = "okay";
+};
+
+&i2c1 {
+	pinctrl-names = "default";
+	pinctrl-0 = <&pinctrl_hummingboard_i2c1>;
+
+	/*
+	 * Not fitted on Carrier-1 board... yet
+	status = "okay";
+
+	rtc: pcf8523@68 {
+		compatible = "nxp,pcf8523";
+		reg = <0x68>;
+	};
+	 */
+};
+
+&iomuxc {
+	hummingboard {
+		pinctrl_hummingboard_flexcan1: hummingboard-flexcan1 {
+			fsl,pins = <
+				MX6QDL_PAD_SD3_CLK__FLEXCAN1_RX 0x80000000
+				MX6QDL_PAD_SD3_CMD__FLEXCAN1_TX 0x80000000
+			>;
+		};
+
+		pinctrl_hummingboard_gpio1_2: hummingboard-gpio1_2 {
+			fsl,pins = <
+				MX6QDL_PAD_GPIO_2__GPIO1_IO02 0x80000000
+			>;
+		};
+
+		pinctrl_hummingboard_i2c1: hummingboard-i2c1 {
+			fsl,pins = <
+				MX6QDL_PAD_EIM_D21__I2C1_SCL 0x4001b8b1
+				MX6QDL_PAD_EIM_D28__I2C1_SDA 0x4001b8b1
+			>;
+		};
+
+		pinctrl_hummingboard_spdif: hummingboard-spdif {
+			fsl,pins = <MX6QDL_PAD_GPIO_17__SPDIF_OUT 0x13091>;
+		};
+
+		pinctrl_hummingboard_usbh1_vbus: hummingboard-usbh1-vbus {
+			fsl,pins = <MX6QDL_PAD_GPIO_0__GPIO1_IO00 0x1b0b0>;
+		};
+
+		pinctrl_hummingboard_usbotg_vbus: hummingboard-usbotg-vbus {
+			fsl,pins = <MX6QDL_PAD_EIM_D22__GPIO3_IO22 0x1b0b0>;
+		};
+
+		pinctrl_hummingboard_usdhc2_aux: hummingboard-usdhc2-aux {
+			fsl,pins = <
+				MX6QDL_PAD_GPIO_4__GPIO1_IO04    0x1f071
+			>;
+		};
+
+		pinctrl_hummingboard_usdhc2: hummingboard-usdhc2 {
+			fsl,pins = <
+				MX6QDL_PAD_SD2_CMD__SD2_CMD    0x17059
+				MX6QDL_PAD_SD2_CLK__SD2_CLK    0x10059
+				MX6QDL_PAD_SD2_DAT0__SD2_DATA0 0x17059
+				MX6QDL_PAD_SD2_DAT1__SD2_DATA1 0x17059
+				MX6QDL_PAD_SD2_DAT2__SD2_DATA2 0x17059
+				MX6QDL_PAD_SD2_DAT3__SD2_DATA3 0x13059
+			>;
+		};
+	};
+};
+
+&spdif {
+	pinctrl-names = "default";
+	pinctrl-0 = <&pinctrl_hummingboard_spdif>;
+	status = "okay";
+};
+
+&usbh1 {
+	vbus-supply = <&reg_usbh1_vbus>;
+	status = "okay";
+};
+
+&usbotg {
+	vbus-supply = <&reg_usbotg_vbus>;
+	status = "okay";
+};
+
+&usdhc2 {
+	pinctrl-names = "default";
+	pinctrl-0 = <
+		&pinctrl_hummingboard_usdhc2_aux
+		&pinctrl_hummingboard_usdhc2
+	>;
+	vmmc-supply = <&reg_3p3v>;
+	cd-gpios = <&gpio1 4 0>;
+	status = "okay";
+};
diff --git a/arch/arm/boot/dts/imx6q-cubox-i.dts b/arch/arm/boot/dts/imx6q-cubox-i.dts
new file mode 100644
index 0000000..bc5f31e
--- /dev/null
+++ b/arch/arm/boot/dts/imx6q-cubox-i.dts
@@ -0,0 +1,16 @@
+/*
+ * Copyright (C) 2014 Russell King
+ */
+/dts-v1/;
+
+#include "imx6q.dtsi"
+#include "imx6qdl-cubox-i.dtsi"
+
+/ {
+	model = "SolidRun Cubox-i Dual/Quad";
+	compatible = "solidrun,cubox-i/q", "fsl,imx6q";
+};
+
+&sata {
+	status = "okay";
+};
diff --git a/arch/arm/boot/dts/imx6qdl-cubox-i.dtsi b/arch/arm/boot/dts/imx6qdl-cubox-i.dtsi
new file mode 100644
index 0000000..c2a2488
--- /dev/null
+++ b/arch/arm/boot/dts/imx6qdl-cubox-i.dtsi
@@ -0,0 +1,139 @@
+/*
+ * Copyright (C) 2014 Russell King
+ */
+#include "imx6qdl-microsom.dtsi"
+#include "imx6qdl-microsom-ar8035.dtsi"
+
+/ {
+	ir_recv: ir-receiver {
+		compatible = "gpio-ir-receiver";
+		gpios = <&gpio3 9 1>;
+		pinctrl-names = "default";
+		pinctrl-0 = <&pinctrl_cubox_i_ir>;
+	};
+
+	regulators {
+		compatible = "simple-bus";
+
+		reg_3p3v: 3p3v {
+			compatible = "regulator-fixed";
+			regulator-name = "3P3V";
+			regulator-min-microvolt = <3300000>;
+			regulator-max-microvolt = <3300000>;
+			regulator-always-on;
+		};
+
+		reg_usbh1_vbus: usb-h1-vbus {
+			compatible = "regulator-fixed";
+			enable-active-high;
+			gpio = <&gpio1 0 0>;
+			pinctrl-names = "default";
+			pinctrl-0 = <&pinctrl_cubox_i_usbh1_vbus>;
+			regulator-name = "usb_h1_vbus";
+			regulator-min-microvolt = <5000000>;
+			regulator-max-microvolt = <5000000>;
+		};
+
+		reg_usbotg_vbus: usb-otg-vbus {
+			compatible = "regulator-fixed";
+			enable-active-high;
+			gpio = <&gpio3 22 0>;
+			pinctrl-names = "default";
+			pinctrl-0 = <&pinctrl_cubox_i_usbotg_vbus>;
+			regulator-name = "usb_otg_vbus";
+			regulator-min-microvolt = <5000000>;
+			regulator-max-microvolt = <5000000>;
+		};
+	};
+
+	sound-spdif {
+		compatible = "fsl,imx-audio-spdif";
+		model = "imx-spdif";
+		/* IMX6 doesn't implement this yet */
+		spdif-controller = <&spdif>;
+		spdif-out;
+	};
+};
+
+&i2c3 {
+	pinctrl-names = "default";
+	pinctrl-0 = <&pinctrl_cubox_i_i2c3>;
+
+	status = "okay";
+
+	rtc: pcf8523@68 {
+		compatible = "nxp,pcf8523";
+		reg = <0x68>;
+	};
+};
+
+&iomuxc {
+	cubox_i {
+		pinctrl_cubox_i_i2c3: cubox-i-i2c3 {
+			fsl,pins = <
+				MX6QDL_PAD_EIM_D17__I2C3_SCL 0x4001b8b1
+				MX6QDL_PAD_EIM_D18__I2C3_SDA 0x4001b8b1
+			>;
+		};
+
+		pinctrl_cubox_i_ir: cubox-i-ir {
+			fsl,pins = <
+				MX6QDL_PAD_EIM_DA9__GPIO3_IO09 0x80000000
+			>;
+		};
+
+		pinctrl_cubox_i_spdif: cubox-i-spdif {
+			fsl,pins = <MX6QDL_PAD_GPIO_17__SPDIF_OUT 0x13091>;
+		};
+
+		pinctrl_cubox_i_usbh1_vbus: cubox-i-usbh1-vbus {
+			fsl,pins = <MX6QDL_PAD_GPIO_0__GPIO1_IO00 0x4001b0b0>;
+		};
+
+		pinctrl_cubox_i_usbotg_vbus: cubox-i-usbotg-vbus {
+			fsl,pins = <MX6QDL_PAD_EIM_D22__GPIO3_IO22 0x4001b0b0>;
+		};
+
+		pinctrl_cubox_i_usdhc2_aux: cubox-i-usdhc2-aux {
+			fsl,pins = <
+				MX6QDL_PAD_GPIO_4__GPIO1_IO04    0x1f071
+				MX6QDL_PAD_KEY_ROW1__SD2_VSELECT 0x1b071
+			>;
+		};
+
+		pinctrl_cubox_i_usdhc2: cubox-i-usdhc2 {
+			fsl,pins = <
+				MX6QDL_PAD_SD2_CMD__SD2_CMD    0x17059
+				MX6QDL_PAD_SD2_CLK__SD2_CLK    0x10059
+				MX6QDL_PAD_SD2_DAT0__SD2_DATA0 0x17059
+				MX6QDL_PAD_SD2_DAT1__SD2_DATA1 0x17059
+				MX6QDL_PAD_SD2_DAT2__SD2_DATA2 0x17059
+				MX6QDL_PAD_SD2_DAT3__SD2_DATA3 0x13059
+			>;
+		};
+	};
+};
+
+&spdif {
+	pinctrl-names = "default";
+	pinctrl-0 = <&pinctrl_cubox_i_spdif>;
+	status = "okay";
+};
+
+&usbh1 {
+	vbus-supply = <&reg_usbh1_vbus>;
+	status = "okay";
+};
+
+&usbotg {
+	vbus-supply = <&reg_usbotg_vbus>;
+	status = "okay";
+};
+
+&usdhc2 {
+	pinctrl-names = "default";
+	pinctrl-0 = <&pinctrl_cubox_i_usdhc2_aux &pinctrl_cubox_i_usdhc2>;
+	vmmc-supply = <&reg_3p3v>;
+	cd-gpios = <&gpio1 4 0>;
+	status = "okay";
+};
diff --git a/arch/arm/boot/dts/imx6qdl-microsom-ar8035.dtsi b/arch/arm/boot/dts/imx6qdl-microsom-ar8035.dtsi
new file mode 100644
index 0000000..a3cb2ff
--- /dev/null
+++ b/arch/arm/boot/dts/imx6qdl-microsom-ar8035.dtsi
@@ -0,0 +1,62 @@
+/*
+ * Copyright (C) 2013,2014 Russell King
+ *
+ * This describes the hookup for an AR8035 to the iMX6 on the SolidRun
+ * MicroSOM.
+ */
+&fec {
+	pinctrl-names = "default";
+	pinctrl-0 = <&pinctrl_microsom_enet_ar8035>;
+	phy-mode = "rgmii";
+	phy-reset-duration = <2>;
+	phy-reset-gpios = <&gpio4 15 0>;
+	status = "okay";
+};
+
+&iomuxc {
+	enet {
+		pinctrl_microsom_enet_ar8035: microsom-enet-ar8035 {
+			fsl,pins = <
+				MX6QDL_PAD_ENET_MDIO__ENET_MDIO		0x1b0b0
+				MX6QDL_PAD_ENET_MDC__ENET_MDC		0x1b0b0
+				/* AR8035 reset */
+				MX6QDL_PAD_KEY_ROW4__GPIO4_IO15		0x130b0
+				/* AR8035 interrupt */
+				MX6QDL_PAD_DI0_PIN2__GPIO4_IO18		0x80000000
+				/* GPIO16 -> AR8035 25MHz */
+				MX6QDL_PAD_GPIO_16__ENET_REF_CLK	0xc0000000
+				MX6QDL_PAD_RGMII_TXC__RGMII_TXC		0x80000000
+				MX6QDL_PAD_RGMII_TD0__RGMII_TD0		0x1b0b0
+				MX6QDL_PAD_RGMII_TD1__RGMII_TD1		0x1b0b0
+				MX6QDL_PAD_RGMII_TD2__RGMII_TD2		0x1b0b0
+				MX6QDL_PAD_RGMII_TD3__RGMII_TD3		0x1b0b0
+				MX6QDL_PAD_RGMII_TX_CTL__RGMII_TX_CTL	0x1b0b0
+				/* AR8035 CLK_25M --> ENET_REF_CLK (V22) */
+				MX6QDL_PAD_ENET_REF_CLK__ENET_TX_CLK	0x0a0b1
+				/* AR8035 pin strapping: IO voltage: pull up */
+				MX6QDL_PAD_RGMII_RXC__RGMII_RXC		0x1b0b0
+				/* AR8035 pin strapping: PHYADDR#0: pull down */
+				MX6QDL_PAD_RGMII_RD0__RGMII_RD0		0x130b0
+				/* AR8035 pin strapping: PHYADDR#1: pull down */
+				MX6QDL_PAD_RGMII_RD1__RGMII_RD1		0x130b0
+				/* AR8035 pin strapping: MODE#1: pull up */
+				MX6QDL_PAD_RGMII_RD2__RGMII_RD2		0x1b0b0
+				/* AR8035 pin strapping: MODE#3: pull up */
+				MX6QDL_PAD_RGMII_RD3__RGMII_RD3		0x1b0b0
+				/* AR8035 pin strapping: MODE#0: pull down */
+				MX6QDL_PAD_RGMII_RX_CTL__RGMII_RX_CTL	0x130b0
+
+				/*
+				 * As the RMII pins are also connected to RGMII
+				 * so that an AR8030 can be placed, set these
+				 * to high-z with the same pulls as above.
+				 * Use the GPIO settings to avoid changing the
+				 * input select registers.
+				 */
+				MX6QDL_PAD_ENET_CRS_DV__GPIO1_IO25	0x03000
+				MX6QDL_PAD_ENET_RXD0__GPIO1_IO27	0x03000
+				MX6QDL_PAD_ENET_RXD1__GPIO1_IO26	0x03000
+			>;
+		};
+	};
+};
diff --git a/arch/arm/boot/dts/imx6qdl-microsom.dtsi b/arch/arm/boot/dts/imx6qdl-microsom.dtsi
new file mode 100644
index 0000000..d729d0b
--- /dev/null
+++ b/arch/arm/boot/dts/imx6qdl-microsom.dtsi
@@ -0,0 +1,33 @@
+/*
+ * Copyright (C) 2013,2014 Russell King
+ */
+
+&iomuxc {
+	microsom {
+		pinctrl_microsom_uart1: microsom-uart1 {
+			fsl,pins = <
+				MX6QDL_PAD_CSI0_DAT10__UART1_TX_DATA	0x1b0b1
+				MX6QDL_PAD_CSI0_DAT11__UART1_RX_DATA	0x1b0b1
+			>;
+		};
+
+		pinctrl_microsom_usbotg: microsom-usbotg {
+			/*
+			 * Similar to pinctrl_usbotg_2, but we want it
+			 * pulled down for a fixed host connection.
+			 */
+			fsl,pins = <MX6QDL_PAD_GPIO_1__USB_OTG_ID 0x13059>;
+		};
+	};
+};
+
+&uart1 {
+	pinctrl-names = "default";
+	pinctrl-0 = <&pinctrl_microsom_uart1>;
+	status = "okay";
+};
+
+&usbotg {
+	pinctrl-names = "default";
+	pinctrl-0 = <&pinctrl_microsom_usbotg>;
+};
diff --git a/arch/arm/boot/dts/kizbox.dts b/arch/arm/boot/dts/kizbox.dts
index 02df191..928f6ee 100644
--- a/arch/arm/boot/dts/kizbox.dts
+++ b/arch/arm/boot/dts/kizbox.dts
@@ -53,6 +53,12 @@
 				status = "okay";
 			};
 
+			watchdog@fffffd40 {
+				timeout-sec = <15>;
+				atmel,max-heartbeat-sec = <16>;
+				atmel,min-heartbeat-sec = <0>;
+				status = "okay";
+			};
 		};
 
 		nand0: nand@40000000 {
diff --git a/arch/arm/boot/dts/moxart-uc7112lx.dts b/arch/arm/boot/dts/moxart-uc7112lx.dts
index 90749d5..10d088d 100644
--- a/arch/arm/boot/dts/moxart-uc7112lx.dts
+++ b/arch/arm/boot/dts/moxart-uc7112lx.dts
@@ -17,6 +17,14 @@
 		reg = <0x0 0x2000000>;
 	};
 
+	clocks {
+		ref12: ref12M {
+			compatible = "fixed-clock";
+			#clock-cells = <0>;
+			clock-frequency = <12000000>;
+		};
+	};
+
 	flash@80000000,0 {
 		compatible = "numonyx,js28f128", "cfi-flash";
 		reg = <0x80000000 0x1000000>;
diff --git a/arch/arm/boot/dts/moxart.dtsi b/arch/arm/boot/dts/moxart.dtsi
index da1d8ef..1fd27ed 100644
--- a/arch/arm/boot/dts/moxart.dtsi
+++ b/arch/arm/boot/dts/moxart.dtsi
@@ -26,12 +26,6 @@
 	clocks {
 		#address-cells = <1>;
 		#size-cells = <0>;
-
-		ref12: ref12M {
-			compatible = "fixed-clock";
-			#clock-cells = <0>;
-			clock-frequency = <12000000>;
-		};
 	};
 
 	soc {
diff --git a/arch/arm/boot/dts/qcom-msm8660-surf.dts b/arch/arm/boot/dts/qcom-msm8660-surf.dts
index 1187185..68a72f5 100644
--- a/arch/arm/boot/dts/qcom-msm8660-surf.dts
+++ b/arch/arm/boot/dts/qcom-msm8660-surf.dts
@@ -2,6 +2,8 @@
 
 /include/ "skeleton.dtsi"
 
+#include <dt-bindings/clock/qcom,gcc-msm8660.h>
+
 / {
 	model = "Qualcomm MSM8660 SURF";
 	compatible = "qcom,msm8660-surf", "qcom,msm8660";
@@ -37,11 +39,20 @@
 		#interrupt-cells = <2>;
 	};
 
+	gcc: clock-controller@900000 {
+		compatible = "qcom,gcc-msm8660";
+		#clock-cells = <1>;
+		#reset-cells = <1>;
+		reg = <0x900000 0x4000>;
+	};
+
 	serial@19c40000 {
 		compatible = "qcom,msm-uartdm-v1.3", "qcom,msm-uartdm";
 		reg = <0x19c40000 0x1000>,
 		      <0x19c00000 0x1000>;
 		interrupts = <0 195 0x0>;
+		clocks = <&gcc GSBI12_UART_CLK>, <&gcc GSBI12_H_CLK>;
+		clock-names = "core", "iface";
 	};
 
 	qcom,ssbi@500000 {
diff --git a/arch/arm/boot/dts/qcom-msm8960-cdp.dts b/arch/arm/boot/dts/qcom-msm8960-cdp.dts
index 6ccbac7..7c30de4 100644
--- a/arch/arm/boot/dts/qcom-msm8960-cdp.dts
+++ b/arch/arm/boot/dts/qcom-msm8960-cdp.dts
@@ -2,6 +2,8 @@
 
 /include/ "skeleton.dtsi"
 
+#include <dt-bindings/clock/qcom,gcc-msm8960.h>
+
 / {
 	model = "Qualcomm MSM8960 CDP";
 	compatible = "qcom,msm8960-cdp", "qcom,msm8960";
@@ -37,11 +39,27 @@
 		reg = <0x800000 0x4000>;
 	};
 
+	gcc: clock-controller@900000 {
+		compatible = "qcom,gcc-msm8960";
+		#clock-cells = <1>;
+		#reset-cells = <1>;
+		reg = <0x900000 0x4000>;
+	};
+
+	clock-controller@4000000 {
+		compatible = "qcom,mmcc-msm8960";
+		reg = <0x4000000 0x1000>;
+		#clock-cells = <1>;
+		#reset-cells = <1>;
+	};
+
 	serial@16440000 {
 		compatible = "qcom,msm-uartdm-v1.3", "qcom,msm-uartdm";
 		reg = <0x16440000 0x1000>,
 		      <0x16400000 0x1000>;
 		interrupts = <0 154 0x0>;
+		clocks = <&gcc GSBI5_UART_CLK>, <&gcc GSBI5_H_CLK>;
+		clock-names = "core", "iface";
 	};
 
 	qcom,ssbi@500000 {
diff --git a/arch/arm/boot/dts/qcom-msm8974.dtsi b/arch/arm/boot/dts/qcom-msm8974.dtsi
index 6ac9496..9e5dadb 100644
--- a/arch/arm/boot/dts/qcom-msm8974.dtsi
+++ b/arch/arm/boot/dts/qcom-msm8974.dtsi
@@ -2,6 +2,8 @@
 
 #include "skeleton.dtsi"
 
+#include <dt-bindings/clock/qcom,gcc-msm8974.h>
+
 / {
 	model = "Qualcomm MSM8974";
 	compatible = "qcom,msm8974";
@@ -93,5 +95,27 @@
 			compatible = "qcom,pshold";
 			reg = <0xfc4ab000 0x4>;
 		};
+
+		gcc: clock-controller@fc400000 {
+			compatible = "qcom,gcc-msm8974";
+			#clock-cells = <1>;
+			#reset-cells = <1>;
+			reg = <0xfc400000 0x4000>;
+		};
+
+		mmcc: clock-controller@fd8c0000 {
+			compatible = "qcom,mmcc-msm8974";
+			#clock-cells = <1>;
+			#reset-cells = <1>;
+			reg = <0xfd8c0000 0x6000>;
+		};
+
+		serial@f991e000 {
+			compatible = "qcom,msm-uartdm-v1.4", "qcom,msm-uartdm";
+			reg = <0xf991e000 0x1000>;
+			interrupts = <0 108 0x0>;
+			clocks = <&gcc GCC_BLSP1_UART2_APPS_CLK>, <&gcc GCC_BLSP1_AHB_CLK>;
+			clock-names = "core", "iface";
+		};
 	};
 };
diff --git a/arch/arm/boot/dts/r8a7790.dtsi b/arch/arm/boot/dts/r8a7790.dtsi
index f48487c..71b1251 100644
--- a/arch/arm/boot/dts/r8a7790.dtsi
+++ b/arch/arm/boot/dts/r8a7790.dtsi
@@ -197,7 +197,7 @@
 		reg = <0 0xe6508000 0 0x40>;
 		interrupt-parent = <&gic>;
 		interrupts = <0 287 IRQ_TYPE_LEVEL_HIGH>;
-		clocks = <&mstp3_clks R8A7790_CLK_I2C0>;
+		clocks = <&mstp9_clks R8A7790_CLK_I2C0>;
 		status = "disabled";
 	};
 
@@ -208,7 +208,7 @@
 		reg = <0 0xe6518000 0 0x40>;
 		interrupt-parent = <&gic>;
 		interrupts = <0 288 IRQ_TYPE_LEVEL_HIGH>;
-		clocks = <&mstp3_clks R8A7790_CLK_I2C1>;
+		clocks = <&mstp9_clks R8A7790_CLK_I2C1>;
 		status = "disabled";
 	};
 
@@ -219,7 +219,7 @@
 		reg = <0 0xe6530000 0 0x40>;
 		interrupt-parent = <&gic>;
 		interrupts = <0 286 IRQ_TYPE_LEVEL_HIGH>;
-		clocks = <&mstp3_clks R8A7790_CLK_I2C2>;
+		clocks = <&mstp9_clks R8A7790_CLK_I2C2>;
 		status = "disabled";
 	};
 
@@ -230,7 +230,7 @@
 		reg = <0 0xe6540000 0 0x40>;
 		interrupt-parent = <&gic>;
 		interrupts = <0 290 IRQ_TYPE_LEVEL_HIGH>;
-		clocks = <&mstp3_clks R8A7790_CLK_I2C3>;
+		clocks = <&mstp9_clks R8A7790_CLK_I2C3>;
 		status = "disabled";
 	};
 
diff --git a/arch/arm/boot/dts/sama5d3.dtsi b/arch/arm/boot/dts/sama5d3.dtsi
index 1105558..3d5faf8 100644
--- a/arch/arm/boot/dts/sama5d3.dtsi
+++ b/arch/arm/boot/dts/sama5d3.dtsi
@@ -1092,6 +1092,11 @@
 			watchdog@fffffe40 {
 				compatible = "atmel,at91sam9260-wdt";
 				reg = <0xfffffe40 0x10>;
+				interrupts = <4 IRQ_TYPE_LEVEL_HIGH 7>;
+				atmel,watchdog-type = "hardware";
+				atmel,reset-type = "all";
+				atmel,dbg-halt;
+				atmel,idle-halt;
 				status = "disabled";
 			};
 
@@ -1223,7 +1228,7 @@
 			compatible = "atmel,at91rm9200-ohci", "usb-ohci";
 			reg = <0x00600000 0x100000>;
 			interrupts = <32 IRQ_TYPE_LEVEL_HIGH 2>;
-			clocks = <&usb>, <&uhphs_clk>, <&udphs_clk>,
+			clocks = <&usb>, <&uhphs_clk>, <&uhphs_clk>,
 				 <&uhpck>;
 			clock-names = "usb_clk", "ohci_clk", "hclk", "uhpck";
 			status = "disabled";
diff --git a/arch/arm/boot/dts/ste-href.dtsi b/arch/arm/boot/dts/ste-href.dtsi
index 0c1e8d8..6cb9b68 100644
--- a/arch/arm/boot/dts/ste-href.dtsi
+++ b/arch/arm/boot/dts/ste-href.dtsi
@@ -188,7 +188,6 @@
 		msp2: msp@80117000 {
 			pinctrl-names = "default";
 			pinctrl-0 = <&msp2_default_mode>;
-			status = "okay";
 		};
 
 		msp3: msp@80125000 {
diff --git a/arch/arm/boot/dts/sun4i-a10.dtsi b/arch/arm/boot/dts/sun4i-a10.dtsi
index 040bb0e..10666ca 100644
--- a/arch/arm/boot/dts/sun4i-a10.dtsi
+++ b/arch/arm/boot/dts/sun4i-a10.dtsi
@@ -315,7 +315,7 @@
 		ranges;
 
 		emac: ethernet@01c0b000 {
-			compatible = "allwinner,sun4i-emac";
+			compatible = "allwinner,sun4i-a10-emac";
 			reg = <0x01c0b000 0x1000>;
 			interrupts = <55>;
 			clocks = <&ahb_gates 17>;
@@ -323,7 +323,7 @@
 		};
 
 		mdio@01c0b080 {
-			compatible = "allwinner,sun4i-mdio";
+			compatible = "allwinner,sun4i-a10-mdio";
 			reg = <0x01c0b080 0x14>;
 			status = "disabled";
 			#address-cells = <1>;
diff --git a/arch/arm/boot/dts/sun5i-a10s.dtsi b/arch/arm/boot/dts/sun5i-a10s.dtsi
index ea16054..6496159 100644
--- a/arch/arm/boot/dts/sun5i-a10s.dtsi
+++ b/arch/arm/boot/dts/sun5i-a10s.dtsi
@@ -278,7 +278,7 @@
 		ranges;
 
 		emac: ethernet@01c0b000 {
-			compatible = "allwinner,sun4i-emac";
+			compatible = "allwinner,sun4i-a10-emac";
 			reg = <0x01c0b000 0x1000>;
 			interrupts = <55>;
 			clocks = <&ahb_gates 17>;
@@ -286,7 +286,7 @@
 		};
 
 		mdio@01c0b080 {
-			compatible = "allwinner,sun4i-mdio";
+			compatible = "allwinner,sun4i-a10-mdio";
 			reg = <0x01c0b080 0x14>;
 			status = "disabled";
 			#address-cells = <1>;
diff --git a/arch/arm/boot/dts/sun7i-a20.dtsi b/arch/arm/boot/dts/sun7i-a20.dtsi
index 119f066..9ff0948 100644
--- a/arch/arm/boot/dts/sun7i-a20.dtsi
+++ b/arch/arm/boot/dts/sun7i-a20.dtsi
@@ -340,7 +340,7 @@
 		ranges;
 
 		emac: ethernet@01c0b000 {
-			compatible = "allwinner,sun4i-emac";
+			compatible = "allwinner,sun4i-a10-emac";
 			reg = <0x01c0b000 0x1000>;
 			interrupts = <0 55 4>;
 			clocks = <&ahb_gates 17>;
@@ -348,7 +348,7 @@
 		};
 
 		mdio@01c0b080 {
-			compatible = "allwinner,sun4i-mdio";
+			compatible = "allwinner,sun4i-a10-mdio";
 			reg = <0x01c0b080 0x14>;
 			status = "disabled";
 			#address-cells = <1>;
diff --git a/arch/arm/boot/dts/zynq-7000.dtsi b/arch/arm/boot/dts/zynq-7000.dtsi
index 5d7681b..8b67b19 100644
--- a/arch/arm/boot/dts/zynq-7000.dtsi
+++ b/arch/arm/boot/dts/zynq-7000.dtsi
@@ -102,6 +102,26 @@
 			clock-names = "pclk", "hclk", "tx_clk";
 		};
 
+		sdhci0: ps7-sdhci@e0100000 {
+			compatible = "arasan,sdhci-8.9a";
+			status = "disabled";
+			clock-names = "clk_xin", "clk_ahb";
+			clocks = <&clkc 21>, <&clkc 32>;
+			interrupt-parent = <&intc>;
+			interrupts = <0 24 4>;
+			reg = <0xe0100000 0x1000>;
+		} ;
+
+		sdhci1: ps7-sdhci@e0101000 {
+			compatible = "arasan,sdhci-8.9a";
+			status = "disabled";
+			clock-names = "clk_xin", "clk_ahb";
+			clocks = <&clkc 22>, <&clkc 33>;
+			interrupt-parent = <&intc>;
+			interrupts = <0 47 4>;
+			reg = <0xe0101000 0x1000>;
+		} ;
+
 		slcr: slcr@f8000000 {
 			compatible = "xlnx,zynq-slcr";
 			reg = <0xF8000000 0x1000>;
diff --git a/arch/arm/boot/dts/zynq-zc702.dts b/arch/arm/boot/dts/zynq-zc702.dts
index 34d680a..c913f77 100644
--- a/arch/arm/boot/dts/zynq-zc702.dts
+++ b/arch/arm/boot/dts/zynq-zc702.dts
@@ -34,6 +34,10 @@
 	phy-mode = "rgmii";
 };
 
+&sdhci0 {
+	status = "okay";
+};
+
 &uart1 {
 	status = "okay";
 };
diff --git a/arch/arm/boot/dts/zynq-zc706.dts b/arch/arm/boot/dts/zynq-zc706.dts
index b2835d5..88f62c5 100644
--- a/arch/arm/boot/dts/zynq-zc706.dts
+++ b/arch/arm/boot/dts/zynq-zc706.dts
@@ -35,6 +35,10 @@
 	phy-mode = "rgmii";
 };
 
+&sdhci0 {
+	status = "okay";
+};
+
 &uart1 {
 	status = "okay";
 };
diff --git a/arch/arm/boot/dts/zynq-zed.dts b/arch/arm/boot/dts/zynq-zed.dts
index 2eda068..82d7ef1 100644
--- a/arch/arm/boot/dts/zynq-zed.dts
+++ b/arch/arm/boot/dts/zynq-zed.dts
@@ -35,6 +35,10 @@
 	phy-mode = "rgmii";
 };
 
+&sdhci0 {
+	status = "okay";
+};
+
 &uart1 {
 	status = "okay";
 };
diff --git a/arch/arm/configs/bcm_defconfig b/arch/arm/configs/bcm_defconfig
index 2c38fdf..2519d6d 100644
--- a/arch/arm/configs/bcm_defconfig
+++ b/arch/arm/configs/bcm_defconfig
@@ -126,3 +126,6 @@
 CONFIG_XZ_DEC=y
 CONFIG_AVERAGE=y
 CONFIG_PINCTRL_CAPRI=y
+CONFIG_WATCHDOG=y
+CONFIG_BCM_KONA_WDT=y
+CONFIG_BCM_KONA_WDT_DEBUG=y
diff --git a/arch/arm/configs/keystone_defconfig b/arch/arm/configs/keystone_defconfig
index a018244..4582e16 100644
--- a/arch/arm/configs/keystone_defconfig
+++ b/arch/arm/configs/keystone_defconfig
@@ -142,6 +142,7 @@
 CONFIG_USB_DWC3_VERBOSE=y
 CONFIG_KEYSTONE_USB_PHY=y
 CONFIG_DMADEVICES=y
+CONFIG_TI_EDMA=y
 CONFIG_COMMON_CLK_DEBUG=y
 CONFIG_MEMORY=y
 CONFIG_EXT4_FS=y
diff --git a/arch/arm/configs/msm_defconfig b/arch/arm/configs/msm_defconfig
index 0219c65..c5858b9 100644
--- a/arch/arm/configs/msm_defconfig
+++ b/arch/arm/configs/msm_defconfig
@@ -114,6 +114,10 @@
 CONFIG_NEW_LEDS=y
 CONFIG_RTC_CLASS=y
 CONFIG_STAGING=y
+CONFIG_COMMON_CLK_QCOM=y
+CONFIG_MSM_GCC_8660=y
+CONFIG_MSM_MMCC_8960=y
+CONFIG_MSM_MMCC_8974=y
 CONFIG_MSM_IOMMU=y
 CONFIG_EXT2_FS=y
 CONFIG_EXT2_FS_XATTR=y
diff --git a/arch/arm/configs/multi_v7_defconfig b/arch/arm/configs/multi_v7_defconfig
index 687e4e8..ee69829 100644
--- a/arch/arm/configs/multi_v7_defconfig
+++ b/arch/arm/configs/multi_v7_defconfig
@@ -1,7 +1,12 @@
+CONFIG_SYSVIPC=y
 CONFIG_IRQ_DOMAIN_DEBUG=y
 CONFIG_NO_HZ=y
 CONFIG_HIGH_RES_TIMERS=y
 CONFIG_BLK_DEV_INITRD=y
+CONFIG_EMBEDDED=y
+CONFIG_MODULES=y
+CONFIG_MODULE_UNLOAD=y
+CONFIG_PARTITION_ADVANCED=y
 CONFIG_ARCH_MVEBU=y
 CONFIG_MACH_ARMADA_370=y
 CONFIG_MACH_ARMADA_XP=y
@@ -24,6 +29,7 @@
 CONFIG_ARCH_OMAP4=y
 CONFIG_SOC_OMAP5=y
 CONFIG_SOC_AM33XX=y
+CONFIG_SOC_DRA7XX=y
 CONFIG_SOC_AM43XX=y
 CONFIG_ARCH_ROCKCHIP=y
 CONFIG_ARCH_SOCFPGA=y
@@ -38,7 +44,7 @@
 CONFIG_ARCH_TEGRA_2x_SOC=y
 CONFIG_ARCH_TEGRA_3x_SOC=y
 CONFIG_ARCH_TEGRA_114_SOC=y
-CONFIG_TEGRA_PCI=y
+CONFIG_ARCH_TEGRA_124_SOC=y
 CONFIG_TEGRA_EMC_SCALING_ENABLE=y
 CONFIG_ARCH_U8500=y
 CONFIG_MACH_HREFV60=y
@@ -49,19 +55,55 @@
 CONFIG_ARCH_VIRT=y
 CONFIG_ARCH_WM8850=y
 CONFIG_ARCH_ZYNQ=y
+CONFIG_TRUSTED_FOUNDATIONS=y
+CONFIG_PCI=y
+CONFIG_PCI_MSI=y
+CONFIG_PCI_MVEBU=y
+CONFIG_PCI_TEGRA=y
 CONFIG_SMP=y
 CONFIG_HIGHPTE=y
+CONFIG_CMA=y
 CONFIG_ARM_APPENDED_DTB=y
 CONFIG_ARM_ATAG_DTB_COMPAT=y
+CONFIG_KEXEC=y
+CONFIG_CPU_FREQ=y
+CONFIG_CPU_FREQ_STAT_DETAILS=y
+CONFIG_CPU_FREQ_DEFAULT_GOV_ONDEMAND=y
+CONFIG_CPU_IDLE=y
 CONFIG_NET=y
+CONFIG_PACKET=y
 CONFIG_UNIX=y
 CONFIG_INET=y
 CONFIG_IP_PNP=y
 CONFIG_IP_PNP_DHCP=y
+CONFIG_IP_PNP_BOOTP=y
+CONFIG_IP_PNP_RARP=y
+CONFIG_IPV6_ROUTER_PREF=y
+CONFIG_IPV6_OPTIMISTIC_DAD=y
+CONFIG_INET6_AH=m
+CONFIG_INET6_ESP=m
+CONFIG_INET6_IPCOMP=m
+CONFIG_IPV6_MIP6=m
+CONFIG_IPV6_TUNNEL=m
+CONFIG_IPV6_MULTIPLE_TABLES=y
+CONFIG_CFG80211=m
+CONFIG_MAC80211=m
+CONFIG_RFKILL=y
+CONFIG_RFKILL_INPUT=y
+CONFIG_RFKILL_GPIO=y
 CONFIG_DEVTMPFS=y
 CONFIG_DEVTMPFS_MOUNT=y
+CONFIG_DMA_CMA=y
 CONFIG_OMAP_OCP2SCP=y
+CONFIG_MTD=y
+CONFIG_MTD_M25P80=y
+CONFIG_BLK_DEV_LOOP=y
+CONFIG_ICS932S401=y
+CONFIG_APDS9802ALS=y
+CONFIG_ISL29003=y
 CONFIG_BLK_DEV_SD=y
+CONFIG_BLK_DEV_SR=y
+CONFIG_SCSI_MULTI_LUN=y
 CONFIG_ATA=y
 CONFIG_SATA_AHCI_PLATFORM=y
 CONFIG_SATA_HIGHBANK=y
@@ -69,13 +111,30 @@
 CONFIG_NETDEVICES=y
 CONFIG_SUN4I_EMAC=y
 CONFIG_NET_CALXEDA_XGMAC=y
+CONFIG_MVNETA=y
 CONFIG_KS8851=y
+CONFIG_R8169=y
 CONFIG_SMSC911X=y
 CONFIG_STMMAC_ETH=y
-CONFIG_ICPLUS_PHY=y
-CONFIG_MDIO_SUN4I=y
 CONFIG_TI_CPSW=y
+CONFIG_AT803X_PHY=y
+CONFIG_MARVELL_PHY=y
+CONFIG_ICPLUS_PHY=y
+CONFIG_USB_PEGASUS=y
+CONFIG_USB_USBNET=y
+CONFIG_USB_NET_SMSC75XX=y
+CONFIG_USB_NET_SMSC95XX=y
+CONFIG_BRCMFMAC=m
+CONFIG_RT2X00=m
+CONFIG_RT2800USB=m
+CONFIG_INPUT_EVDEV=y
+CONFIG_KEYBOARD_GPIO=y
+CONFIG_KEYBOARD_TEGRA=y
 CONFIG_KEYBOARD_SPEAR=y
+CONFIG_KEYBOARD_CROS_EC=y
+CONFIG_MOUSE_PS2_ELANTECH=y
+CONFIG_INPUT_MISC=y
+CONFIG_INPUT_MPU3050=y
 CONFIG_SERIO_AMBAKMI=y
 CONFIG_SERIAL_8250=y
 CONFIG_SERIAL_8250_CONSOLE=y
@@ -98,30 +157,81 @@
 CONFIG_SERIAL_FSL_LPUART_CONSOLE=y
 CONFIG_SERIAL_ST_ASC=y
 CONFIG_SERIAL_ST_ASC_CONSOLE=y
+CONFIG_I2C_CHARDEV=y
+CONFIG_I2C_MUX=y
+CONFIG_I2C_MUX_PINCTRL=y
 CONFIG_I2C_DESIGNWARE_PLATFORM=y
+CONFIG_I2C_MV64XXX=y
 CONFIG_I2C_SIRF=y
 CONFIG_I2C_TEGRA=y
 CONFIG_SPI=y
 CONFIG_SPI_OMAP24XX=y
+CONFIG_SPI_ORION=y
 CONFIG_SPI_PL022=y
 CONFIG_SPI_SIRF=y
 CONFIG_SPI_TEGRA114=y
+CONFIG_SPI_TEGRA20_SFLASH=y
 CONFIG_SPI_TEGRA20_SLINK=y
-CONFIG_PINCTRL_SINGLE=y
+CONFIG_PINCTRL_AS3722=y
+CONFIG_PINCTRL_PALMAS=y
+CONFIG_GPIO_SYSFS=y
 CONFIG_GPIO_GENERIC_PLATFORM=y
+CONFIG_GPIO_PCA953X_IRQ=y
 CONFIG_GPIO_TWL4030=y
-CONFIG_REGULATOR_GPIO=y
+CONFIG_GPIO_PALMAS=y
+CONFIG_GPIO_TPS6586X=y
+CONFIG_GPIO_TPS65910=y
+CONFIG_BATTERY_SBS=y
+CONFIG_CHARGER_TPS65090=y
+CONFIG_POWER_RESET_AS3722=y
+CONFIG_POWER_RESET_GPIO=y
+CONFIG_SENSORS_LM90=y
+CONFIG_THERMAL=y
+CONFIG_ARMADA_THERMAL=y
+CONFIG_MFD_AS3722=y
+CONFIG_MFD_CROS_EC=y
+CONFIG_MFD_CROS_EC_SPI=y
+CONFIG_MFD_MAX8907=y
+CONFIG_MFD_PALMAS=y
+CONFIG_MFD_TPS65090=y
+CONFIG_MFD_TPS6586X=y
+CONFIG_MFD_TPS65910=y
+CONFIG_REGULATOR_VIRTUAL_CONSUMER=y
 CONFIG_REGULATOR_AB8500=y
+CONFIG_REGULATOR_AS3722=y
+CONFIG_REGULATOR_GPIO=y
+CONFIG_REGULATOR_MAX8907=y
+CONFIG_REGULATOR_PALMAS=y
 CONFIG_REGULATOR_TPS51632=y
 CONFIG_REGULATOR_TPS62360=y
+CONFIG_REGULATOR_TPS65090=y
+CONFIG_REGULATOR_TPS6586X=y
+CONFIG_REGULATOR_TPS65910=y
 CONFIG_REGULATOR_TWL4030=y
 CONFIG_REGULATOR_VEXPRESS=y
+CONFIG_MEDIA_SUPPORT=y
+CONFIG_MEDIA_CAMERA_SUPPORT=y
+CONFIG_MEDIA_USB_SUPPORT=y
 CONFIG_DRM=y
-CONFIG_TEGRA_HOST1X=y
 CONFIG_DRM_TEGRA=y
+CONFIG_DRM_PANEL_SIMPLE=y
 CONFIG_FB_ARMCLCD=y
 CONFIG_FB_WM8505=y
 CONFIG_FB_SIMPLE=y
+CONFIG_BACKLIGHT_LCD_SUPPORT=y
+CONFIG_BACKLIGHT_CLASS_DEVICE=y
+CONFIG_BACKLIGHT_PWM=y
+CONFIG_FRAMEBUFFER_CONSOLE=y
+CONFIG_SOUND=y
+CONFIG_SND=y
+CONFIG_SND_SOC=y
+CONFIG_SND_SOC_TEGRA=y
+CONFIG_SND_SOC_TEGRA_RT5640=y
+CONFIG_SND_SOC_TEGRA_WM8753=y
+CONFIG_SND_SOC_TEGRA_WM8903=y
+CONFIG_SND_SOC_TEGRA_TRIMSLICE=y
+CONFIG_SND_SOC_TEGRA_ALC5632=y
+CONFIG_SND_SOC_TEGRA_MAX98090=y
 CONFIG_USB=y
 CONFIG_USB_XHCI_HCD=y
 CONFIG_USB_EHCI_HCD=y
@@ -132,8 +242,6 @@
 CONFIG_USB_CHIPIDEA=y
 CONFIG_USB_CHIPIDEA_HOST=y
 CONFIG_AB8500_USB=y
-CONFIG_NOP_USB_XCEIV=y
-CONFIG_OMAP_USB2=y
 CONFIG_OMAP_USB3=y
 CONFIG_SAMSUNG_USB2PHY=y
 CONFIG_SAMSUNG_USB3PHY=y
@@ -144,24 +252,32 @@
 CONFIG_MMC_BLOCK_MINORS=16
 CONFIG_MMC_ARMMMCI=y
 CONFIG_MMC_SDHCI=y
-CONFIG_MMC_SDHCI_PLTFM=y
 CONFIG_MMC_SDHCI_ESDHC_IMX=y
 CONFIG_MMC_SDHCI_TEGRA=y
 CONFIG_MMC_SDHCI_SPEAR=y
 CONFIG_MMC_SDHCI_BCM_KONA=y
 CONFIG_MMC_OMAP=y
 CONFIG_MMC_OMAP_HS=y
+CONFIG_MMC_MVSDIO=y
 CONFIG_EDAC=y
 CONFIG_EDAC_MM_EDAC=y
 CONFIG_EDAC_HIGHBANK_MC=y
 CONFIG_EDAC_HIGHBANK_L2=y
 CONFIG_RTC_CLASS=y
+CONFIG_RTC_DRV_AS3722=y
+CONFIG_RTC_DRV_MAX8907=y
+CONFIG_RTC_DRV_PALMAS=y
 CONFIG_RTC_DRV_TWL4030=y
+CONFIG_RTC_DRV_TPS6586X=y
+CONFIG_RTC_DRV_TPS65910=y
+CONFIG_RTC_DRV_EM3027=y
 CONFIG_RTC_DRV_PL031=y
 CONFIG_RTC_DRV_VT8500=y
+CONFIG_RTC_DRV_MV=y
 CONFIG_RTC_DRV_TEGRA=y
 CONFIG_DMADEVICES=y
 CONFIG_DW_DMAC=y
+CONFIG_MV_XOR=y
 CONFIG_TEGRA20_APB_DMA=y
 CONFIG_STE_DMA40=y
 CONFIG_SIRF_DMA=y
@@ -171,15 +287,34 @@
 CONFIG_IMX_DMA=y
 CONFIG_MXS_DMA=y
 CONFIG_DMA_OMAP=y
+CONFIG_STAGING=y
+CONFIG_SENSORS_ISL29018=y
+CONFIG_SENSORS_ISL29028=y
+CONFIG_MFD_NVEC=y
+CONFIG_KEYBOARD_NVEC=y
+CONFIG_SERIO_NVEC_PS2=y
+CONFIG_NVEC_POWER=y
+CONFIG_TEGRA_IOMMU_GART=y
+CONFIG_TEGRA_IOMMU_SMMU=y
+CONFIG_MEMORY=y
+CONFIG_IIO=y
+CONFIG_AK8975=y
 CONFIG_PWM=y
+CONFIG_PWM_TEGRA=y
 CONFIG_PWM_VT8500=y
+CONFIG_OMAP_USB2=y
 CONFIG_EXT4_FS=y
+CONFIG_VFAT_FS=y
 CONFIG_TMPFS=y
+CONFIG_SQUASHFS=y
+CONFIG_SQUASHFS_LZO=y
+CONFIG_SQUASHFS_XZ=y
 CONFIG_NFS_FS=y
 CONFIG_NFS_V3_ACL=y
 CONFIG_NFS_V4=y
 CONFIG_ROOT_NFS=y
 CONFIG_PRINTK_TIME=y
 CONFIG_DEBUG_FS=y
-CONFIG_DEBUG_KERNEL=y
+CONFIG_MAGIC_SYSRQ=y
 CONFIG_LOCKUP_DETECTOR=y
+CONFIG_CRYPTO_DEV_TEGRA_AES=y
diff --git a/arch/arm/configs/tegra_defconfig b/arch/arm/configs/tegra_defconfig
index 5fdc9a0..00fe9e9 100644
--- a/arch/arm/configs/tegra_defconfig
+++ b/arch/arm/configs/tegra_defconfig
@@ -29,11 +29,11 @@
 CONFIG_ARCH_TEGRA_114_SOC=y
 CONFIG_ARCH_TEGRA_124_SOC=y
 CONFIG_TEGRA_EMC_SCALING_ENABLE=y
+CONFIG_TRUSTED_FOUNDATIONS=y
 CONFIG_PCI=y
 CONFIG_PCI_MSI=y
 CONFIG_PCI_TEGRA=y
 CONFIG_PCIEPORTBUS=y
-CONFIG_TRUSTED_FOUNDATIONS=y
 CONFIG_SMP=y
 CONFIG_PREEMPT=y
 CONFIG_AEABI=y
@@ -125,7 +125,6 @@
 CONFIG_SERIAL_OF_PLATFORM=y
 # CONFIG_HW_RANDOM is not set
 # CONFIG_I2C_COMPAT is not set
-CONFIG_I2C_MUX=y
 CONFIG_I2C_MUX_PINCTRL=y
 CONFIG_I2C_TEGRA=y
 CONFIG_SPI=y
@@ -169,9 +168,8 @@
 CONFIG_MEDIA_USB_SUPPORT=y
 CONFIG_USB_VIDEO_CLASS=m
 CONFIG_DRM=y
-CONFIG_DRM_PANEL=y
-CONFIG_DRM_PANEL_SIMPLE=y
 CONFIG_DRM_TEGRA=y
+CONFIG_DRM_PANEL_SIMPLE=y
 CONFIG_BACKLIGHT_LCD_SUPPORT=y
 # CONFIG_LCD_CLASS_DEVICE is not set
 CONFIG_BACKLIGHT_CLASS_DEVICE=y
@@ -206,10 +204,7 @@
 CONFIG_MMC_SDHCI=y
 CONFIG_MMC_SDHCI_PLTFM=y
 CONFIG_MMC_SDHCI_TEGRA=y
-CONFIG_NEW_LEDS=y
-CONFIG_LEDS_CLASS=y
 CONFIG_LEDS_GPIO=y
-CONFIG_LEDS_TRIGGERS=y
 CONFIG_LEDS_TRIGGER_TIMER=y
 CONFIG_LEDS_TRIGGER_ONESHOT=y
 CONFIG_LEDS_TRIGGER_HEARTBEAT=y
@@ -235,7 +230,6 @@
 CONFIG_SERIO_NVEC_PS2=y
 CONFIG_NVEC_POWER=y
 CONFIG_NVEC_PAZ00=y
-CONFIG_COMMON_CLK_DEBUG=y
 CONFIG_TEGRA_IOMMU_GART=y
 CONFIG_TEGRA_IOMMU_SMMU=y
 CONFIG_MEMORY=y
@@ -265,6 +259,7 @@
 CONFIG_NLS_ISO8859_1=y
 CONFIG_PRINTK_TIME=y
 CONFIG_DEBUG_INFO=y
+CONFIG_DEBUG_FS=y
 CONFIG_MAGIC_SYSRQ=y
 CONFIG_DEBUG_SLAB=y
 CONFIG_DEBUG_VM=y
diff --git a/arch/arm/include/asm/cacheflush.h b/arch/arm/include/asm/cacheflush.h
index e9a49fe..8b8b616 100644
--- a/arch/arm/include/asm/cacheflush.h
+++ b/arch/arm/include/asm/cacheflush.h
@@ -212,6 +212,7 @@
 static inline void __flush_icache_all(void)
 {
 	__flush_icache_preferred();
+	dsb();
 }
 
 /*
diff --git a/arch/arm/include/asm/io.h b/arch/arm/include/asm/io.h
index fbeb39c..8aa4cca 100644
--- a/arch/arm/include/asm/io.h
+++ b/arch/arm/include/asm/io.h
@@ -38,6 +38,12 @@
 #define isa_bus_to_virt phys_to_virt
 
 /*
+ * Atomic MMIO-wide IO modify
+ */
+extern void atomic_io_modify(void __iomem *reg, u32 mask, u32 set);
+extern void atomic_io_modify_relaxed(void __iomem *reg, u32 mask, u32 set);
+
+/*
  * Generic IO read/write.  These perform native-endian accesses.  Note
  * that some architectures will want to re-define __raw_{read,write}w.
  */
diff --git a/arch/arm/include/asm/pgtable-3level.h b/arch/arm/include/asm/pgtable-3level.h
index 03243f7..85c60ad 100644
--- a/arch/arm/include/asm/pgtable-3level.h
+++ b/arch/arm/include/asm/pgtable-3level.h
@@ -120,13 +120,16 @@
 /*
  * 2nd stage PTE definitions for LPAE.
  */
-#define L_PTE_S2_MT_UNCACHED	 (_AT(pteval_t, 0x5) << 2) /* MemAttr[3:0] */
-#define L_PTE_S2_MT_WRITETHROUGH (_AT(pteval_t, 0xa) << 2) /* MemAttr[3:0] */
-#define L_PTE_S2_MT_WRITEBACK	 (_AT(pteval_t, 0xf) << 2) /* MemAttr[3:0] */
-#define L_PTE_S2_RDONLY		 (_AT(pteval_t, 1) << 6)   /* HAP[1]   */
-#define L_PTE_S2_RDWR		 (_AT(pteval_t, 3) << 6)   /* HAP[2:1] */
+#define L_PTE_S2_MT_UNCACHED		(_AT(pteval_t, 0x0) << 2) /* strongly ordered */
+#define L_PTE_S2_MT_WRITETHROUGH	(_AT(pteval_t, 0xa) << 2) /* normal inner write-through */
+#define L_PTE_S2_MT_WRITEBACK		(_AT(pteval_t, 0xf) << 2) /* normal inner write-back */
+#define L_PTE_S2_MT_DEV_SHARED		(_AT(pteval_t, 0x1) << 2) /* device */
+#define L_PTE_S2_MT_MASK		(_AT(pteval_t, 0xf) << 2)
 
-#define L_PMD_S2_RDWR		 (_AT(pmdval_t, 3) << 6)   /* HAP[2:1] */
+#define L_PTE_S2_RDONLY			(_AT(pteval_t, 1) << 6)   /* HAP[1]   */
+#define L_PTE_S2_RDWR			(_AT(pteval_t, 3) << 6)   /* HAP[2:1] */
+
+#define L_PMD_S2_RDWR			(_AT(pmdval_t, 3) << 6)   /* HAP[2:1] */
 
 /*
  * Hyp-mode PL2 PTE definitions for LPAE.
diff --git a/arch/arm/include/asm/spinlock.h b/arch/arm/include/asm/spinlock.h
index ef3c607..ac4bfae 100644
--- a/arch/arm/include/asm/spinlock.h
+++ b/arch/arm/include/asm/spinlock.h
@@ -37,18 +37,9 @@
 
 static inline void dsb_sev(void)
 {
-#if __LINUX_ARM_ARCH__ >= 7
-	__asm__ __volatile__ (
-		"dsb ishst\n"
-		SEV
-	);
-#else
-	__asm__ __volatile__ (
-		"mcr p15, 0, %0, c7, c10, 4\n"
-		SEV
-		: : "r" (0)
-	);
-#endif
+
+	dsb(ishst);
+	__asm__(SEV);
 }
 
 /*
diff --git a/arch/arm/kernel/entry-armv.S b/arch/arm/kernel/entry-armv.S
index b3fb8c9..1879e8d 100644
--- a/arch/arm/kernel/entry-armv.S
+++ b/arch/arm/kernel/entry-armv.S
@@ -451,9 +451,11 @@
 	.arch	armv6t2
 #endif
 2:	ldrht	r5, [r4]
+ARM_BE8(rev16	r5, r5)				@ little endian instruction
 	cmp	r5, #0xe800			@ 32bit instruction if xx != 0
 	blo	__und_usr_fault_16		@ 16bit undefined instruction
 3:	ldrht	r0, [r2]
+ARM_BE8(rev16	r0, r0)				@ little endian instruction
 	add	r2, r2, #2			@ r2 is PC + 2, make it PC + 4
 	str	r2, [sp, #S_PC]			@ it's a 2x16bit instr, update
 	orr	r0, r0, r5, lsl #16
diff --git a/arch/arm/kernel/head.S b/arch/arm/kernel/head.S
index 32f317e..914616e 100644
--- a/arch/arm/kernel/head.S
+++ b/arch/arm/kernel/head.S
@@ -52,7 +52,8 @@
 	.equ	swapper_pg_dir, KERNEL_RAM_VADDR - PG_DIR_SIZE
 
 	.macro	pgtbl, rd, phys
-	add	\rd, \phys, #TEXT_OFFSET - PG_DIR_SIZE
+	add	\rd, \phys, #TEXT_OFFSET
+	sub	\rd, \rd, #PG_DIR_SIZE
 	.endm
 
 /*
diff --git a/arch/arm/kernel/io.c b/arch/arm/kernel/io.c
index dcd5b4d..9203cf8 100644
--- a/arch/arm/kernel/io.c
+++ b/arch/arm/kernel/io.c
@@ -1,6 +1,41 @@
 #include <linux/export.h>
 #include <linux/types.h>
 #include <linux/io.h>
+#include <linux/spinlock.h>
+
+static DEFINE_RAW_SPINLOCK(__io_lock);
+
+/*
+ * Generic atomic MMIO modify.
+ *
+ * Allows thread-safe access to registers shared by unrelated subsystems.
+ * The access is protected by a single MMIO-wide lock.
+ */
+void atomic_io_modify_relaxed(void __iomem *reg, u32 mask, u32 set)
+{
+	unsigned long flags;
+	u32 value;
+
+	raw_spin_lock_irqsave(&__io_lock, flags);
+	value = readl_relaxed(reg) & ~mask;
+	value |= (set & mask);
+	writel_relaxed(value, reg);
+	raw_spin_unlock_irqrestore(&__io_lock, flags);
+}
+EXPORT_SYMBOL(atomic_io_modify_relaxed);
+
+void atomic_io_modify(void __iomem *reg, u32 mask, u32 set)
+{
+	unsigned long flags;
+	u32 value;
+
+	raw_spin_lock_irqsave(&__io_lock, flags);
+	value = readl_relaxed(reg) & ~mask;
+	value |= (set & mask);
+	writel(value, reg);
+	raw_spin_unlock_irqrestore(&__io_lock, flags);
+}
+EXPORT_SYMBOL(atomic_io_modify);
 
 /*
  * Copy data from IO memory space to "real" memory space.
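
For context, a short usage sketch of the new helpers follows. It is illustrative only and not part of the patch: the register offset and the EXAMPLE_CTRL_* masks are hypothetical, standing in for a control register shared by two unrelated drivers.

/* Illustrative sketch only -- EXAMPLE_CTRL_* and the 0x10 offset are
 * hypothetical, not part of this patch. */
#include <linux/bitops.h>
#include <linux/io.h>

#define EXAMPLE_CTRL_CLKEN	BIT(3)		/* clock-enable bit   */
#define EXAMPLE_CTRL_MODE	GENMASK(1, 0)	/* two-bit mode field */

static void example_enable_clock(void __iomem *base)
{
	/* Set only the clock-enable bit; the other bits of the shared
	 * register are preserved under the MMIO-wide lock. */
	atomic_io_modify(base + 0x10, EXAMPLE_CTRL_CLKEN, EXAMPLE_CTRL_CLKEN);
}

static void example_set_mode(void __iomem *base, u32 mode)
{
	/* Relaxed variant: same locking, but writel_relaxed() is used,
	 * so no memory barrier is implied. */
	atomic_io_modify_relaxed(base + 0x10, EXAMPLE_CTRL_MODE, mode);
}

Both variants take the register address, a mask selecting the bits to change, and the new value for those bits, so callers never need their own lock for the read-modify-write.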
diff --git a/arch/arm/kernel/setup.c b/arch/arm/kernel/setup.c
index b0df976..1e8b030 100644
--- a/arch/arm/kernel/setup.c
+++ b/arch/arm/kernel/setup.c
@@ -731,7 +731,7 @@
 	kernel_data.end     = virt_to_phys(_end - 1);
 
 	for_each_memblock(memory, region) {
-		res = memblock_virt_alloc_low(sizeof(*res), 0);
+		res = memblock_virt_alloc(sizeof(*res), 0);
 		res->name  = "System RAM";
 		res->start = __pfn_to_phys(memblock_region_memory_base_pfn(region));
 		res->end = __pfn_to_phys(memblock_region_memory_end_pfn(region)) - 1;
diff --git a/arch/arm/mach-hisi/Kconfig b/arch/arm/mach-hisi/Kconfig
index 018ad67..1abae5f 100644
--- a/arch/arm/mach-hisi/Kconfig
+++ b/arch/arm/mach-hisi/Kconfig
@@ -8,10 +8,9 @@
 	select CLKSRC_OF
 	select GENERIC_CLOCKEVENTS
 	select HAVE_ARM_SCU
-	select HAVE_ARM_TWD
+	select HAVE_ARM_TWD if SMP
 	select HAVE_SMP
 	select PINCTRL
 	select PINCTRL_SINGLE
-	select SMP
 	help
 	  Support for Hisilicon Hi36xx/Hi37xx processor family
diff --git a/arch/arm/mach-imx/clk-imx6q.c b/arch/arm/mach-imx/clk-imx6q.c
index af2e582..4d677f4 100644
--- a/arch/arm/mach-imx/clk-imx6q.c
+++ b/arch/arm/mach-imx/clk-imx6q.c
@@ -482,6 +482,9 @@
 	if (IS_ENABLED(CONFIG_PCI_IMX6))
 		clk_set_parent(clk[lvds1_sel], clk[sata_ref]);
 
+	/* Set initial power mode */
+	imx6q_set_lpm(WAIT_CLOCKED);
+
 	np = of_find_compatible_node(NULL, NULL, "fsl,imx6q-gpt");
 	base = of_iomap(np, 0);
 	WARN_ON(!base);
diff --git a/arch/arm/mach-imx/clk-imx6sl.c b/arch/arm/mach-imx/clk-imx6sl.c
index 3781a18..4c86f30 100644
--- a/arch/arm/mach-imx/clk-imx6sl.c
+++ b/arch/arm/mach-imx/clk-imx6sl.c
@@ -266,6 +266,9 @@
 	/* Audio-related clocks configuration */
 	clk_set_parent(clks[IMX6SL_CLK_SPDIF0_SEL], clks[IMX6SL_CLK_PLL3_PFD3]);
 
+	/* Set initial power mode */
+	imx6q_set_lpm(WAIT_CLOCKED);
+
 	np = of_find_compatible_node(NULL, NULL, "fsl,imx6sl-gpt");
 	base = of_iomap(np, 0);
 	WARN_ON(!base);
diff --git a/arch/arm/mach-imx/mach-imx6q.c b/arch/arm/mach-imx/mach-imx6q.c
index d2ea6e6..76e5db4 100644
--- a/arch/arm/mach-imx/mach-imx6q.c
+++ b/arch/arm/mach-imx/mach-imx6q.c
@@ -133,6 +133,39 @@
 
 #define PHY_ID_AR8031	0x004dd074
 
+static int ar8035_phy_fixup(struct phy_device *dev)
+{
+	u16 val;
+
+	/* The AR803x PHY's SmartEEE feature causes link status glitches,
+	 * which lead to Ethernet link down/up issues, so disable SmartEEE.
+	 */
+	phy_write(dev, 0xd, 0x3);
+	phy_write(dev, 0xe, 0x805d);
+	phy_write(dev, 0xd, 0x4003);
+
+	val = phy_read(dev, 0xe);
+	phy_write(dev, 0xe, val & ~(1 << 8));
+
+	/*
+	 * Enable 125MHz clock from CLK_25M on the AR8031.  This
+	 * is fed in to the IMX6 on the ENET_REF_CLK (V22) pad.
+	 * Also, introduce a tx clock delay.
+	 *
+	 * This is the same fixup as for the AR8031.
+	 */
+	ar8031_phy_fixup(dev);
+
+	/* check phy power */
+	val = phy_read(dev, 0x0);
+	if (val & BMCR_PDOWN)
+		phy_write(dev, 0x0, val & ~BMCR_PDOWN);
+
+	return 0;
+}
+
+#define PHY_ID_AR8035 0x004dd072
+
 static void __init imx6q_enet_phy_init(void)
 {
 	if (IS_BUILTIN(CONFIG_PHYLIB)) {
@@ -142,6 +175,8 @@
 				ksz9031rn_phy_fixup);
 		phy_register_fixup_for_uid(PHY_ID_AR8031, 0xffffffff,
 				ar8031_phy_fixup);
+		phy_register_fixup_for_uid(PHY_ID_AR8035, 0xffffffef,
+				ar8035_phy_fixup);
 	}
 }
 
diff --git a/arch/arm/mach-imx/pm-imx6q.c b/arch/arm/mach-imx/pm-imx6q.c
index 9d47adc..7a9b985 100644
--- a/arch/arm/mach-imx/pm-imx6q.c
+++ b/arch/arm/mach-imx/pm-imx6q.c
@@ -236,8 +236,6 @@
 		regmap_update_bits(gpr, IOMUXC_GPR1, IMX6Q_GPR1_GINT,
 				   IMX6Q_GPR1_GINT);
 
-	/* Set initial power mode */
-	imx6q_set_lpm(WAIT_CLOCKED);
 
 	suspend_set_ops(&imx6q_pm_ops);
 }
diff --git a/arch/arm/mach-integrator/integrator_cp.c b/arch/arm/mach-integrator/integrator_cp.c
index 5e84149..a3ef961 100644
--- a/arch/arm/mach-integrator/integrator_cp.c
+++ b/arch/arm/mach-integrator/integrator_cp.c
@@ -64,6 +64,7 @@
 
 /*
  * Logical      Physical
+ * f1000000	10000000	Core module registers
  * f1300000	13000000	Counter/Timer
  * f1400000	14000000	Interrupt controller
  * f1600000	16000000	UART 0
@@ -75,6 +76,11 @@
 
 static struct map_desc intcp_io_desc[] __initdata __maybe_unused = {
 	{
+		.virtual	= IO_ADDRESS(INTEGRATOR_HDR_BASE),
+		.pfn		= __phys_to_pfn(INTEGRATOR_HDR_BASE),
+		.length		= SZ_4K,
+		.type		= MT_DEVICE
+	}, {
 		.virtual	= IO_ADDRESS(INTEGRATOR_CT_BASE),
 		.pfn		= __phys_to_pfn(INTEGRATOR_CT_BASE),
 		.length		= SZ_4K,
diff --git a/arch/arm/mach-iop32x/em7210.c b/arch/arm/mach-iop32x/em7210.c
index 177cd07..77e1ff0 100644
--- a/arch/arm/mach-iop32x/em7210.c
+++ b/arch/arm/mach-iop32x/em7210.c
@@ -23,6 +23,7 @@
 #include <linux/mtd/physmap.h>
 #include <linux/platform_device.h>
 #include <linux/i2c.h>
+#include <linux/gpio.h>
 #include <mach/hardware.h>
 #include <linux/io.h>
 #include <linux/irq.h>
@@ -176,12 +177,36 @@
 	.resource	= &em7210_uart_resource,
 };
 
+#define EM7210_HARDWARE_POWER 0
+
 void em7210_power_off(void)
 {
-	*IOP3XX_GPOE &= 0xfe;
-	*IOP3XX_GPOD |= 0x01;
+	int ret;
+
+	ret = gpio_direction_output(EM7210_HARDWARE_POWER, 1);
+	if (ret)
+		pr_crit("could not drive power off GPIO high\n");
 }
 
+static int __init em7210_request_gpios(void)
+{
+	int ret;
+
+	if (!machine_is_em7210())
+		return 0;
+
+	ret = gpio_request(EM7210_HARDWARE_POWER, "power");
+	if (ret) {
+		pr_err("could not request power off GPIO\n");
+		return 0;
+	}
+
+	pm_power_off = em7210_power_off;
+
+	return 0;
+}
+device_initcall(em7210_request_gpios);
+
 static void __init em7210_init_machine(void)
 {
 	register_iop32x_gpio();
@@ -194,9 +219,6 @@
 
 	i2c_register_board_info(0, em7210_i2c_devices,
 		ARRAY_SIZE(em7210_i2c_devices));
-
-
-	pm_power_off = em7210_power_off;
 }
 
 MACHINE_START(EM7210, "Lanner EM7210")
diff --git a/arch/arm/mach-keystone/Kconfig b/arch/arm/mach-keystone/Kconfig
index dabc5ee..90a708f 100644
--- a/arch/arm/mach-keystone/Kconfig
+++ b/arch/arm/mach-keystone/Kconfig
@@ -10,7 +10,6 @@
 	select ARCH_WANT_OPTIONAL_GPIOLIB
 	select ARM_ERRATA_798181 if SMP
 	select COMMON_CLK_KEYSTONE
-	select TI_EDMA
 	select ARCH_SUPPORTS_BIG_ENDIAN
 	select ZONE_DMA if ARM_LPAE
 	help
diff --git a/arch/arm/mach-kirkwood/pm.c b/arch/arm/mach-kirkwood/pm.c
index 8783a71..c6ab8d9 100644
--- a/arch/arm/mach-kirkwood/pm.c
+++ b/arch/arm/mach-kirkwood/pm.c
@@ -18,6 +18,7 @@
 #include <linux/suspend.h>
 #include <linux/io.h>
 #include <mach/bridge-regs.h>
+#include "common.h"
 
 static void __iomem *ddr_operation_base;
 
@@ -65,9 +66,8 @@
 	.valid = kirkwood_pm_valid_standby,
 };
 
-int __init kirkwood_pm_init(void)
+void __init kirkwood_pm_init(void)
 {
 	ddr_operation_base = ioremap(DDR_OPERATION_BASE, 4);
 	suspend_set_ops(&kirkwood_suspend_ops);
-	return 0;
 }
diff --git a/arch/arm/mach-moxart/Kconfig b/arch/arm/mach-moxart/Kconfig
index ba470d6..3795ae2 100644
--- a/arch/arm/mach-moxart/Kconfig
+++ b/arch/arm/mach-moxart/Kconfig
@@ -2,7 +2,6 @@
 	bool "MOXA ART SoC" if ARCH_MULTI_V4T
 	select CPU_FA526
 	select ARM_DMA_MEM_BUFFERABLE
-	select DMA_OF
 	select USE_OF
 	select CLKSRC_OF
 	select CLKSRC_MMIO
diff --git a/arch/arm/mach-mvebu/mvebu-soc-id.c b/arch/arm/mach-mvebu/mvebu-soc-id.c
index fe4fc1c..f3b325f 100644
--- a/arch/arm/mach-mvebu/mvebu-soc-id.c
+++ b/arch/arm/mach-mvebu/mvebu-soc-id.c
@@ -88,7 +88,7 @@
 	}
 
 	pci_base = of_iomap(child, 0);
-	if (IS_ERR(pci_base)) {
+	if (pci_base == NULL) {
 		pr_err("cannot map registers\n");
 		ret = -ENOMEM;
 		goto res_ioremap;
diff --git a/arch/arm/mach-omap2/Kconfig b/arch/arm/mach-omap2/Kconfig
index 653b489..e2ce4f8 100644
--- a/arch/arm/mach-omap2/Kconfig
+++ b/arch/arm/mach-omap2/Kconfig
@@ -54,7 +54,7 @@
 	select ARM_GIC
 	select CPU_V7
 	select HAVE_ARM_SCU if SMP
-	select HAVE_ARM_TWD if LOCAL_TIMERS
+	select HAVE_ARM_TWD if SMP
 	select HAVE_SMP
 	select HAVE_ARM_ARCH_TIMER
 	select ARM_ERRATA_798181 if SMP
diff --git a/arch/arm/mach-omap2/board-rx51-peripherals.c b/arch/arm/mach-omap2/board-rx51-peripherals.c
index f093af17..8760bbe 100644
--- a/arch/arm/mach-omap2/board-rx51-peripherals.c
+++ b/arch/arm/mach-omap2/board-rx51-peripherals.c
@@ -760,7 +760,14 @@
 	},
 };
 
+static const char * const si4713_supply_names[] = {
+	"vio",
+	"vdd",
+};
+
 static struct si4713_platform_data rx51_si4713_i2c_data __initdata_or_module = {
+	.supplies	= ARRAY_SIZE(si4713_supply_names),
+	.supply_names	= si4713_supply_names,
 	.gpio_reset	= RX51_FMTX_RESET_GPIO,
 };
 
diff --git a/arch/arm/mach-omap2/common.h b/arch/arm/mach-omap2/common.h
index e515278..a6aae30 100644
--- a/arch/arm/mach-omap2/common.h
+++ b/arch/arm/mach-omap2/common.h
@@ -62,11 +62,17 @@
 
 #if defined(CONFIG_PM) && defined(CONFIG_ARCH_OMAP4)
 int omap4_pm_init(void);
+int omap4_pm_init_early(void);
 #else
 static inline int omap4_pm_init(void)
 {
 	return 0;
 }
+
+static inline int omap4_pm_init_early(void)
+{
+	return 0;
+}
 #endif
 
 #ifdef CONFIG_OMAP_MUX
@@ -236,6 +242,7 @@
 
 extern void __init gic_init_irq(void);
 extern void gic_dist_disable(void);
+extern void gic_dist_enable(void);
 extern bool gic_dist_disabled(void);
 extern void gic_timer_retrigger(void);
 extern void omap_smc1(u32 fn, u32 arg);
diff --git a/arch/arm/mach-omap2/cpuidle44xx.c b/arch/arm/mach-omap2/cpuidle44xx.c
index 4c8982a..4c158c8 100644
--- a/arch/arm/mach-omap2/cpuidle44xx.c
+++ b/arch/arm/mach-omap2/cpuidle44xx.c
@@ -80,6 +80,7 @@
 			int index)
 {
 	struct idle_statedata *cx = state_ptr + index;
+	u32 mpuss_can_lose_context = 0;
 
 	/*
 	 * CPU0 has to wait and stay ON until CPU1 is OFF state.
@@ -104,6 +105,9 @@
 		}
 	}
 
+	mpuss_can_lose_context = (cx->mpu_state == PWRDM_POWER_RET) &&
+				 (cx->mpu_logic_state == PWRDM_POWER_OFF);
+
 	/*
 	 * Call idle CPU PM enter notifier chain so that
 	 * VFP and per CPU interrupt context is saved.
@@ -118,9 +122,8 @@
 		 * Call idle CPU cluster PM enter notifier chain
 		 * to save GIC and wakeupgen context.
 		 */
-		if ((cx->mpu_state == PWRDM_POWER_RET) &&
-			(cx->mpu_logic_state == PWRDM_POWER_OFF))
-				cpu_cluster_pm_enter();
+		if (mpuss_can_lose_context)
+			cpu_cluster_pm_enter();
 	}
 
 	omap4_enter_lowpower(dev->cpu, cx->cpu_state);
@@ -128,9 +131,23 @@
 
 	/* Wakeup CPU1 only if it is not offlined */
 	if (dev->cpu == 0 && cpumask_test_cpu(1, cpu_online_mask)) {
+
+		if (IS_PM44XX_ERRATUM(PM_OMAP4_ROM_SMP_BOOT_ERRATUM_GICD) &&
+		    mpuss_can_lose_context)
+			gic_dist_disable();
+
 		clkdm_wakeup(cpu_clkdm[1]);
 		omap_set_pwrdm_state(cpu_pd[1], PWRDM_POWER_ON);
 		clkdm_allow_idle(cpu_clkdm[1]);
+
+		if (IS_PM44XX_ERRATUM(PM_OMAP4_ROM_SMP_BOOT_ERRATUM_GICD) &&
+		    mpuss_can_lose_context) {
+			while (gic_dist_disabled()) {
+				udelay(1);
+				cpu_relax();
+			}
+			gic_timer_retrigger();
+		}
 	}
 
 	/*
@@ -143,8 +160,7 @@
 	 * Call idle CPU cluster PM exit notifier chain
 	 * to restore GIC and wakeupgen context.
 	 */
-	if (dev->cpu == 0 && (cx->mpu_state == PWRDM_POWER_RET) &&
-		(cx->mpu_logic_state == PWRDM_POWER_OFF))
+	if (dev->cpu == 0 && mpuss_can_lose_context)
 		cpu_cluster_pm_exit();
 
 fail:
diff --git a/arch/arm/mach-omap2/io.c b/arch/arm/mach-omap2/io.c
index 47381fd..d408b15 100644
--- a/arch/arm/mach-omap2/io.c
+++ b/arch/arm/mach-omap2/io.c
@@ -641,6 +641,7 @@
 	omap_cm_base_init();
 	omap4xxx_check_revision();
 	omap4xxx_check_features();
+	omap4_pm_init_early();
 	omap44xx_prm_init();
 	omap44xx_voltagedomains_init();
 	omap44xx_powerdomains_init();
diff --git a/arch/arm/mach-omap2/omap-mpuss-lowpower.c b/arch/arm/mach-omap2/omap-mpuss-lowpower.c
index f991016..667915d 100644
--- a/arch/arm/mach-omap2/omap-mpuss-lowpower.c
+++ b/arch/arm/mach-omap2/omap-mpuss-lowpower.c
@@ -271,6 +271,9 @@
 	else
 		omap_pm_ops.finish_suspend(save_state);
 
+	if (IS_PM44XX_ERRATUM(PM_OMAP4_ROM_SMP_BOOT_ERRATUM_GICD) && cpu)
+		gic_dist_enable();
+
 	/*
 	 * Restore the CPUx power state to ON otherwise CPUx
 	 * power domain can transitions to programmed low power
diff --git a/arch/arm/mach-omap2/omap-smp.c b/arch/arm/mach-omap2/omap-smp.c
index 75e95d4..17550aa 100644
--- a/arch/arm/mach-omap2/omap-smp.c
+++ b/arch/arm/mach-omap2/omap-smp.c
@@ -39,8 +39,6 @@
 
 #define OMAP5_CORE_COUNT	0x2
 
-u16 pm44xx_errata;
-
 /* SCU base address */
 static void __iomem *scu_base;
 
@@ -217,10 +215,8 @@
 	if (scu_base)
 		scu_enable(scu_base);
 
-	if (cpu_is_omap446x()) {
+	if (cpu_is_omap446x())
 		startup_addr = omap4460_secondary_startup;
-		pm44xx_errata |= PM_OMAP4_ROM_SMP_BOOT_ERRATUM_GICD;
-	}
 
 	/*
 	 * Write the address of secondary startup routine into the
diff --git a/arch/arm/mach-omap2/omap4-common.c b/arch/arm/mach-omap2/omap4-common.c
index dd893ec..6cd3f37 100644
--- a/arch/arm/mach-omap2/omap4-common.c
+++ b/arch/arm/mach-omap2/omap4-common.c
@@ -127,6 +127,12 @@
 		__raw_writel(0x0, gic_dist_base_addr + GIC_DIST_CTRL);
 }
 
+void gic_dist_enable(void)
+{
+	if (gic_dist_base_addr)
+		__raw_writel(0x1, gic_dist_base_addr + GIC_DIST_CTRL);
+}
+
 bool gic_dist_disabled(void)
 {
 	return !(__raw_readl(gic_dist_base_addr + GIC_DIST_CTRL) & 0x1);
diff --git a/arch/arm/mach-omap2/pm44xx.c b/arch/arm/mach-omap2/pm44xx.c
index 82f06989..eefb30c 100644
--- a/arch/arm/mach-omap2/pm44xx.c
+++ b/arch/arm/mach-omap2/pm44xx.c
@@ -24,6 +24,8 @@
 #include "powerdomain.h"
 #include "pm.h"
 
+u16 pm44xx_errata;
+
 struct power_state {
 	struct powerdomain *pwrdm;
 	u32 next_state;
@@ -199,6 +201,19 @@
 }
 
 /**
+ * omap4_pm_init_early - Does early initialization necessary for OMAP4+ devices
+ *
+ * Initializes the basics needed for power management, such as the PM errata flags.
+ */
+int __init omap4_pm_init_early(void)
+{
+	if (cpu_is_omap446x())
+		pm44xx_errata |= PM_OMAP4_ROM_SMP_BOOT_ERRATUM_GICD;
+
+	return 0;
+}
+
+/**
  * omap4_pm_init - Init routine for OMAP4+ devices
  *
  * Initializes all powerdomain and clockdomain target states
diff --git a/arch/arm/mach-pxa/am300epd.c b/arch/arm/mach-pxa/am300epd.c
index c9f309a..8b90c4f 100644
--- a/arch/arm/mach-pxa/am300epd.c
+++ b/arch/arm/mach-pxa/am300epd.c
@@ -30,6 +30,7 @@
 
 #include <mach/gumstix.h>
 #include <mach/mfp-pxa25x.h>
+#include <mach/irqs.h>
 #include <linux/platform_data/video-pxafb.h>
 
 #include "generic.h"
diff --git a/arch/arm/mach-pxa/include/mach/balloon3.h b/arch/arm/mach-pxa/include/mach/balloon3.h
index 954641e..1b08259 100644
--- a/arch/arm/mach-pxa/include/mach/balloon3.h
+++ b/arch/arm/mach-pxa/include/mach/balloon3.h
@@ -14,6 +14,8 @@
 #ifndef ASM_ARCH_BALLOON3_H
 #define ASM_ARCH_BALLOON3_H
 
+#include "irqs.h" /* PXA_NR_BUILTIN_GPIO */
+
 enum balloon3_features {
 	BALLOON3_FEATURE_OHCI,
 	BALLOON3_FEATURE_MMC,
diff --git a/arch/arm/mach-pxa/include/mach/corgi.h b/arch/arm/mach-pxa/include/mach/corgi.h
index f3c3493..c030d95 100644
--- a/arch/arm/mach-pxa/include/mach/corgi.h
+++ b/arch/arm/mach-pxa/include/mach/corgi.h
@@ -13,6 +13,7 @@
 #ifndef __ASM_ARCH_CORGI_H
 #define __ASM_ARCH_CORGI_H  1
 
+#include "irqs.h" /* PXA_NR_BUILTIN_GPIO */
 
 /*
  * Corgi (Non Standard) GPIO Definitions
diff --git a/arch/arm/mach-pxa/include/mach/csb726.h b/arch/arm/mach-pxa/include/mach/csb726.h
index 2628e7b..00cfbbb 100644
--- a/arch/arm/mach-pxa/include/mach/csb726.h
+++ b/arch/arm/mach-pxa/include/mach/csb726.h
@@ -11,6 +11,8 @@
 #ifndef CSB726_H
 #define CSB726_H
 
+#include "irqs.h" /* PXA_GPIO_TO_IRQ */
+
 #define CSB726_GPIO_IRQ_LAN	52
 #define CSB726_GPIO_IRQ_SM501	53
 #define CSB726_GPIO_MMC_DETECT	100
diff --git a/arch/arm/mach-pxa/include/mach/gumstix.h b/arch/arm/mach-pxa/include/mach/gumstix.h
index dba14b6..f7df27b 100644
--- a/arch/arm/mach-pxa/include/mach/gumstix.h
+++ b/arch/arm/mach-pxa/include/mach/gumstix.h
@@ -6,6 +6,7 @@
  * published by the Free Software Foundation.
  */
 
+#include "irqs.h" /* PXA_GPIO_TO_IRQ */
 
 /* BTRESET - Reset line to Bluetooth module, active low signal. */
 #define GPIO_GUMSTIX_BTRESET          7
diff --git a/arch/arm/mach-pxa/include/mach/idp.h b/arch/arm/mach-pxa/include/mach/idp.h
index 22a96f8..7e63f46 100644
--- a/arch/arm/mach-pxa/include/mach/idp.h
+++ b/arch/arm/mach-pxa/include/mach/idp.h
@@ -23,6 +23,7 @@
  * IDP hardware.
  */
 
+#include "irqs.h" /* PXA_GPIO_TO_IRQ */
 
 #define IDP_FLASH_PHYS		(PXA_CS0_PHYS)
 #define IDP_ALT_FLASH_PHYS	(PXA_CS1_PHYS)
diff --git a/arch/arm/mach-pxa/include/mach/palmld.h b/arch/arm/mach-pxa/include/mach/palmld.h
index 2c447133..b184f29 100644
--- a/arch/arm/mach-pxa/include/mach/palmld.h
+++ b/arch/arm/mach-pxa/include/mach/palmld.h
@@ -13,6 +13,8 @@
 #ifndef _INCLUDE_PALMLD_H_
 #define _INCLUDE_PALMLD_H_
 
+#include "irqs.h" /* PXA_GPIO_TO_IRQ */
+
 /** HERE ARE GPIOs **/
 
 /* GPIOs */
diff --git a/arch/arm/mach-pxa/include/mach/palmt5.h b/arch/arm/mach-pxa/include/mach/palmt5.h
index 0bd4f03..e342c59 100644
--- a/arch/arm/mach-pxa/include/mach/palmt5.h
+++ b/arch/arm/mach-pxa/include/mach/palmt5.h
@@ -15,6 +15,8 @@
 #ifndef _INCLUDE_PALMT5_H_
 #define _INCLUDE_PALMT5_H_
 
+#include "irqs.h" /* PXA_GPIO_TO_IRQ */
+
 /** HERE ARE GPIOs **/
 
 /* GPIOs */
diff --git a/arch/arm/mach-pxa/include/mach/palmtc.h b/arch/arm/mach-pxa/include/mach/palmtc.h
index c383a21..81c727b 100644
--- a/arch/arm/mach-pxa/include/mach/palmtc.h
+++ b/arch/arm/mach-pxa/include/mach/palmtc.h
@@ -16,6 +16,8 @@
 #ifndef _INCLUDE_PALMTC_H_
 #define _INCLUDE_PALMTC_H_
 
+#include "irqs.h" /* PXA_GPIO_TO_IRQ */
+
 /** HERE ARE GPIOs **/
 
 /* GPIOs */
diff --git a/arch/arm/mach-pxa/include/mach/palmtx.h b/arch/arm/mach-pxa/include/mach/palmtx.h
index f2e5303..92bc1f0 100644
--- a/arch/arm/mach-pxa/include/mach/palmtx.h
+++ b/arch/arm/mach-pxa/include/mach/palmtx.h
@@ -16,6 +16,8 @@
 #ifndef _INCLUDE_PALMTX_H_
 #define _INCLUDE_PALMTX_H_
 
+#include "irqs.h" /* PXA_GPIO_TO_IRQ */
+
 /** HERE ARE GPIOs **/
 
 /* GPIOs */
diff --git a/arch/arm/mach-pxa/include/mach/pcm027.h b/arch/arm/mach-pxa/include/mach/pcm027.h
index 6bf28de..86ebd7b 100644
--- a/arch/arm/mach-pxa/include/mach/pcm027.h
+++ b/arch/arm/mach-pxa/include/mach/pcm027.h
@@ -23,6 +23,8 @@
  * Definitions of CPU card resources only
  */
 
+#include "irqs.h" /* PXA_GPIO_TO_IRQ */
+
 /* phyCORE-PXA270 (PCM027) Interrupts */
 #define PCM027_IRQ(x)          (IRQ_BOARD_START + (x))
 #define PCM027_BTDET_IRQ       PCM027_IRQ(0)
diff --git a/arch/arm/mach-pxa/include/mach/pcm990_baseboard.h b/arch/arm/mach-pxa/include/mach/pcm990_baseboard.h
index 0260aaa..7e544c1 100644
--- a/arch/arm/mach-pxa/include/mach/pcm990_baseboard.h
+++ b/arch/arm/mach-pxa/include/mach/pcm990_baseboard.h
@@ -20,6 +20,7 @@
  */
 
 #include <mach/pcm027.h>
+#include "irqs.h" /* PXA_GPIO_TO_IRQ */
 
 /*
  * definitions relevant only when the PCM-990
diff --git a/arch/arm/mach-pxa/include/mach/poodle.h b/arch/arm/mach-pxa/include/mach/poodle.h
index f32ff75..b56b193 100644
--- a/arch/arm/mach-pxa/include/mach/poodle.h
+++ b/arch/arm/mach-pxa/include/mach/poodle.h
@@ -15,6 +15,8 @@
 #ifndef __ASM_ARCH_POODLE_H
 #define __ASM_ARCH_POODLE_H  1
 
+#include "irqs.h" /* PXA_GPIO_TO_IRQ */
+
 /*
  * GPIOs
  */
diff --git a/arch/arm/mach-pxa/include/mach/spitz.h b/arch/arm/mach-pxa/include/mach/spitz.h
index 0bfe650..25c9f62 100644
--- a/arch/arm/mach-pxa/include/mach/spitz.h
+++ b/arch/arm/mach-pxa/include/mach/spitz.h
@@ -15,8 +15,8 @@
 #define __ASM_ARCH_SPITZ_H  1
 #endif
 
+#include "irqs.h" /* PXA_NR_BUILTIN_GPIO, PXA_GPIO_TO_IRQ */
 #include <linux/fb.h>
-#include <linux/gpio.h>
 
 /* Spitz/Akita GPIOs */
 
diff --git a/arch/arm/mach-pxa/include/mach/tosa.h b/arch/arm/mach-pxa/include/mach/tosa.h
index 2bb0e86..0497d95 100644
--- a/arch/arm/mach-pxa/include/mach/tosa.h
+++ b/arch/arm/mach-pxa/include/mach/tosa.h
@@ -13,6 +13,8 @@
 #ifndef _ASM_ARCH_TOSA_H_
 #define _ASM_ARCH_TOSA_H_ 1
 
+#include "irqs.h" /* PXA_NR_BUILTIN_GPIO */
+
 /*  TOSA Chip selects  */
 #define TOSA_LCDC_PHYS		PXA_CS4_PHYS
 /* Internel Scoop */
diff --git a/arch/arm/mach-pxa/include/mach/trizeps4.h b/arch/arm/mach-pxa/include/mach/trizeps4.h
index d2ca010..ae3ca01 100644
--- a/arch/arm/mach-pxa/include/mach/trizeps4.h
+++ b/arch/arm/mach-pxa/include/mach/trizeps4.h
@@ -10,6 +10,8 @@
 #ifndef _TRIPEPS4_H_
 #define _TRIPEPS4_H_
 
+#include "irqs.h" /* PXA_GPIO_TO_IRQ */
+
 /* physical memory regions */
 #define TRIZEPS4_FLASH_PHYS	(PXA_CS0_PHYS)  /* Flash region */
 #define TRIZEPS4_DISK_PHYS	(PXA_CS1_PHYS)  /* Disk On Chip region */
diff --git a/arch/arm/mach-shmobile/Kconfig b/arch/arm/mach-shmobile/Kconfig
index 3386406..05fa505 100644
--- a/arch/arm/mach-shmobile/Kconfig
+++ b/arch/arm/mach-shmobile/Kconfig
@@ -8,7 +8,7 @@
 	select CPU_V7
 	select GENERIC_CLOCKEVENTS
 	select HAVE_ARM_SCU if SMP
-	select HAVE_ARM_TWD if LOCAL_TIMERS
+	select HAVE_ARM_TWD if SMP
 	select HAVE_SMP
 	select ARM_GIC
 	select MIGHT_HAVE_CACHE_L2X0
diff --git a/arch/arm/mach-tegra/pm.c b/arch/arm/mach-tegra/pm.c
index 4ae0286..f55b05a 100644
--- a/arch/arm/mach-tegra/pm.c
+++ b/arch/arm/mach-tegra/pm.c
@@ -24,6 +24,7 @@
 #include <linux/cpu_pm.h>
 #include <linux/suspend.h>
 #include <linux/err.h>
+#include <linux/slab.h>
 #include <linux/clk/tegra.h>
 
 #include <asm/smp_plat.h>
diff --git a/arch/arm/mach-zynq/common.c b/arch/arm/mach-zynq/common.c
index 1db2a5ca..8c09a83 100644
--- a/arch/arm/mach-zynq/common.c
+++ b/arch/arm/mach-zynq/common.c
@@ -25,6 +25,7 @@
 #include <linux/of_irq.h>
 #include <linux/of_platform.h>
 #include <linux/of.h>
+#include <linux/memblock.h>
 #include <linux/irqchip.h>
 #include <linux/irqchip/arm-gic.h>
 
@@ -41,6 +42,18 @@
 
 void __iomem *zynq_scu_base;
 
+/**
+ * zynq_memory_init - Initialize special memory
+ *
+ * We need to keep allocations out of low memory because DMA cannot
+ * work in the first 512K of memory.
+ */
+static void __init zynq_memory_init(void)
+{
+	if (!__pa(PAGE_OFFSET))
+		memblock_reserve(__pa(PAGE_OFFSET), __pa(swapper_pg_dir));
+}
+
 static struct platform_device zynq_cpuidle_device = {
 	.name = "cpuidle-zynq",
 };
@@ -117,5 +130,6 @@
 	.init_machine	= zynq_init_machine,
 	.init_time	= zynq_timer_init,
 	.dt_compat	= zynq_dt_match,
+	.reserve	= zynq_memory_init,
 	.restart	= zynq_system_reset,
 MACHINE_END
diff --git a/arch/arm/mm/dma-mapping.c b/arch/arm/mm/dma-mapping.c
index 1a77450..11b3914 100644
--- a/arch/arm/mm/dma-mapping.c
+++ b/arch/arm/mm/dma-mapping.c
@@ -1358,7 +1358,7 @@
 	*handle = DMA_ERROR_CODE;
 	size = PAGE_ALIGN(size);
 
-	if (gfp & GFP_ATOMIC)
+	if (!(gfp & __GFP_WAIT))
 		return __iommu_alloc_atomic(dev, size, handle);
 
 	/*
diff --git a/arch/arm/mm/init.c b/arch/arm/mm/init.c
index f57fb33..804d615 100644
--- a/arch/arm/mm/init.c
+++ b/arch/arm/mm/init.c
@@ -290,10 +290,11 @@
 #endif
 #ifdef CONFIG_BLK_DEV_INITRD
 	/* FDT scan will populate initrd_start */
-	if (initrd_start) {
+	if (initrd_start && !phys_initrd_size) {
 		phys_initrd_start = __virt_to_phys(initrd_start);
 		phys_initrd_size = initrd_end - initrd_start;
 	}
+	initrd_start = initrd_end = 0;
 	if (phys_initrd_size &&
 	    !memblock_is_region_memory(phys_initrd_start, phys_initrd_size)) {
 		pr_err("INITRD: 0x%08llx+0x%08lx is not a memory region - disabling initrd\n",
diff --git a/arch/arm/mm/mm.h b/arch/arm/mm/mm.h
index d5a982d..7ea641b 100644
--- a/arch/arm/mm/mm.h
+++ b/arch/arm/mm/mm.h
@@ -38,6 +38,7 @@
 
 struct mem_type {
 	pteval_t prot_pte;
+	pteval_t prot_pte_s2;
 	pmdval_t prot_l1;
 	pmdval_t prot_sect;
 	unsigned int domain;
diff --git a/arch/arm/mm/mmu.c b/arch/arm/mm/mmu.c
index 4f08c13..a623cb3 100644
--- a/arch/arm/mm/mmu.c
+++ b/arch/arm/mm/mmu.c
@@ -232,12 +232,16 @@
 #endif /* ifdef CONFIG_CPU_CP15 / else */
 
 #define PROT_PTE_DEVICE		L_PTE_PRESENT|L_PTE_YOUNG|L_PTE_DIRTY|L_PTE_XN
+#define PROT_PTE_S2_DEVICE	PROT_PTE_DEVICE
 #define PROT_SECT_DEVICE	PMD_TYPE_SECT|PMD_SECT_AP_WRITE
 
 static struct mem_type mem_types[] = {
 	[MT_DEVICE] = {		  /* Strongly ordered / ARMv6 shared device */
 		.prot_pte	= PROT_PTE_DEVICE | L_PTE_MT_DEV_SHARED |
 				  L_PTE_SHARED,
+		.prot_pte_s2	= s2_policy(PROT_PTE_S2_DEVICE) |
+				  s2_policy(L_PTE_S2_MT_DEV_SHARED) |
+				  L_PTE_SHARED,
 		.prot_l1	= PMD_TYPE_TABLE,
 		.prot_sect	= PROT_SECT_DEVICE | PMD_SECT_S,
 		.domain		= DOMAIN_IO,
@@ -508,7 +512,8 @@
 	cp = &cache_policies[cachepolicy];
 	vecs_pgprot = kern_pgprot = user_pgprot = cp->pte;
 	s2_pgprot = cp->pte_s2;
-	hyp_device_pgprot = s2_device_pgprot = mem_types[MT_DEVICE].prot_pte;
+	hyp_device_pgprot = mem_types[MT_DEVICE].prot_pte;
+	s2_device_pgprot = mem_types[MT_DEVICE].prot_pte_s2;
 
 	/*
 	 * ARMv6 and above have extended page tables.
diff --git a/arch/arm/mm/proc-v6.S b/arch/arm/mm/proc-v6.S
index 45dc29f..32b3558 100644
--- a/arch/arm/mm/proc-v6.S
+++ b/arch/arm/mm/proc-v6.S
@@ -208,7 +208,6 @@
 	mcr	p15, 0, r0, c7, c14, 0		@ clean+invalidate D cache
 	mcr	p15, 0, r0, c7, c5, 0		@ invalidate I cache
 	mcr	p15, 0, r0, c7, c15, 0		@ clean+invalidate cache
-	mcr	p15, 0, r0, c7, c10, 4		@ drain write buffer
 #ifdef CONFIG_MMU
 	mcr	p15, 0, r0, c8, c7, 0		@ invalidate I + D TLBs
 	mcr	p15, 0, r0, c2, c0, 2		@ TTB control register
@@ -218,6 +217,8 @@
 	ALT_UP(orr	r8, r8, #TTB_FLAGS_UP)
 	mcr	p15, 0, r8, c2, c0, 1		@ load TTB1
 #endif /* CONFIG_MMU */
+	mcr	p15, 0, r0, c7, c10, 4		@ drain write buffer and
+						@ complete invalidations
 	adr	r5, v6_crval
 	ldmia	r5, {r5, r6}
  ARM_BE8(orr	r6, r6, #1 << 25)		@ big-endian page tables
diff --git a/arch/arm/mm/proc-v7.S b/arch/arm/mm/proc-v7.S
index bd17819..74f6033 100644
--- a/arch/arm/mm/proc-v7.S
+++ b/arch/arm/mm/proc-v7.S
@@ -351,7 +351,6 @@
 
 4:	mov	r10, #0
 	mcr	p15, 0, r10, c7, c5, 0		@ I+BTB cache invalidate
-	dsb
 #ifdef CONFIG_MMU
 	mcr	p15, 0, r10, c8, c7, 0		@ invalidate I + D TLBs
 	v7_ttb_setup r10, r4, r8, r5		@ TTBCR, TTBRx setup
@@ -360,6 +359,7 @@
 	mcr	p15, 0, r5, c10, c2, 0		@ write PRRR
 	mcr	p15, 0, r6, c10, c2, 1		@ write NMRR
 #endif
+	dsb					@ Complete invalidations
 #ifndef CONFIG_ARM_THUMBEE
 	mrc	p15, 0, r0, c0, c1, 0		@ read ID_PFR0 for ThumbEE
 	and	r0, r0, #(0xf << 12)		@ ThumbEE enabled field
diff --git a/arch/arm/plat-orion/irq.c b/arch/arm/plat-orion/irq.c
index c492e1b..807df142 100644
--- a/arch/arm/plat-orion/irq.c
+++ b/arch/arm/plat-orion/irq.c
@@ -15,8 +15,51 @@
 #include <linux/io.h>
 #include <linux/of_address.h>
 #include <linux/of_irq.h>
+#include <asm/exception.h>
 #include <plat/irq.h>
 #include <plat/orion-gpio.h>
+#include <mach/bridge-regs.h>
+
+#ifdef CONFIG_MULTI_IRQ_HANDLER
+/*
+ * Compiling with both non-DT and DT support enabled will break
+ * the asm irq handler used by non-DT boards. Therefore, we
+ * provide a C-style irq handler even for non-DT boards if
+ * MULTI_IRQ_HANDLER is set.
+ *
+ * Notes:
+ * - this is prepared for Kirkwood and Dove only, update
+ *   accordingly if you add Orion5x or MV78x00.
+ * - Orion5x uses different macro names and has only one
+ *   set of CAUSE/MASK registers.
+ * - MV78x00 uses the same macro names but has a third
+ *   set of CAUSE/MASK registers.
+ *
+ */
+
+static void __iomem *orion_irq_base = IRQ_VIRT_BASE;
+
+asmlinkage void
+__exception_irq_entry orion_legacy_handle_irq(struct pt_regs *regs)
+{
+	u32 stat;
+
+	stat = readl_relaxed(orion_irq_base + IRQ_CAUSE_LOW_OFF);
+	stat &= readl_relaxed(orion_irq_base + IRQ_MASK_LOW_OFF);
+	if (stat) {
+		unsigned int hwirq = __fls(stat);
+		handle_IRQ(hwirq, regs);
+		return;
+	}
+	stat = readl_relaxed(orion_irq_base + IRQ_CAUSE_HIGH_OFF);
+	stat &= readl_relaxed(orion_irq_base + IRQ_MASK_HIGH_OFF);
+	if (stat) {
+		unsigned int hwirq = 32 + __fls(stat);
+		handle_IRQ(hwirq, regs);
+		return;
+	}
+}
+#endif
 
 void __init orion_irq_init(unsigned int irq_start, void __iomem *maskaddr)
 {
@@ -35,6 +78,10 @@
 	ct->chip.irq_unmask = irq_gc_mask_set_bit;
 	irq_setup_generic_chip(gc, IRQ_MSK(32), IRQ_GC_INIT_MASK_CACHE,
 			       IRQ_NOREQUEST, IRQ_LEVEL | IRQ_NOPROBE);
+
+#ifdef CONFIG_MULTI_IRQ_HANDLER
+	set_handle_irq(orion_legacy_handle_irq);
+#endif
 }
 
 #ifdef CONFIG_OF
diff --git a/arch/arm/xen/enlighten.c b/arch/arm/xen/enlighten.c
index 2162172..b96723e 100644
--- a/arch/arm/xen/enlighten.c
+++ b/arch/arm/xen/enlighten.c
@@ -23,6 +23,7 @@
 #include <linux/of_address.h>
 #include <linux/cpuidle.h>
 #include <linux/cpufreq.h>
+#include <linux/cpu.h>
 
 #include <linux/mm.h>
 
@@ -154,7 +155,7 @@
 }
 EXPORT_SYMBOL_GPL(xen_unmap_domain_mfn_range);
 
-static void __init xen_percpu_init(void *unused)
+static void xen_percpu_init(void)
 {
 	struct vcpu_register_vcpu_info info;
 	struct vcpu_info *vcpup;
@@ -193,6 +194,31 @@
 		BUG();
 }
 
+static int xen_cpu_notification(struct notifier_block *self,
+				unsigned long action,
+				void *hcpu)
+{
+	switch (action) {
+	case CPU_STARTING:
+		xen_percpu_init();
+		break;
+	default:
+		break;
+	}
+
+	return NOTIFY_OK;
+}
+
+static struct notifier_block xen_cpu_notifier = {
+	.notifier_call = xen_cpu_notification,
+};
+
+static irqreturn_t xen_arm_callback(int irq, void *arg)
+{
+	xen_hvm_evtchn_do_upcall();
+	return IRQ_HANDLED;
+}
+
 /*
  * see Documentation/devicetree/bindings/arm/xen.txt for the
  * documentation of the Xen Device Tree format.
@@ -208,7 +234,7 @@
 	const char *version = NULL;
 	const char *xen_prefix = "xen,xen-";
 	struct resource res;
-	unsigned long grant_frames;
+	phys_addr_t grant_frames;
 
 	node = of_find_compatible_node(NULL, NULL, "xen,xen");
 	if (!node) {
@@ -227,8 +253,12 @@
 		return 0;
 	grant_frames = res.start;
 	xen_events_irq = irq_of_parse_and_map(node, 0);
-	pr_info("Xen %s support found, events_irq=%d gnttab_frame_pfn=%lx\n",
-			version, xen_events_irq, (grant_frames >> PAGE_SHIFT));
+	pr_info("Xen %s support found, events_irq=%d gnttab_frame=%pa\n",
+			version, xen_events_irq, &grant_frames);
+
+	if (xen_events_irq < 0)
+		return -ENODEV;
+
 	xen_domain_type = XEN_HVM_DOMAIN;
 
 	xen_setup_features();
@@ -281,9 +311,21 @@
 	disable_cpuidle();
 	disable_cpufreq();
 
+	xen_init_IRQ();
+
+	if (request_percpu_irq(xen_events_irq, xen_arm_callback,
+			       "events", &xen_vcpu)) {
+		pr_err("Error requesting IRQ %d\n", xen_events_irq);
+		return -EINVAL;
+	}
+
+	xen_percpu_init();
+
+	register_cpu_notifier(&xen_cpu_notifier);
+
 	return 0;
 }
-core_initcall(xen_guest_init);
+early_initcall(xen_guest_init);
 
 static int __init xen_pm_init(void)
 {
@@ -297,31 +339,6 @@
 }
 late_initcall(xen_pm_init);
 
-static irqreturn_t xen_arm_callback(int irq, void *arg)
-{
-	xen_hvm_evtchn_do_upcall();
-	return IRQ_HANDLED;
-}
-
-static int __init xen_init_events(void)
-{
-	if (!xen_domain() || xen_events_irq < 0)
-		return -ENODEV;
-
-	xen_init_IRQ();
-
-	if (request_percpu_irq(xen_events_irq, xen_arm_callback,
-			"events", &xen_vcpu)) {
-		pr_err("Error requesting IRQ %d\n", xen_events_irq);
-		return -EINVAL;
-	}
-
-	on_each_cpu(xen_percpu_init, NULL, 0);
-
-	return 0;
-}
-postcore_initcall(xen_init_events);
-
 /* In the hypervisor.S file. */
 EXPORT_SYMBOL_GPL(HYPERVISOR_event_channel_op);
 EXPORT_SYMBOL_GPL(HYPERVISOR_grant_table_op);
diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig
index dd4327f..27bbcfc 100644
--- a/arch/arm64/Kconfig
+++ b/arch/arm64/Kconfig
@@ -36,6 +36,7 @@
 	select HAVE_GENERIC_DMA_COHERENT
 	select HAVE_HW_BREAKPOINT if PERF_EVENTS
 	select HAVE_MEMBLOCK
+	select HAVE_PATA_PLATFORM
 	select HAVE_PERF_EVENTS
 	select IRQ_DOMAIN
 	select MODULES_USE_ELF_RELA
diff --git a/arch/arm64/configs/defconfig b/arch/arm64/configs/defconfig
index 84139be..7959dd0 100644
--- a/arch/arm64/configs/defconfig
+++ b/arch/arm64/configs/defconfig
@@ -1,4 +1,3 @@
-CONFIG_EXPERIMENTAL=y
 # CONFIG_LOCALVERSION_AUTO is not set
 # CONFIG_SWAP is not set
 CONFIG_SYSVIPC=y
@@ -19,6 +18,7 @@
 CONFIG_KALLSYMS_ALL=y
 # CONFIG_COMPAT_BRK is not set
 CONFIG_PROFILING=y
+CONFIG_JUMP_LABEL=y
 CONFIG_MODULES=y
 CONFIG_MODULE_UNLOAD=y
 # CONFIG_BLK_DEV_BSG is not set
@@ -27,6 +27,7 @@
 CONFIG_ARCH_XGENE=y
 CONFIG_SMP=y
 CONFIG_PREEMPT=y
+CONFIG_CMA=y
 CONFIG_CMDLINE="console=ttyAMA0"
 # CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set
 CONFIG_COMPAT=y
@@ -42,14 +43,17 @@
 # CONFIG_WIRELESS is not set
 CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
 CONFIG_DEVTMPFS=y
-CONFIG_BLK_DEV=y
+CONFIG_DMA_CMA=y
 CONFIG_SCSI=y
 # CONFIG_SCSI_PROC_FS is not set
 CONFIG_BLK_DEV_SD=y
 # CONFIG_SCSI_LOWLEVEL is not set
+CONFIG_ATA=y
+CONFIG_PATA_PLATFORM=y
+CONFIG_PATA_OF_PLATFORM=y
 CONFIG_NETDEVICES=y
-CONFIG_MII=y
 CONFIG_SMC91X=y
+CONFIG_SMSC911X=y
 # CONFIG_WLAN is not set
 CONFIG_INPUT_EVDEV=y
 # CONFIG_SERIO_I8042 is not set
@@ -62,13 +66,19 @@
 CONFIG_SERIAL_AMBA_PL011_CONSOLE=y
 # CONFIG_HW_RANDOM is not set
 # CONFIG_HWMON is not set
+CONFIG_REGULATOR=y
+CONFIG_REGULATOR_FIXED_VOLTAGE=y
 CONFIG_FB=y
 # CONFIG_VGA_CONSOLE is not set
 CONFIG_FRAMEBUFFER_CONSOLE=y
 CONFIG_LOGO=y
 # CONFIG_LOGO_LINUX_MONO is not set
 # CONFIG_LOGO_LINUX_VGA16 is not set
-# CONFIG_USB_SUPPORT is not set
+CONFIG_USB=y
+CONFIG_USB_ISP1760_HCD=y
+CONFIG_USB_STORAGE=y
+CONFIG_MMC=y
+CONFIG_MMC_ARMMMCI=y
 # CONFIG_IOMMU_SUPPORT is not set
 CONFIG_EXT2_FS=y
 CONFIG_EXT3_FS=y
diff --git a/arch/arm64/include/asm/atomic.h b/arch/arm64/include/asm/atomic.h
index 01de5aa..0237f08 100644
--- a/arch/arm64/include/asm/atomic.h
+++ b/arch/arm64/include/asm/atomic.h
@@ -54,8 +54,7 @@
 "	stxr	%w1, %w0, %2\n"
 "	cbnz	%w1, 1b"
 	: "=&r" (result), "=&r" (tmp), "+Q" (v->counter)
-	: "Ir" (i)
-	: "cc");
+	: "Ir" (i));
 }
 
 static inline int atomic_add_return(int i, atomic_t *v)
@@ -64,14 +63,15 @@
 	int result;
 
 	asm volatile("// atomic_add_return\n"
-"1:	ldaxr	%w0, %2\n"
+"1:	ldxr	%w0, %2\n"
 "	add	%w0, %w0, %w3\n"
 "	stlxr	%w1, %w0, %2\n"
 "	cbnz	%w1, 1b"
 	: "=&r" (result), "=&r" (tmp), "+Q" (v->counter)
 	: "Ir" (i)
-	: "cc", "memory");
+	: "memory");
 
+	smp_mb();
 	return result;
 }
 
@@ -86,8 +86,7 @@
 "	stxr	%w1, %w0, %2\n"
 "	cbnz	%w1, 1b"
 	: "=&r" (result), "=&r" (tmp), "+Q" (v->counter)
-	: "Ir" (i)
-	: "cc");
+	: "Ir" (i));
 }
 
 static inline int atomic_sub_return(int i, atomic_t *v)
@@ -96,14 +95,15 @@
 	int result;
 
 	asm volatile("// atomic_sub_return\n"
-"1:	ldaxr	%w0, %2\n"
+"1:	ldxr	%w0, %2\n"
 "	sub	%w0, %w0, %w3\n"
 "	stlxr	%w1, %w0, %2\n"
 "	cbnz	%w1, 1b"
 	: "=&r" (result), "=&r" (tmp), "+Q" (v->counter)
 	: "Ir" (i)
-	: "cc", "memory");
+	: "memory");
 
+	smp_mb();
 	return result;
 }
 
@@ -112,17 +112,20 @@
 	unsigned long tmp;
 	int oldval;
 
+	smp_mb();
+
 	asm volatile("// atomic_cmpxchg\n"
-"1:	ldaxr	%w1, %2\n"
+"1:	ldxr	%w1, %2\n"
 "	cmp	%w1, %w3\n"
 "	b.ne	2f\n"
-"	stlxr	%w0, %w4, %2\n"
+"	stxr	%w0, %w4, %2\n"
 "	cbnz	%w0, 1b\n"
 "2:"
 	: "=&r" (tmp), "=&r" (oldval), "+Q" (ptr->counter)
 	: "Ir" (old), "r" (new)
-	: "cc", "memory");
+	: "cc");
 
+	smp_mb();
 	return oldval;
 }
 
@@ -173,8 +176,7 @@
 "	stxr	%w1, %0, %2\n"
 "	cbnz	%w1, 1b"
 	: "=&r" (result), "=&r" (tmp), "+Q" (v->counter)
-	: "Ir" (i)
-	: "cc");
+	: "Ir" (i));
 }
 
 static inline long atomic64_add_return(long i, atomic64_t *v)
@@ -183,14 +185,15 @@
 	unsigned long tmp;
 
 	asm volatile("// atomic64_add_return\n"
-"1:	ldaxr	%0, %2\n"
+"1:	ldxr	%0, %2\n"
 "	add	%0, %0, %3\n"
 "	stlxr	%w1, %0, %2\n"
 "	cbnz	%w1, 1b"
 	: "=&r" (result), "=&r" (tmp), "+Q" (v->counter)
 	: "Ir" (i)
-	: "cc", "memory");
+	: "memory");
 
+	smp_mb();
 	return result;
 }
 
@@ -205,8 +208,7 @@
 "	stxr	%w1, %0, %2\n"
 "	cbnz	%w1, 1b"
 	: "=&r" (result), "=&r" (tmp), "+Q" (v->counter)
-	: "Ir" (i)
-	: "cc");
+	: "Ir" (i));
 }
 
 static inline long atomic64_sub_return(long i, atomic64_t *v)
@@ -215,14 +217,15 @@
 	unsigned long tmp;
 
 	asm volatile("// atomic64_sub_return\n"
-"1:	ldaxr	%0, %2\n"
+"1:	ldxr	%0, %2\n"
 "	sub	%0, %0, %3\n"
 "	stlxr	%w1, %0, %2\n"
 "	cbnz	%w1, 1b"
 	: "=&r" (result), "=&r" (tmp), "+Q" (v->counter)
 	: "Ir" (i)
-	: "cc", "memory");
+	: "memory");
 
+	smp_mb();
 	return result;
 }
 
@@ -231,17 +234,20 @@
 	long oldval;
 	unsigned long res;
 
+	smp_mb();
+
 	asm volatile("// atomic64_cmpxchg\n"
-"1:	ldaxr	%1, %2\n"
+"1:	ldxr	%1, %2\n"
 "	cmp	%1, %3\n"
 "	b.ne	2f\n"
-"	stlxr	%w0, %4, %2\n"
+"	stxr	%w0, %4, %2\n"
 "	cbnz	%w0, 1b\n"
 "2:"
 	: "=&r" (res), "=&r" (oldval), "+Q" (ptr->counter)
 	: "Ir" (old), "r" (new)
-	: "cc", "memory");
+	: "cc");
 
+	smp_mb();
 	return oldval;
 }
 
@@ -253,11 +259,12 @@
 	unsigned long tmp;
 
 	asm volatile("// atomic64_dec_if_positive\n"
-"1:	ldaxr	%0, %2\n"
+"1:	ldxr	%0, %2\n"
 "	subs	%0, %0, #1\n"
 "	b.mi	2f\n"
 "	stlxr	%w1, %0, %2\n"
 "	cbnz	%w1, 1b\n"
+"	dmb	ish\n"
 "2:"
 	: "=&r" (result), "=&r" (tmp), "+Q" (v->counter)
 	:
diff --git a/arch/arm64/include/asm/barrier.h b/arch/arm64/include/asm/barrier.h
index 78e20ba..409ca37 100644
--- a/arch/arm64/include/asm/barrier.h
+++ b/arch/arm64/include/asm/barrier.h
@@ -25,7 +25,7 @@
 #define wfi()		asm volatile("wfi" : : : "memory")
 
 #define isb()		asm volatile("isb" : : : "memory")
-#define dsb()		asm volatile("dsb sy" : : : "memory")
+#define dsb(opt)	asm volatile("dsb sy" : : : "memory")
 
 #define mb()		dsb()
 #define rmb()		asm volatile("dsb ld" : : : "memory")
diff --git a/arch/arm64/include/asm/cacheflush.h b/arch/arm64/include/asm/cacheflush.h
index fea9ee3..88932498 100644
--- a/arch/arm64/include/asm/cacheflush.h
+++ b/arch/arm64/include/asm/cacheflush.h
@@ -116,6 +116,7 @@
 static inline void __flush_icache_all(void)
 {
 	asm("ic	ialluis");
+	dsb();
 }
 
 #define flush_dcache_mmap_lock(mapping) \
diff --git a/arch/arm64/include/asm/cmpxchg.h b/arch/arm64/include/asm/cmpxchg.h
index 56166d7..57c0fa7 100644
--- a/arch/arm64/include/asm/cmpxchg.h
+++ b/arch/arm64/include/asm/cmpxchg.h
@@ -29,44 +29,45 @@
 	switch (size) {
 	case 1:
 		asm volatile("//	__xchg1\n"
-		"1:	ldaxrb	%w0, %2\n"
+		"1:	ldxrb	%w0, %2\n"
 		"	stlxrb	%w1, %w3, %2\n"
 		"	cbnz	%w1, 1b\n"
 			: "=&r" (ret), "=&r" (tmp), "+Q" (*(u8 *)ptr)
 			: "r" (x)
-			: "cc", "memory");
+			: "memory");
 		break;
 	case 2:
 		asm volatile("//	__xchg2\n"
-		"1:	ldaxrh	%w0, %2\n"
+		"1:	ldxrh	%w0, %2\n"
 		"	stlxrh	%w1, %w3, %2\n"
 		"	cbnz	%w1, 1b\n"
 			: "=&r" (ret), "=&r" (tmp), "+Q" (*(u16 *)ptr)
 			: "r" (x)
-			: "cc", "memory");
+			: "memory");
 		break;
 	case 4:
 		asm volatile("//	__xchg4\n"
-		"1:	ldaxr	%w0, %2\n"
+		"1:	ldxr	%w0, %2\n"
 		"	stlxr	%w1, %w3, %2\n"
 		"	cbnz	%w1, 1b\n"
 			: "=&r" (ret), "=&r" (tmp), "+Q" (*(u32 *)ptr)
 			: "r" (x)
-			: "cc", "memory");
+			: "memory");
 		break;
 	case 8:
 		asm volatile("//	__xchg8\n"
-		"1:	ldaxr	%0, %2\n"
+		"1:	ldxr	%0, %2\n"
 		"	stlxr	%w1, %3, %2\n"
 		"	cbnz	%w1, 1b\n"
 			: "=&r" (ret), "=&r" (tmp), "+Q" (*(u64 *)ptr)
 			: "r" (x)
-			: "cc", "memory");
+			: "memory");
 		break;
 	default:
 		BUILD_BUG();
 	}
 
+	smp_mb();
 	return ret;
 }
 
diff --git a/arch/arm64/include/asm/dma-contiguous.h b/arch/arm64/include/asm/dma-contiguous.h
index d6aacb6..14c4c0c 100644
--- a/arch/arm64/include/asm/dma-contiguous.h
+++ b/arch/arm64/include/asm/dma-contiguous.h
@@ -18,7 +18,6 @@
 #ifdef CONFIG_DMA_CMA
 
 #include <linux/types.h>
-#include <asm-generic/dma-contiguous.h>
 
 static inline void
 dma_contiguous_early_fixup(phys_addr_t base, unsigned long size) { }
diff --git a/arch/arm64/include/asm/esr.h b/arch/arm64/include/asm/esr.h
index 7883412..c4a7f94 100644
--- a/arch/arm64/include/asm/esr.h
+++ b/arch/arm64/include/asm/esr.h
@@ -42,7 +42,7 @@
 #define ESR_EL1_EC_SP_ALIGN	(0x26)
 #define ESR_EL1_EC_FP_EXC32	(0x28)
 #define ESR_EL1_EC_FP_EXC64	(0x2C)
-#define ESR_EL1_EC_SERRROR	(0x2F)
+#define ESR_EL1_EC_SERROR	(0x2F)
 #define ESR_EL1_EC_BREAKPT_EL0	(0x30)
 #define ESR_EL1_EC_BREAKPT_EL1	(0x31)
 #define ESR_EL1_EC_SOFTSTP_EL0	(0x32)
diff --git a/arch/arm64/include/asm/futex.h b/arch/arm64/include/asm/futex.h
index 78cc3ab..5f750dc 100644
--- a/arch/arm64/include/asm/futex.h
+++ b/arch/arm64/include/asm/futex.h
@@ -24,10 +24,11 @@
 
 #define __futex_atomic_op(insn, ret, oldval, uaddr, tmp, oparg)		\
 	asm volatile(							\
-"1:	ldaxr	%w1, %2\n"						\
+"1:	ldxr	%w1, %2\n"						\
 	insn "\n"							\
 "2:	stlxr	%w3, %w0, %2\n"						\
 "	cbnz	%w3, 1b\n"						\
+"	dmb	ish\n"							\
 "3:\n"									\
 "	.pushsection .fixup,\"ax\"\n"					\
 "	.align	2\n"							\
@@ -40,7 +41,7 @@
 "	.popsection\n"							\
 	: "=&r" (ret), "=&r" (oldval), "+Q" (*uaddr), "=&r" (tmp)	\
 	: "r" (oparg), "Ir" (-EFAULT)					\
-	: "cc", "memory")
+	: "memory")
 
 static inline int
 futex_atomic_op_inuser (int encoded_op, u32 __user *uaddr)
@@ -111,11 +112,12 @@
 		return -EFAULT;
 
 	asm volatile("// futex_atomic_cmpxchg_inatomic\n"
-"1:	ldaxr	%w1, %2\n"
+"1:	ldxr	%w1, %2\n"
 "	sub	%w3, %w1, %w4\n"
 "	cbnz	%w3, 3f\n"
 "2:	stlxr	%w3, %w5, %2\n"
 "	cbnz	%w3, 1b\n"
+"	dmb	ish\n"
 "3:\n"
 "	.pushsection .fixup,\"ax\"\n"
 "4:	mov	%w0, %w6\n"
@@ -127,7 +129,7 @@
 "	.popsection\n"
 	: "+r" (ret), "=&r" (val), "+Q" (*uaddr), "=&r" (tmp)
 	: "r" (oldval), "r" (newval), "Ir" (-EFAULT)
-	: "cc", "memory");
+	: "memory");
 
 	*uval = val;
 	return ret;
diff --git a/arch/arm64/include/asm/kvm_arm.h b/arch/arm64/include/asm/kvm_arm.h
index c98ef47..0eb3986 100644
--- a/arch/arm64/include/asm/kvm_arm.h
+++ b/arch/arm64/include/asm/kvm_arm.h
@@ -231,7 +231,7 @@
 #define ESR_EL2_EC_SP_ALIGN	(0x26)
 #define ESR_EL2_EC_FP_EXC32	(0x28)
 #define ESR_EL2_EC_FP_EXC64	(0x2C)
-#define ESR_EL2_EC_SERRROR	(0x2F)
+#define ESR_EL2_EC_SERROR	(0x2F)
 #define ESR_EL2_EC_BREAKPT	(0x30)
 #define ESR_EL2_EC_BREAKPT_HYP	(0x31)
 #define ESR_EL2_EC_SOFTSTP	(0x32)
diff --git a/arch/arm64/include/asm/pgtable.h b/arch/arm64/include/asm/pgtable.h
index 7f2b60a..b524dcd 100644
--- a/arch/arm64/include/asm/pgtable.h
+++ b/arch/arm64/include/asm/pgtable.h
@@ -28,7 +28,7 @@
 #define PTE_FILE		(_AT(pteval_t, 1) << 2)	/* only when !pte_present() */
 #define PTE_DIRTY		(_AT(pteval_t, 1) << 55)
 #define PTE_SPECIAL		(_AT(pteval_t, 1) << 56)
-				/* bit 57 for PMD_SECT_SPLITTING */
+#define PTE_WRITE		(_AT(pteval_t, 1) << 57)
 #define PTE_PROT_NONE		(_AT(pteval_t, 1) << 58) /* only when !PTE_VALID */
 
 /*
@@ -67,15 +67,15 @@
 
 #define _MOD_PROT(p, b)		__pgprot_modify(p, 0, b)
 
-#define PAGE_NONE		__pgprot_modify(pgprot_default, PTE_TYPE_MASK, PTE_PROT_NONE | PTE_RDONLY | PTE_PXN | PTE_UXN)
-#define PAGE_SHARED		_MOD_PROT(pgprot_default, PTE_USER | PTE_NG | PTE_PXN | PTE_UXN)
-#define PAGE_SHARED_EXEC	_MOD_PROT(pgprot_default, PTE_USER | PTE_NG | PTE_PXN)
-#define PAGE_COPY		_MOD_PROT(pgprot_default, PTE_USER | PTE_NG | PTE_PXN | PTE_UXN | PTE_RDONLY)
-#define PAGE_COPY_EXEC		_MOD_PROT(pgprot_default, PTE_USER | PTE_NG | PTE_PXN | PTE_RDONLY)
-#define PAGE_READONLY		_MOD_PROT(pgprot_default, PTE_USER | PTE_NG | PTE_PXN | PTE_UXN | PTE_RDONLY)
-#define PAGE_READONLY_EXEC	_MOD_PROT(pgprot_default, PTE_USER | PTE_NG | PTE_PXN | PTE_RDONLY)
-#define PAGE_KERNEL		_MOD_PROT(pgprot_default, PTE_PXN | PTE_UXN | PTE_DIRTY)
-#define PAGE_KERNEL_EXEC	_MOD_PROT(pgprot_default, PTE_UXN | PTE_DIRTY)
+#define PAGE_NONE		__pgprot_modify(pgprot_default, PTE_TYPE_MASK, PTE_PROT_NONE | PTE_PXN | PTE_UXN)
+#define PAGE_SHARED		_MOD_PROT(pgprot_default, PTE_USER | PTE_NG | PTE_PXN | PTE_UXN | PTE_WRITE)
+#define PAGE_SHARED_EXEC	_MOD_PROT(pgprot_default, PTE_USER | PTE_NG | PTE_PXN | PTE_WRITE)
+#define PAGE_COPY		_MOD_PROT(pgprot_default, PTE_USER | PTE_NG | PTE_PXN | PTE_UXN)
+#define PAGE_COPY_EXEC		_MOD_PROT(pgprot_default, PTE_USER | PTE_NG | PTE_PXN)
+#define PAGE_READONLY		_MOD_PROT(pgprot_default, PTE_USER | PTE_NG | PTE_PXN | PTE_UXN)
+#define PAGE_READONLY_EXEC	_MOD_PROT(pgprot_default, PTE_USER | PTE_NG | PTE_PXN)
+#define PAGE_KERNEL		_MOD_PROT(pgprot_default, PTE_PXN | PTE_UXN | PTE_DIRTY | PTE_WRITE)
+#define PAGE_KERNEL_EXEC	_MOD_PROT(pgprot_default, PTE_UXN | PTE_DIRTY | PTE_WRITE)
 
 #define PAGE_HYP		_MOD_PROT(pgprot_default, PTE_HYP)
 #define PAGE_HYP_DEVICE		__pgprot(PROT_DEVICE_nGnRE | PTE_HYP)
@@ -83,13 +83,13 @@
 #define PAGE_S2			__pgprot_modify(pgprot_default, PTE_S2_MEMATTR_MASK, PTE_S2_MEMATTR(MT_S2_NORMAL) | PTE_S2_RDONLY)
 #define PAGE_S2_DEVICE		__pgprot(PROT_DEFAULT | PTE_S2_MEMATTR(MT_S2_DEVICE_nGnRE) | PTE_S2_RDWR | PTE_UXN)
 
-#define __PAGE_NONE		__pgprot(((_PAGE_DEFAULT) & ~PTE_TYPE_MASK) | PTE_PROT_NONE | PTE_RDONLY | PTE_PXN | PTE_UXN)
-#define __PAGE_SHARED		__pgprot(_PAGE_DEFAULT | PTE_USER | PTE_NG | PTE_PXN | PTE_UXN)
-#define __PAGE_SHARED_EXEC	__pgprot(_PAGE_DEFAULT | PTE_USER | PTE_NG | PTE_PXN)
-#define __PAGE_COPY		__pgprot(_PAGE_DEFAULT | PTE_USER | PTE_NG | PTE_PXN | PTE_UXN | PTE_RDONLY)
-#define __PAGE_COPY_EXEC	__pgprot(_PAGE_DEFAULT | PTE_USER | PTE_NG | PTE_PXN | PTE_RDONLY)
-#define __PAGE_READONLY		__pgprot(_PAGE_DEFAULT | PTE_USER | PTE_NG | PTE_PXN | PTE_UXN | PTE_RDONLY)
-#define __PAGE_READONLY_EXEC	__pgprot(_PAGE_DEFAULT | PTE_USER | PTE_NG | PTE_PXN | PTE_RDONLY)
+#define __PAGE_NONE		__pgprot(((_PAGE_DEFAULT) & ~PTE_TYPE_MASK) | PTE_PROT_NONE | PTE_PXN | PTE_UXN)
+#define __PAGE_SHARED		__pgprot(_PAGE_DEFAULT | PTE_USER | PTE_NG | PTE_PXN | PTE_UXN | PTE_WRITE)
+#define __PAGE_SHARED_EXEC	__pgprot(_PAGE_DEFAULT | PTE_USER | PTE_NG | PTE_PXN | PTE_WRITE)
+#define __PAGE_COPY		__pgprot(_PAGE_DEFAULT | PTE_USER | PTE_NG | PTE_PXN | PTE_UXN)
+#define __PAGE_COPY_EXEC	__pgprot(_PAGE_DEFAULT | PTE_USER | PTE_NG | PTE_PXN)
+#define __PAGE_READONLY		__pgprot(_PAGE_DEFAULT | PTE_USER | PTE_NG | PTE_PXN | PTE_UXN)
+#define __PAGE_READONLY_EXEC	__pgprot(_PAGE_DEFAULT | PTE_USER | PTE_NG | PTE_PXN)
 
 #endif /* __ASSEMBLY__ */
 
@@ -140,22 +140,53 @@
 #define pte_dirty(pte)		(pte_val(pte) & PTE_DIRTY)
 #define pte_young(pte)		(pte_val(pte) & PTE_AF)
 #define pte_special(pte)	(pte_val(pte) & PTE_SPECIAL)
-#define pte_write(pte)		(!(pte_val(pte) & PTE_RDONLY))
+#define pte_write(pte)		(pte_val(pte) & PTE_WRITE)
 #define pte_exec(pte)		(!(pte_val(pte) & PTE_UXN))
 
 #define pte_valid_user(pte) \
 	((pte_val(pte) & (PTE_VALID | PTE_USER)) == (PTE_VALID | PTE_USER))
 
-#define PTE_BIT_FUNC(fn,op) \
-static inline pte_t pte_##fn(pte_t pte) { pte_val(pte) op; return pte; }
+static inline pte_t pte_wrprotect(pte_t pte)
+{
+	pte_val(pte) &= ~PTE_WRITE;
+	return pte;
+}
 
-PTE_BIT_FUNC(wrprotect, |= PTE_RDONLY);
-PTE_BIT_FUNC(mkwrite,   &= ~PTE_RDONLY);
-PTE_BIT_FUNC(mkclean,   &= ~PTE_DIRTY);
-PTE_BIT_FUNC(mkdirty,   |= PTE_DIRTY);
-PTE_BIT_FUNC(mkold,     &= ~PTE_AF);
-PTE_BIT_FUNC(mkyoung,   |= PTE_AF);
-PTE_BIT_FUNC(mkspecial, |= PTE_SPECIAL);
+static inline pte_t pte_mkwrite(pte_t pte)
+{
+	pte_val(pte) |= PTE_WRITE;
+	return pte;
+}
+
+static inline pte_t pte_mkclean(pte_t pte)
+{
+	pte_val(pte) &= ~PTE_DIRTY;
+	return pte;
+}
+
+static inline pte_t pte_mkdirty(pte_t pte)
+{
+	pte_val(pte) |= PTE_DIRTY;
+	return pte;
+}
+
+static inline pte_t pte_mkold(pte_t pte)
+{
+	pte_val(pte) &= ~PTE_AF;
+	return pte;
+}
+
+static inline pte_t pte_mkyoung(pte_t pte)
+{
+	pte_val(pte) |= PTE_AF;
+	return pte;
+}
+
+static inline pte_t pte_mkspecial(pte_t pte)
+{
+	pte_val(pte) |= PTE_SPECIAL;
+	return pte;
+}
 
 static inline void set_pte(pte_t *ptep, pte_t pte)
 {
@@ -170,8 +201,10 @@
 	if (pte_valid_user(pte)) {
 		if (pte_exec(pte))
 			__sync_icache_dcache(pte, addr);
-		if (!pte_dirty(pte))
-			pte = pte_wrprotect(pte);
+		if (pte_dirty(pte) && pte_write(pte))
+			pte_val(pte) &= ~PTE_RDONLY;
+		else
+			pte_val(pte) |= PTE_RDONLY;
 	}
 
 	set_pte(ptep, pte);
@@ -345,7 +378,7 @@
 static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
 {
 	const pteval_t mask = PTE_USER | PTE_PXN | PTE_UXN | PTE_RDONLY |
-			      PTE_PROT_NONE | PTE_VALID;
+			      PTE_PROT_NONE | PTE_VALID | PTE_WRITE;
 	pte_val(pte) = (pte_val(pte) & ~mask) | (pgprot_val(newprot) & mask);
 	return pte;
 }
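
The pgtable.h changes above stop encoding writability directly in the hardware PTE_RDONLY bit and track it in a software PTE_WRITE bit instead; set_pte_at() then derives RDONLY from the software dirty and write bits, so a clean but writable page stays read-only in hardware and is marked dirty via a write fault. A hedged sketch of that mapping, with an invented helper name (requires <asm/pgtable.h> definitions):

/* Illustrative only: mirrors the RDONLY handling added to set_pte_at(). */
static inline pte_t sketch_sync_rdonly(pte_t pte)
{
	if (pte_dirty(pte) && pte_write(pte))
		pte_val(pte) &= ~PTE_RDONLY;	/* dirty + writable: let hardware write */
	else
		pte_val(pte) |= PTE_RDONLY;	/* clean or write-protected: fault on write */
	return pte;
}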
diff --git a/arch/arm64/include/asm/spinlock.h b/arch/arm64/include/asm/spinlock.h
index 3d5cf06..c45b7b1 100644
--- a/arch/arm64/include/asm/spinlock.h
+++ b/arch/arm64/include/asm/spinlock.h
@@ -132,7 +132,7 @@
 	"	cbnz	%w0, 2b\n"
 	: "=&r" (tmp), "+Q" (rw->lock)
 	: "r" (0x80000000)
-	: "cc", "memory");
+	: "memory");
 }
 
 static inline int arch_write_trylock(arch_rwlock_t *rw)
@@ -146,7 +146,7 @@
 	"1:\n"
 	: "=&r" (tmp), "+Q" (rw->lock)
 	: "r" (0x80000000)
-	: "cc", "memory");
+	: "memory");
 
 	return !tmp;
 }
@@ -187,7 +187,7 @@
 	"	cbnz	%w1, 2b\n"
 	: "=&r" (tmp), "=&r" (tmp2), "+Q" (rw->lock)
 	:
-	: "cc", "memory");
+	: "memory");
 }
 
 static inline void arch_read_unlock(arch_rwlock_t *rw)
@@ -201,7 +201,7 @@
 	"	cbnz	%w1, 1b\n"
 	: "=&r" (tmp), "=&r" (tmp2), "+Q" (rw->lock)
 	:
-	: "cc", "memory");
+	: "memory");
 }
 
 static inline int arch_read_trylock(arch_rwlock_t *rw)
@@ -216,7 +216,7 @@
 	"1:\n"
 	: "=&r" (tmp), "+r" (tmp2), "+Q" (rw->lock)
 	:
-	: "cc", "memory");
+	: "memory");
 
 	return !tmp2;
 }
diff --git a/arch/arm64/include/asm/unistd32.h b/arch/arm64/include/asm/unistd32.h
index 58125bf..bb8eb8a 100644
--- a/arch/arm64/include/asm/unistd32.h
+++ b/arch/arm64/include/asm/unistd32.h
@@ -399,7 +399,10 @@
 __SYSCALL(375, sys_setns)
 __SYSCALL(376, compat_sys_process_vm_readv)
 __SYSCALL(377, compat_sys_process_vm_writev)
-__SYSCALL(378, sys_ni_syscall)			/* 378 for kcmp */
+__SYSCALL(378, sys_kcmp)
+__SYSCALL(379, sys_finit_module)
+__SYSCALL(380, sys_sched_setattr)
+__SYSCALL(381, sys_sched_getattr)
 
 #define __NR_compat_syscalls		379
 
diff --git a/arch/arm64/include/uapi/asm/kvm.h b/arch/arm64/include/uapi/asm/kvm.h
index 495ab6f..eaf54a3 100644
--- a/arch/arm64/include/uapi/asm/kvm.h
+++ b/arch/arm64/include/uapi/asm/kvm.h
@@ -148,6 +148,15 @@
 #define KVM_REG_ARM_TIMER_CNT		ARM64_SYS_REG(3, 3, 14, 3, 2)
 #define KVM_REG_ARM_TIMER_CVAL		ARM64_SYS_REG(3, 3, 14, 0, 2)
 
+/* Device Control API: ARM VGIC */
+#define KVM_DEV_ARM_VGIC_GRP_ADDR	0
+#define KVM_DEV_ARM_VGIC_GRP_DIST_REGS	1
+#define KVM_DEV_ARM_VGIC_GRP_CPU_REGS	2
+#define   KVM_DEV_ARM_VGIC_CPUID_SHIFT	32
+#define   KVM_DEV_ARM_VGIC_CPUID_MASK	(0xffULL << KVM_DEV_ARM_VGIC_CPUID_SHIFT)
+#define   KVM_DEV_ARM_VGIC_OFFSET_SHIFT	0
+#define   KVM_DEV_ARM_VGIC_OFFSET_MASK	(0xffffffffULL << KVM_DEV_ARM_VGIC_OFFSET_SHIFT)
+
 /* KVM_IRQ_LINE irq field index values */
 #define KVM_ARM_IRQ_TYPE_SHIFT		24
 #define KVM_ARM_IRQ_TYPE_MASK		0xff
diff --git a/arch/arm64/kernel/kuser32.S b/arch/arm64/kernel/kuser32.S
index 63c48ff..7787208 100644
--- a/arch/arm64/kernel/kuser32.S
+++ b/arch/arm64/kernel/kuser32.S
@@ -38,12 +38,13 @@
 	.inst	0xe92d00f0		//	push		{r4, r5, r6, r7}
 	.inst	0xe1c040d0		//	ldrd		r4, r5, [r0]
 	.inst	0xe1c160d0		//	ldrd		r6, r7, [r1]
-	.inst	0xe1b20e9f		// 1:	ldaexd		r0, r1, [r2]
+	.inst	0xe1b20f9f		// 1:	ldrexd		r0, r1, [r2]
 	.inst	0xe0303004		//	eors		r3, r0, r4
 	.inst	0x00313005		//	eoreqs		r3, r1, r5
 	.inst	0x01a23e96		//	stlexdeq	r3, r6, [r2]
 	.inst	0x03330001		//	teqeq		r3, #1
 	.inst	0x0afffff9		//	beq		1b
+	.inst	0xf57ff05b		//	dmb		ish
 	.inst	0xe2730000		//	rsbs		r0, r3, #0
 	.inst	0xe8bd00f0		//	pop		{r4, r5, r6, r7}
 	.inst	0xe12fff1e		//	bx		lr
@@ -55,11 +56,12 @@
 
 	.align	5
 __kuser_cmpxchg:			// 0xffff0fc0
-	.inst	0xe1923e9f		// 1:	ldaex		r3, [r2]
+	.inst	0xe1923f9f		// 1:	ldrex		r3, [r2]
 	.inst	0xe0533000		//	subs		r3, r3, r0
 	.inst	0x01823e91		//	stlexeq		r3, r1, [r2]
 	.inst	0x03330001		//	teqeq		r3, #1
 	.inst	0x0afffffa		//	beq		1b
+	.inst	0xf57ff05b		//	dmb		ish
 	.inst	0xe2730000		//	rsbs		r0, r3, #0
 	.inst	0xe12fff1e		//	bx		lr
 
diff --git a/arch/arm64/kernel/process.c b/arch/arm64/kernel/process.c
index 248a15d..1c0a9be 100644
--- a/arch/arm64/kernel/process.c
+++ b/arch/arm64/kernel/process.c
@@ -85,11 +85,6 @@
 void (*arm_pm_restart)(enum reboot_mode reboot_mode, const char *cmd);
 EXPORT_SYMBOL_GPL(arm_pm_restart);
 
-void arch_cpu_idle_prepare(void)
-{
-	local_fiq_enable();
-}
-
 /*
  * This is our default idle handler.
  */
@@ -138,7 +133,6 @@
 
 	/* Disable interrupts first */
 	local_irq_disable();
-	local_fiq_disable();
 
 	/* Now call the architecture specific reboot code. */
 	if (arm_pm_restart)
diff --git a/arch/arm64/kernel/smp.c b/arch/arm64/kernel/smp.c
index 1b7617a..7cfb92a 100644
--- a/arch/arm64/kernel/smp.c
+++ b/arch/arm64/kernel/smp.c
@@ -161,7 +161,6 @@
 	complete(&cpu_running);
 
 	local_irq_enable();
-	local_fiq_enable();
 	local_async_enable();
 
 	/*
@@ -495,7 +494,6 @@
 
 	set_cpu_online(cpu, false);
 
-	local_fiq_disable();
 	local_irq_disable();
 
 	while (1)
diff --git a/arch/arm64/kernel/suspend.c b/arch/arm64/kernel/suspend.c
index 430344e..1fa9ce4 100644
--- a/arch/arm64/kernel/suspend.c
+++ b/arch/arm64/kernel/suspend.c
@@ -1,3 +1,4 @@
+#include <linux/percpu.h>
 #include <linux/slab.h>
 #include <asm/cacheflush.h>
 #include <asm/cpu_ops.h>
@@ -89,6 +90,13 @@
 	if (ret == 0) {
 		cpu_switch_mm(mm->pgd, mm);
 		flush_tlb_all();
+
+		/*
+		 * Restore per-cpu offset before any kernel
+		 * subsystem relying on it has a chance to run.
+		 */
+		set_my_cpu_offset(per_cpu_offset(cpu));
+
 		/*
 		 * Restore HW breakpoint registers to sane values
 		 * before debug exceptions are possibly reenabled
diff --git a/arch/arm64/kernel/vdso.c b/arch/arm64/kernel/vdso.c
index 65d40cf..a7149ca 100644
--- a/arch/arm64/kernel/vdso.c
+++ b/arch/arm64/kernel/vdso.c
@@ -238,6 +238,8 @@
 	vdso_data->use_syscall			= use_syscall;
 	vdso_data->xtime_coarse_sec		= xtime_coarse.tv_sec;
 	vdso_data->xtime_coarse_nsec		= xtime_coarse.tv_nsec;
+	vdso_data->wtm_clock_sec		= tk->wall_to_monotonic.tv_sec;
+	vdso_data->wtm_clock_nsec		= tk->wall_to_monotonic.tv_nsec;
 
 	if (!use_syscall) {
 		vdso_data->cs_cycle_last	= tk->clock->cycle_last;
@@ -245,8 +247,6 @@
 		vdso_data->xtime_clock_nsec	= tk->xtime_nsec;
 		vdso_data->cs_mult		= tk->mult;
 		vdso_data->cs_shift		= tk->shift;
-		vdso_data->wtm_clock_sec	= tk->wall_to_monotonic.tv_sec;
-		vdso_data->wtm_clock_nsec	= tk->wall_to_monotonic.tv_nsec;
 	}
 
 	smp_wmb();
diff --git a/arch/arm64/kernel/vdso/Makefile b/arch/arm64/kernel/vdso/Makefile
index d8064af..6d20b7d 100644
--- a/arch/arm64/kernel/vdso/Makefile
+++ b/arch/arm64/kernel/vdso/Makefile
@@ -48,7 +48,7 @@
 
 # Actual build commands
 quiet_cmd_vdsold = VDSOL $@
-      cmd_vdsold = $(CC) $(c_flags) -Wl,-T $^ -o $@
+      cmd_vdsold = $(CC) $(c_flags) -Wl,-n -Wl,-T $^ -o $@
 quiet_cmd_vdsoas = VDSOA $@
       cmd_vdsoas = $(CC) $(a_flags) -c -o $@ $<
 
diff --git a/arch/arm64/kernel/vdso/gettimeofday.S b/arch/arm64/kernel/vdso/gettimeofday.S
index f0a6d10..fe652ff 100644
--- a/arch/arm64/kernel/vdso/gettimeofday.S
+++ b/arch/arm64/kernel/vdso/gettimeofday.S
@@ -103,6 +103,8 @@
 	bl	__do_get_tspec
 	seqcnt_check w9, 1b
 
+	mov	x30, x2
+
 	cmp	w0, #CLOCK_MONOTONIC
 	b.ne	6f
 
@@ -118,6 +120,9 @@
 	ccmp	w0, #CLOCK_MONOTONIC_COARSE, #0x4, ne
 	b.ne	8f
 
+	/* xtime_coarse_nsec is already right-shifted */
+	mov	x12, #0
+
 	/* Get coarse timespec. */
 	adr	vdso_data, _vdso_data
 3:	seqcnt_acquire
@@ -156,7 +161,7 @@
 	lsr	x11, x11, x12
 	stp	x10, x11, [x1, #TSPEC_TV_SEC]
 	mov	x0, xzr
-	ret	x2
+	ret
 7:
 	mov	x30, x2
 8:	/* Syscall fallback. */
diff --git a/arch/arm64/lib/bitops.S b/arch/arm64/lib/bitops.S
index e5db797..7dac371 100644
--- a/arch/arm64/lib/bitops.S
+++ b/arch/arm64/lib/bitops.S
@@ -46,11 +46,12 @@
 	mov	x2, #1
 	add	x1, x1, x0, lsr #3	// Get word offset
 	lsl	x4, x2, x3		// Create mask
-1:	ldaxr	x2, [x1]
+1:	ldxr	x2, [x1]
 	lsr	x0, x2, x3		// Save old value of bit
 	\instr	x2, x2, x4		// toggle bit
 	stlxr	w5, x2, [x1]
 	cbnz	w5, 1b
+	dmb	ish
 	and	x0, x0, #1
 3:	ret
 ENDPROC(\name	)
diff --git a/arch/arm64/mm/cache.S b/arch/arm64/mm/cache.S
index 48a3860..1ea9f26 100644
--- a/arch/arm64/mm/cache.S
+++ b/arch/arm64/mm/cache.S
@@ -146,7 +146,7 @@
 ENDPROC(__flush_cache_user_range)
 
 /*
- *	__flush_kern_dcache_page(kaddr)
+ *	__flush_dcache_area(kaddr, size)
  *
  *	Ensure that the data held in the page kaddr is written back to the
  *	page in question.
diff --git a/arch/arm64/mm/dma-mapping.c b/arch/arm64/mm/dma-mapping.c
index 45b5ab5..fbd7678 100644
--- a/arch/arm64/mm/dma-mapping.c
+++ b/arch/arm64/mm/dma-mapping.c
@@ -45,6 +45,7 @@
 	if (IS_ENABLED(CONFIG_DMA_CMA)) {
 		struct page *page;
 
+		size = PAGE_ALIGN(size);
 		page = dma_alloc_from_contiguous(dev, size >> PAGE_SHIFT,
 							get_order(size));
 		if (!page)
diff --git a/arch/arm64/mm/mmu.c b/arch/arm64/mm/mmu.c
index f557ebb..f8dc7e8 100644
--- a/arch/arm64/mm/mmu.c
+++ b/arch/arm64/mm/mmu.c
@@ -203,10 +203,18 @@
 	do {
 		next = pmd_addr_end(addr, end);
 		/* try section mapping first */
-		if (((addr | next | phys) & ~SECTION_MASK) == 0)
+		if (((addr | next | phys) & ~SECTION_MASK) == 0) {
+			pmd_t old_pmd = *pmd;
 			set_pmd(pmd, __pmd(phys | prot_sect_kernel));
-		else
+			/*
+			 * Check for previous table entries created during
+			 * boot (__create_page_tables) and flush them.
+			 */
+			if (!pmd_none(old_pmd))
+				flush_tlb_all();
+		} else {
 			alloc_init_pte(pmd, addr, next, __phys_to_pfn(phys));
+		}
 		phys += next - addr;
 	} while (pmd++, addr = next, addr != end);
 }
diff --git a/arch/arm64/mm/pgd.c b/arch/arm64/mm/pgd.c
index 7083cda..62c6101 100644
--- a/arch/arm64/mm/pgd.c
+++ b/arch/arm64/mm/pgd.c
@@ -32,17 +32,10 @@
 
 pgd_t *pgd_alloc(struct mm_struct *mm)
 {
-	pgd_t *new_pgd;
-
 	if (PGD_SIZE == PAGE_SIZE)
-		new_pgd = (pgd_t *)get_zeroed_page(GFP_KERNEL);
+		return (pgd_t *)get_zeroed_page(GFP_KERNEL);
 	else
-		new_pgd = kzalloc(PGD_SIZE, GFP_KERNEL);
-
-	if (!new_pgd)
-		return NULL;
-
-	return new_pgd;
+		return kzalloc(PGD_SIZE, GFP_KERNEL);
 }
 
 void pgd_free(struct mm_struct *mm, pgd_t *pgd)
diff --git a/arch/arm64/mm/proc-macros.S b/arch/arm64/mm/proc-macros.S
index 8957b82..005d29e 100644
--- a/arch/arm64/mm/proc-macros.S
+++ b/arch/arm64/mm/proc-macros.S
@@ -38,8 +38,7 @@
  */
 	.macro	dcache_line_size, reg, tmp
 	mrs	\tmp, ctr_el0			// read CTR
-	lsr	\tmp, \tmp, #16
-	and	\tmp, \tmp, #0xf		// cache line size encoding
+	ubfm	\tmp, \tmp, #16, #19		// cache line size encoding
 	mov	\reg, #4			// bytes per word
 	lsl	\reg, \reg, \tmp		// actual cache line size
 	.endm
diff --git a/arch/arm64/mm/proc.S b/arch/arm64/mm/proc.S
index bed1f1d..1333e6f9 100644
--- a/arch/arm64/mm/proc.S
+++ b/arch/arm64/mm/proc.S
@@ -150,7 +150,7 @@
 #endif
 
 /*
- *	cpu_switch_mm(pgd_phys, tsk)
+ *	cpu_do_switch_mm(pgd_phys, tsk)
  *
  *	Set the translation table base pointer to be pgd_phys.
  *
diff --git a/arch/avr32/Makefile b/arch/avr32/Makefile
index 22fb665..dba48a5d 100644
--- a/arch/avr32/Makefile
+++ b/arch/avr32/Makefile
@@ -11,7 +11,7 @@
 
 KBUILD_DEFCONFIG	:= atstk1002_defconfig
 
-KBUILD_CFLAGS	+= -pipe -fno-builtin -mno-pic
+KBUILD_CFLAGS	+= -pipe -fno-builtin -mno-pic -D__linux__
 KBUILD_AFLAGS	+= -mrelax -mno-pic
 KBUILD_CFLAGS_MODULE += -mno-relax
 LDFLAGS_vmlinux	+= --relax
diff --git a/arch/avr32/boards/mimc200/fram.c b/arch/avr32/boards/mimc200/fram.c
index 9764a1a..c1466a8 100644
--- a/arch/avr32/boards/mimc200/fram.c
+++ b/arch/avr32/boards/mimc200/fram.c
@@ -11,6 +11,7 @@
 #define FRAM_VERSION	"1.0"
 
 #include <linux/miscdevice.h>
+#include <linux/module.h>
 #include <linux/proc_fs.h>
 #include <linux/mm.h>
 #include <linux/io.h>
diff --git a/arch/avr32/include/asm/Kbuild b/arch/avr32/include/asm/Kbuild
index cfb9fe1..c7c64a6 100644
--- a/arch/avr32/include/asm/Kbuild
+++ b/arch/avr32/include/asm/Kbuild
@@ -17,5 +17,6 @@
 generic-y       += sections.h
 generic-y       += topology.h
 generic-y	+= trace_clock.h
+generic-y += vga.h
 generic-y       += xor.h
 generic-y	+= hash.h
diff --git a/arch/avr32/include/asm/io.h b/arch/avr32/include/asm/io.h
index fc6483f..4f5ec2b 100644
--- a/arch/avr32/include/asm/io.h
+++ b/arch/avr32/include/asm/io.h
@@ -295,6 +295,8 @@
 #define iounmap(addr)				\
 	__iounmap(addr)
 
+#define ioremap_wc ioremap_nocache
+
 #define cached(addr) P1SEGADDR(addr)
 #define uncached(addr) P2SEGADDR(addr)
 
diff --git a/arch/blackfin/configs/BF527-EZKIT_defconfig b/arch/blackfin/configs/BF527-EZKIT_defconfig
index 90b1753..af2738c 100644
--- a/arch/blackfin/configs/BF527-EZKIT_defconfig
+++ b/arch/blackfin/configs/BF527-EZKIT_defconfig
@@ -146,6 +146,7 @@
 CONFIG_USB_OTG_BLACKLIST_HUB=y
 CONFIG_USB_MON=y
 CONFIG_USB_MUSB_HDRC=y
+CONFIG_MUSB_PIO_ONLY=y
 CONFIG_USB_MUSB_BLACKFIN=y
 CONFIG_MUSB_PIO_ONLY=y
 CONFIG_USB_STORAGE=y
diff --git a/arch/blackfin/configs/BF538-EZKIT_defconfig b/arch/blackfin/configs/BF538-EZKIT_defconfig
index 972aa62..be03be6 100644
--- a/arch/blackfin/configs/BF538-EZKIT_defconfig
+++ b/arch/blackfin/configs/BF538-EZKIT_defconfig
@@ -59,7 +59,6 @@
 CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
 # CONFIG_FW_LOADER is not set
 CONFIG_MTD=y
-CONFIG_MTD_PARTITIONS=y
 CONFIG_MTD_CMDLINE_PARTS=y
 CONFIG_MTD_CHAR=m
 CONFIG_MTD_BLOCK=y
diff --git a/arch/blackfin/configs/BF561-ACVILON_defconfig b/arch/blackfin/configs/BF561-ACVILON_defconfig
index 9198837..802f9c4 100644
--- a/arch/blackfin/configs/BF561-ACVILON_defconfig
+++ b/arch/blackfin/configs/BF561-ACVILON_defconfig
@@ -49,7 +49,6 @@
 CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
 # CONFIG_FW_LOADER is not set
 CONFIG_MTD=y
-CONFIG_MTD_PARTITIONS=y
 CONFIG_MTD_CMDLINE_PARTS=y
 CONFIG_MTD_CHAR=y
 CONFIG_MTD_BLOCK=y
diff --git a/arch/blackfin/configs/BlackStamp_defconfig b/arch/blackfin/configs/BlackStamp_defconfig
index 7b982d0..3853c47 100644
--- a/arch/blackfin/configs/BlackStamp_defconfig
+++ b/arch/blackfin/configs/BlackStamp_defconfig
@@ -44,7 +44,6 @@
 CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
 # CONFIG_FW_LOADER is not set
 CONFIG_MTD=y
-CONFIG_MTD_PARTITIONS=y
 CONFIG_MTD_CMDLINE_PARTS=y
 CONFIG_MTD_CHAR=m
 CONFIG_MTD_BLOCK=y
diff --git a/arch/blackfin/configs/CM-BF533_defconfig b/arch/blackfin/configs/CM-BF533_defconfig
index c940a1e..5e0db82 100644
--- a/arch/blackfin/configs/CM-BF533_defconfig
+++ b/arch/blackfin/configs/CM-BF533_defconfig
@@ -36,7 +36,6 @@
 # CONFIG_WIRELESS is not set
 CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
 CONFIG_MTD=y
-CONFIG_MTD_PARTITIONS=y
 CONFIG_MTD_CMDLINE_PARTS=y
 CONFIG_MTD_CHAR=y
 CONFIG_MTD_BLOCK=y
diff --git a/arch/blackfin/configs/CM-BF548_defconfig b/arch/blackfin/configs/CM-BF548_defconfig
index e961483..b9af4fa 100644
--- a/arch/blackfin/configs/CM-BF548_defconfig
+++ b/arch/blackfin/configs/CM-BF548_defconfig
@@ -53,7 +53,6 @@
 CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
 # CONFIG_FW_LOADER is not set
 CONFIG_MTD=y
-CONFIG_MTD_PARTITIONS=y
 CONFIG_MTD_CMDLINE_PARTS=y
 CONFIG_MTD_CHAR=y
 CONFIG_MTD_BLOCK=y
diff --git a/arch/blackfin/configs/CM-BF561_defconfig b/arch/blackfin/configs/CM-BF561_defconfig
index 24936b9..d6dd98e 100644
--- a/arch/blackfin/configs/CM-BF561_defconfig
+++ b/arch/blackfin/configs/CM-BF561_defconfig
@@ -51,7 +51,6 @@
 # CONFIG_WIRELESS is not set
 CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
 CONFIG_MTD=y
-CONFIG_MTD_PARTITIONS=y
 CONFIG_MTD_CMDLINE_PARTS=y
 CONFIG_MTD_CHAR=y
 CONFIG_MTD_BLOCK=y
diff --git a/arch/blackfin/configs/DNP5370_defconfig b/arch/blackfin/configs/DNP5370_defconfig
index 89162d0..2b58cb2 100644
--- a/arch/blackfin/configs/DNP5370_defconfig
+++ b/arch/blackfin/configs/DNP5370_defconfig
@@ -36,7 +36,6 @@
 CONFIG_MTD=y
 CONFIG_MTD_DEBUG=y
 CONFIG_MTD_DEBUG_VERBOSE=1
-CONFIG_MTD_PARTITIONS=y
 CONFIG_MTD_CHAR=y
 CONFIG_MTD_BLOCK=y
 CONFIG_NFTL=y
diff --git a/arch/blackfin/configs/H8606_defconfig b/arch/blackfin/configs/H8606_defconfig
index a26436b..f754e49 100644
--- a/arch/blackfin/configs/H8606_defconfig
+++ b/arch/blackfin/configs/H8606_defconfig
@@ -36,7 +36,6 @@
 # CONFIG_WIRELESS is not set
 # CONFIG_FW_LOADER is not set
 CONFIG_MTD=y
-CONFIG_MTD_PARTITIONS=y
 CONFIG_MTD_CHAR=y
 CONFIG_MTD_BLOCK=y
 CONFIG_MTD_RAM=y
diff --git a/arch/blackfin/configs/IP0X_defconfig b/arch/blackfin/configs/IP0X_defconfig
index 6479915..6295165 100644
--- a/arch/blackfin/configs/IP0X_defconfig
+++ b/arch/blackfin/configs/IP0X_defconfig
@@ -43,7 +43,6 @@
 CONFIG_IP_NF_MANGLE=y
 # CONFIG_WIRELESS is not set
 CONFIG_MTD=y
-CONFIG_MTD_PARTITIONS=y
 CONFIG_MTD_CHAR=y
 CONFIG_MTD_BLOCK=y
 CONFIG_MTD_CFI=y
diff --git a/arch/blackfin/configs/PNAV-10_defconfig b/arch/blackfin/configs/PNAV-10_defconfig
index 8fd9b44..a6a7298 100644
--- a/arch/blackfin/configs/PNAV-10_defconfig
+++ b/arch/blackfin/configs/PNAV-10_defconfig
@@ -46,7 +46,6 @@
 CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
 # CONFIG_FW_LOADER is not set
 CONFIG_MTD=y
-CONFIG_MTD_PARTITIONS=y
 CONFIG_MTD_CHAR=m
 CONFIG_MTD_BLOCK=y
 CONFIG_MTD_RAM=y
diff --git a/arch/blackfin/configs/SRV1_defconfig b/arch/blackfin/configs/SRV1_defconfig
index 0520c16..bc21664 100644
--- a/arch/blackfin/configs/SRV1_defconfig
+++ b/arch/blackfin/configs/SRV1_defconfig
@@ -38,7 +38,6 @@
 # CONFIG_WIRELESS is not set
 # CONFIG_FW_LOADER is not set
 CONFIG_MTD=y
-CONFIG_MTD_PARTITIONS=y
 CONFIG_MTD_CHAR=m
 CONFIG_MTD_BLOCK=y
 CONFIG_MTD_JEDECPROBE=m
diff --git a/arch/blackfin/configs/TCM-BF518_defconfig b/arch/blackfin/configs/TCM-BF518_defconfig
index e4ed865..ea88158 100644
--- a/arch/blackfin/configs/TCM-BF518_defconfig
+++ b/arch/blackfin/configs/TCM-BF518_defconfig
@@ -54,7 +54,6 @@
 CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
 # CONFIG_FW_LOADER is not set
 CONFIG_MTD=y
-CONFIG_MTD_PARTITIONS=y
 CONFIG_MTD_CMDLINE_PARTS=y
 CONFIG_MTD_CHAR=y
 CONFIG_MTD_BLOCK=y
diff --git a/arch/blackfin/include/asm/def_LPBlackfin.h b/arch/blackfin/include/asm/def_LPBlackfin.h
index ca67145..c5c8d8a 100644
--- a/arch/blackfin/include/asm/def_LPBlackfin.h
+++ b/arch/blackfin/include/asm/def_LPBlackfin.h
@@ -544,6 +544,7 @@
 #define DCBS_P			0x04	/* L1 Data Cache Bank Select */
 #define PORT_PREF0_P		0x12	/* DAG0 Port Preference */
 #define PORT_PREF1_P		0x13	/* DAG1 Port Preference */
+#define RDCHK			0x9	/* Enable L1 Parity Check */
 
 /* Masks */
 #define ENDM               0x00000001	/* (doesn't really exist) Enable
diff --git a/arch/blackfin/include/uapi/asm/byteorder.h b/arch/blackfin/include/uapi/asm/byteorder.h
index 9558416..3b125da 100644
--- a/arch/blackfin/include/uapi/asm/byteorder.h
+++ b/arch/blackfin/include/uapi/asm/byteorder.h
@@ -1 +1,6 @@
+#ifndef _UAPI__BFIN_ASM_BYTEORDER_H
+#define _UAPI__BFIN_ASM_BYTEORDER_H
+
 #include <linux/byteorder/little_endian.h>
+
+#endif /* _UAPI__BFIN_ASM_BYTEORDER_H */
diff --git a/arch/blackfin/include/uapi/asm/cachectl.h b/arch/blackfin/include/uapi/asm/cachectl.h
index 03255df..4fdab75 100644
--- a/arch/blackfin/include/uapi/asm/cachectl.h
+++ b/arch/blackfin/include/uapi/asm/cachectl.h
@@ -7,8 +7,8 @@
  * Licensed under the GPL-2 or later.
  */
 
-#ifndef	_ASM_CACHECTL
-#define	_ASM_CACHECTL
+#ifndef _UAPI_ASM_CACHECTL
+#define _UAPI_ASM_CACHECTL
 
 /*
  * Options for cacheflush system call
@@ -17,4 +17,4 @@
 #define	DCACHE	(1<<1)		/* writeback and flush data cache */
 #define	BCACHE	(ICACHE|DCACHE)	/* flush both caches              */
 
-#endif	/* _ASM_CACHECTL */
+#endif /* _UAPI_ASM_CACHECTL */
diff --git a/arch/blackfin/include/uapi/asm/fcntl.h b/arch/blackfin/include/uapi/asm/fcntl.h
index 251c911..f51ad9a 100644
--- a/arch/blackfin/include/uapi/asm/fcntl.h
+++ b/arch/blackfin/include/uapi/asm/fcntl.h
@@ -4,8 +4,8 @@
  * Licensed under the GPL-2 or later.
  */
 
-#ifndef _BFIN_FCNTL_H
-#define _BFIN_FCNTL_H
+#ifndef _UAPI_BFIN_FCNTL_H
+#define _UAPI_BFIN_FCNTL_H
 
 #define O_DIRECTORY	 040000	/* must be a directory */
 #define O_NOFOLLOW	0100000	/* don't follow links */
@@ -14,4 +14,4 @@
 
 #include <asm-generic/fcntl.h>
 
-#endif
+#endif /* _UAPI_BFIN_FCNTL_H */
diff --git a/arch/blackfin/include/uapi/asm/ioctls.h b/arch/blackfin/include/uapi/asm/ioctls.h
index eca8d75..9a41c20 100644
--- a/arch/blackfin/include/uapi/asm/ioctls.h
+++ b/arch/blackfin/include/uapi/asm/ioctls.h
@@ -1,7 +1,7 @@
-#ifndef __ARCH_BFIN_IOCTLS_H__
-#define __ARCH_BFIN_IOCTLS_H__
+#ifndef _UAPI__ARCH_BFIN_IOCTLS_H__
+#define _UAPI__ARCH_BFIN_IOCTLS_H__
 
 #define FIOQSIZE	0x545E
 #include <asm-generic/ioctls.h>
 
-#endif
+#endif /* _UAPI__ARCH_BFIN_IOCTLS_H__ */
diff --git a/arch/blackfin/include/uapi/asm/poll.h b/arch/blackfin/include/uapi/asm/poll.h
index 072d896..99c7d68 100644
--- a/arch/blackfin/include/uapi/asm/poll.h
+++ b/arch/blackfin/include/uapi/asm/poll.h
@@ -5,12 +5,12 @@
  *
  */
 
-#ifndef __BFIN_POLL_H
-#define __BFIN_POLL_H
+#ifndef _UAPI__BFIN_POLL_H
+#define _UAPI__BFIN_POLL_H
 
 #define POLLWRNORM	4 /* POLLOUT */
 #define POLLWRBAND	256
 
 #include <asm-generic/poll.h>
 
-#endif
+#endif /* _UAPI__BFIN_POLL_H */
diff --git a/arch/blackfin/include/uapi/asm/posix_types.h b/arch/blackfin/include/uapi/asm/posix_types.h
index 1bd3436..9608ef6 100644
--- a/arch/blackfin/include/uapi/asm/posix_types.h
+++ b/arch/blackfin/include/uapi/asm/posix_types.h
@@ -4,8 +4,8 @@
  * Licensed under the GPL-2 or later.
  */
 
-#ifndef __ARCH_BFIN_POSIX_TYPES_H
-#define __ARCH_BFIN_POSIX_TYPES_H
+#ifndef _UAPI__ARCH_BFIN_POSIX_TYPES_H
+#define _UAPI__ARCH_BFIN_POSIX_TYPES_H
 
 typedef unsigned short __kernel_mode_t;
 #define __kernel_mode_t __kernel_mode_t
@@ -27,4 +27,4 @@
 
 #include <asm-generic/posix_types.h>
 
-#endif
+#endif /* _UAPI__ARCH_BFIN_POSIX_TYPES_H */
diff --git a/arch/blackfin/include/uapi/asm/sigcontext.h b/arch/blackfin/include/uapi/asm/sigcontext.h
index 906bdc1..b58f12d 100644
--- a/arch/blackfin/include/uapi/asm/sigcontext.h
+++ b/arch/blackfin/include/uapi/asm/sigcontext.h
@@ -4,8 +4,8 @@
  * Licensed under the GPL-2 or later.
  */
 
-#ifndef _ASM_BLACKFIN_SIGCONTEXT_H
-#define _ASM_BLACKFIN_SIGCONTEXT_H
+#ifndef _UAPI_ASM_BLACKFIN_SIGCONTEXT_H
+#define _UAPI_ASM_BLACKFIN_SIGCONTEXT_H
 
 /* Add new entries at the end of the structure only.  */
 struct sigcontext {
@@ -58,4 +58,4 @@
 	unsigned long sc_seqstat;
 };
 
-#endif
+#endif /* _UAPI_ASM_BLACKFIN_SIGCONTEXT_H */
diff --git a/arch/blackfin/include/uapi/asm/siginfo.h b/arch/blackfin/include/uapi/asm/siginfo.h
index 3e81306..c72f4e6 100644
--- a/arch/blackfin/include/uapi/asm/siginfo.h
+++ b/arch/blackfin/include/uapi/asm/siginfo.h
@@ -4,8 +4,8 @@
  * Licensed under the GPL-2 or later.
  */
 
-#ifndef _BFIN_SIGINFO_H
-#define _BFIN_SIGINFO_H
+#ifndef _UAPI_BFIN_SIGINFO_H
+#define _UAPI_BFIN_SIGINFO_H
 
 #include <linux/types.h>
 #include <asm-generic/siginfo.h>
@@ -38,4 +38,4 @@
  */
 #define SEGV_STACKFLOW	(__SI_FAULT|3)	/* stack overflow */
 
-#endif
+#endif /* _UAPI_BFIN_SIGINFO_H */
diff --git a/arch/blackfin/include/uapi/asm/signal.h b/arch/blackfin/include/uapi/asm/signal.h
index 77a3bf3..f0a0d8b 100644
--- a/arch/blackfin/include/uapi/asm/signal.h
+++ b/arch/blackfin/include/uapi/asm/signal.h
@@ -1,7 +1,7 @@
-#ifndef _BLACKFIN_SIGNAL_H
-#define _BLACKFIN_SIGNAL_H
+#ifndef _UAPI_BLACKFIN_SIGNAL_H
+#define _UAPI_BLACKFIN_SIGNAL_H
 
 #define SA_RESTORER 0x04000000
 #include <asm-generic/signal.h>
 
-#endif
+#endif /* _UAPI_BLACKFIN_SIGNAL_H */
diff --git a/arch/blackfin/include/uapi/asm/stat.h b/arch/blackfin/include/uapi/asm/stat.h
index 2e27665..d3068a7 100644
--- a/arch/blackfin/include/uapi/asm/stat.h
+++ b/arch/blackfin/include/uapi/asm/stat.h
@@ -4,8 +4,8 @@
  * Licensed under the GPL-2.
  */
 
-#ifndef _BFIN_STAT_H
-#define _BFIN_STAT_H
+#ifndef _UAPI_BFIN_STAT_H
+#define _UAPI_BFIN_STAT_H
 
 struct stat {
 	unsigned short st_dev;
@@ -66,4 +66,4 @@
 	unsigned long long st_ino;
 };
 
-#endif				/* _BFIN_STAT_H */
+#endif /* _UAPI_BFIN_STAT_H */
diff --git a/arch/blackfin/include/uapi/asm/swab.h b/arch/blackfin/include/uapi/asm/swab.h
index 89de650..f5626b7 100644
--- a/arch/blackfin/include/uapi/asm/swab.h
+++ b/arch/blackfin/include/uapi/asm/swab.h
@@ -4,8 +4,8 @@
  * Licensed under the GPL-2 or later.
  */
 
-#ifndef _BLACKFIN_SWAB_H
-#define _BLACKFIN_SWAB_H
+#ifndef _UAPI_BLACKFIN_SWAB_H
+#define _UAPI_BLACKFIN_SWAB_H
 
 #include <linux/types.h>
 #include <asm-generic/swab.h>
@@ -47,4 +47,4 @@
 
 #endif /* __GNUC__ */
 
-#endif				/* _BLACKFIN_SWAB_H */
+#endif /* _UAPI_BLACKFIN_SWAB_H */
diff --git a/arch/blackfin/mach-bf533/boards/stamp.c b/arch/blackfin/mach-bf533/boards/stamp.c
index 4a8c2e3..4da70c4 100644
--- a/arch/blackfin/mach-bf533/boards/stamp.c
+++ b/arch/blackfin/mach-bf533/boards/stamp.c
@@ -370,7 +370,8 @@
 #endif
 #endif
 
-#if defined(CONFIG_SERIAL_BFIN_SPORT) || defined(CONFIG_SERIAL_BFIN_SPORT_MODULE)
+#if defined(CONFIG_SERIAL_BFIN_SPORT) || \
+	defined(CONFIG_SERIAL_BFIN_SPORT_MODULE)
 #ifdef CONFIG_SERIAL_BFIN_SPORT0_UART
 static struct resource bfin_sport0_uart_resources[] = {
 	{
@@ -441,6 +442,50 @@
 #endif
 #endif
 
+#if defined(CONFIG_BFIN_SPORT) || defined(CONFIG_BFIN_SPORT_MODULE)
+static struct resource bfin_sport0_resources[] = {
+	{
+		.start = SPORT0_TCR1,
+		.end = SPORT0_MRCS3+4,
+		.flags = IORESOURCE_MEM,
+	},
+	{
+		.start = IRQ_SPORT0_TX,
+		.end = IRQ_SPORT0_TX+1,
+		.flags = IORESOURCE_IRQ,
+	},
+	{
+		.start = IRQ_SPORT0_RX,
+		.end = IRQ_SPORT0_RX+1,
+		.flags = IORESOURCE_IRQ,
+	},
+	{
+		.start = IRQ_SPORT0_ERROR,
+		.end = IRQ_SPORT0_ERROR,
+		.flags = IORESOURCE_IRQ,
+	},
+	{
+		.start = CH_SPORT0_TX,
+		.end = CH_SPORT0_TX,
+		.flags = IORESOURCE_DMA,
+	},
+	{
+		.start = CH_SPORT0_RX,
+		.end = CH_SPORT0_RX,
+		.flags = IORESOURCE_DMA,
+	},
+};
+static struct platform_device bfin_sport0_device = {
+	.name = "bfin_sport_raw",
+	.id = 0,
+	.num_resources = ARRAY_SIZE(bfin_sport0_resources),
+	.resource = bfin_sport0_resources,
+	.dev = {
+		.platform_data = &bfin_sport0_peripherals,
+	},
+};
+#endif
+
 #if defined(CONFIG_KEYBOARD_GPIO) || defined(CONFIG_KEYBOARD_GPIO_MODULE)
 #include <linux/input.h>
 #include <linux/gpio_keys.h>
diff --git a/arch/blackfin/mach-bf609/Kconfig b/arch/blackfin/mach-bf609/Kconfig
index b0fca44..6584190 100644
--- a/arch/blackfin/mach-bf609/Kconfig
+++ b/arch/blackfin/mach-bf609/Kconfig
@@ -17,6 +17,12 @@
 	  Divide the total number of interrupt priority levels into sub-levels.
 	  There is 2 ^ (SEC_IRQ_PRIORITY_LEVELS + 1) different levels.
 
+config L1_PARITY_CHECK
+	bool "Enable L1 parity check"
+	default n
+	help
+	  Enable the L1 parity check in L1 SRAM. A fault event is raised
+	  when an L1 parity error is found.
 
 comment "System Cross Bar Priority Assignment"
 
diff --git a/arch/blackfin/mach-bf609/boards/ezkit.c b/arch/blackfin/mach-bf609/boards/ezkit.c
index 05194e9..8de8bc6 100644
--- a/arch/blackfin/mach-bf609/boards/ezkit.c
+++ b/arch/blackfin/mach-bf609/boards/ezkit.c
@@ -1025,7 +1025,9 @@
 	.ain_sel = ADV7842_AIN10_11_12_NC_SYNC_4_1,
 	.prim_mode = ADV7842_PRIM_MODE_SDP,
 	.vid_std_select = ADV7842_SDP_VID_STD_CVBS_SD_4x1,
-	.inp_color_space = ADV7842_INP_COLOR_SPACE_AUTO,
+	.hdmi_free_run_enable = 1,
+	.sdp_free_run_auto = 1,
+	.llc_dll_phase = 0x10,
 	.i2c_sdp_io = 0x40,
 	.i2c_sdp = 0x41,
 	.i2c_cp = 0x42,
diff --git a/arch/blackfin/mach-bf609/clock.c b/arch/blackfin/mach-bf609/clock.c
index dab8849..13644ed 100644
--- a/arch/blackfin/mach-bf609/clock.c
+++ b/arch/blackfin/mach-bf609/clock.c
@@ -120,6 +120,7 @@
 }
 EXPORT_SYMBOL(clk_disable);
 
+
 unsigned long clk_get_rate(struct clk *clk)
 {
 	unsigned long ret = 0;
@@ -131,7 +132,7 @@
 
 long clk_round_rate(struct clk *clk, unsigned long rate)
 {
-	long ret = -EIO;
+	long ret = 0;
 	if (clk->ops && clk->ops->round_rate)
 		ret = clk->ops->round_rate(clk, rate);
 	return ret;
diff --git a/arch/blackfin/mach-bf609/include/mach/anomaly.h b/arch/blackfin/mach-bf609/include/mach/anomaly.h
index 7a07374..696786e 100644
--- a/arch/blackfin/mach-bf609/include/mach/anomaly.h
+++ b/arch/blackfin/mach-bf609/include/mach/anomaly.h
@@ -23,11 +23,11 @@
 /* TRU_STAT.ADDRERR and TRU_ERRADDR.ADDR May Not Reflect the Correct Status */
 #define ANOMALY_16000003 (1)
 /* The EPPI Data Enable (DEN) Signal is Not Functional */
-#define ANOMALY_16000004 (1)
+#define ANOMALY_16000004 (__SILICON_REVISION__ < 1)
 /* Using L1 Instruction Cache with Parity Enabled is Unreliable */
-#define ANOMALY_16000005 (1)
+#define ANOMALY_16000005 (__SILICON_REVISION__ < 1)
 /* SEQSTAT.SYSNMI Clears Upon Entering the NMI ISR */
-#define ANOMALY_16000006 (1)
+#define ANOMALY_16000006 (__SILICON_REVISION__ < 1)
 /* DDR2 Memory Reads May Fail Intermittently */
 #define ANOMALY_16000007 (1)
 /* Instruction Memory Stalls Can Cause IFLUSH to Fail */
@@ -49,19 +49,53 @@
 /* Speculative Fetches Can Cause Undesired External FIFO Operations */
 #define ANOMALY_16000017 (1)
 /* RSI Boot Cleanup Routine Does Not Clear Registers */
-#define ANOMALY_16000018 (1)
+#define ANOMALY_16000018 (__SILICON_REVISION__ < 1)
 /* SPI Master Boot Device Auto-detection Frequency is Set Incorrectly */
-#define ANOMALY_16000019 (1)
+#define ANOMALY_16000019 (__SILICON_REVISION__ < 1)
 /* rom_SysControl() Fails to Set DDR0_CTL.INIT for Wakeup From Hibernate */
-#define ANOMALY_16000020 (1)
+#define ANOMALY_16000020 (__SILICON_REVISION__ < 1)
 /* rom_SysControl() Fails to Save and Restore DDR0_PHYCTL3 for Hibernate/Wakeup Sequence */
-#define ANOMALY_16000021 (1)
+#define ANOMALY_16000021 (__SILICON_REVISION__ < 1)
 /* Boot Code Fails to Enable Parity Fault Detection */
-#define ANOMALY_16000022 (1)
+#define ANOMALY_16000022 (__SILICON_REVISION__ < 1)
+/* Rom_SysControl Does not Update CGU0_CLKOUTSEL */
+#define ANOMALY_16000023 (__SILICON_REVISION__ < 1)
+/* Spurious Fault Signaled After Clearing an Externally Generated Fault */
+#define ANOMALY_16000024 (1)
+/* SPORT May Drive Data Pins During Inactive Channels in Multichannel Mode */
+#define ANOMALY_16000025 (1)
 /* USB DMA interrupt status do not show the DMA channel interrupt in the DMA ISR */
-#define ANOMALY_16000027 (1)
+#define ANOMALY_16000027 (__SILICON_REVISION__ < 1)
+/* Default SPI Master Boot Mode Setting is Incorrect */
+#define ANOMALY_16000028 (__SILICON_REVISION__ < 1)
+/* PPI tDFSPI Timing Does Not Meet Data Sheet Specification */
+#define ANOMALY_16000029 (__SILICON_REVISION__ < 1)
 /* Interrupted Core Reads of MMRs May Cause Data Loss */
-#define ANOMALY_16000030 (1)
+#define ANOMALY_16000030 (__SILICON_REVISION__ < 1)
+/* Incorrect Default USB_PLL_OSC.PLLM Value */
+#define ANOMALY_16000031 (__SILICON_REVISION__ < 1)
+/* Core Reads of System MMRs May Cause the Core to Hang */
+#define ANOMALY_16000032 (__SILICON_REVISION__ < 1)
+/* PPI Data Underflow on First Word Not Reported in Certain Modes */
+#define ANOMALY_16000033 (1)
+/* CNV1 Red Pixel Substitution feature not functional in the PVP */
+#define ANOMALY_16000034 (__SILICON_REVISION__ < 1)
+/* IPF0 Output Port Color Separation feature not functional */
+#define ANOMALY_16000035 (__SILICON_REVISION__ < 1)
+/* Spurious USB Wake From Hibernate May Occur When USB_VBUS is Low */
+#define ANOMALY_16000036 (__SILICON_REVISION__ < 1)
+/* Core RAISE 2 Instruction Not Latched When Executed at Priority Level 0, 1, or 2 */
+#define ANOMALY_16000037 (__SILICON_REVISION__ < 1)
+/* Spurious Unhandled NMI or L1 Memory Parity Error Interrupt May Occur Upon Entering the NMI ISR */
+#define ANOMALY_16000038 (__SILICON_REVISION__ < 1)
+/* CGU_STAT.PLOCKERR Bit May be Unreliable */
+#define ANOMALY_16000039 (1)
+/* JTAG Emulator Reads of SDU_IDCODE Alter Register Contents */
+#define ANOMALY_16000040 (1)
+/* IFLUSH Instruction Causes Parity Error When Parity Is Enabled */
+#define ANOMALY_16000041 (1)
+/* Instruction Cache Failure When Parity Is Enabled */
+#define ANOMALY_16000042 (__SILICON_REVISION__ == 1)
 
 /* Anomalies that don't exist on this proc */
 #define ANOMALY_05000158 (0)
diff --git a/arch/blackfin/mach-common/cache-c.c b/arch/blackfin/mach-common/cache-c.c
index 0e1e451..f4adedc 100644
--- a/arch/blackfin/mach-common/cache-c.c
+++ b/arch/blackfin/mach-common/cache-c.c
@@ -6,7 +6,6 @@
  * Licensed under the GPL-2 or later.
  */
 
-#include <linux/init.h>
 #include <asm/blackfin.h>
 #include <asm/cplbinit.h>
 
@@ -42,6 +41,16 @@
                 unsigned long mem_mask)
 {
 	int i;
+#ifdef CONFIG_L1_PARITY_CHECK
+	u32 ctrl;
+
+	if (cplb_addr == DCPLB_ADDR0) {
+		ctrl = bfin_read32(mem_control) | (1 << RDCHK);
+		CSYNC();
+		bfin_write32(mem_control, ctrl);
+		SSYNC();
+	}
+#endif
 
 	for (i = 0; i < MAX_CPLBS; i++) {
 		bfin_write32(cplb_addr + i * 4, cplb_tbl[i].addr);
diff --git a/arch/blackfin/mach-common/clocks-init.c b/arch/blackfin/mach-common/clocks-init.c
index 2308ce5..d436bd9 100644
--- a/arch/blackfin/mach-common/clocks-init.c
+++ b/arch/blackfin/mach-common/clocks-init.c
@@ -7,7 +7,6 @@
  */
 
 #include <linux/linkage.h>
-#include <linux/init.h>
 #include <asm/blackfin.h>
 
 #include <asm/dma.h>
diff --git a/arch/blackfin/mach-common/ints-priority.c b/arch/blackfin/mach-common/ints-priority.c
index ca75613..867b7ce 100644
--- a/arch/blackfin/mach-common/ints-priority.c
+++ b/arch/blackfin/mach-common/ints-priority.c
@@ -471,13 +471,8 @@
 
 }
 
-void handle_sec_fault(unsigned int irq, struct irq_desc *desc)
+void handle_sec_fault(uint32_t sec_gstat)
 {
-	uint32_t sec_gstat;
-
-	raw_spin_lock(&desc->lock);
-
-	sec_gstat = bfin_read32(SEC_GSTAT);
 	if (sec_gstat & SEC_GSTAT_ERR) {
 
 		switch (sec_gstat & SEC_GSTAT_ERRC) {
@@ -494,18 +489,16 @@
 
 
 	}
-
-	raw_spin_unlock(&desc->lock);
-
-	handle_fasteoi_irq(irq, desc);
 }
 
-void handle_core_fault(unsigned int irq, struct irq_desc *desc)
+static struct irqaction bfin_fault_irq = {
+	.name = "Blackfin fault",
+};
+
+static irqreturn_t bfin_fault_routine(int irq, void *data)
 {
 	struct pt_regs *fp = get_irq_regs();
 
-	raw_spin_lock(&desc->lock);
-
 	switch (irq) {
 	case IRQ_C0_DBL_FAULT:
 		double_fault_c(fp);
@@ -522,11 +515,15 @@
 	case IRQ_C0_NMI_L1_PARITY_ERR:
 		panic("Core 0 NMI L1 parity error");
 		break;
+	case IRQ_SEC_ERR:
+		pr_err("SEC error\n");
+		handle_sec_fault(bfin_read32(SEC_GSTAT));
+		break;
 	default:
-		panic("Core 1 fault %d occurs unexpectedly", irq);
+		panic("Unknown fault %d", irq);
 	}
 
-	raw_spin_unlock(&desc->lock);
+	return IRQ_HANDLED;
 }
 #endif /* SEC_GCTL */
 
@@ -1195,12 +1192,7 @@
 				handle_percpu_irq);
 		} else {
 			irq_set_chip(irq, &bfin_sec_irqchip);
-			if (irq == IRQ_SEC_ERR)
-				irq_set_handler(irq, handle_sec_fault);
-			else if (irq >= IRQ_C0_DBL_FAULT && irq < CORE_IRQS)
-				irq_set_handler(irq, handle_core_fault);
-			else
-				irq_set_handler(irq, handle_fasteoi_irq);
+			irq_set_handler(irq, handle_fasteoi_irq);
 			__irq_set_preflow_handler(irq, bfin_sec_preflow_handler);
 		}
 	}
@@ -1239,6 +1231,13 @@
 	register_syscore_ops(&sec_pm_syscore_ops);
 #endif
 
+	bfin_fault_irq.handler = bfin_fault_routine;
+#ifdef CONFIG_L1_PARITY_CHECK
+	setup_irq(IRQ_C0_NMI_L1_PARITY_ERR, &bfin_fault_irq);
+#endif
+	setup_irq(IRQ_C0_DBL_FAULT, &bfin_fault_irq);
+	setup_irq(IRQ_SEC_ERR, &bfin_fault_irq);
+
 	return 0;
 }
 
diff --git a/arch/blackfin/mach-common/scb-init.c b/arch/blackfin/mach-common/scb-init.c
index 2cbfb0b..8923398 100644
--- a/arch/blackfin/mach-common/scb-init.c
+++ b/arch/blackfin/mach-common/scb-init.c
@@ -6,7 +6,6 @@
  * Licensed under the GPL-2 or later.
  */
 
-#include <linux/init.h>
 #include <linux/errno.h>
 #include <linux/kernel.h>
 #include <asm/scb.h>
diff --git a/arch/blackfin/mach-common/smp.c b/arch/blackfin/mach-common/smp.c
index 2bbae07..ba6c30d 100644
--- a/arch/blackfin/mach-common/smp.c
+++ b/arch/blackfin/mach-common/smp.c
@@ -53,7 +53,6 @@
 	BFIN_IPI_TIMER,
 	BFIN_IPI_RESCHEDULE,
 	BFIN_IPI_CALL_FUNC,
-	BFIN_IPI_CALL_FUNC_SINGLE,
 	BFIN_IPI_CPU_STOP,
 };
 
@@ -162,9 +161,6 @@
 			case BFIN_IPI_CALL_FUNC:
 				generic_smp_call_function_interrupt();
 				break;
-			case BFIN_IPI_CALL_FUNC_SINGLE:
-				generic_smp_call_function_single_interrupt();
-				break;
 			case BFIN_IPI_CPU_STOP:
 				ipi_cpu_stop(cpu);
 				break;
@@ -210,7 +206,7 @@
 
 void arch_send_call_function_single_ipi(int cpu)
 {
-	send_ipi(cpumask_of(cpu), BFIN_IPI_CALL_FUNC_SINGLE);
+	send_ipi(cpumask_of(cpu), BFIN_IPI_CALL_FUNC);
 }
 
 void arch_send_call_function_ipi_mask(const struct cpumask *mask)
diff --git a/arch/frv/Makefile b/arch/frv/Makefile
index 4d1b1e9..2a8fb73 100644
--- a/arch/frv/Makefile
+++ b/arch/frv/Makefile
@@ -74,13 +74,6 @@
 KBUILD_CFLAGS	+= -ffixed-fcc3 -ffixed-cc3 -ffixed-gr15 -ffixed-icc2
 KBUILD_AFLAGS	+= -mno-fdpic
 
-# make sure the .S files get compiled with debug info
-# and disable optimisations that are unhelpful whilst debugging
-ifdef CONFIG_DEBUG_INFO
-#KBUILD_CFLAGS	+= -O1
-KBUILD_AFLAGS	+= -Wa,--gdwarf2
-endif
-
 head-y		:= arch/frv/kernel/head.o
 
 core-y		+= arch/frv/kernel/ arch/frv/mm/
diff --git a/arch/ia64/include/asm/unistd.h b/arch/ia64/include/asm/unistd.h
index afd45e0..ae763d8b 100644
--- a/arch/ia64/include/asm/unistd.h
+++ b/arch/ia64/include/asm/unistd.h
@@ -11,7 +11,7 @@
 
 
 
-#define NR_syscalls			312 /* length of syscall table */
+#define NR_syscalls			314 /* length of syscall table */
 
 /*
  * The following defines stop scripts/checksyscalls.sh from complaining about
diff --git a/arch/ia64/include/uapi/asm/unistd.h b/arch/ia64/include/uapi/asm/unistd.h
index 34fd6fe..715e85f 100644
--- a/arch/ia64/include/uapi/asm/unistd.h
+++ b/arch/ia64/include/uapi/asm/unistd.h
@@ -325,5 +325,7 @@
 #define __NR_process_vm_writev		1333
 #define __NR_accept4			1334
 #define __NR_finit_module		1335
+#define __NR_sched_setattr		1336
+#define __NR_sched_getattr		1337
 
 #endif /* _UAPI_ASM_IA64_UNISTD_H */
diff --git a/arch/ia64/kernel/entry.S b/arch/ia64/kernel/entry.S
index ddea607f..fa8d61a 100644
--- a/arch/ia64/kernel/entry.S
+++ b/arch/ia64/kernel/entry.S
@@ -1773,6 +1773,8 @@
 	data8 sys_process_vm_writev
 	data8 sys_accept4
 	data8 sys_finit_module			// 1335
+	data8 sys_sched_setattr
+	data8 sys_sched_getattr
 
 	.org sys_call_table + 8*NR_syscalls	// guard against failures to increase NR_syscalls
 #endif /* __IA64_ASM_PARAVIRTUALIZED_NATIVE */
diff --git a/arch/m68k/emu/nfblock.c b/arch/m68k/emu/nfblock.c
index 0721858..2d75ae2 100644
--- a/arch/m68k/emu/nfblock.c
+++ b/arch/m68k/emu/nfblock.c
@@ -62,17 +62,18 @@
 static void nfhd_make_request(struct request_queue *queue, struct bio *bio)
 {
 	struct nfhd_device *dev = queue->queuedata;
-	struct bio_vec *bvec;
-	int i, dir, len, shift;
-	sector_t sec = bio->bi_sector;
+	struct bio_vec bvec;
+	struct bvec_iter iter;
+	int dir, len, shift;
+	sector_t sec = bio->bi_iter.bi_sector;
 
 	dir = bio_data_dir(bio);
 	shift = dev->bshift;
-	bio_for_each_segment(bvec, bio, i) {
-		len = bvec->bv_len;
+	bio_for_each_segment(bvec, bio, iter) {
+		len = bvec.bv_len;
 		len >>= 9;
 		nfhd_read_write(dev->id, 0, dir, sec >> shift, len >> shift,
-				bvec_to_phys(bvec));
+				bvec_to_phys(&bvec));
 		sec += len;
 	}
 	bio_endio(bio, 0);
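
The nfblock conversion above follows the 3.14 immutable-biovec API: bio_for_each_segment() now yields a struct bio_vec by value while walking a separate struct bvec_iter, and the starting sector lives in bio->bi_iter.bi_sector. A small sketch of the new iteration idiom (function name is hypothetical):

#include <linux/bio.h>

/* Illustrative only: the post-3.14 biovec walk used by the driver above. */
static void sketch_walk_bio(struct bio *bio)
{
	struct bio_vec bvec;	/* copied by value on each iteration */
	struct bvec_iter iter;
	sector_t sec = bio->bi_iter.bi_sector;

	bio_for_each_segment(bvec, bio, iter) {
		/* helpers wanting a pointer now take &bvec, e.g. bvec_to_phys(&bvec) */
		sec += bvec.bv_len >> 9;
	}
}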
diff --git a/arch/microblaze/include/asm/delay.h b/arch/microblaze/include/asm/delay.h
index 05b7d39..66fc24c 100644
--- a/arch/microblaze/include/asm/delay.h
+++ b/arch/microblaze/include/asm/delay.h
@@ -13,6 +13,8 @@
 #ifndef _ASM_MICROBLAZE_DELAY_H
 #define _ASM_MICROBLAZE_DELAY_H
 
+#include <linux/param.h>
+
 extern inline void __delay(unsigned long loops)
 {
 	asm volatile ("# __delay		\n\t"		\
diff --git a/arch/microblaze/include/asm/io.h b/arch/microblaze/include/asm/io.h
index a2cea72..3fbb7f1 100644
--- a/arch/microblaze/include/asm/io.h
+++ b/arch/microblaze/include/asm/io.h
@@ -89,6 +89,11 @@
 {
 	return le32_to_cpu(*(volatile unsigned int __force *)addr);
 }
+#define readq readq
+static inline u64 readq(const volatile void __iomem *addr)
+{
+	return le64_to_cpu(__raw_readq(addr));
+}
 static inline void writeb(unsigned char v, volatile void __iomem *addr)
 {
 	*(volatile unsigned char __force *)addr = v;
@@ -101,6 +106,7 @@
 {
 	*(volatile unsigned int __force *)addr = cpu_to_le32(v);
 }
+#define writeq(b, addr) __raw_writeq(cpu_to_le64(b), addr)
 
 /* ioread and iowrite variants. thease are for now same as __raw_
  * variants of accessors. we might check for endianess in the feature
diff --git a/arch/microblaze/kernel/head.S b/arch/microblaze/kernel/head.S
index b7fb043..17645b2 100644
--- a/arch/microblaze/kernel/head.S
+++ b/arch/microblaze/kernel/head.S
@@ -66,7 +66,7 @@
 	mts	rmsr, r0
 /* Disable stack protection from bootloader */
 	mts	rslr, r0
-	addi	r8, r0, 0xFFFFFFF
+	addi	r8, r0, 0xFFFFFFFF
 	mts	rshr, r8
 /*
  * According to Xilinx, msrclr instruction behaves like 'mfs rX,rpc'
diff --git a/arch/mips/Kconfig b/arch/mips/Kconfig
index c02f1c0..dcae3a7 100644
--- a/arch/mips/Kconfig
+++ b/arch/mips/Kconfig
@@ -116,7 +116,6 @@
 	select CEVT_R4K
 	select CSRC_R4K
 	select DMA_NONCOHERENT
-	select FW_CFE
 	select HW_HAS_PCI
 	select IRQ_CPU
 	select SYS_HAS_CPU_MIPS32_R1
@@ -124,6 +123,7 @@
 	select SYS_SUPPORTS_32BIT_KERNEL
 	select SYS_SUPPORTS_LITTLE_ENDIAN
 	select SYS_HAS_EARLY_PRINTK
+	select EARLY_PRINTK_8250 if EARLY_PRINTK
 	help
 	 Support for BCM47XX based boards
 
@@ -134,14 +134,13 @@
 	select CSRC_R4K
 	select DMA_NONCOHERENT
 	select IRQ_CPU
-	select SYS_HAS_CPU_MIPS32_R1
-	select SYS_HAS_CPU_BMIPS4350 if !BCM63XX_CPU_6338 && !BCM63XX_CPU_6345 && !BCM63XX_CPU_6348
 	select SYS_SUPPORTS_32BIT_KERNEL
 	select SYS_SUPPORTS_BIG_ENDIAN
 	select SYS_HAS_EARLY_PRINTK
 	select SWAP_IO_SPACE
 	select ARCH_REQUIRE_GPIOLIB
 	select HAVE_CLK
+	select MIPS_L1_CACHE_SHIFT_4
 	help
 	 Support for BCM63XX based boards
 
@@ -186,6 +185,7 @@
 	select SYS_SUPPORTS_128HZ
 	select SYS_SUPPORTS_256HZ
 	select SYS_SUPPORTS_1024HZ
+	select MIPS_L1_CACHE_SHIFT_4
 	help
 	  This enables support for DEC's MIPS based workstations.  For details
 	  see the Linux/MIPS FAQ on <http://www.linux-mips.org/> and the
@@ -305,7 +305,7 @@
 	select CEVT_R4K
 	select CSRC_R4K
 	select CSRC_GIC
-	select DMA_NONCOHERENT
+	select DMA_MAYBE_COHERENT
 	select GENERIC_ISA_DMA
 	select HAVE_PCSPKR_PLATFORM
 	select IRQ_CPU
@@ -324,7 +324,6 @@
 	select SYS_HAS_CPU_MIPS64_R2
 	select SYS_HAS_CPU_NEVADA
 	select SYS_HAS_CPU_RM7000
-	select SYS_HAS_EARLY_PRINTK
 	select SYS_SUPPORTS_32BIT_KERNEL
 	select SYS_SUPPORTS_64BIT_KERNEL
 	select SYS_SUPPORTS_BIG_ENDIAN
@@ -349,6 +348,7 @@
 	select DMA_NONCOHERENT
 	select IRQ_CPU
 	select IRQ_GIC
+	select LIBFDT
 	select MIPS_MSC
 	select SYS_HAS_CPU_MIPS32_R1
 	select SYS_HAS_CPU_MIPS32_R2
@@ -471,6 +471,7 @@
 	select SYS_SUPPORTS_32BIT_KERNEL
 	select SYS_SUPPORTS_64BIT_KERNEL
 	select SYS_SUPPORTS_BIG_ENDIAN
+	select MIPS_L1_CACHE_SHIFT_7
 	help
 	  This are the SGI Indy, Challenge S and Indigo2, as well as certain
 	  OEM variants like the Tandem CMN B006S. To compile a Linux kernel
@@ -491,6 +492,7 @@
 	select SYS_SUPPORTS_BIG_ENDIAN
 	select SYS_SUPPORTS_NUMA
 	select SYS_SUPPORTS_SMP
+	select MIPS_L1_CACHE_SHIFT_7
 	help
 	  This are the SGI Origin 200, Origin 2000 and Onyx 2 Graphics
 	  workstations.  To compile a Linux kernel that runs on these, say Y
@@ -697,6 +699,7 @@
 	select SWAP_IO_SPACE
 	select BOOT_RAW
 	select ARCH_REQUIRE_GPIOLIB
+	select MIPS_L1_CACHE_SHIFT_4
 	help
 	  Support the Mikrotik(tm) RouterBoard 532 series,
 	  based on the IDT RC32434 SoC.
@@ -779,6 +782,7 @@
 	select CEVT_R4K
 	select CSRC_R4K
 	select IRQ_CPU
+	select ARCH_SUPPORTS_MSI
 	select ZONE_DMA32 if 64BIT
 	select SYNC_R4K
 	select SYS_HAS_EARLY_PRINTK
@@ -897,6 +901,10 @@
 config ARCH_DMA_ADDR_T_64BIT
 	def_bool (HIGHMEM && 64BIT_PHYS_ADDR) || 64BIT
 
+config DMA_MAYBE_COHERENT
+	select DMA_NONCOHERENT
+	bool
+
 config DMA_COHERENT
 	bool
 
@@ -1091,11 +1099,24 @@
 config BOOT_ELF32
 	bool
 
+config MIPS_L1_CACHE_SHIFT_4
+	bool
+
+config MIPS_L1_CACHE_SHIFT_5
+	bool
+
+config MIPS_L1_CACHE_SHIFT_6
+	bool
+
+config MIPS_L1_CACHE_SHIFT_7
+	bool
+
 config MIPS_L1_CACHE_SHIFT
 	int
-	default "4" if MACH_DECSTATION || MIKROTIK_RB532 || PMC_MSP4200_EVAL || SOC_RT288X
-	default "6" if MIPS_CPU_SCACHE
-	default "7" if SGI_IP22 || SGI_IP27 || SGI_IP28 || SNI_RM || CPU_CAVIUM_OCTEON
+	default "4" if MIPS_L1_CACHE_SHIFT_4
+	default "5" if MIPS_L1_CACHE_SHIFT_5
+	default "6" if MIPS_L1_CACHE_SHIFT_6
+	default "7" if MIPS_L1_CACHE_SHIFT_7
 	default "5"
 
 config HAVE_STD_PC_SERIAL_PORT
@@ -1375,47 +1396,31 @@
 	select LIBFDT
 	select USE_OF
 	select USB_EHCI_BIG_ENDIAN_MMIO
+	select SYS_HAS_DMA_OPS
+	select MIPS_L1_CACHE_SHIFT_7
 	help
 	  The Cavium Octeon processor is a highly integrated chip containing
 	  many ethernet hardware widgets for networking tasks. The processor
 	  can have up to 16 Mips64v2 cores and 8 integrated gigabit ethernets.
 	  Full details can be found at http://www.caviumnetworks.com.
 
-config CPU_BMIPS3300
-	bool "BMIPS3300"
-	depends on SYS_HAS_CPU_BMIPS3300
-	select CPU_BMIPS
-	help
-	  Broadcom BMIPS3300 processors.
-
-config CPU_BMIPS4350
-	bool "BMIPS4350"
-	depends on SYS_HAS_CPU_BMIPS4350
-	select CPU_BMIPS
-	select SYS_SUPPORTS_SMP
-	select SYS_SUPPORTS_HOTPLUG_CPU
-	help
-	  Broadcom BMIPS4350 ("VIPER") processors.
-
-config CPU_BMIPS4380
-	bool "BMIPS4380"
-	depends on SYS_HAS_CPU_BMIPS4380
-	select CPU_BMIPS
-	select SYS_SUPPORTS_SMP
-	select SYS_SUPPORTS_HOTPLUG_CPU
-	help
-	  Broadcom BMIPS4380 processors.
-
-config CPU_BMIPS5000
-	bool "BMIPS5000"
-	depends on SYS_HAS_CPU_BMIPS5000
-	select CPU_BMIPS
+config CPU_BMIPS
+	bool "Broadcom BMIPS"
+	depends on SYS_HAS_CPU_BMIPS
+	select CPU_MIPS32
+	select CPU_BMIPS32_3300 if SYS_HAS_CPU_BMIPS32_3300
+	select CPU_BMIPS4350 if SYS_HAS_CPU_BMIPS4350
+	select CPU_BMIPS4380 if SYS_HAS_CPU_BMIPS4380
+	select CPU_BMIPS5000 if SYS_HAS_CPU_BMIPS5000
+	select CPU_SUPPORTS_32BIT_KERNEL
+	select DMA_NONCOHERENT
+	select IRQ_CPU
+	select SWAP_IO_SPACE
+	select WEAK_ORDERING
 	select CPU_SUPPORTS_HIGHMEM
-	select MIPS_CPU_SCACHE
-	select SYS_SUPPORTS_SMP
-	select SYS_SUPPORTS_HOTPLUG_CPU
+	select CPU_HAS_PREFETCH
 	help
-	  Broadcom BMIPS5000 processors.
+	  Support for BMIPS32/3300/4350/4380 and BMIPS5000 processors.
 
 config CPU_XLR
 	bool "Netlogic XLR SoC"
@@ -1498,14 +1503,25 @@
 	select CPU_SUPPORTS_32BIT_KERNEL
 	select CPU_SUPPORTS_HIGHMEM
 
-config CPU_BMIPS
+config CPU_BMIPS32_3300
+	select SMP_UP if SMP
 	bool
-	select CPU_MIPS32
-	select CPU_SUPPORTS_32BIT_KERNEL
-	select DMA_NONCOHERENT
-	select IRQ_CPU
-	select SWAP_IO_SPACE
-	select WEAK_ORDERING
+
+config CPU_BMIPS4350
+	bool
+	select SYS_SUPPORTS_SMP
+	select SYS_SUPPORTS_HOTPLUG_CPU
+
+config CPU_BMIPS4380
+	bool
+	select SYS_SUPPORTS_SMP
+	select SYS_SUPPORTS_HOTPLUG_CPU
+
+config CPU_BMIPS5000
+	bool
+	select MIPS_CPU_SCACHE
+	select SYS_SUPPORTS_SMP
+	select SYS_SUPPORTS_HOTPLUG_CPU
 
 config SYS_HAS_CPU_LOONGSON2E
 	bool
@@ -1579,17 +1595,24 @@
 config SYS_HAS_CPU_CAVIUM_OCTEON
 	bool
 
-config SYS_HAS_CPU_BMIPS3300
+config SYS_HAS_CPU_BMIPS
 	bool
 
+config SYS_HAS_CPU_BMIPS32_3300
+	bool
+	select SYS_HAS_CPU_BMIPS
+
 config SYS_HAS_CPU_BMIPS4350
 	bool
+	select SYS_HAS_CPU_BMIPS
 
 config SYS_HAS_CPU_BMIPS4380
 	bool
+	select SYS_HAS_CPU_BMIPS
 
 config SYS_HAS_CPU_BMIPS5000
 	bool
+	select SYS_HAS_CPU_BMIPS
 
 config SYS_HAS_CPU_XLR
 	bool
@@ -1797,6 +1820,7 @@
 config MIPS_CPU_SCACHE
 	bool
 	select BOARD_SCACHE
+	select MIPS_L1_CACHE_SHIFT_6
 
 config R5000_CPU_SCACHE
 	bool
@@ -1833,59 +1857,48 @@
 	prompt "MIPS MT options"
 
 config MIPS_MT_DISABLED
-	bool "Disable multithreading support."
+	bool "Disable multithreading support"
 	help
-	  Use this option if your workload can't take advantage of
-	  MIPS hardware multithreading support.  On systems that don't have
-	  the option of an MT-enabled processor this option will be the only
-	  option in this menu.
+	  Use this option if your platform does not support the MT ASE,
+	  i.e. hardware multithreading. On systems without an
+	  MT-enabled processor, this will be the only option
+	  available in this menu.
 
 config MIPS_MT_SMP
 	bool "Use 1 TC on each available VPE for SMP"
 	depends on SYS_SUPPORTS_MULTITHREADING
 	select CPU_MIPSR2_IRQ_VI
 	select CPU_MIPSR2_IRQ_EI
+	select SYNC_R4K
 	select MIPS_MT
 	select SMP
-	select SYS_SUPPORTS_SCHED_SMT if SMP
-	select SYS_SUPPORTS_SMP
 	select SMP_UP
+	select SYS_SUPPORTS_SMP
+	select SYS_SUPPORTS_SCHED_SMT
 	select MIPS_PERF_SHARED_TC_COUNTERS
 	help
-	  This is a kernel model which is known a VSMP but lately has been
-	  marketesed into SMVP.
-	  Virtual SMP uses the processor's VPEs  to implement virtual
-	  processors. In currently available configuration of the 34K processor
-	  this allows for a dual processor. Both processors will share the same
-	  primary caches; each will obtain the half of the TLB for it's own
-	  exclusive use. For a layman this model can be described as similar to
-	  what Intel calls Hyperthreading.
-
-	  For further information see http://www.linux-mips.org/wiki/34K#VSMP
+	  This is a kernel model which is known as SMVP. This is supported
+	  on cores with the MT ASE and uses the available VPEs to implement
+	  virtual processors which support SMP. This is equivalent to the
+	  Intel Hyperthreading feature. For further information go to
+	  <http://www.imgtec.com/mips/mips-multithreading.asp>.
 
 config MIPS_MT_SMTC
-	bool "SMTC: Use all TCs on all VPEs for SMP"
+	bool "Use all TCs on all VPEs for SMP (DEPRECATED)"
 	depends on CPU_MIPS32_R2
-	#depends on CPU_MIPS64_R2		# once there is hardware ...
 	depends on SYS_SUPPORTS_MULTITHREADING
 	select CPU_MIPSR2_IRQ_VI
 	select CPU_MIPSR2_IRQ_EI
 	select MIPS_MT
-	select NR_CPUS_DEFAULT_8
 	select SMP
-	select SYS_SUPPORTS_SMP
 	select SMP_UP
+	select SYS_SUPPORTS_SMP
+	select NR_CPUS_DEFAULT_8
 	help
-	  This is a kernel model which is known a SMTC or lately has been
-	  marketesed into SMVP.
-	  is presenting the available TC's of the core as processors to Linux.
-	  On currently available 34K processors this means a Linux system will
-	  see up to 5 processors. The implementation of the SMTC kernel differs
-	  significantly from VSMP and cannot efficiently coexist in the same
-	  kernel binary so the choice between VSMP and SMTC is a compile time
-	  decision.
-
-	  For further information see http://www.linux-mips.org/wiki/34K#SMTC
+	  This is a kernel model which is known as SMTC. This is
+	  supported on cores with the MT ASE and presents all TCs
+	  available on all VPEs to support SMP. For further
+	  information see <http://www.linux-mips.org/wiki/34K#SMTC>.
 
 endchoice
 
@@ -1922,6 +1935,16 @@
 	  Includes a loader for loading an elf relocatable object
 	  onto another VPE and running it.
 
+config MIPS_VPE_LOADER_CMP
+	bool
+	default "y"
+	depends on MIPS_VPE_LOADER && MIPS_CMP
+
+config MIPS_VPE_LOADER_MT
+	bool
+	default "y"
+	depends on MIPS_VPE_LOADER && !MIPS_CMP
+
 config MIPS_MT_SMTC_IM_BACKSTOP
 	bool "Use per-TC register bits as backstop for inhibited IM bits"
 	depends on MIPS_MT_SMTC
@@ -1955,24 +1978,29 @@
 	  you to ensure the amount you put in the option and the space your
 	  program requires is less or equal to the amount physically present.
 
-# this should possibly be in drivers/char, but it is rather cpu related. Hmmm
 config MIPS_VPE_APSP_API
 	bool "Enable support for AP/SP API (RTLX)"
 	depends on MIPS_VPE_LOADER
 	help
 
+config MIPS_VPE_APSP_API_CMP
+	bool
+	default "y"
+	depends on MIPS_VPE_APSP_API && MIPS_CMP
+
+config MIPS_VPE_APSP_API_MT
+	bool
+	default "y"
+	depends on MIPS_VPE_APSP_API && !MIPS_CMP
+
 config MIPS_CMP
-	bool "MIPS CMP framework support"
-	depends on SYS_SUPPORTS_MIPS_CMP
-	select SMP
+	bool "MIPS CMP support"
+	depends on SYS_SUPPORTS_MIPS_CMP && MIPS_MT_SMP
 	select SYNC_R4K
-	select SYS_SUPPORTS_SMP
-	select SYS_SUPPORTS_SCHED_SMT if SMP
 	select WEAK_ORDERING
 	default n
 	help
-	  This is a placeholder option for the GCMP work. It will need to
-	  be handled differently...
+	  Enable Coherency Manager processor (CMP) support.
 
 config SB1_PASS_1_WORKAROUNDS
 	bool
@@ -2324,6 +2352,23 @@
 
 	  If unsure, say Y. Only embedded should say N here.
 
+config MIPS_O32_FP64_SUPPORT
+	bool "Support for O32 binaries using 64-bit FP"
+	depends on 32BIT || MIPS32_O32
+	default y
+	help
+	  When this is enabled, the kernel will support use of 64-bit floating
+	  point registers with binaries using the O32 ABI along with the
+	  EF_MIPS_FP64 ELF header flag (typically built with -mfp64). On
+	  32-bit MIPS systems this support is at the cost of increasing the
+	  size and complexity of the compiled FPU emulator. Thus if you are
+	  running a MIPS32 system and know that none of your userland binaries
+	  will require 64-bit floating point, you may wish to reduce the size
+	  of your kernel & potentially improve FP emulation performance by
+	  saying N here.
+
+	  If unsure, say Y.
+
 config USE_OF
 	bool
 	select OF
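
Note on the new MIPS_O32_FP64_SUPPORT option above: the EF_MIPS_FP64 ELF header
flag it mentions is what -mfp64 toolchains set in e_flags. As a rough
illustration (not part of this patch), a host-side check for that flag on a
32-bit MIPS binary could look like the sketch below; the constant value is
taken from the MIPS ABI and is defined locally in case the system elf.h lacks
it, and the file name handling is only an example.

	/* fp64check.c - report whether a 32-bit MIPS ELF requests 64-bit FP (sketch) */
	#include <elf.h>
	#include <stdio.h>

	#ifndef EF_MIPS_FP64
	#define EF_MIPS_FP64 0x00000200	/* assumed value, from the MIPS ABI */
	#endif

	int main(int argc, char **argv)
	{
		Elf32_Ehdr ehdr;
		FILE *f;

		if (argc != 2) {
			fprintf(stderr, "usage: %s <elf-file>\n", argv[0]);
			return 1;
		}
		f = fopen(argv[1], "rb");
		if (!f || fread(&ehdr, sizeof(ehdr), 1, f) != 1) {
			perror(argv[1]);
			return 1;
		}
		/* e_flags carries the MIPS ABI flags, including EF_MIPS_FP64 */
		printf("%s: %s\n", argv[1],
		       (ehdr.e_flags & EF_MIPS_FP64) ? "uses 64-bit FP (-mfp64)"
						     : "uses 32-bit FP");
		fclose(f);
		return 0;
	}
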
diff --git a/arch/mips/Makefile b/arch/mips/Makefile
index efe50787..9b8556d 100644
--- a/arch/mips/Makefile
+++ b/arch/mips/Makefile
@@ -114,7 +114,7 @@
 cflags-$(CONFIG_CPU_LITTLE_ENDIAN)	+= $(shell $(CC) -dumpmachine |grep -q 'mips.*el-.*' || echo -EL $(undef-all) $(predef-le))
 
 cflags-$(CONFIG_CPU_HAS_SMARTMIPS)	+= $(call cc-option,-msmartmips)
-cflags-$(CONFIG_CPU_MICROMIPS) += $(call cc-option,-mmicromips -mno-jals)
+cflags-$(CONFIG_CPU_MICROMIPS) += $(call cc-option,-mmicromips)
 
 cflags-$(CONFIG_SB1XXX_CORELIS)	+= $(call cc-option,-mno-sched-prolog) \
 				   -fno-omit-frame-pointer
diff --git a/arch/mips/alchemy/common/power.c b/arch/mips/alchemy/common/power.c
index 0c7fce2..bdb28dee 100644
--- a/arch/mips/alchemy/common/power.c
+++ b/arch/mips/alchemy/common/power.c
@@ -29,7 +29,6 @@
  *  675 Mass Ave, Cambridge, MA 02139, USA.
  */
 
-#include <linux/init.h>
 #include <linux/pm.h>
 #include <linux/sysctl.h>
 #include <linux/jiffies.h>
diff --git a/arch/mips/alchemy/devboards/db1000.c b/arch/mips/alchemy/devboards/db1000.c
index 11f3ad2..5483906 100644
--- a/arch/mips/alchemy/devboards/db1000.c
+++ b/arch/mips/alchemy/devboards/db1000.c
@@ -534,13 +534,10 @@
 		s0 = AU1100_GPIO1_INT;
 		s1 = AU1100_GPIO4_INT;
 
+		gpio_request(19, "sd0_cd");
+		gpio_request(20, "sd1_cd");
 		gpio_direction_input(19);	/* sd0 cd# */
 		gpio_direction_input(20);	/* sd1 cd# */
-		gpio_direction_input(21);	/* touch pendown# */
-		gpio_direction_input(207);	/* SPI MISO */
-		gpio_direction_output(208, 0);	/* SPI MOSI */
-		gpio_direction_output(209, 1);	/* SPI SCK */
-		gpio_direction_output(210, 1);	/* SPI CS# */
 
 		/* spi_gpio on SSI0 pins */
 		pfc = __raw_readl((void __iomem *)SYS_PINFUNC);
diff --git a/arch/mips/ar7/time.c b/arch/mips/ar7/time.c
index 22c9321..1dc6c3b 100644
--- a/arch/mips/ar7/time.c
+++ b/arch/mips/ar7/time.c
@@ -18,7 +18,6 @@
  * Setting up the clock on the MIPS boards.
  */
 
-#include <linux/init.h>
 #include <linux/time.h>
 #include <linux/err.h>
 #include <linux/clk.h>
diff --git a/arch/mips/ath79/common.h b/arch/mips/ath79/common.h
index 648d2da..a312071 100644
--- a/arch/mips/ath79/common.h
+++ b/arch/mips/ath79/common.h
@@ -15,7 +15,6 @@
 #define __ATH79_COMMON_H
 
 #include <linux/types.h>
-#include <linux/init.h>
 
 #define ATH79_MEM_SIZE_MIN	(2 * 1024 * 1024)
 #define ATH79_MEM_SIZE_MAX	(128 * 1024 * 1024)
diff --git a/arch/mips/bcm47xx/Kconfig b/arch/mips/bcm47xx/Kconfig
index 2b8b118..09cb6f7 100644
--- a/arch/mips/bcm47xx/Kconfig
+++ b/arch/mips/bcm47xx/Kconfig
@@ -2,6 +2,7 @@
 
 config BCM47XX_SSB
 	bool "SSB Support for Broadcom BCM47XX"
+	select SYS_HAS_CPU_BMIPS32_3300
 	select SSB
 	select SSB_DRIVER_MIPS
 	select SSB_DRIVER_EXTIF
@@ -11,6 +12,7 @@
 	select SSB_PCICORE_HOSTMODE if PCI
 	select SSB_DRIVER_GPIO
 	select GPIOLIB
+	select LEDS_GPIO_REGISTER
 	default y
 	help
 	 Add support for old Broadcom BCM47xx boards with Sonics Silicon Backplane support.
@@ -20,6 +22,7 @@
 config BCM47XX_BCMA
 	bool "BCMA Support for Broadcom BCM47XX"
 	select SYS_HAS_CPU_MIPS32_R2
+	select CPU_MIPSR2_IRQ_VI
 	select BCMA
 	select BCMA_HOST_SOC
 	select BCMA_DRIVER_MIPS
@@ -27,6 +30,7 @@
 	select BCMA_DRIVER_PCI_HOSTMODE if PCI
 	select BCMA_DRIVER_GPIO
 	select GPIOLIB
+	select LEDS_GPIO_REGISTER
 	default y
 	help
 	 Add support for new Broadcom BCM47xx boards with Broadcom specific Advanced Microcontroller Bus.
diff --git a/arch/mips/bcm47xx/Makefile b/arch/mips/bcm47xx/Makefile
index c52daf9..4688b6a 100644
--- a/arch/mips/bcm47xx/Makefile
+++ b/arch/mips/bcm47xx/Makefile
@@ -4,5 +4,4 @@
 #
 
 obj-y				+= irq.o nvram.o prom.o serial.o setup.o time.o sprom.o
-obj-y				+= board.o
-obj-$(CONFIG_BCM47XX_SSB)	+= wgt634u.o
+obj-y				+= board.o buttons.o leds.o
diff --git a/arch/mips/bcm47xx/bcm47xx_private.h b/arch/mips/bcm47xx/bcm47xx_private.h
new file mode 100644
index 0000000..5c94ace
--- /dev/null
+++ b/arch/mips/bcm47xx/bcm47xx_private.h
@@ -0,0 +1,12 @@
+#ifndef LINUX_BCM47XX_PRIVATE_H_
+#define LINUX_BCM47XX_PRIVATE_H_
+
+#include <linux/kernel.h>
+
+/* buttons.c */
+int __init bcm47xx_buttons_register(void);
+
+/* leds.c */
+void __init bcm47xx_leds_register(void);
+
+#endif
diff --git a/arch/mips/bcm47xx/board.c b/arch/mips/bcm47xx/board.c
index f3f6bfe..6d612e2 100644
--- a/arch/mips/bcm47xx/board.c
+++ b/arch/mips/bcm47xx/board.c
@@ -36,26 +36,32 @@
 struct bcm47xx_board_type_list1 bcm47xx_board_list_model_name[] __initconst = {
 	{{BCM47XX_BOARD_DLINK_DIR130, "D-Link DIR-130"}, "DIR-130"},
 	{{BCM47XX_BOARD_DLINK_DIR330, "D-Link DIR-330"}, "DIR-330"},
-	{ {0}, 0},
+	{ {0}, NULL},
 };
 
 /* model_no */
 static const
 struct bcm47xx_board_type_list1 bcm47xx_board_list_model_no[] __initconst = {
 	{{BCM47XX_BOARD_ASUS_WL700GE, "Asus WL700"}, "WL700"},
-	{ {0}, 0},
+	{ {0}, NULL},
 };
 
 /* machine_name */
 static const
 struct bcm47xx_board_type_list1 bcm47xx_board_list_machine_name[] __initconst = {
 	{{BCM47XX_BOARD_LINKSYS_WRTSL54GS, "Linksys WRTSL54GS"}, "WRTSL54GS"},
-	{ {0}, 0},
+	{ {0}, NULL},
 };
 
 /* hardware_version */
 static const
 struct bcm47xx_board_type_list1 bcm47xx_board_list_hardware_version[] __initconst = {
+	{{BCM47XX_BOARD_ASUS_RTN10U, "Asus RT-N10U"}, "RTN10U"},
+	{{BCM47XX_BOARD_ASUS_RTN12, "Asus RT-N12"}, "RT-N12"},
+	{{BCM47XX_BOARD_ASUS_RTN12B1, "Asus RT-N12B1"}, "RTN12B1"},
+	{{BCM47XX_BOARD_ASUS_RTN12C1, "Asus RT-N12C1"}, "RTN12C1"},
+	{{BCM47XX_BOARD_ASUS_RTN12D1, "Asus RT-N12D1"}, "RTN12D1"},
+	{{BCM47XX_BOARD_ASUS_RTN12HP, "Asus RT-N12HP"}, "RTN12HP"},
 	{{BCM47XX_BOARD_ASUS_RTN16, "Asus RT-N16"}, "RT-N16-"},
 	{{BCM47XX_BOARD_ASUS_WL320GE, "Asus WL320GE"}, "WL320G-"},
 	{{BCM47XX_BOARD_ASUS_WL330GE, "Asus WL330GE"}, "WL330GE-"},
@@ -66,7 +72,7 @@
 	{{BCM47XX_BOARD_ASUS_WL520GC, "Asus WL520GC"}, "WL520GC-"},
 	{{BCM47XX_BOARD_ASUS_WL520GU, "Asus WL520GU"}, "WL520GU-"},
 	{{BCM47XX_BOARD_BELKIN_F7D4301, "Belkin F7D4301"}, "F7D4301"},
-	{ {0}, 0},
+	{ {0}, NULL},
 };
 
 /* productid */
@@ -75,19 +81,13 @@
 	{{BCM47XX_BOARD_ASUS_RTAC66U, "Asus RT-AC66U"}, "RT-AC66U"},
 	{{BCM47XX_BOARD_ASUS_RTN10, "Asus RT-N10"}, "RT-N10"},
 	{{BCM47XX_BOARD_ASUS_RTN10D, "Asus RT-N10D"}, "RT-N10D"},
-	{{BCM47XX_BOARD_ASUS_RTN10U, "Asus RT-N10U"}, "RT-N10U"},
-	{{BCM47XX_BOARD_ASUS_RTN12, "Asus RT-N12"}, "RT-N12"},
-	{{BCM47XX_BOARD_ASUS_RTN12B1, "Asus RT-N12B1"}, "RT-N12B1"},
-	{{BCM47XX_BOARD_ASUS_RTN12C1, "Asus RT-N12C1"}, "RT-N12C1"},
-	{{BCM47XX_BOARD_ASUS_RTN12D1, "Asus RT-N12D1"}, "RT-N12D1"},
-	{{BCM47XX_BOARD_ASUS_RTN12HP, "Asus RT-N12HP"}, "RT-N12HP"},
 	{{BCM47XX_BOARD_ASUS_RTN15U, "Asus RT-N15U"}, "RT-N15U"},
 	{{BCM47XX_BOARD_ASUS_RTN16, "Asus RT-N16"}, "RT-N16"},
 	{{BCM47XX_BOARD_ASUS_RTN53, "Asus RT-N53"}, "RT-N53"},
 	{{BCM47XX_BOARD_ASUS_RTN66U, "Asus RT-N66U"}, "RT-N66U"},
 	{{BCM47XX_BOARD_ASUS_WL300G, "Asus WL300G"}, "WL300g"},
 	{{BCM47XX_BOARD_ASUS_WLHDD, "Asus WLHDD"}, "WLHDD"},
-	{ {0}, 0},
+	{ {0}, NULL},
 };
 
 /* ModelId */
@@ -97,7 +97,7 @@
 	{{BCM47XX_BOARD_MOTOROLA_WE800G, "Motorola WE800G"}, "WE800G"},
 	{{BCM47XX_BOARD_MOTOROLA_WR850GP, "Motorola WR850GP"}, "WR850GP"},
 	{{BCM47XX_BOARD_MOTOROLA_WR850GV2V3, "Motorola WR850G"}, "WR850G"},
-	{ {0}, 0},
+	{ {0}, NULL},
 };
 
 /* melco_id or buf1falo_id */
@@ -112,7 +112,7 @@
 	{{BCM47XX_BOARD_BUFFALO_WZR_G300N, "Buffalo WZR-G300N"}, "31120"},
 	{{BCM47XX_BOARD_BUFFALO_WZR_RS_G54, "Buffalo WZR-RS-G54"}, "30083"},
 	{{BCM47XX_BOARD_BUFFALO_WZR_RS_G54HP, "Buffalo WZR-RS-G54HP"}, "30103"},
-	{ {0}, 0},
+	{ {0}, NULL},
 };
 
 /* boot_hw_model, boot_hw_ver */
@@ -143,7 +143,7 @@
 	{{BCM47XX_BOARD_LINKSYS_WRT54G3GV2, "Linksys WRT54G3GV2-VF"}, "WRT54G3GV2-VF", "1.0"},
 	{{BCM47XX_BOARD_LINKSYS_WRT610NV1, "Linksys WRT610N V1"}, "WRT610N", "1.0"},
 	{{BCM47XX_BOARD_LINKSYS_WRT610NV2, "Linksys WRT610N V2"}, "WRT610N", "2.0"},
-	{ {0}, 0},
+	{ {0}, NULL},
 };
 
 /* board_id */
@@ -165,7 +165,7 @@
 	{{BCM47XX_BOARD_NETGEAR_WNR3500V2, "Netgear WNR3500 V2"}, "U12H127T00_NETGEAR"},
 	{{BCM47XX_BOARD_NETGEAR_WNR3500V2VC, "Netgear WNR3500 V2vc"}, "U12H127T70_NETGEAR"},
 	{{BCM47XX_BOARD_NETGEAR_WNR834BV2, "Netgear WNR834B V2"}, "U12H081T00_NETGEAR"},
-	{ {0}, 0},
+	{ {0}, NULL},
 };
 
 /* boardtype, boardnum, boardrev */
@@ -174,7 +174,9 @@
 	{{BCM47XX_BOARD_HUAWEI_E970, "Huawei E970"}, "0x048e", "0x5347", "0x11"},
 	{{BCM47XX_BOARD_PHICOMM_M1, "Phicomm M1"}, "0x0590", "80", "0x1104"},
 	{{BCM47XX_BOARD_ZTE_H218N, "ZTE H218N"}, "0x053d", "1234", "0x1305"},
-	{ {0}, 0},
+	{{BCM47XX_BOARD_NETGEAR_WNR3500L, "Netgear WNR3500L"}, "0x04CF", "3500", "02"},
+	{{BCM47XX_BOARD_LINKSYS_WRT54GSV1, "Linksys WRT54GS V1"}, "0x0101", "42", "0x10"},
+	{ {0}, NULL},
 };
 
 static const
diff --git a/arch/mips/bcm47xx/buttons.c b/arch/mips/bcm47xx/buttons.c
new file mode 100644
index 0000000..872c62e
--- /dev/null
+++ b/arch/mips/bcm47xx/buttons.c
@@ -0,0 +1,531 @@
+#include "bcm47xx_private.h"
+
+#include <linux/input.h>
+#include <linux/gpio_keys.h>
+#include <linux/interrupt.h>
+#include <bcm47xx_board.h>
+#include <bcm47xx.h>
+
+/**************************************************
+ * Database
+ **************************************************/
+
+#define BCM47XX_GPIO_KEY(_gpio, _code)					\
+	{								\
+		.code		= _code,				\
+		.gpio		= _gpio,				\
+		.active_low	= 1,					\
+	}
+
+/* Asus */
+
+static const struct gpio_keys_button
+bcm47xx_buttons_asus_rtn12[] __initconst = {
+	BCM47XX_GPIO_KEY(0, KEY_WPS_BUTTON),
+	BCM47XX_GPIO_KEY(1, KEY_RESTART),
+	BCM47XX_GPIO_KEY(4, BTN_0), /* Router mode */
+	BCM47XX_GPIO_KEY(5, BTN_1), /* Repeater mode */
+	BCM47XX_GPIO_KEY(6, BTN_2), /* AP mode */
+};
+
+static const struct gpio_keys_button
+bcm47xx_buttons_asus_rtn16[] __initconst = {
+	BCM47XX_GPIO_KEY(6, KEY_WPS_BUTTON),
+	BCM47XX_GPIO_KEY(8, KEY_RESTART),
+};
+
+static const struct gpio_keys_button
+bcm47xx_buttons_asus_rtn66u[] __initconst = {
+	BCM47XX_GPIO_KEY(4, KEY_WPS_BUTTON),
+	BCM47XX_GPIO_KEY(9, KEY_RESTART),
+};
+
+static const struct gpio_keys_button
+bcm47xx_buttons_asus_wl300g[] __initconst = {
+	BCM47XX_GPIO_KEY(6, KEY_RESTART),
+};
+
+static const struct gpio_keys_button
+bcm47xx_buttons_asus_wl320ge[] __initconst = {
+	BCM47XX_GPIO_KEY(6, KEY_RESTART),
+};
+
+static const struct gpio_keys_button
+bcm47xx_buttons_asus_wl330ge[] __initconst = {
+	BCM47XX_GPIO_KEY(2, KEY_RESTART),
+};
+
+static const struct gpio_keys_button
+bcm47xx_buttons_asus_wl500gd[] __initconst = {
+	BCM47XX_GPIO_KEY(6, KEY_RESTART),
+};
+
+static const struct gpio_keys_button
+bcm47xx_buttons_asus_wl500gpv1[] __initconst = {
+	BCM47XX_GPIO_KEY(0, KEY_RESTART),
+	BCM47XX_GPIO_KEY(4, KEY_WPS_BUTTON),
+};
+
+static const struct gpio_keys_button
+bcm47xx_buttons_asus_wl500gpv2[] __initconst = {
+	BCM47XX_GPIO_KEY(2, KEY_RESTART),
+	BCM47XX_GPIO_KEY(3, KEY_WPS_BUTTON),
+};
+
+static const struct gpio_keys_button
+bcm47xx_buttons_asus_wl500w[] __initconst = {
+	BCM47XX_GPIO_KEY(6, KEY_RESTART),
+	BCM47XX_GPIO_KEY(7, KEY_WPS_BUTTON),
+};
+
+static const struct gpio_keys_button
+bcm47xx_buttons_asus_wl520gc[] __initconst = {
+	BCM47XX_GPIO_KEY(2, KEY_RESTART),
+	BCM47XX_GPIO_KEY(3, KEY_WPS_BUTTON),
+};
+
+static const struct gpio_keys_button
+bcm47xx_buttons_asus_wl520gu[] __initconst = {
+	BCM47XX_GPIO_KEY(2, KEY_RESTART),
+	BCM47XX_GPIO_KEY(3, KEY_WPS_BUTTON),
+};
+
+static const struct gpio_keys_button
+bcm47xx_buttons_asus_wl700ge[] __initconst = {
+	BCM47XX_GPIO_KEY(0, KEY_POWER), /* Hard disk power switch */
+	BCM47XX_GPIO_KEY(4, KEY_WPS_BUTTON), /* EZSetup */
+	BCM47XX_GPIO_KEY(6, KEY_COPY), /* Copy data from USB to internal disk */
+	BCM47XX_GPIO_KEY(7, KEY_RESTART), /* Hard reset */
+};
+
+static const struct gpio_keys_button
+bcm47xx_buttons_asus_wlhdd[] __initconst = {
+	BCM47XX_GPIO_KEY(6, KEY_RESTART),
+};
+
+/* Huawei */
+
+static const struct gpio_keys_button
+bcm47xx_buttons_huawei_e970[] __initconst = {
+	BCM47XX_GPIO_KEY(6, KEY_RESTART),
+};
+
+/* Belkin */
+
+static const struct gpio_keys_button
+bcm47xx_buttons_belkin_f7d4301[] __initconst = {
+	BCM47XX_GPIO_KEY(6, KEY_RESTART),
+	BCM47XX_GPIO_KEY(8, KEY_WPS_BUTTON),
+};
+
+/* Buffalo */
+
+static const struct gpio_keys_button
+bcm47xx_buttons_buffalo_whr2_a54g54[] __initconst = {
+	BCM47XX_GPIO_KEY(4, KEY_RESTART),
+};
+
+static const struct gpio_keys_button
+bcm47xx_buttons_buffalo_whr_g125[] __initconst = {
+	BCM47XX_GPIO_KEY(0, KEY_WPS_BUTTON),
+	BCM47XX_GPIO_KEY(4, KEY_RESTART),
+	BCM47XX_GPIO_KEY(5, BTN_0), /* Router / AP mode switch */
+};
+
+static const struct gpio_keys_button
+bcm47xx_buttons_buffalo_whr_g54s[] __initconst = {
+	BCM47XX_GPIO_KEY(0, KEY_WPS_BUTTON),
+	BCM47XX_GPIO_KEY(4, KEY_RESTART),
+	BCM47XX_GPIO_KEY(5, BTN_0), /* Router / AP mode switch */
+};
+
+static const struct gpio_keys_button
+bcm47xx_buttons_buffalo_whr_hp_g54[] __initconst = {
+	BCM47XX_GPIO_KEY(0, KEY_WPS_BUTTON),
+	BCM47XX_GPIO_KEY(4, KEY_RESTART),
+	BCM47XX_GPIO_KEY(5, BTN_0), /* Router / AP mode switch */
+};
+
+static const struct gpio_keys_button
+bcm47xx_buttons_buffalo_wzr_g300n[] __initconst = {
+	BCM47XX_GPIO_KEY(4, KEY_RESTART),
+};
+
+static const struct gpio_keys_button
+bcm47xx_buttons_buffalo_wzr_rs_g54[] __initconst = {
+	BCM47XX_GPIO_KEY(0, KEY_WPS_BUTTON),
+	BCM47XX_GPIO_KEY(4, KEY_RESTART),
+};
+
+static const struct gpio_keys_button
+bcm47xx_buttons_buffalo_wzr_rs_g54hp[] __initconst = {
+	BCM47XX_GPIO_KEY(0, KEY_WPS_BUTTON),
+	BCM47XX_GPIO_KEY(4, KEY_RESTART),
+};
+
+/* Dell */
+
+static const struct gpio_keys_button
+bcm47xx_buttons_dell_tm2300[] __initconst = {
+	BCM47XX_GPIO_KEY(0, KEY_RESTART),
+};
+
+/* D-Link */
+
+static const struct gpio_keys_button
+bcm47xx_buttons_dlink_dir130[] __initconst = {
+	BCM47XX_GPIO_KEY(3, KEY_RESTART),
+	BCM47XX_GPIO_KEY(7, KEY_UNKNOWN),
+};
+
+static const struct gpio_keys_button
+bcm47xx_buttons_dlink_dir330[] __initconst = {
+	BCM47XX_GPIO_KEY(3, KEY_RESTART),
+	BCM47XX_GPIO_KEY(7, KEY_UNKNOWN),
+};
+
+/* Linksys */
+
+static const struct gpio_keys_button
+bcm47xx_buttons_linksys_e1000v1[] __initconst = {
+	BCM47XX_GPIO_KEY(5, KEY_WPS_BUTTON),
+	BCM47XX_GPIO_KEY(6, KEY_RESTART),
+};
+
+static const struct gpio_keys_button
+bcm47xx_buttons_linksys_e1000v21[] __initconst = {
+	BCM47XX_GPIO_KEY(9, KEY_WPS_BUTTON),
+	BCM47XX_GPIO_KEY(10, KEY_RESTART),
+};
+
+static const struct gpio_keys_button
+bcm47xx_buttons_linksys_e2000v1[] __initconst = {
+	BCM47XX_GPIO_KEY(5, KEY_WPS_BUTTON),
+	BCM47XX_GPIO_KEY(8, KEY_RESTART),
+};
+
+static const struct gpio_keys_button
+bcm47xx_buttons_linksys_e3000v1[] __initconst = {
+	BCM47XX_GPIO_KEY(4, KEY_WPS_BUTTON),
+	BCM47XX_GPIO_KEY(6, KEY_RESTART),
+};
+
+static const struct gpio_keys_button
+bcm47xx_buttons_linksys_e3200v1[] __initconst = {
+	BCM47XX_GPIO_KEY(5, KEY_RESTART),
+	BCM47XX_GPIO_KEY(8, KEY_WPS_BUTTON),
+};
+
+static const struct gpio_keys_button
+bcm47xx_buttons_linksys_e4200v1[] __initconst = {
+	BCM47XX_GPIO_KEY(4, KEY_WPS_BUTTON),
+	BCM47XX_GPIO_KEY(6, KEY_RESTART),
+};
+
+static const struct gpio_keys_button
+bcm47xx_buttons_linksys_wrt150nv1[] __initconst = {
+	BCM47XX_GPIO_KEY(4, KEY_WPS_BUTTON),
+	BCM47XX_GPIO_KEY(6, KEY_RESTART),
+};
+
+static const struct gpio_keys_button
+bcm47xx_buttons_linksys_wrt150nv11[] __initconst = {
+	BCM47XX_GPIO_KEY(4, KEY_WPS_BUTTON),
+	BCM47XX_GPIO_KEY(6, KEY_RESTART),
+};
+
+static const struct gpio_keys_button
+bcm47xx_buttons_linksys_wrt160nv1[] __initconst = {
+	BCM47XX_GPIO_KEY(4, KEY_WPS_BUTTON),
+	BCM47XX_GPIO_KEY(6, KEY_RESTART),
+};
+
+static const struct gpio_keys_button
+bcm47xx_buttons_linksys_wrt160nv3[] __initconst = {
+	BCM47XX_GPIO_KEY(5, KEY_WPS_BUTTON),
+	BCM47XX_GPIO_KEY(6, KEY_RESTART),
+};
+
+static const struct gpio_keys_button
+bcm47xx_buttons_linksys_wrt300nv11[] __initconst = {
+	BCM47XX_GPIO_KEY(4, KEY_UNKNOWN),
+	BCM47XX_GPIO_KEY(6, KEY_RESTART),
+};
+
+static const struct gpio_keys_button
+bcm47xx_buttons_linksys_wrt310nv1[] __initconst = {
+	BCM47XX_GPIO_KEY(6, KEY_RESTART),
+	BCM47XX_GPIO_KEY(8, KEY_UNKNOWN),
+};
+
+static const struct gpio_keys_button
+bcm47xx_buttons_linksys_wrt610nv1[] __initconst = {
+	BCM47XX_GPIO_KEY(6, KEY_RESTART),
+	BCM47XX_GPIO_KEY(8, KEY_WPS_BUTTON),
+};
+
+static const struct gpio_keys_button
+bcm47xx_buttons_linksys_wrt610nv2[] __initconst = {
+	BCM47XX_GPIO_KEY(4, KEY_WPS_BUTTON),
+	BCM47XX_GPIO_KEY(6, KEY_RESTART),
+};
+
+/* Motorola */
+
+static const struct gpio_keys_button
+bcm47xx_buttons_motorola_we800g[] __initconst = {
+	BCM47XX_GPIO_KEY(0, KEY_RESTART),
+};
+
+static const struct gpio_keys_button
+bcm47xx_buttons_motorola_wr850gp[] __initconst = {
+	BCM47XX_GPIO_KEY(5, KEY_RESTART),
+};
+
+static const struct gpio_keys_button
+bcm47xx_buttons_motorola_wr850gv2v3[] __initconst = {
+	BCM47XX_GPIO_KEY(5, KEY_RESTART),
+};
+
+/* Netgear */
+
+static const struct gpio_keys_button
+bcm47xx_buttons_netgear_wndr3400v1[] __initconst = {
+	BCM47XX_GPIO_KEY(4, KEY_RESTART),
+	BCM47XX_GPIO_KEY(6, KEY_WPS_BUTTON),
+	BCM47XX_GPIO_KEY(8, KEY_RFKILL),
+};
+
+static const struct gpio_keys_button
+bcm47xx_buttons_netgear_wndr3700v3[] __initconst = {
+	BCM47XX_GPIO_KEY(2, KEY_RFKILL),
+	BCM47XX_GPIO_KEY(3, KEY_RESTART),
+	BCM47XX_GPIO_KEY(4, KEY_WPS_BUTTON),
+};
+
+static const struct gpio_keys_button
+bcm47xx_buttons_netgear_wndr4500v1[] __initconst = {
+	BCM47XX_GPIO_KEY(4, KEY_WPS_BUTTON),
+	BCM47XX_GPIO_KEY(5, KEY_RFKILL),
+	BCM47XX_GPIO_KEY(6, KEY_RESTART),
+};
+
+static const struct gpio_keys_button
+bcm47xx_buttons_netgear_wnr834bv2[] __initconst = {
+	BCM47XX_GPIO_KEY(6, KEY_RESTART),
+};
+
+/* SimpleTech */
+
+static const struct gpio_keys_button
+bcm47xx_buttons_simpletech_simpleshare[] __initconst = {
+	BCM47XX_GPIO_KEY(0, KEY_RESTART),
+};
+
+/**************************************************
+ * Init
+ **************************************************/
+
+static struct gpio_keys_platform_data bcm47xx_button_pdata;
+
+static struct platform_device bcm47xx_buttons_gpio_keys = {
+	.name = "gpio-keys",
+	.dev = {
+		.platform_data = &bcm47xx_button_pdata,
+	}
+};
+
+/* Copy data from __initconst */
+static int __init bcm47xx_buttons_copy(const struct gpio_keys_button *buttons,
+				       size_t nbuttons)
+{
+	size_t size = nbuttons * sizeof(*buttons);
+
+	bcm47xx_button_pdata.buttons = kmalloc(size, GFP_KERNEL);
+	if (!bcm47xx_button_pdata.buttons)
+		return -ENOMEM;
+	memcpy(bcm47xx_button_pdata.buttons, buttons, size);
+	bcm47xx_button_pdata.nbuttons = nbuttons;
+
+	return 0;
+}
+
+#define bcm47xx_copy_bdata(dev_buttons)					\
+	bcm47xx_buttons_copy(dev_buttons, ARRAY_SIZE(dev_buttons));
+
+int __init bcm47xx_buttons_register(void)
+{
+	enum bcm47xx_board board = bcm47xx_board_get();
+	int err;
+
+	switch (board) {
+	case BCM47XX_BOARD_ASUS_RTN12:
+		err = bcm47xx_copy_bdata(bcm47xx_buttons_asus_rtn12);
+		break;
+	case BCM47XX_BOARD_ASUS_RTN16:
+		err = bcm47xx_copy_bdata(bcm47xx_buttons_asus_rtn16);
+		break;
+	case BCM47XX_BOARD_ASUS_RTN66U:
+		err = bcm47xx_copy_bdata(bcm47xx_buttons_asus_rtn66u);
+		break;
+	case BCM47XX_BOARD_ASUS_WL300G:
+		err = bcm47xx_copy_bdata(bcm47xx_buttons_asus_wl300g);
+		break;
+	case BCM47XX_BOARD_ASUS_WL320GE:
+		err = bcm47xx_copy_bdata(bcm47xx_buttons_asus_wl320ge);
+		break;
+	case BCM47XX_BOARD_ASUS_WL330GE:
+		err = bcm47xx_copy_bdata(bcm47xx_buttons_asus_wl330ge);
+		break;
+	case BCM47XX_BOARD_ASUS_WL500GD:
+		err = bcm47xx_copy_bdata(bcm47xx_buttons_asus_wl500gd);
+		break;
+	case BCM47XX_BOARD_ASUS_WL500GPV1:
+		err = bcm47xx_copy_bdata(bcm47xx_buttons_asus_wl500gpv1);
+		break;
+	case BCM47XX_BOARD_ASUS_WL500GPV2:
+		err = bcm47xx_copy_bdata(bcm47xx_buttons_asus_wl500gpv2);
+		break;
+	case BCM47XX_BOARD_ASUS_WL500W:
+		err = bcm47xx_copy_bdata(bcm47xx_buttons_asus_wl500w);
+		break;
+	case BCM47XX_BOARD_ASUS_WL520GC:
+		err = bcm47xx_copy_bdata(bcm47xx_buttons_asus_wl520gc);
+		break;
+	case BCM47XX_BOARD_ASUS_WL520GU:
+		err = bcm47xx_copy_bdata(bcm47xx_buttons_asus_wl520gu);
+		break;
+	case BCM47XX_BOARD_ASUS_WL700GE:
+		err = bcm47xx_copy_bdata(bcm47xx_buttons_asus_wl700ge);
+		break;
+	case BCM47XX_BOARD_ASUS_WLHDD:
+		err = bcm47xx_copy_bdata(bcm47xx_buttons_asus_wlhdd);
+		break;
+
+	case BCM47XX_BOARD_BELKIN_F7D4301:
+		err = bcm47xx_copy_bdata(bcm47xx_buttons_belkin_f7d4301);
+		break;
+
+	case BCM47XX_BOARD_BUFFALO_WHR2_A54G54:
+		err = bcm47xx_copy_bdata(bcm47xx_buttons_buffalo_whr2_a54g54);
+		break;
+	case BCM47XX_BOARD_BUFFALO_WHR_G125:
+		err = bcm47xx_copy_bdata(bcm47xx_buttons_buffalo_whr_g125);
+		break;
+	case BCM47XX_BOARD_BUFFALO_WHR_G54S:
+		err = bcm47xx_copy_bdata(bcm47xx_buttons_buffalo_whr_g54s);
+		break;
+	case BCM47XX_BOARD_BUFFALO_WHR_HP_G54:
+		err = bcm47xx_copy_bdata(bcm47xx_buttons_buffalo_whr_hp_g54);
+		break;
+	case BCM47XX_BOARD_BUFFALO_WZR_G300N:
+		err = bcm47xx_copy_bdata(bcm47xx_buttons_buffalo_wzr_g300n);
+		break;
+	case BCM47XX_BOARD_BUFFALO_WZR_RS_G54:
+		err = bcm47xx_copy_bdata(bcm47xx_buttons_buffalo_wzr_rs_g54);
+		break;
+	case BCM47XX_BOARD_BUFFALO_WZR_RS_G54HP:
+		err = bcm47xx_copy_bdata(bcm47xx_buttons_buffalo_wzr_rs_g54hp);
+		break;
+
+	case BCM47XX_BOARD_DELL_TM2300:
+		err = bcm47xx_copy_bdata(bcm47xx_buttons_dell_tm2300);
+		break;
+
+	case BCM47XX_BOARD_DLINK_DIR130:
+		err = bcm47xx_copy_bdata(bcm47xx_buttons_dlink_dir130);
+		break;
+	case BCM47XX_BOARD_DLINK_DIR330:
+		err = bcm47xx_copy_bdata(bcm47xx_buttons_dlink_dir330);
+		break;
+
+	case BCM47XX_BOARD_HUAWEI_E970:
+		err = bcm47xx_copy_bdata(bcm47xx_buttons_huawei_e970);
+		break;
+
+	case BCM47XX_BOARD_LINKSYS_E1000V1:
+		err = bcm47xx_copy_bdata(bcm47xx_buttons_linksys_e1000v1);
+		break;
+	case BCM47XX_BOARD_LINKSYS_E1000V21:
+		err = bcm47xx_copy_bdata(bcm47xx_buttons_linksys_e1000v21);
+		break;
+	case BCM47XX_BOARD_LINKSYS_E2000V1:
+		err = bcm47xx_copy_bdata(bcm47xx_buttons_linksys_e2000v1);
+		break;
+	case BCM47XX_BOARD_LINKSYS_E3000V1:
+		err = bcm47xx_copy_bdata(bcm47xx_buttons_linksys_e3000v1);
+		break;
+	case BCM47XX_BOARD_LINKSYS_E3200V1:
+		err = bcm47xx_copy_bdata(bcm47xx_buttons_linksys_e3200v1);
+		break;
+	case BCM47XX_BOARD_LINKSYS_E4200V1:
+		err = bcm47xx_copy_bdata(bcm47xx_buttons_linksys_e4200v1);
+		break;
+	case BCM47XX_BOARD_LINKSYS_WRT150NV1:
+		err = bcm47xx_copy_bdata(bcm47xx_buttons_linksys_wrt150nv1);
+		break;
+	case BCM47XX_BOARD_LINKSYS_WRT150NV11:
+		err = bcm47xx_copy_bdata(bcm47xx_buttons_linksys_wrt150nv11);
+		break;
+	case BCM47XX_BOARD_LINKSYS_WRT160NV1:
+		err = bcm47xx_copy_bdata(bcm47xx_buttons_linksys_wrt160nv1);
+		break;
+	case BCM47XX_BOARD_LINKSYS_WRT160NV3:
+		err = bcm47xx_copy_bdata(bcm47xx_buttons_linksys_wrt160nv3);
+		break;
+	case BCM47XX_BOARD_LINKSYS_WRT300NV11:
+		err = bcm47xx_copy_bdata(bcm47xx_buttons_linksys_wrt300nv11);
+		break;
+	case BCM47XX_BOARD_LINKSYS_WRT310NV1:
+		err = bcm47xx_copy_bdata(bcm47xx_buttons_linksys_wrt310nv1);
+		break;
+	case BCM47XX_BOARD_LINKSYS_WRT610NV1:
+		err = bcm47xx_copy_bdata(bcm47xx_buttons_linksys_wrt610nv1);
+		break;
+	case BCM47XX_BOARD_LINKSYS_WRT610NV2:
+		err = bcm47xx_copy_bdata(bcm47xx_buttons_linksys_wrt610nv2);
+		break;
+
+	case BCM47XX_BOARD_MOTOROLA_WE800G:
+		err = bcm47xx_copy_bdata(bcm47xx_buttons_motorola_we800g);
+		break;
+	case BCM47XX_BOARD_MOTOROLA_WR850GP:
+		err = bcm47xx_copy_bdata(bcm47xx_buttons_motorola_wr850gp);
+		break;
+	case BCM47XX_BOARD_MOTOROLA_WR850GV2V3:
+		err = bcm47xx_copy_bdata(bcm47xx_buttons_motorola_wr850gv2v3);
+		break;
+
+	case BCM47XX_BOARD_NETGEAR_WNDR3400V1:
+		err = bcm47xx_copy_bdata(bcm47xx_buttons_netgear_wndr3400v1);
+		break;
+	case BCM47XX_BOARD_NETGEAR_WNDR3700V3:
+		err = bcm47xx_copy_bdata(bcm47xx_buttons_netgear_wndr3700v3);
+		break;
+	case BCM47XX_BOARD_NETGEAR_WNDR4500V1:
+		err = bcm47xx_copy_bdata(bcm47xx_buttons_netgear_wndr4500v1);
+		break;
+	case BCM47XX_BOARD_NETGEAR_WNR834BV2:
+		err = bcm47xx_copy_bdata(bcm47xx_buttons_netgear_wnr834bv2);
+		break;
+
+	case BCM47XX_BOARD_SIMPLETECH_SIMPLESHARE:
+		err = bcm47xx_copy_bdata(bcm47xx_buttons_simpletech_simpleshare);
+		break;
+
+	default:
+		pr_debug("No buttons configuration found for this device\n");
+		return -ENOTSUPP;
+	}
+
+	if (err)
+		return -ENOMEM;
+
+	err = platform_device_register(&bcm47xx_buttons_gpio_keys);
+	if (err) {
+		pr_err("Failed to register platform device: %d\n", err);
+		return err;
+	}
+
+	return 0;
+}
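
For context (not part of the patch): the "gpio-keys" platform device
registered above appears to userspace as an ordinary input device emitting
KEY_RESTART / KEY_WPS_BUTTON events. A minimal reader, assuming the event node
is /dev/input/event0 (the actual node number varies per system), could look
like this sketch:

	/* keywatch.c - dump key events from a gpio-keys input device (sketch) */
	#include <fcntl.h>
	#include <linux/input.h>
	#include <stdio.h>
	#include <unistd.h>

	int main(void)
	{
		struct input_event ev;
		/* /dev/input/event0 is only an example; the node varies */
		int fd = open("/dev/input/event0", O_RDONLY);

		if (fd < 0) {
			perror("open");
			return 1;
		}
		while (read(fd, &ev, sizeof(ev)) == sizeof(ev)) {
			if (ev.type == EV_KEY)
				printf("key code %u %s\n", ev.code,
				       ev.value ? "pressed" : "released");
		}
		close(fd);
		return 0;
	}
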
diff --git a/arch/mips/bcm47xx/irq.c b/arch/mips/bcm47xx/irq.c
index 8cf3833..e0585b7 100644
--- a/arch/mips/bcm47xx/irq.c
+++ b/arch/mips/bcm47xx/irq.c
@@ -25,10 +25,11 @@
 #include <linux/types.h>
 #include <linux/interrupt.h>
 #include <linux/irq.h>
+#include <asm/setup.h>
 #include <asm/irq_cpu.h>
 #include <bcm47xx.h>
 
-void plat_irq_dispatch(void)
+asmlinkage void plat_irq_dispatch(void)
 {
 	u32 cause;
 
@@ -50,6 +51,18 @@
 		do_IRQ(6);
 }
 
+#define DEFINE_HWx_IRQDISPATCH(x)					\
+	static void bcm47xx_hw ## x ## _irqdispatch(void)		\
+	{								\
+		do_IRQ(x);						\
+	}
+DEFINE_HWx_IRQDISPATCH(2)
+DEFINE_HWx_IRQDISPATCH(3)
+DEFINE_HWx_IRQDISPATCH(4)
+DEFINE_HWx_IRQDISPATCH(5)
+DEFINE_HWx_IRQDISPATCH(6)
+DEFINE_HWx_IRQDISPATCH(7)
+
 void __init arch_init_irq(void)
 {
 #ifdef CONFIG_BCM47XX_BCMA
@@ -64,4 +77,14 @@
 	}
 #endif
 	mips_cpu_irq_init();
+
+	if (cpu_has_vint) {
+		pr_info("Setting up vectored interrupts\n");
+		set_vi_handler(2, bcm47xx_hw2_irqdispatch);
+		set_vi_handler(3, bcm47xx_hw3_irqdispatch);
+		set_vi_handler(4, bcm47xx_hw4_irqdispatch);
+		set_vi_handler(5, bcm47xx_hw5_irqdispatch);
+		set_vi_handler(6, bcm47xx_hw6_irqdispatch);
+		set_vi_handler(7, bcm47xx_hw7_irqdispatch);
+	}
 }
diff --git a/arch/mips/bcm47xx/leds.c b/arch/mips/bcm47xx/leds.c
new file mode 100644
index 0000000..647d155
--- /dev/null
+++ b/arch/mips/bcm47xx/leds.c
@@ -0,0 +1,542 @@
+#include "bcm47xx_private.h"
+
+#include <linux/leds.h>
+#include <bcm47xx_board.h>
+
+/**************************************************
+ * Database
+ **************************************************/
+
+#define BCM47XX_GPIO_LED(_gpio, _color, _function, _active_low,		\
+			 _default_state)				\
+	{								\
+		.name		= "bcm47xx:" _color ":" _function,	\
+		.gpio		= _gpio,				\
+		.active_low	= _active_low,				\
+		.default_state	= _default_state,			\
+	}
+
+#define BCM47XX_GPIO_LED_TRIGGER(_gpio, _color, _function, _active_low,	\
+				 _default_trigger)			\
+	{								\
+		.name		= "bcm47xx:" _color ":" _function,	\
+		.gpio		= _gpio,				\
+		.active_low	= _active_low,				\
+		.default_state	= LEDS_GPIO_DEFSTATE_OFF,		\
+		.default_trigger	= _default_trigger,		\
+	}
+
+/* Asus */
+
+static const struct gpio_led
+bcm47xx_leds_asus_rtn12[] __initconst = {
+	BCM47XX_GPIO_LED(2, "unk", "power", 1, LEDS_GPIO_DEFSTATE_ON),
+	BCM47XX_GPIO_LED(7, "unk", "wlan", 0, LEDS_GPIO_DEFSTATE_OFF),
+};
+
+static const struct gpio_led
+bcm47xx_leds_asus_rtn16[] __initconst = {
+	BCM47XX_GPIO_LED(1, "blue", "power", 1, LEDS_GPIO_DEFSTATE_ON),
+	BCM47XX_GPIO_LED(7, "blue", "wlan", 0, LEDS_GPIO_DEFSTATE_OFF),
+};
+
+static const struct gpio_led
+bcm47xx_leds_asus_rtn66u[] __initconst = {
+	BCM47XX_GPIO_LED(12, "unk", "power", 1, LEDS_GPIO_DEFSTATE_ON),
+	BCM47XX_GPIO_LED(15, "unk", "usb", 1, LEDS_GPIO_DEFSTATE_OFF),
+};
+
+static const struct gpio_led
+bcm47xx_leds_asus_wl300g[] __initconst = {
+	BCM47XX_GPIO_LED(0, "unk", "power", 1, LEDS_GPIO_DEFSTATE_ON),
+};
+
+static const struct gpio_led
+bcm47xx_leds_asus_wl320ge[] __initconst = {
+	BCM47XX_GPIO_LED(0, "unk", "wlan", 1, LEDS_GPIO_DEFSTATE_OFF),
+	BCM47XX_GPIO_LED(2, "unk", "power", 1, LEDS_GPIO_DEFSTATE_ON),
+	BCM47XX_GPIO_LED(11, "unk", "link", 1, LEDS_GPIO_DEFSTATE_OFF),
+};
+
+static const struct gpio_led
+bcm47xx_leds_asus_wl330ge[] __initconst = {
+	BCM47XX_GPIO_LED(0, "unk", "power", 1, LEDS_GPIO_DEFSTATE_ON),
+};
+
+static const struct gpio_led
+bcm47xx_leds_asus_wl500gd[] __initconst = {
+	BCM47XX_GPIO_LED(0, "unk", "power", 1, LEDS_GPIO_DEFSTATE_ON),
+};
+
+static const struct gpio_led
+bcm47xx_leds_asus_wl500gpv1[] __initconst = {
+	BCM47XX_GPIO_LED(1, "unk", "power", 1, LEDS_GPIO_DEFSTATE_ON),
+};
+
+static const struct gpio_led
+bcm47xx_leds_asus_wl500gpv2[] __initconst = {
+	BCM47XX_GPIO_LED(0, "unk", "power", 1, LEDS_GPIO_DEFSTATE_ON),
+	BCM47XX_GPIO_LED(1, "unk", "wlan", 1, LEDS_GPIO_DEFSTATE_OFF),
+};
+
+static const struct gpio_led
+bcm47xx_leds_asus_wl500w[] __initconst = {
+	BCM47XX_GPIO_LED(5, "unk", "power", 1, LEDS_GPIO_DEFSTATE_ON),
+};
+
+static const struct gpio_led
+bcm47xx_leds_asus_wl520gc[] __initconst = {
+	BCM47XX_GPIO_LED(0, "unk", "power", 1, LEDS_GPIO_DEFSTATE_ON),
+	BCM47XX_GPIO_LED(1, "unk", "wlan", 1, LEDS_GPIO_DEFSTATE_OFF),
+};
+
+static const struct gpio_led
+bcm47xx_leds_asus_wl520gu[] __initconst = {
+	BCM47XX_GPIO_LED(0, "unk", "power", 1, LEDS_GPIO_DEFSTATE_ON),
+	BCM47XX_GPIO_LED(1, "unk", "wlan", 1, LEDS_GPIO_DEFSTATE_OFF),
+};
+
+static const struct gpio_led
+bcm47xx_leds_asus_wl700ge[] __initconst = {
+	BCM47XX_GPIO_LED(1, "unk", "power", 1, LEDS_GPIO_DEFSTATE_ON), /* Labeled "READY" (there is no "power" LED). Originally ON, flashing on USB activity. */
+};
+
+static const struct gpio_led
+bcm47xx_leds_asus_wlhdd[] __initconst = {
+	BCM47XX_GPIO_LED(0, "unk", "power", 1, LEDS_GPIO_DEFSTATE_ON),
+	BCM47XX_GPIO_LED(2, "unk", "usb", 1, LEDS_GPIO_DEFSTATE_OFF),
+};
+
+/* Belkin */
+
+static const struct gpio_led
+bcm47xx_leds_belkin_f7d4301[] __initconst = {
+	BCM47XX_GPIO_LED(10, "green", "power", 1, LEDS_GPIO_DEFSTATE_ON),
+	BCM47XX_GPIO_LED(11, "amber", "power", 1, LEDS_GPIO_DEFSTATE_OFF),
+	BCM47XX_GPIO_LED(12, "unk", "wps", 1, LEDS_GPIO_DEFSTATE_OFF),
+	BCM47XX_GPIO_LED(13, "unk", "wlan", 1, LEDS_GPIO_DEFSTATE_OFF),
+	BCM47XX_GPIO_LED(14, "unk", "usb0", 1, LEDS_GPIO_DEFSTATE_OFF),
+	BCM47XX_GPIO_LED(15, "unk", "usb1", 1, LEDS_GPIO_DEFSTATE_OFF),
+};
+
+/* Buffalo */
+
+static const struct gpio_led
+bcm47xx_leds_buffalo_whr2_a54g54[] __initconst = {
+	BCM47XX_GPIO_LED(7, "unk", "diag", 1, LEDS_GPIO_DEFSTATE_OFF),
+};
+
+static const struct gpio_led
+bcm47xx_leds_buffalo_whr_g125[] __initconst = {
+	BCM47XX_GPIO_LED(1, "unk", "bridge", 1, LEDS_GPIO_DEFSTATE_OFF),
+	BCM47XX_GPIO_LED(2, "unk", "wlan", 1, LEDS_GPIO_DEFSTATE_OFF),
+	BCM47XX_GPIO_LED(3, "unk", "internal", 1, LEDS_GPIO_DEFSTATE_OFF),
+	BCM47XX_GPIO_LED(6, "unk", "wps", 1, LEDS_GPIO_DEFSTATE_OFF),
+	BCM47XX_GPIO_LED(7, "unk", "diag", 1, LEDS_GPIO_DEFSTATE_OFF),
+};
+
+static const struct gpio_led
+bcm47xx_leds_buffalo_whr_g54s[] __initconst = {
+	BCM47XX_GPIO_LED(1, "unk", "bridge", 1, LEDS_GPIO_DEFSTATE_OFF),
+	BCM47XX_GPIO_LED(2, "unk", "wlan", 1, LEDS_GPIO_DEFSTATE_OFF),
+	BCM47XX_GPIO_LED(3, "unk", "internal", 1, LEDS_GPIO_DEFSTATE_OFF),
+	BCM47XX_GPIO_LED(6, "unk", "wps", 1, LEDS_GPIO_DEFSTATE_OFF),
+	BCM47XX_GPIO_LED(7, "unk", "diag", 1, LEDS_GPIO_DEFSTATE_OFF),
+};
+
+static const struct gpio_led
+bcm47xx_leds_buffalo_whr_hp_g54[] __initconst = {
+	BCM47XX_GPIO_LED(1, "unk", "bridge", 1, LEDS_GPIO_DEFSTATE_OFF),
+	BCM47XX_GPIO_LED(2, "unk", "wlan", 1, LEDS_GPIO_DEFSTATE_OFF),
+	BCM47XX_GPIO_LED(3, "unk", "internal", 1, LEDS_GPIO_DEFSTATE_OFF),
+	BCM47XX_GPIO_LED(6, "unk", "wps", 1, LEDS_GPIO_DEFSTATE_OFF),
+	BCM47XX_GPIO_LED(7, "unk", "diag", 1, LEDS_GPIO_DEFSTATE_OFF),
+};
+
+static const struct gpio_led
+bcm47xx_leds_buffalo_wzr_g300n[] __initconst = {
+	BCM47XX_GPIO_LED(1, "unk", "bridge", 1, LEDS_GPIO_DEFSTATE_OFF),
+	BCM47XX_GPIO_LED(6, "unk", "wps", 1, LEDS_GPIO_DEFSTATE_OFF),
+	BCM47XX_GPIO_LED(7, "unk", "diag", 1, LEDS_GPIO_DEFSTATE_OFF),
+};
+
+static const struct gpio_led
+bcm47xx_leds_buffalo_wzr_rs_g54[] __initconst = {
+	BCM47XX_GPIO_LED(6, "unk", "wps", 1, LEDS_GPIO_DEFSTATE_OFF),
+	BCM47XX_GPIO_LED(1, "unk", "vpn", 1, LEDS_GPIO_DEFSTATE_OFF),
+	BCM47XX_GPIO_LED(7, "unk", "diag", 1, LEDS_GPIO_DEFSTATE_OFF),
+};
+
+static const struct gpio_led
+bcm47xx_leds_buffalo_wzr_rs_g54hp[] __initconst = {
+	BCM47XX_GPIO_LED(6, "unk", "wps", 1, LEDS_GPIO_DEFSTATE_OFF),
+	BCM47XX_GPIO_LED(1, "unk", "vpn", 1, LEDS_GPIO_DEFSTATE_OFF),
+	BCM47XX_GPIO_LED(7, "unk", "diag", 1, LEDS_GPIO_DEFSTATE_OFF),
+};
+
+/* Dell */
+
+static const struct gpio_led
+bcm47xx_leds_dell_tm2300[] __initconst = {
+	BCM47XX_GPIO_LED(6, "unk", "wlan", 1, LEDS_GPIO_DEFSTATE_OFF),
+	BCM47XX_GPIO_LED(7, "unk", "power", 1, LEDS_GPIO_DEFSTATE_ON),
+};
+
+/* D-Link */
+
+static const struct gpio_led
+bcm47xx_leds_dlink_dir130[] __initconst = {
+	BCM47XX_GPIO_LED_TRIGGER(0, "green", "status", 1, "timer"), /* Originally blinking when device is ready, separated from "power" LED */
+	BCM47XX_GPIO_LED(6, "blue", "unk", 1, LEDS_GPIO_DEFSTATE_OFF),
+};
+
+static const struct gpio_led
+bcm47xx_leds_dlink_dir330[] __initconst = {
+	BCM47XX_GPIO_LED_TRIGGER(0, "green", "status", 1, "timer"), /* Originally blinking when device is ready, separated from "power" LED */
+	BCM47XX_GPIO_LED(4, "unk", "usb", 1, LEDS_GPIO_DEFSTATE_OFF),
+	BCM47XX_GPIO_LED(6, "blue", "unk", 1, LEDS_GPIO_DEFSTATE_OFF),
+};
+
+/* Huawei */
+
+static const struct gpio_led
+bcm47xx_leds_huawei_e970[] __initconst = {
+	BCM47XX_GPIO_LED(0, "unk", "wlan", 0, LEDS_GPIO_DEFSTATE_OFF),
+};
+
+/* Linksys */
+
+static const struct gpio_led
+bcm47xx_leds_linksys_e1000v1[] __initconst = {
+	BCM47XX_GPIO_LED(0, "blue", "wlan", 0, LEDS_GPIO_DEFSTATE_OFF),
+	BCM47XX_GPIO_LED(1, "blue", "power", 0, LEDS_GPIO_DEFSTATE_ON),
+	BCM47XX_GPIO_LED(2, "amber", "wps", 1, LEDS_GPIO_DEFSTATE_OFF),
+	BCM47XX_GPIO_LED(4, "blue", "wps", 1, LEDS_GPIO_DEFSTATE_OFF),
+};
+
+static const struct gpio_led
+bcm47xx_leds_linksys_e1000v21[] __initconst = {
+	BCM47XX_GPIO_LED(5, "unk", "wlan", 0, LEDS_GPIO_DEFSTATE_OFF),
+	BCM47XX_GPIO_LED(6, "unk", "power", 1, LEDS_GPIO_DEFSTATE_ON),
+	BCM47XX_GPIO_LED(7, "amber", "wps", 0, LEDS_GPIO_DEFSTATE_OFF),
+	BCM47XX_GPIO_LED(8, "blue", "wps", 0, LEDS_GPIO_DEFSTATE_OFF),
+};
+
+static const struct gpio_led
+bcm47xx_leds_linksys_e2000v1[] __initconst = {
+	BCM47XX_GPIO_LED(1, "blue", "wlan", 0, LEDS_GPIO_DEFSTATE_OFF),
+	BCM47XX_GPIO_LED(2, "blue", "power", 0, LEDS_GPIO_DEFSTATE_ON),
+	BCM47XX_GPIO_LED(3, "blue", "wps", 1, LEDS_GPIO_DEFSTATE_OFF),
+	BCM47XX_GPIO_LED(4, "amber", "wps", 1, LEDS_GPIO_DEFSTATE_OFF),
+};
+
+static const struct gpio_led
+bcm47xx_leds_linksys_e3000v1[] __initconst = {
+	BCM47XX_GPIO_LED(0, "amber", "wps", 1, LEDS_GPIO_DEFSTATE_OFF),
+	BCM47XX_GPIO_LED(1, "unk", "wlan", 0, LEDS_GPIO_DEFSTATE_OFF),
+	BCM47XX_GPIO_LED(3, "blue", "wps", 1, LEDS_GPIO_DEFSTATE_OFF),
+	BCM47XX_GPIO_LED(5, "unk", "power", 0, LEDS_GPIO_DEFSTATE_ON),
+	BCM47XX_GPIO_LED(7, "unk", "usb", 0, LEDS_GPIO_DEFSTATE_OFF),
+};
+
+static const struct gpio_led
+bcm47xx_leds_linksys_e3200v1[] __initconst = {
+	BCM47XX_GPIO_LED(3, "green", "power", 1, LEDS_GPIO_DEFSTATE_ON),
+};
+
+static const struct gpio_led
+bcm47xx_leds_linksys_e4200v1[] __initconst = {
+	BCM47XX_GPIO_LED(5, "white", "power", 1, LEDS_GPIO_DEFSTATE_ON),
+};
+
+static const struct gpio_led
+bcm47xx_leds_linksys_wrt150nv1[] __initconst = {
+	BCM47XX_GPIO_LED(1, "unk", "power", 0, LEDS_GPIO_DEFSTATE_ON),
+	BCM47XX_GPIO_LED(3, "amber", "wps", 1, LEDS_GPIO_DEFSTATE_OFF),
+	BCM47XX_GPIO_LED(5, "green", "wps", 1, LEDS_GPIO_DEFSTATE_OFF),
+};
+
+static const struct gpio_led
+bcm47xx_leds_linksys_wrt150nv11[] __initconst = {
+	BCM47XX_GPIO_LED(1, "unk", "power", 0, LEDS_GPIO_DEFSTATE_ON),
+	BCM47XX_GPIO_LED(3, "amber", "wps", 1, LEDS_GPIO_DEFSTATE_OFF),
+	BCM47XX_GPIO_LED(5, "green", "wps", 1, LEDS_GPIO_DEFSTATE_OFF),
+};
+
+static const struct gpio_led
+bcm47xx_leds_linksys_wrt160nv1[] __initconst = {
+	BCM47XX_GPIO_LED(1, "unk", "power", 0, LEDS_GPIO_DEFSTATE_ON),
+	BCM47XX_GPIO_LED(3, "amber", "wps", 1, LEDS_GPIO_DEFSTATE_OFF),
+	BCM47XX_GPIO_LED(5, "blue", "wps", 1, LEDS_GPIO_DEFSTATE_OFF),
+};
+
+static const struct gpio_led
+bcm47xx_leds_linksys_wrt160nv3[] __initconst = {
+	BCM47XX_GPIO_LED(1, "unk", "power", 0, LEDS_GPIO_DEFSTATE_ON),
+	BCM47XX_GPIO_LED(2, "amber", "wps", 1, LEDS_GPIO_DEFSTATE_OFF),
+	BCM47XX_GPIO_LED(4, "blue", "wps", 1, LEDS_GPIO_DEFSTATE_OFF),
+};
+
+static const struct gpio_led
+bcm47xx_leds_linksys_wrt300nv11[] __initconst = {
+	BCM47XX_GPIO_LED(1, "unk", "power", 0, LEDS_GPIO_DEFSTATE_ON),
+	BCM47XX_GPIO_LED(3, "amber", "wps", 1, LEDS_GPIO_DEFSTATE_OFF),
+	BCM47XX_GPIO_LED(5, "green", "wps", 1, LEDS_GPIO_DEFSTATE_OFF),
+};
+
+static const struct gpio_led
+bcm47xx_leds_linksys_wrt310nv1[] __initconst = {
+	BCM47XX_GPIO_LED(1, "blue", "power", 0, LEDS_GPIO_DEFSTATE_ON),
+	BCM47XX_GPIO_LED(3, "amber", "wps", 1, LEDS_GPIO_DEFSTATE_OFF),
+	BCM47XX_GPIO_LED(9, "blue", "wps", 1, LEDS_GPIO_DEFSTATE_OFF),
+};
+
+static const struct gpio_led
+bcm47xx_leds_linksys_wrt610nv1[] __initconst = {
+	BCM47XX_GPIO_LED(0, "unk", "usb",  1, LEDS_GPIO_DEFSTATE_OFF),
+	BCM47XX_GPIO_LED(1, "unk", "power",  0, LEDS_GPIO_DEFSTATE_OFF),
+	BCM47XX_GPIO_LED(3, "amber", "wps",  1, LEDS_GPIO_DEFSTATE_OFF),
+	BCM47XX_GPIO_LED(9, "blue", "wps",  1, LEDS_GPIO_DEFSTATE_OFF),
+};
+
+static const struct gpio_led
+bcm47xx_leds_linksys_wrt610nv2[] __initconst = {
+	BCM47XX_GPIO_LED(0, "amber", "wps", 1, LEDS_GPIO_DEFSTATE_OFF),
+	BCM47XX_GPIO_LED(1, "unk", "wlan", 0, LEDS_GPIO_DEFSTATE_OFF),
+	BCM47XX_GPIO_LED(3, "blue", "wps", 1, LEDS_GPIO_DEFSTATE_OFF),
+	BCM47XX_GPIO_LED(5, "unk", "power", 0, LEDS_GPIO_DEFSTATE_ON),
+	BCM47XX_GPIO_LED(7, "unk", "usb", 0, LEDS_GPIO_DEFSTATE_OFF),
+};
+
+/* Motorola */
+
+static const struct gpio_led
+bcm47xx_leds_motorola_we800g[] __initconst = {
+	BCM47XX_GPIO_LED(1, "amber", "wlan", 0, LEDS_GPIO_DEFSTATE_OFF),
+	BCM47XX_GPIO_LED(2, "unk", "unk", 1, LEDS_GPIO_DEFSTATE_OFF), /* There are only 3 LEDs: Power, Wireless and Device (ethernet) */
+	BCM47XX_GPIO_LED(4, "green", "power", 0, LEDS_GPIO_DEFSTATE_ON),
+};
+
+static const struct gpio_led
+bcm47xx_leds_motorola_wr850gp[] __initconst = {
+	BCM47XX_GPIO_LED(0, "unk", "wlan", 1, LEDS_GPIO_DEFSTATE_OFF),
+	BCM47XX_GPIO_LED(1, "unk", "power", 0, LEDS_GPIO_DEFSTATE_ON),
+	BCM47XX_GPIO_LED(6, "unk", "dmz", 1, LEDS_GPIO_DEFSTATE_OFF),
+	BCM47XX_GPIO_LED(7, "unk", "diag", 1, LEDS_GPIO_DEFSTATE_OFF),
+};
+
+static const struct gpio_led
+bcm47xx_leds_motorola_wr850gv2v3[] __initconst = {
+	BCM47XX_GPIO_LED(0, "unk", "wlan", 1, LEDS_GPIO_DEFSTATE_OFF),
+	BCM47XX_GPIO_LED(1, "unk", "power", 0, LEDS_GPIO_DEFSTATE_ON),
+	BCM47XX_GPIO_LED(7, "unk", "diag", 1, LEDS_GPIO_DEFSTATE_OFF),
+};
+
+/* Netgear */
+
+static const struct gpio_led
+bcm47xx_leds_netgear_wndr3400v1[] __initconst = {
+	BCM47XX_GPIO_LED(2, "green", "usb", 1, LEDS_GPIO_DEFSTATE_OFF),
+	BCM47XX_GPIO_LED(3, "green", "power", 0, LEDS_GPIO_DEFSTATE_ON),
+	BCM47XX_GPIO_LED(7, "amber", "power", 0, LEDS_GPIO_DEFSTATE_OFF),
+};
+
+static const struct gpio_led
+bcm47xx_leds_netgear_wndr4500v1[] __initconst = {
+	BCM47XX_GPIO_LED(1, "green", "wps", 1, LEDS_GPIO_DEFSTATE_OFF),
+	BCM47XX_GPIO_LED(2, "green", "power", 1, LEDS_GPIO_DEFSTATE_ON),
+	BCM47XX_GPIO_LED(3, "amber", "power", 1, LEDS_GPIO_DEFSTATE_OFF),
+	BCM47XX_GPIO_LED(8, "green", "usb1", 1, LEDS_GPIO_DEFSTATE_OFF),
+	BCM47XX_GPIO_LED(9, "green", "2ghz", 1, LEDS_GPIO_DEFSTATE_OFF),
+	BCM47XX_GPIO_LED(11, "blue", "5ghz", 1, LEDS_GPIO_DEFSTATE_OFF),
+	BCM47XX_GPIO_LED(14, "green", "usb2", 1, LEDS_GPIO_DEFSTATE_OFF),
+};
+
+static const struct gpio_led
+bcm47xx_leds_netgear_wnr834bv2[] __initconst = {
+	BCM47XX_GPIO_LED(2, "green", "power", 0, LEDS_GPIO_DEFSTATE_ON),
+	BCM47XX_GPIO_LED(3, "amber", "power", 0, LEDS_GPIO_DEFSTATE_OFF),
+	BCM47XX_GPIO_LED(7, "unk", "connected", 0, LEDS_GPIO_DEFSTATE_OFF),
+};
+
+/* SimpleTech */
+
+static const struct gpio_led
+bcm47xx_leds_simpletech_simpleshare[] __initconst = {
+	BCM47XX_GPIO_LED(1, "unk", "status", 1, LEDS_GPIO_DEFSTATE_OFF), /* "Ready" LED */
+};
+
+/**************************************************
+ * Init
+ **************************************************/
+
+static struct gpio_led_platform_data bcm47xx_leds_pdata;
+
+#define bcm47xx_set_pdata(dev_leds) do {				\
+	bcm47xx_leds_pdata.leds = dev_leds;				\
+	bcm47xx_leds_pdata.num_leds = ARRAY_SIZE(dev_leds);		\
+} while (0)
+
+void __init bcm47xx_leds_register(void)
+{
+	enum bcm47xx_board board = bcm47xx_board_get();
+
+	switch (board) {
+	case BCM47XX_BOARD_ASUS_RTN12:
+		bcm47xx_set_pdata(bcm47xx_leds_asus_rtn12);
+		break;
+	case BCM47XX_BOARD_ASUS_RTN16:
+		bcm47xx_set_pdata(bcm47xx_leds_asus_rtn16);
+		break;
+	case BCM47XX_BOARD_ASUS_RTN66U:
+		bcm47xx_set_pdata(bcm47xx_leds_asus_rtn66u);
+		break;
+	case BCM47XX_BOARD_ASUS_WL300G:
+		bcm47xx_set_pdata(bcm47xx_leds_asus_wl300g);
+		break;
+	case BCM47XX_BOARD_ASUS_WL320GE:
+		bcm47xx_set_pdata(bcm47xx_leds_asus_wl320ge);
+		break;
+	case BCM47XX_BOARD_ASUS_WL330GE:
+		bcm47xx_set_pdata(bcm47xx_leds_asus_wl330ge);
+		break;
+	case BCM47XX_BOARD_ASUS_WL500GD:
+		bcm47xx_set_pdata(bcm47xx_leds_asus_wl500gd);
+		break;
+	case BCM47XX_BOARD_ASUS_WL500GPV1:
+		bcm47xx_set_pdata(bcm47xx_leds_asus_wl500gpv1);
+		break;
+	case BCM47XX_BOARD_ASUS_WL500GPV2:
+		bcm47xx_set_pdata(bcm47xx_leds_asus_wl500gpv2);
+		break;
+	case BCM47XX_BOARD_ASUS_WL500W:
+		bcm47xx_set_pdata(bcm47xx_leds_asus_wl500w);
+		break;
+	case BCM47XX_BOARD_ASUS_WL520GC:
+		bcm47xx_set_pdata(bcm47xx_leds_asus_wl520gc);
+		break;
+	case BCM47XX_BOARD_ASUS_WL520GU:
+		bcm47xx_set_pdata(bcm47xx_leds_asus_wl520gu);
+		break;
+	case BCM47XX_BOARD_ASUS_WL700GE:
+		bcm47xx_set_pdata(bcm47xx_leds_asus_wl700ge);
+		break;
+	case BCM47XX_BOARD_ASUS_WLHDD:
+		bcm47xx_set_pdata(bcm47xx_leds_asus_wlhdd);
+		break;
+
+	case BCM47XX_BOARD_BELKIN_F7D4301:
+		bcm47xx_set_pdata(bcm47xx_leds_belkin_f7d4301);
+		break;
+
+	case BCM47XX_BOARD_BUFFALO_WHR2_A54G54:
+		bcm47xx_set_pdata(bcm47xx_leds_buffalo_whr2_a54g54);
+		break;
+	case BCM47XX_BOARD_BUFFALO_WHR_G125:
+		bcm47xx_set_pdata(bcm47xx_leds_buffalo_whr_g125);
+		break;
+	case BCM47XX_BOARD_BUFFALO_WHR_G54S:
+		bcm47xx_set_pdata(bcm47xx_leds_buffalo_whr_g54s);
+		break;
+	case BCM47XX_BOARD_BUFFALO_WHR_HP_G54:
+		bcm47xx_set_pdata(bcm47xx_leds_buffalo_whr_hp_g54);
+		break;
+	case BCM47XX_BOARD_BUFFALO_WZR_G300N:
+		bcm47xx_set_pdata(bcm47xx_leds_buffalo_wzr_g300n);
+		break;
+	case BCM47XX_BOARD_BUFFALO_WZR_RS_G54:
+		bcm47xx_set_pdata(bcm47xx_leds_buffalo_wzr_rs_g54);
+		break;
+	case BCM47XX_BOARD_BUFFALO_WZR_RS_G54HP:
+		bcm47xx_set_pdata(bcm47xx_leds_buffalo_wzr_rs_g54hp);
+		break;
+
+	case BCM47XX_BOARD_DELL_TM2300:
+		bcm47xx_set_pdata(bcm47xx_leds_dell_tm2300);
+		break;
+
+	case BCM47XX_BOARD_DLINK_DIR130:
+		bcm47xx_set_pdata(bcm47xx_leds_dlink_dir130);
+		break;
+	case BCM47XX_BOARD_DLINK_DIR330:
+		bcm47xx_set_pdata(bcm47xx_leds_dlink_dir330);
+		break;
+
+	case BCM47XX_BOARD_HUAWEI_E970:
+		bcm47xx_set_pdata(bcm47xx_leds_huawei_e970);
+		break;
+
+	case BCM47XX_BOARD_LINKSYS_E1000V1:
+		bcm47xx_set_pdata(bcm47xx_leds_linksys_e1000v1);
+		break;
+	case BCM47XX_BOARD_LINKSYS_E1000V21:
+		bcm47xx_set_pdata(bcm47xx_leds_linksys_e1000v21);
+		break;
+	case BCM47XX_BOARD_LINKSYS_E2000V1:
+		bcm47xx_set_pdata(bcm47xx_leds_linksys_e2000v1);
+		break;
+	case BCM47XX_BOARD_LINKSYS_E3000V1:
+		bcm47xx_set_pdata(bcm47xx_leds_linksys_e3000v1);
+		break;
+	case BCM47XX_BOARD_LINKSYS_E3200V1:
+		bcm47xx_set_pdata(bcm47xx_leds_linksys_e3200v1);
+		break;
+	case BCM47XX_BOARD_LINKSYS_E4200V1:
+		bcm47xx_set_pdata(bcm47xx_leds_linksys_e4200v1);
+		break;
+	case BCM47XX_BOARD_LINKSYS_WRT150NV1:
+		bcm47xx_set_pdata(bcm47xx_leds_linksys_wrt150nv1);
+		break;
+	case BCM47XX_BOARD_LINKSYS_WRT150NV11:
+		bcm47xx_set_pdata(bcm47xx_leds_linksys_wrt150nv11);
+		break;
+	case BCM47XX_BOARD_LINKSYS_WRT160NV1:
+		bcm47xx_set_pdata(bcm47xx_leds_linksys_wrt160nv1);
+		break;
+	case BCM47XX_BOARD_LINKSYS_WRT160NV3:
+		bcm47xx_set_pdata(bcm47xx_leds_linksys_wrt160nv3);
+		break;
+	case BCM47XX_BOARD_LINKSYS_WRT300NV11:
+		bcm47xx_set_pdata(bcm47xx_leds_linksys_wrt300nv11);
+		break;
+	case BCM47XX_BOARD_LINKSYS_WRT310NV1:
+		bcm47xx_set_pdata(bcm47xx_leds_linksys_wrt310nv1);
+		break;
+	case BCM47XX_BOARD_LINKSYS_WRT610NV1:
+		bcm47xx_set_pdata(bcm47xx_leds_linksys_wrt610nv1);
+		break;
+	case BCM47XX_BOARD_LINKSYS_WRT610NV2:
+		bcm47xx_set_pdata(bcm47xx_leds_linksys_wrt610nv2);
+		break;
+
+	case BCM47XX_BOARD_MOTOROLA_WE800G:
+		bcm47xx_set_pdata(bcm47xx_leds_motorola_we800g);
+		break;
+	case BCM47XX_BOARD_MOTOROLA_WR850GP:
+		bcm47xx_set_pdata(bcm47xx_leds_motorola_wr850gp);
+		break;
+	case BCM47XX_BOARD_MOTOROLA_WR850GV2V3:
+		bcm47xx_set_pdata(bcm47xx_leds_motorola_wr850gv2v3);
+		break;
+
+	case BCM47XX_BOARD_NETGEAR_WNDR3400V1:
+		bcm47xx_set_pdata(bcm47xx_leds_netgear_wndr3400v1);
+		break;
+	case BCM47XX_BOARD_NETGEAR_WNDR4500V1:
+		bcm47xx_set_pdata(bcm47xx_leds_netgear_wndr4500v1);
+		break;
+	case BCM47XX_BOARD_NETGEAR_WNR834BV2:
+		bcm47xx_set_pdata(bcm47xx_leds_netgear_wnr834bv2);
+		break;
+
+	case BCM47XX_BOARD_SIMPLETECH_SIMPLESHARE:
+		bcm47xx_set_pdata(bcm47xx_leds_simpletech_simpleshare);
+		break;
+
+	default:
+		pr_debug("No LEDs configuration found for this device\n");
+		return;
+	}
+
+	gpio_led_register_device(-1, &bcm47xx_leds_pdata);
+}
diff --git a/arch/mips/bcm47xx/nvram.c b/arch/mips/bcm47xx/nvram.c
index b4c585b..6decb27 100644
--- a/arch/mips/bcm47xx/nvram.c
+++ b/arch/mips/bcm47xx/nvram.c
@@ -11,7 +11,6 @@
  * option) any later version.
  */
 
-#include <linux/init.h>
 #include <linux/types.h>
 #include <linux/module.h>
 #include <linux/ssb/ssb.h>
@@ -22,11 +21,11 @@
 #include <asm/mach-bcm47xx/bcm47xx.h>
 
 static char nvram_buf[NVRAM_SPACE];
+static const u32 nvram_sizes[] = {0x8000, 0xF000, 0x10000};
 
 static u32 find_nvram_size(u32 end)
 {
 	struct nvram_header *header;
-	u32 nvram_sizes[] = {0x8000, 0xF000, 0x10000};
 	int i;
 
 	for (i = 0; i < ARRAY_SIZE(nvram_sizes); i++) {
diff --git a/arch/mips/bcm47xx/prom.c b/arch/mips/bcm47xx/prom.c
index 5cba318..0af808d 100644
--- a/arch/mips/bcm47xx/prom.c
+++ b/arch/mips/bcm47xx/prom.c
@@ -28,126 +28,27 @@
 #include <linux/types.h>
 #include <linux/kernel.h>
 #include <linux/spinlock.h>
+#include <linux/ssb/ssb_driver_chipcommon.h>
+#include <linux/ssb/ssb_regs.h>
 #include <linux/smp.h>
 #include <asm/bootinfo.h>
-#include <asm/fw/cfe/cfe_api.h>
-#include <asm/fw/cfe/cfe_error.h>
 #include <bcm47xx.h>
 #include <bcm47xx_board.h>
 
-static int cfe_cons_handle;
 
-static u16 get_chip_id(void)
-{
-	switch (bcm47xx_bus_type) {
-#ifdef CONFIG_BCM47XX_SSB
-	case BCM47XX_BUS_TYPE_SSB:
-		return bcm47xx_bus.ssb.chip_id;
-#endif
-#ifdef CONFIG_BCM47XX_BCMA
-	case BCM47XX_BUS_TYPE_BCMA:
-		return bcm47xx_bus.bcma.bus.chipinfo.id;
-#endif
-	}
-	return 0;
-}
+static char bcm47xx_system_type[20] = "Broadcom BCM47XX";
 
 const char *get_system_type(void)
 {
-	static char buf[50];
-	u16 chip_id = get_chip_id();
-
-	snprintf(buf, sizeof(buf),
-		 (chip_id > 0x9999) ? "Broadcom BCM%d (%s)" :
-				      "Broadcom BCM%04X (%s)",
-		 chip_id, bcm47xx_board_get_name());
-
-	return buf;
+	return bcm47xx_system_type;
 }
 
-void prom_putchar(char c)
+__init void bcm47xx_set_system_type(u16 chip_id)
 {
-	while (cfe_write(cfe_cons_handle, &c, 1) == 0)
-		;
-}
-
-static __init void prom_init_cfe(void)
-{
-	uint32_t cfe_ept;
-	uint32_t cfe_handle;
-	uint32_t cfe_eptseal;
-	int argc = fw_arg0;
-	char **envp = (char **) fw_arg2;
-	int *prom_vec = (int *) fw_arg3;
-
-	/*
-	 * Check if a loader was used; if NOT, the 4 arguments are
-	 * what CFE gives us (handle, 0, EPT and EPTSEAL)
-	 */
-	if (argc < 0) {
-		cfe_handle = (uint32_t)argc;
-		cfe_ept = (uint32_t)envp;
-		cfe_eptseal = (uint32_t)prom_vec;
-	} else {
-		if ((int)prom_vec < 0) {
-			/*
-			 * Old loader; all it gives us is the handle,
-			 * so use the "known" entrypoint and assume
-			 * the seal.
-			 */
-			cfe_handle = (uint32_t)prom_vec;
-			cfe_ept = 0xBFC00500;
-			cfe_eptseal = CFE_EPTSEAL;
-		} else {
-			/*
-			 * Newer loaders bundle the handle/ept/eptseal
-			 * Note: prom_vec is in the loader's useg
-			 * which is still alive in the TLB.
-			 */
-			cfe_handle = prom_vec[0];
-			cfe_ept = prom_vec[2];
-			cfe_eptseal = prom_vec[3];
-		}
-	}
-
-	if (cfe_eptseal != CFE_EPTSEAL) {
-		/* too early for panic to do any good */
-		printk(KERN_ERR "CFE's entrypoint seal doesn't match.");
-		while (1) ;
-	}
-
-	cfe_init(cfe_handle, cfe_ept);
-}
-
-static __init void prom_init_console(void)
-{
-	/* Initialize CFE console */
-	cfe_cons_handle = cfe_getstdhandle(CFE_STDHANDLE_CONSOLE);
-}
-
-static __init void prom_init_cmdline(void)
-{
-	static char buf[COMMAND_LINE_SIZE] __initdata;
-
-	/* Get the kernel command line from CFE */
-	if (cfe_getenv("LINUX_CMDLINE", buf, COMMAND_LINE_SIZE) >= 0) {
-		buf[COMMAND_LINE_SIZE - 1] = 0;
-		strcpy(arcs_cmdline, buf);
-	}
-
-	/* Force a console handover by adding a console= argument if needed,
-	 * as CFE is not available anymore later in the boot process. */
-	if ((strstr(arcs_cmdline, "console=")) == NULL) {
-		/* Try to read the default serial port used by CFE */
-		if ((cfe_getenv("BOOT_CONSOLE", buf, COMMAND_LINE_SIZE) < 0)
-		    || (strncmp("uart", buf, 4)))
-			/* Default to uart0 */
-			strcpy(buf, "uart0");
-
-		/* Compute the new command line */
-		snprintf(arcs_cmdline, COMMAND_LINE_SIZE, "%s console=ttyS%c,115200",
-			 arcs_cmdline, buf[4]);
-	}
+	snprintf(bcm47xx_system_type, sizeof(bcm47xx_system_type),
+		 (chip_id > 0x9999) ? "Broadcom BCM%d" :
+				      "Broadcom BCM%04X",
+		 chip_id);
 }
 
 static __init void prom_init_mem(void)
@@ -195,12 +96,16 @@
 	add_memory_region(0, mem, BOOT_MEM_RAM);
 }
 
+/*
+ * This is the first serial port on the chipcommon core; it is at this
+ * position for both the sb (ssb) and ai (bcma) buses.
+ */
+#define BCM47XX_SERIAL_ADDR (SSB_ENUM_BASE + SSB_CHIPCO_UART0_DATA)
+
 void __init prom_init(void)
 {
-	prom_init_cfe();
-	prom_init_console();
-	prom_init_cmdline();
 	prom_init_mem();
+	setup_8250_early_printk_port(CKSEG1ADDR(BCM47XX_SERIAL_ADDR), 0, 0);
 }
 
 void __init prom_free_prom_memory(void)
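
(Aside, not part of the patch: assuming the usual values from the ssb headers,
SSB_ENUM_BASE = 0x18000000 and SSB_CHIPCO_UART0_DATA = 0x0300,
BCM47XX_SERIAL_ADDR works out to 0x18000300, and on a 32-bit kernel
CKSEG1ADDR() maps it into the uncached segment as 0xb8000300, which is the
physical UART address handed to setup_8250_early_printk_port() above. Those
constant values are assumptions, not stated in this hunk.)
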
diff --git a/arch/mips/bcm47xx/serial.c b/arch/mips/bcm47xx/serial.c
index b8ef965..2f5bbd6 100644
--- a/arch/mips/bcm47xx/serial.c
+++ b/arch/mips/bcm47xx/serial.c
@@ -31,7 +31,8 @@
 
 	memset(&uart8250_data, 0,  sizeof(uart8250_data));
 
-	for (i = 0; i < mcore->nr_serial_ports; i++) {
+	for (i = 0; i < mcore->nr_serial_ports &&
+		    i < ARRAY_SIZE(uart8250_data) - 1; i++) {
 		struct plat_serial8250_port *p = &(uart8250_data[i]);
 		struct ssb_serial_port *ssb_port = &(mcore->serial_ports[i]);
 
@@ -55,7 +56,8 @@
 
 	memset(&uart8250_data, 0,  sizeof(uart8250_data));
 
-	for (i = 0; i < cc->nr_serial_ports; i++) {
+	for (i = 0; i < cc->nr_serial_ports &&
+		    i < ARRAY_SIZE(uart8250_data) - 1; i++) {
 		struct plat_serial8250_port *p = &(uart8250_data[i]);
 		struct bcma_serial_port *bcma_port;
 		bcma_port = &(cc->serial_ports[i]);
diff --git a/arch/mips/bcm47xx/setup.c b/arch/mips/bcm47xx/setup.c
index 9057728..025be21 100644
--- a/arch/mips/bcm47xx/setup.c
+++ b/arch/mips/bcm47xx/setup.c
@@ -26,6 +26,8 @@
  *  675 Mass Ave, Cambridge, MA 02139, USA.
  */
 
+#include "bcm47xx_private.h"
+
 #include <linux/export.h>
 #include <linux/types.h>
 #include <linux/ethtool.h>
@@ -35,6 +37,8 @@
 #include <linux/ssb/ssb_embedded.h>
 #include <linux/bcma/bcma_soc.h>
 #include <asm/bootinfo.h>
+#include <asm/idle.h>
+#include <asm/prom.h>
 #include <asm/reboot.h>
 #include <asm/time.h>
 #include <bcm47xx.h>
@@ -213,12 +217,14 @@
 #ifdef CONFIG_BCM47XX_BCMA
 		bcm47xx_bus_type = BCM47XX_BUS_TYPE_BCMA;
 		bcm47xx_register_bcma();
+		bcm47xx_set_system_type(bcm47xx_bus.bcma.bus.chipinfo.id);
 #endif
 	} else {
 		printk(KERN_INFO "bcm47xx: using ssb bus\n");
 #ifdef CONFIG_BCM47XX_SSB
 		bcm47xx_bus_type = BCM47XX_BUS_TYPE_SSB;
 		bcm47xx_register_ssb();
+		bcm47xx_set_system_type(bcm47xx_bus.ssb.chip_id);
 #endif
 	}
 
@@ -226,8 +232,34 @@
 	_machine_halt = bcm47xx_machine_halt;
 	pm_power_off = bcm47xx_machine_halt;
 	bcm47xx_board_detect();
+	mips_set_machine_name(bcm47xx_board_get_name());
 }
 
+static int __init bcm47xx_cpu_fixes(void)
+{
+	switch (bcm47xx_bus_type) {
+#ifdef CONFIG_BCM47XX_SSB
+	case BCM47XX_BUS_TYPE_SSB:
+		/* Nothing to do */
+		break;
+#endif
+#ifdef CONFIG_BCM47XX_BCMA
+	case BCM47XX_BUS_TYPE_BCMA:
+		/* The BCM4706 has a problem with the CPU wait instruction.
+		 * When r4k_wait or r4k_wait_irqoff is used, the CPU will just
+		 * hang and not return from a msleep(). Removing the cpu_wait
+		 * functionality is a workaround for this problem. The BCM4716
+		 * does not have this problem.
+		 */
+		if (bcm47xx_bus.bcma.bus.chipinfo.id == BCMA_CHIP_ID_BCM4706)
+			cpu_wait = NULL;
+		break;
+#endif
+	}
+	return 0;
+}
+arch_initcall(bcm47xx_cpu_fixes);
+
 static struct fixed_phy_status bcm47xx_fixed_phy_status __initdata = {
 	.link	= 1,
 	.speed	= SPEED_100,
@@ -248,6 +280,9 @@
 		break;
 #endif
 	}
+	bcm47xx_buttons_register();
+	bcm47xx_leds_register();
+
 	fixed_phy_add(PHY_POLL, 0, &bcm47xx_fixed_phy_status);
 	return 0;
 }
diff --git a/arch/mips/bcm47xx/sprom.c b/arch/mips/bcm47xx/sprom.c
index ad03c93..a8b5408 100644
--- a/arch/mips/bcm47xx/sprom.c
+++ b/arch/mips/bcm47xx/sprom.c
@@ -135,7 +135,7 @@
 }
 
 static void nvram_read_macaddr(const char *prefix, const char *name,
-			       u8 (*val)[6], bool fallback)
+			       u8 val[6], bool fallback)
 {
 	char buf[100];
 	int err;
@@ -144,11 +144,11 @@
 	if (err < 0)
 		return;
 
-	bcm47xx_nvram_parse_macaddr(buf, *val);
+	bcm47xx_nvram_parse_macaddr(buf, val);
 }
 
 static void nvram_read_alpha2(const char *prefix, const char *name,
-			     char (*val)[2], bool fallback)
+			     char val[2], bool fallback)
 {
 	char buf[10];
 	int err;
@@ -162,7 +162,7 @@
 		pr_warn("alpha2 is too long %s\n", buf);
 		return;
 	}
-	memcpy(val, buf, sizeof(val));
+	memcpy(val, buf, 2);
 }
 
 static void bcm47xx_fill_sprom_r1234589(struct ssb_sprom *sprom,
@@ -180,7 +180,7 @@
 		      fallback);
 	nvram_read_s8(prefix, NULL, "ag1", &sprom->antenna_gain.a1, 0,
 		      fallback);
-	nvram_read_alpha2(prefix, "ccode", &sprom->alpha2, fallback);
+	nvram_read_alpha2(prefix, "ccode", sprom->alpha2, fallback);
 }
 
 static void bcm47xx_fill_sprom_r12389(struct ssb_sprom *sprom,
@@ -633,20 +633,20 @@
 static void bcm47xx_fill_sprom_ethernet(struct ssb_sprom *sprom,
 					const char *prefix, bool fallback)
 {
-	nvram_read_macaddr(prefix, "et0macaddr", &sprom->et0mac, fallback);
+	nvram_read_macaddr(prefix, "et0macaddr", sprom->et0mac, fallback);
 	nvram_read_u8(prefix, NULL, "et0mdcport", &sprom->et0mdcport, 0,
 		      fallback);
 	nvram_read_u8(prefix, NULL, "et0phyaddr", &sprom->et0phyaddr, 0,
 		      fallback);
 
-	nvram_read_macaddr(prefix, "et1macaddr", &sprom->et1mac, fallback);
+	nvram_read_macaddr(prefix, "et1macaddr", sprom->et1mac, fallback);
 	nvram_read_u8(prefix, NULL, "et1mdcport", &sprom->et1mdcport, 0,
 		      fallback);
 	nvram_read_u8(prefix, NULL, "et1phyaddr", &sprom->et1phyaddr, 0,
 		      fallback);
 
-	nvram_read_macaddr(prefix, "macaddr", &sprom->il0mac, fallback);
-	nvram_read_macaddr(prefix, "il0macaddr", &sprom->il0mac, fallback);
+	nvram_read_macaddr(prefix, "macaddr", sprom->il0mac, fallback);
+	nvram_read_macaddr(prefix, "il0macaddr", sprom->il0mac, fallback);
 }
 
 static void bcm47xx_fill_board_data(struct ssb_sprom *sprom, const char *prefix,
diff --git a/arch/mips/bcm47xx/wgt634u.c b/arch/mips/bcm47xx/wgt634u.c
deleted file mode 100644
index c63a4c2..0000000
--- a/arch/mips/bcm47xx/wgt634u.c
+++ /dev/null
@@ -1,174 +0,0 @@
-/*
- * This file is subject to the terms and conditions of the GNU General Public
- * License.  See the file "COPYING" in the main directory of this archive
- * for more details.
- *
- * Copyright (C) 2007 Aurelien Jarno <aurelien@aurel32.net>
- */
-
-#include <linux/platform_device.h>
-#include <linux/module.h>
-#include <linux/leds.h>
-#include <linux/mtd/physmap.h>
-#include <linux/ssb/ssb.h>
-#include <linux/ssb/ssb_embedded.h>
-#include <linux/interrupt.h>
-#include <linux/reboot.h>
-#include <linux/gpio.h>
-#include <asm/mach-bcm47xx/bcm47xx.h>
-
-/* GPIO definitions for the WGT634U */
-#define WGT634U_GPIO_LED	3
-#define WGT634U_GPIO_RESET	2
-#define WGT634U_GPIO_TP1	7
-#define WGT634U_GPIO_TP2	6
-#define WGT634U_GPIO_TP3	5
-#define WGT634U_GPIO_TP4	4
-#define WGT634U_GPIO_TP5	1
-
-static struct gpio_led wgt634u_leds[] = {
-	{
-		.name = "power",
-		.gpio = WGT634U_GPIO_LED,
-		.active_low = 1,
-		.default_trigger = "heartbeat",
-	},
-};
-
-static struct gpio_led_platform_data wgt634u_led_data = {
-	.num_leds =	ARRAY_SIZE(wgt634u_leds),
-	.leds =		wgt634u_leds,
-};
-
-static struct platform_device wgt634u_gpio_leds = {
-	.name =		"leds-gpio",
-	.id =		-1,
-	.dev = {
-		.platform_data = &wgt634u_led_data,
-	}
-};
-
-
-/* 8MiB flash. The struct mtd_partition matches original Netgear WGT634U
-   firmware. */
-static struct mtd_partition wgt634u_partitions[] = {
-	{
-		.name	    = "cfe",
-		.offset	    = 0,
-		.size	    = 0x60000,		/* 384k */
-		.mask_flags = MTD_WRITEABLE	/* force read-only */
-	},
-	{
-		.name	= "config",
-		.offset = 0x60000,
-		.size	= 0x20000		/* 128k */
-	},
-	{
-		.name	= "linux",
-		.offset = 0x80000,
-		.size	= 0x140000		/* 1280k */
-	},
-	{
-		.name	= "jffs",
-		.offset = 0x1c0000,
-		.size	= 0x620000		/* 6272k */
-	},
-	{
-		.name	= "nvram",
-		.offset = 0x7e0000,
-		.size	= 0x20000		/* 128k */
-	},
-};
-
-static struct physmap_flash_data wgt634u_flash_data = {
-	.parts	  = wgt634u_partitions,
-	.nr_parts = ARRAY_SIZE(wgt634u_partitions)
-};
-
-static struct resource wgt634u_flash_resource = {
-	.flags = IORESOURCE_MEM,
-};
-
-static struct platform_device wgt634u_flash = {
-	.name	       = "physmap-flash",
-	.id	       = 0,
-	.dev	       = { .platform_data = &wgt634u_flash_data, },
-	.resource      = &wgt634u_flash_resource,
-	.num_resources = 1,
-};
-
-/* Platform devices */
-static struct platform_device *wgt634u_devices[] __initdata = {
-	&wgt634u_flash,
-	&wgt634u_gpio_leds,
-};
-
-static irqreturn_t gpio_interrupt(int irq, void *ignored)
-{
-	int state;
-
-	/* Interrupts are shared, check if the current one is
-	   a GPIO interrupt. */
-	if (!ssb_chipco_irq_status(&bcm47xx_bus.ssb.chipco,
-				   SSB_CHIPCO_IRQ_GPIO))
-		return IRQ_NONE;
-
-	state = gpio_get_value(WGT634U_GPIO_RESET);
-
-	/* Interrupt are level triggered, revert the interrupt polarity
-	   to clear the interrupt. */
-	ssb_gpio_polarity(&bcm47xx_bus.ssb, 1 << WGT634U_GPIO_RESET,
-			  state ? 1 << WGT634U_GPIO_RESET : 0);
-
-	if (!state) {
-		printk(KERN_INFO "Reset button pressed");
-		ctrl_alt_del();
-	}
-
-	return IRQ_HANDLED;
-}
-
-static int __init wgt634u_init(void)
-{
-	/* There is no easy way to detect that we are running on a WGT634U
-	 * machine. Use the MAC address as an heuristic. Netgear Inc. has
-	 * been allocated ranges 00:09:5b:xx:xx:xx and 00:0f:b5:xx:xx:xx.
-	 */
-	u8 *et0mac;
-
-	if (bcm47xx_bus_type != BCM47XX_BUS_TYPE_SSB)
-		return -ENODEV;
-
-	et0mac = bcm47xx_bus.ssb.sprom.et0mac;
-
-	if (et0mac[0] == 0x00 &&
-	    ((et0mac[1] == 0x09 && et0mac[2] == 0x5b) ||
-	     (et0mac[1] == 0x0f && et0mac[2] == 0xb5))) {
-		struct ssb_mipscore *mcore = &bcm47xx_bus.ssb.mipscore;
-
-		printk(KERN_INFO "WGT634U machine detected.\n");
-
-		if (!request_irq(gpio_to_irq(WGT634U_GPIO_RESET),
-				 gpio_interrupt, IRQF_SHARED,
-				 "WGT634U GPIO", &bcm47xx_bus.ssb.chipco)) {
-			gpio_direction_input(WGT634U_GPIO_RESET);
-			ssb_gpio_intmask(&bcm47xx_bus.ssb,
-					 1 << WGT634U_GPIO_RESET,
-					 1 << WGT634U_GPIO_RESET);
-			ssb_chipco_irq_mask(&bcm47xx_bus.ssb.chipco,
-					    SSB_CHIPCO_IRQ_GPIO,
-					    SSB_CHIPCO_IRQ_GPIO);
-		}
-
-		wgt634u_flash_data.width = mcore->pflash.buswidth;
-		wgt634u_flash_resource.start = mcore->pflash.window;
-		wgt634u_flash_resource.end = mcore->pflash.window
-					   + mcore->pflash.window_size
-					   - 1;
-		return platform_add_devices(wgt634u_devices,
-					    ARRAY_SIZE(wgt634u_devices));
-	} else
-		return -ENODEV;
-}
-
-module_init(wgt634u_init);
diff --git a/arch/mips/bcm63xx/Kconfig b/arch/mips/bcm63xx/Kconfig
index b78306c..a057fdf1 100644
--- a/arch/mips/bcm63xx/Kconfig
+++ b/arch/mips/bcm63xx/Kconfig
@@ -3,33 +3,41 @@
 
 config BCM63XX_CPU_3368
 	bool "support 3368 CPU"
+	select SYS_HAS_CPU_BMIPS4350
 	select HW_HAS_PCI
 
 config BCM63XX_CPU_6328
 	bool "support 6328 CPU"
+	select SYS_HAS_CPU_BMIPS4350
 	select HW_HAS_PCI
 
 config BCM63XX_CPU_6338
 	bool "support 6338 CPU"
+	select SYS_HAS_CPU_BMIPS32_3300
 	select HW_HAS_PCI
 
 config BCM63XX_CPU_6345
 	bool "support 6345 CPU"
+	select SYS_HAS_CPU_BMIPS32_3300
 
 config BCM63XX_CPU_6348
 	bool "support 6348 CPU"
+	select SYS_HAS_CPU_BMIPS32_3300
 	select HW_HAS_PCI
 
 config BCM63XX_CPU_6358
 	bool "support 6358 CPU"
+	select SYS_HAS_CPU_BMIPS4350
 	select HW_HAS_PCI
 
 config BCM63XX_CPU_6362
 	bool "support 6362 CPU"
+	select SYS_HAS_CPU_BMIPS4350
 	select HW_HAS_PCI
 
 config BCM63XX_CPU_6368
 	bool "support 6368 CPU"
+	select SYS_HAS_CPU_BMIPS4350
 	select HW_HAS_PCI
 endmenu
 
diff --git a/arch/mips/bcm63xx/Makefile b/arch/mips/bcm63xx/Makefile
index ac28073..9019f54 100644
--- a/arch/mips/bcm63xx/Makefile
+++ b/arch/mips/bcm63xx/Makefile
@@ -1,7 +1,7 @@
 obj-y		+= clk.o cpu.o cs.o gpio.o irq.o nvram.o prom.o reset.o \
 		   setup.o timer.o dev-dsp.o dev-enet.o dev-flash.o \
-		   dev-pcmcia.o dev-rng.o dev-spi.o dev-uart.o dev-wdt.o \
-		   dev-usb-usbd.o
+		   dev-pcmcia.o dev-rng.o dev-spi.o dev-hsspi.o dev-uart.o \
+		   dev-wdt.o dev-usb-usbd.o
 obj-$(CONFIG_EARLY_PRINTK)	+= early_printk.o
 
 obj-y		+= boards/
diff --git a/arch/mips/bcm63xx/boards/board_bcm963xx.c b/arch/mips/bcm63xx/boards/board_bcm963xx.c
index 5b974eb..33727e7 100644
--- a/arch/mips/bcm63xx/boards/board_bcm963xx.c
+++ b/arch/mips/bcm63xx/boards/board_bcm963xx.c
@@ -23,6 +23,7 @@
 #include <bcm63xx_dev_enet.h>
 #include <bcm63xx_dev_dsp.h>
 #include <bcm63xx_dev_flash.h>
+#include <bcm63xx_dev_hsspi.h>
 #include <bcm63xx_dev_pcmcia.h>
 #include <bcm63xx_dev_spi.h>
 #include <bcm63xx_dev_usb_usbd.h>
@@ -915,6 +916,8 @@
 
 	bcm63xx_spi_register();
 
+	bcm63xx_hsspi_register();
+
 	bcm63xx_flash_register();
 
 	bcm63xx_led_data.num_leds = ARRAY_SIZE(board.leds);
diff --git a/arch/mips/bcm63xx/clk.c b/arch/mips/bcm63xx/clk.c
index 43da4ae..6375652 100644
--- a/arch/mips/bcm63xx/clk.c
+++ b/arch/mips/bcm63xx/clk.c
@@ -226,6 +226,28 @@
 };
 
 /*
+ * HSSPI clock
+ */
+static void hsspi_set(struct clk *clk, int enable)
+{
+	u32 mask;
+
+	if (BCMCPU_IS_6328())
+		mask = CKCTL_6328_HSSPI_EN;
+	else if (BCMCPU_IS_6362())
+		mask = CKCTL_6362_HSSPI_EN;
+	else
+		return;
+
+	bcm_hwclock_set(mask, enable);
+}
+
+static struct clk clk_hsspi = {
+	.set	= hsspi_set,
+};
+
+
+/*
  * XTM clock
  */
 static void xtm_set(struct clk *clk, int enable)
@@ -346,6 +368,8 @@
 		return &clk_usbd;
 	if (!strcmp(id, "spi"))
 		return &clk_spi;
+	if (!strcmp(id, "hsspi"))
+		return &clk_hsspi;
 	if (!strcmp(id, "xtm"))
 		return &clk_xtm;
 	if (!strcmp(id, "periph"))
@@ -366,3 +390,21 @@
 }
 
 EXPORT_SYMBOL(clk_put);
+
+#define HSSPI_PLL_HZ_6328	133333333
+#define HSSPI_PLL_HZ_6362	400000000
+
+static int __init bcm63xx_clk_init(void)
+{
+	switch (bcm63xx_get_cpu_id()) {
+	case BCM6328_CPU_ID:
+		clk_hsspi.rate = HSSPI_PLL_HZ_6328;
+		break;
+	case BCM6362_CPU_ID:
+		clk_hsspi.rate = HSSPI_PLL_HZ_6362;
+		break;
+	}
+
+	return 0;
+}
+arch_initcall(bcm63xx_clk_init);
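[Editor's note] With the "hsspi" string wired into clk_get() above, a consumer such as the HSSPI controller driver can gate the clock through the usual bcm63xx clk calls. A hedged sketch of such a consumer (the function and its error handling are illustrative, not part of this patch):

#include <linux/clk.h>
#include <linux/err.h>

/* Illustrative consumer of the new "hsspi" clock; on bcm63xx the lookup
 * above goes purely by name, so dev may be NULL. */
static int example_enable_hsspi_clk(struct device *dev, unsigned long *rate)
{
	struct clk *clk;
	int ret;

	clk = clk_get(dev, "hsspi");
	if (IS_ERR(clk))
		return PTR_ERR(clk);

	ret = clk_enable(clk);		/* ends up in hsspi_set(clk, 1) */
	if (ret) {
		clk_put(clk);
		return ret;
	}

	*rate = clk_get_rate(clk);	/* 133333333 or 400000000 per the initcall */
	return 0;
}
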
diff --git a/arch/mips/bcm63xx/cpu.c b/arch/mips/bcm63xx/cpu.c
index b713cd6..1b1b8a8 100644
--- a/arch/mips/bcm63xx/cpu.c
+++ b/arch/mips/bcm63xx/cpu.c
@@ -123,7 +123,9 @@
 
 static unsigned int detect_cpu_clock(void)
 {
-	switch (bcm63xx_get_cpu_id()) {
+	u16 cpu_id = bcm63xx_get_cpu_id();
+
+	switch (cpu_id) {
 	case BCM3368_CPU_ID:
 		return 300000000;
 
@@ -249,7 +251,7 @@
 	}
 
 	default:
-		BUG();
+		panic("Failed to detect clock for CPU with id=%04X\n", cpu_id);
 	}
 }
 
diff --git a/arch/mips/bcm63xx/dev-hsspi.c b/arch/mips/bcm63xx/dev-hsspi.c
new file mode 100644
index 0000000..696abc4
--- /dev/null
+++ b/arch/mips/bcm63xx/dev-hsspi.c
@@ -0,0 +1,47 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2012 Jonas Gorski <jonas.gorski@gmail.com>
+ */
+
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/platform_device.h>
+
+#include <bcm63xx_cpu.h>
+#include <bcm63xx_dev_hsspi.h>
+#include <bcm63xx_regs.h>
+
+static struct resource spi_resources[] = {
+	{
+		.start		= -1, /* filled at runtime */
+		.end		= -1, /* filled at runtime */
+		.flags		= IORESOURCE_MEM,
+	},
+	{
+		.start		= -1, /* filled at runtime */
+		.flags		= IORESOURCE_IRQ,
+	},
+};
+
+static struct platform_device bcm63xx_hsspi_device = {
+	.name		= "bcm63xx-hsspi",
+	.id		= 0,
+	.num_resources	= ARRAY_SIZE(spi_resources),
+	.resource	= spi_resources,
+};
+
+int __init bcm63xx_hsspi_register(void)
+{
+	if (!BCMCPU_IS_6328() && !BCMCPU_IS_6362())
+		return -ENODEV;
+
+	spi_resources[0].start = bcm63xx_regset_address(RSET_HSSPI);
+	spi_resources[0].end = spi_resources[0].start;
+	spi_resources[0].end += RSET_HSSPI_SIZE - 1;
+	spi_resources[1].start = bcm63xx_get_irq_number(IRQ_HSSPI);
+
+	return platform_device_register(&bcm63xx_hsspi_device);
+}
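[Editor's note] The new platform device is matched by name, so the corresponding controller driver only needs a platform_driver carrying the same "bcm63xx-hsspi" string; that driver is not part of this hunk. A minimal, hypothetical probe skeleton:

#include <linux/module.h>
#include <linux/platform_device.h>

/* Hypothetical skeleton of a driver binding to the device registered above. */
static int bcm63xx_hsspi_example_probe(struct platform_device *pdev)
{
	struct resource *res_mem;
	int irq;

	res_mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	irq = platform_get_irq(pdev, 0);
	if (!res_mem || irq < 0)
		return -ENODEV;

	dev_info(&pdev->dev, "HSSPI at %pR, irq %d\n", res_mem, irq);
	return 0;
}

static struct platform_driver bcm63xx_hsspi_example_driver = {
	.probe	= bcm63xx_hsspi_example_probe,
	.driver	= {
		.name = "bcm63xx-hsspi",	/* matches the device name above */
	},
};
module_platform_driver(bcm63xx_hsspi_example_driver);
MODULE_LICENSE("GPL");
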
diff --git a/arch/mips/bcm63xx/early_printk.c b/arch/mips/bcm63xx/early_printk.c
index aa8f7f9..6092226 100644
--- a/arch/mips/bcm63xx/early_printk.c
+++ b/arch/mips/bcm63xx/early_printk.c
@@ -6,9 +6,8 @@
  * Copyright (C) 2008 Maxime Bizon <mbizon@freebox.fr>
  */
 
-#include <linux/init.h>
 #include <bcm63xx_io.h>
-#include <bcm63xx_regs.h>
+#include <linux/serial_bcm63xx.h>
 
 static void wait_xfered(void)
 {
diff --git a/arch/mips/bcm63xx/prom.c b/arch/mips/bcm63xx/prom.c
index 8ac4e09..e1f27d6 100644
--- a/arch/mips/bcm63xx/prom.c
+++ b/arch/mips/bcm63xx/prom.c
@@ -59,14 +59,12 @@
 	/* do low level board init */
 	board_prom_init();
 
-	if (IS_ENABLED(CONFIG_CPU_BMIPS4350) && IS_ENABLED(CONFIG_SMP)) {
-		/* set up SMP */
-		register_smp_ops(&bmips_smp_ops);
-
+	/* set up SMP */
+	if (!register_bmips_smp_ops()) {
 		/*
-		 * BCM6328 might not have its second CPU enabled, while BCM6358
-		 * needs special handling for its shared TLB, so disable SMP
-		 * for now.
+		 * BCM6328 might not have its second CPU enabled, while BCM3368
+		 * and BCM6358 need special handling for their shared TLB, so
+		 * disable SMP for now.
 		 */
 		if (BCMCPU_IS_6328()) {
 			reg = bcm_readl(BCM_6328_OTP_BASE +
@@ -74,7 +72,7 @@
 
 			if (reg & OTP_6328_REG3_TP1_DISABLED)
 				bmips_smp_enabled = 0;
-		} else if (BCMCPU_IS_6358()) {
+		} else if (BCMCPU_IS_3368() || BCMCPU_IS_6358()) {
 			bmips_smp_enabled = 0;
 		}
 
diff --git a/arch/mips/boot/compressed/Makefile b/arch/mips/boot/compressed/Makefile
index ca0c343..61af6b6 100644
--- a/arch/mips/boot/compressed/Makefile
+++ b/arch/mips/boot/compressed/Makefile
@@ -27,10 +27,10 @@
 	-DBOOT_HEAP_SIZE=$(BOOT_HEAP_SIZE) \
 	-DKERNEL_ENTRY=$(VMLINUX_ENTRY_ADDRESS)
 
-targets := head.o decompress.o dbg.o uart-16550.o uart-alchemy.o
+targets := head.o decompress.o string.o dbg.o uart-16550.o uart-alchemy.o
 
 # decompressor objects (linked with vmlinuz)
-vmlinuzobjs-y := $(obj)/head.o $(obj)/decompress.o $(obj)/dbg.o
+vmlinuzobjs-y := $(obj)/head.o $(obj)/decompress.o $(obj)/string.o $(obj)/dbg.o
 
 ifdef CONFIG_DEBUG_ZBOOT
 vmlinuzobjs-$(CONFIG_SYS_SUPPORTS_ZBOOT_UART16550) += $(obj)/uart-16550.o
diff --git a/arch/mips/boot/compressed/dbg.c b/arch/mips/boot/compressed/dbg.c
index 134a616..06c6a5b 100644
--- a/arch/mips/boot/compressed/dbg.c
+++ b/arch/mips/boot/compressed/dbg.c
@@ -6,7 +6,6 @@
  * need to implement your own putc().
  */
 #include <linux/compiler.h>
-#include <linux/init.h>
 #include <linux/types.h>
 
 void __weak putc(char c)
diff --git a/arch/mips/boot/compressed/decompress.c b/arch/mips/boot/compressed/decompress.c
index a8c6fd6..c00c4dd 100644
--- a/arch/mips/boot/compressed/decompress.c
+++ b/arch/mips/boot/compressed/decompress.c
@@ -43,33 +43,11 @@
 /* activate the code for pre-boot environment */
 #define STATIC static
 
-#if defined(CONFIG_KERNEL_GZIP) || defined(CONFIG_KERNEL_XZ) || \
-	defined(CONFIG_KERNEL_LZ4)
-void *memcpy(void *dest, const void *src, size_t n)
-{
-	int i;
-	const char *s = src;
-	char *d = dest;
-
-	for (i = 0; i < n; i++)
-		d[i] = s[i];
-	return dest;
-}
-#endif
 #ifdef CONFIG_KERNEL_GZIP
 #include "../../../../lib/decompress_inflate.c"
 #endif
 
 #ifdef CONFIG_KERNEL_BZIP2
-void *memset(void *s, int c, size_t n)
-{
-	int i;
-	char *ss = s;
-
-	for (i = 0; i < n; i++)
-		ss[i] = c;
-	return s;
-}
 #include "../../../../lib/decompress_bunzip2.c"
 #endif
 
diff --git a/arch/mips/boot/compressed/string.c b/arch/mips/boot/compressed/string.c
new file mode 100644
index 0000000..9de9885
--- /dev/null
+++ b/arch/mips/boot/compressed/string.c
@@ -0,0 +1,28 @@
+/*
+ * arch/mips/boot/compressed/string.c
+ *
+ * Very small subset of simple string routines
+ */
+
+#include <linux/types.h>
+
+void *memcpy(void *dest, const void *src, size_t n)
+{
+	int i;
+	const char *s = src;
+	char *d = dest;
+
+	for (i = 0; i < n; i++)
+		d[i] = s[i];
+	return dest;
+}
+
+void *memset(void *s, int c, size_t n)
+{
+	int i;
+	char *ss = s;
+
+	for (i = 0; i < n; i++)
+		ss[i] = c;
+	return s;
+}
diff --git a/arch/mips/boot/compressed/uart-16550.c b/arch/mips/boot/compressed/uart-16550.c
index c01d343..237494b 100644
--- a/arch/mips/boot/compressed/uart-16550.c
+++ b/arch/mips/boot/compressed/uart-16550.c
@@ -4,7 +4,6 @@
 
 #include <linux/types.h>
 #include <linux/serial_reg.h>
-#include <linux/init.h>
 
 #include <asm/addrspace.h>
 
@@ -19,8 +18,8 @@
 #endif
 
 #ifdef CONFIG_MACH_JZ4740
-#define UART0_BASE  0xB0030000
-#define PORT(offset) (UART0_BASE + (4 * offset))
+#include <asm/mach-jz4740/base.h>
+#define PORT(offset) (CKSEG1ADDR(JZ4740_UART0_BASE_ADDR) + (4 * offset))
 #endif
 
 #ifdef CONFIG_CPU_XLR
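[Editor's note] The JZ4740 hunk replaces the hand-coded 0xB0030000 with the physical base from the SoC header: CKSEG1ADDR() places a physical address in kseg1, the unmapped, uncached window at 0xA0000000, which is exactly what the old constant encoded. A small illustrative restatement (assumes JZ4740_UART0_BASE_ADDR is the 0x10030000 physical base implied by the old value):

#include <asm/addrspace.h>
#include <asm/mach-jz4740/base.h>

/* CKSEG1ADDR(0x10030000) == 0xB0030000: the old constant was simply the
 * UART0 physical base viewed through the uncached kseg1 window. */
#define EXAMPLE_JZ_UART0_REG(offset) \
	(CKSEG1ADDR(JZ4740_UART0_BASE_ADDR) + 4 * (offset))
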
diff --git a/arch/mips/cavium-octeon/executive/cvmx-cmd-queue.c b/arch/mips/cavium-octeon/executive/cvmx-cmd-queue.c
index 132bccc..8241fc6 100644
--- a/arch/mips/cavium-octeon/executive/cvmx-cmd-queue.c
+++ b/arch/mips/cavium-octeon/executive/cvmx-cmd-queue.c
@@ -47,6 +47,7 @@
  * state. It points to a bootmem named block.
  */
 __cvmx_cmd_queue_all_state_t *__cvmx_cmd_queue_state_ptr;
+EXPORT_SYMBOL_GPL(__cvmx_cmd_queue_state_ptr);
 
 /**
  * Initialize the Global queue state pointer.
diff --git a/arch/mips/cavium-octeon/executive/cvmx-helper-board.c b/arch/mips/cavium-octeon/executive/cvmx-helper-board.c
index 0a1283c..b764df6 100644
--- a/arch/mips/cavium-octeon/executive/cvmx-helper-board.c
+++ b/arch/mips/cavium-octeon/executive/cvmx-helper-board.c
@@ -722,3 +722,30 @@
 	}
 	return 0;
 }
+
+/**
+ * Get the clock type used for the USB block based on board type.
+ * Used by the USB code for auto configuration of clock type.
+ *
+ * Return USB clock type enumeration
+ */
+enum cvmx_helper_board_usb_clock_types __cvmx_helper_board_usb_get_clock_type(void)
+{
+	switch (cvmx_sysinfo_get()->board_type) {
+	case CVMX_BOARD_TYPE_BBGW_REF:
+	case CVMX_BOARD_TYPE_LANAI2_A:
+	case CVMX_BOARD_TYPE_LANAI2_U:
+	case CVMX_BOARD_TYPE_LANAI2_G:
+	case CVMX_BOARD_TYPE_NIC10E_66:
+	case CVMX_BOARD_TYPE_UBNT_E100:
+		return USB_CLOCK_TYPE_CRYSTAL_12;
+	case CVMX_BOARD_TYPE_NIC10E:
+		return USB_CLOCK_TYPE_REF_12;
+	default:
+		break;
+	}
+	/* Most boards except NIC10e use a 12MHz crystal */
+	if (OCTEON_IS_MODEL(OCTEON_FAM_2))
+		return USB_CLOCK_TYPE_CRYSTAL_12;
+	return USB_CLOCK_TYPE_REF_48;
+}
diff --git a/arch/mips/cavium-octeon/executive/cvmx-helper-util.c b/arch/mips/cavium-octeon/executive/cvmx-helper-util.c
index 65d2bc9..453d7f6 100644
--- a/arch/mips/cavium-octeon/executive/cvmx-helper-util.c
+++ b/arch/mips/cavium-octeon/executive/cvmx-helper-util.c
@@ -251,6 +251,7 @@
 
 	return 0;
 }
+EXPORT_SYMBOL_GPL(cvmx_helper_setup_red);
 
 /**
  * Setup the common GMX settings that determine the number of
@@ -384,6 +385,7 @@
 	}
 	return -1;
 }
+EXPORT_SYMBOL_GPL(cvmx_helper_get_ipd_port);
 
 /**
  * Returns the interface number for an IPD/PKO port number.
@@ -408,6 +410,7 @@
 
 	return -1;
 }
+EXPORT_SYMBOL_GPL(cvmx_helper_get_interface_num);
 
 /**
  * Returns the interface index number for an IPD/PKO port
@@ -431,3 +434,4 @@
 
 	return -1;
 }
+EXPORT_SYMBOL_GPL(cvmx_helper_get_interface_index_num);
diff --git a/arch/mips/cavium-octeon/executive/cvmx-helper.c b/arch/mips/cavium-octeon/executive/cvmx-helper.c
index d63d20d..8553ad5 100644
--- a/arch/mips/cavium-octeon/executive/cvmx-helper.c
+++ b/arch/mips/cavium-octeon/executive/cvmx-helper.c
@@ -67,7 +67,7 @@
 void (*cvmx_override_ipd_port_setup) (int ipd_port);
 
 /* Port count per interface */
-static int interface_port_count[4] = { 0, 0, 0, 0 };
+static int interface_port_count[5];
 
 /* Port last configured link info index by IPD/PKO port */
 static cvmx_helper_link_info_t
@@ -88,6 +88,7 @@
 	else
 		return 3;
 }
+EXPORT_SYMBOL_GPL(cvmx_helper_get_number_of_interfaces);
 
 /**
  * Return the number of ports on an interface. Depending on the
@@ -102,6 +103,7 @@
 {
 	return interface_port_count[interface];
 }
+EXPORT_SYMBOL_GPL(cvmx_helper_ports_on_interface);
 
 /**
  * Get the operating mode of an interface. Depending on the Octeon
@@ -179,6 +181,7 @@
 			return CVMX_HELPER_INTERFACE_MODE_RGMII;
 	}
 }
+EXPORT_SYMBOL_GPL(cvmx_helper_interface_get_mode);
 
 /**
  * Configure the IPD/PIP tagging and QoS options for a specific
@@ -825,6 +828,7 @@
 		__cvmx_helper_errata_fix_ipd_ptr_alignment();
 	return 0;
 }
+EXPORT_SYMBOL_GPL(cvmx_helper_ipd_and_packet_input_enable);
 
 /**
  * Initialize the PIP, IPD, and PKO hardware to support
@@ -903,6 +907,7 @@
 #endif
 	return result;
 }
+EXPORT_SYMBOL_GPL(cvmx_helper_initialize_packet_io_global);
 
 /**
  * Does core local initialization for packet io
@@ -947,6 +952,7 @@
 	 */
 	return port_link_info[ipd_port];
 }
+EXPORT_SYMBOL_GPL(cvmx_helper_link_autoconf);
 
 /**
  * Return the link state of an IPD/PKO port as returned by
@@ -1005,6 +1011,7 @@
 	}
 	return result;
 }
+EXPORT_SYMBOL_GPL(cvmx_helper_link_get);
 
 /**
  * Configure an IPD/PKO port for the specified link state. This
@@ -1060,6 +1067,7 @@
 		port_link_info[ipd_port].u64 = link_info.u64;
 	return result;
 }
+EXPORT_SYMBOL_GPL(cvmx_helper_link_set);
 
 /**
  * Configure a port for internal and/or external loopback. Internal loopback
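[Editor's note] Exporting these helpers lets modular code, for instance an Octeon Ethernet driver built as a module, query link state without being built in. A hedged sketch of such a caller (field names follow the cvmx_helper_link_info_t union used elsewhere in the tree; the function itself is illustrative):

#include <linux/printk.h>
#include <asm/octeon/cvmx-helper.h>

/* Sketch only: report the negotiated state of one IPD port. */
static void example_report_link(int ipd_port)
{
	cvmx_helper_link_info_t link = cvmx_helper_link_get(ipd_port);

	if (link.s.link_up)
		pr_info("port %d: %u Mbit/s, %s duplex\n", ipd_port,
			(unsigned int)link.s.speed,
			link.s.full_duplex ? "full" : "half");
	else
		pr_info("port %d: link down\n", ipd_port);
}
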
diff --git a/arch/mips/cavium-octeon/executive/cvmx-pko.c b/arch/mips/cavium-octeon/executive/cvmx-pko.c
index f2c8775..008b881 100644
--- a/arch/mips/cavium-octeon/executive/cvmx-pko.c
+++ b/arch/mips/cavium-octeon/executive/cvmx-pko.c
@@ -140,7 +140,7 @@
 	pko_reg_flags.s.ena_pko = 0;
 	cvmx_write_csr(CVMX_PKO_REG_FLAGS, pko_reg_flags.u64);
 }
-
+EXPORT_SYMBOL_GPL(cvmx_pko_disable);
 
 /**
  * Reset the packet output.
@@ -182,6 +182,7 @@
 	}
 	__cvmx_pko_reset();
 }
+EXPORT_SYMBOL_GPL(cvmx_pko_shutdown);
 
 /**
  * Configure a output port and the associated queues for use.
diff --git a/arch/mips/cavium-octeon/executive/cvmx-spi.c b/arch/mips/cavium-octeon/executive/cvmx-spi.c
index ef5198d..459e3b1 100644
--- a/arch/mips/cavium-octeon/executive/cvmx-spi.c
+++ b/arch/mips/cavium-octeon/executive/cvmx-spi.c
@@ -177,6 +177,7 @@
 
 	return res;
 }
+EXPORT_SYMBOL_GPL(cvmx_spi_restart_interface);
 
 /**
  * Callback to perform SPI4 reset
diff --git a/arch/mips/cavium-octeon/octeon-platform.c b/arch/mips/cavium-octeon/octeon-platform.c
index 1830874..6df0f4d 100644
--- a/arch/mips/cavium-octeon/octeon-platform.c
+++ b/arch/mips/cavium-octeon/octeon-platform.c
@@ -171,6 +171,7 @@
 static struct of_device_id __initdata octeon_ids[] = {
 	{ .compatible = "simple-bus", },
 	{ .compatible = "cavium,octeon-6335-uctl", },
+	{ .compatible = "cavium,octeon-5750-usbn", },
 	{ .compatible = "cavium,octeon-3860-bootbus", },
 	{ .compatible = "cavium,mdio-mux", },
 	{ .compatible = "gpio-leds", },
@@ -336,14 +337,14 @@
 	int p;
 	int count = 0;
 
-	if (cvmx_helper_interface_enumerate(idx) == 0)
-		count = cvmx_helper_ports_on_interface(idx);
-
 	snprintf(name_buffer, sizeof(name_buffer), "interface@%d", idx);
 	iface = fdt_subnode_offset(initial_boot_params, pip, name_buffer);
 	if (iface < 0)
 		return;
 
+	if (cvmx_helper_interface_enumerate(idx) == 0)
+		count = cvmx_helper_ports_on_interface(idx);
+
 	for (p = 0; p < 16; p++)
 		octeon_fdt_pip_port(iface, idx, p, count - 1, pmac);
 }
@@ -682,6 +683,37 @@
 		}
 	}
 
+	/* DWC2 USB */
+	alias_prop = fdt_getprop(initial_boot_params, aliases,
+				 "usbn", NULL);
+	if (alias_prop) {
+		int usbn = fdt_path_offset(initial_boot_params, alias_prop);
+
+		if (usbn >= 0 && (current_cpu_type() == CPU_CAVIUM_OCTEON2 ||
+				  !octeon_has_feature(OCTEON_FEATURE_USB))) {
+			pr_debug("Deleting usbn\n");
+			fdt_nop_node(initial_boot_params, usbn);
+			fdt_nop_property(initial_boot_params, aliases, "usbn");
+		} else  {
+			__be32 new_f[1];
+			enum cvmx_helper_board_usb_clock_types c;
+			c = __cvmx_helper_board_usb_get_clock_type();
+			switch (c) {
+			case USB_CLOCK_TYPE_REF_48:
+				new_f[0] = cpu_to_be32(48000000);
+				fdt_setprop_inplace(initial_boot_params, usbn,
+						    "refclk-frequency",  new_f, sizeof(new_f));
+				/* Fall through ...*/
+			case USB_CLOCK_TYPE_REF_12:
+				/* Missing "refclk-type" defaults to external. */
+				fdt_nop_property(initial_boot_params, usbn, "refclk-type");
+				break;
+			default:
+				break;
+			}
+		}
+	}
+
 	return 0;
 }
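[Editor's note] On the consumer side, a host controller driver would read back the properties this code patches into the flattened tree. A hedged sketch using the generic OF helpers (the property names come from this patch and the DTS below; everything else is illustrative):

#include <linux/of.h>
#include <linux/string.h>

/* Illustrative only: fetch the USB reference clock description. */
static int example_get_usb_refclk(struct device_node *np,
				  u32 *hz, bool *is_crystal)
{
	const char *type = "external";	/* a missing refclk-type means external */
	int ret;

	ret = of_property_read_u32(np, "refclk-frequency", hz);
	if (ret)
		return ret;

	of_property_read_string(np, "refclk-type", &type);
	*is_crystal = !strcmp(type, "crystal");
	return 0;
}
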
 
diff --git a/arch/mips/cavium-octeon/octeon_3xxx.dts b/arch/mips/cavium-octeon/octeon_3xxx.dts
index 88cb42d..fa33115 100644
--- a/arch/mips/cavium-octeon/octeon_3xxx.dts
+++ b/arch/mips/cavium-octeon/octeon_3xxx.dts
@@ -550,6 +550,24 @@
 				big-endian-regs;
 			};
 		};
+
+		usbn: usbn@1180068000000 {
+			compatible = "cavium,octeon-5750-usbn";
+			reg = <0x11800 0x68000000 0x0 0x1000>;
+			ranges; /* Direct mapping */
+			#address-cells = <2>;
+			#size-cells = <2>;
+			/* 12MHz, 24MHz and 48MHz allowed */
+			refclk-frequency = <12000000>;
+			/* Either "crystal" or "external" */
+			refclk-type = "crystal";
+
+			usbc@16f0010000000 {
+				compatible = "cavium,octeon-5750-usbc";
+				reg = <0x16f00 0x10000000 0x0 0x80000>;
+				interrupts = <0 56>;
+			};
+		};
 	};
 
 	aliases {
@@ -566,6 +584,7 @@
 		flash0 = &flash0;
 		cf0 = &cf0;
 		uctl = &uctl;
+		usbn = &usbn;
 		led0 = &led0;
 	};
  };
diff --git a/arch/mips/cavium-octeon/smp.c b/arch/mips/cavium-octeon/smp.c
index 24a2167..67a078f 100644
--- a/arch/mips/cavium-octeon/smp.c
+++ b/arch/mips/cavium-octeon/smp.c
@@ -6,7 +6,6 @@
  * Copyright (C) 2004-2008, 2009, 2010 Cavium Networks
  */
 #include <linux/cpu.h>
-#include <linux/init.h>
 #include <linux/delay.h>
 #include <linux/smp.h>
 #include <linux/interrupt.h>
diff --git a/arch/mips/configs/ar7_defconfig b/arch/mips/configs/ar7_defconfig
index 80e012fa..320772c 100644
--- a/arch/mips/configs/ar7_defconfig
+++ b/arch/mips/configs/ar7_defconfig
@@ -86,7 +86,6 @@
 CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
 # CONFIG_FIRMWARE_IN_KERNEL is not set
 CONFIG_MTD=y
-CONFIG_MTD_PARTITIONS=y
 CONFIG_MTD_CHAR=y
 CONFIG_MTD_BLOCK=y
 CONFIG_MTD_CFI=y
diff --git a/arch/mips/configs/bcm47xx_defconfig b/arch/mips/configs/bcm47xx_defconfig
index 4ca8e5c..0db4eb3 100644
--- a/arch/mips/configs/bcm47xx_defconfig
+++ b/arch/mips/configs/bcm47xx_defconfig
@@ -1,623 +1,86 @@
 CONFIG_BCM47XX=y
-CONFIG_NO_HZ=y
-CONFIG_HIGH_RES_TIMERS=y
-CONFIG_KEXEC=y
-# CONFIG_SECCOMP is not set
-CONFIG_EXPERIMENTAL=y
-# CONFIG_LOCALVERSION_AUTO is not set
 CONFIG_SYSVIPC=y
-CONFIG_POSIX_MQUEUE=y
-CONFIG_BSD_PROCESS_ACCT=y
-CONFIG_BSD_PROCESS_ACCT_V3=y
-CONFIG_TASKSTATS=y
-CONFIG_TASK_DELAY_ACCT=y
-CONFIG_TASK_XACCT=y
-CONFIG_TASK_IO_ACCOUNTING=y
-CONFIG_AUDIT=y
-CONFIG_TINY_RCU=y
-CONFIG_CGROUPS=y
-CONFIG_CGROUP_CPUACCT=y
-CONFIG_RELAY=y
+CONFIG_HIGH_RES_TIMERS=y
+CONFIG_UIDGID_STRICT_TYPE_CHECKS=y
 CONFIG_BLK_DEV_INITRD=y
-CONFIG_RD_LZMA=y
-CONFIG_EXPERT=y
+CONFIG_CC_OPTIMIZE_FOR_SIZE=y
+CONFIG_EMBEDDED=y
 CONFIG_SLAB=y
 CONFIG_MODULES=y
 CONFIG_MODULE_UNLOAD=y
-CONFIG_MODULE_FORCE_UNLOAD=y
-CONFIG_MODVERSIONS=y
-# CONFIG_BLK_DEV_BSG is not set
+CONFIG_PARTITION_ADVANCED=y
 CONFIG_PCI=y
-CONFIG_BINFMT_MISC=m
+# CONFIG_SUSPEND is not set
 CONFIG_NET=y
 CONFIG_PACKET=y
 CONFIG_UNIX=y
-CONFIG_XFRM_USER=m
-CONFIG_NET_KEY=m
 CONFIG_INET=y
 CONFIG_IP_MULTICAST=y
 CONFIG_IP_ADVANCED_ROUTER=y
 CONFIG_IP_MULTIPLE_TABLES=y
 CONFIG_IP_ROUTE_MULTIPATH=y
-CONFIG_IP_ROUTE_VERBOSE=y
-CONFIG_NET_IPIP=m
-CONFIG_NET_IPGRE=m
-CONFIG_NET_IPGRE_BROADCAST=y
 CONFIG_IP_MROUTE=y
-CONFIG_IP_PIMSM_V1=y
-CONFIG_IP_PIMSM_V2=y
+CONFIG_IP_MROUTE_MULTIPLE_TABLES=y
 CONFIG_SYN_COOKIES=y
-CONFIG_INET_AH=m
-CONFIG_INET_ESP=m
-CONFIG_INET_IPCOMP=m
-CONFIG_INET_XFRM_MODE_TRANSPORT=m
-CONFIG_INET_XFRM_MODE_TUNNEL=m
-CONFIG_INET_XFRM_MODE_BEET=m
-CONFIG_INET_DIAG=m
 CONFIG_TCP_CONG_ADVANCED=y
-CONFIG_TCP_CONG_BIC=y
-CONFIG_TCP_CONG_CUBIC=m
-CONFIG_TCP_CONG_HSTCP=m
-CONFIG_TCP_CONG_HYBLA=m
-CONFIG_TCP_CONG_SCALABLE=m
-CONFIG_TCP_CONG_LP=m
-CONFIG_TCP_CONG_VENO=m
-CONFIG_TCP_CONG_YEAH=m
-CONFIG_TCP_CONG_ILLINOIS=m
 CONFIG_IPV6_PRIVACY=y
-CONFIG_INET6_AH=m
-CONFIG_INET6_ESP=m
-CONFIG_INET6_IPCOMP=m
-CONFIG_INET6_XFRM_MODE_ROUTEOPTIMIZATION=m
-CONFIG_IPV6_TUNNEL=m
 CONFIG_IPV6_MULTIPLE_TABLES=y
 CONFIG_IPV6_SUBTREES=y
-CONFIG_NETWORK_SECMARK=y
+CONFIG_IPV6_MROUTE=y
 CONFIG_NETFILTER=y
-CONFIG_NETFILTER_NETLINK_QUEUE=m
-CONFIG_NF_CONNTRACK=m
-CONFIG_NF_CONNTRACK_SECMARK=y
-CONFIG_NF_CONNTRACK_EVENTS=y
-CONFIG_NF_CT_PROTO_UDPLITE=m
-CONFIG_NF_CONNTRACK_AMANDA=m
-CONFIG_NF_CONNTRACK_FTP=m
-CONFIG_NF_CONNTRACK_H323=m
-CONFIG_NF_CONNTRACK_IRC=m
-CONFIG_NF_CONNTRACK_NETBIOS_NS=m
-CONFIG_NF_CONNTRACK_PPTP=m
-CONFIG_NF_CONNTRACK_SANE=m
-CONFIG_NF_CONNTRACK_SIP=m
-CONFIG_NF_CONNTRACK_TFTP=m
-CONFIG_NF_CT_NETLINK=m
-CONFIG_NETFILTER_XT_TARGET_CLASSIFY=m
-CONFIG_NETFILTER_XT_TARGET_CONNMARK=m
-CONFIG_NETFILTER_XT_TARGET_CONNSECMARK=m
-CONFIG_NETFILTER_XT_TARGET_DSCP=m
-CONFIG_NETFILTER_XT_TARGET_MARK=m
-CONFIG_NETFILTER_XT_TARGET_NFLOG=m
-CONFIG_NETFILTER_XT_TARGET_NFQUEUE=m
-CONFIG_NETFILTER_XT_TARGET_TRACE=m
-CONFIG_NETFILTER_XT_TARGET_SECMARK=m
-CONFIG_NETFILTER_XT_TARGET_TCPMSS=m
-CONFIG_NETFILTER_XT_MATCH_COMMENT=m
-CONFIG_NETFILTER_XT_MATCH_CONNBYTES=m
-CONFIG_NETFILTER_XT_MATCH_CONNLIMIT=m
-CONFIG_NETFILTER_XT_MATCH_CONNMARK=m
-CONFIG_NETFILTER_XT_MATCH_CONNTRACK=m
-CONFIG_NETFILTER_XT_MATCH_DSCP=m
-CONFIG_NETFILTER_XT_MATCH_ESP=m
-CONFIG_NETFILTER_XT_MATCH_HASHLIMIT=m
-CONFIG_NETFILTER_XT_MATCH_HELPER=m
-CONFIG_NETFILTER_XT_MATCH_LENGTH=m
-CONFIG_NETFILTER_XT_MATCH_LIMIT=m
-CONFIG_NETFILTER_XT_MATCH_MAC=m
-CONFIG_NETFILTER_XT_MATCH_MARK=m
-CONFIG_NETFILTER_XT_MATCH_MULTIPORT=m
-CONFIG_NETFILTER_XT_MATCH_POLICY=m
-CONFIG_NETFILTER_XT_MATCH_PHYSDEV=m
-CONFIG_NETFILTER_XT_MATCH_PKTTYPE=m
-CONFIG_NETFILTER_XT_MATCH_QUOTA=m
-CONFIG_NETFILTER_XT_MATCH_REALM=m
-CONFIG_NETFILTER_XT_MATCH_STATE=m
-CONFIG_NETFILTER_XT_MATCH_STATISTIC=m
-CONFIG_NETFILTER_XT_MATCH_STRING=m
-CONFIG_NETFILTER_XT_MATCH_TCPMSS=m
-CONFIG_NETFILTER_XT_MATCH_TIME=m
-CONFIG_NETFILTER_XT_MATCH_U32=m
-CONFIG_IP_VS=m
-CONFIG_IP_VS_PROTO_TCP=y
-CONFIG_IP_VS_PROTO_UDP=y
-CONFIG_IP_VS_PROTO_ESP=y
-CONFIG_IP_VS_PROTO_AH=y
-CONFIG_IP_VS_RR=m
-CONFIG_IP_VS_WRR=m
-CONFIG_IP_VS_LC=m
-CONFIG_IP_VS_WLC=m
-CONFIG_IP_VS_LBLC=m
-CONFIG_IP_VS_LBLCR=m
-CONFIG_IP_VS_DH=m
-CONFIG_IP_VS_SH=m
-CONFIG_IP_VS_SED=m
-CONFIG_IP_VS_NQ=m
-CONFIG_IP_VS_FTP=m
-CONFIG_NF_CONNTRACK_IPV4=m
-CONFIG_IP_NF_QUEUE=m
-CONFIG_IP_NF_IPTABLES=m
-CONFIG_IP_NF_MATCH_ADDRTYPE=m
-CONFIG_IP_NF_MATCH_AH=m
-CONFIG_IP_NF_MATCH_ECN=m
-CONFIG_IP_NF_MATCH_TTL=m
-CONFIG_IP_NF_FILTER=m
-CONFIG_IP_NF_TARGET_REJECT=m
-CONFIG_IP_NF_TARGET_LOG=m
-CONFIG_IP_NF_TARGET_ULOG=m
-CONFIG_NF_NAT=m
-CONFIG_IP_NF_TARGET_MASQUERADE=m
-CONFIG_IP_NF_TARGET_NETMAP=m
-CONFIG_IP_NF_TARGET_REDIRECT=m
-CONFIG_NF_NAT_SNMP_BASIC=m
-CONFIG_IP_NF_MANGLE=m
-CONFIG_IP_NF_TARGET_CLUSTERIP=m
-CONFIG_IP_NF_TARGET_ECN=m
-CONFIG_IP_NF_TARGET_TTL=m
-CONFIG_IP_NF_RAW=m
-CONFIG_IP_NF_ARPTABLES=m
-CONFIG_IP_NF_ARPFILTER=m
-CONFIG_IP_NF_ARP_MANGLE=m
-CONFIG_NF_CONNTRACK_IPV6=m
-CONFIG_IP6_NF_QUEUE=m
-CONFIG_IP6_NF_IPTABLES=m
-CONFIG_IP6_NF_MATCH_AH=m
-CONFIG_IP6_NF_MATCH_EUI64=m
-CONFIG_IP6_NF_MATCH_FRAG=m
-CONFIG_IP6_NF_MATCH_OPTS=m
-CONFIG_IP6_NF_MATCH_HL=m
-CONFIG_IP6_NF_MATCH_IPV6HEADER=m
-CONFIG_IP6_NF_MATCH_MH=m
-CONFIG_IP6_NF_MATCH_RT=m
-CONFIG_IP6_NF_TARGET_HL=m
-CONFIG_IP6_NF_TARGET_LOG=m
-CONFIG_IP6_NF_FILTER=m
-CONFIG_IP6_NF_TARGET_REJECT=m
-CONFIG_IP6_NF_MANGLE=m
-CONFIG_IP6_NF_RAW=m
-CONFIG_BRIDGE_NF_EBTABLES=m
-CONFIG_BRIDGE_EBT_BROUTE=m
-CONFIG_BRIDGE_EBT_T_FILTER=m
-CONFIG_BRIDGE_EBT_T_NAT=m
-CONFIG_BRIDGE_EBT_802_3=m
-CONFIG_BRIDGE_EBT_AMONG=m
-CONFIG_BRIDGE_EBT_ARP=m
-CONFIG_BRIDGE_EBT_IP=m
-CONFIG_BRIDGE_EBT_LIMIT=m
-CONFIG_BRIDGE_EBT_MARK=m
-CONFIG_BRIDGE_EBT_PKTTYPE=m
-CONFIG_BRIDGE_EBT_STP=m
-CONFIG_BRIDGE_EBT_VLAN=m
-CONFIG_BRIDGE_EBT_ARPREPLY=m
-CONFIG_BRIDGE_EBT_DNAT=m
-CONFIG_BRIDGE_EBT_MARK_T=m
-CONFIG_BRIDGE_EBT_REDIRECT=m
-CONFIG_BRIDGE_EBT_SNAT=m
-CONFIG_BRIDGE_EBT_LOG=m
-CONFIG_BRIDGE_EBT_ULOG=m
-CONFIG_IP_DCCP=m
-CONFIG_TIPC=m
-CONFIG_TIPC_ADVANCED=y
-CONFIG_ATM=m
-CONFIG_ATM_CLIP=m
-CONFIG_ATM_LANE=m
-CONFIG_ATM_MPOA=m
-CONFIG_ATM_BR2684=m
-CONFIG_BRIDGE=m
-CONFIG_VLAN_8021Q=m
+CONFIG_VLAN_8021Q=y
 CONFIG_NET_SCHED=y
-CONFIG_NET_SCH_CBQ=m
-CONFIG_NET_SCH_HTB=m
-CONFIG_NET_SCH_HFSC=m
-CONFIG_NET_SCH_ATM=m
-CONFIG_NET_SCH_PRIO=m
-CONFIG_NET_SCH_RED=m
-CONFIG_NET_SCH_SFQ=m
-CONFIG_NET_SCH_TEQL=m
-CONFIG_NET_SCH_TBF=m
-CONFIG_NET_SCH_GRED=m
-CONFIG_NET_SCH_DSMARK=m
-CONFIG_NET_SCH_NETEM=m
-CONFIG_NET_SCH_INGRESS=m
-CONFIG_NET_CLS_BASIC=m
-CONFIG_NET_CLS_TCINDEX=m
-CONFIG_NET_CLS_ROUTE4=m
-CONFIG_NET_CLS_FW=m
-CONFIG_NET_CLS_U32=m
-CONFIG_CLS_U32_PERF=y
-CONFIG_CLS_U32_MARK=y
-CONFIG_NET_CLS_RSVP=m
-CONFIG_NET_CLS_RSVP6=m
-CONFIG_NET_EMATCH=y
-CONFIG_NET_EMATCH_CMP=m
-CONFIG_NET_EMATCH_NBYTE=m
-CONFIG_NET_EMATCH_U32=m
-CONFIG_NET_EMATCH_META=m
-CONFIG_NET_EMATCH_TEXT=m
-CONFIG_NET_CLS_ACT=y
-CONFIG_NET_ACT_POLICE=m
-CONFIG_NET_ACT_GACT=m
-CONFIG_GACT_PROB=y
-CONFIG_NET_ACT_MIRRED=m
-CONFIG_NET_ACT_IPT=m
-CONFIG_NET_ACT_NAT=m
-CONFIG_NET_ACT_PEDIT=m
-CONFIG_NET_ACT_SIMP=m
-CONFIG_NET_CLS_IND=y
-CONFIG_NET_PKTGEN=m
-CONFIG_BT=m
-CONFIG_BT_HCIUART=m
-CONFIG_BT_HCIUART_H4=y
-CONFIG_BT_HCIUART_BCSP=y
-CONFIG_BT_HCIUART_LL=y
-CONFIG_BT_HCIBCM203X=m
-CONFIG_BT_HCIBPA10X=m
-CONFIG_BT_HCIBFUSB=m
-CONFIG_BT_HCIVHCI=m
-CONFIG_CFG80211=m
-CONFIG_MAC80211=m
-CONFIG_MAC80211_RC_PID=y
-CONFIG_MAC80211_RC_DEFAULT_PID=y
-CONFIG_MAC80211_MESH=y
-CONFIG_RFKILL=m
-CONFIG_RFKILL_INPUT=y
-CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
-CONFIG_FW_LOADER=m
-CONFIG_CONNECTOR=m
+CONFIG_NET_SCH_FQ_CODEL=y
+CONFIG_HAMRADIO=y
+CONFIG_CFG80211=y
+CONFIG_MAC80211=y
 CONFIG_MTD=y
-CONFIG_MTD_CONCAT=y
-CONFIG_MTD_PARTITIONS=y
-CONFIG_MTD_CHAR=y
+CONFIG_MTD_BCM47XX_PARTS=y
 CONFIG_MTD_BLOCK=y
 CONFIG_MTD_CFI=y
 CONFIG_MTD_CFI_INTELEXT=y
 CONFIG_MTD_CFI_AMDSTD=y
-CONFIG_MTD_CFI_STAA=y
-CONFIG_MTD_RAM=y
-CONFIG_MTD_ROM=y
-CONFIG_MTD_ABSENT=y
+CONFIG_MTD_COMPLEX_MAPPINGS=y
 CONFIG_MTD_PHYSMAP=y
-CONFIG_BLK_DEV_LOOP=m
-CONFIG_BLK_DEV_CRYPTOLOOP=m
-CONFIG_BLK_DEV_NBD=m
-CONFIG_BLK_DEV_RAM=y
-CONFIG_BLK_DEV_RAM_SIZE=16384
-CONFIG_ATA_OVER_ETH=m
-CONFIG_RAID_ATTRS=m
-CONFIG_SCSI=y
-CONFIG_SCSI_TGT=m
-CONFIG_BLK_DEV_SD=y
-CONFIG_CHR_DEV_ST=m
-CONFIG_CHR_DEV_OSST=m
-CONFIG_BLK_DEV_SR=m
-CONFIG_BLK_DEV_SR_VENDOR=y
-CONFIG_CHR_DEV_SG=m
-CONFIG_CHR_DEV_SCH=m
-CONFIG_SCSI_MULTI_LUN=y
-CONFIG_SCSI_CONSTANTS=y
-CONFIG_SCSI_LOGGING=y
-CONFIG_SCSI_SCAN_ASYNC=y
-CONFIG_ISCSI_TCP=m
+CONFIG_MTD_BCM47XXSFLASH=y
+CONFIG_MTD_NAND=y
+CONFIG_MTD_NAND_BCM47XXNFLASH=y
 CONFIG_NETDEVICES=y
-CONFIG_DUMMY=m
-CONFIG_EQUALIZER=m
-CONFIG_TUN=m
-CONFIG_VETH=m
-CONFIG_PHYLIB=m
-CONFIG_MARVELL_PHY=m
-CONFIG_DAVICOM_PHY=m
-CONFIG_QSEMI_PHY=m
-CONFIG_LXT_PHY=m
-CONFIG_CICADA_PHY=m
-CONFIG_VITESSE_PHY=m
-CONFIG_SMSC_PHY=m
-CONFIG_BROADCOM_PHY=m
-CONFIG_ICPLUS_PHY=m
-CONFIG_MDIO_BITBANG=m
-CONFIG_NET_ETHERNET=y
-CONFIG_NET_PCI=y
 CONFIG_B44=y
-# CONFIG_NETDEV_1000 is not set
-# CONFIG_NETDEV_10000 is not set
-CONFIG_ATH_COMMON=m
-CONFIG_ATH5K=m
-CONFIG_B43=m
-CONFIG_B43LEGACY=m
-CONFIG_ZD1211RW=m
-CONFIG_USB_CATC=m
-CONFIG_USB_KAWETH=m
-CONFIG_USB_PEGASUS=m
-CONFIG_USB_RTL8150=m
-CONFIG_USB_USBNET=m
-CONFIG_USB_NET_DM9601=m
-CONFIG_USB_NET_GL620A=m
-CONFIG_USB_NET_PLUSB=m
-CONFIG_USB_NET_MCS7830=m
-CONFIG_USB_NET_RNDIS_HOST=m
-CONFIG_USB_ALI_M5632=y
-CONFIG_USB_AN2720=y
-CONFIG_USB_EPSON2888=y
-CONFIG_USB_KC2190=y
-CONFIG_USB_SIERRA_NET=m
-CONFIG_ATM_DUMMY=m
-CONFIG_ATM_TCP=m
-CONFIG_PPP=m
-CONFIG_PPP_ASYNC=m
-CONFIG_PPP_DEFLATE=m
-CONFIG_PPP_BSDCOMP=m
-CONFIG_PPP_MPPE=m
-CONFIG_PPPOE=m
-CONFIG_PPPOATM=m
-CONFIG_SLIP=m
-CONFIG_INPUT_EVDEV=m
-# CONFIG_INPUT_KEYBOARD is not set
-# CONFIG_INPUT_MOUSE is not set
-# CONFIG_SERIO is not set
-# CONFIG_VT is not set
+CONFIG_TIGON3=y
+CONFIG_BGMAC=y
+CONFIG_ATH_CARDS=y
+CONFIG_ATH5K=y
+CONFIG_B43=y
+CONFIG_B43LEGACY=y
+CONFIG_BRCMSMAC=y
+CONFIG_ISDN=y
 CONFIG_SERIAL_8250=y
+# CONFIG_SERIAL_8250_DEPRECATED_OPTIONS is not set
 CONFIG_SERIAL_8250_CONSOLE=y
 # CONFIG_SERIAL_8250_PCI is not set
 CONFIG_SERIAL_8250_NR_UARTS=2
 CONFIG_SERIAL_8250_RUNTIME_UARTS=2
-# CONFIG_LEGACY_PTYS is not set
-# CONFIG_HW_RANDOM is not set
-CONFIG_W1=m
-CONFIG_W1_MASTER_MATROX=m
-CONFIG_W1_MASTER_DS2490=m
-CONFIG_W1_SLAVE_THERM=m
-CONFIG_W1_SLAVE_SMEM=m
-CONFIG_W1_SLAVE_DS2433=m
-CONFIG_W1_SLAVE_DS2760=m
-# CONFIG_HWMON is not set
-CONFIG_THERMAL=y
+CONFIG_SERIAL_8250_EXTENDED=y
+CONFIG_SERIAL_8250_SHARE_IRQ=y
+CONFIG_HW_RANDOM=y
+CONFIG_GPIO_SYSFS=y
 CONFIG_WATCHDOG=y
-CONFIG_WATCHDOG_NOWAYOUT=y
 CONFIG_BCM47XX_WDT=y
+CONFIG_SSB_DEBUG=y
 CONFIG_SSB_DRIVER_GIGE=y
-CONFIG_DISPLAY_SUPPORT=m
-CONFIG_SOUND=m
-CONFIG_SND=m
-CONFIG_SND_SEQUENCER=m
-CONFIG_SND_SEQ_DUMMY=m
-CONFIG_SND_MIXER_OSS=m
-CONFIG_SND_PCM_OSS=m
-CONFIG_SND_SEQUENCER_OSS=y
-CONFIG_SND_DUMMY=m
-CONFIG_SND_VIRMIDI=m
-CONFIG_SND_USB_AUDIO=m
-CONFIG_HID=m
-CONFIG_USB_HID=m
-CONFIG_USB_HIDDEV=y
+CONFIG_BCMA_DRIVER_GMAC_CMN=y
 CONFIG_USB=y
-CONFIG_USB_DEVICEFS=y
-# CONFIG_USB_DEVICE_CLASS is not set
-CONFIG_USB_EHCI_HCD=y
-CONFIG_USB_EHCI_ROOT_HUB_TT=y
-CONFIG_USB_OHCI_HCD=y
-CONFIG_USB_U132_HCD=m
-CONFIG_USB_R8A66597_HCD=m
-CONFIG_USB_ACM=m
-CONFIG_USB_PRINTER=m
-CONFIG_USB_STORAGE=y
-CONFIG_USB_STORAGE_DATAFAB=y
-CONFIG_USB_STORAGE_FREECOM=y
-CONFIG_USB_STORAGE_USBAT=y
-CONFIG_USB_STORAGE_SDDR09=y
-CONFIG_USB_STORAGE_SDDR55=y
-CONFIG_USB_STORAGE_JUMPSHOT=y
-CONFIG_USB_STORAGE_ALAUDA=y
-CONFIG_USB_STORAGE_ONETOUCH=y
-CONFIG_USB_STORAGE_KARMA=y
-CONFIG_USB_MDC800=m
-CONFIG_USB_MICROTEK=m
-CONFIG_USB_SERIAL=m
-CONFIG_USB_SERIAL_GENERIC=y
-CONFIG_USB_SERIAL_AIRCABLE=m
-CONFIG_USB_SERIAL_ARK3116=m
-CONFIG_USB_SERIAL_BELKIN=m
-CONFIG_USB_SERIAL_CH341=m
-CONFIG_USB_SERIAL_DIGI_ACCELEPORT=m
-CONFIG_USB_SERIAL_CYPRESS_M8=m
-CONFIG_USB_SERIAL_EMPEG=m
-CONFIG_USB_SERIAL_FTDI_SIO=m
-CONFIG_USB_SERIAL_FUNSOFT=m
-CONFIG_USB_SERIAL_VISOR=m
-CONFIG_USB_SERIAL_IPAQ=m
-CONFIG_USB_SERIAL_IR=m
-CONFIG_USB_SERIAL_GARMIN=m
-CONFIG_USB_SERIAL_IPW=m
-CONFIG_USB_SERIAL_KEYSPAN_PDA=m
-CONFIG_USB_SERIAL_KLSI=m
-CONFIG_USB_SERIAL_KOBIL_SCT=m
-CONFIG_USB_SERIAL_MCT_U232=m
-CONFIG_USB_SERIAL_MOS7720=m
-CONFIG_USB_SERIAL_MOS7840=m
-CONFIG_USB_SERIAL_NAVMAN=m
-CONFIG_USB_SERIAL_PL2303=m
-CONFIG_USB_SERIAL_OTI6858=m
-CONFIG_USB_SERIAL_HP4X=m
-CONFIG_USB_SERIAL_SAFE=m
-CONFIG_USB_SERIAL_SIERRAWIRELESS=m
-CONFIG_USB_SERIAL_CYBERJACK=m
-CONFIG_USB_SERIAL_XIRCOM=m
-CONFIG_USB_SERIAL_OPTION=m
-CONFIG_USB_SERIAL_OMNINET=m
-CONFIG_USB_SERIAL_DEBUG=m
-CONFIG_USB_ADUTUX=m
-CONFIG_USB_RIO500=m
-CONFIG_USB_LEGOTOWER=m
-CONFIG_USB_LCD=m
-CONFIG_USB_LED=m
-CONFIG_USB_CYPRESS_CY7C63=m
-CONFIG_USB_CYTHERM=m
-CONFIG_USB_IDMOUSE=m
-CONFIG_USB_FTDI_ELAN=m
-CONFIG_USB_SISUSBVGA=m
-CONFIG_USB_LD=m
-CONFIG_USB_TRANCEVIBRATOR=m
-CONFIG_USB_IOWARRIOR=m
-CONFIG_USB_TEST=m
-CONFIG_USB_ATM=m
-CONFIG_USB_SPEEDTOUCH=m
-CONFIG_USB_CXACRU=m
-CONFIG_USB_UEAGLEATM=m
-CONFIG_USB_XUSBATM=m
-CONFIG_USB_GADGET=m
-CONFIG_USB_GADGET_NET2280=y
-CONFIG_USB_ZERO=m
-CONFIG_USB_ETH=m
-CONFIG_USB_GADGETFS=m
-CONFIG_USB_MASS_STORAGE=m
-CONFIG_USB_G_SERIAL=m
-CONFIG_USB_MIDI_GADGET=m
-CONFIG_LEDS_CLASS=y
-CONFIG_LEDS_GPIO=y
+CONFIG_USB_HCD_BCMA=y
+CONFIG_USB_HCD_SSB=y
 CONFIG_LEDS_TRIGGER_TIMER=y
-CONFIG_LEDS_TRIGGER_HEARTBEAT=y
 CONFIG_LEDS_TRIGGER_DEFAULT_ON=y
-CONFIG_EXT2_FS=y
-CONFIG_EXT2_FS_XATTR=y
-CONFIG_EXT2_FS_POSIX_ACL=y
-CONFIG_EXT2_FS_SECURITY=y
-CONFIG_EXT3_FS=y
-# CONFIG_EXT3_DEFAULTS_TO_ORDERED is not set
-CONFIG_EXT3_FS_POSIX_ACL=y
-CONFIG_EXT3_FS_SECURITY=y
-CONFIG_REISERFS_FS=m
-CONFIG_REISERFS_FS_XATTR=y
-CONFIG_REISERFS_FS_POSIX_ACL=y
-CONFIG_REISERFS_FS_SECURITY=y
-CONFIG_JFS_FS=m
-CONFIG_JFS_POSIX_ACL=y
-CONFIG_JFS_SECURITY=y
-CONFIG_XFS_FS=m
-CONFIG_XFS_QUOTA=y
-CONFIG_XFS_POSIX_ACL=y
-CONFIG_XFS_RT=y
-CONFIG_GFS2_FS=m
-CONFIG_QUOTA=y
-CONFIG_QUOTA_NETLINK_INTERFACE=y
-CONFIG_QFMT_V1=m
-CONFIG_QFMT_V2=m
-CONFIG_AUTOFS_FS=m
-CONFIG_AUTOFS4_FS=m
-CONFIG_FUSE_FS=m
-CONFIG_ISO9660_FS=m
-CONFIG_JOLIET=y
-CONFIG_ZISOFS=y
-CONFIG_UDF_FS=m
-CONFIG_MSDOS_FS=m
-CONFIG_VFAT_FS=m
-CONFIG_NTFS_FS=m
-CONFIG_NTFS_RW=y
-CONFIG_PROC_KCORE=y
-CONFIG_TMPFS=y
-CONFIG_TMPFS_POSIX_ACL=y
-CONFIG_ADFS_FS=m
-CONFIG_AFFS_FS=m
-CONFIG_HFS_FS=m
-CONFIG_HFSPLUS_FS=m
-CONFIG_BEFS_FS=m
-CONFIG_BFS_FS=m
-CONFIG_EFS_FS=m
-CONFIG_JFFS2_FS=m
-CONFIG_JFFS2_FS_XATTR=y
-CONFIG_CRAMFS=m
-CONFIG_VXFS_FS=m
-CONFIG_MINIX_FS=m
-CONFIG_HPFS_FS=m
-CONFIG_QNX4FS_FS=m
-CONFIG_ROMFS_FS=m
-CONFIG_SYSV_FS=m
-CONFIG_UFS_FS=m
-CONFIG_NFS_FS=m
-CONFIG_NFS_V3=y
-CONFIG_NFS_V3_ACL=y
-CONFIG_NFS_V4=y
-CONFIG_NFSD=m
-CONFIG_NFSD_V3_ACL=y
-CONFIG_NFSD_V4=y
-CONFIG_RPCSEC_GSS_SPKM3=m
-CONFIG_CIFS=m
-CONFIG_CIFS_XATTR=y
-CONFIG_CIFS_POSIX=y
-CONFIG_NCP_FS=m
-CONFIG_NCPFS_NFS_NS=y
-CONFIG_NCPFS_OS2_NS=y
-CONFIG_NCPFS_NLS=y
-CONFIG_NCPFS_EXTRAS=y
-CONFIG_CODA_FS=m
-CONFIG_PARTITION_ADVANCED=y
-CONFIG_KARMA_PARTITION=y
-CONFIG_NLS_CODEPAGE_437=m
-CONFIG_NLS_CODEPAGE_737=m
-CONFIG_NLS_CODEPAGE_775=m
-CONFIG_NLS_CODEPAGE_850=m
-CONFIG_NLS_CODEPAGE_852=m
-CONFIG_NLS_CODEPAGE_855=m
-CONFIG_NLS_CODEPAGE_857=m
-CONFIG_NLS_CODEPAGE_860=m
-CONFIG_NLS_CODEPAGE_861=m
-CONFIG_NLS_CODEPAGE_862=m
-CONFIG_NLS_CODEPAGE_863=m
-CONFIG_NLS_CODEPAGE_864=m
-CONFIG_NLS_CODEPAGE_865=m
-CONFIG_NLS_CODEPAGE_866=m
-CONFIG_NLS_CODEPAGE_869=m
-CONFIG_NLS_CODEPAGE_936=m
-CONFIG_NLS_CODEPAGE_950=m
-CONFIG_NLS_CODEPAGE_932=m
-CONFIG_NLS_CODEPAGE_949=m
-CONFIG_NLS_CODEPAGE_874=m
-CONFIG_NLS_ISO8859_8=m
-CONFIG_NLS_CODEPAGE_1250=m
-CONFIG_NLS_CODEPAGE_1251=m
-CONFIG_NLS_ASCII=m
-CONFIG_NLS_ISO8859_1=m
-CONFIG_NLS_ISO8859_2=m
-CONFIG_NLS_ISO8859_3=m
-CONFIG_NLS_ISO8859_4=m
-CONFIG_NLS_ISO8859_5=m
-CONFIG_NLS_ISO8859_6=m
-CONFIG_NLS_ISO8859_7=m
-CONFIG_NLS_ISO8859_9=m
-CONFIG_NLS_ISO8859_13=m
-CONFIG_NLS_ISO8859_14=m
-CONFIG_NLS_ISO8859_15=m
-CONFIG_NLS_KOI8_R=m
-CONFIG_NLS_KOI8_U=m
-CONFIG_DLM=m
-CONFIG_DLM_DEBUG=y
+CONFIG_PRINTK_TIME=y
+CONFIG_DEBUG_INFO=y
+CONFIG_DEBUG_INFO_REDUCED=y
+CONFIG_STRIP_ASM_SYMS=y
 CONFIG_DEBUG_FS=y
-CONFIG_CRYPTO_NULL=m
-CONFIG_CRYPTO_TEST=m
-CONFIG_CRYPTO_LRW=m
-CONFIG_CRYPTO_PCBC=m
-CONFIG_CRYPTO_XTS=m
-CONFIG_CRYPTO_HMAC=y
-CONFIG_CRYPTO_XCBC=m
-CONFIG_CRYPTO_MD4=m
-CONFIG_CRYPTO_MD5=y
-CONFIG_CRYPTO_MICHAEL_MIC=m
-CONFIG_CRYPTO_SHA256=m
-CONFIG_CRYPTO_SHA512=m
-CONFIG_CRYPTO_TGR192=m
-CONFIG_CRYPTO_WP512=m
-CONFIG_CRYPTO_ANUBIS=m
-CONFIG_CRYPTO_BLOWFISH=m
-CONFIG_CRYPTO_CAMELLIA=m
-CONFIG_CRYPTO_CAST6=m
-CONFIG_CRYPTO_FCRYPT=m
-CONFIG_CRYPTO_KHAZAD=m
-CONFIG_CRYPTO_SEED=m
-CONFIG_CRYPTO_SERPENT=m
-CONFIG_CRYPTO_TEA=m
-CONFIG_CRYPTO_TWOFISH=m
-CONFIG_CRC16=m
-CONFIG_CRC7=m
+CONFIG_MAGIC_SYSRQ=y
+CONFIG_CMDLINE_BOOL=y
+CONFIG_CMDLINE="console=ttyS0,115200"
+CONFIG_CRC32_SARWATE=y
diff --git a/arch/mips/configs/bcm63xx_defconfig b/arch/mips/configs/bcm63xx_defconfig
index 9190051..3fec264 100644
--- a/arch/mips/configs/bcm63xx_defconfig
+++ b/arch/mips/configs/bcm63xx_defconfig
@@ -44,7 +44,6 @@
 # CONFIG_STANDALONE is not set
 # CONFIG_PREVENT_FIRMWARE_BUILD is not set
 CONFIG_MTD=y
-CONFIG_MTD_PARTITIONS=y
 CONFIG_MTD_CFI=y
 CONFIG_MTD_CFI_INTELEXT=y
 CONFIG_MTD_CFI_AMDSTD=y
diff --git a/arch/mips/configs/cobalt_defconfig b/arch/mips/configs/cobalt_defconfig
index 5419adb..23b6693 100644
--- a/arch/mips/configs/cobalt_defconfig
+++ b/arch/mips/configs/cobalt_defconfig
@@ -19,7 +19,6 @@
 # CONFIG_IPV6 is not set
 CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
 CONFIG_MTD=y
-CONFIG_MTD_PARTITIONS=y
 CONFIG_MTD_CHAR=y
 CONFIG_MTD_BLKDEVS=y
 CONFIG_MTD_JEDECPROBE=y
diff --git a/arch/mips/configs/gpr_defconfig b/arch/mips/configs/gpr_defconfig
index fb64589..8f219da 100644
--- a/arch/mips/configs/gpr_defconfig
+++ b/arch/mips/configs/gpr_defconfig
@@ -165,7 +165,6 @@
 CONFIG_CFG80211=y
 CONFIG_MAC80211=y
 CONFIG_MTD=y
-CONFIG_MTD_PARTITIONS=y
 CONFIG_MTD_CHAR=y
 CONFIG_MTD_BLOCK=y
 CONFIG_MTD_CFI=y
diff --git a/arch/mips/configs/jmr3927_defconfig b/arch/mips/configs/jmr3927_defconfig
index db5705e..9bc08f2 100644
--- a/arch/mips/configs/jmr3927_defconfig
+++ b/arch/mips/configs/jmr3927_defconfig
@@ -22,7 +22,6 @@
 # CONFIG_INET_DIAG is not set
 # CONFIG_IPV6 is not set
 CONFIG_MTD=y
-CONFIG_MTD_PARTITIONS=y
 CONFIG_MTD_CMDLINE_PARTS=y
 CONFIG_MTD_CHAR=y
 CONFIG_MTD_CFI=y
diff --git a/arch/mips/configs/lasat_defconfig b/arch/mips/configs/lasat_defconfig
index d9f3db2..0179c7f 100644
--- a/arch/mips/configs/lasat_defconfig
+++ b/arch/mips/configs/lasat_defconfig
@@ -31,7 +31,6 @@
 # CONFIG_INET_DIAG is not set
 # CONFIG_IPV6 is not set
 CONFIG_MTD=y
-CONFIG_MTD_PARTITIONS=y
 CONFIG_MTD_CHAR=y
 CONFIG_MTD_BLOCK=y
 CONFIG_MTD_CFI=y
diff --git a/arch/mips/configs/maltasmvp_defconfig b/arch/mips/configs/maltasmvp_defconfig
index 8a66602..d759318 100644
--- a/arch/mips/configs/maltasmvp_defconfig
+++ b/arch/mips/configs/maltasmvp_defconfig
@@ -4,7 +4,7 @@
 CONFIG_MIPS_MT_SMP=y
 CONFIG_SCHED_SMT=y
 CONFIG_MIPS_CMP=y
-CONFIG_NR_CPUS=8
+CONFIG_NR_CPUS=2
 CONFIG_HZ_100=y
 CONFIG_LOCALVERSION="cmp"
 CONFIG_SYSVIPC=y
@@ -58,7 +58,6 @@
 CONFIG_DEV_APPLETALK=m
 CONFIG_IPDDP=m
 CONFIG_IPDDP_ENCAP=y
-CONFIG_IPDDP_DECAP=y
 CONFIG_NET_SCHED=y
 CONFIG_NET_SCH_CBQ=m
 CONFIG_NET_SCH_HTB=m
diff --git a/arch/mips/configs/markeins_defconfig b/arch/mips/configs/markeins_defconfig
index 636f82b..4c2c0c4 100644
--- a/arch/mips/configs/markeins_defconfig
+++ b/arch/mips/configs/markeins_defconfig
@@ -124,7 +124,6 @@
 CONFIG_IP6_NF_RAW=m
 CONFIG_FW_LOADER=m
 CONFIG_MTD=y
-CONFIG_MTD_PARTITIONS=y
 CONFIG_MTD_CMDLINE_PARTS=y
 CONFIG_MTD_CHAR=y
 CONFIG_MTD_BLOCK=y
diff --git a/arch/mips/configs/mtx1_defconfig b/arch/mips/configs/mtx1_defconfig
index 9fa8f16..593946a 100644
--- a/arch/mips/configs/mtx1_defconfig
+++ b/arch/mips/configs/mtx1_defconfig
@@ -246,7 +246,6 @@
 CONFIG_BT_HCIVHCI=m
 CONFIG_CONNECTOR=m
 CONFIG_MTD=y
-CONFIG_MTD_PARTITIONS=y
 CONFIG_MTD_CHAR=y
 CONFIG_MTD_BLOCK=y
 CONFIG_MTD_CFI=y
diff --git a/arch/mips/configs/pnx8335_stb225_defconfig b/arch/mips/configs/pnx8335_stb225_defconfig
index f292576..c887066 100644
--- a/arch/mips/configs/pnx8335_stb225_defconfig
+++ b/arch/mips/configs/pnx8335_stb225_defconfig
@@ -31,7 +31,6 @@
 # CONFIG_IPV6 is not set
 CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
 CONFIG_MTD=y
-CONFIG_MTD_PARTITIONS=y
 CONFIG_MTD_CMDLINE_PARTS=y
 CONFIG_MTD_CHAR=y
 CONFIG_MTD_BLOCK=y
diff --git a/arch/mips/configs/qi_lb60_defconfig b/arch/mips/configs/qi_lb60_defconfig
new file mode 100644
index 0000000..2b96547
--- /dev/null
+++ b/arch/mips/configs/qi_lb60_defconfig
@@ -0,0 +1,188 @@
+CONFIG_MACH_JZ4740=y
+# CONFIG_COMPACTION is not set
+# CONFIG_CROSS_MEMORY_ATTACH is not set
+CONFIG_HZ_100=y
+CONFIG_PREEMPT=y
+# CONFIG_SECCOMP is not set
+# CONFIG_LOCALVERSION_AUTO is not set
+CONFIG_SYSVIPC=y
+CONFIG_LOG_BUF_SHIFT=14
+CONFIG_SYSCTL_SYSCALL=y
+CONFIG_KALLSYMS_ALL=y
+CONFIG_EMBEDDED=y
+# CONFIG_VM_EVENT_COUNTERS is not set
+# CONFIG_COMPAT_BRK is not set
+CONFIG_SLAB=y
+CONFIG_MODULES=y
+CONFIG_MODULE_UNLOAD=y
+# CONFIG_BLK_DEV_BSG is not set
+CONFIG_PARTITION_ADVANCED=y
+# CONFIG_EFI_PARTITION is not set
+# CONFIG_IOSCHED_CFQ is not set
+# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set
+CONFIG_NET=y
+CONFIG_PACKET=y
+CONFIG_UNIX=y
+CONFIG_INET=y
+CONFIG_IP_MULTICAST=y
+CONFIG_IP_ADVANCED_ROUTER=y
+CONFIG_IP_MULTIPLE_TABLES=y
+CONFIG_IP_ROUTE_MULTIPATH=y
+CONFIG_IP_ROUTE_VERBOSE=y
+CONFIG_IP_MROUTE=y
+CONFIG_IP_MROUTE_MULTIPLE_TABLES=y
+# CONFIG_INET_XFRM_MODE_TRANSPORT is not set
+# CONFIG_INET_XFRM_MODE_TUNNEL is not set
+# CONFIG_INET_XFRM_MODE_BEET is not set
+# CONFIG_INET_LRO is not set
+# CONFIG_INET_DIAG is not set
+CONFIG_TCP_CONG_ADVANCED=y
+# CONFIG_TCP_CONG_BIC is not set
+# CONFIG_TCP_CONG_CUBIC is not set
+CONFIG_TCP_CONG_WESTWOOD=y
+# CONFIG_TCP_CONG_HTCP is not set
+# CONFIG_IPV6 is not set
+CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
+# CONFIG_FIRMWARE_IN_KERNEL is not set
+CONFIG_MTD=y
+CONFIG_MTD_BLOCK=y
+CONFIG_MTD_NAND=y
+CONFIG_MTD_NAND_JZ4740=y
+CONFIG_MTD_UBI=y
+CONFIG_NETDEVICES=y
+# CONFIG_WLAN is not set
+# CONFIG_INPUT_MOUSEDEV is not set
+CONFIG_INPUT_EVDEV=y
+# CONFIG_KEYBOARD_ATKBD is not set
+CONFIG_KEYBOARD_GPIO=y
+CONFIG_KEYBOARD_MATRIX=y
+# CONFIG_INPUT_MOUSE is not set
+CONFIG_INPUT_MISC=y
+# CONFIG_SERIO is not set
+CONFIG_LEGACY_PTY_COUNT=2
+# CONFIG_DEVKMEM is not set
+CONFIG_SERIAL_8250=y
+CONFIG_SERIAL_8250_CONSOLE=y
+# CONFIG_SERIAL_8250_DMA is not set
+CONFIG_SERIAL_8250_NR_UARTS=2
+CONFIG_SERIAL_8250_RUNTIME_UARTS=2
+# CONFIG_HW_RANDOM is not set
+CONFIG_SPI=y
+CONFIG_SPI_GPIO=y
+CONFIG_POWER_SUPPLY=y
+CONFIG_BATTERY_JZ4740=y
+CONFIG_CHARGER_GPIO=y
+# CONFIG_HWMON is not set
+CONFIG_MFD_JZ4740_ADC=y
+CONFIG_REGULATOR=y
+CONFIG_REGULATOR_FIXED_VOLTAGE=y
+CONFIG_FB=y
+CONFIG_FB_JZ4740=y
+CONFIG_BACKLIGHT_LCD_SUPPORT=y
+CONFIG_LCD_CLASS_DEVICE=y
+# CONFIG_BACKLIGHT_CLASS_DEVICE is not set
+# CONFIG_VGA_CONSOLE is not set
+CONFIG_FRAMEBUFFER_CONSOLE=y
+CONFIG_LOGO=y
+# CONFIG_LOGO_LINUX_MONO is not set
+# CONFIG_LOGO_LINUX_VGA16 is not set
+# CONFIG_LOGO_LINUX_CLUT224 is not set
+CONFIG_SOUND=y
+CONFIG_SND=y
+# CONFIG_SND_SUPPORT_OLD_API is not set
+# CONFIG_SND_VERBOSE_PROCFS is not set
+# CONFIG_SND_DRIVERS is not set
+# CONFIG_SND_SPI is not set
+# CONFIG_SND_MIPS is not set
+CONFIG_SND_SOC=y
+CONFIG_SND_JZ4740_SOC=y
+CONFIG_SND_JZ4740_SOC_QI_LB60=y
+CONFIG_USB=y
+CONFIG_USB_OTG_BLACKLIST_HUB=y
+CONFIG_USB_MUSB_HDRC=y
+CONFIG_USB_MUSB_GADGET=y
+CONFIG_USB_MUSB_JZ4740=y
+CONFIG_NOP_USB_XCEIV=y
+CONFIG_USB_GADGET=y
+CONFIG_USB_GADGET_DEBUG=y
+CONFIG_USB_ETH=y
+# CONFIG_USB_ETH_RNDIS is not set
+CONFIG_MMC=y
+CONFIG_MMC_UNSAFE_RESUME=y
+# CONFIG_MMC_BLOCK_BOUNCE is not set
+CONFIG_MMC_JZ4740=y
+CONFIG_RTC_CLASS=y
+CONFIG_RTC_DRV_JZ4740=y
+CONFIG_DMADEVICES=y
+CONFIG_DMA_JZ4740=y
+CONFIG_PWM=y
+CONFIG_PWM_JZ4740=y
+CONFIG_EXT2_FS=y
+CONFIG_EXT3_FS=y
+# CONFIG_EXT3_DEFAULTS_TO_ORDERED is not set
+# CONFIG_EXT3_FS_XATTR is not set
+# CONFIG_DNOTIFY is not set
+CONFIG_VFAT_FS=y
+CONFIG_PROC_KCORE=y
+# CONFIG_PROC_PAGE_MONITOR is not set
+CONFIG_TMPFS=y
+CONFIG_JFFS2_FS=y
+CONFIG_JFFS2_SUMMARY=y
+CONFIG_JFFS2_COMPRESSION_OPTIONS=y
+# CONFIG_JFFS2_ZLIB is not set
+CONFIG_UBIFS_FS=y
+CONFIG_UBIFS_FS_ADVANCED_COMPR=y
+# CONFIG_NETWORK_FILESYSTEMS is not set
+CONFIG_NLS_CODEPAGE_437=y
+CONFIG_NLS_CODEPAGE_737=y
+CONFIG_NLS_CODEPAGE_775=y
+CONFIG_NLS_CODEPAGE_850=y
+CONFIG_NLS_CODEPAGE_852=y
+CONFIG_NLS_CODEPAGE_855=y
+CONFIG_NLS_CODEPAGE_857=y
+CONFIG_NLS_CODEPAGE_860=y
+CONFIG_NLS_CODEPAGE_861=y
+CONFIG_NLS_CODEPAGE_862=y
+CONFIG_NLS_CODEPAGE_863=y
+CONFIG_NLS_CODEPAGE_864=y
+CONFIG_NLS_CODEPAGE_865=y
+CONFIG_NLS_CODEPAGE_866=y
+CONFIG_NLS_CODEPAGE_869=y
+CONFIG_NLS_CODEPAGE_936=y
+CONFIG_NLS_CODEPAGE_950=y
+CONFIG_NLS_CODEPAGE_932=y
+CONFIG_NLS_CODEPAGE_949=y
+CONFIG_NLS_CODEPAGE_874=y
+CONFIG_NLS_ISO8859_8=y
+CONFIG_NLS_CODEPAGE_1250=y
+CONFIG_NLS_CODEPAGE_1251=y
+CONFIG_NLS_ASCII=y
+CONFIG_NLS_ISO8859_1=y
+CONFIG_NLS_ISO8859_2=y
+CONFIG_NLS_ISO8859_3=y
+CONFIG_NLS_ISO8859_4=y
+CONFIG_NLS_ISO8859_5=y
+CONFIG_NLS_ISO8859_6=y
+CONFIG_NLS_ISO8859_7=y
+CONFIG_NLS_ISO8859_9=y
+CONFIG_NLS_ISO8859_13=y
+CONFIG_NLS_ISO8859_14=y
+CONFIG_NLS_ISO8859_15=y
+CONFIG_NLS_KOI8_R=y
+CONFIG_NLS_KOI8_U=y
+CONFIG_NLS_UTF8=y
+CONFIG_PRINTK_TIME=y
+CONFIG_DEBUG_INFO=y
+CONFIG_STRIP_ASM_SYMS=y
+CONFIG_READABLE_ASM=y
+CONFIG_DEBUG_KMEMLEAK=y
+CONFIG_DEBUG_MEMORY_INIT=y
+CONFIG_DEBUG_STACKOVERFLOW=y
+CONFIG_PANIC_ON_OOPS=y
+# CONFIG_FTRACE is not set
+CONFIG_KGDB=y
+CONFIG_RUNTIME_DEBUG=y
+CONFIG_CRYPTO_ZLIB=y
+# CONFIG_CRYPTO_ANSI_CPRNG is not set
+CONFIG_FONTS=y
+CONFIG_FONT_SUN8x16=y
diff --git a/arch/mips/configs/rb532_defconfig b/arch/mips/configs/rb532_defconfig
index b85b1213..5d9d708 100644
--- a/arch/mips/configs/rb532_defconfig
+++ b/arch/mips/configs/rb532_defconfig
@@ -114,7 +114,6 @@
 CONFIG_HAMRADIO=y
 CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
 CONFIG_MTD=y
-CONFIG_MTD_PARTITIONS=y
 CONFIG_MTD_CHAR=y
 CONFIG_MTD_BLOCK=y
 CONFIG_MTD_BLOCK2MTD=y
diff --git a/arch/mips/configs/rbtx49xx_defconfig b/arch/mips/configs/rbtx49xx_defconfig
index 9cba856..f8bf9b4 100644
--- a/arch/mips/configs/rbtx49xx_defconfig
+++ b/arch/mips/configs/rbtx49xx_defconfig
@@ -35,7 +35,6 @@
 # CONFIG_IPV6 is not set
 # CONFIG_WIRELESS is not set
 CONFIG_MTD=y
-CONFIG_MTD_PARTITIONS=y
 CONFIG_MTD_CMDLINE_PARTS=y
 CONFIG_MTD_CHAR=y
 CONFIG_MTD_BLOCK=m
diff --git a/arch/mips/fw/arc/file.c b/arch/mips/fw/arc/file.c
index a8b0803..49fd3ff 100644
--- a/arch/mips/fw/arc/file.c
+++ b/arch/mips/fw/arc/file.c
@@ -8,7 +8,6 @@
  * Copyright (C) 1994, 1995, 1996, 1999 Ralf Baechle
  * Copyright (C) 1999 Silicon Graphics, Inc.
  */
-#include <linux/init.h>
 
 #include <asm/fw/arc/types.h>
 #include <asm/sgialib.h>
diff --git a/arch/mips/include/asm/amon.h b/arch/mips/include/asm/amon.h
index c3dc1a6..3cc03c6 100644
--- a/arch/mips/include/asm/amon.h
+++ b/arch/mips/include/asm/amon.h
@@ -1,7 +1,12 @@
 /*
- * Amon support
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2013 Imagination Technologies Ltd.
+ *
+ * Arbitrary Monitor Support (AMON)
  */
-
-int amon_cpu_avail(int);
-void amon_cpu_start(int, unsigned long, unsigned long,
-		    unsigned long, unsigned long);
+int amon_cpu_avail(int cpu);
+int amon_cpu_start(int cpu, unsigned long pc, unsigned long sp,
+		   unsigned long gp, unsigned long a0);
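[Editor's note] Since amon_cpu_start() now returns an int, SMP bring-up code can propagate a failure from the monitor instead of silently assuming success. A minimal, hypothetical caller:

#include <linux/errno.h>
#include <asm/amon.h>

/* Hypothetical caller; the a0 argument of 0 is purely illustrative. */
static int example_boot_secondary(int cpu, unsigned long pc,
				  unsigned long sp, unsigned long gp)
{
	if (!amon_cpu_avail(cpu))
		return -ENODEV;

	return amon_cpu_start(cpu, pc, sp, gp, 0);
}
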
diff --git a/arch/mips/include/asm/asmmacro-32.h b/arch/mips/include/asm/asmmacro-32.h
index 2413afe..70e1f17 100644
--- a/arch/mips/include/asm/asmmacro-32.h
+++ b/arch/mips/include/asm/asmmacro-32.h
@@ -12,27 +12,6 @@
 #include <asm/fpregdef.h>
 #include <asm/mipsregs.h>
 
-	.macro	fpu_save_double thread status tmp1=t0
-	cfc1	\tmp1,  fcr31
-	sdc1	$f0,  THREAD_FPR0(\thread)
-	sdc1	$f2,  THREAD_FPR2(\thread)
-	sdc1	$f4,  THREAD_FPR4(\thread)
-	sdc1	$f6,  THREAD_FPR6(\thread)
-	sdc1	$f8,  THREAD_FPR8(\thread)
-	sdc1	$f10, THREAD_FPR10(\thread)
-	sdc1	$f12, THREAD_FPR12(\thread)
-	sdc1	$f14, THREAD_FPR14(\thread)
-	sdc1	$f16, THREAD_FPR16(\thread)
-	sdc1	$f18, THREAD_FPR18(\thread)
-	sdc1	$f20, THREAD_FPR20(\thread)
-	sdc1	$f22, THREAD_FPR22(\thread)
-	sdc1	$f24, THREAD_FPR24(\thread)
-	sdc1	$f26, THREAD_FPR26(\thread)
-	sdc1	$f28, THREAD_FPR28(\thread)
-	sdc1	$f30, THREAD_FPR30(\thread)
-	sw	\tmp1, THREAD_FCR31(\thread)
-	.endm
-
 	.macro	fpu_save_single thread tmp=t0
 	cfc1	\tmp,  fcr31
 	swc1	$f0,  THREAD_FPR0(\thread)
@@ -70,27 +49,6 @@
 	sw	\tmp, THREAD_FCR31(\thread)
 	.endm
 
-	.macro	fpu_restore_double thread status tmp=t0
-	lw	\tmp, THREAD_FCR31(\thread)
-	ldc1	$f0,  THREAD_FPR0(\thread)
-	ldc1	$f2,  THREAD_FPR2(\thread)
-	ldc1	$f4,  THREAD_FPR4(\thread)
-	ldc1	$f6,  THREAD_FPR6(\thread)
-	ldc1	$f8,  THREAD_FPR8(\thread)
-	ldc1	$f10, THREAD_FPR10(\thread)
-	ldc1	$f12, THREAD_FPR12(\thread)
-	ldc1	$f14, THREAD_FPR14(\thread)
-	ldc1	$f16, THREAD_FPR16(\thread)
-	ldc1	$f18, THREAD_FPR18(\thread)
-	ldc1	$f20, THREAD_FPR20(\thread)
-	ldc1	$f22, THREAD_FPR22(\thread)
-	ldc1	$f24, THREAD_FPR24(\thread)
-	ldc1	$f26, THREAD_FPR26(\thread)
-	ldc1	$f28, THREAD_FPR28(\thread)
-	ldc1	$f30, THREAD_FPR30(\thread)
-	ctc1	\tmp, fcr31
-	.endm
-
 	.macro	fpu_restore_single thread tmp=t0
 	lw	\tmp, THREAD_FCR31(\thread)
 	lwc1	$f0,  THREAD_FPR0(\thread)
diff --git a/arch/mips/include/asm/asmmacro-64.h b/arch/mips/include/asm/asmmacro-64.h
index 08a527d..38ea609 100644
--- a/arch/mips/include/asm/asmmacro-64.h
+++ b/arch/mips/include/asm/asmmacro-64.h
@@ -13,102 +13,6 @@
 #include <asm/fpregdef.h>
 #include <asm/mipsregs.h>
 
-	.macro	fpu_save_16even thread tmp=t0
-	cfc1	\tmp, fcr31
-	sdc1	$f0,  THREAD_FPR0(\thread)
-	sdc1	$f2,  THREAD_FPR2(\thread)
-	sdc1	$f4,  THREAD_FPR4(\thread)
-	sdc1	$f6,  THREAD_FPR6(\thread)
-	sdc1	$f8,  THREAD_FPR8(\thread)
-	sdc1	$f10, THREAD_FPR10(\thread)
-	sdc1	$f12, THREAD_FPR12(\thread)
-	sdc1	$f14, THREAD_FPR14(\thread)
-	sdc1	$f16, THREAD_FPR16(\thread)
-	sdc1	$f18, THREAD_FPR18(\thread)
-	sdc1	$f20, THREAD_FPR20(\thread)
-	sdc1	$f22, THREAD_FPR22(\thread)
-	sdc1	$f24, THREAD_FPR24(\thread)
-	sdc1	$f26, THREAD_FPR26(\thread)
-	sdc1	$f28, THREAD_FPR28(\thread)
-	sdc1	$f30, THREAD_FPR30(\thread)
-	sw	\tmp, THREAD_FCR31(\thread)
-	.endm
-
-	.macro	fpu_save_16odd thread
-	sdc1	$f1,  THREAD_FPR1(\thread)
-	sdc1	$f3,  THREAD_FPR3(\thread)
-	sdc1	$f5,  THREAD_FPR5(\thread)
-	sdc1	$f7,  THREAD_FPR7(\thread)
-	sdc1	$f9,  THREAD_FPR9(\thread)
-	sdc1	$f11, THREAD_FPR11(\thread)
-	sdc1	$f13, THREAD_FPR13(\thread)
-	sdc1	$f15, THREAD_FPR15(\thread)
-	sdc1	$f17, THREAD_FPR17(\thread)
-	sdc1	$f19, THREAD_FPR19(\thread)
-	sdc1	$f21, THREAD_FPR21(\thread)
-	sdc1	$f23, THREAD_FPR23(\thread)
-	sdc1	$f25, THREAD_FPR25(\thread)
-	sdc1	$f27, THREAD_FPR27(\thread)
-	sdc1	$f29, THREAD_FPR29(\thread)
-	sdc1	$f31, THREAD_FPR31(\thread)
-	.endm
-
-	.macro	fpu_save_double thread status tmp
-	sll	\tmp, \status, 5
-	bgez	\tmp, 2f
-	fpu_save_16odd \thread
-2:
-	fpu_save_16even \thread \tmp
-	.endm
-
-	.macro	fpu_restore_16even thread tmp=t0
-	lw	\tmp, THREAD_FCR31(\thread)
-	ldc1	$f0,  THREAD_FPR0(\thread)
-	ldc1	$f2,  THREAD_FPR2(\thread)
-	ldc1	$f4,  THREAD_FPR4(\thread)
-	ldc1	$f6,  THREAD_FPR6(\thread)
-	ldc1	$f8,  THREAD_FPR8(\thread)
-	ldc1	$f10, THREAD_FPR10(\thread)
-	ldc1	$f12, THREAD_FPR12(\thread)
-	ldc1	$f14, THREAD_FPR14(\thread)
-	ldc1	$f16, THREAD_FPR16(\thread)
-	ldc1	$f18, THREAD_FPR18(\thread)
-	ldc1	$f20, THREAD_FPR20(\thread)
-	ldc1	$f22, THREAD_FPR22(\thread)
-	ldc1	$f24, THREAD_FPR24(\thread)
-	ldc1	$f26, THREAD_FPR26(\thread)
-	ldc1	$f28, THREAD_FPR28(\thread)
-	ldc1	$f30, THREAD_FPR30(\thread)
-	ctc1	\tmp, fcr31
-	.endm
-
-	.macro	fpu_restore_16odd thread
-	ldc1	$f1,  THREAD_FPR1(\thread)
-	ldc1	$f3,  THREAD_FPR3(\thread)
-	ldc1	$f5,  THREAD_FPR5(\thread)
-	ldc1	$f7,  THREAD_FPR7(\thread)
-	ldc1	$f9,  THREAD_FPR9(\thread)
-	ldc1	$f11, THREAD_FPR11(\thread)
-	ldc1	$f13, THREAD_FPR13(\thread)
-	ldc1	$f15, THREAD_FPR15(\thread)
-	ldc1	$f17, THREAD_FPR17(\thread)
-	ldc1	$f19, THREAD_FPR19(\thread)
-	ldc1	$f21, THREAD_FPR21(\thread)
-	ldc1	$f23, THREAD_FPR23(\thread)
-	ldc1	$f25, THREAD_FPR25(\thread)
-	ldc1	$f27, THREAD_FPR27(\thread)
-	ldc1	$f29, THREAD_FPR29(\thread)
-	ldc1	$f31, THREAD_FPR31(\thread)
-	.endm
-
-	.macro	fpu_restore_double thread status tmp
-	sll	\tmp, \status, 5
-	bgez	\tmp, 1f				# 16 register mode?
-
-	fpu_restore_16odd \thread
-1:	fpu_restore_16even \thread \tmp
-	.endm
-
 	.macro	cpu_save_nonscratch thread
 	LONG_S	s0, THREAD_REG16(\thread)
 	LONG_S	s1, THREAD_REG17(\thread)
diff --git a/arch/mips/include/asm/asmmacro.h b/arch/mips/include/asm/asmmacro.h
index 6c8342a..3220c93 100644
--- a/arch/mips/include/asm/asmmacro.h
+++ b/arch/mips/include/asm/asmmacro.h
@@ -62,6 +62,113 @@
 	.endm
 #endif /* CONFIG_MIPS_MT_SMTC */
 
+	.macro	fpu_save_16even thread tmp=t0
+	cfc1	\tmp, fcr31
+	sdc1	$f0,  THREAD_FPR0(\thread)
+	sdc1	$f2,  THREAD_FPR2(\thread)
+	sdc1	$f4,  THREAD_FPR4(\thread)
+	sdc1	$f6,  THREAD_FPR6(\thread)
+	sdc1	$f8,  THREAD_FPR8(\thread)
+	sdc1	$f10, THREAD_FPR10(\thread)
+	sdc1	$f12, THREAD_FPR12(\thread)
+	sdc1	$f14, THREAD_FPR14(\thread)
+	sdc1	$f16, THREAD_FPR16(\thread)
+	sdc1	$f18, THREAD_FPR18(\thread)
+	sdc1	$f20, THREAD_FPR20(\thread)
+	sdc1	$f22, THREAD_FPR22(\thread)
+	sdc1	$f24, THREAD_FPR24(\thread)
+	sdc1	$f26, THREAD_FPR26(\thread)
+	sdc1	$f28, THREAD_FPR28(\thread)
+	sdc1	$f30, THREAD_FPR30(\thread)
+	sw	\tmp, THREAD_FCR31(\thread)
+	.endm
+
+	.macro	fpu_save_16odd thread
+	.set	push
+	.set	mips64r2
+	sdc1	$f1,  THREAD_FPR1(\thread)
+	sdc1	$f3,  THREAD_FPR3(\thread)
+	sdc1	$f5,  THREAD_FPR5(\thread)
+	sdc1	$f7,  THREAD_FPR7(\thread)
+	sdc1	$f9,  THREAD_FPR9(\thread)
+	sdc1	$f11, THREAD_FPR11(\thread)
+	sdc1	$f13, THREAD_FPR13(\thread)
+	sdc1	$f15, THREAD_FPR15(\thread)
+	sdc1	$f17, THREAD_FPR17(\thread)
+	sdc1	$f19, THREAD_FPR19(\thread)
+	sdc1	$f21, THREAD_FPR21(\thread)
+	sdc1	$f23, THREAD_FPR23(\thread)
+	sdc1	$f25, THREAD_FPR25(\thread)
+	sdc1	$f27, THREAD_FPR27(\thread)
+	sdc1	$f29, THREAD_FPR29(\thread)
+	sdc1	$f31, THREAD_FPR31(\thread)
+	.set	pop
+	.endm
+
+	.macro	fpu_save_double thread status tmp
+#if defined(CONFIG_MIPS64) || defined(CONFIG_CPU_MIPS32_R2)
+	sll	\tmp, \status, 5
+	bgez	\tmp, 10f
+	fpu_save_16odd \thread
+10:
+#endif
+	fpu_save_16even \thread \tmp
+	.endm
+
+	.macro	fpu_restore_16even thread tmp=t0
+	lw	\tmp, THREAD_FCR31(\thread)
+	ldc1	$f0,  THREAD_FPR0(\thread)
+	ldc1	$f2,  THREAD_FPR2(\thread)
+	ldc1	$f4,  THREAD_FPR4(\thread)
+	ldc1	$f6,  THREAD_FPR6(\thread)
+	ldc1	$f8,  THREAD_FPR8(\thread)
+	ldc1	$f10, THREAD_FPR10(\thread)
+	ldc1	$f12, THREAD_FPR12(\thread)
+	ldc1	$f14, THREAD_FPR14(\thread)
+	ldc1	$f16, THREAD_FPR16(\thread)
+	ldc1	$f18, THREAD_FPR18(\thread)
+	ldc1	$f20, THREAD_FPR20(\thread)
+	ldc1	$f22, THREAD_FPR22(\thread)
+	ldc1	$f24, THREAD_FPR24(\thread)
+	ldc1	$f26, THREAD_FPR26(\thread)
+	ldc1	$f28, THREAD_FPR28(\thread)
+	ldc1	$f30, THREAD_FPR30(\thread)
+	ctc1	\tmp, fcr31
+	.endm
+
+	.macro	fpu_restore_16odd thread
+	.set	push
+	.set	mips64r2
+	ldc1	$f1,  THREAD_FPR1(\thread)
+	ldc1	$f3,  THREAD_FPR3(\thread)
+	ldc1	$f5,  THREAD_FPR5(\thread)
+	ldc1	$f7,  THREAD_FPR7(\thread)
+	ldc1	$f9,  THREAD_FPR9(\thread)
+	ldc1	$f11, THREAD_FPR11(\thread)
+	ldc1	$f13, THREAD_FPR13(\thread)
+	ldc1	$f15, THREAD_FPR15(\thread)
+	ldc1	$f17, THREAD_FPR17(\thread)
+	ldc1	$f19, THREAD_FPR19(\thread)
+	ldc1	$f21, THREAD_FPR21(\thread)
+	ldc1	$f23, THREAD_FPR23(\thread)
+	ldc1	$f25, THREAD_FPR25(\thread)
+	ldc1	$f27, THREAD_FPR27(\thread)
+	ldc1	$f29, THREAD_FPR29(\thread)
+	ldc1	$f31, THREAD_FPR31(\thread)
+	.set	pop
+	.endm
+
+	.macro	fpu_restore_double thread status tmp
+#if defined(CONFIG_MIPS64) || defined(CONFIG_CPU_MIPS32_R2)
+	sll	\tmp, \status, 5
+	bgez	\tmp, 10f				# 16 register mode?
+
+	fpu_restore_16odd \thread
+10:
+#endif
+	fpu_restore_16even \thread \tmp
+	.endm
+
 /*
  * Temporary until all gas have MT ASE support
  */
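
The fpu_save_double/fpu_restore_double macros above key off Status.FR: shifting the status word left by 5 moves FR (bit 26) into the sign bit, so the bgez branch skips the odd-register transfer exactly when the FPU is in the 16 double-register mode. A minimal C sketch of the same predicate, assuming the usual ST0_FR definition (the function name is illustrative, not part of this patch):

#include <stdbool.h>
#include <stdint.h>

#define ST0_FR	(1u << 26)	/* Status.FR: 32 double-precision FP registers when set */

/* Same test as "sll \tmp, \status, 5; bgez \tmp, ...": after the shift the
 * sign bit holds FR, so the branch is taken (odd registers skipped) when FR=0. */
static bool fpu_has_32_dbl_regs(uint32_t status)
{
	return (int32_t)(status << 5) < 0;	/* equivalent to (status & ST0_FR) != 0 */
}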
diff --git a/arch/mips/include/asm/bmips.h b/arch/mips/include/asm/bmips.h
index 27bd060..cbacceb 100644
--- a/arch/mips/include/asm/bmips.h
+++ b/arch/mips/include/asm/bmips.h
@@ -46,8 +46,35 @@
 
 #include <linux/cpumask.h>
 #include <asm/r4kcache.h>
+#include <asm/smp-ops.h>
 
-extern struct plat_smp_ops bmips_smp_ops;
+extern struct plat_smp_ops bmips43xx_smp_ops;
+extern struct plat_smp_ops bmips5000_smp_ops;
+
+static inline int register_bmips_smp_ops(void)
+{
+#if IS_ENABLED(CONFIG_CPU_BMIPS) && IS_ENABLED(CONFIG_SMP)
+	switch (current_cpu_type()) {
+	case CPU_BMIPS32:
+	case CPU_BMIPS3300:
+		return register_up_smp_ops();
+	case CPU_BMIPS4350:
+	case CPU_BMIPS4380:
+		register_smp_ops(&bmips43xx_smp_ops);
+		break;
+	case CPU_BMIPS5000:
+		register_smp_ops(&bmips5000_smp_ops);
+		break;
+	default:
+		return -ENODEV;
+	}
+
+	return 0;
+#else
+	return -ENODEV;
+#endif
+}
+
 extern char bmips_reset_nmi_vec;
 extern char bmips_reset_nmi_vec_end;
 extern char bmips_smp_movevec;
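
register_bmips_smp_ops() centralizes the per-core choice of SMP ops and reports -ENODEV when nothing applies. A hedged sketch of how a board's setup path might consume that return value; the function name and the fallback shown are illustrative, not part of this patch:

#include <asm/bmips.h>
#include <asm/smp-ops.h>

/* Illustrative board setup: try the BMIPS-specific ops first and fall back
 * to uniprocessor ops when this CPU or configuration has no BMIPS SMP support. */
static void example_register_smp_ops(void)
{
	if (register_bmips_smp_ops())
		register_up_smp_ops();
}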
diff --git a/arch/mips/include/asm/cpu-features.h b/arch/mips/include/asm/cpu-features.h
index d445d06..6e70b03 100644
--- a/arch/mips/include/asm/cpu-features.h
+++ b/arch/mips/include/asm/cpu-features.h
@@ -20,6 +20,13 @@
 #ifndef cpu_has_tlb
 #define cpu_has_tlb		(cpu_data[0].options & MIPS_CPU_TLB)
 #endif
+#ifndef cpu_has_tlbinv
+#define cpu_has_tlbinv		(cpu_data[0].options & MIPS_CPU_TLBINV)
+#endif
+#ifndef cpu_has_segments
+#define cpu_has_segments	(cpu_data[0].options & MIPS_CPU_SEGMENTS)
+#endif
+
 
 /*
  * For the moment we don't consider R6000 and R8000 so we can assume that
diff --git a/arch/mips/include/asm/cpu-info.h b/arch/mips/include/asm/cpu-info.h
index 21c8e29..8f7adf0 100644
--- a/arch/mips/include/asm/cpu-info.h
+++ b/arch/mips/include/asm/cpu-info.h
@@ -52,6 +52,9 @@
 	unsigned int		cputype;
 	int			isa_level;
 	int			tlbsize;
+	int			tlbsizevtlb;
+	int			tlbsizeftlbsets;
+	int			tlbsizeftlbways;
 	struct cache_desc	icache; /* Primary I-cache */
 	struct cache_desc	dcache; /* Primary D or combined I/D cache */
 	struct cache_desc	scache; /* Secondary cache */
diff --git a/arch/mips/include/asm/cpu-type.h b/arch/mips/include/asm/cpu-type.h
index 4a402cc..02f591b 100644
--- a/arch/mips/include/asm/cpu-type.h
+++ b/arch/mips/include/asm/cpu-type.h
@@ -27,10 +27,7 @@
 #ifdef CONFIG_SYS_HAS_CPU_MIPS32_R1
 	case CPU_4KC:
 	case CPU_ALCHEMY:
-	case CPU_BMIPS3300:
-	case CPU_BMIPS4350:
 	case CPU_PR4450:
-	case CPU_BMIPS32:
 	case CPU_JZRISC:
 #endif
 
@@ -47,6 +44,8 @@
 	case CPU_74K:
 	case CPU_M14KC:
 	case CPU_M14KEC:
+	case CPU_INTERAPTIV:
+	case CPU_PROAPTIV:
 #endif
 
 #ifdef CONFIG_SYS_HAS_CPU_MIPS64_R1
@@ -163,6 +162,16 @@
 	case CPU_CAVIUM_OCTEON2:
 #endif
 
+#if defined(CONFIG_SYS_HAS_CPU_BMIPS32_3300) || \
+	defined(CONFIG_SYS_HAS_CPU_MIPS32_R1)
+	case CPU_BMIPS32:
+	case CPU_BMIPS3300:
+#endif
+
+#ifdef CONFIG_SYS_HAS_CPU_BMIPS4350
+	case CPU_BMIPS4350:
+#endif
+
 #ifdef CONFIG_SYS_HAS_CPU_BMIPS4380
 	case CPU_BMIPS4380:
 #endif
diff --git a/arch/mips/include/asm/cpu.h b/arch/mips/include/asm/cpu.h
index d2035e1..76411df 100644
--- a/arch/mips/include/asm/cpu.h
+++ b/arch/mips/include/asm/cpu.h
@@ -111,6 +111,10 @@
 #define PRID_IMP_1074K		0x9a00
 #define PRID_IMP_M14KC		0x9c00
 #define PRID_IMP_M14KEC		0x9e00
+#define PRID_IMP_INTERAPTIV_UP	0xa000
+#define PRID_IMP_INTERAPTIV_MP	0xa100
+#define PRID_IMP_PROAPTIV_UP	0xa200
+#define PRID_IMP_PROAPTIV_MP	0xa300
 
 /*
  * These are the PRID's for when 23:16 == PRID_COMP_SIBYTE
@@ -194,6 +198,7 @@
 #define PRID_IMP_NETLOGIC_XLP8XX	0x1000
 #define PRID_IMP_NETLOGIC_XLP3XX	0x1100
 #define PRID_IMP_NETLOGIC_XLP2XX	0x1200
+#define PRID_IMP_NETLOGIC_XLP9XX	0x1500
 
 /*
  * Particular Revision values for bits 7:0 of the PRId register.
@@ -249,6 +254,8 @@
 
 #define FPIR_IMP_NONE		0x0000
 
+#if !defined(__ASSEMBLY__)
+
 enum cpu_type_enum {
 	CPU_UNKNOWN,
 
@@ -289,7 +296,7 @@
 	CPU_4KC, CPU_4KEC, CPU_4KSC, CPU_24K, CPU_34K, CPU_1004K, CPU_74K,
 	CPU_ALCHEMY, CPU_PR4450, CPU_BMIPS32, CPU_BMIPS3300, CPU_BMIPS4350,
 	CPU_BMIPS4380, CPU_BMIPS5000, CPU_JZRISC, CPU_LOONGSON1, CPU_M14KC,
-	CPU_M14KEC,
+	CPU_M14KEC, CPU_INTERAPTIV, CPU_PROAPTIV,
 
 	/*
 	 * MIPS64 class processors
@@ -301,6 +308,7 @@
 	CPU_LAST
 };
 
+#endif /* !__ASSEMBLY__ */
 
 /*
  * ISA Level encodings
@@ -348,6 +356,8 @@
 #define MIPS_CPU_PCI		0x00400000 /* CPU has Perf Ctr Int indicator */
 #define MIPS_CPU_RIXI		0x00800000 /* CPU has TLB Read/eXec Inhibit */
 #define MIPS_CPU_MICROMIPS	0x01000000 /* CPU has microMIPS capability */
+#define MIPS_CPU_TLBINV		0x02000000 /* CPU supports TLBINV/F */
+#define MIPS_CPU_SEGMENTS	0x04000000 /* CPU supports Segmentation Control registers */
 
 /*
  * CPU ASE encodings
diff --git a/arch/mips/include/asm/dma-coherence.h b/arch/mips/include/asm/dma-coherence.h
index 242cbb3..bc5e85d 100644
--- a/arch/mips/include/asm/dma-coherence.h
+++ b/arch/mips/include/asm/dma-coherence.h
@@ -9,7 +9,16 @@
 #ifndef __ASM_DMA_COHERENCE_H
 #define __ASM_DMA_COHERENCE_H
 
+#ifdef CONFIG_DMA_MAYBE_COHERENT
 extern int coherentio;
 extern int hw_coherentio;
+#else
+#ifdef CONFIG_DMA_COHERENT
+#define coherentio	1
+#else
+#define coherentio	0
+#endif
+#define hw_coherentio	0
+#endif /* CONFIG_DMA_MAYBE_COHERENT */
 
 #endif
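
With CONFIG_DMA_MAYBE_COHERENT unset, coherentio and hw_coherentio collapse to compile-time constants, so coherence checks fold away instead of referencing a global. A small sketch of the effect on a typical caller (the function name is illustrative):

#include <asm/dma-coherence.h>

/* With CONFIG_DMA_COHERENT, coherentio is the constant 1 and this compiles
 * to "return 1"; without CONFIG_DMA_MAYBE_COHERENT no run-time variable is
 * referenced at all. */
static int example_device_is_coherent(void)
{
	return coherentio;
}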
diff --git a/arch/mips/include/asm/elf.h b/arch/mips/include/asm/elf.h
index a66359e..d414405 100644
--- a/arch/mips/include/asm/elf.h
+++ b/arch/mips/include/asm/elf.h
@@ -36,6 +36,7 @@
 #define EF_MIPS_ABI2		0x00000020
 #define EF_MIPS_OPTIONS_FIRST	0x00000080
 #define EF_MIPS_32BITMODE	0x00000100
+#define EF_MIPS_FP64		0x00000200
 #define EF_MIPS_ABI		0x0000f000
 #define EF_MIPS_ARCH		0xf0000000
 
@@ -176,6 +177,18 @@
 #ifdef CONFIG_32BIT
 
 /*
+ * In order to be sure that we don't attempt to execute an O32 binary which
+ * requires 64 bit FP (FR=1) on a system which does not support it we refuse
+ * to execute any binary which has bits specified by the following macro set
+ * in its ELF header flags.
+ */
+#ifdef CONFIG_MIPS_O32_FP64_SUPPORT
+# define __MIPS_O32_FP64_MUST_BE_ZERO	0
+#else
+# define __MIPS_O32_FP64_MUST_BE_ZERO	EF_MIPS_FP64
+#endif
+
+/*
  * This is used to ensure we don't load something for the wrong architecture.
  */
 #define elf_check_arch(hdr)						\
@@ -192,6 +205,8 @@
 	if (((__h->e_flags & EF_MIPS_ABI) != 0) &&			\
 	    ((__h->e_flags & EF_MIPS_ABI) != EF_MIPS_ABI_O32))		\
 		__res = 0;						\
+	if (__h->e_flags & __MIPS_O32_FP64_MUST_BE_ZERO)		\
+		__res = 0;						\
 									\
 	__res;								\
 })
@@ -249,6 +264,11 @@
 
 #define SET_PERSONALITY(ex)						\
 do {									\
+	if ((ex).e_flags & EF_MIPS_FP64)				\
+		clear_thread_flag(TIF_32BIT_FPREGS);			\
+	else								\
+		set_thread_flag(TIF_32BIT_FPREGS);			\
+									\
 	if (personality(current->personality) != PER_LINUX)		\
 		set_personality(PER_LINUX);				\
 									\
@@ -271,14 +291,18 @@
 #endif
 
 #ifdef CONFIG_MIPS32_O32
-#define __SET_PERSONALITY32_O32()					\
+#define __SET_PERSONALITY32_O32(ex)					\
 	do {								\
 		set_thread_flag(TIF_32BIT_REGS);			\
 		set_thread_flag(TIF_32BIT_ADDR);			\
+									\
+		if (!((ex).e_flags & EF_MIPS_FP64))			\
+			set_thread_flag(TIF_32BIT_FPREGS);		\
+									\
 		current->thread.abi = &mips_abi_32;			\
 	} while (0)
 #else
-#define __SET_PERSONALITY32_O32()					\
+#define __SET_PERSONALITY32_O32(ex)					\
 	do { } while (0)
 #endif
 
@@ -289,7 +313,7 @@
 	     ((ex).e_flags & EF_MIPS_ABI) == 0)				\
 		__SET_PERSONALITY32_N32();				\
 	else								\
-		__SET_PERSONALITY32_O32();				\
+		__SET_PERSONALITY32_O32(ex);				\
 } while (0)
 #else
 #define __SET_PERSONALITY32(ex) do { } while (0)
@@ -300,6 +324,7 @@
 	unsigned int p;							\
 									\
 	clear_thread_flag(TIF_32BIT_REGS);				\
+	clear_thread_flag(TIF_32BIT_FPREGS);				\
 	clear_thread_flag(TIF_32BIT_ADDR);				\
 									\
 	if ((ex).e_ident[EI_CLASS] == ELFCLASS32)			\
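
The new __MIPS_O32_FP64_MUST_BE_ZERO mask makes elf_check_arch() refuse O32 binaries that demand FR=1 on kernels built without CONFIG_MIPS_O32_FP64_SUPPORT. A stand-alone sketch of that gate; the helper name and the boolean standing in for the Kconfig option are made up for illustration:

#include <stdbool.h>

#define EF_MIPS_FP64	0x00000200

/* kernel_has_o32_fp64 stands in for CONFIG_MIPS_O32_FP64_SUPPORT. */
static bool o32_binary_loadable(unsigned int e_flags, bool kernel_has_o32_fp64)
{
	unsigned int must_be_zero = kernel_has_o32_fp64 ? 0 : EF_MIPS_FP64;

	return (e_flags & must_be_zero) == 0;
}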
diff --git a/arch/mips/include/asm/fpu.h b/arch/mips/include/asm/fpu.h
index d088e5d..6b97495 100644
--- a/arch/mips/include/asm/fpu.h
+++ b/arch/mips/include/asm/fpu.h
@@ -33,11 +33,50 @@
 extern void _save_fp(struct task_struct *);
 extern void _restore_fp(struct task_struct *);
 
-#define __enable_fpu()							\
-do {									\
-	set_c0_status(ST0_CU1);						\
-	enable_fpu_hazard();						\
-} while (0)
+/*
+ * This enum specifies a mode in which we want the FPU to operate, for cores
+ * which implement the Status.FR bit. Note that FPU_32BIT & FPU_64BIT
+ * purposefully have the values 0 & 1 respectively, so that an integer value
+ * of Status.FR can be trivially cast to the corresponding enum fpu_mode.
+ */
+enum fpu_mode {
+	FPU_32BIT = 0,		/* FR = 0 */
+	FPU_64BIT,		/* FR = 1 */
+	FPU_AS_IS,
+};
+
+static inline int __enable_fpu(enum fpu_mode mode)
+{
+	int fr;
+
+	switch (mode) {
+	case FPU_AS_IS:
+		/* just enable the FPU in its current mode */
+		set_c0_status(ST0_CU1);
+		enable_fpu_hazard();
+		return 0;
+
+	case FPU_64BIT:
+#if !(defined(CONFIG_CPU_MIPS32_R2) || defined(CONFIG_MIPS64))
+		/* we only have a 32-bit FPU */
+		return SIGFPE;
+#endif
+		/* fall through */
+	case FPU_32BIT:
+		/* set CU1 & change FR appropriately */
+		fr = (int)mode;
+		change_c0_status(ST0_CU1 | ST0_FR, ST0_CU1 | (fr ? ST0_FR : 0));
+		enable_fpu_hazard();
+
+		/* check FR has the desired value */
+		return (!!(read_c0_status() & ST0_FR) == !!fr) ? 0 : SIGFPE;
+
+	default:
+		BUG();
+	}
+
+	return SIGFPE;
+}
 
 #define __disable_fpu()							\
 do {									\
@@ -45,19 +84,6 @@
 	disable_fpu_hazard();						\
 } while (0)
 
-#define enable_fpu()							\
-do {									\
-	if (cpu_has_fpu)						\
-		__enable_fpu();						\
-} while (0)
-
-#define disable_fpu()							\
-do {									\
-	if (cpu_has_fpu)						\
-		__disable_fpu();					\
-} while (0)
-
-
 #define clear_fpu_owner()	clear_thread_flag(TIF_USEDFPU)
 
 static inline int __is_fpu_owner(void)
@@ -70,27 +96,46 @@
 	return cpu_has_fpu && __is_fpu_owner();
 }
 
-static inline void __own_fpu(void)
+static inline int __own_fpu(void)
 {
-	__enable_fpu();
+	enum fpu_mode mode;
+	int ret;
+
+	mode = !test_thread_flag(TIF_32BIT_FPREGS);
+	ret = __enable_fpu(mode);
+	if (ret)
+		return ret;
+
 	KSTK_STATUS(current) |= ST0_CU1;
+	if (mode == FPU_64BIT)
+		KSTK_STATUS(current) |= ST0_FR;
+	else /* mode == FPU_32BIT */
+		KSTK_STATUS(current) &= ~ST0_FR;
+
 	set_thread_flag(TIF_USEDFPU);
+	return 0;
 }
 
-static inline void own_fpu_inatomic(int restore)
+static inline int own_fpu_inatomic(int restore)
 {
+	int ret = 0;
+
 	if (cpu_has_fpu && !__is_fpu_owner()) {
-		__own_fpu();
-		if (restore)
+		ret = __own_fpu();
+		if (restore && !ret)
 			_restore_fp(current);
 	}
+	return ret;
 }
 
-static inline void own_fpu(int restore)
+static inline int own_fpu(int restore)
 {
+	int ret;
+
 	preempt_disable();
-	own_fpu_inatomic(restore);
+	ret = own_fpu_inatomic(restore);
 	preempt_enable();
+	return ret;
 }
 
 static inline void lose_fpu(int save)
@@ -106,16 +151,21 @@
 	preempt_enable();
 }
 
-static inline void init_fpu(void)
+static inline int init_fpu(void)
 {
+	int ret = 0;
+
 	preempt_disable();
 	if (cpu_has_fpu) {
-		__own_fpu();
-		_init_fpu();
+		ret = __own_fpu();
+		if (!ret)
+			_init_fpu();
 	} else {
 		fpu_emulator_init_fpu();
 	}
+
 	preempt_enable();
+	return ret;
 }
 
 static inline void save_fp(struct task_struct *tsk)
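
Because __enable_fpu() can now fail with SIGFPE when the requested FR mode is unavailable, own_fpu() and init_fpu() return an int the caller must act on. A hedged sketch of a caller propagating that as a signal; the handler name and exact signal-delivery path are illustrative:

#include <asm/fpu.h>
#include <linux/sched.h>

/* Sketch only: a coprocessor-unusable style handler claiming the FPU and
 * turning a failed FR-mode request into a signal for the current task. */
static void example_handle_fpu_fault(void)
{
	int sig = own_fpu(1);	/* enable the FPU and restore FP context */

	if (sig)
		force_sig(sig, current);	/* typically SIGFPE */
}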
diff --git a/arch/mips/include/asm/highmem.h b/arch/mips/include/asm/highmem.h
index b0dd0c8..572e63e 100644
--- a/arch/mips/include/asm/highmem.h
+++ b/arch/mips/include/asm/highmem.h
@@ -19,7 +19,6 @@
 
 #ifdef __KERNEL__
 
-#include <linux/init.h>
 #include <linux/interrupt.h>
 #include <linux/uaccess.h>
 #include <asm/kmap_types.h>
diff --git a/arch/mips/include/asm/kvm_host.h b/arch/mips/include/asm/kvm_host.h
index 3296696..a995fce 100644
--- a/arch/mips/include/asm/kvm_host.h
+++ b/arch/mips/include/asm/kvm_host.h
@@ -391,9 +391,6 @@
 	uint32_t guest_kernel_asid[NR_CPUS];
 	struct mm_struct guest_kernel_mm, guest_user_mm;
 
-	struct kvm_mips_tlb shadow_tlb[NR_CPUS][KVM_MIPS_GUEST_TLB_SIZE];
-
-
 	struct hrtimer comparecount_timer;
 
 	int last_sched_cpu;
@@ -529,7 +526,6 @@
 
 extern void kvm_mips_dump_host_tlbs(void);
 extern void kvm_mips_dump_guest_tlbs(struct kvm_vcpu *vcpu);
-extern void kvm_mips_dump_shadow_tlbs(struct kvm_vcpu *vcpu);
 extern void kvm_mips_flush_host_tlb(int skip_kseg0);
 extern int kvm_mips_host_tlb_inv(struct kvm_vcpu *vcpu, unsigned long entryhi);
 extern int kvm_mips_host_tlb_inv_index(struct kvm_vcpu *vcpu, int index);
@@ -541,10 +537,7 @@
 						   unsigned long gva);
 extern void kvm_get_new_mmu_context(struct mm_struct *mm, unsigned long cpu,
 				    struct kvm_vcpu *vcpu);
-extern void kvm_shadow_tlb_put(struct kvm_vcpu *vcpu);
-extern void kvm_shadow_tlb_load(struct kvm_vcpu *vcpu);
 extern void kvm_local_flush_tlb_all(void);
-extern void kvm_mips_init_shadow_tlb(struct kvm_vcpu *vcpu);
 extern void kvm_mips_alloc_new_mmu_context(struct kvm_vcpu *vcpu);
 extern void kvm_mips_vcpu_load(struct kvm_vcpu *vcpu, int cpu);
 extern void kvm_mips_vcpu_put(struct kvm_vcpu *vcpu);
diff --git a/arch/mips/include/asm/mach-ath79/ar71xx_regs.h b/arch/mips/include/asm/mach-ath79/ar71xx_regs.h
index b86a125..cd41e93 100644
--- a/arch/mips/include/asm/mach-ath79/ar71xx_regs.h
+++ b/arch/mips/include/asm/mach-ath79/ar71xx_regs.h
@@ -16,7 +16,6 @@
 #define __ASM_MACH_AR71XX_REGS_H
 
 #include <linux/types.h>
-#include <linux/init.h>
 #include <linux/io.h>
 #include <linux/bitops.h>
 
diff --git a/arch/mips/include/asm/mach-bcm47xx/bcm47xx.h b/arch/mips/include/asm/mach-bcm47xx/bcm47xx.h
index cc7563b..7527c1d 100644
--- a/arch/mips/include/asm/mach-bcm47xx/bcm47xx.h
+++ b/arch/mips/include/asm/mach-bcm47xx/bcm47xx.h
@@ -56,4 +56,6 @@
 				 const char *prefix);
 #endif
 
+void bcm47xx_set_system_type(u16 chip_id);
+
 #endif /* __ASM_BCM47XX_H */
diff --git a/arch/mips/include/asm/mach-bcm47xx/bcm47xx_board.h b/arch/mips/include/asm/mach-bcm47xx/bcm47xx_board.h
index 00867dd..40005fb 100644
--- a/arch/mips/include/asm/mach-bcm47xx/bcm47xx_board.h
+++ b/arch/mips/include/asm/mach-bcm47xx/bcm47xx_board.h
@@ -66,6 +66,7 @@
 	BCM47XX_BOARD_LINKSYS_WRT310NV1,
 	BCM47XX_BOARD_LINKSYS_WRT310NV2,
 	BCM47XX_BOARD_LINKSYS_WRT54G3GV2,
+	BCM47XX_BOARD_LINKSYS_WRT54GSV1,
 	BCM47XX_BOARD_LINKSYS_WRT610NV1,
 	BCM47XX_BOARD_LINKSYS_WRT610NV2,
 	BCM47XX_BOARD_LINKSYS_WRTSL54GS,
diff --git a/arch/mips/include/asm/mach-bcm47xx/cpu-feature-overrides.h b/arch/mips/include/asm/mach-bcm47xx/cpu-feature-overrides.h
new file mode 100644
index 0000000..b7992cd
--- /dev/null
+++ b/arch/mips/include/asm/mach-bcm47xx/cpu-feature-overrides.h
@@ -0,0 +1,82 @@
+#ifndef __ASM_MACH_BCM47XX_CPU_FEATURE_OVERRIDES_H
+#define __ASM_MACH_BCM47XX_CPU_FEATURE_OVERRIDES_H
+
+#define cpu_has_tlb			1
+#define cpu_has_4kex			1
+#define cpu_has_3k_cache		0
+#define cpu_has_4k_cache		1
+#define cpu_has_tx39_cache		0
+#define cpu_has_fpu			0
+#define cpu_has_32fpr			0
+#define cpu_has_counter			1
+#if defined(CONFIG_BCM47XX_BCMA) && !defined(CONFIG_BCM47XX_SSB)
+#define cpu_has_watch			1
+#elif defined(CONFIG_BCM47XX_SSB) && !defined(CONFIG_BCM47XX_BCMA)
+#define cpu_has_watch			0
+#endif
+#define cpu_has_divec			1
+#define cpu_has_vce			0
+#define cpu_has_cache_cdex_p		0
+#define cpu_has_cache_cdex_s		0
+#define cpu_has_prefetch		1
+#define cpu_has_mcheck			1
+#define cpu_has_ejtag			1
+#define cpu_has_llsc			1
+
+/* cpu_has_mips16 */
+#define cpu_has_mdmx			0
+#define cpu_has_mips3d			0
+#define cpu_has_rixi			0
+#define cpu_has_mmips			0
+#define cpu_has_smartmips		0
+#define cpu_has_vtag_icache		0
+/* cpu_has_dc_aliases */
+#define cpu_has_ic_fills_f_dc		0
+#define cpu_has_pindexed_dcache		0
+#define cpu_icache_snoops_remote_store	0
+
+#define cpu_has_mips_2			1
+#define cpu_has_mips_3			0
+#define cpu_has_mips32r1		1
+#if defined(CONFIG_BCM47XX_BCMA) && !defined(CONFIG_BCM47XX_SSB)
+#define cpu_has_mips32r2		1
+#elif defined(CONFIG_BCM47XX_SSB) && !defined(CONFIG_BCM47XX_BCMA)
+#define cpu_has_mips32r2		0
+#endif
+#define cpu_has_mips64r1		0
+#define cpu_has_mips64r2		0
+
+#if defined(CONFIG_BCM47XX_BCMA) && !defined(CONFIG_BCM47XX_SSB)
+#define cpu_has_dsp			1
+#define cpu_has_dsp2			1
+#elif defined(CONFIG_BCM47XX_SSB) && !defined(CONFIG_BCM47XX_BCMA)
+#define cpu_has_dsp			0
+#define cpu_has_dsp2			0
+#endif
+#define cpu_has_mipsmt			0
+/* cpu_has_userlocal */
+
+#define cpu_has_nofpuex			0
+#define cpu_has_64bits			0
+#define cpu_has_64bit_zero_reg		0
+#if defined(CONFIG_BCM47XX_BCMA) && !defined(CONFIG_BCM47XX_SSB)
+#define cpu_has_vint			1
+#elif defined(CONFIG_BCM47XX_SSB) && !defined(CONFIG_BCM47XX_BCMA)
+#define cpu_has_vint			0
+#endif
+#define cpu_has_veic			0
+#define cpu_has_inclusive_pcaches	0
+
+#if defined(CONFIG_BCM47XX_BCMA) && !defined(CONFIG_BCM47XX_SSB)
+#define cpu_dcache_line_size()		32
+#define cpu_icache_line_size()		32
+#define cpu_has_perf_cntr_intr_bit	1
+#elif defined(CONFIG_BCM47XX_SSB) && !defined(CONFIG_BCM47XX_BCMA)
+#define cpu_dcache_line_size()		16
+#define cpu_icache_line_size()		16
+#define cpu_has_perf_cntr_intr_bit	0
+#endif
+#define cpu_scache_line_size()		0
+#define cpu_has_vz			0
+
+#endif /* __ASM_MACH_BCM47XX_CPU_FEATURE_OVERRIDES_H */
diff --git a/arch/mips/include/asm/mach-bcm63xx/bcm63xx_cpu.h b/arch/mips/include/asm/mach-bcm63xx/bcm63xx_cpu.h
index 19f9134..3112f08 100644
--- a/arch/mips/include/asm/mach-bcm63xx/bcm63xx_cpu.h
+++ b/arch/mips/include/asm/mach-bcm63xx/bcm63xx_cpu.h
@@ -145,6 +145,7 @@
 	RSET_UART1,
 	RSET_GPIO,
 	RSET_SPI,
+	RSET_HSSPI,
 	RSET_UDC0,
 	RSET_OHCI0,
 	RSET_OHCI_PRIV,
@@ -193,6 +194,7 @@
 #define RSET_ENETDMAS_SIZE(chans)	(16 * (chans))
 #define RSET_ENETSW_SIZE		65536
 #define RSET_UART_SIZE			24
+#define RSET_HSSPI_SIZE			1536
 #define RSET_UDC_SIZE			256
 #define RSET_OHCI_SIZE			256
 #define RSET_EHCI_SIZE			256
@@ -265,6 +267,7 @@
 #define BCM_6328_UART1_BASE		(0xb0000120)
 #define BCM_6328_GPIO_BASE		(0xb0000080)
 #define BCM_6328_SPI_BASE		(0xdeadbeef)
+#define BCM_6328_HSSPI_BASE		(0xb0001000)
 #define BCM_6328_UDC0_BASE		(0xdeadbeef)
 #define BCM_6328_USBDMA_BASE		(0xb000c000)
 #define BCM_6328_OHCI0_BASE		(0xb0002600)
@@ -313,6 +316,7 @@
 #define BCM_6338_UART1_BASE		(0xdeadbeef)
 #define BCM_6338_GPIO_BASE		(0xfffe0400)
 #define BCM_6338_SPI_BASE		(0xfffe0c00)
+#define BCM_6338_HSSPI_BASE		(0xdeadbeef)
 #define BCM_6338_UDC0_BASE		(0xdeadbeef)
 #define BCM_6338_USBDMA_BASE		(0xfffe2400)
 #define BCM_6338_OHCI0_BASE		(0xdeadbeef)
@@ -360,6 +364,7 @@
 #define BCM_6345_UART1_BASE		(0xdeadbeef)
 #define BCM_6345_GPIO_BASE		(0xfffe0400)
 #define BCM_6345_SPI_BASE		(0xdeadbeef)
+#define BCM_6345_HSSPI_BASE		(0xdeadbeef)
 #define BCM_6345_UDC0_BASE		(0xdeadbeef)
 #define BCM_6345_USBDMA_BASE		(0xfffe2800)
 #define BCM_6345_ENET0_BASE		(0xfffe1800)
@@ -406,6 +411,7 @@
 #define BCM_6348_UART1_BASE		(0xdeadbeef)
 #define BCM_6348_GPIO_BASE		(0xfffe0400)
 #define BCM_6348_SPI_BASE		(0xfffe0c00)
+#define BCM_6348_HSSPI_BASE		(0xdeadbeef)
 #define BCM_6348_UDC0_BASE		(0xfffe1000)
 #define BCM_6348_USBDMA_BASE		(0xdeadbeef)
 #define BCM_6348_OHCI0_BASE		(0xfffe1b00)
@@ -451,6 +457,7 @@
 #define BCM_6358_UART1_BASE		(0xfffe0120)
 #define BCM_6358_GPIO_BASE		(0xfffe0080)
 #define BCM_6358_SPI_BASE		(0xfffe0800)
+#define BCM_6358_HSSPI_BASE		(0xdeadbeef)
 #define BCM_6358_UDC0_BASE		(0xfffe0800)
 #define BCM_6358_USBDMA_BASE		(0xdeadbeef)
 #define BCM_6358_OHCI0_BASE		(0xfffe1400)
@@ -553,6 +560,7 @@
 #define BCM_6368_UART1_BASE		(0xb0000120)
 #define BCM_6368_GPIO_BASE		(0xb0000080)
 #define BCM_6368_SPI_BASE		(0xb0000800)
+#define BCM_6368_HSSPI_BASE		(0xdeadbeef)
 #define BCM_6368_UDC0_BASE		(0xdeadbeef)
 #define BCM_6368_USBDMA_BASE		(0xb0004800)
 #define BCM_6368_OHCI0_BASE		(0xb0001600)
@@ -604,6 +612,7 @@
 	__GEN_RSET_BASE(__cpu, UART1)					\
 	__GEN_RSET_BASE(__cpu, GPIO)					\
 	__GEN_RSET_BASE(__cpu, SPI)					\
+	__GEN_RSET_BASE(__cpu, HSSPI)					\
 	__GEN_RSET_BASE(__cpu, UDC0)					\
 	__GEN_RSET_BASE(__cpu, OHCI0)					\
 	__GEN_RSET_BASE(__cpu, OHCI_PRIV)				\
@@ -647,6 +656,7 @@
 	[RSET_UART1]		= BCM_## __cpu ##_UART1_BASE,		\
 	[RSET_GPIO]		= BCM_## __cpu ##_GPIO_BASE,		\
 	[RSET_SPI]		= BCM_## __cpu ##_SPI_BASE,		\
+	[RSET_HSSPI]		= BCM_## __cpu ##_HSSPI_BASE,		\
 	[RSET_UDC0]		= BCM_## __cpu ##_UDC0_BASE,		\
 	[RSET_OHCI0]		= BCM_## __cpu ##_OHCI0_BASE,		\
 	[RSET_OHCI_PRIV]	= BCM_## __cpu ##_OHCI_PRIV_BASE,	\
@@ -727,6 +737,7 @@
 	IRQ_ENET0,
 	IRQ_ENET1,
 	IRQ_ENET_PHY,
+	IRQ_HSSPI,
 	IRQ_OHCI0,
 	IRQ_EHCI0,
 	IRQ_USBD,
@@ -815,6 +826,7 @@
 #define BCM_6328_ENET0_IRQ		0
 #define BCM_6328_ENET1_IRQ		0
 #define BCM_6328_ENET_PHY_IRQ		(IRQ_INTERNAL_BASE + 12)
+#define BCM_6328_HSSPI_IRQ		(IRQ_INTERNAL_BASE + 29)
 #define BCM_6328_OHCI0_IRQ		(BCM_6328_HIGH_IRQ_BASE + 9)
 #define BCM_6328_EHCI0_IRQ		(BCM_6328_HIGH_IRQ_BASE + 10)
 #define BCM_6328_USBD_IRQ		(IRQ_INTERNAL_BASE + 4)
@@ -860,6 +872,7 @@
 #define BCM_6338_ENET0_IRQ		(IRQ_INTERNAL_BASE + 8)
 #define BCM_6338_ENET1_IRQ		0
 #define BCM_6338_ENET_PHY_IRQ		(IRQ_INTERNAL_BASE + 9)
+#define BCM_6338_HSSPI_IRQ		0
 #define BCM_6338_OHCI0_IRQ		0
 #define BCM_6338_EHCI0_IRQ		0
 #define BCM_6338_USBD_IRQ		0
@@ -898,6 +911,7 @@
 #define BCM_6345_ENET0_IRQ		(IRQ_INTERNAL_BASE + 8)
 #define BCM_6345_ENET1_IRQ		0
 #define BCM_6345_ENET_PHY_IRQ		(IRQ_INTERNAL_BASE + 12)
+#define BCM_6345_HSSPI_IRQ		0
 #define BCM_6345_OHCI0_IRQ		0
 #define BCM_6345_EHCI0_IRQ		0
 #define BCM_6345_USBD_IRQ		0
@@ -936,6 +950,7 @@
 #define BCM_6348_ENET0_IRQ		(IRQ_INTERNAL_BASE + 8)
 #define BCM_6348_ENET1_IRQ		(IRQ_INTERNAL_BASE + 7)
 #define BCM_6348_ENET_PHY_IRQ		(IRQ_INTERNAL_BASE + 9)
+#define BCM_6348_HSSPI_IRQ		0
 #define BCM_6348_OHCI0_IRQ		(IRQ_INTERNAL_BASE + 12)
 #define BCM_6348_EHCI0_IRQ		0
 #define BCM_6348_USBD_IRQ		0
@@ -974,6 +989,7 @@
 #define BCM_6358_ENET0_IRQ		(IRQ_INTERNAL_BASE + 8)
 #define BCM_6358_ENET1_IRQ		(IRQ_INTERNAL_BASE + 6)
 #define BCM_6358_ENET_PHY_IRQ		(IRQ_INTERNAL_BASE + 9)
+#define BCM_6358_HSSPI_IRQ		0
 #define BCM_6358_OHCI0_IRQ		(IRQ_INTERNAL_BASE + 5)
 #define BCM_6358_EHCI0_IRQ		(IRQ_INTERNAL_BASE + 10)
 #define BCM_6358_USBD_IRQ		0
@@ -1086,6 +1102,7 @@
 #define BCM_6368_ENET0_IRQ		0
 #define BCM_6368_ENET1_IRQ		0
 #define BCM_6368_ENET_PHY_IRQ		(IRQ_INTERNAL_BASE + 15)
+#define BCM_6368_HSSPI_IRQ		0
 #define BCM_6368_OHCI0_IRQ		(IRQ_INTERNAL_BASE + 5)
 #define BCM_6368_EHCI0_IRQ		(IRQ_INTERNAL_BASE + 7)
 #define BCM_6368_USBD_IRQ		(IRQ_INTERNAL_BASE + 8)
@@ -1133,6 +1150,7 @@
 	[IRQ_ENET0]		= BCM_## __cpu ##_ENET0_IRQ,		\
 	[IRQ_ENET1]		= BCM_## __cpu ##_ENET1_IRQ,		\
 	[IRQ_ENET_PHY]		= BCM_## __cpu ##_ENET_PHY_IRQ,		\
+	[IRQ_HSSPI]		= BCM_## __cpu ##_HSSPI_IRQ,		\
 	[IRQ_OHCI0]		= BCM_## __cpu ##_OHCI0_IRQ,		\
 	[IRQ_EHCI0]		= BCM_## __cpu ##_EHCI0_IRQ,		\
 	[IRQ_USBD]		= BCM_## __cpu ##_USBD_IRQ,		\
diff --git a/arch/mips/include/asm/mach-bcm63xx/bcm63xx_dev_hsspi.h b/arch/mips/include/asm/mach-bcm63xx/bcm63xx_dev_hsspi.h
new file mode 100644
index 0000000..1b1acaf
--- /dev/null
+++ b/arch/mips/include/asm/mach-bcm63xx/bcm63xx_dev_hsspi.h
@@ -0,0 +1,8 @@
+#ifndef BCM63XX_DEV_HSSPI_H
+#define BCM63XX_DEV_HSSPI_H
+
+#include <linux/types.h>
+
+int bcm63xx_hsspi_register(void);
+
+#endif /* BCM63XX_DEV_HSSPI_H */
diff --git a/arch/mips/include/asm/mach-bcm63xx/bcm63xx_regs.h b/arch/mips/include/asm/mach-bcm63xx/bcm63xx_regs.h
index 9875db3..ab427f8 100644
--- a/arch/mips/include/asm/mach-bcm63xx/bcm63xx_regs.h
+++ b/arch/mips/include/asm/mach-bcm63xx/bcm63xx_regs.h
@@ -463,126 +463,6 @@
 #define WDT_SOFTRESET_REG		0xc
 
 /*************************************************************************
- * _REG relative to RSET_UARTx
- *************************************************************************/
-
-/* UART Control Register */
-#define UART_CTL_REG			0x0
-#define UART_CTL_RXTMOUTCNT_SHIFT	0
-#define UART_CTL_RXTMOUTCNT_MASK	(0x1f << UART_CTL_RXTMOUTCNT_SHIFT)
-#define UART_CTL_RSTTXDN_SHIFT		5
-#define UART_CTL_RSTTXDN_MASK		(1 << UART_CTL_RSTTXDN_SHIFT)
-#define UART_CTL_RSTRXFIFO_SHIFT		6
-#define UART_CTL_RSTRXFIFO_MASK		(1 << UART_CTL_RSTRXFIFO_SHIFT)
-#define UART_CTL_RSTTXFIFO_SHIFT		7
-#define UART_CTL_RSTTXFIFO_MASK		(1 << UART_CTL_RSTTXFIFO_SHIFT)
-#define UART_CTL_STOPBITS_SHIFT		8
-#define UART_CTL_STOPBITS_MASK		(0xf << UART_CTL_STOPBITS_SHIFT)
-#define UART_CTL_STOPBITS_1		(0x7 << UART_CTL_STOPBITS_SHIFT)
-#define UART_CTL_STOPBITS_2		(0xf << UART_CTL_STOPBITS_SHIFT)
-#define UART_CTL_BITSPERSYM_SHIFT	12
-#define UART_CTL_BITSPERSYM_MASK	(0x3 << UART_CTL_BITSPERSYM_SHIFT)
-#define UART_CTL_XMITBRK_SHIFT		14
-#define UART_CTL_XMITBRK_MASK		(1 << UART_CTL_XMITBRK_SHIFT)
-#define UART_CTL_RSVD_SHIFT		15
-#define UART_CTL_RSVD_MASK		(1 << UART_CTL_RSVD_SHIFT)
-#define UART_CTL_RXPAREVEN_SHIFT		16
-#define UART_CTL_RXPAREVEN_MASK		(1 << UART_CTL_RXPAREVEN_SHIFT)
-#define UART_CTL_RXPAREN_SHIFT		17
-#define UART_CTL_RXPAREN_MASK		(1 << UART_CTL_RXPAREN_SHIFT)
-#define UART_CTL_TXPAREVEN_SHIFT		18
-#define UART_CTL_TXPAREVEN_MASK		(1 << UART_CTL_TXPAREVEN_SHIFT)
-#define UART_CTL_TXPAREN_SHIFT		18
-#define UART_CTL_TXPAREN_MASK		(1 << UART_CTL_TXPAREN_SHIFT)
-#define UART_CTL_LOOPBACK_SHIFT		20
-#define UART_CTL_LOOPBACK_MASK		(1 << UART_CTL_LOOPBACK_SHIFT)
-#define UART_CTL_RXEN_SHIFT		21
-#define UART_CTL_RXEN_MASK		(1 << UART_CTL_RXEN_SHIFT)
-#define UART_CTL_TXEN_SHIFT		22
-#define UART_CTL_TXEN_MASK		(1 << UART_CTL_TXEN_SHIFT)
-#define UART_CTL_BRGEN_SHIFT		23
-#define UART_CTL_BRGEN_MASK		(1 << UART_CTL_BRGEN_SHIFT)
-
-/* UART Baudword register */
-#define UART_BAUD_REG			0x4
-
-/* UART Misc Control register */
-#define UART_MCTL_REG			0x8
-#define UART_MCTL_DTR_SHIFT		0
-#define UART_MCTL_DTR_MASK		(1 << UART_MCTL_DTR_SHIFT)
-#define UART_MCTL_RTS_SHIFT		1
-#define UART_MCTL_RTS_MASK		(1 << UART_MCTL_RTS_SHIFT)
-#define UART_MCTL_RXFIFOTHRESH_SHIFT	8
-#define UART_MCTL_RXFIFOTHRESH_MASK	(0xf << UART_MCTL_RXFIFOTHRESH_SHIFT)
-#define UART_MCTL_TXFIFOTHRESH_SHIFT	12
-#define UART_MCTL_TXFIFOTHRESH_MASK	(0xf << UART_MCTL_TXFIFOTHRESH_SHIFT)
-#define UART_MCTL_RXFIFOFILL_SHIFT	16
-#define UART_MCTL_RXFIFOFILL_MASK	(0x1f << UART_MCTL_RXFIFOFILL_SHIFT)
-#define UART_MCTL_TXFIFOFILL_SHIFT	24
-#define UART_MCTL_TXFIFOFILL_MASK	(0x1f << UART_MCTL_TXFIFOFILL_SHIFT)
-
-/* UART External Input Configuration register */
-#define UART_EXTINP_REG			0xc
-#define UART_EXTINP_RI_SHIFT		0
-#define UART_EXTINP_RI_MASK		(1 << UART_EXTINP_RI_SHIFT)
-#define UART_EXTINP_CTS_SHIFT		1
-#define UART_EXTINP_CTS_MASK		(1 << UART_EXTINP_CTS_SHIFT)
-#define UART_EXTINP_DCD_SHIFT		2
-#define UART_EXTINP_DCD_MASK		(1 << UART_EXTINP_DCD_SHIFT)
-#define UART_EXTINP_DSR_SHIFT		3
-#define UART_EXTINP_DSR_MASK		(1 << UART_EXTINP_DSR_SHIFT)
-#define UART_EXTINP_IRSTAT(x)		(1 << (x + 4))
-#define UART_EXTINP_IRMASK(x)		(1 << (x + 8))
-#define UART_EXTINP_IR_RI		0
-#define UART_EXTINP_IR_CTS		1
-#define UART_EXTINP_IR_DCD		2
-#define UART_EXTINP_IR_DSR		3
-#define UART_EXTINP_RI_NOSENSE_SHIFT	16
-#define UART_EXTINP_RI_NOSENSE_MASK	(1 << UART_EXTINP_RI_NOSENSE_SHIFT)
-#define UART_EXTINP_CTS_NOSENSE_SHIFT	17
-#define UART_EXTINP_CTS_NOSENSE_MASK	(1 << UART_EXTINP_CTS_NOSENSE_SHIFT)
-#define UART_EXTINP_DCD_NOSENSE_SHIFT	18
-#define UART_EXTINP_DCD_NOSENSE_MASK	(1 << UART_EXTINP_DCD_NOSENSE_SHIFT)
-#define UART_EXTINP_DSR_NOSENSE_SHIFT	19
-#define UART_EXTINP_DSR_NOSENSE_MASK	(1 << UART_EXTINP_DSR_NOSENSE_SHIFT)
-
-/* UART Interrupt register */
-#define UART_IR_REG			0x10
-#define UART_IR_MASK(x)			(1 << (x + 16))
-#define UART_IR_STAT(x)			(1 << (x))
-#define UART_IR_EXTIP			0
-#define UART_IR_TXUNDER			1
-#define UART_IR_TXOVER			2
-#define UART_IR_TXTRESH			3
-#define UART_IR_TXRDLATCH		4
-#define UART_IR_TXEMPTY			5
-#define UART_IR_RXUNDER			6
-#define UART_IR_RXOVER			7
-#define UART_IR_RXTIMEOUT		8
-#define UART_IR_RXFULL			9
-#define UART_IR_RXTHRESH		10
-#define UART_IR_RXNOTEMPTY		11
-#define UART_IR_RXFRAMEERR		12
-#define UART_IR_RXPARERR		13
-#define UART_IR_RXBRK			14
-#define UART_IR_TXDONE			15
-
-/* UART Fifo register */
-#define UART_FIFO_REG			0x14
-#define UART_FIFO_VALID_SHIFT		0
-#define UART_FIFO_VALID_MASK		0xff
-#define UART_FIFO_FRAMEERR_SHIFT	8
-#define UART_FIFO_FRAMEERR_MASK		(1 << UART_FIFO_FRAMEERR_SHIFT)
-#define UART_FIFO_PARERR_SHIFT		9
-#define UART_FIFO_PARERR_MASK		(1 << UART_FIFO_PARERR_SHIFT)
-#define UART_FIFO_BRKDET_SHIFT		10
-#define UART_FIFO_BRKDET_MASK		(1 << UART_FIFO_BRKDET_SHIFT)
-#define UART_FIFO_ANYERR_MASK		(UART_FIFO_FRAMEERR_MASK |	\
-					UART_FIFO_PARERR_MASK |		\
-					UART_FIFO_BRKDET_MASK)
-
-
-/*************************************************************************
  * _REG relative to RSET_GPIO
  *************************************************************************/
 
diff --git a/arch/mips/include/asm/mach-generic/dma-coherence.h b/arch/mips/include/asm/mach-generic/dma-coherence.h
index a9e8f6b..7629c35 100644
--- a/arch/mips/include/asm/mach-generic/dma-coherence.h
+++ b/arch/mips/include/asm/mach-generic/dma-coherence.h
@@ -49,11 +49,7 @@
 
 static inline int plat_device_is_coherent(struct device *dev)
 {
-#ifdef CONFIG_DMA_COHERENT
-	return 1;
-#else
 	return coherentio;
-#endif
 }
 
 #ifdef CONFIG_SWIOTLB
diff --git a/arch/mips/include/asm/mach-generic/floppy.h b/arch/mips/include/asm/mach-generic/floppy.h
index 5b5cd68..e2561d9 100644
--- a/arch/mips/include/asm/mach-generic/floppy.h
+++ b/arch/mips/include/asm/mach-generic/floppy.h
@@ -9,7 +9,6 @@
 #define __ASM_MACH_GENERIC_FLOPPY_H
 
 #include <linux/delay.h>
-#include <linux/init.h>
 #include <linux/ioport.h>
 #include <linux/sched.h>
 #include <linux/linkage.h>
diff --git a/arch/mips/include/asm/mach-generic/ide.h b/arch/mips/include/asm/mach-generic/ide.h
index affa66f5..4ae5fbc 100644
--- a/arch/mips/include/asm/mach-generic/ide.h
+++ b/arch/mips/include/asm/mach-generic/ide.h
@@ -23,7 +23,7 @@
 static inline void __ide_flush_prologue(void)
 {
 #ifdef CONFIG_SMP
-	if (cpu_has_dc_aliases)
+	if (cpu_has_dc_aliases || !cpu_has_ic_fills_f_dc)
 		preempt_disable();
 #endif
 }
@@ -31,14 +31,14 @@
 static inline void __ide_flush_epilogue(void)
 {
 #ifdef CONFIG_SMP
-	if (cpu_has_dc_aliases)
+	if (cpu_has_dc_aliases || !cpu_has_ic_fills_f_dc)
 		preempt_enable();
 #endif
 }
 
 static inline void __ide_flush_dcache_range(unsigned long addr, unsigned long size)
 {
-	if (cpu_has_dc_aliases) {
+	if (cpu_has_dc_aliases || !cpu_has_ic_fills_f_dc) {
 		unsigned long end = addr + size;
 
 		while (addr < end) {
diff --git a/arch/mips/include/asm/mach-jazz/floppy.h b/arch/mips/include/asm/mach-jazz/floppy.h
index 62aa1e2..4b86c88a 100644
--- a/arch/mips/include/asm/mach-jazz/floppy.h
+++ b/arch/mips/include/asm/mach-jazz/floppy.h
@@ -9,7 +9,6 @@
 #define __ASM_MACH_JAZZ_FLOPPY_H
 
 #include <linux/delay.h>
-#include <linux/init.h>
 #include <linux/linkage.h>
 #include <linux/types.h>
 #include <linux/mm.h>
diff --git a/arch/mips/include/asm/mach-jz4740/platform.h b/arch/mips/include/asm/mach-jz4740/platform.h
index 05988c2..069b43a 100644
--- a/arch/mips/include/asm/mach-jz4740/platform.h
+++ b/arch/mips/include/asm/mach-jz4740/platform.h
@@ -21,6 +21,7 @@
 
 extern struct platform_device jz4740_usb_ohci_device;
 extern struct platform_device jz4740_udc_device;
+extern struct platform_device jz4740_udc_xceiv_device;
 extern struct platform_device jz4740_mmc_device;
 extern struct platform_device jz4740_rtc_device;
 extern struct platform_device jz4740_i2c_device;
diff --git a/arch/mips/include/asm/mach-netlogic/irq.h b/arch/mips/include/asm/mach-netlogic/irq.h
index 868ed8a..c0dbd53 100644
--- a/arch/mips/include/asm/mach-netlogic/irq.h
+++ b/arch/mips/include/asm/mach-netlogic/irq.h
@@ -9,7 +9,8 @@
 #define __ASM_NETLOGIC_IRQ_H
 
 #include <asm/mach-netlogic/multi-node.h>
-#define NR_IRQS			(64 * NLM_NR_NODES)
+#define NLM_IRQS_PER_NODE	1024
+#define NR_IRQS			(NLM_IRQS_PER_NODE * NLM_NR_NODES)
 
 #define MIPS_CPU_IRQ_BASE	0
 
diff --git a/arch/mips/include/asm/mach-netlogic/multi-node.h b/arch/mips/include/asm/mach-netlogic/multi-node.h
index d62fc77..9ed8dac 100644
--- a/arch/mips/include/asm/mach-netlogic/multi-node.h
+++ b/arch/mips/include/asm/mach-netlogic/multi-node.h
@@ -47,8 +47,37 @@
 #endif
 #endif
 
-#define NLM_CORES_PER_NODE	8
 #define NLM_THREADS_PER_CORE	4
-#define NLM_CPUS_PER_NODE	(NLM_CORES_PER_NODE * NLM_THREADS_PER_CORE)
+#ifdef CONFIG_CPU_XLR
+#define nlm_cores_per_node()	8
+#else
+extern unsigned int xlp_cores_per_node;
+#define nlm_cores_per_node()	xlp_cores_per_node
+#endif
+
+#define nlm_threads_per_node()	(nlm_cores_per_node() * NLM_THREADS_PER_CORE)
+#define nlm_cpuid_to_node(c)	((c) / nlm_threads_per_node())
+
+struct nlm_soc_info {
+	unsigned long	coremask;	/* cores enabled on the soc */
+	unsigned long	ebase;		/* not used now */
+	uint64_t	irqmask;	/* EIMR for the node */
+	uint64_t	sysbase;	/* only for XLP - sys block base */
+	uint64_t	picbase;	/* PIC block base */
+	spinlock_t	piclock;	/* lock for PIC access */
+	cpumask_t	cpumask;	/* logical cpu mask for node */
+	unsigned int	socbus;
+};
+
+extern struct nlm_soc_info nlm_nodes[NLM_NR_NODES];
+#define nlm_get_node(i)		(&nlm_nodes[i])
+#define nlm_node_present(n)	((n) >= 0 && (n) < NLM_NR_NODES && \
+					nlm_get_node(n)->coremask != 0)
+#ifdef CONFIG_CPU_XLR
+#define nlm_current_node()	(&nlm_nodes[0])
+#else
+#define nlm_current_node()	(&nlm_nodes[nlm_nodeid()])
+#endif
+void nlm_node_init(int node);
 
 #endif
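
With nlm_cores_per_node() now a runtime value (xlp_cores_per_node), the node/core/thread decomposition used by nlm_cpuid_to_node() depends on the detected part. A worked example, assuming the common XLP case of 8 cores per node and NLM_THREADS_PER_CORE = 4 (the function is illustrative only):

/* Logical CPU 37 with 32 hardware threads per node lands on node 1,
 * core 1, thread 1. */
static void example_decompose_cpu(int cpu, int cores_per_node)
{
	int threads_per_node = cores_per_node * 4;	/* NLM_THREADS_PER_CORE */
	int node   = cpu / threads_per_node;		/* 37 / 32 = 1 */
	int core   = (cpu % threads_per_node) / 4;	/* 5 / 4  = 1 */
	int thread = cpu % 4;				/* 37 % 4 = 1 */

	(void)node; (void)core; (void)thread;
}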
diff --git a/arch/mips/include/asm/mach-netlogic/topology.h b/arch/mips/include/asm/mach-netlogic/topology.h
new file mode 100644
index 0000000..0da99fa
--- /dev/null
+++ b/arch/mips/include/asm/mach-netlogic/topology.h
@@ -0,0 +1,20 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2013 Broadcom Corporation
+ */
+#ifndef _ASM_MACH_NETLOGIC_TOPOLOGY_H
+#define _ASM_MACH_NETLOGIC_TOPOLOGY_H
+
+#include <asm/mach-netlogic/multi-node.h>
+
+#define topology_physical_package_id(cpu)	cpu_to_node(cpu)
+#define topology_core_id(cpu)	(cpu_logical_map(cpu) / NLM_THREADS_PER_CORE)
+#define topology_thread_cpumask(cpu)		(&cpu_sibling_map[cpu])
+#define topology_core_cpumask(cpu)	cpumask_of_node(cpu_to_node(cpu))
+
+#include <asm-generic/topology.h>
+
+#endif /* _ASM_MACH_NETLOGIC_TOPOLOGY_H */
diff --git a/arch/mips/include/asm/mips-boards/piix4.h b/arch/mips/include/asm/mips-boards/piix4.h
index e332279..836e2ed 100644
--- a/arch/mips/include/asm/mips-boards/piix4.h
+++ b/arch/mips/include/asm/mips-boards/piix4.h
@@ -26,6 +26,10 @@
 #define   PIIX4_FUNC0_PIRQRC_IRQ_ROUTING_DISABLE	(1 << 7)
 #define   PIIX4_FUNC0_PIRQRC_IRQ_ROUTING_MASK		0xf
 #define   PIIX4_FUNC0_PIRQRC_IRQ_ROUTING_MAX		16
+/* SERIRQ Control */
+#define PIIX4_FUNC0_SERIRQC			0x64
+#define   PIIX4_FUNC0_SERIRQC_EN			(1 << 7)
+#define   PIIX4_FUNC0_SERIRQC_CONT			(1 << 6)
 /* Top Of Memory */
 #define PIIX4_FUNC0_TOM				0x69
 #define   PIIX4_FUNC0_TOM_TOP_OF_MEMORY_MASK		0xf0
@@ -34,6 +38,9 @@
 #define   PIIX4_FUNC0_DLC_USBPR_EN			(1 << 2)
 #define   PIIX4_FUNC0_DLC_PASSIVE_RELEASE_EN		(1 << 1)
 #define   PIIX4_FUNC0_DLC_DELAYED_TRANSACTION_EN	(1 << 0)
+/* General Configuration */
+#define PIIX4_FUNC0_GENCFG			0xb0
+#define   PIIX4_FUNC0_GENCFG_SERIRQ			(1 << 16)
 
 /* IDE Timing */
 #define PIIX4_FUNC1_IDETIM_PRIMARY_LO		0x40
diff --git a/arch/mips/include/asm/mipsregs.h b/arch/mips/include/asm/mipsregs.h
index e033141..bbc3dd4 100644
--- a/arch/mips/include/asm/mipsregs.h
+++ b/arch/mips/include/asm/mipsregs.h
@@ -14,6 +14,7 @@
 #define _ASM_MIPSREGS_H
 
 #include <linux/linkage.h>
+#include <linux/types.h>
 #include <asm/hazards.h>
 #include <asm/war.h>
 
@@ -573,7 +574,9 @@
 #define MIPS_CONF1_IA		(_ULCAST_(7) << 16)
 #define MIPS_CONF1_IL		(_ULCAST_(7) << 19)
 #define MIPS_CONF1_IS		(_ULCAST_(7) << 22)
-#define MIPS_CONF1_TLBS		(_ULCAST_(63)<< 25)
+#define MIPS_CONF1_TLBS_SHIFT   (25)
+#define MIPS_CONF1_TLBS_SIZE    (6)
+#define MIPS_CONF1_TLBS         (_ULCAST_(63) << MIPS_CONF1_TLBS_SHIFT)
 
 #define MIPS_CONF2_SA		(_ULCAST_(15)<<	 0)
 #define MIPS_CONF2_SL		(_ULCAST_(15)<<	 4)
@@ -587,21 +590,53 @@
 #define MIPS_CONF3_TL		(_ULCAST_(1) <<	 0)
 #define MIPS_CONF3_SM		(_ULCAST_(1) <<	 1)
 #define MIPS_CONF3_MT		(_ULCAST_(1) <<	 2)
+#define MIPS_CONF3_CDMM		(_ULCAST_(1) <<	 3)
 #define MIPS_CONF3_SP		(_ULCAST_(1) <<	 4)
 #define MIPS_CONF3_VINT		(_ULCAST_(1) <<	 5)
 #define MIPS_CONF3_VEIC		(_ULCAST_(1) <<	 6)
 #define MIPS_CONF3_LPA		(_ULCAST_(1) <<	 7)
+#define MIPS_CONF3_ITL		(_ULCAST_(1) <<	 8)
+#define MIPS_CONF3_CTXTC	(_ULCAST_(1) <<	 9)
 #define MIPS_CONF3_DSP		(_ULCAST_(1) << 10)
 #define MIPS_CONF3_DSP2P	(_ULCAST_(1) << 11)
 #define MIPS_CONF3_RXI		(_ULCAST_(1) << 12)
 #define MIPS_CONF3_ULRI		(_ULCAST_(1) << 13)
 #define MIPS_CONF3_ISA		(_ULCAST_(3) << 14)
 #define MIPS_CONF3_ISA_OE	(_ULCAST_(1) << 16)
+#define MIPS_CONF3_MCU		(_ULCAST_(1) << 17)
+#define MIPS_CONF3_MMAR		(_ULCAST_(7) << 18)
+#define MIPS_CONF3_IPLW		(_ULCAST_(3) << 21)
 #define MIPS_CONF3_VZ		(_ULCAST_(1) << 23)
+#define MIPS_CONF3_PW		(_ULCAST_(1) << 24)
+#define MIPS_CONF3_SC		(_ULCAST_(1) << 25)
+#define MIPS_CONF3_BI		(_ULCAST_(1) << 26)
+#define MIPS_CONF3_BP		(_ULCAST_(1) << 27)
+#define MIPS_CONF3_MSA		(_ULCAST_(1) << 28)
+#define MIPS_CONF3_CMGCR	(_ULCAST_(1) << 29)
+#define MIPS_CONF3_BPG		(_ULCAST_(1) << 30)
 
+#define MIPS_CONF4_MMUSIZEEXT_SHIFT	(0)
 #define MIPS_CONF4_MMUSIZEEXT	(_ULCAST_(255) << 0)
+#define MIPS_CONF4_FTLBSETS_SHIFT	(0)
+#define MIPS_CONF4_FTLBSETS	(_ULCAST_(15) << MIPS_CONF4_FTLBSETS_SHIFT)
+#define MIPS_CONF4_FTLBWAYS_SHIFT	(4)
+#define MIPS_CONF4_FTLBWAYS	(_ULCAST_(15) << MIPS_CONF4_FTLBWAYS_SHIFT)
+#define MIPS_CONF4_FTLBPAGESIZE_SHIFT	(8)
+/* bits 10:8 in FTLB-only configurations */
+#define MIPS_CONF4_FTLBPAGESIZE (_ULCAST_(7) << MIPS_CONF4_FTLBPAGESIZE_SHIFT)
+/* bits 12:8 in VTLB-FTLB only configurations */
+#define MIPS_CONF4_VFTLBPAGESIZE (_ULCAST_(31) << MIPS_CONF4_FTLBPAGESIZE_SHIFT)
 #define MIPS_CONF4_MMUEXTDEF	(_ULCAST_(3) << 14)
 #define MIPS_CONF4_MMUEXTDEF_MMUSIZEEXT (_ULCAST_(1) << 14)
+#define MIPS_CONF4_MMUEXTDEF_FTLBSIZEEXT	(_ULCAST_(2) << 14)
+#define MIPS_CONF4_MMUEXTDEF_VTLBSIZEEXT	(_ULCAST_(3) << 14)
+#define MIPS_CONF4_KSCREXIST	(_ULCAST_(255) << 16)
+#define MIPS_CONF4_VTLBSIZEEXT_SHIFT	(24)
+#define MIPS_CONF4_VTLBSIZEEXT	(_ULCAST_(15) << MIPS_CONF4_VTLBSIZEEXT_SHIFT)
+#define MIPS_CONF4_AE		(_ULCAST_(1) << 28)
+#define MIPS_CONF4_IE		(_ULCAST_(3) << 29)
+#define MIPS_CONF4_TLBINV	(_ULCAST_(2) << 29)
 
 #define MIPS_CONF5_NF		(_ULCAST_(1) << 0)
 #define MIPS_CONF5_UFR		(_ULCAST_(1) << 2)
@@ -611,11 +646,15 @@
 #define MIPS_CONF5_K		(_ULCAST_(1) << 30)
 
 #define MIPS_CONF6_SYND		(_ULCAST_(1) << 13)
+/* proAptiv FTLB on/off bit */
+#define MIPS_CONF6_FTLBEN	(_ULCAST_(1) << 15)
 
 #define MIPS_CONF7_WII		(_ULCAST_(1) << 31)
 
 #define MIPS_CONF7_RPS		(_ULCAST_(1) << 2)
 
+/*  EntryHI bit definition */
+#define MIPS_ENTRYHI_EHINV	(_ULCAST_(1) << 10)
 
 /*
  * Bits in the MIPS32/64 coprocessor 1 (FPU) revision register.
@@ -628,6 +667,26 @@
 #define MIPS_FPIR_L		(_ULCAST_(1) << 21)
 #define MIPS_FPIR_F64		(_ULCAST_(1) << 22)
 
+/*
+ * Bits in the MIPS32 Memory Segmentation registers.
+ */
+#define MIPS_SEGCFG_PA_SHIFT	9
+#define MIPS_SEGCFG_PA		(_ULCAST_(127) << MIPS_SEGCFG_PA_SHIFT)
+#define MIPS_SEGCFG_AM_SHIFT	4
+#define MIPS_SEGCFG_AM		(_ULCAST_(7) << MIPS_SEGCFG_AM_SHIFT)
+#define MIPS_SEGCFG_EU_SHIFT	3
+#define MIPS_SEGCFG_EU		(_ULCAST_(1) << MIPS_SEGCFG_EU_SHIFT)
+#define MIPS_SEGCFG_C_SHIFT	0
+#define MIPS_SEGCFG_C		(_ULCAST_(7) << MIPS_SEGCFG_C_SHIFT)
+
+#define MIPS_SEGCFG_UUSK	_ULCAST_(7)
+#define MIPS_SEGCFG_USK		_ULCAST_(5)
+#define MIPS_SEGCFG_MUSUK	_ULCAST_(4)
+#define MIPS_SEGCFG_MUSK	_ULCAST_(3)
+#define MIPS_SEGCFG_MSK		_ULCAST_(2)
+#define MIPS_SEGCFG_MK		_ULCAST_(1)
+#define MIPS_SEGCFG_UK		_ULCAST_(0)
+
 #ifndef __ASSEMBLY__
 
 /*
@@ -649,6 +708,19 @@
 }
 
 /*
+ * TLB Invalidate Flush
+ */
+static inline void tlbinvf(void)
+{
+	__asm__ __volatile__(
+		".set push\n\t"
+		".set noreorder\n\t"
+		".word 0x42000004\n\t" /* tlbinvf */
+		".set pop");
+}
+
+
+/*
  * Functions to access the R10000 performance counters.	 These are basically
  * mfc0 and mtc0 instructions from and to coprocessor register with a 5-bit
  * performance counter number encoded into bits 1 ... 5 of the instruction.
@@ -1102,6 +1174,15 @@
 #define read_c0_ebase()		__read_32bit_c0_register($15, 1)
 #define write_c0_ebase(val)	__write_32bit_c0_register($15, 1, val)
 
+/* MIPSR3 */
+#define read_c0_segctl0()	__read_32bit_c0_register($5, 2)
+#define write_c0_segctl0(val)	__write_32bit_c0_register($5, 2, val)
+
+#define read_c0_segctl1()	__read_32bit_c0_register($5, 3)
+#define write_c0_segctl1(val)	__write_32bit_c0_register($5, 3, val)
+
+#define read_c0_segctl2()	__read_32bit_c0_register($5, 4)
+#define write_c0_segctl2(val)	__write_32bit_c0_register($5, 4, val)
 
 /* Cavium OCTEON (cnMIPS) */
 #define read_c0_cvmcount()	__read_ulong_c0_register($9, 6)
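
tlbinvf() is emitted as a raw opcode (0x42000004) so it assembles even on toolchains that predate the instruction, and it only makes sense on cores that advertise the feature. A hedged usage sketch pairing it with the new cpu_has_tlbinv test; the function name and fallback are illustrative:

#include <asm/cpu-features.h>
#include <asm/mipsregs.h>
#include <asm/tlbflush.h>

/* Sketch: invalidate the VTLB with a single TLBINVF where the core
 * advertises it, otherwise fall back to the conventional flush.  FTLB
 * sets would need per-set handling, which is omitted here. */
static void example_flush_tlb(void)
{
	if (cpu_has_tlbinv)
		tlbinvf();
	else
		local_flush_tlb_all();
}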
diff --git a/arch/mips/include/asm/netlogic/common.h b/arch/mips/include/asm/netlogic/common.h
index bb68c33..c281f03 100644
--- a/arch/mips/include/asm/netlogic/common.h
+++ b/arch/mips/include/asm/netlogic/common.h
@@ -84,7 +84,6 @@
  */
 void nlm_init_boot_cpu(void);
 unsigned int nlm_get_cpu_frequency(void);
-void nlm_node_init(int node);
 extern struct plat_smp_ops nlm_smp_ops;
 extern char nlm_reset_entry[], nlm_reset_entry_end[];
 
@@ -94,26 +93,16 @@
 extern unsigned int nlm_threads_per_core;
 extern cpumask_t nlm_cpumask;
 
-struct nlm_soc_info {
-	unsigned long coremask; /* cores enabled on the soc */
-	unsigned long ebase;
-	uint64_t irqmask;
-	uint64_t sysbase;	/* only for XLP */
-	uint64_t picbase;
-	spinlock_t piclock;
-};
-
-#define nlm_get_node(i)		(&nlm_nodes[i])
-#ifdef CONFIG_CPU_XLR
-#define nlm_current_node()	(&nlm_nodes[0])
-#else
-#define nlm_current_node()	(&nlm_nodes[nlm_nodeid()])
-#endif
-
 struct irq_data;
 uint64_t nlm_pci_irqmask(int node);
+void nlm_setup_pic_irq(int node, int picirq, int irq, int irt);
 void nlm_set_pic_extra_ack(int node, int irq,  void (*xack)(struct irq_data *));
 
+#ifdef CONFIG_PCI_MSI
+void nlm_dispatch_msi(int node, int lirq);
+void nlm_dispatch_msix(int node, int msixirq);
+#endif
+
 /*
  * The NR_IRQs is divided between nodes, each of them has a separate irq space
  */
@@ -122,7 +111,6 @@
 	return node * NR_IRQS / NLM_NR_NODES + irq;
 }
 
-extern struct nlm_soc_info nlm_nodes[NLM_NR_NODES];
 extern int nlm_cpu_ready[];
 #endif
 #endif /* _NETLOGIC_COMMON_H_ */
diff --git a/arch/mips/include/asm/netlogic/mips-extns.h b/arch/mips/include/asm/netlogic/mips-extns.h
index f299d31..de9aada 100644
--- a/arch/mips/include/asm/netlogic/mips-extns.h
+++ b/arch/mips/include/asm/netlogic/mips-extns.h
@@ -146,7 +146,12 @@
 
 static inline int nlm_nodeid(void)
 {
-	return (__read_32bit_c0_register($15, 1) >> 5) & 0x3;
+	uint32_t prid = read_c0_prid();
+
+	if ((prid & 0xff00) == PRID_IMP_NETLOGIC_XLP9XX)
+		return (__read_32bit_c0_register($15, 1) >> 7) & 0x7;
+	else
+		return (__read_32bit_c0_register($15, 1) >> 5) & 0x3;
 }
 
 static inline unsigned int nlm_core_id(void)
diff --git a/arch/mips/include/asm/netlogic/xlp-hal/bridge.h b/arch/mips/include/asm/netlogic/xlp-hal/bridge.h
index 4e8eacb..3067f98 100644
--- a/arch/mips/include/asm/netlogic/xlp-hal/bridge.h
+++ b/arch/mips/include/asm/netlogic/xlp-hal/bridge.h
@@ -69,44 +69,9 @@
 #define BRIDGE_FLASH_LIMIT3		0x13
 
 #define BRIDGE_DRAM_BAR(i)		(0x14 + (i))
-#define BRIDGE_DRAM_BAR0		0x14
-#define BRIDGE_DRAM_BAR1		0x15
-#define BRIDGE_DRAM_BAR2		0x16
-#define BRIDGE_DRAM_BAR3		0x17
-#define BRIDGE_DRAM_BAR4		0x18
-#define BRIDGE_DRAM_BAR5		0x19
-#define BRIDGE_DRAM_BAR6		0x1a
-#define BRIDGE_DRAM_BAR7		0x1b
-
 #define BRIDGE_DRAM_LIMIT(i)		(0x1c + (i))
-#define BRIDGE_DRAM_LIMIT0		0x1c
-#define BRIDGE_DRAM_LIMIT1		0x1d
-#define BRIDGE_DRAM_LIMIT2		0x1e
-#define BRIDGE_DRAM_LIMIT3		0x1f
-#define BRIDGE_DRAM_LIMIT4		0x20
-#define BRIDGE_DRAM_LIMIT5		0x21
-#define BRIDGE_DRAM_LIMIT6		0x22
-#define BRIDGE_DRAM_LIMIT7		0x23
-
 #define BRIDGE_DRAM_NODE_TRANSLN(i)	(0x24 + (i))
-#define BRIDGE_DRAM_NODE_TRANSLN0	0x24
-#define BRIDGE_DRAM_NODE_TRANSLN1	0x25
-#define BRIDGE_DRAM_NODE_TRANSLN2	0x26
-#define BRIDGE_DRAM_NODE_TRANSLN3	0x27
-#define BRIDGE_DRAM_NODE_TRANSLN4	0x28
-#define BRIDGE_DRAM_NODE_TRANSLN5	0x29
-#define BRIDGE_DRAM_NODE_TRANSLN6	0x2a
-#define BRIDGE_DRAM_NODE_TRANSLN7	0x2b
-
 #define BRIDGE_DRAM_CHNL_TRANSLN(i)	(0x2c + (i))
-#define BRIDGE_DRAM_CHNL_TRANSLN0	0x2c
-#define BRIDGE_DRAM_CHNL_TRANSLN1	0x2d
-#define BRIDGE_DRAM_CHNL_TRANSLN2	0x2e
-#define BRIDGE_DRAM_CHNL_TRANSLN3	0x2f
-#define BRIDGE_DRAM_CHNL_TRANSLN4	0x30
-#define BRIDGE_DRAM_CHNL_TRANSLN5	0x31
-#define BRIDGE_DRAM_CHNL_TRANSLN6	0x32
-#define BRIDGE_DRAM_CHNL_TRANSLN7	0x33
 
 #define BRIDGE_PCIEMEM_BASE0		0x34
 #define BRIDGE_PCIEMEM_BASE1		0x35
@@ -178,12 +143,42 @@
 #define BRIDGE_GIO_WEIGHT		0x2cb
 #define BRIDGE_FLASH_WEIGHT		0x2cc
 
+/* FIXME verify */
+#define BRIDGE_9XX_FLASH_BAR(i)		(0x11 + (i))
+#define BRIDGE_9XX_FLASH_BAR_LIMIT(i)	(0x15 + (i))
+
+#define BRIDGE_9XX_DRAM_BAR(i)		(0x19 + (i))
+#define BRIDGE_9XX_DRAM_LIMIT(i)	(0x29 + (i))
+#define BRIDGE_9XX_DRAM_NODE_TRANSLN(i)	(0x39 + (i))
+#define BRIDGE_9XX_DRAM_CHNL_TRANSLN(i)	(0x49 + (i))
+
+#define BRIDGE_9XX_ADDRESS_ERROR0	0x9d
+#define BRIDGE_9XX_ADDRESS_ERROR1	0x9e
+#define BRIDGE_9XX_ADDRESS_ERROR2	0x9f
+
+#define BRIDGE_9XX_PCIEMEM_BASE0	0x59
+#define BRIDGE_9XX_PCIEMEM_BASE1	0x5a
+#define BRIDGE_9XX_PCIEMEM_BASE2	0x5b
+#define BRIDGE_9XX_PCIEMEM_BASE3	0x5c
+#define BRIDGE_9XX_PCIEMEM_LIMIT0	0x5d
+#define BRIDGE_9XX_PCIEMEM_LIMIT1	0x5e
+#define BRIDGE_9XX_PCIEMEM_LIMIT2	0x5f
+#define BRIDGE_9XX_PCIEMEM_LIMIT3	0x60
+#define BRIDGE_9XX_PCIEIO_BASE0		0x61
+#define BRIDGE_9XX_PCIEIO_BASE1		0x62
+#define BRIDGE_9XX_PCIEIO_BASE2		0x63
+#define BRIDGE_9XX_PCIEIO_BASE3		0x64
+#define BRIDGE_9XX_PCIEIO_LIMIT0	0x65
+#define BRIDGE_9XX_PCIEIO_LIMIT1	0x66
+#define BRIDGE_9XX_PCIEIO_LIMIT2	0x67
+#define BRIDGE_9XX_PCIEIO_LIMIT3	0x68
+
 #ifndef __ASSEMBLY__
 
 #define nlm_read_bridge_reg(b, r)	nlm_read_reg(b, r)
 #define nlm_write_bridge_reg(b, r, v)	nlm_write_reg(b, r, v)
-#define nlm_get_bridge_pcibase(node)	\
-			nlm_pcicfg_base(XLP_IO_BRIDGE_OFFSET(node))
+#define nlm_get_bridge_pcibase(node)	nlm_pcicfg_base(cpu_is_xlp9xx() ? \
+		XLP9XX_IO_BRIDGE_OFFSET(node) : XLP_IO_BRIDGE_OFFSET(node))
 #define nlm_get_bridge_regbase(node)	\
 			(nlm_get_bridge_pcibase(node) + XLP_IO_PCI_HDRSZ)
 
diff --git a/arch/mips/include/asm/netlogic/xlp-hal/iomap.h b/arch/mips/include/asm/netlogic/xlp-hal/iomap.h
index 55eee77..1f23dfa 100644
--- a/arch/mips/include/asm/netlogic/xlp-hal/iomap.h
+++ b/arch/mips/include/asm/netlogic/xlp-hal/iomap.h
@@ -48,8 +48,10 @@
 #define XLP_IO_SIZE			(64 << 20)	/* ECFG space size */
 #define XLP_IO_PCI_HDRSZ		0x100
 #define XLP_IO_DEV(node, dev)		((dev) + (node) * 8)
-#define XLP_HDR_OFFSET(node, bus, dev, fn)	(((bus) << 20) | \
-				((XLP_IO_DEV(node, dev)) << 15) | ((fn) << 12))
+#define XLP_IO_PCI_OFFSET(b, d, f)	(((b) << 20) | ((d) << 15) | ((f) << 12))
+
+#define XLP_HDR_OFFSET(node, bus, dev, fn) \
+		XLP_IO_PCI_OFFSET(bus, XLP_IO_DEV(node, dev), fn)
 
 #define XLP_IO_BRIDGE_OFFSET(node)	XLP_HDR_OFFSET(node, 0, 0, 0)
 /* coherent inter chip */
@@ -109,6 +111,36 @@
 #define XLP_IO_MMC_OFFSET(node, slot)	\
 		((XLP_IO_SD_OFFSET(node))+(slot*0x100)+XLP_IO_PCI_HDRSZ)
 
+/* Things have changed drastically in XLP 9XX */
+#define XLP9XX_HDR_OFFSET(n, d, f)	\
+			XLP_IO_PCI_OFFSET(xlp9xx_get_socbus(n), d, f)
+
+#define XLP9XX_IO_BRIDGE_OFFSET(node)	XLP_IO_PCI_OFFSET(0, 0, node)
+#define XLP9XX_IO_PIC_OFFSET(node)	XLP9XX_HDR_OFFSET(node, 2, 0)
+#define XLP9XX_IO_UART_OFFSET(node)	XLP9XX_HDR_OFFSET(node, 2, 2)
+#define XLP9XX_IO_SYS_OFFSET(node)	XLP9XX_HDR_OFFSET(node, 6, 0)
+#define XLP9XX_IO_FUSE_OFFSET(node)	XLP9XX_HDR_OFFSET(node, 6, 1)
+#define XLP9XX_IO_JTAG_OFFSET(node)	XLP9XX_HDR_OFFSET(node, 6, 4)
+
+#define XLP9XX_IO_PCIE_OFFSET(node, i)	XLP9XX_HDR_OFFSET(node, 1, i)
+#define XLP9XX_IO_PCIE0_OFFSET(node)	XLP9XX_HDR_OFFSET(node, 1, 0)
+#define XLP9XX_IO_PCIE2_OFFSET(node)	XLP9XX_HDR_OFFSET(node, 1, 2)
+#define XLP9XX_IO_PCIE3_OFFSET(node)	XLP9XX_HDR_OFFSET(node, 1, 3)
+
+/* XLP9xx USB block */
+#define XLP9XX_IO_USB_OFFSET(node, i)		XLP9XX_HDR_OFFSET(node, 4, i)
+#define XLP9XX_IO_USB_XHCI0_OFFSET(node)	XLP9XX_HDR_OFFSET(node, 4, 1)
+#define XLP9XX_IO_USB_XHCI1_OFFSET(node)	XLP9XX_HDR_OFFSET(node, 4, 2)
+
+/* XLP9XX on-chip SATA controller */
+#define XLP9XX_IO_SATA_OFFSET(node)		XLP9XX_HDR_OFFSET(node, 3, 2)
+
+#define XLP9XX_IO_NOR_OFFSET(node)		XLP9XX_HDR_OFFSET(node, 7, 0)
+#define XLP9XX_IO_NAND_OFFSET(node)		XLP9XX_HDR_OFFSET(node, 7, 1)
+#define XLP9XX_IO_SPI_OFFSET(node)		XLP9XX_HDR_OFFSET(node, 7, 2)
+/* SD flash */
+#define XLP9XX_IO_MMCSD_OFFSET(node)		XLP9XX_HDR_OFFSET(node, 7, 3)
+
 /* PCI config header register id's */
 #define XLP_PCI_CFGREG0			0x00
 #define XLP_PCI_CFGREG1			0x01
@@ -156,11 +188,23 @@
 #define PCI_DEVICE_ID_NLM_MMC		0x1018
 #define PCI_DEVICE_ID_NLM_XHCI		0x101d
 
+#define PCI_DEVICE_ID_XLP9XX_SATA	0x901A
+#define PCI_DEVICE_ID_XLP9XX_XHCI	0x901D
+
 #ifndef __ASSEMBLY__
 
 #define nlm_read_pci_reg(b, r)		nlm_read_reg(b, r)
 #define nlm_write_pci_reg(b, r, v)	nlm_write_reg(b, r, v)
 
+static inline int xlp9xx_get_socbus(int node)
+{
+	uint64_t socbridge;
+
+	if (node == 0)
+		return 1;
+	socbridge = nlm_pcicfg_base(XLP9XX_IO_BRIDGE_OFFSET(node));
+	return (nlm_read_pci_reg(socbridge, 0x6) >> 8) & 0xff;
+}
 #endif /* !__ASSEMBLY */
 
 #endif /* __NLM_HAL_IOMAP_H__ */
diff --git a/arch/mips/include/asm/netlogic/xlp-hal/pcibus.h b/arch/mips/include/asm/netlogic/xlp-hal/pcibus.h
index b559cb9..d4deb87 100644
--- a/arch/mips/include/asm/netlogic/xlp-hal/pcibus.h
+++ b/arch/mips/include/asm/netlogic/xlp-hal/pcibus.h
@@ -52,25 +52,48 @@
 #define PCIE_BYTE_SWAP_MEM_LIM		0x248
 #define PCIE_BYTE_SWAP_IO_BASE		0x249
 #define PCIE_BYTE_SWAP_IO_LIM		0x24A
+
+#define PCIE_BRIDGE_MSIX_ADDR_BASE	0x24F
+#define PCIE_BRIDGE_MSIX_ADDR_LIMIT	0x250
 #define PCIE_MSI_STATUS			0x25A
 #define PCIE_MSI_EN			0x25B
+#define PCIE_MSIX_STATUS		0x25D
+#define PCIE_INT_STATUS0		0x25F
+#define PCIE_INT_STATUS1		0x260
 #define PCIE_INT_EN0			0x261
+#define PCIE_INT_EN1			0x262
 
-/* PCIE_MSI_EN */
-#define PCIE_MSI_VECTOR_INT_EN		0xFFFFFFFF
+/* XLP9XX has basic changes */
+#define PCIE_9XX_BYTE_SWAP_MEM_BASE	0x25c
+#define PCIE_9XX_BYTE_SWAP_MEM_LIM	0x25d
+#define PCIE_9XX_BYTE_SWAP_IO_BASE	0x25e
+#define PCIE_9XX_BYTE_SWAP_IO_LIM	0x25f
 
-/* PCIE_INT_EN0 */
-#define PCIE_MSI_INT_EN			(1 << 9)
+/* other */
+#define PCIE_NLINKS			4
 
+/* MSI addresses */
+#define MSI_ADDR_BASE			0xfffee00000ULL
+#define MSI_ADDR_SZ			0x10000
+#define MSI_LINK_ADDR(n, l)		(MSI_ADDR_BASE + \
+				(PCIE_NLINKS * (n) + (l)) * MSI_ADDR_SZ)
+#define MSIX_ADDR_BASE			0xfffef00000ULL
+#define MSIX_LINK_ADDR(n, l)		(MSIX_ADDR_BASE + \
+				(PCIE_NLINKS * (n) + (l)) * MSI_ADDR_SZ)
 #ifndef __ASSEMBLY__
 
 #define nlm_read_pcie_reg(b, r)		nlm_read_reg(b, r)
 #define nlm_write_pcie_reg(b, r, v)	nlm_write_reg(b, r, v)
-#define nlm_get_pcie_base(node, inst)	\
-			nlm_pcicfg_base(XLP_IO_PCIE_OFFSET(node, inst))
-#define nlm_get_pcie_regbase(node, inst)	\
-			(nlm_get_pcie_base(node, inst) + XLP_IO_PCI_HDRSZ)
+#define nlm_get_pcie_base(node, inst)	nlm_pcicfg_base(cpu_is_xlp9xx() ? \
+	XLP9XX_IO_PCIE_OFFSET(node, inst) : XLP_IO_PCIE_OFFSET(node, inst))
 
-int xlp_pcie_link_irt(int link);
+#ifdef CONFIG_PCI_MSI
+void xlp_init_node_msi_irqs(int node, int link);
+#else
+static inline void xlp_init_node_msi_irqs(int node, int link) {}
+#endif
+
+struct pci_dev *xlp_get_pcie_link(const struct pci_dev *dev);
+
 #endif
 #endif /* __NLM_HAL_PCIBUS_H__ */
diff --git a/arch/mips/include/asm/netlogic/xlp-hal/pic.h b/arch/mips/include/asm/netlogic/xlp-hal/pic.h
index 105389b..f10bf3b 100644
--- a/arch/mips/include/asm/netlogic/xlp-hal/pic.h
+++ b/arch/mips/include/asm/netlogic/xlp-hal/pic.h
@@ -150,12 +150,19 @@
 #define PIC_IRT0		0x74
 #define PIC_IRT(i)		(PIC_IRT0 + ((i) * 2))
 
-#define TIMER_CYCLES_MAXVAL	0xffffffffffffffffULL
+#define PIC_9XX_PENDING_0	0x6
+#define PIC_9XX_PENDING_1	0x8
+#define PIC_9XX_PENDING_2	0xa
+#define PIC_9XX_PENDING_3	0xc
+
+#define PIC_9XX_IRT0		0x1c0
+#define PIC_9XX_IRT(i)		(PIC_9XX_IRT0 + ((i) * 2))
 
 /*
  *    IRT Map
  */
 #define PIC_NUM_IRTS		160
+#define PIC_9XX_NUM_IRTS	256
 
 #define PIC_IRT_WD_0_INDEX	0
 #define PIC_IRT_WD_1_INDEX	1
@@ -193,14 +200,9 @@
 #define PIC_IRT_PCIE_LINK_INDEX(num)	((num) + PIC_IRT_PCIE_LINK_0_INDEX)
 
 #define PIC_CLOCK_TIMER			7
-#define PIC_IRQ_BASE			8
 
 #if !defined(LOCORE) && !defined(__ASSEMBLY__)
 
-#define PIC_IRT_FIRST_IRQ		(PIC_IRQ_BASE)
-#define PIC_IRT_LAST_IRQ		63
-#define PIC_IRQ_IS_IRT(irq)		((irq) >= PIC_IRT_FIRST_IRQ)
-
 /*
  *   Misc
  */
@@ -210,30 +212,26 @@
 
 #define nlm_read_pic_reg(b, r)	nlm_read_reg64(b, r)
 #define nlm_write_pic_reg(b, r, v) nlm_write_reg64(b, r, v)
-#define nlm_get_pic_pcibase(node) nlm_pcicfg_base(XLP_IO_PIC_OFFSET(node))
+#define nlm_get_pic_pcibase(node)	nlm_pcicfg_base(cpu_is_xlp9xx() ? \
+		XLP9XX_IO_PIC_OFFSET(node) : XLP_IO_PIC_OFFSET(node))
 #define nlm_get_pic_regbase(node) (nlm_get_pic_pcibase(node) + XLP_IO_PCI_HDRSZ)
 
 /* We use PIC on node 0 as a timer */
 #define pic_timer_freq()		nlm_get_pic_frequency(0)
 
 /* IRT and h/w interrupt routines */
-static inline int
-nlm_pic_read_irt(uint64_t base, int irt_index)
-{
-	return nlm_read_pic_reg(base, PIC_IRT(irt_index));
-}
-
 static inline void
-nlm_set_irt_to_cpu(uint64_t base, int irt, int cpu)
+nlm_9xx_pic_write_irt(uint64_t base, int irt_num, int en, int nmi,
+	int sch, int vec, int dt, int db, int cpu)
 {
 	uint64_t val;
 
-	val = nlm_read_pic_reg(base, PIC_IRT(irt));
-	/* clear cpuset and mask */
-	val &= ~((0x7ull << 16) | 0xffff);
-	/* set DB, cpuset and cpumask */
-	val |= (1 << 19) | ((cpu >> 4) << 16) | (1 << (cpu & 0xf));
-	nlm_write_pic_reg(base, PIC_IRT(irt), val);
+	val = (((uint64_t)en & 0x1) << 22) | ((nmi & 0x1) << 23) |
+			((0 /*mc*/) << 20) | ((vec & 0x3f) << 24) |
+			((dt & 0x1) << 21) | (0 /*ptr*/ << 16) |
+			(cpu & 0x3ff);
+
+	nlm_write_pic_reg(base, PIC_9XX_IRT(irt_num), val);
 }
 
 static inline void
@@ -254,9 +252,13 @@
 nlm_pic_write_irt_direct(uint64_t base, int irt_num, int en, int nmi,
 	int sch, int vec, int cpu)
 {
-	nlm_pic_write_irt(base, irt_num, en, nmi, sch, vec, 1,
-		(cpu >> 4),		/* thread group */
-		1 << (cpu & 0xf));	/* thread mask */
+	if (cpu_is_xlp9xx())
+		nlm_9xx_pic_write_irt(base, irt_num, en, nmi, sch, vec,
+							1, 0, cpu);
+	else
+		nlm_pic_write_irt(base, irt_num, en, nmi, sch, vec, 1,
+			(cpu >> 4),		/* thread group */
+			1 << (cpu & 0xf));	/* thread mask */
 }
 
 static inline uint64_t
@@ -298,8 +300,13 @@
 {
 	uint64_t reg;
 
-	reg = nlm_read_pic_reg(base, PIC_IRT(irt));
-	nlm_write_pic_reg(base, PIC_IRT(irt), reg | (1u << 31));
+	if (cpu_is_xlp9xx()) {
+		reg = nlm_read_pic_reg(base, PIC_9XX_IRT(irt));
+		nlm_write_pic_reg(base, PIC_9XX_IRT(irt), reg | (1 << 22));
+	} else {
+		reg = nlm_read_pic_reg(base, PIC_IRT(irt));
+		nlm_write_pic_reg(base, PIC_IRT(irt), reg | (1u << 31));
+	}
 }
 
 static inline void
@@ -307,8 +314,15 @@
 {
 	uint64_t reg;
 
-	reg = nlm_read_pic_reg(base, PIC_IRT(irt));
-	nlm_write_pic_reg(base, PIC_IRT(irt), reg & ~((uint64_t)1 << 31));
+	if (cpu_is_xlp9xx()) {
+		reg = nlm_read_pic_reg(base, PIC_9XX_IRT(irt));
+		reg &= ~((uint64_t)1 << 22);
+		nlm_write_pic_reg(base, PIC_9XX_IRT(irt), reg);
+	} else {
+		reg = nlm_read_pic_reg(base, PIC_IRT(irt));
+		reg &= ~((uint64_t)1 << 31);
+		nlm_write_pic_reg(base, PIC_IRT(irt), reg);
+	}
 }
 
 static inline void
@@ -316,8 +330,13 @@
 {
 	uint64_t ipi;
 
-	ipi = ((uint64_t)nmi << 31) | (irq << 20);
-	ipi |= ((hwt >> 4) << 16) | (1 << (hwt & 0xf)); /* cpuset and mask */
+	if (cpu_is_xlp9xx())
+		ipi = (nmi << 23) | (irq << 24) |
+			(0/*mcm*/ << 20) | (0/*ptr*/ << 16) | hwt;
+	else
+		ipi = ((uint64_t)nmi << 31) | (irq << 20) |
+			((hwt >> 4) << 16) | (1 << (hwt & 0xf));
+
 	nlm_write_pic_reg(base, PIC_IPI_CTL, ipi);
 }
 
diff --git a/arch/mips/include/asm/netlogic/xlp-hal/sys.h b/arch/mips/include/asm/netlogic/xlp-hal/sys.h
index fcf2833c..d9b107f 100644
--- a/arch/mips/include/asm/netlogic/xlp-hal/sys.h
+++ b/arch/mips/include/asm/netlogic/xlp-hal/sys.h
@@ -147,13 +147,29 @@
 #define SYS_SYS_PLL_MEM_REQ			0x2a3
 #define SYS_PLL_MEM_STAT			0x2a4
 
+/* Registers changed on 9XX */
+#define SYS_9XX_POWER_ON_RESET_CFG		0x00
+#define SYS_9XX_CHIP_RESET			0x01
+#define SYS_9XX_CPU_RESET			0x02
+#define SYS_9XX_CPU_NONCOHERENT_MODE		0x03
+
+/* XLP 9XX fuse block registers */
+#define FUSE_9XX_DEVCFG6			0xc6
+
 #ifndef __ASSEMBLY__
 
 #define nlm_read_sys_reg(b, r)		nlm_read_reg(b, r)
 #define nlm_write_sys_reg(b, r, v)	nlm_write_reg(b, r, v)
-#define nlm_get_sys_pcibase(node) nlm_pcicfg_base(XLP_IO_SYS_OFFSET(node))
+#define nlm_get_sys_pcibase(node)	nlm_pcicfg_base(cpu_is_xlp9xx() ? \
+		XLP9XX_IO_SYS_OFFSET(node) : XLP_IO_SYS_OFFSET(node))
 #define nlm_get_sys_regbase(node) (nlm_get_sys_pcibase(node) + XLP_IO_PCI_HDRSZ)
 
+/* XLP9XX fuse block */
+#define nlm_get_fuse_pcibase(node)	\
+			nlm_pcicfg_base(XLP9XX_IO_FUSE_OFFSET(node))
+#define nlm_get_fuse_regbase(node)	\
+			(nlm_get_fuse_pcibase(node) + XLP_IO_PCI_HDRSZ)
+
 unsigned int nlm_get_pic_frequency(int node);
 #endif
 #endif
diff --git a/arch/mips/include/asm/netlogic/xlp-hal/uart.h b/arch/mips/include/asm/netlogic/xlp-hal/uart.h
index 86d16e1..a6c5442 100644
--- a/arch/mips/include/asm/netlogic/xlp-hal/uart.h
+++ b/arch/mips/include/asm/netlogic/xlp-hal/uart.h
@@ -94,7 +94,8 @@
 #define nlm_read_uart_reg(b, r)		nlm_read_reg(b, r)
 #define nlm_write_uart_reg(b, r, v)	nlm_write_reg(b, r, v)
 #define nlm_get_uart_pcibase(node, inst)	\
-		nlm_pcicfg_base(XLP_IO_UART_OFFSET(node, inst))
+	nlm_pcicfg_base(cpu_is_xlp9xx() ?  XLP9XX_IO_UART_OFFSET(node) : \
+						XLP_IO_UART_OFFSET(node, inst))
 #define nlm_get_uart_regbase(node, inst)	\
 			(nlm_get_uart_pcibase(node, inst) + XLP_IO_PCI_HDRSZ)
 
diff --git a/arch/mips/include/asm/netlogic/xlp-hal/xlp.h b/arch/mips/include/asm/netlogic/xlp-hal/xlp.h
index 470f209..2b0c959 100644
--- a/arch/mips/include/asm/netlogic/xlp-hal/xlp.h
+++ b/arch/mips/include/asm/netlogic/xlp-hal/xlp.h
@@ -37,10 +37,9 @@
 
 #define PIC_UART_0_IRQ			17
 #define PIC_UART_1_IRQ			18
-#define PIC_PCIE_LINK_0_IRQ		19
-#define PIC_PCIE_LINK_1_IRQ		20
-#define PIC_PCIE_LINK_2_IRQ		21
-#define PIC_PCIE_LINK_3_IRQ		22
+
+#define PIC_PCIE_LINK_LEGACY_IRQ_BASE	19
+#define PIC_PCIE_LINK_LEGACY_IRQ(i)	(19 + (i))
 
 #define PIC_EHCI_0_IRQ			23
 #define PIC_EHCI_1_IRQ			24
@@ -51,6 +50,8 @@
 #define PIC_2XX_XHCI_0_IRQ		23
 #define PIC_2XX_XHCI_1_IRQ		24
 #define PIC_2XX_XHCI_2_IRQ		25
+#define PIC_9XX_XHCI_0_IRQ		23
+#define PIC_9XX_XHCI_1_IRQ		24
 
 #define PIC_MMC_IRQ			29
 #define PIC_I2C_0_IRQ			30
@@ -58,6 +59,23 @@
 #define PIC_I2C_2_IRQ			32
 #define PIC_I2C_3_IRQ			33
 
+#define PIC_PCIE_LINK_MSI_IRQ_BASE	44	/* 44 - 47 MSI IRQ */
+#define PIC_PCIE_LINK_MSI_IRQ(i)	(44 + (i))
+
+/* MSI-X with second link-level dispatch */
+#define PIC_PCIE_MSIX_IRQ_BASE		48	/* 48 - 51 MSI-X IRQ */
+#define PIC_PCIE_MSIX_IRQ(i)		(48 + (i))
+
+#define NLM_MSIX_VEC_BASE		96	/* 96 - 127 - MSIX mapped */
+#define NLM_MSI_VEC_BASE		128	/* 128 - 255 - MSI mapped */
+
+#define NLM_PIC_INDIRECT_VEC_BASE	512
+#define NLM_GPIO_VEC_BASE		768
+
+#define PIC_IRQ_BASE			8
+#define PIC_IRT_FIRST_IRQ		PIC_IRQ_BASE
+#define PIC_IRT_LAST_IRQ		63
+
 #ifndef __ASSEMBLY__
 
 /* SMP support functions */
@@ -68,6 +86,9 @@
 void nlm_hal_init(void);
 int xlp_get_dram_map(int n, uint64_t *dram_map);
 
+struct pci_dev;
+int xlp_socdev_to_node(const struct pci_dev *dev);
+
 /* Device tree related */
 void xlp_early_init_devtree(void);
 void *xlp_dt_init(void *fdtp);
@@ -76,8 +97,15 @@
 {
 	int chip = read_c0_prid() & 0xff00;
 
-	return chip == PRID_IMP_NETLOGIC_XLP2XX;
+	return chip == PRID_IMP_NETLOGIC_XLP2XX ||
+		chip == PRID_IMP_NETLOGIC_XLP9XX;
 }
 
+static inline int cpu_is_xlp9xx(void)
+{
+	int chip = read_c0_prid() & 0xff00;
+
+	return chip == PRID_IMP_NETLOGIC_XLP9XX;
+}
 #endif /* !__ASSEMBLY__ */
 #endif /* _ASM_NLM_XLP_H */
diff --git a/arch/mips/include/asm/netlogic/xlr/xlr.h b/arch/mips/include/asm/netlogic/xlr/xlr.h
index c1667e0..ceb991c 100644
--- a/arch/mips/include/asm/netlogic/xlr/xlr.h
+++ b/arch/mips/include/asm/netlogic/xlr/xlr.h
@@ -35,11 +35,6 @@
 #ifndef _ASM_NLM_XLR_H
 #define _ASM_NLM_XLR_H
 
-/* Platform UART functions */
-struct uart_port;
-unsigned int nlm_xlr_uart_in(struct uart_port *, int);
-void nlm_xlr_uart_out(struct uart_port *, int, int);
-
 /* SMP helpers */
 void xlr_wakeup_secondary_cpus(void);
 
diff --git a/arch/mips/include/asm/octeon/cvmx-helper-board.h b/arch/mips/include/asm/octeon/cvmx-helper-board.h
index 41785dd..8933203 100644
--- a/arch/mips/include/asm/octeon/cvmx-helper-board.h
+++ b/arch/mips/include/asm/octeon/cvmx-helper-board.h
@@ -36,6 +36,13 @@
 
 #include <asm/octeon/cvmx-helper.h>
 
+enum cvmx_helper_board_usb_clock_types {
+	USB_CLOCK_TYPE_REF_12,
+	USB_CLOCK_TYPE_REF_24,
+	USB_CLOCK_TYPE_REF_48,
+	USB_CLOCK_TYPE_CRYSTAL_12,
+};
+
 typedef enum {
 	set_phy_link_flags_autoneg = 0x1,
 	set_phy_link_flags_flow_control_dont_touch = 0x0 << 1,
@@ -154,4 +161,6 @@
  */
 extern int __cvmx_helper_board_hardware_enable(int interface);
 
+enum cvmx_helper_board_usb_clock_types __cvmx_helper_board_usb_get_clock_type(void);
+
 #endif /* __CVMX_HELPER_BOARD_H__ */
diff --git a/arch/mips/include/asm/page.h b/arch/mips/include/asm/page.h
index f6be474..5e08bcc 100644
--- a/arch/mips/include/asm/page.h
+++ b/arch/mips/include/asm/page.h
@@ -11,6 +11,8 @@
 
 #include <spaces.h>
 #include <linux/const.h>
+#include <linux/kernel.h>
+#include <asm/mipsregs.h>
 
 /*
  * PAGE_SHIFT determines the page size
@@ -33,6 +35,29 @@
 #define PAGE_SIZE	(_AC(1,UL) << PAGE_SHIFT)
 #define PAGE_MASK	(~((1 << PAGE_SHIFT) - 1))
 
+/*
+ * This is used for calculating the real page sizes
+ * for FTLB or VTLB + FTLB configurations.
+ */
+static inline unsigned int page_size_ftlb(unsigned int mmuextdef)
+{
+	switch (mmuextdef) {
+	case MIPS_CONF4_MMUEXTDEF_FTLBSIZEEXT:
+		if (PAGE_SIZE == (1 << 30))
+			return 5;
+		if (PAGE_SIZE == (1llu << 32))
+			return 6;
+		if (PAGE_SIZE > (256 << 10))
+			return 7; /* reserved */
+			/* fall through */
+	case MIPS_CONF4_MMUEXTDEF_VTLBSIZEEXT:
+		return (PAGE_SHIFT - 10) / 2;
+	default:
+		panic("Invalid FTLB configuration with Conf4_mmuextdef=%d value\n",
+		      mmuextdef >> 14);
+	}
+}
+
 #ifdef CONFIG_MIPS_HUGE_TLB_SUPPORT
 #define HPAGE_SHIFT	(PAGE_SHIFT + PAGE_SHIFT - 3)
 #define HPAGE_SIZE	(_AC(1,UL) << HPAGE_SHIFT)
diff --git a/arch/mips/include/asm/rtlx.h b/arch/mips/include/asm/rtlx.h
index 90985b6..c102065 100644
--- a/arch/mips/include/asm/rtlx.h
+++ b/arch/mips/include/asm/rtlx.h
@@ -1,13 +1,18 @@
 /*
- * Copyright (C) 2004, 2005 MIPS Technologies, Inc.  All rights reserved.
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
  *
+ * Copyright (C) 2004, 2005 MIPS Technologies, Inc.  All rights reserved.
+ * Copyright (C) 2013 Imagination Technologies Ltd.
  */
-
 #ifndef __ASM_RTLX_H_
 #define __ASM_RTLX_H_
 
 #include <irq.h>
 
+#define RTLX_MODULE_NAME "rtlx"
+
 #define LX_NODE_BASE 10
 
 #define MIPS_CPU_RTLX_IRQ 0
@@ -15,18 +20,31 @@
 #define RTLX_VERSION 2
 #define RTLX_xID 0x12345600
 #define RTLX_ID (RTLX_xID | RTLX_VERSION)
+#define RTLX_BUFFER_SIZE 2048
 #define RTLX_CHANNELS 8
 
 #define RTLX_CHANNEL_STDIO	0
 #define RTLX_CHANNEL_DBG	1
 #define RTLX_CHANNEL_SYSIO	2
 
-extern int rtlx_open(int index, int can_sleep);
-extern int rtlx_release(int index);
-extern ssize_t rtlx_read(int index, void __user *buff, size_t count);
-extern ssize_t rtlx_write(int index, const void __user *buffer, size_t count);
-extern unsigned int rtlx_read_poll(int index, int can_sleep);
-extern unsigned int rtlx_write_poll(int index);
+void rtlx_starting(int vpe);
+void rtlx_stopping(int vpe);
+
+int rtlx_open(int index, int can_sleep);
+int rtlx_release(int index);
+ssize_t rtlx_read(int index, void __user *buff, size_t count);
+ssize_t rtlx_write(int index, const void __user *buffer, size_t count);
+unsigned int rtlx_read_poll(int index, int can_sleep);
+unsigned int rtlx_write_poll(int index);
+
+int __init rtlx_module_init(void);
+void __exit rtlx_module_exit(void);
+
+void _interrupt_sp(void);
+
+extern struct vpe_notifications rtlx_notify;
+extern const struct file_operations rtlx_fops;
+extern void (*aprp_hook)(void);
 
 enum rtlx_state {
 	RTLX_STATE_UNUSED = 0,
@@ -35,10 +53,15 @@
 	RTLX_STATE_OPENED
 };
 
-#define RTLX_BUFFER_SIZE 2048
+extern struct chan_waitqueues {
+	wait_queue_head_t rt_queue;
+	wait_queue_head_t lx_queue;
+	atomic_t in_open;
+	struct mutex mutex;
+} channel_wqs[RTLX_CHANNELS];
 
 /* each channel supports read and write.
-   linux (vpe0) reads lx_buffer	 and writes rt_buffer
+   linux (vpe0) reads lx_buffer and writes rt_buffer
    SP (vpe1) reads rt_buffer and writes lx_buffer
 */
 struct rtlx_channel {
@@ -55,11 +78,11 @@
 	char *lx_buffer;
 };
 
-struct rtlx_info {
+extern struct rtlx_info {
 	unsigned long id;
 	enum rtlx_state state;
+	int ap_int_pending;	/* Status of 0 or 1 for CONFIG_MIPS_CMP only */
 
 	struct rtlx_channel channel[RTLX_CHANNELS];
-};
-
+} *rtlx;
 #endif /* __ASM_RTLX_H_ */
diff --git a/arch/mips/include/asm/switch_to.h b/arch/mips/include/asm/switch_to.h
index eb0af15..278d45a 100644
--- a/arch/mips/include/asm/switch_to.h
+++ b/arch/mips/include/asm/switch_to.h
@@ -19,11 +19,19 @@
 
 struct task_struct;
 
-/*
- * switch_to(n) should switch tasks to task nr n, first
- * checking that n isn't the current task, in which case it does nothing.
+/**
+ * resume - resume execution of a task
+ * @prev:	The task previously executed.
+ * @next:	The task to begin executing.
+ * @next_ti:	task_thread_info(next).
+ * @usedfpu:	Non-zero if prev's FP context should be saved.
+ *
+ * This function is used whilst scheduling to save the context of prev & load
+ * the context of next. Returns prev.
  */
-extern asmlinkage void *resume(void *last, void *next, void *next_ti, u32 __usedfpu);
+extern asmlinkage struct task_struct *resume(struct task_struct *prev,
+		struct task_struct *next, struct thread_info *next_ti,
+		u32 usedfpu);
 
 extern unsigned int ll_bit;
 extern struct task_struct *ll_task;
diff --git a/arch/mips/include/asm/syscall.h b/arch/mips/include/asm/syscall.h
index 81c8913..33e8dbf 100644
--- a/arch/mips/include/asm/syscall.h
+++ b/arch/mips/include/asm/syscall.h
@@ -29,7 +29,7 @@
 static inline unsigned long mips_get_syscall_arg(unsigned long *arg,
 	struct task_struct *task, struct pt_regs *regs, unsigned int n)
 {
-	unsigned long usp = regs->regs[29];
+	unsigned long usp __maybe_unused = regs->regs[29];
 
 	switch (n) {
 	case 0: case 1: case 2: case 3:
diff --git a/arch/mips/include/asm/thread_info.h b/arch/mips/include/asm/thread_info.h
index 4f58ef6..24846f9 100644
--- a/arch/mips/include/asm/thread_info.h
+++ b/arch/mips/include/asm/thread_info.h
@@ -110,11 +110,12 @@
 #define TIF_NOHZ		19	/* in adaptive nohz mode */
 #define TIF_FIXADE		20	/* Fix address errors in software */
 #define TIF_LOGADE		21	/* Log address errors to syslog */
-#define TIF_32BIT_REGS		22	/* also implies 16/32 fprs */
+#define TIF_32BIT_REGS		22	/* 32-bit general purpose registers */
 #define TIF_32BIT_ADDR		23	/* 32-bit address space (o32/n32) */
 #define TIF_FPUBOUND		24	/* thread bound to FPU-full CPU set */
 #define TIF_LOAD_WATCH		25	/* If set, load watch registers */
 #define TIF_SYSCALL_TRACEPOINT	26	/* syscall tracepoint instrumentation */
+#define TIF_32BIT_FPREGS	27	/* 32-bit floating point registers */
 #define TIF_SYSCALL_TRACE	31	/* syscall trace active */
 
 #define _TIF_SYSCALL_TRACE	(1<<TIF_SYSCALL_TRACE)
@@ -131,6 +132,7 @@
 #define _TIF_32BIT_ADDR		(1<<TIF_32BIT_ADDR)
 #define _TIF_FPUBOUND		(1<<TIF_FPUBOUND)
 #define _TIF_LOAD_WATCH		(1<<TIF_LOAD_WATCH)
+#define _TIF_32BIT_FPREGS	(1<<TIF_32BIT_FPREGS)
 #define _TIF_SYSCALL_TRACEPOINT	(1<<TIF_SYSCALL_TRACEPOINT)
 
 #define _TIF_WORK_SYSCALL_ENTRY	(_TIF_NOHZ | _TIF_SYSCALL_TRACE |	\
diff --git a/arch/mips/include/asm/tlb.h b/arch/mips/include/asm/tlb.h
index c67842b..4a23493 100644
--- a/arch/mips/include/asm/tlb.h
+++ b/arch/mips/include/asm/tlb.h
@@ -18,6 +18,10 @@
  */
 #define tlb_flush(tlb) flush_tlb_mm((tlb)->mm)
 
+#define UNIQUE_ENTRYHI(idx)						\
+		((CKSEG0 + ((idx) << (PAGE_SHIFT + 1))) |		\
+		 (cpu_has_tlbinv ? MIPS_ENTRYHI_EHINV : 0))
+
 #include <asm-generic/tlb.h>
 
 #endif /* __ASM_TLB_H */
diff --git a/arch/mips/include/asm/vpe.h b/arch/mips/include/asm/vpe.h
index 0880fe8..7849f39 100644
--- a/arch/mips/include/asm/vpe.h
+++ b/arch/mips/include/asm/vpe.h
@@ -1,24 +1,95 @@
 /*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
  * Copyright (C) 2005 MIPS Technologies, Inc.  All rights reserved.
- *
- *  This program is free software; you can distribute it and/or modify it
- *  under the terms of the GNU General Public License (Version 2) as
- *  published by the Free Software Foundation.
- *
- *  This program is distributed in the hope it will be useful, but WITHOUT
- *  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- *  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- *  for more details.
- *
- *  You should have received a copy of the GNU General Public License along
- *  with this program; if not, write to the Free Software Foundation, Inc.,
- *  59 Temple Place - Suite 330, Boston MA 02111-1307, USA.
- *
+ * Copyright (C) 2013 Imagination Technologies Ltd.
  */
-
 #ifndef _ASM_VPE_H
 #define _ASM_VPE_H
 
+#include <linux/init.h>
+#include <linux/list.h>
+#include <linux/smp.h>
+#include <linux/spinlock.h>
+
+#define VPE_MODULE_NAME "vpe"
+#define VPE_MODULE_MINOR 1
+
+/* grab the likely amount of memory we will need. */
+#ifdef CONFIG_MIPS_VPE_LOADER_TOM
+#define P_SIZE (2 * 1024 * 1024)
+#else
+/* add an overhead to the max kmalloc size for non-stripped symbols/etc */
+#define P_SIZE (256 * 1024)
+#endif
+
+#define MAX_VPES 16
+#define VPE_PATH_MAX 256
+
+static inline int aprp_cpu_index(void)
+{
+#ifdef CONFIG_MIPS_CMP
+	return setup_max_cpus;
+#else
+	extern int tclimit;
+	return tclimit;
+#endif
+}
+
+enum vpe_state {
+	VPE_STATE_UNUSED = 0,
+	VPE_STATE_INUSE,
+	VPE_STATE_RUNNING
+};
+
+enum tc_state {
+	TC_STATE_UNUSED = 0,
+	TC_STATE_INUSE,
+	TC_STATE_RUNNING,
+	TC_STATE_DYNAMIC
+};
+
+struct vpe {
+	enum vpe_state state;
+
+	/* (device) minor associated with this vpe */
+	int minor;
+
+	/* elfloader stuff */
+	void *load_addr;
+	unsigned long len;
+	char *pbuffer;
+	unsigned long plen;
+	char cwd[VPE_PATH_MAX];
+
+	unsigned long __start;
+
+	/* tc's associated with this vpe */
+	struct list_head tc;
+
+	/* The list of vpe's */
+	struct list_head list;
+
+	/* shared symbol address */
+	void *shared_ptr;
+
+	/* the list of who wants to know when something major happens */
+	struct list_head notify;
+
+	unsigned int ntcs;
+};
+
+struct tc {
+	enum tc_state state;
+	int index;
+
+	struct vpe *pvpe;	/* parent VPE */
+	struct list_head tc;	/* The list of TC's with this VPE */
+	struct list_head list;	/* The global list of tc's */
+};
+
 struct vpe_notifications {
 	void (*start)(int vpe);
 	void (*stop)(int vpe);
@@ -26,10 +97,34 @@
 	struct list_head list;
 };
 
+struct vpe_control {
+	spinlock_t vpe_list_lock;
+	struct list_head vpe_list;      /* Virtual processing elements */
+	spinlock_t tc_list_lock;
+	struct list_head tc_list;       /* Thread contexts */
+};
 
-extern int vpe_notify(int index, struct vpe_notifications *notify);
+extern unsigned long physical_memsize;
+extern struct vpe_control vpecontrol;
+extern const struct file_operations vpe_fops;
 
-extern void *vpe_get_shared(int index);
-extern char *vpe_getcwd(int index);
+int vpe_notify(int index, struct vpe_notifications *notify);
 
+void *vpe_get_shared(int index);
+char *vpe_getcwd(int index);
+
+struct vpe *get_vpe(int minor);
+struct tc *get_tc(int index);
+struct vpe *alloc_vpe(int minor);
+struct tc *alloc_tc(int index);
+void release_vpe(struct vpe *v);
+
+void *alloc_progmem(unsigned long len);
+void release_progmem(void *ptr);
+
+int __weak vpe_run(struct vpe *v);
+void cleanup_tc(struct tc *tc);
+
+int __init vpe_module_init(void);
+void __exit vpe_module_exit(void);
 #endif /* _ASM_VPE_H */
diff --git a/arch/mips/include/uapi/asm/inst.h b/arch/mips/include/uapi/asm/inst.h
index e5a676e..b39ba25 100644
--- a/arch/mips/include/uapi/asm/inst.h
+++ b/arch/mips/include/uapi/asm/inst.h
@@ -98,8 +98,9 @@
  */
 enum cop_op {
 	mfc_op	      = 0x00, dmfc_op	    = 0x01,
-	cfc_op	      = 0x02, mtc_op	    = 0x04,
-	dmtc_op	      = 0x05, ctc_op	    = 0x06,
+	cfc_op	      = 0x02, mfhc_op	    = 0x03,
+	mtc_op        = 0x04, dmtc_op	    = 0x05,
+	ctc_op	      = 0x06, mthc_op	    = 0x07,
 	bc_op	      = 0x08, cop_op	    = 0x10,
 	copm_op	      = 0x18
 };
@@ -397,8 +398,10 @@
 	mm_movt1_op = 0xa5,
 	mm_ftruncw_op = 0xac,
 	mm_fneg1_op = 0xad,
+	mm_mfhc1_op = 0xc0,
 	mm_froundl_op = 0xcc,
 	mm_fcvtd1_op = 0xcd,
+	mm_mthc1_op = 0xe0,
 	mm_froundw_op = 0xec,
 	mm_fcvts1_op = 0xed,
 };
diff --git a/arch/mips/include/uapi/asm/unistd.h b/arch/mips/include/uapi/asm/unistd.h
index 1dee279..d6e154a 100644
--- a/arch/mips/include/uapi/asm/unistd.h
+++ b/arch/mips/include/uapi/asm/unistd.h
@@ -369,16 +369,18 @@
 #define __NR_process_vm_writev		(__NR_Linux + 346)
 #define __NR_kcmp			(__NR_Linux + 347)
 #define __NR_finit_module		(__NR_Linux + 348)
+#define __NR_sched_setattr		(__NR_Linux + 349)
+#define __NR_sched_getattr		(__NR_Linux + 350)
 
 /*
  * Offset of the last Linux o32 flavoured syscall
  */
-#define __NR_Linux_syscalls		348
+#define __NR_Linux_syscalls		350
 
 #endif /* _MIPS_SIM == _MIPS_SIM_ABI32 */
 
 #define __NR_O32_Linux			4000
-#define __NR_O32_Linux_syscalls		348
+#define __NR_O32_Linux_syscalls		350
 
 #if _MIPS_SIM == _MIPS_SIM_ABI64
 
@@ -695,16 +697,18 @@
 #define __NR_kcmp			(__NR_Linux + 306)
 #define __NR_finit_module		(__NR_Linux + 307)
 #define __NR_getdents64			(__NR_Linux + 308)
+#define __NR_sched_setattr		(__NR_Linux + 309)
+#define __NR_sched_getattr		(__NR_Linux + 310)
 
 /*
  * Offset of the last Linux 64-bit flavoured syscall
  */
-#define __NR_Linux_syscalls		308
+#define __NR_Linux_syscalls		310
 
 #endif /* _MIPS_SIM == _MIPS_SIM_ABI64 */
 
 #define __NR_64_Linux			5000
-#define __NR_64_Linux_syscalls		308
+#define __NR_64_Linux_syscalls		310
 
 #if _MIPS_SIM == _MIPS_SIM_NABI32
 
@@ -1025,15 +1029,17 @@
 #define __NR_process_vm_writev		(__NR_Linux + 310)
 #define __NR_kcmp			(__NR_Linux + 311)
 #define __NR_finit_module		(__NR_Linux + 312)
+#define __NR_sched_setattr		(__NR_Linux + 313)
+#define __NR_sched_getattr		(__NR_Linux + 314)
 
 /*
  * Offset of the last N32 flavoured syscall
  */
-#define __NR_Linux_syscalls		312
+#define __NR_Linux_syscalls		314
 
 #endif /* _MIPS_SIM == _MIPS_SIM_NABI32 */
 
 #define __NR_N32_Linux			6000
-#define __NR_N32_Linux_syscalls		312
+#define __NR_N32_Linux_syscalls		314
 
 #endif /* _UAPI_ASM_UNISTD_H */
diff --git a/arch/mips/jz4740/board-qi_lb60.c b/arch/mips/jz4740/board-qi_lb60.c
index 8a5ec0e..c01900e 100644
--- a/arch/mips/jz4740/board-qi_lb60.c
+++ b/arch/mips/jz4740/board-qi_lb60.c
@@ -427,6 +427,7 @@
 
 static struct platform_device *jz_platform_devices[] __initdata = {
 	&jz4740_udc_device,
+	&jz4740_udc_xceiv_device,
 	&jz4740_mmc_device,
 	&jz4740_nand_device,
 	&qi_lb60_keypad,
diff --git a/arch/mips/jz4740/platform.c b/arch/mips/jz4740/platform.c
index df65677..a447101 100644
--- a/arch/mips/jz4740/platform.c
+++ b/arch/mips/jz4740/platform.c
@@ -14,13 +14,14 @@
  */
 
 #include <linux/device.h>
-#include <linux/init.h>
 #include <linux/kernel.h>
 #include <linux/platform_device.h>
 #include <linux/resource.h>
 
 #include <linux/dma-mapping.h>
 
+#include <linux/usb/musb.h>
+
 #include <asm/mach-jz4740/platform.h>
 #include <asm/mach-jz4740/base.h>
 #include <asm/mach-jz4740/irq.h>
@@ -56,29 +57,35 @@
 	.resource	= jz4740_usb_ohci_resources,
 };
 
-/* UDC (USB gadget controller) */
-static struct resource jz4740_usb_gdt_resources[] = {
-	{
-		.start	= JZ4740_UDC_BASE_ADDR,
-		.end	= JZ4740_UDC_BASE_ADDR + 0x1000 - 1,
-		.flags	= IORESOURCE_MEM,
+/* USB Device Controller */
+struct platform_device jz4740_udc_xceiv_device = {
+	.name = "usb_phy_gen_xceiv",
+	.id   = 0,
+};
+
+static struct resource jz4740_udc_resources[] = {
+	[0] = {
+		.start = JZ4740_UDC_BASE_ADDR,
+		.end   = JZ4740_UDC_BASE_ADDR + 0x10000 - 1,
+		.flags = IORESOURCE_MEM,
 	},
-	{
-		.start	= JZ4740_IRQ_UDC,
-		.end	= JZ4740_IRQ_UDC,
-		.flags	= IORESOURCE_IRQ,
+	[1] = {
+		.start = JZ4740_IRQ_UDC,
+		.end   = JZ4740_IRQ_UDC,
+		.flags = IORESOURCE_IRQ,
+		.name  = "mc",
 	},
 };
 
 struct platform_device jz4740_udc_device = {
-	.name		= "jz-udc",
-	.id		= -1,
-	.dev = {
-		.dma_mask = &jz4740_udc_device.dev.coherent_dma_mask,
+	.name = "musb-jz4740",
+	.id   = -1,
+	.dev  = {
+		.dma_mask          = &jz4740_udc_device.dev.coherent_dma_mask,
 		.coherent_dma_mask = DMA_BIT_MASK(32),
 	},
-	.num_resources	= ARRAY_SIZE(jz4740_usb_gdt_resources),
-	.resource	= jz4740_usb_gdt_resources,
+	.num_resources = ARRAY_SIZE(jz4740_udc_resources),
+	.resource      = jz4740_udc_resources,
 };
 
 /* MMC/SD controller */
diff --git a/arch/mips/kernel/Makefile b/arch/mips/kernel/Makefile
index 1c1b717..26c6175 100644
--- a/arch/mips/kernel/Makefile
+++ b/arch/mips/kernel/Makefile
@@ -30,6 +30,7 @@
 obj-$(CONFIG_CSRC_SB1250)	+= csrc-sb1250.o
 obj-$(CONFIG_SYNC_R4K)		+= sync-r4k.o
 
+obj-$(CONFIG_DEBUG_FS)		+= segment.o
 obj-$(CONFIG_STACKTRACE)	+= stacktrace.o
 obj-$(CONFIG_MODULES)		+= mips_ksyms.o module.o
 obj-$(CONFIG_MODULES_USE_ELF_RELA) += module-rela.o
@@ -55,7 +56,11 @@
 obj-$(CONFIG_CPU_MIPSR2)	+= spram.o
 
 obj-$(CONFIG_MIPS_VPE_LOADER)	+= vpe.o
+obj-$(CONFIG_MIPS_VPE_LOADER_CMP) += vpe-cmp.o
+obj-$(CONFIG_MIPS_VPE_LOADER_MT) += vpe-mt.o
 obj-$(CONFIG_MIPS_VPE_APSP_API) += rtlx.o
+obj-$(CONFIG_MIPS_VPE_APSP_API_CMP) += rtlx-cmp.o
+obj-$(CONFIG_MIPS_VPE_APSP_API_MT) += rtlx-mt.o
 
 obj-$(CONFIG_I8259)		+= i8259.o
 obj-$(CONFIG_IRQ_CPU)		+= irq_cpu.o
diff --git a/arch/mips/kernel/binfmt_elfo32.c b/arch/mips/kernel/binfmt_elfo32.c
index 202e581..7faf5f2 100644
--- a/arch/mips/kernel/binfmt_elfo32.c
+++ b/arch/mips/kernel/binfmt_elfo32.c
@@ -28,6 +28,18 @@
 typedef elf_fpreg_t elf_fpregset_t[ELF_NFPREG];
 
 /*
+ * In order to be sure that we don't attempt to execute an O32 binary which
+ * requires 64 bit FP (FR=1) on a system which does not support it, we refuse
+ * to execute any binary which has bits specified by the following macro set
+ * in its ELF header flags.
+ */
+#ifdef CONFIG_MIPS_O32_FP64_SUPPORT
+# define __MIPS_O32_FP64_MUST_BE_ZERO	0
+#else
+# define __MIPS_O32_FP64_MUST_BE_ZERO	EF_MIPS_FP64
+#endif
+
+/*
  * This is used to ensure we don't load something for the wrong architecture.
  */
 #define elf_check_arch(hdr)						\
@@ -44,6 +56,8 @@
 	if (((__h->e_flags & EF_MIPS_ABI) != 0) &&			\
 	    ((__h->e_flags & EF_MIPS_ABI) != EF_MIPS_ABI_O32))		\
 		__res = 0;						\
+	if (__h->e_flags & __MIPS_O32_FP64_MUST_BE_ZERO)		\
+		__res = 0;						\
 									\
 	__res;								\
 })
diff --git a/arch/mips/kernel/bmips_vec.S b/arch/mips/kernel/bmips_vec.S
index bd79c4f..a5bf73d 100644
--- a/arch/mips/kernel/bmips_vec.S
+++ b/arch/mips/kernel/bmips_vec.S
@@ -8,11 +8,11 @@
  * Reset/NMI/re-entry vectors for BMIPS processors
  */
 
-#include <linux/init.h>
 
 #include <asm/asm.h>
 #include <asm/asmmacro.h>
 #include <asm/cacheops.h>
+#include <asm/cpu.h>
 #include <asm/regdef.h>
 #include <asm/mipsregs.h>
 #include <asm/stackframe.h>
@@ -91,12 +91,18 @@
 	beqz	k0, bmips_smp_entry
 
 #if defined(CONFIG_CPU_BMIPS5000)
+	mfc0	k0, CP0_PRID
+	li	k1, PRID_IMP_BMIPS5000
+	andi	k0, 0xff00
+	bne	k0, k1, 1f
+
 	/* if we're not on core 0, this must be the SMP boot signal */
 	li	k1, (3 << 25)
 	mfc0	k0, $22
 	and	k0, k1
 	bnez	k0, bmips_smp_entry
-#endif
+1:
+#endif /* CONFIG_CPU_BMIPS5000 */
 #endif /* CONFIG_SMP */
 
 	/* nope, it's just a regular NMI */
@@ -139,7 +145,12 @@
 	xori	k0, 0x04
 	mtc0	k0, CP0_CONFIG
 
+	mfc0	k0, CP0_PRID
+	andi	k0, 0xff00
 #if defined(CONFIG_CPU_BMIPS4350) || defined(CONFIG_CPU_BMIPS4380)
+	li	k1, PRID_IMP_BMIPS43XX
+	bne	k0, k1, 2f
+
 	/* initialize CPU1's local I-cache */
 	li	k0, 0x80000000
 	li	k1, 0x80010000
@@ -150,14 +161,21 @@
 1:	cache	Index_Store_Tag_I, 0(k0)
 	addiu	k0, 16
 	bne	k0, k1, 1b
-#elif defined(CONFIG_CPU_BMIPS5000)
+
+	b	3f
+2:
+#endif /* CONFIG_CPU_BMIPS4350 || CONFIG_CPU_BMIPS4380 */
+#if defined(CONFIG_CPU_BMIPS5000)
 	/* set exception vector base */
+	li	k1, PRID_IMP_BMIPS5000
+	bne	k0, k1, 3f
+
 	la	k0, ebase
 	lw	k0, 0(k0)
 	mtc0	k0, $15, 1
 	BARRIER
-#endif
-
+#endif /* CONFIG_CPU_BMIPS5000 */
+3:
 	/* jump back to kseg0 in case we need to remap the kseg1 area */
 	la	k0, 1f
 	jr	k0
@@ -221,8 +239,18 @@
 LEAF(bmips_enable_xks01)
 
 #if defined(CONFIG_XKS01)
-
+	mfc0	t0, CP0_PRID
+	andi	t2, t0, 0xff00
 #if defined(CONFIG_CPU_BMIPS4380)
+	li	t1, PRID_IMP_BMIPS43XX
+	bne	t2, t1, 1f
+
+	andi	t0, 0xff
+	addiu	t1, t0, -PRID_REV_BMIPS4380_HI
+	bgtz	t1, 2f
+	addiu	t0, -PRID_REV_BMIPS4380_LO
+	bltz	t0, 2f
+
 	mfc0	t0, $22, 3
 	li	t1, 0x1ff0
 	li	t2, (1 << 12) | (1 << 9)
@@ -231,7 +259,13 @@
 	or	t0, t2
 	mtc0	t0, $22, 3
 	BARRIER
-#elif defined(CONFIG_CPU_BMIPS5000)
+	b	2f
+1:
+#endif /* CONFIG_CPU_BMIPS4380 */
+#if defined(CONFIG_CPU_BMIPS5000)
+	li	t1, PRID_IMP_BMIPS5000
+	bne	t2, t1, 2f
+
 	mfc0	t0, $22, 5
 	li	t1, 0x01ff
 	li	t2, (1 << 8) | (1 << 5)
@@ -240,12 +274,8 @@
 	or	t0, t2
 	mtc0	t0, $22, 5
 	BARRIER
-#else
-
-#error Missing XKS01 setup
-
-#endif
-
+#endif /* CONFIG_CPU_BMIPS5000 */
+2:
 #endif /* defined(CONFIG_XKS01) */
 
 	jr	ra
diff --git a/arch/mips/kernel/cpu-probe.c b/arch/mips/kernel/cpu-probe.c
index c814287..530f832 100644
--- a/arch/mips/kernel/cpu-probe.c
+++ b/arch/mips/kernel/cpu-probe.c
@@ -112,7 +112,7 @@
 	unsigned long tmp, fpu_id;
 
 	tmp = read_c0_status();
-	__enable_fpu();
+	__enable_fpu(FPU_AS_IS);
 	fpu_id = read_32bit_cp1_register(CP1_REVISION);
 	write_c0_status(tmp);
 	return fpu_id;
@@ -163,6 +163,25 @@
 static char unknown_isa[] = KERN_ERR \
 	"Unsupported ISA type, c0.config0: %d.";
 
+static void set_ftlb_enable(struct cpuinfo_mips *c, int enable)
+{
+	unsigned int config6;
+	/*
+	 * Config6 is implementation dependent and it's currently only
+	 * used by proAptiv
+	 */
+	if (c->cputype == CPU_PROAPTIV) {
+		config6 = read_c0_config6();
+		if (enable)
+			/* Enable FTLB */
+			write_c0_config6(config6 | MIPS_CONF6_FTLBEN);
+		else
+			/* Disable FTLB */
+			write_c0_config6(config6 &  ~MIPS_CONF6_FTLBEN);
+		back_to_back_c0_hazard();
+	}
+}
+
 static inline unsigned int decode_config0(struct cpuinfo_mips *c)
 {
 	unsigned int config0;
@@ -170,8 +189,13 @@
 
 	config0 = read_c0_config();
 
-	if (((config0 & MIPS_CONF_MT) >> 7) == 1)
+	/*
+	 * Look for Standard TLB or Dual VTLB and FTLB
+	 */
+	if ((((config0 & MIPS_CONF_MT) >> 7) == 1) ||
+	    (((config0 & MIPS_CONF_MT) >> 7) == 4))
 		c->options |= MIPS_CPU_TLB;
+
 	isa = (config0 & MIPS_CONF_AT) >> 13;
 	switch (isa) {
 	case 0:
@@ -226,8 +250,11 @@
 		c->options |= MIPS_CPU_FPU;
 		c->options |= MIPS_CPU_32FPR;
 	}
-	if (cpu_has_tlb)
+	if (cpu_has_tlb) {
 		c->tlbsize = ((config1 & MIPS_CONF1_TLBS) >> 25) + 1;
+		c->tlbsizevtlb = c->tlbsize;
+		c->tlbsizeftlbsets = 0;
+	}
 
 	return config1 & MIPS_CONF_M;
 }
@@ -272,6 +299,8 @@
 		c->options |= MIPS_CPU_MICROMIPS;
 	if (config3 & MIPS_CONF3_VZ)
 		c->ases |= MIPS_ASE_VZ;
+	if (config3 & MIPS_CONF3_SC)
+		c->options |= MIPS_CPU_SEGMENTS;
 
 	return config3 & MIPS_CONF_M;
 }
@@ -279,12 +308,51 @@
 static inline unsigned int decode_config4(struct cpuinfo_mips *c)
 {
 	unsigned int config4;
+	unsigned int newcf4;
+	unsigned int mmuextdef;
+	unsigned int ftlb_page = MIPS_CONF4_FTLBPAGESIZE;
 
 	config4 = read_c0_config4();
 
-	if ((config4 & MIPS_CONF4_MMUEXTDEF) == MIPS_CONF4_MMUEXTDEF_MMUSIZEEXT
-	    && cpu_has_tlb)
-		c->tlbsize += (config4 & MIPS_CONF4_MMUSIZEEXT) * 0x40;
+	if (cpu_has_tlb) {
+		if (((config4 & MIPS_CONF4_IE) >> 29) == 2)
+			c->options |= MIPS_CPU_TLBINV;
+		mmuextdef = config4 & MIPS_CONF4_MMUEXTDEF;
+		switch (mmuextdef) {
+		case MIPS_CONF4_MMUEXTDEF_MMUSIZEEXT:
+			c->tlbsize += (config4 & MIPS_CONF4_MMUSIZEEXT) * 0x40;
+			c->tlbsizevtlb = c->tlbsize;
+			break;
+		case MIPS_CONF4_MMUEXTDEF_VTLBSIZEEXT:
+			c->tlbsizevtlb +=
+				((config4 & MIPS_CONF4_VTLBSIZEEXT) >>
+				  MIPS_CONF4_VTLBSIZEEXT_SHIFT) * 0x40;
+			c->tlbsize = c->tlbsizevtlb;
+			ftlb_page = MIPS_CONF4_VFTLBPAGESIZE;
+			/* fall through */
+		case MIPS_CONF4_MMUEXTDEF_FTLBSIZEEXT:
+			newcf4 = (config4 & ~ftlb_page) |
+				(page_size_ftlb(mmuextdef) <<
+				 MIPS_CONF4_FTLBPAGESIZE_SHIFT);
+			write_c0_config4(newcf4);
+			back_to_back_c0_hazard();
+			config4 = read_c0_config4();
+			if (config4 != newcf4) {
+				pr_err("PAGE_SIZE 0x%lx is not supported by FTLB (config4=0x%x)\n",
+				       PAGE_SIZE, config4);
+				/* Switch FTLB off */
+				set_ftlb_enable(c, 0);
+				break;
+			}
+			c->tlbsizeftlbsets = 1 <<
+				((config4 & MIPS_CONF4_FTLBSETS) >>
+				 MIPS_CONF4_FTLBSETS_SHIFT);
+			c->tlbsizeftlbways = ((config4 & MIPS_CONF4_FTLBWAYS) >>
+					      MIPS_CONF4_FTLBWAYS_SHIFT) + 2;
+			c->tlbsize += c->tlbsizeftlbways * c->tlbsizeftlbsets;
+			break;
+		}
+	}
 
 	c->kscratch_mask = (config4 >> 16) & 0xff;
 
@@ -312,6 +380,9 @@
 
 	c->scache.flags = MIPS_CACHE_NOT_PRESENT;
 
+	/* Enable FTLB if present */
+	set_ftlb_enable(c, 1);
+
 	ok = decode_config0(c);			/* Read Config registers.  */
 	BUG_ON(!ok);				/* Arch spec violation!	 */
 	if (ok)
@@ -675,7 +746,6 @@
 
 static inline void cpu_probe_mips(struct cpuinfo_mips *c, unsigned int cpu)
 {
-	decode_configs(c);
 	switch (c->processor_id & PRID_IMP_MASK) {
 	case PRID_IMP_4KC:
 		c->cputype = CPU_4KC;
@@ -739,8 +809,26 @@
 		c->cputype = CPU_74K;
 		__cpu_name[cpu] = "MIPS 1074Kc";
 		break;
+	case PRID_IMP_INTERAPTIV_UP:
+		c->cputype = CPU_INTERAPTIV;
+		__cpu_name[cpu] = "MIPS interAptiv";
+		break;
+	case PRID_IMP_INTERAPTIV_MP:
+		c->cputype = CPU_INTERAPTIV;
+		__cpu_name[cpu] = "MIPS interAptiv (multi)";
+		break;
+	case PRID_IMP_PROAPTIV_UP:
+		c->cputype = CPU_PROAPTIV;
+		__cpu_name[cpu] = "MIPS proAptiv";
+		break;
+	case PRID_IMP_PROAPTIV_MP:
+		c->cputype = CPU_PROAPTIV;
+		__cpu_name[cpu] = "MIPS proAptiv (multi)";
+		break;
 	}
 
+	decode_configs(c);
+
 	spram_config();
 }
 
@@ -943,6 +1031,7 @@
 
 	switch (c->processor_id & PRID_IMP_MASK) {
 	case PRID_IMP_NETLOGIC_XLP2XX:
+	case PRID_IMP_NETLOGIC_XLP9XX:
 		c->cputype = CPU_XLP;
 		__cpu_name[cpu] = "Broadcom XLPII";
 		break;
diff --git a/arch/mips/kernel/crash.c b/arch/mips/kernel/crash.c
index 93aa302..d212646 100644
--- a/arch/mips/kernel/crash.c
+++ b/arch/mips/kernel/crash.c
@@ -5,7 +5,6 @@
 #include <linux/bootmem.h>
 #include <linux/crash_dump.h>
 #include <linux/delay.h>
-#include <linux/init.h>
 #include <linux/irq.h>
 #include <linux/types.h>
 #include <linux/sched.h>
diff --git a/arch/mips/kernel/genex.S b/arch/mips/kernel/genex.S
index 47d7583..d84f6a5 100644
--- a/arch/mips/kernel/genex.S
+++ b/arch/mips/kernel/genex.S
@@ -476,6 +476,7 @@
 	BUILD_HANDLER ov ov sti silent			/* #12 */
 	BUILD_HANDLER tr tr sti silent			/* #13 */
 	BUILD_HANDLER fpe fpe fpe silent		/* #15 */
+	BUILD_HANDLER ftlb ftlb none silent		/* #16 */
 	BUILD_HANDLER mdmx mdmx sti silent		/* #22 */
 #ifdef	CONFIG_HARDWARE_WATCHPOINTS
 	/*
diff --git a/arch/mips/kernel/idle.c b/arch/mips/kernel/idle.c
index f7991d9..3553243 100644
--- a/arch/mips/kernel/idle.c
+++ b/arch/mips/kernel/idle.c
@@ -184,6 +184,8 @@
 	case CPU_24K:
 	case CPU_34K:
 	case CPU_1004K:
+	case CPU_INTERAPTIV:
+	case CPU_PROAPTIV:
 		cpu_wait = r4k_wait;
 		if (read_c0_config7() & MIPS_CONF7_WII)
 			cpu_wait = r4k_wait_irqoff;
diff --git a/arch/mips/kernel/proc.c b/arch/mips/kernel/proc.c
index 8c58d8a..00d2097 100644
--- a/arch/mips/kernel/proc.c
+++ b/arch/mips/kernel/proc.c
@@ -65,26 +65,25 @@
 				cpu_data[n].watch_reg_masks[i]);
 		seq_printf(m, "]\n");
 	}
-	if (cpu_has_mips_r) {
-		seq_printf(m, "isa\t\t\t: mips1");
-		if (cpu_has_mips_2)
-			seq_printf(m, "%s", " mips2");
-		if (cpu_has_mips_3)
-			seq_printf(m, "%s", " mips3");
-		if (cpu_has_mips_4)
-			seq_printf(m, "%s", " mips4");
-		if (cpu_has_mips_5)
-			seq_printf(m, "%s", " mips5");
-		if (cpu_has_mips32r1)
-			seq_printf(m, "%s", " mips32r1");
-		if (cpu_has_mips32r2)
-			seq_printf(m, "%s", " mips32r2");
-		if (cpu_has_mips64r1)
-			seq_printf(m, "%s", " mips64r1");
-		if (cpu_has_mips64r2)
-			seq_printf(m, "%s", " mips64r2");
-		seq_printf(m, "\n");
-	}
+
+	seq_printf(m, "isa\t\t\t: mips1");
+	if (cpu_has_mips_2)
+		seq_printf(m, "%s", " mips2");
+	if (cpu_has_mips_3)
+		seq_printf(m, "%s", " mips3");
+	if (cpu_has_mips_4)
+		seq_printf(m, "%s", " mips4");
+	if (cpu_has_mips_5)
+		seq_printf(m, "%s", " mips5");
+	if (cpu_has_mips32r1)
+		seq_printf(m, "%s", " mips32r1");
+	if (cpu_has_mips32r2)
+		seq_printf(m, "%s", " mips32r2");
+	if (cpu_has_mips64r1)
+		seq_printf(m, "%s", " mips64r1");
+	if (cpu_has_mips64r2)
+		seq_printf(m, "%s", " mips64r2");
+	seq_printf(m, "\n");
 
 	seq_printf(m, "ASEs implemented\t:");
 	if (cpu_has_mips16)	seq_printf(m, "%s", " mips16");
@@ -107,7 +106,14 @@
 	seq_printf(m, "kscratch registers\t: %d\n",
 		      hweight8(cpu_data[n].kscratch_mask));
 	seq_printf(m, "core\t\t\t: %d\n", cpu_data[n].core);
-
+#if defined(CONFIG_MIPS_MT_SMP) || defined(CONFIG_MIPS_MT_SMTC)
+	if (cpu_has_mipsmt) {
+		seq_printf(m, "VPE\t\t\t: %d\n", cpu_data[n].vpe_id);
+#if defined(CONFIG_MIPS_MT_SMTC)
+		seq_printf(m, "TC\t\t\t: %d\n", cpu_data[n].tc_id);
+#endif
+	}
+#endif
 	sprintf(fmt, "VCE%%c exceptions\t\t: %s\n",
 		      cpu_has_vce ? "%u" : "not available");
 	seq_printf(m, fmt, 'D', vced_count);
diff --git a/arch/mips/kernel/process.c b/arch/mips/kernel/process.c
index ddc7610..6ae540e 100644
--- a/arch/mips/kernel/process.c
+++ b/arch/mips/kernel/process.c
@@ -60,15 +60,11 @@
 
 	/* New thread loses kernel privileges. */
 	status = regs->cp0_status & ~(ST0_CU0|ST0_CU1|ST0_FR|KU_MASK);
-#ifdef CONFIG_64BIT
-	status |= test_thread_flag(TIF_32BIT_REGS) ? 0 : ST0_FR;
-#endif
 	status |= KU_USER;
 	regs->cp0_status = status;
 	clear_used_math();
 	clear_fpu_owner();
-	if (cpu_has_dsp)
-		__init_dsp();
+	init_dsp();
 	regs->cp0_epc = pc;
 	regs->regs[29] = sp;
 }
diff --git a/arch/mips/kernel/ptrace.c b/arch/mips/kernel/ptrace.c
index b52e1d2..7da9b76 100644
--- a/arch/mips/kernel/ptrace.c
+++ b/arch/mips/kernel/ptrace.c
@@ -137,13 +137,13 @@
 		if (cpu_has_mipsmt) {
 			unsigned int vpflags = dvpe();
 			flags = read_c0_status();
-			__enable_fpu();
+			__enable_fpu(FPU_AS_IS);
 			__asm__ __volatile__("cfc1\t%0,$0" : "=r" (tmp));
 			write_c0_status(flags);
 			evpe(vpflags);
 		} else {
 			flags = read_c0_status();
-			__enable_fpu();
+			__enable_fpu(FPU_AS_IS);
 			__asm__ __volatile__("cfc1\t%0,$0" : "=r" (tmp));
 			write_c0_status(flags);
 		}
@@ -408,6 +408,7 @@
 	/* Read the word at location addr in the USER area. */
 	case PTRACE_PEEKUSR: {
 		struct pt_regs *regs;
+		fpureg_t *fregs;
 		unsigned long tmp = 0;
 
 		regs = task_pt_regs(child);
@@ -418,26 +419,28 @@
 			tmp = regs->regs[addr];
 			break;
 		case FPR_BASE ... FPR_BASE + 31:
-			if (tsk_used_math(child)) {
-				fpureg_t *fregs = get_fpu_regs(child);
+			if (!tsk_used_math(child)) {
+				/* FP not yet used */
+				tmp = -1;
+				break;
+			}
+			fregs = get_fpu_regs(child);
 
 #ifdef CONFIG_32BIT
+			if (test_thread_flag(TIF_32BIT_FPREGS)) {
 				/*
 				 * The odd registers are actually the high
 				 * order bits of the values stored in the even
 				 * registers - unless we're using r2k_switch.S.
 				 */
 				if (addr & 1)
-					tmp = (unsigned long) (fregs[((addr & ~1) - 32)] >> 32);
+					tmp = fregs[(addr & ~1) - 32] >> 32;
 				else
-					tmp = (unsigned long) (fregs[(addr - 32)] & 0xffffffff);
-#endif
-#ifdef CONFIG_64BIT
-				tmp = fregs[addr - FPR_BASE];
-#endif
-			} else {
-				tmp = -1;	/* FP not yet used  */
+					tmp = fregs[addr - 32];
+				break;
 			}
+#endif
+			tmp = fregs[addr - FPR_BASE];
 			break;
 		case PC:
 			tmp = regs->cp0_epc;
@@ -483,13 +486,13 @@
 			if (cpu_has_mipsmt) {
 				unsigned int vpflags = dvpe();
 				flags = read_c0_status();
-				__enable_fpu();
+				__enable_fpu(FPU_AS_IS);
 				__asm__ __volatile__("cfc1\t%0,$0": "=r" (tmp));
 				write_c0_status(flags);
 				evpe(vpflags);
 			} else {
 				flags = read_c0_status();
-				__enable_fpu();
+				__enable_fpu(FPU_AS_IS);
 				__asm__ __volatile__("cfc1\t%0,$0": "=r" (tmp));
 				write_c0_status(flags);
 			}
@@ -554,22 +557,25 @@
 				child->thread.fpu.fcr31 = 0;
 			}
 #ifdef CONFIG_32BIT
-			/*
-			 * The odd registers are actually the high order bits
-			 * of the values stored in the even registers - unless
-			 * we're using r2k_switch.S.
-			 */
-			if (addr & 1) {
-				fregs[(addr & ~1) - FPR_BASE] &= 0xffffffff;
-				fregs[(addr & ~1) - FPR_BASE] |= ((unsigned long long) data) << 32;
-			} else {
-				fregs[addr - FPR_BASE] &= ~0xffffffffLL;
-				fregs[addr - FPR_BASE] |= data;
+			if (test_thread_flag(TIF_32BIT_FPREGS)) {
+				/*
+				 * The odd registers are actually the high
+				 * order bits of the values stored in the even
+				 * registers - unless we're using r2k_switch.S.
+				 */
+				if (addr & 1) {
+					fregs[(addr & ~1) - FPR_BASE] &=
+						0xffffffff;
+					fregs[(addr & ~1) - FPR_BASE] |=
+						((u64)data) << 32;
+				} else {
+					fregs[addr - FPR_BASE] &= ~0xffffffffLL;
+					fregs[addr - FPR_BASE] |= data;
+				}
+				break;
 			}
 #endif
-#ifdef CONFIG_64BIT
 			fregs[addr - FPR_BASE] = data;
-#endif
 			break;
 		}
 		case PC:
diff --git a/arch/mips/kernel/ptrace32.c b/arch/mips/kernel/ptrace32.c
index 9486055..b8aa2dd 100644
--- a/arch/mips/kernel/ptrace32.c
+++ b/arch/mips/kernel/ptrace32.c
@@ -80,6 +80,7 @@
 	/* Read the word at location addr in the USER area. */
 	case PTRACE_PEEKUSR: {
 		struct pt_regs *regs;
+		fpureg_t *fregs;
 		unsigned int tmp;
 
 		regs = task_pt_regs(child);
@@ -90,21 +91,25 @@
 			tmp = regs->regs[addr];
 			break;
 		case FPR_BASE ... FPR_BASE + 31:
-			if (tsk_used_math(child)) {
-				fpureg_t *fregs = get_fpu_regs(child);
-
+			if (!tsk_used_math(child)) {
+				/* FP not yet used */
+				tmp = -1;
+				break;
+			}
+			fregs = get_fpu_regs(child);
+			if (test_thread_flag(TIF_32BIT_FPREGS)) {
 				/*
 				 * The odd registers are actually the high
 				 * order bits of the values stored in the even
 				 * registers - unless we're using r2k_switch.S.
 				 */
 				if (addr & 1)
-					tmp = (unsigned long) (fregs[((addr & ~1) - 32)] >> 32);
+					tmp = fregs[(addr & ~1) - 32] >> 32;
 				else
-					tmp = (unsigned long) (fregs[(addr - 32)] & 0xffffffff);
-			} else {
-				tmp = -1;	/* FP not yet used  */
+					tmp = fregs[addr - 32];
+				break;
 			}
+			tmp = fregs[addr - FPR_BASE];
 			break;
 		case PC:
 			tmp = regs->cp0_epc;
@@ -147,13 +152,13 @@
 			if (cpu_has_mipsmt) {
 				unsigned int vpflags = dvpe();
 				flags = read_c0_status();
-				__enable_fpu();
+				__enable_fpu(FPU_AS_IS);
 				__asm__ __volatile__("cfc1\t%0,$0": "=r" (tmp));
 				write_c0_status(flags);
 				evpe(vpflags);
 			} else {
 				flags = read_c0_status();
-				__enable_fpu();
+				__enable_fpu(FPU_AS_IS);
 				__asm__ __volatile__("cfc1\t%0,$0": "=r" (tmp));
 				write_c0_status(flags);
 			}
@@ -236,20 +241,24 @@
 				       sizeof(child->thread.fpu));
 				child->thread.fpu.fcr31 = 0;
 			}
-			/*
-			 * The odd registers are actually the high order bits
-			 * of the values stored in the even registers - unless
-			 * we're using r2k_switch.S.
-			 */
-			if (addr & 1) {
-				fregs[(addr & ~1) - FPR_BASE] &= 0xffffffff;
-				fregs[(addr & ~1) - FPR_BASE] |= ((unsigned long long) data) << 32;
-			} else {
-				fregs[addr - FPR_BASE] &= ~0xffffffffLL;
-				/* Must cast, lest sign extension fill upper
-				   bits!  */
-				fregs[addr - FPR_BASE] |= (unsigned int)data;
+			if (test_thread_flag(TIF_32BIT_FPREGS)) {
+				/*
+				 * The odd registers are actually the high
+				 * order bits of the values stored in the even
+				 * registers - unless we're using r2k_switch.S.
+				 */
+				if (addr & 1) {
+					fregs[(addr & ~1) - FPR_BASE] &=
+						0xffffffff;
+					fregs[(addr & ~1) - FPR_BASE] |=
+						((u64)data) << 32;
+				} else {
+					fregs[addr - FPR_BASE] &= ~0xffffffffLL;
+					fregs[addr - FPR_BASE] |= data;
+				}
+				break;
 			}
+			fregs[addr - FPR_BASE] = data;
 			break;
 		}
 		case PC:
diff --git a/arch/mips/kernel/r4k_fpu.S b/arch/mips/kernel/r4k_fpu.S
index 55ffe14..253b2fb 100644
--- a/arch/mips/kernel/r4k_fpu.S
+++ b/arch/mips/kernel/r4k_fpu.S
@@ -35,7 +35,15 @@
 LEAF(_save_fp_context)
 	cfc1	t1, fcr31
 
-#ifdef CONFIG_64BIT
+#if defined(CONFIG_64BIT) || defined(CONFIG_CPU_MIPS32_R2)
+	.set	push
+#ifdef CONFIG_CPU_MIPS32_R2
+	.set	mips64r2
+	mfc0	t0, CP0_STATUS
+	sll	t0, t0, 5
+	bgez	t0, 1f			# skip storing odd if FR=0
+	 nop
+#endif
 	/* Store the 16 odd double precision registers */
 	EX	sdc1 $f1, SC_FPREGS+8(a0)
 	EX	sdc1 $f3, SC_FPREGS+24(a0)
@@ -53,6 +61,7 @@
 	EX	sdc1 $f27, SC_FPREGS+216(a0)
 	EX	sdc1 $f29, SC_FPREGS+232(a0)
 	EX	sdc1 $f31, SC_FPREGS+248(a0)
+1:	.set	pop
 #endif
 
 	/* Store the 16 even double precision registers */
@@ -82,7 +91,31 @@
 LEAF(_save_fp_context32)
 	cfc1	t1, fcr31
 
-	EX	sdc1 $f0, SC32_FPREGS+0(a0)
+	mfc0	t0, CP0_STATUS
+	sll	t0, t0, 5
+	bgez	t0, 1f			# skip storing odd if FR=0
+	 nop
+
+	/* Store the 16 odd double precision registers */
+	EX      sdc1 $f1, SC32_FPREGS+8(a0)
+	EX      sdc1 $f3, SC32_FPREGS+24(a0)
+	EX      sdc1 $f5, SC32_FPREGS+40(a0)
+	EX      sdc1 $f7, SC32_FPREGS+56(a0)
+	EX      sdc1 $f9, SC32_FPREGS+72(a0)
+	EX      sdc1 $f11, SC32_FPREGS+88(a0)
+	EX      sdc1 $f13, SC32_FPREGS+104(a0)
+	EX      sdc1 $f15, SC32_FPREGS+120(a0)
+	EX      sdc1 $f17, SC32_FPREGS+136(a0)
+	EX      sdc1 $f19, SC32_FPREGS+152(a0)
+	EX      sdc1 $f21, SC32_FPREGS+168(a0)
+	EX      sdc1 $f23, SC32_FPREGS+184(a0)
+	EX      sdc1 $f25, SC32_FPREGS+200(a0)
+	EX      sdc1 $f27, SC32_FPREGS+216(a0)
+	EX      sdc1 $f29, SC32_FPREGS+232(a0)
+	EX      sdc1 $f31, SC32_FPREGS+248(a0)
+
+	/* Store the 16 even double precision registers */
+1:	EX	sdc1 $f0, SC32_FPREGS+0(a0)
 	EX	sdc1 $f2, SC32_FPREGS+16(a0)
 	EX	sdc1 $f4, SC32_FPREGS+32(a0)
 	EX	sdc1 $f6, SC32_FPREGS+48(a0)
@@ -114,7 +147,16 @@
  */
 LEAF(_restore_fp_context)
 	EX	lw t0, SC_FPC_CSR(a0)
-#ifdef CONFIG_64BIT
+
+#if defined(CONFIG_64BIT) || defined(CONFIG_CPU_MIPS32_R2)
+	.set	push
+#ifdef CONFIG_CPU_MIPS32_R2
+	.set	mips64r2
+	mfc0	t0, CP0_STATUS
+	sll	t0, t0, 5
+	bgez	t0, 1f			# skip loading odd if FR=0
+	 nop
+#endif
 	EX	ldc1 $f1, SC_FPREGS+8(a0)
 	EX	ldc1 $f3, SC_FPREGS+24(a0)
 	EX	ldc1 $f5, SC_FPREGS+40(a0)
@@ -131,6 +173,7 @@
 	EX	ldc1 $f27, SC_FPREGS+216(a0)
 	EX	ldc1 $f29, SC_FPREGS+232(a0)
 	EX	ldc1 $f31, SC_FPREGS+248(a0)
+1:	.set pop
 #endif
 	EX	ldc1 $f0, SC_FPREGS+0(a0)
 	EX	ldc1 $f2, SC_FPREGS+16(a0)
@@ -157,7 +200,30 @@
 LEAF(_restore_fp_context32)
 	/* Restore an o32 sigcontext.  */
 	EX	lw t0, SC32_FPC_CSR(a0)
-	EX	ldc1 $f0, SC32_FPREGS+0(a0)
+
+	mfc0	t0, CP0_STATUS
+	sll	t0, t0, 5
+	bgez	t0, 1f			# skip loading odd if FR=0
+	 nop
+
+	EX      ldc1 $f1, SC32_FPREGS+8(a0)
+	EX      ldc1 $f3, SC32_FPREGS+24(a0)
+	EX      ldc1 $f5, SC32_FPREGS+40(a0)
+	EX      ldc1 $f7, SC32_FPREGS+56(a0)
+	EX      ldc1 $f9, SC32_FPREGS+72(a0)
+	EX      ldc1 $f11, SC32_FPREGS+88(a0)
+	EX      ldc1 $f13, SC32_FPREGS+104(a0)
+	EX      ldc1 $f15, SC32_FPREGS+120(a0)
+	EX      ldc1 $f17, SC32_FPREGS+136(a0)
+	EX      ldc1 $f19, SC32_FPREGS+152(a0)
+	EX      ldc1 $f21, SC32_FPREGS+168(a0)
+	EX      ldc1 $f23, SC32_FPREGS+184(a0)
+	EX      ldc1 $f25, SC32_FPREGS+200(a0)
+	EX      ldc1 $f27, SC32_FPREGS+216(a0)
+	EX      ldc1 $f29, SC32_FPREGS+232(a0)
+	EX      ldc1 $f31, SC32_FPREGS+248(a0)
+
+1:	EX	ldc1 $f0, SC32_FPREGS+0(a0)
 	EX	ldc1 $f2, SC32_FPREGS+16(a0)
 	EX	ldc1 $f4, SC32_FPREGS+32(a0)
 	EX	ldc1 $f6, SC32_FPREGS+48(a0)
diff --git a/arch/mips/kernel/r4k_switch.S b/arch/mips/kernel/r4k_switch.S
index 078de5ea..cc78dd9 100644
--- a/arch/mips/kernel/r4k_switch.S
+++ b/arch/mips/kernel/r4k_switch.S
@@ -123,7 +123,7 @@
  * Save a thread's fp context.
  */
 LEAF(_save_fp)
-#ifdef CONFIG_64BIT
+#if defined(CONFIG_64BIT) || defined(CONFIG_CPU_MIPS32_R2)
 	mfc0	t0, CP0_STATUS
 #endif
 	fpu_save_double a0 t0 t1		# clobbers t1
@@ -134,7 +134,7 @@
  * Restore a thread's fp context.
  */
 LEAF(_restore_fp)
-#ifdef CONFIG_64BIT
+#if defined(CONFIG_64BIT) || defined(CONFIG_CPU_MIPS32_R2)
 	mfc0	t0, CP0_STATUS
 #endif
 	fpu_restore_double a0 t0 t1		# clobbers t1
@@ -228,6 +228,47 @@
 	mtc1	t1, $f29
 	mtc1	t1, $f30
 	mtc1	t1, $f31
+
+#ifdef CONFIG_CPU_MIPS32_R2
+	.set    push
+	.set    mips64r2
+	sll     t0, t0, 5			# is Status.FR set?
+	bgez    t0, 1f				# no: skip setting upper 32b
+
+	mthc1   t1, $f0
+	mthc1   t1, $f1
+	mthc1   t1, $f2
+	mthc1   t1, $f3
+	mthc1   t1, $f4
+	mthc1   t1, $f5
+	mthc1   t1, $f6
+	mthc1   t1, $f7
+	mthc1   t1, $f8
+	mthc1   t1, $f9
+	mthc1   t1, $f10
+	mthc1   t1, $f11
+	mthc1   t1, $f12
+	mthc1   t1, $f13
+	mthc1   t1, $f14
+	mthc1   t1, $f15
+	mthc1   t1, $f16
+	mthc1   t1, $f17
+	mthc1   t1, $f18
+	mthc1   t1, $f19
+	mthc1   t1, $f20
+	mthc1   t1, $f21
+	mthc1   t1, $f22
+	mthc1   t1, $f23
+	mthc1   t1, $f24
+	mthc1   t1, $f25
+	mthc1   t1, $f26
+	mthc1   t1, $f27
+	mthc1   t1, $f28
+	mthc1   t1, $f29
+	mthc1   t1, $f30
+	mthc1   t1, $f31
+1:	.set    pop
+#endif /* CONFIG_CPU_MIPS32_R2 */
 #else
 	.set	mips3
 	dmtc1	t1, $f0
diff --git a/arch/mips/kernel/rtlx-cmp.c b/arch/mips/kernel/rtlx-cmp.c
new file mode 100644
index 0000000..56dc696
--- /dev/null
+++ b/arch/mips/kernel/rtlx-cmp.c
@@ -0,0 +1,116 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2005 MIPS Technologies, Inc.  All rights reserved.
+ * Copyright (C) 2013 Imagination Technologies Ltd.
+ */
+#include <linux/device.h>
+#include <linux/fs.h>
+#include <linux/err.h>
+#include <linux/wait.h>
+#include <linux/sched.h>
+#include <linux/smp.h>
+
+#include <asm/mips_mt.h>
+#include <asm/vpe.h>
+#include <asm/rtlx.h>
+
+static int major;
+
+static void rtlx_interrupt(void)
+{
+	int i;
+	struct rtlx_info *info;
+	struct rtlx_info **p = vpe_get_shared(aprp_cpu_index());
+
+	if (p == NULL || *p == NULL)
+		return;
+
+	info = *p;
+
+	if (info->ap_int_pending == 1 && smp_processor_id() == 0) {
+		for (i = 0; i < RTLX_CHANNELS; i++) {
+			wake_up(&channel_wqs[i].lx_queue);
+			wake_up(&channel_wqs[i].rt_queue);
+		}
+		info->ap_int_pending = 0;
+	}
+}
+
+void _interrupt_sp(void)
+{
+	smp_send_reschedule(aprp_cpu_index());
+}
+
+int __init rtlx_module_init(void)
+{
+	struct device *dev;
+	int i, err;
+
+	if (!cpu_has_mipsmt) {
+		pr_warn("VPE loader: not a MIPS MT capable processor\n");
+		return -ENODEV;
+	}
+
+	if (num_possible_cpus() - aprp_cpu_index() < 1) {
+		pr_warn("No TCs reserved for AP/SP, not initializing RTLX.\n"
+			"Pass maxcpus=<n> as a kernel argument\n");
+
+		return -ENODEV;
+	}
+
+	major = register_chrdev(0, RTLX_MODULE_NAME, &rtlx_fops);
+	if (major < 0) {
+		pr_err("rtlx_module_init: unable to register device\n");
+		return major;
+	}
+
+	/* initialise the wait queues */
+	for (i = 0; i < RTLX_CHANNELS; i++) {
+		init_waitqueue_head(&channel_wqs[i].rt_queue);
+		init_waitqueue_head(&channel_wqs[i].lx_queue);
+		atomic_set(&channel_wqs[i].in_open, 0);
+		mutex_init(&channel_wqs[i].mutex);
+
+		dev = device_create(mt_class, NULL, MKDEV(major, i), NULL,
+				    "%s%d", RTLX_MODULE_NAME, i);
+		if (IS_ERR(dev)) {
+			err = PTR_ERR(dev);
+			goto out_chrdev;
+		}
+	}
+
+	/* set up notifiers */
+	rtlx_notify.start = rtlx_starting;
+	rtlx_notify.stop = rtlx_stopping;
+	vpe_notify(aprp_cpu_index(), &rtlx_notify);
+
+	if (cpu_has_vint) {
+		aprp_hook = rtlx_interrupt;
+	} else {
+		pr_err("APRP RTLX init on non-vectored-interrupt processor\n");
+		err = -ENODEV;
+		goto out_class;
+	}
+
+	return 0;
+
+out_class:
+	for (i = 0; i < RTLX_CHANNELS; i++)
+		device_destroy(mt_class, MKDEV(major, i));
+out_chrdev:
+	unregister_chrdev(major, RTLX_MODULE_NAME);
+
+	return err;
+}
+
+void __exit rtlx_module_exit(void)
+{
+	int i;
+
+	for (i = 0; i < RTLX_CHANNELS; i++)
+		device_destroy(mt_class, MKDEV(major, i));
+	unregister_chrdev(major, RTLX_MODULE_NAME);
+}
diff --git a/arch/mips/kernel/rtlx-mt.c b/arch/mips/kernel/rtlx-mt.c
new file mode 100644
index 0000000..91d61ba
--- /dev/null
+++ b/arch/mips/kernel/rtlx-mt.c
@@ -0,0 +1,148 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2005 MIPS Technologies, Inc.  All rights reserved.
+ * Copyright (C) 2013 Imagination Technologies Ltd.
+ */
+#include <linux/device.h>
+#include <linux/fs.h>
+#include <linux/err.h>
+#include <linux/wait.h>
+#include <linux/sched.h>
+#include <linux/interrupt.h>
+#include <linux/irq.h>
+
+#include <asm/mips_mt.h>
+#include <asm/vpe.h>
+#include <asm/rtlx.h>
+
+static int major;
+
+static void rtlx_dispatch(void)
+{
+	if (read_c0_cause() & read_c0_status() & C_SW0)
+		do_IRQ(MIPS_CPU_IRQ_BASE + MIPS_CPU_RTLX_IRQ);
+}
+
+/*
+ * Interrupt handler may be called before rtlx_init has otherwise had
+ * a chance to run.
+ */
+static irqreturn_t rtlx_interrupt(int irq, void *dev_id)
+{
+	unsigned int vpeflags;
+	unsigned long flags;
+	int i;
+
+	/* Ought not to be strictly necessary for SMTC builds */
+	local_irq_save(flags);
+	vpeflags = dvpe();
+	set_c0_status(0x100 << MIPS_CPU_RTLX_IRQ);
+	irq_enable_hazard();
+	evpe(vpeflags);
+	local_irq_restore(flags);
+
+	for (i = 0; i < RTLX_CHANNELS; i++) {
+		wake_up(&channel_wqs[i].lx_queue);
+		wake_up(&channel_wqs[i].rt_queue);
+	}
+
+	return IRQ_HANDLED;
+}
+
+static struct irqaction rtlx_irq = {
+	.handler	= rtlx_interrupt,
+	.name		= "RTLX",
+};
+
+static int rtlx_irq_num = MIPS_CPU_IRQ_BASE + MIPS_CPU_RTLX_IRQ;
+
+void _interrupt_sp(void)
+{
+	unsigned long flags;
+
+	local_irq_save(flags);
+	dvpe();
+	settc(1);
+	write_vpe_c0_cause(read_vpe_c0_cause() | C_SW0);
+	evpe(EVPE_ENABLE);
+	local_irq_restore(flags);
+}
+
+int __init rtlx_module_init(void)
+{
+	struct device *dev;
+	int i, err;
+
+	if (!cpu_has_mipsmt) {
+		pr_warn("VPE loader: not a MIPS MT capable processor\n");
+		return -ENODEV;
+	}
+
+	if (aprp_cpu_index() == 0) {
+		pr_warn("No TCs reserved for AP/SP, not initializing RTLX.\n"
+			"Pass maxtcs=<n> as a kernel argument\n");
+
+		return -ENODEV;
+	}
+
+	major = register_chrdev(0, RTLX_MODULE_NAME, &rtlx_fops);
+	if (major < 0) {
+		pr_err("rtlx_module_init: unable to register device\n");
+		return major;
+	}
+
+	/* initialise the wait queues */
+	for (i = 0; i < RTLX_CHANNELS; i++) {
+		init_waitqueue_head(&channel_wqs[i].rt_queue);
+		init_waitqueue_head(&channel_wqs[i].lx_queue);
+		atomic_set(&channel_wqs[i].in_open, 0);
+		mutex_init(&channel_wqs[i].mutex);
+
+		dev = device_create(mt_class, NULL, MKDEV(major, i), NULL,
+				    "%s%d", RTLX_MODULE_NAME, i);
+		if (IS_ERR(dev)) {
+			err = PTR_ERR(dev);
+			goto out_chrdev;
+		}
+	}
+
+	/* set up notifiers */
+	rtlx_notify.start = rtlx_starting;
+	rtlx_notify.stop = rtlx_stopping;
+	vpe_notify(aprp_cpu_index(), &rtlx_notify);
+
+	if (cpu_has_vint) {
+		aprp_hook = rtlx_dispatch;
+	} else {
+		pr_err("APRP RTLX init on non-vectored-interrupt processor\n");
+		err = -ENODEV;
+		goto out_class;
+	}
+
+	rtlx_irq.dev_id = rtlx;
+	err = setup_irq(rtlx_irq_num, &rtlx_irq);
+	if (err)
+		goto out_class;
+
+	return 0;
+
+out_class:
+	for (i = 0; i < RTLX_CHANNELS; i++)
+		device_destroy(mt_class, MKDEV(major, i));
+out_chrdev:
+	unregister_chrdev(major, RTLX_MODULE_NAME);
+
+	return err;
+}
+
+void __exit rtlx_module_exit(void)
+{
+	int i;
+
+	for (i = 0; i < RTLX_CHANNELS; i++)
+		device_destroy(mt_class, MKDEV(major, i));
+	unregister_chrdev(major, RTLX_MODULE_NAME);
+}
diff --git a/arch/mips/kernel/rtlx.c b/arch/mips/kernel/rtlx.c
index 2c12ea1..31b1b76 100644
--- a/arch/mips/kernel/rtlx.c
+++ b/arch/mips/kernel/rtlx.c
@@ -1,114 +1,51 @@
 /*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
  * Copyright (C) 2005 MIPS Technologies, Inc.  All rights reserved.
  * Copyright (C) 2005, 06 Ralf Baechle (ralf@linux-mips.org)
- *
- *  This program is free software; you can distribute it and/or modify it
- *  under the terms of the GNU General Public License (Version 2) as
- *  published by the Free Software Foundation.
- *
- *  This program is distributed in the hope it will be useful, but WITHOUT
- *  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- *  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- *  for more details.
- *
- *  You should have received a copy of the GNU General Public License along
- *  with this program; if not, write to the Free Software Foundation, Inc.,
- *  59 Temple Place - Suite 330, Boston MA 02111-1307, USA.
- *
+ * Copyright (C) 2013 Imagination Technologies Ltd.
  */
-
-#include <linux/device.h>
 #include <linux/kernel.h>
 #include <linux/fs.h>
-#include <linux/init.h>
-#include <asm/uaccess.h>
-#include <linux/list.h>
-#include <linux/vmalloc.h>
-#include <linux/elf.h>
-#include <linux/seq_file.h>
 #include <linux/syscalls.h>
 #include <linux/moduleloader.h>
-#include <linux/interrupt.h>
-#include <linux/poll.h>
-#include <linux/sched.h>
-#include <linux/wait.h>
+#include <linux/atomic.h>
 #include <asm/mipsmtregs.h>
 #include <asm/mips_mt.h>
-#include <asm/cacheflush.h>
-#include <linux/atomic.h>
-#include <asm/cpu.h>
 #include <asm/processor.h>
-#include <asm/vpe.h>
 #include <asm/rtlx.h>
 #include <asm/setup.h>
+#include <asm/vpe.h>
 
-static struct rtlx_info *rtlx;
-static int major;
-static char module_name[] = "rtlx";
-
-static struct chan_waitqueues {
-	wait_queue_head_t rt_queue;
-	wait_queue_head_t lx_queue;
-	atomic_t in_open;
-	struct mutex mutex;
-} channel_wqs[RTLX_CHANNELS];
-
-static struct vpe_notifications notify;
 static int sp_stopping;
-
-extern void *vpe_get_shared(int index);
-
-static void rtlx_dispatch(void)
-{
-	do_IRQ(MIPS_CPU_IRQ_BASE + MIPS_CPU_RTLX_IRQ);
-}
-
-
-/* Interrupt handler may be called before rtlx_init has otherwise had
-   a chance to run.
-*/
-static irqreturn_t rtlx_interrupt(int irq, void *dev_id)
-{
-	unsigned int vpeflags;
-	unsigned long flags;
-	int i;
-
-	/* Ought not to be strictly necessary for SMTC builds */
-	local_irq_save(flags);
-	vpeflags = dvpe();
-	set_c0_status(0x100 << MIPS_CPU_RTLX_IRQ);
-	irq_enable_hazard();
-	evpe(vpeflags);
-	local_irq_restore(flags);
-
-	for (i = 0; i < RTLX_CHANNELS; i++) {
-			wake_up(&channel_wqs[i].lx_queue);
-			wake_up(&channel_wqs[i].rt_queue);
-	}
-
-	return IRQ_HANDLED;
-}
+struct rtlx_info *rtlx;
+struct chan_waitqueues channel_wqs[RTLX_CHANNELS];
+struct vpe_notifications rtlx_notify;
+void (*aprp_hook)(void) = NULL;
+EXPORT_SYMBOL(aprp_hook);
 
 static void __used dump_rtlx(void)
 {
 	int i;
 
-	printk("id 0x%lx state %d\n", rtlx->id, rtlx->state);
+	pr_info("id 0x%lx state %d\n", rtlx->id, rtlx->state);
 
 	for (i = 0; i < RTLX_CHANNELS; i++) {
 		struct rtlx_channel *chan = &rtlx->channel[i];
 
-		printk(" rt_state %d lx_state %d buffer_size %d\n",
-		       chan->rt_state, chan->lx_state, chan->buffer_size);
+		pr_info(" rt_state %d lx_state %d buffer_size %d\n",
+			chan->rt_state, chan->lx_state, chan->buffer_size);
 
-		printk(" rt_read %d rt_write %d\n",
-		       chan->rt_read, chan->rt_write);
+		pr_info(" rt_read %d rt_write %d\n",
+			chan->rt_read, chan->rt_write);
 
-		printk(" lx_read %d lx_write %d\n",
-		       chan->lx_read, chan->lx_write);
+		pr_info(" lx_read %d lx_write %d\n",
+			chan->lx_read, chan->lx_write);
 
-		printk(" rt_buffer <%s>\n", chan->rt_buffer);
-		printk(" lx_buffer <%s>\n", chan->lx_buffer);
+		pr_info(" rt_buffer <%s>\n", chan->rt_buffer);
+		pr_info(" lx_buffer <%s>\n", chan->lx_buffer);
 	}
 }
 
@@ -116,8 +53,7 @@
 static int rtlx_init(struct rtlx_info *rtlxi)
 {
 	if (rtlxi->id != RTLX_ID) {
-		printk(KERN_ERR "no valid RTLX id at 0x%p 0x%lx\n",
-			rtlxi, rtlxi->id);
+		pr_err("no valid RTLX id at 0x%p 0x%lx\n", rtlxi, rtlxi->id);
 		return -ENOEXEC;
 	}
 
@@ -127,20 +63,20 @@
 }
 
 /* notifications */
-static void starting(int vpe)
+void rtlx_starting(int vpe)
 {
 	int i;
 	sp_stopping = 0;
 
 	/* force a reload of rtlx */
-	rtlx=NULL;
+	rtlx = NULL;
 
 	/* wake up any sleeping rtlx_open's */
 	for (i = 0; i < RTLX_CHANNELS; i++)
 		wake_up_interruptible(&channel_wqs[i].lx_queue);
 }
 
-static void stopping(int vpe)
+void rtlx_stopping(int vpe)
 {
 	int i;
 
@@ -158,31 +94,30 @@
 	int ret = 0;
 
 	if (index >= RTLX_CHANNELS) {
-		printk(KERN_DEBUG "rtlx_open index out of range\n");
+		pr_debug("rtlx_open index out of range\n");
 		return -ENOSYS;
 	}
 
 	if (atomic_inc_return(&channel_wqs[index].in_open) > 1) {
-		printk(KERN_DEBUG "rtlx_open channel %d already opened\n",
-		       index);
+		pr_debug("rtlx_open channel %d already opened\n", index);
 		ret = -EBUSY;
 		goto out_fail;
 	}
 
 	if (rtlx == NULL) {
-		if( (p = vpe_get_shared(tclimit)) == NULL) {
-		    if (can_sleep) {
-			ret = __wait_event_interruptible(
+		p = vpe_get_shared(aprp_cpu_index());
+		if (p == NULL) {
+			if (can_sleep) {
+				ret = __wait_event_interruptible(
 					channel_wqs[index].lx_queue,
-					(p = vpe_get_shared(tclimit)));
-			if (ret)
+					(p = vpe_get_shared(aprp_cpu_index())));
+				if (ret)
+					goto out_fail;
+			} else {
+				pr_debug("No SP program loaded, and device opened with O_NONBLOCK\n");
+				ret = -ENOSYS;
 				goto out_fail;
-		    } else {
-			printk(KERN_DEBUG "No SP program loaded, and device "
-					"opened with O_NONBLOCK\n");
-			ret = -ENOSYS;
-			goto out_fail;
-		    }
+			}
 		}
 
 		smp_rmb();
@@ -204,24 +139,24 @@
 					ret = -ERESTARTSYS;
 					goto out_fail;
 				}
-				finish_wait(&channel_wqs[index].lx_queue, &wait);
+				finish_wait(&channel_wqs[index].lx_queue,
+					    &wait);
 			} else {
-				pr_err(" *vpe_get_shared is NULL. "
-				       "Has an SP program been loaded?\n");
+				pr_err(" *vpe_get_shared is NULL. Has an SP program been loaded?\n");
 				ret = -ENOSYS;
 				goto out_fail;
 			}
 		}
 
 		if ((unsigned int)*p < KSEG0) {
-			printk(KERN_WARNING "vpe_get_shared returned an "
-			       "invalid pointer maybe an error code %d\n",
-			       (int)*p);
+			pr_warn("vpe_get_shared returned an invalid pointer maybe an error code %d\n",
+				(int)*p);
 			ret = -ENOSYS;
 			goto out_fail;
 		}
 
-		if ((ret = rtlx_init(*p)) < 0)
+		ret = rtlx_init(*p);
+		if (ret < 0)
 			goto out_ret;
 	}
 
@@ -352,7 +287,7 @@
 	size_t fl;
 
 	if (rtlx == NULL)
-		return(-ENOSYS);
+		return -ENOSYS;
 
 	rt = &rtlx->channel[index];
 
@@ -361,8 +296,8 @@
 	rt_read = rt->rt_read;
 
 	/* total number of bytes to copy */
-	count = min(count, (size_t)write_spacefree(rt_read, rt->rt_write,
-							rt->buffer_size));
+	count = min_t(size_t, count, write_spacefree(rt_read, rt->rt_write,
+						     rt->buffer_size));
 
 	/* first bit from write pointer to the end of the buffer, or count */
 	fl = min(count, (size_t) rt->buffer_size - rt->rt_write);
@@ -372,9 +307,8 @@
 		goto out;
 
 	/* if there's any left copy to the beginning of the buffer */
-	if (count - fl) {
+	if (count - fl)
 		failed = copy_from_user(rt->rt_buffer, buffer + fl, count - fl);
-	}
 
 out:
 	count -= failed;
@@ -384,6 +318,8 @@
 	smp_wmb();
 	mutex_unlock(&channel_wqs[index].mutex);
 
+	_interrupt_sp();
+
 	return count;
 }
 
@@ -398,7 +334,7 @@
 	return rtlx_release(iminor(inode));
 }
 
-static unsigned int file_poll(struct file *file, poll_table * wait)
+static unsigned int file_poll(struct file *file, poll_table *wait)
 {
 	int minor = iminor(file_inode(file));
 	unsigned int mask = 0;
@@ -420,21 +356,20 @@
 	return mask;
 }
 
-static ssize_t file_read(struct file *file, char __user * buffer, size_t count,
-			 loff_t * ppos)
+static ssize_t file_read(struct file *file, char __user *buffer, size_t count,
+			 loff_t *ppos)
 {
 	int minor = iminor(file_inode(file));
 
 	/* data available? */
-	if (!rtlx_read_poll(minor, (file->f_flags & O_NONBLOCK) ? 0 : 1)) {
-		return 0;	// -EAGAIN makes cat whinge
-	}
+	if (!rtlx_read_poll(minor, (file->f_flags & O_NONBLOCK) ? 0 : 1))
+		return 0;	/* -EAGAIN makes 'cat' whine */
 
 	return rtlx_read(minor, buffer, count);
 }
 
-static ssize_t file_write(struct file *file, const char __user * buffer,
-			  size_t count, loff_t * ppos)
+static ssize_t file_write(struct file *file, const char __user *buffer,
+			  size_t count, loff_t *ppos)
 {
 	int minor = iminor(file_inode(file));
 
@@ -454,100 +389,16 @@
 	return rtlx_write(minor, buffer, count);
 }
 
-static const struct file_operations rtlx_fops = {
+const struct file_operations rtlx_fops = {
 	.owner =   THIS_MODULE,
-	.open =	   file_open,
+	.open =    file_open,
 	.release = file_release,
 	.write =   file_write,
-	.read =	   file_read,
-	.poll =	   file_poll,
+	.read =    file_read,
+	.poll =    file_poll,
 	.llseek =  noop_llseek,
 };
 
-static struct irqaction rtlx_irq = {
-	.handler	= rtlx_interrupt,
-	.name		= "RTLX",
-};
-
-static int rtlx_irq_num = MIPS_CPU_IRQ_BASE + MIPS_CPU_RTLX_IRQ;
-
-static char register_chrdev_failed[] __initdata =
-	KERN_ERR "rtlx_module_init: unable to register device\n";
-
-static int __init rtlx_module_init(void)
-{
-	struct device *dev;
-	int i, err;
-
-	if (!cpu_has_mipsmt) {
-		printk("VPE loader: not a MIPS MT capable processor\n");
-		return -ENODEV;
-	}
-
-	if (tclimit == 0) {
-		printk(KERN_WARNING "No TCs reserved for AP/SP, not "
-		       "initializing RTLX.\nPass maxtcs=<n> argument as kernel "
-		       "argument\n");
-
-		return -ENODEV;
-	}
-
-	major = register_chrdev(0, module_name, &rtlx_fops);
-	if (major < 0) {
-		printk(register_chrdev_failed);
-		return major;
-	}
-
-	/* initialise the wait queues */
-	for (i = 0; i < RTLX_CHANNELS; i++) {
-		init_waitqueue_head(&channel_wqs[i].rt_queue);
-		init_waitqueue_head(&channel_wqs[i].lx_queue);
-		atomic_set(&channel_wqs[i].in_open, 0);
-		mutex_init(&channel_wqs[i].mutex);
-
-		dev = device_create(mt_class, NULL, MKDEV(major, i), NULL,
-				    "%s%d", module_name, i);
-		if (IS_ERR(dev)) {
-			err = PTR_ERR(dev);
-			goto out_chrdev;
-		}
-	}
-
-	/* set up notifiers */
-	notify.start = starting;
-	notify.stop = stopping;
-	vpe_notify(tclimit, &notify);
-
-	if (cpu_has_vint)
-		set_vi_handler(MIPS_CPU_RTLX_IRQ, rtlx_dispatch);
-	else {
-		pr_err("APRP RTLX init on non-vectored-interrupt processor\n");
-		err = -ENODEV;
-		goto out_chrdev;
-	}
-
-	rtlx_irq.dev_id = rtlx;
-	setup_irq(rtlx_irq_num, &rtlx_irq);
-
-	return 0;
-
-out_chrdev:
-	for (i = 0; i < RTLX_CHANNELS; i++)
-		device_destroy(mt_class, MKDEV(major, i));
-
-	return err;
-}
-
-static void __exit rtlx_module_exit(void)
-{
-	int i;
-
-	for (i = 0; i < RTLX_CHANNELS; i++)
-		device_destroy(mt_class, MKDEV(major, i));
-
-	unregister_chrdev(major, module_name);
-}
-
 module_init(rtlx_module_init);
 module_exit(rtlx_module_exit);
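For context, a minimal sketch (not part of this patch) of how the aprp_hook
callback exported above could be consumed. The actual assignment and call
sites live in the MT/CMP-specific RTLX files that are not shown in this hunk,
so the helper names below and the header assumed to declare the hook are
illustrative only:

	#include <asm/rtlx.h>	/* assumed to declare: extern void (*aprp_hook)(void); */

	static void my_aprp_irq_setup(void)	/* hypothetical platform helper */
	{
		/* platform-specific APRP interrupt plumbing would go here */
	}

	static int __init my_aprp_glue_init(void)
	{
		/* callers of the hook are expected to test it against NULL */
		aprp_hook = my_aprp_irq_setup;
		return 0;
	}
	late_initcall(my_aprp_glue_init);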
 
diff --git a/arch/mips/kernel/scall32-o32.S b/arch/mips/kernel/scall32-o32.S
index e8e541b..a5b14f4 100644
--- a/arch/mips/kernel/scall32-o32.S
+++ b/arch/mips/kernel/scall32-o32.S
@@ -563,3 +563,5 @@
 	PTR	sys_process_vm_writev
 	PTR	sys_kcmp
 	PTR	sys_finit_module
+	PTR	sys_sched_setattr
+	PTR	sys_sched_getattr		/* 4350 */
diff --git a/arch/mips/kernel/scall64-64.S b/arch/mips/kernel/scall64-64.S
index 57e3742..b56e254 100644
--- a/arch/mips/kernel/scall64-64.S
+++ b/arch/mips/kernel/scall64-64.S
@@ -425,4 +425,6 @@
 	PTR	sys_kcmp
 	PTR	sys_finit_module
 	PTR	sys_getdents64
+	PTR	sys_sched_setattr
+	PTR	sys_sched_getattr		/* 5310 */
 	.size	sys_call_table,.-sys_call_table
diff --git a/arch/mips/kernel/scall64-n32.S b/arch/mips/kernel/scall64-n32.S
index 2f48f59..f7e5b72 100644
--- a/arch/mips/kernel/scall64-n32.S
+++ b/arch/mips/kernel/scall64-n32.S
@@ -418,4 +418,6 @@
 	PTR	compat_sys_process_vm_writev	/* 6310 */
 	PTR	sys_kcmp
 	PTR	sys_finit_module
+	PTR	sys_sched_setattr
+	PTR	sys_sched_getattr
 	.size	sysn32_call_table,.-sysn32_call_table
diff --git a/arch/mips/kernel/scall64-o32.S b/arch/mips/kernel/scall64-o32.S
index f1acdb4..6788727d 100644
--- a/arch/mips/kernel/scall64-o32.S
+++ b/arch/mips/kernel/scall64-o32.S
@@ -541,4 +541,6 @@
 	PTR	compat_sys_process_vm_writev
 	PTR	sys_kcmp
 	PTR	sys_finit_module
+	PTR	sys_sched_setattr
+	PTR	sys_sched_getattr		/* 4350 */
 	.size	sys32_call_table,.-sys32_call_table
diff --git a/arch/mips/kernel/segment.c b/arch/mips/kernel/segment.c
new file mode 100644
index 0000000..076ead2
--- /dev/null
+++ b/arch/mips/kernel/segment.c
@@ -0,0 +1,110 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2013 Imagination Technologies Ltd.
+ */
+
+#include <linux/kernel.h>
+#include <linux/debugfs.h>
+#include <linux/seq_file.h>
+#include <asm/cpu.h>
+#include <asm/mipsregs.h>
+
+static void build_segment_config(char *str, unsigned int cfg)
+{
+	unsigned int am;
+	static const char * const am_str[] = {
+		"UK", "MK", "MSK", "MUSK", "MUSUK", "USK",
+		"RSRVD", "UUSK"};
+
+	/* Segment access mode. */
+	am = (cfg & MIPS_SEGCFG_AM) >> MIPS_SEGCFG_AM_SHIFT;
+	str += sprintf(str, "%-5s", am_str[am]);
+
+	/*
+	 * Access modes MK, MSK and MUSK are mapped segments. Therefore
+	 * there is no direct physical address mapping.
+	 */
+	if ((am == 0) || (am > 3)) {
+		str += sprintf(str, "         %03lx",
+			((cfg & MIPS_SEGCFG_PA) >> MIPS_SEGCFG_PA_SHIFT));
+		str += sprintf(str, "         %01ld",
+			((cfg & MIPS_SEGCFG_C) >> MIPS_SEGCFG_C_SHIFT));
+	} else {
+		str += sprintf(str, "         UND");
+		str += sprintf(str, "         U");
+	}
+
+	/* Exception configuration. */
+	str += sprintf(str, "       %01ld\n",
+		((cfg & MIPS_SEGCFG_EU) >> MIPS_SEGCFG_EU_SHIFT));
+}
+
+static int show_segments(struct seq_file *m, void *v)
+{
+	unsigned int segcfg;
+	char str[42];
+
+	seq_puts(m, "Segment   Virtual    Size   Access Mode   Physical   Caching   EU\n");
+	seq_puts(m, "-------   -------    ----   -----------   --------   -------   --\n");
+
+	segcfg = read_c0_segctl0();
+	build_segment_config(str, segcfg);
+	seq_printf(m, "   0      e0000000   512M      %s", str);
+
+	segcfg >>= 16;
+	build_segment_config(str, segcfg);
+	seq_printf(m, "   1      c0000000   512M      %s", str);
+
+	segcfg = read_c0_segctl1();
+	build_segment_config(str, segcfg);
+	seq_printf(m, "   2      a0000000   512M      %s", str);
+
+	segcfg >>= 16;
+	build_segment_config(str, segcfg);
+	seq_printf(m, "   3      80000000   512M      %s", str);
+
+	segcfg = read_c0_segctl2();
+	build_segment_config(str, segcfg);
+	seq_printf(m, "   4      40000000    1G       %s", str);
+
+	segcfg >>= 16;
+	build_segment_config(str, segcfg);
+	seq_printf(m, "   5      00000000    1G       %s\n", str);
+
+	return 0;
+}
+
+static int segments_open(struct inode *inode, struct file *file)
+{
+	return single_open(file, show_segments, NULL);
+}
+
+static const struct file_operations segments_fops = {
+	.open		= segments_open,
+	.read		= seq_read,
+	.llseek		= seq_lseek,
+	.release	= single_release,
+};
+
+static int __init segments_info(void)
+{
+	extern struct dentry *mips_debugfs_dir;
+	struct dentry *segments;
+
+	if (cpu_has_segments) {
+		if (!mips_debugfs_dir)
+			return -ENODEV;
+
+		segments = debugfs_create_file("segments", S_IRUGO,
+					       mips_debugfs_dir, NULL,
+					       &segments_fops);
+		if (!segments)
+			return -ENOMEM;
+	}
+	return 0;
+}
+
+device_initcall(segments_info);
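Once this lands, the new entry appears under the MIPS debugfs directory
(typically /sys/kernel/debug/mips/segments when debugfs is mounted; the
"mips" directory name comes from mips_debugfs_dir and is an assumption here).
Based on the format strings above, the file prints a small table roughly of
the form below; the access mode, physical address, caching and EU values are
illustrative only and depend on the CP0 SegCtl register contents:

	Segment   Virtual    Size   Access Mode   Physical   Caching   EU
	-------   -------    ----   -----------   --------   -------   --
	   0      e0000000   512M      MK            UND         U       0
	   3      80000000   512M      MUSUK         000         3       0
	   ...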
diff --git a/arch/mips/kernel/signal.c b/arch/mips/kernel/signal.c
index 2f285ab..5199563 100644
--- a/arch/mips/kernel/signal.c
+++ b/arch/mips/kernel/signal.c
@@ -71,8 +71,9 @@
 	int err;
 	while (1) {
 		lock_fpu_owner();
-		own_fpu_inatomic(1);
-		err = save_fp_context(sc); /* this might fail */
+		err = own_fpu_inatomic(1);
+		if (!err)
+			err = save_fp_context(sc); /* this might fail */
 		unlock_fpu_owner();
 		if (likely(!err))
 			break;
@@ -91,8 +92,9 @@
 	int err, tmp __maybe_unused;
 	while (1) {
 		lock_fpu_owner();
-		own_fpu_inatomic(0);
-		err = restore_fp_context(sc); /* this might fail */
+		err = own_fpu_inatomic(0);
+		if (!err)
+			err = restore_fp_context(sc); /* this might fail */
 		unlock_fpu_owner();
 		if (likely(!err))
 			break;
diff --git a/arch/mips/kernel/signal32.c b/arch/mips/kernel/signal32.c
index 1905a41..3d60f77 100644
--- a/arch/mips/kernel/signal32.c
+++ b/arch/mips/kernel/signal32.c
@@ -85,8 +85,9 @@
 	int err;
 	while (1) {
 		lock_fpu_owner();
-		own_fpu_inatomic(1);
-		err = save_fp_context32(sc); /* this might fail */
+		err = own_fpu_inatomic(1);
+		if (!err)
+			err = save_fp_context32(sc); /* this might fail */
 		unlock_fpu_owner();
 		if (likely(!err))
 			break;
@@ -105,8 +106,9 @@
 	int err, tmp __maybe_unused;
 	while (1) {
 		lock_fpu_owner();
-		own_fpu_inatomic(0);
-		err = restore_fp_context32(sc); /* this might fail */
+		err = own_fpu_inatomic(0);
+		if (!err)
+			err = restore_fp_context32(sc); /* this might fail */
 		unlock_fpu_owner();
 		if (likely(!err))
 			break;
diff --git a/arch/mips/kernel/smp-bmips.c b/arch/mips/kernel/smp-bmips.c
index 2362665..ea4c2dc 100644
--- a/arch/mips/kernel/smp-bmips.c
+++ b/arch/mips/kernel/smp-bmips.c
@@ -49,8 +49,10 @@
 unsigned long bmips_smp_boot_sp;
 unsigned long bmips_smp_boot_gp;
 
-static void bmips_send_ipi_single(int cpu, unsigned int action);
-static irqreturn_t bmips_ipi_interrupt(int irq, void *dev_id);
+static void bmips43xx_send_ipi_single(int cpu, unsigned int action);
+static void bmips5000_send_ipi_single(int cpu, unsigned int action);
+static irqreturn_t bmips43xx_ipi_interrupt(int irq, void *dev_id);
+static irqreturn_t bmips5000_ipi_interrupt(int irq, void *dev_id);
 
 /* SW interrupts 0,1 are used for interprocessor signaling */
 #define IPI0_IRQ			(MIPS_CPU_IRQ_BASE + 0)
@@ -64,49 +66,58 @@
 static void __init bmips_smp_setup(void)
 {
 	int i, cpu = 1, boot_cpu = 0;
-
-#if defined(CONFIG_CPU_BMIPS4350) || defined(CONFIG_CPU_BMIPS4380)
 	int cpu_hw_intr;
 
-	/* arbitration priority */
-	clear_c0_brcm_cmt_ctrl(0x30);
+	switch (current_cpu_type()) {
+	case CPU_BMIPS4350:
+	case CPU_BMIPS4380:
+		/* arbitration priority */
+		clear_c0_brcm_cmt_ctrl(0x30);
 
-	/* NBK and weak order flags */
-	set_c0_brcm_config_0(0x30000);
+		/* NBK and weak order flags */
+		set_c0_brcm_config_0(0x30000);
 
-	/* Find out if we are running on TP0 or TP1 */
-	boot_cpu = !!(read_c0_brcm_cmt_local() & (1 << 31));
+		/* Find out if we are running on TP0 or TP1 */
+		boot_cpu = !!(read_c0_brcm_cmt_local() & (1 << 31));
 
-	/*
-	 * MIPS interrupts 0,1 (SW INT 0,1) cross over to the other thread
-	 * MIPS interrupt 2 (HW INT 0) is the CPU0 L1 controller output
-	 * MIPS interrupt 3 (HW INT 1) is the CPU1 L1 controller output
-	 */
-	if (boot_cpu == 0)
-		cpu_hw_intr = 0x02;
-	else
-		cpu_hw_intr = 0x1d;
+		/*
+		 * MIPS interrupts 0,1 (SW INT 0,1) cross over to the other
+		 * thread
+		 * MIPS interrupt 2 (HW INT 0) is the CPU0 L1 controller output
+		 * MIPS interrupt 3 (HW INT 1) is the CPU1 L1 controller output
+		 */
+		if (boot_cpu == 0)
+			cpu_hw_intr = 0x02;
+		else
+			cpu_hw_intr = 0x1d;
 
-	change_c0_brcm_cmt_intr(0xf8018000, (cpu_hw_intr << 27) | (0x03 << 15));
+		change_c0_brcm_cmt_intr(0xf8018000,
+					(cpu_hw_intr << 27) | (0x03 << 15));
 
-	/* single core, 2 threads (2 pipelines) */
-	max_cpus = 2;
-#elif defined(CONFIG_CPU_BMIPS5000)
-	/* enable raceless SW interrupts */
-	set_c0_brcm_config(0x03 << 22);
+		/* single core, 2 threads (2 pipelines) */
+		max_cpus = 2;
 
-	/* route HW interrupt 0 to CPU0, HW interrupt 1 to CPU1 */
-	change_c0_brcm_mode(0x1f << 27, 0x02 << 27);
+		break;
+	case CPU_BMIPS5000:
+		/* enable raceless SW interrupts */
+		set_c0_brcm_config(0x03 << 22);
 
-	/* N cores, 2 threads per core */
-	max_cpus = (((read_c0_brcm_config() >> 6) & 0x03) + 1) << 1;
+		/* route HW interrupt 0 to CPU0, HW interrupt 1 to CPU1 */
+		change_c0_brcm_mode(0x1f << 27, 0x02 << 27);
 
-	/* clear any pending SW interrupts */
-	for (i = 0; i < max_cpus; i++) {
-		write_c0_brcm_action(ACTION_CLR_IPI(i, 0));
-		write_c0_brcm_action(ACTION_CLR_IPI(i, 1));
+		/* N cores, 2 threads per core */
+		max_cpus = (((read_c0_brcm_config() >> 6) & 0x03) + 1) << 1;
+
+		/* clear any pending SW interrupts */
+		for (i = 0; i < max_cpus; i++) {
+			write_c0_brcm_action(ACTION_CLR_IPI(i, 0));
+			write_c0_brcm_action(ACTION_CLR_IPI(i, 1));
+		}
+
+		break;
+	default:
+		max_cpus = 1;
 	}
-#endif
 
 	if (!bmips_smp_enabled)
 		max_cpus = 1;
@@ -134,6 +145,20 @@
  */
 static void bmips_prepare_cpus(unsigned int max_cpus)
 {
+	irqreturn_t (*bmips_ipi_interrupt)(int irq, void *dev_id);
+
+	switch (current_cpu_type()) {
+	case CPU_BMIPS4350:
+	case CPU_BMIPS4380:
+		bmips_ipi_interrupt = bmips43xx_ipi_interrupt;
+		break;
+	case CPU_BMIPS5000:
+		bmips_ipi_interrupt = bmips5000_ipi_interrupt;
+		break;
+	default:
+		return;
+	}
+
 	if (request_irq(IPI0_IRQ, bmips_ipi_interrupt, IRQF_PERCPU,
 			"smp_ipi0", NULL))
 		panic("Can't request IPI0 interrupt");
@@ -168,26 +193,39 @@
 
 	pr_info("SMP: Booting CPU%d...\n", cpu);
 
-	if (cpumask_test_cpu(cpu, &bmips_booted_mask))
-		bmips_send_ipi_single(cpu, 0);
-	else {
-#if defined(CONFIG_CPU_BMIPS4350) || defined(CONFIG_CPU_BMIPS4380)
-		/* Reset slave TP1 if booting from TP0 */
-		if (cpu_logical_map(cpu) == 1)
-			set_c0_brcm_cmt_ctrl(0x01);
-#elif defined(CONFIG_CPU_BMIPS5000)
-		if (cpu & 0x01)
-			write_c0_brcm_action(ACTION_BOOT_THREAD(cpu));
-		else {
-			/*
-			 * core N thread 0 was already booted; just
-			 * pulse the NMI line
-			 */
-			bmips_write_zscm_reg(0x210, 0xc0000000);
-			udelay(10);
-			bmips_write_zscm_reg(0x210, 0x00);
+	if (cpumask_test_cpu(cpu, &bmips_booted_mask)) {
+		switch (current_cpu_type()) {
+		case CPU_BMIPS4350:
+		case CPU_BMIPS4380:
+			bmips43xx_send_ipi_single(cpu, 0);
+			break;
+		case CPU_BMIPS5000:
+			bmips5000_send_ipi_single(cpu, 0);
+			break;
 		}
-#endif
+	} else {
+		switch (current_cpu_type()) {
+		case CPU_BMIPS4350:
+		case CPU_BMIPS4380:
+			/* Reset slave TP1 if booting from TP0 */
+			if (cpu_logical_map(cpu) == 1)
+				set_c0_brcm_cmt_ctrl(0x01);
+			break;
+		case CPU_BMIPS5000:
+			if (cpu & 0x01)
+				write_c0_brcm_action(ACTION_BOOT_THREAD(cpu));
+			else {
+				/*
+				 * core N thread 0 was already booted; just
+				 * pulse the NMI line
+				 */
+				bmips_write_zscm_reg(0x210, 0xc0000000);
+				udelay(10);
+				bmips_write_zscm_reg(0x210, 0x00);
+			}
+			break;
+		}
 		cpumask_set_cpu(cpu, &bmips_booted_mask);
 	}
 }
@@ -199,26 +237,32 @@
 {
 	/* move NMI vector to kseg0, in case XKS01 is enabled */
 
-#if defined(CONFIG_CPU_BMIPS4350) || defined(CONFIG_CPU_BMIPS4380)
-	void __iomem *cbr = BMIPS_GET_CBR();
+	void __iomem *cbr;
 	unsigned long old_vec;
 	unsigned long relo_vector;
 	int boot_cpu;
 
-	boot_cpu = !!(read_c0_brcm_cmt_local() & (1 << 31));
-	relo_vector = boot_cpu ? BMIPS_RELO_VECTOR_CONTROL_0 :
-			  BMIPS_RELO_VECTOR_CONTROL_1;
+	switch (current_cpu_type()) {
+	case CPU_BMIPS4350:
+	case CPU_BMIPS4380:
+		cbr = BMIPS_GET_CBR();
 
-	old_vec = __raw_readl(cbr + relo_vector);
-	__raw_writel(old_vec & ~0x20000000, cbr + relo_vector);
+		boot_cpu = !!(read_c0_brcm_cmt_local() & (1 << 31));
+		relo_vector = boot_cpu ? BMIPS_RELO_VECTOR_CONTROL_0 :
+				  BMIPS_RELO_VECTOR_CONTROL_1;
 
-	clear_c0_cause(smp_processor_id() ? C_SW1 : C_SW0);
-#elif defined(CONFIG_CPU_BMIPS5000)
-	write_c0_brcm_bootvec(read_c0_brcm_bootvec() &
-		(smp_processor_id() & 0x01 ? ~0x20000000 : ~0x2000));
+		old_vec = __raw_readl(cbr + relo_vector);
+		__raw_writel(old_vec & ~0x20000000, cbr + relo_vector);
 
-	write_c0_brcm_action(ACTION_CLR_IPI(smp_processor_id(), 0));
-#endif
+		clear_c0_cause(smp_processor_id() ? C_SW1 : C_SW0);
+		break;
+	case CPU_BMIPS5000:
+		write_c0_brcm_bootvec(read_c0_brcm_bootvec() &
+			(smp_processor_id() & 0x01 ? ~0x20000000 : ~0x2000));
+
+		write_c0_brcm_action(ACTION_CLR_IPI(smp_processor_id(), 0));
+		break;
+	}
 }
 
 /*
@@ -243,8 +287,6 @@
 {
 }
 
-#if defined(CONFIG_CPU_BMIPS5000)
-
 /*
  * BMIPS5000 raceless IPIs
  *
@@ -253,12 +295,12 @@
  * IPI1 is used for SMP_CALL_FUNCTION
  */
 
-static void bmips_send_ipi_single(int cpu, unsigned int action)
+static void bmips5000_send_ipi_single(int cpu, unsigned int action)
 {
 	write_c0_brcm_action(ACTION_SET_IPI(cpu, action == SMP_CALL_FUNCTION));
 }
 
-static irqreturn_t bmips_ipi_interrupt(int irq, void *dev_id)
+static irqreturn_t bmips5000_ipi_interrupt(int irq, void *dev_id)
 {
 	int action = irq - IPI0_IRQ;
 
@@ -272,7 +314,14 @@
 	return IRQ_HANDLED;
 }
 
-#else
+static void bmips5000_send_ipi_mask(const struct cpumask *mask,
+	unsigned int action)
+{
+	unsigned int i;
+
+	for_each_cpu(i, mask)
+		bmips5000_send_ipi_single(i, action);
+}
 
 /*
  * BMIPS43xx racey IPIs
@@ -287,7 +336,7 @@
 static DEFINE_SPINLOCK(ipi_lock);
 static DEFINE_PER_CPU(int, ipi_action_mask);
 
-static void bmips_send_ipi_single(int cpu, unsigned int action)
+static void bmips43xx_send_ipi_single(int cpu, unsigned int action)
 {
 	unsigned long flags;
 
@@ -298,7 +347,7 @@
 	spin_unlock_irqrestore(&ipi_lock, flags);
 }
 
-static irqreturn_t bmips_ipi_interrupt(int irq, void *dev_id)
+static irqreturn_t bmips43xx_ipi_interrupt(int irq, void *dev_id)
 {
 	unsigned long flags;
 	int action, cpu = irq - IPI0_IRQ;
@@ -317,15 +366,13 @@
 	return IRQ_HANDLED;
 }
 
-#endif /* BMIPS type */
-
-static void bmips_send_ipi_mask(const struct cpumask *mask,
+static void bmips43xx_send_ipi_mask(const struct cpumask *mask,
 	unsigned int action)
 {
 	unsigned int i;
 
 	for_each_cpu(i, mask)
-		bmips_send_ipi_single(i, action);
+		bmips43xx_send_ipi_single(i, action);
 }
 
 #ifdef CONFIG_HOTPLUG_CPU
@@ -381,15 +428,30 @@
 
 #endif /* CONFIG_HOTPLUG_CPU */
 
-struct plat_smp_ops bmips_smp_ops = {
+struct plat_smp_ops bmips43xx_smp_ops = {
 	.smp_setup		= bmips_smp_setup,
 	.prepare_cpus		= bmips_prepare_cpus,
 	.boot_secondary		= bmips_boot_secondary,
 	.smp_finish		= bmips_smp_finish,
 	.init_secondary		= bmips_init_secondary,
 	.cpus_done		= bmips_cpus_done,
-	.send_ipi_single	= bmips_send_ipi_single,
-	.send_ipi_mask		= bmips_send_ipi_mask,
+	.send_ipi_single	= bmips43xx_send_ipi_single,
+	.send_ipi_mask		= bmips43xx_send_ipi_mask,
+#ifdef CONFIG_HOTPLUG_CPU
+	.cpu_disable		= bmips_cpu_disable,
+	.cpu_die		= bmips_cpu_die,
+#endif
+};
+
+struct plat_smp_ops bmips5000_smp_ops = {
+	.smp_setup		= bmips_smp_setup,
+	.prepare_cpus		= bmips_prepare_cpus,
+	.boot_secondary		= bmips_boot_secondary,
+	.smp_finish		= bmips_smp_finish,
+	.init_secondary		= bmips_init_secondary,
+	.cpus_done		= bmips_cpus_done,
+	.send_ipi_single	= bmips5000_send_ipi_single,
+	.send_ipi_mask		= bmips5000_send_ipi_mask,
 #ifdef CONFIG_HOTPLUG_CPU
 	.cpu_disable		= bmips_cpu_disable,
 	.cpu_die		= bmips_cpu_die,
@@ -427,43 +489,47 @@
 
 	BUG_ON(ebase != CKSEG0);
 
-#if defined(CONFIG_CPU_BMIPS4350)
-	/*
-	 * BMIPS4350 cannot relocate the normal vectors, but it
-	 * can relocate the BEV=1 vectors.  So CPU1 starts up at
-	 * the relocated BEV=1, IV=0 general exception vector @
-	 * 0xa000_0380.
-	 *
-	 * set_uncached_handler() is used here because:
-	 *  - CPU1 will run this from uncached space
-	 *  - None of the cacheflush functions are set up yet
-	 */
-	set_uncached_handler(BMIPS_WARM_RESTART_VEC - CKSEG0,
-		&bmips_smp_int_vec, 0x80);
-	__sync();
-	return;
-#elif defined(CONFIG_CPU_BMIPS4380)
-	/*
-	 * 0x8000_0000: reset/NMI (initially in kseg1)
-	 * 0x8000_0400: normal vectors
-	 */
-	new_ebase = 0x80000400;
-	cbr = BMIPS_GET_CBR();
-	__raw_writel(0x80080800, cbr + BMIPS_RELO_VECTOR_CONTROL_0);
-	__raw_writel(0xa0080800, cbr + BMIPS_RELO_VECTOR_CONTROL_1);
-#elif defined(CONFIG_CPU_BMIPS5000)
-	/*
-	 * 0x8000_0000: reset/NMI (initially in kseg1)
-	 * 0x8000_1000: normal vectors
-	 */
-	new_ebase = 0x80001000;
-	write_c0_brcm_bootvec(0xa0088008);
-	write_c0_ebase(new_ebase);
-	if (max_cpus > 2)
-		bmips_write_zscm_reg(0xa0, 0xa008a008);
-#else
-	return;
-#endif
+	switch (current_cpu_type()) {
+	case CPU_BMIPS4350:
+		/*
+		 * BMIPS4350 cannot relocate the normal vectors, but it
+		 * can relocate the BEV=1 vectors.  So CPU1 starts up at
+		 * the relocated BEV=1, IV=0 general exception vector @
+		 * 0xa000_0380.
+		 *
+		 * set_uncached_handler() is used here because:
+		 *  - CPU1 will run this from uncached space
+		 *  - None of the cacheflush functions are set up yet
+		 */
+		set_uncached_handler(BMIPS_WARM_RESTART_VEC - CKSEG0,
+			&bmips_smp_int_vec, 0x80);
+		__sync();
+		return;
+	case CPU_BMIPS4380:
+		/*
+		 * 0x8000_0000: reset/NMI (initially in kseg1)
+		 * 0x8000_0400: normal vectors
+		 */
+		new_ebase = 0x80000400;
+		cbr = BMIPS_GET_CBR();
+		__raw_writel(0x80080800, cbr + BMIPS_RELO_VECTOR_CONTROL_0);
+		__raw_writel(0xa0080800, cbr + BMIPS_RELO_VECTOR_CONTROL_1);
+		break;
+	case CPU_BMIPS5000:
+		/*
+		 * 0x8000_0000: reset/NMI (initially in kseg1)
+		 * 0x8000_1000: normal vectors
+		 */
+		new_ebase = 0x80001000;
+		write_c0_brcm_bootvec(0xa0088008);
+		write_c0_ebase(new_ebase);
+		if (max_cpus > 2)
+			bmips_write_zscm_reg(0xa0, 0xa008a008);
+		break;
+	default:
+		return;
+	}
+
 	board_nmi_handler_setup = &bmips_nmi_handler_setup;
 	ebase = new_ebase;
 }
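With the CONFIG_CPU_BMIPS* ifdefs above turned into runtime switches, the
single bmips_smp_ops structure is split into bmips43xx_smp_ops and
bmips5000_smp_ops. A minimal sketch (not part of this patch) of how board
setup code could select one of them at boot; register_smp_ops() and
current_cpu_type() are existing MIPS APIs, while the wrapper function itself
is hypothetical:

	static void __init my_register_bmips_smp_ops(void)
	{
		switch (current_cpu_type()) {
		case CPU_BMIPS4350:
		case CPU_BMIPS4380:
			/* CMT parts: one core, two threads */
			register_smp_ops(&bmips43xx_smp_ops);
			break;
		case CPU_BMIPS5000:
			/* raceless IPIs, multi-core capable */
			register_smp_ops(&bmips5000_smp_ops);
			break;
		default:
			break;	/* unknown core: stay uniprocessor */
		}
	}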
diff --git a/arch/mips/kernel/smp-cmp.c b/arch/mips/kernel/smp-cmp.c
index 5969f1e..1b925d8 100644
--- a/arch/mips/kernel/smp-cmp.c
+++ b/arch/mips/kernel/smp-cmp.c
@@ -199,11 +199,14 @@
 	pr_debug("SMPCMP: CPU%d: %s max_cpus=%d\n",
 		 smp_processor_id(), __func__, max_cpus);
 
+#ifdef CONFIG_MIPS_MT
 	/*
 	 * FIXME: some of these options are per-system, some per-core and
 	 * some per-cpu
 	 */
 	mips_mt_set_cpuoptions();
+#endif
+
 }
 
 struct plat_smp_ops cmp_smp_ops = {
diff --git a/arch/mips/kernel/smp-mt.c b/arch/mips/kernel/smp-mt.c
index 57a3f7a..0fb8cef 100644
--- a/arch/mips/kernel/smp-mt.c
+++ b/arch/mips/kernel/smp-mt.c
@@ -71,6 +71,7 @@
 
 		/* Record this as available CPU */
 		set_cpu_possible(tc, true);
+		set_cpu_present(tc, true);
 		__cpu_number_map[tc]	= ++ncpu;
 		__cpu_logical_map[ncpu] = tc;
 	}
@@ -112,12 +113,39 @@
 	write_tc_c0_tchalt(TCHALT_H);
 }
 
+#ifdef CONFIG_IRQ_GIC
+static void mp_send_ipi_single(int cpu, unsigned int action)
+{
+	unsigned long flags;
+
+	local_irq_save(flags);
+
+	switch (action) {
+	case SMP_CALL_FUNCTION:
+		gic_send_ipi(plat_ipi_call_int_xlate(cpu));
+		break;
+
+	case SMP_RESCHEDULE_YOURSELF:
+		gic_send_ipi(plat_ipi_resched_int_xlate(cpu));
+		break;
+	}
+
+	local_irq_restore(flags);
+}
+#endif
+
 static void vsmp_send_ipi_single(int cpu, unsigned int action)
 {
 	int i;
 	unsigned long flags;
 	int vpflags;
 
+#ifdef CONFIG_IRQ_GIC
+	if (gic_present) {
+		mp_send_ipi_single(cpu, action);
+		return;
+	}
+#endif
 	local_irq_save(flags);
 
 	vpflags = dvpe();	/* can't access the other CPU's registers whilst MVPE enabled */
diff --git a/arch/mips/kernel/spram.c b/arch/mips/kernel/spram.c
index 93f8681..b242e2c 100644
--- a/arch/mips/kernel/spram.c
+++ b/arch/mips/kernel/spram.c
@@ -8,7 +8,6 @@
  *
  * Copyright (C) 2007, 2008 MIPS Technologies, Inc.
  */
-#include <linux/init.h>
 #include <linux/kernel.h>
 #include <linux/ptrace.h>
 #include <linux/stddef.h>
@@ -206,6 +205,8 @@
 	case CPU_34K:
 	case CPU_74K:
 	case CPU_1004K:
+	case CPU_INTERAPTIV:
+	case CPU_PROAPTIV:
 		config0 = read_c0_config();
 		/* FIXME: addresses are Malta specific */
 		if (config0 & (1<<24)) {
diff --git a/arch/mips/kernel/sync-r4k.c b/arch/mips/kernel/sync-r4k.c
index 84536bf..c24ad5f 100644
--- a/arch/mips/kernel/sync-r4k.c
+++ b/arch/mips/kernel/sync-r4k.c
@@ -11,7 +11,6 @@
  */
 
 #include <linux/kernel.h>
-#include <linux/init.h>
 #include <linux/irqflags.h>
 #include <linux/cpumask.h>
 
diff --git a/arch/mips/kernel/traps.c b/arch/mips/kernel/traps.c
index f9c8746..e0b4996 100644
--- a/arch/mips/kernel/traps.c
+++ b/arch/mips/kernel/traps.c
@@ -78,6 +78,7 @@
 extern asmlinkage void handle_ov(void);
 extern asmlinkage void handle_tr(void);
 extern asmlinkage void handle_fpe(void);
+extern asmlinkage void handle_ftlb(void);
 extern asmlinkage void handle_mdmx(void);
 extern asmlinkage void handle_watch(void);
 extern asmlinkage void handle_mt(void);
@@ -1080,7 +1081,7 @@
 	unsigned long old_epc, old31;
 	unsigned int opcode;
 	unsigned int cpid;
-	int status;
+	int status, err;
 	unsigned long __maybe_unused flags;
 
 	prev_state = exception_enter();
@@ -1153,19 +1154,19 @@
 
 	case 1:
 		if (used_math())	/* Using the FPU again.	 */
-			own_fpu(1);
+			err = own_fpu(1);
 		else {			/* First time FPU user.	 */
-			init_fpu();
+			err = init_fpu();
 			set_used_math();
 		}
 
-		if (!raw_cpu_has_fpu) {
+		if (!raw_cpu_has_fpu || err) {
 			int sig;
 			void __user *fault_addr = NULL;
 			sig = fpu_emulator_cop1Handler(regs,
 						       &current->thread.fpu,
 						       0, &fault_addr);
-			if (!process_fpemu_return(sig, fault_addr))
+			if (!process_fpemu_return(sig, fault_addr) && !err)
 				mt_ase_fp_affinity();
 		}
 
@@ -1336,6 +1337,8 @@
 	case CPU_34K:
 	case CPU_74K:
 	case CPU_1004K:
+	case CPU_INTERAPTIV:
+	case CPU_PROAPTIV:
 		{
 #define ERRCTL_PE	0x80000000
 #define ERRCTL_L2P	0x00800000
@@ -1425,14 +1428,27 @@
 	printk("Decoded c0_cacheerr: %s cache fault in %s reference.\n",
 	       reg_val & (1<<30) ? "secondary" : "primary",
 	       reg_val & (1<<31) ? "data" : "insn");
-	printk("Error bits: %s%s%s%s%s%s%s\n",
-	       reg_val & (1<<29) ? "ED " : "",
-	       reg_val & (1<<28) ? "ET " : "",
-	       reg_val & (1<<26) ? "EE " : "",
-	       reg_val & (1<<25) ? "EB " : "",
-	       reg_val & (1<<24) ? "EI " : "",
-	       reg_val & (1<<23) ? "E1 " : "",
-	       reg_val & (1<<22) ? "E0 " : "");
+	if (cpu_has_mips_r2 &&
+	    ((current_cpu_data.processor_id & 0xff0000) == PRID_COMP_MIPS)) {
+		pr_err("Error bits: %s%s%s%s%s%s%s%s\n",
+			reg_val & (1<<29) ? "ED " : "",
+			reg_val & (1<<28) ? "ET " : "",
+			reg_val & (1<<27) ? "ES " : "",
+			reg_val & (1<<26) ? "EE " : "",
+			reg_val & (1<<25) ? "EB " : "",
+			reg_val & (1<<24) ? "EI " : "",
+			reg_val & (1<<23) ? "E1 " : "",
+			reg_val & (1<<22) ? "E0 " : "");
+	} else {
+		pr_err("Error bits: %s%s%s%s%s%s%s\n",
+			reg_val & (1<<29) ? "ED " : "",
+			reg_val & (1<<28) ? "ET " : "",
+			reg_val & (1<<26) ? "EE " : "",
+			reg_val & (1<<25) ? "EB " : "",
+			reg_val & (1<<24) ? "EI " : "",
+			reg_val & (1<<23) ? "E1 " : "",
+			reg_val & (1<<22) ? "E0 " : "");
+	}
 	printk("IDX: 0x%08x\n", reg_val & ((1<<22)-1));
 
 #if defined(CONFIG_CPU_MIPS32) || defined(CONFIG_CPU_MIPS64)
@@ -1446,6 +1462,34 @@
 	panic("Can't handle the cache error!");
 }
 
+asmlinkage void do_ftlb(void)
+{
+	const int field = 2 * sizeof(unsigned long);
+	unsigned int reg_val;
+
+	/* For the moment, report the problem and hang. */
+	if (cpu_has_mips_r2 &&
+	    ((current_cpu_data.processor_id & 0xff0000) == PRID_COMP_MIPS)) {
+		pr_err("FTLB error exception, cp0_ecc=0x%08x:\n",
+		       read_c0_ecc());
+		pr_err("cp0_errorepc == %0*lx\n", field, read_c0_errorepc());
+		reg_val = read_c0_cacheerr();
+		pr_err("c0_cacheerr == %08x\n", reg_val);
+
+		if ((reg_val & 0xc0000000) == 0xc0000000) {
+			pr_err("Decoded c0_cacheerr: FTLB parity error\n");
+		} else {
+			pr_err("Decoded c0_cacheerr: %s cache fault in %s reference.\n",
+			       reg_val & (1<<30) ? "secondary" : "primary",
+			       reg_val & (1<<31) ? "data" : "insn");
+		}
+	} else {
+		pr_err("FTLB error exception\n");
+	}
+	/* Just print the cacheerr bits for now */
+	cache_parity_error();
+}
+
 /*
  * SDBBP EJTAG debug exception handler.
  * We skip the instruction and return to the next instruction.
@@ -1995,6 +2039,7 @@
 	if (cpu_has_fpu && !cpu_has_nofpuex)
 		set_except_vector(15, handle_fpe);
 
+	set_except_vector(16, handle_ftlb);
 	set_except_vector(22, handle_mdmx);
 
 	if (cpu_has_mcheck)
diff --git a/arch/mips/kernel/vpe-cmp.c b/arch/mips/kernel/vpe-cmp.c
new file mode 100644
index 0000000..9268ebc
--- /dev/null
+++ b/arch/mips/kernel/vpe-cmp.c
@@ -0,0 +1,180 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2004, 2005 MIPS Technologies, Inc.  All rights reserved.
+ * Copyright (C) 2013 Imagination Technologies Ltd.
+ */
+#include <linux/kernel.h>
+#include <linux/device.h>
+#include <linux/fs.h>
+#include <linux/slab.h>
+#include <linux/export.h>
+
+#include <asm/vpe.h>
+
+static int major;
+
+void cleanup_tc(struct tc *tc)
+{
+
+}
+
+static ssize_t store_kill(struct device *dev, struct device_attribute *attr,
+			  const char *buf, size_t len)
+{
+	struct vpe *vpe = get_vpe(aprp_cpu_index());
+	struct vpe_notifications *notifier;
+
+	list_for_each_entry(notifier, &vpe->notify, list)
+		notifier->stop(aprp_cpu_index());
+
+	release_progmem(vpe->load_addr);
+	vpe->state = VPE_STATE_UNUSED;
+
+	return len;
+}
+static DEVICE_ATTR(kill, S_IWUSR, NULL, store_kill);
+
+static ssize_t ntcs_show(struct device *cd, struct device_attribute *attr,
+			 char *buf)
+{
+	struct vpe *vpe = get_vpe(aprp_cpu_index());
+
+	return sprintf(buf, "%d\n", vpe->ntcs);
+}
+
+static ssize_t ntcs_store(struct device *dev, struct device_attribute *attr,
+			  const char *buf, size_t len)
+{
+	struct vpe *vpe = get_vpe(aprp_cpu_index());
+	unsigned long new;
+	int ret;
+
+	ret = kstrtoul(buf, 0, &new);
+	if (ret < 0)
+		return ret;
+
+	/* APRP can only reserve one TC in a VPE and no more. */
+	if (new != 1)
+		return -EINVAL;
+
+	vpe->ntcs = new;
+
+	return len;
+}
+static DEVICE_ATTR_RW(ntcs);
+
+static struct attribute *vpe_attrs[] = {
+	&dev_attr_kill.attr,
+	&dev_attr_ntcs.attr,
+	NULL,
+};
+ATTRIBUTE_GROUPS(vpe);
+
+static void vpe_device_release(struct device *cd)
+{
+	kfree(cd);
+}
+
+static struct class vpe_class = {
+	.name = "vpe",
+	.owner = THIS_MODULE,
+	.dev_release = vpe_device_release,
+	.dev_groups = vpe_groups,
+};
+
+static struct device vpe_device;
+
+int __init vpe_module_init(void)
+{
+	struct vpe *v = NULL;
+	struct tc *t;
+	int err;
+
+	if (!cpu_has_mipsmt) {
+		pr_warn("VPE loader: not a MIPS MT capable processor\n");
+		return -ENODEV;
+	}
+
+	if (num_possible_cpus() - aprp_cpu_index() < 1) {
+		pr_warn("No VPEs reserved for AP/SP, not initializing VPE loader\n"
+			"Pass maxcpus=<n> as a kernel argument\n");
+		return -ENODEV;
+	}
+
+	major = register_chrdev(0, VPE_MODULE_NAME, &vpe_fops);
+	if (major < 0) {
+		pr_warn("VPE loader: unable to register character device\n");
+		return major;
+	}
+
+	err = class_register(&vpe_class);
+	if (err) {
+		pr_err("vpe_class registration failed\n");
+		goto out_chrdev;
+	}
+
+	device_initialize(&vpe_device);
+	vpe_device.class	= &vpe_class;
+	vpe_device.parent	= NULL;
+	dev_set_name(&vpe_device, "vpe_sp");
+	vpe_device.devt = MKDEV(major, VPE_MODULE_MINOR);
+	err = device_add(&vpe_device);
+	if (err) {
+		pr_err("Adding vpe_device failed\n");
+		goto out_class;
+	}
+
+	t = alloc_tc(aprp_cpu_index());
+	if (!t) {
+		pr_warn("VPE: unable to allocate TC\n");
+		err = -ENOMEM;
+		goto out_dev;
+	}
+
+	/* VPE */
+	v = alloc_vpe(aprp_cpu_index());
+	if (v == NULL) {
+		pr_warn("VPE: unable to allocate VPE\n");
+		kfree(t);
+		err = -ENOMEM;
+		goto out_dev;
+	}
+
+	v->ntcs = 1;
+
+	/* add the tc to the list of this vpe's tc's. */
+	list_add(&t->tc, &v->tc);
+
+	/* TC */
+	t->pvpe = v;	/* set the parent vpe */
+
+	return 0;
+
+out_dev:
+	device_del(&vpe_device);
+
+out_class:
+	class_unregister(&vpe_class);
+
+out_chrdev:
+	unregister_chrdev(major, VPE_MODULE_NAME);
+
+	return err;
+}
+
+void __exit vpe_module_exit(void)
+{
+	struct vpe *v, *n;
+
+	device_del(&vpe_device);
+	class_unregister(&vpe_class);
+	unregister_chrdev(major, VPE_MODULE_NAME);
+
+	/* No locking needed here */
+	list_for_each_entry_safe(v, n, &vpecontrol.vpe_list, list)
+		if (v->state != VPE_STATE_UNUSED)
+			release_vpe(v);
+}
diff --git a/arch/mips/kernel/vpe-mt.c b/arch/mips/kernel/vpe-mt.c
new file mode 100644
index 0000000..949ae0e
--- /dev/null
+++ b/arch/mips/kernel/vpe-mt.c
@@ -0,0 +1,523 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2004, 2005 MIPS Technologies, Inc.  All rights reserved.
+ * Copyright (C) 2013 Imagination Technologies Ltd.
+ */
+#include <linux/kernel.h>
+#include <linux/device.h>
+#include <linux/fs.h>
+#include <linux/slab.h>
+#include <linux/export.h>
+
+#include <asm/mipsregs.h>
+#include <asm/mipsmtregs.h>
+#include <asm/mips_mt.h>
+#include <asm/vpe.h>
+
+static int major;
+
+/* The number of TCs and VPEs physically available on the core */
+static int hw_tcs, hw_vpes;
+
+/* We are prepared so configure and start the VPE... */
+int vpe_run(struct vpe *v)
+{
+	unsigned long flags, val, dmt_flag;
+	struct vpe_notifications *notifier;
+	unsigned int vpeflags;
+	struct tc *t;
+
+	/* check we are the Master VPE */
+	local_irq_save(flags);
+	val = read_c0_vpeconf0();
+	if (!(val & VPECONF0_MVP)) {
+		pr_warn("VPE loader: only a Master VPE is able to configure MT\n");
+		local_irq_restore(flags);
+
+		return -1;
+	}
+
+	dmt_flag = dmt();
+	vpeflags = dvpe();
+
+	if (list_empty(&v->tc)) {
+		evpe(vpeflags);
+		emt(dmt_flag);
+		local_irq_restore(flags);
+
+		pr_warn("VPE loader: No TCs associated with VPE %d\n",
+			v->minor);
+
+		return -ENOEXEC;
+	}
+
+	t = list_first_entry(&v->tc, struct tc, tc);
+
+	/* Put MVPE's into 'configuration state' */
+	set_c0_mvpcontrol(MVPCONTROL_VPC);
+
+	settc(t->index);
+
+	/* should check it is halted, and not activated */
+	if ((read_tc_c0_tcstatus() & TCSTATUS_A) ||
+	   !(read_tc_c0_tchalt() & TCHALT_H)) {
+		evpe(vpeflags);
+		emt(dmt_flag);
+		local_irq_restore(flags);
+
+		pr_warn("VPE loader: TC %d is already active!\n",
+			t->index);
+
+		return -ENOEXEC;
+	}
+
+	/*
+	 * Write the address we want it to start running from in the TCPC
+	 * register.
+	 */
+	write_tc_c0_tcrestart((unsigned long)v->__start);
+	write_tc_c0_tccontext((unsigned long)0);
+
+	/*
+	 * Mark the TC as activated, not interrupt exempt and not dynamically
+	 * allocatable
+	 */
+	val = read_tc_c0_tcstatus();
+	val = (val & ~(TCSTATUS_DA | TCSTATUS_IXMT)) | TCSTATUS_A;
+	write_tc_c0_tcstatus(val);
+
+	write_tc_c0_tchalt(read_tc_c0_tchalt() & ~TCHALT_H);
+
+	/*
+	 * The sde-kit passes 'memsize' to __start in $a3, so set something
+	 * here...  Or set $a3 to zero and define DFLT_STACK_SIZE and
+	 * DFLT_HEAP_SIZE when you compile your program
+	 */
+	mttgpr(6, v->ntcs);
+	mttgpr(7, physical_memsize);
+
+	/* set up VPE1 */
+	/*
+	 * bind the TC to VPE 1 as late as possible so we only have the final
+	 * VPE registers to set up, and so an EJTAG probe can trigger on it
+	 */
+	write_tc_c0_tcbind((read_tc_c0_tcbind() & ~TCBIND_CURVPE) | 1);
+
+	write_vpe_c0_vpeconf0(read_vpe_c0_vpeconf0() & ~(VPECONF0_VPA));
+
+	back_to_back_c0_hazard();
+
+	/* Set up the XTC bit in vpeconf0 to point at our tc */
+	write_vpe_c0_vpeconf0((read_vpe_c0_vpeconf0() & ~(VPECONF0_XTC))
+			      | (t->index << VPECONF0_XTC_SHIFT));
+
+	back_to_back_c0_hazard();
+
+	/* enable this VPE */
+	write_vpe_c0_vpeconf0(read_vpe_c0_vpeconf0() | VPECONF0_VPA);
+
+	/* clear out any left overs from a previous program */
+	write_vpe_c0_status(0);
+	write_vpe_c0_cause(0);
+
+	/* take system out of configuration state */
+	clear_c0_mvpcontrol(MVPCONTROL_VPC);
+
+	/*
+	 * SMTC/SMVP kernels manage VPE enable independently,
+	 * but uniprocessor kernels need to turn it on, even
+	 * if that wasn't the pre-dvpe() state.
+	 */
+#ifdef CONFIG_SMP
+	evpe(vpeflags);
+#else
+	evpe(EVPE_ENABLE);
+#endif
+	emt(dmt_flag);
+	local_irq_restore(flags);
+
+	list_for_each_entry(notifier, &v->notify, list)
+		notifier->start(VPE_MODULE_MINOR);
+
+	return 0;
+}
+
+void cleanup_tc(struct tc *tc)
+{
+	unsigned long flags;
+	unsigned int mtflags, vpflags;
+	int tmp;
+
+	local_irq_save(flags);
+	mtflags = dmt();
+	vpflags = dvpe();
+	/* Put MVPE's into 'configuration state' */
+	set_c0_mvpcontrol(MVPCONTROL_VPC);
+
+	settc(tc->index);
+	tmp = read_tc_c0_tcstatus();
+
+	/* mark not allocated and not dynamically allocatable */
+	tmp &= ~(TCSTATUS_A | TCSTATUS_DA);
+	tmp |= TCSTATUS_IXMT;	/* interrupt exempt */
+	write_tc_c0_tcstatus(tmp);
+
+	write_tc_c0_tchalt(TCHALT_H);
+	mips_ihb();
+
+	clear_c0_mvpcontrol(MVPCONTROL_VPC);
+	evpe(vpflags);
+	emt(mtflags);
+	local_irq_restore(flags);
+}
+
+/* module wrapper entry points */
+/* give me a vpe */
+void *vpe_alloc(void)
+{
+	int i;
+	struct vpe *v;
+
+	/* find a vpe */
+	for (i = 1; i < MAX_VPES; i++) {
+		v = get_vpe(i);
+		if (v != NULL) {
+			v->state = VPE_STATE_INUSE;
+			return v;
+		}
+	}
+	return NULL;
+}
+EXPORT_SYMBOL(vpe_alloc);
+
+/* start running from here */
+int vpe_start(void *vpe, unsigned long start)
+{
+	struct vpe *v = vpe;
+
+	v->__start = start;
+	return vpe_run(v);
+}
+EXPORT_SYMBOL(vpe_start);
+
+/* halt it for now */
+int vpe_stop(void *vpe)
+{
+	struct vpe *v = vpe;
+	struct tc *t;
+	unsigned int evpe_flags;
+
+	evpe_flags = dvpe();
+
+	t = list_entry(v->tc.next, struct tc, tc);
+	if (t != NULL) {
+		settc(t->index);
+		write_vpe_c0_vpeconf0(read_vpe_c0_vpeconf0() & ~VPECONF0_VPA);
+	}
+
+	evpe(evpe_flags);
+
+	return 0;
+}
+EXPORT_SYMBOL(vpe_stop);
+
+/* I've done with it thank you */
+int vpe_free(void *vpe)
+{
+	struct vpe *v = vpe;
+	struct tc *t;
+	unsigned int evpe_flags;
+
+	t = list_entry(v->tc.next, struct tc, tc);
+	if (t == NULL)
+		return -ENOEXEC;
+
+	evpe_flags = dvpe();
+
+	/* Put MVPE's into 'configuration state' */
+	set_c0_mvpcontrol(MVPCONTROL_VPC);
+
+	settc(t->index);
+	write_vpe_c0_vpeconf0(read_vpe_c0_vpeconf0() & ~VPECONF0_VPA);
+
+	/* halt the TC */
+	write_tc_c0_tchalt(TCHALT_H);
+	mips_ihb();
+
+	/* mark the TC unallocated */
+	write_tc_c0_tcstatus(read_tc_c0_tcstatus() & ~TCSTATUS_A);
+
+	v->state = VPE_STATE_UNUSED;
+
+	clear_c0_mvpcontrol(MVPCONTROL_VPC);
+	evpe(evpe_flags);
+
+	return 0;
+}
+EXPORT_SYMBOL(vpe_free);
+
+static ssize_t store_kill(struct device *dev, struct device_attribute *attr,
+			  const char *buf, size_t len)
+{
+	struct vpe *vpe = get_vpe(aprp_cpu_index());
+	struct vpe_notifications *notifier;
+
+	list_for_each_entry(notifier, &vpe->notify, list)
+		notifier->stop(aprp_cpu_index());
+
+	release_progmem(vpe->load_addr);
+	cleanup_tc(get_tc(aprp_cpu_index()));
+	vpe_stop(vpe);
+	vpe_free(vpe);
+
+	return len;
+}
+static DEVICE_ATTR(kill, S_IWUSR, NULL, store_kill);
+
+static ssize_t ntcs_show(struct device *cd, struct device_attribute *attr,
+			 char *buf)
+{
+	struct vpe *vpe = get_vpe(aprp_cpu_index());
+
+	return sprintf(buf, "%d\n", vpe->ntcs);
+}
+
+static ssize_t ntcs_store(struct device *dev, struct device_attribute *attr,
+			  const char *buf, size_t len)
+{
+	struct vpe *vpe = get_vpe(aprp_cpu_index());
+	unsigned long new;
+	int ret;
+
+	ret = kstrtoul(buf, 0, &new);
+	if (ret < 0)
+		return ret;
+
+	if (new == 0 || new > (hw_tcs - aprp_cpu_index()))
+		return -EINVAL;
+
+	vpe->ntcs = new;
+
+	return len;
+}
+static DEVICE_ATTR_RW(ntcs);
+
+static struct attribute *vpe_attrs[] = {
+	&dev_attr_kill.attr,
+	&dev_attr_ntcs.attr,
+	NULL,
+};
+ATTRIBUTE_GROUPS(vpe);
+
+static void vpe_device_release(struct device *cd)
+{
+	kfree(cd);
+}
+
+static struct class vpe_class = {
+	.name = "vpe",
+	.owner = THIS_MODULE,
+	.dev_release = vpe_device_release,
+	.dev_groups = vpe_groups,
+};
+
+static struct device vpe_device;
+
+int __init vpe_module_init(void)
+{
+	unsigned int mtflags, vpflags;
+	unsigned long flags, val;
+	struct vpe *v = NULL;
+	struct tc *t;
+	int tc, err;
+
+	if (!cpu_has_mipsmt) {
+		pr_warn("VPE loader: not a MIPS MT capable processor\n");
+		return -ENODEV;
+	}
+
+	if (vpelimit == 0) {
+		pr_warn("No VPEs reserved for AP/SP, not initializing VPE loader\n"
+			"Pass maxvpes=<n> as a kernel argument\n");
+
+		return -ENODEV;
+	}
+
+	if (aprp_cpu_index() == 0) {
+		pr_warn("No TCs reserved for AP/SP, not initializing VPE loader\n"
+			"Pass maxtcs=<n> as a kernel argument\n");
+
+		return -ENODEV;
+	}
+
+	major = register_chrdev(0, VPE_MODULE_NAME, &vpe_fops);
+	if (major < 0) {
+		pr_warn("VPE loader: unable to register character device\n");
+		return major;
+	}
+
+	err = class_register(&vpe_class);
+	if (err) {
+		pr_err("vpe_class registration failed\n");
+		goto out_chrdev;
+	}
+
+	device_initialize(&vpe_device);
+	vpe_device.class	= &vpe_class;
+	vpe_device.parent	= NULL;
+	dev_set_name(&vpe_device, "vpe1");
+	vpe_device.devt = MKDEV(major, VPE_MODULE_MINOR);
+	err = device_add(&vpe_device);
+	if (err) {
+		pr_err("Adding vpe_device failed\n");
+		goto out_class;
+	}
+
+	local_irq_save(flags);
+	mtflags = dmt();
+	vpflags = dvpe();
+
+	/* Put MVPE's into 'configuration state' */
+	set_c0_mvpcontrol(MVPCONTROL_VPC);
+
+	val = read_c0_mvpconf0();
+	hw_tcs = (val & MVPCONF0_PTC) + 1;
+	hw_vpes = ((val & MVPCONF0_PVPE) >> MVPCONF0_PVPE_SHIFT) + 1;
+
+	for (tc = aprp_cpu_index(); tc < hw_tcs; tc++) {
+		/*
+		 * Must re-enable multithreading temporarily or in case we
+		 * reschedule send IPIs or similar we might hang.
+		 */
+		clear_c0_mvpcontrol(MVPCONTROL_VPC);
+		evpe(vpflags);
+		emt(mtflags);
+		local_irq_restore(flags);
+		t = alloc_tc(tc);
+		if (!t) {
+			err = -ENOMEM;
+			goto out_dev;
+		}
+
+		local_irq_save(flags);
+		mtflags = dmt();
+		vpflags = dvpe();
+		set_c0_mvpcontrol(MVPCONTROL_VPC);
+
+		/* VPE's */
+		if (tc < hw_tcs) {
+			settc(tc);
+
+			v = alloc_vpe(tc);
+			if (v == NULL) {
+				pr_warn("VPE: unable to allocate VPE\n");
+				goto out_reenable;
+			}
+
+			v->ntcs = hw_tcs - aprp_cpu_index();
+
+			/* add the tc to the list of this vpe's tc's. */
+			list_add(&t->tc, &v->tc);
+
+			/* deactivate all but vpe0 */
+			if (tc >= aprp_cpu_index()) {
+				unsigned long tmp = read_vpe_c0_vpeconf0();
+
+				tmp &= ~VPECONF0_VPA;
+
+				/* master VPE */
+				tmp |= VPECONF0_MVP;
+				write_vpe_c0_vpeconf0(tmp);
+			}
+
+			/* disable multi-threading with TC's */
+			write_vpe_c0_vpecontrol(read_vpe_c0_vpecontrol() &
+						~VPECONTROL_TE);
+
+			if (tc >= vpelimit) {
+				/*
+				 * Set config to be the same as vpe0,
+				 * particularly kseg0 coherency alg
+				 */
+				write_vpe_c0_config(read_c0_config());
+			}
+		}
+
+		/* TC's */
+		t->pvpe = v;	/* set the parent vpe */
+
+		if (tc >= aprp_cpu_index()) {
+			unsigned long tmp;
+
+			settc(tc);
+
+			/* Any TC that is bound to VPE0 gets left as is - in
+			 * case we are running SMTC on VPE0. A TC that is bound
+			 * to any other VPE gets bound to VPE0, ideally I'd like
+			 * to make it homeless but it doesn't appear to let me
+			 * bind a TC to a non-existent VPE. Which is perfectly
+			 * reasonable.
+			 *
+			 * The (un)bound state is visible to an EJTAG probe so
+			 * may notify GDB...
+			 */
+			tmp = read_tc_c0_tcbind();
+			if (tmp & TCBIND_CURVPE) {
+				/* tc is bound >vpe0 */
+				write_tc_c0_tcbind(tmp & ~TCBIND_CURVPE);
+
+				t->pvpe = get_vpe(0);	/* set the parent vpe */
+			}
+
+			/* halt the TC */
+			write_tc_c0_tchalt(TCHALT_H);
+			mips_ihb();
+
+			tmp = read_tc_c0_tcstatus();
+
+			/* mark not activated and not dynamically allocatable */
+			tmp &= ~(TCSTATUS_A | TCSTATUS_DA);
+			tmp |= TCSTATUS_IXMT;	/* interrupt exempt */
+			write_tc_c0_tcstatus(tmp);
+		}
+	}
+
+out_reenable:
+	/* release config state */
+	clear_c0_mvpcontrol(MVPCONTROL_VPC);
+
+	evpe(vpflags);
+	emt(mtflags);
+	local_irq_restore(flags);
+
+	return 0;
+
+out_dev:
+	device_del(&vpe_device);
+
+out_class:
+	class_unregister(&vpe_class);
+
+out_chrdev:
+	unregister_chrdev(major, VPE_MODULE_NAME);
+
+	return err;
+}
+
+void __exit vpe_module_exit(void)
+{
+	struct vpe *v, *n;
+
+	device_del(&vpe_device);
+	class_unregister(&vpe_class);
+	unregister_chrdev(major, VPE_MODULE_NAME);
+
+	/* No locking needed here */
+	list_for_each_entry_safe(v, n, &vpecontrol.vpe_list, list) {
+		if (v->state != VPE_STATE_UNUSED)
+			release_vpe(v);
+	}
+}
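With the MT flavour above, user space drives the loader the same way the old
monolithic vpe.c did: the SP image is written to the character device, and the
sysfs attributes registered here control and stop it. A hedged usage sketch
(the paths follow from the "vpe" class and "vpe1" device name above, and
sp-program.bin is a placeholder; exact device node creation depends on udev):

	echo 2 > /sys/class/vpe/vpe1/ntcs	# optionally reserve TCs for the SP program
	cat sp-program.bin > /dev/vpe1		# load and start the SP binary
	echo 1 > /sys/class/vpe/vpe1/kill	# stop it and release the VPE/TCs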
diff --git a/arch/mips/kernel/vpe.c b/arch/mips/kernel/vpe.c
index 2d5c142..11da314 100644
--- a/arch/mips/kernel/vpe.c
+++ b/arch/mips/kernel/vpe.c
@@ -1,37 +1,22 @@
 /*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
  * Copyright (C) 2004, 2005 MIPS Technologies, Inc.  All rights reserved.
+ * Copyright (C) 2013 Imagination Technologies Ltd.
  *
- *  This program is free software; you can distribute it and/or modify it
- *  under the terms of the GNU General Public License (Version 2) as
- *  published by the Free Software Foundation.
- *
- *  This program is distributed in the hope it will be useful, but WITHOUT
- *  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- *  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- *  for more details.
- *
- *  You should have received a copy of the GNU General Public License along
- *  with this program; if not, write to the Free Software Foundation, Inc.,
- *  59 Temple Place - Suite 330, Boston MA 02111-1307, USA.
- */
-
-/*
- * VPE support module
- *
- * Provides support for loading a MIPS SP program on VPE1.
- * The SP environment is rather simple, no tlb's.  It needs to be relocatable
- * (or partially linked). You should initialise your stack in the startup
- * code. This loader looks for the symbol __start and sets up
- * execution to resume from there. The MIPS SDE kit contains suitable examples.
- *
- * To load and run, simply cat a SP 'program file' to /dev/vpe1.
- * i.e cat spapp >/dev/vpe1.
+ * VPE support module for loading a MIPS SP program into VPE1. The SP
+ * environment is rather simple since there are no TLBs. The program
+ * needs to be relocatable (or partially linked) and must initialize
+ * its own stack in the startup code. The loader looks for the symbol
+ * __start and sets up execution to resume from there. To load and
+ * run, simply cat an SP 'binary' to the /dev/vpe1 device.
  */
 #include <linux/kernel.h>
 #include <linux/device.h>
 #include <linux/fs.h>
 #include <linux/init.h>
-#include <asm/uaccess.h>
 #include <linux/slab.h>
 #include <linux/list.h>
 #include <linux/vmalloc.h>
@@ -46,13 +31,10 @@
 #include <asm/mipsmtregs.h>
 #include <asm/cacheflush.h>
 #include <linux/atomic.h>
-#include <asm/cpu.h>
 #include <asm/mips_mt.h>
 #include <asm/processor.h>
 #include <asm/vpe.h>
 
-typedef void *vpe_handle;
-
 #ifndef ARCH_SHF_SMALL
 #define ARCH_SHF_SMALL 0
 #endif
@@ -60,95 +42,15 @@
 /* If this is set, the section belongs in the init part of the module */
 #define INIT_OFFSET_MASK (1UL << (BITS_PER_LONG-1))
 
-/*
- * The number of TCs and VPEs physically available on the core
- */
-static int hw_tcs, hw_vpes;
-static char module_name[] = "vpe";
-static int major;
-static const int minor = 1;	/* fixed for now  */
-
-/* grab the likely amount of memory we will need. */
-#ifdef CONFIG_MIPS_VPE_LOADER_TOM
-#define P_SIZE (2 * 1024 * 1024)
-#else
-/* add an overhead to the max kmalloc size for non-striped symbols/etc */
-#define P_SIZE (256 * 1024)
-#endif
-
-extern unsigned long physical_memsize;
-
-#define MAX_VPES 16
-#define VPE_PATH_MAX 256
-
-enum vpe_state {
-	VPE_STATE_UNUSED = 0,
-	VPE_STATE_INUSE,
-	VPE_STATE_RUNNING
-};
-
-enum tc_state {
-	TC_STATE_UNUSED = 0,
-	TC_STATE_INUSE,
-	TC_STATE_RUNNING,
-	TC_STATE_DYNAMIC
-};
-
-struct vpe {
-	enum vpe_state state;
-
-	/* (device) minor associated with this vpe */
-	int minor;
-
-	/* elfloader stuff */
-	void *load_addr;
-	unsigned long len;
-	char *pbuffer;
-	unsigned long plen;
-	char cwd[VPE_PATH_MAX];
-
-	unsigned long __start;
-
-	/* tc's associated with this vpe */
-	struct list_head tc;
-
-	/* The list of vpe's */
-	struct list_head list;
-
-	/* shared symbol address */
-	void *shared_ptr;
-
-	/* the list of who wants to know when something major happens */
-	struct list_head notify;
-
-	unsigned int ntcs;
-};
-
-struct tc {
-	enum tc_state state;
-	int index;
-
-	struct vpe *pvpe;	/* parent VPE */
-	struct list_head tc;	/* The list of TC's with this VPE */
-	struct list_head list;	/* The global list of tc's */
-};
-
-struct {
-	spinlock_t vpe_list_lock;
-	struct list_head vpe_list;	/* Virtual processing elements */
-	spinlock_t tc_list_lock;
-	struct list_head tc_list;	/* Thread contexts */
-} vpecontrol = {
+struct vpe_control vpecontrol = {
 	.vpe_list_lock	= __SPIN_LOCK_UNLOCKED(vpe_list_lock),
 	.vpe_list	= LIST_HEAD_INIT(vpecontrol.vpe_list),
 	.tc_list_lock	= __SPIN_LOCK_UNLOCKED(tc_list_lock),
 	.tc_list	= LIST_HEAD_INIT(vpecontrol.tc_list)
 };
 
-static void release_progmem(void *ptr);
-
 /* get the vpe associated with this minor */
-static struct vpe *get_vpe(int minor)
+struct vpe *get_vpe(int minor)
 {
 	struct vpe *res, *v;
 
@@ -158,7 +60,7 @@
 	res = NULL;
 	spin_lock(&vpecontrol.vpe_list_lock);
 	list_for_each_entry(v, &vpecontrol.vpe_list, list) {
-		if (v->minor == minor) {
+		if (v->minor == VPE_MODULE_MINOR) {
 			res = v;
 			break;
 		}
@@ -169,7 +71,7 @@
 }
 
 /* get the vpe associated with this minor */
-static struct tc *get_tc(int index)
+struct tc *get_tc(int index)
 {
 	struct tc *res, *t;
 
@@ -187,12 +89,13 @@
 }
 
 /* allocate a vpe and associate it with this minor (or index) */
-static struct vpe *alloc_vpe(int minor)
+struct vpe *alloc_vpe(int minor)
 {
 	struct vpe *v;
 
-	if ((v = kzalloc(sizeof(struct vpe), GFP_KERNEL)) == NULL)
-		return NULL;
+	v = kzalloc(sizeof(struct vpe), GFP_KERNEL);
+	if (v == NULL)
+		goto out;
 
 	INIT_LIST_HEAD(&v->tc);
 	spin_lock(&vpecontrol.vpe_list_lock);
@@ -200,17 +103,19 @@
 	spin_unlock(&vpecontrol.vpe_list_lock);
 
 	INIT_LIST_HEAD(&v->notify);
-	v->minor = minor;
+	v->minor = VPE_MODULE_MINOR;
 
+out:
 	return v;
 }
 
 /* allocate a tc. At startup only tc0 is running, all other can be halted. */
-static struct tc *alloc_tc(int index)
+struct tc *alloc_tc(int index)
 {
 	struct tc *tc;
 
-	if ((tc = kzalloc(sizeof(struct tc), GFP_KERNEL)) == NULL)
+	tc = kzalloc(sizeof(struct tc), GFP_KERNEL);
+	if (tc == NULL)
 		goto out;
 
 	INIT_LIST_HEAD(&tc->tc);
@@ -225,7 +130,7 @@
 }
 
 /* clean up and free everything */
-static void release_vpe(struct vpe *v)
+void release_vpe(struct vpe *v)
 {
 	list_del(&v->list);
 	if (v->load_addr)
@@ -233,28 +138,8 @@
 	kfree(v);
 }
 
-static void __maybe_unused dump_mtregs(void)
-{
-	unsigned long val;
-
-	val = read_c0_config3();
-	printk("config3 0x%lx MT %ld\n", val,
-	       (val & CONFIG3_MT) >> CONFIG3_MT_SHIFT);
-
-	val = read_c0_mvpcontrol();
-	printk("MVPControl 0x%lx, STLB %ld VPC %ld EVP %ld\n", val,
-	       (val & MVPCONTROL_STLB) >> MVPCONTROL_STLB_SHIFT,
-	       (val & MVPCONTROL_VPC) >> MVPCONTROL_VPC_SHIFT,
-	       (val & MVPCONTROL_EVP));
-
-	val = read_c0_mvpconf0();
-	printk("mvpconf0 0x%lx, PVPE %ld PTC %ld M %ld\n", val,
-	       (val & MVPCONF0_PVPE) >> MVPCONF0_PVPE_SHIFT,
-	       val & MVPCONF0_PTC, (val & MVPCONF0_M) >> MVPCONF0_M_SHIFT);
-}
-
-/* Find some VPE program space	*/
-static void *alloc_progmem(unsigned long len)
+/* Find some VPE program space */
+void *alloc_progmem(unsigned long len)
 {
 	void *addr;
 
@@ -273,7 +158,7 @@
 	return addr;
 }
 
-static void release_progmem(void *ptr)
+void release_progmem(void *ptr)
 {
 #ifndef CONFIG_MIPS_VPE_LOADER_TOM
 	kfree(ptr);
@@ -281,7 +166,7 @@
 }
 
 /* Update size with this section: return offset. */
-static long get_offset(unsigned long *size, Elf_Shdr * sechdr)
+static long get_offset(unsigned long *size, Elf_Shdr *sechdr)
 {
 	long ret;
 
@@ -294,8 +179,8 @@
    might -- code, read-only data, read-write data, small data.	Tally
    sizes, and place the offsets into sh_entsize fields: high bit means it
    belongs in init. */
-static void layout_sections(struct module *mod, const Elf_Ehdr * hdr,
-			    Elf_Shdr * sechdrs, const char *secstrings)
+static void layout_sections(struct module *mod, const Elf_Ehdr *hdr,
+			    Elf_Shdr *sechdrs, const char *secstrings)
 {
 	static unsigned long const masks[][2] = {
 		/* NOTE: all executable code must be the first section
@@ -315,7 +200,6 @@
 		for (i = 0; i < hdr->e_shnum; ++i) {
 			Elf_Shdr *s = &sechdrs[i];
 
-			//  || strncmp(secstrings + s->sh_name, ".init", 5) == 0)
 			if ((s->sh_flags & masks[m][0]) != masks[m][0]
 			    || (s->sh_flags & masks[m][1])
 			    || s->sh_entsize != ~0UL)
@@ -330,7 +214,6 @@
 	}
 }
 
-
 /* from module-elf32.c, but subverted a little */
 
 struct mips_hi16 {
@@ -353,20 +236,18 @@
 {
 	int rel;
 
-	if( !(*location & 0xffff) ) {
+	if (!(*location & 0xffff)) {
 		rel = (int)v - gp_addr;
-	}
-	else {
+	} else {
 		/* .sbss + gp(relative) + offset */
 		/* kludge! */
 		rel =  (int)(short)((int)v + gp_offs +
 				    (int)(short)(*location & 0xffff) - gp_addr);
 	}
 
-	if( (rel > 32768) || (rel < -32768) ) {
-		printk(KERN_DEBUG "VPE loader: apply_r_mips_gprel16: "
-		       "relative address 0x%x out of range of gp register\n",
-		       rel);
+	if ((rel > 32768) || (rel < -32768)) {
+		pr_debug("VPE loader: apply_r_mips_gprel16: relative address 0x%x out of range of gp register\n",
+			 rel);
 		return -ENOEXEC;
 	}
 
@@ -380,12 +261,12 @@
 {
 	int rel;
 	rel = (((unsigned int)v - (unsigned int)location));
-	rel >>= 2;		// because the offset is in _instructions_ not bytes.
-	rel -= 1;		// and one instruction less due to the branch delay slot.
+	rel >>= 2; /* because the offset is in _instructions_ not bytes. */
+	rel -= 1;  /* and one instruction less due to the branch delay slot. */
 
-	if( (rel > 32768) || (rel < -32768) ) {
-		printk(KERN_DEBUG "VPE loader: "
-		       "apply_r_mips_pc16: relative address out of range 0x%x\n", rel);
+	if ((rel > 32768) || (rel < -32768)) {
+		pr_debug("VPE loader: apply_r_mips_pc16: relative address out of range 0x%x\n",
+			 rel);
 		return -ENOEXEC;
 	}
 
@@ -406,8 +287,7 @@
 			   Elf32_Addr v)
 {
 	if (v % 4) {
-		printk(KERN_DEBUG "VPE loader: apply_r_mips_26 "
-		       " unaligned relocation\n");
+		pr_debug("VPE loader: apply_r_mips_26: unaligned relocation\n");
 		return -ENOEXEC;
 	}
 
@@ -438,7 +318,7 @@
 	 * the carry we need to add.  Save the information, and let LO16 do the
 	 * actual relocation.
 	 */
-	n = kmalloc(sizeof *n, GFP_KERNEL);
+	n = kmalloc(sizeof(*n), GFP_KERNEL);
 	if (!n)
 		return -ENOMEM;
 
@@ -470,9 +350,7 @@
 			 * The value for the HI16 had best be the same.
 			 */
 			if (v != l->value) {
-				printk(KERN_DEBUG "VPE loader: "
-				       "apply_r_mips_lo16/hi16: \t"
-				       "inconsistent value information\n");
+				pr_debug("VPE loader: apply_r_mips_lo16/hi16: inconsistent value information\n");
 				goto out_free;
 			}
 
@@ -568,20 +446,19 @@
 			+ ELF32_R_SYM(r_info);
 
 		if (!sym->st_value) {
-			printk(KERN_DEBUG "%s: undefined weak symbol %s\n",
-			       me->name, strtab + sym->st_name);
+			pr_debug("%s: undefined weak symbol %s\n",
+				 me->name, strtab + sym->st_name);
 			/* just print the warning, dont barf */
 		}
 
 		v = sym->st_value;
 
 		res = reloc_handlers[ELF32_R_TYPE(r_info)](me, location, v);
-		if( res ) {
+		if (res) {
 			char *r = rstrs[ELF32_R_TYPE(r_info)];
-			printk(KERN_WARNING "VPE loader: .text+0x%x "
-			       "relocation type %s for symbol \"%s\" failed\n",
-			       rel[i].r_offset, r ? r : "UNKNOWN",
-			       strtab + sym->st_name);
+			pr_warn("VPE loader: .text+0x%x relocation type %s for symbol \"%s\" failed\n",
+				rel[i].r_offset, r ? r : "UNKNOWN",
+				strtab + sym->st_name);
 			return res;
 		}
 	}
@@ -596,10 +473,8 @@
 }
 /* end module-elf32.c */
 
-
-
 /* Change all symbols so that sh_value encodes the pointer directly. */
-static void simplify_symbols(Elf_Shdr * sechdrs,
+static void simplify_symbols(Elf_Shdr *sechdrs,
 			    unsigned int symindex,
 			    const char *strtab,
 			    const char *secstrings,
@@ -640,18 +515,16 @@
 			break;
 
 		case SHN_MIPS_SCOMMON:
-			printk(KERN_DEBUG "simplify_symbols: ignoring SHN_MIPS_SCOMMON "
-			       "symbol <%s> st_shndx %d\n", strtab + sym[i].st_name,
-			       sym[i].st_shndx);
-			// .sbss section
+			pr_debug("simplify_symbols: ignoring SHN_MIPS_SCOMMON symbol <%s> st_shndx %d\n",
+				 strtab + sym[i].st_name, sym[i].st_shndx);
+			/* .sbss section */
 			break;
 
 		default:
 			secbase = sechdrs[sym[i].st_shndx].sh_addr;
 
-			if (strncmp(strtab + sym[i].st_name, "_gp", 3) == 0) {
+			if (strncmp(strtab + sym[i].st_name, "_gp", 3) == 0)
 				save_gp_address(secbase, sym[i].st_value);
-			}
 
 			sym[i].st_value += secbase;
 			break;
@@ -660,142 +533,21 @@
 }
 
 #ifdef DEBUG_ELFLOADER
-static void dump_elfsymbols(Elf_Shdr * sechdrs, unsigned int symindex,
+static void dump_elfsymbols(Elf_Shdr *sechdrs, unsigned int symindex,
 			    const char *strtab, struct module *mod)
 {
 	Elf_Sym *sym = (void *)sechdrs[symindex].sh_addr;
 	unsigned int i, n = sechdrs[symindex].sh_size / sizeof(Elf_Sym);
 
-	printk(KERN_DEBUG "dump_elfsymbols: n %d\n", n);
+	pr_debug("dump_elfsymbols: n %d\n", n);
 	for (i = 1; i < n; i++) {
-		printk(KERN_DEBUG " i %d name <%s> 0x%x\n", i,
-		       strtab + sym[i].st_name, sym[i].st_value);
+		pr_debug(" i %d name <%s> 0x%x\n", i, strtab + sym[i].st_name,
+			 sym[i].st_value);
 	}
 }
 #endif
 
-/* We are prepared so configure and start the VPE... */
-static int vpe_run(struct vpe * v)
-{
-	unsigned long flags, val, dmt_flag;
-	struct vpe_notifications *n;
-	unsigned int vpeflags;
-	struct tc *t;
-
-	/* check we are the Master VPE */
-	local_irq_save(flags);
-	val = read_c0_vpeconf0();
-	if (!(val & VPECONF0_MVP)) {
-		printk(KERN_WARNING
-		       "VPE loader: only Master VPE's are allowed to configure MT\n");
-		local_irq_restore(flags);
-
-		return -1;
-	}
-
-	dmt_flag = dmt();
-	vpeflags = dvpe();
-
-	if (list_empty(&v->tc)) {
-		evpe(vpeflags);
-		emt(dmt_flag);
-		local_irq_restore(flags);
-
-		printk(KERN_WARNING
-		       "VPE loader: No TC's associated with VPE %d\n",
-		       v->minor);
-
-		return -ENOEXEC;
-	}
-
-	t = list_first_entry(&v->tc, struct tc, tc);
-
-	/* Put MVPE's into 'configuration state' */
-	set_c0_mvpcontrol(MVPCONTROL_VPC);
-
-	settc(t->index);
-
-	/* should check it is halted, and not activated */
-	if ((read_tc_c0_tcstatus() & TCSTATUS_A) || !(read_tc_c0_tchalt() & TCHALT_H)) {
-		evpe(vpeflags);
-		emt(dmt_flag);
-		local_irq_restore(flags);
-
-		printk(KERN_WARNING "VPE loader: TC %d is already active!\n",
-		       t->index);
-
-		return -ENOEXEC;
-	}
-
-	/* Write the address we want it to start running from in the TCPC register. */
-	write_tc_c0_tcrestart((unsigned long)v->__start);
-	write_tc_c0_tccontext((unsigned long)0);
-
-	/*
-	 * Mark the TC as activated, not interrupt exempt and not dynamically
-	 * allocatable
-	 */
-	val = read_tc_c0_tcstatus();
-	val = (val & ~(TCSTATUS_DA | TCSTATUS_IXMT)) | TCSTATUS_A;
-	write_tc_c0_tcstatus(val);
-
-	write_tc_c0_tchalt(read_tc_c0_tchalt() & ~TCHALT_H);
-
-	/*
-	 * The sde-kit passes 'memsize' to __start in $a3, so set something
-	 * here...  Or set $a3 to zero and define DFLT_STACK_SIZE and
-	 * DFLT_HEAP_SIZE when you compile your program
-	 */
-	mttgpr(6, v->ntcs);
-	mttgpr(7, physical_memsize);
-
-	/* set up VPE1 */
-	/*
-	 * bind the TC to VPE 1 as late as possible so we only have the final
-	 * VPE registers to set up, and so an EJTAG probe can trigger on it
-	 */
-	write_tc_c0_tcbind((read_tc_c0_tcbind() & ~TCBIND_CURVPE) | 1);
-
-	write_vpe_c0_vpeconf0(read_vpe_c0_vpeconf0() & ~(VPECONF0_VPA));
-
-	back_to_back_c0_hazard();
-
-	/* Set up the XTC bit in vpeconf0 to point at our tc */
-	write_vpe_c0_vpeconf0( (read_vpe_c0_vpeconf0() & ~(VPECONF0_XTC))
-			      | (t->index << VPECONF0_XTC_SHIFT));
-
-	back_to_back_c0_hazard();
-
-	/* enable this VPE */
-	write_vpe_c0_vpeconf0(read_vpe_c0_vpeconf0() | VPECONF0_VPA);
-
-	/* clear out any left overs from a previous program */
-	write_vpe_c0_status(0);
-	write_vpe_c0_cause(0);
-
-	/* take system out of configuration state */
-	clear_c0_mvpcontrol(MVPCONTROL_VPC);
-
-	/*
-	 * SMTC/SMVP kernels manage VPE enable independently,
-	 * but uniprocessor kernels need to turn it on, even
-	 * if that wasn't the pre-dvpe() state.
-	 */
-#ifdef CONFIG_SMP
-	evpe(vpeflags);
-#else
-	evpe(EVPE_ENABLE);
-#endif
-	emt(dmt_flag);
-	local_irq_restore(flags);
-
-	list_for_each_entry(n, &v->notify, list)
-		n->start(minor);
-
-	return 0;
-}
-
-static int find_vpe_symbols(struct vpe * v, Elf_Shdr * sechdrs,
+static int find_vpe_symbols(struct vpe *v, Elf_Shdr *sechdrs,
 				      unsigned int symindex, const char *strtab,
 				      struct module *mod)
 {
@@ -803,16 +555,14 @@
 	unsigned int i, n = sechdrs[symindex].sh_size / sizeof(Elf_Sym);
 
 	for (i = 1; i < n; i++) {
-		if (strcmp(strtab + sym[i].st_name, "__start") == 0) {
+		if (strcmp(strtab + sym[i].st_name, "__start") == 0)
 			v->__start = sym[i].st_value;
-		}
 
-		if (strcmp(strtab + sym[i].st_name, "vpe_shared") == 0) {
+		if (strcmp(strtab + sym[i].st_name, "vpe_shared") == 0)
 			v->shared_ptr = (void *)sym[i].st_value;
-		}
 	}
 
-	if ( (v->__start == 0) || (v->shared_ptr == NULL))
+	if ((v->__start == 0) || (v->shared_ptr == NULL))
 		return -1;
 
 	return 0;
@@ -823,14 +573,14 @@
  * contents of the program (p)buffer performing relocatations/etc, free's it
  * when finished.
  */
-static int vpe_elfload(struct vpe * v)
+static int vpe_elfload(struct vpe *v)
 {
 	Elf_Ehdr *hdr;
 	Elf_Shdr *sechdrs;
 	long err = 0;
 	char *secstrings, *strtab = NULL;
 	unsigned int len, i, symindex = 0, strindex = 0, relocate = 0;
-	struct module mod;	// so we can re-use the relocations code
+	struct module mod; /* so we can re-use the relocations code */
 
 	memset(&mod, 0, sizeof(struct module));
 	strcpy(mod.name, "VPE loader");
@@ -844,8 +594,7 @@
 	    || (hdr->e_type != ET_REL && hdr->e_type != ET_EXEC)
 	    || !elf_check_arch(hdr)
 	    || hdr->e_shentsize != sizeof(*sechdrs)) {
-		printk(KERN_WARNING
-		       "VPE loader: program wrong arch or weird elf version\n");
+		pr_warn("VPE loader: program wrong arch or weird elf version\n");
 
 		return -ENOEXEC;
 	}
@@ -854,8 +603,7 @@
 		relocate = 1;
 
 	if (len < hdr->e_shoff + hdr->e_shnum * sizeof(Elf_Shdr)) {
-		printk(KERN_ERR "VPE loader: program length %u truncated\n",
-		       len);
+		pr_err("VPE loader: program length %u truncated\n", len);
 
 		return -ENOEXEC;
 	}
@@ -870,22 +618,24 @@
 
 	if (relocate) {
 		for (i = 1; i < hdr->e_shnum; i++) {
-			if (sechdrs[i].sh_type != SHT_NOBITS
-			    && len < sechdrs[i].sh_offset + sechdrs[i].sh_size) {
-				printk(KERN_ERR "VPE program length %u truncated\n",
+			if ((sechdrs[i].sh_type != SHT_NOBITS) &&
+			    (len < sechdrs[i].sh_offset + sechdrs[i].sh_size)) {
+				pr_err("VPE program length %u truncated\n",
 				       len);
 				return -ENOEXEC;
 			}
 
 			/* Mark all sections sh_addr with their address in the
 			   temporary image. */
-			sechdrs[i].sh_addr = (size_t) hdr + sechdrs[i].sh_offset;
+			sechdrs[i].sh_addr = (size_t) hdr +
+				sechdrs[i].sh_offset;
 
 			/* Internal symbols and strings. */
 			if (sechdrs[i].sh_type == SHT_SYMTAB) {
 				symindex = i;
 				strindex = sechdrs[i].sh_link;
-				strtab = (char *)hdr + sechdrs[strindex].sh_offset;
+				strtab = (char *)hdr +
+					sechdrs[strindex].sh_offset;
 			}
 		}
 		layout_sections(&mod, hdr, sechdrs, secstrings);
@@ -912,8 +662,9 @@
 			/* Update sh_addr to point to copy in image. */
 			sechdrs[i].sh_addr = (unsigned long)dest;
 
-			printk(KERN_DEBUG " section sh_name %s sh_addr 0x%x\n",
-			       secstrings + sechdrs[i].sh_name, sechdrs[i].sh_addr);
+			pr_debug(" section sh_name %s sh_addr 0x%x\n",
+				 secstrings + sechdrs[i].sh_name,
+				 sechdrs[i].sh_addr);
 		}
 
 		/* Fix up syms, so that st_value is a pointer to location. */
@@ -934,17 +685,18 @@
 				continue;
 
 			if (sechdrs[i].sh_type == SHT_REL)
-				err = apply_relocations(sechdrs, strtab, symindex, i,
-							&mod);
+				err = apply_relocations(sechdrs, strtab,
+							symindex, i, &mod);
 			else if (sechdrs[i].sh_type == SHT_RELA)
-				err = apply_relocate_add(sechdrs, strtab, symindex, i,
-							 &mod);
+				err = apply_relocate_add(sechdrs, strtab,
+							 symindex, i, &mod);
 			if (err < 0)
 				return err;
 
 		}
 	} else {
-		struct elf_phdr *phdr = (struct elf_phdr *) ((char *)hdr + hdr->e_phoff);
+		struct elf_phdr *phdr = (struct elf_phdr *)
+						((char *)hdr + hdr->e_phoff);
 
 		for (i = 0; i < hdr->e_phnum; i++) {
 			if (phdr->p_type == PT_LOAD) {
@@ -962,11 +714,15 @@
 			if (sechdrs[i].sh_type == SHT_SYMTAB) {
 				symindex = i;
 				strindex = sechdrs[i].sh_link;
-				strtab = (char *)hdr + sechdrs[strindex].sh_offset;
+				strtab = (char *)hdr +
+					sechdrs[strindex].sh_offset;
 
-				/* mark the symtab's address for when we try to find the
-				   magic symbols */
-				sechdrs[i].sh_addr = (size_t) hdr + sechdrs[i].sh_offset;
+				/*
+				 * mark symtab's address for when we try
+				 * to find the magic symbols
+				 */
+				sechdrs[i].sh_addr = (size_t) hdr +
+					sechdrs[i].sh_offset;
 			}
 		}
 	}
@@ -977,53 +733,19 @@
 
 	if ((find_vpe_symbols(v, sechdrs, symindex, strtab, &mod)) < 0) {
 		if (v->__start == 0) {
-			printk(KERN_WARNING "VPE loader: program does not contain "
-			       "a __start symbol\n");
+			pr_warn("VPE loader: program does not contain a __start symbol\n");
 			return -ENOEXEC;
 		}
 
 		if (v->shared_ptr == NULL)
-			printk(KERN_WARNING "VPE loader: "
-			       "program does not contain vpe_shared symbol.\n"
-			       " Unable to use AMVP (AP/SP) facilities.\n");
+			pr_warn("VPE loader: program does not contain vpe_shared symbol.\n"
+				" Unable to use AMVP (AP/SP) facilities.\n");
 	}
 
-	printk(" elf loaded\n");
+	pr_info(" elf loaded\n");
 	return 0;
 }
 
-static void cleanup_tc(struct tc *tc)
-{
-	unsigned long flags;
-	unsigned int mtflags, vpflags;
-	int tmp;
-
-	local_irq_save(flags);
-	mtflags = dmt();
-	vpflags = dvpe();
-	/* Put MVPE's into 'configuration state' */
-	set_c0_mvpcontrol(MVPCONTROL_VPC);
-
-	settc(tc->index);
-	tmp = read_tc_c0_tcstatus();
-
-	/* mark not allocated and not dynamically allocatable */
-	tmp &= ~(TCSTATUS_A | TCSTATUS_DA);
-	tmp |= TCSTATUS_IXMT;	/* interrupt exempt */
-	write_tc_c0_tcstatus(tmp);
-
-	write_tc_c0_tchalt(TCHALT_H);
-	mips_ihb();
-
-	/* bind it to anything other than VPE1 */
-//	write_tc_c0_tcbind(read_tc_c0_tcbind() & ~TCBIND_CURVPE); // | TCBIND_CURVPE
-
-	clear_c0_mvpcontrol(MVPCONTROL_VPC);
-	evpe(vpflags);
-	emt(mtflags);
-	local_irq_restore(flags);
-}
-
 static int getcwd(char *buff, int size)
 {
 	mm_segment_t old_fs;
@@ -1043,39 +765,39 @@
 static int vpe_open(struct inode *inode, struct file *filp)
 {
 	enum vpe_state state;
-	struct vpe_notifications *not;
+	struct vpe_notifications *notifier;
 	struct vpe *v;
 	int ret;
 
-	if (minor != iminor(inode)) {
+	if (VPE_MODULE_MINOR != iminor(inode)) {
 		/* assume only 1 device at the moment. */
-		pr_warning("VPE loader: only vpe1 is supported\n");
+		pr_warn("VPE loader: only vpe1 is supported\n");
 
 		return -ENODEV;
 	}
 
-	if ((v = get_vpe(tclimit)) == NULL) {
-		pr_warning("VPE loader: unable to get vpe\n");
+	v = get_vpe(aprp_cpu_index());
+	if (v == NULL) {
+		pr_warn("VPE loader: unable to get vpe\n");
 
 		return -ENODEV;
 	}
 
 	state = xchg(&v->state, VPE_STATE_INUSE);
 	if (state != VPE_STATE_UNUSED) {
-		printk(KERN_DEBUG "VPE loader: tc in use dumping regs\n");
+		pr_debug("VPE loader: tc in use dumping regs\n");
 
-		list_for_each_entry(not, &v->notify, list) {
-			not->stop(tclimit);
-		}
+		list_for_each_entry(notifier, &v->notify, list)
+			notifier->stop(aprp_cpu_index());
 
 		release_progmem(v->load_addr);
-		cleanup_tc(get_tc(tclimit));
+		cleanup_tc(get_tc(aprp_cpu_index()));
 	}
 
 	/* this of-course trashes what was there before... */
 	v->pbuffer = vmalloc(P_SIZE);
 	if (!v->pbuffer) {
-		pr_warning("VPE loader: unable to allocate memory\n");
+		pr_warn("VPE loader: unable to allocate memory\n");
 		return -ENOMEM;
 	}
 	v->plen = P_SIZE;
@@ -1085,7 +807,7 @@
 	v->cwd[0] = 0;
 	ret = getcwd(v->cwd, VPE_PATH_MAX);
 	if (ret < 0)
-		printk(KERN_WARNING "VPE loader: open, getcwd returned %d\n", ret);
+		pr_warn("VPE loader: open, getcwd returned %d\n", ret);
 
 	v->shared_ptr = NULL;
 	v->__start = 0;
@@ -1099,20 +821,20 @@
 	Elf_Ehdr *hdr;
 	int ret = 0;
 
-	v = get_vpe(tclimit);
+	v = get_vpe(aprp_cpu_index());
 	if (v == NULL)
 		return -ENODEV;
 
 	hdr = (Elf_Ehdr *) v->pbuffer;
 	if (memcmp(hdr->e_ident, ELFMAG, SELFMAG) == 0) {
-		if (vpe_elfload(v) >= 0) {
+		if ((vpe_elfload(v) >= 0) && vpe_run) {
 			vpe_run(v);
 		} else {
-			printk(KERN_WARNING "VPE loader: ELF load failed.\n");
+			pr_warn("VPE loader: ELF load failed.\n");
 			ret = -ENOEXEC;
 		}
 	} else {
-		printk(KERN_WARNING "VPE loader: only elf files are supported\n");
+		pr_warn("VPE loader: only elf files are supported\n");
 		ret = -ENOEXEC;
 	}
 
@@ -1130,22 +852,22 @@
 	return ret;
 }
 
-static ssize_t vpe_write(struct file *file, const char __user * buffer,
-			 size_t count, loff_t * ppos)
+static ssize_t vpe_write(struct file *file, const char __user *buffer,
+			 size_t count, loff_t *ppos)
 {
 	size_t ret = count;
 	struct vpe *v;
 
-	if (iminor(file_inode(file)) != minor)
+	if (iminor(file_inode(file)) != VPE_MODULE_MINOR)
 		return -ENODEV;
 
-	v = get_vpe(tclimit);
+	v = get_vpe(aprp_cpu_index());
+
 	if (v == NULL)
 		return -ENODEV;
 
 	if ((count + v->len) > v->plen) {
-		printk(KERN_WARNING
-		       "VPE loader: elf size too big. Perhaps strip uneeded symbols\n");
+		pr_warn("VPE loader: elf size too big. Perhaps strip uneeded symbols\n");
 		return -ENOMEM;
 	}
 
@@ -1157,7 +879,7 @@
 	return ret;
 }
 
-static const struct file_operations vpe_fops = {
+const struct file_operations vpe_fops = {
 	.owner = THIS_MODULE,
 	.open = vpe_open,
 	.release = vpe_release,
@@ -1165,396 +887,40 @@
 	.llseek = noop_llseek,
 };
 
-/* module wrapper entry points */
-/* give me a vpe */
-vpe_handle vpe_alloc(void)
-{
-	int i;
-	struct vpe *v;
-
-	/* find a vpe */
-	for (i = 1; i < MAX_VPES; i++) {
-		if ((v = get_vpe(i)) != NULL) {
-			v->state = VPE_STATE_INUSE;
-			return v;
-		}
-	}
-	return NULL;
-}
-
-EXPORT_SYMBOL(vpe_alloc);
-
-/* start running from here */
-int vpe_start(vpe_handle vpe, unsigned long start)
-{
-	struct vpe *v = vpe;
-
-	v->__start = start;
-	return vpe_run(v);
-}
-
-EXPORT_SYMBOL(vpe_start);
-
-/* halt it for now */
-int vpe_stop(vpe_handle vpe)
-{
-	struct vpe *v = vpe;
-	struct tc *t;
-	unsigned int evpe_flags;
-
-	evpe_flags = dvpe();
-
-	if ((t = list_entry(v->tc.next, struct tc, tc)) != NULL) {
-
-		settc(t->index);
-		write_vpe_c0_vpeconf0(read_vpe_c0_vpeconf0() & ~VPECONF0_VPA);
-	}
-
-	evpe(evpe_flags);
-
-	return 0;
-}
-
-EXPORT_SYMBOL(vpe_stop);
-
-/* I've done with it thank you */
-int vpe_free(vpe_handle vpe)
-{
-	struct vpe *v = vpe;
-	struct tc *t;
-	unsigned int evpe_flags;
-
-	if ((t = list_entry(v->tc.next, struct tc, tc)) == NULL) {
-		return -ENOEXEC;
-	}
-
-	evpe_flags = dvpe();
-
-	/* Put MVPE's into 'configuration state' */
-	set_c0_mvpcontrol(MVPCONTROL_VPC);
-
-	settc(t->index);
-	write_vpe_c0_vpeconf0(read_vpe_c0_vpeconf0() & ~VPECONF0_VPA);
-
-	/* halt the TC */
-	write_tc_c0_tchalt(TCHALT_H);
-	mips_ihb();
-
-	/* mark the TC unallocated */
-	write_tc_c0_tcstatus(read_tc_c0_tcstatus() & ~TCSTATUS_A);
-
-	v->state = VPE_STATE_UNUSED;
-
-	clear_c0_mvpcontrol(MVPCONTROL_VPC);
-	evpe(evpe_flags);
-
-	return 0;
-}
-
-EXPORT_SYMBOL(vpe_free);
-
 void *vpe_get_shared(int index)
 {
-	struct vpe *v;
+	struct vpe *v = get_vpe(index);
 
-	if ((v = get_vpe(index)) == NULL)
+	if (v == NULL)
 		return NULL;
 
 	return v->shared_ptr;
 }
-
 EXPORT_SYMBOL(vpe_get_shared);
 
 int vpe_notify(int index, struct vpe_notifications *notify)
 {
-	struct vpe *v;
+	struct vpe *v = get_vpe(index);
 
-	if ((v = get_vpe(index)) == NULL)
+	if (v == NULL)
 		return -1;
 
 	list_add(&notify->list, &v->notify);
 	return 0;
 }
-
 EXPORT_SYMBOL(vpe_notify);
 
 char *vpe_getcwd(int index)
 {
-	struct vpe *v;
+	struct vpe *v = get_vpe(index);
 
-	if ((v = get_vpe(index)) == NULL)
+	if (v == NULL)
 		return NULL;
 
 	return v->cwd;
 }
-
 EXPORT_SYMBOL(vpe_getcwd);
 
-static ssize_t store_kill(struct device *dev, struct device_attribute *attr,
-			  const char *buf, size_t len)
-{
-	struct vpe *vpe = get_vpe(tclimit);
-	struct vpe_notifications *not;
-
-	list_for_each_entry(not, &vpe->notify, list) {
-		not->stop(tclimit);
-	}
-
-	release_progmem(vpe->load_addr);
-	cleanup_tc(get_tc(tclimit));
-	vpe_stop(vpe);
-	vpe_free(vpe);
-
-	return len;
-}
-static DEVICE_ATTR(kill, S_IWUSR, NULL, store_kill);
-
-static ssize_t ntcs_show(struct device *cd, struct device_attribute *attr,
-			 char *buf)
-{
-	struct vpe *vpe = get_vpe(tclimit);
-
-	return sprintf(buf, "%d\n", vpe->ntcs);
-}
-
-static ssize_t ntcs_store(struct device *dev, struct device_attribute *attr,
-			  const char *buf, size_t len)
-{
-	struct vpe *vpe = get_vpe(tclimit);
-	unsigned long new;
-	char *endp;
-
-	new = simple_strtoul(buf, &endp, 0);
-	if (endp == buf)
-		goto out_einval;
-
-	if (new == 0 || new > (hw_tcs - tclimit))
-		goto out_einval;
-
-	vpe->ntcs = new;
-
-	return len;
-
-out_einval:
-	return -EINVAL;
-}
-static DEVICE_ATTR_RW(ntcs);
-
-static struct attribute *vpe_attrs[] = {
-	&dev_attr_kill.attr,
-	&dev_attr_ntcs.attr,
-	NULL,
-};
-ATTRIBUTE_GROUPS(vpe);
-
-static void vpe_device_release(struct device *cd)
-{
-	kfree(cd);
-}
-
-struct class vpe_class = {
-	.name = "vpe",
-	.owner = THIS_MODULE,
-	.dev_release = vpe_device_release,
-	.dev_groups = vpe_groups,
-};
-
-struct device vpe_device;
-
-static int __init vpe_module_init(void)
-{
-	unsigned int mtflags, vpflags;
-	unsigned long flags, val;
-	struct vpe *v = NULL;
-	struct tc *t;
-	int tc, err;
-
-	if (!cpu_has_mipsmt) {
-		printk("VPE loader: not a MIPS MT capable processor\n");
-		return -ENODEV;
-	}
-
-	if (vpelimit == 0) {
-		printk(KERN_WARNING "No VPEs reserved for AP/SP, not "
-		       "initializing VPE loader.\nPass maxvpes=<n> argument as "
-		       "kernel argument\n");
-
-		return -ENODEV;
-	}
-
-	if (tclimit == 0) {
-		printk(KERN_WARNING "No TCs reserved for AP/SP, not "
-		       "initializing VPE loader.\nPass maxtcs=<n> argument as "
-		       "kernel argument\n");
-
-		return -ENODEV;
-	}
-
-	major = register_chrdev(0, module_name, &vpe_fops);
-	if (major < 0) {
-		printk("VPE loader: unable to register character device\n");
-		return major;
-	}
-
-	err = class_register(&vpe_class);
-	if (err) {
-		printk(KERN_ERR "vpe_class registration failed\n");
-		goto out_chrdev;
-	}
-
-	device_initialize(&vpe_device);
-	vpe_device.class	= &vpe_class,
-	vpe_device.parent	= NULL,
-	dev_set_name(&vpe_device, "vpe1");
-	vpe_device.devt = MKDEV(major, minor);
-	err = device_add(&vpe_device);
-	if (err) {
-		printk(KERN_ERR "Adding vpe_device failed\n");
-		goto out_class;
-	}
-
-	local_irq_save(flags);
-	mtflags = dmt();
-	vpflags = dvpe();
-
-	/* Put MVPE's into 'configuration state' */
-	set_c0_mvpcontrol(MVPCONTROL_VPC);
-
-	/* dump_mtregs(); */
-
-	val = read_c0_mvpconf0();
-	hw_tcs = (val & MVPCONF0_PTC) + 1;
-	hw_vpes = ((val & MVPCONF0_PVPE) >> MVPCONF0_PVPE_SHIFT) + 1;
-
-	for (tc = tclimit; tc < hw_tcs; tc++) {
-		/*
-		 * Must re-enable multithreading temporarily or in case we
-		 * reschedule send IPIs or similar we might hang.
-		 */
-		clear_c0_mvpcontrol(MVPCONTROL_VPC);
-		evpe(vpflags);
-		emt(mtflags);
-		local_irq_restore(flags);
-		t = alloc_tc(tc);
-		if (!t) {
-			err = -ENOMEM;
-			goto out;
-		}
-
-		local_irq_save(flags);
-		mtflags = dmt();
-		vpflags = dvpe();
-		set_c0_mvpcontrol(MVPCONTROL_VPC);
-
-		/* VPE's */
-		if (tc < hw_tcs) {
-			settc(tc);
-
-			if ((v = alloc_vpe(tc)) == NULL) {
-				printk(KERN_WARNING "VPE: unable to allocate VPE\n");
-
-				goto out_reenable;
-			}
-
-			v->ntcs = hw_tcs - tclimit;
-
-			/* add the tc to the list of this vpe's tc's. */
-			list_add(&t->tc, &v->tc);
-
-			/* deactivate all but vpe0 */
-			if (tc >= tclimit) {
-				unsigned long tmp = read_vpe_c0_vpeconf0();
-
-				tmp &= ~VPECONF0_VPA;
-
-				/* master VPE */
-				tmp |= VPECONF0_MVP;
-				write_vpe_c0_vpeconf0(tmp);
-			}
-
-			/* disable multi-threading with TC's */
-			write_vpe_c0_vpecontrol(read_vpe_c0_vpecontrol() & ~VPECONTROL_TE);
-
-			if (tc >= vpelimit) {
-				/*
-				 * Set config to be the same as vpe0,
-				 * particularly kseg0 coherency alg
-				 */
-				write_vpe_c0_config(read_c0_config());
-			}
-		}
-
-		/* TC's */
-		t->pvpe = v;	/* set the parent vpe */
-
-		if (tc >= tclimit) {
-			unsigned long tmp;
-
-			settc(tc);
-
-			/* Any TC that is bound to VPE0 gets left as is - in case
-			   we are running SMTC on VPE0. A TC that is bound to any
-			   other VPE gets bound to VPE0, ideally I'd like to make
-			   it homeless but it doesn't appear to let me bind a TC
-			   to a non-existent VPE. Which is perfectly reasonable.
-
-			   The (un)bound state is visible to an EJTAG probe so may
-			   notify GDB...
-			*/
-
-			if (((tmp = read_tc_c0_tcbind()) & TCBIND_CURVPE)) {
-				/* tc is bound >vpe0 */
-				write_tc_c0_tcbind(tmp & ~TCBIND_CURVPE);
-
-				t->pvpe = get_vpe(0);	/* set the parent vpe */
-			}
-
-			/* halt the TC */
-			write_tc_c0_tchalt(TCHALT_H);
-			mips_ihb();
-
-			tmp = read_tc_c0_tcstatus();
-
-			/* mark not activated and not dynamically allocatable */
-			tmp &= ~(TCSTATUS_A | TCSTATUS_DA);
-			tmp |= TCSTATUS_IXMT;	/* interrupt exempt */
-			write_tc_c0_tcstatus(tmp);
-		}
-	}
-
-out_reenable:
-	/* release config state */
-	clear_c0_mvpcontrol(MVPCONTROL_VPC);
-
-	evpe(vpflags);
-	emt(mtflags);
-	local_irq_restore(flags);
-
-	return 0;
-
-out_class:
-	class_unregister(&vpe_class);
-out_chrdev:
-	unregister_chrdev(major, module_name);
-
-out:
-	return err;
-}
-
-static void __exit vpe_module_exit(void)
-{
-	struct vpe *v, *n;
-
-	device_del(&vpe_device);
-	unregister_chrdev(major, module_name);
-
-	/* No locking needed here */
-	list_for_each_entry_safe(v, n, &vpecontrol.vpe_list, list) {
-		if (v->state != VPE_STATE_UNUSED)
-			release_vpe(v);
-	}
-}
-
 module_init(vpe_module_init);
 module_exit(vpe_module_exit);
 MODULE_DESCRIPTION("MIPS VPE Loader");
diff --git a/arch/mips/kvm/kvm_mips.c b/arch/mips/kvm/kvm_mips.c
index 73b3482..da5186f 100644
--- a/arch/mips/kvm/kvm_mips.c
+++ b/arch/mips/kvm/kvm_mips.c
@@ -1001,7 +1001,6 @@
 	hrtimer_init(&vcpu->arch.comparecount_timer, CLOCK_MONOTONIC,
 		     HRTIMER_MODE_REL);
 	vcpu->arch.comparecount_timer.function = kvm_mips_comparecount_wakeup;
-	kvm_mips_init_shadow_tlb(vcpu);
 	return 0;
 }
 
diff --git a/arch/mips/kvm/kvm_tlb.c b/arch/mips/kvm/kvm_tlb.c
index c777dd3..50ab9c4 100644
--- a/arch/mips/kvm/kvm_tlb.c
+++ b/arch/mips/kvm/kvm_tlb.c
@@ -10,7 +10,6 @@
 * Authors: Sanjay Lal <sanjayl@kymasys.com>
 */
 
-#include <linux/init.h>
 #include <linux/sched.h>
 #include <linux/smp.h>
 #include <linux/mm.h>
@@ -25,6 +24,7 @@
 #include <asm/mmu_context.h>
 #include <asm/pgtable.h>
 #include <asm/cacheflush.h>
+#include <asm/tlb.h>
 
 #undef CONFIG_MIPS_MT
 #include <asm/r4kcache.h>
@@ -35,9 +35,6 @@
 
 #define PRIx64 "llx"
 
-/* Use VZ EntryHi.EHINV to invalidate TLB entries */
-#define UNIQUE_ENTRYHI(idx) (CKSEG0 + ((idx) << (PAGE_SHIFT + 1)))
-
 atomic_t kvm_mips_instance;
 EXPORT_SYMBOL(kvm_mips_instance);
 
@@ -147,30 +144,6 @@
 	}
 }
 
-void kvm_mips_dump_shadow_tlbs(struct kvm_vcpu *vcpu)
-{
-	int i;
-	volatile struct kvm_mips_tlb tlb;
-
-	printk("Shadow TLBs:\n");
-	for (i = 0; i < KVM_MIPS_GUEST_TLB_SIZE; i++) {
-		tlb = vcpu->arch.shadow_tlb[smp_processor_id()][i];
-		printk("TLB%c%3d Hi 0x%08lx ",
-		       (tlb.tlb_lo0 | tlb.tlb_lo1) & MIPS3_PG_V ? ' ' : '*',
-		       i, tlb.tlb_hi);
-		printk("Lo0=0x%09" PRIx64 " %c%c attr %lx ",
-		       (uint64_t) mips3_tlbpfn_to_paddr(tlb.tlb_lo0),
-		       (tlb.tlb_lo0 & MIPS3_PG_D) ? 'D' : ' ',
-		       (tlb.tlb_lo0 & MIPS3_PG_G) ? 'G' : ' ',
-		       (tlb.tlb_lo0 >> 3) & 7);
-		printk("Lo1=0x%09" PRIx64 " %c%c attr %lx sz=%lx\n",
-		       (uint64_t) mips3_tlbpfn_to_paddr(tlb.tlb_lo1),
-		       (tlb.tlb_lo1 & MIPS3_PG_D) ? 'D' : ' ',
-		       (tlb.tlb_lo1 & MIPS3_PG_G) ? 'G' : ' ',
-		       (tlb.tlb_lo1 >> 3) & 7, tlb.tlb_mask);
-	}
-}
-
 static int kvm_mips_map_page(struct kvm *kvm, gfn_t gfn)
 {
 	int srcu_idx, err = 0;
@@ -657,70 +630,6 @@
 	cpu_context(cpu, mm) = asid_cache(cpu) = asid;
 }
 
-void kvm_shadow_tlb_put(struct kvm_vcpu *vcpu)
-{
-	unsigned long flags;
-	unsigned long old_entryhi;
-	unsigned long old_pagemask;
-	int entry = 0;
-	int cpu = smp_processor_id();
-
-	local_irq_save(flags);
-
-	old_entryhi = read_c0_entryhi();
-	old_pagemask = read_c0_pagemask();
-
-	for (entry = 0; entry < current_cpu_data.tlbsize; entry++) {
-		write_c0_index(entry);
-		mtc0_tlbw_hazard();
-		tlb_read();
-		tlbw_use_hazard();
-
-		vcpu->arch.shadow_tlb[cpu][entry].tlb_hi = read_c0_entryhi();
-		vcpu->arch.shadow_tlb[cpu][entry].tlb_lo0 = read_c0_entrylo0();
-		vcpu->arch.shadow_tlb[cpu][entry].tlb_lo1 = read_c0_entrylo1();
-		vcpu->arch.shadow_tlb[cpu][entry].tlb_mask = read_c0_pagemask();
-	}
-
-	write_c0_entryhi(old_entryhi);
-	write_c0_pagemask(old_pagemask);
-	mtc0_tlbw_hazard();
-
-	local_irq_restore(flags);
-
-}
-
-void kvm_shadow_tlb_load(struct kvm_vcpu *vcpu)
-{
-	unsigned long flags;
-	unsigned long old_ctx;
-	int entry;
-	int cpu = smp_processor_id();
-
-	local_irq_save(flags);
-
-	old_ctx = read_c0_entryhi();
-
-	for (entry = 0; entry < current_cpu_data.tlbsize; entry++) {
-		write_c0_entryhi(vcpu->arch.shadow_tlb[cpu][entry].tlb_hi);
-		mtc0_tlbw_hazard();
-		write_c0_entrylo0(vcpu->arch.shadow_tlb[cpu][entry].tlb_lo0);
-		write_c0_entrylo1(vcpu->arch.shadow_tlb[cpu][entry].tlb_lo1);
-
-		write_c0_index(entry);
-		mtc0_tlbw_hazard();
-
-		tlb_write_indexed();
-		tlbw_use_hazard();
-	}
-
-	tlbw_use_hazard();
-	write_c0_entryhi(old_ctx);
-	mtc0_tlbw_hazard();
-	local_irq_restore(flags);
-}
-
-
 void kvm_local_flush_tlb_all(void)
 {
 	unsigned long flags;
@@ -749,30 +658,6 @@
 	local_irq_restore(flags);
 }
 
-void kvm_mips_init_shadow_tlb(struct kvm_vcpu *vcpu)
-{
-	int cpu, entry;
-
-	for_each_possible_cpu(cpu) {
-		for (entry = 0; entry < current_cpu_data.tlbsize; entry++) {
-			vcpu->arch.shadow_tlb[cpu][entry].tlb_hi =
-			    UNIQUE_ENTRYHI(entry);
-			vcpu->arch.shadow_tlb[cpu][entry].tlb_lo0 = 0x0;
-			vcpu->arch.shadow_tlb[cpu][entry].tlb_lo1 = 0x0;
-			vcpu->arch.shadow_tlb[cpu][entry].tlb_mask =
-			    read_c0_pagemask();
-#ifdef DEBUG
-			kvm_debug
-			    ("shadow_tlb[%d][%d]: tlb_hi: %#lx, lo0: %#lx, lo1: %#lx\n",
-			     cpu, entry,
-			     vcpu->arch.shadow_tlb[cpu][entry].tlb_hi,
-			     vcpu->arch.shadow_tlb[cpu][entry].tlb_lo0,
-			     vcpu->arch.shadow_tlb[cpu][entry].tlb_lo1);
-#endif
-		}
-	}
-}
-
 /* Restore ASID once we are scheduled back after preemption */
 void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
 {
@@ -810,14 +695,6 @@
 			 vcpu->arch.last_sched_cpu, cpu, vcpu->vcpu_id);
 	}
 
-	/* Only reload shadow host TLB if new ASIDs haven't been allocated */
-#if 0
-	if ((atomic_read(&kvm_mips_instance) > 1) && !newasid) {
-		kvm_mips_flush_host_tlb(0);
-		kvm_shadow_tlb_load(vcpu);
-	}
-#endif
-
 	if (!newasid) {
 		/* If we preempted while the guest was executing, then reload the pre-empted ASID */
 		if (current->flags & PF_VCPU) {
@@ -863,12 +740,6 @@
 	vcpu->arch.preempt_entryhi = read_c0_entryhi();
 	vcpu->arch.last_sched_cpu = cpu;
 
-#if 0
-	if ((atomic_read(&kvm_mips_instance) > 1)) {
-		kvm_shadow_tlb_put(vcpu);
-	}
-#endif
-
 	if (((cpu_context(cpu, current->mm) ^ asid_cache(cpu)) &
 	     ASID_VERSION_MASK)) {
 		kvm_debug("%s: Dropping MMU Context:  %#lx\n", __func__,
@@ -930,10 +801,8 @@
 }
 
 EXPORT_SYMBOL(kvm_local_flush_tlb_all);
-EXPORT_SYMBOL(kvm_shadow_tlb_put);
 EXPORT_SYMBOL(kvm_mips_handle_mapped_seg_tlb_fault);
 EXPORT_SYMBOL(kvm_mips_handle_commpage_tlb_fault);
-EXPORT_SYMBOL(kvm_mips_init_shadow_tlb);
 EXPORT_SYMBOL(kvm_mips_dump_host_tlbs);
 EXPORT_SYMBOL(kvm_mips_handle_kseg0_tlb_fault);
 EXPORT_SYMBOL(kvm_mips_host_tlb_lookup);
@@ -941,8 +810,6 @@
 EXPORT_SYMBOL(kvm_mips_guest_tlb_lookup);
 EXPORT_SYMBOL(kvm_mips_host_tlb_inv);
 EXPORT_SYMBOL(kvm_mips_translate_guest_kseg0_to_hpa);
-EXPORT_SYMBOL(kvm_shadow_tlb_load);
-EXPORT_SYMBOL(kvm_mips_dump_shadow_tlbs);
 EXPORT_SYMBOL(kvm_mips_dump_guest_tlbs);
 EXPORT_SYMBOL(kvm_get_inst);
 EXPORT_SYMBOL(kvm_arch_vcpu_load);
diff --git a/arch/mips/lantiq/xway/clk.c b/arch/mips/lantiq/xway/clk.c
index 1ab576d..8750dc0 100644
--- a/arch/mips/lantiq/xway/clk.c
+++ b/arch/mips/lantiq/xway/clk.c
@@ -8,7 +8,6 @@
 
 #include <linux/io.h>
 #include <linux/export.h>
-#include <linux/init.h>
 #include <linux/clk.h>
 
 #include <asm/time.h>
diff --git a/arch/mips/lantiq/xway/dma.c b/arch/mips/lantiq/xway/dma.c
index 08f7ebd..78a91fa 100644
--- a/arch/mips/lantiq/xway/dma.c
+++ b/arch/mips/lantiq/xway/dma.c
@@ -220,10 +220,6 @@
 	int i;
 
 	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-	if (!res)
-		panic("Failed to get dma resource");
-
-	/* remap dma register range */
 	ltq_dma_membase = devm_ioremap_resource(&pdev->dev, res);
 	if (IS_ERR(ltq_dma_membase))
 		panic("Failed to remap dma resource");
diff --git a/arch/mips/lasat/at93c.c b/arch/mips/lasat/at93c.c
index 793e234..942f32b 100644
--- a/arch/mips/lasat/at93c.c
+++ b/arch/mips/lasat/at93c.c
@@ -8,7 +8,6 @@
 #include <linux/delay.h>
 #include <asm/lasat/lasat.h>
 #include <linux/module.h>
-#include <linux/init.h>
 
 #include "at93c.h"
 
diff --git a/arch/mips/lasat/picvue.c b/arch/mips/lasat/picvue.c
index 7eb3348..d613b97 100644
--- a/arch/mips/lasat/picvue.c
+++ b/arch/mips/lasat/picvue.c
@@ -9,7 +9,6 @@
 #include <asm/bootinfo.h>
 #include <asm/lasat/lasat.h>
 #include <linux/module.h>
-#include <linux/init.h>
 #include <linux/errno.h>
 #include <linux/string.h>
 
diff --git a/arch/mips/lib/uncached.c b/arch/mips/lib/uncached.c
index d8522f8..09d5dee 100644
--- a/arch/mips/lib/uncached.c
+++ b/arch/mips/lib/uncached.c
@@ -8,7 +8,6 @@
  *	Author: Maciej W. Rozycki <macro@mips.com>
  */
 
-#include <linux/init.h>
 
 #include <asm/addrspace.h>
 #include <asm/bug.h>
diff --git a/arch/mips/loongson/lemote-2f/clock.c b/arch/mips/loongson/lemote-2f/clock.c
index 4dc2f5f..aed32b8 100644
--- a/arch/mips/loongson/lemote-2f/clock.c
+++ b/arch/mips/loongson/lemote-2f/clock.c
@@ -10,7 +10,6 @@
 #include <linux/cpufreq.h>
 #include <linux/errno.h>
 #include <linux/export.h>
-#include <linux/init.h>
 #include <linux/list.h>
 #include <linux/mutex.h>
 #include <linux/spinlock.h>
diff --git a/arch/mips/math-emu/cp1emu.c b/arch/mips/math-emu/cp1emu.c
index efe0088..506925b 100644
--- a/arch/mips/math-emu/cp1emu.c
+++ b/arch/mips/math-emu/cp1emu.c
@@ -417,14 +417,20 @@
 			case mm_mtc1_op:
 			case mm_cfc1_op:
 			case mm_ctc1_op:
+			case mm_mfhc1_op:
+			case mm_mthc1_op:
 				if (insn.mm_fp1_format.op == mm_mfc1_op)
 					op = mfc_op;
 				else if (insn.mm_fp1_format.op == mm_mtc1_op)
 					op = mtc_op;
 				else if (insn.mm_fp1_format.op == mm_cfc1_op)
 					op = cfc_op;
-				else
+				else if (insn.mm_fp1_format.op == mm_ctc1_op)
 					op = ctc_op;
+				else if (insn.mm_fp1_format.op == mm_mfhc1_op)
+					op = mfhc_op;
+				else
+					op = mthc_op;
 				mips32_insn.fp1_format.opcode = cop1_op;
 				mips32_insn.fp1_format.op = op;
 				mips32_insn.fp1_format.rt =
@@ -853,20 +859,20 @@
  * In the Linux kernel, we support selection of FPR format on the
  * basis of the Status.FR bit.	If an FPU is not present, the FR bit
  * is hardwired to zero, which would imply a 32-bit FPU even for
- * 64-bit CPUs so we rather look at TIF_32BIT_REGS.
+ * 64-bit CPUs so we rather look at TIF_32BIT_FPREGS.
  * FPU emu is slow and bulky and optimizing this function offers fairly
  * sizeable benefits so we try to be clever and make this function return
  * a constant whenever possible, that is on 64-bit kernels without O32
- * compatibility enabled and on 32-bit kernels.
+ * compatibility enabled and on 32-bit without 64-bit FPU support.
  */
 static inline int cop1_64bit(struct pt_regs *xcp)
 {
 #if defined(CONFIG_64BIT) && !defined(CONFIG_MIPS32_O32)
 	return 1;
-#elif defined(CONFIG_64BIT) && defined(CONFIG_MIPS32_O32)
-	return !test_thread_flag(TIF_32BIT_REGS);
-#else
+#elif defined(CONFIG_32BIT) && !defined(CONFIG_MIPS_O32_FP64_SUPPORT)
 	return 0;
+#else
+	return !test_thread_flag(TIF_32BIT_FPREGS);
 #endif
 }
 
@@ -878,6 +884,10 @@
 			ctx->fpr[x & ~1] >> 32 << 32 | (u32)(si) : \
 			ctx->fpr[x & ~1] << 32 >> 32 | (u64)(si) << 32)
 
+#define SIFROMHREG(si, x)	((si) = (int)(ctx->fpr[x] >> 32))
+#define SITOHREG(si, x)		(ctx->fpr[x] = \
+				ctx->fpr[x] << 32 >> 32 | (u64)(si) << 32)
+
 #define DIFROMREG(di, x) ((di) = ctx->fpr[x & ~(cop1_64bit(xcp) == 0)])
 #define DITOREG(di, x)	(ctx->fpr[x & ~(cop1_64bit(xcp) == 0)] = (di))
 
@@ -1055,6 +1065,25 @@
 			break;
 #endif
 
+		case mfhc_op:
+			if (!cpu_has_mips_r2)
+				goto sigill;
+
+			/* copregister rd -> gpr[rt] */
+			if (MIPSInst_RT(ir) != 0) {
+				SIFROMHREG(xcp->regs[MIPSInst_RT(ir)],
+					MIPSInst_RD(ir));
+			}
+			break;
+
+		case mthc_op:
+			if (!cpu_has_mips_r2)
+				goto sigill;
+
+			/* copregister rd <- gpr[rt] */
+			SITOHREG(xcp->regs[MIPSInst_RT(ir)], MIPSInst_RD(ir));
+			break;
+
 		case mfc_op:
 			/* copregister rd -> gpr[rt] */
 			if (MIPSInst_RT(ir) != 0) {
@@ -1263,6 +1292,7 @@
 #endif
 
 	default:
+sigill:
 		return SIGILL;
 	}
 
diff --git a/arch/mips/math-emu/kernel_linkage.c b/arch/mips/math-emu/kernel_linkage.c
index 1c58657..3aeae07 100644
--- a/arch/mips/math-emu/kernel_linkage.c
+++ b/arch/mips/math-emu/kernel_linkage.c
@@ -89,8 +89,9 @@
 {
 	int i;
 	int err = 0;
+	int inc = test_thread_flag(TIF_32BIT_FPREGS) ? 2 : 1;
 
-	for (i = 0; i < 32; i+=2) {
+	for (i = 0; i < 32; i += inc) {
 		err |=
 		    __put_user(current->thread.fpu.fpr[i], &sc->sc_fpregs[i]);
 	}
@@ -103,8 +104,9 @@
 {
 	int i;
 	int err = 0;
+	int inc = test_thread_flag(TIF_32BIT_FPREGS) ? 2 : 1;
 
-	for (i = 0; i < 32; i+=2) {
+	for (i = 0; i < 32; i += inc) {
 		err |=
 		    __get_user(current->thread.fpu.fpr[i], &sc->sc_fpregs[i]);
 	}
diff --git a/arch/mips/mm/c-octeon.c b/arch/mips/mm/c-octeon.c
index c8efdb5..f41a5c5 100644
--- a/arch/mips/mm/c-octeon.c
+++ b/arch/mips/mm/c-octeon.c
@@ -6,7 +6,6 @@
  * Copyright (C) 2005-2007 Cavium Networks
  */
 #include <linux/export.h>
-#include <linux/init.h>
 #include <linux/kernel.h>
 #include <linux/sched.h>
 #include <linux/smp.h>
diff --git a/arch/mips/mm/c-r3k.c b/arch/mips/mm/c-r3k.c
index 2fcde0c..135ec31 100644
--- a/arch/mips/mm/c-r3k.c
+++ b/arch/mips/mm/c-r3k.c
@@ -9,7 +9,6 @@
  * Copyright (C) 1998 Gleb Raiko & Vladimir Roganov
  * Copyright (C) 2001, 2004, 2007  Maciej W. Rozycki
  */
-#include <linux/init.h>
 #include <linux/kernel.h>
 #include <linux/sched.h>
 #include <linux/smp.h>
diff --git a/arch/mips/mm/c-r4k.c b/arch/mips/mm/c-r4k.c
index 49e572d..c14259e 100644
--- a/arch/mips/mm/c-r4k.c
+++ b/arch/mips/mm/c-r4k.c
@@ -1020,10 +1020,14 @@
 		 */
 		config1 = read_c0_config1();
 
-		if ((lsize = ((config1 >> 19) & 7)))
-			c->icache.linesz = 2 << lsize;
-		else
-			c->icache.linesz = lsize;
+		lsize = (config1 >> 19) & 7;
+
+		/* IL == 7 is reserved */
+		if (lsize == 7)
+			panic("Invalid icache line size");
+
+		c->icache.linesz = lsize ? 2 << lsize : 0;
+
 		c->icache.sets = 32 << (((config1 >> 22) + 1) & 7);
 		c->icache.ways = 1 + ((config1 >> 16) & 7);
 
@@ -1040,10 +1044,14 @@
 		 */
 		c->dcache.flags = 0;
 
-		if ((lsize = ((config1 >> 10) & 7)))
-			c->dcache.linesz = 2 << lsize;
-		else
-			c->dcache.linesz= lsize;
+		lsize = (config1 >> 10) & 7;
+
+		/* DL == 7 is reserved */
+		if (lsize == 7)
+			panic("Invalid dcache line size");
+
+		c->dcache.linesz = lsize ? 2 << lsize : 0;
+
 		c->dcache.sets = 32 << (((config1 >> 13) + 1) & 7);
 		c->dcache.ways = 1 + ((config1 >> 7) & 7);
 
@@ -1105,6 +1113,8 @@
 	case CPU_34K:
 	case CPU_74K:
 	case CPU_1004K:
+	case CPU_INTERAPTIV:
+	case CPU_PROAPTIV:
 		if (current_cpu_type() == CPU_74K)
 			alias_74k_erratum(c);
 		if ((read_c0_config7() & (1 << 16))) {
diff --git a/arch/mips/mm/cache.c b/arch/mips/mm/cache.c
index 15f813c..fde7e56 100644
--- a/arch/mips/mm/cache.c
+++ b/arch/mips/mm/cache.c
@@ -8,7 +8,6 @@
  */
 #include <linux/fs.h>
 #include <linux/fcntl.h>
-#include <linux/init.h>
 #include <linux/kernel.h>
 #include <linux/linkage.h>
 #include <linux/module.h>
diff --git a/arch/mips/mm/cex-sb1.S b/arch/mips/mm/cex-sb1.S
index 191cf6e..5d5f296 100644
--- a/arch/mips/mm/cex-sb1.S
+++ b/arch/mips/mm/cex-sb1.S
@@ -15,7 +15,6 @@
  * along with this program; if not, write to the Free Software
  * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA  02111-1307, USA.
  */
-#include <linux/init.h>
 
 #include <asm/asm.h>
 #include <asm/regdef.h>
diff --git a/arch/mips/mm/dma-default.c b/arch/mips/mm/dma-default.c
index 2e94185..44b6dff 100644
--- a/arch/mips/mm/dma-default.c
+++ b/arch/mips/mm/dma-default.c
@@ -23,6 +23,7 @@
 
 #include <dma-coherence.h>
 
+#ifdef CONFIG_DMA_MAYBE_COHERENT
 int coherentio = 0;	/* User defined DMA coherency from command line. */
 EXPORT_SYMBOL_GPL(coherentio);
 int hw_coherentio = 0;	/* Actual hardware supported DMA coherency setting. */
@@ -42,6 +43,7 @@
 	return 0;
 }
 early_param("nocoherentio", setnocoherentio);
+#endif
 
 static inline struct page *dma_addr_to_page(struct device *dev,
 	dma_addr_t dma_addr)
diff --git a/arch/mips/mm/hugetlbpage.c b/arch/mips/mm/hugetlbpage.c
index 01fda44..77e0ae0 100644
--- a/arch/mips/mm/hugetlbpage.c
+++ b/arch/mips/mm/hugetlbpage.c
@@ -11,7 +11,6 @@
  * Copyright (C) 2008, 2009 Cavium Networks, Inc.
  */
 
-#include <linux/init.h>
 #include <linux/fs.h>
 #include <linux/mm.h>
 #include <linux/hugetlb.h>
diff --git a/arch/mips/mm/init.c b/arch/mips/mm/init.c
index 1215617..6b59617 100644
--- a/arch/mips/mm/init.c
+++ b/arch/mips/mm/init.c
@@ -171,8 +171,6 @@
 	return (void*) vaddr;
 }
 
-#define UNIQUE_ENTRYHI(idx) (CKSEG0 + ((idx) << (PAGE_SHIFT + 1)))
-
 void kunmap_coherent(void)
 {
 #ifndef CONFIG_MIPS_MT_SMTC
diff --git a/arch/mips/mm/page.c b/arch/mips/mm/page.c
index cbd81d1..58033c4 100644
--- a/arch/mips/mm/page.c
+++ b/arch/mips/mm/page.c
@@ -8,7 +8,6 @@
  * Copyright (C) 2008  Thiemo Seufer
  * Copyright (C) 2012  MIPS Technologies, Inc.
  */
-#include <linux/init.h>
 #include <linux/kernel.h>
 #include <linux/sched.h>
 #include <linux/smp.h>
diff --git a/arch/mips/mm/sc-mips.c b/arch/mips/mm/sc-mips.c
index 08d05ae..7a56aee 100644
--- a/arch/mips/mm/sc-mips.c
+++ b/arch/mips/mm/sc-mips.c
@@ -76,6 +76,8 @@
 	case CPU_34K:
 	case CPU_74K:
 	case CPU_1004K:
+	case CPU_INTERAPTIV:
+	case CPU_PROAPTIV:
 	case CPU_BMIPS5000:
 		if (config2 & (1 << 12))
 			return 0;
diff --git a/arch/mips/mm/sc-rm7k.c b/arch/mips/mm/sc-rm7k.c
index aaffbba..9ac1efc 100644
--- a/arch/mips/mm/sc-rm7k.c
+++ b/arch/mips/mm/sc-rm7k.c
@@ -6,7 +6,6 @@
 
 #undef DEBUG
 
-#include <linux/init.h>
 #include <linux/kernel.h>
 #include <linux/mm.h>
 #include <linux/bitops.h>
diff --git a/arch/mips/mm/tlb-r3k.c b/arch/mips/mm/tlb-r3k.c
index 9aca109..d657493 100644
--- a/arch/mips/mm/tlb-r3k.c
+++ b/arch/mips/mm/tlb-r3k.c
@@ -10,7 +10,6 @@
  * Copyright (C) 2002  Ralf Baechle
  * Copyright (C) 2002  Maciej W. Rozycki
  */
-#include <linux/init.h>
 #include <linux/kernel.h>
 #include <linux/sched.h>
 #include <linux/smp.h>
diff --git a/arch/mips/mm/tlb-r4k.c b/arch/mips/mm/tlb-r4k.c
index da3b0b9..ae4ca24 100644
--- a/arch/mips/mm/tlb-r4k.c
+++ b/arch/mips/mm/tlb-r4k.c
@@ -20,16 +20,11 @@
 #include <asm/bootinfo.h>
 #include <asm/mmu_context.h>
 #include <asm/pgtable.h>
+#include <asm/tlb.h>
 #include <asm/tlbmisc.h>
 
 extern void build_tlb_refill_handler(void);
 
-/*
- * Make sure all entries differ.  If they're not different
- * MIPS32 will take revenge ...
- */
-#define UNIQUE_ENTRYHI(idx) (CKSEG0 + ((idx) << (PAGE_SHIFT + 1)))
-
 /* Atomicity and interruptability */
 #ifdef CONFIG_MIPS_MT_SMTC
 
@@ -77,7 +72,7 @@
 {
 	unsigned long flags;
 	unsigned long old_ctx;
-	int entry;
+	int entry, ftlbhighset;
 
 	ENTER_CRITICAL(flags);
 	/* Save old context and create impossible VPN2 value */
@@ -88,13 +83,30 @@
 	entry = read_c0_wired();
 
 	/* Blast 'em all away. */
-	while (entry < current_cpu_data.tlbsize) {
-		/* Make sure all entries differ. */
-		write_c0_entryhi(UNIQUE_ENTRYHI(entry));
-		write_c0_index(entry);
-		mtc0_tlbw_hazard();
-		tlb_write_indexed();
-		entry++;
+	if (cpu_has_tlbinv) {
+		if (current_cpu_data.tlbsizevtlb) {
+			write_c0_index(0);
+			mtc0_tlbw_hazard();
+			tlbinvf();  /* invalidate VTLB */
+		}
+		ftlbhighset = current_cpu_data.tlbsizevtlb +
+			current_cpu_data.tlbsizeftlbsets;
+		for (entry = current_cpu_data.tlbsizevtlb;
+		     entry < ftlbhighset;
+		     entry++) {
+			write_c0_index(entry);
+			mtc0_tlbw_hazard();
+			tlbinvf();  /* invalidate one FTLB set */
+		}
+	} else {
+		while (entry < current_cpu_data.tlbsize) {
+			/* Make sure all entries differ. */
+			write_c0_entryhi(UNIQUE_ENTRYHI(entry));
+			write_c0_index(entry);
+			mtc0_tlbw_hazard();
+			tlb_write_indexed();
+			entry++;
+		}
 	}
 	tlbw_use_hazard();
 	write_c0_entryhi(old_ctx);
@@ -133,7 +145,9 @@
 		start = round_down(start, PAGE_SIZE << 1);
 		end = round_up(end, PAGE_SIZE << 1);
 		size = (end - start) >> (PAGE_SHIFT + 1);
-		if (size <= current_cpu_data.tlbsize/2) {
+		if (size <= (current_cpu_data.tlbsizeftlbsets ?
+			     current_cpu_data.tlbsize / 8 :
+			     current_cpu_data.tlbsize / 2)) {
 			int oldpid = read_c0_entryhi();
 			int newpid = cpu_asid(cpu, mm);
 
@@ -172,7 +186,9 @@
 	ENTER_CRITICAL(flags);
 	size = (end - start + (PAGE_SIZE - 1)) >> PAGE_SHIFT;
 	size = (size + 1) >> 1;
-	if (size <= current_cpu_data.tlbsize / 2) {
+	if (size <= (current_cpu_data.tlbsizeftlbsets ?
+		     current_cpu_data.tlbsize / 8 :
+		     current_cpu_data.tlbsize / 2)) {
 		int pid = read_c0_entryhi();
 
 		start &= (PAGE_MASK << 1);
diff --git a/arch/mips/mm/tlb-r8k.c b/arch/mips/mm/tlb-r8k.c
index 6a99733..138a2ec 100644
--- a/arch/mips/mm/tlb-r8k.c
+++ b/arch/mips/mm/tlb-r8k.c
@@ -8,7 +8,6 @@
  * Carsten Langgaard, carstenl@mips.com
  * Copyright (C) 2002 MIPS Technologies, Inc.  All rights reserved.
  */
-#include <linux/init.h>
 #include <linux/sched.h>
 #include <linux/smp.h>
 #include <linux/mm.h>
diff --git a/arch/mips/mm/tlbex.c b/arch/mips/mm/tlbex.c
index 183f2b5..b234b1b 100644
--- a/arch/mips/mm/tlbex.c
+++ b/arch/mips/mm/tlbex.c
@@ -26,7 +26,6 @@
 #include <linux/types.h>
 #include <linux/smp.h>
 #include <linux/string.h>
-#include <linux/init.h>
 #include <linux/cache.h>
 
 #include <asm/cacheflush.h>
@@ -510,6 +509,7 @@
 		switch (current_cpu_type()) {
 		case CPU_M14KC:
 		case CPU_74K:
+		case CPU_PROAPTIV:
 			break;
 
 		default:
diff --git a/arch/mips/mm/uasm-micromips.c b/arch/mips/mm/uasm-micromips.c
index 060000f..b8d580c 100644
--- a/arch/mips/mm/uasm-micromips.c
+++ b/arch/mips/mm/uasm-micromips.c
@@ -15,7 +15,6 @@
 
 #include <linux/kernel.h>
 #include <linux/types.h>
-#include <linux/init.h>
 
 #include <asm/inst.h>
 #include <asm/elf.h>
diff --git a/arch/mips/mm/uasm-mips.c b/arch/mips/mm/uasm-mips.c
index 0c72458..3abd609 100644
--- a/arch/mips/mm/uasm-mips.c
+++ b/arch/mips/mm/uasm-mips.c
@@ -15,7 +15,6 @@
 
 #include <linux/kernel.h>
 #include <linux/types.h>
-#include <linux/init.h>
 
 #include <asm/inst.h>
 #include <asm/elf.h>
diff --git a/arch/mips/mti-malta/Makefile b/arch/mips/mti-malta/Makefile
index 72fdedb..eae0ba3 100644
--- a/arch/mips/mti-malta/Makefile
+++ b/arch/mips/mti-malta/Makefile
@@ -9,7 +9,5 @@
 				   malta-int.o malta-memory.o malta-platform.o \
 				   malta-reset.o malta-setup.o malta-time.o
 
-obj-$(CONFIG_EARLY_PRINTK)	+= malta-console.o
-
 # FIXME FIXME FIXME
 obj-$(CONFIG_MIPS_MT_SMTC)	+= malta-smtc.o
diff --git a/arch/mips/mti-malta/malta-amon.c b/arch/mips/mti-malta/malta-amon.c
index 1e47844..592ac04 100644
--- a/arch/mips/mti-malta/malta-amon.c
+++ b/arch/mips/mti-malta/malta-amon.c
@@ -1,30 +1,20 @@
 /*
- * Copyright (C) 2007  MIPS Technologies, Inc.
- *	All rights reserved.
-
- *  This program is free software; you can distribute it and/or modify it
- *  under the terms of the GNU General Public License (Version 2) as
- *  published by the Free Software Foundation.
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
  *
- *  This program is distributed in the hope it will be useful, but WITHOUT
- *  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- *  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- *  for more details.
+ * Copyright (C) 2007 MIPS Technologies, Inc.  All rights reserved.
+ * Copyright (C) 2013 Imagination Technologies Ltd.
  *
- *  You should have received a copy of the GNU General Public License along
- *  with this program; if not, write to the Free Software Foundation, Inc.,
- *  59 Temple Place - Suite 330, Boston MA 02111-1307, USA.
- *
- * Arbitrary Monitor interface
+ * Arbitrary Monitor Interface
  */
-
 #include <linux/kernel.h>
-#include <linux/init.h>
 #include <linux/smp.h>
 
 #include <asm/addrspace.h>
-#include <asm/mips-boards/launch.h>
 #include <asm/mipsmtregs.h>
+#include <asm/mips-boards/launch.h>
+#include <asm/vpe.h>
 
 int amon_cpu_avail(int cpu)
 {
@@ -48,7 +38,7 @@
 	return 1;
 }
 
-void amon_cpu_start(int cpu,
+int amon_cpu_start(int cpu,
 		    unsigned long pc, unsigned long sp,
 		    unsigned long gp, unsigned long a0)
 {
@@ -56,10 +46,10 @@
 		(struct cpulaunch  *)CKSEG0ADDR(CPULAUNCH);
 
 	if (!amon_cpu_avail(cpu))
-		return;
+		return -1;
 	if (cpu == smp_processor_id()) {
 		pr_debug("launch: I am cpu%d!\n", cpu);
-		return;
+		return -1;
 	}
 	launch += cpu;
 
@@ -78,4 +68,21 @@
 		;
 	smp_rmb();	/* Target will be updating flags soon */
 	pr_debug("launch: cpu%d gone!\n", cpu);
+
+	return 0;
 }
+
+#ifdef CONFIG_MIPS_VPE_LOADER
+int vpe_run(struct vpe *v)
+{
+	struct vpe_notifications *n;
+
+	if (amon_cpu_start(aprp_cpu_index(), v->__start, 0, 0, 0) < 0)
+		return -1;
+
+	list_for_each_entry(n, &v->notify, list)
+		n->start(VPE_MODULE_MINOR);
+
+	return 0;
+}
+#endif
diff --git a/arch/mips/mti-malta/malta-console.c b/arch/mips/mti-malta/malta-console.c
deleted file mode 100644
index 43bcfb4..0000000
--- a/arch/mips/mti-malta/malta-console.c
+++ /dev/null
@@ -1,47 +0,0 @@
-/*
- * Carsten Langgaard, carstenl@mips.com
- * Copyright (C) 1999,2000 MIPS Technologies, Inc.  All rights reserved.
- *
- *  This program is free software; you can distribute it and/or modify it
- *  under the terms of the GNU General Public License (Version 2) as
- *  published by the Free Software Foundation.
- *
- *  This program is distributed in the hope it will be useful, but WITHOUT
- *  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- *  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- *  for more details.
- *
- *  You should have received a copy of the GNU General Public License along
- *  with this program; if not, write to the Free Software Foundation, Inc.,
- *  59 Temple Place - Suite 330, Boston MA 02111-1307, USA.
- *
- * Putting things on the screen/serial line using YAMONs facilities.
- */
-#include <linux/console.h>
-#include <linux/init.h>
-#include <linux/serial_reg.h>
-#include <asm/io.h>
-
-
-#define PORT(offset) (0x3f8 + (offset))
-
-
-static inline unsigned int serial_in(int offset)
-{
-	return inb(PORT(offset));
-}
-
-static inline void serial_out(int offset, int value)
-{
-	outb(value, PORT(offset));
-}
-
-int prom_putchar(char c)
-{
-	while ((serial_in(UART_LSR) & UART_LSR_THRE) == 0)
-		;
-
-	serial_out(UART_TX, c);
-
-	return 1;
-}
diff --git a/arch/mips/mti-malta/malta-init.c b/arch/mips/mti-malta/malta-init.c
index ff8caff..fcebfce 100644
--- a/arch/mips/mti-malta/malta-init.c
+++ b/arch/mips/mti-malta/malta-init.c
@@ -14,6 +14,7 @@
 #include <linux/init.h>
 #include <linux/string.h>
 #include <linux/kernel.h>
+#include <linux/serial_8250.h>
 
 #include <asm/cacheflush.h>
 #include <asm/smp-ops.h>
@@ -44,32 +45,39 @@
 	char parity = '\0', bits = '\0', flow = '\0';
 	char *s;
 
-	if ((strstr(fw_getcmdline(), "console=")) == NULL) {
-		s = fw_getenv("modetty0");
-		if (s) {
-			while (*s >= '0' && *s <= '9')
-				baud = baud*10 + *s++ - '0';
-			if (*s == ',')
-				s++;
-			if (*s)
-				parity = *s++;
-			if (*s == ',')
-				s++;
-			if (*s)
-				bits = *s++;
-			if (*s == ',')
-				s++;
-			if (*s == 'h')
-				flow = 'r';
-		}
-		if (baud == 0)
-			baud = 38400;
-		if (parity != 'n' && parity != 'o' && parity != 'e')
-			parity = 'n';
-		if (bits != '7' && bits != '8')
-			bits = '8';
-		if (flow == '\0')
+	s = fw_getenv("modetty0");
+	if (s) {
+		while (*s >= '0' && *s <= '9')
+			baud = baud*10 + *s++ - '0';
+		if (*s == ',')
+			s++;
+		if (*s)
+			parity = *s++;
+		if (*s == ',')
+			s++;
+		if (*s)
+			bits = *s++;
+		if (*s == ',')
+			s++;
+		if (*s == 'h')
 			flow = 'r';
+	}
+	if (baud == 0)
+		baud = 38400;
+	if (parity != 'n' && parity != 'o' && parity != 'e')
+		parity = 'n';
+	if (bits != '7' && bits != '8')
+		bits = '8';
+	if (flow == '\0')
+		flow = 'r';
+
+	if ((strstr(fw_getcmdline(), "earlycon=")) == NULL) {
+		sprintf(console_string, "uart8250,io,0x3f8,%d%c%c", baud,
+			parity, bits);
+		setup_early_serial8250_console(console_string);
+	}
+
+	if ((strstr(fw_getcmdline(), "console=")) == NULL) {
 		sprintf(console_string, " console=ttyS0,%d%c%c%c", baud,
 			parity, bits, flow);
 		strcat(fw_getcmdline(), console_string);
diff --git a/arch/mips/mti-malta/malta-int.c b/arch/mips/mti-malta/malta-int.c
index 0892575..ca3e3a4 100644
--- a/arch/mips/mti-malta/malta-int.c
+++ b/arch/mips/mti-malta/malta-int.c
@@ -1,25 +1,16 @@
 /*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
  * Carsten Langgaard, carstenl@mips.com
  * Copyright (C) 2000, 2001, 2004 MIPS Technologies, Inc.
  * Copyright (C) 2001 Ralf Baechle
- *
- *  This program is free software; you can distribute it and/or modify it
- *  under the terms of the GNU General Public License (Version 2) as
- *  published by the Free Software Foundation.
- *
- *  This program is distributed in the hope it will be useful, but WITHOUT
- *  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- *  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- *  for more details.
- *
- *  You should have received a copy of the GNU General Public License along
- *  with this program; if not, write to the Free Software Foundation, Inc.,
- *  59 Temple Place - Suite 330, Boston MA 02111-1307, USA.
+ * Copyright (C) 2013 Imagination Technologies Ltd.
  *
  * Routines for generic manipulation of the interrupts found on the MIPS
- * Malta board.
- * The interrupt controller is located in the South Bridge a PIIX4 device
- * with two internal 82C95 interrupt controllers.
+ * Malta board. The interrupt controller is located in the South Bridge
+ * a PIIX4 device with two internal 82C95 interrupt controllers.
  */
 #include <linux/init.h>
 #include <linux/irq.h>
@@ -44,6 +35,7 @@
 #include <asm/gic.h>
 #include <asm/gcmpregs.h>
 #include <asm/setup.h>
+#include <asm/rtlx.h>
 
 int gcmp_present = -1;
 static unsigned long _msc01_biu_base;
@@ -90,7 +82,7 @@
 		BONITO_PCIMAP_CFG = 0;
 		break;
 	default:
-		printk(KERN_WARNING "Unknown system controller.\n");
+		pr_emerg("Unknown system controller.\n");
 		return -1;
 	}
 	return irq;
@@ -126,6 +118,11 @@
 	}
 
 	do_IRQ(MALTA_INT_BASE + irq);
+
+#ifdef CONFIG_MIPS_VPE_APSP_API
+	if (aprp_hook)
+		aprp_hook();
+#endif
 }
 
 static void malta_ipi_irqdispatch(void)
@@ -149,11 +146,11 @@
 	unsigned int intrcause, datalo, datahi;
 	struct pt_regs *regs = get_irq_regs();
 
-	printk(KERN_EMERG "CoreHI interrupt, shouldn't happen, we die here!\n");
-	printk(KERN_EMERG "epc	 : %08lx\nStatus: %08lx\n"
-			"Cause : %08lx\nbadVaddr : %08lx\n",
-			regs->cp0_epc, regs->cp0_status,
-			regs->cp0_cause, regs->cp0_badvaddr);
+	pr_emerg("CoreHI interrupt, shouldn't happen, we die here!\n");
+	pr_emerg("epc	 : %08lx\nStatus: %08lx\n"
+		 "Cause : %08lx\nbadVaddr : %08lx\n",
+		 regs->cp0_epc, regs->cp0_status,
+		 regs->cp0_cause, regs->cp0_badvaddr);
 
 	/* Read all the registers and then print them as there is a
 	   problem with interspersed printk's upsetting the Bonito controller.
@@ -171,8 +168,8 @@
 		intrcause = GT_READ(GT_INTRCAUSE_OFS);
 		datalo = GT_READ(GT_CPUERR_ADDRLO_OFS);
 		datahi = GT_READ(GT_CPUERR_ADDRHI_OFS);
-		printk(KERN_EMERG "GT_INTRCAUSE = %08x\n", intrcause);
-		printk(KERN_EMERG "GT_CPUERR_ADDR = %02x%08x\n",
+		pr_emerg("GT_INTRCAUSE = %08x\n", intrcause);
+		pr_emerg("GT_CPUERR_ADDR = %02x%08x\n",
 				datahi, datalo);
 		break;
 	case MIPS_REVISION_SCON_BONITO:
@@ -184,14 +181,14 @@
 		intedge = BONITO_INTEDGE;
 		intsteer = BONITO_INTSTEER;
 		pcicmd = BONITO_PCICMD;
-		printk(KERN_EMERG "BONITO_INTISR = %08x\n", intisr);
-		printk(KERN_EMERG "BONITO_INTEN = %08x\n", inten);
-		printk(KERN_EMERG "BONITO_INTPOL = %08x\n", intpol);
-		printk(KERN_EMERG "BONITO_INTEDGE = %08x\n", intedge);
-		printk(KERN_EMERG "BONITO_INTSTEER = %08x\n", intsteer);
-		printk(KERN_EMERG "BONITO_PCICMD = %08x\n", pcicmd);
-		printk(KERN_EMERG "BONITO_PCIBADADDR = %08x\n", pcibadaddr);
-		printk(KERN_EMERG "BONITO_PCIMSTAT = %08x\n", pcimstat);
+		pr_emerg("BONITO_INTISR = %08x\n", intisr);
+		pr_emerg("BONITO_INTEN = %08x\n", inten);
+		pr_emerg("BONITO_INTPOL = %08x\n", intpol);
+		pr_emerg("BONITO_INTEDGE = %08x\n", intedge);
+		pr_emerg("BONITO_INTSTEER = %08x\n", intsteer);
+		pr_emerg("BONITO_PCICMD = %08x\n", pcicmd);
+		pr_emerg("BONITO_PCIBADADDR = %08x\n", pcibadaddr);
+		pr_emerg("BONITO_PCIMSTAT = %08x\n", pcimstat);
 		break;
 	}
 
@@ -313,6 +310,11 @@
 
 static irqreturn_t ipi_resched_interrupt(int irq, void *dev_id)
 {
+#ifdef CONFIG_MIPS_VPE_APSP_API
+	if (aprp_hook)
+		aprp_hook();
+#endif
+
 	scheduler_ipi();
 
 	return IRQ_HANDLED;
@@ -365,13 +367,13 @@
 	.flags = IRQF_NO_THREAD,
 };
 
-static msc_irqmap_t __initdata msc_irqmap[] = {
+static msc_irqmap_t msc_irqmap[] __initdata = {
 	{MSC01C_INT_TMR,		MSC01_IRQ_EDGE, 0},
 	{MSC01C_INT_PCI,		MSC01_IRQ_LEVEL, 0},
 };
-static int __initdata msc_nr_irqs = ARRAY_SIZE(msc_irqmap);
+static int msc_nr_irqs __initdata = ARRAY_SIZE(msc_irqmap);
 
-static msc_irqmap_t __initdata msc_eicirqmap[] = {
+static msc_irqmap_t msc_eicirqmap[] __initdata = {
 	{MSC01E_INT_SW0,		MSC01_IRQ_LEVEL, 0},
 	{MSC01E_INT_SW1,		MSC01_IRQ_LEVEL, 0},
 	{MSC01E_INT_I8259A,		MSC01_IRQ_LEVEL, 0},
@@ -384,7 +386,7 @@
 	{MSC01E_INT_CPUCTR,		MSC01_IRQ_LEVEL, 0}
 };
 
-static int __initdata msc_nr_eicirqs = ARRAY_SIZE(msc_eicirqmap);
+static int msc_nr_eicirqs __initdata = ARRAY_SIZE(msc_eicirqmap);
 
 /*
  * This GIC specific tabular array defines the association between External
@@ -431,9 +433,12 @@
 	if (gcmp_present >= 0)
 		return gcmp_present;
 
-	_gcmp_base = (unsigned long) ioremap_nocache(GCMP_BASE_ADDR, GCMP_ADDRSPACE_SZ);
-	_msc01_biu_base = (unsigned long) ioremap_nocache(MSC01_BIU_REG_BASE, MSC01_BIU_ADDRSPACE_SZ);
-	gcmp_present = (GCMPGCB(GCMPB) & GCMP_GCB_GCMPB_GCMPBASE_MSK) == GCMP_BASE_ADDR;
+	_gcmp_base = (unsigned long) ioremap_nocache(GCMP_BASE_ADDR,
+		GCMP_ADDRSPACE_SZ);
+	_msc01_biu_base = (unsigned long) ioremap_nocache(MSC01_BIU_REG_BASE,
+		MSC01_BIU_ADDRSPACE_SZ);
+	gcmp_present = ((GCMPGCB(GCMPB) & GCMP_GCB_GCMPB_GCMPBASE_MSK) ==
+		GCMP_BASE_ADDR);
 
 	if (gcmp_present)
 		pr_debug("GCMP present\n");
@@ -443,9 +448,8 @@
 /* Return the number of IOCU's present */
 int __init gcmp_niocu(void)
 {
-  return gcmp_present ?
-    (GCMPGCB(GC) & GCMP_GCB_GC_NUMIOCU_MSK) >> GCMP_GCB_GC_NUMIOCU_SHF :
-    0;
+	return gcmp_present ? ((GCMPGCB(GC) & GCMP_GCB_GC_NUMIOCU_MSK) >>
+		GCMP_GCB_GC_NUMIOCU_SHF) : 0;
 }
 
 /* Set GCMP region attributes */
@@ -594,11 +598,14 @@
 			set_vi_handler(MIPSCPU_INT_IPI1, malta_ipi_irqdispatch);
 		}
 		/* Argh.. this really needs sorting out.. */
-		printk("CPU%d: status register was %08x\n", smp_processor_id(), read_c0_status());
+		pr_info("CPU%d: status register was %08x\n",
+			smp_processor_id(), read_c0_status());
 		write_c0_status(read_c0_status() | STATUSF_IP3 | STATUSF_IP4);
-		printk("CPU%d: status register now %08x\n", smp_processor_id(), read_c0_status());
+		pr_info("CPU%d: status register now %08x\n",
+			smp_processor_id(), read_c0_status());
 		write_c0_status(0x1100dc00);
-		printk("CPU%d: status register frc %08x\n", smp_processor_id(), read_c0_status());
+		pr_info("CPU%d: status register frc %08x\n",
+			smp_processor_id(), read_c0_status());
 		for (i = 0; i < nr_cpu_ids; i++) {
 			arch_init_ipiirq(MIPS_GIC_IRQ_BASE +
 					 GIC_RESCHED_INT(i), &irq_resched);
@@ -616,11 +623,15 @@
 			cpu_ipi_call_irq = MSC01E_INT_SW1;
 		} else {
 			if (cpu_has_vint) {
-				set_vi_handler (MIPS_CPU_IPI_RESCHED_IRQ, ipi_resched_dispatch);
-				set_vi_handler (MIPS_CPU_IPI_CALL_IRQ, ipi_call_dispatch);
+				set_vi_handler (MIPS_CPU_IPI_RESCHED_IRQ,
+					ipi_resched_dispatch);
+				set_vi_handler (MIPS_CPU_IPI_CALL_IRQ,
+					ipi_call_dispatch);
 			}
-			cpu_ipi_resched_irq = MIPS_CPU_IRQ_BASE + MIPS_CPU_IPI_RESCHED_IRQ;
-			cpu_ipi_call_irq = MIPS_CPU_IRQ_BASE + MIPS_CPU_IPI_CALL_IRQ;
+			cpu_ipi_resched_irq = MIPS_CPU_IRQ_BASE +
+				MIPS_CPU_IPI_RESCHED_IRQ;
+			cpu_ipi_call_irq = MIPS_CPU_IRQ_BASE +
+				MIPS_CPU_IPI_CALL_IRQ;
 		}
 		arch_init_ipiirq(cpu_ipi_resched_irq, &irq_resched);
 		arch_init_ipiirq(cpu_ipi_call_irq, &irq_call);
@@ -630,9 +641,7 @@
 
 void malta_be_init(void)
 {
-	if (gcmp_present) {
-		/* Could change CM error mask register */
-	}
+	/* Could change CM error mask register. */
 }
 
 
@@ -712,14 +721,14 @@
 			if (cause < 16) {
 				unsigned long cca_bits = (cm_error >> 15) & 7;
 				unsigned long tr_bits = (cm_error >> 12) & 7;
-				unsigned long mcmd_bits = (cm_error >> 7) & 0x1f;
+				unsigned long cmd_bits = (cm_error >> 7) & 0x1f;
 				unsigned long stag_bits = (cm_error >> 3) & 15;
 				unsigned long sport_bits = (cm_error >> 0) & 7;
 
 				snprintf(buf, sizeof(buf),
 					 "CCA=%lu TR=%s MCmd=%s STag=%lu "
 					 "SPort=%lu\n",
-					 cca_bits, tr[tr_bits], mcmd[mcmd_bits],
+					 cca_bits, tr[tr_bits], mcmd[cmd_bits],
 					 stag_bits, sport_bits);
 			} else {
 				/* glob state & sresp together */
@@ -728,7 +737,7 @@
 				unsigned long c1_bits = (cm_error >> 12) & 7;
 				unsigned long c0_bits = (cm_error >> 9) & 7;
 				unsigned long sc_bit = (cm_error >> 8) & 1;
-				unsigned long mcmd_bits = (cm_error >> 3) & 0x1f;
+				unsigned long cmd_bits = (cm_error >> 3) & 0x1f;
 				unsigned long sport_bits = (cm_error >> 0) & 7;
 				snprintf(buf, sizeof(buf),
 					 "C3=%s C2=%s C1=%s C0=%s SC=%s "
@@ -736,16 +745,16 @@
 					 core[c3_bits], core[c2_bits],
 					 core[c1_bits], core[c0_bits],
 					 sc_bit ? "True" : "False",
-					 mcmd[mcmd_bits], sport_bits);
+					 mcmd[cmd_bits], sport_bits);
 			}
 
 			ocause = (cm_other & GCMP_GCB_GMEO_ERROR_2ND_MSK) >>
 				 GCMP_GCB_GMEO_ERROR_2ND_SHF;
 
-			printk("CM_ERROR=%08lx %s <%s>\n", cm_error,
+			pr_err("CM_ERROR=%08lx %s <%s>\n", cm_error,
 			       causes[cause], buf);
-			printk("CM_ADDR =%08lx\n", cm_addr);
-			printk("CM_OTHER=%08lx %s\n", cm_other, causes[ocause]);
+			pr_err("CM_ADDR =%08lx\n", cm_addr);
+			pr_err("CM_OTHER=%08lx %s\n", cm_other, causes[ocause]);
 
 			/* reprime cause register */
 			GCMPGCB(GCMEC) = 0;
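
Note: the hunks above only rename the local variable and switch the printks to pr_err(); the CM_ERROR decoding itself is plain shift-and-mask arithmetic. A stand-alone sketch of that decoding, using the bit positions visible above (the sample value and output format are invented for illustration):

#include <stdio.h>

/* Decode a CM_ERROR value for causes < 16, using the same bit positions
 * as the hunk above.  Field handling is illustrative, not kernel code. */
static void decode_cm_error(unsigned long cm_error)
{
	unsigned long cca   = (cm_error >> 15) & 7;
	unsigned long tr    = (cm_error >> 12) & 7;
	unsigned long cmd   = (cm_error >> 7)  & 0x1f;
	unsigned long stag  = (cm_error >> 3)  & 15;
	unsigned long sport = (cm_error >> 0)  & 7;

	printf("CCA=%lu TR=%lu MCmd=%lu STag=%lu SPort=%lu\n",
	       cca, tr, cmd, stag, sport);
}

int main(void)
{
	decode_cm_error(0x0001a48c);	/* arbitrary sample value */
	return 0;
}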
diff --git a/arch/mips/mti-malta/malta-platform.c b/arch/mips/mti-malta/malta-platform.c
index 132f866..e1dd1c1 100644
--- a/arch/mips/mti-malta/malta-platform.c
+++ b/arch/mips/mti-malta/malta-platform.c
@@ -47,6 +47,7 @@
 static struct plat_serial8250_port uart8250_data[] = {
 	SMC_PORT(0x3F8, 4),
 	SMC_PORT(0x2F8, 3),
+#ifndef CONFIG_MIPS_CMP
 	{
 		.mapbase	= 0x1f000900,	/* The CBUS UART */
 		.irq		= MIPS_CPU_IRQ_BASE + MIPSCPU_INT_MB2,
@@ -55,6 +56,7 @@
 		.flags		= CBUS_UART_FLAGS,
 		.regshift	= 3,
 	},
+#endif
 	{ },
 };
 
diff --git a/arch/mips/mti-malta/malta-time.c b/arch/mips/mti-malta/malta-time.c
index a18af5f..3190099 100644
--- a/arch/mips/mti-malta/malta-time.c
+++ b/arch/mips/mti-malta/malta-time.c
@@ -42,8 +42,6 @@
 #include <asm/mips-boards/generic.h>
 #include <asm/mips-boards/maltaint.h>
 
-unsigned long cpu_khz;
-
 static int mips_cpu_timer_irq;
 static int mips_cpu_perf_irq;
 extern int cp0_perfcount_irq;
@@ -168,11 +166,24 @@
 	return mips_cpu_timer_irq;
 }
 
+static void __init init_rtc(void)
+{
+	/* stop the clock whilst setting it up */
+	CMOS_WRITE(RTC_SET | RTC_24H, RTC_CONTROL);
+
+	/* 32KHz time base */
+	CMOS_WRITE(RTC_REF_CLCK_32KHZ, RTC_FREQ_SELECT);
+
+	/* start the clock */
+	CMOS_WRITE(RTC_24H, RTC_CONTROL);
+}
+
 void __init plat_time_init(void)
 {
 	unsigned int prid = read_c0_prid() & (PRID_COMP_MASK | PRID_IMP_MASK);
 	unsigned int freq;
 
+	init_rtc();
 	estimate_frequencies();
 
 	freq = mips_hpt_frequency;
@@ -182,7 +193,6 @@
 	freq = freqround(freq, 5000);
 	printk("CPU frequency %d.%02d MHz\n", freq/1000000,
 	       (freq%1000000)*100/1000000);
-	cpu_khz = freq / 1000;
 
 	mips_scroll_message();
 
diff --git a/arch/mips/mti-sead3/Makefile b/arch/mips/mti-sead3/Makefile
index be11420..071786f 100644
--- a/arch/mips/mti-sead3/Makefile
+++ b/arch/mips/mti-sead3/Makefile
@@ -21,5 +21,7 @@
 obj-$(CONFIG_USB_EHCI_HCD)	+= sead3-ehci.o
 obj-$(CONFIG_OF)		+= sead3.dtb.o
 
+CFLAGS_sead3-setup.o = -I$(src)/../../../scripts/dtc/libfdt
+
 $(obj)/%.dtb: $(obj)/%.dts
 	$(call if_changed,dtc)
diff --git a/arch/mips/mti-sead3/sead3-pic32-bus.c b/arch/mips/mti-sead3/sead3-pic32-bus.c
index eb2bf93..3b12aa5 100644
--- a/arch/mips/mti-sead3/sead3-pic32-bus.c
+++ b/arch/mips/mti-sead3/sead3-pic32-bus.c
@@ -8,7 +8,6 @@
 #include <linux/delay.h>
 #include <linux/kernel.h>
 #include <linux/spinlock.h>
-#include <linux/init.h>
 #include <linux/io.h>
 #include <linux/errno.h>
 
diff --git a/arch/mips/mti-sead3/sead3-setup.c b/arch/mips/mti-sead3/sead3-setup.c
index 928ba84..bf7fe48 100644
--- a/arch/mips/mti-sead3/sead3-setup.c
+++ b/arch/mips/mti-sead3/sead3-setup.c
@@ -4,13 +4,15 @@
  * for more details.
  *
  * Copyright (C) 2012 MIPS Technologies, Inc.  All rights reserved.
+ * Copyright (C) 2013 Imagination Technologies Ltd.
  */
 #include <linux/init.h>
+#include <linux/libfdt.h>
 #include <linux/of_platform.h>
 #include <linux/of_fdt.h>
-#include <linux/bootmem.h>
 
 #include <asm/prom.h>
+#include <asm/fw/fw.h>
 
 #include <asm/mips-boards/generic.h>
 
@@ -19,8 +21,73 @@
 	return "MIPS SEAD3";
 }
 
+static uint32_t get_memsize_from_cmdline(void)
+{
+	int memsize = 0;
+	char *p = arcs_cmdline;
+	char *s = "memsize=";
+
+	p = strstr(p, s);
+	if (p) {
+		p += strlen(s);
+		memsize = memparse(p, NULL);
+	}
+
+	return memsize;
+}
+
+static uint32_t get_memsize_from_env(void)
+{
+	int memsize = 0;
+	char *p;
+
+	p = fw_getenv("memsize");
+	if (p)
+		memsize = memparse(p, NULL);
+
+	return memsize;
+}
+
+static uint32_t get_memsize(void)
+{
+	uint32_t memsize;
+
+	memsize = get_memsize_from_cmdline();
+	if (memsize)
+		return memsize;
+
+	return get_memsize_from_env();
+}
+
+static void __init parse_memsize_param(void)
+{
+	int offset;
+	const uint64_t *prop_value;
+	int prop_len;
+	uint32_t memsize = get_memsize();
+
+	if (!memsize)
+		return;
+
+	offset = fdt_path_offset(&__dtb_start, "/memory");
+	if (offset > 0) {
+		uint64_t new_value;
+		/*
+		 * reg contains 2 32-bits BE values, offset and size. We just
+		 * reg contains two 32-bit BE values: offset and size. We just
+		 * want to replace the size value without affecting the offset.
+		prop_value = fdt_getprop(&__dtb_start, offset, "reg", &prop_len);
+		new_value = be64_to_cpu(*prop_value);
+		new_value =  (new_value & ~0xffffffffllu) | memsize;
+		fdt_setprop_inplace_u64(&__dtb_start, offset, "reg", new_value);
+	}
+}
+
 void __init plat_mem_setup(void)
 {
+	/* allow command line/bootloader env to override memory size in DT */
+	parse_memsize_param();
+
 	/*
 	 * Load the builtin devicetree. This causes the chosen node to be
 	 * parsed resulting in our memory appearing
@@ -30,16 +97,15 @@
 
 void __init device_tree_init(void)
 {
-	unsigned long base, size;
-
 	if (!initial_boot_params)
 		return;
 
-	base = virt_to_phys((void *)initial_boot_params);
-	size = be32_to_cpu(initial_boot_params->totalsize);
-
-	/* Before we do anything, lets reserve the dt blob */
-	reserve_bootmem(base, size, BOOTMEM_DEFAULT);
-
-	unflatten_device_tree();
+	unflatten_and_copy_device_tree();
 }
+
+static int __init customize_machine(void)
+{
+	of_platform_populate(NULL, of_default_bus_match_table, NULL, NULL);
+	return 0;
+}
+arch_initcall(customize_machine);
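
For reference, the new memsize handling boils down to: look for "memsize=" on the command line, fall back to the firmware environment, and patch the low 32 bits of the /memory "reg" property in the built-in DTB. A minimal user-space sketch of the lookup half, with strtoull standing in for the kernel's memparse() helper (the suffix handling below is an approximation, purely for illustration):

#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <stdint.h>

/* Rough stand-in for the kernel's memparse(): parse a number with an
 * optional K/M/G suffix. */
static uint64_t parse_size(const char *p)
{
	char *end;
	uint64_t v = strtoull(p, &end, 0);

	switch (*end) {
	case 'G': case 'g':
		v <<= 10;
		/* fall through */
	case 'M': case 'm':
		v <<= 10;
		/* fall through */
	case 'K': case 'k':
		v <<= 10;
		break;
	}
	return v;
}

/* Same lookup order as the new SEAD-3 code: "memsize=" on the command
 * line first, then a firmware environment value (passed in here as a
 * plain string to keep the sketch self-contained). */
static uint64_t get_memsize(const char *cmdline, const char *env_memsize)
{
	static const char key[] = "memsize=";
	const char *p = strstr(cmdline, key);

	if (p)
		return parse_size(p + strlen(key));
	if (env_memsize)
		return parse_size(env_memsize);
	return 0;
}

int main(void)
{
	uint64_t sz = get_memsize("console=ttyS1,38400 memsize=128M", NULL);

	printf("memsize = %llu bytes\n", (unsigned long long)sz);
	return 0;
}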
diff --git a/arch/mips/mti-sead3/sead3-time.c b/arch/mips/mti-sead3/sead3-time.c
index 552d26c..678d03d 100644
--- a/arch/mips/mti-sead3/sead3-time.c
+++ b/arch/mips/mti-sead3/sead3-time.c
@@ -13,8 +13,6 @@
 #include <asm/irq.h>
 #include <asm/mips-boards/generic.h>
 
-unsigned long cpu_khz;
-
 static int mips_cpu_timer_irq;
 static int mips_cpu_perf_irq;
 
@@ -109,8 +107,6 @@
 	pr_debug("CPU frequency %d.%02d MHz\n", (est_freq / 1000000),
 		(est_freq % 1000000) * 100 / 1000000);
 
-	cpu_khz = est_freq / 1000;
-
 	mips_scroll_message();
 
 	plat_perf_setup();
diff --git a/arch/mips/mti-sead3/sead3.dts b/arch/mips/mti-sead3/sead3.dts
index 658f437..e4b317d 100644
--- a/arch/mips/mti-sead3/sead3.dts
+++ b/arch/mips/mti-sead3/sead3.dts
@@ -15,10 +15,6 @@
 		};
 	};
 
-	chosen {
-		bootargs = "console=ttyS1,38400 rootdelay=10 root=/dev/sda3";
-	};
-
 	memory {
 		device_type = "memory";
 		reg = <0x0 0x08000000>;
diff --git a/arch/mips/netlogic/Kconfig b/arch/mips/netlogic/Kconfig
index 852a4ee..4eb683a 100644
--- a/arch/mips/netlogic/Kconfig
+++ b/arch/mips/netlogic/Kconfig
@@ -28,6 +28,15 @@
 	  pointer to the kernel.  The corresponding DTS file is at
 	  arch/mips/netlogic/dts/xlp_fvp.dts
 
+config DT_XLP_GVP
+	bool "Built-in device tree for XLP GVP boards"
+	default y
+	help
+	  Add an FDT blob for XLP GVP board into the kernel.
+	  This DTB will be used if the firmware does not pass in a DTB
+	  pointer to the kernel.  The corresponding DTS file is at
+	  arch/mips/netlogic/dts/xlp_gvp.dts
+
 config NLM_MULTINODE
 	bool "Support for multi-chip boards"
 	depends on NLM_XLP_BOARD
diff --git a/arch/mips/netlogic/common/earlycons.c b/arch/mips/netlogic/common/earlycons.c
index 1902fa2..769f930 100644
--- a/arch/mips/netlogic/common/earlycons.c
+++ b/arch/mips/netlogic/common/earlycons.c
@@ -37,9 +37,11 @@
 
 #include <asm/mipsregs.h>
 #include <asm/netlogic/haldefs.h>
+#include <asm/netlogic/common.h>
 
 #if defined(CONFIG_CPU_XLP)
 #include <asm/netlogic/xlp-hal/iomap.h>
+#include <asm/netlogic/xlp-hal/xlp.h>
 #include <asm/netlogic/xlp-hal/uart.h>
 #elif defined(CONFIG_CPU_XLR)
 #include <asm/netlogic/xlr/iomap.h>
diff --git a/arch/mips/netlogic/common/irq.c b/arch/mips/netlogic/common/irq.c
index 1c7e3a1..5afc4b7 100644
--- a/arch/mips/netlogic/common/irq.c
+++ b/arch/mips/netlogic/common/irq.c
@@ -180,6 +180,7 @@
 #endif
 }
 
+
 void nlm_setup_pic_irq(int node, int picirq, int irq, int irt)
 {
 	struct nlm_pic_irq *pic_data;
@@ -207,32 +208,32 @@
 
 static void nlm_init_node_irqs(int node)
 {
-	int i, irt;
-	uint64_t irqmask;
 	struct nlm_soc_info *nodep;
+	int i, irt;
 
 	pr_info("Init IRQ for node %d\n", node);
 	nodep = nlm_get_node(node);
-	irqmask = PERCPU_IRQ_MASK;
+	nodep->irqmask = PERCPU_IRQ_MASK;
 	for (i = PIC_IRT_FIRST_IRQ; i <= PIC_IRT_LAST_IRQ; i++) {
 		irt = nlm_irq_to_irt(i);
-		if (irt == -1)
+		if (irt == -1)		/* unused irq */
 			continue;
-		nlm_setup_pic_irq(node, i, i, irt);
-		/* set interrupts to first cpu in node */
+		nodep->irqmask |= 1ull << i;
+		if (irt == -2)		/* not a direct PIC irq */
+			continue;
+
 		nlm_pic_init_irt(nodep->picbase, irt, i,
-					node * NLM_CPUS_PER_NODE, 0);
-		irqmask |= (1ull << i);
+				node * nlm_threads_per_node(), 0);
+		nlm_setup_pic_irq(node, i, i, irt);
 	}
-	nodep->irqmask = irqmask;
 }
 
 void nlm_smp_irq_init(int hwcpuid)
 {
 	int node, cpu;
 
-	node = hwcpuid / NLM_CPUS_PER_NODE;
-	cpu  = hwcpuid % NLM_CPUS_PER_NODE;
+	node = nlm_cpuid_to_node(hwcpuid);
+	cpu  = hwcpuid % nlm_threads_per_node();
 
 	if (cpu == 0 && node != 0)
 		nlm_init_node_irqs(node);
@@ -256,13 +257,23 @@
 		return;
 	}
 
+#if defined(CONFIG_PCI_MSI) && defined(CONFIG_CPU_XLP)
+	/* PCI interrupts need a second level dispatch for MSI bits */
+	if (i >= PIC_PCIE_LINK_MSI_IRQ(0) && i <= PIC_PCIE_LINK_MSI_IRQ(3)) {
+		nlm_dispatch_msi(node, i);
+		return;
+	}
+	if (i >= PIC_PCIE_MSIX_IRQ(0) && i <= PIC_PCIE_MSIX_IRQ(3)) {
+		nlm_dispatch_msix(node, i);
+		return;
+	}
+
+#endif
 	/* top level irq handling */
 	do_IRQ(nlm_irq_to_xirq(node, i));
 }
 
 #ifdef CONFIG_OF
-static struct irq_domain *xlp_pic_domain;
-
 static const struct irq_domain_ops xlp_pic_irq_domain_ops = {
 	.xlate = irq_domain_xlate_onetwocell,
 };
@@ -271,8 +282,9 @@
 					struct device_node *parent)
 {
 	const int n_picirqs = PIC_IRT_LAST_IRQ - PIC_IRQ_BASE + 1;
+	struct irq_domain *xlp_pic_domain;
 	struct resource res;
-	int socid, ret;
+	int socid, ret, bus;
 
 	/* we need a hack to get the PIC's SoC chip id */
 	ret = of_address_to_resource(node, 0, &res);
@@ -280,7 +292,34 @@
 		pr_err("PIC %s: reg property not found!\n", node->name);
 		return -EINVAL;
 	}
-	socid = (res.start >> 18) & 0x3;
+
+	if (cpu_is_xlp9xx()) {
+		bus = (res.start >> 20) & 0xf;
+		for (socid = 0; socid < NLM_NR_NODES; socid++) {
+			if (!nlm_node_present(socid))
+				continue;
+			if (nlm_get_node(socid)->socbus == bus)
+				break;
+		}
+		if (socid == NLM_NR_NODES) {
+			pr_err("PIC %s: Node mapping for bus %d not found!\n",
+					node->name, bus);
+			return -EINVAL;
+		}
+	} else {
+		socid = (res.start >> 18) & 0x3;
+		if (!nlm_node_present(socid)) {
+			pr_err("PIC %s: node %d does not exist!\n",
+							node->name, socid);
+			return -EINVAL;
+		}
+	}
+
+	if (!nlm_node_present(socid)) {
+		pr_err("PIC %s: node %d does not exist!\n", node->name, socid);
+		return -EINVAL;
+	}
+
 	xlp_pic_domain = irq_domain_add_legacy(node, n_picirqs,
 		nlm_irq_to_xirq(socid, PIC_IRQ_BASE), PIC_IRQ_BASE,
 		&xlp_pic_irq_domain_ops, NULL);
@@ -288,8 +327,7 @@
 		pr_err("PIC %s: Creating legacy domain failed!\n", node->name);
 		return -EINVAL;
 	}
-	pr_info("Node %d: IRQ domain created for PIC@%pa\n", socid,
-							&res.start);
+	pr_info("Node %d: IRQ domain created for PIC@%pR\n", socid, &res);
 	return 0;
 }
 
diff --git a/arch/mips/netlogic/common/reset.S b/arch/mips/netlogic/common/reset.S
index adb1828..b231fe1 100644
--- a/arch/mips/netlogic/common/reset.S
+++ b/arch/mips/netlogic/common/reset.S
@@ -32,10 +32,10 @@
  * IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
  */
 
-#include <linux/init.h>
 
 #include <asm/asm.h>
 #include <asm/asm-offsets.h>
+#include <asm/cacheops.h>
 #include <asm/regdef.h>
 #include <asm/mipsregs.h>
 #include <asm/stackframe.h>
@@ -50,8 +50,8 @@
 #include <asm/netlogic/xlp-hal/cpucontrol.h>
 
 #define CP0_EBASE	$15
-#define SYS_CPU_COHERENT_BASE(node)	CKSEG1ADDR(XLP_DEFAULT_IO_BASE) + \
-			XLP_IO_SYS_OFFSET(node) + XLP_IO_PCI_HDRSZ + \
+#define SYS_CPU_COHERENT_BASE	CKSEG1ADDR(XLP_DEFAULT_IO_BASE) + \
+			XLP_IO_SYS_OFFSET(0) + XLP_IO_PCI_HDRSZ + \
 			SYS_CPU_NONCOHERENT_MODE * 4
 
 /* Enable XLP features and workarounds in the LSU */
@@ -74,35 +74,55 @@
 .endm
 
 /*
- * Low level flush for L1D cache on XLP, the normal cache ops does
- * not do the complete and correct cache flush.
+ * L1D cache has to be flushed before enabling threads in XLP.
+ * On XLP8xx/XLP3xx, we do a low level flush using processor control
+ * registers. On XLPII CPUs, usual cache instructions work.
  */
 .macro	xlp_flush_l1_dcache
+	mfc0	t0, CP0_EBASE, 0
+	andi	t0, t0, 0xff00
+	slt	t1, t0, 0x1200
+	beqz	t1, 15f
+	nop
+
+	/* XLP8xx low level cache flush */
 	li	t0, LSU_DEBUG_DATA0
 	li	t1, LSU_DEBUG_ADDR
 	li	t2, 0		/* index */
 	li	t3, 0x1000	/* loop count */
-1:
+11:
 	sll	v0, t2, 5
 	mtcr	zero, t0
 	ori	v1, v0, 0x3	/* way0 | write_enable | write_active */
 	mtcr	v1, t1
-2:
+12:
 	mfcr	v1, t1
 	andi	v1, 0x1		/* wait for write_active == 0 */
-	bnez	v1, 2b
+	bnez	v1, 12b
 	nop
 	mtcr	zero, t0
 	ori	v1, v0, 0x7	/* way1 | write_enable | write_active */
 	mtcr	v1, t1
-3:
+13:
 	mfcr	v1, t1
 	andi	v1, 0x1		/* wait for write_active == 0 */
-	bnez	v1, 3b
+	bnez	v1, 13b
 	nop
 	addi	t2, 1
-	bne	t3, t2, 1b
+	bne	t3, t2, 11b
 	nop
+	b	17f
+	nop
+
+	/* XLPII CPUs, Invalidate all 64k of L1 D-cache */
+15:
+	li	t0, 0x80000000
+	li	t1, 0x80010000
+16:	cache	Index_Writeback_Inv_D, 0(t0)
+	addiu	t0, t0, 32
+	bne	t0, t1, 16b
+	nop
+17:
 .endm
 
 /*
@@ -138,6 +158,13 @@
 	nop
 
 1:	/* Entry point on core wakeup */
+	mfc0	t0, CP0_EBASE, 0	/* processor ID */
+	andi	t0, 0xff00
+	li	t1, 0x1500		/* XLP 9xx */
+	beq	t0, t1, 2f		/* does not need to set coherent */
+	nop
+
+	/* set bit in SYS coherent register for the core */
 	mfc0	t0, CP0_EBASE, 1
 	mfc0	t1, CP0_EBASE, 1
 	srl	t1, 5
@@ -149,7 +176,7 @@
 	li	t1, 0x1
 	sll	t0, t1, t0
 	nor	t0, t0, zero		/* t0 <- ~(1 << core) */
-	li	t2, SYS_CPU_COHERENT_BASE(0)
+	li	t2, SYS_CPU_COHERENT_BASE
 	add	t2, t2, t3		/* t2 <- SYS offset for node */
 	lw	t1, 0(t2)
 	and	t1, t1, t0
@@ -159,13 +186,13 @@
 	lw	t1, 0(t2)
 	sync
 
+2:
 	/* Configure LSU on Non-0 Cores. */
 	xlp_config_lsu
 	/* FALL THROUGH */
 
 /*
- * Wake up sibling threads from the initial thread in
- * a core.
+ * Wake up sibling threads from the initial thread in a core.
  */
 EXPORT(nlm_boot_siblings)
 	/* core L1D flush before enable threads */
@@ -181,8 +208,10 @@
 	/*
 	 * The new hardware thread starts at the next instruction
 	 * For all the cases other than core 0 thread 0, we will
-	* jump to the secondary wait function.
-	*/
+	 * jump to the secondary wait function.
+	 *
+	 * NOTE: All GPR contents are lost after the mtcr above!
+	 */
 	mfc0	v0, CP0_EBASE, 1
 	andi	v0, 0x3ff		/* v0 <- node/core */
 
@@ -196,7 +225,7 @@
 #endif
 	mtc0	t1, CP0_STATUS
 
-	/* mark CPU ready, careful here, previous mtcr trashed registers */
+	/* mark CPU ready */
 	li	t3, CKSEG1ADDR(RESET_DATA_PHYS)
 	ADDIU	t1, t3, BOOT_CPU_READY
 	sll	v1, v0, 2
diff --git a/arch/mips/netlogic/common/smp.c b/arch/mips/netlogic/common/smp.c
index c0eded0..6baae15 100644
--- a/arch/mips/netlogic/common/smp.c
+++ b/arch/mips/netlogic/common/smp.c
@@ -63,7 +63,7 @@
 	uint64_t picbase;
 
 	cpu = cpu_logical_map(logical_cpu);
-	node = cpu / NLM_CPUS_PER_NODE;
+	node = nlm_cpuid_to_node(cpu);
 	picbase = nlm_get_node(node)->picbase;
 
 	if (action & SMP_CALL_FUNCTION)
@@ -152,7 +152,7 @@
 	int cpu, node;
 
 	cpu = cpu_logical_map(logical_cpu);
-	node = cpu / NLM_CPUS_PER_NODE;
+	node = nlm_cpuid_to_node(logical_cpu);
 	nlm_next_sp = (unsigned long)__KSTK_TOS(idle);
 	nlm_next_gp = (unsigned long)task_thread_info(idle);
 
@@ -164,7 +164,7 @@
 void __init nlm_smp_setup(void)
 {
 	unsigned int boot_cpu;
-	int num_cpus, i, ncore;
+	int num_cpus, i, ncore, node;
 	volatile u32 *cpu_ready = nlm_get_boot_data(BOOT_CPU_READY);
 	char buf[64];
 
@@ -187,6 +187,8 @@
 			__cpu_number_map[i] = num_cpus;
 			__cpu_logical_map[num_cpus] = i;
 			set_cpu_possible(num_cpus, true);
+			node = nlm_cpuid_to_node(i);
+			cpumask_set_cpu(num_cpus, &nlm_get_node(node)->cpumask);
 			++num_cpus;
 		}
 	}
diff --git a/arch/mips/netlogic/common/smpboot.S b/arch/mips/netlogic/common/smpboot.S
index aa6cff0..8597657 100644
--- a/arch/mips/netlogic/common/smpboot.S
+++ b/arch/mips/netlogic/common/smpboot.S
@@ -32,7 +32,6 @@
  * IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
  */
 
-#include <linux/init.h>
 
 #include <asm/asm.h>
 #include <asm/asm-offsets.h>
@@ -98,7 +97,7 @@
  * In case of RMIboot bootloader which is used on XLR boards, the CPUs
  * be already woken up and waiting in bootloader code.
  * This will get them out of the bootloader code and into linux. Needed
- *  because the bootloader area will be taken and initialized by linux.
+ * because the bootloader area will be taken and initialized by linux.
  */
 NESTED(nlm_rmiboot_preboot, 16, sp)
 	mfc0	t0, $15, 1	/* read ebase */
@@ -133,6 +132,7 @@
 	or	t1, t2, v1	/* put in new value */
 	mtcr	t1, t0		/* update core control */
 
+	/* wait for NMI to hit */
 1:	wait
 	b	1b
 	nop
diff --git a/arch/mips/netlogic/dts/Makefile b/arch/mips/netlogic/dts/Makefile
index 0b9be5f..25c8e87 100644
--- a/arch/mips/netlogic/dts/Makefile
+++ b/arch/mips/netlogic/dts/Makefile
@@ -1,3 +1,4 @@
 obj-$(CONFIG_DT_XLP_EVP) := xlp_evp.dtb.o
 obj-$(CONFIG_DT_XLP_SVP) += xlp_svp.dtb.o
 obj-$(CONFIG_DT_XLP_FVP) += xlp_fvp.dtb.o
+obj-$(CONFIG_DT_XLP_GVP) += xlp_gvp.dtb.o
diff --git a/arch/mips/netlogic/dts/xlp_gvp.dts b/arch/mips/netlogic/dts/xlp_gvp.dts
new file mode 100644
index 0000000..047d27f
--- /dev/null
+++ b/arch/mips/netlogic/dts/xlp_gvp.dts
@@ -0,0 +1,76 @@
+/*
+ * XLP9XX Device Tree Source for GVP boards
+ */
+
+/dts-v1/;
+/ {
+	model = "netlogic,XLP-GVP";
+	compatible = "netlogic,xlp";
+	#address-cells = <2>;
+	#size-cells = <2>;
+
+	soc {
+		#address-cells = <2>;
+		#size-cells = <1>;
+		compatible = "simple-bus";
+		ranges = <0 0  0 0x18000000  0x04000000   // PCIe CFG
+			  1 0  0 0x16000000  0x02000000>; // GBU chipselects
+
+		serial0: serial@30000 {
+			device_type = "serial";
+			compatible = "ns16550";
+			reg = <0 0x112100 0xa00>;
+			reg-shift = <2>;
+			reg-io-width = <4>;
+			clock-frequency = <125000000>;
+			interrupt-parent = <&pic>;
+			interrupts = <17>;
+		};
+		pic: pic@4000 {
+			interrupt-controller;
+			#address-cells = <0>;
+			#interrupt-cells = <1>;
+			reg = <0 0x110000 0x200>;
+		};
+
+		nor_flash@1,0 {
+			compatible = "cfi-flash";
+			#address-cells = <1>;
+			#size-cells = <1>;
+			bank-width = <2>;
+			reg = <1 0 0x1000000>;
+
+			partition@0 {
+				label = "x-loader";
+				reg = <0x0 0x100000>; /* 1M */
+				read-only;
+			};
+
+			partition@100000 {
+				label = "u-boot";
+				reg = <0x100000 0x100000>; /* 1M */
+			};
+
+			partition@200000 {
+				label = "kernel";
+				reg = <0x200000 0x500000>; /* 5M */
+			};
+
+			partition@700000 {
+				label = "rootfs";
+				reg = <0x700000 0x800000>; /* 8M */
+			};
+
+			partition@f00000 {
+				label = "env";
+				reg = <0xf00000 0x100000>; /* 1M */
+				read-only;
+			};
+		};
+
+	};
+
+	chosen {
+		bootargs = "console=ttyS0,115200 rdinit=/sbin/init";
+	};
+};
diff --git a/arch/mips/netlogic/xlp/dt.c b/arch/mips/netlogic/xlp/dt.c
index 8316d54..5754097 100644
--- a/arch/mips/netlogic/xlp/dt.c
+++ b/arch/mips/netlogic/xlp/dt.c
@@ -42,13 +42,18 @@
 #include <asm/prom.h>
 
 extern u32 __dtb_xlp_evp_begin[], __dtb_xlp_svp_begin[],
-	__dtb_xlp_fvp_begin[], __dtb_start[];
+	__dtb_xlp_fvp_begin[], __dtb_xlp_gvp_begin[], __dtb_start[];
 static void *xlp_fdt_blob;
 
 void __init *xlp_dt_init(void *fdtp)
 {
 	if (!fdtp) {
 		switch (current_cpu_data.processor_id & 0xff00) {
+#ifdef CONFIG_DT_XLP_GVP
+		case PRID_IMP_NETLOGIC_XLP9XX:
+			fdtp = __dtb_xlp_gvp_begin;
+			break;
+#endif
 #ifdef CONFIG_DT_XLP_FVP
 		case PRID_IMP_NETLOGIC_XLP2XX:
 			fdtp = __dtb_xlp_fvp_begin;
diff --git a/arch/mips/netlogic/xlp/nlm_hal.c b/arch/mips/netlogic/xlp/nlm_hal.c
index 56c50ba..997cd9e 100644
--- a/arch/mips/netlogic/xlp/nlm_hal.c
+++ b/arch/mips/netlogic/xlp/nlm_hal.c
@@ -57,6 +57,10 @@
 	nodep->sysbase = nlm_get_sys_regbase(node);
 	nodep->picbase = nlm_get_pic_regbase(node);
 	nodep->ebase = read_c0_ebase() & (~((1 << 12) - 1));
+	if (cpu_is_xlp9xx())
+		nodep->socbus = xlp9xx_get_socbus(node);
+	else
+		nodep->socbus = 0;
 	spin_lock_init(&nodep->piclock);
 }
 
@@ -65,6 +69,26 @@
 	uint64_t pcibase;
 	int devoff, irt;
 
+	/* bypass for 9xx */
+	if (cpu_is_xlp9xx()) {
+		switch (irq) {
+		case PIC_9XX_XHCI_0_IRQ:
+			return 114;
+		case PIC_9XX_XHCI_1_IRQ:
+			return 115;
+		case PIC_UART_0_IRQ:
+			return 133;
+		case PIC_UART_1_IRQ:
+			return 134;
+		case PIC_PCIE_LINK_LEGACY_IRQ(0):
+		case PIC_PCIE_LINK_LEGACY_IRQ(1):
+		case PIC_PCIE_LINK_LEGACY_IRQ(2):
+		case PIC_PCIE_LINK_LEGACY_IRQ(3):
+			return 191 + irq - PIC_PCIE_LINK_LEGACY_IRQ_BASE;
+		}
+		return -1;
+	}
+
 	devoff = 0;
 	switch (irq) {
 	case PIC_UART_0_IRQ:
@@ -135,9 +159,17 @@
 		case PIC_I2C_3_IRQ:
 			irt = irt + 3; break;
 		}
-	} else if (irq >= PIC_PCIE_LINK_0_IRQ && irq <= PIC_PCIE_LINK_3_IRQ) {
+	} else if (irq >= PIC_PCIE_LINK_LEGACY_IRQ(0) &&
+			irq <= PIC_PCIE_LINK_LEGACY_IRQ(3)) {
 		/* HW bug, PCI IRT entries are bad on early silicon, fix */
-		irt = PIC_IRT_PCIE_LINK_INDEX(irq - PIC_PCIE_LINK_0_IRQ);
+		irt = PIC_IRT_PCIE_LINK_INDEX(irq -
+					PIC_PCIE_LINK_LEGACY_IRQ_BASE);
+	} else if (irq >= PIC_PCIE_LINK_MSI_IRQ(0) &&
+			irq <= PIC_PCIE_LINK_MSI_IRQ(3)) {
+		irt = -2;
+	} else if (irq >= PIC_PCIE_MSIX_IRQ(0) &&
+			irq <= PIC_PCIE_MSIX_IRQ(3)) {
+		irt = -2;
 	} else {
 		irt = -1;
 	}
@@ -151,7 +183,10 @@
 	uint64_t num, sysbase;
 
 	sysbase = nlm_get_node(node)->sysbase;
-	rstval = nlm_read_sys_reg(sysbase, SYS_POWER_ON_RESET_CFG);
+	if (cpu_is_xlp9xx())
+		rstval = nlm_read_sys_reg(sysbase, SYS_9XX_POWER_ON_RESET_CFG);
+	else
+		rstval = nlm_read_sys_reg(sysbase, SYS_POWER_ON_RESET_CFG);
 	if (cpu_is_xlpii()) {
 		num = 1000000ULL * (400 * 3 + 100 * (rstval >> 26));
 		denom = 3;
@@ -265,6 +300,10 @@
 
 unsigned int nlm_get_pic_frequency(int node)
 {
+	/* TODO: calculate the frequency as is done for 2xx */
+	if (cpu_is_xlp9xx())
+		return 250000000;
+
 	if (cpu_is_xlpii())
 		return nlm_2xx_get_pic_frequency(node);
 	else
@@ -284,21 +323,33 @@
 {
 	uint64_t bridgebase, base, lim;
 	uint32_t val;
+	unsigned int barreg, limreg, xlatreg;
 	int i, node, rv;
 
 	/* Look only at mapping on Node 0, we don't handle crazy configs */
 	bridgebase = nlm_get_bridge_regbase(0);
 	rv = 0;
 	for (i = 0; i < 8; i++) {
-		val = nlm_read_bridge_reg(bridgebase,
-					BRIDGE_DRAM_NODE_TRANSLN(i));
-		node = (val >> 1) & 0x3;
-		if (n >= 0 && n != node)
-			continue;
-		val = nlm_read_bridge_reg(bridgebase, BRIDGE_DRAM_BAR(i));
+		if (cpu_is_xlp9xx()) {
+			barreg = BRIDGE_9XX_DRAM_BAR(i);
+			limreg = BRIDGE_9XX_DRAM_LIMIT(i);
+			xlatreg = BRIDGE_9XX_DRAM_NODE_TRANSLN(i);
+		} else {
+			barreg = BRIDGE_DRAM_BAR(i);
+			limreg = BRIDGE_DRAM_LIMIT(i);
+			xlatreg = BRIDGE_DRAM_NODE_TRANSLN(i);
+		}
+		if (n >= 0) {
+			/* node specified, get node mapping of BAR */
+			val = nlm_read_bridge_reg(bridgebase, xlatreg);
+			node = (val >> 1) & 0x3;
+			if (n != node)
+				continue;
+		}
+		val = nlm_read_bridge_reg(bridgebase, barreg);
 		val = (val >>  12) & 0xfffff;
 		base = (uint64_t) val << 20;
-		val = nlm_read_bridge_reg(bridgebase, BRIDGE_DRAM_LIMIT(i));
+		val = nlm_read_bridge_reg(bridgebase, limreg);
 		val = (val >>  12) & 0xfffff;
 		if (val == 0)   /* BAR not used */
 			continue;
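
The BAR/LIMIT decoding following the register selection above is unchanged: both registers carry a 20-bit field at bit 12, expressed in 1 MB units. A small sketch of that arithmetic (the register values below are invented for illustration):

#include <stdio.h>
#include <stdint.h>

/* Turn a bridge DRAM BAR/LIMIT register value into a byte address,
 * following the shift/mask used in the hunk above. */
static uint64_t bridge_reg_to_addr(uint32_t regval)
{
	return (uint64_t)((regval >> 12) & 0xfffff) << 20;	/* 1 MB units */
}

int main(void)
{
	uint32_t bar = 0x00000000;	/* invented: base field = 0 */
	uint32_t lim = 0x00800000;	/* invented: limit field = 0x800, i.e. 2 GB */

	printf("base=0x%llx limit=0x%llx\n",
	       (unsigned long long)bridge_reg_to_addr(bar),
	       (unsigned long long)bridge_reg_to_addr(lim));
	return 0;
}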
diff --git a/arch/mips/netlogic/xlp/setup.c b/arch/mips/netlogic/xlp/setup.c
index 54e75c7..8c60a2d 100644
--- a/arch/mips/netlogic/xlp/setup.c
+++ b/arch/mips/netlogic/xlp/setup.c
@@ -51,12 +51,16 @@
 struct nlm_soc_info nlm_nodes[NLM_NR_NODES];
 cpumask_t nlm_cpumask = CPU_MASK_CPU0;
 unsigned int nlm_threads_per_core;
+unsigned int xlp_cores_per_node;
 
 static void nlm_linux_exit(void)
 {
 	uint64_t sysbase = nlm_get_node(0)->sysbase;
 
-	nlm_write_sys_reg(sysbase, SYS_CHIP_RESET, 1);
+	if (cpu_is_xlp9xx())
+		nlm_write_sys_reg(sysbase, SYS_9XX_CHIP_RESET, 1);
+	else
+		nlm_write_sys_reg(sysbase, SYS_CHIP_RESET, 1);
 	for ( ; ; )
 		cpu_wait();
 }
@@ -92,6 +96,14 @@
 
 void __init plat_mem_setup(void)
 {
+#ifdef CONFIG_SMP
+	nlm_wakeup_secondary_cpus();
+
+	/* update TLB size after waking up threads */
+	current_cpu_data.tlbsize = ((read_c0_config6() >> 16) & 0xffff) + 1;
+
+	register_smp_ops(&nlm_smp_ops);
+#endif
 	_machine_restart = (void (*)(char *))nlm_linux_exit;
 	_machine_halt	= nlm_linux_exit;
 	pm_power_off	= nlm_linux_exit;
@@ -110,6 +122,7 @@
 const char *get_system_type(void)
 {
 	switch (read_c0_prid() & 0xff00) {
+	case PRID_IMP_NETLOGIC_XLP9XX:
 	case PRID_IMP_NETLOGIC_XLP2XX:
 		return "Broadcom XLPII Series";
 	default:
@@ -149,6 +162,10 @@
 	void *reset_vec;
 
 	nlm_io_base = CKSEG1ADDR(XLP_DEFAULT_IO_BASE);
+	if (cpu_is_xlp9xx())
+		xlp_cores_per_node = 32;
+	else
+		xlp_cores_per_node = 8;
 	nlm_init_boot_cpu();
 	xlp_mmu_init();
 	nlm_node_init(0);
@@ -162,11 +179,5 @@
 
 #ifdef CONFIG_SMP
 	cpumask_setall(&nlm_cpumask);
-	nlm_wakeup_secondary_cpus();
-
-	/* update TLB size after waking up threads */
-	current_cpu_data.tlbsize = ((read_c0_config6() >> 16) & 0xffff) + 1;
-
-	register_smp_ops(&nlm_smp_ops);
 #endif
 }
diff --git a/arch/mips/netlogic/xlp/usb-init-xlp2.c b/arch/mips/netlogic/xlp/usb-init-xlp2.c
index 36e9c22..17ade1c 100644
--- a/arch/mips/netlogic/xlp/usb-init-xlp2.c
+++ b/arch/mips/netlogic/xlp/usb-init-xlp2.c
@@ -37,6 +37,7 @@
 #include <linux/delay.h>
 #include <linux/init.h>
 #include <linux/pci.h>
+#include <linux/pci_ids.h>
 #include <linux/platform_device.h>
 #include <linux/irq.h>
 
@@ -83,12 +84,14 @@
 #define nlm_read_usb_reg(b, r)		nlm_read_reg(b, r)
 #define nlm_write_usb_reg(b, r, v)	nlm_write_reg(b, r, v)
 
-#define nlm_xlpii_get_usb_pcibase(node, inst)		\
-	nlm_pcicfg_base(XLP2XX_IO_USB_OFFSET(node, inst))
+#define nlm_xlpii_get_usb_pcibase(node, inst)			\
+			nlm_pcicfg_base(cpu_is_xlp9xx() ?	\
+			XLP9XX_IO_USB_OFFSET(node, inst) :	\
+			XLP2XX_IO_USB_OFFSET(node, inst))
 #define nlm_xlpii_get_usb_regbase(node, inst)		\
 	(nlm_xlpii_get_usb_pcibase(node, inst) + XLP_IO_PCI_HDRSZ)
 
-static void xlpii_usb_ack(struct irq_data *data)
+static void xlp2xx_usb_ack(struct irq_data *data)
 {
 	u64 port_addr;
 
@@ -109,6 +112,29 @@
 	nlm_write_usb_reg(port_addr, XLPII_USB3_INT_REG, 0xffffffff);
 }
 
+static void xlp9xx_usb_ack(struct irq_data *data)
+{
+	u64 port_addr;
+	int node, irq;
+
+	/* Find the node and irq on the node */
+	irq = data->irq % NLM_IRQS_PER_NODE;
+	node = data->irq / NLM_IRQS_PER_NODE;
+
+	switch (irq) {
+	case PIC_9XX_XHCI_0_IRQ:
+		port_addr = nlm_xlpii_get_usb_regbase(node, 1);
+		break;
+	case PIC_9XX_XHCI_1_IRQ:
+		port_addr = nlm_xlpii_get_usb_regbase(node, 2);
+		break;
+	default:
+		pr_err("No matching USB irq %d node  %d!\n", irq, node);
+		return;
+	}
+	nlm_write_usb_reg(port_addr, XLPII_USB3_INT_REG, 0xffffffff);
+}
+
 static void nlm_xlpii_usb_hw_reset(int node, int port)
 {
 	u64 port_addr, xhci_base, pci_base;
@@ -178,17 +204,33 @@
 
 static int __init nlm_platform_xlpii_usb_init(void)
 {
+	int node;
+
 	if (!cpu_is_xlpii())
 		return 0;
 
-	pr_info("Initializing 2XX USB Interface\n");
-	nlm_xlpii_usb_hw_reset(0, 1);
-	nlm_xlpii_usb_hw_reset(0, 2);
-	nlm_xlpii_usb_hw_reset(0, 3);
-	nlm_set_pic_extra_ack(0, PIC_2XX_XHCI_0_IRQ, xlpii_usb_ack);
-	nlm_set_pic_extra_ack(0, PIC_2XX_XHCI_1_IRQ, xlpii_usb_ack);
-	nlm_set_pic_extra_ack(0, PIC_2XX_XHCI_2_IRQ, xlpii_usb_ack);
+	if (!cpu_is_xlp9xx()) {
+		/* XLP 2XX single node */
+		pr_info("Initializing 2XX USB Interface\n");
+		nlm_xlpii_usb_hw_reset(0, 1);
+		nlm_xlpii_usb_hw_reset(0, 2);
+		nlm_xlpii_usb_hw_reset(0, 3);
+		nlm_set_pic_extra_ack(0, PIC_2XX_XHCI_0_IRQ, xlp2xx_usb_ack);
+		nlm_set_pic_extra_ack(0, PIC_2XX_XHCI_1_IRQ, xlp2xx_usb_ack);
+		nlm_set_pic_extra_ack(0, PIC_2XX_XHCI_2_IRQ, xlp2xx_usb_ack);
+		return 0;
+	}
 
+	/* XLP 9XX, multi-node */
+	pr_info("Initializing 9XX USB Interface\n");
+	for (node = 0; node < NLM_NR_NODES; node++) {
+		if (!nlm_node_present(node))
+			continue;
+		nlm_xlpii_usb_hw_reset(node, 1);
+		nlm_xlpii_usb_hw_reset(node, 2);
+		nlm_set_pic_extra_ack(node, PIC_9XX_XHCI_0_IRQ, xlp9xx_usb_ack);
+		nlm_set_pic_extra_ack(node, PIC_9XX_XHCI_1_IRQ, xlp9xx_usb_ack);
+	}
 	return 0;
 }
 
@@ -196,8 +238,26 @@
 
 static u64 xlp_usb_dmamask = ~(u32)0;
 
-/* Fixup IRQ for USB devices on XLP the SoC PCIe bus */
-static void nlm_usb_fixup_final(struct pci_dev *dev)
+/* Fix up the IRQ for USB devices on the XLP9XX SoC PCIe bus */
+static void nlm_xlp9xx_usb_fixup_final(struct pci_dev *dev)
+{
+	int node;
+
+	node = xlp_socdev_to_node(dev);
+	dev->dev.dma_mask		= &xlp_usb_dmamask;
+	dev->dev.coherent_dma_mask	= DMA_BIT_MASK(32);
+	switch (dev->devfn) {
+	case 0x21:
+		dev->irq = nlm_irq_to_xirq(node, PIC_9XX_XHCI_0_IRQ);
+		break;
+	case 0x22:
+		dev->irq = nlm_irq_to_xirq(node, PIC_9XX_XHCI_1_IRQ);
+		break;
+	}
+}
+
+/* Fix up the IRQ for USB devices on the XLP2XX SoC PCIe bus */
+static void nlm_xlp2xx_usb_fixup_final(struct pci_dev *dev)
 {
 	dev->dev.dma_mask		= &xlp_usb_dmamask;
 	dev->dev.coherent_dma_mask	= DMA_BIT_MASK(32);
@@ -214,5 +274,7 @@
 	}
 }
 
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_XLP9XX_XHCI,
+		nlm_xlp9xx_usb_fixup_final);
 DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_NETLOGIC, PCI_DEVICE_ID_NLM_XHCI,
-		nlm_usb_fixup_final);
+		nlm_xlp2xx_usb_fixup_final);
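
The new xlp9xx_usb_ack() above recovers the node and per-node IRQ from the global IRQ number by division and modulo on NLM_IRQS_PER_NODE, the inverse of the nlm_irq_to_xirq() mapping used in the fixups. A small sketch of the two directions (the constant value here is an assumption, purely for illustration):

#include <stdio.h>

#define IRQS_PER_NODE	1024	/* stand-in for NLM_IRQS_PER_NODE; value assumed */

/* node-local irq -> global ("extended") irq number */
static int to_xirq(int node, int irq)
{
	return node * IRQS_PER_NODE + irq;
}

/* global irq number -> node and node-local irq, as in xlp9xx_usb_ack() */
static void from_xirq(int xirq, int *node, int *irq)
{
	*node = xirq / IRQS_PER_NODE;
	*irq  = xirq % IRQS_PER_NODE;
}

int main(void)
{
	int node, irq, xirq = to_xirq(2, 115);

	from_xirq(xirq, &node, &irq);
	printf("xirq=%d -> node=%d irq=%d\n", xirq, node, irq);
	return 0;
}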
diff --git a/arch/mips/netlogic/xlp/wakeup.c b/arch/mips/netlogic/xlp/wakeup.c
index 682d563..9a92617 100644
--- a/arch/mips/netlogic/xlp/wakeup.c
+++ b/arch/mips/netlogic/xlp/wakeup.c
@@ -32,7 +32,6 @@
  * IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
  */
 
-#include <linux/init.h>
 #include <linux/kernel.h>
 #include <linux/threads.h>
 
@@ -47,14 +46,14 @@
 #include <asm/netlogic/mips-extns.h>
 
 #include <asm/netlogic/xlp-hal/iomap.h>
-#include <asm/netlogic/xlp-hal/pic.h>
 #include <asm/netlogic/xlp-hal/xlp.h>
+#include <asm/netlogic/xlp-hal/pic.h>
 #include <asm/netlogic/xlp-hal/sys.h>
 
 static int xlp_wakeup_core(uint64_t sysbase, int node, int core)
 {
 	uint32_t coremask, value;
-	int count;
+	int count, resetreg;
 
 	coremask = (1 << core);
 
@@ -65,12 +64,24 @@
 		nlm_write_sys_reg(sysbase, SYS_CORE_DFS_DIS_CTRL, value);
 	}
 
-	/* Remove CPU Reset */
-	value = nlm_read_sys_reg(sysbase, SYS_CPU_RESET);
-	value &= ~coremask;
-	nlm_write_sys_reg(sysbase, SYS_CPU_RESET, value);
+	/* On 9XX, mark coherent first */
+	if (cpu_is_xlp9xx()) {
+		value = nlm_read_sys_reg(sysbase, SYS_9XX_CPU_NONCOHERENT_MODE);
+		value &= ~coremask;
+		nlm_write_sys_reg(sysbase, SYS_9XX_CPU_NONCOHERENT_MODE, value);
+	}
 
-	/* Poll for CPU to mark itself coherent */
+	/* Remove CPU Reset */
+	resetreg = cpu_is_xlp9xx() ? SYS_9XX_CPU_RESET : SYS_CPU_RESET;
+	value = nlm_read_sys_reg(sysbase, resetreg);
+	value &= ~coremask;
+	nlm_write_sys_reg(sysbase, resetreg, value);
+
+	/* We are done on 9XX */
+	if (cpu_is_xlp9xx())
+		return 1;
+
+	/* Poll for CPU to mark itself coherent on other types of XLP */
 	count = 100000;
 	do {
 		value = nlm_read_sys_reg(sysbase, SYS_CPU_NONCOHERENT_MODE);
@@ -84,7 +95,7 @@
 	volatile uint32_t *cpu_ready = nlm_get_boot_data(BOOT_CPU_READY);
 	int i, count, notready;
 
-	count = 0x20000000;
+	count = 0x800000;
 	do {
 		notready = nlm_threads_per_core;
 		for (i = 0; i < nlm_threads_per_core; i++)
@@ -98,27 +109,62 @@
 static void xlp_enable_secondary_cores(const cpumask_t *wakeup_mask)
 {
 	struct nlm_soc_info *nodep;
-	uint64_t syspcibase;
-	uint32_t syscoremask;
+	uint64_t syspcibase, fusebase;
+	uint32_t syscoremask, mask, fusemask;
 	int core, n, cpu;
 
 	for (n = 0; n < NLM_NR_NODES; n++) {
-		syspcibase = nlm_get_sys_pcibase(n);
-		if (nlm_read_reg(syspcibase, 0) == 0xffffffff)
-			break;
-
-		/* read cores in reset from SYS */
-		if (n != 0)
+		if (n != 0) {
+			/* check if node exists and is online */
+			if (cpu_is_xlp9xx()) {
+				int b = xlp9xx_get_socbus(n);
+				pr_info("Node %d SoC PCI bus %d.\n", n, b);
+				if (b == 0)
+					break;
+			} else {
+				syspcibase = nlm_get_sys_pcibase(n);
+				if (nlm_read_reg(syspcibase, 0) == 0xffffffff)
+					break;
+			}
 			nlm_node_init(n);
-		nodep = nlm_get_node(n);
-		syscoremask = nlm_read_sys_reg(nodep->sysbase, SYS_CPU_RESET);
-		/* The boot cpu */
-		if (n == 0) {
-			syscoremask |= 1;
-			nodep->coremask = 1;
 		}
 
-		for (core = 0; core < NLM_CORES_PER_NODE; core++) {
+		/* read cores in reset from SYS */
+		nodep = nlm_get_node(n);
+
+		if (cpu_is_xlp9xx()) {
+			fusebase = nlm_get_fuse_regbase(n);
+			fusemask = nlm_read_reg(fusebase, FUSE_9XX_DEVCFG6);
+			mask = 0xfffff;
+		} else {
+			fusemask = nlm_read_sys_reg(nodep->sysbase,
+						SYS_EFUSE_DEVICE_CFG_STATUS0);
+			switch (read_c0_prid() & 0xff00) {
+			case PRID_IMP_NETLOGIC_XLP3XX:
+				mask = 0xf;
+				break;
+			case PRID_IMP_NETLOGIC_XLP2XX:
+				mask = 0x3;
+				break;
+			case PRID_IMP_NETLOGIC_XLP8XX:
+			default:
+				mask = 0xff;
+				break;
+			}
+		}
+
+		/*
+		 * Fused out cores are set in the fusemask, and the remaining
+		 * cores are renumbered to range 0 .. nactive-1
+		 */
+		syscoremask = (1 << hweight32(~fusemask & mask)) - 1;
+
+		/* The boot cpu */
+		if (n == 0)
+			nodep->coremask = 1;
+
+		pr_info("Node %d - SYS/FUSE coremask %x\n", n, syscoremask);
+		for (core = 0; core < nlm_cores_per_node(); core++) {
 			/* we will be on node 0 core 0 */
 			if (n == 0 && core == 0)
 				continue;
@@ -128,7 +174,7 @@
 				continue;
 
 			/* see if at least the first hw thread is enabled */
-			cpu = (n * NLM_CORES_PER_NODE + core)
+			cpu = (n * nlm_cores_per_node() + core)
 						* NLM_THREADS_PER_CORE;
 			if (!cpumask_test_cpu(cpu, wakeup_mask))
 				continue;
@@ -141,7 +187,8 @@
 			nodep->coremask |= 1u << core;
 
 			/* spin until the hw threads sets their ready */
-			wait_for_cpus(cpu, 0);
+			if (!wait_for_cpus(cpu, 0))
+				pr_err("Node %d : timeout core %d\n", n, core);
 		}
 	}
 }
@@ -153,7 +200,8 @@
 	 * first wakeup core 0 threads
 	 */
 	xlp_boot_core0_siblings();
-	wait_for_cpus(0, 0);
+	if (!wait_for_cpus(0, 0))
+		pr_err("Node 0 : timeout core 0\n");
 
 	/* now get other cores out of reset */
 	xlp_enable_secondary_cores(&nlm_cpumask);
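
The interesting part of the wakeup change is how the per-node core mask is now derived: fused-out cores are flagged in the fuse register, the remaining cores are renumbered contiguously, so the usable mask is simply (1 << popcount(~fusemask & mask)) - 1. A stand-alone sketch of that computation, with GCC's __builtin_popcount standing in for the kernel's hweight32() and an invented example value:

#include <stdio.h>
#include <stdint.h>

/* Active-core mask from a fuse register: set bits in 'fusemask' are
 * fused-out cores; active cores are renumbered 0 .. nactive-1. */
static uint32_t coremask_from_fuse(uint32_t fusemask, uint32_t mask)
{
	int nactive = __builtin_popcount(~fusemask & mask);

	return (1u << nactive) - 1;
}

int main(void)
{
	/* invented example: 8-core part (mask 0xff) with cores 2 and 5 fused out */
	printf("coremask=0x%x\n", coremask_from_fuse(0x24, 0xff));
	return 0;
}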
diff --git a/arch/mips/netlogic/xlr/platform.c b/arch/mips/netlogic/xlr/platform.c
index 7b96a91..4785932 100644
--- a/arch/mips/netlogic/xlr/platform.c
+++ b/arch/mips/netlogic/xlr/platform.c
@@ -23,7 +23,7 @@
 #include <asm/netlogic/xlr/pic.h>
 #include <asm/netlogic/xlr/xlr.h>
 
-unsigned int nlm_xlr_uart_in(struct uart_port *p, int offset)
+static unsigned int nlm_xlr_uart_in(struct uart_port *p, int offset)
 {
 	uint64_t uartbase;
 	unsigned int value;
@@ -41,7 +41,7 @@
 	return value;
 }
 
-void nlm_xlr_uart_out(struct uart_port *p, int offset, int value)
+static void nlm_xlr_uart_out(struct uart_port *p, int offset, int value)
 {
 	uint64_t uartbase;
 
diff --git a/arch/mips/netlogic/xlr/setup.c b/arch/mips/netlogic/xlr/setup.c
index 921be5f..d118b9a 100644
--- a/arch/mips/netlogic/xlr/setup.c
+++ b/arch/mips/netlogic/xlr/setup.c
@@ -60,25 +60,6 @@
 struct nlm_soc_info nlm_nodes[NLM_NR_NODES];
 cpumask_t nlm_cpumask = CPU_MASK_CPU0;
 
-static void __init nlm_early_serial_setup(void)
-{
-	struct uart_port s;
-	unsigned long uart_base;
-
-	uart_base = (unsigned long)nlm_mmio_base(NETLOGIC_IO_UART_0_OFFSET);
-	memset(&s, 0, sizeof(s));
-	s.flags		= ASYNC_BOOT_AUTOCONF | ASYNC_SKIP_TEST;
-	s.iotype	= UPIO_MEM32;
-	s.regshift	= 2;
-	s.irq		= PIC_UART_0_IRQ;
-	s.uartclk	= PIC_CLK_HZ;
-	s.serial_in	= nlm_xlr_uart_in;
-	s.serial_out	= nlm_xlr_uart_out;
-	s.mapbase	= uart_base;
-	s.membase	= (unsigned char __iomem *)uart_base;
-	early_serial_setup(&s);
-}
-
 static void nlm_linux_exit(void)
 {
 	uint64_t gpiobase;
@@ -214,7 +195,6 @@
 	memcpy(reset_vec, (void *)nlm_reset_entry,
 			(nlm_reset_entry_end - nlm_reset_entry));
 
-	nlm_early_serial_setup();
 	build_arcs_cmdline(argv);
 	prom_add_memory();
 
diff --git a/arch/mips/netlogic/xlr/wakeup.c b/arch/mips/netlogic/xlr/wakeup.c
index 9fb81fa..d61cba1 100644
--- a/arch/mips/netlogic/xlr/wakeup.c
+++ b/arch/mips/netlogic/xlr/wakeup.c
@@ -32,7 +32,6 @@
  * IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
  */
 
-#include <linux/init.h>
 #include <linux/delay.h>
 #include <linux/threads.h>
 
@@ -70,7 +69,7 @@
 
 	/* Fill up the coremask early */
 	nodep->coremask = 1;
-	for (i = 1; i < NLM_CORES_PER_NODE; i++) {
+	for (i = 1; i < nlm_cores_per_node(); i++) {
 		for (j = 1000000; j > 0; j--) {
 			if (cpu_ready[i * NLM_THREADS_PER_CORE])
 				break;
diff --git a/arch/mips/oprofile/common.c b/arch/mips/oprofile/common.c
index 4d1736f..2a86e38 100644
--- a/arch/mips/oprofile/common.c
+++ b/arch/mips/oprofile/common.c
@@ -86,6 +86,8 @@
 	case CPU_34K:
 	case CPU_1004K:
 	case CPU_74K:
+	case CPU_INTERAPTIV:
+	case CPU_PROAPTIV:
 	case CPU_LOONGSON1:
 	case CPU_SB1:
 	case CPU_SB1A:
diff --git a/arch/mips/oprofile/op_model_mipsxx.c b/arch/mips/oprofile/op_model_mipsxx.c
index 3a2b6e9..4d94d75 100644
--- a/arch/mips/oprofile/op_model_mipsxx.c
+++ b/arch/mips/oprofile/op_model_mipsxx.c
@@ -376,6 +376,14 @@
 		op_model_mipsxx_ops.cpu_type = "mips/74K";
 		break;
 
+	case CPU_INTERAPTIV:
+		op_model_mipsxx_ops.cpu_type = "mips/interAptiv";
+		break;
+
+	case CPU_PROAPTIV:
+		op_model_mipsxx_ops.cpu_type = "mips/proAptiv";
+		break;
+
 	case CPU_5KC:
 		op_model_mipsxx_ops.cpu_type = "mips/5K";
 		break;
diff --git a/arch/mips/pci/Makefile b/arch/mips/pci/Makefile
index 719e455..137f2a6 100644
--- a/arch/mips/pci/Makefile
+++ b/arch/mips/pci/Makefile
@@ -60,4 +60,5 @@
 
 ifdef CONFIG_PCI_MSI
 obj-$(CONFIG_CAVIUM_OCTEON_SOC) += msi-octeon.o
+obj-$(CONFIG_CPU_XLP)		+= msi-xlp.o
 endif
diff --git a/arch/mips/pci/fixup-malta.c b/arch/mips/pci/fixup-malta.c
index df36e23..7a0eda7 100644
--- a/arch/mips/pci/fixup-malta.c
+++ b/arch/mips/pci/fixup-malta.c
@@ -54,6 +54,7 @@
 static void malta_piix_func0_fixup(struct pci_dev *pdev)
 {
 	unsigned char reg_val;
+	u32 reg_val32;
 	/* PIIX PIRQC[A:D] irq mappings */
 	static int piixirqmap[PIIX4_FUNC0_PIRQRC_IRQ_ROUTING_MAX] = {
 		0,  0,	0,  3,
@@ -83,6 +84,16 @@
 		pci_write_config_byte(pdev, PIIX4_FUNC0_TOM, reg_val |
 				PIIX4_FUNC0_TOM_TOP_OF_MEMORY_MASK);
 	}
+
+	/* Mux SERIRQ to its pin */
+	pci_read_config_dword(pdev, PIIX4_FUNC0_GENCFG, &reg_val32);
+	pci_write_config_dword(pdev, PIIX4_FUNC0_GENCFG,
+			       reg_val32 | PIIX4_FUNC0_GENCFG_SERIRQ);
+
+	/* Enable SERIRQ */
+	pci_read_config_byte(pdev, PIIX4_FUNC0_SERIRQC, &reg_val);
+	reg_val |= PIIX4_FUNC0_SERIRQC_EN | PIIX4_FUNC0_SERIRQC_CONT;
+	pci_write_config_byte(pdev, PIIX4_FUNC0_SERIRQC, reg_val);
 }
 
 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82371AB_0,
diff --git a/arch/mips/pci/fixup-rc32434.c b/arch/mips/pci/fixup-rc32434.c
index d0f6ecb..7fcafd5 100644
--- a/arch/mips/pci/fixup-rc32434.c
+++ b/arch/mips/pci/fixup-rc32434.c
@@ -27,7 +27,6 @@
 #include <linux/types.h>
 #include <linux/pci.h>
 #include <linux/kernel.h>
-#include <linux/init.h>
 
 #include <asm/mach-rc32434/rc32434.h>
 #include <asm/mach-rc32434/irq.h>
diff --git a/arch/mips/pci/fixup-sb1250.c b/arch/mips/pci/fixup-sb1250.c
index 1441bec..8feae91 100644
--- a/arch/mips/pci/fixup-sb1250.c
+++ b/arch/mips/pci/fixup-sb1250.c
@@ -8,7 +8,6 @@
  *	2 of the License, or (at your option) any later version.
  */
 
-#include <linux/init.h>
 #include <linux/pci.h>
 
 /*
diff --git a/arch/mips/pci/msi-xlp.c b/arch/mips/pci/msi-xlp.c
new file mode 100644
index 0000000..afd8405
--- /dev/null
+++ b/arch/mips/pci/msi-xlp.c
@@ -0,0 +1,494 @@
+/*
+ * Copyright (c) 2003-2012 Broadcom Corporation
+ * All Rights Reserved
+ *
+ * This software is available to you under a choice of one of two
+ * licenses.  You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the Broadcom
+ * license below:
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in
+ *    the documentation and/or other materials provided with the
+ *    distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY BROADCOM ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL BROADCOM OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
+ * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
+ * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
+ * IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <linux/types.h>
+#include <linux/pci.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/msi.h>
+#include <linux/mm.h>
+#include <linux/irq.h>
+#include <linux/irqdesc.h>
+#include <linux/console.h>
+
+#include <asm/io.h>
+
+#include <asm/netlogic/interrupt.h>
+#include <asm/netlogic/haldefs.h>
+#include <asm/netlogic/common.h>
+#include <asm/netlogic/mips-extns.h>
+
+#include <asm/netlogic/xlp-hal/iomap.h>
+#include <asm/netlogic/xlp-hal/xlp.h>
+#include <asm/netlogic/xlp-hal/pic.h>
+#include <asm/netlogic/xlp-hal/pcibus.h>
+#include <asm/netlogic/xlp-hal/bridge.h>
+
+#define XLP_MSIVEC_PER_LINK	32
+#define XLP_MSIXVEC_TOTAL	32
+#define XLP_MSIXVEC_PER_LINK	8
+
+/* 128 MSI irqs per node, mapped starting at NLM_MSI_VEC_BASE */
+static inline int nlm_link_msiirq(int link, int msivec)
+{
+	return NLM_MSI_VEC_BASE + link * XLP_MSIVEC_PER_LINK + msivec;
+}
+
+static inline int nlm_irq_msivec(int irq)
+{
+	return irq % XLP_MSIVEC_PER_LINK;
+}
+
+static inline int nlm_irq_msilink(int irq)
+{
+	return (irq % (XLP_MSIVEC_PER_LINK * PCIE_NLINKS)) /
+						XLP_MSIVEC_PER_LINK;
+}
+
+/*
+ * Only 32 MSI-X vectors are possible because there are only 32 PIC
+ * interrupts for MSI. We split them statically and use 8 MSI-X vectors
+ * per link - this keeps the allocation and lookup simple.
+ */
+static inline int nlm_link_msixirq(int link, int bit)
+{
+	return NLM_MSIX_VEC_BASE + link * XLP_MSIXVEC_PER_LINK + bit;
+}
+
+static inline int nlm_irq_msixvec(int irq)
+{
+	return irq % XLP_MSIXVEC_TOTAL;  /* works when given xirq */
+}
+
+static inline int nlm_irq_msixlink(int irq)
+{
+	return nlm_irq_msixvec(irq) / XLP_MSIXVEC_PER_LINK;
+}
+
+/*
+ * Per link MSI and MSI-X information, set as IRQ handler data for
+ * MSI and MSI-X interrupts.
+ */
+struct xlp_msi_data {
+	struct nlm_soc_info *node;
+	uint64_t	lnkbase;
+	uint32_t	msi_enabled_mask;
+	uint32_t	msi_alloc_mask;
+	uint32_t	msix_alloc_mask;
+	spinlock_t	msi_lock;
+};
+
+/*
+ * MSI Chip definitions
+ *
+ * On XLP, there is a PIC interrupt associated with each PCIe link on the
+ * chip (which appears as a PCI bridge to us). This gives us 32 MSI irqs
+ * per link and 128 overall.
+ *
+ * When a device connected to the link raises an MSI interrupt, we get a
+ * link interrupt, and we then have to look at the PCIE_MSI_STATUS register
+ * on the bridge to map it to the IRQ.
+ */
+static void xlp_msi_enable(struct irq_data *d)
+{
+	struct xlp_msi_data *md = irq_data_get_irq_handler_data(d);
+	unsigned long flags;
+	int vec;
+
+	vec = nlm_irq_msivec(d->irq);
+	spin_lock_irqsave(&md->msi_lock, flags);
+	md->msi_enabled_mask |= 1u << vec;
+	nlm_write_reg(md->lnkbase, PCIE_MSI_EN, md->msi_enabled_mask);
+	spin_unlock_irqrestore(&md->msi_lock, flags);
+}
+
+static void xlp_msi_disable(struct irq_data *d)
+{
+	struct xlp_msi_data *md = irq_data_get_irq_handler_data(d);
+	unsigned long flags;
+	int vec;
+
+	vec = nlm_irq_msivec(d->irq);
+	spin_lock_irqsave(&md->msi_lock, flags);
+	md->msi_enabled_mask &= ~(1u << vec);
+	nlm_write_reg(md->lnkbase, PCIE_MSI_EN, md->msi_enabled_mask);
+	spin_unlock_irqrestore(&md->msi_lock, flags);
+}
+
+static void xlp_msi_mask_ack(struct irq_data *d)
+{
+	struct xlp_msi_data *md = irq_data_get_irq_handler_data(d);
+	int link, vec;
+
+	link = nlm_irq_msilink(d->irq);
+	vec = nlm_irq_msivec(d->irq);
+	xlp_msi_disable(d);
+
+	/* Ack MSI on bridge */
+	nlm_write_reg(md->lnkbase, PCIE_MSI_STATUS, 1u << vec);
+
+	/* Ack at eirr and PIC */
+	ack_c0_eirr(PIC_PCIE_LINK_MSI_IRQ(link));
+	nlm_pic_ack(md->node->picbase, PIC_IRT_PCIE_LINK_INDEX(link));
+}
+
+static struct irq_chip xlp_msi_chip = {
+	.name		= "XLP-MSI",
+	.irq_enable	= xlp_msi_enable,
+	.irq_disable	= xlp_msi_disable,
+	.irq_mask_ack	= xlp_msi_mask_ack,
+	.irq_unmask	= xlp_msi_enable,
+};
+
+/*
+ * MSI-X interrupt handling is different from MSI: there are 32 MSI-X
+ * interrupts generated by the PIC, and each of these corresponds to an
+ * MSI-X vector (0-31) that can be assigned.
+ *
+ * We divide the MSI-X vectors into 8 per link and do per-link
+ * allocation.
+ *
+ * Enable and disable are done using the standard MSI functions.
+ */
+static void xlp_msix_mask_ack(struct irq_data *d)
+{
+	struct xlp_msi_data *md = irq_data_get_irq_handler_data(d);
+	int link, msixvec;
+
+	msixvec = nlm_irq_msixvec(d->irq);
+	link = nlm_irq_msixlink(d->irq);
+	mask_msi_irq(d);
+
+	/* Ack MSI on bridge */
+	nlm_write_reg(md->lnkbase, PCIE_MSIX_STATUS, 1u << msixvec);
+
+	/* Ack at eirr and PIC */
+	ack_c0_eirr(PIC_PCIE_MSIX_IRQ(link));
+	nlm_pic_ack(md->node->picbase, PIC_IRT_PCIE_MSIX_INDEX(msixvec));
+}
+
+static struct irq_chip xlp_msix_chip = {
+	.name		= "XLP-MSIX",
+	.irq_enable	= unmask_msi_irq,
+	.irq_disable	= mask_msi_irq,
+	.irq_mask_ack	= xlp_msix_mask_ack,
+	.irq_unmask	= unmask_msi_irq,
+};
+
+void destroy_irq(unsigned int irq)
+{
+	    /* nothing to do yet */
+}
+
+void arch_teardown_msi_irq(unsigned int irq)
+{
+	destroy_irq(irq);
+}
+
+/*
+ * Setup a PCIe link for MSI.  By default, the links are in
+ * legacy interrupt mode.  We will switch them to MSI mode
+ * at the first MSI request.
+ */
+static void xlp_config_link_msi(uint64_t lnkbase, int lirq, uint64_t msiaddr)
+{
+	u32 val;
+
+	val = nlm_read_reg(lnkbase, PCIE_INT_EN0);
+	if ((val & 0x200) == 0) {
+		val |= 0x200;		/* MSI Interrupt enable */
+		nlm_write_reg(lnkbase, PCIE_INT_EN0, val);
+	}
+
+	val = nlm_read_reg(lnkbase, 0x1);	/* CMD */
+	if ((val & 0x0400) == 0) {
+		val |= 0x0400;
+		nlm_write_reg(lnkbase, 0x1, val);
+	}
+
+	/* Update IRQ in the PCI irq reg */
+	val = nlm_read_pci_reg(lnkbase, 0xf);
+	val &= ~0x1fu;
+	val |= (1 << 8) | lirq;
+	nlm_write_pci_reg(lnkbase, 0xf, val);
+
+	/* MSI addr */
+	nlm_write_reg(lnkbase, PCIE_BRIDGE_MSI_ADDRH, msiaddr >> 32);
+	nlm_write_reg(lnkbase, PCIE_BRIDGE_MSI_ADDRL, msiaddr & 0xffffffff);
+
+	/* MSI cap for bridge */
+	val = nlm_read_reg(lnkbase, PCIE_BRIDGE_MSI_CAP);
+	if ((val & (1 << 16)) == 0) {
+		val |= 0xb << 16;		/* mmc32, msi enable */
+		nlm_write_reg(lnkbase, PCIE_BRIDGE_MSI_CAP, val);
+	}
+}
+
+/*
+ * Allocate a MSI vector on a link
+ */
+static int xlp_setup_msi(uint64_t lnkbase, int node, int link,
+	struct msi_desc *desc)
+{
+	struct xlp_msi_data *md;
+	struct msi_msg msg;
+	unsigned long flags;
+	int msivec, irt, lirq, xirq, ret;
+	uint64_t msiaddr;
+
+	/* Get MSI data for the link */
+	lirq = PIC_PCIE_LINK_MSI_IRQ(link);
+	xirq = nlm_irq_to_xirq(node, nlm_link_msiirq(link, 0));
+	md = irq_get_handler_data(xirq);
+	msiaddr = MSI_LINK_ADDR(node, link);
+
+	spin_lock_irqsave(&md->msi_lock, flags);
+	if (md->msi_alloc_mask == 0) {
+		/* switch the link IRQ to MSI range */
+		xlp_config_link_msi(lnkbase, lirq, msiaddr);
+		irt = PIC_IRT_PCIE_LINK_INDEX(link);
+		nlm_setup_pic_irq(node, lirq, lirq, irt);
+		nlm_pic_init_irt(nlm_get_node(node)->picbase, irt, lirq,
+				 node * nlm_threads_per_node(), 1 /*en */);
+	}
+
+	/* allocate a MSI vec, and tell the bridge about it */
+	msivec = fls(md->msi_alloc_mask);
+	if (msivec == XLP_MSIVEC_PER_LINK) {
+		spin_unlock_irqrestore(&md->msi_lock, flags);
+		return -ENOMEM;
+	}
+	md->msi_alloc_mask |= (1u << msivec);
+	spin_unlock_irqrestore(&md->msi_lock, flags);
+
+	msg.address_hi = msiaddr >> 32;
+	msg.address_lo = msiaddr & 0xffffffff;
+	msg.data = 0xc00 | msivec;
+
+	xirq = xirq + msivec;		/* msi mapped to global irq space */
+	ret = irq_set_msi_desc(xirq, desc);
+	if (ret < 0) {
+		destroy_irq(xirq);
+		return ret;
+	}
+
+	write_msi_msg(xirq, &msg);
+	return 0;
+}
+
+/*
+ * Switch a link to MSI-X mode
+ */
+static void xlp_config_link_msix(uint64_t lnkbase, int lirq, uint64_t msixaddr)
+{
+	u32 val;
+
+	val = nlm_read_reg(lnkbase, 0x2C);
+	if ((val & 0x80000000U) == 0) {
+		val |= 0x80000000U;
+		nlm_write_reg(lnkbase, 0x2C, val);
+	}
+	val = nlm_read_reg(lnkbase, PCIE_INT_EN0);
+	if ((val & 0x200) == 0) {
+		val |= 0x200;		/* MSI Interrupt enable */
+		nlm_write_reg(lnkbase, PCIE_INT_EN0, val);
+	}
+
+	val = nlm_read_reg(lnkbase, 0x1);	/* CMD */
+	if ((val & 0x0400) == 0) {
+		val |= 0x0400;
+		nlm_write_reg(lnkbase, 0x1, val);
+	}
+
+	/* Update IRQ in the PCI irq reg */
+	val = nlm_read_pci_reg(lnkbase, 0xf);
+	val &= ~0x1fu;
+	val |= (1 << 8) | lirq;
+	nlm_write_pci_reg(lnkbase, 0xf, val);
+
+	/* MSI-X addresses */
+	nlm_write_reg(lnkbase, PCIE_BRIDGE_MSIX_ADDR_BASE, msixaddr >> 8);
+	nlm_write_reg(lnkbase, PCIE_BRIDGE_MSIX_ADDR_LIMIT,
+					(msixaddr + MSI_ADDR_SZ) >> 8);
+}
+
+/*
+ *  Allocate a MSI-X vector
+ */
+static int xlp_setup_msix(uint64_t lnkbase, int node, int link,
+	struct msi_desc *desc)
+{
+	struct xlp_msi_data *md;
+	struct msi_msg msg;
+	unsigned long flags;
+	int t, msixvec, lirq, xirq, ret;
+	uint64_t msixaddr;
+
+	/* Get MSI data for the link */
+	lirq = PIC_PCIE_MSIX_IRQ(link);
+	xirq = nlm_irq_to_xirq(node, nlm_link_msixirq(link, 0));
+	md = irq_get_handler_data(xirq);
+	msixaddr = MSIX_LINK_ADDR(node, link);
+
+	spin_lock_irqsave(&md->msi_lock, flags);
+	/* switch the PCIe link to MSI-X mode at the first alloc */
+	if (md->msix_alloc_mask == 0)
+		xlp_config_link_msix(lnkbase, lirq, msixaddr);
+
+	/* allocate a MSI-X vec, and tell the bridge about it */
+	t = fls(md->msix_alloc_mask);
+	if (t == XLP_MSIXVEC_PER_LINK) {
+		spin_unlock_irqrestore(&md->msi_lock, flags);
+		return -ENOMEM;
+	}
+	md->msix_alloc_mask |= (1u << t);
+	spin_unlock_irqrestore(&md->msi_lock, flags);
+
+	xirq += t;
+	msixvec = nlm_irq_msixvec(xirq);
+	msg.address_hi = msixaddr >> 32;
+	msg.address_lo = msixaddr & 0xffffffff;
+	msg.data = 0xc00 | msixvec;
+
+	ret = irq_set_msi_desc(xirq, desc);
+	if (ret < 0) {
+		destroy_irq(xirq);
+		return ret;
+	}
+
+	write_msi_msg(xirq, &msg);
+	return 0;
+}
+
+int arch_setup_msi_irq(struct pci_dev *dev, struct msi_desc *desc)
+{
+	struct pci_dev *lnkdev;
+	uint64_t lnkbase;
+	int node, link, slot;
+
+	lnkdev = xlp_get_pcie_link(dev);
+	if (lnkdev == NULL) {
+		dev_err(&dev->dev, "Could not find bridge\n");
+		return 1;
+	}
+	slot = PCI_SLOT(lnkdev->devfn);
+	link = PCI_FUNC(lnkdev->devfn);
+	node = slot / 8;
+	lnkbase = nlm_get_pcie_base(node, link);
+
+	if (desc->msi_attrib.is_msix)
+		return xlp_setup_msix(lnkbase, node, link, desc);
+	else
+		return xlp_setup_msi(lnkbase, node, link, desc);
+}
+
+void __init xlp_init_node_msi_irqs(int node, int link)
+{
+	struct nlm_soc_info *nodep;
+	struct xlp_msi_data *md;
+	int irq, i, irt, msixvec;
+
+	pr_info("[%d %d] Init node PCI IRT\n", node, link);
+	nodep = nlm_get_node(node);
+
+	/* Alloc an MSI block for the link */
+	md = kzalloc(sizeof(*md), GFP_KERNEL);
+	spin_lock_init(&md->msi_lock);
+	md->msi_enabled_mask = 0;
+	md->msi_alloc_mask = 0;
+	md->msix_alloc_mask = 0;
+	md->node = nodep;
+	md->lnkbase = nlm_get_pcie_base(node, link);
+
+	/* extended space for MSI interrupts */
+	irq = nlm_irq_to_xirq(node, nlm_link_msiirq(link, 0));
+	for (i = irq; i < irq + XLP_MSIVEC_PER_LINK; i++) {
+		irq_set_chip_and_handler(i, &xlp_msi_chip, handle_level_irq);
+		irq_set_handler_data(i, md);
+	}
+
+	for (i = 0; i < XLP_MSIXVEC_PER_LINK; i++) {
+		/* Initialize MSI-X IRTs to generate one interrupt per link */
+		msixvec = link * XLP_MSIXVEC_PER_LINK + i;
+		irt = PIC_IRT_PCIE_MSIX_INDEX(msixvec);
+		nlm_pic_init_irt(nodep->picbase, irt, PIC_PCIE_MSIX_IRQ(link),
+			node * nlm_threads_per_node(), 1 /* enable */);
+
+		/* Initialize MSI-X extended irq space for the link  */
+		irq = nlm_irq_to_xirq(node, nlm_link_msixirq(link, i));
+		irq_set_chip_and_handler(irq, &xlp_msix_chip, handle_level_irq);
+		irq_set_handler_data(irq, md);
+	}
+
+}
+
+void nlm_dispatch_msi(int node, int lirq)
+{
+	struct xlp_msi_data *md;
+	int link, i, irqbase;
+	u32 status;
+
+	link = lirq - PIC_PCIE_LINK_MSI_IRQ_BASE;
+	irqbase = nlm_irq_to_xirq(node, nlm_link_msiirq(link, 0));
+	md = irq_get_handler_data(irqbase);
+	status = nlm_read_reg(md->lnkbase, PCIE_MSI_STATUS) &
+						md->msi_enabled_mask;
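+	/* dispatch one interrupt per pending bit: __ffs() finds the
+	 * lowest set bit, "status &= status - 1" then clears it */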
+	while (status) {
+		i = __ffs(status);
+		do_IRQ(irqbase + i);
+		status &= status - 1;
+	}
+}
+
+void nlm_dispatch_msix(int node, int lirq)
+{
+	struct xlp_msi_data *md;
+	int link, i, irqbase;
+	u32 status;
+
+	link = lirq - PIC_PCIE_MSIX_IRQ_BASE;
+	irqbase = nlm_irq_to_xirq(node, nlm_link_msixirq(link, 0));
+	md = irq_get_handler_data(irqbase);
+	status = nlm_read_reg(md->lnkbase, PCIE_MSIX_STATUS);
+
+	/* narrow it down to the MSI-X vectors for our link */
+	status = (status >> (link * XLP_MSIXVEC_PER_LINK)) &
+			((1 << XLP_MSIXVEC_PER_LINK) - 1);
+
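+	/* same bit-scan dispatch loop as the MSI case above */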
+	while (status) {
+		i = __ffs(status);
+		do_IRQ(irqbase + i);
+		status &= status - 1;
+	}
+}
diff --git a/arch/mips/pci/ops-bcm63xx.c b/arch/mips/pci/ops-bcm63xx.c
index 6144bb3..13eea69 100644
--- a/arch/mips/pci/ops-bcm63xx.c
+++ b/arch/mips/pci/ops-bcm63xx.c
@@ -9,7 +9,6 @@
 #include <linux/types.h>
 #include <linux/pci.h>
 #include <linux/kernel.h>
-#include <linux/init.h>
 #include <linux/delay.h>
 #include <linux/io.h>
 
diff --git a/arch/mips/pci/ops-bonito64.c b/arch/mips/pci/ops-bonito64.c
index 830352e..c06205a 100644
--- a/arch/mips/pci/ops-bonito64.c
+++ b/arch/mips/pci/ops-bonito64.c
@@ -22,7 +22,6 @@
 #include <linux/types.h>
 #include <linux/pci.h>
 #include <linux/kernel.h>
-#include <linux/init.h>
 
 #include <asm/mips-boards/bonito64.h>
 
diff --git a/arch/mips/pci/ops-lantiq.c b/arch/mips/pci/ops-lantiq.c
index 16e7c25..e5738ee 100644
--- a/arch/mips/pci/ops-lantiq.c
+++ b/arch/mips/pci/ops-lantiq.c
@@ -9,7 +9,6 @@
 #include <linux/types.h>
 #include <linux/pci.h>
 #include <linux/kernel.h>
-#include <linux/init.h>
 #include <linux/delay.h>
 #include <linux/mm.h>
 #include <asm/addrspace.h>
diff --git a/arch/mips/pci/ops-loongson2.c b/arch/mips/pci/ops-loongson2.c
index 98254af..24138bb 100644
--- a/arch/mips/pci/ops-loongson2.c
+++ b/arch/mips/pci/ops-loongson2.c
@@ -14,7 +14,6 @@
 #include <linux/types.h>
 #include <linux/pci.h>
 #include <linux/kernel.h>
-#include <linux/init.h>
 #include <linux/export.h>
 
 #include <loongson.h>
diff --git a/arch/mips/pci/ops-mace.c b/arch/mips/pci/ops-mace.c
index 1cfb558..6b5821f 100644
--- a/arch/mips/pci/ops-mace.c
+++ b/arch/mips/pci/ops-mace.c
@@ -6,7 +6,6 @@
  * Copyright (C) 2000, 2001 Keith M Wesolowski
  */
 #include <linux/kernel.h>
-#include <linux/init.h>
 #include <linux/pci.h>
 #include <linux/types.h>
 #include <asm/pci.h>
diff --git a/arch/mips/pci/ops-msc.c b/arch/mips/pci/ops-msc.c
index 92a8543..dbbf365 100644
--- a/arch/mips/pci/ops-msc.c
+++ b/arch/mips/pci/ops-msc.c
@@ -24,7 +24,6 @@
 #include <linux/types.h>
 #include <linux/pci.h>
 #include <linux/kernel.h>
-#include <linux/init.h>
 
 #include <asm/mips-boards/msc01_pci.h>
 
diff --git a/arch/mips/pci/ops-nile4.c b/arch/mips/pci/ops-nile4.c
index 499e35c..a1a7c9f 100644
--- a/arch/mips/pci/ops-nile4.c
+++ b/arch/mips/pci/ops-nile4.c
@@ -1,5 +1,4 @@
 #include <linux/kernel.h>
-#include <linux/init.h>
 #include <linux/pci.h>
 #include <asm/bootinfo.h>
 
diff --git a/arch/mips/pci/ops-rc32434.c b/arch/mips/pci/ops-rc32434.c
index 7c7182e..874ed6d 100644
--- a/arch/mips/pci/ops-rc32434.c
+++ b/arch/mips/pci/ops-rc32434.c
@@ -26,7 +26,6 @@
  *  675 Mass Ave, Cambridge, MA 02139, USA.
  */
 #include <linux/delay.h>
-#include <linux/init.h>
 #include <linux/io.h>
 #include <linux/pci.h>
 #include <linux/types.h>
diff --git a/arch/mips/pci/pci-ip27.c b/arch/mips/pci/pci-ip27.c
index 162b4cb..0f09eaf 100644
--- a/arch/mips/pci/pci-ip27.c
+++ b/arch/mips/pci/pci-ip27.c
@@ -7,7 +7,6 @@
  * Copyright (C) 1999, 2000, 04 Ralf Baechle (ralf@linux-mips.org)
  * Copyright (C) 1999, 2000 Silicon Graphics, Inc.
  */
-#include <linux/init.h>
 #include <linux/kernel.h>
 #include <linux/export.h>
 #include <linux/pci.h>
diff --git a/arch/mips/pci/pci-malta.c b/arch/mips/pci/pci-malta.c
index 37134dd..f1a7389 100644
--- a/arch/mips/pci/pci-malta.c
+++ b/arch/mips/pci/pci-malta.c
@@ -241,9 +241,9 @@
 		return;
 	}
 
-	/* Change start address to avoid conflicts with ACPI and SMB devices */
-	if (controller->io_resource->start < 0x00002000UL)
-		controller->io_resource->start = 0x00002000UL;
+	/* PIIX4 ACPI starts at 0x1000 */
+	if (controller->io_resource->start < 0x00001000UL)
+		controller->io_resource->start = 0x00001000UL;
 
 	iomem_resource.end &= 0xfffffffffULL;			/* 64 GB */
 	ioport_resource.end = controller->io_resource->end;
diff --git a/arch/mips/pci/pci-rt3883.c b/arch/mips/pci/pci-rt3883.c
index adeff2b..72919ae 100644
--- a/arch/mips/pci/pci-rt3883.c
+++ b/arch/mips/pci/pci-rt3883.c
@@ -436,9 +436,6 @@
 		return -ENOMEM;
 
 	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-	if (!res)
-		return -EINVAL;
-
 	rpc->base = devm_ioremap_resource(dev, res);
 	if (IS_ERR(rpc->base))
 		return PTR_ERR(rpc->base);
diff --git a/arch/mips/pci/pci-xlp.c b/arch/mips/pci/pci-xlp.c
index 653d2db..7babf01 100644
--- a/arch/mips/pci/pci-xlp.c
+++ b/arch/mips/pci/pci-xlp.c
@@ -47,10 +47,11 @@
 #include <asm/netlogic/interrupt.h>
 #include <asm/netlogic/haldefs.h>
 #include <asm/netlogic/common.h>
+#include <asm/netlogic/mips-extns.h>
 
 #include <asm/netlogic/xlp-hal/iomap.h>
-#include <asm/netlogic/xlp-hal/pic.h>
 #include <asm/netlogic/xlp-hal/xlp.h>
+#include <asm/netlogic/xlp-hal/pic.h>
 #include <asm/netlogic/xlp-hal/pcibus.h>
 #include <asm/netlogic/xlp-hal/bridge.h>
 
@@ -66,9 +67,22 @@
 	u32 *cfgaddr;
 
 	where &= ~3;
-	if (bus->number == 0 && PCI_SLOT(devfn) == 1 && where == 0x954)
+	if (cpu_is_xlp9xx()) {
+		/* be very careful on SoC buses */
+		if (bus->number == 0) {
+			/* Scan only existing nodes - uboot bug? */
+			if (PCI_SLOT(devfn) != 0 ||
+					   !nlm_node_present(PCI_FUNC(devfn)))
+				return 0xffffffff;
+		} else if (bus->parent->number == 0) {	/* SoC bus */
+			if (PCI_SLOT(devfn) == 0)	/* b.0.0 hangs */
+				return 0xffffffff;
+			if (devfn == 44)		/* b.5.4 hangs */
+				return 0xffffffff;
+		}
+	} else if (bus->number == 0 && PCI_SLOT(devfn) == 1 && where == 0x954) {
 		return 0xffffffff;
-
+	}
 	cfgaddr = (u32 *)(pci_config_base +
 			pci_cfg_addr(bus->number, devfn, where));
 	data = *cfgaddr;
@@ -162,27 +176,39 @@
 	.io_offset	= 0x00000000UL,
 };
 
-static struct pci_dev *xlp_get_pcie_link(const struct pci_dev *dev)
+struct pci_dev *xlp_get_pcie_link(const struct pci_dev *dev)
 {
 	struct pci_bus *bus, *p;
 
-	/* Find the bridge on bus 0 */
 	bus = dev->bus;
-	for (p = bus->parent; p && p->number != 0; p = p->parent)
-		bus = p;
 
-	return p ? bus->self : NULL;
+	if (cpu_is_xlp9xx()) {
+		/* find the bus whose grandparent is bus 0 */
+		for (p = bus->parent; p && p->parent && p->parent->number != 0;
+				p = p->parent)
+			bus = p;
+		return (p && p->parent) ? bus->self : NULL;
+	} else {
+		/* Find the bridge on bus 0 */
+		for (p = bus->parent; p && p->number != 0; p = p->parent)
+			bus = p;
+
+		return p ? bus->self : NULL;
+	}
 }
 
-static inline int nlm_pci_link_to_irq(int link)
+int xlp_socdev_to_node(const struct pci_dev *lnkdev)
 {
-	return PIC_PCIE_LINK_0_IRQ + link;
+	if (cpu_is_xlp9xx())
+		return PCI_FUNC(lnkdev->bus->self->devfn);
+	else
+		return PCI_SLOT(lnkdev->devfn) / 8;
 }
 
 int __init pcibios_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
 {
 	struct pci_dev *lnkdev;
-	int lnkslot, lnkfunc;
+	int lnkfunc, node;
 
 	/*
 	 * For XLP PCIe, there is an IRQ per Link, find out which
@@ -191,9 +217,11 @@
 	lnkdev = xlp_get_pcie_link(dev);
 	if (lnkdev == NULL)
 		return 0;
+
 	lnkfunc = PCI_FUNC(lnkdev->devfn);
-	lnkslot = PCI_SLOT(lnkdev->devfn);
-	return nlm_irq_to_xirq(lnkslot / 8, nlm_pci_link_to_irq(lnkfunc));
+	node = xlp_socdev_to_node(lnkdev);
+
+	return nlm_irq_to_xirq(node, PIC_PCIE_LINK_LEGACY_IRQ(lnkfunc));
 }
 
 /* Do platform specific device initialization at pci_enable_device() time */
@@ -220,17 +248,38 @@
 	 *  Enable byte swap in hardware. Program each link's PCIe SWAP regions
 	 * from the link's address ranges.
 	 */
-	reg = nlm_read_bridge_reg(nbubase, BRIDGE_PCIEMEM_BASE0 + link);
-	nlm_write_pci_reg(lnkbase, PCIE_BYTE_SWAP_MEM_BASE, reg);
+	if (cpu_is_xlp9xx()) {
+		reg = nlm_read_bridge_reg(nbubase,
+				BRIDGE_9XX_PCIEMEM_BASE0 + link);
+		nlm_write_pci_reg(lnkbase, PCIE_9XX_BYTE_SWAP_MEM_BASE, reg);
 
-	reg = nlm_read_bridge_reg(nbubase, BRIDGE_PCIEMEM_LIMIT0 + link);
-	nlm_write_pci_reg(lnkbase, PCIE_BYTE_SWAP_MEM_LIM, reg | 0xfff);
+		reg = nlm_read_bridge_reg(nbubase,
+				BRIDGE_9XX_PCIEMEM_LIMIT0 + link);
+		nlm_write_pci_reg(lnkbase,
+				PCIE_9XX_BYTE_SWAP_MEM_LIM, reg | 0xfff);
 
-	reg = nlm_read_bridge_reg(nbubase, BRIDGE_PCIEIO_BASE0 + link);
-	nlm_write_pci_reg(lnkbase, PCIE_BYTE_SWAP_IO_BASE, reg);
+		reg = nlm_read_bridge_reg(nbubase,
+				BRIDGE_9XX_PCIEIO_BASE0 + link);
+		nlm_write_pci_reg(lnkbase, PCIE_9XX_BYTE_SWAP_IO_BASE, reg);
 
-	reg = nlm_read_bridge_reg(nbubase, BRIDGE_PCIEIO_LIMIT0 + link);
-	nlm_write_pci_reg(lnkbase, PCIE_BYTE_SWAP_IO_LIM, reg | 0xfff);
+		reg = nlm_read_bridge_reg(nbubase,
+				BRIDGE_9XX_PCIEIO_LIMIT0 + link);
+		nlm_write_pci_reg(lnkbase,
+				PCIE_9XX_BYTE_SWAP_IO_LIM, reg | 0xfff);
+	} else {
+		reg = nlm_read_bridge_reg(nbubase, BRIDGE_PCIEMEM_BASE0 + link);
+		nlm_write_pci_reg(lnkbase, PCIE_BYTE_SWAP_MEM_BASE, reg);
+
+		reg = nlm_read_bridge_reg(nbubase,
+					BRIDGE_PCIEMEM_LIMIT0 + link);
+		nlm_write_pci_reg(lnkbase, PCIE_BYTE_SWAP_MEM_LIM, reg | 0xfff);
+
+		reg = nlm_read_bridge_reg(nbubase, BRIDGE_PCIEIO_BASE0 + link);
+		nlm_write_pci_reg(lnkbase, PCIE_BYTE_SWAP_IO_BASE, reg);
+
+		reg = nlm_read_bridge_reg(nbubase, BRIDGE_PCIEIO_LIMIT0 + link);
+		nlm_write_pci_reg(lnkbase, PCIE_BYTE_SWAP_IO_LIM, reg | 0xfff);
+	}
 }
 #else
 /* Swap configuration not needed in little-endian mode */
@@ -239,7 +288,6 @@
 
 static int __init pcibios_init(void)
 {
-	struct nlm_soc_info *nodep;
 	uint64_t pciebase;
 	int link, n;
 	u32 reg;
@@ -253,20 +301,20 @@
 	ioport_resource.end   = ~0;
 
 	for (n = 0; n < NLM_NR_NODES; n++) {
-		nodep = nlm_get_node(n);
-		if (!nodep->coremask)
-			continue;	/* node does not exist */
+		if (!nlm_node_present(n))
+			continue;
 
-		for (link = 0; link < 4; link++) {
+		for (link = 0; link < PCIE_NLINKS; link++) {
 			pciebase = nlm_get_pcie_base(n, link);
 			if (nlm_read_pci_reg(pciebase, 0) == 0xffffffff)
 				continue;
 			xlp_config_pci_bswap(n, link);
+			xlp_init_node_msi_irqs(n, link);
 
 			/* put in intpin and irq - u-boot does not */
 			reg = nlm_read_pci_reg(pciebase, 0xf);
-			reg &= ~0x1fu;
-			reg |= (1 << 8) | nlm_pci_link_to_irq(link);
+			reg &= ~0x1ffu;
+			reg |= (1 << 8) | PIC_PCIE_LINK_LEGACY_IRQ(link);
 			nlm_write_pci_reg(pciebase, 0xf, reg);
 			pr_info("XLP PCIe: Link %d-%d initialized.\n", n, link);
 		}
diff --git a/arch/mips/pmcs-msp71xx/Kconfig b/arch/mips/pmcs-msp71xx/Kconfig
index 3482b8c..6073ca4 100644
--- a/arch/mips/pmcs-msp71xx/Kconfig
+++ b/arch/mips/pmcs-msp71xx/Kconfig
@@ -6,6 +6,7 @@
 	bool "PMC-Sierra MSP4200 Eval Board"
 	select IRQ_MSP_SLP
 	select HW_HAS_PCI
+	select MIPS_L1_CACHE_SHIFT_4
 
 config PMC_MSP4200_GW
 	bool "PMC-Sierra MSP4200 VoIP Gateway"
diff --git a/arch/mips/ralink/Kconfig b/arch/mips/ralink/Kconfig
index 424f034..1bfd1c1 100644
--- a/arch/mips/ralink/Kconfig
+++ b/arch/mips/ralink/Kconfig
@@ -15,6 +15,7 @@
 
 	config SOC_RT288X
 		bool "RT288x"
+		select MIPS_L1_CACHE_SHIFT_4
 
 	config SOC_RT305X
 		bool "RT305x"
diff --git a/arch/mips/sgi-ip27/ip27-console.c b/arch/mips/sgi-ip27/ip27-console.c
index b952d5b..45fdfbc 100644
--- a/arch/mips/sgi-ip27/ip27-console.c
+++ b/arch/mips/sgi-ip27/ip27-console.c
@@ -5,7 +5,6 @@
  *
  * Copyright (C) 2001, 2002 Ralf Baechle
  */
-#include <linux/init.h>
 
 #include <asm/page.h>
 #include <asm/sn/addrs.h>
diff --git a/arch/mips/sgi-ip27/ip27-irq-pci.c b/arch/mips/sgi-ip27/ip27-irq-pci.c
index ec22ec5..2a1c407 100644
--- a/arch/mips/sgi-ip27/ip27-irq-pci.c
+++ b/arch/mips/sgi-ip27/ip27-irq-pci.c
@@ -8,7 +8,6 @@
 
 #undef DEBUG
 
-#include <linux/init.h>
 #include <linux/irq.h>
 #include <linux/errno.h>
 #include <linux/signal.h>
diff --git a/arch/mips/sgi-ip27/ip27-klconfig.c b/arch/mips/sgi-ip27/ip27-klconfig.c
index 7afe146..c873d62 100644
--- a/arch/mips/sgi-ip27/ip27-klconfig.c
+++ b/arch/mips/sgi-ip27/ip27-klconfig.c
@@ -2,7 +2,6 @@
  * Copyright (C) 1999, 2000 Ralf Baechle (ralf@gnu.org)
  * Copyright (C) 1999, 2000 Silicon Graphics, Inc.
  */
-#include <linux/init.h>
 #include <linux/kernel.h>
 #include <linux/sched.h>
 #include <linux/interrupt.h>
diff --git a/arch/mips/sgi-ip27/ip27-xtalk.c b/arch/mips/sgi-ip27/ip27-xtalk.c
index d59b820..20f582a 100644
--- a/arch/mips/sgi-ip27/ip27-xtalk.c
+++ b/arch/mips/sgi-ip27/ip27-xtalk.c
@@ -7,7 +7,6 @@
  * Generic XTALK initialization code
  */
 
-#include <linux/init.h>
 #include <linux/kernel.h>
 #include <linux/smp.h>
 #include <asm/sn/types.h>
diff --git a/arch/mn10300/Makefile b/arch/mn10300/Makefile
index a3d0fef..3f1ea5d 100644
--- a/arch/mn10300/Makefile
+++ b/arch/mn10300/Makefile
@@ -92,14 +92,6 @@
   echo  '* zImage        - Compressed kernel image (arch/$(ARCH)/boot/zImage)'
 endef
 
-# If you make sure the .S files get compiled with debug info,
-# uncomment the following to disable optimisations
-# that are unhelpful whilst debugging.
-ifdef CONFIG_DEBUG_INFO
-#KBUILD_CFLAGS	+= -O1
-KBUILD_AFLAGS	+= -Wa,--gdwarf2
-endif
-
 #
 # include the appropriate processor- and unit-specific headers
 #
diff --git a/arch/openrisc/kernel/entry.S b/arch/openrisc/kernel/entry.S
index d8a455e..fec8bf9 100644
--- a/arch/openrisc/kernel/entry.S
+++ b/arch/openrisc/kernel/entry.S
@@ -853,38 +853,45 @@
 
 /* ========================================================[ return ] === */
 
-_work_pending:
-	/*
-	 * if (current_thread_info->flags & _TIF_NEED_RESCHED)
-	 *     schedule();
-	 */
-	l.lwz   r5,TI_FLAGS(r10)
-	l.andi	r3,r5,_TIF_NEED_RESCHED
-	l.sfnei r3,0
-	l.bnf   _work_notifysig
-	 l.nop
-	l.jal   schedule
-	 l.nop
-	l.j	_resume_userspace
-	 l.nop
-
-/* Handle pending signals and notify-resume requests.
- * do_notify_resume must be passed the latest pushed pt_regs, not
- * necessarily the "userspace" ones.  Also, pt_regs->syscallno
- * must be set so that the syscall restart functionality works.
- */
-_work_notifysig:
-	l.jal	do_notify_resume
-	 l.ori	r3,r1,0		  /* pt_regs */
-
 _resume_userspace:
 	DISABLE_INTERRUPTS(r3,r4)
-	l.lwz	r3,TI_FLAGS(r10)
-	l.andi	r3,r3,_TIF_WORK_MASK
-	l.sfnei	r3,0
-	l.bf	_work_pending
+	l.lwz	r4,TI_FLAGS(r10)
+	l.andi	r13,r4,_TIF_WORK_MASK
+	l.sfeqi	r13,0
+	l.bf	_restore_all
 	 l.nop
 
+_work_pending:
+	l.lwz	r5,PT_ORIG_GPR11(r1)
+	l.sfltsi r5,0
+	l.bnf	1f
+	 l.nop
+	l.andi	r5,r5,0
+1:
+	l.jal	do_work_pending
+	 l.ori	r3,r1,0			/* pt_regs */
+
+	l.sfeqi	r11,0
+	l.bf	_restore_all
+	 l.nop
+	l.sfltsi r11,0
+	l.bnf	1f
+	 l.nop
+	l.and	r11,r11,r0
+	l.ori	r11,r11,__NR_restart_syscall
+	l.j	_syscall_check_trace_enter
+	 l.nop
+1:
+	l.lwz	r11,PT_ORIG_GPR11(r1)
+	/* Restore arg registers */
+	l.lwz	r3,PT_GPR3(r1)
+	l.lwz	r4,PT_GPR4(r1)
+	l.lwz	r5,PT_GPR5(r1)
+	l.lwz	r6,PT_GPR6(r1)
+	l.lwz	r7,PT_GPR7(r1)
+	l.j	_syscall_check_trace_enter
+	 l.lwz	r8,PT_GPR8(r1)
+
 _restore_all:
 	RESTORE_ALL
 	/* This returns to userspace code */
diff --git a/arch/openrisc/kernel/signal.c b/arch/openrisc/kernel/signal.c
index ae167f7..66775bc 100644
--- a/arch/openrisc/kernel/signal.c
+++ b/arch/openrisc/kernel/signal.c
@@ -28,24 +28,24 @@
 #include <linux/tracehook.h>
 
 #include <asm/processor.h>
+#include <asm/syscall.h>
 #include <asm/ucontext.h>
 #include <asm/uaccess.h>
 
 #define DEBUG_SIG 0
 
 struct rt_sigframe {
-	struct siginfo *pinfo;
-	void *puc;
 	struct siginfo info;
 	struct ucontext uc;
 	unsigned char retcode[16];	/* trampoline code */
 };
 
-static int restore_sigcontext(struct pt_regs *regs, struct sigcontext *sc)
+static int restore_sigcontext(struct pt_regs *regs,
+			      struct sigcontext __user *sc)
 {
-	unsigned int err = 0;
+	int err = 0;
 
-	/* Alwys make any pending restarted system call return -EINTR */
+	/* Always make any pending restarted system calls return -EINTR */
 	current_thread_info()->restart_block.fn = do_no_restart_syscall;
 
 	/*
@@ -53,25 +53,21 @@
 	 * (sc is already checked for VERIFY_READ since the sigframe was
 	 *  checked in sys_sigreturn previously)
 	 */
-	if (__copy_from_user(regs, sc->regs.gpr, 32 * sizeof(unsigned long)))
-		goto badframe;
-	if (__copy_from_user(&regs->pc, &sc->regs.pc, sizeof(unsigned long)))
-		goto badframe;
-	if (__copy_from_user(&regs->sr, &sc->regs.sr, sizeof(unsigned long)))
-		goto badframe;
+	err |= __copy_from_user(regs, sc->regs.gpr, 32 * sizeof(unsigned long));
+	err |= __copy_from_user(&regs->pc, &sc->regs.pc, sizeof(unsigned long));
+	err |= __copy_from_user(&regs->sr, &sc->regs.sr, sizeof(unsigned long));
 
 	/* make sure the SM-bit is cleared so user-mode cannot fool us */
 	regs->sr &= ~SPR_SR_SM;
 
+	regs->orig_gpr11 = -1;	/* Avoid syscall restart checks */
+
 	/* TODO: the other ports use regs->orig_XX to disable syscall checks
 	 * after this completes, but we don't use that mechanism. maybe we can
 	 * use it now ?
 	 */
 
 	return err;
-
-badframe:
-	return 1;
 }
 
 asmlinkage long _sys_rt_sigreturn(struct pt_regs *regs)
@@ -111,21 +107,18 @@
  * Set up a signal frame.
  */
 
-static int setup_sigcontext(struct sigcontext *sc, struct pt_regs *regs,
-			    unsigned long mask)
+static int setup_sigcontext(struct pt_regs *regs, struct sigcontext __user *sc)
 {
 	int err = 0;
 
 	/* copy the regs */
-
+	/* There should be no need to save callee-saved registers here...
+	 * ...but we save them anyway.  Revisit this.
+	 */
 	err |= __copy_to_user(sc->regs.gpr, regs, 32 * sizeof(unsigned long));
 	err |= __copy_to_user(&sc->regs.pc, &regs->pc, sizeof(unsigned long));
 	err |= __copy_to_user(&sc->regs.sr, &regs->sr, sizeof(unsigned long));
 
-	/* then some other stuff */
-
-	err |= __put_user(mask, &sc->oldmask);
-
 	return err;
 }
 
@@ -173,55 +166,53 @@
  * trampoline which performs the syscall sigreturn, or a provided
  * user-mode trampoline.
  */
-static int setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
-			  sigset_t *set, struct pt_regs *regs)
+static int setup_rt_frame(struct ksignal *ksig, sigset_t *set,
+			  struct pt_regs *regs)
 {
 	struct rt_sigframe *frame;
 	unsigned long return_ip;
 	int err = 0;
 
-	frame = get_sigframe(ka, regs, sizeof(*frame));
+	frame = get_sigframe(&ksig->ka, regs, sizeof(*frame));
 
 	if (!access_ok(VERIFY_WRITE, frame, sizeof(*frame)))
-		goto give_sigsegv;
+		return -EFAULT;
 
-	err |= __put_user(&frame->info, &frame->pinfo);
-	err |= __put_user(&frame->uc, &frame->puc);
+	/* Create siginfo.  */
+	if (ksig->ka.sa.sa_flags & SA_SIGINFO)
+		err |= copy_siginfo_to_user(&frame->info, &ksig->info);
 
-	if (ka->sa.sa_flags & SA_SIGINFO)
-		err |= copy_siginfo_to_user(&frame->info, info);
-	if (err)
-		goto give_sigsegv;
-
-	/* Clear all the bits of the ucontext we don't use.  */
-	err |= __clear_user(&frame->uc, offsetof(struct ucontext, uc_mcontext));
+	/* Create the ucontext.  */
 	err |= __put_user(0, &frame->uc.uc_flags);
 	err |= __put_user(NULL, &frame->uc.uc_link);
 	err |= __save_altstack(&frame->uc.uc_stack, regs->sp);
-	err |= setup_sigcontext(&frame->uc.uc_mcontext, regs, set->sig[0]);
+	err |= setup_sigcontext(regs, &frame->uc.uc_mcontext);
 
 	err |= __copy_to_user(&frame->uc.uc_sigmask, set, sizeof(*set));
 
 	if (err)
-		goto give_sigsegv;
+		return -EFAULT;
 
 	/* trampoline - the desired return ip is the retcode itself */
 	return_ip = (unsigned long)&frame->retcode;
-	/* This is l.ori r11,r0,__NR_sigreturn, l.sys 1 */
-	err |= __put_user(0xa960, (short *)(frame->retcode + 0));
-	err |= __put_user(__NR_rt_sigreturn, (short *)(frame->retcode + 2));
+	/* This is:
+		l.ori r11,r0,__NR_rt_sigreturn
+		l.sys 1
+	 */
+	err |= __put_user(0xa960,             (short *)(frame->retcode + 0));
+	err |= __put_user(__NR_rt_sigreturn,  (short *)(frame->retcode + 2));
 	err |= __put_user(0x20000001, (unsigned long *)(frame->retcode + 4));
 	err |= __put_user(0x15000000, (unsigned long *)(frame->retcode + 8));
 
 	if (err)
-		goto give_sigsegv;
+		return -EFAULT;
 
 	/* TODO what is the current->exec_domain stuff and invmap ? */
 
 	/* Set up registers for signal handler */
-	regs->pc = (unsigned long)ka->sa.sa_handler; /* what we enter NOW */
+	regs->pc = (unsigned long)ksig->ka.sa.sa_handler; /* what we enter NOW */
 	regs->gpr[9] = (unsigned long)return_ip;     /* what we enter LATER */
-	regs->gpr[3] = (unsigned long)sig;           /* arg 1: signo */
+	regs->gpr[3] = (unsigned long)ksig->sig;           /* arg 1: signo */
 	regs->gpr[4] = (unsigned long)&frame->info;  /* arg 2: (siginfo_t*) */
 	regs->gpr[5] = (unsigned long)&frame->uc;    /* arg 3: ucontext */
 
@@ -229,25 +220,16 @@
 	regs->sp = (unsigned long)frame;
 
 	return 0;
-
-give_sigsegv:
-	force_sigsegv(sig, current);
-	return -EFAULT;
 }
 
 static inline void
-handle_signal(unsigned long sig,
-	      siginfo_t *info, struct k_sigaction *ka,
-	      struct pt_regs *regs)
+handle_signal(struct ksignal *ksig, struct pt_regs *regs)
 {
 	int ret;
 
-	ret = setup_rt_frame(sig, ka, info, sigmask_to_save(), regs);
-	if (ret)
-		return;
+	ret = setup_rt_frame(ksig, sigmask_to_save(), regs);
 
-	signal_delivered(sig, info, ka, regs,
-				 test_thread_flag(TIF_SINGLESTEP));
+	signal_setup_done(ret, ksig, test_thread_flag(TIF_SINGLESTEP));
 }
 
 /*
@@ -262,82 +244,99 @@
  * mode below.
  */
 
-void do_signal(struct pt_regs *regs)
+int do_signal(struct pt_regs *regs, int syscall)
 {
-	siginfo_t info;
-	int signr;
-	struct k_sigaction ka;
+	struct ksignal ksig;
+	unsigned long continue_addr = 0;
+	unsigned long restart_addr = 0;
+	unsigned long retval = 0;
+	int restart = 0;
+
+	if (syscall) {
+		continue_addr = regs->pc;
+		restart_addr = continue_addr - 4;
+		retval = regs->gpr[11];
+
+		/*
+		 * Set up the syscall restart here so that a debugger will
+		 * see the already changed PC.
+		 */
+		switch (retval) {
+		case -ERESTART_RESTARTBLOCK:
+			restart = -2;
+			/* Fall through */
+		case -ERESTARTNOHAND:
+		case -ERESTARTSYS:
+		case -ERESTARTNOINTR:
+			restart++;
+			regs->gpr[11] = regs->orig_gpr11;
+			regs->pc = restart_addr;
+			break;
+		}
+	}
 
 	/*
-	 * We want the common case to go fast, which
-	 * is why we may in certain cases get here from
-	 * kernel mode. Just return without doing anything
-	 * if so.
+	 * Get the signal to deliver.  During the call to get_signal() the
+	 * debugger may change all our registers, so we may need to revert
+	 * the decision to restart the syscall; specifically, if the PC is
+	 * changed, don't restart the syscall.
 	 */
-	if (!user_mode(regs))
-		return;
-
-	signr = get_signal_to_deliver(&info, &ka, regs, NULL);
-
-	/* If we are coming out of a syscall then we need
-	 * to check if the syscall was interrupted and wants to be
-	 * restarted after handling the signal.  If so, the original
-	 * syscall number is put back into r11 and the PC rewound to
-	 * point at the l.sys instruction that resulted in the
-	 * original syscall.  Syscall results other than the four
-	 * below mean that the syscall executed to completion and no
-	 * restart is necessary.
-	 */
-	if (regs->orig_gpr11) {
-		int restart = 0;
-
-		switch (regs->gpr[11]) {
-		case -ERESTART_RESTARTBLOCK:
-		case -ERESTARTNOHAND:
-			/* Restart if there is no signal handler */
-			restart = (signr <= 0);
-			break;
-		case -ERESTARTSYS:
-			/* Restart if there no signal handler or
-			 * SA_RESTART flag is set */
-			restart = (signr <= 0 || (ka.sa.sa_flags & SA_RESTART));
-			break;
-		case -ERESTARTNOINTR:
-			/* Always restart */
-			restart = 1;
-			break;
+	if (get_signal(&ksig)) {
+		if (unlikely(restart) && regs->pc == restart_addr) {
+			if (retval == -ERESTARTNOHAND ||
+			    retval == -ERESTART_RESTARTBLOCK
+			    || (retval == -ERESTARTSYS
+			        && !(ksig.ka.sa.sa_flags & SA_RESTART))) {
+				/* No automatic restart */
+				regs->gpr[11] = -EINTR;
+				regs->pc = continue_addr;
+			}
 		}
-
-		if (restart) {
-			if (regs->gpr[11] == -ERESTART_RESTARTBLOCK)
-				regs->gpr[11] = __NR_restart_syscall;
-			else
-				regs->gpr[11] = regs->orig_gpr11;
-			regs->pc -= 4;
-		} else {
-			regs->gpr[11] = -EINTR;
-		}
-	}
-
-	if (signr <= 0) {
-		/* no signal to deliver so we just put the saved sigmask
-		 * back */
+		handle_signal(&ksig, regs);
+	} else {
+		/* no handler */
 		restore_saved_sigmask();
-	} else {		/* signr > 0 */
-		/* Whee!  Actually deliver the signal.  */
-		handle_signal(signr, &info, &ka, regs);
+		/*
+		 * Restore the pt_regs PC, as the syscall restart will be
+		 * handled by the kernel without returning to userspace
+		 */
+		if (unlikely(restart) && regs->pc == restart_addr) {
+			regs->pc = continue_addr;
+			return restart;
+		}
 	}
 
-	return;
+	return 0;
 }
 
-asmlinkage void do_notify_resume(struct pt_regs *regs)
+asmlinkage int
+do_work_pending(struct pt_regs *regs, unsigned int thread_flags, int syscall)
 {
-	if (current_thread_info()->flags & _TIF_SIGPENDING)
-		do_signal(regs);
-
-	if (current_thread_info()->flags & _TIF_NOTIFY_RESUME) {
-		clear_thread_flag(TIF_NOTIFY_RESUME);
-		tracehook_notify_resume(regs);
-	}
+	do {
+		if (likely(thread_flags & _TIF_NEED_RESCHED)) {
+			schedule();
+		} else {
+			if (unlikely(!user_mode(regs)))
+				return 0;
+			local_irq_enable();
+			if (thread_flags & _TIF_SIGPENDING) {
+				int restart = do_signal(regs, syscall);
+				if (unlikely(restart)) {
+					/*
+					 * Restart without handlers.
+					 * Deal with it without leaving
+					 * the kernel space.
+					 */
+					return restart;
+				}
+				syscall = 0;
+			} else {
+				clear_thread_flag(TIF_NOTIFY_RESUME);
+				tracehook_notify_resume(regs);
+			}
+		}
+		local_irq_disable();
+		thread_flags = current_thread_info()->flags;
+	} while (thread_flags & _TIF_WORK_MASK);
+	return 0;
 }
diff --git a/arch/parisc/hpux/fs.c b/arch/parisc/hpux/fs.c
index 88d0962..2bedafe 100644
--- a/arch/parisc/hpux/fs.c
+++ b/arch/parisc/hpux/fs.c
@@ -33,22 +33,9 @@
 
 int hpux_execve(struct pt_regs *regs)
 {
-	int error;
-	struct filename *filename;
-
-	filename = getname((const char __user *) regs->gr[26]);
-	error = PTR_ERR(filename);
-	if (IS_ERR(filename))
-		goto out;
-
-	error = do_execve(filename->name,
+	return  do_execve(getname((const char __user *) regs->gr[26]),
 			  (const char __user *const __user *) regs->gr[25],
 			  (const char __user *const __user *) regs->gr[24]);
-
-	putname(filename);
-
-out:
-	return error;
 }
 
 struct hpux_dirent {
diff --git a/arch/parisc/include/asm/cacheflush.h b/arch/parisc/include/asm/cacheflush.h
index 2f9b751..de65f66 100644
--- a/arch/parisc/include/asm/cacheflush.h
+++ b/arch/parisc/include/asm/cacheflush.h
@@ -132,7 +132,6 @@
 static inline void *kmap(struct page *page)
 {
 	might_sleep();
-	flush_dcache_page(page);
 	return page_address(page);
 }
 
@@ -144,7 +143,6 @@
 static inline void *kmap_atomic(struct page *page)
 {
 	pagefault_disable();
-	flush_dcache_page(page);
 	return page_address(page);
 }
 
diff --git a/arch/parisc/include/asm/elf.h b/arch/parisc/include/asm/elf.h
index ad2b503..3391d06 100644
--- a/arch/parisc/include/asm/elf.h
+++ b/arch/parisc/include/asm/elf.h
@@ -348,4 +348,8 @@
 
 #define ELF_HWCAP	0
 
+struct mm_struct;
+extern unsigned long arch_randomize_brk(struct mm_struct *);
+#define arch_randomize_brk arch_randomize_brk
+
 #endif
diff --git a/arch/parisc/include/asm/page.h b/arch/parisc/include/asm/page.h
index c53fc63..637fe03 100644
--- a/arch/parisc/include/asm/page.h
+++ b/arch/parisc/include/asm/page.h
@@ -29,7 +29,8 @@
 void clear_page_asm(void *page);
 void copy_page_asm(void *to, void *from);
 #define clear_user_page(vto, vaddr, page) clear_page_asm(vto)
-#define copy_user_page(vto, vfrom, vaddr, page) copy_page_asm(vto, vfrom)
+void copy_user_page(void *vto, void *vfrom, unsigned long vaddr,
+			struct page *pg);
 
 /* #define CONFIG_PARISC_TMPALIAS */
 
diff --git a/arch/parisc/include/asm/pgtable.h b/arch/parisc/include/asm/pgtable.h
index 34899b5..22b89d1 100644
--- a/arch/parisc/include/asm/pgtable.h
+++ b/arch/parisc/include/asm/pgtable.h
@@ -511,6 +511,7 @@
 /* We provide our own get_unmapped_area to provide cache coherency */
 
 #define HAVE_ARCH_UNMAPPED_AREA
+#define HAVE_ARCH_UNMAPPED_AREA_TOPDOWN
 
 #define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG
 #define __HAVE_ARCH_PTEP_GET_AND_CLEAR
diff --git a/arch/parisc/include/asm/processor.h b/arch/parisc/include/asm/processor.h
index cc2290a..198a86f 100644
--- a/arch/parisc/include/asm/processor.h
+++ b/arch/parisc/include/asm/processor.h
@@ -30,6 +30,8 @@
 #endif
 #define current_text_addr() ({ void *pc; current_ia(pc); pc; })
 
+#define HAVE_ARCH_PICK_MMAP_LAYOUT
+
 #define TASK_SIZE_OF(tsk)       ((tsk)->thread.task_size)
 #define TASK_SIZE	        TASK_SIZE_OF(current)
 #define TASK_UNMAPPED_BASE      (current->thread.map_base)
diff --git a/arch/parisc/include/asm/thread_info.h b/arch/parisc/include/asm/thread_info.h
index d5f97ea..4b9b10c 100644
--- a/arch/parisc/include/asm/thread_info.h
+++ b/arch/parisc/include/asm/thread_info.h
@@ -76,6 +76,16 @@
 #define _TIF_SYSCALL_TRACE_MASK (_TIF_SYSCALL_TRACE | _TIF_SINGLESTEP |	\
 				 _TIF_BLOCKSTEP | _TIF_SYSCALL_AUDIT)
 
+#ifdef CONFIG_64BIT
+# ifdef CONFIG_COMPAT
+#  define is_32bit_task()	(test_thread_flag(TIF_32BIT))
+# else
+#  define is_32bit_task()	(0)
+# endif
+#else
+# define is_32bit_task()	(1)
+#endif
+
 #endif /* __KERNEL__ */
 
 #endif /* _ASM_PARISC_THREAD_INFO_H */
diff --git a/arch/parisc/include/uapi/asm/errno.h b/arch/parisc/include/uapi/asm/errno.h
index f3a8aa5..c0ae625 100644
--- a/arch/parisc/include/uapi/asm/errno.h
+++ b/arch/parisc/include/uapi/asm/errno.h
@@ -106,7 +106,7 @@
 
 #define	EALREADY	244	/* Operation already in progress */
 #define	EINPROGRESS	245	/* Operation now in progress */
-#define	EWOULDBLOCK	246	/* Operation would block (Linux returns EAGAIN) */
+#define	EWOULDBLOCK	EAGAIN	/* Operation would block (Not HPUX compliant) */
 #define	ENOTEMPTY	247	/* Directory not empty */
 #define	ENAMETOOLONG	248	/* File name too long */
 #define	ELOOP		249	/* Too many symbolic links encountered */
diff --git a/arch/parisc/include/uapi/asm/stat.h b/arch/parisc/include/uapi/asm/stat.h
index d76fbda..b606b36 100644
--- a/arch/parisc/include/uapi/asm/stat.h
+++ b/arch/parisc/include/uapi/asm/stat.h
@@ -5,67 +5,65 @@
 
 struct stat {
 	unsigned int	st_dev;		/* dev_t is 32 bits on parisc */
-	ino_t		st_ino;		/* 32 bits */
-	mode_t		st_mode;	/* 16 bits */
+	unsigned int	st_ino;		/* 32 bits */
+	unsigned short	st_mode;	/* 16 bits */
 	unsigned short	st_nlink;	/* 16 bits */
 	unsigned short	st_reserved1;	/* old st_uid */
 	unsigned short	st_reserved2;	/* old st_gid */
 	unsigned int	st_rdev;
-	off_t		st_size;
-	time_t		st_atime;
+	signed int	st_size;
+	signed int	st_atime;
 	unsigned int	st_atime_nsec;
-	time_t		st_mtime;
+	signed int	st_mtime;
 	unsigned int	st_mtime_nsec;
-	time_t		st_ctime;
+	signed int	st_ctime;
 	unsigned int	st_ctime_nsec;
 	int		st_blksize;
 	int		st_blocks;
 	unsigned int	__unused1;	/* ACL stuff */
 	unsigned int	__unused2;	/* network */
-	ino_t		__unused3;	/* network */
+	unsigned int	__unused3;	/* network */
 	unsigned int	__unused4;	/* cnodes */
 	unsigned short	__unused5;	/* netsite */
 	short		st_fstype;
 	unsigned int	st_realdev;
 	unsigned short	st_basemode;
 	unsigned short	st_spareshort;
-	uid_t		st_uid;
-	gid_t		st_gid;
+	unsigned int	st_uid;
+	unsigned int	st_gid;
 	unsigned int	st_spare4[3];
 };
 
 #define STAT_HAVE_NSEC
 
-typedef __kernel_off64_t	off64_t;
-
 struct hpux_stat64 {
 	unsigned int	st_dev;		/* dev_t is 32 bits on parisc */
-	ino_t           st_ino;         /* 32 bits */
-	mode_t		st_mode;	/* 16 bits */
+	unsigned int	st_ino;         /* 32 bits */
+	unsigned short	st_mode;	/* 16 bits */
 	unsigned short	st_nlink;	/* 16 bits */
 	unsigned short	st_reserved1;	/* old st_uid */
 	unsigned short	st_reserved2;	/* old st_gid */
 	unsigned int	st_rdev;
-	off64_t		st_size;
-	time_t		st_atime;
+	signed long long st_size;
+	signed int	st_atime;
 	unsigned int	st_spare1;
-	time_t		st_mtime;
+	signed int	st_mtime;
 	unsigned int	st_spare2;
-	time_t		st_ctime;
+	signed int	st_ctime;
 	unsigned int	st_spare3;
 	int		st_blksize;
-	__u64		st_blocks;
+	unsigned long long st_blocks;
 	unsigned int	__unused1;	/* ACL stuff */
 	unsigned int	__unused2;	/* network */
-	ino_t           __unused3;      /* network */
+	unsigned int	__unused3;      /* network */
 	unsigned int	__unused4;	/* cnodes */
 	unsigned short	__unused5;	/* netsite */
 	short		st_fstype;
 	unsigned int	st_realdev;
 	unsigned short	st_basemode;
 	unsigned short	st_spareshort;
-	uid_t		st_uid;
-	gid_t		st_gid;
+	unsigned int	st_uid;
+	unsigned int	st_gid;
 	unsigned int	st_spare4[3];
 };
 
diff --git a/arch/parisc/include/uapi/asm/unistd.h b/arch/parisc/include/uapi/asm/unistd.h
index 2c8b9bd..4270679 100644
--- a/arch/parisc/include/uapi/asm/unistd.h
+++ b/arch/parisc/include/uapi/asm/unistd.h
@@ -826,8 +826,10 @@
 #define __NR_process_vm_writev	(__NR_Linux + 331)
 #define __NR_kcmp		(__NR_Linux + 332)
 #define __NR_finit_module	(__NR_Linux + 333)
+#define __NR_sched_setattr	(__NR_Linux + 334)
+#define __NR_sched_getattr	(__NR_Linux + 335)
 
-#define __NR_Linux_syscalls	(__NR_finit_module + 1)
+#define __NR_Linux_syscalls	(__NR_sched_getattr + 1)
 
 
 #define __IGNORE_select		/* newselect */
diff --git a/arch/parisc/kernel/cache.c b/arch/parisc/kernel/cache.c
index a725455..ac87a40 100644
--- a/arch/parisc/kernel/cache.c
+++ b/arch/parisc/kernel/cache.c
@@ -388,6 +388,20 @@
 }
 EXPORT_SYMBOL(flush_kernel_dcache_page_addr);
 
+void copy_user_page(void *vto, void *vfrom, unsigned long vaddr,
+	struct page *pg)
+{
+	/*
+	 * Copy using the kernel mapping.  No coherency is needed (all in
+	 * kunmap) for the `to' page.  However, the `from' page needs to
+	 * be flushed through a mapping equivalent to the user mapping
+	 * before it can be accessed through the kernel mapping.
+	 */
+	preempt_disable();
+	flush_dcache_page_asm(__pa(vfrom), vaddr);
+	preempt_enable();
+	copy_page_asm(vto, vfrom);
+}
+EXPORT_SYMBOL(copy_user_page);
+
 void purge_tlb_entries(struct mm_struct *mm, unsigned long addr)
 {
 	unsigned long flags;
diff --git a/arch/parisc/kernel/process.c b/arch/parisc/kernel/process.c
index 55f92b6..0bbbf0d 100644
--- a/arch/parisc/kernel/process.c
+++ b/arch/parisc/kernel/process.c
@@ -13,7 +13,7 @@
  *    Copyright (C) 2000 Grant Grundler <grundler with parisc-linux.org>
  *    Copyright (C) 2001 Alan Modra <amodra at parisc-linux.org>
  *    Copyright (C) 2001-2002 Ryan Bradetich <rbrad at parisc-linux.org>
- *    Copyright (C) 2001-2007 Helge Deller <deller at parisc-linux.org>
+ *    Copyright (C) 2001-2014 Helge Deller <deller@gmx.de>
  *    Copyright (C) 2002 Randolph Chung <tausq with parisc-linux.org>
  *
  *
@@ -49,6 +49,7 @@
 #include <linux/kallsyms.h>
 #include <linux/uaccess.h>
 #include <linux/rcupdate.h>
+#include <linux/random.h>
 
 #include <asm/io.h>
 #include <asm/asm-offsets.h>
@@ -286,3 +287,21 @@
 	return ptr;
 }
 #endif
+
+static inline unsigned long brk_rnd(void)
+{
+	/* 8MB for 32bit, 1GB for 64bit */
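+	/* i.e. up to 0x7ff or 0x3ffff pages of offset, which is roughly
+	 * 8MB or 1GB assuming 4k pages */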
+	if (is_32bit_task())
+		return (get_random_int() & 0x7ffUL) << PAGE_SHIFT;
+	else
+		return (get_random_int() & 0x3ffffUL) << PAGE_SHIFT;
+}
+
+unsigned long arch_randomize_brk(struct mm_struct *mm)
+{
+	unsigned long ret = PAGE_ALIGN(mm->brk + brk_rnd());
+
+	if (ret < mm->brk)
+		return mm->brk;
+	return ret;
+}
diff --git a/arch/parisc/kernel/sys_parisc.c b/arch/parisc/kernel/sys_parisc.c
index 0d3a9d4..b7cadc4 100644
--- a/arch/parisc/kernel/sys_parisc.c
+++ b/arch/parisc/kernel/sys_parisc.c
@@ -5,6 +5,7 @@
  *    Copyright (C) 1999-2003 Matthew Wilcox <willy at parisc-linux.org>
  *    Copyright (C) 2000-2003 Paul Bame <bame at parisc-linux.org>
  *    Copyright (C) 2001 Thomas Bogendoerfer <tsbogend at parisc-linux.org>
+ *    Copyright (C) 1999-2014 Helge Deller <deller@gmx.de>
  *
  *
  *    This program is free software; you can redistribute it and/or modify
@@ -23,6 +24,7 @@
  */
 
 #include <asm/uaccess.h>
+#include <asm/elf.h>
 #include <linux/file.h>
 #include <linux/fs.h>
 #include <linux/linkage.h>
@@ -32,78 +34,230 @@
 #include <linux/syscalls.h>
 #include <linux/utsname.h>
 #include <linux/personality.h>
+#include <linux/random.h>
 
-static unsigned long get_unshared_area(unsigned long addr, unsigned long len)
+/* we construct an artificial offset for the mapping based on the physical
+ * address of the kernel mapping variable */
+#define GET_LAST_MMAP(filp)		\
+	(filp ? ((unsigned long) filp->f_mapping) >> 8 : 0UL)
+#define SET_LAST_MMAP(filp, val)	\
+	 { /* nothing */ }
+
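+/* page offset within an SHMLBA window - the mapping's cache colour */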
+static int get_offset(unsigned int last_mmap)
 {
-	struct vm_unmapped_area_info info;
+	return (last_mmap & (SHMLBA-1)) >> PAGE_SHIFT;
+}
 
-	info.flags = 0;
-	info.length = len;
-	info.low_limit = PAGE_ALIGN(addr);
-	info.high_limit = TASK_SIZE;
-	info.align_mask = 0;
-	info.align_offset = 0;
-	return vm_unmapped_area(&info);
+static unsigned long shared_align_offset(unsigned int last_mmap,
+					 unsigned long pgoff)
+{
+	return (get_offset(last_mmap) + pgoff) << PAGE_SHIFT;
+}
+
+static inline unsigned long COLOR_ALIGN(unsigned long addr,
+			 unsigned int last_mmap, unsigned long pgoff)
+{
+	unsigned long base = (addr+SHMLBA-1) & ~(SHMLBA-1);
+	unsigned long off  = (SHMLBA-1) &
+		(shared_align_offset(last_mmap, pgoff) << PAGE_SHIFT);
+
+	return base + off;
 }
 
 /*
- * We need to know the offset to use.  Old scheme was to look for
- * existing mapping and use the same offset.  New scheme is to use the
- * address of the kernel data structure as the seed for the offset.
- * We'll see how that works...
- *
- * The mapping is cacheline aligned, so there's no information in the bottom
- * few bits of the address.  We're looking for 10 bits (4MB / 4k), so let's
- * drop the bottom 8 bits and use bits 8-17.  
+ * Top of mmap area (just below the process stack).
  */
-static int get_offset(struct address_space *mapping)
+
+static unsigned long mmap_upper_limit(void)
 {
-	return (unsigned long) mapping >> 8;
+	unsigned long stack_base;
+
+	/* Limit stack size to 1GB - see setup_arg_pages() in fs/exec.c */
+	stack_base = rlimit_max(RLIMIT_STACK);
+	if (stack_base > (1 << 30))
+		stack_base = 1 << 30;
+
+	return PAGE_ALIGN(STACK_TOP - stack_base);
 }
 
-static unsigned long shared_align_offset(struct file *filp, unsigned long pgoff)
-{
-	struct address_space *mapping = filp ? filp->f_mapping : NULL;
-
-	return (get_offset(mapping) + pgoff) << PAGE_SHIFT;
-}
-
-static unsigned long get_shared_area(struct file *filp, unsigned long addr,
-		unsigned long len, unsigned long pgoff)
-{
-	struct vm_unmapped_area_info info;
-
-	info.flags = 0;
-	info.length = len;
-	info.low_limit = PAGE_ALIGN(addr);
-	info.high_limit = TASK_SIZE;
-	info.align_mask = PAGE_MASK & (SHMLBA - 1);
-	info.align_offset = shared_align_offset(filp, pgoff);
-	return vm_unmapped_area(&info);
-}
 
 unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr,
 		unsigned long len, unsigned long pgoff, unsigned long flags)
 {
-	if (len > TASK_SIZE)
-		return -ENOMEM;
-	if (flags & MAP_FIXED) {
-		if ((flags & MAP_SHARED) &&
-		    (addr - shared_align_offset(filp, pgoff)) & (SHMLBA - 1))
-			return -EINVAL;
-		return addr;
-	}
-	if (!addr)
-		addr = TASK_UNMAPPED_BASE;
+	struct mm_struct *mm = current->mm;
+	struct vm_area_struct *vma;
+	unsigned long task_size = TASK_SIZE;
+	int do_color_align, last_mmap;
+	struct vm_unmapped_area_info info;
 
+	if (len > task_size)
+		return -ENOMEM;
+
+	do_color_align = 0;
 	if (filp || (flags & MAP_SHARED))
-		addr = get_shared_area(filp, addr, len, pgoff);
-	else
-		addr = get_unshared_area(addr, len);
+		do_color_align = 1;
+	last_mmap = GET_LAST_MMAP(filp);
+
+	if (flags & MAP_FIXED) {
+		if ((flags & MAP_SHARED) && last_mmap &&
+		    (addr - shared_align_offset(last_mmap, pgoff))
+				& (SHMLBA - 1))
+			return -EINVAL;
+		goto found_addr;
+	}
+
+	if (addr) {
+		if (do_color_align && last_mmap)
+			addr = COLOR_ALIGN(addr, last_mmap, pgoff);
+		else
+			addr = PAGE_ALIGN(addr);
+
+		vma = find_vma(mm, addr);
+		if (task_size - len >= addr &&
+		    (!vma || addr + len <= vma->vm_start))
+			goto found_addr;
+	}
+
+	info.flags = 0;
+	info.length = len;
+	info.low_limit = mm->mmap_legacy_base;
+	info.high_limit = mmap_upper_limit();
+	info.align_mask = last_mmap ? (PAGE_MASK & (SHMLBA - 1)) : 0;
+	info.align_offset = shared_align_offset(last_mmap, pgoff);
+	addr = vm_unmapped_area(&info);
+
+found_addr:
+	if (do_color_align && !last_mmap && !(addr & ~PAGE_MASK))
+		SET_LAST_MMAP(filp, addr - (pgoff << PAGE_SHIFT));
 
 	return addr;
 }
 
+unsigned long
+arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
+			  const unsigned long len, const unsigned long pgoff,
+			  const unsigned long flags)
+{
+	struct vm_area_struct *vma;
+	struct mm_struct *mm = current->mm;
+	unsigned long addr = addr0;
+	int do_color_align, last_mmap;
+	struct vm_unmapped_area_info info;
+
+#ifdef CONFIG_64BIT
+	/* This should only ever run for 32-bit processes.  */
+	BUG_ON(!test_thread_flag(TIF_32BIT));
+#endif
+
+	/* requested length too big for entire address space */
+	if (len > TASK_SIZE)
+		return -ENOMEM;
+
+	do_color_align = 0;
+	if (filp || (flags & MAP_SHARED))
+		do_color_align = 1;
+	last_mmap = GET_LAST_MMAP(filp);
+
+	if (flags & MAP_FIXED) {
+		if ((flags & MAP_SHARED) && last_mmap &&
+		    (addr - shared_align_offset(last_mmap, pgoff))
+			& (SHMLBA - 1))
+			return -EINVAL;
+		goto found_addr;
+	}
+
+	/* requesting a specific address */
+	if (addr) {
+		if (do_color_align && last_mmap)
+			addr = COLOR_ALIGN(addr, last_mmap, pgoff);
+		else
+			addr = PAGE_ALIGN(addr);
+		vma = find_vma(mm, addr);
+		if (TASK_SIZE - len >= addr &&
+		    (!vma || addr + len <= vma->vm_start))
+			goto found_addr;
+	}
+
+	info.flags = VM_UNMAPPED_AREA_TOPDOWN;
+	info.length = len;
+	info.low_limit = PAGE_SIZE;
+	info.high_limit = mm->mmap_base;
+	info.align_mask = last_mmap ? (PAGE_MASK & (SHMLBA - 1)) : 0;
+	info.align_offset = shared_align_offset(last_mmap, pgoff);
+	addr = vm_unmapped_area(&info);
+	if (!(addr & ~PAGE_MASK))
+		goto found_addr;
+	VM_BUG_ON(addr != -ENOMEM);
+
+	/*
+	 * A failed mmap() very likely causes application failure,
+	 * so fall back to the bottom-up function here. This scenario
+	 * can happen with large stack limits and large mmap()
+	 * allocations.
+	 */
+	return arch_get_unmapped_area(filp, addr0, len, pgoff, flags);
+
+found_addr:
+	if (do_color_align && !last_mmap && !(addr & ~PAGE_MASK))
+		SET_LAST_MMAP(filp, addr - (pgoff << PAGE_SHIFT));
+
+	return addr;
+}
+
+static int mmap_is_legacy(void)
+{
+	if (current->personality & ADDR_COMPAT_LAYOUT)
+		return 1;
+
+	/* The parisc stack always grows up, so an unlimited stack should
+	 * not be an indicator to use the legacy memory layout.
+	 * if (rlimit(RLIMIT_STACK) == RLIM_INFINITY)
+	 *	return 1;
+	 */
+
+	return sysctl_legacy_va_layout;
+}
+
+static unsigned long mmap_rnd(void)
+{
+	unsigned long rnd = 0;
+
+	/*
+	 *  8 bits of randomness in 32bit mmaps, 20 address space bits
+	 * 28 bits of randomness in 64bit mmaps, 40 address space bits
+	 */
+	if (current->flags & PF_RANDOMIZE) {
+		if (is_32bit_task())
+			rnd = get_random_int() % (1<<8);
+		else
+			rnd = get_random_int() % (1<<28);
+	}
+	return rnd << PAGE_SHIFT;
+}
+
+static unsigned long mmap_legacy_base(void)
+{
+	return TASK_UNMAPPED_BASE + mmap_rnd();
+}
+
+/*
+ * This function, called very early during the creation of a new
+ * process VM image, sets up which VM layout function to use:
+ */
+void arch_pick_mmap_layout(struct mm_struct *mm)
+{
+	mm->mmap_legacy_base = mmap_legacy_base();
+	mm->mmap_base = mmap_upper_limit();
+
+	if (mmap_is_legacy()) {
+		mm->mmap_base = mm->mmap_legacy_base;
+		mm->get_unmapped_area = arch_get_unmapped_area;
+	} else {
+		mm->get_unmapped_area = arch_get_unmapped_area_topdown;
+	}
+}
+
+
 asmlinkage unsigned long sys_mmap2(unsigned long addr, unsigned long len,
 	unsigned long prot, unsigned long flags, unsigned long fd,
 	unsigned long pgoff)
diff --git a/arch/parisc/kernel/syscall_table.S b/arch/parisc/kernel/syscall_table.S
index 0c91072..8fa3fbb 100644
--- a/arch/parisc/kernel/syscall_table.S
+++ b/arch/parisc/kernel/syscall_table.S
@@ -429,6 +429,8 @@
 	ENTRY_COMP(process_vm_writev)
 	ENTRY_SAME(kcmp)
 	ENTRY_SAME(finit_module)
+	ENTRY_SAME(sched_setattr)
+	ENTRY_SAME(sched_getattr)	/* 335 */
 
 	/* Nothing yet */
 
diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig
index 25493a0..957bf34 100644
--- a/arch/powerpc/Kconfig
+++ b/arch/powerpc/Kconfig
@@ -342,6 +342,8 @@
        bool "Transactional Memory support for POWERPC"
        depends on PPC_BOOK3S_64
        depends on SMP
+       select ALTIVEC
+       select VSX
        default n
        ---help---
          Support user-mode Transactional Memory on POWERPC.
@@ -532,6 +534,7 @@
 
 config PPC_64K_PAGES
 	bool "64k page size" if 44x || PPC_STD_MMU_64 || PPC_BOOK3E_64
+	depends on !PPC_FSL_BOOK3E
 	select PPC_HAS_HASH_64K if PPC_STD_MMU_64
 
 config PPC_256K_PAGES
@@ -1045,11 +1048,6 @@
 
 source "crypto/Kconfig"
 
-config PPC_CLOCK
-	bool
-	default n
-	select HAVE_CLK
-
 config PPC_LIB_RHEAP
 	bool
 
diff --git a/arch/powerpc/boot/dts/ac14xx.dts b/arch/powerpc/boot/dts/ac14xx.dts
index a543c40..a1b8837 100644
--- a/arch/powerpc/boot/dts/ac14xx.dts
+++ b/arch/powerpc/boot/dts/ac14xx.dts
@@ -139,7 +139,14 @@
 		};
 	};
 
+	clocks {
+		osc {
+			clock-frequency = <25000000>;
+		};
+	};
+
 	soc@80000000 {
+		bus-frequency = <80000000>;	/* 80 MHz ips bus */
 
 		clock@f00 {
 			compatible = "fsl,mpc5121rev2-clock", "fsl,mpc5121-clock";
diff --git a/arch/powerpc/boot/dts/mpc5121.dtsi b/arch/powerpc/boot/dts/mpc5121.dtsi
index 2d7cb04..2c0e155 100644
--- a/arch/powerpc/boot/dts/mpc5121.dtsi
+++ b/arch/powerpc/boot/dts/mpc5121.dtsi
@@ -9,6 +9,8 @@
  * option) any later version.
  */
 
+#include <dt-bindings/clock/mpc512x-clock.h>
+
 /dts-v1/;
 
 / {
@@ -49,6 +51,10 @@
 		compatible = "fsl,mpc5121-mbx";
 		reg = <0x20000000 0x4000>;
 		interrupts = <66 0x8>;
+		clocks = <&clks MPC512x_CLK_MBX_BUS>,
+			 <&clks MPC512x_CLK_MBX_3D>,
+			 <&clks MPC512x_CLK_MBX>;
+		clock-names = "mbx-bus", "mbx-3d", "mbx";
 	};
 
 	sram@30000000 {
@@ -62,6 +68,8 @@
 		interrupts = <6 8>;
 		#address-cells = <1>;
 		#size-cells = <1>;
+		clocks = <&clks MPC512x_CLK_NFC>;
+		clock-names = "ipg";
 	};
 
 	localbus@80000020 {
@@ -73,6 +81,17 @@
 		ranges = <0x0 0x0 0xfc000000 0x04000000>;
 	};
 
+	clocks {
+		#address-cells = <1>;
+		#size-cells = <0>;
+
+		osc: osc {
+			compatible = "fixed-clock";
+			#clock-cells = <0>;
+			clock-frequency = <33000000>;
+		};
+	};
+
 	soc@80000000 {
 		compatible = "fsl,mpc5121-immr";
 		#address-cells = <1>;
@@ -117,9 +136,12 @@
 		};
 
 		/* Clock control */
-		clock@f00 {
+		clks: clock@f00 {
 			compatible = "fsl,mpc5121-clock";
 			reg = <0xf00 0x100>;
+			#clock-cells = <1>;
+			clocks = <&osc>;
+			clock-names = "osc";
 		};
 
 		/* Power Management Controller */
@@ -139,12 +161,24 @@
 			compatible = "fsl,mpc5121-mscan";
 			reg = <0x1300 0x80>;
 			interrupts = <12 0x8>;
+			clocks = <&clks MPC512x_CLK_BDLC>,
+				 <&clks MPC512x_CLK_IPS>,
+				 <&clks MPC512x_CLK_SYS>,
+				 <&clks MPC512x_CLK_REF>,
+				 <&clks MPC512x_CLK_MSCAN0_MCLK>;
+			clock-names = "ipg", "ips", "sys", "ref", "mclk";
 		};
 
 		can@1380 {
 			compatible = "fsl,mpc5121-mscan";
 			reg = <0x1380 0x80>;
 			interrupts = <13 0x8>;
+			clocks = <&clks MPC512x_CLK_BDLC>,
+				 <&clks MPC512x_CLK_IPS>,
+				 <&clks MPC512x_CLK_SYS>,
+				 <&clks MPC512x_CLK_REF>,
+				 <&clks MPC512x_CLK_MSCAN1_MCLK>;
+			clock-names = "ipg", "ips", "sys", "ref", "mclk";
 		};
 
 		sdhc@1500 {
@@ -153,6 +187,9 @@
 			interrupts = <8 0x8>;
 			dmas = <&dma0 30>;
 			dma-names = "rx-tx";
+			clocks = <&clks MPC512x_CLK_IPS>,
+				 <&clks MPC512x_CLK_SDHC>;
+			clock-names = "ipg", "per";
 		};
 
 		i2c@1700 {
@@ -161,6 +198,8 @@
 			compatible = "fsl,mpc5121-i2c", "fsl-i2c";
 			reg = <0x1700 0x20>;
 			interrupts = <9 0x8>;
+			clocks = <&clks MPC512x_CLK_I2C>;
+			clock-names = "ipg";
 		};
 
 		i2c@1720 {
@@ -169,6 +208,8 @@
 			compatible = "fsl,mpc5121-i2c", "fsl-i2c";
 			reg = <0x1720 0x20>;
 			interrupts = <10 0x8>;
+			clocks = <&clks MPC512x_CLK_I2C>;
+			clock-names = "ipg";
 		};
 
 		i2c@1740 {
@@ -177,6 +218,8 @@
 			compatible = "fsl,mpc5121-i2c", "fsl-i2c";
 			reg = <0x1740 0x20>;
 			interrupts = <11 0x8>;
+			clocks = <&clks MPC512x_CLK_I2C>;
+			clock-names = "ipg";
 		};
 
 		i2ccontrol@1760 {
@@ -188,30 +231,48 @@
 			compatible = "fsl,mpc5121-axe";
 			reg = <0x2000 0x100>;
 			interrupts = <42 0x8>;
+			clocks = <&clks MPC512x_CLK_AXE>;
+			clock-names = "ipg";
 		};
 
 		display@2100 {
 			compatible = "fsl,mpc5121-diu";
 			reg = <0x2100 0x100>;
 			interrupts = <64 0x8>;
+			clocks = <&clks MPC512x_CLK_DIU>;
+			clock-names = "ipg";
 		};
 
 		can@2300 {
 			compatible = "fsl,mpc5121-mscan";
 			reg = <0x2300 0x80>;
 			interrupts = <90 0x8>;
+			clocks = <&clks MPC512x_CLK_BDLC>,
+				 <&clks MPC512x_CLK_IPS>,
+				 <&clks MPC512x_CLK_SYS>,
+				 <&clks MPC512x_CLK_REF>,
+				 <&clks MPC512x_CLK_MSCAN2_MCLK>;
+			clock-names = "ipg", "ips", "sys", "ref", "mclk";
 		};
 
 		can@2380 {
 			compatible = "fsl,mpc5121-mscan";
 			reg = <0x2380 0x80>;
 			interrupts = <91 0x8>;
+			clocks = <&clks MPC512x_CLK_BDLC>,
+				 <&clks MPC512x_CLK_IPS>,
+				 <&clks MPC512x_CLK_SYS>,
+				 <&clks MPC512x_CLK_REF>,
+				 <&clks MPC512x_CLK_MSCAN3_MCLK>;
+			clock-names = "ipg", "ips", "sys", "ref", "mclk";
 		};
 
 		viu@2400 {
 			compatible = "fsl,mpc5121-viu";
 			reg = <0x2400 0x400>;
 			interrupts = <67 0x8>;
+			clocks = <&clks MPC512x_CLK_VIU>;
+			clock-names = "ipg";
 		};
 
 		mdio@2800 {
@@ -219,6 +280,8 @@
 			reg = <0x2800 0x800>;
 			#address-cells = <1>;
 			#size-cells = <0>;
+			clocks = <&clks MPC512x_CLK_FEC>;
+			clock-names = "per";
 		};
 
 		eth0: ethernet@2800 {
@@ -227,6 +290,8 @@
 			reg = <0x2800 0x800>;
 			local-mac-address = [ 00 00 00 00 00 00 ];
 			interrupts = <4 0x8>;
+			clocks = <&clks MPC512x_CLK_FEC>;
+			clock-names = "per";
 		};
 
 		/* USB1 using external ULPI PHY */
@@ -238,6 +303,8 @@
 			interrupts = <43 0x8>;
 			dr_mode = "otg";
 			phy_type = "ulpi";
+			clocks = <&clks MPC512x_CLK_USB1>;
+			clock-names = "ipg";
 		};
 
 		/* USB0 using internal UTMI PHY */
@@ -249,6 +316,8 @@
 			interrupts = <44 0x8>;
 			dr_mode = "otg";
 			phy_type = "utmi_wide";
+			clocks = <&clks MPC512x_CLK_USB2>;
+			clock-names = "ipg";
 		};
 
 		/* IO control */
@@ -267,6 +336,8 @@
 			compatible = "fsl,mpc5121-pata";
 			reg = <0x10200 0x100>;
 			interrupts = <5 0x8>;
+			clocks = <&clks MPC512x_CLK_PATA>;
+			clock-names = "ipg";
 		};
 
 		/* 512x PSCs are not 52xx PSC compatible */
@@ -278,6 +349,9 @@
 			interrupts = <40 0x8>;
 			fsl,rx-fifo-size = <16>;
 			fsl,tx-fifo-size = <16>;
+			clocks = <&clks MPC512x_CLK_PSC0>,
+				 <&clks MPC512x_CLK_PSC0_MCLK>;
+			clock-names = "ipg", "mclk";
 		};
 
 		/* PSC1 */
@@ -287,6 +361,9 @@
 			interrupts = <40 0x8>;
 			fsl,rx-fifo-size = <16>;
 			fsl,tx-fifo-size = <16>;
+			clocks = <&clks MPC512x_CLK_PSC1>,
+				 <&clks MPC512x_CLK_PSC1_MCLK>;
+			clock-names = "ipg", "mclk";
 		};
 
 		/* PSC2 */
@@ -296,6 +373,9 @@
 			interrupts = <40 0x8>;
 			fsl,rx-fifo-size = <16>;
 			fsl,tx-fifo-size = <16>;
+			clocks = <&clks MPC512x_CLK_PSC2>,
+				 <&clks MPC512x_CLK_PSC2_MCLK>;
+			clock-names = "ipg", "mclk";
 		};
 
 		/* PSC3 */
@@ -305,6 +385,9 @@
 			interrupts = <40 0x8>;
 			fsl,rx-fifo-size = <16>;
 			fsl,tx-fifo-size = <16>;
+			clocks = <&clks MPC512x_CLK_PSC3>,
+				 <&clks MPC512x_CLK_PSC3_MCLK>;
+			clock-names = "ipg", "mclk";
 		};
 
 		/* PSC4 */
@@ -314,6 +397,9 @@
 			interrupts = <40 0x8>;
 			fsl,rx-fifo-size = <16>;
 			fsl,tx-fifo-size = <16>;
+			clocks = <&clks MPC512x_CLK_PSC4>,
+				 <&clks MPC512x_CLK_PSC4_MCLK>;
+			clock-names = "ipg", "mclk";
 		};
 
 		/* PSC5 */
@@ -323,6 +409,9 @@
 			interrupts = <40 0x8>;
 			fsl,rx-fifo-size = <16>;
 			fsl,tx-fifo-size = <16>;
+			clocks = <&clks MPC512x_CLK_PSC5>,
+				 <&clks MPC512x_CLK_PSC5_MCLK>;
+			clock-names = "ipg", "mclk";
 		};
 
 		/* PSC6 */
@@ -332,6 +421,9 @@
 			interrupts = <40 0x8>;
 			fsl,rx-fifo-size = <16>;
 			fsl,tx-fifo-size = <16>;
+			clocks = <&clks MPC512x_CLK_PSC6>,
+				 <&clks MPC512x_CLK_PSC6_MCLK>;
+			clock-names = "ipg", "mclk";
 		};
 
 		/* PSC7 */
@@ -341,6 +433,9 @@
 			interrupts = <40 0x8>;
 			fsl,rx-fifo-size = <16>;
 			fsl,tx-fifo-size = <16>;
+			clocks = <&clks MPC512x_CLK_PSC7>,
+				 <&clks MPC512x_CLK_PSC7_MCLK>;
+			clock-names = "ipg", "mclk";
 		};
 
 		/* PSC8 */
@@ -350,6 +445,9 @@
 			interrupts = <40 0x8>;
 			fsl,rx-fifo-size = <16>;
 			fsl,tx-fifo-size = <16>;
+			clocks = <&clks MPC512x_CLK_PSC8>,
+				 <&clks MPC512x_CLK_PSC8_MCLK>;
+			clock-names = "ipg", "mclk";
 		};
 
 		/* PSC9 */
@@ -359,6 +457,9 @@
 			interrupts = <40 0x8>;
 			fsl,rx-fifo-size = <16>;
 			fsl,tx-fifo-size = <16>;
+			clocks = <&clks MPC512x_CLK_PSC9>,
+				 <&clks MPC512x_CLK_PSC9_MCLK>;
+			clock-names = "ipg", "mclk";
 		};
 
 		/* PSC10 */
@@ -368,6 +469,9 @@
 			interrupts = <40 0x8>;
 			fsl,rx-fifo-size = <16>;
 			fsl,tx-fifo-size = <16>;
+			clocks = <&clks MPC512x_CLK_PSC10>,
+				 <&clks MPC512x_CLK_PSC10_MCLK>;
+			clock-names = "ipg", "mclk";
 		};
 
 		/* PSC11 */
@@ -377,12 +481,17 @@
 			interrupts = <40 0x8>;
 			fsl,rx-fifo-size = <16>;
 			fsl,tx-fifo-size = <16>;
+			clocks = <&clks MPC512x_CLK_PSC11>,
+				 <&clks MPC512x_CLK_PSC11_MCLK>;
+			clock-names = "ipg", "mclk";
 		};
 
 		pscfifo@11f00 {
 			compatible = "fsl,mpc5121-psc-fifo";
 			reg = <0x11f00 0x100>;
 			interrupts = <40 0x8>;
+			clocks = <&clks MPC512x_CLK_PSC_FIFO>;
+			clock-names = "ipg";
 		};
 
 		dma0: dma@14000 {
@@ -400,6 +509,8 @@
 		#address-cells = <3>;
 		#size-cells = <2>;
 		#interrupt-cells = <1>;
+		clocks = <&clks MPC512x_CLK_PCI>;
+		clock-names = "ipg";
 
 		reg = <0x80008500 0x100	/* internal registers */
 		       0x80008300 0x8>;	/* config space access registers */
diff --git a/arch/powerpc/boot/dts/mpc5125twr.dts b/arch/powerpc/boot/dts/mpc5125twr.dts
index a618dfc..e4f2974 100644
--- a/arch/powerpc/boot/dts/mpc5125twr.dts
+++ b/arch/powerpc/boot/dts/mpc5125twr.dts
@@ -12,6 +12,8 @@
  * option) any later version.
  */
 
+#include <dt-bindings/clock/mpc512x-clock.h>
+
 /dts-v1/;
 
 / {
@@ -54,6 +56,17 @@
 		reg = <0x30000000 0x08000>;		// 32K at 0x30000000
 	};
 
+	clocks {
+		#address-cells = <1>;
+		#size-cells = <0>;
+
+		osc: osc {
+			compatible = "fixed-clock";
+			#clock-cells = <0>;
+			clock-frequency = <33000000>;
+		};
+	};
+
 	soc@80000000 {
 		compatible = "fsl,mpc5121-immr";
 		#address-cells = <1>;
@@ -87,9 +100,12 @@
 			reg = <0xe00 0x100>;
 		};
 
-		clock@f00 {	// Clock control
+		clks: clock@f00 {	// Clock control
 			compatible = "fsl,mpc5121-clock";
 			reg = <0xf00 0x100>;
+			#clock-cells = <1>;
+			clocks = <&osc>;
+			clock-names = "osc";
 		};
 
 		pmc@1000{  // Power Management Controller
@@ -114,18 +130,33 @@
 			compatible = "fsl,mpc5121-mscan";
 			interrupts = <12 0x8>;
 			reg = <0x1300 0x80>;
+			clocks = <&clks MPC512x_CLK_BDLC>,
+				 <&clks MPC512x_CLK_IPS>,
+				 <&clks MPC512x_CLK_SYS>,
+				 <&clks MPC512x_CLK_REF>,
+				 <&clks MPC512x_CLK_MSCAN0_MCLK>;
+			clock-names = "ipg", "ips", "sys", "ref", "mclk";
 		};
 
 		can@1380 {
 			compatible = "fsl,mpc5121-mscan";
 			interrupts = <13 0x8>;
 			reg = <0x1380 0x80>;
+			clocks = <&clks MPC512x_CLK_BDLC>,
+				 <&clks MPC512x_CLK_IPS>,
+				 <&clks MPC512x_CLK_SYS>,
+				 <&clks MPC512x_CLK_REF>,
+				 <&clks MPC512x_CLK_MSCAN1_MCLK>;
+			clock-names = "ipg", "ips", "sys", "ref", "mclk";
 		};
 
 		sdhc@1500 {
 			compatible = "fsl,mpc5121-sdhc";
 			interrupts = <8 0x8>;
 			reg = <0x1500 0x100>;
+			clocks = <&clks MPC512x_CLK_IPS>,
+				 <&clks MPC512x_CLK_SDHC>;
+			clock-names = "ipg", "per";
 		};
 
 		i2c@1700 {
@@ -134,6 +165,8 @@
 			compatible = "fsl,mpc5121-i2c", "fsl-i2c";
 			reg = <0x1700 0x20>;
 			interrupts = <0x9 0x8>;
+			clocks = <&clks MPC512x_CLK_I2C>;
+			clock-names = "ipg";
 		};
 
 		i2c@1720 {
@@ -142,6 +175,8 @@
 			compatible = "fsl,mpc5121-i2c", "fsl-i2c";
 			reg = <0x1720 0x20>;
 			interrupts = <0xa 0x8>;
+			clocks = <&clks MPC512x_CLK_I2C>;
+			clock-names = "ipg";
 		};
 
 		i2c@1740 {
@@ -150,6 +185,8 @@
 			compatible = "fsl,mpc5121-i2c", "fsl-i2c";
 			reg = <0x1740 0x20>;
 			interrupts = <0xb 0x8>;
+			clocks = <&clks MPC512x_CLK_I2C>;
+			clock-names = "ipg";
 		};
 
 		i2ccontrol@1760 {
@@ -161,6 +198,8 @@
 			compatible = "fsl,mpc5121-diu";
 			reg = <0x2100 0x100>;
 			interrupts = <64 0x8>;
+			clocks = <&clks MPC512x_CLK_DIU>;
+			clock-names = "ipg";
 		};
 
 		mdio@2800 {
@@ -180,6 +219,8 @@
 			interrupts = <4 0x8>;
 			phy-handle = < &phy0 >;
 			phy-connection-type = "rmii";
+			clocks = <&clks MPC512x_CLK_FEC>;
+			clock-names = "per";
 		};
 
 		// IO control
@@ -200,6 +241,8 @@
 			interrupts = <43 0x8>;
 			dr_mode = "host";
 			phy_type = "ulpi";
+			clocks = <&clks MPC512x_CLK_USB1>;
+			clock-names = "ipg";
 			status = "disabled";
 		};
 
@@ -211,6 +254,9 @@
 			interrupts = <40 0x8>;
 			fsl,rx-fifo-size = <16>;
 			fsl,tx-fifo-size = <16>;
+			clocks = <&clks MPC512x_CLK_PSC1>,
+				 <&clks MPC512x_CLK_PSC1_MCLK>;
+			clock-names = "ipg", "mclk";
 		};
 
 		// PSC9 uart1 aka ttyPSC1
@@ -220,12 +266,17 @@
 			interrupts = <40 0x8>;
 			fsl,rx-fifo-size = <16>;
 			fsl,tx-fifo-size = <16>;
+			clocks = <&clks MPC512x_CLK_PSC9>,
+				 <&clks MPC512x_CLK_PSC9_MCLK>;
+			clock-names = "ipg", "mclk";
 		};
 
 		pscfifo@11f00 {
 			compatible = "fsl,mpc5121-psc-fifo";
 			reg = <0x11f00 0x100>;
 			interrupts = <40 0x8>;
+			clocks = <&clks MPC512x_CLK_PSC_FIFO>;
+			clock-names = "ipg";
 		};
 
 		dma@14000 {
diff --git a/arch/powerpc/include/asm/clk_interface.h b/arch/powerpc/include/asm/clk_interface.h
deleted file mode 100644
index ab1882c..0000000
--- a/arch/powerpc/include/asm/clk_interface.h
+++ /dev/null
@@ -1,20 +0,0 @@
-#ifndef __ASM_POWERPC_CLK_INTERFACE_H
-#define __ASM_POWERPC_CLK_INTERFACE_H
-
-#include <linux/clk.h>
-
-struct clk_interface {
-	struct clk*	(*clk_get)	(struct device *dev, const char *id);
-	int		(*clk_enable)	(struct clk *clk);
-	void		(*clk_disable)	(struct clk *clk);
-	unsigned long	(*clk_get_rate)	(struct clk *clk);
-	void		(*clk_put)	(struct clk *clk);
-	long		(*clk_round_rate) (struct clk *clk, unsigned long rate);
-	int 		(*clk_set_rate)	(struct clk *clk, unsigned long rate);
-	int		(*clk_set_parent) (struct clk *clk, struct clk *parent);
-	struct clk*	(*clk_get_parent) (struct clk *clk);
-};
-
-extern struct clk_interface clk_functions;
-
-#endif /* __ASM_POWERPC_CLK_INTERFACE_H */
diff --git a/arch/powerpc/include/asm/dma-mapping.h b/arch/powerpc/include/asm/dma-mapping.h
index e27e9ad..150866b 100644
--- a/arch/powerpc/include/asm/dma-mapping.h
+++ b/arch/powerpc/include/asm/dma-mapping.h
@@ -134,6 +134,7 @@
 }
 
 extern int dma_set_mask(struct device *dev, u64 dma_mask);
+extern int __dma_set_mask(struct device *dev, u64 dma_mask);
 
 #define dma_alloc_coherent(d,s,h,f)	dma_alloc_attrs(d,s,h,f,NULL)
 
diff --git a/arch/powerpc/include/asm/eeh.h b/arch/powerpc/include/asm/eeh.h
index 9e39ceb..d4dd41f 100644
--- a/arch/powerpc/include/asm/eeh.h
+++ b/arch/powerpc/include/asm/eeh.h
@@ -172,10 +172,20 @@
 };
 
 extern struct eeh_ops *eeh_ops;
-extern int eeh_subsystem_enabled;
+extern bool eeh_subsystem_enabled;
 extern raw_spinlock_t confirm_error_lock;
 extern int eeh_probe_mode;
 
+static inline bool eeh_enabled(void)
+{
+	return eeh_subsystem_enabled;
+}
+
+static inline void eeh_set_enable(bool mode)
+{
+	eeh_subsystem_enabled = mode;
+}
+
 #define EEH_PROBE_MODE_DEV	(1<<0)	/* From PCI device	*/
 #define EEH_PROBE_MODE_DEVTREE	(1<<1)	/* From device tree	*/
 
@@ -246,7 +256,7 @@
  * If this macro yields TRUE, the caller relays to eeh_check_failure()
  * which does further tests out of line.
  */
-#define EEH_POSSIBLE_ERROR(val, type)	((val) == (type)~0 && eeh_subsystem_enabled)
+#define EEH_POSSIBLE_ERROR(val, type)	((val) == (type)~0 && eeh_enabled())
 
 /*
  * Reads from a device which has been isolated by EEH will return
@@ -257,6 +267,13 @@
 
 #else /* !CONFIG_EEH */
 
+static inline bool eeh_enabled(void)
+{
+        return false;
+}
+
+static inline void eeh_set_enable(bool mode) { }
+
 static inline int eeh_init(void)
 {
 	return 0;
diff --git a/arch/powerpc/include/asm/epapr_hcalls.h b/arch/powerpc/include/asm/epapr_hcalls.h
index 86b0ac7..334459a 100644
--- a/arch/powerpc/include/asm/epapr_hcalls.h
+++ b/arch/powerpc/include/asm/epapr_hcalls.h
@@ -460,5 +460,116 @@
 
 	return r3;
 }
+
+#ifdef CONFIG_EPAPR_PARAVIRT
+static inline unsigned long epapr_hypercall(unsigned long *in,
+			    unsigned long *out,
+			    unsigned long nr)
+{
+	unsigned long register r0 asm("r0");
+	unsigned long register r3 asm("r3") = in[0];
+	unsigned long register r4 asm("r4") = in[1];
+	unsigned long register r5 asm("r5") = in[2];
+	unsigned long register r6 asm("r6") = in[3];
+	unsigned long register r7 asm("r7") = in[4];
+	unsigned long register r8 asm("r8") = in[5];
+	unsigned long register r9 asm("r9") = in[6];
+	unsigned long register r10 asm("r10") = in[7];
+	unsigned long register r11 asm("r11") = nr;
+	unsigned long register r12 asm("r12");
+
+	asm volatile("bl	epapr_hypercall_start"
+		     : "=r"(r0), "=r"(r3), "=r"(r4), "=r"(r5), "=r"(r6),
+		       "=r"(r7), "=r"(r8), "=r"(r9), "=r"(r10), "=r"(r11),
+		       "=r"(r12)
+		     : "r"(r3), "r"(r4), "r"(r5), "r"(r6), "r"(r7), "r"(r8),
+		       "r"(r9), "r"(r10), "r"(r11)
+		     : "memory", "cc", "xer", "ctr", "lr");
+
+	out[0] = r4;
+	out[1] = r5;
+	out[2] = r6;
+	out[3] = r7;
+	out[4] = r8;
+	out[5] = r9;
+	out[6] = r10;
+	out[7] = r11;
+
+	return r3;
+}
+#else
+static unsigned long epapr_hypercall(unsigned long *in,
+				   unsigned long *out,
+				   unsigned long nr)
+{
+	return EV_UNIMPLEMENTED;
+}
+#endif
+
+static inline long epapr_hypercall0_1(unsigned int nr, unsigned long *r2)
+{
+	unsigned long in[8];
+	unsigned long out[8];
+	unsigned long r;
+
+	r = epapr_hypercall(in, out, nr);
+	*r2 = out[0];
+
+	return r;
+}
+
+static inline long epapr_hypercall0(unsigned int nr)
+{
+	unsigned long in[8];
+	unsigned long out[8];
+
+	return epapr_hypercall(in, out, nr);
+}
+
+static inline long epapr_hypercall1(unsigned int nr, unsigned long p1)
+{
+	unsigned long in[8];
+	unsigned long out[8];
+
+	in[0] = p1;
+	return epapr_hypercall(in, out, nr);
+}
+
+static inline long epapr_hypercall2(unsigned int nr, unsigned long p1,
+				    unsigned long p2)
+{
+	unsigned long in[8];
+	unsigned long out[8];
+
+	in[0] = p1;
+	in[1] = p2;
+	return epapr_hypercall(in, out, nr);
+}
+
+static inline long epapr_hypercall3(unsigned int nr, unsigned long p1,
+				    unsigned long p2, unsigned long p3)
+{
+	unsigned long in[8];
+	unsigned long out[8];
+
+	in[0] = p1;
+	in[1] = p2;
+	in[2] = p3;
+	return epapr_hypercall(in, out, nr);
+}
+
+static inline long epapr_hypercall4(unsigned int nr, unsigned long p1,
+				    unsigned long p2, unsigned long p3,
+				    unsigned long p4)
+{
+	unsigned long in[8];
+	unsigned long out[8];
+
+	in[0] = p1;
+	in[1] = p2;
+	in[2] = p3;
+	in[3] = p4;
+	return epapr_hypercall(in, out, nr);
+}
 #endif /* !__ASSEMBLY__ */
 #endif /* _EPAPR_HCALLS_H */
diff --git a/arch/powerpc/include/asm/hugetlb.h b/arch/powerpc/include/asm/hugetlb.h
index d750336..623f297 100644
--- a/arch/powerpc/include/asm/hugetlb.h
+++ b/arch/powerpc/include/asm/hugetlb.h
@@ -127,7 +127,7 @@
 					    unsigned long addr, pte_t *ptep)
 {
 #ifdef CONFIG_PPC64
-	return __pte(pte_update(mm, addr, ptep, ~0UL, 1));
+	return __pte(pte_update(mm, addr, ptep, ~0UL, 0, 1));
 #else
 	return __pte(pte_update(ptep, ~0UL, 0));
 #endif
diff --git a/arch/powerpc/include/asm/iommu.h b/arch/powerpc/include/asm/iommu.h
index f7a8036..42632c7 100644
--- a/arch/powerpc/include/asm/iommu.h
+++ b/arch/powerpc/include/asm/iommu.h
@@ -77,6 +77,7 @@
 #ifdef CONFIG_IOMMU_API
 	struct iommu_group *it_group;
 #endif
+	void (*set_bypass)(struct iommu_table *tbl, bool enable);
 };
 
 /* Pure 2^n version of get_order */
diff --git a/arch/powerpc/include/asm/kvm_asm.h b/arch/powerpc/include/asm/kvm_asm.h
index 1503d8c..19eb74a 100644
--- a/arch/powerpc/include/asm/kvm_asm.h
+++ b/arch/powerpc/include/asm/kvm_asm.h
@@ -92,14 +92,17 @@
 #define BOOK3S_INTERRUPT_FP_UNAVAIL	0x800
 #define BOOK3S_INTERRUPT_DECREMENTER	0x900
 #define BOOK3S_INTERRUPT_HV_DECREMENTER	0x980
+#define BOOK3S_INTERRUPT_DOORBELL	0xa00
 #define BOOK3S_INTERRUPT_SYSCALL	0xc00
 #define BOOK3S_INTERRUPT_TRACE		0xd00
 #define BOOK3S_INTERRUPT_H_DATA_STORAGE	0xe00
 #define BOOK3S_INTERRUPT_H_INST_STORAGE	0xe20
 #define BOOK3S_INTERRUPT_H_EMUL_ASSIST	0xe40
+#define BOOK3S_INTERRUPT_H_DOORBELL	0xe80
 #define BOOK3S_INTERRUPT_PERFMON	0xf00
 #define BOOK3S_INTERRUPT_ALTIVEC	0xf20
 #define BOOK3S_INTERRUPT_VSX		0xf40
+#define BOOK3S_INTERRUPT_H_FAC_UNAVAIL	0xf80
 
 #define BOOK3S_IRQPRIO_SYSTEM_RESET		0
 #define BOOK3S_IRQPRIO_DATA_SEGMENT		1
diff --git a/arch/powerpc/include/asm/kvm_book3s.h b/arch/powerpc/include/asm/kvm_book3s.h
index bc23b1b..83851aa 100644
--- a/arch/powerpc/include/asm/kvm_book3s.h
+++ b/arch/powerpc/include/asm/kvm_book3s.h
@@ -186,9 +186,6 @@
 
 extern void kvmppc_entry_trampoline(void);
 extern void kvmppc_hv_entry_trampoline(void);
-extern void kvmppc_load_up_fpu(void);
-extern void kvmppc_load_up_altivec(void);
-extern void kvmppc_load_up_vsx(void);
 extern u32 kvmppc_alignment_dsisr(struct kvm_vcpu *vcpu, unsigned int inst);
 extern ulong kvmppc_alignment_dar(struct kvm_vcpu *vcpu, unsigned int inst);
 extern int kvmppc_h_pr(struct kvm_vcpu *vcpu, unsigned long cmd);
@@ -271,16 +268,25 @@
 	return vcpu->arch.pc;
 }
 
-static inline u32 kvmppc_get_last_inst(struct kvm_vcpu *vcpu)
+static inline bool kvmppc_need_byteswap(struct kvm_vcpu *vcpu)
 {
-	ulong pc = kvmppc_get_pc(vcpu);
+	return (vcpu->arch.shared->msr & MSR_LE) != (MSR_KERNEL & MSR_LE);
+}
 
+static inline u32 kvmppc_get_last_inst_internal(struct kvm_vcpu *vcpu, ulong pc)
+{
 	/* Load the instruction manually if it failed to do so in the
 	 * exit path */
 	if (vcpu->arch.last_inst == KVM_INST_FETCH_FAILED)
 		kvmppc_ld(vcpu, &pc, sizeof(u32), &vcpu->arch.last_inst, false);
 
-	return vcpu->arch.last_inst;
+	return kvmppc_need_byteswap(vcpu) ? swab32(vcpu->arch.last_inst) :
+		vcpu->arch.last_inst;
+}
+
+static inline u32 kvmppc_get_last_inst(struct kvm_vcpu *vcpu)
+{
+	return kvmppc_get_last_inst_internal(vcpu, kvmppc_get_pc(vcpu));
 }
 
 /*
@@ -290,14 +296,7 @@
  */
 static inline u32 kvmppc_get_last_sc(struct kvm_vcpu *vcpu)
 {
-	ulong pc = kvmppc_get_pc(vcpu) - 4;
-
-	/* Load the instruction manually if it failed to do so in the
-	 * exit path */
-	if (vcpu->arch.last_inst == KVM_INST_FETCH_FAILED)
-		kvmppc_ld(vcpu, &pc, sizeof(u32), &vcpu->arch.last_inst, false);
-
-	return vcpu->arch.last_inst;
+	return kvmppc_get_last_inst_internal(vcpu, kvmppc_get_pc(vcpu) - 4);
 }
 
 static inline ulong kvmppc_get_fault_dar(struct kvm_vcpu *vcpu)
diff --git a/arch/powerpc/include/asm/kvm_book3s_asm.h b/arch/powerpc/include/asm/kvm_book3s_asm.h
index 192917d..f3a91dc 100644
--- a/arch/powerpc/include/asm/kvm_book3s_asm.h
+++ b/arch/powerpc/include/asm/kvm_book3s_asm.h
@@ -88,6 +88,7 @@
 	u8 hwthread_req;
 	u8 hwthread_state;
 	u8 host_ipi;
+	u8 ptid;
 	struct kvm_vcpu *kvm_vcpu;
 	struct kvmppc_vcore *kvm_vcore;
 	unsigned long xics_phys;
diff --git a/arch/powerpc/include/asm/kvm_booke.h b/arch/powerpc/include/asm/kvm_booke.h
index dd8f615..80d46b5 100644
--- a/arch/powerpc/include/asm/kvm_booke.h
+++ b/arch/powerpc/include/asm/kvm_booke.h
@@ -63,6 +63,12 @@
 	return vcpu->arch.xer;
 }
 
+static inline bool kvmppc_need_byteswap(struct kvm_vcpu *vcpu)
+{
+	/* XXX Would need to check TLB entry */
+	return false;
+}
+
 static inline u32 kvmppc_get_last_inst(struct kvm_vcpu *vcpu)
 {
 	return vcpu->arch.last_inst;
diff --git a/arch/powerpc/include/asm/kvm_host.h b/arch/powerpc/include/asm/kvm_host.h
index 237d1d2..1eaea2d 100644
--- a/arch/powerpc/include/asm/kvm_host.h
+++ b/arch/powerpc/include/asm/kvm_host.h
@@ -288,6 +288,7 @@
 	int n_woken;
 	int nap_count;
 	int napping_threads;
+	int first_vcpuid;
 	u16 pcpu;
 	u16 last_cpu;
 	u8 vcore_state;
@@ -298,10 +299,12 @@
 	u64 stolen_tb;
 	u64 preempt_tb;
 	struct kvm_vcpu *runner;
+	struct kvm *kvm;
 	u64 tb_offset;		/* guest timebase - host timebase */
 	ulong lpcr;
 	u32 arch_compat;
 	ulong pcr;
+	ulong dpdes;		/* doorbell state (POWER8) */
 };
 
 #define VCORE_ENTRY_COUNT(vc)	((vc)->entry_exit_count & 0xff)
@@ -410,8 +413,7 @@
 
 	ulong gpr[32];
 
-	u64 fpr[32];
-	u64 fpscr;
+	struct thread_fp_state fp;
 
 #ifdef CONFIG_SPE
 	ulong evr[32];
@@ -420,12 +422,7 @@
 	u64 acc;
 #endif
 #ifdef CONFIG_ALTIVEC
-	vector128 vr[32];
-	vector128 vscr;
-#endif
-
-#ifdef CONFIG_VSX
-	u64 vsr[64];
+	struct thread_vr_state vr;
 #endif
 
 #ifdef CONFIG_KVM_BOOKE_HV
@@ -452,6 +449,7 @@
 	ulong pc;
 	ulong ctr;
 	ulong lr;
+	ulong tar;
 
 	ulong xer;
 	u32 cr;
@@ -461,13 +459,30 @@
 	ulong guest_owned_ext;
 	ulong purr;
 	ulong spurr;
+	ulong ic;
+	ulong vtb;
 	ulong dscr;
 	ulong amr;
 	ulong uamor;
+	ulong iamr;
 	u32 ctrl;
+	u32 dabrx;
 	ulong dabr;
+	ulong dawr;
+	ulong dawrx;
+	ulong ciabr;
 	ulong cfar;
 	ulong ppr;
+	ulong pspb;
+	ulong fscr;
+	ulong ebbhr;
+	ulong ebbrr;
+	ulong bescr;
+	ulong csigr;
+	ulong tacr;
+	ulong tcscr;
+	ulong acop;
+	ulong wort;
 	ulong shadow_srr1;
 #endif
 	u32 vrsave; /* also USPRG0 */
@@ -502,10 +517,33 @@
 	u32 ccr1;
 	u32 dbsr;
 
-	u64 mmcr[3];
+	u64 mmcr[5];
 	u32 pmc[8];
+	u32 spmc[2];
 	u64 siar;
 	u64 sdar;
+	u64 sier;
+#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
+	u64 tfhar;
+	u64 texasr;
+	u64 tfiar;
+
+	u32 cr_tm;
+	u64 lr_tm;
+	u64 ctr_tm;
+	u64 amr_tm;
+	u64 ppr_tm;
+	u64 dscr_tm;
+	u64 tar_tm;
+
+	ulong gpr_tm[32];
+
+	struct thread_fp_state fp_tm;
+
+	struct thread_vr_state vr_tm;
+	u32 vrsave_tm; /* also USPRG0 */
+
+#endif
 
 #ifdef CONFIG_KVM_EXIT_TIMING
 	struct mutex exit_timing_lock;
@@ -546,6 +584,7 @@
 #endif
 	gpa_t paddr_accessed;
 	gva_t vaddr_accessed;
+	pgd_t *pgdir;
 
 	u8 io_gpr; /* GPR used as IO source/target */
 	u8 mmio_is_bigendian;
@@ -603,7 +642,6 @@
 	struct list_head run_list;
 	struct task_struct *run_task;
 	struct kvm_run *kvm_run;
-	pgd_t *pgdir;
 
 	spinlock_t vpa_update_lock;
 	struct kvmppc_vpa vpa;
@@ -616,9 +654,12 @@
 	spinlock_t tbacct_lock;
 	u64 busy_stolen;
 	u64 busy_preempt;
+	unsigned long intr_msr;
 #endif
 };
 
+#define VCPU_FPR(vcpu, i)	(vcpu)->arch.fp.fpr[i][TS_FPROFFSET]
+
 /* Values for vcpu->arch.state */
 #define KVMPPC_VCPU_NOTREADY		0
 #define KVMPPC_VCPU_RUNNABLE		1
diff --git a/arch/powerpc/include/asm/kvm_para.h b/arch/powerpc/include/asm/kvm_para.h
index 2b11965..336a91a 100644
--- a/arch/powerpc/include/asm/kvm_para.h
+++ b/arch/powerpc/include/asm/kvm_para.h
@@ -39,10 +39,6 @@
 	return 1;
 }
 
-extern unsigned long kvm_hypercall(unsigned long *in,
-				   unsigned long *out,
-				   unsigned long nr);
-
 #else
 
 static inline int kvm_para_available(void)
@@ -50,82 +46,8 @@
 	return 0;
 }
 
-static unsigned long kvm_hypercall(unsigned long *in,
-				   unsigned long *out,
-				   unsigned long nr)
-{
-	return EV_UNIMPLEMENTED;
-}
-
 #endif
 
-static inline long kvm_hypercall0_1(unsigned int nr, unsigned long *r2)
-{
-	unsigned long in[8];
-	unsigned long out[8];
-	unsigned long r;
-
-	r = kvm_hypercall(in, out, KVM_HCALL_TOKEN(nr));
-	*r2 = out[0];
-
-	return r;
-}
-
-static inline long kvm_hypercall0(unsigned int nr)
-{
-	unsigned long in[8];
-	unsigned long out[8];
-
-	return kvm_hypercall(in, out, KVM_HCALL_TOKEN(nr));
-}
-
-static inline long kvm_hypercall1(unsigned int nr, unsigned long p1)
-{
-	unsigned long in[8];
-	unsigned long out[8];
-
-	in[0] = p1;
-	return kvm_hypercall(in, out, KVM_HCALL_TOKEN(nr));
-}
-
-static inline long kvm_hypercall2(unsigned int nr, unsigned long p1,
-				  unsigned long p2)
-{
-	unsigned long in[8];
-	unsigned long out[8];
-
-	in[0] = p1;
-	in[1] = p2;
-	return kvm_hypercall(in, out, KVM_HCALL_TOKEN(nr));
-}
-
-static inline long kvm_hypercall3(unsigned int nr, unsigned long p1,
-				  unsigned long p2, unsigned long p3)
-{
-	unsigned long in[8];
-	unsigned long out[8];
-
-	in[0] = p1;
-	in[1] = p2;
-	in[2] = p3;
-	return kvm_hypercall(in, out, KVM_HCALL_TOKEN(nr));
-}
-
-static inline long kvm_hypercall4(unsigned int nr, unsigned long p1,
-				  unsigned long p2, unsigned long p3,
-				  unsigned long p4)
-{
-	unsigned long in[8];
-	unsigned long out[8];
-
-	in[0] = p1;
-	in[1] = p2;
-	in[2] = p3;
-	in[3] = p4;
-	return kvm_hypercall(in, out, KVM_HCALL_TOKEN(nr));
-}
-
-
 static inline unsigned int kvm_arch_para_features(void)
 {
 	unsigned long r;
@@ -133,7 +55,7 @@
 	if (!kvm_para_available())
 		return 0;
 
-	if(kvm_hypercall0_1(KVM_HC_FEATURES, &r))
+	if(epapr_hypercall0_1(KVM_HCALL_TOKEN(KVM_HC_FEATURES), &r))
 		return 0;
 
 	return r;
diff --git a/arch/powerpc/include/asm/kvm_ppc.h b/arch/powerpc/include/asm/kvm_ppc.h
index c8317fb..fcd53f0 100644
--- a/arch/powerpc/include/asm/kvm_ppc.h
+++ b/arch/powerpc/include/asm/kvm_ppc.h
@@ -54,12 +54,13 @@
 extern void kvmppc_dump_vcpu(struct kvm_vcpu *vcpu);
 extern int kvmppc_handle_load(struct kvm_run *run, struct kvm_vcpu *vcpu,
                               unsigned int rt, unsigned int bytes,
-                              int is_bigendian);
+			      int is_default_endian);
 extern int kvmppc_handle_loads(struct kvm_run *run, struct kvm_vcpu *vcpu,
                                unsigned int rt, unsigned int bytes,
-                               int is_bigendian);
+			       int is_default_endian);
 extern int kvmppc_handle_store(struct kvm_run *run, struct kvm_vcpu *vcpu,
-                               u64 val, unsigned int bytes, int is_bigendian);
+			       u64 val, unsigned int bytes,
+			       int is_default_endian);
 
 extern int kvmppc_emulate_instruction(struct kvm_run *run,
                                       struct kvm_vcpu *vcpu);
@@ -455,6 +456,12 @@
 	trace_hardirqs_on();
 
 #ifdef CONFIG_PPC64
+	/*
+	 * To avoid races, the caller must have gone directly from having
+	 * interrupts fully-enabled to hard-disabled.
+	 */
+	WARN_ON(local_paca->irq_happened != PACA_IRQ_HARD_DIS);
+
 	/* Only need to enable IRQs by hard enabling them after this */
 	local_paca->irq_happened = 0;
 	local_paca->soft_enabled = 1;
diff --git a/arch/powerpc/include/asm/mpc5121.h b/arch/powerpc/include/asm/mpc5121.h
index 887d3d6..4a69cd1 100644
--- a/arch/powerpc/include/asm/mpc5121.h
+++ b/arch/powerpc/include/asm/mpc5121.h
@@ -37,7 +37,12 @@
 	u32	cccr;	/* CFM Clock Control Register */
 	u32	dccr;	/* DIU Clock Control Register */
 	u32	mscan_ccr[4];	/* MSCAN Clock Control Registers */
-	u8	res[0x98]; /* Reserved */
+	u32	out_ccr[4];	/* OUT CLK Configure Registers */
+	u32	rsv0[2];	/* Reserved */
+	u32	scfr3;		/* System Clock Frequency Register 3 */
+	u32	rsv1[3];	/* Reserved */
+	u32	spll_lock_cnt;	/* System PLL Lock Counter */
+	u8	res[0x6c];	/* Reserved */
 };
 
 /*
diff --git a/arch/powerpc/include/asm/pgtable-ppc64.h b/arch/powerpc/include/asm/pgtable-ppc64.h
index d27960c..eb92610 100644
--- a/arch/powerpc/include/asm/pgtable-ppc64.h
+++ b/arch/powerpc/include/asm/pgtable-ppc64.h
@@ -195,6 +195,7 @@
 static inline unsigned long pte_update(struct mm_struct *mm,
 				       unsigned long addr,
 				       pte_t *ptep, unsigned long clr,
+				       unsigned long set,
 				       int huge)
 {
 #ifdef PTE_ATOMIC_UPDATES
@@ -205,14 +206,15 @@
 	andi.	%1,%0,%6\n\
 	bne-	1b \n\
 	andc	%1,%0,%4 \n\
+	or	%1,%1,%7\n\
 	stdcx.	%1,0,%3 \n\
 	bne-	1b"
 	: "=&r" (old), "=&r" (tmp), "=m" (*ptep)
-	: "r" (ptep), "r" (clr), "m" (*ptep), "i" (_PAGE_BUSY)
+	: "r" (ptep), "r" (clr), "m" (*ptep), "i" (_PAGE_BUSY), "r" (set)
 	: "cc" );
 #else
 	unsigned long old = pte_val(*ptep);
-	*ptep = __pte(old & ~clr);
+	*ptep = __pte((old & ~clr) | set);
 #endif
 	/* huge pages use the old page table lock */
 	if (!huge)
@@ -231,9 +233,9 @@
 {
 	unsigned long old;
 
-       	if ((pte_val(*ptep) & (_PAGE_ACCESSED | _PAGE_HASHPTE)) == 0)
+	if ((pte_val(*ptep) & (_PAGE_ACCESSED | _PAGE_HASHPTE)) == 0)
 		return 0;
-	old = pte_update(mm, addr, ptep, _PAGE_ACCESSED, 0);
+	old = pte_update(mm, addr, ptep, _PAGE_ACCESSED, 0, 0);
 	return (old & _PAGE_ACCESSED) != 0;
 }
 #define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG
@@ -252,7 +254,7 @@
 	if ((pte_val(*ptep) & _PAGE_RW) == 0)
 		return;
 
-	pte_update(mm, addr, ptep, _PAGE_RW, 0);
+	pte_update(mm, addr, ptep, _PAGE_RW, 0, 0);
 }
 
 static inline void huge_ptep_set_wrprotect(struct mm_struct *mm,
@@ -261,7 +263,7 @@
 	if ((pte_val(*ptep) & _PAGE_RW) == 0)
 		return;
 
-	pte_update(mm, addr, ptep, _PAGE_RW, 1);
+	pte_update(mm, addr, ptep, _PAGE_RW, 0, 1);
 }
 
 /*
@@ -284,14 +286,14 @@
 static inline pte_t ptep_get_and_clear(struct mm_struct *mm,
 				       unsigned long addr, pte_t *ptep)
 {
-	unsigned long old = pte_update(mm, addr, ptep, ~0UL, 0);
+	unsigned long old = pte_update(mm, addr, ptep, ~0UL, 0, 0);
 	return __pte(old);
 }
 
 static inline void pte_clear(struct mm_struct *mm, unsigned long addr,
 			     pte_t * ptep)
 {
-	pte_update(mm, addr, ptep, ~0UL, 0);
+	pte_update(mm, addr, ptep, ~0UL, 0, 0);
 }
 
 
@@ -506,7 +508,9 @@
 
 extern unsigned long pmd_hugepage_update(struct mm_struct *mm,
 					 unsigned long addr,
-					 pmd_t *pmdp, unsigned long clr);
+					 pmd_t *pmdp,
+					 unsigned long clr,
+					 unsigned long set);
 
 static inline int __pmdp_test_and_clear_young(struct mm_struct *mm,
 					      unsigned long addr, pmd_t *pmdp)
@@ -515,7 +519,7 @@
 
 	if ((pmd_val(*pmdp) & (_PAGE_ACCESSED | _PAGE_HASHPTE)) == 0)
 		return 0;
-	old = pmd_hugepage_update(mm, addr, pmdp, _PAGE_ACCESSED);
+	old = pmd_hugepage_update(mm, addr, pmdp, _PAGE_ACCESSED, 0);
 	return ((old & _PAGE_ACCESSED) != 0);
 }
 
@@ -542,7 +546,7 @@
 	if ((pmd_val(*pmdp) & _PAGE_RW) == 0)
 		return;
 
-	pmd_hugepage_update(mm, addr, pmdp, _PAGE_RW);
+	pmd_hugepage_update(mm, addr, pmdp, _PAGE_RW, 0);
 }
 
 #define __HAVE_ARCH_PMDP_SPLITTING_FLUSH
@@ -560,9 +564,9 @@
 			    pmd_t *pmdp);
 
 #define pmd_move_must_withdraw pmd_move_must_withdraw
-typedef struct spinlock spinlock_t;
-static inline int pmd_move_must_withdraw(spinlock_t *new_pmd_ptl,
-					 spinlock_t *old_pmd_ptl)
+struct spinlock;
+static inline int pmd_move_must_withdraw(struct spinlock *new_pmd_ptl,
+					 struct spinlock *old_pmd_ptl)
 {
 	/*
 	 * Archs like ppc64 use pgtable to store per pmd
diff --git a/arch/powerpc/include/asm/pgtable.h b/arch/powerpc/include/asm/pgtable.h
index b999ca3..3ebb188 100644
--- a/arch/powerpc/include/asm/pgtable.h
+++ b/arch/powerpc/include/asm/pgtable.h
@@ -75,12 +75,34 @@
 	return pte;
 }
 
+#define ptep_set_numa ptep_set_numa
+static inline void ptep_set_numa(struct mm_struct *mm, unsigned long addr,
+				 pte_t *ptep)
+{
+	if ((pte_val(*ptep) & _PAGE_PRESENT) == 0)
+		VM_BUG_ON(1);
+
+	pte_update(mm, addr, ptep, _PAGE_PRESENT, _PAGE_NUMA, 0);
+	return;
+}
+
 #define pmd_numa pmd_numa
 static inline int pmd_numa(pmd_t pmd)
 {
 	return pte_numa(pmd_pte(pmd));
 }
 
+#define pmdp_set_numa pmdp_set_numa
+static inline void pmdp_set_numa(struct mm_struct *mm, unsigned long addr,
+				 pmd_t *pmdp)
+{
+	if ((pmd_val(*pmdp) & _PAGE_PRESENT) == 0)
+		VM_BUG_ON(1);
+
+	pmd_hugepage_update(mm, addr, pmdp, _PAGE_PRESENT, _PAGE_NUMA);
+	return;
+}
+
 #define pmd_mknonnuma pmd_mknonnuma
 static inline pmd_t pmd_mknonnuma(pmd_t pmd)
 {
@@ -287,6 +309,27 @@
 #endif
 pte_t *find_linux_pte_or_hugepte(pgd_t *pgdir, unsigned long ea,
 				 unsigned *shift);
+
+static inline pte_t *lookup_linux_ptep(pgd_t *pgdir, unsigned long hva,
+				     unsigned long *pte_sizep)
+{
+	pte_t *ptep;
+	unsigned long ps = *pte_sizep;
+	unsigned int shift;
+
+	ptep = find_linux_pte_or_hugepte(pgdir, hva, &shift);
+	if (!ptep)
+		return NULL;
+	if (shift)
+		*pte_sizep = 1ul << shift;
+	else
+		*pte_sizep = PAGE_SIZE;
+
+	if (ps > *pte_sizep)
+		return NULL;
+
+	return ptep;
+}
 #endif /* __ASSEMBLY__ */
 
 #endif /* __KERNEL__ */
diff --git a/arch/powerpc/include/asm/processor.h b/arch/powerpc/include/asm/processor.h
index 8ca20ac..b62de43 100644
--- a/arch/powerpc/include/asm/processor.h
+++ b/arch/powerpc/include/asm/processor.h
@@ -450,13 +450,6 @@
 
 extern int powersave_nap;	/* set if nap mode can be used in idle loop */
 extern void power7_nap(void);
-
-#ifdef CONFIG_PSERIES_IDLE
-extern void update_smt_snooze_delay(int cpu, int residency);
-#else
-static inline void update_smt_snooze_delay(int cpu, int residency) {}
-#endif
-
 extern void flush_instruction_cache(void);
 extern void hard_reset_now(void);
 extern void poweroff_now(void);
diff --git a/arch/powerpc/include/asm/reg.h b/arch/powerpc/include/asm/reg.h
index 62b114e..90c06ec 100644
--- a/arch/powerpc/include/asm/reg.h
+++ b/arch/powerpc/include/asm/reg.h
@@ -223,17 +223,26 @@
 #define   CTRL_TE	0x00c00000	/* thread enable */
 #define   CTRL_RUNLATCH	0x1
 #define SPRN_DAWR	0xB4
+#define SPRN_CIABR	0xBB
+#define   CIABR_PRIV		0x3
+#define   CIABR_PRIV_USER	1
+#define   CIABR_PRIV_SUPER	2
+#define   CIABR_PRIV_HYPER	3
 #define SPRN_DAWRX	0xBC
-#define   DAWRX_USER	(1UL << 0)
-#define   DAWRX_KERNEL	(1UL << 1)
-#define   DAWRX_HYP	(1UL << 2)
+#define   DAWRX_USER	__MASK(0)
+#define   DAWRX_KERNEL	__MASK(1)
+#define   DAWRX_HYP	__MASK(2)
+#define   DAWRX_WTI	__MASK(3)
+#define   DAWRX_WT	__MASK(4)
+#define   DAWRX_DR	__MASK(5)
+#define   DAWRX_DW	__MASK(6)
 #define SPRN_DABR	0x3F5	/* Data Address Breakpoint Register */
 #define SPRN_DABR2	0x13D	/* e300 */
 #define SPRN_DABRX	0x3F7	/* Data Address Breakpoint Register Extension */
-#define   DABRX_USER	(1UL << 0)
-#define   DABRX_KERNEL	(1UL << 1)
-#define   DABRX_HYP	(1UL << 2)
-#define   DABRX_BTI	(1UL << 3)
+#define   DABRX_USER	__MASK(0)
+#define   DABRX_KERNEL	__MASK(1)
+#define   DABRX_HYP	__MASK(2)
+#define   DABRX_BTI	__MASK(3)
 #define   DABRX_ALL     (DABRX_BTI | DABRX_HYP | DABRX_KERNEL | DABRX_USER)
 #define SPRN_DAR	0x013	/* Data Address Register */
 #define SPRN_DBCR	0x136	/* e300 Data Breakpoint Control Reg */
@@ -260,6 +269,8 @@
 #define SPRN_HRMOR	0x139	/* Real mode offset register */
 #define SPRN_HSRR0	0x13A	/* Hypervisor Save/Restore 0 */
 #define SPRN_HSRR1	0x13B	/* Hypervisor Save/Restore 1 */
+#define SPRN_IC		0x350	/* Virtual Instruction Count */
+#define SPRN_VTB	0x351	/* Virtual Time Base */
 /* HFSCR and FSCR bit numbers are the same */
 #define FSCR_TAR_LG	8	/* Enable Target Address Register */
 #define FSCR_EBB_LG	7	/* Enable Event Based Branching */
@@ -298,9 +309,13 @@
 #define   LPCR_RMLS    0x1C000000      /* impl dependent rmo limit sel */
 #define	  LPCR_RMLS_SH	(63-37)
 #define   LPCR_ILE     0x02000000      /* !HV irqs set MSR:LE */
+#define   LPCR_AIL	0x01800000	/* Alternate interrupt location */
 #define   LPCR_AIL_0	0x00000000	/* MMU off exception offset 0x0 */
 #define   LPCR_AIL_3	0x01800000	/* MMU on exception offset 0xc00...4xxx */
-#define   LPCR_PECE	0x00007000	/* powersave exit cause enable */
+#define   LPCR_ONL	0x00040000	/* online - PURR/SPURR count */
+#define   LPCR_PECE	0x0001f000	/* powersave exit cause enable */
+#define     LPCR_PECEDP	0x00010000	/* directed priv dbells cause exit */
+#define     LPCR_PECEDH	0x00008000	/* directed hyp dbells cause exit */
 #define     LPCR_PECE0	0x00004000	/* ext. exceptions can cause exit */
 #define     LPCR_PECE1	0x00002000	/* decrementer can cause exit */
 #define     LPCR_PECE2	0x00001000	/* machine check etc can cause exit */
@@ -322,6 +337,8 @@
 #define SPRN_PCR	0x152	/* Processor compatibility register */
 #define   PCR_VEC_DIS	(1ul << (63-0))	/* Vec. disable (bit NA since POWER8) */
 #define   PCR_VSX_DIS	(1ul << (63-1))	/* VSX disable (bit NA since POWER8) */
+#define   PCR_TM_DIS	(1ul << (63-2))	/* Trans. memory disable (POWER8) */
+#define   PCR_ARCH_206	0x4		/* Architecture 2.06 */
 #define   PCR_ARCH_205	0x2		/* Architecture 2.05 */
 #define	SPRN_HEIR	0x153	/* Hypervisor Emulated Instruction Register */
 #define SPRN_TLBINDEXR	0x154	/* P7 TLB control register */
@@ -368,6 +385,8 @@
 #define DER_EBRKE	0x00000002	/* External Breakpoint Interrupt */
 #define DER_DPIE	0x00000001	/* Dev. Port Nonmaskable Request */
 #define SPRN_DMISS	0x3D0		/* Data TLB Miss Register */
+#define SPRN_DHDES	0x0B1		/* Directed Hyp. Doorbell Exc. State */
+#define SPRN_DPDES	0x0B0		/* Directed Priv. Doorbell Exc. State */
 #define SPRN_EAR	0x11A		/* External Address Register */
 #define SPRN_HASH1	0x3D2		/* Primary Hash Address Register */
 #define SPRN_HASH2	0x3D3		/* Secondary Hash Address Register */
@@ -427,6 +446,7 @@
 #define SPRN_IABR	0x3F2	/* Instruction Address Breakpoint Register */
 #define SPRN_IABR2	0x3FA		/* 83xx */
 #define SPRN_IBCR	0x135		/* 83xx Insn Breakpoint Control Reg */
+#define SPRN_IAMR	0x03D		/* Instr. Authority Mask Reg */
 #define SPRN_HID4	0x3F4		/* 970 HID4 */
 #define  HID4_LPES0	 (1ul << (63-0)) /* LPAR env. sel. bit 0 */
 #define	 HID4_RMLS2_SH	 (63 - 2)	/* Real mode limit bottom 2 bits */
@@ -541,6 +561,7 @@
 #define SPRN_PIR	0x3FF	/* Processor Identification Register */
 #endif
 #define SPRN_TIR	0x1BE	/* Thread Identification Register */
+#define SPRN_PSPB	0x09F	/* Problem State Priority Boost reg */
 #define SPRN_PTEHI	0x3D5	/* 981 7450 PTE HI word (S/W TLB load) */
 #define SPRN_PTELO	0x3D6	/* 982 7450 PTE LO word (S/W TLB load) */
 #define SPRN_PURR	0x135	/* Processor Utilization of Resources Reg */
@@ -682,6 +703,7 @@
 #define SPRN_EBBHR	804	/* Event based branch handler register */
 #define SPRN_EBBRR	805	/* Event based branch return register */
 #define SPRN_BESCR	806	/* Branch event status and control register */
+#define SPRN_WORT	895	/* Workload optimization register - thread */
 
 #define SPRN_PMC1	787
 #define SPRN_PMC2	788
@@ -698,6 +720,11 @@
 #define   SIER_SIHV		0x1000000	/* Sampled MSR_HV */
 #define   SIER_SIAR_VALID	0x0400000	/* SIAR contents valid */
 #define   SIER_SDAR_VALID	0x0200000	/* SDAR contents valid */
+#define SPRN_TACR	888
+#define SPRN_TCSCR	889
+#define SPRN_CSIGR	890
+#define SPRN_SPMC1	892
+#define SPRN_SPMC2	893
 
 /* When EBB is enabled, some of MMCR0/MMCR2/SIER are user accessible */
 #define MMCR0_USER_MASK	(MMCR0_FC | MMCR0_PMXE | MMCR0_PMAO)
diff --git a/arch/powerpc/include/asm/sections.h b/arch/powerpc/include/asm/sections.h
index 4ee06fe..d0e784e 100644
--- a/arch/powerpc/include/asm/sections.h
+++ b/arch/powerpc/include/asm/sections.h
@@ -8,6 +8,7 @@
 
 #ifdef __powerpc64__
 
+extern char __start_interrupts[];
 extern char __end_interrupts[];
 
 extern char __prom_init_toc_start[];
@@ -21,6 +22,17 @@
 	return 0;
 }
 
+static inline int overlaps_interrupt_vector_text(unsigned long start,
+							unsigned long end)
+{
+	unsigned long real_start, real_end;
+	real_start = __start_interrupts - _stext;
+	real_end = __end_interrupts - _stext;
+
+	return start < (unsigned long)__va(real_end) &&
+		(unsigned long)__va(real_start) < end;
+}
+
 static inline int overlaps_kernel_text(unsigned long start, unsigned long end)
 {
 	return start < (unsigned long)__init_end &&
diff --git a/arch/powerpc/include/asm/switch_to.h b/arch/powerpc/include/asm/switch_to.h
index aace905..0e83e7d 100644
--- a/arch/powerpc/include/asm/switch_to.h
+++ b/arch/powerpc/include/asm/switch_to.h
@@ -25,10 +25,8 @@
 static inline void save_tar(struct thread_struct *prev) {}
 #endif
 
-extern void load_up_fpu(void);
 extern void enable_kernel_fp(void);
 extern void enable_kernel_altivec(void);
-extern void load_up_altivec(struct task_struct *);
 extern int emulate_altivec(struct pt_regs *);
 extern void __giveup_vsx(struct task_struct *);
 extern void giveup_vsx(struct task_struct *);
diff --git a/arch/powerpc/include/asm/systbl.h b/arch/powerpc/include/asm/systbl.h
index 43523fe..3ddf702 100644
--- a/arch/powerpc/include/asm/systbl.h
+++ b/arch/powerpc/include/asm/systbl.h
@@ -359,3 +359,5 @@
 COMPAT_SYS(process_vm_writev)
 SYSCALL(finit_module)
 SYSCALL(ni_syscall) /* sys_kcmp */
+SYSCALL_SPU(sched_setattr)
+SYSCALL_SPU(sched_getattr)
diff --git a/arch/powerpc/include/asm/unistd.h b/arch/powerpc/include/asm/unistd.h
index 3ca819f..4494f02 100644
--- a/arch/powerpc/include/asm/unistd.h
+++ b/arch/powerpc/include/asm/unistd.h
@@ -12,7 +12,7 @@
 #include <uapi/asm/unistd.h>
 
 
-#define __NR_syscalls		355
+#define __NR_syscalls		357
 
 #define __NR__exit __NR_exit
 #define NR_syscalls	__NR_syscalls
diff --git a/arch/powerpc/include/asm/vdso.h b/arch/powerpc/include/asm/vdso.h
index 0d9cecd..c53f5f6 100644
--- a/arch/powerpc/include/asm/vdso.h
+++ b/arch/powerpc/include/asm/vdso.h
@@ -4,11 +4,11 @@
 #ifdef __KERNEL__
 
 /* Default link addresses for the vDSOs */
-#define VDSO32_LBASE	0x100000
-#define VDSO64_LBASE	0x100000
+#define VDSO32_LBASE	0x0
+#define VDSO64_LBASE	0x0
 
 /* Default map addresses for 32bit vDSO */
-#define VDSO32_MBASE	VDSO32_LBASE
+#define VDSO32_MBASE	0x100000
 
 #define VDSO_VERSION_STRING	LINUX_2.6.15
 
diff --git a/arch/powerpc/include/uapi/asm/kvm.h b/arch/powerpc/include/uapi/asm/kvm.h
index 6836ec7..a6665be 100644
--- a/arch/powerpc/include/uapi/asm/kvm.h
+++ b/arch/powerpc/include/uapi/asm/kvm.h
@@ -545,6 +545,7 @@
 #define KVM_REG_PPC_TCSCR	(KVM_REG_PPC | KVM_REG_SIZE_U64 | 0xb1)
 #define KVM_REG_PPC_PID		(KVM_REG_PPC | KVM_REG_SIZE_U64 | 0xb2)
 #define KVM_REG_PPC_ACOP	(KVM_REG_PPC | KVM_REG_SIZE_U64 | 0xb3)
+#define KVM_REG_PPC_WORT	(KVM_REG_PPC | KVM_REG_SIZE_U64 | 0xb4)
 
 #define KVM_REG_PPC_VRSAVE	(KVM_REG_PPC | KVM_REG_SIZE_U32 | 0xb4)
 #define KVM_REG_PPC_LPCR	(KVM_REG_PPC | KVM_REG_SIZE_U32 | 0xb5)
@@ -553,6 +554,8 @@
 /* Architecture compatibility level */
 #define KVM_REG_PPC_ARCH_COMPAT	(KVM_REG_PPC | KVM_REG_SIZE_U32 | 0xb7)
 
+#define KVM_REG_PPC_DABRX	(KVM_REG_PPC | KVM_REG_SIZE_U32 | 0xb8)
+
 /* Transactional Memory checkpointed state:
  * This is all GPRs, all VSX regs and a subset of SPRs
  */
diff --git a/arch/powerpc/include/uapi/asm/tm.h b/arch/powerpc/include/uapi/asm/tm.h
index 85059a0..5d836b7 100644
--- a/arch/powerpc/include/uapi/asm/tm.h
+++ b/arch/powerpc/include/uapi/asm/tm.h
@@ -6,6 +6,8 @@
  * the failure is persistent.  PAPR saves 0xff-0xe0 for the hypervisor.
  */
 #define TM_CAUSE_PERSISTENT	0x01
+#define TM_CAUSE_KVM_RESCHED	0xe0  /* From PAPR */
+#define TM_CAUSE_KVM_FAC_UNAV	0xe2  /* From PAPR */
 #define TM_CAUSE_RESCHED	0xde
 #define TM_CAUSE_TLBI		0xdc
 #define TM_CAUSE_FAC_UNAV	0xda
diff --git a/arch/powerpc/include/uapi/asm/unistd.h b/arch/powerpc/include/uapi/asm/unistd.h
index 74cb4d7..881bf2e 100644
--- a/arch/powerpc/include/uapi/asm/unistd.h
+++ b/arch/powerpc/include/uapi/asm/unistd.h
@@ -377,6 +377,7 @@
 #define __NR_process_vm_writev	352
 #define __NR_finit_module	353
 #define __NR_kcmp		354
-
+#define __NR_sched_setattr	355
+#define __NR_sched_getattr	356
 
 #endif /* _UAPI_ASM_POWERPC_UNISTD_H_ */
diff --git a/arch/powerpc/kernel/Makefile b/arch/powerpc/kernel/Makefile
index 904d713..fcc9a89 100644
--- a/arch/powerpc/kernel/Makefile
+++ b/arch/powerpc/kernel/Makefile
@@ -48,7 +48,6 @@
 obj-$(CONFIG_PPC_970_NAP)	+= idle_power4.o
 obj-$(CONFIG_PPC_P7_NAP)	+= idle_power7.o
 obj-$(CONFIG_PPC_OF)		+= of_platform.o prom_parse.o
-obj-$(CONFIG_PPC_CLOCK)		+= clock.o
 procfs-y			:= proc_powerpc.o
 obj-$(CONFIG_PROC_FS)		+= $(procfs-y)
 rtaspci-$(CONFIG_PPC64)-$(CONFIG_PCI)	:= rtas_pci.o
diff --git a/arch/powerpc/kernel/asm-offsets.c b/arch/powerpc/kernel/asm-offsets.c
index 8d1d94d..b5aacf7 100644
--- a/arch/powerpc/kernel/asm-offsets.c
+++ b/arch/powerpc/kernel/asm-offsets.c
@@ -438,18 +438,14 @@
 	DEFINE(VCPU_GUEST_PID, offsetof(struct kvm_vcpu, arch.pid));
 	DEFINE(VCPU_GPRS, offsetof(struct kvm_vcpu, arch.gpr));
 	DEFINE(VCPU_VRSAVE, offsetof(struct kvm_vcpu, arch.vrsave));
-	DEFINE(VCPU_FPRS, offsetof(struct kvm_vcpu, arch.fpr));
-	DEFINE(VCPU_FPSCR, offsetof(struct kvm_vcpu, arch.fpscr));
+	DEFINE(VCPU_FPRS, offsetof(struct kvm_vcpu, arch.fp.fpr));
 #ifdef CONFIG_ALTIVEC
-	DEFINE(VCPU_VRS, offsetof(struct kvm_vcpu, arch.vr));
-	DEFINE(VCPU_VSCR, offsetof(struct kvm_vcpu, arch.vscr));
-#endif
-#ifdef CONFIG_VSX
-	DEFINE(VCPU_VSRS, offsetof(struct kvm_vcpu, arch.vsr));
+	DEFINE(VCPU_VRS, offsetof(struct kvm_vcpu, arch.vr.vr));
 #endif
 	DEFINE(VCPU_XER, offsetof(struct kvm_vcpu, arch.xer));
 	DEFINE(VCPU_CTR, offsetof(struct kvm_vcpu, arch.ctr));
 	DEFINE(VCPU_LR, offsetof(struct kvm_vcpu, arch.lr));
+	DEFINE(VCPU_TAR, offsetof(struct kvm_vcpu, arch.tar));
 	DEFINE(VCPU_CR, offsetof(struct kvm_vcpu, arch.cr));
 	DEFINE(VCPU_PC, offsetof(struct kvm_vcpu, arch.pc));
 #ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
@@ -497,16 +493,24 @@
 	DEFINE(VCPU_DAR, offsetof(struct kvm_vcpu, arch.shregs.dar));
 	DEFINE(VCPU_VPA, offsetof(struct kvm_vcpu, arch.vpa.pinned_addr));
 	DEFINE(VCPU_VPA_DIRTY, offsetof(struct kvm_vcpu, arch.vpa.dirty));
+	DEFINE(VCPU_INTR_MSR, offsetof(struct kvm_vcpu, arch.intr_msr));
 #endif
 #ifdef CONFIG_PPC_BOOK3S
 	DEFINE(VCPU_VCPUID, offsetof(struct kvm_vcpu, vcpu_id));
 	DEFINE(VCPU_PURR, offsetof(struct kvm_vcpu, arch.purr));
 	DEFINE(VCPU_SPURR, offsetof(struct kvm_vcpu, arch.spurr));
+	DEFINE(VCPU_IC, offsetof(struct kvm_vcpu, arch.ic));
+	DEFINE(VCPU_VTB, offsetof(struct kvm_vcpu, arch.vtb));
 	DEFINE(VCPU_DSCR, offsetof(struct kvm_vcpu, arch.dscr));
 	DEFINE(VCPU_AMR, offsetof(struct kvm_vcpu, arch.amr));
 	DEFINE(VCPU_UAMOR, offsetof(struct kvm_vcpu, arch.uamor));
+	DEFINE(VCPU_IAMR, offsetof(struct kvm_vcpu, arch.iamr));
 	DEFINE(VCPU_CTRL, offsetof(struct kvm_vcpu, arch.ctrl));
 	DEFINE(VCPU_DABR, offsetof(struct kvm_vcpu, arch.dabr));
+	DEFINE(VCPU_DABRX, offsetof(struct kvm_vcpu, arch.dabrx));
+	DEFINE(VCPU_DAWR, offsetof(struct kvm_vcpu, arch.dawr));
+	DEFINE(VCPU_DAWRX, offsetof(struct kvm_vcpu, arch.dawrx));
+	DEFINE(VCPU_CIABR, offsetof(struct kvm_vcpu, arch.ciabr));
 	DEFINE(VCPU_HFLAGS, offsetof(struct kvm_vcpu, arch.hflags));
 	DEFINE(VCPU_DEC, offsetof(struct kvm_vcpu, arch.dec));
 	DEFINE(VCPU_DEC_EXPIRES, offsetof(struct kvm_vcpu, arch.dec_expires));
@@ -515,8 +519,10 @@
 	DEFINE(VCPU_PRODDED, offsetof(struct kvm_vcpu, arch.prodded));
 	DEFINE(VCPU_MMCR, offsetof(struct kvm_vcpu, arch.mmcr));
 	DEFINE(VCPU_PMC, offsetof(struct kvm_vcpu, arch.pmc));
+	DEFINE(VCPU_SPMC, offsetof(struct kvm_vcpu, arch.spmc));
 	DEFINE(VCPU_SIAR, offsetof(struct kvm_vcpu, arch.siar));
 	DEFINE(VCPU_SDAR, offsetof(struct kvm_vcpu, arch.sdar));
+	DEFINE(VCPU_SIER, offsetof(struct kvm_vcpu, arch.sier));
 	DEFINE(VCPU_SLB, offsetof(struct kvm_vcpu, arch.slb));
 	DEFINE(VCPU_SLB_MAX, offsetof(struct kvm_vcpu, arch.slb_max));
 	DEFINE(VCPU_SLB_NR, offsetof(struct kvm_vcpu, arch.slb_nr));
@@ -524,20 +530,47 @@
 	DEFINE(VCPU_FAULT_DAR, offsetof(struct kvm_vcpu, arch.fault_dar));
 	DEFINE(VCPU_LAST_INST, offsetof(struct kvm_vcpu, arch.last_inst));
 	DEFINE(VCPU_TRAP, offsetof(struct kvm_vcpu, arch.trap));
-	DEFINE(VCPU_PTID, offsetof(struct kvm_vcpu, arch.ptid));
 	DEFINE(VCPU_CFAR, offsetof(struct kvm_vcpu, arch.cfar));
 	DEFINE(VCPU_PPR, offsetof(struct kvm_vcpu, arch.ppr));
+	DEFINE(VCPU_FSCR, offsetof(struct kvm_vcpu, arch.fscr));
+	DEFINE(VCPU_PSPB, offsetof(struct kvm_vcpu, arch.pspb));
+	DEFINE(VCPU_EBBHR, offsetof(struct kvm_vcpu, arch.ebbhr));
+	DEFINE(VCPU_EBBRR, offsetof(struct kvm_vcpu, arch.ebbrr));
+	DEFINE(VCPU_BESCR, offsetof(struct kvm_vcpu, arch.bescr));
+	DEFINE(VCPU_CSIGR, offsetof(struct kvm_vcpu, arch.csigr));
+	DEFINE(VCPU_TACR, offsetof(struct kvm_vcpu, arch.tacr));
+	DEFINE(VCPU_TCSCR, offsetof(struct kvm_vcpu, arch.tcscr));
+	DEFINE(VCPU_ACOP, offsetof(struct kvm_vcpu, arch.acop));
+	DEFINE(VCPU_WORT, offsetof(struct kvm_vcpu, arch.wort));
 	DEFINE(VCPU_SHADOW_SRR1, offsetof(struct kvm_vcpu, arch.shadow_srr1));
 	DEFINE(VCORE_ENTRY_EXIT, offsetof(struct kvmppc_vcore, entry_exit_count));
 	DEFINE(VCORE_NAP_COUNT, offsetof(struct kvmppc_vcore, nap_count));
 	DEFINE(VCORE_IN_GUEST, offsetof(struct kvmppc_vcore, in_guest));
 	DEFINE(VCORE_NAPPING_THREADS, offsetof(struct kvmppc_vcore, napping_threads));
+	DEFINE(VCORE_KVM, offsetof(struct kvmppc_vcore, kvm));
 	DEFINE(VCORE_TB_OFFSET, offsetof(struct kvmppc_vcore, tb_offset));
 	DEFINE(VCORE_LPCR, offsetof(struct kvmppc_vcore, lpcr));
 	DEFINE(VCORE_PCR, offsetof(struct kvmppc_vcore, pcr));
+	DEFINE(VCORE_DPDES, offsetof(struct kvmppc_vcore, dpdes));
 	DEFINE(VCPU_SLB_E, offsetof(struct kvmppc_slb, orige));
 	DEFINE(VCPU_SLB_V, offsetof(struct kvmppc_slb, origv));
 	DEFINE(VCPU_SLB_SIZE, sizeof(struct kvmppc_slb));
+#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
+	DEFINE(VCPU_TFHAR, offsetof(struct kvm_vcpu, arch.tfhar));
+	DEFINE(VCPU_TFIAR, offsetof(struct kvm_vcpu, arch.tfiar));
+	DEFINE(VCPU_TEXASR, offsetof(struct kvm_vcpu, arch.texasr));
+	DEFINE(VCPU_GPR_TM, offsetof(struct kvm_vcpu, arch.gpr_tm));
+	DEFINE(VCPU_FPRS_TM, offsetof(struct kvm_vcpu, arch.fp_tm.fpr));
+	DEFINE(VCPU_VRS_TM, offsetof(struct kvm_vcpu, arch.vr_tm.vr));
+	DEFINE(VCPU_VRSAVE_TM, offsetof(struct kvm_vcpu, arch.vrsave_tm));
+	DEFINE(VCPU_CR_TM, offsetof(struct kvm_vcpu, arch.cr_tm));
+	DEFINE(VCPU_LR_TM, offsetof(struct kvm_vcpu, arch.lr_tm));
+	DEFINE(VCPU_CTR_TM, offsetof(struct kvm_vcpu, arch.ctr_tm));
+	DEFINE(VCPU_AMR_TM, offsetof(struct kvm_vcpu, arch.amr_tm));
+	DEFINE(VCPU_PPR_TM, offsetof(struct kvm_vcpu, arch.ppr_tm));
+	DEFINE(VCPU_DSCR_TM, offsetof(struct kvm_vcpu, arch.dscr_tm));
+	DEFINE(VCPU_TAR_TM, offsetof(struct kvm_vcpu, arch.tar_tm));
+#endif
 
 #ifdef CONFIG_PPC_BOOK3S_64
 #ifdef CONFIG_KVM_BOOK3S_PR_POSSIBLE
@@ -602,6 +635,7 @@
 	HSTATE_FIELD(HSTATE_XICS_PHYS, xics_phys);
 	HSTATE_FIELD(HSTATE_SAVED_XIRR, saved_xirr);
 	HSTATE_FIELD(HSTATE_HOST_IPI, host_ipi);
+	HSTATE_FIELD(HSTATE_PTID, ptid);
 	HSTATE_FIELD(HSTATE_MMCR, host_mmcr);
 	HSTATE_FIELD(HSTATE_PMC, host_pmc);
 	HSTATE_FIELD(HSTATE_PURR, host_purr);
diff --git a/arch/powerpc/kernel/cacheinfo.c b/arch/powerpc/kernel/cacheinfo.c
index abfa011..2912b87 100644
--- a/arch/powerpc/kernel/cacheinfo.c
+++ b/arch/powerpc/kernel/cacheinfo.c
@@ -793,6 +793,9 @@
 {
 	remove_index_dirs(cache_dir);
 
+	/* Remove cache dir from sysfs */
+	kobject_del(cache_dir->kobj);
+
 	kobject_put(cache_dir->kobj);
 
 	kfree(cache_dir);
diff --git a/arch/powerpc/kernel/clock.c b/arch/powerpc/kernel/clock.c
deleted file mode 100644
index a764b47..0000000
--- a/arch/powerpc/kernel/clock.c
+++ /dev/null
@@ -1,82 +0,0 @@
-/*
- * Dummy clk implementations for powerpc.
- * These need to be overridden in platform code.
- */
-
-#include <linux/clk.h>
-#include <linux/err.h>
-#include <linux/errno.h>
-#include <linux/export.h>
-#include <asm/clk_interface.h>
-
-struct clk_interface clk_functions;
-
-struct clk *clk_get(struct device *dev, const char *id)
-{
-	if (clk_functions.clk_get)
-		return clk_functions.clk_get(dev, id);
-	return ERR_PTR(-ENOSYS);
-}
-EXPORT_SYMBOL(clk_get);
-
-void clk_put(struct clk *clk)
-{
-	if (clk_functions.clk_put)
-		clk_functions.clk_put(clk);
-}
-EXPORT_SYMBOL(clk_put);
-
-int clk_enable(struct clk *clk)
-{
-	if (clk_functions.clk_enable)
-		return clk_functions.clk_enable(clk);
-	return -ENOSYS;
-}
-EXPORT_SYMBOL(clk_enable);
-
-void clk_disable(struct clk *clk)
-{
-	if (clk_functions.clk_disable)
-		clk_functions.clk_disable(clk);
-}
-EXPORT_SYMBOL(clk_disable);
-
-unsigned long clk_get_rate(struct clk *clk)
-{
-	if (clk_functions.clk_get_rate)
-		return clk_functions.clk_get_rate(clk);
-	return 0;
-}
-EXPORT_SYMBOL(clk_get_rate);
-
-long clk_round_rate(struct clk *clk, unsigned long rate)
-{
-	if (clk_functions.clk_round_rate)
-		return clk_functions.clk_round_rate(clk, rate);
-	return -ENOSYS;
-}
-EXPORT_SYMBOL(clk_round_rate);
-
-int clk_set_rate(struct clk *clk, unsigned long rate)
-{
-	if (clk_functions.clk_set_rate)
-		return clk_functions.clk_set_rate(clk, rate);
-	return -ENOSYS;
-}
-EXPORT_SYMBOL(clk_set_rate);
-
-struct clk *clk_get_parent(struct clk *clk)
-{
-	if (clk_functions.clk_get_parent)
-		return clk_functions.clk_get_parent(clk);
-	return ERR_PTR(-ENOSYS);
-}
-EXPORT_SYMBOL(clk_get_parent);
-
-int clk_set_parent(struct clk *clk, struct clk *parent)
-{
-	if (clk_functions.clk_set_parent)
-		return clk_functions.clk_set_parent(clk, parent);
-	return -ENOSYS;
-}
-EXPORT_SYMBOL(clk_set_parent);
diff --git a/arch/powerpc/kernel/dma.c b/arch/powerpc/kernel/dma.c
index 8032b97..ee78f6e 100644
--- a/arch/powerpc/kernel/dma.c
+++ b/arch/powerpc/kernel/dma.c
@@ -191,12 +191,10 @@
 
 #define PREALLOC_DMA_DEBUG_ENTRIES (1 << 16)
 
-int dma_set_mask(struct device *dev, u64 dma_mask)
+int __dma_set_mask(struct device *dev, u64 dma_mask)
 {
 	struct dma_map_ops *dma_ops = get_dma_ops(dev);
 
-	if (ppc_md.dma_set_mask)
-		return ppc_md.dma_set_mask(dev, dma_mask);
 	if ((dma_ops != NULL) && (dma_ops->set_dma_mask != NULL))
 		return dma_ops->set_dma_mask(dev, dma_mask);
 	if (!dev->dma_mask || !dma_supported(dev, dma_mask))
@@ -204,6 +202,12 @@
 	*dev->dma_mask = dma_mask;
 	return 0;
 }
+int dma_set_mask(struct device *dev, u64 dma_mask)
+{
+	if (ppc_md.dma_set_mask)
+		return ppc_md.dma_set_mask(dev, dma_mask);
+	return __dma_set_mask(dev, dma_mask);
+}
 EXPORT_SYMBOL(dma_set_mask);
 
 u64 dma_get_required_mask(struct device *dev)
diff --git a/arch/powerpc/kernel/eeh.c b/arch/powerpc/kernel/eeh.c
index 148db72..e7b76a6 100644
--- a/arch/powerpc/kernel/eeh.c
+++ b/arch/powerpc/kernel/eeh.c
@@ -28,6 +28,7 @@
 #include <linux/pci.h>
 #include <linux/proc_fs.h>
 #include <linux/rbtree.h>
+#include <linux/reboot.h>
 #include <linux/seq_file.h>
 #include <linux/spinlock.h>
 #include <linux/export.h>
@@ -89,7 +90,7 @@
 /* Platform dependent EEH operations */
 struct eeh_ops *eeh_ops = NULL;
 
-int eeh_subsystem_enabled;
+bool eeh_subsystem_enabled = false;
 EXPORT_SYMBOL(eeh_subsystem_enabled);
 
 /*
@@ -364,7 +365,7 @@
 
 	eeh_stats.total_mmio_ffs++;
 
-	if (!eeh_subsystem_enabled)
+	if (!eeh_enabled())
 		return 0;
 
 	if (!edev) {
@@ -747,6 +748,17 @@
 	return -EEXIST;
 }
 
+static int eeh_reboot_notifier(struct notifier_block *nb,
+			       unsigned long action, void *unused)
+{
+	eeh_set_enable(false);
+	return NOTIFY_DONE;
+}
+
+static struct notifier_block eeh_reboot_nb = {
+	.notifier_call = eeh_reboot_notifier,
+};
+
 /**
  * eeh_init - EEH initialization
  *
@@ -778,6 +790,14 @@
 	if (machine_is(powernv) && cnt++ <= 0)
 		return ret;
 
+	/* Register reboot notifier */
+	ret = register_reboot_notifier(&eeh_reboot_nb);
+	if (ret) {
+		pr_warn("%s: Failed to register notifier (%d)\n",
+			__func__, ret);
+		return ret;
+	}
+
 	/* call platform initialization function */
 	if (!eeh_ops) {
 		pr_warning("%s: Platform EEH operation not found\n",
@@ -822,7 +842,7 @@
 			return ret;
 	}
 
-	if (eeh_subsystem_enabled)
+	if (eeh_enabled())
 		pr_info("EEH: PCI Enhanced I/O Error Handling Enabled\n");
 	else
 		pr_warning("EEH: No capable adapters found\n");
@@ -897,7 +917,7 @@
 	struct device_node *dn;
 	struct eeh_dev *edev;
 
-	if (!dev || !eeh_subsystem_enabled)
+	if (!dev || !eeh_enabled())
 		return;
 
 	pr_debug("EEH: Adding device %s\n", pci_name(dev));
@@ -1005,7 +1025,7 @@
 {
 	struct eeh_dev *edev;
 
-	if (!dev || !eeh_subsystem_enabled)
+	if (!dev || !eeh_enabled())
 		return;
 	edev = pci_dev_to_eeh_dev(dev);
 
@@ -1045,7 +1065,7 @@
 
 static int proc_eeh_show(struct seq_file *m, void *v)
 {
-	if (0 == eeh_subsystem_enabled) {
+	if (!eeh_enabled()) {
 		seq_printf(m, "EEH Subsystem is globally disabled\n");
 		seq_printf(m, "eeh_total_mmio_ffs=%llu\n", eeh_stats.total_mmio_ffs);
 	} else {
diff --git a/arch/powerpc/kernel/eeh_driver.c b/arch/powerpc/kernel/eeh_driver.c
index 7bb30dc..fdc679d 100644
--- a/arch/powerpc/kernel/eeh_driver.c
+++ b/arch/powerpc/kernel/eeh_driver.c
@@ -362,9 +362,13 @@
 	 */
 	if (!dev || (dev->hdr_type & PCI_HEADER_TYPE_BRIDGE))
 		return NULL;
+
 	driver = eeh_pcid_get(dev);
-	if (driver && driver->err_handler)
-		return NULL;
+	if (driver) {
+		eeh_pcid_put(dev);
+		if (driver->err_handler)
+			return NULL;
+	}
 
 	/* Remove it from PCI subsystem */
 	pr_debug("EEH: Removing %s without EEH sensitive driver\n",
diff --git a/arch/powerpc/kernel/iommu.c b/arch/powerpc/kernel/iommu.c
index d773dd4..88e3ec6 100644
--- a/arch/powerpc/kernel/iommu.c
+++ b/arch/powerpc/kernel/iommu.c
@@ -1088,6 +1088,14 @@
 	memset(tbl->it_map, 0xff, sz);
 	iommu_clear_tces_and_put_pages(tbl, tbl->it_offset, tbl->it_size);
 
+	/*
+	 * Disable iommu bypass, otherwise the user can DMA to all of
+	 * our physical memory via the bypass window instead of just
+	 * the pages that have been explicitly mapped into the iommu
+	 */
+	if (tbl->set_bypass)
+		tbl->set_bypass(tbl, false);
+
 	return 0;
 }
 EXPORT_SYMBOL_GPL(iommu_take_ownership);
@@ -1102,6 +1110,10 @@
 	/* Restore bit#0 set by iommu_init_table() */
 	if (tbl->it_offset == 0)
 		set_bit(0, tbl->it_map);
+
+	/* The kernel owns the device now, we can restore the iommu bypass */
+	if (tbl->set_bypass)
+		tbl->set_bypass(tbl, true);
 }
 EXPORT_SYMBOL_GPL(iommu_release_ownership);
 
diff --git a/arch/powerpc/kernel/irq.c b/arch/powerpc/kernel/irq.c
index 9729b23..1d0848b 100644
--- a/arch/powerpc/kernel/irq.c
+++ b/arch/powerpc/kernel/irq.c
@@ -559,8 +559,13 @@
 #ifdef CONFIG_PPC64
 		cpu_nr = i;
 #else
+#ifdef CONFIG_SMP
 		cpu_nr = get_hard_smp_processor_id(i);
+#else
+		cpu_nr = 0;
 #endif
+#endif
+
 		memset((void *)critirq_ctx[cpu_nr], 0, THREAD_SIZE);
 		tp = critirq_ctx[cpu_nr];
 		tp->cpu = cpu_nr;
diff --git a/arch/powerpc/kernel/kvm.c b/arch/powerpc/kernel/kvm.c
index db28032..6a01752 100644
--- a/arch/powerpc/kernel/kvm.c
+++ b/arch/powerpc/kernel/kvm.c
@@ -413,13 +413,13 @@
 {
 	u32 *features = data;
 
-	ulong in[8];
+	ulong in[8] = {0};
 	ulong out[8];
 
 	in[0] = KVM_MAGIC_PAGE;
 	in[1] = KVM_MAGIC_PAGE;
 
-	kvm_hypercall(in, out, KVM_HCALL_TOKEN(KVM_HC_PPC_MAP_MAGIC_PAGE));
+	epapr_hypercall(in, out, KVM_HCALL_TOKEN(KVM_HC_PPC_MAP_MAGIC_PAGE));
 
 	*features = out[0];
 }
@@ -711,43 +711,6 @@
 			 kvm_patching_worked ? "worked" : "failed");
 }
 
-unsigned long kvm_hypercall(unsigned long *in,
-			    unsigned long *out,
-			    unsigned long nr)
-{
-	unsigned long register r0 asm("r0");
-	unsigned long register r3 asm("r3") = in[0];
-	unsigned long register r4 asm("r4") = in[1];
-	unsigned long register r5 asm("r5") = in[2];
-	unsigned long register r6 asm("r6") = in[3];
-	unsigned long register r7 asm("r7") = in[4];
-	unsigned long register r8 asm("r8") = in[5];
-	unsigned long register r9 asm("r9") = in[6];
-	unsigned long register r10 asm("r10") = in[7];
-	unsigned long register r11 asm("r11") = nr;
-	unsigned long register r12 asm("r12");
-
-	asm volatile("bl	epapr_hypercall_start"
-		     : "=r"(r0), "=r"(r3), "=r"(r4), "=r"(r5), "=r"(r6),
-		       "=r"(r7), "=r"(r8), "=r"(r9), "=r"(r10), "=r"(r11),
-		       "=r"(r12)
-		     : "r"(r3), "r"(r4), "r"(r5), "r"(r6), "r"(r7), "r"(r8),
-		       "r"(r9), "r"(r10), "r"(r11)
-		     : "memory", "cc", "xer", "ctr", "lr");
-
-	out[0] = r4;
-	out[1] = r5;
-	out[2] = r6;
-	out[3] = r7;
-	out[4] = r8;
-	out[5] = r9;
-	out[6] = r10;
-	out[7] = r11;
-
-	return r3;
-}
-EXPORT_SYMBOL_GPL(kvm_hypercall);
-
 static __init void kvm_free_tmp(void)
 {
 	free_reserved_area(&kvm_tmp[kvm_tmp_index],
diff --git a/arch/powerpc/kernel/machine_kexec.c b/arch/powerpc/kernel/machine_kexec.c
index 75d4f73..015ae55 100644
--- a/arch/powerpc/kernel/machine_kexec.c
+++ b/arch/powerpc/kernel/machine_kexec.c
@@ -196,7 +196,9 @@
 
 /* Values we need to export to the second kernel via the device tree. */
 static phys_addr_t kernel_end;
+static phys_addr_t crashk_base;
 static phys_addr_t crashk_size;
+static unsigned long long mem_limit;
 
 static struct property kernel_end_prop = {
 	.name = "linux,kernel-end",
@@ -207,7 +209,7 @@
 static struct property crashk_base_prop = {
 	.name = "linux,crashkernel-base",
 	.length = sizeof(phys_addr_t),
-	.value = &crashk_res.start,
+	.value = &crashk_base,
 };
 
 static struct property crashk_size_prop = {
@@ -219,9 +221,11 @@
 static struct property memory_limit_prop = {
 	.name = "linux,memory-limit",
 	.length = sizeof(unsigned long long),
-	.value = &memory_limit,
+	.value = &mem_limit,
 };
 
+#define cpu_to_be_ulong	__PASTE(cpu_to_be, BITS_PER_LONG)
+
 static void __init export_crashk_values(struct device_node *node)
 {
 	struct property *prop;
@@ -237,8 +241,9 @@
 		of_remove_property(node, prop);
 
 	if (crashk_res.start != 0) {
+		crashk_base = cpu_to_be_ulong(crashk_res.start);
 		of_add_property(node, &crashk_base_prop);
-		crashk_size = resource_size(&crashk_res);
+		crashk_size = cpu_to_be_ulong(resource_size(&crashk_res));
 		of_add_property(node, &crashk_size_prop);
 	}
 
@@ -246,6 +251,7 @@
 	 * memory_limit is required by the kexec-tools to limit the
 	 * crash regions to the actual memory used.
 	 */
+	mem_limit = cpu_to_be_ulong(memory_limit);
 	of_update_property(node, &memory_limit_prop);
 }
 
@@ -264,7 +270,7 @@
 		of_remove_property(node, prop);
 
 	/* information needed by userspace when using default_machine_kexec */
-	kernel_end = __pa(_end);
+	kernel_end = cpu_to_be_ulong(__pa(_end));
 	of_add_property(node, &kernel_end_prop);
 
 	export_crashk_values(node);
diff --git a/arch/powerpc/kernel/machine_kexec_64.c b/arch/powerpc/kernel/machine_kexec_64.c
index be4e6d6..59d229a 100644
--- a/arch/powerpc/kernel/machine_kexec_64.c
+++ b/arch/powerpc/kernel/machine_kexec_64.c
@@ -369,6 +369,7 @@
 
 /* Values we need to export to the second kernel via the device tree. */
 static unsigned long htab_base;
+static unsigned long htab_size;
 
 static struct property htab_base_prop = {
 	.name = "linux,htab-base",
@@ -379,7 +380,7 @@
 static struct property htab_size_prop = {
 	.name = "linux,htab-size",
 	.length = sizeof(unsigned long),
-	.value = &htab_size_bytes,
+	.value = &htab_size,
 };
 
 static int __init export_htab_values(void)
@@ -403,8 +404,9 @@
 	if (prop)
 		of_remove_property(node, prop);
 
-	htab_base = __pa(htab_address);
+	htab_base = cpu_to_be64(__pa(htab_address));
 	of_add_property(node, &htab_base_prop);
+	htab_size = cpu_to_be64(htab_size_bytes);
 	of_add_property(node, &htab_size_prop);
 
 	of_node_put(node);
diff --git a/arch/powerpc/kernel/misc_32.S b/arch/powerpc/kernel/misc_32.S
index 879f096..7c6bb4b 100644
--- a/arch/powerpc/kernel/misc_32.S
+++ b/arch/powerpc/kernel/misc_32.S
@@ -57,11 +57,14 @@
 	mtlr	r0
 	blr
 
+/*
+ * void call_do_irq(struct pt_regs *regs, struct thread_info *irqtp);
+ */
 _GLOBAL(call_do_irq)
 	mflr	r0
 	stw	r0,4(r1)
 	lwz	r10,THREAD+KSP_LIMIT(r2)
-	addi	r11,r3,THREAD_INFO_GAP
+	addi	r11,r4,THREAD_INFO_GAP
 	stwu	r1,THREAD_SIZE-STACK_FRAME_OVERHEAD(r4)
 	mr	r1,r4
 	stw	r10,8(r1)
diff --git a/arch/powerpc/kernel/process.c b/arch/powerpc/kernel/process.c
index 64b7a6e..8d4c247f1 100644
--- a/arch/powerpc/kernel/process.c
+++ b/arch/powerpc/kernel/process.c
@@ -811,7 +811,7 @@
  * schedule DABR
  */
 #ifndef CONFIG_HAVE_HW_BREAKPOINT
-	if (unlikely(hw_brk_match(&__get_cpu_var(current_brk), &new->thread.hw_brk)))
+	if (unlikely(!hw_brk_match(&__get_cpu_var(current_brk), &new->thread.hw_brk)))
 		set_breakpoint(&new->thread.hw_brk);
 #endif /* CONFIG_HAVE_HW_BREAKPOINT */
 #endif
diff --git a/arch/powerpc/kernel/reloc_64.S b/arch/powerpc/kernel/reloc_64.S
index b47a0e1..1482327 100644
--- a/arch/powerpc/kernel/reloc_64.S
+++ b/arch/powerpc/kernel/reloc_64.S
@@ -69,8 +69,8 @@
 	 * R_PPC64_RELATIVE ones.
 	 */
 	mtctr	r8
-5:	lwz	r0,12(9)	/* ELF64_R_TYPE(reloc->r_info) */
-	cmpwi	r0,R_PPC64_RELATIVE
+5:	ld	r0,8(9)		/* ELF64_R_TYPE(reloc->r_info) */
+	cmpdi	r0,R_PPC64_RELATIVE
 	bne	6f
 	ld	r6,0(r9)	/* reloc->r_offset */
 	ld	r0,16(r9)	/* reloc->r_addend */
diff --git a/arch/powerpc/kernel/setup_32.c b/arch/powerpc/kernel/setup_32.c
index 2b0da27..04cc4fc 100644
--- a/arch/powerpc/kernel/setup_32.c
+++ b/arch/powerpc/kernel/setup_32.c
@@ -247,7 +247,12 @@
 	/* interrupt stacks must be in lowmem, we get that for free on ppc32
 	 * as the memblock is limited to lowmem by MEMBLOCK_REAL_LIMIT */
 	for_each_possible_cpu(i) {
+#ifdef CONFIG_SMP
 		hw_cpu = get_hard_smp_processor_id(i);
+#else
+		hw_cpu = 0;
+#endif
+
 		critirq_ctx[hw_cpu] = (struct thread_info *)
 			__va(memblock_alloc(THREAD_SIZE, THREAD_SIZE));
 #ifdef CONFIG_BOOKE
diff --git a/arch/powerpc/kernel/signal_32.c b/arch/powerpc/kernel/signal_32.c
index 6ce69e6..a67e00a 100644
--- a/arch/powerpc/kernel/signal_32.c
+++ b/arch/powerpc/kernel/signal_32.c
@@ -1022,29 +1022,24 @@
 #ifdef CONFIG_PPC_TRANSACTIONAL_MEM
 	tm_frame = &rt_sf->uc_transact.uc_mcontext;
 	if (MSR_TM_ACTIVE(regs->msr)) {
+		if (__put_user((unsigned long)&rt_sf->uc_transact,
+			       &rt_sf->uc.uc_link) ||
+		    __put_user((unsigned long)tm_frame,
+			       &rt_sf->uc_transact.uc_regs))
+			goto badframe;
 		if (save_tm_user_regs(regs, frame, tm_frame, sigret))
 			goto badframe;
 	}
 	else
 #endif
 	{
+		if (__put_user(0, &rt_sf->uc.uc_link))
+			goto badframe;
 		if (save_user_regs(regs, frame, tm_frame, sigret, 1))
 			goto badframe;
 	}
 	regs->link = tramp;
 
-#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
-	if (MSR_TM_ACTIVE(regs->msr)) {
-		if (__put_user((unsigned long)&rt_sf->uc_transact,
-			       &rt_sf->uc.uc_link)
-		    || __put_user((unsigned long)tm_frame, &rt_sf->uc_transact.uc_regs))
-			goto badframe;
-	}
-	else
-#endif
-		if (__put_user(0, &rt_sf->uc.uc_link))
-			goto badframe;
-
 	current->thread.fp_state.fpscr = 0;	/* turn off all fp exceptions */
 
 	/* create a stack frame for the caller of the handler */
diff --git a/arch/powerpc/kernel/sysfs.c b/arch/powerpc/kernel/sysfs.c
index d4a43e6..97e1dc9 100644
--- a/arch/powerpc/kernel/sysfs.c
+++ b/arch/powerpc/kernel/sysfs.c
@@ -51,8 +51,6 @@
 		return -EINVAL;
 
 	per_cpu(smt_snooze_delay, cpu->dev.id) = snooze;
-	update_smt_snooze_delay(cpu->dev.id, snooze);
-
 	return count;
 }
 
diff --git a/arch/powerpc/kernel/vdso32/vdso32_wrapper.S b/arch/powerpc/kernel/vdso32/vdso32_wrapper.S
index 79683d0..6ac107a 100644
--- a/arch/powerpc/kernel/vdso32/vdso32_wrapper.S
+++ b/arch/powerpc/kernel/vdso32/vdso32_wrapper.S
@@ -6,7 +6,7 @@
 	.globl vdso32_start, vdso32_end
 	.balign PAGE_SIZE
 vdso32_start:
-	.incbin "arch/powerpc/kernel/vdso32/vdso32.so"
+	.incbin "arch/powerpc/kernel/vdso32/vdso32.so.dbg"
 	.balign PAGE_SIZE
 vdso32_end:
 
diff --git a/arch/powerpc/kernel/vdso64/vdso64_wrapper.S b/arch/powerpc/kernel/vdso64/vdso64_wrapper.S
index 8df9e24..df60fca 100644
--- a/arch/powerpc/kernel/vdso64/vdso64_wrapper.S
+++ b/arch/powerpc/kernel/vdso64/vdso64_wrapper.S
@@ -6,7 +6,7 @@
 	.globl vdso64_start, vdso64_end
 	.balign PAGE_SIZE
 vdso64_start:
-	.incbin "arch/powerpc/kernel/vdso64/vdso64.so"
+	.incbin "arch/powerpc/kernel/vdso64/vdso64.so.dbg"
 	.balign PAGE_SIZE
 vdso64_end:
 
diff --git a/arch/powerpc/kvm/44x.c b/arch/powerpc/kvm/44x.c
index 93221e8..9cb4b0a 100644
--- a/arch/powerpc/kvm/44x.c
+++ b/arch/powerpc/kvm/44x.c
@@ -21,6 +21,8 @@
 #include <linux/slab.h>
 #include <linux/err.h>
 #include <linux/export.h>
+#include <linux/module.h>
+#include <linux/miscdevice.h>
 
 #include <asm/reg.h>
 #include <asm/cputable.h>
@@ -231,3 +233,5 @@
 
 module_init(kvmppc_44x_init);
 module_exit(kvmppc_44x_exit);
+MODULE_ALIAS_MISCDEV(KVM_MINOR);
+MODULE_ALIAS("devname:kvm");
diff --git a/arch/powerpc/kvm/book3s.c b/arch/powerpc/kvm/book3s.c
index 8912608..94e597e 100644
--- a/arch/powerpc/kvm/book3s.c
+++ b/arch/powerpc/kvm/book3s.c
@@ -18,6 +18,8 @@
 #include <linux/err.h>
 #include <linux/export.h>
 #include <linux/slab.h>
+#include <linux/module.h>
+#include <linux/miscdevice.h>
 
 #include <asm/reg.h>
 #include <asm/cputable.h>
@@ -575,10 +577,10 @@
 			break;
 		case KVM_REG_PPC_FPR0 ... KVM_REG_PPC_FPR31:
 			i = reg->id - KVM_REG_PPC_FPR0;
-			val = get_reg_val(reg->id, vcpu->arch.fpr[i]);
+			val = get_reg_val(reg->id, VCPU_FPR(vcpu, i));
 			break;
 		case KVM_REG_PPC_FPSCR:
-			val = get_reg_val(reg->id, vcpu->arch.fpscr);
+			val = get_reg_val(reg->id, vcpu->arch.fp.fpscr);
 			break;
 #ifdef CONFIG_ALTIVEC
 		case KVM_REG_PPC_VR0 ... KVM_REG_PPC_VR31:
@@ -586,19 +588,30 @@
 				r = -ENXIO;
 				break;
 			}
-			val.vval = vcpu->arch.vr[reg->id - KVM_REG_PPC_VR0];
+			val.vval = vcpu->arch.vr.vr[reg->id - KVM_REG_PPC_VR0];
 			break;
 		case KVM_REG_PPC_VSCR:
 			if (!cpu_has_feature(CPU_FTR_ALTIVEC)) {
 				r = -ENXIO;
 				break;
 			}
-			val = get_reg_val(reg->id, vcpu->arch.vscr.u[3]);
+			val = get_reg_val(reg->id, vcpu->arch.vr.vscr.u[3]);
 			break;
 		case KVM_REG_PPC_VRSAVE:
 			val = get_reg_val(reg->id, vcpu->arch.vrsave);
 			break;
 #endif /* CONFIG_ALTIVEC */
+#ifdef CONFIG_VSX
+		case KVM_REG_PPC_VSR0 ... KVM_REG_PPC_VSR31:
+			if (cpu_has_feature(CPU_FTR_VSX)) {
+				long int i = reg->id - KVM_REG_PPC_VSR0;
+				val.vsxval[0] = vcpu->arch.fp.fpr[i][0];
+				val.vsxval[1] = vcpu->arch.fp.fpr[i][1];
+			} else {
+				r = -ENXIO;
+			}
+			break;
+#endif /* CONFIG_VSX */
 		case KVM_REG_PPC_DEBUG_INST: {
 			u32 opcode = INS_TW;
 			r = copy_to_user((u32 __user *)(long)reg->addr,
@@ -654,10 +667,10 @@
 			break;
 		case KVM_REG_PPC_FPR0 ... KVM_REG_PPC_FPR31:
 			i = reg->id - KVM_REG_PPC_FPR0;
-			vcpu->arch.fpr[i] = set_reg_val(reg->id, val);
+			VCPU_FPR(vcpu, i) = set_reg_val(reg->id, val);
 			break;
 		case KVM_REG_PPC_FPSCR:
-			vcpu->arch.fpscr = set_reg_val(reg->id, val);
+			vcpu->arch.fp.fpscr = set_reg_val(reg->id, val);
 			break;
 #ifdef CONFIG_ALTIVEC
 		case KVM_REG_PPC_VR0 ... KVM_REG_PPC_VR31:
@@ -665,14 +678,14 @@
 				r = -ENXIO;
 				break;
 			}
-			vcpu->arch.vr[reg->id - KVM_REG_PPC_VR0] = val.vval;
+			vcpu->arch.vr.vr[reg->id - KVM_REG_PPC_VR0] = val.vval;
 			break;
 		case KVM_REG_PPC_VSCR:
 			if (!cpu_has_feature(CPU_FTR_ALTIVEC)) {
 				r = -ENXIO;
 				break;
 			}
-			vcpu->arch.vscr.u[3] = set_reg_val(reg->id, val);
+			vcpu->arch.vr.vscr.u[3] = set_reg_val(reg->id, val);
 			break;
 		case KVM_REG_PPC_VRSAVE:
 			if (!cpu_has_feature(CPU_FTR_ALTIVEC)) {
@@ -682,6 +695,17 @@
 			vcpu->arch.vrsave = set_reg_val(reg->id, val);
 			break;
 #endif /* CONFIG_ALTIVEC */
+#ifdef CONFIG_VSX
+		case KVM_REG_PPC_VSR0 ... KVM_REG_PPC_VSR31:
+			if (cpu_has_feature(CPU_FTR_VSX)) {
+				long int i = reg->id - KVM_REG_PPC_VSR0;
+				vcpu->arch.fp.fpr[i][0] = val.vsxval[0];
+				vcpu->arch.fp.fpr[i][1] = val.vsxval[1];
+			} else {
+				r = -ENXIO;
+			}
+			break;
+#endif /* CONFIG_VSX */
 #ifdef CONFIG_KVM_XICS
 		case KVM_REG_PPC_ICP_STATE:
 			if (!vcpu->arch.icp) {
@@ -879,3 +903,9 @@
 
 module_init(kvmppc_book3s_init);
 module_exit(kvmppc_book3s_exit);
+
+/* On 32bit this is our one and only kernel module */
+#ifdef CONFIG_KVM_BOOK3S_32
+MODULE_ALIAS_MISCDEV(KVM_MINOR);
+MODULE_ALIAS("devname:kvm");
+#endif
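
The new KVM_REG_PPC_VSR0..VSR31 cases above are reached through the ONE_REG interface; each VSR is a 128-bit register exchanged as two 64-bit halves overlaid on fp.fpr[i]. A hedged userspace-side sketch of reading one of them (QEMU-style usage, not part of this patch; the vcpu file-descriptor handling is assumed):

#include <linux/kvm.h>
#include <sys/ioctl.h>

static int read_vsr(int vcpu_fd, int n, __u64 halves[2])
{
	struct kvm_one_reg reg = {
		.id   = KVM_REG_PPC_VSR0 + n,		/* 128-bit register */
		.addr = (__u64)(unsigned long)halves,	/* two doublewords  */
	};

	return ioctl(vcpu_fd, KVM_GET_ONE_REG, &reg);
}
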
diff --git a/arch/powerpc/kvm/book3s_32_mmu_host.c b/arch/powerpc/kvm/book3s_32_mmu_host.c
index 3a0abd2..5fac89d 100644
--- a/arch/powerpc/kvm/book3s_32_mmu_host.c
+++ b/arch/powerpc/kvm/book3s_32_mmu_host.c
@@ -243,6 +243,11 @@
 	/* Now tell our Shadow PTE code about the new page */
 
 	pte = kvmppc_mmu_hpte_cache_next(vcpu);
+	if (!pte) {
+		kvm_release_pfn_clean(hpaddr >> PAGE_SHIFT);
+		r = -EAGAIN;
+		goto out;
+	}
 
 	dprintk_mmu("KVM: %c%c Map 0x%llx: [%lx] 0x%llx (0x%llx) -> %lx\n",
 		    orig_pte->may_write ? 'w' : '-',
diff --git a/arch/powerpc/kvm/book3s_64_mmu_hv.c b/arch/powerpc/kvm/book3s_64_mmu_hv.c
index c5d1484..303ece7 100644
--- a/arch/powerpc/kvm/book3s_64_mmu_hv.c
+++ b/arch/powerpc/kvm/book3s_64_mmu_hv.c
@@ -262,7 +262,7 @@
 
 static void kvmppc_mmu_book3s_64_hv_reset_msr(struct kvm_vcpu *vcpu)
 {
-	kvmppc_set_msr(vcpu, MSR_SF | MSR_ME);
+	kvmppc_set_msr(vcpu, vcpu->arch.intr_msr);
 }
 
 /*
@@ -562,7 +562,7 @@
 	 * we just return and retry the instruction.
 	 */
 
-	if (instruction_is_store(vcpu->arch.last_inst) != !!is_store)
+	if (instruction_is_store(kvmppc_get_last_inst(vcpu)) != !!is_store)
 		return RESUME_GUEST;
 
 	/*
diff --git a/arch/powerpc/kvm/book3s_exports.c b/arch/powerpc/kvm/book3s_exports.c
index 852989a..20d4ea8 100644
--- a/arch/powerpc/kvm/book3s_exports.c
+++ b/arch/powerpc/kvm/book3s_exports.c
@@ -25,9 +25,5 @@
 #endif
 #ifdef CONFIG_KVM_BOOK3S_PR_POSSIBLE
 EXPORT_SYMBOL_GPL(kvmppc_entry_trampoline);
-EXPORT_SYMBOL_GPL(kvmppc_load_up_fpu);
-#ifdef CONFIG_ALTIVEC
-EXPORT_SYMBOL_GPL(kvmppc_load_up_altivec);
-#endif
 #endif
 
diff --git a/arch/powerpc/kvm/book3s_hv.c b/arch/powerpc/kvm/book3s_hv.c
index 3818bd9..17fc949 100644
--- a/arch/powerpc/kvm/book3s_hv.c
+++ b/arch/powerpc/kvm/book3s_hv.c
@@ -31,6 +31,7 @@
 #include <linux/spinlock.h>
 #include <linux/page-flags.h>
 #include <linux/srcu.h>
+#include <linux/miscdevice.h>
 
 #include <asm/reg.h>
 #include <asm/cputable.h>
@@ -85,10 +86,13 @@
 
 	/* CPU points to the first thread of the core */
 	if (cpu != me && cpu >= 0 && cpu < nr_cpu_ids) {
+#ifdef CONFIG_KVM_XICS
 		int real_cpu = cpu + vcpu->arch.ptid;
 		if (paca[real_cpu].kvm_hstate.xics_phys)
 			xics_wake_cpu(real_cpu);
-		else if (cpu_online(cpu))
+		else
+#endif
+		if (cpu_online(cpu))
 			smp_send_reschedule(cpu);
 	}
 	put_cpu();
@@ -182,14 +186,28 @@
 
 		switch (arch_compat) {
 		case PVR_ARCH_205:
-			pcr = PCR_ARCH_205;
+			/*
+			 * If an arch bit is set in PCR, all the defined
+			 * higher-order arch bits also have to be set.
+			 */
+			pcr = PCR_ARCH_206 | PCR_ARCH_205;
 			break;
 		case PVR_ARCH_206:
 		case PVR_ARCH_206p:
+			pcr = PCR_ARCH_206;
+			break;
+		case PVR_ARCH_207:
 			break;
 		default:
 			return -EINVAL;
 		}
+
+		if (!cpu_has_feature(CPU_FTR_ARCH_207S)) {
+			/* POWER7 can't emulate POWER8 */
+			if (!(pcr & PCR_ARCH_206))
+				return -EINVAL;
+			pcr &= ~PCR_ARCH_206;
+		}
 	}
 
 	spin_lock(&vc->lock);
@@ -637,6 +655,7 @@
 		r = RESUME_GUEST;
 		break;
 	case BOOK3S_INTERRUPT_EXTERNAL:
+	case BOOK3S_INTERRUPT_H_DOORBELL:
 		vcpu->stat.ext_intr_exits++;
 		r = RESUME_GUEST;
 		break;
@@ -673,12 +692,10 @@
 		/* hcall - punt to userspace */
 		int i;
 
-		if (vcpu->arch.shregs.msr & MSR_PR) {
-			/* sc 1 from userspace - reflect to guest syscall */
-			kvmppc_book3s_queue_irqprio(vcpu, BOOK3S_INTERRUPT_SYSCALL);
-			r = RESUME_GUEST;
-			break;
-		}
+		/* A hypercall with MSR_PR set has already been handled in
+		 * real mode and never reaches this point.
+		 */
+
 		run->papr_hcall.nr = kvmppc_get_gpr(vcpu, 3);
 		for (i = 0; i < 9; ++i)
 			run->papr_hcall.args[i] = kvmppc_get_gpr(vcpu, 4 + i);
@@ -708,7 +725,16 @@
 	 * we don't emulate any guest instructions at this stage.
 	 */
 	case BOOK3S_INTERRUPT_H_EMUL_ASSIST:
-		kvmppc_core_queue_program(vcpu, 0x80000);
+		kvmppc_core_queue_program(vcpu, SRR1_PROGILL);
+		r = RESUME_GUEST;
+		break;
+	/*
+	 * This occurs if the guest (kernel or userspace), does something that
+	 * is prohibited by HFSCR.  We just generate a program interrupt to
+	 * the guest.
+	 */
+	case BOOK3S_INTERRUPT_H_FAC_UNAVAIL:
+		kvmppc_core_queue_program(vcpu, SRR1_PROGILL);
 		r = RESUME_GUEST;
 		break;
 	default:
@@ -766,10 +792,34 @@
 
 	spin_lock(&vc->lock);
 	/*
+	 * If ILE (interrupt little-endian) has changed, update the
+	 * MSR_LE bit in the intr_msr for each vcpu in this vcore.
+	 */
+	if ((new_lpcr & LPCR_ILE) != (vc->lpcr & LPCR_ILE)) {
+		struct kvm *kvm = vcpu->kvm;
+		struct kvm_vcpu *vcpu;
+		int i;
+
+		mutex_lock(&kvm->lock);
+		kvm_for_each_vcpu(i, vcpu, kvm) {
+			if (vcpu->arch.vcore != vc)
+				continue;
+			if (new_lpcr & LPCR_ILE)
+				vcpu->arch.intr_msr |= MSR_LE;
+			else
+				vcpu->arch.intr_msr &= ~MSR_LE;
+		}
+		mutex_unlock(&kvm->lock);
+	}
+
+	/*
 	 * Userspace can only modify DPFD (default prefetch depth),
 	 * ILE (interrupt little-endian) and TC (translation control).
+	 * On POWER8 userspace can also modify AIL (alt. interrupt loc.)
 	 */
 	mask = LPCR_DPFD | LPCR_ILE | LPCR_TC;
+	if (cpu_has_feature(CPU_FTR_ARCH_207S))
+		mask |= LPCR_AIL;
 	vc->lpcr = (vc->lpcr & ~mask) | (new_lpcr & mask);
 	spin_unlock(&vc->lock);
 }
@@ -787,6 +837,9 @@
 	case KVM_REG_PPC_DABR:
 		*val = get_reg_val(id, vcpu->arch.dabr);
 		break;
+	case KVM_REG_PPC_DABRX:
+		*val = get_reg_val(id, vcpu->arch.dabrx);
+		break;
 	case KVM_REG_PPC_DSCR:
 		*val = get_reg_val(id, vcpu->arch.dscr);
 		break;
@@ -802,7 +855,7 @@
 	case KVM_REG_PPC_UAMOR:
 		*val = get_reg_val(id, vcpu->arch.uamor);
 		break;
-	case KVM_REG_PPC_MMCR0 ... KVM_REG_PPC_MMCRA:
+	case KVM_REG_PPC_MMCR0 ... KVM_REG_PPC_MMCRS:
 		i = id - KVM_REG_PPC_MMCR0;
 		*val = get_reg_val(id, vcpu->arch.mmcr[i]);
 		break;
@@ -810,33 +863,87 @@
 		i = id - KVM_REG_PPC_PMC1;
 		*val = get_reg_val(id, vcpu->arch.pmc[i]);
 		break;
+	case KVM_REG_PPC_SPMC1 ... KVM_REG_PPC_SPMC2:
+		i = id - KVM_REG_PPC_SPMC1;
+		*val = get_reg_val(id, vcpu->arch.spmc[i]);
+		break;
 	case KVM_REG_PPC_SIAR:
 		*val = get_reg_val(id, vcpu->arch.siar);
 		break;
 	case KVM_REG_PPC_SDAR:
 		*val = get_reg_val(id, vcpu->arch.sdar);
 		break;
-#ifdef CONFIG_VSX
-	case KVM_REG_PPC_FPR0 ... KVM_REG_PPC_FPR31:
-		if (cpu_has_feature(CPU_FTR_VSX)) {
-			/* VSX => FP reg i is stored in arch.vsr[2*i] */
-			long int i = id - KVM_REG_PPC_FPR0;
-			*val = get_reg_val(id, vcpu->arch.vsr[2 * i]);
-		} else {
-			/* let generic code handle it */
-			r = -EINVAL;
-		}
+	case KVM_REG_PPC_SIER:
+		*val = get_reg_val(id, vcpu->arch.sier);
 		break;
-	case KVM_REG_PPC_VSR0 ... KVM_REG_PPC_VSR31:
-		if (cpu_has_feature(CPU_FTR_VSX)) {
-			long int i = id - KVM_REG_PPC_VSR0;
-			val->vsxval[0] = vcpu->arch.vsr[2 * i];
-			val->vsxval[1] = vcpu->arch.vsr[2 * i + 1];
-		} else {
-			r = -ENXIO;
-		}
+	case KVM_REG_PPC_IAMR:
+		*val = get_reg_val(id, vcpu->arch.iamr);
 		break;
-#endif /* CONFIG_VSX */
+#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
+	case KVM_REG_PPC_TFHAR:
+		*val = get_reg_val(id, vcpu->arch.tfhar);
+		break;
+	case KVM_REG_PPC_TFIAR:
+		*val = get_reg_val(id, vcpu->arch.tfiar);
+		break;
+	case KVM_REG_PPC_TEXASR:
+		*val = get_reg_val(id, vcpu->arch.texasr);
+		break;
+#endif
+	case KVM_REG_PPC_FSCR:
+		*val = get_reg_val(id, vcpu->arch.fscr);
+		break;
+	case KVM_REG_PPC_PSPB:
+		*val = get_reg_val(id, vcpu->arch.pspb);
+		break;
+	case KVM_REG_PPC_EBBHR:
+		*val = get_reg_val(id, vcpu->arch.ebbhr);
+		break;
+	case KVM_REG_PPC_EBBRR:
+		*val = get_reg_val(id, vcpu->arch.ebbrr);
+		break;
+	case KVM_REG_PPC_BESCR:
+		*val = get_reg_val(id, vcpu->arch.bescr);
+		break;
+	case KVM_REG_PPC_TAR:
+		*val = get_reg_val(id, vcpu->arch.tar);
+		break;
+	case KVM_REG_PPC_DPDES:
+		*val = get_reg_val(id, vcpu->arch.vcore->dpdes);
+		break;
+	case KVM_REG_PPC_DAWR:
+		*val = get_reg_val(id, vcpu->arch.dawr);
+		break;
+	case KVM_REG_PPC_DAWRX:
+		*val = get_reg_val(id, vcpu->arch.dawrx);
+		break;
+	case KVM_REG_PPC_CIABR:
+		*val = get_reg_val(id, vcpu->arch.ciabr);
+		break;
+	case KVM_REG_PPC_IC:
+		*val = get_reg_val(id, vcpu->arch.ic);
+		break;
+	case KVM_REG_PPC_VTB:
+		*val = get_reg_val(id, vcpu->arch.vtb);
+		break;
+	case KVM_REG_PPC_CSIGR:
+		*val = get_reg_val(id, vcpu->arch.csigr);
+		break;
+	case KVM_REG_PPC_TACR:
+		*val = get_reg_val(id, vcpu->arch.tacr);
+		break;
+	case KVM_REG_PPC_TCSCR:
+		*val = get_reg_val(id, vcpu->arch.tcscr);
+		break;
+	case KVM_REG_PPC_PID:
+		*val = get_reg_val(id, vcpu->arch.pid);
+		break;
+	case KVM_REG_PPC_ACOP:
+		*val = get_reg_val(id, vcpu->arch.acop);
+		break;
+	case KVM_REG_PPC_WORT:
+		*val = get_reg_val(id, vcpu->arch.wort);
+		break;
 	case KVM_REG_PPC_VPA_ADDR:
 		spin_lock(&vcpu->arch.vpa_update_lock);
 		*val = get_reg_val(id, vcpu->arch.vpa.next_gpa);
@@ -890,6 +997,9 @@
 	case KVM_REG_PPC_DABR:
 		vcpu->arch.dabr = set_reg_val(id, *val);
 		break;
+	case KVM_REG_PPC_DABRX:
+		vcpu->arch.dabrx = set_reg_val(id, *val) & ~DABRX_HYP;
+		break;
 	case KVM_REG_PPC_DSCR:
 		vcpu->arch.dscr = set_reg_val(id, *val);
 		break;
@@ -905,7 +1015,7 @@
 	case KVM_REG_PPC_UAMOR:
 		vcpu->arch.uamor = set_reg_val(id, *val);
 		break;
-	case KVM_REG_PPC_MMCR0 ... KVM_REG_PPC_MMCRA:
+	case KVM_REG_PPC_MMCR0 ... KVM_REG_PPC_MMCRS:
 		i = id - KVM_REG_PPC_MMCR0;
 		vcpu->arch.mmcr[i] = set_reg_val(id, *val);
 		break;
@@ -913,33 +1023,90 @@
 		i = id - KVM_REG_PPC_PMC1;
 		vcpu->arch.pmc[i] = set_reg_val(id, *val);
 		break;
+	case KVM_REG_PPC_SPMC1 ... KVM_REG_PPC_SPMC2:
+		i = id - KVM_REG_PPC_SPMC1;
+		vcpu->arch.spmc[i] = set_reg_val(id, *val);
+		break;
 	case KVM_REG_PPC_SIAR:
 		vcpu->arch.siar = set_reg_val(id, *val);
 		break;
 	case KVM_REG_PPC_SDAR:
 		vcpu->arch.sdar = set_reg_val(id, *val);
 		break;
-#ifdef CONFIG_VSX
-	case KVM_REG_PPC_FPR0 ... KVM_REG_PPC_FPR31:
-		if (cpu_has_feature(CPU_FTR_VSX)) {
-			/* VSX => FP reg i is stored in arch.vsr[2*i] */
-			long int i = id - KVM_REG_PPC_FPR0;
-			vcpu->arch.vsr[2 * i] = set_reg_val(id, *val);
-		} else {
-			/* let generic code handle it */
-			r = -EINVAL;
-		}
+	case KVM_REG_PPC_SIER:
+		vcpu->arch.sier = set_reg_val(id, *val);
 		break;
-	case KVM_REG_PPC_VSR0 ... KVM_REG_PPC_VSR31:
-		if (cpu_has_feature(CPU_FTR_VSX)) {
-			long int i = id - KVM_REG_PPC_VSR0;
-			vcpu->arch.vsr[2 * i] = val->vsxval[0];
-			vcpu->arch.vsr[2 * i + 1] = val->vsxval[1];
-		} else {
-			r = -ENXIO;
-		}
+	case KVM_REG_PPC_IAMR:
+		vcpu->arch.iamr = set_reg_val(id, *val);
 		break;
-#endif /* CONFIG_VSX */
+#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
+	case KVM_REG_PPC_TFHAR:
+		vcpu->arch.tfhar = set_reg_val(id, *val);
+		break;
+	case KVM_REG_PPC_TFIAR:
+		vcpu->arch.tfiar = set_reg_val(id, *val);
+		break;
+	case KVM_REG_PPC_TEXASR:
+		vcpu->arch.texasr = set_reg_val(id, *val);
+		break;
+#endif
+	case KVM_REG_PPC_FSCR:
+		vcpu->arch.fscr = set_reg_val(id, *val);
+		break;
+	case KVM_REG_PPC_PSPB:
+		vcpu->arch.pspb = set_reg_val(id, *val);
+		break;
+	case KVM_REG_PPC_EBBHR:
+		vcpu->arch.ebbhr = set_reg_val(id, *val);
+		break;
+	case KVM_REG_PPC_EBBRR:
+		vcpu->arch.ebbrr = set_reg_val(id, *val);
+		break;
+	case KVM_REG_PPC_BESCR:
+		vcpu->arch.bescr = set_reg_val(id, *val);
+		break;
+	case KVM_REG_PPC_TAR:
+		vcpu->arch.tar = set_reg_val(id, *val);
+		break;
+	case KVM_REG_PPC_DPDES:
+		vcpu->arch.vcore->dpdes = set_reg_val(id, *val);
+		break;
+	case KVM_REG_PPC_DAWR:
+		vcpu->arch.dawr = set_reg_val(id, *val);
+		break;
+	case KVM_REG_PPC_DAWRX:
+		vcpu->arch.dawrx = set_reg_val(id, *val) & ~DAWRX_HYP;
+		break;
+	case KVM_REG_PPC_CIABR:
+		vcpu->arch.ciabr = set_reg_val(id, *val);
+		/* Don't allow setting breakpoints in hypervisor code */
+		if ((vcpu->arch.ciabr & CIABR_PRIV) == CIABR_PRIV_HYPER)
+			vcpu->arch.ciabr &= ~CIABR_PRIV;	/* disable */
+		break;
+	case KVM_REG_PPC_IC:
+		vcpu->arch.ic = set_reg_val(id, *val);
+		break;
+	case KVM_REG_PPC_VTB:
+		vcpu->arch.vtb = set_reg_val(id, *val);
+		break;
+	case KVM_REG_PPC_CSIGR:
+		vcpu->arch.csigr = set_reg_val(id, *val);
+		break;
+	case KVM_REG_PPC_TACR:
+		vcpu->arch.tacr = set_reg_val(id, *val);
+		break;
+	case KVM_REG_PPC_TCSCR:
+		vcpu->arch.tcscr = set_reg_val(id, *val);
+		break;
+	case KVM_REG_PPC_PID:
+		vcpu->arch.pid = set_reg_val(id, *val);
+		break;
+	case KVM_REG_PPC_ACOP:
+		vcpu->arch.acop = set_reg_val(id, *val);
+		break;
+	case KVM_REG_PPC_WORT:
+		vcpu->arch.wort = set_reg_val(id, *val);
+		break;
 	case KVM_REG_PPC_VPA_ADDR:
 		addr = set_reg_val(id, *val);
 		r = -EINVAL;
@@ -1017,6 +1184,7 @@
 	spin_lock_init(&vcpu->arch.vpa_update_lock);
 	spin_lock_init(&vcpu->arch.tbacct_lock);
 	vcpu->arch.busy_preempt = TB_NIL;
+	vcpu->arch.intr_msr = MSR_SF | MSR_ME;
 
 	kvmppc_mmu_book3s_hv_init(vcpu);
 
@@ -1034,6 +1202,8 @@
 			init_waitqueue_head(&vcore->wq);
 			vcore->preempt_tb = TB_NIL;
 			vcore->lpcr = kvm->arch.lpcr;
+			vcore->first_vcpuid = core * threads_per_core;
+			vcore->kvm = kvm;
 		}
 		kvm->arch.vcores[core] = vcore;
 		kvm->arch.online_vcores++;
@@ -1047,6 +1217,7 @@
 	++vcore->num_threads;
 	spin_unlock(&vcore->lock);
 	vcpu->arch.vcore = vcore;
+	vcpu->arch.ptid = vcpu->vcpu_id - vcore->first_vcpuid;
 
 	vcpu->arch.cpu_type = KVM_CPU_3S_64;
 	kvmppc_sanity_check(vcpu);
@@ -1110,7 +1281,7 @@
 	}
 }
 
-extern int __kvmppc_vcore_entry(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu);
+extern void __kvmppc_vcore_entry(void);
 
 static void kvmppc_remove_runnable(struct kvmppc_vcore *vc,
 				   struct kvm_vcpu *vcpu)
@@ -1184,13 +1355,16 @@
 	tpaca = &paca[cpu];
 	tpaca->kvm_hstate.kvm_vcpu = vcpu;
 	tpaca->kvm_hstate.kvm_vcore = vc;
-	tpaca->kvm_hstate.napping = 0;
+	tpaca->kvm_hstate.ptid = vcpu->arch.ptid;
 	vcpu->cpu = vc->pcpu;
 	smp_wmb();
 #if defined(CONFIG_PPC_ICP_NATIVE) && defined(CONFIG_SMP)
-	if (vcpu->arch.ptid) {
+	if (cpu != smp_processor_id()) {
+#ifdef CONFIG_KVM_XICS
 		xics_wake_cpu(cpu);
-		++vc->n_woken;
+#endif
+		if (vcpu->arch.ptid)
+			++vc->n_woken;
 	}
 #endif
 }
@@ -1247,10 +1421,10 @@
  */
 static void kvmppc_run_core(struct kvmppc_vcore *vc)
 {
-	struct kvm_vcpu *vcpu, *vcpu0, *vnext;
+	struct kvm_vcpu *vcpu, *vnext;
 	long ret;
 	u64 now;
-	int ptid, i, need_vpa_update;
+	int i, need_vpa_update;
 	int srcu_idx;
 	struct kvm_vcpu *vcpus_to_update[threads_per_core];
 
@@ -1288,25 +1462,6 @@
 	}
 
 	/*
-	 * Assign physical thread IDs, first to non-ceded vcpus
-	 * and then to ceded ones.
-	 */
-	ptid = 0;
-	vcpu0 = NULL;
-	list_for_each_entry(vcpu, &vc->runnable_threads, arch.run_list) {
-		if (!vcpu->arch.ceded) {
-			if (!ptid)
-				vcpu0 = vcpu;
-			vcpu->arch.ptid = ptid++;
-		}
-	}
-	if (!vcpu0)
-		goto out;	/* nothing to run; should never happen */
-	list_for_each_entry(vcpu, &vc->runnable_threads, arch.run_list)
-		if (vcpu->arch.ceded)
-			vcpu->arch.ptid = ptid++;
-
-	/*
 	 * Make sure we are running on thread 0, and that
 	 * secondary threads are offline.
 	 */
@@ -1322,15 +1477,19 @@
 		kvmppc_create_dtl_entry(vcpu, vc);
 	}
 
+	/* Set this explicitly in case thread 0 doesn't have a vcpu */
+	get_paca()->kvm_hstate.kvm_vcore = vc;
+	get_paca()->kvm_hstate.ptid = 0;
+
 	vc->vcore_state = VCORE_RUNNING;
 	preempt_disable();
 	spin_unlock(&vc->lock);
 
 	kvm_guest_enter();
 
-	srcu_idx = srcu_read_lock(&vcpu0->kvm->srcu);
+	srcu_idx = srcu_read_lock(&vc->kvm->srcu);
 
-	__kvmppc_vcore_entry(NULL, vcpu0);
+	__kvmppc_vcore_entry();
 
 	spin_lock(&vc->lock);
 	/* disable sending of IPIs on virtual external irqs */
@@ -1345,7 +1504,7 @@
 	vc->vcore_state = VCORE_EXITING;
 	spin_unlock(&vc->lock);
 
-	srcu_read_unlock(&vcpu0->kvm->srcu, srcu_idx);
+	srcu_read_unlock(&vc->kvm->srcu, srcu_idx);
 
 	/* make sure updates to secondary vcpu structs are visible now */
 	smp_mb();
@@ -1453,7 +1612,6 @@
 	if (!signal_pending(current)) {
 		if (vc->vcore_state == VCORE_RUNNING &&
 		    VCORE_EXIT_COUNT(vc) == 0) {
-			vcpu->arch.ptid = vc->n_runnable - 1;
 			kvmppc_create_dtl_entry(vcpu, vc);
 			kvmppc_start_thread(vcpu);
 		} else if (vc->vcore_state == VCORE_SLEEPING) {
@@ -2048,6 +2206,9 @@
 			LPCR_VPM0 | LPCR_VPM1;
 		kvm->arch.vrma_slb_v = SLB_VSID_B_1T |
 			(VRMA_VSID << SLB_VSID_SHIFT_1T);
+		/* On POWER8 turn on online bit to enable PURR/SPURR */
+		if (cpu_has_feature(CPU_FTR_ARCH_207S))
+			lpcr |= LPCR_ONL;
 	}
 	kvm->arch.lpcr = lpcr;
 
@@ -2222,3 +2383,5 @@
 module_init(kvmppc_book3s_init_hv);
 module_exit(kvmppc_book3s_exit_hv);
 MODULE_LICENSE("GPL");
+MODULE_ALIAS_MISCDEV(KVM_MINOR);
+MODULE_ALIAS("devname:kvm");
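
Several of the hunks above revolve around the new per-vcpu intr_msr: it is initialized to MSR_SF | MSR_ME at vcpu creation, toggled when the guest flips LPCR_ILE, and then used by the real-mode entry code (see the VCPU_INTR_MSR loads in book3s_hv_rmhandlers.S below) instead of a hard-coded synthesized MSR. A sketch of the endianness toggle in isolation (illustrative helper; field and bit names are taken from the hunks):

static void set_guest_intr_endian(struct kvm_vcpu *vcpu, bool little_endian)
{
	if (little_endian)
		vcpu->arch.intr_msr |= MSR_LE;	/* deliver guest interrupts LE */
	else
		vcpu->arch.intr_msr &= ~MSR_LE;	/* deliver guest interrupts BE */
}
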
diff --git a/arch/powerpc/kvm/book3s_hv_interrupts.S b/arch/powerpc/kvm/book3s_hv_interrupts.S
index 928142c..e873796 100644
--- a/arch/powerpc/kvm/book3s_hv_interrupts.S
+++ b/arch/powerpc/kvm/book3s_hv_interrupts.S
@@ -35,7 +35,7 @@
  ****************************************************************************/
 
 /* Registers:
- *  r4: vcpu pointer
+ *  none
  */
 _GLOBAL(__kvmppc_vcore_entry)
 
@@ -57,9 +57,11 @@
 	std	r3, HSTATE_DSCR(r13)
 END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206)
 
+BEGIN_FTR_SECTION
 	/* Save host DABR */
 	mfspr	r3, SPRN_DABR
 	std	r3, HSTATE_DABR(r13)
+END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_207S)
 
 	/* Hard-disable interrupts */
 	mfmsr   r10
@@ -69,7 +71,6 @@
 	mtmsrd  r10,1
 
 	/* Save host PMU registers */
-	/* R4 is live here (vcpu pointer) but not r3 or r5 */
 	li	r3, 1
 	sldi	r3, r3, 31		/* MMCR0_FC (freeze counters) bit */
 	mfspr	r7, SPRN_MMCR0		/* save MMCR0 */
@@ -134,16 +135,15 @@
 	 * enters the guest with interrupts enabled.
 	 */
 BEGIN_FTR_SECTION
+	ld	r4, HSTATE_KVM_VCPU(r13)
 	ld	r0, VCPU_PENDING_EXC(r4)
 	li	r7, (1 << BOOK3S_IRQPRIO_EXTERNAL)
 	oris	r7, r7, (1 << BOOK3S_IRQPRIO_EXTERNAL_LEVEL)@h
 	and.	r0, r0, r7
 	beq	32f
-	mr	r31, r4
 	lhz	r3, PACAPACAINDEX(r13)
 	bl	smp_send_reschedule
 	nop
-	mr	r4, r31
 32:
 END_FTR_SECTION_IFSET(CPU_FTR_ARCH_201)
 #endif /* CONFIG_SMP */
diff --git a/arch/powerpc/kvm/book3s_hv_rm_mmu.c b/arch/powerpc/kvm/book3s_hv_rm_mmu.c
index 8689e2e..37fb3ca 100644
--- a/arch/powerpc/kvm/book3s_hv_rm_mmu.c
+++ b/arch/powerpc/kvm/book3s_hv_rm_mmu.c
@@ -134,7 +134,7 @@
 	unlock_rmap(rmap);
 }
 
-static pte_t lookup_linux_pte(pgd_t *pgdir, unsigned long hva,
+static pte_t lookup_linux_pte_and_update(pgd_t *pgdir, unsigned long hva,
 			      int writing, unsigned long *pte_sizep)
 {
 	pte_t *ptep;
@@ -232,7 +232,8 @@
 
 		/* Look up the Linux PTE for the backing page */
 		pte_size = psize;
-		pte = lookup_linux_pte(pgdir, hva, writing, &pte_size);
+		pte = lookup_linux_pte_and_update(pgdir, hva, writing,
+						  &pte_size);
 		if (pte_present(pte)) {
 			if (writing && !pte_write(pte))
 				/* make the actual HPTE be read-only */
@@ -672,7 +673,8 @@
 			memslot = __gfn_to_memslot(kvm_memslots(kvm), gfn);
 			if (memslot) {
 				hva = __gfn_to_hva_memslot(memslot, gfn);
-				pte = lookup_linux_pte(pgdir, hva, 1, &psize);
+				pte = lookup_linux_pte_and_update(pgdir, hva,
+								  1, &psize);
 				if (pte_present(pte) && !pte_write(pte))
 					r = hpte_make_readonly(r);
 			}
diff --git a/arch/powerpc/kvm/book3s_hv_rmhandlers.S b/arch/powerpc/kvm/book3s_hv_rmhandlers.S
index be4fa04a..e66d4ec 100644
--- a/arch/powerpc/kvm/book3s_hv_rmhandlers.S
+++ b/arch/powerpc/kvm/book3s_hv_rmhandlers.S
@@ -33,6 +33,10 @@
 #error Need to fix lppaca and SLB shadow accesses in little endian mode
 #endif
 
+/* Values in HSTATE_NAPPING(r13) */
+#define NAPPING_CEDE	1
+#define NAPPING_NOVCPU	2
+
 /*
  * Call kvmppc_hv_entry in real mode.
  * Must be called with interrupts hard-disabled.
@@ -57,29 +61,23 @@
 	RFI
 
 kvmppc_call_hv_entry:
+	ld	r4, HSTATE_KVM_VCPU(r13)
 	bl	kvmppc_hv_entry
 
 	/* Back from guest - restore host state and return to caller */
 
+BEGIN_FTR_SECTION
 	/* Restore host DABR and DABRX */
 	ld	r5,HSTATE_DABR(r13)
 	li	r6,7
 	mtspr	SPRN_DABR,r5
 	mtspr	SPRN_DABRX,r6
+END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_207S)
 
 	/* Restore SPRG3 */
 	ld	r3,PACA_SPRG3(r13)
 	mtspr	SPRN_SPRG3,r3
 
-	/*
-	 * Reload DEC.  HDEC interrupts were disabled when
-	 * we reloaded the host's LPCR value.
-	 */
-	ld	r3, HSTATE_DECEXP(r13)
-	mftb	r4
-	subf	r4, r4, r3
-	mtspr	SPRN_DEC, r4
-
 	/* Reload the host's PMU registers */
 	ld	r3, PACALPPACAPTR(r13)	/* is the host using the PMU? */
 	lbz	r4, LPPACA_PMCINUSE(r3)
@@ -115,6 +113,15 @@
 23:
 
 	/*
+	 * Reload DEC.  HDEC interrupts were disabled when
+	 * we reloaded the host's LPCR value.
+	 */
+	ld	r3, HSTATE_DECEXP(r13)
+	mftb	r4
+	subf	r4, r4, r3
+	mtspr	SPRN_DEC, r4
+
+	/*
 	 * For external and machine check interrupts, we need
 	 * to call the Linux handler to process the interrupt.
 	 * We do that by jumping to absolute address 0x500 for
@@ -153,15 +160,75 @@
 
 13:	b	machine_check_fwnmi
 
+kvmppc_primary_no_guest:
+	/* We handle this much like a ceded vcpu */
+	/* set our bit in napping_threads */
+	ld	r5, HSTATE_KVM_VCORE(r13)
+	lbz	r7, HSTATE_PTID(r13)
+	li	r0, 1
+	sld	r0, r0, r7
+	addi	r6, r5, VCORE_NAPPING_THREADS
+1:	lwarx	r3, 0, r6
+	or	r3, r3, r0
+	stwcx.	r3, 0, r6
+	bne	1b
+	/* order napping_threads update vs testing entry_exit_count */
+	isync
+	li	r12, 0
+	lwz	r7, VCORE_ENTRY_EXIT(r5)
+	cmpwi	r7, 0x100
+	bge	kvm_novcpu_exit	/* another thread already exiting */
+	li	r3, NAPPING_NOVCPU
+	stb	r3, HSTATE_NAPPING(r13)
+	li	r3, 1
+	stb	r3, HSTATE_HWTHREAD_REQ(r13)
+
+	b	kvm_do_nap
+
+kvm_novcpu_wakeup:
+	ld	r1, HSTATE_HOST_R1(r13)
+	ld	r5, HSTATE_KVM_VCORE(r13)
+	li	r0, 0
+	stb	r0, HSTATE_NAPPING(r13)
+	stb	r0, HSTATE_HWTHREAD_REQ(r13)
+
+	/* check the wake reason */
+	bl	kvmppc_check_wake_reason
+
+	/* see if any other thread is already exiting */
+	lwz	r0, VCORE_ENTRY_EXIT(r5)
+	cmpwi	r0, 0x100
+	bge	kvm_novcpu_exit
+
+	/* clear our bit in napping_threads */
+	lbz	r7, HSTATE_PTID(r13)
+	li	r0, 1
+	sld	r0, r0, r7
+	addi	r6, r5, VCORE_NAPPING_THREADS
+4:	lwarx	r7, 0, r6
+	andc	r7, r7, r0
+	stwcx.	r7, 0, r6
+	bne	4b
+
+	/* See if the wake reason means we need to exit */
+	cmpdi	r3, 0
+	bge	kvm_novcpu_exit
+
+	/* Got an IPI but other vcpus aren't yet exiting, must be a latecomer */
+	ld	r4, HSTATE_KVM_VCPU(r13)
+	cmpdi	r4, 0
+	bne	kvmppc_got_guest
+
+kvm_novcpu_exit:
+	b	hdec_soon
+
 /*
- * We come in here when wakened from nap mode on a secondary hw thread.
+ * We come in here when wakened from nap mode.
  * Relocation is off and most register values are lost.
  * r13 points to the PACA.
  */
 	.globl	kvm_start_guest
 kvm_start_guest:
-	ld	r1,PACAEMERGSP(r13)
-	subi	r1,r1,STACK_FRAME_OVERHEAD
 	ld	r2,PACATOC(r13)
 
 	li	r0,KVM_HWTHREAD_IN_KVM
@@ -173,8 +240,13 @@
 
 	/* were we napping due to cede? */
 	lbz	r0,HSTATE_NAPPING(r13)
-	cmpwi	r0,0
-	bne	kvm_end_cede
+	cmpwi	r0,NAPPING_CEDE
+	beq	kvm_end_cede
+	cmpwi	r0,NAPPING_NOVCPU
+	beq	kvm_novcpu_wakeup
+
+	ld	r1,PACAEMERGSP(r13)
+	subi	r1,r1,STACK_FRAME_OVERHEAD
 
 	/*
 	 * We weren't napping due to cede, so this must be a secondary
@@ -184,40 +256,22 @@
 	 */
 
 	/* Check the wake reason in SRR1 to see why we got here */
-	mfspr	r3,SPRN_SRR1
-	rlwinm	r3,r3,44-31,0x7		/* extract wake reason field */
-	cmpwi	r3,4			/* was it an external interrupt? */
-	bne	27f			/* if not */
-	ld	r5,HSTATE_XICS_PHYS(r13)
-	li	r7,XICS_XIRR		/* if it was an external interrupt, */
-	lwzcix	r8,r5,r7		/* get and ack the interrupt */
-	sync
-	clrldi.	r9,r8,40		/* get interrupt source ID. */
-	beq	28f			/* none there? */
-	cmpwi	r9,XICS_IPI		/* was it an IPI? */
-	bne	29f
-	li	r0,0xff
-	li	r6,XICS_MFRR
-	stbcix	r0,r5,r6		/* clear IPI */
-	stwcix	r8,r5,r7		/* EOI the interrupt */
-	sync				/* order loading of vcpu after that */
+	bl	kvmppc_check_wake_reason
+	cmpdi	r3, 0
+	bge	kvm_no_guest
 
 	/* get vcpu pointer, NULL if we have no vcpu to run */
 	ld	r4,HSTATE_KVM_VCPU(r13)
 	cmpdi	r4,0
 	/* if we have no vcpu to run, go back to sleep */
 	beq	kvm_no_guest
-	b	30f
 
-27:	/* XXX should handle hypervisor maintenance interrupts etc. here */
-	b	kvm_no_guest
-28:	/* SRR1 said external but ICP said nope?? */
-	b	kvm_no_guest
-29:	/* External non-IPI interrupt to offline secondary thread? help?? */
-	stw	r8,HSTATE_SAVED_XIRR(r13)
-	b	kvm_no_guest
+	/* Set HSTATE_DSCR(r13) to something sensible */
+	LOAD_REG_ADDR(r6, dscr_default)
+	ld	r6, 0(r6)
+	std	r6, HSTATE_DSCR(r13)
 
-30:	bl	kvmppc_hv_entry
+	bl	kvmppc_hv_entry
 
 	/* Back from the guest, go back to nap */
 	/* Clear our vcpu pointer so we don't come back in early */
@@ -229,18 +283,6 @@
 	 * visible we could be given another vcpu.
 	 */
 	lwsync
-	/* Clear any pending IPI - we're an offline thread */
-	ld	r5, HSTATE_XICS_PHYS(r13)
-	li	r7, XICS_XIRR
-	lwzcix	r3, r5, r7		/* ack any pending interrupt */
-	rlwinm.	r0, r3, 0, 0xffffff	/* any pending? */
-	beq	37f
-	sync
-	li	r0, 0xff
-	li	r6, XICS_MFRR
-	stbcix	r0, r5, r6		/* clear the IPI */
-	stwcix	r3, r5, r7		/* EOI it */
-37:	sync
 
 	/* increment the nap count and then go to nap mode */
 	ld	r4, HSTATE_KVM_VCORE(r13)
@@ -253,6 +295,7 @@
 kvm_no_guest:
 	li	r0, KVM_HWTHREAD_IN_NAP
 	stb	r0, HSTATE_HWTHREAD_STATE(r13)
+kvm_do_nap:
 	li	r3, LPCR_PECE0
 	mfspr	r4, SPRN_LPCR
 	rlwimi	r4, r3, 0, LPCR_PECE0 | LPCR_PECE1
@@ -277,7 +320,7 @@
 
 	/* Required state:
 	 *
-	 * R4 = vcpu pointer
+	 * R4 = vcpu pointer (or NULL)
 	 * MSR = ~IR|DR
 	 * R13 = PACA
 	 * R1 = host R1
@@ -287,122 +330,12 @@
 	std	r0, PPC_LR_STKOFF(r1)
 	stdu	r1, -112(r1)
 
-	/* Set partition DABR */
-	/* Do this before re-enabling PMU to avoid P7 DABR corruption bug */
-	li	r5,3
-	ld	r6,VCPU_DABR(r4)
-	mtspr	SPRN_DABRX,r5
-	mtspr	SPRN_DABR,r6
-BEGIN_FTR_SECTION
-	isync
-END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206)
-
-	/* Load guest PMU registers */
-	/* R4 is live here (vcpu pointer) */
-	li	r3, 1
-	sldi	r3, r3, 31		/* MMCR0_FC (freeze counters) bit */
-	mtspr	SPRN_MMCR0, r3		/* freeze all counters, disable ints */
-	isync
-	lwz	r3, VCPU_PMC(r4)	/* always load up guest PMU registers */
-	lwz	r5, VCPU_PMC + 4(r4)	/* to prevent information leak */
-	lwz	r6, VCPU_PMC + 8(r4)
-	lwz	r7, VCPU_PMC + 12(r4)
-	lwz	r8, VCPU_PMC + 16(r4)
-	lwz	r9, VCPU_PMC + 20(r4)
-BEGIN_FTR_SECTION
-	lwz	r10, VCPU_PMC + 24(r4)
-	lwz	r11, VCPU_PMC + 28(r4)
-END_FTR_SECTION_IFSET(CPU_FTR_ARCH_201)
-	mtspr	SPRN_PMC1, r3
-	mtspr	SPRN_PMC2, r5
-	mtspr	SPRN_PMC3, r6
-	mtspr	SPRN_PMC4, r7
-	mtspr	SPRN_PMC5, r8
-	mtspr	SPRN_PMC6, r9
-BEGIN_FTR_SECTION
-	mtspr	SPRN_PMC7, r10
-	mtspr	SPRN_PMC8, r11
-END_FTR_SECTION_IFSET(CPU_FTR_ARCH_201)
-	ld	r3, VCPU_MMCR(r4)
-	ld	r5, VCPU_MMCR + 8(r4)
-	ld	r6, VCPU_MMCR + 16(r4)
-	ld	r7, VCPU_SIAR(r4)
-	ld	r8, VCPU_SDAR(r4)
-	mtspr	SPRN_MMCR1, r5
-	mtspr	SPRN_MMCRA, r6
-	mtspr	SPRN_SIAR, r7
-	mtspr	SPRN_SDAR, r8
-	mtspr	SPRN_MMCR0, r3
-	isync
-
-	/* Load up FP, VMX and VSX registers */
-	bl	kvmppc_load_fp
-
-	ld	r14, VCPU_GPR(R14)(r4)
-	ld	r15, VCPU_GPR(R15)(r4)
-	ld	r16, VCPU_GPR(R16)(r4)
-	ld	r17, VCPU_GPR(R17)(r4)
-	ld	r18, VCPU_GPR(R18)(r4)
-	ld	r19, VCPU_GPR(R19)(r4)
-	ld	r20, VCPU_GPR(R20)(r4)
-	ld	r21, VCPU_GPR(R21)(r4)
-	ld	r22, VCPU_GPR(R22)(r4)
-	ld	r23, VCPU_GPR(R23)(r4)
-	ld	r24, VCPU_GPR(R24)(r4)
-	ld	r25, VCPU_GPR(R25)(r4)
-	ld	r26, VCPU_GPR(R26)(r4)
-	ld	r27, VCPU_GPR(R27)(r4)
-	ld	r28, VCPU_GPR(R28)(r4)
-	ld	r29, VCPU_GPR(R29)(r4)
-	ld	r30, VCPU_GPR(R30)(r4)
-	ld	r31, VCPU_GPR(R31)(r4)
-
-BEGIN_FTR_SECTION
-	/* Switch DSCR to guest value */
-	ld	r5, VCPU_DSCR(r4)
-	mtspr	SPRN_DSCR, r5
-END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206)
-
-	/*
-	 * Set the decrementer to the guest decrementer.
-	 */
-	ld	r8,VCPU_DEC_EXPIRES(r4)
-	mftb	r7
-	subf	r3,r7,r8
-	mtspr	SPRN_DEC,r3
-	stw	r3,VCPU_DEC(r4)
-
-	ld	r5, VCPU_SPRG0(r4)
-	ld	r6, VCPU_SPRG1(r4)
-	ld	r7, VCPU_SPRG2(r4)
-	ld	r8, VCPU_SPRG3(r4)
-	mtspr	SPRN_SPRG0, r5
-	mtspr	SPRN_SPRG1, r6
-	mtspr	SPRN_SPRG2, r7
-	mtspr	SPRN_SPRG3, r8
-
 	/* Save R1 in the PACA */
 	std	r1, HSTATE_HOST_R1(r13)
 
-	/* Load up DAR and DSISR */
-	ld	r5, VCPU_DAR(r4)
-	lwz	r6, VCPU_DSISR(r4)
-	mtspr	SPRN_DAR, r5
-	mtspr	SPRN_DSISR, r6
-
 	li	r6, KVM_GUEST_MODE_HOST_HV
 	stb	r6, HSTATE_IN_GUEST(r13)
 
-BEGIN_FTR_SECTION
-	/* Restore AMR and UAMOR, set AMOR to all 1s */
-	ld	r5,VCPU_AMR(r4)
-	ld	r6,VCPU_UAMOR(r4)
-	li	r7,-1
-	mtspr	SPRN_AMR,r5
-	mtspr	SPRN_UAMOR,r6
-	mtspr	SPRN_AMOR,r7
-END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206)
-
 	/* Clear out SLB */
 	li	r6,0
 	slbmte	r6,r6
@@ -428,8 +361,8 @@
 	bne	21b
 
 	/* Primary thread switches to guest partition. */
-	ld	r9,VCPU_KVM(r4)		/* pointer to struct kvm */
-	lwz	r6,VCPU_PTID(r4)
+	ld	r9,VCORE_KVM(r5)	/* pointer to struct kvm */
+	lbz	r6,HSTATE_PTID(r13)
 	cmpwi	r6,0
 	bne	20f
 	ld	r6,KVM_SDR1(r9)
@@ -457,7 +390,13 @@
 	andc	r7,r7,r0
 	stdcx.	r7,0,r6
 	bne	23b
-	li	r6,128			/* and flush the TLB */
+	/* Flush the TLB of any entries for this LPID */
+	/* use arch 2.07S as a proxy for POWER8 */
+BEGIN_FTR_SECTION
+	li	r6,512			/* POWER8 has 512 sets */
+FTR_SECTION_ELSE
+	li	r6,128			/* POWER7 has 128 sets */
+ALT_FTR_SECTION_END_IFSET(CPU_FTR_ARCH_207S)
 	mtctr	r6
 	li	r7,0x800		/* IS field = 0b10 */
 	ptesync
@@ -487,6 +426,13 @@
 	beq	38f
 	mtspr	SPRN_PCR, r7
 38:
+
+BEGIN_FTR_SECTION
+	/* DPDES is shared between threads */
+	ld	r8, VCORE_DPDES(r5)
+	mtspr	SPRN_DPDES, r8
+END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
+
 	li	r0,1
 	stb	r0,VCORE_IN_GUEST(r5)	/* signal secondaries to continue */
 	b	10f
@@ -503,32 +449,11 @@
 	mtspr	SPRN_RMOR,r8
 	isync
 
-	/* Increment yield count if they have a VPA */
-	ld	r3, VCPU_VPA(r4)
-	cmpdi	r3, 0
-	beq	25f
-	lwz	r5, LPPACA_YIELDCOUNT(r3)
-	addi	r5, r5, 1
-	stw	r5, LPPACA_YIELDCOUNT(r3)
-	li	r6, 1
-	stb	r6, VCPU_VPA_DIRTY(r4)
-25:
 	/* Check if HDEC expires soon */
 	mfspr	r3,SPRN_HDEC
-	cmpwi	r3,10
+	cmpwi	r3,512		/* 1 microsecond */
 	li	r12,BOOK3S_INTERRUPT_HV_DECREMENTER
-	mr	r9,r4
 	blt	hdec_soon
-
-	/* Save purr/spurr */
-	mfspr	r5,SPRN_PURR
-	mfspr	r6,SPRN_SPURR
-	std	r5,HSTATE_PURR(r13)
-	std	r6,HSTATE_SPURR(r13)
-	ld	r7,VCPU_PURR(r4)
-	ld	r8,VCPU_SPURR(r4)
-	mtspr	SPRN_PURR,r7
-	mtspr	SPRN_SPURR,r8
 	b	31f
 
 	/*
@@ -539,7 +464,8 @@
 	 * We also have to invalidate the TLB since its
 	 * entries aren't tagged with the LPID.
 	 */
-30:	ld	r9,VCPU_KVM(r4)		/* pointer to struct kvm */
+30:	ld	r5,HSTATE_KVM_VCORE(r13)
+	ld	r9,VCORE_KVM(r5)	/* pointer to struct kvm */
 
 	/* first take native_tlbie_lock */
 	.section ".toc","aw"
@@ -604,7 +530,6 @@
 	mfspr	r3,SPRN_HDEC
 	cmpwi	r3,10
 	li	r12,BOOK3S_INTERRUPT_HV_DECREMENTER
-	mr	r9,r4
 	blt	hdec_soon
 
 	/* Enable HDEC interrupts */
@@ -619,9 +544,14 @@
 	mfspr	r0,SPRN_HID0
 	mfspr	r0,SPRN_HID0
 	mfspr	r0,SPRN_HID0
+31:
+	/* Do we have a guest vcpu to run? */
+	cmpdi	r4, 0
+	beq	kvmppc_primary_no_guest
+kvmppc_got_guest:
 
 	/* Load up guest SLB entries */
-31:	lwz	r5,VCPU_SLB_MAX(r4)
+	lwz	r5,VCPU_SLB_MAX(r4)
 	cmpwi	r5,0
 	beq	9f
 	mtctr	r5
@@ -632,6 +562,209 @@
 	addi	r6,r6,VCPU_SLB_SIZE
 	bdnz	1b
 9:
+	/* Increment yield count if they have a VPA */
+	ld	r3, VCPU_VPA(r4)
+	cmpdi	r3, 0
+	beq	25f
+	lwz	r5, LPPACA_YIELDCOUNT(r3)
+	addi	r5, r5, 1
+	stw	r5, LPPACA_YIELDCOUNT(r3)
+	li	r6, 1
+	stb	r6, VCPU_VPA_DIRTY(r4)
+25:
+
+BEGIN_FTR_SECTION
+	/* Save purr/spurr */
+	mfspr	r5,SPRN_PURR
+	mfspr	r6,SPRN_SPURR
+	std	r5,HSTATE_PURR(r13)
+	std	r6,HSTATE_SPURR(r13)
+	ld	r7,VCPU_PURR(r4)
+	ld	r8,VCPU_SPURR(r4)
+	mtspr	SPRN_PURR,r7
+	mtspr	SPRN_SPURR,r8
+END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206)
+
+BEGIN_FTR_SECTION
+	/* Set partition DABR */
+	/* Do this before re-enabling PMU to avoid P7 DABR corruption bug */
+	lwz	r5,VCPU_DABRX(r4)
+	ld	r6,VCPU_DABR(r4)
+	mtspr	SPRN_DABRX,r5
+	mtspr	SPRN_DABR,r6
+ BEGIN_FTR_SECTION_NESTED(89)
+	isync
+ END_FTR_SECTION_NESTED(CPU_FTR_ARCH_206, CPU_FTR_ARCH_206, 89)
+END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_207S)
+
+	/* Load guest PMU registers */
+	/* R4 is live here (vcpu pointer) */
+	li	r3, 1
+	sldi	r3, r3, 31		/* MMCR0_FC (freeze counters) bit */
+	mtspr	SPRN_MMCR0, r3		/* freeze all counters, disable ints */
+	isync
+	lwz	r3, VCPU_PMC(r4)	/* always load up guest PMU registers */
+	lwz	r5, VCPU_PMC + 4(r4)	/* to prevent information leak */
+	lwz	r6, VCPU_PMC + 8(r4)
+	lwz	r7, VCPU_PMC + 12(r4)
+	lwz	r8, VCPU_PMC + 16(r4)
+	lwz	r9, VCPU_PMC + 20(r4)
+BEGIN_FTR_SECTION
+	lwz	r10, VCPU_PMC + 24(r4)
+	lwz	r11, VCPU_PMC + 28(r4)
+END_FTR_SECTION_IFSET(CPU_FTR_ARCH_201)
+	mtspr	SPRN_PMC1, r3
+	mtspr	SPRN_PMC2, r5
+	mtspr	SPRN_PMC3, r6
+	mtspr	SPRN_PMC4, r7
+	mtspr	SPRN_PMC5, r8
+	mtspr	SPRN_PMC6, r9
+BEGIN_FTR_SECTION
+	mtspr	SPRN_PMC7, r10
+	mtspr	SPRN_PMC8, r11
+END_FTR_SECTION_IFSET(CPU_FTR_ARCH_201)
+	ld	r3, VCPU_MMCR(r4)
+	ld	r5, VCPU_MMCR + 8(r4)
+	ld	r6, VCPU_MMCR + 16(r4)
+	ld	r7, VCPU_SIAR(r4)
+	ld	r8, VCPU_SDAR(r4)
+	mtspr	SPRN_MMCR1, r5
+	mtspr	SPRN_MMCRA, r6
+	mtspr	SPRN_SIAR, r7
+	mtspr	SPRN_SDAR, r8
+BEGIN_FTR_SECTION
+	ld	r5, VCPU_MMCR + 24(r4)
+	ld	r6, VCPU_SIER(r4)
+	lwz	r7, VCPU_PMC + 24(r4)
+	lwz	r8, VCPU_PMC + 28(r4)
+	ld	r9, VCPU_MMCR + 32(r4)
+	mtspr	SPRN_MMCR2, r5
+	mtspr	SPRN_SIER, r6
+	mtspr	SPRN_SPMC1, r7
+	mtspr	SPRN_SPMC2, r8
+	mtspr	SPRN_MMCRS, r9
+END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
+	mtspr	SPRN_MMCR0, r3
+	isync
+
+	/* Load up FP, VMX and VSX registers */
+	bl	kvmppc_load_fp
+
+	ld	r14, VCPU_GPR(R14)(r4)
+	ld	r15, VCPU_GPR(R15)(r4)
+	ld	r16, VCPU_GPR(R16)(r4)
+	ld	r17, VCPU_GPR(R17)(r4)
+	ld	r18, VCPU_GPR(R18)(r4)
+	ld	r19, VCPU_GPR(R19)(r4)
+	ld	r20, VCPU_GPR(R20)(r4)
+	ld	r21, VCPU_GPR(R21)(r4)
+	ld	r22, VCPU_GPR(R22)(r4)
+	ld	r23, VCPU_GPR(R23)(r4)
+	ld	r24, VCPU_GPR(R24)(r4)
+	ld	r25, VCPU_GPR(R25)(r4)
+	ld	r26, VCPU_GPR(R26)(r4)
+	ld	r27, VCPU_GPR(R27)(r4)
+	ld	r28, VCPU_GPR(R28)(r4)
+	ld	r29, VCPU_GPR(R29)(r4)
+	ld	r30, VCPU_GPR(R30)(r4)
+	ld	r31, VCPU_GPR(R31)(r4)
+
+BEGIN_FTR_SECTION
+	/* Switch DSCR to guest value */
+	ld	r5, VCPU_DSCR(r4)
+	mtspr	SPRN_DSCR, r5
+END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206)
+
+BEGIN_FTR_SECTION
+	/* Skip next section on POWER7 or PPC970 */
+	b	8f
+END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_207S)
+	/* Turn on TM so we can access TFHAR/TFIAR/TEXASR */
+	mfmsr	r8
+	li	r0, 1
+	rldimi	r8, r0, MSR_TM_LG, 63-MSR_TM_LG
+	mtmsrd	r8
+
+	/* Load up POWER8-specific registers */
+	ld	r5, VCPU_IAMR(r4)
+	lwz	r6, VCPU_PSPB(r4)
+	ld	r7, VCPU_FSCR(r4)
+	mtspr	SPRN_IAMR, r5
+	mtspr	SPRN_PSPB, r6
+	mtspr	SPRN_FSCR, r7
+	ld	r5, VCPU_DAWR(r4)
+	ld	r6, VCPU_DAWRX(r4)
+	ld	r7, VCPU_CIABR(r4)
+	ld	r8, VCPU_TAR(r4)
+	mtspr	SPRN_DAWR, r5
+	mtspr	SPRN_DAWRX, r6
+	mtspr	SPRN_CIABR, r7
+	mtspr	SPRN_TAR, r8
+	ld	r5, VCPU_IC(r4)
+	ld	r6, VCPU_VTB(r4)
+	mtspr	SPRN_IC, r5
+	mtspr	SPRN_VTB, r6
+#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
+	ld	r5, VCPU_TFHAR(r4)
+	ld	r6, VCPU_TFIAR(r4)
+	ld	r7, VCPU_TEXASR(r4)
+	mtspr	SPRN_TFHAR, r5
+	mtspr	SPRN_TFIAR, r6
+	mtspr	SPRN_TEXASR, r7
+#endif
+	ld	r8, VCPU_EBBHR(r4)
+	mtspr	SPRN_EBBHR, r8
+	ld	r5, VCPU_EBBRR(r4)
+	ld	r6, VCPU_BESCR(r4)
+	ld	r7, VCPU_CSIGR(r4)
+	ld	r8, VCPU_TACR(r4)
+	mtspr	SPRN_EBBRR, r5
+	mtspr	SPRN_BESCR, r6
+	mtspr	SPRN_CSIGR, r7
+	mtspr	SPRN_TACR, r8
+	ld	r5, VCPU_TCSCR(r4)
+	ld	r6, VCPU_ACOP(r4)
+	lwz	r7, VCPU_GUEST_PID(r4)
+	ld	r8, VCPU_WORT(r4)
+	mtspr	SPRN_TCSCR, r5
+	mtspr	SPRN_ACOP, r6
+	mtspr	SPRN_PID, r7
+	mtspr	SPRN_WORT, r8
+8:
+
+	/*
+	 * Set the decrementer to the guest decrementer.
+	 */
+	ld	r8,VCPU_DEC_EXPIRES(r4)
+	mftb	r7
+	subf	r3,r7,r8
+	mtspr	SPRN_DEC,r3
+	stw	r3,VCPU_DEC(r4)
+
+	ld	r5, VCPU_SPRG0(r4)
+	ld	r6, VCPU_SPRG1(r4)
+	ld	r7, VCPU_SPRG2(r4)
+	ld	r8, VCPU_SPRG3(r4)
+	mtspr	SPRN_SPRG0, r5
+	mtspr	SPRN_SPRG1, r6
+	mtspr	SPRN_SPRG2, r7
+	mtspr	SPRN_SPRG3, r8
+
+	/* Load up DAR and DSISR */
+	ld	r5, VCPU_DAR(r4)
+	lwz	r6, VCPU_DSISR(r4)
+	mtspr	SPRN_DAR, r5
+	mtspr	SPRN_DSISR, r6
+
+BEGIN_FTR_SECTION
+	/* Restore AMR and UAMOR, set AMOR to all 1s */
+	ld	r5,VCPU_AMR(r4)
+	ld	r6,VCPU_UAMOR(r4)
+	li	r7,-1
+	mtspr	SPRN_AMR,r5
+	mtspr	SPRN_UAMOR,r6
+	mtspr	SPRN_AMOR,r7
+END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206)
 
 	/* Restore state of CTRL run bit; assume 1 on entry */
 	lwz	r5,VCPU_CTRL(r4)
@@ -647,48 +780,53 @@
 	mtctr	r6
 	mtxer	r7
 
+kvmppc_cede_reentry:		/* r4 = vcpu, r13 = paca */
 	ld	r10, VCPU_PC(r4)
 	ld	r11, VCPU_MSR(r4)
-kvmppc_cede_reentry:		/* r4 = vcpu, r13 = paca */
 	ld	r6, VCPU_SRR0(r4)
 	ld	r7, VCPU_SRR1(r4)
+	mtspr	SPRN_SRR0, r6
+	mtspr	SPRN_SRR1, r7
 
+deliver_guest_interrupt:
 	/* r11 = vcpu->arch.msr & ~MSR_HV */
 	rldicl	r11, r11, 63 - MSR_HV_LG, 1
 	rotldi	r11, r11, 1 + MSR_HV_LG
 	ori	r11, r11, MSR_ME
 
 	/* Check if we can deliver an external or decrementer interrupt now */
-	ld	r0,VCPU_PENDING_EXC(r4)
-	lis	r8,(1 << BOOK3S_IRQPRIO_EXTERNAL_LEVEL)@h
-	and	r0,r0,r8
-	cmpdi	cr1,r0,0
-	andi.	r0,r11,MSR_EE
-	beq	cr1,11f
+	ld	r0, VCPU_PENDING_EXC(r4)
+	rldicl	r0, r0, 64 - BOOK3S_IRQPRIO_EXTERNAL_LEVEL, 63
+	cmpdi	cr1, r0, 0
+	andi.	r8, r11, MSR_EE
 BEGIN_FTR_SECTION
-	mfspr	r8,SPRN_LPCR
-	ori	r8,r8,LPCR_MER
-	mtspr	SPRN_LPCR,r8
+	mfspr	r8, SPRN_LPCR
+	/* Insert EXTERNAL_LEVEL bit into LPCR at the MER bit position */
+	rldimi	r8, r0, LPCR_MER_SH, 63 - LPCR_MER_SH
+	mtspr	SPRN_LPCR, r8
 	isync
 END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206)
 	beq	5f
-	li	r0,BOOK3S_INTERRUPT_EXTERNAL
-12:	mr	r6,r10
+	li	r0, BOOK3S_INTERRUPT_EXTERNAL
+	bne	cr1, 12f
+	mfspr	r0, SPRN_DEC
+	cmpwi	r0, 0
+	li	r0, BOOK3S_INTERRUPT_DECREMENTER
+	bge	5f
+
+12:	mtspr	SPRN_SRR0, r10
 	mr	r10,r0
-	mr	r7,r11
-	li	r11,(MSR_ME << 1) | 1	/* synthesize MSR_SF | MSR_ME */
-	rotldi	r11,r11,63
-	b	5f
-11:	beq	5f
-	mfspr	r0,SPRN_DEC
-	cmpwi	r0,0
-	li	r0,BOOK3S_INTERRUPT_DECREMENTER
-	blt	12b
+	mtspr	SPRN_SRR1, r11
+	ld	r11, VCPU_INTR_MSR(r4)
+5:
 
-	/* Move SRR0 and SRR1 into the respective regs */
-5:	mtspr	SPRN_SRR0, r6
-	mtspr	SPRN_SRR1, r7
-
+/*
+ * Required state:
+ * R4 = vcpu
+ * R10 = value for HSRR0
+ * R11 = value for HSRR1
+ * R13 = PACA
+ */
 fast_guest_return:
 	li	r0,0
 	stb	r0,VCPU_CEDED(r4)	/* cancel cede */
@@ -868,39 +1006,19 @@
 	/* External interrupt, first check for host_ipi. If this is
 	 * set, we know the host wants us out so let's do it now
 	 */
-do_ext_interrupt:
 	bl	kvmppc_read_intr
 	cmpdi	r3, 0
 	bgt	ext_interrupt_to_host
 
-	/* Allright, looks like an IPI for the guest, we need to set MER */
 	/* Check if any CPU is heading out to the host, if so head out too */
 	ld	r5, HSTATE_KVM_VCORE(r13)
 	lwz	r0, VCORE_ENTRY_EXIT(r5)
 	cmpwi	r0, 0x100
 	bge	ext_interrupt_to_host
 
-	/* See if there is a pending interrupt for the guest */
-	mfspr	r8, SPRN_LPCR
-	ld	r0, VCPU_PENDING_EXC(r9)
-	/* Insert EXTERNAL_LEVEL bit into LPCR at the MER bit position */
-	rldicl.	r0, r0, 64 - BOOK3S_IRQPRIO_EXTERNAL_LEVEL, 63
-	rldimi	r8, r0, LPCR_MER_SH, 63 - LPCR_MER_SH
-	beq	2f
-
-	/* And if the guest EE is set, we can deliver immediately, else
-	 * we return to the guest with MER set
-	 */
-	andi.	r0, r11, MSR_EE
-	beq	2f
-	mtspr	SPRN_SRR0, r10
-	mtspr	SPRN_SRR1, r11
-	li	r10, BOOK3S_INTERRUPT_EXTERNAL
-	li	r11, (MSR_ME << 1) | 1	/* synthesize MSR_SF | MSR_ME */
-	rotldi	r11, r11, 63
-2:	mr	r4, r9
-	mtspr	SPRN_LPCR, r8
-	b	fast_guest_return
+	/* Return to guest after delivering any pending interrupt */
+	mr	r4, r9
+	b	deliver_guest_interrupt
 
 ext_interrupt_to_host:
 
@@ -975,13 +1093,194 @@
 	mtspr	SPRN_SPURR,r4
 END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_201)
 
+	/* Save DEC */
+	mfspr	r5,SPRN_DEC
+	mftb	r6
+	extsw	r5,r5
+	add	r5,r5,r6
+	std	r5,VCPU_DEC_EXPIRES(r9)
+
+BEGIN_FTR_SECTION
+	b	8f
+END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_207S)
+	/* Turn on TM so we can access TFHAR/TFIAR/TEXASR */
+	mfmsr	r8
+	li	r0, 1
+	rldimi	r8, r0, MSR_TM_LG, 63-MSR_TM_LG
+	mtmsrd	r8
+
+	/* Save POWER8-specific registers */
+	mfspr	r5, SPRN_IAMR
+	mfspr	r6, SPRN_PSPB
+	mfspr	r7, SPRN_FSCR
+	std	r5, VCPU_IAMR(r9)
+	stw	r6, VCPU_PSPB(r9)
+	std	r7, VCPU_FSCR(r9)
+	mfspr	r5, SPRN_IC
+	mfspr	r6, SPRN_VTB
+	mfspr	r7, SPRN_TAR
+	std	r5, VCPU_IC(r9)
+	std	r6, VCPU_VTB(r9)
+	std	r7, VCPU_TAR(r9)
+#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
+	mfspr	r5, SPRN_TFHAR
+	mfspr	r6, SPRN_TFIAR
+	mfspr	r7, SPRN_TEXASR
+	std	r5, VCPU_TFHAR(r9)
+	std	r6, VCPU_TFIAR(r9)
+	std	r7, VCPU_TEXASR(r9)
+#endif
+	mfspr	r8, SPRN_EBBHR
+	std	r8, VCPU_EBBHR(r9)
+	mfspr	r5, SPRN_EBBRR
+	mfspr	r6, SPRN_BESCR
+	mfspr	r7, SPRN_CSIGR
+	mfspr	r8, SPRN_TACR
+	std	r5, VCPU_EBBRR(r9)
+	std	r6, VCPU_BESCR(r9)
+	std	r7, VCPU_CSIGR(r9)
+	std	r8, VCPU_TACR(r9)
+	mfspr	r5, SPRN_TCSCR
+	mfspr	r6, SPRN_ACOP
+	mfspr	r7, SPRN_PID
+	mfspr	r8, SPRN_WORT
+	std	r5, VCPU_TCSCR(r9)
+	std	r6, VCPU_ACOP(r9)
+	stw	r7, VCPU_GUEST_PID(r9)
+	std	r8, VCPU_WORT(r9)
+8:
+
+	/* Save and reset AMR and UAMOR before turning on the MMU */
+BEGIN_FTR_SECTION
+	mfspr	r5,SPRN_AMR
+	mfspr	r6,SPRN_UAMOR
+	std	r5,VCPU_AMR(r9)
+	std	r6,VCPU_UAMOR(r9)
+	li	r6,0
+	mtspr	SPRN_AMR,r6
+END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206)
+
+	/* Switch DSCR back to host value */
+BEGIN_FTR_SECTION
+	mfspr	r8, SPRN_DSCR
+	ld	r7, HSTATE_DSCR(r13)
+	std	r8, VCPU_DSCR(r9)
+	mtspr	SPRN_DSCR, r7
+END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206)
+
+	/* Save non-volatile GPRs */
+	std	r14, VCPU_GPR(R14)(r9)
+	std	r15, VCPU_GPR(R15)(r9)
+	std	r16, VCPU_GPR(R16)(r9)
+	std	r17, VCPU_GPR(R17)(r9)
+	std	r18, VCPU_GPR(R18)(r9)
+	std	r19, VCPU_GPR(R19)(r9)
+	std	r20, VCPU_GPR(R20)(r9)
+	std	r21, VCPU_GPR(R21)(r9)
+	std	r22, VCPU_GPR(R22)(r9)
+	std	r23, VCPU_GPR(R23)(r9)
+	std	r24, VCPU_GPR(R24)(r9)
+	std	r25, VCPU_GPR(R25)(r9)
+	std	r26, VCPU_GPR(R26)(r9)
+	std	r27, VCPU_GPR(R27)(r9)
+	std	r28, VCPU_GPR(R28)(r9)
+	std	r29, VCPU_GPR(R29)(r9)
+	std	r30, VCPU_GPR(R30)(r9)
+	std	r31, VCPU_GPR(R31)(r9)
+
+	/* Save SPRGs */
+	mfspr	r3, SPRN_SPRG0
+	mfspr	r4, SPRN_SPRG1
+	mfspr	r5, SPRN_SPRG2
+	mfspr	r6, SPRN_SPRG3
+	std	r3, VCPU_SPRG0(r9)
+	std	r4, VCPU_SPRG1(r9)
+	std	r5, VCPU_SPRG2(r9)
+	std	r6, VCPU_SPRG3(r9)
+
+	/* save FP state */
+	mr	r3, r9
+	bl	kvmppc_save_fp
+
+	/* Increment yield count if they have a VPA */
+	ld	r8, VCPU_VPA(r9)	/* do they have a VPA? */
+	cmpdi	r8, 0
+	beq	25f
+	lwz	r3, LPPACA_YIELDCOUNT(r8)
+	addi	r3, r3, 1
+	stw	r3, LPPACA_YIELDCOUNT(r8)
+	li	r3, 1
+	stb	r3, VCPU_VPA_DIRTY(r9)
+25:
+	/* Save PMU registers if requested */
+	/* r8 and cr0.eq are live here */
+	li	r3, 1
+	sldi	r3, r3, 31		/* MMCR0_FC (freeze counters) bit */
+	mfspr	r4, SPRN_MMCR0		/* save MMCR0 */
+	mtspr	SPRN_MMCR0, r3		/* freeze all counters, disable ints */
+	mfspr	r6, SPRN_MMCRA
+BEGIN_FTR_SECTION
+	/* On P7, clear MMCRA in order to disable SDAR updates */
+	li	r7, 0
+	mtspr	SPRN_MMCRA, r7
+END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206)
+	isync
+	beq	21f			/* if no VPA, save PMU stuff anyway */
+	lbz	r7, LPPACA_PMCINUSE(r8)
+	cmpwi	r7, 0			/* did they ask for PMU stuff to be saved? */
+	bne	21f
+	std	r3, VCPU_MMCR(r9)	/* if not, set saved MMCR0 to FC */
+	b	22f
+21:	mfspr	r5, SPRN_MMCR1
+	mfspr	r7, SPRN_SIAR
+	mfspr	r8, SPRN_SDAR
+	std	r4, VCPU_MMCR(r9)
+	std	r5, VCPU_MMCR + 8(r9)
+	std	r6, VCPU_MMCR + 16(r9)
+	std	r7, VCPU_SIAR(r9)
+	std	r8, VCPU_SDAR(r9)
+	mfspr	r3, SPRN_PMC1
+	mfspr	r4, SPRN_PMC2
+	mfspr	r5, SPRN_PMC3
+	mfspr	r6, SPRN_PMC4
+	mfspr	r7, SPRN_PMC5
+	mfspr	r8, SPRN_PMC6
+BEGIN_FTR_SECTION
+	mfspr	r10, SPRN_PMC7
+	mfspr	r11, SPRN_PMC8
+END_FTR_SECTION_IFSET(CPU_FTR_ARCH_201)
+	stw	r3, VCPU_PMC(r9)
+	stw	r4, VCPU_PMC + 4(r9)
+	stw	r5, VCPU_PMC + 8(r9)
+	stw	r6, VCPU_PMC + 12(r9)
+	stw	r7, VCPU_PMC + 16(r9)
+	stw	r8, VCPU_PMC + 20(r9)
+BEGIN_FTR_SECTION
+	stw	r10, VCPU_PMC + 24(r9)
+	stw	r11, VCPU_PMC + 28(r9)
+END_FTR_SECTION_IFSET(CPU_FTR_ARCH_201)
+BEGIN_FTR_SECTION
+	mfspr	r4, SPRN_MMCR2
+	mfspr	r5, SPRN_SIER
+	mfspr	r6, SPRN_SPMC1
+	mfspr	r7, SPRN_SPMC2
+	mfspr	r8, SPRN_MMCRS
+	std	r4, VCPU_MMCR + 24(r9)
+	std	r5, VCPU_SIER(r9)
+	stw	r6, VCPU_PMC + 24(r9)
+	stw	r7, VCPU_PMC + 28(r9)
+	std	r8, VCPU_MMCR + 32(r9)
+	lis	r4, 0x8000
+	mtspr	SPRN_MMCRS, r4
+END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
+22:
 	/* Clear out SLB */
 	li	r5,0
 	slbmte	r5,r5
 	slbia
 	ptesync
 
-hdec_soon:			/* r9 = vcpu, r12 = trap, r13 = paca */
+hdec_soon:			/* r12 = trap, r13 = paca */
 BEGIN_FTR_SECTION
 	b	32f
 END_FTR_SECTION_IFSET(CPU_FTR_ARCH_201)
@@ -1014,8 +1313,6 @@
 	 */
 	cmpwi	r3,0x100	/* Are we the first here? */
 	bge	43f
-	cmpwi	r3,1		/* Are any other threads in the guest? */
-	ble	43f
 	cmpwi	r12,BOOK3S_INTERRUPT_HV_DECREMENTER
 	beq	40f
 	li	r0,0
@@ -1026,7 +1323,7 @@
 	 * doesn't wake CPUs up from nap.
 	 */
 	lwz	r3,VCORE_NAPPING_THREADS(r5)
-	lwz	r4,VCPU_PTID(r9)
+	lbz	r4,HSTATE_PTID(r13)
 	li	r0,1
 	sld	r0,r0,r4
 	andc.	r3,r3,r0		/* no sense IPI'ing ourselves */
@@ -1045,10 +1342,11 @@
 	addi	r6,r6,PACA_SIZE
 	bne	42b
 
+secondary_too_late:
 	/* Secondary threads wait for primary to do partition switch */
-43:	ld	r4,VCPU_KVM(r9)		/* pointer to struct kvm */
-	ld	r5,HSTATE_KVM_VCORE(r13)
-	lwz	r3,VCPU_PTID(r9)
+43:	ld	r5,HSTATE_KVM_VCORE(r13)
+	ld	r4,VCORE_KVM(r5)	/* pointer to struct kvm */
+	lbz	r3,HSTATE_PTID(r13)
 	cmpwi	r3,0
 	beq	15f
 	HMT_LOW
@@ -1076,6 +1374,15 @@
 	mtspr	SPRN_LPID,r7
 	isync
 
+BEGIN_FTR_SECTION
+	/* DPDES is shared between threads */
+	mfspr	r7, SPRN_DPDES
+	std	r7, VCORE_DPDES(r5)
+	/* clear DPDES so we don't get guest doorbells in the host */
+	li	r8, 0
+	mtspr	SPRN_DPDES, r8
+END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
+
 	/* Subtract timebase offset from timebase */
 	ld	r8,VCORE_TB_OFFSET(r5)
 	cmpdi	r8,0
@@ -1113,7 +1420,8 @@
 	 * We have to lock against concurrent tlbies, and
 	 * we have to flush the whole TLB.
 	 */
-32:	ld	r4,VCPU_KVM(r9)		/* pointer to struct kvm */
+32:	ld	r5,HSTATE_KVM_VCORE(r13)
+	ld	r4,VCORE_KVM(r5)	/* pointer to struct kvm */
 
 	/* Take the guest's tlbie_lock */
 #ifdef __BIG_ENDIAN__
@@ -1203,6 +1511,56 @@
 	add	r5,r5,r6
 	std	r5,VCPU_DEC_EXPIRES(r9)
 
+BEGIN_FTR_SECTION
+	b	8f
+END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_207S)
+	/* Turn on TM so we can access TFHAR/TFIAR/TEXASR */
+	mfmsr	r8
+	li	r0, 1
+	rldimi	r8, r0, MSR_TM_LG, 63-MSR_TM_LG
+	mtmsrd	r8
+
+	/* Save POWER8-specific registers */
+	mfspr	r5, SPRN_IAMR
+	mfspr	r6, SPRN_PSPB
+	mfspr	r7, SPRN_FSCR
+	std	r5, VCPU_IAMR(r9)
+	stw	r6, VCPU_PSPB(r9)
+	std	r7, VCPU_FSCR(r9)
+	mfspr	r5, SPRN_IC
+	mfspr	r6, SPRN_VTB
+	mfspr	r7, SPRN_TAR
+	std	r5, VCPU_IC(r9)
+	std	r6, VCPU_VTB(r9)
+	std	r7, VCPU_TAR(r9)
+#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
+	mfspr	r5, SPRN_TFHAR
+	mfspr	r6, SPRN_TFIAR
+	mfspr	r7, SPRN_TEXASR
+	std	r5, VCPU_TFHAR(r9)
+	std	r6, VCPU_TFIAR(r9)
+	std	r7, VCPU_TEXASR(r9)
+#endif
+	mfspr	r8, SPRN_EBBHR
+	std	r8, VCPU_EBBHR(r9)
+	mfspr	r5, SPRN_EBBRR
+	mfspr	r6, SPRN_BESCR
+	mfspr	r7, SPRN_CSIGR
+	mfspr	r8, SPRN_TACR
+	std	r5, VCPU_EBBRR(r9)
+	std	r6, VCPU_BESCR(r9)
+	std	r7, VCPU_CSIGR(r9)
+	std	r8, VCPU_TACR(r9)
+	mfspr	r5, SPRN_TCSCR
+	mfspr	r6, SPRN_ACOP
+	mfspr	r7, SPRN_PID
+	mfspr	r8, SPRN_WORT
+	std	r5, VCPU_TCSCR(r9)
+	std	r6, VCPU_ACOP(r9)
+	stw	r7, VCPU_GUEST_PID(r9)
+	std	r8, VCPU_WORT(r9)
+8:
+
 	/* Save and reset AMR and UAMOR before turning on the MMU */
 BEGIN_FTR_SECTION
 	mfspr	r5,SPRN_AMR
@@ -1217,130 +1575,10 @@
 	li	r0, KVM_GUEST_MODE_NONE
 	stb	r0, HSTATE_IN_GUEST(r13)
 
-	/* Switch DSCR back to host value */
-BEGIN_FTR_SECTION
-	mfspr	r8, SPRN_DSCR
-	ld	r7, HSTATE_DSCR(r13)
-	std	r8, VCPU_DSCR(r9)
-	mtspr	SPRN_DSCR, r7
-END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206)
-
-	/* Save non-volatile GPRs */
-	std	r14, VCPU_GPR(R14)(r9)
-	std	r15, VCPU_GPR(R15)(r9)
-	std	r16, VCPU_GPR(R16)(r9)
-	std	r17, VCPU_GPR(R17)(r9)
-	std	r18, VCPU_GPR(R18)(r9)
-	std	r19, VCPU_GPR(R19)(r9)
-	std	r20, VCPU_GPR(R20)(r9)
-	std	r21, VCPU_GPR(R21)(r9)
-	std	r22, VCPU_GPR(R22)(r9)
-	std	r23, VCPU_GPR(R23)(r9)
-	std	r24, VCPU_GPR(R24)(r9)
-	std	r25, VCPU_GPR(R25)(r9)
-	std	r26, VCPU_GPR(R26)(r9)
-	std	r27, VCPU_GPR(R27)(r9)
-	std	r28, VCPU_GPR(R28)(r9)
-	std	r29, VCPU_GPR(R29)(r9)
-	std	r30, VCPU_GPR(R30)(r9)
-	std	r31, VCPU_GPR(R31)(r9)
-
-	/* Save SPRGs */
-	mfspr	r3, SPRN_SPRG0
-	mfspr	r4, SPRN_SPRG1
-	mfspr	r5, SPRN_SPRG2
-	mfspr	r6, SPRN_SPRG3
-	std	r3, VCPU_SPRG0(r9)
-	std	r4, VCPU_SPRG1(r9)
-	std	r5, VCPU_SPRG2(r9)
-	std	r6, VCPU_SPRG3(r9)
-
-	/* save FP state */
-	mr	r3, r9
-	bl	.kvmppc_save_fp
-
-	/* Increment yield count if they have a VPA */
-	ld	r8, VCPU_VPA(r9)	/* do they have a VPA? */
-	cmpdi	r8, 0
-	beq	25f
-	lwz	r3, LPPACA_YIELDCOUNT(r8)
-	addi	r3, r3, 1
-	stw	r3, LPPACA_YIELDCOUNT(r8)
-	li	r3, 1
-	stb	r3, VCPU_VPA_DIRTY(r9)
-25:
-	/* Save PMU registers if requested */
-	/* r8 and cr0.eq are live here */
-	li	r3, 1
-	sldi	r3, r3, 31		/* MMCR0_FC (freeze counters) bit */
-	mfspr	r4, SPRN_MMCR0		/* save MMCR0 */
-	mtspr	SPRN_MMCR0, r3		/* freeze all counters, disable ints */
-	mfspr	r6, SPRN_MMCRA
-BEGIN_FTR_SECTION
-	/* On P7, clear MMCRA in order to disable SDAR updates */
-	li	r7, 0
-	mtspr	SPRN_MMCRA, r7
-END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206)
-	isync
-	beq	21f			/* if no VPA, save PMU stuff anyway */
-	lbz	r7, LPPACA_PMCINUSE(r8)
-	cmpwi	r7, 0			/* did they ask for PMU stuff to be saved? */
-	bne	21f
-	std	r3, VCPU_MMCR(r9)	/* if not, set saved MMCR0 to FC */
-	b	22f
-21:	mfspr	r5, SPRN_MMCR1
-	mfspr	r7, SPRN_SIAR
-	mfspr	r8, SPRN_SDAR
-	std	r4, VCPU_MMCR(r9)
-	std	r5, VCPU_MMCR + 8(r9)
-	std	r6, VCPU_MMCR + 16(r9)
-	std	r7, VCPU_SIAR(r9)
-	std	r8, VCPU_SDAR(r9)
-	mfspr	r3, SPRN_PMC1
-	mfspr	r4, SPRN_PMC2
-	mfspr	r5, SPRN_PMC3
-	mfspr	r6, SPRN_PMC4
-	mfspr	r7, SPRN_PMC5
-	mfspr	r8, SPRN_PMC6
-BEGIN_FTR_SECTION
-	mfspr	r10, SPRN_PMC7
-	mfspr	r11, SPRN_PMC8
-END_FTR_SECTION_IFSET(CPU_FTR_ARCH_201)
-	stw	r3, VCPU_PMC(r9)
-	stw	r4, VCPU_PMC + 4(r9)
-	stw	r5, VCPU_PMC + 8(r9)
-	stw	r6, VCPU_PMC + 12(r9)
-	stw	r7, VCPU_PMC + 16(r9)
-	stw	r8, VCPU_PMC + 20(r9)
-BEGIN_FTR_SECTION
-	stw	r10, VCPU_PMC + 24(r9)
-	stw	r11, VCPU_PMC + 28(r9)
-END_FTR_SECTION_IFSET(CPU_FTR_ARCH_201)
-22:
 	ld	r0, 112+PPC_LR_STKOFF(r1)
 	addi	r1, r1, 112
 	mtlr	r0
 	blr
-secondary_too_late:
-	ld	r5,HSTATE_KVM_VCORE(r13)
-	HMT_LOW
-13:	lbz	r3,VCORE_IN_GUEST(r5)
-	cmpwi	r3,0
-	bne	13b
-	HMT_MEDIUM
-	li	r0, KVM_GUEST_MODE_NONE
-	stb	r0, HSTATE_IN_GUEST(r13)
-	ld	r11,PACA_SLBSHADOWPTR(r13)
-
-	.rept	SLB_NUM_BOLTED
-	ld	r5,SLBSHADOW_SAVEAREA(r11)
-	ld	r6,SLBSHADOW_SAVEAREA+8(r11)
-	andis.	r7,r5,SLB_ESID_V@h
-	beq	1f
-	slbmte	r6,r5
-1:	addi	r11,r11,16
-	.endr
-	b	22b
 
 /*
  * Check whether an HDSI is an HPTE not found fault or something else.
@@ -1386,8 +1624,7 @@
 	mtspr	SPRN_SRR0, r10
 	mtspr	SPRN_SRR1, r11
 	li	r10, BOOK3S_INTERRUPT_DATA_STORAGE
-	li	r11, (MSR_ME << 1) | 1	/* synthesize MSR_SF | MSR_ME */
-	rotldi	r11, r11, 63
+	ld	r11, VCPU_INTR_MSR(r9)
 fast_interrupt_c_return:
 6:	ld	r7, VCPU_CTR(r9)
 	lwz	r8, VCPU_XER(r9)
@@ -1456,8 +1693,7 @@
 1:	mtspr	SPRN_SRR0, r10
 	mtspr	SPRN_SRR1, r11
 	li	r10, BOOK3S_INTERRUPT_INST_STORAGE
-	li	r11, (MSR_ME << 1) | 1	/* synthesize MSR_SF | MSR_ME */
-	rotldi	r11, r11, 63
+	ld	r11, VCPU_INTR_MSR(r9)
 	b	fast_interrupt_c_return
 
 3:	ld	r6, VCPU_KVM(r9)	/* not relocated, use VRMA */
@@ -1474,7 +1710,8 @@
 hcall_try_real_mode:
 	ld	r3,VCPU_GPR(R3)(r9)
 	andi.	r0,r11,MSR_PR
-	bne	guest_exit_cont
+	/* sc 1 from userspace - reflect to guest syscall */
+	bne	sc_1_fast_return
 	clrrdi	r3,r3,2
 	cmpldi	r3,hcall_real_table_end - hcall_real_table
 	bge	guest_exit_cont
@@ -1495,6 +1732,14 @@
 	ld	r11,VCPU_MSR(r4)
 	b	fast_guest_return
 
+sc_1_fast_return:
+	mtspr	SPRN_SRR0,r10
+	mtspr	SPRN_SRR1,r11
+	li	r10, BOOK3S_INTERRUPT_SYSCALL
+	ld	r11, VCPU_INTR_MSR(r9)
+	mr	r4,r9
+	b	fast_guest_return
+
 	/* We've attempted a real mode hcall, but it's punted it back
 	 * to userspace.  We need to restore some clobbered volatiles
 	 * before resuming the pass-it-to-qemu path */
@@ -1588,14 +1833,34 @@
 	.long	0		/* 0x11c */
 	.long	0		/* 0x120 */
 	.long	.kvmppc_h_bulk_remove - hcall_real_table
+	.long	0		/* 0x128 */
+	.long	0		/* 0x12c */
+	.long	0		/* 0x130 */
+	.long	.kvmppc_h_set_xdabr - hcall_real_table
 hcall_real_table_end:
 
 ignore_hdec:
 	mr	r4,r9
 	b	fast_guest_return
 
+_GLOBAL(kvmppc_h_set_xdabr)
+	andi.	r0, r5, DABRX_USER | DABRX_KERNEL
+	beq	6f
+	li	r0, DABRX_USER | DABRX_KERNEL | DABRX_BTI
+	andc.	r0, r5, r0
+	beq	3f
+6:	li	r3, H_PARAMETER
+	blr
+
 _GLOBAL(kvmppc_h_set_dabr)
+	li	r5, DABRX_USER | DABRX_KERNEL
+3:
+BEGIN_FTR_SECTION
+	b	2f
+END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
 	std	r4,VCPU_DABR(r3)
+	stw	r5, VCPU_DABRX(r3)
+	mtspr	SPRN_DABRX, r5
 	/* Work around P7 bug where DABR can get corrupted on mtspr */
 1:	mtspr	SPRN_DABR,r4
 	mfspr	r5, SPRN_DABR
@@ -1605,6 +1870,17 @@
 	li	r3,0
 	blr
 
+	/* Emulate H_SET_DABR/X on P8 for the sake of compat mode guests */
+2:	rlwimi	r5, r4, 5, DAWRX_DR | DAWRX_DW
+	rlwimi	r5, r4, 1, DAWRX_WT
+	clrrdi	r4, r4, 3
+	std	r4, VCPU_DAWR(r3)
+	std	r5, VCPU_DAWRX(r3)
+	mtspr	SPRN_DAWR, r4
+	mtspr	SPRN_DAWRX, r5
+	li	r3, 0
+	blr
+
 _GLOBAL(kvmppc_h_cede)
 	ori	r11,r11,MSR_EE
 	std	r11,VCPU_MSR(r3)
@@ -1628,7 +1904,7 @@
 	 * up to the host.
 	 */
 	ld	r5,HSTATE_KVM_VCORE(r13)
-	lwz	r6,VCPU_PTID(r3)
+	lbz	r6,HSTATE_PTID(r13)
 	lwz	r8,VCORE_ENTRY_EXIT(r5)
 	clrldi	r8,r8,56
 	li	r0,1
@@ -1643,9 +1919,8 @@
 	bne	31b
 	/* order napping_threads update vs testing entry_exit_count */
 	isync
-	li	r0,1
+	li	r0,NAPPING_CEDE
 	stb	r0,HSTATE_NAPPING(r13)
-	mr	r4,r3
 	lwz	r7,VCORE_ENTRY_EXIT(r5)
 	cmpwi	r7,0x100
 	bge	33f		/* another thread already exiting */
@@ -1677,16 +1952,19 @@
 	std	r31, VCPU_GPR(R31)(r3)
 
 	/* save FP state */
-	bl	.kvmppc_save_fp
+	bl	kvmppc_save_fp
 
 	/*
-	 * Take a nap until a decrementer or external interrupt occurs,
-	 * with PECE1 (wake on decr) and PECE0 (wake on external) set in LPCR
+	 * Take a nap until a decrementer or external or doorbell interrupt
+	 * occurs, with PECE1, PECE0 and PECEDP set in LPCR
 	 */
 	li	r0,1
 	stb	r0,HSTATE_HWTHREAD_REQ(r13)
 	mfspr	r5,SPRN_LPCR
 	ori	r5,r5,LPCR_PECE0 | LPCR_PECE1
+BEGIN_FTR_SECTION
+	oris	r5,r5,LPCR_PECEDP@h
+END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
 	mtspr	SPRN_LPCR,r5
 	isync
 	li	r0, 0
@@ -1698,6 +1976,11 @@
 	nap
 	b	.
 
+33:	mr	r4, r3
+	li	r3, 0
+	li	r12, 0
+	b	34f
+
 kvm_end_cede:
 	/* get vcpu pointer */
 	ld	r4, HSTATE_KVM_VCPU(r13)
@@ -1727,12 +2010,15 @@
 	ld	r29, VCPU_GPR(R29)(r4)
 	ld	r30, VCPU_GPR(R30)(r4)
 	ld	r31, VCPU_GPR(R31)(r4)
+
+	/* Check the wake reason in SRR1 to see why we got here */
+	bl	kvmppc_check_wake_reason
 
 	/* clear our bit in vcore->napping_threads */
-33:	ld	r5,HSTATE_KVM_VCORE(r13)
-	lwz	r3,VCPU_PTID(r4)
+34:	ld	r5,HSTATE_KVM_VCORE(r13)
+	lbz	r7,HSTATE_PTID(r13)
 	li	r0,1
-	sld	r0,r0,r3
+	sld	r0,r0,r7
 	addi	r6,r5,VCORE_NAPPING_THREADS
 32:	lwarx	r7,0,r6
 	andc	r7,r7,r0
@@ -1741,23 +2027,18 @@
 	li	r0,0
 	stb	r0,HSTATE_NAPPING(r13)
 
-	/* Check the wake reason in SRR1 to see why we got here */
-	mfspr	r3, SPRN_SRR1
-	rlwinm	r3, r3, 44-31, 0x7	/* extract wake reason field */
-	cmpwi	r3, 4			/* was it an external interrupt? */
-	li	r12, BOOK3S_INTERRUPT_EXTERNAL
+	/* See if the wake reason means we need to exit */
+	stw	r12, VCPU_TRAP(r4)
 	mr	r9, r4
-	ld	r10, VCPU_PC(r9)
-	ld	r11, VCPU_MSR(r9)
-	beq	do_ext_interrupt	/* if so */
+	cmpdi	r3, 0
+	bgt	guest_exit_cont
 
 	/* see if any other thread is already exiting */
 	lwz	r0,VCORE_ENTRY_EXIT(r5)
 	cmpwi	r0,0x100
-	blt	kvmppc_cede_reentry	/* if not go back to guest */
+	bge	guest_exit_cont
 
-	/* some threads are exiting, so go to the guest exit path */
-	b	hcall_real_fallback
+	b	kvmppc_cede_reentry	/* if not go back to guest */
 
 	/* cede when already previously prodded case */
 kvm_cede_prodded:
@@ -1783,11 +2064,48 @@
 	beq	mc_cont
 	/* If not, deliver a machine check.  SRR0/1 are already set */
 	li	r10, BOOK3S_INTERRUPT_MACHINE_CHECK
-	li	r11, (MSR_ME << 1) | 1	/* synthesize MSR_SF | MSR_ME */
-	rotldi	r11, r11, 63
+	ld	r11, VCPU_INTR_MSR(r9)
 	b	fast_interrupt_c_return
 
 /*
+ * Check the reason we woke from nap, and take appropriate action.
+ * Returns:
+ *	0 if nothing needs to be done
+ *	1 if something happened that needs to be handled by the host
+ *	-1 if there was a guest wakeup (IPI)
+ *
+ * Also sets r12 to the interrupt vector for any interrupt that needs
+ * to be handled now by the host (0x500 for external interrupt), or zero.
+ */
+kvmppc_check_wake_reason:
+	mfspr	r6, SPRN_SRR1
+BEGIN_FTR_SECTION
+	rlwinm	r6, r6, 45-31, 0xf	/* extract wake reason field (P8) */
+FTR_SECTION_ELSE
+	rlwinm	r6, r6, 45-31, 0xe	/* P7 wake reason field is 3 bits */
+ALT_FTR_SECTION_END_IFSET(CPU_FTR_ARCH_207S)
+	cmpwi	r6, 8			/* was it an external interrupt? */
+	li	r12, BOOK3S_INTERRUPT_EXTERNAL
+	beq	kvmppc_read_intr	/* if so, see what it was */
+	li	r3, 0
+	li	r12, 0
+	cmpwi	r6, 6			/* was it the decrementer? */
+	beq	0f
+BEGIN_FTR_SECTION
+	cmpwi	r6, 5			/* privileged doorbell? */
+	beq	0f
+	cmpwi	r6, 3			/* hypervisor doorbell? */
+	beq	3f
+END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
+	li	r3, 1			/* anything else, return 1 */
+0:	blr
+
+	/* hypervisor doorbell */
+3:	li	r12, BOOK3S_INTERRUPT_H_DOORBELL
+	li	r3, 1
+	blr
+
+/*
  * Determine what sort of external interrupt is pending (if any).
  * Returns:
  *	0 if no interrupt is pending
@@ -1818,7 +2136,6 @@
 	 * interrupts directly to the guest
 	 */
 	cmpwi	r3, XICS_IPI		/* if there is, is it an IPI? */
-	li	r3, 1
 	bne	42f
 
 	/* It's an IPI, clear the MFRR and EOI it */
@@ -1844,19 +2161,25 @@
 	 * before exit, it will be picked up by the host ICP driver
 	 */
 	stw	r0, HSTATE_SAVED_XIRR(r13)
+	li	r3, 1
 	b	1b
 
 43:	/* We raced with the host, we need to resend that IPI, bummer */
 	li	r0, IPI_PRIORITY
 	stbcix	r0, r6, r8		/* set the IPI */
 	sync
+	li	r3, 1
 	b	1b
 
 /*
  * Save away FP, VMX and VSX registers.
  * r3 = vcpu pointer
+ * N.B. r30 and r31 are volatile across this function,
+ * thus it is not callable from C.
  */
-_GLOBAL(kvmppc_save_fp)
+kvmppc_save_fp:
+	mflr	r30
+	mr	r31,r3
 	mfmsr	r5
 	ori	r8,r5,MSR_FP
 #ifdef CONFIG_ALTIVEC
@@ -1871,42 +2194,17 @@
 #endif
 	mtmsrd	r8
 	isync
-#ifdef CONFIG_VSX
-BEGIN_FTR_SECTION
-	reg = 0
-	.rept	32
-	li	r6,reg*16+VCPU_VSRS
-	STXVD2X(reg,R6,R3)
-	reg = reg + 1
-	.endr
-FTR_SECTION_ELSE
-#endif
-	reg = 0
-	.rept	32
-	stfd	reg,reg*8+VCPU_FPRS(r3)
-	reg = reg + 1
-	.endr
-#ifdef CONFIG_VSX
-ALT_FTR_SECTION_END_IFSET(CPU_FTR_VSX)
-#endif
-	mffs	fr0
-	stfd	fr0,VCPU_FPSCR(r3)
-
+	addi	r3,r3,VCPU_FPRS
+	bl	.store_fp_state
 #ifdef CONFIG_ALTIVEC
 BEGIN_FTR_SECTION
-	reg = 0
-	.rept	32
-	li	r6,reg*16+VCPU_VRS
-	stvx	reg,r6,r3
-	reg = reg + 1
-	.endr
-	mfvscr	vr0
-	li	r6,VCPU_VSCR
-	stvx	vr0,r6,r3
+	addi	r3,r31,VCPU_VRS
+	bl	.store_vr_state
 END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
 #endif
 	mfspr	r6,SPRN_VRSAVE
 	stw	r6,VCPU_VRSAVE(r3)
+	mtlr	r30
 	mtmsrd	r5
 	isync
 	blr
@@ -1914,9 +2212,12 @@
 /*
  * Load up FP, VMX and VSX registers
  * r4 = vcpu pointer
+ * N.B. r30 and r31 are volatile across this function,
+ * thus it is not callable from C.
  */
-	.globl	kvmppc_load_fp
 kvmppc_load_fp:
+	mflr	r30
+	mr	r31,r4
 	mfmsr	r9
 	ori	r8,r9,MSR_FP
 #ifdef CONFIG_ALTIVEC
@@ -1931,42 +2232,18 @@
 #endif
 	mtmsrd	r8
 	isync
-	lfd	fr0,VCPU_FPSCR(r4)
-	MTFSF_L(fr0)
-#ifdef CONFIG_VSX
-BEGIN_FTR_SECTION
-	reg = 0
-	.rept	32
-	li	r7,reg*16+VCPU_VSRS
-	LXVD2X(reg,R7,R4)
-	reg = reg + 1
-	.endr
-FTR_SECTION_ELSE
-#endif
-	reg = 0
-	.rept	32
-	lfd	reg,reg*8+VCPU_FPRS(r4)
-	reg = reg + 1
-	.endr
-#ifdef CONFIG_VSX
-ALT_FTR_SECTION_END_IFSET(CPU_FTR_VSX)
-#endif
-
+	addi	r3,r4,VCPU_FPRS
+	bl	.load_fp_state
 #ifdef CONFIG_ALTIVEC
 BEGIN_FTR_SECTION
-	li	r7,VCPU_VSCR
-	lvx	vr0,r7,r4
-	mtvscr	vr0
-	reg = 0
-	.rept	32
-	li	r7,reg*16+VCPU_VRS
-	lvx	reg,r7,r4
-	reg = reg + 1
-	.endr
+	addi	r3,r31,VCPU_VRS
+	bl	.load_vr_state
 END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
 #endif
 	lwz	r7,VCPU_VRSAVE(r4)
 	mtspr	SPRN_VRSAVE,r7
+	mtlr	r30
+	mr	r4,r31
 	blr
 
 /*
diff --git a/arch/powerpc/kvm/book3s_paired_singles.c b/arch/powerpc/kvm/book3s_paired_singles.c
index a59a25a..c1abd95 100644
--- a/arch/powerpc/kvm/book3s_paired_singles.c
+++ b/arch/powerpc/kvm/book3s_paired_singles.c
@@ -160,7 +160,7 @@
 
 static inline void kvmppc_sync_qpr(struct kvm_vcpu *vcpu, int rt)
 {
-	kvm_cvt_df(&vcpu->arch.fpr[rt], &vcpu->arch.qpr[rt]);
+	kvm_cvt_df(&VCPU_FPR(vcpu, rt), &vcpu->arch.qpr[rt]);
 }
 
 static void kvmppc_inject_pf(struct kvm_vcpu *vcpu, ulong eaddr, bool is_store)
@@ -207,11 +207,11 @@
 	/* put in registers */
 	switch (ls_type) {
 	case FPU_LS_SINGLE:
-		kvm_cvt_fd((u32*)tmp, &vcpu->arch.fpr[rs]);
+		kvm_cvt_fd((u32*)tmp, &VCPU_FPR(vcpu, rs));
 		vcpu->arch.qpr[rs] = *((u32*)tmp);
 		break;
 	case FPU_LS_DOUBLE:
-		vcpu->arch.fpr[rs] = *((u64*)tmp);
+		VCPU_FPR(vcpu, rs) = *((u64*)tmp);
 		break;
 	}
 
@@ -233,18 +233,18 @@
 
 	switch (ls_type) {
 	case FPU_LS_SINGLE:
-		kvm_cvt_df(&vcpu->arch.fpr[rs], (u32*)tmp);
+		kvm_cvt_df(&VCPU_FPR(vcpu, rs), (u32*)tmp);
 		val = *((u32*)tmp);
 		len = sizeof(u32);
 		break;
 	case FPU_LS_SINGLE_LOW:
-		*((u32*)tmp) = vcpu->arch.fpr[rs];
-		val = vcpu->arch.fpr[rs] & 0xffffffff;
+		*((u32*)tmp) = VCPU_FPR(vcpu, rs);
+		val = VCPU_FPR(vcpu, rs) & 0xffffffff;
 		len = sizeof(u32);
 		break;
 	case FPU_LS_DOUBLE:
-		*((u64*)tmp) = vcpu->arch.fpr[rs];
-		val = vcpu->arch.fpr[rs];
+		*((u64*)tmp) = VCPU_FPR(vcpu, rs);
+		val = VCPU_FPR(vcpu, rs);
 		len = sizeof(u64);
 		break;
 	default:
@@ -301,7 +301,7 @@
 	emulated = EMULATE_DONE;
 
 	/* put in registers */
-	kvm_cvt_fd(&tmp[0], &vcpu->arch.fpr[rs]);
+	kvm_cvt_fd(&tmp[0], &VCPU_FPR(vcpu, rs));
 	vcpu->arch.qpr[rs] = tmp[1];
 
 	dprintk(KERN_INFO "KVM: PSQ_LD [0x%x, 0x%x] at 0x%lx (%d)\n", tmp[0],
@@ -319,7 +319,7 @@
 	u32 tmp[2];
 	int len = w ? sizeof(u32) : sizeof(u64);
 
-	kvm_cvt_df(&vcpu->arch.fpr[rs], &tmp[0]);
+	kvm_cvt_df(&VCPU_FPR(vcpu, rs), &tmp[0]);
 	tmp[1] = vcpu->arch.qpr[rs];
 
 	r = kvmppc_st(vcpu, &addr, len, tmp, true);
@@ -512,7 +512,6 @@
 						 u32 *src2, u32 *src3))
 {
 	u32 *qpr = vcpu->arch.qpr;
-	u64 *fpr = vcpu->arch.fpr;
 	u32 ps0_out;
 	u32 ps0_in1, ps0_in2, ps0_in3;
 	u32 ps1_in1, ps1_in2, ps1_in3;
@@ -521,20 +520,20 @@
 	WARN_ON(rc);
 
 	/* PS0 */
-	kvm_cvt_df(&fpr[reg_in1], &ps0_in1);
-	kvm_cvt_df(&fpr[reg_in2], &ps0_in2);
-	kvm_cvt_df(&fpr[reg_in3], &ps0_in3);
+	kvm_cvt_df(&VCPU_FPR(vcpu, reg_in1), &ps0_in1);
+	kvm_cvt_df(&VCPU_FPR(vcpu, reg_in2), &ps0_in2);
+	kvm_cvt_df(&VCPU_FPR(vcpu, reg_in3), &ps0_in3);
 
 	if (scalar & SCALAR_LOW)
 		ps0_in2 = qpr[reg_in2];
 
-	func(&vcpu->arch.fpscr, &ps0_out, &ps0_in1, &ps0_in2, &ps0_in3);
+	func(&vcpu->arch.fp.fpscr, &ps0_out, &ps0_in1, &ps0_in2, &ps0_in3);
 
 	dprintk(KERN_INFO "PS3 ps0 -> f(0x%x, 0x%x, 0x%x) = 0x%x\n",
 			  ps0_in1, ps0_in2, ps0_in3, ps0_out);
 
 	if (!(scalar & SCALAR_NO_PS0))
-		kvm_cvt_fd(&ps0_out, &fpr[reg_out]);
+		kvm_cvt_fd(&ps0_out, &VCPU_FPR(vcpu, reg_out));
 
 	/* PS1 */
 	ps1_in1 = qpr[reg_in1];
@@ -545,7 +544,7 @@
 		ps1_in2 = ps0_in2;
 
 	if (!(scalar & SCALAR_NO_PS1))
-		func(&vcpu->arch.fpscr, &qpr[reg_out], &ps1_in1, &ps1_in2, &ps1_in3);
+		func(&vcpu->arch.fp.fpscr, &qpr[reg_out], &ps1_in1, &ps1_in2, &ps1_in3);
 
 	dprintk(KERN_INFO "PS3 ps1 -> f(0x%x, 0x%x, 0x%x) = 0x%x\n",
 			  ps1_in1, ps1_in2, ps1_in3, qpr[reg_out]);
@@ -561,7 +560,6 @@
 						 u32 *src2))
 {
 	u32 *qpr = vcpu->arch.qpr;
-	u64 *fpr = vcpu->arch.fpr;
 	u32 ps0_out;
 	u32 ps0_in1, ps0_in2;
 	u32 ps1_out;
@@ -571,20 +569,20 @@
 	WARN_ON(rc);
 
 	/* PS0 */
-	kvm_cvt_df(&fpr[reg_in1], &ps0_in1);
+	kvm_cvt_df(&VCPU_FPR(vcpu, reg_in1), &ps0_in1);
 
 	if (scalar & SCALAR_LOW)
 		ps0_in2 = qpr[reg_in2];
 	else
-		kvm_cvt_df(&fpr[reg_in2], &ps0_in2);
+		kvm_cvt_df(&VCPU_FPR(vcpu, reg_in2), &ps0_in2);
 
-	func(&vcpu->arch.fpscr, &ps0_out, &ps0_in1, &ps0_in2);
+	func(&vcpu->arch.fp.fpscr, &ps0_out, &ps0_in1, &ps0_in2);
 
 	if (!(scalar & SCALAR_NO_PS0)) {
 		dprintk(KERN_INFO "PS2 ps0 -> f(0x%x, 0x%x) = 0x%x\n",
 				  ps0_in1, ps0_in2, ps0_out);
 
-		kvm_cvt_fd(&ps0_out, &fpr[reg_out]);
+		kvm_cvt_fd(&ps0_out, &VCPU_FPR(vcpu, reg_out));
 	}
 
 	/* PS1 */
@@ -594,7 +592,7 @@
 	if (scalar & SCALAR_HIGH)
 		ps1_in2 = ps0_in2;
 
-	func(&vcpu->arch.fpscr, &ps1_out, &ps1_in1, &ps1_in2);
+	func(&vcpu->arch.fp.fpscr, &ps1_out, &ps1_in1, &ps1_in2);
 
 	if (!(scalar & SCALAR_NO_PS1)) {
 		qpr[reg_out] = ps1_out;
@@ -612,7 +610,6 @@
 						 u32 *dst, u32 *src1))
 {
 	u32 *qpr = vcpu->arch.qpr;
-	u64 *fpr = vcpu->arch.fpr;
 	u32 ps0_out, ps0_in;
 	u32 ps1_in;
 
@@ -620,17 +617,17 @@
 	WARN_ON(rc);
 
 	/* PS0 */
-	kvm_cvt_df(&fpr[reg_in], &ps0_in);
-	func(&vcpu->arch.fpscr, &ps0_out, &ps0_in);
+	kvm_cvt_df(&VCPU_FPR(vcpu, reg_in), &ps0_in);
+	func(&vcpu->arch.fp.fpscr, &ps0_out, &ps0_in);
 
 	dprintk(KERN_INFO "PS1 ps0 -> f(0x%x) = 0x%x\n",
 			  ps0_in, ps0_out);
 
-	kvm_cvt_fd(&ps0_out, &fpr[reg_out]);
+	kvm_cvt_fd(&ps0_out, &VCPU_FPR(vcpu, reg_out));
 
 	/* PS1 */
 	ps1_in = qpr[reg_in];
-	func(&vcpu->arch.fpscr, &qpr[reg_out], &ps1_in);
+	func(&vcpu->arch.fp.fpscr, &qpr[reg_out], &ps1_in);
 
 	dprintk(KERN_INFO "PS1 ps1 -> f(0x%x) = 0x%x\n",
 			  ps1_in, qpr[reg_out]);
@@ -649,10 +646,10 @@
 	int ax_rc = inst_get_field(inst, 21, 25);
 	short full_d = inst_get_field(inst, 16, 31);
 
-	u64 *fpr_d = &vcpu->arch.fpr[ax_rd];
-	u64 *fpr_a = &vcpu->arch.fpr[ax_ra];
-	u64 *fpr_b = &vcpu->arch.fpr[ax_rb];
-	u64 *fpr_c = &vcpu->arch.fpr[ax_rc];
+	u64 *fpr_d = &VCPU_FPR(vcpu, ax_rd);
+	u64 *fpr_a = &VCPU_FPR(vcpu, ax_ra);
+	u64 *fpr_b = &VCPU_FPR(vcpu, ax_rb);
+	u64 *fpr_c = &VCPU_FPR(vcpu, ax_rc);
 
 	bool rcomp = (inst & 1) ? true : false;
 	u32 cr = kvmppc_get_cr(vcpu);
@@ -674,11 +671,11 @@
 	/* Do we need to clear FE0 / FE1 here? Don't think so. */
 
 #ifdef DEBUG
-	for (i = 0; i < ARRAY_SIZE(vcpu->arch.fpr); i++) {
+	for (i = 0; i < ARRAY_SIZE(vcpu->arch.fp.fpr); i++) {
 		u32 f;
-		kvm_cvt_df(&vcpu->arch.fpr[i], &f);
+		kvm_cvt_df(&VCPU_FPR(vcpu, i), &f);
 		dprintk(KERN_INFO "FPR[%d] = 0x%x / 0x%llx    QPR[%d] = 0x%x\n",
-			i, f, vcpu->arch.fpr[i], i, vcpu->arch.qpr[i]);
+			i, f, VCPU_FPR(vcpu, i), i, vcpu->arch.qpr[i]);
 	}
 #endif
 
@@ -764,8 +761,8 @@
 			break;
 		}
 		case OP_4X_PS_NEG:
-			vcpu->arch.fpr[ax_rd] = vcpu->arch.fpr[ax_rb];
-			vcpu->arch.fpr[ax_rd] ^= 0x8000000000000000ULL;
+			VCPU_FPR(vcpu, ax_rd) = VCPU_FPR(vcpu, ax_rb);
+			VCPU_FPR(vcpu, ax_rd) ^= 0x8000000000000000ULL;
 			vcpu->arch.qpr[ax_rd] = vcpu->arch.qpr[ax_rb];
 			vcpu->arch.qpr[ax_rd] ^= 0x80000000;
 			break;
@@ -775,7 +772,7 @@
 			break;
 		case OP_4X_PS_MR:
 			WARN_ON(rcomp);
-			vcpu->arch.fpr[ax_rd] = vcpu->arch.fpr[ax_rb];
+			VCPU_FPR(vcpu, ax_rd) = VCPU_FPR(vcpu, ax_rb);
 			vcpu->arch.qpr[ax_rd] = vcpu->arch.qpr[ax_rb];
 			break;
 		case OP_4X_PS_CMPO1:
@@ -784,44 +781,44 @@
 			break;
 		case OP_4X_PS_NABS:
 			WARN_ON(rcomp);
-			vcpu->arch.fpr[ax_rd] = vcpu->arch.fpr[ax_rb];
-			vcpu->arch.fpr[ax_rd] |= 0x8000000000000000ULL;
+			VCPU_FPR(vcpu, ax_rd) = VCPU_FPR(vcpu, ax_rb);
+			VCPU_FPR(vcpu, ax_rd) |= 0x8000000000000000ULL;
 			vcpu->arch.qpr[ax_rd] = vcpu->arch.qpr[ax_rb];
 			vcpu->arch.qpr[ax_rd] |= 0x80000000;
 			break;
 		case OP_4X_PS_ABS:
 			WARN_ON(rcomp);
-			vcpu->arch.fpr[ax_rd] = vcpu->arch.fpr[ax_rb];
-			vcpu->arch.fpr[ax_rd] &= ~0x8000000000000000ULL;
+			VCPU_FPR(vcpu, ax_rd) = VCPU_FPR(vcpu, ax_rb);
+			VCPU_FPR(vcpu, ax_rd) &= ~0x8000000000000000ULL;
 			vcpu->arch.qpr[ax_rd] = vcpu->arch.qpr[ax_rb];
 			vcpu->arch.qpr[ax_rd] &= ~0x80000000;
 			break;
 		case OP_4X_PS_MERGE00:
 			WARN_ON(rcomp);
-			vcpu->arch.fpr[ax_rd] = vcpu->arch.fpr[ax_ra];
-			/* vcpu->arch.qpr[ax_rd] = vcpu->arch.fpr[ax_rb]; */
-			kvm_cvt_df(&vcpu->arch.fpr[ax_rb],
+			VCPU_FPR(vcpu, ax_rd) = VCPU_FPR(vcpu, ax_ra);
+			/* vcpu->arch.qpr[ax_rd] = VCPU_FPR(vcpu, ax_rb); */
+			kvm_cvt_df(&VCPU_FPR(vcpu, ax_rb),
 				   &vcpu->arch.qpr[ax_rd]);
 			break;
 		case OP_4X_PS_MERGE01:
 			WARN_ON(rcomp);
-			vcpu->arch.fpr[ax_rd] = vcpu->arch.fpr[ax_ra];
+			VCPU_FPR(vcpu, ax_rd) = VCPU_FPR(vcpu, ax_ra);
 			vcpu->arch.qpr[ax_rd] = vcpu->arch.qpr[ax_rb];
 			break;
 		case OP_4X_PS_MERGE10:
 			WARN_ON(rcomp);
-			/* vcpu->arch.fpr[ax_rd] = vcpu->arch.qpr[ax_ra]; */
+			/* VCPU_FPR(vcpu, ax_rd) = vcpu->arch.qpr[ax_ra]; */
 			kvm_cvt_fd(&vcpu->arch.qpr[ax_ra],
-				   &vcpu->arch.fpr[ax_rd]);
-			/* vcpu->arch.qpr[ax_rd] = vcpu->arch.fpr[ax_rb]; */
-			kvm_cvt_df(&vcpu->arch.fpr[ax_rb],
+				   &VCPU_FPR(vcpu, ax_rd));
+			/* vcpu->arch.qpr[ax_rd] = VCPU_FPR(vcpu, ax_rb); */
+			kvm_cvt_df(&VCPU_FPR(vcpu, ax_rb),
 				   &vcpu->arch.qpr[ax_rd]);
 			break;
 		case OP_4X_PS_MERGE11:
 			WARN_ON(rcomp);
-			/* vcpu->arch.fpr[ax_rd] = vcpu->arch.qpr[ax_ra]; */
+			/* VCPU_FPR(vcpu, ax_rd) = vcpu->arch.qpr[ax_ra]; */
 			kvm_cvt_fd(&vcpu->arch.qpr[ax_ra],
-				   &vcpu->arch.fpr[ax_rd]);
+				   &VCPU_FPR(vcpu, ax_rd));
 			vcpu->arch.qpr[ax_rd] = vcpu->arch.qpr[ax_rb];
 			break;
 		}
@@ -856,7 +853,7 @@
 		case OP_4A_PS_SUM1:
 			emulated = kvmppc_ps_two_in(vcpu, rcomp, ax_rd,
 					ax_rb, ax_ra, SCALAR_NO_PS0 | SCALAR_HIGH, fps_fadds);
-			vcpu->arch.fpr[ax_rd] = vcpu->arch.fpr[ax_rc];
+			VCPU_FPR(vcpu, ax_rd) = VCPU_FPR(vcpu, ax_rc);
 			break;
 		case OP_4A_PS_SUM0:
 			emulated = kvmppc_ps_two_in(vcpu, rcomp, ax_rd,
@@ -1106,45 +1103,45 @@
 	case 59:
 		switch (inst_get_field(inst, 21, 30)) {
 		case OP_59_FADDS:
-			fpd_fadds(&vcpu->arch.fpscr, &cr, fpr_d, fpr_a, fpr_b);
+			fpd_fadds(&vcpu->arch.fp.fpscr, &cr, fpr_d, fpr_a, fpr_b);
 			kvmppc_sync_qpr(vcpu, ax_rd);
 			break;
 		case OP_59_FSUBS:
-			fpd_fsubs(&vcpu->arch.fpscr, &cr, fpr_d, fpr_a, fpr_b);
+			fpd_fsubs(&vcpu->arch.fp.fpscr, &cr, fpr_d, fpr_a, fpr_b);
 			kvmppc_sync_qpr(vcpu, ax_rd);
 			break;
 		case OP_59_FDIVS:
-			fpd_fdivs(&vcpu->arch.fpscr, &cr, fpr_d, fpr_a, fpr_b);
+			fpd_fdivs(&vcpu->arch.fp.fpscr, &cr, fpr_d, fpr_a, fpr_b);
 			kvmppc_sync_qpr(vcpu, ax_rd);
 			break;
 		case OP_59_FRES:
-			fpd_fres(&vcpu->arch.fpscr, &cr, fpr_d, fpr_b);
+			fpd_fres(&vcpu->arch.fp.fpscr, &cr, fpr_d, fpr_b);
 			kvmppc_sync_qpr(vcpu, ax_rd);
 			break;
 		case OP_59_FRSQRTES:
-			fpd_frsqrtes(&vcpu->arch.fpscr, &cr, fpr_d, fpr_b);
+			fpd_frsqrtes(&vcpu->arch.fp.fpscr, &cr, fpr_d, fpr_b);
 			kvmppc_sync_qpr(vcpu, ax_rd);
 			break;
 		}
 		switch (inst_get_field(inst, 26, 30)) {
 		case OP_59_FMULS:
-			fpd_fmuls(&vcpu->arch.fpscr, &cr, fpr_d, fpr_a, fpr_c);
+			fpd_fmuls(&vcpu->arch.fp.fpscr, &cr, fpr_d, fpr_a, fpr_c);
 			kvmppc_sync_qpr(vcpu, ax_rd);
 			break;
 		case OP_59_FMSUBS:
-			fpd_fmsubs(&vcpu->arch.fpscr, &cr, fpr_d, fpr_a, fpr_c, fpr_b);
+			fpd_fmsubs(&vcpu->arch.fp.fpscr, &cr, fpr_d, fpr_a, fpr_c, fpr_b);
 			kvmppc_sync_qpr(vcpu, ax_rd);
 			break;
 		case OP_59_FMADDS:
-			fpd_fmadds(&vcpu->arch.fpscr, &cr, fpr_d, fpr_a, fpr_c, fpr_b);
+			fpd_fmadds(&vcpu->arch.fp.fpscr, &cr, fpr_d, fpr_a, fpr_c, fpr_b);
 			kvmppc_sync_qpr(vcpu, ax_rd);
 			break;
 		case OP_59_FNMSUBS:
-			fpd_fnmsubs(&vcpu->arch.fpscr, &cr, fpr_d, fpr_a, fpr_c, fpr_b);
+			fpd_fnmsubs(&vcpu->arch.fp.fpscr, &cr, fpr_d, fpr_a, fpr_c, fpr_b);
 			kvmppc_sync_qpr(vcpu, ax_rd);
 			break;
 		case OP_59_FNMADDS:
-			fpd_fnmadds(&vcpu->arch.fpscr, &cr, fpr_d, fpr_a, fpr_c, fpr_b);
+			fpd_fnmadds(&vcpu->arch.fp.fpscr, &cr, fpr_d, fpr_a, fpr_c, fpr_b);
 			kvmppc_sync_qpr(vcpu, ax_rd);
 			break;
 		}
@@ -1159,12 +1156,12 @@
 			break;
 		case OP_63_MFFS:
 			/* XXX missing CR */
-			*fpr_d = vcpu->arch.fpscr;
+			*fpr_d = vcpu->arch.fp.fpscr;
 			break;
 		case OP_63_MTFSF:
 			/* XXX missing fm bits */
 			/* XXX missing CR */
-			vcpu->arch.fpscr = *fpr_b;
+			vcpu->arch.fp.fpscr = *fpr_b;
 			break;
 		case OP_63_FCMPU:
 		{
@@ -1172,7 +1169,7 @@
 			u32 cr0_mask = 0xf0000000;
 			u32 cr_shift = inst_get_field(inst, 6, 8) * 4;
 
-			fpd_fcmpu(&vcpu->arch.fpscr, &tmp_cr, fpr_a, fpr_b);
+			fpd_fcmpu(&vcpu->arch.fp.fpscr, &tmp_cr, fpr_a, fpr_b);
 			cr &= ~(cr0_mask >> cr_shift);
 			cr |= (cr & cr0_mask) >> cr_shift;
 			break;
@@ -1183,40 +1180,40 @@
 			u32 cr0_mask = 0xf0000000;
 			u32 cr_shift = inst_get_field(inst, 6, 8) * 4;
 
-			fpd_fcmpo(&vcpu->arch.fpscr, &tmp_cr, fpr_a, fpr_b);
+			fpd_fcmpo(&vcpu->arch.fp.fpscr, &tmp_cr, fpr_a, fpr_b);
 			cr &= ~(cr0_mask >> cr_shift);
 			cr |= (cr & cr0_mask) >> cr_shift;
 			break;
 		}
 		case OP_63_FNEG:
-			fpd_fneg(&vcpu->arch.fpscr, &cr, fpr_d, fpr_b);
+			fpd_fneg(&vcpu->arch.fp.fpscr, &cr, fpr_d, fpr_b);
 			break;
 		case OP_63_FMR:
 			*fpr_d = *fpr_b;
 			break;
 		case OP_63_FABS:
-			fpd_fabs(&vcpu->arch.fpscr, &cr, fpr_d, fpr_b);
+			fpd_fabs(&vcpu->arch.fp.fpscr, &cr, fpr_d, fpr_b);
 			break;
 		case OP_63_FCPSGN:
-			fpd_fcpsgn(&vcpu->arch.fpscr, &cr, fpr_d, fpr_a, fpr_b);
+			fpd_fcpsgn(&vcpu->arch.fp.fpscr, &cr, fpr_d, fpr_a, fpr_b);
 			break;
 		case OP_63_FDIV:
-			fpd_fdiv(&vcpu->arch.fpscr, &cr, fpr_d, fpr_a, fpr_b);
+			fpd_fdiv(&vcpu->arch.fp.fpscr, &cr, fpr_d, fpr_a, fpr_b);
 			break;
 		case OP_63_FADD:
-			fpd_fadd(&vcpu->arch.fpscr, &cr, fpr_d, fpr_a, fpr_b);
+			fpd_fadd(&vcpu->arch.fp.fpscr, &cr, fpr_d, fpr_a, fpr_b);
 			break;
 		case OP_63_FSUB:
-			fpd_fsub(&vcpu->arch.fpscr, &cr, fpr_d, fpr_a, fpr_b);
+			fpd_fsub(&vcpu->arch.fp.fpscr, &cr, fpr_d, fpr_a, fpr_b);
 			break;
 		case OP_63_FCTIW:
-			fpd_fctiw(&vcpu->arch.fpscr, &cr, fpr_d, fpr_b);
+			fpd_fctiw(&vcpu->arch.fp.fpscr, &cr, fpr_d, fpr_b);
 			break;
 		case OP_63_FCTIWZ:
-			fpd_fctiwz(&vcpu->arch.fpscr, &cr, fpr_d, fpr_b);
+			fpd_fctiwz(&vcpu->arch.fp.fpscr, &cr, fpr_d, fpr_b);
 			break;
 		case OP_63_FRSP:
-			fpd_frsp(&vcpu->arch.fpscr, &cr, fpr_d, fpr_b);
+			fpd_frsp(&vcpu->arch.fp.fpscr, &cr, fpr_d, fpr_b);
 			kvmppc_sync_qpr(vcpu, ax_rd);
 			break;
 		case OP_63_FRSQRTE:
@@ -1224,39 +1221,39 @@
 			double one = 1.0f;
 
 			/* fD = sqrt(fB) */
-			fpd_fsqrt(&vcpu->arch.fpscr, &cr, fpr_d, fpr_b);
+			fpd_fsqrt(&vcpu->arch.fp.fpscr, &cr, fpr_d, fpr_b);
 			/* fD = 1.0f / fD */
-			fpd_fdiv(&vcpu->arch.fpscr, &cr, fpr_d, (u64*)&one, fpr_d);
+			fpd_fdiv(&vcpu->arch.fp.fpscr, &cr, fpr_d, (u64*)&one, fpr_d);
 			break;
 		}
 		}
 		switch (inst_get_field(inst, 26, 30)) {
 		case OP_63_FMUL:
-			fpd_fmul(&vcpu->arch.fpscr, &cr, fpr_d, fpr_a, fpr_c);
+			fpd_fmul(&vcpu->arch.fp.fpscr, &cr, fpr_d, fpr_a, fpr_c);
 			break;
 		case OP_63_FSEL:
-			fpd_fsel(&vcpu->arch.fpscr, &cr, fpr_d, fpr_a, fpr_c, fpr_b);
+			fpd_fsel(&vcpu->arch.fp.fpscr, &cr, fpr_d, fpr_a, fpr_c, fpr_b);
 			break;
 		case OP_63_FMSUB:
-			fpd_fmsub(&vcpu->arch.fpscr, &cr, fpr_d, fpr_a, fpr_c, fpr_b);
+			fpd_fmsub(&vcpu->arch.fp.fpscr, &cr, fpr_d, fpr_a, fpr_c, fpr_b);
 			break;
 		case OP_63_FMADD:
-			fpd_fmadd(&vcpu->arch.fpscr, &cr, fpr_d, fpr_a, fpr_c, fpr_b);
+			fpd_fmadd(&vcpu->arch.fp.fpscr, &cr, fpr_d, fpr_a, fpr_c, fpr_b);
 			break;
 		case OP_63_FNMSUB:
-			fpd_fnmsub(&vcpu->arch.fpscr, &cr, fpr_d, fpr_a, fpr_c, fpr_b);
+			fpd_fnmsub(&vcpu->arch.fp.fpscr, &cr, fpr_d, fpr_a, fpr_c, fpr_b);
 			break;
 		case OP_63_FNMADD:
-			fpd_fnmadd(&vcpu->arch.fpscr, &cr, fpr_d, fpr_a, fpr_c, fpr_b);
+			fpd_fnmadd(&vcpu->arch.fp.fpscr, &cr, fpr_d, fpr_a, fpr_c, fpr_b);
 			break;
 		}
 		break;
 	}
 
 #ifdef DEBUG
-	for (i = 0; i < ARRAY_SIZE(vcpu->arch.fpr); i++) {
+	for (i = 0; i < ARRAY_SIZE(vcpu->arch.fp.fpr); i++) {
 		u32 f;
-		kvm_cvt_df(&vcpu->arch.fpr[i], &f);
+		kvm_cvt_df(&VCPU_FPR(vcpu, i), &f);
 		dprintk(KERN_INFO "FPR[%d] = 0x%x\n", i, f);
 	}
 #endif
diff --git a/arch/powerpc/kvm/book3s_pr.c b/arch/powerpc/kvm/book3s_pr.c
index 5b9e906..c5c052a 100644
--- a/arch/powerpc/kvm/book3s_pr.c
+++ b/arch/powerpc/kvm/book3s_pr.c
@@ -41,6 +41,7 @@
 #include <linux/vmalloc.h>
 #include <linux/highmem.h>
 #include <linux/module.h>
+#include <linux/miscdevice.h>
 
 #include "book3s.h"
 
@@ -566,12 +567,6 @@
 void kvmppc_giveup_ext(struct kvm_vcpu *vcpu, ulong msr)
 {
 	struct thread_struct *t = &current->thread;
-	u64 *vcpu_fpr = vcpu->arch.fpr;
-#ifdef CONFIG_VSX
-	u64 *vcpu_vsx = vcpu->arch.vsr;
-#endif
-	u64 *thread_fpr = &t->fp_state.fpr[0][0];
-	int i;
 
 	/*
 	 * VSX instructions can access FP and vector registers, so if
@@ -594,26 +589,16 @@
 		 * both the traditional FP registers and the added VSX
 		 * registers into thread.fp_state.fpr[].
 		 */
-		if (current->thread.regs->msr & MSR_FP)
+		if (t->regs->msr & MSR_FP)
 			giveup_fpu(current);
-		for (i = 0; i < ARRAY_SIZE(vcpu->arch.fpr); i++)
-			vcpu_fpr[i] = thread_fpr[get_fpr_index(i)];
-
-		vcpu->arch.fpscr = t->fp_state.fpscr;
-
-#ifdef CONFIG_VSX
-		if (cpu_has_feature(CPU_FTR_VSX))
-			for (i = 0; i < ARRAY_SIZE(vcpu->arch.vsr) / 2; i++)
-				vcpu_vsx[i] = thread_fpr[get_fpr_index(i) + 1];
-#endif
+		t->fp_save_area = NULL;
 	}
 
 #ifdef CONFIG_ALTIVEC
 	if (msr & MSR_VEC) {
 		if (current->thread.regs->msr & MSR_VEC)
 			giveup_altivec(current);
-		memcpy(vcpu->arch.vr, t->vr_state.vr, sizeof(vcpu->arch.vr));
-		vcpu->arch.vscr = t->vr_state.vscr;
+		t->vr_save_area = NULL;
 	}
 #endif
 
@@ -661,12 +646,6 @@
 			     ulong msr)
 {
 	struct thread_struct *t = &current->thread;
-	u64 *vcpu_fpr = vcpu->arch.fpr;
-#ifdef CONFIG_VSX
-	u64 *vcpu_vsx = vcpu->arch.vsr;
-#endif
-	u64 *thread_fpr = &t->fp_state.fpr[0][0];
-	int i;
 
 	/* When we have paired singles, we emulate in software */
 	if (vcpu->arch.hflags & BOOK3S_HFLAG_PAIRED_SINGLE)
@@ -704,27 +683,20 @@
 #endif
 
 	if (msr & MSR_FP) {
-		for (i = 0; i < ARRAY_SIZE(vcpu->arch.fpr); i++)
-			thread_fpr[get_fpr_index(i)] = vcpu_fpr[i];
-#ifdef CONFIG_VSX
-		for (i = 0; i < ARRAY_SIZE(vcpu->arch.vsr) / 2; i++)
-			thread_fpr[get_fpr_index(i) + 1] = vcpu_vsx[i];
-#endif
-		t->fp_state.fpscr = vcpu->arch.fpscr;
-		t->fpexc_mode = 0;
-		kvmppc_load_up_fpu();
+		enable_kernel_fp();
+		load_fp_state(&vcpu->arch.fp);
+		t->fp_save_area = &vcpu->arch.fp;
 	}
 
 	if (msr & MSR_VEC) {
 #ifdef CONFIG_ALTIVEC
-		memcpy(t->vr_state.vr, vcpu->arch.vr, sizeof(vcpu->arch.vr));
-		t->vr_state.vscr = vcpu->arch.vscr;
-		t->vrsave = -1;
-		kvmppc_load_up_altivec();
+		enable_kernel_altivec();
+		load_vr_state(&vcpu->arch.vr);
+		t->vr_save_area = &vcpu->arch.vr;
 #endif
 	}
 
-	current->thread.regs->msr |= msr;
+	t->regs->msr |= msr;
 	vcpu->arch.guest_owned_ext |= msr;
 	kvmppc_recalc_shadow_msr(vcpu);
 
@@ -743,11 +715,15 @@
 	if (!lost_ext)
 		return;
 
-	if (lost_ext & MSR_FP)
-		kvmppc_load_up_fpu();
+	if (lost_ext & MSR_FP) {
+		enable_kernel_fp();
+		load_fp_state(&vcpu->arch.fp);
+	}
 #ifdef CONFIG_ALTIVEC
-	if (lost_ext & MSR_VEC)
-		kvmppc_load_up_altivec();
+	if (lost_ext & MSR_VEC) {
+		enable_kernel_altivec();
+		load_vr_state(&vcpu->arch.vr);
+	}
 #endif
 	current->thread.regs->msr |= lost_ext;
 }
@@ -873,6 +849,7 @@
 	/* We're good on these - the host merely wanted to get our attention */
 	case BOOK3S_INTERRUPT_DECREMENTER:
 	case BOOK3S_INTERRUPT_HV_DECREMENTER:
+	case BOOK3S_INTERRUPT_DOORBELL:
 		vcpu->stat.dec_exits++;
 		r = RESUME_GUEST;
 		break;
@@ -1045,14 +1022,14 @@
 		 * and if we really did time things so badly, then we just exit
 		 * again due to a host external interrupt.
 		 */
-		local_irq_disable();
 		s = kvmppc_prepare_to_enter(vcpu);
-		if (s <= 0) {
-			local_irq_enable();
+		if (s <= 0)
 			r = s;
-		} else {
+		else {
+			/* interrupts now hard-disabled */
 			kvmppc_fix_ee_before_entry();
 		}
+
 		kvmppc_handle_lost_ext(vcpu);
 	}
 
@@ -1133,19 +1110,6 @@
 	case KVM_REG_PPC_HIOR:
 		*val = get_reg_val(id, to_book3s(vcpu)->hior);
 		break;
-#ifdef CONFIG_VSX
-	case KVM_REG_PPC_VSR0 ... KVM_REG_PPC_VSR31: {
-		long int i = id - KVM_REG_PPC_VSR0;
-
-		if (!cpu_has_feature(CPU_FTR_VSX)) {
-			r = -ENXIO;
-			break;
-		}
-		val->vsxval[0] = vcpu->arch.fpr[i];
-		val->vsxval[1] = vcpu->arch.vsr[i];
-		break;
-	}
-#endif /* CONFIG_VSX */
 	default:
 		r = -EINVAL;
 		break;
@@ -1164,19 +1128,6 @@
 		to_book3s(vcpu)->hior = set_reg_val(id, *val);
 		to_book3s(vcpu)->hior_explicit = true;
 		break;
-#ifdef CONFIG_VSX
-	case KVM_REG_PPC_VSR0 ... KVM_REG_PPC_VSR31: {
-		long int i = id - KVM_REG_PPC_VSR0;
-
-		if (!cpu_has_feature(CPU_FTR_VSX)) {
-			r = -ENXIO;
-			break;
-		}
-		vcpu->arch.fpr[i] = val->vsxval[0];
-		vcpu->arch.vsr[i] = val->vsxval[1];
-		break;
-	}
-#endif /* CONFIG_VSX */
 	default:
 		r = -EINVAL;
 		break;
@@ -1274,17 +1225,9 @@
 static int kvmppc_vcpu_run_pr(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
 {
 	int ret;
-	struct thread_fp_state fp;
-	int fpexc_mode;
 #ifdef CONFIG_ALTIVEC
-	struct thread_vr_state vr;
 	unsigned long uninitialized_var(vrsave);
-	int used_vr;
 #endif
-#ifdef CONFIG_VSX
-	int used_vsr;
-#endif
-	ulong ext_msr;
 
 	/* Check if we can run the vcpu at all */
 	if (!vcpu->arch.sane) {
@@ -1299,40 +1242,27 @@
 	 * really did time things so badly, then we just exit again due to
 	 * a host external interrupt.
 	 */
-	local_irq_disable();
 	ret = kvmppc_prepare_to_enter(vcpu);
-	if (ret <= 0) {
-		local_irq_enable();
+	if (ret <= 0)
 		goto out;
-	}
+	/* interrupts now hard-disabled */
 
-	/* Save FPU state in stack */
+	/* Save FPU state in thread_struct */
 	if (current->thread.regs->msr & MSR_FP)
 		giveup_fpu(current);
-	fp = current->thread.fp_state;
-	fpexc_mode = current->thread.fpexc_mode;
 
 #ifdef CONFIG_ALTIVEC
-	/* Save Altivec state in stack */
-	used_vr = current->thread.used_vr;
-	if (used_vr) {
-		if (current->thread.regs->msr & MSR_VEC)
-			giveup_altivec(current);
-		vr = current->thread.vr_state;
-		vrsave = current->thread.vrsave;
-	}
+	/* Save Altivec state in thread_struct */
+	if (current->thread.regs->msr & MSR_VEC)
+		giveup_altivec(current);
 #endif
 
 #ifdef CONFIG_VSX
-	/* Save VSX state in stack */
-	used_vsr = current->thread.used_vsr;
-	if (used_vsr && (current->thread.regs->msr & MSR_VSX))
+	/* Save VSX state in thread_struct */
+	if (current->thread.regs->msr & MSR_VSX)
 		__giveup_vsx(current);
 #endif
 
-	/* Remember the MSR with disabled extensions */
-	ext_msr = current->thread.regs->msr;
-
 	/* Preload FPU if it's enabled */
 	if (vcpu->arch.shared->msr & MSR_FP)
 		kvmppc_handle_ext(vcpu, BOOK3S_INTERRUPT_FP_UNAVAIL, MSR_FP);
@@ -1347,25 +1277,6 @@
 	/* Make sure we save the guest FPU/Altivec/VSX state */
 	kvmppc_giveup_ext(vcpu, MSR_FP | MSR_VEC | MSR_VSX);
 
-	current->thread.regs->msr = ext_msr;
-
-	/* Restore FPU/VSX state from stack */
-	current->thread.fp_state = fp;
-	current->thread.fpexc_mode = fpexc_mode;
-
-#ifdef CONFIG_ALTIVEC
-	/* Restore Altivec state from stack */
-	if (used_vr && current->thread.used_vr) {
-		current->thread.vr_state = vr;
-		current->thread.vrsave = vrsave;
-	}
-	current->thread.used_vr = used_vr;
-#endif
-
-#ifdef CONFIG_VSX
-	current->thread.used_vsr = used_vsr;
-#endif
-
 out:
 	vcpu->mode = OUTSIDE_GUEST_MODE;
 	return ret;
@@ -1606,4 +1517,6 @@
 module_exit(kvmppc_book3s_exit_pr);
 
 MODULE_LICENSE("GPL");
+MODULE_ALIAS_MISCDEV(KVM_MINOR);
+MODULE_ALIAS("devname:kvm");
 #endif
diff --git a/arch/powerpc/kvm/book3s_rmhandlers.S b/arch/powerpc/kvm/book3s_rmhandlers.S
index c3c5231..9eec675 100644
--- a/arch/powerpc/kvm/book3s_rmhandlers.S
+++ b/arch/powerpc/kvm/book3s_rmhandlers.S
@@ -162,51 +162,4 @@
 	mtsrr1	r6
 	RFI
 
-#if defined(CONFIG_PPC_BOOK3S_32)
-#define STACK_LR	INT_FRAME_SIZE+4
-
-/* load_up_xxx have to run with MSR_DR=0 on Book3S_32 */
-#define MSR_EXT_START						\
-	PPC_STL	r20, _NIP(r1);					\
-	mfmsr	r20;						\
-	LOAD_REG_IMMEDIATE(r3, MSR_DR|MSR_EE);			\
-	andc	r3,r20,r3;		/* Disable DR,EE */	\
-	mtmsr	r3;						\
-	sync
-
-#define MSR_EXT_END						\
-	mtmsr	r20;			/* Enable DR,EE */	\
-	sync;							\
-	PPC_LL	r20, _NIP(r1)
-
-#elif defined(CONFIG_PPC_BOOK3S_64)
-#define STACK_LR	_LINK
-#define MSR_EXT_START
-#define MSR_EXT_END
-#endif
-
-/*
- * Activate current's external feature (FPU/Altivec/VSX)
- */
-#define define_load_up(what) 					\
-								\
-_GLOBAL(kvmppc_load_up_ ## what);				\
-	PPC_STLU r1, -INT_FRAME_SIZE(r1);			\
-	mflr	r3;						\
-	PPC_STL	r3, STACK_LR(r1);				\
-	MSR_EXT_START;						\
-								\
-	bl	FUNC(load_up_ ## what);				\
-								\
-	MSR_EXT_END;						\
-	PPC_LL	r3, STACK_LR(r1);				\
-	mtlr	r3;						\
-	addi	r1, r1, INT_FRAME_SIZE;				\
-	blr
-
-define_load_up(fpu)
-#ifdef CONFIG_ALTIVEC
-define_load_up(altivec)
-#endif
-
 #include "book3s_segment.S"
diff --git a/arch/powerpc/kvm/book3s_segment.S b/arch/powerpc/kvm/book3s_segment.S
index bc50c97..1e0cc2a 100644
--- a/arch/powerpc/kvm/book3s_segment.S
+++ b/arch/powerpc/kvm/book3s_segment.S
@@ -361,6 +361,8 @@
 	beqa	BOOK3S_INTERRUPT_DECREMENTER
 	cmpwi	r12, BOOK3S_INTERRUPT_PERFMON
 	beqa	BOOK3S_INTERRUPT_PERFMON
+	cmpwi	r12, BOOK3S_INTERRUPT_DOORBELL
+	beqa	BOOK3S_INTERRUPT_DOORBELL
 
 	RFI
 kvmppc_handler_trampoline_exit_end:
diff --git a/arch/powerpc/kvm/book3s_xics.c b/arch/powerpc/kvm/book3s_xics.c
index 02a17dc..d1acd32 100644
--- a/arch/powerpc/kvm/book3s_xics.c
+++ b/arch/powerpc/kvm/book3s_xics.c
@@ -1246,8 +1246,10 @@
 		kvm->arch.xics = xics;
 	mutex_unlock(&kvm->lock);
 
-	if (ret)
+	if (ret) {
+		kfree(xics);
 		return ret;
+	}
 
 	xics_debugfs_init(xics);
 
diff --git a/arch/powerpc/kvm/booke.c b/arch/powerpc/kvm/booke.c
index 0591e05..ab62109 100644
--- a/arch/powerpc/kvm/booke.c
+++ b/arch/powerpc/kvm/booke.c
@@ -643,7 +643,7 @@
 		local_irq_enable();
 		kvm_vcpu_block(vcpu);
 		clear_bit(KVM_REQ_UNHALT, &vcpu->requests);
-		local_irq_disable();
+		hard_irq_disable();
 
 		kvmppc_set_exit_type(vcpu, EMULATED_MTMSRWE_EXITS);
 		r = 1;
@@ -682,34 +682,22 @@
 {
 	int ret, s;
 	struct debug_reg debug;
-#ifdef CONFIG_PPC_FPU
-	struct thread_fp_state fp;
-	int fpexc_mode;
-#endif
 
 	if (!vcpu->arch.sane) {
 		kvm_run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
 		return -EINVAL;
 	}
 
-	local_irq_disable();
 	s = kvmppc_prepare_to_enter(vcpu);
 	if (s <= 0) {
-		local_irq_enable();
 		ret = s;
 		goto out;
 	}
+	/* interrupts now hard-disabled */
 
 #ifdef CONFIG_PPC_FPU
 	/* Save userspace FPU state in stack */
 	enable_kernel_fp();
-	fp = current->thread.fp_state;
-	fpexc_mode = current->thread.fpexc_mode;
-
-	/* Restore guest FPU state to thread */
-	memcpy(current->thread.fp_state.fpr, vcpu->arch.fpr,
-	       sizeof(vcpu->arch.fpr));
-	current->thread.fp_state.fpscr = vcpu->arch.fpscr;
 
 	/*
 	 * Since we can't trap on MSR_FP in GS-mode, we consider the guest
@@ -728,6 +716,7 @@
 	debug = current->thread.debug;
 	current->thread.debug = vcpu->arch.shadow_dbg_reg;
 
+	vcpu->arch.pgdir = current->mm->pgd;
 	kvmppc_fix_ee_before_entry();
 
 	ret = __kvmppc_vcpu_run(kvm_run, vcpu);
@@ -743,15 +732,6 @@
 	kvmppc_save_guest_fp(vcpu);
 
 	vcpu->fpu_active = 0;
-
-	/* Save guest FPU state from thread */
-	memcpy(vcpu->arch.fpr, current->thread.fp_state.fpr,
-	       sizeof(vcpu->arch.fpr));
-	vcpu->arch.fpscr = current->thread.fp_state.fpscr;
-
-	/* Restore userspace FPU state from stack */
-	current->thread.fp_state = fp;
-	current->thread.fpexc_mode = fpexc_mode;
 #endif
 
 out:
@@ -898,17 +878,6 @@
 	int s;
 	int idx;
 
-#ifdef CONFIG_PPC64
-	WARN_ON(local_paca->irq_happened != 0);
-#endif
-
-	/*
-	 * We enter with interrupts disabled in hardware, but
-	 * we need to call hard_irq_disable anyway to ensure that
-	 * the software state is kept in sync.
-	 */
-	hard_irq_disable();
-
 	/* update before a new last_exit_type is rewritten */
 	kvmppc_update_timing_stats(vcpu);
 
@@ -1217,12 +1186,11 @@
 	 * aren't already exiting to userspace for some other reason.
 	 */
 	if (!(r & RESUME_HOST)) {
-		local_irq_disable();
 		s = kvmppc_prepare_to_enter(vcpu);
-		if (s <= 0) {
-			local_irq_enable();
+		if (s <= 0)
 			r = (s << 2) | RESUME_HOST | (r & RESUME_FLAG_NV);
-		} else {
+		else {
+			/* interrupts now hard-disabled */
 			kvmppc_fix_ee_before_entry();
 		}
 	}
diff --git a/arch/powerpc/kvm/booke.h b/arch/powerpc/kvm/booke.h
index 09bfd9b..b632cd3 100644
--- a/arch/powerpc/kvm/booke.h
+++ b/arch/powerpc/kvm/booke.h
@@ -136,7 +136,9 @@
 {
 #ifdef CONFIG_PPC_FPU
 	if (vcpu->fpu_active && !(current->thread.regs->msr & MSR_FP)) {
-		load_up_fpu();
+		enable_kernel_fp();
+		load_fp_state(&vcpu->arch.fp);
+		current->thread.fp_save_area = &vcpu->arch.fp;
 		current->thread.regs->msr |= MSR_FP;
 	}
 #endif
@@ -151,6 +153,7 @@
 #ifdef CONFIG_PPC_FPU
 	if (vcpu->fpu_active && (current->thread.regs->msr & MSR_FP))
 		giveup_fpu(current);
+	current->thread.fp_save_area = NULL;
 #endif
 }
 
diff --git a/arch/powerpc/kvm/bookehv_interrupts.S b/arch/powerpc/kvm/bookehv_interrupts.S
index a0d6929..e4185f6 100644
--- a/arch/powerpc/kvm/bookehv_interrupts.S
+++ b/arch/powerpc/kvm/bookehv_interrupts.S
@@ -33,6 +33,8 @@
 
 #ifdef CONFIG_64BIT
 #include <asm/exception-64e.h>
+#include <asm/hw_irq.h>
+#include <asm/irqflags.h>
 #else
 #include "../kernel/head_booke.h" /* for THREAD_NORMSAVE() */
 #endif
@@ -467,6 +469,15 @@
 	mtspr	SPRN_EPCR, r3
 	isync
 
+#ifdef CONFIG_64BIT
+	/*
+	 * We enter with interrupts disabled in hardware, but
+	 * we need to call RECONCILE_IRQ_STATE to ensure
+	 * that the software state is kept in sync.
+	 */
+	RECONCILE_IRQ_STATE(r3,r5)
+#endif
+
 	/* Switch to kernel stack and jump to handler. */
 	PPC_LL	r3, HOST_RUN(r1)
 	mr	r5, r14 /* intno */
diff --git a/arch/powerpc/kvm/e500.c b/arch/powerpc/kvm/e500.c
index 497b142..2e02ed8 100644
--- a/arch/powerpc/kvm/e500.c
+++ b/arch/powerpc/kvm/e500.c
@@ -16,6 +16,8 @@
 #include <linux/slab.h>
 #include <linux/err.h>
 #include <linux/export.h>
+#include <linux/module.h>
+#include <linux/miscdevice.h>
 
 #include <asm/reg.h>
 #include <asm/cputable.h>
@@ -573,3 +575,5 @@
 
 module_init(kvmppc_e500_init);
 module_exit(kvmppc_e500_exit);
+MODULE_ALIAS_MISCDEV(KVM_MINOR);
+MODULE_ALIAS("devname:kvm");
diff --git a/arch/powerpc/kvm/e500.h b/arch/powerpc/kvm/e500.h
index 4fd9650..a326178 100644
--- a/arch/powerpc/kvm/e500.h
+++ b/arch/powerpc/kvm/e500.h
@@ -31,11 +31,13 @@
 #define E500_TLB_NUM   2
 
 /* entry is mapped somewhere in host TLB */
-#define E500_TLB_VALID		(1 << 0)
+#define E500_TLB_VALID		(1 << 31)
 /* TLB1 entry is mapped by host TLB1, tracked by bitmaps */
-#define E500_TLB_BITMAP		(1 << 1)
+#define E500_TLB_BITMAP		(1 << 30)
 /* TLB1 entry is mapped by host TLB0 */
-#define E500_TLB_TLB0		(1 << 2)
+#define E500_TLB_TLB0		(1 << 29)
+/* bits [6:5] are MAS2_X1 and MAS2_X0; bits [4:0] are the WIMGE bits */
+#define E500_TLB_MAS2_ATTR	(0x7f)
 
 struct tlbe_ref {
 	pfn_t pfn;		/* valid only for TLB0, except briefly */
diff --git a/arch/powerpc/kvm/e500_mmu.c b/arch/powerpc/kvm/e500_mmu.c
index ebca6b8..50860e9 100644
--- a/arch/powerpc/kvm/e500_mmu.c
+++ b/arch/powerpc/kvm/e500_mmu.c
@@ -127,7 +127,7 @@
 }
 
 static inline void kvmppc_e500_deliver_tlb_miss(struct kvm_vcpu *vcpu,
-		unsigned int eaddr, int as)
+		gva_t eaddr, int as)
 {
 	struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);
 	unsigned int victim, tsized;
diff --git a/arch/powerpc/kvm/e500_mmu_host.c b/arch/powerpc/kvm/e500_mmu_host.c
index ecf2247..dd2cc03 100644
--- a/arch/powerpc/kvm/e500_mmu_host.c
+++ b/arch/powerpc/kvm/e500_mmu_host.c
@@ -65,15 +65,6 @@
 	return mas3;
 }
 
-static inline u32 e500_shadow_mas2_attrib(u32 mas2, int usermode)
-{
-#ifdef CONFIG_SMP
-	return (mas2 & MAS2_ATTRIB_MASK) | MAS2_M;
-#else
-	return mas2 & MAS2_ATTRIB_MASK;
-#endif
-}
-
 /*
  * writing shadow tlb entry to host TLB
  */
@@ -231,15 +222,15 @@
 		ref->flags &= ~(E500_TLB_TLB0 | E500_TLB_VALID);
 	}
 
-	/* Already invalidated in between */
-	if (!(ref->flags & E500_TLB_VALID))
-		return;
-
-	/* Guest tlbe is backed by at most one host tlbe per shadow pid. */
-	kvmppc_e500_tlbil_one(vcpu_e500, gtlbe);
+	/*
+	 * If TLB entry is still valid then it's a TLB0 entry, and thus
+	 * backed by at most one host tlbe per shadow pid
+	 */
+	if (ref->flags & E500_TLB_VALID)
+		kvmppc_e500_tlbil_one(vcpu_e500, gtlbe);
 
 	/* Mark the TLB as not backed by the host anymore */
-	ref->flags &= ~E500_TLB_VALID;
+	ref->flags = 0;
 }
 
 static inline int tlbe_is_writable(struct kvm_book3e_206_tlb_entry *tlbe)
@@ -249,10 +240,13 @@
 
 static inline void kvmppc_e500_ref_setup(struct tlbe_ref *ref,
 					 struct kvm_book3e_206_tlb_entry *gtlbe,
-					 pfn_t pfn)
+					 pfn_t pfn, unsigned int wimg)
 {
 	ref->pfn = pfn;
-	ref->flags |= E500_TLB_VALID;
+	ref->flags = E500_TLB_VALID;
+
+	/* Use guest supplied MAS2_G and MAS2_E */
+	ref->flags |= (gtlbe->mas2 & MAS2_ATTRIB_MASK) | wimg;
 
 	/* Mark the page accessed */
 	kvm_set_pfn_accessed(pfn);
@@ -316,8 +310,7 @@
 
 	/* Force IPROT=0 for all guest mappings. */
 	stlbe->mas1 = MAS1_TSIZE(tsize) | get_tlb_sts(gtlbe) | MAS1_VALID;
-	stlbe->mas2 = (gvaddr & MAS2_EPN) |
-		      e500_shadow_mas2_attrib(gtlbe->mas2, pr);
+	stlbe->mas2 = (gvaddr & MAS2_EPN) | (ref->flags & E500_TLB_MAS2_ATTR);
 	stlbe->mas7_3 = ((u64)pfn << PAGE_SHIFT) |
 			e500_shadow_mas3_attrib(gtlbe->mas7_3, pr);
 
@@ -339,6 +332,10 @@
 	int ret = 0;
 	unsigned long mmu_seq;
 	struct kvm *kvm = vcpu_e500->vcpu.kvm;
+	unsigned long tsize_pages = 0;
+	pte_t *ptep;
+	unsigned int wimg = 0;
+	pgd_t *pgdir;
 
 	/* used to check for invalidations in progress */
 	mmu_seq = kvm->mmu_notifier_seq;
@@ -405,7 +402,7 @@
 			 */
 
 			for (; tsize > BOOK3E_PAGESZ_4K; tsize -= 2) {
-				unsigned long gfn_start, gfn_end, tsize_pages;
+				unsigned long gfn_start, gfn_end;
 				tsize_pages = 1 << (tsize - 2);
 
 				gfn_start = gfn & ~(tsize_pages - 1);
@@ -447,11 +444,12 @@
 	}
 
 	if (likely(!pfnmap)) {
-		unsigned long tsize_pages = 1 << (tsize + 10 - PAGE_SHIFT);
+		tsize_pages = 1 << (tsize + 10 - PAGE_SHIFT);
 		pfn = gfn_to_pfn_memslot(slot, gfn);
 		if (is_error_noslot_pfn(pfn)) {
-			printk(KERN_ERR "Couldn't get real page for gfn %lx!\n",
-					(long)gfn);
+			if (printk_ratelimit())
+				pr_err("%s: real page not found for gfn %lx\n",
+				       __func__, (long)gfn);
 			return -EINVAL;
 		}
 
@@ -466,7 +464,18 @@
 		goto out;
 	}
 
-	kvmppc_e500_ref_setup(ref, gtlbe, pfn);
+
+	pgdir = vcpu_e500->vcpu.arch.pgdir;
+	ptep = lookup_linux_ptep(pgdir, hva, &tsize_pages);
+	if (pte_present(*ptep))
+		wimg = (*ptep >> PTE_WIMGE_SHIFT) & MAS2_WIMGE_MASK;
+	else {
+		if (printk_ratelimit())
+			pr_err("%s: pte not present: gfn %lx, pfn %lx\n",
+				__func__, (long)gfn, pfn);
+		return -EINVAL;
+	}
+	kvmppc_e500_ref_setup(ref, gtlbe, pfn, wimg);
 
 	kvmppc_e500_setup_stlbe(&vcpu_e500->vcpu, gtlbe, tsize,
 				ref, gvaddr, stlbe);
diff --git a/arch/powerpc/kvm/e500mc.c b/arch/powerpc/kvm/e500mc.c
index 4132cd2..17e4562 100644
--- a/arch/powerpc/kvm/e500mc.c
+++ b/arch/powerpc/kvm/e500mc.c
@@ -16,6 +16,8 @@
 #include <linux/slab.h>
 #include <linux/err.h>
 #include <linux/export.h>
+#include <linux/miscdevice.h>
+#include <linux/module.h>
 
 #include <asm/reg.h>
 #include <asm/cputable.h>
@@ -391,3 +393,5 @@
 
 module_init(kvmppc_e500mc_init);
 module_exit(kvmppc_e500mc_exit);
+MODULE_ALIAS_MISCDEV(KVM_MINOR);
+MODULE_ALIAS("devname:kvm");
diff --git a/arch/powerpc/kvm/emulate.c b/arch/powerpc/kvm/emulate.c
index 2f9a087..c2b887b 100644
--- a/arch/powerpc/kvm/emulate.c
+++ b/arch/powerpc/kvm/emulate.c
@@ -219,7 +219,6 @@
  * lmw
  * stmw
  *
- * XXX is_bigendian should depend on MMU mapping or MSR[LE]
  */
 /* XXX Should probably auto-generate instruction decoding for a particular core
  * from opcode tables in the future. */
diff --git a/arch/powerpc/kvm/mpic.c b/arch/powerpc/kvm/mpic.c
index 2861ae9..efbd996 100644
--- a/arch/powerpc/kvm/mpic.c
+++ b/arch/powerpc/kvm/mpic.c
@@ -1635,6 +1635,7 @@
 
 	dev->kvm->arch.mpic = NULL;
 	kfree(opp);
+	kfree(dev);
 }
 
 static int mpic_set_default_irq_routing(struct openpic *opp)
diff --git a/arch/powerpc/kvm/powerpc.c b/arch/powerpc/kvm/powerpc.c
index 9ae9768..3cf541a 100644
--- a/arch/powerpc/kvm/powerpc.c
+++ b/arch/powerpc/kvm/powerpc.c
@@ -68,14 +68,16 @@
  */
 int kvmppc_prepare_to_enter(struct kvm_vcpu *vcpu)
 {
-	int r = 1;
+	int r;
 
-	WARN_ON_ONCE(!irqs_disabled());
+	WARN_ON(irqs_disabled());
+	hard_irq_disable();
+
 	while (true) {
 		if (need_resched()) {
 			local_irq_enable();
 			cond_resched();
-			local_irq_disable();
+			hard_irq_disable();
 			continue;
 		}
 
@@ -101,7 +103,7 @@
 			local_irq_enable();
 			trace_kvm_check_requests(vcpu);
 			r = kvmppc_core_check_requests(vcpu);
-			local_irq_disable();
+			hard_irq_disable();
 			if (r > 0)
 				continue;
 			break;
@@ -113,22 +115,12 @@
 			continue;
 		}
 
-#ifdef CONFIG_PPC64
-		/* lazy EE magic */
-		hard_irq_disable();
-		if (lazy_irq_pending()) {
-			/* Got an interrupt in between, try again */
-			local_irq_enable();
-			local_irq_disable();
-			kvm_guest_exit();
-			continue;
-		}
-#endif
-
 		kvm_guest_enter();
-		break;
+		return 1;
 	}
 
+	/* return to host */
+	local_irq_enable();
 	return r;
 }
 EXPORT_SYMBOL_GPL(kvmppc_prepare_to_enter);
@@ -656,14 +648,14 @@
 		kvmppc_set_gpr(vcpu, vcpu->arch.io_gpr, gpr);
 		break;
 	case KVM_MMIO_REG_FPR:
-		vcpu->arch.fpr[vcpu->arch.io_gpr & KVM_MMIO_REG_MASK] = gpr;
+		VCPU_FPR(vcpu, vcpu->arch.io_gpr & KVM_MMIO_REG_MASK) = gpr;
 		break;
 #ifdef CONFIG_PPC_BOOK3S
 	case KVM_MMIO_REG_QPR:
 		vcpu->arch.qpr[vcpu->arch.io_gpr & KVM_MMIO_REG_MASK] = gpr;
 		break;
 	case KVM_MMIO_REG_FQPR:
-		vcpu->arch.fpr[vcpu->arch.io_gpr & KVM_MMIO_REG_MASK] = gpr;
+		VCPU_FPR(vcpu, vcpu->arch.io_gpr & KVM_MMIO_REG_MASK) = gpr;
 		vcpu->arch.qpr[vcpu->arch.io_gpr & KVM_MMIO_REG_MASK] = gpr;
 		break;
 #endif
@@ -673,9 +665,19 @@
 }
 
 int kvmppc_handle_load(struct kvm_run *run, struct kvm_vcpu *vcpu,
-                       unsigned int rt, unsigned int bytes, int is_bigendian)
+		       unsigned int rt, unsigned int bytes,
+		       int is_default_endian)
 {
 	int idx, ret;
+	int is_bigendian;
+
+	if (kvmppc_need_byteswap(vcpu)) {
+		/* Default endianness is "little endian". */
+		is_bigendian = !is_default_endian;
+	} else {
+		/* Default endianness is "big endian". */
+		is_bigendian = is_default_endian;
+	}
 
 	if (bytes > sizeof(run->mmio.data)) {
 		printk(KERN_ERR "%s: bad MMIO length: %d\n", __func__,
@@ -711,21 +713,31 @@
 
 /* Same as above, but sign extends */
 int kvmppc_handle_loads(struct kvm_run *run, struct kvm_vcpu *vcpu,
-                        unsigned int rt, unsigned int bytes, int is_bigendian)
+			unsigned int rt, unsigned int bytes,
+			int is_default_endian)
 {
 	int r;
 
 	vcpu->arch.mmio_sign_extend = 1;
-	r = kvmppc_handle_load(run, vcpu, rt, bytes, is_bigendian);
+	r = kvmppc_handle_load(run, vcpu, rt, bytes, is_default_endian);
 
 	return r;
 }
 
 int kvmppc_handle_store(struct kvm_run *run, struct kvm_vcpu *vcpu,
-                        u64 val, unsigned int bytes, int is_bigendian)
+			u64 val, unsigned int bytes, int is_default_endian)
 {
 	void *data = run->mmio.data;
 	int idx, ret;
+	int is_bigendian;
+
+	if (kvmppc_need_byteswap(vcpu)) {
+		/* Default endianness is "little endian". */
+		is_bigendian = !is_default_endian;
+	} else {
+		/* Default endianness is "big endian". */
+		is_bigendian = is_default_endian;
+	}
 
 	if (bytes > sizeof(run->mmio.data)) {
 		printk(KERN_ERR "%s: bad MMIO length: %d\n", __func__,
diff --git a/arch/powerpc/mm/hash_utils_64.c b/arch/powerpc/mm/hash_utils_64.c
index de68812..d766d6e 100644
--- a/arch/powerpc/mm/hash_utils_64.c
+++ b/arch/powerpc/mm/hash_utils_64.c
@@ -207,6 +207,20 @@
 		if (overlaps_kernel_text(vaddr, vaddr + step))
 			tprot &= ~HPTE_R_N;
 
+		/*
+		 * If relocatable, check if it overlaps the interrupt vectors
+		 * that are copied down to real 0. For a relocatable kernel
+		 * (e.g. the kdump case) we copy the interrupt vectors down to
+		 * real address 0. Mark that region as executable. This is
+		 * because on P8 systems with the relocation-on-exception
+		 * feature enabled, exceptions are raised with the MMU on
+		 * (IR=DR=1). Hence, in order to execute the interrupt handlers
+		 * in virtual mode, the vector region needs to be executable.
+		 */
+		if ((PHYSICAL_START > MEMORY_START) &&
+			overlaps_interrupt_vector_text(vaddr, vaddr + step))
+				tprot &= ~HPTE_R_N;
+
 		hash = hpt_hash(vpn, shift, ssize);
 		hpteg = ((hash & htab_hash_mask) * HPTES_PER_GROUP);
 
diff --git a/arch/powerpc/mm/hugetlbpage.c b/arch/powerpc/mm/hugetlbpage.c
index 90bb6d9..eb92365 100644
--- a/arch/powerpc/mm/hugetlbpage.c
+++ b/arch/powerpc/mm/hugetlbpage.c
@@ -472,12 +472,13 @@
 {
 	struct hugepd_freelist **batchp;
 
-	batchp = &__get_cpu_var(hugepd_freelist_cur);
+	batchp = &get_cpu_var(hugepd_freelist_cur);
 
 	if (atomic_read(&tlb->mm->mm_users) < 2 ||
 	    cpumask_equal(mm_cpumask(tlb->mm),
 			  cpumask_of(smp_processor_id()))) {
 		kmem_cache_free(hugepte_cache, hugepte);
+	put_cpu_var(hugepd_freelist_cur);
 		return;
 	}
 
@@ -491,6 +492,7 @@
 		call_rcu_sched(&(*batchp)->rcu, hugepd_free_rcu_callback);
 		*batchp = NULL;
 	}
+	put_cpu_var(hugepd_freelist_cur);
 }
 #endif
 
diff --git a/arch/powerpc/mm/numa.c b/arch/powerpc/mm/numa.c
index 86a63de..30a42e2 100644
--- a/arch/powerpc/mm/numa.c
+++ b/arch/powerpc/mm/numa.c
@@ -1785,7 +1785,7 @@
 static int topology_update_init(void)
 {
 	start_topology_update();
-	proc_create("powerpc/topology_updates", 644, NULL, &topology_ops);
+	proc_create("powerpc/topology_updates", 0644, NULL, &topology_ops);
 
 	return 0;
 }
diff --git a/arch/powerpc/mm/pgtable_64.c b/arch/powerpc/mm/pgtable_64.c
index 65b7b65..62bf5e8 100644
--- a/arch/powerpc/mm/pgtable_64.c
+++ b/arch/powerpc/mm/pgtable_64.c
@@ -510,7 +510,8 @@
 }
 
 unsigned long pmd_hugepage_update(struct mm_struct *mm, unsigned long addr,
-				  pmd_t *pmdp, unsigned long clr)
+				  pmd_t *pmdp, unsigned long clr,
+				  unsigned long set)
 {
 
 	unsigned long old, tmp;
@@ -526,14 +527,15 @@
 		andi.	%1,%0,%6\n\
 		bne-	1b \n\
 		andc	%1,%0,%4 \n\
+		or	%1,%1,%7\n\
 		stdcx.	%1,0,%3 \n\
 		bne-	1b"
 	: "=&r" (old), "=&r" (tmp), "=m" (*pmdp)
-	: "r" (pmdp), "r" (clr), "m" (*pmdp), "i" (_PAGE_BUSY)
+	: "r" (pmdp), "r" (clr), "m" (*pmdp), "i" (_PAGE_BUSY), "r" (set)
 	: "cc" );
 #else
 	old = pmd_val(*pmdp);
-	*pmdp = __pmd(old & ~clr);
+	*pmdp = __pmd((old & ~clr) | set);
 #endif
 	if (old & _PAGE_HASHPTE)
 		hpte_do_hugepage_flush(mm, addr, pmdp);
@@ -708,7 +710,7 @@
 void pmdp_invalidate(struct vm_area_struct *vma, unsigned long address,
 		     pmd_t *pmdp)
 {
-	pmd_hugepage_update(vma->vm_mm, address, pmdp, _PAGE_PRESENT);
+	pmd_hugepage_update(vma->vm_mm, address, pmdp, _PAGE_PRESENT, 0);
 }
 
 /*
@@ -835,7 +837,7 @@
 	unsigned long old;
 	pgtable_t *pgtable_slot;
 
-	old = pmd_hugepage_update(mm, addr, pmdp, ~0UL);
+	old = pmd_hugepage_update(mm, addr, pmdp, ~0UL, 0);
 	old_pmd = __pmd(old);
 	/*
 	 * We have pmd == none and we are holding page_table_lock.
diff --git a/arch/powerpc/mm/slice.c b/arch/powerpc/mm/slice.c
index 7ce9cf3..b0c75cc 100644
--- a/arch/powerpc/mm/slice.c
+++ b/arch/powerpc/mm/slice.c
@@ -408,7 +408,7 @@
 	if (fixed && (addr & ((1ul << pshift) - 1)))
 		return -EINVAL;
 	if (fixed && addr > (mm->task_size - len))
-		return -EINVAL;
+		return -ENOMEM;
 
 	/* If hint, make sure it matches our alignment restrictions */
 	if (!fixed && addr) {
diff --git a/arch/powerpc/mm/subpage-prot.c b/arch/powerpc/mm/subpage-prot.c
index a770df2d..6c0b1f5 100644
--- a/arch/powerpc/mm/subpage-prot.c
+++ b/arch/powerpc/mm/subpage-prot.c
@@ -78,7 +78,7 @@
 	pte = pte_offset_map_lock(mm, pmd, addr, &ptl);
 	arch_enter_lazy_mmu_mode();
 	for (; npages > 0; --npages) {
-		pte_update(mm, addr, pte, 0, 0);
+		pte_update(mm, addr, pte, 0, 0, 0);
 		addr += PAGE_SIZE;
 		++pte;
 	}
diff --git a/arch/powerpc/mm/tlb_low_64e.S b/arch/powerpc/mm/tlb_low_64e.S
index 16250b1..c95eb32 100644
--- a/arch/powerpc/mm/tlb_low_64e.S
+++ b/arch/powerpc/mm/tlb_low_64e.S
@@ -240,6 +240,7 @@
 	beq	tlb_miss_common_bolted
 	b	itlb_miss_kernel_bolted
 
+#ifdef CONFIG_PPC_FSL_BOOK3E
 /*
  * TLB miss handling for e6500 and derivatives, using hardware tablewalk.
  *
@@ -409,7 +410,7 @@
 	TLB_MISS_STATS_I(MMSTAT_TLB_MISS_NORM_FAULT)
 	tlb_epilog_bolted
 	b	exc_instruction_storage_book3e
-
+#endif /* CONFIG_PPC_FSL_BOOK3E */
 
 /**********************************************************************
  *                                                                    *
diff --git a/arch/powerpc/mm/tlb_nohash.c b/arch/powerpc/mm/tlb_nohash.c
index 735839b..b37a58e 100644
--- a/arch/powerpc/mm/tlb_nohash.c
+++ b/arch/powerpc/mm/tlb_nohash.c
@@ -557,10 +557,12 @@
 		patch_exception(0x1c0, exc_data_tlb_miss_htw_book3e);
 		patch_exception(0x1e0, exc_instruction_tlb_miss_htw_book3e);
 		break;
+#ifdef CONFIG_PPC_FSL_BOOK3E
 	case PPC_HTW_E6500:
 		patch_exception(0x1c0, exc_data_tlb_miss_e6500_book3e);
 		patch_exception(0x1e0, exc_instruction_tlb_miss_e6500_book3e);
 		break;
+#endif
 	}
 	pr_info("MMU: Book3E HW tablewalk %s\n",
 		book3e_htw_mode != PPC_HTW_NONE ? "enabled" : "not supported");
diff --git a/arch/powerpc/perf/core-book3s.c b/arch/powerpc/perf/core-book3s.c
index 29b89e8..67cf220 100644
--- a/arch/powerpc/perf/core-book3s.c
+++ b/arch/powerpc/perf/core-book3s.c
@@ -1147,6 +1147,9 @@
 	mmcr0 = ebb_switch_in(ebb, cpuhw->mmcr[0]);
 
 	mb();
+	if (cpuhw->bhrb_users)
+		ppmu->config_bhrb(cpuhw->bhrb_filter);
+
 	write_mmcr0(cpuhw, mmcr0);
 
 	/*
@@ -1158,8 +1161,6 @@
 	}
 
  out:
-	if (cpuhw->bhrb_users)
-		ppmu->config_bhrb(cpuhw->bhrb_filter);
 
 	local_irq_restore(flags);
 }
diff --git a/arch/powerpc/perf/power8-pmu.c b/arch/powerpc/perf/power8-pmu.c
index a3f7abd..96cee20 100644
--- a/arch/powerpc/perf/power8-pmu.c
+++ b/arch/powerpc/perf/power8-pmu.c
@@ -25,6 +25,37 @@
 #define PM_BRU_FIN			0x10068
 #define PM_BR_MPRED_CMPL		0x400f6
 
+/* All L1 D cache load references counted at finish, gated by reject */
+#define PM_LD_REF_L1			0x100ee
+/* Load Missed L1 */
+#define PM_LD_MISS_L1			0x3e054
+/* Store Missed L1 */
+#define PM_ST_MISS_L1			0x300f0
+/* L1 cache data prefetches */
+#define PM_L1_PREF			0x0d8b8
+/* Instruction fetches from L1 */
+#define PM_INST_FROM_L1			0x04080
+/* Demand iCache Miss */
+#define PM_L1_ICACHE_MISS		0x200fd
+/* Instruction demand sectors written into IL1 */
+#define PM_L1_DEMAND_WRITE		0x0408c
+/* Instruction prefetch written into IL1 */
+#define PM_IC_PREF_WRITE		0x0408e
+/* The data cache was reloaded from local core's L3 due to a demand load */
+#define PM_DATA_FROM_L3			0x4c042
+/* Demand LD - L3 Miss (not L2 hit and not L3 hit) */
+#define PM_DATA_FROM_L3MISS		0x300fe
+/* All successful D-side store dispatches for this thread */
+#define PM_L2_ST			0x17080
+/* All successful D-side store dispatches for this thread that were L2 Miss */
+#define PM_L2_ST_MISS			0x17082
+/* Total HW L3 prefetches (load + store) */
+#define PM_L3_PREF_ALL			0x4e052
+/* Data PTEG reload */
+#define PM_DTLB_MISS			0x300fc
+/* ITLB Reloaded */
+#define PM_ITLB_MISS			0x400fc
+
 
 /*
  * Raw event encoding for POWER8:
@@ -557,6 +588,8 @@
 	[PERF_COUNT_HW_INSTRUCTIONS] =			PM_INST_CMPL,
 	[PERF_COUNT_HW_BRANCH_INSTRUCTIONS] =		PM_BRU_FIN,
 	[PERF_COUNT_HW_BRANCH_MISSES] =			PM_BR_MPRED_CMPL,
+	[PERF_COUNT_HW_CACHE_REFERENCES] =		PM_LD_REF_L1,
+	[PERF_COUNT_HW_CACHE_MISSES] =			PM_LD_MISS_L1,
 };
 
 static u64 power8_bhrb_filter_map(u64 branch_sample_type)
@@ -596,6 +629,116 @@
 	mtspr(SPRN_MMCRA, (mfspr(SPRN_MMCRA) | pmu_bhrb_filter));
 }
 
+#define C(x)	PERF_COUNT_HW_CACHE_##x
+
+/*
+ * Table of generalized cache-related events.
+ * 0 means not supported, -1 means nonsensical, other values
+ * are event codes.
+ */
+static int power8_cache_events[C(MAX)][C(OP_MAX)][C(RESULT_MAX)] = {
+	[ C(L1D) ] = {
+		[ C(OP_READ) ] = {
+			[ C(RESULT_ACCESS) ] = PM_LD_REF_L1,
+			[ C(RESULT_MISS)   ] = PM_LD_MISS_L1,
+		},
+		[ C(OP_WRITE) ] = {
+			[ C(RESULT_ACCESS) ] = 0,
+			[ C(RESULT_MISS)   ] = PM_ST_MISS_L1,
+		},
+		[ C(OP_PREFETCH) ] = {
+			[ C(RESULT_ACCESS) ] = PM_L1_PREF,
+			[ C(RESULT_MISS)   ] = 0,
+		},
+	},
+	[ C(L1I) ] = {
+		[ C(OP_READ) ] = {
+			[ C(RESULT_ACCESS) ] = PM_INST_FROM_L1,
+			[ C(RESULT_MISS)   ] = PM_L1_ICACHE_MISS,
+		},
+		[ C(OP_WRITE) ] = {
+			[ C(RESULT_ACCESS) ] = PM_L1_DEMAND_WRITE,
+			[ C(RESULT_MISS)   ] = -1,
+		},
+		[ C(OP_PREFETCH) ] = {
+			[ C(RESULT_ACCESS) ] = PM_IC_PREF_WRITE,
+			[ C(RESULT_MISS)   ] = 0,
+		},
+	},
+	[ C(LL) ] = {
+		[ C(OP_READ) ] = {
+			[ C(RESULT_ACCESS) ] = PM_DATA_FROM_L3,
+			[ C(RESULT_MISS)   ] = PM_DATA_FROM_L3MISS,
+		},
+		[ C(OP_WRITE) ] = {
+			[ C(RESULT_ACCESS) ] = PM_L2_ST,
+			[ C(RESULT_MISS)   ] = PM_L2_ST_MISS,
+		},
+		[ C(OP_PREFETCH) ] = {
+			[ C(RESULT_ACCESS) ] = PM_L3_PREF_ALL,
+			[ C(RESULT_MISS)   ] = 0,
+		},
+	},
+	[ C(DTLB) ] = {
+		[ C(OP_READ) ] = {
+			[ C(RESULT_ACCESS) ] = 0,
+			[ C(RESULT_MISS)   ] = PM_DTLB_MISS,
+		},
+		[ C(OP_WRITE) ] = {
+			[ C(RESULT_ACCESS) ] = -1,
+			[ C(RESULT_MISS)   ] = -1,
+		},
+		[ C(OP_PREFETCH) ] = {
+			[ C(RESULT_ACCESS) ] = -1,
+			[ C(RESULT_MISS)   ] = -1,
+		},
+	},
+	[ C(ITLB) ] = {
+		[ C(OP_READ) ] = {
+			[ C(RESULT_ACCESS) ] = 0,
+			[ C(RESULT_MISS)   ] = PM_ITLB_MISS,
+		},
+		[ C(OP_WRITE) ] = {
+			[ C(RESULT_ACCESS) ] = -1,
+			[ C(RESULT_MISS)   ] = -1,
+		},
+		[ C(OP_PREFETCH) ] = {
+			[ C(RESULT_ACCESS) ] = -1,
+			[ C(RESULT_MISS)   ] = -1,
+		},
+	},
+	[ C(BPU) ] = {
+		[ C(OP_READ) ] = {
+			[ C(RESULT_ACCESS) ] = PM_BRU_FIN,
+			[ C(RESULT_MISS)   ] = PM_BR_MPRED_CMPL,
+		},
+		[ C(OP_WRITE) ] = {
+			[ C(RESULT_ACCESS) ] = -1,
+			[ C(RESULT_MISS)   ] = -1,
+		},
+		[ C(OP_PREFETCH) ] = {
+			[ C(RESULT_ACCESS) ] = -1,
+			[ C(RESULT_MISS)   ] = -1,
+		},
+	},
+	[ C(NODE) ] = {
+		[ C(OP_READ) ] = {
+			[ C(RESULT_ACCESS) ] = -1,
+			[ C(RESULT_MISS)   ] = -1,
+		},
+		[ C(OP_WRITE) ] = {
+			[ C(RESULT_ACCESS) ] = -1,
+			[ C(RESULT_MISS)   ] = -1,
+		},
+		[ C(OP_PREFETCH) ] = {
+			[ C(RESULT_ACCESS) ] = -1,
+			[ C(RESULT_MISS)   ] = -1,
+		},
+	},
+};
+
+#undef C
+
 static struct power_pmu power8_pmu = {
 	.name			= "POWER8",
 	.n_counter		= 6,
@@ -611,6 +754,7 @@
 	.flags			= PPMU_HAS_SSLOT | PPMU_HAS_SIER | PPMU_BHRB | PPMU_EBB,
 	.n_generic		= ARRAY_SIZE(power8_generic_events),
 	.generic_events		= power8_generic_events,
+	.cache_events		= &power8_cache_events,
 	.attr_groups		= power8_pmu_attr_groups,
 	.bhrb_nr		= 32,
 };
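
For orientation, the generic cache events in the table above are consumed through the usual PERF_TYPE_HW_CACHE config encoding (cache id in bits 0-7, operation in bits 8-15, result in bits 16-23, per perf_event_open(2)). The following is only a minimal sketch of such a lookup -- the function name and error codes are illustrative, not the actual powerpc core perf code:

	/* decode a PERF_TYPE_HW_CACHE config and fetch the raw POWER8 event code */
	static int power8_lookup_cache_event(u64 config)
	{
		unsigned int type   = (config >>  0) & 0xff;	/* e.g. PERF_COUNT_HW_CACHE_L1D */
		unsigned int op     = (config >>  8) & 0xff;	/* e.g. PERF_COUNT_HW_CACHE_OP_READ */
		unsigned int result = (config >> 16) & 0xff;	/* e.g. PERF_COUNT_HW_CACHE_RESULT_MISS */
		int ev;

		if (type >= PERF_COUNT_HW_CACHE_MAX ||
		    op >= PERF_COUNT_HW_CACHE_OP_MAX ||
		    result >= PERF_COUNT_HW_CACHE_RESULT_MAX)
			return -EINVAL;

		ev = power8_cache_events[type][op][result];
		if (ev == -1)		/* combination is nonsensical */
			return -EINVAL;
		if (ev == 0)		/* not counted on this PMU */
			return -ENOENT;
		return ev;		/* raw event code such as PM_LD_MISS_L1 */
	}
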
diff --git a/arch/powerpc/platforms/512x/Kconfig b/arch/powerpc/platforms/512x/Kconfig
index fc9c1cb..5aa3f4b 100644
--- a/arch/powerpc/platforms/512x/Kconfig
+++ b/arch/powerpc/platforms/512x/Kconfig
@@ -1,9 +1,9 @@
 config PPC_MPC512x
 	bool "512x-based boards"
 	depends on 6xx
+	select COMMON_CLK
 	select FSL_SOC
 	select IPIC
-	select PPC_CLOCK
 	select PPC_PCI_CHOICE
 	select FSL_PCI if PCI
 	select ARCH_WANT_OPTIONAL_GPIOLIB
diff --git a/arch/powerpc/platforms/512x/Makefile b/arch/powerpc/platforms/512x/Makefile
index 72fb934..0169312 100644
--- a/arch/powerpc/platforms/512x/Makefile
+++ b/arch/powerpc/platforms/512x/Makefile
@@ -1,7 +1,8 @@
 #
 # Makefile for the Freescale PowerPC 512x linux kernel.
 #
-obj-y				+= clock.o mpc512x_shared.o
+obj-$(CONFIG_COMMON_CLK)	+= clock-commonclk.o
+obj-y				+= mpc512x_shared.o
 obj-$(CONFIG_MPC5121_ADS)	+= mpc5121_ads.o mpc5121_ads_cpld.o
 obj-$(CONFIG_MPC512x_GENERIC)	+= mpc512x_generic.o
 obj-$(CONFIG_PDM360NG)		+= pdm360ng.o
diff --git a/arch/powerpc/platforms/512x/clock-commonclk.c b/arch/powerpc/platforms/512x/clock-commonclk.c
new file mode 100644
index 0000000..6eb614a
--- /dev/null
+++ b/arch/powerpc/platforms/512x/clock-commonclk.c
@@ -0,0 +1,1221 @@
+/*
+ * Copyright (C) 2013 DENX Software Engineering
+ *
+ * Gerhard Sittig, <gsi@denx.de>
+ *
+ * common clock driver support for the MPC512x platform
+ *
+ * This is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include <linux/bitops.h>
+#include <linux/clk-provider.h>
+#include <linux/clkdev.h>
+#include <linux/device.h>
+#include <linux/errno.h>
+#include <linux/io.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+
+#include <asm/mpc5121.h>
+#include <dt-bindings/clock/mpc512x-clock.h>
+
+#include "mpc512x.h"		/* our public mpc5121_clk_init() API */
+
+/* helpers to keep the MCLK intermediates "somewhere" in our table */
+enum {
+	MCLK_IDX_MUX0,
+	MCLK_IDX_EN0,
+	MCLK_IDX_DIV0,
+	MCLK_MAX_IDX,
+};
+
+#define NR_PSCS			12
+#define NR_MSCANS		4
+#define NR_SPDIFS		1
+#define NR_OUTCLK		4
+#define NR_MCLKS		(NR_PSCS + NR_MSCANS + NR_SPDIFS + NR_OUTCLK)
+
+/* extend the public set of clocks by adding internal slots for management */
+enum {
+	/* arrange for adjacent numbers after the public set */
+	MPC512x_CLK_START_PRIVATE = MPC512x_CLK_LAST_PUBLIC,
+	/* clocks which aren't announced to the public */
+	MPC512x_CLK_DDR,
+	MPC512x_CLK_MEM,
+	MPC512x_CLK_IIM,
+	/* intermediates in div+gate combos or fractional dividers */
+	MPC512x_CLK_DDR_UG,
+	MPC512x_CLK_SDHC_x4,
+	MPC512x_CLK_SDHC_UG,
+	MPC512x_CLK_SDHC2_UG,
+	MPC512x_CLK_DIU_x4,
+	MPC512x_CLK_DIU_UG,
+	MPC512x_CLK_MBX_BUS_UG,
+	MPC512x_CLK_MBX_UG,
+	MPC512x_CLK_MBX_3D_UG,
+	MPC512x_CLK_PCI_UG,
+	MPC512x_CLK_NFC_UG,
+	MPC512x_CLK_LPC_UG,
+	MPC512x_CLK_SPDIF_TX_IN,
+	/* intermediates for the mux+gate+div+mux MCLK generation */
+	MPC512x_CLK_MCLKS_FIRST,
+	MPC512x_CLK_MCLKS_LAST = MPC512x_CLK_MCLKS_FIRST
+				+ NR_MCLKS * MCLK_MAX_IDX,
+	/* internal, symbolic spec for the number of slots */
+	MPC512x_CLK_LAST_PRIVATE,
+};
+
+/* data required for the OF clock provider registration */
+static struct clk *clks[MPC512x_CLK_LAST_PRIVATE];
+static struct clk_onecell_data clk_data;
+
+/* CCM register access */
+static struct mpc512x_ccm __iomem *clkregs;
+static DEFINE_SPINLOCK(clklock);
+
+/* SoC variants {{{ */
+
+/*
+ * tell SoC variants apart as they are rather similar yet not identical,
+ * cache the result in an enum to not repeatedly run the expensive OF test
+ *
+ * MPC5123 is an MPC5121 without the MBX graphics accelerator
+ *
+ * MPC5125 has many more differences: no MBX, no AXE, no VIU, no SPDIF,
+ * no PATA, no SATA, no PCI, two FECs (of different compatibility name),
+ * only 10 PSCs (of different compatibility name), two SDHCs, different
+ * NFC IP block, output clocks, system PLL status query, different CPMF
+ * interpretation, no CFM, different fourth PSC/CAN mux0 input -- yet
+ * those differences can get folded into this clock provider support
+ * code and don't warrant a separate highly redundant implementation
+ */
+
+static enum soc_type {
+	MPC512x_SOC_MPC5121,
+	MPC512x_SOC_MPC5123,
+	MPC512x_SOC_MPC5125,
+} soc;
+
+static void mpc512x_clk_determine_soc(void)
+{
+	if (of_machine_is_compatible("fsl,mpc5121")) {
+		soc = MPC512x_SOC_MPC5121;
+		return;
+	}
+	if (of_machine_is_compatible("fsl,mpc5123")) {
+		soc = MPC512x_SOC_MPC5123;
+		return;
+	}
+	if (of_machine_is_compatible("fsl,mpc5125")) {
+		soc = MPC512x_SOC_MPC5125;
+		return;
+	}
+}
+
+static bool soc_has_mbx(void)
+{
+	if (soc == MPC512x_SOC_MPC5121)
+		return true;
+	return false;
+}
+
+static bool soc_has_axe(void)
+{
+	if (soc == MPC512x_SOC_MPC5125)
+		return false;
+	return true;
+}
+
+static bool soc_has_viu(void)
+{
+	if (soc == MPC512x_SOC_MPC5125)
+		return false;
+	return true;
+}
+
+static bool soc_has_spdif(void)
+{
+	if (soc == MPC512x_SOC_MPC5125)
+		return false;
+	return true;
+}
+
+static bool soc_has_pata(void)
+{
+	if (soc == MPC512x_SOC_MPC5125)
+		return false;
+	return true;
+}
+
+static bool soc_has_sata(void)
+{
+	if (soc == MPC512x_SOC_MPC5125)
+		return false;
+	return true;
+}
+
+static bool soc_has_pci(void)
+{
+	if (soc == MPC512x_SOC_MPC5125)
+		return false;
+	return true;
+}
+
+static bool soc_has_fec2(void)
+{
+	if (soc == MPC512x_SOC_MPC5125)
+		return true;
+	return false;
+}
+
+static int soc_max_pscnum(void)
+{
+	if (soc == MPC512x_SOC_MPC5125)
+		return 10;
+	return 12;
+}
+
+static bool soc_has_sdhc2(void)
+{
+	if (soc == MPC512x_SOC_MPC5125)
+		return true;
+	return false;
+}
+
+static bool soc_has_nfc_5125(void)
+{
+	if (soc == MPC512x_SOC_MPC5125)
+		return true;
+	return false;
+}
+
+static bool soc_has_outclk(void)
+{
+	if (soc == MPC512x_SOC_MPC5125)
+		return true;
+	return false;
+}
+
+static bool soc_has_cpmf_0_bypass(void)
+{
+	if (soc == MPC512x_SOC_MPC5125)
+		return true;
+	return false;
+}
+
+static bool soc_has_mclk_mux0_canin(void)
+{
+	if (soc == MPC512x_SOC_MPC5125)
+		return true;
+	return false;
+}
+
+/* }}} SoC variants */
+/* common clk API wrappers {{{ */
+
+/* convenience wrappers around the common clk API */
+static inline struct clk *mpc512x_clk_fixed(const char *name, int rate)
+{
+	return clk_register_fixed_rate(NULL, name, NULL, CLK_IS_ROOT, rate);
+}
+
+static inline struct clk *mpc512x_clk_factor(
+	const char *name, const char *parent_name,
+	int mul, int div)
+{
+	int clkflags;
+
+	clkflags = CLK_SET_RATE_PARENT;
+	return clk_register_fixed_factor(NULL, name, parent_name, clkflags,
+					 mul, div);
+}
+
+static inline struct clk *mpc512x_clk_divider(
+	const char *name, const char *parent_name, u8 clkflags,
+	u32 __iomem *reg, u8 pos, u8 len, int divflags)
+{
+	return clk_register_divider(NULL, name, parent_name, clkflags,
+				    reg, pos, len, divflags, &clklock);
+}
+
+static inline struct clk *mpc512x_clk_divtable(
+	const char *name, const char *parent_name,
+	u32 __iomem *reg, u8 pos, u8 len,
+	const struct clk_div_table *divtab)
+{
+	u8 divflags;
+
+	divflags = 0;
+	return clk_register_divider_table(NULL, name, parent_name, 0,
+					  reg, pos, len, divflags,
+					  divtab, &clklock);
+}
+
+static inline struct clk *mpc512x_clk_gated(
+	const char *name, const char *parent_name,
+	u32 __iomem *reg, u8 pos)
+{
+	int clkflags;
+
+	clkflags = CLK_SET_RATE_PARENT;
+	return clk_register_gate(NULL, name, parent_name, clkflags,
+				 reg, pos, 0, &clklock);
+}
+
+static inline struct clk *mpc512x_clk_muxed(const char *name,
+	const char **parent_names, int parent_count,
+	u32 __iomem *reg, u8 pos, u8 len)
+{
+	int clkflags;
+	u8 muxflags;
+
+	clkflags = CLK_SET_RATE_PARENT;
+	muxflags = 0;
+	return clk_register_mux(NULL, name,
+				parent_names, parent_count, clkflags,
+				reg, pos, len, muxflags, &clklock);
+}
+
+/* }}} common clk API wrappers */
+
+/* helper to isolate a bit field from a register */
+static inline int get_bit_field(uint32_t __iomem *reg, uint8_t pos, uint8_t len)
+{
+	uint32_t val;
+
+	val = in_be32(reg);
+	val >>= pos;
+	val &= (1 << len) - 1;
+	return val;
+}
+
+/* get the SPMF and translate it into the "sys pll" multiplier */
+static int get_spmf_mult(void)
+{
+	static int spmf_to_mult[] = {
+		68, 1, 12, 16, 20, 24, 28, 32,
+		36, 40, 44, 48, 52, 56, 60, 64,
+	};
+	int spmf;
+
+	spmf = get_bit_field(&clkregs->spmr, 24, 4);
+	return spmf_to_mult[spmf];
+}
+
+/*
+ * get the SYS_DIV value and translate it into a divide factor
+ *
+ * values returned from here are a multiple of the real factor since the
+ * divide ratio is fractional
+ */
+static int get_sys_div_x2(void)
+{
+	static int sysdiv_code_to_x2[] = {
+		4, 5, 6, 7, 8, 9, 10, 14,
+		12, 16, 18, 22, 20, 24, 26, 30,
+		28, 32, 34, 38, 36, 40, 42, 46,
+		44, 48, 50, 54, 52, 56, 58, 62,
+		60, 64, 66,
+	};
+	int divcode;
+
+	divcode = get_bit_field(&clkregs->scfr2, 26, 6);
+	return sysdiv_code_to_x2[divcode];
+}
+
+/*
+ * get the CPMF value and translate it into a multiplier factor
+ *
+ * values returned from here are a multiple of the real factor since the
+ * multiplier ratio is fractional
+ */
+static int get_cpmf_mult_x2(void)
+{
+	static int cpmf_to_mult_x36[] = {
+		/* 0b000 is "times 36" */
+		72, 2, 2, 3, 4, 5, 6, 7,
+	};
+	static int cpmf_to_mult_0by[] = {
+		/* 0b000 is "bypass" */
+		2, 2, 2, 3, 4, 5, 6, 7,
+	};
+
+	int *cpmf_to_mult;
+	int cpmf;
+
+	cpmf = get_bit_field(&clkregs->spmr, 16, 4);
+	if (soc_has_cpmf_0_bypass())
+		cpmf_to_mult = cpmf_to_mult_0by;
+	else
+		cpmf_to_mult = cpmf_to_mult_x36;
+	return cpmf_to_mult[cpmf];
+}
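+
+/*
+ * worked example (illustrative CPMF code, not read from any real board):
+ * on a non-bypass SoC a CPMF value of 5 yields cpmf_to_mult_x36[5] = 5,
+ * i.e. a core multiplier of 5/2, so e300 = CSB * 5 / 2 once the caller
+ * compensates for the doubled factor with div = 2
+ */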
+
+/*
+ * some of the clock dividers do scale in a linear way, yet not all of
+ * their bit combinations are legal; use a divider table to get a
+ * resulting set of applicable divider values
+ */
+
+/* applies to the IPS_DIV, and PCI_DIV values */
+static struct clk_div_table divtab_2346[] = {
+	{ .val = 2, .div = 2, },
+	{ .val = 3, .div = 3, },
+	{ .val = 4, .div = 4, },
+	{ .val = 6, .div = 6, },
+	{ .div = 0, },
+};
+
+/* applies to the MBX_DIV, LPC_DIV, and NFC_DIV values */
+static struct clk_div_table divtab_1234[] = {
+	{ .val = 1, .div = 1, },
+	{ .val = 2, .div = 2, },
+	{ .val = 3, .div = 3, },
+	{ .val = 4, .div = 4, },
+	{ .div = 0, },
+};
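+
+/*
+ * note on clk_div_table semantics: each entry maps the raw register field
+ * value (.val) to the effective divide ratio (.div); the terminating entry
+ * with .div == 0 marks the end of the table
+ */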
+
+static int get_freq_from_dt(char *propname)
+{
+	struct device_node *np;
+	const unsigned int *prop;
+	int val;
+
+	val = 0;
+	np = of_find_compatible_node(NULL, NULL, "fsl,mpc5121-immr");
+	if (np) {
+		prop = of_get_property(np, propname, NULL);
+		if (prop)
+			val = *prop;
+		of_node_put(np);
+	}
+	return val;
+}
+
+static void mpc512x_clk_preset_data(void)
+{
+	size_t i;
+
+	for (i = 0; i < ARRAY_SIZE(clks); i++)
+		clks[i] = ERR_PTR(-ENODEV);
+}
+
+/*
+ * - receives the "bus frequency" from the caller (that's the IPS clock
+ *   rate, the historical source of clock information)
+ * - fetches the system PLL multiplier and divider values as well as the
+ *   IPS divider value from hardware
+ * - determines the REF clock rate either from the XTAL/OSC spec (if
+ *   there is a device tree node describing the oscillator) or from the
+ *   IPS bus clock (supported for backwards compatibility, such that
+ *   setups without XTAL/OSC specs keep working)
+ * - creates the "ref" clock item in the clock tree, such that
+ *   subsequent code can create the remainder of the hierarchy (REF ->
+ *   SYS -> CSB -> IPS) from the REF clock rate and the returned mul/div
+ *   values
+ */
+static void mpc512x_clk_setup_ref_clock(struct device_node *np, int bus_freq,
+					int *sys_mul, int *sys_div,
+					int *ips_div)
+{
+	struct clk *osc_clk;
+	int calc_freq;
+
+	/* fetch mul/div factors from the hardware */
+	*sys_mul = get_spmf_mult();
+	*sys_mul *= 2;		/* compensate for the fractional divider */
+	*sys_div = get_sys_div_x2();
+	*ips_div = get_bit_field(&clkregs->scfr1, 23, 3);
+
+	/* lookup the oscillator clock for its rate */
+	osc_clk = of_clk_get_by_name(np, "osc");
+
+	/*
+	 * either descend from OSC to REF (and in bypassing verify the
+	 * IPS rate), or backtrack from IPS and multiplier values that
+	 * were fetched from hardware to REF and thus to the OSC value
+	 *
+	 * in either case the REF clock gets created here and the
+	 * remainder of the clock tree can get spanned from there
+	 */
+	if (!IS_ERR(osc_clk)) {
+		clks[MPC512x_CLK_REF] = mpc512x_clk_factor("ref", "osc", 1, 1);
+		calc_freq = clk_get_rate(clks[MPC512x_CLK_REF]);
+		calc_freq *= *sys_mul;
+		calc_freq /= *sys_div;
+		calc_freq /= 2;
+		calc_freq /= *ips_div;
+		if (bus_freq && calc_freq != bus_freq)
+			pr_warn("calc rate %d != OF spec %d\n",
+				calc_freq, bus_freq);
+	} else {
+		calc_freq = bus_freq;	/* start with IPS */
+		calc_freq *= *ips_div;	/* IPS -> CSB */
+		calc_freq *= 2;		/* CSB -> SYS */
+		calc_freq *= *sys_div;	/* SYS -> PLL out */
+		calc_freq /= *sys_mul;	/* PLL out -> REF == OSC */
+		clks[MPC512x_CLK_REF] = mpc512x_clk_fixed("ref", calc_freq);
+	}
+}
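+
+/*
+ * worked example with purely illustrative numbers: REF = 32 MHz, SPMF
+ * multiplier 16 (doubled to sys_mul = 32), sys_div = 4 (the x2 value of
+ * a "divide by 2" code), ips_div = 2; the forward direction then gives
+ * SYS = 32 * 32 / 4 = 256 MHz, CSB = 128 MHz, IPS = 64 MHz -- and
+ * backtracking from IPS as done above recovers 64 * 2 * 2 * 4 / 32 =
+ * 32 MHz for REF
+ */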
+
+/* MCLK helpers {{{ */
+
+/*
+ * helper code for the MCLK subtree setup
+ *
+ * the overview in section 5.2.4 of the MPC5121e Reference Manual rev4
+ * suggests that all instances of the "PSC clock generation" are equal,
+ * and that one might re-use the PSC setup for MSCAN clock generation
+ * (section 5.2.5) as well, at least the logic if not the data for
+ * description
+ *
+ * the details (starting at page 5-20) show differences in the specific
+ * inputs of the first mux stage ("can clk in", "spdif tx"), and the
+ * factual non-availability of the second mux stage (it's present yet
+ * only one input is valid)
+ *
+ * the MSCAN clock related registers (starting at page 5-35) all
+ * reference "spdif clk" at the first mux stage and don't mention any
+ * "can clk" at all, which somehow is unexpected
+ *
+ * TODO re-check the document, and clarify whether the RM is correct in
+ * the overview or in the details, and whether the difference is a
+ * clipboard induced error or results from chip revisions
+ *
+ * it turns out that the RM rev4 as of 2012-06 talks about "can" for the
+ * PSCs while RM rev3 as of 2008-10 talks about "spdif", so presumably a
+ * documentation update which better reflects the SoC's reality should come
+ * first, and the implementation can follow once no questions remain
+ */
+
+/*
+ * note that this declaration raises a checkpatch warning, but
+ * it's the very data type dictated by <linux/clk-provider.h>,
+ * "fixing" this warning will break compilation
+ */
+static const char *parent_names_mux0_spdif[] = {
+	"sys", "ref", "psc-mclk-in", "spdif-tx",
+};
+
+static const char *parent_names_mux0_canin[] = {
+	"sys", "ref", "psc-mclk-in", "can-clk-in",
+};
+
+enum mclk_type {
+	MCLK_TYPE_PSC,
+	MCLK_TYPE_MSCAN,
+	MCLK_TYPE_SPDIF,
+	MCLK_TYPE_OUTCLK,
+};
+
+struct mclk_setup_data {
+	enum mclk_type type;
+	bool has_mclk1;
+	const char *name_mux0;
+	const char *name_en0;
+	const char *name_div0;
+	const char *parent_names_mux1[2];
+	const char *name_mclk;
+};
+
+#define MCLK_SETUP_DATA_PSC(id) { \
+	MCLK_TYPE_PSC, 0, \
+	"psc" #id "-mux0", \
+	"psc" #id "-en0", \
+	"psc" #id "_mclk_div", \
+	{ "psc" #id "_mclk_div", "dummy", }, \
+	"psc" #id "_mclk", \
+}
+
+#define MCLK_SETUP_DATA_MSCAN(id) { \
+	MCLK_TYPE_MSCAN, 0, \
+	"mscan" #id "-mux0", \
+	"mscan" #id "-en0", \
+	"mscan" #id "_mclk_div", \
+	{ "mscan" #id "_mclk_div", "dummy", }, \
+	"mscan" #id "_mclk", \
+}
+
+#define MCLK_SETUP_DATA_SPDIF { \
+	MCLK_TYPE_SPDIF, 1, \
+	"spdif-mux0", \
+	"spdif-en0", \
+	"spdif_mclk_div", \
+	{ "spdif_mclk_div", "spdif-rx", }, \
+	"spdif_mclk", \
+}
+
+#define MCLK_SETUP_DATA_OUTCLK(id) { \
+	MCLK_TYPE_OUTCLK, 0, \
+	"out" #id "-mux0", \
+	"out" #id "-en0", \
+	"out" #id "_mclk_div", \
+	{ "out" #id "_mclk_div", "dummy", }, \
+	"out" #id "_clk", \
+}
+
+static struct mclk_setup_data mclk_psc_data[] = {
+	MCLK_SETUP_DATA_PSC(0),
+	MCLK_SETUP_DATA_PSC(1),
+	MCLK_SETUP_DATA_PSC(2),
+	MCLK_SETUP_DATA_PSC(3),
+	MCLK_SETUP_DATA_PSC(4),
+	MCLK_SETUP_DATA_PSC(5),
+	MCLK_SETUP_DATA_PSC(6),
+	MCLK_SETUP_DATA_PSC(7),
+	MCLK_SETUP_DATA_PSC(8),
+	MCLK_SETUP_DATA_PSC(9),
+	MCLK_SETUP_DATA_PSC(10),
+	MCLK_SETUP_DATA_PSC(11),
+};
+
+static struct mclk_setup_data mclk_mscan_data[] = {
+	MCLK_SETUP_DATA_MSCAN(0),
+	MCLK_SETUP_DATA_MSCAN(1),
+	MCLK_SETUP_DATA_MSCAN(2),
+	MCLK_SETUP_DATA_MSCAN(3),
+};
+
+static struct mclk_setup_data mclk_spdif_data[] = {
+	MCLK_SETUP_DATA_SPDIF,
+};
+
+static struct mclk_setup_data mclk_outclk_data[] = {
+	MCLK_SETUP_DATA_OUTCLK(0),
+	MCLK_SETUP_DATA_OUTCLK(1),
+	MCLK_SETUP_DATA_OUTCLK(2),
+	MCLK_SETUP_DATA_OUTCLK(3),
+};
+
+/* setup the MCLK clock subtree of an individual PSC/MSCAN/SPDIF */
+static void mpc512x_clk_setup_mclk(struct mclk_setup_data *entry, size_t idx)
+{
+	size_t clks_idx_pub, clks_idx_int;
+	u32 __iomem *mccr_reg;	/* MCLK control register (mux, en, div) */
+	int div;
+
+	/* derive a few parameters from the component type and index */
+	switch (entry->type) {
+	case MCLK_TYPE_PSC:
+		clks_idx_pub = MPC512x_CLK_PSC0_MCLK + idx;
+		clks_idx_int = MPC512x_CLK_MCLKS_FIRST
+			     + (idx) * MCLK_MAX_IDX;
+		mccr_reg = &clkregs->psc_ccr[idx];
+		break;
+	case MCLK_TYPE_MSCAN:
+		clks_idx_pub = MPC512x_CLK_MSCAN0_MCLK + idx;
+		clks_idx_int = MPC512x_CLK_MCLKS_FIRST
+			     + (NR_PSCS + idx) * MCLK_MAX_IDX;
+		mccr_reg = &clkregs->mscan_ccr[idx];
+		break;
+	case MCLK_TYPE_SPDIF:
+		clks_idx_pub = MPC512x_CLK_SPDIF_MCLK;
+		clks_idx_int = MPC512x_CLK_MCLKS_FIRST
+			     + (NR_PSCS + NR_MSCANS) * MCLK_MAX_IDX;
+		mccr_reg = &clkregs->spccr;
+		break;
+	case MCLK_TYPE_OUTCLK:
+		clks_idx_pub = MPC512x_CLK_OUT0_CLK + idx;
+		clks_idx_int = MPC512x_CLK_MCLKS_FIRST
+			     + (NR_PSCS + NR_MSCANS + NR_SPDIFS + idx)
+			     * MCLK_MAX_IDX;
+		mccr_reg = &clkregs->out_ccr[idx];
+		break;
+	default:
+		return;
+	}
+
+	/*
+	 * this was grabbed from the PPC_CLOCK implementation, which
+	 * enforced a specific MCLK divider while the clock was gated
+	 * during setup (that's a documented hardware requirement)
+	 *
+	 * the PPC_CLOCK implementation might even have violated the
+	 * "MCLK <= IPS" constraint, the fixed divider value of 1
+	 * results in a divider of 2 and thus MCLK = SYS/2 which equals
+	 * CSB which is greater than IPS; the serial port setup may have
+	 * adjusted the divider which the clock setup might have left in
+	 * an undesirable state
+	 *
+	 * initial setup is:
+	 * - MCLK 0 from SYS
+	 * - MCLK DIV such to not exceed the IPS clock
+	 * - MCLK 0 enabled
+	 * - MCLK 1 from MCLK DIV
+	 */
+	div = clk_get_rate(clks[MPC512x_CLK_SYS]);
+	div /= clk_get_rate(clks[MPC512x_CLK_IPS]);
+	out_be32(mccr_reg, (0 << 16));
+	out_be32(mccr_reg, (0 << 16) | ((div - 1) << 17));
+	out_be32(mccr_reg, (1 << 16) | ((div - 1) << 17));
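+	/*
+	 * e.g. with SYS = 256 MHz and IPS = 64 MHz (illustrative rates)
+	 * div becomes 4, so the divider field is written with 3 and MCLK
+	 * initially runs at SYS / 4 = 64 MHz, which honours the
+	 * "MCLK <= IPS" constraint mentioned above
+	 */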
+
+	/*
+	 * create the 'struct clk' items of the MCLK's clock subtree
+	 *
+	 * note that by design we always create all nodes and won't take
+	 * shortcuts here, because
+	 * - the "internal" MCLK_DIV and MCLK_OUT signal in turn are
+	 *   selectable inputs to the CFM while those who "actually use"
+	 *   the PSC/MSCAN/SPDIF (serial drivers et al) need the MCLK
+	 *   for their bitrate
+	 * - in the absence of "aliases" for clocks we need to create
+	 *   individual 'struct clk' items for whatever might get
+	 *   referenced or looked up, even if several of those items are
+	 *   identical from the logical POV (their rate value)
+	 * - for easier future maintenance and for better reflection of
+	 *   the SoC's documentation, it appears appropriate to generate
+	 *   clock items even for those muxers which actually are NOPs
+	 *   (those with two inputs of which one is reserved)
+	 */
+	clks[clks_idx_int + MCLK_IDX_MUX0] = mpc512x_clk_muxed(
+			entry->name_mux0,
+			soc_has_mclk_mux0_canin()
+				? &parent_names_mux0_canin[0]
+				: &parent_names_mux0_spdif[0],
+			ARRAY_SIZE(parent_names_mux0_spdif),
+			mccr_reg, 14, 2);
+	clks[clks_idx_int + MCLK_IDX_EN0] = mpc512x_clk_gated(
+			entry->name_en0, entry->name_mux0,
+			mccr_reg, 16);
+	clks[clks_idx_int + MCLK_IDX_DIV0] = mpc512x_clk_divider(
+			entry->name_div0,
+			entry->name_en0, CLK_SET_RATE_GATE,
+			mccr_reg, 17, 15, 0);
+	if (entry->has_mclk1) {
+		clks[clks_idx_pub] = mpc512x_clk_muxed(
+				entry->name_mclk,
+				&entry->parent_names_mux1[0],
+				ARRAY_SIZE(entry->parent_names_mux1),
+				mccr_reg, 7, 1);
+	} else {
+		clks[clks_idx_pub] = mpc512x_clk_factor(
+				entry->name_mclk,
+				entry->parent_names_mux1[0],
+				1, 1);
+	}
+}
+
+/* }}} MCLK helpers */
+
+static void mpc512x_clk_setup_clock_tree(struct device_node *np, int busfreq)
+{
+	int sys_mul, sys_div, ips_div;
+	int mul, div;
+	size_t mclk_idx;
+	int freq;
+
+	/*
+	 * developer's notes:
+	 * - consider whether to handle clocks which have both gates and
+	 *   dividers via intermediates or by means of composites
+	 * - fractional dividers appear to not map well to composites
+	 *   since they can be seen as a fixed multiplier and an
+	 *   adjustable divider, while composites can only combine at
+	 *   most one of a mux, div, and gate each into one 'struct clk'
+	 *   item
+	 * - PSC/MSCAN/SPDIF clock generation OTOH already is very
+	 *   specific and cannot get mapped to composites (at least not
+	 *   a single one, maybe two of them, but then some of these
+	 *   intermediate clock signals get referenced elsewhere (e.g.
+	 *   in the clock frequency measurement, CFM) and thus need
+	 *   publicly available names)
+	 * - the current source layout appropriately reflects the
+	 *   hardware setup, and it works, so it's questionable whether
+	 *   further changes will result in big enough a benefit
+	 */
+
+	/* regardless of whether XTAL/OSC exists, have REF created */
+	mpc512x_clk_setup_ref_clock(np, busfreq, &sys_mul, &sys_div, &ips_div);
+
+	/* now setup the REF -> SYS -> CSB -> IPS hierarchy */
+	clks[MPC512x_CLK_SYS] = mpc512x_clk_factor("sys", "ref",
+						   sys_mul, sys_div);
+	clks[MPC512x_CLK_CSB] = mpc512x_clk_factor("csb", "sys", 1, 2);
+	clks[MPC512x_CLK_IPS] = mpc512x_clk_divtable("ips", "csb",
+						     &clkregs->scfr1, 23, 3,
+						     divtab_2346);
+	/* now setup anything below SYS and CSB and IPS */
+
+	clks[MPC512x_CLK_DDR_UG] = mpc512x_clk_factor("ddr-ug", "sys", 1, 2);
+
+	/*
+	 * the Reference Manual discusses that for SDHC only even divide
+	 * ratios are supported because clock domain synchronization
+	 * between 'per' and 'ipg' is broken;
+	 * keep the divider's bit 0 cleared (per reset value), and only
+	 * allow to setup the divider's bits 7:1, which results in that
+	 * only even divide ratios can get configured upon rate changes;
+	 * keep the "x4" name because this bit shift hack is an internal
+	 * implementation detail, the "fractional divider with quarters"
+	 * semantics remains
+	 */
+	clks[MPC512x_CLK_SDHC_x4] = mpc512x_clk_factor("sdhc-x4", "csb", 2, 1);
+	clks[MPC512x_CLK_SDHC_UG] = mpc512x_clk_divider("sdhc-ug", "sdhc-x4", 0,
+							&clkregs->scfr2, 1, 7,
+							CLK_DIVIDER_ONE_BASED);
+	if (soc_has_sdhc2()) {
+		clks[MPC512x_CLK_SDHC2_UG] = mpc512x_clk_divider(
+				"sdhc2-ug", "sdhc-x4", 0, &clkregs->scfr2,
+				9, 7, CLK_DIVIDER_ONE_BASED);
+	}
+
+	clks[MPC512x_CLK_DIU_x4] = mpc512x_clk_factor("diu-x4", "csb", 4, 1);
+	clks[MPC512x_CLK_DIU_UG] = mpc512x_clk_divider("diu-ug", "diu-x4", 0,
+						       &clkregs->scfr1, 0, 8,
+						       CLK_DIVIDER_ONE_BASED);
+
+	/*
+	 * the "power architecture PLL" was setup from data which was
+	 * sampled from the reset config word, at this point in time the
+	 * configuration can be considered fixed and read only (i.e. no
+	 * longer adjustable, or no longer in need of adjustment), which
+	 * is why we don't register a PLL here but assume fixed factors
+	 */
+	mul = get_cpmf_mult_x2();
+	div = 2;	/* compensate for the fractional factor */
+	clks[MPC512x_CLK_E300] = mpc512x_clk_factor("e300", "csb", mul, div);
+
+	if (soc_has_mbx()) {
+		clks[MPC512x_CLK_MBX_BUS_UG] = mpc512x_clk_factor(
+				"mbx-bus-ug", "csb", 1, 2);
+		clks[MPC512x_CLK_MBX_UG] = mpc512x_clk_divtable(
+				"mbx-ug", "mbx-bus-ug", &clkregs->scfr1,
+				14, 3, divtab_1234);
+		clks[MPC512x_CLK_MBX_3D_UG] = mpc512x_clk_factor(
+				"mbx-3d-ug", "mbx-ug", 1, 1);
+	}
+	if (soc_has_pci()) {
+		clks[MPC512x_CLK_PCI_UG] = mpc512x_clk_divtable(
+				"pci-ug", "csb", &clkregs->scfr1,
+				20, 3, divtab_2346);
+	}
+	if (soc_has_nfc_5125()) {
+		/*
+		 * XXX TODO implement 5125 NFC clock setup logic,
+		 * with high/low period counters in clkregs->scfr3,
+		 * currently there are no users so it's ENOIMPL
+		 */
+		clks[MPC512x_CLK_NFC_UG] = ERR_PTR(-ENOTSUPP);
+	} else {
+		clks[MPC512x_CLK_NFC_UG] = mpc512x_clk_divtable(
+				"nfc-ug", "ips", &clkregs->scfr1,
+				8, 3, divtab_1234);
+	}
+	clks[MPC512x_CLK_LPC_UG] = mpc512x_clk_divtable("lpc-ug", "ips",
+							&clkregs->scfr1, 11, 3,
+							divtab_1234);
+
+	clks[MPC512x_CLK_LPC] = mpc512x_clk_gated("lpc", "lpc-ug",
+						  &clkregs->sccr1, 30);
+	clks[MPC512x_CLK_NFC] = mpc512x_clk_gated("nfc", "nfc-ug",
+						  &clkregs->sccr1, 29);
+	if (soc_has_pata()) {
+		clks[MPC512x_CLK_PATA] = mpc512x_clk_gated(
+				"pata", "ips", &clkregs->sccr1, 28);
+	}
+	/* for PSCs there is a "registers" gate and a bitrate MCLK subtree */
+	for (mclk_idx = 0; mclk_idx < soc_max_pscnum(); mclk_idx++) {
+		char name[12];
+		snprintf(name, sizeof(name), "psc%d", mclk_idx);
+		clks[MPC512x_CLK_PSC0 + mclk_idx] = mpc512x_clk_gated(
+				name, "ips", &clkregs->sccr1, 27 - mclk_idx);
+		mpc512x_clk_setup_mclk(&mclk_psc_data[mclk_idx], mclk_idx);
+	}
+	clks[MPC512x_CLK_PSC_FIFO] = mpc512x_clk_gated("psc-fifo", "ips",
+						       &clkregs->sccr1, 15);
+	if (soc_has_sata()) {
+		clks[MPC512x_CLK_SATA] = mpc512x_clk_gated(
+				"sata", "ips", &clkregs->sccr1, 14);
+	}
+	clks[MPC512x_CLK_FEC] = mpc512x_clk_gated("fec", "ips",
+						  &clkregs->sccr1, 13);
+	if (soc_has_pci()) {
+		clks[MPC512x_CLK_PCI] = mpc512x_clk_gated(
+				"pci", "pci-ug", &clkregs->sccr1, 11);
+	}
+	clks[MPC512x_CLK_DDR] = mpc512x_clk_gated("ddr", "ddr-ug",
+						  &clkregs->sccr1, 10);
+	if (soc_has_fec2()) {
+		clks[MPC512x_CLK_FEC2] = mpc512x_clk_gated(
+				"fec2", "ips", &clkregs->sccr1, 9);
+	}
+
+	clks[MPC512x_CLK_DIU] = mpc512x_clk_gated("diu", "diu-ug",
+						  &clkregs->sccr2, 31);
+	if (soc_has_axe()) {
+		clks[MPC512x_CLK_AXE] = mpc512x_clk_gated(
+				"axe", "csb", &clkregs->sccr2, 30);
+	}
+	clks[MPC512x_CLK_MEM] = mpc512x_clk_gated("mem", "ips",
+						  &clkregs->sccr2, 29);
+	clks[MPC512x_CLK_USB1] = mpc512x_clk_gated("usb1", "csb",
+						   &clkregs->sccr2, 28);
+	clks[MPC512x_CLK_USB2] = mpc512x_clk_gated("usb2", "csb",
+						   &clkregs->sccr2, 27);
+	clks[MPC512x_CLK_I2C] = mpc512x_clk_gated("i2c", "ips",
+						  &clkregs->sccr2, 26);
+	/* MSCAN differs from PSC with just one gate for multiple components */
+	clks[MPC512x_CLK_BDLC] = mpc512x_clk_gated("bdlc", "ips",
+						   &clkregs->sccr2, 25);
+	for (mclk_idx = 0; mclk_idx < ARRAY_SIZE(mclk_mscan_data); mclk_idx++)
+		mpc512x_clk_setup_mclk(&mclk_mscan_data[mclk_idx], mclk_idx);
+	clks[MPC512x_CLK_SDHC] = mpc512x_clk_gated("sdhc", "sdhc-ug",
+						   &clkregs->sccr2, 24);
+	/* there is only one SPDIF component, which shares MCLK support code */
+	if (soc_has_spdif()) {
+		clks[MPC512x_CLK_SPDIF] = mpc512x_clk_gated(
+				"spdif", "ips", &clkregs->sccr2, 23);
+		mpc512x_clk_setup_mclk(&mclk_spdif_data[0], 0);
+	}
+	if (soc_has_mbx()) {
+		clks[MPC512x_CLK_MBX_BUS] = mpc512x_clk_gated(
+				"mbx-bus", "mbx-bus-ug", &clkregs->sccr2, 22);
+		clks[MPC512x_CLK_MBX] = mpc512x_clk_gated(
+				"mbx", "mbx-ug", &clkregs->sccr2, 21);
+		clks[MPC512x_CLK_MBX_3D] = mpc512x_clk_gated(
+				"mbx-3d", "mbx-3d-ug", &clkregs->sccr2, 20);
+	}
+	clks[MPC512x_CLK_IIM] = mpc512x_clk_gated("iim", "csb",
+						  &clkregs->sccr2, 19);
+	if (soc_has_viu()) {
+		clks[MPC512x_CLK_VIU] = mpc512x_clk_gated(
+				"viu", "csb", &clkregs->sccr2, 18);
+	}
+	if (soc_has_sdhc2()) {
+		clks[MPC512x_CLK_SDHC2] = mpc512x_clk_gated(
+				"sdhc-2", "sdhc2-ug", &clkregs->sccr2, 17);
+	}
+
+	if (soc_has_outclk()) {
+		size_t idx;	/* used as mclk_idx, just to trim line length */
+		for (idx = 0; idx < ARRAY_SIZE(mclk_outclk_data); idx++)
+			mpc512x_clk_setup_mclk(&mclk_outclk_data[idx], idx);
+	}
+
+	/*
+	 * externally provided clocks (when implemented in hardware,
+	 * device tree may specify values which otherwise were unknown)
+	 */
+	freq = get_freq_from_dt("psc_mclk_in");
+	if (!freq)
+		freq = 25000000;
+	clks[MPC512x_CLK_PSC_MCLK_IN] = mpc512x_clk_fixed("psc_mclk_in", freq);
+	if (soc_has_mclk_mux0_canin()) {
+		freq = get_freq_from_dt("can_clk_in");
+		clks[MPC512x_CLK_CAN_CLK_IN] = mpc512x_clk_fixed(
+				"can_clk_in", freq);
+	} else {
+		freq = get_freq_from_dt("spdif_tx_in");
+		clks[MPC512x_CLK_SPDIF_TX_IN] = mpc512x_clk_fixed(
+				"spdif_tx_in", freq);
+		freq = get_freq_from_dt("spdif_rx_in");
+		clks[MPC512x_CLK_SPDIF_TX_IN] = mpc512x_clk_fixed(
+				"spdif_rx_in", freq);
+	}
+
+	/* fixed frequency for AC97, always 24.567MHz */
+	clks[MPC512x_CLK_AC97] = mpc512x_clk_fixed("ac97", 24567000);
+
+	/*
+	 * pre-enable those "internal" clock items which never get
+	 * claimed by any peripheral driver, to not have the clock
+	 * subsystem disable them late at startup
+	 */
+	clk_prepare_enable(clks[MPC512x_CLK_DUMMY]);
+	clk_prepare_enable(clks[MPC512x_CLK_E300]);	/* PowerPC CPU */
+	clk_prepare_enable(clks[MPC512x_CLK_DDR]);	/* DRAM */
+	clk_prepare_enable(clks[MPC512x_CLK_MEM]);	/* SRAM */
+	clk_prepare_enable(clks[MPC512x_CLK_IPS]);	/* SoC periph */
+	clk_prepare_enable(clks[MPC512x_CLK_LPC]);	/* boot media */
+}
+
+/*
+ * registers the set of public clocks (those listed in the dt-bindings/
+ * header file) for OF lookups, keeps the intermediates private to us
+ */
+static void mpc5121_clk_register_of_provider(struct device_node *np)
+{
+	clk_data.clks = clks;
+	clk_data.clk_num = MPC512x_CLK_LAST_PUBLIC + 1;	/* _not_ ARRAY_SIZE() */
+	of_clk_add_provider(np, of_clk_src_onecell_get, &clk_data);
+}
+
+/*
+ * temporary support for the period of time between introduction of CCF
+ * support and the adjustment of peripheral drivers to OF based lookups
+ */
+static void mpc5121_clk_provide_migration_support(void)
+{
+
+	/*
+	 * pre-enable those clock items which are not yet appropriately
+	 * acquired by their peripheral driver
+	 *
+	 * the PCI clock cannot get acquired by its peripheral driver,
+	 * because for this platform the driver won't probe(), instead
+	 * initialization is done from within the .setup_arch() routine
+	 * at a point in time where the clock provider has not been
+	 * setup yet and thus isn't available yet
+	 *
+	 * so we "pre-enable" the clock here, to not have the clock
+	 * subsystem automatically disable this item in a late init call
+	 *
+	 * this PCI clock pre-enable workaround only applies when there
+	 * are device tree nodes for PCI and thus the peripheral driver
+	 * has attached to bridges, otherwise the PCI clock remains
+	 * unused and so it gets disabled
+	 */
+	clk_prepare_enable(clks[MPC512x_CLK_PSC3_MCLK]);/* serial console */
+	if (of_find_compatible_node(NULL, "pci", "fsl,mpc5121-pci"))
+		clk_prepare_enable(clks[MPC512x_CLK_PCI]);
+}
+
+/*
+ * those macros are not exactly pretty, but they encapsulate a lot
+ * of copy'n'paste heavy code which is even more ugly, and reduce
+ * the potential for inconsistencies in those many code copies
+ */
+#define FOR_NODES(compatname) \
+	for_each_compatible_node(np, NULL, compatname)
+
+#define NODE_PREP do { \
+	of_address_to_resource(np, 0, &res); \
+	snprintf(devname, sizeof(devname), "%08x.%s", res.start, np->name); \
+} while (0)
+
+#define NODE_CHK(clkname, clkitem, regnode, regflag) do { \
+	struct clk *clk; \
+	clk = of_clk_get_by_name(np, clkname); \
+	if (IS_ERR(clk)) { \
+		clk = clkitem; \
+		clk_register_clkdev(clk, clkname, devname); \
+		if (regnode) \
+			clk_register_clkdev(clk, clkname, np->name); \
+		did_register |= DID_REG_ ## regflag; \
+		pr_debug("clock alias name '%s' for dev '%s' pointer %p\n", \
+			 clkname, devname, clk); \
+	} else { \
+		clk_put(clk); \
+	} \
+} while (0)
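+
+/*
+ * e.g. for a node at (an illustrative) unit address 0x80011400 with node
+ * name "psc", NODE_PREP yields devname "80011400.psc", and a subsequent
+ * NODE_CHK("mclk", ...) registers an "mclk" clkdev alias for exactly that
+ * device name -- but only when the OF based clock lookup failed, i.e. the
+ * device tree lacks a clock spec
+ */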
+
+/*
+ * register source code provided fallback results for clock lookups,
+ * these get consulted when OF based clock lookup fails (that is in the
+ * case of not yet adjusted device tree data, where clock related specs
+ * are missing)
+ */
+static void mpc5121_clk_provide_backwards_compat(void)
+{
+	enum did_reg_flags {
+		DID_REG_PSC	= BIT(0),
+		DID_REG_PSCFIFO	= BIT(1),
+		DID_REG_NFC	= BIT(2),
+		DID_REG_CAN	= BIT(3),
+		DID_REG_I2C	= BIT(4),
+		DID_REG_DIU	= BIT(5),
+		DID_REG_VIU	= BIT(6),
+		DID_REG_FEC	= BIT(7),
+		DID_REG_USB	= BIT(8),
+		DID_REG_PATA	= BIT(9),
+	};
+
+	int did_register;
+	struct device_node *np;
+	struct resource res;
+	int idx;
+	char devname[32];
+
+	did_register = 0;
+
+	FOR_NODES(mpc512x_select_psc_compat()) {
+		NODE_PREP;
+		idx = (res.start >> 8) & 0xf;
+		NODE_CHK("ipg", clks[MPC512x_CLK_PSC0 + idx], 0, PSC);
+		NODE_CHK("mclk", clks[MPC512x_CLK_PSC0_MCLK + idx], 0, PSC);
+	}
+
+	FOR_NODES("fsl,mpc5121-psc-fifo") {
+		NODE_PREP;
+		NODE_CHK("ipg", clks[MPC512x_CLK_PSC_FIFO], 1, PSCFIFO);
+	}
+
+	FOR_NODES("fsl,mpc5121-nfc") {
+		NODE_PREP;
+		NODE_CHK("ipg", clks[MPC512x_CLK_NFC], 0, NFC);
+	}
+
+	FOR_NODES("fsl,mpc5121-mscan") {
+		NODE_PREP;
+		idx = 0;
+		idx += (res.start & 0x2000) ? 2 : 0;
+		idx += (res.start & 0x0080) ? 1 : 0;
+		NODE_CHK("ipg", clks[MPC512x_CLK_BDLC], 0, CAN);
+		NODE_CHK("mclk", clks[MPC512x_CLK_MSCAN0_MCLK + idx], 0, CAN);
+	}
+
+	/*
+	 * do register the 'ips', 'sys', and 'ref' names globally
+	 * instead of inside each individual CAN node, as there is no
+	 * potential for a name conflict (in contrast to 'ipg' and 'mclk')
+	 */
+	if (did_register & DID_REG_CAN) {
+		clk_register_clkdev(clks[MPC512x_CLK_IPS], "ips", NULL);
+		clk_register_clkdev(clks[MPC512x_CLK_SYS], "sys", NULL);
+		clk_register_clkdev(clks[MPC512x_CLK_REF], "ref", NULL);
+	}
+
+	FOR_NODES("fsl,mpc5121-i2c") {
+		NODE_PREP;
+		NODE_CHK("ipg", clks[MPC512x_CLK_I2C], 0, I2C);
+	}
+
+	/*
+	 * workaround for the fact that the I2C driver does an "anonymous"
+	 * lookup (NULL name spec, which yields the first clock spec) for
+	 * which we cannot register an alias -- a _global_ 'ipg' alias that
+	 * is not bound to any device name and returns the I2C clock item
+	 * is not a good idea
+	 *
+	 * so we have the lookup in the peripheral driver fail, which is
+	 * silent and non-fatal, and pre-enable the clock item here such
+	 * that register access is possible
+	 *
+	 * see commit b3bfce2b "i2c: mpc: cleanup clock API use" for
+	 * details, adjusting s/NULL/"ipg"/ in i2c-mpc.c would make this
+	 * workaround obsolete
+	 */
+	if (did_register & DID_REG_I2C)
+		clk_prepare_enable(clks[MPC512x_CLK_I2C]);
+
+	FOR_NODES("fsl,mpc5121-diu") {
+		NODE_PREP;
+		NODE_CHK("ipg", clks[MPC512x_CLK_DIU], 1, DIU);
+	}
+
+	FOR_NODES("fsl,mpc5121-viu") {
+		NODE_PREP;
+		NODE_CHK("ipg", clks[MPC512x_CLK_VIU], 0, VIU);
+	}
+
+	/*
+	 * note that 2771399a "fs_enet: cleanup clock API use" did use the
+	 * "per" string for the clock lookup in contrast to the "ipg" name
+	 * which most other nodes are using -- this is not a fatal thing
+	 * but just something to keep in mind when doing compatibility
+	 * registration, it's a non-issue with up-to-date device tree data
+	 */
+	FOR_NODES("fsl,mpc5121-fec") {
+		NODE_PREP;
+		NODE_CHK("per", clks[MPC512x_CLK_FEC], 0, FEC);
+	}
+	FOR_NODES("fsl,mpc5121-fec-mdio") {
+		NODE_PREP;
+		NODE_CHK("per", clks[MPC512x_CLK_FEC], 0, FEC);
+	}
+	/*
+	 * MPC5125 has two FECs: FEC1 at 0x2800, FEC2 at 0x4800;
+	 * the clock items don't "form an array" since FEC2 was
+	 * added only later and was not allowed to shift all other
+	 * clock item indices, so the numbers aren't adjacent
+	 */
+	FOR_NODES("fsl,mpc5125-fec") {
+		NODE_PREP;
+		if (res.start & 0x4000)
+			idx = MPC512x_CLK_FEC2;
+		else
+			idx = MPC512x_CLK_FEC;
+		NODE_CHK("per", clks[idx], 0, FEC);
+	}
+
+	FOR_NODES("fsl,mpc5121-usb2-dr") {
+		NODE_PREP;
+		idx = (res.start & 0x4000) ? 1 : 0;
+		NODE_CHK("ipg", clks[MPC512x_CLK_USB1 + idx], 0, USB);
+	}
+
+	FOR_NODES("fsl,mpc5121-pata") {
+		NODE_PREP;
+		NODE_CHK("ipg", clks[MPC512x_CLK_PATA], 0, PATA);
+	}
+
+	/*
+	 * try to collapse diagnostics into a single line of output yet
+	 * provide a full list of what is missing, to avoid noise in the
+	 * absence of up-to-date device tree data -- backwards
+	 * compatibility to old DTBs is a requirement, updates may be
+	 * desirable or preferable but are not at all mandatory
+	 */
+	if (did_register) {
+		pr_notice("device tree lacks clock specs, adding fallbacks (0x%x,%s%s%s%s%s%s%s%s%s%s)\n",
+			  did_register,
+			  (did_register & DID_REG_PSC) ? " PSC" : "",
+			  (did_register & DID_REG_PSCFIFO) ? " PSCFIFO" : "",
+			  (did_register & DID_REG_NFC) ? " NFC" : "",
+			  (did_register & DID_REG_CAN) ? " CAN" : "",
+			  (did_register & DID_REG_I2C) ? " I2C" : "",
+			  (did_register & DID_REG_DIU) ? " DIU" : "",
+			  (did_register & DID_REG_VIU) ? " VIU" : "",
+			  (did_register & DID_REG_FEC) ? " FEC" : "",
+			  (did_register & DID_REG_USB) ? " USB" : "",
+			  (did_register & DID_REG_PATA) ? " PATA" : "");
+	} else {
+		pr_debug("device tree has clock specs, no fallbacks added\n");
+	}
+}
+
+int __init mpc5121_clk_init(void)
+{
+	struct device_node *clk_np;
+	int busfreq;
+
+	/* map the clock control registers */
+	clk_np = of_find_compatible_node(NULL, NULL, "fsl,mpc5121-clock");
+	if (!clk_np)
+		return -ENODEV;
+	clkregs = of_iomap(clk_np, 0);
+	WARN_ON(!clkregs);
+
+	/* determine the SoC variant we run on */
+	mpc512x_clk_determine_soc();
+
+	/* invalidate all not yet registered clock slots */
+	mpc512x_clk_preset_data();
+
+	/*
+	 * have the device tree scanned for "fixed-clock" nodes (which
+	 * includes the oscillator node if the board's DT provides one)
+	 */
+	of_clk_init(NULL);
+
+	/*
+	 * add a dummy clock for those situations where a clock spec is
+	 * required yet no real clock is involved
+	 */
+	clks[MPC512x_CLK_DUMMY] = mpc512x_clk_fixed("dummy", 0);
+
+	/*
+	 * have all the real nodes in the clock tree populated from REF
+	 * down to all leaves, either starting from the OSC node or from
+	 * a REF root that was created from the IPS bus clock input
+	 */
+	busfreq = get_freq_from_dt("bus-frequency");
+	mpc512x_clk_setup_clock_tree(clk_np, busfreq);
+
+	/* register as an OF clock provider */
+	mpc5121_clk_register_of_provider(clk_np);
+
+	/*
+	 * unbreak not yet adjusted peripheral drivers during migration
+	 * towards fully operational common clock support, and allow
+	 * operation in the absence of clock related device tree specs
+	 */
+	mpc5121_clk_provide_migration_support();
+	mpc5121_clk_provide_backwards_compat();
+
+	return 0;
+}
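
With the provider above registered, peripheral drivers obtain their clocks through the common clock framework instead of the removed PPC_CLOCK code below. A minimal consumer-side sketch follows, assuming the usual <linux/clk.h> and <linux/platform_device.h> includes and a driver whose device tree node carries an "ipg" entry in "clock-names" (the probe function name is illustrative):

	static int example_probe(struct platform_device *pdev)
	{
		struct clk *ipg;
		int ret;

		/* look up the clock by the name used in the DT "clock-names" property */
		ipg = devm_clk_get(&pdev->dev, "ipg");
		if (IS_ERR(ipg))
			return PTR_ERR(ipg);

		/* prepare and enable it before touching the peripheral's registers */
		ret = clk_prepare_enable(ipg);
		if (ret)
			return ret;

		dev_info(&pdev->dev, "ipg clock runs at %lu Hz\n", clk_get_rate(ipg));
		return 0;
	}
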
diff --git a/arch/powerpc/platforms/512x/clock.c b/arch/powerpc/platforms/512x/clock.c
deleted file mode 100644
index fd8a376..0000000
--- a/arch/powerpc/platforms/512x/clock.c
+++ /dev/null
@@ -1,754 +0,0 @@
-/*
- * Copyright (C) 2007,2008 Freescale Semiconductor, Inc. All rights reserved.
- *
- * Author: John Rigby <jrigby@freescale.com>
- *
- * Implements the clk api defined in include/linux/clk.h
- *
- *    Original based on linux/arch/arm/mach-integrator/clock.c
- *
- *    Copyright (C) 2004 ARM Limited.
- *    Written by Deep Blue Solutions Limited.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-#include <linux/kernel.h>
-#include <linux/list.h>
-#include <linux/errno.h>
-#include <linux/err.h>
-#include <linux/module.h>
-#include <linux/string.h>
-#include <linux/clk.h>
-#include <linux/mutex.h>
-#include <linux/io.h>
-
-#include <linux/of_address.h>
-#include <linux/of_platform.h>
-#include <asm/mpc5xxx.h>
-#include <asm/mpc5121.h>
-#include <asm/clk_interface.h>
-
-#include "mpc512x.h"
-
-#undef CLK_DEBUG
-
-static int clocks_initialized;
-
-#define CLK_HAS_RATE	0x1	/* has rate in MHz */
-#define CLK_HAS_CTRL	0x2	/* has control reg and bit */
-
-struct clk {
-	struct list_head node;
-	char name[32];
-	int flags;
-	struct device *dev;
-	unsigned long rate;
-	struct module *owner;
-	void (*calc) (struct clk *);
-	struct clk *parent;
-	int reg, bit;		/* CLK_HAS_CTRL */
-	int div_shift;		/* only used by generic_div_clk_calc */
-};
-
-static LIST_HEAD(clocks);
-static DEFINE_MUTEX(clocks_mutex);
-
-static struct clk *mpc5121_clk_get(struct device *dev, const char *id)
-{
-	struct clk *p, *clk = ERR_PTR(-ENOENT);
-	int dev_match;
-	int id_match;
-
-	if (dev == NULL || id == NULL)
-		return clk;
-
-	mutex_lock(&clocks_mutex);
-	list_for_each_entry(p, &clocks, node) {
-		dev_match = id_match = 0;
-
-		if (dev == p->dev)
-			dev_match++;
-		if (strcmp(id, p->name) == 0)
-			id_match++;
-		if ((dev_match || id_match) && try_module_get(p->owner)) {
-			clk = p;
-			break;
-		}
-	}
-	mutex_unlock(&clocks_mutex);
-
-	return clk;
-}
-
-#ifdef CLK_DEBUG
-static void dump_clocks(void)
-{
-	struct clk *p;
-
-	mutex_lock(&clocks_mutex);
-	printk(KERN_INFO "CLOCKS:\n");
-	list_for_each_entry(p, &clocks, node) {
-		pr_info("  %s=%ld", p->name, p->rate);
-		if (p->parent)
-			pr_cont(" %s=%ld", p->parent->name,
-			       p->parent->rate);
-		if (p->flags & CLK_HAS_CTRL)
-			pr_cont(" reg/bit=%d/%d", p->reg, p->bit);
-		pr_cont("\n");
-	}
-	mutex_unlock(&clocks_mutex);
-}
-#define	DEBUG_CLK_DUMP() dump_clocks()
-#else
-#define	DEBUG_CLK_DUMP()
-#endif
-
-
-static void mpc5121_clk_put(struct clk *clk)
-{
-	module_put(clk->owner);
-}
-
-#define NRPSC 12
-
-struct mpc512x_clockctl {
-	u32 spmr;		/* System PLL Mode Reg */
-	u32 sccr[2];		/* System Clk Ctrl Reg 1 & 2 */
-	u32 scfr1;		/* System Clk Freq Reg 1 */
-	u32 scfr2;		/* System Clk Freq Reg 2 */
-	u32 reserved;
-	u32 bcr;		/* Bread Crumb Reg */
-	u32 pccr[NRPSC];	/* PSC Clk Ctrl Reg 0-11 */
-	u32 spccr;		/* SPDIF Clk Ctrl Reg */
-	u32 cccr;		/* CFM Clk Ctrl Reg */
-	u32 dccr;		/* DIU Clk Cnfg Reg */
-};
-
-static struct mpc512x_clockctl __iomem *clockctl;
-
-static int mpc5121_clk_enable(struct clk *clk)
-{
-	unsigned int mask;
-
-	if (clk->flags & CLK_HAS_CTRL) {
-		mask = in_be32(&clockctl->sccr[clk->reg]);
-		mask |= 1 << clk->bit;
-		out_be32(&clockctl->sccr[clk->reg], mask);
-	}
-	return 0;
-}
-
-static void mpc5121_clk_disable(struct clk *clk)
-{
-	unsigned int mask;
-
-	if (clk->flags & CLK_HAS_CTRL) {
-		mask = in_be32(&clockctl->sccr[clk->reg]);
-		mask &= ~(1 << clk->bit);
-		out_be32(&clockctl->sccr[clk->reg], mask);
-	}
-}
-
-static unsigned long mpc5121_clk_get_rate(struct clk *clk)
-{
-	if (clk->flags & CLK_HAS_RATE)
-		return clk->rate;
-	else
-		return 0;
-}
-
-static long mpc5121_clk_round_rate(struct clk *clk, unsigned long rate)
-{
-	return rate;
-}
-
-static int mpc5121_clk_set_rate(struct clk *clk, unsigned long rate)
-{
-	return 0;
-}
-
-static int clk_register(struct clk *clk)
-{
-	mutex_lock(&clocks_mutex);
-	list_add(&clk->node, &clocks);
-	mutex_unlock(&clocks_mutex);
-	return 0;
-}
-
-static unsigned long spmf_mult(void)
-{
-	/*
-	 * Convert spmf to multiplier
-	 */
-	static int spmf_to_mult[] = {
-		68, 1, 12, 16,
-		20, 24, 28, 32,
-		36, 40, 44, 48,
-		52, 56, 60, 64
-	};
-	int spmf = (in_be32(&clockctl->spmr) >> 24) & 0xf;
-	return spmf_to_mult[spmf];
-}
-
-static unsigned long sysdiv_div_x_2(void)
-{
-	/*
-	 * Convert sysdiv to divisor x 2
-	 * Some divisors have fractional parts so
-	 * multiply by 2 then divide by this value
-	 */
-	static int sysdiv_to_div_x_2[] = {
-		4, 5, 6, 7,
-		8, 9, 10, 14,
-		12, 16, 18, 22,
-		20, 24, 26, 30,
-		28, 32, 34, 38,
-		36, 40, 42, 46,
-		44, 48, 50, 54,
-		52, 56, 58, 62,
-		60, 64, 66,
-	};
-	int sysdiv = (in_be32(&clockctl->scfr2) >> 26) & 0x3f;
-	return sysdiv_to_div_x_2[sysdiv];
-}
-
-static unsigned long ref_to_sys(unsigned long rate)
-{
-	rate *= spmf_mult();
-	rate *= 2;
-	rate /= sysdiv_div_x_2();
-
-	return rate;
-}
-
-static unsigned long sys_to_ref(unsigned long rate)
-{
-	rate *= sysdiv_div_x_2();
-	rate /= 2;
-	rate /= spmf_mult();
-
-	return rate;
-}
-
-static long ips_to_ref(unsigned long rate)
-{
-	int ips_div = (in_be32(&clockctl->scfr1) >> 23) & 0x7;
-
-	rate *= ips_div;	/* csb_clk = ips_clk * ips_div */
-	rate *= 2;		/* sys_clk = csb_clk * 2 */
-	return sys_to_ref(rate);
-}
-
-static unsigned long devtree_getfreq(char *clockname)
-{
-	struct device_node *np;
-	const unsigned int *prop;
-	unsigned int val = 0;
-
-	np = of_find_compatible_node(NULL, NULL, "fsl,mpc5121-immr");
-	if (np) {
-		prop = of_get_property(np, clockname, NULL);
-		if (prop)
-			val = *prop;
-	    of_node_put(np);
-	}
-	return val;
-}
-
-static void ref_clk_calc(struct clk *clk)
-{
-	unsigned long rate;
-
-	rate = devtree_getfreq("bus-frequency");
-	if (rate == 0) {
-		printk(KERN_ERR "No bus-frequency in dev tree\n");
-		clk->rate = 0;
-		return;
-	}
-	clk->rate = ips_to_ref(rate);
-}
-
-static struct clk ref_clk = {
-	.name = "ref_clk",
-	.calc = ref_clk_calc,
-};
-
-
-static void sys_clk_calc(struct clk *clk)
-{
-	clk->rate = ref_to_sys(ref_clk.rate);
-}
-
-static struct clk sys_clk = {
-	.name = "sys_clk",
-	.calc = sys_clk_calc,
-};
-
-static void diu_clk_calc(struct clk *clk)
-{
-	int diudiv_x_2 = in_be32(&clockctl->scfr1) & 0xff;
-	unsigned long rate;
-
-	rate = sys_clk.rate;
-
-	rate *= 2;
-	rate /= diudiv_x_2;
-
-	clk->rate = rate;
-}
-
-static void viu_clk_calc(struct clk *clk)
-{
-	unsigned long rate;
-
-	rate = sys_clk.rate;
-	rate /= 2;
-	clk->rate = rate;
-}
-
-static void half_clk_calc(struct clk *clk)
-{
-	clk->rate = clk->parent->rate / 2;
-}
-
-static void generic_div_clk_calc(struct clk *clk)
-{
-	int div = (in_be32(&clockctl->scfr1) >> clk->div_shift) & 0x7;
-
-	clk->rate = clk->parent->rate / div;
-}
-
-static void unity_clk_calc(struct clk *clk)
-{
-	clk->rate = clk->parent->rate;
-}
-
-static struct clk csb_clk = {
-	.name = "csb_clk",
-	.calc = half_clk_calc,
-	.parent = &sys_clk,
-};
-
-static void e300_clk_calc(struct clk *clk)
-{
-	int spmf = (in_be32(&clockctl->spmr) >> 16) & 0xf;
-	int ratex2 = clk->parent->rate * spmf;
-
-	clk->rate = ratex2 / 2;
-}
-
-static struct clk e300_clk = {
-	.name = "e300_clk",
-	.calc = e300_clk_calc,
-	.parent = &csb_clk,
-};
-
-static struct clk ips_clk = {
-	.name = "ips_clk",
-	.calc = generic_div_clk_calc,
-	.parent = &csb_clk,
-	.div_shift = 23,
-};
-
-/*
- * Clocks controlled by SCCR1 (.reg = 0)
- */
-static struct clk lpc_clk = {
-	.name = "lpc_clk",
-	.flags = CLK_HAS_CTRL,
-	.reg = 0,
-	.bit = 30,
-	.calc = generic_div_clk_calc,
-	.parent = &ips_clk,
-	.div_shift = 11,
-};
-
-static struct clk nfc_clk = {
-	.name = "nfc_clk",
-	.flags = CLK_HAS_CTRL,
-	.reg = 0,
-	.bit = 29,
-	.calc = generic_div_clk_calc,
-	.parent = &ips_clk,
-	.div_shift = 8,
-};
-
-static struct clk pata_clk = {
-	.name = "pata_clk",
-	.flags = CLK_HAS_CTRL,
-	.reg = 0,
-	.bit = 28,
-	.calc = unity_clk_calc,
-	.parent = &ips_clk,
-};
-
-/*
- * PSC clocks (bits 27 - 16)
- * are setup elsewhere
- */
-
-static struct clk sata_clk = {
-	.name = "sata_clk",
-	.flags = CLK_HAS_CTRL,
-	.reg = 0,
-	.bit = 14,
-	.calc = unity_clk_calc,
-	.parent = &ips_clk,
-};
-
-static struct clk fec_clk = {
-	.name = "fec_clk",
-	.flags = CLK_HAS_CTRL,
-	.reg = 0,
-	.bit = 13,
-	.calc = unity_clk_calc,
-	.parent = &ips_clk,
-};
-
-static struct clk pci_clk = {
-	.name = "pci_clk",
-	.flags = CLK_HAS_CTRL,
-	.reg = 0,
-	.bit = 11,
-	.calc = generic_div_clk_calc,
-	.parent = &csb_clk,
-	.div_shift = 20,
-};
-
-/*
- * Clocks controlled by SCCR2 (.reg = 1)
- */
-static struct clk diu_clk = {
-	.name = "diu_clk",
-	.flags = CLK_HAS_CTRL,
-	.reg = 1,
-	.bit = 31,
-	.calc = diu_clk_calc,
-};
-
-static struct clk viu_clk = {
-	.name = "viu_clk",
-	.flags = CLK_HAS_CTRL,
-	.reg = 1,
-	.bit = 18,
-	.calc = viu_clk_calc,
-};
-
-static struct clk axe_clk = {
-	.name = "axe_clk",
-	.flags = CLK_HAS_CTRL,
-	.reg = 1,
-	.bit = 30,
-	.calc = unity_clk_calc,
-	.parent = &csb_clk,
-};
-
-static struct clk usb1_clk = {
-	.name = "usb1_clk",
-	.flags = CLK_HAS_CTRL,
-	.reg = 1,
-	.bit = 28,
-	.calc = unity_clk_calc,
-	.parent = &csb_clk,
-};
-
-static struct clk usb2_clk = {
-	.name = "usb2_clk",
-	.flags = CLK_HAS_CTRL,
-	.reg = 1,
-	.bit = 27,
-	.calc = unity_clk_calc,
-	.parent = &csb_clk,
-};
-
-static struct clk i2c_clk = {
-	.name = "i2c_clk",
-	.flags = CLK_HAS_CTRL,
-	.reg = 1,
-	.bit = 26,
-	.calc = unity_clk_calc,
-	.parent = &ips_clk,
-};
-
-static struct clk mscan_clk = {
-	.name = "mscan_clk",
-	.flags = CLK_HAS_CTRL,
-	.reg = 1,
-	.bit = 25,
-	.calc = unity_clk_calc,
-	.parent = &ips_clk,
-};
-
-static struct clk sdhc_clk = {
-	.name = "sdhc_clk",
-	.flags = CLK_HAS_CTRL,
-	.reg = 1,
-	.bit = 24,
-	.calc = unity_clk_calc,
-	.parent = &ips_clk,
-};
-
-static struct clk mbx_bus_clk = {
-	.name = "mbx_bus_clk",
-	.flags = CLK_HAS_CTRL,
-	.reg = 1,
-	.bit = 22,
-	.calc = half_clk_calc,
-	.parent = &csb_clk,
-};
-
-static struct clk mbx_clk = {
-	.name = "mbx_clk",
-	.flags = CLK_HAS_CTRL,
-	.reg = 1,
-	.bit = 21,
-	.calc = unity_clk_calc,
-	.parent = &csb_clk,
-};
-
-static struct clk mbx_3d_clk = {
-	.name = "mbx_3d_clk",
-	.flags = CLK_HAS_CTRL,
-	.reg = 1,
-	.bit = 20,
-	.calc = generic_div_clk_calc,
-	.parent = &mbx_bus_clk,
-	.div_shift = 14,
-};
-
-static void psc_mclk_in_calc(struct clk *clk)
-{
-	clk->rate = devtree_getfreq("psc_mclk_in");
-	if (!clk->rate)
-		clk->rate = 25000000;
-}
-
-static struct clk psc_mclk_in = {
-	.name = "psc_mclk_in",
-	.calc = psc_mclk_in_calc,
-};
-
-static struct clk spdif_txclk = {
-	.name = "spdif_txclk",
-	.flags = CLK_HAS_CTRL,
-	.reg = 1,
-	.bit = 23,
-};
-
-static struct clk spdif_rxclk = {
-	.name = "spdif_rxclk",
-	.flags = CLK_HAS_CTRL,
-	.reg = 1,
-	.bit = 23,
-};
-
-static void ac97_clk_calc(struct clk *clk)
-{
-	/* ac97 bit clock is always 24.567 MHz */
-	clk->rate = 24567000;
-}
-
-static struct clk ac97_clk = {
-	.name = "ac97_clk_in",
-	.calc = ac97_clk_calc,
-};
-
-static struct clk *rate_clks[] = {
-	&ref_clk,
-	&sys_clk,
-	&diu_clk,
-	&viu_clk,
-	&csb_clk,
-	&e300_clk,
-	&ips_clk,
-	&fec_clk,
-	&sata_clk,
-	&pata_clk,
-	&nfc_clk,
-	&lpc_clk,
-	&mbx_bus_clk,
-	&mbx_clk,
-	&mbx_3d_clk,
-	&axe_clk,
-	&usb1_clk,
-	&usb2_clk,
-	&i2c_clk,
-	&mscan_clk,
-	&sdhc_clk,
-	&pci_clk,
-	&psc_mclk_in,
-	&spdif_txclk,
-	&spdif_rxclk,
-	&ac97_clk,
-	NULL
-};
-
-static void rate_clk_init(struct clk *clk)
-{
-	if (clk->calc) {
-		clk->calc(clk);
-		clk->flags |= CLK_HAS_RATE;
-		clk_register(clk);
-	} else {
-		printk(KERN_WARNING
-		       "Could not initialize clk %s without a calc routine\n",
-		       clk->name);
-	}
-}
-
-static void rate_clks_init(void)
-{
-	struct clk **cpp, *clk;
-
-	cpp = rate_clks;
-	while ((clk = *cpp++))
-		rate_clk_init(clk);
-}
-
-/*
- * There are two clk enable registers with 32 enable bits each
- * psc clocks and device clocks are all stored in dev_clks
- */
-static struct clk dev_clks[2][32];
-
-/*
- * Given a psc number return the dev_clk
- * associated with it
- */
-static struct clk *psc_dev_clk(int pscnum)
-{
-	int reg, bit;
-	struct clk *clk;
-
-	reg = 0;
-	bit = 27 - pscnum;
-
-	clk = &dev_clks[reg][bit];
-	clk->reg = 0;
-	clk->bit = bit;
-	return clk;
-}
-
-/*
- * PSC clock rate calculation
- */
-static void psc_calc_rate(struct clk *clk, int pscnum, struct device_node *np)
-{
-	unsigned long mclk_src = sys_clk.rate;
-	unsigned long mclk_div;
-
-	/*
-	 * Can only change value of mclk divider
-	 * when the divider is disabled.
-	 *
-	 * Zero is not a valid divider so minimum
-	 * divider is 1
-	 *
-	 * disable/set divider/enable
-	 */
-	out_be32(&clockctl->pccr[pscnum], 0);
-	out_be32(&clockctl->pccr[pscnum], 0x00020000);
-	out_be32(&clockctl->pccr[pscnum], 0x00030000);
-
-	if (in_be32(&clockctl->pccr[pscnum]) & 0x80) {
-		clk->rate = spdif_rxclk.rate;
-		return;
-	}
-
-	switch ((in_be32(&clockctl->pccr[pscnum]) >> 14) & 0x3) {
-	case 0:
-		mclk_src = sys_clk.rate;
-		break;
-	case 1:
-		mclk_src = ref_clk.rate;
-		break;
-	case 2:
-		mclk_src = psc_mclk_in.rate;
-		break;
-	case 3:
-		mclk_src = spdif_txclk.rate;
-		break;
-	}
-
-	mclk_div = ((in_be32(&clockctl->pccr[pscnum]) >> 17) & 0x7fff) + 1;
-	clk->rate = mclk_src / mclk_div;
-}
-
-/*
- * Find all psc nodes in device tree and assign a clock
- * with name "psc%d_mclk" and dev pointing at the device
- * returned from of_find_device_by_node
- */
-static void psc_clks_init(void)
-{
-	struct device_node *np;
-	struct platform_device *ofdev;
-	u32 reg;
-	const char *psc_compat;
-
-	psc_compat = mpc512x_select_psc_compat();
-	if (!psc_compat)
-		return;
-
-	for_each_compatible_node(np, NULL, psc_compat) {
-		if (!of_property_read_u32(np, "reg", &reg)) {
-			int pscnum = (reg & 0xf00) >> 8;
-			struct clk *clk = psc_dev_clk(pscnum);
-
-			clk->flags = CLK_HAS_RATE | CLK_HAS_CTRL;
-			ofdev = of_find_device_by_node(np);
-			clk->dev = &ofdev->dev;
-			/*
-			 * AC97 is special rate clock does
-			 * not go through normal path
-			 */
-			if (of_device_is_compatible(np, "fsl,mpc5121-psc-ac97"))
-				clk->rate = ac97_clk.rate;
-			else
-				psc_calc_rate(clk, pscnum, np);
-			sprintf(clk->name, "psc%d_mclk", pscnum);
-			clk_register(clk);
-			clk_enable(clk);
-		}
-	}
-}
-
-static struct clk_interface mpc5121_clk_functions = {
-	.clk_get		= mpc5121_clk_get,
-	.clk_enable		= mpc5121_clk_enable,
-	.clk_disable		= mpc5121_clk_disable,
-	.clk_get_rate		= mpc5121_clk_get_rate,
-	.clk_put		= mpc5121_clk_put,
-	.clk_round_rate		= mpc5121_clk_round_rate,
-	.clk_set_rate		= mpc5121_clk_set_rate,
-	.clk_set_parent		= NULL,
-	.clk_get_parent		= NULL,
-};
-
-int __init mpc5121_clk_init(void)
-{
-	struct device_node *np;
-
-	np = of_find_compatible_node(NULL, NULL, "fsl,mpc5121-clock");
-	if (np) {
-		clockctl = of_iomap(np, 0);
-		of_node_put(np);
-	}
-
-	if (!clockctl) {
-		printk(KERN_ERR "Could not map clock control registers\n");
-		return 0;
-	}
-
-	rate_clks_init();
-	psc_clks_init();
-
-	/* leave clockctl mapped forever */
-	/*iounmap(clockctl); */
-	DEBUG_CLK_DUMP();
-	clocks_initialized++;
-	clk_functions = mpc5121_clk_functions;
-	return 0;
-}
diff --git a/arch/powerpc/platforms/512x/mpc512x_shared.c b/arch/powerpc/platforms/512x/mpc512x_shared.c
index 36b5652..adb95f03 100644
--- a/arch/powerpc/platforms/512x/mpc512x_shared.c
+++ b/arch/powerpc/platforms/512x/mpc512x_shared.c
@@ -12,6 +12,7 @@
  * (at your option) any later version.
  */
 
+#include <linux/clk.h>
 #include <linux/kernel.h>
 #include <linux/io.h>
 #include <linux/irq.h>
@@ -68,98 +69,112 @@
 	bool		in_use;
 };
 
-#define DIU_DIV_MASK	0x000000ff
+/* receives a pixel clock spec in picoseconds, adjusts the DIU clock rate */
 static void mpc512x_set_pixel_clock(unsigned int pixclock)
 {
-	unsigned long bestval, bestfreq, speed, busfreq;
-	unsigned long minpixclock, maxpixclock, pixval;
-	struct mpc512x_ccm __iomem *ccm;
 	struct device_node *np;
-	u32 temp;
-	long err;
-	int i;
+	struct clk *clk_diu;
+	unsigned long epsilon, minpixclock, maxpixclock;
+	unsigned long offset, want, got, delta;
 
-	np = of_find_compatible_node(NULL, NULL, "fsl,mpc5121-clock");
+	/* lookup and enable the DIU clock */
+	np = of_find_compatible_node(NULL, NULL, "fsl,mpc5121-diu");
 	if (!np) {
-		pr_err("Can't find clock control module.\n");
+		pr_err("Could not find DIU device tree node.\n");
 		return;
 	}
-
-	ccm = of_iomap(np, 0);
+	clk_diu = of_clk_get(np, 0);
+	if (IS_ERR(clk_diu)) {
+		/* backwards compat with device trees that lack clock specs */
+		clk_diu = clk_get_sys(np->name, "ipg");
+	}
 	of_node_put(np);
-	if (!ccm) {
-		pr_err("Can't map clock control module reg.\n");
+	if (IS_ERR(clk_diu)) {
+		pr_err("Could not lookup DIU clock.\n");
+		return;
+	}
+	if (clk_prepare_enable(clk_diu)) {
+		pr_err("Could not enable DIU clock.\n");
 		return;
 	}
 
-	np = of_find_node_by_type(NULL, "cpu");
-	if (np) {
-		const unsigned int *prop =
-			of_get_property(np, "bus-frequency", NULL);
+	/*
+	 * convert the picoseconds spec into the desired clock rate,
+	 * determine the acceptable clock range for the monitor (+/- 5%),
+	 * do the calculation in steps to avoid integer overflow
+	 */
+	pr_debug("DIU pixclock in ps - %u\n", pixclock);
+	pixclock = (1000000000 / pixclock) * 1000;
+	pr_debug("DIU pixclock freq  - %u\n", pixclock);
+	epsilon = pixclock / 20; /* pixclock * 0.05 */
+	pr_debug("DIU deviation      - %lu\n", epsilon);
+	minpixclock = pixclock - epsilon;
+	maxpixclock = pixclock + epsilon;
+	pr_debug("DIU minpixclock    - %lu\n", minpixclock);
+	pr_debug("DIU maxpixclock    - %lu\n", maxpixclock);
 
-		of_node_put(np);
-		if (prop) {
-			busfreq = *prop;
-		} else {
-			pr_err("Can't get bus-frequency property\n");
-			return;
-		}
-	} else {
-		pr_err("Can't find 'cpu' node.\n");
+	/*
+	 * check whether the DIU supports the desired pixel clock
+	 *
+	 * - simply request the desired clock and see what the
+	 *   platform's clock driver will make of it, assuming that it
+	 *   will setup the best approximation of the requested value
+	 * - try other candidate frequencies in the order of decreasing
+	 *   preference (i.e. with increasing distance from the desired
+	 *   pixel clock, and checking the lower frequency before the
+	 *   higher frequency to not overload the hardware) until the
+	 *   first match is found -- any potential subsequent match
+	 *   would only be as good as the former match or typically
+	 *	   would be less preferable
+	 *
+	 * the offset increment of pixelclock divided by 64 is an
+	 * arbitrary choice -- it's simple to calculate, in the typical
+	 * case we expect the first check to succeed already, in the
+	 * worst case seven frequencies get tested (the exact center and
+	 * three more values each to the left and to the right) before
+	 * the 5% tolerance window is exceeded, resulting in fast enough
+	 * execution yet high enough probability of finding a suitable
+	 * value, while the error rate will be in the order of single
+	 * percents
+	 */
+	for (offset = 0; offset <= epsilon; offset += pixclock / 64) {
+		want = pixclock - offset;
+		pr_debug("DIU checking clock - %lu\n", want);
+		clk_set_rate(clk_diu, want);
+		got = clk_get_rate(clk_diu);
+		delta = abs(pixclock - got);
+		if (delta < epsilon)
+			break;
+		if (!offset)
+			continue;
+		want = pixclock + offset;
+		pr_debug("DIU checking clock - %lu\n", want);
+		clk_set_rate(clk_diu, want);
+		got = clk_get_rate(clk_diu);
+		delta = abs(pixclock - got);
+		if (delta < epsilon)
+			break;
+	}
+	if (offset <= epsilon) {
+		pr_debug("DIU clock accepted - %lu\n", want);
+		pr_debug("DIU pixclock want %u, got %lu, delta %lu, eps %lu\n",
+			 pixclock, got, delta, epsilon);
 		return;
 	}
+	pr_warn("DIU pixclock auto search unsuccessful\n");
 
-	/* Pixel Clock configuration */
-	pr_debug("DIU: Bus Frequency = %lu\n", busfreq);
-	speed = busfreq * 4; /* DIU_DIV ratio is 4 * CSB_CLK / DIU_CLK */
-
-	/* Calculate the pixel clock with the smallest error */
-	/* calculate the following in steps to avoid overflow */
-	pr_debug("DIU pixclock in ps - %d\n", pixclock);
-	temp = (1000000000 / pixclock) * 1000;
-	pixclock = temp;
-	pr_debug("DIU pixclock freq - %u\n", pixclock);
-
-	temp = temp / 20; /* pixclock * 0.05 */
-	pr_debug("deviation = %d\n", temp);
-	minpixclock = pixclock - temp;
-	maxpixclock = pixclock + temp;
-	pr_debug("DIU minpixclock - %lu\n", minpixclock);
-	pr_debug("DIU maxpixclock - %lu\n", maxpixclock);
-	pixval = speed/pixclock;
-	pr_debug("DIU pixval = %lu\n", pixval);
-
-	err = LONG_MAX;
-	bestval = pixval;
-	pr_debug("DIU bestval = %lu\n", bestval);
-
-	bestfreq = 0;
-	for (i = -1; i <= 1; i++) {
-		temp = speed / (pixval+i);
-		pr_debug("DIU test pixval i=%d, pixval=%lu, temp freq. = %u\n",
-			i, pixval, temp);
-		if ((temp < minpixclock) || (temp > maxpixclock))
-			pr_debug("DIU exceeds monitor range (%lu to %lu)\n",
-				minpixclock, maxpixclock);
-		else if (abs(temp - pixclock) < err) {
-			pr_debug("Entered the else if block %d\n", i);
-			err = abs(temp - pixclock);
-			bestval = pixval + i;
-			bestfreq = temp;
-		}
-	}
-
-	pr_debug("DIU chose = %lx\n", bestval);
-	pr_debug("DIU error = %ld\n NomPixClk ", err);
-	pr_debug("DIU: Best Freq = %lx\n", bestfreq);
-	/* Modify DIU_DIV in CCM SCFR1 */
-	temp = in_be32(&ccm->scfr1);
-	pr_debug("DIU: Current value of SCFR1: 0x%08x\n", temp);
-	temp &= ~DIU_DIV_MASK;
-	temp |= (bestval & DIU_DIV_MASK);
-	out_be32(&ccm->scfr1, temp);
-	pr_debug("DIU: Modified value of SCFR1: 0x%08x\n", temp);
-	iounmap(ccm);
+	/*
+	 * what is the most appropriate action to take when the search
+	 * for an available pixel clock which is acceptable to the
+	 * monitor has failed?  disable the DIU (clock) or just provide
+	 * a "best effort"?  we go with the latter
+	 */
+	pr_warn("DIU pixclock best effort fallback (backend's choice)\n");
+	clk_set_rate(clk_diu, pixclock);
+	got = clk_get_rate(clk_diu);
+	delta = abs(pixclock - got);
+	pr_debug("DIU pixclock want %u, got %lu, delta %lu, eps %lu\n",
+		 pixclock, got, delta, epsilon);
 }
 
 static enum fsl_diu_monitor_port
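
As a rough illustration of the conversion and tolerance math introduced above, here is a minimal standalone sketch (the helper name and the 39722 ps sample value are illustrative only, not part of the patch):

	/* 39722 ps is roughly the 25.175 MHz VGA dot clock */
	static unsigned long diu_pixclock_window(unsigned int ps,
						 unsigned long *min,
						 unsigned long *max)
	{
		unsigned long hz, eps;

		hz  = (1000000000 / ps) * 1000;	/* picoseconds -> Hz, in two steps */
		eps = hz / 20;			/* +/- 5% monitor tolerance */
		*min = hz - eps;		/* ps = 39722 gives ~23.9 MHz */
		*max = hz + eps;		/* ps = 39722 gives ~26.4 MHz */
		return hz;			/* ps = 39722 gives ~25.17 MHz */
	}

The search loop above then probes clk_set_rate()/clk_get_rate() at hz, hz - hz/64, hz + hz/64, and so on, until the achieved rate falls inside [*min, *max] or the 5% window is exhausted.
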
diff --git a/arch/powerpc/platforms/52xx/Kconfig b/arch/powerpc/platforms/52xx/Kconfig
index af54174..b625a2c 100644
--- a/arch/powerpc/platforms/52xx/Kconfig
+++ b/arch/powerpc/platforms/52xx/Kconfig
@@ -1,7 +1,7 @@
 config PPC_MPC52xx
 	bool "52xx-based boards"
 	depends on 6xx
-	select PPC_CLOCK
+	select COMMON_CLK
 	select PPC_PCI_CHOICE
 
 config PPC_MPC5200_SIMPLE
diff --git a/arch/powerpc/platforms/powernv/eeh-ioda.c b/arch/powerpc/platforms/powernv/eeh-ioda.c
index e1e7161..f514743 100644
--- a/arch/powerpc/platforms/powernv/eeh-ioda.c
+++ b/arch/powerpc/platforms/powernv/eeh-ioda.c
@@ -44,7 +44,8 @@
 
 	/* We simply send special EEH event */
 	if ((changed_evts & OPAL_EVENT_PCI_ERROR) &&
-	    (events & OPAL_EVENT_PCI_ERROR))
+	    (events & OPAL_EVENT_PCI_ERROR) &&
+	    eeh_enabled())
 		eeh_send_failure_event(NULL);
 
 	return 0;
@@ -489,8 +490,7 @@
 static int ioda_eeh_reset(struct eeh_pe *pe, int option)
 {
 	struct pci_controller *hose = pe->phb;
-	struct eeh_dev *edev;
-	struct pci_dev *dev;
+	struct pci_bus *bus;
 	int ret;
 
 	/*
@@ -519,31 +519,11 @@
 	if (pe->type & EEH_PE_PHB) {
 		ret = ioda_eeh_phb_reset(hose, option);
 	} else {
-		if (pe->type & EEH_PE_DEVICE) {
-			/*
-			 * If it's device PE, we didn't refer to the parent
-			 * PCI bus yet. So we have to figure it out indirectly.
-			 */
-			edev = list_first_entry(&pe->edevs,
-					struct eeh_dev, list);
-			dev = eeh_dev_to_pci_dev(edev);
-			dev = dev->bus->self;
-		} else {
-			/*
-			 * If it's bus PE, the parent PCI bus is already there
-			 * and just pick it up.
-			 */
-			dev = pe->bus->self;
-		}
-
-		/*
-		 * Do reset based on the fact that the direct upstream bridge
-		 * is root bridge (port) or not.
-		 */
-		if (dev->bus->number == 0)
+		bus = eeh_pe_bus_get(pe);
+		if (pci_is_root_bus(bus))
 			ret = ioda_eeh_root_reset(hose, option);
 		else
-			ret = ioda_eeh_bridge_reset(hose, dev, option);
+			ret = ioda_eeh_bridge_reset(hose, bus->self, option);
 	}
 
 	return ret;
diff --git a/arch/powerpc/platforms/powernv/eeh-powernv.c b/arch/powerpc/platforms/powernv/eeh-powernv.c
index a79fddc..a59788e 100644
--- a/arch/powerpc/platforms/powernv/eeh-powernv.c
+++ b/arch/powerpc/platforms/powernv/eeh-powernv.c
@@ -145,7 +145,7 @@
 	 * Enable EEH explicitly so that we will do EEH check
 	 * while accessing I/O stuff
 	 */
-	eeh_subsystem_enabled = 1;
+	eeh_set_enable(true);
 
 	/* Save memory bars */
 	eeh_save_bars(edev);
diff --git a/arch/powerpc/platforms/powernv/pci-ioda.c b/arch/powerpc/platforms/powernv/pci-ioda.c
index 7d6dcc6..3b2b4fb 100644
--- a/arch/powerpc/platforms/powernv/pci-ioda.c
+++ b/arch/powerpc/platforms/powernv/pci-ioda.c
@@ -21,6 +21,7 @@
 #include <linux/irq.h>
 #include <linux/io.h>
 #include <linux/msi.h>
+#include <linux/memblock.h>
 
 #include <asm/sections.h>
 #include <asm/io.h>
@@ -460,9 +461,39 @@
 		return;
 
 	pe = &phb->ioda.pe_array[pdn->pe_number];
+	WARN_ON(get_dma_ops(&pdev->dev) != &dma_iommu_ops);
 	set_iommu_table_base_and_group(&pdev->dev, &pe->tce32_table);
 }
 
+static int pnv_pci_ioda_dma_set_mask(struct pnv_phb *phb,
+				     struct pci_dev *pdev, u64 dma_mask)
+{
+	struct pci_dn *pdn = pci_get_pdn(pdev);
+	struct pnv_ioda_pe *pe;
+	uint64_t top;
+	bool bypass = false;
+
+	if (WARN_ON(!pdn || pdn->pe_number == IODA_INVALID_PE))
+		return -ENODEV;
+
+	pe = &phb->ioda.pe_array[pdn->pe_number];
+	if (pe->tce_bypass_enabled) {
+		top = pe->tce_bypass_base + memblock_end_of_DRAM() - 1;
+		bypass = (dma_mask >= top);
+	}
+
+	if (bypass) {
+		dev_info(&pdev->dev, "Using 64-bit DMA iommu bypass\n");
+		set_dma_ops(&pdev->dev, &dma_direct_ops);
+		set_dma_offset(&pdev->dev, pe->tce_bypass_base);
+	} else {
+		dev_info(&pdev->dev, "Using 32-bit DMA via iommu\n");
+		set_dma_ops(&pdev->dev, &dma_iommu_ops);
+		set_iommu_table_base(&pdev->dev, &pe->tce32_table);
+	}
+	return 0;
+}
+
 static void pnv_ioda_setup_bus_dma(struct pnv_ioda_pe *pe, struct pci_bus *bus)
 {
 	struct pci_dev *dev;
@@ -657,6 +688,56 @@
 		__free_pages(tce_mem, get_order(TCE32_TABLE_SIZE * segs));
 }
 
+static void pnv_pci_ioda2_set_bypass(struct iommu_table *tbl, bool enable)
+{
+	struct pnv_ioda_pe *pe = container_of(tbl, struct pnv_ioda_pe,
+					      tce32_table);
+	uint16_t window_id = (pe->pe_number << 1 ) + 1;
+	int64_t rc;
+
+	pe_info(pe, "%sabling 64-bit DMA bypass\n", enable ? "En" : "Dis");
+	if (enable) {
+		phys_addr_t top = memblock_end_of_DRAM();
+
+		top = roundup_pow_of_two(top);
+		rc = opal_pci_map_pe_dma_window_real(pe->phb->opal_id,
+						     pe->pe_number,
+						     window_id,
+						     pe->tce_bypass_base,
+						     top);
+	} else {
+		rc = opal_pci_map_pe_dma_window_real(pe->phb->opal_id,
+						     pe->pe_number,
+						     window_id,
+						     pe->tce_bypass_base,
+						     0);
+
+		/*
+		 * We might want to reset the DMA ops of all devices on
+		 * this PE. However in theory, that shouldn't be necessary
+		 * as this is used for VFIO/KVM pass-through and the device
+		 * hasn't yet been returned to its kernel driver
+		 */
+	}
+	if (rc)
+		pe_err(pe, "OPAL error %lld configuring bypass window\n", rc);
+	else
+		pe->tce_bypass_enabled = enable;
+}
+
+static void pnv_pci_ioda2_setup_bypass_pe(struct pnv_phb *phb,
+					  struct pnv_ioda_pe *pe)
+{
+	/* TVE #1 is selected by PCI address bit 59 */
+	pe->tce_bypass_base = 1ull << 59;
+
+	/* Install set_bypass callback for VFIO */
+	pe->tce32_table.set_bypass = pnv_pci_ioda2_set_bypass;
+
+	/* Enable bypass by default */
+	pnv_pci_ioda2_set_bypass(&pe->tce32_table, true);
+}
+
 static void pnv_pci_ioda2_setup_dma_pe(struct pnv_phb *phb,
 				       struct pnv_ioda_pe *pe)
 {
@@ -727,6 +808,8 @@
 	else
 		pnv_ioda_setup_bus_dma(pe, pe->pbus);
 
+	/* Also create a bypass window */
+	pnv_pci_ioda2_setup_bypass_pe(phb, pe);
 	return;
 fail:
 	if (pe->tce32_seg >= 0)
@@ -1286,6 +1369,7 @@
 
 	/* Setup TCEs */
 	phb->dma_dev_setup = pnv_pci_ioda_dma_dev_setup;
+	phb->dma_set_mask = pnv_pci_ioda_dma_set_mask;
 
 	/* Setup shutdown function for kexec */
 	phb->shutdown = pnv_pci_ioda_shutdown;
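
For context, a device driver opts into the new bypass window above simply by requesting a wide enough DMA mask; a hedged sketch of the driver side follows (the probe function and device are hypothetical, only dma_set_mask() and DMA_BIT_MASK() are existing APIs):

	static int foo_probe(struct pci_dev *pdev, const struct pci_device_id *id)
	{
		/*
		 * A 64-bit mask reaches above tce_bypass_base + end of DRAM,
		 * so pnv_pci_ioda_dma_set_mask() selects dma_direct_ops.
		 */
		if (dma_set_mask(&pdev->dev, DMA_BIT_MASK(64)) == 0)
			return 0;
		/* otherwise stay on the 32-bit TCE table via dma_iommu_ops */
		return dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
	}
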
diff --git a/arch/powerpc/platforms/powernv/pci.c b/arch/powerpc/platforms/powernv/pci.c
index b555ebc..95633d7 100644
--- a/arch/powerpc/platforms/powernv/pci.c
+++ b/arch/powerpc/platforms/powernv/pci.c
@@ -634,6 +634,16 @@
 		pnv_pci_dma_fallback_setup(hose, pdev);
 }
 
+int pnv_pci_dma_set_mask(struct pci_dev *pdev, u64 dma_mask)
+{
+	struct pci_controller *hose = pci_bus_to_host(pdev->bus);
+	struct pnv_phb *phb = hose->private_data;
+
+	if (phb && phb->dma_set_mask)
+		return phb->dma_set_mask(phb, pdev, dma_mask);
+	return __dma_set_mask(&pdev->dev, dma_mask);
+}
+
 void pnv_pci_shutdown(void)
 {
 	struct pci_controller *hose;
diff --git a/arch/powerpc/platforms/powernv/pci.h b/arch/powerpc/platforms/powernv/pci.h
index 13f1942..cde1694 100644
--- a/arch/powerpc/platforms/powernv/pci.h
+++ b/arch/powerpc/platforms/powernv/pci.h
@@ -54,7 +54,9 @@
 	struct iommu_table	tce32_table;
 	phys_addr_t		tce_inval_reg_phys;
 
-	/* XXX TODO: Add support for additional 64-bit iommus */
+	/* 64-bit TCE bypass region */
+	bool			tce_bypass_enabled;
+	uint64_t		tce_bypass_base;
 
 	/* MSIs. MVE index is identical for 32 and 64 bit MSI
 	 * and -1 if not supported. (It's actually identical to the
@@ -113,6 +115,8 @@
 			 unsigned int hwirq, unsigned int virq,
 			 unsigned int is_64, struct msi_msg *msg);
 	void (*dma_dev_setup)(struct pnv_phb *phb, struct pci_dev *pdev);
+	int (*dma_set_mask)(struct pnv_phb *phb, struct pci_dev *pdev,
+			    u64 dma_mask);
 	void (*fixup_phb)(struct pci_controller *hose);
 	u32 (*bdfn_to_pe)(struct pnv_phb *phb, struct pci_bus *bus, u32 devfn);
 	void (*shutdown)(struct pnv_phb *phb);
diff --git a/arch/powerpc/platforms/powernv/powernv.h b/arch/powerpc/platforms/powernv/powernv.h
index de6819b..0051e10 100644
--- a/arch/powerpc/platforms/powernv/powernv.h
+++ b/arch/powerpc/platforms/powernv/powernv.h
@@ -7,12 +7,20 @@
 static inline void pnv_smp_init(void) { }
 #endif
 
+struct pci_dev;
+
 #ifdef CONFIG_PCI
 extern void pnv_pci_init(void);
 extern void pnv_pci_shutdown(void);
+extern int pnv_pci_dma_set_mask(struct pci_dev *pdev, u64 dma_mask);
 #else
 static inline void pnv_pci_init(void) { }
 static inline void pnv_pci_shutdown(void) { }
+
+static inline int pnv_pci_dma_set_mask(struct pci_dev *pdev, u64 dma_mask)
+{
+	return -ENODEV;
+}
 #endif
 
 extern void pnv_lpc_init(void);
diff --git a/arch/powerpc/platforms/powernv/setup.c b/arch/powerpc/platforms/powernv/setup.c
index a932feb..110f4fb 100644
--- a/arch/powerpc/platforms/powernv/setup.c
+++ b/arch/powerpc/platforms/powernv/setup.c
@@ -26,6 +26,8 @@
 #include <linux/of_fdt.h>
 #include <linux/interrupt.h>
 #include <linux/bug.h>
+#include <linux/cpuidle.h>
+#include <linux/pci.h>
 
 #include <asm/machdep.h>
 #include <asm/firmware.h>
@@ -140,6 +142,13 @@
 {
 }
 
+static int pnv_dma_set_mask(struct device *dev, u64 dma_mask)
+{
+	if (dev_is_pci(dev))
+		return pnv_pci_dma_set_mask(to_pci_dev(dev), dma_mask);
+	return __dma_set_mask(dev, dma_mask);
+}
+
 static void pnv_shutdown(void)
 {
 	/* Let the PCI code clear up IODA tables */
@@ -216,6 +225,16 @@
 	return 1;
 }
 
+void powernv_idle(void)
+{
+	/* Hook to cpuidle framework if available, else
+	 * call on default platform idle code
+	 */
+	if (cpuidle_idle_call()) {
+		power7_idle();
+	}
+}
+
 define_machine(powernv) {
 	.name			= "PowerNV",
 	.probe			= pnv_probe,
@@ -225,8 +244,9 @@
 	.show_cpuinfo		= pnv_show_cpuinfo,
 	.progress		= pnv_progress,
 	.machine_shutdown	= pnv_shutdown,
-	.power_save             = power7_idle,
+	.power_save             = powernv_idle,
 	.calibrate_decr		= generic_calibrate_decr,
+	.dma_set_mask		= pnv_dma_set_mask,
 #ifdef CONFIG_KEXEC
 	.kexec_cpu_down		= pnv_kexec_cpu_down,
 #endif
diff --git a/arch/powerpc/platforms/pseries/Kconfig b/arch/powerpc/platforms/pseries/Kconfig
index e666432..80b1d57 100644
--- a/arch/powerpc/platforms/pseries/Kconfig
+++ b/arch/powerpc/platforms/pseries/Kconfig
@@ -20,6 +20,7 @@
 	select PPC_DOORBELL
 	select HAVE_CONTEXT_TRACKING
 	select HOTPLUG_CPU if SMP
+	select ARCH_RANDOM
 	default y
 
 config PPC_SPLPAR
@@ -119,12 +120,3 @@
 	  which are accessible through a debugfs file.
 
 	  Say N if you are unsure.
-
-config PSERIES_IDLE
-	bool "Cpuidle driver for pSeries platforms"
-	depends on CPU_IDLE
-	depends on PPC_PSERIES
-	default y
-	help
-	  Select this option to enable processor idle state management
-	  through cpuidle subsystem.
diff --git a/arch/powerpc/platforms/pseries/Makefile b/arch/powerpc/platforms/pseries/Makefile
index fbccac9..0348079 100644
--- a/arch/powerpc/platforms/pseries/Makefile
+++ b/arch/powerpc/platforms/pseries/Makefile
@@ -21,7 +21,6 @@
 obj-$(CONFIG_CMM)		+= cmm.o
 obj-$(CONFIG_DTL)		+= dtl.o
 obj-$(CONFIG_IO_EVENT_IRQ)	+= io_event_irq.o
-obj-$(CONFIG_PSERIES_IDLE)	+= processor_idle.o
 obj-$(CONFIG_LPARCFG)		+= lparcfg.o
 
 ifeq ($(CONFIG_PPC_PSERIES),y)
diff --git a/arch/powerpc/platforms/pseries/eeh_pseries.c b/arch/powerpc/platforms/pseries/eeh_pseries.c
index 9ef3cc8..8a8f047 100644
--- a/arch/powerpc/platforms/pseries/eeh_pseries.c
+++ b/arch/powerpc/platforms/pseries/eeh_pseries.c
@@ -265,7 +265,7 @@
 			enable = 1;
 
 		if (enable) {
-			eeh_subsystem_enabled = 1;
+			eeh_set_enable(true);
 			eeh_add_to_parent_pe(edev);
 
 			pr_debug("%s: EEH enabled on %s PHB#%d-PE#%x, config addr#%x\n",
diff --git a/arch/powerpc/platforms/pseries/pci.c b/arch/powerpc/platforms/pseries/pci.c
index 70670a2..c413ec1 100644
--- a/arch/powerpc/platforms/pseries/pci.c
+++ b/arch/powerpc/platforms/pseries/pci.c
@@ -113,7 +113,8 @@
 {
 	struct device_node *dn, *pdn;
 	struct pci_bus *bus;
-	const __be32 *pcie_link_speed_stats;
+	u32 pcie_link_speed_stats[2];
+	int rc;
 
 	bus = bridge->bus;
 
@@ -122,38 +123,45 @@
 		return 0;
 
 	for (pdn = dn; pdn != NULL; pdn = of_get_next_parent(pdn)) {
-		pcie_link_speed_stats = of_get_property(pdn,
-			"ibm,pcie-link-speed-stats", NULL);
-		if (pcie_link_speed_stats)
+		rc = of_property_read_u32_array(pdn,
+				"ibm,pcie-link-speed-stats",
+				&pcie_link_speed_stats[0], 2);
+		if (!rc)
 			break;
 	}
 
 	of_node_put(pdn);
 
-	if (!pcie_link_speed_stats) {
+	if (rc) {
 		pr_err("no ibm,pcie-link-speed-stats property\n");
 		return 0;
 	}
 
-	switch (be32_to_cpup(pcie_link_speed_stats)) {
+	switch (pcie_link_speed_stats[0]) {
 	case 0x01:
 		bus->max_bus_speed = PCIE_SPEED_2_5GT;
 		break;
 	case 0x02:
 		bus->max_bus_speed = PCIE_SPEED_5_0GT;
 		break;
+	case 0x04:
+		bus->max_bus_speed = PCIE_SPEED_8_0GT;
+		break;
 	default:
 		bus->max_bus_speed = PCI_SPEED_UNKNOWN;
 		break;
 	}
 
-	switch (be32_to_cpup(pcie_link_speed_stats)) {
+	switch (pcie_link_speed_stats[1]) {
 	case 0x01:
 		bus->cur_bus_speed = PCIE_SPEED_2_5GT;
 		break;
 	case 0x02:
 		bus->cur_bus_speed = PCIE_SPEED_5_0GT;
 		break;
+	case 0x04:
+		bus->cur_bus_speed = PCIE_SPEED_8_0GT;
+		break;
 	default:
 		bus->cur_bus_speed = PCI_SPEED_UNKNOWN;
 		break;
diff --git a/arch/powerpc/platforms/pseries/setup.c b/arch/powerpc/platforms/pseries/setup.c
index 8e639d7..972df0ff 100644
--- a/arch/powerpc/platforms/pseries/setup.c
+++ b/arch/powerpc/platforms/pseries/setup.c
@@ -430,8 +430,7 @@
 {
 	long rc;
 
-	if (firmware_has_feature(FW_FEATURE_SET_MODE) &&
-	    (image->type != KEXEC_TYPE_CRASH)) {
+	if (firmware_has_feature(FW_FEATURE_SET_MODE)) {
 		rc = pSeries_disable_reloc_on_exc();
 		if (rc != H_SUCCESS)
 			pr_warning("Warning: Failed to disable relocation on "
diff --git a/arch/powerpc/sysdev/axonram.c b/arch/powerpc/sysdev/axonram.c
index 1c16141..47b6b9f 100644
--- a/arch/powerpc/sysdev/axonram.c
+++ b/arch/powerpc/sysdev/axonram.c
@@ -109,27 +109,28 @@
 	struct axon_ram_bank *bank = bio->bi_bdev->bd_disk->private_data;
 	unsigned long phys_mem, phys_end;
 	void *user_mem;
-	struct bio_vec *vec;
+	struct bio_vec vec;
 	unsigned int transfered;
-	unsigned short idx;
+	struct bvec_iter iter;
 
-	phys_mem = bank->io_addr + (bio->bi_sector << AXON_RAM_SECTOR_SHIFT);
+	phys_mem = bank->io_addr + (bio->bi_iter.bi_sector <<
+				    AXON_RAM_SECTOR_SHIFT);
 	phys_end = bank->io_addr + bank->size;
 	transfered = 0;
-	bio_for_each_segment(vec, bio, idx) {
-		if (unlikely(phys_mem + vec->bv_len > phys_end)) {
+	bio_for_each_segment(vec, bio, iter) {
+		if (unlikely(phys_mem + vec.bv_len > phys_end)) {
 			bio_io_error(bio);
 			return;
 		}
 
-		user_mem = page_address(vec->bv_page) + vec->bv_offset;
+		user_mem = page_address(vec.bv_page) + vec.bv_offset;
 		if (bio_data_dir(bio) == READ)
-			memcpy(user_mem, (void *) phys_mem, vec->bv_len);
+			memcpy(user_mem, (void *) phys_mem, vec.bv_len);
 		else
-			memcpy((void *) phys_mem, user_mem, vec->bv_len);
+			memcpy((void *) phys_mem, user_mem, vec.bv_len);
 
-		phys_mem += vec->bv_len;
-		transfered += vec->bv_len;
+		phys_mem += vec.bv_len;
+		transfered += vec.bv_len;
 	}
 	bio_endio(bio, 0);
 }
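
The axonram change above follows the block layer's switch to bvec iterators (immutable biovecs): bio_for_each_segment() now hands back a struct bio_vec by value and advances a struct bvec_iter instead of an index. A generic sketch of the new iteration pattern (the process_bytes() callback is purely illustrative):

	struct bio_vec bvec;
	struct bvec_iter iter;

	bio_for_each_segment(bvec, bio, iter) {
		void *addr = page_address(bvec.bv_page) + bvec.bv_offset;

		/* bvec is a copy, so members are accessed with '.' not '->' */
		process_bytes(addr, bvec.bv_len);
	}
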
diff --git a/arch/powerpc/sysdev/dart_iommu.c b/arch/powerpc/sysdev/dart_iommu.c
index bd968a4..62c47bb 100644
--- a/arch/powerpc/sysdev/dart_iommu.c
+++ b/arch/powerpc/sysdev/dart_iommu.c
@@ -292,6 +292,7 @@
 	iommu_table_dart.it_offset = 0;
 	/* it_size is in number of entries */
 	iommu_table_dart.it_size = dart_tablesize / sizeof(u32);
+	iommu_table_dart.it_page_shift = IOMMU_PAGE_SHIFT_4K;
 
 	/* Initialize the common IOMMU code */
 	iommu_table_dart.it_base = (unsigned long)dart_vbase;
diff --git a/arch/powerpc/sysdev/mpic.c b/arch/powerpc/sysdev/mpic.c
index 0e166ed..8209744 100644
--- a/arch/powerpc/sysdev/mpic.c
+++ b/arch/powerpc/sysdev/mpic.c
@@ -886,25 +886,25 @@
 
 	/* Default: read HW settings */
 	if (flow_type == IRQ_TYPE_DEFAULT) {
-		switch(vold & (MPIC_INFO(VECPRI_POLARITY_MASK) |
-			       MPIC_INFO(VECPRI_SENSE_MASK))) {
-			case MPIC_INFO(VECPRI_SENSE_EDGE) |
-			     MPIC_INFO(VECPRI_POLARITY_POSITIVE):
-				flow_type = IRQ_TYPE_EDGE_RISING;
-				break;
-			case MPIC_INFO(VECPRI_SENSE_EDGE) |
-			     MPIC_INFO(VECPRI_POLARITY_NEGATIVE):
-				flow_type = IRQ_TYPE_EDGE_FALLING;
-				break;
-			case MPIC_INFO(VECPRI_SENSE_LEVEL) |
-			     MPIC_INFO(VECPRI_POLARITY_POSITIVE):
-				flow_type = IRQ_TYPE_LEVEL_HIGH;
-				break;
-			case MPIC_INFO(VECPRI_SENSE_LEVEL) |
-			     MPIC_INFO(VECPRI_POLARITY_NEGATIVE):
-				flow_type = IRQ_TYPE_LEVEL_LOW;
-				break;
-		}
+		int vold_ps;
+
+		vold_ps = vold & (MPIC_INFO(VECPRI_POLARITY_MASK) |
+				  MPIC_INFO(VECPRI_SENSE_MASK));
+
+		if (vold_ps == (MPIC_INFO(VECPRI_SENSE_EDGE) |
+				MPIC_INFO(VECPRI_POLARITY_POSITIVE)))
+			flow_type = IRQ_TYPE_EDGE_RISING;
+		else if	(vold_ps == (MPIC_INFO(VECPRI_SENSE_EDGE) |
+				     MPIC_INFO(VECPRI_POLARITY_NEGATIVE)))
+			flow_type = IRQ_TYPE_EDGE_FALLING;
+		else if (vold_ps == (MPIC_INFO(VECPRI_SENSE_LEVEL) |
+				     MPIC_INFO(VECPRI_POLARITY_POSITIVE)))
+			flow_type = IRQ_TYPE_LEVEL_HIGH;
+		else if (vold_ps == (MPIC_INFO(VECPRI_SENSE_LEVEL) |
+				     MPIC_INFO(VECPRI_POLARITY_NEGATIVE)))
+			flow_type = IRQ_TYPE_LEVEL_LOW;
+		else
+			WARN_ONCE(1, "mpic: unknown IRQ type %d\n", vold);
 	}
 
 	/* Apply to irq desc */
diff --git a/arch/powerpc/xmon/xmon.c b/arch/powerpc/xmon/xmon.c
index a90731b..b079098 100644
--- a/arch/powerpc/xmon/xmon.c
+++ b/arch/powerpc/xmon/xmon.c
@@ -309,16 +309,23 @@
 
 	if (xmon_speaker == me)
 		return;
+
 	for (;;) {
-		if (xmon_speaker == 0) {
-			last_speaker = cmpxchg(&xmon_speaker, 0, me);
-			if (last_speaker == 0)
-				return;
-		}
-		timeout = 10000000;
+		last_speaker = cmpxchg(&xmon_speaker, 0, me);
+		if (last_speaker == 0)
+			return;
+
+		/*
+		 * Wait a full second for the lock; we might be on a slow
+		 * console, but check every 100us.
+		 */
+		timeout = 10000;
 		while (xmon_speaker == last_speaker) {
-			if (--timeout > 0)
+			if (--timeout > 0) {
+				udelay(100);
 				continue;
+			}
+
 			/* hostile takeover */
 			prev = cmpxchg(&xmon_speaker, last_speaker, me);
 			if (prev == last_speaker)
@@ -397,7 +404,6 @@
 	}
 
 	xmon_fault_jmp[cpu] = recurse_jmp;
-	cpumask_set_cpu(cpu, &cpus_in_xmon);
 
 	bp = NULL;
 	if ((regs->msr & (MSR_IR|MSR_PR|MSR_64BIT)) == (MSR_IR|MSR_64BIT))
@@ -419,6 +425,8 @@
 		release_output_lock();
 	}
 
+	cpumask_set_cpu(cpu, &cpus_in_xmon);
+
  waiting:
 	secondary = 1;
 	while (secondary && !xmon_gate) {
diff --git a/arch/s390/appldata/appldata_base.c b/arch/s390/appldata/appldata_base.c
index 4c4a1ce..47c8630 100644
--- a/arch/s390/appldata/appldata_base.c
+++ b/arch/s390/appldata/appldata_base.c
@@ -529,6 +529,7 @@
 {
 	int rc;
 
+	init_virt_timer(&appldata_timer);
 	appldata_timer.function = appldata_timer_function;
 	appldata_timer.data = (unsigned long) &appldata_work;
 
diff --git a/arch/s390/crypto/aes_s390.c b/arch/s390/crypto/aes_s390.c
index b3feabd..cf3c008 100644
--- a/arch/s390/crypto/aes_s390.c
+++ b/arch/s390/crypto/aes_s390.c
@@ -25,6 +25,7 @@
 #include <linux/err.h>
 #include <linux/module.h>
 #include <linux/init.h>
+#include <linux/spinlock.h>
 #include "crypt_s390.h"
 
 #define AES_KEYLEN_128		1
@@ -32,6 +33,7 @@
 #define AES_KEYLEN_256		4
 
 static u8 *ctrblk;
+static DEFINE_SPINLOCK(ctrblk_lock);
 static char keylen_flag;
 
 struct s390_aes_ctx {
@@ -758,43 +760,67 @@
 	return aes_set_key(tfm, in_key, key_len);
 }
 
+static unsigned int __ctrblk_init(u8 *ctrptr, unsigned int nbytes)
+{
+	unsigned int i, n;
+
+	/* only use complete blocks, max. PAGE_SIZE */
+	n = (nbytes > PAGE_SIZE) ? PAGE_SIZE : nbytes & ~(AES_BLOCK_SIZE - 1);
+	for (i = AES_BLOCK_SIZE; i < n; i += AES_BLOCK_SIZE) {
+		memcpy(ctrptr + i, ctrptr + i - AES_BLOCK_SIZE,
+		       AES_BLOCK_SIZE);
+		crypto_inc(ctrptr + i, AES_BLOCK_SIZE);
+	}
+	return n;
+}
+
 static int ctr_aes_crypt(struct blkcipher_desc *desc, long func,
 			 struct s390_aes_ctx *sctx, struct blkcipher_walk *walk)
 {
 	int ret = blkcipher_walk_virt_block(desc, walk, AES_BLOCK_SIZE);
-	unsigned int i, n, nbytes;
-	u8 buf[AES_BLOCK_SIZE];
-	u8 *out, *in;
+	unsigned int n, nbytes;
+	u8 buf[AES_BLOCK_SIZE], ctrbuf[AES_BLOCK_SIZE];
+	u8 *out, *in, *ctrptr = ctrbuf;
 
 	if (!walk->nbytes)
 		return ret;
 
-	memcpy(ctrblk, walk->iv, AES_BLOCK_SIZE);
+	if (spin_trylock(&ctrblk_lock))
+		ctrptr = ctrblk;
+
+	memcpy(ctrptr, walk->iv, AES_BLOCK_SIZE);
 	while ((nbytes = walk->nbytes) >= AES_BLOCK_SIZE) {
 		out = walk->dst.virt.addr;
 		in = walk->src.virt.addr;
 		while (nbytes >= AES_BLOCK_SIZE) {
-			/* only use complete blocks, max. PAGE_SIZE */
-			n = (nbytes > PAGE_SIZE) ? PAGE_SIZE :
-						 nbytes & ~(AES_BLOCK_SIZE - 1);
-			for (i = AES_BLOCK_SIZE; i < n; i += AES_BLOCK_SIZE) {
-				memcpy(ctrblk + i, ctrblk + i - AES_BLOCK_SIZE,
-				       AES_BLOCK_SIZE);
-				crypto_inc(ctrblk + i, AES_BLOCK_SIZE);
-			}
-			ret = crypt_s390_kmctr(func, sctx->key, out, in, n, ctrblk);
-			if (ret < 0 || ret != n)
+			if (ctrptr == ctrblk)
+				n = __ctrblk_init(ctrptr, nbytes);
+			else
+				n = AES_BLOCK_SIZE;
+			ret = crypt_s390_kmctr(func, sctx->key, out, in,
+					       n, ctrptr);
+			if (ret < 0 || ret != n) {
+				if (ctrptr == ctrblk)
+					spin_unlock(&ctrblk_lock);
 				return -EIO;
+			}
 			if (n > AES_BLOCK_SIZE)
-				memcpy(ctrblk, ctrblk + n - AES_BLOCK_SIZE,
+				memcpy(ctrptr, ctrptr + n - AES_BLOCK_SIZE,
 				       AES_BLOCK_SIZE);
-			crypto_inc(ctrblk, AES_BLOCK_SIZE);
+			crypto_inc(ctrptr, AES_BLOCK_SIZE);
 			out += n;
 			in += n;
 			nbytes -= n;
 		}
 		ret = blkcipher_walk_done(desc, walk, nbytes);
 	}
+	if (ctrptr == ctrblk) {
+		if (nbytes)
+			memcpy(ctrbuf, ctrptr, AES_BLOCK_SIZE);
+		else
+			memcpy(walk->iv, ctrptr, AES_BLOCK_SIZE);
+		spin_unlock(&ctrblk_lock);
+	}
 	/*
 	 * final block may be < AES_BLOCK_SIZE, copy only nbytes
 	 */
@@ -802,14 +828,15 @@
 		out = walk->dst.virt.addr;
 		in = walk->src.virt.addr;
 		ret = crypt_s390_kmctr(func, sctx->key, buf, in,
-				       AES_BLOCK_SIZE, ctrblk);
+				       AES_BLOCK_SIZE, ctrbuf);
 		if (ret < 0 || ret != AES_BLOCK_SIZE)
 			return -EIO;
 		memcpy(out, buf, nbytes);
-		crypto_inc(ctrblk, AES_BLOCK_SIZE);
+		crypto_inc(ctrbuf, AES_BLOCK_SIZE);
 		ret = blkcipher_walk_done(desc, walk, 0);
+		memcpy(walk->iv, ctrbuf, AES_BLOCK_SIZE);
 	}
-	memcpy(walk->iv, ctrblk, AES_BLOCK_SIZE);
+
 	return ret;
 }
 
diff --git a/arch/s390/crypto/des_s390.c b/arch/s390/crypto/des_s390.c
index 200f2a1..0a5aac8 100644
--- a/arch/s390/crypto/des_s390.c
+++ b/arch/s390/crypto/des_s390.c
@@ -25,6 +25,7 @@
 #define DES3_KEY_SIZE	(3 * DES_KEY_SIZE)
 
 static u8 *ctrblk;
+static DEFINE_SPINLOCK(ctrblk_lock);
 
 struct s390_des_ctx {
 	u8 iv[DES_BLOCK_SIZE];
@@ -105,29 +106,35 @@
 }
 
 static int cbc_desall_crypt(struct blkcipher_desc *desc, long func,
-			    u8 *iv, struct blkcipher_walk *walk)
+			    struct blkcipher_walk *walk)
 {
+	struct s390_des_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
 	int ret = blkcipher_walk_virt(desc, walk);
 	unsigned int nbytes = walk->nbytes;
+	struct {
+		u8 iv[DES_BLOCK_SIZE];
+		u8 key[DES3_KEY_SIZE];
+	} param;
 
 	if (!nbytes)
 		goto out;
 
-	memcpy(iv, walk->iv, DES_BLOCK_SIZE);
+	memcpy(param.iv, walk->iv, DES_BLOCK_SIZE);
+	memcpy(param.key, ctx->key, DES3_KEY_SIZE);
 	do {
 		/* only use complete blocks */
 		unsigned int n = nbytes & ~(DES_BLOCK_SIZE - 1);
 		u8 *out = walk->dst.virt.addr;
 		u8 *in = walk->src.virt.addr;
 
-		ret = crypt_s390_kmc(func, iv, out, in, n);
+		ret = crypt_s390_kmc(func, &param, out, in, n);
 		if (ret < 0 || ret != n)
 			return -EIO;
 
 		nbytes &= DES_BLOCK_SIZE - 1;
 		ret = blkcipher_walk_done(desc, walk, nbytes);
 	} while ((nbytes = walk->nbytes));
-	memcpy(walk->iv, iv, DES_BLOCK_SIZE);
+	memcpy(walk->iv, param.iv, DES_BLOCK_SIZE);
 
 out:
 	return ret;
@@ -179,22 +186,20 @@
 			   struct scatterlist *dst, struct scatterlist *src,
 			   unsigned int nbytes)
 {
-	struct s390_des_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
 	struct blkcipher_walk walk;
 
 	blkcipher_walk_init(&walk, dst, src, nbytes);
-	return cbc_desall_crypt(desc, KMC_DEA_ENCRYPT, ctx->iv, &walk);
+	return cbc_desall_crypt(desc, KMC_DEA_ENCRYPT, &walk);
 }
 
 static int cbc_des_decrypt(struct blkcipher_desc *desc,
 			   struct scatterlist *dst, struct scatterlist *src,
 			   unsigned int nbytes)
 {
-	struct s390_des_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
 	struct blkcipher_walk walk;
 
 	blkcipher_walk_init(&walk, dst, src, nbytes);
-	return cbc_desall_crypt(desc, KMC_DEA_DECRYPT, ctx->iv, &walk);
+	return cbc_desall_crypt(desc, KMC_DEA_DECRYPT, &walk);
 }
 
 static struct crypto_alg cbc_des_alg = {
@@ -327,22 +332,20 @@
 			    struct scatterlist *dst, struct scatterlist *src,
 			    unsigned int nbytes)
 {
-	struct s390_des_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
 	struct blkcipher_walk walk;
 
 	blkcipher_walk_init(&walk, dst, src, nbytes);
-	return cbc_desall_crypt(desc, KMC_TDEA_192_ENCRYPT, ctx->iv, &walk);
+	return cbc_desall_crypt(desc, KMC_TDEA_192_ENCRYPT, &walk);
 }
 
 static int cbc_des3_decrypt(struct blkcipher_desc *desc,
 			    struct scatterlist *dst, struct scatterlist *src,
 			    unsigned int nbytes)
 {
-	struct s390_des_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
 	struct blkcipher_walk walk;
 
 	blkcipher_walk_init(&walk, dst, src, nbytes);
-	return cbc_desall_crypt(desc, KMC_TDEA_192_DECRYPT, ctx->iv, &walk);
+	return cbc_desall_crypt(desc, KMC_TDEA_192_DECRYPT, &walk);
 }
 
 static struct crypto_alg cbc_des3_alg = {
@@ -366,54 +369,80 @@
 	}
 };
 
+static unsigned int __ctrblk_init(u8 *ctrptr, unsigned int nbytes)
+{
+	unsigned int i, n;
+
+	/* align to block size, max. PAGE_SIZE */
+	n = (nbytes > PAGE_SIZE) ? PAGE_SIZE : nbytes & ~(DES_BLOCK_SIZE - 1);
+	for (i = DES_BLOCK_SIZE; i < n; i += DES_BLOCK_SIZE) {
+		memcpy(ctrptr + i, ctrptr + i - DES_BLOCK_SIZE, DES_BLOCK_SIZE);
+		crypto_inc(ctrptr + i, DES_BLOCK_SIZE);
+	}
+	return n;
+}
+
 static int ctr_desall_crypt(struct blkcipher_desc *desc, long func,
-			    struct s390_des_ctx *ctx, struct blkcipher_walk *walk)
+			    struct s390_des_ctx *ctx,
+			    struct blkcipher_walk *walk)
 {
 	int ret = blkcipher_walk_virt_block(desc, walk, DES_BLOCK_SIZE);
-	unsigned int i, n, nbytes;
-	u8 buf[DES_BLOCK_SIZE];
-	u8 *out, *in;
+	unsigned int n, nbytes;
+	u8 buf[DES_BLOCK_SIZE], ctrbuf[DES_BLOCK_SIZE];
+	u8 *out, *in, *ctrptr = ctrbuf;
 
-	memcpy(ctrblk, walk->iv, DES_BLOCK_SIZE);
+	if (!walk->nbytes)
+		return ret;
+
+	if (spin_trylock(&ctrblk_lock))
+		ctrptr = ctrblk;
+
+	memcpy(ctrptr, walk->iv, DES_BLOCK_SIZE);
 	while ((nbytes = walk->nbytes) >= DES_BLOCK_SIZE) {
 		out = walk->dst.virt.addr;
 		in = walk->src.virt.addr;
 		while (nbytes >= DES_BLOCK_SIZE) {
-			/* align to block size, max. PAGE_SIZE */
-			n = (nbytes > PAGE_SIZE) ? PAGE_SIZE :
-				nbytes & ~(DES_BLOCK_SIZE - 1);
-			for (i = DES_BLOCK_SIZE; i < n; i += DES_BLOCK_SIZE) {
-				memcpy(ctrblk + i, ctrblk + i - DES_BLOCK_SIZE,
-				       DES_BLOCK_SIZE);
-				crypto_inc(ctrblk + i, DES_BLOCK_SIZE);
-			}
-			ret = crypt_s390_kmctr(func, ctx->key, out, in, n, ctrblk);
-			if (ret < 0 || ret != n)
+			if (ctrptr == ctrblk)
+				n = __ctrblk_init(ctrptr, nbytes);
+			else
+				n = DES_BLOCK_SIZE;
+			ret = crypt_s390_kmctr(func, ctx->key, out, in,
+					       n, ctrptr);
+			if (ret < 0 || ret != n) {
+				if (ctrptr == ctrblk)
+					spin_unlock(&ctrblk_lock);
 				return -EIO;
+			}
 			if (n > DES_BLOCK_SIZE)
-				memcpy(ctrblk, ctrblk + n - DES_BLOCK_SIZE,
+				memcpy(ctrptr, ctrptr + n - DES_BLOCK_SIZE,
 				       DES_BLOCK_SIZE);
-			crypto_inc(ctrblk, DES_BLOCK_SIZE);
+			crypto_inc(ctrptr, DES_BLOCK_SIZE);
 			out += n;
 			in += n;
 			nbytes -= n;
 		}
 		ret = blkcipher_walk_done(desc, walk, nbytes);
 	}
-
+	if (ctrptr == ctrblk) {
+		if (nbytes)
+			memcpy(ctrbuf, ctrptr, DES_BLOCK_SIZE);
+		else
+			memcpy(walk->iv, ctrptr, DES_BLOCK_SIZE);
+		spin_unlock(&ctrblk_lock);
+	}
 	/* final block may be < DES_BLOCK_SIZE, copy only nbytes */
 	if (nbytes) {
 		out = walk->dst.virt.addr;
 		in = walk->src.virt.addr;
 		ret = crypt_s390_kmctr(func, ctx->key, buf, in,
-				       DES_BLOCK_SIZE, ctrblk);
+				       DES_BLOCK_SIZE, ctrbuf);
 		if (ret < 0 || ret != DES_BLOCK_SIZE)
 			return -EIO;
 		memcpy(out, buf, nbytes);
-		crypto_inc(ctrblk, DES_BLOCK_SIZE);
+		crypto_inc(ctrbuf, DES_BLOCK_SIZE);
 		ret = blkcipher_walk_done(desc, walk, 0);
+		memcpy(walk->iv, ctrbuf, DES_BLOCK_SIZE);
 	}
-	memcpy(walk->iv, ctrblk, DES_BLOCK_SIZE);
 	return ret;
 }
 
diff --git a/arch/s390/include/asm/kvm_host.h b/arch/s390/include/asm/kvm_host.h
index d5bc375..eef3dd3 100644
--- a/arch/s390/include/asm/kvm_host.h
+++ b/arch/s390/include/asm/kvm_host.h
@@ -106,9 +106,22 @@
 	__u64	gbea;			/* 0x0180 */
 	__u8	reserved188[24];	/* 0x0188 */
 	__u32	fac;			/* 0x01a0 */
-	__u8	reserved1a4[92];	/* 0x01a4 */
+	__u8	reserved1a4[68];	/* 0x01a4 */
+	__u64	itdba;			/* 0x01e8 */
+	__u8	reserved1f0[16];	/* 0x01f0 */
 } __attribute__((packed));
 
+struct kvm_s390_itdb {
+	__u8	data[256];
+} __packed;
+
+struct sie_page {
+	struct kvm_s390_sie_block sie_block;
+	__u8 reserved200[1024];		/* 0x0200 */
+	struct kvm_s390_itdb itdb;	/* 0x0600 */
+	__u8 reserved700[2304];		/* 0x0700 */
+} __packed;
+
 struct kvm_vcpu_stat {
 	u32 exit_userspace;
 	u32 exit_null;
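
Per the offsets noted in the comments, struct sie_page packs the SIE control block, the ITDB at 0x600 and the trailing padding into exactly one 4 KiB page, which is why the vcpu allocation below can back it with a single get_zeroed_page() call. A hypothetical compile-time check of that layout (not part of the patch):

	static inline void sie_page_layout_check(void)
	{
		BUILD_BUG_ON(offsetof(struct sie_page, itdb) != 0x600);
		BUILD_BUG_ON(sizeof(struct sie_page) != 4096);
	}
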
diff --git a/arch/s390/kernel/compat_linux.c b/arch/s390/kernel/compat_linux.c
index e030d2b..db02052 100644
--- a/arch/s390/kernel/compat_linux.c
+++ b/arch/s390/kernel/compat_linux.c
@@ -286,8 +286,8 @@
 }
 
 #ifdef CONFIG_SYSVIPC
-COMPAT_SYSCALL_DEFINE5(s390_ipc, uint, call, int, first, unsigned long, second,
-		unsigned long, third, compat_uptr_t, ptr)
+COMPAT_SYSCALL_DEFINE5(s390_ipc, uint, call, int, first, compat_ulong_t, second,
+		compat_ulong_t, third, compat_uptr_t, ptr)
 {
 	if (call >> 16)		/* hack for backward compatibility */
 		return -EINVAL;
diff --git a/arch/s390/kernel/head64.S b/arch/s390/kernel/head64.S
index b9e25ae..d7c0050 100644
--- a/arch/s390/kernel/head64.S
+++ b/arch/s390/kernel/head64.S
@@ -59,7 +59,7 @@
 	.quad	0			# cr12: tracing off
 	.quad	0			# cr13: home space segment table
 	.quad	0xc0000000		# cr14: machine check handling off
-	.quad	0			# cr15: linkage stack operations
+	.quad	.Llinkage_stack		# cr15: linkage stack operations
 .Lpcmsk:.quad	0x0000000180000000
 .L4malign:.quad 0xffffffffffc00000
 .Lscan2g:.quad	0x80000000 + 0x20000 - 8	# 2GB + 128K - 8
@@ -67,12 +67,15 @@
 .Lparmaddr:
 	.quad	PARMAREA
 	.align	64
-.Lduct: .long	0,0,0,0,.Lduald,0,0,0
+.Lduct: .long	0,.Laste,.Laste,0,.Lduald,0,0,0
 	.long	0,0,0,0,0,0,0,0
+.Laste:	.quad	0,0xffffffffffffffff,0,0,0,0,0,0
 	.align	128
 .Lduald:.rept	8
 	.long	0x80000000,0,0,0	# invalid access-list entries
 	.endr
+.Llinkage_stack:
+	.long	0,0,0x89000000,0,0,0,0x8a000000,0
 
 ENTRY(_ehead)
 
diff --git a/arch/s390/kvm/intercept.c b/arch/s390/kvm/intercept.c
index 5ddbbde..eeb1ac7 100644
--- a/arch/s390/kvm/intercept.c
+++ b/arch/s390/kvm/intercept.c
@@ -112,6 +112,17 @@
 static int handle_prog(struct kvm_vcpu *vcpu)
 {
 	vcpu->stat.exit_program_interruption++;
+
+	/* Restore ITDB to Program-Interruption TDB in guest memory */
+	if (IS_TE_ENABLED(vcpu) &&
+	    !(current->thread.per_flags & PER_FLAG_NO_TE) &&
+	    IS_ITDB_VALID(vcpu)) {
+		copy_to_guest(vcpu, TDB_ADDR, vcpu->arch.sie_block->itdba,
+			      sizeof(struct kvm_s390_itdb));
+		memset((void *) vcpu->arch.sie_block->itdba, 0,
+		       sizeof(struct kvm_s390_itdb));
+	}
+
 	trace_kvm_s390_intercept_prog(vcpu, vcpu->arch.sie_block->iprcc);
 	return kvm_s390_inject_program_int(vcpu, vcpu->arch.sie_block->iprcc);
 }
diff --git a/arch/s390/kvm/kvm-s390.c b/arch/s390/kvm/kvm-s390.c
index 7635c00..e0676f3 100644
--- a/arch/s390/kvm/kvm-s390.c
+++ b/arch/s390/kvm/kvm-s390.c
@@ -395,6 +395,9 @@
 						    CPUSTAT_STOPPED |
 						    CPUSTAT_GED);
 	vcpu->arch.sie_block->ecb   = 6;
+	if (test_vfacility(50) && test_vfacility(73))
+		vcpu->arch.sie_block->ecb |= 0x10;
+
 	vcpu->arch.sie_block->ecb2  = 8;
 	vcpu->arch.sie_block->eca   = 0xC1002001U;
 	vcpu->arch.sie_block->fac   = (int) (long) vfacilities;
@@ -411,6 +414,7 @@
 				      unsigned int id)
 {
 	struct kvm_vcpu *vcpu;
+	struct sie_page *sie_page;
 	int rc = -EINVAL;
 
 	if (id >= KVM_MAX_VCPUS)
@@ -422,12 +426,13 @@
 	if (!vcpu)
 		goto out;
 
-	vcpu->arch.sie_block = (struct kvm_s390_sie_block *)
-					get_zeroed_page(GFP_KERNEL);
-
-	if (!vcpu->arch.sie_block)
+	sie_page = (struct sie_page *) get_zeroed_page(GFP_KERNEL);
+	if (!sie_page)
 		goto out_free_cpu;
 
+	vcpu->arch.sie_block = &sie_page->sie_block;
+	vcpu->arch.sie_block->itdba = (unsigned long) &sie_page->itdb;
+
 	vcpu->arch.sie_block->icpua = id;
 	if (!kvm_is_ucontrol(kvm)) {
 		if (!kvm->arch.sca) {
@@ -1182,8 +1187,8 @@
 		return -ENOMEM;
 	}
 	memcpy(vfacilities, S390_lowcore.stfle_fac_list, 16);
-	vfacilities[0] &= 0xff82fff3f47c0000UL;
-	vfacilities[1] &= 0x001c000000000000UL;
+	vfacilities[0] &= 0xff82fff3f4fc2000UL;
+	vfacilities[1] &= 0x005c000000000000UL;
 	return 0;
 }
 
diff --git a/arch/s390/kvm/kvm-s390.h b/arch/s390/kvm/kvm-s390.h
index 095cf51..f9559b0 100644
--- a/arch/s390/kvm/kvm-s390.h
+++ b/arch/s390/kvm/kvm-s390.h
@@ -26,6 +26,12 @@
 
 int kvm_handle_sie_intercept(struct kvm_vcpu *vcpu);
 
+/* Transactional Memory Execution related macros */
+#define IS_TE_ENABLED(vcpu)	((vcpu->arch.sie_block->ecb & 0x10))
+#define TDB_ADDR		0x1800UL
+#define TDB_FORMAT1		1
+#define IS_ITDB_VALID(vcpu)	((*(char *)vcpu->arch.sie_block->itdba == TDB_FORMAT1))
+
 #define VM_EVENT(d_kvm, d_loglevel, d_string, d_args...)\
 do { \
 	debug_sprintf_event(d_kvm->arch.dbf, d_loglevel, d_string "\n", \
diff --git a/arch/s390/mm/page-states.c b/arch/s390/mm/page-states.c
index a90d45e..27c50f4 100644
--- a/arch/s390/mm/page-states.c
+++ b/arch/s390/mm/page-states.c
@@ -12,6 +12,8 @@
 #include <linux/mm.h>
 #include <linux/gfp.h>
 #include <linux/init.h>
+#include <asm/setup.h>
+#include <asm/ipl.h>
 
 #define ESSA_SET_STABLE		1
 #define ESSA_SET_UNUSED		2
@@ -41,6 +43,14 @@
 
 	if (!cmma_flag)
 		return;
+	/*
+	 * Disable CMM for dump, otherwise the tprot based memory
+	 * detection can fail because of unstable pages.
+	 */
+	if (OLDMEM_BASE || ipl_info.type == IPL_TYPE_FCP_DUMP) {
+		cmma_flag = 0;
+		return;
+	}
 	asm volatile(
 		"       .insn rrf,0xb9ab0000,%1,%1,0,0\n"
 		"0:     la      %0,0\n"
diff --git a/arch/score/lib/checksum.S b/arch/score/lib/checksum.S
index 706157e..1141f2b 100644
--- a/arch/score/lib/checksum.S
+++ b/arch/score/lib/checksum.S
@@ -137,7 +137,7 @@
 	ldi r25, 0
 	mv r10, r5
 	cmpi.c	r5, 0x8
-	blt	small_csumcpy		/* < 8(singed) bytes to copy */
+	blt	small_csumcpy		/* < 8(signed) bytes to copy */
 	cmpi.c	r5, 0x0
 	beq	out
 	andri.c	r25, src, 0x1		/* odd buffer? */
diff --git a/arch/sparc/include/uapi/asm/unistd.h b/arch/sparc/include/uapi/asm/unistd.h
index 62ced58..b73274f 100644
--- a/arch/sparc/include/uapi/asm/unistd.h
+++ b/arch/sparc/include/uapi/asm/unistd.h
@@ -408,8 +408,10 @@
 #define __NR_kern_features	340
 #define __NR_kcmp		341
 #define __NR_finit_module	342
+#define __NR_sched_setattr	343
+#define __NR_sched_getattr	344
 
-#define NR_syscalls		343
+#define NR_syscalls		345
 
 /* Bitmask values returned from kern_features system call.  */
 #define KERN_FEATURE_MIXED_MODE_STACK	0x00000001
diff --git a/arch/sparc/kernel/cpumap.c b/arch/sparc/kernel/cpumap.c
index cb5d272..de1c844 100644
--- a/arch/sparc/kernel/cpumap.c
+++ b/arch/sparc/kernel/cpumap.c
@@ -6,7 +6,6 @@
 #include <linux/export.h>
 #include <linux/slab.h>
 #include <linux/kernel.h>
-#include <linux/init.h>
 #include <linux/cpumask.h>
 #include <linux/spinlock.h>
 #include <asm/cpudata.h>
diff --git a/arch/sparc/kernel/ebus.c b/arch/sparc/kernel/ebus.c
index e306fb08..acf8314 100644
--- a/arch/sparc/kernel/ebus.c
+++ b/arch/sparc/kernel/ebus.c
@@ -7,7 +7,6 @@
 #include <linux/export.h>
 #include <linux/kernel.h>
 #include <linux/types.h>
-#include <linux/init.h>
 #include <linux/interrupt.h>
 #include <linux/delay.h>
 
diff --git a/arch/sparc/kernel/hvtramp.S b/arch/sparc/kernel/hvtramp.S
index 4eb1a5a..b7ddcdd 100644
--- a/arch/sparc/kernel/hvtramp.S
+++ b/arch/sparc/kernel/hvtramp.S
@@ -3,7 +3,6 @@
  * Copyright (C) 2007, 2008 David S. Miller <davem@davemloft.net>
  */
 
-#include <linux/init.h>
 
 #include <asm/thread_info.h>
 #include <asm/hypervisor.h>
diff --git a/arch/sparc/kernel/of_device_common.c b/arch/sparc/kernel/of_device_common.c
index de199bf..3241f56 100644
--- a/arch/sparc/kernel/of_device_common.c
+++ b/arch/sparc/kernel/of_device_common.c
@@ -1,7 +1,6 @@
 #include <linux/string.h>
 #include <linux/kernel.h>
 #include <linux/of.h>
-#include <linux/init.h>
 #include <linux/export.h>
 #include <linux/mod_devicetable.h>
 #include <linux/errno.h>
diff --git a/arch/sparc/kernel/pci.c b/arch/sparc/kernel/pci.c
index 7de8d1f..1555bbc 100644
--- a/arch/sparc/kernel/pci.c
+++ b/arch/sparc/kernel/pci.c
@@ -1005,6 +1005,5 @@
 
 	return 0;
 }
-
-module_init(of_pci_slot_init);
+device_initcall(of_pci_slot_init);
 #endif
diff --git a/arch/sparc/kernel/pci_common.c b/arch/sparc/kernel/pci_common.c
index a689598..944a065 100644
--- a/arch/sparc/kernel/pci_common.c
+++ b/arch/sparc/kernel/pci_common.c
@@ -5,7 +5,6 @@
 
 #include <linux/string.h>
 #include <linux/slab.h>
-#include <linux/init.h>
 #include <linux/pci.h>
 #include <linux/device.h>
 #include <linux/of_device.h>
diff --git a/arch/sparc/kernel/process_32.c b/arch/sparc/kernel/process_32.c
index fdd819d..510baec 100644
--- a/arch/sparc/kernel/process_32.c
+++ b/arch/sparc/kernel/process_32.c
@@ -22,7 +22,6 @@
 #include <linux/reboot.h>
 #include <linux/delay.h>
 #include <linux/pm.h>
-#include <linux/init.h>
 #include <linux/slab.h>
 
 #include <asm/auxio.h>
diff --git a/arch/sparc/kernel/sparc_ksyms_32.c b/arch/sparc/kernel/sparc_ksyms_32.c
index e521c54..bf4ccb1 100644
--- a/arch/sparc/kernel/sparc_ksyms_32.c
+++ b/arch/sparc/kernel/sparc_ksyms_32.c
@@ -6,7 +6,6 @@
  */
 
 #include <linux/module.h>
-#include <linux/init.h>
 
 #include <asm/pgtable.h>
 #include <asm/uaccess.h>
diff --git a/arch/sparc/kernel/sparc_ksyms_64.c b/arch/sparc/kernel/sparc_ksyms_64.c
index 9f5e24d..a92d5d2 100644
--- a/arch/sparc/kernel/sparc_ksyms_64.c
+++ b/arch/sparc/kernel/sparc_ksyms_64.c
@@ -7,7 +7,6 @@
 
 #include <linux/export.h>
 #include <linux/pci.h>
-#include <linux/init.h>
 #include <linux/bitops.h>
 
 #include <asm/cpudata.h>
diff --git a/arch/sparc/kernel/systbls_32.S b/arch/sparc/kernel/systbls_32.S
index 7b87171..151ace8 100644
--- a/arch/sparc/kernel/systbls_32.S
+++ b/arch/sparc/kernel/systbls_32.S
@@ -85,4 +85,4 @@
 /*325*/	.long sys_pwritev, sys_rt_tgsigqueueinfo, sys_perf_event_open, sys_recvmmsg, sys_fanotify_init
 /*330*/	.long sys_fanotify_mark, sys_prlimit64, sys_name_to_handle_at, sys_open_by_handle_at, sys_clock_adjtime
 /*335*/	.long sys_syncfs, sys_sendmmsg, sys_setns, sys_process_vm_readv, sys_process_vm_writev
-/*340*/	.long sys_ni_syscall, sys_kcmp, sys_finit_module
+/*340*/	.long sys_ni_syscall, sys_kcmp, sys_finit_module, sys_sched_setattr, sys_sched_getattr
diff --git a/arch/sparc/kernel/systbls_64.S b/arch/sparc/kernel/systbls_64.S
index 6d81597..4bd4e2b 100644
--- a/arch/sparc/kernel/systbls_64.S
+++ b/arch/sparc/kernel/systbls_64.S
@@ -86,7 +86,7 @@
 	.word compat_sys_pwritev, compat_sys_rt_tgsigqueueinfo, sys_perf_event_open, compat_sys_recvmmsg, sys_fanotify_init
 /*330*/	.word compat_sys_fanotify_mark, sys_prlimit64, sys_name_to_handle_at, compat_sys_open_by_handle_at, compat_sys_clock_adjtime
 	.word sys_syncfs, compat_sys_sendmmsg, sys_setns, compat_sys_process_vm_readv, compat_sys_process_vm_writev
-/*340*/	.word sys_kern_features, sys_kcmp, sys_finit_module
+/*340*/	.word sys_kern_features, sys_kcmp, sys_finit_module, sys_sched_setattr, sys_sched_getattr
 
 #endif /* CONFIG_COMPAT */
 
@@ -164,4 +164,4 @@
 	.word sys_pwritev, sys_rt_tgsigqueueinfo, sys_perf_event_open, sys_recvmmsg, sys_fanotify_init
 /*330*/	.word sys_fanotify_mark, sys_prlimit64, sys_name_to_handle_at, sys_open_by_handle_at, sys_clock_adjtime
 	.word sys_syncfs, sys_sendmmsg, sys_setns, sys_process_vm_readv, sys_process_vm_writev
-/*340*/	.word sys_kern_features, sys_kcmp, sys_finit_module
+/*340*/	.word sys_kern_features, sys_kcmp, sys_finit_module, sys_sched_setattr, sys_sched_getattr
diff --git a/arch/sparc/kernel/trampoline_32.S b/arch/sparc/kernel/trampoline_32.S
index 76dcbd3..3eed99f 100644
--- a/arch/sparc/kernel/trampoline_32.S
+++ b/arch/sparc/kernel/trampoline_32.S
@@ -5,7 +5,6 @@
  * Copyright (C) 1998 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
  */
 
-#include <linux/init.h>
 #include <asm/head.h>
 #include <asm/psr.h>
 #include <asm/page.h>
diff --git a/arch/sparc/kernel/trampoline_64.S b/arch/sparc/kernel/trampoline_64.S
index ad4bde3..737f8cb 100644
--- a/arch/sparc/kernel/trampoline_64.S
+++ b/arch/sparc/kernel/trampoline_64.S
@@ -4,7 +4,6 @@
  * Copyright (C) 1997 David S. Miller (davem@caip.rutgers.edu)
  */
 
-#include <linux/init.h>
 
 #include <asm/head.h>
 #include <asm/asi.h>
diff --git a/arch/sparc/mm/hugetlbpage.c b/arch/sparc/mm/hugetlbpage.c
index 3096317..9bd9ce8 100644
--- a/arch/sparc/mm/hugetlbpage.c
+++ b/arch/sparc/mm/hugetlbpage.c
@@ -4,7 +4,6 @@
  * Copyright (C) 2002, 2003, 2006 David S. Miller (davem@davemloft.net)
  */
 
-#include <linux/init.h>
 #include <linux/fs.h>
 #include <linux/mm.h>
 #include <linux/hugetlb.h>
diff --git a/arch/sparc/mm/tlb.c b/arch/sparc/mm/tlb.c
index ad3bf4b..b12cb5e 100644
--- a/arch/sparc/mm/tlb.c
+++ b/arch/sparc/mm/tlb.c
@@ -4,7 +4,6 @@
  */
 
 #include <linux/kernel.h>
-#include <linux/init.h>
 #include <linux/percpu.h>
 #include <linux/mm.h>
 #include <linux/swap.h>
diff --git a/arch/sparc/prom/p1275.c b/arch/sparc/prom/p1275.c
index 04a4540..e58b817 100644
--- a/arch/sparc/prom/p1275.c
+++ b/arch/sparc/prom/p1275.c
@@ -5,7 +5,6 @@
  */
 
 #include <linux/kernel.h>
-#include <linux/init.h>
 #include <linux/sched.h>
 #include <linux/smp.h>
 #include <linux/string.h>
diff --git a/arch/tile/include/asm/compat.h b/arch/tile/include/asm/compat.h
index 78f1f2d..ffd4493 100644
--- a/arch/tile/include/asm/compat.h
+++ b/arch/tile/include/asm/compat.h
@@ -281,7 +281,6 @@
 			u32 dummy, u32 low, u32 high);
 long compat_sys_pwrite64(unsigned int fd, char __user *ubuf, size_t count,
 			 u32 dummy, u32 low, u32 high);
-long compat_sys_lookup_dcookie(u32 low, u32 high, char __user *buf, size_t len);
 long compat_sys_sync_file_range2(int fd, unsigned int flags,
 				 u32 offset_lo, u32 offset_hi,
 				 u32 nbytes_lo, u32 nbytes_hi);
diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
index 940e50e..0af5250 100644
--- a/arch/x86/Kconfig
+++ b/arch/x86/Kconfig
@@ -444,6 +444,7 @@
 	bool "Intel MID platform support"
 	depends on X86_32
 	depends on X86_EXTENDED_PLATFORM
+	depends on X86_PLATFORM_DEVICES
 	depends on PCI
 	depends on PCI_GOANY
 	depends on X86_IO_APIC
@@ -1051,9 +1052,9 @@
 	  This options enables microcode patch loading support for Intel
 	  processors.
 
-	  For latest news and information on obtaining all the required
-	  Intel ingredients for this driver, check:
-	  <http://www.urbanmyth.org/microcode/>.
+	  For the current Intel microcode data package go to
+	  <https://downloadcenter.intel.com> and search for
+	  'Linux Processor Microcode Data File'.
 
 config MICROCODE_AMD
 	bool "AMD microcode loading support"
diff --git a/arch/x86/Kconfig.debug b/arch/x86/Kconfig.debug
index 0f3621e..321a52c 100644
--- a/arch/x86/Kconfig.debug
+++ b/arch/x86/Kconfig.debug
@@ -184,6 +184,7 @@
 config X86_DECODER_SELFTEST
 	bool "x86 instruction decoder selftest"
 	depends on DEBUG_KERNEL && KPROBES
+	depends on !COMPILE_TEST
 	---help---
 	 Perform x86 instruction decoder selftests at build time.
 	 This option is useful for checking the sanity of x86 instruction
diff --git a/arch/x86/Makefile b/arch/x86/Makefile
index 13b22e0..eeda43a 100644
--- a/arch/x86/Makefile
+++ b/arch/x86/Makefile
@@ -11,6 +11,28 @@
         KBUILD_DEFCONFIG := $(ARCH)_defconfig
 endif
 
+# How to compile the 16-bit code.  Note we always compile for -march=i386;
+# that way we can complain to the user if the CPU is insufficient.
+#
+# The -m16 option is supported by GCC >= 4.9 and clang >= 3.5. For
+# older versions of GCC, we need to play evil and unreliable tricks to
+# attempt to ensure that our asm(".code16gcc") is first in the asm
+# output.
+CODE16GCC_CFLAGS := -m32 -include $(srctree)/arch/x86/boot/code16gcc.h \
+		    $(call cc-option, -fno-toplevel-reorder,\
+		      $(call cc-option, -fno-unit-at-a-time))
+M16_CFLAGS	 := $(call cc-option, -m16, $(CODE16GCC_CFLAGS))
+
+REALMODE_CFLAGS	:= $(M16_CFLAGS) -g -Os -D__KERNEL__ \
+		   -DDISABLE_BRANCH_PROFILING \
+		   -Wall -Wstrict-prototypes -march=i386 -mregparm=3 \
+		   -fno-strict-aliasing -fomit-frame-pointer -fno-pic \
+		   -mno-mmx -mno-sse \
+		   $(call cc-option, -ffreestanding) \
+		   $(call cc-option, -fno-stack-protector) \
+		   $(call cc-option, -mpreferred-stack-boundary=2)
+export REALMODE_CFLAGS
+
 # BITS is used as extension for files which are available in a 32 bit
 # and a 64 bit version to simplify shared Makefiles.
 # e.g.: obj-y += foo_$(BITS).o
diff --git a/arch/x86/boot/Makefile b/arch/x86/boot/Makefile
index de70669..878df7e 100644
--- a/arch/x86/boot/Makefile
+++ b/arch/x86/boot/Makefile
@@ -51,20 +51,7 @@
 
 # ---------------------------------------------------------------------------
 
-# How to compile the 16-bit code.  Note we always compile for -march=i386,
-# that way we can complain to the user if the CPU is insufficient.
-KBUILD_CFLAGS	:= $(USERINCLUDE) -m32 -g -Os -D_SETUP -D__KERNEL__ \
-		   -DDISABLE_BRANCH_PROFILING \
-		   -Wall -Wstrict-prototypes \
-		   -march=i386 -mregparm=3 \
-		   -include $(srctree)/$(src)/code16gcc.h \
-		   -fno-strict-aliasing -fomit-frame-pointer -fno-pic \
-		   -mno-mmx -mno-sse \
-		   $(call cc-option, -ffreestanding) \
-		   $(call cc-option, -fno-toplevel-reorder,\
-		   $(call cc-option, -fno-unit-at-a-time)) \
-		   $(call cc-option, -fno-stack-protector) \
-		   $(call cc-option, -mpreferred-stack-boundary=2)
+KBUILD_CFLAGS	:= $(USERINCLUDE) $(REALMODE_CFLAGS) -D_SETUP
 KBUILD_AFLAGS	:= $(KBUILD_CFLAGS) -D__ASSEMBLY__
 GCOV_PROFILE := n
 
diff --git a/arch/x86/boot/cpuflags.c b/arch/x86/boot/cpuflags.c
index a9fcb7c..431fa5f 100644
--- a/arch/x86/boot/cpuflags.c
+++ b/arch/x86/boot/cpuflags.c
@@ -28,20 +28,35 @@
 	return fsw == 0 && (fcw & 0x103f) == 0x003f;
 }
 
+/*
+ * For building the 16-bit code we want to explicitly specify 32-bit
+ * push/pop operations, rather than just saying 'pushf' or 'popf' and
+ * letting the compiler choose. But this is also included from the
+ * compressed/ directory where it may be 64-bit code, and thus needs
+ * to be 'pushfq' or 'popfq' in that case.
+ */
+#ifdef __x86_64__
+#define PUSHF "pushfq"
+#define POPF "popfq"
+#else
+#define PUSHF "pushfl"
+#define POPF "popfl"
+#endif
+
 int has_eflag(unsigned long mask)
 {
 	unsigned long f0, f1;
 
-	asm volatile("pushf	\n\t"
-		     "pushf	\n\t"
+	asm volatile(PUSHF "	\n\t"
+		     PUSHF "	\n\t"
 		     "pop %0	\n\t"
 		     "mov %0,%1	\n\t"
 		     "xor %2,%1	\n\t"
 		     "push %1	\n\t"
-		     "popf	\n\t"
-		     "pushf	\n\t"
+		     POPF "	\n\t"
+		     PUSHF "	\n\t"
 		     "pop %1	\n\t"
-		     "popf"
+		     POPF
 		     : "=&r" (f0), "=&r" (f1)
 		     : "ri" (mask));
 
diff --git a/arch/x86/boot/video.h b/arch/x86/boot/video.h
index ff339c5..0bb2549 100644
--- a/arch/x86/boot/video.h
+++ b/arch/x86/boot/video.h
@@ -80,7 +80,7 @@
 	u16 xmode_n;		/* Size of unprobed mode range */
 };
 
-#define __videocard struct card_info __attribute__((section(".videocards")))
+#define __videocard struct card_info __attribute__((used,section(".videocards")))
 extern struct card_info video_cards[], video_cards_end[];
 
 int mode_defined(u16 mode);	/* video.c */
diff --git a/arch/x86/include/asm/amd_nb.h b/arch/x86/include/asm/amd_nb.h
index a54ee1d..aaac3b2 100644
--- a/arch/x86/include/asm/amd_nb.h
+++ b/arch/x86/include/asm/amd_nb.h
@@ -19,7 +19,7 @@
 extern void amd_flush_garts(void);
 extern int amd_numa_init(void);
 extern int amd_get_subcaches(int);
-extern int amd_set_subcaches(int, int);
+extern int amd_set_subcaches(int, unsigned long);
 
 struct amd_l3_cache {
 	unsigned indices;
diff --git a/arch/x86/include/asm/efi.h b/arch/x86/include/asm/efi.h
index 3b978c4..3d6b9f8 100644
--- a/arch/x86/include/asm/efi.h
+++ b/arch/x86/include/asm/efi.h
@@ -132,6 +132,8 @@
 extern void efi_sync_low_kernel_mappings(void);
 extern void efi_setup_page_tables(void);
 extern void __init old_map_region(efi_memory_desc_t *md);
+extern void __init runtime_code_page_mkexec(void);
+extern void __init efi_runtime_mkexec(void);
 
 struct efi_setup_data {
 	u64 fw_vendor;
diff --git a/arch/x86/include/asm/kvm_para.h b/arch/x86/include/asm/kvm_para.h
index 1df1159..c7678e4 100644
--- a/arch/x86/include/asm/kvm_para.h
+++ b/arch/x86/include/asm/kvm_para.h
@@ -85,28 +85,9 @@
 	return ret;
 }
 
-static inline uint32_t kvm_cpuid_base(void)
-{
-	if (boot_cpu_data.cpuid_level < 0)
-		return 0;	/* So we don't blow up on old processors */
-
-	if (cpu_has_hypervisor)
-		return hypervisor_cpuid_base("KVMKVMKVM\0\0\0", 0);
-
-	return 0;
-}
-
-static inline bool kvm_para_available(void)
-{
-	return kvm_cpuid_base() != 0;
-}
-
-static inline unsigned int kvm_arch_para_features(void)
-{
-	return cpuid_eax(KVM_CPUID_FEATURES);
-}
-
 #ifdef CONFIG_KVM_GUEST
+bool kvm_para_available(void);
+unsigned int kvm_arch_para_features(void);
 void __init kvm_guest_init(void);
 void kvm_async_pf_task_wait(u32 token);
 void kvm_async_pf_task_wake(u32 token);
@@ -126,6 +107,16 @@
 #define kvm_async_pf_task_wait(T) do {} while(0)
 #define kvm_async_pf_task_wake(T) do {} while(0)
 
+static inline bool kvm_para_available(void)
+{
+	return 0;
+}
+
+static inline unsigned int kvm_arch_para_features(void)
+{
+	return 0;
+}
+
 static inline u32 kvm_read_and_reset_pf_reason(void)
 {
 	return 0;
diff --git a/arch/x86/include/asm/paravirt.h b/arch/x86/include/asm/paravirt.h
index 401f350..cd6e161 100644
--- a/arch/x86/include/asm/paravirt.h
+++ b/arch/x86/include/asm/paravirt.h
@@ -781,9 +781,9 @@
  */
 #define PV_CALLEE_SAVE_REGS_THUNK(func)					\
 	extern typeof(func) __raw_callee_save_##func;			\
-	static void *__##func##__ __used = func;			\
 									\
 	asm(".pushsection .text;"					\
+	    ".globl __raw_callee_save_" #func " ; "			\
 	    "__raw_callee_save_" #func ": "				\
 	    PV_SAVE_ALL_CALLER_REGS					\
 	    "call " #func ";"						\
diff --git a/arch/x86/include/asm/paravirt_types.h b/arch/x86/include/asm/paravirt_types.h
index aab8f67..7549b8b 100644
--- a/arch/x86/include/asm/paravirt_types.h
+++ b/arch/x86/include/asm/paravirt_types.h
@@ -388,10 +388,11 @@
 	_paravirt_alt(insn_string, "%c[paravirt_typenum]", "%c[paravirt_clobber]")
 
 /* Simple instruction patching code. */
-#define DEF_NATIVE(ops, name, code) 					\
-	extern const char start_##ops##_##name[] __visible,		\
-			  end_##ops##_##name[] __visible;		\
-	asm("start_" #ops "_" #name ": " code "; end_" #ops "_" #name ":")
+#define NATIVE_LABEL(a,x,b) "\n\t.globl " a #x "_" #b "\n" a #x "_" #b ":\n\t"
+
+#define DEF_NATIVE(ops, name, code)					\
+	__visible extern const char start_##ops##_##name[], end_##ops##_##name[];	\
+	asm(NATIVE_LABEL("start_", ops, name) code NATIVE_LABEL("end_", ops, name))
 
 unsigned paravirt_patch_nop(void);
 unsigned paravirt_patch_ident_32(void *insnbuf, unsigned len);
diff --git a/arch/x86/include/asm/pgtable.h b/arch/x86/include/asm/pgtable.h
index bbc8b12..5ad38ad 100644
--- a/arch/x86/include/asm/pgtable.h
+++ b/arch/x86/include/asm/pgtable.h
@@ -445,10 +445,20 @@
 	return a.pte == b.pte;
 }
 
+static inline int pteval_present(pteval_t pteval)
+{
+	/*
+	 * Yes Linus, _PAGE_PROTNONE == _PAGE_NUMA. Expressing it this
+	 * way clearly states that the intent is that protnone and numa
+	 * hinting ptes are considered present for the purposes of
+	 * pagetable operations like zapping, protection changes, gup etc.
+	 */
+	return pteval & (_PAGE_PRESENT | _PAGE_PROTNONE | _PAGE_NUMA);
+}
+
 static inline int pte_present(pte_t a)
 {
-	return pte_flags(a) & (_PAGE_PRESENT | _PAGE_PROTNONE |
-			       _PAGE_NUMA);
+	return pteval_present(pte_flags(a));
 }
 
 #define pte_accessible pte_accessible
diff --git a/arch/x86/include/asm/pgtable_types.h b/arch/x86/include/asm/pgtable_types.h
index a83aa44..1aa9ccd 100644
--- a/arch/x86/include/asm/pgtable_types.h
+++ b/arch/x86/include/asm/pgtable_types.h
@@ -121,7 +121,8 @@
 
 /* Set of bits not changed in pte_modify */
 #define _PAGE_CHG_MASK	(PTE_PFN_MASK | _PAGE_PCD | _PAGE_PWT |		\
-			 _PAGE_SPECIAL | _PAGE_ACCESSED | _PAGE_DIRTY)
+			 _PAGE_SPECIAL | _PAGE_ACCESSED | _PAGE_DIRTY |	\
+			 _PAGE_SOFT_DIRTY)
 #define _HPAGE_CHG_MASK (_PAGE_CHG_MASK | _PAGE_PSE)
 
 #define _PAGE_CACHE_MASK	(_PAGE_PCD | _PAGE_PWT)
diff --git a/arch/x86/include/asm/thread_info.h b/arch/x86/include/asm/thread_info.h
index 3ba3de4..e1940c0 100644
--- a/arch/x86/include/asm/thread_info.h
+++ b/arch/x86/include/asm/thread_info.h
@@ -163,9 +163,11 @@
  */
 #ifndef __ASSEMBLY__
 
-
-/* how to get the current stack pointer from C */
-register unsigned long current_stack_pointer asm("esp") __used;
+#define current_stack_pointer ({		\
+	unsigned long sp;			\
+	asm("mov %%esp,%0" : "=g" (sp));	\
+	sp;					\
+})
 
 /* how to get the thread information struct from C */
 static inline struct thread_info *current_thread_info(void)
diff --git a/arch/x86/include/asm/tlbflush.h b/arch/x86/include/asm/tlbflush.h
index e6d90ba..04905bf 100644
--- a/arch/x86/include/asm/tlbflush.h
+++ b/arch/x86/include/asm/tlbflush.h
@@ -62,7 +62,7 @@
 
 static inline void __flush_tlb_one(unsigned long addr)
 {
-	count_vm_event(NR_TLB_LOCAL_FLUSH_ONE);
+	count_vm_tlb_event(NR_TLB_LOCAL_FLUSH_ONE);
 	__flush_tlb_single(addr);
 }
 
@@ -93,13 +93,13 @@
  */
 static inline void __flush_tlb_up(void)
 {
-	count_vm_event(NR_TLB_LOCAL_FLUSH_ALL);
+	count_vm_tlb_event(NR_TLB_LOCAL_FLUSH_ALL);
 	__flush_tlb();
 }
 
 static inline void flush_tlb_all(void)
 {
-	count_vm_event(NR_TLB_LOCAL_FLUSH_ALL);
+	count_vm_tlb_event(NR_TLB_LOCAL_FLUSH_ALL);
 	__flush_tlb_all();
 }
 
diff --git a/arch/x86/include/asm/uv/uv.h b/arch/x86/include/asm/uv/uv.h
index 6b964a0..062921e 100644
--- a/arch/x86/include/asm/uv/uv.h
+++ b/arch/x86/include/asm/uv/uv.h
@@ -12,7 +12,6 @@
 extern int is_uv_system(void);
 extern void uv_cpu_init(void);
 extern void uv_nmi_init(void);
-extern void uv_register_nmi_notifier(void);
 extern void uv_system_init(void);
 extern const struct cpumask *uv_flush_tlb_others(const struct cpumask *cpumask,
 						 struct mm_struct *mm,
@@ -26,7 +25,6 @@
 static inline int is_uv_system(void)	{ return 0; }
 static inline void uv_cpu_init(void)	{ }
 static inline void uv_system_init(void)	{ }
-static inline void uv_register_nmi_notifier(void) { }
 static inline const struct cpumask *
 uv_flush_tlb_others(const struct cpumask *cpumask, struct mm_struct *mm,
 		    unsigned long start, unsigned long end, unsigned int cpu)
diff --git a/arch/x86/include/uapi/asm/sembuf.h b/arch/x86/include/uapi/asm/sembuf.h
index ee50c80..cc2d6a3 100644
--- a/arch/x86/include/uapi/asm/sembuf.h
+++ b/arch/x86/include/uapi/asm/sembuf.h
@@ -13,12 +13,12 @@
 struct semid64_ds {
 	struct ipc64_perm sem_perm;	/* permissions .. see ipc.h */
 	__kernel_time_t	sem_otime;	/* last semop time */
-	unsigned long	__unused1;
+	__kernel_ulong_t __unused1;
 	__kernel_time_t	sem_ctime;	/* last change time */
-	unsigned long	__unused2;
-	unsigned long	sem_nsems;	/* no. of semaphores in array */
-	unsigned long	__unused3;
-	unsigned long	__unused4;
+	__kernel_ulong_t __unused2;
+	__kernel_ulong_t sem_nsems;	/* no. of semaphores in array */
+	__kernel_ulong_t __unused3;
+	__kernel_ulong_t __unused4;
 };
 
 #endif /* _ASM_X86_SEMBUF_H */
diff --git a/arch/x86/kernel/amd_nb.c b/arch/x86/kernel/amd_nb.c
index 59554dc..dec8de4 100644
--- a/arch/x86/kernel/amd_nb.c
+++ b/arch/x86/kernel/amd_nb.c
@@ -179,7 +179,7 @@
 	return (mask >> (4 * cuid)) & 0xf;
 }
 
-int amd_set_subcaches(int cpu, int mask)
+int amd_set_subcaches(int cpu, unsigned long mask)
 {
 	static unsigned int reset, ban;
 	struct amd_northbridge *nb = node_to_amd_nb(amd_get_nb_id(cpu));
diff --git a/arch/x86/kernel/apic/x2apic_uv_x.c b/arch/x86/kernel/apic/x2apic_uv_x.c
index ad0dc04..d263b13 100644
--- a/arch/x86/kernel/apic/x2apic_uv_x.c
+++ b/arch/x86/kernel/apic/x2apic_uv_x.c
@@ -980,7 +980,6 @@
 	uv_nmi_setup();
 	uv_cpu_init();
 	uv_scir_register_cpu_notifier();
-	uv_register_nmi_notifier();
 	proc_mkdir("sgi_uv", NULL);
 
 	/* register Legacy VGA I/O redirection handler */
diff --git a/arch/x86/kernel/cpu/amd.c b/arch/x86/kernel/cpu/amd.c
index d3153e2..c67ffa6 100644
--- a/arch/x86/kernel/cpu/amd.c
+++ b/arch/x86/kernel/cpu/amd.c
@@ -767,10 +767,7 @@
 
 static void cpu_set_tlb_flushall_shift(struct cpuinfo_x86 *c)
 {
-	tlb_flushall_shift = 5;
-
-	if (c->x86 <= 0x11)
-		tlb_flushall_shift = 4;
+	tlb_flushall_shift = 6;
 }
 
 static void cpu_detect_tlb_amd(struct cpuinfo_x86 *c)
diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c
index 24b6fd1..8e28bf2 100644
--- a/arch/x86/kernel/cpu/common.c
+++ b/arch/x86/kernel/cpu/common.c
@@ -284,8 +284,13 @@
 	raw_local_save_flags(eflags);
 	BUG_ON(eflags & X86_EFLAGS_AC);
 
-	if (cpu_has(c, X86_FEATURE_SMAP))
+	if (cpu_has(c, X86_FEATURE_SMAP)) {
+#ifdef CONFIG_X86_SMAP
 		set_in_cr4(X86_CR4_SMAP);
+#else
+		clear_in_cr4(X86_CR4_SMAP);
+#endif
+	}
 }
 
 /*
diff --git a/arch/x86/kernel/cpu/intel.c b/arch/x86/kernel/cpu/intel.c
index 3db61c6..5cd9bfa 100644
--- a/arch/x86/kernel/cpu/intel.c
+++ b/arch/x86/kernel/cpu/intel.c
@@ -640,21 +640,17 @@
 	case 0x61d: /* six-core 45 nm xeon "Dunnington" */
 		tlb_flushall_shift = -1;
 		break;
+	case 0x63a: /* Ivybridge */
+		tlb_flushall_shift = 2;
+		break;
 	case 0x61a: /* 45 nm nehalem, "Bloomfield" */
 	case 0x61e: /* 45 nm nehalem, "Lynnfield" */
 	case 0x625: /* 32 nm nehalem, "Clarkdale" */
 	case 0x62c: /* 32 nm nehalem, "Gulftown" */
 	case 0x62e: /* 45 nm nehalem-ex, "Beckton" */
 	case 0x62f: /* 32 nm Xeon E7 */
-		tlb_flushall_shift = 6;
-		break;
 	case 0x62a: /* SandyBridge */
 	case 0x62d: /* SandyBridge, "Romely-EP" */
-		tlb_flushall_shift = 5;
-		break;
-	case 0x63a: /* Ivybridge */
-		tlb_flushall_shift = 1;
-		break;
 	default:
 		tlb_flushall_shift = 6;
 	}
diff --git a/arch/x86/kernel/cpu/microcode/amd_early.c b/arch/x86/kernel/cpu/microcode/amd_early.c
index 8384c0f..617a9e2 100644
--- a/arch/x86/kernel/cpu/microcode/amd_early.c
+++ b/arch/x86/kernel/cpu/microcode/amd_early.c
@@ -285,6 +285,15 @@
 
 	uci->cpu_sig.sig = cpuid_eax(0x00000001);
 }
+
+static void __init get_bsp_sig(void)
+{
+	unsigned int bsp = boot_cpu_data.cpu_index;
+	struct ucode_cpu_info *uci = ucode_cpu_info + bsp;
+
+	if (!uci->cpu_sig.sig)
+		smp_call_function_single(bsp, collect_cpu_sig_on_bsp, NULL, 1);
+}
 #else
 void load_ucode_amd_ap(void)
 {
@@ -337,31 +346,37 @@
 
 int __init save_microcode_in_initrd_amd(void)
 {
+	unsigned long cont;
 	enum ucode_state ret;
 	u32 eax;
 
-#ifdef CONFIG_X86_32
-	unsigned int bsp = boot_cpu_data.cpu_index;
-	struct ucode_cpu_info *uci = ucode_cpu_info + bsp;
+	if (!container)
+		return -EINVAL;
 
-	if (!uci->cpu_sig.sig)
-		smp_call_function_single(bsp, collect_cpu_sig_on_bsp, NULL, 1);
+#ifdef CONFIG_X86_32
+	get_bsp_sig();
+	cont = (unsigned long)container;
+#else
+	/*
+	 * We need the physical address of the container regardless of bitness, since
+	 * boot_params.hdr.ramdisk_image is a physical address.
+	 */
+	cont = __pa(container);
+#endif
 
 	/*
-	 * Take into account the fact that the ramdisk might get relocated
-	 * and therefore we need to recompute the container's position in
-	 * virtual memory space.
+	 * Take into account the fact that the ramdisk might get relocated and
+	 * therefore we need to recompute the container's position in virtual
+	 * memory space.
 	 */
-	container = (u8 *)(__va((u32)relocated_ramdisk) +
-			   ((u32)container - boot_params.hdr.ramdisk_image));
-#endif
+	if (relocated_ramdisk)
+		container = (u8 *)(__va(relocated_ramdisk) +
+			     (cont - boot_params.hdr.ramdisk_image));
+
 	if (ucode_new_rev)
 		pr_info("microcode: updated early to new patch_level=0x%08x\n",
 			ucode_new_rev);
 
-	if (!container)
-		return -EINVAL;
-
 	eax   = cpuid_eax(0x00000001);
 	eax   = ((eax >> 8) & 0xf) + ((eax >> 20) & 0xff);
 
diff --git a/arch/x86/kernel/cpu/mtrr/generic.c b/arch/x86/kernel/cpu/mtrr/generic.c
index ce2d0a2..0e25a1b 100644
--- a/arch/x86/kernel/cpu/mtrr/generic.c
+++ b/arch/x86/kernel/cpu/mtrr/generic.c
@@ -683,7 +683,7 @@
 	}
 
 	/* Flush all TLBs via a mov %cr3, %reg; mov %reg, %cr3 */
-	count_vm_event(NR_TLB_LOCAL_FLUSH_ALL);
+	count_vm_tlb_event(NR_TLB_LOCAL_FLUSH_ALL);
 	__flush_tlb();
 
 	/* Save MTRR state */
@@ -697,7 +697,7 @@
 static void post_set(void) __releases(set_atomicity_lock)
 {
 	/* Flush TLBs (no need to flush caches - they are disabled) */
-	count_vm_event(NR_TLB_LOCAL_FLUSH_ALL);
+	count_vm_tlb_event(NR_TLB_LOCAL_FLUSH_ALL);
 	__flush_tlb();
 
 	/* Intel (P6) standard MTRRs */
diff --git a/arch/x86/kernel/ftrace.c b/arch/x86/kernel/ftrace.c
index d4bdd25..e625319 100644
--- a/arch/x86/kernel/ftrace.c
+++ b/arch/x86/kernel/ftrace.c
@@ -77,8 +77,7 @@
 	return addr >= start && addr < end;
 }
 
-static int
-do_ftrace_mod_code(unsigned long ip, const void *new_code)
+static unsigned long text_ip_addr(unsigned long ip)
 {
 	/*
 	 * On x86_64, kernel text mappings are mapped read-only with
@@ -91,7 +90,7 @@
 	if (within(ip, (unsigned long)_text, (unsigned long)_etext))
 		ip = (unsigned long)__va(__pa_symbol(ip));
 
-	return probe_kernel_write((void *)ip, new_code, MCOUNT_INSN_SIZE);
+	return ip;
 }
 
 static const unsigned char *ftrace_nop_replace(void)
@@ -123,8 +122,10 @@
 	if (memcmp(replaced, old_code, MCOUNT_INSN_SIZE) != 0)
 		return -EINVAL;
 
+	ip = text_ip_addr(ip);
+
 	/* replace the text with the new text */
-	if (do_ftrace_mod_code(ip, new_code))
+	if (probe_kernel_write((void *)ip, new_code, MCOUNT_INSN_SIZE))
 		return -EPERM;
 
 	sync_core();
@@ -221,37 +222,51 @@
 	return -EINVAL;
 }
 
-int ftrace_update_ftrace_func(ftrace_func_t func)
+static unsigned long ftrace_update_func;
+
+static int update_ftrace_func(unsigned long ip, void *new)
 {
-	unsigned long ip = (unsigned long)(&ftrace_call);
-	unsigned char old[MCOUNT_INSN_SIZE], *new;
+	unsigned char old[MCOUNT_INSN_SIZE];
 	int ret;
 
-	memcpy(old, &ftrace_call, MCOUNT_INSN_SIZE);
-	new = ftrace_call_replace(ip, (unsigned long)func);
+	memcpy(old, (void *)ip, MCOUNT_INSN_SIZE);
+
+	ftrace_update_func = ip;
+	/* Make sure the breakpoints see the ftrace_update_func update */
+	smp_wmb();
 
 	/* See comment above by declaration of modifying_ftrace_code */
 	atomic_inc(&modifying_ftrace_code);
 
 	ret = ftrace_modify_code(ip, old, new);
 
+	atomic_dec(&modifying_ftrace_code);
+
+	return ret;
+}
+
+int ftrace_update_ftrace_func(ftrace_func_t func)
+{
+	unsigned long ip = (unsigned long)(&ftrace_call);
+	unsigned char *new;
+	int ret;
+
+	new = ftrace_call_replace(ip, (unsigned long)func);
+	ret = update_ftrace_func(ip, new);
+
 	/* Also update the regs callback function */
 	if (!ret) {
 		ip = (unsigned long)(&ftrace_regs_call);
-		memcpy(old, &ftrace_regs_call, MCOUNT_INSN_SIZE);
 		new = ftrace_call_replace(ip, (unsigned long)func);
-		ret = ftrace_modify_code(ip, old, new);
+		ret = update_ftrace_func(ip, new);
 	}
 
-	atomic_dec(&modifying_ftrace_code);
-
 	return ret;
 }
 
 static int is_ftrace_caller(unsigned long ip)
 {
-	if (ip == (unsigned long)(&ftrace_call) ||
-		ip == (unsigned long)(&ftrace_regs_call))
+	if (ip == ftrace_update_func)
 		return 1;
 
 	return 0;
@@ -677,45 +692,41 @@
 #ifdef CONFIG_DYNAMIC_FTRACE
 extern void ftrace_graph_call(void);
 
-static int ftrace_mod_jmp(unsigned long ip,
-			  int old_offset, int new_offset)
+static unsigned char *ftrace_jmp_replace(unsigned long ip, unsigned long addr)
 {
-	unsigned char code[MCOUNT_INSN_SIZE];
+	static union ftrace_code_union calc;
 
-	if (probe_kernel_read(code, (void *)ip, MCOUNT_INSN_SIZE))
-		return -EFAULT;
+	/* A jmp, not a call (the union field is still named e8) */
+	calc.e8		= 0xe9;
+	calc.offset	= ftrace_calc_offset(ip + MCOUNT_INSN_SIZE, addr);
 
-	if (code[0] != 0xe9 || old_offset != *(int *)(&code[1]))
-		return -EINVAL;
+	/*
+	 * ftrace external locks synchronize the access to the static variable.
+	 */
+	return calc.code;
+}
 
-	*(int *)(&code[1]) = new_offset;
+static int ftrace_mod_jmp(unsigned long ip, void *func)
+{
+	unsigned char *new;
 
-	if (do_ftrace_mod_code(ip, &code))
-		return -EPERM;
+	new = ftrace_jmp_replace(ip, (unsigned long)func);
 
-	return 0;
+	return update_ftrace_func(ip, new);
 }
 
 int ftrace_enable_ftrace_graph_caller(void)
 {
 	unsigned long ip = (unsigned long)(&ftrace_graph_call);
-	int old_offset, new_offset;
 
-	old_offset = (unsigned long)(&ftrace_stub) - (ip + MCOUNT_INSN_SIZE);
-	new_offset = (unsigned long)(&ftrace_graph_caller) - (ip + MCOUNT_INSN_SIZE);
-
-	return ftrace_mod_jmp(ip, old_offset, new_offset);
+	return ftrace_mod_jmp(ip, &ftrace_graph_caller);
 }
 
 int ftrace_disable_ftrace_graph_caller(void)
 {
 	unsigned long ip = (unsigned long)(&ftrace_graph_call);
-	int old_offset, new_offset;
 
-	old_offset = (unsigned long)(&ftrace_graph_caller) - (ip + MCOUNT_INSN_SIZE);
-	new_offset = (unsigned long)(&ftrace_stub) - (ip + MCOUNT_INSN_SIZE);
-
-	return ftrace_mod_jmp(ip, old_offset, new_offset);
+	return ftrace_mod_jmp(ip, &ftrace_stub);
 }
 
 #endif /* !CONFIG_DYNAMIC_FTRACE */
diff --git a/arch/x86/kernel/irq.c b/arch/x86/kernel/irq.c
index dbb6087..d99f31d 100644
--- a/arch/x86/kernel/irq.c
+++ b/arch/x86/kernel/irq.c
@@ -266,6 +266,14 @@
 EXPORT_SYMBOL_GPL(vector_used_by_percpu_irq);
 
 #ifdef CONFIG_HOTPLUG_CPU
+
+/* These two declarations are only used in check_irq_vectors_for_cpu_disable()
+ * below, which is protected by stop_machine().  Putting them on the stack
+ * results in a stack frame overflow.  Dynamically allocating could result in a
+ * failure, so declare these two cpumasks as global.
+ */
+static struct cpumask affinity_new, online_new;
+
 /*
  * This cpu is going to be removed and its vectors migrated to the remaining
  * online cpus.  Check to see if there are enough vectors in the remaining cpus.
@@ -277,7 +285,6 @@
 	unsigned int this_cpu, vector, this_count, count;
 	struct irq_desc *desc;
 	struct irq_data *data;
-	struct cpumask affinity_new, online_new;
 
 	this_cpu = smp_processor_id();
 	cpumask_copy(&online_new, cpu_online_mask);
diff --git a/arch/x86/kernel/kvm.c b/arch/x86/kernel/kvm.c
index 6dd802c..713f1b3 100644
--- a/arch/x86/kernel/kvm.c
+++ b/arch/x86/kernel/kvm.c
@@ -500,6 +500,38 @@
 #endif
 }
 
+static noinline uint32_t __kvm_cpuid_base(void)
+{
+	if (boot_cpu_data.cpuid_level < 0)
+		return 0;	/* So we don't blow up on old processors */
+
+	if (cpu_has_hypervisor)
+		return hypervisor_cpuid_base("KVMKVMKVM\0\0\0", 0);
+
+	return 0;
+}
+
+static inline uint32_t kvm_cpuid_base(void)
+{
+	static int kvm_cpuid_base = -1;
+
+	if (kvm_cpuid_base == -1)
+		kvm_cpuid_base = __kvm_cpuid_base();
+
+	return kvm_cpuid_base;
+}
+
+bool kvm_para_available(void)
+{
+	return kvm_cpuid_base() != 0;
+}
+EXPORT_SYMBOL_GPL(kvm_para_available);
+
+unsigned int kvm_arch_para_features(void)
+{
+	return cpuid_eax(kvm_cpuid_base() | KVM_CPUID_FEATURES);
+}
+
 static uint32_t __init kvm_detect(void)
 {
 	return kvm_cpuid_base();
@@ -673,7 +705,7 @@
 /* Track spinlock on which a cpu is waiting */
 static DEFINE_PER_CPU(struct kvm_lock_waiting, klock_waiting);
 
-static void kvm_lock_spinning(struct arch_spinlock *lock, __ticket_t want)
+__visible void kvm_lock_spinning(struct arch_spinlock *lock, __ticket_t want)
 {
 	struct kvm_lock_waiting *w;
 	int cpu;
diff --git a/arch/x86/kernel/pci-dma.c b/arch/x86/kernel/pci-dma.c
index 872079a..f7d0672 100644
--- a/arch/x86/kernel/pci-dma.c
+++ b/arch/x86/kernel/pci-dma.c
@@ -100,8 +100,10 @@
 	flag |= __GFP_ZERO;
 again:
 	page = NULL;
-	if (!(flag & GFP_ATOMIC))
+	/* CMA can be used only in a context that permits sleeping */
+	if (flag & __GFP_WAIT)
 		page = dma_alloc_from_contiguous(dev, count, get_order(size));
+	/* fallback */
 	if (!page)
 		page = alloc_pages_node(dev_to_node(dev), flag, get_order(size));
 	if (!page)
diff --git a/arch/x86/kernel/quirks.c b/arch/x86/kernel/quirks.c
index 04ee1e2..7c6acd4 100644
--- a/arch/x86/kernel/quirks.c
+++ b/arch/x86/kernel/quirks.c
@@ -571,3 +571,40 @@
 			quirk_amd_nb_node);
 
 #endif
+
+#ifdef CONFIG_PCI
+/*
+ * The processor does not ensure that the DRAM scrub read/write sequence
+ * is atomic wrt accesses to the CC6 save state area. Therefore, if a
+ * concurrent scrub read/write access is to the same address, the entry
+ * may appear as if it is not written. This quirk applies to Fam16h
+ * models 00h-0Fh.
+ *
+ * See "Revision Guide" for AMD F16h models 00h-0fh,
+ * document 51810 rev. 3.04, Nov 2013
+ */
+static void amd_disable_seq_and_redirect_scrub(struct pci_dev *dev)
+{
+	u32 val;
+
+	/*
+	 * Suggested workaround:
+	 * set D18F3x58[4:0] = 00h and set D18F3x5C[0] = 0b
+	 */
+	pci_read_config_dword(dev, 0x58, &val);
+	if (val & 0x1F) {
+		val &= ~(0x1F);
+		pci_write_config_dword(dev, 0x58, val);
+	}
+
+	pci_read_config_dword(dev, 0x5C, &val);
+	if (val & BIT(0)) {
+		val &= ~BIT(0);
+		pci_write_config_dword(dev, 0x5c, val);
+	}
+}
+
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_16H_NB_F3,
+			amd_disable_seq_and_redirect_scrub);
+
+#endif
diff --git a/arch/x86/kernel/tsc.c b/arch/x86/kernel/tsc.c
index 19e5adb..acb3b60 100644
--- a/arch/x86/kernel/tsc.c
+++ b/arch/x86/kernel/tsc.c
@@ -209,7 +209,7 @@
 	 * dance when its actually needed.
 	 */
 
-	preempt_disable();
+	preempt_disable_notrace();
 	data = this_cpu_read(cyc2ns.head);
 	tail = this_cpu_read(cyc2ns.tail);
 
@@ -229,7 +229,7 @@
 		if (!--data->__count)
 			this_cpu_write(cyc2ns.tail, data);
 	}
-	preempt_enable();
+	preempt_enable_notrace();
 
 	return ns;
 }
diff --git a/arch/x86/kernel/vsmp_64.c b/arch/x86/kernel/vsmp_64.c
index 992f890..f6584a9 100644
--- a/arch/x86/kernel/vsmp_64.c
+++ b/arch/x86/kernel/vsmp_64.c
@@ -33,7 +33,7 @@
  * and vice versa.
  */
 
-static unsigned long vsmp_save_fl(void)
+asmlinkage unsigned long vsmp_save_fl(void)
 {
 	unsigned long flags = native_save_fl();
 
@@ -43,7 +43,7 @@
 }
 PV_CALLEE_SAVE_REGS_THUNK(vsmp_save_fl);
 
-static void vsmp_restore_fl(unsigned long flags)
+__visible void vsmp_restore_fl(unsigned long flags)
 {
 	if (flags & X86_EFLAGS_IF)
 		flags &= ~X86_EFLAGS_AC;
@@ -53,7 +53,7 @@
 }
 PV_CALLEE_SAVE_REGS_THUNK(vsmp_restore_fl);
 
-static void vsmp_irq_disable(void)
+asmlinkage void vsmp_irq_disable(void)
 {
 	unsigned long flags = native_save_fl();
 
@@ -61,7 +61,7 @@
 }
 PV_CALLEE_SAVE_REGS_THUNK(vsmp_irq_disable);
 
-static void vsmp_irq_enable(void)
+asmlinkage void vsmp_irq_enable(void)
 {
 	unsigned long flags = native_save_fl();
 
diff --git a/arch/x86/kvm/cpuid.h b/arch/x86/kvm/cpuid.h
index f1e48951..a2a1bb7 100644
--- a/arch/x86/kvm/cpuid.h
+++ b/arch/x86/kvm/cpuid.h
@@ -72,4 +72,12 @@
 	return best && (best->ecx & bit(X86_FEATURE_PCID));
 }
 
+static inline bool guest_cpuid_has_x2apic(struct kvm_vcpu *vcpu)
+{
+	struct kvm_cpuid_entry2 *best;
+
+	best = kvm_find_cpuid_entry(vcpu, 1, 0);
+	return best && (best->ecx & bit(X86_FEATURE_X2APIC));
+}
+
 #endif
diff --git a/arch/x86/kvm/lapic.h b/arch/x86/kvm/lapic.h
index c8b0d0d..6a11845 100644
--- a/arch/x86/kvm/lapic.h
+++ b/arch/x86/kvm/lapic.h
@@ -65,7 +65,7 @@
 		struct kvm_lapic_irq *irq, int *r, unsigned long *dest_map);
 
 u64 kvm_get_apic_base(struct kvm_vcpu *vcpu);
-void kvm_set_apic_base(struct kvm_vcpu *vcpu, u64 data);
+int kvm_set_apic_base(struct kvm_vcpu *vcpu, struct msr_data *msr_info);
 void kvm_apic_post_state_restore(struct kvm_vcpu *vcpu,
 		struct kvm_lapic_state *s);
 int kvm_lapic_find_highest_irr(struct kvm_vcpu *vcpu);
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index 5c88791..a06f101 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -4392,7 +4392,7 @@
 static void vmx_vcpu_reset(struct kvm_vcpu *vcpu)
 {
 	struct vcpu_vmx *vmx = to_vmx(vcpu);
-	u64 msr;
+	struct msr_data apic_base_msr;
 
 	vmx->rmode.vm86_active = 0;
 
@@ -4400,10 +4400,11 @@
 
 	vmx->vcpu.arch.regs[VCPU_REGS_RDX] = get_rdx_init_val();
 	kvm_set_cr8(&vmx->vcpu, 0);
-	msr = 0xfee00000 | MSR_IA32_APICBASE_ENABLE;
+	apic_base_msr.data = 0xfee00000 | MSR_IA32_APICBASE_ENABLE;
 	if (kvm_vcpu_is_bsp(&vmx->vcpu))
-		msr |= MSR_IA32_APICBASE_BSP;
-	kvm_set_apic_base(&vmx->vcpu, msr);
+		apic_base_msr.data |= MSR_IA32_APICBASE_BSP;
+	apic_base_msr.host_initiated = true;
+	kvm_set_apic_base(&vmx->vcpu, &apic_base_msr);
 
 	vmx_segment_cache_clear(vmx);
 
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 0c76f7c..39c28f09 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -257,10 +257,26 @@
 }
 EXPORT_SYMBOL_GPL(kvm_get_apic_base);
 
-void kvm_set_apic_base(struct kvm_vcpu *vcpu, u64 data)
+int kvm_set_apic_base(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
 {
-	/* TODO: reserve bits check */
-	kvm_lapic_set_base(vcpu, data);
+	u64 old_state = vcpu->arch.apic_base &
+		(MSR_IA32_APICBASE_ENABLE | X2APIC_ENABLE);
+	u64 new_state = msr_info->data &
+		(MSR_IA32_APICBASE_ENABLE | X2APIC_ENABLE);
+	u64 reserved_bits = ((~0ULL) << cpuid_maxphyaddr(vcpu)) |
+		0x2ff | (guest_cpuid_has_x2apic(vcpu) ? 0 : X2APIC_ENABLE);
+
+	if (!msr_info->host_initiated &&
+	    ((msr_info->data & reserved_bits) != 0 ||
+	     new_state == X2APIC_ENABLE ||
+	     (new_state == MSR_IA32_APICBASE_ENABLE &&
+	      old_state == (MSR_IA32_APICBASE_ENABLE | X2APIC_ENABLE)) ||
+	     (new_state == (MSR_IA32_APICBASE_ENABLE | X2APIC_ENABLE) &&
+	      old_state == 0)))
+		return 1;
+
+	kvm_lapic_set_base(vcpu, msr_info->data);
+	return 0;
 }
 EXPORT_SYMBOL_GPL(kvm_set_apic_base);
 
@@ -1840,6 +1856,7 @@
 		if (__copy_to_user((void __user *)addr, instructions, 4))
 			return 1;
 		kvm->arch.hv_hypercall = data;
+		mark_page_dirty(kvm, gfn);
 		break;
 	}
 	case HV_X64_MSR_REFERENCE_TSC: {
@@ -1868,19 +1885,21 @@
 {
 	switch (msr) {
 	case HV_X64_MSR_APIC_ASSIST_PAGE: {
+		u64 gfn;
 		unsigned long addr;
 
 		if (!(data & HV_X64_MSR_APIC_ASSIST_PAGE_ENABLE)) {
 			vcpu->arch.hv_vapic = data;
 			break;
 		}
-		addr = gfn_to_hva(vcpu->kvm, data >>
-				  HV_X64_MSR_APIC_ASSIST_PAGE_ADDRESS_SHIFT);
+		gfn = data >> HV_X64_MSR_APIC_ASSIST_PAGE_ADDRESS_SHIFT;
+		addr = gfn_to_hva(vcpu->kvm, gfn);
 		if (kvm_is_error_hva(addr))
 			return 1;
 		if (__clear_user((void __user *)addr, PAGE_SIZE))
 			return 1;
 		vcpu->arch.hv_vapic = data;
+		mark_page_dirty(vcpu->kvm, gfn);
 		break;
 	}
 	case HV_X64_MSR_EOI:
@@ -2006,8 +2025,7 @@
 	case 0x200 ... 0x2ff:
 		return set_msr_mtrr(vcpu, msr, data);
 	case MSR_IA32_APICBASE:
-		kvm_set_apic_base(vcpu, data);
-		break;
+		return kvm_set_apic_base(vcpu, msr_info);
 	case APIC_BASE_MSR ... APIC_BASE_MSR + 0x3ff:
 		return kvm_x2apic_msr_write(vcpu, msr, data);
 	case MSR_IA32_TSCDEADLINE:
@@ -2598,10 +2616,10 @@
 	case KVM_CAP_GET_TSC_KHZ:
 	case KVM_CAP_KVMCLOCK_CTRL:
 	case KVM_CAP_READONLY_MEM:
+	case KVM_CAP_HYPERV_TIME:
 #ifdef CONFIG_KVM_DEVICE_ASSIGNMENT
 	case KVM_CAP_ASSIGN_DEV_IRQ:
 	case KVM_CAP_PCI_2_3:
-	case KVM_CAP_HYPERV_TIME:
 #endif
 		r = 1;
 		break;
@@ -6409,6 +6427,7 @@
 int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
 				  struct kvm_sregs *sregs)
 {
+	struct msr_data apic_base_msr;
 	int mmu_reset_needed = 0;
 	int pending_vec, max_bits, idx;
 	struct desc_ptr dt;
@@ -6432,7 +6451,9 @@
 
 	mmu_reset_needed |= vcpu->arch.efer != sregs->efer;
 	kvm_x86_ops->set_efer(vcpu, sregs->efer);
-	kvm_set_apic_base(vcpu, sregs->apic_base);
+	apic_base_msr.data = sregs->apic_base;
+	apic_base_msr.host_initiated = true;
+	kvm_set_apic_base(vcpu, &apic_base_msr);
 
 	mmu_reset_needed |= kvm_read_cr0(vcpu) != sregs->cr0;
 	kvm_x86_ops->set_cr0(vcpu, sregs->cr0);
diff --git a/arch/x86/lguest/boot.c b/arch/x86/lguest/boot.c
index bdf8532..ad1fb5f 100644
--- a/arch/x86/lguest/boot.c
+++ b/arch/x86/lguest/boot.c
@@ -233,13 +233,13 @@
  * flags word contains all kind of stuff, but in practice Linux only cares
  * about the interrupt flag.  Our "save_flags()" just returns that.
  */
-static unsigned long save_fl(void)
+asmlinkage unsigned long lguest_save_fl(void)
 {
 	return lguest_data.irq_enabled;
 }
 
 /* Interrupts go off... */
-static void irq_disable(void)
+asmlinkage void lguest_irq_disable(void)
 {
 	lguest_data.irq_enabled = 0;
 }
@@ -253,8 +253,8 @@
  * PV_CALLEE_SAVE_REGS_THUNK(), which pushes %eax onto the stack, calls the
  * C function, then restores it.
  */
-PV_CALLEE_SAVE_REGS_THUNK(save_fl);
-PV_CALLEE_SAVE_REGS_THUNK(irq_disable);
+PV_CALLEE_SAVE_REGS_THUNK(lguest_save_fl);
+PV_CALLEE_SAVE_REGS_THUNK(lguest_irq_disable);
 /*:*/
 
 /* These are in i386_head.S */
@@ -1291,9 +1291,9 @@
 	 */
 
 	/* Interrupt-related operations */
-	pv_irq_ops.save_fl = PV_CALLEE_SAVE(save_fl);
+	pv_irq_ops.save_fl = PV_CALLEE_SAVE(lguest_save_fl);
 	pv_irq_ops.restore_fl = __PV_IS_CALLEE_SAVE(lg_restore_fl);
-	pv_irq_ops.irq_disable = PV_CALLEE_SAVE(irq_disable);
+	pv_irq_ops.irq_disable = PV_CALLEE_SAVE(lguest_irq_disable);
 	pv_irq_ops.irq_enable = __PV_IS_CALLEE_SAVE(lg_irq_enable);
 	pv_irq_ops.safe_halt = lguest_safe_halt;
 
diff --git a/arch/x86/math-emu/errors.c b/arch/x86/math-emu/errors.c
index 59d353d..a544908 100644
--- a/arch/x86/math-emu/errors.c
+++ b/arch/x86/math-emu/errors.c
@@ -330,11 +330,6 @@
 
 	RE_ENTRANT_CHECK_OFF;
 	if ((~control_word & n & CW_Exceptions) || (n == EX_INTERNAL)) {
-#ifdef PRINT_MESSAGES
-		/* My message from the sponsor */
-		printk(FPU_VERSION " " __DATE__ " (C) W. Metzenthen.\n");
-#endif /* PRINT_MESSAGES */
-
 		/* Get a name string for error reporting */
 		for (i = 0; exception_names[i].type; i++)
 			if ((exception_names[i].type & n) ==
diff --git a/arch/x86/mm/fault.c b/arch/x86/mm/fault.c
index 9d591c8..6dea040 100644
--- a/arch/x86/mm/fault.c
+++ b/arch/x86/mm/fault.c
@@ -1001,6 +1001,12 @@
 
 static inline bool smap_violation(int error_code, struct pt_regs *regs)
 {
+	if (!IS_ENABLED(CONFIG_X86_SMAP))
+		return false;
+
+	if (!static_cpu_has(X86_FEATURE_SMAP))
+		return false;
+
 	if (error_code & PF_USER)
 		return false;
 
@@ -1087,11 +1093,9 @@
 	if (unlikely(error_code & PF_RSVD))
 		pgtable_bad(regs, error_code, address);
 
-	if (static_cpu_has(X86_FEATURE_SMAP)) {
-		if (unlikely(smap_violation(error_code, regs))) {
-			bad_area_nosemaphore(regs, error_code, address);
-			return;
-		}
+	if (unlikely(smap_violation(error_code, regs))) {
+		bad_area_nosemaphore(regs, error_code, address);
+		return;
 	}
 
 	/*
diff --git a/arch/x86/mm/numa.c b/arch/x86/mm/numa.c
index 81b2750..27aa0455 100644
--- a/arch/x86/mm/numa.c
+++ b/arch/x86/mm/numa.c
@@ -493,14 +493,6 @@
 		struct numa_memblk *mb = &mi->blk[i];
 		memblock_set_node(mb->start, mb->end - mb->start,
 				  &memblock.memory, mb->nid);
-
-		/*
-		 * At this time, all memory regions reserved by memblock are
-		 * used by the kernel. Set the nid in memblock.reserved will
-		 * mark out all the nodes the kernel resides in.
-		 */
-		memblock_set_node(mb->start, mb->end - mb->start,
-				  &memblock.reserved, mb->nid);
 	}
 
 	/*
@@ -565,10 +557,21 @@
 static void __init numa_clear_kernel_node_hotplug(void)
 {
 	int i, nid;
-	nodemask_t numa_kernel_nodes;
+	nodemask_t numa_kernel_nodes = NODE_MASK_NONE;
 	unsigned long start, end;
 	struct memblock_type *type = &memblock.reserved;
 
+	/*
+	 * At this time, all memory regions reserved by memblock are
+	 * used by the kernel. Setting the nid in memblock.reserved will
+	 * mark out all the nodes the kernel resides in.
+	 */
+	for (i = 0; i < numa_meminfo.nr_blks; i++) {
+		struct numa_memblk *mb = &numa_meminfo.blk[i];
+		memblock_set_node(mb->start, mb->end - mb->start,
+				  &memblock.reserved, mb->nid);
+	}
+
 	/* Mark all kernel nodes. */
 	for (i = 0; i < type->cnt; i++)
 		node_set(type->regions[i].nid, numa_kernel_nodes);
diff --git a/arch/x86/mm/numa_32.c b/arch/x86/mm/numa_32.c
index 0342d27..47b6436 100644
--- a/arch/x86/mm/numa_32.c
+++ b/arch/x86/mm/numa_32.c
@@ -52,6 +52,8 @@
 			nid, start, end);
 	printk(KERN_DEBUG "  Setting physnode_map array to node %d for pfns:\n", nid);
 	printk(KERN_DEBUG "  ");
+	start = round_down(start, PAGES_PER_SECTION);
+	end = round_up(end, PAGES_PER_SECTION);
 	for (pfn = start; pfn < end; pfn += PAGES_PER_SECTION) {
 		physnode_map[pfn / PAGES_PER_SECTION] = nid;
 		printk(KERN_CONT "%lx ", pfn);
diff --git a/arch/x86/mm/srat.c b/arch/x86/mm/srat.c
index 1a25187..1953e9c 100644
--- a/arch/x86/mm/srat.c
+++ b/arch/x86/mm/srat.c
@@ -42,15 +42,25 @@
 	return acpi_numa < 0;
 }
 
-/* Callback for SLIT parsing */
+/*
+ * Callback for SLIT parsing.  pxm_to_node() returns NUMA_NO_NODE for
+ * I/O localities since SRAT does not list them.  I/O localities are
+ * not supported at this point.
+ */
 void __init acpi_numa_slit_init(struct acpi_table_slit *slit)
 {
 	int i, j;
 
-	for (i = 0; i < slit->locality_count; i++)
-		for (j = 0; j < slit->locality_count; j++)
+	for (i = 0; i < slit->locality_count; i++) {
+		if (pxm_to_node(i) == NUMA_NO_NODE)
+			continue;
+		for (j = 0; j < slit->locality_count; j++) {
+			if (pxm_to_node(j) == NUMA_NO_NODE)
+				continue;
 			numa_set_distance(pxm_to_node(i), pxm_to_node(j),
 				slit->entry[slit->locality_count * i + j]);
+		}
+	}
 }
 
 /* Callback for Proximity Domain -> x2APIC mapping */
diff --git a/arch/x86/mm/tlb.c b/arch/x86/mm/tlb.c
index ae699b3..dd8dda1 100644
--- a/arch/x86/mm/tlb.c
+++ b/arch/x86/mm/tlb.c
@@ -103,7 +103,7 @@
 	if (f->flush_mm != this_cpu_read(cpu_tlbstate.active_mm))
 		return;
 
-	count_vm_event(NR_TLB_REMOTE_FLUSH_RECEIVED);
+	count_vm_tlb_event(NR_TLB_REMOTE_FLUSH_RECEIVED);
 	if (this_cpu_read(cpu_tlbstate.state) == TLBSTATE_OK) {
 		if (f->flush_end == TLB_FLUSH_ALL)
 			local_flush_tlb();
@@ -131,7 +131,7 @@
 	info.flush_start = start;
 	info.flush_end = end;
 
-	count_vm_event(NR_TLB_REMOTE_FLUSH);
+	count_vm_tlb_event(NR_TLB_REMOTE_FLUSH);
 	if (is_uv_system()) {
 		unsigned int cpu;
 
@@ -151,44 +151,19 @@
 
 	preempt_disable();
 
-	count_vm_event(NR_TLB_LOCAL_FLUSH_ALL);
+	count_vm_tlb_event(NR_TLB_LOCAL_FLUSH_ALL);
 	local_flush_tlb();
 	if (cpumask_any_but(mm_cpumask(mm), smp_processor_id()) < nr_cpu_ids)
 		flush_tlb_others(mm_cpumask(mm), mm, 0UL, TLB_FLUSH_ALL);
 	preempt_enable();
 }
 
-/*
- * It can find out the THP large page, or
- * HUGETLB page in tlb_flush when THP disabled
- */
-static inline unsigned long has_large_page(struct mm_struct *mm,
-				 unsigned long start, unsigned long end)
-{
-	pgd_t *pgd;
-	pud_t *pud;
-	pmd_t *pmd;
-	unsigned long addr = ALIGN(start, HPAGE_SIZE);
-	for (; addr < end; addr += HPAGE_SIZE) {
-		pgd = pgd_offset(mm, addr);
-		if (likely(!pgd_none(*pgd))) {
-			pud = pud_offset(pgd, addr);
-			if (likely(!pud_none(*pud))) {
-				pmd = pmd_offset(pud, addr);
-				if (likely(!pmd_none(*pmd)))
-					if (pmd_large(*pmd))
-						return addr;
-			}
-		}
-	}
-	return 0;
-}
-
 void flush_tlb_mm_range(struct mm_struct *mm, unsigned long start,
 				unsigned long end, unsigned long vmflag)
 {
 	unsigned long addr;
 	unsigned act_entries, tlb_entries = 0;
+	unsigned long nr_base_pages;
 
 	preempt_disable();
 	if (current->active_mm != mm)
@@ -210,21 +185,20 @@
 		tlb_entries = tlb_lli_4k[ENTRIES];
 	else
 		tlb_entries = tlb_lld_4k[ENTRIES];
+
 	/* Assume all of TLB entries was occupied by this task */
-	act_entries = mm->total_vm > tlb_entries ? tlb_entries : mm->total_vm;
+	act_entries = tlb_entries >> tlb_flushall_shift;
+	act_entries = mm->total_vm > act_entries ? act_entries : mm->total_vm;
+	nr_base_pages = (end - start) >> PAGE_SHIFT;
 
 	/* tlb_flushall_shift is on balance point, details in commit log */
-	if ((end - start) >> PAGE_SHIFT > act_entries >> tlb_flushall_shift) {
-		count_vm_event(NR_TLB_LOCAL_FLUSH_ALL);
+	if (nr_base_pages > act_entries) {
+		count_vm_tlb_event(NR_TLB_LOCAL_FLUSH_ALL);
 		local_flush_tlb();
 	} else {
-		if (has_large_page(mm, start, end)) {
-			local_flush_tlb();
-			goto flush_all;
-		}
 		/* flush range by one by one 'invlpg' */
 		for (addr = start; addr < end;	addr += PAGE_SIZE) {
-			count_vm_event(NR_TLB_LOCAL_FLUSH_ONE);
+			count_vm_tlb_event(NR_TLB_LOCAL_FLUSH_ONE);
 			__flush_tlb_single(addr);
 		}
 
@@ -262,7 +236,7 @@
 
 static void do_flush_tlb_all(void *info)
 {
-	count_vm_event(NR_TLB_REMOTE_FLUSH_RECEIVED);
+	count_vm_tlb_event(NR_TLB_REMOTE_FLUSH_RECEIVED);
 	__flush_tlb_all();
 	if (this_cpu_read(cpu_tlbstate.state) == TLBSTATE_LAZY)
 		leave_mm(smp_processor_id());
@@ -270,7 +244,7 @@
 
 void flush_tlb_all(void)
 {
-	count_vm_event(NR_TLB_REMOTE_FLUSH);
+	count_vm_tlb_event(NR_TLB_REMOTE_FLUSH);
 	on_each_cpu(do_flush_tlb_all, NULL, 1);
 }
 
diff --git a/arch/x86/platform/efi/efi-bgrt.c b/arch/x86/platform/efi/efi-bgrt.c
index 7145ec6..f15103d 100644
--- a/arch/x86/platform/efi/efi-bgrt.c
+++ b/arch/x86/platform/efi/efi-bgrt.c
@@ -42,14 +42,15 @@
 
 	if (bgrt_tab->header.length < sizeof(*bgrt_tab))
 		return;
-	if (bgrt_tab->version != 1)
+	if (bgrt_tab->version != 1 || bgrt_tab->status != 1)
 		return;
 	if (bgrt_tab->image_type != 0 || !bgrt_tab->image_address)
 		return;
 
 	image = efi_lookup_mapped_addr(bgrt_tab->image_address);
 	if (!image) {
-		image = ioremap(bgrt_tab->image_address, sizeof(bmp_header));
+		image = early_memremap(bgrt_tab->image_address,
+				       sizeof(bmp_header));
 		ioremapped = true;
 		if (!image)
 			return;
@@ -57,7 +58,7 @@
 
 	memcpy_fromio(&bmp_header, image, sizeof(bmp_header));
 	if (ioremapped)
-		iounmap(image);
+		early_iounmap(image, sizeof(bmp_header));
 	bgrt_image_size = bmp_header.size;
 
 	bgrt_image = kmalloc(bgrt_image_size, GFP_KERNEL);
@@ -65,7 +66,8 @@
 		return;
 
 	if (ioremapped) {
-		image = ioremap(bgrt_tab->image_address, bmp_header.size);
+		image = early_memremap(bgrt_tab->image_address,
+				       bmp_header.size);
 		if (!image) {
 			kfree(bgrt_image);
 			bgrt_image = NULL;
@@ -75,5 +77,5 @@
 
 	memcpy_fromio(bgrt_image, image, bgrt_image_size);
 	if (ioremapped)
-		iounmap(image);
+		early_iounmap(image, bmp_header.size);
 }
diff --git a/arch/x86/platform/efi/efi.c b/arch/x86/platform/efi/efi.c
index d62ec87..1a201ac 100644
--- a/arch/x86/platform/efi/efi.c
+++ b/arch/x86/platform/efi/efi.c
@@ -792,7 +792,7 @@
 		set_memory_nx(addr, npages);
 }
 
-static void __init runtime_code_page_mkexec(void)
+void __init runtime_code_page_mkexec(void)
 {
 	efi_memory_desc_t *md;
 	void *p;
@@ -1069,8 +1069,7 @@
 	efi.update_capsule = virt_efi_update_capsule;
 	efi.query_capsule_caps = virt_efi_query_capsule_caps;
 
-	if (efi_enabled(EFI_OLD_MEMMAP) && (__supported_pte_mask & _PAGE_NX))
-		runtime_code_page_mkexec();
+	efi_runtime_mkexec();
 
 	kfree(new_memmap);
 
diff --git a/arch/x86/platform/efi/efi_32.c b/arch/x86/platform/efi/efi_32.c
index 249b183..0b74cdf 100644
--- a/arch/x86/platform/efi/efi_32.c
+++ b/arch/x86/platform/efi/efi_32.c
@@ -77,3 +77,9 @@
 
 	local_irq_restore(efi_rt_eflags);
 }
+
+void __init efi_runtime_mkexec(void)
+{
+	if (__supported_pte_mask & _PAGE_NX)
+		runtime_code_page_mkexec();
+}
diff --git a/arch/x86/platform/efi/efi_64.c b/arch/x86/platform/efi/efi_64.c
index 6284f15..0c2a234 100644
--- a/arch/x86/platform/efi/efi_64.c
+++ b/arch/x86/platform/efi/efi_64.c
@@ -233,3 +233,12 @@
 {
 	efi_setup = phys_addr + sizeof(struct setup_data);
 }
+
+void __init efi_runtime_mkexec(void)
+{
+	if (!efi_enabled(EFI_OLD_MEMMAP))
+		return;
+
+	if (__supported_pte_mask & _PAGE_NX)
+		runtime_code_page_mkexec();
+}
diff --git a/arch/x86/platform/intel-mid/device_libs/platform_ipc.h b/arch/x86/platform/intel-mid/device_libs/platform_ipc.h
index 8f568dd..79bb09d 100644
--- a/arch/x86/platform/intel-mid/device_libs/platform_ipc.h
+++ b/arch/x86/platform/intel-mid/device_libs/platform_ipc.h
@@ -12,6 +12,7 @@
 #ifndef _PLATFORM_IPC_H_
 #define _PLATFORM_IPC_H_
 
-extern void __init ipc_device_handler(struct sfi_device_table_entry *pentry,
-			struct devs_id *dev) __attribute__((weak));
+void __init
+ipc_device_handler(struct sfi_device_table_entry *pentry, struct devs_id *dev);
+
 #endif
diff --git a/arch/x86/platform/intel-mid/device_libs/platform_msic.h b/arch/x86/platform/intel-mid/device_libs/platform_msic.h
index 917eb56..b7be1d0 100644
--- a/arch/x86/platform/intel-mid/device_libs/platform_msic.h
+++ b/arch/x86/platform/intel-mid/device_libs/platform_msic.h
@@ -14,6 +14,6 @@
 
 extern struct intel_msic_platform_data msic_pdata;
 
-extern void *msic_generic_platform_data(void *info,
-			enum intel_msic_block block) __attribute__((weak));
+void *msic_generic_platform_data(void *info, enum intel_msic_block block);
+
 #endif
diff --git a/arch/x86/platform/intel-mid/intel_mid_weak_decls.h b/arch/x86/platform/intel-mid/intel_mid_weak_decls.h
index a537ffc..46aa25c 100644
--- a/arch/x86/platform/intel-mid/intel_mid_weak_decls.h
+++ b/arch/x86/platform/intel-mid/intel_mid_weak_decls.h
@@ -14,6 +14,6 @@
 /* For every CPU addition a new get_<cpuname>_ops interface needs
  * to be added.
  */
-extern void * __cpuinit get_penwell_ops(void) __attribute__((weak));
-extern void * __cpuinit get_cloverview_ops(void) __attribute__((weak));
-extern void * __init get_tangier_ops(void) __attribute__((weak));
+extern void *get_penwell_ops(void) __attribute__((weak));
+extern void *get_cloverview_ops(void) __attribute__((weak));
+extern void *get_tangier_ops(void) __attribute__((weak));
diff --git a/arch/x86/platform/intel-mid/mfld.c b/arch/x86/platform/intel-mid/mfld.c
index 4f7884e..23381d2 100644
--- a/arch/x86/platform/intel-mid/mfld.c
+++ b/arch/x86/platform/intel-mid/mfld.c
@@ -58,18 +58,18 @@
 	return 0;
 }
 
-static void __init penwell_arch_setup()
+static void __init penwell_arch_setup(void)
 {
 	x86_platform.calibrate_tsc = mfld_calibrate_tsc;
 	pm_power_off = mfld_power_off;
 }
 
-void * __cpuinit get_penwell_ops()
+void *get_penwell_ops(void)
 {
 	return &penwell_ops;
 }
 
-void * __cpuinit get_cloverview_ops()
+void *get_cloverview_ops(void)
 {
 	return &penwell_ops;
 }
diff --git a/arch/x86/platform/intel-mid/mrfl.c b/arch/x86/platform/intel-mid/mrfl.c
index 09d1015..aaca917 100644
--- a/arch/x86/platform/intel-mid/mrfl.c
+++ b/arch/x86/platform/intel-mid/mrfl.c
@@ -97,7 +97,7 @@
 	.arch_setup = tangier_arch_setup,
 };
 
-void * __cpuinit get_tangier_ops()
+void *get_tangier_ops(void)
 {
 	return &tangier_ops;
 }
diff --git a/arch/x86/platform/uv/uv_nmi.c b/arch/x86/platform/uv/uv_nmi.c
index 8eeccba..be27da6 100644
--- a/arch/x86/platform/uv/uv_nmi.c
+++ b/arch/x86/platform/uv/uv_nmi.c
@@ -74,7 +74,6 @@
 static atomic_t uv_nmi_cpu = ATOMIC_INIT(-1);
 static atomic_t uv_nmi_cpus_in_nmi = ATOMIC_INIT(-1);
 static atomic_t uv_nmi_slave_continue;
-static atomic_t uv_nmi_kexec_failed;
 static cpumask_var_t uv_nmi_cpu_mask;
 
 /* Values for uv_nmi_slave_continue */
@@ -149,7 +148,8 @@
  *  "dump"	- dump process stack for each cpu
  *  "ips"	- dump IP info for each cpu
  *  "kdump"	- do crash dump
- *  "kdb"	- enter KDB/KGDB (default)
+ *  "kdb"	- enter KDB (default)
+ *  "kgdb"	- enter KGDB
  */
 static char uv_nmi_action[8] = "kdb";
 module_param_string(action, uv_nmi_action, sizeof(uv_nmi_action), 0644);
@@ -504,6 +504,7 @@
 }
 
 #if defined(CONFIG_KEXEC)
+static atomic_t uv_nmi_kexec_failed;
 static void uv_nmi_kdump(int cpu, int master, struct pt_regs *regs)
 {
 	/* Call crash to dump system state */
@@ -537,18 +538,45 @@
 }
 #endif /* !CONFIG_KEXEC */
 
+#ifdef CONFIG_KGDB
 #ifdef CONFIG_KGDB_KDB
-/* Call KDB from NMI handler */
-static void uv_call_kdb(int cpu, struct pt_regs *regs, int master)
+static inline int uv_nmi_kdb_reason(void)
 {
-	int ret;
+	return KDB_REASON_SYSTEM_NMI;
+}
+#else /* !CONFIG_KGDB_KDB */
+static inline int uv_nmi_kdb_reason(void)
+{
+	/* Ensure the user is expecting to attach a remote gdb */
+	if (uv_nmi_action_is("kgdb"))
+		return 0;
 
+	pr_err("UV: NMI error: KDB is not enabled in this kernel\n");
+	return -1;
+}
+#endif /* CONFIG_KGDB_KDB */
+
+/*
+ * Call KGDB/KDB from NMI handler
+ *
+ * Note that if both KGDB and KDB are configured, then the action of 'kgdb' or
+ * 'kdb' has no effect on which is used.  See the KGDB documentation for further
+ * information.
+ */
+static void uv_call_kgdb_kdb(int cpu, struct pt_regs *regs, int master)
+{
 	if (master) {
+		int reason = uv_nmi_kdb_reason();
+		int ret;
+
+		if (reason < 0)
+			return;
+
 		/* call KGDB NMI handler as MASTER */
-		ret = kgdb_nmicallin(cpu, X86_TRAP_NMI, regs,
-					&uv_nmi_slave_continue);
+		ret = kgdb_nmicallin(cpu, X86_TRAP_NMI, regs, reason,
+				&uv_nmi_slave_continue);
 		if (ret) {
-			pr_alert("KDB returned error, is kgdboc set?\n");
+			pr_alert("KGDB returned error, is kgdboc set?\n");
 			atomic_set(&uv_nmi_slave_continue, SLAVE_EXIT);
 		}
 	} else {
@@ -567,12 +595,12 @@
 	uv_nmi_sync_exit(master);
 }
 
-#else /* !CONFIG_KGDB_KDB */
-static inline void uv_call_kdb(int cpu, struct pt_regs *regs, int master)
+#else /* !CONFIG_KGDB */
+static inline void uv_call_kgdb_kdb(int cpu, struct pt_regs *regs, int master)
 {
-	pr_err("UV: NMI error: KGDB/KDB is not enabled in this kernel\n");
+	pr_err("UV: NMI error: KGDB is not enabled in this kernel\n");
 }
-#endif /* !CONFIG_KGDB_KDB */
+#endif /* !CONFIG_KGDB */
 
 /*
  * UV NMI handler
@@ -606,9 +634,9 @@
 	if (uv_nmi_action_is("ips") || uv_nmi_action_is("dump"))
 		uv_nmi_dump_state(cpu, regs, master);
 
-	/* Call KDB if enabled */
-	else if (uv_nmi_action_is("kdb"))
-		uv_call_kdb(cpu, regs, master);
+	/* Call KGDB/KDB if enabled */
+	else if (uv_nmi_action_is("kdb") || uv_nmi_action_is("kgdb"))
+		uv_call_kgdb_kdb(cpu, regs, master);
 
 	/* Clear per_cpu "in nmi" flag */
 	atomic_set(&uv_cpu_nmi.state, UV_NMI_STATE_OUT);
@@ -634,7 +662,7 @@
 /*
  * NMI handler for pulling in CPUs when perf events are grabbing our NMI
  */
-int uv_handle_nmi_ping(unsigned int reason, struct pt_regs *regs)
+static int uv_handle_nmi_ping(unsigned int reason, struct pt_regs *regs)
 {
 	int ret;
 
@@ -651,7 +679,7 @@
 	return ret;
 }
 
-void uv_register_nmi_notifier(void)
+static void uv_register_nmi_notifier(void)
 {
 	if (register_nmi_handler(NMI_UNKNOWN, uv_handle_nmi, 0, "uv"))
 		pr_warn("UV: NMI handler failed to register\n");
@@ -695,6 +723,5 @@
 		uv_hub_nmi_per(cpu) = uv_hub_nmi_list[nid];
 	}
 	BUG_ON(!alloc_cpumask_var(&uv_nmi_cpu_mask, GFP_KERNEL));
+	uv_register_nmi_notifier();
 }
-
-
diff --git a/arch/x86/realmode/rm/Makefile b/arch/x86/realmode/rm/Makefile
index 9cac825..3497f14 100644
--- a/arch/x86/realmode/rm/Makefile
+++ b/arch/x86/realmode/rm/Makefile
@@ -64,20 +64,7 @@
 
 # ---------------------------------------------------------------------------
 
-# How to compile the 16-bit code.  Note we always compile for -march=i386,
-# that way we can complain to the user if the CPU is insufficient.
-KBUILD_CFLAGS	:= $(LINUXINCLUDE) -m32 -g -Os -D_SETUP -D__KERNEL__ -D_WAKEUP \
-		   -I$(srctree)/arch/x86/boot \
-		   -DDISABLE_BRANCH_PROFILING \
-		   -Wall -Wstrict-prototypes \
-		   -march=i386 -mregparm=3 \
-		   -include $(srctree)/$(src)/../../boot/code16gcc.h \
-		   -fno-strict-aliasing -fomit-frame-pointer -fno-pic \
-		   -mno-mmx -mno-sse \
-		   $(call cc-option, -ffreestanding) \
-		   $(call cc-option, -fno-toplevel-reorder,\
-		   $(call cc-option, -fno-unit-at-a-time)) \
-		   $(call cc-option, -fno-stack-protector) \
-		   $(call cc-option, -mpreferred-stack-boundary=2)
+KBUILD_CFLAGS	:= $(LINUXINCLUDE) $(REALMODE_CFLAGS) -D_SETUP -D_WAKEUP \
+		   -I$(srctree)/arch/x86/boot
 KBUILD_AFLAGS	:= $(KBUILD_CFLAGS) -D__ASSEMBLY__
 GCOV_PROFILE := n
diff --git a/arch/x86/tools/relocs.c b/arch/x86/tools/relocs.c
index 11f9285..cfbdbdb 100644
--- a/arch/x86/tools/relocs.c
+++ b/arch/x86/tools/relocs.c
@@ -1025,6 +1025,29 @@
 	}
 }
 
+/*
+ * As an aid to debugging problems with different linkers,
+ * print summary information about the relocs.
+ * Since different linkers tend to emit the sections in
+ * different orders, we use the section names in the output.
+ */
+static int do_reloc_info(struct section *sec, Elf_Rel *rel, ElfW(Sym) *sym,
+				const char *symname)
+{
+	printf("%s\t%s\t%s\t%s\n",
+		sec_name(sec->shdr.sh_info),
+		rel_type(ELF_R_TYPE(rel->r_info)),
+		symname,
+		sec_name(sym->st_shndx));
+	return 0;
+}
+
+static void print_reloc_info(void)
+{
+	printf("reloc section\treloc type\tsymbol\tsymbol section\n");
+	walk_relocs(do_reloc_info);
+}
+
 #if ELF_BITS == 64
 # define process process_64
 #else
@@ -1032,7 +1055,8 @@
 #endif
 
 void process(FILE *fp, int use_real_mode, int as_text,
-	     int show_absolute_syms, int show_absolute_relocs)
+	     int show_absolute_syms, int show_absolute_relocs,
+	     int show_reloc_info)
 {
 	regex_init(use_real_mode);
 	read_ehdr(fp);
@@ -1050,5 +1074,9 @@
 		print_absolute_relocs();
 		return;
 	}
+	if (show_reloc_info) {
+		print_reloc_info();
+		return;
+	}
 	emit_relocs(as_text, use_real_mode);
 }
diff --git a/arch/x86/tools/relocs.h b/arch/x86/tools/relocs.h
index 07cdb1e..f595906 100644
--- a/arch/x86/tools/relocs.h
+++ b/arch/x86/tools/relocs.h
@@ -29,8 +29,9 @@
 };
 
 void process_32(FILE *fp, int use_real_mode, int as_text,
-		int show_absolute_syms, int show_absolute_relocs);
+		int show_absolute_syms, int show_absolute_relocs,
+		int show_reloc_info);
 void process_64(FILE *fp, int use_real_mode, int as_text,
-		int show_absolute_syms, int show_absolute_relocs);
-
+		int show_absolute_syms, int show_absolute_relocs,
+		int show_reloc_info);
 #endif /* RELOCS_H */
diff --git a/arch/x86/tools/relocs_common.c b/arch/x86/tools/relocs_common.c
index 44d3968..acab636b 100644
--- a/arch/x86/tools/relocs_common.c
+++ b/arch/x86/tools/relocs_common.c
@@ -11,12 +11,13 @@
 
 static void usage(void)
 {
-	die("relocs [--abs-syms|--abs-relocs|--text|--realmode] vmlinux\n");
+	die("relocs [--abs-syms|--abs-relocs|--reloc-info|--text|--realmode]" \
+	    " vmlinux\n");
 }
 
 int main(int argc, char **argv)
 {
-	int show_absolute_syms, show_absolute_relocs;
+	int show_absolute_syms, show_absolute_relocs, show_reloc_info;
 	int as_text, use_real_mode;
 	const char *fname;
 	FILE *fp;
@@ -25,6 +26,7 @@
 
 	show_absolute_syms = 0;
 	show_absolute_relocs = 0;
+	show_reloc_info = 0;
 	as_text = 0;
 	use_real_mode = 0;
 	fname = NULL;
@@ -39,6 +41,10 @@
 				show_absolute_relocs = 1;
 				continue;
 			}
+			if (strcmp(arg, "--reloc-info") == 0) {
+				show_reloc_info = 1;
+				continue;
+			}
 			if (strcmp(arg, "--text") == 0) {
 				as_text = 1;
 				continue;
@@ -67,10 +73,12 @@
 	rewind(fp);
 	if (e_ident[EI_CLASS] == ELFCLASS64)
 		process_64(fp, use_real_mode, as_text,
-			   show_absolute_syms, show_absolute_relocs);
+			   show_absolute_syms, show_absolute_relocs,
+			   show_reloc_info);
 	else
 		process_32(fp, use_real_mode, as_text,
-			   show_absolute_syms, show_absolute_relocs);
+			   show_absolute_syms, show_absolute_relocs,
+			   show_reloc_info);
 	fclose(fp);
 	return 0;
 }
diff --git a/arch/x86/xen/enlighten.c b/arch/x86/xen/enlighten.c
index a4d7b64..201d09a 100644
--- a/arch/x86/xen/enlighten.c
+++ b/arch/x86/xen/enlighten.c
@@ -1473,6 +1473,18 @@
 	 * X86_CR0_TS, X86_CR0_PE, X86_CR0_ET are set by Xen for HVM guests
 	 * (which PVH shared codepaths), while X86_CR0_PG is for PVH. */
 	write_cr0(read_cr0() | X86_CR0_MP | X86_CR0_NE | X86_CR0_WP | X86_CR0_AM);
+
+	if (!cpu)
+		return;
+	/*
+	 * For the BSP, PSE and PGE are set in probe_page_size_mask(); for APs,
+	 * set them here. For all, OSFXSR and OSXMMEXCPT are set in fpu_init.
+	 */
+	if (cpu_has_pse)
+		set_in_cr4(X86_CR4_PSE);
+
+	if (cpu_has_pge)
+		set_in_cr4(X86_CR4_PGE);
 }
 
 /*
diff --git a/arch/x86/xen/grant-table.c b/arch/x86/xen/grant-table.c
index 103c93f..c985835 100644
--- a/arch/x86/xen/grant-table.c
+++ b/arch/x86/xen/grant-table.c
@@ -162,14 +162,15 @@
 	rc = arch_gnttab_map_shared(pfns, nr_grant_frames, nr_grant_frames,
 				    &xen_auto_xlat_grant_frames.vaddr);
 
-	kfree(pages);
 	if (rc) {
 		pr_warn("%s Couldn't map %ld pfns rc:%d\n", __func__,
 			nr_grant_frames, rc);
 		free_xenballooned_pages(nr_grant_frames, pages);
+		kfree(pages);
 		kfree(pfns);
 		return rc;
 	}
+	kfree(pages);
 
 	xen_auto_xlat_grant_frames.pfn = pfns;
 	xen_auto_xlat_grant_frames.count = nr_grant_frames;
diff --git a/arch/x86/xen/irq.c b/arch/x86/xen/irq.c
index 76ca326..08f763d 100644
--- a/arch/x86/xen/irq.c
+++ b/arch/x86/xen/irq.c
@@ -23,7 +23,7 @@
 	(void)HYPERVISOR_xen_version(0, NULL);
 }
 
-static unsigned long xen_save_fl(void)
+asmlinkage unsigned long xen_save_fl(void)
 {
 	struct vcpu_info *vcpu;
 	unsigned long flags;
@@ -41,7 +41,7 @@
 }
 PV_CALLEE_SAVE_REGS_THUNK(xen_save_fl);
 
-static void xen_restore_fl(unsigned long flags)
+__visible void xen_restore_fl(unsigned long flags)
 {
 	struct vcpu_info *vcpu;
 
@@ -63,7 +63,7 @@
 }
 PV_CALLEE_SAVE_REGS_THUNK(xen_restore_fl);
 
-static void xen_irq_disable(void)
+asmlinkage void xen_irq_disable(void)
 {
 	/* There's a one instruction preempt window here.  We need to
 	   make sure we're don't switch CPUs between getting the vcpu
@@ -74,7 +74,7 @@
 }
 PV_CALLEE_SAVE_REGS_THUNK(xen_irq_disable);
 
-static void xen_irq_enable(void)
+asmlinkage void xen_irq_enable(void)
 {
 	struct vcpu_info *vcpu;
 
diff --git a/arch/x86/xen/mmu.c b/arch/x86/xen/mmu.c
index c1d406f..256282e 100644
--- a/arch/x86/xen/mmu.c
+++ b/arch/x86/xen/mmu.c
@@ -365,7 +365,7 @@
 /* Assume pteval_t is equivalent to all the other *val_t types. */
 static pteval_t pte_mfn_to_pfn(pteval_t val)
 {
-	if (val & _PAGE_PRESENT) {
+	if (pteval_present(val)) {
 		unsigned long mfn = (val & PTE_PFN_MASK) >> PAGE_SHIFT;
 		unsigned long pfn = mfn_to_pfn(mfn);
 
@@ -381,7 +381,7 @@
 
 static pteval_t pte_pfn_to_mfn(pteval_t val)
 {
-	if (val & _PAGE_PRESENT) {
+	if (pteval_present(val)) {
 		unsigned long pfn = (val & PTE_PFN_MASK) >> PAGE_SHIFT;
 		pteval_t flags = val & PTE_FLAGS_MASK;
 		unsigned long mfn;
@@ -431,7 +431,7 @@
 	return val;
 }
 
-static pteval_t xen_pte_val(pte_t pte)
+__visible pteval_t xen_pte_val(pte_t pte)
 {
 	pteval_t pteval = pte.pte;
 #if 0
@@ -448,7 +448,7 @@
 }
 PV_CALLEE_SAVE_REGS_THUNK(xen_pte_val);
 
-static pgdval_t xen_pgd_val(pgd_t pgd)
+__visible pgdval_t xen_pgd_val(pgd_t pgd)
 {
 	return pte_mfn_to_pfn(pgd.pgd);
 }
@@ -479,7 +479,7 @@
 	WARN_ON(pat != 0x0007010600070106ull);
 }
 
-static pte_t xen_make_pte(pteval_t pte)
+__visible pte_t xen_make_pte(pteval_t pte)
 {
 	phys_addr_t addr = (pte & PTE_PFN_MASK);
 #if 0
@@ -514,14 +514,14 @@
 }
 PV_CALLEE_SAVE_REGS_THUNK(xen_make_pte);
 
-static pgd_t xen_make_pgd(pgdval_t pgd)
+__visible pgd_t xen_make_pgd(pgdval_t pgd)
 {
 	pgd = pte_pfn_to_mfn(pgd);
 	return native_make_pgd(pgd);
 }
 PV_CALLEE_SAVE_REGS_THUNK(xen_make_pgd);
 
-static pmdval_t xen_pmd_val(pmd_t pmd)
+__visible pmdval_t xen_pmd_val(pmd_t pmd)
 {
 	return pte_mfn_to_pfn(pmd.pmd);
 }
@@ -580,7 +580,7 @@
 }
 #endif	/* CONFIG_X86_PAE */
 
-static pmd_t xen_make_pmd(pmdval_t pmd)
+__visible pmd_t xen_make_pmd(pmdval_t pmd)
 {
 	pmd = pte_pfn_to_mfn(pmd);
 	return native_make_pmd(pmd);
@@ -588,13 +588,13 @@
 PV_CALLEE_SAVE_REGS_THUNK(xen_make_pmd);
 
 #if PAGETABLE_LEVELS == 4
-static pudval_t xen_pud_val(pud_t pud)
+__visible pudval_t xen_pud_val(pud_t pud)
 {
 	return pte_mfn_to_pfn(pud.pud);
 }
 PV_CALLEE_SAVE_REGS_THUNK(xen_pud_val);
 
-static pud_t xen_make_pud(pudval_t pud)
+__visible pud_t xen_make_pud(pudval_t pud)
 {
 	pud = pte_pfn_to_mfn(pud);
 
diff --git a/arch/x86/xen/setup.c b/arch/x86/xen/setup.c
index dd5f905..0982233 100644
--- a/arch/x86/xen/setup.c
+++ b/arch/x86/xen/setup.c
@@ -35,7 +35,7 @@
 extern const char xen_hypervisor_callback[];
 extern const char xen_failsafe_callback[];
 #ifdef CONFIG_X86_64
-extern const char nmi[];
+extern asmlinkage void nmi(void);
 #endif
 extern void xen_sysenter_target(void);
 extern void xen_syscall_target(void);
@@ -577,7 +577,7 @@
 void xen_enable_nmi(void)
 {
 #ifdef CONFIG_X86_64
-	if (register_callback(CALLBACKTYPE_nmi, nmi))
+	if (register_callback(CALLBACKTYPE_nmi, (char *)nmi))
 		BUG();
 #endif
 }
diff --git a/arch/x86/xen/spinlock.c b/arch/x86/xen/spinlock.c
index 0e36cde..581521c 100644
--- a/arch/x86/xen/spinlock.c
+++ b/arch/x86/xen/spinlock.c
@@ -106,7 +106,7 @@
 static cpumask_t waiting_cpus;
 
 static bool xen_pvspin = true;
-static void xen_lock_spinning(struct arch_spinlock *lock, __ticket_t want)
+__visible void xen_lock_spinning(struct arch_spinlock *lock, __ticket_t want)
 {
 	int irq = __this_cpu_read(lock_kicker_irq);
 	struct xen_lock_waiting *w = &__get_cpu_var(lock_waiting);
diff --git a/arch/xtensa/platforms/iss/simdisk.c b/arch/xtensa/platforms/iss/simdisk.c
index 8c6e819..48eebac 100644
--- a/arch/xtensa/platforms/iss/simdisk.c
+++ b/arch/xtensa/platforms/iss/simdisk.c
@@ -103,18 +103,18 @@
 
 static int simdisk_xfer_bio(struct simdisk *dev, struct bio *bio)
 {
-	int i;
-	struct bio_vec *bvec;
-	sector_t sector = bio->bi_sector;
+	struct bio_vec bvec;
+	struct bvec_iter iter;
+	sector_t sector = bio->bi_iter.bi_sector;
 
-	bio_for_each_segment(bvec, bio, i) {
-		char *buffer = __bio_kmap_atomic(bio, i);
-		unsigned len = bvec->bv_len >> SECTOR_SHIFT;
+	bio_for_each_segment(bvec, bio, iter) {
+		char *buffer = __bio_kmap_atomic(bio, iter);
+		unsigned len = bvec.bv_len >> SECTOR_SHIFT;
 
 		simdisk_transfer(dev, sector, len, buffer,
 				bio_data_dir(bio) == WRITE);
 		sector += len;
-		__bio_kunmap_atomic(bio);
+		__bio_kunmap_atomic(buffer);
 	}
 	return 0;
 }
diff --git a/block/blk-core.c b/block/blk-core.c
index 8bdd012..853f927 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -38,6 +38,7 @@
 
 #include "blk.h"
 #include "blk-cgroup.h"
+#include "blk-mq.h"
 
 EXPORT_TRACEPOINT_SYMBOL_GPL(block_bio_remap);
 EXPORT_TRACEPOINT_SYMBOL_GPL(block_rq_remap);
@@ -130,7 +131,7 @@
 	bio_advance(bio, nbytes);
 
 	/* don't actually finish bio if it's part of flush sequence */
-	if (bio->bi_size == 0 && !(rq->cmd_flags & REQ_FLUSH_SEQ))
+	if (bio->bi_iter.bi_size == 0 && !(rq->cmd_flags & REQ_FLUSH_SEQ))
 		bio_endio(bio, error);
 }
 
@@ -245,7 +246,16 @@
 void blk_sync_queue(struct request_queue *q)
 {
 	del_timer_sync(&q->timeout);
-	cancel_delayed_work_sync(&q->delay_work);
+
+	if (q->mq_ops) {
+		struct blk_mq_hw_ctx *hctx;
+		int i;
+
+		queue_for_each_hw_ctx(q, hctx, i)
+			cancel_delayed_work_sync(&hctx->delayed_work);
+	} else {
+		cancel_delayed_work_sync(&q->delay_work);
+	}
 }
 EXPORT_SYMBOL(blk_sync_queue);
 
@@ -497,8 +507,13 @@
 	 * Drain all requests queued before DYING marking. Set DEAD flag to
 	 * prevent that q->request_fn() gets invoked after draining finished.
 	 */
-	spin_lock_irq(lock);
-	__blk_drain_queue(q, true);
+	if (q->mq_ops) {
+		blk_mq_drain_queue(q);
+		spin_lock_irq(lock);
+	} else {
+		spin_lock_irq(lock);
+		__blk_drain_queue(q, true);
+	}
 	queue_flag_set(QUEUE_FLAG_DEAD, q);
 	spin_unlock_irq(lock);
 
@@ -678,11 +693,20 @@
 	if (!uninit_q)
 		return NULL;
 
+	uninit_q->flush_rq = kzalloc(sizeof(struct request), GFP_KERNEL);
+	if (!uninit_q->flush_rq)
+		goto out_cleanup_queue;
+
 	q = blk_init_allocated_queue(uninit_q, rfn, lock);
 	if (!q)
-		blk_cleanup_queue(uninit_q);
-
+		goto out_free_flush_rq;
 	return q;
+
+out_free_flush_rq:
+	kfree(uninit_q->flush_rq);
+out_cleanup_queue:
+	blk_cleanup_queue(uninit_q);
+	return NULL;
 }
 EXPORT_SYMBOL(blk_init_queue_node);
 
@@ -1112,7 +1136,7 @@
 struct request *blk_get_request(struct request_queue *q, int rw, gfp_t gfp_mask)
 {
 	if (q->mq_ops)
-		return blk_mq_alloc_request(q, rw, gfp_mask, false);
+		return blk_mq_alloc_request(q, rw, gfp_mask);
 	else
 		return blk_old_get_request(q, rw, gfp_mask);
 }
@@ -1263,6 +1287,11 @@
 	if (unlikely(!q))
 		return;
 
+	if (q->mq_ops) {
+		blk_mq_free_request(req);
+		return;
+	}
+
 	blk_pm_put_request(req);
 
 	elv_completed_request(q, req);
@@ -1326,7 +1355,7 @@
 	bio->bi_io_vec->bv_offset = 0;
 	bio->bi_io_vec->bv_len = len;
 
-	bio->bi_size = len;
+	bio->bi_iter.bi_size = len;
 	bio->bi_vcnt = 1;
 	bio->bi_phys_segments = 1;
 
@@ -1351,7 +1380,7 @@
 
 	req->biotail->bi_next = bio;
 	req->biotail = bio;
-	req->__data_len += bio->bi_size;
+	req->__data_len += bio->bi_iter.bi_size;
 	req->ioprio = ioprio_best(req->ioprio, bio_prio(bio));
 
 	blk_account_io_start(req, false);
@@ -1380,8 +1409,8 @@
 	 * not touch req->buffer either...
 	 */
 	req->buffer = bio_data(bio);
-	req->__sector = bio->bi_sector;
-	req->__data_len += bio->bi_size;
+	req->__sector = bio->bi_iter.bi_sector;
+	req->__data_len += bio->bi_iter.bi_size;
 	req->ioprio = ioprio_best(req->ioprio, bio_prio(bio));
 
 	blk_account_io_start(req, false);
@@ -1459,7 +1488,7 @@
 		req->cmd_flags |= REQ_FAILFAST_MASK;
 
 	req->errors = 0;
-	req->__sector = bio->bi_sector;
+	req->__sector = bio->bi_iter.bi_sector;
 	req->ioprio = bio_prio(bio);
 	blk_rq_bio_prep(req->q, req, bio);
 }
@@ -1583,12 +1612,12 @@
 	if (bio_sectors(bio) && bdev != bdev->bd_contains) {
 		struct hd_struct *p = bdev->bd_part;
 
-		bio->bi_sector += p->start_sect;
+		bio->bi_iter.bi_sector += p->start_sect;
 		bio->bi_bdev = bdev->bd_contains;
 
 		trace_block_bio_remap(bdev_get_queue(bio->bi_bdev), bio,
 				      bdev->bd_dev,
-				      bio->bi_sector - p->start_sect);
+				      bio->bi_iter.bi_sector - p->start_sect);
 	}
 }
 
@@ -1654,7 +1683,7 @@
 	/* Test device or partition size, when known. */
 	maxsector = i_size_read(bio->bi_bdev->bd_inode) >> 9;
 	if (maxsector) {
-		sector_t sector = bio->bi_sector;
+		sector_t sector = bio->bi_iter.bi_sector;
 
 		if (maxsector < nr_sectors || maxsector - nr_sectors < sector) {
 			/*
@@ -1690,7 +1719,7 @@
 		       "generic_make_request: Trying to access "
 			"nonexistent block-device %s (%Lu)\n",
 			bdevname(bio->bi_bdev, b),
-			(long long) bio->bi_sector);
+			(long long) bio->bi_iter.bi_sector);
 		goto end_io;
 	}
 
@@ -1704,9 +1733,9 @@
 	}
 
 	part = bio->bi_bdev->bd_part;
-	if (should_fail_request(part, bio->bi_size) ||
+	if (should_fail_request(part, bio->bi_iter.bi_size) ||
 	    should_fail_request(&part_to_disk(part)->part0,
-				bio->bi_size))
+				bio->bi_iter.bi_size))
 		goto end_io;
 
 	/*
@@ -1865,7 +1894,7 @@
 		if (rw & WRITE) {
 			count_vm_events(PGPGOUT, count);
 		} else {
-			task_io_account_read(bio->bi_size);
+			task_io_account_read(bio->bi_iter.bi_size);
 			count_vm_events(PGPGIN, count);
 		}
 
@@ -1874,7 +1903,7 @@
 			printk(KERN_DEBUG "%s(%d): %s block %Lu on %s (%u sectors)\n",
 			current->comm, task_pid_nr(current),
 				(rw & WRITE) ? "WRITE" : "READ",
-				(unsigned long long)bio->bi_sector,
+				(unsigned long long)bio->bi_iter.bi_sector,
 				bdevname(bio->bi_bdev, b),
 				count);
 		}
@@ -2007,7 +2036,7 @@
 	for (bio = rq->bio; bio; bio = bio->bi_next) {
 		if ((bio->bi_rw & ff) != ff)
 			break;
-		bytes += bio->bi_size;
+		bytes += bio->bi_iter.bi_size;
 	}
 
 	/* this could lead to infinite loop */
@@ -2378,9 +2407,9 @@
 	total_bytes = 0;
 	while (req->bio) {
 		struct bio *bio = req->bio;
-		unsigned bio_bytes = min(bio->bi_size, nr_bytes);
+		unsigned bio_bytes = min(bio->bi_iter.bi_size, nr_bytes);
 
-		if (bio_bytes == bio->bi_size)
+		if (bio_bytes == bio->bi_iter.bi_size)
 			req->bio = bio->bi_next;
 
 		req_bio_endio(req, bio, bio_bytes, error);
@@ -2728,7 +2757,7 @@
 		rq->nr_phys_segments = bio_phys_segments(q, bio);
 		rq->buffer = bio_data(bio);
 	}
-	rq->__data_len = bio->bi_size;
+	rq->__data_len = bio->bi_iter.bi_size;
 	rq->bio = rq->biotail = bio;
 
 	if (bio->bi_bdev)
@@ -2746,10 +2775,10 @@
 void rq_flush_dcache_pages(struct request *rq)
 {
 	struct req_iterator iter;
-	struct bio_vec *bvec;
+	struct bio_vec bvec;
 
 	rq_for_each_segment(bvec, rq, iter)
-		flush_dcache_page(bvec->bv_page);
+		flush_dcache_page(bvec.bv_page);
 }
 EXPORT_SYMBOL_GPL(rq_flush_dcache_pages);
 #endif
diff --git a/block/blk-exec.c b/block/blk-exec.c
index c3edf9d..c68613b 100644
--- a/block/blk-exec.c
+++ b/block/blk-exec.c
@@ -60,8 +60,12 @@
 	rq->rq_disk = bd_disk;
 	rq->end_io = done;
 
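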
+	/*
+	 * don't check dying flag for MQ because the request won't
+	 * be reused after dying flag is set
+	 */
 	if (q->mq_ops) {
-		blk_mq_insert_request(q, rq, true);
+		blk_mq_insert_request(q, rq, at_head, true);
 		return;
 	}
 
diff --git a/block/blk-flush.c b/block/blk-flush.c
index fb6f3c0..66e2b69 100644
--- a/block/blk-flush.c
+++ b/block/blk-flush.c
@@ -130,20 +130,26 @@
 	blk_clear_rq_complete(rq);
 }
 
-static void mq_flush_data_run(struct work_struct *work)
+static void mq_flush_run(struct work_struct *work)
 {
 	struct request *rq;
 
-	rq = container_of(work, struct request, mq_flush_data);
+	rq = container_of(work, struct request, mq_flush_work);
 
 	memset(&rq->csd, 0, sizeof(rq->csd));
 	blk_mq_run_request(rq, true, false);
 }
 
-static void blk_mq_flush_data_insert(struct request *rq)
+static bool blk_flush_queue_rq(struct request *rq)
 {
-	INIT_WORK(&rq->mq_flush_data, mq_flush_data_run);
-	kblockd_schedule_work(rq->q, &rq->mq_flush_data);
+	if (rq->q->mq_ops) {
+		INIT_WORK(&rq->mq_flush_work, mq_flush_run);
+		kblockd_schedule_work(rq->q, &rq->mq_flush_work);
+		return false;
+	} else {
+		list_add_tail(&rq->queuelist, &rq->q->queue_head);
+		return true;
+	}
 }
 
 /**
@@ -187,12 +193,7 @@
 
 	case REQ_FSEQ_DATA:
 		list_move_tail(&rq->flush.list, &q->flush_data_in_flight);
-		if (q->mq_ops)
-			blk_mq_flush_data_insert(rq);
-		else {
-			list_add(&rq->queuelist, &q->queue_head);
-			queued = true;
-		}
+		queued = blk_flush_queue_rq(rq);
 		break;
 
 	case REQ_FSEQ_DONE:
@@ -216,9 +217,6 @@
 	}
 
 	kicked = blk_kick_flush(q);
-	/* blk_mq_run_flush will run queue */
-	if (q->mq_ops)
-		return queued;
 	return kicked | queued;
 }
 
@@ -230,10 +228,9 @@
 	struct request *rq, *n;
 	unsigned long flags = 0;
 
-	if (q->mq_ops) {
-		blk_mq_free_request(flush_rq);
+	if (q->mq_ops)
 		spin_lock_irqsave(&q->mq_flush_lock, flags);
-	}
+
 	running = &q->flush_queue[q->flush_running_idx];
 	BUG_ON(q->flush_pending_idx == q->flush_running_idx);
 
@@ -263,49 +260,14 @@
 	 * kblockd.
 	 */
 	if (queued || q->flush_queue_delayed) {
-		if (!q->mq_ops)
-			blk_run_queue_async(q);
-		else
-		/*
-		 * This can be optimized to only run queues with requests
-		 * queued if necessary.
-		 */
-			blk_mq_run_queues(q, true);
+		WARN_ON(q->mq_ops);
+		blk_run_queue_async(q);
 	}
 	q->flush_queue_delayed = 0;
 	if (q->mq_ops)
 		spin_unlock_irqrestore(&q->mq_flush_lock, flags);
 }
 
-static void mq_flush_work(struct work_struct *work)
-{
-	struct request_queue *q;
-	struct request *rq;
-
-	q = container_of(work, struct request_queue, mq_flush_work);
-
-	/* We don't need set REQ_FLUSH_SEQ, it's for consistency */
-	rq = blk_mq_alloc_request(q, WRITE_FLUSH|REQ_FLUSH_SEQ,
-		__GFP_WAIT|GFP_ATOMIC, true);
-	rq->cmd_type = REQ_TYPE_FS;
-	rq->end_io = flush_end_io;
-
-	blk_mq_run_request(rq, true, false);
-}
-
-/*
- * We can't directly use q->flush_rq, because it doesn't have tag and is not in
- * hctx->rqs[]. so we must allocate a new request, since we can't sleep here,
- * so offload the work to workqueue.
- *
- * Note: we assume a flush request finished in any hardware queue will flush
- * the whole disk cache.
- */
-static void mq_run_flush(struct request_queue *q)
-{
-	kblockd_schedule_work(q, &q->mq_flush_work);
-}
-
 /**
  * blk_kick_flush - consider issuing flush request
  * @q: request_queue being kicked
@@ -340,19 +302,31 @@
 	 * different from running_idx, which means flush is in flight.
 	 */
 	q->flush_pending_idx ^= 1;
+
 	if (q->mq_ops) {
-		mq_run_flush(q);
-		return true;
+		struct blk_mq_ctx *ctx = first_rq->mq_ctx;
+		struct blk_mq_hw_ctx *hctx = q->mq_ops->map_queue(q, ctx->cpu);
+
+		blk_mq_rq_init(hctx, q->flush_rq);
+		q->flush_rq->mq_ctx = ctx;
+
+		/*
+		 * Reuse the tag value from the first waiting request,
+		 * with blk-mq the tag is generated during request
+		 * allocation and drivers can rely on it being inside
+		 * the range they asked for.
+		 */
+		q->flush_rq->tag = first_rq->tag;
+	} else {
+		blk_rq_init(q, q->flush_rq);
 	}
 
-	blk_rq_init(q, &q->flush_rq);
-	q->flush_rq.cmd_type = REQ_TYPE_FS;
-	q->flush_rq.cmd_flags = WRITE_FLUSH | REQ_FLUSH_SEQ;
-	q->flush_rq.rq_disk = first_rq->rq_disk;
-	q->flush_rq.end_io = flush_end_io;
+	q->flush_rq->cmd_type = REQ_TYPE_FS;
+	q->flush_rq->cmd_flags = WRITE_FLUSH | REQ_FLUSH_SEQ;
+	q->flush_rq->rq_disk = first_rq->rq_disk;
+	q->flush_rq->end_io = flush_end_io;
 
-	list_add_tail(&q->flush_rq.queuelist, &q->queue_head);
-	return true;
+	return blk_flush_queue_rq(q->flush_rq);
 }
 
 static void flush_data_end_io(struct request *rq, int error)
@@ -548,7 +522,7 @@
 	 * copied from blk_rq_pos(rq).
 	 */
 	if (error_sector)
-		*error_sector = bio->bi_sector;
+		*error_sector = bio->bi_iter.bi_sector;
 
 	bio_put(bio);
 	return ret;
@@ -558,5 +532,4 @@
 void blk_mq_init_flush(struct request_queue *q)
 {
 	spin_lock_init(&q->mq_flush_lock);
-	INIT_WORK(&q->mq_flush_work, mq_flush_work);
 }
diff --git a/block/blk-integrity.c b/block/blk-integrity.c
index 03cf717..7fbab84 100644
--- a/block/blk-integrity.c
+++ b/block/blk-integrity.c
@@ -43,30 +43,32 @@
  */
 int blk_rq_count_integrity_sg(struct request_queue *q, struct bio *bio)
 {
-	struct bio_vec *iv, *ivprv = NULL;
+	struct bio_vec iv, ivprv = { NULL };
 	unsigned int segments = 0;
 	unsigned int seg_size = 0;
-	unsigned int i = 0;
+	struct bvec_iter iter;
+	int prev = 0;
 
-	bio_for_each_integrity_vec(iv, bio, i) {
+	bio_for_each_integrity_vec(iv, bio, iter) {
 
-		if (ivprv) {
-			if (!BIOVEC_PHYS_MERGEABLE(ivprv, iv))
+		if (prev) {
+			if (!BIOVEC_PHYS_MERGEABLE(&ivprv, &iv))
 				goto new_segment;
 
-			if (!BIOVEC_SEG_BOUNDARY(q, ivprv, iv))
+			if (!BIOVEC_SEG_BOUNDARY(q, &ivprv, &iv))
 				goto new_segment;
 
-			if (seg_size + iv->bv_len > queue_max_segment_size(q))
+			if (seg_size + iv.bv_len > queue_max_segment_size(q))
 				goto new_segment;
 
-			seg_size += iv->bv_len;
+			seg_size += iv.bv_len;
 		} else {
 new_segment:
 			segments++;
-			seg_size = iv->bv_len;
+			seg_size = iv.bv_len;
 		}
 
+		prev = 1;
 		ivprv = iv;
 	}
 
@@ -87,24 +89,25 @@
 int blk_rq_map_integrity_sg(struct request_queue *q, struct bio *bio,
 			    struct scatterlist *sglist)
 {
-	struct bio_vec *iv, *ivprv = NULL;
+	struct bio_vec iv, ivprv = { NULL };
 	struct scatterlist *sg = NULL;
 	unsigned int segments = 0;
-	unsigned int i = 0;
+	struct bvec_iter iter;
+	int prev = 0;
 
-	bio_for_each_integrity_vec(iv, bio, i) {
+	bio_for_each_integrity_vec(iv, bio, iter) {
 
-		if (ivprv) {
-			if (!BIOVEC_PHYS_MERGEABLE(ivprv, iv))
+		if (prev) {
+			if (!BIOVEC_PHYS_MERGEABLE(&ivprv, &iv))
 				goto new_segment;
 
-			if (!BIOVEC_SEG_BOUNDARY(q, ivprv, iv))
+			if (!BIOVEC_SEG_BOUNDARY(q, &ivprv, &iv))
 				goto new_segment;
 
-			if (sg->length + iv->bv_len > queue_max_segment_size(q))
+			if (sg->length + iv.bv_len > queue_max_segment_size(q))
 				goto new_segment;
 
-			sg->length += iv->bv_len;
+			sg->length += iv.bv_len;
 		} else {
 new_segment:
 			if (!sg)
@@ -114,10 +117,11 @@
 				sg = sg_next(sg);
 			}
 
-			sg_set_page(sg, iv->bv_page, iv->bv_len, iv->bv_offset);
+			sg_set_page(sg, iv.bv_page, iv.bv_len, iv.bv_offset);
 			segments++;
 		}
 
+		prev = 1;
 		ivprv = iv;
 	}
 
diff --git a/block/blk-lib.c b/block/blk-lib.c
index 9b5b561..97a733c 100644
--- a/block/blk-lib.c
+++ b/block/blk-lib.c
@@ -108,17 +108,25 @@
 			req_sects = end_sect - sector;
 		}
 
-		bio->bi_sector = sector;
+		bio->bi_iter.bi_sector = sector;
 		bio->bi_end_io = bio_batch_end_io;
 		bio->bi_bdev = bdev;
 		bio->bi_private = &bb;
 
-		bio->bi_size = req_sects << 9;
+		bio->bi_iter.bi_size = req_sects << 9;
 		nr_sects -= req_sects;
 		sector = end_sect;
 
 		atomic_inc(&bb.done);
 		submit_bio(type, bio);
+
+		/*
+		 * We can loop for a long time in here, if someone does
+		 * full device discards (like mkfs). Be nice and allow
+		 * us to schedule out to avoid softlocking if preempt
+		 * is disabled.
+		 */
+		cond_resched();
 	}
 	blk_finish_plug(&plug);
 
@@ -174,7 +182,7 @@
 			break;
 		}
 
-		bio->bi_sector = sector;
+		bio->bi_iter.bi_sector = sector;
 		bio->bi_end_io = bio_batch_end_io;
 		bio->bi_bdev = bdev;
 		bio->bi_private = &bb;
@@ -184,11 +192,11 @@
 		bio->bi_io_vec->bv_len = bdev_logical_block_size(bdev);
 
 		if (nr_sects > max_write_same_sectors) {
-			bio->bi_size = max_write_same_sectors << 9;
+			bio->bi_iter.bi_size = max_write_same_sectors << 9;
 			nr_sects -= max_write_same_sectors;
 			sector += max_write_same_sectors;
 		} else {
-			bio->bi_size = nr_sects << 9;
+			bio->bi_iter.bi_size = nr_sects << 9;
 			nr_sects = 0;
 		}
 
@@ -240,7 +248,7 @@
 			break;
 		}
 
-		bio->bi_sector = sector;
+		bio->bi_iter.bi_sector = sector;
 		bio->bi_bdev   = bdev;
 		bio->bi_end_io = bio_batch_end_io;
 		bio->bi_private = &bb;
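
The cond_resched() added to the discard loop above addresses soft lockups during mkfs-style whole-device discards. A simplified, hypothetical sketch of that submission-loop pattern (my_* names are illustrative; error handling and waiting for completion are trimmed):

/*
 * Illustrative sketch only: a long submission loop in the style of
 * blkdev_issue_discard() above.
 */
static void my_discard_end_io(struct bio *bio, int err)
{
	bio_put(bio);
}

static void my_issue_discards(struct block_device *bdev, sector_t sector,
			      sector_t nr_sects, unsigned int max_sectors)
{
	while (nr_sects) {
		sector_t chunk = min_t(sector_t, nr_sects, max_sectors);
		struct bio *bio = bio_alloc(GFP_KERNEL, 1);

		if (!bio)
			break;
		bio->bi_iter.bi_sector = sector;	/* bi_sector now lives in bi_iter */
		bio->bi_iter.bi_size = chunk << 9;	/* so does bi_size */
		bio->bi_bdev = bdev;
		bio->bi_end_io = my_discard_end_io;
		submit_bio(REQ_WRITE | REQ_DISCARD, bio);

		sector += chunk;
		nr_sects -= chunk;

		/* Whole-device discards can spin here for a long time. */
		cond_resched();
	}
}
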
diff --git a/block/blk-map.c b/block/blk-map.c
index 623e1cd..ae4ae10 100644
--- a/block/blk-map.c
+++ b/block/blk-map.c
@@ -20,7 +20,7 @@
 		rq->biotail->bi_next = bio;
 		rq->biotail = bio;
 
-		rq->__data_len += bio->bi_size;
+		rq->__data_len += bio->bi_iter.bi_size;
 	}
 	return 0;
 }
@@ -76,7 +76,7 @@
 
 	ret = blk_rq_append_bio(q, rq, bio);
 	if (!ret)
-		return bio->bi_size;
+		return bio->bi_iter.bi_size;
 
 	/* if it was bounced we must call the end io function */
 	bio_endio(bio, 0);
@@ -220,7 +220,7 @@
 	if (IS_ERR(bio))
 		return PTR_ERR(bio);
 
-	if (bio->bi_size != len) {
+	if (bio->bi_iter.bi_size != len) {
 		/*
 		 * Grab an extra reference to this bio, as bio_unmap_user()
 		 * expects to be able to drop it twice as it happens on the
diff --git a/block/blk-merge.c b/block/blk-merge.c
index 1ffc589..6c583f9 100644
--- a/block/blk-merge.c
+++ b/block/blk-merge.c
@@ -12,38 +12,47 @@
 static unsigned int __blk_recalc_rq_segments(struct request_queue *q,
 					     struct bio *bio)
 {
-	struct bio_vec *bv, *bvprv = NULL;
-	int cluster, i, high, highprv = 1;
+	struct bio_vec bv, bvprv = { NULL };
+	int cluster, high, highprv = 1;
 	unsigned int seg_size, nr_phys_segs;
 	struct bio *fbio, *bbio;
+	struct bvec_iter iter;
 
 	if (!bio)
 		return 0;
 
+	/*
+	 * This should probably be returning 0, but blk_add_request_payload()
+	 * (Christoph!!!!)
+	 */
+	if (bio->bi_rw & REQ_DISCARD)
+		return 1;
+
+	if (bio->bi_rw & REQ_WRITE_SAME)
+		return 1;
+
 	fbio = bio;
 	cluster = blk_queue_cluster(q);
 	seg_size = 0;
 	nr_phys_segs = 0;
 	for_each_bio(bio) {
-		bio_for_each_segment(bv, bio, i) {
+		bio_for_each_segment(bv, bio, iter) {
 			/*
 			 * the trick here is making sure that a high page is
 			 * never considered part of another segment, since that
 			 * might change with the bounce page.
 			 */
-			high = page_to_pfn(bv->bv_page) > queue_bounce_pfn(q);
-			if (high || highprv)
-				goto new_segment;
-			if (cluster) {
-				if (seg_size + bv->bv_len
+			high = page_to_pfn(bv.bv_page) > queue_bounce_pfn(q);
+			if (!high && !highprv && cluster) {
+				if (seg_size + bv.bv_len
 				    > queue_max_segment_size(q))
 					goto new_segment;
-				if (!BIOVEC_PHYS_MERGEABLE(bvprv, bv))
+				if (!BIOVEC_PHYS_MERGEABLE(&bvprv, &bv))
 					goto new_segment;
-				if (!BIOVEC_SEG_BOUNDARY(q, bvprv, bv))
+				if (!BIOVEC_SEG_BOUNDARY(q, &bvprv, &bv))
 					goto new_segment;
 
-				seg_size += bv->bv_len;
+				seg_size += bv.bv_len;
 				bvprv = bv;
 				continue;
 			}
@@ -54,7 +63,7 @@
 
 			nr_phys_segs++;
 			bvprv = bv;
-			seg_size = bv->bv_len;
+			seg_size = bv.bv_len;
 			highprv = high;
 		}
 		bbio = bio;
@@ -87,6 +96,9 @@
 static int blk_phys_contig_segment(struct request_queue *q, struct bio *bio,
 				   struct bio *nxt)
 {
+	struct bio_vec end_bv = { NULL }, nxt_bv;
+	struct bvec_iter iter;
+
 	if (!blk_queue_cluster(q))
 		return 0;
 
@@ -97,34 +109,40 @@
 	if (!bio_has_data(bio))
 		return 1;
 
-	if (!BIOVEC_PHYS_MERGEABLE(__BVEC_END(bio), __BVEC_START(nxt)))
+	bio_for_each_segment(end_bv, bio, iter)
+		if (end_bv.bv_len == iter.bi_size)
+			break;
+
+	nxt_bv = bio_iovec(nxt);
+
+	if (!BIOVEC_PHYS_MERGEABLE(&end_bv, &nxt_bv))
 		return 0;
 
 	/*
 	 * bio and nxt are contiguous in memory; check if the queue allows
 	 * these two to be merged into one
 	 */
-	if (BIO_SEG_BOUNDARY(q, bio, nxt))
+	if (BIOVEC_SEG_BOUNDARY(q, &end_bv, &nxt_bv))
 		return 1;
 
 	return 0;
 }
 
-static void
+static inline void
 __blk_segment_map_sg(struct request_queue *q, struct bio_vec *bvec,
-		     struct scatterlist *sglist, struct bio_vec **bvprv,
+		     struct scatterlist *sglist, struct bio_vec *bvprv,
 		     struct scatterlist **sg, int *nsegs, int *cluster)
 {
 
 	int nbytes = bvec->bv_len;
 
-	if (*bvprv && *cluster) {
+	if (*sg && *cluster) {
 		if ((*sg)->length + nbytes > queue_max_segment_size(q))
 			goto new_segment;
 
-		if (!BIOVEC_PHYS_MERGEABLE(*bvprv, bvec))
+		if (!BIOVEC_PHYS_MERGEABLE(bvprv, bvec))
 			goto new_segment;
-		if (!BIOVEC_SEG_BOUNDARY(q, *bvprv, bvec))
+		if (!BIOVEC_SEG_BOUNDARY(q, bvprv, bvec))
 			goto new_segment;
 
 		(*sg)->length += nbytes;
@@ -150,7 +168,49 @@
 		sg_set_page(*sg, bvec->bv_page, nbytes, bvec->bv_offset);
 		(*nsegs)++;
 	}
-	*bvprv = bvec;
+	*bvprv = *bvec;
+}
+
+static int __blk_bios_map_sg(struct request_queue *q, struct bio *bio,
+			     struct scatterlist *sglist,
+			     struct scatterlist **sg)
+{
+	struct bio_vec bvec, bvprv = { NULL };
+	struct bvec_iter iter;
+	int nsegs, cluster;
+
+	nsegs = 0;
+	cluster = blk_queue_cluster(q);
+
+	if (bio->bi_rw & REQ_DISCARD) {
+		/*
+		 * This is a hack - drivers should be neither modifying the
+		 * biovec, nor relying on bi_vcnt - but because of
+		 * blk_add_request_payload(), a discard bio may or may not have
+		 * a payload we need to set up here (thank you Christoph) and
+		 * bi_vcnt is really the only way of telling if we need to.
+		 */
+
+		if (bio->bi_vcnt)
+			goto single_segment;
+
+		return 0;
+	}
+
+	if (bio->bi_rw & REQ_WRITE_SAME) {
+single_segment:
+		*sg = sglist;
+		bvec = bio_iovec(bio);
+		sg_set_page(*sg, bvec.bv_page, bvec.bv_len, bvec.bv_offset);
+		return 1;
+	}
+
+	for_each_bio(bio)
+		bio_for_each_segment(bvec, bio, iter)
+			__blk_segment_map_sg(q, &bvec, sglist, &bvprv, sg,
+					     &nsegs, &cluster);
+
+	return nsegs;
 }
 
 /*
@@ -160,24 +220,11 @@
 int blk_rq_map_sg(struct request_queue *q, struct request *rq,
 		  struct scatterlist *sglist)
 {
-	struct bio_vec *bvec, *bvprv;
-	struct req_iterator iter;
-	struct scatterlist *sg;
-	int nsegs, cluster;
+	struct scatterlist *sg = NULL;
+	int nsegs = 0;
 
-	nsegs = 0;
-	cluster = blk_queue_cluster(q);
-
-	/*
-	 * for each bio in rq
-	 */
-	bvprv = NULL;
-	sg = NULL;
-	rq_for_each_segment(bvec, rq, iter) {
-		__blk_segment_map_sg(q, bvec, sglist, &bvprv, &sg,
-				     &nsegs, &cluster);
-	} /* segments in rq */
-
+	if (rq->bio)
+		nsegs = __blk_bios_map_sg(q, rq->bio, sglist, &sg);
 
 	if (unlikely(rq->cmd_flags & REQ_COPY_USER) &&
 	    (blk_rq_bytes(rq) & q->dma_pad_mask)) {
@@ -223,21 +270,13 @@
 int blk_bio_map_sg(struct request_queue *q, struct bio *bio,
 		   struct scatterlist *sglist)
 {
-	struct bio_vec *bvec, *bvprv;
-	struct scatterlist *sg;
-	int nsegs, cluster;
-	unsigned long i;
+	struct scatterlist *sg = NULL;
+	int nsegs;
+	struct bio *next = bio->bi_next;
+	bio->bi_next = NULL;
 
-	nsegs = 0;
-	cluster = blk_queue_cluster(q);
-
-	bvprv = NULL;
-	sg = NULL;
-	bio_for_each_segment(bvec, bio, i) {
-		__blk_segment_map_sg(q, bvec, sglist, &bvprv, &sg,
-				     &nsegs, &cluster);
-	} /* segments in bio */
-
+	nsegs = __blk_bios_map_sg(q, bio, sglist, &sg);
+	bio->bi_next = next;
 	if (sg)
 		sg_mark_end(sg);
 
@@ -543,9 +582,9 @@
 
 int blk_try_merge(struct request *rq, struct bio *bio)
 {
-	if (blk_rq_pos(rq) + blk_rq_sectors(rq) == bio->bi_sector)
+	if (blk_rq_pos(rq) + blk_rq_sectors(rq) == bio->bi_iter.bi_sector)
 		return ELEVATOR_BACK_MERGE;
-	else if (blk_rq_pos(rq) - bio_sectors(bio) == bio->bi_sector)
+	else if (blk_rq_pos(rq) - bio_sectors(bio) == bio->bi_iter.bi_sector)
 		return ELEVATOR_FRONT_MERGE;
 	return ELEVATOR_NO_MERGE;
 }
diff --git a/block/blk-mq-cpu.c b/block/blk-mq-cpu.c
index 0045ace..3146bef 100644
--- a/block/blk-mq-cpu.c
+++ b/block/blk-mq-cpu.c
@@ -28,36 +28,6 @@
 	return NOTIFY_OK;
 }
 
-static void blk_mq_cpu_notify(void *data, unsigned long action,
-			      unsigned int cpu)
-{
-	if (action == CPU_DEAD || action == CPU_DEAD_FROZEN) {
-		/*
-		 * If the CPU goes away, ensure that we run any pending
-		 * completions.
-		 */
-		struct llist_node *node;
-		struct request *rq;
-
-		local_irq_disable();
-
-		node = llist_del_all(&per_cpu(ipi_lists, cpu));
-		while (node) {
-			struct llist_node *next = node->next;
-
-			rq = llist_entry(node, struct request, ll_list);
-			__blk_mq_end_io(rq, rq->errors);
-			node = next;
-		}
-
-		local_irq_enable();
-	}
-}
-
-static struct notifier_block __cpuinitdata blk_mq_main_cpu_notifier = {
-	.notifier_call	= blk_mq_main_cpu_notify,
-};
-
 void blk_mq_register_cpu_notifier(struct blk_mq_cpu_notifier *notifier)
 {
 	BUG_ON(!notifier->notify);
@@ -82,12 +52,7 @@
 	notifier->data = data;
 }
 
-static struct blk_mq_cpu_notifier __cpuinitdata cpu_notifier = {
-	.notify = blk_mq_cpu_notify,
-};
-
 void __init blk_mq_cpu_init(void)
 {
-	register_hotcpu_notifier(&blk_mq_main_cpu_notifier);
-	blk_mq_register_cpu_notifier(&cpu_notifier);
+	hotcpu_notifier(blk_mq_main_cpu_notify, 0);
 }
diff --git a/block/blk-mq-tag.c b/block/blk-mq-tag.c
index d64a02f..83ae96c 100644
--- a/block/blk-mq-tag.c
+++ b/block/blk-mq-tag.c
@@ -36,7 +36,8 @@
 {
 	int tag;
 
-	tag = percpu_ida_alloc(&tags->free_tags, gfp);
+	tag = percpu_ida_alloc(&tags->free_tags, (gfp & __GFP_WAIT) ?
+			       TASK_UNINTERRUPTIBLE : TASK_RUNNING);
 	if (tag < 0)
 		return BLK_MQ_TAG_FAIL;
 	return tag + tags->nr_reserved_tags;
@@ -52,7 +53,8 @@
 		return BLK_MQ_TAG_FAIL;
 	}
 
-	tag = percpu_ida_alloc(&tags->reserved_tags, gfp);
+	tag = percpu_ida_alloc(&tags->reserved_tags, (gfp & __GFP_WAIT) ?
+			       TASK_UNINTERRUPTIBLE : TASK_RUNNING);
 	if (tag < 0)
 		return BLK_MQ_TAG_FAIL;
 	return tag;
@@ -182,7 +184,7 @@
 ssize_t blk_mq_tag_sysfs_show(struct blk_mq_tags *tags, char *page)
 {
 	char *orig_page = page;
-	int cpu;
+	unsigned int cpu;
 
 	if (!tags)
 		return 0;
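
The tag-allocation hunks above track a percpu_ida_alloc() interface change: it now takes the task state to sleep in rather than a gfp mask, so __GFP_WAIT callers pass TASK_UNINTERRUPTIBLE and everyone else passes TASK_RUNNING (do not sleep). A small illustrative wrapper showing that mapping (my_get_tag is not a kernel symbol):

/*
 * Illustrative wrapper: how blk-mq-tag.c now maps a gfp mask onto
 * percpu_ida_alloc()'s "task state to sleep in" argument.
 */
static int my_get_tag(struct percpu_ida *pool, gfp_t gfp)
{
	int state = (gfp & __GFP_WAIT) ? TASK_UNINTERRUPTIBLE : TASK_RUNNING;

	/* TASK_RUNNING means "fail instead of sleeping" (returns < 0). */
	return percpu_ida_alloc(pool, state);
}
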
diff --git a/block/blk-mq.c b/block/blk-mq.c
index c79126e..1fa9dd1 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -27,8 +27,6 @@
 
 static void __blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx);
 
-DEFINE_PER_CPU(struct llist_head, ipi_lists);
-
 static struct blk_mq_ctx *__blk_mq_get_ctx(struct request_queue *q,
 					   unsigned int cpu)
 {
@@ -106,10 +104,13 @@
 
 	spin_lock_irq(q->queue_lock);
 	ret = wait_event_interruptible_lock_irq(q->mq_freeze_wq,
-		!blk_queue_bypass(q), *q->queue_lock);
+		!blk_queue_bypass(q) || blk_queue_dying(q),
+		*q->queue_lock);
 	/* inc usage with lock hold to avoid freeze_queue runs here */
-	if (!ret)
+	if (!ret && !blk_queue_dying(q))
 		__percpu_counter_add(&q->mq_usage_counter, 1, 1000000);
+	else if (blk_queue_dying(q))
+		ret = -ENODEV;
 	spin_unlock_irq(q->queue_lock);
 
 	return ret;
@@ -120,6 +121,22 @@
 	__percpu_counter_add(&q->mq_usage_counter, -1, 1000000);
 }
 
+static void __blk_mq_drain_queue(struct request_queue *q)
+{
+	while (true) {
+		s64 count;
+
+		spin_lock_irq(q->queue_lock);
+		count = percpu_counter_sum(&q->mq_usage_counter);
+		spin_unlock_irq(q->queue_lock);
+
+		if (count == 0)
+			break;
+		blk_mq_run_queues(q, false);
+		msleep(10);
+	}
+}
+
 /*
  * Guarantee no request is in use, so we can change any data structure of
  * the queue afterward.
@@ -133,21 +150,13 @@
 	queue_flag_set(QUEUE_FLAG_BYPASS, q);
 	spin_unlock_irq(q->queue_lock);
 
-	if (!drain)
-		return;
+	if (drain)
+		__blk_mq_drain_queue(q);
+}
 
-	while (true) {
-		s64 count;
-
-		spin_lock_irq(q->queue_lock);
-		count = percpu_counter_sum(&q->mq_usage_counter);
-		spin_unlock_irq(q->queue_lock);
-
-		if (count == 0)
-			break;
-		blk_mq_run_queues(q, false);
-		msleep(10);
-	}
+void blk_mq_drain_queue(struct request_queue *q)
+{
+	__blk_mq_drain_queue(q);
 }
 
 static void blk_mq_unfreeze_queue(struct request_queue *q)
@@ -179,6 +188,8 @@
 
 	rq->mq_ctx = ctx;
 	rq->cmd_flags = rw_flags;
+	rq->start_time = jiffies;
+	set_start_time_ns(rq);
 	ctx->rq_dispatched[rw_is_sync(rw_flags)]++;
 }
 
@@ -215,15 +226,14 @@
 	return rq;
 }
 
-struct request *blk_mq_alloc_request(struct request_queue *q, int rw,
-		gfp_t gfp, bool reserved)
+struct request *blk_mq_alloc_request(struct request_queue *q, int rw, gfp_t gfp)
 {
 	struct request *rq;
 
 	if (blk_mq_queue_enter(q))
 		return NULL;
 
-	rq = blk_mq_alloc_request_pinned(q, rw, gfp, reserved);
+	rq = blk_mq_alloc_request_pinned(q, rw, gfp, false);
 	if (rq)
 		blk_mq_put_ctx(rq->mq_ctx);
 	return rq;
@@ -247,7 +257,7 @@
 /*
  * Re-init and set pdu, if we have it
  */
-static void blk_mq_rq_init(struct blk_mq_hw_ctx *hctx, struct request *rq)
+void blk_mq_rq_init(struct blk_mq_hw_ctx *hctx, struct request *rq)
 {
 	blk_rq_init(hctx->queue, rq);
 
@@ -294,7 +304,7 @@
 		bio_endio(bio, error);
 }
 
-void blk_mq_complete_request(struct request *rq, int error)
+void blk_mq_end_io(struct request *rq, int error)
 {
 	struct bio *bio = rq->bio;
 	unsigned int bytes = 0;
@@ -305,7 +315,7 @@
 		struct bio *next = bio->bi_next;
 
 		bio->bi_next = NULL;
-		bytes += bio->bi_size;
+		bytes += bio->bi_iter.bi_size;
 		blk_mq_bio_endio(rq, bio, error);
 		bio = next;
 	}
@@ -319,87 +329,55 @@
 	else
 		blk_mq_free_request(rq);
 }
+EXPORT_SYMBOL(blk_mq_end_io);
 
-void __blk_mq_end_io(struct request *rq, int error)
+static void __blk_mq_complete_request_remote(void *data)
 {
-	if (!blk_mark_rq_complete(rq))
-		blk_mq_complete_request(rq, error);
+	struct request *rq = data;
+
+	rq->q->softirq_done_fn(rq);
 }
 
-#if defined(CONFIG_SMP)
-
-/*
- * Called with interrupts disabled.
- */
-static void ipi_end_io(void *data)
-{
-	struct llist_head *list = &per_cpu(ipi_lists, smp_processor_id());
-	struct llist_node *entry, *next;
-	struct request *rq;
-
-	entry = llist_del_all(list);
-
-	while (entry) {
-		next = entry->next;
-		rq = llist_entry(entry, struct request, ll_list);
-		__blk_mq_end_io(rq, rq->errors);
-		entry = next;
-	}
-}
-
-static int ipi_remote_cpu(struct blk_mq_ctx *ctx, const int cpu,
-			  struct request *rq, const int error)
-{
-	struct call_single_data *data = &rq->csd;
-
-	rq->errors = error;
-	rq->ll_list.next = NULL;
-
-	/*
-	 * If the list is non-empty, an existing IPI must already
-	 * be "in flight". If that is the case, we need not schedule
-	 * a new one.
-	 */
-	if (llist_add(&rq->ll_list, &per_cpu(ipi_lists, ctx->cpu))) {
-		data->func = ipi_end_io;
-		data->flags = 0;
-		__smp_call_function_single(ctx->cpu, data, 0);
-	}
-
-	return true;
-}
-#else /* CONFIG_SMP */
-static int ipi_remote_cpu(struct blk_mq_ctx *ctx, const int cpu,
-			  struct request *rq, const int error)
-{
-	return false;
-}
-#endif
-
-/*
- * End IO on this request on a multiqueue enabled driver. We'll either do
- * it directly inline, or punt to a local IPI handler on the matching
- * remote CPU.
- */
-void blk_mq_end_io(struct request *rq, int error)
+void __blk_mq_complete_request(struct request *rq)
 {
 	struct blk_mq_ctx *ctx = rq->mq_ctx;
 	int cpu;
 
-	if (!ctx->ipi_redirect)
-		return __blk_mq_end_io(rq, error);
+	if (!ctx->ipi_redirect) {
+		rq->q->softirq_done_fn(rq);
+		return;
+	}
 
 	cpu = get_cpu();
-
-	if (cpu == ctx->cpu || !cpu_online(ctx->cpu) ||
-	    !ipi_remote_cpu(ctx, cpu, rq, error))
-		__blk_mq_end_io(rq, error);
-
+	if (cpu != ctx->cpu && cpu_online(ctx->cpu)) {
+		rq->csd.func = __blk_mq_complete_request_remote;
+		rq->csd.info = rq;
+		rq->csd.flags = 0;
+		__smp_call_function_single(ctx->cpu, &rq->csd, 0);
+	} else {
+		rq->q->softirq_done_fn(rq);
+	}
 	put_cpu();
 }
-EXPORT_SYMBOL(blk_mq_end_io);
 
-static void blk_mq_start_request(struct request *rq)
+/**
+ * blk_mq_complete_request - end I/O on a request
+ * @rq:		the request being processed
+ *
+ * Description:
+ *	Ends all I/O on a request. It does not handle partial completions.
+ *	The actual completion happens out-of-order, through an IPI handler.
+ **/
+void blk_mq_complete_request(struct request *rq)
+{
+	if (unlikely(blk_should_fake_timeout(rq->q)))
+		return;
+	if (!blk_mark_rq_complete(rq))
+		__blk_mq_complete_request(rq);
+}
+EXPORT_SYMBOL(blk_mq_complete_request);
+
+static void blk_mq_start_request(struct request *rq, bool last)
 {
 	struct request_queue *q = rq->q;
 
@@ -412,6 +390,25 @@
 	 */
 	rq->deadline = jiffies + q->rq_timeout;
 	set_bit(REQ_ATOM_STARTED, &rq->atomic_flags);
+
+	if (q->dma_drain_size && blk_rq_bytes(rq)) {
+		/*
+		 * Make sure space for the drain appears.  We know we can do
+		 * this because max_hw_segments has been adjusted to be one
+		 * fewer than the device can handle.
+		 */
+		rq->nr_phys_segments++;
+	}
+
+	/*
+	 * Flag the last request in the series so that drivers know when IO
+	 * should be kicked off, if they don't do it on a per-request basis.
+	 *
+	 * Note: the flag isn't the only condition for drivers to kick off IO;
+	 * if the drive is busy, the last request might not have the bit set.
+	 */
+	if (last)
+		rq->cmd_flags |= REQ_END;
 }
 
 static void blk_mq_requeue_request(struct request *rq)
@@ -420,6 +417,11 @@
 
 	trace_block_rq_requeue(q, rq);
 	clear_bit(REQ_ATOM_STARTED, &rq->atomic_flags);
+
+	rq->cmd_flags &= ~REQ_END;
+
+	if (q->dma_drain_size && blk_rq_bytes(rq))
+		rq->nr_phys_segments--;
 }
 
 struct blk_mq_timeout_data {
@@ -587,19 +589,8 @@
 
 		rq = list_first_entry(&rq_list, struct request, queuelist);
 		list_del_init(&rq->queuelist);
-		blk_mq_start_request(rq);
 
-		/*
-		 * Last request in the series. Flag it as such, this
-		 * enables drivers to know when IO should be kicked off,
-		 * if they don't do it on a per-request basis.
-		 *
-		 * Note: the flag isn't the only condition drivers
-		 * should do kick off. If drive is busy, the last
-		 * request might not have the bit set.
-		 */
-		if (list_empty(&rq_list))
-			rq->cmd_flags |= REQ_END;
+		blk_mq_start_request(rq, list_empty(&rq_list));
 
 		ret = q->mq_ops->queue_rq(hctx, rq);
 		switch (ret) {
@@ -617,8 +608,8 @@
 			break;
 		default:
 			pr_err("blk-mq: bad return on queue: %d\n", ret);
-			rq->errors = -EIO;
 		case BLK_MQ_RQ_QUEUE_ERROR:
+			rq->errors = -EIO;
 			blk_mq_end_io(rq, rq->errors);
 			break;
 		}
@@ -721,13 +712,16 @@
 }
 
 static void __blk_mq_insert_request(struct blk_mq_hw_ctx *hctx,
-				    struct request *rq)
+				    struct request *rq, bool at_head)
 {
 	struct blk_mq_ctx *ctx = rq->mq_ctx;
 
 	trace_block_rq_insert(hctx->queue, rq);
 
-	list_add_tail(&rq->queuelist, &ctx->rq_list);
+	if (at_head)
+		list_add(&rq->queuelist, &ctx->rq_list);
+	else
+		list_add_tail(&rq->queuelist, &ctx->rq_list);
 	blk_mq_hctx_mark_pending(hctx, ctx);
 
 	/*
@@ -737,7 +731,7 @@
 }
 
 void blk_mq_insert_request(struct request_queue *q, struct request *rq,
-			   bool run_queue)
+			   bool at_head, bool run_queue)
 {
 	struct blk_mq_hw_ctx *hctx;
 	struct blk_mq_ctx *ctx, *current_ctx;
@@ -756,7 +750,7 @@
 			rq->mq_ctx = ctx;
 		}
 		spin_lock(&ctx->lock);
-		__blk_mq_insert_request(hctx, rq);
+		__blk_mq_insert_request(hctx, rq, at_head);
 		spin_unlock(&ctx->lock);
 
 		blk_mq_put_ctx(current_ctx);
@@ -788,7 +782,7 @@
 
 	/* ctx->cpu might be offline */
 	spin_lock(&ctx->lock);
-	__blk_mq_insert_request(hctx, rq);
+	__blk_mq_insert_request(hctx, rq, false);
 	spin_unlock(&ctx->lock);
 
 	blk_mq_put_ctx(current_ctx);
@@ -826,7 +820,7 @@
 		rq = list_first_entry(list, struct request, queuelist);
 		list_del_init(&rq->queuelist);
 		rq->mq_ctx = ctx;
-		__blk_mq_insert_request(hctx, rq);
+		__blk_mq_insert_request(hctx, rq, false);
 	}
 	spin_unlock(&ctx->lock);
 
@@ -916,6 +910,11 @@
 
 	blk_queue_bounce(q, &bio);
 
+	if (bio_integrity_enabled(bio) && bio_integrity_prep(bio)) {
+		bio_endio(bio, -EIO);
+		return;
+	}
+
 	if (use_plug && blk_attempt_plug_merge(q, bio, &request_count))
 		return;
 
@@ -978,7 +977,7 @@
 		__blk_mq_free_request(hctx, ctx, rq);
 	else {
 		blk_mq_bio_to_request(rq, bio);
-		__blk_mq_insert_request(hctx, rq);
+		__blk_mq_insert_request(hctx, rq, false);
 	}
 
 	spin_unlock(&ctx->lock);
@@ -1091,8 +1090,8 @@
 	struct page *page;
 
 	while (!list_empty(&hctx->page_list)) {
-		page = list_first_entry(&hctx->page_list, struct page, list);
-		list_del_init(&page->list);
+		page = list_first_entry(&hctx->page_list, struct page, lru);
+		list_del_init(&page->lru);
 		__free_pages(page, page->private);
 	}
 
@@ -1156,7 +1155,7 @@
 			break;
 
 		page->private = this_order;
-		list_add_tail(&page->list, &hctx->page_list);
+		list_add_tail(&page->lru, &hctx->page_list);
 
 		p = page_address(page);
 		entries_per_page = order_to_size(this_order) / rq_size;
@@ -1337,15 +1336,6 @@
 		reg->queue_depth = BLK_MQ_MAX_DEPTH;
 	}
 
-	/*
-	 * Set aside a tag for flush requests.  It will only be used while
-	 * another flush request is in progress but outside the driver.
-	 *
-	 * TODO: only allocate if flushes are supported
-	 */
-	reg->queue_depth++;
-	reg->reserved_tags++;
-
 	if (reg->queue_depth < (reg->reserved_tags + BLK_MQ_TAG_MIN))
 		return ERR_PTR(-EINVAL);
 
@@ -1388,17 +1378,27 @@
 	q->mq_ops = reg->ops;
 	q->queue_flags |= QUEUE_FLAG_MQ_DEFAULT;
 
+	q->sg_reserved_size = INT_MAX;
+
 	blk_queue_make_request(q, blk_mq_make_request);
 	blk_queue_rq_timed_out(q, reg->ops->timeout);
 	if (reg->timeout)
 		blk_queue_rq_timeout(q, reg->timeout);
 
+	if (reg->ops->complete)
+		blk_queue_softirq_done(q, reg->ops->complete);
+
 	blk_mq_init_flush(q);
 	blk_mq_init_cpu_queues(q, reg->nr_hw_queues);
 
-	if (blk_mq_init_hw_queues(q, reg, driver_data))
+	q->flush_rq = kzalloc(round_up(sizeof(struct request) + reg->cmd_size,
+				cache_line_size()), GFP_KERNEL);
+	if (!q->flush_rq)
 		goto err_hw;
 
+	if (blk_mq_init_hw_queues(q, reg, driver_data))
+		goto err_flush_rq;
+
 	blk_mq_map_swqueue(q);
 
 	mutex_lock(&all_q_mutex);
@@ -1406,6 +1406,9 @@
 	mutex_unlock(&all_q_mutex);
 
 	return q;
+
+err_flush_rq:
+	kfree(q->flush_rq);
 err_hw:
 	kfree(q->mq_map);
 err_map:
@@ -1429,7 +1432,6 @@
 	int i;
 
 	queue_for_each_hw_ctx(q, hctx, i) {
-		cancel_delayed_work_sync(&hctx->delayed_work);
 		kfree(hctx->ctx_map);
 		kfree(hctx->ctxs);
 		blk_mq_free_rq_map(hctx);
@@ -1451,7 +1453,6 @@
 	list_del_init(&q->all_q_node);
 	mutex_unlock(&all_q_mutex);
 }
-EXPORT_SYMBOL(blk_mq_free_queue);
 
 /* Basically redo blk_mq_init_queue with queue frozen */
 static void blk_mq_queue_reinit(struct request_queue *q)
@@ -1495,11 +1496,6 @@
 
 static int __init blk_mq_init(void)
 {
-	unsigned int i;
-
-	for_each_possible_cpu(i)
-		init_llist_head(&per_cpu(ipi_lists, i));
-
 	blk_mq_cpu_init();
 
 	/* Must be called after percpu_counter_hotcpu_callback() */
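
With the per-cpu IPI lists removed above, blk-mq completion is now two-staged: the driver calls blk_mq_complete_request() from its interrupt handler, and the core runs the queue's softirq_done_fn (registered from reg->ops->complete via blk_queue_softirq_done()) on the submitting CPU, using __smp_call_function_single() when needed. A hedged driver-side sketch, where my_dev, my_pop_completed and my_queue_rq are hypothetical:

/*
 * Illustrative driver fragment, not part of the patch.
 */
static irqreturn_t my_irq_handler(int irq, void *data)
{
	struct my_dev *dev = data;
	struct request *rq;

	while ((rq = my_pop_completed(dev)) != NULL) {
		rq->errors = 0;
		blk_mq_complete_request(rq);	/* runs ->complete on the submitting CPU */
	}
	return IRQ_HANDLED;
}

static void my_complete(struct request *rq)
{
	blk_mq_end_io(rq, rq->errors);		/* final, per-bio completion */
}

static struct blk_mq_ops my_mq_ops = {
	.queue_rq	= my_queue_rq,
	.map_queue	= blk_mq_map_queue,
	.complete	= my_complete,		/* becomes q->softirq_done_fn */
};
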
diff --git a/block/blk-mq.h b/block/blk-mq.h
index 52bf1f9..ed0035c 100644
--- a/block/blk-mq.h
+++ b/block/blk-mq.h
@@ -22,11 +22,13 @@
 	struct kobject		kobj;
 };
 
-void __blk_mq_end_io(struct request *rq, int error);
-void blk_mq_complete_request(struct request *rq, int error);
+void __blk_mq_complete_request(struct request *rq);
 void blk_mq_run_request(struct request *rq, bool run_queue, bool async);
 void blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx, bool async);
 void blk_mq_init_flush(struct request_queue *q);
+void blk_mq_drain_queue(struct request_queue *q);
+void blk_mq_free_queue(struct request_queue *q);
+void blk_mq_rq_init(struct blk_mq_hw_ctx *hctx, struct request *rq);
 
 /*
  * CPU hotplug helpers
@@ -38,7 +40,6 @@
 void blk_mq_register_cpu_notifier(struct blk_mq_cpu_notifier *notifier);
 void blk_mq_unregister_cpu_notifier(struct blk_mq_cpu_notifier *notifier);
 void blk_mq_cpu_init(void);
-DECLARE_PER_CPU(struct llist_head, ipi_lists);
 
 /*
  * CPU -> queue mappings
diff --git a/block/blk-settings.c b/block/blk-settings.c
index 05e8267..5d21239 100644
--- a/block/blk-settings.c
+++ b/block/blk-settings.c
@@ -592,6 +592,10 @@
 		ret = -1;
 	}
 
+	t->raid_partial_stripes_expensive =
+		max(t->raid_partial_stripes_expensive,
+		    b->raid_partial_stripes_expensive);
+
 	/* Find lowest common alignment_offset */
 	t->alignment_offset = lcm(t->alignment_offset, alignment)
 		& (max(t->physical_block_size, t->io_min) - 1);
diff --git a/block/blk-sysfs.c b/block/blk-sysfs.c
index 9777952..7500f87 100644
--- a/block/blk-sysfs.c
+++ b/block/blk-sysfs.c
@@ -11,6 +11,7 @@
 
 #include "blk.h"
 #include "blk-cgroup.h"
+#include "blk-mq.h"
 
 struct queue_sysfs_entry {
 	struct attribute attr;
@@ -548,6 +549,8 @@
 	if (q->mq_ops)
 		blk_mq_free_queue(q);
 
+	kfree(q->flush_rq);
+
 	blk_trace_shutdown(q);
 
 	bdi_destroy(&q->backing_dev_info);
diff --git a/block/blk-throttle.c b/block/blk-throttle.c
index a760857..1474c3a 100644
--- a/block/blk-throttle.c
+++ b/block/blk-throttle.c
@@ -877,14 +877,14 @@
 	do_div(tmp, HZ);
 	bytes_allowed = tmp;
 
-	if (tg->bytes_disp[rw] + bio->bi_size <= bytes_allowed) {
+	if (tg->bytes_disp[rw] + bio->bi_iter.bi_size <= bytes_allowed) {
 		if (wait)
 			*wait = 0;
 		return 1;
 	}
 
 	/* Calc approx time to dispatch */
-	extra_bytes = tg->bytes_disp[rw] + bio->bi_size - bytes_allowed;
+	extra_bytes = tg->bytes_disp[rw] + bio->bi_iter.bi_size - bytes_allowed;
 	jiffy_wait = div64_u64(extra_bytes * HZ, tg->bps[rw]);
 
 	if (!jiffy_wait)
@@ -987,7 +987,7 @@
 	bool rw = bio_data_dir(bio);
 
 	/* Charge the bio to the group */
-	tg->bytes_disp[rw] += bio->bi_size;
+	tg->bytes_disp[rw] += bio->bi_iter.bi_size;
 	tg->io_disp[rw]++;
 
 	/*
@@ -1003,8 +1003,8 @@
 	 */
 	if (!(bio->bi_rw & REQ_THROTTLED)) {
 		bio->bi_rw |= REQ_THROTTLED;
-		throtl_update_dispatch_stats(tg_to_blkg(tg), bio->bi_size,
-					     bio->bi_rw);
+		throtl_update_dispatch_stats(tg_to_blkg(tg),
+					     bio->bi_iter.bi_size, bio->bi_rw);
 	}
 }
 
@@ -1503,7 +1503,7 @@
 	if (tg) {
 		if (!tg->has_rules[rw]) {
 			throtl_update_dispatch_stats(tg_to_blkg(tg),
-						     bio->bi_size, bio->bi_rw);
+					bio->bi_iter.bi_size, bio->bi_rw);
 			goto out_unlock_rcu;
 		}
 	}
@@ -1559,7 +1559,7 @@
 	/* out-of-limit, queue to @tg */
 	throtl_log(sq, "[%c] bio. bdisp=%llu sz=%u bps=%llu iodisp=%u iops=%u queued=%d/%d",
 		   rw == READ ? 'R' : 'W',
-		   tg->bytes_disp[rw], bio->bi_size, tg->bps[rw],
+		   tg->bytes_disp[rw], bio->bi_iter.bi_size, tg->bps[rw],
 		   tg->io_disp[rw], tg->iops[rw],
 		   sq->nr_queued[READ], sq->nr_queued[WRITE]);
 
diff --git a/block/blk-timeout.c b/block/blk-timeout.c
index bba81c9..d96f7061 100644
--- a/block/blk-timeout.c
+++ b/block/blk-timeout.c
@@ -91,7 +91,7 @@
 	case BLK_EH_HANDLED:
 		/* Can we use req->errors here? */
 		if (q->mq_ops)
-			blk_mq_complete_request(req, req->errors);
+			__blk_mq_complete_request(req);
 		else
 			__blk_complete_request(req);
 		break;
diff --git a/block/blk.h b/block/blk.h
index c90e1d8..d23b415 100644
--- a/block/blk.h
+++ b/block/blk.h
@@ -113,7 +113,7 @@
 			q->flush_queue_delayed = 1;
 			return NULL;
 		}
-		if (unlikely(blk_queue_dying(q)) ||
+		if (unlikely(blk_queue_bypass(q)) ||
 		    !q->elevator->type->ops.elevator_dispatch_fn(q, 0))
 			return NULL;
 	}
diff --git a/block/cmdline-parser.c b/block/cmdline-parser.c
index cc2637f..9dbc67e 100644
--- a/block/cmdline-parser.c
+++ b/block/cmdline-parser.c
@@ -4,8 +4,7 @@
  * Written by Cai Zhiyong <caizhiyong@huawei.com>
  *
  */
-#include <linux/buffer_head.h>
-#include <linux/module.h>
+#include <linux/export.h>
 #include <linux/cmdline-parser.h>
 
 static int parse_subpart(struct cmdline_subpart **subpart, char *partdef)
@@ -159,6 +158,7 @@
 		*parts = next_parts;
 	}
 }
+EXPORT_SYMBOL(cmdline_parts_free);
 
 int cmdline_parts_parse(struct cmdline_parts **parts, const char *cmdline)
 {
@@ -206,6 +206,7 @@
 	cmdline_parts_free(parts);
 	goto done;
 }
+EXPORT_SYMBOL(cmdline_parts_parse);
 
 struct cmdline_parts *cmdline_parts_find(struct cmdline_parts *parts,
 					 const char *bdev)
@@ -214,17 +215,17 @@
 		parts = parts->next_parts;
 	return parts;
 }
+EXPORT_SYMBOL(cmdline_parts_find);
 
 /*
  *  add_part()
  *    0 success.
  *    1 can not add so many partitions.
  */
-void cmdline_parts_set(struct cmdline_parts *parts, sector_t disk_size,
-		       int slot,
-		       int (*add_part)(int, struct cmdline_subpart *, void *),
-		       void *param)
-
+int cmdline_parts_set(struct cmdline_parts *parts, sector_t disk_size,
+		      int slot,
+		      int (*add_part)(int, struct cmdline_subpart *, void *),
+		      void *param)
 {
 	sector_t from = 0;
 	struct cmdline_subpart *subpart;
@@ -247,4 +248,7 @@
 		if (add_part(slot, subpart, param))
 			break;
 	}
+
+	return slot;
 }
+EXPORT_SYMBOL(cmdline_parts_set);
diff --git a/block/elevator.c b/block/elevator.c
index b7ff286..42c45a7 100644
--- a/block/elevator.c
+++ b/block/elevator.c
@@ -440,7 +440,7 @@
 	/*
 	 * See if our hash lookup can find a potential backmerge.
 	 */
-	__rq = elv_rqhash_find(q, bio->bi_sector);
+	__rq = elv_rqhash_find(q, bio->bi_iter.bi_sector);
 	if (__rq && elv_rq_merge_ok(__rq, bio)) {
 		*req = __rq;
 		return ELEVATOR_BACK_MERGE;
diff --git a/block/scsi_ioctl.c b/block/scsi_ioctl.c
index 625e3e4..2648797 100644
--- a/block/scsi_ioctl.c
+++ b/block/scsi_ioctl.c
@@ -323,12 +323,14 @@
 
 	if (hdr->iovec_count) {
 		size_t iov_data_len;
-		struct iovec *iov;
+		struct iovec *iov = NULL;
 
 		ret = rw_copy_check_uvector(-1, hdr->dxferp, hdr->iovec_count,
 					    0, NULL, &iov);
-		if (ret < 0)
+		if (ret < 0) {
+			kfree(iov);
 			goto out;
+		}
 
 		iov_data_len = ret;
 		ret = 0;
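
The scsi_ioctl.c change above plugs an iovec leak: rw_copy_check_uvector() can hand back an allocated iovec array even when it fails, so the caller initializes the pointer to NULL and frees it on the error path. A generic sketch of the same pattern (my_parse_iovec is a made-up name):

/*
 * Illustrative sketch: kfree(NULL) is a no-op, so freeing on the error path
 * is always safe once the pointer starts out as NULL.
 */
static int my_parse_iovec(const struct iovec __user *uvec, unsigned long count)
{
	struct iovec *iov = NULL;
	ssize_t ret;

	ret = rw_copy_check_uvector(-1, uvec, count, 0, NULL, &iov);
	if (ret < 0) {
		kfree(iov);	/* may have been allocated before the failure */
		return ret;
	}

	/* ... use iov, then free it ... */
	kfree(iov);
	return 0;
}
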
diff --git a/drivers/acpi/acpi_processor.c b/drivers/acpi/acpi_processor.c
index c9311be..c29c2c3 100644
--- a/drivers/acpi/acpi_processor.c
+++ b/drivers/acpi/acpi_processor.c
@@ -261,7 +261,7 @@
 
 	apic_id = acpi_get_apicid(pr->handle, device_declaration, pr->acpi_id);
 	if (apic_id < 0) {
-		acpi_handle_err(pr->handle, "failed to get CPU APIC ID.\n");
+		acpi_handle_debug(pr->handle, "failed to get CPU APIC ID.\n");
 		return -ENODEV;
 	}
 	pr->apic_id = apic_id;
diff --git a/drivers/acpi/acpica/acglobal.h b/drivers/acpi/acpica/acglobal.h
index 24db8e1..4ed1aa3 100644
--- a/drivers/acpi/acpica/acglobal.h
+++ b/drivers/acpi/acpica/acglobal.h
@@ -108,7 +108,7 @@
 /*
  * Optionally enable output from the AML Debug Object.
  */
-bool ACPI_INIT_GLOBAL(acpi_gbl_enable_aml_debug_object, FALSE);
+u8 ACPI_INIT_GLOBAL(acpi_gbl_enable_aml_debug_object, FALSE);
 
 /*
  * Optionally copy the entire DSDT to local memory (instead of simply
diff --git a/drivers/acpi/battery.c b/drivers/acpi/battery.c
index 470e754..018a428 100644
--- a/drivers/acpi/battery.c
+++ b/drivers/acpi/battery.c
@@ -549,7 +549,7 @@
 {
 	unsigned long x;
 	struct acpi_battery *battery = to_acpi_battery(dev_get_drvdata(dev));
-	if (sscanf(buf, "%ld\n", &x) == 1)
+	if (sscanf(buf, "%lu\n", &x) == 1)
 		battery->alarm = x/1000;
 	if (acpi_battery_present(battery))
 		acpi_battery_set_alarm(battery);
diff --git a/drivers/acpi/bus.c b/drivers/acpi/bus.c
index 384da5a..fcb59c2 100644
--- a/drivers/acpi/bus.c
+++ b/drivers/acpi/bus.c
@@ -33,6 +33,7 @@
 #include <linux/proc_fs.h>
 #include <linux/acpi.h>
 #include <linux/slab.h>
+#include <linux/regulator/machine.h>
 #ifdef CONFIG_X86
 #include <asm/mpspec.h>
 #endif
@@ -509,6 +510,14 @@
 		goto error0;
 	}
 
+	/*
+	 * If the system is using ACPI then we can be reasonably
+	 * confident that any regulators are managed by the firmware
+	 * so tell the regulator core it has everything it needs to
+	 * know.
+	 */
+	regulator_has_full_constraints();
+
 	return;
 
       error0:
diff --git a/drivers/acpi/container.c b/drivers/acpi/container.c
index 0b6ae6e..368f9dd 100644
--- a/drivers/acpi/container.c
+++ b/drivers/acpi/container.c
@@ -79,9 +79,10 @@
 	ACPI_COMPANION_SET(dev, adev);
 	dev->release = acpi_container_release;
 	ret = device_register(dev);
-	if (ret)
+	if (ret) {
+		put_device(dev);
 		return ret;
-
+	}
 	adev->driver_data = dev;
 	return 1;
 }
diff --git a/drivers/acpi/device_pm.c b/drivers/acpi/device_pm.c
index d49f1e4..c14a00d 100644
--- a/drivers/acpi/device_pm.c
+++ b/drivers/acpi/device_pm.c
@@ -727,18 +727,6 @@
 #endif /* CONFIG_PM_SLEEP */
 
 /**
- * acpi_dev_pm_get_node - Get ACPI device node for the given physical device.
- * @dev: Device to get the ACPI node for.
- */
-struct acpi_device *acpi_dev_pm_get_node(struct device *dev)
-{
-	acpi_handle handle = ACPI_HANDLE(dev);
-	struct acpi_device *adev;
-
-	return handle && !acpi_bus_get_device(handle, &adev) ? adev : NULL;
-}
-
-/**
  * acpi_dev_pm_low_power - Put ACPI device into a low-power state.
  * @dev: Device to put into a low-power state.
  * @adev: ACPI device node corresponding to @dev.
@@ -778,7 +766,7 @@
  */
 int acpi_dev_runtime_suspend(struct device *dev)
 {
-	struct acpi_device *adev = acpi_dev_pm_get_node(dev);
+	struct acpi_device *adev = ACPI_COMPANION(dev);
 	bool remote_wakeup;
 	int error;
 
@@ -809,7 +797,7 @@
  */
 int acpi_dev_runtime_resume(struct device *dev)
 {
-	struct acpi_device *adev = acpi_dev_pm_get_node(dev);
+	struct acpi_device *adev = ACPI_COMPANION(dev);
 	int error;
 
 	if (!adev)
@@ -862,7 +850,7 @@
  */
 int acpi_dev_suspend_late(struct device *dev)
 {
-	struct acpi_device *adev = acpi_dev_pm_get_node(dev);
+	struct acpi_device *adev = ACPI_COMPANION(dev);
 	u32 target_state;
 	bool wakeup;
 	int error;
@@ -894,7 +882,7 @@
  */
 int acpi_dev_resume_early(struct device *dev)
 {
-	struct acpi_device *adev = acpi_dev_pm_get_node(dev);
+	struct acpi_device *adev = ACPI_COMPANION(dev);
 	int error;
 
 	if (!adev)
@@ -985,7 +973,7 @@
  */
 int acpi_dev_pm_attach(struct device *dev, bool power_on)
 {
-	struct acpi_device *adev = acpi_dev_pm_get_node(dev);
+	struct acpi_device *adev = ACPI_COMPANION(dev);
 
 	if (!adev)
 		return -ENODEV;
@@ -1017,7 +1005,7 @@
  */
 void acpi_dev_pm_detach(struct device *dev, bool power_off)
 {
-	struct acpi_device *adev = acpi_dev_pm_get_node(dev);
+	struct acpi_device *adev = ACPI_COMPANION(dev);
 
 	if (adev && dev->pm_domain == &acpi_general_pm_domain) {
 		dev->pm_domain = NULL;
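
The device_pm.c hunks replace the acpi_dev_pm_get_node() helper with the ACPI_COMPANION() macro, which returns the struct acpi_device bound to a physical device, or NULL. A minimal hypothetical usage sketch (my_driver_suspend is an invented callback):

/*
 * Illustrative sketch only: looking up a device's ACPI companion.
 */
static int my_driver_suspend(struct device *dev)
{
	struct acpi_device *adev = ACPI_COMPANION(dev);

	if (!adev)
		return 0;	/* no ACPI node bound to this device */

	dev_dbg(dev, "ACPI companion: %s\n", dev_name(&adev->dev));
	return 0;
}
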
diff --git a/drivers/acpi/dock.c b/drivers/acpi/dock.c
index c431c88..e9b3081 100644
--- a/drivers/acpi/dock.c
+++ b/drivers/acpi/dock.c
@@ -609,7 +609,7 @@
 static void dock_notify(struct dock_station *ds, u32 event)
 {
 	acpi_handle handle = ds->handle;
-	struct acpi_device *ad;
+	struct acpi_device *adev = NULL;
 	int surprise_removal = 0;
 
 	/*
@@ -632,7 +632,8 @@
 	switch (event) {
 	case ACPI_NOTIFY_BUS_CHECK:
 	case ACPI_NOTIFY_DEVICE_CHECK:
-		if (!dock_in_progress(ds) && acpi_bus_get_device(handle, &ad)) {
+		acpi_bus_get_device(handle, &adev);
+		if (!dock_in_progress(ds) && !acpi_device_enumerated(adev)) {
 			begin_dock(ds);
 			dock(ds);
 			if (!dock_present(ds)) {
diff --git a/drivers/acpi/proc.c b/drivers/acpi/proc.c
index 50fe34f..75c28ea 100644
--- a/drivers/acpi/proc.c
+++ b/drivers/acpi/proc.c
@@ -60,7 +60,7 @@
 				seq_printf(seq, "%c%-8s  %s:%s\n",
 					dev->wakeup.flags.run_wake ? '*' : ' ',
 					(device_may_wakeup(&dev->dev) ||
-					(ldev && device_may_wakeup(ldev))) ?
+					device_may_wakeup(ldev)) ?
 					"enabled" : "disabled",
 					ldev->bus ? ldev->bus->name :
 					"no-bus", dev_name(ldev));
diff --git a/drivers/acpi/processor_core.c b/drivers/acpi/processor_core.c
index 34e7b3c..a4eea9a 100644
--- a/drivers/acpi/processor_core.c
+++ b/drivers/acpi/processor_core.c
@@ -44,13 +44,13 @@
 		(struct acpi_madt_local_apic *)entry;
 
 	if (!(lapic->lapic_flags & ACPI_MADT_ENABLED))
-		return 0;
+		return -ENODEV;
 
 	if (lapic->processor_id != acpi_id)
-		return 0;
+		return -EINVAL;
 
 	*apic_id = lapic->id;
-	return 1;
+	return 0;
 }
 
 static int map_x2apic_id(struct acpi_subtable_header *entry,
@@ -60,14 +60,14 @@
 		(struct acpi_madt_local_x2apic *)entry;
 
 	if (!(apic->lapic_flags & ACPI_MADT_ENABLED))
-		return 0;
+		return -ENODEV;
 
 	if (device_declaration && (apic->uid == acpi_id)) {
 		*apic_id = apic->local_apic_id;
-		return 1;
+		return 0;
 	}
 
-	return 0;
+	return -EINVAL;
 }
 
 static int map_lsapic_id(struct acpi_subtable_header *entry,
@@ -77,16 +77,16 @@
 		(struct acpi_madt_local_sapic *)entry;
 
 	if (!(lsapic->lapic_flags & ACPI_MADT_ENABLED))
-		return 0;
+		return -ENODEV;
 
 	if (device_declaration) {
 		if ((entry->length < 16) || (lsapic->uid != acpi_id))
-			return 0;
+			return -EINVAL;
 	} else if (lsapic->processor_id != acpi_id)
-		return 0;
+		return -EINVAL;
 
 	*apic_id = (lsapic->id << 8) | lsapic->eid;
-	return 1;
+	return 0;
 }
 
 static int map_madt_entry(int type, u32 acpi_id)
@@ -116,13 +116,13 @@
 		struct acpi_subtable_header *header =
 			(struct acpi_subtable_header *)entry;
 		if (header->type == ACPI_MADT_TYPE_LOCAL_APIC) {
-			if (map_lapic_id(header, acpi_id, &apic_id))
+			if (!map_lapic_id(header, acpi_id, &apic_id))
 				break;
 		} else if (header->type == ACPI_MADT_TYPE_LOCAL_X2APIC) {
-			if (map_x2apic_id(header, type, acpi_id, &apic_id))
+			if (!map_x2apic_id(header, type, acpi_id, &apic_id))
 				break;
 		} else if (header->type == ACPI_MADT_TYPE_LOCAL_SAPIC) {
-			if (map_lsapic_id(header, type, acpi_id, &apic_id))
+			if (!map_lsapic_id(header, type, acpi_id, &apic_id))
 				break;
 		}
 		entry += header->length;
diff --git a/drivers/acpi/scan.c b/drivers/acpi/scan.c
index e00365c..57b053f 100644
--- a/drivers/acpi/scan.c
+++ b/drivers/acpi/scan.c
@@ -484,7 +484,6 @@
 static void acpi_hotplug_notify_cb(acpi_handle handle, u32 type, void *data)
 {
 	u32 ost_code = ACPI_OST_SC_NON_SPECIFIC_FAILURE;
-	struct acpi_scan_handler *handler = data;
 	struct acpi_device *adev;
 	acpi_status status;
 
@@ -500,7 +499,10 @@
 		break;
 	case ACPI_NOTIFY_EJECT_REQUEST:
 		acpi_handle_debug(handle, "ACPI_NOTIFY_EJECT_REQUEST event\n");
-		if (!handler->hotplug.enabled) {
+		if (!adev->handler)
+			goto err_out;
+
+		if (!adev->handler->hotplug.enabled) {
 			acpi_handle_err(handle, "Eject disabled\n");
 			ost_code = ACPI_OST_SC_EJECT_NOT_SUPPORTED;
 			goto err_out;
@@ -2105,6 +2107,7 @@
 	list_for_each_entry_reverse(child, &adev->children, node)
 		acpi_bus_trim(child);
 
+	adev->flags.match_driver = false;
 	if (handler) {
 		if (handler->detach)
 			handler->detach(adev);
diff --git a/drivers/acpi/sysfs.c b/drivers/acpi/sysfs.c
index 443dc93..91a32ce 100644
--- a/drivers/acpi/sysfs.c
+++ b/drivers/acpi/sysfs.c
@@ -226,7 +226,7 @@
 /* /sys/modules/acpi/parameters/aml_debug_output */
 
 module_param_named(aml_debug_output, acpi_gbl_enable_aml_debug_object,
-		   bool, 0644);
+		   byte, 0644);
 MODULE_PARM_DESC(aml_debug_output,
 		 "To enable/disable the ACPI Debug Object output.");
 
diff --git a/drivers/acpi/utils.c b/drivers/acpi/utils.c
index 0347a37..85e3b61 100644
--- a/drivers/acpi/utils.c
+++ b/drivers/acpi/utils.c
@@ -99,10 +99,6 @@
 
 		union acpi_object *element = &(package->package.elements[i]);
 
-		if (!element) {
-			return AE_BAD_DATA;
-		}
-
 		switch (element->type) {
 
 		case ACPI_TYPE_INTEGER:
diff --git a/drivers/acpi/video_detect.c b/drivers/acpi/video_detect.c
index f0447d3..a697b77 100644
--- a/drivers/acpi/video_detect.c
+++ b/drivers/acpi/video_detect.c
@@ -170,6 +170,14 @@
 	},
 	{
 	.callback = video_detect_force_vendor,
+	.ident = "HP EliteBook Revolve 810",
+	.matches = {
+		DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"),
+		DMI_MATCH(DMI_PRODUCT_NAME, "HP EliteBook Revolve 810 G1"),
+		},
+	},
+	{
+	.callback = video_detect_force_vendor,
 	.ident = "Lenovo Yoga 13",
 	.matches = {
 		DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
diff --git a/drivers/ata/sata_mv.c b/drivers/ata/sata_mv.c
index 9c1a11d..05c8a44 100644
--- a/drivers/ata/sata_mv.c
+++ b/drivers/ata/sata_mv.c
@@ -4125,12 +4125,13 @@
 			clk_prepare_enable(hpriv->port_clks[port]);
 
 		sprintf(port_number, "port%d", port);
-		hpriv->port_phys[port] = devm_phy_get(&pdev->dev, port_number);
+		hpriv->port_phys[port] = devm_phy_optional_get(&pdev->dev,
+							       port_number);
 		if (IS_ERR(hpriv->port_phys[port])) {
 			rc = PTR_ERR(hpriv->port_phys[port]);
 			hpriv->port_phys[port] = NULL;
-			if ((rc != -EPROBE_DEFER) && (rc != -ENODEV))
-				dev_warn(&pdev->dev, "error getting phy");
+			if (rc != -EPROBE_DEFER)
+				dev_warn(&pdev->dev, "error getting phy %d", rc);
 
 			/* Cleanup only the initialized ports */
 			hpriv->n_ports = port;
diff --git a/drivers/base/component.c b/drivers/base/component.c
index c53efe6..c477899 100644
--- a/drivers/base/component.c
+++ b/drivers/base/component.c
@@ -133,9 +133,16 @@
 			goto out;
 		}
 
+		if (!devres_open_group(master->dev, NULL, GFP_KERNEL)) {
+			ret = -ENOMEM;
+			goto out;
+		}
+
 		/* Found all components */
 		ret = master->ops->bind(master->dev);
 		if (ret < 0) {
+			devres_release_group(master->dev, NULL);
+			dev_info(master->dev, "master bind failed: %d\n", ret);
 			master_remove_components(master);
 			goto out;
 		}
@@ -166,6 +173,7 @@
 {
 	if (master->bound) {
 		master->ops->unbind(master->dev);
+		devres_release_group(master->dev, NULL);
 		master->bound = false;
 	}
 
diff --git a/drivers/base/dma-buf.c b/drivers/base/dma-buf.c
index 1e16cbd..61d6d62 100644
--- a/drivers/base/dma-buf.c
+++ b/drivers/base/dma-buf.c
@@ -616,36 +616,35 @@
 	if (ret)
 		return ret;
 
-	seq_printf(s, "\nDma-buf Objects:\n");
-	seq_printf(s, "\texp_name\tsize\tflags\tmode\tcount\n");
+	seq_puts(s, "\nDma-buf Objects:\n");
+	seq_puts(s, "size\tflags\tmode\tcount\texp_name\n");
 
 	list_for_each_entry(buf_obj, &db_list.head, list_node) {
 		ret = mutex_lock_interruptible(&buf_obj->lock);
 
 		if (ret) {
-			seq_printf(s,
-				  "\tERROR locking buffer object: skipping\n");
+			seq_puts(s,
+				 "\tERROR locking buffer object: skipping\n");
 			continue;
 		}
 
-		seq_printf(s, "\t");
-
-		seq_printf(s, "\t%s\t%08zu\t%08x\t%08x\t%08ld\n",
-				buf_obj->exp_name, buf_obj->size,
+		seq_printf(s, "%08zu\t%08x\t%08x\t%08ld\t%s\n",
+				buf_obj->size,
 				buf_obj->file->f_flags, buf_obj->file->f_mode,
-				(long)(buf_obj->file->f_count.counter));
+				(long)(buf_obj->file->f_count.counter),
+				buf_obj->exp_name);
 
-		seq_printf(s, "\t\tAttached Devices:\n");
+		seq_puts(s, "\tAttached Devices:\n");
 		attach_count = 0;
 
 		list_for_each_entry(attach_obj, &buf_obj->attachments, node) {
-			seq_printf(s, "\t\t");
+			seq_puts(s, "\t");
 
-			seq_printf(s, "%s\n", attach_obj->dev->init_name);
+			seq_printf(s, "%s\n", dev_name(attach_obj->dev));
 			attach_count++;
 		}
 
-		seq_printf(s, "\n\t\tTotal %d devices attached\n",
+		seq_printf(s, "Total %d devices attached\n\n",
 				attach_count);
 
 		count++;
diff --git a/drivers/bcma/Kconfig b/drivers/bcma/Kconfig
index 7c081b3..0ee48be 100644
--- a/drivers/bcma/Kconfig
+++ b/drivers/bcma/Kconfig
@@ -75,6 +75,7 @@
 config BCMA_DRIVER_GPIO
 	bool "BCMA GPIO driver"
 	depends on BCMA && GPIOLIB
+	select IRQ_DOMAIN if BCMA_HOST_SOC
 	help
 	  Driver to provide access to the GPIO pins of the bcma bus.
 
diff --git a/drivers/bcma/driver_gpio.c b/drivers/bcma/driver_gpio.c
index 45f0996..25f9887 100644
--- a/drivers/bcma/driver_gpio.c
+++ b/drivers/bcma/driver_gpio.c
@@ -9,6 +9,9 @@
  */
 
 #include <linux/gpio.h>
+#include <linux/irq.h>
+#include <linux/interrupt.h>
+#include <linux/irqdomain.h>
 #include <linux/export.h>
 #include <linux/bcma/bcma.h>
 
@@ -73,19 +76,136 @@
 	bcma_chipco_gpio_pullup(cc, 1 << gpio, 0);
 }
 
+#if IS_BUILTIN(CONFIG_BCMA_HOST_SOC)
 static int bcma_gpio_to_irq(struct gpio_chip *chip, unsigned gpio)
 {
 	struct bcma_drv_cc *cc = bcma_gpio_get_cc(chip);
 
 	if (cc->core->bus->hosttype == BCMA_HOSTTYPE_SOC)
-		return bcma_core_irq(cc->core);
+		return irq_find_mapping(cc->irq_domain, gpio);
 	else
 		return -EINVAL;
 }
 
+static void bcma_gpio_irq_unmask(struct irq_data *d)
+{
+	struct bcma_drv_cc *cc = irq_data_get_irq_chip_data(d);
+	int gpio = irqd_to_hwirq(d);
+	u32 val = bcma_chipco_gpio_in(cc, BIT(gpio));
+
+	bcma_chipco_gpio_polarity(cc, BIT(gpio), val);
+	bcma_chipco_gpio_intmask(cc, BIT(gpio), BIT(gpio));
+}
+
+static void bcma_gpio_irq_mask(struct irq_data *d)
+{
+	struct bcma_drv_cc *cc = irq_data_get_irq_chip_data(d);
+	int gpio = irqd_to_hwirq(d);
+
+	bcma_chipco_gpio_intmask(cc, BIT(gpio), 0);
+}
+
+static struct irq_chip bcma_gpio_irq_chip = {
+	.name		= "BCMA-GPIO",
+	.irq_mask	= bcma_gpio_irq_mask,
+	.irq_unmask	= bcma_gpio_irq_unmask,
+};
+
+static irqreturn_t bcma_gpio_irq_handler(int irq, void *dev_id)
+{
+	struct bcma_drv_cc *cc = dev_id;
+	u32 val = bcma_cc_read32(cc, BCMA_CC_GPIOIN);
+	u32 mask = bcma_cc_read32(cc, BCMA_CC_GPIOIRQ);
+	u32 pol = bcma_cc_read32(cc, BCMA_CC_GPIOPOL);
+	unsigned long irqs = (val ^ pol) & mask;
+	int gpio;
+
+	if (!irqs)
+		return IRQ_NONE;
+
+	for_each_set_bit(gpio, &irqs, cc->gpio.ngpio)
+		generic_handle_irq(bcma_gpio_to_irq(&cc->gpio, gpio));
+	bcma_chipco_gpio_polarity(cc, irqs, val & irqs);
+
+	return IRQ_HANDLED;
+}
+
+static int bcma_gpio_irq_domain_init(struct bcma_drv_cc *cc)
+{
+	struct gpio_chip *chip = &cc->gpio;
+	int gpio, hwirq, err;
+
+	if (cc->core->bus->hosttype != BCMA_HOSTTYPE_SOC)
+		return 0;
+
+	cc->irq_domain = irq_domain_add_linear(NULL, chip->ngpio,
+					       &irq_domain_simple_ops, cc);
+	if (!cc->irq_domain) {
+		err = -ENODEV;
+		goto err_irq_domain;
+	}
+	for (gpio = 0; gpio < chip->ngpio; gpio++) {
+		int irq = irq_create_mapping(cc->irq_domain, gpio);
+
+		irq_set_chip_data(irq, cc);
+		irq_set_chip_and_handler(irq, &bcma_gpio_irq_chip,
+					 handle_simple_irq);
+	}
+
+	hwirq = bcma_core_irq(cc->core);
+	err = request_irq(hwirq, bcma_gpio_irq_handler, IRQF_SHARED, "gpio",
+			  cc);
+	if (err)
+		goto err_req_irq;
+
+	bcma_chipco_gpio_intmask(cc, ~0, 0);
+	bcma_cc_set32(cc, BCMA_CC_IRQMASK, BCMA_CC_IRQ_GPIO);
+
+	return 0;
+
+err_req_irq:
+	for (gpio = 0; gpio < chip->ngpio; gpio++) {
+		int irq = irq_find_mapping(cc->irq_domain, gpio);
+
+		irq_dispose_mapping(irq);
+	}
+	irq_domain_remove(cc->irq_domain);
+err_irq_domain:
+	return err;
+}
+
+static void bcma_gpio_irq_domain_exit(struct bcma_drv_cc *cc)
+{
+	struct gpio_chip *chip = &cc->gpio;
+	int gpio;
+
+	if (cc->core->bus->hosttype != BCMA_HOSTTYPE_SOC)
+		return;
+
+	bcma_cc_mask32(cc, BCMA_CC_IRQMASK, ~BCMA_CC_IRQ_GPIO);
+	free_irq(bcma_core_irq(cc->core), cc);
+	for (gpio = 0; gpio < chip->ngpio; gpio++) {
+		int irq = irq_find_mapping(cc->irq_domain, gpio);
+
+		irq_dispose_mapping(irq);
+	}
+	irq_domain_remove(cc->irq_domain);
+}
+#else
+static int bcma_gpio_irq_domain_init(struct bcma_drv_cc *cc)
+{
+	return 0;
+}
+
+static void bcma_gpio_irq_domain_exit(struct bcma_drv_cc *cc)
+{
+}
+#endif
+
 int bcma_gpio_init(struct bcma_drv_cc *cc)
 {
 	struct gpio_chip *chip = &cc->gpio;
+	int err;
 
 	chip->label		= "bcma_gpio";
 	chip->owner		= THIS_MODULE;
@@ -95,7 +215,9 @@
 	chip->set		= bcma_gpio_set_value;
 	chip->direction_input	= bcma_gpio_direction_input;
 	chip->direction_output	= bcma_gpio_direction_output;
+#if IS_BUILTIN(CONFIG_BCMA_HOST_SOC)
 	chip->to_irq		= bcma_gpio_to_irq;
+#endif
 	chip->ngpio		= 16;
 	/* There is just one SoC in one device and its GPIO addresses should be
 	 * deterministic to address them more easily. The other buses could get
@@ -105,10 +227,21 @@
 	else
 		chip->base		= -1;
 
-	return gpiochip_add(chip);
+	err = bcma_gpio_irq_domain_init(cc);
+	if (err)
+		return err;
+
+	err = gpiochip_add(chip);
+	if (err) {
+		bcma_gpio_irq_domain_exit(cc);
+		return err;
+	}
+
+	return 0;
 }
 
 int bcma_gpio_unregister(struct bcma_drv_cc *cc)
 {
+	bcma_gpio_irq_domain_exit(cc);
 	return gpiochip_remove(&cc->gpio);
 }
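
Editor's note on the hunk above: the driver now demultiplexes the single ChipCommon interrupt into one virtual IRQ per GPIO line through a linear irq_domain. Below is a minimal, illustrative sketch of that pattern under stated assumptions — it is not bcma code; the foo_* names, the 16-line bank size, and foo_read_pending() are placeholders.

#include <linux/bitops.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/irqdomain.h>

extern unsigned long foo_read_pending(void);	/* placeholder: read the pending-IRQ register */

static irqreturn_t foo_demux_handler(int irq, void *data)
{
	struct irq_domain *domain = data;
	unsigned long pending = foo_read_pending();
	int hwirq;

	if (!pending)
		return IRQ_NONE;

	/* Hand each pending hardware line to its mapped virtual IRQ. */
	for_each_set_bit(hwirq, &pending, 16)
		generic_handle_irq(irq_find_mapping(domain, hwirq));

	return IRQ_HANDLED;
}

static int foo_irq_init(struct irq_chip *chip, int parent_irq)
{
	struct irq_domain *domain;
	int hwirq;

	domain = irq_domain_add_linear(NULL, 16, &irq_domain_simple_ops, NULL);
	if (!domain)
		return -ENOMEM;

	/* Pre-create one virq per hardware line, wired to the chip's mask/unmask ops. */
	for (hwirq = 0; hwirq < 16; hwirq++)
		irq_set_chip_and_handler(irq_create_mapping(domain, hwirq),
					 chip, handle_simple_irq);

	return request_irq(parent_irq, foo_demux_handler, IRQF_SHARED,
			   "foo-demux", domain);
}

The driver above does the same thing with bcma_gpio_irq_chip and the ChipCommon registers, and tears the mappings down again with irq_dispose_mapping() and irq_domain_remove() on the error and exit paths.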
diff --git a/drivers/block/Kconfig b/drivers/block/Kconfig
index 9ffa90c..014a1cf 100644
--- a/drivers/block/Kconfig
+++ b/drivers/block/Kconfig
@@ -108,6 +108,8 @@
 
 source "drivers/block/mtip32xx/Kconfig"
 
+source "drivers/block/zram/Kconfig"
+
 config BLK_CPQ_DA
 	tristate "Compaq SMART2 support"
 	depends on PCI && VIRT_TO_BUS && 0
diff --git a/drivers/block/Makefile b/drivers/block/Makefile
index 816d979..02b688d 100644
--- a/drivers/block/Makefile
+++ b/drivers/block/Makefile
@@ -42,6 +42,7 @@
 
 obj-$(CONFIG_BLK_DEV_RSXX) += rsxx/
 obj-$(CONFIG_BLK_DEV_NULL_BLK)	+= null_blk.o
+obj-$(CONFIG_ZRAM) += zram/
 
 nvme-y		:= nvme-core.o nvme-scsi.o
 skd-y		:= skd_main.o
diff --git a/drivers/block/aoe/aoe.h b/drivers/block/aoe/aoe.h
index 14a9d19..9220f8e 100644
--- a/drivers/block/aoe/aoe.h
+++ b/drivers/block/aoe/aoe.h
@@ -100,11 +100,8 @@
 
 struct buf {
 	ulong nframesout;
-	ulong resid;
-	ulong bv_resid;
-	sector_t sector;
 	struct bio *bio;
-	struct bio_vec *bv;
+	struct bvec_iter iter;
 	struct request *rq;
 };
 
@@ -120,13 +117,10 @@
 	ulong waited;
 	ulong waited_total;
 	struct aoetgt *t;		/* parent target I belong to */
-	sector_t lba;
 	struct sk_buff *skb;		/* command skb freed on module exit */
 	struct sk_buff *r_skb;		/* response skb for async processing */
 	struct buf *buf;
-	struct bio_vec *bv;
-	ulong bcnt;
-	ulong bv_off;
+	struct bvec_iter iter;
 	char flags;
 };
 
diff --git a/drivers/block/aoe/aoecmd.c b/drivers/block/aoe/aoecmd.c
index d251543..8184451 100644
--- a/drivers/block/aoe/aoecmd.c
+++ b/drivers/block/aoe/aoecmd.c
@@ -196,8 +196,7 @@
 
 	t = f->t;
 	f->buf = NULL;
-	f->lba = 0;
-	f->bv = NULL;
+	memset(&f->iter, 0, sizeof(f->iter));
 	f->r_skb = NULL;
 	f->flags = 0;
 	list_add(&f->head, &t->ffree);
@@ -295,21 +294,14 @@
 }
 
 static void
-skb_fillup(struct sk_buff *skb, struct bio_vec *bv, ulong off, ulong cnt)
+skb_fillup(struct sk_buff *skb, struct bio *bio, struct bvec_iter iter)
 {
 	int frag = 0;
-	ulong fcnt;
-loop:
-	fcnt = bv->bv_len - (off - bv->bv_offset);
-	if (fcnt > cnt)
-		fcnt = cnt;
-	skb_fill_page_desc(skb, frag++, bv->bv_page, off, fcnt);
-	cnt -= fcnt;
-	if (cnt <= 0)
-		return;
-	bv++;
-	off = bv->bv_offset;
-	goto loop;
+	struct bio_vec bv;
+
+	__bio_for_each_segment(bv, bio, iter, iter)
+		skb_fill_page_desc(skb, frag++, bv.bv_page,
+				   bv.bv_offset, bv.bv_len);
 }
 
 static void
@@ -346,12 +338,10 @@
 	t->nout++;
 	f->waited = 0;
 	f->waited_total = 0;
-	if (f->buf)
-		f->lba = f->buf->sector;
 
 	/* set up ata header */
-	ah->scnt = f->bcnt >> 9;
-	put_lba(ah, f->lba);
+	ah->scnt = f->iter.bi_size >> 9;
+	put_lba(ah, f->iter.bi_sector);
 	if (t->d->flags & DEVFL_EXT) {
 		ah->aflags |= AOEAFL_EXT;
 	} else {
@@ -360,11 +350,11 @@
 		ah->lba3 |= 0xe0;	/* LBA bit + obsolete 0xa0 */
 	}
 	if (f->buf && bio_data_dir(f->buf->bio) == WRITE) {
-		skb_fillup(skb, f->bv, f->bv_off, f->bcnt);
+		skb_fillup(skb, f->buf->bio, f->iter);
 		ah->aflags |= AOEAFL_WRITE;
-		skb->len += f->bcnt;
-		skb->data_len = f->bcnt;
-		skb->truesize += f->bcnt;
+		skb->len += f->iter.bi_size;
+		skb->data_len = f->iter.bi_size;
+		skb->truesize += f->iter.bi_size;
 		t->wpkts++;
 	} else {
 		t->rpkts++;
@@ -382,7 +372,6 @@
 	struct buf *buf;
 	struct sk_buff *skb;
 	struct sk_buff_head queue;
-	ulong bcnt, fbcnt;
 
 	buf = nextbuf(d);
 	if (buf == NULL)
@@ -390,39 +379,22 @@
 	f = newframe(d);
 	if (f == NULL)
 		return 0;
-	bcnt = d->maxbcnt;
-	if (bcnt == 0)
-		bcnt = DEFAULTBCNT;
-	if (bcnt > buf->resid)
-		bcnt = buf->resid;
-	fbcnt = bcnt;
-	f->bv = buf->bv;
-	f->bv_off = f->bv->bv_offset + (f->bv->bv_len - buf->bv_resid);
-	do {
-		if (fbcnt < buf->bv_resid) {
-			buf->bv_resid -= fbcnt;
-			buf->resid -= fbcnt;
-			break;
-		}
-		fbcnt -= buf->bv_resid;
-		buf->resid -= buf->bv_resid;
-		if (buf->resid == 0) {
-			d->ip.buf = NULL;
-			break;
-		}
-		buf->bv++;
-		buf->bv_resid = buf->bv->bv_len;
-		WARN_ON(buf->bv_resid == 0);
-	} while (fbcnt);
 
 	/* initialize the headers & frame */
 	f->buf = buf;
-	f->bcnt = bcnt;
-	ata_rw_frameinit(f);
+	f->iter = buf->iter;
+	f->iter.bi_size = min_t(unsigned long,
+				d->maxbcnt ?: DEFAULTBCNT,
+				f->iter.bi_size);
+	bio_advance_iter(buf->bio, &buf->iter, f->iter.bi_size);
+
+	if (!buf->iter.bi_size)
+		d->ip.buf = NULL;
 
 	/* mark all tracking fields and load out */
 	buf->nframesout += 1;
-	buf->sector += bcnt >> 9;
+
+	ata_rw_frameinit(f);
 
 	skb = skb_clone(f->skb, GFP_ATOMIC);
 	if (skb) {
@@ -613,10 +585,7 @@
 	skb = nf->skb;
 	nf->skb = f->skb;
 	nf->buf = f->buf;
-	nf->bcnt = f->bcnt;
-	nf->lba = f->lba;
-	nf->bv = f->bv;
-	nf->bv_off = f->bv_off;
+	nf->iter = f->iter;
 	nf->waited = 0;
 	nf->waited_total = f->waited_total;
 	nf->sent = f->sent;
@@ -648,19 +617,19 @@
 	}
 	f->flags |= FFL_PROBE;
 	ifrotate(t);
-	f->bcnt = t->d->maxbcnt ? t->d->maxbcnt : DEFAULTBCNT;
+	f->iter.bi_size = t->d->maxbcnt ? t->d->maxbcnt : DEFAULTBCNT;
 	ata_rw_frameinit(f);
 	skb = f->skb;
-	for (frag = 0, n = f->bcnt; n > 0; ++frag, n -= m) {
+	for (frag = 0, n = f->iter.bi_size; n > 0; ++frag, n -= m) {
 		if (n < PAGE_SIZE)
 			m = n;
 		else
 			m = PAGE_SIZE;
 		skb_fill_page_desc(skb, frag, empty_page, 0, m);
 	}
-	skb->len += f->bcnt;
-	skb->data_len = f->bcnt;
-	skb->truesize += f->bcnt;
+	skb->len += f->iter.bi_size;
+	skb->data_len = f->iter.bi_size;
+	skb->truesize += f->iter.bi_size;
 
 	skb = skb_clone(f->skb, GFP_ATOMIC);
 	if (skb) {
@@ -897,15 +866,15 @@
 static void
 bio_pageinc(struct bio *bio)
 {
-	struct bio_vec *bv;
+	struct bio_vec bv;
 	struct page *page;
-	int i;
+	struct bvec_iter iter;
 
-	bio_for_each_segment(bv, bio, i) {
+	bio_for_each_segment(bv, bio, iter) {
 		/* Non-zero page count for non-head members of
 		 * compound pages is no longer allowed by the kernel.
 		 */
-		page = compound_trans_head(bv->bv_page);
+		page = compound_trans_head(bv.bv_page);
 		atomic_inc(&page->_count);
 	}
 }
@@ -913,12 +882,12 @@
 static void
 bio_pagedec(struct bio *bio)
 {
-	struct bio_vec *bv;
 	struct page *page;
-	int i;
+	struct bio_vec bv;
+	struct bvec_iter iter;
 
-	bio_for_each_segment(bv, bio, i) {
-		page = compound_trans_head(bv->bv_page);
+	bio_for_each_segment(bv, bio, iter) {
+		page = compound_trans_head(bv.bv_page);
 		atomic_dec(&page->_count);
 	}
 }
@@ -929,12 +898,8 @@
 	memset(buf, 0, sizeof(*buf));
 	buf->rq = rq;
 	buf->bio = bio;
-	buf->resid = bio->bi_size;
-	buf->sector = bio->bi_sector;
+	buf->iter = bio->bi_iter;
 	bio_pageinc(bio);
-	buf->bv = bio_iovec(bio);
-	buf->bv_resid = buf->bv->bv_len;
-	WARN_ON(buf->bv_resid == 0);
 }
 
 static struct buf *
@@ -1119,24 +1084,18 @@
 }
 
 static void
-bvcpy(struct bio_vec *bv, ulong off, struct sk_buff *skb, long cnt)
+bvcpy(struct sk_buff *skb, struct bio *bio, struct bvec_iter iter, long cnt)
 {
-	ulong fcnt;
-	char *p;
 	int soff = 0;
-loop:
-	fcnt = bv->bv_len - (off - bv->bv_offset);
-	if (fcnt > cnt)
-		fcnt = cnt;
-	p = page_address(bv->bv_page) + off;
-	skb_copy_bits(skb, soff, p, fcnt);
-	soff += fcnt;
-	cnt -= fcnt;
-	if (cnt <= 0)
-		return;
-	bv++;
-	off = bv->bv_offset;
-	goto loop;
+	struct bio_vec bv;
+
+	iter.bi_size = cnt;
+
+	__bio_for_each_segment(bv, bio, iter, iter) {
+		char *p = page_address(bv.bv_page) + bv.bv_offset;
+		skb_copy_bits(skb, soff, p, bv.bv_len);
+		soff += bv.bv_len;
+	}
 }
 
 void
@@ -1152,7 +1111,7 @@
 	do {
 		bio = rq->bio;
 		bok = !fastfail && test_bit(BIO_UPTODATE, &bio->bi_flags);
-	} while (__blk_end_request(rq, bok ? 0 : -EIO, bio->bi_size));
+	} while (__blk_end_request(rq, bok ? 0 : -EIO, bio->bi_iter.bi_size));
 
 	/* cf. http://lkml.org/lkml/2006/10/31/28 */
 	if (!fastfail)
@@ -1229,7 +1188,15 @@
 			clear_bit(BIO_UPTODATE, &buf->bio->bi_flags);
 			break;
 		}
-		bvcpy(f->bv, f->bv_off, skb, n);
+		if (n > f->iter.bi_size) {
+			pr_err_ratelimited("%s e%ld.%d.  bytes=%ld need=%u\n",
+				"aoe: too-large data size in read from",
+				(long) d->aoemajor, d->aoeminor,
+				n, f->iter.bi_size);
+			clear_bit(BIO_UPTODATE, &buf->bio->bi_flags);
+			break;
+		}
+		bvcpy(skb, f->buf->bio, f->iter, n);
 	case ATA_CMD_PIO_WRITE:
 	case ATA_CMD_PIO_WRITE_EXT:
 		spin_lock_irq(&d->lock);
@@ -1272,7 +1239,7 @@
 
 	aoe_freetframe(f);
 
-	if (buf && --buf->nframesout == 0 && buf->resid == 0)
+	if (buf && --buf->nframesout == 0 && buf->iter.bi_size == 0)
 		aoe_end_buf(d, buf);
 
 	spin_unlock_irq(&d->lock);
@@ -1727,7 +1694,7 @@
 {
 	if (buf == NULL)
 		return;
-	buf->resid = 0;
+	buf->iter.bi_size = 0;
 	clear_bit(BIO_UPTODATE, &buf->bio->bi_flags);
 	if (buf->nframesout == 0)
 		aoe_end_buf(d, buf);
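
Editor's note: the aoe conversion above is part of the immutable-biovec work — the driver no longer walks struct bio_vec pointers and per-frame offsets by hand, it keeps a private struct bvec_iter and lets the iterator macros do the splitting. A rough sketch of that idiom follows (illustrative only; foo_copy_page is a placeholder, not an aoe function).

#include <linux/bio.h>
#include <linux/kernel.h>

extern void foo_copy_page(struct page *pg, unsigned int off, unsigned int len);	/* placeholder */

/* Walk at most 'bytes' of the bio starting at 'iter', one segment at a time. */
static void foo_fill_chunk(struct bio *bio, struct bvec_iter iter, unsigned int bytes)
{
	struct bio_vec bv;

	iter.bi_size = min(iter.bi_size, bytes);	/* clamp the copy to this chunk */
	__bio_for_each_segment(bv, bio, iter, iter)
		foo_copy_page(bv.bv_page, bv.bv_offset, bv.bv_len);
}

The caller keeps one iterator per in-flight frame (as f->iter does above) and moves the master iterator forward with bio_advance_iter(bio, &iter, bytes) once a chunk has been queued, which is exactly what aoecmd_ata_rw() now does in place of the old bv/bv_resid bookkeeping.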
diff --git a/drivers/block/brd.c b/drivers/block/brd.c
index d91f1a5..e73b85c 100644
--- a/drivers/block/brd.c
+++ b/drivers/block/brd.c
@@ -328,18 +328,18 @@
 	struct block_device *bdev = bio->bi_bdev;
 	struct brd_device *brd = bdev->bd_disk->private_data;
 	int rw;
-	struct bio_vec *bvec;
+	struct bio_vec bvec;
 	sector_t sector;
-	int i;
+	struct bvec_iter iter;
 	int err = -EIO;
 
-	sector = bio->bi_sector;
+	sector = bio->bi_iter.bi_sector;
 	if (bio_end_sector(bio) > get_capacity(bdev->bd_disk))
 		goto out;
 
 	if (unlikely(bio->bi_rw & REQ_DISCARD)) {
 		err = 0;
-		discard_from_brd(brd, sector, bio->bi_size);
+		discard_from_brd(brd, sector, bio->bi_iter.bi_size);
 		goto out;
 	}
 
@@ -347,10 +347,10 @@
 	if (rw == READA)
 		rw = READ;
 
-	bio_for_each_segment(bvec, bio, i) {
-		unsigned int len = bvec->bv_len;
-		err = brd_do_bvec(brd, bvec->bv_page, len,
-					bvec->bv_offset, rw, sector);
+	bio_for_each_segment(bvec, bio, iter) {
+		unsigned int len = bvec.bv_len;
+		err = brd_do_bvec(brd, bvec.bv_page, len,
+					bvec.bv_offset, rw, sector);
 		if (err)
 			break;
 		sector += len >> SECTOR_SHIFT;
diff --git a/drivers/block/cciss.c b/drivers/block/cciss.c
index b35fc4f..036e8ab 100644
--- a/drivers/block/cciss.c
+++ b/drivers/block/cciss.c
@@ -5004,7 +5004,7 @@
 
 	i = alloc_cciss_hba(pdev);
 	if (i < 0)
-		return -1;
+		return -ENOMEM;
 
 	h = hba[i];
 	h->pdev = pdev;
@@ -5205,7 +5205,7 @@
 	 */
 	pci_set_drvdata(pdev, NULL);
 	free_hba(h);
-	return -1;
+	return -ENODEV;
 }
 
 static void cciss_shutdown(struct pci_dev *pdev)
diff --git a/drivers/block/drbd/drbd_actlog.c b/drivers/block/drbd/drbd_actlog.c
index 28c73ca..a9b13f2 100644
--- a/drivers/block/drbd/drbd_actlog.c
+++ b/drivers/block/drbd/drbd_actlog.c
@@ -159,7 +159,7 @@
 
 	bio = bio_alloc_drbd(GFP_NOIO);
 	bio->bi_bdev = bdev->md_bdev;
-	bio->bi_sector = sector;
+	bio->bi_iter.bi_sector = sector;
 	err = -EIO;
 	if (bio_add_page(bio, page, size, 0) != size)
 		goto out;
diff --git a/drivers/block/drbd/drbd_bitmap.c b/drivers/block/drbd/drbd_bitmap.c
index b12c11e..597f111 100644
--- a/drivers/block/drbd/drbd_bitmap.c
+++ b/drivers/block/drbd/drbd_bitmap.c
@@ -1028,7 +1028,7 @@
 	} else
 		page = b->bm_pages[page_nr];
 	bio->bi_bdev = mdev->ldev->md_bdev;
-	bio->bi_sector = on_disk_sector;
+	bio->bi_iter.bi_sector = on_disk_sector;
 	/* bio_add_page of a single page to an empty bio will always succeed,
 	 * according to api.  Do we want to assert that? */
 	bio_add_page(bio, page, len, 0);
diff --git a/drivers/block/drbd/drbd_main.c b/drivers/block/drbd/drbd_main.c
index 9e3818b..929468e 100644
--- a/drivers/block/drbd/drbd_main.c
+++ b/drivers/block/drbd/drbd_main.c
@@ -1537,15 +1537,17 @@
 
 static int _drbd_send_bio(struct drbd_conf *mdev, struct bio *bio)
 {
-	struct bio_vec *bvec;
-	int i;
+	struct bio_vec bvec;
+	struct bvec_iter iter;
+
 	/* hint all but last page with MSG_MORE */
-	bio_for_each_segment(bvec, bio, i) {
+	bio_for_each_segment(bvec, bio, iter) {
 		int err;
 
-		err = _drbd_no_send_page(mdev, bvec->bv_page,
-					 bvec->bv_offset, bvec->bv_len,
-					 i == bio->bi_vcnt - 1 ? 0 : MSG_MORE);
+		err = _drbd_no_send_page(mdev, bvec.bv_page,
+					 bvec.bv_offset, bvec.bv_len,
+					 bio_iter_last(bvec, iter)
+					 ? 0 : MSG_MORE);
 		if (err)
 			return err;
 	}
@@ -1554,15 +1556,16 @@
 
 static int _drbd_send_zc_bio(struct drbd_conf *mdev, struct bio *bio)
 {
-	struct bio_vec *bvec;
-	int i;
+	struct bio_vec bvec;
+	struct bvec_iter iter;
+
 	/* hint all but last page with MSG_MORE */
-	bio_for_each_segment(bvec, bio, i) {
+	bio_for_each_segment(bvec, bio, iter) {
 		int err;
 
-		err = _drbd_send_page(mdev, bvec->bv_page,
-				      bvec->bv_offset, bvec->bv_len,
-				      i == bio->bi_vcnt - 1 ? 0 : MSG_MORE);
+		err = _drbd_send_page(mdev, bvec.bv_page,
+				      bvec.bv_offset, bvec.bv_len,
+				      bio_iter_last(bvec, iter) ? 0 : MSG_MORE);
 		if (err)
 			return err;
 	}
diff --git a/drivers/block/drbd/drbd_receiver.c b/drivers/block/drbd/drbd_receiver.c
index 6fa6673..d073305 100644
--- a/drivers/block/drbd/drbd_receiver.c
+++ b/drivers/block/drbd/drbd_receiver.c
@@ -1333,7 +1333,7 @@
 		goto fail;
 	}
 	/* > peer_req->i.sector, unless this is the first bio */
-	bio->bi_sector = sector;
+	bio->bi_iter.bi_sector = sector;
 	bio->bi_bdev = mdev->ldev->backing_bdev;
 	bio->bi_rw = rw;
 	bio->bi_private = peer_req;
@@ -1353,7 +1353,7 @@
 				dev_err(DEV,
 					"bio_add_page failed for len=%u, "
 					"bi_vcnt=0 (bi_sector=%llu)\n",
-					len, (unsigned long long)bio->bi_sector);
+					len, (uint64_t)bio->bi_iter.bi_sector);
 				err = -ENOSPC;
 				goto fail;
 			}
@@ -1595,9 +1595,10 @@
 static int recv_dless_read(struct drbd_conf *mdev, struct drbd_request *req,
 			   sector_t sector, int data_size)
 {
-	struct bio_vec *bvec;
+	struct bio_vec bvec;
+	struct bvec_iter iter;
 	struct bio *bio;
-	int dgs, err, i, expect;
+	int dgs, err, expect;
 	void *dig_in = mdev->tconn->int_dig_in;
 	void *dig_vv = mdev->tconn->int_dig_vv;
 
@@ -1615,13 +1616,13 @@
 	mdev->recv_cnt += data_size>>9;
 
 	bio = req->master_bio;
-	D_ASSERT(sector == bio->bi_sector);
+	D_ASSERT(sector == bio->bi_iter.bi_sector);
 
-	bio_for_each_segment(bvec, bio, i) {
-		void *mapped = kmap(bvec->bv_page) + bvec->bv_offset;
-		expect = min_t(int, data_size, bvec->bv_len);
+	bio_for_each_segment(bvec, bio, iter) {
+		void *mapped = kmap(bvec.bv_page) + bvec.bv_offset;
+		expect = min_t(int, data_size, bvec.bv_len);
 		err = drbd_recv_all_warn(mdev->tconn, mapped, expect);
-		kunmap(bvec->bv_page);
+		kunmap(bvec.bv_page);
 		if (err)
 			return err;
 		data_size -= expect;
diff --git a/drivers/block/drbd/drbd_req.c b/drivers/block/drbd/drbd_req.c
index fec7bef..104a040 100644
--- a/drivers/block/drbd/drbd_req.c
+++ b/drivers/block/drbd/drbd_req.c
@@ -77,8 +77,8 @@
 	req->epoch       = 0;
 
 	drbd_clear_interval(&req->i);
-	req->i.sector     = bio_src->bi_sector;
-	req->i.size      = bio_src->bi_size;
+	req->i.sector     = bio_src->bi_iter.bi_sector;
+	req->i.size      = bio_src->bi_iter.bi_size;
 	req->i.local = true;
 	req->i.waiting = false;
 
@@ -1280,7 +1280,7 @@
 	/*
 	 * what we "blindly" assume:
 	 */
-	D_ASSERT(IS_ALIGNED(bio->bi_size, 512));
+	D_ASSERT(IS_ALIGNED(bio->bi_iter.bi_size, 512));
 
 	inc_ap_bio(mdev);
 	__drbd_make_request(mdev, bio, start_time);
diff --git a/drivers/block/drbd/drbd_req.h b/drivers/block/drbd/drbd_req.h
index 978cb1a..28e15d9 100644
--- a/drivers/block/drbd/drbd_req.h
+++ b/drivers/block/drbd/drbd_req.h
@@ -269,7 +269,7 @@
 
 /* Short lived temporary struct on the stack.
  * We could squirrel the error to be returned into
- * bio->bi_size, or similar. But that would be too ugly. */
+ * bio->bi_iter.bi_size, or similar. But that would be too ugly. */
 struct bio_and_error {
 	struct bio *bio;
 	int error;
diff --git a/drivers/block/drbd/drbd_worker.c b/drivers/block/drbd/drbd_worker.c
index 891c0ec..84d3175 100644
--- a/drivers/block/drbd/drbd_worker.c
+++ b/drivers/block/drbd/drbd_worker.c
@@ -313,8 +313,8 @@
 {
 	struct hash_desc desc;
 	struct scatterlist sg;
-	struct bio_vec *bvec;
-	int i;
+	struct bio_vec bvec;
+	struct bvec_iter iter;
 
 	desc.tfm = tfm;
 	desc.flags = 0;
@@ -322,8 +322,8 @@
 	sg_init_table(&sg, 1);
 	crypto_hash_init(&desc);
 
-	bio_for_each_segment(bvec, bio, i) {
-		sg_set_page(&sg, bvec->bv_page, bvec->bv_len, bvec->bv_offset);
+	bio_for_each_segment(bvec, bio, iter) {
+		sg_set_page(&sg, bvec.bv_page, bvec.bv_len, bvec.bv_offset);
 		crypto_hash_update(&desc, &sg, sg.length);
 	}
 	crypto_hash_final(&desc, digest);
diff --git a/drivers/block/floppy.c b/drivers/block/floppy.c
index 000abe2..2023043 100644
--- a/drivers/block/floppy.c
+++ b/drivers/block/floppy.c
@@ -2351,7 +2351,7 @@
 /* Compute maximal contiguous buffer size. */
 static int buffer_chain_size(void)
 {
-	struct bio_vec *bv;
+	struct bio_vec bv;
 	int size;
 	struct req_iterator iter;
 	char *base;
@@ -2360,10 +2360,10 @@
 	size = 0;
 
 	rq_for_each_segment(bv, current_req, iter) {
-		if (page_address(bv->bv_page) + bv->bv_offset != base + size)
+		if (page_address(bv.bv_page) + bv.bv_offset != base + size)
 			break;
 
-		size += bv->bv_len;
+		size += bv.bv_len;
 	}
 
 	return size >> 9;
@@ -2389,7 +2389,7 @@
 static void copy_buffer(int ssize, int max_sector, int max_sector_2)
 {
 	int remaining;		/* number of transferred 512-byte sectors */
-	struct bio_vec *bv;
+	struct bio_vec bv;
 	char *buffer;
 	char *dma_buffer;
 	int size;
@@ -2427,10 +2427,10 @@
 		if (!remaining)
 			break;
 
-		size = bv->bv_len;
+		size = bv.bv_len;
 		SUPBOUND(size, remaining);
 
-		buffer = page_address(bv->bv_page) + bv->bv_offset;
+		buffer = page_address(bv.bv_page) + bv.bv_offset;
 		if (dma_buffer + size >
 		    floppy_track_buffer + (max_buffer_sectors << 10) ||
 		    dma_buffer < floppy_track_buffer) {
@@ -3691,9 +3691,12 @@
 	if (!(mode & FMODE_NDELAY)) {
 		if (mode & (FMODE_READ|FMODE_WRITE)) {
 			UDRS->last_checked = 0;
+			clear_bit(FD_OPEN_SHOULD_FAIL_BIT, &UDRS->flags);
 			check_disk_change(bdev);
 			if (test_bit(FD_DISK_CHANGED_BIT, &UDRS->flags))
 				goto out;
+			if (test_bit(FD_OPEN_SHOULD_FAIL_BIT, &UDRS->flags))
+				goto out;
 		}
 		res = -EROFS;
 		if ((mode & FMODE_WRITE) &&
@@ -3746,17 +3749,29 @@
  * a disk in the drive, and whether that disk is writable.
  */
 
-static void floppy_rb0_complete(struct bio *bio, int err)
+struct rb0_cbdata {
+	int drive;
+	struct completion complete;
+};
+
+static void floppy_rb0_cb(struct bio *bio, int err)
 {
-	complete((struct completion *)bio->bi_private);
+	struct rb0_cbdata *cbdata = (struct rb0_cbdata *)bio->bi_private;
+	int drive = cbdata->drive;
+
+	if (err) {
+		pr_info("floppy: error %d while reading block 0", err);
+		set_bit(FD_OPEN_SHOULD_FAIL_BIT, &UDRS->flags);
+	}
+	complete(&cbdata->complete);
 }
 
-static int __floppy_read_block_0(struct block_device *bdev)
+static int __floppy_read_block_0(struct block_device *bdev, int drive)
 {
 	struct bio bio;
 	struct bio_vec bio_vec;
-	struct completion complete;
 	struct page *page;
+	struct rb0_cbdata cbdata;
 	size_t size;
 
 	page = alloc_page(GFP_NOIO);
@@ -3769,23 +3784,26 @@
 	if (!size)
 		size = 1024;
 
+	cbdata.drive = drive;
+
 	bio_init(&bio);
 	bio.bi_io_vec = &bio_vec;
 	bio_vec.bv_page = page;
 	bio_vec.bv_len = size;
 	bio_vec.bv_offset = 0;
 	bio.bi_vcnt = 1;
-	bio.bi_size = size;
+	bio.bi_iter.bi_size = size;
 	bio.bi_bdev = bdev;
-	bio.bi_sector = 0;
+	bio.bi_iter.bi_sector = 0;
 	bio.bi_flags = (1 << BIO_QUIET);
-	init_completion(&complete);
-	bio.bi_private = &complete;
-	bio.bi_end_io = floppy_rb0_complete;
+	bio.bi_private = &cbdata;
+	bio.bi_end_io = floppy_rb0_cb;
 
 	submit_bio(READ, &bio);
 	process_fd_request();
-	wait_for_completion(&complete);
+
+	init_completion(&cbdata.complete);
+	wait_for_completion(&cbdata.complete);
 
 	__free_page(page);
 
@@ -3827,7 +3845,7 @@
 			UDRS->generation++;
 		if (drive_no_geom(drive)) {
 			/* auto-sensing */
-			res = __floppy_read_block_0(opened_bdev[drive]);
+			res = __floppy_read_block_0(opened_bdev[drive], drive);
 		} else {
 			if (cf)
 				poll_drive(false, FD_RAW_NEED_DISK);
diff --git a/drivers/block/loop.c b/drivers/block/loop.c
index c8dac73..66e8c3b 100644
--- a/drivers/block/loop.c
+++ b/drivers/block/loop.c
@@ -288,9 +288,10 @@
 {
 	int (*do_lo_send)(struct loop_device *, struct bio_vec *, loff_t,
 			struct page *page);
-	struct bio_vec *bvec;
+	struct bio_vec bvec;
+	struct bvec_iter iter;
 	struct page *page = NULL;
-	int i, ret = 0;
+	int ret = 0;
 
 	if (lo->transfer != transfer_none) {
 		page = alloc_page(GFP_NOIO | __GFP_HIGHMEM);
@@ -302,11 +303,11 @@
 		do_lo_send = do_lo_send_direct_write;
 	}
 
-	bio_for_each_segment(bvec, bio, i) {
-		ret = do_lo_send(lo, bvec, pos, page);
+	bio_for_each_segment(bvec, bio, iter) {
+		ret = do_lo_send(lo, &bvec, pos, page);
 		if (ret < 0)
 			break;
-		pos += bvec->bv_len;
+		pos += bvec.bv_len;
 	}
 	if (page) {
 		kunmap(page);
@@ -392,20 +393,20 @@
 static int
 lo_receive(struct loop_device *lo, struct bio *bio, int bsize, loff_t pos)
 {
-	struct bio_vec *bvec;
+	struct bio_vec bvec;
+	struct bvec_iter iter;
 	ssize_t s;
-	int i;
 
-	bio_for_each_segment(bvec, bio, i) {
-		s = do_lo_receive(lo, bvec, bsize, pos);
+	bio_for_each_segment(bvec, bio, iter) {
+		s = do_lo_receive(lo, &bvec, bsize, pos);
 		if (s < 0)
 			return s;
 
-		if (s != bvec->bv_len) {
+		if (s != bvec.bv_len) {
 			zero_fill_bio(bio);
 			break;
 		}
-		pos += bvec->bv_len;
+		pos += bvec.bv_len;
 	}
 	return 0;
 }
@@ -415,7 +416,7 @@
 	loff_t pos;
 	int ret;
 
-	pos = ((loff_t) bio->bi_sector << 9) + lo->lo_offset;
+	pos = ((loff_t) bio->bi_iter.bi_sector << 9) + lo->lo_offset;
 
 	if (bio_rw(bio) == WRITE) {
 		struct file *file = lo->lo_backing_file;
@@ -444,7 +445,7 @@
 				goto out;
 			}
 			ret = file->f_op->fallocate(file, mode, pos,
-						    bio->bi_size);
+						    bio->bi_iter.bi_size);
 			if (unlikely(ret && ret != -EINVAL &&
 				     ret != -EOPNOTSUPP))
 				ret = -EIO;
@@ -798,7 +799,7 @@
 
 	/*
 	 * We use punch hole to reclaim the free space used by the
-	 * image a.k.a. discard. However we do support discard if
+	 * image a.k.a. discard. However we do not support discard if
 	 * encryption is enabled, because it may give an attacker
 	 * useful information.
 	 */
diff --git a/drivers/block/mg_disk.c b/drivers/block/mg_disk.c
index 7bc363f..eb59b12 100644
--- a/drivers/block/mg_disk.c
+++ b/drivers/block/mg_disk.c
@@ -915,7 +915,7 @@
 
 	/* disk reset */
 	if (prv_data->dev_attr == MG_STORAGE_DEV) {
-		/* If POR seq. not yet finised, wait */
+		/* If POR seq. not yet finished, wait */
 		err = mg_wait_rstout(host->rstout, MG_TMAX_RSTOUT);
 		if (err)
 			goto probe_err_3b;
diff --git a/drivers/block/mtip32xx/mtip32xx.c b/drivers/block/mtip32xx/mtip32xx.c
index 050c712..51602695 100644
--- a/drivers/block/mtip32xx/mtip32xx.c
+++ b/drivers/block/mtip32xx/mtip32xx.c
@@ -41,10 +41,31 @@
 #include "mtip32xx.h"
 
 #define HW_CMD_SLOT_SZ		(MTIP_MAX_COMMAND_SLOTS * 32)
-#define HW_CMD_TBL_SZ		(AHCI_CMD_TBL_HDR_SZ + (MTIP_MAX_SG * 16))
-#define HW_CMD_TBL_AR_SZ	(HW_CMD_TBL_SZ * MTIP_MAX_COMMAND_SLOTS)
-#define HW_PORT_PRIV_DMA_SZ \
-		(HW_CMD_SLOT_SZ + HW_CMD_TBL_AR_SZ + AHCI_RX_FIS_SZ)
+
+/* DMA region containing RX Fis, Identify, RLE10, and SMART buffers */
+#define AHCI_RX_FIS_SZ          0x100
+#define AHCI_RX_FIS_OFFSET      0x0
+#define AHCI_IDFY_SZ            ATA_SECT_SIZE
+#define AHCI_IDFY_OFFSET        0x400
+#define AHCI_SECTBUF_SZ         ATA_SECT_SIZE
+#define AHCI_SECTBUF_OFFSET     0x800
+#define AHCI_SMARTBUF_SZ        ATA_SECT_SIZE
+#define AHCI_SMARTBUF_OFFSET    0xC00
+/* 0x100 + 0x200 + 0x200 + 0x200 is smaller than 4k but we pad it out */
+#define BLOCK_DMA_ALLOC_SZ      4096
+
+/* DMA region containing command table (should be 8192 bytes) */
+#define AHCI_CMD_SLOT_SZ        sizeof(struct mtip_cmd_hdr)
+#define AHCI_CMD_TBL_SZ         (MTIP_MAX_COMMAND_SLOTS * AHCI_CMD_SLOT_SZ)
+#define AHCI_CMD_TBL_OFFSET     0x0
+
+/* DMA region per command (contains header and SGL) */
+#define AHCI_CMD_TBL_HDR_SZ     0x80
+#define AHCI_CMD_TBL_HDR_OFFSET 0x0
+#define AHCI_CMD_TBL_SGL_SZ     (MTIP_MAX_SG * sizeof(struct mtip_cmd_sg))
+#define AHCI_CMD_TBL_SGL_OFFSET AHCI_CMD_TBL_HDR_SZ
+#define CMD_DMA_ALLOC_SZ        (AHCI_CMD_TBL_SGL_SZ + AHCI_CMD_TBL_HDR_SZ)
+
 
 #define HOST_CAP_NZDMA		(1 << 19)
 #define HOST_HSORG		0xFC
@@ -899,8 +920,9 @@
 			fail_reason = "thermal shutdown";
 		}
 		if (buf[288] == 0xBF) {
+			set_bit(MTIP_DDF_SEC_LOCK_BIT, &dd->dd_flag);
 			dev_info(&dd->pdev->dev,
-				"Drive indicates rebuild has failed.\n");
+				"Drive indicates rebuild has failed. Secure erase required.\n");
 			fail_all_ncq_cmds = 1;
 			fail_reason = "rebuild failed";
 		}
@@ -1566,6 +1588,12 @@
 	}
 #endif
 
+	/* Check security locked state */
+	if (port->identify[128] & 0x4)
+		set_bit(MTIP_DDF_SEC_LOCK_BIT, &port->dd->dd_flag);
+	else
+		clear_bit(MTIP_DDF_SEC_LOCK_BIT, &port->dd->dd_flag);
+
 #ifdef MTIP_TRIM /* Disabling TRIM support temporarily */
 	/* Demux ID.DRAT & ID.RZAT to determine trim support */
 	if (port->identify[69] & (1 << 14) && port->identify[69] & (1 << 5))
@@ -1887,6 +1915,10 @@
 	strlcpy(cbuf, (char *)(port->identify+27), 41);
 	dev_info(&port->dd->pdev->dev, "Model: %s\n", cbuf);
 
+	dev_info(&port->dd->pdev->dev, "Security: %04x %s\n",
+		port->identify[128],
+		port->identify[128] & 0x4 ? "(LOCKED)" : "");
+
 	if (mtip_hw_get_capacity(port->dd, &sectors))
 		dev_info(&port->dd->pdev->dev,
 			"Capacity: %llu sectors (%llu MB)\n",
@@ -3313,6 +3345,118 @@
 }
 
 /*
+ * DMA region teardown
+ *
+ * @dd Pointer to driver_data structure
+ *
+ * return value
+ *      None
+ */
+static void mtip_dma_free(struct driver_data *dd)
+{
+	int i;
+	struct mtip_port *port = dd->port;
+
+	if (port->block1)
+		dmam_free_coherent(&dd->pdev->dev, BLOCK_DMA_ALLOC_SZ,
+					port->block1, port->block1_dma);
+
+	if (port->command_list) {
+		dmam_free_coherent(&dd->pdev->dev, AHCI_CMD_TBL_SZ,
+				port->command_list, port->command_list_dma);
+	}
+
+	for (i = 0; i < MTIP_MAX_COMMAND_SLOTS; i++) {
+		if (port->commands[i].command)
+			dmam_free_coherent(&dd->pdev->dev, CMD_DMA_ALLOC_SZ,
+				port->commands[i].command,
+				port->commands[i].command_dma);
+	}
+}
+
+/*
+ * DMA region setup
+ *
+ * @dd Pointer to driver_data structure
+ *
+ * return value
+ *      -ENOMEM Not enough free DMA region space to initialize driver
+ */
+static int mtip_dma_alloc(struct driver_data *dd)
+{
+	struct mtip_port *port = dd->port;
+	int i, rv = 0;
+	u32 host_cap_64 = readl(dd->mmio + HOST_CAP) & HOST_CAP_64;
+
+	/* Allocate dma memory for RX Fis, Identify, and Sector Buffer */
+	port->block1 =
+		dmam_alloc_coherent(&dd->pdev->dev, BLOCK_DMA_ALLOC_SZ,
+					&port->block1_dma, GFP_KERNEL);
+	if (!port->block1)
+		return -ENOMEM;
+	memset(port->block1, 0, BLOCK_DMA_ALLOC_SZ);
+
+	/* Allocate dma memory for command list */
+	port->command_list =
+		dmam_alloc_coherent(&dd->pdev->dev, AHCI_CMD_TBL_SZ,
+					&port->command_list_dma, GFP_KERNEL);
+	if (!port->command_list) {
+		dmam_free_coherent(&dd->pdev->dev, BLOCK_DMA_ALLOC_SZ,
+					port->block1, port->block1_dma);
+		port->block1 = NULL;
+		port->block1_dma = 0;
+		return -ENOMEM;
+	}
+	memset(port->command_list, 0, AHCI_CMD_TBL_SZ);
+
+	/* Setup all pointers into first DMA region */
+	port->rxfis         = port->block1 + AHCI_RX_FIS_OFFSET;
+	port->rxfis_dma     = port->block1_dma + AHCI_RX_FIS_OFFSET;
+	port->identify      = port->block1 + AHCI_IDFY_OFFSET;
+	port->identify_dma  = port->block1_dma + AHCI_IDFY_OFFSET;
+	port->log_buf       = port->block1 + AHCI_SECTBUF_OFFSET;
+	port->log_buf_dma   = port->block1_dma + AHCI_SECTBUF_OFFSET;
+	port->smart_buf     = port->block1 + AHCI_SMARTBUF_OFFSET;
+	port->smart_buf_dma = port->block1_dma + AHCI_SMARTBUF_OFFSET;
+
+	/* Setup per command SGL DMA region */
+
+	/* Point the command headers at the command tables */
+	for (i = 0; i < MTIP_MAX_COMMAND_SLOTS; i++) {
+		port->commands[i].command =
+			dmam_alloc_coherent(&dd->pdev->dev, CMD_DMA_ALLOC_SZ,
+				&port->commands[i].command_dma, GFP_KERNEL);
+		if (!port->commands[i].command) {
+			rv = -ENOMEM;
+			mtip_dma_free(dd);
+			return rv;
+		}
+		memset(port->commands[i].command, 0, CMD_DMA_ALLOC_SZ);
+
+		port->commands[i].command_header = port->command_list +
+					(sizeof(struct mtip_cmd_hdr) * i);
+		port->commands[i].command_header_dma =
+					dd->port->command_list_dma +
+					(sizeof(struct mtip_cmd_hdr) * i);
+
+		if (host_cap_64)
+			port->commands[i].command_header->ctbau =
+				__force_bit2int cpu_to_le32(
+				(port->commands[i].command_dma >> 16) >> 16);
+
+		port->commands[i].command_header->ctba =
+				__force_bit2int cpu_to_le32(
+				port->commands[i].command_dma & 0xFFFFFFFF);
+
+		sg_init_table(port->commands[i].sg, MTIP_MAX_SG);
+
+		/* Mark command as currently inactive */
+		atomic_set(&dd->port->commands[i].active, 0);
+	}
+	return 0;
+}
+
+/*
  * Called once for each card.
  *
  * @dd Pointer to the driver data structure.
@@ -3370,83 +3514,10 @@
 	dd->port->mmio	= dd->mmio + PORT_OFFSET;
 	dd->port->dd	= dd;
 
-	/* Allocate memory for the command list. */
-	dd->port->command_list =
-		dmam_alloc_coherent(&dd->pdev->dev,
-			HW_PORT_PRIV_DMA_SZ + (ATA_SECT_SIZE * 4),
-			&dd->port->command_list_dma,
-			GFP_KERNEL);
-	if (!dd->port->command_list) {
-		dev_err(&dd->pdev->dev,
-			"Memory allocation: command list\n");
-		rv = -ENOMEM;
+	/* DMA allocations */
+	rv = mtip_dma_alloc(dd);
+	if (rv < 0)
 		goto out1;
-	}
-
-	/* Clear the memory we have allocated. */
-	memset(dd->port->command_list,
-		0,
-		HW_PORT_PRIV_DMA_SZ + (ATA_SECT_SIZE * 4));
-
-	/* Setup the addresse of the RX FIS. */
-	dd->port->rxfis	    = dd->port->command_list + HW_CMD_SLOT_SZ;
-	dd->port->rxfis_dma = dd->port->command_list_dma + HW_CMD_SLOT_SZ;
-
-	/* Setup the address of the command tables. */
-	dd->port->command_table	  = dd->port->rxfis + AHCI_RX_FIS_SZ;
-	dd->port->command_tbl_dma = dd->port->rxfis_dma + AHCI_RX_FIS_SZ;
-
-	/* Setup the address of the identify data. */
-	dd->port->identify     = dd->port->command_table +
-					HW_CMD_TBL_AR_SZ;
-	dd->port->identify_dma = dd->port->command_tbl_dma +
-					HW_CMD_TBL_AR_SZ;
-
-	/* Setup the address of the sector buffer - for some non-ncq cmds */
-	dd->port->sector_buffer	= (void *) dd->port->identify + ATA_SECT_SIZE;
-	dd->port->sector_buffer_dma = dd->port->identify_dma + ATA_SECT_SIZE;
-
-	/* Setup the address of the log buf - for read log command */
-	dd->port->log_buf = (void *)dd->port->sector_buffer  + ATA_SECT_SIZE;
-	dd->port->log_buf_dma = dd->port->sector_buffer_dma + ATA_SECT_SIZE;
-
-	/* Setup the address of the smart buf - for smart read data command */
-	dd->port->smart_buf = (void *)dd->port->log_buf  + ATA_SECT_SIZE;
-	dd->port->smart_buf_dma = dd->port->log_buf_dma + ATA_SECT_SIZE;
-
-
-	/* Point the command headers at the command tables. */
-	for (i = 0; i < num_command_slots; i++) {
-		dd->port->commands[i].command_header =
-					dd->port->command_list +
-					(sizeof(struct mtip_cmd_hdr) * i);
-		dd->port->commands[i].command_header_dma =
-					dd->port->command_list_dma +
-					(sizeof(struct mtip_cmd_hdr) * i);
-
-		dd->port->commands[i].command =
-			dd->port->command_table + (HW_CMD_TBL_SZ * i);
-		dd->port->commands[i].command_dma =
-			dd->port->command_tbl_dma + (HW_CMD_TBL_SZ * i);
-
-		if (readl(dd->mmio + HOST_CAP) & HOST_CAP_64)
-			dd->port->commands[i].command_header->ctbau =
-			__force_bit2int cpu_to_le32(
-			(dd->port->commands[i].command_dma >> 16) >> 16);
-		dd->port->commands[i].command_header->ctba =
-			__force_bit2int cpu_to_le32(
-			dd->port->commands[i].command_dma & 0xFFFFFFFF);
-
-		/*
-		 * If this is not done, a bug is reported by the stock
-		 * FC11 i386. Due to the fact that it has lots of kernel
-		 * debugging enabled.
-		 */
-		sg_init_table(dd->port->commands[i].sg, MTIP_MAX_SG);
-
-		/* Mark all commands as currently inactive.*/
-		atomic_set(&dd->port->commands[i].active, 0);
-	}
 
 	/* Setup the pointers to the extended s_active and CI registers. */
 	for (i = 0; i < dd->slot_groups; i++) {
@@ -3594,12 +3665,8 @@
 
 out2:
 	mtip_deinit_port(dd->port);
+	mtip_dma_free(dd);
 
-	/* Free the command/command header memory. */
-	dmam_free_coherent(&dd->pdev->dev,
-				HW_PORT_PRIV_DMA_SZ + (ATA_SECT_SIZE * 4),
-				dd->port->command_list,
-				dd->port->command_list_dma);
 out1:
 	/* Free the memory allocated for the for structure. */
 	kfree(dd->port);
@@ -3622,7 +3689,8 @@
 	 * saves its state.
 	 */
 	if (!dd->sr) {
-		if (!test_bit(MTIP_DDF_REBUILD_FAILED_BIT, &dd->dd_flag))
+		if (!test_bit(MTIP_PF_REBUILD_BIT, &dd->port->flags) &&
+		    !test_bit(MTIP_DDF_SEC_LOCK_BIT, &dd->dd_flag))
 			if (mtip_standby_immediate(dd->port))
 				dev_warn(&dd->pdev->dev,
 					"STANDBY IMMEDIATE failed\n");
@@ -3641,11 +3709,9 @@
 	irq_set_affinity_hint(dd->pdev->irq, NULL);
 	devm_free_irq(&dd->pdev->dev, dd->pdev->irq, dd);
 
-	/* Free the command/command header memory. */
-	dmam_free_coherent(&dd->pdev->dev,
-			HW_PORT_PRIV_DMA_SZ + (ATA_SECT_SIZE * 4),
-			dd->port->command_list,
-			dd->port->command_list_dma);
+	/* Free dma regions */
+	mtip_dma_free(dd);
+
 	/* Free the memory allocated for the for structure. */
 	kfree(dd->port);
 	dd->port = NULL;
@@ -3962,8 +4028,9 @@
 {
 	struct driver_data *dd = queue->queuedata;
 	struct scatterlist *sg;
-	struct bio_vec *bvec;
-	int i, nents = 0;
+	struct bio_vec bvec;
+	struct bvec_iter iter;
+	int nents = 0;
 	int tag = 0, unaligned = 0;
 
 	if (unlikely(dd->dd_flag & MTIP_DDF_STOP_IO)) {
@@ -3993,7 +4060,7 @@
 	}
 
 	if (unlikely(bio->bi_rw & REQ_DISCARD)) {
-		bio_endio(bio, mtip_send_trim(dd, bio->bi_sector,
+		bio_endio(bio, mtip_send_trim(dd, bio->bi_iter.bi_sector,
 						bio_sectors(bio)));
 		return;
 	}
@@ -4006,7 +4073,8 @@
 
 	if (bio_data_dir(bio) == WRITE && bio_sectors(bio) <= 64 &&
 							dd->unal_qdepth) {
-		if (bio->bi_sector % 8 != 0) /* Unaligned on 4k boundaries */
+		if (bio->bi_iter.bi_sector % 8 != 0)
+			/* Unaligned on 4k boundaries */
 			unaligned = 1;
 		else if (bio_sectors(bio) % 8 != 0) /* Aligned but not 4k/8k */
 			unaligned = 1;
@@ -4025,17 +4093,17 @@
 		}
 
 		/* Create the scatter list for this bio. */
-		bio_for_each_segment(bvec, bio, i) {
+		bio_for_each_segment(bvec, bio, iter) {
 			sg_set_page(&sg[nents],
-					bvec->bv_page,
-					bvec->bv_len,
-					bvec->bv_offset);
+					bvec.bv_page,
+					bvec.bv_len,
+					bvec.bv_offset);
 			nents++;
 		}
 
 		/* Issue the read/write. */
 		mtip_hw_submit_io(dd,
-				bio->bi_sector,
+				bio->bi_iter.bi_sector,
 				bio_sectors(bio),
 				nents,
 				tag,
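
Editor's note: the mtip32xx rework above replaces one large coherent allocation with a fixed 4 KB block plus per-command regions, all obtained through the managed DMA API. A minimal sketch of that allocation pattern, assuming a hypothetical foo_region holder (not mtip32xx code):

#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/errno.h>
#include <linux/gfp.h>
#include <linux/string.h>

struct foo_region {
	void *cpu_addr;
	dma_addr_t dma_addr;
};

/*
 * Managed allocation: the buffer is released automatically when the device is
 * unbound, so dmam_free_coherent() is only needed on early-exit/teardown paths,
 * as mtip_dma_free() does above.
 */
static int foo_alloc_region(struct device *dev, struct foo_region *r, size_t size)
{
	r->cpu_addr = dmam_alloc_coherent(dev, size, &r->dma_addr, GFP_KERNEL);
	if (!r->cpu_addr)
		return -ENOMEM;
	memset(r->cpu_addr, 0, size);
	return 0;
}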
diff --git a/drivers/block/mtip32xx/mtip32xx.h b/drivers/block/mtip32xx/mtip32xx.h
index 9be7a15..b52e9a6 100644
--- a/drivers/block/mtip32xx/mtip32xx.h
+++ b/drivers/block/mtip32xx/mtip32xx.h
@@ -69,7 +69,7 @@
  * Maximum number of scatter gather entries
  * a single command may have.
  */
-#define MTIP_MAX_SG		128
+#define MTIP_MAX_SG		504
 
 /*
  * Maximum number of slot groups (Command Issue & s_active registers)
@@ -92,7 +92,7 @@
 
 /* Driver name and version strings */
 #define MTIP_DRV_NAME		"mtip32xx"
-#define MTIP_DRV_VERSION	"1.2.6os3"
+#define MTIP_DRV_VERSION	"1.3.0"
 
 /* Maximum number of minor device numbers per device. */
 #define MTIP_MAX_MINORS		16
@@ -391,15 +391,13 @@
 	 */
 	dma_addr_t rxfis_dma;
 	/*
-	 * Pointer to the beginning of the command table memory as used
-	 * by the driver.
+	 * Pointer to the DMA region for RX Fis, Identify, RLE10, and SMART
 	 */
-	void *command_table;
+	void *block1;
 	/*
-	 * Pointer to the beginning of the command table memory as used
-	 * by the DMA.
+	 * DMA address of region for RX Fis, Identify, RLE10, and SMART
 	 */
-	dma_addr_t command_tbl_dma;
+	dma_addr_t block1_dma;
 	/*
 	 * Pointer to the beginning of the identify data memory as used
 	 * by the driver.
diff --git a/drivers/block/nbd.c b/drivers/block/nbd.c
index 2dc3b51..55298db 100644
--- a/drivers/block/nbd.c
+++ b/drivers/block/nbd.c
@@ -271,18 +271,18 @@
 
 	if (nbd_cmd(req) == NBD_CMD_WRITE) {
 		struct req_iterator iter;
-		struct bio_vec *bvec;
+		struct bio_vec bvec;
 		/*
 		 * we are really probing at internals to determine
 		 * whether to set MSG_MORE or not...
 		 */
 		rq_for_each_segment(bvec, req, iter) {
 			flags = 0;
-			if (!rq_iter_last(req, iter))
+			if (!rq_iter_last(bvec, iter))
 				flags = MSG_MORE;
 			dprintk(DBG_TX, "%s: request %p: sending %d bytes data\n",
-					nbd->disk->disk_name, req, bvec->bv_len);
-			result = sock_send_bvec(nbd, bvec, flags);
+					nbd->disk->disk_name, req, bvec.bv_len);
+			result = sock_send_bvec(nbd, &bvec, flags);
 			if (result <= 0) {
 				dev_err(disk_to_dev(nbd->disk),
 					"Send data failed (result %d)\n",
@@ -378,10 +378,10 @@
 			nbd->disk->disk_name, req);
 	if (nbd_cmd(req) == NBD_CMD_READ) {
 		struct req_iterator iter;
-		struct bio_vec *bvec;
+		struct bio_vec bvec;
 
 		rq_for_each_segment(bvec, req, iter) {
-			result = sock_recv_bvec(nbd, bvec);
+			result = sock_recv_bvec(nbd, &bvec);
 			if (result <= 0) {
 				dev_err(disk_to_dev(nbd->disk), "Receive data failed (result %d)\n",
 					result);
@@ -389,7 +389,7 @@
 				return req;
 			}
 			dprintk(DBG_RX, "%s: request %p: got %d bytes data\n",
-				nbd->disk->disk_name, req, bvec->bv_len);
+				nbd->disk->disk_name, req, bvec.bv_len);
 		}
 	}
 	return req;
diff --git a/drivers/block/null_blk.c b/drivers/block/null_blk.c
index 83a598e..091b9ea 100644
--- a/drivers/block/null_blk.c
+++ b/drivers/block/null_blk.c
@@ -60,7 +60,9 @@
 	NULL_IRQ_NONE		= 0,
 	NULL_IRQ_SOFTIRQ	= 1,
 	NULL_IRQ_TIMER		= 2,
+};
 
+enum {
 	NULL_Q_BIO		= 0,
 	NULL_Q_RQ		= 1,
 	NULL_Q_MQ		= 2,
@@ -172,18 +174,20 @@
 
 static void end_cmd(struct nullb_cmd *cmd)
 {
-	if (cmd->rq) {
-		if (queue_mode == NULL_Q_MQ)
-			blk_mq_end_io(cmd->rq, 0);
-		else {
-			INIT_LIST_HEAD(&cmd->rq->queuelist);
-			blk_end_request_all(cmd->rq, 0);
-		}
-	} else if (cmd->bio)
+	switch (queue_mode)  {
+	case NULL_Q_MQ:
+		blk_mq_end_io(cmd->rq, 0);
+		return;
+	case NULL_Q_RQ:
+		INIT_LIST_HEAD(&cmd->rq->queuelist);
+		blk_end_request_all(cmd->rq, 0);
+		break;
+	case NULL_Q_BIO:
 		bio_endio(cmd->bio, 0);
+		break;
+	}
 
-	if (queue_mode != NULL_Q_MQ)
-		free_cmd(cmd);
+	free_cmd(cmd);
 }
 
 static enum hrtimer_restart null_cmd_timer_expired(struct hrtimer *timer)
@@ -195,6 +199,7 @@
 	cq = &per_cpu(completion_queues, smp_processor_id());
 
 	while ((entry = llist_del_all(&cq->list)) != NULL) {
+		entry = llist_reverse_order(entry);
 		do {
 			cmd = container_of(entry, struct nullb_cmd, ll_list);
 			end_cmd(cmd);
@@ -221,62 +226,32 @@
 
 static void null_softirq_done_fn(struct request *rq)
 {
-	blk_end_request_all(rq, 0);
+	end_cmd(rq->special);
 }
 
-#ifdef CONFIG_SMP
-
-static void null_ipi_cmd_end_io(void *data)
-{
-	struct completion_queue *cq;
-	struct llist_node *entry, *next;
-	struct nullb_cmd *cmd;
-
-	cq = &per_cpu(completion_queues, smp_processor_id());
-
-	entry = llist_del_all(&cq->list);
-
-	while (entry) {
-		next = entry->next;
-		cmd = llist_entry(entry, struct nullb_cmd, ll_list);
-		end_cmd(cmd);
-		entry = next;
-	}
-}
-
-static void null_cmd_end_ipi(struct nullb_cmd *cmd)
-{
-	struct call_single_data *data = &cmd->csd;
-	int cpu = get_cpu();
-	struct completion_queue *cq = &per_cpu(completion_queues, cpu);
-
-	cmd->ll_list.next = NULL;
-
-	if (llist_add(&cmd->ll_list, &cq->list)) {
-		data->func = null_ipi_cmd_end_io;
-		data->flags = 0;
-		__smp_call_function_single(cpu, data, 0);
-	}
-
-	put_cpu();
-}
-
-#endif /* CONFIG_SMP */
-
 static inline void null_handle_cmd(struct nullb_cmd *cmd)
 {
 	/* Complete IO by inline, softirq or timer */
 	switch (irqmode) {
+	case NULL_IRQ_SOFTIRQ:
+		switch (queue_mode)  {
+		case NULL_Q_MQ:
+			blk_mq_complete_request(cmd->rq);
+			break;
+		case NULL_Q_RQ:
+			blk_complete_request(cmd->rq);
+			break;
+		case NULL_Q_BIO:
+			/*
+			 * XXX: no proper submitting cpu information available.
+			 */
+			end_cmd(cmd);
+			break;
+		}
+		break;
 	case NULL_IRQ_NONE:
 		end_cmd(cmd);
 		break;
-	case NULL_IRQ_SOFTIRQ:
-#ifdef CONFIG_SMP
-		null_cmd_end_ipi(cmd);
-#else
-		end_cmd(cmd);
-#endif
-		break;
 	case NULL_IRQ_TIMER:
 		null_cmd_end_timer(cmd);
 		break;
@@ -411,6 +386,7 @@
 	.queue_rq       = null_queue_rq,
 	.map_queue      = blk_mq_map_queue,
 	.init_hctx	= null_init_hctx,
+	.complete	= null_softirq_done_fn,
 };
 
 static struct blk_mq_reg null_mq_reg = {
@@ -609,13 +585,11 @@
 {
 	unsigned int i;
 
-#if !defined(CONFIG_SMP)
-	if (irqmode == NULL_IRQ_SOFTIRQ) {
-		pr_warn("null_blk: softirq completions not available.\n");
-		pr_warn("null_blk: using direct completions.\n");
-		irqmode = NULL_IRQ_NONE;
+	if (bs > PAGE_SIZE) {
+		pr_warn("null_blk: invalid block size\n");
+		pr_warn("null_blk: defaults block size to %lu\n", PAGE_SIZE);
+		bs = PAGE_SIZE;
 	}
-#endif
 
 	if (queue_mode == NULL_Q_MQ && use_per_node_hctx) {
 		if (submit_queues < nr_online_nodes) {
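
Editor's note: the null_blk completion path above drains a per-CPU lock-less list; llist_del_all() detaches entries in LIFO order, so llist_reverse_order() restores submission order before each command is completed. A stripped-down sketch of that consumer loop (foo_* names are placeholders, not null_blk symbols):

#include <linux/kernel.h>
#include <linux/llist.h>

struct foo_cmd {
	struct llist_node ll_list;
};

extern void foo_complete(struct foo_cmd *cmd);	/* placeholder completion */

static void foo_drain(struct llist_head *list)
{
	struct llist_node *entry;

	while ((entry = llist_del_all(list)) != NULL) {
		entry = llist_reverse_order(entry);	/* LIFO -> FIFO */
		do {
			struct foo_cmd *cmd =
				container_of(entry, struct foo_cmd, ll_list);
			entry = entry->next;
			foo_complete(cmd);
		} while (entry);
	}
}

Producers add entries with llist_add() from any context without taking a lock, which is why the timer and softirq completion paths above can share the same queue.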
diff --git a/drivers/block/nvme-core.c b/drivers/block/nvme-core.c
index 26d03fa..51824d1 100644
--- a/drivers/block/nvme-core.c
+++ b/drivers/block/nvme-core.c
@@ -46,7 +46,6 @@
 #define NVME_Q_DEPTH 1024
 #define SQ_SIZE(depth)		(depth * sizeof(struct nvme_command))
 #define CQ_SIZE(depth)		(depth * sizeof(struct nvme_completion))
-#define NVME_MINORS 64
 #define ADMIN_TIMEOUT	(60 * HZ)
 
 static int nvme_major;
@@ -58,6 +57,17 @@
 static DEFINE_SPINLOCK(dev_list_lock);
 static LIST_HEAD(dev_list);
 static struct task_struct *nvme_thread;
+static struct workqueue_struct *nvme_workq;
+
+static void nvme_reset_failed_dev(struct work_struct *ws);
+
+struct async_cmd_info {
+	struct kthread_work work;
+	struct kthread_worker *worker;
+	u32 result;
+	int status;
+	void *ctx;
+};
 
 /*
  * An NVM Express queue.  Each device has at least two (one for admin
@@ -66,6 +76,7 @@
 struct nvme_queue {
 	struct device *q_dmadev;
 	struct nvme_dev *dev;
+	char irqname[24];	/* nvme4294967295-65535\0 */
 	spinlock_t q_lock;
 	struct nvme_command *sq_cmds;
 	volatile struct nvme_completion *cqes;
@@ -80,9 +91,11 @@
 	u16 sq_head;
 	u16 sq_tail;
 	u16 cq_head;
+	u16 qid;
 	u8 cq_phase;
 	u8 cqe_seen;
 	u8 q_suspended;
+	struct async_cmd_info cmdinfo;
 	unsigned long cmdid_data[];
 };
 
@@ -97,6 +110,7 @@
 	BUILD_BUG_ON(sizeof(struct nvme_delete_queue) != 64);
 	BUILD_BUG_ON(sizeof(struct nvme_features) != 64);
 	BUILD_BUG_ON(sizeof(struct nvme_format_cmd) != 64);
+	BUILD_BUG_ON(sizeof(struct nvme_abort_cmd) != 64);
 	BUILD_BUG_ON(sizeof(struct nvme_command) != 64);
 	BUILD_BUG_ON(sizeof(struct nvme_id_ctrl) != 4096);
 	BUILD_BUG_ON(sizeof(struct nvme_id_ns) != 4096);
@@ -111,6 +125,7 @@
 	nvme_completion_fn fn;
 	void *ctx;
 	unsigned long timeout;
+	int aborted;
 };
 
 static struct nvme_cmd_info *nvme_cmd_info(struct nvme_queue *nvmeq)
@@ -154,6 +169,7 @@
 	info[cmdid].fn = handler;
 	info[cmdid].ctx = ctx;
 	info[cmdid].timeout = jiffies + timeout;
+	info[cmdid].aborted = 0;
 	return cmdid;
 }
 
@@ -172,6 +188,7 @@
 #define CMD_CTX_COMPLETED	(0x310 + CMD_CTX_BASE)
 #define CMD_CTX_INVALID		(0x314 + CMD_CTX_BASE)
 #define CMD_CTX_FLUSH		(0x318 + CMD_CTX_BASE)
+#define CMD_CTX_ABORT		(0x31C + CMD_CTX_BASE)
 
 static void special_completion(struct nvme_dev *dev, void *ctx,
 						struct nvme_completion *cqe)
@@ -180,6 +197,10 @@
 		return;
 	if (ctx == CMD_CTX_FLUSH)
 		return;
+	if (ctx == CMD_CTX_ABORT) {
+		++dev->abort_limit;
+		return;
+	}
 	if (ctx == CMD_CTX_COMPLETED) {
 		dev_warn(&dev->pci_dev->dev,
 				"completed id %d twice on queue %d\n",
@@ -196,6 +217,15 @@
 	dev_warn(&dev->pci_dev->dev, "Unknown special completion %p\n", ctx);
 }
 
+static void async_completion(struct nvme_dev *dev, void *ctx,
+						struct nvme_completion *cqe)
+{
+	struct async_cmd_info *cmdinfo = ctx;
+	cmdinfo->result = le32_to_cpup(&cqe->result);
+	cmdinfo->status = le16_to_cpup(&cqe->status) >> 1;
+	queue_kthread_work(cmdinfo->worker, &cmdinfo->work);
+}
+
 /*
  * Called with local interrupts disabled and the q_lock held.  May not sleep.
  */
@@ -441,104 +471,19 @@
 	return total_len;
 }
 
-struct nvme_bio_pair {
-	struct bio b1, b2, *parent;
-	struct bio_vec *bv1, *bv2;
-	int err;
-	atomic_t cnt;
-};
-
-static void nvme_bio_pair_endio(struct bio *bio, int err)
-{
-	struct nvme_bio_pair *bp = bio->bi_private;
-
-	if (err)
-		bp->err = err;
-
-	if (atomic_dec_and_test(&bp->cnt)) {
-		bio_endio(bp->parent, bp->err);
-		kfree(bp->bv1);
-		kfree(bp->bv2);
-		kfree(bp);
-	}
-}
-
-static struct nvme_bio_pair *nvme_bio_split(struct bio *bio, int idx,
-							int len, int offset)
-{
-	struct nvme_bio_pair *bp;
-
-	BUG_ON(len > bio->bi_size);
-	BUG_ON(idx > bio->bi_vcnt);
-
-	bp = kmalloc(sizeof(*bp), GFP_ATOMIC);
-	if (!bp)
-		return NULL;
-	bp->err = 0;
-
-	bp->b1 = *bio;
-	bp->b2 = *bio;
-
-	bp->b1.bi_size = len;
-	bp->b2.bi_size -= len;
-	bp->b1.bi_vcnt = idx;
-	bp->b2.bi_idx = idx;
-	bp->b2.bi_sector += len >> 9;
-
-	if (offset) {
-		bp->bv1 = kmalloc(bio->bi_max_vecs * sizeof(struct bio_vec),
-								GFP_ATOMIC);
-		if (!bp->bv1)
-			goto split_fail_1;
-
-		bp->bv2 = kmalloc(bio->bi_max_vecs * sizeof(struct bio_vec),
-								GFP_ATOMIC);
-		if (!bp->bv2)
-			goto split_fail_2;
-
-		memcpy(bp->bv1, bio->bi_io_vec,
-			bio->bi_max_vecs * sizeof(struct bio_vec));
-		memcpy(bp->bv2, bio->bi_io_vec,
-			bio->bi_max_vecs * sizeof(struct bio_vec));
-
-		bp->b1.bi_io_vec = bp->bv1;
-		bp->b2.bi_io_vec = bp->bv2;
-		bp->b2.bi_io_vec[idx].bv_offset += offset;
-		bp->b2.bi_io_vec[idx].bv_len -= offset;
-		bp->b1.bi_io_vec[idx].bv_len = offset;
-		bp->b1.bi_vcnt++;
-	} else
-		bp->bv1 = bp->bv2 = NULL;
-
-	bp->b1.bi_private = bp;
-	bp->b2.bi_private = bp;
-
-	bp->b1.bi_end_io = nvme_bio_pair_endio;
-	bp->b2.bi_end_io = nvme_bio_pair_endio;
-
-	bp->parent = bio;
-	atomic_set(&bp->cnt, 2);
-
-	return bp;
-
- split_fail_2:
-	kfree(bp->bv1);
- split_fail_1:
-	kfree(bp);
-	return NULL;
-}
-
 static int nvme_split_and_submit(struct bio *bio, struct nvme_queue *nvmeq,
-						int idx, int len, int offset)
+				 int len)
 {
-	struct nvme_bio_pair *bp = nvme_bio_split(bio, idx, len, offset);
-	if (!bp)
+	struct bio *split = bio_split(bio, len >> 9, GFP_ATOMIC, NULL);
+	if (!split)
 		return -ENOMEM;
 
+	bio_chain(split, bio);
+
 	if (bio_list_empty(&nvmeq->sq_cong))
 		add_wait_queue(&nvmeq->sq_full, &nvmeq->sq_cong_wait);
-	bio_list_add(&nvmeq->sq_cong, &bp->b1);
-	bio_list_add(&nvmeq->sq_cong, &bp->b2);
+	bio_list_add(&nvmeq->sq_cong, split);
+	bio_list_add(&nvmeq->sq_cong, bio);
 
 	return 0;
 }
@@ -550,41 +495,44 @@
 static int nvme_map_bio(struct nvme_queue *nvmeq, struct nvme_iod *iod,
 		struct bio *bio, enum dma_data_direction dma_dir, int psegs)
 {
-	struct bio_vec *bvec, *bvprv = NULL;
+	struct bio_vec bvec, bvprv;
+	struct bvec_iter iter;
 	struct scatterlist *sg = NULL;
-	int i, length = 0, nsegs = 0, split_len = bio->bi_size;
+	int length = 0, nsegs = 0, split_len = bio->bi_iter.bi_size;
+	int first = 1;
 
 	if (nvmeq->dev->stripe_size)
 		split_len = nvmeq->dev->stripe_size -
-			((bio->bi_sector << 9) & (nvmeq->dev->stripe_size - 1));
+			((bio->bi_iter.bi_sector << 9) &
+			 (nvmeq->dev->stripe_size - 1));
 
 	sg_init_table(iod->sg, psegs);
-	bio_for_each_segment(bvec, bio, i) {
-		if (bvprv && BIOVEC_PHYS_MERGEABLE(bvprv, bvec)) {
-			sg->length += bvec->bv_len;
+	bio_for_each_segment(bvec, bio, iter) {
+		if (!first && BIOVEC_PHYS_MERGEABLE(&bvprv, &bvec)) {
+			sg->length += bvec.bv_len;
 		} else {
-			if (bvprv && BIOVEC_NOT_VIRT_MERGEABLE(bvprv, bvec))
-				return nvme_split_and_submit(bio, nvmeq, i,
-								length, 0);
+			if (!first && BIOVEC_NOT_VIRT_MERGEABLE(&bvprv, &bvec))
+				return nvme_split_and_submit(bio, nvmeq,
+							     length);
 
 			sg = sg ? sg + 1 : iod->sg;
-			sg_set_page(sg, bvec->bv_page, bvec->bv_len,
-							bvec->bv_offset);
+			sg_set_page(sg, bvec.bv_page,
+				    bvec.bv_len, bvec.bv_offset);
 			nsegs++;
 		}
 
-		if (split_len - length < bvec->bv_len)
-			return nvme_split_and_submit(bio, nvmeq, i, split_len,
-							split_len - length);
-		length += bvec->bv_len;
+		if (split_len - length < bvec.bv_len)
+			return nvme_split_and_submit(bio, nvmeq, split_len);
+		length += bvec.bv_len;
 		bvprv = bvec;
+		first = 0;
 	}
 	iod->nents = nsegs;
 	sg_mark_end(sg);
 	if (dma_map_sg(nvmeq->q_dmadev, iod->sg, iod->nents, dma_dir) == 0)
 		return -ENOMEM;
 
-	BUG_ON(length != bio->bi_size);
+	BUG_ON(length != bio->bi_iter.bi_size);
 	return length;
 }
 
@@ -608,8 +556,8 @@
 	iod->npages = 0;
 
 	range->cattr = cpu_to_le32(0);
-	range->nlb = cpu_to_le32(bio->bi_size >> ns->lba_shift);
-	range->slba = cpu_to_le64(nvme_block_nr(ns, bio->bi_sector));
+	range->nlb = cpu_to_le32(bio->bi_iter.bi_size >> ns->lba_shift);
+	range->slba = cpu_to_le64(nvme_block_nr(ns, bio->bi_iter.bi_sector));
 
 	memset(cmnd, 0, sizeof(*cmnd));
 	cmnd->dsm.opcode = nvme_cmd_dsm;
@@ -674,7 +622,7 @@
 	}
 
 	result = -ENOMEM;
-	iod = nvme_alloc_iod(psegs, bio->bi_size, GFP_ATOMIC);
+	iod = nvme_alloc_iod(psegs, bio->bi_iter.bi_size, GFP_ATOMIC);
 	if (!iod)
 		goto nomem;
 	iod->private = bio;
@@ -723,7 +671,7 @@
 	cmnd->rw.nsid = cpu_to_le32(ns->ns_id);
 	length = nvme_setup_prps(nvmeq->dev, &cmnd->common, iod, length,
 								GFP_ATOMIC);
-	cmnd->rw.slba = cpu_to_le64(nvme_block_nr(ns, bio->bi_sector));
+	cmnd->rw.slba = cpu_to_le64(nvme_block_nr(ns, bio->bi_iter.bi_sector));
 	cmnd->rw.length = cpu_to_le16((length >> ns->lba_shift) - 1);
 	cmnd->rw.control = cpu_to_le16(control);
 	cmnd->rw.dsmgmt = cpu_to_le32(dsmgmt);
@@ -775,7 +723,7 @@
 	if (head == nvmeq->cq_head && phase == nvmeq->cq_phase)
 		return 0;
 
-	writel(head, nvmeq->q_db + (1 << nvmeq->dev->db_stride));
+	writel(head, nvmeq->q_db + nvmeq->dev->db_stride);
 	nvmeq->cq_head = head;
 	nvmeq->cq_phase = phase;
 
@@ -886,12 +834,34 @@
 	return cmdinfo.status;
 }
 
+static int nvme_submit_async_cmd(struct nvme_queue *nvmeq,
+			struct nvme_command *cmd,
+			struct async_cmd_info *cmdinfo, unsigned timeout)
+{
+	int cmdid;
+
+	cmdid = alloc_cmdid_killable(nvmeq, cmdinfo, async_completion, timeout);
+	if (cmdid < 0)
+		return cmdid;
+	cmdinfo->status = -EINTR;
+	cmd->common.command_id = cmdid;
+	nvme_submit_cmd(nvmeq, cmd);
+	return 0;
+}
+
 int nvme_submit_admin_cmd(struct nvme_dev *dev, struct nvme_command *cmd,
 								u32 *result)
 {
 	return nvme_submit_sync_cmd(dev->queues[0], cmd, result, ADMIN_TIMEOUT);
 }
 
+static int nvme_submit_admin_cmd_async(struct nvme_dev *dev,
+		struct nvme_command *cmd, struct async_cmd_info *cmdinfo)
+{
+	return nvme_submit_async_cmd(dev->queues[0], cmd, cmdinfo,
+								ADMIN_TIMEOUT);
+}
+
 static int adapter_delete_queue(struct nvme_dev *dev, u8 opcode, u16 id)
 {
 	int status;
@@ -1002,6 +972,56 @@
 }
 
 /**
+ * nvme_abort_cmd - Attempt aborting a command
+ * @cmdid: Command id of a timed out IO
+ * @queue: The queue with timed out IO
+ *
+ * Schedule controller reset if the command was already aborted once before and
+ * still hasn't been returned to the driver, or if this is the admin queue.
+ */
+static void nvme_abort_cmd(int cmdid, struct nvme_queue *nvmeq)
+{
+	int a_cmdid;
+	struct nvme_command cmd;
+	struct nvme_dev *dev = nvmeq->dev;
+	struct nvme_cmd_info *info = nvme_cmd_info(nvmeq);
+
+	if (!nvmeq->qid || info[cmdid].aborted) {
+		if (work_busy(&dev->reset_work))
+			return;
+		list_del_init(&dev->node);
+		dev_warn(&dev->pci_dev->dev,
+			"I/O %d QID %d timeout, reset controller\n", cmdid,
+								nvmeq->qid);
+		PREPARE_WORK(&dev->reset_work, nvme_reset_failed_dev);
+		queue_work(nvme_workq, &dev->reset_work);
+		return;
+	}
+
+	if (!dev->abort_limit)
+		return;
+
+	a_cmdid = alloc_cmdid(dev->queues[0], CMD_CTX_ABORT, special_completion,
+								ADMIN_TIMEOUT);
+	if (a_cmdid < 0)
+		return;
+
+	memset(&cmd, 0, sizeof(cmd));
+	cmd.abort.opcode = nvme_admin_abort_cmd;
+	cmd.abort.cid = cmdid;
+	cmd.abort.sqid = cpu_to_le16(nvmeq->qid);
+	cmd.abort.command_id = a_cmdid;
+
+	--dev->abort_limit;
+	info[cmdid].aborted = 1;
+	info[cmdid].timeout = jiffies + ADMIN_TIMEOUT;
+
+	dev_warn(nvmeq->q_dmadev, "Aborting I/O %d QID %d\n", cmdid,
+							nvmeq->qid);
+	nvme_submit_cmd(dev->queues[0], &cmd);
+}
+
+/**
  * nvme_cancel_ios - Cancel outstanding I/Os
  * @queue: The queue to cancel I/Os on
  * @timeout: True to only cancel I/Os which have timed out
@@ -1024,7 +1044,12 @@
 			continue;
 		if (info[cmdid].ctx == CMD_CTX_CANCELLED)
 			continue;
-		dev_warn(nvmeq->q_dmadev, "Cancelling I/O %d\n", cmdid);
+		if (timeout && nvmeq->dev->initialized) {
+			nvme_abort_cmd(cmdid, nvmeq);
+			continue;
+		}
+		dev_warn(nvmeq->q_dmadev, "Cancelling I/O %d QID %d\n", cmdid,
+								nvmeq->qid);
 		ctx = cancel_cmdid(nvmeq, cmdid, &fn);
 		fn(nvmeq->dev, ctx, &cqe);
 	}
@@ -1046,26 +1071,31 @@
 	kfree(nvmeq);
 }
 
-static void nvme_free_queues(struct nvme_dev *dev)
+static void nvme_free_queues(struct nvme_dev *dev, int lowest)
 {
 	int i;
 
-	for (i = dev->queue_count - 1; i >= 0; i--) {
+	for (i = dev->queue_count - 1; i >= lowest; i--) {
 		nvme_free_queue(dev->queues[i]);
 		dev->queue_count--;
 		dev->queues[i] = NULL;
 	}
 }
 
-static void nvme_disable_queue(struct nvme_dev *dev, int qid)
+/**
+ * nvme_suspend_queue - put queue into suspended state
+ * @nvmeq - queue to suspend
+ *
+ * Returns 1 if already suspended, 0 otherwise.
+ */
+static int nvme_suspend_queue(struct nvme_queue *nvmeq)
 {
-	struct nvme_queue *nvmeq = dev->queues[qid];
-	int vector = dev->entry[nvmeq->cq_vector].vector;
+	int vector = nvmeq->dev->entry[nvmeq->cq_vector].vector;
 
 	spin_lock_irq(&nvmeq->q_lock);
 	if (nvmeq->q_suspended) {
 		spin_unlock_irq(&nvmeq->q_lock);
-		return;
+		return 1;
 	}
 	nvmeq->q_suspended = 1;
 	spin_unlock_irq(&nvmeq->q_lock);
@@ -1073,18 +1103,35 @@
 	irq_set_affinity_hint(vector, NULL);
 	free_irq(vector, nvmeq);
 
-	/* Don't tell the adapter to delete the admin queue */
-	if (qid) {
-		adapter_delete_sq(dev, qid);
-		adapter_delete_cq(dev, qid);
-	}
+	return 0;
+}
 
+static void nvme_clear_queue(struct nvme_queue *nvmeq)
+{
 	spin_lock_irq(&nvmeq->q_lock);
 	nvme_process_cq(nvmeq);
 	nvme_cancel_ios(nvmeq, false);
 	spin_unlock_irq(&nvmeq->q_lock);
 }
 
+static void nvme_disable_queue(struct nvme_dev *dev, int qid)
+{
+	struct nvme_queue *nvmeq = dev->queues[qid];
+
+	if (!nvmeq)
+		return;
+	if (nvme_suspend_queue(nvmeq))
+		return;
+
+	/* Don't tell the adapter to delete the admin queue.
+	 * Don't tell a removed adapter to delete IO queues. */
+	if (qid && readl(&dev->bar->csts) != -1) {
+		adapter_delete_sq(dev, qid);
+		adapter_delete_cq(dev, qid);
+	}
+	nvme_clear_queue(nvmeq);
+}
+
 static struct nvme_queue *nvme_alloc_queue(struct nvme_dev *dev, int qid,
 							int depth, int vector)
 {
@@ -1107,15 +1154,18 @@
 
 	nvmeq->q_dmadev = dmadev;
 	nvmeq->dev = dev;
+	snprintf(nvmeq->irqname, sizeof(nvmeq->irqname), "nvme%dq%d",
+			dev->instance, qid);
 	spin_lock_init(&nvmeq->q_lock);
 	nvmeq->cq_head = 0;
 	nvmeq->cq_phase = 1;
 	init_waitqueue_head(&nvmeq->sq_full);
 	init_waitqueue_entry(&nvmeq->sq_cong_wait, nvme_thread);
 	bio_list_init(&nvmeq->sq_cong);
-	nvmeq->q_db = &dev->dbs[qid << (dev->db_stride + 1)];
+	nvmeq->q_db = &dev->dbs[qid * 2 * dev->db_stride];
 	nvmeq->q_depth = depth;
 	nvmeq->cq_vector = vector;
+	nvmeq->qid = qid;
 	nvmeq->q_suspended = 1;
 	dev->queue_count++;
 
@@ -1134,11 +1184,10 @@
 {
 	if (use_threaded_interrupts)
 		return request_threaded_irq(dev->entry[nvmeq->cq_vector].vector,
-					nvme_irq_check, nvme_irq,
-					IRQF_DISABLED | IRQF_SHARED,
+					nvme_irq_check, nvme_irq, IRQF_SHARED,
 					name, nvmeq);
 	return request_irq(dev->entry[nvmeq->cq_vector].vector, nvme_irq,
-				IRQF_DISABLED | IRQF_SHARED, name, nvmeq);
+				IRQF_SHARED, name, nvmeq);
 }
 
 static void nvme_init_queue(struct nvme_queue *nvmeq, u16 qid)
@@ -1149,7 +1198,7 @@
 	nvmeq->sq_tail = 0;
 	nvmeq->cq_head = 0;
 	nvmeq->cq_phase = 1;
-	nvmeq->q_db = &dev->dbs[qid << (dev->db_stride + 1)];
+	nvmeq->q_db = &dev->dbs[qid * 2 * dev->db_stride];
 	memset(nvmeq->cmdid_data, 0, extra);
 	memset((void *)nvmeq->cqes, 0, CQ_SIZE(nvmeq->q_depth));
 	nvme_cancel_ios(nvmeq, false);
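
The doorbell arithmetic changes from "qid << (dev->db_stride + 1)" to "qid * 2 * dev->db_stride" because, later in this patch, db_stride is cached as 1 << NVME_CAP_STRIDE(cap) instead of the raw DSTRD field. A quick check that the two formulas agree under that assumption:

	/* Sketch: the two doorbell-offset formulas agree once db_stride caches
	 * (1 << CAP.DSTRD) rather than the raw DSTRD value. Each queue has a
	 * submission and a completion doorbell, hence the factor of two. */
	#include <assert.h>
	#include <stdio.h>

	int main(void)
	{
		for (unsigned dstrd = 0; dstrd <= 4; dstrd++) {
			unsigned old_stride = dstrd;		/* raw CAP.DSTRD, as before  */
			unsigned new_stride = 1u << dstrd;	/* cached 1 << CAP.DSTRD now */

			for (unsigned qid = 0; qid < 8; qid++)
				assert((qid << (old_stride + 1)) == qid * 2 * new_stride);
		}
		printf("doorbell offsets match for all tested strides\n");
		return 0;
	}

The db_bar_size() change further down follows the same substitution: (n + 1) << (db_stride + 3) becomes (n + 1) * 8 * db_stride.
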
@@ -1169,13 +1218,13 @@
 	if (result < 0)
 		goto release_cq;
 
-	result = queue_request_irq(dev, nvmeq, "nvme");
+	result = queue_request_irq(dev, nvmeq, nvmeq->irqname);
 	if (result < 0)
 		goto release_sq;
 
-	spin_lock(&nvmeq->q_lock);
+	spin_lock_irq(&nvmeq->q_lock);
 	nvme_init_queue(nvmeq, qid);
-	spin_unlock(&nvmeq->q_lock);
+	spin_unlock_irq(&nvmeq->q_lock);
 
 	return result;
 
@@ -1287,13 +1336,13 @@
 	if (result)
 		return result;
 
-	result = queue_request_irq(dev, nvmeq, "nvme admin");
+	result = queue_request_irq(dev, nvmeq, nvmeq->irqname);
 	if (result)
 		return result;
 
-	spin_lock(&nvmeq->q_lock);
+	spin_lock_irq(&nvmeq->q_lock);
 	nvme_init_queue(nvmeq, 0);
-	spin_unlock(&nvmeq->q_lock);
+	spin_unlock_irq(&nvmeq->q_lock);
 	return result;
 }
 
@@ -1569,10 +1618,47 @@
 	}
 }
 
+#ifdef CONFIG_COMPAT
+static int nvme_compat_ioctl(struct block_device *bdev, fmode_t mode,
+					unsigned int cmd, unsigned long arg)
+{
+	struct nvme_ns *ns = bdev->bd_disk->private_data;
+
+	switch (cmd) {
+	case SG_IO:
+		return nvme_sg_io32(ns, arg);
+	}
+	return nvme_ioctl(bdev, mode, cmd, arg);
+}
+#else
+#define nvme_compat_ioctl	NULL
+#endif
+
+static int nvme_open(struct block_device *bdev, fmode_t mode)
+{
+	struct nvme_ns *ns = bdev->bd_disk->private_data;
+	struct nvme_dev *dev = ns->dev;
+
+	kref_get(&dev->kref);
+	return 0;
+}
+
+static void nvme_free_dev(struct kref *kref);
+
+static void nvme_release(struct gendisk *disk, fmode_t mode)
+{
+	struct nvme_ns *ns = disk->private_data;
+	struct nvme_dev *dev = ns->dev;
+
+	kref_put(&dev->kref, nvme_free_dev);
+}
+
 static const struct block_device_operations nvme_fops = {
 	.owner		= THIS_MODULE,
 	.ioctl		= nvme_ioctl,
-	.compat_ioctl	= nvme_ioctl,
+	.compat_ioctl	= nvme_compat_ioctl,
+	.open		= nvme_open,
+	.release	= nvme_release,
 };
 
 static void nvme_resubmit_bios(struct nvme_queue *nvmeq)
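
The new open/release handlers pin the device with a kref for as long as a block node is held open, so a concurrent remove cannot free the nvme_dev out from under an ioctl; the last reference, whether from the driver core or an open file, performs the free. A tiny userspace model of that lifetime pattern, with plain counters standing in for kref:

	/* Sketch: refcounted device lifetime, as in the open/release hooks
	 * above. The object is freed only when the last holder drops it. */
	#include <stdio.h>
	#include <stdlib.h>

	struct fake_dev { int refs; };

	static struct fake_dev *dev_create(void)
	{
		struct fake_dev *d = malloc(sizeof(*d));
		d->refs = 1;			/* reference held by "probe" */
		return d;
	}

	static void dev_get(struct fake_dev *d) { d->refs++; }

	static void dev_put(struct fake_dev *d)
	{
		if (--d->refs == 0) {
			printf("last reference dropped, freeing device\n");
			free(d);
		}
	}

	int main(void)
	{
		struct fake_dev *d = dev_create();

		dev_get(d);	/* open(): fd takes a reference            */
		dev_put(d);	/* remove(): device must survive, fd open  */
		printf("refs after remove with fd still open: %d\n", d->refs);
		dev_put(d);	/* release(): now it is actually freed     */
		return 0;
	}
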
@@ -1596,13 +1682,25 @@
 
 static int nvme_kthread(void *data)
 {
-	struct nvme_dev *dev;
+	struct nvme_dev *dev, *next;
 
 	while (!kthread_should_stop()) {
 		set_current_state(TASK_INTERRUPTIBLE);
 		spin_lock(&dev_list_lock);
-		list_for_each_entry(dev, &dev_list, node) {
+		list_for_each_entry_safe(dev, next, &dev_list, node) {
 			int i;
+			if (readl(&dev->bar->csts) & NVME_CSTS_CFS &&
+							dev->initialized) {
+				if (work_busy(&dev->reset_work))
+					continue;
+				list_del_init(&dev->node);
+				dev_warn(&dev->pci_dev->dev,
+					"Failed status, reset controller\n");
+				PREPARE_WORK(&dev->reset_work,
+							nvme_reset_failed_dev);
+				queue_work(nvme_workq, &dev->reset_work);
+				continue;
+			}
 			for (i = 0; i < dev->queue_count; i++) {
 				struct nvme_queue *nvmeq = dev->queues[i];
 				if (!nvmeq)
@@ -1623,33 +1721,6 @@
 	return 0;
 }
 
-static DEFINE_IDA(nvme_index_ida);
-
-static int nvme_get_ns_idx(void)
-{
-	int index, error;
-
-	do {
-		if (!ida_pre_get(&nvme_index_ida, GFP_KERNEL))
-			return -1;
-
-		spin_lock(&dev_list_lock);
-		error = ida_get_new(&nvme_index_ida, &index);
-		spin_unlock(&dev_list_lock);
-	} while (error == -EAGAIN);
-
-	if (error)
-		index = -1;
-	return index;
-}
-
-static void nvme_put_ns_idx(int index)
-{
-	spin_lock(&dev_list_lock);
-	ida_remove(&nvme_index_ida, index);
-	spin_unlock(&dev_list_lock);
-}
-
 static void nvme_config_discard(struct nvme_ns *ns)
 {
 	u32 logical_block_size = queue_logical_block_size(ns->queue);
@@ -1683,7 +1754,7 @@
 	ns->dev = dev;
 	ns->queue->queuedata = ns;
 
-	disk = alloc_disk(NVME_MINORS);
+	disk = alloc_disk(0);
 	if (!disk)
 		goto out_free_queue;
 	ns->ns_id = nsid;
@@ -1696,12 +1767,12 @@
 		blk_queue_max_hw_sectors(ns->queue, dev->max_hw_sectors);
 
 	disk->major = nvme_major;
-	disk->minors = NVME_MINORS;
-	disk->first_minor = NVME_MINORS * nvme_get_ns_idx();
+	disk->first_minor = 0;
 	disk->fops = &nvme_fops;
 	disk->private_data = ns;
 	disk->queue = ns->queue;
 	disk->driverfs_dev = &dev->pci_dev->dev;
+	disk->flags = GENHD_FL_EXT_DEVT;
 	sprintf(disk->disk_name, "nvme%dn%d", dev->instance, nsid);
 	set_capacity(disk, le64_to_cpup(&id->nsze) << (ns->lba_shift - 9));
 
@@ -1717,15 +1788,6 @@
 	return NULL;
 }
 
-static void nvme_ns_free(struct nvme_ns *ns)
-{
-	int index = ns->disk->first_minor / NVME_MINORS;
-	put_disk(ns->disk);
-	nvme_put_ns_idx(index);
-	blk_cleanup_queue(ns->queue);
-	kfree(ns);
-}
-
 static int set_queue_count(struct nvme_dev *dev, int count)
 {
 	int status;
@@ -1741,11 +1803,12 @@
 
 static size_t db_bar_size(struct nvme_dev *dev, unsigned nr_io_queues)
 {
-	return 4096 + ((nr_io_queues + 1) << (dev->db_stride + 3));
+	return 4096 + ((nr_io_queues + 1) * 8 * dev->db_stride);
 }
 
 static int nvme_setup_io_queues(struct nvme_dev *dev)
 {
+	struct nvme_queue *adminq = dev->queues[0];
 	struct pci_dev *pdev = dev->pci_dev;
 	int result, cpu, i, vecs, nr_io_queues, size, q_depth;
 
@@ -1772,7 +1835,7 @@
 	}
 
 	/* Deregister the admin queue's interrupt */
-	free_irq(dev->entry[0].vector, dev->queues[0]);
+	free_irq(dev->entry[0].vector, adminq);
 
 	vecs = nr_io_queues;
 	for (i = 0; i < vecs; i++)
@@ -1810,9 +1873,9 @@
 	 */
 	nr_io_queues = vecs;
 
-	result = queue_request_irq(dev, dev->queues[0], "nvme admin");
+	result = queue_request_irq(dev, adminq, adminq->irqname);
 	if (result) {
-		dev->queues[0]->q_suspended = 1;
+		adminq->q_suspended = 1;
 		goto free_queues;
 	}
 
@@ -1821,9 +1884,9 @@
 	for (i = dev->queue_count - 1; i > nr_io_queues; i--) {
 		struct nvme_queue *nvmeq = dev->queues[i];
 
-		spin_lock(&nvmeq->q_lock);
+		spin_lock_irq(&nvmeq->q_lock);
 		nvme_cancel_ios(nvmeq, false);
-		spin_unlock(&nvmeq->q_lock);
+		spin_unlock_irq(&nvmeq->q_lock);
 
 		nvme_free_queue(nvmeq);
 		dev->queue_count--;
@@ -1864,7 +1927,7 @@
 	return 0;
 
  free_queues:
-	nvme_free_queues(dev);
+	nvme_free_queues(dev, 1);
 	return result;
 }
 
@@ -1876,6 +1939,7 @@
  */
 static int nvme_dev_add(struct nvme_dev *dev)
 {
+	struct pci_dev *pdev = dev->pci_dev;
 	int res;
 	unsigned nn, i;
 	struct nvme_ns *ns;
@@ -1885,8 +1949,7 @@
 	dma_addr_t dma_addr;
 	int shift = NVME_CAP_MPSMIN(readq(&dev->bar->cap)) + 12;
 
-	mem = dma_alloc_coherent(&dev->pci_dev->dev, 8192, &dma_addr,
-								GFP_KERNEL);
+	mem = dma_alloc_coherent(&pdev->dev, 8192, &dma_addr, GFP_KERNEL);
 	if (!mem)
 		return -ENOMEM;
 
@@ -1899,13 +1962,14 @@
 	ctrl = mem;
 	nn = le32_to_cpup(&ctrl->nn);
 	dev->oncs = le16_to_cpup(&ctrl->oncs);
+	dev->abort_limit = ctrl->acl + 1;
 	memcpy(dev->serial, ctrl->sn, sizeof(ctrl->sn));
 	memcpy(dev->model, ctrl->mn, sizeof(ctrl->mn));
 	memcpy(dev->firmware_rev, ctrl->fr, sizeof(ctrl->fr));
 	if (ctrl->mdts)
 		dev->max_hw_sectors = 1 << (ctrl->mdts + shift - 9);
-	if ((dev->pci_dev->vendor == PCI_VENDOR_ID_INTEL) &&
-			(dev->pci_dev->device == 0x0953) && ctrl->vs[3])
+	if ((pdev->vendor == PCI_VENDOR_ID_INTEL) &&
+			(pdev->device == 0x0953) && ctrl->vs[3])
 		dev->stripe_size = 1 << (ctrl->vs[3] + shift);
 
 	id_ns = mem;
@@ -1953,16 +2017,21 @@
 	    dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)))
 		goto disable;
 
-	pci_set_drvdata(pdev, dev);
 	dev->bar = ioremap(pci_resource_start(pdev, 0), 8192);
 	if (!dev->bar)
 		goto disable;
-
-	dev->db_stride = NVME_CAP_STRIDE(readq(&dev->bar->cap));
+	if (readl(&dev->bar->csts) == -1) {
+		result = -ENODEV;
+		goto unmap;
+	}
+	dev->db_stride = 1 << NVME_CAP_STRIDE(readq(&dev->bar->cap));
 	dev->dbs = ((void __iomem *)dev->bar) + 4096;
 
 	return 0;
 
+ unmap:
+	iounmap(dev->bar);
+	dev->bar = NULL;
  disable:
 	pci_release_regions(pdev);
  disable_pci:
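
The mapping path above starts treating an all-ones read of CSTS as "device gone": a surprise-removed or dead PCI function answers MMIO reads with 0xFFFFFFFF, which compares equal to -1 after the usual conversions. The same test reappears in nvme_disable_queue() and nvme_dev_shutdown(). A small illustration of why the comparison is written that way:

	/* Sketch: a removed PCI function returns all ones for MMIO reads, so a
	 * 32-bit register read of 0xFFFFFFFF compares equal to (u32)-1.
	 * Purely illustrative; no real MMIO is involved here. */
	#include <stdint.h>
	#include <stdio.h>

	static uint32_t fake_readl(int device_present, uint32_t reg_value)
	{
		return device_present ? reg_value : 0xFFFFFFFFu; /* bus returns all ones */
	}

	int main(void)
	{
		uint32_t live = fake_readl(1, 0x1);	/* CSTS.RDY set      */
		uint32_t gone = fake_readl(0, 0x1);	/* device removed    */

		printf("live csts == -1? %s\n", live == (uint32_t)-1 ? "yes" : "no");
		printf("gone csts == -1? %s\n", gone == (uint32_t)-1 ? "yes" : "no");
		return 0;
	}
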
@@ -1980,37 +2049,183 @@
 	if (dev->bar) {
 		iounmap(dev->bar);
 		dev->bar = NULL;
+		pci_release_regions(dev->pci_dev);
 	}
 
-	pci_release_regions(dev->pci_dev);
 	if (pci_is_enabled(dev->pci_dev))
 		pci_disable_device(dev->pci_dev);
 }
 
+struct nvme_delq_ctx {
+	struct task_struct *waiter;
+	struct kthread_worker *worker;
+	atomic_t refcount;
+};
+
+static void nvme_wait_dq(struct nvme_delq_ctx *dq, struct nvme_dev *dev)
+{
+	dq->waiter = current;
+	mb();
+
+	for (;;) {
+		set_current_state(TASK_KILLABLE);
+		if (!atomic_read(&dq->refcount))
+			break;
+		if (!schedule_timeout(ADMIN_TIMEOUT) ||
+					fatal_signal_pending(current)) {
+			set_current_state(TASK_RUNNING);
+
+			nvme_disable_ctrl(dev, readq(&dev->bar->cap));
+			nvme_disable_queue(dev, 0);
+
+			send_sig(SIGKILL, dq->worker->task, 1);
+			flush_kthread_worker(dq->worker);
+			return;
+		}
+	}
+	set_current_state(TASK_RUNNING);
+}
+
+static void nvme_put_dq(struct nvme_delq_ctx *dq)
+{
+	atomic_dec(&dq->refcount);
+	if (dq->waiter)
+		wake_up_process(dq->waiter);
+}
+
+static struct nvme_delq_ctx *nvme_get_dq(struct nvme_delq_ctx *dq)
+{
+	atomic_inc(&dq->refcount);
+	return dq;
+}
+
+static void nvme_del_queue_end(struct nvme_queue *nvmeq)
+{
+	struct nvme_delq_ctx *dq = nvmeq->cmdinfo.ctx;
+
+	nvme_clear_queue(nvmeq);
+	nvme_put_dq(dq);
+}
+
+static int adapter_async_del_queue(struct nvme_queue *nvmeq, u8 opcode,
+						kthread_work_func_t fn)
+{
+	struct nvme_command c;
+
+	memset(&c, 0, sizeof(c));
+	c.delete_queue.opcode = opcode;
+	c.delete_queue.qid = cpu_to_le16(nvmeq->qid);
+
+	init_kthread_work(&nvmeq->cmdinfo.work, fn);
+	return nvme_submit_admin_cmd_async(nvmeq->dev, &c, &nvmeq->cmdinfo);
+}
+
+static void nvme_del_cq_work_handler(struct kthread_work *work)
+{
+	struct nvme_queue *nvmeq = container_of(work, struct nvme_queue,
+							cmdinfo.work);
+	nvme_del_queue_end(nvmeq);
+}
+
+static int nvme_delete_cq(struct nvme_queue *nvmeq)
+{
+	return adapter_async_del_queue(nvmeq, nvme_admin_delete_cq,
+						nvme_del_cq_work_handler);
+}
+
+static void nvme_del_sq_work_handler(struct kthread_work *work)
+{
+	struct nvme_queue *nvmeq = container_of(work, struct nvme_queue,
+							cmdinfo.work);
+	int status = nvmeq->cmdinfo.status;
+
+	if (!status)
+		status = nvme_delete_cq(nvmeq);
+	if (status)
+		nvme_del_queue_end(nvmeq);
+}
+
+static int nvme_delete_sq(struct nvme_queue *nvmeq)
+{
+	return adapter_async_del_queue(nvmeq, nvme_admin_delete_sq,
+						nvme_del_sq_work_handler);
+}
+
+static void nvme_del_queue_start(struct kthread_work *work)
+{
+	struct nvme_queue *nvmeq = container_of(work, struct nvme_queue,
+							cmdinfo.work);
+	allow_signal(SIGKILL);
+	if (nvme_delete_sq(nvmeq))
+		nvme_del_queue_end(nvmeq);
+}
+
+static void nvme_disable_io_queues(struct nvme_dev *dev)
+{
+	int i;
+	DEFINE_KTHREAD_WORKER_ONSTACK(worker);
+	struct nvme_delq_ctx dq;
+	struct task_struct *kworker_task = kthread_run(kthread_worker_fn,
+					&worker, "nvme%d", dev->instance);
+
+	if (IS_ERR(kworker_task)) {
+		dev_err(&dev->pci_dev->dev,
+			"Failed to create queue del task\n");
+		for (i = dev->queue_count - 1; i > 0; i--)
+			nvme_disable_queue(dev, i);
+		return;
+	}
+
+	dq.waiter = NULL;
+	atomic_set(&dq.refcount, 0);
+	dq.worker = &worker;
+	for (i = dev->queue_count - 1; i > 0; i--) {
+		struct nvme_queue *nvmeq = dev->queues[i];
+
+		if (nvme_suspend_queue(nvmeq))
+			continue;
+		nvmeq->cmdinfo.ctx = nvme_get_dq(&dq);
+		nvmeq->cmdinfo.worker = dq.worker;
+		init_kthread_work(&nvmeq->cmdinfo.work, nvme_del_queue_start);
+		queue_kthread_work(dq.worker, &nvmeq->cmdinfo.work);
+	}
+	nvme_wait_dq(&dq, dev);
+	kthread_stop(kworker_task);
+}
+
 static void nvme_dev_shutdown(struct nvme_dev *dev)
 {
 	int i;
 
-	for (i = dev->queue_count - 1; i >= 0; i--)
-		nvme_disable_queue(dev, i);
+	dev->initialized = 0;
 
 	spin_lock(&dev_list_lock);
 	list_del_init(&dev->node);
 	spin_unlock(&dev_list_lock);
 
-	if (dev->bar)
+	if (!dev->bar || (dev->bar && readl(&dev->bar->csts) == -1)) {
+		for (i = dev->queue_count - 1; i >= 0; i--) {
+			struct nvme_queue *nvmeq = dev->queues[i];
+			nvme_suspend_queue(nvmeq);
+			nvme_clear_queue(nvmeq);
+		}
+	} else {
+		nvme_disable_io_queues(dev);
 		nvme_shutdown_ctrl(dev);
+		nvme_disable_queue(dev, 0);
+	}
 	nvme_dev_unmap(dev);
 }
 
 static void nvme_dev_remove(struct nvme_dev *dev)
 {
-	struct nvme_ns *ns, *next;
+	struct nvme_ns *ns;
 
-	list_for_each_entry_safe(ns, next, &dev->namespaces, list) {
-		list_del(&ns->list);
-		del_gendisk(ns->disk);
-		nvme_ns_free(ns);
+	list_for_each_entry(ns, &dev->namespaces, list) {
+		if (ns->disk->flags & GENHD_FL_UP)
+			del_gendisk(ns->disk);
+		if (!blk_queue_dying(ns->queue))
+			blk_cleanup_queue(ns->queue);
 	}
 }
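
nvme_disable_io_queues() above fans the queue deletions out to a kthread worker and then waits on a reference count that each queue drops in nvme_del_queue_end(), so shutdown proceeds once every outstanding deletion has completed (or the wait is killed). A compact pthread model of that "wait until the count hits zero" shape, with hypothetical names; build with -lpthread:

	/* Sketch (pthreads, hypothetical names): the wait used by the async
	 * queue-deletion path above. Each in-flight deletion holds one
	 * reference; the waiter sleeps until the count reaches zero. */
	#include <pthread.h>
	#include <stdio.h>

	static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
	static pthread_cond_t  done = PTHREAD_COND_INITIALIZER;
	static int refcount;

	static void put_ref(void)
	{
		pthread_mutex_lock(&lock);
		if (--refcount == 0)
			pthread_cond_signal(&done);	/* last deletion finished */
		pthread_mutex_unlock(&lock);
	}

	static void *delete_queue(void *arg)
	{
		printf("deleting queue %ld\n", (long)arg);	/* stands in for SQ+CQ delete */
		put_ref();
		return NULL;
	}

	int main(void)
	{
		pthread_t tid[4];
		long i;

		refcount = 4;				/* one ref per queued deletion */
		for (i = 0; i < 4; i++)
			pthread_create(&tid[i], NULL, delete_queue, (void *)i);

		pthread_mutex_lock(&lock);
		while (refcount)			/* nvme_wait_dq() analogue */
			pthread_cond_wait(&done, &lock);
		pthread_mutex_unlock(&lock);

		for (i = 0; i < 4; i++)
			pthread_join(tid[i], NULL);
		printf("all I/O queues deleted\n");
		return 0;
	}
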
 
@@ -2067,14 +2282,22 @@
 	spin_unlock(&dev_list_lock);
 }
 
+static void nvme_free_namespaces(struct nvme_dev *dev)
+{
+	struct nvme_ns *ns, *next;
+
+	list_for_each_entry_safe(ns, next, &dev->namespaces, list) {
+		list_del(&ns->list);
+		put_disk(ns->disk);
+		kfree(ns);
+	}
+}
+
 static void nvme_free_dev(struct kref *kref)
 {
 	struct nvme_dev *dev = container_of(kref, struct nvme_dev, kref);
-	nvme_dev_remove(dev);
-	nvme_dev_shutdown(dev);
-	nvme_free_queues(dev);
-	nvme_release_instance(dev);
-	nvme_release_prp_pools(dev);
+
+	nvme_free_namespaces(dev);
 	kfree(dev->queues);
 	kfree(dev->entry);
 	kfree(dev);
@@ -2138,6 +2361,7 @@
 	return result;
 
  disable:
+	nvme_disable_queue(dev, 0);
 	spin_lock(&dev_list_lock);
 	list_del_init(&dev->node);
 	spin_unlock(&dev_list_lock);
@@ -2146,6 +2370,71 @@
 	return result;
 }
 
+static int nvme_remove_dead_ctrl(void *arg)
+{
+	struct nvme_dev *dev = (struct nvme_dev *)arg;
+	struct pci_dev *pdev = dev->pci_dev;
+
+	if (pci_get_drvdata(pdev))
+		pci_stop_and_remove_bus_device(pdev);
+	kref_put(&dev->kref, nvme_free_dev);
+	return 0;
+}
+
+static void nvme_remove_disks(struct work_struct *ws)
+{
+	int i;
+	struct nvme_dev *dev = container_of(ws, struct nvme_dev, reset_work);
+
+	nvme_dev_remove(dev);
+	spin_lock(&dev_list_lock);
+	for (i = dev->queue_count - 1; i > 0; i--) {
+		BUG_ON(!dev->queues[i] || !dev->queues[i]->q_suspended);
+		nvme_free_queue(dev->queues[i]);
+		dev->queue_count--;
+		dev->queues[i] = NULL;
+	}
+	spin_unlock(&dev_list_lock);
+}
+
+static int nvme_dev_resume(struct nvme_dev *dev)
+{
+	int ret;
+
+	ret = nvme_dev_start(dev);
+	if (ret && ret != -EBUSY)
+		return ret;
+	if (ret == -EBUSY) {
+		spin_lock(&dev_list_lock);
+		PREPARE_WORK(&dev->reset_work, nvme_remove_disks);
+		queue_work(nvme_workq, &dev->reset_work);
+		spin_unlock(&dev_list_lock);
+	}
+	dev->initialized = 1;
+	return 0;
+}
+
+static void nvme_dev_reset(struct nvme_dev *dev)
+{
+	nvme_dev_shutdown(dev);
+	if (nvme_dev_resume(dev)) {
+		dev_err(&dev->pci_dev->dev, "Device failed to resume\n");
+		kref_get(&dev->kref);
+		if (IS_ERR(kthread_run(nvme_remove_dead_ctrl, dev, "nvme%d",
+							dev->instance))) {
+			dev_err(&dev->pci_dev->dev,
+				"Failed to start controller remove task\n");
+			kref_put(&dev->kref, nvme_free_dev);
+		}
+	}
+}
+
+static void nvme_reset_failed_dev(struct work_struct *ws)
+{
+	struct nvme_dev *dev = container_of(ws, struct nvme_dev, reset_work);
+	nvme_dev_reset(dev);
+}
+
 static int nvme_probe(struct pci_dev *pdev, const struct pci_device_id *id)
 {
 	int result = -ENOMEM;
@@ -2164,8 +2453,9 @@
 		goto free;
 
 	INIT_LIST_HEAD(&dev->namespaces);
+	INIT_WORK(&dev->reset_work, nvme_reset_failed_dev);
 	dev->pci_dev = pdev;
-
+	pci_set_drvdata(pdev, dev);
 	result = nvme_set_instance(dev);
 	if (result)
 		goto free;
@@ -2181,6 +2471,7 @@
 		goto release_pools;
 	}
 
+	kref_init(&dev->kref);
 	result = nvme_dev_add(dev);
 	if (result)
 		goto shutdown;
@@ -2195,15 +2486,16 @@
 	if (result)
 		goto remove;
 
-	kref_init(&dev->kref);
+	dev->initialized = 1;
 	return 0;
 
  remove:
 	nvme_dev_remove(dev);
+	nvme_free_namespaces(dev);
  shutdown:
 	nvme_dev_shutdown(dev);
  release_pools:
-	nvme_free_queues(dev);
+	nvme_free_queues(dev, 0);
 	nvme_release_prp_pools(dev);
  release:
 	nvme_release_instance(dev);
@@ -2214,10 +2506,28 @@
 	return result;
 }
 
+static void nvme_shutdown(struct pci_dev *pdev)
+{
+	struct nvme_dev *dev = pci_get_drvdata(pdev);
+	nvme_dev_shutdown(dev);
+}
+
 static void nvme_remove(struct pci_dev *pdev)
 {
 	struct nvme_dev *dev = pci_get_drvdata(pdev);
+
+	spin_lock(&dev_list_lock);
+	list_del_init(&dev->node);
+	spin_unlock(&dev_list_lock);
+
+	pci_set_drvdata(pdev, NULL);
+	flush_work(&dev->reset_work);
 	misc_deregister(&dev->miscdev);
+	nvme_dev_remove(dev);
+	nvme_dev_shutdown(dev);
+	nvme_free_queues(dev, 0);
+	nvme_release_instance(dev);
+	nvme_release_prp_pools(dev);
 	kref_put(&dev->kref, nvme_free_dev);
 }
 
@@ -2241,13 +2551,12 @@
 {
 	struct pci_dev *pdev = to_pci_dev(dev);
 	struct nvme_dev *ndev = pci_get_drvdata(pdev);
-	int ret;
 
-	ret = nvme_dev_start(ndev);
-	/* XXX: should remove gendisks if resume fails */
-	if (ret)
-		nvme_free_queues(ndev);
-	return ret;
+	if (nvme_dev_resume(ndev) && !work_busy(&ndev->reset_work)) {
+		PREPARE_WORK(&ndev->reset_work, nvme_reset_failed_dev);
+		queue_work(nvme_workq, &ndev->reset_work);
+	}
+	return 0;
 }
 
 static SIMPLE_DEV_PM_OPS(nvme_dev_pm_ops, nvme_suspend, nvme_resume);
@@ -2274,6 +2583,7 @@
 	.id_table	= nvme_id_table,
 	.probe		= nvme_probe,
 	.remove		= nvme_remove,
+	.shutdown	= nvme_shutdown,
 	.driver		= {
 		.pm	= &nvme_dev_pm_ops,
 	},
@@ -2288,9 +2598,14 @@
 	if (IS_ERR(nvme_thread))
 		return PTR_ERR(nvme_thread);
 
+	result = -ENOMEM;
+	nvme_workq = create_singlethread_workqueue("nvme");
+	if (!nvme_workq)
+		goto kill_kthread;
+
 	result = register_blkdev(nvme_major, "nvme");
 	if (result < 0)
-		goto kill_kthread;
+		goto kill_workq;
 	else if (result > 0)
 		nvme_major = result;
 
@@ -2301,6 +2616,8 @@
 
  unregister_blkdev:
 	unregister_blkdev(nvme_major, "nvme");
+ kill_workq:
+	destroy_workqueue(nvme_workq);
  kill_kthread:
 	kthread_stop(nvme_thread);
 	return result;
@@ -2310,6 +2627,7 @@
 {
 	pci_unregister_driver(&nvme_driver);
 	unregister_blkdev(nvme_major, "nvme");
+	destroy_workqueue(nvme_workq);
 	kthread_stop(nvme_thread);
 }
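
Module init now creates the dedicated "nvme" workqueue before registering the block major, and the exit/error paths tear things down in reverse order. The shape of that reverse-order unwinding, with the registrations stubbed out (names here are stand-ins, not the kernel API):

	/* Sketch: goto-label error unwinding in reverse acquisition order,
	 * as in the module init hunk above. All register/unregister pairs
	 * are stubs; the second step is forced to fail for the demo. */
	#include <stdio.h>

	static int create_workqueue_stub(void)	{ puts("create workqueue");	return 0; }
	static void destroy_workqueue_stub(void){ puts("destroy workqueue"); }
	static int register_blkdev_stub(void)	{ puts("register blkdev");	return -1; }
	static void unregister_blkdev_stub(void){ puts("unregister blkdev"); }
	static int register_driver_stub(void)	{ puts("register pci driver");	return 0; }

	static int demo_init(void)
	{
		int result;

		result = create_workqueue_stub();
		if (result)
			goto out;

		result = register_blkdev_stub();
		if (result)
			goto kill_workq;

		result = register_driver_stub();
		if (result)
			goto unreg_blkdev;
		return 0;

	 unreg_blkdev:
		unregister_blkdev_stub();
	 kill_workq:
		destroy_workqueue_stub();
	 out:
		return result;
	}

	int main(void)
	{
		printf("init returned %d\n", demo_init());
		return 0;
	}
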
 
diff --git a/drivers/block/nvme-scsi.c b/drivers/block/nvme-scsi.c
index 4a4ff4e..4a0ceb6 100644
--- a/drivers/block/nvme-scsi.c
+++ b/drivers/block/nvme-scsi.c
@@ -25,6 +25,7 @@
 #include <linux/bio.h>
 #include <linux/bitops.h>
 #include <linux/blkdev.h>
+#include <linux/compat.h>
 #include <linux/delay.h>
 #include <linux/errno.h>
 #include <linux/fs.h>
@@ -3038,6 +3039,152 @@
 	return retcode;
 }
 
+#ifdef CONFIG_COMPAT
+typedef struct sg_io_hdr32 {
+	compat_int_t interface_id;	/* [i] 'S' for SCSI generic (required) */
+	compat_int_t dxfer_direction;	/* [i] data transfer direction  */
+	unsigned char cmd_len;		/* [i] SCSI command length ( <= 16 bytes) */
+	unsigned char mx_sb_len;		/* [i] max length to write to sbp */
+	unsigned short iovec_count;	/* [i] 0 implies no scatter gather */
+	compat_uint_t dxfer_len;		/* [i] byte count of data transfer */
+	compat_uint_t dxferp;		/* [i], [*io] points to data transfer memory
+					      or scatter gather list */
+	compat_uptr_t cmdp;		/* [i], [*i] points to command to perform */
+	compat_uptr_t sbp;		/* [i], [*o] points to sense_buffer memory */
+	compat_uint_t timeout;		/* [i] MAX_UINT->no timeout (unit: millisec) */
+	compat_uint_t flags;		/* [i] 0 -> default, see SG_FLAG... */
+	compat_int_t pack_id;		/* [i->o] unused internally (normally) */
+	compat_uptr_t usr_ptr;		/* [i->o] unused internally */
+	unsigned char status;		/* [o] scsi status */
+	unsigned char masked_status;	/* [o] shifted, masked scsi status */
+	unsigned char msg_status;		/* [o] messaging level data (optional) */
+	unsigned char sb_len_wr;		/* [o] byte count actually written to sbp */
+	unsigned short host_status;	/* [o] errors from host adapter */
+	unsigned short driver_status;	/* [o] errors from software driver */
+	compat_int_t resid;		/* [o] dxfer_len - actual_transferred */
+	compat_uint_t duration;		/* [o] time taken by cmd (unit: millisec) */
+	compat_uint_t info;		/* [o] auxiliary information */
+} sg_io_hdr32_t;  /* 64 bytes long (on sparc32) */
+
+typedef struct sg_iovec32 {
+	compat_uint_t iov_base;
+	compat_uint_t iov_len;
+} sg_iovec32_t;
+
+static int sg_build_iovec(sg_io_hdr_t __user *sgio, void __user *dxferp, u16 iovec_count)
+{
+	sg_iovec_t __user *iov = (sg_iovec_t __user *) (sgio + 1);
+	sg_iovec32_t __user *iov32 = dxferp;
+	int i;
+
+	for (i = 0; i < iovec_count; i++) {
+		u32 base, len;
+
+		if (get_user(base, &iov32[i].iov_base) ||
+		    get_user(len, &iov32[i].iov_len) ||
+		    put_user(compat_ptr(base), &iov[i].iov_base) ||
+		    put_user(len, &iov[i].iov_len))
+			return -EFAULT;
+	}
+
+	if (put_user(iov, &sgio->dxferp))
+		return -EFAULT;
+	return 0;
+}
+
+int nvme_sg_io32(struct nvme_ns *ns, unsigned long arg)
+{
+	sg_io_hdr32_t __user *sgio32 = (sg_io_hdr32_t __user *)arg;
+	sg_io_hdr_t __user *sgio;
+	u16 iovec_count;
+	u32 data;
+	void __user *dxferp;
+	int err;
+	int interface_id;
+
+	if (get_user(interface_id, &sgio32->interface_id))
+		return -EFAULT;
+	if (interface_id != 'S')
+		return -EINVAL;
+
+	if (get_user(iovec_count, &sgio32->iovec_count))
+		return -EFAULT;
+
+	{
+		void __user *top = compat_alloc_user_space(0);
+		void __user *new = compat_alloc_user_space(sizeof(sg_io_hdr_t) +
+				       (iovec_count * sizeof(sg_iovec_t)));
+		if (new > top)
+			return -EINVAL;
+
+		sgio = new;
+	}
+
+	/* Ok, now construct.  */
+	if (copy_in_user(&sgio->interface_id, &sgio32->interface_id,
+			 (2 * sizeof(int)) +
+			 (2 * sizeof(unsigned char)) +
+			 (1 * sizeof(unsigned short)) +
+			 (1 * sizeof(unsigned int))))
+		return -EFAULT;
+
+	if (get_user(data, &sgio32->dxferp))
+		return -EFAULT;
+	dxferp = compat_ptr(data);
+	if (iovec_count) {
+		if (sg_build_iovec(sgio, dxferp, iovec_count))
+			return -EFAULT;
+	} else {
+		if (put_user(dxferp, &sgio->dxferp))
+			return -EFAULT;
+	}
+
+	{
+		unsigned char __user *cmdp;
+		unsigned char __user *sbp;
+
+		if (get_user(data, &sgio32->cmdp))
+			return -EFAULT;
+		cmdp = compat_ptr(data);
+
+		if (get_user(data, &sgio32->sbp))
+			return -EFAULT;
+		sbp = compat_ptr(data);
+
+		if (put_user(cmdp, &sgio->cmdp) ||
+		    put_user(sbp, &sgio->sbp))
+			return -EFAULT;
+	}
+
+	if (copy_in_user(&sgio->timeout, &sgio32->timeout,
+			 3 * sizeof(int)))
+		return -EFAULT;
+
+	if (get_user(data, &sgio32->usr_ptr))
+		return -EFAULT;
+	if (put_user(compat_ptr(data), &sgio->usr_ptr))
+		return -EFAULT;
+
+	err = nvme_sg_io(ns, sgio);
+	if (err >= 0) {
+		void __user *datap;
+
+		if (copy_in_user(&sgio32->pack_id, &sgio->pack_id,
+				 sizeof(int)) ||
+		    get_user(datap, &sgio->usr_ptr) ||
+		    put_user((u32)(unsigned long)datap,
+			     &sgio32->usr_ptr) ||
+		    copy_in_user(&sgio32->status, &sgio->status,
+				 (4 * sizeof(unsigned char)) +
+				 (2 * sizeof(unsigned short)) +
+				 (3 * sizeof(int))))
+			err = -EFAULT;
+	}
+
+	return err;
+}
+#endif
+
 int nvme_sg_get_version_num(int __user *ip)
 {
 	return put_user(sg_version_num, ip);
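
nvme_sg_io32() rebuilds a native sg_io_hdr from the 32-bit layout field by field because the pointer-sized members differ: a 32-bit user pointer has to be widened explicitly, and a byte-for-byte struct copy would misplace everything after the first pointer. A userspace illustration with simplified layouts (these structs are illustrative, not the real sg_io_hdr):

	/* Sketch: why the compat path widens pointer fields by hand. The
	 * 32-bit header stores pointers as 32-bit integers; converting to
	 * the 64-bit layout needs an explicit zero-extension per field. */
	#include <stdint.h>
	#include <stdio.h>

	struct hdr32 { int32_t interface_id; uint32_t dxferp; uint32_t cmdp; };
	struct hdr64 { int32_t interface_id; uint64_t dxferp; uint64_t cmdp; };

	static void widen(const struct hdr32 *in, struct hdr64 *out)
	{
		out->interface_id = in->interface_id;
		out->dxferp = (uint64_t)in->dxferp;	/* explicit zero-extension */
		out->cmdp   = (uint64_t)in->cmdp;
	}

	int main(void)
	{
		struct hdr32 h32 = { 'S', 0x1000, 0x2000 };
		struct hdr64 h64;

		widen(&h32, &h64);
		printf("sizeof hdr32=%zu hdr64=%zu\n", sizeof(h32), sizeof(h64));
		printf("dxferp=%#llx cmdp=%#llx\n",
		       (unsigned long long)h64.dxferp, (unsigned long long)h64.cmdp);
		return 0;
	}
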
diff --git a/drivers/block/paride/pg.c b/drivers/block/paride/pg.c
index 4a27b1d..2ce3dfd 100644
--- a/drivers/block/paride/pg.c
+++ b/drivers/block/paride/pg.c
@@ -581,7 +581,7 @@
 
 	if (hdr.magic != PG_MAGIC)
 		return -EINVAL;
-	if (hdr.dlen > PG_MAX_DATA)
+	if (hdr.dlen < 0 || hdr.dlen > PG_MAX_DATA)
 		return -EINVAL;
 	if ((count - hs) > PG_MAX_DATA)
 		return -EINVAL;
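
The pg.c fix adds the "hdr.dlen < 0" test because a negative signed length slips past the upper-bound check on its own and then becomes an enormous value once it is used as an unsigned size. A two-line demonstration of the difference (FAKE_MAX_DATA stands in for PG_MAX_DATA):

	/* Sketch: why "dlen > MAX" alone is not enough for a signed length.
	 * A negative value passes the upper-bound test and then converts to
	 * a huge size_t at the point of use. */
	#include <stdio.h>
	#include <stddef.h>

	#define FAKE_MAX_DATA 32768

	int main(void)
	{
		int dlen = -1;

		printf("dlen > MAX?         %s\n", dlen > FAKE_MAX_DATA ? "yes" : "no");
		printf("dlen < 0 || > MAX?  %s\n",
		       (dlen < 0 || dlen > FAKE_MAX_DATA) ? "yes" : "no");
		printf("as size_t: %zu\n", (size_t)dlen);	/* wraps to SIZE_MAX */
		return 0;
	}
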
diff --git a/drivers/block/pktcdvd.c b/drivers/block/pktcdvd.c
index ff8668c..a2af73d 100644
--- a/drivers/block/pktcdvd.c
+++ b/drivers/block/pktcdvd.c
@@ -651,7 +651,7 @@
 
 	for (;;) {
 		tmp = rb_entry(n, struct pkt_rb_node, rb_node);
-		if (s <= tmp->bio->bi_sector)
+		if (s <= tmp->bio->bi_iter.bi_sector)
 			next = n->rb_left;
 		else
 			next = n->rb_right;
@@ -660,12 +660,12 @@
 		n = next;
 	}
 
-	if (s > tmp->bio->bi_sector) {
+	if (s > tmp->bio->bi_iter.bi_sector) {
 		tmp = pkt_rbtree_next(tmp);
 		if (!tmp)
 			return NULL;
 	}
-	BUG_ON(s > tmp->bio->bi_sector);
+	BUG_ON(s > tmp->bio->bi_iter.bi_sector);
 	return tmp;
 }
 
@@ -676,13 +676,13 @@
 {
 	struct rb_node **p = &pd->bio_queue.rb_node;
 	struct rb_node *parent = NULL;
-	sector_t s = node->bio->bi_sector;
+	sector_t s = node->bio->bi_iter.bi_sector;
 	struct pkt_rb_node *tmp;
 
 	while (*p) {
 		parent = *p;
 		tmp = rb_entry(parent, struct pkt_rb_node, rb_node);
-		if (s < tmp->bio->bi_sector)
+		if (s < tmp->bio->bi_iter.bi_sector)
 			p = &(*p)->rb_left;
 		else
 			p = &(*p)->rb_right;
@@ -706,7 +706,9 @@
 			     WRITE : READ, __GFP_WAIT);
 
 	if (cgc->buflen) {
-		if (blk_rq_map_kern(q, rq, cgc->buffer, cgc->buflen, __GFP_WAIT))
+		ret = blk_rq_map_kern(q, rq, cgc->buffer, cgc->buflen,
+				      __GFP_WAIT);
+		if (ret)
 			goto out;
 	}
 
@@ -857,7 +859,8 @@
 			spin_lock(&pd->iosched.lock);
 			bio = bio_list_peek(&pd->iosched.write_queue);
 			spin_unlock(&pd->iosched.lock);
-			if (bio && (bio->bi_sector == pd->iosched.last_write))
+			if (bio && (bio->bi_iter.bi_sector ==
+				    pd->iosched.last_write))
 				need_write_seek = 0;
 			if (need_write_seek && reads_queued) {
 				if (atomic_read(&pd->cdrw.pending_bios) > 0) {
@@ -888,7 +891,8 @@
 			continue;
 
 		if (bio_data_dir(bio) == READ)
-			pd->iosched.successive_reads += bio->bi_size >> 10;
+			pd->iosched.successive_reads +=
+				bio->bi_iter.bi_size >> 10;
 		else {
 			pd->iosched.successive_reads = 0;
 			pd->iosched.last_write = bio_end_sector(bio);
@@ -978,7 +982,7 @@
 
 	pkt_dbg(2, pd, "bio=%p sec0=%llx sec=%llx err=%d\n",
 		bio, (unsigned long long)pkt->sector,
-		(unsigned long long)bio->bi_sector, err);
+		(unsigned long long)bio->bi_iter.bi_sector, err);
 
 	if (err)
 		atomic_inc(&pkt->io_errors);
@@ -1026,8 +1030,9 @@
 	memset(written, 0, sizeof(written));
 	spin_lock(&pkt->lock);
 	bio_list_for_each(bio, &pkt->orig_bios) {
-		int first_frame = (bio->bi_sector - pkt->sector) / (CD_FRAMESIZE >> 9);
-		int num_frames = bio->bi_size / CD_FRAMESIZE;
+		int first_frame = (bio->bi_iter.bi_sector - pkt->sector) /
+			(CD_FRAMESIZE >> 9);
+		int num_frames = bio->bi_iter.bi_size / CD_FRAMESIZE;
 		pd->stats.secs_w += num_frames * (CD_FRAMESIZE >> 9);
 		BUG_ON(first_frame < 0);
 		BUG_ON(first_frame + num_frames > pkt->frames);
@@ -1053,7 +1058,7 @@
 
 		bio = pkt->r_bios[f];
 		bio_reset(bio);
-		bio->bi_sector = pkt->sector + f * (CD_FRAMESIZE >> 9);
+		bio->bi_iter.bi_sector = pkt->sector + f * (CD_FRAMESIZE >> 9);
 		bio->bi_bdev = pd->bdev;
 		bio->bi_end_io = pkt_end_io_read;
 		bio->bi_private = pkt;
@@ -1150,8 +1155,8 @@
 	bio_reset(pkt->bio);
 	pkt->bio->bi_bdev = pd->bdev;
 	pkt->bio->bi_rw = REQ_WRITE;
-	pkt->bio->bi_sector = new_sector;
-	pkt->bio->bi_size = pkt->frames * CD_FRAMESIZE;
+	pkt->bio->bi_iter.bi_sector = new_sector;
+	pkt->bio->bi_iter.bi_size = pkt->frames * CD_FRAMESIZE;
 	pkt->bio->bi_vcnt = pkt->frames;
 
 	pkt->bio->bi_end_io = pkt_end_io_packet_write;
@@ -1213,7 +1218,7 @@
 	node = first_node;
 	while (node) {
 		bio = node->bio;
-		zone = get_zone(bio->bi_sector, pd);
+		zone = get_zone(bio->bi_iter.bi_sector, pd);
 		list_for_each_entry(p, &pd->cdrw.pkt_active_list, list) {
 			if (p->sector == zone) {
 				bio = NULL;
@@ -1252,14 +1257,14 @@
 	pkt_dbg(2, pd, "looking for zone %llx\n", (unsigned long long)zone);
 	while ((node = pkt_rbtree_find(pd, zone)) != NULL) {
 		bio = node->bio;
-		pkt_dbg(2, pd, "found zone=%llx\n",
-			(unsigned long long)get_zone(bio->bi_sector, pd));
-		if (get_zone(bio->bi_sector, pd) != zone)
+		pkt_dbg(2, pd, "found zone=%llx\n", (unsigned long long)
+			get_zone(bio->bi_iter.bi_sector, pd));
+		if (get_zone(bio->bi_iter.bi_sector, pd) != zone)
 			break;
 		pkt_rbtree_erase(pd, node);
 		spin_lock(&pkt->lock);
 		bio_list_add(&pkt->orig_bios, bio);
-		pkt->write_size += bio->bi_size / CD_FRAMESIZE;
+		pkt->write_size += bio->bi_iter.bi_size / CD_FRAMESIZE;
 		spin_unlock(&pkt->lock);
 	}
 	/* check write congestion marks, and if bio_queue_size is
@@ -1293,7 +1298,7 @@
 	struct bio_vec *bvec = pkt->w_bio->bi_io_vec;
 
 	bio_reset(pkt->w_bio);
-	pkt->w_bio->bi_sector = pkt->sector;
+	pkt->w_bio->bi_iter.bi_sector = pkt->sector;
 	pkt->w_bio->bi_bdev = pd->bdev;
 	pkt->w_bio->bi_end_io = pkt_end_io_packet_write;
 	pkt->w_bio->bi_private = pkt;
@@ -2335,75 +2340,29 @@
 	pkt_bio_finished(pd);
 }
 
-static void pkt_make_request(struct request_queue *q, struct bio *bio)
+static void pkt_make_request_read(struct pktcdvd_device *pd, struct bio *bio)
 {
-	struct pktcdvd_device *pd;
-	char b[BDEVNAME_SIZE];
+	struct bio *cloned_bio = bio_clone(bio, GFP_NOIO);
+	struct packet_stacked_data *psd = mempool_alloc(psd_pool, GFP_NOIO);
+
+	psd->pd = pd;
+	psd->bio = bio;
+	cloned_bio->bi_bdev = pd->bdev;
+	cloned_bio->bi_private = psd;
+	cloned_bio->bi_end_io = pkt_end_io_read_cloned;
+	pd->stats.secs_r += bio_sectors(bio);
+	pkt_queue_bio(pd, cloned_bio);
+}
+
+static void pkt_make_request_write(struct request_queue *q, struct bio *bio)
+{
+	struct pktcdvd_device *pd = q->queuedata;
 	sector_t zone;
 	struct packet_data *pkt;
 	int was_empty, blocked_bio;
 	struct pkt_rb_node *node;
 
-	pd = q->queuedata;
-	if (!pd) {
-		pr_err("%s incorrect request queue\n",
-		       bdevname(bio->bi_bdev, b));
-		goto end_io;
-	}
-
-	/*
-	 * Clone READ bios so we can have our own bi_end_io callback.
-	 */
-	if (bio_data_dir(bio) == READ) {
-		struct bio *cloned_bio = bio_clone(bio, GFP_NOIO);
-		struct packet_stacked_data *psd = mempool_alloc(psd_pool, GFP_NOIO);
-
-		psd->pd = pd;
-		psd->bio = bio;
-		cloned_bio->bi_bdev = pd->bdev;
-		cloned_bio->bi_private = psd;
-		cloned_bio->bi_end_io = pkt_end_io_read_cloned;
-		pd->stats.secs_r += bio_sectors(bio);
-		pkt_queue_bio(pd, cloned_bio);
-		return;
-	}
-
-	if (!test_bit(PACKET_WRITABLE, &pd->flags)) {
-		pkt_notice(pd, "WRITE for ro device (%llu)\n",
-			   (unsigned long long)bio->bi_sector);
-		goto end_io;
-	}
-
-	if (!bio->bi_size || (bio->bi_size % CD_FRAMESIZE)) {
-		pkt_err(pd, "wrong bio size\n");
-		goto end_io;
-	}
-
-	blk_queue_bounce(q, &bio);
-
-	zone = get_zone(bio->bi_sector, pd);
-	pkt_dbg(2, pd, "start = %6llx stop = %6llx\n",
-		(unsigned long long)bio->bi_sector,
-		(unsigned long long)bio_end_sector(bio));
-
-	/* Check if we have to split the bio */
-	{
-		struct bio_pair *bp;
-		sector_t last_zone;
-		int first_sectors;
-
-		last_zone = get_zone(bio_end_sector(bio) - 1, pd);
-		if (last_zone != zone) {
-			BUG_ON(last_zone != zone + pd->settings.size);
-			first_sectors = last_zone - bio->bi_sector;
-			bp = bio_split(bio, first_sectors);
-			BUG_ON(!bp);
-			pkt_make_request(q, &bp->bio1);
-			pkt_make_request(q, &bp->bio2);
-			bio_pair_release(bp);
-			return;
-		}
-	}
+	zone = get_zone(bio->bi_iter.bi_sector, pd);
 
 	/*
 	 * If we find a matching packet in state WAITING or READ_WAIT, we can
@@ -2417,7 +2376,8 @@
 			if ((pkt->state == PACKET_WAITING_STATE) ||
 			    (pkt->state == PACKET_READ_WAIT_STATE)) {
 				bio_list_add(&pkt->orig_bios, bio);
-				pkt->write_size += bio->bi_size / CD_FRAMESIZE;
+				pkt->write_size +=
+					bio->bi_iter.bi_size / CD_FRAMESIZE;
 				if ((pkt->write_size >= pkt->frames) &&
 				    (pkt->state == PACKET_WAITING_STATE)) {
 					atomic_inc(&pkt->run_sm);
@@ -2476,6 +2436,64 @@
 		 */
 		wake_up(&pd->wqueue);
 	}
+}
+
+static void pkt_make_request(struct request_queue *q, struct bio *bio)
+{
+	struct pktcdvd_device *pd;
+	char b[BDEVNAME_SIZE];
+	struct bio *split;
+
+	pd = q->queuedata;
+	if (!pd) {
+		pr_err("%s incorrect request queue\n",
+		       bdevname(bio->bi_bdev, b));
+		goto end_io;
+	}
+
+	pkt_dbg(2, pd, "start = %6llx stop = %6llx\n",
+		(unsigned long long)bio->bi_iter.bi_sector,
+		(unsigned long long)bio_end_sector(bio));
+
+	/*
+	 * Clone READ bios so we can have our own bi_end_io callback.
+	 */
+	if (bio_data_dir(bio) == READ) {
+		pkt_make_request_read(pd, bio);
+		return;
+	}
+
+	if (!test_bit(PACKET_WRITABLE, &pd->flags)) {
+		pkt_notice(pd, "WRITE for ro device (%llu)\n",
+			   (unsigned long long)bio->bi_iter.bi_sector);
+		goto end_io;
+	}
+
+	if (!bio->bi_iter.bi_size || (bio->bi_iter.bi_size % CD_FRAMESIZE)) {
+		pkt_err(pd, "wrong bio size\n");
+		goto end_io;
+	}
+
+	blk_queue_bounce(q, &bio);
+
+	do {
+		sector_t zone = get_zone(bio->bi_iter.bi_sector, pd);
+		sector_t last_zone = get_zone(bio_end_sector(bio) - 1, pd);
+
+		if (last_zone != zone) {
+			BUG_ON(last_zone != zone + pd->settings.size);
+
+			split = bio_split(bio, last_zone -
+					  bio->bi_iter.bi_sector,
+					  GFP_NOIO, fs_bio_set);
+			bio_chain(split, bio);
+		} else {
+			split = bio;
+		}
+
+		pkt_make_request_write(q, split);
+	} while (split != bio);
+
 	return;
 end_io:
 	bio_io_error(bio);
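
The rewritten pkt_make_request() above replaces the recursive bio_pair split with a loop that peels off at most one packet zone per iteration using bio_split() and bio_chain(). The loop shape, modeled on a plain sector range rather than a bio (ZONE_SECTORS stands in for pd->settings.size):

	/* Sketch: the split-until-done loop used by pkt_make_request() above,
	 * expressed on a plain sector range. Each pass submits at most one
	 * packet zone's worth of sectors. */
	#include <stdio.h>

	#define ZONE_SECTORS 128	/* stand-in for pd->settings.size */

	static unsigned long zone_of(unsigned long sector)
	{
		return sector - (sector % ZONE_SECTORS);
	}

	int main(void)
	{
		unsigned long sector = 100, remaining = 300;	/* crosses zone boundaries */

		do {
			unsigned long zone = zone_of(sector);
			unsigned long end = zone + ZONE_SECTORS;
			unsigned long chunk = remaining;

			if (sector + remaining > end)
				chunk = end - sector;	/* split at the zone edge */

			printf("submit %lu sectors at %lu (zone %lu)\n", chunk, sector, zone);
			sector += chunk;
			remaining -= chunk;
		} while (remaining);

		return 0;
	}
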
diff --git a/drivers/block/ps3disk.c b/drivers/block/ps3disk.c
index d754a88..c120d70 100644
--- a/drivers/block/ps3disk.c
+++ b/drivers/block/ps3disk.c
@@ -94,26 +94,25 @@
 {
 	unsigned int offset = 0;
 	struct req_iterator iter;
-	struct bio_vec *bvec;
+	struct bio_vec bvec;
 	unsigned int i = 0;
 	size_t size;
 	void *buf;
 
 	rq_for_each_segment(bvec, req, iter) {
 		unsigned long flags;
-		dev_dbg(&dev->sbd.core,
-			"%s:%u: bio %u: %u segs %u sectors from %lu\n",
-			__func__, __LINE__, i, bio_segments(iter.bio),
-			bio_sectors(iter.bio), iter.bio->bi_sector);
+		dev_dbg(&dev->sbd.core, "%s:%u: bio %u: %u sectors from %lu\n",
+			__func__, __LINE__, i, bio_sectors(iter.bio),
+			iter.bio->bi_iter.bi_sector);
 
-		size = bvec->bv_len;
-		buf = bvec_kmap_irq(bvec, &flags);
+		size = bvec.bv_len;
+		buf = bvec_kmap_irq(&bvec, &flags);
 		if (gather)
 			memcpy(dev->bounce_buf+offset, buf, size);
 		else
 			memcpy(buf, dev->bounce_buf+offset, size);
 		offset += size;
-		flush_kernel_dcache_page(bvec->bv_page);
+		flush_kernel_dcache_page(bvec.bv_page);
 		bvec_kunmap_irq(buf, &flags);
 		i++;
 	}
@@ -130,7 +129,7 @@
 
 #ifdef DEBUG
 	unsigned int n = 0;
-	struct bio_vec *bv;
+	struct bio_vec bv;
 	struct req_iterator iter;
 
 	rq_for_each_segment(bv, req, iter)
diff --git a/drivers/block/ps3vram.c b/drivers/block/ps3vram.c
index 06a2e53..ef45cfb 100644
--- a/drivers/block/ps3vram.c
+++ b/drivers/block/ps3vram.c
@@ -553,16 +553,16 @@
 	struct ps3vram_priv *priv = ps3_system_bus_get_drvdata(dev);
 	int write = bio_data_dir(bio) == WRITE;
 	const char *op = write ? "write" : "read";
-	loff_t offset = bio->bi_sector << 9;
+	loff_t offset = bio->bi_iter.bi_sector << 9;
 	int error = 0;
-	struct bio_vec *bvec;
-	unsigned int i;
+	struct bio_vec bvec;
+	struct bvec_iter iter;
 	struct bio *next;
 
-	bio_for_each_segment(bvec, bio, i) {
+	bio_for_each_segment(bvec, bio, iter) {
 		/* PS3 is ppc64, so we don't handle highmem */
-		char *ptr = page_address(bvec->bv_page) + bvec->bv_offset;
-		size_t len = bvec->bv_len, retlen;
+		char *ptr = page_address(bvec.bv_page) + bvec.bv_offset;
+		size_t len = bvec.bv_len, retlen;
 
 		dev_dbg(&dev->core, "    %s %zu bytes at offset %llu\n", op,
 			len, offset);
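
The conversions in this series (ps3disk, ps3vram, rsxx, rbd, and others below) move bio_for_each_segment() users from a struct bio_vec pointer plus an integer index to a by-value struct bio_vec driven by a separate struct bvec_iter, so the walk no longer depends on bi_idx/bi_vcnt. A userspace model of that iterator style, with a plain array standing in for the bio's segment vector:

	/* Sketch: segment iteration by value with a detached iterator, the
	 * shape the bio_for_each_segment() conversions move to. The "bio"
	 * here is just an array of segments plus a remaining byte count. */
	#include <stdio.h>

	struct seg  { const char *name; unsigned len; };
	struct iter { unsigned idx; unsigned done; unsigned size; };

	static struct seg iter_seg(const struct seg *segs, struct iter it)
	{
		return segs[it.idx];		/* copied out, never a pointer */
	}

	static void iter_advance(struct iter *it, unsigned bytes)
	{
		it->done += bytes;
		it->size -= bytes;
		it->idx++;			/* whole-segment steps in this model */
	}

	int main(void)
	{
		struct seg segs[] = { { "seg0", 512 }, { "seg1", 1024 }, { "seg2", 512 } };
		struct iter it = { 0, 0, 2048 };

		while (it.size) {
			struct seg s = iter_seg(segs, it);
			printf("%s: %u bytes (remaining after: %u)\n", s.name, s.len, it.size - s.len);
			iter_advance(&it, s.len);
		}
		return 0;
	}
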
diff --git a/drivers/block/rbd.c b/drivers/block/rbd.c
index 16cab66..b365e0d 100644
--- a/drivers/block/rbd.c
+++ b/drivers/block/rbd.c
@@ -1156,23 +1156,23 @@
  */
 static void zero_bio_chain(struct bio *chain, int start_ofs)
 {
-	struct bio_vec *bv;
+	struct bio_vec bv;
+	struct bvec_iter iter;
 	unsigned long flags;
 	void *buf;
-	int i;
 	int pos = 0;
 
 	while (chain) {
-		bio_for_each_segment(bv, chain, i) {
-			if (pos + bv->bv_len > start_ofs) {
+		bio_for_each_segment(bv, chain, iter) {
+			if (pos + bv.bv_len > start_ofs) {
 				int remainder = max(start_ofs - pos, 0);
-				buf = bvec_kmap_irq(bv, &flags);
+				buf = bvec_kmap_irq(&bv, &flags);
 				memset(buf + remainder, 0,
-				       bv->bv_len - remainder);
-				flush_dcache_page(bv->bv_page);
+				       bv.bv_len - remainder);
+				flush_dcache_page(bv.bv_page);
 				bvec_kunmap_irq(buf, &flags);
 			}
-			pos += bv->bv_len;
+			pos += bv.bv_len;
 		}
 
 		chain = chain->bi_next;
@@ -1220,74 +1220,14 @@
 					unsigned int len,
 					gfp_t gfpmask)
 {
-	struct bio_vec *bv;
-	unsigned int resid;
-	unsigned short idx;
-	unsigned int voff;
-	unsigned short end_idx;
-	unsigned short vcnt;
 	struct bio *bio;
 
-	/* Handle the easy case for the caller */
-
-	if (!offset && len == bio_src->bi_size)
-		return bio_clone(bio_src, gfpmask);
-
-	if (WARN_ON_ONCE(!len))
-		return NULL;
-	if (WARN_ON_ONCE(len > bio_src->bi_size))
-		return NULL;
-	if (WARN_ON_ONCE(offset > bio_src->bi_size - len))
-		return NULL;
-
-	/* Find first affected segment... */
-
-	resid = offset;
-	bio_for_each_segment(bv, bio_src, idx) {
-		if (resid < bv->bv_len)
-			break;
-		resid -= bv->bv_len;
-	}
-	voff = resid;
-
-	/* ...and the last affected segment */
-
-	resid += len;
-	__bio_for_each_segment(bv, bio_src, end_idx, idx) {
-		if (resid <= bv->bv_len)
-			break;
-		resid -= bv->bv_len;
-	}
-	vcnt = end_idx - idx + 1;
-
-	/* Build the clone */
-
-	bio = bio_alloc(gfpmask, (unsigned int) vcnt);
+	bio = bio_clone(bio_src, gfpmask);
 	if (!bio)
 		return NULL;	/* ENOMEM */
 
-	bio->bi_bdev = bio_src->bi_bdev;
-	bio->bi_sector = bio_src->bi_sector + (offset >> SECTOR_SHIFT);
-	bio->bi_rw = bio_src->bi_rw;
-	bio->bi_flags |= 1 << BIO_CLONED;
-
-	/*
-	 * Copy over our part of the bio_vec, then update the first
-	 * and last (or only) entries.
-	 */
-	memcpy(&bio->bi_io_vec[0], &bio_src->bi_io_vec[idx],
-			vcnt * sizeof (struct bio_vec));
-	bio->bi_io_vec[0].bv_offset += voff;
-	if (vcnt > 1) {
-		bio->bi_io_vec[0].bv_len -= voff;
-		bio->bi_io_vec[vcnt - 1].bv_len = resid;
-	} else {
-		bio->bi_io_vec[0].bv_len = len;
-	}
-
-	bio->bi_vcnt = vcnt;
-	bio->bi_size = len;
-	bio->bi_idx = 0;
+	bio_advance(bio, offset);
+	bio->bi_iter.bi_size = len;
 
 	return bio;
 }
@@ -1318,7 +1258,7 @@
 
 	/* Build up a chain of clone bios up to the limit */
 
-	if (!bi || off >= bi->bi_size || !len)
+	if (!bi || off >= bi->bi_iter.bi_size || !len)
 		return NULL;		/* Nothing to clone */
 
 	end = &chain;
@@ -1330,7 +1270,7 @@
 			rbd_warn(NULL, "bio_chain exhausted with %u left", len);
 			goto out_err;	/* EINVAL; ran out of bio's */
 		}
-		bi_size = min_t(unsigned int, bi->bi_size - off, len);
+		bi_size = min_t(unsigned int, bi->bi_iter.bi_size - off, len);
 		bio = bio_clone_range(bi, off, bi_size, gfpmask);
 		if (!bio)
 			goto out_err;	/* ENOMEM */
@@ -1339,7 +1279,7 @@
 		end = &bio->bi_next;
 
 		off += bi_size;
-		if (off == bi->bi_size) {
+		if (off == bi->bi_iter.bi_size) {
 			bi = bi->bi_next;
 			off = 0;
 		}
@@ -2227,7 +2167,8 @@
 
 	if (type == OBJ_REQUEST_BIO) {
 		bio_list = data_desc;
-		rbd_assert(img_offset == bio_list->bi_sector << SECTOR_SHIFT);
+		rbd_assert(img_offset ==
+			   bio_list->bi_iter.bi_sector << SECTOR_SHIFT);
 	} else {
 		rbd_assert(type == OBJ_REQUEST_PAGES);
 		pages = data_desc;
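
With the iterator carrying the offset, rbd's bio_clone_range() above collapses to a full clone followed by bio_advance() and a size cap, instead of hand-copying a window of the biovec. The equivalent arithmetic on a plain (start, length) pair, as a rough analogy rather than the bio implementation itself:

	/* Sketch: clone-then-trim, the shape of the simplified
	 * bio_clone_range() above, expressed on a plain range. */
	#include <stdio.h>

	struct range { unsigned long start; unsigned len; };

	static struct range clone_range(struct range src, unsigned off, unsigned len)
	{
		struct range c = src;	/* "bio_clone": copy the whole thing      */
		c.start += off;		/* "bio_advance": skip the leading bytes  */
		c.len = len;		/* cap the size to the requested window   */
		return c;
	}

	int main(void)
	{
		struct range src = { .start = 4096, .len = 8192 };
		struct range c = clone_range(src, 1024, 2048);

		printf("clone covers [%lu, %lu)\n", c.start, c.start + c.len);
		return 0;
	}
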
diff --git a/drivers/block/rsxx/dev.c b/drivers/block/rsxx/dev.c
index 2284f5d..2839d37 100644
--- a/drivers/block/rsxx/dev.c
+++ b/drivers/block/rsxx/dev.c
@@ -174,7 +174,7 @@
 	if (!card)
 		goto req_err;
 
-	if (bio->bi_sector + (bio->bi_size >> 9) > get_capacity(card->gendisk))
+	if (bio_end_sector(bio) > get_capacity(card->gendisk))
 		goto req_err;
 
 	if (unlikely(card->halt)) {
@@ -187,7 +187,7 @@
 		goto req_err;
 	}
 
-	if (bio->bi_size == 0) {
+	if (bio->bi_iter.bi_size == 0) {
 		dev_err(CARD_TO_DEV(card), "size zero BIO!\n");
 		goto req_err;
 	}
@@ -208,7 +208,7 @@
 
 	dev_dbg(CARD_TO_DEV(card), "BIO[%c]: meta: %p addr8: x%llx size: %d\n",
 		 bio_data_dir(bio) ? 'W' : 'R', bio_meta,
-		 (u64)bio->bi_sector << 9, bio->bi_size);
+		 (u64)bio->bi_iter.bi_sector << 9, bio->bi_iter.bi_size);
 
 	st = rsxx_dma_queue_bio(card, bio, &bio_meta->pending_dmas,
 				    bio_dma_done_cb, bio_meta);
diff --git a/drivers/block/rsxx/dma.c b/drivers/block/rsxx/dma.c
index fc88ba3..cf8cd29 100644
--- a/drivers/block/rsxx/dma.c
+++ b/drivers/block/rsxx/dma.c
@@ -684,7 +684,8 @@
 			   void *cb_data)
 {
 	struct list_head dma_list[RSXX_MAX_TARGETS];
-	struct bio_vec *bvec;
+	struct bio_vec bvec;
+	struct bvec_iter iter;
 	unsigned long long addr8;
 	unsigned int laddr;
 	unsigned int bv_len;
@@ -696,7 +697,7 @@
 	int st;
 	int i;
 
-	addr8 = bio->bi_sector << 9; /* sectors are 512 bytes */
+	addr8 = bio->bi_iter.bi_sector << 9; /* sectors are 512 bytes */
 	atomic_set(n_dmas, 0);
 
 	for (i = 0; i < card->n_targets; i++) {
@@ -705,7 +706,7 @@
 	}
 
 	if (bio->bi_rw & REQ_DISCARD) {
-		bv_len = bio->bi_size;
+		bv_len = bio->bi_iter.bi_size;
 
 		while (bv_len > 0) {
 			tgt   = rsxx_get_dma_tgt(card, addr8);
@@ -722,9 +723,9 @@
 			bv_len -= RSXX_HW_BLK_SIZE;
 		}
 	} else {
-		bio_for_each_segment(bvec, bio, i) {
-			bv_len = bvec->bv_len;
-			bv_off = bvec->bv_offset;
+		bio_for_each_segment(bvec, bio, iter) {
+			bv_len = bvec.bv_len;
+			bv_off = bvec.bv_offset;
 
 			while (bv_len > 0) {
 				tgt   = rsxx_get_dma_tgt(card, addr8);
@@ -736,7 +737,7 @@
 				st = rsxx_queue_dma(card, &dma_list[tgt],
 							bio_data_dir(bio),
 							dma_off, dma_len,
-							laddr, bvec->bv_page,
+							laddr, bvec.bv_page,
 							bv_off, cb, cb_data);
 				if (st)
 					goto bvec_err;
diff --git a/drivers/block/sx8.c b/drivers/block/sx8.c
index 3fb6ab4..d5e2d12 100644
--- a/drivers/block/sx8.c
+++ b/drivers/block/sx8.c
@@ -1744,20 +1744,6 @@
 	kfree(host);
 	pci_release_regions(pdev);
 	pci_disable_device(pdev);
-	pci_set_drvdata(pdev, NULL);
 }
 
-static int __init carm_init(void)
-{
-	return pci_register_driver(&carm_driver);
-}
-
-static void __exit carm_exit(void)
-{
-	pci_unregister_driver(&carm_driver);
-}
-
-module_init(carm_init);
-module_exit(carm_exit);
-
-
+module_pci_driver(carm_driver);
diff --git a/drivers/block/umem.c b/drivers/block/umem.c
index ad70868..4cf81b5 100644
--- a/drivers/block/umem.c
+++ b/drivers/block/umem.c
@@ -108,8 +108,7 @@
 				    * have been written
 				    */
 	struct bio	*bio, *currentbio, **biotail;
-	int		current_idx;
-	sector_t	current_sector;
+	struct bvec_iter current_iter;
 
 	struct request_queue *queue;
 
@@ -118,7 +117,7 @@
 		struct mm_dma_desc	*desc;
 		int	 		cnt, headcnt;
 		struct bio		*bio, **biotail;
-		int			idx;
+		struct bvec_iter	iter;
 	} mm_pages[2];
 #define DESC_PER_PAGE ((PAGE_SIZE*2)/sizeof(struct mm_dma_desc))
 
@@ -344,16 +343,13 @@
 	dma_addr_t dma_handle;
 	int offset;
 	struct bio *bio;
-	struct bio_vec *vec;
-	int idx;
+	struct bio_vec vec;
 	int rw;
-	int len;
 
 	bio = card->currentbio;
 	if (!bio && card->bio) {
 		card->currentbio = card->bio;
-		card->current_idx = card->bio->bi_idx;
-		card->current_sector = card->bio->bi_sector;
+		card->current_iter = card->bio->bi_iter;
 		card->bio = card->bio->bi_next;
 		if (card->bio == NULL)
 			card->biotail = &card->bio;
@@ -362,18 +358,17 @@
 	}
 	if (!bio)
 		return 0;
-	idx = card->current_idx;
 
 	rw = bio_rw(bio);
 	if (card->mm_pages[card->Ready].cnt >= DESC_PER_PAGE)
 		return 0;
 
-	vec = bio_iovec_idx(bio, idx);
-	len = vec->bv_len;
+	vec = bio_iter_iovec(bio, card->current_iter);
+
 	dma_handle = pci_map_page(card->dev,
-				  vec->bv_page,
-				  vec->bv_offset,
-				  len,
+				  vec.bv_page,
+				  vec.bv_offset,
+				  vec.bv_len,
 				  (rw == READ) ?
 				  PCI_DMA_FROMDEVICE : PCI_DMA_TODEVICE);
 
@@ -381,7 +376,7 @@
 	desc = &p->desc[p->cnt];
 	p->cnt++;
 	if (p->bio == NULL)
-		p->idx = idx;
+		p->iter = card->current_iter;
 	if ((p->biotail) != &bio->bi_next) {
 		*(p->biotail) = bio;
 		p->biotail = &(bio->bi_next);
@@ -391,8 +386,8 @@
 	desc->data_dma_handle = dma_handle;
 
 	desc->pci_addr = cpu_to_le64((u64)desc->data_dma_handle);
-	desc->local_addr = cpu_to_le64(card->current_sector << 9);
-	desc->transfer_size = cpu_to_le32(len);
+	desc->local_addr = cpu_to_le64(card->current_iter.bi_sector << 9);
+	desc->transfer_size = cpu_to_le32(vec.bv_len);
 	offset = (((char *)&desc->sem_control_bits) - ((char *)p->desc));
 	desc->sem_addr = cpu_to_le64((u64)(p->page_dma+offset));
 	desc->zero1 = desc->zero2 = 0;
@@ -407,10 +402,9 @@
 		desc->control_bits |= cpu_to_le32(DMASCR_TRANSFER_READ);
 	desc->sem_control_bits = desc->control_bits;
 
-	card->current_sector += (len >> 9);
-	idx++;
-	card->current_idx = idx;
-	if (idx >= bio->bi_vcnt)
+
+	bio_advance_iter(bio, &card->current_iter, vec.bv_len);
+	if (!card->current_iter.bi_size)
 		card->currentbio = NULL;
 
 	return 1;
@@ -439,23 +433,25 @@
 		struct mm_dma_desc *desc = &page->desc[page->headcnt];
 		int control = le32_to_cpu(desc->sem_control_bits);
 		int last = 0;
-		int idx;
+		struct bio_vec vec;
 
 		if (!(control & DMASCR_DMA_COMPLETE)) {
 			control = dma_status;
 			last = 1;
 		}
+
 		page->headcnt++;
-		idx = page->idx;
-		page->idx++;
-		if (page->idx >= bio->bi_vcnt) {
+		vec = bio_iter_iovec(bio, page->iter);
+		bio_advance_iter(bio, &page->iter, vec.bv_len);
+
+		if (!page->iter.bi_size) {
 			page->bio = bio->bi_next;
 			if (page->bio)
-				page->idx = page->bio->bi_idx;
+				page->iter = page->bio->bi_iter;
 		}
 
 		pci_unmap_page(card->dev, desc->data_dma_handle,
-			       bio_iovec_idx(bio, idx)->bv_len,
+			       vec.bv_len,
 				 (control & DMASCR_TRANSFER_READ) ?
 				PCI_DMA_TODEVICE : PCI_DMA_FROMDEVICE);
 		if (control & DMASCR_HARD_ERROR) {
@@ -532,7 +528,8 @@
 {
 	struct cardinfo *card = q->queuedata;
 	pr_debug("mm_make_request %llu %u\n",
-		 (unsigned long long)bio->bi_sector, bio->bi_size);
+		 (unsigned long long)bio->bi_iter.bi_sector,
+		 bio->bi_iter.bi_size);
 
 	spin_lock_irq(&card->lock);
 	*card->biotail = bio;
diff --git a/drivers/block/virtio_blk.c b/drivers/block/virtio_blk.c
index 6a680d4..b1cb3f4 100644
--- a/drivers/block/virtio_blk.c
+++ b/drivers/block/virtio_blk.c
@@ -110,9 +110,9 @@
 	return virtqueue_add_sgs(vq, sgs, num_out, num_in, vbr, GFP_ATOMIC);
 }
 
-static inline void virtblk_request_done(struct virtblk_req *vbr)
+static inline void virtblk_request_done(struct request *req)
 {
-	struct request *req = vbr->req;
+	struct virtblk_req *vbr = req->special;
 	int error = virtblk_result(vbr);
 
 	if (req->cmd_type == REQ_TYPE_BLOCK_PC) {
@@ -138,7 +138,7 @@
 	do {
 		virtqueue_disable_cb(vq);
 		while ((vbr = virtqueue_get_buf(vblk->vq, &len)) != NULL) {
-			virtblk_request_done(vbr);
+			blk_mq_complete_request(vbr->req);
 			req_done = true;
 		}
 		if (unlikely(virtqueue_is_broken(vq)))
@@ -479,6 +479,7 @@
 	.map_queue	= blk_mq_map_queue,
 	.alloc_hctx	= blk_mq_alloc_single_hw_queue,
 	.free_hctx	= blk_mq_free_single_hw_queue,
+	.complete	= virtblk_request_done,
 };
 
 static struct blk_mq_reg virtio_mq_reg = {
diff --git a/drivers/block/xen-blkback/blkback.c b/drivers/block/xen-blkback/blkback.c
index 6620b73..64c60ed 100644
--- a/drivers/block/xen-blkback/blkback.c
+++ b/drivers/block/xen-blkback/blkback.c
@@ -299,7 +299,7 @@
 	BUG_ON(num != 0);
 }
 
-static void unmap_purged_grants(struct work_struct *work)
+void xen_blkbk_unmap_purged_grants(struct work_struct *work)
 {
 	struct gnttab_unmap_grant_ref unmap[BLKIF_MAX_SEGMENTS_PER_REQUEST];
 	struct page *pages[BLKIF_MAX_SEGMENTS_PER_REQUEST];
@@ -375,7 +375,7 @@
 
 	pr_debug(DRV_PFX "Going to purge %u persistent grants\n", num_clean);
 
-	INIT_LIST_HEAD(&blkif->persistent_purge_list);
+	BUG_ON(!list_empty(&blkif->persistent_purge_list));
 	root = &blkif->persistent_gnts;
 purge_list:
 	foreach_grant_safe(persistent_gnt, n, root, node) {
@@ -420,7 +420,6 @@
 	blkif->vbd.overflow_max_grants = 0;
 
 	/* We can defer this work */
-	INIT_WORK(&blkif->persistent_purge_work, unmap_purged_grants);
 	schedule_work(&blkif->persistent_purge_work);
 	pr_debug(DRV_PFX "Purged %u/%u\n", (total - num_clean), total);
 	return;
@@ -625,16 +624,8 @@
 			print_stats(blkif);
 	}
 
-	/* Since we are shutting down remove all pages from the buffer */
-	shrink_free_pagepool(blkif, 0 /* All */);
-
-	/* Free all persistent grant pages */
-	if (!RB_EMPTY_ROOT(&blkif->persistent_gnts))
-		free_persistent_gnts(blkif, &blkif->persistent_gnts,
-			blkif->persistent_gnt_c);
-
-	BUG_ON(!RB_EMPTY_ROOT(&blkif->persistent_gnts));
-	blkif->persistent_gnt_c = 0;
+	/* Drain pending purge work */
+	flush_work(&blkif->persistent_purge_work);
 
 	if (log_stats)
 		print_stats(blkif);
@@ -646,6 +637,23 @@
 }
 
 /*
+ * Remove persistent grants and empty the pool of free pages
+ */
+void xen_blkbk_free_caches(struct xen_blkif *blkif)
+{
+	/* Free all persistent grant pages */
+	if (!RB_EMPTY_ROOT(&blkif->persistent_gnts))
+		free_persistent_gnts(blkif, &blkif->persistent_gnts,
+			blkif->persistent_gnt_c);
+
+	BUG_ON(!RB_EMPTY_ROOT(&blkif->persistent_gnts));
+	blkif->persistent_gnt_c = 0;
+
+	/* Since we are shutting down remove all pages from the buffer */
+	shrink_free_pagepool(blkif, 0 /* All */);
+}
+
+/*
  * Unmap the grant references, and also remove the M2P over-rides
  * used in the 'pending_req'.
  */
@@ -838,7 +846,7 @@
 	struct grant_page **pages = pending_req->indirect_pages;
 	struct xen_blkif *blkif = pending_req->blkif;
 	int indirect_grefs, rc, n, nseg, i;
-	struct blkif_request_segment_aligned *segments = NULL;
+	struct blkif_request_segment *segments = NULL;
 
 	nseg = pending_req->nr_pages;
 	indirect_grefs = INDIRECT_PAGES(nseg);
@@ -934,9 +942,7 @@
 {
 	atomic_set(&blkif->drain, 1);
 	do {
-		/* The initial value is one, and one refcnt taken at the
-		 * start of the xen_blkif_schedule thread. */
-		if (atomic_read(&blkif->refcnt) <= 2)
+		if (atomic_read(&blkif->inflight) == 0)
 			break;
 		wait_for_completion_interruptible_timeout(
 				&blkif->drain_complete, HZ);
@@ -976,17 +982,30 @@
 	 * the proper response on the ring.
 	 */
 	if (atomic_dec_and_test(&pending_req->pendcnt)) {
-		xen_blkbk_unmap(pending_req->blkif,
+		struct xen_blkif *blkif = pending_req->blkif;
+
+		xen_blkbk_unmap(blkif,
 		                pending_req->segments,
 		                pending_req->nr_pages);
-		make_response(pending_req->blkif, pending_req->id,
+		make_response(blkif, pending_req->id,
 			      pending_req->operation, pending_req->status);
-		xen_blkif_put(pending_req->blkif);
-		if (atomic_read(&pending_req->blkif->refcnt) <= 2) {
-			if (atomic_read(&pending_req->blkif->drain))
-				complete(&pending_req->blkif->drain_complete);
+		free_req(blkif, pending_req);
+		/*
+		 * Make sure the request is freed before releasing blkif,
+		 * or there could be a race between free_req and the
+		 * cleanup done in xen_blkif_free during shutdown.
+		 *
+		 * NB: The fact that we might try to wake up pending_free_wq
+		 * before drain_complete (in case there's a drain going on)
+		 * it's not a problem with our current implementation
+		 * because we can assure there's no thread waiting on
+		 * pending_free_wq if there's a drain going on, but it has
+		 * to be taken into account if the current model is changed.
+		 */
+		if (atomic_dec_and_test(&blkif->inflight) && atomic_read(&blkif->drain)) {
+			complete(&blkif->drain_complete);
 		}
-		free_req(pending_req->blkif, pending_req);
+		xen_blkif_put(blkif);
 	}
 }
 
@@ -1240,6 +1259,7 @@
 	 * below (in "!bio") if we are handling a BLKIF_OP_DISCARD.
 	 */
 	xen_blkif_get(blkif);
+	atomic_inc(&blkif->inflight);
 
 	for (i = 0; i < nseg; i++) {
 		while ((bio == NULL) ||
@@ -1257,7 +1277,7 @@
 			bio->bi_bdev    = preq.bdev;
 			bio->bi_private = pending_req;
 			bio->bi_end_io  = end_block_io_op;
-			bio->bi_sector  = preq.sector_number;
+			bio->bi_iter.bi_sector  = preq.sector_number;
 		}
 
 		preq.sector_number += seg[i].nsec;
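
The blkback hunks above replace the old refcount heuristic ("refcnt <= 2 means idle") with an explicit inflight counter: each submitted request bumps it, each completion drops it, and the drain path waits for it to reach zero. A minimal C11 illustration of that counter, single-threaded for clarity:

	/* Sketch: draining against an explicit in-flight counter, as the
	 * xen-blkback changes switch to, instead of inferring idleness
	 * from a lifetime refcount. */
	#include <stdatomic.h>
	#include <stdbool.h>
	#include <stdio.h>

	static atomic_int inflight;

	static void submit_request(void) { atomic_fetch_add(&inflight, 1); }

	static bool complete_request(void)
	{
		/* true when this completion made the device idle */
		return atomic_fetch_sub(&inflight, 1) == 1;
	}

	int main(void)
	{
		submit_request();
		submit_request();

		printf("idle after 1st completion? %s\n", complete_request() ? "yes" : "no");
		printf("idle after 2nd completion? %s\n", complete_request() ? "yes" : "no");
		return 0;
	}
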
diff --git a/drivers/block/xen-blkback/common.h b/drivers/block/xen-blkback/common.h
index 8d88075..be05277 100644
--- a/drivers/block/xen-blkback/common.h
+++ b/drivers/block/xen-blkback/common.h
@@ -57,7 +57,7 @@
 #define MAX_INDIRECT_SEGMENTS 256
 
 #define SEGS_PER_INDIRECT_FRAME \
-	(PAGE_SIZE/sizeof(struct blkif_request_segment_aligned))
+	(PAGE_SIZE/sizeof(struct blkif_request_segment))
 #define MAX_INDIRECT_PAGES \
 	((MAX_INDIRECT_SEGMENTS + SEGS_PER_INDIRECT_FRAME - 1)/SEGS_PER_INDIRECT_FRAME)
 #define INDIRECT_PAGES(_segs) \
@@ -278,6 +278,7 @@
 	/* for barrier (drain) requests */
 	struct completion	drain_complete;
 	atomic_t		drain;
+	atomic_t		inflight;
 	/* One thread per one blkif. */
 	struct task_struct	*xenblkd;
 	unsigned int		waiting_reqs;
@@ -376,6 +377,7 @@
 irqreturn_t xen_blkif_be_int(int irq, void *dev_id);
 int xen_blkif_schedule(void *arg);
 int xen_blkif_purge_persistent(void *arg);
+void xen_blkbk_free_caches(struct xen_blkif *blkif);
 
 int xen_blkbk_flush_diskcache(struct xenbus_transaction xbt,
 			      struct backend_info *be, int state);
@@ -383,6 +385,7 @@
 int xen_blkbk_barrier(struct xenbus_transaction xbt,
 		      struct backend_info *be, int state);
 struct xenbus_device *xen_blkbk_xenbus(struct backend_info *be);
+void xen_blkbk_unmap_purged_grants(struct work_struct *work);
 
 static inline void blkif_get_x86_32_req(struct blkif_request *dst,
 					struct blkif_x86_32_request *src)
diff --git a/drivers/block/xen-blkback/xenbus.c b/drivers/block/xen-blkback/xenbus.c
index c2014a0..9a547e6 100644
--- a/drivers/block/xen-blkback/xenbus.c
+++ b/drivers/block/xen-blkback/xenbus.c
@@ -125,8 +125,11 @@
 	blkif->persistent_gnts.rb_node = NULL;
 	spin_lock_init(&blkif->free_pages_lock);
 	INIT_LIST_HEAD(&blkif->free_pages);
+	INIT_LIST_HEAD(&blkif->persistent_purge_list);
 	blkif->free_pages_num = 0;
 	atomic_set(&blkif->persistent_gnt_in_use, 0);
+	atomic_set(&blkif->inflight, 0);
+	INIT_WORK(&blkif->persistent_purge_work, xen_blkbk_unmap_purged_grants);
 
 	INIT_LIST_HEAD(&blkif->pending_free);
 
@@ -259,6 +262,17 @@
 	if (!atomic_dec_and_test(&blkif->refcnt))
 		BUG();
 
+	/* Remove all persistent grants and the cache of ballooned pages. */
+	xen_blkbk_free_caches(blkif);
+
+	/* Make sure everything is drained before shutting down */
+	BUG_ON(blkif->persistent_gnt_c != 0);
+	BUG_ON(atomic_read(&blkif->persistent_gnt_in_use) != 0);
+	BUG_ON(blkif->free_pages_num != 0);
+	BUG_ON(!list_empty(&blkif->persistent_purge_list));
+	BUG_ON(!list_empty(&blkif->free_pages));
+	BUG_ON(!RB_EMPTY_ROOT(&blkif->persistent_gnts));
+
 	/* Check that there is no request in use */
 	list_for_each_entry_safe(req, n, &blkif->pending_free, free_list) {
 		list_del(&req->free_list);
diff --git a/drivers/block/xen-blkfront.c b/drivers/block/xen-blkfront.c
index f9c43f9..efe1b47 100644
--- a/drivers/block/xen-blkfront.c
+++ b/drivers/block/xen-blkfront.c
@@ -162,7 +162,7 @@
 #define DEV_NAME	"xvd"	/* name in /dev */
 
 #define SEGS_PER_INDIRECT_FRAME \
-	(PAGE_SIZE/sizeof(struct blkif_request_segment_aligned))
+	(PAGE_SIZE/sizeof(struct blkif_request_segment))
 #define INDIRECT_GREFS(_segs) \
 	((_segs + SEGS_PER_INDIRECT_FRAME - 1)/SEGS_PER_INDIRECT_FRAME)
 
@@ -393,7 +393,7 @@
 	unsigned long id;
 	unsigned int fsect, lsect;
 	int i, ref, n;
-	struct blkif_request_segment_aligned *segments = NULL;
+	struct blkif_request_segment *segments = NULL;
 
 	/*
 	 * Used to store if we are able to queue the request by just using
@@ -550,7 +550,7 @@
 			} else {
 				n = i % SEGS_PER_INDIRECT_FRAME;
 				segments[n] =
-					(struct blkif_request_segment_aligned) {
+					(struct blkif_request_segment) {
 							.gref       = ref,
 							.first_sect = fsect,
 							.last_sect  = lsect };
@@ -1547,7 +1547,7 @@
 			for (i = 0; i < pending; i++) {
 				offset = (i * segs * PAGE_SIZE) >> 9;
 				size = min((unsigned int)(segs * PAGE_SIZE) >> 9,
-					   (unsigned int)(bio->bi_size >> 9) - offset);
+					   (unsigned int)bio_sectors(bio) - offset);
 				cloned_bio = bio_clone(bio, GFP_NOIO);
 				BUG_ON(cloned_bio == NULL);
 				bio_trim(cloned_bio, offset, size);
@@ -1904,13 +1904,16 @@
 	case XenbusStateReconfiguring:
 	case XenbusStateReconfigured:
 	case XenbusStateUnknown:
-	case XenbusStateClosed:
 		break;
 
 	case XenbusStateConnected:
 		blkfront_connect(info);
 		break;
 
+	case XenbusStateClosed:
+		if (dev->state == XenbusStateClosed)
+			break;
+		/* Missed the backend's Closing state -- fallthrough */
 	case XenbusStateClosing:
 		blkfront_closing(info);
 		break;
diff --git a/drivers/staging/zram/Kconfig b/drivers/block/zram/Kconfig
similarity index 92%
rename from drivers/staging/zram/Kconfig
rename to drivers/block/zram/Kconfig
index 983314c..3450be8 100644
--- a/drivers/staging/zram/Kconfig
+++ b/drivers/block/zram/Kconfig
@@ -14,7 +14,6 @@
 	  disks and maybe many more.
 
 	  See zram.txt for more information.
-	  Project home: <https://compcache.googlecode.com/>
 
 config ZRAM_DEBUG
 	bool "Compressed RAM block device debug support"
diff --git a/drivers/staging/zram/Makefile b/drivers/block/zram/Makefile
similarity index 100%
rename from drivers/staging/zram/Makefile
rename to drivers/block/zram/Makefile
diff --git a/drivers/staging/zram/zram_drv.c b/drivers/block/zram/zram_drv.c
similarity index 87%
rename from drivers/staging/zram/zram_drv.c
rename to drivers/block/zram/zram_drv.c
index 3277d98..011e55d 100644
--- a/drivers/staging/zram/zram_drv.c
+++ b/drivers/block/zram/zram_drv.c
@@ -2,6 +2,7 @@
  * Compressed RAM block device
  *
  * Copyright (C) 2008, 2009, 2010  Nitin Gupta
+ *               2012, 2013 Minchan Kim
  *
  * This code is released using a dual license strategy: BSD/GPL
  * You can choose the licence that better fits your requirements.
@@ -9,7 +10,6 @@
  * Released under the terms of 3-clause BSD License
  * Released under the terms of GNU General Public License Version 2.0
  *
- * Project home: http://compcache.googlecode.com
  */
 
 #define KMSG_COMPONENT "zram"
@@ -104,7 +104,7 @@
 {
 	struct zram *zram = dev_to_zram(dev);
 
-	return sprintf(buf, "%u\n", zram->stats.pages_zero);
+	return sprintf(buf, "%u\n", atomic_read(&zram->stats.pages_zero));
 }
 
 static ssize_t orig_data_size_show(struct device *dev,
@@ -113,7 +113,7 @@
 	struct zram *zram = dev_to_zram(dev);
 
 	return sprintf(buf, "%llu\n",
-		(u64)(zram->stats.pages_stored) << PAGE_SHIFT);
+		(u64)(atomic_read(&zram->stats.pages_stored)) << PAGE_SHIFT);
 }
 
 static ssize_t compr_data_size_show(struct device *dev,
@@ -140,6 +140,7 @@
 	return sprintf(buf, "%llu\n", val);
 }
 
+/* flag operations need meta->tb_lock */
 static int zram_test_flag(struct zram_meta *meta, u32 index,
 			enum zram_pageflags flag)
 {
@@ -171,13 +172,14 @@
 	u64 start, end, bound;
 
 	/* unaligned request */
-	if (unlikely(bio->bi_sector & (ZRAM_SECTOR_PER_LOGICAL_BLOCK - 1)))
+	if (unlikely(bio->bi_iter.bi_sector &
+		     (ZRAM_SECTOR_PER_LOGICAL_BLOCK - 1)))
 		return 0;
-	if (unlikely(bio->bi_size & (ZRAM_LOGICAL_BLOCK_SIZE - 1)))
+	if (unlikely(bio->bi_iter.bi_size & (ZRAM_LOGICAL_BLOCK_SIZE - 1)))
 		return 0;
 
-	start = bio->bi_sector;
-	end = start + (bio->bi_size >> SECTOR_SHIFT);
+	start = bio->bi_iter.bi_sector;
+	end = start + (bio->bi_iter.bi_size >> SECTOR_SHIFT);
 	bound = zram->disksize >> SECTOR_SHIFT;
 	/* out of range */
 	if (unlikely(start >= bound || end > bound || start > end))
@@ -227,6 +229,8 @@
 		goto free_table;
 	}
 
+	rwlock_init(&meta->tb_lock);
+	mutex_init(&meta->buffer_lock);
 	return meta;
 
 free_table:
@@ -279,6 +283,7 @@
 	flush_dcache_page(page);
 }
 
+/* NOTE: caller should hold meta->tb_lock for writing */
 static void zram_free_page(struct zram *zram, size_t index)
 {
 	struct zram_meta *meta = zram->meta;
@@ -292,21 +297,21 @@
 		 */
 		if (zram_test_flag(meta, index, ZRAM_ZERO)) {
 			zram_clear_flag(meta, index, ZRAM_ZERO);
-			zram->stats.pages_zero--;
+			atomic_dec(&zram->stats.pages_zero);
 		}
 		return;
 	}
 
 	if (unlikely(size > max_zpage_size))
-		zram->stats.bad_compress--;
+		atomic_dec(&zram->stats.bad_compress);
 
 	zs_free(meta->mem_pool, handle);
 
 	if (size <= PAGE_SIZE / 2)
-		zram->stats.good_compress--;
+		atomic_dec(&zram->stats.good_compress);
 
 	atomic64_sub(meta->table[index].size, &zram->stats.compr_size);
-	zram->stats.pages_stored--;
+	atomic_dec(&zram->stats.pages_stored);
 
 	meta->table[index].handle = 0;
 	meta->table[index].size = 0;
@@ -318,20 +323,26 @@
 	size_t clen = PAGE_SIZE;
 	unsigned char *cmem;
 	struct zram_meta *meta = zram->meta;
-	unsigned long handle = meta->table[index].handle;
+	unsigned long handle;
+	u16 size;
+
+	read_lock(&meta->tb_lock);
+	handle = meta->table[index].handle;
+	size = meta->table[index].size;
 
 	if (!handle || zram_test_flag(meta, index, ZRAM_ZERO)) {
+		read_unlock(&meta->tb_lock);
 		clear_page(mem);
 		return 0;
 	}
 
 	cmem = zs_map_object(meta->mem_pool, handle, ZS_MM_RO);
-	if (meta->table[index].size == PAGE_SIZE)
+	if (size == PAGE_SIZE)
 		copy_page(mem, cmem);
 	else
-		ret = lzo1x_decompress_safe(cmem, meta->table[index].size,
-						mem, &clen);
+		ret = lzo1x_decompress_safe(cmem, size, mem, &clen);
 	zs_unmap_object(meta->mem_pool, handle);
+	read_unlock(&meta->tb_lock);
 
 	/* Should NEVER happen. Return bio error if it does. */
 	if (unlikely(ret != LZO_E_OK)) {
@@ -352,11 +363,14 @@
 	struct zram_meta *meta = zram->meta;
 	page = bvec->bv_page;
 
+	read_lock(&meta->tb_lock);
 	if (unlikely(!meta->table[index].handle) ||
 			zram_test_flag(meta, index, ZRAM_ZERO)) {
+		read_unlock(&meta->tb_lock);
 		handle_zero_page(bvec);
 		return 0;
 	}
+	read_unlock(&meta->tb_lock);
 
 	if (is_partial_io(bvec))
 		/* Use  a temporary buffer to decompress the page */
@@ -399,6 +413,7 @@
 	struct page *page;
 	unsigned char *user_mem, *cmem, *src, *uncmem = NULL;
 	struct zram_meta *meta = zram->meta;
+	bool locked = false;
 
 	page = bvec->bv_page;
 	src = meta->compress_buffer;
@@ -418,6 +433,8 @@
 			goto out;
 	}
 
+	mutex_lock(&meta->buffer_lock);
+	locked = true;
 	user_mem = kmap_atomic(page);
 
 	if (is_partial_io(bvec)) {
@@ -432,25 +449,18 @@
 	if (page_zero_filled(uncmem)) {
 		kunmap_atomic(user_mem);
 		/* Free memory associated with this sector now. */
+		write_lock(&zram->meta->tb_lock);
 		zram_free_page(zram, index);
-
-		zram->stats.pages_zero++;
 		zram_set_flag(meta, index, ZRAM_ZERO);
+		write_unlock(&zram->meta->tb_lock);
+
+		atomic_inc(&zram->stats.pages_zero);
 		ret = 0;
 		goto out;
 	}
 
-	/*
-	 * zram_slot_free_notify could miss free so that let's
-	 * double check.
-	 */
-	if (unlikely(meta->table[index].handle ||
-			zram_test_flag(meta, index, ZRAM_ZERO)))
-		zram_free_page(zram, index);
-
 	ret = lzo1x_1_compress(uncmem, PAGE_SIZE, src, &clen,
 			       meta->compress_workmem);
-
 	if (!is_partial_io(bvec)) {
 		kunmap_atomic(user_mem);
 		user_mem = NULL;
@@ -463,7 +473,7 @@
 	}
 
 	if (unlikely(clen > max_zpage_size)) {
-		zram->stats.bad_compress++;
+		atomic_inc(&zram->stats.bad_compress);
 		clen = PAGE_SIZE;
 		src = NULL;
 		if (is_partial_io(bvec))
@@ -493,18 +503,22 @@
 	 * Free memory associated with this sector
 	 * before overwriting unused sectors.
 	 */
+	write_lock(&zram->meta->tb_lock);
 	zram_free_page(zram, index);
 
 	meta->table[index].handle = handle;
 	meta->table[index].size = clen;
+	write_unlock(&zram->meta->tb_lock);
 
 	/* Update stats */
 	atomic64_add(clen, &zram->stats.compr_size);
-	zram->stats.pages_stored++;
+	atomic_inc(&zram->stats.pages_stored);
 	if (clen <= PAGE_SIZE / 2)
-		zram->stats.good_compress++;
+		atomic_inc(&zram->stats.good_compress);
 
 out:
+	if (locked)
+		mutex_unlock(&meta->buffer_lock);
 	if (is_partial_io(bvec))
 		kfree(uncmem);
 
@@ -513,36 +527,15 @@
 	return ret;
 }
 
-static void handle_pending_slot_free(struct zram *zram)
-{
-	struct zram_slot_free *free_rq;
-
-	spin_lock(&zram->slot_free_lock);
-	while (zram->slot_free_rq) {
-		free_rq = zram->slot_free_rq;
-		zram->slot_free_rq = free_rq->next;
-		zram_free_page(zram, free_rq->index);
-		kfree(free_rq);
-	}
-	spin_unlock(&zram->slot_free_lock);
-}
-
 static int zram_bvec_rw(struct zram *zram, struct bio_vec *bvec, u32 index,
 			int offset, struct bio *bio, int rw)
 {
 	int ret;
 
-	if (rw == READ) {
-		down_read(&zram->lock);
-		handle_pending_slot_free(zram);
+	if (rw == READ)
 		ret = zram_bvec_read(zram, bvec, index, offset, bio);
-		up_read(&zram->lock);
-	} else {
-		down_write(&zram->lock);
-		handle_pending_slot_free(zram);
+	else
 		ret = zram_bvec_write(zram, bvec, index, offset);
-		up_write(&zram->lock);
-	}
 
 	return ret;
 }
@@ -552,8 +545,6 @@
 	size_t index;
 	struct zram_meta *meta;
 
-	flush_work(&zram->free_work);
-
 	down_write(&zram->init_lock);
 	if (!zram->init_done) {
 		up_write(&zram->init_lock);
@@ -680,9 +671,10 @@
 
 static void __zram_make_request(struct zram *zram, struct bio *bio, int rw)
 {
-	int i, offset;
+	int offset;
 	u32 index;
-	struct bio_vec *bvec;
+	struct bio_vec bvec;
+	struct bvec_iter iter;
 
 	switch (rw) {
 	case READ:
@@ -693,36 +685,37 @@
 		break;
 	}
 
-	index = bio->bi_sector >> SECTORS_PER_PAGE_SHIFT;
-	offset = (bio->bi_sector & (SECTORS_PER_PAGE - 1)) << SECTOR_SHIFT;
+	index = bio->bi_iter.bi_sector >> SECTORS_PER_PAGE_SHIFT;
+	offset = (bio->bi_iter.bi_sector &
+		  (SECTORS_PER_PAGE - 1)) << SECTOR_SHIFT;
 
-	bio_for_each_segment(bvec, bio, i) {
+	bio_for_each_segment(bvec, bio, iter) {
 		int max_transfer_size = PAGE_SIZE - offset;
 
-		if (bvec->bv_len > max_transfer_size) {
+		if (bvec.bv_len > max_transfer_size) {
 			/*
 			 * zram_bvec_rw() can only make operation on a single
 			 * zram page. Split the bio vector.
 			 */
 			struct bio_vec bv;
 
-			bv.bv_page = bvec->bv_page;
+			bv.bv_page = bvec.bv_page;
 			bv.bv_len = max_transfer_size;
-			bv.bv_offset = bvec->bv_offset;
+			bv.bv_offset = bvec.bv_offset;
 
 			if (zram_bvec_rw(zram, &bv, index, offset, bio, rw) < 0)
 				goto out;
 
-			bv.bv_len = bvec->bv_len - max_transfer_size;
+			bv.bv_len = bvec.bv_len - max_transfer_size;
 			bv.bv_offset += max_transfer_size;
 			if (zram_bvec_rw(zram, &bv, index+1, 0, bio, rw) < 0)
 				goto out;
 		} else
-			if (zram_bvec_rw(zram, bvec, index, offset, bio, rw)
+			if (zram_bvec_rw(zram, &bvec, index, offset, bio, rw)
 			    < 0)
 				goto out;
 
-		update_position(&index, &offset, bvec);
+		update_position(&index, &offset, &bvec);
 	}
 
 	set_bit(BIO_UPTODATE, &bio->bi_flags);
@@ -759,40 +752,19 @@
 	bio_io_error(bio);
 }
 
-static void zram_slot_free(struct work_struct *work)
-{
-	struct zram *zram;
-
-	zram = container_of(work, struct zram, free_work);
-	down_write(&zram->lock);
-	handle_pending_slot_free(zram);
-	up_write(&zram->lock);
-}
-
-static void add_slot_free(struct zram *zram, struct zram_slot_free *free_rq)
-{
-	spin_lock(&zram->slot_free_lock);
-	free_rq->next = zram->slot_free_rq;
-	zram->slot_free_rq = free_rq;
-	spin_unlock(&zram->slot_free_lock);
-}
-
 static void zram_slot_free_notify(struct block_device *bdev,
 				unsigned long index)
 {
 	struct zram *zram;
-	struct zram_slot_free *free_rq;
+	struct zram_meta *meta;
 
 	zram = bdev->bd_disk->private_data;
+	meta = zram->meta;
+
+	write_lock(&meta->tb_lock);
+	zram_free_page(zram, index);
+	write_unlock(&meta->tb_lock);
 	atomic64_inc(&zram->stats.notify_free);
-
-	free_rq = kmalloc(sizeof(struct zram_slot_free), GFP_ATOMIC);
-	if (!free_rq)
-		return;
-
-	free_rq->index = index;
-	add_slot_free(zram, free_rq);
-	schedule_work(&zram->free_work);
 }
 
 static const struct block_device_operations zram_devops = {
@@ -836,13 +808,8 @@
 {
 	int ret = -ENOMEM;
 
-	init_rwsem(&zram->lock);
 	init_rwsem(&zram->init_lock);
 
-	INIT_WORK(&zram->free_work, zram_slot_free);
-	spin_lock_init(&zram->slot_free_lock);
-	zram->slot_free_rq = NULL;
-
 	zram->queue = blk_alloc_queue(GFP_KERNEL);
 	if (!zram->queue) {
 		pr_err("Error allocating disk queue for device %d\n",
diff --git a/drivers/staging/zram/zram_drv.h b/drivers/block/zram/zram_drv.h
similarity index 74%
rename from drivers/staging/zram/zram_drv.h
rename to drivers/block/zram/zram_drv.h
index 97a3acf..ad8aa35 100644
--- a/drivers/staging/zram/zram_drv.h
+++ b/drivers/block/zram/zram_drv.h
@@ -2,6 +2,7 @@
  * Compressed RAM block device
  *
  * Copyright (C) 2008, 2009, 2010  Nitin Gupta
+ *               2012, 2013 Minchan Kim
  *
  * This code is released using a dual license strategy: BSD/GPL
  * You can choose the licence that better fits your requirements.
@@ -9,7 +10,6 @@
  * Released under the terms of 3-clause BSD License
  * Released under the terms of GNU General Public License Version 2.0
  *
- * Project home: http://compcache.googlecode.com
  */
 
 #ifndef _ZRAM_DRV_H_
@@ -17,8 +17,7 @@
 
 #include <linux/spinlock.h>
 #include <linux/mutex.h>
-
-#include "../zsmalloc/zsmalloc.h"
+#include <linux/zsmalloc.h>
 
 /*
  * Some arbitrary value. This is just to catch
@@ -69,10 +68,6 @@
 	u8 flags;
 } __aligned(4);
 
-/*
- * All 64bit fields should only be manipulated by 64bit atomic accessors.
- * All modifications to 32bit counter should be protected by zram->lock.
- */
 struct zram_stats {
 	atomic64_t compr_size;	/* compressed size of pages stored */
 	atomic64_t num_reads;	/* failed + successful */
@@ -81,33 +76,23 @@
 	atomic64_t failed_writes;	/* can happen when memory is too low */
 	atomic64_t invalid_io;	/* non-page-aligned I/O requests */
 	atomic64_t notify_free;	/* no. of swap slot free notifications */
-	u32 pages_zero;		/* no. of zero filled pages */
-	u32 pages_stored;	/* no. of pages currently stored */
-	u32 good_compress;	/* % of pages with compression ratio<=50% */
-	u32 bad_compress;	/* % of pages with compression ratio>=75% */
+	atomic_t pages_zero;		/* no. of zero filled pages */
+	atomic_t pages_stored;	/* no. of pages currently stored */
+	atomic_t good_compress;	/* % of pages with compression ratio<=50% */
+	atomic_t bad_compress;	/* % of pages with compression ratio>=75% */
 };
 
 struct zram_meta {
+	rwlock_t tb_lock;	/* protect table */
 	void *compress_workmem;
 	void *compress_buffer;
 	struct table *table;
 	struct zs_pool *mem_pool;
-};
-
-struct zram_slot_free {
-	unsigned long index;
-	struct zram_slot_free *next;
+	struct mutex buffer_lock; /* protect compress buffers */
 };
 
 struct zram {
 	struct zram_meta *meta;
-	struct rw_semaphore lock; /* protect compression buffers, table,
-				   * 32bit stat counters against concurrent
-				   * notifications, reads and writes */
-
-	struct work_struct free_work;  /* handle pending free request */
-	struct zram_slot_free *slot_free_rq; /* list head of free request */
-
 	struct request_queue *queue;
 	struct gendisk *disk;
 	int init_done;
@@ -118,7 +103,6 @@
 	 * we can store in a disk.
 	 */
 	u64 disksize;	/* bytes */
-	spinlock_t slot_free_lock;
 
 	struct zram_stats stats;
 };
diff --git a/drivers/bus/arm-cci.c b/drivers/bus/arm-cci.c
index b6739cb..962fd35 100644
--- a/drivers/bus/arm-cci.c
+++ b/drivers/bus/arm-cci.c
@@ -979,7 +979,7 @@
 
 	nb_cci_ports = cci_config->nb_ace + cci_config->nb_ace_lite;
 
-	ports = kcalloc(sizeof(*ports), nb_cci_ports, GFP_KERNEL);
+	ports = kcalloc(nb_cci_ports, sizeof(*ports), GFP_KERNEL);
 	if (!ports)
 		return -ENOMEM;
 
diff --git a/drivers/cdrom/gdrom.c b/drivers/cdrom/gdrom.c
index 5980cb9..51e75ad 100644
--- a/drivers/cdrom/gdrom.c
+++ b/drivers/cdrom/gdrom.c
@@ -561,11 +561,11 @@
 	int err;
 
 	err = request_irq(HW_EVENT_GDROM_CMD, gdrom_command_interrupt,
-		IRQF_DISABLED, "gdrom_command", &gd);
+		0, "gdrom_command", &gd);
 	if (err)
 		return err;
 	err = request_irq(HW_EVENT_GDROM_DMA, gdrom_dma_interrupt,
-		IRQF_DISABLED, "gdrom_dma", &gd);
+		0, "gdrom_dma", &gd);
 	if (err)
 		free_irq(HW_EVENT_GDROM_CMD, &gd);
 	return err;
diff --git a/drivers/char/Kconfig b/drivers/char/Kconfig
index fa3243d..1386749 100644
--- a/drivers/char/Kconfig
+++ b/drivers/char/Kconfig
@@ -499,6 +499,7 @@
 config MAX_RAW_DEVS
 	int "Maximum number of RAW devices to support (1-65536)"
 	depends on RAW_DRIVER
+	range 1 65536
 	default "256"
 	help
 	  The maximum number of RAW devices that are supported.
diff --git a/drivers/char/Makefile b/drivers/char/Makefile
index 290fe5b..a324f93 100644
--- a/drivers/char/Makefile
+++ b/drivers/char/Makefile
@@ -49,7 +49,7 @@
 obj-$(CONFIG_TELCLOCK)		+= tlclk.o
 
 obj-$(CONFIG_MWAVE)		+= mwave/
-obj-$(CONFIG_AGP)		+= agp/
+obj-y				+= agp/
 obj-$(CONFIG_PCMCIA)		+= pcmcia/
 
 obj-$(CONFIG_HANGCHECK_TIMER)	+= hangcheck-timer.o
diff --git a/drivers/char/agp/Kconfig b/drivers/char/agp/Kconfig
index d8b1b57..c528f96 100644
--- a/drivers/char/agp/Kconfig
+++ b/drivers/char/agp/Kconfig
@@ -68,6 +68,7 @@
 config AGP_INTEL
 	tristate "Intel 440LX/BX/GX, I8xx and E7x05 chipset support"
 	depends on AGP && X86
+	select INTEL_GTT
 	help
 	  This option gives you AGP support for the GLX component of X
 	  on Intel 440LX/BX/GX, 815, 820, 830, 840, 845, 850, 860, 875,
@@ -155,3 +156,7 @@
           This option gives you AGP GART support for the SGI TIO chipset
           for IA64 processors.
 
+config INTEL_GTT
+	tristate
+	depends on X86 && PCI
+
diff --git a/drivers/char/agp/Makefile b/drivers/char/agp/Makefile
index 8eb56e2..604489b 100644
--- a/drivers/char/agp/Makefile
+++ b/drivers/char/agp/Makefile
@@ -13,7 +13,7 @@
 obj-$(CONFIG_AGP_PARISC)	+= parisc-agp.o
 obj-$(CONFIG_AGP_I460)		+= i460-agp.o
 obj-$(CONFIG_AGP_INTEL)		+= intel-agp.o
-obj-$(CONFIG_AGP_INTEL)		+= intel-gtt.o
+obj-$(CONFIG_INTEL_GTT)		+= intel-gtt.o
 obj-$(CONFIG_AGP_NVIDIA)	+= nvidia-agp.o
 obj-$(CONFIG_AGP_SGI_TIOCA)	+= sgi-agp.o
 obj-$(CONFIG_AGP_SIS)		+= sis-agp.o
diff --git a/drivers/char/agp/intel-agp.c b/drivers/char/agp/intel-agp.c
index a7c2765..f9b9ca5 100644
--- a/drivers/char/agp/intel-agp.c
+++ b/drivers/char/agp/intel-agp.c
@@ -14,9 +14,6 @@
 #include "intel-agp.h"
 #include <drm/intel-gtt.h>
 
-int intel_agp_enabled;
-EXPORT_SYMBOL(intel_agp_enabled);
-
 static int intel_fetch_size(void)
 {
 	int i;
@@ -806,8 +803,6 @@
 found_gmch:
 	pci_set_drvdata(pdev, bridge);
 	err = agp_add_bridge(bridge);
-	if (!err)
-		intel_agp_enabled = 1;
 	return err;
 }
 
diff --git a/drivers/char/agp/intel-gtt.c b/drivers/char/agp/intel-gtt.c
index ad5da1f..5c85350 100644
--- a/drivers/char/agp/intel-gtt.c
+++ b/drivers/char/agp/intel-gtt.c
@@ -94,6 +94,7 @@
 #define IS_IRONLAKE	intel_private.driver->is_ironlake
 #define HAS_PGTBL_EN	intel_private.driver->has_pgtbl_enable
 
+#if IS_ENABLED(CONFIG_AGP_INTEL)
 static int intel_gtt_map_memory(struct page **pages,
 				unsigned int num_entries,
 				struct sg_table *st)
@@ -168,6 +169,7 @@
 	__free_pages(page, 2);
 	atomic_dec(&agp_bridge->current_memory_agp);
 }
+#endif
 
 #define I810_GTT_ORDER 4
 static int i810_setup(void)
@@ -208,6 +210,7 @@
 	free_gatt_pages(intel_private.i81x_gtt_table, I810_GTT_ORDER);
 }
 
+#if IS_ENABLED(CONFIG_AGP_INTEL)
 static int i810_insert_dcache_entries(struct agp_memory *mem, off_t pg_start,
 				      int type)
 {
@@ -288,6 +291,7 @@
 	}
 	kfree(curr);
 }
+#endif
 
 static int intel_gtt_setup_scratch_page(void)
 {
@@ -645,7 +649,9 @@
 		return -ENOMEM;
 	}
 
+#if IS_ENABLED(CONFIG_AGP_INTEL)
 	global_cache_flush();   /* FIXME: ? */
+#endif
 
 	intel_private.stolen_size = intel_gtt_stolen_size();
 
@@ -666,6 +672,7 @@
 	return 0;
 }
 
+#if IS_ENABLED(CONFIG_AGP_INTEL)
 static int intel_fake_agp_fetch_size(void)
 {
 	int num_sizes = ARRAY_SIZE(intel_fake_agp_sizes);
@@ -684,6 +691,7 @@
 
 	return 0;
 }
+#endif
 
 static void i830_cleanup(void)
 {
@@ -795,6 +803,7 @@
 	return 0;
 }
 
+#if IS_ENABLED(CONFIG_AGP_INTEL)
 static int intel_fake_agp_create_gatt_table(struct agp_bridge_data *bridge)
 {
 	agp_bridge->gatt_table_real = NULL;
@@ -819,6 +828,7 @@
 
 	return 0;
 }
+#endif
 
 static bool i830_check_flags(unsigned int flags)
 {
@@ -857,6 +867,7 @@
 }
 EXPORT_SYMBOL(intel_gtt_insert_sg_entries);
 
+#if IS_ENABLED(CONFIG_AGP_INTEL)
 static void intel_gtt_insert_pages(unsigned int first_entry,
 				   unsigned int num_entries,
 				   struct page **pages,
@@ -922,6 +933,7 @@
 	mem->is_flushed = true;
 	return ret;
 }
+#endif
 
 void intel_gtt_clear_range(unsigned int first_entry, unsigned int num_entries)
 {
@@ -935,6 +947,7 @@
 }
 EXPORT_SYMBOL(intel_gtt_clear_range);
 
+#if IS_ENABLED(CONFIG_AGP_INTEL)
 static int intel_fake_agp_remove_entries(struct agp_memory *mem,
 					 off_t pg_start, int type)
 {
@@ -976,6 +989,7 @@
 	/* always return NULL for other allocation types for now */
 	return NULL;
 }
+#endif
 
 static int intel_alloc_chipset_flush_resource(void)
 {
@@ -1129,6 +1143,7 @@
 	return 0;
 }
 
+#if IS_ENABLED(CONFIG_AGP_INTEL)
 static const struct agp_bridge_driver intel_fake_agp_driver = {
 	.owner			= THIS_MODULE,
 	.size_type		= FIXED_APER_SIZE,
@@ -1150,6 +1165,7 @@
 	.agp_destroy_page	= agp_generic_destroy_page,
 	.agp_destroy_pages      = agp_generic_destroy_pages,
 };
+#endif
 
 static const struct intel_gtt_driver i81x_gtt_driver = {
 	.gen = 1,
@@ -1367,11 +1383,13 @@
 
 	intel_private.refcount++;
 
+#if IS_ENABLED(CONFIG_AGP_INTEL)
 	if (bridge) {
 		bridge->driver = &intel_fake_agp_driver;
 		bridge->dev_private_data = &intel_private;
 		bridge->dev = bridge_pdev;
 	}
+#endif
 
 	intel_private.bridge_dev = pci_dev_get(bridge_pdev);
 
diff --git a/drivers/char/i8k.c b/drivers/char/i8k.c
index e210f85..d915707 100644
--- a/drivers/char/i8k.c
+++ b/drivers/char/i8k.c
@@ -4,7 +4,7 @@
  * Copyright (C) 2001  Massimo Dal Zotto <dz@debian.org>
  *
  * Hwmon integration:
- * Copyright (C) 2011  Jean Delvare <khali@linux-fr.org>
+ * Copyright (C) 2011  Jean Delvare <jdelvare@suse.de>
  * Copyright (C) 2013  Guenter Roeck <linux@roeck-us.net>
  *
  * This program is free software; you can redistribute it and/or modify it
diff --git a/drivers/char/ipmi/ipmi_si_intf.c b/drivers/char/ipmi/ipmi_si_intf.c
index 671c385..03f4189 100644
--- a/drivers/char/ipmi/ipmi_si_intf.c
+++ b/drivers/char/ipmi/ipmi_si_intf.c
@@ -2724,6 +2724,7 @@
 static int ipmi_parisc_probe(struct parisc_device *dev)
 {
 	struct smi_info *info;
+	int rv;
 
 	info = smi_info_alloc();
 
diff --git a/drivers/char/raw.c b/drivers/char/raw.c
index f3223aa..6e8d65e 100644
--- a/drivers/char/raw.c
+++ b/drivers/char/raw.c
@@ -190,7 +190,7 @@
 	struct raw_device_data *rawdev;
 	struct block_device *bdev;
 
-	if (number <= 0 || number >= MAX_RAW_MINORS)
+	if (number <= 0 || number >= max_raw_minors)
 		return -EINVAL;
 
 	rawdev = &raw_devices[number];
diff --git a/drivers/char/virtio_console.c b/drivers/char/virtio_console.c
index feea87c..6928d09 100644
--- a/drivers/char/virtio_console.c
+++ b/drivers/char/virtio_console.c
@@ -890,12 +890,10 @@
 	} else {
 		/* Fall back to copying a page */
 		struct page *page = alloc_page(GFP_KERNEL);
-		char *src = buf->ops->map(pipe, buf, 1);
-		char *dst;
+		char *src;
 
 		if (!page)
 			return -ENOMEM;
-		dst = kmap(page);
 
 		offset = sd->pos & ~PAGE_MASK;
 
@@ -903,9 +901,8 @@
 		if (len + offset > PAGE_SIZE)
 			len = PAGE_SIZE - offset;
 
-		memcpy(dst + offset, src + buf->offset, len);
-
-		kunmap(page);
+		src = buf->ops->map(pipe, buf, 1);
+		memcpy(page_address(page) + offset, src + buf->offset, len);
 		buf->ops->unmap(pipe, buf, src);
 
 		sg_set_page(&(sgl->sg[sgl->n]), page, len, offset);
diff --git a/drivers/clocksource/bcm_kona_timer.c b/drivers/clocksource/bcm_kona_timer.c
index 5176e76..0595dc6 100644
--- a/drivers/clocksource/bcm_kona_timer.c
+++ b/drivers/clocksource/bcm_kona_timer.c
@@ -17,6 +17,7 @@
 #include <linux/jiffies.h>
 #include <linux/clockchips.h>
 #include <linux/types.h>
+#include <linux/clk.h>
 
 #include <linux/io.h>
 #include <asm/mach/time.h>
@@ -98,24 +99,6 @@
 	return;
 }
 
-static void __init kona_timers_init(struct device_node *node)
-{
-	u32 freq;
-
-	if (!of_property_read_u32(node, "clock-frequency", &freq))
-		arch_timer_rate = freq;
-	else
-		panic("clock-frequency not set in the .dts file");
-
-	/* Setup IRQ numbers */
-	timers.tmr_irq = irq_of_parse_and_map(node, 0);
-
-	/* Setup IO addresses */
-	timers.tmr_regs = of_iomap(node, 0);
-
-	kona_timer_disable_and_clear(timers.tmr_regs);
-}
-
 static int kona_timer_set_next_event(unsigned long clc,
 				  struct clock_event_device *unused)
 {
@@ -190,7 +173,34 @@
 
 static void __init kona_timer_init(struct device_node *node)
 {
-	kona_timers_init(node);
+	u32 freq;
+	struct clk *external_clk;
+
+	if (!of_device_is_available(node)) {
+		pr_info("Kona Timer v1 marked as disabled in device tree\n");
+		return;
+	}
+
+	external_clk = of_clk_get_by_name(node, NULL);
+
+	if (!IS_ERR(external_clk)) {
+		arch_timer_rate = clk_get_rate(external_clk);
+		clk_prepare_enable(external_clk);
+	} else if (!of_property_read_u32(node, "clock-frequency", &freq)) {
+		arch_timer_rate = freq;
+	} else {
+		pr_err("Kona Timer v1 unable to determine clock-frequency");
+		return;
+	}
+
+	/* Setup IRQ numbers */
+	timers.tmr_irq = irq_of_parse_and_map(node, 0);
+
+	/* Setup IO addresses */
+	timers.tmr_regs = of_iomap(node, 0);
+
+	kona_timer_disable_and_clear(timers.tmr_regs);
+
 	kona_timer_clockevents_init();
 	setup_irq(timers.tmr_irq, &kona_timer_irq);
 	kona_timer_set_next_event((arch_timer_rate / HZ), NULL);
diff --git a/drivers/cpufreq/acpi-cpufreq.c b/drivers/cpufreq/acpi-cpufreq.c
index 79e5608..18448a7 100644
--- a/drivers/cpufreq/acpi-cpufreq.c
+++ b/drivers/cpufreq/acpi-cpufreq.c
@@ -919,7 +919,7 @@
 	}
 }
 
-static void __exit acpi_cpufreq_boost_exit(void)
+static void acpi_cpufreq_boost_exit(void)
 {
 	if (msrs) {
 		unregister_cpu_notifier(&boost_nb);
@@ -969,9 +969,10 @@
 	acpi_cpufreq_boost_init();
 
 	ret = cpufreq_register_driver(&acpi_cpufreq_driver);
-	if (ret)
+	if (ret) {
 		free_acpi_perf_data();
-
+		acpi_cpufreq_boost_exit();
+	}
 	return ret;
 }
 
diff --git a/drivers/cpufreq/intel_pstate.c b/drivers/cpufreq/intel_pstate.c
index 7e257b2..c788abf 100644
--- a/drivers/cpufreq/intel_pstate.c
+++ b/drivers/cpufreq/intel_pstate.c
@@ -51,12 +51,11 @@
 	return div_s64((int64_t)x << FRAC_BITS, (int64_t)y);
 }
 
-static u64 energy_divisor;
-
 struct sample {
 	int32_t core_pct_busy;
 	u64 aperf;
 	u64 mperf;
+	unsigned long long tsc;
 	int freq;
 };
 
@@ -96,6 +95,7 @@
 
 	u64	prev_aperf;
 	u64	prev_mperf;
+	unsigned long long prev_tsc;
 	int	sample_ptr;
 	struct sample samples[SAMPLE_COUNT];
 };
@@ -548,30 +548,41 @@
 					struct sample *sample)
 {
 	u64 core_pct;
-	core_pct = div64_u64(int_tofp(sample->aperf * 100),
-			     sample->mperf);
-	sample->freq = fp_toint(cpu->pstate.max_pstate * core_pct * 1000);
+	u64 c0_pct;
 
-	sample->core_pct_busy = core_pct;
+	core_pct = div64_u64(sample->aperf * 100, sample->mperf);
+
+	c0_pct = div64_u64(sample->mperf * 100, sample->tsc);
+	sample->freq = fp_toint(
+		mul_fp(int_tofp(cpu->pstate.max_pstate),
+			int_tofp(core_pct * 1000)));
+
+	sample->core_pct_busy = mul_fp(int_tofp(core_pct),
+				div_fp(int_tofp(c0_pct + 1), int_tofp(100)));
 }
 
 static inline void intel_pstate_sample(struct cpudata *cpu)
 {
 	u64 aperf, mperf;
+	unsigned long long tsc;
 
 	rdmsrl(MSR_IA32_APERF, aperf);
 	rdmsrl(MSR_IA32_MPERF, mperf);
+	tsc = native_read_tsc();
 
 	cpu->sample_ptr = (cpu->sample_ptr + 1) % SAMPLE_COUNT;
 	cpu->samples[cpu->sample_ptr].aperf = aperf;
 	cpu->samples[cpu->sample_ptr].mperf = mperf;
+	cpu->samples[cpu->sample_ptr].tsc = tsc;
 	cpu->samples[cpu->sample_ptr].aperf -= cpu->prev_aperf;
 	cpu->samples[cpu->sample_ptr].mperf -= cpu->prev_mperf;
+	cpu->samples[cpu->sample_ptr].tsc -= cpu->prev_tsc;
 
 	intel_pstate_calc_busy(cpu, &cpu->samples[cpu->sample_ptr]);
 
 	cpu->prev_aperf = aperf;
 	cpu->prev_mperf = mperf;
+	cpu->prev_tsc = tsc;
 }
 
 static inline void intel_pstate_set_sample_time(struct cpudata *cpu)
@@ -617,12 +628,10 @@
 {
 	struct cpudata *cpu = (struct cpudata *) __data;
 	struct sample *sample;
-	u64 energy;
 
 	intel_pstate_sample(cpu);
 
 	sample = &cpu->samples[cpu->sample_ptr];
-	rdmsrl(MSR_PKG_ENERGY_STATUS, energy);
 
 	intel_pstate_adjust_busy_pstate(cpu);
 
@@ -631,7 +640,6 @@
 			cpu->pstate.current_pstate,
 			sample->mperf,
 			sample->aperf,
-			div64_u64(energy, energy_divisor),
 			sample->freq);
 
 	intel_pstate_set_sample_time(cpu);
@@ -913,7 +921,6 @@
 	int cpu, rc = 0;
 	const struct x86_cpu_id *id;
 	struct cpu_defaults *cpu_info;
-	u64 units;
 
 	if (no_load)
 		return -ENODEV;
@@ -947,9 +954,6 @@
 	if (rc)
 		goto out;
 
-	rdmsrl(MSR_RAPL_POWER_UNIT, units);
-	energy_divisor = 1 << ((units >> 8) & 0x1f); /* bits{12:8} */
-
 	intel_pstate_debug_expose_params();
 	intel_pstate_sysfs_expose_params();
 
diff --git a/drivers/cpuidle/Kconfig b/drivers/cpuidle/Kconfig
index b3fb81d..f04e25f 100644
--- a/drivers/cpuidle/Kconfig
+++ b/drivers/cpuidle/Kconfig
@@ -35,6 +35,11 @@
 source "drivers/cpuidle/Kconfig.arm"
 endmenu
 
+menu "POWERPC CPU Idle Drivers"
+depends on PPC
+source "drivers/cpuidle/Kconfig.powerpc"
+endmenu
+
 endif
 
 config ARCH_NEEDS_CPU_IDLE_COUPLED
diff --git a/drivers/cpuidle/Kconfig.powerpc b/drivers/cpuidle/Kconfig.powerpc
new file mode 100644
index 0000000..66c3a09
--- /dev/null
+++ b/drivers/cpuidle/Kconfig.powerpc
@@ -0,0 +1,20 @@
+#
+# POWERPC CPU Idle Drivers
+#
+config PSERIES_CPUIDLE
+	bool "Cpuidle driver for pSeries platforms"
+	depends on CPU_IDLE
+	depends on PPC_PSERIES
+	default y
+	help
+	  Select this option to enable processor idle state management
+	  through the cpuidle subsystem.
+
+config POWERNV_CPUIDLE
+	bool "Cpuidle driver for powernv platforms"
+	depends on CPU_IDLE
+	depends on PPC_POWERNV
+	default y
+	help
+	  Select this option to enable processor idle state management
+	  through the cpuidle subsystem.
diff --git a/drivers/cpuidle/Makefile b/drivers/cpuidle/Makefile
index 527be28..f71ae1b 100644
--- a/drivers/cpuidle/Makefile
+++ b/drivers/cpuidle/Makefile
@@ -13,3 +13,8 @@
 obj-$(CONFIG_ARM_ZYNQ_CPUIDLE)		+= cpuidle-zynq.o
 obj-$(CONFIG_ARM_U8500_CPUIDLE)         += cpuidle-ux500.o
 obj-$(CONFIG_ARM_AT91_CPUIDLE)          += cpuidle-at91.o
+
+###############################################################################
+# POWERPC drivers
+obj-$(CONFIG_PSERIES_CPUIDLE)		+= cpuidle-pseries.o
+obj-$(CONFIG_POWERNV_CPUIDLE)		+= cpuidle-powernv.o
diff --git a/drivers/cpuidle/cpuidle-powernv.c b/drivers/cpuidle/cpuidle-powernv.c
new file mode 100644
index 0000000..78fd174
--- /dev/null
+++ b/drivers/cpuidle/cpuidle-powernv.c
@@ -0,0 +1,169 @@
+/*
+ *  cpuidle-powernv - idle state cpuidle driver.
+ *  Adapted from drivers/cpuidle/cpuidle-pseries
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/moduleparam.h>
+#include <linux/cpuidle.h>
+#include <linux/cpu.h>
+#include <linux/notifier.h>
+
+#include <asm/machdep.h>
+#include <asm/firmware.h>
+
+struct cpuidle_driver powernv_idle_driver = {
+	.name             = "powernv_idle",
+	.owner            = THIS_MODULE,
+};
+
+static int max_idle_state;
+static struct cpuidle_state *cpuidle_state_table;
+
+static int snooze_loop(struct cpuidle_device *dev,
+			struct cpuidle_driver *drv,
+			int index)
+{
+	local_irq_enable();
+	set_thread_flag(TIF_POLLING_NRFLAG);
+
+	while (!need_resched()) {
+		HMT_low();
+		HMT_very_low();
+	}
+
+	HMT_medium();
+	clear_thread_flag(TIF_POLLING_NRFLAG);
+	smp_mb();
+	return index;
+}
+
+static int nap_loop(struct cpuidle_device *dev,
+			struct cpuidle_driver *drv,
+			int index)
+{
+	power7_idle();
+	return index;
+}
+
+/*
+ * States for dedicated partition case.
+ */
+static struct cpuidle_state powernv_states[] = {
+	{ /* Snooze */
+		.name = "snooze",
+		.desc = "snooze",
+		.flags = CPUIDLE_FLAG_TIME_VALID,
+		.exit_latency = 0,
+		.target_residency = 0,
+		.enter = &snooze_loop },
+	{ /* NAP */
+		.name = "NAP",
+		.desc = "NAP",
+		.flags = CPUIDLE_FLAG_TIME_VALID,
+		.exit_latency = 10,
+		.target_residency = 100,
+		.enter = &nap_loop },
+};
+
+static int powernv_cpuidle_add_cpu_notifier(struct notifier_block *n,
+			unsigned long action, void *hcpu)
+{
+	int hotcpu = (unsigned long)hcpu;
+	struct cpuidle_device *dev =
+				per_cpu(cpuidle_devices, hotcpu);
+
+	if (dev && cpuidle_get_driver()) {
+		switch (action) {
+		case CPU_ONLINE:
+		case CPU_ONLINE_FROZEN:
+			cpuidle_pause_and_lock();
+			cpuidle_enable_device(dev);
+			cpuidle_resume_and_unlock();
+			break;
+
+		case CPU_DEAD:
+		case CPU_DEAD_FROZEN:
+			cpuidle_pause_and_lock();
+			cpuidle_disable_device(dev);
+			cpuidle_resume_and_unlock();
+			break;
+
+		default:
+			return NOTIFY_DONE;
+		}
+	}
+	return NOTIFY_OK;
+}
+
+static struct notifier_block setup_hotplug_notifier = {
+	.notifier_call = powernv_cpuidle_add_cpu_notifier,
+};
+
+/*
+ * powernv_cpuidle_driver_init()
+ */
+static int powernv_cpuidle_driver_init(void)
+{
+	int idle_state;
+	struct cpuidle_driver *drv = &powernv_idle_driver;
+
+	drv->state_count = 0;
+
+	for (idle_state = 0; idle_state < max_idle_state; ++idle_state) {
+		/* Is the state not enabled? */
+		if (cpuidle_state_table[idle_state].enter == NULL)
+			continue;
+
+		drv->states[drv->state_count] =	/* structure copy */
+			cpuidle_state_table[idle_state];
+
+		drv->state_count += 1;
+	}
+
+	return 0;
+}
+
+/*
+ * powernv_idle_probe()
+ * Choose state table for shared versus dedicated partition
+ */
+static int powernv_idle_probe(void)
+{
+
+	if (cpuidle_disable != IDLE_NO_OVERRIDE)
+		return -ENODEV;
+
+	if (firmware_has_feature(FW_FEATURE_OPALv3)) {
+		cpuidle_state_table = powernv_states;
+		max_idle_state = ARRAY_SIZE(powernv_states);
+	} else
+		return -ENODEV;
+
+	return 0;
+}
+
+static int __init powernv_processor_idle_init(void)
+{
+	int retval;
+
+	retval = powernv_idle_probe();
+	if (retval)
+		return retval;
+
+	powernv_cpuidle_driver_init();
+	retval = cpuidle_register(&powernv_idle_driver, NULL);
+	if (retval) {
+		printk(KERN_DEBUG "Registration of powernv driver failed.\n");
+		return retval;
+	}
+
+	register_cpu_notifier(&setup_hotplug_notifier);
+	printk(KERN_DEBUG "powernv_idle_driver registered\n");
+	return 0;
+}
+
+device_initcall(powernv_processor_idle_init);
diff --git a/arch/powerpc/platforms/pseries/processor_idle.c b/drivers/cpuidle/cpuidle-pseries.c
similarity index 75%
rename from arch/powerpc/platforms/pseries/processor_idle.c
rename to drivers/cpuidle/cpuidle-pseries.c
index 002d5b4..7ab564a 100644
--- a/arch/powerpc/platforms/pseries/processor_idle.c
+++ b/drivers/cpuidle/cpuidle-pseries.c
@@ -1,5 +1,5 @@
 /*
- *  processor_idle - idle state cpuidle driver.
+ *  cpuidle-pseries - idle state cpuidle driver.
  *  Adapted from drivers/idle/intel_idle.c and
  *  drivers/acpi/processor_idle.c
  *
@@ -24,9 +24,7 @@
 	.owner            = THIS_MODULE,
 };
 
-#define MAX_IDLE_STATE_COUNT	2
-
-static int max_idle_state = MAX_IDLE_STATE_COUNT - 1;
+static int max_idle_state;
 static struct cpuidle_state *cpuidle_state_table;
 
 static inline void idle_loop_prolog(unsigned long *in_purr)
@@ -54,13 +52,12 @@
 			int index)
 {
 	unsigned long in_purr;
-	int cpu = dev->cpu;
 
 	idle_loop_prolog(&in_purr);
 	local_irq_enable();
 	set_thread_flag(TIF_POLLING_NRFLAG);
 
-	while ((!need_resched()) && cpu_online(cpu)) {
+	while (!need_resched()) {
 		HMT_low();
 		HMT_very_low();
 	}
@@ -135,7 +132,7 @@
 /*
  * States for dedicated partition case.
  */
-static struct cpuidle_state dedicated_states[MAX_IDLE_STATE_COUNT] = {
+static struct cpuidle_state dedicated_states[] = {
 	{ /* Snooze */
 		.name = "snooze",
 		.desc = "snooze",
@@ -155,7 +152,7 @@
 /*
  * States for shared partition case.
  */
-static struct cpuidle_state shared_states[MAX_IDLE_STATE_COUNT] = {
+static struct cpuidle_state shared_states[] = {
 	{ /* Shared Cede */
 		.name = "Shared Cede",
 		.desc = "Shared Cede",
@@ -165,29 +162,12 @@
 		.enter = &shared_cede_loop },
 };
 
-void update_smt_snooze_delay(int cpu, int residency)
-{
-	struct cpuidle_driver *drv = cpuidle_get_driver();
-	struct cpuidle_device *dev = per_cpu(cpuidle_devices, cpu);
-
-	if (cpuidle_state_table != dedicated_states)
-		return;
-
-	if (residency < 0) {
-		/* Disable the Nap state on that cpu */
-		if (dev)
-			dev->states_usage[1].disable = 1;
-	} else
-		if (drv)
-			drv->states[1].target_residency = residency;
-}
-
 static int pseries_cpuidle_add_cpu_notifier(struct notifier_block *n,
 			unsigned long action, void *hcpu)
 {
 	int hotcpu = (unsigned long)hcpu;
 	struct cpuidle_device *dev =
-			per_cpu_ptr(cpuidle_devices, hotcpu);
+				per_cpu(cpuidle_devices, hotcpu);
 
 	if (dev && cpuidle_get_driver()) {
 		switch (action) {
@@ -226,12 +206,8 @@
 
 	drv->state_count = 0;
 
-	for (idle_state = 0; idle_state < MAX_IDLE_STATE_COUNT; ++idle_state) {
-
-		if (idle_state > max_idle_state)
-			break;
-
-		/* is the state not enabled? */
+	for (idle_state = 0; idle_state < max_idle_state; ++idle_state) {
+		/* Is the state not enabled? */
 		if (cpuidle_state_table[idle_state].enter == NULL)
 			continue;
 
@@ -251,21 +227,19 @@
 static int pseries_idle_probe(void)
 {
 
-	if (!firmware_has_feature(FW_FEATURE_SPLPAR))
-		return -ENODEV;
-
 	if (cpuidle_disable != IDLE_NO_OVERRIDE)
 		return -ENODEV;
 
-	if (max_idle_state == 0) {
-		printk(KERN_DEBUG "pseries processor idle disabled.\n");
-		return -EPERM;
-	}
-
-	if (lppaca_shared_proc(get_lppaca()))
-		cpuidle_state_table = shared_states;
-	else
-		cpuidle_state_table = dedicated_states;
+	if (firmware_has_feature(FW_FEATURE_SPLPAR)) {
+		if (lppaca_shared_proc(get_lppaca())) {
+			cpuidle_state_table = shared_states;
+			max_idle_state = ARRAY_SIZE(shared_states);
+		} else {
+			cpuidle_state_table = dedicated_states;
+			max_idle_state = ARRAY_SIZE(dedicated_states);
+		}
+	} else
+		return -ENODEV;
 
 	return 0;
 }
@@ -287,22 +261,7 @@
 
 	register_cpu_notifier(&setup_hotplug_notifier);
 	printk(KERN_DEBUG "pseries_idle_driver registered\n");
-
 	return 0;
 }
 
-static void __exit pseries_processor_idle_exit(void)
-{
-
-	unregister_cpu_notifier(&setup_hotplug_notifier);
-	cpuidle_unregister(&pseries_idle_driver);
-
-	return;
-}
-
-module_init(pseries_processor_idle_init);
-module_exit(pseries_processor_idle_exit);
-
-MODULE_AUTHOR("Deepthi Dharwar <deepthi@linux.vnet.ibm.com>");
-MODULE_DESCRIPTION("Cpuidle driver for POWER");
-MODULE_LICENSE("GPL");
+device_initcall(pseries_processor_idle_init);
diff --git a/drivers/crypto/nx/nx-842.c b/drivers/crypto/nx/nx-842.c
index 6c4c000..1e5481d 100644
--- a/drivers/crypto/nx/nx-842.c
+++ b/drivers/crypto/nx/nx-842.c
@@ -158,6 +158,15 @@
 	return sl->entry_nr * sizeof(struct nx842_slentry);
 }
 
+static inline unsigned long nx842_get_pa(void *addr)
+{
+	if (is_vmalloc_addr(addr))
+		return page_to_phys(vmalloc_to_page(addr))
+		       + offset_in_page(addr);
+	else
+		return __pa(addr);
+}
+
 static int nx842_build_scatterlist(unsigned long buf, int len,
 			struct nx842_scatterlist *sl)
 {
@@ -168,7 +177,7 @@
 
 	entry = sl->entries;
 	while (len) {
-		entry->ptr = __pa(buf);
+		entry->ptr = nx842_get_pa((void *)buf);
 		nextpage = ALIGN(buf + 1, NX842_HW_PAGE_SIZE);
 		if (nextpage < buf + len) {
 			/* we aren't at the end yet */
@@ -370,8 +379,8 @@
 	op.flags = NX842_OP_COMPRESS;
 	csbcpb = &workmem->csbcpb;
 	memset(csbcpb, 0, sizeof(*csbcpb));
-	op.csbcpb = __pa(csbcpb);
-	op.out = __pa(slout.entries);
+	op.csbcpb = nx842_get_pa(csbcpb);
+	op.out = nx842_get_pa(slout.entries);
 
 	for (i = 0; i < hdr->blocks_nr; i++) {
 		/*
@@ -401,13 +410,13 @@
 		 */
 		if (likely(max_sync_size == NX842_HW_PAGE_SIZE)) {
 			/* Create direct DDE */
-			op.in = __pa(inbuf);
+			op.in = nx842_get_pa((void *)inbuf);
 			op.inlen = max_sync_size;
 
 		} else {
 			/* Create indirect DDE (scatterlist) */
 			nx842_build_scatterlist(inbuf, max_sync_size, &slin);
-			op.in = __pa(slin.entries);
+			op.in = nx842_get_pa(slin.entries);
 			op.inlen = -nx842_get_scatterlist_size(&slin);
 		}
 
@@ -565,7 +574,7 @@
 	op.flags = NX842_OP_DECOMPRESS;
 	csbcpb = &workmem->csbcpb;
 	memset(csbcpb, 0, sizeof(*csbcpb));
-	op.csbcpb = __pa(csbcpb);
+	op.csbcpb = nx842_get_pa(csbcpb);
 
 	/*
 	 * max_sync_size may have changed since compression,
@@ -597,12 +606,12 @@
 		if (likely((inbuf & NX842_HW_PAGE_MASK) ==
 			((inbuf + hdr->sizes[i] - 1) & NX842_HW_PAGE_MASK))) {
 			/* Create direct DDE */
-			op.in = __pa(inbuf);
+			op.in = nx842_get_pa((void *)inbuf);
 			op.inlen = hdr->sizes[i];
 		} else {
 			/* Create indirect DDE (scatterlist) */
 			nx842_build_scatterlist(inbuf, hdr->sizes[i] , &slin);
-			op.in = __pa(slin.entries);
+			op.in = nx842_get_pa(slin.entries);
 			op.inlen = -nx842_get_scatterlist_size(&slin);
 		}
 
@@ -613,12 +622,12 @@
 		 */
 		if (likely(max_sync_size == NX842_HW_PAGE_SIZE)) {
 			/* Create direct DDE */
-			op.out = __pa(outbuf);
+			op.out = nx842_get_pa((void *)outbuf);
 			op.outlen = max_sync_size;
 		} else {
 			/* Create indirect DDE (scatterlist) */
 			nx842_build_scatterlist(outbuf, max_sync_size, &slout);
-			op.out = __pa(slout.entries);
+			op.out = nx842_get_pa(slout.entries);
 			op.outlen = -nx842_get_scatterlist_size(&slout);
 		}
 
diff --git a/drivers/devfreq/Kconfig b/drivers/devfreq/Kconfig
index 31f3adb..7d2f435 100644
--- a/drivers/devfreq/Kconfig
+++ b/drivers/devfreq/Kconfig
@@ -67,7 +67,7 @@
 
 config ARM_EXYNOS4_BUS_DEVFREQ
 	bool "ARM Exynos4210/4212/4412 Memory Bus DEVFREQ Driver"
-	depends on CPU_EXYNOS4210 || SOC_EXYNOS4212 || SOC_EXYNOS4412
+	depends on (CPU_EXYNOS4210 || SOC_EXYNOS4212 || SOC_EXYNOS4412) && !ARCH_MULTIPLATFORM
 	select ARCH_HAS_OPP
 	select DEVFREQ_GOV_SIMPLE_ONDEMAND
 	help
diff --git a/drivers/dma/Kconfig b/drivers/dma/Kconfig
index c10eb89..605b016 100644
--- a/drivers/dma/Kconfig
+++ b/drivers/dma/Kconfig
@@ -306,6 +306,12 @@
 	select DMA_ENGINE
 	select DMA_VIRTUAL_CHANNELS
 
+config DMA_BCM2835
+	tristate "BCM2835 DMA engine support"
+	depends on (ARCH_BCM2835 || MACH_BCM2708)
+	select DMA_ENGINE
+	select DMA_VIRTUAL_CHANNELS
+
 config TI_CPPI41
 	tristate "AM33xx CPPI41 DMA support"
 	depends on ARCH_OMAP
@@ -336,6 +342,15 @@
 	  Support the DMA engine for Hisilicon K3 platform
 	  devices.
 
+config MOXART_DMA
+	tristate "MOXART DMA support"
+	depends on ARCH_MOXART
+	select DMA_ENGINE
+	select DMA_OF
+	select DMA_VIRTUAL_CHANNELS
+	help
+	  Enable support for the MOXA ART SoC DMA controller.
+
 config DMA_ENGINE
 	bool
 
diff --git a/drivers/dma/Makefile b/drivers/dma/Makefile
index 0ce2da9..a029d0f4 100644
--- a/drivers/dma/Makefile
+++ b/drivers/dma/Makefile
@@ -38,7 +38,9 @@
 obj-$(CONFIG_DMA_SA11X0) += sa11x0-dma.o
 obj-$(CONFIG_MMP_TDMA) += mmp_tdma.o
 obj-$(CONFIG_DMA_OMAP) += omap-dma.o
+obj-$(CONFIG_DMA_BCM2835) += bcm2835-dma.o
 obj-$(CONFIG_MMP_PDMA) += mmp_pdma.o
 obj-$(CONFIG_DMA_JZ4740) += dma-jz4740.o
 obj-$(CONFIG_TI_CPPI41) += cppi41.o
 obj-$(CONFIG_K3_DMA) += k3dma.o
+obj-$(CONFIG_MOXART_DMA) += moxart-dma.o
diff --git a/drivers/dma/acpi-dma.c b/drivers/dma/acpi-dma.c
index e69b03c..1e506af 100644
--- a/drivers/dma/acpi-dma.c
+++ b/drivers/dma/acpi-dma.c
@@ -30,11 +30,12 @@
  * @adev:	ACPI device to match with
  * @adma:	struct acpi_dma of the given DMA controller
  *
- * Returns 1 on success, 0 when no information is available, or appropriate
- * errno value on error.
- *
  * In order to match a device from DSDT table to the corresponding CSRT device
  * we use MMIO address and IRQ.
+ *
+ * Return:
+ * 1 on success, 0 when no information is available, or appropriate errno value
+ * on error.
  */
 static int acpi_dma_parse_resource_group(const struct acpi_csrt_group *grp,
 		struct acpi_device *adev, struct acpi_dma *adma)
@@ -101,7 +102,6 @@
  *
  * We are using this table to get the request line range of the specific DMA
  * controller to be used later.
- *
  */
 static void acpi_dma_parse_csrt(struct acpi_device *adev, struct acpi_dma *adma)
 {
@@ -141,10 +141,11 @@
  * @data		pointer to controller specific data to be used by
  *			translation function
  *
- * Returns 0 on success or appropriate errno value on error.
- *
  * Allocated memory should be freed with appropriate acpi_dma_controller_free()
  * call.
+ *
+ * Return:
+ * 0 on success or appropriate errno value on error.
  */
 int acpi_dma_controller_register(struct device *dev,
 		struct dma_chan *(*acpi_dma_xlate)
@@ -188,6 +189,9 @@
  * @dev:	struct device of DMA controller
  *
  * Memory allocated by acpi_dma_controller_register() is freed here.
+ *
+ * Return:
+ * 0 on success or appropriate errno value on error.
  */
 int acpi_dma_controller_free(struct device *dev)
 {
@@ -225,6 +229,9 @@
  * Managed acpi_dma_controller_register(). DMA controller registered by this
  * function are automatically freed on driver detach. See
  * acpi_dma_controller_register() for more information.
+ *
+ * Return:
+ * 0 on success or appropriate errno value on error.
  */
 int devm_acpi_dma_controller_register(struct device *dev,
 		struct dma_chan *(*acpi_dma_xlate)
@@ -267,8 +274,6 @@
  * @adma:	struct acpi_dma of DMA controller
  * @dma_spec:	dma specifier to update
  *
- * Returns 0, if no information is avaiable, -1 on mismatch, and 1 otherwise.
- *
  * Accordingly to ACPI 5.0 Specification Table 6-170 "Fixed DMA Resource
  * Descriptor":
  *	DMA Request Line bits is a platform-relative number uniquely
@@ -276,6 +281,9 @@
  *	mapping is done in a controller-specific OS driver.
  * That's why we can safely adjust slave_id when the appropriate controller is
  * found.
+ *
+ * Return:
+ * 0, if no information is available, -1 on mismatch, and 1 otherwise.
  */
 static int acpi_dma_update_dma_spec(struct acpi_dma *adma,
 		struct acpi_dma_spec *dma_spec)
@@ -334,7 +342,8 @@
  * @dev:	struct device to get DMA request from
  * @index:	index of FixedDMA descriptor for @dev
  *
- * Returns pointer to appropriate dma channel on success or NULL on error.
+ * Return:
+ * Pointer to appropriate dma channel on success or NULL on error.
  */
 struct dma_chan *acpi_dma_request_slave_chan_by_index(struct device *dev,
 		size_t index)
@@ -403,7 +412,8 @@
  * translate the names "tx" and "rx" here based on the most common case where
  * the first FixedDMA descriptor is TX and second is RX.
  *
- * Returns pointer to appropriate dma channel on success or NULL on error.
+ * Return:
+ * Pointer to appropriate dma channel on success or NULL on error.
  */
 struct dma_chan *acpi_dma_request_slave_chan_by_name(struct device *dev,
 		const char *name)
@@ -427,8 +437,10 @@
  * @adma: pointer to ACPI DMA controller data
  *
  * A simple translation function for ACPI based devices. Passes &struct
- * dma_spec to the DMA controller driver provided filter function. Returns
- * pointer to the channel if found or %NULL otherwise.
+ * dma_spec to the DMA controller driver provided filter function.
+ *
+ * Return:
+ * Pointer to the channel if found or %NULL otherwise.
  */
 struct dma_chan *acpi_dma_simple_xlate(struct acpi_dma_spec *dma_spec,
 		struct acpi_dma *adma)
diff --git a/drivers/dma/amba-pl08x.c b/drivers/dma/amba-pl08x.c
index ec4ee5c..8114731 100644
--- a/drivers/dma/amba-pl08x.c
+++ b/drivers/dma/amba-pl08x.c
@@ -83,6 +83,7 @@
 #include <linux/dmaengine.h>
 #include <linux/dmapool.h>
 #include <linux/dma-mapping.h>
+#include <linux/export.h>
 #include <linux/init.h>
 #include <linux/interrupt.h>
 #include <linux/module.h>
@@ -1771,6 +1772,7 @@
 
 	return false;
 }
+EXPORT_SYMBOL_GPL(pl08x_filter_id);
 
 /*
  * Just check that the device is there and active
@@ -2167,7 +2169,7 @@
 	/* Register slave channels */
 	ret = pl08x_dma_init_virtual_channels(pl08x, &pl08x->slave,
 			pl08x->pd->num_slave_channels, true);
-	if (ret <= 0) {
+	if (ret < 0) {
 		dev_warn(&pl08x->adev->dev,
 			"%s failed to enumerate slave channels - %d\n",
 				__func__, ret);
diff --git a/drivers/dma/bcm2835-dma.c b/drivers/dma/bcm2835-dma.c
new file mode 100644
index 0000000..a036021
--- /dev/null
+++ b/drivers/dma/bcm2835-dma.c
@@ -0,0 +1,707 @@
+/*
+ * BCM2835 DMA engine support
+ *
+ * This driver only supports cyclic DMA transfers
+ * as needed for the I2S module.
+ *
+ * Author:      Florian Meier <florian.meier@koalo.de>
+ *              Copyright 2013
+ *
+ * Based on
+ *	OMAP DMAengine support by Russell King
+ *
+ *	BCM2708 DMA Driver
+ *	Copyright (C) 2010 Broadcom
+ *
+ *	Raspberry Pi PCM I2S ALSA Driver
+ *	Copyright (c) by Phil Poole 2013
+ *
+ *	MARVELL MMP Peripheral DMA Driver
+ *	Copyright 2012 Marvell International Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+#include <linux/dmaengine.h>
+#include <linux/dma-mapping.h>
+#include <linux/err.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/list.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <linux/io.h>
+#include <linux/spinlock.h>
+#include <linux/of.h>
+#include <linux/of_dma.h>
+
+#include "virt-dma.h"
+
+struct bcm2835_dmadev {
+	struct dma_device ddev;
+	spinlock_t lock;
+	void __iomem *base;
+	struct device_dma_parameters dma_parms;
+};
+
+struct bcm2835_dma_cb {
+	uint32_t info;
+	uint32_t src;
+	uint32_t dst;
+	uint32_t length;
+	uint32_t stride;
+	uint32_t next;
+	uint32_t pad[2];
+};
+
+struct bcm2835_chan {
+	struct virt_dma_chan vc;
+	struct list_head node;
+
+	struct dma_slave_config	cfg;
+	bool cyclic;
+	unsigned int dreq;
+
+	int ch;
+	struct bcm2835_desc *desc;
+
+	void __iomem *chan_base;
+	int irq_number;
+};
+
+struct bcm2835_desc {
+	struct virt_dma_desc vd;
+	enum dma_transfer_direction dir;
+
+	unsigned int control_block_size;
+	struct bcm2835_dma_cb *control_block_base;
+	dma_addr_t control_block_base_phys;
+
+	unsigned int frames;
+	size_t size;
+};
+
+#define BCM2835_DMA_CS		0x00
+#define BCM2835_DMA_ADDR	0x04
+#define BCM2835_DMA_SOURCE_AD	0x0c
+#define BCM2835_DMA_DEST_AD	0x10
+#define BCM2835_DMA_NEXTCB	0x1C
+
+/* DMA CS Control and Status bits */
+#define BCM2835_DMA_ACTIVE	BIT(0)
+#define BCM2835_DMA_INT	BIT(2)
+#define BCM2835_DMA_ISPAUSED	BIT(4)  /* Pause requested or not active */
+#define BCM2835_DMA_ISHELD	BIT(5)  /* Is held by DREQ flow control */
+#define BCM2835_DMA_ERR	BIT(8)
+#define BCM2835_DMA_ABORT	BIT(30) /* Stop current CB, go to next, WO */
+#define BCM2835_DMA_RESET	BIT(31) /* WO, self clearing */
+
+#define BCM2835_DMA_INT_EN	BIT(0)
+#define BCM2835_DMA_D_INC	BIT(4)
+#define BCM2835_DMA_D_DREQ	BIT(6)
+#define BCM2835_DMA_S_INC	BIT(8)
+#define BCM2835_DMA_S_DREQ	BIT(10)
+
+#define BCM2835_DMA_PER_MAP(x)	((x) << 16)
+
+#define BCM2835_DMA_DATA_TYPE_S8	1
+#define BCM2835_DMA_DATA_TYPE_S16	2
+#define BCM2835_DMA_DATA_TYPE_S32	4
+#define BCM2835_DMA_DATA_TYPE_S128	16
+
+#define BCM2835_DMA_BULK_MASK	BIT(0)
+#define BCM2835_DMA_FIQ_MASK	(BIT(2) | BIT(3))
+
+/* Valid only for channels 0 - 14, 15 has its own base address */
+#define BCM2835_DMA_CHAN(n)	((n) << 8) /* Base address */
+#define BCM2835_DMA_CHANIO(base, n) ((base) + BCM2835_DMA_CHAN(n))
+
+static inline struct bcm2835_dmadev *to_bcm2835_dma_dev(struct dma_device *d)
+{
+	return container_of(d, struct bcm2835_dmadev, ddev);
+}
+
+static inline struct bcm2835_chan *to_bcm2835_dma_chan(struct dma_chan *c)
+{
+	return container_of(c, struct bcm2835_chan, vc.chan);
+}
+
+static inline struct bcm2835_desc *to_bcm2835_dma_desc(
+		struct dma_async_tx_descriptor *t)
+{
+	return container_of(t, struct bcm2835_desc, vd.tx);
+}
+
+static void bcm2835_dma_desc_free(struct virt_dma_desc *vd)
+{
+	struct bcm2835_desc *desc = container_of(vd, struct bcm2835_desc, vd);
+	dma_free_coherent(desc->vd.tx.chan->device->dev,
+			desc->control_block_size,
+			desc->control_block_base,
+			desc->control_block_base_phys);
+	kfree(desc);
+}
+
+static int bcm2835_dma_abort(void __iomem *chan_base)
+{
+	unsigned long cs;
+	long int timeout = 10000;
+
+	cs = readl(chan_base + BCM2835_DMA_CS);
+	if (!(cs & BCM2835_DMA_ACTIVE))
+		return 0;
+
+	/* Write 0 to the active bit - Pause the DMA */
+	writel(0, chan_base + BCM2835_DMA_CS);
+
+	/* Wait for any current AXI transfer to complete */
+	while ((cs & BCM2835_DMA_ISPAUSED) && --timeout) {
+		cpu_relax();
+		cs = readl(chan_base + BCM2835_DMA_CS);
+	}
+
+	/* We'll un-pause when we set off our next DMA */
+	if (!timeout)
+		return -ETIMEDOUT;
+
+	if (!(cs & BCM2835_DMA_ACTIVE))
+		return 0;
+
+	/* Terminate the control block chain */
+	writel(0, chan_base + BCM2835_DMA_NEXTCB);
+
+	/* Abort the whole DMA */
+	writel(BCM2835_DMA_ABORT | BCM2835_DMA_ACTIVE,
+	       chan_base + BCM2835_DMA_CS);
+
+	return 0;
+}
+
+static void bcm2835_dma_start_desc(struct bcm2835_chan *c)
+{
+	struct virt_dma_desc *vd = vchan_next_desc(&c->vc);
+	struct bcm2835_desc *d;
+
+	if (!vd) {
+		c->desc = NULL;
+		return;
+	}
+
+	list_del(&vd->node);
+
+	c->desc = d = to_bcm2835_dma_desc(&vd->tx);
+
+	writel(d->control_block_base_phys, c->chan_base + BCM2835_DMA_ADDR);
+	writel(BCM2835_DMA_ACTIVE, c->chan_base + BCM2835_DMA_CS);
+}
+
+static irqreturn_t bcm2835_dma_callback(int irq, void *data)
+{
+	struct bcm2835_chan *c = data;
+	struct bcm2835_desc *d;
+	unsigned long flags;
+
+	spin_lock_irqsave(&c->vc.lock, flags);
+
+	/* Acknowledge interrupt */
+	writel(BCM2835_DMA_INT, c->chan_base + BCM2835_DMA_CS);
+
+	d = c->desc;
+
+	if (d) {
+		/* TODO Only works for cyclic DMA */
+		vchan_cyclic_callback(&d->vd);
+	}
+
+	/* Keep the DMA engine running */
+	writel(BCM2835_DMA_ACTIVE, c->chan_base + BCM2835_DMA_CS);
+
+	spin_unlock_irqrestore(&c->vc.lock, flags);
+
+	return IRQ_HANDLED;
+}
+
+static int bcm2835_dma_alloc_chan_resources(struct dma_chan *chan)
+{
+	struct bcm2835_chan *c = to_bcm2835_dma_chan(chan);
+
+	dev_dbg(c->vc.chan.device->dev,
+			"Allocating DMA channel %d\n", c->ch);
+
+	return request_irq(c->irq_number,
+			bcm2835_dma_callback, 0, "DMA IRQ", c);
+}
+
+static void bcm2835_dma_free_chan_resources(struct dma_chan *chan)
+{
+	struct bcm2835_chan *c = to_bcm2835_dma_chan(chan);
+
+	vchan_free_chan_resources(&c->vc);
+	free_irq(c->irq_number, c);
+
+	dev_dbg(c->vc.chan.device->dev, "Freeing DMA channel %u\n", c->ch);
+}
+
+static size_t bcm2835_dma_desc_size(struct bcm2835_desc *d)
+{
+	return d->size;
+}
+
+static size_t bcm2835_dma_desc_size_pos(struct bcm2835_desc *d, dma_addr_t addr)
+{
+	unsigned int i;
+	size_t size;
+
+	for (size = i = 0; i < d->frames; i++) {
+		struct bcm2835_dma_cb *control_block =
+			&d->control_block_base[i];
+		size_t this_size = control_block->length;
+		dma_addr_t dma;
+
+		if (d->dir == DMA_DEV_TO_MEM)
+			dma = control_block->dst;
+		else
+			dma = control_block->src;
+
+		if (size)
+			size += this_size;
+		else if (addr >= dma && addr < dma + this_size)
+			size += dma + this_size - addr;
+	}
+
+	return size;
+}
+
+static enum dma_status bcm2835_dma_tx_status(struct dma_chan *chan,
+	dma_cookie_t cookie, struct dma_tx_state *txstate)
+{
+	struct bcm2835_chan *c = to_bcm2835_dma_chan(chan);
+	struct virt_dma_desc *vd;
+	enum dma_status ret;
+	unsigned long flags;
+
+	ret = dma_cookie_status(chan, cookie, txstate);
+	if (ret == DMA_COMPLETE || !txstate)
+		return ret;
+
+	spin_lock_irqsave(&c->vc.lock, flags);
+	vd = vchan_find_desc(&c->vc, cookie);
+	if (vd) {
+		txstate->residue =
+			bcm2835_dma_desc_size(to_bcm2835_dma_desc(&vd->tx));
+	} else if (c->desc && c->desc->vd.tx.cookie == cookie) {
+		struct bcm2835_desc *d = c->desc;
+		dma_addr_t pos;
+
+		if (d->dir == DMA_MEM_TO_DEV)
+			pos = readl(c->chan_base + BCM2835_DMA_SOURCE_AD);
+		else if (d->dir == DMA_DEV_TO_MEM)
+			pos = readl(c->chan_base + BCM2835_DMA_DEST_AD);
+		else
+			pos = 0;
+
+		txstate->residue = bcm2835_dma_desc_size_pos(d, pos);
+	} else {
+		txstate->residue = 0;
+	}
+
+	spin_unlock_irqrestore(&c->vc.lock, flags);
+
+	return ret;
+}
+
+static void bcm2835_dma_issue_pending(struct dma_chan *chan)
+{
+	struct bcm2835_chan *c = to_bcm2835_dma_chan(chan);
+	unsigned long flags;
+
+	c->cyclic = true; /* Nothing else is implemented */
+
+	spin_lock_irqsave(&c->vc.lock, flags);
+	if (vchan_issue_pending(&c->vc) && !c->desc)
+		bcm2835_dma_start_desc(c);
+
+	spin_unlock_irqrestore(&c->vc.lock, flags);
+}
+
+static struct dma_async_tx_descriptor *bcm2835_dma_prep_dma_cyclic(
+	struct dma_chan *chan, dma_addr_t buf_addr, size_t buf_len,
+	size_t period_len, enum dma_transfer_direction direction,
+	unsigned long flags, void *context)
+{
+	struct bcm2835_chan *c = to_bcm2835_dma_chan(chan);
+	enum dma_slave_buswidth dev_width;
+	struct bcm2835_desc *d;
+	dma_addr_t dev_addr;
+	unsigned int es, sync_type;
+	unsigned int frame;
+
+	/* Grab configuration */
+	if (!is_slave_direction(direction)) {
+		dev_err(chan->device->dev, "%s: bad direction?\n", __func__);
+		return NULL;
+	}
+
+	if (direction == DMA_DEV_TO_MEM) {
+		dev_addr = c->cfg.src_addr;
+		dev_width = c->cfg.src_addr_width;
+		sync_type = BCM2835_DMA_S_DREQ;
+	} else {
+		dev_addr = c->cfg.dst_addr;
+		dev_width = c->cfg.dst_addr_width;
+		sync_type = BCM2835_DMA_D_DREQ;
+	}
+
+	/* Bus width translates to the element size (ES) */
+	switch (dev_width) {
+	case DMA_SLAVE_BUSWIDTH_4_BYTES:
+		es = BCM2835_DMA_DATA_TYPE_S32;
+		break;
+	default:
+		return NULL;
+	}
+
+	/* Now allocate and setup the descriptor. */
+	d = kzalloc(sizeof(*d), GFP_NOWAIT);
+	if (!d)
+		return NULL;
+
+	d->dir = direction;
+	d->frames = buf_len / period_len;
+
+	/* Allocate memory for control blocks */
+	d->control_block_size = d->frames * sizeof(struct bcm2835_dma_cb);
+	d->control_block_base = dma_zalloc_coherent(chan->device->dev,
+			d->control_block_size, &d->control_block_base_phys,
+			GFP_NOWAIT);
+
+	if (!d->control_block_base) {
+		kfree(d);
+		return NULL;
+	}
+
+	/*
+	 * Iterate over all frames, create a control block
+	 * for each frame and link them together.
+	 */
+	for (frame = 0; frame < d->frames; frame++) {
+		struct bcm2835_dma_cb *control_block =
+			&d->control_block_base[frame];
+
+		/* Set up addresses */
+		if (d->dir == DMA_DEV_TO_MEM) {
+			control_block->info = BCM2835_DMA_D_INC;
+			control_block->src = dev_addr;
+			control_block->dst = buf_addr + frame * period_len;
+		} else {
+			control_block->info = BCM2835_DMA_S_INC;
+			control_block->src = buf_addr + frame * period_len;
+			control_block->dst = dev_addr;
+		}
+
+		/* Enable interrupt */
+		control_block->info |= BCM2835_DMA_INT_EN;
+
+		/* Setup synchronization */
+		if (sync_type != 0)
+			control_block->info |= sync_type;
+
+		/* Setup DREQ channel */
+		if (c->dreq != 0)
+			control_block->info |=
+				BCM2835_DMA_PER_MAP(c->dreq);
+
+		/* Length of a frame */
+		control_block->length = period_len;
+		d->size += control_block->length;
+
+		/*
+		 * Next block is the next frame.
+		 * This DMA engine driver currently only supports cyclic DMA.
+		 * Therefore, wrap around at the number of frames.
+		 */
+		control_block->next = d->control_block_base_phys +
+			sizeof(struct bcm2835_dma_cb)
+			* ((frame + 1) % d->frames);
+	}
+
+	return vchan_tx_prep(&c->vc, &d->vd, flags);
+}
+
+static int bcm2835_dma_slave_config(struct bcm2835_chan *c,
+		struct dma_slave_config *cfg)
+{
+	if ((cfg->direction == DMA_DEV_TO_MEM &&
+	     cfg->src_addr_width != DMA_SLAVE_BUSWIDTH_4_BYTES) ||
+	    (cfg->direction == DMA_MEM_TO_DEV &&
+	     cfg->dst_addr_width != DMA_SLAVE_BUSWIDTH_4_BYTES) ||
+	    !is_slave_direction(cfg->direction)) {
+		return -EINVAL;
+	}
+
+	c->cfg = *cfg;
+
+	return 0;
+}
+
+static int bcm2835_dma_terminate_all(struct bcm2835_chan *c)
+{
+	struct bcm2835_dmadev *d = to_bcm2835_dma_dev(c->vc.chan.device);
+	unsigned long flags;
+	int timeout = 10000;
+	LIST_HEAD(head);
+
+	spin_lock_irqsave(&c->vc.lock, flags);
+
+	/* Prevent this channel being scheduled */
+	spin_lock(&d->lock);
+	list_del_init(&c->node);
+	spin_unlock(&d->lock);
+
+	/*
+	 * Stop DMA activity: we assume the callback will not be called
+	 * after bcm_dma_abort() returns (even if it does, it will see
+	 * c->desc is NULL and exit.)
+	 */
+	if (c->desc) {
+		c->desc = NULL;
+		bcm2835_dma_abort(c->chan_base);
+
+		/* Wait for stopping */
+		while (--timeout) {
+			if (!(readl(c->chan_base + BCM2835_DMA_CS) &
+						BCM2835_DMA_ACTIVE))
+				break;
+
+			cpu_relax();
+		}
+
+		if (!timeout)
+			dev_err(d->ddev.dev, "DMA transfer could not be terminated\n");
+	}
+
+	vchan_get_all_descriptors(&c->vc, &head);
+	spin_unlock_irqrestore(&c->vc.lock, flags);
+	vchan_dma_desc_free_list(&c->vc, &head);
+
+	return 0;
+}
+
+static int bcm2835_dma_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
+	unsigned long arg)
+{
+	struct bcm2835_chan *c = to_bcm2835_dma_chan(chan);
+
+	switch (cmd) {
+	case DMA_SLAVE_CONFIG:
+		return bcm2835_dma_slave_config(c,
+				(struct dma_slave_config *)arg);
+
+	case DMA_TERMINATE_ALL:
+		return bcm2835_dma_terminate_all(c);
+
+	default:
+		return -ENXIO;
+	}
+}
+
+static int bcm2835_dma_chan_init(struct bcm2835_dmadev *d, int chan_id, int irq)
+{
+	struct bcm2835_chan *c;
+
+	c = devm_kzalloc(d->ddev.dev, sizeof(*c), GFP_KERNEL);
+	if (!c)
+		return -ENOMEM;
+
+	c->vc.desc_free = bcm2835_dma_desc_free;
+	vchan_init(&c->vc, &d->ddev);
+	INIT_LIST_HEAD(&c->node);
+
+	d->ddev.chancnt++;
+
+	c->chan_base = BCM2835_DMA_CHANIO(d->base, chan_id);
+	c->ch = chan_id;
+	c->irq_number = irq;
+
+	return 0;
+}
+
+static void bcm2835_dma_free(struct bcm2835_dmadev *od)
+{
+	struct bcm2835_chan *c, *next;
+
+	list_for_each_entry_safe(c, next, &od->ddev.channels,
+				 vc.chan.device_node) {
+		list_del(&c->vc.chan.device_node);
+		tasklet_kill(&c->vc.task);
+	}
+}
+
+static const struct of_device_id bcm2835_dma_of_match[] = {
+	{ .compatible = "brcm,bcm2835-dma", },
+	{},
+};
+MODULE_DEVICE_TABLE(of, bcm2835_dma_of_match);
+
+static struct dma_chan *bcm2835_dma_xlate(struct of_phandle_args *spec,
+					   struct of_dma *ofdma)
+{
+	struct bcm2835_dmadev *d = ofdma->of_dma_data;
+	struct dma_chan *chan;
+
+	chan = dma_get_any_slave_channel(&d->ddev);
+	if (!chan)
+		return NULL;
+
+	/* Set DREQ from param */
+	to_bcm2835_dma_chan(chan)->dreq = spec->args[0];
+
+	return chan;
+}
+
+static int bcm2835_dma_device_slave_caps(struct dma_chan *dchan,
+	struct dma_slave_caps *caps)
+{
+	caps->src_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_4_BYTES);
+	caps->dstn_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_4_BYTES);
+	caps->directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV);
+	caps->cmd_pause = false;
+	caps->cmd_terminate = true;
+
+	return 0;
+}
+
+static int bcm2835_dma_probe(struct platform_device *pdev)
+{
+	struct bcm2835_dmadev *od;
+	struct resource *res;
+	void __iomem *base;
+	int rc;
+	int i;
+	int irq;
+	uint32_t chans_available;
+
+	if (!pdev->dev.dma_mask)
+		pdev->dev.dma_mask = &pdev->dev.coherent_dma_mask;
+
+	rc = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
+	if (rc)
+		return rc;
+
+	od = devm_kzalloc(&pdev->dev, sizeof(*od), GFP_KERNEL);
+	if (!od)
+		return -ENOMEM;
+
+	pdev->dev.dma_parms = &od->dma_parms;
+	dma_set_max_seg_size(&pdev->dev, 0x3FFFFFFF);
+
+	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	base = devm_ioremap_resource(&pdev->dev, res);
+	if (IS_ERR(base))
+		return PTR_ERR(base);
+
+	od->base = base;
+
+	dma_cap_set(DMA_SLAVE, od->ddev.cap_mask);
+	dma_cap_set(DMA_PRIVATE, od->ddev.cap_mask);
+	dma_cap_set(DMA_CYCLIC, od->ddev.cap_mask);
+	od->ddev.device_alloc_chan_resources = bcm2835_dma_alloc_chan_resources;
+	od->ddev.device_free_chan_resources = bcm2835_dma_free_chan_resources;
+	od->ddev.device_tx_status = bcm2835_dma_tx_status;
+	od->ddev.device_issue_pending = bcm2835_dma_issue_pending;
+	od->ddev.device_slave_caps = bcm2835_dma_device_slave_caps;
+	od->ddev.device_prep_dma_cyclic = bcm2835_dma_prep_dma_cyclic;
+	od->ddev.device_control = bcm2835_dma_control;
+	od->ddev.dev = &pdev->dev;
+	INIT_LIST_HEAD(&od->ddev.channels);
+	spin_lock_init(&od->lock);
+
+	platform_set_drvdata(pdev, od);
+
+	/* Request DMA channel mask from device tree */
+	if (of_property_read_u32(pdev->dev.of_node,
+			"brcm,dma-channel-mask",
+			&chans_available)) {
+		dev_err(&pdev->dev, "Failed to get channel mask\n");
+		rc = -EINVAL;
+		goto err_no_dma;
+	}
+
+	/*
+	 * Do not use the FIQ and BULK channels,
+	 * because they are used by the GPU.
+	 */
+	chans_available &= ~(BCM2835_DMA_FIQ_MASK | BCM2835_DMA_BULK_MASK);
+
+	for (i = 0; i < pdev->num_resources; i++) {
+		irq = platform_get_irq(pdev, i);
+		if (irq < 0)
+			break;
+
+		if (chans_available & (1 << i)) {
+			rc = bcm2835_dma_chan_init(od, i, irq);
+			if (rc)
+				goto err_no_dma;
+		}
+	}
+
+	dev_dbg(&pdev->dev, "Initialized %i DMA channels\n", i);
+
+	/* Device-tree DMA controller registration */
+	rc = of_dma_controller_register(pdev->dev.of_node,
+			bcm2835_dma_xlate, od);
+	if (rc) {
+		dev_err(&pdev->dev, "Failed to register DMA controller\n");
+		goto err_no_dma;
+	}
+
+	rc = dma_async_device_register(&od->ddev);
+	if (rc) {
+		dev_err(&pdev->dev,
+			"Failed to register slave DMA engine device: %d\n", rc);
+		goto err_no_dma;
+	}
+
+	dev_dbg(&pdev->dev, "Load BCM2835 DMA engine driver\n");
+
+	return 0;
+
+err_no_dma:
+	bcm2835_dma_free(od);
+	return rc;
+}
+
+static int bcm2835_dma_remove(struct platform_device *pdev)
+{
+	struct bcm2835_dmadev *od = platform_get_drvdata(pdev);
+
+	dma_async_device_unregister(&od->ddev);
+	bcm2835_dma_free(od);
+
+	return 0;
+}
+
+static struct platform_driver bcm2835_dma_driver = {
+	.probe	= bcm2835_dma_probe,
+	.remove	= bcm2835_dma_remove,
+	.driver = {
+		.name = "bcm2835-dma",
+		.owner = THIS_MODULE,
+		.of_match_table = of_match_ptr(bcm2835_dma_of_match),
+	},
+};
+
+module_platform_driver(bcm2835_dma_driver);
+
+MODULE_ALIAS("platform:bcm2835-dma");
+MODULE_DESCRIPTION("BCM2835 DMA engine driver");
+MODULE_AUTHOR("Florian Meier <florian.meier@koalo.de>");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/dma/cppi41.c b/drivers/dma/cppi41.c
index c29dacf..c18aebf 100644
--- a/drivers/dma/cppi41.c
+++ b/drivers/dma/cppi41.c
@@ -972,8 +972,10 @@
 		goto err_chans;
 
 	irq = irq_of_parse_and_map(dev->of_node, 0);
-	if (!irq)
+	if (!irq) {
+		ret = -EINVAL;
 		goto err_irq;
+	}
 
 	cppi_writel(USBSS_IRQ_PD_COMP, cdd->usbss_mem + USBSS_IRQ_ENABLER);
 
diff --git a/drivers/dma/dmatest.c b/drivers/dma/dmatest.c
index 9dfcaf5c..05b6dea 100644
--- a/drivers/dma/dmatest.c
+++ b/drivers/dma/dmatest.c
@@ -31,7 +31,7 @@
 		S_IRUGO | S_IWUSR);
 MODULE_PARM_DESC(channel, "Bus ID of the channel to test (default: any)");
 
-static char test_device[20];
+static char test_device[32];
 module_param_string(device, test_device, sizeof(test_device),
 		S_IRUGO | S_IWUSR);
 MODULE_PARM_DESC(device, "Bus ID of the DMA Engine to test (default: any)");
@@ -89,7 +89,7 @@
 struct dmatest_params {
 	unsigned int	buf_size;
 	char		channel[20];
-	char		device[20];
+	char		device[32];
 	unsigned int	threads_per_chan;
 	unsigned int	max_channels;
 	unsigned int	iterations;
diff --git a/drivers/dma/dw/core.c b/drivers/dma/dw/core.c
index 7516be4..13ac3f2 100644
--- a/drivers/dma/dw/core.c
+++ b/drivers/dma/dw/core.c
@@ -218,8 +218,10 @@
 	struct dw_dma	*dw = to_dw_dma(dwc->chan.device);
 	u32		ctllo;
 
-	/* Software emulation of LLP mode relies on interrupts to continue
-	 * multi block transfer. */
+	/*
+	 * Software emulation of LLP mode relies on interrupts to continue
+	 * multi block transfer.
+	 */
 	ctllo = desc->lli.ctllo | DWC_CTLL_INT_EN;
 
 	channel_writel(dwc, SAR, desc->lli.sar);
@@ -253,8 +255,7 @@
 						&dwc->flags);
 		if (was_soft_llp) {
 			dev_err(chan2dev(&dwc->chan),
-				"BUG: Attempted to start new LLP transfer "
-				"inside ongoing one\n");
+				"BUG: Attempted to start new LLP transfer inside ongoing one\n");
 			return;
 		}
 
@@ -420,8 +421,7 @@
 		return;
 	}
 
-	dev_vdbg(chan2dev(&dwc->chan), "%s: llp=0x%llx\n", __func__,
-			(unsigned long long)llp);
+	dev_vdbg(chan2dev(&dwc->chan), "%s: llp=%pad\n", __func__, &llp);
 
 	list_for_each_entry_safe(desc, _desc, &dwc->active_list, desc_node) {
 		/* Initial residue value */
@@ -567,9 +567,9 @@
 			unlikely(status_xfer & dwc->mask)) {
 		int i;
 
-		dev_err(chan2dev(&dwc->chan), "cyclic DMA unexpected %s "
-				"interrupt, stopping DMA transfer\n",
-				status_xfer ? "xfer" : "error");
+		dev_err(chan2dev(&dwc->chan),
+			"cyclic DMA unexpected %s interrupt, stopping DMA transfer\n",
+			status_xfer ? "xfer" : "error");
 
 		spin_lock_irqsave(&dwc->lock, flags);
 
@@ -711,9 +711,8 @@
 	u32			ctllo;
 
 	dev_vdbg(chan2dev(chan),
-			"%s: d0x%llx s0x%llx l0x%zx f0x%lx\n", __func__,
-			(unsigned long long)dest, (unsigned long long)src,
-			len, flags);
+			"%s: d%pad s%pad l0x%zx f0x%lx\n", __func__,
+			&dest, &src, len, flags);
 
 	if (unlikely(!len)) {
 		dev_dbg(chan2dev(chan), "%s: length is zero!\n", __func__);
@@ -1401,9 +1400,9 @@
 	/* Let's make a cyclic list */
 	last->lli.llp = cdesc->desc[0]->txd.phys;
 
-	dev_dbg(chan2dev(&dwc->chan), "cyclic prepared buf 0x%llx len %zu "
-			"period %zu periods %d\n", (unsigned long long)buf_addr,
-			buf_len, period_len, periods);
+	dev_dbg(chan2dev(&dwc->chan),
+			"cyclic prepared buf %pad len %zu period %zu periods %d\n",
+			&buf_addr, buf_len, period_len, periods);
 
 	cdesc->periods = periods;
 	dwc->cdesc = cdesc;
@@ -1603,9 +1602,11 @@
 			dev_dbg(chip->dev, "DWC_PARAMS[%d]: 0x%08x\n", i,
 					   dwc_params);
 
-			/* Decode maximum block size for given channel. The
+			/*
+			 * Decode maximum block size for given channel. The
 			 * stored 4 bit value represents blocks from 0x00 for 3
-			 * up to 0x0a for 4095. */
+			 * up to 0x0a for 4095.
+			 */
 			dwc->block_size =
 				(4 << ((max_blk_size >> 4 * i) & 0xf)) - 1;
 			dwc->nollp =
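As a quick check of the decode described in the last hunk's comment: a field value of 0x00 gives (4 << 0) - 1 = 3 and 0x0a gives (4 << 10) - 1 = 4095, matching the stated range. The helper below is purely illustrative, not part of the driver:

	/* Illustrative only: the per-channel 4-bit block-size decode used above. */
	static unsigned int dw_decode_block_size(u32 max_blk_size, unsigned int i)
	{
		return (4 << ((max_blk_size >> (4 * i)) & 0xf)) - 1;	/* 3 .. 4095 */
	}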
diff --git a/drivers/dma/edma.c b/drivers/dma/edma.c
index 2539ea0..cd8da45 100644
--- a/drivers/dma/edma.c
+++ b/drivers/dma/edma.c
@@ -699,8 +699,8 @@
 	echan->alloced = true;
 	echan->slot[0] = echan->ch_num;
 
-	dev_info(dev, "allocated channel for %u:%u\n",
-		 EDMA_CTLR(echan->ch_num), EDMA_CHAN_SLOT(echan->ch_num));
+	dev_dbg(dev, "allocated channel for %u:%u\n",
+		EDMA_CTLR(echan->ch_num), EDMA_CHAN_SLOT(echan->ch_num));
 
 	return 0;
 
@@ -736,7 +736,7 @@
 		echan->alloced = false;
 	}
 
-	dev_info(dev, "freeing channel for %u\n", echan->ch_num);
+	dev_dbg(dev, "freeing channel for %u\n", echan->ch_num);
 }
 
 /* Send pending descriptor to hardware */
diff --git a/drivers/dma/fsldma.h b/drivers/dma/fsldma.h
index 1ffc244..d56e835 100644
--- a/drivers/dma/fsldma.h
+++ b/drivers/dma/fsldma.h
@@ -41,7 +41,7 @@
  * channel is allowed to transfer before the DMA engine pauses
  * the current channel and switches to the next channel
  */
-#define FSL_DMA_MR_BWC         0x08000000
+#define FSL_DMA_MR_BWC         0x0A000000
 
 /* Special MR definition for MPC8349 */
 #define FSL_DMA_MR_EOTIE	0x00000080
diff --git a/drivers/dma/imx-sdma.c b/drivers/dma/imx-sdma.c
index c75679d..4e79183 100644
--- a/drivers/dma/imx-sdma.c
+++ b/drivers/dma/imx-sdma.c
@@ -323,6 +323,7 @@
 	struct clk			*clk_ipg;
 	struct clk			*clk_ahb;
 	spinlock_t			channel_0_lock;
+	u32				script_number;
 	struct sdma_script_start_addrs	*script_addrs;
 	const struct sdma_driver_data	*drvdata;
 };
@@ -724,6 +725,10 @@
 		per_2_emi = sdma->script_addrs->app_2_mcu_addr;
 		emi_2_per = sdma->script_addrs->mcu_2_app_addr;
 		break;
+	case IMX_DMATYPE_SSI_DUAL:
+		per_2_emi = sdma->script_addrs->ssish_2_mcu_addr;
+		emi_2_per = sdma->script_addrs->mcu_2_ssish_addr;
+		break;
 	case IMX_DMATYPE_SSI_SP:
 	case IMX_DMATYPE_MMC:
 	case IMX_DMATYPE_SDHC:
@@ -1238,6 +1243,7 @@
 }
 
 #define SDMA_SCRIPT_ADDRS_ARRAY_SIZE_V1	34
+#define SDMA_SCRIPT_ADDRS_ARRAY_SIZE_V2	38
 
 static void sdma_add_scripts(struct sdma_engine *sdma,
 		const struct sdma_script_start_addrs *addr)
@@ -1246,7 +1252,11 @@
 	s32 *saddr_arr = (u32 *)sdma->script_addrs;
 	int i;
 
-	for (i = 0; i < SDMA_SCRIPT_ADDRS_ARRAY_SIZE_V1; i++)
+	/* use the default firmware in ROM if missing external firmware */
+	if (!sdma->script_number)
+		sdma->script_number = SDMA_SCRIPT_ADDRS_ARRAY_SIZE_V1;
+
+	for (i = 0; i < sdma->script_number; i++)
 		if (addr_arr[i] > 0)
 			saddr_arr[i] = addr_arr[i];
 }
@@ -1272,6 +1282,17 @@
 		goto err_firmware;
 	if (header->ram_code_start + header->ram_code_size > fw->size)
 		goto err_firmware;
+	switch (header->version_major) {
+	case 1:
+		sdma->script_number = SDMA_SCRIPT_ADDRS_ARRAY_SIZE_V1;
+		break;
+	case 2:
+		sdma->script_number = SDMA_SCRIPT_ADDRS_ARRAY_SIZE_V2;
+		break;
+	default:
+		dev_err(sdma->dev, "unknown firmware version\n");
+		goto err_firmware;
+	}
 
 	addr = (void *)header + header->script_addrs_start;
 	ram_code = (void *)header + header->ram_code_start;
diff --git a/drivers/dma/k3dma.c b/drivers/dma/k3dma.c
index e260754..a1f911a 100644
--- a/drivers/dma/k3dma.c
+++ b/drivers/dma/k3dma.c
@@ -477,7 +477,7 @@
 	dma_addr_t addr, src = 0, dst = 0;
 	int num = sglen, i;
 
-	if (sgl == 0)
+	if (sgl == NULL)
 		return NULL;
 
 	for_each_sg(sgl, sg, sglen, i) {
@@ -817,7 +817,7 @@
 	return 0;
 }
 
-SIMPLE_DEV_PM_OPS(k3_dma_pmops, k3_dma_suspend, k3_dma_resume);
+static SIMPLE_DEV_PM_OPS(k3_dma_pmops, k3_dma_suspend, k3_dma_resume);
 
 static struct platform_driver k3_pdma_driver = {
 	.driver		= {
diff --git a/drivers/dma/mmp_pdma.c b/drivers/dma/mmp_pdma.c
index c6a01ea..b439679 100644
--- a/drivers/dma/mmp_pdma.c
+++ b/drivers/dma/mmp_pdma.c
@@ -5,6 +5,7 @@
  * it under the terms of the GNU General Public License version 2 as
  * published by the Free Software Foundation.
  */
+
 #include <linux/err.h>
 #include <linux/module.h>
 #include <linux/init.h>
@@ -32,38 +33,37 @@
 #define DTADR		0x0208
 #define DCMD		0x020c
 
-#define DCSR_RUN	(1 << 31)	/* Run Bit (read / write) */
-#define DCSR_NODESC	(1 << 30)	/* No-Descriptor Fetch (read / write) */
-#define DCSR_STOPIRQEN	(1 << 29)	/* Stop Interrupt Enable (read / write) */
-#define DCSR_REQPEND	(1 << 8)	/* Request Pending (read-only) */
-#define DCSR_STOPSTATE	(1 << 3)	/* Stop State (read-only) */
-#define DCSR_ENDINTR	(1 << 2)	/* End Interrupt (read / write) */
-#define DCSR_STARTINTR	(1 << 1)	/* Start Interrupt (read / write) */
-#define DCSR_BUSERR	(1 << 0)	/* Bus Error Interrupt (read / write) */
+#define DCSR_RUN	BIT(31)	/* Run Bit (read / write) */
+#define DCSR_NODESC	BIT(30)	/* No-Descriptor Fetch (read / write) */
+#define DCSR_STOPIRQEN	BIT(29)	/* Stop Interrupt Enable (read / write) */
+#define DCSR_REQPEND	BIT(8)	/* Request Pending (read-only) */
+#define DCSR_STOPSTATE	BIT(3)	/* Stop State (read-only) */
+#define DCSR_ENDINTR	BIT(2)	/* End Interrupt (read / write) */
+#define DCSR_STARTINTR	BIT(1)	/* Start Interrupt (read / write) */
+#define DCSR_BUSERR	BIT(0)	/* Bus Error Interrupt (read / write) */
 
-#define DCSR_EORIRQEN	(1 << 28)       /* End of Receive Interrupt Enable (R/W) */
-#define DCSR_EORJMPEN	(1 << 27)       /* Jump to next descriptor on EOR */
-#define DCSR_EORSTOPEN	(1 << 26)       /* STOP on an EOR */
-#define DCSR_SETCMPST	(1 << 25)       /* Set Descriptor Compare Status */
-#define DCSR_CLRCMPST	(1 << 24)       /* Clear Descriptor Compare Status */
-#define DCSR_CMPST	(1 << 10)       /* The Descriptor Compare Status */
-#define DCSR_EORINTR	(1 << 9)        /* The end of Receive */
+#define DCSR_EORIRQEN	BIT(28)	/* End of Receive Interrupt Enable (R/W) */
+#define DCSR_EORJMPEN	BIT(27)	/* Jump to next descriptor on EOR */
+#define DCSR_EORSTOPEN	BIT(26)	/* STOP on an EOR */
+#define DCSR_SETCMPST	BIT(25)	/* Set Descriptor Compare Status */
+#define DCSR_CLRCMPST	BIT(24)	/* Clear Descriptor Compare Status */
+#define DCSR_CMPST	BIT(10)	/* The Descriptor Compare Status */
+#define DCSR_EORINTR	BIT(9)	/* The end of Receive */
 
-#define DRCMR(n)	((((n) < 64) ? 0x0100 : 0x1100) + \
-				 (((n) & 0x3f) << 2))
-#define DRCMR_MAPVLD	(1 << 7)	/* Map Valid (read / write) */
-#define DRCMR_CHLNUM	0x1f		/* mask for Channel Number (read / write) */
+#define DRCMR(n)	((((n) < 64) ? 0x0100 : 0x1100) + (((n) & 0x3f) << 2))
+#define DRCMR_MAPVLD	BIT(7)	/* Map Valid (read / write) */
+#define DRCMR_CHLNUM	0x1f	/* mask for Channel Number (read / write) */
 
 #define DDADR_DESCADDR	0xfffffff0	/* Address of next descriptor (mask) */
-#define DDADR_STOP	(1 << 0)	/* Stop (read / write) */
+#define DDADR_STOP	BIT(0)	/* Stop (read / write) */
 
-#define DCMD_INCSRCADDR	(1 << 31)	/* Source Address Increment Setting. */
-#define DCMD_INCTRGADDR	(1 << 30)	/* Target Address Increment Setting. */
-#define DCMD_FLOWSRC	(1 << 29)	/* Flow Control by the source. */
-#define DCMD_FLOWTRG	(1 << 28)	/* Flow Control by the target. */
-#define DCMD_STARTIRQEN	(1 << 22)	/* Start Interrupt Enable */
-#define DCMD_ENDIRQEN	(1 << 21)	/* End Interrupt Enable */
-#define DCMD_ENDIAN	(1 << 18)	/* Device Endian-ness. */
+#define DCMD_INCSRCADDR	BIT(31)	/* Source Address Increment Setting. */
+#define DCMD_INCTRGADDR	BIT(30)	/* Target Address Increment Setting. */
+#define DCMD_FLOWSRC	BIT(29)	/* Flow Control by the source. */
+#define DCMD_FLOWTRG	BIT(28)	/* Flow Control by the target. */
+#define DCMD_STARTIRQEN	BIT(22)	/* Start Interrupt Enable */
+#define DCMD_ENDIRQEN	BIT(21)	/* End Interrupt Enable */
+#define DCMD_ENDIAN	BIT(18)	/* Device Endian-ness. */
 #define DCMD_BURST8	(1 << 16)	/* 8 byte burst */
 #define DCMD_BURST16	(2 << 16)	/* 16 byte burst */
 #define DCMD_BURST32	(3 << 16)	/* 32 byte burst */
@@ -132,10 +132,14 @@
 	spinlock_t phy_lock; /* protect alloc/free phy channels */
 };
 
-#define tx_to_mmp_pdma_desc(tx) container_of(tx, struct mmp_pdma_desc_sw, async_tx)
-#define to_mmp_pdma_desc(lh) container_of(lh, struct mmp_pdma_desc_sw, node)
-#define to_mmp_pdma_chan(dchan) container_of(dchan, struct mmp_pdma_chan, chan)
-#define to_mmp_pdma_dev(dmadev) container_of(dmadev, struct mmp_pdma_device, device)
+#define tx_to_mmp_pdma_desc(tx)					\
+	container_of(tx, struct mmp_pdma_desc_sw, async_tx)
+#define to_mmp_pdma_desc(lh)					\
+	container_of(lh, struct mmp_pdma_desc_sw, node)
+#define to_mmp_pdma_chan(dchan)					\
+	container_of(dchan, struct mmp_pdma_chan, chan)
+#define to_mmp_pdma_dev(dmadev)					\
+	container_of(dmadev, struct mmp_pdma_device, device)
 
 static void set_desc(struct mmp_pdma_phy *phy, dma_addr_t addr)
 {
@@ -162,19 +166,18 @@
 	writel(dalgn, phy->base + DALGN);
 
 	reg = (phy->idx << 2) + DCSR;
-	writel(readl(phy->base + reg) | DCSR_RUN,
-					phy->base + reg);
+	writel(readl(phy->base + reg) | DCSR_RUN, phy->base + reg);
 }
 
 static void disable_chan(struct mmp_pdma_phy *phy)
 {
 	u32 reg;
 
-	if (phy) {
-		reg = (phy->idx << 2) + DCSR;
-		writel(readl(phy->base + reg) & ~DCSR_RUN,
-						phy->base + reg);
-	}
+	if (!phy)
+		return;
+
+	reg = (phy->idx << 2) + DCSR;
+	writel(readl(phy->base + reg) & ~DCSR_RUN, phy->base + reg);
 }
 
 static int clear_chan_irq(struct mmp_pdma_phy *phy)
@@ -183,26 +186,27 @@
 	u32 dint = readl(phy->base + DINT);
 	u32 reg = (phy->idx << 2) + DCSR;
 
-	if (dint & BIT(phy->idx)) {
-		/* clear irq */
-		dcsr = readl(phy->base + reg);
-		writel(dcsr, phy->base + reg);
-		if ((dcsr & DCSR_BUSERR) && (phy->vchan))
-			dev_warn(phy->vchan->dev, "DCSR_BUSERR\n");
-		return 0;
-	}
-	return -EAGAIN;
+	if (!(dint & BIT(phy->idx)))
+		return -EAGAIN;
+
+	/* clear irq */
+	dcsr = readl(phy->base + reg);
+	writel(dcsr, phy->base + reg);
+	if ((dcsr & DCSR_BUSERR) && (phy->vchan))
+		dev_warn(phy->vchan->dev, "DCSR_BUSERR\n");
+
+	return 0;
 }
 
 static irqreturn_t mmp_pdma_chan_handler(int irq, void *dev_id)
 {
 	struct mmp_pdma_phy *phy = dev_id;
 
-	if (clear_chan_irq(phy) == 0) {
-		tasklet_schedule(&phy->vchan->tasklet);
-		return IRQ_HANDLED;
-	} else
+	if (clear_chan_irq(phy) != 0)
 		return IRQ_NONE;
+
+	tasklet_schedule(&phy->vchan->tasklet);
+	return IRQ_HANDLED;
 }
 
 static irqreturn_t mmp_pdma_int_handler(int irq, void *dev_id)
@@ -224,8 +228,8 @@
 
 	if (irq_num)
 		return IRQ_HANDLED;
-	else
-		return IRQ_NONE;
+
+	return IRQ_NONE;
 }
 
 /* lookup free phy channel as descending priority */
@@ -245,9 +249,9 @@
 	 */
 
 	spin_lock_irqsave(&pdev->phy_lock, flags);
-	for (prio = 0; prio <= (((pdev->dma_channels - 1) & 0xf) >> 2); prio++) {
+	for (prio = 0; prio <= ((pdev->dma_channels - 1) & 0xf) >> 2; prio++) {
 		for (i = 0; i < pdev->dma_channels; i++) {
-			if (prio != ((i & 0xf) >> 2))
+			if (prio != (i & 0xf) >> 2)
 				continue;
 			phy = &pdev->phy[i];
 			if (!phy->vchan) {
@@ -389,14 +393,16 @@
 	if (chan->desc_pool)
 		return 1;
 
-	chan->desc_pool =
-		dma_pool_create(dev_name(&dchan->dev->device), chan->dev,
-				  sizeof(struct mmp_pdma_desc_sw),
-				  __alignof__(struct mmp_pdma_desc_sw), 0);
+	chan->desc_pool = dma_pool_create(dev_name(&dchan->dev->device),
+					  chan->dev,
+					  sizeof(struct mmp_pdma_desc_sw),
+					  __alignof__(struct mmp_pdma_desc_sw),
+					  0);
 	if (!chan->desc_pool) {
 		dev_err(chan->dev, "unable to allocate descriptor pool\n");
 		return -ENOMEM;
 	}
+
 	mmp_pdma_free_phy(chan);
 	chan->idle = true;
 	chan->dev_addr = 0;
@@ -404,7 +410,7 @@
 }
 
 static void mmp_pdma_free_desc_list(struct mmp_pdma_chan *chan,
-				  struct list_head *list)
+				    struct list_head *list)
 {
 	struct mmp_pdma_desc_sw *desc, *_desc;
 
@@ -434,8 +440,8 @@
 
 static struct dma_async_tx_descriptor *
 mmp_pdma_prep_memcpy(struct dma_chan *dchan,
-	dma_addr_t dma_dst, dma_addr_t dma_src,
-	size_t len, unsigned long flags)
+		     dma_addr_t dma_dst, dma_addr_t dma_src,
+		     size_t len, unsigned long flags)
 {
 	struct mmp_pdma_chan *chan;
 	struct mmp_pdma_desc_sw *first = NULL, *prev = NULL, *new;
@@ -515,8 +521,8 @@
 
 static struct dma_async_tx_descriptor *
 mmp_pdma_prep_slave_sg(struct dma_chan *dchan, struct scatterlist *sgl,
-			 unsigned int sg_len, enum dma_transfer_direction dir,
-			 unsigned long flags, void *context)
+		       unsigned int sg_len, enum dma_transfer_direction dir,
+		       unsigned long flags, void *context)
 {
 	struct mmp_pdma_chan *chan = to_mmp_pdma_chan(dchan);
 	struct mmp_pdma_desc_sw *first = NULL, *prev = NULL, *new = NULL;
@@ -591,10 +597,11 @@
 	return NULL;
 }
 
-static struct dma_async_tx_descriptor *mmp_pdma_prep_dma_cyclic(
-	struct dma_chan *dchan, dma_addr_t buf_addr, size_t len,
-	size_t period_len, enum dma_transfer_direction direction,
-	unsigned long flags, void *context)
+static struct dma_async_tx_descriptor *
+mmp_pdma_prep_dma_cyclic(struct dma_chan *dchan,
+			 dma_addr_t buf_addr, size_t len, size_t period_len,
+			 enum dma_transfer_direction direction,
+			 unsigned long flags, void *context)
 {
 	struct mmp_pdma_chan *chan;
 	struct mmp_pdma_desc_sw *first = NULL, *prev = NULL, *new;
@@ -636,8 +643,8 @@
 			goto fail;
 		}
 
-		new->desc.dcmd = chan->dcmd | DCMD_ENDIRQEN |
-					(DCMD_LENGTH & period_len);
+		new->desc.dcmd = (chan->dcmd | DCMD_ENDIRQEN |
+				  (DCMD_LENGTH & period_len));
 		new->desc.dsadr = dma_src;
 		new->desc.dtadr = dma_dst;
 
@@ -677,12 +684,11 @@
 }
 
 static int mmp_pdma_control(struct dma_chan *dchan, enum dma_ctrl_cmd cmd,
-		unsigned long arg)
+			    unsigned long arg)
 {
 	struct mmp_pdma_chan *chan = to_mmp_pdma_chan(dchan);
 	struct dma_slave_config *cfg = (void *)arg;
 	unsigned long flags;
-	int ret = 0;
 	u32 maxburst = 0, addr = 0;
 	enum dma_slave_buswidth width = DMA_SLAVE_BUSWIDTH_UNDEFINED;
 
@@ -739,11 +745,12 @@
 		return -ENOSYS;
 	}
 
-	return ret;
+	return 0;
 }
 
 static enum dma_status mmp_pdma_tx_status(struct dma_chan *dchan,
-			dma_cookie_t cookie, struct dma_tx_state *txstate)
+					  dma_cookie_t cookie,
+					  struct dma_tx_state *txstate)
 {
 	return dma_cookie_status(dchan, cookie, txstate);
 }
@@ -845,15 +852,14 @@
 	return 0;
 }
 
-static int mmp_pdma_chan_init(struct mmp_pdma_device *pdev,
-							int idx, int irq)
+static int mmp_pdma_chan_init(struct mmp_pdma_device *pdev, int idx, int irq)
 {
 	struct mmp_pdma_phy *phy  = &pdev->phy[idx];
 	struct mmp_pdma_chan *chan;
 	int ret;
 
-	chan = devm_kzalloc(pdev->dev,
-			sizeof(struct mmp_pdma_chan), GFP_KERNEL);
+	chan = devm_kzalloc(pdev->dev, sizeof(struct mmp_pdma_chan),
+			    GFP_KERNEL);
 	if (chan == NULL)
 		return -ENOMEM;
 
@@ -861,8 +867,8 @@
 	phy->base = pdev->base;
 
 	if (irq) {
-		ret = devm_request_irq(pdev->dev, irq,
-			mmp_pdma_chan_handler, 0, "pdma", phy);
+		ret = devm_request_irq(pdev->dev, irq, mmp_pdma_chan_handler, 0,
+				       "pdma", phy);
 		if (ret) {
 			dev_err(pdev->dev, "channel request irq fail!\n");
 			return ret;
@@ -877,8 +883,7 @@
 	INIT_LIST_HEAD(&chan->chain_running);
 
 	/* register virt channel to dma engine */
-	list_add_tail(&chan->chan.device_node,
-			&pdev->device.channels);
+	list_add_tail(&chan->chan.device_node, &pdev->device.channels);
 
 	return 0;
 }
@@ -894,14 +899,12 @@
 {
 	struct mmp_pdma_device *d = ofdma->of_dma_data;
 	struct dma_chan *chan;
-	struct mmp_pdma_chan *c;
 
 	chan = dma_get_any_slave_channel(&d->device);
 	if (!chan)
 		return NULL;
 
-	c = to_mmp_pdma_chan(chan);
-	c->drcmr = dma_spec->args[0];
+	to_mmp_pdma_chan(chan)->drcmr = dma_spec->args[0];
 
 	return chan;
 }
@@ -918,6 +921,7 @@
 	pdev = devm_kzalloc(&op->dev, sizeof(*pdev), GFP_KERNEL);
 	if (!pdev)
 		return -ENOMEM;
+
 	pdev->dev = &op->dev;
 
 	spin_lock_init(&pdev->phy_lock);
@@ -929,8 +933,8 @@
 
 	of_id = of_match_device(mmp_pdma_dt_ids, pdev->dev);
 	if (of_id)
-		of_property_read_u32(pdev->dev->of_node,
-				"#dma-channels", &dma_channels);
+		of_property_read_u32(pdev->dev->of_node, "#dma-channels",
+				     &dma_channels);
 	else if (pdata && pdata->dma_channels)
 		dma_channels = pdata->dma_channels;
 	else
@@ -942,8 +946,9 @@
 			irq_num++;
 	}
 
-	pdev->phy = devm_kzalloc(pdev->dev,
-		dma_channels * sizeof(struct mmp_pdma_chan), GFP_KERNEL);
+	pdev->phy = devm_kcalloc(pdev->dev,
+				 dma_channels, sizeof(struct mmp_pdma_chan),
+				 GFP_KERNEL);
 	if (pdev->phy == NULL)
 		return -ENOMEM;
 
@@ -952,8 +957,8 @@
 	if (irq_num != dma_channels) {
 		/* all chan share one irq, demux inside */
 		irq = platform_get_irq(op, 0);
-		ret = devm_request_irq(pdev->dev, irq,
-			mmp_pdma_int_handler, 0, "pdma", pdev);
+		ret = devm_request_irq(pdev->dev, irq, mmp_pdma_int_handler, 0,
+				       "pdma", pdev);
 		if (ret)
 			return ret;
 	}
@@ -1029,7 +1034,7 @@
 	if (chan->device->dev->driver != &mmp_pdma_driver.driver)
 		return false;
 
-	c->drcmr = *(unsigned int *) param;
+	c->drcmr = *(unsigned int *)param;
 
 	return true;
 }
@@ -1037,6 +1042,6 @@
 
 module_platform_driver(mmp_pdma_driver);
 
-MODULE_DESCRIPTION("MARVELL MMP Periphera DMA Driver");
+MODULE_DESCRIPTION("MARVELL MMP Peripheral DMA Driver");
 MODULE_AUTHOR("Marvell International Ltd.");
 MODULE_LICENSE("GPL v2");
diff --git a/drivers/dma/mmp_tdma.c b/drivers/dma/mmp_tdma.c
index 3ddacc1..33f96aa 100644
--- a/drivers/dma/mmp_tdma.c
+++ b/drivers/dma/mmp_tdma.c
@@ -121,11 +121,13 @@
 	int				idx;
 	enum mmp_tdma_type		type;
 	int				irq;
-	unsigned long			reg_base;
+	void __iomem			*reg_base;
 
 	size_t				buf_len;
 	size_t				period_len;
 	size_t				pos;
+
+	struct gen_pool			*pool;
 };
 
 #define TDMA_CHANNEL_NUM 2
@@ -182,7 +184,7 @@
 
 static int mmp_tdma_config_chan(struct mmp_tdma_chan *tdmac)
 {
-	unsigned int tdcr;
+	unsigned int tdcr = 0;
 
 	mmp_tdma_disable_chan(tdmac);
 
@@ -324,7 +326,7 @@
 	struct gen_pool *gpool;
 	int size = tdmac->desc_num * sizeof(struct mmp_tdma_desc);
 
-	gpool = sram_get_gpool("asram");
+	gpool = tdmac->pool;
 	if (tdmac->desc_arr)
 		gen_pool_free(gpool, (unsigned long)tdmac->desc_arr,
 				size);
@@ -374,7 +376,7 @@
 	struct gen_pool *gpool;
 	int size = tdmac->desc_num * sizeof(struct mmp_tdma_desc);
 
-	gpool = sram_get_gpool("asram");
+	gpool = tdmac->pool;
 	if (!gpool)
 		return NULL;
 
@@ -505,7 +507,8 @@
 }
 
 static int mmp_tdma_chan_init(struct mmp_tdma_device *tdev,
-						int idx, int irq, int type)
+					int idx, int irq,
+					int type, struct gen_pool *pool)
 {
 	struct mmp_tdma_chan *tdmac;
 
@@ -526,7 +529,8 @@
 	tdmac->chan.device = &tdev->device;
 	tdmac->idx	   = idx;
 	tdmac->type	   = type;
-	tdmac->reg_base	   = (unsigned long)tdev->base + idx * 4;
+	tdmac->reg_base	   = tdev->base + idx * 4;
+	tdmac->pool	   = pool;
 	tdmac->status = DMA_COMPLETE;
 	tdev->tdmac[tdmac->idx] = tdmac;
 	tasklet_init(&tdmac->tasklet, dma_do_tasklet, (unsigned long)tdmac);
@@ -553,6 +557,7 @@
 	int i, ret;
 	int irq = 0, irq_num = 0;
 	int chan_num = TDMA_CHANNEL_NUM;
+	struct gen_pool *pool;
 
 	of_id = of_match_device(mmp_tdma_dt_ids, &pdev->dev);
 	if (of_id)
@@ -579,6 +584,15 @@
 
 	INIT_LIST_HEAD(&tdev->device.channels);
 
+	if (pdev->dev.of_node)
+		pool = of_get_named_gen_pool(pdev->dev.of_node, "asram", 0);
+	else
+		pool = sram_get_gpool("asram");
+	if (!pool) {
+		dev_err(&pdev->dev, "asram pool not available\n");
+		return -ENOMEM;
+	}
+
 	if (irq_num != chan_num) {
 		irq = platform_get_irq(pdev, 0);
 		ret = devm_request_irq(&pdev->dev, irq,
@@ -590,7 +604,7 @@
 	/* initialize channel parameters */
 	for (i = 0; i < chan_num; i++) {
 		irq = (irq_num != chan_num) ? 0 : platform_get_irq(pdev, i);
-		ret = mmp_tdma_chan_init(tdev, i, irq, type);
+		ret = mmp_tdma_chan_init(tdev, i, irq, type, pool);
 		if (ret)
 			return ret;
 	}
diff --git a/drivers/dma/moxart-dma.c b/drivers/dma/moxart-dma.c
new file mode 100644
index 0000000..3258e48
--- /dev/null
+++ b/drivers/dma/moxart-dma.c
@@ -0,0 +1,699 @@
+/*
+ * MOXA ART SoCs DMA Engine support.
+ *
+ * Copyright (C) 2013 Jonas Jensen
+ *
+ * Jonas Jensen <jonas.jensen@gmail.com>
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2.  This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ */
+
+#include <linux/dmaengine.h>
+#include <linux/dma-mapping.h>
+#include <linux/err.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/list.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
+#include <linux/of_dma.h>
+#include <linux/bitops.h>
+
+#include <asm/cacheflush.h>
+
+#include "dmaengine.h"
+#include "virt-dma.h"
+
+#define APB_DMA_MAX_CHANNEL			4
+
+#define REG_OFF_ADDRESS_SOURCE			0
+#define REG_OFF_ADDRESS_DEST			4
+#define REG_OFF_CYCLES				8
+#define REG_OFF_CTRL				12
+#define REG_OFF_CHAN_SIZE			16
+
+#define APB_DMA_ENABLE				BIT(0)
+#define APB_DMA_FIN_INT_STS			BIT(1)
+#define APB_DMA_FIN_INT_EN			BIT(2)
+#define APB_DMA_BURST_MODE			BIT(3)
+#define APB_DMA_ERR_INT_STS			BIT(4)
+#define APB_DMA_ERR_INT_EN			BIT(5)
+
+/*
+ * Unset: APB
+ * Set:   AHB
+ */
+#define APB_DMA_SOURCE_SELECT			0x40
+#define APB_DMA_DEST_SELECT			0x80
+
+#define APB_DMA_SOURCE				0x100
+#define APB_DMA_DEST				0x1000
+
+#define APB_DMA_SOURCE_MASK			0x700
+#define APB_DMA_DEST_MASK			0x7000
+
+/*
+ * 000: No increment
+ * 001: +1 (Burst=0), +4  (Burst=1)
+ * 010: +2 (Burst=0), +8  (Burst=1)
+ * 011: +4 (Burst=0), +16 (Burst=1)
+ * 101: -1 (Burst=0), -4  (Burst=1)
+ * 110: -2 (Burst=0), -8  (Burst=1)
+ * 111: -4 (Burst=0), -16 (Burst=1)
+ */
+#define APB_DMA_SOURCE_INC_0			0
+#define APB_DMA_SOURCE_INC_1_4			0x100
+#define APB_DMA_SOURCE_INC_2_8			0x200
+#define APB_DMA_SOURCE_INC_4_16			0x300
+#define APB_DMA_SOURCE_DEC_1_4			0x500
+#define APB_DMA_SOURCE_DEC_2_8			0x600
+#define APB_DMA_SOURCE_DEC_4_16			0x700
+#define APB_DMA_DEST_INC_0			0
+#define APB_DMA_DEST_INC_1_4			0x1000
+#define APB_DMA_DEST_INC_2_8			0x2000
+#define APB_DMA_DEST_INC_4_16			0x3000
+#define APB_DMA_DEST_DEC_1_4			0x5000
+#define APB_DMA_DEST_DEC_2_8			0x6000
+#define APB_DMA_DEST_DEC_4_16			0x7000
+
+/*
+ * Request signal select source/destination address for DMA hardware handshake.
+ *
+ * The request line number is a property of the DMA controller itself,
+ * e.g. MMC must always request channels where dma_slave_config->slave_id is 5.
+ *
+ * 0:    No request / Grant signal
+ * 1-15: Request    / Grant signal
+ */
+#define APB_DMA_SOURCE_REQ_NO			0x1000000
+#define APB_DMA_SOURCE_REQ_NO_MASK		0xf000000
+#define APB_DMA_DEST_REQ_NO			0x10000
+#define APB_DMA_DEST_REQ_NO_MASK		0xf0000
+
+#define APB_DMA_DATA_WIDTH			0x100000
+#define APB_DMA_DATA_WIDTH_MASK			0x300000
+/*
+ * Data width of transfer:
+ *
+ * 00: Word
+ * 01: Half
+ * 10: Byte
+ */
+#define APB_DMA_DATA_WIDTH_4			0
+#define APB_DMA_DATA_WIDTH_2			0x100000
+#define APB_DMA_DATA_WIDTH_1			0x200000
+
+#define APB_DMA_CYCLES_MASK			0x00ffffff
+
+#define MOXART_DMA_DATA_TYPE_S8			0x00
+#define MOXART_DMA_DATA_TYPE_S16		0x01
+#define MOXART_DMA_DATA_TYPE_S32		0x02
+
+struct moxart_sg {
+	dma_addr_t addr;
+	uint32_t len;
+};
+
+struct moxart_desc {
+	enum dma_transfer_direction	dma_dir;
+	dma_addr_t			dev_addr;
+	unsigned int			sglen;
+	unsigned int			dma_cycles;
+	struct virt_dma_desc		vd;
+	uint8_t				es;
+	struct moxart_sg		sg[0];
+};
+
+struct moxart_chan {
+	struct virt_dma_chan		vc;
+
+	void __iomem			*base;
+	struct moxart_desc		*desc;
+
+	struct dma_slave_config		cfg;
+
+	bool				allocated;
+	bool				error;
+	int				ch_num;
+	unsigned int			line_reqno;
+	unsigned int			sgidx;
+};
+
+struct moxart_dmadev {
+	struct dma_device		dma_slave;
+	struct moxart_chan		slave_chans[APB_DMA_MAX_CHANNEL];
+};
+
+struct moxart_filter_data {
+	struct moxart_dmadev		*mdc;
+	struct of_phandle_args		*dma_spec;
+};
+
+static const unsigned int es_bytes[] = {
+	[MOXART_DMA_DATA_TYPE_S8] = 1,
+	[MOXART_DMA_DATA_TYPE_S16] = 2,
+	[MOXART_DMA_DATA_TYPE_S32] = 4,
+};
+
+static struct device *chan2dev(struct dma_chan *chan)
+{
+	return &chan->dev->device;
+}
+
+static inline struct moxart_chan *to_moxart_dma_chan(struct dma_chan *c)
+{
+	return container_of(c, struct moxart_chan, vc.chan);
+}
+
+static inline struct moxart_desc *to_moxart_dma_desc(
+	struct dma_async_tx_descriptor *t)
+{
+	return container_of(t, struct moxart_desc, vd.tx);
+}
+
+static void moxart_dma_desc_free(struct virt_dma_desc *vd)
+{
+	kfree(container_of(vd, struct moxart_desc, vd));
+}
+
+static int moxart_terminate_all(struct dma_chan *chan)
+{
+	struct moxart_chan *ch = to_moxart_dma_chan(chan);
+	unsigned long flags;
+	LIST_HEAD(head);
+	u32 ctrl;
+
+	dev_dbg(chan2dev(chan), "%s: ch=%p\n", __func__, ch);
+
+	spin_lock_irqsave(&ch->vc.lock, flags);
+
+	if (ch->desc)
+		ch->desc = NULL;
+
+	ctrl = readl(ch->base + REG_OFF_CTRL);
+	ctrl &= ~(APB_DMA_ENABLE | APB_DMA_FIN_INT_EN | APB_DMA_ERR_INT_EN);
+	writel(ctrl, ch->base + REG_OFF_CTRL);
+
+	vchan_get_all_descriptors(&ch->vc, &head);
+	spin_unlock_irqrestore(&ch->vc.lock, flags);
+	vchan_dma_desc_free_list(&ch->vc, &head);
+
+	return 0;
+}
+
+static int moxart_slave_config(struct dma_chan *chan,
+			       struct dma_slave_config *cfg)
+{
+	struct moxart_chan *ch = to_moxart_dma_chan(chan);
+	u32 ctrl;
+
+	ch->cfg = *cfg;
+
+	ctrl = readl(ch->base + REG_OFF_CTRL);
+	ctrl |= APB_DMA_BURST_MODE;
+	ctrl &= ~(APB_DMA_DEST_MASK | APB_DMA_SOURCE_MASK);
+	ctrl &= ~(APB_DMA_DEST_REQ_NO_MASK | APB_DMA_SOURCE_REQ_NO_MASK);
+
+	switch (ch->cfg.src_addr_width) {
+	case DMA_SLAVE_BUSWIDTH_1_BYTE:
+		ctrl |= APB_DMA_DATA_WIDTH_1;
+		if (ch->cfg.direction != DMA_MEM_TO_DEV)
+			ctrl |= APB_DMA_DEST_INC_1_4;
+		else
+			ctrl |= APB_DMA_SOURCE_INC_1_4;
+		break;
+	case DMA_SLAVE_BUSWIDTH_2_BYTES:
+		ctrl |= APB_DMA_DATA_WIDTH_2;
+		if (ch->cfg.direction != DMA_MEM_TO_DEV)
+			ctrl |= APB_DMA_DEST_INC_2_8;
+		else
+			ctrl |= APB_DMA_SOURCE_INC_2_8;
+		break;
+	case DMA_SLAVE_BUSWIDTH_4_BYTES:
+		ctrl &= ~APB_DMA_DATA_WIDTH;
+		if (ch->cfg.direction != DMA_MEM_TO_DEV)
+			ctrl |= APB_DMA_DEST_INC_4_16;
+		else
+			ctrl |= APB_DMA_SOURCE_INC_4_16;
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	if (ch->cfg.direction == DMA_MEM_TO_DEV) {
+		ctrl &= ~APB_DMA_DEST_SELECT;
+		ctrl |= APB_DMA_SOURCE_SELECT;
+		ctrl |= (ch->line_reqno << 16 &
+			 APB_DMA_DEST_REQ_NO_MASK);
+	} else {
+		ctrl |= APB_DMA_DEST_SELECT;
+		ctrl &= ~APB_DMA_SOURCE_SELECT;
+		ctrl |= (ch->line_reqno << 24 &
+			 APB_DMA_SOURCE_REQ_NO_MASK);
+	}
+
+	writel(ctrl, ch->base + REG_OFF_CTRL);
+
+	return 0;
+}
+
+static int moxart_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
+			  unsigned long arg)
+{
+	int ret = 0;
+
+	switch (cmd) {
+	case DMA_PAUSE:
+	case DMA_RESUME:
+		return -EINVAL;
+	case DMA_TERMINATE_ALL:
+		moxart_terminate_all(chan);
+		break;
+	case DMA_SLAVE_CONFIG:
+		ret = moxart_slave_config(chan, (struct dma_slave_config *)arg);
+		break;
+	default:
+		ret = -ENOSYS;
+	}
+
+	return ret;
+}
+
+static struct dma_async_tx_descriptor *moxart_prep_slave_sg(
+	struct dma_chan *chan, struct scatterlist *sgl,
+	unsigned int sg_len, enum dma_transfer_direction dir,
+	unsigned long tx_flags, void *context)
+{
+	struct moxart_chan *ch = to_moxart_dma_chan(chan);
+	struct moxart_desc *d;
+	enum dma_slave_buswidth dev_width;
+	dma_addr_t dev_addr;
+	struct scatterlist *sgent;
+	unsigned int es;
+	unsigned int i;
+
+	if (!is_slave_direction(dir)) {
+		dev_err(chan2dev(chan), "%s: invalid DMA direction\n",
+			__func__);
+		return NULL;
+	}
+
+	if (dir == DMA_DEV_TO_MEM) {
+		dev_addr = ch->cfg.src_addr;
+		dev_width = ch->cfg.src_addr_width;
+	} else {
+		dev_addr = ch->cfg.dst_addr;
+		dev_width = ch->cfg.dst_addr_width;
+	}
+
+	switch (dev_width) {
+	case DMA_SLAVE_BUSWIDTH_1_BYTE:
+		es = MOXART_DMA_DATA_TYPE_S8;
+		break;
+	case DMA_SLAVE_BUSWIDTH_2_BYTES:
+		es = MOXART_DMA_DATA_TYPE_S16;
+		break;
+	case DMA_SLAVE_BUSWIDTH_4_BYTES:
+		es = MOXART_DMA_DATA_TYPE_S32;
+		break;
+	default:
+		dev_err(chan2dev(chan), "%s: unsupported data width (%u)\n",
+			__func__, dev_width);
+		return NULL;
+	}
+
+	d = kzalloc(sizeof(*d) + sg_len * sizeof(d->sg[0]), GFP_ATOMIC);
+	if (!d)
+		return NULL;
+
+	d->dma_dir = dir;
+	d->dev_addr = dev_addr;
+	d->es = es;
+
+	for_each_sg(sgl, sgent, sg_len, i) {
+		d->sg[i].addr = sg_dma_address(sgent);
+		d->sg[i].len = sg_dma_len(sgent);
+	}
+
+	d->sglen = sg_len;
+
+	ch->error = 0;
+
+	return vchan_tx_prep(&ch->vc, &d->vd, tx_flags);
+}
+
+static struct dma_chan *moxart_of_xlate(struct of_phandle_args *dma_spec,
+					struct of_dma *ofdma)
+{
+	struct moxart_dmadev *mdc = ofdma->of_dma_data;
+	struct dma_chan *chan;
+	struct moxart_chan *ch;
+
+	chan = dma_get_any_slave_channel(&mdc->dma_slave);
+	if (!chan)
+		return NULL;
+
+	ch = to_moxart_dma_chan(chan);
+	ch->line_reqno = dma_spec->args[0];
+
+	return chan;
+}
+
+static int moxart_alloc_chan_resources(struct dma_chan *chan)
+{
+	struct moxart_chan *ch = to_moxart_dma_chan(chan);
+
+	dev_dbg(chan2dev(chan), "%s: allocating channel #%u\n",
+		__func__, ch->ch_num);
+	ch->allocated = 1;
+
+	return 0;
+}
+
+static void moxart_free_chan_resources(struct dma_chan *chan)
+{
+	struct moxart_chan *ch = to_moxart_dma_chan(chan);
+
+	vchan_free_chan_resources(&ch->vc);
+
+	dev_dbg(chan2dev(chan), "%s: freeing channel #%u\n",
+		__func__, ch->ch_num);
+	ch->allocated = 0;
+}
+
+static void moxart_dma_set_params(struct moxart_chan *ch, dma_addr_t src_addr,
+				  dma_addr_t dst_addr)
+{
+	writel(src_addr, ch->base + REG_OFF_ADDRESS_SOURCE);
+	writel(dst_addr, ch->base + REG_OFF_ADDRESS_DEST);
+}
+
+static void moxart_set_transfer_params(struct moxart_chan *ch, unsigned int len)
+{
+	struct moxart_desc *d = ch->desc;
+	unsigned int sglen_div = es_bytes[d->es];
+
+	d->dma_cycles = len >> sglen_div;
+
+	/*
+	 * There are 4 cycles on 64 bytes copied, i.e. one cycle copies 16
+	 * bytes (when the width is APB_DMA_DATA_WIDTH_4).
+	 */
+	writel(d->dma_cycles, ch->base + REG_OFF_CYCLES);
+
+	dev_dbg(chan2dev(&ch->vc.chan), "%s: set %u DMA cycles (len=%u)\n",
+		__func__, d->dma_cycles, len);
+}
+
+static void moxart_start_dma(struct moxart_chan *ch)
+{
+	u32 ctrl;
+
+	ctrl = readl(ch->base + REG_OFF_CTRL);
+	ctrl |= (APB_DMA_ENABLE | APB_DMA_FIN_INT_EN | APB_DMA_ERR_INT_EN);
+	writel(ctrl, ch->base + REG_OFF_CTRL);
+}
+
+static void moxart_dma_start_sg(struct moxart_chan *ch, unsigned int idx)
+{
+	struct moxart_desc *d = ch->desc;
+	struct moxart_sg *sg = ch->desc->sg + idx;
+
+	if (ch->desc->dma_dir == DMA_MEM_TO_DEV)
+		moxart_dma_set_params(ch, sg->addr, d->dev_addr);
+	else if (ch->desc->dma_dir == DMA_DEV_TO_MEM)
+		moxart_dma_set_params(ch, d->dev_addr, sg->addr);
+
+	moxart_set_transfer_params(ch, sg->len);
+
+	moxart_start_dma(ch);
+}
+
+static void moxart_dma_start_desc(struct dma_chan *chan)
+{
+	struct moxart_chan *ch = to_moxart_dma_chan(chan);
+	struct virt_dma_desc *vd;
+
+	vd = vchan_next_desc(&ch->vc);
+
+	if (!vd) {
+		ch->desc = NULL;
+		return;
+	}
+
+	list_del(&vd->node);
+
+	ch->desc = to_moxart_dma_desc(&vd->tx);
+	ch->sgidx = 0;
+
+	moxart_dma_start_sg(ch, 0);
+}
+
+static void moxart_issue_pending(struct dma_chan *chan)
+{
+	struct moxart_chan *ch = to_moxart_dma_chan(chan);
+	unsigned long flags;
+
+	spin_lock_irqsave(&ch->vc.lock, flags);
+	if (vchan_issue_pending(&ch->vc) && !ch->desc)
+		moxart_dma_start_desc(chan);
+	spin_unlock_irqrestore(&ch->vc.lock, flags);
+}
+
+static size_t moxart_dma_desc_size(struct moxart_desc *d,
+				   unsigned int completed_sgs)
+{
+	unsigned int i;
+	size_t size;
+
+	for (size = i = completed_sgs; i < d->sglen; i++)
+		size += d->sg[i].len;
+
+	return size;
+}
+
+static size_t moxart_dma_desc_size_in_flight(struct moxart_chan *ch)
+{
+	size_t size;
+	unsigned int completed_cycles, cycles;
+
+	size = moxart_dma_desc_size(ch->desc, ch->sgidx);
+	cycles = readl(ch->base + REG_OFF_CYCLES);
+	completed_cycles = (ch->desc->dma_cycles - cycles);
+	size -= completed_cycles << es_bytes[ch->desc->es];
+
+	dev_dbg(chan2dev(&ch->vc.chan), "%s: size=%zu\n", __func__, size);
+
+	return size;
+}
+
+static enum dma_status moxart_tx_status(struct dma_chan *chan,
+					dma_cookie_t cookie,
+					struct dma_tx_state *txstate)
+{
+	struct moxart_chan *ch = to_moxart_dma_chan(chan);
+	struct virt_dma_desc *vd;
+	struct moxart_desc *d;
+	enum dma_status ret;
+	unsigned long flags;
+
+	/*
+	 * dma_cookie_status() assigns initial residue value.
+	 */
+	ret = dma_cookie_status(chan, cookie, txstate);
+
+	spin_lock_irqsave(&ch->vc.lock, flags);
+	vd = vchan_find_desc(&ch->vc, cookie);
+	if (vd) {
+		d = to_moxart_dma_desc(&vd->tx);
+		txstate->residue = moxart_dma_desc_size(d, 0);
+	} else if (ch->desc && ch->desc->vd.tx.cookie == cookie) {
+		txstate->residue = moxart_dma_desc_size_in_flight(ch);
+	}
+	spin_unlock_irqrestore(&ch->vc.lock, flags);
+
+	if (ch->error)
+		return DMA_ERROR;
+
+	return ret;
+}
+
+static void moxart_dma_init(struct dma_device *dma, struct device *dev)
+{
+	dma->device_prep_slave_sg		= moxart_prep_slave_sg;
+	dma->device_alloc_chan_resources	= moxart_alloc_chan_resources;
+	dma->device_free_chan_resources		= moxart_free_chan_resources;
+	dma->device_issue_pending		= moxart_issue_pending;
+	dma->device_tx_status			= moxart_tx_status;
+	dma->device_control			= moxart_control;
+	dma->dev				= dev;
+
+	INIT_LIST_HEAD(&dma->channels);
+}
+
+static irqreturn_t moxart_dma_interrupt(int irq, void *devid)
+{
+	struct moxart_dmadev *mc = devid;
+	struct moxart_chan *ch = &mc->slave_chans[0];
+	unsigned int i;
+	unsigned long flags;
+	u32 ctrl;
+
+	dev_dbg(chan2dev(&ch->vc.chan), "%s\n", __func__);
+
+	for (i = 0; i < APB_DMA_MAX_CHANNEL; i++, ch++) {
+		if (!ch->allocated)
+			continue;
+
+		ctrl = readl(ch->base + REG_OFF_CTRL);
+
+		dev_dbg(chan2dev(&ch->vc.chan), "%s: ch=%p ch->base=%p ctrl=%x\n",
+			__func__, ch, ch->base, ctrl);
+
+		if (ctrl & APB_DMA_FIN_INT_STS) {
+			ctrl &= ~APB_DMA_FIN_INT_STS;
+			if (ch->desc) {
+				spin_lock_irqsave(&ch->vc.lock, flags);
+				if (++ch->sgidx < ch->desc->sglen) {
+					moxart_dma_start_sg(ch, ch->sgidx);
+				} else {
+					vchan_cookie_complete(&ch->desc->vd);
+					moxart_dma_start_desc(&ch->vc.chan);
+				}
+				spin_unlock_irqrestore(&ch->vc.lock, flags);
+			}
+		}
+
+		if (ctrl & APB_DMA_ERR_INT_STS) {
+			ctrl &= ~APB_DMA_ERR_INT_STS;
+			ch->error = 1;
+		}
+
+		writel(ctrl, ch->base + REG_OFF_CTRL);
+	}
+
+	return IRQ_HANDLED;
+}
+
+static int moxart_probe(struct platform_device *pdev)
+{
+	struct device *dev = &pdev->dev;
+	struct device_node *node = dev->of_node;
+	struct resource *res;
+	static void __iomem *dma_base_addr;
+	int ret, i;
+	unsigned int irq;
+	struct moxart_chan *ch;
+	struct moxart_dmadev *mdc;
+
+	mdc = devm_kzalloc(dev, sizeof(*mdc), GFP_KERNEL);
+	if (!mdc) {
+		dev_err(dev, "can't allocate DMA container\n");
+		return -ENOMEM;
+	}
+
+	irq = irq_of_parse_and_map(node, 0);
+	if (irq == NO_IRQ) {
+		dev_err(dev, "no IRQ resource\n");
+		return -EINVAL;
+	}
+
+	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	dma_base_addr = devm_ioremap_resource(dev, res);
+	if (IS_ERR(dma_base_addr))
+		return PTR_ERR(dma_base_addr);
+
+	dma_cap_zero(mdc->dma_slave.cap_mask);
+	dma_cap_set(DMA_SLAVE, mdc->dma_slave.cap_mask);
+	dma_cap_set(DMA_PRIVATE, mdc->dma_slave.cap_mask);
+
+	moxart_dma_init(&mdc->dma_slave, dev);
+
+	ch = &mdc->slave_chans[0];
+	for (i = 0; i < APB_DMA_MAX_CHANNEL; i++, ch++) {
+		ch->ch_num = i;
+		ch->base = dma_base_addr + i * REG_OFF_CHAN_SIZE;
+		ch->allocated = 0;
+
+		ch->vc.desc_free = moxart_dma_desc_free;
+		vchan_init(&ch->vc, &mdc->dma_slave);
+
+		dev_dbg(dev, "%s: chs[%d]: ch->ch_num=%u ch->base=%p\n",
+			__func__, i, ch->ch_num, ch->base);
+	}
+
+	platform_set_drvdata(pdev, mdc);
+
+	ret = devm_request_irq(dev, irq, moxart_dma_interrupt, 0,
+			       "moxart-dma-engine", mdc);
+	if (ret) {
+		dev_err(dev, "devm_request_irq failed\n");
+		return ret;
+	}
+
+	ret = dma_async_device_register(&mdc->dma_slave);
+	if (ret) {
+		dev_err(dev, "dma_async_device_register failed\n");
+		return ret;
+	}
+
+	ret = of_dma_controller_register(node, moxart_of_xlate, mdc);
+	if (ret) {
+		dev_err(dev, "of_dma_controller_register failed\n");
+		dma_async_device_unregister(&mdc->dma_slave);
+		return ret;
+	}
+
+	dev_dbg(dev, "%s: IRQ=%u\n", __func__, irq);
+
+	return 0;
+}
+
+static int moxart_remove(struct platform_device *pdev)
+{
+	struct moxart_dmadev *m = platform_get_drvdata(pdev);
+
+	dma_async_device_unregister(&m->dma_slave);
+
+	if (pdev->dev.of_node)
+		of_dma_controller_free(pdev->dev.of_node);
+
+	return 0;
+}
+
+static const struct of_device_id moxart_dma_match[] = {
+	{ .compatible = "moxa,moxart-dma" },
+	{ }
+};
+
+static struct platform_driver moxart_driver = {
+	.probe	= moxart_probe,
+	.remove	= moxart_remove,
+	.driver = {
+		.name		= "moxart-dma-engine",
+		.owner		= THIS_MODULE,
+		.of_match_table	= moxart_dma_match,
+	},
+};
+
+static int moxart_init(void)
+{
+	return platform_driver_register(&moxart_driver);
+}
+subsys_initcall(moxart_init);
+
+static void __exit moxart_exit(void)
+{
+	platform_driver_unregister(&moxart_driver);
+}
+module_exit(moxart_exit);
+
+MODULE_AUTHOR("Jonas Jensen <jonas.jensen@gmail.com>");
+MODULE_DESCRIPTION("MOXART DMA engine driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/dma/mv_xor.c b/drivers/dma/mv_xor.c
index 53fb0c8..766b68e 100644
--- a/drivers/dma/mv_xor.c
+++ b/drivers/dma/mv_xor.c
@@ -497,8 +497,8 @@
 		if (!mv_can_chain(grp_start))
 			goto submit_done;
 
-		dev_dbg(mv_chan_to_devp(mv_chan), "Append to last desc %x\n",
-			old_chain_tail->async_tx.phys);
+		dev_dbg(mv_chan_to_devp(mv_chan), "Append to last desc %pa\n",
+			&old_chain_tail->async_tx.phys);
 
 		/* fix up the hardware chain */
 		mv_desc_set_next_desc(old_chain_tail, grp_start->async_tx.phys);
@@ -527,7 +527,8 @@
 /* returns the number of allocated descriptors */
 static int mv_xor_alloc_chan_resources(struct dma_chan *chan)
 {
-	char *hw_desc;
+	void *virt_desc;
+	dma_addr_t dma_desc;
 	int idx;
 	struct mv_xor_chan *mv_chan = to_mv_xor_chan(chan);
 	struct mv_xor_desc_slot *slot = NULL;
@@ -542,17 +543,16 @@
 				" %d descriptor slots", idx);
 			break;
 		}
-		hw_desc = (char *) mv_chan->dma_desc_pool_virt;
-		slot->hw_desc = (void *) &hw_desc[idx * MV_XOR_SLOT_SIZE];
+		virt_desc = mv_chan->dma_desc_pool_virt;
+		slot->hw_desc = virt_desc + idx * MV_XOR_SLOT_SIZE;
 
 		dma_async_tx_descriptor_init(&slot->async_tx, chan);
 		slot->async_tx.tx_submit = mv_xor_tx_submit;
 		INIT_LIST_HEAD(&slot->chain_node);
 		INIT_LIST_HEAD(&slot->slot_node);
 		INIT_LIST_HEAD(&slot->tx_list);
-		hw_desc = (char *) mv_chan->dma_desc_pool;
-		slot->async_tx.phys =
-			(dma_addr_t) &hw_desc[idx * MV_XOR_SLOT_SIZE];
+		dma_desc = mv_chan->dma_desc_pool;
+		slot->async_tx.phys = dma_desc + idx * MV_XOR_SLOT_SIZE;
 		slot->idx = idx++;
 
 		spin_lock_bh(&mv_chan->lock);
@@ -582,8 +582,8 @@
 	int slot_cnt;
 
 	dev_dbg(mv_chan_to_devp(mv_chan),
-		"%s dest: %x src %x len: %u flags: %ld\n",
-		__func__, dest, src, len, flags);
+		"%s dest: %pad src %pad len: %u flags: %ld\n",
+		__func__, &dest, &src, len, flags);
 	if (unlikely(len < MV_XOR_MIN_BYTE_COUNT))
 		return NULL;
 
@@ -626,8 +626,8 @@
 	BUG_ON(len > MV_XOR_MAX_BYTE_COUNT);
 
 	dev_dbg(mv_chan_to_devp(mv_chan),
-		"%s src_cnt: %d len: dest %x %u flags: %ld\n",
-		__func__, src_cnt, len, dest, flags);
+		"%s src_cnt: %d len: %u dest %pad flags: %ld\n",
+		__func__, src_cnt, len, &dest, flags);
 
 	spin_lock_bh(&mv_chan->lock);
 	slot_cnt = mv_chan_xor_slot_count(len, src_cnt);
diff --git a/drivers/dma/omap-dma.c b/drivers/dma/omap-dma.c
index 2f66cf4..362e7c4 100644
--- a/drivers/dma/omap-dma.c
+++ b/drivers/dma/omap-dma.c
@@ -190,7 +190,7 @@
 {
 	struct omap_chan *c = to_omap_dma_chan(chan);
 
-	dev_info(c->vc.chan.device->dev, "allocating channel for %u\n", c->dma_sig);
+	dev_dbg(c->vc.chan.device->dev, "allocating channel for %u\n", c->dma_sig);
 
 	return omap_request_dma(c->dma_sig, "DMA engine",
 		omap_dma_callback, c, &c->dma_ch);
@@ -203,7 +203,7 @@
 	vchan_free_chan_resources(&c->vc);
 	omap_free_dma(c->dma_ch);
 
-	dev_info(c->vc.chan.device->dev, "freeing channel for %u\n", c->dma_sig);
+	dev_dbg(c->vc.chan.device->dev, "freeing channel for %u\n", c->dma_sig);
 }
 
 static size_t omap_dma_sg_size(struct omap_sg *sg)
diff --git a/drivers/dma/pl330.c b/drivers/dma/pl330.c
index c90edec..73fa9b7 100644
--- a/drivers/dma/pl330.c
+++ b/drivers/dma/pl330.c
@@ -543,7 +543,9 @@
 	/* DMA-Engine Channel */
 	struct dma_chan chan;
 
-	/* List of to be xfered descriptors */
+	/* List of submitted descriptors */
+	struct list_head submitted_list;
+	/* List of issued descriptors */
 	struct list_head work_list;
 	/* List of completed descriptors */
 	struct list_head completed_list;
@@ -578,12 +580,16 @@
 	/* DMA-Engine Device */
 	struct dma_device ddma;
 
+	/* Holds info about sg limitations */
+	struct device_dma_parameters dma_parms;
+
 	/* Pool of descriptors available for the DMAC's channels */
 	struct list_head desc_pool;
 	/* To protect desc_pool manipulation */
 	spinlock_t pool_lock;
 
 	/* Peripheral channels connected to this DMAC */
+	unsigned int num_peripherals;
 	struct dma_pl330_chan *peripherals; /* keep at end */
 };
 
@@ -606,11 +612,6 @@
 	struct dma_pl330_chan *pchan;
 };
 
-struct dma_pl330_filter_args {
-	struct dma_pl330_dmac *pdmac;
-	unsigned int chan_id;
-};
-
 static inline void _callback(struct pl330_req *r, enum pl330_op_err err)
 {
 	if (r && r->xfer_cb)
@@ -2298,16 +2299,6 @@
 	tasklet_schedule(&pch->task);
 }
 
-static bool pl330_dt_filter(struct dma_chan *chan, void *param)
-{
-	struct dma_pl330_filter_args *fargs = param;
-
-	if (chan->device != &fargs->pdmac->ddma)
-		return false;
-
-	return (chan->chan_id == fargs->chan_id);
-}
-
 bool pl330_filter(struct dma_chan *chan, void *param)
 {
 	u8 *peri_id;
@@ -2325,23 +2316,16 @@
 {
 	int count = dma_spec->args_count;
 	struct dma_pl330_dmac *pdmac = ofdma->of_dma_data;
-	struct dma_pl330_filter_args fargs;
-	dma_cap_mask_t cap;
-
-	if (!pdmac)
-		return NULL;
+	unsigned int chan_id;
 
 	if (count != 1)
 		return NULL;
 
-	fargs.pdmac = pdmac;
-	fargs.chan_id = dma_spec->args[0];
+	chan_id = dma_spec->args[0];
+	if (chan_id >= pdmac->num_peripherals)
+		return NULL;
 
-	dma_cap_zero(cap);
-	dma_cap_set(DMA_SLAVE, cap);
-	dma_cap_set(DMA_CYCLIC, cap);
-
-	return dma_request_channel(cap, pl330_dt_filter, &fargs);
+	return dma_get_slave_channel(&pdmac->peripherals[chan_id].chan);
 }
 
 static int pl330_alloc_chan_resources(struct dma_chan *chan)
@@ -2385,6 +2369,11 @@
 		pl330_chan_ctrl(pch->pl330_chid, PL330_OP_FLUSH);
 
 		/* Mark all desc done */
+		list_for_each_entry(desc, &pch->submitted_list, node) {
+			desc->status = FREE;
+			dma_cookie_complete(&desc->txd);
+		}
+
 		list_for_each_entry(desc, &pch->work_list , node) {
 			desc->status = FREE;
 			dma_cookie_complete(&desc->txd);
@@ -2395,6 +2384,7 @@
 			dma_cookie_complete(&desc->txd);
 		}
 
+		list_splice_tail_init(&pch->submitted_list, &pdmac->desc_pool);
 		list_splice_tail_init(&pch->work_list, &pdmac->desc_pool);
 		list_splice_tail_init(&pch->completed_list, &pdmac->desc_pool);
 		spin_unlock_irqrestore(&pch->lock, flags);
@@ -2453,7 +2443,14 @@
 
 static void pl330_issue_pending(struct dma_chan *chan)
 {
-	pl330_tasklet((unsigned long) to_pchan(chan));
+	struct dma_pl330_chan *pch = to_pchan(chan);
+	unsigned long flags;
+
+	spin_lock_irqsave(&pch->lock, flags);
+	list_splice_tail_init(&pch->submitted_list, &pch->work_list);
+	spin_unlock_irqrestore(&pch->lock, flags);
+
+	pl330_tasklet((unsigned long)pch);
 }
 
 /*
@@ -2480,11 +2477,11 @@
 
 		dma_cookie_assign(&desc->txd);
 
-		list_move_tail(&desc->node, &pch->work_list);
+		list_move_tail(&desc->node, &pch->submitted_list);
 	}
 
 	cookie = dma_cookie_assign(&last->txd);
-	list_add_tail(&last->node, &pch->work_list);
+	list_add_tail(&last->node, &pch->submitted_list);
 	spin_unlock_irqrestore(&pch->lock, flags);
 
 	return cookie;
@@ -2960,6 +2957,8 @@
 	else
 		num_chan = max_t(int, pi->pcfg.num_peri, pi->pcfg.num_chan);
 
+	pdmac->num_peripherals = num_chan;
+
 	pdmac->peripherals = kzalloc(num_chan * sizeof(*pch), GFP_KERNEL);
 	if (!pdmac->peripherals) {
 		ret = -ENOMEM;
@@ -2974,6 +2973,7 @@
 		else
 			pch->chan.private = adev->dev.of_node;
 
+		INIT_LIST_HEAD(&pch->submitted_list);
 		INIT_LIST_HEAD(&pch->work_list);
 		INIT_LIST_HEAD(&pch->completed_list);
 		spin_lock_init(&pch->lock);
@@ -3021,6 +3021,9 @@
 			"unable to register DMA to the generic DT DMA helpers\n");
 		}
 	}
+
+	adev->dev.dma_parms = &pdmac->dma_parms;
+
 	/*
 	 * This is the limit for transfers with a buswidth of 1, larger
 	 * buswidths will have larger limits.
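The new submitted_list mirrors the two-stage dmaengine contract: dmaengine_submit() only queues work, and nothing reaches the work list or the hardware until dma_async_issue_pending(). A minimal ordering sketch, with desc and chan assumed from an earlier prep call:

	cookie = dmaengine_submit(desc);	/* lands on pch->submitted_list */
	/* further descriptors may be submitted and batched here */
	dma_async_issue_pending(chan);		/* splice to work_list and start */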
diff --git a/drivers/dma/ppc4xx/adma.c b/drivers/dma/ppc4xx/adma.c
index 8bba298..ce7a8d7 100644
--- a/drivers/dma/ppc4xx/adma.c
+++ b/drivers/dma/ppc4xx/adma.c
@@ -4114,6 +4114,7 @@
 	regs = ioremap(res.start, resource_size(&res));
 	if (!regs) {
 		dev_err(&ofdev->dev, "failed to ioremap regs!\n");
+		ret = -ENOMEM;
 		goto err_regs_alloc;
 	}
 
diff --git a/drivers/dma/sirf-dma.c b/drivers/dma/sirf-dma.c
index 6aec3ad..d4d3a310 100644
--- a/drivers/dma/sirf-dma.c
+++ b/drivers/dma/sirf-dma.c
@@ -640,6 +640,25 @@
 }
 EXPORT_SYMBOL(sirfsoc_dma_filter_id);
 
+#define SIRFSOC_DMA_BUSWIDTHS \
+	(BIT(DMA_SLAVE_BUSWIDTH_UNDEFINED) | \
+	BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) | \
+	BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) | \
+	BIT(DMA_SLAVE_BUSWIDTH_4_BYTES) | \
+	BIT(DMA_SLAVE_BUSWIDTH_8_BYTES))
+
+static int sirfsoc_dma_device_slave_caps(struct dma_chan *dchan,
+	struct dma_slave_caps *caps)
+{
+	caps->src_addr_widths = SIRFSOC_DMA_BUSWIDTHS;
+	caps->dstn_addr_widths = SIRFSOC_DMA_BUSWIDTHS;
+	caps->directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV);
+	caps->cmd_pause = true;
+	caps->cmd_terminate = true;
+
+	return 0;
+}
+
 static int sirfsoc_dma_probe(struct platform_device *op)
 {
 	struct device_node *dn = op->dev.of_node;
@@ -712,6 +731,7 @@
 	dma->device_tx_status = sirfsoc_dma_tx_status;
 	dma->device_prep_interleaved_dma = sirfsoc_dma_prep_interleaved;
 	dma->device_prep_dma_cyclic = sirfsoc_dma_prep_cyclic;
+	dma->device_slave_caps = sirfsoc_dma_device_slave_caps;
 
 	INIT_LIST_HEAD(&dma->channels);
 	dma_cap_set(DMA_SLAVE, dma->cap_mask);
diff --git a/drivers/dma/tegra20-apb-dma.c b/drivers/dma/tegra20-apb-dma.c
index d11bb36..03ad64e 100644
--- a/drivers/dma/tegra20-apb-dma.c
+++ b/drivers/dma/tegra20-apb-dma.c
@@ -100,6 +100,11 @@
 #define TEGRA_APBDMA_APBSEQ_DATA_SWAP		BIT(27)
 #define TEGRA_APBDMA_APBSEQ_WRAP_WORD_1		(1 << 16)
 
+/* Tegra148 specific registers */
+#define TEGRA_APBDMA_CHAN_WCOUNT		0x20
+
+#define TEGRA_APBDMA_CHAN_WORD_TRANSFER		0x24
+
 /*
  * If any burst is in flight and DMA paused then this is the time to complete
  * in-flight burst and update DMA status register.
@@ -109,21 +114,22 @@
 /* Channel base address offset from APBDMA base address */
 #define TEGRA_APBDMA_CHANNEL_BASE_ADD_OFFSET	0x1000
 
-/* DMA channel register space size */
-#define TEGRA_APBDMA_CHANNEL_REGISTER_SIZE	0x20
-
 struct tegra_dma;
 
 /*
  * tegra_dma_chip_data Tegra chip specific DMA data
  * @nr_channels: Number of channels available in the controller.
+ * @channel_reg_size: Channel register size/stride.
  * @max_dma_count: Maximum DMA transfer count supported by DMA controller.
  * @support_channel_pause: Support channel wise pause of dma.
+ * @support_separate_wcount_reg: Support separate word count register.
  */
 struct tegra_dma_chip_data {
 	int nr_channels;
+	int channel_reg_size;
 	int max_dma_count;
 	bool support_channel_pause;
+	bool support_separate_wcount_reg;
 };
 
 /* DMA channel registers */
@@ -133,6 +139,7 @@
 	unsigned long	apb_ptr;
 	unsigned long	ahb_seq;
 	unsigned long	apb_seq;
+	unsigned long	wcount;
 };
 
 /*
@@ -426,6 +433,8 @@
 	tdc_write(tdc, TEGRA_APBDMA_CHAN_APBPTR, ch_regs->apb_ptr);
 	tdc_write(tdc, TEGRA_APBDMA_CHAN_AHBSEQ, ch_regs->ahb_seq);
 	tdc_write(tdc, TEGRA_APBDMA_CHAN_AHBPTR, ch_regs->ahb_ptr);
+	if (tdc->tdma->chip_data->support_separate_wcount_reg)
+		tdc_write(tdc, TEGRA_APBDMA_CHAN_WCOUNT, ch_regs->wcount);
 
 	/* Start DMA */
 	tdc_write(tdc, TEGRA_APBDMA_CHAN_CSR,
@@ -465,6 +474,9 @@
 	/* Safe to program new configuration */
 	tdc_write(tdc, TEGRA_APBDMA_CHAN_APBPTR, nsg_req->ch_regs.apb_ptr);
 	tdc_write(tdc, TEGRA_APBDMA_CHAN_AHBPTR, nsg_req->ch_regs.ahb_ptr);
+	if (tdc->tdma->chip_data->support_separate_wcount_reg)
+		tdc_write(tdc, TEGRA_APBDMA_CHAN_WCOUNT,
+						nsg_req->ch_regs.wcount);
 	tdc_write(tdc, TEGRA_APBDMA_CHAN_CSR,
 				nsg_req->ch_regs.csr | TEGRA_APBDMA_CSR_ENB);
 	nsg_req->configured = true;
@@ -718,6 +730,7 @@
 	struct tegra_dma_desc *dma_desc;
 	unsigned long flags;
 	unsigned long status;
+	unsigned long wcount;
 	bool was_busy;
 
 	spin_lock_irqsave(&tdc->lock, flags);
@@ -738,6 +751,10 @@
 		tdc->isr_handler(tdc, true);
 		status = tdc_read(tdc, TEGRA_APBDMA_CHAN_STATUS);
 	}
+	if (tdc->tdma->chip_data->support_separate_wcount_reg)
+		wcount = tdc_read(tdc, TEGRA_APBDMA_CHAN_WORD_TRANSFER);
+	else
+		wcount = status;
 
 	was_busy = tdc->busy;
 	tegra_dma_stop(tdc);
@@ -746,7 +763,7 @@
 		sgreq = list_first_entry(&tdc->pending_sg_req,
 					typeof(*sgreq), node);
 		sgreq->dma_desc->bytes_transferred +=
-				get_current_xferred_count(tdc, sgreq, status);
+				get_current_xferred_count(tdc, sgreq, wcount);
 	}
 	tegra_dma_resume(tdc);
 
@@ -908,6 +925,17 @@
 	return -EINVAL;
 }
 
+static void tegra_dma_prep_wcount(struct tegra_dma_channel *tdc,
+	struct tegra_dma_channel_regs *ch_regs, u32 len)
+{
+	u32 len_field = (len - 4) & 0xFFFC;
+
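+	/*
+	 * Chips with a separate word-count register take the transfer
+	 * length there; older chips encode it in the low bits of the CSR.
+	 */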
+	if (tdc->tdma->chip_data->support_separate_wcount_reg)
+		ch_regs->wcount = len_field;
+	else
+		ch_regs->csr |= len_field;
+}
+
 static struct dma_async_tx_descriptor *tegra_dma_prep_slave_sg(
 	struct dma_chan *dc, struct scatterlist *sgl, unsigned int sg_len,
 	enum dma_transfer_direction direction, unsigned long flags,
@@ -991,7 +1019,8 @@
 
 		sg_req->ch_regs.apb_ptr = apb_ptr;
 		sg_req->ch_regs.ahb_ptr = mem;
-		sg_req->ch_regs.csr = csr | ((len - 4) & 0xFFFC);
+		sg_req->ch_regs.csr = csr;
+		tegra_dma_prep_wcount(tdc, &sg_req->ch_regs, len);
 		sg_req->ch_regs.apb_seq = apb_seq;
 		sg_req->ch_regs.ahb_seq = ahb_seq;
 		sg_req->configured = false;
@@ -1120,7 +1149,8 @@
 		ahb_seq |= get_burst_size(tdc, burst_size, slave_bw, len);
 		sg_req->ch_regs.apb_ptr = apb_ptr;
 		sg_req->ch_regs.ahb_ptr = mem;
-		sg_req->ch_regs.csr = csr | ((len - 4) & 0xFFFC);
+		sg_req->ch_regs.csr = csr;
+		tegra_dma_prep_wcount(tdc, &sg_req->ch_regs, len);
 		sg_req->ch_regs.apb_seq = apb_seq;
 		sg_req->ch_regs.ahb_seq = ahb_seq;
 		sg_req->configured = false;
@@ -1234,27 +1264,45 @@
 /* Tegra20 specific DMA controller information */
 static const struct tegra_dma_chip_data tegra20_dma_chip_data = {
 	.nr_channels		= 16,
+	.channel_reg_size	= 0x20,
 	.max_dma_count		= 1024UL * 64,
 	.support_channel_pause	= false,
+	.support_separate_wcount_reg = false,
 };
 
 /* Tegra30 specific DMA controller information */
 static const struct tegra_dma_chip_data tegra30_dma_chip_data = {
 	.nr_channels		= 32,
+	.channel_reg_size	= 0x20,
 	.max_dma_count		= 1024UL * 64,
 	.support_channel_pause	= false,
+	.support_separate_wcount_reg = false,
 };
 
 /* Tegra114 specific DMA controller information */
 static const struct tegra_dma_chip_data tegra114_dma_chip_data = {
 	.nr_channels		= 32,
+	.channel_reg_size	= 0x20,
 	.max_dma_count		= 1024UL * 64,
 	.support_channel_pause	= true,
+	.support_separate_wcount_reg = false,
+};
+
+/* Tegra148 specific DMA controller information */
+static const struct tegra_dma_chip_data tegra148_dma_chip_data = {
+	.nr_channels		= 32,
+	.channel_reg_size	= 0x40,
+	.max_dma_count		= 1024UL * 64,
+	.support_channel_pause	= true,
+	.support_separate_wcount_reg = true,
 };
 
 
 static const struct of_device_id tegra_dma_of_match[] = {
 	{
+		.compatible = "nvidia,tegra148-apbdma",
+		.data = &tegra148_dma_chip_data,
+	}, {
 		.compatible = "nvidia,tegra114-apbdma",
 		.data = &tegra114_dma_chip_data,
 	}, {
@@ -1348,7 +1396,7 @@
 		struct tegra_dma_channel *tdc = &tdma->channels[i];
 
 		tdc->chan_base_offset = TEGRA_APBDMA_CHANNEL_BASE_ADD_OFFSET +
-					i * TEGRA_APBDMA_CHANNEL_REGISTER_SIZE;
+					i * cdata->channel_reg_size;
 
 		res = platform_get_resource(pdev, IORESOURCE_IRQ, i);
 		if (!res) {
diff --git a/drivers/dma/virt-dma.h b/drivers/dma/virt-dma.h
index 85c19d6..181b952 100644
--- a/drivers/dma/virt-dma.h
+++ b/drivers/dma/virt-dma.h
@@ -84,10 +84,12 @@
 static inline void vchan_cookie_complete(struct virt_dma_desc *vd)
 {
 	struct virt_dma_chan *vc = to_virt_chan(vd->tx.chan);
+	dma_cookie_t cookie;
 
+	cookie = vd->tx.cookie;
 	dma_cookie_complete(&vd->tx);
 	dev_vdbg(vc->chan.device->dev, "txd %p[%x]: marked complete\n",
-		vd, vd->tx.cookie);
+		 vd, cookie);
 	list_add_tail(&vd->node, &vc->desc_completed);
 
 	tasklet_schedule(&vc->task);
diff --git a/drivers/edac/edac_mc.c b/drivers/edac/edac_mc.c
index e8c9ef0..33edd67 100644
--- a/drivers/edac/edac_mc.c
+++ b/drivers/edac/edac_mc.c
@@ -559,7 +559,8 @@
  *
  *		called with the mem_ctls_mutex held
  */
-static void edac_mc_workq_setup(struct mem_ctl_info *mci, unsigned msec)
+static void edac_mc_workq_setup(struct mem_ctl_info *mci, unsigned msec,
+				bool init)
 {
 	edac_dbg(0, "\n");
 
@@ -567,7 +568,9 @@
 	if (mci->op_state != OP_RUNNING_POLL)
 		return;
 
-	INIT_DELAYED_WORK(&mci->work, edac_mc_workq_function);
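+	/*
+	 * The delayed work only needs to be initialized once; later calls
+	 * simply re-arm it with the new polling period.
+	 */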
+	if (init)
+		INIT_DELAYED_WORK(&mci->work, edac_mc_workq_function);
+
 	mod_delayed_work(edac_workqueue, &mci->work, msecs_to_jiffies(msec));
 }
 
@@ -601,7 +604,7 @@
  *	user space has updated our poll period value, need to
  *	reset our workq delays
  */
-void edac_mc_reset_delay_period(int value)
+void edac_mc_reset_delay_period(unsigned long value)
 {
 	struct mem_ctl_info *mci;
 	struct list_head *item;
@@ -611,7 +614,7 @@
 	list_for_each(item, &mc_devices) {
 		mci = list_entry(item, struct mem_ctl_info, link);
 
-		edac_mc_workq_setup(mci, (unsigned long) value);
+		edac_mc_workq_setup(mci, value, false);
 	}
 
 	mutex_unlock(&mem_ctls_mutex);
@@ -782,7 +785,7 @@
 		/* This instance is NOW RUNNING */
 		mci->op_state = OP_RUNNING_POLL;
 
-		edac_mc_workq_setup(mci, edac_mc_get_poll_msec());
+		edac_mc_workq_setup(mci, edac_mc_get_poll_msec(), true);
 	} else {
 		mci->op_state = OP_RUNNING_INTERRUPT;
 	}
diff --git a/drivers/edac/edac_mc_sysfs.c b/drivers/edac/edac_mc_sysfs.c
index 51c0362..b335c6a 100644
--- a/drivers/edac/edac_mc_sysfs.c
+++ b/drivers/edac/edac_mc_sysfs.c
@@ -52,18 +52,20 @@
 
 static int edac_set_poll_msec(const char *val, struct kernel_param *kp)
 {
-	long l;
+	unsigned long l;
 	int ret;
 
 	if (!val)
 		return -EINVAL;
 
-	ret = kstrtol(val, 0, &l);
+	ret = kstrtoul(val, 0, &l);
 	if (ret)
 		return ret;
-	if ((int)l != l)
+
+	if (l < 1000)
 		return -EINVAL;
-	*((int *)kp->arg) = l;
+
+	*((unsigned long *)kp->arg) = l;
 
 	/* notify edac_mc engine to reset the poll period */
 	edac_mc_reset_delay_period(l);
diff --git a/drivers/edac/edac_module.h b/drivers/edac/edac_module.h
index 3d139c6..f2118bf 100644
--- a/drivers/edac/edac_module.h
+++ b/drivers/edac/edac_module.h
@@ -52,7 +52,7 @@
 extern void edac_device_workq_teardown(struct edac_device_ctl_info *edac_dev);
 extern void edac_device_reset_delay_period(struct edac_device_ctl_info
 					   *edac_dev, unsigned long value);
-extern void edac_mc_reset_delay_period(int value);
+extern void edac_mc_reset_delay_period(unsigned long value);
 
 extern void *edac_align_ptr(void **p, unsigned size, int n_elems);
 
diff --git a/drivers/gpio/Kconfig b/drivers/gpio/Kconfig
index 6973387..903f24d 100644
--- a/drivers/gpio/Kconfig
+++ b/drivers/gpio/Kconfig
@@ -403,6 +403,7 @@
 
 config GPIO_TB10X
 	bool
+	select GENERIC_IRQ_CHIP
 	select OF_GPIO
 
 comment "I2C GPIO expanders:"
diff --git a/drivers/gpio/gpio-bcm-kona.c b/drivers/gpio/gpio-bcm-kona.c
index 233d088..f32357e 100644
--- a/drivers/gpio/gpio-bcm-kona.c
+++ b/drivers/gpio/gpio-bcm-kona.c
@@ -1,5 +1,5 @@
 /*
- * Copyright (C) 2012-2013 Broadcom Corporation
+ * Copyright (C) 2012-2014 Broadcom Corporation
  *
  * This program is free software; you can redistribute it and/or
  * modify it under the terms of the GNU General Public License as
@@ -657,6 +657,6 @@
 
 module_platform_driver(bcm_kona_gpio_driver);
 
-MODULE_AUTHOR("Broadcom");
+MODULE_AUTHOR("Broadcom Corporation <bcm-kernel-feedback-list@broadcom.com>");
 MODULE_DESCRIPTION("Broadcom Kona GPIO Driver");
 MODULE_LICENSE("GPL v2");
diff --git a/drivers/gpio/gpio-clps711x.c b/drivers/gpio/gpio-clps711x.c
index d355027..3c2ba2ad 100644
--- a/drivers/gpio/gpio-clps711x.c
+++ b/drivers/gpio/gpio-clps711x.c
@@ -97,3 +97,4 @@
 MODULE_LICENSE("GPL");
 MODULE_AUTHOR("Alexander Shiyan <shc_work@mail.ru>");
 MODULE_DESCRIPTION("CLPS711X GPIO driver");
+MODULE_ALIAS("platform:clps711x-gpio");
diff --git a/drivers/gpio/gpio-intel-mid.c b/drivers/gpio/gpio-intel-mid.c
index d1b50ef..e585163 100644
--- a/drivers/gpio/gpio-intel-mid.c
+++ b/drivers/gpio/gpio-intel-mid.c
@@ -394,8 +394,8 @@
 
 static int intel_gpio_runtime_idle(struct device *dev)
 {
-	pm_schedule_suspend(dev, 500);
-	return -EBUSY;
+	int err = pm_schedule_suspend(dev, 500);
+	return err ?: -EBUSY;
 }
 
 static const struct dev_pm_ops intel_gpio_pm_ops = {
diff --git a/drivers/gpio/gpio-xtensa.c b/drivers/gpio/gpio-xtensa.c
index 1d136ec..7081304 100644
--- a/drivers/gpio/gpio-xtensa.c
+++ b/drivers/gpio/gpio-xtensa.c
@@ -40,6 +40,8 @@
 #error GPIO32 option is not enabled for your xtensa core variant
 #endif
 
+#if XCHAL_HAVE_CP
+
 static inline unsigned long enable_cp(unsigned long *cpenable)
 {
 	unsigned long flags;
@@ -57,6 +59,20 @@
 	local_irq_restore(flags);
 }
 
+#else
+
+static inline unsigned long enable_cp(unsigned long *cpenable)
+{
+	*cpenable = 0; /* avoid uninitialized value warning */
+	return 0;
+}
+
+static inline void disable_cp(unsigned long flags, unsigned long cpenable)
+{
+}
+
+#endif /* XCHAL_HAVE_CP */
+
 static int xtensa_impwire_get_direction(struct gpio_chip *gc, unsigned offset)
 {
 	return 1; /* input only */
diff --git a/drivers/gpu/drm/Kconfig b/drivers/gpu/drm/Kconfig
index f864275..8e7fa4d 100644
--- a/drivers/gpu/drm/Kconfig
+++ b/drivers/gpu/drm/Kconfig
@@ -20,6 +20,10 @@
 	  details.  You should also select and configure AGP
 	  (/dev/agpgart) support if it is available for your platform.
 
+config DRM_MIPI_DSI
+	bool
+	depends on DRM
+
 config DRM_USB
 	tristate
 	depends on DRM
@@ -188,6 +192,10 @@
 
 source "drivers/gpu/drm/qxl/Kconfig"
 
+source "drivers/gpu/drm/bochs/Kconfig"
+
 source "drivers/gpu/drm/msm/Kconfig"
 
 source "drivers/gpu/drm/tegra/Kconfig"
+
+source "drivers/gpu/drm/panel/Kconfig"
diff --git a/drivers/gpu/drm/Makefile b/drivers/gpu/drm/Makefile
index cc08b84..292a79d 100644
--- a/drivers/gpu/drm/Makefile
+++ b/drivers/gpu/drm/Makefile
@@ -18,6 +18,7 @@
 drm-$(CONFIG_COMPAT) += drm_ioc32.o
 drm-$(CONFIG_DRM_GEM_CMA_HELPER) += drm_gem_cma_helper.o
 drm-$(CONFIG_PCI) += ati_pcigart.o
+drm-$(CONFIG_DRM_PANEL) += drm_panel.o
 
 drm-usb-y   := drm_usb.o
 
@@ -31,6 +32,7 @@
 CFLAGS_drm_trace_points.o := -I$(src)
 
 obj-$(CONFIG_DRM)	+= drm.o
+obj-$(CONFIG_DRM_MIPI_DSI) += drm_mipi_dsi.o
 obj-$(CONFIG_DRM_USB)   += drm_usb.o
 obj-$(CONFIG_DRM_TTM)	+= ttm/
 obj-$(CONFIG_DRM_TDFX)	+= tdfx/
@@ -56,6 +58,8 @@
 obj-$(CONFIG_DRM_OMAP)	+= omapdrm/
 obj-$(CONFIG_DRM_TILCDC)	+= tilcdc/
 obj-$(CONFIG_DRM_QXL) += qxl/
+obj-$(CONFIG_DRM_BOCHS) += bochs/
 obj-$(CONFIG_DRM_MSM) += msm/
 obj-$(CONFIG_DRM_TEGRA) += tegra/
 obj-y			+= i2c/
+obj-y			+= panel/
diff --git a/drivers/gpu/drm/armada/Kconfig b/drivers/gpu/drm/armada/Kconfig
index 40d3715..50ae88a 100644
--- a/drivers/gpu/drm/armada/Kconfig
+++ b/drivers/gpu/drm/armada/Kconfig
@@ -5,6 +5,7 @@
 	select FB_CFB_COPYAREA
 	select FB_CFB_IMAGEBLIT
 	select DRM_KMS_HELPER
+	select DRM_KMS_FB_HELPER
 	help
 	  Support the "LCD" controllers found on the Marvell Armada 510
 	  devices.  There are two controllers on the device, each controller
diff --git a/drivers/gpu/drm/armada/armada_drv.c b/drivers/gpu/drm/armada/armada_drv.c
index 62d0ff3..acf3a36 100644
--- a/drivers/gpu/drm/armada/armada_drv.c
+++ b/drivers/gpu/drm/armada/armada_drv.c
@@ -128,6 +128,7 @@
 		return -ENOMEM;
 	}
 
+	platform_set_drvdata(dev->platformdev, dev);
 	dev->dev_private = priv;
 
 	/* Get the implementation specific driver data. */
@@ -381,7 +382,7 @@
 
 static int armada_drm_remove(struct platform_device *pdev)
 {
-	drm_platform_exit(&armada_drm_driver, pdev);
+	drm_put_dev(platform_get_drvdata(pdev));
 	return 0;
 }
 
diff --git a/drivers/gpu/drm/ast/ast_fb.c b/drivers/gpu/drm/ast/ast_fb.c
index 7b33e14..a28640f 100644
--- a/drivers/gpu/drm/ast/ast_fb.c
+++ b/drivers/gpu/drm/ast/ast_fb.c
@@ -65,7 +65,7 @@
 	 * then the BO is being moved and we should
 	 * store up the damage until later.
 	 */
-	if (!in_interrupt())
+	if (drm_can_sleep())
 		ret = ast_bo_reserve(bo, true);
 	if (ret) {
 		if (ret != -EBUSY)
diff --git a/drivers/gpu/drm/ast/ast_main.c b/drivers/gpu/drm/ast/ast_main.c
index af0b868..50535fd 100644
--- a/drivers/gpu/drm/ast/ast_main.c
+++ b/drivers/gpu/drm/ast/ast_main.c
@@ -189,53 +189,6 @@
 	return 0;
 }
 
-uint32_t ast_get_max_dclk(struct drm_device *dev, int bpp)
-{
-	struct ast_private *ast = dev->dev_private;
-	uint32_t dclk, jreg;
-	uint32_t dram_bus_width, mclk, dram_bandwidth, actual_dram_bandwidth, dram_efficency = 500;
-
-	dram_bus_width = ast->dram_bus_width;
-	mclk = ast->mclk;
-
-	if (ast->chip == AST2100 ||
-	    ast->chip == AST1100 ||
-	    ast->chip == AST2200 ||
-	    ast->chip == AST2150 ||
-	    ast->dram_bus_width == 16)
-		dram_efficency = 600;
-	else if (ast->chip == AST2300)
-		dram_efficency = 400;
-
-	dram_bandwidth = mclk * dram_bus_width * 2 / 8;
-	actual_dram_bandwidth = dram_bandwidth * dram_efficency / 1000;
-
-	if (ast->chip == AST1180)
-		dclk = actual_dram_bandwidth / ((bpp + 1) / 8);
-	else {
-		jreg = ast_get_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xd0, 0xff);
-		if ((jreg & 0x08) && (ast->chip == AST2000))
-			dclk = actual_dram_bandwidth / ((bpp + 1 + 16) / 8);
-		else if ((jreg & 0x08) && (bpp == 8))
-			dclk = actual_dram_bandwidth / ((bpp + 1 + 24) / 8);
-		else
-			dclk = actual_dram_bandwidth / ((bpp + 1) / 8);
-	}
-
-	if (ast->chip == AST2100 ||
-	    ast->chip == AST2200 ||
-	    ast->chip == AST2300 ||
-	    ast->chip == AST1180) {
-		if (dclk > 200)
-			dclk = 200;
-	} else {
-		if (dclk > 165)
-			dclk = 165;
-	}
-
-	return dclk;
-}
-
 static void ast_user_framebuffer_destroy(struct drm_framebuffer *fb)
 {
 	struct ast_framebuffer *ast_fb = to_ast_framebuffer(fb);
@@ -449,7 +402,7 @@
 	return 0;
 }
 
-void ast_bo_unref(struct ast_bo **bo)
+static void ast_bo_unref(struct ast_bo **bo)
 {
 	struct ttm_buffer_object *tbo;
 
diff --git a/drivers/gpu/drm/ast/ast_mode.c b/drivers/gpu/drm/ast/ast_mode.c
index 7fc9f72..cca063b 100644
--- a/drivers/gpu/drm/ast/ast_mode.c
+++ b/drivers/gpu/drm/ast/ast_mode.c
@@ -404,7 +404,7 @@
 	}
 }
 
-void ast_set_sync_reg(struct drm_device *dev, struct drm_display_mode *mode,
+static void ast_set_sync_reg(struct drm_device *dev, struct drm_display_mode *mode,
 		      struct ast_vbios_mode_info *vbios_mode)
 {
 	struct ast_private *ast = dev->dev_private;
@@ -415,7 +415,7 @@
 	ast_io_write8(ast, AST_IO_MISC_PORT_WRITE, jreg);
 }
 
-bool ast_set_dac_reg(struct drm_crtc *crtc, struct drm_display_mode *mode,
+static bool ast_set_dac_reg(struct drm_crtc *crtc, struct drm_display_mode *mode,
 		     struct ast_vbios_mode_info *vbios_mode)
 {
 	switch (crtc->fb->bits_per_pixel) {
@@ -427,7 +427,7 @@
 	return true;
 }
 
-void ast_set_start_address_crt1(struct drm_crtc *crtc, unsigned offset)
+static void ast_set_start_address_crt1(struct drm_crtc *crtc, unsigned offset)
 {
 	struct ast_private *ast = crtc->dev->dev_private;
 	u32 addr;
@@ -623,7 +623,7 @@
 	.destroy = ast_crtc_destroy,
 };
 
-int ast_crtc_init(struct drm_device *dev)
+static int ast_crtc_init(struct drm_device *dev)
 {
 	struct ast_crtc *crtc;
 	int i;
@@ -710,7 +710,7 @@
 	.mode_set = ast_encoder_mode_set,
 };
 
-int ast_encoder_init(struct drm_device *dev)
+static int ast_encoder_init(struct drm_device *dev)
 {
 	struct ast_encoder *ast_encoder;
 
@@ -777,7 +777,7 @@
 	.destroy = ast_connector_destroy,
 };
 
-int ast_connector_init(struct drm_device *dev)
+static int ast_connector_init(struct drm_device *dev)
 {
 	struct ast_connector *ast_connector;
 	struct drm_connector *connector;
@@ -810,7 +810,7 @@
 }
 
 /* allocate cursor cache and pin at start of VRAM */
-int ast_cursor_init(struct drm_device *dev)
+static int ast_cursor_init(struct drm_device *dev)
 {
 	struct ast_private *ast = dev->dev_private;
 	int size;
@@ -847,7 +847,7 @@
 	return ret;
 }
 
-void ast_cursor_fini(struct drm_device *dev)
+static void ast_cursor_fini(struct drm_device *dev)
 {
 	struct ast_private *ast = dev->dev_private;
 	ttm_bo_kunmap(&ast->cache_kmap);
@@ -965,7 +965,7 @@
 	kfree(i2c);
 }
 
-void ast_show_cursor(struct drm_crtc *crtc)
+static void ast_show_cursor(struct drm_crtc *crtc)
 {
 	struct ast_private *ast = crtc->dev->dev_private;
 	u8 jreg;
@@ -976,7 +976,7 @@
 	ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xcb, 0xfc, jreg);
 }
 
-void ast_hide_cursor(struct drm_crtc *crtc)
+static void ast_hide_cursor(struct drm_crtc *crtc)
 {
 	struct ast_private *ast = crtc->dev->dev_private;
 	ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xcb, 0xfc, 0x00);
diff --git a/drivers/gpu/drm/ast/ast_ttm.c b/drivers/gpu/drm/ast/ast_ttm.c
index 32aecb3..4ea9b17 100644
--- a/drivers/gpu/drm/ast/ast_ttm.c
+++ b/drivers/gpu/drm/ast/ast_ttm.c
@@ -80,7 +80,7 @@
 	return 0;
 }
 
-void
+static void
 ast_ttm_global_release(struct ast_private *ast)
 {
 	if (ast->ttm.mem_global_ref.release == NULL)
@@ -102,7 +102,7 @@
 	kfree(bo);
 }
 
-bool ast_ttm_bo_is_ast_bo(struct ttm_buffer_object *bo)
+static bool ast_ttm_bo_is_ast_bo(struct ttm_buffer_object *bo)
 {
 	if (bo->destroy == &ast_bo_ttm_destroy)
 		return true;
@@ -208,7 +208,7 @@
 };
 
 
-struct ttm_tt *ast_ttm_tt_create(struct ttm_bo_device *bdev,
+static struct ttm_tt *ast_ttm_tt_create(struct ttm_bo_device *bdev,
 				 unsigned long size, uint32_t page_flags,
 				 struct page *dummy_read_page)
 {
diff --git a/drivers/gpu/drm/bochs/Kconfig b/drivers/gpu/drm/bochs/Kconfig
new file mode 100644
index 0000000..c8fcf12
--- /dev/null
+++ b/drivers/gpu/drm/bochs/Kconfig
@@ -0,0 +1,11 @@
+config DRM_BOCHS
+	tristate "DRM Support for bochs dispi vga interface (qemu stdvga)"
+	depends on DRM && PCI
+	select DRM_KMS_HELPER
+	select FB_SYS_FILLRECT
+	select FB_SYS_COPYAREA
+	select FB_SYS_IMAGEBLIT
+	select DRM_TTM
+	help
+	  Choose this option for qemu.
+	  If M is selected, the module will be called bochs-drm.
diff --git a/drivers/gpu/drm/bochs/Makefile b/drivers/gpu/drm/bochs/Makefile
new file mode 100644
index 0000000..844a556
--- /dev/null
+++ b/drivers/gpu/drm/bochs/Makefile
@@ -0,0 +1,4 @@
+ccflags-y := -Iinclude/drm
+bochs-drm-y := bochs_drv.o bochs_mm.o bochs_kms.o bochs_fbdev.o bochs_hw.o
+
+obj-$(CONFIG_DRM_BOCHS)	+= bochs-drm.o
diff --git a/drivers/gpu/drm/bochs/bochs.h b/drivers/gpu/drm/bochs/bochs.h
new file mode 100644
index 0000000..741965c
--- /dev/null
+++ b/drivers/gpu/drm/bochs/bochs.h
@@ -0,0 +1,164 @@
+#include <linux/io.h>
+#include <linux/fb.h>
+
+#include <drm/drmP.h>
+#include <drm/drm_crtc.h>
+#include <drm/drm_crtc_helper.h>
+#include <drm/drm_fb_helper.h>
+
+#include <ttm/ttm_bo_driver.h>
+#include <ttm/ttm_page_alloc.h>
+
+/* ---------------------------------------------------------------------- */
+
+#define VBE_DISPI_IOPORT_INDEX           0x01CE
+#define VBE_DISPI_IOPORT_DATA            0x01CF
+
+#define VBE_DISPI_INDEX_ID               0x0
+#define VBE_DISPI_INDEX_XRES             0x1
+#define VBE_DISPI_INDEX_YRES             0x2
+#define VBE_DISPI_INDEX_BPP              0x3
+#define VBE_DISPI_INDEX_ENABLE           0x4
+#define VBE_DISPI_INDEX_BANK             0x5
+#define VBE_DISPI_INDEX_VIRT_WIDTH       0x6
+#define VBE_DISPI_INDEX_VIRT_HEIGHT      0x7
+#define VBE_DISPI_INDEX_X_OFFSET         0x8
+#define VBE_DISPI_INDEX_Y_OFFSET         0x9
+#define VBE_DISPI_INDEX_VIDEO_MEMORY_64K 0xa
+
+#define VBE_DISPI_ID0                    0xB0C0
+#define VBE_DISPI_ID1                    0xB0C1
+#define VBE_DISPI_ID2                    0xB0C2
+#define VBE_DISPI_ID3                    0xB0C3
+#define VBE_DISPI_ID4                    0xB0C4
+#define VBE_DISPI_ID5                    0xB0C5
+
+#define VBE_DISPI_DISABLED               0x00
+#define VBE_DISPI_ENABLED                0x01
+#define VBE_DISPI_GETCAPS                0x02
+#define VBE_DISPI_8BIT_DAC               0x20
+#define VBE_DISPI_LFB_ENABLED            0x40
+#define VBE_DISPI_NOCLEARMEM             0x80
+
+/* ---------------------------------------------------------------------- */
+
+enum bochs_types {
+	BOCHS_QEMU_STDVGA,
+	BOCHS_UNKNOWN,
+};
+
+struct bochs_framebuffer {
+	struct drm_framebuffer base;
+	struct drm_gem_object *obj;
+};
+
+struct bochs_device {
+	/* hw */
+	void __iomem   *mmio;
+	int            ioports;
+	void __iomem   *fb_map;
+	unsigned long  fb_base;
+	unsigned long  fb_size;
+
+	/* mode */
+	u16 xres;
+	u16 yres;
+	u16 yres_virtual;
+	u32 stride;
+	u32 bpp;
+
+	/* drm */
+	struct drm_device  *dev;
+	struct drm_crtc crtc;
+	struct drm_encoder encoder;
+	struct drm_connector connector;
+	bool mode_config_initialized;
+
+	/* ttm */
+	struct {
+		struct drm_global_reference mem_global_ref;
+		struct ttm_bo_global_ref bo_global_ref;
+		struct ttm_bo_device bdev;
+		bool initialized;
+	} ttm;
+
+	/* fbdev */
+	struct {
+		struct bochs_framebuffer gfb;
+		struct drm_fb_helper helper;
+		int size;
+		int x1, y1, x2, y2; /* dirty rect */
+		spinlock_t dirty_lock;
+		bool initialized;
+	} fb;
+};
+
+#define to_bochs_framebuffer(x) container_of(x, struct bochs_framebuffer, base)
+
+struct bochs_bo {
+	struct ttm_buffer_object bo;
+	struct ttm_placement placement;
+	struct ttm_bo_kmap_obj kmap;
+	struct drm_gem_object gem;
+	u32 placements[3];
+	int pin_count;
+};
+
+static inline struct bochs_bo *bochs_bo(struct ttm_buffer_object *bo)
+{
+	return container_of(bo, struct bochs_bo, bo);
+}
+
+static inline struct bochs_bo *gem_to_bochs_bo(struct drm_gem_object *gem)
+{
+	return container_of(gem, struct bochs_bo, gem);
+}
+
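+/*
+ * mmap offsets below 4 GiB fall through to the legacy drm_mmap() path;
+ * TTM-backed gem objects are mapped at or above this mark (see bochs_mmap).
+ */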
+#define DRM_FILE_PAGE_OFFSET (0x100000000ULL >> PAGE_SHIFT)
+
+static inline u64 bochs_bo_mmap_offset(struct bochs_bo *bo)
+{
+	return drm_vma_node_offset_addr(&bo->bo.vma_node);
+}
+
+/* ---------------------------------------------------------------------- */
+
+/* bochs_hw.c */
+int bochs_hw_init(struct drm_device *dev, uint32_t flags);
+void bochs_hw_fini(struct drm_device *dev);
+
+void bochs_hw_setmode(struct bochs_device *bochs,
+		      struct drm_display_mode *mode);
+void bochs_hw_setbase(struct bochs_device *bochs,
+		      int x, int y, u64 addr);
+
+/* bochs_mm.c */
+int bochs_mm_init(struct bochs_device *bochs);
+void bochs_mm_fini(struct bochs_device *bochs);
+int bochs_mmap(struct file *filp, struct vm_area_struct *vma);
+
+int bochs_gem_create(struct drm_device *dev, u32 size, bool iskernel,
+		     struct drm_gem_object **obj);
+int bochs_gem_init_object(struct drm_gem_object *obj);
+void bochs_gem_free_object(struct drm_gem_object *obj);
+int bochs_dumb_create(struct drm_file *file, struct drm_device *dev,
+		      struct drm_mode_create_dumb *args);
+int bochs_dumb_mmap_offset(struct drm_file *file, struct drm_device *dev,
+			   uint32_t handle, uint64_t *offset);
+
+int bochs_framebuffer_init(struct drm_device *dev,
+			   struct bochs_framebuffer *gfb,
+			   struct drm_mode_fb_cmd2 *mode_cmd,
+			   struct drm_gem_object *obj);
+int bochs_bo_pin(struct bochs_bo *bo, u32 pl_flag, u64 *gpu_addr);
+int bochs_bo_unpin(struct bochs_bo *bo);
+
+extern const struct drm_mode_config_funcs bochs_mode_funcs;
+
+/* bochs_kms.c */
+int bochs_kms_init(struct bochs_device *bochs);
+void bochs_kms_fini(struct bochs_device *bochs);
+
+/* bochs_fbdev.c */
+int bochs_fbdev_init(struct bochs_device *bochs);
+void bochs_fbdev_fini(struct bochs_device *bochs);
diff --git a/drivers/gpu/drm/bochs/bochs_drv.c b/drivers/gpu/drm/bochs/bochs_drv.c
new file mode 100644
index 0000000..395bba2
--- /dev/null
+++ b/drivers/gpu/drm/bochs/bochs_drv.c
@@ -0,0 +1,178 @@
+/*
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include <linux/mm.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+
+#include "bochs.h"
+
+static bool enable_fbdev = true;
+module_param_named(fbdev, enable_fbdev, bool, 0444);
+MODULE_PARM_DESC(fbdev, "register fbdev device");
+
+/* ---------------------------------------------------------------------- */
+/* drm interface                                                          */
+
+static int bochs_unload(struct drm_device *dev)
+{
+	struct bochs_device *bochs = dev->dev_private;
+
+	bochs_fbdev_fini(bochs);
+	bochs_kms_fini(bochs);
+	bochs_mm_fini(bochs);
+	bochs_hw_fini(dev);
+	kfree(bochs);
+	dev->dev_private = NULL;
+	return 0;
+}
+
+static int bochs_load(struct drm_device *dev, unsigned long flags)
+{
+	struct bochs_device *bochs;
+	int ret;
+
+	bochs = kzalloc(sizeof(*bochs), GFP_KERNEL);
+	if (bochs == NULL)
+		return -ENOMEM;
+	dev->dev_private = bochs;
+	bochs->dev = dev;
+
+	ret = bochs_hw_init(dev, flags);
+	if (ret)
+		goto err;
+
+	ret = bochs_mm_init(bochs);
+	if (ret)
+		goto err;
+
+	ret = bochs_kms_init(bochs);
+	if (ret)
+		goto err;
+
+	if (enable_fbdev)
+		bochs_fbdev_init(bochs);
+
+	return 0;
+
+err:
+	bochs_unload(dev);
+	return ret;
+}
+
+static const struct file_operations bochs_fops = {
+	.owner		= THIS_MODULE,
+	.open		= drm_open,
+	.release	= drm_release,
+	.unlocked_ioctl	= drm_ioctl,
+#ifdef CONFIG_COMPAT
+	.compat_ioctl	= drm_compat_ioctl,
+#endif
+	.poll		= drm_poll,
+	.read		= drm_read,
+	.llseek		= no_llseek,
+	.mmap           = bochs_mmap,
+};
+
+static struct drm_driver bochs_driver = {
+	.driver_features	= DRIVER_GEM | DRIVER_MODESET,
+	.load			= bochs_load,
+	.unload			= bochs_unload,
+	.fops			= &bochs_fops,
+	.name			= "bochs-drm",
+	.desc			= "bochs dispi vga interface (qemu stdvga)",
+	.date			= "20130925",
+	.major			= 1,
+	.minor			= 0,
+	.gem_free_object        = bochs_gem_free_object,
+	.dumb_create            = bochs_dumb_create,
+	.dumb_map_offset        = bochs_dumb_mmap_offset,
+	.dumb_destroy           = drm_gem_dumb_destroy,
+};
+
+/* ---------------------------------------------------------------------- */
+/* pci interface                                                          */
+
+static int bochs_kick_out_firmware_fb(struct pci_dev *pdev)
+{
+	struct apertures_struct *ap;
+
+	ap = alloc_apertures(1);
+	if (!ap)
+		return -ENOMEM;
+
+	ap->ranges[0].base = pci_resource_start(pdev, 0);
+	ap->ranges[0].size = pci_resource_len(pdev, 0);
+	remove_conflicting_framebuffers(ap, "bochsdrmfb", false);
+	kfree(ap);
+
+	return 0;
+}
+
+static int bochs_pci_probe(struct pci_dev *pdev,
+			   const struct pci_device_id *ent)
+{
+	int ret;
+
+	ret = bochs_kick_out_firmware_fb(pdev);
+	if (ret)
+		return ret;
+
+	return drm_get_pci_dev(pdev, ent, &bochs_driver);
+}
+
+static void bochs_pci_remove(struct pci_dev *pdev)
+{
+	struct drm_device *dev = pci_get_drvdata(pdev);
+
+	drm_put_dev(dev);
+}
+
+static DEFINE_PCI_DEVICE_TABLE(bochs_pci_tbl) = {
+	{
+		.vendor      = 0x1234,
+		.device      = 0x1111,
+		.subvendor   = 0x1af4,
+		.subdevice   = 0x1100,
+		.driver_data = BOCHS_QEMU_STDVGA,
+	},
+	{
+		.vendor      = 0x1234,
+		.device      = 0x1111,
+		.subvendor   = PCI_ANY_ID,
+		.subdevice   = PCI_ANY_ID,
+		.driver_data = BOCHS_UNKNOWN,
+	},
+	{ /* end of list */ }
+};
+
+static struct pci_driver bochs_pci_driver = {
+	.name =		"bochs-drm",
+	.id_table =	bochs_pci_tbl,
+	.probe =	bochs_pci_probe,
+	.remove =	bochs_pci_remove,
+};
+
+/* ---------------------------------------------------------------------- */
+/* module init/exit                                                       */
+
+static int __init bochs_init(void)
+{
+	return drm_pci_init(&bochs_driver, &bochs_pci_driver);
+}
+
+static void __exit bochs_exit(void)
+{
+	drm_pci_exit(&bochs_driver, &bochs_pci_driver);
+}
+
+module_init(bochs_init);
+module_exit(bochs_exit);
+
+MODULE_DEVICE_TABLE(pci, bochs_pci_tbl);
+MODULE_AUTHOR("Gerd Hoffmann <kraxel@redhat.com>");
+MODULE_LICENSE("GPL");
diff --git a/drivers/gpu/drm/bochs/bochs_fbdev.c b/drivers/gpu/drm/bochs/bochs_fbdev.c
new file mode 100644
index 0000000..4da5206
--- /dev/null
+++ b/drivers/gpu/drm/bochs/bochs_fbdev.c
@@ -0,0 +1,215 @@
+/*
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include "bochs.h"
+
+/* ---------------------------------------------------------------------- */
+
+static struct fb_ops bochsfb_ops = {
+	.owner = THIS_MODULE,
+	.fb_check_var = drm_fb_helper_check_var,
+	.fb_set_par = drm_fb_helper_set_par,
+	.fb_fillrect = sys_fillrect,
+	.fb_copyarea = sys_copyarea,
+	.fb_imageblit = sys_imageblit,
+	.fb_pan_display = drm_fb_helper_pan_display,
+	.fb_blank = drm_fb_helper_blank,
+	.fb_setcmap = drm_fb_helper_setcmap,
+};
+
+static int bochsfb_create_object(struct bochs_device *bochs,
+				 struct drm_mode_fb_cmd2 *mode_cmd,
+				 struct drm_gem_object **gobj_p)
+{
+	struct drm_device *dev = bochs->dev;
+	struct drm_gem_object *gobj;
+	u32 size;
+	int ret = 0;
+
+	size = mode_cmd->pitches[0] * mode_cmd->height;
+	ret = bochs_gem_create(dev, size, true, &gobj);
+	if (ret)
+		return ret;
+
+	*gobj_p = gobj;
+	return ret;
+}
+
+static int bochsfb_create(struct drm_fb_helper *helper,
+			  struct drm_fb_helper_surface_size *sizes)
+{
+	struct bochs_device *bochs =
+		container_of(helper, struct bochs_device, fb.helper);
+	struct drm_device *dev = bochs->dev;
+	struct fb_info *info;
+	struct drm_framebuffer *fb;
+	struct drm_mode_fb_cmd2 mode_cmd;
+	struct device *device = &dev->pdev->dev;
+	struct drm_gem_object *gobj = NULL;
+	struct bochs_bo *bo = NULL;
+	int size, ret;
+
+	if (sizes->surface_bpp != 32)
+		return -EINVAL;
+
+	mode_cmd.width = sizes->surface_width;
+	mode_cmd.height = sizes->surface_height;
+	mode_cmd.pitches[0] = mode_cmd.width * ((sizes->surface_bpp + 7) / 8);
+	mode_cmd.pixel_format = drm_mode_legacy_fb_format(sizes->surface_bpp,
+							  sizes->surface_depth);
+	size = mode_cmd.pitches[0] * mode_cmd.height;
+
+	/* alloc, pin & map bo */
+	ret = bochsfb_create_object(bochs, &mode_cmd, &gobj);
+	if (ret) {
+		DRM_ERROR("failed to create fbcon backing object %d\n", ret);
+		return ret;
+	}
+
+	bo = gem_to_bochs_bo(gobj);
+
+	ret = ttm_bo_reserve(&bo->bo, true, false, false, 0);
+	if (ret)
+		return ret;
+
+	ret = bochs_bo_pin(bo, TTM_PL_FLAG_VRAM, NULL);
+	if (ret) {
+		DRM_ERROR("failed to pin fbcon\n");
+		ttm_bo_unreserve(&bo->bo);
+		return ret;
+	}
+
+	ret = ttm_bo_kmap(&bo->bo, 0, bo->bo.num_pages,
+			  &bo->kmap);
+	if (ret) {
+		DRM_ERROR("failed to kmap fbcon\n");
+		ttm_bo_unreserve(&bo->bo);
+		return ret;
+	}
+
+	ttm_bo_unreserve(&bo->bo);
+
+	/* init fb device */
+	info = framebuffer_alloc(0, device);
+	if (info == NULL)
+		return -ENOMEM;
+
+	info->par = &bochs->fb.helper;
+
+	ret = bochs_framebuffer_init(bochs->dev, &bochs->fb.gfb, &mode_cmd, gobj);
+	if (ret)
+		return ret;
+
+	bochs->fb.size = size;
+
+	/* setup helper */
+	fb = &bochs->fb.gfb.base;
+	bochs->fb.helper.fb = fb;
+	bochs->fb.helper.fbdev = info;
+
+	strcpy(info->fix.id, "bochsdrmfb");
+
+	info->flags = FBINFO_DEFAULT;
+	info->fbops = &bochsfb_ops;
+
+	drm_fb_helper_fill_fix(info, fb->pitches[0], fb->depth);
+	drm_fb_helper_fill_var(info, &bochs->fb.helper, sizes->fb_width,
+			       sizes->fb_height);
+
+	info->screen_base = bo->kmap.virtual;
+	info->screen_size = size;
+
+#if 0
+	/* FIXME: get this right for mmap(/dev/fb0) */
+	info->fix.smem_start = bochs_bo_mmap_offset(bo);
+	info->fix.smem_len = size;
+#endif
+
+	ret = fb_alloc_cmap(&info->cmap, 256, 0);
+	if (ret) {
+		DRM_ERROR("%s: can't allocate color map\n", info->fix.id);
+		return -ENOMEM;
+	}
+
+	return 0;
+}
+
+static int bochs_fbdev_destroy(struct bochs_device *bochs)
+{
+	struct bochs_framebuffer *gfb = &bochs->fb.gfb;
+	struct fb_info *info;
+
+	DRM_DEBUG_DRIVER("\n");
+
+	if (bochs->fb.helper.fbdev) {
+		info = bochs->fb.helper.fbdev;
+
+		unregister_framebuffer(info);
+		if (info->cmap.len)
+			fb_dealloc_cmap(&info->cmap);
+		framebuffer_release(info);
+	}
+
+	if (gfb->obj) {
+		drm_gem_object_unreference_unlocked(gfb->obj);
+		gfb->obj = NULL;
+	}
+
+	drm_fb_helper_fini(&bochs->fb.helper);
+	drm_framebuffer_unregister_private(&gfb->base);
+	drm_framebuffer_cleanup(&gfb->base);
+
+	return 0;
+}
+
+void bochs_fb_gamma_set(struct drm_crtc *crtc, u16 red, u16 green,
+			u16 blue, int regno)
+{
+}
+
+void bochs_fb_gamma_get(struct drm_crtc *crtc, u16 *red, u16 *green,
+			u16 *blue, int regno)
+{
+	*red   = regno;
+	*green = regno;
+	*blue  = regno;
+}
+
+static struct drm_fb_helper_funcs bochs_fb_helper_funcs = {
+	.gamma_set = bochs_fb_gamma_set,
+	.gamma_get = bochs_fb_gamma_get,
+	.fb_probe = bochsfb_create,
+};
+
+int bochs_fbdev_init(struct bochs_device *bochs)
+{
+	int ret;
+
+	bochs->fb.helper.funcs = &bochs_fb_helper_funcs;
+	spin_lock_init(&bochs->fb.dirty_lock);
+
+	ret = drm_fb_helper_init(bochs->dev, &bochs->fb.helper,
+				 1, 1);
+	if (ret)
+		return ret;
+
+	drm_fb_helper_single_add_all_connectors(&bochs->fb.helper);
+	drm_helper_disable_unused_functions(bochs->dev);
+	drm_fb_helper_initial_config(&bochs->fb.helper, 32);
+
+	bochs->fb.initialized = true;
+	return 0;
+}
+
+void bochs_fbdev_fini(struct bochs_device *bochs)
+{
+	if (!bochs->fb.initialized)
+		return;
+
+	bochs_fbdev_destroy(bochs);
+	bochs->fb.initialized = false;
+}
diff --git a/drivers/gpu/drm/bochs/bochs_hw.c b/drivers/gpu/drm/bochs/bochs_hw.c
new file mode 100644
index 0000000..dbe619e
--- /dev/null
+++ b/drivers/gpu/drm/bochs/bochs_hw.c
@@ -0,0 +1,177 @@
+/*
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include "bochs.h"
+
+/* ---------------------------------------------------------------------- */
+
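+/*
+ * Register access helpers: with a mmio bar the legacy vga ports show up
+ * at offset 0x400 and the dispi registers at 0x500 (two bytes each);
+ * without it the classic 0x1ce/0x1cf index/data ioports are used.
+ */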
+static void bochs_vga_writeb(struct bochs_device *bochs, u16 ioport, u8 val)
+{
+	if (WARN_ON(ioport < 0x3c0 || ioport > 0x3df))
+		return;
+
+	if (bochs->mmio) {
+		int offset = ioport - 0x3c0 + 0x400;
+		writeb(val, bochs->mmio + offset);
+	} else {
+		outb(val, ioport);
+	}
+}
+
+static u16 bochs_dispi_read(struct bochs_device *bochs, u16 reg)
+{
+	u16 ret = 0;
+
+	if (bochs->mmio) {
+		int offset = 0x500 + (reg << 1);
+		ret = readw(bochs->mmio + offset);
+	} else {
+		outw(reg, VBE_DISPI_IOPORT_INDEX);
+		ret = inw(VBE_DISPI_IOPORT_DATA);
+	}
+	return ret;
+}
+
+static void bochs_dispi_write(struct bochs_device *bochs, u16 reg, u16 val)
+{
+	if (bochs->mmio) {
+		int offset = 0x500 + (reg << 1);
+		writew(val, bochs->mmio + offset);
+	} else {
+		outw(reg, VBE_DISPI_IOPORT_INDEX);
+		outw(val, VBE_DISPI_IOPORT_DATA);
+	}
+}
+
+int bochs_hw_init(struct drm_device *dev, uint32_t flags)
+{
+	struct bochs_device *bochs = dev->dev_private;
+	struct pci_dev *pdev = dev->pdev;
+	unsigned long addr, size, mem, ioaddr, iosize;
+	u16 id;
+
+	if (/* (ent->driver_data == BOCHS_QEMU_STDVGA) && */
+	    (pdev->resource[2].flags & IORESOURCE_MEM)) {
+		/* mmio bar with vga and bochs registers present */
+		if (pci_request_region(pdev, 2, "bochs-drm") != 0) {
+			DRM_ERROR("Cannot request mmio region\n");
+			return -EBUSY;
+		}
+		ioaddr = pci_resource_start(pdev, 2);
+		iosize = pci_resource_len(pdev, 2);
+		bochs->mmio = ioremap(ioaddr, iosize);
+		if (bochs->mmio == NULL) {
+			DRM_ERROR("Cannot map mmio region\n");
+			return -ENOMEM;
+		}
+	} else {
+		ioaddr = VBE_DISPI_IOPORT_INDEX;
+		iosize = 2;
+		if (!request_region(ioaddr, iosize, "bochs-drm")) {
+			DRM_ERROR("Cannot request ioports\n");
+			return -EBUSY;
+		}
+		bochs->ioports = 1;
+	}
+
+	id = bochs_dispi_read(bochs, VBE_DISPI_INDEX_ID);
+	mem = bochs_dispi_read(bochs, VBE_DISPI_INDEX_VIDEO_MEMORY_64K)
+		* 64 * 1024;
+	if ((id & 0xfff0) != VBE_DISPI_ID0) {
+		DRM_ERROR("ID mismatch\n");
+		return -ENODEV;
+	}
+
+	if ((pdev->resource[0].flags & IORESOURCE_MEM) == 0)
+		return -ENODEV;
+	addr = pci_resource_start(pdev, 0);
+	size = pci_resource_len(pdev, 0);
+	if (addr == 0)
+		return -ENODEV;
+	if (size != mem) {
+		DRM_ERROR("Size mismatch: pci=%ld, bochs=%ld\n",
+			size, mem);
+		size = min(size, mem);
+	}
+
+	if (pci_request_region(pdev, 0, "bochs-drm") != 0) {
+		DRM_ERROR("Cannot request framebuffer\n");
+		return -EBUSY;
+	}
+
+	bochs->fb_map = ioremap(addr, size);
+	if (bochs->fb_map == NULL) {
+		DRM_ERROR("Cannot map framebuffer\n");
+		return -ENOMEM;
+	}
+	bochs->fb_base = addr;
+	bochs->fb_size = size;
+
+	DRM_INFO("Found bochs VGA, ID 0x%x.\n", id);
+	DRM_INFO("Framebuffer size %ld kB @ 0x%lx, %s @ 0x%lx.\n",
+		 size / 1024, addr,
+		 bochs->ioports ? "ioports" : "mmio",
+		 ioaddr);
+	return 0;
+}
+
+void bochs_hw_fini(struct drm_device *dev)
+{
+	struct bochs_device *bochs = dev->dev_private;
+
+	if (bochs->mmio)
+		iounmap(bochs->mmio);
+	if (bochs->ioports)
+		release_region(VBE_DISPI_IOPORT_INDEX, 2);
+	if (bochs->fb_map)
+		iounmap(bochs->fb_map);
+	pci_release_regions(dev->pdev);
+}
+
+void bochs_hw_setmode(struct bochs_device *bochs,
+		      struct drm_display_mode *mode)
+{
+	bochs->xres = mode->hdisplay;
+	bochs->yres = mode->vdisplay;
+	bochs->bpp = 32;
+	bochs->stride = mode->hdisplay * (bochs->bpp / 8);
+	bochs->yres_virtual = bochs->fb_size / bochs->stride;
+
+	DRM_DEBUG_DRIVER("%dx%d @ %d bpp, vy %d\n",
+			 bochs->xres, bochs->yres, bochs->bpp,
+			 bochs->yres_virtual);
+
+	bochs_vga_writeb(bochs, 0x3c0, 0x20); /* unblank */
+
+	bochs_dispi_write(bochs, VBE_DISPI_INDEX_BPP,         bochs->bpp);
+	bochs_dispi_write(bochs, VBE_DISPI_INDEX_XRES,        bochs->xres);
+	bochs_dispi_write(bochs, VBE_DISPI_INDEX_YRES,        bochs->yres);
+	bochs_dispi_write(bochs, VBE_DISPI_INDEX_BANK,        0);
+	bochs_dispi_write(bochs, VBE_DISPI_INDEX_VIRT_WIDTH,  bochs->xres);
+	bochs_dispi_write(bochs, VBE_DISPI_INDEX_VIRT_HEIGHT,
+			  bochs->yres_virtual);
+	bochs_dispi_write(bochs, VBE_DISPI_INDEX_X_OFFSET,    0);
+	bochs_dispi_write(bochs, VBE_DISPI_INDEX_Y_OFFSET,    0);
+
+	bochs_dispi_write(bochs, VBE_DISPI_INDEX_ENABLE,
+			  VBE_DISPI_ENABLED | VBE_DISPI_LFB_ENABLED);
+}
+
+void bochs_hw_setbase(struct bochs_device *bochs,
+		      int x, int y, u64 addr)
+{
+	unsigned long offset = (unsigned long)addr +
+		y * bochs->stride +
+		x * (bochs->bpp / 8);
+	int vy = offset / bochs->stride;
+	int vx = (offset % bochs->stride) * 8 / bochs->bpp;
+
+	DRM_DEBUG_DRIVER("x %d, y %d, addr %llx -> offset %lx, vx %d, vy %d\n",
+			 x, y, addr, offset, vx, vy);
+	bochs_dispi_write(bochs, VBE_DISPI_INDEX_X_OFFSET, vx);
+	bochs_dispi_write(bochs, VBE_DISPI_INDEX_Y_OFFSET, vy);
+}
diff --git a/drivers/gpu/drm/bochs/bochs_kms.c b/drivers/gpu/drm/bochs/bochs_kms.c
new file mode 100644
index 0000000..62ec7d4
--- /dev/null
+++ b/drivers/gpu/drm/bochs/bochs_kms.c
@@ -0,0 +1,294 @@
+/*
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include "bochs.h"
+
+static int defx = 1024;
+static int defy = 768;
+
+module_param(defx, int, 0444);
+module_param(defy, int, 0444);
+MODULE_PARM_DESC(defx, "default x resolution");
+MODULE_PARM_DESC(defy, "default y resolution");
+
+/* ---------------------------------------------------------------------- */
+
+static void bochs_crtc_load_lut(struct drm_crtc *crtc)
+{
+}
+
+static void bochs_crtc_dpms(struct drm_crtc *crtc, int mode)
+{
+	switch (mode) {
+	case DRM_MODE_DPMS_ON:
+	case DRM_MODE_DPMS_STANDBY:
+	case DRM_MODE_DPMS_SUSPEND:
+	case DRM_MODE_DPMS_OFF:
+	default:
+		return;
+	}
+}
+
+static bool bochs_crtc_mode_fixup(struct drm_crtc *crtc,
+				  const struct drm_display_mode *mode,
+				  struct drm_display_mode *adjusted_mode)
+{
+	return true;
+}
+
+static int bochs_crtc_mode_set_base(struct drm_crtc *crtc, int x, int y,
+				    struct drm_framebuffer *old_fb)
+{
+	struct bochs_device *bochs =
+		container_of(crtc, struct bochs_device, crtc);
+	struct bochs_framebuffer *bochs_fb;
+	struct bochs_bo *bo;
+	u64 gpu_addr = 0;
+	int ret;
+
+	if (old_fb) {
+		bochs_fb = to_bochs_framebuffer(old_fb);
+		bo = gem_to_bochs_bo(bochs_fb->obj);
+		ret = ttm_bo_reserve(&bo->bo, true, false, false, 0);
+		if (ret) {
+			DRM_ERROR("failed to reserve old_fb bo\n");
+		} else {
+			bochs_bo_unpin(bo);
+			ttm_bo_unreserve(&bo->bo);
+		}
+	}
+
+	if (WARN_ON(crtc->fb == NULL))
+		return -EINVAL;
+
+	bochs_fb = to_bochs_framebuffer(crtc->fb);
+	bo = gem_to_bochs_bo(bochs_fb->obj);
+	ret = ttm_bo_reserve(&bo->bo, true, false, false, 0);
+	if (ret)
+		return ret;
+
+	ret = bochs_bo_pin(bo, TTM_PL_FLAG_VRAM, &gpu_addr);
+	if (ret) {
+		ttm_bo_unreserve(&bo->bo);
+		return ret;
+	}
+
+	ttm_bo_unreserve(&bo->bo);
+	bochs_hw_setbase(bochs, x, y, gpu_addr);
+	return 0;
+}
+
+static int bochs_crtc_mode_set(struct drm_crtc *crtc,
+			       struct drm_display_mode *mode,
+			       struct drm_display_mode *adjusted_mode,
+			       int x, int y, struct drm_framebuffer *old_fb)
+{
+	struct bochs_device *bochs =
+		container_of(crtc, struct bochs_device, crtc);
+
+	bochs_hw_setmode(bochs, mode);
+	bochs_crtc_mode_set_base(crtc, x, y, old_fb);
+	return 0;
+}
+
+static void bochs_crtc_prepare(struct drm_crtc *crtc)
+{
+}
+
+static void bochs_crtc_commit(struct drm_crtc *crtc)
+{
+}
+
+static void bochs_crtc_gamma_set(struct drm_crtc *crtc, u16 *red, u16 *green,
+				 u16 *blue, uint32_t start, uint32_t size)
+{
+}
+
+/* These provide the minimum set of functions required to handle a CRTC */
+static const struct drm_crtc_funcs bochs_crtc_funcs = {
+	.gamma_set = bochs_crtc_gamma_set,
+	.set_config = drm_crtc_helper_set_config,
+	.destroy = drm_crtc_cleanup,
+};
+
+static const struct drm_crtc_helper_funcs bochs_helper_funcs = {
+	.dpms = bochs_crtc_dpms,
+	.mode_fixup = bochs_crtc_mode_fixup,
+	.mode_set = bochs_crtc_mode_set,
+	.mode_set_base = bochs_crtc_mode_set_base,
+	.prepare = bochs_crtc_prepare,
+	.commit = bochs_crtc_commit,
+	.load_lut = bochs_crtc_load_lut,
+};
+
+static void bochs_crtc_init(struct drm_device *dev)
+{
+	struct bochs_device *bochs = dev->dev_private;
+	struct drm_crtc *crtc = &bochs->crtc;
+
+	drm_crtc_init(dev, crtc, &bochs_crtc_funcs);
+	drm_mode_crtc_set_gamma_size(crtc, 256);
+	drm_crtc_helper_add(crtc, &bochs_helper_funcs);
+}
+
+static bool bochs_encoder_mode_fixup(struct drm_encoder *encoder,
+				     const struct drm_display_mode *mode,
+				     struct drm_display_mode *adjusted_mode)
+{
+	return true;
+}
+
+static void bochs_encoder_mode_set(struct drm_encoder *encoder,
+				   struct drm_display_mode *mode,
+				   struct drm_display_mode *adjusted_mode)
+{
+}
+
+static void bochs_encoder_dpms(struct drm_encoder *encoder, int state)
+{
+}
+
+static void bochs_encoder_prepare(struct drm_encoder *encoder)
+{
+}
+
+static void bochs_encoder_commit(struct drm_encoder *encoder)
+{
+}
+
+static const struct drm_encoder_helper_funcs bochs_encoder_helper_funcs = {
+	.dpms = bochs_encoder_dpms,
+	.mode_fixup = bochs_encoder_mode_fixup,
+	.mode_set = bochs_encoder_mode_set,
+	.prepare = bochs_encoder_prepare,
+	.commit = bochs_encoder_commit,
+};
+
+static const struct drm_encoder_funcs bochs_encoder_encoder_funcs = {
+	.destroy = drm_encoder_cleanup,
+};
+
+static void bochs_encoder_init(struct drm_device *dev)
+{
+	struct bochs_device *bochs = dev->dev_private;
+	struct drm_encoder *encoder = &bochs->encoder;
+
+	encoder->possible_crtcs = 0x1;
+	drm_encoder_init(dev, encoder, &bochs_encoder_encoder_funcs,
+			 DRM_MODE_ENCODER_DAC);
+	drm_encoder_helper_add(encoder, &bochs_encoder_helper_funcs);
+}
+
+
+int bochs_connector_get_modes(struct drm_connector *connector)
+{
+	int count;
+
+	count = drm_add_modes_noedid(connector, 8192, 8192);
+	drm_set_preferred_mode(connector, defx, defy);
+	return count;
+}
+
+static int bochs_connector_mode_valid(struct drm_connector *connector,
+				      struct drm_display_mode *mode)
+{
+	struct bochs_device *bochs =
+		container_of(connector, struct bochs_device, connector);
+	unsigned long size = mode->hdisplay * mode->vdisplay * 4;
+
+	/*
+	 * Make sure we can fit two framebuffers into video memory.
+	 * This allows up to 1600x1200 with 16 MB (default size).
+	 * If you want more try this:
+	 *     'qemu -vga std -global VGA.vgamem_mb=32 $otherargs'
+	 */
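+	/* e.g. 1600x1200x4 bytes = 7,680,000; two buffers are ~14.6 MiB */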
+	if (size * 2 > bochs->fb_size)
+		return MODE_BAD;
+
+	return MODE_OK;
+}
+
+static struct drm_encoder *
+bochs_connector_best_encoder(struct drm_connector *connector)
+{
+	int enc_id = connector->encoder_ids[0];
+	struct drm_mode_object *obj;
+	struct drm_encoder *encoder;
+
+	/* pick the encoder ids */
+	if (enc_id) {
+		obj = drm_mode_object_find(connector->dev, enc_id,
+					   DRM_MODE_OBJECT_ENCODER);
+		if (!obj)
+			return NULL;
+		encoder = obj_to_encoder(obj);
+		return encoder;
+	}
+	return NULL;
+}
+
+static enum drm_connector_status
+bochs_connector_detect(struct drm_connector *connector, bool force)
+{
+	return connector_status_connected;
+}
+
+struct drm_connector_helper_funcs bochs_connector_connector_helper_funcs = {
+	.get_modes = bochs_connector_get_modes,
+	.mode_valid = bochs_connector_mode_valid,
+	.best_encoder = bochs_connector_best_encoder,
+};
+
+struct drm_connector_funcs bochs_connector_connector_funcs = {
+	.dpms = drm_helper_connector_dpms,
+	.detect = bochs_connector_detect,
+	.fill_modes = drm_helper_probe_single_connector_modes,
+	.destroy = drm_connector_cleanup,
+};
+
+static void bochs_connector_init(struct drm_device *dev)
+{
+	struct bochs_device *bochs = dev->dev_private;
+	struct drm_connector *connector = &bochs->connector;
+
+	drm_connector_init(dev, connector, &bochs_connector_connector_funcs,
+			   DRM_MODE_CONNECTOR_VIRTUAL);
+	drm_connector_helper_add(connector,
+				 &bochs_connector_connector_helper_funcs);
+}
+
+
+int bochs_kms_init(struct bochs_device *bochs)
+{
+	drm_mode_config_init(bochs->dev);
+	bochs->mode_config_initialized = true;
+
+	bochs->dev->mode_config.max_width = 8192;
+	bochs->dev->mode_config.max_height = 8192;
+
+	bochs->dev->mode_config.fb_base = bochs->fb_base;
+	bochs->dev->mode_config.preferred_depth = 24;
+	bochs->dev->mode_config.prefer_shadow = 0;
+
+	bochs->dev->mode_config.funcs = (void *)&bochs_mode_funcs;
+
+	bochs_crtc_init(bochs->dev);
+	bochs_encoder_init(bochs->dev);
+	bochs_connector_init(bochs->dev);
+	drm_mode_connector_attach_encoder(&bochs->connector,
+					  &bochs->encoder);
+
+	return 0;
+}
+
+void bochs_kms_fini(struct bochs_device *bochs)
+{
+	if (bochs->mode_config_initialized) {
+		drm_mode_config_cleanup(bochs->dev);
+		bochs->mode_config_initialized = false;
+	}
+}
diff --git a/drivers/gpu/drm/bochs/bochs_mm.c b/drivers/gpu/drm/bochs/bochs_mm.c
new file mode 100644
index 0000000..ce68587
--- /dev/null
+++ b/drivers/gpu/drm/bochs/bochs_mm.c
@@ -0,0 +1,546 @@
+/*
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include "bochs.h"
+
+static void bochs_ttm_placement(struct bochs_bo *bo, int domain);
+
+/* ---------------------------------------------------------------------- */
+
+static inline struct bochs_device *bochs_bdev(struct ttm_bo_device *bd)
+{
+	return container_of(bd, struct bochs_device, ttm.bdev);
+}
+
+static int bochs_ttm_mem_global_init(struct drm_global_reference *ref)
+{
+	return ttm_mem_global_init(ref->object);
+}
+
+static void bochs_ttm_mem_global_release(struct drm_global_reference *ref)
+{
+	ttm_mem_global_release(ref->object);
+}
+
+static int bochs_ttm_global_init(struct bochs_device *bochs)
+{
+	struct drm_global_reference *global_ref;
+	int r;
+
+	global_ref = &bochs->ttm.mem_global_ref;
+	global_ref->global_type = DRM_GLOBAL_TTM_MEM;
+	global_ref->size = sizeof(struct ttm_mem_global);
+	global_ref->init = &bochs_ttm_mem_global_init;
+	global_ref->release = &bochs_ttm_mem_global_release;
+	r = drm_global_item_ref(global_ref);
+	if (r != 0) {
+		DRM_ERROR("Failed setting up TTM memory accounting "
+			  "subsystem.\n");
+		return r;
+	}
+
+	bochs->ttm.bo_global_ref.mem_glob =
+		bochs->ttm.mem_global_ref.object;
+	global_ref = &bochs->ttm.bo_global_ref.ref;
+	global_ref->global_type = DRM_GLOBAL_TTM_BO;
+	global_ref->size = sizeof(struct ttm_bo_global);
+	global_ref->init = &ttm_bo_global_init;
+	global_ref->release = &ttm_bo_global_release;
+	r = drm_global_item_ref(global_ref);
+	if (r != 0) {
+		DRM_ERROR("Failed setting up TTM BO subsystem.\n");
+		drm_global_item_unref(&bochs->ttm.mem_global_ref);
+		return r;
+	}
+
+	return 0;
+}
+
+static void bochs_ttm_global_release(struct bochs_device *bochs)
+{
+	if (bochs->ttm.mem_global_ref.release == NULL)
+		return;
+
+	drm_global_item_unref(&bochs->ttm.bo_global_ref.ref);
+	drm_global_item_unref(&bochs->ttm.mem_global_ref);
+	bochs->ttm.mem_global_ref.release = NULL;
+}
+
+
+static void bochs_bo_ttm_destroy(struct ttm_buffer_object *tbo)
+{
+	struct bochs_bo *bo;
+
+	bo = container_of(tbo, struct bochs_bo, bo);
+	drm_gem_object_release(&bo->gem);
+	kfree(bo);
+}
+
+static bool bochs_ttm_bo_is_bochs_bo(struct ttm_buffer_object *bo)
+{
+	if (bo->destroy == &bochs_bo_ttm_destroy)
+		return true;
+	return false;
+}
+
+static int bochs_bo_init_mem_type(struct ttm_bo_device *bdev, uint32_t type,
+				  struct ttm_mem_type_manager *man)
+{
+	switch (type) {
+	case TTM_PL_SYSTEM:
+		man->flags = TTM_MEMTYPE_FLAG_MAPPABLE;
+		man->available_caching = TTM_PL_MASK_CACHING;
+		man->default_caching = TTM_PL_FLAG_CACHED;
+		break;
+	case TTM_PL_VRAM:
+		man->func = &ttm_bo_manager_func;
+		man->flags = TTM_MEMTYPE_FLAG_FIXED |
+			TTM_MEMTYPE_FLAG_MAPPABLE;
+		man->available_caching = TTM_PL_FLAG_UNCACHED |
+			TTM_PL_FLAG_WC;
+		man->default_caching = TTM_PL_FLAG_WC;
+		break;
+	default:
+		DRM_ERROR("Unsupported memory type %u\n", (unsigned)type);
+		return -EINVAL;
+	}
+	return 0;
+}
+
+static void
+bochs_bo_evict_flags(struct ttm_buffer_object *bo, struct ttm_placement *pl)
+{
+	struct bochs_bo *bochsbo = bochs_bo(bo);
+
+	if (!bochs_ttm_bo_is_bochs_bo(bo))
+		return;
+
+	bochs_ttm_placement(bochsbo, TTM_PL_FLAG_SYSTEM);
+	*pl = bochsbo->placement;
+}
+
+static int bochs_bo_verify_access(struct ttm_buffer_object *bo,
+				  struct file *filp)
+{
+	struct bochs_bo *bochsbo = bochs_bo(bo);
+
+	return drm_vma_node_verify_access(&bochsbo->gem.vma_node, filp);
+}
+
+static int bochs_ttm_io_mem_reserve(struct ttm_bo_device *bdev,
+				    struct ttm_mem_reg *mem)
+{
+	struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];
+	struct bochs_device *bochs = bochs_bdev(bdev);
+
+	mem->bus.addr = NULL;
+	mem->bus.offset = 0;
+	mem->bus.size = mem->num_pages << PAGE_SHIFT;
+	mem->bus.base = 0;
+	mem->bus.is_iomem = false;
+	if (!(man->flags & TTM_MEMTYPE_FLAG_MAPPABLE))
+		return -EINVAL;
+	switch (mem->mem_type) {
+	case TTM_PL_SYSTEM:
+		/* system memory */
+		return 0;
+	case TTM_PL_VRAM:
+		mem->bus.offset = mem->start << PAGE_SHIFT;
+		mem->bus.base = bochs->fb_base;
+		mem->bus.is_iomem = true;
+		break;
+	default:
+		return -EINVAL;
+		break;
+	}
+	return 0;
+}
+
+static void bochs_ttm_io_mem_free(struct ttm_bo_device *bdev,
+				  struct ttm_mem_reg *mem)
+{
+}
+
+static int bochs_bo_move(struct ttm_buffer_object *bo,
+			 bool evict, bool interruptible,
+			 bool no_wait_gpu,
+			 struct ttm_mem_reg *new_mem)
+{
+	return ttm_bo_move_memcpy(bo, evict, no_wait_gpu, new_mem);
+}
+
+
+static void bochs_ttm_backend_destroy(struct ttm_tt *tt)
+{
+	ttm_tt_fini(tt);
+	kfree(tt);
+}
+
+static struct ttm_backend_func bochs_tt_backend_func = {
+	.destroy = &bochs_ttm_backend_destroy,
+};
+
+static struct ttm_tt *bochs_ttm_tt_create(struct ttm_bo_device *bdev,
+					  unsigned long size,
+					  uint32_t page_flags,
+					  struct page *dummy_read_page)
+{
+	struct ttm_tt *tt;
+
+	tt = kzalloc(sizeof(struct ttm_tt), GFP_KERNEL);
+	if (tt == NULL)
+		return NULL;
+	tt->func = &bochs_tt_backend_func;
+	if (ttm_tt_init(tt, bdev, size, page_flags, dummy_read_page)) {
+		kfree(tt);
+		return NULL;
+	}
+	return tt;
+}
+
+struct ttm_bo_driver bochs_bo_driver = {
+	.ttm_tt_create = bochs_ttm_tt_create,
+	.ttm_tt_populate = ttm_pool_populate,
+	.ttm_tt_unpopulate = ttm_pool_unpopulate,
+	.init_mem_type = bochs_bo_init_mem_type,
+	.evict_flags = bochs_bo_evict_flags,
+	.move = bochs_bo_move,
+	.verify_access = bochs_bo_verify_access,
+	.io_mem_reserve = &bochs_ttm_io_mem_reserve,
+	.io_mem_free = &bochs_ttm_io_mem_free,
+};
+
+int bochs_mm_init(struct bochs_device *bochs)
+{
+	struct ttm_bo_device *bdev = &bochs->ttm.bdev;
+	int ret;
+
+	ret = bochs_ttm_global_init(bochs);
+	if (ret)
+		return ret;
+
+	ret = ttm_bo_device_init(&bochs->ttm.bdev,
+				 bochs->ttm.bo_global_ref.ref.object,
+				 &bochs_bo_driver, DRM_FILE_PAGE_OFFSET,
+				 true);
+	if (ret) {
+		DRM_ERROR("Error initialising bo driver; %d\n", ret);
+		return ret;
+	}
+
+	ret = ttm_bo_init_mm(bdev, TTM_PL_VRAM,
+			     bochs->fb_size >> PAGE_SHIFT);
+	if (ret) {
+		DRM_ERROR("Failed ttm VRAM init: %d\n", ret);
+		return ret;
+	}
+
+	bochs->ttm.initialized = true;
+	return 0;
+}
+
+void bochs_mm_fini(struct bochs_device *bochs)
+{
+	if (!bochs->ttm.initialized)
+		return;
+
+	ttm_bo_device_release(&bochs->ttm.bdev);
+	bochs_ttm_global_release(bochs);
+	bochs->ttm.initialized = false;
+}
+
+static void bochs_ttm_placement(struct bochs_bo *bo, int domain)
+{
+	u32 c = 0;
+	bo->placement.fpfn = 0;
+	bo->placement.lpfn = 0;
+	bo->placement.placement = bo->placements;
+	bo->placement.busy_placement = bo->placements;
+	if (domain & TTM_PL_FLAG_VRAM) {
+		bo->placements[c++] = TTM_PL_FLAG_WC | TTM_PL_FLAG_UNCACHED
+			| TTM_PL_FLAG_VRAM;
+	}
+	if (domain & TTM_PL_FLAG_SYSTEM) {
+		bo->placements[c++] = TTM_PL_MASK_CACHING | TTM_PL_FLAG_SYSTEM;
+	}
+	if (!c) {
+		bo->placements[c++] = TTM_PL_MASK_CACHING | TTM_PL_FLAG_SYSTEM;
+	}
+	bo->placement.num_placement = c;
+	bo->placement.num_busy_placement = c;
+}
+
+static inline u64 bochs_bo_gpu_offset(struct bochs_bo *bo)
+{
+	return bo->bo.offset;
+}
+
+int bochs_bo_pin(struct bochs_bo *bo, u32 pl_flag, u64 *gpu_addr)
+{
+	int i, ret;
+
+	if (bo->pin_count) {
+		bo->pin_count++;
+		if (gpu_addr)
+			*gpu_addr = bochs_bo_gpu_offset(bo);
+		return 0;
+	}
+
+	bochs_ttm_placement(bo, pl_flag);
+	for (i = 0; i < bo->placement.num_placement; i++)
+		bo->placements[i] |= TTM_PL_FLAG_NO_EVICT;
+	ret = ttm_bo_validate(&bo->bo, &bo->placement, false, false);
+	if (ret)
+		return ret;
+
+	bo->pin_count = 1;
+	if (gpu_addr)
+		*gpu_addr = bochs_bo_gpu_offset(bo);
+	return 0;
+}
+
+int bochs_bo_unpin(struct bochs_bo *bo)
+{
+	int i, ret;
+
+	if (!bo->pin_count) {
+		DRM_ERROR("unpin bad %p\n", bo);
+		return 0;
+	}
+	bo->pin_count--;
+
+	if (bo->pin_count)
+		return 0;
+
+	for (i = 0; i < bo->placement.num_placement; i++)
+		bo->placements[i] &= ~TTM_PL_FLAG_NO_EVICT;
+	ret = ttm_bo_validate(&bo->bo, &bo->placement, false, false);
+	if (ret)
+		return ret;
+
+	return 0;
+}
+
+int bochs_mmap(struct file *filp, struct vm_area_struct *vma)
+{
+	struct drm_file *file_priv;
+	struct bochs_device *bochs;
+
+	if (unlikely(vma->vm_pgoff < DRM_FILE_PAGE_OFFSET))
+		return drm_mmap(filp, vma);
+
+	file_priv = filp->private_data;
+	bochs = file_priv->minor->dev->dev_private;
+	return ttm_bo_mmap(filp, vma, &bochs->ttm.bdev);
+}
+
+/* ---------------------------------------------------------------------- */
+
+static int bochs_bo_create(struct drm_device *dev, int size, int align,
+			   uint32_t flags, struct bochs_bo **pbochsbo)
+{
+	struct bochs_device *bochs = dev->dev_private;
+	struct bochs_bo *bochsbo;
+	size_t acc_size;
+	int ret;
+
+	bochsbo = kzalloc(sizeof(struct bochs_bo), GFP_KERNEL);
+	if (!bochsbo)
+		return -ENOMEM;
+
+	ret = drm_gem_object_init(dev, &bochsbo->gem, size);
+	if (ret) {
+		kfree(bochsbo);
+		return ret;
+	}
+
+	bochsbo->bo.bdev = &bochs->ttm.bdev;
+	bochsbo->bo.bdev->dev_mapping = dev->dev_mapping;
+
+	bochs_ttm_placement(bochsbo, TTM_PL_FLAG_VRAM | TTM_PL_FLAG_SYSTEM);
+
+	acc_size = ttm_bo_dma_acc_size(&bochs->ttm.bdev, size,
+				       sizeof(struct bochs_bo));
+
+	ret = ttm_bo_init(&bochs->ttm.bdev, &bochsbo->bo, size,
+			  ttm_bo_type_device, &bochsbo->placement,
+			  align >> PAGE_SHIFT, false, NULL, acc_size,
+			  NULL, bochs_bo_ttm_destroy);
+	if (ret)
+		return ret;
+
+	*pbochsbo = bochsbo;
+	return 0;
+}
+
+int bochs_gem_create(struct drm_device *dev, u32 size, bool iskernel,
+		     struct drm_gem_object **obj)
+{
+	struct bochs_bo *bochsbo;
+	int ret;
+
+	*obj = NULL;
+
+	size = ALIGN(size, PAGE_SIZE);
+	if (size == 0)
+		return -EINVAL;
+
+	ret = bochs_bo_create(dev, size, 0, 0, &bochsbo);
+	if (ret) {
+		if (ret != -ERESTARTSYS)
+			DRM_ERROR("failed to allocate GEM object\n");
+		return ret;
+	}
+	*obj = &bochsbo->gem;
+	return 0;
+}
+
+int bochs_dumb_create(struct drm_file *file, struct drm_device *dev,
+		      struct drm_mode_create_dumb *args)
+{
+	struct drm_gem_object *gobj;
+	u32 handle;
+	int ret;
+
+	args->pitch = args->width * ((args->bpp + 7) / 8);
+	args->size = args->pitch * args->height;
+
+	ret = bochs_gem_create(dev, args->size, false,
+			       &gobj);
+	if (ret)
+		return ret;
+
+	ret = drm_gem_handle_create(file, gobj, &handle);
+	drm_gem_object_unreference_unlocked(gobj);
+	if (ret)
+		return ret;
+
+	args->handle = handle;
+	return 0;
+}
+
+static void bochs_bo_unref(struct bochs_bo **bo)
+{
+	struct ttm_buffer_object *tbo;
+
+	if ((*bo) == NULL)
+		return;
+
+	tbo = &((*bo)->bo);
+	ttm_bo_unref(&tbo);
+	if (tbo == NULL)
+		*bo = NULL;
+
+}
+
+void bochs_gem_free_object(struct drm_gem_object *obj)
+{
+	struct bochs_bo *bochs_bo = gem_to_bochs_bo(obj);
+
+	if (!bochs_bo)
+		return;
+	bochs_bo_unref(&bochs_bo);
+}
+
+int bochs_dumb_mmap_offset(struct drm_file *file, struct drm_device *dev,
+			   uint32_t handle, uint64_t *offset)
+{
+	struct drm_gem_object *obj;
+	int ret;
+	struct bochs_bo *bo;
+
+	mutex_lock(&dev->struct_mutex);
+	obj = drm_gem_object_lookup(dev, file, handle);
+	if (obj == NULL) {
+		ret = -ENOENT;
+		goto out_unlock;
+	}
+
+	bo = gem_to_bochs_bo(obj);
+	*offset = bochs_bo_mmap_offset(bo);
+
+	drm_gem_object_unreference(obj);
+	ret = 0;
+out_unlock:
+	mutex_unlock(&dev->struct_mutex);
+	return ret;
+
+}
+
+/* ---------------------------------------------------------------------- */
+
+static void bochs_user_framebuffer_destroy(struct drm_framebuffer *fb)
+{
+	struct bochs_framebuffer *bochs_fb = to_bochs_framebuffer(fb);
+	if (bochs_fb->obj)
+		drm_gem_object_unreference_unlocked(bochs_fb->obj);
+	drm_framebuffer_cleanup(fb);
+	kfree(fb);
+}
+
+static const struct drm_framebuffer_funcs bochs_fb_funcs = {
+	.destroy = bochs_user_framebuffer_destroy,
+};
+
+int bochs_framebuffer_init(struct drm_device *dev,
+			   struct bochs_framebuffer *gfb,
+			   struct drm_mode_fb_cmd2 *mode_cmd,
+			   struct drm_gem_object *obj)
+{
+	int ret;
+
+	drm_helper_mode_fill_fb_struct(&gfb->base, mode_cmd);
+	gfb->obj = obj;
+	ret = drm_framebuffer_init(dev, &gfb->base, &bochs_fb_funcs);
+	if (ret) {
+		DRM_ERROR("drm_framebuffer_init failed: %d\n", ret);
+		return ret;
+	}
+	return 0;
+}
+
+static struct drm_framebuffer *
+bochs_user_framebuffer_create(struct drm_device *dev,
+			      struct drm_file *filp,
+			      struct drm_mode_fb_cmd2 *mode_cmd)
+{
+	struct drm_gem_object *obj;
+	struct bochs_framebuffer *bochs_fb;
+	int ret;
+
+	DRM_DEBUG_DRIVER("%dx%d, format %c%c%c%c\n",
+	       mode_cmd->width, mode_cmd->height,
+	       (mode_cmd->pixel_format)       & 0xff,
+	       (mode_cmd->pixel_format >> 8)  & 0xff,
+	       (mode_cmd->pixel_format >> 16) & 0xff,
+	       (mode_cmd->pixel_format >> 24) & 0xff);
+
+	if (mode_cmd->pixel_format != DRM_FORMAT_XRGB8888)
+		return ERR_PTR(-ENOENT);
+
+	obj = drm_gem_object_lookup(dev, filp, mode_cmd->handles[0]);
+	if (obj == NULL)
+		return ERR_PTR(-ENOENT);
+
+	bochs_fb = kzalloc(sizeof(*bochs_fb), GFP_KERNEL);
+	if (!bochs_fb) {
+		drm_gem_object_unreference_unlocked(obj);
+		return ERR_PTR(-ENOMEM);
+	}
+
+	ret = bochs_framebuffer_init(dev, bochs_fb, mode_cmd, obj);
+	if (ret) {
+		drm_gem_object_unreference_unlocked(obj);
+		kfree(bochs_fb);
+		return ERR_PTR(ret);
+	}
+	return &bochs_fb->base;
+}
+
+const struct drm_mode_config_funcs bochs_mode_funcs = {
+	.fb_create = bochs_user_framebuffer_create,
+};
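/*
 * Illustrative sketch, not part of this patch: the userspace side of
 * bochs_dumb_create() above.  A client requests a dumb buffer through the
 * generic ioctl and gets back the handle, pitch and size the driver computed
 * (pitch = width * bytes per pixel, size = pitch * height).  Assumes "fd" is
 * an already opened DRM file descriptor; create_dumb_buffer() is a
 * hypothetical helper name.
 */
#include <stdint.h>
#include <string.h>
#include <sys/ioctl.h>
#include <drm/drm.h>

static int create_dumb_buffer(int fd, uint32_t width, uint32_t height,
			      struct drm_mode_create_dumb *creq)
{
	memset(creq, 0, sizeof(*creq));
	creq->width  = width;
	creq->height = height;
	creq->bpp    = 32;	/* matches XRGB8888, the only format the bochs
				 * framebuffer code above accepts */

	/* On success creq->handle, creq->pitch and creq->size are filled in. */
	return ioctl(fd, DRM_IOCTL_MODE_CREATE_DUMB, creq);
}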
diff --git a/drivers/gpu/drm/cirrus/cirrus_drv.h b/drivers/gpu/drm/cirrus/cirrus_drv.h
index b6aded7..117d3ec 100644
--- a/drivers/gpu/drm/cirrus/cirrus_drv.h
+++ b/drivers/gpu/drm/cirrus/cirrus_drv.h
@@ -222,7 +222,7 @@
 void cirrus_driver_irq_preinstall(struct drm_device *dev);
 int cirrus_driver_irq_postinstall(struct drm_device *dev);
 void cirrus_driver_irq_uninstall(struct drm_device *dev);
-irqreturn_t cirrus_driver_irq_handler(DRM_IRQ_ARGS);
+irqreturn_t cirrus_driver_irq_handler(int irq, void *arg);
 
 				/* cirrus_kms.c */
 int cirrus_driver_load(struct drm_device *dev, unsigned long flags);
diff --git a/drivers/gpu/drm/cirrus/cirrus_fbdev.c b/drivers/gpu/drm/cirrus/cirrus_fbdev.c
index b27e956..32bbba0 100644
--- a/drivers/gpu/drm/cirrus/cirrus_fbdev.c
+++ b/drivers/gpu/drm/cirrus/cirrus_fbdev.c
@@ -39,7 +39,7 @@
 	 * then the BO is being moved and we should
 	 * store up the damage until later.
 	 */
-	if (!in_interrupt())
+	if (drm_can_sleep())
 		ret = cirrus_bo_reserve(bo, true);
 	if (ret) {
 		if (ret != -EBUSY)
@@ -233,6 +233,9 @@
 	info->apertures->ranges[0].base = cdev->dev->mode_config.fb_base;
 	info->apertures->ranges[0].size = cdev->mc.vram_size;
 
+	info->fix.smem_start = cdev->dev->mode_config.fb_base;
+	info->fix.smem_len = cdev->mc.vram_size;
+
 	info->screen_base = sysram;
 	info->screen_size = size;
 
diff --git a/drivers/gpu/drm/cirrus/cirrus_main.c b/drivers/gpu/drm/cirrus/cirrus_main.c
index 78e76f2..4b0170c 100644
--- a/drivers/gpu/drm/cirrus/cirrus_main.c
+++ b/drivers/gpu/drm/cirrus/cirrus_main.c
@@ -255,7 +255,7 @@
 	return 0;
 }
 
-void cirrus_bo_unref(struct cirrus_bo **bo)
+static void cirrus_bo_unref(struct cirrus_bo **bo)
 {
 	struct ttm_buffer_object *tbo;
 
diff --git a/drivers/gpu/drm/cirrus/cirrus_mode.c b/drivers/gpu/drm/cirrus/cirrus_mode.c
index adabc3d..530f78f 100644
--- a/drivers/gpu/drm/cirrus/cirrus_mode.c
+++ b/drivers/gpu/drm/cirrus/cirrus_mode.c
@@ -102,7 +102,7 @@
 	return true;
 }
 
-void cirrus_set_start_address(struct drm_crtc *crtc, unsigned offset)
+static void cirrus_set_start_address(struct drm_crtc *crtc, unsigned offset)
 {
 	struct cirrus_device *cdev = crtc->dev->dev_private;
 	u32 addr;
@@ -273,8 +273,8 @@
 		sr07 |= 0x11;
 		break;
 	case 16:
-		sr07 |= 0xc1;
-		hdr = 0xc0;
+		sr07 |= 0x17;
+		hdr = 0xc1;
 		break;
 	case 24:
 		sr07 |= 0x15;
@@ -453,7 +453,7 @@
 {
 }
 
-void cirrus_encoder_destroy(struct drm_encoder *encoder)
+static void cirrus_encoder_destroy(struct drm_encoder *encoder)
 {
 	struct cirrus_encoder *cirrus_encoder = to_cirrus_encoder(encoder);
 	drm_encoder_cleanup(encoder);
@@ -492,7 +492,7 @@
 }
 
 
-int cirrus_vga_get_modes(struct drm_connector *connector)
+static int cirrus_vga_get_modes(struct drm_connector *connector)
 {
 	int count;
 
@@ -509,7 +509,7 @@
 	return MODE_OK;
 }
 
-struct drm_encoder *cirrus_connector_best_encoder(struct drm_connector
+static struct drm_encoder *cirrus_connector_best_encoder(struct drm_connector
 						  *connector)
 {
 	int enc_id = connector->encoder_ids[0];
diff --git a/drivers/gpu/drm/cirrus/cirrus_ttm.c b/drivers/gpu/drm/cirrus/cirrus_ttm.c
index 75becde..8b37c25 100644
--- a/drivers/gpu/drm/cirrus/cirrus_ttm.c
+++ b/drivers/gpu/drm/cirrus/cirrus_ttm.c
@@ -80,7 +80,7 @@
 	return 0;
 }
 
-void
+static void
 cirrus_ttm_global_release(struct cirrus_device *cirrus)
 {
 	if (cirrus->ttm.mem_global_ref.release == NULL)
@@ -102,7 +102,7 @@
 	kfree(bo);
 }
 
-bool cirrus_ttm_bo_is_cirrus_bo(struct ttm_buffer_object *bo)
+static bool cirrus_ttm_bo_is_cirrus_bo(struct ttm_buffer_object *bo)
 {
 	if (bo->destroy == &cirrus_bo_ttm_destroy)
 		return true;
@@ -208,7 +208,7 @@
 };
 
 
-struct ttm_tt *cirrus_ttm_tt_create(struct ttm_bo_device *bdev,
+static struct ttm_tt *cirrus_ttm_tt_create(struct ttm_bo_device *bdev,
 				 unsigned long size, uint32_t page_flags,
 				 struct page *dummy_read_page)
 {
@@ -375,26 +375,6 @@
 	return 0;
 }
 
-int cirrus_bo_unpin(struct cirrus_bo *bo)
-{
-	int i, ret;
-	if (!bo->pin_count) {
-		DRM_ERROR("unpin bad %p\n", bo);
-		return 0;
-	}
-	bo->pin_count--;
-	if (bo->pin_count)
-		return 0;
-
-	for (i = 0; i < bo->placement.num_placement ; i++)
-		bo->placements[i] &= ~TTM_PL_FLAG_NO_EVICT;
-	ret = ttm_bo_validate(&bo->bo, &bo->placement, false, false);
-	if (ret)
-		return ret;
-
-	return 0;
-}
-
 int cirrus_bo_push_sysram(struct cirrus_bo *bo)
 {
 	int i, ret;
diff --git a/drivers/gpu/drm/drm_agpsupport.c b/drivers/gpu/drm/drm_agpsupport.c
index e301d65..dde205c 100644
--- a/drivers/gpu/drm/drm_agpsupport.c
+++ b/drivers/gpu/drm/drm_agpsupport.c
@@ -53,7 +53,7 @@
  */
 int drm_agp_info(struct drm_device *dev, struct drm_agp_info *info)
 {
-	DRM_AGP_KERN *kern;
+	struct agp_kern_info *kern;
 
 	if (!dev->agp || !dev->agp->acquired)
 		return -EINVAL;
@@ -198,17 +198,15 @@
 int drm_agp_alloc(struct drm_device *dev, struct drm_agp_buffer *request)
 {
 	struct drm_agp_mem *entry;
-	DRM_AGP_MEM *memory;
+	struct agp_memory *memory;
 	unsigned long pages;
 	u32 type;
 
 	if (!dev->agp || !dev->agp->acquired)
 		return -EINVAL;
-	if (!(entry = kmalloc(sizeof(*entry), GFP_KERNEL)))
+	if (!(entry = kzalloc(sizeof(*entry), GFP_KERNEL)))
 		return -ENOMEM;
 
-	memset(entry, 0, sizeof(*entry));
-
 	pages = (request->size + PAGE_SIZE - 1) / PAGE_SIZE;
 	type = (u32) request->type;
 	if (!(memory = agp_allocate_memory(dev->agp->bridge, pages, type))) {
@@ -393,14 +391,16 @@
  * Gets the drm_agp_t structure which is made available by the agpgart module
  * via the inter_module_* functions. Creates and initializes a drm_agp_head
  * structure.
+ *
+ * Note that final cleanup of the kmalloced structure is directly done in
+ * drm_pci_agp_destroy.
  */
 struct drm_agp_head *drm_agp_init(struct drm_device *dev)
 {
 	struct drm_agp_head *head = NULL;
 
-	if (!(head = kmalloc(sizeof(*head), GFP_KERNEL)))
+	if (!(head = kzalloc(sizeof(*head), GFP_KERNEL)))
 		return NULL;
-	memset((void *)head, 0, sizeof(*head));
 	head->bridge = agp_find_bridge(dev->pdev);
 	if (!head->bridge) {
 		if (!(head->bridge = agp_backend_acquire(dev->pdev))) {
@@ -439,7 +439,7 @@
 {
 	struct drm_agp_mem *entry, *tempe;
 
-	if (!drm_core_has_AGP(dev) || !dev->agp)
+	if (!dev->agp)
 		return;
 	if (drm_core_check_feature(dev, DRIVER_MODESET))
 		return;
@@ -460,35 +460,20 @@
 }
 
 /**
- * drm_agp_destroy - Destroy AGP head
- * @dev: DRM device
- *
- * Destroy resources that were previously allocated via drm_agp_initp. Caller
- * must ensure to clean up all AGP resources before calling this. See
- * drm_agp_clear().
- *
- * Call this to destroy AGP heads allocated via drm_agp_init().
- */
-void drm_agp_destroy(struct drm_agp_head *agp)
-{
-	kfree(agp);
-}
-
-/**
  * Binds a collection of pages into AGP memory at the given offset, returning
  * the AGP memory structure containing them.
  *
  * No reference is held on the pages during this time -- it is up to the
  * caller to handle that.
  */
-DRM_AGP_MEM *
+struct agp_memory *
 drm_agp_bind_pages(struct drm_device *dev,
 		   struct page **pages,
 		   unsigned long num_pages,
 		   uint32_t gtt_offset,
 		   u32 type)
 {
-	DRM_AGP_MEM *mem;
+	struct agp_memory *mem;
 	int ret, i;
 
 	DRM_DEBUG("\n");
diff --git a/drivers/gpu/drm/drm_buffer.c b/drivers/gpu/drm/drm_buffer.c
index 39a7183..0406110 100644
--- a/drivers/gpu/drm/drm_buffer.c
+++ b/drivers/gpu/drm/drm_buffer.c
@@ -114,7 +114,7 @@
 
 	for (idx = 0; idx < nr_pages; ++idx) {
 
-		if (DRM_COPY_FROM_USER(buf->data[idx],
+		if (copy_from_user(buf->data[idx],
 			user_data + idx * PAGE_SIZE,
 			min(PAGE_SIZE, size - idx * PAGE_SIZE))) {
 			DRM_ERROR("Failed to copy user data (%p) to drm buffer"
diff --git a/drivers/gpu/drm/drm_bufs.c b/drivers/gpu/drm/drm_bufs.c
index 471e051d..edec31f 100644
--- a/drivers/gpu/drm/drm_bufs.c
+++ b/drivers/gpu/drm/drm_bufs.c
@@ -261,7 +261,7 @@
 		struct drm_agp_mem *entry;
 		int valid = 0;
 
-		if (!drm_core_has_AGP(dev)) {
+		if (!dev->agp) {
 			kfree(map);
 			return -EINVAL;
 		}
@@ -303,9 +303,6 @@
 
 		break;
 	}
-	case _DRM_GEM:
-		DRM_ERROR("tried to addmap GEM object\n");
-		break;
 	case _DRM_SCATTER_GATHER:
 		if (!dev->sg) {
 			kfree(map);
@@ -483,9 +480,6 @@
 		dmah.size = map->size;
 		__drm_pci_free(dev, &dmah);
 		break;
-	case _DRM_GEM:
-		DRM_ERROR("tried to rmmap GEM object\n");
-		break;
 	}
 	kfree(map);
 
@@ -1396,7 +1390,7 @@
 	spin_unlock(&dev->count_lock);
 
 	if (request->count >= dma->buf_count) {
-		if ((drm_core_has_AGP(dev) && (dma->flags & _DRM_DMA_USE_AGP))
+		if ((dev->agp && (dma->flags & _DRM_DMA_USE_AGP))
 		    || (drm_core_check_feature(dev, DRIVER_SG)
 			&& (dma->flags & _DRM_DMA_USE_SG))) {
 			struct drm_local_map *map = dev->agp_buffer_map;
diff --git a/drivers/gpu/drm/drm_crtc.c b/drivers/gpu/drm/drm_crtc.c
index d6cf77c..3b7d32d 100644
--- a/drivers/gpu/drm/drm_crtc.c
+++ b/drivers/gpu/drm/drm_crtc.c
@@ -675,6 +675,29 @@
 EXPORT_SYMBOL(drm_crtc_cleanup);
 
 /**
+ * drm_crtc_index - find the index of a registered CRTC
+ * @crtc: CRTC to find index for
+ *
+ * Given a registered CRTC, return the index of that CRTC within a DRM
+ * device's list of CRTCs.
+ */
+unsigned int drm_crtc_index(struct drm_crtc *crtc)
+{
+	unsigned int index = 0;
+	struct drm_crtc *tmp;
+
+	list_for_each_entry(tmp, &crtc->dev->mode_config.crtc_list, head) {
+		if (tmp == crtc)
+			return index;
+
+		index++;
+	}
+
+	BUG();
+}
+EXPORT_SYMBOL(drm_crtc_index);
+
+/**
  * drm_mode_probed_add - add a mode to a connector's probed mode list
  * @connector: connector the new mode
  * @mode: mode data
@@ -2767,10 +2790,8 @@
 	}
 
 	if (fb->funcs->dirty) {
-		drm_modeset_lock_all(dev);
 		ret = fb->funcs->dirty(fb, file_priv, flags, r->color,
 				       clips, num_clips);
-		drm_modeset_unlock_all(dev);
 	} else {
 		ret = -ENOSYS;
 	}
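/*
 * Illustrative sketch, not part of this patch: what drm_crtc_index() above
 * buys its callers.  Bit N of encoder->possible_crtcs corresponds to the CRTC
 * at index N in the mode_config crtc_list, so the open-coded mask walk
 * dropped from drm_crtc_helper.c below collapses to a single shift.
 * encoder_can_use_crtc() is a hypothetical name, shown only to make the
 * relationship explicit.
 */
static bool encoder_can_use_crtc(struct drm_encoder *encoder,
				 struct drm_crtc *crtc)
{
	return (encoder->possible_crtcs & (1 << drm_crtc_index(crtc))) != 0;
}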
diff --git a/drivers/gpu/drm/drm_crtc_helper.c b/drivers/gpu/drm/drm_crtc_helper.c
index 01361ab..ea92b82 100644
--- a/drivers/gpu/drm/drm_crtc_helper.c
+++ b/drivers/gpu/drm/drm_crtc_helper.c
@@ -324,35 +324,6 @@
 }
 EXPORT_SYMBOL(drm_helper_disable_unused_functions);
 
-/**
- * drm_encoder_crtc_ok - can a given crtc drive a given encoder?
- * @encoder: encoder to test
- * @crtc: crtc to test
- *
- * Return false if @encoder can't be driven by @crtc, true otherwise.
- */
-static bool drm_encoder_crtc_ok(struct drm_encoder *encoder,
-				struct drm_crtc *crtc)
-{
-	struct drm_device *dev;
-	struct drm_crtc *tmp;
-	int crtc_mask = 1;
-
-	WARN(!crtc, "checking null crtc?\n");
-
-	dev = crtc->dev;
-
-	list_for_each_entry(tmp, &dev->mode_config.crtc_list, head) {
-		if (tmp == crtc)
-			break;
-		crtc_mask <<= 1;
-	}
-
-	if (encoder->possible_crtcs & crtc_mask)
-		return true;
-	return false;
-}
-
 /*
  * Check the CRTC we're going to map each output to vs. its current
  * CRTC.  If they don't match, we have to disable the output and the CRTC
@@ -536,7 +507,7 @@
 	 * are later needed by vblank and swap-completion
 	 * timestamping. They are derived from true hwmode.
 	 */
-	drm_calc_timestamping_constants(crtc);
+	drm_calc_timestamping_constants(crtc, &crtc->hwmode);
 
 	/* FIXME: add subpixel order */
 done:
diff --git a/drivers/gpu/drm/drm_drv.c b/drivers/gpu/drm/drm_drv.c
index d9137e4..345be03 100644
--- a/drivers/gpu/drm/drm_drv.c
+++ b/drivers/gpu/drm/drm_drv.c
@@ -315,9 +315,6 @@
 	if (drm_device_is_unplugged(dev))
 		return -ENODEV;
 
-	atomic_inc(&dev->ioctl_count);
-	++file_priv->ioctl_count;
-
 	if ((nr >= DRM_CORE_IOCTL_COUNT) &&
 	    ((nr < DRM_COMMAND_BASE) || (nr >= DRM_COMMAND_END)))
 		goto err_i1;
@@ -410,7 +407,6 @@
 
 	if (kdata != stack_kdata)
 		kfree(kdata);
-	atomic_dec(&dev->ioctl_count);
 	if (retcode)
 		DRM_DEBUG("ret = %d\n", retcode);
 	return retcode;
diff --git a/drivers/gpu/drm/drm_edid.c b/drivers/gpu/drm/drm_edid.c
index 8835dcd..b924306 100644
--- a/drivers/gpu/drm/drm_edid.c
+++ b/drivers/gpu/drm/drm_edid.c
@@ -605,347 +605,347 @@
 	{ DRM_MODE("640x480", DRM_MODE_TYPE_DRIVER, 25175, 640, 656,
 		   752, 800, 0, 480, 490, 492, 525, 0,
 		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC),
-	  .vrefresh = 60, },
+	  .vrefresh = 60, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_4_3, },
 	/* 2 - 720x480@60Hz */
 	{ DRM_MODE("720x480", DRM_MODE_TYPE_DRIVER, 27000, 720, 736,
 		   798, 858, 0, 480, 489, 495, 525, 0,
 		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC),
-	  .vrefresh = 60, },
+	  .vrefresh = 60, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_4_3, },
 	/* 3 - 720x480@60Hz */
 	{ DRM_MODE("720x480", DRM_MODE_TYPE_DRIVER, 27000, 720, 736,
 		   798, 858, 0, 480, 489, 495, 525, 0,
 		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC),
-	  .vrefresh = 60, },
+	  .vrefresh = 60, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
 	/* 4 - 1280x720@60Hz */
 	{ DRM_MODE("1280x720", DRM_MODE_TYPE_DRIVER, 74250, 1280, 1390,
 		   1430, 1650, 0, 720, 725, 730, 750, 0,
 		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
-	  .vrefresh = 60, },
+	  .vrefresh = 60, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
 	/* 5 - 1920x1080i@60Hz */
 	{ DRM_MODE("1920x1080i", DRM_MODE_TYPE_DRIVER, 74250, 1920, 2008,
 		   2052, 2200, 0, 1080, 1084, 1094, 1125, 0,
 		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC |
 			DRM_MODE_FLAG_INTERLACE),
-	  .vrefresh = 60, },
+	  .vrefresh = 60, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
 	/* 6 - 1440x480i@60Hz */
 	{ DRM_MODE("1440x480i", DRM_MODE_TYPE_DRIVER, 27000, 1440, 1478,
 		   1602, 1716, 0, 480, 488, 494, 525, 0,
 		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
 			DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_DBLCLK),
-	  .vrefresh = 60, },
+	  .vrefresh = 60, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_4_3, },
 	/* 7 - 1440x480i@60Hz */
 	{ DRM_MODE("1440x480i", DRM_MODE_TYPE_DRIVER, 27000, 1440, 1478,
 		   1602, 1716, 0, 480, 488, 494, 525, 0,
 		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
 			DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_DBLCLK),
-	  .vrefresh = 60, },
+	  .vrefresh = 60, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
 	/* 8 - 1440x240@60Hz */
 	{ DRM_MODE("1440x240", DRM_MODE_TYPE_DRIVER, 27000, 1440, 1478,
 		   1602, 1716, 0, 240, 244, 247, 262, 0,
 		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
 			DRM_MODE_FLAG_DBLCLK),
-	  .vrefresh = 60, },
+	  .vrefresh = 60, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_4_3, },
 	/* 9 - 1440x240@60Hz */
 	{ DRM_MODE("1440x240", DRM_MODE_TYPE_DRIVER, 27000, 1440, 1478,
 		   1602, 1716, 0, 240, 244, 247, 262, 0,
 		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
 			DRM_MODE_FLAG_DBLCLK),
-	  .vrefresh = 60, },
+	  .vrefresh = 60, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
 	/* 10 - 2880x480i@60Hz */
 	{ DRM_MODE("2880x480i", DRM_MODE_TYPE_DRIVER, 54000, 2880, 2956,
 		   3204, 3432, 0, 480, 488, 494, 525, 0,
 		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
 			DRM_MODE_FLAG_INTERLACE),
-	  .vrefresh = 60, },
+	  .vrefresh = 60, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_4_3, },
 	/* 11 - 2880x480i@60Hz */
 	{ DRM_MODE("2880x480i", DRM_MODE_TYPE_DRIVER, 54000, 2880, 2956,
 		   3204, 3432, 0, 480, 488, 494, 525, 0,
 		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
 			DRM_MODE_FLAG_INTERLACE),
-	  .vrefresh = 60, },
+	  .vrefresh = 60, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
 	/* 12 - 2880x240@60Hz */
 	{ DRM_MODE("2880x240", DRM_MODE_TYPE_DRIVER, 54000, 2880, 2956,
 		   3204, 3432, 0, 240, 244, 247, 262, 0,
 		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC),
-	  .vrefresh = 60, },
+	  .vrefresh = 60, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_4_3, },
 	/* 13 - 2880x240@60Hz */
 	{ DRM_MODE("2880x240", DRM_MODE_TYPE_DRIVER, 54000, 2880, 2956,
 		   3204, 3432, 0, 240, 244, 247, 262, 0,
 		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC),
-	  .vrefresh = 60, },
+	  .vrefresh = 60, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
 	/* 14 - 1440x480@60Hz */
 	{ DRM_MODE("1440x480", DRM_MODE_TYPE_DRIVER, 54000, 1440, 1472,
 		   1596, 1716, 0, 480, 489, 495, 525, 0,
 		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC),
-	  .vrefresh = 60, },
+	  .vrefresh = 60, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_4_3, },
 	/* 15 - 1440x480@60Hz */
 	{ DRM_MODE("1440x480", DRM_MODE_TYPE_DRIVER, 54000, 1440, 1472,
 		   1596, 1716, 0, 480, 489, 495, 525, 0,
 		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC),
-	  .vrefresh = 60, },
+	  .vrefresh = 60, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
 	/* 16 - 1920x1080@60Hz */
 	{ DRM_MODE("1920x1080", DRM_MODE_TYPE_DRIVER, 148500, 1920, 2008,
 		   2052, 2200, 0, 1080, 1084, 1089, 1125, 0,
 		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
-	  .vrefresh = 60, },
+	  .vrefresh = 60, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
 	/* 17 - 720x576@50Hz */
 	{ DRM_MODE("720x576", DRM_MODE_TYPE_DRIVER, 27000, 720, 732,
 		   796, 864, 0, 576, 581, 586, 625, 0,
 		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC),
-	  .vrefresh = 50, },
+	  .vrefresh = 50, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_4_3, },
 	/* 18 - 720x576@50Hz */
 	{ DRM_MODE("720x576", DRM_MODE_TYPE_DRIVER, 27000, 720, 732,
 		   796, 864, 0, 576, 581, 586, 625, 0,
 		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC),
-	  .vrefresh = 50, },
+	  .vrefresh = 50, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
 	/* 19 - 1280x720@50Hz */
 	{ DRM_MODE("1280x720", DRM_MODE_TYPE_DRIVER, 74250, 1280, 1720,
 		   1760, 1980, 0, 720, 725, 730, 750, 0,
 		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
-	  .vrefresh = 50, },
+	  .vrefresh = 50, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
 	/* 20 - 1920x1080i@50Hz */
 	{ DRM_MODE("1920x1080i", DRM_MODE_TYPE_DRIVER, 74250, 1920, 2448,
 		   2492, 2640, 0, 1080, 1084, 1094, 1125, 0,
 		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC |
 			DRM_MODE_FLAG_INTERLACE),
-	  .vrefresh = 50, },
+	  .vrefresh = 50, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
 	/* 21 - 1440x576i@50Hz */
 	{ DRM_MODE("1440x576i", DRM_MODE_TYPE_DRIVER, 27000, 1440, 1464,
 		   1590, 1728, 0, 576, 580, 586, 625, 0,
 		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
 			DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_DBLCLK),
-	  .vrefresh = 50, },
+	  .vrefresh = 50, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_4_3, },
 	/* 22 - 1440x576i@50Hz */
 	{ DRM_MODE("1440x576i", DRM_MODE_TYPE_DRIVER, 27000, 1440, 1464,
 		   1590, 1728, 0, 576, 580, 586, 625, 0,
 		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
 			DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_DBLCLK),
-	  .vrefresh = 50, },
+	  .vrefresh = 50, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
 	/* 23 - 1440x288@50Hz */
 	{ DRM_MODE("1440x288", DRM_MODE_TYPE_DRIVER, 27000, 1440, 1464,
 		   1590, 1728, 0, 288, 290, 293, 312, 0,
 		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
 			DRM_MODE_FLAG_DBLCLK),
-	  .vrefresh = 50, },
+	  .vrefresh = 50, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_4_3, },
 	/* 24 - 1440x288@50Hz */
 	{ DRM_MODE("1440x288", DRM_MODE_TYPE_DRIVER, 27000, 1440, 1464,
 		   1590, 1728, 0, 288, 290, 293, 312, 0,
 		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
 			DRM_MODE_FLAG_DBLCLK),
-	  .vrefresh = 50, },
+	  .vrefresh = 50, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
 	/* 25 - 2880x576i@50Hz */
 	{ DRM_MODE("2880x576i", DRM_MODE_TYPE_DRIVER, 54000, 2880, 2928,
 		   3180, 3456, 0, 576, 580, 586, 625, 0,
 		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
 			DRM_MODE_FLAG_INTERLACE),
-	  .vrefresh = 50, },
+	  .vrefresh = 50, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_4_3, },
 	/* 26 - 2880x576i@50Hz */
 	{ DRM_MODE("2880x576i", DRM_MODE_TYPE_DRIVER, 54000, 2880, 2928,
 		   3180, 3456, 0, 576, 580, 586, 625, 0,
 		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
 			DRM_MODE_FLAG_INTERLACE),
-	  .vrefresh = 50, },
+	  .vrefresh = 50, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
 	/* 27 - 2880x288@50Hz */
 	{ DRM_MODE("2880x288", DRM_MODE_TYPE_DRIVER, 54000, 2880, 2928,
 		   3180, 3456, 0, 288, 290, 293, 312, 0,
 		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC),
-	  .vrefresh = 50, },
+	  .vrefresh = 50, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_4_3, },
 	/* 28 - 2880x288@50Hz */
 	{ DRM_MODE("2880x288", DRM_MODE_TYPE_DRIVER, 54000, 2880, 2928,
 		   3180, 3456, 0, 288, 290, 293, 312, 0,
 		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC),
-	  .vrefresh = 50, },
+	  .vrefresh = 50, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
 	/* 29 - 1440x576@50Hz */
 	{ DRM_MODE("1440x576", DRM_MODE_TYPE_DRIVER, 54000, 1440, 1464,
 		   1592, 1728, 0, 576, 581, 586, 625, 0,
 		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC),
-	  .vrefresh = 50, },
+	  .vrefresh = 50, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_4_3, },
 	/* 30 - 1440x576@50Hz */
 	{ DRM_MODE("1440x576", DRM_MODE_TYPE_DRIVER, 54000, 1440, 1464,
 		   1592, 1728, 0, 576, 581, 586, 625, 0,
 		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC),
-	  .vrefresh = 50, },
+	  .vrefresh = 50, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
 	/* 31 - 1920x1080@50Hz */
 	{ DRM_MODE("1920x1080", DRM_MODE_TYPE_DRIVER, 148500, 1920, 2448,
 		   2492, 2640, 0, 1080, 1084, 1089, 1125, 0,
 		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
-	  .vrefresh = 50, },
+	  .vrefresh = 50, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
 	/* 32 - 1920x1080@24Hz */
 	{ DRM_MODE("1920x1080", DRM_MODE_TYPE_DRIVER, 74250, 1920, 2558,
 		   2602, 2750, 0, 1080, 1084, 1089, 1125, 0,
 		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
-	  .vrefresh = 24, },
+	  .vrefresh = 24, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
 	/* 33 - 1920x1080@25Hz */
 	{ DRM_MODE("1920x1080", DRM_MODE_TYPE_DRIVER, 74250, 1920, 2448,
 		   2492, 2640, 0, 1080, 1084, 1089, 1125, 0,
 		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
-	  .vrefresh = 25, },
+	  .vrefresh = 25, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
 	/* 34 - 1920x1080@30Hz */
 	{ DRM_MODE("1920x1080", DRM_MODE_TYPE_DRIVER, 74250, 1920, 2008,
 		   2052, 2200, 0, 1080, 1084, 1089, 1125, 0,
 		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
-	  .vrefresh = 30, },
+	  .vrefresh = 30, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
 	/* 35 - 2880x480@60Hz */
 	{ DRM_MODE("2880x480", DRM_MODE_TYPE_DRIVER, 108000, 2880, 2944,
 		   3192, 3432, 0, 480, 489, 495, 525, 0,
 		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC),
-	  .vrefresh = 60, },
+	  .vrefresh = 60, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_4_3, },
 	/* 36 - 2880x480@60Hz */
 	{ DRM_MODE("2880x480", DRM_MODE_TYPE_DRIVER, 108000, 2880, 2944,
 		   3192, 3432, 0, 480, 489, 495, 525, 0,
 		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC),
-	  .vrefresh = 60, },
+	  .vrefresh = 60, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
 	/* 37 - 2880x576@50Hz */
 	{ DRM_MODE("2880x576", DRM_MODE_TYPE_DRIVER, 108000, 2880, 2928,
 		   3184, 3456, 0, 576, 581, 586, 625, 0,
 		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC),
-	  .vrefresh = 50, },
+	  .vrefresh = 50, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_4_3, },
 	/* 38 - 2880x576@50Hz */
 	{ DRM_MODE("2880x576", DRM_MODE_TYPE_DRIVER, 108000, 2880, 2928,
 		   3184, 3456, 0, 576, 581, 586, 625, 0,
 		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC),
-	  .vrefresh = 50, },
+	  .vrefresh = 50, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
 	/* 39 - 1920x1080i@50Hz */
 	{ DRM_MODE("1920x1080i", DRM_MODE_TYPE_DRIVER, 72000, 1920, 1952,
 		   2120, 2304, 0, 1080, 1126, 1136, 1250, 0,
 		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC |
 			DRM_MODE_FLAG_INTERLACE),
-	  .vrefresh = 50, },
+	  .vrefresh = 50, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
 	/* 40 - 1920x1080i@100Hz */
 	{ DRM_MODE("1920x1080i", DRM_MODE_TYPE_DRIVER, 148500, 1920, 2448,
 		   2492, 2640, 0, 1080, 1084, 1094, 1125, 0,
 		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC |
 			DRM_MODE_FLAG_INTERLACE),
-	  .vrefresh = 100, },
+	  .vrefresh = 100, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
 	/* 41 - 1280x720@100Hz */
 	{ DRM_MODE("1280x720", DRM_MODE_TYPE_DRIVER, 148500, 1280, 1720,
 		   1760, 1980, 0, 720, 725, 730, 750, 0,
 		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
-	  .vrefresh = 100, },
+	  .vrefresh = 100, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
 	/* 42 - 720x576@100Hz */
 	{ DRM_MODE("720x576", DRM_MODE_TYPE_DRIVER, 54000, 720, 732,
 		   796, 864, 0, 576, 581, 586, 625, 0,
 		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC),
-	  .vrefresh = 100, },
+	  .vrefresh = 100, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_4_3, },
 	/* 43 - 720x576@100Hz */
 	{ DRM_MODE("720x576", DRM_MODE_TYPE_DRIVER, 54000, 720, 732,
 		   796, 864, 0, 576, 581, 586, 625, 0,
 		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC),
-	  .vrefresh = 100, },
+	  .vrefresh = 100, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
 	/* 44 - 1440x576i@100Hz */
 	{ DRM_MODE("1440x576", DRM_MODE_TYPE_DRIVER, 54000, 1440, 1464,
 		   1590, 1728, 0, 576, 580, 586, 625, 0,
 		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
 			DRM_MODE_FLAG_DBLCLK),
-	  .vrefresh = 100, },
+	  .vrefresh = 100, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_4_3, },
 	/* 45 - 1440x576i@100Hz */
 	{ DRM_MODE("1440x576", DRM_MODE_TYPE_DRIVER, 54000, 1440, 1464,
 		   1590, 1728, 0, 576, 580, 586, 625, 0,
 		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
 			DRM_MODE_FLAG_DBLCLK),
-	  .vrefresh = 100, },
+	  .vrefresh = 100, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
 	/* 46 - 1920x1080i@120Hz */
 	{ DRM_MODE("1920x1080i", DRM_MODE_TYPE_DRIVER, 148500, 1920, 2008,
 		   2052, 2200, 0, 1080, 1084, 1094, 1125, 0,
 		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC |
 			DRM_MODE_FLAG_INTERLACE),
-	  .vrefresh = 120, },
+	  .vrefresh = 120, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
 	/* 47 - 1280x720@120Hz */
 	{ DRM_MODE("1280x720", DRM_MODE_TYPE_DRIVER, 148500, 1280, 1390,
 		   1430, 1650, 0, 720, 725, 730, 750, 0,
 		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
-	  .vrefresh = 120, },
+	  .vrefresh = 120, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
 	/* 48 - 720x480@120Hz */
 	{ DRM_MODE("720x480", DRM_MODE_TYPE_DRIVER, 54000, 720, 736,
 		   798, 858, 0, 480, 489, 495, 525, 0,
 		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC),
-	  .vrefresh = 120, },
+	  .vrefresh = 120, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_4_3, },
 	/* 49 - 720x480@120Hz */
 	{ DRM_MODE("720x480", DRM_MODE_TYPE_DRIVER, 54000, 720, 736,
 		   798, 858, 0, 480, 489, 495, 525, 0,
 		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC),
-	  .vrefresh = 120, },
+	  .vrefresh = 120, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
 	/* 50 - 1440x480i@120Hz */
 	{ DRM_MODE("1440x480i", DRM_MODE_TYPE_DRIVER, 54000, 1440, 1478,
 		   1602, 1716, 0, 480, 488, 494, 525, 0,
 		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
 			DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_DBLCLK),
-	  .vrefresh = 120, },
+	  .vrefresh = 120, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_4_3, },
 	/* 51 - 1440x480i@120Hz */
 	{ DRM_MODE("1440x480i", DRM_MODE_TYPE_DRIVER, 54000, 1440, 1478,
 		   1602, 1716, 0, 480, 488, 494, 525, 0,
 		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
 			DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_DBLCLK),
-	  .vrefresh = 120, },
+	  .vrefresh = 120, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
 	/* 52 - 720x576@200Hz */
 	{ DRM_MODE("720x576", DRM_MODE_TYPE_DRIVER, 108000, 720, 732,
 		   796, 864, 0, 576, 581, 586, 625, 0,
 		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC),
-	  .vrefresh = 200, },
+	  .vrefresh = 200, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_4_3, },
 	/* 53 - 720x576@200Hz */
 	{ DRM_MODE("720x576", DRM_MODE_TYPE_DRIVER, 108000, 720, 732,
 		   796, 864, 0, 576, 581, 586, 625, 0,
 		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC),
-	  .vrefresh = 200, },
+	  .vrefresh = 200, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
 	/* 54 - 1440x576i@200Hz */
 	{ DRM_MODE("1440x576i", DRM_MODE_TYPE_DRIVER, 108000, 1440, 1464,
 		   1590, 1728, 0, 576, 580, 586, 625, 0,
 		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
 			DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_DBLCLK),
-	  .vrefresh = 200, },
+	  .vrefresh = 200, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_4_3, },
 	/* 55 - 1440x576i@200Hz */
 	{ DRM_MODE("1440x576i", DRM_MODE_TYPE_DRIVER, 108000, 1440, 1464,
 		   1590, 1728, 0, 576, 580, 586, 625, 0,
 		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
 			DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_DBLCLK),
-	  .vrefresh = 200, },
+	  .vrefresh = 200, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
 	/* 56 - 720x480@240Hz */
 	{ DRM_MODE("720x480", DRM_MODE_TYPE_DRIVER, 108000, 720, 736,
 		   798, 858, 0, 480, 489, 495, 525, 0,
 		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC),
-	  .vrefresh = 240, },
+	  .vrefresh = 240, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_4_3, },
 	/* 57 - 720x480@240Hz */
 	{ DRM_MODE("720x480", DRM_MODE_TYPE_DRIVER, 108000, 720, 736,
 		   798, 858, 0, 480, 489, 495, 525, 0,
 		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC),
-	  .vrefresh = 240, },
+	  .vrefresh = 240, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
 	/* 58 - 1440x480i@240 */
 	{ DRM_MODE("1440x480i", DRM_MODE_TYPE_DRIVER, 108000, 1440, 1478,
 		   1602, 1716, 0, 480, 488, 494, 525, 0,
 		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
 			DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_DBLCLK),
-	  .vrefresh = 240, },
+	  .vrefresh = 240, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_4_3, },
 	/* 59 - 1440x480i@240 */
 	{ DRM_MODE("1440x480i", DRM_MODE_TYPE_DRIVER, 108000, 1440, 1478,
 		   1602, 1716, 0, 480, 488, 494, 525, 0,
 		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
 			DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_DBLCLK),
-	  .vrefresh = 240, },
+	  .vrefresh = 240, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
 	/* 60 - 1280x720@24Hz */
 	{ DRM_MODE("1280x720", DRM_MODE_TYPE_DRIVER, 59400, 1280, 3040,
 		   3080, 3300, 0, 720, 725, 730, 750, 0,
 		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
-	  .vrefresh = 24, },
+	  .vrefresh = 24, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
 	/* 61 - 1280x720@25Hz */
 	{ DRM_MODE("1280x720", DRM_MODE_TYPE_DRIVER, 74250, 1280, 3700,
 		   3740, 3960, 0, 720, 725, 730, 750, 0,
 		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
-	  .vrefresh = 25, },
+	  .vrefresh = 25, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
 	/* 62 - 1280x720@30Hz */
 	{ DRM_MODE("1280x720", DRM_MODE_TYPE_DRIVER, 74250, 1280, 3040,
 		   3080, 3300, 0, 720, 725, 730, 750, 0,
 		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
-	  .vrefresh = 30, },
+	  .vrefresh = 30, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
 	/* 63 - 1920x1080@120Hz */
 	{ DRM_MODE("1920x1080", DRM_MODE_TYPE_DRIVER, 297000, 1920, 2008,
 		   2052, 2200, 0, 1080, 1084, 1089, 1125, 0,
 		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
-	 .vrefresh = 120, },
+	 .vrefresh = 120, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
 	/* 64 - 1920x1080@100Hz */
 	{ DRM_MODE("1920x1080", DRM_MODE_TYPE_DRIVER, 297000, 1920, 2448,
 		   2492, 2640, 0, 1080, 1084, 1094, 1125, 0,
 		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
-	 .vrefresh = 100, },
+	 .vrefresh = 100, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
 };
 
 /*
@@ -2562,25 +2562,40 @@
 	return modes;
 }
 
+static struct drm_display_mode *
+drm_display_mode_from_vic_index(struct drm_connector *connector,
+				const u8 *video_db, u8 video_len,
+				u8 video_index)
+{
+	struct drm_device *dev = connector->dev;
+	struct drm_display_mode *newmode;
+	u8 cea_mode;
+
+	if (video_db == NULL || video_index >= video_len)
+		return NULL;
+
+	/* CEA modes are numbered 1..127 */
+	cea_mode = (video_db[video_index] & 127) - 1;
+	if (cea_mode >= ARRAY_SIZE(edid_cea_modes))
+		return NULL;
+
+	newmode = drm_mode_duplicate(dev, &edid_cea_modes[cea_mode]);
+	newmode->vrefresh = 0;
+
+	return newmode;
+}
+
 static int
 do_cea_modes(struct drm_connector *connector, const u8 *db, u8 len)
 {
-	struct drm_device *dev = connector->dev;
-	const u8 *mode;
-	u8 cea_mode;
-	int modes = 0;
+	int i, modes = 0;
 
-	for (mode = db; mode < db + len; mode++) {
-		cea_mode = (*mode & 127) - 1; /* CEA modes are numbered 1..127 */
-		if (cea_mode < ARRAY_SIZE(edid_cea_modes)) {
-			struct drm_display_mode *newmode;
-			newmode = drm_mode_duplicate(dev,
-						     &edid_cea_modes[cea_mode]);
-			if (newmode) {
-				newmode->vrefresh = 0;
-				drm_mode_probed_add(connector, newmode);
-				modes++;
-			}
+	for (i = 0; i < len; i++) {
+		struct drm_display_mode *mode;
+		mode = drm_display_mode_from_vic_index(connector, db, len, i);
+		if (mode) {
+			drm_mode_probed_add(connector, mode);
+			modes++;
 		}
 	}
 
@@ -2674,21 +2689,13 @@
 static int add_3d_struct_modes(struct drm_connector *connector, u16 structure,
 			       const u8 *video_db, u8 video_len, u8 video_index)
 {
-	struct drm_device *dev = connector->dev;
 	struct drm_display_mode *newmode;
 	int modes = 0;
-	u8 cea_mode;
-
-	if (video_db == NULL || video_index >= video_len)
-		return 0;
-
-	/* CEA modes are numbered 1..127 */
-	cea_mode = (video_db[video_index] & 127) - 1;
-	if (cea_mode >= ARRAY_SIZE(edid_cea_modes))
-		return 0;
 
 	if (structure & (1 << 0)) {
-		newmode = drm_mode_duplicate(dev, &edid_cea_modes[cea_mode]);
+		newmode = drm_display_mode_from_vic_index(connector, video_db,
+							  video_len,
+							  video_index);
 		if (newmode) {
 			newmode->flags |= DRM_MODE_FLAG_3D_FRAME_PACKING;
 			drm_mode_probed_add(connector, newmode);
@@ -2696,7 +2703,9 @@
 		}
 	}
 	if (structure & (1 << 6)) {
-		newmode = drm_mode_duplicate(dev, &edid_cea_modes[cea_mode]);
+		newmode = drm_display_mode_from_vic_index(connector, video_db,
+							  video_len,
+							  video_index);
 		if (newmode) {
 			newmode->flags |= DRM_MODE_FLAG_3D_TOP_AND_BOTTOM;
 			drm_mode_probed_add(connector, newmode);
@@ -2704,7 +2713,9 @@
 		}
 	}
 	if (structure & (1 << 8)) {
-		newmode = drm_mode_duplicate(dev, &edid_cea_modes[cea_mode]);
+		newmode = drm_display_mode_from_vic_index(connector, video_db,
+							  video_len,
+							  video_index);
 		if (newmode) {
 			newmode->flags |= DRM_MODE_FLAG_3D_SIDE_BY_SIDE_HALF;
 			drm_mode_probed_add(connector, newmode);
@@ -2728,7 +2739,7 @@
 do_hdmi_vsdb_modes(struct drm_connector *connector, const u8 *db, u8 len,
 		   const u8 *video_db, u8 video_len)
 {
-	int modes = 0, offset = 0, i, multi_present = 0;
+	int modes = 0, offset = 0, i, multi_present = 0, multi_len;
 	u8 vic_len, hdmi_3d_len = 0;
 	u16 mask;
 	u16 structure_all;
@@ -2774,32 +2785,84 @@
 	}
 	offset += 1 + vic_len;
 
-	if (!(multi_present == 1 || multi_present == 2))
-		goto out;
-
-	if ((multi_present == 1 && len < (9 + offset)) ||
-	    (multi_present == 2 && len < (11 + offset)))
-		goto out;
-
-	if ((multi_present == 1 && hdmi_3d_len < 2) ||
-	    (multi_present == 2 && hdmi_3d_len < 4))
-		goto out;
-
-	/* 3D_Structure_ALL */
-	structure_all = (db[8 + offset] << 8) | db[9 + offset];
-
-	/* check if 3D_MASK is present */
-	if (multi_present == 2)
-		mask = (db[10 + offset] << 8) | db[11 + offset];
+	if (multi_present == 1)
+		multi_len = 2;
+	else if (multi_present == 2)
+		multi_len = 4;
 	else
-		mask = 0xffff;
+		multi_len = 0;
 
-	for (i = 0; i < 16; i++) {
-		if (mask & (1 << i))
-			modes += add_3d_struct_modes(connector,
-						     structure_all,
-						     video_db,
-						     video_len, i);
+	if (len < (8 + offset + hdmi_3d_len - 1))
+		goto out;
+
+	if (hdmi_3d_len < multi_len)
+		goto out;
+
+	if (multi_present == 1 || multi_present == 2) {
+		/* 3D_Structure_ALL */
+		structure_all = (db[8 + offset] << 8) | db[9 + offset];
+
+		/* check if 3D_MASK is present */
+		if (multi_present == 2)
+			mask = (db[10 + offset] << 8) | db[11 + offset];
+		else
+			mask = 0xffff;
+
+		for (i = 0; i < 16; i++) {
+			if (mask & (1 << i))
+				modes += add_3d_struct_modes(connector,
+						structure_all,
+						video_db,
+						video_len, i);
+		}
+	}
+
+	offset += multi_len;
+
+	for (i = 0; i < (hdmi_3d_len - multi_len); i++) {
+		int vic_index;
+		struct drm_display_mode *newmode = NULL;
+		unsigned int newflag = 0;
+		bool detail_present;
+
+		detail_present = ((db[8 + offset + i] & 0x0f) > 7);
+
+		if (detail_present && (i + 1 == hdmi_3d_len - multi_len))
+			break;
+
+		/* 2D_VIC_order_X */
+		vic_index = db[8 + offset + i] >> 4;
+
+		/* 3D_Structure_X */
+		switch (db[8 + offset + i] & 0x0f) {
+		case 0:
+			newflag = DRM_MODE_FLAG_3D_FRAME_PACKING;
+			break;
+		case 6:
+			newflag = DRM_MODE_FLAG_3D_TOP_AND_BOTTOM;
+			break;
+		case 8:
+			/* 3D_Detail_X */
+			if ((db[9 + offset + i] >> 4) == 1)
+				newflag = DRM_MODE_FLAG_3D_SIDE_BY_SIDE_HALF;
+			break;
+		}
+
+		if (newflag != 0) {
+			newmode = drm_display_mode_from_vic_index(connector,
+								  video_db,
+								  video_len,
+								  vic_index);
+
+			if (newmode) {
+				newmode->flags |= newflag;
+				drm_mode_probed_add(connector, newmode);
+				modes++;
+			}
+		}
+
+		if (detail_present)
+			i++;
 	}
 
 out:
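/*
 * Illustrative sketch, not part of this patch: the byte layout that
 * drm_display_mode_from_vic_index() above decodes.  Each entry of a CEA-861
 * short video descriptor carries the VIC in bits 0-6 (numbered from 1, hence
 * the "- 1" before indexing edid_cea_modes[]); per CEA-861, bit 7 flags the
 * sink's native mode for the low-numbered VICs.  For example an SVD byte of
 * 0x90 decodes to VIC 16, i.e. 1920x1080@60 in the table above, marked
 * native.  svd_vic()/svd_native() are hypothetical helper names.
 */
static inline u8 svd_vic(u8 svd)
{
	return svd & 127;		/* VIC 1..127 */
}

static inline bool svd_native(u8 svd)
{
	return (svd & 0x80) != 0;
}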
diff --git a/drivers/gpu/drm/drm_edid_load.c b/drivers/gpu/drm/drm_edid_load.c
index 9081172..1b4c7a5 100644
--- a/drivers/gpu/drm/drm_edid_load.c
+++ b/drivers/gpu/drm/drm_edid_load.c
@@ -141,7 +141,7 @@
 	return (edid[0x7e] + 1) * EDID_LENGTH;
 }
 
-static u8 *edid_load(struct drm_connector *connector, const char *name,
+static void *edid_load(struct drm_connector *connector, const char *name,
 			const char *connector_name)
 {
 	const struct firmware *fw = NULL;
@@ -263,7 +263,7 @@
 	if (*last == '\n')
 		*last = '\0';
 
-	edid = (struct edid *) edid_load(connector, edidname, connector_name);
+	edid = edid_load(connector, edidname, connector_name);
 	if (IS_ERR_OR_NULL(edid))
 		return 0;
 
diff --git a/drivers/gpu/drm/drm_fb_helper.c b/drivers/gpu/drm/drm_fb_helper.c
index 0a19401..98a0363 100644
--- a/drivers/gpu/drm/drm_fb_helper.c
+++ b/drivers/gpu/drm/drm_fb_helper.c
@@ -359,6 +359,11 @@
 	struct drm_crtc *crtc;
 	int bound = 0, crtcs_bound = 0;
 
+	/* Sometimes user space wants everything disabled, so don't steal the
+	 * display if there's a master. */
+	if (dev->primary->master)
+		return false;
+
 	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
 		if (crtc->fb)
 			crtcs_bound++;
@@ -368,6 +373,7 @@
 
 	if (bound < crtcs_bound)
 		return false;
+
 	return true;
 }
 
diff --git a/drivers/gpu/drm/drm_fops.c b/drivers/gpu/drm/drm_fops.c
index c5b929c..7f2af9a 100644
--- a/drivers/gpu/drm/drm_fops.c
+++ b/drivers/gpu/drm/drm_fops.c
@@ -232,7 +232,6 @@
 		goto out_put_pid;
 	}
 
-	priv->ioctl_count = 0;
 	/* for compatibility root is always authenticated */
 	priv->always_authenticated = capable(CAP_SYS_ADMIN);
 	priv->authenticated = priv->always_authenticated;
@@ -392,9 +391,6 @@
 	if (drm_core_check_feature(dev, DRIVER_MODESET))
 		return;
 
-	atomic_set(&dev->ioctl_count, 0);
-	atomic_set(&dev->vma_count, 0);
-
 	dev->sigdata.lock = NULL;
 
 	dev->context_flag = 0;
@@ -578,12 +574,7 @@
 	 */
 
 	if (!--dev->open_count) {
-		if (atomic_read(&dev->ioctl_count)) {
-			DRM_ERROR("Device busy: %d\n",
-				  atomic_read(&dev->ioctl_count));
-			retcode = -EBUSY;
-		} else
-			retcode = drm_lastclose(dev);
+		retcode = drm_lastclose(dev);
 		if (drm_device_is_unplugged(dev))
 			drm_put_dev(dev);
 	}
diff --git a/drivers/gpu/drm/drm_gem.c b/drivers/gpu/drm/drm_gem.c
index 4761ade..5bbad87 100644
--- a/drivers/gpu/drm/drm_gem.c
+++ b/drivers/gpu/drm/drm_gem.c
@@ -91,19 +91,19 @@
 int
 drm_gem_init(struct drm_device *dev)
 {
-	struct drm_gem_mm *mm;
+	struct drm_vma_offset_manager *vma_offset_manager;
 
 	mutex_init(&dev->object_name_lock);
 	idr_init(&dev->object_name_idr);
 
-	mm = kzalloc(sizeof(struct drm_gem_mm), GFP_KERNEL);
-	if (!mm) {
+	vma_offset_manager = kzalloc(sizeof(*vma_offset_manager), GFP_KERNEL);
+	if (!vma_offset_manager) {
 		DRM_ERROR("out of memory\n");
 		return -ENOMEM;
 	}
 
-	dev->mm_private = mm;
-	drm_vma_offset_manager_init(&mm->vma_manager,
+	dev->vma_offset_manager = vma_offset_manager;
+	drm_vma_offset_manager_init(vma_offset_manager,
 				    DRM_FILE_PAGE_OFFSET_START,
 				    DRM_FILE_PAGE_OFFSET_SIZE);
 
@@ -113,11 +113,10 @@
 void
 drm_gem_destroy(struct drm_device *dev)
 {
-	struct drm_gem_mm *mm = dev->mm_private;
 
-	drm_vma_offset_manager_destroy(&mm->vma_manager);
-	kfree(mm);
-	dev->mm_private = NULL;
+	drm_vma_offset_manager_destroy(dev->vma_offset_manager);
+	kfree(dev->vma_offset_manager);
+	dev->vma_offset_manager = NULL;
 }
 
 /**
@@ -129,11 +128,12 @@
 {
 	struct file *filp;
 
+	drm_gem_private_object_init(dev, obj, size);
+
 	filp = shmem_file_setup("drm mm object", size, VM_NORESERVE);
 	if (IS_ERR(filp))
 		return PTR_ERR(filp);
 
-	drm_gem_private_object_init(dev, obj, size);
 	obj->filp = filp;
 
 	return 0;
@@ -175,11 +175,6 @@
 	mutex_unlock(&filp->prime.lock);
 }
 
-static void drm_gem_object_ref_bug(struct kref *list_kref)
-{
-	BUG();
-}
-
 /**
  * Called after the last handle to the object has been closed
  *
@@ -195,13 +190,6 @@
 	if (obj->name) {
 		idr_remove(&dev->object_name_idr, obj->name);
 		obj->name = 0;
-		/*
-		 * The object name held a reference to this object, drop
-		 * that now.
-		*
-		* This cannot be the last reference, since the handle holds one too.
-		 */
-		kref_put(&obj->refcount, drm_gem_object_ref_bug);
 	}
 }
 
@@ -374,9 +362,8 @@
 drm_gem_free_mmap_offset(struct drm_gem_object *obj)
 {
 	struct drm_device *dev = obj->dev;
-	struct drm_gem_mm *mm = dev->mm_private;
 
-	drm_vma_offset_remove(&mm->vma_manager, &obj->vma_node);
+	drm_vma_offset_remove(dev->vma_offset_manager, &obj->vma_node);
 }
 EXPORT_SYMBOL(drm_gem_free_mmap_offset);
 
@@ -398,9 +385,8 @@
 drm_gem_create_mmap_offset_size(struct drm_gem_object *obj, size_t size)
 {
 	struct drm_device *dev = obj->dev;
-	struct drm_gem_mm *mm = dev->mm_private;
 
-	return drm_vma_offset_add(&mm->vma_manager, &obj->vma_node,
+	return drm_vma_offset_add(dev->vma_offset_manager, &obj->vma_node,
 				  size / PAGE_SIZE);
 }
 EXPORT_SYMBOL(drm_gem_create_mmap_offset_size);
@@ -602,9 +588,6 @@
 			goto err;
 
 		obj->name = ret;
-
-		/* Allocate a reference for the name table.  */
-		drm_gem_object_reference(obj);
 	}
 
 	args->name = (uint64_t) obj->name;
@@ -833,7 +816,6 @@
 {
 	struct drm_file *priv = filp->private_data;
 	struct drm_device *dev = priv->minor->dev;
-	struct drm_gem_mm *mm = dev->mm_private;
 	struct drm_gem_object *obj;
 	struct drm_vma_offset_node *node;
 	int ret = 0;
@@ -843,7 +825,8 @@
 
 	mutex_lock(&dev->struct_mutex);
 
-	node = drm_vma_offset_exact_lookup(&mm->vma_manager, vma->vm_pgoff,
+	node = drm_vma_offset_exact_lookup(dev->vma_offset_manager,
+					   vma->vm_pgoff,
 					   vma_pages(vma));
 	if (!node) {
 		mutex_unlock(&dev->struct_mutex);
diff --git a/drivers/gpu/drm/drm_info.c b/drivers/gpu/drm/drm_info.c
index 7d5a152..7473035 100644
--- a/drivers/gpu/drm/drm_info.c
+++ b/drivers/gpu/drm/drm_info.c
@@ -186,14 +186,14 @@
 	struct drm_file *priv;
 
 	mutex_lock(&dev->struct_mutex);
-	seq_printf(m, "a dev	pid    uid	magic	  ioctls\n\n");
+	seq_printf(m, "a dev	pid    uid	magic\n\n");
 	list_for_each_entry(priv, &dev->filelist, lhead) {
-		seq_printf(m, "%c %3d %5d %5d %10u %10lu\n",
+		seq_printf(m, "%c %3d %5d %5d %10u\n",
 			   priv->authenticated ? 'y' : 'n',
 			   priv->minor->index,
 			   pid_vnr(priv->pid),
 			   from_kuid_munged(seq_user_ns(m), priv->uid),
-			   priv->magic, priv->ioctl_count);
+			   priv->magic);
 	}
 	mutex_unlock(&dev->struct_mutex);
 	return 0;
@@ -234,14 +234,18 @@
 	struct drm_device *dev = node->minor->dev;
 	struct drm_vma_entry *pt;
 	struct vm_area_struct *vma;
+	unsigned long vma_count = 0;
 #if defined(__i386__)
 	unsigned int pgprot;
 #endif
 
 	mutex_lock(&dev->struct_mutex);
-	seq_printf(m, "vma use count: %d, high_memory = %pK, 0x%pK\n",
-		   atomic_read(&dev->vma_count),
-		   high_memory, (void *)(unsigned long)virt_to_phys(high_memory));
+	list_for_each_entry(pt, &dev->vmalist, head)
+		vma_count++;
+
+	seq_printf(m, "vma use count: %lu, high_memory = %pK, 0x%pK\n",
+		   vma_count, high_memory,
+		   (void *)(unsigned long)virt_to_phys(high_memory));
 
 	list_for_each_entry(pt, &dev->vmalist, head) {
 		vma = pt->vma;
diff --git a/drivers/gpu/drm/drm_ioctl.c b/drivers/gpu/drm/drm_ioctl.c
index dffc836..f4dc9b7 100644
--- a/drivers/gpu/drm/drm_ioctl.c
+++ b/drivers/gpu/drm/drm_ioctl.c
@@ -296,6 +296,18 @@
 	case DRM_CAP_ASYNC_PAGE_FLIP:
 		req->value = dev->mode_config.async_page_flip;
 		break;
+	case DRM_CAP_CURSOR_WIDTH:
+		if (dev->mode_config.cursor_width)
+			req->value = dev->mode_config.cursor_width;
+		else
+			req->value = 64;
+		break;
+	case DRM_CAP_CURSOR_HEIGHT:
+		if (dev->mode_config.cursor_height)
+			req->value = dev->mode_config.cursor_height;
+		else
+			req->value = 64;
+		break;
 	default:
 		return -EINVAL;
 	}
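/*
 * Illustrative sketch, not part of this patch: the userspace view of the two
 * capabilities added above.  drmGetCap() is libdrm's wrapper around
 * DRM_IOCTL_GET_CAP and leaves *value untouched on failure, so the 64x64
 * fallback mirrors the default the ioctl handler returns when a driver leaves
 * cursor_width/cursor_height unset.  Assumes headers new enough to define the
 * two capability constants; query_cursor_limits() is a hypothetical name.
 */
#include <stdint.h>
#include <xf86drm.h>

static void query_cursor_limits(int fd, uint64_t *w, uint64_t *h)
{
	*w = *h = 64;
	drmGetCap(fd, DRM_CAP_CURSOR_WIDTH, w);
	drmGetCap(fd, DRM_CAP_CURSOR_HEIGHT, h);
}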
diff --git a/drivers/gpu/drm/drm_irq.c b/drivers/gpu/drm/drm_irq.c
index 64c34d5..c2676b5 100644
--- a/drivers/gpu/drm/drm_irq.c
+++ b/drivers/gpu/drm/drm_irq.c
@@ -368,7 +368,7 @@
 	if (dev->num_crtcs) {
 		spin_lock_irqsave(&dev->vbl_lock, irqflags);
 		for (i = 0; i < dev->num_crtcs; i++) {
-			DRM_WAKEUP(&dev->vblank[i].queue);
+			wake_up(&dev->vblank[i].queue);
 			dev->vblank[i].enabled = false;
 			dev->vblank[i].last =
 				dev->driver->get_vblank_counter(dev, i);
@@ -436,45 +436,41 @@
 }
 
 /**
- * drm_calc_timestamping_constants - Calculate and
- * store various constants which are later needed by
- * vblank and swap-completion timestamping, e.g, by
- * drm_calc_vbltimestamp_from_scanoutpos().
- * They are derived from crtc's true scanout timing,
- * so they take things like panel scaling or other
- * adjustments into account.
+ * drm_calc_timestamping_constants - Calculate vblank timestamp constants
  *
  * @crtc drm_crtc whose timestamp constants should be updated.
+ * @mode display mode containing the scanout timings
  *
+ * Calculate and store various constants which are later
+ * needed by vblank and swap-completion timestamping, e.g,
+ * by drm_calc_vbltimestamp_from_scanoutpos(). They are
+ * derived from crtc's true scanout timing, so they take
+ * things like panel scaling or other adjustments into account.
  */
-void drm_calc_timestamping_constants(struct drm_crtc *crtc)
+void drm_calc_timestamping_constants(struct drm_crtc *crtc,
+				     const struct drm_display_mode *mode)
 {
-	s64 linedur_ns = 0, pixeldur_ns = 0, framedur_ns = 0;
-	u64 dotclock;
-
-	/* Dot clock in Hz: */
-	dotclock = (u64) crtc->hwmode.clock * 1000;
-
-	/* Fields of interlaced scanout modes are only half a frame duration.
-	 * Double the dotclock to get half the frame-/line-/pixelduration.
-	 */
-	if (crtc->hwmode.flags & DRM_MODE_FLAG_INTERLACE)
-		dotclock *= 2;
+	int linedur_ns = 0, pixeldur_ns = 0, framedur_ns = 0;
+	int dotclock = mode->crtc_clock;
 
 	/* Valid dotclock? */
 	if (dotclock > 0) {
-		int frame_size;
-		/* Convert scanline length in pixels and video dot clock to
-		 * line duration, frame duration and pixel duration in
-		 * nanoseconds:
+		int frame_size = mode->crtc_htotal * mode->crtc_vtotal;
+
+		/*
+		 * Convert scanline length in pixels and video
+		 * dot clock to line duration, frame duration
+		 * and pixel duration in nanoseconds:
 		 */
-		pixeldur_ns = (s64) div64_u64(1000000000, dotclock);
-		linedur_ns  = (s64) div64_u64(((u64) crtc->hwmode.crtc_htotal *
-					      1000000000), dotclock);
-		frame_size = crtc->hwmode.crtc_htotal *
-				crtc->hwmode.crtc_vtotal;
-		framedur_ns = (s64) div64_u64((u64) frame_size * 1000000000,
-					      dotclock);
+		pixeldur_ns = 1000000 / dotclock;
+		linedur_ns  = div_u64((u64) mode->crtc_htotal * 1000000, dotclock);
+		framedur_ns = div_u64((u64) frame_size * 1000000, dotclock);
+
+		/*
+		 * Fields of interlaced scanout modes are only half a frame duration.
+		 */
+		if (mode->flags & DRM_MODE_FLAG_INTERLACE)
+			framedur_ns /= 2;
 	} else
 		DRM_ERROR("crtc %d: Can't calculate constants, dotclock = 0!\n",
 			  crtc->base.id);
@@ -484,11 +480,11 @@
 	crtc->framedur_ns = framedur_ns;
 
 	DRM_DEBUG("crtc %d: hwmode: htotal %d, vtotal %d, vdisplay %d\n",
-		  crtc->base.id, crtc->hwmode.crtc_htotal,
-		  crtc->hwmode.crtc_vtotal, crtc->hwmode.crtc_vdisplay);
+		  crtc->base.id, mode->crtc_htotal,
+		  mode->crtc_vtotal, mode->crtc_vdisplay);
 	DRM_DEBUG("crtc %d: clock %d kHz framedur %d linedur %d, pixeldur %d\n",
-		  crtc->base.id, (int) dotclock/1000, (int) framedur_ns,
-		  (int) linedur_ns, (int) pixeldur_ns);
+		  crtc->base.id, dotclock, framedur_ns,
+		  linedur_ns, pixeldur_ns);
 }
 EXPORT_SYMBOL(drm_calc_timestamping_constants);
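/*
 * Illustrative sketch, not part of this patch: the unit change in
 * drm_calc_timestamping_constants() above.  mode->crtc_clock is in kHz, so
 * 10^9 ns/s divided by (clock * 1000) Hz simplifies to 1000000 / clock.
 * Worked through for CEA mode 16 from the table earlier in this diff
 * (1920x1080@60: clock 148500 kHz, htotal 2200, vtotal 1125), using the
 * same integer division as the code:
 *
 *	pixeldur_ns = 1000000 / 148500                  =          6 ns
 *	linedur_ns  = 2200 * 1000000 / 148500           =     14,814 ns
 *	framedur_ns = 2200 * 1125 * 1000000 / 148500    = 16,666,666 ns  (~60 Hz)
 */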
 
@@ -521,6 +517,7 @@
  *         0 = Default.
  *         DRM_CALLED_FROM_VBLIRQ = If function is called from vbl irq handler.
  * @refcrtc: drm_crtc* of crtc which defines scanout timing.
+ * @mode: mode which defines the scanout timings
  *
  * Returns negative value on error, failure or if not supported in current
  * video mode:
@@ -540,14 +537,14 @@
 					  int *max_error,
 					  struct timeval *vblank_time,
 					  unsigned flags,
-					  struct drm_crtc *refcrtc)
+					  const struct drm_crtc *refcrtc,
+					  const struct drm_display_mode *mode)
 {
 	ktime_t stime, etime, mono_time_offset;
 	struct timeval tv_etime;
-	struct drm_display_mode *mode;
-	int vbl_status, vtotal, vdisplay;
+	int vbl_status;
 	int vpos, hpos, i;
-	s64 framedur_ns, linedur_ns, pixeldur_ns, delta_ns, duration_ns;
+	int framedur_ns, linedur_ns, pixeldur_ns, delta_ns, duration_ns;
 	bool invbl;
 
 	if (crtc < 0 || crtc >= dev->num_crtcs) {
@@ -561,10 +558,6 @@
 		return -EIO;
 	}
 
-	mode = &refcrtc->hwmode;
-	vtotal = mode->crtc_vtotal;
-	vdisplay = mode->crtc_vdisplay;
-
 	/* Durations of frames, lines, pixels in nanoseconds. */
 	framedur_ns = refcrtc->framedur_ns;
 	linedur_ns  = refcrtc->linedur_ns;
@@ -573,7 +566,7 @@
 	/* If mode timing undefined, just return as no-op:
 	 * Happens during initial modesetting of a crtc.
 	 */
-	if (vtotal <= 0 || vdisplay <= 0 || framedur_ns == 0) {
+	if (framedur_ns == 0) {
 		DRM_DEBUG("crtc %d: Noop due to uninitialized mode.\n", crtc);
 		return -EAGAIN;
 	}
@@ -590,7 +583,7 @@
 		 * Get vertical and horizontal scanout position vpos, hpos,
 		 * and bounding timestamps stime, etime, pre/post query.
 		 */
-		vbl_status = dev->driver->get_scanout_position(dev, crtc, &vpos,
+		vbl_status = dev->driver->get_scanout_position(dev, crtc, flags, &vpos,
 							       &hpos, &stime, &etime);
 
 		/*
@@ -611,18 +604,18 @@
 		duration_ns = ktime_to_ns(etime) - ktime_to_ns(stime);
 
 		/* Accept result with <  max_error nsecs timing uncertainty. */
-		if (duration_ns <= (s64) *max_error)
+		if (duration_ns <= *max_error)
 			break;
 	}
 
 	/* Noisy system timing? */
 	if (i == DRM_TIMESTAMP_MAXRETRIES) {
 		DRM_DEBUG("crtc %d: Noisy timestamp %d us > %d us [%d reps].\n",
-			  crtc, (int) duration_ns/1000, *max_error/1000, i);
+			  crtc, duration_ns/1000, *max_error/1000, i);
 	}
 
 	/* Return upper bound of timestamp precision error. */
-	*max_error = (int) duration_ns;
+	*max_error = duration_ns;
 
 	/* Check if in vblank area:
 	 * vpos is >=0 in video scanout area, but negative
@@ -635,25 +628,7 @@
 	 * since start of scanout at first display scanline. delta_ns
 	 * can be negative if start of scanout hasn't happened yet.
 	 */
-	delta_ns = (s64) vpos * linedur_ns + (s64) hpos * pixeldur_ns;
-
-	/* Is vpos outside nominal vblank area, but less than
-	 * 1/100 of a frame height away from start of vblank?
-	 * If so, assume this isn't a massively delayed vblank
-	 * interrupt, but a vblank interrupt that fired a few
-	 * microseconds before true start of vblank. Compensate
-	 * by adding a full frame duration to the final timestamp.
-	 * Happens, e.g., on ATI R500, R600.
-	 *
-	 * We only do this if DRM_CALLED_FROM_VBLIRQ.
-	 */
-	if ((flags & DRM_CALLED_FROM_VBLIRQ) && !invbl &&
-	    ((vdisplay - vpos) < vtotal / 100)) {
-		delta_ns = delta_ns - framedur_ns;
-
-		/* Signal this correction as "applied". */
-		vbl_status |= 0x8;
-	}
+	delta_ns = vpos * linedur_ns + hpos * pixeldur_ns;
 
 	if (!drm_timestamp_monotonic)
 		etime = ktime_sub(etime, mono_time_offset);
@@ -673,7 +648,7 @@
 		  crtc, (int)vbl_status, hpos, vpos,
 		  (long)tv_etime.tv_sec, (long)tv_etime.tv_usec,
 		  (long)vblank_time->tv_sec, (long)vblank_time->tv_usec,
-		  (int)duration_ns/1000, i);
+		  duration_ns/1000, i);
 
 	vbl_status = DRM_VBLANKTIME_SCANOUTPOS_METHOD;
 	if (invbl)
@@ -960,7 +935,7 @@
 	if (atomic_dec_and_test(&dev->vblank[crtc].refcount) &&
 	    (drm_vblank_offdelay > 0))
 		mod_timer(&dev->vblank_disable_timer,
-			  jiffies + ((drm_vblank_offdelay * DRM_HZ)/1000));
+			  jiffies + ((drm_vblank_offdelay * HZ)/1000));
 }
 EXPORT_SYMBOL(drm_vblank_put);
 
@@ -980,7 +955,7 @@
 
 	spin_lock_irqsave(&dev->vbl_lock, irqflags);
 	vblank_disable_and_save(dev, crtc);
-	DRM_WAKEUP(&dev->vblank[crtc].queue);
+	wake_up(&dev->vblank[crtc].queue);
 
 	/* Send any queued vblank events, lest the natives grow disquiet */
 	seq = drm_vblank_count_and_time(dev, crtc, &now);
@@ -1244,7 +1219,7 @@
 	DRM_DEBUG("waiting on vblank count %d, crtc %d\n",
 		  vblwait->request.sequence, crtc);
 	dev->vblank[crtc].last_wait = vblwait->request.sequence;
-	DRM_WAIT_ON(ret, dev->vblank[crtc].queue, 3 * DRM_HZ,
+	DRM_WAIT_ON(ret, dev->vblank[crtc].queue, 3 * HZ,
 		    (((drm_vblank_count(dev, crtc) -
 		       vblwait->request.sequence) <= (1 << 23)) ||
 		     !dev->irq_enabled));
@@ -1363,7 +1338,7 @@
 			  crtc, (int) diff_ns);
 	}
 
-	DRM_WAKEUP(&dev->vblank[crtc].queue);
+	wake_up(&dev->vblank[crtc].queue);
 	drm_handle_vblank_events(dev, crtc);
 
 	spin_unlock_irqrestore(&dev->vblank_time_lock, irqflags);
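The timestamping constants are now derived directly from mode->crtc_clock, which is given in kHz, so the pixel, line and frame durations fall out of small integer divisions in nanoseconds. As a quick illustration of the magnitudes involved, the following standalone sketch mirrors the same arithmetic for an assumed 1920x1080@60 mode (148500 kHz dot clock, 2200x1125 total); the mode numbers are illustrative and not taken from the patch.

/* Illustrative only: mirrors the pixeldur/linedur/framedur math above
 * for an assumed 1920x1080@60 mode (not part of the patch itself). */
#include <stdio.h>
#include <stdint.h>

int main(void)
{
	int crtc_clock  = 148500;		/* dot clock in kHz */
	int crtc_htotal = 2200, crtc_vtotal = 1125;
	int frame_size  = crtc_htotal * crtc_vtotal;

	int pixeldur_ns = 1000000 / crtc_clock;
	int linedur_ns  = (int)((uint64_t)crtc_htotal * 1000000 / crtc_clock);
	int framedur_ns = (int)((uint64_t)frame_size * 1000000 / crtc_clock);

	/* Prints roughly 6 ns, 14814 ns and 16666666 ns (~60 Hz). */
	printf("pixel %d ns, line %d ns, frame %d ns\n",
	       pixeldur_ns, linedur_ns, framedur_ns);
	return 0;
}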
diff --git a/drivers/gpu/drm/drm_memory.c b/drivers/gpu/drm/drm_memory.c
index 64e44fa..00c67c0 100644
--- a/drivers/gpu/drm/drm_memory.c
+++ b/drivers/gpu/drm/drm_memory.c
@@ -82,19 +82,19 @@
 }
 
 /** Wrapper around agp_free_memory() */
-void drm_free_agp(DRM_AGP_MEM * handle, int pages)
+void drm_free_agp(struct agp_memory * handle, int pages)
 {
 	agp_free_memory(handle);
 }
 
 /** Wrapper around agp_bind_memory() */
-int drm_bind_agp(DRM_AGP_MEM * handle, unsigned int start)
+int drm_bind_agp(struct agp_memory * handle, unsigned int start)
 {
 	return agp_bind_memory(handle, start);
 }
 
 /** Wrapper around agp_unbind_memory() */
-int drm_unbind_agp(DRM_AGP_MEM * handle)
+int drm_unbind_agp(struct agp_memory * handle)
 {
 	return agp_unbind_memory(handle);
 }
@@ -110,8 +110,7 @@
 
 void drm_core_ioremap(struct drm_local_map *map, struct drm_device *dev)
 {
-	if (drm_core_has_AGP(dev) &&
-	    dev->agp && dev->agp->cant_use_aperture && map->type == _DRM_AGP)
+	if (dev->agp && dev->agp->cant_use_aperture && map->type == _DRM_AGP)
 		map->handle = agp_remap(map->offset, map->size, dev);
 	else
 		map->handle = ioremap(map->offset, map->size);
@@ -120,8 +119,7 @@
 
 void drm_core_ioremap_wc(struct drm_local_map *map, struct drm_device *dev)
 {
-	if (drm_core_has_AGP(dev) &&
-	    dev->agp && dev->agp->cant_use_aperture && map->type == _DRM_AGP)
+	if (dev->agp && dev->agp->cant_use_aperture && map->type == _DRM_AGP)
 		map->handle = agp_remap(map->offset, map->size, dev);
 	else
 		map->handle = ioremap_wc(map->offset, map->size);
@@ -133,8 +131,7 @@
 	if (!map->handle || !map->size)
 		return;
 
-	if (drm_core_has_AGP(dev) &&
-	    dev->agp && dev->agp->cant_use_aperture && map->type == _DRM_AGP)
+	if (dev->agp && dev->agp->cant_use_aperture && map->type == _DRM_AGP)
 		vunmap(map->handle);
 	else
 		iounmap(map->handle);
diff --git a/drivers/gpu/drm/drm_mipi_dsi.c b/drivers/gpu/drm/drm_mipi_dsi.c
new file mode 100644
index 0000000..b155ee2
--- /dev/null
+++ b/drivers/gpu/drm/drm_mipi_dsi.c
@@ -0,0 +1,315 @@
+/*
+ * MIPI DSI Bus
+ *
+ * Copyright (C) 2012-2013, Samsung Electronics, Co., Ltd.
+ * Andrzej Hajda <a.hajda@samsung.com>
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#include <drm/drm_mipi_dsi.h>
+
+#include <linux/device.h>
+#include <linux/module.h>
+#include <linux/of_device.h>
+#include <linux/pm_runtime.h>
+#include <linux/slab.h>
+
+#include <video/mipi_display.h>
+
+static int mipi_dsi_device_match(struct device *dev, struct device_driver *drv)
+{
+	return of_driver_match_device(dev, drv);
+}
+
+static const struct dev_pm_ops mipi_dsi_device_pm_ops = {
+	.runtime_suspend = pm_generic_runtime_suspend,
+	.runtime_resume = pm_generic_runtime_resume,
+	.suspend = pm_generic_suspend,
+	.resume = pm_generic_resume,
+	.freeze = pm_generic_freeze,
+	.thaw = pm_generic_thaw,
+	.poweroff = pm_generic_poweroff,
+	.restore = pm_generic_restore,
+};
+
+static struct bus_type mipi_dsi_bus_type = {
+	.name = "mipi-dsi",
+	.match = mipi_dsi_device_match,
+	.pm = &mipi_dsi_device_pm_ops,
+};
+
+static void mipi_dsi_dev_release(struct device *dev)
+{
+	struct mipi_dsi_device *dsi = to_mipi_dsi_device(dev);
+
+	of_node_put(dev->of_node);
+	kfree(dsi);
+}
+
+static const struct device_type mipi_dsi_device_type = {
+	.release = mipi_dsi_dev_release,
+};
+
+static struct mipi_dsi_device *mipi_dsi_device_alloc(struct mipi_dsi_host *host)
+{
+	struct mipi_dsi_device *dsi;
+
+	dsi = kzalloc(sizeof(*dsi), GFP_KERNEL);
+	if (!dsi)
+		return ERR_PTR(-ENOMEM);
+
+	dsi->host = host;
+	dsi->dev.bus = &mipi_dsi_bus_type;
+	dsi->dev.parent = host->dev;
+	dsi->dev.type = &mipi_dsi_device_type;
+
+	device_initialize(&dsi->dev);
+
+	return dsi;
+}
+
+static int mipi_dsi_device_add(struct mipi_dsi_device *dsi)
+{
+	struct mipi_dsi_host *host = dsi->host;
+
+	dev_set_name(&dsi->dev, "%s.%d", dev_name(host->dev),  dsi->channel);
+
+	return device_add(&dsi->dev);
+}
+
+static struct mipi_dsi_device *
+of_mipi_dsi_device_add(struct mipi_dsi_host *host, struct device_node *node)
+{
+	struct mipi_dsi_device *dsi;
+	struct device *dev = host->dev;
+	int ret;
+	u32 reg;
+
+	ret = of_property_read_u32(node, "reg", &reg);
+	if (ret) {
+		dev_err(dev, "device node %s has no valid reg property: %d\n",
+			node->full_name, ret);
+		return ERR_PTR(-EINVAL);
+	}
+
+	if (reg > 3) {
+		dev_err(dev, "device node %s has invalid reg property: %u\n",
+			node->full_name, reg);
+		return ERR_PTR(-EINVAL);
+	}
+
+	dsi = mipi_dsi_device_alloc(host);
+	if (IS_ERR(dsi)) {
+		dev_err(dev, "failed to allocate DSI device %s: %ld\n",
+			node->full_name, PTR_ERR(dsi));
+		return dsi;
+	}
+
+	dsi->dev.of_node = of_node_get(node);
+	dsi->channel = reg;
+
+	ret = mipi_dsi_device_add(dsi);
+	if (ret) {
+		dev_err(dev, "failed to add DSI device %s: %d\n",
+			node->full_name, ret);
+		kfree(dsi);
+		return ERR_PTR(ret);
+	}
+
+	return dsi;
+}
+
+int mipi_dsi_host_register(struct mipi_dsi_host *host)
+{
+	struct device_node *node;
+
+	for_each_available_child_of_node(host->dev->of_node, node)
+		of_mipi_dsi_device_add(host, node);
+
+	return 0;
+}
+EXPORT_SYMBOL(mipi_dsi_host_register);
+
+static int mipi_dsi_remove_device_fn(struct device *dev, void *priv)
+{
+	struct mipi_dsi_device *dsi = to_mipi_dsi_device(dev);
+
+	device_unregister(&dsi->dev);
+
+	return 0;
+}
+
+void mipi_dsi_host_unregister(struct mipi_dsi_host *host)
+{
+	device_for_each_child(host->dev, NULL, mipi_dsi_remove_device_fn);
+}
+EXPORT_SYMBOL(mipi_dsi_host_unregister);
+
+/**
+ * mipi_dsi_attach - attach a DSI device to its DSI host
+ * @dsi: DSI peripheral
+ */
+int mipi_dsi_attach(struct mipi_dsi_device *dsi)
+{
+	const struct mipi_dsi_host_ops *ops = dsi->host->ops;
+
+	if (!ops || !ops->attach)
+		return -ENOSYS;
+
+	return ops->attach(dsi->host, dsi);
+}
+EXPORT_SYMBOL(mipi_dsi_attach);
+
+/**
+ * mipi_dsi_detach - detach a DSI device from its DSI host
+ * @dsi: DSI peripheral
+ */
+int mipi_dsi_detach(struct mipi_dsi_device *dsi)
+{
+	const struct mipi_dsi_host_ops *ops = dsi->host->ops;
+
+	if (!ops || !ops->detach)
+		return -ENOSYS;
+
+	return ops->detach(dsi->host, dsi);
+}
+EXPORT_SYMBOL(mipi_dsi_detach);
+
+/**
+ * mipi_dsi_dcs_write - send DCS write command
+ * @dsi: DSI device
+ * @channel: virtual channel
+ * @data: pointer to the command followed by parameters
+ * @len: length of @data
+ */
+int mipi_dsi_dcs_write(struct mipi_dsi_device *dsi, unsigned int channel,
+		       const void *data, size_t len)
+{
+	const struct mipi_dsi_host_ops *ops = dsi->host->ops;
+	struct mipi_dsi_msg msg = {
+		.channel = channel,
+		.tx_buf = data,
+		.tx_len = len
+	};
+
+	if (!ops || !ops->transfer)
+		return -ENOSYS;
+
+	switch (len) {
+	case 0:
+		return -EINVAL;
+	case 1:
+		msg.type = MIPI_DSI_DCS_SHORT_WRITE;
+		break;
+	case 2:
+		msg.type = MIPI_DSI_DCS_SHORT_WRITE_PARAM;
+		break;
+	default:
+		msg.type = MIPI_DSI_DCS_LONG_WRITE;
+		break;
+	}
+
+	return ops->transfer(dsi->host, &msg);
+}
+EXPORT_SYMBOL(mipi_dsi_dcs_write);
+
+/**
+ * mipi_dsi_dcs_read - send DCS read request command
+ * @dsi: DSI device
+ * @channel: virtual channel
+ * @cmd: DCS read command
+ * @data: pointer to read buffer
+ * @len: length of @data
+ *
+ * Function returns number of read bytes or error code.
+ */
+ssize_t mipi_dsi_dcs_read(struct mipi_dsi_device *dsi, unsigned int channel,
+			  u8 cmd, void *data, size_t len)
+{
+	const struct mipi_dsi_host_ops *ops = dsi->host->ops;
+	struct mipi_dsi_msg msg = {
+		.channel = channel,
+		.type = MIPI_DSI_DCS_READ,
+		.tx_buf = &cmd,
+		.tx_len = 1,
+		.rx_buf = data,
+		.rx_len = len
+	};
+
+	if (!ops || !ops->transfer)
+		return -ENOSYS;
+
+	return ops->transfer(dsi->host, &msg);
+}
+EXPORT_SYMBOL(mipi_dsi_dcs_read);
+
+static int mipi_dsi_drv_probe(struct device *dev)
+{
+	struct mipi_dsi_driver *drv = to_mipi_dsi_driver(dev->driver);
+	struct mipi_dsi_device *dsi = to_mipi_dsi_device(dev);
+
+	return drv->probe(dsi);
+}
+
+static int mipi_dsi_drv_remove(struct device *dev)
+{
+	struct mipi_dsi_driver *drv = to_mipi_dsi_driver(dev->driver);
+	struct mipi_dsi_device *dsi = to_mipi_dsi_device(dev);
+
+	return drv->remove(dsi);
+}
+
+/**
+ * mipi_dsi_driver_register - register a driver for DSI devices
+ * @drv: DSI driver structure
+ */
+int mipi_dsi_driver_register(struct mipi_dsi_driver *drv)
+{
+	drv->driver.bus = &mipi_dsi_bus_type;
+	if (drv->probe)
+		drv->driver.probe = mipi_dsi_drv_probe;
+	if (drv->remove)
+		drv->driver.remove = mipi_dsi_drv_remove;
+
+	return driver_register(&drv->driver);
+}
+EXPORT_SYMBOL(mipi_dsi_driver_register);
+
+/**
+ * mipi_dsi_driver_unregister - unregister a driver for DSI devices
+ * @drv: DSI driver structure
+ */
+void mipi_dsi_driver_unregister(struct mipi_dsi_driver *drv)
+{
+	driver_unregister(&drv->driver);
+}
+EXPORT_SYMBOL(mipi_dsi_driver_unregister);
+
+static int __init mipi_dsi_bus_init(void)
+{
+	return bus_register(&mipi_dsi_bus_type);
+}
+postcore_initcall(mipi_dsi_bus_init);
+
+MODULE_AUTHOR("Andrzej Hajda <a.hajda@samsung.com>");
+MODULE_DESCRIPTION("MIPI DSI Bus");
+MODULE_LICENSE("GPL and additional rights");
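For orientation, a DSI peripheral driver would bind against this new bus, attach to its host and then talk DCS through the helpers above. The sketch below is a minimal, hypothetical panel driver; the "demo-panel" name and the choice of command are illustrative, only mipi_dsi_attach(), mipi_dsi_detach(), mipi_dsi_dcs_write() and the driver registration helpers come from the code above.

/* Hypothetical DSI peripheral driver built on the helpers above.
 * Names such as demo_panel_probe are illustrative only. */
#include <drm/drm_mipi_dsi.h>
#include <linux/module.h>
#include <video/mipi_display.h>

static int demo_panel_probe(struct mipi_dsi_device *dsi)
{
	u8 exit_sleep = MIPI_DCS_EXIT_SLEEP_MODE;
	int ret;

	ret = mipi_dsi_attach(dsi);
	if (ret < 0)
		return ret;

	/* One-byte DCS payload is sent as MIPI_DSI_DCS_SHORT_WRITE. */
	ret = mipi_dsi_dcs_write(dsi, dsi->channel, &exit_sleep,
				 sizeof(exit_sleep));
	return ret < 0 ? ret : 0;
}

static int demo_panel_remove(struct mipi_dsi_device *dsi)
{
	return mipi_dsi_detach(dsi);
}

static struct mipi_dsi_driver demo_panel_driver = {
	.probe  = demo_panel_probe,
	.remove = demo_panel_remove,
	.driver = {
		.name = "demo-panel",
	},
};

static int __init demo_panel_init(void)
{
	return mipi_dsi_driver_register(&demo_panel_driver);
}
module_init(demo_panel_init);

static void __exit demo_panel_exit(void)
{
	mipi_dsi_driver_unregister(&demo_panel_driver);
}
module_exit(demo_panel_exit);

MODULE_LICENSE("GPL");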
diff --git a/drivers/gpu/drm/drm_panel.c b/drivers/gpu/drm/drm_panel.c
new file mode 100644
index 0000000..2ef988e
--- /dev/null
+++ b/drivers/gpu/drm/drm_panel.c
@@ -0,0 +1,100 @@
+/*
+ * Copyright (C) 2013, NVIDIA Corporation.  All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sub license,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+#include <linux/err.h>
+#include <linux/module.h>
+
+#include <drm/drm_crtc.h>
+#include <drm/drm_panel.h>
+
+static DEFINE_MUTEX(panel_lock);
+static LIST_HEAD(panel_list);
+
+void drm_panel_init(struct drm_panel *panel)
+{
+	INIT_LIST_HEAD(&panel->list);
+}
+EXPORT_SYMBOL(drm_panel_init);
+
+int drm_panel_add(struct drm_panel *panel)
+{
+	mutex_lock(&panel_lock);
+	list_add_tail(&panel->list, &panel_list);
+	mutex_unlock(&panel_lock);
+
+	return 0;
+}
+EXPORT_SYMBOL(drm_panel_add);
+
+void drm_panel_remove(struct drm_panel *panel)
+{
+	mutex_lock(&panel_lock);
+	list_del_init(&panel->list);
+	mutex_unlock(&panel_lock);
+}
+EXPORT_SYMBOL(drm_panel_remove);
+
+int drm_panel_attach(struct drm_panel *panel, struct drm_connector *connector)
+{
+	if (panel->connector)
+		return -EBUSY;
+
+	panel->connector = connector;
+	panel->drm = connector->dev;
+
+	return 0;
+}
+EXPORT_SYMBOL(drm_panel_attach);
+
+int drm_panel_detach(struct drm_panel *panel)
+{
+	panel->connector = NULL;
+	panel->drm = NULL;
+
+	return 0;
+}
+EXPORT_SYMBOL(drm_panel_detach);
+
+#ifdef CONFIG_OF
+struct drm_panel *of_drm_find_panel(struct device_node *np)
+{
+	struct drm_panel *panel;
+
+	mutex_lock(&panel_lock);
+
+	list_for_each_entry(panel, &panel_list, list) {
+		if (panel->dev->of_node == np) {
+			mutex_unlock(&panel_lock);
+			return panel;
+		}
+	}
+
+	mutex_unlock(&panel_lock);
+	return NULL;
+}
+EXPORT_SYMBOL(of_drm_find_panel);
+#endif
+
+MODULE_AUTHOR("Thierry Reding <treding@nvidia.com>");
+MODULE_DESCRIPTION("DRM panel infrastructure");
+MODULE_LICENSE("GPL and additional rights");
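The panel registry is deliberately small: a panel driver adds itself to the global list, and a display controller later looks it up, typically via a device-tree phandle, and attaches it to its connector. A hedged sketch of both halves follows; the "panel" property name and the demo_* helpers are assumptions for illustration, only the drm_panel_* calls come from the file above.

/* Sketch only: one possible way a panel provider and a display driver
 * could use the drm_panel registry above. */
#include <drm/drm_crtc.h>
#include <drm/drm_panel.h>
#include <linux/device.h>
#include <linux/of.h>

/* Panel side: make the panel discoverable. */
static int demo_register_panel(struct drm_panel *panel, struct device *dev)
{
	drm_panel_init(panel);
	panel->dev = dev;
	return drm_panel_add(panel);
}

/* Display controller side: resolve the phandle and bind the panel. */
static struct drm_panel *demo_bind_panel(struct device_node *np,
					 struct drm_connector *connector)
{
	struct device_node *panel_node;
	struct drm_panel *panel;

	panel_node = of_parse_phandle(np, "panel", 0);
	if (!panel_node)
		return NULL;

	panel = of_drm_find_panel(panel_node);
	of_node_put(panel_node);
	if (panel)
		drm_panel_attach(panel, connector);

	return panel;
}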
diff --git a/drivers/gpu/drm/drm_pci.c b/drivers/gpu/drm/drm_pci.c
index 0267979..5736aaa 100644
--- a/drivers/gpu/drm/drm_pci.c
+++ b/drivers/gpu/drm/drm_pci.c
@@ -262,16 +262,11 @@
 	return 0;
 }
 
-static int drm_pci_agp_init(struct drm_device *dev)
+static void drm_pci_agp_init(struct drm_device *dev)
 {
-	if (drm_core_has_AGP(dev)) {
+	if (drm_core_check_feature(dev, DRIVER_USE_AGP)) {
 		if (drm_pci_device_is_agp(dev))
 			dev->agp = drm_agp_init(dev);
-		if (drm_core_check_feature(dev, DRIVER_REQUIRE_AGP)
-		    && (dev->agp == NULL)) {
-			DRM_ERROR("Cannot initialize the agpgart module.\n");
-			return -EINVAL;
-		}
 		if (dev->agp) {
 			dev->agp->agp_mtrr = arch_phys_wc_add(
 				dev->agp->agp_info.aper_base,
@@ -279,15 +274,14 @@
 				1024 * 1024);
 		}
 	}
-	return 0;
 }
 
-static void drm_pci_agp_destroy(struct drm_device *dev)
+void drm_pci_agp_destroy(struct drm_device *dev)
 {
-	if (drm_core_has_AGP(dev) && dev->agp) {
+	if (dev->agp) {
 		arch_phys_wc_del(dev->agp->agp_mtrr);
 		drm_agp_clear(dev);
-		drm_agp_destroy(dev->agp);
+		kfree(dev->agp);
 		dev->agp = NULL;
 	}
 }
@@ -299,8 +293,6 @@
 	.set_busid = drm_pci_set_busid,
 	.set_unique = drm_pci_set_unique,
 	.irq_by_busid = drm_pci_irq_by_busid,
-	.agp_init = drm_pci_agp_init,
-	.agp_destroy = drm_pci_agp_destroy,
 };
 
 /**
@@ -338,17 +330,25 @@
 	if (drm_core_check_feature(dev, DRIVER_MODESET))
 		pci_set_drvdata(pdev, dev);
 
+	drm_pci_agp_init(dev);
+
 	ret = drm_dev_register(dev, ent->driver_data);
 	if (ret)
-		goto err_pci;
+		goto err_agp;
 
 	DRM_INFO("Initialized %s %d.%d.%d %s for %s on minor %d\n",
 		 driver->name, driver->major, driver->minor, driver->patchlevel,
 		 driver->date, pci_name(pdev), dev->primary->index);
 
+	/* No locking needed since shadow-attach is single-threaded, as it may
+	 * only be called from the per-driver module init hook. */
+	if (!drm_core_check_feature(dev, DRIVER_MODESET))
+		list_add_tail(&dev->legacy_dev_list, &driver->legacy_dev_list);
+
 	return 0;
 
-err_pci:
+err_agp:
+	drm_pci_agp_destroy(dev);
 	pci_disable_device(pdev);
 err_free:
 	drm_dev_free(dev);
@@ -375,7 +375,6 @@
 
 	DRM_DEBUG("\n");
 
-	INIT_LIST_HEAD(&driver->device_list);
 	driver->kdriver.pci = pdriver;
 	driver->bus = &drm_pci_bus;
 
@@ -383,6 +382,7 @@
 		return pci_register_driver(pdriver);
 
 	/* If not using KMS, fall back to stealth mode manual scanning. */
+	INIT_LIST_HEAD(&driver->legacy_dev_list);
 	for (i = 0; pdriver->id_table[i].vendor != 0; i++) {
 		pid = &pdriver->id_table[i];
 
@@ -452,6 +452,7 @@
 	return -1;
 }
 
+void drm_pci_agp_destroy(struct drm_device *dev) {}
 #endif
 
 EXPORT_SYMBOL(drm_pci_init);
@@ -465,8 +466,11 @@
 	if (driver->driver_features & DRIVER_MODESET) {
 		pci_unregister_driver(pdriver);
 	} else {
-		list_for_each_entry_safe(dev, tmp, &driver->device_list, driver_item)
+		list_for_each_entry_safe(dev, tmp, &driver->legacy_dev_list,
+					 legacy_dev_list) {
 			drm_put_dev(dev);
+			list_del(&dev->legacy_dev_list);
+		}
 	}
 	DRM_INFO("Module unloaded\n");
 }
diff --git a/drivers/gpu/drm/drm_platform.c b/drivers/gpu/drm/drm_platform.c
index fc24fee..21fc820 100644
--- a/drivers/gpu/drm/drm_platform.c
+++ b/drivers/gpu/drm/drm_platform.c
@@ -147,18 +147,6 @@
 
 	driver->kdriver.platform_device = platform_device;
 	driver->bus = &drm_platform_bus;
-	INIT_LIST_HEAD(&driver->device_list);
 	return drm_get_platform_dev(platform_device, driver);
 }
 EXPORT_SYMBOL(drm_platform_init);
-
-void drm_platform_exit(struct drm_driver *driver, struct platform_device *platform_device)
-{
-	struct drm_device *dev, *tmp;
-	DRM_DEBUG("\n");
-
-	list_for_each_entry_safe(dev, tmp, &driver->device_list, driver_item)
-		drm_put_dev(dev);
-	DRM_INFO("Module unloaded\n");
-}
-EXPORT_SYMBOL(drm_platform_exit);
diff --git a/drivers/gpu/drm/drm_stub.c b/drivers/gpu/drm/drm_stub.c
index 66dd3a0..98a33c580 100644
--- a/drivers/gpu/drm/drm_stub.c
+++ b/drivers/gpu/drm/drm_stub.c
@@ -99,13 +99,19 @@
 			 const char *function_name,
 			 const char *format, ...)
 {
+	struct va_format vaf;
 	va_list args;
 
 	if (drm_debug & request_level) {
-		if (function_name)
-			printk(KERN_DEBUG "[%s:%s], ", prefix, function_name);
 		va_start(args, format);
-		vprintk(format, args);
+		vaf.fmt = format;
+		vaf.va = &args;
+
+		if (function_name)
+			printk(KERN_DEBUG "[%s:%s], %pV", prefix,
+			       function_name, &vaf);
+		else
+			printk(KERN_DEBUG "%pV", &vaf);
 		va_end(args);
 	}
 }
@@ -521,16 +527,10 @@
 
 	mutex_lock(&drm_global_mutex);
 
-	if (dev->driver->bus->agp_init) {
-		ret = dev->driver->bus->agp_init(dev);
-		if (ret)
-			goto out_unlock;
-	}
-
 	if (drm_core_check_feature(dev, DRIVER_MODESET)) {
 		ret = drm_get_minor(dev, &dev->control, DRM_MINOR_CONTROL);
 		if (ret)
-			goto err_agp;
+			goto out_unlock;
 	}
 
 	if (drm_core_check_feature(dev, DRIVER_RENDER) && drm_rnodes) {
@@ -557,8 +557,6 @@
 			goto err_unload;
 	}
 
-	list_add_tail(&dev->driver_item, &dev->driver->device_list);
-
 	ret = 0;
 	goto out_unlock;
 
@@ -571,9 +569,6 @@
 	drm_unplug_minor(dev->render);
 err_control_node:
 	drm_unplug_minor(dev->control);
-err_agp:
-	if (dev->driver->bus->agp_destroy)
-		dev->driver->bus->agp_destroy(dev);
 out_unlock:
 	mutex_unlock(&drm_global_mutex);
 	return ret;
@@ -597,8 +592,8 @@
 	if (dev->driver->unload)
 		dev->driver->unload(dev);
 
-	if (dev->driver->bus->agp_destroy)
-		dev->driver->bus->agp_destroy(dev);
+	if (dev->agp)
+		drm_pci_agp_destroy(dev);
 
 	drm_vblank_cleanup(dev);
 
@@ -608,7 +603,5 @@
 	drm_unplug_minor(dev->control);
 	drm_unplug_minor(dev->render);
 	drm_unplug_minor(dev->primary);
-
-	list_del(&dev->driver_item);
 }
 EXPORT_SYMBOL(drm_dev_unregister);
diff --git a/drivers/gpu/drm/drm_usb.c b/drivers/gpu/drm/drm_usb.c
index b179b70..0f8cb1a 100644
--- a/drivers/gpu/drm/drm_usb.c
+++ b/drivers/gpu/drm/drm_usb.c
@@ -1,4 +1,5 @@
 #include <drm/drmP.h>
+#include <drm/drm_usb.h>
 #include <linux/usb.h>
 #include <linux/module.h>
 
@@ -63,7 +64,6 @@
 	int res;
 	DRM_DEBUG("\n");
 
-	INIT_LIST_HEAD(&driver->device_list);
 	driver->kdriver.usb = udriver;
 	driver->bus = &drm_usb_bus;
 
diff --git a/drivers/gpu/drm/drm_vm.c b/drivers/gpu/drm/drm_vm.c
index 93e95d7..24e045c 100644
--- a/drivers/gpu/drm/drm_vm.c
+++ b/drivers/gpu/drm/drm_vm.c
@@ -101,7 +101,7 @@
 	/*
 	 * Find the right map
 	 */
-	if (!drm_core_has_AGP(dev))
+	if (!dev->agp)
 		goto vm_fault_error;
 
 	if (!dev->agp || !dev->agp->cant_use_aperture)
@@ -220,7 +220,6 @@
 
 	DRM_DEBUG("0x%08lx,0x%08lx\n",
 		  vma->vm_start, vma->vm_end - vma->vm_start);
-	atomic_dec(&dev->vma_count);
 
 	map = vma->vm_private_data;
 
@@ -266,9 +265,6 @@
 				dmah.size = map->size;
 				__drm_pci_free(dev, &dmah);
 				break;
-			case _DRM_GEM:
-				DRM_ERROR("tried to rmmap GEM object\n");
-				break;
 			}
 			kfree(map);
 		}
@@ -408,7 +404,6 @@
 
 	DRM_DEBUG("0x%08lx,0x%08lx\n",
 		  vma->vm_start, vma->vm_end - vma->vm_start);
-	atomic_inc(&dev->vma_count);
 
 	vma_entry = kmalloc(sizeof(*vma_entry), GFP_KERNEL);
 	if (vma_entry) {
@@ -436,7 +431,6 @@
 
 	DRM_DEBUG("0x%08lx,0x%08lx\n",
 		  vma->vm_start, vma->vm_end - vma->vm_start);
-	atomic_dec(&dev->vma_count);
 
 	list_for_each_entry_safe(pt, temp, &dev->vmalist, head) {
 		if (pt->vma == vma) {
@@ -595,7 +589,7 @@
 	switch (map->type) {
 #if !defined(__arm__)
 	case _DRM_AGP:
-		if (drm_core_has_AGP(dev) && dev->agp->cant_use_aperture) {
+		if (dev->agp && dev->agp->cant_use_aperture) {
 			/*
 			 * On some platforms we can't talk to bus dma address from the CPU, so for
 			 * memory of type DRM_AGP, we'll deal with sorting out the real physical
diff --git a/drivers/gpu/drm/exynos/Kconfig b/drivers/gpu/drm/exynos/Kconfig
index f227f54..6e1a1a2 100644
--- a/drivers/gpu/drm/exynos/Kconfig
+++ b/drivers/gpu/drm/exynos/Kconfig
@@ -51,7 +51,7 @@
 
 config DRM_EXYNOS_IPP
 	bool "Exynos DRM IPP"
-	depends on DRM_EXYNOS && !ARCH_MULTIPLATFORM
+	depends on DRM_EXYNOS
 	help
 	  Choose this option if you want to use IPP feature for DRM.
 
@@ -69,6 +69,6 @@
 
 config DRM_EXYNOS_GSC
 	bool "Exynos DRM GSC"
-	depends on DRM_EXYNOS_IPP && ARCH_EXYNOS5
+	depends on DRM_EXYNOS_IPP && ARCH_EXYNOS5 && !ARCH_MULTIPLATFORM
 	help
 	  Choose this option if you want to use Exynos GSC for DRM.
diff --git a/drivers/gpu/drm/exynos/exynos_drm_drv.c b/drivers/gpu/drm/exynos/exynos_drm_drv.c
index 22b8f5e..215131a 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_drv.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_drv.c
@@ -14,6 +14,8 @@
 #include <drm/drmP.h>
 #include <drm/drm_crtc_helper.h>
 
+#include <linux/anon_inodes.h>
+
 #include <drm/exynos_drm.h>
 
 #include "exynos_drm_drv.h"
@@ -119,6 +121,8 @@
 
 	drm_vblank_offdelay = VBLANK_OFF_DELAY;
 
+	platform_set_drvdata(dev->platformdev, dev);
+
 	return 0;
 
 err_drm_device:
@@ -150,9 +154,14 @@
 	return 0;
 }
 
+static const struct file_operations exynos_drm_gem_fops = {
+	.mmap = exynos_drm_gem_mmap_buffer,
+};
+
 static int exynos_drm_open(struct drm_device *dev, struct drm_file *file)
 {
 	struct drm_exynos_file_private *file_priv;
+	struct file *anon_filp;
 	int ret;
 
 	file_priv = kzalloc(sizeof(*file_priv), GFP_KERNEL);
@@ -162,11 +171,23 @@
 	file->driver_priv = file_priv;
 
 	ret = exynos_drm_subdrv_open(dev, file);
-	if (ret) {
-		kfree(file_priv);
-		file->driver_priv = NULL;
+	if (ret)
+		goto out;
+
+	anon_filp = anon_inode_getfile("exynos_gem", &exynos_drm_gem_fops,
+					NULL, 0);
+	if (IS_ERR(anon_filp)) {
+		ret = PTR_ERR(anon_filp);
+		goto out;
 	}
 
+	anon_filp->f_mode = FMODE_READ | FMODE_WRITE;
+	file_priv->anon_filp = anon_filp;
+
+	return ret;
+out:
+	kfree(file_priv);
+	file->driver_priv = NULL;
 	return ret;
 }
 
@@ -179,6 +200,7 @@
 static void exynos_drm_postclose(struct drm_device *dev, struct drm_file *file)
 {
 	struct exynos_drm_private *private = dev->dev_private;
+	struct drm_exynos_file_private *file_priv;
 	struct drm_pending_vblank_event *v, *vt;
 	struct drm_pending_event *e, *et;
 	unsigned long flags;
@@ -204,6 +226,9 @@
 	}
 	spin_unlock_irqrestore(&dev->event_lock, flags);
 
+	file_priv = file->driver_priv;
+	if (file_priv->anon_filp)
+		fput(file_priv->anon_filp);
 
 	kfree(file->driver_priv);
 	file->driver_priv = NULL;
@@ -305,7 +330,7 @@
 
 static int exynos_drm_platform_remove(struct platform_device *pdev)
 {
-	drm_platform_exit(&exynos_drm_driver, pdev);
+	drm_put_dev(platform_get_drvdata(pdev));
 
 	return 0;
 }
diff --git a/drivers/gpu/drm/exynos/exynos_drm_drv.h b/drivers/gpu/drm/exynos/exynos_drm_drv.h
index eaa1966..0eaf5a2 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_drv.h
+++ b/drivers/gpu/drm/exynos/exynos_drm_drv.h
@@ -226,6 +226,7 @@
 struct drm_exynos_file_private {
 	struct exynos_drm_g2d_private	*g2d_priv;
 	struct exynos_drm_ipp_private	*ipp_priv;
+	struct file			*anon_filp;
 };
 
 /*
diff --git a/drivers/gpu/drm/exynos/exynos_drm_fimd.c b/drivers/gpu/drm/exynos/exynos_drm_fimd.c
index a61878b..a20440c 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_fimd.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_fimd.c
@@ -347,7 +347,7 @@
 	 */
 	if (!wait_event_timeout(ctx->wait_vsync_queue,
 				!atomic_read(&ctx->wait_vsync_event),
-				DRM_HZ/20))
+				HZ/20))
 		DRM_DEBUG_KMS("vblank wait timed out.\n");
 }
 
@@ -706,7 +706,7 @@
 	/* set wait vsync event to zero and wake up queue. */
 	if (atomic_read(&ctx->wait_vsync_event)) {
 		atomic_set(&ctx->wait_vsync_event, 0);
-		DRM_WAKEUP(&ctx->wait_vsync_queue);
+		wake_up(&ctx->wait_vsync_queue);
 	}
 out:
 	return IRQ_HANDLED;
@@ -954,7 +954,7 @@
 	}
 
 	ctx->driver_data = drm_fimd_get_driver_data(pdev);
-	DRM_INIT_WAITQUEUE(&ctx->wait_vsync_queue);
+	init_waitqueue_head(&ctx->wait_vsync_queue);
 	atomic_set(&ctx->wait_vsync_event, 0);
 
 	subdrv = &ctx->subdrv;
diff --git a/drivers/gpu/drm/exynos/exynos_drm_g2d.c b/drivers/gpu/drm/exynos/exynos_drm_g2d.c
index 380aec2..6c1885e 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_g2d.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_g2d.c
@@ -607,7 +607,7 @@
 		reg_type = REG_TYPE_NONE;
 		DRM_ERROR("Unknown register offset![%d]\n", reg_offset);
 		break;
-	};
+	}
 
 	return reg_type;
 }
diff --git a/drivers/gpu/drm/exynos/exynos_drm_gem.c b/drivers/gpu/drm/exynos/exynos_drm_gem.c
index be59d50..42d2904 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_gem.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_gem.c
@@ -338,46 +338,22 @@
 			&args->offset);
 }
 
-static struct drm_file *exynos_drm_find_drm_file(struct drm_device *drm_dev,
-							struct file *filp)
-{
-	struct drm_file *file_priv;
-
-	/* find current process's drm_file from filelist. */
-	list_for_each_entry(file_priv, &drm_dev->filelist, lhead)
-		if (file_priv->filp == filp)
-			return file_priv;
-
-	WARN_ON(1);
-
-	return ERR_PTR(-EFAULT);
-}
-
-static int exynos_drm_gem_mmap_buffer(struct file *filp,
+int exynos_drm_gem_mmap_buffer(struct file *filp,
 				      struct vm_area_struct *vma)
 {
 	struct drm_gem_object *obj = filp->private_data;
 	struct exynos_drm_gem_obj *exynos_gem_obj = to_exynos_gem_obj(obj);
 	struct drm_device *drm_dev = obj->dev;
 	struct exynos_drm_gem_buf *buffer;
-	struct drm_file *file_priv;
 	unsigned long vm_size;
 	int ret;
 
+	WARN_ON(!mutex_is_locked(&obj->dev->struct_mutex));
+
 	vma->vm_flags |= VM_IO | VM_DONTEXPAND | VM_DONTDUMP;
 	vma->vm_private_data = obj;
 	vma->vm_ops = drm_dev->driver->gem_vm_ops;
 
-	/* restore it to driver's fops. */
-	filp->f_op = fops_get(drm_dev->driver->fops);
-
-	file_priv = exynos_drm_find_drm_file(drm_dev, filp);
-	if (IS_ERR(file_priv))
-		return PTR_ERR(file_priv);
-
-	/* restore it to drm_file. */
-	filp->private_data = file_priv;
-
 	update_vm_cache_attr(exynos_gem_obj, vma);
 
 	vm_size = vma->vm_end - vma->vm_start;
@@ -411,15 +387,13 @@
 	return 0;
 }
 
-static const struct file_operations exynos_drm_gem_fops = {
-	.mmap = exynos_drm_gem_mmap_buffer,
-};
-
 int exynos_drm_gem_mmap_ioctl(struct drm_device *dev, void *data,
 			      struct drm_file *file_priv)
 {
+	struct drm_exynos_file_private *exynos_file_priv;
 	struct drm_exynos_gem_mmap *args = data;
 	struct drm_gem_object *obj;
+	struct file *anon_filp;
 	unsigned long addr;
 
 	if (!(dev->driver->driver_features & DRIVER_GEM)) {
@@ -427,47 +401,25 @@
 		return -ENODEV;
 	}
 
+	mutex_lock(&dev->struct_mutex);
+
 	obj = drm_gem_object_lookup(dev, file_priv, args->handle);
 	if (!obj) {
 		DRM_ERROR("failed to lookup gem object.\n");
+		mutex_unlock(&dev->struct_mutex);
 		return -EINVAL;
 	}
 
-	/*
-	 * We have to use gem object and its fops for specific mmaper,
-	 * but vm_mmap() can deliver only filp. So we have to change
-	 * filp->f_op and filp->private_data temporarily, then restore
-	 * again. So it is important to keep lock until restoration the
-	 * settings to prevent others from misuse of filp->f_op or
-	 * filp->private_data.
-	 */
-	mutex_lock(&dev->struct_mutex);
+	exynos_file_priv = file_priv->driver_priv;
+	anon_filp = exynos_file_priv->anon_filp;
+	anon_filp->private_data = obj;
 
-	/*
-	 * Set specific mmper's fops. And it will be restored by
-	 * exynos_drm_gem_mmap_buffer to dev->driver->fops.
-	 * This is used to call specific mapper temporarily.
-	 */
-	file_priv->filp->f_op = &exynos_drm_gem_fops;
-
-	/*
-	 * Set gem object to private_data so that specific mmaper
-	 * can get the gem object. And it will be restored by
-	 * exynos_drm_gem_mmap_buffer to drm_file.
-	 */
-	file_priv->filp->private_data = obj;
-
-	addr = vm_mmap(file_priv->filp, 0, args->size,
-			PROT_READ | PROT_WRITE, MAP_SHARED, 0);
+	addr = vm_mmap(anon_filp, 0, args->size, PROT_READ | PROT_WRITE,
+			MAP_SHARED, 0);
 
 	drm_gem_object_unreference(obj);
 
 	if (IS_ERR_VALUE(addr)) {
-		/* check filp->f_op, filp->private_data are restored */
-		if (file_priv->filp->f_op == &exynos_drm_gem_fops) {
-			file_priv->filp->f_op = fops_get(dev->driver->fops);
-			file_priv->filp->private_data = file_priv;
-		}
 		mutex_unlock(&dev->struct_mutex);
 		return (int)addr;
 	}
diff --git a/drivers/gpu/drm/exynos/exynos_drm_gem.h b/drivers/gpu/drm/exynos/exynos_drm_gem.h
index b8c818b..1592c0b 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_gem.h
+++ b/drivers/gpu/drm/exynos/exynos_drm_gem.h
@@ -122,6 +122,9 @@
 int exynos_drm_gem_mmap_ioctl(struct drm_device *dev, void *data,
 			      struct drm_file *file_priv);
 
+int exynos_drm_gem_mmap_buffer(struct file *filp,
+				      struct vm_area_struct *vma);
+
 /* map user space allocated by malloc to pages. */
 int exynos_drm_gem_userptr_ioctl(struct drm_device *dev, void *data,
 				      struct drm_file *file_priv);
diff --git a/drivers/gpu/drm/exynos/exynos_drm_ipp.c b/drivers/gpu/drm/exynos/exynos_drm_ipp.c
index d519a4e..09312b8 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_ipp.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_ipp.c
@@ -16,7 +16,6 @@
 #include <linux/types.h>
 #include <linux/clk.h>
 #include <linux/pm_runtime.h>
-#include <plat/map-base.h>
 
 #include <drm/drmP.h>
 #include <drm/exynos_drm.h>
@@ -826,7 +825,7 @@
 		DRM_DEBUG_KMS("count[%d]e[0x%x]\n", count++, (int)e);
 
 		/*
-		 * quf == NULL condition means all event deletion.
+		 * qbuf == NULL condition means all event deletion.
 		 * stop operations want to delete all event list.
 		 * another case delete only same buf id.
 		 */
diff --git a/drivers/gpu/drm/exynos/exynos_hdmi.c b/drivers/gpu/drm/exynos/exynos_hdmi.c
index a0e10ae..c021ddc 100644
--- a/drivers/gpu/drm/exynos/exynos_hdmi.c
+++ b/drivers/gpu/drm/exynos/exynos_hdmi.c
@@ -34,6 +34,7 @@
 #include <linux/io.h>
 #include <linux/of.h>
 #include <linux/of_gpio.h>
+#include <linux/hdmi.h>
 
 #include <drm/exynos_drm.h>
 
@@ -59,19 +60,6 @@
 #define HDMI_AUI_VERSION	0x01
 #define HDMI_AUI_LENGTH	0x0A
 
-/* HDMI infoframe to configure HDMI out packet header, AUI and AVI */
-enum HDMI_PACKET_TYPE {
-	/* refer to Table 5-8 Packet Type in HDMI specification v1.4a */
-	/* InfoFrame packet type */
-	HDMI_PACKET_TYPE_INFOFRAME = 0x80,
-	/* Vendor-Specific InfoFrame */
-	HDMI_PACKET_TYPE_VSI = HDMI_PACKET_TYPE_INFOFRAME + 1,
-	/* Auxiliary Video information InfoFrame */
-	HDMI_PACKET_TYPE_AVI = HDMI_PACKET_TYPE_INFOFRAME + 2,
-	/* Audio information InfoFrame */
-	HDMI_PACKET_TYPE_AUI = HDMI_PACKET_TYPE_INFOFRAME + 4
-};
-
 enum hdmi_type {
 	HDMI_TYPE13,
 	HDMI_TYPE14,
@@ -379,12 +367,6 @@
 	},
 };
 
-struct hdmi_infoframe {
-	enum HDMI_PACKET_TYPE type;
-	u8 ver;
-	u8 len;
-};
-
 static inline u32 hdmi_reg_read(struct hdmi_context *hdata, u32 reg_id)
 {
 	return readl(hdata->regs + reg_id);
@@ -682,7 +664,7 @@
 }
 
 static void hdmi_reg_infoframe(struct hdmi_context *hdata,
-			struct hdmi_infoframe *infoframe)
+			union hdmi_infoframe *infoframe)
 {
 	u32 hdr_sum;
 	u8 chksum;
@@ -700,13 +682,15 @@
 		return;
 	}
 
-	switch (infoframe->type) {
-	case HDMI_PACKET_TYPE_AVI:
+	switch (infoframe->any.type) {
+	case HDMI_INFOFRAME_TYPE_AVI:
 		hdmi_reg_writeb(hdata, HDMI_AVI_CON, HDMI_AVI_CON_EVERY_VSYNC);
-		hdmi_reg_writeb(hdata, HDMI_AVI_HEADER0, infoframe->type);
-		hdmi_reg_writeb(hdata, HDMI_AVI_HEADER1, infoframe->ver);
-		hdmi_reg_writeb(hdata, HDMI_AVI_HEADER2, infoframe->len);
-		hdr_sum = infoframe->type + infoframe->ver + infoframe->len;
+		hdmi_reg_writeb(hdata, HDMI_AVI_HEADER0, infoframe->any.type);
+		hdmi_reg_writeb(hdata, HDMI_AVI_HEADER1,
+				infoframe->any.version);
+		hdmi_reg_writeb(hdata, HDMI_AVI_HEADER2, infoframe->any.length);
+		hdr_sum = infoframe->any.type + infoframe->any.version +
+			  infoframe->any.length;
 
 		/* Output format zero hardcoded, RGB/YCbCr selection */
 		hdmi_reg_writeb(hdata, HDMI_AVI_BYTE(1), 0 << 5 |
@@ -722,18 +706,20 @@
 		hdmi_reg_writeb(hdata, HDMI_AVI_BYTE(4), vic);
 
 		chksum = hdmi_chksum(hdata, HDMI_AVI_BYTE(1),
-					infoframe->len, hdr_sum);
+					infoframe->any.length, hdr_sum);
 		DRM_DEBUG_KMS("AVI checksum = 0x%x\n", chksum);
 		hdmi_reg_writeb(hdata, HDMI_AVI_CHECK_SUM, chksum);
 		break;
-	case HDMI_PACKET_TYPE_AUI:
+	case HDMI_INFOFRAME_TYPE_AUDIO:
 		hdmi_reg_writeb(hdata, HDMI_AUI_CON, 0x02);
-		hdmi_reg_writeb(hdata, HDMI_AUI_HEADER0, infoframe->type);
-		hdmi_reg_writeb(hdata, HDMI_AUI_HEADER1, infoframe->ver);
-		hdmi_reg_writeb(hdata, HDMI_AUI_HEADER2, infoframe->len);
-		hdr_sum = infoframe->type + infoframe->ver + infoframe->len;
+		hdmi_reg_writeb(hdata, HDMI_AUI_HEADER0, infoframe->any.type);
+		hdmi_reg_writeb(hdata, HDMI_AUI_HEADER1,
+				infoframe->any.version);
+		hdmi_reg_writeb(hdata, HDMI_AUI_HEADER2, infoframe->any.length);
+		hdr_sum = infoframe->any.type + infoframe->any.version +
+			  infoframe->any.length;
 		chksum = hdmi_chksum(hdata, HDMI_AUI_BYTE(1),
-					infoframe->len, hdr_sum);
+					infoframe->any.length, hdr_sum);
 		DRM_DEBUG_KMS("AUI checksum = 0x%x\n", chksum);
 		hdmi_reg_writeb(hdata, HDMI_AUI_CHECK_SUM, chksum);
 		break;
@@ -985,7 +971,7 @@
 
 static void hdmi_conf_init(struct hdmi_context *hdata)
 {
-	struct hdmi_infoframe infoframe;
+	union hdmi_infoframe infoframe;
 
 	/* disable HPD interrupts from HDMI IP block, use GPIO instead */
 	hdmi_reg_writemask(hdata, HDMI_INTC_CON, 0, HDMI_INTC_EN_GLOBAL |
@@ -1021,14 +1007,14 @@
 		hdmi_reg_writeb(hdata, HDMI_V13_AUI_CON, 0x02);
 		hdmi_reg_writeb(hdata, HDMI_V13_ACR_CON, 0x04);
 	} else {
-		infoframe.type = HDMI_PACKET_TYPE_AVI;
-		infoframe.ver = HDMI_AVI_VERSION;
-		infoframe.len = HDMI_AVI_LENGTH;
+		infoframe.any.type = HDMI_INFOFRAME_TYPE_AVI;
+		infoframe.any.version = HDMI_AVI_VERSION;
+		infoframe.any.length = HDMI_AVI_LENGTH;
 		hdmi_reg_infoframe(hdata, &infoframe);
 
-		infoframe.type = HDMI_PACKET_TYPE_AUI;
-		infoframe.ver = HDMI_AUI_VERSION;
-		infoframe.len = HDMI_AUI_LENGTH;
+		infoframe.any.type = HDMI_INFOFRAME_TYPE_AUDIO;
+		infoframe.any.version = HDMI_AUI_VERSION;
+		infoframe.any.length = HDMI_AUI_LENGTH;
 		hdmi_reg_infoframe(hdata, &infoframe);
 
 		/* enable AVI packet every vsync, fixes purple line problem */
diff --git a/drivers/gpu/drm/exynos/exynos_mixer.c b/drivers/gpu/drm/exynos/exynos_mixer.c
index 63bc5f9..2dfa48c 100644
--- a/drivers/gpu/drm/exynos/exynos_mixer.c
+++ b/drivers/gpu/drm/exynos/exynos_mixer.c
@@ -868,7 +868,7 @@
 	 */
 	if (!wait_event_timeout(mixer_ctx->wait_vsync_queue,
 				!atomic_read(&mixer_ctx->wait_vsync_event),
-				DRM_HZ/20))
+				HZ/20))
 		DRM_DEBUG_KMS("vblank wait timed out.\n");
 }
 
@@ -1019,7 +1019,7 @@
 		/* set wait vsync event to zero and wake up queue. */
 		if (atomic_read(&ctx->wait_vsync_event)) {
 			atomic_set(&ctx->wait_vsync_event, 0);
-			DRM_WAKEUP(&ctx->wait_vsync_queue);
+			wake_up(&ctx->wait_vsync_queue);
 		}
 	}
 
@@ -1209,7 +1209,7 @@
 	drm_hdmi_ctx->ctx = (void *)ctx;
 	ctx->vp_enabled = drv->is_vp_enabled;
 	ctx->mxr_ver = drv->version;
-	DRM_INIT_WAITQUEUE(&ctx->wait_vsync_queue);
+	init_waitqueue_head(&ctx->wait_vsync_queue);
 	atomic_set(&ctx->wait_vsync_event, 0);
 
 	platform_set_drvdata(pdev, drm_hdmi_ctx);
diff --git a/drivers/gpu/drm/gma500/accel_2d.c b/drivers/gpu/drm/gma500/accel_2d.c
index d5ef1a5..de6f62a 100644
--- a/drivers/gpu/drm/gma500/accel_2d.c
+++ b/drivers/gpu/drm/gma500/accel_2d.c
@@ -326,7 +326,7 @@
 	struct psb_framebuffer *psbfb = &fbdev->pfb;
 	struct drm_device *dev = psbfb->base.dev;
 	struct drm_psb_private *dev_priv = dev->dev_private;
-	unsigned long _end = jiffies + DRM_HZ;
+	unsigned long _end = jiffies + HZ;
 	int busy = 0;
 	unsigned long flags;
 
diff --git a/drivers/gpu/drm/gma500/cdv_intel_dp.c b/drivers/gpu/drm/gma500/cdv_intel_dp.c
index f88a181..0490ce3 100644
--- a/drivers/gpu/drm/gma500/cdv_intel_dp.c
+++ b/drivers/gpu/drm/gma500/cdv_intel_dp.c
@@ -483,7 +483,7 @@
 
 	if (send_bytes > 16)
 		return -1;
-	msg[0] = AUX_NATIVE_WRITE << 4;
+	msg[0] = DP_AUX_NATIVE_WRITE << 4;
 	msg[1] = address >> 8;
 	msg[2] = address & 0xff;
 	msg[3] = send_bytes - 1;
@@ -493,9 +493,10 @@
 		ret = cdv_intel_dp_aux_ch(encoder, msg, msg_bytes, &ack, 1);
 		if (ret < 0)
 			return ret;
-		if ((ack & AUX_NATIVE_REPLY_MASK) == AUX_NATIVE_REPLY_ACK)
+		ack >>= 4;
+		if ((ack & DP_AUX_NATIVE_REPLY_MASK) == DP_AUX_NATIVE_REPLY_ACK)
 			break;
-		else if ((ack & AUX_NATIVE_REPLY_MASK) == AUX_NATIVE_REPLY_DEFER)
+		else if ((ack & DP_AUX_NATIVE_REPLY_MASK) == DP_AUX_NATIVE_REPLY_DEFER)
 			udelay(100);
 		else
 			return -EIO;
@@ -523,7 +524,7 @@
 	uint8_t ack;
 	int ret;
 
-	msg[0] = AUX_NATIVE_READ << 4;
+	msg[0] = DP_AUX_NATIVE_READ << 4;
 	msg[1] = address >> 8;
 	msg[2] = address & 0xff;
 	msg[3] = recv_bytes - 1;
@@ -538,12 +539,12 @@
 			return -EPROTO;
 		if (ret < 0)
 			return ret;
-		ack = reply[0];
-		if ((ack & AUX_NATIVE_REPLY_MASK) == AUX_NATIVE_REPLY_ACK) {
+		ack = reply[0] >> 4;
+		if ((ack & DP_AUX_NATIVE_REPLY_MASK) == DP_AUX_NATIVE_REPLY_ACK) {
 			memcpy(recv, reply + 1, ret - 1);
 			return ret - 1;
 		}
-		else if ((ack & AUX_NATIVE_REPLY_MASK) == AUX_NATIVE_REPLY_DEFER)
+		else if ((ack & DP_AUX_NATIVE_REPLY_MASK) == DP_AUX_NATIVE_REPLY_DEFER)
 			udelay(100);
 		else
 			return -EIO;
@@ -569,12 +570,12 @@
 
 	/* Set up the command byte */
 	if (mode & MODE_I2C_READ)
-		msg[0] = AUX_I2C_READ << 4;
+		msg[0] = DP_AUX_I2C_READ << 4;
 	else
-		msg[0] = AUX_I2C_WRITE << 4;
+		msg[0] = DP_AUX_I2C_WRITE << 4;
 
 	if (!(mode & MODE_I2C_STOP))
-		msg[0] |= AUX_I2C_MOT << 4;
+		msg[0] |= DP_AUX_I2C_MOT << 4;
 
 	msg[1] = address >> 8;
 	msg[2] = address;
@@ -606,16 +607,16 @@
 			return ret;
 		}
 
-		switch (reply[0] & AUX_NATIVE_REPLY_MASK) {
-		case AUX_NATIVE_REPLY_ACK:
+		switch ((reply[0] >> 4) & DP_AUX_NATIVE_REPLY_MASK) {
+		case DP_AUX_NATIVE_REPLY_ACK:
 			/* I2C-over-AUX Reply field is only valid
 			 * when paired with AUX ACK.
 			 */
 			break;
-		case AUX_NATIVE_REPLY_NACK:
+		case DP_AUX_NATIVE_REPLY_NACK:
 			DRM_DEBUG_KMS("aux_ch native nack\n");
 			return -EREMOTEIO;
-		case AUX_NATIVE_REPLY_DEFER:
+		case DP_AUX_NATIVE_REPLY_DEFER:
 			udelay(100);
 			continue;
 		default:
@@ -624,16 +625,16 @@
 			return -EREMOTEIO;
 		}
 
-		switch (reply[0] & AUX_I2C_REPLY_MASK) {
-		case AUX_I2C_REPLY_ACK:
+		switch ((reply[0] >> 4) & DP_AUX_I2C_REPLY_MASK) {
+		case DP_AUX_I2C_REPLY_ACK:
 			if (mode == MODE_I2C_READ) {
 				*read_byte = reply[1];
 			}
 			return reply_bytes - 1;
-		case AUX_I2C_REPLY_NACK:
+		case DP_AUX_I2C_REPLY_NACK:
 			DRM_DEBUG_KMS("aux_i2c nack\n");
 			return -EREMOTEIO;
-		case AUX_I2C_REPLY_DEFER:
+		case DP_AUX_I2C_REPLY_DEFER:
 			DRM_DEBUG_KMS("aux_i2c defer\n");
 			udelay(100);
 			break;
@@ -677,7 +678,7 @@
 	return ret;
 }
 
-void cdv_intel_fixed_panel_mode(struct drm_display_mode *fixed_mode,
+static void cdv_intel_fixed_panel_mode(struct drm_display_mode *fixed_mode,
 	struct drm_display_mode *adjusted_mode)
 {
 	adjusted_mode->hdisplay = fixed_mode->hdisplay;
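The gma500 AUX changes switch to the shared DP_AUX_* reply codes, which are unshifted, hence the added ">> 4" when extracting the reply field from the first byte coming back over the channel. Below is a small standalone decoder showing that layout; the constant values are quoted from the common DisplayPort AUX definitions (drm_dp_helper.h), not from this driver.

/* Sketch: decoding the first byte of a raw DP AUX reply with the
 * unshifted DP_AUX_* codes used above. */
#include <stdio.h>

#define DP_AUX_NATIVE_REPLY_ACK		(0x0 << 0)
#define DP_AUX_NATIVE_REPLY_NACK	(0x1 << 0)
#define DP_AUX_NATIVE_REPLY_DEFER	(0x2 << 0)
#define DP_AUX_NATIVE_REPLY_MASK	(0x3 << 0)
#define DP_AUX_I2C_REPLY_ACK		(0x0 << 2)
#define DP_AUX_I2C_REPLY_NACK		(0x1 << 2)
#define DP_AUX_I2C_REPLY_DEFER		(0x2 << 2)
#define DP_AUX_I2C_REPLY_MASK		(0x3 << 2)

int main(void)
{
	unsigned char raw = 0x20;	/* example reply byte off the wire */
	unsigned char ack = raw >> 4;	/* reply codes live in the top nibble */

	if ((ack & DP_AUX_NATIVE_REPLY_MASK) == DP_AUX_NATIVE_REPLY_ACK)
		printf("native ACK\n");
	else if ((ack & DP_AUX_NATIVE_REPLY_MASK) == DP_AUX_NATIVE_REPLY_DEFER)
		printf("native DEFER, retry\n");
	else
		printf("native NACK\n");

	if ((ack & DP_AUX_I2C_REPLY_MASK) == DP_AUX_I2C_REPLY_DEFER)
		printf("i2c DEFER\n");
	return 0;
}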
diff --git a/drivers/gpu/drm/gma500/gma_display.c b/drivers/gpu/drm/gma500/gma_display.c
index 24e8af3..386de2c 100644
--- a/drivers/gpu/drm/gma500/gma_display.c
+++ b/drivers/gpu/drm/gma500/gma_display.c
@@ -349,6 +349,7 @@
 	/* If we didn't get a handle then turn the cursor off */
 	if (!handle) {
 		temp = CURSOR_MODE_DISABLE;
+		mutex_lock(&dev->struct_mutex);
 
 		if (gma_power_begin(dev, false)) {
 			REG_WRITE(control, temp);
@@ -365,6 +366,7 @@
 			gma_crtc->cursor_obj = NULL;
 		}
 
+		mutex_unlock(&dev->struct_mutex);
 		return 0;
 	}
 
@@ -374,9 +376,12 @@
 		return -EINVAL;
 	}
 
+	mutex_lock(&dev->struct_mutex);
 	obj = drm_gem_object_lookup(dev, file_priv, handle);
-	if (!obj)
-		return -ENOENT;
+	if (!obj) {
+		ret = -ENOENT;
+		goto unlock;
+	}
 
 	if (obj->size < width * height * 4) {
 		dev_dbg(dev->dev, "Buffer is too small\n");
@@ -440,10 +445,13 @@
 	}
 
 	gma_crtc->cursor_obj = obj;
+unlock:
+	mutex_unlock(&dev->struct_mutex);
 	return ret;
 
 unref_cursor:
 	drm_gem_object_unreference(obj);
+	mutex_unlock(&dev->struct_mutex);
 	return ret;
 }
 
diff --git a/drivers/gpu/drm/gma500/psb_drv.h b/drivers/gpu/drm/gma500/psb_drv.h
index b59e658..5ad6a03 100644
--- a/drivers/gpu/drm/gma500/psb_drv.h
+++ b/drivers/gpu/drm/gma500/psb_drv.h
@@ -212,8 +212,8 @@
 #define PSB_HIGH_REG_OFFS 0x0600
 
 #define PSB_NUM_VBLANKS 2
-#define PSB_WATCHDOG_DELAY (DRM_HZ * 2)
-#define PSB_LID_DELAY (DRM_HZ / 10)
+#define PSB_WATCHDOG_DELAY (HZ * 2)
+#define PSB_LID_DELAY (HZ / 10)
 
 #define MDFLD_PNW_B0 0x04
 #define MDFLD_PNW_C0 0x08
@@ -232,7 +232,7 @@
 #define MDFLD_DSR_RR		45
 #define MDFLD_DPU_ENABLE 	(1 << 31)
 #define MDFLD_DSR_FULLSCREEN 	(1 << 30)
-#define MDFLD_DSR_DELAY		(DRM_HZ / MDFLD_DSR_RR)
+#define MDFLD_DSR_DELAY		(HZ / MDFLD_DSR_RR)
 
 #define PSB_PWR_STATE_ON		1
 #define PSB_PWR_STATE_OFF		2
@@ -769,7 +769,7 @@
  *psb_irq.c
  */
 
-extern irqreturn_t psb_irq_handler(DRM_IRQ_ARGS);
+extern irqreturn_t psb_irq_handler(int irq, void *arg);
 extern int psb_irq_enable_dpst(struct drm_device *dev);
 extern int psb_irq_disable_dpst(struct drm_device *dev);
 extern void psb_irq_preinstall(struct drm_device *dev);
diff --git a/drivers/gpu/drm/gma500/psb_intel_drv.h b/drivers/gpu/drm/gma500/psb_intel_drv.h
index bde27fd..dc2c8eb 100644
--- a/drivers/gpu/drm/gma500/psb_intel_drv.h
+++ b/drivers/gpu/drm/gma500/psb_intel_drv.h
@@ -250,11 +250,6 @@
 extern int intelfb_probe(struct drm_device *dev);
 extern int intelfb_remove(struct drm_device *dev,
 			  struct drm_framebuffer *fb);
-extern struct drm_framebuffer *psb_intel_framebuffer_create(struct drm_device
-							*dev, struct
-							drm_mode_fb_cmd
-							*mode_cmd,
-							void *mm_private);
 extern bool psb_intel_lvds_mode_fixup(struct drm_encoder *encoder,
 				      const struct drm_display_mode *mode,
 				      struct drm_display_mode *adjusted_mode);
diff --git a/drivers/gpu/drm/gma500/psb_irq.c b/drivers/gpu/drm/gma500/psb_irq.c
index ba48303..f883f9e 100644
--- a/drivers/gpu/drm/gma500/psb_irq.c
+++ b/drivers/gpu/drm/gma500/psb_irq.c
@@ -200,7 +200,7 @@
 		mid_pipe_event_handler(dev, 1);
 }
 
-irqreturn_t psb_irq_handler(DRM_IRQ_ARGS)
+irqreturn_t psb_irq_handler(int irq, void *arg)
 {
 	struct drm_device *dev = arg;
 	struct drm_psb_private *dev_priv = dev->dev_private;
@@ -253,7 +253,7 @@
 
 	PSB_WVDC32(vdc_stat, PSB_INT_IDENTITY_R);
 	(void) PSB_RVDC32(PSB_INT_IDENTITY_R);
-	DRM_READMEMORYBARRIER();
+	rmb();
 
 	if (!handled)
 		return IRQ_NONE;
@@ -450,21 +450,6 @@
 	return 0;
 }
 
-#ifdef PSB_FIXME
-static int psb_vblank_do_wait(struct drm_device *dev,
-			      unsigned int *sequence, atomic_t *counter)
-{
-	unsigned int cur_vblank;
-	int ret = 0;
-	DRM_WAIT_ON(ret, dev->vblank.queue, 3 * DRM_HZ,
-		    (((cur_vblank = atomic_read(counter))
-		      - *sequence) <= (1 << 23)));
-	*sequence = cur_vblank;
-
-	return ret;
-}
-#endif
-
 /*
  * It is used to enable VBLANK interrupt
  */
diff --git a/drivers/gpu/drm/gma500/psb_irq.h b/drivers/gpu/drm/gma500/psb_irq.h
index debb7f1..d0b45ff 100644
--- a/drivers/gpu/drm/gma500/psb_irq.h
+++ b/drivers/gpu/drm/gma500/psb_irq.h
@@ -32,7 +32,7 @@
 void psb_irq_preinstall(struct drm_device *dev);
 int  psb_irq_postinstall(struct drm_device *dev);
 void psb_irq_uninstall(struct drm_device *dev);
-irqreturn_t psb_irq_handler(DRM_IRQ_ARGS);
+irqreturn_t psb_irq_handler(int irq, void *arg);
 
 int psb_irq_enable_dpst(struct drm_device *dev);
 int psb_irq_disable_dpst(struct drm_device *dev);
diff --git a/drivers/gpu/drm/i2c/tda998x_drv.c b/drivers/gpu/drm/i2c/tda998x_drv.c
index 400b0c4..faa77f5 100644
--- a/drivers/gpu/drm/i2c/tda998x_drv.c
+++ b/drivers/gpu/drm/i2c/tda998x_drv.c
@@ -208,7 +208,7 @@
 # define PLL_SERIAL_1_SRL_IZ(x)   (((x) & 3) << 1)
 # define PLL_SERIAL_1_SRL_MAN_IZ  (1 << 6)
 #define REG_PLL_SERIAL_2          REG(0x02, 0x01)     /* read/write */
-# define PLL_SERIAL_2_SRL_NOSC(x) (((x) & 3) << 0)
+# define PLL_SERIAL_2_SRL_NOSC(x) ((x) << 0)
 # define PLL_SERIAL_2_SRL_PR(x)   (((x) & 0xf) << 4)
 #define REG_PLL_SERIAL_3          REG(0x02, 0x02)     /* read/write */
 # define PLL_SERIAL_3_SRL_CCIR    (1 << 0)
@@ -528,10 +528,10 @@
 {
 	uint8_t buf[PB(5) + 1];
 
+	memset(buf, 0, sizeof(buf));
 	buf[HB(0)] = 0x84;
 	buf[HB(1)] = 0x01;
 	buf[HB(2)] = 10;
-	buf[PB(0)] = 0;
 	buf[PB(1)] = p->audio_frame[1] & 0x07; /* CC */
 	buf[PB(2)] = p->audio_frame[2] & 0x1c; /* SF */
 	buf[PB(4)] = p->audio_frame[4];
@@ -824,6 +824,11 @@
 	}
 
 	div = 148500 / mode->clock;
+	if (div != 0) {
+		div--;
+		if (div > 3)
+			div = 3;
+	}
 
 	/* mute the audio FIFO: */
 	reg_set(encoder, REG_AIP_CNTRL_0, AIP_CNTRL_0_RST_FIFO);
@@ -913,7 +918,7 @@
 
 	if (priv->rev == TDA19988) {
 		/* let incoming pixels fill the active space (if any) */
-		reg_write(encoder, REG_ENABLE_SPACE, 0x01);
+		reg_write(encoder, REG_ENABLE_SPACE, 0x00);
 	}
 
 	/* must be last register set: */
@@ -1094,6 +1099,8 @@
 {
 	struct tda998x_priv *priv = to_tda998x_priv(encoder);
 	drm_i2c_encoder_destroy(encoder);
+	if (priv->cec)
+		i2c_unregister_device(priv->cec);
 	kfree(priv);
 }
 
@@ -1142,8 +1149,12 @@
 	priv->vip_cntrl_1 = VIP_CNTRL_1_SWAP_C(0) | VIP_CNTRL_1_SWAP_D(1);
 	priv->vip_cntrl_2 = VIP_CNTRL_2_SWAP_E(4) | VIP_CNTRL_2_SWAP_F(5);
 
-	priv->current_page = 0;
+	priv->current_page = 0xff;
 	priv->cec = i2c_new_dummy(client->adapter, 0x34);
+	if (!priv->cec) {
+		kfree(priv);
+		return -ENODEV;
+	}
 	priv->dpms = DRM_MODE_DPMS_OFF;
 
 	encoder_slave->slave_priv = priv;
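The new clamp in the TDA998x mode_set path keeps the serializer divider within the two bits that PLL_SERIAL_2_SRL_NOSC() used to mask off, now that the macro passes its argument through unmasked. A quick worked example of the same arithmetic; the mode clocks below are illustrative, not taken from the patch.

/* Illustration of the divider clamp introduced above. */
#include <stdio.h>

static int serial_div(int clock_khz)
{
	int div = 148500 / clock_khz;

	if (div != 0) {
		div--;
		if (div > 3)
			div = 3;
	}
	return div;
}

int main(void)
{
	/* 148.5 MHz -> 0, 74.25 MHz -> 1, 25.175 MHz -> 3 (clamped from 4). */
	printf("%d %d %d\n",
	       serial_div(148500), serial_div(74250), serial_div(25175));
	return 0;
}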
diff --git a/drivers/gpu/drm/i810/i810_dma.c b/drivers/gpu/drm/i810/i810_dma.c
index 249fdff..aeace37 100644
--- a/drivers/gpu/drm/i810/i810_dma.c
+++ b/drivers/gpu/drm/i810/i810_dma.c
@@ -1193,6 +1193,10 @@
 
 int i810_driver_load(struct drm_device *dev, unsigned long flags)
 {
+	/* Our userspace depends upon the agp mapping support. */
+	if (!dev->agp)
+		return -EINVAL;
+
 	pci_set_master(dev->pdev);
 
 	return 0;
diff --git a/drivers/gpu/drm/i810/i810_drv.c b/drivers/gpu/drm/i810/i810_drv.c
index d8180d2..441ccf8 100644
--- a/drivers/gpu/drm/i810/i810_drv.c
+++ b/drivers/gpu/drm/i810/i810_drv.c
@@ -57,7 +57,7 @@
 
 static struct drm_driver driver = {
 	.driver_features =
-	    DRIVER_USE_AGP | DRIVER_REQUIRE_AGP |
+	    DRIVER_USE_AGP |
 	    DRIVER_HAVE_DMA,
 	.dev_priv_size = sizeof(drm_i810_buf_priv_t),
 	.load = i810_driver_load,
diff --git a/drivers/gpu/drm/i915/Kconfig b/drivers/gpu/drm/i915/Kconfig
index 6199d0b..73ed59e 100644
--- a/drivers/gpu/drm/i915/Kconfig
+++ b/drivers/gpu/drm/i915/Kconfig
@@ -1,8 +1,10 @@
 config DRM_I915
 	tristate "Intel 8xx/9xx/G3x/G4x/HD Graphics"
 	depends on DRM
-	depends on AGP
-	depends on AGP_INTEL
+	depends on X86 && PCI
+	depends on (AGP || AGP=n)
+	select INTEL_GTT
+	select AGP_INTEL if AGP
 	# we need shmfs for the swappable backing store, and in particular
 	# the shmem_readpage() which depends upon tmpfs
 	select SHMEM
@@ -35,15 +37,14 @@
 config DRM_I915_KMS
 	bool "Enable modesetting on intel by default"
 	depends on DRM_I915
+	default y
 	help
-	  Choose this option if you want kernel modesetting enabled by default,
-	  and you have a new enough userspace to support this. Running old
-	  userspaces with this enabled will cause pain.  Note that this causes
-	  the driver to bind to PCI devices, which precludes loading things
-	  like intelfb.
+	  Choose this option if you want kernel modesetting enabled by default.
+
+	  If in doubt, say "Y".
 
 config DRM_I915_FBDEV
-	bool "Enable legacy fbdev support for the modesettting intel driver"
+	bool "Enable legacy fbdev support for the modesetting intel driver"
 	depends on DRM_I915
 	select DRM_KMS_FB_HELPER
 	select FB_CFB_FILLRECT
@@ -55,9 +56,12 @@
 	  support. Note that this support also provide the linux console
 	  support on top of the intel modesetting driver.
 
+	  If in doubt, say "Y".
+
 config DRM_I915_PRELIMINARY_HW_SUPPORT
 	bool "Enable preliminary support for prerelease Intel hardware by default"
 	depends on DRM_I915
+	default n
 	help
 	  Choose this option if you have prerelease Intel hardware and want the
 	  i915 driver to support it by default.  You can enable such support at
@@ -65,3 +69,15 @@
 	  option changes the default for that module option.
 
 	  If in doubt, say "N".
+
+config DRM_I915_UMS
+	bool "Enable userspace modesetting on Intel hardware (DEPRECATED)"
+	depends on DRM_I915
+	default n
+	help
+	  Choose this option if you still need userspace modesetting.
+
+	  Userspace modesetting is deprecated for quite some time now, so
+	  enable this only if you have ancient versions of the DDX drivers.
+
+	  If in doubt, say "N".
diff --git a/drivers/gpu/drm/i915/Makefile b/drivers/gpu/drm/i915/Makefile
index d4ae48b..9fd44f5 100644
--- a/drivers/gpu/drm/i915/Makefile
+++ b/drivers/gpu/drm/i915/Makefile
@@ -4,7 +4,6 @@
 
 ccflags-y := -Iinclude/drm
 i915-y := i915_drv.o i915_dma.o i915_irq.o \
-	  i915_debugfs.o \
 	  i915_gpu_error.o \
           i915_suspend.o \
 	  i915_gem.o \
@@ -54,6 +53,8 @@
 
 i915-$(CONFIG_DRM_I915_FBDEV) += intel_fbdev.o
 
+i915-$(CONFIG_DEBUG_FS) += i915_debugfs.o
+
 obj-$(CONFIG_DRM_I915)  += i915.o
 
 CFLAGS_i915_trace_points.o := -I$(src)
diff --git a/drivers/gpu/drm/i915/dvo_ns2501.c b/drivers/gpu/drm/i915/dvo_ns2501.c
index c4a255b..954acb2 100644
--- a/drivers/gpu/drm/i915/dvo_ns2501.c
+++ b/drivers/gpu/drm/i915/dvo_ns2501.c
@@ -87,49 +87,6 @@
  * when switching the resolution.
  */
 
-static void enable_dvo(struct intel_dvo_device *dvo)
-{
-	struct ns2501_priv *ns = (struct ns2501_priv *)(dvo->dev_priv);
-	struct i2c_adapter *adapter = dvo->i2c_bus;
-	struct intel_gmbus *bus = container_of(adapter,
-					       struct intel_gmbus,
-					       adapter);
-	struct drm_i915_private *dev_priv = bus->dev_priv;
-
-	DRM_DEBUG_KMS("%s: Trying to re-enable the DVO\n", __FUNCTION__);
-
-	ns->dvoc = I915_READ(DVO_C);
-	ns->pll_a = I915_READ(_DPLL_A);
-	ns->srcdim = I915_READ(DVOC_SRCDIM);
-	ns->fw_blc = I915_READ(FW_BLC);
-
-	I915_WRITE(DVOC, 0x10004084);
-	I915_WRITE(_DPLL_A, 0xd0820000);
-	I915_WRITE(DVOC_SRCDIM, 0x400300);	// 1024x768
-	I915_WRITE(FW_BLC, 0x1080304);
-
-	I915_WRITE(DVOC, 0x90004084);
-}
-
-/*
- * Restore the I915 registers modified by the above
- * trigger function.
- */
-static void restore_dvo(struct intel_dvo_device *dvo)
-{
-	struct i2c_adapter *adapter = dvo->i2c_bus;
-	struct intel_gmbus *bus = container_of(adapter,
-					       struct intel_gmbus,
-					       adapter);
-	struct drm_i915_private *dev_priv = bus->dev_priv;
-	struct ns2501_priv *ns = (struct ns2501_priv *)(dvo->dev_priv);
-
-	I915_WRITE(DVOC, ns->dvoc);
-	I915_WRITE(_DPLL_A, ns->pll_a);
-	I915_WRITE(DVOC_SRCDIM, ns->srcdim);
-	I915_WRITE(FW_BLC, ns->fw_blc);
-}
-
 /*
 ** Read a register from the ns2501.
 ** Returns true if successful, false otherwise.
@@ -300,7 +257,7 @@
 			    struct drm_display_mode *adjusted_mode)
 {
 	bool ok;
-	bool restore = false;
+	int retries = 10;
 	struct ns2501_priv *ns = (struct ns2501_priv *)(dvo->dev_priv);
 
 	DRM_DEBUG_KMS
@@ -476,20 +433,7 @@
 			ns->reg_8_shadow |= NS2501_8_BPAS;
 		}
 		ok &= ns2501_writeb(dvo, NS2501_REG8, ns->reg_8_shadow);
-
-		if (!ok) {
-			if (restore)
-				restore_dvo(dvo);
-			enable_dvo(dvo);
-			restore = true;
-		}
-	} while (!ok);
-	/*
-	 * Restore the old i915 registers before
-	 * forcing the ns2501 on.
-	 */
-	if (restore)
-		restore_dvo(dvo);
+	} while (!ok && retries--);
 }
 
 /* set the NS2501 power state */
@@ -510,7 +454,7 @@
 static void ns2501_dpms(struct intel_dvo_device *dvo, bool enable)
 {
 	bool ok;
-	bool restore = false;
+	int retries = 10;
 	struct ns2501_priv *ns = (struct ns2501_priv *)(dvo->dev_priv);
 	unsigned char ch;
 
@@ -537,16 +481,7 @@
 			ok &=
 			    ns2501_writeb(dvo, 0x35,
 					  enable ? 0xff : 0x00);
-			if (!ok) {
-				if (restore)
-					restore_dvo(dvo);
-				enable_dvo(dvo);
-				restore = true;
-			}
-		} while (!ok);
-
-		if (restore)
-			restore_dvo(dvo);
+		} while (!ok && retries--);
 	}
 }
 
diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
index 6ed45a9..b2b46c5 100644
--- a/drivers/gpu/drm/i915/i915_debugfs.c
+++ b/drivers/gpu/drm/i915/i915_debugfs.c
@@ -40,8 +40,6 @@
 #include <drm/i915_drm.h>
 #include "i915_drv.h"
 
-#if defined(CONFIG_DEBUG_FS)
-
 enum {
 	ACTIVE_LIST,
 	INACTIVE_LIST,
@@ -406,16 +404,26 @@
 	seq_putc(m, '\n');
 	list_for_each_entry_reverse(file, &dev->filelist, lhead) {
 		struct file_stats stats;
+		struct task_struct *task;
 
 		memset(&stats, 0, sizeof(stats));
 		idr_for_each(&file->object_idr, per_file_stats, &stats);
+		/*
+		 * Although we have a valid reference on file->pid, that does
+		 * not guarantee that the task_struct that called get_pid() is
+		 * still alive (e.g. get_pid(current) => fork() => exit()).
+		 * Therefore, we need to protect this ->comm access using RCU.
+		 */
+		rcu_read_lock();
+		task = pid_task(file->pid, PIDTYPE_PID);
 		seq_printf(m, "%s: %u objects, %zu bytes (%zu active, %zu inactive, %zu unbound)\n",
-			   get_pid_task(file->pid, PIDTYPE_PID)->comm,
+			   task ? task->comm : "<unknown>",
 			   stats.count,
 			   stats.total,
 			   stats.active,
 			   stats.inactive,
 			   stats.unbound);
+		rcu_read_unlock();
 	}
 
 	mutex_unlock(&dev->struct_mutex);
@@ -564,10 +572,12 @@
 	ret = mutex_lock_interruptible(&dev->struct_mutex);
 	if (ret)
 		return ret;
+	intel_runtime_pm_get(dev_priv);
 
 	for_each_ring(ring, dev_priv, i)
 		i915_ring_seqno_info(m, ring);
 
+	intel_runtime_pm_put(dev_priv);
 	mutex_unlock(&dev->struct_mutex);
 
 	return 0;
@@ -585,6 +595,7 @@
 	ret = mutex_lock_interruptible(&dev->struct_mutex);
 	if (ret)
 		return ret;
+	intel_runtime_pm_get(dev_priv);
 
 	if (INTEL_INFO(dev)->gen >= 8) {
 		int i;
@@ -711,6 +722,7 @@
 		}
 		i915_ring_seqno_info(m, ring);
 	}
+	intel_runtime_pm_put(dev_priv);
 	mutex_unlock(&dev->struct_mutex);
 
 	return 0;
@@ -904,9 +916,11 @@
 	ret = mutex_lock_interruptible(&dev->struct_mutex);
 	if (ret)
 		return ret;
+	intel_runtime_pm_get(dev_priv);
 
 	crstanddelay = I915_READ16(CRSTANDVID);
 
+	intel_runtime_pm_put(dev_priv);
 	mutex_unlock(&dev->struct_mutex);
 
 	seq_printf(m, "w/ctx: %d, w/o ctx: %d\n", (crstanddelay >> 8) & 0x3f, (crstanddelay & 0x3f));
@@ -919,7 +933,9 @@
 	struct drm_info_node *node = (struct drm_info_node *) m->private;
 	struct drm_device *dev = node->minor->dev;
 	drm_i915_private_t *dev_priv = dev->dev_private;
-	int ret;
+	int ret = 0;
+
+	intel_runtime_pm_get(dev_priv);
 
 	flush_delayed_work(&dev_priv->rps.delayed_resume_work);
 
@@ -945,9 +961,9 @@
 		/* RPSTAT1 is in the GT power well */
 		ret = mutex_lock_interruptible(&dev->struct_mutex);
 		if (ret)
-			return ret;
+			goto out;
 
-		gen6_gt_force_wake_get(dev_priv);
+		gen6_gt_force_wake_get(dev_priv, FORCEWAKE_ALL);
 
 		reqf = I915_READ(GEN6_RPNSWREQ);
 		reqf &= ~GEN6_TURBO_DISABLE;
@@ -970,7 +986,7 @@
 			cagf = (rpstat & GEN6_CAGF_MASK) >> GEN6_CAGF_SHIFT;
 		cagf *= GT_FREQUENCY_MULTIPLIER;
 
-		gen6_gt_force_wake_put(dev_priv);
+		gen6_gt_force_wake_put(dev_priv, FORCEWAKE_ALL);
 		mutex_unlock(&dev->struct_mutex);
 
 		seq_printf(m, "GT_PERF_STATUS: 0x%08x\n", gt_perf_status);
@@ -1018,23 +1034,24 @@
 		seq_printf(m, "PUNIT_REG_GPU_FREQ_STS: 0x%08x\n", freq_sts);
 		seq_printf(m, "DDR freq: %d MHz\n", dev_priv->mem_freq);
 
-		val = vlv_punit_read(dev_priv, PUNIT_FUSE_BUS1);
+		val = valleyview_rps_max_freq(dev_priv);
 		seq_printf(m, "max GPU freq: %d MHz\n",
-			   vlv_gpu_freq(dev_priv->mem_freq, val));
+			   vlv_gpu_freq(dev_priv, val));
 
-		val = vlv_punit_read(dev_priv, PUNIT_REG_GPU_LFM);
+		val = valleyview_rps_min_freq(dev_priv);
 		seq_printf(m, "min GPU freq: %d MHz\n",
-			   vlv_gpu_freq(dev_priv->mem_freq, val));
+			   vlv_gpu_freq(dev_priv, val));
 
 		seq_printf(m, "current GPU freq: %d MHz\n",
-			   vlv_gpu_freq(dev_priv->mem_freq,
-					(freq_sts >> 8) & 0xff));
+			   vlv_gpu_freq(dev_priv, (freq_sts >> 8) & 0xff));
 		mutex_unlock(&dev_priv->rps.hw_lock);
 	} else {
 		seq_puts(m, "no P-state info available\n");
 	}
 
-	return 0;
+out:
+	intel_runtime_pm_put(dev_priv);
+	return ret;
 }
 
 static int i915_delayfreq_table(struct seq_file *m, void *unused)
@@ -1048,6 +1065,7 @@
 	ret = mutex_lock_interruptible(&dev->struct_mutex);
 	if (ret)
 		return ret;
+	intel_runtime_pm_get(dev_priv);
 
 	for (i = 0; i < 16; i++) {
 		delayfreq = I915_READ(PXVFREQ_BASE + i * 4);
@@ -1055,6 +1073,8 @@
 			   (delayfreq & PXVFREQ_PX_MASK) >> PXVFREQ_PX_SHIFT);
 	}
 
+	intel_runtime_pm_put(dev_priv);
+
 	mutex_unlock(&dev->struct_mutex);
 
 	return 0;
@@ -1076,12 +1096,14 @@
 	ret = mutex_lock_interruptible(&dev->struct_mutex);
 	if (ret)
 		return ret;
+	intel_runtime_pm_get(dev_priv);
 
 	for (i = 1; i <= 32; i++) {
 		inttoext = I915_READ(INTTOEXT_BASE_ILK + i * 4);
 		seq_printf(m, "INTTOEXT%02d: 0x%08x\n", i, inttoext);
 	}
 
+	intel_runtime_pm_put(dev_priv);
 	mutex_unlock(&dev->struct_mutex);
 
 	return 0;
@@ -1099,11 +1121,13 @@
 	ret = mutex_lock_interruptible(&dev->struct_mutex);
 	if (ret)
 		return ret;
+	intel_runtime_pm_get(dev_priv);
 
 	rgvmodectl = I915_READ(MEMMODECTL);
 	rstdbyctl = I915_READ(RSTDBYCTL);
 	crstandvid = I915_READ16(CRSTANDVID);
 
+	intel_runtime_pm_put(dev_priv);
 	mutex_unlock(&dev->struct_mutex);
 
 	seq_printf(m, "HD boost: %s\n", (rgvmodectl & MEMMODE_BOOST_EN) ?
@@ -1154,6 +1178,50 @@
 	return 0;
 }
 
+static int vlv_drpc_info(struct seq_file *m)
+{
+
+	struct drm_info_node *node = (struct drm_info_node *) m->private;
+	struct drm_device *dev = node->minor->dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	u32 rpmodectl1, rcctl1;
+	unsigned fw_rendercount = 0, fw_mediacount = 0;
+
+	rpmodectl1 = I915_READ(GEN6_RP_CONTROL);
+	rcctl1 = I915_READ(GEN6_RC_CONTROL);
+
+	seq_printf(m, "Video Turbo Mode: %s\n",
+		   yesno(rpmodectl1 & GEN6_RP_MEDIA_TURBO));
+	seq_printf(m, "Turbo enabled: %s\n",
+		   yesno(rpmodectl1 & GEN6_RP_ENABLE));
+	seq_printf(m, "HW control enabled: %s\n",
+		   yesno(rpmodectl1 & GEN6_RP_ENABLE));
+	seq_printf(m, "SW control enabled: %s\n",
+		   yesno((rpmodectl1 & GEN6_RP_MEDIA_MODE_MASK) ==
+			  GEN6_RP_MEDIA_SW_MODE));
+	seq_printf(m, "RC6 Enabled: %s\n",
+		   yesno(rcctl1 & (GEN7_RC_CTL_TO_MODE |
+					GEN6_RC_CTL_EI_MODE(1))));
+	seq_printf(m, "Render Power Well: %s\n",
+			(I915_READ(VLV_GTLC_PW_STATUS) &
+				VLV_GTLC_PW_RENDER_STATUS_MASK) ? "Up" : "Down");
+	seq_printf(m, "Media Power Well: %s\n",
+			(I915_READ(VLV_GTLC_PW_STATUS) &
+				VLV_GTLC_PW_MEDIA_STATUS_MASK) ? "Up" : "Down");
+
+	spin_lock_irq(&dev_priv->uncore.lock);
+	fw_rendercount = dev_priv->uncore.fw_rendercount;
+	fw_mediacount = dev_priv->uncore.fw_mediacount;
+	spin_unlock_irq(&dev_priv->uncore.lock);
+
+	seq_printf(m, "Forcewake Render Count = %u\n", fw_rendercount);
+	seq_printf(m, "Forcewake Media Count = %u\n", fw_mediacount);
+
+
+	return 0;
+}
+
+
 static int gen6_drpc_info(struct seq_file *m)
 {
 
@@ -1167,6 +1235,7 @@
 	ret = mutex_lock_interruptible(&dev->struct_mutex);
 	if (ret)
 		return ret;
+	intel_runtime_pm_get(dev_priv);
 
 	spin_lock_irq(&dev_priv->uncore.lock);
 	forcewake_count = dev_priv->uncore.forcewake_count;
@@ -1192,6 +1261,8 @@
 	sandybridge_pcode_read(dev_priv, GEN6_PCODE_READ_RC6VIDS, &rc6vids);
 	mutex_unlock(&dev_priv->rps.hw_lock);
 
+	intel_runtime_pm_put(dev_priv);
+
 	seq_printf(m, "Video Turbo Mode: %s\n",
 		   yesno(rpmodectl1 & GEN6_RP_MEDIA_TURBO));
 	seq_printf(m, "HW control enabled: %s\n",
@@ -1256,7 +1327,9 @@
 	struct drm_info_node *node = (struct drm_info_node *) m->private;
 	struct drm_device *dev = node->minor->dev;
 
-	if (IS_GEN6(dev) || IS_GEN7(dev))
+	if (IS_VALLEYVIEW(dev))
+		return vlv_drpc_info(m);
+	else if (IS_GEN6(dev) || IS_GEN7(dev))
 		return gen6_drpc_info(m);
 	else
 		return ironlake_drpc_info(m);
@@ -1268,7 +1341,7 @@
 	struct drm_device *dev = node->minor->dev;
 	drm_i915_private_t *dev_priv = dev->dev_private;
 
-	if (!I915_HAS_FBC(dev)) {
+	if (!HAS_FBC(dev)) {
 		seq_puts(m, "FBC unsupported on this chipset\n");
 		return 0;
 	}
@@ -1330,7 +1403,7 @@
 		return 0;
 	}
 
-	if (I915_READ(IPS_CTL) & IPS_ENABLE)
+	if (IS_BROADWELL(dev) || I915_READ(IPS_CTL) & IPS_ENABLE)
 		seq_puts(m, "enabled\n");
 	else
 		seq_puts(m, "disabled\n");
@@ -1406,6 +1479,7 @@
 	ret = mutex_lock_interruptible(&dev_priv->rps.hw_lock);
 	if (ret)
 		return ret;
+	intel_runtime_pm_get(dev_priv);
 
 	seq_puts(m, "GPU freq (MHz)\tEffective CPU freq (MHz)\tEffective Ring freq (MHz)\n");
 
@@ -1422,6 +1496,7 @@
 			   ((ia_freq >> 8) & 0xff) * 100);
 	}
 
+	intel_runtime_pm_put(dev_priv);
 	mutex_unlock(&dev_priv->rps.hw_lock);
 
 	return 0;
@@ -1437,8 +1512,10 @@
 	ret = mutex_lock_interruptible(&dev->struct_mutex);
 	if (ret)
 		return ret;
+	intel_runtime_pm_get(dev_priv);
 
 	seq_printf(m, "GFXEC: %ld\n", (unsigned long)I915_READ(0x112f4));
+	intel_runtime_pm_put(dev_priv);
 
 	mutex_unlock(&dev->struct_mutex);
 
@@ -1565,13 +1642,21 @@
 	struct drm_info_node *node = (struct drm_info_node *) m->private;
 	struct drm_device *dev = node->minor->dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
-	unsigned forcewake_count;
+	unsigned forcewake_count = 0, fw_rendercount = 0, fw_mediacount = 0;
 
 	spin_lock_irq(&dev_priv->uncore.lock);
-	forcewake_count = dev_priv->uncore.forcewake_count;
+	if (IS_VALLEYVIEW(dev)) {
+		fw_rendercount = dev_priv->uncore.fw_rendercount;
+		fw_mediacount = dev_priv->uncore.fw_mediacount;
+	} else
+		forcewake_count = dev_priv->uncore.forcewake_count;
 	spin_unlock_irq(&dev_priv->uncore.lock);
 
-	seq_printf(m, "forcewake count = %u\n", forcewake_count);
+	if (IS_VALLEYVIEW(dev)) {
+		seq_printf(m, "fw_rendercount = %u\n", fw_rendercount);
+		seq_printf(m, "fw_mediacount = %u\n", fw_mediacount);
+	} else
+		seq_printf(m, "forcewake count = %u\n", forcewake_count);
 
 	return 0;
 }
@@ -1610,6 +1695,7 @@
 	ret = mutex_lock_interruptible(&dev->struct_mutex);
 	if (ret)
 		return ret;
+	intel_runtime_pm_get(dev_priv);
 
 	seq_printf(m, "bit6 swizzle for X-tiling = %s\n",
 		   swizzle_string(dev_priv->mm.bit_6_swizzle_x));
@@ -1641,6 +1727,7 @@
 		seq_printf(m, "DISP_ARB_CTL = 0x%08x\n",
 			   I915_READ(DISP_ARB_CTL));
 	}
+	intel_runtime_pm_put(dev_priv);
 	mutex_unlock(&dev->struct_mutex);
 
 	return 0;
@@ -1701,16 +1788,19 @@
 {
 	struct drm_info_node *node = (struct drm_info_node *) m->private;
 	struct drm_device *dev = node->minor->dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
 
 	int ret = mutex_lock_interruptible(&dev->struct_mutex);
 	if (ret)
 		return ret;
+	intel_runtime_pm_get(dev_priv);
 
 	if (INTEL_INFO(dev)->gen >= 8)
 		gen8_ppgtt_info(m, dev);
 	else if (INTEL_INFO(dev)->gen >= 6)
 		gen6_ppgtt_info(m, dev);
 
+	intel_runtime_pm_put(dev_priv);
 	mutex_unlock(&dev->struct_mutex);
 
 	return 0;
@@ -1735,28 +1825,28 @@
 
 	seq_printf(m, "DPIO_CTL: 0x%08x\n", I915_READ(DPIO_CTL));
 
-	seq_printf(m, "DPIO_DIV_A: 0x%08x\n",
-		   vlv_dpio_read(dev_priv, PIPE_A, _DPIO_DIV_A));
-	seq_printf(m, "DPIO_DIV_B: 0x%08x\n",
-		   vlv_dpio_read(dev_priv, PIPE_A, _DPIO_DIV_B));
+	seq_printf(m, "DPIO PLL DW3 CH0 : 0x%08x\n",
+		   vlv_dpio_read(dev_priv, PIPE_A, VLV_PLL_DW3(0)));
+	seq_printf(m, "DPIO PLL DW3 CH1: 0x%08x\n",
+		   vlv_dpio_read(dev_priv, PIPE_A, VLV_PLL_DW3(1)));
 
-	seq_printf(m, "DPIO_REFSFR_A: 0x%08x\n",
-		   vlv_dpio_read(dev_priv, PIPE_A, _DPIO_REFSFR_A));
-	seq_printf(m, "DPIO_REFSFR_B: 0x%08x\n",
-		   vlv_dpio_read(dev_priv, PIPE_A, _DPIO_REFSFR_B));
+	seq_printf(m, "DPIO PLL DW5 CH0: 0x%08x\n",
+		   vlv_dpio_read(dev_priv, PIPE_A, VLV_PLL_DW5(0)));
+	seq_printf(m, "DPIO PLL DW5 CH1: 0x%08x\n",
+		   vlv_dpio_read(dev_priv, PIPE_A, VLV_PLL_DW5(1)));
 
-	seq_printf(m, "DPIO_CORE_CLK_A: 0x%08x\n",
-		   vlv_dpio_read(dev_priv, PIPE_A, _DPIO_CORE_CLK_A));
-	seq_printf(m, "DPIO_CORE_CLK_B: 0x%08x\n",
-		   vlv_dpio_read(dev_priv, PIPE_A, _DPIO_CORE_CLK_B));
+	seq_printf(m, "DPIO PLL DW7 CH0: 0x%08x\n",
+		   vlv_dpio_read(dev_priv, PIPE_A, VLV_PLL_DW7(0)));
+	seq_printf(m, "DPIO PLL DW7 CH1: 0x%08x\n",
+		   vlv_dpio_read(dev_priv, PIPE_A, VLV_PLL_DW7(1)));
 
-	seq_printf(m, "DPIO_LPF_COEFF_A: 0x%08x\n",
-		   vlv_dpio_read(dev_priv, PIPE_A, _DPIO_LPF_COEFF_A));
-	seq_printf(m, "DPIO_LPF_COEFF_B: 0x%08x\n",
-		   vlv_dpio_read(dev_priv, PIPE_A, _DPIO_LPF_COEFF_B));
+	seq_printf(m, "DPIO PLL DW10 CH0: 0x%08x\n",
+		   vlv_dpio_read(dev_priv, PIPE_A, VLV_PLL_DW10(0)));
+	seq_printf(m, "DPIO PLL DW10 CH1: 0x%08x\n",
+		   vlv_dpio_read(dev_priv, PIPE_A, VLV_PLL_DW10(1)));
 
 	seq_printf(m, "DPIO_FASTCLK_DISABLE: 0x%08x\n",
-		   vlv_dpio_read(dev_priv, PIPE_A, DPIO_FASTCLK_DISABLE));
+		   vlv_dpio_read(dev_priv, PIPE_A, VLV_CMN_DW0));
 
 	mutex_unlock(&dev_priv->dpio_lock);
 
@@ -1784,6 +1874,8 @@
 	u32 psrperf = 0;
 	bool enabled = false;
 
+	intel_runtime_pm_get(dev_priv);
+
 	seq_printf(m, "Sink_Support: %s\n", yesno(dev_priv->psr.sink_support));
 	seq_printf(m, "Source_OK: %s\n", yesno(dev_priv->psr.source_ok));
 
@@ -1796,6 +1888,7 @@
 			EDP_PSR_PERF_CNT_MASK;
 	seq_printf(m, "Performance_Counter: %u\n", psrperf);
 
+	intel_runtime_pm_put(dev_priv);
 	return 0;
 }
 
@@ -1845,6 +1938,76 @@
 	return 0;
 }
 
+static const char *power_domain_str(enum intel_display_power_domain domain)
+{
+	switch (domain) {
+	case POWER_DOMAIN_PIPE_A:
+		return "PIPE_A";
+	case POWER_DOMAIN_PIPE_B:
+		return "PIPE_B";
+	case POWER_DOMAIN_PIPE_C:
+		return "PIPE_C";
+	case POWER_DOMAIN_PIPE_A_PANEL_FITTER:
+		return "PIPE_A_PANEL_FITTER";
+	case POWER_DOMAIN_PIPE_B_PANEL_FITTER:
+		return "PIPE_B_PANEL_FITTER";
+	case POWER_DOMAIN_PIPE_C_PANEL_FITTER:
+		return "PIPE_C_PANEL_FITTER";
+	case POWER_DOMAIN_TRANSCODER_A:
+		return "TRANSCODER_A";
+	case POWER_DOMAIN_TRANSCODER_B:
+		return "TRANSCODER_B";
+	case POWER_DOMAIN_TRANSCODER_C:
+		return "TRANSCODER_C";
+	case POWER_DOMAIN_TRANSCODER_EDP:
+		return "TRANSCODER_EDP";
+	case POWER_DOMAIN_VGA:
+		return "VGA";
+	case POWER_DOMAIN_AUDIO:
+		return "AUDIO";
+	case POWER_DOMAIN_INIT:
+		return "INIT";
+	default:
+		WARN_ON(1);
+		return "?";
+	}
+}
+
+static int i915_power_domain_info(struct seq_file *m, void *unused)
+{
+	struct drm_info_node *node = (struct drm_info_node *) m->private;
+	struct drm_device *dev = node->minor->dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct i915_power_domains *power_domains = &dev_priv->power_domains;
+	int i;
+
+	mutex_lock(&power_domains->lock);
+
+	seq_printf(m, "%-25s %s\n", "Power well/domain", "Use count");
+	for (i = 0; i < power_domains->power_well_count; i++) {
+		struct i915_power_well *power_well;
+		enum intel_display_power_domain power_domain;
+
+		power_well = &power_domains->power_wells[i];
+		seq_printf(m, "%-25s %d\n", power_well->name,
+			   power_well->count);
+
+		for (power_domain = 0; power_domain < POWER_DOMAIN_NUM;
+		     power_domain++) {
+			if (!(BIT(power_domain) & power_well->domains))
+				continue;
+
+			seq_printf(m, "  %-23s %d\n",
+				 power_domain_str(power_domain),
+				 power_domains->domain_use_count[power_domain]);
+		}
+	}
+
+	mutex_unlock(&power_domains->lock);
+
+	return 0;
+}
+
 struct pipe_crc_info {
 	const char *name;
 	struct drm_device *dev;
@@ -1857,6 +2020,9 @@
 	struct drm_i915_private *dev_priv = info->dev->dev_private;
 	struct intel_pipe_crc *pipe_crc = &dev_priv->pipe_crc[info->pipe];
 
+	if (info->pipe >= INTEL_INFO(info->dev)->num_pipes)
+		return -ENODEV;
+
 	spin_lock_irq(&pipe_crc->lock);
 
 	if (pipe_crc->opened) {
@@ -2005,8 +2171,8 @@
 	info->dev = dev;
 	ent = debugfs_create_file(info->name, S_IRUGO, root, info,
 				  &i915_pipe_crc_fops);
-	if (IS_ERR(ent))
-		return PTR_ERR(ent);
+	if (!ent)
+		return -ENOMEM;
 
 	return drm_add_fake_info_node(minor, ent, info);
 }
@@ -2347,7 +2513,7 @@
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct intel_pipe_crc *pipe_crc = &dev_priv->pipe_crc[pipe];
-	u32 val;
+	u32 val = 0; /* shut up gcc */
 	int ret;
 
 	if (pipe_crc->source == source)
@@ -2742,7 +2908,7 @@
 	struct i915_vma *vma, *x;
 	int ret;
 
-	DRM_DEBUG_DRIVER("Dropping caches: 0x%08llx\n", val);
+	DRM_DEBUG("Dropping caches: 0x%08llx\n", val);
 
 	/* No need to check and wait for gpu resets, only libdrm auto-restarts
 	 * on ioctls on -EAGAIN. */
@@ -2810,8 +2976,7 @@
 		return ret;
 
 	if (IS_VALLEYVIEW(dev))
-		*val = vlv_gpu_freq(dev_priv->mem_freq,
-				    dev_priv->rps.max_delay);
+		*val = vlv_gpu_freq(dev_priv, dev_priv->rps.max_delay);
 	else
 		*val = dev_priv->rps.max_delay * GT_FREQUENCY_MULTIPLIER;
 	mutex_unlock(&dev_priv->rps.hw_lock);
@@ -2841,9 +3006,9 @@
 	 * Turbo will still be enabled, but won't go above the set value.
 	 */
 	if (IS_VALLEYVIEW(dev)) {
-		val = vlv_freq_opcode(dev_priv->mem_freq, val);
+		val = vlv_freq_opcode(dev_priv, val);
 		dev_priv->rps.max_delay = val;
-		gen6_set_rps(dev, val);
+		valleyview_set_rps(dev, val);
 	} else {
 		do_div(val, GT_FREQUENCY_MULTIPLIER);
 		dev_priv->rps.max_delay = val;
@@ -2876,8 +3041,7 @@
 		return ret;
 
 	if (IS_VALLEYVIEW(dev))
-		*val = vlv_gpu_freq(dev_priv->mem_freq,
-				    dev_priv->rps.min_delay);
+		*val = vlv_gpu_freq(dev_priv, dev_priv->rps.min_delay);
 	else
 		*val = dev_priv->rps.min_delay * GT_FREQUENCY_MULTIPLIER;
 	mutex_unlock(&dev_priv->rps.hw_lock);
@@ -2907,7 +3071,7 @@
 	 * Turbo will still be enabled, but won't go below the set value.
 	 */
 	if (IS_VALLEYVIEW(dev)) {
-		val = vlv_freq_opcode(dev_priv->mem_freq, val);
+		val = vlv_freq_opcode(dev_priv, val);
 		dev_priv->rps.min_delay = val;
 		valleyview_set_rps(dev, val);
 	} else {
@@ -2938,8 +3102,11 @@
 	ret = mutex_lock_interruptible(&dev->struct_mutex);
 	if (ret)
 		return ret;
+	intel_runtime_pm_get(dev_priv);
 
 	snpcr = I915_READ(GEN6_MBCUNIT_SNPCR);
+
+	intel_runtime_pm_put(dev_priv);
 	mutex_unlock(&dev_priv->dev->struct_mutex);
 
 	*val = (snpcr & GEN6_MBC_SNPCR_MASK) >> GEN6_MBC_SNPCR_SHIFT;
@@ -2960,6 +3127,7 @@
 	if (val > 3)
 		return -EINVAL;
 
+	intel_runtime_pm_get(dev_priv);
 	DRM_DEBUG_DRIVER("Manually setting uncore sharing to %llu\n", val);
 
 	/* Update the cache sharing policy here as well */
@@ -2968,6 +3136,7 @@
 	snpcr |= (val << GEN6_MBC_SNPCR_SHIFT);
 	I915_WRITE(GEN6_MBCUNIT_SNPCR, snpcr);
 
+	intel_runtime_pm_put(dev_priv);
 	return 0;
 }
 
@@ -2983,7 +3152,8 @@
 	if (INTEL_INFO(dev)->gen < 6)
 		return 0;
 
-	gen6_gt_force_wake_get(dev_priv);
+	intel_runtime_pm_get(dev_priv);
+	gen6_gt_force_wake_get(dev_priv, FORCEWAKE_ALL);
 
 	return 0;
 }
@@ -2996,7 +3166,8 @@
 	if (INTEL_INFO(dev)->gen < 6)
 		return 0;
 
-	gen6_gt_force_wake_put(dev_priv);
+	gen6_gt_force_wake_put(dev_priv, FORCEWAKE_ALL);
+	intel_runtime_pm_put(dev_priv);
 
 	return 0;
 }
@@ -3016,8 +3187,8 @@
 				  S_IRUSR,
 				  root, dev,
 				  &i915_forcewake_fops);
-	if (IS_ERR(ent))
-		return PTR_ERR(ent);
+	if (!ent)
+		return -ENOMEM;
 
 	return drm_add_fake_info_node(minor, ent, &i915_forcewake_fops);
 }
@@ -3034,8 +3205,8 @@
 				  S_IRUGO | S_IWUSR,
 				  root, dev,
 				  fops);
-	if (IS_ERR(ent))
-		return PTR_ERR(ent);
+	if (!ent)
+		return -ENOMEM;
 
 	return drm_add_fake_info_node(minor, ent, fops);
 }
@@ -3079,6 +3250,7 @@
 	{"i915_edp_psr_status", i915_edp_psr_status, 0},
 	{"i915_energy_uJ", i915_energy_uJ, 0},
 	{"i915_pc8_status", i915_pc8_status, 0},
+	{"i915_power_domain_info", i915_power_domain_info, 0},
 };
 #define I915_DEBUGFS_ENTRIES ARRAY_SIZE(i915_debugfs_list)
 
@@ -3102,10 +3274,10 @@
 void intel_display_crc_init(struct drm_device *dev)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
-	int i;
+	enum pipe pipe;
 
-	for (i = 0; i < INTEL_INFO(dev)->num_pipes; i++) {
-		struct intel_pipe_crc *pipe_crc = &dev_priv->pipe_crc[i];
+	for_each_pipe(pipe) {
+		struct intel_pipe_crc *pipe_crc = &dev_priv->pipe_crc[pipe];
 
 		pipe_crc->opened = false;
 		spin_lock_init(&pipe_crc->lock);
@@ -3164,5 +3336,3 @@
 		drm_debugfs_remove_files(info_list, 1, minor);
 	}
 }
-
-#endif /* CONFIG_DEBUG_FS */
diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c
index 5c64842..15a74f9 100644
--- a/drivers/gpu/drm/i915/i915_dma.c
+++ b/drivers/gpu/drm/i915/i915_dma.c
@@ -42,6 +42,8 @@
 #include <linux/vga_switcheroo.h>
 #include <linux/slab.h>
 #include <acpi/video.h>
+#include <linux/pm.h>
+#include <linux/pm_runtime.h>
 
 #define LP_RING(d) (&((struct drm_i915_private *)(d))->ring[RCS])
 
@@ -791,7 +793,7 @@
 		master_priv->sarea_priv->perf_boxes |= I915_BOX_WAIT;
 
 	if (ring->irq_get(ring)) {
-		DRM_WAIT_ON(ret, ring->irq_queue, 3 * DRM_HZ,
+		DRM_WAIT_ON(ret, ring->irq_queue, 3 * HZ,
 			    READ_BREADCRUMB(dev_priv) >= irq_nr);
 		ring->irq_put(ring);
 	} else if (wait_for(READ_BREADCRUMB(dev_priv) >= irq_nr, 3000))
@@ -828,7 +830,7 @@
 	result = i915_emit_irq(dev);
 	mutex_unlock(&dev->struct_mutex);
 
-	if (DRM_COPY_TO_USER(emit->irq_seq, &result, sizeof(int))) {
+	if (copy_to_user(emit->irq_seq, &result, sizeof(int))) {
 		DRM_ERROR("copy_to_user\n");
 		return -EFAULT;
 	}
@@ -1016,8 +1018,8 @@
 		return -EINVAL;
 	}
 
-	if (DRM_COPY_TO_USER(param->value, &value, sizeof(int))) {
-		DRM_ERROR("DRM_COPY_TO_USER failed\n");
+	if (copy_to_user(param->value, &value, sizeof(int))) {
+		DRM_ERROR("copy_to_user failed\n");
 		return -EFAULT;
 	}
 
@@ -1411,7 +1413,7 @@
 	master->driver_priv = NULL;
 }
 
-#ifdef CONFIG_DRM_I915_FBDEV
+#if IS_ENABLED(CONFIG_FB)
 static void i915_kick_out_firmware_fb(struct drm_i915_private *dev_priv)
 {
 	struct apertures_struct *ap;
@@ -1484,6 +1486,10 @@
 		return -ENODEV;
 	}
 
+	/* UMS needs agp support. */
+	if (!drm_core_check_feature(dev, DRIVER_MODESET) && !dev->agp)
+		return -EINVAL;
+
 	dev_priv = kzalloc(sizeof(*dev_priv), GFP_KERNEL);
 	if (dev_priv == NULL)
 		return -ENOMEM;
@@ -1494,7 +1500,7 @@
 
 	spin_lock_init(&dev_priv->irq_lock);
 	spin_lock_init(&dev_priv->gpu_error.lock);
-	spin_lock_init(&dev_priv->backlight.lock);
+	spin_lock_init(&dev_priv->backlight_lock);
 	spin_lock_init(&dev_priv->uncore.lock);
 	spin_lock_init(&dev_priv->mm.object_stat_lock);
 	mutex_init(&dev_priv->dpio_lock);
@@ -1639,8 +1645,7 @@
 			goto out_gem_unload;
 	}
 
-	if (HAS_POWER_WELL(dev))
-		intel_power_domains_init(dev);
+	intel_power_domains_init(dev);
 
 	if (drm_core_check_feature(dev, DRIVER_MODESET)) {
 		ret = i915_load_modeset_init(dev);
@@ -1664,11 +1669,12 @@
 	if (IS_GEN5(dev))
 		intel_gpu_ips_init(dev_priv);
 
+	intel_init_runtime_pm(dev_priv);
+
 	return 0;
 
 out_power_well:
-	if (HAS_POWER_WELL(dev))
-		intel_power_domains_remove(dev);
+	intel_power_domains_remove(dev);
 	drm_vblank_cleanup(dev);
 out_gem_unload:
 	if (dev_priv->mm.inactive_shrinker.scan_objects)
@@ -1679,6 +1685,7 @@
 
 	intel_teardown_gmbus(dev);
 	intel_teardown_mchbar(dev);
+	pm_qos_remove_request(&dev_priv->pm_qos);
 	destroy_workqueue(dev_priv->wq);
 out_mtrrfree:
 	arch_phys_wc_del(dev_priv->gtt.mtrr);
@@ -1704,25 +1711,27 @@
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	int ret;
 
+	ret = i915_gem_suspend(dev);
+	if (ret) {
+		DRM_ERROR("failed to idle hardware: %d\n", ret);
+		return ret;
+	}
+
+	intel_fini_runtime_pm(dev_priv);
+
 	intel_gpu_ips_teardown();
 
-	if (HAS_POWER_WELL(dev)) {
-		/* The i915.ko module is still not prepared to be loaded when
-		 * the power well is not enabled, so just enable it in case
-		 * we're going to unload/reload. */
-		intel_display_set_init_power(dev, true);
-		intel_power_domains_remove(dev);
-	}
+	/* The i915.ko module is still not prepared to be loaded when
+	 * the power well is not enabled, so just enable it in case
+	 * we're going to unload/reload. */
+	intel_display_set_init_power(dev, true);
+	intel_power_domains_remove(dev);
 
 	i915_teardown_sysfs(dev);
 
 	if (dev_priv->mm.inactive_shrinker.scan_objects)
 		unregister_shrinker(&dev_priv->mm.inactive_shrinker);
 
-	ret = i915_gem_suspend(dev);
-	if (ret)
-		DRM_ERROR("failed to idle hardware: %d\n", ret);
-
 	io_mapping_free(dev_priv->gtt.mappable);
 	arch_phys_wc_del(dev_priv->gtt.mtrr);
 
@@ -1777,7 +1786,6 @@
 
 	list_del(&dev_priv->gtt.base.global_link);
 	WARN_ON(!list_empty(&dev_priv->vm_list));
-	drm_mm_takedown(&dev_priv->gtt.base.mm);
 
 	drm_vblank_cleanup(dev);
 
@@ -1910,6 +1918,7 @@
 	DRM_IOCTL_DEF_DRV(I915_GEM_CONTEXT_CREATE, i915_gem_context_create_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW),
 	DRM_IOCTL_DEF_DRV(I915_GEM_CONTEXT_DESTROY, i915_gem_context_destroy_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW),
 	DRM_IOCTL_DEF_DRV(I915_REG_READ, i915_reg_read_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW),
+	DRM_IOCTL_DEF_DRV(I915_GET_RESET_STATS, i915_get_reset_stats_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW),
 };
 
 int i915_max_ioctl = DRM_ARRAY_SIZE(i915_ioctls);
diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
index 5b7b7e0..04f1f02 100644
--- a/drivers/gpu/drm/i915/i915_drv.c
+++ b/drivers/gpu/drm/i915/i915_drv.c
@@ -59,7 +59,7 @@
 		"Enable powersavings, fbc, downclocking, etc. (default: true)");
 
 int i915_semaphores __read_mostly = -1;
-module_param_named(semaphores, i915_semaphores, int, 0600);
+module_param_named(semaphores, i915_semaphores, int, 0400);
 MODULE_PARM_DESC(semaphores,
 		"Use semaphores for inter-ring sync (default: -1 (use per-chip defaults))");
 
@@ -114,7 +114,7 @@
 		"(default: true)");
 
 int i915_enable_ppgtt __read_mostly = -1;
-module_param_named(i915_enable_ppgtt, i915_enable_ppgtt, int, 0600);
+module_param_named(i915_enable_ppgtt, i915_enable_ppgtt, int, 0400);
 MODULE_PARM_DESC(i915_enable_ppgtt,
 		"Enable PPGTT (default: true)");
 
@@ -155,7 +155,6 @@
 		"Disable page prefaulting for pread/pwrite/reloc (default:false). For developers only.");
 
 static struct drm_driver driver;
-extern int intel_agp_enabled;
 
 static const struct intel_device_info intel_i830_info = {
 	.gen = 2, .is_mobile = 1, .cursor_needs_physical = 1, .num_pipes = 2,
@@ -173,6 +172,7 @@
 	.gen = 2, .is_i85x = 1, .is_mobile = 1, .num_pipes = 2,
 	.cursor_needs_physical = 1,
 	.has_overlay = 1, .overlay_needs_physical = 1,
+	.has_fbc = 1,
 	.ring_mask = RENDER_RING,
 };
 
@@ -192,6 +192,7 @@
 	.cursor_needs_physical = 1,
 	.has_overlay = 1, .overlay_needs_physical = 1,
 	.supports_tv = 1,
+	.has_fbc = 1,
 	.ring_mask = RENDER_RING,
 };
 static const struct intel_device_info intel_i945g_info = {
@@ -204,6 +205,7 @@
 	.has_hotplug = 1, .cursor_needs_physical = 1,
 	.has_overlay = 1, .overlay_needs_physical = 1,
 	.supports_tv = 1,
+	.has_fbc = 1,
 	.ring_mask = RENDER_RING,
 };
 
@@ -265,6 +267,7 @@
 static const struct intel_device_info intel_sandybridge_d_info = {
 	.gen = 6, .num_pipes = 2,
 	.need_gfx_hws = 1, .has_hotplug = 1,
+	.has_fbc = 1,
 	.ring_mask = RENDER_RING | BSD_RING | BLT_RING,
 	.has_llc = 1,
 };
@@ -280,6 +283,7 @@
 #define GEN7_FEATURES  \
 	.gen = 7, .num_pipes = 3, \
 	.need_gfx_hws = 1, .has_hotplug = 1, \
+	.has_fbc = 1, \
 	.ring_mask = RENDER_RING | BSD_RING | BLT_RING, \
 	.has_llc = 1
 
@@ -292,7 +296,6 @@
 	GEN7_FEATURES,
 	.is_ivybridge = 1,
 	.is_mobile = 1,
-	.has_fbc = 1,
 };
 
 static const struct intel_device_info intel_ivybridge_q_info = {
@@ -307,6 +310,7 @@
 	.num_pipes = 2,
 	.is_valleyview = 1,
 	.display_mmio_offset = VLV_DISPLAY_BASE,
+	.has_fbc = 0, /* legal, last one wins */
 	.has_llc = 0, /* legal, last one wins */
 };
 
@@ -315,6 +319,7 @@
 	.num_pipes = 2,
 	.is_valleyview = 1,
 	.display_mmio_offset = VLV_DISPLAY_BASE,
+	.has_fbc = 0, /* legal, last one wins */
 	.has_llc = 0, /* legal, last one wins */
 };
 
@@ -332,12 +337,10 @@
 	.is_mobile = 1,
 	.has_ddi = 1,
 	.has_fpga_dbg = 1,
-	.has_fbc = 1,
 	.ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING,
 };
 
 static const struct intel_device_info intel_broadwell_d_info = {
-	.is_preliminary = 1,
 	.gen = 8, .num_pipes = 3,
 	.need_gfx_hws = 1, .has_hotplug = 1,
 	.ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING,
@@ -346,7 +349,6 @@
 };
 
 static const struct intel_device_info intel_broadwell_m_info = {
-	.is_preliminary = 1,
 	.gen = 8, .is_mobile = 1, .num_pipes = 3,
 	.need_gfx_hws = 1, .has_hotplug = 1,
 	.ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING,
@@ -476,12 +478,12 @@
 bool i915_semaphore_is_enabled(struct drm_device *dev)
 {
 	if (INTEL_INFO(dev)->gen < 6)
-		return 0;
+		return false;
 
 	/* Until we get further testing... */
 	if (IS_GEN8(dev)) {
 		WARN_ON(!i915_preliminary_hw_support);
-		return 0;
+		return false;
 	}
 
 	if (i915_semaphores >= 0)
@@ -493,7 +495,7 @@
 		return false;
 #endif
 
-	return 1;
+	return true;
 }
 
 static int i915_drm_freeze(struct drm_device *dev)
@@ -501,6 +503,8 @@
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct drm_crtc *crtc;
 
+	intel_runtime_pm_get(dev_priv);
+
 	/* ignore lid events during suspend */
 	mutex_lock(&dev_priv->modeset_restore_lock);
 	dev_priv->modeset_restore = MODESET_SUSPENDED;
@@ -688,6 +692,8 @@
 	mutex_lock(&dev_priv->modeset_restore_lock);
 	dev_priv->modeset_restore = MODESET_DONE;
 	mutex_unlock(&dev_priv->modeset_restore_lock);
+
+	intel_runtime_pm_put(dev_priv);
 	return error;
 }
 
@@ -762,14 +768,14 @@
 		DRM_INFO("Simulated gpu hang, resetting stop_rings\n");
 		dev_priv->gpu_error.stop_rings = 0;
 		if (ret == -ENODEV) {
-			DRM_ERROR("Reset not implemented, but ignoring "
-				  "error for simulated gpu hangs\n");
+			DRM_INFO("Reset not implemented, but ignoring "
+				 "error for simulated gpu hangs\n");
 			ret = 0;
 		}
 	}
 
 	if (ret) {
-		DRM_ERROR("Failed to reset chip.\n");
+		DRM_ERROR("Failed to reset chip: %i\n", ret);
 		mutex_unlock(&dev->struct_mutex);
 		return ret;
 	}
@@ -790,12 +796,9 @@
 	 */
 	if (drm_core_check_feature(dev, DRIVER_MODESET) ||
 			!dev_priv->ums.mm_suspended) {
-		bool hw_contexts_disabled = dev_priv->hw_contexts_disabled;
 		dev_priv->ums.mm_suspended = 0;
 
 		ret = i915_gem_init_hw(dev);
-		if (!hw_contexts_disabled && dev_priv->hw_contexts_disabled)
-			DRM_ERROR("HW contexts didn't survive reset\n");
 		mutex_unlock(&dev->struct_mutex);
 		if (ret) {
 			DRM_ERROR("Failed hw init on reset %d\n", ret);
@@ -831,17 +834,7 @@
 	if (PCI_FUNC(pdev->devfn))
 		return -ENODEV;
 
-	/* We've managed to ship a kms-enabled ddx that shipped with an XvMC
-	 * implementation for gen3 (and only gen3) that used legacy drm maps
-	 * (gasp!) to share buffers between X and the client. Hence we need to
-	 * keep around the fake agp stuff for gen3, even when kms is enabled. */
-	if (intel_info->gen != 3) {
-		driver.driver_features &=
-			~(DRIVER_USE_AGP | DRIVER_REQUIRE_AGP);
-	} else if (!intel_agp_enabled) {
-		DRM_ERROR("drm/i915 can't work without intel_agp module!\n");
-		return -ENODEV;
-	}
+	driver.driver_features &= ~(DRIVER_USE_AGP);
 
 	return drm_get_pci_dev(pdev, ent, &driver);
 }
@@ -915,6 +908,49 @@
 	return i915_drm_freeze(drm_dev);
 }
 
+static int i915_runtime_suspend(struct device *device)
+{
+	struct pci_dev *pdev = to_pci_dev(device);
+	struct drm_device *dev = pci_get_drvdata(pdev);
+	struct drm_i915_private *dev_priv = dev->dev_private;
+
+	WARN_ON(!HAS_RUNTIME_PM(dev));
+
+	DRM_DEBUG_KMS("Suspending device\n");
+
+	i915_gem_release_all_mmaps(dev_priv);
+
+	del_timer_sync(&dev_priv->gpu_error.hangcheck_timer);
+	dev_priv->pm.suspended = true;
+
+	/*
+	 * current versions of firmware which depend on this opregion
+	 * notification have repurposed the D1 definition to mean
+	 * "runtime suspended" vs. what you would normally expect (D3)
+	 * to distinguish it from notifications that might be sent
+	 * via the suspend path.
+	 */
+	intel_opregion_notify_adapter(dev, PCI_D1);
+
+	return 0;
+}
+
+static int i915_runtime_resume(struct device *device)
+{
+	struct pci_dev *pdev = to_pci_dev(device);
+	struct drm_device *dev = pci_get_drvdata(pdev);
+	struct drm_i915_private *dev_priv = dev->dev_private;
+
+	WARN_ON(!HAS_RUNTIME_PM(dev));
+
+	DRM_DEBUG_KMS("Resuming device\n");
+
+	intel_opregion_notify_adapter(dev, PCI_D0);
+	dev_priv->pm.suspended = false;
+
+	return 0;
+}
+
 static const struct dev_pm_ops i915_pm_ops = {
 	.suspend = i915_pm_suspend,
 	.resume = i915_pm_resume,
@@ -922,6 +958,8 @@
 	.thaw = i915_pm_thaw,
 	.poweroff = i915_pm_poweroff,
 	.restore = i915_pm_resume,
+	.runtime_suspend = i915_runtime_suspend,
+	.runtime_resume = i915_runtime_resume,
 };
 
 static const struct vm_operations_struct i915_gem_vm_ops = {
@@ -949,7 +987,7 @@
 	 * deal with them for Intel hardware.
 	 */
 	.driver_features =
-	    DRIVER_USE_AGP | DRIVER_REQUIRE_AGP |
+	    DRIVER_USE_AGP |
 	    DRIVER_HAVE_IRQ | DRIVER_IRQ_SHARED | DRIVER_GEM | DRIVER_PRIME |
 	    DRIVER_RENDER,
 	.load = i915_driver_load,
@@ -1024,14 +1062,24 @@
 		driver.driver_features &= ~DRIVER_MODESET;
 #endif
 
-	if (!(driver.driver_features & DRIVER_MODESET))
+	if (!(driver.driver_features & DRIVER_MODESET)) {
 		driver.get_vblank_timestamp = NULL;
+#ifndef CONFIG_DRM_I915_UMS
+		/* Silently fail loading to not upset userspace. */
+		return 0;
+#endif
+	}
 
 	return drm_pci_init(&driver, &i915_pci_driver);
 }
 
 static void __exit i915_exit(void)
 {
+#ifndef CONFIG_DRM_I915_UMS
+	if (!(driver.driver_features & DRIVER_MODESET))
+		return; /* Never loaded a driver. */
+#endif
+
 	drm_pci_exit(&driver, &i915_pci_driver);
 }
 
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 1caa5e3..df77e20 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -89,6 +89,18 @@
 };
 #define port_name(p) ((p) + 'A')
 
+#define I915_NUM_PHYS_VLV 1
+
+enum dpio_channel {
+	DPIO_CH0,
+	DPIO_CH1
+};
+
+enum dpio_phy {
+	DPIO_PHY0,
+	DPIO_PHY1
+};
+
 enum intel_display_power_domain {
 	POWER_DOMAIN_PIPE_A,
 	POWER_DOMAIN_PIPE_B,
@@ -101,6 +113,7 @@
 	POWER_DOMAIN_TRANSCODER_C,
 	POWER_DOMAIN_TRANSCODER_EDP,
 	POWER_DOMAIN_VGA,
+	POWER_DOMAIN_AUDIO,
 	POWER_DOMAIN_INIT,
 
 	POWER_DOMAIN_NUM,
@@ -310,13 +323,14 @@
 	u32 instps[I915_NUM_RINGS];
 	u32 extra_instdone[I915_NUM_INSTDONE_REG];
 	u32 seqno[I915_NUM_RINGS];
-	u64 bbaddr;
+	u64 bbaddr[I915_NUM_RINGS];
 	u32 fault_reg[I915_NUM_RINGS];
 	u32 done_reg;
 	u32 faddr[I915_NUM_RINGS];
 	u64 fence[I915_MAX_NUM_FENCES];
 	struct timeval time;
 	struct drm_i915_error_ring {
+		bool valid;
 		struct drm_i915_error_object {
 			int page_count;
 			u32 gtt_offset;
@@ -351,6 +365,7 @@
 	enum intel_ring_hangcheck_action hangcheck_action[I915_NUM_RINGS];
 };
 
+struct intel_connector;
 struct intel_crtc_config;
 struct intel_crtc;
 struct intel_limit;
@@ -358,7 +373,7 @@
 
 struct drm_i915_display_funcs {
 	bool (*fbc_enabled)(struct drm_device *dev);
-	void (*enable_fbc)(struct drm_crtc *crtc, unsigned long interval);
+	void (*enable_fbc)(struct drm_crtc *crtc);
 	void (*disable_fbc)(struct drm_device *dev);
 	int (*get_display_clock_speed)(struct drm_device *dev);
 	int (*get_fifo_size)(struct drm_device *dev, int plane);
@@ -413,11 +428,20 @@
 	/* render clock increase/decrease */
 	/* display clock increase/decrease */
 	/* pll clock increase/decrease */
+
+	int (*setup_backlight)(struct intel_connector *connector);
+	uint32_t (*get_backlight)(struct intel_connector *connector);
+	void (*set_backlight)(struct intel_connector *connector,
+			      uint32_t level);
+	void (*disable_backlight)(struct intel_connector *connector);
+	void (*enable_backlight)(struct intel_connector *connector);
 };
 
 struct intel_uncore_funcs {
-	void (*force_wake_get)(struct drm_i915_private *dev_priv);
-	void (*force_wake_put)(struct drm_i915_private *dev_priv);
+	void (*force_wake_get)(struct drm_i915_private *dev_priv,
+							int fw_engine);
+	void (*force_wake_put)(struct drm_i915_private *dev_priv,
+							int fw_engine);
 
 	uint8_t  (*mmio_readb)(struct drm_i915_private *dev_priv, off_t offset, bool trace);
 	uint16_t (*mmio_readw)(struct drm_i915_private *dev_priv, off_t offset, bool trace);
@@ -442,6 +466,9 @@
 	unsigned fifo_count;
 	unsigned forcewake_count;
 
+	unsigned fw_rendercount;
+	unsigned fw_mediacount;
+
 	struct delayed_work force_wake_work;
 };
 
@@ -669,7 +696,6 @@
 		struct delayed_work work;
 		struct drm_crtc *crtc;
 		struct drm_framebuffer *fb;
-		int interval;
 	} *fbc_work;
 
 	enum no_fbc_reason {
@@ -708,7 +734,6 @@
 #define QUIRK_PIPEA_FORCE (1<<0)
 #define QUIRK_LVDS_SSC_DISABLE (1<<1)
 #define QUIRK_INVERT_BRIGHTNESS (1<<2)
-#define QUIRK_NO_PCH_PWM_ENABLE (1<<3)
 
 struct intel_fbdev;
 struct intel_fbc_work;
@@ -761,8 +786,6 @@
 	u32 saveBLC_PWM_CTL;
 	u32 saveBLC_PWM_CTL2;
 	u32 saveBLC_HIST_CTL_B;
-	u32 saveBLC_PWM_CTL_B;
-	u32 saveBLC_PWM_CTL2_B;
 	u32 saveBLC_CPU_PWM_CTL;
 	u32 saveBLC_CPU_PWM_CTL2;
 	u32 saveFPB0;
@@ -932,21 +955,29 @@
 
 /* Power well structure for haswell */
 struct i915_power_well {
+	const char *name;
+	bool always_on;
 	/* power well enable/disable usage count */
 	int count;
+	unsigned long domains;
+	void *data;
+	void (*set)(struct drm_device *dev, struct i915_power_well *power_well,
+		    bool enable);
+	bool (*is_enabled)(struct drm_device *dev,
+			   struct i915_power_well *power_well);
 };
 
-#define I915_MAX_POWER_WELLS 1
-
 struct i915_power_domains {
 	/*
 	 * Power wells needed for initialization at driver init and suspend
 	 * time are on. They are kept on until after the first modeset.
 	 */
 	bool init_power_on;
+	int power_well_count;
 
 	struct mutex lock;
-	struct i915_power_well power_wells[I915_MAX_POWER_WELLS];
+	int domain_use_count[POWER_DOMAIN_NUM];
+	struct i915_power_well *power_wells;
 };
 
 struct i915_dri1_state {
@@ -1077,34 +1108,30 @@
 	unsigned long missed_irq_rings;
 
 	/**
-	 * State variable and reset counter controlling the reset flow
+	 * State variable controlling the reset flow and count
 	 *
-	 * Upper bits are for the reset counter.  This counter is used by the
-	 * wait_seqno code to race-free noticed that a reset event happened and
-	 * that it needs to restart the entire ioctl (since most likely the
-	 * seqno it waited for won't ever signal anytime soon).
+	 * This is a counter which gets incremented when reset is triggered,
+	 * and again when reset has been handled. So odd values (lowest bit set)
+	 * mean that a reset is in progress and even values mean that the
+	 * (reset_counter >> 1):th reset was successfully completed.
+	 *
+	 * If a reset is not completed successfully, the I915_WEDGED bit is
+	 * set meaning that hardware is terminally sour and there is no
+	 * recovery. All waiters on the reset_queue will be woken when
+	 * that happens.
+	 *
+	 * This counter is used by the wait_seqno code to notice that a reset
+	 * event happened and it needs to restart the entire ioctl (since most
+	 * likely the seqno it waited for won't ever signal anytime soon).
 	 *
 	 * This is important for lock-free wait paths, where no contended lock
 	 * naturally enforces the correct ordering between the bail-out of the
 	 * waiter and the gpu reset work code.
-	 *
-	 * Lowest bit controls the reset state machine: Set means a reset is in
-	 * progress. This state will (presuming we don't have any bugs) decay
-	 * into either unset (successful reset) or the special WEDGED value (hw
-	 * terminally sour). All waiters on the reset_queue will be woken when
-	 * that happens.
 	 */
 	atomic_t reset_counter;
 
-	/**
-	 * Special values/flags for reset_counter
-	 *
-	 * Note that the code relies on
-	 * 	I915_WEDGED & I915_RESET_IN_PROGRESS_FLAG
-	 * being true.
-	 */
 #define I915_RESET_IN_PROGRESS_FLAG	1
-#define I915_WEDGED			0xffffffff
+#define I915_WEDGED			(1 << 31)
 
 	/**
 	 * Waitqueue to signal when the reset has completed. Used by clients
@@ -1158,6 +1185,11 @@
 	int edp_bpp;
 	struct edp_power_seq edp_pps;
 
+	struct {
+		u16 pwm_freq_hz;
+		bool active_low_pwm;
+	} backlight;
+
 	/* MIPI DSI */
 	struct {
 		u16 panel_id;
@@ -1184,7 +1216,7 @@
 	uint32_t fbc_val;
 };
 
-struct hsw_wm_values {
+struct ilk_wm_values {
 	uint32_t wm_pipe[3];
 	uint32_t wm_lp[3];
 	uint32_t wm_lp_spr[3];
@@ -1262,6 +1294,10 @@
 	} regsave;
 };
 
+struct i915_runtime_pm {
+	bool suspended;
+};
+
 enum intel_pipe_crc_source {
 	INTEL_PIPE_CRC_SOURCE_NONE,
 	INTEL_PIPE_CRC_SOURCE_PLANE1,
@@ -1366,15 +1402,9 @@
 
 	/* overlay */
 	struct intel_overlay *overlay;
-	unsigned int sprite_scaling_enabled;
 
-	/* backlight */
-	struct {
-		int level;
-		bool enabled;
-		spinlock_t lock; /* bl registers and the above bl fields */
-		struct backlight_device *device;
-	} backlight;
+	/* backlight registers and fields in struct intel_panel */
+	spinlock_t backlight_lock;
 
 	/* LVDS info */
 	bool no_aux_handshake;
@@ -1426,6 +1456,7 @@
 	int num_shared_dpll;
 	struct intel_shared_dpll shared_dplls[I915_NUM_PLLS];
 	struct intel_ddi_plls ddi_plls;
+	int dpio_phy_iosf_port[I915_NUM_PHYS_VLV];
 
 	/* Reclocking support */
 	bool render_reclock_avail;
@@ -1470,7 +1501,6 @@
 	struct drm_property *broadcast_rgb_property;
 	struct drm_property *force_audio_property;
 
-	bool hw_contexts_disabled;
 	uint32_t hw_context_size;
 	struct list_head context_list;
 
@@ -1492,11 +1522,13 @@
 		uint16_t cur_latency[5];
 
 		/* current hardware state */
-		struct hsw_wm_values hw;
+		struct ilk_wm_values hw;
 	} wm;
 
 	struct i915_package_c8 pc8;
 
+	struct i915_runtime_pm pm;
+
 	/* Old dri1 support infrastructure, beware the dragons ya fools entering
 	 * here! */
 	struct i915_dri1_state dri1;
@@ -1799,6 +1831,14 @@
 
 /* Early gen2 have a totally busted CS tlb and require pinned batches. */
 #define HAS_BROKEN_CS_TLB(dev)		(IS_I830(dev) || IS_845G(dev))
+/*
+ * dp aux and gmbus irq on gen4 seem to be able to generate legacy interrupts
+ * even when in MSI mode. This results in spurious interrupt warnings if the
+ * legacy irq no. is shared with another device. The kernel then disables that
+ * interrupt source and so prevents the other device from working properly.
+ */
+#define HAS_AUX_IRQ(dev) (INTEL_INFO(dev)->gen >= 5)
+#define HAS_GMBUS_IRQ(dev) (INTEL_INFO(dev)->gen >= 5)
 
 /* With the 945 and later, Y tiling got adjusted so that it was 32 128-byte
  * rows, which changed the alignment requirements and fence programming.
@@ -1813,15 +1853,15 @@
 
 #define HAS_FW_BLC(dev) (INTEL_INFO(dev)->gen > 2)
 #define HAS_PIPE_CXSR(dev) (INTEL_INFO(dev)->has_pipe_cxsr)
-#define I915_HAS_FBC(dev) (INTEL_INFO(dev)->has_fbc)
+#define HAS_FBC(dev) (INTEL_INFO(dev)->has_fbc)
 
 #define HAS_IPS(dev)		(IS_ULT(dev) || IS_BROADWELL(dev))
 
 #define HAS_DDI(dev)		(INTEL_INFO(dev)->has_ddi)
-#define HAS_POWER_WELL(dev)	(IS_HASWELL(dev) || IS_BROADWELL(dev))
 #define HAS_FPGA_DBG_UNCLAIMED(dev)	(INTEL_INFO(dev)->has_fpga_dbg)
 #define HAS_PSR(dev)		(IS_HASWELL(dev) || IS_BROADWELL(dev))
 #define HAS_PC8(dev)		(IS_HASWELL(dev)) /* XXX HSW:ULX */
+#define HAS_RUNTIME_PM(dev)	(IS_HASWELL(dev))
 
 #define INTEL_PCH_DEVICE_ID_MASK		0xff00
 #define INTEL_PCH_IBX_DEVICE_ID_TYPE		0x3b00
@@ -1911,7 +1951,6 @@
 extern void intel_uncore_sanitize(struct drm_device *dev);
 extern void intel_uncore_early_sanitize(struct drm_device *dev);
 extern void intel_uncore_init(struct drm_device *dev);
-extern void intel_uncore_clear_errors(struct drm_device *dev);
 extern void intel_uncore_check_errors(struct drm_device *dev);
 extern void intel_uncore_fini(struct drm_device *dev);
 
@@ -1987,6 +2026,7 @@
 int __must_check i915_vma_unbind(struct i915_vma *vma);
 int __must_check i915_gem_object_ggtt_unbind(struct drm_i915_gem_object *obj);
 int i915_gem_object_put_pages(struct drm_i915_gem_object *obj);
+void i915_gem_release_all_mmaps(struct drm_i915_private *dev_priv);
 void i915_gem_release_mmap(struct drm_i915_gem_object *obj);
 void i915_gem_lastclose(struct drm_device *dev);
 
@@ -2063,12 +2103,17 @@
 static inline bool i915_reset_in_progress(struct i915_gpu_error *error)
 {
 	return unlikely(atomic_read(&error->reset_counter)
-			& I915_RESET_IN_PROGRESS_FLAG);
+			& (I915_RESET_IN_PROGRESS_FLAG | I915_WEDGED));
 }
 
 static inline bool i915_terminally_wedged(struct i915_gpu_error *error)
 {
-	return atomic_read(&error->reset_counter) == I915_WEDGED;
+	return atomic_read(&error->reset_counter) & I915_WEDGED;
+}
+
+static inline u32 i915_reset_count(struct i915_gpu_error *error)
+{
+	return ((atomic_read(&error->reset_counter) & ~I915_WEDGED) + 1) / 2;
 }
 
 void i915_gem_reset(struct drm_device *dev);
@@ -2180,7 +2225,7 @@
 }
 
 /* i915_gem_context.c */
-void i915_gem_context_init(struct drm_device *dev);
+int __must_check i915_gem_context_init(struct drm_device *dev);
 void i915_gem_context_fini(struct drm_device *dev);
 void i915_gem_context_close(struct drm_device *dev, struct drm_file *file);
 int i915_switch_context(struct intel_ring_buffer *ring,
@@ -2399,6 +2444,8 @@
 extern bool i915_semaphore_is_enabled(struct drm_device *dev);
 int i915_reg_read_ioctl(struct drm_device *dev, void *data,
 			struct drm_file *file);
+int i915_get_reset_stats_ioctl(struct drm_device *dev, void *data,
+			       struct drm_file *file);
 
 /* overlay */
 extern struct intel_overlay_error_state *intel_overlay_capture_error_state(struct drm_device *dev);
@@ -2414,8 +2461,8 @@
  * must be set to prevent GT core from power down and stale values being
  * returned.
  */
-void gen6_gt_force_wake_get(struct drm_i915_private *dev_priv);
-void gen6_gt_force_wake_put(struct drm_i915_private *dev_priv);
+void gen6_gt_force_wake_get(struct drm_i915_private *dev_priv, int fw_engine);
+void gen6_gt_force_wake_put(struct drm_i915_private *dev_priv, int fw_engine);
 
 int sandybridge_pcode_read(struct drm_i915_private *dev_priv, u8 mbox, u32 *val);
 int sandybridge_pcode_write(struct drm_i915_private *dev_priv, u8 mbox, u32 val);
@@ -2430,6 +2477,8 @@
 void vlv_cck_write(struct drm_i915_private *dev_priv, u32 reg, u32 val);
 u32 vlv_ccu_read(struct drm_i915_private *dev_priv, u32 reg);
 void vlv_ccu_write(struct drm_i915_private *dev_priv, u32 reg, u32 val);
+u32 vlv_bunit_read(struct drm_i915_private *dev_priv, u32 reg);
+void vlv_bunit_write(struct drm_i915_private *dev_priv, u32 reg, u32 val);
 u32 vlv_gps_core_read(struct drm_i915_private *dev_priv, u32 reg);
 void vlv_gps_core_write(struct drm_i915_private *dev_priv, u32 reg, u32 val);
 u32 vlv_dpio_read(struct drm_i915_private *dev_priv, enum pipe pipe, int reg);
@@ -2438,9 +2487,30 @@
 		   enum intel_sbi_destination destination);
 void intel_sbi_write(struct drm_i915_private *dev_priv, u16 reg, u32 value,
 		     enum intel_sbi_destination destination);
+u32 vlv_flisdsi_read(struct drm_i915_private *dev_priv, u32 reg);
+void vlv_flisdsi_write(struct drm_i915_private *dev_priv, u32 reg, u32 val);
 
-int vlv_gpu_freq(int ddr_freq, int val);
-int vlv_freq_opcode(int ddr_freq, int val);
+int vlv_gpu_freq(struct drm_i915_private *dev_priv, int val);
+int vlv_freq_opcode(struct drm_i915_private *dev_priv, int val);
+
+void vlv_force_wake_get(struct drm_i915_private *dev_priv, int fw_engine);
+void vlv_force_wake_put(struct drm_i915_private *dev_priv, int fw_engine);
+
+#define FORCEWAKE_VLV_RENDER_RANGE_OFFSET(reg) \
+	(((reg) >= 0x2000 && (reg) < 0x4000) ||\
+	((reg) >= 0x5000 && (reg) < 0x8000) ||\
+	((reg) >= 0xB000 && (reg) < 0x12000) ||\
+	((reg) >= 0x2E000 && (reg) < 0x30000))
+
+#define FORCEWAKE_VLV_MEDIA_RANGE_OFFSET(reg)\
+	(((reg) >= 0x12000 && (reg) < 0x14000) ||\
+	((reg) >= 0x22000 && (reg) < 0x24000) ||\
+	((reg) >= 0x30000 && (reg) < 0x40000))
+
+#define FORCEWAKE_RENDER	(1 << 0)
+#define FORCEWAKE_MEDIA		(1 << 1)
+#define FORCEWAKE_ALL		(FORCEWAKE_RENDER | FORCEWAKE_MEDIA)
+
 
 #define I915_READ8(reg)		dev_priv->uncore.funcs.mmio_readb(dev_priv, (reg), true)
 #define I915_WRITE8(reg, val)	dev_priv->uncore.funcs.mmio_writeb(dev_priv, (reg), (val), true)
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 76d3d1a..00c8361 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -1015,9 +1015,11 @@
 			struct drm_i915_file_private *file_priv)
 {
 	drm_i915_private_t *dev_priv = ring->dev->dev_private;
+	const bool irq_test_in_progress =
+		ACCESS_ONCE(dev_priv->gpu_error.test_irq_rings) & intel_ring_flag(ring);
 	struct timespec before, now;
 	DEFINE_WAIT(wait);
-	long timeout_jiffies;
+	unsigned long timeout_expire;
 	int ret;
 
 	WARN(dev_priv->pc8.irqs_disabled, "IRQs disabled\n");
@@ -1025,7 +1027,7 @@
 	if (i915_seqno_passed(ring->get_seqno(ring, true), seqno))
 		return 0;
 
-	timeout_jiffies = timeout ? timespec_to_jiffies_timeout(timeout) : 1;
+	timeout_expire = timeout ? jiffies + timespec_to_jiffies_timeout(timeout) : 0;
 
 	if (dev_priv->info->gen >= 6 && can_wait_boost(file_priv)) {
 		gen6_rps_boost(dev_priv);
@@ -1035,8 +1037,7 @@
 					 msecs_to_jiffies(100));
 	}
 
-	if (!(dev_priv->gpu_error.test_irq_rings & intel_ring_flag(ring)) &&
-	    WARN_ON(!ring->irq_get(ring)))
+	if (!irq_test_in_progress && WARN_ON(!ring->irq_get(ring)))
 		return -ENODEV;
 
 	/* Record current time in case interrupted by signal, or wedged */
@@ -1044,7 +1045,6 @@
 	getrawmonotonic(&before);
 	for (;;) {
 		struct timer_list timer;
-		unsigned long expire;
 
 		prepare_to_wait(&ring->irq_queue, &wait,
 				interruptible ? TASK_INTERRUPTIBLE : TASK_UNINTERRUPTIBLE);
@@ -1070,23 +1070,22 @@
 			break;
 		}
 
-		if (timeout_jiffies <= 0) {
+		if (timeout && time_after_eq(jiffies, timeout_expire)) {
 			ret = -ETIME;
 			break;
 		}
 
 		timer.function = NULL;
 		if (timeout || missed_irq(dev_priv, ring)) {
+			unsigned long expire;
+
 			setup_timer_on_stack(&timer, fake_irq, (unsigned long)current);
-			expire = jiffies + (missed_irq(dev_priv, ring) ? 1: timeout_jiffies);
+			expire = missed_irq(dev_priv, ring) ? jiffies + 1 : timeout_expire;
 			mod_timer(&timer, expire);
 		}
 
 		io_schedule();
 
-		if (timeout)
-			timeout_jiffies = expire - jiffies;
-
 		if (timer.function) {
 			del_singleshot_timer_sync(&timer);
 			destroy_timer_on_stack(&timer);
@@ -1095,7 +1094,8 @@
 	getrawmonotonic(&now);
 	trace_i915_gem_request_wait_end(ring, seqno);
 
-	ring->irq_put(ring);
+	if (!irq_test_in_progress)
+		ring->irq_put(ring);
 
 	finish_wait(&ring->irq_queue, &wait);
 
@@ -1380,6 +1380,8 @@
 	int ret = 0;
 	bool write = !!(vmf->flags & FAULT_FLAG_WRITE);
 
+	intel_runtime_pm_get(dev_priv);
+
 	/* We don't use vmf->pgoff since that has the fake offset */
 	page_offset = ((unsigned long)vmf->virtual_address - vma->vm_start) >>
 		PAGE_SHIFT;
@@ -1427,8 +1429,10 @@
 		/* If this -EIO is due to a gpu hang, give the reset code a
 		 * chance to clean up the mess. Otherwise return the proper
 		 * SIGBUS. */
-		if (i915_terminally_wedged(&dev_priv->gpu_error))
-			return VM_FAULT_SIGBUS;
+		if (i915_terminally_wedged(&dev_priv->gpu_error)) {
+			ret = VM_FAULT_SIGBUS;
+			break;
+		}
 	case -EAGAIN:
 		/*
 		 * EAGAIN means the gpu is hung and we'll wait for the error
@@ -1443,15 +1447,38 @@
 		 * EBUSY is ok: this just means that another thread
 		 * already did the job.
 		 */
-		return VM_FAULT_NOPAGE;
+		ret = VM_FAULT_NOPAGE;
+		break;
 	case -ENOMEM:
-		return VM_FAULT_OOM;
+		ret = VM_FAULT_OOM;
+		break;
 	case -ENOSPC:
-		return VM_FAULT_SIGBUS;
+		ret = VM_FAULT_SIGBUS;
+		break;
 	default:
 		WARN_ONCE(ret, "unhandled error in i915_gem_fault: %i\n", ret);
-		return VM_FAULT_SIGBUS;
+		ret = VM_FAULT_SIGBUS;
+		break;
 	}
+
+	intel_runtime_pm_put(dev_priv);
+	return ret;
+}
+
+void i915_gem_release_all_mmaps(struct drm_i915_private *dev_priv)
+{
+	struct i915_vma *vma;
+
+	/*
+	 * Only the global gtt is relevant for gtt memory mappings, so restrict
+	 * list traversal to objects bound into the global address space. Note
+	 * that the active list should be empty, but better safe than sorry.
+	 */
+	WARN_ON(!list_empty(&dev_priv->gtt.base.active_list));
+	list_for_each_entry(vma, &dev_priv->gtt.base.active_list, mm_list)
+		i915_gem_release_mmap(vma->obj);
+	list_for_each_entry(vma, &dev_priv->gtt.base.inactive_list, mm_list)
+		i915_gem_release_mmap(vma->obj);
 }
 
 /**
@@ -2303,7 +2330,7 @@
 
 	if (ring->hangcheck.action != HANGCHECK_WAIT &&
 	    i915_request_guilty(request, acthd, &inside)) {
-		DRM_ERROR("%s hung %s bo (0x%lx ctx %d) at 0x%x\n",
+		DRM_DEBUG("%s hung %s bo (0x%lx ctx %d) at 0x%x\n",
 			  ring->name,
 			  inside ? "inside" : "flushing",
 			  offset,
@@ -2361,16 +2388,6 @@
 static void i915_gem_reset_ring_cleanup(struct drm_i915_private *dev_priv,
 					struct intel_ring_buffer *ring)
 {
-	while (!list_empty(&ring->request_list)) {
-		struct drm_i915_gem_request *request;
-
-		request = list_first_entry(&ring->request_list,
-					   struct drm_i915_gem_request,
-					   list);
-
-		i915_gem_free_request(request);
-	}
-
 	while (!list_empty(&ring->active_list)) {
 		struct drm_i915_gem_object *obj;
 
@@ -2380,6 +2397,23 @@
 
 		i915_gem_object_move_to_inactive(obj);
 	}
+
+	/*
+	 * We must free the requests after all the corresponding objects have
+	 * been moved off active lists, which is the same order the normal
+	 * retire_requests function uses. This is important if objects hold
+	 * implicit references on things like ppgtt address spaces through
+	 * the request.
+	 */
+	while (!list_empty(&ring->request_list)) {
+		struct drm_i915_gem_request *request;
+
+		request = list_first_entry(&ring->request_list,
+					   struct drm_i915_gem_request,
+					   list);
+
+		i915_gem_free_request(request);
+	}
 }
 
 void i915_gem_restore_fences(struct drm_device *dev)
@@ -2760,7 +2794,6 @@
 		obj->has_aliasing_ppgtt_mapping = 0;
 	}
 	i915_gem_gtt_finish_object(obj);
-	i915_gem_object_unpin_pages(obj);
 
 	list_del(&vma->mm_list);
 	/* Avoid an unnecessary call to unbind on rebind. */
@@ -2768,7 +2801,6 @@
 		obj->map_and_fenceable = true;
 
 	drm_mm_remove_node(&vma->node);
-
 	i915_gem_vma_destroy(vma);
 
 	/* Since the unbound list is global, only move to that list if
@@ -2776,6 +2808,12 @@
 	if (list_empty(&obj->vma_list))
 		list_move_tail(&obj->global_list, &dev_priv->mm.unbound_list);
 
+	/* And finally, now that the object is completely decoupled from this vma,
+	 * we can drop its hold on the backing storage and allow it to be
+	 * reaped by the shrinker.
+	 */
+	i915_gem_object_unpin_pages(obj);
+
 	return 0;
 }
 
@@ -3068,7 +3106,7 @@
 	}
 
 	if (avail == NULL)
-		return NULL;
+		goto deadlock;
 
 	/* None available, try to steal one or wait for a user to finish */
 	list_for_each_entry(reg, &dev_priv->mm.fence_list, lru_list) {
@@ -3078,7 +3116,12 @@
 		return reg;
 	}
 
-	return NULL;
+deadlock:
+	/* Wait for completion of pending flips which consume fences */
+	if (intel_has_pending_fb_unpin(dev))
+		return ERR_PTR(-EAGAIN);
+
+	return ERR_PTR(-EDEADLK);
 }
 
 /**
@@ -3123,8 +3166,8 @@
 		}
 	} else if (enable) {
 		reg = i915_find_fence_reg(dev);
-		if (reg == NULL)
-			return -EDEADLK;
+		if (IS_ERR(reg))
+			return PTR_ERR(reg);
 
 		if (reg->obj) {
 			struct drm_i915_gem_object *old = reg->obj;
@@ -4179,6 +4222,8 @@
 	drm_i915_private_t *dev_priv = dev->dev_private;
 	struct i915_vma *vma, *next;
 
+	intel_runtime_pm_get(dev_priv);
+
 	trace_i915_gem_object_destroy(obj);
 
 	if (obj->phys_obj)
@@ -4223,6 +4268,8 @@
 
 	kfree(obj->bit_17);
 	i915_gem_object_free(obj);
+
+	intel_runtime_pm_put(dev_priv);
 }
 
 struct i915_vma *i915_gem_obj_to_vma(struct drm_i915_gem_object *obj,
@@ -4479,7 +4526,13 @@
 	 * XXX: There was some w/a described somewhere suggesting loading
 	 * contexts before PPGTT.
 	 */
-	i915_gem_context_init(dev);
+	ret = i915_gem_context_init(dev);
+	if (ret) {
+		i915_gem_cleanup_ringbuffer(dev);
+		DRM_ERROR("Context initialization failed %d\n", ret);
+		return ret;
+	}
+
 	if (dev_priv->mm.aliasing_ppgtt) {
 		ret = dev_priv->mm.aliasing_ppgtt->enable(dev);
 		if (ret) {
diff --git a/drivers/gpu/drm/i915/i915_gem_context.c b/drivers/gpu/drm/i915/i915_gem_context.c
index b0f42b9..e08acab 100644
--- a/drivers/gpu/drm/i915/i915_gem_context.c
+++ b/drivers/gpu/drm/i915/i915_gem_context.c
@@ -247,36 +247,34 @@
 	return ret;
 }
 
-void i915_gem_context_init(struct drm_device *dev)
+int i915_gem_context_init(struct drm_device *dev)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
+	int ret;
 
-	if (!HAS_HW_CONTEXTS(dev)) {
-		dev_priv->hw_contexts_disabled = true;
-		DRM_DEBUG_DRIVER("Disabling HW Contexts; old hardware\n");
-		return;
-	}
+	if (!HAS_HW_CONTEXTS(dev))
+		return 0;
 
 	/* If called from reset, or thaw... we've been here already */
-	if (dev_priv->hw_contexts_disabled ||
-	    dev_priv->ring[RCS].default_context)
-		return;
+	if (dev_priv->ring[RCS].default_context)
+		return 0;
 
 	dev_priv->hw_context_size = round_up(get_context_size(dev), 4096);
 
 	if (dev_priv->hw_context_size > (1<<20)) {
-		dev_priv->hw_contexts_disabled = true;
 		DRM_DEBUG_DRIVER("Disabling HW Contexts; invalid size\n");
-		return;
+		return -E2BIG;
 	}
 
-	if (create_default_context(dev_priv)) {
-		dev_priv->hw_contexts_disabled = true;
-		DRM_DEBUG_DRIVER("Disabling HW Contexts; create failed\n");
-		return;
+	ret = create_default_context(dev_priv);
+	if (ret) {
+		DRM_DEBUG_DRIVER("Disabling HW Contexts; create failed %d\n",
+				 ret);
+		return ret;
 	}
 
 	DRM_DEBUG_DRIVER("HW context support initialized\n");
+	return 0;
 }
 
 void i915_gem_context_fini(struct drm_device *dev)
@@ -284,7 +282,7 @@
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct i915_hw_context *dctx = dev_priv->ring[RCS].default_context;
 
-	if (dev_priv->hw_contexts_disabled)
+	if (!HAS_HW_CONTEXTS(dev))
 		return;
 
 	/* The only known way to stop the gpu from accessing the hw context is
@@ -327,16 +325,16 @@
 				struct drm_file *file,
 				u32 id)
 {
-	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct drm_i915_file_private *file_priv = file->driver_priv;
 	struct i915_hw_context *ctx;
 
 	if (id == DEFAULT_CONTEXT_ID)
 		return &file_priv->hang_stats;
 
-	ctx = NULL;
-	if (!dev_priv->hw_contexts_disabled)
-		ctx = i915_gem_context_get(file->driver_priv, id);
+	if (!HAS_HW_CONTEXTS(dev))
+		return ERR_PTR(-ENOENT);
+
+	ctx = i915_gem_context_get(file->driver_priv, id);
 	if (ctx == NULL)
 		return ERR_PTR(-ENOENT);
 
@@ -502,8 +500,6 @@
  * @ring: ring for which we'll execute the context switch
  * @file_priv: file_priv associated with the context, may be NULL
  * @id: context id number
- * @seqno: sequence number by which the new context will be switched to
- * @flags:
  *
  * The context life cycle is simple. The context refcount is incremented and
  * decremented by 1 and create and destroy. If the context is in use by the GPU,
@@ -517,7 +513,7 @@
 	struct drm_i915_private *dev_priv = ring->dev->dev_private;
 	struct i915_hw_context *to;
 
-	if (dev_priv->hw_contexts_disabled)
+	if (!HAS_HW_CONTEXTS(ring->dev))
 		return 0;
 
 	WARN_ON(!mutex_is_locked(&dev_priv->dev->struct_mutex));
@@ -542,7 +538,6 @@
 int i915_gem_context_create_ioctl(struct drm_device *dev, void *data,
 				  struct drm_file *file)
 {
-	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct drm_i915_gem_context_create *args = data;
 	struct drm_i915_file_private *file_priv = file->driver_priv;
 	struct i915_hw_context *ctx;
@@ -551,7 +546,7 @@
 	if (!(dev->driver->driver_features & DRIVER_GEM))
 		return -ENODEV;
 
-	if (dev_priv->hw_contexts_disabled)
+	if (!HAS_HW_CONTEXTS(dev))
 		return -ENODEV;
 
 	ret = i915_mutex_lock_interruptible(dev);
diff --git a/drivers/gpu/drm/i915/i915_gem_evict.c b/drivers/gpu/drm/i915/i915_gem_evict.c
index 8f3adc7..2ca280f 100644
--- a/drivers/gpu/drm/i915/i915_gem_evict.c
+++ b/drivers/gpu/drm/i915/i915_gem_evict.c
@@ -27,8 +27,10 @@
  */
 
 #include <drm/drmP.h>
-#include "i915_drv.h"
 #include <drm/i915_drm.h>
+
+#include "i915_drv.h"
+#include "intel_drv.h"
 #include "i915_trace.h"
 
 static bool
@@ -53,6 +55,7 @@
 	struct list_head eviction_list, unwind_list;
 	struct i915_vma *vma;
 	int ret = 0;
+	int pass = 0;
 
 	trace_i915_gem_evict(dev, min_size, alignment, mappable);
 
@@ -119,14 +122,24 @@
 	/* Can we unpin some objects such as idle hw contents,
 	 * or pending flips?
 	 */
-	ret = nonblocking ? -ENOSPC : i915_gpu_idle(dev);
-	if (ret)
-		return ret;
+	if (nonblocking)
+		return -ENOSPC;
 
 	/* Only idle the GPU and repeat the search once */
-	i915_gem_retire_requests(dev);
-	nonblocking = true;
-	goto search_again;
+	if (pass++ == 0) {
+		ret = i915_gpu_idle(dev);
+		if (ret)
+			return ret;
+
+		i915_gem_retire_requests(dev);
+		goto search_again;
+	}
+
+	/* If we still have pending pageflip completions, drop
+	 * back to userspace to give our workqueues time to
+	 * acquire our locks and unpin the old scanouts.
+	 */
+	return intel_has_pending_fb_unpin(dev) ? -EAGAIN : -ENOSPC;
 
 found:
 	/* drm_mm doesn't allow any other operations while
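
The eviction fallback is reorganised around an explicit pass counter: the first miss idles the GPU and retires requests before retrying the search, and a second miss returns -EAGAIN (so userspace retries) while page-flip completions are still outstanding, or -ENOSPC otherwise. A compressed sketch of that two-pass retry loop, with dummy helpers in place of the GEM calls:

#include <errno.h>
#include <stdbool.h>
#include <stdio.h>

/* Dummy stand-ins for the driver's helpers. */
static bool search(int attempt)    { return attempt >= 2; }	/* never finds space here */
static int  gpu_idle(void)         { return 0; }
static bool pending_fb_unpin(void) { return true; }

static int evict_something(void)
{
	int pass = 0;

search_again:
	if (search(pass))
		return 0;			/* found space */

	/* Only idle the GPU and repeat the search once. */
	if (pass++ == 0) {
		int ret = gpu_idle();
		if (ret)
			return ret;
		goto search_again;
	}

	/* Still nothing: let userspace retry if pending flips may free space. */
	return pending_fb_unpin() ? -EAGAIN : -ENOSPC;
}

int main(void)
{
	printf("evict: %d\n", evict_something());	/* prints -11 (-EAGAIN) */
	return 0;
}
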
diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
index a3ba9a8..d269ecf 100644
--- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c
+++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
@@ -46,7 +46,7 @@
 };
 
 static struct eb_vmas *
-eb_create(struct drm_i915_gem_execbuffer2 *args, struct i915_address_space *vm)
+eb_create(struct drm_i915_gem_execbuffer2 *args)
 {
 	struct eb_vmas *eb = NULL;
 
@@ -252,7 +252,7 @@
 	struct drm_device *dev = obj->base.dev;
 	uint32_t page_offset = offset_in_page(reloc->offset);
 	char *vaddr;
-	int ret = -EINVAL;
+	int ret;
 
 	ret = i915_gem_object_set_to_cpu_domain(obj, true);
 	if (ret)
@@ -287,7 +287,7 @@
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	uint32_t __iomem *reloc_entry;
 	void __iomem *reloc_page;
-	int ret = -EINVAL;
+	int ret;
 
 	ret = i915_gem_object_set_to_gtt_domain(obj, true);
 	if (ret)
@@ -335,7 +335,7 @@
 	struct drm_i915_gem_object *target_i915_obj;
 	struct i915_vma *target_vma;
 	uint32_t target_offset;
-	int ret = -EINVAL;
+	int ret;
 
 	/* we've already hold a reference to all valid objects */
 	target_vma = eb_get_vma(eb, reloc->target_handle);
@@ -344,7 +344,7 @@
 	target_i915_obj = target_vma->obj;
 	target_obj = &target_vma->obj->base;
 
-	target_offset = i915_gem_obj_ggtt_offset(target_i915_obj);
+	target_offset = target_vma->node.start;
 
 	/* Sandybridge PPGTT errata: We need a global gtt mapping for MI and
 	 * pipe_control writes because the gpu doesn't properly redirect them
@@ -365,7 +365,7 @@
 			  (int) reloc->offset,
 			  reloc->read_domains,
 			  reloc->write_domain);
-		return ret;
+		return -EINVAL;
 	}
 	if (unlikely((reloc->write_domain | reloc->read_domains)
 		     & ~I915_GEM_GPU_DOMAINS)) {
@@ -376,7 +376,7 @@
 			  (int) reloc->offset,
 			  reloc->read_domains,
 			  reloc->write_domain);
-		return ret;
+		return -EINVAL;
 	}
 
 	target_obj->pending_read_domains |= reloc->read_domains;
@@ -396,14 +396,14 @@
 			  obj, reloc->target_handle,
 			  (int) reloc->offset,
 			  (int) obj->base.size);
-		return ret;
+		return -EINVAL;
 	}
 	if (unlikely(reloc->offset & 3)) {
 		DRM_DEBUG("Relocation not 4-byte aligned: "
 			  "obj %p target %d offset %d.\n",
 			  obj, reloc->target_handle,
 			  (int) reloc->offset);
-		return ret;
+		return -EINVAL;
 	}
 
 	/* We can't wait for rendering with pagefaults disabled */
@@ -491,8 +491,7 @@
 }
 
 static int
-i915_gem_execbuffer_relocate(struct eb_vmas *eb,
-			     struct i915_address_space *vm)
+i915_gem_execbuffer_relocate(struct eb_vmas *eb)
 {
 	struct i915_vma *vma;
 	int ret = 0;
@@ -901,6 +900,24 @@
 	return 0;
 }
 
+static int
+i915_gem_validate_context(struct drm_device *dev, struct drm_file *file,
+			  const u32 ctx_id)
+{
+	struct i915_ctx_hang_stats *hs;
+
+	hs = i915_gem_context_get_hang_stats(dev, file, ctx_id);
+	if (IS_ERR(hs))
+		return PTR_ERR(hs);
+
+	if (hs->banned) {
+		DRM_DEBUG("Context %u tried to submit while banned\n", ctx_id);
+		return -EIO;
+	}
+
+	return 0;
+}
+
 static void
 i915_gem_execbuffer_move_to_active(struct list_head *vmas,
 				   struct intel_ring_buffer *ring)
@@ -980,8 +997,7 @@
 	struct drm_i915_gem_object *batch_obj;
 	struct drm_clip_rect *cliprects = NULL;
 	struct intel_ring_buffer *ring;
-	struct i915_ctx_hang_stats *hs;
-	u32 ctx_id = i915_execbuffer2_get_context_id(*args);
+	const u32 ctx_id = i915_execbuffer2_get_context_id(*args);
 	u32 exec_start, exec_len;
 	u32 mask, flags;
 	int ret, mode, i;
@@ -1108,6 +1124,8 @@
 		}
 	}
 
+	intel_runtime_pm_get(dev_priv);
+
 	ret = i915_mutex_lock_interruptible(dev);
 	if (ret)
 		goto pre_mutex_err;
@@ -1118,7 +1136,13 @@
 		goto pre_mutex_err;
 	}
 
-	eb = eb_create(args, vm);
+	ret = i915_gem_validate_context(dev, file, ctx_id);
+	if (ret) {
+		mutex_unlock(&dev->struct_mutex);
+		goto pre_mutex_err;
+	}
+
+	eb = eb_create(args);
 	if (eb == NULL) {
 		mutex_unlock(&dev->struct_mutex);
 		ret = -ENOMEM;
@@ -1141,7 +1165,7 @@
 
 	/* The objects are in their final locations, apply the relocations. */
 	if (need_relocs)
-		ret = i915_gem_execbuffer_relocate(eb, vm);
+		ret = i915_gem_execbuffer_relocate(eb);
 	if (ret) {
 		if (ret == -EFAULT) {
 			ret = i915_gem_execbuffer_relocate_slow(dev, args, file, ring,
@@ -1170,17 +1194,6 @@
 	if (ret)
 		goto err;
 
-	hs = i915_gem_context_get_hang_stats(dev, file, ctx_id);
-	if (IS_ERR(hs)) {
-		ret = PTR_ERR(hs);
-		goto err;
-	}
-
-	if (hs->banned) {
-		ret = -EIO;
-		goto err;
-	}
-
 	ret = i915_switch_context(ring, file, ctx_id);
 	if (ret)
 		goto err;
@@ -1242,6 +1255,10 @@
 
 pre_mutex_err:
 	kfree(cliprects);
+
+	/* intel_gpu_busy should also get a ref, so it will free when the device
+	 * is really idle. */
+	intel_runtime_pm_put(dev_priv);
 	return ret;
 }
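
Execbuffer, like object destruction earlier in this series, now brackets the whole operation with intel_runtime_pm_get()/intel_runtime_pm_put() so autosuspend cannot power the device down mid-submission; every exit path funnels through the same put. A simplified reference-count sketch of that bracketing, with a plain counter standing in for the real runtime-PM calls:

#include <errno.h>
#include <stdio.h>

static int pm_refcount;			/* stand-in for the runtime-PM usage count */

static void runtime_pm_get(void) { pm_refcount++; }
static void runtime_pm_put(void) { pm_refcount--; }

/* Every exit path funnels through the same put, mirroring the hunks above. */
static int do_execbuffer(int fail_step)
{
	int ret = 0;

	runtime_pm_get();

	if (fail_step == 1) {		/* e.g. lock acquisition interrupted */
		ret = -EINTR;
		goto out;
	}
	if (fail_step == 2) {		/* e.g. relocation failed */
		ret = -EFAULT;
		goto out;
	}
	/* ... submit the batch while the device is guaranteed awake ... */

out:
	runtime_pm_put();
	return ret;
}

int main(void)
{
	do_execbuffer(2);
	printf("refcount balanced: %s\n", pm_refcount == 0 ? "yes" : "no");
	return 0;
}
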
 
diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c
index 3540569..40a2b36 100644
--- a/drivers/gpu/drm/i915/i915_gem_gtt.c
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.c
@@ -240,10 +240,16 @@
 		for_each_ring(ring, dev_priv, j) {
 			ret = gen8_write_pdp(ring, i, addr);
 			if (ret)
-				return ret;
+				goto err_out;
 		}
 	}
 	return 0;
+
+err_out:
+	for_each_ring(ring, dev_priv, j)
+		I915_WRITE(RING_MODE_GEN7(ring),
+			   _MASKED_BIT_DISABLE(GFX_PPGTT_ENABLE));
+	return ret;
 }
 
 static void gen8_ppgtt_clear_range(struct i915_address_space *vm,
@@ -293,23 +299,23 @@
 	unsigned act_pte = first_entry % GEN8_PTES_PER_PAGE;
 	struct sg_page_iter sg_iter;
 
-	pt_vaddr = kmap_atomic(&ppgtt->gen8_pt_pages[act_pt]);
+	pt_vaddr = NULL;
 	for_each_sg_page(pages->sgl, &sg_iter, pages->nents, 0) {
-		dma_addr_t page_addr;
+		if (pt_vaddr == NULL)
+			pt_vaddr = kmap_atomic(&ppgtt->gen8_pt_pages[act_pt]);
 
-		page_addr = sg_dma_address(sg_iter.sg) +
-				(sg_iter.sg_pgoffset << PAGE_SHIFT);
-		pt_vaddr[act_pte] = gen8_pte_encode(page_addr, cache_level,
-						    true);
+		pt_vaddr[act_pte] =
+			gen8_pte_encode(sg_page_iter_dma_address(&sg_iter),
+					cache_level, true);
 		if (++act_pte == GEN8_PTES_PER_PAGE) {
 			kunmap_atomic(pt_vaddr);
+			pt_vaddr = NULL;
 			act_pt++;
-			pt_vaddr = kmap_atomic(&ppgtt->gen8_pt_pages[act_pt]);
 			act_pte = 0;
-
 		}
 	}
-	kunmap_atomic(pt_vaddr);
+	if (pt_vaddr)
+		kunmap_atomic(pt_vaddr);
 }
 
 static void gen8_ppgtt_cleanup(struct i915_address_space *vm)
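
Both the gen8 path above and the gen6 path below switch insert_entries over to mapping a page-table page lazily, only when the first PTE destined for it is written, and dropping the mapping as soon as the page fills up, instead of holding a kmap_atomic across iterations. A self-contained sketch of that lazy map/unmap loop, using plain arrays and a counter as stand-ins for kmap_atomic() and the PTE encode:

#include <stddef.h>
#include <stdio.h>

#define PTES_PER_PAGE 4			/* tiny value just for the demo */

static unsigned int map_calls;

/* Stand-ins for kmap_atomic()/kunmap_atomic() on a page-table page. */
static unsigned long *map_pt(unsigned long pt[][PTES_PER_PAGE], size_t idx)
{
	map_calls++;
	return pt[idx];
}
static void unmap_pt(unsigned long *vaddr) { (void)vaddr; }

static void insert_entries(unsigned long pt[][PTES_PER_PAGE],
			   const unsigned long *addrs, size_t count,
			   size_t first_entry)
{
	size_t act_pt = first_entry / PTES_PER_PAGE;
	size_t act_pte = first_entry % PTES_PER_PAGE;
	unsigned long *vaddr = NULL;
	size_t i;

	for (i = 0; i < count; i++) {
		if (vaddr == NULL)		/* map lazily, on first use */
			vaddr = map_pt(pt, act_pt);

		vaddr[act_pte] = addrs[i];	/* "pte_encode" stand-in */

		if (++act_pte == PTES_PER_PAGE) {
			unmap_pt(vaddr);	/* page full: drop the mapping */
			vaddr = NULL;
			act_pt++;
			act_pte = 0;
		}
	}
	if (vaddr)				/* partially filled last page */
		unmap_pt(vaddr);
}

int main(void)
{
	unsigned long pt[2][PTES_PER_PAGE] = { { 0 } };
	unsigned long addrs[6] = { 1, 2, 3, 4, 5, 6 };

	insert_entries(pt, addrs, 6, 0);
	printf("pages mapped: %u\n", map_calls);	/* prints 2 */
	return 0;
}
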
@@ -318,6 +324,8 @@
 		container_of(vm, struct i915_hw_ppgtt, base);
 	int i, j;
 
+	drm_mm_takedown(&vm->mm);
+
 	for (i = 0; i < ppgtt->num_pd_pages ; i++) {
 		if (ppgtt->pd_dma_addr[i]) {
 			pci_unmap_page(ppgtt->base.dev->pdev,
@@ -381,6 +389,8 @@
 	ppgtt->base.clear_range = gen8_ppgtt_clear_range;
 	ppgtt->base.insert_entries = gen8_ppgtt_insert_entries;
 	ppgtt->base.cleanup = gen8_ppgtt_cleanup;
+	ppgtt->base.start = 0;
+	ppgtt->base.total = ppgtt->num_pt_pages * GEN8_PTES_PER_PAGE * PAGE_SIZE;
 
 	BUG_ON(ppgtt->num_pd_pages > GEN8_LEGACY_PDPS);
 
@@ -573,21 +583,23 @@
 	unsigned act_pte = first_entry % I915_PPGTT_PT_ENTRIES;
 	struct sg_page_iter sg_iter;
 
-	pt_vaddr = kmap_atomic(ppgtt->pt_pages[act_pt]);
+	pt_vaddr = NULL;
 	for_each_sg_page(pages->sgl, &sg_iter, pages->nents, 0) {
-		dma_addr_t page_addr;
+		if (pt_vaddr == NULL)
+			pt_vaddr = kmap_atomic(ppgtt->pt_pages[act_pt]);
 
-		page_addr = sg_page_iter_dma_address(&sg_iter);
-		pt_vaddr[act_pte] = vm->pte_encode(page_addr, cache_level, true);
+		pt_vaddr[act_pte] =
+			vm->pte_encode(sg_page_iter_dma_address(&sg_iter),
+				       cache_level, true);
 		if (++act_pte == I915_PPGTT_PT_ENTRIES) {
 			kunmap_atomic(pt_vaddr);
+			pt_vaddr = NULL;
 			act_pt++;
-			pt_vaddr = kmap_atomic(ppgtt->pt_pages[act_pt]);
 			act_pte = 0;
-
 		}
 	}
-	kunmap_atomic(pt_vaddr);
+	if (pt_vaddr)
+		kunmap_atomic(pt_vaddr);
 }
 
 static void gen6_ppgtt_cleanup(struct i915_address_space *vm)
@@ -632,6 +644,8 @@
 	ppgtt->base.insert_entries = gen6_ppgtt_insert_entries;
 	ppgtt->base.cleanup = gen6_ppgtt_cleanup;
 	ppgtt->base.scratch = dev_priv->gtt.base.scratch;
+	ppgtt->base.start = 0;
+	ppgtt->base.total = GEN6_PPGTT_PD_ENTRIES * I915_PPGTT_PT_ENTRIES * PAGE_SIZE;
 	ppgtt->pt_pages = kcalloc(ppgtt->num_pd_entries, sizeof(struct page *),
 				  GFP_KERNEL);
 	if (!ppgtt->pt_pages)
@@ -1124,7 +1138,6 @@
 		if (ret)
 			DRM_DEBUG_KMS("Reservation failed\n");
 		obj->has_global_gtt_mapping = 1;
-		list_add(&vma->vma_link, &obj->vma_list);
 	}
 
 	dev_priv->gtt.base.start = start;
@@ -1400,6 +1413,8 @@
 {
 
 	struct i915_gtt *gtt = container_of(vm, struct i915_gtt, base);
+
+	drm_mm_takedown(&vm->mm);
 	iounmap(gtt->gsm);
 	teardown_scratch_page(vm->dev);
 }
@@ -1425,6 +1440,9 @@
 	dev_priv->gtt.base.clear_range = i915_ggtt_clear_range;
 	dev_priv->gtt.base.insert_entries = i915_ggtt_insert_entries;
 
+	if (unlikely(dev_priv->gtt.do_idle_maps))
+		DRM_INFO("applying Ironlake quirks for intel_iommu\n");
+
 	return 0;
 }
 
diff --git a/drivers/gpu/drm/i915/i915_gem_stolen.c b/drivers/gpu/drm/i915/i915_gem_stolen.c
index d284d89..1a24e84 100644
--- a/drivers/gpu/drm/i915/i915_gem_stolen.c
+++ b/drivers/gpu/drm/i915/i915_gem_stolen.c
@@ -250,7 +250,7 @@
 	}
 
 	sg = st->sgl;
-	sg->offset = offset;
+	sg->offset = 0;
 	sg->length = size;
 
 	sg_dma_address(sg) = (dma_addr_t)dev_priv->mm.stolen_base + offset;
@@ -420,6 +420,7 @@
 
 	list_add_tail(&obj->global_list, &dev_priv->mm.bound_list);
 	list_add_tail(&vma->mm_list, &ggtt->inactive_list);
+	i915_gem_object_pin_pages(obj);
 
 	return obj;
 
diff --git a/drivers/gpu/drm/i915/i915_gpu_error.c b/drivers/gpu/drm/i915/i915_gpu_error.c
index 79dcb8f..990cf8f 100644
--- a/drivers/gpu/drm/i915/i915_gpu_error.c
+++ b/drivers/gpu/drm/i915/i915_gpu_error.c
@@ -146,7 +146,10 @@
 		va_list tmp;
 
 		va_copy(tmp, args);
-		if (!__i915_error_seek(e, vsnprintf(NULL, 0, f, tmp)))
+		len = vsnprintf(NULL, 0, f, tmp);
+		va_end(tmp);
+
+		if (!__i915_error_seek(e, len))
 			return;
 	}
 
@@ -239,6 +242,9 @@
 				  unsigned ring)
 {
 	BUG_ON(ring >= I915_NUM_RINGS); /* shut up confused gcc */
+	if (!error->ring[ring].valid)
+		return;
+
 	err_printf(m, "%s command stream:\n", ring_str(ring));
 	err_printf(m, "  HEAD: 0x%08x\n", error->head[ring]);
 	err_printf(m, "  TAIL: 0x%08x\n", error->tail[ring]);
@@ -247,12 +253,11 @@
 	err_printf(m, "  IPEIR: 0x%08x\n", error->ipeir[ring]);
 	err_printf(m, "  IPEHR: 0x%08x\n", error->ipehr[ring]);
 	err_printf(m, "  INSTDONE: 0x%08x\n", error->instdone[ring]);
-	if (ring == RCS && INTEL_INFO(dev)->gen >= 4)
-		err_printf(m, "  BBADDR: 0x%08llx\n", error->bbaddr);
-	if (INTEL_INFO(dev)->gen >= 4)
+	if (INTEL_INFO(dev)->gen >= 4) {
+		err_printf(m, "  BBADDR: 0x%08llx\n", error->bbaddr[ring]);
 		err_printf(m, "  BB_STATE: 0x%08x\n", error->bbstate[ring]);
-	if (INTEL_INFO(dev)->gen >= 4)
 		err_printf(m, "  INSTPS: 0x%08x\n", error->instps[ring]);
+	}
 	err_printf(m, "  INSTPM: 0x%08x\n", error->instpm[ring]);
 	err_printf(m, "  FADDR: 0x%08x\n", error->faddr[ring]);
 	if (INTEL_INFO(dev)->gen >= 6) {
@@ -294,7 +299,6 @@
 	struct drm_device *dev = error_priv->dev;
 	drm_i915_private_t *dev_priv = dev->dev_private;
 	struct drm_i915_error_state *error = error_priv->error;
-	struct intel_ring_buffer *ring;
 	int i, j, page, offset, elt;
 
 	if (!error) {
@@ -329,7 +333,7 @@
 	if (INTEL_INFO(dev)->gen == 7)
 		err_printf(m, "ERR_INT: 0x%08x\n", error->err_int);
 
-	for_each_ring(ring, dev_priv, i)
+	for (i = 0; i < ARRAY_SIZE(error->ring); i++)
 		i915_ring_error_state(m, dev, error, i);
 
 	if (error->active_bo)
@@ -386,8 +390,7 @@
 			}
 		}
 
-		obj = error->ring[i].ctx;
-		if (obj) {
+		if ((obj = error->ring[i].ctx)) {
 			err_printf(m, "%s --- HW Context = 0x%08x\n",
 				   dev_priv->ring[i].name,
 				   obj->gtt_offset);
@@ -668,7 +671,8 @@
 			return NULL;
 
 		obj = ring->scratch.obj;
-		if (acthd >= i915_gem_obj_ggtt_offset(obj) &&
+		if (obj != NULL &&
+		    acthd >= i915_gem_obj_ggtt_offset(obj) &&
 		    acthd < i915_gem_obj_ggtt_offset(obj) + obj->base.size)
 			return i915_error_object_create(dev_priv, obj);
 	}
@@ -725,8 +729,9 @@
 		error->ipehr[ring->id] = I915_READ(RING_IPEHR(ring->mmio_base));
 		error->instdone[ring->id] = I915_READ(RING_INSTDONE(ring->mmio_base));
 		error->instps[ring->id] = I915_READ(RING_INSTPS(ring->mmio_base));
-		if (ring->id == RCS)
-			error->bbaddr = I915_READ64(BB_ADDR);
+		error->bbaddr[ring->id] = I915_READ(RING_BBADDR(ring->mmio_base));
+		if (INTEL_INFO(dev)->gen >= 8)
+			error->bbaddr[ring->id] |= (u64) I915_READ(RING_BBADDR_UDW(ring->mmio_base)) << 32;
 		error->bbstate[ring->id] = I915_READ(RING_BBSTATE(ring->mmio_base));
 	} else {
 		error->faddr[ring->id] = I915_READ(DMA_FADD_I8XX);
@@ -775,11 +780,17 @@
 				  struct drm_i915_error_state *error)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
-	struct intel_ring_buffer *ring;
 	struct drm_i915_gem_request *request;
 	int i, count;
 
-	for_each_ring(ring, dev_priv, i) {
+	for (i = 0; i < I915_NUM_RINGS; i++) {
+		struct intel_ring_buffer *ring = &dev_priv->ring[i];
+
+		if (ring->dev == NULL)
+			continue;
+
+		error->ring[i].valid = true;
+
 		i915_record_ring_state(dev, error, ring);
 
 		error->ring[i].batchbuffer =
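
The error-capture changes record BBADDR for every ring rather than only RCS, widening it to 64 bits on gen8 by OR-ing in the separate _UDW register, and tag each ring with a valid flag so the dump skips rings that were never initialised. A tiny example of composing a 64-bit value from two 32-bit register reads, with a fake register array in place of I915_READ():

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

/* Fake 32-bit register file standing in for I915_READ(). */
static uint32_t regs[2] = { 0xdeadbeef, 0x00000012 };

static uint32_t read_reg(unsigned int idx) { return regs[idx]; }

int main(void)
{
	uint64_t bbaddr;

	bbaddr = read_reg(0);				/* low dword          */
	bbaddr |= (uint64_t)read_reg(1) << 32;		/* upper dword (gen8+) */

	printf("BBADDR: 0x%016" PRIx64 "\n", bbaddr);	/* 0x00000012deadbeef */
	return 0;
}
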
diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
index f13d5ed..9fec711 100644
--- a/drivers/gpu/drm/i915/i915_irq.c
+++ b/drivers/gpu/drm/i915/i915_irq.c
@@ -62,7 +62,7 @@
 	[HPD_PORT_D] = PORTD_HOTPLUG_INT_EN
 };
 
-static const u32 hpd_status_gen4[] = {
+static const u32 hpd_status_g4x[] = {
 	[HPD_CRT] = CRT_HOTPLUG_INT_STATUS,
 	[HPD_SDVO_B] = SDVOB_HOTPLUG_INT_STATUS_G4X,
 	[HPD_SDVO_C] = SDVOC_HOTPLUG_INT_STATUS_G4X,
@@ -567,8 +567,7 @@
 
 		vbl_start = mode->crtc_vblank_start * mode->crtc_htotal;
 	} else {
-		enum transcoder cpu_transcoder =
-			intel_pipe_to_cpu_transcoder(dev_priv, pipe);
+		enum transcoder cpu_transcoder = (enum transcoder) pipe;
 		u32 htotal;
 
 		htotal = ((I915_READ(HTOTAL(cpu_transcoder)) >> 16) & 0x1fff) + 1;
@@ -600,7 +599,7 @@
 	 * Cook up a vblank counter by also checking the pixel
 	 * counter against vblank start.
 	 */
-	return ((high1 << 8) | low) + (pixel >= vbl_start);
+	return (((high1 << 8) | low) + (pixel >= vbl_start)) & 0xffffff;
 }
 
 static u32 gm45_get_vblank_counter(struct drm_device *dev, int pipe)
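
The cooked frame counter is now masked to 24 bits so it wraps exactly like the hardware counter it emulates. A small illustration of the wrap case the mask handles (the values are made up to sit right at the boundary):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t high1 = 0xffff, low = 0xff;	/* counter about to wrap */
	int past_vbl_start = 1;			/* pixel >= vbl_start    */
	uint32_t frame;

	/* Without the mask this would read 0x1000000 and diverge from the
	 * 24-bit hardware counter, which wraps back to 0 at this point. */
	frame = (((high1 << 8) | low) + past_vbl_start) & 0xffffff;

	printf("frame = 0x%06x\n", frame);	/* prints 0x000000 */
	return 0;
}
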
@@ -621,36 +620,15 @@
 #define __raw_i915_read32(dev_priv__, reg__) readl((dev_priv__)->regs + (reg__))
 #define __raw_i915_read16(dev_priv__, reg__) readw((dev_priv__)->regs + (reg__))
 
-static bool intel_pipe_in_vblank_locked(struct drm_device *dev, enum pipe pipe)
+static bool ilk_pipe_in_vblank_locked(struct drm_device *dev, enum pipe pipe)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	uint32_t status;
-	int reg;
 
-	if (IS_VALLEYVIEW(dev)) {
-		status = pipe == PIPE_A ?
-			I915_DISPLAY_PIPE_A_VBLANK_INTERRUPT :
-			I915_DISPLAY_PIPE_B_VBLANK_INTERRUPT;
-
-		reg = VLV_ISR;
-	} else if (IS_GEN2(dev)) {
-		status = pipe == PIPE_A ?
-			I915_DISPLAY_PIPE_A_VBLANK_INTERRUPT :
-			I915_DISPLAY_PIPE_B_VBLANK_INTERRUPT;
-
-		reg = ISR;
-	} else if (INTEL_INFO(dev)->gen < 5) {
-		status = pipe == PIPE_A ?
-			I915_DISPLAY_PIPE_A_VBLANK_INTERRUPT :
-			I915_DISPLAY_PIPE_B_VBLANK_INTERRUPT;
-
-		reg = ISR;
-	} else if (INTEL_INFO(dev)->gen < 7) {
+	if (INTEL_INFO(dev)->gen < 7) {
 		status = pipe == PIPE_A ?
 			DE_PIPEA_VBLANK :
 			DE_PIPEB_VBLANK;
-
-		reg = DEISR;
 	} else {
 		switch (pipe) {
 		default:
@@ -664,18 +642,14 @@
 			status = DE_PIPEC_VBLANK_IVB;
 			break;
 		}
-
-		reg = DEISR;
 	}
 
-	if (IS_GEN2(dev))
-		return __raw_i915_read16(dev_priv, reg) & status;
-	else
-		return __raw_i915_read32(dev_priv, reg) & status;
+	return __raw_i915_read32(dev_priv, DEISR) & status;
 }
 
 static int i915_get_crtc_scanoutpos(struct drm_device *dev, int pipe,
-			     int *vpos, int *hpos, ktime_t *stime, ktime_t *etime)
+				    unsigned int flags, int *vpos, int *hpos,
+				    ktime_t *stime, ktime_t *etime)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
@@ -698,6 +672,12 @@
 	vbl_start = mode->crtc_vblank_start;
 	vbl_end = mode->crtc_vblank_end;
 
+	if (mode->flags & DRM_MODE_FLAG_INTERLACE) {
+		vbl_start = DIV_ROUND_UP(vbl_start, 2);
+		vbl_end /= 2;
+		vtotal /= 2;
+	}
+
 	ret |= DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_ACCURATE;
 
 	/*
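
For interlaced modes the mode's vertical timings count frame lines while the scanline counter advances per field, so vbl_start, vbl_end and vtotal are halved (vbl_start rounded up) before any comparison. A quick illustration of the adjustment, assuming roughly 1080i-like numbers:

#include <stdio.h>

#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

int main(void)
{
	/* Roughly 1080i vertical timings, expressed in frame lines. */
	int vtotal = 1125, vbl_start = 1084, vbl_end = 1125;
	int interlaced = 1;

	if (interlaced) {
		vbl_start = DIV_ROUND_UP(vbl_start, 2);	/* 542 */
		vbl_end /= 2;				/* 562 */
		vtotal /= 2;				/* 562 */
	}
	printf("per-field: start %d end %d total %d\n",
	       vbl_start, vbl_end, vtotal);
	return 0;
}
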
@@ -722,17 +702,42 @@
 		else
 			position = __raw_i915_read32(dev_priv, PIPEDSL(pipe)) & DSL_LINEMASK_GEN3;
 
-		/*
-		 * The scanline counter increments at the leading edge
-		 * of hsync, ie. it completely misses the active portion
-		 * of the line. Fix up the counter at both edges of vblank
-		 * to get a more accurate picture whether we're in vblank
-		 * or not.
-		 */
-		in_vbl = intel_pipe_in_vblank_locked(dev, pipe);
-		if ((in_vbl && position == vbl_start - 1) ||
-		    (!in_vbl && position == vbl_end - 1))
-			position = (position + 1) % vtotal;
+		if (HAS_PCH_SPLIT(dev)) {
+			/*
+			 * The scanline counter increments at the leading edge
+			 * of hsync, ie. it completely misses the active portion
+			 * of the line. Fix up the counter at both edges of vblank
+			 * to get a more accurate picture whether we're in vblank
+			 * or not.
+			 */
+			in_vbl = ilk_pipe_in_vblank_locked(dev, pipe);
+			if ((in_vbl && position == vbl_start - 1) ||
+			    (!in_vbl && position == vbl_end - 1))
+				position = (position + 1) % vtotal;
+		} else {
+			/*
+			 * ISR vblank status bits don't work the way we'd want
+			 * them to work on non-PCH platforms (for
+			 * ilk_pipe_in_vblank_locked()), and there doesn't
+			 * appear any other way to determine if we're currently
+			 * in vblank.
+			 *
+			 * Instead let's assume that we're already in vblank if
+			 * we got called from the vblank interrupt and the
+			 * scanline counter value indicates that we're on the
+			 * line just prior to vblank start. This should result
+			 * in the correct answer, unless the vblank interrupt
+			 * delivery really got delayed for almost exactly one
+			 * full frame/field.
+			 */
+			if (flags & DRM_CALLED_FROM_VBLIRQ &&
+			    position == vbl_start - 1) {
+				position = (position + 1) % vtotal;
+
+				/* Signal this correction as "applied". */
+				ret |= 0x8;
+			}
+		}
 	} else {
 		/* Have access to pixelcount since start of frame.
 		 * We can split this into vertical and horizontal
@@ -809,7 +814,8 @@
 	/* Helper routine in DRM core does all the work: */
 	return drm_calc_vbltimestamp_from_scanoutpos(dev, pipe, max_error,
 						     vblank_time, flags,
-						     crtc);
+						     crtc,
+						     &to_intel_crtc(crtc)->config.adjusted_mode);
 }
 
 static bool intel_hpd_irq_event(struct drm_device *dev,
@@ -1015,10 +1021,8 @@
 	/* sysfs frequency interfaces may have snuck in while servicing the
 	 * interrupt
 	 */
-	if (new_delay < (int)dev_priv->rps.min_delay)
-		new_delay = dev_priv->rps.min_delay;
-	if (new_delay > (int)dev_priv->rps.max_delay)
-		new_delay = dev_priv->rps.max_delay;
+	new_delay = clamp_t(int, new_delay,
+			    dev_priv->rps.min_delay, dev_priv->rps.max_delay);
 	dev_priv->rps.last_adj = new_delay - dev_priv->rps.cur_delay;
 
 	if (IS_VALLEYVIEW(dev_priv->dev))
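
The RPS clamping above is collapsed into a single clamp_t(), which also drops the ad-hoc signed casts. A simplified local stand-in for the kernel macro, just to show the equivalent behaviour:

#include <stdio.h>

/* Simplified stand-in for the kernel's clamp_t() macro. */
#define clamp_t(type, val, lo, hi) \
	((type)(val) < (type)(lo) ? (type)(lo) : \
	 (type)(val) > (type)(hi) ? (type)(hi) : (type)(val))

int main(void)
{
	int min_delay = 4, max_delay = 20;
	int requested[] = { -3, 10, 99 };

	for (int i = 0; i < 3; i++)
		printf("%d -> %d\n", requested[i],
		       clamp_t(int, requested[i], min_delay, max_delay));
	return 0;
}
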
@@ -1235,9 +1239,10 @@
 	spin_lock(&dev_priv->irq_lock);
 	for (i = 1; i < HPD_NUM_PINS; i++) {
 
-		WARN(((hpd[i] & hotplug_trigger) &&
-		      dev_priv->hpd_stats[i].hpd_mark != HPD_ENABLED),
-		     "Received HPD interrupt although disabled\n");
+		WARN_ONCE(hpd[i] & hotplug_trigger &&
+			  dev_priv->hpd_stats[i].hpd_mark == HPD_DISABLED,
+			  "Received HPD interrupt (0x%08x) on pin %d (0x%08x) although disabled\n",
+			  hotplug_trigger, i, hpd[i]);
 
 		if (!(hpd[i] & hotplug_trigger) ||
 		    dev_priv->hpd_stats[i].hpd_mark != HPD_ENABLED)
@@ -1474,6 +1479,9 @@
 
 			intel_hpd_irq_handler(dev, hotplug_trigger, hpd_status_i915);
 
+			if (hotplug_status & DP_AUX_CHANNEL_MASK_INT_STATUS_G4X)
+				dp_aux_irq_handler(dev);
+
 			I915_WRITE(PORT_HOTPLUG_STAT, hotplug_status);
 			I915_READ(PORT_HOTPLUG_STAT);
 		}
@@ -1993,7 +2001,7 @@
 			kobject_uevent_env(&dev->primary->kdev->kobj,
 					   KOBJ_CHANGE, reset_done_event);
 		} else {
-			atomic_set(&error->reset_counter, I915_WEDGED);
+			atomic_set_mask(I915_WEDGED, &error->reset_counter);
 		}
 
 		/*
@@ -3140,10 +3148,10 @@
  * Returns true when a page flip has completed.
  */
 static bool i8xx_handle_vblank(struct drm_device *dev,
-			       int pipe, u16 iir)
+			       int plane, int pipe, u32 iir)
 {
 	drm_i915_private_t *dev_priv = dev->dev_private;
-	u16 flip_pending = DISPLAY_PLANE_FLIP_PENDING(pipe);
+	u16 flip_pending = DISPLAY_PLANE_FLIP_PENDING(plane);
 
 	if (!drm_handle_vblank(dev, pipe))
 		return false;
@@ -3151,7 +3159,7 @@
 	if ((iir & flip_pending) == 0)
 		return false;
 
-	intel_prepare_page_flip(dev, pipe);
+	intel_prepare_page_flip(dev, plane);
 
 	/* We detect FlipDone by looking for the change in PendingFlip from '1'
 	 * to '0' on the following vblank, i.e. IIR has the Pendingflip
@@ -3220,9 +3228,13 @@
 			notify_ring(dev, &dev_priv->ring[RCS]);
 
 		for_each_pipe(pipe) {
+			int plane = pipe;
+			if (HAS_FBC(dev))
+				plane = !plane;
+
 			if (pipe_stats[pipe] & PIPE_VBLANK_INTERRUPT_STATUS &&
-			    i8xx_handle_vblank(dev, pipe, iir))
-				flip_mask &= ~DISPLAY_PLANE_FLIP_PENDING(pipe);
+			    i8xx_handle_vblank(dev, plane, pipe, iir))
+				flip_mask &= ~DISPLAY_PLANE_FLIP_PENDING(plane);
 
 			if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS)
 				i9xx_pipe_crc_irq_handler(dev, pipe);
@@ -3418,7 +3430,7 @@
 
 		for_each_pipe(pipe) {
 			int plane = pipe;
-			if (IS_MOBILE(dev))
+			if (HAS_FBC(dev))
 				plane = !plane;
 
 			if (pipe_stats[pipe] & PIPE_VBLANK_INTERRUPT_STATUS &&
@@ -3655,7 +3667,11 @@
 				  hotplug_status);
 
 			intel_hpd_irq_handler(dev, hotplug_trigger,
-					      IS_G4X(dev) ? hpd_status_gen4 : hpd_status_i915);
+					      IS_G4X(dev) ? hpd_status_g4x : hpd_status_i915);
+
+			if (IS_G4X(dev) &&
+			    (hotplug_status & DP_AUX_CHANNEL_MASK_INT_STATUS_G4X))
+				dp_aux_irq_handler(dev);
 
 			I915_WRITE(PORT_HOTPLUG_STAT, hotplug_status);
 			I915_READ(PORT_HOTPLUG_STAT);
@@ -3893,8 +3909,8 @@
 	dev_priv->pc8.regsave.gtier = I915_READ(GTIER);
 	dev_priv->pc8.regsave.gen6_pmimr = I915_READ(GEN6_PMIMR);
 
-	ironlake_disable_display_irq(dev_priv, ~DE_PCH_EVENT_IVB);
-	ibx_disable_display_interrupt(dev_priv, ~SDE_HOTPLUG_MASK_CPT);
+	ironlake_disable_display_irq(dev_priv, 0xffffffff);
+	ibx_disable_display_interrupt(dev_priv, 0xffffffff);
 	ilk_disable_gt_irq(dev_priv, 0xffffffff);
 	snb_disable_pm_irq(dev_priv, 0xffffffff);
 
@@ -3908,34 +3924,26 @@
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	unsigned long irqflags;
-	uint32_t val, expected;
+	uint32_t val;
 
 	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
 
 	val = I915_READ(DEIMR);
-	expected = ~DE_PCH_EVENT_IVB;
-	WARN(val != expected, "DEIMR is 0x%08x, not 0x%08x\n", val, expected);
+	WARN(val != 0xffffffff, "DEIMR is 0x%08x\n", val);
 
-	val = I915_READ(SDEIMR) & ~SDE_HOTPLUG_MASK_CPT;
-	expected = ~SDE_HOTPLUG_MASK_CPT;
-	WARN(val != expected, "SDEIMR non-HPD bits are 0x%08x, not 0x%08x\n",
-	     val, expected);
+	val = I915_READ(SDEIMR);
+	WARN(val != 0xffffffff, "SDEIMR is 0x%08x\n", val);
 
 	val = I915_READ(GTIMR);
-	expected = 0xffffffff;
-	WARN(val != expected, "GTIMR is 0x%08x, not 0x%08x\n", val, expected);
+	WARN(val != 0xffffffff, "GTIMR is 0x%08x\n", val);
 
 	val = I915_READ(GEN6_PMIMR);
-	expected = 0xffffffff;
-	WARN(val != expected, "GEN6_PMIMR is 0x%08x, not 0x%08x\n", val,
-	     expected);
+	WARN(val != 0xffffffff, "GEN6_PMIMR is 0x%08x\n", val);
 
 	dev_priv->pc8.irqs_disabled = false;
 
 	ironlake_enable_display_irq(dev_priv, ~dev_priv->pc8.regsave.deimr);
-	ibx_enable_display_interrupt(dev_priv,
-				     ~dev_priv->pc8.regsave.sdeimr &
-				     ~SDE_HOTPLUG_MASK_CPT);
+	ibx_enable_display_interrupt(dev_priv, ~dev_priv->pc8.regsave.sdeimr);
 	ilk_enable_gt_irq(dev_priv, ~dev_priv->pc8.regsave.gtimr);
 	snb_enable_pm_irq(dev_priv, ~dev_priv->pc8.regsave.gen6_pmimr);
 	I915_WRITE(GTIER, dev_priv->pc8.regsave.gtier);
diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
index ee274212..a48b7ca 100644
--- a/drivers/gpu/drm/i915/i915_reg.h
+++ b/drivers/gpu/drm/i915/i915_reg.h
@@ -193,10 +193,13 @@
 #define   MI_SCENE_COUNT	(1 << 3) /* just increment scene count */
 #define   MI_END_SCENE		(1 << 4) /* flush binner and incr scene count */
 #define   MI_INVALIDATE_ISP	(1 << 5) /* invalidate indirect state pointers */
+#define MI_REPORT_HEAD		MI_INSTR(0x07, 0)
+#define MI_ARB_ON_OFF		MI_INSTR(0x08, 0)
+#define   MI_ARB_ENABLE			(1<<0)
+#define   MI_ARB_DISABLE		(0<<0)
 #define MI_BATCH_BUFFER_END	MI_INSTR(0x0a, 0)
 #define MI_SUSPEND_FLUSH	MI_INSTR(0x0b, 0)
 #define   MI_SUSPEND_FLUSH_EN	(1<<0)
-#define MI_REPORT_HEAD		MI_INSTR(0x07, 0)
 #define MI_OVERLAY_FLIP		MI_INSTR(0x11, 0)
 #define   MI_OVERLAY_CONTINUE	(0x0<<21)
 #define   MI_OVERLAY_ON		(0x1<<21)
@@ -212,10 +215,24 @@
 #define   MI_DISPLAY_FLIP_IVB_SPRITE_B (3 << 19)
 #define   MI_DISPLAY_FLIP_IVB_PLANE_C  (4 << 19)
 #define   MI_DISPLAY_FLIP_IVB_SPRITE_C (5 << 19)
-#define MI_ARB_ON_OFF		MI_INSTR(0x08, 0)
-#define   MI_ARB_ENABLE			(1<<0)
-#define   MI_ARB_DISABLE		(0<<0)
-
+#define MI_SEMAPHORE_MBOX	MI_INSTR(0x16, 1) /* gen6+ */
+#define   MI_SEMAPHORE_GLOBAL_GTT    (1<<22)
+#define   MI_SEMAPHORE_UPDATE	    (1<<21)
+#define   MI_SEMAPHORE_COMPARE	    (1<<20)
+#define   MI_SEMAPHORE_REGISTER	    (1<<18)
+#define   MI_SEMAPHORE_SYNC_VR	    (0<<16) /* RCS  wait for VCS  (RVSYNC) */
+#define   MI_SEMAPHORE_SYNC_VER	    (1<<16) /* RCS  wait for VECS (RVESYNC) */
+#define   MI_SEMAPHORE_SYNC_BR	    (2<<16) /* RCS  wait for BCS  (RBSYNC) */
+#define   MI_SEMAPHORE_SYNC_BV	    (0<<16) /* VCS  wait for BCS  (VBSYNC) */
+#define   MI_SEMAPHORE_SYNC_VEV	    (1<<16) /* VCS  wait for VECS (VVESYNC) */
+#define   MI_SEMAPHORE_SYNC_RV	    (2<<16) /* VCS  wait for RCS  (VRSYNC) */
+#define   MI_SEMAPHORE_SYNC_RB	    (0<<16) /* BCS  wait for RCS  (BRSYNC) */
+#define   MI_SEMAPHORE_SYNC_VEB	    (1<<16) /* BCS  wait for VECS (BVESYNC) */
+#define   MI_SEMAPHORE_SYNC_VB	    (2<<16) /* BCS  wait for VCS  (BVSYNC) */
+#define   MI_SEMAPHORE_SYNC_BVE	    (0<<16) /* VECS wait for BCS  (VEBSYNC) */
+#define   MI_SEMAPHORE_SYNC_VVE	    (1<<16) /* VECS wait for VCS  (VEVSYNC) */
+#define   MI_SEMAPHORE_SYNC_RVE	    (2<<16) /* VECS wait for RCS  (VERSYNC) */
+#define   MI_SEMAPHORE_SYNC_INVALID  (3<<16)
 #define MI_SET_CONTEXT		MI_INSTR(0x18, 0)
 #define   MI_MM_SPACE_GTT		(1<<8)
 #define   MI_MM_SPACE_PHYSICAL		(0<<8)
@@ -235,7 +252,7 @@
  */
 #define MI_LOAD_REGISTER_IMM(x)	MI_INSTR(0x22, 2*x-1)
 #define MI_STORE_REGISTER_MEM(x) MI_INSTR(0x24, 2*x-1)
-#define  MI_SRM_LRM_GLOBAL_GTT		(1<<22)
+#define   MI_SRM_LRM_GLOBAL_GTT		(1<<22)
 #define MI_FLUSH_DW		MI_INSTR(0x26, 1) /* for GEN6 */
 #define   MI_FLUSH_DW_STORE_INDEX	(1<<21)
 #define   MI_INVALIDATE_TLB		(1<<18)
@@ -246,30 +263,13 @@
 #define MI_BATCH_BUFFER		MI_INSTR(0x30, 1)
 #define   MI_BATCH_NON_SECURE		(1)
 /* for snb/ivb/vlv this also means "batch in ppgtt" when ppgtt is enabled. */
-#define   MI_BATCH_NON_SECURE_I965 	(1<<8)
+#define   MI_BATCH_NON_SECURE_I965	(1<<8)
 #define   MI_BATCH_PPGTT_HSW		(1<<8)
-#define   MI_BATCH_NON_SECURE_HSW 	(1<<13)
+#define   MI_BATCH_NON_SECURE_HSW	(1<<13)
 #define MI_BATCH_BUFFER_START	MI_INSTR(0x31, 0)
 #define   MI_BATCH_GTT		    (2<<6) /* aliased with (1<<7) on gen4 */
 #define MI_BATCH_BUFFER_START_GEN8	MI_INSTR(0x31, 1)
-#define MI_SEMAPHORE_MBOX	MI_INSTR(0x16, 1) /* gen6+ */
-#define  MI_SEMAPHORE_GLOBAL_GTT    (1<<22)
-#define  MI_SEMAPHORE_UPDATE	    (1<<21)
-#define  MI_SEMAPHORE_COMPARE	    (1<<20)
-#define  MI_SEMAPHORE_REGISTER	    (1<<18)
-#define  MI_SEMAPHORE_SYNC_VR	    (0<<16) /* RCS  wait for VCS  (RVSYNC) */
-#define  MI_SEMAPHORE_SYNC_VER	    (1<<16) /* RCS  wait for VECS (RVESYNC) */
-#define  MI_SEMAPHORE_SYNC_BR	    (2<<16) /* RCS  wait for BCS  (RBSYNC) */
-#define  MI_SEMAPHORE_SYNC_BV	    (0<<16) /* VCS  wait for BCS  (VBSYNC) */
-#define  MI_SEMAPHORE_SYNC_VEV	    (1<<16) /* VCS  wait for VECS (VVESYNC) */
-#define  MI_SEMAPHORE_SYNC_RV	    (2<<16) /* VCS  wait for RCS  (VRSYNC) */
-#define  MI_SEMAPHORE_SYNC_RB	    (0<<16) /* BCS  wait for RCS  (BRSYNC) */
-#define  MI_SEMAPHORE_SYNC_VEB	    (1<<16) /* BCS  wait for VECS (BVESYNC) */
-#define  MI_SEMAPHORE_SYNC_VB	    (2<<16) /* BCS  wait for VCS  (BVSYNC) */
-#define  MI_SEMAPHORE_SYNC_BVE	    (0<<16) /* VECS wait for BCS  (VEBSYNC) */
-#define  MI_SEMAPHORE_SYNC_VVE	    (1<<16) /* VECS wait for VCS  (VEVSYNC) */
-#define  MI_SEMAPHORE_SYNC_RVE	    (2<<16) /* VECS wait for RCS  (VERSYNC) */
-#define  MI_SEMAPHORE_SYNC_INVALID  (3<<16)
+
 
 #define MI_PREDICATE_RESULT_2	(0x2214)
 #define  LOWER_SLICE_ENABLED	(1<<0)
@@ -354,6 +354,7 @@
 #define   IOSF_BYTE_ENABLES_SHIFT		4
 #define   IOSF_BAR_SHIFT			1
 #define   IOSF_SB_BUSY				(1<<0)
+#define   IOSF_PORT_BUNIT			0x3
 #define   IOSF_PORT_PUNIT			0x4
 #define   IOSF_PORT_NC				0x11
 #define   IOSF_PORT_DPIO			0x12
@@ -361,12 +362,21 @@
 #define   IOSF_PORT_CCK				0x14
 #define   IOSF_PORT_CCU				0xA9
 #define   IOSF_PORT_GPS_CORE			0x48
+#define   IOSF_PORT_FLISDSI			0x1B
 #define VLV_IOSF_DATA				(VLV_DISPLAY_BASE + 0x2104)
 #define VLV_IOSF_ADDR				(VLV_DISPLAY_BASE + 0x2108)
 
+/* See configdb bunit SB addr map */
+#define BUNIT_REG_BISOC				0x11
+
 #define PUNIT_OPCODE_REG_READ			6
 #define PUNIT_OPCODE_REG_WRITE			7
 
+#define PUNIT_REG_DSPFREQ			0x36
+#define   DSPFREQSTAT_SHIFT			30
+#define   DSPFREQSTAT_MASK			(0x3 << DSPFREQSTAT_SHIFT)
+#define   DSPFREQGUAR_SHIFT			14
+#define   DSPFREQGUAR_MASK			(0x3 << DSPFREQGUAR_SHIFT)
 #define PUNIT_REG_PWRGT_CTRL			0x60
 #define PUNIT_REG_PWRGT_STATUS			0x61
 #define	  PUNIT_CLK_GATE			1
@@ -429,6 +439,7 @@
 #define  DSI_PLL_N1_DIV_MASK			(3 << 16)
 #define  DSI_PLL_M1_DIV_SHIFT			0
 #define  DSI_PLL_M1_DIV_MASK			(0x1ff << 0)
+#define CCK_DISPLAY_CLOCK_CONTROL		0x6b
 
 /*
  * DPIO - a special bus for various display related registers to hide behind
@@ -447,15 +458,13 @@
 #define  DPIO_SFR_BYPASS		(1<<1)
 #define  DPIO_CMNRST			(1<<0)
 
-#define _DPIO_TX3_SWING_CTL4_A		0x690
-#define _DPIO_TX3_SWING_CTL4_B		0x2a90
-#define DPIO_TX3_SWING_CTL4(pipe) _PIPE(pipe, _DPIO_TX3_SWING_CTL4_A, \
-					_DPIO_TX3_SWING_CTL4_B)
+#define DPIO_PHY(pipe)			((pipe) >> 1)
+#define DPIO_PHY_IOSF_PORT(phy)		(dev_priv->dpio_phy_iosf_port[phy])
 
 /*
  * Per pipe/PLL DPIO regs
  */
-#define _DPIO_DIV_A			0x800c
+#define _VLV_PLL_DW3_CH0		0x800c
 #define   DPIO_POST_DIV_SHIFT		(28) /* 3 bits */
 #define   DPIO_POST_DIV_DAC		0
 #define   DPIO_POST_DIV_HDMIDP		1 /* DAC 225-400M rate */
@@ -468,10 +477,10 @@
 #define   DPIO_ENABLE_CALIBRATION	(1<<11)
 #define   DPIO_M1DIV_SHIFT		(8) /* 3 bits */
 #define   DPIO_M2DIV_MASK		0xff
-#define _DPIO_DIV_B			0x802c
-#define DPIO_DIV(pipe) _PIPE(pipe, _DPIO_DIV_A, _DPIO_DIV_B)
+#define _VLV_PLL_DW3_CH1		0x802c
+#define VLV_PLL_DW3(ch) _PIPE(ch, _VLV_PLL_DW3_CH0, _VLV_PLL_DW3_CH1)
 
-#define _DPIO_REFSFR_A			0x8014
+#define _VLV_PLL_DW5_CH0		0x8014
 #define   DPIO_REFSEL_OVERRIDE		27
 #define   DPIO_PLL_MODESEL_SHIFT	24 /* 3 bits */
 #define   DPIO_BIAS_CURRENT_CTL_SHIFT	21 /* 3 bits, always 0x7 */
@@ -479,118 +488,112 @@
 #define   DPIO_PLL_REFCLK_SEL_MASK	3
 #define   DPIO_DRIVER_CTL_SHIFT		12 /* always set to 0x8 */
 #define   DPIO_CLK_BIAS_CTL_SHIFT	8 /* always set to 0x5 */
-#define _DPIO_REFSFR_B			0x8034
-#define DPIO_REFSFR(pipe) _PIPE(pipe, _DPIO_REFSFR_A, _DPIO_REFSFR_B)
+#define _VLV_PLL_DW5_CH1		0x8034
+#define VLV_PLL_DW5(ch) _PIPE(ch, _VLV_PLL_DW5_CH0, _VLV_PLL_DW5_CH1)
 
-#define _DPIO_CORE_CLK_A		0x801c
-#define _DPIO_CORE_CLK_B		0x803c
-#define DPIO_CORE_CLK(pipe) _PIPE(pipe, _DPIO_CORE_CLK_A, _DPIO_CORE_CLK_B)
+#define _VLV_PLL_DW7_CH0		0x801c
+#define _VLV_PLL_DW7_CH1		0x803c
+#define VLV_PLL_DW7(ch) _PIPE(ch, _VLV_PLL_DW7_CH0, _VLV_PLL_DW7_CH1)
 
-#define _DPIO_IREF_CTL_A		0x8040
-#define _DPIO_IREF_CTL_B		0x8060
-#define DPIO_IREF_CTL(pipe) _PIPE(pipe, _DPIO_IREF_CTL_A, _DPIO_IREF_CTL_B)
+#define _VLV_PLL_DW8_CH0		0x8040
+#define _VLV_PLL_DW8_CH1		0x8060
+#define VLV_PLL_DW8(ch) _PIPE(ch, _VLV_PLL_DW8_CH0, _VLV_PLL_DW8_CH1)
 
-#define DPIO_IREF_BCAST			0xc044
-#define _DPIO_IREF_A			0x8044
-#define _DPIO_IREF_B			0x8064
-#define DPIO_IREF(pipe) _PIPE(pipe, _DPIO_IREF_A, _DPIO_IREF_B)
+#define VLV_PLL_DW9_BCAST		0xc044
+#define _VLV_PLL_DW9_CH0		0x8044
+#define _VLV_PLL_DW9_CH1		0x8064
+#define VLV_PLL_DW9(ch) _PIPE(ch, _VLV_PLL_DW9_CH0, _VLV_PLL_DW9_CH1)
 
-#define _DPIO_PLL_CML_A			0x804c
-#define _DPIO_PLL_CML_B			0x806c
-#define DPIO_PLL_CML(pipe) _PIPE(pipe, _DPIO_PLL_CML_A, _DPIO_PLL_CML_B)
+#define _VLV_PLL_DW10_CH0		0x8048
+#define _VLV_PLL_DW10_CH1		0x8068
+#define VLV_PLL_DW10(ch) _PIPE(ch, _VLV_PLL_DW10_CH0, _VLV_PLL_DW10_CH1)
 
-#define _DPIO_LPF_COEFF_A		0x8048
-#define _DPIO_LPF_COEFF_B		0x8068
-#define DPIO_LPF_COEFF(pipe) _PIPE(pipe, _DPIO_LPF_COEFF_A, _DPIO_LPF_COEFF_B)
+#define _VLV_PLL_DW11_CH0		0x804c
+#define _VLV_PLL_DW11_CH1		0x806c
+#define VLV_PLL_DW11(ch) _PIPE(ch, _VLV_PLL_DW11_CH0, _VLV_PLL_DW11_CH1)
 
-#define DPIO_CALIBRATION		0x80ac
+/* Spec for ref block start counts at DW10 */
+#define VLV_REF_DW13			0x80ac
 
-#define DPIO_FASTCLK_DISABLE		0x8100
+#define VLV_CMN_DW0			0x8100
 
 /*
  * Per DDI channel DPIO regs
  */
 
-#define _DPIO_PCS_TX_0			0x8200
-#define _DPIO_PCS_TX_1			0x8400
+#define _VLV_PCS_DW0_CH0		0x8200
+#define _VLV_PCS_DW0_CH1		0x8400
 #define   DPIO_PCS_TX_LANE2_RESET	(1<<16)
 #define   DPIO_PCS_TX_LANE1_RESET	(1<<7)
-#define DPIO_PCS_TX(port) _PORT(port, _DPIO_PCS_TX_0, _DPIO_PCS_TX_1)
+#define VLV_PCS_DW0(ch) _PORT(ch, _VLV_PCS_DW0_CH0, _VLV_PCS_DW0_CH1)
 
-#define _DPIO_PCS_CLK_0			0x8204
-#define _DPIO_PCS_CLK_1			0x8404
+#define _VLV_PCS_DW1_CH0		0x8204
+#define _VLV_PCS_DW1_CH1		0x8404
 #define   DPIO_PCS_CLK_CRI_RXEB_EIOS_EN	(1<<22)
 #define   DPIO_PCS_CLK_CRI_RXDIGFILTSG_EN (1<<21)
 #define   DPIO_PCS_CLK_DATAWIDTH_SHIFT	(6)
 #define   DPIO_PCS_CLK_SOFT_RESET	(1<<5)
-#define DPIO_PCS_CLK(port) _PORT(port, _DPIO_PCS_CLK_0, _DPIO_PCS_CLK_1)
+#define VLV_PCS_DW1(ch) _PORT(ch, _VLV_PCS_DW1_CH0, _VLV_PCS_DW1_CH1)
 
-#define _DPIO_PCS_CTL_OVR1_A		0x8224
-#define _DPIO_PCS_CTL_OVR1_B		0x8424
-#define DPIO_PCS_CTL_OVER1(port) _PORT(port, _DPIO_PCS_CTL_OVR1_A, \
-				       _DPIO_PCS_CTL_OVR1_B)
+#define _VLV_PCS_DW8_CH0		0x8220
+#define _VLV_PCS_DW8_CH1		0x8420
+#define VLV_PCS_DW8(ch) _PORT(ch, _VLV_PCS_DW8_CH0, _VLV_PCS_DW8_CH1)
 
-#define _DPIO_PCS_STAGGER0_A		0x822c
-#define _DPIO_PCS_STAGGER0_B		0x842c
-#define DPIO_PCS_STAGGER0(port) _PORT(port, _DPIO_PCS_STAGGER0_A, \
-				      _DPIO_PCS_STAGGER0_B)
+#define _VLV_PCS01_DW8_CH0		0x0220
+#define _VLV_PCS23_DW8_CH0		0x0420
+#define _VLV_PCS01_DW8_CH1		0x2620
+#define _VLV_PCS23_DW8_CH1		0x2820
+#define VLV_PCS01_DW8(port) _PORT(port, _VLV_PCS01_DW8_CH0, _VLV_PCS01_DW8_CH1)
+#define VLV_PCS23_DW8(port) _PORT(port, _VLV_PCS23_DW8_CH0, _VLV_PCS23_DW8_CH1)
 
-#define _DPIO_PCS_STAGGER1_A		0x8230
-#define _DPIO_PCS_STAGGER1_B		0x8430
-#define DPIO_PCS_STAGGER1(port) _PORT(port, _DPIO_PCS_STAGGER1_A, \
-				      _DPIO_PCS_STAGGER1_B)
+#define _VLV_PCS_DW9_CH0		0x8224
+#define _VLV_PCS_DW9_CH1		0x8424
+#define	VLV_PCS_DW9(ch) _PORT(ch, _VLV_PCS_DW9_CH0, _VLV_PCS_DW9_CH1)
 
-#define _DPIO_PCS_CLOCKBUF0_A		0x8238
-#define _DPIO_PCS_CLOCKBUF0_B		0x8438
-#define DPIO_PCS_CLOCKBUF0(port) _PORT(port, _DPIO_PCS_CLOCKBUF0_A, \
-				       _DPIO_PCS_CLOCKBUF0_B)
+#define _VLV_PCS_DW11_CH0		0x822c
+#define _VLV_PCS_DW11_CH1		0x842c
+#define VLV_PCS_DW11(ch) _PORT(ch, _VLV_PCS_DW11_CH0, _VLV_PCS_DW11_CH1)
 
-#define _DPIO_PCS_CLOCKBUF8_A		0x825c
-#define _DPIO_PCS_CLOCKBUF8_B		0x845c
-#define DPIO_PCS_CLOCKBUF8(port) _PORT(port, _DPIO_PCS_CLOCKBUF8_A, \
-				       _DPIO_PCS_CLOCKBUF8_B)
+#define _VLV_PCS_DW12_CH0		0x8230
+#define _VLV_PCS_DW12_CH1		0x8430
+#define VLV_PCS_DW12(ch) _PORT(ch, _VLV_PCS_DW12_CH0, _VLV_PCS_DW12_CH1)
 
-#define _DPIO_TX_SWING_CTL2_A		0x8288
-#define _DPIO_TX_SWING_CTL2_B		0x8488
-#define DPIO_TX_SWING_CTL2(port) _PORT(port, _DPIO_TX_SWING_CTL2_A, \
-				       _DPIO_TX_SWING_CTL2_B)
+#define _VLV_PCS_DW14_CH0		0x8238
+#define _VLV_PCS_DW14_CH1		0x8438
+#define	VLV_PCS_DW14(ch) _PORT(ch, _VLV_PCS_DW14_CH0, _VLV_PCS_DW14_CH1)
 
-#define _DPIO_TX_SWING_CTL3_A		0x828c
-#define _DPIO_TX_SWING_CTL3_B		0x848c
-#define DPIO_TX_SWING_CTL3(port) _PORT(port, _DPIO_TX_SWING_CTL3_A, \
-				       _DPIO_TX_SWING_CTL3_B)
+#define _VLV_PCS_DW23_CH0		0x825c
+#define _VLV_PCS_DW23_CH1		0x845c
+#define VLV_PCS_DW23(ch) _PORT(ch, _VLV_PCS_DW23_CH0, _VLV_PCS_DW23_CH1)
 
-#define _DPIO_TX_SWING_CTL4_A		0x8290
-#define _DPIO_TX_SWING_CTL4_B		0x8490
-#define DPIO_TX_SWING_CTL4(port) _PORT(port, _DPIO_TX_SWING_CTL4_A, \
-				       _DPIO_TX_SWING_CTL4_B)
+#define _VLV_TX_DW2_CH0			0x8288
+#define _VLV_TX_DW2_CH1			0x8488
+#define VLV_TX_DW2(ch) _PORT(ch, _VLV_TX_DW2_CH0, _VLV_TX_DW2_CH1)
 
-#define _DPIO_TX_OCALINIT_0		0x8294
-#define _DPIO_TX_OCALINIT_1		0x8494
+#define _VLV_TX_DW3_CH0			0x828c
+#define _VLV_TX_DW3_CH1			0x848c
+#define VLV_TX_DW3(ch) _PORT(ch, _VLV_TX_DW3_CH0, _VLV_TX_DW3_CH1)
+
+#define _VLV_TX_DW4_CH0			0x8290
+#define _VLV_TX_DW4_CH1			0x8490
+#define VLV_TX_DW4(ch) _PORT(ch, _VLV_TX_DW4_CH0, _VLV_TX_DW4_CH1)
+
+#define _VLV_TX3_DW4_CH0		0x690
+#define _VLV_TX3_DW4_CH1		0x2a90
+#define VLV_TX3_DW4(ch) _PORT(ch, _VLV_TX3_DW4_CH0, _VLV_TX3_DW4_CH1)
+
+#define _VLV_TX_DW5_CH0			0x8294
+#define _VLV_TX_DW5_CH1			0x8494
 #define   DPIO_TX_OCALINIT_EN		(1<<31)
-#define DPIO_TX_OCALINIT(port) _PORT(port, _DPIO_TX_OCALINIT_0, \
-				     _DPIO_TX_OCALINIT_1)
+#define VLV_TX_DW5(ch) _PORT(ch, _VLV_TX_DW5_CH0, _VLV_TX_DW5_CH1)
 
-#define _DPIO_TX_CTL_0			0x82ac
-#define _DPIO_TX_CTL_1			0x84ac
-#define DPIO_TX_CTL(port) _PORT(port, _DPIO_TX_CTL_0, _DPIO_TX_CTL_1)
+#define _VLV_TX_DW11_CH0		0x82ac
+#define _VLV_TX_DW11_CH1		0x84ac
+#define VLV_TX_DW11(ch) _PORT(ch, _VLV_TX_DW11_CH0, _VLV_TX_DW11_CH1)
 
-#define _DPIO_TX_LANE_0			0x82b8
-#define _DPIO_TX_LANE_1			0x84b8
-#define DPIO_TX_LANE(port) _PORT(port, _DPIO_TX_LANE_0, _DPIO_TX_LANE_1)
-
-#define _DPIO_DATA_CHANNEL1		0x8220
-#define _DPIO_DATA_CHANNEL2		0x8420
-#define DPIO_DATA_CHANNEL(port) _PORT(port, _DPIO_DATA_CHANNEL1, _DPIO_DATA_CHANNEL2)
-
-#define _DPIO_PORT0_PCS0		0x0220
-#define _DPIO_PORT0_PCS1		0x0420
-#define _DPIO_PORT1_PCS2		0x2620
-#define _DPIO_PORT1_PCS3		0x2820
-#define DPIO_DATA_LANE_A(port) _PORT(port, _DPIO_PORT0_PCS0, _DPIO_PORT1_PCS2)
-#define DPIO_DATA_LANE_B(port) _PORT(port, _DPIO_PORT0_PCS1, _DPIO_PORT1_PCS3)
-#define DPIO_DATA_CHANNEL1              0x8220
-#define DPIO_DATA_CHANNEL2              0x8420
+#define _VLV_TX_DW14_CH0		0x82b8
+#define _VLV_TX_DW14_CH1		0x84b8
+#define VLV_TX_DW14(ch) _PORT(ch, _VLV_TX_DW14_CH0, _VLV_TX_DW14_CH1)
 
 /*
  * Fence registers
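
The VLV DPIO definitions above are renamed to the DWord-based names from the spec, but per-channel selection still goes through the driver's _PIPE()/_PORT() helpers. A simplified sketch of how such a two-way selector macro resolves an offset (a reconstruction for illustration, not the driver's exact definitions):

#include <stdio.h>

/* Simplified versions of the driver's _PIPE()/_PORT() selectors. */
#define _PIPE(pipe, a, b)	((a) + (pipe) * ((b) - (a)))
#define _PORT(port, a, b)	((a) + (port) * ((b) - (a)))

#define _VLV_PLL_DW3_CH0	0x800c
#define _VLV_PLL_DW3_CH1	0x802c
#define VLV_PLL_DW3(ch)		_PIPE(ch, _VLV_PLL_DW3_CH0, _VLV_PLL_DW3_CH1)

int main(void)
{
	printf("ch0: 0x%04x\n", (unsigned)VLV_PLL_DW3(0));	/* 0x800c */
	printf("ch1: 0x%04x\n", (unsigned)VLV_PLL_DW3(1));	/* 0x802c */
	return 0;
}
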
@@ -732,6 +735,8 @@
 #define HWSTAM		0x02098
 #define DMA_FADD_I8XX	0x020d0
 #define RING_BBSTATE(base)	((base)+0x110)
+#define RING_BBADDR(base)	((base)+0x140)
+#define RING_BBADDR_UDW(base)	((base)+0x168) /* gen8+ */
 
 #define ERROR_GEN6	0x040a0
 #define GEN7_ERR_INT	0x44040
@@ -922,7 +927,6 @@
 #define   CM0_COLOR_EVICT_DISABLE (1<<3)
 #define   CM0_DEPTH_WRITE_DISABLE (1<<1)
 #define   CM0_RC_OP_FLUSH_DISABLE (1<<0)
-#define BB_ADDR		0x02140 /* 8 bytes */
 #define GFX_FLSH_CNTL	0x02170 /* 915+ only */
 #define GFX_FLSH_CNTL_GEN6	0x101008
 #define   GFX_FLSH_CNTL_EN	(1<<0)
@@ -999,6 +1003,7 @@
 
 #define GEN7_FF_THREAD_MODE		0x20a0
 #define   GEN7_FF_SCHED_MASK		0x0077070
+#define   GEN8_FF_DS_REF_CNT_FFME	(1 << 19)
 #define   GEN7_FF_TS_SCHED_HS1		(0x5<<16)
 #define   GEN7_FF_TS_SCHED_HS0		(0x3<<16)
 #define   GEN7_FF_TS_SCHED_LOAD_BALANCE	(0x1<<16)
@@ -1026,14 +1031,14 @@
 #define   FBC_CTL_UNCOMPRESSIBLE (1<<14)
 #define   FBC_CTL_C3_IDLE	(1<<13)
 #define   FBC_CTL_STRIDE_SHIFT	(5)
-#define   FBC_CTL_FENCENO	(1<<0)
+#define   FBC_CTL_FENCENO_SHIFT	(0)
 #define FBC_COMMAND		0x0320c
 #define   FBC_CMD_COMPRESS	(1<<0)
 #define FBC_STATUS		0x03210
 #define   FBC_STAT_COMPRESSING	(1<<31)
 #define   FBC_STAT_COMPRESSED	(1<<30)
 #define   FBC_STAT_MODIFIED	(1<<29)
-#define   FBC_STAT_CURRENT_LINE	(1<<0)
+#define   FBC_STAT_CURRENT_LINE_SHIFT	(0)
 #define FBC_CONTROL2		0x03214
 #define   FBC_CTL_FENCE_DBL	(0<<4)
 #define   FBC_CTL_IDLE_IMM	(0<<2)
@@ -2117,9 +2122,13 @@
  * Please check the detailed lore in the commit message for experimental
  * evidence.
  */
-#define   PORTD_HOTPLUG_LIVE_STATUS               (1 << 29)
-#define   PORTC_HOTPLUG_LIVE_STATUS               (1 << 28)
-#define   PORTB_HOTPLUG_LIVE_STATUS               (1 << 27)
+#define   PORTD_HOTPLUG_LIVE_STATUS_G4X		(1 << 29)
+#define   PORTC_HOTPLUG_LIVE_STATUS_G4X		(1 << 28)
+#define   PORTB_HOTPLUG_LIVE_STATUS_G4X		(1 << 27)
+/* VLV DP/HDMI bits again match Bspec */
+#define   PORTD_HOTPLUG_LIVE_STATUS_VLV		(1 << 27)
+#define   PORTC_HOTPLUG_LIVE_STATUS_VLV		(1 << 28)
+#define   PORTB_HOTPLUG_LIVE_STATUS_VLV		(1 << 29)
 #define   PORTD_HOTPLUG_INT_STATUS		(3 << 21)
 #define   PORTC_HOTPLUG_INT_STATUS		(3 << 19)
 #define   PORTB_HOTPLUG_INT_STATUS		(3 << 17)
@@ -2130,6 +2139,11 @@
 #define   CRT_HOTPLUG_MONITOR_COLOR		(3 << 8)
 #define   CRT_HOTPLUG_MONITOR_MONO		(2 << 8)
 #define   CRT_HOTPLUG_MONITOR_NONE		(0 << 8)
+#define   DP_AUX_CHANNEL_D_INT_STATUS_G4X	(1 << 6)
+#define   DP_AUX_CHANNEL_C_INT_STATUS_G4X	(1 << 5)
+#define   DP_AUX_CHANNEL_B_INT_STATUS_G4X	(1 << 4)
+#define   DP_AUX_CHANNEL_MASK_INT_STATUS_G4X	(7 << 4)
+
 /* SDVO is different across gen3/4 */
 #define   SDVOC_HOTPLUG_INT_STATUS_G4X		(1 << 3)
 #define   SDVOB_HOTPLUG_INT_STATUS_G4X		(1 << 2)
@@ -3421,42 +3435,6 @@
 /* the unit of memory self-refresh latency time is 0.5us */
 #define  ILK_SRLT_MASK		0x3f
 
-/* define the fifo size on Ironlake */
-#define ILK_DISPLAY_FIFO	128
-#define ILK_DISPLAY_MAXWM	64
-#define ILK_DISPLAY_DFTWM	8
-#define ILK_CURSOR_FIFO		32
-#define ILK_CURSOR_MAXWM	16
-#define ILK_CURSOR_DFTWM	8
-
-#define ILK_DISPLAY_SR_FIFO	512
-#define ILK_DISPLAY_MAX_SRWM	0x1ff
-#define ILK_DISPLAY_DFT_SRWM	0x3f
-#define ILK_CURSOR_SR_FIFO	64
-#define ILK_CURSOR_MAX_SRWM	0x3f
-#define ILK_CURSOR_DFT_SRWM	8
-
-#define ILK_FIFO_LINE_SIZE	64
-
-/* define the WM info on Sandybridge */
-#define SNB_DISPLAY_FIFO	128
-#define SNB_DISPLAY_MAXWM	0x7f	/* bit 16:22 */
-#define SNB_DISPLAY_DFTWM	8
-#define SNB_CURSOR_FIFO		32
-#define SNB_CURSOR_MAXWM	0x1f	/* bit 4:0 */
-#define SNB_CURSOR_DFTWM	8
-
-#define SNB_DISPLAY_SR_FIFO	512
-#define SNB_DISPLAY_MAX_SRWM	0x1ff	/* bit 16:8 */
-#define SNB_DISPLAY_DFT_SRWM	0x3f
-#define SNB_CURSOR_SR_FIFO	64
-#define SNB_CURSOR_MAX_SRWM	0x3f	/* bit 5:0 */
-#define SNB_CURSOR_DFT_SRWM	8
-
-#define SNB_FBC_MAX_SRWM	0xf	/* bit 23:20 */
-
-#define SNB_FIFO_LINE_SIZE	64
-
 
 /* the address where we get all kinds of latency value */
 #define SSKPD			0x5d10
@@ -3600,8 +3578,6 @@
 #define DISP_BASEADDR_MASK	(0xfffff000)
 #define I915_LO_DISPBASE(val)	(val & ~DISP_BASEADDR_MASK)
 #define I915_HI_DISPBASE(val)	(val & DISP_BASEADDR_MASK)
-#define I915_MODIFY_DISPBASE(reg, gfx_addr) \
-		(I915_WRITE((reg), (gfx_addr) | I915_LO_DISPBASE(I915_READ(reg))))
 
 /* VBIOS flags */
 #define SWF00			(dev_priv->info->display_mmio_offset + 0x71410)
@@ -3787,7 +3763,7 @@
 
 #define _SPACNTR		(VLV_DISPLAY_BASE + 0x72180)
 #define   SP_ENABLE			(1<<31)
-#define   SP_GEAMMA_ENABLE		(1<<30)
+#define   SP_GAMMA_ENABLE		(1<<30)
 #define   SP_PIXFORMAT_MASK		(0xf<<26)
 #define   SP_FORMAT_YUV422		(0<<26)
 #define   SP_FORMAT_BGR565		(5<<26)
@@ -4139,6 +4115,8 @@
 #define DISP_ARB_CTL	0x45000
 #define  DISP_TILE_SURFACE_SWIZZLING	(1<<13)
 #define  DISP_FBC_WM_DIS		(1<<15)
+#define DISP_ARB_CTL2	0x45004
+#define  DISP_DATA_PARTITION_5_6	(1<<6)
 #define GEN7_MSG_CTL	0x45010
 #define  WAIT_FOR_PCH_RESET_ACK		(1<<1)
 #define  WAIT_FOR_PCH_FLR_ACK		(1<<0)
@@ -4159,6 +4137,10 @@
 #define GEN7_L3SQCREG4				0xb034
 #define  L3SQ_URB_READ_CAM_MATCH_DISABLE	(1<<27)
 
+/* GEN8 chicken */
+#define HDC_CHICKEN0				0x7300
+#define  HDC_FORCE_NON_COHERENT			(1<<4)
+
 /* WaCatErrorRejectionIssue */
 #define GEN7_SQ_CHICKEN_MBCUNIT_CONFIG		0x9030
 #define  GEN7_SQ_CHICKEN_MBCUNIT_SQINTMOB	(1<<11)
@@ -4843,6 +4825,8 @@
 #define  FORCEWAKE_ACK				0x130090
 #define  VLV_GTLC_WAKE_CTRL			0x130090
 #define  VLV_GTLC_PW_STATUS			0x130094
+#define VLV_GTLC_PW_RENDER_STATUS_MASK		0x80
+#define VLV_GTLC_PW_MEDIA_STATUS_MASK		0x20
 #define  FORCEWAKE_MT				0xa188 /* multi-threaded */
 #define   FORCEWAKE_KERNEL			0x1
 #define   FORCEWAKE_USER			0x2
@@ -4851,12 +4835,16 @@
 #define    FORCEWAKE_MT_ENABLE			(1<<5)
 
 #define  GTFIFODBG				0x120000
-#define    GT_FIFO_CPU_ERROR_MASK		7
+#define    GT_FIFO_SBDROPERR			(1<<6)
+#define    GT_FIFO_BLOBDROPERR			(1<<5)
+#define    GT_FIFO_SB_READ_ABORTERR		(1<<4)
+#define    GT_FIFO_DROPERR			(1<<3)
 #define    GT_FIFO_OVFERR			(1<<2)
 #define    GT_FIFO_IAWRERR			(1<<1)
 #define    GT_FIFO_IARDERR			(1<<0)
 
-#define  GT_FIFO_FREE_ENTRIES			0x120008
+#define  GTFIFOCTL				0x120008
+#define    GT_FIFO_FREE_ENTRIES_MASK		0x7f
 #define    GT_FIFO_NUM_RESERVED_ENTRIES		20
 
 #define  HSW_IDICR				0x9008
@@ -4890,6 +4878,7 @@
 #define   GEN6_RC_CTL_RC6_ENABLE		(1<<18)
 #define   GEN6_RC_CTL_RC1e_ENABLE		(1<<20)
 #define   GEN6_RC_CTL_RC7_ENABLE		(1<<22)
+#define   VLV_RC_CTL_CTX_RST_PARALLEL		(1<<24)
 #define   GEN7_RC_CTL_TO_MODE			(1<<28)
 #define   GEN6_RC_CTL_EI_MODE(x)		((x)<<27)
 #define   GEN6_RC_CTL_HW_ENABLE			(1<<31)
diff --git a/drivers/gpu/drm/i915/i915_suspend.c b/drivers/gpu/drm/i915/i915_suspend.c
index 98790c7..8150fdc 100644
--- a/drivers/gpu/drm/i915/i915_suspend.c
+++ b/drivers/gpu/drm/i915/i915_suspend.c
@@ -192,7 +192,6 @@
 static void i915_save_display(struct drm_device *dev)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
-	unsigned long flags;
 
 	/* Display arbitration control */
 	if (INTEL_INFO(dev)->gen <= 4)
@@ -203,46 +202,27 @@
 	if (!drm_core_check_feature(dev, DRIVER_MODESET))
 		i915_save_display_reg(dev);
 
-	spin_lock_irqsave(&dev_priv->backlight.lock, flags);
-
 	/* LVDS state */
 	if (HAS_PCH_SPLIT(dev)) {
 		dev_priv->regfile.savePP_CONTROL = I915_READ(PCH_PP_CONTROL);
-		dev_priv->regfile.saveBLC_PWM_CTL = I915_READ(BLC_PWM_PCH_CTL1);
-		dev_priv->regfile.saveBLC_PWM_CTL2 = I915_READ(BLC_PWM_PCH_CTL2);
-		dev_priv->regfile.saveBLC_CPU_PWM_CTL = I915_READ(BLC_PWM_CPU_CTL);
-		dev_priv->regfile.saveBLC_CPU_PWM_CTL2 = I915_READ(BLC_PWM_CPU_CTL2);
 		if (HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev))
 			dev_priv->regfile.saveLVDS = I915_READ(PCH_LVDS);
 	} else if (IS_VALLEYVIEW(dev)) {
 		dev_priv->regfile.savePP_CONTROL = I915_READ(PP_CONTROL);
 		dev_priv->regfile.savePFIT_PGM_RATIOS = I915_READ(PFIT_PGM_RATIOS);
 
-		dev_priv->regfile.saveBLC_PWM_CTL =
-			I915_READ(VLV_BLC_PWM_CTL(PIPE_A));
 		dev_priv->regfile.saveBLC_HIST_CTL =
 			I915_READ(VLV_BLC_HIST_CTL(PIPE_A));
-		dev_priv->regfile.saveBLC_PWM_CTL2 =
-			I915_READ(VLV_BLC_PWM_CTL2(PIPE_A));
-		dev_priv->regfile.saveBLC_PWM_CTL_B =
-			I915_READ(VLV_BLC_PWM_CTL(PIPE_B));
 		dev_priv->regfile.saveBLC_HIST_CTL_B =
 			I915_READ(VLV_BLC_HIST_CTL(PIPE_B));
-		dev_priv->regfile.saveBLC_PWM_CTL2_B =
-			I915_READ(VLV_BLC_PWM_CTL2(PIPE_B));
 	} else {
 		dev_priv->regfile.savePP_CONTROL = I915_READ(PP_CONTROL);
 		dev_priv->regfile.savePFIT_PGM_RATIOS = I915_READ(PFIT_PGM_RATIOS);
-		dev_priv->regfile.saveBLC_PWM_CTL = I915_READ(BLC_PWM_CTL);
 		dev_priv->regfile.saveBLC_HIST_CTL = I915_READ(BLC_HIST_CTL);
-		if (INTEL_INFO(dev)->gen >= 4)
-			dev_priv->regfile.saveBLC_PWM_CTL2 = I915_READ(BLC_PWM_CTL2);
 		if (IS_MOBILE(dev) && !IS_I830(dev))
 			dev_priv->regfile.saveLVDS = I915_READ(LVDS);
 	}
 
-	spin_unlock_irqrestore(&dev_priv->backlight.lock, flags);
-
 	if (!IS_I830(dev) && !IS_845G(dev) && !HAS_PCH_SPLIT(dev))
 		dev_priv->regfile.savePFIT_CONTROL = I915_READ(PFIT_CONTROL);
 
@@ -257,7 +237,7 @@
 	}
 
 	/* Only regfile.save FBC state on the platform that supports FBC */
-	if (I915_HAS_FBC(dev)) {
+	if (HAS_FBC(dev)) {
 		if (HAS_PCH_SPLIT(dev)) {
 			dev_priv->regfile.saveDPFC_CB_BASE = I915_READ(ILK_DPFC_CB_BASE);
 		} else if (IS_GM45(dev)) {
@@ -278,7 +258,6 @@
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	u32 mask = 0xffffffff;
-	unsigned long flags;
 
 	/* Display arbitration */
 	if (INTEL_INFO(dev)->gen <= 4)
@@ -287,12 +266,6 @@
 	if (!drm_core_check_feature(dev, DRIVER_MODESET))
 		i915_restore_display_reg(dev);
 
-	spin_lock_irqsave(&dev_priv->backlight.lock, flags);
-
-	/* LVDS state */
-	if (INTEL_INFO(dev)->gen >= 4 && !HAS_PCH_SPLIT(dev))
-		I915_WRITE(BLC_PWM_CTL2, dev_priv->regfile.saveBLC_PWM_CTL2);
-
 	if (drm_core_check_feature(dev, DRIVER_MODESET))
 		mask = ~LVDS_PORT_EN;
 
@@ -305,13 +278,6 @@
 		I915_WRITE(PFIT_CONTROL, dev_priv->regfile.savePFIT_CONTROL);
 
 	if (HAS_PCH_SPLIT(dev)) {
-		I915_WRITE(BLC_PWM_PCH_CTL1, dev_priv->regfile.saveBLC_PWM_CTL);
-		I915_WRITE(BLC_PWM_PCH_CTL2, dev_priv->regfile.saveBLC_PWM_CTL2);
-		/* NOTE: BLC_PWM_CPU_CTL must be written after BLC_PWM_CPU_CTL2;
-		 * otherwise we get blank eDP screen after S3 on some machines
-		 */
-		I915_WRITE(BLC_PWM_CPU_CTL2, dev_priv->regfile.saveBLC_CPU_PWM_CTL2);
-		I915_WRITE(BLC_PWM_CPU_CTL, dev_priv->regfile.saveBLC_CPU_PWM_CTL);
 		I915_WRITE(PCH_PP_ON_DELAYS, dev_priv->regfile.savePP_ON_DELAYS);
 		I915_WRITE(PCH_PP_OFF_DELAYS, dev_priv->regfile.savePP_OFF_DELAYS);
 		I915_WRITE(PCH_PP_DIVISOR, dev_priv->regfile.savePP_DIVISOR);
@@ -319,21 +285,12 @@
 		I915_WRITE(RSTDBYCTL,
 			   dev_priv->regfile.saveMCHBAR_RENDER_STANDBY);
 	} else if (IS_VALLEYVIEW(dev)) {
-		I915_WRITE(VLV_BLC_PWM_CTL(PIPE_A),
-			   dev_priv->regfile.saveBLC_PWM_CTL);
 		I915_WRITE(VLV_BLC_HIST_CTL(PIPE_A),
 			   dev_priv->regfile.saveBLC_HIST_CTL);
-		I915_WRITE(VLV_BLC_PWM_CTL2(PIPE_A),
-			   dev_priv->regfile.saveBLC_PWM_CTL2);
-		I915_WRITE(VLV_BLC_PWM_CTL(PIPE_B),
-			   dev_priv->regfile.saveBLC_PWM_CTL);
 		I915_WRITE(VLV_BLC_HIST_CTL(PIPE_B),
 			   dev_priv->regfile.saveBLC_HIST_CTL);
-		I915_WRITE(VLV_BLC_PWM_CTL2(PIPE_B),
-			   dev_priv->regfile.saveBLC_PWM_CTL2);
 	} else {
 		I915_WRITE(PFIT_PGM_RATIOS, dev_priv->regfile.savePFIT_PGM_RATIOS);
-		I915_WRITE(BLC_PWM_CTL, dev_priv->regfile.saveBLC_PWM_CTL);
 		I915_WRITE(BLC_HIST_CTL, dev_priv->regfile.saveBLC_HIST_CTL);
 		I915_WRITE(PP_ON_DELAYS, dev_priv->regfile.savePP_ON_DELAYS);
 		I915_WRITE(PP_OFF_DELAYS, dev_priv->regfile.savePP_OFF_DELAYS);
@@ -341,11 +298,9 @@
 		I915_WRITE(PP_CONTROL, dev_priv->regfile.savePP_CONTROL);
 	}
 
-	spin_unlock_irqrestore(&dev_priv->backlight.lock, flags);
-
 	/* only restore FBC info on the platform that supports FBC*/
 	intel_disable_fbc(dev);
-	if (I915_HAS_FBC(dev)) {
+	if (HAS_FBC(dev)) {
 		if (HAS_PCH_SPLIT(dev)) {
 			I915_WRITE(ILK_DPFC_CB_BASE, dev_priv->regfile.saveDPFC_CB_BASE);
 		} else if (IS_GM45(dev)) {
diff --git a/drivers/gpu/drm/i915/i915_sysfs.c b/drivers/gpu/drm/i915/i915_sysfs.c
index cef38fd..33bcae3 100644
--- a/drivers/gpu/drm/i915/i915_sysfs.c
+++ b/drivers/gpu/drm/i915/i915_sysfs.c
@@ -40,10 +40,13 @@
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	u64 raw_time; /* 32b value may overflow during fixed point math */
 	u64 units = 128ULL, div = 100000ULL, bias = 100ULL;
+	u32 ret;
 
 	if (!intel_enable_rc6(dev))
 		return 0;
 
+	intel_runtime_pm_get(dev_priv);
+
 	/* On VLV, residency time is in CZ units rather than 1.28us */
 	if (IS_VALLEYVIEW(dev)) {
 		u32 clkctl2;
@@ -52,7 +55,8 @@
 			CLK_CTL2_CZCOUNT_30NS_SHIFT;
 		if (!clkctl2) {
 			WARN(!clkctl2, "bogus CZ count value");
-			return 0;
+			ret = 0;
+			goto out;
 		}
 		units = DIV_ROUND_UP_ULL(30ULL * bias, (u64)clkctl2);
 		if (I915_READ(VLV_COUNTER_CONTROL) & VLV_COUNT_RANGE_HIGH)
@@ -62,7 +66,11 @@
 	}
 
 	raw_time = I915_READ(reg) * units;
-	return DIV_ROUND_UP_ULL(raw_time, div);
+	ret = DIV_ROUND_UP_ULL(raw_time, div);
+
+out:
+	intel_runtime_pm_put(dev_priv);
+	return ret;
 }
 
 static ssize_t
@@ -183,13 +191,13 @@
 	int slice = (int)(uintptr_t)attr->private;
 	int ret;
 
+	if (!HAS_HW_CONTEXTS(drm_dev))
+		return -ENXIO;
+
 	ret = l3_access_valid(drm_dev, offset);
 	if (ret)
 		return ret;
 
-	if (dev_priv->hw_contexts_disabled)
-		return -ENXIO;
-
 	ret = i915_mutex_lock_interruptible(drm_dev);
 	if (ret)
 		return ret;
@@ -259,7 +267,7 @@
 	if (IS_VALLEYVIEW(dev_priv->dev)) {
 		u32 freq;
 		freq = vlv_punit_read(dev_priv, PUNIT_REG_GPU_FREQ_STS);
-		ret = vlv_gpu_freq(dev_priv->mem_freq, (freq >> 8) & 0xff);
+		ret = vlv_gpu_freq(dev_priv, (freq >> 8) & 0xff);
 	} else {
 		ret = dev_priv->rps.cur_delay * GT_FREQUENCY_MULTIPLIER;
 	}
@@ -276,8 +284,7 @@
 	struct drm_i915_private *dev_priv = dev->dev_private;
 
 	return snprintf(buf, PAGE_SIZE, "%d\n",
-			vlv_gpu_freq(dev_priv->mem_freq,
-				     dev_priv->rps.rpe_delay));
+			vlv_gpu_freq(dev_priv, dev_priv->rps.rpe_delay));
 }
 
 static ssize_t gt_max_freq_mhz_show(struct device *kdev, struct device_attribute *attr, char *buf)
@@ -291,7 +298,7 @@
 
 	mutex_lock(&dev_priv->rps.hw_lock);
 	if (IS_VALLEYVIEW(dev_priv->dev))
-		ret = vlv_gpu_freq(dev_priv->mem_freq, dev_priv->rps.max_delay);
+		ret = vlv_gpu_freq(dev_priv, dev_priv->rps.max_delay);
 	else
 		ret = dev_priv->rps.max_delay * GT_FREQUENCY_MULTIPLIER;
 	mutex_unlock(&dev_priv->rps.hw_lock);
@@ -318,7 +325,7 @@
 	mutex_lock(&dev_priv->rps.hw_lock);
 
 	if (IS_VALLEYVIEW(dev_priv->dev)) {
-		val = vlv_freq_opcode(dev_priv->mem_freq, val);
+		val = vlv_freq_opcode(dev_priv, val);
 
 		hw_max = valleyview_rps_max_freq(dev_priv);
 		hw_min = valleyview_rps_min_freq(dev_priv);
@@ -342,15 +349,15 @@
 		DRM_DEBUG("User requested overclocking to %d\n",
 			  val * GT_FREQUENCY_MULTIPLIER);
 
-	if (dev_priv->rps.cur_delay > val) {
-		if (IS_VALLEYVIEW(dev_priv->dev))
-			valleyview_set_rps(dev_priv->dev, val);
-		else
-			gen6_set_rps(dev_priv->dev, val);
-	}
-
 	dev_priv->rps.max_delay = val;
 
+	if (dev_priv->rps.cur_delay > val) {
+		if (IS_VALLEYVIEW(dev))
+			valleyview_set_rps(dev, val);
+		else
+			gen6_set_rps(dev, val);
+	}
+
 	mutex_unlock(&dev_priv->rps.hw_lock);
 
 	return count;
@@ -367,7 +374,7 @@
 
 	mutex_lock(&dev_priv->rps.hw_lock);
 	if (IS_VALLEYVIEW(dev_priv->dev))
-		ret = vlv_gpu_freq(dev_priv->mem_freq, dev_priv->rps.min_delay);
+		ret = vlv_gpu_freq(dev_priv, dev_priv->rps.min_delay);
 	else
 		ret = dev_priv->rps.min_delay * GT_FREQUENCY_MULTIPLIER;
 	mutex_unlock(&dev_priv->rps.hw_lock);
@@ -394,7 +401,7 @@
 	mutex_lock(&dev_priv->rps.hw_lock);
 
 	if (IS_VALLEYVIEW(dev)) {
-		val = vlv_freq_opcode(dev_priv->mem_freq, val);
+		val = vlv_freq_opcode(dev_priv, val);
 
 		hw_max = valleyview_rps_max_freq(dev_priv);
 		hw_min = valleyview_rps_min_freq(dev_priv);
@@ -411,15 +418,15 @@
 		return -EINVAL;
 	}
 
+	dev_priv->rps.min_delay = val;
+
 	if (dev_priv->rps.cur_delay < val) {
 		if (IS_VALLEYVIEW(dev))
 			valleyview_set_rps(dev, val);
 		else
-			gen6_set_rps(dev_priv->dev, val);
+			gen6_set_rps(dev, val);
 	}
 
-	dev_priv->rps.min_delay = val;
-
 	mutex_unlock(&dev_priv->rps.hw_lock);
 
 	return count;
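Both frequency stores above now publish the new software limit before clamping the current frequency, where previously the clamp happened first. The sketch below captures only that ordering; the rps.hw_lock held by the real code is omitted and the names are placeholders.

#include <stdio.h>

struct rps { int cur, min, max; };

/* Record the new ceiling first, then clamp the running value to it. */
static void store_max(struct rps *r, int val)
{
	r->max = val;
	if (r->cur > val)
		r->cur = val;
}

/* Same shape for the floor. */
static void store_min(struct rps *r, int val)
{
	r->min = val;
	if (r->cur < val)
		r->cur = val;
}

int main(void)
{
	struct rps r = { .cur = 1000, .min = 200, .max = 1200 };

	store_max(&r, 800);	/* cur is clamped down to 800 */
	store_min(&r, 900);	/* cur is raised to 900 */
	printf("cur=%d min=%d max=%d\n", r.cur, r.min, r.max);
	return 0;
}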
@@ -449,7 +456,9 @@
 	ret = mutex_lock_interruptible(&dev->struct_mutex);
 	if (ret)
 		return ret;
+	intel_runtime_pm_get(dev_priv);
 	rp_state_cap = I915_READ(GEN6_RP_STATE_CAP);
+	intel_runtime_pm_put(dev_priv);
 	mutex_unlock(&dev->struct_mutex);
 
 	if (attr == &dev_attr_gt_RP0_freq_mhz) {
diff --git a/drivers/gpu/drm/i915/i915_ums.c b/drivers/gpu/drm/i915/i915_ums.c
index 967da47..caa18e8 100644
--- a/drivers/gpu/drm/i915/i915_ums.c
+++ b/drivers/gpu/drm/i915/i915_ums.c
@@ -270,6 +270,18 @@
 	}
 	/* FIXME: regfile.save TV & SDVO state */
 
+	/* Backlight */
+	if (HAS_PCH_SPLIT(dev)) {
+		dev_priv->regfile.saveBLC_PWM_CTL = I915_READ(BLC_PWM_PCH_CTL1);
+		dev_priv->regfile.saveBLC_PWM_CTL2 = I915_READ(BLC_PWM_PCH_CTL2);
+		dev_priv->regfile.saveBLC_CPU_PWM_CTL = I915_READ(BLC_PWM_CPU_CTL);
+		dev_priv->regfile.saveBLC_CPU_PWM_CTL2 = I915_READ(BLC_PWM_CPU_CTL2);
+	} else {
+		dev_priv->regfile.saveBLC_PWM_CTL = I915_READ(BLC_PWM_CTL);
+		if (INTEL_INFO(dev)->gen >= 4)
+			dev_priv->regfile.saveBLC_PWM_CTL2 = I915_READ(BLC_PWM_CTL2);
+	}
+
 	return;
 }
 
@@ -280,6 +292,21 @@
 	int dpll_b_reg, fpb0_reg, fpb1_reg;
 	int i;
 
+	/* Backlight */
+	if (HAS_PCH_SPLIT(dev)) {
+		I915_WRITE(BLC_PWM_PCH_CTL1, dev_priv->regfile.saveBLC_PWM_CTL);
+		I915_WRITE(BLC_PWM_PCH_CTL2, dev_priv->regfile.saveBLC_PWM_CTL2);
+		/* NOTE: BLC_PWM_CPU_CTL must be written after BLC_PWM_CPU_CTL2;
+		 * otherwise we get blank eDP screen after S3 on some machines
+		 */
+		I915_WRITE(BLC_PWM_CPU_CTL2, dev_priv->regfile.saveBLC_CPU_PWM_CTL2);
+		I915_WRITE(BLC_PWM_CPU_CTL, dev_priv->regfile.saveBLC_CPU_PWM_CTL);
+	} else {
+		if (INTEL_INFO(dev)->gen >= 4)
+			I915_WRITE(BLC_PWM_CTL2, dev_priv->regfile.saveBLC_PWM_CTL2);
+		I915_WRITE(BLC_PWM_CTL, dev_priv->regfile.saveBLC_PWM_CTL);
+	}
+
 	/* Display port ratios (must be done before clock is set) */
 	if (SUPPORTS_INTEGRATED_DP(dev)) {
 		I915_WRITE(_PIPEA_DATA_M_G4X, dev_priv->regfile.savePIPEA_GMCH_DATA_M);
diff --git a/drivers/gpu/drm/i915/intel_bios.c b/drivers/gpu/drm/i915/intel_bios.c
index e4fba39..f220419 100644
--- a/drivers/gpu/drm/i915/intel_bios.c
+++ b/drivers/gpu/drm/i915/intel_bios.c
@@ -281,6 +281,34 @@
 	}
 }
 
+static void
+parse_lfp_backlight(struct drm_i915_private *dev_priv, struct bdb_header *bdb)
+{
+	const struct bdb_lfp_backlight_data *backlight_data;
+	const struct bdb_lfp_backlight_data_entry *entry;
+
+	backlight_data = find_section(bdb, BDB_LVDS_BACKLIGHT);
+	if (!backlight_data)
+		return;
+
+	if (backlight_data->entry_size != sizeof(backlight_data->data[0])) {
+		DRM_DEBUG_KMS("Unsupported backlight data entry size %u\n",
+			      backlight_data->entry_size);
+		return;
+	}
+
+	entry = &backlight_data->data[panel_type];
+
+	dev_priv->vbt.backlight.pwm_freq_hz = entry->pwm_freq_hz;
+	dev_priv->vbt.backlight.active_low_pwm = entry->active_low_pwm;
+	DRM_DEBUG_KMS("VBT backlight PWM modulation frequency %u Hz, "
+		      "active %s, min brightness %u, level %u\n",
+		      dev_priv->vbt.backlight.pwm_freq_hz,
+		      dev_priv->vbt.backlight.active_low_pwm ? "low" : "high",
+		      entry->min_brightness,
+		      backlight_data->level[panel_type]);
+}
+
 /* Try to find sdvo panel data */
 static void
 parse_sdvo_panel_data(struct drm_i915_private *dev_priv,
@@ -327,12 +355,12 @@
 {
 	switch (INTEL_INFO(dev)->gen) {
 	case 2:
-		return alternate ? 66 : 48;
+		return alternate ? 66667 : 48000;
 	case 3:
 	case 4:
-		return alternate ? 100 : 96;
+		return alternate ? 100000 : 96000;
 	default:
-		return alternate ? 100 : 120;
+		return alternate ? 100000 : 120000;
 	}
 }
 
@@ -796,7 +824,7 @@
 	 */
 	dev_priv->vbt.lvds_ssc_freq = intel_bios_ssc_frequency(dev,
 			!HAS_PCH_SPLIT(dev));
-	DRM_DEBUG_KMS("Set default to SSC at %dMHz\n", dev_priv->vbt.lvds_ssc_freq);
+	DRM_DEBUG_KMS("Set default to SSC at %d kHz\n", dev_priv->vbt.lvds_ssc_freq);
 
 	for (port = PORT_A; port < I915_MAX_PORTS; port++) {
 		struct ddi_vbt_port_info *info =
@@ -894,6 +922,7 @@
 	parse_general_features(dev_priv, bdb);
 	parse_general_definitions(dev_priv, bdb);
 	parse_lfp_panel_data(dev_priv, bdb);
+	parse_lfp_backlight(dev_priv, bdb);
 	parse_sdvo_panel_data(dev_priv, bdb);
 	parse_sdvo_device_mapping(dev_priv, bdb);
 	parse_device_mapping(dev_priv, bdb);
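parse_lfp_backlight() above only consumes the VBT section when the entry size recorded in the block matches the structure the driver was compiled against, the usual defensive pattern for versioned VBT blocks. A standalone sketch of that size check over a fake blob; the struct layout and field names here are illustrative only.

#include <stdio.h>
#include <stdint.h>

/* Illustrative stand-in for a versioned table entry. */
struct bl_entry {
	uint16_t pwm_freq_hz;
	uint8_t  active_low;
	uint8_t  min_brightness;
} __attribute__((packed));

struct bl_block {
	uint8_t entry_size;		/* size the firmware wrote the entries with */
	struct bl_entry data[2];
} __attribute__((packed));

static void parse(const struct bl_block *blk, int panel)
{
	/* Reject blocks written against a different entry layout. */
	if (blk->entry_size != sizeof(blk->data[0])) {
		printf("unsupported entry size %u\n", (unsigned)blk->entry_size);
		return;
	}
	printf("panel %d: %u Hz, active %s\n", panel,
	       (unsigned)blk->data[panel].pwm_freq_hz,
	       blk->data[panel].active_low ? "low" : "high");
}

int main(void)
{
	struct bl_block blk = {
		.entry_size = sizeof(struct bl_entry),
		.data = { { 200, 1, 10 }, { 1000, 0, 0 } },
	};
	parse(&blk, 0);
	return 0;
}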
diff --git a/drivers/gpu/drm/i915/intel_bios.h b/drivers/gpu/drm/i915/intel_bios.h
index f580a2b..282de5e 100644
--- a/drivers/gpu/drm/i915/intel_bios.h
+++ b/drivers/gpu/drm/i915/intel_bios.h
@@ -39,7 +39,7 @@
 	u8 reserved0;
 	u32 bdb_offset;			/**< from beginning of VBT */
 	u32 aim_offset[4];		/**< from beginning of VBT */
-} __attribute__((packed));
+} __packed;
 
 struct bdb_header {
 	u8 signature[16];		/**< Always 'BIOS_DATA_BLOCK' */
@@ -65,7 +65,7 @@
 	u8 rsvd4; /* popup memory size */
 	u8 resize_pci_bios;
 	u8 rsvd5; /* is crt already on ddc2 */
-} __attribute__((packed));
+} __packed;
 
 /*
  * There are several types of BIOS data blocks (BDBs), each block has
@@ -142,7 +142,7 @@
 	u8 dp_ssc_enb:1;	/* PCH attached eDP supports SSC */
 	u8 dp_ssc_freq:1;	/* SSC freq for PCH attached eDP */
 	u8 rsvd11:3; /* finish byte */
-} __attribute__((packed));
+} __packed;
 
 /* pre-915 */
 #define GPIO_PIN_DVI_LVDS	0x03 /* "DVI/LVDS DDC GPIO pins" */
@@ -225,7 +225,7 @@
 	u8  dvo2_wiring;
 	u16 extended_type;
 	u8  dvo_function;
-} __attribute__((packed));
+} __packed;
 
 /* This one contains field offsets that are known to be common for all BDB
 * versions. Notice that the meaning of the contents may still change,
@@ -238,7 +238,7 @@
 	u8 not_common2[2];
 	u8 ddc_pin;
 	u16 edid_ptr;
-} __attribute__((packed));
+} __packed;
 
 /* This field changes depending on the BDB version, so the most reliable way to
  * read it is by checking the BDB version and reading the raw pointer. */
@@ -279,7 +279,7 @@
 	 *	     sizeof(child_device_config);
 	 */
 	union child_device_config devices[0];
-} __attribute__((packed));
+} __packed;
 
 struct bdb_lvds_options {
 	u8 panel_type;
@@ -293,7 +293,7 @@
 	u8 lvds_edid:1;
 	u8 rsvd2:1;
 	u8 rsvd4;
-} __attribute__((packed));
+} __packed;
 
 /* LFP pointer table contains entries to the struct below */
 struct bdb_lvds_lfp_data_ptr {
@@ -303,12 +303,12 @@
 	u8 dvo_table_size;
 	u16 panel_pnp_id_offset;
 	u8 pnp_table_size;
-} __attribute__((packed));
+} __packed;
 
 struct bdb_lvds_lfp_data_ptrs {
 	u8 lvds_entries; /* followed by one or more lvds_data_ptr structs */
 	struct bdb_lvds_lfp_data_ptr ptr[16];
-} __attribute__((packed));
+} __packed;
 
 /* LFP data has 3 blocks per entry */
 struct lvds_fp_timing {
@@ -325,7 +325,7 @@
 	u32 pfit_reg;
 	u32 pfit_reg_val;
 	u16 terminator;
-} __attribute__((packed));
+} __packed;
 
 struct lvds_dvo_timing {
 	u16 clock;		/**< In 10khz */
@@ -353,7 +353,7 @@
 	u8 vsync_positive:1;
 	u8 hsync_positive:1;
 	u8 rsvd2:1;
-} __attribute__((packed));
+} __packed;
 
 struct lvds_pnp_id {
 	u16 mfg_name;
@@ -361,17 +361,33 @@
 	u32 serial;
 	u8 mfg_week;
 	u8 mfg_year;
-} __attribute__((packed));
+} __packed;
 
 struct bdb_lvds_lfp_data_entry {
 	struct lvds_fp_timing fp_timing;
 	struct lvds_dvo_timing dvo_timing;
 	struct lvds_pnp_id pnp_id;
-} __attribute__((packed));
+} __packed;
 
 struct bdb_lvds_lfp_data {
 	struct bdb_lvds_lfp_data_entry data[16];
-} __attribute__((packed));
+} __packed;
+
+struct bdb_lfp_backlight_data_entry {
+	u8 type:2;
+	u8 active_low_pwm:1;
+	u8 obsolete1:5;
+	u16 pwm_freq_hz;
+	u8 min_brightness;
+	u8 obsolete2;
+	u8 obsolete3;
+} __packed;
+
+struct bdb_lfp_backlight_data {
+	u8 entry_size;
+	struct bdb_lfp_backlight_data_entry data[16];
+	u8 level[16];
+} __packed;
 
 struct aimdb_header {
 	char signature[16];
@@ -379,12 +395,12 @@
 	u16 aimdb_version;
 	u16 aimdb_header_size;
 	u16 aimdb_size;
-} __attribute__((packed));
+} __packed;
 
 struct aimdb_block {
 	u8 aimdb_id;
 	u16 aimdb_size;
-} __attribute__((packed));
+} __packed;
 
 struct vch_panel_data {
 	u16 fp_timing_offset;
@@ -395,12 +411,12 @@
 	u8 text_fitting_size;
 	u16 graphics_fitting_offset;
 	u8 graphics_fitting_size;
-} __attribute__((packed));
+} __packed;
 
 struct vch_bdb_22 {
 	struct aimdb_block aimdb_block;
 	struct vch_panel_data panels[16];
-} __attribute__((packed));
+} __packed;
 
 struct bdb_sdvo_lvds_options {
 	u8 panel_backlight;
@@ -416,7 +432,7 @@
 	u8 panel_misc_bits_2;
 	u8 panel_misc_bits_3;
 	u8 panel_misc_bits_4;
-} __attribute__((packed));
+} __packed;
 
 
 #define BDB_DRIVER_FEATURE_NO_LVDS		0
@@ -462,7 +478,7 @@
 
 	u8 hdmi_termination;
 	u8 custom_vbt_version;
-} __attribute__((packed));
+} __packed;
 
 #define EDP_18BPP	0
 #define EDP_24BPP	1
@@ -487,14 +503,14 @@
 	u16 t9;
 	u16 t10;
 	u16 t11_t12;
-} __attribute__ ((packed));
+} __packed;
 
 struct edp_link_params {
 	u8 rate:4;
 	u8 lanes:4;
 	u8 preemphasis:4;
 	u8 vswing:4;
-} __attribute__ ((packed));
+} __packed;
 
 struct bdb_edp {
 	struct edp_power_seq power_seqs[16];
@@ -505,7 +521,7 @@
 	/* ith bit indicates enabled/disabled for (i+1)th panel */
 	u16 edp_s3d_feature;
 	u16 edp_t3_optimization;
-} __attribute__ ((packed));
+} __packed;
 
 void intel_setup_bios(struct drm_device *dev);
 int intel_parse_bios(struct drm_device *dev);
@@ -733,6 +749,6 @@
 	u32 hl_switch_cnt;
 	u32 lp_byte_clk;
 	u32 clk_lane_switch_cnt;
-} __attribute__((packed));
+} __packed;
 
 #endif /* _I830_BIOS_H_ */
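The header swaps __attribute__((packed)) for the kernel's __packed shorthand and adds the two backlight structures; the property that matters is that packed layout makes sizeof() match the on-disk entry exactly, which is what the entry_size check relies on. A small sketch with a compile-time size assertion; the 6-byte figure below is for this illustrative layout, not a claim about any particular VBT.

#include <stdio.h>
#include <stdint.h>

#define __packed __attribute__((packed))

/* Same shape as the backlight entry added above: one byte of flag
 * bitfields, a 16-bit PWM frequency, three more bytes. */
struct entry {
	uint8_t  type:2;
	uint8_t  active_low_pwm:1;
	uint8_t  obsolete1:5;
	uint16_t pwm_freq_hz;
	uint8_t  min_brightness;
	uint8_t  obsolete2;
	uint8_t  obsolete3;
} __packed;

/* Compile-time check that packing gives the expected on-disk size;
 * without __packed the u16 would be aligned and the struct padded to 8. */
_Static_assert(sizeof(struct entry) == 6, "entry must be 6 bytes");

int main(void)
{
	printf("sizeof(struct entry) = %zu\n", sizeof(struct entry));
	return 0;
}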
diff --git a/drivers/gpu/drm/i915/intel_crt.c b/drivers/gpu/drm/i915/intel_crt.c
index b5b1b9b..e2e39e6 100644
--- a/drivers/gpu/drm/i915/intel_crt.c
+++ b/drivers/gpu/drm/i915/intel_crt.c
@@ -222,8 +222,9 @@
 	intel_modeset_check_state(connector->dev);
 }
 
-static int intel_crt_mode_valid(struct drm_connector *connector,
-				struct drm_display_mode *mode)
+static enum drm_mode_status
+intel_crt_mode_valid(struct drm_connector *connector,
+		     struct drm_display_mode *mode)
 {
 	struct drm_device *dev = connector->dev;
 
diff --git a/drivers/gpu/drm/i915/intel_ddi.c b/drivers/gpu/drm/i915/intel_ddi.c
index b69dc3e..e06b9e0 100644
--- a/drivers/gpu/drm/i915/intel_ddi.c
+++ b/drivers/gpu/drm/i915/intel_ddi.c
@@ -73,7 +73,7 @@
 };
 
 static const u32 bdw_ddi_translations_edp[] = {
-	0x00FFFFFF, 0x00000012,		/* DP parameters */
+	0x00FFFFFF, 0x00000012,		/* eDP parameters */
 	0x00EBAFFF, 0x00020011,
 	0x00C71FFF, 0x0006000F,
 	0x00FFFFFF, 0x00020011,
@@ -696,25 +696,25 @@
 	*n2_out = best.n2;
 	*p_out = best.p;
 	*r2_out = best.r2;
-
-	DRM_DEBUG_KMS("WRPLL: %dHz refresh rate with p=%d, n2=%d r2=%d\n",
-		      clock, *p_out, *n2_out, *r2_out);
 }
 
-bool intel_ddi_pll_mode_set(struct drm_crtc *crtc)
+/*
+ * Tries to find a PLL for the CRTC. If it finds one, it increases the refcount and
+ * stores it in intel_crtc->ddi_pll_sel, so other mode sets won't be able to
+ * steal the selected PLL. You need to call intel_ddi_pll_enable to actually
+ * enable the PLL.
+ */
+bool intel_ddi_pll_select(struct intel_crtc *intel_crtc)
 {
-	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+	struct drm_crtc *crtc = &intel_crtc->base;
 	struct intel_encoder *intel_encoder = intel_ddi_get_crtc_encoder(crtc);
 	struct drm_encoder *encoder = &intel_encoder->base;
 	struct drm_i915_private *dev_priv = crtc->dev->dev_private;
 	struct intel_ddi_plls *plls = &dev_priv->ddi_plls;
 	int type = intel_encoder->type;
 	enum pipe pipe = intel_crtc->pipe;
-	uint32_t reg, val;
 	int clock = intel_crtc->config.port_clock;
 
-	/* TODO: reuse PLLs when possible (compare values) */
-
 	intel_ddi_put_crtc_pll(crtc);
 
 	if (type == INTEL_OUTPUT_DISPLAYPORT || type == INTEL_OUTPUT_EDP) {
@@ -736,66 +736,145 @@
 			return false;
 		}
 
-		/* We don't need to turn any PLL on because we'll use LCPLL. */
-		return true;
-
 	} else if (type == INTEL_OUTPUT_HDMI) {
+		uint32_t reg, val;
 		unsigned p, n2, r2;
 
-		if (plls->wrpll1_refcount == 0) {
-			DRM_DEBUG_KMS("Using WRPLL 1 on pipe %c\n",
-				      pipe_name(pipe));
-			plls->wrpll1_refcount++;
-			reg = WRPLL_CTL1;
-			intel_crtc->ddi_pll_sel = PORT_CLK_SEL_WRPLL1;
-		} else if (plls->wrpll2_refcount == 0) {
-			DRM_DEBUG_KMS("Using WRPLL 2 on pipe %c\n",
-				      pipe_name(pipe));
-			plls->wrpll2_refcount++;
-			reg = WRPLL_CTL2;
-			intel_crtc->ddi_pll_sel = PORT_CLK_SEL_WRPLL2;
-		} else {
-			DRM_ERROR("No WRPLLs available!\n");
-			return false;
-		}
-
-		WARN(I915_READ(reg) & WRPLL_PLL_ENABLE,
-		     "WRPLL already enabled\n");
-
 		intel_ddi_calculate_wrpll(clock * 1000, &r2, &n2, &p);
 
 		val = WRPLL_PLL_ENABLE | WRPLL_PLL_SELECT_LCPLL_2700 |
 		      WRPLL_DIVIDER_REFERENCE(r2) | WRPLL_DIVIDER_FEEDBACK(n2) |
 		      WRPLL_DIVIDER_POST(p);
 
+		if (val == I915_READ(WRPLL_CTL1)) {
+			DRM_DEBUG_KMS("Reusing WRPLL 1 on pipe %c\n",
+				      pipe_name(pipe));
+			reg = WRPLL_CTL1;
+		} else if (val == I915_READ(WRPLL_CTL2)) {
+			DRM_DEBUG_KMS("Reusing WRPLL 2 on pipe %c\n",
+				      pipe_name(pipe));
+			reg = WRPLL_CTL2;
+		} else if (plls->wrpll1_refcount == 0) {
+			DRM_DEBUG_KMS("Using WRPLL 1 on pipe %c\n",
+				      pipe_name(pipe));
+			reg = WRPLL_CTL1;
+		} else if (plls->wrpll2_refcount == 0) {
+			DRM_DEBUG_KMS("Using WRPLL 2 on pipe %c\n",
+				      pipe_name(pipe));
+			reg = WRPLL_CTL2;
+		} else {
+			DRM_ERROR("No WRPLLs available!\n");
+			return false;
+		}
+
+		DRM_DEBUG_KMS("WRPLL: %dKHz refresh rate with p=%d, n2=%d r2=%d\n",
+			      clock, p, n2, r2);
+
+		if (reg == WRPLL_CTL1) {
+			plls->wrpll1_refcount++;
+			intel_crtc->ddi_pll_sel = PORT_CLK_SEL_WRPLL1;
+		} else {
+			plls->wrpll2_refcount++;
+			intel_crtc->ddi_pll_sel = PORT_CLK_SEL_WRPLL2;
+		}
+
 	} else if (type == INTEL_OUTPUT_ANALOG) {
 		if (plls->spll_refcount == 0) {
 			DRM_DEBUG_KMS("Using SPLL on pipe %c\n",
 				      pipe_name(pipe));
 			plls->spll_refcount++;
-			reg = SPLL_CTL;
 			intel_crtc->ddi_pll_sel = PORT_CLK_SEL_SPLL;
 		} else {
 			DRM_ERROR("SPLL already in use\n");
 			return false;
 		}
 
-		WARN(I915_READ(reg) & SPLL_PLL_ENABLE,
-		     "SPLL already enabled\n");
-
-		val = SPLL_PLL_ENABLE | SPLL_PLL_FREQ_1350MHz | SPLL_PLL_SSC;
-
 	} else {
 		WARN(1, "Invalid DDI encoder type %d\n", type);
 		return false;
 	}
 
-	I915_WRITE(reg, val);
-	udelay(20);
-
 	return true;
 }
 
+/*
+ * To be called after intel_ddi_pll_select(). That one selects the PLL to be
+ * used, this one actually enables the PLL.
+ */
+void intel_ddi_pll_enable(struct intel_crtc *crtc)
+{
+	struct drm_device *dev = crtc->base.dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct intel_ddi_plls *plls = &dev_priv->ddi_plls;
+	int clock = crtc->config.port_clock;
+	uint32_t reg, cur_val, new_val;
+	int refcount;
+	const char *pll_name;
+	uint32_t enable_bit = (1 << 31);
+	unsigned int p, n2, r2;
+
+	BUILD_BUG_ON(enable_bit != SPLL_PLL_ENABLE);
+	BUILD_BUG_ON(enable_bit != WRPLL_PLL_ENABLE);
+
+	switch (crtc->ddi_pll_sel) {
+	case PORT_CLK_SEL_LCPLL_2700:
+	case PORT_CLK_SEL_LCPLL_1350:
+	case PORT_CLK_SEL_LCPLL_810:
+		/*
+		 * LCPLL should always be enabled at this point of the mode set
+		 * sequence, so nothing to do.
+		 */
+		return;
+
+	case PORT_CLK_SEL_SPLL:
+		pll_name = "SPLL";
+		reg = SPLL_CTL;
+		refcount = plls->spll_refcount;
+		new_val = SPLL_PLL_ENABLE | SPLL_PLL_FREQ_1350MHz |
+			  SPLL_PLL_SSC;
+		break;
+
+	case PORT_CLK_SEL_WRPLL1:
+	case PORT_CLK_SEL_WRPLL2:
+		if (crtc->ddi_pll_sel == PORT_CLK_SEL_WRPLL1) {
+			pll_name = "WRPLL1";
+			reg = WRPLL_CTL1;
+			refcount = plls->wrpll1_refcount;
+		} else {
+			pll_name = "WRPLL2";
+			reg = WRPLL_CTL2;
+			refcount = plls->wrpll2_refcount;
+		}
+
+		intel_ddi_calculate_wrpll(clock * 1000, &r2, &n2, &p);
+
+		new_val = WRPLL_PLL_ENABLE | WRPLL_PLL_SELECT_LCPLL_2700 |
+			  WRPLL_DIVIDER_REFERENCE(r2) |
+			  WRPLL_DIVIDER_FEEDBACK(n2) | WRPLL_DIVIDER_POST(p);
+
+		break;
+
+	case PORT_CLK_SEL_NONE:
+		WARN(1, "Bad selected pll: PORT_CLK_SEL_NONE\n");
+		return;
+	default:
+		WARN(1, "Bad selected pll: 0x%08x\n", crtc->ddi_pll_sel);
+		return;
+	}
+
+	cur_val = I915_READ(reg);
+
+	WARN(refcount < 1, "Bad %s refcount: %d\n", pll_name, refcount);
+	if (refcount == 1) {
+		WARN(cur_val & enable_bit, "%s already enabled\n", pll_name);
+		I915_WRITE(reg, new_val);
+		POSTING_READ(reg);
+		udelay(20);
+	} else {
+		WARN((cur_val & enable_bit) == 0, "%s disabled\n", pll_name);
+	}
+}
+
 void intel_ddi_set_pipe_settings(struct drm_crtc *crtc)
 {
 	struct drm_i915_private *dev_priv = crtc->dev->dev_private;
@@ -1121,9 +1200,7 @@
 
 	if (type == INTEL_OUTPUT_EDP) {
 		struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
-		ironlake_edp_panel_vdd_on(intel_dp);
 		ironlake_edp_panel_on(intel_dp);
-		ironlake_edp_panel_vdd_off(intel_dp, true);
 	}
 
 	WARN_ON(intel_crtc->ddi_pll_sel == PORT_CLK_SEL_NONE);
@@ -1166,7 +1243,6 @@
 
 	if (type == INTEL_OUTPUT_DISPLAYPORT || type == INTEL_OUTPUT_EDP) {
 		struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
-		ironlake_edp_panel_vdd_on(intel_dp);
 		intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_OFF);
 		ironlake_edp_panel_off(intel_dp);
 	}
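The DDI changes split the old mode-set helper in two: intel_ddi_pll_select() picks a PLL, preferring one whose programmed value already matches so it can be shared, and takes a reference on it; intel_ddi_pll_enable() later writes the hardware only when it holds the first reference. A simplified standalone model of that reuse-or-allocate plus refcounted-enable flow; all names and the two-PLL limit are only for illustration.

#include <stdio.h>
#include <stdint.h>

struct pll {
	const char *name;
	uint32_t programmed;	/* models the control register value */
	int refcount;
};

static struct pll plls[2] = { { "PLL1" }, { "PLL2" } };

/* Step 1: pick a PLL, preferring one already programmed to the same
 * value, and take a reference on it. */
static struct pll *pll_select(uint32_t val)
{
	struct pll *p = NULL;

	for (int i = 0; i < 2 && !p; i++)	/* reuse a matching PLL */
		if (plls[i].refcount && plls[i].programmed == val)
			p = &plls[i];
	for (int i = 0; i < 2 && !p; i++)	/* otherwise take a free one */
		if (plls[i].refcount == 0)
			p = &plls[i];
	if (p)
		p->refcount++;
	return p;				/* NULL: all busy elsewhere */
}

/* Step 2: only the first reference actually programs the hardware;
 * later users just note that it is already running. */
static void pll_enable(struct pll *p, uint32_t val)
{
	if (p->refcount == 1) {
		p->programmed = val;
		printf("%s: programmed %#x\n", p->name, (unsigned)val);
	} else {
		printf("%s: already enabled, refcount=%d\n",
		       p->name, p->refcount);
	}
}

int main(void)
{
	uint32_t cfg = 0x80001234;
	struct pll *a = pll_select(cfg);	/* first crtc: programs it */
	if (a)
		pll_enable(a, cfg);

	struct pll *b = pll_select(cfg);	/* second crtc: reuses it */
	if (b)
		pll_enable(b, cfg);
	return 0;
}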
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index 2bde35d..4c16728 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -90,8 +90,8 @@
 
 static const intel_limit_t intel_limits_i8xx_dac = {
 	.dot = { .min = 25000, .max = 350000 },
-	.vco = { .min = 930000, .max = 1400000 },
-	.n = { .min = 3, .max = 16 },
+	.vco = { .min = 908000, .max = 1512000 },
+	.n = { .min = 2, .max = 16 },
 	.m = { .min = 96, .max = 140 },
 	.m1 = { .min = 18, .max = 26 },
 	.m2 = { .min = 6, .max = 16 },
@@ -103,8 +103,8 @@
 
 static const intel_limit_t intel_limits_i8xx_dvo = {
 	.dot = { .min = 25000, .max = 350000 },
-	.vco = { .min = 930000, .max = 1400000 },
-	.n = { .min = 3, .max = 16 },
+	.vco = { .min = 908000, .max = 1512000 },
+	.n = { .min = 2, .max = 16 },
 	.m = { .min = 96, .max = 140 },
 	.m1 = { .min = 18, .max = 26 },
 	.m2 = { .min = 6, .max = 16 },
@@ -116,8 +116,8 @@
 
 static const intel_limit_t intel_limits_i8xx_lvds = {
 	.dot = { .min = 25000, .max = 350000 },
-	.vco = { .min = 930000, .max = 1400000 },
-	.n = { .min = 3, .max = 16 },
+	.vco = { .min = 908000, .max = 1512000 },
+	.n = { .min = 2, .max = 16 },
 	.m = { .min = 96, .max = 140 },
 	.m1 = { .min = 18, .max = 26 },
 	.m2 = { .min = 6, .max = 16 },
@@ -329,6 +329,8 @@
 {
 	clock->m = clock->m1 * clock->m2;
 	clock->p = clock->p1 * clock->p2;
+	if (WARN_ON(clock->n == 0 || clock->p == 0))
+		return;
 	clock->vco = DIV_ROUND_CLOSEST(refclk * clock->m, clock->n);
 	clock->dot = DIV_ROUND_CLOSEST(clock->vco, clock->p);
 }
@@ -430,6 +432,8 @@
 {
 	clock->m = clock->m2 + 2;
 	clock->p = clock->p1 * clock->p2;
+	if (WARN_ON(clock->n == 0 || clock->p == 0))
+		return;
 	clock->vco = DIV_ROUND_CLOSEST(refclk * clock->m, clock->n);
 	clock->dot = DIV_ROUND_CLOSEST(clock->vco, clock->p);
 }
@@ -443,6 +447,8 @@
 {
 	clock->m = i9xx_dpll_compute_m(clock);
 	clock->p = clock->p1 * clock->p2;
+	if (WARN_ON(clock->n + 2 == 0 || clock->p == 0))
+		return;
 	clock->vco = DIV_ROUND_CLOSEST(refclk * clock->m, clock->n + 2);
 	clock->dot = DIV_ROUND_CLOSEST(clock->vco, clock->p);
 }
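All three clock helpers above now guard the divide with WARN_ON(n == 0 || p == 0) before computing vco = refclk * m / n and dot = vco / p. A tiny worked version of that computation using round-to-nearest like DIV_ROUND_CLOSEST; the m/n/p values are an arbitrary example, and only the simple m = m1 * m2 variant is modelled (the other helpers derive m and n slightly differently).

#include <stdio.h>

/* Round-to-nearest integer division, like the kernel's DIV_ROUND_CLOSEST. */
static long div_round_closest(long num, long den)
{
	return (num + den / 2) / den;
}

struct clock { long m1, m2, p1, p2, n, m, p, vco, dot; };

static int calc_clock(long refclk, struct clock *c)
{
	c->m = c->m1 * c->m2;
	c->p = c->p1 * c->p2;
	if (c->n == 0 || c->p == 0)	/* avoid the divide-by-zero */
		return -1;
	c->vco = div_round_closest(refclk * c->m, c->n);
	c->dot = div_round_closest(c->vco, c->p);
	return 0;
}

int main(void)
{
	/* Example numbers only: refclk in kHz, so vco/dot come out in kHz. */
	struct clock c = { .m1 = 20, .m2 = 10, .p1 = 2, .p2 = 5, .n = 2 };

	if (calc_clock(96000, &c) == 0)
		printf("vco=%ld kHz dot=%ld kHz\n", c.vco, c.dot);
	return 0;
}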
@@ -748,10 +754,10 @@
 	return intel_crtc->config.cpu_transcoder;
 }
 
-static void ironlake_wait_for_vblank(struct drm_device *dev, int pipe)
+static void g4x_wait_for_vblank(struct drm_device *dev, int pipe)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
-	u32 frame, frame_reg = PIPEFRAME(pipe);
+	u32 frame, frame_reg = PIPE_FRMCOUNT_GM45(pipe);
 
 	frame = I915_READ(frame_reg);
 
@@ -772,8 +778,8 @@
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	int pipestat_reg = PIPESTAT(pipe);
 
-	if (INTEL_INFO(dev)->gen >= 5) {
-		ironlake_wait_for_vblank(dev, pipe);
+	if (IS_G4X(dev) || INTEL_INFO(dev)->gen >= 5) {
+		g4x_wait_for_vblank(dev, pipe);
 		return;
 	}
 
@@ -1205,15 +1211,12 @@
 	}
 }
 
-static void assert_pch_refclk_enabled(struct drm_i915_private *dev_priv)
+static void ibx_assert_pch_refclk_enabled(struct drm_i915_private *dev_priv)
 {
 	u32 val;
 	bool enabled;
 
-	if (HAS_PCH_LPT(dev_priv->dev)) {
-		DRM_DEBUG_DRIVER("LPT does not has PCH refclk, skipping check\n");
-		return;
-	}
+	WARN_ON(!(HAS_PCH_IBX(dev_priv->dev) || HAS_PCH_CPT(dev_priv->dev)));
 
 	val = I915_READ(PCH_DREF_CONTROL);
 	enabled = !!(val & (DREF_SSC_SOURCE_MASK | DREF_NONSPREAD_SOURCE_MASK |
@@ -1361,6 +1364,24 @@
 	if (!IS_VALLEYVIEW(dev))
 		return;
 
+	DPIO_PHY_IOSF_PORT(DPIO_PHY0) = IOSF_PORT_DPIO;
+}
+
+static void intel_reset_dpio(struct drm_device *dev)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+
+	if (!IS_VALLEYVIEW(dev))
+		return;
+
+	/*
+	 * Enable the CRI clock source so we can get at the display and the
+	 * reference clock for VGA hotplug / manual detection.
+	 */
+	I915_WRITE(DPLL(PIPE_B), I915_READ(DPLL(PIPE_B)) |
+		   DPLL_REFA_CLK_ENABLE_VLV |
+		   DPLL_INTEGRATED_CRI_CLK_VLV);
+
 	/*
 	 * From VLV2A0_DP_eDP_DPIO_driver_vbios_notes_10.docx -
 	 *  6.	De-assert cmn_reset/side_reset. Same as VLV X0.
@@ -1487,25 +1508,35 @@
 	/* Make sure the pipe isn't still relying on us */
 	assert_pipe_disabled(dev_priv, pipe);
 
-	/* Leave integrated clock source enabled */
+	/*
+	 * Leave integrated clock source and reference clock enabled for pipe B.
+	 * The latter is needed for VGA hotplug / manual detection.
+	 */
 	if (pipe == PIPE_B)
-		val = DPLL_INTEGRATED_CRI_CLK_VLV;
+		val = DPLL_INTEGRATED_CRI_CLK_VLV | DPLL_REFA_CLK_ENABLE_VLV;
 	I915_WRITE(DPLL(pipe), val);
 	POSTING_READ(DPLL(pipe));
 }
 
-void vlv_wait_port_ready(struct drm_i915_private *dev_priv, int port)
+void vlv_wait_port_ready(struct drm_i915_private *dev_priv,
+		struct intel_digital_port *dport)
 {
 	u32 port_mask;
 
-	if (!port)
+	switch (dport->port) {
+	case PORT_B:
 		port_mask = DPLL_PORTB_READY_MASK;
-	else
+		break;
+	case PORT_C:
 		port_mask = DPLL_PORTC_READY_MASK;
+		break;
+	default:
+		BUG();
+	}
 
 	if (wait_for((I915_READ(DPLL(0)) & port_mask) == 0, 1000))
 		WARN(1, "timed out waiting for port %c ready: 0x%08x\n",
-		     'B' + port, I915_READ(DPLL(0)));
+		     port_name(dport->port), I915_READ(DPLL(0)));
 }
 
 /**
@@ -2083,8 +2114,8 @@
 		      fb->pitches[0]);
 	I915_WRITE(DSPSTRIDE(plane), fb->pitches[0]);
 	if (INTEL_INFO(dev)->gen >= 4) {
-		I915_MODIFY_DISPBASE(DSPSURF(plane),
-				     i915_gem_obj_ggtt_offset(obj) + intel_crtc->dspaddr_offset);
+		I915_WRITE(DSPSURF(plane),
+			   i915_gem_obj_ggtt_offset(obj) + intel_crtc->dspaddr_offset);
 		I915_WRITE(DSPTILEOFF(plane), (y << 16) | x);
 		I915_WRITE(DSPLINOFF(plane), linear_offset);
 	} else
@@ -2174,8 +2205,8 @@
 		      i915_gem_obj_ggtt_offset(obj), linear_offset, x, y,
 		      fb->pitches[0]);
 	I915_WRITE(DSPSTRIDE(plane), fb->pitches[0]);
-	I915_MODIFY_DISPBASE(DSPSURF(plane),
-			     i915_gem_obj_ggtt_offset(obj) + intel_crtc->dspaddr_offset);
+	I915_WRITE(DSPSURF(plane),
+		   i915_gem_obj_ggtt_offset(obj) + intel_crtc->dspaddr_offset);
 	if (IS_HASWELL(dev) || IS_BROADWELL(dev)) {
 		I915_WRITE(DSPOFFSET(plane), (y << 16) | x);
 	} else {
@@ -2233,7 +2264,12 @@
 		struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
 
 		mutex_lock(&crtc->mutex);
-		if (intel_crtc->active)
+		/*
+		 * FIXME: Once we have proper support for primary planes (and
+		 * disabling them without disabling the entire crtc) allow again
+		 * a NULL crtc->fb.
+		 */
+		if (intel_crtc->active && crtc->fb)
 			dev_priv->display.update_plane(crtc, crtc->fb,
 						       crtc->x, crtc->y);
 		mutex_unlock(&crtc->mutex);
@@ -2350,6 +2386,8 @@
 			I915_WRITE(PF_WIN_POS(intel_crtc->pipe), 0);
 			I915_WRITE(PF_WIN_SZ(intel_crtc->pipe), 0);
 		}
+		intel_crtc->config.pipe_src_w = adjusted_mode->crtc_hdisplay;
+		intel_crtc->config.pipe_src_h = adjusted_mode->crtc_vdisplay;
 	}
 
 	ret = dev_priv->display.update_plane(crtc, fb, x, y);
@@ -2944,6 +2982,30 @@
 	return pending;
 }
 
+bool intel_has_pending_fb_unpin(struct drm_device *dev)
+{
+	struct intel_crtc *crtc;
+
+	/* Note that we don't need to be called with mode_config.lock here
+	 * as our list of CRTC objects is static for the lifetime of the
+	 * device and so cannot disappear as we iterate. Similarly, we can
+	 * happily treat the predicates as racy, atomic checks as userspace
+	 * cannot claim and pin a new fb without at least acquiring the
+	 * struct_mutex and so serialising with us.
+	 */
+	list_for_each_entry(crtc, &dev->mode_config.crtc_list, base.head) {
+		if (atomic_read(&crtc->unpin_work_count) == 0)
+			continue;
+
+		if (crtc->unpin_work)
+			intel_wait_for_vblank(dev, crtc->pipe);
+
+		return true;
+	}
+
+	return false;
+}
+
 static void intel_crtc_wait_for_pending_flips(struct drm_crtc *crtc)
 {
 	struct drm_device *dev = crtc->dev;
@@ -3399,9 +3461,8 @@
 		mutex_unlock(&dev_priv->rps.hw_lock);
 		/* Quoting Art Runyan: "its not safe to expect any particular
 		 * value in IPS_CTL bit 31 after enabling IPS through the
-		 * mailbox." Therefore we need to defer waiting on the state
-		 * change.
-		 * TODO: need to fix this for state checker
+		 * mailbox." Moreover, the mailbox may return a bogus state,
+		 * so we need to just enable it and continue on.
 		 */
 	} else {
 		I915_WRITE(IPS_CTL, IPS_ENABLE);
@@ -3428,9 +3489,10 @@
 		mutex_lock(&dev_priv->rps.hw_lock);
 		WARN_ON(sandybridge_pcode_write(dev_priv, DISPLAY_IPS_CONTROL, 0));
 		mutex_unlock(&dev_priv->rps.hw_lock);
-	} else
+	} else {
 		I915_WRITE(IPS_CTL, 0);
-	POSTING_READ(IPS_CTL);
+		POSTING_READ(IPS_CTL);
+	}
 
 	/* We need to wait for a vblank before we can disable the plane. */
 	intel_wait_for_vblank(dev, crtc->pipe);
@@ -3465,7 +3527,7 @@
 	/* Workaround : Do not read or write the pipe palette/gamma data while
 	 * GAMMA_MODE is configured for split gamma and IPS_CTL has IPS enabled.
 	 */
-	if (intel_crtc->config.ips_enabled &&
+	if (IS_HASWELL(dev) && intel_crtc->config.ips_enabled &&
 	    ((I915_READ(GAMMA_MODE(pipe)) & GAMMA_MODE_MODE_MASK) ==
 	     GAMMA_MODE_MODE_SPLIT)) {
 		hsw_disable_ips(intel_crtc);
@@ -3910,6 +3972,174 @@
 	I915_WRITE(BCLRPAT(crtc->pipe), 0);
 }
 
+int valleyview_get_vco(struct drm_i915_private *dev_priv)
+{
+	int hpll_freq, vco_freq[] = { 800, 1600, 2000, 2400 };
+
+	/* Obtain SKU information */
+	mutex_lock(&dev_priv->dpio_lock);
+	hpll_freq = vlv_cck_read(dev_priv, CCK_FUSE_REG) &
+		CCK_FUSE_HPLL_FREQ_MASK;
+	mutex_unlock(&dev_priv->dpio_lock);
+
+	return vco_freq[hpll_freq];
+}
+
+/* Adjust CDclk dividers to allow high res or save power if possible */
+static void valleyview_set_cdclk(struct drm_device *dev, int cdclk)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	u32 val, cmd;
+
+	if (cdclk >= 320) /* jump to highest voltage for 400MHz too */
+		cmd = 2;
+	else if (cdclk == 266)
+		cmd = 1;
+	else
+		cmd = 0;
+
+	mutex_lock(&dev_priv->rps.hw_lock);
+	val = vlv_punit_read(dev_priv, PUNIT_REG_DSPFREQ);
+	val &= ~DSPFREQGUAR_MASK;
+	val |= (cmd << DSPFREQGUAR_SHIFT);
+	vlv_punit_write(dev_priv, PUNIT_REG_DSPFREQ, val);
+	if (wait_for((vlv_punit_read(dev_priv, PUNIT_REG_DSPFREQ) &
+		      DSPFREQSTAT_MASK) == (cmd << DSPFREQSTAT_SHIFT),
+		     50)) {
+		DRM_ERROR("timed out waiting for CDclk change\n");
+	}
+	mutex_unlock(&dev_priv->rps.hw_lock);
+
+	if (cdclk == 400) {
+		u32 divider, vco;
+
+		vco = valleyview_get_vco(dev_priv);
+		divider = ((vco << 1) / cdclk) - 1;
+
+		mutex_lock(&dev_priv->dpio_lock);
+		/* adjust cdclk divider */
+		val = vlv_cck_read(dev_priv, CCK_DISPLAY_CLOCK_CONTROL);
+		val &= ~0xf;
+		val |= divider;
+		vlv_cck_write(dev_priv, CCK_DISPLAY_CLOCK_CONTROL, val);
+		mutex_unlock(&dev_priv->dpio_lock);
+	}
+
+	mutex_lock(&dev_priv->dpio_lock);
+	/* adjust self-refresh exit latency value */
+	val = vlv_bunit_read(dev_priv, BUNIT_REG_BISOC);
+	val &= ~0x7f;
+
+	/*
+	 * For high bandwidth configs, we set a higher latency in the bunit
+	 * so that the core display fetch happens in time to avoid underruns.
+	 */
+	if (cdclk == 400)
+		val |= 4500 / 250; /* 4.5 usec */
+	else
+		val |= 3000 / 250; /* 3.0 usec */
+	vlv_bunit_write(dev_priv, BUNIT_REG_BISOC, val);
+	mutex_unlock(&dev_priv->dpio_lock);
+
+	/* Since we changed the CDclk, we need to update the GMBUSFREQ too */
+	intel_i2c_reset(dev);
+}
+
+static int valleyview_cur_cdclk(struct drm_i915_private *dev_priv)
+{
+	int cur_cdclk, vco;
+	int divider;
+
+	vco = valleyview_get_vco(dev_priv);
+
+	mutex_lock(&dev_priv->dpio_lock);
+	divider = vlv_cck_read(dev_priv, CCK_DISPLAY_CLOCK_CONTROL);
+	mutex_unlock(&dev_priv->dpio_lock);
+
+	divider &= 0xf;
+
+	cur_cdclk = (vco << 1) / (divider + 1);
+
+	return cur_cdclk;
+}
+
+static int valleyview_calc_cdclk(struct drm_i915_private *dev_priv,
+				 int max_pixclk)
+{
+	int cur_cdclk;
+
+	cur_cdclk = valleyview_cur_cdclk(dev_priv);
+
+	/*
+	 * Really only a few cases to deal with, as only 4 CDclks are supported:
+	 *   200MHz
+	 *   267MHz
+	 *   320MHz
+	 *   400MHz
+	 * So we check to see whether we're above 90% of the lower bin and
+	 * adjust if needed.
+	 */
+	if (max_pixclk > 288000) {
+		return 400;
+	} else if (max_pixclk > 240000) {
+		return 320;
+	} else
+		return 266;
+	/* Looks like the 200MHz CDclk freq doesn't work on some configs */
+}
+
+static int intel_mode_max_pixclk(struct drm_i915_private *dev_priv,
+				 unsigned modeset_pipes,
+				 struct intel_crtc_config *pipe_config)
+{
+	struct drm_device *dev = dev_priv->dev;
+	struct intel_crtc *intel_crtc;
+	int max_pixclk = 0;
+
+	list_for_each_entry(intel_crtc, &dev->mode_config.crtc_list,
+			    base.head) {
+		if (modeset_pipes & (1 << intel_crtc->pipe))
+			max_pixclk = max(max_pixclk,
+					 pipe_config->adjusted_mode.crtc_clock);
+		else if (intel_crtc->base.enabled)
+			max_pixclk = max(max_pixclk,
+					 intel_crtc->config.adjusted_mode.crtc_clock);
+	}
+
+	return max_pixclk;
+}
+
+static void valleyview_modeset_global_pipes(struct drm_device *dev,
+					    unsigned *prepare_pipes,
+					    unsigned modeset_pipes,
+					    struct intel_crtc_config *pipe_config)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct intel_crtc *intel_crtc;
+	int max_pixclk = intel_mode_max_pixclk(dev_priv, modeset_pipes,
+					       pipe_config);
+	int cur_cdclk = valleyview_cur_cdclk(dev_priv);
+
+	if (valleyview_calc_cdclk(dev_priv, max_pixclk) == cur_cdclk)
+		return;
+
+	list_for_each_entry(intel_crtc, &dev->mode_config.crtc_list,
+			    base.head)
+		if (intel_crtc->base.enabled)
+			*prepare_pipes |= (1 << intel_crtc->pipe);
+}
+
+static void valleyview_modeset_global_resources(struct drm_device *dev)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	int max_pixclk = intel_mode_max_pixclk(dev_priv, 0, NULL);
+	int cur_cdclk = valleyview_cur_cdclk(dev_priv);
+	int req_cdclk = valleyview_calc_cdclk(dev_priv, max_pixclk);
+
+	if (req_cdclk != cur_cdclk)
+		valleyview_set_cdclk(dev, req_cdclk);
+}
+
 static void valleyview_crtc_enable(struct drm_crtc *crtc)
 {
 	struct drm_device *dev = crtc->dev;
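valleyview_calc_cdclk() above picks the smallest supported CDclk bin that still has roughly 10% headroom over the highest pixel clock (above 288000 kHz use 400, above 240000 kHz use 320, otherwise 266; the 200 MHz bin is avoided), and the global-resources hook reprograms the divider only when the chosen bin differs from the current one. A sketch of that bin selection plus the "(vco << 1) / cdclk - 1" divider used for the 400 MHz case; the VCO value and mode below are only examples.

#include <stdio.h>

/* Pick a CDclk bin (MHz) with ~10% headroom over the max pixel
 * clock (kHz), mirroring the thresholds in the hunk above. */
static int calc_cdclk(int max_pixclk_khz)
{
	if (max_pixclk_khz > 288000)
		return 400;
	else if (max_pixclk_khz > 240000)
		return 320;
	else
		return 266;	/* the 200 MHz bin is not used */
}

int main(void)
{
	/* Example: 1920x1080@60 is roughly a 148500 kHz pixel clock. */
	int pixclk = 148500;
	int cdclk = calc_cdclk(pixclk);
	int vco = 1600;		/* example SKU VCO in MHz */

	printf("pixclk %d kHz -> cdclk %d MHz\n", pixclk, cdclk);

	if (cdclk == 400)
		printf("divider field = %d\n", (vco << 1) / cdclk - 1);
	else
		printf("cdclk %d: divider register left untouched\n", cdclk);
	return 0;
}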
@@ -4570,9 +4800,8 @@
 		refclk = 100000;
 	} else if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS) &&
 	    intel_panel_use_ssc(dev_priv) && num_connectors < 2) {
-		refclk = dev_priv->vbt.lvds_ssc_freq * 1000;
-		DRM_DEBUG_KMS("using SSC reference clock of %d MHz\n",
-			      refclk / 1000);
+		refclk = dev_priv->vbt.lvds_ssc_freq;
+		DRM_DEBUG_KMS("using SSC reference clock of %d kHz\n", refclk);
 	} else if (!IS_GEN2(dev)) {
 		refclk = 96000;
 	} else {
@@ -4634,24 +4863,24 @@
 	 * PLLB opamp always calibrates to max value of 0x3f, force enable it
 	 * and set it to a reasonable value instead.
 	 */
-	reg_val = vlv_dpio_read(dev_priv, pipe, DPIO_IREF(1));
+	reg_val = vlv_dpio_read(dev_priv, pipe, VLV_PLL_DW9(1));
 	reg_val &= 0xffffff00;
 	reg_val |= 0x00000030;
-	vlv_dpio_write(dev_priv, pipe, DPIO_IREF(1), reg_val);
+	vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW9(1), reg_val);
 
-	reg_val = vlv_dpio_read(dev_priv, pipe, DPIO_CALIBRATION);
+	reg_val = vlv_dpio_read(dev_priv, pipe, VLV_REF_DW13);
 	reg_val &= 0x8cffffff;
 	reg_val = 0x8c000000;
-	vlv_dpio_write(dev_priv, pipe, DPIO_CALIBRATION, reg_val);
+	vlv_dpio_write(dev_priv, pipe, VLV_REF_DW13, reg_val);
 
-	reg_val = vlv_dpio_read(dev_priv, pipe, DPIO_IREF(1));
+	reg_val = vlv_dpio_read(dev_priv, pipe, VLV_PLL_DW9(1));
 	reg_val &= 0xffffff00;
-	vlv_dpio_write(dev_priv, pipe, DPIO_IREF(1), reg_val);
+	vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW9(1), reg_val);
 
-	reg_val = vlv_dpio_read(dev_priv, pipe, DPIO_CALIBRATION);
+	reg_val = vlv_dpio_read(dev_priv, pipe, VLV_REF_DW13);
 	reg_val &= 0x00ffffff;
 	reg_val |= 0xb0000000;
-	vlv_dpio_write(dev_priv, pipe, DPIO_CALIBRATION, reg_val);
+	vlv_dpio_write(dev_priv, pipe, VLV_REF_DW13, reg_val);
 }
 
 static void intel_pch_transcoder_set_m_n(struct intel_crtc *crtc,
@@ -4720,15 +4949,15 @@
 		vlv_pllb_recal_opamp(dev_priv, pipe);
 
 	/* Set up Tx target for periodic Rcomp update */
-	vlv_dpio_write(dev_priv, pipe, DPIO_IREF_BCAST, 0x0100000f);
+	vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW9_BCAST, 0x0100000f);
 
 	/* Disable target IRef on PLL */
-	reg_val = vlv_dpio_read(dev_priv, pipe, DPIO_IREF_CTL(pipe));
+	reg_val = vlv_dpio_read(dev_priv, pipe, VLV_PLL_DW8(pipe));
 	reg_val &= 0x00ffffff;
-	vlv_dpio_write(dev_priv, pipe, DPIO_IREF_CTL(pipe), reg_val);
+	vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW8(pipe), reg_val);
 
 	/* Disable fast lock */
-	vlv_dpio_write(dev_priv, pipe, DPIO_FASTCLK_DISABLE, 0x610);
+	vlv_dpio_write(dev_priv, pipe, VLV_CMN_DW0, 0x610);
 
 	/* Set idtafcrecal before PLL is enabled */
 	mdiv = ((bestm1 << DPIO_M1DIV_SHIFT) | (bestm2 & DPIO_M2DIV_MASK));
@@ -4742,50 +4971,54 @@
 	 * Note: don't use the DAC post divider as it seems unstable.
 	 */
 	mdiv |= (DPIO_POST_DIV_HDMIDP << DPIO_POST_DIV_SHIFT);
-	vlv_dpio_write(dev_priv, pipe, DPIO_DIV(pipe), mdiv);
+	vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW3(pipe), mdiv);
 
 	mdiv |= DPIO_ENABLE_CALIBRATION;
-	vlv_dpio_write(dev_priv, pipe, DPIO_DIV(pipe), mdiv);
+	vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW3(pipe), mdiv);
 
 	/* Set HBR and RBR LPF coefficients */
 	if (crtc->config.port_clock == 162000 ||
 	    intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_ANALOG) ||
 	    intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_HDMI))
-		vlv_dpio_write(dev_priv, pipe, DPIO_LPF_COEFF(pipe),
+		vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW10(pipe),
 				 0x009f0003);
 	else
-		vlv_dpio_write(dev_priv, pipe, DPIO_LPF_COEFF(pipe),
+		vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW10(pipe),
 				 0x00d0000f);
 
 	if (intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_EDP) ||
 	    intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_DISPLAYPORT)) {
 		/* Use SSC source */
 		if (!pipe)
-			vlv_dpio_write(dev_priv, pipe, DPIO_REFSFR(pipe),
+			vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW5(pipe),
 					 0x0df40000);
 		else
-			vlv_dpio_write(dev_priv, pipe, DPIO_REFSFR(pipe),
+			vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW5(pipe),
 					 0x0df70000);
 	} else { /* HDMI or VGA */
 		/* Use bend source */
 		if (!pipe)
-			vlv_dpio_write(dev_priv, pipe, DPIO_REFSFR(pipe),
+			vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW5(pipe),
 					 0x0df70000);
 		else
-			vlv_dpio_write(dev_priv, pipe, DPIO_REFSFR(pipe),
+			vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW5(pipe),
 					 0x0df40000);
 	}
 
-	coreclk = vlv_dpio_read(dev_priv, pipe, DPIO_CORE_CLK(pipe));
+	coreclk = vlv_dpio_read(dev_priv, pipe, VLV_PLL_DW7(pipe));
 	coreclk = (coreclk & 0x0000ff00) | 0x01c00000;
 	if (intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_DISPLAYPORT) ||
 	    intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_EDP))
 		coreclk |= 0x01000000;
-	vlv_dpio_write(dev_priv, pipe, DPIO_CORE_CLK(pipe), coreclk);
+	vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW7(pipe), coreclk);
 
-	vlv_dpio_write(dev_priv, pipe, DPIO_PLL_CML(pipe), 0x87871000);
+	vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW11(pipe), 0x87871000);
 
-	/* Enable DPIO clock input */
+	/*
+	 * Enable DPIO clock input. We should never disable the reference
+	 * clock for pipe B, since VGA hotplug / manual detection depends
+	 * on it.
+	 */
 	dpll = DPLL_EXT_BUFFER_ENABLE_VLV | DPLL_REFA_CLK_ENABLE_VLV |
 		DPLL_VGA_MODE_DIS | DPLL_INTEGRATED_CLOCK_VLV;
 	/* We should never disable this, set it here for state tracking */
@@ -5230,6 +5463,9 @@
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	uint32_t tmp;
 
+	if (INTEL_INFO(dev)->gen <= 3 && (IS_I830(dev) || !IS_MOBILE(dev)))
+		return;
+
 	tmp = I915_READ(PFIT_CONTROL);
 	if (!(tmp & PFIT_ENABLE))
 		return;
@@ -5261,7 +5497,7 @@
 	int refclk = 100000;
 
 	mutex_lock(&dev_priv->dpio_lock);
-	mdiv = vlv_dpio_read(dev_priv, pipe, DPIO_DIV(pipe));
+	mdiv = vlv_dpio_read(dev_priv, pipe, VLV_PLL_DW3(pipe));
 	mutex_unlock(&dev_priv->dpio_lock);
 
 	clock.m1 = (mdiv >> DPIO_M1DIV_SHIFT) & 7;
@@ -5718,9 +5954,9 @@
 	}
 
 	if (is_lvds && intel_panel_use_ssc(dev_priv) && num_connectors < 2) {
-		DRM_DEBUG_KMS("using SSC reference clock of %d MHz\n",
+		DRM_DEBUG_KMS("using SSC reference clock of %d kHz\n",
 			      dev_priv->vbt.lvds_ssc_freq);
-		return dev_priv->vbt.lvds_ssc_freq * 1000;
+		return dev_priv->vbt.lvds_ssc_freq;
 	}
 
 	return 120000;
@@ -5982,7 +6218,7 @@
 	factor = 21;
 	if (is_lvds) {
 		if ((intel_panel_use_ssc(dev_priv) &&
-		     dev_priv->vbt.lvds_ssc_freq == 100) ||
+		     dev_priv->vbt.lvds_ssc_freq == 100000) ||
 		    (HAS_PCH_IBX(dev) && intel_is_dual_link_lvds(dev)))
 			factor = 25;
 	} else if (intel_crtc->config.sdvo_tv_clock)
@@ -6323,7 +6559,7 @@
 
 	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
 	val = I915_READ(DEIMR);
-	WARN((val & ~DE_PCH_EVENT_IVB) != val,
+	WARN((val | DE_PCH_EVENT_IVB) != 0xffffffff,
 	     "Unexpected DEIMR bits enabled: 0x%x\n", val);
 	val = I915_READ(SDEIMR);
 	WARN((val | SDE_HOTPLUG_MASK_CPT) != 0xffffffff,
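The assertion rewrite above changes what is being verified: the old "(val & ~DE_PCH_EVENT_IVB) != val" only fired if the PCH-event bit itself was set in the mask, while the new "(val | DE_PCH_EVENT_IVB) != 0xffffffff" fires whenever any other interrupt is left unmasked. A small truth check of the two predicates; the bit position used is a placeholder, not the real register definition.

#include <stdio.h>
#include <stdint.h>

#define PCH_EVENT_BIT	(1u << 28)	/* placeholder bit position */

int main(void)
{
	/* Desired state in a mask register where 1 = interrupt masked:
	 * everything masked except the PCH event. */
	uint32_t good = ~PCH_EVENT_BIT;
	uint32_t bad  = good & ~(1u << 3);	/* one extra interrupt unmasked */

	/* Old predicate: true only when the PCH event bit is set (masked);
	 * it never notices the stray unmasked bit in "bad". */
	printf("old(good)=%d old(bad)=%d\n",
	       (good & ~PCH_EVENT_BIT) != good,
	       (bad  & ~PCH_EVENT_BIT) != bad);

	/* New predicate: true when any other bit is unmasked. */
	printf("new(good)=%d new(bad)=%d\n",
	       (good | PCH_EVENT_BIT) != 0xffffffff,
	       (bad  | PCH_EVENT_BIT) != 0xffffffff);
	return 0;
}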
@@ -6402,7 +6638,7 @@
 
 	/* Make sure we're not on PC8 state before disabling PC8, otherwise
 	 * we'll hang the machine! */
-	gen6_gt_force_wake_get(dev_priv);
+	gen6_gt_force_wake_get(dev_priv, FORCEWAKE_ALL);
 
 	if (val & LCPLL_POWER_DOWN_ALLOW) {
 		val &= ~LCPLL_POWER_DOWN_ALLOW;
@@ -6436,7 +6672,7 @@
 			DRM_ERROR("Switching back to LCPLL failed\n");
 	}
 
-	gen6_gt_force_wake_put(dev_priv);
+	gen6_gt_force_wake_put(dev_priv, FORCEWAKE_ALL);
 }
 
 void hsw_enable_pc8_work(struct work_struct *__work)
@@ -6447,6 +6683,8 @@
 	struct drm_device *dev = dev_priv->dev;
 	uint32_t val;
 
+	WARN_ON(!HAS_PC8(dev));
+
 	if (dev_priv->pc8.enabled)
 		return;
 
@@ -6463,6 +6701,8 @@
 	lpt_disable_clkout_dp(dev);
 	hsw_pc8_disable_interrupts(dev);
 	hsw_disable_lcpll(dev_priv, true, true);
+
+	intel_runtime_pm_put(dev_priv);
 }
 
 static void __hsw_enable_package_c8(struct drm_i915_private *dev_priv)
@@ -6492,12 +6732,16 @@
 	if (dev_priv->pc8.disable_count != 1)
 		return;
 
+	WARN_ON(!HAS_PC8(dev));
+
 	cancel_delayed_work_sync(&dev_priv->pc8.enable_work);
 	if (!dev_priv->pc8.enabled)
 		return;
 
 	DRM_DEBUG_KMS("Disabling package C8+\n");
 
+	intel_runtime_pm_get(dev_priv);
+
 	hsw_restore_lcpll(dev_priv);
 	hsw_pc8_restore_interrupts(dev);
 	lpt_init_pch_refclk(dev);
@@ -6704,8 +6948,9 @@
 	int plane = intel_crtc->plane;
 	int ret;
 
-	if (!intel_ddi_pll_mode_set(crtc))
+	if (!intel_ddi_pll_select(intel_crtc))
 		return -EINVAL;
+	intel_ddi_pll_enable(intel_crtc);
 
 	if (intel_crtc->config.has_dp_encoder)
 		intel_dp_set_m_n(intel_crtc);
@@ -6796,8 +7041,9 @@
 	if (intel_display_power_enabled(dev, pfit_domain))
 		ironlake_get_pfit_config(crtc, pipe_config);
 
-	pipe_config->ips_enabled = hsw_crtc_supports_ips(crtc) &&
-				   (I915_READ(IPS_CTL) & IPS_ENABLE);
+	if (IS_HASWELL(dev))
+		pipe_config->ips_enabled = hsw_crtc_supports_ips(crtc) &&
+			(I915_READ(IPS_CTL) & IPS_ENABLE);
 
 	pipe_config->pixel_multiplier = 1;
 
@@ -7689,7 +7935,7 @@
 	u32 dpll = pipe_config->dpll_hw_state.dpll;
 
 	if ((dpll & PLL_REF_INPUT_MASK) == PLLB_REF_INPUT_SPREADSPECTRUMIN)
-		return dev_priv->vbt.lvds_ssc_freq * 1000;
+		return dev_priv->vbt.lvds_ssc_freq;
 	else if (HAS_PCH_SPLIT(dev))
 		return 120000;
 	else if (!IS_GEN2(dev))
@@ -7752,12 +7998,17 @@
 		else
 			i9xx_clock(refclk, &clock);
 	} else {
-		bool is_lvds = (pipe == 1) && (I915_READ(LVDS) & LVDS_PORT_EN);
+		u32 lvds = IS_I830(dev) ? 0 : I915_READ(LVDS);
+		bool is_lvds = (pipe == 1) && (lvds & LVDS_PORT_EN);
 
 		if (is_lvds) {
 			clock.p1 = ffs((dpll & DPLL_FPA01_P1_POST_DIV_MASK_I830_LVDS) >>
 				       DPLL_FPA01_P1_POST_DIV_SHIFT);
-			clock.p2 = 14;
+
+			if (lvds & LVDS_CLKB_POWER_UP)
+				clock.p2 = 7;
+			else
+				clock.p2 = 14;
 		} else {
 			if (dpll & PLL_P1_DIVIDE_BY_TWO)
 				clock.p1 = 2;
@@ -8335,6 +8586,20 @@
 	if (ring->id == RCS)
 		len += 6;
 
+	/*
+	 * BSpec MI_DISPLAY_FLIP for IVB:
+	 * "The full packet must be contained within the same cache line."
+	 *
+	 * Currently the LRI+SRM+MI_DISPLAY_FLIP all fit within the same
+	 * cacheline, if we ever start emitting more commands before
+	 * the MI_DISPLAY_FLIP we may need to first emit everything else,
+	 * then do the cacheline alignment, and finally emit the
+	 * MI_DISPLAY_FLIP.
+	 */
+	ret = intel_ring_cacheline_align(ring);
+	if (ret)
+		goto err_unpin;
+
 	ret = intel_ring_begin(ring, len);
 	if (ret)
 		goto err_unpin;
@@ -8493,28 +8758,6 @@
 	.load_lut = intel_crtc_load_lut,
 };
 
-static bool intel_encoder_crtc_ok(struct drm_encoder *encoder,
-				  struct drm_crtc *crtc)
-{
-	struct drm_device *dev;
-	struct drm_crtc *tmp;
-	int crtc_mask = 1;
-
-	WARN(!crtc, "checking null crtc?\n");
-
-	dev = crtc->dev;
-
-	list_for_each_entry(tmp, &dev->mode_config.crtc_list, head) {
-		if (tmp == crtc)
-			break;
-		crtc_mask <<= 1;
-	}
-
-	if (encoder->possible_crtcs & crtc_mask)
-		return true;
-	return false;
-}
-
 /**
  * intel_modeset_update_staged_output_state
  *
@@ -9122,7 +9365,9 @@
 		PIPE_CONF_CHECK_I(pch_pfit.size);
 	}
 
-	PIPE_CONF_CHECK_I(ips_enabled);
+	/* BDW+ don't expose a synchronous way to read the state */
+	if (IS_HASWELL(dev))
+		PIPE_CONF_CHECK_I(ips_enabled);
 
 	PIPE_CONF_CHECK_I(double_wide);
 
@@ -9368,21 +9613,19 @@
 {
 	struct drm_device *dev = crtc->dev;
 	drm_i915_private_t *dev_priv = dev->dev_private;
-	struct drm_display_mode *saved_mode, *saved_hwmode;
+	struct drm_display_mode *saved_mode;
 	struct intel_crtc_config *pipe_config = NULL;
 	struct intel_crtc *intel_crtc;
 	unsigned disable_pipes, prepare_pipes, modeset_pipes;
 	int ret = 0;
 
-	saved_mode = kcalloc(2, sizeof(*saved_mode), GFP_KERNEL);
+	saved_mode = kmalloc(sizeof(*saved_mode), GFP_KERNEL);
 	if (!saved_mode)
 		return -ENOMEM;
-	saved_hwmode = saved_mode + 1;
 
 	intel_modeset_affected_pipes(crtc, &modeset_pipes,
 				     &prepare_pipes, &disable_pipes);
 
-	*saved_hwmode = crtc->hwmode;
 	*saved_mode = crtc->mode;
 
 	/* Hack: Because we don't (yet) support global modeset on multiple
@@ -9402,6 +9645,21 @@
 				       "[modeset]");
 	}
 
+	/*
+	 * See if the config requires any additional preparation, e.g.
+	 * to adjust global state with pipes off.  We need to do this
+	 * here so we can get the modeset_pipe updated config for the new
+	 * mode set on this crtc.  For other crtcs we need to use the
+	 * adjusted_mode bits in the crtc directly.
+	 */
+	if (IS_VALLEYVIEW(dev)) {
+		valleyview_modeset_global_pipes(dev, &prepare_pipes,
+						modeset_pipes, pipe_config);
+
+		/* may have added more to prepare_pipes than we should */
+		prepare_pipes &= ~disable_pipes;
+	}
+
 	for_each_intel_crtc_masked(dev, disable_pipes, intel_crtc)
 		intel_crtc_disable(&intel_crtc->base);
 
@@ -9418,6 +9676,14 @@
 		/* mode_set/enable/disable functions rely on a correct pipe
 		 * config. */
 		to_intel_crtc(crtc)->config = *pipe_config;
+
+		/*
+		 * Calculate and store various constants which
+		 * are later needed by vblank and swap-completion
+		 * timestamping. They are derived from true hwmode.
+		 */
+		drm_calc_timestamping_constants(crtc,
+						&pipe_config->adjusted_mode);
 	}
 
 	/* Only after disabling all output pipelines that will be changed can we
@@ -9441,23 +9707,10 @@
 	for_each_intel_crtc_masked(dev, prepare_pipes, intel_crtc)
 		dev_priv->display.crtc_enable(&intel_crtc->base);
 
-	if (modeset_pipes) {
-		/* Store real post-adjustment hardware mode. */
-		crtc->hwmode = pipe_config->adjusted_mode;
-
-		/* Calculate and store various constants which
-		 * are later needed by vblank and swap-completion
-		 * timestamping. They are derived from true hwmode.
-		 */
-		drm_calc_timestamping_constants(crtc);
-	}
-
 	/* FIXME: add subpixel order */
 done:
-	if (ret && crtc->enabled) {
-		crtc->hwmode = *saved_hwmode;
+	if (ret && crtc->enabled)
 		crtc->mode = *saved_mode;
-	}
 
 out:
 	kfree(pipe_config);
@@ -9679,8 +9932,8 @@
 		}
 
 		/* Make sure the new CRTC will work with the encoder */
-		if (!intel_encoder_crtc_ok(&connector->new_encoder->base,
-					   new_crtc)) {
+		if (!drm_encoder_crtc_ok(&connector->new_encoder->base,
+					 new_crtc)) {
 			return -EINVAL;
 		}
 		connector->encoder->new_crtc = to_intel_crtc(new_crtc);
@@ -9694,17 +9947,21 @@
 	/* Check for any encoders that needs to be disabled. */
 	list_for_each_entry(encoder, &dev->mode_config.encoder_list,
 			    base.head) {
+		int num_connectors = 0;
 		list_for_each_entry(connector,
 				    &dev->mode_config.connector_list,
 				    base.head) {
 			if (connector->new_encoder == encoder) {
 				WARN_ON(!connector->new_encoder->new_crtc);
-
-				goto next_encoder;
+				num_connectors++;
 			}
 		}
-		encoder->new_crtc = NULL;
-next_encoder:
+
+		if (num_connectors == 0)
+			encoder->new_crtc = NULL;
+		else if (num_connectors > 1)
+			return -EINVAL;
+
 		/* Only now check for crtc changes so we don't miss encoders
 		 * that will be disabled. */
 		if (&encoder->new_crtc->base != encoder->base.crtc) {
@@ -9775,6 +10032,16 @@
 
 		ret = intel_pipe_set_base(set->crtc,
 					  set->x, set->y, set->fb);
+		/*
+		 * In the fastboot case this may be our only check of the
+		 * state after boot.  It would be better to only do it on
+		 * the first update, but we don't have a nice way of doing that
+		 * (and really, set_config isn't used much for high freq page
+		 * flipping, so increasing its cost here shouldn't be a big
+		 * deal).
+		 */
+		if (i915_fastboot && ret == 0)
+			intel_modeset_check_state(set->crtc->dev);
 	}
 
 	if (ret) {
@@ -9835,7 +10102,7 @@
 				struct intel_shared_dpll *pll)
 {
 	/* PCH refclock must be enabled first */
-	assert_pch_refclk_enabled(dev_priv);
+	ibx_assert_pch_refclk_enabled(dev_priv);
 
 	I915_WRITE(PCH_DPLL(pll->id), pll->hw_state.dpll);
 
@@ -9903,8 +10170,6 @@
 		dev_priv->num_shared_dpll = 0;
 
 	BUG_ON(dev_priv->num_shared_dpll > I915_NUM_PLLS);
-	DRM_DEBUG_KMS("%i shared PLLs initialized\n",
-		      dev_priv->num_shared_dpll);
 }
 
 static void intel_crtc_init(struct drm_device *dev, int pipe)
@@ -9926,10 +10191,13 @@
 		intel_crtc->lut_b[i] = i;
 	}
 
-	/* Swap pipes & planes for FBC on pre-965 */
+	/*
+	 * On gen2/3 only plane A can do fbc, but the panel fitter and lvds port
+	 * is hooked to plane B. Hence we want plane A feeding pipe B.
+	 */
 	intel_crtc->pipe = pipe;
 	intel_crtc->plane = pipe;
-	if (IS_MOBILE(dev) && IS_GEN3(dev)) {
+	if (HAS_FBC(dev) && INTEL_INFO(dev)->gen < 4) {
 		DRM_DEBUG_KMS("swapping pipes & planes for FBC\n");
 		intel_crtc->plane = !pipe;
 	}
@@ -10018,6 +10286,28 @@
 	return true;
 }
 
+const char *intel_output_name(int output)
+{
+	static const char *names[] = {
+		[INTEL_OUTPUT_UNUSED] = "Unused",
+		[INTEL_OUTPUT_ANALOG] = "Analog",
+		[INTEL_OUTPUT_DVO] = "DVO",
+		[INTEL_OUTPUT_SDVO] = "SDVO",
+		[INTEL_OUTPUT_LVDS] = "LVDS",
+		[INTEL_OUTPUT_TVOUT] = "TV",
+		[INTEL_OUTPUT_HDMI] = "HDMI",
+		[INTEL_OUTPUT_DISPLAYPORT] = "DisplayPort",
+		[INTEL_OUTPUT_EDP] = "eDP",
+		[INTEL_OUTPUT_DSI] = "DSI",
+		[INTEL_OUTPUT_UNKNOWN] = "Unknown",
+	};
+
+	if (output < 0 || output >= ARRAY_SIZE(names) || !names[output])
+		return "Invalid";
+
+	return names[output];
+}
+
 static void intel_setup_outputs(struct drm_device *dev)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
@@ -10412,8 +10702,11 @@
 		}
 	} else if (IS_G4X(dev)) {
 		dev_priv->display.write_eld = g4x_write_eld;
-	} else if (IS_VALLEYVIEW(dev))
+	} else if (IS_VALLEYVIEW(dev)) {
+		dev_priv->display.modeset_global_resources =
+			valleyview_modeset_global_resources;
 		dev_priv->display.write_eld = ironlake_write_eld;
+	}
 
 	/* Default just returns -ENODEV to indicate unsupported */
 	dev_priv->display.queue_flip = intel_default_queue_flip;
@@ -10440,6 +10733,8 @@
 		dev_priv->display.queue_flip = intel_gen7_queue_flip;
 		break;
 	}
+
+	intel_panel_init_backlight_funcs(dev);
 }
 
 /*
@@ -10476,17 +10771,6 @@
 	DRM_INFO("applying inverted panel brightness quirk\n");
 }
 
-/*
- * Some machines (Dell XPS13) suffer broken backlight controls if
- * BLM_PCH_PWM_ENABLE is set.
- */
-static void quirk_no_pcm_pwm_enable(struct drm_device *dev)
-{
-	struct drm_i915_private *dev_priv = dev->dev_private;
-	dev_priv->quirks |= QUIRK_NO_PCH_PWM_ENABLE;
-	DRM_INFO("applying no-PCH_PWM_ENABLE quirk\n");
-}
-
 struct intel_quirk {
 	int device;
 	int subsystem_vendor;
@@ -10555,11 +10839,6 @@
 
 	/* Acer Aspire 4736Z */
 	{ 0x2a42, 0x1025, 0x0260, quirk_invert_brightness },
-
-	/* Dell XPS13 HD Sandy Bridge */
-	{ 0x0116, 0x1028, 0x052e, quirk_no_pcm_pwm_enable },
-	/* Dell XPS13 HD and XPS13 FHD Ivy Bridge */
-	{ 0x0166, 0x1028, 0x058b, quirk_no_pcm_pwm_enable },
 };
 
 static void intel_init_quirks(struct drm_device *dev)
@@ -10603,18 +10882,11 @@
 
 void intel_modeset_init_hw(struct drm_device *dev)
 {
-	struct drm_i915_private *dev_priv = dev->dev_private;
-
 	intel_prepare_ddi(dev);
 
 	intel_init_clock_gating(dev);
 
-	/* Enable the CRI clock source so we can get at the display */
-	if (IS_VALLEYVIEW(dev))
-		I915_WRITE(DPLL(PIPE_B), I915_READ(DPLL(PIPE_B)) |
-			   DPLL_INTEGRATED_CRI_CLK_VLV);
-
-	intel_init_dpio(dev);
+	intel_reset_dpio(dev);
 
 	mutex_lock(&dev->struct_mutex);
 	intel_enable_gt_powersave(dev);
@@ -10676,6 +10948,9 @@
 		}
 	}
 
+	intel_init_dpio(dev);
+	intel_reset_dpio(dev);
+
 	intel_cpu_pll_init(dev);
 	intel_shared_dpll_init(dev);
 
@@ -10879,7 +11154,7 @@
 	 * level, just check if the power well is enabled instead of trying to
 	 * follow the "don't touch the power well if we don't need it" policy
 	 * the rest of the driver uses. */
-	if (HAS_POWER_WELL(dev) &&
+	if ((IS_HASWELL(dev) || IS_BROADWELL(dev)) &&
 	    (I915_READ(HSW_PWR_WELL_DRIVER) & HSW_PWR_WELL_STATE_ENABLED) == 0)
 		return;
 
@@ -11023,7 +11298,7 @@
 		pll->on = false;
 	}
 
-	if (IS_HASWELL(dev))
+	if (HAS_PCH_SPLIT(dev))
 		ilk_wm_get_hw_state(dev);
 
 	if (force_restore) {
@@ -11101,12 +11376,11 @@
 	/* flush any delayed tasks or pending work */
 	flush_scheduled_work();
 
-	/* destroy backlight, if any, before the connectors */
-	intel_panel_destroy_backlight(dev);
-
-	/* destroy the sysfs files before encoders/connectors */
-	list_for_each_entry(connector, &dev->mode_config.connector_list, head)
+	/* destroy the backlight and sysfs files before encoders/connectors */
+	list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
+		intel_panel_destroy_backlight(connector);
 		drm_sysfs_connector_remove(connector);
+	}
 
 	drm_mode_config_cleanup(dev);
 
@@ -11161,6 +11435,7 @@
 	} cursor[I915_MAX_PIPES];
 
 	struct intel_pipe_error_state {
+		bool power_domain_on;
 		u32 source;
 	} pipe[I915_MAX_PIPES];
 
@@ -11175,6 +11450,7 @@
 	} plane[I915_MAX_PIPES];
 
 	struct intel_transcoder_error_state {
+		bool power_domain_on;
 		enum transcoder cpu_transcoder;
 
 		u32 conf;
@@ -11208,11 +11484,13 @@
 	if (error == NULL)
 		return NULL;
 
-	if (HAS_POWER_WELL(dev))
+	if (IS_HASWELL(dev) || IS_BROADWELL(dev))
 		error->power_well_driver = I915_READ(HSW_PWR_WELL_DRIVER);
 
 	for_each_pipe(i) {
-		if (!intel_display_power_enabled(dev, POWER_DOMAIN_PIPE(i)))
+		error->pipe[i].power_domain_on =
+			intel_display_power_enabled_sw(dev, POWER_DOMAIN_PIPE(i));
+		if (!error->pipe[i].power_domain_on)
 			continue;
 
 		if (INTEL_INFO(dev)->gen <= 6 || IS_VALLEYVIEW(dev)) {
@@ -11248,8 +11526,10 @@
 	for (i = 0; i < error->num_transcoders; i++) {
 		enum transcoder cpu_transcoder = transcoders[i];
 
-		if (!intel_display_power_enabled(dev,
-				POWER_DOMAIN_TRANSCODER(cpu_transcoder)))
+		error->transcoder[i].power_domain_on =
+			intel_display_power_enabled_sw(dev,
+				POWER_DOMAIN_TRANSCODER(cpu_transcoder));
+		if (!error->transcoder[i].power_domain_on)
 			continue;
 
 		error->transcoder[i].cpu_transcoder = cpu_transcoder;
@@ -11279,11 +11559,13 @@
 		return;
 
 	err_printf(m, "Num Pipes: %d\n", INTEL_INFO(dev)->num_pipes);
-	if (HAS_POWER_WELL(dev))
+	if (IS_HASWELL(dev) || IS_BROADWELL(dev))
 		err_printf(m, "PWR_WELL_CTL2: %08x\n",
 			   error->power_well_driver);
 	for_each_pipe(i) {
 		err_printf(m, "Pipe [%d]:\n", i);
+		err_printf(m, "  Power: %s\n",
+			   error->pipe[i].power_domain_on ? "on" : "off");
 		err_printf(m, "  SRC: %08x\n", error->pipe[i].source);
 
 		err_printf(m, "Plane [%d]:\n", i);
@@ -11309,6 +11591,8 @@
 	for (i = 0; i < error->num_transcoders; i++) {
 		err_printf(m, "CPU transcoder: %c\n",
 			   transcoder_name(error->transcoder[i].cpu_transcoder));
+		err_printf(m, "  Power: %s\n",
+			   error->transcoder[i].power_domain_on ? "on" : "off");
 		err_printf(m, "  CONF: %08x\n", error->transcoder[i].conf);
 		err_printf(m, "  HTOTAL: %08x\n", error->transcoder[i].htotal);
 		err_printf(m, "  HBLANK: %08x\n", error->transcoder[i].hblank);
diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c
index 30c627c..57552eb 100644
--- a/drivers/gpu/drm/i915/intel_dp.c
+++ b/drivers/gpu/drm/i915/intel_dp.c
@@ -142,7 +142,7 @@
 	return (max_link_clock * max_lanes * 8) / 10;
 }
 
-static int
+static enum drm_mode_status
 intel_dp_mode_valid(struct drm_connector *connector,
 		    struct drm_display_mode *mode)
 {
@@ -404,7 +404,7 @@
 	int i, ret, recv_bytes;
 	uint32_t status;
 	int try, precharge, clock = 0;
-	bool has_aux_irq = INTEL_INFO(dev)->gen >= 5 && !IS_VALLEYVIEW(dev);
+	bool has_aux_irq = HAS_AUX_IRQ(dev);
 	uint32_t timeout;
 
 	/* dp aux is extremely sensitive to irq latency, hence request the
@@ -537,29 +537,33 @@
 	uint8_t	msg[20];
 	int msg_bytes;
 	uint8_t	ack;
+	int retry;
 
 	if (WARN_ON(send_bytes > 16))
 		return -E2BIG;
 
 	intel_dp_check_edp(intel_dp);
-	msg[0] = AUX_NATIVE_WRITE << 4;
+	msg[0] = DP_AUX_NATIVE_WRITE << 4;
 	msg[1] = address >> 8;
 	msg[2] = address & 0xff;
 	msg[3] = send_bytes - 1;
 	memcpy(&msg[4], send, send_bytes);
 	msg_bytes = send_bytes + 4;
-	for (;;) {
+	for (retry = 0; retry < 7; retry++) {
 		ret = intel_dp_aux_ch(intel_dp, msg, msg_bytes, &ack, 1);
 		if (ret < 0)
 			return ret;
-		if ((ack & AUX_NATIVE_REPLY_MASK) == AUX_NATIVE_REPLY_ACK)
-			break;
-		else if ((ack & AUX_NATIVE_REPLY_MASK) == AUX_NATIVE_REPLY_DEFER)
-			udelay(100);
+		ack >>= 4;
+		if ((ack & DP_AUX_NATIVE_REPLY_MASK) == DP_AUX_NATIVE_REPLY_ACK)
+			return send_bytes;
+		else if ((ack & DP_AUX_NATIVE_REPLY_MASK) == DP_AUX_NATIVE_REPLY_DEFER)
+			usleep_range(400, 500);
 		else
 			return -EIO;
 	}
-	return send_bytes;
+
+	DRM_ERROR("too many retries, giving up\n");
+	return -EIO;
 }
 
 /* Write a single byte to the aux channel in native mode */
@@ -581,12 +585,13 @@
 	int reply_bytes;
 	uint8_t ack;
 	int ret;
+	int retry;
 
 	if (WARN_ON(recv_bytes > 19))
 		return -E2BIG;
 
 	intel_dp_check_edp(intel_dp);
-	msg[0] = AUX_NATIVE_READ << 4;
+	msg[0] = DP_AUX_NATIVE_READ << 4;
 	msg[1] = address >> 8;
 	msg[2] = address & 0xff;
 	msg[3] = recv_bytes - 1;
@@ -594,23 +599,26 @@
 	msg_bytes = 4;
 	reply_bytes = recv_bytes + 1;
 
-	for (;;) {
+	for (retry = 0; retry < 7; retry++) {
 		ret = intel_dp_aux_ch(intel_dp, msg, msg_bytes,
 				      reply, reply_bytes);
 		if (ret == 0)
 			return -EPROTO;
 		if (ret < 0)
 			return ret;
-		ack = reply[0];
-		if ((ack & AUX_NATIVE_REPLY_MASK) == AUX_NATIVE_REPLY_ACK) {
+		ack = reply[0] >> 4;
+		if ((ack & DP_AUX_NATIVE_REPLY_MASK) == DP_AUX_NATIVE_REPLY_ACK) {
 			memcpy(recv, reply + 1, ret - 1);
 			return ret - 1;
 		}
-		else if ((ack & AUX_NATIVE_REPLY_MASK) == AUX_NATIVE_REPLY_DEFER)
-			udelay(100);
+		else if ((ack & DP_AUX_NATIVE_REPLY_MASK) == DP_AUX_NATIVE_REPLY_DEFER)
+			usleep_range(400, 500);
 		else
 			return -EIO;
 	}
+
+	DRM_ERROR("too many retries, giving up\n");
+	return -EIO;
 }
 
 static int
@@ -633,12 +641,12 @@
 	intel_dp_check_edp(intel_dp);
 	/* Set up the command byte */
 	if (mode & MODE_I2C_READ)
-		msg[0] = AUX_I2C_READ << 4;
+		msg[0] = DP_AUX_I2C_READ << 4;
 	else
-		msg[0] = AUX_I2C_WRITE << 4;
+		msg[0] = DP_AUX_I2C_WRITE << 4;
 
 	if (!(mode & MODE_I2C_STOP))
-		msg[0] |= AUX_I2C_MOT << 4;
+		msg[0] |= DP_AUX_I2C_MOT << 4;
 
 	msg[1] = address >> 8;
 	msg[2] = address;
@@ -675,17 +683,17 @@
 			goto out;
 		}
 
-		switch (reply[0] & AUX_NATIVE_REPLY_MASK) {
-		case AUX_NATIVE_REPLY_ACK:
+		switch ((reply[0] >> 4) & DP_AUX_NATIVE_REPLY_MASK) {
+		case DP_AUX_NATIVE_REPLY_ACK:
 			/* I2C-over-AUX Reply field is only valid
 			 * when paired with AUX ACK.
 			 */
 			break;
-		case AUX_NATIVE_REPLY_NACK:
+		case DP_AUX_NATIVE_REPLY_NACK:
 			DRM_DEBUG_KMS("aux_ch native nack\n");
 			ret = -EREMOTEIO;
 			goto out;
-		case AUX_NATIVE_REPLY_DEFER:
+		case DP_AUX_NATIVE_REPLY_DEFER:
 			/*
 			 * For now, just give more slack to branch devices. We
 			 * could check the DPCD for I2C bit rate capabilities,
@@ -706,18 +714,18 @@
 			goto out;
 		}
 
-		switch (reply[0] & AUX_I2C_REPLY_MASK) {
-		case AUX_I2C_REPLY_ACK:
+		switch ((reply[0] >> 4) & DP_AUX_I2C_REPLY_MASK) {
+		case DP_AUX_I2C_REPLY_ACK:
 			if (mode == MODE_I2C_READ) {
 				*read_byte = reply[1];
 			}
 			ret = reply_bytes - 1;
 			goto out;
-		case AUX_I2C_REPLY_NACK:
+		case DP_AUX_I2C_REPLY_NACK:
 			DRM_DEBUG_KMS("aux_i2c nack\n");
 			ret = -EREMOTEIO;
 			goto out;
-		case AUX_I2C_REPLY_DEFER:
+		case DP_AUX_I2C_REPLY_DEFER:
 			DRM_DEBUG_KMS("aux_i2c defer\n");
 			udelay(100);
 			break;
@@ -1037,6 +1045,8 @@
 				I915_READ(pp_stat_reg),
 				I915_READ(pp_ctrl_reg));
 	}
+
+	DRM_DEBUG_KMS("Wait complete\n");
 }
 
 static void ironlake_wait_panel_on(struct intel_dp *intel_dp)
@@ -1092,6 +1102,8 @@
 	if (ironlake_edp_have_panel_vdd(intel_dp))
 		return;
 
+	intel_runtime_pm_get(dev_priv);
+
 	DRM_DEBUG_KMS("Turning eDP VDD on\n");
 
 	if (!ironlake_edp_have_panel_power(intel_dp))
@@ -1140,7 +1152,11 @@
 		/* Make sure sequencer is idle before allowing subsequent activity */
 		DRM_DEBUG_KMS("PP_STATUS: 0x%08x PP_CONTROL: 0x%08x\n",
 		I915_READ(pp_stat_reg), I915_READ(pp_ctrl_reg));
-		msleep(intel_dp->panel_power_down_delay);
+
+		if ((pp & POWER_TARGET_ON) == 0)
+			msleep(intel_dp->panel_power_cycle_delay);
+
+		intel_runtime_pm_put(dev_priv);
 	}
 }
 
@@ -1233,20 +1249,16 @@
 
 	DRM_DEBUG_KMS("Turn eDP power off\n");
 
-	WARN(!intel_dp->want_panel_vdd, "Need VDD to turn off panel\n");
-
 	pp = ironlake_get_pp_control(intel_dp);
 	/* We need to switch off panel power _and_ force vdd, for otherwise some
 	 * panels get very unhappy and cease to work. */
-	pp &= ~(POWER_TARGET_ON | EDP_FORCE_VDD | PANEL_POWER_RESET | EDP_BLC_ENABLE);
+	pp &= ~(POWER_TARGET_ON | PANEL_POWER_RESET | EDP_BLC_ENABLE);
 
 	pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
 
 	I915_WRITE(pp_ctrl_reg, pp);
 	POSTING_READ(pp_ctrl_reg);
 
-	intel_dp->want_panel_vdd = false;
-
 	ironlake_wait_panel_off(intel_dp);
 }
 
@@ -1772,7 +1784,6 @@
 
 	/* Make sure the panel is off before trying to change the mode. But also
 	 * ensure that we have vdd while we switch off the panel. */
-	ironlake_edp_panel_vdd_on(intel_dp);
 	ironlake_edp_backlight_off(intel_dp);
 	intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_OFF);
 	ironlake_edp_panel_off(intel_dp);
@@ -1845,34 +1856,36 @@
 	struct drm_device *dev = encoder->base.dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct intel_crtc *intel_crtc = to_intel_crtc(encoder->base.crtc);
-	int port = vlv_dport_to_channel(dport);
+	enum dpio_channel port = vlv_dport_to_channel(dport);
 	int pipe = intel_crtc->pipe;
 	struct edp_power_seq power_seq;
 	u32 val;
 
 	mutex_lock(&dev_priv->dpio_lock);
 
-	val = vlv_dpio_read(dev_priv, pipe, DPIO_DATA_LANE_A(port));
+	val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW8(port));
 	val = 0;
 	if (pipe)
 		val |= (1<<21);
 	else
 		val &= ~(1<<21);
 	val |= 0x001000c4;
-	vlv_dpio_write(dev_priv, pipe, DPIO_DATA_CHANNEL(port), val);
-	vlv_dpio_write(dev_priv, pipe, DPIO_PCS_CLOCKBUF0(port), 0x00760018);
-	vlv_dpio_write(dev_priv, pipe, DPIO_PCS_CLOCKBUF8(port), 0x00400888);
+	vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW8(port), val);
+	vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW14(port), 0x00760018);
+	vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW23(port), 0x00400888);
 
 	mutex_unlock(&dev_priv->dpio_lock);
 
-	/* init power sequencer on this pipe and port */
-	intel_dp_init_panel_power_sequencer(dev, intel_dp, &power_seq);
-	intel_dp_init_panel_power_sequencer_registers(dev, intel_dp,
-						      &power_seq);
+	if (is_edp(intel_dp)) {
+		/* init power sequencer on this pipe and port */
+		intel_dp_init_panel_power_sequencer(dev, intel_dp, &power_seq);
+		intel_dp_init_panel_power_sequencer_registers(dev, intel_dp,
+							      &power_seq);
+	}
 
 	intel_enable_dp(encoder);
 
-	vlv_wait_port_ready(dev_priv, port);
+	vlv_wait_port_ready(dev_priv, dport);
 }
 
 static void vlv_dp_pre_pll_enable(struct intel_encoder *encoder)
@@ -1882,24 +1895,24 @@
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct intel_crtc *intel_crtc =
 		to_intel_crtc(encoder->base.crtc);
-	int port = vlv_dport_to_channel(dport);
+	enum dpio_channel port = vlv_dport_to_channel(dport);
 	int pipe = intel_crtc->pipe;
 
 	/* Program Tx lane resets to default */
 	mutex_lock(&dev_priv->dpio_lock);
-	vlv_dpio_write(dev_priv, pipe, DPIO_PCS_TX(port),
+	vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW0(port),
 			 DPIO_PCS_TX_LANE2_RESET |
 			 DPIO_PCS_TX_LANE1_RESET);
-	vlv_dpio_write(dev_priv, pipe, DPIO_PCS_CLK(port),
+	vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW1(port),
 			 DPIO_PCS_CLK_CRI_RXEB_EIOS_EN |
 			 DPIO_PCS_CLK_CRI_RXDIGFILTSG_EN |
 			 (1<<DPIO_PCS_CLK_DATAWIDTH_SHIFT) |
 				 DPIO_PCS_CLK_SOFT_RESET);
 
 	/* Fix up inter-pair skew failure */
-	vlv_dpio_write(dev_priv, pipe, DPIO_PCS_STAGGER1(port), 0x00750f00);
-	vlv_dpio_write(dev_priv, pipe, DPIO_TX_CTL(port), 0x00001500);
-	vlv_dpio_write(dev_priv, pipe, DPIO_TX_LANE(port), 0x40400000);
+	vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW12(port), 0x00750f00);
+	vlv_dpio_write(dev_priv, pipe, VLV_TX_DW11(port), 0x00001500);
+	vlv_dpio_write(dev_priv, pipe, VLV_TX_DW14(port), 0x40400000);
 	mutex_unlock(&dev_priv->dpio_lock);
 }
 
@@ -1941,18 +1954,6 @@
 					      DP_LINK_STATUS_SIZE);
 }
 
-#if 0
-static char	*voltage_names[] = {
-	"0.4V", "0.6V", "0.8V", "1.2V"
-};
-static char	*pre_emph_names[] = {
-	"0dB", "3.5dB", "6dB", "9.5dB"
-};
-static char	*link_train_names[] = {
-	"pattern 1", "pattern 2", "idle", "off"
-};
-#endif
-
 /*
  * These are source-specific values; current Intel hardware supports
  * a maximum voltage of 800mV and a maximum pre-emphasis of 6dB
@@ -2050,7 +2051,7 @@
 	unsigned long demph_reg_value, preemph_reg_value,
 		uniqtranscale_reg_value;
 	uint8_t train_set = intel_dp->train_set[0];
-	int port = vlv_dport_to_channel(dport);
+	enum dpio_channel port = vlv_dport_to_channel(dport);
 	int pipe = intel_crtc->pipe;
 
 	switch (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) {
@@ -2127,14 +2128,14 @@
 	}
 
 	mutex_lock(&dev_priv->dpio_lock);
-	vlv_dpio_write(dev_priv, pipe, DPIO_TX_OCALINIT(port), 0x00000000);
-	vlv_dpio_write(dev_priv, pipe, DPIO_TX_SWING_CTL4(port), demph_reg_value);
-	vlv_dpio_write(dev_priv, pipe, DPIO_TX_SWING_CTL2(port),
+	vlv_dpio_write(dev_priv, pipe, VLV_TX_DW5(port), 0x00000000);
+	vlv_dpio_write(dev_priv, pipe, VLV_TX_DW4(port), demph_reg_value);
+	vlv_dpio_write(dev_priv, pipe, VLV_TX_DW2(port),
 			 uniqtranscale_reg_value);
-	vlv_dpio_write(dev_priv, pipe, DPIO_TX_SWING_CTL3(port), 0x0C782040);
-	vlv_dpio_write(dev_priv, pipe, DPIO_PCS_STAGGER0(port), 0x00030000);
-	vlv_dpio_write(dev_priv, pipe, DPIO_PCS_CTL_OVER1(port), preemph_reg_value);
-	vlv_dpio_write(dev_priv, pipe, DPIO_TX_OCALINIT(port), 0x80000000);
+	vlv_dpio_write(dev_priv, pipe, VLV_TX_DW3(port), 0x0C782040);
+	vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW11(port), 0x00030000);
+	vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW9(port), preemph_reg_value);
+	vlv_dpio_write(dev_priv, pipe, VLV_TX_DW5(port), 0x80000000);
 	mutex_unlock(&dev_priv->dpio_lock);
 
 	return 0;
@@ -2646,7 +2647,6 @@
 
 		if (cr_tries > 5) {
 			DRM_ERROR("failed to train DP, aborting\n");
-			intel_dp_link_down(intel_dp);
 			break;
 		}
 
@@ -2899,13 +2899,11 @@
 
 	/* Try to read receiver status if the link appears to be up */
 	if (!intel_dp_get_link_status(intel_dp, link_status)) {
-		intel_dp_link_down(intel_dp);
 		return;
 	}
 
 	/* Now read the DPCD to see if it's actually running */
 	if (!intel_dp_get_dpcd(intel_dp)) {
-		intel_dp_link_down(intel_dp);
 		return;
 	}
 
@@ -3020,18 +3018,34 @@
 		return status;
 	}
 
-	switch (intel_dig_port->port) {
-	case PORT_B:
-		bit = PORTB_HOTPLUG_LIVE_STATUS;
-		break;
-	case PORT_C:
-		bit = PORTC_HOTPLUG_LIVE_STATUS;
-		break;
-	case PORT_D:
-		bit = PORTD_HOTPLUG_LIVE_STATUS;
-		break;
-	default:
-		return connector_status_unknown;
+	if (IS_VALLEYVIEW(dev)) {
+		switch (intel_dig_port->port) {
+		case PORT_B:
+			bit = PORTB_HOTPLUG_LIVE_STATUS_VLV;
+			break;
+		case PORT_C:
+			bit = PORTC_HOTPLUG_LIVE_STATUS_VLV;
+			break;
+		case PORT_D:
+			bit = PORTD_HOTPLUG_LIVE_STATUS_VLV;
+			break;
+		default:
+			return connector_status_unknown;
+		}
+	} else {
+		switch (intel_dig_port->port) {
+		case PORT_B:
+			bit = PORTB_HOTPLUG_LIVE_STATUS_G4X;
+			break;
+		case PORT_C:
+			bit = PORTC_HOTPLUG_LIVE_STATUS_G4X;
+			break;
+		case PORT_D:
+			bit = PORTD_HOTPLUG_LIVE_STATUS_G4X;
+			break;
+		default:
+			return connector_status_unknown;
+		}
 	}
 
 	if ((I915_READ(PORT_HOTPLUG_STAT) & bit) == 0)
@@ -3082,9 +3096,12 @@
 	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
 	struct intel_encoder *intel_encoder = &intel_dig_port->base;
 	struct drm_device *dev = connector->dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
 	enum drm_connector_status status;
 	struct edid *edid = NULL;
 
+	intel_runtime_pm_get(dev_priv);
+
 	DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n",
 		      connector->base.id, drm_get_connector_name(connector));
 
@@ -3096,7 +3113,7 @@
 		status = g4x_dp_detect(intel_dp);
 
 	if (status != connector_status_connected)
-		return status;
+		goto out;
 
 	intel_dp_probe_oui(intel_dp);
 
@@ -3112,7 +3129,11 @@
 
 	if (intel_encoder->type != INTEL_OUTPUT_EDP)
 		intel_encoder->type = INTEL_OUTPUT_DISPLAYPORT;
-	return connector_status_connected;
+	status = connector_status_connected;
+
+out:
+	intel_runtime_pm_put(dev_priv);
+	return status;
 }
 
 static int intel_dp_get_modes(struct drm_connector *connector)
diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h
index 79f91f2..fbfaaba 100644
--- a/drivers/gpu/drm/i915/intel_drv.h
+++ b/drivers/gpu/drm/i915/intel_drv.h
@@ -65,8 +65,8 @@
 #define wait_for_atomic_us(COND, US) _wait_for((COND), \
 					       DIV_ROUND_UP((US), 1000), 0)
 
-#define KHz(x) (1000*x)
-#define MHz(x) KHz(1000*x)
+#define KHz(x) (1000 * (x))
+#define MHz(x) KHz(1000 * (x))
 
 /*
  * Display related stuff
@@ -155,7 +155,19 @@
 
 struct intel_panel {
 	struct drm_display_mode *fixed_mode;
+	struct drm_display_mode *downclock_mode;
 	int fitting_mode;
+
+	/* backlight */
+	struct {
+		bool present;
+		u32 level;
+		u32 max;
+		bool enabled;
+		bool combination_mode;	/* gen 2/4 only */
+		bool active_low_pwm;
+		struct backlight_device *device;
+	} backlight;
 };
 
 struct intel_connector {
@@ -443,7 +455,7 @@
 	bool rgb_quant_range_selectable;
 	void (*write_infoframe)(struct drm_encoder *encoder,
 				enum hdmi_infoframe_type type,
-				const uint8_t *frame, ssize_t len);
+				const void *frame, ssize_t len);
 	void (*set_infoframes)(struct drm_encoder *encoder,
 			       struct drm_display_mode *adjusted_mode);
 };
@@ -490,9 +502,9 @@
 {
 	switch (dport->port) {
 	case PORT_B:
-		return 0;
+		return DPIO_CH0;
 	case PORT_C:
-		return 1;
+		return DPIO_CH1;
 	default:
 		BUG();
 	}
@@ -601,7 +613,8 @@
 void intel_ddi_enable_pipe_clock(struct intel_crtc *intel_crtc);
 void intel_ddi_disable_pipe_clock(struct intel_crtc *intel_crtc);
 void intel_ddi_setup_hw_pll_state(struct drm_device *dev);
-bool intel_ddi_pll_mode_set(struct drm_crtc *crtc);
+bool intel_ddi_pll_select(struct intel_crtc *crtc);
+void intel_ddi_pll_enable(struct intel_crtc *crtc);
 void intel_ddi_put_crtc_pll(struct drm_crtc *crtc);
 void intel_ddi_set_pipe_settings(struct drm_crtc *crtc);
 void intel_ddi_prepare_link_retrain(struct drm_encoder *encoder);
@@ -612,6 +625,8 @@
 
 
 /* intel_display.c */
+const char *intel_output_name(int output);
+bool intel_has_pending_fb_unpin(struct drm_device *dev);
 int intel_pch_rawclk(struct drm_device *dev);
 void intel_mark_busy(struct drm_device *dev);
 void intel_mark_fb_busy(struct drm_i915_gem_object *obj,
@@ -638,7 +653,8 @@
 void intel_wait_for_vblank(struct drm_device *dev, int pipe);
 void intel_wait_for_pipe_off(struct drm_device *dev, int pipe);
 int ironlake_get_lanes_required(int target_clock, int link_bw, int bpp);
-void vlv_wait_port_ready(struct drm_i915_private *dev_priv, int port);
+void vlv_wait_port_ready(struct drm_i915_private *dev_priv,
+			 struct intel_digital_port *dport);
 bool intel_get_load_detect_pipe(struct drm_connector *connector,
 				struct drm_display_mode *mode,
 				struct intel_load_detect_pipe *old);
@@ -690,11 +706,10 @@
 ironlake_check_encoder_dotclock(const struct intel_crtc_config *pipe_config,
 				int dotclock);
 bool intel_crtc_active(struct drm_crtc *crtc);
-void i915_disable_vga_mem(struct drm_device *dev);
 void hsw_enable_ips(struct intel_crtc *crtc);
 void hsw_disable_ips(struct intel_crtc *crtc);
 void intel_display_set_init_power(struct drm_device *dev, bool enable);
-
+int valleyview_get_vco(struct drm_i915_private *dev_priv);
 
 /* intel_dp.c */
 void intel_dp_init(struct drm_device *dev, int output_reg, enum port port);
@@ -808,9 +823,13 @@
 int intel_panel_setup_backlight(struct drm_connector *connector);
 void intel_panel_enable_backlight(struct intel_connector *connector);
 void intel_panel_disable_backlight(struct intel_connector *connector);
-void intel_panel_destroy_backlight(struct drm_device *dev);
+void intel_panel_destroy_backlight(struct drm_connector *connector);
+void intel_panel_init_backlight_funcs(struct drm_device *dev);
 enum drm_connector_status intel_panel_detect(struct drm_device *dev);
-
+extern struct drm_display_mode *intel_find_panel_downclock(
+				struct drm_device *dev,
+				struct drm_display_mode *fixed_mode,
+				struct drm_connector *connector);
 
 /* intel_pm.c */
 void intel_init_clock_gating(struct drm_device *dev);
@@ -830,6 +849,8 @@
 void intel_power_domains_remove(struct drm_device *dev);
 bool intel_display_power_enabled(struct drm_device *dev,
 				 enum intel_display_power_domain domain);
+bool intel_display_power_enabled_sw(struct drm_device *dev,
+				    enum intel_display_power_domain domain);
 void intel_display_power_get(struct drm_device *dev,
 			     enum intel_display_power_domain domain);
 void intel_display_power_put(struct drm_device *dev,
@@ -844,6 +865,10 @@
 void gen6_rps_boost(struct drm_i915_private *dev_priv);
 void intel_aux_display_runtime_get(struct drm_i915_private *dev_priv);
 void intel_aux_display_runtime_put(struct drm_i915_private *dev_priv);
+void intel_runtime_pm_get(struct drm_i915_private *dev_priv);
+void intel_runtime_pm_put(struct drm_i915_private *dev_priv);
+void intel_init_runtime_pm(struct drm_i915_private *dev_priv);
+void intel_fini_runtime_pm(struct drm_i915_private *dev_priv);
 void ilk_wm_get_hw_state(struct drm_device *dev);
 
 
diff --git a/drivers/gpu/drm/i915/intel_dsi.c b/drivers/gpu/drm/i915/intel_dsi.c
index d257b09..fabbf0d 100644
--- a/drivers/gpu/drm/i915/intel_dsi.c
+++ b/drivers/gpu/drm/i915/intel_dsi.c
@@ -37,49 +37,18 @@
 static const struct intel_dsi_device intel_dsi_devices[] = {
 };
 
-
-static void vlv_cck_modify(struct drm_i915_private *dev_priv, u32 reg, u32 val,
-			   u32 mask)
-{
-	u32 tmp = vlv_cck_read(dev_priv, reg);
-	tmp &= ~mask;
-	tmp |= val;
-	vlv_cck_write(dev_priv, reg, tmp);
-}
-
-static void band_gap_wa(struct drm_i915_private *dev_priv)
+static void band_gap_reset(struct drm_i915_private *dev_priv)
 {
 	mutex_lock(&dev_priv->dpio_lock);
 
-	/* Enable bandgap fix in GOP driver */
-	vlv_cck_modify(dev_priv, 0x6D, 0x00010000, 0x00030000);
-	msleep(20);
-	vlv_cck_modify(dev_priv, 0x6E, 0x00010000, 0x00030000);
-	msleep(20);
-	vlv_cck_modify(dev_priv, 0x6F, 0x00010000, 0x00030000);
-	msleep(20);
-	vlv_cck_modify(dev_priv, 0x00, 0x00008000, 0x00008000);
-	msleep(20);
-	vlv_cck_modify(dev_priv, 0x00, 0x00000000, 0x00008000);
-	msleep(20);
-
-	/* Turn Display Trunk on */
-	vlv_cck_modify(dev_priv, 0x6B, 0x00020000, 0x00030000);
-	msleep(20);
-
-	vlv_cck_modify(dev_priv, 0x6C, 0x00020000, 0x00030000);
-	msleep(20);
-
-	vlv_cck_modify(dev_priv, 0x6D, 0x00020000, 0x00030000);
-	msleep(20);
-	vlv_cck_modify(dev_priv, 0x6E, 0x00020000, 0x00030000);
-	msleep(20);
-	vlv_cck_modify(dev_priv, 0x6F, 0x00020000, 0x00030000);
+	vlv_flisdsi_write(dev_priv, 0x08, 0x0001);
+	vlv_flisdsi_write(dev_priv, 0x0F, 0x0005);
+	vlv_flisdsi_write(dev_priv, 0x0F, 0x0025);
+	udelay(150);
+	vlv_flisdsi_write(dev_priv, 0x0F, 0x0000);
+	vlv_flisdsi_write(dev_priv, 0x08, 0x0000);
 
 	mutex_unlock(&dev_priv->dpio_lock);
-
-	/* Need huge delay, otherwise clock is not stable */
-	msleep(100);
 }
 
 static struct intel_dsi *intel_attached_dsi(struct drm_connector *connector)
@@ -132,14 +101,47 @@
 	vlv_enable_dsi_pll(encoder);
 }
 
+static void intel_dsi_device_ready(struct intel_encoder *encoder)
+{
+	struct drm_i915_private *dev_priv = encoder->base.dev->dev_private;
+	struct intel_crtc *intel_crtc = to_intel_crtc(encoder->base.crtc);
+	int pipe = intel_crtc->pipe;
+	u32 val;
+
+	DRM_DEBUG_KMS("\n");
+
+	val = I915_READ(MIPI_PORT_CTRL(pipe));
+	I915_WRITE(MIPI_PORT_CTRL(pipe), val | LP_OUTPUT_HOLD);
+	usleep_range(1000, 1500);
+	I915_WRITE(MIPI_DEVICE_READY(pipe), DEVICE_READY | ULPS_STATE_EXIT);
+	usleep_range(2000, 2500);
+	I915_WRITE(MIPI_DEVICE_READY(pipe), DEVICE_READY);
+	usleep_range(2000, 2500);
+	I915_WRITE(MIPI_DEVICE_READY(pipe), 0x00);
+	usleep_range(2000, 2500);
+	I915_WRITE(MIPI_DEVICE_READY(pipe), DEVICE_READY);
+	usleep_range(2000, 2500);
+}
 static void intel_dsi_pre_enable(struct intel_encoder *encoder)
 {
+	struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
+
 	DRM_DEBUG_KMS("\n");
+
+	if (intel_dsi->dev.dev_ops->panel_reset)
+		intel_dsi->dev.dev_ops->panel_reset(&intel_dsi->dev);
+
+	/* put device in ready state */
+	intel_dsi_device_ready(encoder);
+
+	if (intel_dsi->dev.dev_ops->send_otp_cmds)
+		intel_dsi->dev.dev_ops->send_otp_cmds(&intel_dsi->dev);
 }
 
 static void intel_dsi_enable(struct intel_encoder *encoder)
 {
-	struct drm_i915_private *dev_priv = encoder->base.dev->dev_private;
+	struct drm_device *dev = encoder->base.dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct intel_crtc *intel_crtc = to_intel_crtc(encoder->base.crtc);
 	struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
 	int pipe = intel_crtc->pipe;
@@ -147,41 +149,28 @@
 
 	DRM_DEBUG_KMS("\n");
 
-	temp = I915_READ(MIPI_DEVICE_READY(pipe));
-	if ((temp & DEVICE_READY) == 0) {
-		temp &= ~ULPS_STATE_MASK;
-		I915_WRITE(MIPI_DEVICE_READY(pipe), temp | DEVICE_READY);
-	} else if (temp & ULPS_STATE_MASK) {
-		temp &= ~ULPS_STATE_MASK;
-		I915_WRITE(MIPI_DEVICE_READY(pipe), temp | ULPS_STATE_EXIT);
-		/*
-		 * We need to ensure that there is a minimum of 1 ms time
-		 * available before clearing the UPLS exit state.
-		 */
-		msleep(2);
-		I915_WRITE(MIPI_DEVICE_READY(pipe), temp);
-	}
-
 	if (is_cmd_mode(intel_dsi))
 		I915_WRITE(MIPI_MAX_RETURN_PKT_SIZE(pipe), 8 * 4);
-
-	if (is_vid_mode(intel_dsi)) {
+	else {
 		msleep(20); /* XXX */
 		dpi_send_cmd(intel_dsi, TURN_ON);
 		msleep(100);
 
 		/* assert ip_tg_enable signal */
-		temp = I915_READ(MIPI_PORT_CTRL(pipe));
+		temp = I915_READ(MIPI_PORT_CTRL(pipe)) & ~LANE_CONFIGURATION_MASK;
+		temp = temp | intel_dsi->port_bits;
 		I915_WRITE(MIPI_PORT_CTRL(pipe), temp | DPI_ENABLE);
 		POSTING_READ(MIPI_PORT_CTRL(pipe));
 	}
 
-	intel_dsi->dev.dev_ops->enable(&intel_dsi->dev);
+	if (intel_dsi->dev.dev_ops->enable)
+		intel_dsi->dev.dev_ops->enable(&intel_dsi->dev);
 }
 
 static void intel_dsi_disable(struct intel_encoder *encoder)
 {
-	struct drm_i915_private *dev_priv = encoder->base.dev->dev_private;
+	struct drm_device *dev = encoder->base.dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct intel_crtc *intel_crtc = to_intel_crtc(encoder->base.crtc);
 	struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
 	int pipe = intel_crtc->pipe;
@@ -189,8 +178,6 @@
 
 	DRM_DEBUG_KMS("\n");
 
-	intel_dsi->dev.dev_ops->disable(&intel_dsi->dev);
-
 	if (is_vid_mode(intel_dsi)) {
 		dpi_send_cmd(intel_dsi, SHUTDOWN);
 		msleep(10);
@@ -203,20 +190,54 @@
 		msleep(2);
 	}
 
-	temp = I915_READ(MIPI_DEVICE_READY(pipe));
-	if (temp & DEVICE_READY) {
-		temp &= ~DEVICE_READY;
-		temp &= ~ULPS_STATE_MASK;
-		I915_WRITE(MIPI_DEVICE_READY(pipe), temp);
-	}
+	/* If the disable packets are sent before the shutdown packet, a "turn
+	 * on packet" error is observed during a subsequent enable sequence. */
+	if (intel_dsi->dev.dev_ops->disable)
+		intel_dsi->dev.dev_ops->disable(&intel_dsi->dev);
 }
 
-static void intel_dsi_post_disable(struct intel_encoder *encoder)
+static void intel_dsi_clear_device_ready(struct intel_encoder *encoder)
 {
+	struct drm_i915_private *dev_priv = encoder->base.dev->dev_private;
+	struct intel_crtc *intel_crtc = to_intel_crtc(encoder->base.crtc);
+	int pipe = intel_crtc->pipe;
+	u32 val;
+
 	DRM_DEBUG_KMS("\n");
 
+	I915_WRITE(MIPI_DEVICE_READY(pipe), ULPS_STATE_ENTER);
+	usleep_range(2000, 2500);
+
+	I915_WRITE(MIPI_DEVICE_READY(pipe), ULPS_STATE_EXIT);
+	usleep_range(2000, 2500);
+
+	I915_WRITE(MIPI_DEVICE_READY(pipe), ULPS_STATE_ENTER);
+	usleep_range(2000, 2500);
+
+	val = I915_READ(MIPI_PORT_CTRL(pipe));
+	I915_WRITE(MIPI_PORT_CTRL(pipe), val & ~LP_OUTPUT_HOLD);
+	usleep_range(1000, 1500);
+
+	if (wait_for(((I915_READ(MIPI_PORT_CTRL(pipe)) & AFE_LATCHOUT)
+					== 0x00000), 30))
+		DRM_ERROR("DSI LP not going Low\n");
+
+	I915_WRITE(MIPI_DEVICE_READY(pipe), 0x00);
+	usleep_range(2000, 2500);
+
 	vlv_disable_dsi_pll(encoder);
 }
+static void intel_dsi_post_disable(struct intel_encoder *encoder)
+{
+	struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
+
+	DRM_DEBUG_KMS("\n");
+
+	intel_dsi_clear_device_ready(encoder);
+
+	if (intel_dsi->dev.dev_ops->disable_panel_power)
+		intel_dsi->dev.dev_ops->disable_panel_power(&intel_dsi->dev);
+}
 
 static bool intel_dsi_get_hw_state(struct intel_encoder *encoder,
 				   enum pipe *pipe)
@@ -251,8 +272,9 @@
 	/* XXX: read flags, set to adjusted_mode */
 }
 
-static int intel_dsi_mode_valid(struct drm_connector *connector,
-				struct drm_display_mode *mode)
+static enum drm_mode_status
+intel_dsi_mode_valid(struct drm_connector *connector,
+		     struct drm_display_mode *mode)
 {
 	struct intel_connector *intel_connector = to_intel_connector(connector);
 	struct drm_display_mode *fixed_mode = intel_connector->panel.fixed_mode;
@@ -352,11 +374,8 @@
 
 	DRM_DEBUG_KMS("pipe %c\n", pipe_name(pipe));
 
-	/* Update the DSI PLL */
-	vlv_enable_dsi_pll(intel_encoder);
-
 	/* XXX: Location of the call */
-	band_gap_wa(dev_priv);
+	band_gap_reset(dev_priv);
 
 	/* escape clock divider, 20MHz, shared for A and C. device ready must be
 	 * off when doing this! txclkesc? */
@@ -373,11 +392,7 @@
 	I915_WRITE(MIPI_INTR_STAT(pipe), 0xffffffff);
 	I915_WRITE(MIPI_INTR_EN(pipe), 0xffffffff);
 
-	I915_WRITE(MIPI_DPHY_PARAM(pipe),
-		   0x3c << EXIT_ZERO_COUNT_SHIFT |
-		   0x1f << TRAIL_COUNT_SHIFT |
-		   0xc5 << CLK_ZERO_COUNT_SHIFT |
-		   0x1f << PREPARE_COUNT_SHIFT);
+	I915_WRITE(MIPI_DPHY_PARAM(pipe), intel_dsi->dphy_reg);
 
 	I915_WRITE(MIPI_DPI_RESOLUTION(pipe),
 		   adjusted_mode->vdisplay << VERTICAL_ADDRESS_SHIFT |
@@ -425,9 +440,9 @@
 				       adjusted_mode->htotal,
 				       bpp, intel_dsi->lane_count) + 1);
 	}
-	I915_WRITE(MIPI_LP_RX_TIMEOUT(pipe), 8309); /* max */
-	I915_WRITE(MIPI_TURN_AROUND_TIMEOUT(pipe), 0x14); /* max */
-	I915_WRITE(MIPI_DEVICE_RESET_TIMER(pipe), 0xffff); /* max */
+	I915_WRITE(MIPI_LP_RX_TIMEOUT(pipe), intel_dsi->lp_rx_timeout);
+	I915_WRITE(MIPI_TURN_AROUND_TIMEOUT(pipe), intel_dsi->turn_arnd_val);
+	I915_WRITE(MIPI_DEVICE_RESET_TIMER(pipe), intel_dsi->rst_timer_val);
 
 	/* dphy stuff */
 
@@ -442,29 +457,31 @@
 	 *
 	 * XXX: write MIPI_STOP_STATE_STALL?
 	 */
-	I915_WRITE(MIPI_HIGH_LOW_SWITCH_COUNT(pipe), 0x46);
+	I915_WRITE(MIPI_HIGH_LOW_SWITCH_COUNT(pipe),
+						intel_dsi->hs_to_lp_count);
 
 	/* XXX: low power clock equivalence in terms of byte clock. the number
 	 * of byte clocks occupied in one low power clock. based on txbyteclkhs
 	 * and txclkesc. txclkesc time / txbyteclk time * (105 +
 	 * MIPI_STOP_STATE_STALL) / 105.???
 	 */
-	I915_WRITE(MIPI_LP_BYTECLK(pipe), 4);
+	I915_WRITE(MIPI_LP_BYTECLK(pipe), intel_dsi->lp_byte_clk);
 
 	/* the bw essential for transmitting 16 long packets containing 252
 	 * bytes meant for dcs write memory command is programmed in this
 	 * register in terms of byte clocks. based on dsi transfer rate and the
 	 * number of lanes configured the time taken to transmit 16 long packets
 	 * in a dsi stream varies. */
-	I915_WRITE(MIPI_DBI_BW_CTRL(pipe), 0x820);
+	I915_WRITE(MIPI_DBI_BW_CTRL(pipe), intel_dsi->bw_timer);
 
 	I915_WRITE(MIPI_CLK_LANE_SWITCH_TIME_CNT(pipe),
-		   0xa << LP_HS_SSW_CNT_SHIFT |
-		   0x14 << HS_LP_PWR_SW_CNT_SHIFT);
+		   intel_dsi->clk_lp_to_hs_count << LP_HS_SSW_CNT_SHIFT |
+		   intel_dsi->clk_hs_to_lp_count << HS_LP_PWR_SW_CNT_SHIFT);
 
 	if (is_vid_mode(intel_dsi))
 		I915_WRITE(MIPI_VIDEO_MODE_FORMAT(pipe),
-			   intel_dsi->video_mode_format);
+				intel_dsi->video_frmt_cfg_bits |
+				intel_dsi->video_mode_format);
 }
 
 static enum drm_connector_status
diff --git a/drivers/gpu/drm/i915/intel_dsi.h b/drivers/gpu/drm/i915/intel_dsi.h
index c7765f3..b4a27ce 100644
--- a/drivers/gpu/drm/i915/intel_dsi.h
+++ b/drivers/gpu/drm/i915/intel_dsi.h
@@ -39,6 +39,13 @@
 struct intel_dsi_dev_ops {
 	bool (*init)(struct intel_dsi_device *dsi);
 
+	void (*panel_reset)(struct intel_dsi_device *dsi);
+
+	void (*disable_panel_power)(struct intel_dsi_device *dsi);
+
+	/* one time programmable commands if needed */
+	void (*send_otp_cmds)(struct intel_dsi_device *dsi);
+
 	/* This callback must be able to assume DSI commands can be sent */
 	void (*enable)(struct intel_dsi_device *dsi);
 
@@ -89,6 +96,20 @@
 
 	/* eot for MIPI_EOT_DISABLE register */
 	u32 eot_disable;
+
+	u32 port_bits;
+	u32 bw_timer;
+	u32 dphy_reg;
+	u32 video_frmt_cfg_bits;
+	u16 lp_byte_clk;
+
+	/* timeouts in byte clocks */
+	u16 lp_rx_timeout;
+	u16 turn_arnd_val;
+	u16 rst_timer_val;
+	u16 hs_to_lp_count;
+	u16 clk_lp_to_hs_count;
+	u16 clk_hs_to_lp_count;
 };
 
 static inline struct intel_dsi *enc_to_intel_dsi(struct drm_encoder *encoder)
diff --git a/drivers/gpu/drm/i915/intel_dsi_pll.c b/drivers/gpu/drm/i915/intel_dsi_pll.c
index 44279b2..ba79ec1 100644
--- a/drivers/gpu/drm/i915/intel_dsi_pll.c
+++ b/drivers/gpu/drm/i915/intel_dsi_pll.c
@@ -50,6 +50,8 @@
 	71, 35							/* 91 - 92 */
 };
 
+#ifdef DSI_CLK_FROM_RR
+
 static u32 dsi_rr_formula(const struct drm_display_mode *mode,
 			  int pixel_format, int video_mode_format,
 			  int lane_count, bool eotp)
@@ -121,7 +123,7 @@
 
 	/* the dsi clock is divided by 2 in the hardware to get dsi ddr clock */
 	dsi_bit_clock_hz = bytes_per_x_frames_x_lanes * 8;
-	dsi_clk = dsi_bit_clock_hz / (1000 * 1000);
+	dsi_clk = dsi_bit_clock_hz / 1000;
 
 	if (eotp && video_mode_format == VIDEO_MODE_BURST)
 		dsi_clk *= 2;
@@ -129,64 +131,37 @@
 	return dsi_clk;
 }
 
-#ifdef MNP_FROM_TABLE
+#else
 
-struct dsi_clock_table {
-	u32 freq;
-	u8 m;
-	u8 p;
-};
-
-static const struct dsi_clock_table dsi_clk_tbl[] = {
-	{300, 72, 6}, {313, 75, 6}, {323, 78, 6}, {333, 80, 6},
-	{343, 82, 6}, {353, 85, 6}, {363, 87, 6}, {373, 90, 6},
-	{383, 92, 6}, {390, 78, 5}, {393, 79, 5}, {400, 80, 5},
-	{401, 80, 5}, {402, 80, 5}, {403, 81, 5}, {404, 81, 5},
-	{405, 81, 5}, {406, 81, 5}, {407, 81, 5}, {408, 82, 5},
-	{409, 82, 5}, {410, 82, 5}, {411, 82, 5}, {412, 82, 5},
-	{413, 83, 5}, {414, 83, 5}, {415, 83, 5}, {416, 83, 5},
-	{417, 83, 5}, {418, 84, 5}, {419, 84, 5}, {420, 84, 5},
-	{430, 86, 5}, {440, 88, 5}, {450, 90, 5}, {460, 92, 5},
-	{470, 75, 4}, {480, 77, 4}, {490, 78, 4}, {500, 80, 4},
-	{510, 82, 4}, {520, 83, 4}, {530, 85, 4}, {540, 86, 4},
-	{550, 88, 4}, {560, 90, 4}, {570, 91, 4}, {580, 70, 3},
-	{590, 71, 3}, {600, 72, 3}, {610, 73, 3}, {620, 74, 3},
-	{630, 76, 3}, {640, 77, 3}, {650, 78, 3}, {660, 79, 3},
-	{670, 80, 3}, {680, 82, 3}, {690, 83, 3}, {700, 84, 3},
-	{710, 85, 3}, {720, 86, 3}, {730, 88, 3}, {740, 89, 3},
-	{750, 90, 3}, {760, 91, 3}, {770, 92, 3}, {780, 62, 2},
-	{790, 63, 2}, {800, 64, 2}, {880, 70, 2}, {900, 72, 2},
-	{1000, 80, 2},		/* dsi clock frequency in Mhz*/
-};
-
-static int dsi_calc_mnp(u32 dsi_clk, struct dsi_mnp *dsi_mnp)
+/* Get DSI clock from pixel clock */
+static u32 dsi_clk_from_pclk(const struct drm_display_mode *mode,
+			  int pixel_format, int lane_count)
 {
-	unsigned int i;
-	u8 m;
-	u8 n;
-	u8 p;
-	u32 m_seed;
+	u32 dsi_clk_khz;
+	u32 bpp;
 
-	if (dsi_clk < 300 || dsi_clk > 1000)
-		return -ECHRNG;
-
-	for (i = 0; i <= ARRAY_SIZE(dsi_clk_tbl); i++) {
-		if (dsi_clk_tbl[i].freq > dsi_clk)
-			break;
+	switch (pixel_format) {
+	default:
+	case VID_MODE_FORMAT_RGB888:
+	case VID_MODE_FORMAT_RGB666_LOOSE:
+		bpp = 24;
+		break;
+	case VID_MODE_FORMAT_RGB666:
+		bpp = 18;
+		break;
+	case VID_MODE_FORMAT_RGB565:
+		bpp = 16;
+		break;
 	}
 
-	m = dsi_clk_tbl[i].m;
-	p = dsi_clk_tbl[i].p;
-	m_seed = lfsr_converts[m - 62];
-	n = 1;
-	dsi_mnp->dsi_pll_ctrl = 1 << (DSI_PLL_P1_POST_DIV_SHIFT + p - 2);
-	dsi_mnp->dsi_pll_div = (n - 1) << DSI_PLL_N1_DIV_SHIFT |
-		m_seed << DSI_PLL_M1_DIV_SHIFT;
+	/* DSI data rate = pixel clock * bits per pixel / lane count;
+	 * mode->clock is in kHz, so the result stays in kHz. */
+	dsi_clk_khz = DIV_ROUND_CLOSEST(mode->clock * bpp, lane_count);
 
-	return 0;
+	return dsi_clk_khz;
 }
 
-#else
+#endif
 
 static int dsi_calc_mnp(u32 dsi_clk, struct dsi_mnp *dsi_mnp)
 {
@@ -194,36 +169,47 @@
 	u32 ref_clk;
 	u32 error;
 	u32 tmp_error;
-	u32 target_dsi_clk;
-	u32 calc_dsi_clk;
+	int target_dsi_clk;
+	int calc_dsi_clk;
 	u32 calc_m;
 	u32 calc_p;
 	u32 m_seed;
 
-	if (dsi_clk < 300 || dsi_clk > 1150) {
+	/* dsi_clk is expected in KHZ */
+	if (dsi_clk < 300000 || dsi_clk > 1150000) {
 		DRM_ERROR("DSI CLK Out of Range\n");
 		return -ECHRNG;
 	}
 
 	ref_clk = 25000;
-	target_dsi_clk = dsi_clk * 1000;
+	target_dsi_clk = dsi_clk;
 	error = 0xFFFFFFFF;
+	tmp_error = 0xFFFFFFFF;
 	calc_m = 0;
 	calc_p = 0;
 
 	for (m = 62; m <= 92; m++) {
 		for (p = 2; p <= 6; p++) {
-
+			/* Find the m and p divisors with minimal error
+			 * against the required clock. */
 			calc_dsi_clk = (m * ref_clk) / p;
-			if (calc_dsi_clk >= target_dsi_clk) {
-				tmp_error = calc_dsi_clk - target_dsi_clk;
-				if (tmp_error < error) {
-					error = tmp_error;
-					calc_m = m;
-					calc_p = p;
-				}
+			if (calc_dsi_clk == target_dsi_clk) {
+				calc_m = m;
+				calc_p = p;
+				error = 0;
+				break;
+			} else
+				tmp_error = abs(target_dsi_clk - calc_dsi_clk);
+
+			if (tmp_error < error) {
+				error = tmp_error;
+				calc_m = m;
+				calc_p = p;
 			}
 		}
+
+		if (error == 0)
+			break;
 	}
 
 	m_seed = lfsr_converts[calc_m - 62];
@@ -235,8 +221,6 @@
 	return 0;
 }
 
-#endif
-
 /*
  * XXX: The muxing and gating is hard coded for now. Need to add support for
  * sharing PLLs with two DSI outputs.
@@ -251,9 +235,8 @@
 	struct dsi_mnp dsi_mnp;
 	u32 dsi_clk;
 
-	dsi_clk = dsi_rr_formula(mode, intel_dsi->pixel_format,
-				 intel_dsi->video_mode_format,
-				 intel_dsi->lane_count, !intel_dsi->eot_disable);
+	dsi_clk = dsi_clk_from_pclk(mode, intel_dsi->pixel_format,
+						intel_dsi->lane_count);
 
 	ret = dsi_calc_mnp(dsi_clk, &dsi_mnp);
 	if (ret) {
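
The PLL rework above derives the DSI link clock directly from the pixel clock (clock * bpp / lane count) and then brute-forces the m/p divisors against a 25 MHz reference, keeping the pair with the smallest error. A rough standalone sketch of that calculation follows; the mode values are made up and the LFSR seed conversion is omitted:

/* Illustrative sketch only: DSI clock from pixel clock plus m/p search. */
#include <stdio.h>
#include <stdlib.h>

int main(void)
{
	int pclk_khz = 148500, bpp = 24, lanes = 4;	/* sample mode */
	int ref_khz = 25000;				/* reference clock */
	/* DSI data rate = pixel clock * bits per pixel / lane count (kHz) */
	int dsi_clk = (pclk_khz * bpp + lanes / 2) / lanes;
	unsigned int best_err = ~0u;
	int best_m = 0, best_p = 0, m, p;

	for (m = 62; m <= 92; m++) {
		for (p = 2; p <= 6; p++) {
			int calc = m * ref_khz / p;
			unsigned int err = abs(dsi_clk - calc);

			if (err < best_err) {
				best_err = err;
				best_m = m;
				best_p = p;
			}
		}
	}

	printf("target %d kHz -> m=%d p=%d (%d kHz, err %u kHz)\n",
	       dsi_clk, best_m, best_p, best_m * ref_khz / best_p, best_err);
	return 0;
}
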
diff --git a/drivers/gpu/drm/i915/intel_dvo.c b/drivers/gpu/drm/i915/intel_dvo.c
index 3c77365..eeff998 100644
--- a/drivers/gpu/drm/i915/intel_dvo.c
+++ b/drivers/gpu/drm/i915/intel_dvo.c
@@ -234,8 +234,9 @@
 	intel_modeset_check_state(connector->dev);
 }
 
-static int intel_dvo_mode_valid(struct drm_connector *connector,
-				struct drm_display_mode *mode)
+static enum drm_mode_status
+intel_dvo_mode_valid(struct drm_connector *connector,
+		     struct drm_display_mode *mode)
 {
 	struct intel_dvo *intel_dvo = intel_attached_dvo(connector);
 
diff --git a/drivers/gpu/drm/i915/intel_fbdev.c b/drivers/gpu/drm/i915/intel_fbdev.c
index 895fcb4..39eac99 100644
--- a/drivers/gpu/drm/i915/intel_fbdev.c
+++ b/drivers/gpu/drm/i915/intel_fbdev.c
@@ -57,18 +57,14 @@
 	.fb_debug_leave = drm_fb_helper_debug_leave,
 };
 
-static int intelfb_create(struct drm_fb_helper *helper,
-			  struct drm_fb_helper_surface_size *sizes)
+static int intelfb_alloc(struct drm_fb_helper *helper,
+			 struct drm_fb_helper_surface_size *sizes)
 {
 	struct intel_fbdev *ifbdev =
 		container_of(helper, struct intel_fbdev, helper);
 	struct drm_device *dev = helper->dev;
-	struct drm_i915_private *dev_priv = dev->dev_private;
-	struct fb_info *info;
-	struct drm_framebuffer *fb;
 	struct drm_mode_fb_cmd2 mode_cmd = {};
 	struct drm_i915_gem_object *obj;
-	struct device *device = &dev->pdev->dev;
 	int size, ret;
 
 	/* we don't do packed 24bpp */
@@ -94,8 +90,6 @@
 		goto out;
 	}
 
-	mutex_lock(&dev->struct_mutex);
-
 	/* Flush everything out, we'll be doing GTT only from now on */
 	ret = intel_pin_and_fence_fb_obj(dev, obj, NULL);
 	if (ret) {
@@ -103,7 +97,50 @@
 		goto out_unref;
 	}
 
-	info = framebuffer_alloc(0, device);
+	ret = intel_framebuffer_init(dev, &ifbdev->ifb, &mode_cmd, obj);
+	if (ret)
+		goto out_unpin;
+
+	return 0;
+
+out_unpin:
+	i915_gem_object_unpin(obj);
+out_unref:
+	drm_gem_object_unreference(&obj->base);
+out:
+	return ret;
+}
+
+static int intelfb_create(struct drm_fb_helper *helper,
+			  struct drm_fb_helper_surface_size *sizes)
+{
+	struct intel_fbdev *ifbdev =
+		container_of(helper, struct intel_fbdev, helper);
+	struct intel_framebuffer *intel_fb = &ifbdev->ifb;
+	struct drm_device *dev = helper->dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct fb_info *info;
+	struct drm_framebuffer *fb;
+	struct drm_i915_gem_object *obj;
+	int size, ret;
+
+	mutex_lock(&dev->struct_mutex);
+
+	if (!intel_fb->obj) {
+		DRM_DEBUG_KMS("no BIOS fb, allocating a new one\n");
+		ret = intelfb_alloc(helper, sizes);
+		if (ret)
+			goto out_unlock;
+	} else {
+		DRM_DEBUG_KMS("re-using BIOS fb\n");
+		sizes->fb_width = intel_fb->base.width;
+		sizes->fb_height = intel_fb->base.height;
+	}
+
+	obj = intel_fb->obj;
+	size = obj->base.size;
+
+	info = framebuffer_alloc(0, &dev->pdev->dev);
 	if (!info) {
 		ret = -ENOMEM;
 		goto out_unpin;
@@ -111,10 +148,6 @@
 
 	info->par = helper;
 
-	ret = intel_framebuffer_init(dev, &ifbdev->ifb, &mode_cmd, obj);
-	if (ret)
-		goto out_unpin;
-
 	fb = &ifbdev->ifb.base;
 
 	ifbdev->helper.fb = fb;
@@ -170,17 +203,15 @@
 		      fb->width, fb->height,
 		      i915_gem_obj_ggtt_offset(obj), obj);
 
-
 	mutex_unlock(&dev->struct_mutex);
 	vga_switcheroo_client_fb_set(dev->pdev, info);
 	return 0;
 
 out_unpin:
 	i915_gem_object_unpin(obj);
-out_unref:
 	drm_gem_object_unreference(&obj->base);
+out_unlock:
 	mutex_unlock(&dev->struct_mutex);
-out:
 	return ret;
 }
 
@@ -297,8 +328,6 @@
 	fb_set_suspend(info, state);
 }
 
-MODULE_LICENSE("GPL and additional rights");
-
 void intel_fbdev_output_poll_changed(struct drm_device *dev)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
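
The fbdev split above separates buffer allocation from intelfb_create() so that an existing framebuffer object (for instance one inherited from the BIOS) can be reused, allocating only when none is present. A simplified standalone sketch of that reuse-or-allocate decision follows; the struct and helpers are invented for illustration:

/* Illustrative sketch only: reuse an existing fb object, allocate otherwise. */
#include <stdio.h>
#include <stdlib.h>

struct fake_fb {
	int width, height;
	void *obj;		/* stand-in for the GEM object */
};

static int fake_fb_alloc(struct fake_fb *fb, int w, int h)
{
	fb->obj = malloc(1);	/* stand-in for the real allocation */
	if (!fb->obj)
		return -1;
	fb->width = w;
	fb->height = h;
	return 0;
}

static int fake_fb_create(struct fake_fb *fb, int want_w, int want_h)
{
	if (!fb->obj) {
		printf("no BIOS fb, allocating a new one\n");
		if (fake_fb_alloc(fb, want_w, want_h))
			return -1;
	} else {
		printf("re-using BIOS fb\n");
		want_w = fb->width;	/* clamp request to what already exists */
		want_h = fb->height;
	}
	printf("using %dx%d fb\n", want_w, want_h);
	return 0;
}

int main(void)
{
	struct fake_fb fb = { 0 };

	fake_fb_create(&fb, 1366, 768);		/* first call allocates */
	fake_fb_create(&fb, 1920, 1080);	/* second call reuses */
	free(fb.obj);
	return 0;
}
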
diff --git a/drivers/gpu/drm/i915/intel_hdmi.c b/drivers/gpu/drm/i915/intel_hdmi.c
index 03f9ca7..6db0d9d 100644
--- a/drivers/gpu/drm/i915/intel_hdmi.c
+++ b/drivers/gpu/drm/i915/intel_hdmi.c
@@ -130,9 +130,9 @@
 
 static void g4x_write_infoframe(struct drm_encoder *encoder,
 				enum hdmi_infoframe_type type,
-				const uint8_t *frame, ssize_t len)
+				const void *frame, ssize_t len)
 {
-	uint32_t *data = (uint32_t *)frame;
+	const uint32_t *data = frame;
 	struct drm_device *dev = encoder->dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	u32 val = I915_READ(VIDEO_DIP_CTL);
@@ -167,9 +167,9 @@
 
 static void ibx_write_infoframe(struct drm_encoder *encoder,
 				enum hdmi_infoframe_type type,
-				const uint8_t *frame, ssize_t len)
+				const void *frame, ssize_t len)
 {
-	uint32_t *data = (uint32_t *)frame;
+	const uint32_t *data = frame;
 	struct drm_device *dev = encoder->dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc);
@@ -205,9 +205,9 @@
 
 static void cpt_write_infoframe(struct drm_encoder *encoder,
 				enum hdmi_infoframe_type type,
-				const uint8_t *frame, ssize_t len)
+				const void *frame, ssize_t len)
 {
-	uint32_t *data = (uint32_t *)frame;
+	const uint32_t *data = frame;
 	struct drm_device *dev = encoder->dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc);
@@ -246,9 +246,9 @@
 
 static void vlv_write_infoframe(struct drm_encoder *encoder,
 				enum hdmi_infoframe_type type,
-				const uint8_t *frame, ssize_t len)
+				const void *frame, ssize_t len)
 {
-	uint32_t *data = (uint32_t *)frame;
+	const uint32_t *data = frame;
 	struct drm_device *dev = encoder->dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc);
@@ -284,9 +284,9 @@
 
 static void hsw_write_infoframe(struct drm_encoder *encoder,
 				enum hdmi_infoframe_type type,
-				const uint8_t *frame, ssize_t len)
+				const void *frame, ssize_t len)
 {
-	uint32_t *data = (uint32_t *)frame;
+	const uint32_t *data = frame;
 	struct drm_device *dev = encoder->dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc);
@@ -853,8 +853,9 @@
 		return 225000;
 }
 
-static int intel_hdmi_mode_valid(struct drm_connector *connector,
-				 struct drm_display_mode *mode)
+static enum drm_mode_status
+intel_hdmi_mode_valid(struct drm_connector *connector,
+		      struct drm_display_mode *mode)
 {
 	if (mode->clock > hdmi_portclock_limit(intel_attached_hdmi(connector)))
 		return MODE_CLOCK_HIGH;
@@ -1081,7 +1082,7 @@
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct intel_crtc *intel_crtc =
 		to_intel_crtc(encoder->base.crtc);
-	int port = vlv_dport_to_channel(dport);
+	enum dpio_channel port = vlv_dport_to_channel(dport);
 	int pipe = intel_crtc->pipe;
 	u32 val;
 
@@ -1090,41 +1091,33 @@
 
 	/* Enable clock channels for this port */
 	mutex_lock(&dev_priv->dpio_lock);
-	val = vlv_dpio_read(dev_priv, pipe, DPIO_DATA_LANE_A(port));
+	val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW8(port));
 	val = 0;
 	if (pipe)
 		val |= (1<<21);
 	else
 		val &= ~(1<<21);
 	val |= 0x001000c4;
-	vlv_dpio_write(dev_priv, pipe, DPIO_DATA_CHANNEL(port), val);
+	vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW8(port), val);
 
 	/* HDMI 1.0V-2dB */
-	vlv_dpio_write(dev_priv, pipe, DPIO_TX_OCALINIT(port), 0);
-	vlv_dpio_write(dev_priv, pipe, DPIO_TX_SWING_CTL4(port),
-			 0x2b245f5f);
-	vlv_dpio_write(dev_priv, pipe, DPIO_TX_SWING_CTL2(port),
-			 0x5578b83a);
-	vlv_dpio_write(dev_priv, pipe, DPIO_TX_SWING_CTL3(port),
-			 0x0c782040);
-	vlv_dpio_write(dev_priv, pipe, DPIO_TX3_SWING_CTL4(port),
-			 0x2b247878);
-	vlv_dpio_write(dev_priv, pipe, DPIO_PCS_STAGGER0(port), 0x00030000);
-	vlv_dpio_write(dev_priv, pipe, DPIO_PCS_CTL_OVER1(port),
-			 0x00002000);
-	vlv_dpio_write(dev_priv, pipe, DPIO_TX_OCALINIT(port),
-			 DPIO_TX_OCALINIT_EN);
+	vlv_dpio_write(dev_priv, pipe, VLV_TX_DW5(port), 0);
+	vlv_dpio_write(dev_priv, pipe, VLV_TX_DW4(port), 0x2b245f5f);
+	vlv_dpio_write(dev_priv, pipe, VLV_TX_DW2(port), 0x5578b83a);
+	vlv_dpio_write(dev_priv, pipe, VLV_TX_DW3(port), 0x0c782040);
+	vlv_dpio_write(dev_priv, pipe, VLV_TX3_DW4(port), 0x2b247878);
+	vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW11(port), 0x00030000);
+	vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW9(port), 0x00002000);
+	vlv_dpio_write(dev_priv, pipe, VLV_TX_DW5(port), DPIO_TX_OCALINIT_EN);
 
 	/* Program lane clock */
-	vlv_dpio_write(dev_priv, pipe, DPIO_PCS_CLOCKBUF0(port),
-			 0x00760018);
-	vlv_dpio_write(dev_priv, pipe, DPIO_PCS_CLOCKBUF8(port),
-			 0x00400888);
+	vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW14(port), 0x00760018);
+	vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW23(port), 0x00400888);
 	mutex_unlock(&dev_priv->dpio_lock);
 
 	intel_enable_hdmi(encoder);
 
-	vlv_wait_port_ready(dev_priv, port);
+	vlv_wait_port_ready(dev_priv, dport);
 }
 
 static void vlv_hdmi_pre_pll_enable(struct intel_encoder *encoder)
@@ -1134,7 +1127,7 @@
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct intel_crtc *intel_crtc =
 		to_intel_crtc(encoder->base.crtc);
-	int port = vlv_dport_to_channel(dport);
+	enum dpio_channel port = vlv_dport_to_channel(dport);
 	int pipe = intel_crtc->pipe;
 
 	if (!IS_VALLEYVIEW(dev))
@@ -1142,24 +1135,22 @@
 
 	/* Program Tx lane resets to default */
 	mutex_lock(&dev_priv->dpio_lock);
-	vlv_dpio_write(dev_priv, pipe, DPIO_PCS_TX(port),
+	vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW0(port),
 			 DPIO_PCS_TX_LANE2_RESET |
 			 DPIO_PCS_TX_LANE1_RESET);
-	vlv_dpio_write(dev_priv, pipe, DPIO_PCS_CLK(port),
+	vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW1(port),
 			 DPIO_PCS_CLK_CRI_RXEB_EIOS_EN |
 			 DPIO_PCS_CLK_CRI_RXDIGFILTSG_EN |
 			 (1<<DPIO_PCS_CLK_DATAWIDTH_SHIFT) |
 			 DPIO_PCS_CLK_SOFT_RESET);
 
 	/* Fix up inter-pair skew failure */
-	vlv_dpio_write(dev_priv, pipe, DPIO_PCS_STAGGER1(port), 0x00750f00);
-	vlv_dpio_write(dev_priv, pipe, DPIO_TX_CTL(port), 0x00001500);
-	vlv_dpio_write(dev_priv, pipe, DPIO_TX_LANE(port), 0x40400000);
+	vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW12(port), 0x00750f00);
+	vlv_dpio_write(dev_priv, pipe, VLV_TX_DW11(port), 0x00001500);
+	vlv_dpio_write(dev_priv, pipe, VLV_TX_DW14(port), 0x40400000);
 
-	vlv_dpio_write(dev_priv, pipe, DPIO_PCS_CTL_OVER1(port),
-			 0x00002000);
-	vlv_dpio_write(dev_priv, pipe, DPIO_TX_OCALINIT(port),
-			 DPIO_TX_OCALINIT_EN);
+	vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW9(port), 0x00002000);
+	vlv_dpio_write(dev_priv, pipe, VLV_TX_DW5(port), DPIO_TX_OCALINIT_EN);
 	mutex_unlock(&dev_priv->dpio_lock);
 }
 
@@ -1169,13 +1160,13 @@
 	struct drm_i915_private *dev_priv = encoder->base.dev->dev_private;
 	struct intel_crtc *intel_crtc =
 		to_intel_crtc(encoder->base.crtc);
-	int port = vlv_dport_to_channel(dport);
+	enum dpio_channel port = vlv_dport_to_channel(dport);
 	int pipe = intel_crtc->pipe;
 
 	/* Reset lanes to avoid HDMI flicker (VLV w/a) */
 	mutex_lock(&dev_priv->dpio_lock);
-	vlv_dpio_write(dev_priv, pipe, DPIO_PCS_TX(port), 0x00000000);
-	vlv_dpio_write(dev_priv, pipe, DPIO_PCS_CLK(port), 0x00e00060);
+	vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW0(port), 0x00000000);
+	vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW1(port), 0x00e00060);
 	mutex_unlock(&dev_priv->dpio_lock);
 }
 
diff --git a/drivers/gpu/drm/i915/intel_i2c.c b/drivers/gpu/drm/i915/intel_i2c.c
index 2ca17b1..d33b61d 100644
--- a/drivers/gpu/drm/i915/intel_i2c.c
+++ b/drivers/gpu/drm/i915/intel_i2c.c
@@ -82,20 +82,11 @@
 
 static void gmbus_set_freq(struct drm_i915_private *dev_priv)
 {
-	int vco_freq[] = { 800, 1600, 2000, 2400 };
-	int gmbus_freq = 0, cdclk_div, hpll_freq;
+	int vco, gmbus_freq = 0, cdclk_div;
 
 	BUG_ON(!IS_VALLEYVIEW(dev_priv->dev));
 
-	/* Skip setting the gmbus freq if BIOS has already programmed it */
-	if (I915_READ(GMBUSFREQ_VLV) != 0xA0)
-		return;
-
-	/* Obtain SKU information */
-	mutex_lock(&dev_priv->dpio_lock);
-	hpll_freq =
-		vlv_cck_read(dev_priv, CCK_FUSE_REG) & CCK_FUSE_HPLL_FREQ_MASK;
-	mutex_unlock(&dev_priv->dpio_lock);
+	vco = valleyview_get_vco(dev_priv);
 
 	/* Get the CDCLK divide ratio */
 	cdclk_div = get_disp_clk_div(dev_priv, CDCLK);
@@ -106,7 +97,7 @@
 	 * in fact 1MHz is the correct frequency.
 	 */
 	if (cdclk_div)
-		gmbus_freq = (vco_freq[hpll_freq] << 1) / cdclk_div;
+		gmbus_freq = (vco << 1) / cdclk_div;
 
 	if (WARN_ON(gmbus_freq == 0))
 		return;
@@ -267,13 +258,6 @@
 	algo->data = bus;
 }
 
-/*
- * gmbus on gen4 seems to be able to generate legacy interrupts even when in MSI
- * mode. This results in spurious interrupt warnings if the legacy irq no. is
- * shared with another device. The kernel then disables that interrupt source
- * and so prevents the other device from working properly.
- */
-#define HAS_GMBUS_IRQ(dev) (INTEL_INFO(dev)->gen >= 5)
 static int
 gmbus_wait_hw_status(struct drm_i915_private *dev_priv,
 		     u32 gmbus2_status,
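
The GMBUS change above derives the bus frequency from the VCO reported by a valleyview_get_vco()-style helper instead of reading the SKU fuse, computing (2 * vco) / cdclk divider and bailing out if the result is zero. A minimal standalone sketch of that arithmetic follows, with sample numbers invented for the demo:

/* Illustrative sketch only: GMBUS frequency from VCO and CDCLK divider. */
#include <stdio.h>

int main(void)
{
	int vco = 1600;		/* sample VCO value */
	int cdclk_div = 8;	/* sample display clock divide ratio */
	int gmbus_freq = 0;

	if (cdclk_div)
		gmbus_freq = (vco << 1) / cdclk_div;

	if (gmbus_freq == 0) {
		fprintf(stderr, "could not derive gmbus frequency\n");
		return 1;
	}

	printf("gmbus freq = %d\n", gmbus_freq);
	return 0;
}
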
diff --git a/drivers/gpu/drm/i915/intel_lvds.c b/drivers/gpu/drm/i915/intel_lvds.c
index c3b4da7..8bcb93a 100644
--- a/drivers/gpu/drm/i915/intel_lvds.c
+++ b/drivers/gpu/drm/i915/intel_lvds.c
@@ -256,8 +256,9 @@
 	POSTING_READ(lvds_encoder->reg);
 }
 
-static int intel_lvds_mode_valid(struct drm_connector *connector,
-				 struct drm_display_mode *mode)
+static enum drm_mode_status
+intel_lvds_mode_valid(struct drm_connector *connector,
+		      struct drm_display_mode *mode)
 {
 	struct intel_connector *intel_connector = to_intel_connector(connector);
 	struct drm_display_mode *fixed_mode = intel_connector->panel.fixed_mode;
@@ -446,9 +447,19 @@
 	if (dev_priv->modeset_restore == MODESET_DONE)
 		goto exit;
 
-	drm_modeset_lock_all(dev);
-	intel_modeset_setup_hw_state(dev, true);
-	drm_modeset_unlock_all(dev);
+	/*
+	 * The BIOS on some old platforms loves to wreak havoc while the lid is
+	 * closed. We try to detect this here and undo any damage. The split for
+	 * PCH platforms is rather conservative and a bit arbitrary, except that
+	 * on those platforms VGA disabling requires actual legacy VGA I/O
+	 * access, and as part of the cleanup in the hw state restore we also
+	 * re-disable the vga plane.
+	 */
+	if (!HAS_PCH_SPLIT(dev)) {
+		drm_modeset_lock_all(dev);
+		intel_modeset_setup_hw_state(dev, true);
+		drm_modeset_unlock_all(dev);
+	}
 
 	dev_priv->modeset_restore = MODESET_DONE;
 
@@ -744,57 +755,6 @@
 	{ }	/* terminating entry */
 };
 
-/**
- * intel_find_lvds_downclock - find the reduced downclock for LVDS in EDID
- * @dev: drm device
- * @connector: LVDS connector
- *
- * Find the reduced downclock for LVDS in EDID.
- */
-static void intel_find_lvds_downclock(struct drm_device *dev,
-				      struct drm_display_mode *fixed_mode,
-				      struct drm_connector *connector)
-{
-	struct drm_i915_private *dev_priv = dev->dev_private;
-	struct drm_display_mode *scan;
-	int temp_downclock;
-
-	temp_downclock = fixed_mode->clock;
-	list_for_each_entry(scan, &connector->probed_modes, head) {
-		/*
-		 * If one mode has the same resolution with the fixed_panel
-		 * mode while they have the different refresh rate, it means
-		 * that the reduced downclock is found for the LVDS. In such
-		 * case we can set the different FPx0/1 to dynamically select
-		 * between low and high frequency.
-		 */
-		if (scan->hdisplay == fixed_mode->hdisplay &&
-		    scan->hsync_start == fixed_mode->hsync_start &&
-		    scan->hsync_end == fixed_mode->hsync_end &&
-		    scan->htotal == fixed_mode->htotal &&
-		    scan->vdisplay == fixed_mode->vdisplay &&
-		    scan->vsync_start == fixed_mode->vsync_start &&
-		    scan->vsync_end == fixed_mode->vsync_end &&
-		    scan->vtotal == fixed_mode->vtotal) {
-			if (scan->clock < temp_downclock) {
-				/*
-				 * The downclock is already found. But we
-				 * expect to find the lower downclock.
-				 */
-				temp_downclock = scan->clock;
-			}
-		}
-	}
-	if (temp_downclock < fixed_mode->clock && i915_lvds_downclock) {
-		/* We found the downclock for LVDS. */
-		dev_priv->lvds_downclock_avail = 1;
-		dev_priv->lvds_downclock = temp_downclock;
-		DRM_DEBUG_KMS("LVDS downclock is found in EDID. "
-			      "Normal clock %dKhz, downclock %dKhz\n",
-			      fixed_mode->clock, temp_downclock);
-	}
-}
-
 /*
  * Enumerate the child dev array parsed from VBT to check whether
  * the LVDS is present.
@@ -1072,8 +1032,22 @@
 
 			fixed_mode = drm_mode_duplicate(dev, scan);
 			if (fixed_mode) {
-				intel_find_lvds_downclock(dev, fixed_mode,
-							  connector);
+				intel_connector->panel.downclock_mode =
+					intel_find_panel_downclock(dev,
+					fixed_mode, connector);
+				if (intel_connector->panel.downclock_mode !=
+					NULL &&	i915_lvds_downclock) {
+					/* We found the downclock for LVDS. */
+					dev_priv->lvds_downclock_avail = true;
+					dev_priv->lvds_downclock =
+						intel_connector->panel.
+						downclock_mode->clock;
+					DRM_DEBUG_KMS("LVDS downclock is found"
+					" in EDID. Normal clock %dKhz, "
+					"downclock %dKhz\n",
+					fixed_mode->clock,
+					dev_priv->lvds_downclock);
+				}
 				goto out;
 			}
 		}
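
The LVDS change above drops the local downclock scan in favour of the shared intel_find_panel_downclock() helper, which looks for a probed mode whose timings match the fixed mode but whose pixel clock is lower. A simplified standalone sketch of that matching rule follows; the struct and sample modes are invented, and the real helper compares the full sync timings as well:

/* Illustrative sketch only: pick the lowest-clock mode with matching timings. */
#include <stdio.h>

struct fake_mode {
	int hdisplay, htotal, vdisplay, vtotal, clock;
};

static int same_timings(const struct fake_mode *a, const struct fake_mode *b)
{
	return a->hdisplay == b->hdisplay && a->htotal == b->htotal &&
	       a->vdisplay == b->vdisplay && a->vtotal == b->vtotal;
}

int main(void)
{
	struct fake_mode fixed = { 1920, 2080, 1080, 1120, 138700 };
	struct fake_mode probed[] = {
		{ 1920, 2080, 1080, 1120, 138700 },	/* the fixed mode itself */
		{ 1920, 2080, 1080, 1120, 110000 },	/* candidate downclock */
	};
	int best = fixed.clock;
	unsigned int i;

	for (i = 0; i < sizeof(probed) / sizeof(probed[0]); i++)
		if (same_timings(&probed[i], &fixed) && probed[i].clock < best)
			best = probed[i].clock;

	if (best < fixed.clock)
		printf("downclock found: %d kHz (normal %d kHz)\n",
		       best, fixed.clock);
	return 0;
}
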
diff --git a/drivers/gpu/drm/i915/intel_opregion.c b/drivers/gpu/drm/i915/intel_opregion.c
index 9a8804b..acde294 100644
--- a/drivers/gpu/drm/i915/intel_opregion.c
+++ b/drivers/gpu/drm/i915/intel_opregion.c
@@ -63,7 +63,7 @@
 	u8 driver_ver[16];
 	u32 mboxes;
 	u8 reserved[164];
-} __attribute__((packed));
+} __packed;
 
 /* OpRegion mailbox #1: public ACPI methods */
 struct opregion_acpi {
@@ -85,7 +85,7 @@
 	u32 cnot;       /* current OS notification */
 	u32 nrdy;       /* driver status */
 	u8 rsvd2[60];
-} __attribute__((packed));
+} __packed;
 
 /* OpRegion mailbox #2: SWSCI */
 struct opregion_swsci {
@@ -93,7 +93,7 @@
 	u32 parm;       /* command parameters */
 	u32 dslp;       /* driver sleep time-out */
 	u8 rsvd[244];
-} __attribute__((packed));
+} __packed;
 
 /* OpRegion mailbox #3: ASLE */
 struct opregion_asle {
@@ -114,7 +114,7 @@
 	u32 srot;       /* supported rotation angles */
 	u32 iuer;       /* IUER events */
 	u8 rsvd[86];
-} __attribute__((packed));
+} __packed;
 
 /* Driver readiness indicator */
 #define ASLE_ARDY_READY		(1 << 0)
@@ -226,6 +226,8 @@
 #define ACPI_DIGITAL_OUTPUT (3<<8)
 #define ACPI_LVDS_OUTPUT (4<<8)
 
+#define MAX_DSLP	1500
+
 #ifdef CONFIG_ACPI
 static int swsci(struct drm_device *dev, u32 function, u32 parm, u32 *parm_out)
 {
@@ -260,10 +262,11 @@
 		/* The spec says 2ms should be the default, but it's too small
 		 * for some machines. */
 		dslp = 50;
-	} else if (dslp > 500) {
+	} else if (dslp > MAX_DSLP) {
 		/* Hey bios, trust must be earned. */
-		WARN_ONCE(1, "excessive driver sleep timeout (DSPL) %u\n", dslp);
-		dslp = 500;
+		DRM_INFO_ONCE("ACPI BIOS requests an excessive sleep of %u ms, "
+			      "using %u ms instead\n", dslp, MAX_DSLP);
+		dslp = MAX_DSLP;
 	}
 
 	/* The spec tells us to do this, but we are the only user... */
@@ -395,13 +398,8 @@
 static u32 asle_set_backlight(struct drm_device *dev, u32 bclp)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
-	struct drm_encoder *encoder;
-	struct drm_connector *connector;
-	struct intel_connector *intel_connector = NULL;
-	struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[0];
+	struct intel_connector *intel_connector;
 	struct opregion_asle __iomem *asle = dev_priv->opregion.asle;
-	u32 ret = 0;
-	bool found = false;
 
 	DRM_DEBUG_DRIVER("bclp = 0x%08x\n", bclp);
 
@@ -413,38 +411,20 @@
 		return ASLC_BACKLIGHT_FAILED;
 
 	mutex_lock(&dev->mode_config.mutex);
+
 	/*
-	 * Could match the OpRegion connector here instead, but we'd also need
-	 * to verify the connector could handle a backlight call.
+	 * Update backlight on all connectors that support backlight (usually
+	 * only one).
 	 */
-	list_for_each_entry(encoder, &dev->mode_config.encoder_list, head)
-		if (encoder->crtc == crtc) {
-			found = true;
-			break;
-		}
-
-	if (!found) {
-		ret = ASLC_BACKLIGHT_FAILED;
-		goto out;
-	}
-
-	list_for_each_entry(connector, &dev->mode_config.connector_list, head)
-		if (connector->encoder == encoder)
-			intel_connector = to_intel_connector(connector);
-
-	if (!intel_connector) {
-		ret = ASLC_BACKLIGHT_FAILED;
-		goto out;
-	}
-
 	DRM_DEBUG_KMS("updating opregion backlight %d/255\n", bclp);
-	intel_panel_set_backlight(intel_connector, bclp, 255);
+	list_for_each_entry(intel_connector, &dev->mode_config.connector_list, base.head)
+		intel_panel_set_backlight(intel_connector, bclp, 255);
 	iowrite32(DIV_ROUND_UP(bclp * 100, 255) | ASLE_CBLV_VALID, &asle->cblv);
 
-out:
 	mutex_unlock(&dev->mode_config.mutex);
 
-	return ret;
+
+	return 0;
 }
 
 static u32 asle_set_als_illum(struct drm_device *dev, u32 alsi)
diff --git a/drivers/gpu/drm/i915/intel_overlay.c b/drivers/gpu/drm/i915/intel_overlay.c
index a98a990..a759ecd 100644
--- a/drivers/gpu/drm/i915/intel_overlay.c
+++ b/drivers/gpu/drm/i915/intel_overlay.c
@@ -1005,7 +1005,7 @@
 	u32  pfit_control;
 
 	/* i830 doesn't have a panel fitter */
-	if (IS_I830(dev))
+	if (INTEL_INFO(dev)->gen <= 3 && (IS_I830(dev) || !IS_MOBILE(dev)))
 		return -1;
 
 	pfit_control = I915_READ(PFIT_CONTROL);
diff --git a/drivers/gpu/drm/i915/intel_panel.c b/drivers/gpu/drm/i915/intel_panel.c
index e6f782d..350de35 100644
--- a/drivers/gpu/drm/i915/intel_panel.c
+++ b/drivers/gpu/drm/i915/intel_panel.c
@@ -325,97 +325,6 @@
 	pipe_config->gmch_pfit.lvds_border_bits = border;
 }
 
-static int is_backlight_combination_mode(struct drm_device *dev)
-{
-	struct drm_i915_private *dev_priv = dev->dev_private;
-
-	if (IS_GEN4(dev))
-		return I915_READ(BLC_PWM_CTL2) & BLM_COMBINATION_MODE;
-
-	if (IS_GEN2(dev))
-		return I915_READ(BLC_PWM_CTL) & BLM_LEGACY_MODE;
-
-	return 0;
-}
-
-/* XXX: query mode clock or hardware clock and program max PWM appropriately
- * when it's 0.
- */
-static u32 i915_read_blc_pwm_ctl(struct drm_device *dev, enum pipe pipe)
-{
-	struct drm_i915_private *dev_priv = dev->dev_private;
-	u32 val;
-
-	WARN_ON_SMP(!spin_is_locked(&dev_priv->backlight.lock));
-
-	/* Restore the CTL value if it lost, e.g. GPU reset */
-
-	if (HAS_PCH_SPLIT(dev_priv->dev)) {
-		val = I915_READ(BLC_PWM_PCH_CTL2);
-		if (dev_priv->regfile.saveBLC_PWM_CTL2 == 0) {
-			dev_priv->regfile.saveBLC_PWM_CTL2 = val;
-		} else if (val == 0) {
-			val = dev_priv->regfile.saveBLC_PWM_CTL2;
-			I915_WRITE(BLC_PWM_PCH_CTL2, val);
-		}
-	} else if (IS_VALLEYVIEW(dev)) {
-		val = I915_READ(VLV_BLC_PWM_CTL(pipe));
-		if (dev_priv->regfile.saveBLC_PWM_CTL == 0) {
-			dev_priv->regfile.saveBLC_PWM_CTL = val;
-			dev_priv->regfile.saveBLC_PWM_CTL2 =
-				I915_READ(VLV_BLC_PWM_CTL2(pipe));
-		} else if (val == 0) {
-			val = dev_priv->regfile.saveBLC_PWM_CTL;
-			I915_WRITE(VLV_BLC_PWM_CTL(pipe), val);
-			I915_WRITE(VLV_BLC_PWM_CTL2(pipe),
-				   dev_priv->regfile.saveBLC_PWM_CTL2);
-		}
-
-		if (!val)
-			val = 0x0f42ffff;
-	} else {
-		val = I915_READ(BLC_PWM_CTL);
-		if (dev_priv->regfile.saveBLC_PWM_CTL == 0) {
-			dev_priv->regfile.saveBLC_PWM_CTL = val;
-			if (INTEL_INFO(dev)->gen >= 4)
-				dev_priv->regfile.saveBLC_PWM_CTL2 =
-					I915_READ(BLC_PWM_CTL2);
-		} else if (val == 0) {
-			val = dev_priv->regfile.saveBLC_PWM_CTL;
-			I915_WRITE(BLC_PWM_CTL, val);
-			if (INTEL_INFO(dev)->gen >= 4)
-				I915_WRITE(BLC_PWM_CTL2,
-					   dev_priv->regfile.saveBLC_PWM_CTL2);
-		}
-	}
-
-	return val;
-}
-
-static u32 intel_panel_get_max_backlight(struct drm_device *dev,
-					 enum pipe pipe)
-{
-	u32 max;
-
-	max = i915_read_blc_pwm_ctl(dev, pipe);
-
-	if (HAS_PCH_SPLIT(dev)) {
-		max >>= 16;
-	} else {
-		if (INTEL_INFO(dev)->gen < 4)
-			max >>= 17;
-		else
-			max >>= 16;
-
-		if (is_backlight_combination_mode(dev))
-			max *= 0xff;
-	}
-
-	DRM_DEBUG_DRIVER("max backlight PWM = %d\n", max);
-
-	return max;
-}
-
 static int i915_panel_invert_brightness;
 MODULE_PARM_DESC(invert_brightness, "Invert backlight brightness "
 	"(-1 force normal, 0 machine defaults, 1 force inversion), please "
@@ -423,116 +332,163 @@
 	"to dri-devel@lists.freedesktop.org, if your machine needs it. "
 	"It will then be included in an upcoming module version.");
 module_param_named(invert_brightness, i915_panel_invert_brightness, int, 0600);
-static u32 intel_panel_compute_brightness(struct drm_device *dev,
-					  enum pipe pipe, u32 val)
+static u32 intel_panel_compute_brightness(struct intel_connector *connector,
+					  u32 val)
 {
+	struct drm_device *dev = connector->base.dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct intel_panel *panel = &connector->panel;
+
+	WARN_ON(panel->backlight.max == 0);
 
 	if (i915_panel_invert_brightness < 0)
 		return val;
 
 	if (i915_panel_invert_brightness > 0 ||
 	    dev_priv->quirks & QUIRK_INVERT_BRIGHTNESS) {
-		u32 max = intel_panel_get_max_backlight(dev, pipe);
-		if (max)
-			return max - val;
+		return panel->backlight.max - val;
 	}
 
 	return val;
 }
 
-static u32 intel_panel_get_backlight(struct drm_device *dev,
-				     enum pipe pipe)
+static u32 bdw_get_backlight(struct intel_connector *connector)
 {
+	struct drm_device *dev = connector->base.dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+
+	return I915_READ(BLC_PWM_PCH_CTL2) & BACKLIGHT_DUTY_CYCLE_MASK;
+}
+
+static u32 pch_get_backlight(struct intel_connector *connector)
+{
+	struct drm_device *dev = connector->base.dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+
+	return I915_READ(BLC_PWM_CPU_CTL) & BACKLIGHT_DUTY_CYCLE_MASK;
+}
+
+static u32 i9xx_get_backlight(struct intel_connector *connector)
+{
+	struct drm_device *dev = connector->base.dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct intel_panel *panel = &connector->panel;
+	u32 val;
+
+	val = I915_READ(BLC_PWM_CTL) & BACKLIGHT_DUTY_CYCLE_MASK;
+	if (INTEL_INFO(dev)->gen < 4)
+		val >>= 1;
+
+	if (panel->backlight.combination_mode) {
+		u8 lbpc;
+
+		pci_read_config_byte(dev->pdev, PCI_LBPC, &lbpc);
+		val *= lbpc;
+	}
+
+	return val;
+}
+
+static u32 _vlv_get_backlight(struct drm_device *dev, enum pipe pipe)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+
+	return I915_READ(VLV_BLC_PWM_CTL(pipe)) & BACKLIGHT_DUTY_CYCLE_MASK;
+}
+
+static u32 vlv_get_backlight(struct intel_connector *connector)
+{
+	struct drm_device *dev = connector->base.dev;
+	enum pipe pipe = intel_get_pipe_from_connector(connector);
+
+	return _vlv_get_backlight(dev, pipe);
+}
+
+static u32 intel_panel_get_backlight(struct intel_connector *connector)
+{
+	struct drm_device *dev = connector->base.dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	u32 val;
 	unsigned long flags;
-	int reg;
 
-	spin_lock_irqsave(&dev_priv->backlight.lock, flags);
+	spin_lock_irqsave(&dev_priv->backlight_lock, flags);
 
-	if (IS_BROADWELL(dev)) {
-		val = I915_READ(BLC_PWM_PCH_CTL2) & BACKLIGHT_DUTY_CYCLE_MASK;
-	} else if (HAS_PCH_SPLIT(dev)) {
-		val = I915_READ(BLC_PWM_CPU_CTL) & BACKLIGHT_DUTY_CYCLE_MASK;
-	} else {
-		if (IS_VALLEYVIEW(dev))
-			reg = VLV_BLC_PWM_CTL(pipe);
-		else
-			reg = BLC_PWM_CTL;
+	val = dev_priv->display.get_backlight(connector);
+	val = intel_panel_compute_brightness(connector, val);
 
-		val = I915_READ(reg) & BACKLIGHT_DUTY_CYCLE_MASK;
-		if (INTEL_INFO(dev)->gen < 4)
-			val >>= 1;
-
-		if (is_backlight_combination_mode(dev)) {
-			u8 lbpc;
-
-			pci_read_config_byte(dev->pdev, PCI_LBPC, &lbpc);
-			val *= lbpc;
-		}
-	}
-
-	val = intel_panel_compute_brightness(dev, pipe, val);
-
-	spin_unlock_irqrestore(&dev_priv->backlight.lock, flags);
+	spin_unlock_irqrestore(&dev_priv->backlight_lock, flags);
 
 	DRM_DEBUG_DRIVER("get backlight PWM = %d\n", val);
 	return val;
 }
 
-static void intel_bdw_panel_set_backlight(struct drm_device *dev, u32 level)
+static void bdw_set_backlight(struct intel_connector *connector, u32 level)
 {
+	struct drm_device *dev = connector->base.dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	u32 val = I915_READ(BLC_PWM_PCH_CTL2) & ~BACKLIGHT_DUTY_CYCLE_MASK;
 	I915_WRITE(BLC_PWM_PCH_CTL2, val | level);
 }
 
-static void intel_pch_panel_set_backlight(struct drm_device *dev, u32 level)
+static void pch_set_backlight(struct intel_connector *connector, u32 level)
 {
-	struct drm_i915_private *dev_priv = dev->dev_private;
-	u32 val = I915_READ(BLC_PWM_CPU_CTL) & ~BACKLIGHT_DUTY_CYCLE_MASK;
-	I915_WRITE(BLC_PWM_CPU_CTL, val | level);
-}
-
-static void intel_panel_actually_set_backlight(struct drm_device *dev,
-					       enum pipe pipe, u32 level)
-{
+	struct drm_device *dev = connector->base.dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	u32 tmp;
-	int reg;
 
-	DRM_DEBUG_DRIVER("set backlight PWM = %d\n", level);
-	level = intel_panel_compute_brightness(dev, pipe, level);
+	tmp = I915_READ(BLC_PWM_CPU_CTL) & ~BACKLIGHT_DUTY_CYCLE_MASK;
+	I915_WRITE(BLC_PWM_CPU_CTL, tmp | level);
+}
 
-	if (IS_BROADWELL(dev))
-		return intel_bdw_panel_set_backlight(dev, level);
-	else if (HAS_PCH_SPLIT(dev))
-		return intel_pch_panel_set_backlight(dev, level);
+static void i9xx_set_backlight(struct intel_connector *connector, u32 level)
+{
+	struct drm_device *dev = connector->base.dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct intel_panel *panel = &connector->panel;
+	u32 tmp, mask;
 
-	if (is_backlight_combination_mode(dev)) {
-		u32 max = intel_panel_get_max_backlight(dev, pipe);
+	WARN_ON(panel->backlight.max == 0);
+
+	if (panel->backlight.combination_mode) {
 		u8 lbpc;
 
-		/* we're screwed, but keep behaviour backwards compatible */
-		if (!max)
-			max = 1;
-
-		lbpc = level * 0xfe / max + 1;
+		lbpc = level * 0xfe / panel->backlight.max + 1;
 		level /= lbpc;
 		pci_write_config_byte(dev->pdev, PCI_LBPC, lbpc);
 	}
 
-	if (IS_VALLEYVIEW(dev))
-		reg = VLV_BLC_PWM_CTL(pipe);
-	else
-		reg = BLC_PWM_CTL;
-
-	tmp = I915_READ(reg);
-	if (INTEL_INFO(dev)->gen < 4)
+	if (IS_GEN4(dev)) {
+		mask = BACKLIGHT_DUTY_CYCLE_MASK;
+	} else {
 		level <<= 1;
-	tmp &= ~BACKLIGHT_DUTY_CYCLE_MASK;
-	I915_WRITE(reg, tmp | level);
+		mask = BACKLIGHT_DUTY_CYCLE_MASK_PNV;
+	}
+
+	tmp = I915_READ(BLC_PWM_CTL) & ~mask;
+	I915_WRITE(BLC_PWM_CTL, tmp | level);
+}
+
+static void vlv_set_backlight(struct intel_connector *connector, u32 level)
+{
+	struct drm_device *dev = connector->base.dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	enum pipe pipe = intel_get_pipe_from_connector(connector);
+	u32 tmp;
+
+	tmp = I915_READ(VLV_BLC_PWM_CTL(pipe)) & ~BACKLIGHT_DUTY_CYCLE_MASK;
+	I915_WRITE(VLV_BLC_PWM_CTL(pipe), tmp | level);
+}
+
+static void
+intel_panel_actually_set_backlight(struct intel_connector *connector, u32 level)
+{
+	struct drm_device *dev = connector->base.dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+
+	DRM_DEBUG_DRIVER("set backlight PWM = %d\n", level);
+
+	level = intel_panel_compute_brightness(connector, level);
+	dev_priv->display.set_backlight(connector, level);
 }
 
 /* set backlight brightness to level in range [0..max] */
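
For reference, a minimal userspace sketch (not part of the patch) of the combination-mode arithmetic used by i9xx_set_backlight() and i9xx_get_backlight() above: the requested level is split into the legacy LBPC PCI byte and a PWM duty cycle, and a readback multiplies the two again. The numbers below are made up purely for illustration.

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t max = 255 * 0xff;	/* assumed: PWM freq field 255, combination mode */
	uint32_t level = 20000;		/* requested brightness in [0..max] */

	/* split into the legacy backlight PCI byte and the PWM duty cycle */
	uint8_t lbpc = level * 0xfe / max + 1;
	uint32_t duty = level / lbpc;

	/* the readback recombines them, as i9xx_get_backlight() does */
	printf("lbpc=%u duty=%u readback=%u (requested %u)\n",
	       (unsigned)lbpc, (unsigned)duty, (unsigned)(duty * lbpc),
	       (unsigned)level);
	return 0;
}
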
@@ -541,45 +497,89 @@
 {
 	struct drm_device *dev = connector->base.dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct intel_panel *panel = &connector->panel;
 	enum pipe pipe = intel_get_pipe_from_connector(connector);
 	u32 freq;
 	unsigned long flags;
 
-	if (pipe == INVALID_PIPE)
+	if (!panel->backlight.present || pipe == INVALID_PIPE)
 		return;
 
-	spin_lock_irqsave(&dev_priv->backlight.lock, flags);
+	spin_lock_irqsave(&dev_priv->backlight_lock, flags);
 
-	freq = intel_panel_get_max_backlight(dev, pipe);
-	if (!freq) {
-		/* we are screwed, bail out */
-		goto out;
-	}
+	WARN_ON(panel->backlight.max == 0);
 
-	/* scale to hardware, but be careful to not overflow */
+	/* scale to hardware max, but be careful to not overflow */
+	freq = panel->backlight.max;
 	if (freq < max)
 		level = level * freq / max;
 	else
 		level = freq / max * level;
 
-	dev_priv->backlight.level = level;
-	if (dev_priv->backlight.device)
-		dev_priv->backlight.device->props.brightness = level;
+	panel->backlight.level = level;
+	if (panel->backlight.device)
+		panel->backlight.device->props.brightness = level;
 
-	if (dev_priv->backlight.enabled)
-		intel_panel_actually_set_backlight(dev, pipe, level);
-out:
-	spin_unlock_irqrestore(&dev_priv->backlight.lock, flags);
+	if (panel->backlight.enabled)
+		intel_panel_actually_set_backlight(connector, level);
+
+	spin_unlock_irqrestore(&dev_priv->backlight_lock, flags);
+}
+
+static void pch_disable_backlight(struct intel_connector *connector)
+{
+	struct drm_device *dev = connector->base.dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	u32 tmp;
+
+	intel_panel_actually_set_backlight(connector, 0);
+
+	tmp = I915_READ(BLC_PWM_CPU_CTL2);
+	I915_WRITE(BLC_PWM_CPU_CTL2, tmp & ~BLM_PWM_ENABLE);
+
+	tmp = I915_READ(BLC_PWM_PCH_CTL1);
+	I915_WRITE(BLC_PWM_PCH_CTL1, tmp & ~BLM_PCH_PWM_ENABLE);
+}
+
+static void i9xx_disable_backlight(struct intel_connector *connector)
+{
+	intel_panel_actually_set_backlight(connector, 0);
+}
+
+static void i965_disable_backlight(struct intel_connector *connector)
+{
+	struct drm_device *dev = connector->base.dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	u32 tmp;
+
+	intel_panel_actually_set_backlight(connector, 0);
+
+	tmp = I915_READ(BLC_PWM_CTL2);
+	I915_WRITE(BLC_PWM_CTL2, tmp & ~BLM_PWM_ENABLE);
+}
+
+static void vlv_disable_backlight(struct intel_connector *connector)
+{
+	struct drm_device *dev = connector->base.dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	enum pipe pipe = intel_get_pipe_from_connector(connector);
+	u32 tmp;
+
+	intel_panel_actually_set_backlight(connector, 0);
+
+	tmp = I915_READ(VLV_BLC_PWM_CTL2(pipe));
+	I915_WRITE(VLV_BLC_PWM_CTL2(pipe), tmp & ~BLM_PWM_ENABLE);
 }
 
 void intel_panel_disable_backlight(struct intel_connector *connector)
 {
 	struct drm_device *dev = connector->base.dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct intel_panel *panel = &connector->panel;
 	enum pipe pipe = intel_get_pipe_from_connector(connector);
 	unsigned long flags;
 
-	if (pipe == INVALID_PIPE)
+	if (!panel->backlight.present || pipe == INVALID_PIPE)
 		return;
 
 	/*
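
The rescaling in the set-backlight path above picks the operation order based on which range is larger, trading a little precision for overflow safety when the hardware maximum is huge. A standalone sketch of that choice (not part of the patch; the values are made up):

#include <stdint.h>
#include <stdio.h>

/* Map level from the caller's [0..max] range onto the hardware's
 * [0..freq] range without overflowing a 32-bit intermediate. */
static uint32_t scale_backlight(uint32_t level, uint32_t max, uint32_t freq)
{
	if (freq < max)
		return level * freq / max;	/* product stays small, keep precision */
	else
		return freq / max * level;	/* divide first; freq may be very large */
}

int main(void)
{
	/* caller range larger than hardware range: multiply-first branch */
	printf("%u\n", (unsigned)scale_backlight(700, 1000, 255));
	/* hardware max near UINT32_MAX: multiplying first would overflow */
	printf("%u\n", (unsigned)scale_backlight(7, 10, 4000000000u));
	return 0;
}
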
@@ -593,150 +593,215 @@
 		return;
 	}
 
-	spin_lock_irqsave(&dev_priv->backlight.lock, flags);
+	spin_lock_irqsave(&dev_priv->backlight_lock, flags);
 
-	dev_priv->backlight.enabled = false;
-	intel_panel_actually_set_backlight(dev, pipe, 0);
+	panel->backlight.enabled = false;
+	dev_priv->display.disable_backlight(connector);
 
-	if (INTEL_INFO(dev)->gen >= 4) {
-		uint32_t reg, tmp;
+	spin_unlock_irqrestore(&dev_priv->backlight_lock, flags);
+}
 
-		if (HAS_PCH_SPLIT(dev))
-			reg = BLC_PWM_CPU_CTL2;
-		else if (IS_VALLEYVIEW(dev))
-			reg = VLV_BLC_PWM_CTL2(pipe);
-		else
-			reg = BLC_PWM_CTL2;
+static void bdw_enable_backlight(struct intel_connector *connector)
+{
+	struct drm_device *dev = connector->base.dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct intel_panel *panel = &connector->panel;
+	u32 pch_ctl1, pch_ctl2;
 
-		I915_WRITE(reg, I915_READ(reg) & ~BLM_PWM_ENABLE);
-
-		if (HAS_PCH_SPLIT(dev)) {
-			tmp = I915_READ(BLC_PWM_PCH_CTL1);
-			tmp &= ~BLM_PCH_PWM_ENABLE;
-			I915_WRITE(BLC_PWM_PCH_CTL1, tmp);
-		}
+	pch_ctl1 = I915_READ(BLC_PWM_PCH_CTL1);
+	if (pch_ctl1 & BLM_PCH_PWM_ENABLE) {
+		DRM_DEBUG_KMS("pch backlight already enabled\n");
+		pch_ctl1 &= ~BLM_PCH_PWM_ENABLE;
+		I915_WRITE(BLC_PWM_PCH_CTL1, pch_ctl1);
 	}
 
-	spin_unlock_irqrestore(&dev_priv->backlight.lock, flags);
+	pch_ctl2 = panel->backlight.max << 16;
+	I915_WRITE(BLC_PWM_PCH_CTL2, pch_ctl2);
+
+	pch_ctl1 = 0;
+	if (panel->backlight.active_low_pwm)
+		pch_ctl1 |= BLM_PCH_POLARITY;
+
+	/* BDW always uses the pch pwm controls. */
+	pch_ctl1 |= BLM_PCH_OVERRIDE_ENABLE;
+
+	I915_WRITE(BLC_PWM_PCH_CTL1, pch_ctl1);
+	POSTING_READ(BLC_PWM_PCH_CTL1);
+	I915_WRITE(BLC_PWM_PCH_CTL1, pch_ctl1 | BLM_PCH_PWM_ENABLE);
+
+	/* This won't stick until the above enable. */
+	intel_panel_actually_set_backlight(connector, panel->backlight.level);
+}
+
+static void pch_enable_backlight(struct intel_connector *connector)
+{
+	struct drm_device *dev = connector->base.dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct intel_panel *panel = &connector->panel;
+	enum pipe pipe = intel_get_pipe_from_connector(connector);
+	enum transcoder cpu_transcoder =
+		intel_pipe_to_cpu_transcoder(dev_priv, pipe);
+	u32 cpu_ctl2, pch_ctl1, pch_ctl2;
+
+	cpu_ctl2 = I915_READ(BLC_PWM_CPU_CTL2);
+	if (cpu_ctl2 & BLM_PWM_ENABLE) {
+		WARN(1, "cpu backlight already enabled\n");
+		cpu_ctl2 &= ~BLM_PWM_ENABLE;
+		I915_WRITE(BLC_PWM_CPU_CTL2, cpu_ctl2);
+	}
+
+	pch_ctl1 = I915_READ(BLC_PWM_PCH_CTL1);
+	if (pch_ctl1 & BLM_PCH_PWM_ENABLE) {
+		DRM_DEBUG_KMS("pch backlight already enabled\n");
+		pch_ctl1 &= ~BLM_PCH_PWM_ENABLE;
+		I915_WRITE(BLC_PWM_PCH_CTL1, pch_ctl1);
+	}
+
+	if (cpu_transcoder == TRANSCODER_EDP)
+		cpu_ctl2 = BLM_TRANSCODER_EDP;
+	else
+		cpu_ctl2 = BLM_PIPE(cpu_transcoder);
+	I915_WRITE(BLC_PWM_CPU_CTL2, cpu_ctl2);
+	POSTING_READ(BLC_PWM_CPU_CTL2);
+	I915_WRITE(BLC_PWM_CPU_CTL2, cpu_ctl2 | BLM_PWM_ENABLE);
+
+	/* This won't stick until the above enable. */
+	intel_panel_actually_set_backlight(connector, panel->backlight.level);
+
+	pch_ctl2 = panel->backlight.max << 16;
+	I915_WRITE(BLC_PWM_PCH_CTL2, pch_ctl2);
+
+	pch_ctl1 = 0;
+	if (panel->backlight.active_low_pwm)
+		pch_ctl1 |= BLM_PCH_POLARITY;
+
+	I915_WRITE(BLC_PWM_PCH_CTL1, pch_ctl1);
+	POSTING_READ(BLC_PWM_PCH_CTL1);
+	I915_WRITE(BLC_PWM_PCH_CTL1, pch_ctl1 | BLM_PCH_PWM_ENABLE);
+}
+
+static void i9xx_enable_backlight(struct intel_connector *connector)
+{
+	struct drm_device *dev = connector->base.dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct intel_panel *panel = &connector->panel;
+	u32 ctl, freq;
+
+	ctl = I915_READ(BLC_PWM_CTL);
+	if (ctl & BACKLIGHT_DUTY_CYCLE_MASK_PNV) {
+		WARN(1, "backlight already enabled\n");
+		I915_WRITE(BLC_PWM_CTL, 0);
+	}
+
+	freq = panel->backlight.max;
+	if (panel->backlight.combination_mode)
+		freq /= 0xff;
+
+	ctl = freq << 17;
+	if (IS_GEN2(dev) && panel->backlight.combination_mode)
+		ctl |= BLM_LEGACY_MODE;
+	if (IS_PINEVIEW(dev) && panel->backlight.active_low_pwm)
+		ctl |= BLM_POLARITY_PNV;
+
+	I915_WRITE(BLC_PWM_CTL, ctl);
+	POSTING_READ(BLC_PWM_CTL);
+
+	/* XXX: combine this into above write? */
+	intel_panel_actually_set_backlight(connector, panel->backlight.level);
+}
+
+static void i965_enable_backlight(struct intel_connector *connector)
+{
+	struct drm_device *dev = connector->base.dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct intel_panel *panel = &connector->panel;
+	enum pipe pipe = intel_get_pipe_from_connector(connector);
+	u32 ctl, ctl2, freq;
+
+	ctl2 = I915_READ(BLC_PWM_CTL2);
+	if (ctl2 & BLM_PWM_ENABLE) {
+		WARN(1, "backlight already enabled\n");
+		ctl2 &= ~BLM_PWM_ENABLE;
+		I915_WRITE(BLC_PWM_CTL2, ctl2);
+	}
+
+	freq = panel->backlight.max;
+	if (panel->backlight.combination_mode)
+		freq /= 0xff;
+
+	ctl = freq << 16;
+	I915_WRITE(BLC_PWM_CTL, ctl);
+
+	/* XXX: combine this into above write? */
+	intel_panel_actually_set_backlight(connector, panel->backlight.level);
+
+	ctl2 = BLM_PIPE(pipe);
+	if (panel->backlight.combination_mode)
+		ctl2 |= BLM_COMBINATION_MODE;
+	if (panel->backlight.active_low_pwm)
+		ctl2 |= BLM_POLARITY_I965;
+	I915_WRITE(BLC_PWM_CTL2, ctl2);
+	POSTING_READ(BLC_PWM_CTL2);
+	I915_WRITE(BLC_PWM_CTL2, ctl2 | BLM_PWM_ENABLE);
+}
+
+static void vlv_enable_backlight(struct intel_connector *connector)
+{
+	struct drm_device *dev = connector->base.dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct intel_panel *panel = &connector->panel;
+	enum pipe pipe = intel_get_pipe_from_connector(connector);
+	u32 ctl, ctl2;
+
+	ctl2 = I915_READ(VLV_BLC_PWM_CTL2(pipe));
+	if (ctl2 & BLM_PWM_ENABLE) {
+		WARN(1, "backlight already enabled\n");
+		ctl2 &= ~BLM_PWM_ENABLE;
+		I915_WRITE(VLV_BLC_PWM_CTL2(pipe), ctl2);
+	}
+
+	ctl = panel->backlight.max << 16;
+	I915_WRITE(VLV_BLC_PWM_CTL(pipe), ctl);
+
+	/* XXX: combine this into above write? */
+	intel_panel_actually_set_backlight(connector, panel->backlight.level);
+
+	ctl2 = 0;
+	if (panel->backlight.active_low_pwm)
+		ctl2 |= BLM_POLARITY_I965;
+	I915_WRITE(VLV_BLC_PWM_CTL2(pipe), ctl2);
+	POSTING_READ(VLV_BLC_PWM_CTL2(pipe));
+	I915_WRITE(VLV_BLC_PWM_CTL2(pipe), ctl2 | BLM_PWM_ENABLE);
 }
 
 void intel_panel_enable_backlight(struct intel_connector *connector)
 {
 	struct drm_device *dev = connector->base.dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct intel_panel *panel = &connector->panel;
 	enum pipe pipe = intel_get_pipe_from_connector(connector);
-	enum transcoder cpu_transcoder =
-		intel_pipe_to_cpu_transcoder(dev_priv, pipe);
 	unsigned long flags;
 
-	if (pipe == INVALID_PIPE)
+	if (!panel->backlight.present || pipe == INVALID_PIPE)
 		return;
 
 	DRM_DEBUG_KMS("pipe %c\n", pipe_name(pipe));
 
-	spin_lock_irqsave(&dev_priv->backlight.lock, flags);
+	spin_lock_irqsave(&dev_priv->backlight_lock, flags);
 
-	if (dev_priv->backlight.level == 0) {
-		dev_priv->backlight.level = intel_panel_get_max_backlight(dev,
-									  pipe);
-		if (dev_priv->backlight.device)
-			dev_priv->backlight.device->props.brightness =
-				dev_priv->backlight.level;
+	WARN_ON(panel->backlight.max == 0);
+
+	if (panel->backlight.level == 0) {
+		panel->backlight.level = panel->backlight.max;
+		if (panel->backlight.device)
+			panel->backlight.device->props.brightness =
+				panel->backlight.level;
 	}
 
-	if (INTEL_INFO(dev)->gen >= 4) {
-		uint32_t reg, tmp;
+	dev_priv->display.enable_backlight(connector);
+	panel->backlight.enabled = true;
 
-		if (HAS_PCH_SPLIT(dev))
-			reg = BLC_PWM_CPU_CTL2;
-		else if (IS_VALLEYVIEW(dev))
-			reg = VLV_BLC_PWM_CTL2(pipe);
-		else
-			reg = BLC_PWM_CTL2;
-
-		tmp = I915_READ(reg);
-
-		/* Note that this can also get called through dpms changes. And
-		 * we don't track the backlight dpms state, hence check whether
-		 * we have to do anything first. */
-		if (tmp & BLM_PWM_ENABLE)
-			goto set_level;
-
-		if (INTEL_INFO(dev)->num_pipes == 3)
-			tmp &= ~BLM_PIPE_SELECT_IVB;
-		else
-			tmp &= ~BLM_PIPE_SELECT;
-
-		if (cpu_transcoder == TRANSCODER_EDP)
-			tmp |= BLM_TRANSCODER_EDP;
-		else
-			tmp |= BLM_PIPE(cpu_transcoder);
-		tmp &= ~BLM_PWM_ENABLE;
-
-		I915_WRITE(reg, tmp);
-		POSTING_READ(reg);
-		I915_WRITE(reg, tmp | BLM_PWM_ENABLE);
-
-		if (IS_BROADWELL(dev)) {
-			/*
-			 * Broadwell requires PCH override to drive the PCH
-			 * backlight pin. The above will configure the CPU
-			 * backlight pin, which we don't plan to use.
-			 */
-			tmp = I915_READ(BLC_PWM_PCH_CTL1);
-			tmp |= BLM_PCH_OVERRIDE_ENABLE | BLM_PCH_PWM_ENABLE;
-			I915_WRITE(BLC_PWM_PCH_CTL1, tmp);
-		} else if (HAS_PCH_SPLIT(dev) &&
-		    !(dev_priv->quirks & QUIRK_NO_PCH_PWM_ENABLE)) {
-			tmp = I915_READ(BLC_PWM_PCH_CTL1);
-			tmp |= BLM_PCH_PWM_ENABLE;
-			tmp &= ~BLM_PCH_OVERRIDE_ENABLE;
-			I915_WRITE(BLC_PWM_PCH_CTL1, tmp);
-		}
-	}
-
-set_level:
-	/* Call below after setting BLC_PWM_CPU_CTL2 and BLC_PWM_PCH_CTL1.
-	 * BLC_PWM_CPU_CTL may be cleared to zero automatically when these
-	 * registers are set.
-	 */
-	dev_priv->backlight.enabled = true;
-	intel_panel_actually_set_backlight(dev, pipe,
-					   dev_priv->backlight.level);
-
-	spin_unlock_irqrestore(&dev_priv->backlight.lock, flags);
-}
-
-/* FIXME: use VBT vals to init PWM_CTL and PWM_CTL2 correctly */
-static void intel_panel_init_backlight_regs(struct drm_device *dev)
-{
-	struct drm_i915_private *dev_priv = dev->dev_private;
-
-	if (IS_VALLEYVIEW(dev)) {
-		enum pipe pipe;
-
-		for_each_pipe(pipe) {
-			u32 cur_val = I915_READ(VLV_BLC_PWM_CTL(pipe));
-
-			/* Skip if the modulation freq is already set */
-			if (cur_val & ~BACKLIGHT_DUTY_CYCLE_MASK)
-				continue;
-
-			cur_val &= BACKLIGHT_DUTY_CYCLE_MASK;
-			I915_WRITE(VLV_BLC_PWM_CTL(pipe), (0xf42 << 16) |
-				   cur_val);
-		}
-	}
-}
-
-static void intel_panel_init_backlight(struct drm_device *dev)
-{
-	struct drm_i915_private *dev_priv = dev->dev_private;
-
-	intel_panel_init_backlight_regs(dev);
-
-	dev_priv->backlight.level = intel_panel_get_backlight(dev, 0);
-	dev_priv->backlight.enabled = dev_priv->backlight.level != 0;
+	spin_unlock_irqrestore(&dev_priv->backlight_lock, flags);
 }
 
 enum drm_connector_status
@@ -762,7 +827,7 @@
 }
 
 #if IS_ENABLED(CONFIG_BACKLIGHT_CLASS_DEVICE)
-static int intel_panel_update_status(struct backlight_device *bd)
+static int intel_backlight_device_update_status(struct backlight_device *bd)
 {
 	struct intel_connector *connector = bl_get_data(bd);
 	struct drm_device *dev = connector->base.dev;
@@ -776,85 +841,362 @@
 	return 0;
 }
 
-static int intel_panel_get_brightness(struct backlight_device *bd)
+static int intel_backlight_device_get_brightness(struct backlight_device *bd)
 {
 	struct intel_connector *connector = bl_get_data(bd);
 	struct drm_device *dev = connector->base.dev;
-	enum pipe pipe;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	int ret;
 
+	intel_runtime_pm_get(dev_priv);
 	mutex_lock(&dev->mode_config.mutex);
-	pipe = intel_get_pipe_from_connector(connector);
+	ret = intel_panel_get_backlight(connector);
 	mutex_unlock(&dev->mode_config.mutex);
-	if (pipe == INVALID_PIPE)
-		return 0;
+	intel_runtime_pm_put(dev_priv);
 
-	return intel_panel_get_backlight(connector->base.dev, pipe);
+	return ret;
 }
 
-static const struct backlight_ops intel_panel_bl_ops = {
-	.update_status = intel_panel_update_status,
-	.get_brightness = intel_panel_get_brightness,
+static const struct backlight_ops intel_backlight_device_ops = {
+	.update_status = intel_backlight_device_update_status,
+	.get_brightness = intel_backlight_device_get_brightness,
 };
 
+static int intel_backlight_device_register(struct intel_connector *connector)
+{
+	struct intel_panel *panel = &connector->panel;
+	struct backlight_properties props;
+
+	if (WARN_ON(panel->backlight.device))
+		return -ENODEV;
+
+	BUG_ON(panel->backlight.max == 0);
+
+	memset(&props, 0, sizeof(props));
+	props.type = BACKLIGHT_RAW;
+	props.brightness = panel->backlight.level;
+	props.max_brightness = panel->backlight.max;
+
+	/*
+	 * Note: using the same name independent of the connector prevents
+	 * registration of multiple backlight devices in the driver.
+	 */
+	panel->backlight.device =
+		backlight_device_register("intel_backlight",
+					  connector->base.kdev,
+					  connector,
+					  &intel_backlight_device_ops, &props);
+
+	if (IS_ERR(panel->backlight.device)) {
+		DRM_ERROR("Failed to register backlight: %ld\n",
+			  PTR_ERR(panel->backlight.device));
+		panel->backlight.device = NULL;
+		return -ENODEV;
+	}
+	return 0;
+}
+
+static void intel_backlight_device_unregister(struct intel_connector *connector)
+{
+	struct intel_panel *panel = &connector->panel;
+
+	if (panel->backlight.device) {
+		backlight_device_unregister(panel->backlight.device);
+		panel->backlight.device = NULL;
+	}
+}
+#else /* CONFIG_BACKLIGHT_CLASS_DEVICE */
+static int intel_backlight_device_register(struct intel_connector *connector)
+{
+	return 0;
+}
+static void intel_backlight_device_unregister(struct intel_connector *connector)
+{
+}
+#endif /* CONFIG_BACKLIGHT_CLASS_DEVICE */
+
+/*
+ * Note: The setup hooks can't assume pipe is set!
+ *
+ * XXX: Query mode clock or hardware clock and program PWM modulation frequency
+ * appropriately when it's 0. Use VBT and/or sane defaults.
+ */
+static int bdw_setup_backlight(struct intel_connector *connector)
+{
+	struct drm_device *dev = connector->base.dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct intel_panel *panel = &connector->panel;
+	u32 pch_ctl1, pch_ctl2, val;
+
+	pch_ctl1 = I915_READ(BLC_PWM_PCH_CTL1);
+	panel->backlight.active_low_pwm = pch_ctl1 & BLM_PCH_POLARITY;
+
+	pch_ctl2 = I915_READ(BLC_PWM_PCH_CTL2);
+	panel->backlight.max = pch_ctl2 >> 16;
+	if (!panel->backlight.max)
+		return -ENODEV;
+
+	val = bdw_get_backlight(connector);
+	panel->backlight.level = intel_panel_compute_brightness(connector, val);
+
+	panel->backlight.enabled = (pch_ctl1 & BLM_PCH_PWM_ENABLE) &&
+		panel->backlight.level != 0;
+
+	return 0;
+}
+
+static int pch_setup_backlight(struct intel_connector *connector)
+{
+	struct drm_device *dev = connector->base.dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct intel_panel *panel = &connector->panel;
+	u32 cpu_ctl2, pch_ctl1, pch_ctl2, val;
+
+	pch_ctl1 = I915_READ(BLC_PWM_PCH_CTL1);
+	panel->backlight.active_low_pwm = pch_ctl1 & BLM_PCH_POLARITY;
+
+	pch_ctl2 = I915_READ(BLC_PWM_PCH_CTL2);
+	panel->backlight.max = pch_ctl2 >> 16;
+	if (!panel->backlight.max)
+		return -ENODEV;
+
+	val = pch_get_backlight(connector);
+	panel->backlight.level = intel_panel_compute_brightness(connector, val);
+
+	cpu_ctl2 = I915_READ(BLC_PWM_CPU_CTL2);
+	panel->backlight.enabled = (cpu_ctl2 & BLM_PWM_ENABLE) &&
+		(pch_ctl1 & BLM_PCH_PWM_ENABLE) && panel->backlight.level != 0;
+
+	return 0;
+}
+
+static int i9xx_setup_backlight(struct intel_connector *connector)
+{
+	struct drm_device *dev = connector->base.dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct intel_panel *panel = &connector->panel;
+	u32 ctl, val;
+
+	ctl = I915_READ(BLC_PWM_CTL);
+
+	if (IS_GEN2(dev))
+		panel->backlight.combination_mode = ctl & BLM_LEGACY_MODE;
+
+	if (IS_PINEVIEW(dev))
+		panel->backlight.active_low_pwm = ctl & BLM_POLARITY_PNV;
+
+	panel->backlight.max = ctl >> 17;
+	if (panel->backlight.combination_mode)
+		panel->backlight.max *= 0xff;
+
+	if (!panel->backlight.max)
+		return -ENODEV;
+
+	val = i9xx_get_backlight(connector);
+	panel->backlight.level = intel_panel_compute_brightness(connector, val);
+
+	panel->backlight.enabled = panel->backlight.level != 0;
+
+	return 0;
+}
+
+static int i965_setup_backlight(struct intel_connector *connector)
+{
+	struct drm_device *dev = connector->base.dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct intel_panel *panel = &connector->panel;
+	u32 ctl, ctl2, val;
+
+	ctl2 = I915_READ(BLC_PWM_CTL2);
+	panel->backlight.combination_mode = ctl2 & BLM_COMBINATION_MODE;
+	panel->backlight.active_low_pwm = ctl2 & BLM_POLARITY_I965;
+
+	ctl = I915_READ(BLC_PWM_CTL);
+	panel->backlight.max = ctl >> 16;
+	if (panel->backlight.combination_mode)
+		panel->backlight.max *= 0xff;
+
+	if (!panel->backlight.max)
+		return -ENODEV;
+
+	val = i9xx_get_backlight(connector);
+	panel->backlight.level = intel_panel_compute_brightness(connector, val);
+
+	panel->backlight.enabled = (ctl2 & BLM_PWM_ENABLE) &&
+		panel->backlight.level != 0;
+
+	return 0;
+}
+
+static int vlv_setup_backlight(struct intel_connector *connector)
+{
+	struct drm_device *dev = connector->base.dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct intel_panel *panel = &connector->panel;
+	enum pipe pipe;
+	u32 ctl, ctl2, val;
+
+	for_each_pipe(pipe) {
+		u32 cur_val = I915_READ(VLV_BLC_PWM_CTL(pipe));
+
+		/* Skip if the modulation freq is already set */
+		if (cur_val & ~BACKLIGHT_DUTY_CYCLE_MASK)
+			continue;
+
+		cur_val &= BACKLIGHT_DUTY_CYCLE_MASK;
+		I915_WRITE(VLV_BLC_PWM_CTL(pipe), (0xf42 << 16) |
+			   cur_val);
+	}
+
+	ctl2 = I915_READ(VLV_BLC_PWM_CTL2(PIPE_A));
+	panel->backlight.active_low_pwm = ctl2 & BLM_POLARITY_I965;
+
+	ctl = I915_READ(VLV_BLC_PWM_CTL(PIPE_A));
+	panel->backlight.max = ctl >> 16;
+	if (!panel->backlight.max)
+		return -ENODEV;
+
+	val = _vlv_get_backlight(dev, PIPE_A);
+	panel->backlight.level = intel_panel_compute_brightness(connector, val);
+
+	panel->backlight.enabled = (ctl2 & BLM_PWM_ENABLE) &&
+		panel->backlight.level != 0;
+
+	return 0;
+}
+
 int intel_panel_setup_backlight(struct drm_connector *connector)
 {
 	struct drm_device *dev = connector->dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
-	struct backlight_properties props;
+	struct intel_connector *intel_connector = to_intel_connector(connector);
+	struct intel_panel *panel = &intel_connector->panel;
 	unsigned long flags;
+	int ret;
 
-	intel_panel_init_backlight(dev);
+	/* set level and max in panel struct */
+	spin_lock_irqsave(&dev_priv->backlight_lock, flags);
+	ret = dev_priv->display.setup_backlight(intel_connector);
+	spin_unlock_irqrestore(&dev_priv->backlight_lock, flags);
 
-	if (WARN_ON(dev_priv->backlight.device))
-		return -ENODEV;
-
-	memset(&props, 0, sizeof(props));
-	props.type = BACKLIGHT_RAW;
-	props.brightness = dev_priv->backlight.level;
-
-	spin_lock_irqsave(&dev_priv->backlight.lock, flags);
-	props.max_brightness = intel_panel_get_max_backlight(dev, 0);
-	spin_unlock_irqrestore(&dev_priv->backlight.lock, flags);
-
-	if (props.max_brightness == 0) {
-		DRM_DEBUG_DRIVER("Failed to get maximum backlight value\n");
-		return -ENODEV;
+	if (ret) {
+		DRM_DEBUG_KMS("failed to setup backlight for connector %s\n",
+			      drm_get_connector_name(connector));
+		return ret;
 	}
-	dev_priv->backlight.device =
-		backlight_device_register("intel_backlight",
-					  connector->kdev,
-					  to_intel_connector(connector),
-					  &intel_panel_bl_ops, &props);
 
-	if (IS_ERR(dev_priv->backlight.device)) {
-		DRM_ERROR("Failed to register backlight: %ld\n",
-			  PTR_ERR(dev_priv->backlight.device));
-		dev_priv->backlight.device = NULL;
-		return -ENODEV;
-	}
+	intel_backlight_device_register(intel_connector);
+
+	panel->backlight.present = true;
+
+	DRM_DEBUG_KMS("backlight initialized, %s, brightness %u/%u, "
+		      "sysfs interface %sregistered\n",
+		      panel->backlight.enabled ? "enabled" : "disabled",
+		      panel->backlight.level, panel->backlight.max,
+		      panel->backlight.device ? "" : "not ");
+
 	return 0;
 }
 
-void intel_panel_destroy_backlight(struct drm_device *dev)
+void intel_panel_destroy_backlight(struct drm_connector *connector)
+{
+	struct intel_connector *intel_connector = to_intel_connector(connector);
+	struct intel_panel *panel = &intel_connector->panel;
+
+	panel->backlight.present = false;
+	intel_backlight_device_unregister(intel_connector);
+}
+
+/**
+ * intel_find_panel_downclock - find the reduced downclock for LVDS in EDID
+ * @dev: drm device
+ * @fixed_mode: panel native mode
+ * @connector: LVDS/eDP connector
+ *
+ * Finds the reduced downclock for LVDS/eDP in EDID.
+ * Returns the downclock mode if one is found, otherwise NULL.
+ */
+struct drm_display_mode *
+intel_find_panel_downclock(struct drm_device *dev,
+			struct drm_display_mode *fixed_mode,
+			struct drm_connector *connector)
+{
+	struct drm_display_mode *scan, *tmp_mode;
+	int temp_downclock;
+
+	temp_downclock = fixed_mode->clock;
+	tmp_mode = NULL;
+
+	list_for_each_entry(scan, &connector->probed_modes, head) {
+		/*
+		 * If a mode has the same resolution as the fixed_panel
+		 * mode but a different refresh rate, a reduced downclock
+		 * has been found. In that case we can program different
+		 * FPx0/1 values to dynamically select between the low
+		 * and high frequencies.
+		 */
+		if (scan->hdisplay == fixed_mode->hdisplay &&
+		    scan->hsync_start == fixed_mode->hsync_start &&
+		    scan->hsync_end == fixed_mode->hsync_end &&
+		    scan->htotal == fixed_mode->htotal &&
+		    scan->vdisplay == fixed_mode->vdisplay &&
+		    scan->vsync_start == fixed_mode->vsync_start &&
+		    scan->vsync_end == fixed_mode->vsync_end &&
+		    scan->vtotal == fixed_mode->vtotal) {
+			if (scan->clock < temp_downclock) {
+				/*
+				 * The downclock is already found. But we
+				 * expect to find the lower downclock.
+				 */
+				temp_downclock = scan->clock;
+				tmp_mode = scan;
+			}
+		}
+	}
+
+	if (temp_downclock < fixed_mode->clock)
+		return drm_mode_duplicate(dev, tmp_mode);
+	else
+		return NULL;
+}
+
+/* Set up chip specific backlight functions */
+void intel_panel_init_backlight_funcs(struct drm_device *dev)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
-	if (dev_priv->backlight.device) {
-		backlight_device_unregister(dev_priv->backlight.device);
-		dev_priv->backlight.device = NULL;
+
+	if (IS_BROADWELL(dev)) {
+		dev_priv->display.setup_backlight = bdw_setup_backlight;
+		dev_priv->display.enable_backlight = bdw_enable_backlight;
+		dev_priv->display.disable_backlight = pch_disable_backlight;
+		dev_priv->display.set_backlight = bdw_set_backlight;
+		dev_priv->display.get_backlight = bdw_get_backlight;
+	} else if (HAS_PCH_SPLIT(dev)) {
+		dev_priv->display.setup_backlight = pch_setup_backlight;
+		dev_priv->display.enable_backlight = pch_enable_backlight;
+		dev_priv->display.disable_backlight = pch_disable_backlight;
+		dev_priv->display.set_backlight = pch_set_backlight;
+		dev_priv->display.get_backlight = pch_get_backlight;
+	} else if (IS_VALLEYVIEW(dev)) {
+		dev_priv->display.setup_backlight = vlv_setup_backlight;
+		dev_priv->display.enable_backlight = vlv_enable_backlight;
+		dev_priv->display.disable_backlight = vlv_disable_backlight;
+		dev_priv->display.set_backlight = vlv_set_backlight;
+		dev_priv->display.get_backlight = vlv_get_backlight;
+	} else if (IS_GEN4(dev)) {
+		dev_priv->display.setup_backlight = i965_setup_backlight;
+		dev_priv->display.enable_backlight = i965_enable_backlight;
+		dev_priv->display.disable_backlight = i965_disable_backlight;
+		dev_priv->display.set_backlight = i9xx_set_backlight;
+		dev_priv->display.get_backlight = i9xx_get_backlight;
+	} else {
+		dev_priv->display.setup_backlight = i9xx_setup_backlight;
+		dev_priv->display.enable_backlight = i9xx_enable_backlight;
+		dev_priv->display.disable_backlight = i9xx_disable_backlight;
+		dev_priv->display.set_backlight = i9xx_set_backlight;
+		dev_priv->display.get_backlight = i9xx_get_backlight;
 	}
 }
-#else
-int intel_panel_setup_backlight(struct drm_connector *connector)
-{
-	intel_panel_init_backlight(connector->dev);
-	return 0;
-}
-
-void intel_panel_destroy_backlight(struct drm_device *dev)
-{
-	return;
-}
-#endif
 
 int intel_panel_init(struct intel_panel *panel,
 		     struct drm_display_mode *fixed_mode)
@@ -871,4 +1213,8 @@
 
 	if (panel->fixed_mode)
 		drm_mode_destroy(intel_connector->base.dev, panel->fixed_mode);
+
+	if (panel->downclock_mode)
+		drm_mode_destroy(intel_connector->base.dev,
+				panel->downclock_mode);
 }
diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c
index 26c29c1..d77cc81 100644
--- a/drivers/gpu/drm/i915/intel_pm.c
+++ b/drivers/gpu/drm/i915/intel_pm.c
@@ -30,7 +30,9 @@
 #include "intel_drv.h"
 #include "../../../platform/x86/intel_ips.h"
 #include <linux/module.h>
+#include <linux/vgaarb.h>
 #include <drm/i915_powerwell.h>
+#include <linux/pm_runtime.h>
 
 /**
  * RC6 is a special power stage which allows the GPU to enter an very
@@ -86,7 +88,7 @@
 	DRM_DEBUG_KMS("disabled FBC\n");
 }
 
-static void i8xx_enable_fbc(struct drm_crtc *crtc, unsigned long interval)
+static void i8xx_enable_fbc(struct drm_crtc *crtc)
 {
 	struct drm_device *dev = crtc->dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
@@ -96,32 +98,40 @@
 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
 	int cfb_pitch;
 	int plane, i;
-	u32 fbc_ctl, fbc_ctl2;
+	u32 fbc_ctl;
 
 	cfb_pitch = dev_priv->fbc.size / FBC_LL_SIZE;
 	if (fb->pitches[0] < cfb_pitch)
 		cfb_pitch = fb->pitches[0];
 
-	/* FBC_CTL wants 64B units */
-	cfb_pitch = (cfb_pitch / 64) - 1;
+	/* FBC_CTL wants 32B or 64B units */
+	if (IS_GEN2(dev))
+		cfb_pitch = (cfb_pitch / 32) - 1;
+	else
+		cfb_pitch = (cfb_pitch / 64) - 1;
 	plane = intel_crtc->plane == 0 ? FBC_CTL_PLANEA : FBC_CTL_PLANEB;
 
 	/* Clear old tags */
 	for (i = 0; i < (FBC_LL_SIZE / 32) + 1; i++)
 		I915_WRITE(FBC_TAG + (i * 4), 0);
 
-	/* Set it up... */
-	fbc_ctl2 = FBC_CTL_FENCE_DBL | FBC_CTL_IDLE_IMM | FBC_CTL_CPU_FENCE;
-	fbc_ctl2 |= plane;
-	I915_WRITE(FBC_CONTROL2, fbc_ctl2);
-	I915_WRITE(FBC_FENCE_OFF, crtc->y);
+	if (IS_GEN4(dev)) {
+		u32 fbc_ctl2;
+
+		/* Set it up... */
+		fbc_ctl2 = FBC_CTL_FENCE_DBL | FBC_CTL_IDLE_IMM | FBC_CTL_CPU_FENCE;
+		fbc_ctl2 |= plane;
+		I915_WRITE(FBC_CONTROL2, fbc_ctl2);
+		I915_WRITE(FBC_FENCE_OFF, crtc->y);
+	}
 
 	/* enable it... */
-	fbc_ctl = FBC_CTL_EN | FBC_CTL_PERIODIC;
+	fbc_ctl = I915_READ(FBC_CONTROL);
+	fbc_ctl &= 0x3fff << FBC_CTL_INTERVAL_SHIFT;
+	fbc_ctl |= FBC_CTL_EN | FBC_CTL_PERIODIC;
 	if (IS_I945GM(dev))
 		fbc_ctl |= FBC_CTL_C3_IDLE; /* 945 needs special SR handling */
 	fbc_ctl |= (cfb_pitch & 0xff) << FBC_CTL_STRIDE_SHIFT;
-	fbc_ctl |= (interval & 0x2fff) << FBC_CTL_INTERVAL_SHIFT;
 	fbc_ctl |= obj->fence_reg;
 	I915_WRITE(FBC_CONTROL, fbc_ctl);
 
@@ -136,7 +146,7 @@
 	return I915_READ(FBC_CONTROL) & FBC_CTL_EN;
 }
 
-static void g4x_enable_fbc(struct drm_crtc *crtc, unsigned long interval)
+static void g4x_enable_fbc(struct drm_crtc *crtc)
 {
 	struct drm_device *dev = crtc->dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
@@ -145,16 +155,12 @@
 	struct drm_i915_gem_object *obj = intel_fb->obj;
 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
 	int plane = intel_crtc->plane == 0 ? DPFC_CTL_PLANEA : DPFC_CTL_PLANEB;
-	unsigned long stall_watermark = 200;
 	u32 dpfc_ctl;
 
 	dpfc_ctl = plane | DPFC_SR_EN | DPFC_CTL_LIMIT_1X;
 	dpfc_ctl |= DPFC_CTL_FENCE_EN | obj->fence_reg;
 	I915_WRITE(DPFC_CHICKEN, DPFC_HT_MODIFY);
 
-	I915_WRITE(DPFC_RECOMP_CTL, DPFC_RECOMP_STALL_EN |
-		   (stall_watermark << DPFC_RECOMP_STALL_WM_SHIFT) |
-		   (interval << DPFC_RECOMP_TIMER_COUNT_SHIFT));
 	I915_WRITE(DPFC_FENCE_YOFF, crtc->y);
 
 	/* enable it... */
@@ -191,7 +197,11 @@
 	u32 blt_ecoskpd;
 
 	/* Make sure blitter notifies FBC of writes */
-	gen6_gt_force_wake_get(dev_priv);
+
+	/* The blitter is part of the Media power well on VLV. This
+	 * parameter has no impact on other platforms for now. */
+	gen6_gt_force_wake_get(dev_priv, FORCEWAKE_MEDIA);
+
 	blt_ecoskpd = I915_READ(GEN6_BLITTER_ECOSKPD);
 	blt_ecoskpd |= GEN6_BLITTER_FBC_NOTIFY <<
 		GEN6_BLITTER_LOCK_SHIFT;
@@ -202,10 +212,11 @@
 			 GEN6_BLITTER_LOCK_SHIFT);
 	I915_WRITE(GEN6_BLITTER_ECOSKPD, blt_ecoskpd);
 	POSTING_READ(GEN6_BLITTER_ECOSKPD);
-	gen6_gt_force_wake_put(dev_priv);
+
+	gen6_gt_force_wake_put(dev_priv, FORCEWAKE_MEDIA);
 }
 
-static void ironlake_enable_fbc(struct drm_crtc *crtc, unsigned long interval)
+static void ironlake_enable_fbc(struct drm_crtc *crtc)
 {
 	struct drm_device *dev = crtc->dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
@@ -214,7 +225,6 @@
 	struct drm_i915_gem_object *obj = intel_fb->obj;
 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
 	int plane = intel_crtc->plane == 0 ? DPFC_CTL_PLANEA : DPFC_CTL_PLANEB;
-	unsigned long stall_watermark = 200;
 	u32 dpfc_ctl;
 
 	dpfc_ctl = I915_READ(ILK_DPFC_CONTROL);
@@ -222,12 +232,11 @@
 	dpfc_ctl |= (plane | DPFC_CTL_LIMIT_1X);
 	/* Set persistent mode for front-buffer rendering, ala X. */
 	dpfc_ctl |= DPFC_CTL_PERSISTENT_MODE;
-	dpfc_ctl |= (DPFC_CTL_FENCE_EN | obj->fence_reg);
+	dpfc_ctl |= DPFC_CTL_FENCE_EN;
+	if (IS_GEN5(dev))
+		dpfc_ctl |= obj->fence_reg;
 	I915_WRITE(ILK_DPFC_CHICKEN, DPFC_HT_MODIFY);
 
-	I915_WRITE(ILK_DPFC_RECOMP_CTL, DPFC_RECOMP_STALL_EN |
-		   (stall_watermark << DPFC_RECOMP_STALL_WM_SHIFT) |
-		   (interval << DPFC_RECOMP_TIMER_COUNT_SHIFT));
 	I915_WRITE(ILK_DPFC_FENCE_YOFF, crtc->y);
 	I915_WRITE(ILK_FBC_RT_BASE, i915_gem_obj_ggtt_offset(obj) | ILK_FBC_RT_VALID);
 	/* enable it... */
@@ -265,7 +274,7 @@
 	return I915_READ(ILK_DPFC_CONTROL) & DPFC_CTL_EN;
 }
 
-static void gen7_enable_fbc(struct drm_crtc *crtc, unsigned long interval)
+static void gen7_enable_fbc(struct drm_crtc *crtc)
 {
 	struct drm_device *dev = crtc->dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
@@ -295,7 +304,7 @@
 
 	sandybridge_blit_fbc_update(dev);
 
-	DRM_DEBUG_KMS("enabled fbc on plane %d\n", intel_crtc->plane);
+	DRM_DEBUG_KMS("enabled fbc on plane %c\n", plane_name(intel_crtc->plane));
 }
 
 bool intel_fbc_enabled(struct drm_device *dev)
@@ -322,8 +331,7 @@
 		 * the prior work.
 		 */
 		if (work->crtc->fb == work->fb) {
-			dev_priv->display.enable_fbc(work->crtc,
-						     work->interval);
+			dev_priv->display.enable_fbc(work->crtc);
 
 			dev_priv->fbc.plane = to_intel_crtc(work->crtc)->plane;
 			dev_priv->fbc.fb_id = work->crtc->fb->base.id;
@@ -360,7 +368,7 @@
 	dev_priv->fbc.fbc_work = NULL;
 }
 
-static void intel_enable_fbc(struct drm_crtc *crtc, unsigned long interval)
+static void intel_enable_fbc(struct drm_crtc *crtc)
 {
 	struct intel_fbc_work *work;
 	struct drm_device *dev = crtc->dev;
@@ -374,13 +382,12 @@
 	work = kzalloc(sizeof(*work), GFP_KERNEL);
 	if (work == NULL) {
 		DRM_ERROR("Failed to allocate FBC work structure\n");
-		dev_priv->display.enable_fbc(crtc, interval);
+		dev_priv->display.enable_fbc(crtc);
 		return;
 	}
 
 	work->crtc = crtc;
 	work->fb = crtc->fb;
-	work->interval = interval;
 	INIT_DELAYED_WORK(&work->work, intel_fbc_work_fn);
 
 	dev_priv->fbc.fbc_work = work;
@@ -454,7 +461,7 @@
 	const struct drm_display_mode *adjusted_mode;
 	unsigned int max_width, max_height;
 
-	if (!I915_HAS_FBC(dev)) {
+	if (!HAS_FBC(dev)) {
 		set_no_fbc_reason(dev_priv, FBC_UNSUPPORTED);
 		return;
 	}
@@ -530,10 +537,10 @@
 			DRM_DEBUG_KMS("mode too large for compression, disabling\n");
 		goto out_disable;
 	}
-	if ((IS_I915GM(dev) || IS_I945GM(dev) || IS_HASWELL(dev)) &&
-	    intel_crtc->plane != 0) {
+	if ((INTEL_INFO(dev)->gen < 4 || IS_HASWELL(dev)) &&
+	    intel_crtc->plane != PLANE_A) {
 		if (set_no_fbc_reason(dev_priv, FBC_BAD_PLANE))
-			DRM_DEBUG_KMS("plane not 0, disabling compression\n");
+			DRM_DEBUG_KMS("plane not A, disabling compression\n");
 		goto out_disable;
 	}
 
@@ -595,7 +602,7 @@
 		intel_disable_fbc(dev);
 	}
 
-	intel_enable_fbc(crtc, 500);
+	intel_enable_fbc(crtc);
 	dev_priv->fbc.no_fbc_reason = FBC_OK;
 	return;
 
@@ -817,7 +824,7 @@
 	return size;
 }
 
-static int i85x_get_fifo_size(struct drm_device *dev, int plane)
+static int i830_get_fifo_size(struct drm_device *dev, int plane)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	uint32_t dsparb = I915_READ(DSPARB);
@@ -850,21 +857,6 @@
 	return size;
 }
 
-static int i830_get_fifo_size(struct drm_device *dev, int plane)
-{
-	struct drm_i915_private *dev_priv = dev->dev_private;
-	uint32_t dsparb = I915_READ(DSPARB);
-	int size;
-
-	size = dsparb & 0x7f;
-	size >>= 1; /* Convert to cachelines */
-
-	DRM_DEBUG_KMS("FIFO size - (0x%08x) %s: %d\n", dsparb,
-		      plane ? "B" : "A", size);
-
-	return size;
-}
-
 /* Pineview has different values for various configs */
 static const struct intel_watermark_params pineview_display_wm = {
 	PINEVIEW_DISPLAY_FIFO,
@@ -943,14 +935,14 @@
 	2,
 	I915_FIFO_LINE_SIZE
 };
-static const struct intel_watermark_params i855_wm_info = {
+static const struct intel_watermark_params i830_wm_info = {
 	I855GM_FIFO_SIZE,
 	I915_MAX_WM,
 	1,
 	2,
 	I830_FIFO_LINE_SIZE
 };
-static const struct intel_watermark_params i830_wm_info = {
+static const struct intel_watermark_params i845_wm_info = {
 	I830_FIFO_SIZE,
 	I915_MAX_WM,
 	1,
@@ -958,65 +950,6 @@
 	I830_FIFO_LINE_SIZE
 };
 
-static const struct intel_watermark_params ironlake_display_wm_info = {
-	ILK_DISPLAY_FIFO,
-	ILK_DISPLAY_MAXWM,
-	ILK_DISPLAY_DFTWM,
-	2,
-	ILK_FIFO_LINE_SIZE
-};
-static const struct intel_watermark_params ironlake_cursor_wm_info = {
-	ILK_CURSOR_FIFO,
-	ILK_CURSOR_MAXWM,
-	ILK_CURSOR_DFTWM,
-	2,
-	ILK_FIFO_LINE_SIZE
-};
-static const struct intel_watermark_params ironlake_display_srwm_info = {
-	ILK_DISPLAY_SR_FIFO,
-	ILK_DISPLAY_MAX_SRWM,
-	ILK_DISPLAY_DFT_SRWM,
-	2,
-	ILK_FIFO_LINE_SIZE
-};
-static const struct intel_watermark_params ironlake_cursor_srwm_info = {
-	ILK_CURSOR_SR_FIFO,
-	ILK_CURSOR_MAX_SRWM,
-	ILK_CURSOR_DFT_SRWM,
-	2,
-	ILK_FIFO_LINE_SIZE
-};
-
-static const struct intel_watermark_params sandybridge_display_wm_info = {
-	SNB_DISPLAY_FIFO,
-	SNB_DISPLAY_MAXWM,
-	SNB_DISPLAY_DFTWM,
-	2,
-	SNB_FIFO_LINE_SIZE
-};
-static const struct intel_watermark_params sandybridge_cursor_wm_info = {
-	SNB_CURSOR_FIFO,
-	SNB_CURSOR_MAXWM,
-	SNB_CURSOR_DFTWM,
-	2,
-	SNB_FIFO_LINE_SIZE
-};
-static const struct intel_watermark_params sandybridge_display_srwm_info = {
-	SNB_DISPLAY_SR_FIFO,
-	SNB_DISPLAY_MAX_SRWM,
-	SNB_DISPLAY_DFT_SRWM,
-	2,
-	SNB_FIFO_LINE_SIZE
-};
-static const struct intel_watermark_params sandybridge_cursor_srwm_info = {
-	SNB_CURSOR_SR_FIFO,
-	SNB_CURSOR_MAX_SRWM,
-	SNB_CURSOR_DFT_SRWM,
-	2,
-	SNB_FIFO_LINE_SIZE
-};
-
-
 /**
  * intel_calculate_wm - calculate watermark level
  * @clock_in_khz: pixel clock
@@ -1567,7 +1500,7 @@
 	else if (!IS_GEN2(dev))
 		wm_info = &i915_wm_info;
 	else
-		wm_info = &i855_wm_info;
+		wm_info = &i830_wm_info;
 
 	fifo_size = dev_priv->display.get_fifo_size(dev, 0);
 	crtc = intel_get_crtc_for_plane(dev, 0);
@@ -1615,7 +1548,7 @@
 	if (IS_I945G(dev) || IS_I945GM(dev))
 		I915_WRITE(FW_BLC_SELF, FW_BLC_SELF_EN_MASK | 0);
 	else if (IS_I915GM(dev))
-		I915_WRITE(INSTPM, I915_READ(INSTPM) & ~INSTPM_SELF_EN);
+		I915_WRITE(INSTPM, _MASKED_BIT_DISABLE(INSTPM_SELF_EN));
 
 	/* Calc sr entries for one plane configs */
 	if (HAS_FW_BLC(dev) && enabled) {
@@ -1667,14 +1600,14 @@
 				I915_WRITE(FW_BLC_SELF,
 					   FW_BLC_SELF_EN_MASK | FW_BLC_SELF_EN);
 			else if (IS_I915GM(dev))
-				I915_WRITE(INSTPM, I915_READ(INSTPM) | INSTPM_SELF_EN);
+				I915_WRITE(INSTPM, _MASKED_BIT_ENABLE(INSTPM_SELF_EN));
 			DRM_DEBUG_KMS("memory self refresh enabled\n");
 		} else
 			DRM_DEBUG_KMS("memory self refresh disabled\n");
 	}
 }
 
-static void i830_update_wm(struct drm_crtc *unused_crtc)
+static void i845_update_wm(struct drm_crtc *unused_crtc)
 {
 	struct drm_device *dev = unused_crtc->dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
@@ -1689,7 +1622,7 @@
 
 	adjusted_mode = &to_intel_crtc(crtc)->config.adjusted_mode;
 	planea_wm = intel_calculate_wm(adjusted_mode->crtc_clock,
-				       &i830_wm_info,
+				       &i845_wm_info,
 				       dev_priv->display.get_fifo_size(dev, 0),
 				       4, latency_ns);
 	fwater_lo = I915_READ(FW_BLC) & ~0xfff;
@@ -1700,423 +1633,6 @@
 	I915_WRITE(FW_BLC, fwater_lo);
 }
 
-/*
- * Check the wm result.
- *
- * If any calculated watermark values is larger than the maximum value that
- * can be programmed into the associated watermark register, that watermark
- * must be disabled.
- */
-static bool ironlake_check_srwm(struct drm_device *dev, int level,
-				int fbc_wm, int display_wm, int cursor_wm,
-				const struct intel_watermark_params *display,
-				const struct intel_watermark_params *cursor)
-{
-	struct drm_i915_private *dev_priv = dev->dev_private;
-
-	DRM_DEBUG_KMS("watermark %d: display plane %d, fbc lines %d,"
-		      " cursor %d\n", level, display_wm, fbc_wm, cursor_wm);
-
-	if (fbc_wm > SNB_FBC_MAX_SRWM) {
-		DRM_DEBUG_KMS("fbc watermark(%d) is too large(%d), disabling wm%d+\n",
-			      fbc_wm, SNB_FBC_MAX_SRWM, level);
-
-		/* fbc has it's own way to disable FBC WM */
-		I915_WRITE(DISP_ARB_CTL,
-			   I915_READ(DISP_ARB_CTL) | DISP_FBC_WM_DIS);
-		return false;
-	} else if (INTEL_INFO(dev)->gen >= 6) {
-		/* enable FBC WM (except on ILK, where it must remain off) */
-		I915_WRITE(DISP_ARB_CTL,
-			   I915_READ(DISP_ARB_CTL) & ~DISP_FBC_WM_DIS);
-	}
-
-	if (display_wm > display->max_wm) {
-		DRM_DEBUG_KMS("display watermark(%d) is too large(%d), disabling wm%d+\n",
-			      display_wm, SNB_DISPLAY_MAX_SRWM, level);
-		return false;
-	}
-
-	if (cursor_wm > cursor->max_wm) {
-		DRM_DEBUG_KMS("cursor watermark(%d) is too large(%d), disabling wm%d+\n",
-			      cursor_wm, SNB_CURSOR_MAX_SRWM, level);
-		return false;
-	}
-
-	if (!(fbc_wm || display_wm || cursor_wm)) {
-		DRM_DEBUG_KMS("latency %d is 0, disabling wm%d+\n", level, level);
-		return false;
-	}
-
-	return true;
-}
-
-/*
- * Compute watermark values of WM[1-3],
- */
-static bool ironlake_compute_srwm(struct drm_device *dev, int level, int plane,
-				  int latency_ns,
-				  const struct intel_watermark_params *display,
-				  const struct intel_watermark_params *cursor,
-				  int *fbc_wm, int *display_wm, int *cursor_wm)
-{
-	struct drm_crtc *crtc;
-	const struct drm_display_mode *adjusted_mode;
-	unsigned long line_time_us;
-	int hdisplay, htotal, pixel_size, clock;
-	int line_count, line_size;
-	int small, large;
-	int entries;
-
-	if (!latency_ns) {
-		*fbc_wm = *display_wm = *cursor_wm = 0;
-		return false;
-	}
-
-	crtc = intel_get_crtc_for_plane(dev, plane);
-	adjusted_mode = &to_intel_crtc(crtc)->config.adjusted_mode;
-	clock = adjusted_mode->crtc_clock;
-	htotal = adjusted_mode->crtc_htotal;
-	hdisplay = to_intel_crtc(crtc)->config.pipe_src_w;
-	pixel_size = crtc->fb->bits_per_pixel / 8;
-
-	line_time_us = (htotal * 1000) / clock;
-	line_count = (latency_ns / line_time_us + 1000) / 1000;
-	line_size = hdisplay * pixel_size;
-
-	/* Use the minimum of the small and large buffer method for primary */
-	small = ((clock * pixel_size / 1000) * latency_ns) / 1000;
-	large = line_count * line_size;
-
-	entries = DIV_ROUND_UP(min(small, large), display->cacheline_size);
-	*display_wm = entries + display->guard_size;
-
-	/*
-	 * Spec says:
-	 * FBC WM = ((Final Primary WM * 64) / number of bytes per line) + 2
-	 */
-	*fbc_wm = DIV_ROUND_UP(*display_wm * 64, line_size) + 2;
-
-	/* calculate the self-refresh watermark for display cursor */
-	entries = line_count * pixel_size * 64;
-	entries = DIV_ROUND_UP(entries, cursor->cacheline_size);
-	*cursor_wm = entries + cursor->guard_size;
-
-	return ironlake_check_srwm(dev, level,
-				   *fbc_wm, *display_wm, *cursor_wm,
-				   display, cursor);
-}
-
-static void ironlake_update_wm(struct drm_crtc *crtc)
-{
-	struct drm_device *dev = crtc->dev;
-	struct drm_i915_private *dev_priv = dev->dev_private;
-	int fbc_wm, plane_wm, cursor_wm;
-	unsigned int enabled;
-
-	enabled = 0;
-	if (g4x_compute_wm0(dev, PIPE_A,
-			    &ironlake_display_wm_info,
-			    dev_priv->wm.pri_latency[0] * 100,
-			    &ironlake_cursor_wm_info,
-			    dev_priv->wm.cur_latency[0] * 100,
-			    &plane_wm, &cursor_wm)) {
-		I915_WRITE(WM0_PIPEA_ILK,
-			   (plane_wm << WM0_PIPE_PLANE_SHIFT) | cursor_wm);
-		DRM_DEBUG_KMS("FIFO watermarks For pipe A -"
-			      " plane %d, " "cursor: %d\n",
-			      plane_wm, cursor_wm);
-		enabled |= 1 << PIPE_A;
-	}
-
-	if (g4x_compute_wm0(dev, PIPE_B,
-			    &ironlake_display_wm_info,
-			    dev_priv->wm.pri_latency[0] * 100,
-			    &ironlake_cursor_wm_info,
-			    dev_priv->wm.cur_latency[0] * 100,
-			    &plane_wm, &cursor_wm)) {
-		I915_WRITE(WM0_PIPEB_ILK,
-			   (plane_wm << WM0_PIPE_PLANE_SHIFT) | cursor_wm);
-		DRM_DEBUG_KMS("FIFO watermarks For pipe B -"
-			      " plane %d, cursor: %d\n",
-			      plane_wm, cursor_wm);
-		enabled |= 1 << PIPE_B;
-	}
-
-	/*
-	 * Calculate and update the self-refresh watermark only when one
-	 * display plane is used.
-	 */
-	I915_WRITE(WM3_LP_ILK, 0);
-	I915_WRITE(WM2_LP_ILK, 0);
-	I915_WRITE(WM1_LP_ILK, 0);
-
-	if (!single_plane_enabled(enabled))
-		return;
-	enabled = ffs(enabled) - 1;
-
-	/* WM1 */
-	if (!ironlake_compute_srwm(dev, 1, enabled,
-				   dev_priv->wm.pri_latency[1] * 500,
-				   &ironlake_display_srwm_info,
-				   &ironlake_cursor_srwm_info,
-				   &fbc_wm, &plane_wm, &cursor_wm))
-		return;
-
-	I915_WRITE(WM1_LP_ILK,
-		   WM1_LP_SR_EN |
-		   (dev_priv->wm.pri_latency[1] << WM1_LP_LATENCY_SHIFT) |
-		   (fbc_wm << WM1_LP_FBC_SHIFT) |
-		   (plane_wm << WM1_LP_SR_SHIFT) |
-		   cursor_wm);
-
-	/* WM2 */
-	if (!ironlake_compute_srwm(dev, 2, enabled,
-				   dev_priv->wm.pri_latency[2] * 500,
-				   &ironlake_display_srwm_info,
-				   &ironlake_cursor_srwm_info,
-				   &fbc_wm, &plane_wm, &cursor_wm))
-		return;
-
-	I915_WRITE(WM2_LP_ILK,
-		   WM2_LP_EN |
-		   (dev_priv->wm.pri_latency[2] << WM1_LP_LATENCY_SHIFT) |
-		   (fbc_wm << WM1_LP_FBC_SHIFT) |
-		   (plane_wm << WM1_LP_SR_SHIFT) |
-		   cursor_wm);
-
-	/*
-	 * WM3 is unsupported on ILK, probably because we don't have latency
-	 * data for that power state
-	 */
-}
-
-static void sandybridge_update_wm(struct drm_crtc *crtc)
-{
-	struct drm_device *dev = crtc->dev;
-	struct drm_i915_private *dev_priv = dev->dev_private;
-	int latency = dev_priv->wm.pri_latency[0] * 100;	/* In unit 0.1us */
-	u32 val;
-	int fbc_wm, plane_wm, cursor_wm;
-	unsigned int enabled;
-
-	enabled = 0;
-	if (g4x_compute_wm0(dev, PIPE_A,
-			    &sandybridge_display_wm_info, latency,
-			    &sandybridge_cursor_wm_info, latency,
-			    &plane_wm, &cursor_wm)) {
-		val = I915_READ(WM0_PIPEA_ILK);
-		val &= ~(WM0_PIPE_PLANE_MASK | WM0_PIPE_CURSOR_MASK);
-		I915_WRITE(WM0_PIPEA_ILK, val |
-			   ((plane_wm << WM0_PIPE_PLANE_SHIFT) | cursor_wm));
-		DRM_DEBUG_KMS("FIFO watermarks For pipe A -"
-			      " plane %d, " "cursor: %d\n",
-			      plane_wm, cursor_wm);
-		enabled |= 1 << PIPE_A;
-	}
-
-	if (g4x_compute_wm0(dev, PIPE_B,
-			    &sandybridge_display_wm_info, latency,
-			    &sandybridge_cursor_wm_info, latency,
-			    &plane_wm, &cursor_wm)) {
-		val = I915_READ(WM0_PIPEB_ILK);
-		val &= ~(WM0_PIPE_PLANE_MASK | WM0_PIPE_CURSOR_MASK);
-		I915_WRITE(WM0_PIPEB_ILK, val |
-			   ((plane_wm << WM0_PIPE_PLANE_SHIFT) | cursor_wm));
-		DRM_DEBUG_KMS("FIFO watermarks For pipe B -"
-			      " plane %d, cursor: %d\n",
-			      plane_wm, cursor_wm);
-		enabled |= 1 << PIPE_B;
-	}
-
-	/*
-	 * Calculate and update the self-refresh watermark only when one
-	 * display plane is used.
-	 *
-	 * SNB support 3 levels of watermark.
-	 *
-	 * WM1/WM2/WM2 watermarks have to be enabled in the ascending order,
-	 * and disabled in the descending order
-	 *
-	 */
-	I915_WRITE(WM3_LP_ILK, 0);
-	I915_WRITE(WM2_LP_ILK, 0);
-	I915_WRITE(WM1_LP_ILK, 0);
-
-	if (!single_plane_enabled(enabled) ||
-	    dev_priv->sprite_scaling_enabled)
-		return;
-	enabled = ffs(enabled) - 1;
-
-	/* WM1 */
-	if (!ironlake_compute_srwm(dev, 1, enabled,
-				   dev_priv->wm.pri_latency[1] * 500,
-				   &sandybridge_display_srwm_info,
-				   &sandybridge_cursor_srwm_info,
-				   &fbc_wm, &plane_wm, &cursor_wm))
-		return;
-
-	I915_WRITE(WM1_LP_ILK,
-		   WM1_LP_SR_EN |
-		   (dev_priv->wm.pri_latency[1] << WM1_LP_LATENCY_SHIFT) |
-		   (fbc_wm << WM1_LP_FBC_SHIFT) |
-		   (plane_wm << WM1_LP_SR_SHIFT) |
-		   cursor_wm);
-
-	/* WM2 */
-	if (!ironlake_compute_srwm(dev, 2, enabled,
-				   dev_priv->wm.pri_latency[2] * 500,
-				   &sandybridge_display_srwm_info,
-				   &sandybridge_cursor_srwm_info,
-				   &fbc_wm, &plane_wm, &cursor_wm))
-		return;
-
-	I915_WRITE(WM2_LP_ILK,
-		   WM2_LP_EN |
-		   (dev_priv->wm.pri_latency[2] << WM1_LP_LATENCY_SHIFT) |
-		   (fbc_wm << WM1_LP_FBC_SHIFT) |
-		   (plane_wm << WM1_LP_SR_SHIFT) |
-		   cursor_wm);
-
-	/* WM3 */
-	if (!ironlake_compute_srwm(dev, 3, enabled,
-				   dev_priv->wm.pri_latency[3] * 500,
-				   &sandybridge_display_srwm_info,
-				   &sandybridge_cursor_srwm_info,
-				   &fbc_wm, &plane_wm, &cursor_wm))
-		return;
-
-	I915_WRITE(WM3_LP_ILK,
-		   WM3_LP_EN |
-		   (dev_priv->wm.pri_latency[3] << WM1_LP_LATENCY_SHIFT) |
-		   (fbc_wm << WM1_LP_FBC_SHIFT) |
-		   (plane_wm << WM1_LP_SR_SHIFT) |
-		   cursor_wm);
-}
-
-static void ivybridge_update_wm(struct drm_crtc *crtc)
-{
-	struct drm_device *dev = crtc->dev;
-	struct drm_i915_private *dev_priv = dev->dev_private;
-	int latency = dev_priv->wm.pri_latency[0] * 100;	/* In unit 0.1us */
-	u32 val;
-	int fbc_wm, plane_wm, cursor_wm;
-	int ignore_fbc_wm, ignore_plane_wm, ignore_cursor_wm;
-	unsigned int enabled;
-
-	enabled = 0;
-	if (g4x_compute_wm0(dev, PIPE_A,
-			    &sandybridge_display_wm_info, latency,
-			    &sandybridge_cursor_wm_info, latency,
-			    &plane_wm, &cursor_wm)) {
-		val = I915_READ(WM0_PIPEA_ILK);
-		val &= ~(WM0_PIPE_PLANE_MASK | WM0_PIPE_CURSOR_MASK);
-		I915_WRITE(WM0_PIPEA_ILK, val |
-			   ((plane_wm << WM0_PIPE_PLANE_SHIFT) | cursor_wm));
-		DRM_DEBUG_KMS("FIFO watermarks For pipe A -"
-			      " plane %d, " "cursor: %d\n",
-			      plane_wm, cursor_wm);
-		enabled |= 1 << PIPE_A;
-	}
-
-	if (g4x_compute_wm0(dev, PIPE_B,
-			    &sandybridge_display_wm_info, latency,
-			    &sandybridge_cursor_wm_info, latency,
-			    &plane_wm, &cursor_wm)) {
-		val = I915_READ(WM0_PIPEB_ILK);
-		val &= ~(WM0_PIPE_PLANE_MASK | WM0_PIPE_CURSOR_MASK);
-		I915_WRITE(WM0_PIPEB_ILK, val |
-			   ((plane_wm << WM0_PIPE_PLANE_SHIFT) | cursor_wm));
-		DRM_DEBUG_KMS("FIFO watermarks For pipe B -"
-			      " plane %d, cursor: %d\n",
-			      plane_wm, cursor_wm);
-		enabled |= 1 << PIPE_B;
-	}
-
-	if (g4x_compute_wm0(dev, PIPE_C,
-			    &sandybridge_display_wm_info, latency,
-			    &sandybridge_cursor_wm_info, latency,
-			    &plane_wm, &cursor_wm)) {
-		val = I915_READ(WM0_PIPEC_IVB);
-		val &= ~(WM0_PIPE_PLANE_MASK | WM0_PIPE_CURSOR_MASK);
-		I915_WRITE(WM0_PIPEC_IVB, val |
-			   ((plane_wm << WM0_PIPE_PLANE_SHIFT) | cursor_wm));
-		DRM_DEBUG_KMS("FIFO watermarks For pipe C -"
-			      " plane %d, cursor: %d\n",
-			      plane_wm, cursor_wm);
-		enabled |= 1 << PIPE_C;
-	}
-
-	/*
-	 * Calculate and update the self-refresh watermark only when one
-	 * display plane is used.
-	 *
-	 * SNB support 3 levels of watermark.
-	 *
-	 * WM1/WM2/WM2 watermarks have to be enabled in the ascending order,
-	 * and disabled in the descending order
-	 *
-	 */
-	I915_WRITE(WM3_LP_ILK, 0);
-	I915_WRITE(WM2_LP_ILK, 0);
-	I915_WRITE(WM1_LP_ILK, 0);
-
-	if (!single_plane_enabled(enabled) ||
-	    dev_priv->sprite_scaling_enabled)
-		return;
-	enabled = ffs(enabled) - 1;
-
-	/* WM1 */
-	if (!ironlake_compute_srwm(dev, 1, enabled,
-				   dev_priv->wm.pri_latency[1] * 500,
-				   &sandybridge_display_srwm_info,
-				   &sandybridge_cursor_srwm_info,
-				   &fbc_wm, &plane_wm, &cursor_wm))
-		return;
-
-	I915_WRITE(WM1_LP_ILK,
-		   WM1_LP_SR_EN |
-		   (dev_priv->wm.pri_latency[1] << WM1_LP_LATENCY_SHIFT) |
-		   (fbc_wm << WM1_LP_FBC_SHIFT) |
-		   (plane_wm << WM1_LP_SR_SHIFT) |
-		   cursor_wm);
-
-	/* WM2 */
-	if (!ironlake_compute_srwm(dev, 2, enabled,
-				   dev_priv->wm.pri_latency[2] * 500,
-				   &sandybridge_display_srwm_info,
-				   &sandybridge_cursor_srwm_info,
-				   &fbc_wm, &plane_wm, &cursor_wm))
-		return;
-
-	I915_WRITE(WM2_LP_ILK,
-		   WM2_LP_EN |
-		   (dev_priv->wm.pri_latency[2] << WM1_LP_LATENCY_SHIFT) |
-		   (fbc_wm << WM1_LP_FBC_SHIFT) |
-		   (plane_wm << WM1_LP_SR_SHIFT) |
-		   cursor_wm);
-
-	/* WM3, note we have to correct the cursor latency */
-	if (!ironlake_compute_srwm(dev, 3, enabled,
-				   dev_priv->wm.pri_latency[3] * 500,
-				   &sandybridge_display_srwm_info,
-				   &sandybridge_cursor_srwm_info,
-				   &fbc_wm, &plane_wm, &ignore_cursor_wm) ||
-	    !ironlake_compute_srwm(dev, 3, enabled,
-				   dev_priv->wm.cur_latency[3] * 500,
-				   &sandybridge_display_srwm_info,
-				   &sandybridge_cursor_srwm_info,
-				   &ignore_fbc_wm, &ignore_plane_wm, &cursor_wm))
-		return;
-
-	I915_WRITE(WM3_LP_ILK,
-		   WM3_LP_EN |
-		   (dev_priv->wm.pri_latency[3] << WM1_LP_LATENCY_SHIFT) |
-		   (fbc_wm << WM1_LP_FBC_SHIFT) |
-		   (plane_wm << WM1_LP_SR_SHIFT) |
-		   cursor_wm);
-}
-
 static uint32_t ilk_pipe_pixel_rate(struct drm_device *dev,
 				    struct drm_crtc *crtc)
 {
@@ -2185,7 +1701,7 @@
 	return DIV_ROUND_UP(pri_val * 64, horiz_pixels * bytes_per_pixel) + 2;
 }
 
-struct hsw_pipe_wm_parameters {
+struct ilk_pipe_wm_parameters {
 	bool active;
 	uint32_t pipe_htotal;
 	uint32_t pixel_rate;
@@ -2194,7 +1710,7 @@
 	struct intel_plane_wm_parameters cur;
 };
 
-struct hsw_wm_maximums {
+struct ilk_wm_maximums {
 	uint16_t pri;
 	uint16_t spr;
 	uint16_t cur;
@@ -2212,7 +1728,7 @@
  * For both WM_PIPE and WM_LP.
  * mem_value must be in 0.1us units.
  */
-static uint32_t ilk_compute_pri_wm(const struct hsw_pipe_wm_parameters *params,
+static uint32_t ilk_compute_pri_wm(const struct ilk_pipe_wm_parameters *params,
 				   uint32_t mem_value,
 				   bool is_lp)
 {
@@ -2241,7 +1757,7 @@
  * For both WM_PIPE and WM_LP.
  * mem_value must be in 0.1us units.
  */
-static uint32_t ilk_compute_spr_wm(const struct hsw_pipe_wm_parameters *params,
+static uint32_t ilk_compute_spr_wm(const struct ilk_pipe_wm_parameters *params,
 				   uint32_t mem_value)
 {
 	uint32_t method1, method2;
@@ -2264,7 +1780,7 @@
  * For both WM_PIPE and WM_LP.
  * mem_value must be in 0.1us units.
  */
-static uint32_t ilk_compute_cur_wm(const struct hsw_pipe_wm_parameters *params,
+static uint32_t ilk_compute_cur_wm(const struct ilk_pipe_wm_parameters *params,
 				   uint32_t mem_value)
 {
 	if (!params->active || !params->cur.enabled)
@@ -2278,7 +1794,7 @@
 }
 
 /* Only for WM_LP. */
-static uint32_t ilk_compute_fbc_wm(const struct hsw_pipe_wm_parameters *params,
+static uint32_t ilk_compute_fbc_wm(const struct ilk_pipe_wm_parameters *params,
 				   uint32_t pri_val)
 {
 	if (!params->active || !params->pri.enabled)
@@ -2383,7 +1899,7 @@
 				    int level,
 				    const struct intel_wm_config *config,
 				    enum intel_ddb_partitioning ddb_partitioning,
-				    struct hsw_wm_maximums *max)
+				    struct ilk_wm_maximums *max)
 {
 	max->pri = ilk_plane_wm_max(dev, level, config, ddb_partitioning, false);
 	max->spr = ilk_plane_wm_max(dev, level, config, ddb_partitioning, true);
@@ -2392,7 +1908,7 @@
 }
 
 static bool ilk_validate_wm_level(int level,
-				  const struct hsw_wm_maximums *max,
+				  const struct ilk_wm_maximums *max,
 				  struct intel_wm_level *result)
 {
 	bool ret;
@@ -2434,7 +1950,7 @@
 
 static void ilk_compute_wm_level(struct drm_i915_private *dev_priv,
 				 int level,
-				 const struct hsw_pipe_wm_parameters *p,
+				 const struct ilk_pipe_wm_parameters *p,
 				 struct intel_wm_level *result)
 {
 	uint16_t pri_latency = dev_priv->wm.pri_latency[level];
@@ -2482,7 +1998,7 @@
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
 
-	if (IS_HASWELL(dev)) {
+	if (IS_HASWELL(dev) || IS_BROADWELL(dev)) {
 		uint64_t sskpd = I915_READ64(MCH_SSKPD);
 
 		wm[0] = (sskpd >> 56) & 0xFF;
@@ -2530,7 +2046,7 @@
 static int ilk_wm_max_level(const struct drm_device *dev)
 {
 	/* how many WM levels are we expecting */
-	if (IS_HASWELL(dev))
+	if (IS_HASWELL(dev) || IS_BROADWELL(dev))
 		return 4;
 	else if (INTEL_INFO(dev)->gen >= 6)
 		return 3;
@@ -2582,8 +2098,8 @@
 	intel_print_wm_latency(dev, "Cursor", dev_priv->wm.cur_latency);
 }
 
-static void hsw_compute_wm_parameters(struct drm_crtc *crtc,
-				      struct hsw_pipe_wm_parameters *p,
+static void ilk_compute_wm_parameters(struct drm_crtc *crtc,
+				      struct ilk_pipe_wm_parameters *p,
 				      struct intel_wm_config *config)
 {
 	struct drm_device *dev = crtc->dev;
@@ -2593,7 +2109,7 @@
 
 	p->active = intel_crtc_active(crtc);
 	if (p->active) {
-		p->pipe_htotal = intel_crtc->config.adjusted_mode.htotal;
+		p->pipe_htotal = intel_crtc->config.adjusted_mode.crtc_htotal;
 		p->pixel_rate = ilk_pipe_pixel_rate(dev, crtc);
 		p->pri.bytes_per_pixel = crtc->fb->bits_per_pixel / 8;
 		p->cur.bytes_per_pixel = 4;
@@ -2620,7 +2136,7 @@
 
 /* Compute new watermarks for the pipe */
 static bool intel_compute_pipe_wm(struct drm_crtc *crtc,
-				  const struct hsw_pipe_wm_parameters *params,
+				  const struct ilk_pipe_wm_parameters *params,
 				  struct intel_pipe_wm *pipe_wm)
 {
 	struct drm_device *dev = crtc->dev;
@@ -2632,16 +2148,25 @@
 		.sprites_enabled = params->spr.enabled,
 		.sprites_scaled = params->spr.scaled,
 	};
-	struct hsw_wm_maximums max;
+	struct ilk_wm_maximums max;
 
 	/* LP0 watermarks always use 1/2 DDB partitioning */
 	ilk_compute_wm_maximums(dev, 0, &config, INTEL_DDB_PART_1_2, &max);
 
+	/* ILK/SNB: LP2+ watermarks only w/o sprites */
+	if (INTEL_INFO(dev)->gen <= 6 && params->spr.enabled)
+		max_level = 1;
+
+	/* ILK/SNB/IVB: LP1+ watermarks only w/o scaling */
+	if (params->spr.scaled)
+		max_level = 0;
+
 	for (level = 0; level <= max_level; level++)
 		ilk_compute_wm_level(dev_priv, level, params,
 				     &pipe_wm->wm[level]);
 
-	pipe_wm->linetime = hsw_compute_linetime_wm(dev, crtc);
+	if (IS_HASWELL(dev) || IS_BROADWELL(dev))
+		pipe_wm->linetime = hsw_compute_linetime_wm(dev, crtc);
 
 	/* At least LP0 must be valid */
 	return ilk_validate_wm_level(0, &max, &pipe_wm->wm[0]);
@@ -2676,12 +2201,19 @@
  * Merge all low power watermarks for all active pipes.
  */
 static void ilk_wm_merge(struct drm_device *dev,
-			 const struct hsw_wm_maximums *max,
+			 const struct intel_wm_config *config,
+			 const struct ilk_wm_maximums *max,
 			 struct intel_pipe_wm *merged)
 {
 	int level, max_level = ilk_wm_max_level(dev);
 
-	merged->fbc_wm_enabled = true;
+	/* ILK/SNB/IVB: LP1+ watermarks only w/ single pipe */
+	if ((INTEL_INFO(dev)->gen <= 6 || IS_IVYBRIDGE(dev)) &&
+	    config->num_pipes_active > 1)
+		return;
+
+	/* ILK: FBC WM must be disabled always */
+	merged->fbc_wm_enabled = INTEL_INFO(dev)->gen >= 6;
 
 	/* merge each WM1+ level */
 	for (level = 1; level <= max_level; level++) {
@@ -2701,6 +2233,20 @@
 			wm->fbc_val = 0;
 		}
 	}
+
+	/* ILK: LP2+ must be disabled when FBC WM is disabled but FBC enabled */
+	/*
+	 * FIXME this is racy. FBC might get enabled later.
+	 * What we should check here is whether FBC can be
+	 * enabled sometime later.
+	 */
+	if (IS_GEN5(dev) && !merged->fbc_wm_enabled && intel_fbc_enabled(dev)) {
+		for (level = 2; level <= max_level; level++) {
+			struct intel_wm_level *wm = &merged->wm[level];
+
+			wm->enable = false;
+		}
+	}
 }
 
 static int ilk_wm_lp_to_level(int wm_lp, const struct intel_pipe_wm *pipe_wm)
@@ -2709,10 +2255,21 @@
 	return wm_lp + (wm_lp >= 2 && pipe_wm->wm[4].enable);
 }
 
-static void hsw_compute_wm_results(struct drm_device *dev,
+/* The value we need to program into the WM_LPx latency field */
+static unsigned int ilk_wm_lp_latency(struct drm_device *dev, int level)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+
+	if (IS_HASWELL(dev) || IS_BROADWELL(dev))
+		return 2 * level;
+	else
+		return dev_priv->wm.pri_latency[level];
+}
+
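As a quick illustration of the ilk_wm_lp_latency() helper just above (the latency value below is hypothetical, not taken from any real platform): HSW/BDW encode the level itself into the WM_LPx latency field, everything else programs the raw primary latency.

	/* hypothetical: dev_priv->wm.pri_latency[3] == 12                */
	/* HSW/BDW:      ilk_wm_lp_latency(dev, 3) == 2 * 3 == 6         */
	/* ILK/SNB/IVB:  ilk_wm_lp_latency(dev, 3) == 12 (pri_latency[3]) */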
+static void ilk_compute_wm_results(struct drm_device *dev,
 				   const struct intel_pipe_wm *merged,
 				   enum intel_ddb_partitioning partitioning,
-				   struct hsw_wm_values *results)
+				   struct ilk_wm_values *results)
 {
 	struct intel_crtc *intel_crtc;
 	int level, wm_lp;
@@ -2731,7 +2288,7 @@
 			break;
 
 		results->wm_lp[wm_lp - 1] = WM3_LP_EN |
-			((level * 2) << WM1_LP_LATENCY_SHIFT) |
+			(ilk_wm_lp_latency(dev, level) << WM1_LP_LATENCY_SHIFT) |
 			(r->pri_val << WM1_LP_SR_SHIFT) |
 			r->cur_val;
 
@@ -2742,7 +2299,11 @@
 			results->wm_lp[wm_lp - 1] |=
 				r->fbc_val << WM1_LP_FBC_SHIFT;
 
-		results->wm_lp_spr[wm_lp - 1] = r->spr_val;
+		if (INTEL_INFO(dev)->gen <= 6 && r->spr_val) {
+			WARN_ON(wm_lp != 1);
+			results->wm_lp_spr[wm_lp - 1] = WM1S_LP_EN | r->spr_val;
+		} else
+			results->wm_lp_spr[wm_lp - 1] = r->spr_val;
 	}
 
 	/* LP0 register values */
@@ -2765,7 +2326,7 @@
 
 /* Find the result with the highest level enabled. Check for enable_fbc_wm in
  * case both are at the same level. Prefer r1 in case they're the same. */
-static struct intel_pipe_wm *hsw_find_best_result(struct drm_device *dev,
+static struct intel_pipe_wm *ilk_find_best_result(struct drm_device *dev,
 						  struct intel_pipe_wm *r1,
 						  struct intel_pipe_wm *r2)
 {
@@ -2800,8 +2361,8 @@
 #define WM_DIRTY_DDB (1 << 25)
 
 static unsigned int ilk_compute_wm_dirty(struct drm_device *dev,
-					 const struct hsw_wm_values *old,
-					 const struct hsw_wm_values *new)
+					 const struct ilk_wm_values *old,
+					 const struct ilk_wm_values *new)
 {
 	unsigned int dirty = 0;
 	enum pipe pipe;
@@ -2851,27 +2412,53 @@
 	return dirty;
 }
 
+static bool _ilk_disable_lp_wm(struct drm_i915_private *dev_priv,
+			       unsigned int dirty)
+{
+	struct ilk_wm_values *previous = &dev_priv->wm.hw;
+	bool changed = false;
+
+	if (dirty & WM_DIRTY_LP(3) && previous->wm_lp[2] & WM1_LP_SR_EN) {
+		previous->wm_lp[2] &= ~WM1_LP_SR_EN;
+		I915_WRITE(WM3_LP_ILK, previous->wm_lp[2]);
+		changed = true;
+	}
+	if (dirty & WM_DIRTY_LP(2) && previous->wm_lp[1] & WM1_LP_SR_EN) {
+		previous->wm_lp[1] &= ~WM1_LP_SR_EN;
+		I915_WRITE(WM2_LP_ILK, previous->wm_lp[1]);
+		changed = true;
+	}
+	if (dirty & WM_DIRTY_LP(1) && previous->wm_lp[0] & WM1_LP_SR_EN) {
+		previous->wm_lp[0] &= ~WM1_LP_SR_EN;
+		I915_WRITE(WM1_LP_ILK, previous->wm_lp[0]);
+		changed = true;
+	}
+
+	/*
+	 * Don't touch WM1S_LP_EN here.
+	 * Doing so could cause underruns.
+	 */
+
+	return changed;
+}
+
 /*
  * The spec says we shouldn't write when we don't need, because every write
  * causes WMs to be re-evaluated, expending some power.
  */
-static void hsw_write_wm_values(struct drm_i915_private *dev_priv,
-				struct hsw_wm_values *results)
+static void ilk_write_wm_values(struct drm_i915_private *dev_priv,
+				struct ilk_wm_values *results)
 {
-	struct hsw_wm_values *previous = &dev_priv->wm.hw;
+	struct drm_device *dev = dev_priv->dev;
+	struct ilk_wm_values *previous = &dev_priv->wm.hw;
 	unsigned int dirty;
 	uint32_t val;
 
-	dirty = ilk_compute_wm_dirty(dev_priv->dev, previous, results);
+	dirty = ilk_compute_wm_dirty(dev, previous, results);
 	if (!dirty)
 		return;
 
-	if (dirty & WM_DIRTY_LP(3) && previous->wm_lp[2] != 0)
-		I915_WRITE(WM3_LP_ILK, 0);
-	if (dirty & WM_DIRTY_LP(2) && previous->wm_lp[1] != 0)
-		I915_WRITE(WM2_LP_ILK, 0);
-	if (dirty & WM_DIRTY_LP(1) && previous->wm_lp[0] != 0)
-		I915_WRITE(WM1_LP_ILK, 0);
+	_ilk_disable_lp_wm(dev_priv, dirty);
 
 	if (dirty & WM_DIRTY_PIPE(PIPE_A))
 		I915_WRITE(WM0_PIPEA_ILK, results->wm_pipe[0]);
@@ -2888,12 +2475,21 @@
 		I915_WRITE(PIPE_WM_LINETIME(PIPE_C), results->wm_linetime[2]);
 
 	if (dirty & WM_DIRTY_DDB) {
-		val = I915_READ(WM_MISC);
-		if (results->partitioning == INTEL_DDB_PART_1_2)
-			val &= ~WM_MISC_DATA_PARTITION_5_6;
-		else
-			val |= WM_MISC_DATA_PARTITION_5_6;
-		I915_WRITE(WM_MISC, val);
+		if (IS_HASWELL(dev) || IS_BROADWELL(dev)) {
+			val = I915_READ(WM_MISC);
+			if (results->partitioning == INTEL_DDB_PART_1_2)
+				val &= ~WM_MISC_DATA_PARTITION_5_6;
+			else
+				val |= WM_MISC_DATA_PARTITION_5_6;
+			I915_WRITE(WM_MISC, val);
+		} else {
+			val = I915_READ(DISP_ARB_CTL2);
+			if (results->partitioning == INTEL_DDB_PART_1_2)
+				val &= ~DISP_DATA_PARTITION_5_6;
+			else
+				val |= DISP_DATA_PARTITION_5_6;
+			I915_WRITE(DISP_ARB_CTL2, val);
+		}
 	}
 
 	if (dirty & WM_DIRTY_FBC) {
@@ -2905,37 +2501,48 @@
 		I915_WRITE(DISP_ARB_CTL, val);
 	}
 
-	if (dirty & WM_DIRTY_LP(1) && previous->wm_lp_spr[0] != results->wm_lp_spr[0])
+	if (dirty & WM_DIRTY_LP(1) &&
+	    previous->wm_lp_spr[0] != results->wm_lp_spr[0])
 		I915_WRITE(WM1S_LP_ILK, results->wm_lp_spr[0]);
-	if (dirty & WM_DIRTY_LP(2) && previous->wm_lp_spr[1] != results->wm_lp_spr[1])
-		I915_WRITE(WM2S_LP_IVB, results->wm_lp_spr[1]);
-	if (dirty & WM_DIRTY_LP(3) && previous->wm_lp_spr[2] != results->wm_lp_spr[2])
-		I915_WRITE(WM3S_LP_IVB, results->wm_lp_spr[2]);
 
-	if (dirty & WM_DIRTY_LP(1) && results->wm_lp[0] != 0)
+	if (INTEL_INFO(dev)->gen >= 7) {
+		if (dirty & WM_DIRTY_LP(2) && previous->wm_lp_spr[1] != results->wm_lp_spr[1])
+			I915_WRITE(WM2S_LP_IVB, results->wm_lp_spr[1]);
+		if (dirty & WM_DIRTY_LP(3) && previous->wm_lp_spr[2] != results->wm_lp_spr[2])
+			I915_WRITE(WM3S_LP_IVB, results->wm_lp_spr[2]);
+	}
+
+	if (dirty & WM_DIRTY_LP(1) && previous->wm_lp[0] != results->wm_lp[0])
 		I915_WRITE(WM1_LP_ILK, results->wm_lp[0]);
-	if (dirty & WM_DIRTY_LP(2) && results->wm_lp[1] != 0)
+	if (dirty & WM_DIRTY_LP(2) && previous->wm_lp[1] != results->wm_lp[1])
 		I915_WRITE(WM2_LP_ILK, results->wm_lp[1]);
-	if (dirty & WM_DIRTY_LP(3) && results->wm_lp[2] != 0)
+	if (dirty & WM_DIRTY_LP(3) && previous->wm_lp[2] != results->wm_lp[2])
 		I915_WRITE(WM3_LP_ILK, results->wm_lp[2]);
 
 	dev_priv->wm.hw = *results;
 }
 
-static void haswell_update_wm(struct drm_crtc *crtc)
+static bool ilk_disable_lp_wm(struct drm_device *dev)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+
+	return _ilk_disable_lp_wm(dev_priv, WM_DIRTY_LP_ALL);
+}
+
+static void ilk_update_wm(struct drm_crtc *crtc)
 {
 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
 	struct drm_device *dev = crtc->dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
-	struct hsw_wm_maximums max;
-	struct hsw_pipe_wm_parameters params = {};
-	struct hsw_wm_values results = {};
+	struct ilk_wm_maximums max;
+	struct ilk_pipe_wm_parameters params = {};
+	struct ilk_wm_values results = {};
 	enum intel_ddb_partitioning partitioning;
 	struct intel_pipe_wm pipe_wm = {};
 	struct intel_pipe_wm lp_wm_1_2 = {}, lp_wm_5_6 = {}, *best_lp_wm;
 	struct intel_wm_config config = {};
 
-	hsw_compute_wm_parameters(crtc, &params, &config);
+	ilk_compute_wm_parameters(crtc, &params, &config);
 
 	intel_compute_pipe_wm(crtc, &params, &pipe_wm);
 
@@ -2945,15 +2552,15 @@
 	intel_crtc->wm.active = pipe_wm;
 
 	ilk_compute_wm_maximums(dev, 1, &config, INTEL_DDB_PART_1_2, &max);
-	ilk_wm_merge(dev, &max, &lp_wm_1_2);
+	ilk_wm_merge(dev, &config, &max, &lp_wm_1_2);
 
 	/* 5/6 split only in single pipe config on IVB+ */
 	if (INTEL_INFO(dev)->gen >= 7 &&
 	    config.num_pipes_active == 1 && config.sprites_enabled) {
 		ilk_compute_wm_maximums(dev, 1, &config, INTEL_DDB_PART_5_6, &max);
-		ilk_wm_merge(dev, &max, &lp_wm_5_6);
+		ilk_wm_merge(dev, &config, &max, &lp_wm_5_6);
 
-		best_lp_wm = hsw_find_best_result(dev, &lp_wm_1_2, &lp_wm_5_6);
+		best_lp_wm = ilk_find_best_result(dev, &lp_wm_1_2, &lp_wm_5_6);
 	} else {
 		best_lp_wm = &lp_wm_1_2;
 	}
@@ -2961,16 +2568,17 @@
 	partitioning = (best_lp_wm == &lp_wm_1_2) ?
 		       INTEL_DDB_PART_1_2 : INTEL_DDB_PART_5_6;
 
-	hsw_compute_wm_results(dev, best_lp_wm, partitioning, &results);
+	ilk_compute_wm_results(dev, best_lp_wm, partitioning, &results);
 
-	hsw_write_wm_values(dev_priv, &results);
+	ilk_write_wm_values(dev_priv, &results);
 }
 
-static void haswell_update_sprite_wm(struct drm_plane *plane,
+static void ilk_update_sprite_wm(struct drm_plane *plane,
 				     struct drm_crtc *crtc,
 				     uint32_t sprite_width, int pixel_size,
 				     bool enabled, bool scaled)
 {
+	struct drm_device *dev = plane->dev;
 	struct intel_plane *intel_plane = to_intel_plane(plane);
 
 	intel_plane->wm.enabled = enabled;
@@ -2978,176 +2586,24 @@
 	intel_plane->wm.horiz_pixels = sprite_width;
 	intel_plane->wm.bytes_per_pixel = pixel_size;
 
-	haswell_update_wm(crtc);
-}
+	/*
+	 * IVB workaround: must disable low power watermarks for at least
+	 * one frame before enabling scaling.  LP watermarks can be re-enabled
+	 * when scaling is disabled.
+	 *
+	 * WaCxSRDisabledForSpriteScaling:ivb
+	 */
+	if (IS_IVYBRIDGE(dev) && scaled && ilk_disable_lp_wm(dev))
+		intel_wait_for_vblank(dev, intel_plane->pipe);
 
-static bool
-sandybridge_compute_sprite_wm(struct drm_device *dev, int plane,
-			      uint32_t sprite_width, int pixel_size,
-			      const struct intel_watermark_params *display,
-			      int display_latency_ns, int *sprite_wm)
-{
-	struct drm_crtc *crtc;
-	int clock;
-	int entries, tlb_miss;
-
-	crtc = intel_get_crtc_for_plane(dev, plane);
-	if (!intel_crtc_active(crtc)) {
-		*sprite_wm = display->guard_size;
-		return false;
-	}
-
-	clock = to_intel_crtc(crtc)->config.adjusted_mode.crtc_clock;
-
-	/* Use the small buffer method to calculate the sprite watermark */
-	entries = ((clock * pixel_size / 1000) * display_latency_ns) / 1000;
-	tlb_miss = display->fifo_size*display->cacheline_size -
-		sprite_width * 8;
-	if (tlb_miss > 0)
-		entries += tlb_miss;
-	entries = DIV_ROUND_UP(entries, display->cacheline_size);
-	*sprite_wm = entries + display->guard_size;
-	if (*sprite_wm > (int)display->max_wm)
-		*sprite_wm = display->max_wm;
-
-	return true;
-}
-
-static bool
-sandybridge_compute_sprite_srwm(struct drm_device *dev, int plane,
-				uint32_t sprite_width, int pixel_size,
-				const struct intel_watermark_params *display,
-				int latency_ns, int *sprite_wm)
-{
-	struct drm_crtc *crtc;
-	unsigned long line_time_us;
-	int clock;
-	int line_count, line_size;
-	int small, large;
-	int entries;
-
-	if (!latency_ns) {
-		*sprite_wm = 0;
-		return false;
-	}
-
-	crtc = intel_get_crtc_for_plane(dev, plane);
-	clock = to_intel_crtc(crtc)->config.adjusted_mode.crtc_clock;
-	if (!clock) {
-		*sprite_wm = 0;
-		return false;
-	}
-
-	line_time_us = (sprite_width * 1000) / clock;
-	if (!line_time_us) {
-		*sprite_wm = 0;
-		return false;
-	}
-
-	line_count = (latency_ns / line_time_us + 1000) / 1000;
-	line_size = sprite_width * pixel_size;
-
-	/* Use the minimum of the small and large buffer method for primary */
-	small = ((clock * pixel_size / 1000) * latency_ns) / 1000;
-	large = line_count * line_size;
-
-	entries = DIV_ROUND_UP(min(small, large), display->cacheline_size);
-	*sprite_wm = entries + display->guard_size;
-
-	return *sprite_wm > 0x3ff ? false : true;
-}
-
-static void sandybridge_update_sprite_wm(struct drm_plane *plane,
-					 struct drm_crtc *crtc,
-					 uint32_t sprite_width, int pixel_size,
-					 bool enabled, bool scaled)
-{
-	struct drm_device *dev = plane->dev;
-	struct drm_i915_private *dev_priv = dev->dev_private;
-	int pipe = to_intel_plane(plane)->pipe;
-	int latency = dev_priv->wm.spr_latency[0] * 100;	/* In unit 0.1us */
-	u32 val;
-	int sprite_wm, reg;
-	int ret;
-
-	if (!enabled)
-		return;
-
-	switch (pipe) {
-	case 0:
-		reg = WM0_PIPEA_ILK;
-		break;
-	case 1:
-		reg = WM0_PIPEB_ILK;
-		break;
-	case 2:
-		reg = WM0_PIPEC_IVB;
-		break;
-	default:
-		return; /* bad pipe */
-	}
-
-	ret = sandybridge_compute_sprite_wm(dev, pipe, sprite_width, pixel_size,
-					    &sandybridge_display_wm_info,
-					    latency, &sprite_wm);
-	if (!ret) {
-		DRM_DEBUG_KMS("failed to compute sprite wm for pipe %c\n",
-			      pipe_name(pipe));
-		return;
-	}
-
-	val = I915_READ(reg);
-	val &= ~WM0_PIPE_SPRITE_MASK;
-	I915_WRITE(reg, val | (sprite_wm << WM0_PIPE_SPRITE_SHIFT));
-	DRM_DEBUG_KMS("sprite watermarks For pipe %c - %d\n", pipe_name(pipe), sprite_wm);
-
-
-	ret = sandybridge_compute_sprite_srwm(dev, pipe, sprite_width,
-					      pixel_size,
-					      &sandybridge_display_srwm_info,
-					      dev_priv->wm.spr_latency[1] * 500,
-					      &sprite_wm);
-	if (!ret) {
-		DRM_DEBUG_KMS("failed to compute sprite lp1 wm on pipe %c\n",
-			      pipe_name(pipe));
-		return;
-	}
-	I915_WRITE(WM1S_LP_ILK, sprite_wm);
-
-	/* Only IVB has two more LP watermarks for sprite */
-	if (!IS_IVYBRIDGE(dev))
-		return;
-
-	ret = sandybridge_compute_sprite_srwm(dev, pipe, sprite_width,
-					      pixel_size,
-					      &sandybridge_display_srwm_info,
-					      dev_priv->wm.spr_latency[2] * 500,
-					      &sprite_wm);
-	if (!ret) {
-		DRM_DEBUG_KMS("failed to compute sprite lp2 wm on pipe %c\n",
-			      pipe_name(pipe));
-		return;
-	}
-	I915_WRITE(WM2S_LP_IVB, sprite_wm);
-
-	ret = sandybridge_compute_sprite_srwm(dev, pipe, sprite_width,
-					      pixel_size,
-					      &sandybridge_display_srwm_info,
-					      dev_priv->wm.spr_latency[3] * 500,
-					      &sprite_wm);
-	if (!ret) {
-		DRM_DEBUG_KMS("failed to compute sprite lp3 wm on pipe %c\n",
-			      pipe_name(pipe));
-		return;
-	}
-	I915_WRITE(WM3S_LP_IVB, sprite_wm);
+	ilk_update_wm(crtc);
 }
 
 static void ilk_pipe_wm_get_hw_state(struct drm_crtc *crtc)
 {
 	struct drm_device *dev = crtc->dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
-	struct hsw_wm_values *hw = &dev_priv->wm.hw;
+	struct ilk_wm_values *hw = &dev_priv->wm.hw;
 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
 	struct intel_pipe_wm *active = &intel_crtc->wm.active;
 	enum pipe pipe = intel_crtc->pipe;
@@ -3158,7 +2614,8 @@
 	};
 
 	hw->wm_pipe[pipe] = I915_READ(wm0_pipe_reg[pipe]);
-	hw->wm_linetime[pipe] = I915_READ(PIPE_WM_LINETIME(pipe));
+	if (IS_HASWELL(dev) || IS_BROADWELL(dev))
+		hw->wm_linetime[pipe] = I915_READ(PIPE_WM_LINETIME(pipe));
 
 	if (intel_crtc_active(crtc)) {
 		u32 tmp = hw->wm_pipe[pipe];
@@ -3190,7 +2647,7 @@
 void ilk_wm_get_hw_state(struct drm_device *dev)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
-	struct hsw_wm_values *hw = &dev_priv->wm.hw;
+	struct ilk_wm_values *hw = &dev_priv->wm.hw;
 	struct drm_crtc *crtc;
 
 	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head)
@@ -3204,8 +2661,12 @@
 	hw->wm_lp_spr[1] = I915_READ(WM2S_LP_IVB);
 	hw->wm_lp_spr[2] = I915_READ(WM3S_LP_IVB);
 
-	hw->partitioning = (I915_READ(WM_MISC) & WM_MISC_DATA_PARTITION_5_6) ?
-		INTEL_DDB_PART_5_6 : INTEL_DDB_PART_1_2;
+	if (IS_HASWELL(dev) || IS_BROADWELL(dev))
+		hw->partitioning = (I915_READ(WM_MISC) & WM_MISC_DATA_PARTITION_5_6) ?
+			INTEL_DDB_PART_5_6 : INTEL_DDB_PART_1_2;
+	else if (IS_IVYBRIDGE(dev))
+		hw->partitioning = (I915_READ(DISP_ARB_CTL2) & DISP_DATA_PARTITION_5_6) ?
+			INTEL_DDB_PART_5_6 : INTEL_DDB_PART_1_2;
 
 	hw->enable_fbc_wm =
 		!(I915_READ(DISP_ARB_CTL) & DISP_FBC_WM_DIS);
@@ -3430,26 +2891,19 @@
  * ourselves, instead of doing a rmw cycle (which might result in us clearing
  * all limits and the gpu stuck at whatever frequency it is at atm).
  */
-static u32 gen6_rps_limits(struct drm_i915_private *dev_priv, u8 *val)
+static u32 gen6_rps_limits(struct drm_i915_private *dev_priv, u8 val)
 {
 	u32 limits;
 
-	limits = 0;
-
-	if (*val >= dev_priv->rps.max_delay)
-		*val = dev_priv->rps.max_delay;
-	limits |= dev_priv->rps.max_delay << 24;
-
 	/* Only set the down limit when we've reached the lowest level to avoid
 	 * getting more interrupts, otherwise leave this clear. This prevents a
 	 * race in the hw when coming out of rc6: There's a tiny window where
 	 * the hw runs at the minimal clock before selecting the desired
 	 * frequency, if the down threshold expires in that window we will not
 	 * receive a down interrupt. */
-	if (*val <= dev_priv->rps.min_delay) {
-		*val = dev_priv->rps.min_delay;
+	limits = dev_priv->rps.max_delay << 24;
+	if (val <= dev_priv->rps.min_delay)
 		limits |= dev_priv->rps.min_delay << 16;
-	}
 
 	return limits;
 }
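To make the new pass-by-value limit encoding concrete, here is a minimal worked example with hypothetical delay values (0x14 and 0x05 are illustrative, not real hardware limits):

	/* hypothetical: rps.max_delay == 0x14, rps.min_delay == 0x05      */
	gen6_rps_limits(dev_priv, 0x0a);   /* -> 0x14000000, no down limit */
	gen6_rps_limits(dev_priv, 0x05);   /* -> 0x14050000, down limit set */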
@@ -3549,7 +3003,6 @@
 void gen6_set_rps(struct drm_device *dev, u8 val)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
-	u32 limits = gen6_rps_limits(dev_priv, &val);
 
 	WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));
 	WARN_ON(val > dev_priv->rps.max_delay);
@@ -3572,7 +3025,8 @@
 	/* Make sure we continue to get interrupts
 	 * until we hit the minimum or maximum frequencies.
 	 */
-	I915_WRITE(GEN6_RP_INTERRUPT_LIMITS, limits);
+	I915_WRITE(GEN6_RP_INTERRUPT_LIMITS,
+		   gen6_rps_limits(dev_priv, val));
 
 	POSTING_READ(GEN6_RPNSWREQ);
 
@@ -3583,9 +3037,11 @@
 
 void gen6_rps_idle(struct drm_i915_private *dev_priv)
 {
+	struct drm_device *dev = dev_priv->dev;
+
 	mutex_lock(&dev_priv->rps.hw_lock);
 	if (dev_priv->rps.enabled) {
-		if (dev_priv->info->is_valleyview)
+		if (IS_VALLEYVIEW(dev))
 			valleyview_set_rps(dev_priv->dev, dev_priv->rps.min_delay);
 		else
 			gen6_set_rps(dev_priv->dev, dev_priv->rps.min_delay);
@@ -3596,9 +3052,11 @@
 
 void gen6_rps_boost(struct drm_i915_private *dev_priv)
 {
+	struct drm_device *dev = dev_priv->dev;
+
 	mutex_lock(&dev_priv->rps.hw_lock);
 	if (dev_priv->rps.enabled) {
-		if (dev_priv->info->is_valleyview)
+		if (IS_VALLEYVIEW(dev))
 			valleyview_set_rps(dev_priv->dev, dev_priv->rps.max_delay);
 		else
 			gen6_set_rps(dev_priv->dev, dev_priv->rps.max_delay);
@@ -3607,48 +3065,18 @@
 	mutex_unlock(&dev_priv->rps.hw_lock);
 }
 
-/*
- * Wait until the previous freq change has completed,
- * or the timeout elapsed, and then update our notion
- * of the current GPU frequency.
- */
-static void vlv_update_rps_cur_delay(struct drm_i915_private *dev_priv)
-{
-	u32 pval;
-
-	WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));
-
-	if (wait_for(((pval = vlv_punit_read(dev_priv, PUNIT_REG_GPU_FREQ_STS)) & GENFREQSTATUS) == 0, 10))
-		DRM_DEBUG_DRIVER("timed out waiting for Punit\n");
-
-	pval >>= 8;
-
-	if (pval != dev_priv->rps.cur_delay)
-		DRM_DEBUG_DRIVER("Punit overrode GPU freq: %d MHz (%u) requested, but got %d Mhz (%u)\n",
-				 vlv_gpu_freq(dev_priv->mem_freq, dev_priv->rps.cur_delay),
-				 dev_priv->rps.cur_delay,
-				 vlv_gpu_freq(dev_priv->mem_freq, pval), pval);
-
-	dev_priv->rps.cur_delay = pval;
-}
-
 void valleyview_set_rps(struct drm_device *dev, u8 val)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
 
-	gen6_rps_limits(dev_priv, &val);
-
 	WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));
 	WARN_ON(val > dev_priv->rps.max_delay);
 	WARN_ON(val < dev_priv->rps.min_delay);
 
-	vlv_update_rps_cur_delay(dev_priv);
-
 	DRM_DEBUG_DRIVER("GPU freq request from %d MHz (%u) to %d MHz (%u)\n",
-			 vlv_gpu_freq(dev_priv->mem_freq,
-				      dev_priv->rps.cur_delay),
+			 vlv_gpu_freq(dev_priv, dev_priv->rps.cur_delay),
 			 dev_priv->rps.cur_delay,
-			 vlv_gpu_freq(dev_priv->mem_freq, val), val);
+			 vlv_gpu_freq(dev_priv, val), val);
 
 	if (val == dev_priv->rps.cur_delay)
 		return;
@@ -3657,7 +3085,7 @@
 
 	dev_priv->rps.cur_delay = val;
 
-	trace_intel_gpu_freq_change(vlv_gpu_freq(dev_priv->mem_freq, val));
+	trace_intel_gpu_freq_change(vlv_gpu_freq(dev_priv, val));
 }
 
 static void gen6_disable_rps_interrupts(struct drm_device *dev)
@@ -3775,7 +3203,7 @@
 
 	/* 1c & 1d: Get forcewake during program sequence. Although the driver
 	 * hasn't enabled a state yet where we need forcewake, BIOS may have.*/
-	gen6_gt_force_wake_get(dev_priv);
+	gen6_gt_force_wake_get(dev_priv, FORCEWAKE_ALL);
 
 	/* 2a: Disable RC states. */
 	I915_WRITE(GEN6_RC_CONTROL, 0);
@@ -3832,7 +3260,7 @@
 
 	gen6_enable_rps_interrupts(dev);
 
-	gen6_gt_force_wake_put(dev_priv);
+	gen6_gt_force_wake_put(dev_priv, FORCEWAKE_ALL);
 }
 
 static void gen6_enable_rps(struct drm_device *dev)
@@ -3862,7 +3290,7 @@
 		I915_WRITE(GTFIFODBG, gtfifodbg);
 	}
 
-	gen6_gt_force_wake_get(dev_priv);
+	gen6_gt_force_wake_get(dev_priv, FORCEWAKE_ALL);
 
 	rp_state_cap = I915_READ(GEN6_RP_STATE_CAP);
 	gt_perf_status = I915_READ(GEN6_GT_PERF_STATUS);
@@ -3954,7 +3382,7 @@
 			DRM_ERROR("Couldn't fix incorrect rc6 voltage\n");
 	}
 
-	gen6_gt_force_wake_put(dev_priv);
+	gen6_gt_force_wake_put(dev_priv, FORCEWAKE_ALL);
 }
 
 void gen6_update_ring_freq(struct drm_device *dev)
@@ -4116,7 +3544,8 @@
 
 	valleyview_setup_pctx(dev);
 
-	gen6_gt_force_wake_get(dev_priv);
+	/* If VLV, Forcewake all wells, else re-direct to regular path */
+	gen6_gt_force_wake_get(dev_priv, FORCEWAKE_ALL);
 
 	I915_WRITE(GEN6_RP_UP_THRESHOLD, 59400);
 	I915_WRITE(GEN6_RP_DOWN_THRESHOLD, 245000);
@@ -4140,7 +3569,7 @@
 	for_each_ring(ring, dev_priv, i)
 		I915_WRITE(RING_MAX_IDLE(ring->mmio_base), 10);
 
-	I915_WRITE(GEN6_RC6_THRESHOLD, 0xc350);
+	I915_WRITE(GEN6_RC6_THRESHOLD, 0x557);
 
 	/* allows RC6 residency counter to work */
 	I915_WRITE(VLV_COUNTER_CONTROL,
@@ -4148,65 +3577,47 @@
 				      VLV_MEDIA_RC6_COUNT_EN |
 				      VLV_RENDER_RC6_COUNT_EN));
 	if (intel_enable_rc6(dev) & INTEL_RC6_ENABLE)
-		rc6_mode = GEN7_RC_CTL_TO_MODE;
+		rc6_mode = GEN7_RC_CTL_TO_MODE | VLV_RC_CTL_CTX_RST_PARALLEL;
 
 	intel_print_rc6_info(dev, rc6_mode);
 
 	I915_WRITE(GEN6_RC_CONTROL, rc6_mode);
 
 	val = vlv_punit_read(dev_priv, PUNIT_REG_GPU_FREQ_STS);
-	switch ((val >> 6) & 3) {
-	case 0:
-	case 1:
-		dev_priv->mem_freq = 800;
-		break;
-	case 2:
-		dev_priv->mem_freq = 1066;
-		break;
-	case 3:
-		dev_priv->mem_freq = 1333;
-		break;
-	}
-	DRM_DEBUG_DRIVER("DDR speed: %d MHz", dev_priv->mem_freq);
 
 	DRM_DEBUG_DRIVER("GPLL enabled? %s\n", val & 0x10 ? "yes" : "no");
 	DRM_DEBUG_DRIVER("GPU status: 0x%08x\n", val);
 
 	dev_priv->rps.cur_delay = (val >> 8) & 0xff;
 	DRM_DEBUG_DRIVER("current GPU freq: %d MHz (%u)\n",
-			 vlv_gpu_freq(dev_priv->mem_freq,
-				      dev_priv->rps.cur_delay),
+			 vlv_gpu_freq(dev_priv, dev_priv->rps.cur_delay),
 			 dev_priv->rps.cur_delay);
 
 	dev_priv->rps.max_delay = valleyview_rps_max_freq(dev_priv);
 	dev_priv->rps.hw_max = dev_priv->rps.max_delay;
 	DRM_DEBUG_DRIVER("max GPU freq: %d MHz (%u)\n",
-			 vlv_gpu_freq(dev_priv->mem_freq,
-				      dev_priv->rps.max_delay),
+			 vlv_gpu_freq(dev_priv, dev_priv->rps.max_delay),
 			 dev_priv->rps.max_delay);
 
 	dev_priv->rps.rpe_delay = valleyview_rps_rpe_freq(dev_priv);
 	DRM_DEBUG_DRIVER("RPe GPU freq: %d MHz (%u)\n",
-			 vlv_gpu_freq(dev_priv->mem_freq,
-				      dev_priv->rps.rpe_delay),
+			 vlv_gpu_freq(dev_priv, dev_priv->rps.rpe_delay),
 			 dev_priv->rps.rpe_delay);
 
 	dev_priv->rps.min_delay = valleyview_rps_min_freq(dev_priv);
 	DRM_DEBUG_DRIVER("min GPU freq: %d MHz (%u)\n",
-			 vlv_gpu_freq(dev_priv->mem_freq,
-				      dev_priv->rps.min_delay),
+			 vlv_gpu_freq(dev_priv, dev_priv->rps.min_delay),
 			 dev_priv->rps.min_delay);
 
 	DRM_DEBUG_DRIVER("setting GPU freq to %d MHz (%u)\n",
-			 vlv_gpu_freq(dev_priv->mem_freq,
-				      dev_priv->rps.rpe_delay),
+			 vlv_gpu_freq(dev_priv, dev_priv->rps.rpe_delay),
 			 dev_priv->rps.rpe_delay);
 
 	valleyview_set_rps(dev_priv->dev, dev_priv->rps.rpe_delay);
 
 	gen6_enable_rps_interrupts(dev);
 
-	gen6_gt_force_wake_put(dev_priv);
+	gen6_gt_force_wake_put(dev_priv, FORCEWAKE_ALL);
 }
 
 void ironlake_teardown_rc6(struct drm_device *dev)
@@ -5019,6 +4430,20 @@
 	}
 }
 
+static void ilk_init_lp_watermarks(struct drm_device *dev)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+
+	I915_WRITE(WM3_LP_ILK, I915_READ(WM3_LP_ILK) & ~WM1_LP_SR_EN);
+	I915_WRITE(WM2_LP_ILK, I915_READ(WM2_LP_ILK) & ~WM1_LP_SR_EN);
+	I915_WRITE(WM1_LP_ILK, I915_READ(WM1_LP_ILK) & ~WM1_LP_SR_EN);
+
+	/*
+	 * Don't touch WM1S_LP_EN here.
+	 * Doing so could cause underruns.
+	 */
+}
+
 static void ironlake_init_clock_gating(struct drm_device *dev)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
@@ -5052,9 +4477,8 @@
 	I915_WRITE(DISP_ARB_CTL,
 		   (I915_READ(DISP_ARB_CTL) |
 		    DISP_FBC_WM_DIS));
-	I915_WRITE(WM3_LP_ILK, 0);
-	I915_WRITE(WM2_LP_ILK, 0);
-	I915_WRITE(WM1_LP_ILK, 0);
+
+	ilk_init_lp_watermarks(dev);
 
 	/*
 	 * Based on the document from hardware guys the following bits
@@ -5161,9 +4585,7 @@
 		I915_WRITE(GEN6_GT_MODE,
 			   _MASKED_BIT_ENABLE(GEN6_TD_FOUR_ROW_DISPATCH_DISABLE));
 
-	I915_WRITE(WM3_LP_ILK, 0);
-	I915_WRITE(WM2_LP_ILK, 0);
-	I915_WRITE(WM1_LP_ILK, 0);
+	ilk_init_lp_watermarks(dev);
 
 	I915_WRITE(CACHE_MODE_0,
 		   _MASKED_BIT_DISABLE(CM0_STC_EVICT_DISABLE_LRA_SNB));
@@ -5304,28 +4726,40 @@
 	I915_WRITE(GEN7_HALF_SLICE_CHICKEN1,
 		   _MASKED_BIT_ENABLE(GEN7_SINGLE_SUBSCAN_DISPATCH_ENABLE));
 
-	/* WaSwitchSolVfFArbitrationPriority */
+	/* WaSwitchSolVfFArbitrationPriority:bdw */
 	I915_WRITE(GAM_ECOCHK, I915_READ(GAM_ECOCHK) | HSW_ECOCHK_ARB_PRIO_SOL);
 
-	/* WaPsrDPAMaskVBlankInSRD */
+	/* WaPsrDPAMaskVBlankInSRD:bdw */
 	I915_WRITE(CHICKEN_PAR1_1,
 		   I915_READ(CHICKEN_PAR1_1) | DPA_MASK_VBLANK_SRD);
 
-	/* WaPsrDPRSUnmaskVBlankInSRD */
+	/* WaPsrDPRSUnmaskVBlankInSRD:bdw */
 	for_each_pipe(i) {
 		I915_WRITE(CHICKEN_PIPESL_1(i),
 			   I915_READ(CHICKEN_PIPESL_1(i) |
 				     DPRS_MASK_VBLANK_SRD));
 	}
+
+	/* Use Force Non-Coherent whenever executing a 3D context. This is a
+	 * workaround for a possible hang in the unlikely event a TLB
+	 * invalidation occurs during a PSD flush.
+	 */
+	I915_WRITE(HDC_CHICKEN0,
+		   I915_READ(HDC_CHICKEN0) |
+		   _MASKED_BIT_ENABLE(HDC_FORCE_NON_COHERENT));
+
+	/* WaVSRefCountFullforceMissDisable:bdw */
+	/* WaDSRefCountFullforceMissDisable:bdw */
+	I915_WRITE(GEN7_FF_THREAD_MODE,
+		   I915_READ(GEN7_FF_THREAD_MODE) &
+		   ~(GEN8_FF_DS_REF_CNT_FFME | GEN7_FF_VS_REF_CNT_FFME));
 }
 
 static void haswell_init_clock_gating(struct drm_device *dev)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
 
-	I915_WRITE(WM3_LP_ILK, 0);
-	I915_WRITE(WM2_LP_ILK, 0);
-	I915_WRITE(WM1_LP_ILK, 0);
+	ilk_init_lp_watermarks(dev);
 
 	/* According to the spec, bit 13 (RCZUNIT) must be set on IVB.
 	 * This implements the WaDisableRCZUnitClockGating:hsw workaround.
@@ -5374,9 +4808,7 @@
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	uint32_t snpcr;
 
-	I915_WRITE(WM3_LP_ILK, 0);
-	I915_WRITE(WM2_LP_ILK, 0);
-	I915_WRITE(WM1_LP_ILK, 0);
+	ilk_init_lp_watermarks(dev);
 
 	I915_WRITE(ILK_DSPCLK_GATE_D, ILK_VRHUNIT_CLOCK_GATE_DISABLE);
 
@@ -5463,6 +4895,26 @@
 static void valleyview_init_clock_gating(struct drm_device *dev)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
+	u32 val;
+
+	mutex_lock(&dev_priv->rps.hw_lock);
+	val = vlv_punit_read(dev_priv, PUNIT_REG_GPU_FREQ_STS);
+	mutex_unlock(&dev_priv->rps.hw_lock);
+	switch ((val >> 6) & 3) {
+	case 0:
+		dev_priv->mem_freq = 800;
+		break;
+	case 1:
+		dev_priv->mem_freq = 1066;
+		break;
+	case 2:
+		dev_priv->mem_freq = 1333;
+		break;
+	case 3:
+		dev_priv->mem_freq = 1333;
+		break;
+	}
+	DRM_DEBUG_DRIVER("DDR speed: %d MHz", dev_priv->mem_freq);
 
 	I915_WRITE(DSPCLK_GATE_D, VRHUNIT_CLOCK_GATE_DISABLE);
 
@@ -5642,50 +5094,133 @@
 		lpt_suspend_hw(dev);
 }
 
-static bool is_always_on_power_domain(struct drm_device *dev,
-				      enum intel_display_power_domain domain)
-{
-	unsigned long always_on_domains;
+#define for_each_power_well(i, power_well, domain_mask, power_domains)	\
+	for (i = 0;							\
+	     i < (power_domains)->power_well_count &&			\
+		 ((power_well) = &(power_domains)->power_wells[i]);	\
+	     i++)							\
+		if ((power_well)->domains & (domain_mask))
 
-	BUG_ON(BIT(domain) & ~POWER_DOMAIN_MASK);
-
-	if (IS_BROADWELL(dev)) {
-		always_on_domains = BDW_ALWAYS_ON_POWER_DOMAINS;
-	} else if (IS_HASWELL(dev)) {
-		always_on_domains = HSW_ALWAYS_ON_POWER_DOMAINS;
-	} else {
-		WARN_ON(1);
-		return true;
-	}
-
-	return BIT(domain) & always_on_domains;
-}
+#define for_each_power_well_rev(i, power_well, domain_mask, power_domains) \
+	for (i = (power_domains)->power_well_count - 1;			 \
+	     i >= 0 && ((power_well) = &(power_domains)->power_wells[i]);\
+	     i--)							 \
+		if ((power_well)->domains & (domain_mask))
 
 /**
  * We should only use the power well if we explicitly asked the hardware to
  * enable it, so check if it's enabled and also check if we've requested it to
  * be enabled.
  */
-bool intel_display_power_enabled(struct drm_device *dev,
-				 enum intel_display_power_domain domain)
+static bool hsw_power_well_enabled(struct drm_device *dev,
+				   struct i915_power_well *power_well)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
 
-	if (!HAS_POWER_WELL(dev))
-		return true;
-
-	if (is_always_on_power_domain(dev, domain))
-		return true;
-
 	return I915_READ(HSW_PWR_WELL_DRIVER) ==
 		     (HSW_PWR_WELL_ENABLE_REQUEST | HSW_PWR_WELL_STATE_ENABLED);
 }
 
-static void __intel_set_power_well(struct drm_device *dev, bool enable)
+bool intel_display_power_enabled_sw(struct drm_device *dev,
+				    enum intel_display_power_domain domain)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct i915_power_domains *power_domains;
+
+	power_domains = &dev_priv->power_domains;
+
+	return power_domains->domain_use_count[domain];
+}
+
+bool intel_display_power_enabled(struct drm_device *dev,
+				 enum intel_display_power_domain domain)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct i915_power_domains *power_domains;
+	struct i915_power_well *power_well;
+	bool is_enabled;
+	int i;
+
+	power_domains = &dev_priv->power_domains;
+
+	is_enabled = true;
+
+	mutex_lock(&power_domains->lock);
+	for_each_power_well_rev(i, power_well, BIT(domain), power_domains) {
+		if (power_well->always_on)
+			continue;
+
+		if (!power_well->is_enabled(dev, power_well)) {
+			is_enabled = false;
+			break;
+		}
+	}
+	mutex_unlock(&power_domains->lock);
+
+	return is_enabled;
+}
+
+static void hsw_power_well_post_enable(struct drm_i915_private *dev_priv)
+{
+	struct drm_device *dev = dev_priv->dev;
+	unsigned long irqflags;
+
+	/*
+	 * After we re-enable the power well, if we touch VGA register 0x3d5
+	 * we'll get unclaimed register interrupts. This stops after we write
+	 * anything to the VGA MSR register. The vgacon module uses this
+	 * register all the time, so if we unbind our driver and, as a
+	 * consequence, bind vgacon, we'll get stuck in an infinite loop at
+	 * console_unlock(). So make sure we touch the VGA MSR register here, making
+	 * sure vgacon can keep working normally without triggering interrupts
+	 * and error messages.
+	 */
+	vga_get_uninterruptible(dev->pdev, VGA_RSRC_LEGACY_IO);
+	outb(inb(VGA_MSR_READ), VGA_MSR_WRITE);
+	vga_put(dev->pdev, VGA_RSRC_LEGACY_IO);
+
+	if (IS_BROADWELL(dev)) {
+		spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
+		I915_WRITE(GEN8_DE_PIPE_IMR(PIPE_B),
+			   dev_priv->de_irq_mask[PIPE_B]);
+		I915_WRITE(GEN8_DE_PIPE_IER(PIPE_B),
+			   ~dev_priv->de_irq_mask[PIPE_B] |
+			   GEN8_PIPE_VBLANK);
+		I915_WRITE(GEN8_DE_PIPE_IMR(PIPE_C),
+			   dev_priv->de_irq_mask[PIPE_C]);
+		I915_WRITE(GEN8_DE_PIPE_IER(PIPE_C),
+			   ~dev_priv->de_irq_mask[PIPE_C] |
+			   GEN8_PIPE_VBLANK);
+		POSTING_READ(GEN8_DE_PIPE_IER(PIPE_C));
+		spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
+	}
+}
+
+static void hsw_power_well_post_disable(struct drm_i915_private *dev_priv)
+{
+	struct drm_device *dev = dev_priv->dev;
+	enum pipe p;
+	unsigned long irqflags;
+
+	/*
+	 * After this, the registers on the pipes that are part of the power
+	 * well will become zero, so we have to adjust our counters according to
+	 * that.
+	 *
+	 * FIXME: Should we do this in general in drm_vblank_post_modeset?
+	 */
+	spin_lock_irqsave(&dev->vbl_lock, irqflags);
+	for_each_pipe(p)
+		if (p != PIPE_A)
+			dev->vblank[p].last = 0;
+	spin_unlock_irqrestore(&dev->vbl_lock, irqflags);
+}
+
+static void hsw_set_power_well(struct drm_device *dev,
+			       struct i915_power_well *power_well, bool enable)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	bool is_enabled, enable_requested;
-	unsigned long irqflags;
 	uint32_t tmp;
 
 	WARN_ON(dev_priv->pc8.enabled);
@@ -5706,42 +5241,14 @@
 				DRM_ERROR("Timeout enabling power well\n");
 		}
 
-		if (IS_BROADWELL(dev)) {
-			spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
-			I915_WRITE(GEN8_DE_PIPE_IMR(PIPE_B),
-				   dev_priv->de_irq_mask[PIPE_B]);
-			I915_WRITE(GEN8_DE_PIPE_IER(PIPE_B),
-				   ~dev_priv->de_irq_mask[PIPE_B] |
-				   GEN8_PIPE_VBLANK);
-			I915_WRITE(GEN8_DE_PIPE_IMR(PIPE_C),
-				   dev_priv->de_irq_mask[PIPE_C]);
-			I915_WRITE(GEN8_DE_PIPE_IER(PIPE_C),
-				   ~dev_priv->de_irq_mask[PIPE_C] |
-				   GEN8_PIPE_VBLANK);
-			POSTING_READ(GEN8_DE_PIPE_IER(PIPE_C));
-			spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
-		}
+		hsw_power_well_post_enable(dev_priv);
 	} else {
 		if (enable_requested) {
-			enum pipe p;
-
 			I915_WRITE(HSW_PWR_WELL_DRIVER, 0);
 			POSTING_READ(HSW_PWR_WELL_DRIVER);
 			DRM_DEBUG_KMS("Requesting to disable the power well\n");
 
-			/*
-			 * After this, the registers on the pipes that are part
-			 * of the power well will become zero, so we have to
-			 * adjust our counters according to that.
-			 *
-			 * FIXME: Should we do this in general in
-			 * drm_vblank_post_modeset?
-			 */
-			spin_lock_irqsave(&dev->vbl_lock, irqflags);
-			for_each_pipe(p)
-				if (p != PIPE_A)
-					dev->vblank[p].last = 0;
-			spin_unlock_irqrestore(&dev->vbl_lock, irqflags);
+			hsw_power_well_post_disable(dev_priv);
 		}
 	}
 }
@@ -5751,9 +5258,9 @@
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
 
-	if (!power_well->count++) {
+	if (!power_well->count++ && power_well->set) {
 		hsw_disable_package_c8(dev_priv);
-		__intel_set_power_well(dev, true);
+		power_well->set(dev, power_well, true);
 	}
 }
 
@@ -5763,8 +5270,10 @@
 	struct drm_i915_private *dev_priv = dev->dev_private;
 
 	WARN_ON(!power_well->count);
-	if (!--power_well->count && i915_disable_power_well) {
-		__intel_set_power_well(dev, false);
+
+	if (!--power_well->count && power_well->set &&
+	    i915_disable_power_well) {
+		power_well->set(dev, power_well, false);
 		hsw_enable_package_c8(dev_priv);
 	}
 }
@@ -5774,17 +5283,18 @@
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct i915_power_domains *power_domains;
-
-	if (!HAS_POWER_WELL(dev))
-		return;
-
-	if (is_always_on_power_domain(dev, domain))
-		return;
+	struct i915_power_well *power_well;
+	int i;
 
 	power_domains = &dev_priv->power_domains;
 
 	mutex_lock(&power_domains->lock);
-	__intel_power_well_get(dev, &power_domains->power_wells[0]);
+
+	for_each_power_well(i, power_well, BIT(domain), power_domains)
+		__intel_power_well_get(dev, power_well);
+
+	power_domains->domain_use_count[domain]++;
+
 	mutex_unlock(&power_domains->lock);
 }
 
@@ -5793,17 +5303,19 @@
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct i915_power_domains *power_domains;
-
-	if (!HAS_POWER_WELL(dev))
-		return;
-
-	if (is_always_on_power_domain(dev, domain))
-		return;
+	struct i915_power_well *power_well;
+	int i;
 
 	power_domains = &dev_priv->power_domains;
 
 	mutex_lock(&power_domains->lock);
-	__intel_power_well_put(dev, &power_domains->power_wells[0]);
+
+	WARN_ON(!power_domains->domain_use_count[domain]);
+	power_domains->domain_use_count[domain]--;
+
+	for_each_power_well_rev(i, power_well, BIT(domain), power_domains)
+		__intel_power_well_put(dev, power_well);
+
 	mutex_unlock(&power_domains->lock);
 }
 
@@ -5819,10 +5331,7 @@
 
 	dev_priv = container_of(hsw_pwr, struct drm_i915_private,
 				power_domains);
-
-	mutex_lock(&hsw_pwr->lock);
-	__intel_power_well_get(dev_priv->dev, &hsw_pwr->power_wells[0]);
-	mutex_unlock(&hsw_pwr->lock);
+	intel_display_power_get(dev_priv->dev, POWER_DOMAIN_AUDIO);
 }
 EXPORT_SYMBOL_GPL(i915_request_power_well);
 
@@ -5836,24 +5345,71 @@
 
 	dev_priv = container_of(hsw_pwr, struct drm_i915_private,
 				power_domains);
-
-	mutex_lock(&hsw_pwr->lock);
-	__intel_power_well_put(dev_priv->dev, &hsw_pwr->power_wells[0]);
-	mutex_unlock(&hsw_pwr->lock);
+	intel_display_power_put(dev_priv->dev, POWER_DOMAIN_AUDIO);
 }
 EXPORT_SYMBOL_GPL(i915_release_power_well);
 
+static struct i915_power_well i9xx_always_on_power_well[] = {
+	{
+		.name = "always-on",
+		.always_on = 1,
+		.domains = POWER_DOMAIN_MASK,
+	},
+};
+
+static struct i915_power_well hsw_power_wells[] = {
+	{
+		.name = "always-on",
+		.always_on = 1,
+		.domains = HSW_ALWAYS_ON_POWER_DOMAINS,
+	},
+	{
+		.name = "display",
+		.domains = POWER_DOMAIN_MASK & ~HSW_ALWAYS_ON_POWER_DOMAINS,
+		.is_enabled = hsw_power_well_enabled,
+		.set = hsw_set_power_well,
+	},
+};
+
+static struct i915_power_well bdw_power_wells[] = {
+	{
+		.name = "always-on",
+		.always_on = 1,
+		.domains = BDW_ALWAYS_ON_POWER_DOMAINS,
+	},
+	{
+		.name = "display",
+		.domains = POWER_DOMAIN_MASK & ~BDW_ALWAYS_ON_POWER_DOMAINS,
+		.is_enabled = hsw_power_well_enabled,
+		.set = hsw_set_power_well,
+	},
+};
+
+#define set_power_wells(power_domains, __power_wells) ({		\
+	(power_domains)->power_wells = (__power_wells);			\
+	(power_domains)->power_well_count = ARRAY_SIZE(__power_wells);	\
+})
+
 int intel_power_domains_init(struct drm_device *dev)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct i915_power_domains *power_domains = &dev_priv->power_domains;
-	struct i915_power_well *power_well;
 
 	mutex_init(&power_domains->lock);
-	hsw_pwr = power_domains;
 
-	power_well = &power_domains->power_wells[0];
-	power_well->count = 0;
+	/*
+	 * The enabling order will be from lower to higher indexed wells,
+	 * the disabling order is reversed.
+	 */
+	if (IS_HASWELL(dev)) {
+		set_power_wells(power_domains, hsw_power_wells);
+		hsw_pwr = power_domains;
+	} else if (IS_BROADWELL(dev)) {
+		set_power_wells(power_domains, bdw_power_wells);
+		hsw_pwr = power_domains;
+	} else {
+		set_power_wells(power_domains, i9xx_always_on_power_well);
+	}
 
 	return 0;
 }
@@ -5868,15 +5424,13 @@
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct i915_power_domains *power_domains = &dev_priv->power_domains;
 	struct i915_power_well *power_well;
-
-	if (!HAS_POWER_WELL(dev))
-		return;
+	int i;
 
 	mutex_lock(&power_domains->lock);
-
-	power_well = &power_domains->power_wells[0];
-	__intel_set_power_well(dev, power_well->count > 0);
-
+	for_each_power_well(i, power_well, POWER_DOMAIN_MASK, power_domains) {
+		if (power_well->set)
+			power_well->set(dev, power_well, power_well->count > 0);
+	}
 	mutex_unlock(&power_domains->lock);
 }
 
@@ -5890,13 +5444,13 @@
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
 
-	if (!HAS_POWER_WELL(dev))
-		return;
-
 	/* For now, we need the power well to be always enabled. */
 	intel_display_set_init_power(dev, true);
 	intel_power_domains_resume(dev);
 
+	if (!(IS_HASWELL(dev) || IS_BROADWELL(dev)))
+		return;
+
 	/* We're taking over the BIOS, so clear any requests made by it since
 	 * the driver is in charge now. */
 	if (I915_READ(HSW_PWR_WELL_BIOS) & HSW_PWR_WELL_ENABLE_REQUEST)
@@ -5914,31 +5468,86 @@
 	hsw_enable_package_c8(dev_priv);
 }
 
+void intel_runtime_pm_get(struct drm_i915_private *dev_priv)
+{
+	struct drm_device *dev = dev_priv->dev;
+	struct device *device = &dev->pdev->dev;
+
+	if (!HAS_RUNTIME_PM(dev))
+		return;
+
+	pm_runtime_get_sync(device);
+	WARN(dev_priv->pm.suspended, "Device still suspended.\n");
+}
+
+void intel_runtime_pm_put(struct drm_i915_private *dev_priv)
+{
+	struct drm_device *dev = dev_priv->dev;
+	struct device *device = &dev->pdev->dev;
+
+	if (!HAS_RUNTIME_PM(dev))
+		return;
+
+	pm_runtime_mark_last_busy(device);
+	pm_runtime_put_autosuspend(device);
+}
+
+void intel_init_runtime_pm(struct drm_i915_private *dev_priv)
+{
+	struct drm_device *dev = dev_priv->dev;
+	struct device *device = &dev->pdev->dev;
+
+	dev_priv->pm.suspended = false;
+
+	if (!HAS_RUNTIME_PM(dev))
+		return;
+
+	pm_runtime_set_active(device);
+
+	pm_runtime_set_autosuspend_delay(device, 10000); /* 10s */
+	pm_runtime_mark_last_busy(device);
+	pm_runtime_use_autosuspend(device);
+}
+
+void intel_fini_runtime_pm(struct drm_i915_private *dev_priv)
+{
+	struct drm_device *dev = dev_priv->dev;
+	struct device *device = &dev->pdev->dev;
+
+	if (!HAS_RUNTIME_PM(dev))
+		return;
+
+	/* Make sure we're not suspended first. */
+	pm_runtime_get_sync(device);
+	pm_runtime_disable(device);
+}
+
 /* Set up chip specific power management-related functions */
 void intel_init_pm(struct drm_device *dev)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
 
-	if (I915_HAS_FBC(dev)) {
-		if (HAS_PCH_SPLIT(dev)) {
+	if (HAS_FBC(dev)) {
+		if (INTEL_INFO(dev)->gen >= 7) {
 			dev_priv->display.fbc_enabled = ironlake_fbc_enabled;
-			if (IS_IVYBRIDGE(dev) || IS_HASWELL(dev))
-				dev_priv->display.enable_fbc =
-					gen7_enable_fbc;
-			else
-				dev_priv->display.enable_fbc =
-					ironlake_enable_fbc;
+			dev_priv->display.enable_fbc = gen7_enable_fbc;
+			dev_priv->display.disable_fbc = ironlake_disable_fbc;
+		} else if (INTEL_INFO(dev)->gen >= 5) {
+			dev_priv->display.fbc_enabled = ironlake_fbc_enabled;
+			dev_priv->display.enable_fbc = ironlake_enable_fbc;
 			dev_priv->display.disable_fbc = ironlake_disable_fbc;
 		} else if (IS_GM45(dev)) {
 			dev_priv->display.fbc_enabled = g4x_fbc_enabled;
 			dev_priv->display.enable_fbc = g4x_enable_fbc;
 			dev_priv->display.disable_fbc = g4x_disable_fbc;
-		} else if (IS_CRESTLINE(dev)) {
+		} else {
 			dev_priv->display.fbc_enabled = i8xx_fbc_enabled;
 			dev_priv->display.enable_fbc = i8xx_enable_fbc;
 			dev_priv->display.disable_fbc = i8xx_disable_fbc;
+
+			/* This value was pulled out of someone's hat */
+			I915_WRITE(FBC_CONTROL, 500 << FBC_CTL_INTERVAL_SHIFT);
 		}
-		/* 855GM needs testing */
 	}
 
 	/* For cxsr */
@@ -5951,58 +5560,27 @@
 	if (HAS_PCH_SPLIT(dev)) {
 		intel_setup_wm_latency(dev);
 
-		if (IS_GEN5(dev)) {
-			if (dev_priv->wm.pri_latency[1] &&
-			    dev_priv->wm.spr_latency[1] &&
-			    dev_priv->wm.cur_latency[1])
-				dev_priv->display.update_wm = ironlake_update_wm;
-			else {
-				DRM_DEBUG_KMS("Failed to get proper latency. "
-					      "Disable CxSR\n");
-				dev_priv->display.update_wm = NULL;
-			}
+		if ((IS_GEN5(dev) && dev_priv->wm.pri_latency[1] &&
+		     dev_priv->wm.spr_latency[1] && dev_priv->wm.cur_latency[1]) ||
+		    (!IS_GEN5(dev) && dev_priv->wm.pri_latency[0] &&
+		     dev_priv->wm.spr_latency[0] && dev_priv->wm.cur_latency[0])) {
+			dev_priv->display.update_wm = ilk_update_wm;
+			dev_priv->display.update_sprite_wm = ilk_update_sprite_wm;
+		} else {
+			DRM_DEBUG_KMS("Failed to read display plane latency. "
+				      "Disable CxSR\n");
+		}
+
+		if (IS_GEN5(dev))
 			dev_priv->display.init_clock_gating = ironlake_init_clock_gating;
-		} else if (IS_GEN6(dev)) {
-			if (dev_priv->wm.pri_latency[0] &&
-			    dev_priv->wm.spr_latency[0] &&
-			    dev_priv->wm.cur_latency[0]) {
-				dev_priv->display.update_wm = sandybridge_update_wm;
-				dev_priv->display.update_sprite_wm = sandybridge_update_sprite_wm;
-			} else {
-				DRM_DEBUG_KMS("Failed to read display plane latency. "
-					      "Disable CxSR\n");
-				dev_priv->display.update_wm = NULL;
-			}
+		else if (IS_GEN6(dev))
 			dev_priv->display.init_clock_gating = gen6_init_clock_gating;
-		} else if (IS_IVYBRIDGE(dev)) {
-			if (dev_priv->wm.pri_latency[0] &&
-			    dev_priv->wm.spr_latency[0] &&
-			    dev_priv->wm.cur_latency[0]) {
-				dev_priv->display.update_wm = ivybridge_update_wm;
-				dev_priv->display.update_sprite_wm = sandybridge_update_sprite_wm;
-			} else {
-				DRM_DEBUG_KMS("Failed to read display plane latency. "
-					      "Disable CxSR\n");
-				dev_priv->display.update_wm = NULL;
-			}
+		else if (IS_IVYBRIDGE(dev))
 			dev_priv->display.init_clock_gating = ivybridge_init_clock_gating;
-		} else if (IS_HASWELL(dev)) {
-			if (dev_priv->wm.pri_latency[0] &&
-			    dev_priv->wm.spr_latency[0] &&
-			    dev_priv->wm.cur_latency[0]) {
-				dev_priv->display.update_wm = haswell_update_wm;
-				dev_priv->display.update_sprite_wm =
-					haswell_update_sprite_wm;
-			} else {
-				DRM_DEBUG_KMS("Failed to read display plane latency. "
-					      "Disable CxSR\n");
-				dev_priv->display.update_wm = NULL;
-			}
+		else if (IS_HASWELL(dev))
 			dev_priv->display.init_clock_gating = haswell_init_clock_gating;
-		} else if (INTEL_INFO(dev)->gen == 8) {
+		else if (INTEL_INFO(dev)->gen == 8)
 			dev_priv->display.init_clock_gating = gen8_init_clock_gating;
-		} else
-			dev_priv->display.update_wm = NULL;
 	} else if (IS_VALLEYVIEW(dev)) {
 		dev_priv->display.update_wm = valleyview_update_wm;
 		dev_priv->display.init_clock_gating =
@@ -6036,21 +5614,21 @@
 		dev_priv->display.update_wm = i9xx_update_wm;
 		dev_priv->display.get_fifo_size = i9xx_get_fifo_size;
 		dev_priv->display.init_clock_gating = gen3_init_clock_gating;
-	} else if (IS_I865G(dev)) {
-		dev_priv->display.update_wm = i830_update_wm;
-		dev_priv->display.init_clock_gating = i85x_init_clock_gating;
-		dev_priv->display.get_fifo_size = i830_get_fifo_size;
-	} else if (IS_I85X(dev)) {
-		dev_priv->display.update_wm = i9xx_update_wm;
-		dev_priv->display.get_fifo_size = i85x_get_fifo_size;
-		dev_priv->display.init_clock_gating = i85x_init_clock_gating;
-	} else {
-		dev_priv->display.update_wm = i830_update_wm;
-		dev_priv->display.init_clock_gating = i830_init_clock_gating;
-		if (IS_845G(dev))
+	} else if (IS_GEN2(dev)) {
+		if (INTEL_INFO(dev)->num_pipes == 1) {
+			dev_priv->display.update_wm = i845_update_wm;
 			dev_priv->display.get_fifo_size = i845_get_fifo_size;
-		else
+		} else {
+			dev_priv->display.update_wm = i9xx_update_wm;
 			dev_priv->display.get_fifo_size = i830_get_fifo_size;
+		}
+
+		if (IS_I85X(dev) || IS_I865G(dev))
+			dev_priv->display.init_clock_gating = i85x_init_clock_gating;
+		else
+			dev_priv->display.init_clock_gating = i830_init_clock_gating;
+	} else {
+		DRM_ERROR("unexpected fall-through in intel_init_pm\n");
 	}
 }
 
@@ -6101,59 +5679,48 @@
 	return 0;
 }
 
-int vlv_gpu_freq(int ddr_freq, int val)
+int vlv_gpu_freq(struct drm_i915_private *dev_priv, int val)
 {
-	int mult, base;
+	int div;
 
-	switch (ddr_freq) {
+	/* 4 x czclk */
+	switch (dev_priv->mem_freq) {
 	case 800:
-		mult = 20;
-		base = 120;
+		div = 10;
 		break;
 	case 1066:
-		mult = 22;
-		base = 133;
+		div = 12;
 		break;
 	case 1333:
-		mult = 21;
-		base = 125;
+		div = 16;
 		break;
 	default:
 		return -1;
 	}
 
-	return ((val - 0xbd) * mult) + base;
+	return DIV_ROUND_CLOSEST(dev_priv->mem_freq * (val + 6 - 0xbd), 4 * div);
 }
 
-int vlv_freq_opcode(int ddr_freq, int val)
+int vlv_freq_opcode(struct drm_i915_private *dev_priv, int val)
 {
-	int mult, base;
+	int mul;
 
-	switch (ddr_freq) {
+	/* 4 x czclk */
+	switch (dev_priv->mem_freq) {
 	case 800:
-		mult = 20;
-		base = 120;
+		mul = 10;
 		break;
 	case 1066:
-		mult = 22;
-		base = 133;
+		mul = 12;
 		break;
 	case 1333:
-		mult = 21;
-		base = 125;
+		mul = 16;
 		break;
 	default:
 		return -1;
 	}
 
-	val /= mult;
-	val -= base / mult;
-	val += 0xbd;
-
-	if (val > 0xea)
-		val = 0xea;
-
-	return val;
+	return DIV_ROUND_CLOSEST(4 * mul * val, dev_priv->mem_freq) + 0xbd - 6;
 }
 
 void intel_pm_setup(struct drm_device *dev)
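A quick sanity check of the new VLV frequency conversions above, assuming mem_freq was decoded as 1066 (so the divider/multiplier is 12); the opcode 0xc0 is an arbitrary example value:

	/* vlv_gpu_freq(dev_priv, 0xc0):
	 *   DIV_ROUND_CLOSEST(1066 * (0xc0 + 6 - 0xbd), 4 * 12)
	 *   = DIV_ROUND_CLOSEST(1066 * 9, 48) = 200 MHz
	 * vlv_freq_opcode(dev_priv, 200):
	 *   DIV_ROUND_CLOSEST(4 * 12 * 200, 1066) + 0xbd - 6
	 *   = 9 + 0xb7 = 0xc0, so the round trip is lossless here. */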
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c
index c2f09d45..31b36c5 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.c
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
@@ -285,14 +285,16 @@
 	if (!ring->fbc_dirty)
 		return 0;
 
-	ret = intel_ring_begin(ring, 4);
+	ret = intel_ring_begin(ring, 6);
 	if (ret)
 		return ret;
-	intel_ring_emit(ring, MI_NOOP);
 	/* WaFbcNukeOn3DBlt:ivb/hsw */
 	intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(1));
 	intel_ring_emit(ring, MSG_FBC_REND_STATE);
 	intel_ring_emit(ring, value);
+	intel_ring_emit(ring, MI_STORE_REGISTER_MEM(1) | MI_SRM_LRM_GLOBAL_GTT);
+	intel_ring_emit(ring, MSG_FBC_REND_STATE);
+	intel_ring_emit(ring, ring->scratch.gtt_offset + 256);
 	intel_ring_advance(ring);
 
 	ring->fbc_dirty = false;
@@ -354,7 +356,7 @@
 	intel_ring_emit(ring, 0);
 	intel_ring_advance(ring);
 
-	if (flush_domains)
+	if (!invalidate_domains && flush_domains)
 		return gen7_ring_fbc_flush(ring, FBC_REND_NUKE);
 
 	return 0;
@@ -436,7 +438,7 @@
 	int ret = 0;
 	u32 head;
 
-	gen6_gt_force_wake_get(dev_priv);
+	gen6_gt_force_wake_get(dev_priv, FORCEWAKE_ALL);
 
 	if (I915_NEED_GFX_HWS(dev))
 		intel_ring_setup_status_page(ring);
@@ -509,7 +511,7 @@
 	memset(&ring->hangcheck, 0, sizeof(ring->hangcheck));
 
 out:
-	gen6_gt_force_wake_put(dev_priv);
+	gen6_gt_force_wake_put(dev_priv, FORCEWAKE_ALL);
 
 	return ret;
 }
@@ -661,19 +663,22 @@
 	struct drm_device *dev = ring->dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct intel_ring_buffer *useless;
-	int i, ret;
+	int i, ret, num_dwords = 4;
 
-	ret = intel_ring_begin(ring, ((I915_NUM_RINGS-1) *
-				      MBOX_UPDATE_DWORDS) +
-				      4);
-	if (ret)
-		return ret;
+	if (i915_semaphore_is_enabled(dev))
+		num_dwords += ((I915_NUM_RINGS-1) * MBOX_UPDATE_DWORDS);
 #undef MBOX_UPDATE_DWORDS
 
-	for_each_ring(useless, dev_priv, i) {
-		u32 mbox_reg = ring->signal_mbox[i];
-		if (mbox_reg != GEN6_NOSYNC)
-			update_mboxes(ring, mbox_reg);
+	ret = intel_ring_begin(ring, num_dwords);
+	if (ret)
+		return ret;
+
+	if (i915_semaphore_is_enabled(dev)) {
+		for_each_ring(useless, dev_priv, i) {
+			u32 mbox_reg = ring->signal_mbox[i];
+			if (mbox_reg != GEN6_NOSYNC)
+				update_mboxes(ring, mbox_reg);
+		}
 	}
 
 	intel_ring_emit(ring, MI_STORE_DWORD_INDEX);
@@ -1030,11 +1035,6 @@
 	if (!dev->irq_enabled)
 	       return false;
 
-	/* It looks like we need to prevent the gt from suspending while waiting
-	 * for an notifiy irq, otherwise irqs seem to get lost on at least the
-	 * blt/bsd rings on ivb. */
-	gen6_gt_force_wake_get(dev_priv);
-
 	spin_lock_irqsave(&dev_priv->irq_lock, flags);
 	if (ring->irq_refcount++ == 0) {
 		if (HAS_L3_DPF(dev) && ring->id == RCS)
@@ -1066,8 +1066,6 @@
 		ilk_disable_gt_irq(dev_priv, ring->irq_enable_mask);
 	}
 	spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
-
-	gen6_gt_force_wake_put(dev_priv);
 }
 
 static bool
@@ -1611,8 +1609,8 @@
 	return i915_gem_get_seqno(ring->dev, &ring->outstanding_lazy_seqno);
 }
 
-static int __intel_ring_begin(struct intel_ring_buffer *ring,
-			      int bytes)
+static int __intel_ring_prepare(struct intel_ring_buffer *ring,
+				int bytes)
 {
 	int ret;
 
@@ -1628,7 +1626,6 @@
 			return ret;
 	}
 
-	ring->space -= bytes;
 	return 0;
 }
 
@@ -1643,12 +1640,38 @@
 	if (ret)
 		return ret;
 
+	ret = __intel_ring_prepare(ring, num_dwords * sizeof(uint32_t));
+	if (ret)
+		return ret;
+
 	/* Preallocate the olr before touching the ring */
 	ret = intel_ring_alloc_seqno(ring);
 	if (ret)
 		return ret;
 
-	return __intel_ring_begin(ring, num_dwords * sizeof(uint32_t));
+	ring->space -= num_dwords * sizeof(uint32_t);
+	return 0;
+}
+
+/* Align the ring tail to a cacheline boundary */
+int intel_ring_cacheline_align(struct intel_ring_buffer *ring)
+{
+	int num_dwords = (64 - (ring->tail & 63)) / sizeof(uint32_t);
+	int ret;
+
+	if (num_dwords == 0)
+		return 0;
+
+	ret = intel_ring_begin(ring, num_dwords);
+	if (ret)
+		return ret;
+
+	while (num_dwords--)
+		intel_ring_emit(ring, MI_NOOP);
+
+	intel_ring_advance(ring);
+
+	return 0;
 }
 
 void intel_ring_init_seqno(struct intel_ring_buffer *ring, u32 seqno)
@@ -1838,7 +1861,7 @@
 	}
 	intel_ring_advance(ring);
 
-	if (IS_GEN7(dev) && flush)
+	if (IS_GEN7(dev) && !invalidate && flush)
 		return gen7_ring_fbc_flush(ring, FBC_REND_CACHE_CLEAN);
 
 	return 0;
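
A quick illustration of the padding arithmetic behind the new intel_ring_cacheline_align() (standalone sketch, not driver code; the sample tail offsets are arbitrary dword-aligned values):

	#include <stdint.h>
	#include <stdio.h>

	/* How many MI_NOOP dwords pad a given tail offset up to a 64-byte boundary */
	static int cacheline_pad_dwords(uint32_t tail)
	{
		return (64 - (tail & 63)) / sizeof(uint32_t);
	}

	int main(void)
	{
		/* tail 0x1234 sits 52 bytes into its cacheline: 12 bytes, i.e. 3 NOOPs, of padding */
		printf("%d\n", cacheline_pad_dwords(0x1234));
		/* an already-aligned tail still yields a full 16-dword pad with this formula */
		printf("%d\n", cacheline_pad_dwords(0x1240));
		return 0;
	}
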
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.h b/drivers/gpu/drm/i915/intel_ringbuffer.h
index 71a73f4..0b243ce 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.h
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.h
@@ -233,6 +233,7 @@
 void intel_cleanup_ring_buffer(struct intel_ring_buffer *ring);
 
 int __must_check intel_ring_begin(struct intel_ring_buffer *ring, int n);
+int __must_check intel_ring_cacheline_align(struct intel_ring_buffer *ring);
 static inline void intel_ring_emit(struct intel_ring_buffer *ring,
 				   u32 data)
 {
diff --git a/drivers/gpu/drm/i915/intel_sdvo.c b/drivers/gpu/drm/i915/intel_sdvo.c
index a583e8f..95bdfb3 100644
--- a/drivers/gpu/drm/i915/intel_sdvo.c
+++ b/drivers/gpu/drm/i915/intel_sdvo.c
@@ -413,23 +413,34 @@
 static void intel_sdvo_debug_write(struct intel_sdvo *intel_sdvo, u8 cmd,
 				   const void *args, int args_len)
 {
-	int i;
+	int i, pos = 0;
+#define BUF_LEN 256
+	char buffer[BUF_LEN];
 
-	DRM_DEBUG_KMS("%s: W: %02X ",
-				SDVO_NAME(intel_sdvo), cmd);
-	for (i = 0; i < args_len; i++)
-		DRM_LOG_KMS("%02X ", ((u8 *)args)[i]);
-	for (; i < 8; i++)
-		DRM_LOG_KMS("   ");
+#define BUF_PRINT(args...) \
+	pos += snprintf(buffer + pos, max_t(int, BUF_LEN - pos, 0), args)
+
+
+	for (i = 0; i < args_len; i++) {
+		BUF_PRINT("%02X ", ((u8 *)args)[i]);
+	}
+	for (; i < 8; i++) {
+		BUF_PRINT("   ");
+	}
 	for (i = 0; i < ARRAY_SIZE(sdvo_cmd_names); i++) {
 		if (cmd == sdvo_cmd_names[i].cmd) {
-			DRM_LOG_KMS("(%s)", sdvo_cmd_names[i].name);
+			BUF_PRINT("(%s)", sdvo_cmd_names[i].name);
 			break;
 		}
 	}
-	if (i == ARRAY_SIZE(sdvo_cmd_names))
-		DRM_LOG_KMS("(%02X)", cmd);
-	DRM_LOG_KMS("\n");
+	if (i == ARRAY_SIZE(sdvo_cmd_names)) {
+		BUF_PRINT("(%02X)", cmd);
+	}
+	BUG_ON(pos >= BUF_LEN - 1);
+#undef BUF_PRINT
+#undef BUF_LEN
+
+	DRM_DEBUG_KMS("%s: W: %02X %s\n", SDVO_NAME(intel_sdvo), cmd, buffer);
 }
 
 static const char *cmd_status_names[] = {
@@ -512,9 +523,10 @@
 {
 	u8 retry = 15; /* 5 quick checks, followed by 10 long checks */
 	u8 status;
-	int i;
+	int i, pos = 0;
+#define BUF_LEN 256
+	char buffer[BUF_LEN];
 
-	DRM_DEBUG_KMS("%s: R: ", SDVO_NAME(intel_sdvo));
 
 	/*
 	 * The documentation states that all commands will be
@@ -551,10 +563,13 @@
 			goto log_fail;
 	}
 
+#define BUF_PRINT(args...) \
+	pos += snprintf(buffer + pos, max_t(int, BUF_LEN - pos, 0), args)
+
 	if (status <= SDVO_CMD_STATUS_SCALING_NOT_SUPP)
-		DRM_LOG_KMS("(%s)", cmd_status_names[status]);
+		BUF_PRINT("(%s)", cmd_status_names[status]);
 	else
-		DRM_LOG_KMS("(??? %d)", status);
+		BUF_PRINT("(??? %d)", status);
 
 	if (status != SDVO_CMD_STATUS_SUCCESS)
 		goto log_fail;
@@ -565,13 +580,17 @@
 					  SDVO_I2C_RETURN_0 + i,
 					  &((u8 *)response)[i]))
 			goto log_fail;
-		DRM_LOG_KMS(" %02X", ((u8 *)response)[i]);
+		BUF_PRINT(" %02X", ((u8 *)response)[i]);
 	}
-	DRM_LOG_KMS("\n");
+	BUG_ON(pos >= BUF_LEN - 1);
+#undef BUF_PRINT
+#undef BUF_LEN
+
+	DRM_DEBUG_KMS("%s: R: %s\n", SDVO_NAME(intel_sdvo), buffer);
 	return true;
 
 log_fail:
-	DRM_LOG_KMS("... failed\n");
+	DRM_DEBUG_KMS("%s: R: ... failed\n", SDVO_NAME(intel_sdvo));
 	return false;
 }
 
@@ -933,7 +952,7 @@
 
 static bool intel_sdvo_write_infoframe(struct intel_sdvo *intel_sdvo,
 				       unsigned if_index, uint8_t tx_rate,
-				       uint8_t *data, unsigned length)
+				       const uint8_t *data, unsigned length)
 {
 	uint8_t set_buf_index[2] = { if_index, 0 };
 	uint8_t hbuf_size, tmp[8];
@@ -1517,8 +1536,9 @@
 	intel_modeset_check_state(connector->dev);
 }
 
-static int intel_sdvo_mode_valid(struct drm_connector *connector,
-				 struct drm_display_mode *mode)
+static enum drm_mode_status
+intel_sdvo_mode_valid(struct drm_connector *connector,
+		      struct drm_display_mode *mode)
 {
 	struct intel_sdvo *intel_sdvo = intel_attached_sdvo(connector);
 
diff --git a/drivers/gpu/drm/i915/intel_sdvo_regs.h b/drivers/gpu/drm/i915/intel_sdvo_regs.h
index 770bdd6..2e2d4eb 100644
--- a/drivers/gpu/drm/i915/intel_sdvo_regs.h
+++ b/drivers/gpu/drm/i915/intel_sdvo_regs.h
@@ -59,7 +59,7 @@
 	unsigned int stall_support:1;
 	unsigned int pad:1;
 	u16 output_flags;
-} __attribute__((packed));
+} __packed;
 
 /* Note: SDVO detailed timing flags match EDID misc flags. */
 #define DTD_FLAG_HSYNC_POSITIVE (1 << 1)
@@ -94,12 +94,12 @@
 		u8 v_sync_off_high;
 		u8 reserved;
 	} part2;
-} __attribute__((packed));
+} __packed;
 
 struct intel_sdvo_pixel_clock_range {
 	u16 min;	/**< pixel clock, in 10kHz units */
 	u16 max;	/**< pixel clock, in 10kHz units */
-} __attribute__((packed));
+} __packed;
 
 struct intel_sdvo_preferred_input_timing_args {
 	u16 clock;
@@ -108,7 +108,7 @@
 	u8	interlace:1;
 	u8	scaled:1;
 	u8	pad:6;
-} __attribute__((packed));
+} __packed;
 
 /* I2C registers for SDVO */
 #define SDVO_I2C_ARG_0				0x07
@@ -162,7 +162,7 @@
 	unsigned int input0_trained:1;
 	unsigned int input1_trained:1;
 	unsigned int pad:6;
-} __attribute__((packed));
+} __packed;
 
 /** Returns a struct intel_sdvo_output_flags of active outputs. */
 #define SDVO_CMD_GET_ACTIVE_OUTPUTS			0x04
@@ -219,7 +219,7 @@
 	unsigned int ambient_light_interrupt:1;
 	unsigned int hdmi_audio_encrypt_change:1;
 	unsigned int pad:6;
-} __attribute__((packed));
+} __packed;
 
 /**
  * Selects which input is affected by future input commands.
@@ -232,7 +232,7 @@
 struct intel_sdvo_set_target_input_args {
 	unsigned int target_1:1;
 	unsigned int pad:7;
-} __attribute__((packed));
+} __packed;
 
 /**
  * Takes a struct intel_sdvo_output_flags of which outputs are targeted by
@@ -370,7 +370,7 @@
 	unsigned int hdtv_std_eia_7702a_480i_60:1;
 	unsigned int hdtv_std_eia_7702a_480p_60:1;
 	unsigned int pad:3;
-} __attribute__((packed));
+} __packed;
 
 #define SDVO_CMD_GET_TV_FORMAT				0x28
 
@@ -401,7 +401,7 @@
 	unsigned int secam_l:1;
 	unsigned int secam_60:1;
 	unsigned int pad:5;
-} __attribute__((packed));
+} __packed;
 
 struct intel_sdvo_sdtv_resolution_reply {
 	unsigned int res_320x200:1;
@@ -426,7 +426,7 @@
 	unsigned int res_1024x768:1;
 	unsigned int res_1280x1024:1;
 	unsigned int pad:5;
-} __attribute__((packed));
+} __packed;
 
 /* Get supported resolution with square pixel aspect ratio that can be
    scaled for the requested HDTV format */
@@ -463,7 +463,7 @@
 	unsigned int hdtv_std_eia_7702a_480i_60:1;
 	unsigned int hdtv_std_eia_7702a_480p_60:1;
 	unsigned int pad:6;
-} __attribute__((packed));
+} __packed;
 
 struct intel_sdvo_hdtv_resolution_reply {
 	unsigned int res_640x480:1;
@@ -517,7 +517,7 @@
 
 	unsigned int res_1280x768:1;
 	unsigned int pad5:7;
-} __attribute__((packed));
+} __packed;
 
 /* Get supported power state returns info for encoder and monitor, rely on
    last SetTargetInput and SetTargetOutput calls */
@@ -557,13 +557,13 @@
 
 	unsigned int t4_high:2;
 	unsigned int pad:6;
-} __attribute__((packed));
+} __packed;
 
 #define SDVO_CMD_GET_MAX_BACKLIGHT_LEVEL		0x30
 struct sdvo_max_backlight_reply {
 	u8 max_value;
 	u8 default_value;
-} __attribute__((packed));
+} __packed;
 
 #define SDVO_CMD_GET_BACKLIGHT_LEVEL			0x31
 #define SDVO_CMD_SET_BACKLIGHT_LEVEL			0x32
@@ -573,14 +573,14 @@
 	u16 trip_low;
 	u16 trip_high;
 	u16 value;
-} __attribute__((packed));
+} __packed;
 #define SDVO_CMD_SET_AMBIENT_LIGHT			0x34
 struct sdvo_set_ambient_light_reply {
 	u16 trip_low;
 	u16 trip_high;
 	unsigned int enable:1;
 	unsigned int pad:7;
-} __attribute__((packed));
+} __packed;
 
 /* Set display power state */
 #define SDVO_CMD_SET_DISPLAY_POWER_STATE		0x7d
@@ -608,7 +608,7 @@
 	unsigned int dither:1;
 	unsigned int tv_chroma_filter:1;
 	unsigned int tv_luma_filter:1;
-} __attribute__((packed));
+} __packed;
 
 /* Picture enhancement limits below are dependent on the current TV format,
  * and thus need to be queried and set after it.
@@ -630,7 +630,7 @@
 struct intel_sdvo_enhancement_limits_reply {
 	u16 max_value;
 	u16 default_value;
-} __attribute__((packed));
+} __packed;
 
 #define SDVO_CMD_GET_LVDS_PANEL_INFORMATION		0x7f
 #define SDVO_CMD_SET_LVDS_PANEL_INFORMATION		0x80
@@ -671,7 +671,7 @@
 #define SDVO_CMD_SET_TV_LUMA_FILTER			0x79
 struct intel_sdvo_enhancements_arg {
 	u16 value;
-} __attribute__((packed));
+} __packed;
 
 #define SDVO_CMD_GET_DOT_CRAWL				0x70
 #define SDVO_CMD_SET_DOT_CRAWL				0x71
@@ -727,4 +727,4 @@
 struct intel_sdvo_encode {
 	u8 dvi_rev;
 	u8 hdmi_rev;
-} __attribute__ ((packed));
+} __packed;
diff --git a/drivers/gpu/drm/i915/intel_sideband.c b/drivers/gpu/drm/i915/intel_sideband.c
index 9944d81..0954f13 100644
--- a/drivers/gpu/drm/i915/intel_sideband.c
+++ b/drivers/gpu/drm/i915/intel_sideband.c
@@ -90,6 +90,22 @@
 	mutex_unlock(&dev_priv->dpio_lock);
 }
 
+u32 vlv_bunit_read(struct drm_i915_private *dev_priv, u32 reg)
+{
+	u32 val = 0;
+
+	vlv_sideband_rw(dev_priv, PCI_DEVFN(2, 0), IOSF_PORT_BUNIT,
+			PUNIT_OPCODE_REG_READ, reg, &val);
+
+	return val;
+}
+
+void vlv_bunit_write(struct drm_i915_private *dev_priv, u32 reg, u32 val)
+{
+	vlv_sideband_rw(dev_priv, PCI_DEVFN(2, 0), IOSF_PORT_BUNIT,
+			PUNIT_OPCODE_REG_WRITE, reg, &val);
+}
+
 u32 vlv_nc_read(struct drm_i915_private *dev_priv, u8 addr)
 {
 	u32 val = 0;
@@ -160,27 +176,18 @@
 			PUNIT_OPCODE_REG_WRITE, reg, &val);
 }
 
-static u32 vlv_get_phy_port(enum pipe pipe)
-{
-	u32 port = IOSF_PORT_DPIO;
-
-	WARN_ON ((pipe != PIPE_A) && (pipe != PIPE_B));
-
-	return port;
-}
-
 u32 vlv_dpio_read(struct drm_i915_private *dev_priv, enum pipe pipe, int reg)
 {
 	u32 val = 0;
 
-	vlv_sideband_rw(dev_priv, DPIO_DEVFN, vlv_get_phy_port(pipe),
+	vlv_sideband_rw(dev_priv, DPIO_DEVFN, DPIO_PHY_IOSF_PORT(DPIO_PHY(pipe)),
 			DPIO_OPCODE_REG_READ, reg, &val);
 	return val;
 }
 
 void vlv_dpio_write(struct drm_i915_private *dev_priv, enum pipe pipe, int reg, u32 val)
 {
-	vlv_sideband_rw(dev_priv, DPIO_DEVFN, vlv_get_phy_port(pipe),
+	vlv_sideband_rw(dev_priv, DPIO_DEVFN, DPIO_PHY_IOSF_PORT(DPIO_PHY(pipe)),
 			DPIO_OPCODE_REG_WRITE, reg, &val);
 }
 
@@ -242,3 +249,17 @@
 		return;
 	}
 }
+
+u32 vlv_flisdsi_read(struct drm_i915_private *dev_priv, u32 reg)
+{
+	u32 val = 0;
+	vlv_sideband_rw(dev_priv, DPIO_DEVFN, IOSF_PORT_FLISDSI,
+					DPIO_OPCODE_REG_READ, reg, &val);
+	return val;
+}
+
+void vlv_flisdsi_write(struct drm_i915_private *dev_priv, u32 reg, u32 val)
+{
+	vlv_sideband_rw(dev_priv, DPIO_DEVFN, IOSF_PORT_FLISDSI,
+					DPIO_OPCODE_REG_WRITE, reg, &val);
+}
diff --git a/drivers/gpu/drm/i915/intel_sprite.c b/drivers/gpu/drm/i915/intel_sprite.c
index b9fabf8..716a3c9 100644
--- a/drivers/gpu/drm/i915/intel_sprite.c
+++ b/drivers/gpu/drm/i915/intel_sprite.c
@@ -104,6 +104,12 @@
 		break;
 	}
 
+	/*
+	 * Enable gamma to match primary/cursor plane behaviour.
+	 * FIXME should be user controllable via properties.
+	 */
+	sprctl |= SP_GAMMA_ENABLE;
+
 	if (obj->tiling_mode != I915_TILING_NONE)
 		sprctl |= SP_TILED;
 
@@ -135,8 +141,8 @@
 
 	I915_WRITE(SPSIZE(pipe, plane), (crtc_h << 16) | crtc_w);
 	I915_WRITE(SPCNTR(pipe, plane), sprctl);
-	I915_MODIFY_DISPBASE(SPSURF(pipe, plane), i915_gem_obj_ggtt_offset(obj) +
-			     sprsurf_offset);
+	I915_WRITE(SPSURF(pipe, plane), i915_gem_obj_ggtt_offset(obj) +
+		   sprsurf_offset);
 	POSTING_READ(SPSURF(pipe, plane));
 }
 
@@ -152,7 +158,7 @@
 	I915_WRITE(SPCNTR(pipe, plane), I915_READ(SPCNTR(pipe, plane)) &
 		   ~SP_ENABLE);
 	/* Activate double buffered register update */
-	I915_MODIFY_DISPBASE(SPSURF(pipe, plane), 0);
+	I915_WRITE(SPSURF(pipe, plane), 0);
 	POSTING_READ(SPSURF(pipe, plane));
 
 	intel_update_sprite_watermarks(dplane, crtc, 0, 0, false, false);
@@ -224,7 +230,6 @@
 	u32 sprctl, sprscale = 0;
 	unsigned long sprsurf_offset, linear_offset;
 	int pixel_size = drm_format_plane_cpp(fb->pixel_format, 0);
-	bool scaling_was_enabled = dev_priv->sprite_scaling_enabled;
 
 	sprctl = I915_READ(SPRCTL(pipe));
 
@@ -257,6 +262,12 @@
 		BUG();
 	}
 
+	/*
+	 * Enable gamma to match primary/cursor plane behaviour.
+	 * FIXME should be user controllable via properties.
+	 */
+	sprctl |= SPRITE_GAMMA_ENABLE;
+
 	if (obj->tiling_mode != I915_TILING_NONE)
 		sprctl |= SPRITE_TILED;
 
@@ -279,21 +290,8 @@
 	crtc_w--;
 	crtc_h--;
 
-	/*
-	 * IVB workaround: must disable low power watermarks for at least
-	 * one frame before enabling scaling.  LP watermarks can be re-enabled
-	 * when scaling is disabled.
-	 */
-	if (crtc_w != src_w || crtc_h != src_h) {
-		dev_priv->sprite_scaling_enabled |= 1 << pipe;
-
-		if (!scaling_was_enabled) {
-			intel_update_watermarks(crtc);
-			intel_wait_for_vblank(dev, pipe);
-		}
+	if (crtc_w != src_w || crtc_h != src_h)
 		sprscale = SPRITE_SCALE_ENABLE | (src_w << 16) | src_h;
-	} else
-		dev_priv->sprite_scaling_enabled &= ~(1 << pipe);
 
 	I915_WRITE(SPRSTRIDE(pipe), fb->pitches[0]);
 	I915_WRITE(SPRPOS(pipe), (crtc_y << 16) | crtc_x);
@@ -317,13 +315,9 @@
 	if (intel_plane->can_scale)
 		I915_WRITE(SPRSCALE(pipe), sprscale);
 	I915_WRITE(SPRCTL(pipe), sprctl);
-	I915_MODIFY_DISPBASE(SPRSURF(pipe),
-			     i915_gem_obj_ggtt_offset(obj) + sprsurf_offset);
+	I915_WRITE(SPRSURF(pipe),
+		   i915_gem_obj_ggtt_offset(obj) + sprsurf_offset);
 	POSTING_READ(SPRSURF(pipe));
-
-	/* potentially re-enable LP watermarks */
-	if (scaling_was_enabled && !dev_priv->sprite_scaling_enabled)
-		intel_update_watermarks(crtc);
 }
 
 static void
@@ -333,23 +327,22 @@
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct intel_plane *intel_plane = to_intel_plane(plane);
 	int pipe = intel_plane->pipe;
-	bool scaling_was_enabled = dev_priv->sprite_scaling_enabled;
 
 	I915_WRITE(SPRCTL(pipe), I915_READ(SPRCTL(pipe)) & ~SPRITE_ENABLE);
 	/* Can't leave the scaler enabled... */
 	if (intel_plane->can_scale)
 		I915_WRITE(SPRSCALE(pipe), 0);
 	/* Activate double buffered register update */
-	I915_MODIFY_DISPBASE(SPRSURF(pipe), 0);
+	I915_WRITE(SPRSURF(pipe), 0);
 	POSTING_READ(SPRSURF(pipe));
 
-	dev_priv->sprite_scaling_enabled &= ~(1 << pipe);
+	/*
+	 * Avoid underruns when disabling the sprite.
+	 * FIXME remove once watermark updates are done properly.
+	 */
+	intel_wait_for_vblank(dev, pipe);
 
 	intel_update_sprite_watermarks(plane, crtc, 0, 0, false, false);
-
-	/* potentially re-enable LP watermarks */
-	if (scaling_was_enabled && !dev_priv->sprite_scaling_enabled)
-		intel_update_watermarks(crtc);
 }
 
 static int
@@ -453,6 +446,12 @@
 		BUG();
 	}
 
+	/*
+	 * Enable gamma to match primary/cursor plane behaviour.
+	 * FIXME should be user controllable via properties.
+	 */
+	dvscntr |= DVS_GAMMA_ENABLE;
+
 	if (obj->tiling_mode != I915_TILING_NONE)
 		dvscntr |= DVS_TILED;
 
@@ -470,7 +469,7 @@
 	crtc_h--;
 
 	dvsscale = 0;
-	if (IS_GEN5(dev) || crtc_w != src_w || crtc_h != src_h)
+	if (crtc_w != src_w || crtc_h != src_h)
 		dvsscale = DVS_SCALE_ENABLE | (src_w << 16) | src_h;
 
 	I915_WRITE(DVSSTRIDE(pipe), fb->pitches[0]);
@@ -490,8 +489,8 @@
 	I915_WRITE(DVSSIZE(pipe), (crtc_h << 16) | crtc_w);
 	I915_WRITE(DVSSCALE(pipe), dvsscale);
 	I915_WRITE(DVSCNTR(pipe), dvscntr);
-	I915_MODIFY_DISPBASE(DVSSURF(pipe),
-			     i915_gem_obj_ggtt_offset(obj) + dvssurf_offset);
+	I915_WRITE(DVSSURF(pipe),
+		   i915_gem_obj_ggtt_offset(obj) + dvssurf_offset);
 	POSTING_READ(DVSSURF(pipe));
 }
 
@@ -507,9 +506,15 @@
 	/* Disable the scaler */
 	I915_WRITE(DVSSCALE(pipe), 0);
 	/* Flush double buffered register updates */
-	I915_MODIFY_DISPBASE(DVSSURF(pipe), 0);
+	I915_WRITE(DVSSURF(pipe), 0);
 	POSTING_READ(DVSSURF(pipe));
 
+	/*
+	 * Avoid underruns when disabling the sprite.
+	 * FIXME remove once watermark updates are done properly.
+	 */
+	intel_wait_for_vblank(dev, pipe);
+
 	intel_update_sprite_watermarks(plane, crtc, 0, 0, false, false);
 }
 
@@ -643,6 +648,15 @@
 	}
 }
 
+static bool colorkey_enabled(struct intel_plane *intel_plane)
+{
+	struct drm_intel_sprite_colorkey key;
+
+	intel_plane->get_colorkey(&intel_plane->base, &key);
+
+	return key.flags != I915_SET_COLORKEY_NONE;
+}
+
 static int
 intel_update_plane(struct drm_plane *plane, struct drm_crtc *crtc,
 		   struct drm_framebuffer *fb, int crtc_x, int crtc_y,
@@ -828,7 +842,7 @@
 	 * If the sprite is completely covering the primary plane,
 	 * we can disable the primary and save power.
 	 */
-	disable_primary = drm_rect_equals(&dst, &clip);
+	disable_primary = drm_rect_equals(&dst, &clip) && !colorkey_enabled(intel_plane);
 	WARN_ON(disable_primary && !visible && intel_crtc->active);
 
 	mutex_lock(&dev->struct_mutex);
diff --git a/drivers/gpu/drm/i915/intel_uncore.c b/drivers/gpu/drm/i915/intel_uncore.c
index 25cbe07..87df68f 100644
--- a/drivers/gpu/drm/i915/intel_uncore.c
+++ b/drivers/gpu/drm/i915/intel_uncore.c
@@ -64,7 +64,8 @@
 	__raw_posting_read(dev_priv, ECOBUS);
 }
 
-static void __gen6_gt_force_wake_get(struct drm_i915_private *dev_priv)
+static void __gen6_gt_force_wake_get(struct drm_i915_private *dev_priv,
+							int fw_engine)
 {
 	if (wait_for_atomic((__raw_i915_read32(dev_priv, FORCEWAKE_ACK) & 1) == 0,
 			    FORCEWAKE_ACK_TIMEOUT_MS))
@@ -89,7 +90,8 @@
 	__raw_posting_read(dev_priv, ECOBUS);
 }
 
-static void __gen6_gt_force_wake_mt_get(struct drm_i915_private *dev_priv)
+static void __gen6_gt_force_wake_mt_get(struct drm_i915_private *dev_priv,
+							int fw_engine)
 {
 	u32 forcewake_ack;
 
@@ -121,12 +123,12 @@
 	u32 gtfifodbg;
 
 	gtfifodbg = __raw_i915_read32(dev_priv, GTFIFODBG);
-	if (WARN(gtfifodbg & GT_FIFO_CPU_ERROR_MASK,
-	     "MMIO read or write has been dropped %x\n", gtfifodbg))
-		__raw_i915_write32(dev_priv, GTFIFODBG, GT_FIFO_CPU_ERROR_MASK);
+	if (WARN(gtfifodbg, "GT wake FIFO error 0x%x\n", gtfifodbg))
+		__raw_i915_write32(dev_priv, GTFIFODBG, gtfifodbg);
 }
 
-static void __gen6_gt_force_wake_put(struct drm_i915_private *dev_priv)
+static void __gen6_gt_force_wake_put(struct drm_i915_private *dev_priv,
+							int fw_engine)
 {
 	__raw_i915_write32(dev_priv, FORCEWAKE, 0);
 	/* something from same cacheline, but !FORCEWAKE */
@@ -134,7 +136,8 @@
 	gen6_gt_check_fifodbg(dev_priv);
 }
 
-static void __gen6_gt_force_wake_mt_put(struct drm_i915_private *dev_priv)
+static void __gen6_gt_force_wake_mt_put(struct drm_i915_private *dev_priv,
+							int fw_engine)
 {
 	__raw_i915_write32(dev_priv, FORCEWAKE_MT,
 			   _MASKED_BIT_DISABLE(FORCEWAKE_KERNEL));
@@ -147,12 +150,19 @@
 {
 	int ret = 0;
 
+	/* On VLV, the FIFO is shared by both SW and HW.
+	 * So, we need to read the FREE_ENTRIES every time */
+	if (IS_VALLEYVIEW(dev_priv->dev))
+		dev_priv->uncore.fifo_count =
+			__raw_i915_read32(dev_priv, GTFIFOCTL) &
+						GT_FIFO_FREE_ENTRIES_MASK;
+
 	if (dev_priv->uncore.fifo_count < GT_FIFO_NUM_RESERVED_ENTRIES) {
 		int loop = 500;
-		u32 fifo = __raw_i915_read32(dev_priv, GT_FIFO_FREE_ENTRIES);
+		u32 fifo = __raw_i915_read32(dev_priv, GTFIFOCTL) & GT_FIFO_FREE_ENTRIES_MASK;
 		while (fifo <= GT_FIFO_NUM_RESERVED_ENTRIES && loop--) {
 			udelay(10);
-			fifo = __raw_i915_read32(dev_priv, GT_FIFO_FREE_ENTRIES);
+			fifo = __raw_i915_read32(dev_priv, GTFIFOCTL) & GT_FIFO_FREE_ENTRIES_MASK;
 		}
 		if (WARN_ON(loop < 0 && fifo <= GT_FIFO_NUM_RESERVED_ENTRIES))
 			++ret;
@@ -171,38 +181,112 @@
 	__raw_posting_read(dev_priv, FORCEWAKE_ACK_VLV);
 }
 
-static void vlv_force_wake_get(struct drm_i915_private *dev_priv)
+static void __vlv_force_wake_get(struct drm_i915_private *dev_priv,
+						int fw_engine)
 {
-	if (wait_for_atomic((__raw_i915_read32(dev_priv, FORCEWAKE_ACK_VLV) & FORCEWAKE_KERNEL) == 0,
-			    FORCEWAKE_ACK_TIMEOUT_MS))
-		DRM_ERROR("Timed out waiting for forcewake old ack to clear.\n");
+	/* Check for Render Engine */
+	if (FORCEWAKE_RENDER & fw_engine) {
+		if (wait_for_atomic((__raw_i915_read32(dev_priv,
+						FORCEWAKE_ACK_VLV) &
+						FORCEWAKE_KERNEL) == 0,
+					FORCEWAKE_ACK_TIMEOUT_MS))
+			DRM_ERROR("Timed out: Render forcewake old ack to clear.\n");
 
-	__raw_i915_write32(dev_priv, FORCEWAKE_VLV,
-			   _MASKED_BIT_ENABLE(FORCEWAKE_KERNEL));
-	__raw_i915_write32(dev_priv, FORCEWAKE_MEDIA_VLV,
-			   _MASKED_BIT_ENABLE(FORCEWAKE_KERNEL));
+		__raw_i915_write32(dev_priv, FORCEWAKE_VLV,
+				   _MASKED_BIT_ENABLE(FORCEWAKE_KERNEL));
 
-	if (wait_for_atomic((__raw_i915_read32(dev_priv, FORCEWAKE_ACK_VLV) & FORCEWAKE_KERNEL),
-			    FORCEWAKE_ACK_TIMEOUT_MS))
-		DRM_ERROR("Timed out waiting for GT to ack forcewake request.\n");
+		if (wait_for_atomic((__raw_i915_read32(dev_priv,
+						FORCEWAKE_ACK_VLV) &
+						FORCEWAKE_KERNEL),
+					FORCEWAKE_ACK_TIMEOUT_MS))
+			DRM_ERROR("Timed out: waiting for Render to ack.\n");
+	}
 
-	if (wait_for_atomic((__raw_i915_read32(dev_priv, FORCEWAKE_ACK_MEDIA_VLV) &
-			     FORCEWAKE_KERNEL),
-			    FORCEWAKE_ACK_TIMEOUT_MS))
-		DRM_ERROR("Timed out waiting for media to ack forcewake request.\n");
+	/* Check for Media Engine */
+	if (FORCEWAKE_MEDIA & fw_engine) {
+		if (wait_for_atomic((__raw_i915_read32(dev_priv,
+						FORCEWAKE_ACK_MEDIA_VLV) &
+						FORCEWAKE_KERNEL) == 0,
+					FORCEWAKE_ACK_TIMEOUT_MS))
+			DRM_ERROR("Timed out: Media forcewake old ack to clear.\n");
+
+		__raw_i915_write32(dev_priv, FORCEWAKE_MEDIA_VLV,
+				   _MASKED_BIT_ENABLE(FORCEWAKE_KERNEL));
+
+		if (wait_for_atomic((__raw_i915_read32(dev_priv,
+						FORCEWAKE_ACK_MEDIA_VLV) &
+						FORCEWAKE_KERNEL),
+					FORCEWAKE_ACK_TIMEOUT_MS))
+			DRM_ERROR("Timed out: waiting for media to ack.\n");
+	}
 
 	/* WaRsForcewakeWaitTC0:vlv */
 	__gen6_gt_wait_for_thread_c0(dev_priv);
+
 }
 
-static void vlv_force_wake_put(struct drm_i915_private *dev_priv)
+static void __vlv_force_wake_put(struct drm_i915_private *dev_priv,
+					int fw_engine)
 {
-	__raw_i915_write32(dev_priv, FORCEWAKE_VLV,
-			   _MASKED_BIT_DISABLE(FORCEWAKE_KERNEL));
-	__raw_i915_write32(dev_priv, FORCEWAKE_MEDIA_VLV,
-			   _MASKED_BIT_DISABLE(FORCEWAKE_KERNEL));
+
+	/* Check for Render Engine */
+	if (FORCEWAKE_RENDER & fw_engine)
+		__raw_i915_write32(dev_priv, FORCEWAKE_VLV,
+					_MASKED_BIT_DISABLE(FORCEWAKE_KERNEL));
+
+
+	/* Check for Media Engine */
+	if (FORCEWAKE_MEDIA & fw_engine)
+		__raw_i915_write32(dev_priv, FORCEWAKE_MEDIA_VLV,
+				_MASKED_BIT_DISABLE(FORCEWAKE_KERNEL));
+
 	/* The below doubles as a POSTING_READ */
 	gen6_gt_check_fifodbg(dev_priv);
+
+}
+
+void vlv_force_wake_get(struct drm_i915_private *dev_priv,
+						int fw_engine)
+{
+	unsigned long irqflags;
+
+	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
+	if (FORCEWAKE_RENDER & fw_engine) {
+		if (dev_priv->uncore.fw_rendercount++ == 0)
+			dev_priv->uncore.funcs.force_wake_get(dev_priv,
+							FORCEWAKE_RENDER);
+	}
+	if (FORCEWAKE_MEDIA & fw_engine) {
+		if (dev_priv->uncore.fw_mediacount++ == 0)
+			dev_priv->uncore.funcs.force_wake_get(dev_priv,
+							FORCEWAKE_MEDIA);
+	}
+
+	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
+}
+
+void vlv_force_wake_put(struct drm_i915_private *dev_priv,
+						int fw_engine)
+{
+	unsigned long irqflags;
+
+	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
+
+	if (FORCEWAKE_RENDER & fw_engine) {
+		WARN_ON(dev_priv->uncore.fw_rendercount == 0);
+		if (--dev_priv->uncore.fw_rendercount == 0)
+			dev_priv->uncore.funcs.force_wake_put(dev_priv,
+							FORCEWAKE_RENDER);
+	}
+
+	if (FORCEWAKE_MEDIA & fw_engine) {
+		WARN_ON(dev_priv->uncore.fw_mediacount == 0);
+		if (--dev_priv->uncore.fw_mediacount == 0)
+			dev_priv->uncore.funcs.force_wake_put(dev_priv,
+							FORCEWAKE_MEDIA);
+	}
+
+	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
 }
 
 static void gen6_force_wake_work(struct work_struct *work)
@@ -213,7 +297,7 @@
 
 	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
 	if (--dev_priv->uncore.forcewake_count == 0)
-		dev_priv->uncore.funcs.force_wake_put(dev_priv);
+		dev_priv->uncore.funcs.force_wake_put(dev_priv, FORCEWAKE_ALL);
 	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
 }
 
@@ -248,6 +332,11 @@
 		DRM_INFO("Found %zuMB of eLLC\n", dev_priv->ellc_size);
 	}
 
+	/* clear out old GT FIFO errors */
+	if (IS_GEN6(dev) || IS_GEN7(dev))
+		__raw_i915_write32(dev_priv, GTFIFODBG,
+				   __raw_i915_read32(dev_priv, GTFIFODBG));
+
 	intel_uncore_forcewake_reset(dev);
 }
 
@@ -256,8 +345,6 @@
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	u32 reg_val;
 
-	intel_uncore_forcewake_reset(dev);
-
 	/* BIOS often leaves RC6 enabled, but disable it for hw init */
 	intel_disable_gt_powersave(dev);
 
@@ -281,29 +368,40 @@
  * be called at the beginning of the sequence followed by a call to
  * gen6_gt_force_wake_put() at the end of the sequence.
  */
-void gen6_gt_force_wake_get(struct drm_i915_private *dev_priv)
+void gen6_gt_force_wake_get(struct drm_i915_private *dev_priv, int fw_engine)
 {
 	unsigned long irqflags;
 
 	if (!dev_priv->uncore.funcs.force_wake_get)
 		return;
 
+	intel_runtime_pm_get(dev_priv);
+
+	/* Redirect to VLV specific routine */
+	if (IS_VALLEYVIEW(dev_priv->dev))
+		return vlv_force_wake_get(dev_priv, fw_engine);
+
 	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
 	if (dev_priv->uncore.forcewake_count++ == 0)
-		dev_priv->uncore.funcs.force_wake_get(dev_priv);
+		dev_priv->uncore.funcs.force_wake_get(dev_priv, FORCEWAKE_ALL);
 	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
 }
 
 /*
  * see gen6_gt_force_wake_get()
  */
-void gen6_gt_force_wake_put(struct drm_i915_private *dev_priv)
+void gen6_gt_force_wake_put(struct drm_i915_private *dev_priv, int fw_engine)
 {
 	unsigned long irqflags;
 
 	if (!dev_priv->uncore.funcs.force_wake_put)
 		return;
 
+	/* Redirect to VLV specific routine */
+	if (IS_VALLEYVIEW(dev_priv->dev))
+		return vlv_force_wake_put(dev_priv, fw_engine);
+
+
 	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
 	if (--dev_priv->uncore.forcewake_count == 0) {
 		dev_priv->uncore.forcewake_count++;
@@ -312,6 +410,8 @@
 				 1);
 	}
 	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
+
+	intel_runtime_pm_put(dev_priv);
 }
 
 /* We give fast paths for the really cool registers */
@@ -346,6 +446,13 @@
 	}
 }
 
+static void
+assert_device_not_suspended(struct drm_i915_private *dev_priv)
+{
+	WARN(HAS_RUNTIME_PM(dev_priv->dev) && dev_priv->pm.suspended,
+	     "Device suspended\n");
+}
+
 #define REG_READ_HEADER(x) \
 	unsigned long irqflags; \
 	u##x val = 0; \
@@ -379,16 +486,51 @@
 	REG_READ_HEADER(x); \
 	if (NEEDS_FORCE_WAKE((dev_priv), (reg))) { \
 		if (dev_priv->uncore.forcewake_count == 0) \
-			dev_priv->uncore.funcs.force_wake_get(dev_priv); \
+			dev_priv->uncore.funcs.force_wake_get(dev_priv, \
+							FORCEWAKE_ALL); \
 		val = __raw_i915_read##x(dev_priv, reg); \
 		if (dev_priv->uncore.forcewake_count == 0) \
-			dev_priv->uncore.funcs.force_wake_put(dev_priv); \
+			dev_priv->uncore.funcs.force_wake_put(dev_priv, \
+							FORCEWAKE_ALL); \
 	} else { \
 		val = __raw_i915_read##x(dev_priv, reg); \
 	} \
 	REG_READ_FOOTER; \
 }
 
+#define __vlv_read(x) \
+static u##x \
+vlv_read##x(struct drm_i915_private *dev_priv, off_t reg, bool trace) { \
+	unsigned fwengine = 0; \
+	unsigned *fwcount; \
+	REG_READ_HEADER(x); \
+	if (FORCEWAKE_VLV_RENDER_RANGE_OFFSET(reg)) {   \
+		fwengine = FORCEWAKE_RENDER;            \
+		fwcount = &dev_priv->uncore.fw_rendercount;    \
+	}                                               \
+	else if (FORCEWAKE_VLV_MEDIA_RANGE_OFFSET(reg)) {       \
+		fwengine = FORCEWAKE_MEDIA;             \
+		fwcount = &dev_priv->uncore.fw_mediacount;     \
+	}  \
+	if (fwengine != 0) {		\
+		if ((*fwcount)++ == 0) \
+			(dev_priv)->uncore.funcs.force_wake_get(dev_priv, \
+								fwengine); \
+		val = __raw_i915_read##x(dev_priv, reg); \
+		if (--(*fwcount) == 0) \
+			(dev_priv)->uncore.funcs.force_wake_put(dev_priv, \
+							fwengine); \
+	} else { \
+		val = __raw_i915_read##x(dev_priv, reg); \
+	} \
+	REG_READ_FOOTER; \
+}
+
+
+__vlv_read(8)
+__vlv_read(16)
+__vlv_read(32)
+__vlv_read(64)
 __gen6_read(8)
 __gen6_read(16)
 __gen6_read(32)
@@ -402,6 +544,7 @@
 __gen4_read(32)
 __gen4_read(64)
 
+#undef __vlv_read
 #undef __gen6_read
 #undef __gen5_read
 #undef __gen4_read
@@ -413,12 +556,15 @@
 	trace_i915_reg_rw(true, reg, val, sizeof(val), trace); \
 	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags)
 
+#define REG_WRITE_FOOTER \
+	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags)
+
 #define __gen4_write(x) \
 static void \
 gen4_write##x(struct drm_i915_private *dev_priv, off_t reg, u##x val, bool trace) { \
 	REG_WRITE_HEADER; \
 	__raw_i915_write##x(dev_priv, reg, val); \
-	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags); \
+	REG_WRITE_FOOTER; \
 }
 
 #define __gen5_write(x) \
@@ -427,7 +573,7 @@
 	REG_WRITE_HEADER; \
 	ilk_dummy_write(dev_priv); \
 	__raw_i915_write##x(dev_priv, reg, val); \
-	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags); \
+	REG_WRITE_FOOTER; \
 }
 
 #define __gen6_write(x) \
@@ -438,11 +584,12 @@
 	if (NEEDS_FORCE_WAKE((dev_priv), (reg))) { \
 		__fifo_ret = __gen6_gt_wait_for_fifo(dev_priv); \
 	} \
+	assert_device_not_suspended(dev_priv); \
 	__raw_i915_write##x(dev_priv, reg, val); \
 	if (unlikely(__fifo_ret)) { \
 		gen6_gt_check_fifodbg(dev_priv); \
 	} \
-	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags); \
+	REG_WRITE_FOOTER; \
 }
 
 #define __hsw_write(x) \
@@ -453,13 +600,14 @@
 	if (NEEDS_FORCE_WAKE((dev_priv), (reg))) { \
 		__fifo_ret = __gen6_gt_wait_for_fifo(dev_priv); \
 	} \
+	assert_device_not_suspended(dev_priv); \
 	hsw_unclaimed_reg_clear(dev_priv, reg); \
 	__raw_i915_write##x(dev_priv, reg, val); \
 	if (unlikely(__fifo_ret)) { \
 		gen6_gt_check_fifodbg(dev_priv); \
 	} \
 	hsw_unclaimed_reg_check(dev_priv, reg); \
-	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags); \
+	REG_WRITE_FOOTER; \
 }
 
 static const u32 gen8_shadowed_regs[] = {
@@ -486,16 +634,18 @@
 #define __gen8_write(x) \
 static void \
 gen8_write##x(struct drm_i915_private *dev_priv, off_t reg, u##x val, bool trace) { \
-	bool __needs_put = !is_gen8_shadowed(dev_priv, reg); \
+	bool __needs_put = reg < 0x40000 && !is_gen8_shadowed(dev_priv, reg); \
 	REG_WRITE_HEADER; \
 	if (__needs_put) { \
-		dev_priv->uncore.funcs.force_wake_get(dev_priv); \
+		dev_priv->uncore.funcs.force_wake_get(dev_priv, \
+							FORCEWAKE_ALL); \
 	} \
 	__raw_i915_write##x(dev_priv, reg, val); \
 	if (__needs_put) { \
-		dev_priv->uncore.funcs.force_wake_put(dev_priv); \
+		dev_priv->uncore.funcs.force_wake_put(dev_priv, \
+							FORCEWAKE_ALL); \
 	} \
-	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags); \
+	REG_WRITE_FOOTER; \
 }
 
 __gen8_write(8)
@@ -524,6 +674,7 @@
 #undef __gen6_write
 #undef __gen5_write
 #undef __gen4_write
+#undef REG_WRITE_FOOTER
 #undef REG_WRITE_HEADER
 
 void intel_uncore_init(struct drm_device *dev)
@@ -534,8 +685,8 @@
 			  gen6_force_wake_work);
 
 	if (IS_VALLEYVIEW(dev)) {
-		dev_priv->uncore.funcs.force_wake_get = vlv_force_wake_get;
-		dev_priv->uncore.funcs.force_wake_put = vlv_force_wake_put;
+		dev_priv->uncore.funcs.force_wake_get = __vlv_force_wake_get;
+		dev_priv->uncore.funcs.force_wake_put = __vlv_force_wake_put;
 	} else if (IS_HASWELL(dev) || IS_GEN8(dev)) {
 		dev_priv->uncore.funcs.force_wake_get = __gen6_gt_force_wake_mt_get;
 		dev_priv->uncore.funcs.force_wake_put = __gen6_gt_force_wake_mt_put;
@@ -552,9 +703,9 @@
 		 * forcewake being disabled.
 		 */
 		mutex_lock(&dev->struct_mutex);
-		__gen6_gt_force_wake_mt_get(dev_priv);
+		__gen6_gt_force_wake_mt_get(dev_priv, FORCEWAKE_ALL);
 		ecobus = __raw_i915_read32(dev_priv, ECOBUS);
-		__gen6_gt_force_wake_mt_put(dev_priv);
+		__gen6_gt_force_wake_mt_put(dev_priv, FORCEWAKE_ALL);
 		mutex_unlock(&dev->struct_mutex);
 
 		if (ecobus & FORCEWAKE_MT_ENABLE) {
@@ -601,10 +752,18 @@
 			dev_priv->uncore.funcs.mmio_writel  = gen6_write32;
 			dev_priv->uncore.funcs.mmio_writeq  = gen6_write64;
 		}
-		dev_priv->uncore.funcs.mmio_readb  = gen6_read8;
-		dev_priv->uncore.funcs.mmio_readw  = gen6_read16;
-		dev_priv->uncore.funcs.mmio_readl  = gen6_read32;
-		dev_priv->uncore.funcs.mmio_readq  = gen6_read64;
+
+		if (IS_VALLEYVIEW(dev)) {
+			dev_priv->uncore.funcs.mmio_readb  = vlv_read8;
+			dev_priv->uncore.funcs.mmio_readw  = vlv_read16;
+			dev_priv->uncore.funcs.mmio_readl  = vlv_read32;
+			dev_priv->uncore.funcs.mmio_readq  = vlv_read64;
+		} else {
+			dev_priv->uncore.funcs.mmio_readb  = gen6_read8;
+			dev_priv->uncore.funcs.mmio_readw  = gen6_read16;
+			dev_priv->uncore.funcs.mmio_readl  = gen6_read32;
+			dev_priv->uncore.funcs.mmio_readq  = gen6_read64;
+		}
 		break;
 	case 5:
 		dev_priv->uncore.funcs.mmio_writeb  = gen5_write8;
@@ -646,7 +805,7 @@
 	uint32_t size;
 	uint32_t gen_bitmask; /* support gens, 0x10 for 4, 0x30 for 4 and 5, etc. */
 } whitelist[] = {
-	{ RING_TIMESTAMP(RENDER_RING_BASE), 8, 0xF0 },
+	{ RING_TIMESTAMP(RENDER_RING_BASE), 8, 0x1F0 },
 };
 
 int i915_reg_read_ioctl(struct drm_device *dev,
@@ -687,6 +846,43 @@
 	return 0;
 }
 
+int i915_get_reset_stats_ioctl(struct drm_device *dev,
+			       void *data, struct drm_file *file)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_i915_reset_stats *args = data;
+	struct i915_ctx_hang_stats *hs;
+	int ret;
+
+	if (args->flags || args->pad)
+		return -EINVAL;
+
+	if (args->ctx_id == DEFAULT_CONTEXT_ID && !capable(CAP_SYS_ADMIN))
+		return -EPERM;
+
+	ret = mutex_lock_interruptible(&dev->struct_mutex);
+	if (ret)
+		return ret;
+
+	hs = i915_gem_context_get_hang_stats(dev, file, args->ctx_id);
+	if (IS_ERR(hs)) {
+		mutex_unlock(&dev->struct_mutex);
+		return PTR_ERR(hs);
+	}
+
+	if (capable(CAP_SYS_ADMIN))
+		args->reset_count = i915_reset_count(&dev_priv->gpu_error);
+	else
+		args->reset_count = 0;
+
+	args->batch_active = hs->batch_active;
+	args->batch_pending = hs->batch_pending;
+
+	mutex_unlock(&dev->struct_mutex);
+
+	return 0;
+}
+
 static int i965_reset_complete(struct drm_device *dev)
 {
 	u8 gdrst;
@@ -770,12 +966,12 @@
 
 	/* If reset with a user forcewake, try to restore, otherwise turn it off */
 	if (dev_priv->uncore.forcewake_count)
-		dev_priv->uncore.funcs.force_wake_get(dev_priv);
+		dev_priv->uncore.funcs.force_wake_get(dev_priv, FORCEWAKE_ALL);
 	else
-		dev_priv->uncore.funcs.force_wake_put(dev_priv);
+		dev_priv->uncore.funcs.force_wake_put(dev_priv, FORCEWAKE_ALL);
 
 	/* Restore fifo count */
-	dev_priv->uncore.fifo_count = __raw_i915_read32(dev_priv, GT_FIFO_FREE_ENTRIES);
+	dev_priv->uncore.fifo_count = __raw_i915_read32(dev_priv, GTFIFOCTL) & GT_FIFO_FREE_ENTRIES_MASK;
 
 	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
 	return ret;
@@ -793,15 +989,6 @@
 	}
 }
 
-void intel_uncore_clear_errors(struct drm_device *dev)
-{
-	struct drm_i915_private *dev_priv = dev->dev_private;
-
-	/* XXX needs spinlock around caller's grouping */
-	if (HAS_FPGA_DBG_UNCLAIMED(dev))
-		__raw_i915_write32(dev_priv, FPGA_DBG, FPGA_DBG_RM_NOCLAIM);
-}
-
 void intel_uncore_check_errors(struct drm_device *dev)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
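
The per-engine forcewake rework above keeps separate render/media reference counts (fw_rendercount/fw_mediacount) under uncore.lock. A tiny standalone model of just the refcounting behaviour (no locking or MMIO; the names and printfs are illustrative only):

	#include <stdio.h>

	#define FW_RENDER	(1 << 0)
	#define FW_MEDIA	(1 << 1)
	#define FW_ALL		(FW_RENDER | FW_MEDIA)

	static int fw_rendercount, fw_mediacount;

	static void fw_get(int fw_engine)
	{
		/* Only the 0 -> 1 transition would actually wake the hardware */
		if ((fw_engine & FW_RENDER) && fw_rendercount++ == 0)
			printf("wake render well\n");
		if ((fw_engine & FW_MEDIA) && fw_mediacount++ == 0)
			printf("wake media well\n");
	}

	static void fw_put(int fw_engine)
	{
		/* Only the 1 -> 0 transition would let the well sleep again */
		if ((fw_engine & FW_RENDER) && --fw_rendercount == 0)
			printf("release render well\n");
		if ((fw_engine & FW_MEDIA) && --fw_mediacount == 0)
			printf("release media well\n");
	}

	int main(void)
	{
		fw_get(FW_RENDER);	/* wakes render only */
		fw_get(FW_ALL);		/* wakes media; render already awake */
		fw_put(FW_RENDER);
		fw_put(FW_ALL);		/* last references: both wells released */
		return 0;
	}
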
diff --git a/drivers/gpu/drm/mga/mga_dma.c b/drivers/gpu/drm/mga/mga_dma.c
index 087db33..c3bf059 100644
--- a/drivers/gpu/drm/mga/mga_dma.c
+++ b/drivers/gpu/drm/mga/mga_dma.c
@@ -1075,10 +1075,10 @@
 
 		buf->file_priv = file_priv;
 
-		if (DRM_COPY_TO_USER(&d->request_indices[i],
+		if (copy_to_user(&d->request_indices[i],
 				     &buf->idx, sizeof(buf->idx)))
 			return -EFAULT;
-		if (DRM_COPY_TO_USER(&d->request_sizes[i],
+		if (copy_to_user(&d->request_sizes[i],
 				     &buf->total, sizeof(buf->total)))
 			return -EFAULT;
 
diff --git a/drivers/gpu/drm/mga/mga_drv.h b/drivers/gpu/drm/mga/mga_drv.h
index ca4bc54..fe45321 100644
--- a/drivers/gpu/drm/mga/mga_drv.h
+++ b/drivers/gpu/drm/mga/mga_drv.h
@@ -186,14 +186,14 @@
 extern u32 mga_get_vblank_counter(struct drm_device *dev, int crtc);
 extern int mga_driver_fence_wait(struct drm_device *dev, unsigned int *sequence);
 extern int mga_driver_vblank_wait(struct drm_device *dev, unsigned int *sequence);
-extern irqreturn_t mga_driver_irq_handler(DRM_IRQ_ARGS);
+extern irqreturn_t mga_driver_irq_handler(int irq, void *arg);
 extern void mga_driver_irq_preinstall(struct drm_device *dev);
 extern int mga_driver_irq_postinstall(struct drm_device *dev);
 extern void mga_driver_irq_uninstall(struct drm_device *dev);
 extern long mga_compat_ioctl(struct file *filp, unsigned int cmd,
 			     unsigned long arg);
 
-#define mga_flush_write_combine()	DRM_WRITEMEMORYBARRIER()
+#define mga_flush_write_combine()	wmb()
 
 #define MGA_READ8(reg)		DRM_READ8(dev_priv->mmio, (reg))
 #define MGA_READ(reg)		DRM_READ32(dev_priv->mmio, (reg))
diff --git a/drivers/gpu/drm/mga/mga_ioc32.c b/drivers/gpu/drm/mga/mga_ioc32.c
index 709e90d..86b4bb8 100644
--- a/drivers/gpu/drm/mga/mga_ioc32.c
+++ b/drivers/gpu/drm/mga/mga_ioc32.c
@@ -34,6 +34,7 @@
 
 #include <drm/drmP.h>
 #include <drm/mga_drm.h>
+#include "mga_drv.h"
 
 typedef struct drm32_mga_init {
 	int func;
diff --git a/drivers/gpu/drm/mga/mga_irq.c b/drivers/gpu/drm/mga/mga_irq.c
index 2b0ceb8..1b071b8 100644
--- a/drivers/gpu/drm/mga/mga_irq.c
+++ b/drivers/gpu/drm/mga/mga_irq.c
@@ -47,7 +47,7 @@
 }
 
 
-irqreturn_t mga_driver_irq_handler(DRM_IRQ_ARGS)
+irqreturn_t mga_driver_irq_handler(int irq, void *arg)
 {
 	struct drm_device *dev = (struct drm_device *) arg;
 	drm_mga_private_t *dev_priv = (drm_mga_private_t *) dev->dev_private;
@@ -79,7 +79,7 @@
 			MGA_WRITE(MGA_PRIMEND, prim_end);
 
 		atomic_inc(&dev_priv->last_fence_retired);
-		DRM_WAKEUP(&dev_priv->fence_queue);
+		wake_up(&dev_priv->fence_queue);
 		handled = 1;
 	}
 
@@ -128,7 +128,7 @@
 	 * by about a day rather than she wants to wait for years
 	 * using fences.
 	 */
-	DRM_WAIT_ON(ret, dev_priv->fence_queue, 3 * DRM_HZ,
+	DRM_WAIT_ON(ret, dev_priv->fence_queue, 3 * HZ,
 		    (((cur_fence = atomic_read(&dev_priv->last_fence_retired))
 		      - *sequence) <= (1 << 23)));
 
@@ -151,7 +151,7 @@
 {
 	drm_mga_private_t *dev_priv = (drm_mga_private_t *) dev->dev_private;
 
-	DRM_INIT_WAITQUEUE(&dev_priv->fence_queue);
+	init_waitqueue_head(&dev_priv->fence_queue);
 
 	/* Turn on soft trap interrupt.  Vertical blank interrupts are enabled
 	 * in mga_enable_vblank.
diff --git a/drivers/gpu/drm/mga/mga_state.c b/drivers/gpu/drm/mga/mga_state.c
index 37cc2fb..314685b 100644
--- a/drivers/gpu/drm/mga/mga_state.c
+++ b/drivers/gpu/drm/mga/mga_state.c
@@ -1029,7 +1029,7 @@
 		return -EINVAL;
 	}
 
-	if (DRM_COPY_TO_USER(param->value, &value, sizeof(int))) {
+	if (copy_to_user(param->value, &value, sizeof(int))) {
 		DRM_ERROR("copy_to_user\n");
 		return -EFAULT;
 	}
diff --git a/drivers/gpu/drm/mgag200/mgag200_cursor.c b/drivers/gpu/drm/mgag200/mgag200_cursor.c
index 801731a..9f9780b 100644
--- a/drivers/gpu/drm/mgag200/mgag200_cursor.c
+++ b/drivers/gpu/drm/mgag200/mgag200_cursor.c
@@ -22,8 +22,10 @@
 {
 	WREG8(MGA_CURPOSXL, 0);
 	WREG8(MGA_CURPOSXH, 0);
-	mgag200_bo_unpin(mdev->cursor.pixels_1);
-	mgag200_bo_unpin(mdev->cursor.pixels_2);
+	if (mdev->cursor.pixels_1->pin_count)
+		mgag200_bo_unpin(mdev->cursor.pixels_1);
+	if (mdev->cursor.pixels_2->pin_count)
+		mgag200_bo_unpin(mdev->cursor.pixels_2);
 }
 
 int mga_crtc_cursor_set(struct drm_crtc *crtc,
@@ -32,7 +34,7 @@
 			uint32_t width,
 			uint32_t height)
 {
-	struct drm_device *dev = (struct drm_device *)file_priv->minor->dev;
+	struct drm_device *dev = crtc->dev;
 	struct mga_device *mdev = (struct mga_device *)dev->dev_private;
 	struct mgag200_bo *pixels_1 = mdev->cursor.pixels_1;
 	struct mgag200_bo *pixels_2 = mdev->cursor.pixels_2;
diff --git a/drivers/gpu/drm/mgag200/mgag200_fb.c b/drivers/gpu/drm/mgag200/mgag200_fb.c
index 964f58c..13b7dd8 100644
--- a/drivers/gpu/drm/mgag200/mgag200_fb.c
+++ b/drivers/gpu/drm/mgag200/mgag200_fb.c
@@ -41,7 +41,7 @@
 	 * then the BO is being moved and we should
 	 * store up the damage until later.
 	 */
-	if (!in_interrupt())
+	if (drm_can_sleep())
 		ret = mgag200_bo_reserve(bo, true);
 	if (ret) {
 		if (ret != -EBUSY)
@@ -282,6 +282,11 @@
 {
 	struct mga_fbdev *mfbdev;
 	int ret;
+	int bpp_sel = 32;
+
+	/* prefer 16bpp on low end gpus with limited VRAM */
+	if (IS_G200_SE(mdev) && mdev->mc.vram_size < (2048*1024))
+		bpp_sel = 16;
 
 	mfbdev = devm_kzalloc(mdev->dev->dev, sizeof(struct mga_fbdev), GFP_KERNEL);
 	if (!mfbdev)
@@ -301,7 +306,7 @@
 	/* disable all the possible outputs/crtcs before entering KMS mode */
 	drm_helper_disable_unused_functions(mdev->dev);
 
-	drm_fb_helper_initial_config(&mfbdev->helper, 32);
+	drm_fb_helper_initial_config(&mfbdev->helper, bpp_sel);
 
 	return 0;
 }
diff --git a/drivers/gpu/drm/mgag200/mgag200_main.c b/drivers/gpu/drm/mgag200/mgag200_main.c
index b1120cb..26868e5 100644
--- a/drivers/gpu/drm/mgag200/mgag200_main.c
+++ b/drivers/gpu/drm/mgag200/mgag200_main.c
@@ -217,7 +217,10 @@
 
 	drm_mode_config_init(dev);
 	dev->mode_config.funcs = (void *)&mga_mode_funcs;
-	dev->mode_config.preferred_depth = 24;
+	if (IS_G200_SE(mdev) && mdev->mc.vram_size < (2048*1024))
+		dev->mode_config.preferred_depth = 16;
+	else
+		dev->mode_config.preferred_depth = 24;
 	dev->mode_config.prefer_shadow = 1;
 
 	r = mgag200_modeset_init(mdev);
@@ -310,7 +313,7 @@
 	return 0;
 }
 
-void mgag200_bo_unref(struct mgag200_bo **bo)
+static void mgag200_bo_unref(struct mgag200_bo **bo)
 {
 	struct ttm_buffer_object *tbo;
 
diff --git a/drivers/gpu/drm/mgag200/mgag200_mode.c b/drivers/gpu/drm/mgag200/mgag200_mode.c
index ee6ed63..9683747 100644
--- a/drivers/gpu/drm/mgag200/mgag200_mode.c
+++ b/drivers/gpu/drm/mgag200/mgag200_mode.c
@@ -691,7 +691,7 @@
    CRTCEXT0 has to be programmed last to trigger an update and make the
    new addr variable take effect.
  */
-void mga_set_start_address(struct drm_crtc *crtc, unsigned offset)
+static void mga_set_start_address(struct drm_crtc *crtc, unsigned offset)
 {
 	struct mga_device *mdev = crtc->dev->dev_private;
 	u32 addr;
@@ -1398,7 +1398,7 @@
 {
 }
 
-void mga_encoder_destroy(struct drm_encoder *encoder)
+static void mga_encoder_destroy(struct drm_encoder *encoder)
 {
 	struct mga_encoder *mga_encoder = to_mga_encoder(encoder);
 	drm_encoder_cleanup(encoder);
@@ -1519,11 +1519,11 @@
 		(mga_vga_calculate_mode_bandwidth(mode, bpp)
 			> (32700 * 1024))) {
 		return MODE_BANDWIDTH;
-	} else if (mode->type == G200_EH &&
+	} else if (mdev->type == G200_EH &&
 		(mga_vga_calculate_mode_bandwidth(mode, bpp)
 			> (37500 * 1024))) {
 		return MODE_BANDWIDTH;
-	} else if (mode->type == G200_ER &&
+	} else if (mdev->type == G200_ER &&
 		(mga_vga_calculate_mode_bandwidth(mode,
 			bpp) > (55000 * 1024))) {
 		return MODE_BANDWIDTH;
@@ -1558,7 +1558,7 @@
 	return MODE_OK;
 }
 
-struct drm_encoder *mga_connector_best_encoder(struct drm_connector
+static struct drm_encoder *mga_connector_best_encoder(struct drm_connector
 						  *connector)
 {
 	int enc_id = connector->encoder_ids[0];
diff --git a/drivers/gpu/drm/mgag200/mgag200_ttm.c b/drivers/gpu/drm/mgag200/mgag200_ttm.c
index 07b192f..adb5166 100644
--- a/drivers/gpu/drm/mgag200/mgag200_ttm.c
+++ b/drivers/gpu/drm/mgag200/mgag200_ttm.c
@@ -80,7 +80,7 @@
 	return 0;
 }
 
-void
+static void
 mgag200_ttm_global_release(struct mga_device *ast)
 {
 	if (ast->ttm.mem_global_ref.release == NULL)
@@ -102,7 +102,7 @@
 	kfree(bo);
 }
 
-bool mgag200_ttm_bo_is_mgag200_bo(struct ttm_buffer_object *bo)
+static bool mgag200_ttm_bo_is_mgag200_bo(struct ttm_buffer_object *bo)
 {
 	if (bo->destroy == &mgag200_bo_ttm_destroy)
 		return true;
@@ -208,7 +208,7 @@
 };
 
 
-struct ttm_tt *mgag200_ttm_tt_create(struct ttm_bo_device *bdev,
+static struct ttm_tt *mgag200_ttm_tt_create(struct ttm_bo_device *bdev,
 				 unsigned long size, uint32_t page_flags,
 				 struct page *dummy_read_page)
 {
diff --git a/drivers/gpu/drm/msm/Kconfig b/drivers/gpu/drm/msm/Kconfig
index f39ab75..c69d1e0 100644
--- a/drivers/gpu/drm/msm/Kconfig
+++ b/drivers/gpu/drm/msm/Kconfig
@@ -2,8 +2,8 @@
 config DRM_MSM
 	tristate "MSM DRM"
 	depends on DRM
-	depends on ARCH_MSM
-	depends on ARCH_MSM8960
+	depends on MSM_IOMMU
+	depends on (ARCH_MSM && ARCH_MSM8960) || (ARM && COMPILE_TEST)
 	select DRM_KMS_HELPER
 	select SHMEM
 	select TMPFS
diff --git a/drivers/gpu/drm/msm/Makefile b/drivers/gpu/drm/msm/Makefile
index e5fa12b..4f977a5 100644
--- a/drivers/gpu/drm/msm/Makefile
+++ b/drivers/gpu/drm/msm/Makefile
@@ -12,18 +12,27 @@
 	hdmi/hdmi_i2c.o \
 	hdmi/hdmi_phy_8960.o \
 	hdmi/hdmi_phy_8x60.o \
-	mdp4/mdp4_crtc.o \
-	mdp4/mdp4_dtv_encoder.o \
-	mdp4/mdp4_format.o \
-	mdp4/mdp4_irq.o \
-	mdp4/mdp4_kms.o \
-	mdp4/mdp4_plane.o \
+	hdmi/hdmi_phy_8x74.o \
+	mdp/mdp_format.o \
+	mdp/mdp_kms.o \
+	mdp/mdp4/mdp4_crtc.o \
+	mdp/mdp4/mdp4_dtv_encoder.o \
+	mdp/mdp4/mdp4_irq.o \
+	mdp/mdp4/mdp4_kms.o \
+	mdp/mdp4/mdp4_plane.o \
+	mdp/mdp5/mdp5_crtc.o \
+	mdp/mdp5/mdp5_encoder.o \
+	mdp/mdp5/mdp5_irq.o \
+	mdp/mdp5/mdp5_kms.o \
+	mdp/mdp5/mdp5_plane.o \
+	mdp/mdp5/mdp5_smp.o \
 	msm_drv.o \
 	msm_fb.o \
 	msm_gem.o \
 	msm_gem_prime.o \
 	msm_gem_submit.o \
 	msm_gpu.o \
+	msm_iommu.o \
 	msm_ringbuffer.o
 
 msm-$(CONFIG_DRM_MSM_FBDEV) += msm_fbdev.o
diff --git a/drivers/gpu/drm/msm/NOTES b/drivers/gpu/drm/msm/NOTES
index e036f6c1..9c4255b 100644
--- a/drivers/gpu/drm/msm/NOTES
+++ b/drivers/gpu/drm/msm/NOTES
@@ -4,7 +4,7 @@
 display controller blocks at play:
  + MDP3 - ?? seems to be what is on geeksphone peak device
  + MDP4 - S3 (APQ8060, touchpad), S4-pro (APQ8064, nexus4 & ifc6410)
- + MDSS - snapdragon 800
+ + MDP5 - snapdragon 800
 
 (I don't have a completely clear picture on which display controller
 maps to which part #)
@@ -46,6 +46,24 @@
 may have their own irqs which they install themselves.  For this reason
 the display controller is the "master" device.
 
+For MDP5, the mapping is:
+
+  plane   -> PIPE{RGBn,VIGn}             \
+  crtc    -> LM (layer mixer)            |-> MDP "device"
+  encoder -> INTF                        /
+  connector -> HDMI/DSI/eDP/etc          --> other device(s)
+
+Unlike MDP4, it appears we can get by with a single encoder, rather
+than needing a different implementation for DTV, DSI, etc.  (I.e. the
+register interface is the same, just different bases.)
+
+Also unlike MDP4, with MDP5 all the IRQs for other blocks (HDMI, DSI,
+etc) are routed through MDP.
+
+And finally, MDP5 has this "Shared Memory Pool" (called "SMP"), from
+which blocks need to be allocated to the active pipes based on fetch
+stride.
+
 Each connector probably ends up being a separate device, just for the
 logistics of finding/mapping io region, irq, etc.  Ideally we would
 have a better way than just stashing the platform device in a global
diff --git a/drivers/gpu/drm/msm/adreno/a2xx.xml.h b/drivers/gpu/drm/msm/adreno/a2xx.xml.h
index 9588098..85d615e 100644
--- a/drivers/gpu/drm/msm/adreno/a2xx.xml.h
+++ b/drivers/gpu/drm/msm/adreno/a2xx.xml.h
@@ -8,12 +8,13 @@
 git clone https://github.com/freedreno/envytools.git
 
 The rules-ng-ng source files this header was generated from are:
-- /home/robclark/src/freedreno/envytools/rnndb/adreno.xml              (    327 bytes, from 2013-07-05 19:21:12)
-- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml (   1453 bytes, from 2013-03-31 16:51:27)
-- /home/robclark/src/freedreno/envytools/rnndb/a2xx/a2xx.xml           (  31003 bytes, from 2013-09-19 18:50:16)
-- /home/robclark/src/freedreno/envytools/rnndb/adreno_common.xml       (   8983 bytes, from 2013-07-24 01:38:36)
-- /home/robclark/src/freedreno/envytools/rnndb/adreno_pm4.xml          (   9759 bytes, from 2013-09-10 00:52:33)
-- /home/robclark/src/freedreno/envytools/rnndb/a3xx/a3xx.xml           (  51983 bytes, from 2013-09-10 00:52:32)
+- /home/robclark/src/freedreno/envytools/rnndb/adreno.xml               (    364 bytes, from 2013-11-30 14:47:15)
+- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml  (   1453 bytes, from 2013-03-31 16:51:27)
+- /home/robclark/src/freedreno/envytools/rnndb/adreno/a2xx.xml          (  32814 bytes, from 2013-11-30 15:07:33)
+- /home/robclark/src/freedreno/envytools/rnndb/adreno/adreno_common.xml (   8900 bytes, from 2013-10-22 23:57:49)
+- /home/robclark/src/freedreno/envytools/rnndb/adreno/adreno_pm4.xml    (  10574 bytes, from 2013-11-13 05:44:45)
+- /home/robclark/src/freedreno/envytools/rnndb/adreno/a3xx.xml          (  53644 bytes, from 2013-11-30 15:07:33)
+- /home/robclark/src/freedreno/envytools/rnndb/adreno/a4xx.xml          (   8344 bytes, from 2013-11-30 14:49:47)
 
 Copyright (C) 2013 by the following authors:
 - Rob Clark <robdclark@gmail.com> (robclark)
@@ -202,6 +203,12 @@
 	SAMPLE_0123 = 6,
 };
 
+enum adreno_mmu_clnt_beh {
+	BEH_NEVR = 0,
+	BEH_TRAN_RNG = 1,
+	BEH_TRAN_FLT = 2,
+};
+
 enum sq_tex_clamp {
 	SQ_TEX_WRAP = 0,
 	SQ_TEX_MIRROR = 1,
@@ -238,6 +245,92 @@
 
 #define REG_A2XX_CP_PFP_UCODE_DATA				0x000000c1
 
+#define REG_A2XX_MH_MMU_CONFIG					0x00000040
+#define A2XX_MH_MMU_CONFIG_MMU_ENABLE				0x00000001
+#define A2XX_MH_MMU_CONFIG_SPLIT_MODE_ENABLE			0x00000002
+#define A2XX_MH_MMU_CONFIG_RB_W_CLNT_BEHAVIOR__MASK		0x00000030
+#define A2XX_MH_MMU_CONFIG_RB_W_CLNT_BEHAVIOR__SHIFT		4
+static inline uint32_t A2XX_MH_MMU_CONFIG_RB_W_CLNT_BEHAVIOR(enum adreno_mmu_clnt_beh val)
+{
+	return ((val) << A2XX_MH_MMU_CONFIG_RB_W_CLNT_BEHAVIOR__SHIFT) & A2XX_MH_MMU_CONFIG_RB_W_CLNT_BEHAVIOR__MASK;
+}
+#define A2XX_MH_MMU_CONFIG_CP_W_CLNT_BEHAVIOR__MASK		0x000000c0
+#define A2XX_MH_MMU_CONFIG_CP_W_CLNT_BEHAVIOR__SHIFT		6
+static inline uint32_t A2XX_MH_MMU_CONFIG_CP_W_CLNT_BEHAVIOR(enum adreno_mmu_clnt_beh val)
+{
+	return ((val) << A2XX_MH_MMU_CONFIG_CP_W_CLNT_BEHAVIOR__SHIFT) & A2XX_MH_MMU_CONFIG_CP_W_CLNT_BEHAVIOR__MASK;
+}
+#define A2XX_MH_MMU_CONFIG_CP_R0_CLNT_BEHAVIOR__MASK		0x00000300
+#define A2XX_MH_MMU_CONFIG_CP_R0_CLNT_BEHAVIOR__SHIFT		8
+static inline uint32_t A2XX_MH_MMU_CONFIG_CP_R0_CLNT_BEHAVIOR(enum adreno_mmu_clnt_beh val)
+{
+	return ((val) << A2XX_MH_MMU_CONFIG_CP_R0_CLNT_BEHAVIOR__SHIFT) & A2XX_MH_MMU_CONFIG_CP_R0_CLNT_BEHAVIOR__MASK;
+}
+#define A2XX_MH_MMU_CONFIG_CP_R1_CLNT_BEHAVIOR__MASK		0x00000c00
+#define A2XX_MH_MMU_CONFIG_CP_R1_CLNT_BEHAVIOR__SHIFT		10
+static inline uint32_t A2XX_MH_MMU_CONFIG_CP_R1_CLNT_BEHAVIOR(enum adreno_mmu_clnt_beh val)
+{
+	return ((val) << A2XX_MH_MMU_CONFIG_CP_R1_CLNT_BEHAVIOR__SHIFT) & A2XX_MH_MMU_CONFIG_CP_R1_CLNT_BEHAVIOR__MASK;
+}
+#define A2XX_MH_MMU_CONFIG_CP_R2_CLNT_BEHAVIOR__MASK		0x00003000
+#define A2XX_MH_MMU_CONFIG_CP_R2_CLNT_BEHAVIOR__SHIFT		12
+static inline uint32_t A2XX_MH_MMU_CONFIG_CP_R2_CLNT_BEHAVIOR(enum adreno_mmu_clnt_beh val)
+{
+	return ((val) << A2XX_MH_MMU_CONFIG_CP_R2_CLNT_BEHAVIOR__SHIFT) & A2XX_MH_MMU_CONFIG_CP_R2_CLNT_BEHAVIOR__MASK;
+}
+#define A2XX_MH_MMU_CONFIG_CP_R3_CLNT_BEHAVIOR__MASK		0x0000c000
+#define A2XX_MH_MMU_CONFIG_CP_R3_CLNT_BEHAVIOR__SHIFT		14
+static inline uint32_t A2XX_MH_MMU_CONFIG_CP_R3_CLNT_BEHAVIOR(enum adreno_mmu_clnt_beh val)
+{
+	return ((val) << A2XX_MH_MMU_CONFIG_CP_R3_CLNT_BEHAVIOR__SHIFT) & A2XX_MH_MMU_CONFIG_CP_R3_CLNT_BEHAVIOR__MASK;
+}
+#define A2XX_MH_MMU_CONFIG_CP_R4_CLNT_BEHAVIOR__MASK		0x00030000
+#define A2XX_MH_MMU_CONFIG_CP_R4_CLNT_BEHAVIOR__SHIFT		16
+static inline uint32_t A2XX_MH_MMU_CONFIG_CP_R4_CLNT_BEHAVIOR(enum adreno_mmu_clnt_beh val)
+{
+	return ((val) << A2XX_MH_MMU_CONFIG_CP_R4_CLNT_BEHAVIOR__SHIFT) & A2XX_MH_MMU_CONFIG_CP_R4_CLNT_BEHAVIOR__MASK;
+}
+#define A2XX_MH_MMU_CONFIG_VGT_R0_CLNT_BEHAVIOR__MASK		0x000c0000
+#define A2XX_MH_MMU_CONFIG_VGT_R0_CLNT_BEHAVIOR__SHIFT		18
+static inline uint32_t A2XX_MH_MMU_CONFIG_VGT_R0_CLNT_BEHAVIOR(enum adreno_mmu_clnt_beh val)
+{
+	return ((val) << A2XX_MH_MMU_CONFIG_VGT_R0_CLNT_BEHAVIOR__SHIFT) & A2XX_MH_MMU_CONFIG_VGT_R0_CLNT_BEHAVIOR__MASK;
+}
+#define A2XX_MH_MMU_CONFIG_VGT_R1_CLNT_BEHAVIOR__MASK		0x00300000
+#define A2XX_MH_MMU_CONFIG_VGT_R1_CLNT_BEHAVIOR__SHIFT		20
+static inline uint32_t A2XX_MH_MMU_CONFIG_VGT_R1_CLNT_BEHAVIOR(enum adreno_mmu_clnt_beh val)
+{
+	return ((val) << A2XX_MH_MMU_CONFIG_VGT_R1_CLNT_BEHAVIOR__SHIFT) & A2XX_MH_MMU_CONFIG_VGT_R1_CLNT_BEHAVIOR__MASK;
+}
+#define A2XX_MH_MMU_CONFIG_TC_R_CLNT_BEHAVIOR__MASK		0x00c00000
+#define A2XX_MH_MMU_CONFIG_TC_R_CLNT_BEHAVIOR__SHIFT		22
+static inline uint32_t A2XX_MH_MMU_CONFIG_TC_R_CLNT_BEHAVIOR(enum adreno_mmu_clnt_beh val)
+{
+	return ((val) << A2XX_MH_MMU_CONFIG_TC_R_CLNT_BEHAVIOR__SHIFT) & A2XX_MH_MMU_CONFIG_TC_R_CLNT_BEHAVIOR__MASK;
+}
+#define A2XX_MH_MMU_CONFIG_PA_W_CLNT_BEHAVIOR__MASK		0x03000000
+#define A2XX_MH_MMU_CONFIG_PA_W_CLNT_BEHAVIOR__SHIFT		24
+static inline uint32_t A2XX_MH_MMU_CONFIG_PA_W_CLNT_BEHAVIOR(enum adreno_mmu_clnt_beh val)
+{
+	return ((val) << A2XX_MH_MMU_CONFIG_PA_W_CLNT_BEHAVIOR__SHIFT) & A2XX_MH_MMU_CONFIG_PA_W_CLNT_BEHAVIOR__MASK;
+}
+
+#define REG_A2XX_MH_MMU_VA_RANGE				0x00000041
+
+#define REG_A2XX_MH_MMU_PT_BASE					0x00000042
+
+#define REG_A2XX_MH_MMU_PAGE_FAULT				0x00000043
+
+#define REG_A2XX_MH_MMU_TRAN_ERROR				0x00000044
+
+#define REG_A2XX_MH_MMU_INVALIDATE				0x00000045
+
+#define REG_A2XX_MH_MMU_MPU_BASE				0x00000046
+
+#define REG_A2XX_MH_MMU_MPU_END					0x00000047
+
+#define REG_A2XX_NQWAIT_UNTIL					0x00000394
+
 #define REG_A2XX_RBBM_PERFCOUNTER1_SELECT			0x00000395
 
 #define REG_A2XX_RBBM_PERFCOUNTER1_LO				0x00000397
@@ -276,20 +369,6 @@
 
 #define REG_A2XX_CP_PERFCOUNTER_HI				0x00000447
 
-#define REG_A2XX_CP_ST_BASE					0x0000044d
-
-#define REG_A2XX_CP_ST_BUFSZ					0x0000044e
-
-#define REG_A2XX_CP_IB1_BASE					0x00000458
-
-#define REG_A2XX_CP_IB1_BUFSZ					0x00000459
-
-#define REG_A2XX_CP_IB2_BASE					0x0000045a
-
-#define REG_A2XX_CP_IB2_BUFSZ					0x0000045b
-
-#define REG_A2XX_CP_STAT					0x0000047f
-
 #define REG_A2XX_RBBM_STATUS					0x000005d0
 #define A2XX_RBBM_STATUS_CMDFIFO_AVAIL__MASK			0x0000001f
 #define A2XX_RBBM_STATUS_CMDFIFO_AVAIL__SHIFT			0
@@ -808,6 +887,12 @@
 
 #define REG_A2XX_SQ_VS_PROGRAM					0x000021f7
 
+#define REG_A2XX_VGT_EVENT_INITIATOR				0x000021f9
+
+#define REG_A2XX_VGT_DRAW_INITIATOR				0x000021fc
+
+#define REG_A2XX_VGT_IMMED_DATA					0x000021fd
+
 #define REG_A2XX_RB_DEPTHCONTROL				0x00002200
 #define A2XX_RB_DEPTHCONTROL_STENCIL_ENABLE			0x00000001
 #define A2XX_RB_DEPTHCONTROL_Z_ENABLE				0x00000002
diff --git a/drivers/gpu/drm/msm/adreno/a3xx.xml.h b/drivers/gpu/drm/msm/adreno/a3xx.xml.h
index d4afdf6..a7be561 100644
--- a/drivers/gpu/drm/msm/adreno/a3xx.xml.h
+++ b/drivers/gpu/drm/msm/adreno/a3xx.xml.h
@@ -8,12 +8,13 @@
 git clone https://github.com/freedreno/envytools.git
 
 The rules-ng-ng source files this header was generated from are:
-- /home/robclark/src/freedreno/envytools/rnndb/adreno.xml              (    327 bytes, from 2013-07-05 19:21:12)
-- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml (   1453 bytes, from 2013-03-31 16:51:27)
-- /home/robclark/src/freedreno/envytools/rnndb/a2xx/a2xx.xml           (  31003 bytes, from 2013-09-19 18:50:16)
-- /home/robclark/src/freedreno/envytools/rnndb/adreno_common.xml       (   8983 bytes, from 2013-07-24 01:38:36)
-- /home/robclark/src/freedreno/envytools/rnndb/adreno_pm4.xml          (   9759 bytes, from 2013-09-10 00:52:33)
-- /home/robclark/src/freedreno/envytools/rnndb/a3xx/a3xx.xml           (  51983 bytes, from 2013-09-10 00:52:32)
+- /home/robclark/src/freedreno/envytools/rnndb/adreno.xml               (    364 bytes, from 2013-11-30 14:47:15)
+- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml  (   1453 bytes, from 2013-03-31 16:51:27)
+- /home/robclark/src/freedreno/envytools/rnndb/adreno/a2xx.xml          (  32814 bytes, from 2013-11-30 15:07:33)
+- /home/robclark/src/freedreno/envytools/rnndb/adreno/adreno_common.xml (   8900 bytes, from 2013-10-22 23:57:49)
+- /home/robclark/src/freedreno/envytools/rnndb/adreno/adreno_pm4.xml    (  10574 bytes, from 2013-11-13 05:44:45)
+- /home/robclark/src/freedreno/envytools/rnndb/adreno/a3xx.xml          (  53644 bytes, from 2013-11-30 15:07:33)
+- /home/robclark/src/freedreno/envytools/rnndb/adreno/a4xx.xml          (   8344 bytes, from 2013-11-30 14:49:47)
 
 Copyright (C) 2013 by the following authors:
 - Rob Clark <robdclark@gmail.com> (robclark)
@@ -292,6 +293,8 @@
 #define A3XX_RBBM_STATUS_GPU_BUSY_NOHC				0x40000000
 #define A3XX_RBBM_STATUS_GPU_BUSY				0x80000000
 
+#define REG_A3XX_RBBM_NQWAIT_UNTIL				0x00000040
+
 #define REG_A3XX_RBBM_WAIT_IDLE_CLOCKS_CTL			0x00000033
 
 #define REG_A3XX_RBBM_INTERFACE_HANG_INT_CTL			0x00000050
@@ -304,6 +307,8 @@
 
 #define REG_A3XX_RBBM_INTERFACE_HANG_MASK_CTL3			0x0000005a
 
+#define REG_A3XX_RBBM_INT_SET_CMD				0x00000060
+
 #define REG_A3XX_RBBM_INT_CLEAR_CMD				0x00000061
 
 #define REG_A3XX_RBBM_INT_0_MASK				0x00000063
@@ -937,13 +942,13 @@
 	return ((util_float_to_half(val)) << A3XX_RB_BLEND_ALPHA_FLOAT__SHIFT) & A3XX_RB_BLEND_ALPHA_FLOAT__MASK;
 }
 
-#define REG_A3XX_UNKNOWN_20E8					0x000020e8
+#define REG_A3XX_RB_CLEAR_COLOR_DW0				0x000020e8
 
-#define REG_A3XX_UNKNOWN_20E9					0x000020e9
+#define REG_A3XX_RB_CLEAR_COLOR_DW1				0x000020e9
 
-#define REG_A3XX_UNKNOWN_20EA					0x000020ea
+#define REG_A3XX_RB_CLEAR_COLOR_DW2				0x000020ea
 
-#define REG_A3XX_UNKNOWN_20EB					0x000020eb
+#define REG_A3XX_RB_CLEAR_COLOR_DW3				0x000020eb
 
 #define REG_A3XX_RB_COPY_CONTROL				0x000020ec
 #define A3XX_RB_COPY_CONTROL_MSAA_RESOLVE__MASK			0x00000003
@@ -1026,7 +1031,7 @@
 #define A3XX_RB_DEPTH_CONTROL_BF_ENABLE				0x00000080
 #define A3XX_RB_DEPTH_CONTROL_Z_TEST_ENABLE			0x80000000
 
-#define REG_A3XX_UNKNOWN_2101					0x00002101
+#define REG_A3XX_RB_DEPTH_CLEAR					0x00002101
 
 #define REG_A3XX_RB_DEPTH_INFO					0x00002102
 #define A3XX_RB_DEPTH_INFO_DEPTH_FORMAT__MASK			0x00000001
@@ -1103,11 +1108,11 @@
 	return ((val) << A3XX_RB_STENCIL_CONTROL_ZFAIL_BF__SHIFT) & A3XX_RB_STENCIL_CONTROL_ZFAIL_BF__MASK;
 }
 
-#define REG_A3XX_UNKNOWN_2105					0x00002105
+#define REG_A3XX_RB_STENCIL_CLEAR				0x00002105
 
-#define REG_A3XX_UNKNOWN_2106					0x00002106
+#define REG_A3XX_RB_STENCIL_BUF_INFO				0x00002106
 
-#define REG_A3XX_UNKNOWN_2107					0x00002107
+#define REG_A3XX_RB_STENCIL_BUF_PITCH				0x00002107
 
 #define REG_A3XX_RB_STENCILREFMASK				0x00002108
 #define A3XX_RB_STENCILREFMASK_STENCILREF__MASK			0x000000ff
@@ -1149,20 +1154,31 @@
 	return ((val) << A3XX_RB_STENCILREFMASK_BF_STENCILWRITEMASK__SHIFT) & A3XX_RB_STENCILREFMASK_BF_STENCILWRITEMASK__MASK;
 }
 
-#define REG_A3XX_PA_SC_WINDOW_OFFSET				0x0000210e
-#define A3XX_PA_SC_WINDOW_OFFSET_X__MASK			0x0000ffff
-#define A3XX_PA_SC_WINDOW_OFFSET_X__SHIFT			0
-static inline uint32_t A3XX_PA_SC_WINDOW_OFFSET_X(uint32_t val)
+#define REG_A3XX_RB_LRZ_VSC_CONTROL				0x0000210c
+#define A3XX_RB_LRZ_VSC_CONTROL_BINNING_ENABLE			0x00000002
+
+#define REG_A3XX_RB_WINDOW_OFFSET				0x0000210e
+#define A3XX_RB_WINDOW_OFFSET_X__MASK				0x0000ffff
+#define A3XX_RB_WINDOW_OFFSET_X__SHIFT				0
+static inline uint32_t A3XX_RB_WINDOW_OFFSET_X(uint32_t val)
 {
-	return ((val) << A3XX_PA_SC_WINDOW_OFFSET_X__SHIFT) & A3XX_PA_SC_WINDOW_OFFSET_X__MASK;
+	return ((val) << A3XX_RB_WINDOW_OFFSET_X__SHIFT) & A3XX_RB_WINDOW_OFFSET_X__MASK;
 }
-#define A3XX_PA_SC_WINDOW_OFFSET_Y__MASK			0xffff0000
-#define A3XX_PA_SC_WINDOW_OFFSET_Y__SHIFT			16
-static inline uint32_t A3XX_PA_SC_WINDOW_OFFSET_Y(uint32_t val)
+#define A3XX_RB_WINDOW_OFFSET_Y__MASK				0xffff0000
+#define A3XX_RB_WINDOW_OFFSET_Y__SHIFT				16
+static inline uint32_t A3XX_RB_WINDOW_OFFSET_Y(uint32_t val)
 {
-	return ((val) << A3XX_PA_SC_WINDOW_OFFSET_Y__SHIFT) & A3XX_PA_SC_WINDOW_OFFSET_Y__MASK;
+	return ((val) << A3XX_RB_WINDOW_OFFSET_Y__SHIFT) & A3XX_RB_WINDOW_OFFSET_Y__MASK;
 }
 
+#define REG_A3XX_RB_SAMPLE_COUNT_CONTROL			0x00002110
+
+#define REG_A3XX_RB_SAMPLE_COUNT_ADDR				0x00002111
+
+#define REG_A3XX_RB_Z_CLAMP_MIN					0x00002114
+
+#define REG_A3XX_RB_Z_CLAMP_MAX					0x00002115
+
 #define REG_A3XX_PC_VSTREAM_CONTROL				0x000021e4
 
 #define REG_A3XX_PC_VERTEX_REUSE_BLOCK_CNTL			0x000021ea
@@ -1309,6 +1325,8 @@
 
 #define REG_A3XX_HLSQ_CL_KERNEL_GROUP_X_REG			0x00002215
 
+#define REG_A3XX_HLSQ_CL_KERNEL_GROUP_Y_REG			0x00002216
+
 #define REG_A3XX_HLSQ_CL_KERNEL_GROUP_Z_REG			0x00002217
 
 #define REG_A3XX_HLSQ_CL_WG_OFFSET_REG				0x0000221a
@@ -1491,12 +1509,13 @@
 
 #define REG_A3XX_SP_SP_CTRL_REG					0x000022c0
 #define A3XX_SP_SP_CTRL_REG_RESOLVE				0x00010000
-#define A3XX_SP_SP_CTRL_REG_CONSTMODE__MASK			0x000c0000
+#define A3XX_SP_SP_CTRL_REG_CONSTMODE__MASK			0x00040000
 #define A3XX_SP_SP_CTRL_REG_CONSTMODE__SHIFT			18
 static inline uint32_t A3XX_SP_SP_CTRL_REG_CONSTMODE(uint32_t val)
 {
 	return ((val) << A3XX_SP_SP_CTRL_REG_CONSTMODE__SHIFT) & A3XX_SP_SP_CTRL_REG_CONSTMODE__MASK;
 }
+#define A3XX_SP_SP_CTRL_REG_BINNING				0x00080000
 #define A3XX_SP_SP_CTRL_REG_SLEEPMODE__MASK			0x00300000
 #define A3XX_SP_SP_CTRL_REG_SLEEPMODE__SHIFT			20
 static inline uint32_t A3XX_SP_SP_CTRL_REG_SLEEPMODE(uint32_t val)
@@ -1669,7 +1688,7 @@
 
 #define REG_A3XX_SP_VS_OBJ_START_REG				0x000022d5
 
-#define REG_A3XX_SP_VS_PVT_MEM_CTRL_REG				0x000022d6
+#define REG_A3XX_SP_VS_PVT_MEM_PARAM_REG			0x000022d6
 
 #define REG_A3XX_SP_VS_PVT_MEM_ADDR_REG				0x000022d7
 
@@ -1772,7 +1791,7 @@
 
 #define REG_A3XX_SP_FS_OBJ_START_REG				0x000022e3
 
-#define REG_A3XX_SP_FS_PVT_MEM_CTRL_REG				0x000022e4
+#define REG_A3XX_SP_FS_PVT_MEM_PARAM_REG			0x000022e4
 
 #define REG_A3XX_SP_FS_PVT_MEM_ADDR_REG				0x000022e5
 
@@ -1943,6 +1962,9 @@
 
 static inline uint32_t REG_A3XX_VSC_PIPE_DATA_LENGTH(uint32_t i0) { return 0x00000c08 + 0x3*i0; }
 
+#define REG_A3XX_VSC_BIN_CONTROL				0x00000c3c
+#define A3XX_VSC_BIN_CONTROL_BINNING_ENABLE			0x00000001
+
 #define REG_A3XX_UNKNOWN_0C3D					0x00000c3d
 
 #define REG_A3XX_PC_PERFCOUNTER0_SELECT				0x00000c48
@@ -1953,7 +1975,7 @@
 
 #define REG_A3XX_PC_PERFCOUNTER3_SELECT				0x00000c4b
 
-#define REG_A3XX_UNKNOWN_0C81					0x00000c81
+#define REG_A3XX_GRAS_TSE_DEBUG_ECO				0x00000c81
 
 #define REG_A3XX_GRAS_PERFCOUNTER0_SELECT			0x00000c88
 
@@ -1975,22 +1997,24 @@
 
 #define REG_A3XX_RB_GMEM_BASE_ADDR				0x00000cc0
 
+#define REG_A3XX_RB_DEBUG_ECO_CONTROLS_ADDR			0x00000cc1
+
 #define REG_A3XX_RB_PERFCOUNTER0_SELECT				0x00000cc6
 
 #define REG_A3XX_RB_PERFCOUNTER1_SELECT				0x00000cc7
 
-#define REG_A3XX_RB_WINDOW_SIZE					0x00000ce0
-#define A3XX_RB_WINDOW_SIZE_WIDTH__MASK				0x00003fff
-#define A3XX_RB_WINDOW_SIZE_WIDTH__SHIFT			0
-static inline uint32_t A3XX_RB_WINDOW_SIZE_WIDTH(uint32_t val)
+#define REG_A3XX_RB_FRAME_BUFFER_DIMENSION			0x00000ce0
+#define A3XX_RB_FRAME_BUFFER_DIMENSION_WIDTH__MASK		0x00003fff
+#define A3XX_RB_FRAME_BUFFER_DIMENSION_WIDTH__SHIFT		0
+static inline uint32_t A3XX_RB_FRAME_BUFFER_DIMENSION_WIDTH(uint32_t val)
 {
-	return ((val) << A3XX_RB_WINDOW_SIZE_WIDTH__SHIFT) & A3XX_RB_WINDOW_SIZE_WIDTH__MASK;
+	return ((val) << A3XX_RB_FRAME_BUFFER_DIMENSION_WIDTH__SHIFT) & A3XX_RB_FRAME_BUFFER_DIMENSION_WIDTH__MASK;
 }
-#define A3XX_RB_WINDOW_SIZE_HEIGHT__MASK			0x0fffc000
-#define A3XX_RB_WINDOW_SIZE_HEIGHT__SHIFT			14
-static inline uint32_t A3XX_RB_WINDOW_SIZE_HEIGHT(uint32_t val)
+#define A3XX_RB_FRAME_BUFFER_DIMENSION_HEIGHT__MASK		0x0fffc000
+#define A3XX_RB_FRAME_BUFFER_DIMENSION_HEIGHT__SHIFT		14
+static inline uint32_t A3XX_RB_FRAME_BUFFER_DIMENSION_HEIGHT(uint32_t val)
 {
-	return ((val) << A3XX_RB_WINDOW_SIZE_HEIGHT__SHIFT) & A3XX_RB_WINDOW_SIZE_HEIGHT__MASK;
+	return ((val) << A3XX_RB_FRAME_BUFFER_DIMENSION_HEIGHT__SHIFT) & A3XX_RB_FRAME_BUFFER_DIMENSION_HEIGHT__MASK;
 }
 
 #define REG_A3XX_HLSQ_PERFCOUNTER0_SELECT			0x00000e00
@@ -2088,6 +2112,14 @@
 
 #define REG_A3XX_TP_PERFCOUNTER5_SELECT				0x00000f09
 
+#define REG_A3XX_VGT_CL_INITIATOR				0x000021f0
+
+#define REG_A3XX_VGT_EVENT_INITIATOR				0x000021f9
+
+#define REG_A3XX_VGT_DRAW_INITIATOR				0x000021fc
+
+#define REG_A3XX_VGT_IMMED_DATA					0x000021fd
+
 #define REG_A3XX_TEX_SAMP_0					0x00000000
 #define A3XX_TEX_SAMP_0_MIPFILTER_LINEAR			0x00000002
 #define A3XX_TEX_SAMP_0_XY_MAG__MASK				0x0000000c
@@ -2123,6 +2155,18 @@
 #define A3XX_TEX_SAMP_0_UNNORM_COORDS				0x80000000
 
 #define REG_A3XX_TEX_SAMP_1					0x00000001
+#define A3XX_TEX_SAMP_1_MAX_LOD__MASK				0x003ff000
+#define A3XX_TEX_SAMP_1_MAX_LOD__SHIFT				12
+static inline uint32_t A3XX_TEX_SAMP_1_MAX_LOD(float val)
+{
+	return ((((uint32_t)(val * 12.0))) << A3XX_TEX_SAMP_1_MAX_LOD__SHIFT) & A3XX_TEX_SAMP_1_MAX_LOD__MASK;
+}
+#define A3XX_TEX_SAMP_1_MIN_LOD__MASK				0xffc00000
+#define A3XX_TEX_SAMP_1_MIN_LOD__SHIFT				22
+static inline uint32_t A3XX_TEX_SAMP_1_MIN_LOD(float val)
+{
+	return ((((uint32_t)(val * 12.0))) << A3XX_TEX_SAMP_1_MIN_LOD__SHIFT) & A3XX_TEX_SAMP_1_MIN_LOD__MASK;
+}
 
 #define REG_A3XX_TEX_CONST_0					0x00000000
 #define A3XX_TEX_CONST_0_TILED					0x00000001
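
The two new LOD packers encode a float level-of-detail as fixed point by scaling by 12 before shifting into the 10-bit field; tracing the helper by hand (arithmetic only, taken from the macro definition above):

	/*
	 * A3XX_TEX_SAMP_1_MAX_LOD(5.0f)
	 *   = ((uint32_t)(5.0 * 12.0)) << 12, masked with 0x003ff000
	 *   = 60 << 12
	 *   = 0x0003c000
	 */
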
diff --git a/drivers/gpu/drm/msm/adreno/a3xx_gpu.c b/drivers/gpu/drm/msm/adreno/a3xx_gpu.c
index 035bd13..461df93 100644
--- a/drivers/gpu/drm/msm/adreno/a3xx_gpu.c
+++ b/drivers/gpu/drm/msm/adreno/a3xx_gpu.c
@@ -15,6 +15,10 @@
  * this program.  If not, see <http://www.gnu.org/licenses/>.
  */
 
+#ifdef CONFIG_MSM_OCMEM
+#  include <mach/ocmem.h>
+#endif
+
 #include "a3xx_gpu.h"
 
 #define A3XX_INT0_MASK \
@@ -63,6 +67,7 @@
 static int a3xx_hw_init(struct msm_gpu *gpu)
 {
 	struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
+	struct a3xx_gpu *a3xx_gpu = to_a3xx_gpu(adreno_gpu);
 	uint32_t *ptr, len;
 	int i, ret;
 
@@ -105,6 +110,21 @@
 		gpu_write(gpu, REG_A3XX_VBIF_ABIT_SORT, 0x000000ff);
 		gpu_write(gpu, REG_A3XX_VBIF_ABIT_SORT_CONF, 0x000000a4);
 
+	} else if (adreno_is_a330v2(adreno_gpu)) {
+		/*
+		 * Most of the VBIF registers on 8974v2 have the correct
+		 * values at power on, so we won't modify those if we don't
+		 * need to
+		 */
+		/* Enable 1k sort: */
+		gpu_write(gpu, REG_A3XX_VBIF_ABIT_SORT, 0x0001003f);
+		gpu_write(gpu, REG_A3XX_VBIF_ABIT_SORT_CONF, 0x000000a4);
+		/* Enable WR-REQ: */
+		gpu_write(gpu, REG_A3XX_VBIF_GATE_OFF_WRREQ_EN, 0x00003f);
+		gpu_write(gpu, REG_A3XX_VBIF_DDR_OUT_MAX_BURST, 0x0000303);
+		/* Set up VBIF_ROUND_ROBIN_QOS_ARB: */
+		gpu_write(gpu, REG_A3XX_VBIF_ROUND_ROBIN_QOS_ARB, 0x0003);
+
 	} else if (adreno_is_a330(adreno_gpu)) {
 		/* Set up 16 deep read/write request queues: */
 		gpu_write(gpu, REG_A3XX_VBIF_IN_RD_LIM_CONF0, 0x18181818);
@@ -121,10 +141,10 @@
 		/* Set up VBIF_ROUND_ROBIN_QOS_ARB: */
 		gpu_write(gpu, REG_A3XX_VBIF_ROUND_ROBIN_QOS_ARB, 0x0001);
 		/* Set up AOOO: */
-		gpu_write(gpu, REG_A3XX_VBIF_OUT_AXI_AOOO_EN, 0x0000ffff);
-		gpu_write(gpu, REG_A3XX_VBIF_OUT_AXI_AOOO, 0xffffffff);
+		gpu_write(gpu, REG_A3XX_VBIF_OUT_AXI_AOOO_EN, 0x0000003f);
+		gpu_write(gpu, REG_A3XX_VBIF_OUT_AXI_AOOO, 0x003f003f);
 		/* Enable 1K sort: */
-		gpu_write(gpu, REG_A3XX_VBIF_ABIT_SORT, 0x0001ffff);
+		gpu_write(gpu, REG_A3XX_VBIF_ABIT_SORT, 0x0001003f);
 		gpu_write(gpu, REG_A3XX_VBIF_ABIT_SORT_CONF, 0x000000a4);
 		/* Disable VBIF clock gating. This is to enable AXI running
 		 * higher frequency than GPU:
@@ -162,14 +182,23 @@
 	gpu_write(gpu, REG_A3XX_UCHE_CACHE_MODE_CONTROL_REG, 0x00000001);
 
 	/* Enable Clock gating: */
-	gpu_write(gpu, REG_A3XX_RBBM_CLOCK_CTL, 0xbfffffff);
+	if (adreno_is_a320(adreno_gpu))
+		gpu_write(gpu, REG_A3XX_RBBM_CLOCK_CTL, 0xbfffffff);
+	else if (adreno_is_a330v2(adreno_gpu))
+		gpu_write(gpu, REG_A3XX_RBBM_CLOCK_CTL, 0xaaaaaaaa);
+	else if (adreno_is_a330(adreno_gpu))
+		gpu_write(gpu, REG_A3XX_RBBM_CLOCK_CTL, 0xbffcffff);
 
-	/* Set the OCMEM base address for A330 */
-//TODO:
-//	if (adreno_is_a330(adreno_gpu)) {
-//		gpu_write(gpu, REG_A3XX_RB_GMEM_BASE_ADDR,
-//			(unsigned int)(a3xx_gpu->ocmem_base >> 14));
-//	}
+	if (adreno_is_a330v2(adreno_gpu))
+		gpu_write(gpu, REG_A3XX_RBBM_GPR0_CTL, 0x05515455);
+	else if (adreno_is_a330(adreno_gpu))
+		gpu_write(gpu, REG_A3XX_RBBM_GPR0_CTL, 0x00000000);
+
+	/* Set the OCMEM base address for A330, etc */
+	if (a3xx_gpu->ocmem_hdl) {
+		gpu_write(gpu, REG_A3XX_RB_GMEM_BASE_ADDR,
+			(unsigned int)(a3xx_gpu->ocmem_base >> 14));
+	}
 
 	/* Turn on performance counters: */
 	gpu_write(gpu, REG_A3XX_RBBM_PERFCTR_CTL, 0x01);
@@ -219,7 +248,7 @@
 	/* Load PM4: */
 	ptr = (uint32_t *)(adreno_gpu->pm4->data);
 	len = adreno_gpu->pm4->size / 4;
-	DBG("loading PM4 ucode version: %u", ptr[0]);
+	DBG("loading PM4 ucode version: %x", ptr[1]);
 
 	gpu_write(gpu, REG_AXXX_CP_DEBUG,
 			AXXX_CP_DEBUG_DYNAMIC_CLK_DISABLE |
@@ -231,19 +260,26 @@
 	/* Load PFP: */
 	ptr = (uint32_t *)(adreno_gpu->pfp->data);
 	len = adreno_gpu->pfp->size / 4;
-	DBG("loading PFP ucode version: %u", ptr[0]);
+	DBG("loading PFP ucode version: %x", ptr[5]);
 
 	gpu_write(gpu, REG_A3XX_CP_PFP_UCODE_ADDR, 0);
 	for (i = 1; i < len; i++)
 		gpu_write(gpu, REG_A3XX_CP_PFP_UCODE_DATA, ptr[i]);
 
 	/* CP ROQ queue sizes (bytes) - RB:16, ST:16, IB1:32, IB2:64 */
-	if (adreno_is_a305(adreno_gpu) || adreno_is_a320(adreno_gpu))
+	if (adreno_is_a305(adreno_gpu) || adreno_is_a320(adreno_gpu)) {
 		gpu_write(gpu, REG_AXXX_CP_QUEUE_THRESHOLDS,
 				AXXX_CP_QUEUE_THRESHOLDS_CSQ_IB1_START(2) |
 				AXXX_CP_QUEUE_THRESHOLDS_CSQ_IB2_START(6) |
 				AXXX_CP_QUEUE_THRESHOLDS_CSQ_ST_START(14));
-
+	} else if (adreno_is_a330(adreno_gpu)) {
+	/* NOTE: this (value taken from downstream android driver)
+		 * includes some bits outside of the known bitfields.  But
+		 * A330 has this "MERCIU queue" thing too, which might
+		 * explain a new bitfield or reshuffling:
+		 */
+		gpu_write(gpu, REG_AXXX_CP_QUEUE_THRESHOLDS, 0x003e2008);
+	}
 
 	/* clear ME_HALT to start micro engine */
 	gpu_write(gpu, REG_AXXX_CP_ME_CNTL, 0);
@@ -253,6 +289,14 @@
 	return 0;
 }
 
+static void a3xx_recover(struct msm_gpu *gpu)
+{
+	gpu_write(gpu, REG_A3XX_RBBM_SW_RESET_CMD, 1);
+	gpu_read(gpu, REG_A3XX_RBBM_SW_RESET_CMD);
+	gpu_write(gpu, REG_A3XX_RBBM_SW_RESET_CMD, 0);
+	adreno_recover(gpu);
+}
+
 static void a3xx_destroy(struct msm_gpu *gpu)
 {
 	struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
@@ -261,6 +305,12 @@
 	DBG("%s", gpu->name);
 
 	adreno_gpu_cleanup(adreno_gpu);
+
+#ifdef CONFIG_MSM_OCMEM
+	if (a3xx_gpu->ocmem_base)
+		ocmem_free(OCMEM_GRAPHICS, a3xx_gpu->ocmem_hdl);
+#endif
+
 	put_device(&a3xx_gpu->pdev->dev);
 	kfree(a3xx_gpu);
 }
@@ -371,7 +421,7 @@
 		.hw_init = a3xx_hw_init,
 		.pm_suspend = msm_gpu_pm_suspend,
 		.pm_resume = msm_gpu_pm_resume,
-		.recover = adreno_recover,
+		.recover = a3xx_recover,
 		.last_fence = adreno_last_fence,
 		.submit = adreno_submit,
 		.flush = adreno_flush,
@@ -387,6 +437,7 @@
 struct msm_gpu *a3xx_gpu_init(struct drm_device *dev)
 {
 	struct a3xx_gpu *a3xx_gpu = NULL;
+	struct adreno_gpu *adreno_gpu;
 	struct msm_gpu *gpu;
 	struct platform_device *pdev = a3xx_pdev;
 	struct adreno_platform_config *config;
@@ -406,7 +457,8 @@
 		goto fail;
 	}
 
-	gpu = &a3xx_gpu->base.base;
+	adreno_gpu = &a3xx_gpu->base;
+	gpu = &adreno_gpu->base;
 
 	get_device(&pdev->dev);
 	a3xx_gpu->pdev = pdev;
@@ -414,16 +466,46 @@
 	gpu->fast_rate = config->fast_rate;
 	gpu->slow_rate = config->slow_rate;
 	gpu->bus_freq  = config->bus_freq;
+#ifdef CONFIG_MSM_BUS_SCALING
+	gpu->bus_scale_table = config->bus_scale_table;
+#endif
 
 	DBG("fast_rate=%u, slow_rate=%u, bus_freq=%u",
 			gpu->fast_rate, gpu->slow_rate, gpu->bus_freq);
 
-	ret = adreno_gpu_init(dev, pdev, &a3xx_gpu->base,
-			&funcs, config->rev);
+	ret = adreno_gpu_init(dev, pdev, adreno_gpu, &funcs, config->rev);
 	if (ret)
 		goto fail;
 
-	return &a3xx_gpu->base.base;
+	/* if needed, allocate gmem: */
+	if (adreno_is_a330(adreno_gpu)) {
+#ifdef CONFIG_MSM_OCMEM
+		/* TODO this is different/missing upstream: */
+		struct ocmem_buf *ocmem_hdl =
+				ocmem_allocate(OCMEM_GRAPHICS, adreno_gpu->gmem);
+
+		a3xx_gpu->ocmem_hdl = ocmem_hdl;
+		a3xx_gpu->ocmem_base = ocmem_hdl->addr;
+		adreno_gpu->gmem = ocmem_hdl->len;
+		DBG("using %dK of OCMEM at 0x%08x", adreno_gpu->gmem / 1024,
+				a3xx_gpu->ocmem_base);
+#endif
+	}
+
+	if (!gpu->mmu) {
+		/* TODO we think it is possible to configure the GPU to
+		 * restrict access to VRAM carveout.  But the required
+		 * registers are unknown.  For now just bail out and
+		 * limp along with just modesetting.  If it turns out
+		 * to not be possible to restrict access, then we must
+		 * implement a cmdstream validator.
+		 */
+		dev_err(dev->dev, "No memory protection without IOMMU\n");
+		ret = -ENXIO;
+		goto fail;
+	}
+
+	return gpu;
 
 fail:
 	if (a3xx_gpu)
@@ -436,19 +518,59 @@
  * The a3xx device:
  */
 
+#if defined(CONFIG_MSM_BUS_SCALING) && !defined(CONFIG_OF)
+#  include <mach/kgsl.h>
+#endif
+
 static int a3xx_probe(struct platform_device *pdev)
 {
 	static struct adreno_platform_config config = {};
 #ifdef CONFIG_OF
-	/* TODO */
+	struct device_node *child, *node = pdev->dev.of_node;
+	u32 val;
+	int ret;
+
+	ret = of_property_read_u32(node, "qcom,chipid", &val);
+	if (ret) {
+		dev_err(&pdev->dev, "could not find chipid: %d\n", ret);
+		return ret;
+	}
+
+	config.rev = ADRENO_REV((val >> 24) & 0xff,
+			(val >> 16) & 0xff, (val >> 8) & 0xff, val & 0xff);
+
+	/* find clock rates: */
+	config.fast_rate = 0;
+	config.slow_rate = ~0;
+	for_each_child_of_node(node, child) {
+		if (of_device_is_compatible(child, "qcom,gpu-pwrlevels")) {
+			struct device_node *pwrlvl;
+			for_each_child_of_node(child, pwrlvl) {
+				ret = of_property_read_u32(pwrlvl, "qcom,gpu-freq", &val);
+				if (ret) {
+					dev_err(&pdev->dev, "could not find gpu-freq: %d\n", ret);
+					return ret;
+				}
+				config.fast_rate = max(config.fast_rate, val);
+				config.slow_rate = min(config.slow_rate, val);
+			}
+		}
+	}
+
+	if (!config.fast_rate) {
+		dev_err(&pdev->dev, "could not find clk rates\n");
+		return -ENXIO;
+	}
+
 #else
+	struct kgsl_device_platform_data *pdata = pdev->dev.platform_data;
 	uint32_t version = socinfo_get_version();
 	if (cpu_is_apq8064ab()) {
 		config.fast_rate = 450000000;
 		config.slow_rate = 27000000;
 		config.bus_freq  = 4;
 		config.rev = ADRENO_REV(3, 2, 1, 0);
-	} else if (cpu_is_apq8064() || cpu_is_msm8960ab()) {
+	} else if (cpu_is_apq8064()) {
 		config.fast_rate = 400000000;
 		config.slow_rate = 27000000;
 		config.bus_freq  = 4;
@@ -461,6 +583,16 @@
 		else
 			config.rev = ADRENO_REV(3, 2, 0, 0);
 
+	} else if (cpu_is_msm8960ab()) {
+		config.fast_rate = 400000000;
+		config.slow_rate = 320000000;
+		config.bus_freq  = 4;
+
+		if (SOCINFO_VERSION_MINOR(version) == 0)
+			config.rev = ADRENO_REV(3, 2, 1, 0);
+		else
+			config.rev = ADRENO_REV(3, 2, 1, 1);
+
 	} else if (cpu_is_msm8930()) {
 		config.fast_rate = 400000000;
 		config.slow_rate = 27000000;
@@ -473,6 +605,9 @@
 			config.rev = ADRENO_REV(3, 0, 5, 0);
 
 	}
+#  ifdef CONFIG_MSM_BUS_SCALING
+	config.bus_scale_table = pdata->bus_scale_table;
+#  endif
 #endif
 	pdev->dev.platform_data = &config;
 	a3xx_pdev = pdev;
@@ -485,10 +620,19 @@
 	return 0;
 }
 
+static const struct of_device_id dt_match[] = {
+	{ .compatible = "qcom,kgsl-3d0" },
+	{}
+};
+MODULE_DEVICE_TABLE(of, dt_match);
+
 static struct platform_driver a3xx_driver = {
 	.probe = a3xx_probe,
 	.remove = a3xx_remove,
-	.driver.name = "kgsl-3d0",
+	.driver = {
+		.name = "kgsl-3d0",
+		.of_match_table = dt_match,
+	},
 };
 
 void __init a3xx_register(void)
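
In the new CONFIG_OF path of a3xx_probe(), fast_rate/slow_rate are derived by scanning every qcom,gpu-freq value under the qcom,gpu-pwrlevels node and keeping the maximum and minimum. A worked example (the frequencies are hypothetical, not taken from any particular board):

	/*
	 * pwrlevel frequencies: 450000000, 320000000, 27000000
	 *   fast_rate = max over all levels = 450000000
	 *   slow_rate = min over all levels =  27000000
	 * Initializing slow_rate to ~0 and fast_rate to 0 guarantees the
	 * first frequency seen wins both comparisons.
	 */
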
diff --git a/drivers/gpu/drm/msm/adreno/a3xx_gpu.h b/drivers/gpu/drm/msm/adreno/a3xx_gpu.h
index 32c398c..bb9a8ca 100644
--- a/drivers/gpu/drm/msm/adreno/a3xx_gpu.h
+++ b/drivers/gpu/drm/msm/adreno/a3xx_gpu.h
@@ -24,6 +24,10 @@
 struct a3xx_gpu {
 	struct adreno_gpu base;
 	struct platform_device *pdev;
+
+	/* if OCMEM is used for GMEM: */
+	uint32_t ocmem_base;
+	void *ocmem_hdl;
 };
 #define to_a3xx_gpu(x) container_of(x, struct a3xx_gpu, base)
 
diff --git a/drivers/gpu/drm/msm/adreno/adreno_common.xml.h b/drivers/gpu/drm/msm/adreno/adreno_common.xml.h
index 33dcc60..d6e6ce2 100644
--- a/drivers/gpu/drm/msm/adreno/adreno_common.xml.h
+++ b/drivers/gpu/drm/msm/adreno/adreno_common.xml.h
@@ -8,12 +8,13 @@
 git clone https://github.com/freedreno/envytools.git
 
 The rules-ng-ng source files this header was generated from are:
-- /home/robclark/src/freedreno/envytools/rnndb/adreno.xml              (    327 bytes, from 2013-07-05 19:21:12)
-- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml (   1453 bytes, from 2013-03-31 16:51:27)
-- /home/robclark/src/freedreno/envytools/rnndb/a2xx/a2xx.xml           (  31003 bytes, from 2013-09-19 18:50:16)
-- /home/robclark/src/freedreno/envytools/rnndb/adreno_common.xml       (   8983 bytes, from 2013-07-24 01:38:36)
-- /home/robclark/src/freedreno/envytools/rnndb/adreno_pm4.xml          (   9759 bytes, from 2013-09-10 00:52:33)
-- /home/robclark/src/freedreno/envytools/rnndb/a3xx/a3xx.xml           (  51983 bytes, from 2013-09-10 00:52:32)
+- /home/robclark/src/freedreno/envytools/rnndb/adreno.xml               (    364 bytes, from 2013-11-30 14:47:15)
+- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml  (   1453 bytes, from 2013-03-31 16:51:27)
+- /home/robclark/src/freedreno/envytools/rnndb/adreno/a2xx.xml          (  32814 bytes, from 2013-11-30 15:07:33)
+- /home/robclark/src/freedreno/envytools/rnndb/adreno/adreno_common.xml (   8900 bytes, from 2013-10-22 23:57:49)
+- /home/robclark/src/freedreno/envytools/rnndb/adreno/adreno_pm4.xml    (  10574 bytes, from 2013-11-13 05:44:45)
+- /home/robclark/src/freedreno/envytools/rnndb/adreno/a3xx.xml          (  53644 bytes, from 2013-11-30 15:07:33)
+- /home/robclark/src/freedreno/envytools/rnndb/adreno/a4xx.xml          (   8344 bytes, from 2013-11-30 14:49:47)
 
 Copyright (C) 2013 by the following authors:
 - Rob Clark <robdclark@gmail.com> (robclark)
@@ -115,96 +116,6 @@
 	DEPTHX_24_8 = 1,
 };
 
-enum adreno_mmu_clnt_beh {
-	BEH_NEVR = 0,
-	BEH_TRAN_RNG = 1,
-	BEH_TRAN_FLT = 2,
-};
-
-#define REG_AXXX_MH_MMU_CONFIG					0x00000040
-#define AXXX_MH_MMU_CONFIG_MMU_ENABLE				0x00000001
-#define AXXX_MH_MMU_CONFIG_SPLIT_MODE_ENABLE			0x00000002
-#define AXXX_MH_MMU_CONFIG_RB_W_CLNT_BEHAVIOR__MASK		0x00000030
-#define AXXX_MH_MMU_CONFIG_RB_W_CLNT_BEHAVIOR__SHIFT		4
-static inline uint32_t AXXX_MH_MMU_CONFIG_RB_W_CLNT_BEHAVIOR(enum adreno_mmu_clnt_beh val)
-{
-	return ((val) << AXXX_MH_MMU_CONFIG_RB_W_CLNT_BEHAVIOR__SHIFT) & AXXX_MH_MMU_CONFIG_RB_W_CLNT_BEHAVIOR__MASK;
-}
-#define AXXX_MH_MMU_CONFIG_CP_W_CLNT_BEHAVIOR__MASK		0x000000c0
-#define AXXX_MH_MMU_CONFIG_CP_W_CLNT_BEHAVIOR__SHIFT		6
-static inline uint32_t AXXX_MH_MMU_CONFIG_CP_W_CLNT_BEHAVIOR(enum adreno_mmu_clnt_beh val)
-{
-	return ((val) << AXXX_MH_MMU_CONFIG_CP_W_CLNT_BEHAVIOR__SHIFT) & AXXX_MH_MMU_CONFIG_CP_W_CLNT_BEHAVIOR__MASK;
-}
-#define AXXX_MH_MMU_CONFIG_CP_R0_CLNT_BEHAVIOR__MASK		0x00000300
-#define AXXX_MH_MMU_CONFIG_CP_R0_CLNT_BEHAVIOR__SHIFT		8
-static inline uint32_t AXXX_MH_MMU_CONFIG_CP_R0_CLNT_BEHAVIOR(enum adreno_mmu_clnt_beh val)
-{
-	return ((val) << AXXX_MH_MMU_CONFIG_CP_R0_CLNT_BEHAVIOR__SHIFT) & AXXX_MH_MMU_CONFIG_CP_R0_CLNT_BEHAVIOR__MASK;
-}
-#define AXXX_MH_MMU_CONFIG_CP_R1_CLNT_BEHAVIOR__MASK		0x00000c00
-#define AXXX_MH_MMU_CONFIG_CP_R1_CLNT_BEHAVIOR__SHIFT		10
-static inline uint32_t AXXX_MH_MMU_CONFIG_CP_R1_CLNT_BEHAVIOR(enum adreno_mmu_clnt_beh val)
-{
-	return ((val) << AXXX_MH_MMU_CONFIG_CP_R1_CLNT_BEHAVIOR__SHIFT) & AXXX_MH_MMU_CONFIG_CP_R1_CLNT_BEHAVIOR__MASK;
-}
-#define AXXX_MH_MMU_CONFIG_CP_R2_CLNT_BEHAVIOR__MASK		0x00003000
-#define AXXX_MH_MMU_CONFIG_CP_R2_CLNT_BEHAVIOR__SHIFT		12
-static inline uint32_t AXXX_MH_MMU_CONFIG_CP_R2_CLNT_BEHAVIOR(enum adreno_mmu_clnt_beh val)
-{
-	return ((val) << AXXX_MH_MMU_CONFIG_CP_R2_CLNT_BEHAVIOR__SHIFT) & AXXX_MH_MMU_CONFIG_CP_R2_CLNT_BEHAVIOR__MASK;
-}
-#define AXXX_MH_MMU_CONFIG_CP_R3_CLNT_BEHAVIOR__MASK		0x0000c000
-#define AXXX_MH_MMU_CONFIG_CP_R3_CLNT_BEHAVIOR__SHIFT		14
-static inline uint32_t AXXX_MH_MMU_CONFIG_CP_R3_CLNT_BEHAVIOR(enum adreno_mmu_clnt_beh val)
-{
-	return ((val) << AXXX_MH_MMU_CONFIG_CP_R3_CLNT_BEHAVIOR__SHIFT) & AXXX_MH_MMU_CONFIG_CP_R3_CLNT_BEHAVIOR__MASK;
-}
-#define AXXX_MH_MMU_CONFIG_CP_R4_CLNT_BEHAVIOR__MASK		0x00030000
-#define AXXX_MH_MMU_CONFIG_CP_R4_CLNT_BEHAVIOR__SHIFT		16
-static inline uint32_t AXXX_MH_MMU_CONFIG_CP_R4_CLNT_BEHAVIOR(enum adreno_mmu_clnt_beh val)
-{
-	return ((val) << AXXX_MH_MMU_CONFIG_CP_R4_CLNT_BEHAVIOR__SHIFT) & AXXX_MH_MMU_CONFIG_CP_R4_CLNT_BEHAVIOR__MASK;
-}
-#define AXXX_MH_MMU_CONFIG_VGT_R0_CLNT_BEHAVIOR__MASK		0x000c0000
-#define AXXX_MH_MMU_CONFIG_VGT_R0_CLNT_BEHAVIOR__SHIFT		18
-static inline uint32_t AXXX_MH_MMU_CONFIG_VGT_R0_CLNT_BEHAVIOR(enum adreno_mmu_clnt_beh val)
-{
-	return ((val) << AXXX_MH_MMU_CONFIG_VGT_R0_CLNT_BEHAVIOR__SHIFT) & AXXX_MH_MMU_CONFIG_VGT_R0_CLNT_BEHAVIOR__MASK;
-}
-#define AXXX_MH_MMU_CONFIG_VGT_R1_CLNT_BEHAVIOR__MASK		0x00300000
-#define AXXX_MH_MMU_CONFIG_VGT_R1_CLNT_BEHAVIOR__SHIFT		20
-static inline uint32_t AXXX_MH_MMU_CONFIG_VGT_R1_CLNT_BEHAVIOR(enum adreno_mmu_clnt_beh val)
-{
-	return ((val) << AXXX_MH_MMU_CONFIG_VGT_R1_CLNT_BEHAVIOR__SHIFT) & AXXX_MH_MMU_CONFIG_VGT_R1_CLNT_BEHAVIOR__MASK;
-}
-#define AXXX_MH_MMU_CONFIG_TC_R_CLNT_BEHAVIOR__MASK		0x00c00000
-#define AXXX_MH_MMU_CONFIG_TC_R_CLNT_BEHAVIOR__SHIFT		22
-static inline uint32_t AXXX_MH_MMU_CONFIG_TC_R_CLNT_BEHAVIOR(enum adreno_mmu_clnt_beh val)
-{
-	return ((val) << AXXX_MH_MMU_CONFIG_TC_R_CLNT_BEHAVIOR__SHIFT) & AXXX_MH_MMU_CONFIG_TC_R_CLNT_BEHAVIOR__MASK;
-}
-#define AXXX_MH_MMU_CONFIG_PA_W_CLNT_BEHAVIOR__MASK		0x03000000
-#define AXXX_MH_MMU_CONFIG_PA_W_CLNT_BEHAVIOR__SHIFT		24
-static inline uint32_t AXXX_MH_MMU_CONFIG_PA_W_CLNT_BEHAVIOR(enum adreno_mmu_clnt_beh val)
-{
-	return ((val) << AXXX_MH_MMU_CONFIG_PA_W_CLNT_BEHAVIOR__SHIFT) & AXXX_MH_MMU_CONFIG_PA_W_CLNT_BEHAVIOR__MASK;
-}
-
-#define REG_AXXX_MH_MMU_VA_RANGE				0x00000041
-
-#define REG_AXXX_MH_MMU_PT_BASE					0x00000042
-
-#define REG_AXXX_MH_MMU_PAGE_FAULT				0x00000043
-
-#define REG_AXXX_MH_MMU_TRAN_ERROR				0x00000044
-
-#define REG_AXXX_MH_MMU_INVALIDATE				0x00000045
-
-#define REG_AXXX_MH_MMU_MPU_BASE				0x00000046
-
-#define REG_AXXX_MH_MMU_MPU_END					0x00000047
-
 #define REG_AXXX_CP_RB_BASE					0x000001c0
 
 #define REG_AXXX_CP_RB_CNTL					0x000001c1
@@ -275,6 +186,18 @@
 }
 
 #define REG_AXXX_CP_MEQ_THRESHOLDS				0x000001d6
+#define AXXX_CP_MEQ_THRESHOLDS_MEQ_END__MASK			0x001f0000
+#define AXXX_CP_MEQ_THRESHOLDS_MEQ_END__SHIFT			16
+static inline uint32_t AXXX_CP_MEQ_THRESHOLDS_MEQ_END(uint32_t val)
+{
+	return ((val) << AXXX_CP_MEQ_THRESHOLDS_MEQ_END__SHIFT) & AXXX_CP_MEQ_THRESHOLDS_MEQ_END__MASK;
+}
+#define AXXX_CP_MEQ_THRESHOLDS_ROQ_END__MASK			0x1f000000
+#define AXXX_CP_MEQ_THRESHOLDS_ROQ_END__SHIFT			24
+static inline uint32_t AXXX_CP_MEQ_THRESHOLDS_ROQ_END(uint32_t val)
+{
+	return ((val) << AXXX_CP_MEQ_THRESHOLDS_ROQ_END__SHIFT) & AXXX_CP_MEQ_THRESHOLDS_ROQ_END__MASK;
+}
 
 #define REG_AXXX_CP_CSQ_AVAIL					0x000001d7
 #define AXXX_CP_CSQ_AVAIL_RING__MASK				0x0000007f
@@ -402,6 +325,36 @@
 	return ((val) << AXXX_CP_CSQ_IB2_STAT_WPTR__SHIFT) & AXXX_CP_CSQ_IB2_STAT_WPTR__MASK;
 }
 
+#define REG_AXXX_CP_NON_PREFETCH_CNTRS				0x00000440
+
+#define REG_AXXX_CP_STQ_ST_STAT					0x00000443
+
+#define REG_AXXX_CP_ST_BASE					0x0000044d
+
+#define REG_AXXX_CP_ST_BUFSZ					0x0000044e
+
+#define REG_AXXX_CP_MEQ_STAT					0x0000044f
+
+#define REG_AXXX_CP_MIU_TAG_STAT				0x00000452
+
+#define REG_AXXX_CP_BIN_MASK_LO					0x00000454
+
+#define REG_AXXX_CP_BIN_MASK_HI					0x00000455
+
+#define REG_AXXX_CP_BIN_SELECT_LO				0x00000456
+
+#define REG_AXXX_CP_BIN_SELECT_HI				0x00000457
+
+#define REG_AXXX_CP_IB1_BASE					0x00000458
+
+#define REG_AXXX_CP_IB1_BUFSZ					0x00000459
+
+#define REG_AXXX_CP_IB2_BASE					0x0000045a
+
+#define REG_AXXX_CP_IB2_BUFSZ					0x0000045b
+
+#define REG_AXXX_CP_STAT					0x0000047f
+
 #define REG_AXXX_CP_SCRATCH_REG0				0x00000578
 
 #define REG_AXXX_CP_SCRATCH_REG1				0x00000579
@@ -418,6 +371,26 @@
 
 #define REG_AXXX_CP_SCRATCH_REG7				0x0000057f
 
+#define REG_AXXX_CP_ME_VS_EVENT_SRC				0x00000600
+
+#define REG_AXXX_CP_ME_VS_EVENT_ADDR				0x00000601
+
+#define REG_AXXX_CP_ME_VS_EVENT_DATA				0x00000602
+
+#define REG_AXXX_CP_ME_VS_EVENT_ADDR_SWM			0x00000603
+
+#define REG_AXXX_CP_ME_VS_EVENT_DATA_SWM			0x00000604
+
+#define REG_AXXX_CP_ME_PS_EVENT_SRC				0x00000605
+
+#define REG_AXXX_CP_ME_PS_EVENT_ADDR				0x00000606
+
+#define REG_AXXX_CP_ME_PS_EVENT_DATA				0x00000607
+
+#define REG_AXXX_CP_ME_PS_EVENT_ADDR_SWM			0x00000608
+
+#define REG_AXXX_CP_ME_PS_EVENT_DATA_SWM			0x00000609
+
 #define REG_AXXX_CP_ME_CF_EVENT_SRC				0x0000060a
 
 #define REG_AXXX_CP_ME_CF_EVENT_ADDR				0x0000060b
@@ -428,5 +401,11 @@
 
 #define REG_AXXX_CP_ME_NRT_DATA					0x0000060e
 
+#define REG_AXXX_CP_ME_VS_FETCH_DONE_SRC			0x00000612
+
+#define REG_AXXX_CP_ME_VS_FETCH_DONE_ADDR			0x00000613
+
+#define REG_AXXX_CP_ME_VS_FETCH_DONE_DATA			0x00000614
+
 
 #endif /* ADRENO_COMMON_XML */
diff --git a/drivers/gpu/drm/msm/adreno/adreno_gpu.c b/drivers/gpu/drm/msm/adreno/adreno_gpu.c
index a0b9d8a..d321099 100644
--- a/drivers/gpu/drm/msm/adreno/adreno_gpu.c
+++ b/drivers/gpu/drm/msm/adreno/adreno_gpu.c
@@ -17,6 +17,7 @@
 
 #include "adreno_gpu.h"
 #include "msm_gem.h"
+#include "msm_mmu.h"
 
 struct adreno_info {
 	struct adreno_rev rev;
@@ -44,7 +45,7 @@
 		.pfpfw = "a300_pfp.fw",
 		.gmem  = SZ_512K,
 	}, {
-		.rev   = ADRENO_REV(3, 3, 0, 0),
+		.rev   = ADRENO_REV(3, 3, 0, ANY_ID),
 		.revn  = 330,
 		.name  = "A330",
 		.pm4fw = "a330_pm4.fw",
@@ -53,6 +54,11 @@
 	},
 };
 
+MODULE_FIRMWARE("a300_pm4.fw");
+MODULE_FIRMWARE("a300_pfp.fw");
+MODULE_FIRMWARE("a330_pm4.fw");
+MODULE_FIRMWARE("a330_pfp.fw");
+
 #define RB_SIZE    SZ_32K
 #define RB_BLKSIZE 16
 
@@ -65,7 +71,7 @@
 		*value = adreno_gpu->info->revn;
 		return 0;
 	case MSM_PARAM_GMEM_SIZE:
-		*value = adreno_gpu->info->gmem;
+		*value = adreno_gpu->gmem;
 		return 0;
 	default:
 		DBG("%s: invalid param: %u", gpu->name, param);
@@ -86,7 +92,7 @@
 	gpu_write(gpu, REG_AXXX_CP_RB_CNTL,
 			/* size is log2(quad-words): */
 			AXXX_CP_RB_CNTL_BUFSZ(ilog2(gpu->rb->size / 8)) |
-			AXXX_CP_RB_CNTL_BLKSZ(RB_BLKSIZE));
+			AXXX_CP_RB_CNTL_BLKSZ(ilog2(RB_BLKSIZE / 8)));
 
 	/* Setup ringbuffer address: */
 	gpu_write(gpu, REG_AXXX_CP_RB_BASE, gpu->rb_iova);
@@ -286,6 +292,7 @@
 		struct adreno_gpu *gpu, const struct adreno_gpu_funcs *funcs,
 		struct adreno_rev rev)
 {
+	struct msm_mmu *mmu;
 	int i, ret;
 
 	/* identify gpu: */
@@ -311,6 +318,7 @@
 			rev.core, rev.major, rev.minor, rev.patchid);
 
 	gpu->funcs = funcs;
+	gpu->gmem = gpu->info->gmem;
 	gpu->rev = rev;
 
 	ret = request_firmware(&gpu->pm4, gpu->info->pm4fw, drm->dev);
@@ -333,10 +341,13 @@
 	if (ret)
 		return ret;
 
-	ret = msm_iommu_attach(drm, gpu->base.iommu,
-			iommu_ports, ARRAY_SIZE(iommu_ports));
-	if (ret)
-		return ret;
+	mmu = gpu->base.mmu;
+	if (mmu) {
+		ret = mmu->funcs->attach(mmu, iommu_ports,
+				ARRAY_SIZE(iommu_ports));
+		if (ret)
+			return ret;
+	}
 
 	gpu->memptrs_bo = msm_gem_new(drm, sizeof(*gpu->memptrs),
 			MSM_BO_UNCACHED);
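
The AXXX_CP_RB_CNTL fix above assumes BLKSZ, like BUFSZ, is expressed as log2 of 8-byte quad-words (the in-line comment already states this for BUFSZ). Under that assumption the encoding works out as follows:

	/*
	 * RB_SIZE    = SZ_32K -> BUFSZ = ilog2(32768 / 8) = ilog2(4096) = 12
	 * RB_BLKSIZE = 16     -> BLKSZ = ilog2(16 / 8)    = ilog2(2)    = 1
	 * The old code wrote the raw byte count (16) straight into BLKSZ.
	 */
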
diff --git a/drivers/gpu/drm/msm/adreno/adreno_gpu.h b/drivers/gpu/drm/msm/adreno/adreno_gpu.h
index f73abfb..ca11ea4 100644
--- a/drivers/gpu/drm/msm/adreno/adreno_gpu.h
+++ b/drivers/gpu/drm/msm/adreno/adreno_gpu.h
@@ -51,6 +51,7 @@
 	struct msm_gpu base;
 	struct adreno_rev rev;
 	const struct adreno_info *info;
+	uint32_t gmem;  /* actual gmem size */
 	uint32_t revn;  /* numeric revision name */
 	const struct adreno_gpu_funcs *funcs;
 
@@ -70,6 +71,9 @@
 struct adreno_platform_config {
 	struct adreno_rev rev;
 	uint32_t fast_rate, slow_rate, bus_freq;
+#ifdef CONFIG_MSM_BUS_SCALING
+	struct msm_bus_scale_pdata *bus_scale_table;
+#endif
 };
 
 #define ADRENO_IDLE_TIMEOUT (20 * 1000)
@@ -94,6 +98,11 @@
 	return gpu->revn == 330;
 }
 
+static inline bool adreno_is_a330v2(struct adreno_gpu *gpu)
+{
+	return adreno_is_a330(gpu) && (gpu->rev.patchid > 0);
+}
+
 int adreno_get_param(struct msm_gpu *gpu, uint32_t param, uint64_t *value);
 int adreno_hw_init(struct msm_gpu *gpu);
 uint32_t adreno_last_fence(struct msm_gpu *gpu);
diff --git a/drivers/gpu/drm/msm/adreno/adreno_pm4.xml.h b/drivers/gpu/drm/msm/adreno/adreno_pm4.xml.h
index 259ad70..ae992c7 100644
--- a/drivers/gpu/drm/msm/adreno/adreno_pm4.xml.h
+++ b/drivers/gpu/drm/msm/adreno/adreno_pm4.xml.h
@@ -8,12 +8,13 @@
 git clone https://github.com/freedreno/envytools.git
 
 The rules-ng-ng source files this header was generated from are:
-- /home/robclark/src/freedreno/envytools/rnndb/adreno.xml              (    327 bytes, from 2013-07-05 19:21:12)
-- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml (   1453 bytes, from 2013-03-31 16:51:27)
-- /home/robclark/src/freedreno/envytools/rnndb/a2xx/a2xx.xml           (  31003 bytes, from 2013-09-19 18:50:16)
-- /home/robclark/src/freedreno/envytools/rnndb/adreno_common.xml       (   8983 bytes, from 2013-07-24 01:38:36)
-- /home/robclark/src/freedreno/envytools/rnndb/adreno_pm4.xml          (   9759 bytes, from 2013-09-10 00:52:33)
-- /home/robclark/src/freedreno/envytools/rnndb/a3xx/a3xx.xml           (  51983 bytes, from 2013-09-10 00:52:32)
+- /home/robclark/src/freedreno/envytools/rnndb/adreno.xml               (    364 bytes, from 2013-11-30 14:47:15)
+- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml  (   1453 bytes, from 2013-03-31 16:51:27)
+- /home/robclark/src/freedreno/envytools/rnndb/adreno/a2xx.xml          (  32814 bytes, from 2013-11-30 15:07:33)
+- /home/robclark/src/freedreno/envytools/rnndb/adreno/adreno_common.xml (   8900 bytes, from 2013-10-22 23:57:49)
+- /home/robclark/src/freedreno/envytools/rnndb/adreno/adreno_pm4.xml    (  10574 bytes, from 2013-11-13 05:44:45)
+- /home/robclark/src/freedreno/envytools/rnndb/adreno/a3xx.xml          (  53644 bytes, from 2013-11-30 15:07:33)
+- /home/robclark/src/freedreno/envytools/rnndb/adreno/a4xx.xml          (   8344 bytes, from 2013-11-30 14:49:47)
 
 Copyright (C) 2013 by the following authors:
 - Rob Clark <robdclark@gmail.com> (robclark)
@@ -66,13 +67,15 @@
 
 enum pc_di_primtype {
 	DI_PT_NONE = 0,
-	DI_PT_POINTLIST = 1,
+	DI_PT_POINTLIST_A2XX = 1,
 	DI_PT_LINELIST = 2,
 	DI_PT_LINESTRIP = 3,
 	DI_PT_TRILIST = 4,
 	DI_PT_TRIFAN = 5,
 	DI_PT_TRISTRIP = 6,
+	DI_PT_LINELOOP = 7,
 	DI_PT_RECTLIST = 8,
+	DI_PT_POINTLIST_A3XX = 9,
 	DI_PT_QUADLIST = 13,
 	DI_PT_QUADSTRIP = 14,
 	DI_PT_POLYGON = 15,
@@ -119,7 +122,7 @@
 	CP_WAIT_FOR_IDLE = 38,
 	CP_WAIT_REG_MEM = 60,
 	CP_WAIT_REG_EQ = 82,
-	CP_WAT_REG_GTE = 83,
+	CP_WAIT_REG_GTE = 83,
 	CP_WAIT_UNTIL_READ = 92,
 	CP_WAIT_IB_PFD_COMPLETE = 93,
 	CP_REG_RMW = 33,
@@ -151,7 +154,6 @@
 	CP_CONTEXT_UPDATE = 94,
 	CP_INTERRUPT = 64,
 	CP_IM_STORE = 44,
-	CP_SET_BIN_BASE_OFFSET = 75,
 	CP_SET_DRAW_INIT_FLAGS = 75,
 	CP_SET_PROTECTED_MODE = 95,
 	CP_LOAD_STATE = 48,
@@ -159,6 +161,16 @@
 	CP_COND_INDIRECT_BUFFER_PFD = 50,
 	CP_INDIRECT_BUFFER_PFE = 63,
 	CP_SET_BIN = 76,
+	CP_TEST_TWO_MEMS = 113,
+	CP_WAIT_FOR_ME = 19,
+	IN_IB_PREFETCH_END = 23,
+	IN_SUBBLK_PREFETCH = 31,
+	IN_INSTR_PREFETCH = 32,
+	IN_INSTR_MATCH = 71,
+	IN_CONST_PREFETCH = 73,
+	IN_INCR_UPDT_STATE = 85,
+	IN_INCR_UPDT_CONST = 86,
+	IN_INCR_UPDT_INSTR = 87,
 };
 
 enum adreno_state_block {
diff --git a/drivers/gpu/drm/msm/dsi/dsi.xml.h b/drivers/gpu/drm/msm/dsi/dsi.xml.h
index 6d4c62b..87be647 100644
--- a/drivers/gpu/drm/msm/dsi/dsi.xml.h
+++ b/drivers/gpu/drm/msm/dsi/dsi.xml.h
@@ -8,14 +8,16 @@
 git clone https://github.com/freedreno/envytools.git
 
 The rules-ng-ng source files this header was generated from are:
-- /home/robclark/src/freedreno/envytools/rnndb/msm.xml                 (    595 bytes, from 2013-07-05 19:21:12)
+- /home/robclark/src/freedreno/envytools/rnndb/msm.xml                 (    647 bytes, from 2013-11-30 14:45:35)
 - /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml (   1453 bytes, from 2013-03-31 16:51:27)
-- /home/robclark/src/freedreno/envytools/rnndb/mdp4/mdp4.xml           (  19332 bytes, from 2013-10-07 16:36:48)
+- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp4.xml            (  17996 bytes, from 2013-12-01 19:10:31)
+- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp_common.xml      (   1615 bytes, from 2013-11-30 15:00:52)
+- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp5.xml            (  22517 bytes, from 2013-12-03 20:59:13)
 - /home/robclark/src/freedreno/envytools/rnndb/dsi/dsi.xml             (  11712 bytes, from 2013-08-17 17:13:43)
 - /home/robclark/src/freedreno/envytools/rnndb/dsi/sfpb.xml            (    344 bytes, from 2013-08-11 19:26:32)
 - /home/robclark/src/freedreno/envytools/rnndb/dsi/mmss_cc.xml         (   1544 bytes, from 2013-08-16 19:17:05)
 - /home/robclark/src/freedreno/envytools/rnndb/hdmi/qfprom.xml         (    600 bytes, from 2013-07-05 19:21:12)
-- /home/robclark/src/freedreno/envytools/rnndb/hdmi/hdmi.xml           (  19288 bytes, from 2013-08-11 18:14:15)
+- /home/robclark/src/freedreno/envytools/rnndb/hdmi/hdmi.xml           (  20932 bytes, from 2013-12-01 15:13:04)
 
 Copyright (C) 2013 by the following authors:
 - Rob Clark <robdclark@gmail.com> (robclark)
diff --git a/drivers/gpu/drm/msm/dsi/mmss_cc.xml.h b/drivers/gpu/drm/msm/dsi/mmss_cc.xml.h
index d1df38b..747a6ef 100644
--- a/drivers/gpu/drm/msm/dsi/mmss_cc.xml.h
+++ b/drivers/gpu/drm/msm/dsi/mmss_cc.xml.h
@@ -8,14 +8,16 @@
 git clone https://github.com/freedreno/envytools.git
 
 The rules-ng-ng source files this header was generated from are:
-- /home/robclark/src/freedreno/envytools/rnndb/msm.xml                 (    595 bytes, from 2013-07-05 19:21:12)
+- /home/robclark/src/freedreno/envytools/rnndb/msm.xml                 (    647 bytes, from 2013-11-30 14:45:35)
 - /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml (   1453 bytes, from 2013-03-31 16:51:27)
-- /home/robclark/src/freedreno/envytools/rnndb/mdp4/mdp4.xml           (  19332 bytes, from 2013-10-07 16:36:48)
+- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp4.xml            (  17996 bytes, from 2013-12-01 19:10:31)
+- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp_common.xml      (   1615 bytes, from 2013-11-30 15:00:52)
+- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp5.xml            (  22517 bytes, from 2013-12-03 20:59:13)
 - /home/robclark/src/freedreno/envytools/rnndb/dsi/dsi.xml             (  11712 bytes, from 2013-08-17 17:13:43)
 - /home/robclark/src/freedreno/envytools/rnndb/dsi/sfpb.xml            (    344 bytes, from 2013-08-11 19:26:32)
 - /home/robclark/src/freedreno/envytools/rnndb/dsi/mmss_cc.xml         (   1544 bytes, from 2013-08-16 19:17:05)
 - /home/robclark/src/freedreno/envytools/rnndb/hdmi/qfprom.xml         (    600 bytes, from 2013-07-05 19:21:12)
-- /home/robclark/src/freedreno/envytools/rnndb/hdmi/hdmi.xml           (  19288 bytes, from 2013-08-11 18:14:15)
+- /home/robclark/src/freedreno/envytools/rnndb/hdmi/hdmi.xml           (  20932 bytes, from 2013-12-01 15:13:04)
 
 Copyright (C) 2013 by the following authors:
 - Rob Clark <robdclark@gmail.com> (robclark)
diff --git a/drivers/gpu/drm/msm/dsi/sfpb.xml.h b/drivers/gpu/drm/msm/dsi/sfpb.xml.h
index 0030a11..48e03ac 100644
--- a/drivers/gpu/drm/msm/dsi/sfpb.xml.h
+++ b/drivers/gpu/drm/msm/dsi/sfpb.xml.h
@@ -8,14 +8,16 @@
 git clone https://github.com/freedreno/envytools.git
 
 The rules-ng-ng source files this header was generated from are:
-- /home/robclark/src/freedreno/envytools/rnndb/msm.xml                 (    595 bytes, from 2013-07-05 19:21:12)
+- /home/robclark/src/freedreno/envytools/rnndb/msm.xml                 (    647 bytes, from 2013-11-30 14:45:35)
 - /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml (   1453 bytes, from 2013-03-31 16:51:27)
-- /home/robclark/src/freedreno/envytools/rnndb/mdp4/mdp4.xml           (  19332 bytes, from 2013-10-07 16:36:48)
+- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp4.xml            (  17996 bytes, from 2013-12-01 19:10:31)
+- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp_common.xml      (   1615 bytes, from 2013-11-30 15:00:52)
+- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp5.xml            (  22517 bytes, from 2013-12-03 20:59:13)
 - /home/robclark/src/freedreno/envytools/rnndb/dsi/dsi.xml             (  11712 bytes, from 2013-08-17 17:13:43)
 - /home/robclark/src/freedreno/envytools/rnndb/dsi/sfpb.xml            (    344 bytes, from 2013-08-11 19:26:32)
 - /home/robclark/src/freedreno/envytools/rnndb/dsi/mmss_cc.xml         (   1544 bytes, from 2013-08-16 19:17:05)
 - /home/robclark/src/freedreno/envytools/rnndb/hdmi/qfprom.xml         (    600 bytes, from 2013-07-05 19:21:12)
-- /home/robclark/src/freedreno/envytools/rnndb/hdmi/hdmi.xml           (  19288 bytes, from 2013-08-11 18:14:15)
+- /home/robclark/src/freedreno/envytools/rnndb/hdmi/hdmi.xml           (  20932 bytes, from 2013-12-01 15:13:04)
 
 Copyright (C) 2013 by the following authors:
 - Rob Clark <robdclark@gmail.com> (robclark)
diff --git a/drivers/gpu/drm/msm/hdmi/hdmi.c b/drivers/gpu/drm/msm/hdmi/hdmi.c
index 50d11df..6f1588a 100644
--- a/drivers/gpu/drm/msm/hdmi/hdmi.c
+++ b/drivers/gpu/drm/msm/hdmi/hdmi.c
@@ -41,7 +41,7 @@
 			power_on ? "Enable" : "Disable", ctrl);
 }
 
-static irqreturn_t hdmi_irq(int irq, void *dev_id)
+irqreturn_t hdmi_irq(int irq, void *dev_id)
 {
 	struct hdmi *hdmi = dev_id;
 
@@ -71,13 +71,13 @@
 }
 
 /* initialize connector */
-int hdmi_init(struct drm_device *dev, struct drm_encoder *encoder)
+struct hdmi *hdmi_init(struct drm_device *dev, struct drm_encoder *encoder)
 {
 	struct hdmi *hdmi = NULL;
 	struct msm_drm_private *priv = dev->dev_private;
 	struct platform_device *pdev = hdmi_pdev;
 	struct hdmi_platform_config *config;
-	int ret;
+	int i, ret;
 
 	if (!pdev) {
 		dev_err(dev->dev, "no hdmi device\n");
@@ -99,6 +99,7 @@
 
 	hdmi->dev = dev;
 	hdmi->pdev = pdev;
+	hdmi->config = config;
 	hdmi->encoder = encoder;
 
 	/* not sure about which phy maps to which msm.. probably I miss some */
@@ -114,44 +115,70 @@
 		goto fail;
 	}
 
-	hdmi->mmio = msm_ioremap(pdev, "hdmi_msm_hdmi_addr", "HDMI");
+	hdmi->mmio = msm_ioremap(pdev, config->mmio_name, "HDMI");
 	if (IS_ERR(hdmi->mmio)) {
 		ret = PTR_ERR(hdmi->mmio);
 		goto fail;
 	}
 
-	hdmi->mvs = devm_regulator_get(&pdev->dev, "8901_hdmi_mvs");
-	if (IS_ERR(hdmi->mvs))
-		hdmi->mvs = devm_regulator_get(&pdev->dev, "hdmi_mvs");
-	if (IS_ERR(hdmi->mvs)) {
-		ret = PTR_ERR(hdmi->mvs);
-		dev_err(dev->dev, "failed to get mvs regulator: %d\n", ret);
-		goto fail;
+	BUG_ON(config->hpd_reg_cnt > ARRAY_SIZE(hdmi->hpd_regs));
+	for (i = 0; i < config->hpd_reg_cnt; i++) {
+		struct regulator *reg;
+
+		reg = devm_regulator_get(&pdev->dev, config->hpd_reg_names[i]);
+		if (IS_ERR(reg)) {
+			ret = PTR_ERR(reg);
+			dev_err(dev->dev, "failed to get hpd regulator: %s (%d)\n",
+					config->hpd_reg_names[i], ret);
+			goto fail;
+		}
+
+		hdmi->hpd_regs[i] = reg;
 	}
 
-	hdmi->mpp0 = devm_regulator_get(&pdev->dev, "8901_mpp0");
-	if (IS_ERR(hdmi->mpp0))
-		hdmi->mpp0 = NULL;
+	BUG_ON(config->pwr_reg_cnt > ARRAY_SIZE(hdmi->pwr_regs));
+	for (i = 0; i < config->pwr_reg_cnt; i++) {
+		struct regulator *reg;
 
-	hdmi->clk = devm_clk_get(&pdev->dev, "core_clk");
-	if (IS_ERR(hdmi->clk)) {
-		ret = PTR_ERR(hdmi->clk);
-		dev_err(dev->dev, "failed to get 'clk': %d\n", ret);
-		goto fail;
+		reg = devm_regulator_get(&pdev->dev, config->pwr_reg_names[i]);
+		if (IS_ERR(reg)) {
+			ret = PTR_ERR(reg);
+			dev_err(dev->dev, "failed to get pwr regulator: %s (%d)\n",
+					config->pwr_reg_names[i], ret);
+			goto fail;
+		}
+
+		hdmi->pwr_regs[i] = reg;
 	}
 
-	hdmi->m_pclk = devm_clk_get(&pdev->dev, "master_iface_clk");
-	if (IS_ERR(hdmi->m_pclk)) {
-		ret = PTR_ERR(hdmi->m_pclk);
-		dev_err(dev->dev, "failed to get 'm_pclk': %d\n", ret);
-		goto fail;
+	BUG_ON(config->hpd_clk_cnt > ARRAY_SIZE(hdmi->hpd_clks));
+	for (i = 0; i < config->hpd_clk_cnt; i++) {
+		struct clk *clk;
+
+		clk = devm_clk_get(&pdev->dev, config->hpd_clk_names[i]);
+		if (IS_ERR(clk)) {
+			ret = PTR_ERR(clk);
+			dev_err(dev->dev, "failed to get hpd clk: %s (%d)\n",
+					config->hpd_clk_names[i], ret);
+			goto fail;
+		}
+
+		hdmi->hpd_clks[i] = clk;
 	}
 
-	hdmi->s_pclk = devm_clk_get(&pdev->dev, "slave_iface_clk");
-	if (IS_ERR(hdmi->s_pclk)) {
-		ret = PTR_ERR(hdmi->s_pclk);
-		dev_err(dev->dev, "failed to get 's_pclk': %d\n", ret);
-		goto fail;
+	BUG_ON(config->pwr_clk_cnt > ARRAY_SIZE(hdmi->pwr_clks));
+	for (i = 0; i < config->pwr_clk_cnt; i++) {
+		struct clk *clk;
+
+		clk = devm_clk_get(&pdev->dev, config->pwr_clk_names[i]);
+		if (IS_ERR(clk)) {
+			ret = PTR_ERR(clk);
+			dev_err(dev->dev, "failed to get pwr clk: %s (%d)\n",
+					config->pwr_clk_names[i], ret);
+			goto fail;
+		}
+
+		hdmi->pwr_clks[i] = clk;
 	}
 
 	hdmi->i2c = hdmi_i2c_init(hdmi);
@@ -178,20 +205,22 @@
 		goto fail;
 	}
 
-	hdmi->irq = platform_get_irq(pdev, 0);
-	if (hdmi->irq < 0) {
-		ret = hdmi->irq;
-		dev_err(dev->dev, "failed to get irq: %d\n", ret);
-		goto fail;
-	}
+	if (!config->shared_irq) {
+		hdmi->irq = platform_get_irq(pdev, 0);
+		if (hdmi->irq < 0) {
+			ret = hdmi->irq;
+			dev_err(dev->dev, "failed to get irq: %d\n", ret);
+			goto fail;
+		}
 
-	ret = devm_request_threaded_irq(&pdev->dev, hdmi->irq,
-			NULL, hdmi_irq, IRQF_TRIGGER_HIGH | IRQF_ONESHOT,
-			"hdmi_isr", hdmi);
-	if (ret < 0) {
-		dev_err(dev->dev, "failed to request IRQ%u: %d\n",
-				hdmi->irq, ret);
-		goto fail;
+		ret = devm_request_threaded_irq(&pdev->dev, hdmi->irq,
+				NULL, hdmi_irq, IRQF_TRIGGER_HIGH | IRQF_ONESHOT,
+				"hdmi_isr", hdmi);
+		if (ret < 0) {
+			dev_err(dev->dev, "failed to request IRQ%u: %d\n",
+					hdmi->irq, ret);
+			goto fail;
+		}
 	}
 
 	encoder->bridge = hdmi->bridge;
@@ -199,7 +228,7 @@
 	priv->bridges[priv->num_bridges++]       = hdmi->bridge;
 	priv->connectors[priv->num_connectors++] = hdmi->connector;
 
-	return 0;
+	return hdmi;
 
 fail:
 	if (hdmi) {
@@ -211,37 +240,100 @@
 		hdmi_destroy(&hdmi->refcount);
 	}
 
-	return ret;
+	return ERR_PTR(ret);
 }
 
 /*
  * The hdmi device:
  */
 
+#include <linux/of_gpio.h>
+
 static int hdmi_dev_probe(struct platform_device *pdev)
 {
 	static struct hdmi_platform_config config = {};
 #ifdef CONFIG_OF
-	/* TODO */
+	struct device_node *of_node = pdev->dev.of_node;
+
+	int get_gpio(const char *name)
+	{
+		int gpio = of_get_named_gpio(of_node, name, 0);
+		if (gpio < 0) {
+			dev_err(&pdev->dev, "failed to get gpio: %s (%d)\n",
+					name, gpio);
+			gpio = -1;
+		}
+		return gpio;
+	}
+
+	/* TODO actually use DT.. */
+	static const char *hpd_reg_names[] = {"hpd-gdsc", "hpd-5v"};
+	static const char *pwr_reg_names[] = {"core-vdda", "core-vcc"};
+	static const char *hpd_clk_names[] = {"iface_clk", "core_clk", "mdp_core_clk"};
+	static const char *pwr_clk_names[] = {"extp_clk", "alt_iface_clk"};
+
+	config.phy_init      = hdmi_phy_8x74_init;
+	config.mmio_name     = "core_physical";
+	config.hpd_reg_names = hpd_reg_names;
+	config.hpd_reg_cnt   = ARRAY_SIZE(hpd_reg_names);
+	config.pwr_reg_names = pwr_reg_names;
+	config.pwr_reg_cnt   = ARRAY_SIZE(pwr_reg_names);
+	config.hpd_clk_names = hpd_clk_names;
+	config.hpd_clk_cnt   = ARRAY_SIZE(hpd_clk_names);
+	config.pwr_clk_names = pwr_clk_names;
+	config.pwr_clk_cnt   = ARRAY_SIZE(pwr_clk_names);
+	config.ddc_clk_gpio  = get_gpio("qcom,hdmi-tx-ddc-clk");
+	config.ddc_data_gpio = get_gpio("qcom,hdmi-tx-ddc-data");
+	config.hpd_gpio      = get_gpio("qcom,hdmi-tx-hpd");
+	config.mux_en_gpio   = get_gpio("qcom,hdmi-tx-mux-en");
+	config.mux_sel_gpio  = get_gpio("qcom,hdmi-tx-mux-sel");
+	config.shared_irq    = true;
+
 #else
+	static const char *hpd_clk_names[] = {
+			"core_clk", "master_iface_clk", "slave_iface_clk",
+	};
 	if (cpu_is_apq8064()) {
+		static const char *hpd_reg_names[] = {"8921_hdmi_mvs"};
 		config.phy_init      = hdmi_phy_8960_init;
+		config.mmio_name     = "hdmi_msm_hdmi_addr";
+		config.hpd_reg_names = hpd_reg_names;
+		config.hpd_reg_cnt   = ARRAY_SIZE(hpd_reg_names);
+		config.hpd_clk_names = hpd_clk_names;
+		config.hpd_clk_cnt   = ARRAY_SIZE(hpd_clk_names);
 		config.ddc_clk_gpio  = 70;
 		config.ddc_data_gpio = 71;
 		config.hpd_gpio      = 72;
-		config.pmic_gpio     = 13 + NR_GPIO_IRQS;
-	} else if (cpu_is_msm8960()) {
+		config.mux_en_gpio   = -1;
+		config.mux_sel_gpio  = 13 + NR_GPIO_IRQS;
+	} else if (cpu_is_msm8960() || cpu_is_msm8960ab()) {
+		static const char *hpd_reg_names[] = {"8921_hdmi_mvs"};
 		config.phy_init      = hdmi_phy_8960_init;
+		config.mmio_name     = "hdmi_msm_hdmi_addr";
+		config.hpd_reg_names = hpd_reg_names;
+		config.hpd_reg_cnt   = ARRAY_SIZE(hpd_reg_names);
+		config.hpd_clk_names = hpd_clk_names;
+		config.hpd_clk_cnt   = ARRAY_SIZE(hpd_clk_names);
 		config.ddc_clk_gpio  = 100;
 		config.ddc_data_gpio = 101;
 		config.hpd_gpio      = 102;
-		config.pmic_gpio     = -1;
+		config.mux_en_gpio   = -1;
+		config.mux_sel_gpio  = -1;
 	} else if (cpu_is_msm8x60()) {
+		static const char *hpd_reg_names[] = {
+				"8901_hdmi_mvs", "8901_mpp0"
+		};
 		config.phy_init      = hdmi_phy_8x60_init;
+		config.mmio_name     = "hdmi_msm_hdmi_addr";
+		config.hpd_reg_names = hpd_reg_names;
+		config.hpd_reg_cnt   = ARRAY_SIZE(hpd_reg_names);
+		config.hpd_clk_names = hpd_clk_names;
+		config.hpd_clk_cnt   = ARRAY_SIZE(hpd_clk_names);
 		config.ddc_clk_gpio  = 170;
 		config.ddc_data_gpio = 171;
 		config.hpd_gpio      = 172;
-		config.pmic_gpio     = -1;
+		config.mux_en_gpio   = -1;
+		config.mux_sel_gpio  = -1;
 	}
 #endif
 	pdev->dev.platform_data = &config;
@@ -255,10 +347,19 @@
 	return 0;
 }
 
+static const struct of_device_id dt_match[] = {
+	{ .compatible = "qcom,hdmi-tx" },
+	{}
+};
+MODULE_DEVICE_TABLE(of, dt_match);
+
 static struct platform_driver hdmi_driver = {
 	.probe = hdmi_dev_probe,
 	.remove = hdmi_dev_remove,
-	.driver.name = "hdmi_msm",
+	.driver = {
+		.name = "hdmi_msm",
+		.of_match_table = dt_match,
+	},
 };
 
 void __init hdmi_register(void)
diff --git a/drivers/gpu/drm/msm/hdmi/hdmi.h b/drivers/gpu/drm/msm/hdmi/hdmi.h
index 2c2ec56..41b29ad 100644
--- a/drivers/gpu/drm/msm/hdmi/hdmi.h
+++ b/drivers/gpu/drm/msm/hdmi/hdmi.h
@@ -28,6 +28,7 @@
 
 
 struct hdmi_phy;
+struct hdmi_platform_config;
 
 struct hdmi {
 	struct kref refcount;
@@ -35,14 +36,14 @@
 	struct drm_device *dev;
 	struct platform_device *pdev;
 
+	const struct hdmi_platform_config *config;
+
 	void __iomem *mmio;
 
-	struct regulator *mvs;        /* HDMI_5V */
-	struct regulator *mpp0;       /* External 5V */
-
-	struct clk *clk;
-	struct clk *m_pclk;
-	struct clk *s_pclk;
+	struct regulator *hpd_regs[2];
+	struct regulator *pwr_regs[2];
+	struct clk *hpd_clks[3];
+	struct clk *pwr_clks[2];
 
 	struct hdmi_phy *phy;
 	struct i2c_adapter *i2c;
@@ -60,7 +61,29 @@
 /* platform config data (ie. from DT, or pdata) */
 struct hdmi_platform_config {
 	struct hdmi_phy *(*phy_init)(struct hdmi *hdmi);
-	int ddc_clk_gpio, ddc_data_gpio, hpd_gpio, pmic_gpio;
+	const char *mmio_name;
+
+	/* regulators that need to be on for hpd: */
+	const char **hpd_reg_names;
+	int hpd_reg_cnt;
+
+	/* regulators that need to be on for screen pwr: */
+	const char **pwr_reg_names;
+	int pwr_reg_cnt;
+
+	/* clks that need to be on for hpd: */
+	const char **hpd_clk_names;
+	int hpd_clk_cnt;
+
+	/* clks that need to be on for screen pwr (ie pixel clk): */
+	const char **pwr_clk_names;
+	int pwr_clk_cnt;
+
+	/* gpio's: */
+	int ddc_clk_gpio, ddc_data_gpio, hpd_gpio, mux_en_gpio, mux_sel_gpio;
+
+	/* older devices had their own irq, on mdp5+ it is shared w/ mdp: */
+	bool shared_irq;
 };
 
 void hdmi_set_mode(struct hdmi *hdmi, bool power_on);
@@ -106,6 +129,7 @@
 
 struct hdmi_phy *hdmi_phy_8960_init(struct hdmi *hdmi);
 struct hdmi_phy *hdmi_phy_8x60_init(struct hdmi *hdmi);
+struct hdmi_phy *hdmi_phy_8x74_init(struct hdmi *hdmi);
 
 /*
  * hdmi bridge:
diff --git a/drivers/gpu/drm/msm/hdmi/hdmi.xml.h b/drivers/gpu/drm/msm/hdmi/hdmi.xml.h
index 4e939f8..e263658 100644
--- a/drivers/gpu/drm/msm/hdmi/hdmi.xml.h
+++ b/drivers/gpu/drm/msm/hdmi/hdmi.xml.h
@@ -8,14 +8,16 @@
 git clone https://github.com/freedreno/envytools.git
 
 The rules-ng-ng source files this header was generated from are:
-- /home/robclark/src/freedreno/envytools/rnndb/msm.xml                 (    595 bytes, from 2013-07-05 19:21:12)
+- /home/robclark/src/freedreno/envytools/rnndb/msm.xml                 (    647 bytes, from 2013-11-30 14:45:35)
 - /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml (   1453 bytes, from 2013-03-31 16:51:27)
-- /home/robclark/src/freedreno/envytools/rnndb/mdp4/mdp4.xml           (  19332 bytes, from 2013-10-07 16:36:48)
+- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp4.xml            (  17996 bytes, from 2013-12-01 19:10:31)
+- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp_common.xml      (   1615 bytes, from 2013-11-30 15:00:52)
+- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp5.xml            (  22517 bytes, from 2013-12-03 20:59:13)
 - /home/robclark/src/freedreno/envytools/rnndb/dsi/dsi.xml             (  11712 bytes, from 2013-08-17 17:13:43)
 - /home/robclark/src/freedreno/envytools/rnndb/dsi/sfpb.xml            (    344 bytes, from 2013-08-11 19:26:32)
 - /home/robclark/src/freedreno/envytools/rnndb/dsi/mmss_cc.xml         (   1544 bytes, from 2013-08-16 19:17:05)
 - /home/robclark/src/freedreno/envytools/rnndb/hdmi/qfprom.xml         (    600 bytes, from 2013-07-05 19:21:12)
-- /home/robclark/src/freedreno/envytools/rnndb/hdmi/hdmi.xml           (  19288 bytes, from 2013-08-11 18:14:15)
+- /home/robclark/src/freedreno/envytools/rnndb/hdmi/hdmi.xml           (  20932 bytes, from 2013-12-01 15:13:04)
 
 Copyright (C) 2013 by the following authors:
 - Rob Clark <robdclark@gmail.com> (robclark)
@@ -212,6 +214,20 @@
 #define REG_HDMI_HDCP_RESET					0x00000130
 #define HDMI_HDCP_RESET_LINK0_DEAUTHENTICATE			0x00000001
 
+#define REG_HDMI_VENSPEC_INFO0					0x0000016c
+
+#define REG_HDMI_VENSPEC_INFO1					0x00000170
+
+#define REG_HDMI_VENSPEC_INFO2					0x00000174
+
+#define REG_HDMI_VENSPEC_INFO3					0x00000178
+
+#define REG_HDMI_VENSPEC_INFO4					0x0000017c
+
+#define REG_HDMI_VENSPEC_INFO5					0x00000180
+
+#define REG_HDMI_VENSPEC_INFO6					0x00000184
+
 #define REG_HDMI_AUDIO_CFG					0x000001d0
 #define HDMI_AUDIO_CFG_ENGINE_ENABLE				0x00000001
 #define HDMI_AUDIO_CFG_FIFO_WATERMARK__MASK			0x000000f0
@@ -235,6 +251,9 @@
 	return ((val) << HDMI_DDC_CTRL_TRANSACTION_CNT__SHIFT) & HDMI_DDC_CTRL_TRANSACTION_CNT__MASK;
 }
 
+#define REG_HDMI_DDC_ARBITRATION				0x00000210
+#define HDMI_DDC_ARBITRATION_HW_ARBITRATION			0x00000010
+
 #define REG_HDMI_DDC_INT_CTRL					0x00000214
 #define HDMI_DDC_INT_CTRL_SW_DONE_INT				0x00000001
 #define HDMI_DDC_INT_CTRL_SW_DONE_ACK				0x00000002
@@ -340,6 +359,20 @@
 	return ((val) << HDMI_DDC_REF_REFTIMER__SHIFT) & HDMI_DDC_REF_REFTIMER__MASK;
 }
 
+#define REG_HDMI_CEC_STATUS					0x00000298
+
+#define REG_HDMI_CEC_INT					0x0000029c
+
+#define REG_HDMI_CEC_ADDR					0x000002a0
+
+#define REG_HDMI_CEC_TIME					0x000002a4
+
+#define REG_HDMI_CEC_REFTIMER					0x000002a8
+
+#define REG_HDMI_CEC_RD_DATA					0x000002ac
+
+#define REG_HDMI_CEC_RD_FILTER					0x000002b0
+
 #define REG_HDMI_ACTIVE_HSYNC					0x000002b4
 #define HDMI_ACTIVE_HSYNC_START__MASK				0x00000fff
 #define HDMI_ACTIVE_HSYNC_START__SHIFT				0
@@ -410,17 +443,33 @@
 #define HDMI_FRAME_CTRL_HSYNC_LOW				0x20000000
 #define HDMI_FRAME_CTRL_INTERLACED_EN				0x80000000
 
+#define REG_HDMI_AUD_INT					0x000002cc
+#define HDMI_AUD_INT_AUD_FIFO_URUN_INT				0x00000001
+#define HDMI_AUD_INT_AUD_FIFO_URAN_MASK				0x00000002
+#define HDMI_AUD_INT_AUD_SAM_DROP_INT				0x00000004
+#define HDMI_AUD_INT_AUD_SAM_DROP_MASK				0x00000008
+
 #define REG_HDMI_PHY_CTRL					0x000002d4
 #define HDMI_PHY_CTRL_SW_RESET_PLL				0x00000001
 #define HDMI_PHY_CTRL_SW_RESET_PLL_LOW				0x00000002
 #define HDMI_PHY_CTRL_SW_RESET					0x00000004
 #define HDMI_PHY_CTRL_SW_RESET_LOW				0x00000008
 
-#define REG_HDMI_AUD_INT					0x000002cc
-#define HDMI_AUD_INT_AUD_FIFO_URUN_INT				0x00000001
-#define HDMI_AUD_INT_AUD_FIFO_URAN_MASK				0x00000002
-#define HDMI_AUD_INT_AUD_SAM_DROP_INT				0x00000004
-#define HDMI_AUD_INT_AUD_SAM_DROP_MASK				0x00000008
+#define REG_HDMI_CEC_WR_RANGE					0x000002dc
+
+#define REG_HDMI_CEC_RD_RANGE					0x000002e0
+
+#define REG_HDMI_VERSION					0x000002e4
+
+#define REG_HDMI_CEC_COMPL_CTL					0x00000360
+
+#define REG_HDMI_CEC_RD_START_RANGE				0x00000364
+
+#define REG_HDMI_CEC_RD_TOTAL_RANGE				0x00000368
+
+#define REG_HDMI_CEC_RD_ERR_RESP_LO				0x0000036c
+
+#define REG_HDMI_CEC_WR_CHECK_CONFIG				0x00000370
 
 #define REG_HDMI_8x60_PHY_REG0					0x00000300
 #define HDMI_8x60_PHY_REG0_DESER_DEL_CTRL__MASK			0x0000001c
@@ -504,5 +553,23 @@
 
 #define REG_HDMI_8960_PHY_REG12					0x00000430
 
+#define REG_HDMI_8x74_ANA_CFG0					0x00000000
+
+#define REG_HDMI_8x74_ANA_CFG1					0x00000004
+
+#define REG_HDMI_8x74_PD_CTRL0					0x00000010
+
+#define REG_HDMI_8x74_PD_CTRL1					0x00000014
+
+#define REG_HDMI_8x74_BIST_CFG0					0x00000034
+
+#define REG_HDMI_8x74_BIST_PATN0				0x0000003c
+
+#define REG_HDMI_8x74_BIST_PATN1				0x00000040
+
+#define REG_HDMI_8x74_BIST_PATN2				0x00000044
+
+#define REG_HDMI_8x74_BIST_PATN3				0x00000048
+
 
 #endif /* HDMI_XML */
diff --git a/drivers/gpu/drm/msm/hdmi/hdmi_bridge.c b/drivers/gpu/drm/msm/hdmi/hdmi_bridge.c
index 5a8ee34..7d10e55 100644
--- a/drivers/gpu/drm/msm/hdmi/hdmi_bridge.c
+++ b/drivers/gpu/drm/msm/hdmi/hdmi_bridge.c
@@ -21,6 +21,7 @@
 	struct drm_bridge base;
 
 	struct hdmi *hdmi;
+	bool power_on;
 
 	unsigned long int pixclock;
 };
@@ -34,6 +35,65 @@
 	kfree(hdmi_bridge);
 }
 
+static void power_on(struct drm_bridge *bridge)
+{
+	struct drm_device *dev = bridge->dev;
+	struct hdmi_bridge *hdmi_bridge = to_hdmi_bridge(bridge);
+	struct hdmi *hdmi = hdmi_bridge->hdmi;
+	const struct hdmi_platform_config *config = hdmi->config;
+	int i, ret;
+
+	for (i = 0; i < config->pwr_reg_cnt; i++) {
+		ret = regulator_enable(hdmi->pwr_regs[i]);
+		if (ret) {
+			dev_err(dev->dev, "failed to enable pwr regulator: %s (%d)\n",
+					config->pwr_reg_names[i], ret);
+		}
+	}
+
+	if (config->pwr_clk_cnt > 0) {
+		DBG("pixclock: %lu", hdmi_bridge->pixclock);
+		ret = clk_set_rate(hdmi->pwr_clks[0], hdmi_bridge->pixclock);
+		if (ret) {
+			dev_err(dev->dev, "failed to set pixel clk: %s (%d)\n",
+					config->pwr_clk_names[0], ret);
+		}
+	}
+
+	for (i = 0; i < config->pwr_clk_cnt; i++) {
+		ret = clk_prepare_enable(hdmi->pwr_clks[i]);
+		if (ret) {
+			dev_err(dev->dev, "failed to enable pwr clk: %s (%d)\n",
+					config->pwr_clk_names[i], ret);
+		}
+	}
+}
+
+static void power_off(struct drm_bridge *bridge)
+{
+	struct drm_device *dev = bridge->dev;
+	struct hdmi_bridge *hdmi_bridge = to_hdmi_bridge(bridge);
+	struct hdmi *hdmi = hdmi_bridge->hdmi;
+	const struct hdmi_platform_config *config = hdmi->config;
+	int i, ret;
+
+	/* TODO do we need to wait for final vblank somewhere before
+	 * cutting the clocks?
+	 */
+	mdelay(16 + 4);
+
+	for (i = 0; i < config->pwr_clk_cnt; i++)
+		clk_disable_unprepare(hdmi->pwr_clks[i]);
+
+	for (i = 0; i < config->pwr_reg_cnt; i++) {
+		ret = regulator_disable(hdmi->pwr_regs[i]);
+		if (ret) {
+			dev_err(dev->dev, "failed to disable pwr regulator: %s (%d)\n",
+					config->pwr_reg_names[i], ret);
+		}
+	}
+}
+
 static void hdmi_bridge_pre_enable(struct drm_bridge *bridge)
 {
 	struct hdmi_bridge *hdmi_bridge = to_hdmi_bridge(bridge);
@@ -41,6 +101,12 @@
 	struct hdmi_phy *phy = hdmi->phy;
 
 	DBG("power up");
+
+	if (!hdmi_bridge->power_on) {
+		power_on(bridge);
+		hdmi_bridge->power_on = true;
+	}
+
 	phy->funcs->powerup(phy, hdmi_bridge->pixclock);
 	hdmi_set_mode(hdmi, true);
 }
@@ -62,6 +128,11 @@
 	DBG("power down");
 	hdmi_set_mode(hdmi, false);
 	phy->funcs->powerdown(phy);
+
+	if (hdmi_bridge->power_on) {
+		power_off(bridge);
+		hdmi_bridge->power_on = false;
+	}
 }
 
 static void hdmi_bridge_mode_set(struct drm_bridge *bridge,
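
Note on the hdmi_bridge.c hunks above: power_on()/power_off() are gated behind a power_on flag so pre_enable/post_disable stay balanced even if the DRM core invokes them more than once, and the pixel clock rate is set before the clocks are enabled. A minimal sketch of that guard pattern, assuming a single hypothetical regulator/clock pair rather than the driver's pwr_regs[]/pwr_clks[] arrays:

```c
/*
 * Hedged sketch of the power_on-flag guard used above.  The struct and
 * the single regulator/clock pair are hypothetical; the real driver
 * iterates arrays taken from its platform config.
 */
#include <linux/clk.h>
#include <linux/regulator/consumer.h>

struct bridge_power {
	struct regulator *vdd;		/* hypothetical supply */
	struct clk *pixclk;		/* hypothetical pixel clock */
	unsigned long rate;
	bool powered;
};

static int bridge_power_up(struct bridge_power *p)
{
	int ret;

	if (p->powered)
		return 0;			/* already on: stay balanced */

	ret = regulator_enable(p->vdd);
	if (ret)
		return ret;

	/* set the pixel clock rate before enabling it, as power_on() does */
	ret = clk_set_rate(p->pixclk, p->rate);
	if (!ret)
		ret = clk_prepare_enable(p->pixclk);
	if (ret) {
		regulator_disable(p->vdd);
		return ret;
	}

	p->powered = true;
	return 0;
}

static void bridge_power_down(struct bridge_power *p)
{
	if (!p->powered)
		return;				/* never powered up, nothing to undo */

	clk_disable_unprepare(p->pixclk);
	regulator_disable(p->vdd);
	p->powered = false;
}
```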
diff --git a/drivers/gpu/drm/msm/hdmi/hdmi_connector.c b/drivers/gpu/drm/msm/hdmi/hdmi_connector.c
index 823eee5..7dedfdd 100644
--- a/drivers/gpu/drm/msm/hdmi/hdmi_connector.c
+++ b/drivers/gpu/drm/msm/hdmi/hdmi_connector.c
@@ -17,19 +17,20 @@
 
 #include <linux/gpio.h>
 
+#include "msm_kms.h"
 #include "hdmi.h"
 
 struct hdmi_connector {
 	struct drm_connector base;
 	struct hdmi *hdmi;
+	struct work_struct hpd_work;
 };
 #define to_hdmi_connector(x) container_of(x, struct hdmi_connector, base)
 
 static int gpio_config(struct hdmi *hdmi, bool on)
 {
 	struct drm_device *dev = hdmi->dev;
-	struct hdmi_platform_config *config =
-			hdmi->pdev->dev.platform_data;
+	const struct hdmi_platform_config *config = hdmi->config;
 	int ret;
 
 	if (on) {
@@ -39,26 +40,43 @@
 				"HDMI_DDC_CLK", config->ddc_clk_gpio, ret);
 			goto error1;
 		}
+		gpio_set_value_cansleep(config->ddc_clk_gpio, 1);
+
 		ret = gpio_request(config->ddc_data_gpio, "HDMI_DDC_DATA");
 		if (ret) {
 			dev_err(dev->dev, "'%s'(%d) gpio_request failed: %d\n",
 				"HDMI_DDC_DATA", config->ddc_data_gpio, ret);
 			goto error2;
 		}
+		gpio_set_value_cansleep(config->ddc_data_gpio, 1);
+
 		ret = gpio_request(config->hpd_gpio, "HDMI_HPD");
 		if (ret) {
 			dev_err(dev->dev, "'%s'(%d) gpio_request failed: %d\n",
 				"HDMI_HPD", config->hpd_gpio, ret);
 			goto error3;
 		}
-		if (config->pmic_gpio != -1) {
-			ret = gpio_request(config->pmic_gpio, "PMIC_HDMI_MUX_SEL");
+		gpio_direction_input(config->hpd_gpio);
+		gpio_set_value_cansleep(config->hpd_gpio, 1);
+
+		if (config->mux_en_gpio != -1) {
+			ret = gpio_request(config->mux_en_gpio, "HDMI_MUX_EN");
 			if (ret) {
 				dev_err(dev->dev, "'%s'(%d) gpio_request failed: %d\n",
-					"PMIC_HDMI_MUX_SEL", config->pmic_gpio, ret);
+					"HDMI_MUX_SEL", config->mux_en_gpio, ret);
 				goto error4;
 			}
-			gpio_set_value_cansleep(config->pmic_gpio, 0);
+			gpio_set_value_cansleep(config->mux_en_gpio, 1);
+		}
+
+		if (config->mux_sel_gpio != -1) {
+			ret = gpio_request(config->mux_sel_gpio, "HDMI_MUX_SEL");
+			if (ret) {
+				dev_err(dev->dev, "'%s'(%d) gpio_request failed: %d\n",
+					"HDMI_MUX_SEL", config->mux_sel_gpio, ret);
+				goto error5;
+			}
+			gpio_set_value_cansleep(config->mux_sel_gpio, 0);
 		}
 		DBG("gpio on");
 	} else {
@@ -66,15 +84,23 @@
 		gpio_free(config->ddc_data_gpio);
 		gpio_free(config->hpd_gpio);
 
-		if (config->pmic_gpio != -1) {
-			gpio_set_value_cansleep(config->pmic_gpio, 1);
-			gpio_free(config->pmic_gpio);
+		if (config->mux_en_gpio != -1) {
+			gpio_set_value_cansleep(config->mux_en_gpio, 0);
+			gpio_free(config->mux_en_gpio);
+		}
+
+		if (config->mux_sel_gpio != -1) {
+			gpio_set_value_cansleep(config->mux_sel_gpio, 1);
+			gpio_free(config->mux_sel_gpio);
 		}
 		DBG("gpio off");
 	}
 
 	return 0;
 
+error5:
+	if (config->mux_en_gpio != -1)
+		gpio_free(config->mux_en_gpio);
 error4:
 	gpio_free(config->hpd_gpio);
 error3:
@@ -88,10 +114,11 @@
 static int hpd_enable(struct hdmi_connector *hdmi_connector)
 {
 	struct hdmi *hdmi = hdmi_connector->hdmi;
+	const struct hdmi_platform_config *config = hdmi->config;
 	struct drm_device *dev = hdmi_connector->base.dev;
 	struct hdmi_phy *phy = hdmi->phy;
 	uint32_t hpd_ctrl;
-	int ret;
+	int i, ret;
 
 	ret = gpio_config(hdmi, true);
 	if (ret) {
@@ -99,31 +126,22 @@
 		goto fail;
 	}
 
-	ret = clk_prepare_enable(hdmi->clk);
-	if (ret) {
-		dev_err(dev->dev, "failed to enable 'clk': %d\n", ret);
-		goto fail;
+	for (i = 0; i < config->hpd_clk_cnt; i++) {
+		ret = clk_prepare_enable(hdmi->hpd_clks[i]);
+		if (ret) {
+			dev_err(dev->dev, "failed to enable hpd clk: %s (%d)\n",
+					config->hpd_clk_names[i], ret);
+			goto fail;
+		}
 	}
 
-	ret = clk_prepare_enable(hdmi->m_pclk);
-	if (ret) {
-		dev_err(dev->dev, "failed to enable 'm_pclk': %d\n", ret);
-		goto fail;
-	}
-
-	ret = clk_prepare_enable(hdmi->s_pclk);
-	if (ret) {
-		dev_err(dev->dev, "failed to enable 's_pclk': %d\n", ret);
-		goto fail;
-	}
-
-	if (hdmi->mpp0)
-		ret = regulator_enable(hdmi->mpp0);
-	if (!ret)
-		ret = regulator_enable(hdmi->mvs);
-	if (ret) {
-		dev_err(dev->dev, "failed to enable regulators: %d\n", ret);
-		goto fail;
+	for (i = 0; i < config->hpd_reg_cnt; i++) {
+		ret = regulator_enable(hdmi->hpd_regs[i]);
+		if (ret) {
+			dev_err(dev->dev, "failed to enable hpd regulator: %s (%d)\n",
+					config->hpd_reg_names[i], ret);
+			goto fail;
+		}
 	}
 
 	hdmi_set_mode(hdmi, false);
@@ -156,26 +174,26 @@
 static int hdp_disable(struct hdmi_connector *hdmi_connector)
 {
 	struct hdmi *hdmi = hdmi_connector->hdmi;
+	const struct hdmi_platform_config *config = hdmi->config;
 	struct drm_device *dev = hdmi_connector->base.dev;
-	int ret = 0;
+	int i, ret = 0;
 
 	/* Disable HPD interrupt */
 	hdmi_write(hdmi, REG_HDMI_HPD_INT_CTRL, 0);
 
 	hdmi_set_mode(hdmi, false);
 
-	if (hdmi->mpp0)
-		ret = regulator_disable(hdmi->mpp0);
-	if (!ret)
-		ret = regulator_disable(hdmi->mvs);
-	if (ret) {
-		dev_err(dev->dev, "failed to enable regulators: %d\n", ret);
-		goto fail;
+	for (i = 0; i < config->hpd_reg_cnt; i++) {
+		ret = regulator_disable(hdmi->hpd_regs[i]);
+		if (ret) {
+			dev_err(dev->dev, "failed to disable hpd regulator: %s (%d)\n",
+					config->hpd_reg_names[i], ret);
+			goto fail;
+		}
 	}
 
-	clk_disable_unprepare(hdmi->clk);
-	clk_disable_unprepare(hdmi->m_pclk);
-	clk_disable_unprepare(hdmi->s_pclk);
+	for (i = 0; i < config->hpd_clk_cnt; i++)
+		clk_disable_unprepare(hdmi->hpd_clks[i]);
 
 	ret = gpio_config(hdmi, false);
 	if (ret) {
@@ -189,9 +207,19 @@
 	return ret;
 }
 
+static void
+hotplug_work(struct work_struct *work)
+{
+	struct hdmi_connector *hdmi_connector =
+		container_of(work, struct hdmi_connector, hpd_work);
+	struct drm_connector *connector = &hdmi_connector->base;
+	drm_helper_hpd_irq_event(connector->dev);
+}
+
 void hdmi_connector_irq(struct drm_connector *connector)
 {
 	struct hdmi_connector *hdmi_connector = to_hdmi_connector(connector);
+	struct msm_drm_private *priv = connector->dev->dev_private;
 	struct hdmi *hdmi = hdmi_connector->hdmi;
 	uint32_t hpd_int_status, hpd_int_ctrl;
 
@@ -209,13 +237,13 @@
 		hdmi_write(hdmi, REG_HDMI_HPD_INT_CTRL,
 				hpd_int_ctrl | HDMI_HPD_INT_CTRL_INT_ACK);
 
-		drm_helper_hpd_irq_event(connector->dev);
-
 		/* detect disconnect if we are connected or vice versa: */
 		hpd_int_ctrl = HDMI_HPD_INT_CTRL_INT_EN;
 		if (!detected)
 			hpd_int_ctrl |= HDMI_HPD_INT_CTRL_INT_CONNECT;
 		hdmi_write(hdmi, REG_HDMI_HPD_INT_CTRL, hpd_int_ctrl);
+
+		queue_work(priv->wq, &hdmi_connector->hpd_work);
 	}
 }
 
@@ -224,6 +252,7 @@
 {
 	struct hdmi_connector *hdmi_connector = to_hdmi_connector(connector);
 	struct hdmi *hdmi = hdmi_connector->hdmi;
+	const struct hdmi_platform_config *config = hdmi->config;
 	uint32_t hpd_int_status;
 	int retry = 20;
 
@@ -233,6 +262,14 @@
 	 * let that trick us into thinking the monitor is gone:
 	 */
 	while (retry-- && !(hpd_int_status & HDMI_HPD_INT_STATUS_CABLE_DETECTED)) {
+		/* hdmi debounce logic seems to get stuck sometimes,
+		 * read directly the gpio to get a second opinion:
+		 */
+		if (gpio_get_value(config->hpd_gpio)) {
+			DBG("gpio tells us we are connected!");
+			hpd_int_status |= HDMI_HPD_INT_STATUS_CABLE_DETECTED;
+			break;
+		}
 		mdelay(10);
 		hpd_int_status = hdmi_read(hdmi, REG_HDMI_HPD_INT_STATUS);
 		DBG("status=%08x", hpd_int_status);
@@ -285,6 +322,8 @@
 				 struct drm_display_mode *mode)
 {
 	struct hdmi_connector *hdmi_connector = to_hdmi_connector(connector);
+	struct hdmi *hdmi = hdmi_connector->hdmi;
+	const struct hdmi_platform_config *config = hdmi->config;
 	struct msm_drm_private *priv = connector->dev->dev_private;
 	struct msm_kms *kms = priv->kms;
 	long actual, requested;
@@ -293,6 +332,13 @@
 	actual = kms->funcs->round_pixclk(kms,
 			requested, hdmi_connector->hdmi->encoder);
 
+	/* for mdp5/apq8074, we manage our own pixel clk (as opposed to
+	 * mdp4/dtv stuff where pixel clk is assigned to mdp/encoder
+	 * instead):
+	 */
+	if (config->pwr_clk_cnt > 0)
+		actual = clk_round_rate(hdmi->pwr_clks[0], actual);
+
 	DBG("requested=%ld, actual=%ld", requested, actual);
 
 	if (actual != requested)
@@ -335,6 +381,7 @@
 	}
 
 	hdmi_connector->hdmi = hdmi_reference(hdmi);
+	INIT_WORK(&hdmi_connector->hpd_work, hotplug_work);
 
 	connector = &hdmi_connector->base;
 
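
Note on the hdmi_connector.c hunks above: the IRQ handler now only acks the HPD status and queues hpd_work, because drm_helper_hpd_irq_event() re-probes connectors and may sleep, which is not allowed in interrupt context. A generic sketch of that defer-to-work pattern, with made-up names and the system workqueue standing in for the driver's priv->wq:

```c
/*
 * Hedged sketch: defer sleeping hotplug handling out of the IRQ path.
 * Struct/function names are hypothetical.
 */
#include <linux/interrupt.h>
#include <linux/workqueue.h>
#include <drm/drm_crtc_helper.h>

struct my_hpd {
	struct drm_device *drm;
	struct work_struct hpd_work;
};

static void my_hpd_work(struct work_struct *work)
{
	struct my_hpd *hpd = container_of(work, struct my_hpd, hpd_work);

	/* safe to sleep here: re-probe connectors, send the uevent */
	drm_helper_hpd_irq_event(hpd->drm);
}

static irqreturn_t my_hpd_irq(int irq, void *arg)
{
	struct my_hpd *hpd = arg;

	/* ack the hardware status here (device specific), then defer */
	schedule_work(&hpd->hpd_work);
	return IRQ_HANDLED;
}

/* at init time: INIT_WORK(&hpd->hpd_work, my_hpd_work); */
```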
diff --git a/drivers/gpu/drm/msm/hdmi/hdmi_phy_8x74.c b/drivers/gpu/drm/msm/hdmi/hdmi_phy_8x74.c
new file mode 100644
index 0000000..59fa6cd
--- /dev/null
+++ b/drivers/gpu/drm/msm/hdmi/hdmi_phy_8x74.c
@@ -0,0 +1,157 @@
+/*
+ * Copyright (C) 2013 Red Hat
+ * Author: Rob Clark <robdclark@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "hdmi.h"
+
+struct hdmi_phy_8x74 {
+	struct hdmi_phy base;
+	struct hdmi *hdmi;
+	void __iomem *mmio;
+};
+#define to_hdmi_phy_8x74(x) container_of(x, struct hdmi_phy_8x74, base)
+
+
+static void phy_write(struct hdmi_phy_8x74 *phy, u32 reg, u32 data)
+{
+	msm_writel(data, phy->mmio + reg);
+}
+
+//static u32 phy_read(struct hdmi_phy_8x74 *phy, u32 reg)
+//{
+//	return msm_readl(phy->mmio + reg);
+//}
+
+static void hdmi_phy_8x74_destroy(struct hdmi_phy *phy)
+{
+	struct hdmi_phy_8x74 *phy_8x74 = to_hdmi_phy_8x74(phy);
+	kfree(phy_8x74);
+}
+
+static void hdmi_phy_8x74_reset(struct hdmi_phy *phy)
+{
+	struct hdmi_phy_8x74 *phy_8x74 = to_hdmi_phy_8x74(phy);
+	struct hdmi *hdmi = phy_8x74->hdmi;
+	unsigned int val;
+
+	/* NOTE that HDMI_PHY_CTL is in core mmio, not phy mmio: */
+
+	val = hdmi_read(hdmi, REG_HDMI_PHY_CTRL);
+
+	if (val & HDMI_PHY_CTRL_SW_RESET_LOW) {
+		/* pull low */
+		hdmi_write(hdmi, REG_HDMI_PHY_CTRL,
+				val & ~HDMI_PHY_CTRL_SW_RESET);
+	} else {
+		/* pull high */
+		hdmi_write(hdmi, REG_HDMI_PHY_CTRL,
+				val | HDMI_PHY_CTRL_SW_RESET);
+	}
+
+	if (val & HDMI_PHY_CTRL_SW_RESET_PLL_LOW) {
+		/* pull low */
+		hdmi_write(hdmi, REG_HDMI_PHY_CTRL,
+				val & ~HDMI_PHY_CTRL_SW_RESET_PLL);
+	} else {
+		/* pull high */
+		hdmi_write(hdmi, REG_HDMI_PHY_CTRL,
+				val | HDMI_PHY_CTRL_SW_RESET_PLL);
+	}
+
+	msleep(100);
+
+	if (val & HDMI_PHY_CTRL_SW_RESET_LOW) {
+		/* pull high */
+		hdmi_write(hdmi, REG_HDMI_PHY_CTRL,
+				val | HDMI_PHY_CTRL_SW_RESET);
+	} else {
+		/* pull low */
+		hdmi_write(hdmi, REG_HDMI_PHY_CTRL,
+				val & ~HDMI_PHY_CTRL_SW_RESET);
+	}
+
+	if (val & HDMI_PHY_CTRL_SW_RESET_PLL_LOW) {
+		/* pull high */
+		hdmi_write(hdmi, REG_HDMI_PHY_CTRL,
+				val | HDMI_PHY_CTRL_SW_RESET_PLL);
+	} else {
+		/* pull low */
+		hdmi_write(hdmi, REG_HDMI_PHY_CTRL,
+				val & ~HDMI_PHY_CTRL_SW_RESET_PLL);
+	}
+}
+
+static void hdmi_phy_8x74_powerup(struct hdmi_phy *phy,
+		unsigned long int pixclock)
+{
+	struct hdmi_phy_8x74 *phy_8x74 = to_hdmi_phy_8x74(phy);
+
+	phy_write(phy_8x74, REG_HDMI_8x74_ANA_CFG0,   0x1b);
+	phy_write(phy_8x74, REG_HDMI_8x74_ANA_CFG1,   0xf2);
+	phy_write(phy_8x74, REG_HDMI_8x74_BIST_CFG0,  0x0);
+	phy_write(phy_8x74, REG_HDMI_8x74_BIST_PATN0, 0x0);
+	phy_write(phy_8x74, REG_HDMI_8x74_BIST_PATN1, 0x0);
+	phy_write(phy_8x74, REG_HDMI_8x74_BIST_PATN2, 0x0);
+	phy_write(phy_8x74, REG_HDMI_8x74_BIST_PATN3, 0x0);
+	phy_write(phy_8x74, REG_HDMI_8x74_PD_CTRL1,   0x20);
+}
+
+static void hdmi_phy_8x74_powerdown(struct hdmi_phy *phy)
+{
+	struct hdmi_phy_8x74 *phy_8x74 = to_hdmi_phy_8x74(phy);
+	phy_write(phy_8x74, REG_HDMI_8x74_PD_CTRL0, 0x7f);
+}
+
+static const struct hdmi_phy_funcs hdmi_phy_8x74_funcs = {
+		.destroy = hdmi_phy_8x74_destroy,
+		.reset = hdmi_phy_8x74_reset,
+		.powerup = hdmi_phy_8x74_powerup,
+		.powerdown = hdmi_phy_8x74_powerdown,
+};
+
+struct hdmi_phy *hdmi_phy_8x74_init(struct hdmi *hdmi)
+{
+	struct hdmi_phy_8x74 *phy_8x74;
+	struct hdmi_phy *phy = NULL;
+	int ret;
+
+	phy_8x74 = kzalloc(sizeof(*phy_8x74), GFP_KERNEL);
+	if (!phy_8x74) {
+		ret = -ENOMEM;
+		goto fail;
+	}
+
+	phy = &phy_8x74->base;
+
+	phy->funcs = &hdmi_phy_8x74_funcs;
+
+	phy_8x74->hdmi = hdmi;
+
+	/* for 8x74, the phy mmio is mapped separately: */
+	phy_8x74->mmio = msm_ioremap(hdmi->pdev,
+			"phy_physical", "HDMI_8x74");
+	if (IS_ERR(phy_8x74->mmio)) {
+		ret = PTR_ERR(phy_8x74->mmio);
+		goto fail;
+	}
+
+	return phy;
+
+fail:
+	if (phy)
+		hdmi_phy_8x74_destroy(phy);
+	return ERR_PTR(ret);
+}
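
The new hdmi_phy_8x74_init() above returns ERR_PTR() on failure, so a caller is expected to use the IS_ERR()/PTR_ERR() idiom. A hypothetical caller, for illustration only:

```c
/* Hedged sketch of a caller; setup_phy() is hypothetical. */
#include <linux/err.h>

static int setup_phy(struct hdmi *hdmi)
{
	struct hdmi_phy *phy;

	phy = hdmi_phy_8x74_init(hdmi);
	if (IS_ERR(phy))
		return PTR_ERR(phy);	/* -ENOMEM, ioremap failure, ... */

	hdmi->phy = phy;
	return 0;
}
```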
diff --git a/drivers/gpu/drm/msm/hdmi/qfprom.xml.h b/drivers/gpu/drm/msm/hdmi/qfprom.xml.h
index dbde4f6..d591567 100644
--- a/drivers/gpu/drm/msm/hdmi/qfprom.xml.h
+++ b/drivers/gpu/drm/msm/hdmi/qfprom.xml.h
@@ -8,14 +8,16 @@
 git clone https://github.com/freedreno/envytools.git
 
 The rules-ng-ng source files this header was generated from are:
-- /home/robclark/src/freedreno/envytools/rnndb/msm.xml                 (    595 bytes, from 2013-07-05 19:21:12)
+- /home/robclark/src/freedreno/envytools/rnndb/msm.xml                 (    647 bytes, from 2013-11-30 14:45:35)
 - /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml (   1453 bytes, from 2013-03-31 16:51:27)
-- /home/robclark/src/freedreno/envytools/rnndb/mdp4/mdp4.xml           (  19332 bytes, from 2013-10-07 16:36:48)
+- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp4.xml            (  17996 bytes, from 2013-12-01 19:10:31)
+- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp_common.xml      (   1615 bytes, from 2013-11-30 15:00:52)
+- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp5.xml            (  22517 bytes, from 2013-12-03 20:59:13)
 - /home/robclark/src/freedreno/envytools/rnndb/dsi/dsi.xml             (  11712 bytes, from 2013-08-17 17:13:43)
 - /home/robclark/src/freedreno/envytools/rnndb/dsi/sfpb.xml            (    344 bytes, from 2013-08-11 19:26:32)
 - /home/robclark/src/freedreno/envytools/rnndb/dsi/mmss_cc.xml         (   1544 bytes, from 2013-08-16 19:17:05)
 - /home/robclark/src/freedreno/envytools/rnndb/hdmi/qfprom.xml         (    600 bytes, from 2013-07-05 19:21:12)
-- /home/robclark/src/freedreno/envytools/rnndb/hdmi/hdmi.xml           (  19288 bytes, from 2013-08-11 18:14:15)
+- /home/robclark/src/freedreno/envytools/rnndb/hdmi/hdmi.xml           (  20932 bytes, from 2013-12-01 15:13:04)
 
 Copyright (C) 2013 by the following authors:
 - Rob Clark <robdclark@gmail.com> (robclark)
diff --git a/drivers/gpu/drm/msm/mdp4/mdp4.xml.h b/drivers/gpu/drm/msm/mdp/mdp4/mdp4.xml.h
similarity index 94%
rename from drivers/gpu/drm/msm/mdp4/mdp4.xml.h
rename to drivers/gpu/drm/msm/mdp/mdp4/mdp4.xml.h
index 9908ffe..416a26e 100644
--- a/drivers/gpu/drm/msm/mdp4/mdp4.xml.h
+++ b/drivers/gpu/drm/msm/mdp/mdp4/mdp4.xml.h
@@ -8,14 +8,16 @@
 git clone https://github.com/freedreno/envytools.git
 
 The rules-ng-ng source files this header was generated from are:
-- /home/robclark/src/freedreno/envytools/rnndb/msm.xml                 (    595 bytes, from 2013-07-05 19:21:12)
+- /home/robclark/src/freedreno/envytools/rnndb/msm.xml                 (    647 bytes, from 2013-11-30 14:45:35)
 - /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml (   1453 bytes, from 2013-03-31 16:51:27)
-- /home/robclark/src/freedreno/envytools/rnndb/mdp4/mdp4.xml           (  19332 bytes, from 2013-10-07 16:36:48)
+- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp4.xml            (  17996 bytes, from 2013-12-01 19:10:31)
+- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp_common.xml      (   1615 bytes, from 2013-11-30 15:00:52)
+- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp5.xml            (  22517 bytes, from 2013-12-03 20:59:13)
 - /home/robclark/src/freedreno/envytools/rnndb/dsi/dsi.xml             (  11712 bytes, from 2013-08-17 17:13:43)
 - /home/robclark/src/freedreno/envytools/rnndb/dsi/sfpb.xml            (    344 bytes, from 2013-08-11 19:26:32)
 - /home/robclark/src/freedreno/envytools/rnndb/dsi/mmss_cc.xml         (   1544 bytes, from 2013-08-16 19:17:05)
 - /home/robclark/src/freedreno/envytools/rnndb/hdmi/qfprom.xml         (    600 bytes, from 2013-07-05 19:21:12)
-- /home/robclark/src/freedreno/envytools/rnndb/hdmi/hdmi.xml           (  19288 bytes, from 2013-08-11 18:14:15)
+- /home/robclark/src/freedreno/envytools/rnndb/hdmi/hdmi.xml           (  20932 bytes, from 2013-12-01 15:13:04)
 
 Copyright (C) 2013 by the following authors:
 - Rob Clark <robdclark@gmail.com> (robclark)
@@ -42,27 +44,6 @@
 */
 
 
-enum mdp4_bpc {
-	BPC1 = 0,
-	BPC5 = 1,
-	BPC6 = 2,
-	BPC8 = 3,
-};
-
-enum mdp4_bpc_alpha {
-	BPC1A = 0,
-	BPC4A = 1,
-	BPC6A = 2,
-	BPC8A = 3,
-};
-
-enum mdp4_alpha_type {
-	FG_CONST = 0,
-	BG_CONST = 1,
-	FG_PIXEL = 2,
-	BG_PIXEL = 3,
-};
-
 enum mdp4_pipe {
 	VG1 = 0,
 	VG2 = 1,
@@ -79,15 +60,6 @@
 	MIXER2 = 2,
 };
 
-enum mdp4_mixer_stage_id {
-	STAGE_UNUSED = 0,
-	STAGE_BASE = 1,
-	STAGE0 = 2,
-	STAGE1 = 3,
-	STAGE2 = 4,
-	STAGE3 = 5,
-};
-
 enum mdp4_intf {
 	INTF_LCDC_DTV = 0,
 	INTF_DSI_VIDEO = 1,
@@ -194,56 +166,56 @@
 #define REG_MDP4_LAYERMIXER2_IN_CFG				0x000100f0
 #define MDP4_LAYERMIXER2_IN_CFG_PIPE0__MASK			0x00000007
 #define MDP4_LAYERMIXER2_IN_CFG_PIPE0__SHIFT			0
-static inline uint32_t MDP4_LAYERMIXER2_IN_CFG_PIPE0(enum mdp4_mixer_stage_id val)
+static inline uint32_t MDP4_LAYERMIXER2_IN_CFG_PIPE0(enum mdp_mixer_stage_id val)
 {
 	return ((val) << MDP4_LAYERMIXER2_IN_CFG_PIPE0__SHIFT) & MDP4_LAYERMIXER2_IN_CFG_PIPE0__MASK;
 }
 #define MDP4_LAYERMIXER2_IN_CFG_PIPE0_MIXER1			0x00000008
 #define MDP4_LAYERMIXER2_IN_CFG_PIPE1__MASK			0x00000070
 #define MDP4_LAYERMIXER2_IN_CFG_PIPE1__SHIFT			4
-static inline uint32_t MDP4_LAYERMIXER2_IN_CFG_PIPE1(enum mdp4_mixer_stage_id val)
+static inline uint32_t MDP4_LAYERMIXER2_IN_CFG_PIPE1(enum mdp_mixer_stage_id val)
 {
 	return ((val) << MDP4_LAYERMIXER2_IN_CFG_PIPE1__SHIFT) & MDP4_LAYERMIXER2_IN_CFG_PIPE1__MASK;
 }
 #define MDP4_LAYERMIXER2_IN_CFG_PIPE1_MIXER1			0x00000080
 #define MDP4_LAYERMIXER2_IN_CFG_PIPE2__MASK			0x00000700
 #define MDP4_LAYERMIXER2_IN_CFG_PIPE2__SHIFT			8
-static inline uint32_t MDP4_LAYERMIXER2_IN_CFG_PIPE2(enum mdp4_mixer_stage_id val)
+static inline uint32_t MDP4_LAYERMIXER2_IN_CFG_PIPE2(enum mdp_mixer_stage_id val)
 {
 	return ((val) << MDP4_LAYERMIXER2_IN_CFG_PIPE2__SHIFT) & MDP4_LAYERMIXER2_IN_CFG_PIPE2__MASK;
 }
 #define MDP4_LAYERMIXER2_IN_CFG_PIPE2_MIXER1			0x00000800
 #define MDP4_LAYERMIXER2_IN_CFG_PIPE3__MASK			0x00007000
 #define MDP4_LAYERMIXER2_IN_CFG_PIPE3__SHIFT			12
-static inline uint32_t MDP4_LAYERMIXER2_IN_CFG_PIPE3(enum mdp4_mixer_stage_id val)
+static inline uint32_t MDP4_LAYERMIXER2_IN_CFG_PIPE3(enum mdp_mixer_stage_id val)
 {
 	return ((val) << MDP4_LAYERMIXER2_IN_CFG_PIPE3__SHIFT) & MDP4_LAYERMIXER2_IN_CFG_PIPE3__MASK;
 }
 #define MDP4_LAYERMIXER2_IN_CFG_PIPE3_MIXER1			0x00008000
 #define MDP4_LAYERMIXER2_IN_CFG_PIPE4__MASK			0x00070000
 #define MDP4_LAYERMIXER2_IN_CFG_PIPE4__SHIFT			16
-static inline uint32_t MDP4_LAYERMIXER2_IN_CFG_PIPE4(enum mdp4_mixer_stage_id val)
+static inline uint32_t MDP4_LAYERMIXER2_IN_CFG_PIPE4(enum mdp_mixer_stage_id val)
 {
 	return ((val) << MDP4_LAYERMIXER2_IN_CFG_PIPE4__SHIFT) & MDP4_LAYERMIXER2_IN_CFG_PIPE4__MASK;
 }
 #define MDP4_LAYERMIXER2_IN_CFG_PIPE4_MIXER1			0x00080000
 #define MDP4_LAYERMIXER2_IN_CFG_PIPE5__MASK			0x00700000
 #define MDP4_LAYERMIXER2_IN_CFG_PIPE5__SHIFT			20
-static inline uint32_t MDP4_LAYERMIXER2_IN_CFG_PIPE5(enum mdp4_mixer_stage_id val)
+static inline uint32_t MDP4_LAYERMIXER2_IN_CFG_PIPE5(enum mdp_mixer_stage_id val)
 {
 	return ((val) << MDP4_LAYERMIXER2_IN_CFG_PIPE5__SHIFT) & MDP4_LAYERMIXER2_IN_CFG_PIPE5__MASK;
 }
 #define MDP4_LAYERMIXER2_IN_CFG_PIPE5_MIXER1			0x00800000
 #define MDP4_LAYERMIXER2_IN_CFG_PIPE6__MASK			0x07000000
 #define MDP4_LAYERMIXER2_IN_CFG_PIPE6__SHIFT			24
-static inline uint32_t MDP4_LAYERMIXER2_IN_CFG_PIPE6(enum mdp4_mixer_stage_id val)
+static inline uint32_t MDP4_LAYERMIXER2_IN_CFG_PIPE6(enum mdp_mixer_stage_id val)
 {
 	return ((val) << MDP4_LAYERMIXER2_IN_CFG_PIPE6__SHIFT) & MDP4_LAYERMIXER2_IN_CFG_PIPE6__MASK;
 }
 #define MDP4_LAYERMIXER2_IN_CFG_PIPE6_MIXER1			0x08000000
 #define MDP4_LAYERMIXER2_IN_CFG_PIPE7__MASK			0x70000000
 #define MDP4_LAYERMIXER2_IN_CFG_PIPE7__SHIFT			28
-static inline uint32_t MDP4_LAYERMIXER2_IN_CFG_PIPE7(enum mdp4_mixer_stage_id val)
+static inline uint32_t MDP4_LAYERMIXER2_IN_CFG_PIPE7(enum mdp_mixer_stage_id val)
 {
 	return ((val) << MDP4_LAYERMIXER2_IN_CFG_PIPE7__SHIFT) & MDP4_LAYERMIXER2_IN_CFG_PIPE7__MASK;
 }
@@ -254,56 +226,56 @@
 #define REG_MDP4_LAYERMIXER_IN_CFG				0x00010100
 #define MDP4_LAYERMIXER_IN_CFG_PIPE0__MASK			0x00000007
 #define MDP4_LAYERMIXER_IN_CFG_PIPE0__SHIFT			0
-static inline uint32_t MDP4_LAYERMIXER_IN_CFG_PIPE0(enum mdp4_mixer_stage_id val)
+static inline uint32_t MDP4_LAYERMIXER_IN_CFG_PIPE0(enum mdp_mixer_stage_id val)
 {
 	return ((val) << MDP4_LAYERMIXER_IN_CFG_PIPE0__SHIFT) & MDP4_LAYERMIXER_IN_CFG_PIPE0__MASK;
 }
 #define MDP4_LAYERMIXER_IN_CFG_PIPE0_MIXER1			0x00000008
 #define MDP4_LAYERMIXER_IN_CFG_PIPE1__MASK			0x00000070
 #define MDP4_LAYERMIXER_IN_CFG_PIPE1__SHIFT			4
-static inline uint32_t MDP4_LAYERMIXER_IN_CFG_PIPE1(enum mdp4_mixer_stage_id val)
+static inline uint32_t MDP4_LAYERMIXER_IN_CFG_PIPE1(enum mdp_mixer_stage_id val)
 {
 	return ((val) << MDP4_LAYERMIXER_IN_CFG_PIPE1__SHIFT) & MDP4_LAYERMIXER_IN_CFG_PIPE1__MASK;
 }
 #define MDP4_LAYERMIXER_IN_CFG_PIPE1_MIXER1			0x00000080
 #define MDP4_LAYERMIXER_IN_CFG_PIPE2__MASK			0x00000700
 #define MDP4_LAYERMIXER_IN_CFG_PIPE2__SHIFT			8
-static inline uint32_t MDP4_LAYERMIXER_IN_CFG_PIPE2(enum mdp4_mixer_stage_id val)
+static inline uint32_t MDP4_LAYERMIXER_IN_CFG_PIPE2(enum mdp_mixer_stage_id val)
 {
 	return ((val) << MDP4_LAYERMIXER_IN_CFG_PIPE2__SHIFT) & MDP4_LAYERMIXER_IN_CFG_PIPE2__MASK;
 }
 #define MDP4_LAYERMIXER_IN_CFG_PIPE2_MIXER1			0x00000800
 #define MDP4_LAYERMIXER_IN_CFG_PIPE3__MASK			0x00007000
 #define MDP4_LAYERMIXER_IN_CFG_PIPE3__SHIFT			12
-static inline uint32_t MDP4_LAYERMIXER_IN_CFG_PIPE3(enum mdp4_mixer_stage_id val)
+static inline uint32_t MDP4_LAYERMIXER_IN_CFG_PIPE3(enum mdp_mixer_stage_id val)
 {
 	return ((val) << MDP4_LAYERMIXER_IN_CFG_PIPE3__SHIFT) & MDP4_LAYERMIXER_IN_CFG_PIPE3__MASK;
 }
 #define MDP4_LAYERMIXER_IN_CFG_PIPE3_MIXER1			0x00008000
 #define MDP4_LAYERMIXER_IN_CFG_PIPE4__MASK			0x00070000
 #define MDP4_LAYERMIXER_IN_CFG_PIPE4__SHIFT			16
-static inline uint32_t MDP4_LAYERMIXER_IN_CFG_PIPE4(enum mdp4_mixer_stage_id val)
+static inline uint32_t MDP4_LAYERMIXER_IN_CFG_PIPE4(enum mdp_mixer_stage_id val)
 {
 	return ((val) << MDP4_LAYERMIXER_IN_CFG_PIPE4__SHIFT) & MDP4_LAYERMIXER_IN_CFG_PIPE4__MASK;
 }
 #define MDP4_LAYERMIXER_IN_CFG_PIPE4_MIXER1			0x00080000
 #define MDP4_LAYERMIXER_IN_CFG_PIPE5__MASK			0x00700000
 #define MDP4_LAYERMIXER_IN_CFG_PIPE5__SHIFT			20
-static inline uint32_t MDP4_LAYERMIXER_IN_CFG_PIPE5(enum mdp4_mixer_stage_id val)
+static inline uint32_t MDP4_LAYERMIXER_IN_CFG_PIPE5(enum mdp_mixer_stage_id val)
 {
 	return ((val) << MDP4_LAYERMIXER_IN_CFG_PIPE5__SHIFT) & MDP4_LAYERMIXER_IN_CFG_PIPE5__MASK;
 }
 #define MDP4_LAYERMIXER_IN_CFG_PIPE5_MIXER1			0x00800000
 #define MDP4_LAYERMIXER_IN_CFG_PIPE6__MASK			0x07000000
 #define MDP4_LAYERMIXER_IN_CFG_PIPE6__SHIFT			24
-static inline uint32_t MDP4_LAYERMIXER_IN_CFG_PIPE6(enum mdp4_mixer_stage_id val)
+static inline uint32_t MDP4_LAYERMIXER_IN_CFG_PIPE6(enum mdp_mixer_stage_id val)
 {
 	return ((val) << MDP4_LAYERMIXER_IN_CFG_PIPE6__SHIFT) & MDP4_LAYERMIXER_IN_CFG_PIPE6__MASK;
 }
 #define MDP4_LAYERMIXER_IN_CFG_PIPE6_MIXER1			0x08000000
 #define MDP4_LAYERMIXER_IN_CFG_PIPE7__MASK			0x70000000
 #define MDP4_LAYERMIXER_IN_CFG_PIPE7__SHIFT			28
-static inline uint32_t MDP4_LAYERMIXER_IN_CFG_PIPE7(enum mdp4_mixer_stage_id val)
+static inline uint32_t MDP4_LAYERMIXER_IN_CFG_PIPE7(enum mdp_mixer_stage_id val)
 {
 	return ((val) << MDP4_LAYERMIXER_IN_CFG_PIPE7__SHIFT) & MDP4_LAYERMIXER_IN_CFG_PIPE7__MASK;
 }
@@ -369,7 +341,7 @@
 static inline uint32_t REG_MDP4_OVLP_STAGE_OP(uint32_t i0, uint32_t i1) { return 0x00000000 + __offset_OVLP(i0) + __offset_STAGE(i1); }
 #define MDP4_OVLP_STAGE_OP_FG_ALPHA__MASK			0x00000003
 #define MDP4_OVLP_STAGE_OP_FG_ALPHA__SHIFT			0
-static inline uint32_t MDP4_OVLP_STAGE_OP_FG_ALPHA(enum mdp4_alpha_type val)
+static inline uint32_t MDP4_OVLP_STAGE_OP_FG_ALPHA(enum mdp_alpha_type val)
 {
 	return ((val) << MDP4_OVLP_STAGE_OP_FG_ALPHA__SHIFT) & MDP4_OVLP_STAGE_OP_FG_ALPHA__MASK;
 }
@@ -377,7 +349,7 @@
 #define MDP4_OVLP_STAGE_OP_FG_MOD_ALPHA				0x00000008
 #define MDP4_OVLP_STAGE_OP_BG_ALPHA__MASK			0x00000030
 #define MDP4_OVLP_STAGE_OP_BG_ALPHA__SHIFT			4
-static inline uint32_t MDP4_OVLP_STAGE_OP_BG_ALPHA(enum mdp4_alpha_type val)
+static inline uint32_t MDP4_OVLP_STAGE_OP_BG_ALPHA(enum mdp_alpha_type val)
 {
 	return ((val) << MDP4_OVLP_STAGE_OP_BG_ALPHA__SHIFT) & MDP4_OVLP_STAGE_OP_BG_ALPHA__MASK;
 }
@@ -472,19 +444,19 @@
 static inline uint32_t REG_MDP4_DMA_CONFIG(enum mdp4_dma i0) { return 0x00000000 + __offset_DMA(i0); }
 #define MDP4_DMA_CONFIG_G_BPC__MASK				0x00000003
 #define MDP4_DMA_CONFIG_G_BPC__SHIFT				0
-static inline uint32_t MDP4_DMA_CONFIG_G_BPC(enum mdp4_bpc val)
+static inline uint32_t MDP4_DMA_CONFIG_G_BPC(enum mdp_bpc val)
 {
 	return ((val) << MDP4_DMA_CONFIG_G_BPC__SHIFT) & MDP4_DMA_CONFIG_G_BPC__MASK;
 }
 #define MDP4_DMA_CONFIG_B_BPC__MASK				0x0000000c
 #define MDP4_DMA_CONFIG_B_BPC__SHIFT				2
-static inline uint32_t MDP4_DMA_CONFIG_B_BPC(enum mdp4_bpc val)
+static inline uint32_t MDP4_DMA_CONFIG_B_BPC(enum mdp_bpc val)
 {
 	return ((val) << MDP4_DMA_CONFIG_B_BPC__SHIFT) & MDP4_DMA_CONFIG_B_BPC__MASK;
 }
 #define MDP4_DMA_CONFIG_R_BPC__MASK				0x00000030
 #define MDP4_DMA_CONFIG_R_BPC__SHIFT				4
-static inline uint32_t MDP4_DMA_CONFIG_R_BPC(enum mdp4_bpc val)
+static inline uint32_t MDP4_DMA_CONFIG_R_BPC(enum mdp_bpc val)
 {
 	return ((val) << MDP4_DMA_CONFIG_R_BPC__SHIFT) & MDP4_DMA_CONFIG_R_BPC__MASK;
 }
@@ -710,25 +682,25 @@
 static inline uint32_t REG_MDP4_PIPE_SRC_FORMAT(enum mdp4_pipe i0) { return 0x00020050 + 0x10000*i0; }
 #define MDP4_PIPE_SRC_FORMAT_G_BPC__MASK			0x00000003
 #define MDP4_PIPE_SRC_FORMAT_G_BPC__SHIFT			0
-static inline uint32_t MDP4_PIPE_SRC_FORMAT_G_BPC(enum mdp4_bpc val)
+static inline uint32_t MDP4_PIPE_SRC_FORMAT_G_BPC(enum mdp_bpc val)
 {
 	return ((val) << MDP4_PIPE_SRC_FORMAT_G_BPC__SHIFT) & MDP4_PIPE_SRC_FORMAT_G_BPC__MASK;
 }
 #define MDP4_PIPE_SRC_FORMAT_B_BPC__MASK			0x0000000c
 #define MDP4_PIPE_SRC_FORMAT_B_BPC__SHIFT			2
-static inline uint32_t MDP4_PIPE_SRC_FORMAT_B_BPC(enum mdp4_bpc val)
+static inline uint32_t MDP4_PIPE_SRC_FORMAT_B_BPC(enum mdp_bpc val)
 {
 	return ((val) << MDP4_PIPE_SRC_FORMAT_B_BPC__SHIFT) & MDP4_PIPE_SRC_FORMAT_B_BPC__MASK;
 }
 #define MDP4_PIPE_SRC_FORMAT_R_BPC__MASK			0x00000030
 #define MDP4_PIPE_SRC_FORMAT_R_BPC__SHIFT			4
-static inline uint32_t MDP4_PIPE_SRC_FORMAT_R_BPC(enum mdp4_bpc val)
+static inline uint32_t MDP4_PIPE_SRC_FORMAT_R_BPC(enum mdp_bpc val)
 {
 	return ((val) << MDP4_PIPE_SRC_FORMAT_R_BPC__SHIFT) & MDP4_PIPE_SRC_FORMAT_R_BPC__MASK;
 }
 #define MDP4_PIPE_SRC_FORMAT_A_BPC__MASK			0x000000c0
 #define MDP4_PIPE_SRC_FORMAT_A_BPC__SHIFT			6
-static inline uint32_t MDP4_PIPE_SRC_FORMAT_A_BPC(enum mdp4_bpc_alpha val)
+static inline uint32_t MDP4_PIPE_SRC_FORMAT_A_BPC(enum mdp_bpc_alpha val)
 {
 	return ((val) << MDP4_PIPE_SRC_FORMAT_A_BPC__SHIFT) & MDP4_PIPE_SRC_FORMAT_A_BPC__MASK;
 }
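
The generated header pairs each field's __MASK/__SHIFT defines with a small inline that shifts and masks the value, so a register word is built by OR-ing field helpers together. A hedged example of composing one such word with names from the header above (the stage assignments are arbitrary):

```c
/*
 * Hedged example: composing a register word from the generated field
 * helpers.  STAGE_* now come from the shared mdp header, and
 * mdp4_write() is the driver's register accessor.
 */
static void example_mixer_cfg(struct mdp4_kms *mdp4_kms)
{
	uint32_t cfg = 0;

	cfg |= MDP4_LAYERMIXER_IN_CFG_PIPE0(STAGE_BASE);	/* pipe 0: base stage */
	cfg |= MDP4_LAYERMIXER_IN_CFG_PIPE1(STAGE0);		/* pipe 1: stage 0 */
	cfg |= MDP4_LAYERMIXER_IN_CFG_PIPE1_MIXER1;		/* pipe 1 feeds mixer 1 */

	mdp4_write(mdp4_kms, REG_MDP4_LAYERMIXER_IN_CFG, cfg);
}
```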
diff --git a/drivers/gpu/drm/msm/mdp4/mdp4_crtc.c b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_crtc.c
similarity index 87%
rename from drivers/gpu/drm/msm/mdp4/mdp4_crtc.c
rename to drivers/gpu/drm/msm/mdp/mdp4/mdp4_crtc.c
index 019d530..84c5b13 100644
--- a/drivers/gpu/drm/msm/mdp4/mdp4_crtc.c
+++ b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_crtc.c
@@ -39,6 +39,7 @@
 		spinlock_t lock;
 		bool stale;
 		uint32_t width, height;
+		uint32_t x, y;
 
 		/* next cursor to scan-out: */
 		uint32_t next_iova;
@@ -57,44 +58,100 @@
 #define PENDING_FLIP   0x2
 	atomic_t pending;
 
-	/* the fb that we currently hold a scanout ref to: */
+	/* the fb that we logically (from the PoV of the KMS API) hold a
+	 * ref to, which we may not yet be scanning out (we may still be
+	 * scanning out the previous fb if a page_flip is waiting for gpu
+	 * rendering to complete):
+	 */
 	struct drm_framebuffer *fb;
 
+	/* the fb that we currently hold a scanout ref to: */
+	struct drm_framebuffer *scanout_fb;
+
 	/* for unref'ing framebuffers after scanout completes: */
 	struct drm_flip_work unref_fb_work;
 
 	/* for unref'ing cursor bo's after scanout completes: */
 	struct drm_flip_work unref_cursor_work;
 
-	struct mdp4_irq vblank;
-	struct mdp4_irq err;
+	struct mdp_irq vblank;
+	struct mdp_irq err;
 };
 #define to_mdp4_crtc(x) container_of(x, struct mdp4_crtc, base)
 
 static struct mdp4_kms *get_kms(struct drm_crtc *crtc)
 {
 	struct msm_drm_private *priv = crtc->dev->dev_private;
-	return to_mdp4_kms(priv->kms);
+	return to_mdp4_kms(to_mdp_kms(priv->kms));
 }
 
-static void update_fb(struct drm_crtc *crtc, bool async,
-		struct drm_framebuffer *new_fb)
+static void request_pending(struct drm_crtc *crtc, uint32_t pending)
+{
+	struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc);
+
+	atomic_or(pending, &mdp4_crtc->pending);
+	mdp_irq_register(&get_kms(crtc)->base, &mdp4_crtc->vblank);
+}
+
+static void crtc_flush(struct drm_crtc *crtc)
+{
+	struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc);
+	struct mdp4_kms *mdp4_kms = get_kms(crtc);
+	uint32_t i, flush = 0;
+
+	for (i = 0; i < ARRAY_SIZE(mdp4_crtc->planes); i++) {
+		struct drm_plane *plane = mdp4_crtc->planes[i];
+		if (plane) {
+			enum mdp4_pipe pipe_id = mdp4_plane_pipe(plane);
+			flush |= pipe2flush(pipe_id);
+		}
+	}
+	flush |= ovlp2flush(mdp4_crtc->ovlp);
+
+	DBG("%s: flush=%08x", mdp4_crtc->name, flush);
+
+	mdp4_write(mdp4_kms, REG_MDP4_OVERLAY_FLUSH, flush);
+}
+
+static void update_fb(struct drm_crtc *crtc, struct drm_framebuffer *new_fb)
 {
 	struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc);
 	struct drm_framebuffer *old_fb = mdp4_crtc->fb;
 
-	if (old_fb)
-		drm_flip_work_queue(&mdp4_crtc->unref_fb_work, old_fb);
-
 	/* grab reference to incoming scanout fb: */
 	drm_framebuffer_reference(new_fb);
 	mdp4_crtc->base.fb = new_fb;
 	mdp4_crtc->fb = new_fb;
 
-	if (!async) {
-		/* enable vblank to pick up the old_fb */
-		mdp4_irq_register(get_kms(crtc), &mdp4_crtc->vblank);
-	}
+	if (old_fb)
+		drm_flip_work_queue(&mdp4_crtc->unref_fb_work, old_fb);
+}
+
+/* unlike update_fb(), take a ref to the new scanout fb *before* updating
+ * plane, then call this.  Needed to ensure we don't unref the buffer that
+ * is actually still being scanned out.
+ *
+ * Note that this whole thing goes away with atomic.. since we can defer
+ * calling into driver until rendering is done.
+ */
+static void update_scanout(struct drm_crtc *crtc, struct drm_framebuffer *fb)
+{
+	struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc);
+
+	/* flush updates, to make sure hw is updated to new scanout fb,
+	 * so that we can safely queue unref to current fb (ie. next
+	 * vblank we know hw is done w/ previous scanout_fb).
+	 */
+	crtc_flush(crtc);
+
+	if (mdp4_crtc->scanout_fb)
+		drm_flip_work_queue(&mdp4_crtc->unref_fb_work,
+				mdp4_crtc->scanout_fb);
+
+	mdp4_crtc->scanout_fb = fb;
+
+	/* enable vblank to complete flip: */
+	request_pending(crtc, PENDING_FLIP);
 }
 
 /* if file!=NULL, this is preclose potential cancel-flip path */
@@ -120,34 +177,6 @@
 	spin_unlock_irqrestore(&dev->event_lock, flags);
 }
 
-static void crtc_flush(struct drm_crtc *crtc)
-{
-	struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc);
-	struct mdp4_kms *mdp4_kms = get_kms(crtc);
-	uint32_t i, flush = 0;
-
-	for (i = 0; i < ARRAY_SIZE(mdp4_crtc->planes); i++) {
-		struct drm_plane *plane = mdp4_crtc->planes[i];
-		if (plane) {
-			enum mdp4_pipe pipe_id = mdp4_plane_pipe(plane);
-			flush |= pipe2flush(pipe_id);
-		}
-	}
-	flush |= ovlp2flush(mdp4_crtc->ovlp);
-
-	DBG("%s: flush=%08x", mdp4_crtc->name, flush);
-
-	mdp4_write(mdp4_kms, REG_MDP4_OVERLAY_FLUSH, flush);
-}
-
-static void request_pending(struct drm_crtc *crtc, uint32_t pending)
-{
-	struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc);
-
-	atomic_or(pending, &mdp4_crtc->pending);
-	mdp4_irq_register(get_kms(crtc), &mdp4_crtc->vblank);
-}
-
 static void pageflip_cb(struct msm_fence_cb *cb)
 {
 	struct mdp4_crtc *mdp4_crtc =
@@ -158,11 +187,9 @@
 	if (!fb)
 		return;
 
+	drm_framebuffer_reference(fb);
 	mdp4_plane_set_scanout(mdp4_crtc->plane, fb);
-	crtc_flush(crtc);
-
-	/* enable vblank to complete flip: */
-	request_pending(crtc, PENDING_FLIP);
+	update_scanout(crtc, fb);
 }
 
 static void unref_fb_worker(struct drm_flip_work *work, void *val)
@@ -210,9 +237,9 @@
 	if (enabled != mdp4_crtc->enabled) {
 		if (enabled) {
 			mdp4_enable(mdp4_kms);
-			mdp4_irq_register(mdp4_kms, &mdp4_crtc->err);
+			mdp_irq_register(&mdp4_kms->base, &mdp4_crtc->err);
 		} else {
-			mdp4_irq_unregister(mdp4_kms, &mdp4_crtc->err);
+			mdp_irq_unregister(&mdp4_kms->base, &mdp4_crtc->err);
 			mdp4_disable(mdp4_kms);
 		}
 		mdp4_crtc->enabled = enabled;
@@ -232,7 +259,7 @@
 	struct mdp4_kms *mdp4_kms = get_kms(crtc);
 	int i, ovlp = mdp4_crtc->ovlp;
 	uint32_t mixer_cfg = 0;
-	static const enum mdp4_mixer_stage_id stages[] = {
+	static const enum mdp_mixer_stage_id stages[] = {
 			STAGE_BASE, STAGE0, STAGE1, STAGE2, STAGE3,
 	};
 	/* statically (for now) map planes to mixer stage (z-order): */
@@ -262,8 +289,8 @@
 			enum mdp4_pipe pipe_id = mdp4_plane_pipe(plane);
 			int idx = idxs[pipe_id];
 			if (idx > 0) {
-				const struct mdp4_format *format =
-					to_mdp4_format(msm_framebuffer_format(plane->fb));
+				const struct mdp_format *format =
+					to_mdp_format(msm_framebuffer_format(plane->fb));
 				alpha[idx-1] = format->alpha_enable;
 			}
 			mixer_cfg |= mixercfg(mdp4_crtc->mixer, pipe_id, stages[idx]);
@@ -320,6 +347,20 @@
 			mode->vsync_end, mode->vtotal,
 			mode->type, mode->flags);
 
+	/* grab extra ref for update_scanout() */
+	drm_framebuffer_reference(crtc->fb);
+
+	ret = mdp4_plane_mode_set(mdp4_crtc->plane, crtc, crtc->fb,
+			0, 0, mode->hdisplay, mode->vdisplay,
+			x << 16, y << 16,
+			mode->hdisplay << 16, mode->vdisplay << 16);
+	if (ret) {
+		drm_framebuffer_unreference(crtc->fb);
+		dev_err(crtc->dev->dev, "%s: failed to set mode on plane: %d\n",
+				mdp4_crtc->name, ret);
+		return ret;
+	}
+
 	mdp4_write(mdp4_kms, REG_MDP4_DMA_SRC_SIZE(dma),
 			MDP4_DMA_SRC_SIZE_WIDTH(mode->hdisplay) |
 			MDP4_DMA_SRC_SIZE_HEIGHT(mode->vdisplay));
@@ -341,24 +382,15 @@
 
 	mdp4_write(mdp4_kms, REG_MDP4_OVLP_CFG(ovlp), 1);
 
-	update_fb(crtc, false, crtc->fb);
-
-	ret = mdp4_plane_mode_set(mdp4_crtc->plane, crtc, crtc->fb,
-			0, 0, mode->hdisplay, mode->vdisplay,
-			x << 16, y << 16,
-			mode->hdisplay << 16, mode->vdisplay << 16);
-	if (ret) {
-		dev_err(crtc->dev->dev, "%s: failed to set mode on plane: %d\n",
-				mdp4_crtc->name, ret);
-		return ret;
-	}
-
 	if (dma == DMA_E) {
 		mdp4_write(mdp4_kms, REG_MDP4_DMA_E_QUANT(0), 0x00ff0000);
 		mdp4_write(mdp4_kms, REG_MDP4_DMA_E_QUANT(1), 0x00ff0000);
 		mdp4_write(mdp4_kms, REG_MDP4_DMA_E_QUANT(2), 0x00ff0000);
 	}
 
+	update_fb(crtc, crtc->fb);
+	update_scanout(crtc, crtc->fb);
+
 	return 0;
 }
 
@@ -385,13 +417,24 @@
 	struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc);
 	struct drm_plane *plane = mdp4_crtc->plane;
 	struct drm_display_mode *mode = &crtc->mode;
+	int ret;
 
-	update_fb(crtc, false, crtc->fb);
+	/* grab extra ref for update_scanout() */
+	drm_framebuffer_reference(crtc->fb);
 
-	return mdp4_plane_mode_set(plane, crtc, crtc->fb,
+	ret = mdp4_plane_mode_set(plane, crtc, crtc->fb,
 			0, 0, mode->hdisplay, mode->vdisplay,
 			x << 16, y << 16,
 			mode->hdisplay << 16, mode->vdisplay << 16);
+	if (ret) {
+		drm_framebuffer_unreference(crtc->fb);
+		return ret;
+	}
+
+	update_fb(crtc, crtc->fb);
+	update_scanout(crtc, crtc->fb);
+
+	return 0;
 }
 
 static void mdp4_crtc_load_lut(struct drm_crtc *crtc)
@@ -419,7 +462,7 @@
 	mdp4_crtc->event = event;
 	spin_unlock_irqrestore(&dev->event_lock, flags);
 
-	update_fb(crtc, true, new_fb);
+	update_fb(crtc, new_fb);
 
 	return msm_gem_queue_inactive_cb(obj, &mdp4_crtc->pageflip_cb);
 }
@@ -442,12 +485,12 @@
 static void update_cursor(struct drm_crtc *crtc)
 {
 	struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc);
+	struct mdp4_kms *mdp4_kms = get_kms(crtc);
 	enum mdp4_dma dma = mdp4_crtc->dma;
 	unsigned long flags;
 
 	spin_lock_irqsave(&mdp4_crtc->cursor.lock, flags);
 	if (mdp4_crtc->cursor.stale) {
-		struct mdp4_kms *mdp4_kms = get_kms(crtc);
 		struct drm_gem_object *next_bo = mdp4_crtc->cursor.next_bo;
 		struct drm_gem_object *prev_bo = mdp4_crtc->cursor.scanout_bo;
 		uint32_t iova = mdp4_crtc->cursor.next_iova;
@@ -479,6 +522,11 @@
 		mdp4_crtc->cursor.scanout_bo = next_bo;
 		mdp4_crtc->cursor.stale = false;
 	}
+
+	mdp4_write(mdp4_kms, REG_MDP4_DMA_CURSOR_POS(dma),
+			MDP4_DMA_CURSOR_POS_X(mdp4_crtc->cursor.x) |
+			MDP4_DMA_CURSOR_POS_Y(mdp4_crtc->cursor.y));
+
 	spin_unlock_irqrestore(&mdp4_crtc->cursor.lock, flags);
 }
 
@@ -530,6 +578,7 @@
 		drm_gem_object_unreference_unlocked(old_bo);
 	}
 
+	crtc_flush(crtc);
 	request_pending(crtc, PENDING_CURSOR);
 
 	return 0;
@@ -542,12 +591,15 @@
 static int mdp4_crtc_cursor_move(struct drm_crtc *crtc, int x, int y)
 {
 	struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc);
-	struct mdp4_kms *mdp4_kms = get_kms(crtc);
-	enum mdp4_dma dma = mdp4_crtc->dma;
+	unsigned long flags;
 
-	mdp4_write(mdp4_kms, REG_MDP4_DMA_CURSOR_POS(dma),
-			MDP4_DMA_CURSOR_POS_X(x) |
-			MDP4_DMA_CURSOR_POS_Y(y));
+	spin_lock_irqsave(&mdp4_crtc->cursor.lock, flags);
+	mdp4_crtc->cursor.x = x;
+	mdp4_crtc->cursor.y = y;
+	spin_unlock_irqrestore(&mdp4_crtc->cursor.lock, flags);
+
+	crtc_flush(crtc);
+	request_pending(crtc, PENDING_CURSOR);
 
 	return 0;
 }
@@ -571,14 +623,14 @@
 	.load_lut = mdp4_crtc_load_lut,
 };
 
-static void mdp4_crtc_vblank_irq(struct mdp4_irq *irq, uint32_t irqstatus)
+static void mdp4_crtc_vblank_irq(struct mdp_irq *irq, uint32_t irqstatus)
 {
 	struct mdp4_crtc *mdp4_crtc = container_of(irq, struct mdp4_crtc, vblank);
 	struct drm_crtc *crtc = &mdp4_crtc->base;
 	struct msm_drm_private *priv = crtc->dev->dev_private;
 	unsigned pending;
 
-	mdp4_irq_unregister(get_kms(crtc), &mdp4_crtc->vblank);
+	mdp_irq_unregister(&get_kms(crtc)->base, &mdp4_crtc->vblank);
 
 	pending = atomic_xchg(&mdp4_crtc->pending, 0);
 
@@ -593,7 +645,7 @@
 	}
 }
 
-static void mdp4_crtc_err_irq(struct mdp4_irq *irq, uint32_t irqstatus)
+static void mdp4_crtc_err_irq(struct mdp_irq *irq, uint32_t irqstatus)
 {
 	struct mdp4_crtc *mdp4_crtc = container_of(irq, struct mdp4_crtc, err);
 	struct drm_crtc *crtc = &mdp4_crtc->base;
@@ -713,6 +765,7 @@
 	crtc = &mdp4_crtc->base;
 
 	mdp4_crtc->plane = plane;
+	mdp4_crtc->id = id;
 
 	mdp4_crtc->ovlp = ovlp_id;
 	mdp4_crtc->dma = dma_id;
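
The mdp4_crtc.c rework above separates the fb the KMS API considers current (update_fb) from the fb the hardware is actually scanning out (update_scanout): an extra reference is taken before the plane is reprogrammed, and the previously scanned-out buffer is only queued for unref once the flush has been issued and a vblank confirms the hardware has moved on. A hedged sketch of that sequence, consolidated into one hypothetical helper:

```c
/*
 * Hedged sketch of the sequence shared by mode_set, mode_set_base and
 * the page-flip callback above.  flip_to() is a hypothetical
 * consolidation; the individual calls mirror the driver code.
 */
static int flip_to(struct drm_crtc *crtc, struct drm_framebuffer *new_fb,
		int x, int y)
{
	struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc);
	struct drm_display_mode *mode = &crtc->mode;
	int ret;

	/* take the scanout reference *before* reprogramming the plane */
	drm_framebuffer_reference(new_fb);

	ret = mdp4_plane_mode_set(mdp4_crtc->plane, crtc, new_fb,
			0, 0, mode->hdisplay, mode->vdisplay,
			x << 16, y << 16,
			mode->hdisplay << 16, mode->vdisplay << 16);
	if (ret) {
		drm_framebuffer_unreference(new_fb);	/* drop the extra ref */
		return ret;
	}

	update_fb(crtc, new_fb);	/* KMS-visible fb; old fb queued for unref */
	update_scanout(crtc, new_fb);	/* flush hw; previous scanout fb is only
					 * unref'd after the next vblank */
	return 0;
}
```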
diff --git a/drivers/gpu/drm/msm/mdp4/mdp4_dtv_encoder.c b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_dtv_encoder.c
similarity index 98%
rename from drivers/gpu/drm/msm/mdp4/mdp4_dtv_encoder.c
rename to drivers/gpu/drm/msm/mdp/mdp4/mdp4_dtv_encoder.c
index 5e0dcae..067ed03 100644
--- a/drivers/gpu/drm/msm/mdp4/mdp4_dtv_encoder.c
+++ b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_dtv_encoder.c
@@ -15,8 +15,6 @@
  * this program.  If not, see <http://www.gnu.org/licenses/>.
  */
 
-#include <mach/clk.h>
-
 #include "mdp4_kms.h"
 
 #include "drm_crtc.h"
@@ -37,7 +35,7 @@
 static struct mdp4_kms *get_kms(struct drm_encoder *encoder)
 {
 	struct msm_drm_private *priv = encoder->dev->dev_private;
-	return to_mdp4_kms(priv->kms);
+	return to_mdp4_kms(to_mdp_kms(priv->kms));
 }
 
 #ifdef CONFIG_MSM_BUS_SCALING
@@ -139,7 +137,7 @@
 		 * the settings changes for the new modeset (like new
 		 * scanout buffer) don't latch properly..
 		 */
-		mdp4_irq_wait(mdp4_kms, MDP4_IRQ_EXTERNAL_VSYNC);
+		mdp_irq_wait(&mdp4_kms->base, MDP4_IRQ_EXTERNAL_VSYNC);
 
 		clk_disable_unprepare(mdp4_dtv_encoder->src_clk);
 		clk_disable_unprepare(mdp4_dtv_encoder->hdmi_clk);
diff --git a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_irq.c b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_irq.c
new file mode 100644
index 0000000..c740ccd
--- /dev/null
+++ b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_irq.c
@@ -0,0 +1,93 @@
+/*
+ * Copyright (C) 2013 Red Hat
+ * Author: Rob Clark <robdclark@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+
+
+#include "msm_drv.h"
+#include "mdp4_kms.h"
+
+void mdp4_set_irqmask(struct mdp_kms *mdp_kms, uint32_t irqmask)
+{
+	mdp4_write(to_mdp4_kms(mdp_kms), REG_MDP4_INTR_ENABLE, irqmask);
+}
+
+static void mdp4_irq_error_handler(struct mdp_irq *irq, uint32_t irqstatus)
+{
+	DRM_ERROR("errors: %08x\n", irqstatus);
+}
+
+void mdp4_irq_preinstall(struct msm_kms *kms)
+{
+	struct mdp4_kms *mdp4_kms = to_mdp4_kms(to_mdp_kms(kms));
+	mdp4_write(mdp4_kms, REG_MDP4_INTR_CLEAR, 0xffffffff);
+}
+
+int mdp4_irq_postinstall(struct msm_kms *kms)
+{
+	struct mdp_kms *mdp_kms = to_mdp_kms(kms);
+	struct mdp4_kms *mdp4_kms = to_mdp4_kms(mdp_kms);
+	struct mdp_irq *error_handler = &mdp4_kms->error_handler;
+
+	error_handler->irq = mdp4_irq_error_handler;
+	error_handler->irqmask = MDP4_IRQ_PRIMARY_INTF_UDERRUN |
+			MDP4_IRQ_EXTERNAL_INTF_UDERRUN;
+
+	mdp_irq_register(mdp_kms, error_handler);
+
+	return 0;
+}
+
+void mdp4_irq_uninstall(struct msm_kms *kms)
+{
+	struct mdp4_kms *mdp4_kms = to_mdp4_kms(to_mdp_kms(kms));
+	mdp4_write(mdp4_kms, REG_MDP4_INTR_ENABLE, 0x00000000);
+}
+
+irqreturn_t mdp4_irq(struct msm_kms *kms)
+{
+	struct mdp_kms *mdp_kms = to_mdp_kms(kms);
+	struct mdp4_kms *mdp4_kms = to_mdp4_kms(mdp_kms);
+	struct drm_device *dev = mdp4_kms->dev;
+	struct msm_drm_private *priv = dev->dev_private;
+	unsigned int id;
+	uint32_t status;
+
+	status = mdp4_read(mdp4_kms, REG_MDP4_INTR_STATUS);
+	mdp4_write(mdp4_kms, REG_MDP4_INTR_CLEAR, status);
+
+	VERB("status=%08x", status);
+
+	for (id = 0; id < priv->num_crtcs; id++)
+		if (status & mdp4_crtc_vblank(priv->crtcs[id]))
+			drm_handle_vblank(dev, id);
+
+	mdp_dispatch_irqs(mdp_kms, status);
+
+	return IRQ_HANDLED;
+}
+
+int mdp4_enable_vblank(struct msm_kms *kms, struct drm_crtc *crtc)
+{
+	mdp_update_vblank_mask(to_mdp_kms(kms),
+			mdp4_crtc_vblank(crtc), true);
+	return 0;
+}
+
+void mdp4_disable_vblank(struct msm_kms *kms, struct drm_crtc *crtc)
+{
+	mdp_update_vblank_mask(to_mdp_kms(kms),
+			mdp4_crtc_vblank(crtc), false);
+}
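
mdp4_irq.c above uses the shared mdp_irq machinery: a handler is described by an mdp_irq with a callback and an irqmask, registered with mdp_irq_register(), and invoked by mdp_dispatch_irqs() for matching status bits. A hedged sketch of registering an additional handler the same way:

```c
/*
 * Hedged sketch: registering one more shared-MDP irq handler, following
 * the error-handler registration in mdp4_irq_postinstall() above.  The
 * handler and the chosen mask are hypothetical.
 */
static void my_underrun_handler(struct mdp_irq *irq, uint32_t irqstatus)
{
	DRM_ERROR("underrun: %08x\n", irqstatus);
}

static struct mdp_irq my_underrun_irq = {
	.irq = my_underrun_handler,
	.irqmask = MDP4_IRQ_PRIMARY_INTF_UDERRUN,
};

static void my_register(struct mdp4_kms *mdp4_kms)
{
	/* dispatched by mdp_dispatch_irqs() when a masked status bit is set */
	mdp_irq_register(&mdp4_kms->base, &my_underrun_irq);
}
```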
diff --git a/drivers/gpu/drm/msm/mdp4/mdp4_kms.c b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_kms.c
similarity index 88%
rename from drivers/gpu/drm/msm/mdp4/mdp4_kms.c
rename to drivers/gpu/drm/msm/mdp/mdp4/mdp4_kms.c
index 8972ac3..272e707 100644
--- a/drivers/gpu/drm/msm/mdp4/mdp4_kms.c
+++ b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_kms.c
@@ -17,13 +17,14 @@
 
 
 #include "msm_drv.h"
+#include "msm_mmu.h"
 #include "mdp4_kms.h"
 
 static struct mdp4_platform_config *mdp4_get_config(struct platform_device *dev);
 
 static int mdp4_hw_init(struct msm_kms *kms)
 {
-	struct mdp4_kms *mdp4_kms = to_mdp4_kms(kms);
+	struct mdp4_kms *mdp4_kms = to_mdp4_kms(to_mdp_kms(kms));
 	struct drm_device *dev = mdp4_kms->dev;
 	uint32_t version, major, minor, dmap_cfg, vg_cfg;
 	unsigned long clk;
@@ -31,12 +32,14 @@
 
 	pm_runtime_get_sync(dev->dev);
 
+	mdp4_enable(mdp4_kms);
 	version = mdp4_read(mdp4_kms, REG_MDP4_VERSION);
+	mdp4_disable(mdp4_kms);
 
 	major = FIELD(version, MDP4_VERSION_MAJOR);
 	minor = FIELD(version, MDP4_VERSION_MINOR);
 
-	DBG("found MDP version v%d.%d", major, minor);
+	DBG("found MDP4 version v%d.%d", major, minor);
 
 	if (major != 4) {
 		dev_err(dev->dev, "unexpected MDP version: v%d.%d\n",
@@ -130,7 +133,7 @@
 
 static void mdp4_preclose(struct msm_kms *kms, struct drm_file *file)
 {
-	struct mdp4_kms *mdp4_kms = to_mdp4_kms(kms);
+	struct mdp4_kms *mdp4_kms = to_mdp4_kms(to_mdp_kms(kms));
 	struct msm_drm_private *priv = mdp4_kms->dev->dev_private;
 	unsigned i;
 
@@ -140,11 +143,12 @@
 
 static void mdp4_destroy(struct msm_kms *kms)
 {
-	struct mdp4_kms *mdp4_kms = to_mdp4_kms(kms);
+	struct mdp4_kms *mdp4_kms = to_mdp4_kms(to_mdp_kms(kms));
 	kfree(mdp4_kms);
 }
 
-static const struct msm_kms_funcs kms_funcs = {
+static const struct mdp_kms_funcs kms_funcs = {
+	.base = {
 		.hw_init         = mdp4_hw_init,
 		.irq_preinstall  = mdp4_irq_preinstall,
 		.irq_postinstall = mdp4_irq_postinstall,
@@ -152,10 +156,12 @@
 		.irq             = mdp4_irq,
 		.enable_vblank   = mdp4_enable_vblank,
 		.disable_vblank  = mdp4_disable_vblank,
-		.get_format      = mdp4_get_format,
+		.get_format      = mdp_get_format,
 		.round_pixclk    = mdp4_round_pixclk,
 		.preclose        = mdp4_preclose,
 		.destroy         = mdp4_destroy,
+	},
+	.set_irqmask         = mdp4_set_irqmask,
 };
 
 int mdp4_disable(struct mdp4_kms *mdp4_kms)
@@ -189,6 +195,7 @@
 	struct drm_plane *plane;
 	struct drm_crtc *crtc;
 	struct drm_encoder *encoder;
+	struct hdmi *hdmi;
 	int ret;
 
 	/*
@@ -238,9 +245,10 @@
 	encoder->possible_crtcs = 0x1;     /* DTV can be hooked to DMA_E */
 	priv->encoders[priv->num_encoders++] = encoder;
 
-	ret = hdmi_init(dev, encoder);
-	if (ret) {
-		dev_err(dev->dev, "failed to initialize HDMI\n");
+	hdmi = hdmi_init(dev, encoder);
+	if (IS_ERR(hdmi)) {
+		ret = PTR_ERR(hdmi);
+		dev_err(dev->dev, "failed to initialize HDMI: %d\n", ret);
 		goto fail;
 	}
 
@@ -260,6 +268,7 @@
 	struct mdp4_platform_config *config = mdp4_get_config(pdev);
 	struct mdp4_kms *mdp4_kms;
 	struct msm_kms *kms = NULL;
+	struct msm_mmu *mmu;
 	int ret;
 
 	mdp4_kms = kzalloc(sizeof(*mdp4_kms), GFP_KERNEL);
@@ -269,8 +278,9 @@
 		goto fail;
 	}
 
-	kms = &mdp4_kms->base;
-	kms->funcs = &kms_funcs;
+	mdp_kms_init(&mdp4_kms->base, &kms_funcs);
+
+	kms = &mdp4_kms->base.base;
 
 	mdp4_kms->dev = dev;
 
@@ -322,27 +332,34 @@
 	clk_set_rate(mdp4_kms->clk, config->max_clk);
 	clk_set_rate(mdp4_kms->lut_clk, config->max_clk);
 
-	if (!config->iommu) {
-		dev_err(dev->dev, "no iommu\n");
-		ret = -ENXIO;
-		goto fail;
-	}
-
 	/* make sure things are off before attaching iommu (bootloader could
 	 * have left things on, in which case we'll start getting faults if
 	 * we don't disable):
 	 */
+	mdp4_enable(mdp4_kms);
 	mdp4_write(mdp4_kms, REG_MDP4_DTV_ENABLE, 0);
 	mdp4_write(mdp4_kms, REG_MDP4_LCDC_ENABLE, 0);
 	mdp4_write(mdp4_kms, REG_MDP4_DSI_ENABLE, 0);
+	mdp4_disable(mdp4_kms);
 	mdelay(16);
 
-	ret = msm_iommu_attach(dev, config->iommu,
-			iommu_ports, ARRAY_SIZE(iommu_ports));
-	if (ret)
-		goto fail;
+	if (config->iommu) {
+		mmu = msm_iommu_new(dev, config->iommu);
+		if (IS_ERR(mmu)) {
+			ret = PTR_ERR(mmu);
+			goto fail;
+		}
+		ret = mmu->funcs->attach(mmu, iommu_ports,
+				ARRAY_SIZE(iommu_ports));
+		if (ret)
+			goto fail;
+	} else {
+		dev_info(dev->dev, "no iommu, fallback to phys "
+				"contig buffers for scanout\n");
+		mmu = NULL;
+	}
 
-	mdp4_kms->id = msm_register_iommu(dev, config->iommu);
+	mdp4_kms->id = msm_register_mmu(dev, mmu);
 	if (mdp4_kms->id < 0) {
 		ret = mdp4_kms->id;
 		dev_err(dev->dev, "failed to register mdp4 iommu: %d\n", ret);
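
In mdp4_kms.c above the IOMMU becomes optional: when the platform config provides a domain, an MMU is created and attached; otherwise the driver logs a note and registers a NULL MMU, falling back to physically contiguous scanout buffers. A condensed, hedged version of that decision, assuming config->iommu is an iommu_domain pointer as in the surrounding code:

```c
/*
 * Hedged sketch of the optional-IOMMU decision, condensed into a helper.
 * setup_mmu() is hypothetical; iommu_ports is the driver's file-local
 * port list, and a NULL return means "use contiguous scanout buffers".
 */
static struct msm_mmu *setup_mmu(struct drm_device *dev,
		struct iommu_domain *iommu)
{
	struct msm_mmu *mmu;
	int ret;

	if (!iommu) {
		dev_info(dev->dev, "no iommu, using contiguous scanout buffers\n");
		return NULL;		/* later passed to msm_register_mmu() as-is */
	}

	mmu = msm_iommu_new(dev, iommu);
	if (IS_ERR(mmu))
		return mmu;		/* propagate the ERR_PTR */

	ret = mmu->funcs->attach(mmu, iommu_ports, ARRAY_SIZE(iommu_ports));
	if (ret)
		return ERR_PTR(ret);

	return mmu;
}
```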
diff --git a/drivers/gpu/drm/msm/mdp4/mdp4_kms.h b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_kms.h
similarity index 79%
rename from drivers/gpu/drm/msm/mdp4/mdp4_kms.h
rename to drivers/gpu/drm/msm/mdp/mdp4/mdp4_kms.h
index eb015c8..66a4d31 100644
--- a/drivers/gpu/drm/msm/mdp4/mdp4_kms.h
+++ b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_kms.h
@@ -18,29 +18,13 @@
 #ifndef __MDP4_KMS_H__
 #define __MDP4_KMS_H__
 
-#include <linux/clk.h>
-#include <linux/platform_device.h>
-#include <linux/regulator/consumer.h>
-
 #include "msm_drv.h"
+#include "msm_kms.h"
+#include "mdp/mdp_kms.h"
 #include "mdp4.xml.h"
 
-
-/* For transiently registering for different MDP4 irqs that various parts
- * of the KMS code need during setup/configuration.  We these are not
- * necessarily the same as what drm_vblank_get/put() are requesting, and
- * the hysteresis in drm_vblank_put() is not necessarily desirable for
- * internal housekeeping related irq usage.
- */
-struct mdp4_irq {
-	struct list_head node;
-	uint32_t irqmask;
-	bool registered;
-	void (*irq)(struct mdp4_irq *irq, uint32_t irqstatus);
-};
-
 struct mdp4_kms {
-	struct msm_kms base;
+	struct mdp_kms base;
 
 	struct drm_device *dev;
 
@@ -59,11 +43,7 @@
 	struct clk *pclk;
 	struct clk *lut_clk;
 
-	/* irq handling: */
-	bool in_irq;
-	struct list_head irq_list;    /* list of mdp4_irq */
-	uint32_t vblank_mask;         /* irq bits set for userspace vblank */
-	struct mdp4_irq error_handler;
+	struct mdp_irq error_handler;
 };
 #define to_mdp4_kms(x) container_of(x, struct mdp4_kms, base)
 
@@ -73,16 +53,6 @@
 	uint32_t max_clk;
 };
 
-struct mdp4_format {
-	struct msm_format base;
-	enum mdp4_bpc bpc_r, bpc_g, bpc_b;
-	enum mdp4_bpc_alpha bpc_a;
-	uint8_t unpack[4];
-	bool alpha_enable, unpack_tight;
-	uint8_t cpp, unpack_count;
-};
-#define to_mdp4_format(x) container_of(x, struct mdp4_format, base)
-
 static inline void mdp4_write(struct mdp4_kms *mdp4_kms, u32 reg, u32 data)
 {
 	msm_writel(data, mdp4_kms->mmio + reg);
@@ -134,7 +104,7 @@
 }
 
 static inline uint32_t mixercfg(int mixer, enum mdp4_pipe pipe,
-		enum mdp4_mixer_stage_id stage)
+		enum mdp_mixer_stage_id stage)
 {
 	uint32_t mixer_cfg = 0;
 
@@ -178,19 +148,23 @@
 int mdp4_disable(struct mdp4_kms *mdp4_kms);
 int mdp4_enable(struct mdp4_kms *mdp4_kms);
 
+void mdp4_set_irqmask(struct mdp_kms *mdp_kms, uint32_t irqmask);
 void mdp4_irq_preinstall(struct msm_kms *kms);
 int mdp4_irq_postinstall(struct msm_kms *kms);
 void mdp4_irq_uninstall(struct msm_kms *kms);
 irqreturn_t mdp4_irq(struct msm_kms *kms);
-void mdp4_irq_wait(struct mdp4_kms *mdp4_kms, uint32_t irqmask);
-void mdp4_irq_register(struct mdp4_kms *mdp4_kms, struct mdp4_irq *irq);
-void mdp4_irq_unregister(struct mdp4_kms *mdp4_kms, struct mdp4_irq *irq);
 int mdp4_enable_vblank(struct msm_kms *kms, struct drm_crtc *crtc);
 void mdp4_disable_vblank(struct msm_kms *kms, struct drm_crtc *crtc);
 
-uint32_t mdp4_get_formats(enum mdp4_pipe pipe_id, uint32_t *formats,
-		uint32_t max_formats);
-const struct msm_format *mdp4_get_format(struct msm_kms *kms, uint32_t format);
+static inline
+uint32_t mdp4_get_formats(enum mdp4_pipe pipe_id, uint32_t *pixel_formats,
+		uint32_t max_formats)
+{
+	/* TODO when we have YUV, we need to filter supported formats
+	 * based on pipe_id..
+	 */
+	return mdp_get_formats(pixel_formats, max_formats);
+}
 
 void mdp4_plane_install_properties(struct drm_plane *plane,
 		struct drm_mode_object *obj);
diff --git a/drivers/gpu/drm/msm/mdp4/mdp4_plane.c b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_plane.c
similarity index 96%
rename from drivers/gpu/drm/msm/mdp4/mdp4_plane.c
rename to drivers/gpu/drm/msm/mdp/mdp4/mdp4_plane.c
index 0f0af24..1e893dd 100644
--- a/drivers/gpu/drm/msm/mdp4/mdp4_plane.c
+++ b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_plane.c
@@ -34,7 +34,7 @@
 static struct mdp4_kms *get_kms(struct drm_plane *plane)
 {
 	struct msm_drm_private *priv = plane->dev->dev_private;
-	return to_mdp4_kms(priv->kms);
+	return to_mdp4_kms(to_mdp_kms(priv->kms));
 }
 
 static int mdp4_plane_update(struct drm_plane *plane,
@@ -132,7 +132,7 @@
 	struct mdp4_plane *mdp4_plane = to_mdp4_plane(plane);
 	struct mdp4_kms *mdp4_kms = get_kms(plane);
 	enum mdp4_pipe pipe = mdp4_plane->pipe;
-	const struct mdp4_format *format;
+	const struct mdp_format *format;
 	uint32_t op_mode = 0;
 	uint32_t phasex_step = MDP4_VG_PHASE_STEP_DEFAULT;
 	uint32_t phasey_step = MDP4_VG_PHASE_STEP_DEFAULT;
@@ -170,12 +170,12 @@
 			MDP4_PIPE_DST_SIZE_HEIGHT(crtc_h));
 
 	mdp4_write(mdp4_kms, REG_MDP4_PIPE_DST_XY(pipe),
-			MDP4_PIPE_SRC_XY_X(crtc_x) |
-			MDP4_PIPE_SRC_XY_Y(crtc_y));
+			MDP4_PIPE_DST_XY_X(crtc_x) |
+			MDP4_PIPE_DST_XY_Y(crtc_y));
 
 	mdp4_plane_set_scanout(plane, fb);
 
-	format = to_mdp4_format(msm_framebuffer_format(fb));
+	format = to_mdp_format(msm_framebuffer_format(fb));
 
 	mdp4_write(mdp4_kms, REG_MDP4_PIPE_SRC_FORMAT(pipe),
 			MDP4_PIPE_SRC_FORMAT_A_BPC(format->bpc_a) |
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5.xml.h b/drivers/gpu/drm/msm/mdp/mdp5/mdp5.xml.h
new file mode 100644
index 0000000..0aa5151
--- /dev/null
+++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5.xml.h
@@ -0,0 +1,1036 @@
+#ifndef MDP5_XML
+#define MDP5_XML
+
+/* Autogenerated file, DO NOT EDIT manually!
+
+This file was generated by the rules-ng-ng headergen tool in this git repository:
+http://github.com/freedreno/envytools/
+git clone https://github.com/freedreno/envytools.git
+
+The rules-ng-ng source files this header was generated from are:
+- /home/robclark/src/freedreno/envytools/rnndb/msm.xml                 (    647 bytes, from 2013-11-30 14:45:35)
+- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml (   1453 bytes, from 2013-03-31 16:51:27)
+- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp4.xml            (  17996 bytes, from 2013-12-01 19:10:31)
+- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp_common.xml      (   1615 bytes, from 2013-11-30 15:00:52)
+- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp5.xml            (  22517 bytes, from 2013-12-03 20:59:13)
+- /home/robclark/src/freedreno/envytools/rnndb/dsi/dsi.xml             (  11712 bytes, from 2013-08-17 17:13:43)
+- /home/robclark/src/freedreno/envytools/rnndb/dsi/sfpb.xml            (    344 bytes, from 2013-08-11 19:26:32)
+- /home/robclark/src/freedreno/envytools/rnndb/dsi/mmss_cc.xml         (   1544 bytes, from 2013-08-16 19:17:05)
+- /home/robclark/src/freedreno/envytools/rnndb/hdmi/qfprom.xml         (    600 bytes, from 2013-07-05 19:21:12)
+- /home/robclark/src/freedreno/envytools/rnndb/hdmi/hdmi.xml           (  20932 bytes, from 2013-12-01 15:13:04)
+
+Copyright (C) 2013 by the following authors:
+- Rob Clark <robdclark@gmail.com> (robclark)
+
+Permission is hereby granted, free of charge, to any person obtaining
+a copy of this software and associated documentation files (the
+"Software"), to deal in the Software without restriction, including
+without limitation the rights to use, copy, modify, merge, publish,
+distribute, sublicense, and/or sell copies of the Software, and to
+permit persons to whom the Software is furnished to do so, subject to
+the following conditions:
+
+The above copyright notice and this permission notice (including the
+next paragraph) shall be included in all copies or substantial
+portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
+LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/
+
+
+enum mdp5_intf {
+	INTF_DSI = 1,
+	INTF_HDMI = 3,
+	INTF_LCDC = 5,
+	INTF_eDP = 9,
+};
+
+enum mdp5_intfnum {
+	NO_INTF = 0,
+	INTF0 = 1,
+	INTF1 = 2,
+	INTF2 = 3,
+	INTF3 = 4,
+};
+
+enum mdp5_pipe {
+	SSPP_VIG0 = 0,
+	SSPP_VIG1 = 1,
+	SSPP_VIG2 = 2,
+	SSPP_RGB0 = 3,
+	SSPP_RGB1 = 4,
+	SSPP_RGB2 = 5,
+	SSPP_DMA0 = 6,
+	SSPP_DMA1 = 7,
+};
+
+enum mdp5_ctl_mode {
+	MODE_NONE = 0,
+	MODE_ROT0 = 1,
+	MODE_ROT1 = 2,
+	MODE_WB0 = 3,
+	MODE_WB1 = 4,
+	MODE_WFD = 5,
+};
+
+enum mdp5_pack_3d {
+	PACK_3D_FRAME_INT = 0,
+	PACK_3D_H_ROW_INT = 1,
+	PACK_3D_V_ROW_INT = 2,
+	PACK_3D_COL_INT = 3,
+};
+
+enum mdp5_chroma_samp_type {
+	CHROMA_RGB = 0,
+	CHROMA_H2V1 = 1,
+	CHROMA_H1V2 = 2,
+	CHROMA_420 = 3,
+};
+
+enum mdp5_scale_filter {
+	SCALE_FILTER_NEAREST = 0,
+	SCALE_FILTER_BIL = 1,
+	SCALE_FILTER_PCMN = 2,
+	SCALE_FILTER_CA = 3,
+};
+
+enum mdp5_pipe_bwc {
+	BWC_LOSSLESS = 0,
+	BWC_Q_HIGH = 1,
+	BWC_Q_MED = 2,
+};
+
+enum mdp5_client_id {
+	CID_UNUSED = 0,
+	CID_VIG0_Y = 1,
+	CID_VIG0_CR = 2,
+	CID_VIG0_CB = 3,
+	CID_VIG1_Y = 4,
+	CID_VIG1_CR = 5,
+	CID_VIG1_CB = 6,
+	CID_VIG2_Y = 7,
+	CID_VIG2_CR = 8,
+	CID_VIG2_CB = 9,
+	CID_DMA0_Y = 10,
+	CID_DMA0_CR = 11,
+	CID_DMA0_CB = 12,
+	CID_DMA1_Y = 13,
+	CID_DMA1_CR = 14,
+	CID_DMA1_CB = 15,
+	CID_RGB0 = 16,
+	CID_RGB1 = 17,
+	CID_RGB2 = 18,
+	CID_MAX = 19,
+};
+
+enum mdp5_igc_type {
+	IGC_VIG = 0,
+	IGC_RGB = 1,
+	IGC_DMA = 2,
+	IGC_DSPP = 3,
+};
+
+#define MDP5_IRQ_INTF0_WB_ROT_COMP				0x00000001
+#define MDP5_IRQ_INTF1_WB_ROT_COMP				0x00000002
+#define MDP5_IRQ_INTF2_WB_ROT_COMP				0x00000004
+#define MDP5_IRQ_INTF3_WB_ROT_COMP				0x00000008
+#define MDP5_IRQ_INTF0_WB_WFD					0x00000010
+#define MDP5_IRQ_INTF1_WB_WFD					0x00000020
+#define MDP5_IRQ_INTF2_WB_WFD					0x00000040
+#define MDP5_IRQ_INTF3_WB_WFD					0x00000080
+#define MDP5_IRQ_INTF0_PING_PONG_COMP				0x00000100
+#define MDP5_IRQ_INTF1_PING_PONG_COMP				0x00000200
+#define MDP5_IRQ_INTF2_PING_PONG_COMP				0x00000400
+#define MDP5_IRQ_INTF3_PING_PONG_COMP				0x00000800
+#define MDP5_IRQ_INTF0_PING_PONG_RD_PTR				0x00001000
+#define MDP5_IRQ_INTF1_PING_PONG_RD_PTR				0x00002000
+#define MDP5_IRQ_INTF2_PING_PONG_RD_PTR				0x00004000
+#define MDP5_IRQ_INTF3_PING_PONG_RD_PTR				0x00008000
+#define MDP5_IRQ_INTF0_PING_PONG_WR_PTR				0x00010000
+#define MDP5_IRQ_INTF1_PING_PONG_WR_PTR				0x00020000
+#define MDP5_IRQ_INTF2_PING_PONG_WR_PTR				0x00040000
+#define MDP5_IRQ_INTF3_PING_PONG_WR_PTR				0x00080000
+#define MDP5_IRQ_INTF0_PING_PONG_AUTO_REF			0x00100000
+#define MDP5_IRQ_INTF1_PING_PONG_AUTO_REF			0x00200000
+#define MDP5_IRQ_INTF2_PING_PONG_AUTO_REF			0x00400000
+#define MDP5_IRQ_INTF3_PING_PONG_AUTO_REF			0x00800000
+#define MDP5_IRQ_INTF0_UNDER_RUN				0x01000000
+#define MDP5_IRQ_INTF0_VSYNC					0x02000000
+#define MDP5_IRQ_INTF1_UNDER_RUN				0x04000000
+#define MDP5_IRQ_INTF1_VSYNC					0x08000000
+#define MDP5_IRQ_INTF2_UNDER_RUN				0x10000000
+#define MDP5_IRQ_INTF2_VSYNC					0x20000000
+#define MDP5_IRQ_INTF3_UNDER_RUN				0x40000000
+#define MDP5_IRQ_INTF3_VSYNC					0x80000000
+#define REG_MDP5_HW_VERSION					0x00000000
+
+#define REG_MDP5_HW_INTR_STATUS					0x00000010
+#define MDP5_HW_INTR_STATUS_INTR_MDP				0x00000001
+#define MDP5_HW_INTR_STATUS_INTR_DSI0				0x00000010
+#define MDP5_HW_INTR_STATUS_INTR_DSI1				0x00000020
+#define MDP5_HW_INTR_STATUS_INTR_HDMI				0x00000100
+#define MDP5_HW_INTR_STATUS_INTR_EDP				0x00001000
+
+#define REG_MDP5_MDP_VERSION					0x00000100
+#define MDP5_MDP_VERSION_MINOR__MASK				0x00ff0000
+#define MDP5_MDP_VERSION_MINOR__SHIFT				16
+static inline uint32_t MDP5_MDP_VERSION_MINOR(uint32_t val)
+{
+	return ((val) << MDP5_MDP_VERSION_MINOR__SHIFT) & MDP5_MDP_VERSION_MINOR__MASK;
+}
+#define MDP5_MDP_VERSION_MAJOR__MASK				0xf0000000
+#define MDP5_MDP_VERSION_MAJOR__SHIFT				28
+static inline uint32_t MDP5_MDP_VERSION_MAJOR(uint32_t val)
+{
+	return ((val) << MDP5_MDP_VERSION_MAJOR__SHIFT) & MDP5_MDP_VERSION_MAJOR__MASK;
+}
+
+#define REG_MDP5_DISP_INTF_SEL					0x00000104
+#define MDP5_DISP_INTF_SEL_INTF0__MASK				0x000000ff
+#define MDP5_DISP_INTF_SEL_INTF0__SHIFT				0
+static inline uint32_t MDP5_DISP_INTF_SEL_INTF0(enum mdp5_intf val)
+{
+	return ((val) << MDP5_DISP_INTF_SEL_INTF0__SHIFT) & MDP5_DISP_INTF_SEL_INTF0__MASK;
+}
+#define MDP5_DISP_INTF_SEL_INTF1__MASK				0x0000ff00
+#define MDP5_DISP_INTF_SEL_INTF1__SHIFT				8
+static inline uint32_t MDP5_DISP_INTF_SEL_INTF1(enum mdp5_intf val)
+{
+	return ((val) << MDP5_DISP_INTF_SEL_INTF1__SHIFT) & MDP5_DISP_INTF_SEL_INTF1__MASK;
+}
+#define MDP5_DISP_INTF_SEL_INTF2__MASK				0x00ff0000
+#define MDP5_DISP_INTF_SEL_INTF2__SHIFT				16
+static inline uint32_t MDP5_DISP_INTF_SEL_INTF2(enum mdp5_intf val)
+{
+	return ((val) << MDP5_DISP_INTF_SEL_INTF2__SHIFT) & MDP5_DISP_INTF_SEL_INTF2__MASK;
+}
+#define MDP5_DISP_INTF_SEL_INTF3__MASK				0xff000000
+#define MDP5_DISP_INTF_SEL_INTF3__SHIFT				24
+static inline uint32_t MDP5_DISP_INTF_SEL_INTF3(enum mdp5_intf val)
+{
+	return ((val) << MDP5_DISP_INTF_SEL_INTF3__SHIFT) & MDP5_DISP_INTF_SEL_INTF3__MASK;
+}
+
+#define REG_MDP5_INTR_EN					0x00000110
+
+#define REG_MDP5_INTR_STATUS					0x00000114
+
+#define REG_MDP5_INTR_CLEAR					0x00000118
+
+#define REG_MDP5_HIST_INTR_EN					0x0000011c
+
+#define REG_MDP5_HIST_INTR_STATUS				0x00000120
+
+#define REG_MDP5_HIST_INTR_CLEAR				0x00000124
+
+static inline uint32_t REG_MDP5_SMP_ALLOC_W(uint32_t i0) { return 0x00000180 + 0x4*i0; }
+
+static inline uint32_t REG_MDP5_SMP_ALLOC_W_REG(uint32_t i0) { return 0x00000180 + 0x4*i0; }
+#define MDP5_SMP_ALLOC_W_REG_CLIENT0__MASK			0x000000ff
+#define MDP5_SMP_ALLOC_W_REG_CLIENT0__SHIFT			0
+static inline uint32_t MDP5_SMP_ALLOC_W_REG_CLIENT0(enum mdp5_client_id val)
+{
+	return ((val) << MDP5_SMP_ALLOC_W_REG_CLIENT0__SHIFT) & MDP5_SMP_ALLOC_W_REG_CLIENT0__MASK;
+}
+#define MDP5_SMP_ALLOC_W_REG_CLIENT1__MASK			0x0000ff00
+#define MDP5_SMP_ALLOC_W_REG_CLIENT1__SHIFT			8
+static inline uint32_t MDP5_SMP_ALLOC_W_REG_CLIENT1(enum mdp5_client_id val)
+{
+	return ((val) << MDP5_SMP_ALLOC_W_REG_CLIENT1__SHIFT) & MDP5_SMP_ALLOC_W_REG_CLIENT1__MASK;
+}
+#define MDP5_SMP_ALLOC_W_REG_CLIENT2__MASK			0x00ff0000
+#define MDP5_SMP_ALLOC_W_REG_CLIENT2__SHIFT			16
+static inline uint32_t MDP5_SMP_ALLOC_W_REG_CLIENT2(enum mdp5_client_id val)
+{
+	return ((val) << MDP5_SMP_ALLOC_W_REG_CLIENT2__SHIFT) & MDP5_SMP_ALLOC_W_REG_CLIENT2__MASK;
+}
+
+static inline uint32_t REG_MDP5_SMP_ALLOC_R(uint32_t i0) { return 0x00000230 + 0x4*i0; }
+
+static inline uint32_t REG_MDP5_SMP_ALLOC_R_REG(uint32_t i0) { return 0x00000230 + 0x4*i0; }
+#define MDP5_SMP_ALLOC_R_REG_CLIENT0__MASK			0x000000ff
+#define MDP5_SMP_ALLOC_R_REG_CLIENT0__SHIFT			0
+static inline uint32_t MDP5_SMP_ALLOC_R_REG_CLIENT0(enum mdp5_client_id val)
+{
+	return ((val) << MDP5_SMP_ALLOC_R_REG_CLIENT0__SHIFT) & MDP5_SMP_ALLOC_R_REG_CLIENT0__MASK;
+}
+#define MDP5_SMP_ALLOC_R_REG_CLIENT1__MASK			0x0000ff00
+#define MDP5_SMP_ALLOC_R_REG_CLIENT1__SHIFT			8
+static inline uint32_t MDP5_SMP_ALLOC_R_REG_CLIENT1(enum mdp5_client_id val)
+{
+	return ((val) << MDP5_SMP_ALLOC_R_REG_CLIENT1__SHIFT) & MDP5_SMP_ALLOC_R_REG_CLIENT1__MASK;
+}
+#define MDP5_SMP_ALLOC_R_REG_CLIENT2__MASK			0x00ff0000
+#define MDP5_SMP_ALLOC_R_REG_CLIENT2__SHIFT			16
+static inline uint32_t MDP5_SMP_ALLOC_R_REG_CLIENT2(enum mdp5_client_id val)
+{
+	return ((val) << MDP5_SMP_ALLOC_R_REG_CLIENT2__SHIFT) & MDP5_SMP_ALLOC_R_REG_CLIENT2__MASK;
+}
+
+static inline uint32_t __offset_IGC(enum mdp5_igc_type idx)
+{
+	switch (idx) {
+		case IGC_VIG: return 0x00000300;
+		case IGC_RGB: return 0x00000310;
+		case IGC_DMA: return 0x00000320;
+		case IGC_DSPP: return 0x00000400;
+		default: return INVALID_IDX(idx);
+	}
+}
+static inline uint32_t REG_MDP5_IGC(enum mdp5_igc_type i0) { return 0x00000000 + __offset_IGC(i0); }
+
+static inline uint32_t REG_MDP5_IGC_LUT(enum mdp5_igc_type i0, uint32_t i1) { return 0x00000000 + __offset_IGC(i0) + 0x4*i1; }
+
+static inline uint32_t REG_MDP5_IGC_LUT_REG(enum mdp5_igc_type i0, uint32_t i1) { return 0x00000000 + __offset_IGC(i0) + 0x4*i1; }
+#define MDP5_IGC_LUT_REG_VAL__MASK				0x00000fff
+#define MDP5_IGC_LUT_REG_VAL__SHIFT				0
+static inline uint32_t MDP5_IGC_LUT_REG_VAL(uint32_t val)
+{
+	return ((val) << MDP5_IGC_LUT_REG_VAL__SHIFT) & MDP5_IGC_LUT_REG_VAL__MASK;
+}
+#define MDP5_IGC_LUT_REG_INDEX_UPDATE				0x02000000
+#define MDP5_IGC_LUT_REG_DISABLE_PIPE_0				0x10000000
+#define MDP5_IGC_LUT_REG_DISABLE_PIPE_1				0x20000000
+#define MDP5_IGC_LUT_REG_DISABLE_PIPE_2				0x40000000
+
+static inline uint32_t REG_MDP5_CTL(uint32_t i0) { return 0x00000600 + 0x100*i0; }
+
+static inline uint32_t REG_MDP5_CTL_LAYER(uint32_t i0, uint32_t i1) { return 0x00000600 + 0x100*i0 + 0x4*i1; }
+
+static inline uint32_t REG_MDP5_CTL_LAYER_REG(uint32_t i0, uint32_t i1) { return 0x00000600 + 0x100*i0 + 0x4*i1; }
+#define MDP5_CTL_LAYER_REG_VIG0__MASK				0x00000007
+#define MDP5_CTL_LAYER_REG_VIG0__SHIFT				0
+static inline uint32_t MDP5_CTL_LAYER_REG_VIG0(enum mdp_mixer_stage_id val)
+{
+	return ((val) << MDP5_CTL_LAYER_REG_VIG0__SHIFT) & MDP5_CTL_LAYER_REG_VIG0__MASK;
+}
+#define MDP5_CTL_LAYER_REG_VIG1__MASK				0x00000038
+#define MDP5_CTL_LAYER_REG_VIG1__SHIFT				3
+static inline uint32_t MDP5_CTL_LAYER_REG_VIG1(enum mdp_mixer_stage_id val)
+{
+	return ((val) << MDP5_CTL_LAYER_REG_VIG1__SHIFT) & MDP5_CTL_LAYER_REG_VIG1__MASK;
+}
+#define MDP5_CTL_LAYER_REG_VIG2__MASK				0x000001c0
+#define MDP5_CTL_LAYER_REG_VIG2__SHIFT				6
+static inline uint32_t MDP5_CTL_LAYER_REG_VIG2(enum mdp_mixer_stage_id val)
+{
+	return ((val) << MDP5_CTL_LAYER_REG_VIG2__SHIFT) & MDP5_CTL_LAYER_REG_VIG2__MASK;
+}
+#define MDP5_CTL_LAYER_REG_RGB0__MASK				0x00000e00
+#define MDP5_CTL_LAYER_REG_RGB0__SHIFT				9
+static inline uint32_t MDP5_CTL_LAYER_REG_RGB0(enum mdp_mixer_stage_id val)
+{
+	return ((val) << MDP5_CTL_LAYER_REG_RGB0__SHIFT) & MDP5_CTL_LAYER_REG_RGB0__MASK;
+}
+#define MDP5_CTL_LAYER_REG_RGB1__MASK				0x00007000
+#define MDP5_CTL_LAYER_REG_RGB1__SHIFT				12
+static inline uint32_t MDP5_CTL_LAYER_REG_RGB1(enum mdp_mixer_stage_id val)
+{
+	return ((val) << MDP5_CTL_LAYER_REG_RGB1__SHIFT) & MDP5_CTL_LAYER_REG_RGB1__MASK;
+}
+#define MDP5_CTL_LAYER_REG_RGB2__MASK				0x00038000
+#define MDP5_CTL_LAYER_REG_RGB2__SHIFT				15
+static inline uint32_t MDP5_CTL_LAYER_REG_RGB2(enum mdp_mixer_stage_id val)
+{
+	return ((val) << MDP5_CTL_LAYER_REG_RGB2__SHIFT) & MDP5_CTL_LAYER_REG_RGB2__MASK;
+}
+#define MDP5_CTL_LAYER_REG_DMA0__MASK				0x001c0000
+#define MDP5_CTL_LAYER_REG_DMA0__SHIFT				18
+static inline uint32_t MDP5_CTL_LAYER_REG_DMA0(enum mdp_mixer_stage_id val)
+{
+	return ((val) << MDP5_CTL_LAYER_REG_DMA0__SHIFT) & MDP5_CTL_LAYER_REG_DMA0__MASK;
+}
+#define MDP5_CTL_LAYER_REG_DMA1__MASK				0x00e00000
+#define MDP5_CTL_LAYER_REG_DMA1__SHIFT				21
+static inline uint32_t MDP5_CTL_LAYER_REG_DMA1(enum mdp_mixer_stage_id val)
+{
+	return ((val) << MDP5_CTL_LAYER_REG_DMA1__SHIFT) & MDP5_CTL_LAYER_REG_DMA1__MASK;
+}
+#define MDP5_CTL_LAYER_REG_BORDER_COLOR				0x01000000
+#define MDP5_CTL_LAYER_REG_CURSOR_OUT				0x02000000
+
+static inline uint32_t REG_MDP5_CTL_OP(uint32_t i0) { return 0x00000614 + 0x100*i0; }
+#define MDP5_CTL_OP_MODE__MASK					0x0000000f
+#define MDP5_CTL_OP_MODE__SHIFT					0
+static inline uint32_t MDP5_CTL_OP_MODE(enum mdp5_ctl_mode val)
+{
+	return ((val) << MDP5_CTL_OP_MODE__SHIFT) & MDP5_CTL_OP_MODE__MASK;
+}
+#define MDP5_CTL_OP_INTF_NUM__MASK				0x00000070
+#define MDP5_CTL_OP_INTF_NUM__SHIFT				4
+static inline uint32_t MDP5_CTL_OP_INTF_NUM(enum mdp5_intfnum val)
+{
+	return ((val) << MDP5_CTL_OP_INTF_NUM__SHIFT) & MDP5_CTL_OP_INTF_NUM__MASK;
+}
+#define MDP5_CTL_OP_CMD_MODE					0x00020000
+#define MDP5_CTL_OP_PACK_3D_ENABLE				0x00080000
+#define MDP5_CTL_OP_PACK_3D__MASK				0x00300000
+#define MDP5_CTL_OP_PACK_3D__SHIFT				20
+static inline uint32_t MDP5_CTL_OP_PACK_3D(enum mdp5_pack_3d val)
+{
+	return ((val) << MDP5_CTL_OP_PACK_3D__SHIFT) & MDP5_CTL_OP_PACK_3D__MASK;
+}
+
+static inline uint32_t REG_MDP5_CTL_FLUSH(uint32_t i0) { return 0x00000618 + 0x100*i0; }
+#define MDP5_CTL_FLUSH_VIG0					0x00000001
+#define MDP5_CTL_FLUSH_VIG1					0x00000002
+#define MDP5_CTL_FLUSH_VIG2					0x00000004
+#define MDP5_CTL_FLUSH_RGB0					0x00000008
+#define MDP5_CTL_FLUSH_RGB1					0x00000010
+#define MDP5_CTL_FLUSH_RGB2					0x00000020
+#define MDP5_CTL_FLUSH_LM0					0x00000040
+#define MDP5_CTL_FLUSH_LM1					0x00000080
+#define MDP5_CTL_FLUSH_LM2					0x00000100
+#define MDP5_CTL_FLUSH_DMA0					0x00000800
+#define MDP5_CTL_FLUSH_DMA1					0x00001000
+#define MDP5_CTL_FLUSH_DSPP0					0x00002000
+#define MDP5_CTL_FLUSH_DSPP1					0x00004000
+#define MDP5_CTL_FLUSH_DSPP2					0x00008000
+#define MDP5_CTL_FLUSH_CTL					0x00020000
+
+static inline uint32_t REG_MDP5_CTL_START(uint32_t i0) { return 0x0000061c + 0x100*i0; }
+
+static inline uint32_t REG_MDP5_CTL_PACK_3D(uint32_t i0) { return 0x00000620 + 0x100*i0; }
+
+static inline uint32_t REG_MDP5_PIPE(enum mdp5_pipe i0) { return 0x00001200 + 0x400*i0; }
+
+static inline uint32_t REG_MDP5_PIPE_HIST_CTL_BASE(enum mdp5_pipe i0) { return 0x000014c4 + 0x400*i0; }
+
+static inline uint32_t REG_MDP5_PIPE_HIST_LUT_BASE(enum mdp5_pipe i0) { return 0x000014f0 + 0x400*i0; }
+
+static inline uint32_t REG_MDP5_PIPE_HIST_LUT_SWAP(enum mdp5_pipe i0) { return 0x00001500 + 0x400*i0; }
+
+static inline uint32_t REG_MDP5_PIPE_SRC_SIZE(enum mdp5_pipe i0) { return 0x00001200 + 0x400*i0; }
+#define MDP5_PIPE_SRC_SIZE_HEIGHT__MASK				0xffff0000
+#define MDP5_PIPE_SRC_SIZE_HEIGHT__SHIFT			16
+static inline uint32_t MDP5_PIPE_SRC_SIZE_HEIGHT(uint32_t val)
+{
+	return ((val) << MDP5_PIPE_SRC_SIZE_HEIGHT__SHIFT) & MDP5_PIPE_SRC_SIZE_HEIGHT__MASK;
+}
+#define MDP5_PIPE_SRC_SIZE_WIDTH__MASK				0x0000ffff
+#define MDP5_PIPE_SRC_SIZE_WIDTH__SHIFT				0
+static inline uint32_t MDP5_PIPE_SRC_SIZE_WIDTH(uint32_t val)
+{
+	return ((val) << MDP5_PIPE_SRC_SIZE_WIDTH__SHIFT) & MDP5_PIPE_SRC_SIZE_WIDTH__MASK;
+}
+
+static inline uint32_t REG_MDP5_PIPE_SRC_IMG_SIZE(enum mdp5_pipe i0) { return 0x00001204 + 0x400*i0; }
+#define MDP5_PIPE_SRC_IMG_SIZE_HEIGHT__MASK			0xffff0000
+#define MDP5_PIPE_SRC_IMG_SIZE_HEIGHT__SHIFT			16
+static inline uint32_t MDP5_PIPE_SRC_IMG_SIZE_HEIGHT(uint32_t val)
+{
+	return ((val) << MDP5_PIPE_SRC_IMG_SIZE_HEIGHT__SHIFT) & MDP5_PIPE_SRC_IMG_SIZE_HEIGHT__MASK;
+}
+#define MDP5_PIPE_SRC_IMG_SIZE_WIDTH__MASK			0x0000ffff
+#define MDP5_PIPE_SRC_IMG_SIZE_WIDTH__SHIFT			0
+static inline uint32_t MDP5_PIPE_SRC_IMG_SIZE_WIDTH(uint32_t val)
+{
+	return ((val) << MDP5_PIPE_SRC_IMG_SIZE_WIDTH__SHIFT) & MDP5_PIPE_SRC_IMG_SIZE_WIDTH__MASK;
+}
+
+static inline uint32_t REG_MDP5_PIPE_SRC_XY(enum mdp5_pipe i0) { return 0x00001208 + 0x400*i0; }
+#define MDP5_PIPE_SRC_XY_Y__MASK				0xffff0000
+#define MDP5_PIPE_SRC_XY_Y__SHIFT				16
+static inline uint32_t MDP5_PIPE_SRC_XY_Y(uint32_t val)
+{
+	return ((val) << MDP5_PIPE_SRC_XY_Y__SHIFT) & MDP5_PIPE_SRC_XY_Y__MASK;
+}
+#define MDP5_PIPE_SRC_XY_X__MASK				0x0000ffff
+#define MDP5_PIPE_SRC_XY_X__SHIFT				0
+static inline uint32_t MDP5_PIPE_SRC_XY_X(uint32_t val)
+{
+	return ((val) << MDP5_PIPE_SRC_XY_X__SHIFT) & MDP5_PIPE_SRC_XY_X__MASK;
+}
+
+static inline uint32_t REG_MDP5_PIPE_OUT_SIZE(enum mdp5_pipe i0) { return 0x0000120c + 0x400*i0; }
+#define MDP5_PIPE_OUT_SIZE_HEIGHT__MASK				0xffff0000
+#define MDP5_PIPE_OUT_SIZE_HEIGHT__SHIFT			16
+static inline uint32_t MDP5_PIPE_OUT_SIZE_HEIGHT(uint32_t val)
+{
+	return ((val) << MDP5_PIPE_OUT_SIZE_HEIGHT__SHIFT) & MDP5_PIPE_OUT_SIZE_HEIGHT__MASK;
+}
+#define MDP5_PIPE_OUT_SIZE_WIDTH__MASK				0x0000ffff
+#define MDP5_PIPE_OUT_SIZE_WIDTH__SHIFT				0
+static inline uint32_t MDP5_PIPE_OUT_SIZE_WIDTH(uint32_t val)
+{
+	return ((val) << MDP5_PIPE_OUT_SIZE_WIDTH__SHIFT) & MDP5_PIPE_OUT_SIZE_WIDTH__MASK;
+}
+
+static inline uint32_t REG_MDP5_PIPE_OUT_XY(enum mdp5_pipe i0) { return 0x00001210 + 0x400*i0; }
+#define MDP5_PIPE_OUT_XY_Y__MASK				0xffff0000
+#define MDP5_PIPE_OUT_XY_Y__SHIFT				16
+static inline uint32_t MDP5_PIPE_OUT_XY_Y(uint32_t val)
+{
+	return ((val) << MDP5_PIPE_OUT_XY_Y__SHIFT) & MDP5_PIPE_OUT_XY_Y__MASK;
+}
+#define MDP5_PIPE_OUT_XY_X__MASK				0x0000ffff
+#define MDP5_PIPE_OUT_XY_X__SHIFT				0
+static inline uint32_t MDP5_PIPE_OUT_XY_X(uint32_t val)
+{
+	return ((val) << MDP5_PIPE_OUT_XY_X__SHIFT) & MDP5_PIPE_OUT_XY_X__MASK;
+}
+
+static inline uint32_t REG_MDP5_PIPE_SRC0_ADDR(enum mdp5_pipe i0) { return 0x00001214 + 0x400*i0; }
+
+static inline uint32_t REG_MDP5_PIPE_SRC1_ADDR(enum mdp5_pipe i0) { return 0x00001218 + 0x400*i0; }
+
+static inline uint32_t REG_MDP5_PIPE_SRC2_ADDR(enum mdp5_pipe i0) { return 0x0000121c + 0x400*i0; }
+
+static inline uint32_t REG_MDP5_PIPE_SRC3_ADDR(enum mdp5_pipe i0) { return 0x00001220 + 0x400*i0; }
+
+static inline uint32_t REG_MDP5_PIPE_SRC_STRIDE_A(enum mdp5_pipe i0) { return 0x00001224 + 0x400*i0; }
+#define MDP5_PIPE_SRC_STRIDE_A_P0__MASK				0x0000ffff
+#define MDP5_PIPE_SRC_STRIDE_A_P0__SHIFT			0
+static inline uint32_t MDP5_PIPE_SRC_STRIDE_A_P0(uint32_t val)
+{
+	return ((val) << MDP5_PIPE_SRC_STRIDE_A_P0__SHIFT) & MDP5_PIPE_SRC_STRIDE_A_P0__MASK;
+}
+#define MDP5_PIPE_SRC_STRIDE_A_P1__MASK				0xffff0000
+#define MDP5_PIPE_SRC_STRIDE_A_P1__SHIFT			16
+static inline uint32_t MDP5_PIPE_SRC_STRIDE_A_P1(uint32_t val)
+{
+	return ((val) << MDP5_PIPE_SRC_STRIDE_A_P1__SHIFT) & MDP5_PIPE_SRC_STRIDE_A_P1__MASK;
+}
+
+static inline uint32_t REG_MDP5_PIPE_SRC_STRIDE_B(enum mdp5_pipe i0) { return 0x00001228 + 0x400*i0; }
+#define MDP5_PIPE_SRC_STRIDE_B_P2__MASK				0x0000ffff
+#define MDP5_PIPE_SRC_STRIDE_B_P2__SHIFT			0
+static inline uint32_t MDP5_PIPE_SRC_STRIDE_B_P2(uint32_t val)
+{
+	return ((val) << MDP5_PIPE_SRC_STRIDE_B_P2__SHIFT) & MDP5_PIPE_SRC_STRIDE_B_P2__MASK;
+}
+#define MDP5_PIPE_SRC_STRIDE_B_P3__MASK				0xffff0000
+#define MDP5_PIPE_SRC_STRIDE_B_P3__SHIFT			16
+static inline uint32_t MDP5_PIPE_SRC_STRIDE_B_P3(uint32_t val)
+{
+	return ((val) << MDP5_PIPE_SRC_STRIDE_B_P3__SHIFT) & MDP5_PIPE_SRC_STRIDE_B_P3__MASK;
+}
+
+static inline uint32_t REG_MDP5_PIPE_STILE_FRAME_SIZE(enum mdp5_pipe i0) { return 0x0000122c + 0x400*i0; }
+
+static inline uint32_t REG_MDP5_PIPE_SRC_FORMAT(enum mdp5_pipe i0) { return 0x00001230 + 0x400*i0; }
+#define MDP5_PIPE_SRC_FORMAT_G_BPC__MASK			0x00000003
+#define MDP5_PIPE_SRC_FORMAT_G_BPC__SHIFT			0
+static inline uint32_t MDP5_PIPE_SRC_FORMAT_G_BPC(enum mdp_bpc val)
+{
+	return ((val) << MDP5_PIPE_SRC_FORMAT_G_BPC__SHIFT) & MDP5_PIPE_SRC_FORMAT_G_BPC__MASK;
+}
+#define MDP5_PIPE_SRC_FORMAT_B_BPC__MASK			0x0000000c
+#define MDP5_PIPE_SRC_FORMAT_B_BPC__SHIFT			2
+static inline uint32_t MDP5_PIPE_SRC_FORMAT_B_BPC(enum mdp_bpc val)
+{
+	return ((val) << MDP5_PIPE_SRC_FORMAT_B_BPC__SHIFT) & MDP5_PIPE_SRC_FORMAT_B_BPC__MASK;
+}
+#define MDP5_PIPE_SRC_FORMAT_R_BPC__MASK			0x00000030
+#define MDP5_PIPE_SRC_FORMAT_R_BPC__SHIFT			4
+static inline uint32_t MDP5_PIPE_SRC_FORMAT_R_BPC(enum mdp_bpc val)
+{
+	return ((val) << MDP5_PIPE_SRC_FORMAT_R_BPC__SHIFT) & MDP5_PIPE_SRC_FORMAT_R_BPC__MASK;
+}
+#define MDP5_PIPE_SRC_FORMAT_A_BPC__MASK			0x000000c0
+#define MDP5_PIPE_SRC_FORMAT_A_BPC__SHIFT			6
+static inline uint32_t MDP5_PIPE_SRC_FORMAT_A_BPC(enum mdp_bpc_alpha val)
+{
+	return ((val) << MDP5_PIPE_SRC_FORMAT_A_BPC__SHIFT) & MDP5_PIPE_SRC_FORMAT_A_BPC__MASK;
+}
+#define MDP5_PIPE_SRC_FORMAT_ALPHA_ENABLE			0x00000100
+#define MDP5_PIPE_SRC_FORMAT_CPP__MASK				0x00000600
+#define MDP5_PIPE_SRC_FORMAT_CPP__SHIFT				9
+static inline uint32_t MDP5_PIPE_SRC_FORMAT_CPP(uint32_t val)
+{
+	return ((val) << MDP5_PIPE_SRC_FORMAT_CPP__SHIFT) & MDP5_PIPE_SRC_FORMAT_CPP__MASK;
+}
+#define MDP5_PIPE_SRC_FORMAT_ROT90				0x00000800
+#define MDP5_PIPE_SRC_FORMAT_UNPACK_COUNT__MASK			0x00003000
+#define MDP5_PIPE_SRC_FORMAT_UNPACK_COUNT__SHIFT		12
+static inline uint32_t MDP5_PIPE_SRC_FORMAT_UNPACK_COUNT(uint32_t val)
+{
+	return ((val) << MDP5_PIPE_SRC_FORMAT_UNPACK_COUNT__SHIFT) & MDP5_PIPE_SRC_FORMAT_UNPACK_COUNT__MASK;
+}
+#define MDP5_PIPE_SRC_FORMAT_UNPACK_TIGHT			0x00020000
+#define MDP5_PIPE_SRC_FORMAT_UNPACK_ALIGN_MSB			0x00040000
+#define MDP5_PIPE_SRC_FORMAT_NUM_PLANES__MASK			0x00780000
+#define MDP5_PIPE_SRC_FORMAT_NUM_PLANES__SHIFT			19
+static inline uint32_t MDP5_PIPE_SRC_FORMAT_NUM_PLANES(uint32_t val)
+{
+	return ((val) << MDP5_PIPE_SRC_FORMAT_NUM_PLANES__SHIFT) & MDP5_PIPE_SRC_FORMAT_NUM_PLANES__MASK;
+}
+#define MDP5_PIPE_SRC_FORMAT_CHROMA_SAMP__MASK			0x01800000
+#define MDP5_PIPE_SRC_FORMAT_CHROMA_SAMP__SHIFT			23
+static inline uint32_t MDP5_PIPE_SRC_FORMAT_CHROMA_SAMP(enum mdp5_chroma_samp_type val)
+{
+	return ((val) << MDP5_PIPE_SRC_FORMAT_CHROMA_SAMP__SHIFT) & MDP5_PIPE_SRC_FORMAT_CHROMA_SAMP__MASK;
+}
+
+static inline uint32_t REG_MDP5_PIPE_SRC_UNPACK(enum mdp5_pipe i0) { return 0x00001234 + 0x400*i0; }
+#define MDP5_PIPE_SRC_UNPACK_ELEM0__MASK			0x000000ff
+#define MDP5_PIPE_SRC_UNPACK_ELEM0__SHIFT			0
+static inline uint32_t MDP5_PIPE_SRC_UNPACK_ELEM0(uint32_t val)
+{
+	return ((val) << MDP5_PIPE_SRC_UNPACK_ELEM0__SHIFT) & MDP5_PIPE_SRC_UNPACK_ELEM0__MASK;
+}
+#define MDP5_PIPE_SRC_UNPACK_ELEM1__MASK			0x0000ff00
+#define MDP5_PIPE_SRC_UNPACK_ELEM1__SHIFT			8
+static inline uint32_t MDP5_PIPE_SRC_UNPACK_ELEM1(uint32_t val)
+{
+	return ((val) << MDP5_PIPE_SRC_UNPACK_ELEM1__SHIFT) & MDP5_PIPE_SRC_UNPACK_ELEM1__MASK;
+}
+#define MDP5_PIPE_SRC_UNPACK_ELEM2__MASK			0x00ff0000
+#define MDP5_PIPE_SRC_UNPACK_ELEM2__SHIFT			16
+static inline uint32_t MDP5_PIPE_SRC_UNPACK_ELEM2(uint32_t val)
+{
+	return ((val) << MDP5_PIPE_SRC_UNPACK_ELEM2__SHIFT) & MDP5_PIPE_SRC_UNPACK_ELEM2__MASK;
+}
+#define MDP5_PIPE_SRC_UNPACK_ELEM3__MASK			0xff000000
+#define MDP5_PIPE_SRC_UNPACK_ELEM3__SHIFT			24
+static inline uint32_t MDP5_PIPE_SRC_UNPACK_ELEM3(uint32_t val)
+{
+	return ((val) << MDP5_PIPE_SRC_UNPACK_ELEM3__SHIFT) & MDP5_PIPE_SRC_UNPACK_ELEM3__MASK;
+}
+
+static inline uint32_t REG_MDP5_PIPE_SRC_OP_MODE(enum mdp5_pipe i0) { return 0x00001238 + 0x400*i0; }
+#define MDP5_PIPE_SRC_OP_MODE_BWC_EN				0x00000001
+#define MDP5_PIPE_SRC_OP_MODE_BWC__MASK				0x00000006
+#define MDP5_PIPE_SRC_OP_MODE_BWC__SHIFT			1
+static inline uint32_t MDP5_PIPE_SRC_OP_MODE_BWC(enum mdp5_pipe_bwc val)
+{
+	return ((val) << MDP5_PIPE_SRC_OP_MODE_BWC__SHIFT) & MDP5_PIPE_SRC_OP_MODE_BWC__MASK;
+}
+#define MDP5_PIPE_SRC_OP_MODE_FLIP_LR				0x00002000
+#define MDP5_PIPE_SRC_OP_MODE_FLIP_UD				0x00004000
+#define MDP5_PIPE_SRC_OP_MODE_IGC_EN				0x00010000
+#define MDP5_PIPE_SRC_OP_MODE_IGC_ROM_0				0x00020000
+#define MDP5_PIPE_SRC_OP_MODE_IGC_ROM_1				0x00040000
+#define MDP5_PIPE_SRC_OP_MODE_DEINTERLACE			0x00400000
+#define MDP5_PIPE_SRC_OP_MODE_DEINTERLACE_ODD			0x00800000
+
+static inline uint32_t REG_MDP5_PIPE_SRC_CONSTANT_COLOR(enum mdp5_pipe i0) { return 0x0000123c + 0x400*i0; }
+
+static inline uint32_t REG_MDP5_PIPE_FETCH_CONFIG(enum mdp5_pipe i0) { return 0x00001248 + 0x400*i0; }
+
+static inline uint32_t REG_MDP5_PIPE_VC1_RANGE(enum mdp5_pipe i0) { return 0x0000124c + 0x400*i0; }
+
+static inline uint32_t REG_MDP5_PIPE_REQPRIO_FIFO_WM_0(enum mdp5_pipe i0) { return 0x00001250 + 0x400*i0; }
+
+static inline uint32_t REG_MDP5_PIPE_REQPRIO_FIFO_WM_1(enum mdp5_pipe i0) { return 0x00001254 + 0x400*i0; }
+
+static inline uint32_t REG_MDP5_PIPE_REQPRIO_FIFO_WM_2(enum mdp5_pipe i0) { return 0x00001258 + 0x400*i0; }
+
+static inline uint32_t REG_MDP5_PIPE_SRC_ADDR_SW_STATUS(enum mdp5_pipe i0) { return 0x00001270 + 0x400*i0; }
+
+static inline uint32_t REG_MDP5_PIPE_CURRENT_SRC0_ADDR(enum mdp5_pipe i0) { return 0x000012a4 + 0x400*i0; }
+
+static inline uint32_t REG_MDP5_PIPE_CURRENT_SRC1_ADDR(enum mdp5_pipe i0) { return 0x000012a8 + 0x400*i0; }
+
+static inline uint32_t REG_MDP5_PIPE_CURRENT_SRC2_ADDR(enum mdp5_pipe i0) { return 0x000012ac + 0x400*i0; }
+
+static inline uint32_t REG_MDP5_PIPE_CURRENT_SRC3_ADDR(enum mdp5_pipe i0) { return 0x000012b0 + 0x400*i0; }
+
+static inline uint32_t REG_MDP5_PIPE_DECIMATION(enum mdp5_pipe i0) { return 0x000012b4 + 0x400*i0; }
+#define MDP5_PIPE_DECIMATION_VERT__MASK				0x000000ff
+#define MDP5_PIPE_DECIMATION_VERT__SHIFT			0
+static inline uint32_t MDP5_PIPE_DECIMATION_VERT(uint32_t val)
+{
+	return ((val) << MDP5_PIPE_DECIMATION_VERT__SHIFT) & MDP5_PIPE_DECIMATION_VERT__MASK;
+}
+#define MDP5_PIPE_DECIMATION_HORZ__MASK				0x0000ff00
+#define MDP5_PIPE_DECIMATION_HORZ__SHIFT			8
+static inline uint32_t MDP5_PIPE_DECIMATION_HORZ(uint32_t val)
+{
+	return ((val) << MDP5_PIPE_DECIMATION_HORZ__SHIFT) & MDP5_PIPE_DECIMATION_HORZ__MASK;
+}
+
+static inline uint32_t REG_MDP5_PIPE_SCALE_CONFIG(enum mdp5_pipe i0) { return 0x00001404 + 0x400*i0; }
+#define MDP5_PIPE_SCALE_CONFIG_SCALEX_EN			0x00000001
+#define MDP5_PIPE_SCALE_CONFIG_SCALEY_EN			0x00000002
+#define MDP5_PIPE_SCALE_CONFIG_SCALEX_MIN_FILTER__MASK		0x00000300
+#define MDP5_PIPE_SCALE_CONFIG_SCALEX_MIN_FILTER__SHIFT		8
+static inline uint32_t MDP5_PIPE_SCALE_CONFIG_SCALEX_MIN_FILTER(enum mdp5_scale_filter val)
+{
+	return ((val) << MDP5_PIPE_SCALE_CONFIG_SCALEX_MIN_FILTER__SHIFT) & MDP5_PIPE_SCALE_CONFIG_SCALEX_MIN_FILTER__MASK;
+}
+#define MDP5_PIPE_SCALE_CONFIG_SCALEY_MIN_FILTER__MASK		0x00000c00
+#define MDP5_PIPE_SCALE_CONFIG_SCALEY_MIN_FILTER__SHIFT		10
+static inline uint32_t MDP5_PIPE_SCALE_CONFIG_SCALEY_MIN_FILTER(enum mdp5_scale_filter val)
+{
+	return ((val) << MDP5_PIPE_SCALE_CONFIG_SCALEY_MIN_FILTER__SHIFT) & MDP5_PIPE_SCALE_CONFIG_SCALEY_MIN_FILTER__MASK;
+}
+#define MDP5_PIPE_SCALE_CONFIG_SCALEX_CR_FILTER__MASK		0x00003000
+#define MDP5_PIPE_SCALE_CONFIG_SCALEX_CR_FILTER__SHIFT		12
+static inline uint32_t MDP5_PIPE_SCALE_CONFIG_SCALEX_CR_FILTER(enum mdp5_scale_filter val)
+{
+	return ((val) << MDP5_PIPE_SCALE_CONFIG_SCALEX_CR_FILTER__SHIFT) & MDP5_PIPE_SCALE_CONFIG_SCALEX_CR_FILTER__MASK;
+}
+#define MDP5_PIPE_SCALE_CONFIG_SCALEY_CR_FILTER__MASK		0x0000c000
+#define MDP5_PIPE_SCALE_CONFIG_SCALEY_CR_FILTER__SHIFT		14
+static inline uint32_t MDP5_PIPE_SCALE_CONFIG_SCALEY_CR_FILTER(enum mdp5_scale_filter val)
+{
+	return ((val) << MDP5_PIPE_SCALE_CONFIG_SCALEY_CR_FILTER__SHIFT) & MDP5_PIPE_SCALE_CONFIG_SCALEY_CR_FILTER__MASK;
+}
+#define MDP5_PIPE_SCALE_CONFIG_SCALEX_MAX_FILTER__MASK		0x00030000
+#define MDP5_PIPE_SCALE_CONFIG_SCALEX_MAX_FILTER__SHIFT		16
+static inline uint32_t MDP5_PIPE_SCALE_CONFIG_SCALEX_MAX_FILTER(enum mdp5_scale_filter val)
+{
+	return ((val) << MDP5_PIPE_SCALE_CONFIG_SCALEX_MAX_FILTER__SHIFT) & MDP5_PIPE_SCALE_CONFIG_SCALEX_MAX_FILTER__MASK;
+}
+#define MDP5_PIPE_SCALE_CONFIG_SCALEY_MAX_FILTER__MASK		0x000c0000
+#define MDP5_PIPE_SCALE_CONFIG_SCALEY_MAX_FILTER__SHIFT		18
+static inline uint32_t MDP5_PIPE_SCALE_CONFIG_SCALEY_MAX_FILTER(enum mdp5_scale_filter val)
+{
+	return ((val) << MDP5_PIPE_SCALE_CONFIG_SCALEY_MAX_FILTER__SHIFT) & MDP5_PIPE_SCALE_CONFIG_SCALEY_MAX_FILTER__MASK;
+}
+
+static inline uint32_t REG_MDP5_PIPE_SCALE_PHASE_STEP_X(enum mdp5_pipe i0) { return 0x00001410 + 0x400*i0; }
+
+static inline uint32_t REG_MDP5_PIPE_SCALE_PHASE_STEP_Y(enum mdp5_pipe i0) { return 0x00001414 + 0x400*i0; }
+
+static inline uint32_t REG_MDP5_PIPE_SCALE_INIT_PHASE_X(enum mdp5_pipe i0) { return 0x00001420 + 0x400*i0; }
+
+static inline uint32_t REG_MDP5_PIPE_SCALE_INIT_PHASE_Y(enum mdp5_pipe i0) { return 0x00001424 + 0x400*i0; }
+
+static inline uint32_t REG_MDP5_LM(uint32_t i0) { return 0x00003200 + 0x400*i0; }
+
+static inline uint32_t REG_MDP5_LM_BLEND_COLOR_OUT(uint32_t i0) { return 0x00003200 + 0x400*i0; }
+#define MDP5_LM_BLEND_COLOR_OUT_STAGE0_FG_ALPHA			0x00000002
+#define MDP5_LM_BLEND_COLOR_OUT_STAGE1_FG_ALPHA			0x00000004
+#define MDP5_LM_BLEND_COLOR_OUT_STAGE2_FG_ALPHA			0x00000008
+#define MDP5_LM_BLEND_COLOR_OUT_STAGE3_FG_ALPHA			0x00000010
+
+static inline uint32_t REG_MDP5_LM_OUT_SIZE(uint32_t i0) { return 0x00003204 + 0x400*i0; }
+#define MDP5_LM_OUT_SIZE_HEIGHT__MASK				0xffff0000
+#define MDP5_LM_OUT_SIZE_HEIGHT__SHIFT				16
+static inline uint32_t MDP5_LM_OUT_SIZE_HEIGHT(uint32_t val)
+{
+	return ((val) << MDP5_LM_OUT_SIZE_HEIGHT__SHIFT) & MDP5_LM_OUT_SIZE_HEIGHT__MASK;
+}
+#define MDP5_LM_OUT_SIZE_WIDTH__MASK				0x0000ffff
+#define MDP5_LM_OUT_SIZE_WIDTH__SHIFT				0
+static inline uint32_t MDP5_LM_OUT_SIZE_WIDTH(uint32_t val)
+{
+	return ((val) << MDP5_LM_OUT_SIZE_WIDTH__SHIFT) & MDP5_LM_OUT_SIZE_WIDTH__MASK;
+}
+
+static inline uint32_t REG_MDP5_LM_BORDER_COLOR_0(uint32_t i0) { return 0x00003208 + 0x400*i0; }
+
+static inline uint32_t REG_MDP5_LM_BORDER_COLOR_1(uint32_t i0) { return 0x00003210 + 0x400*i0; }
+
+static inline uint32_t REG_MDP5_LM_BLEND(uint32_t i0, uint32_t i1) { return 0x00003220 + 0x400*i0 + 0x30*i1; }
+
+static inline uint32_t REG_MDP5_LM_BLEND_OP_MODE(uint32_t i0, uint32_t i1) { return 0x00003220 + 0x400*i0 + 0x30*i1; }
+#define MDP5_LM_BLEND_OP_MODE_FG_ALPHA__MASK			0x00000003
+#define MDP5_LM_BLEND_OP_MODE_FG_ALPHA__SHIFT			0
+static inline uint32_t MDP5_LM_BLEND_OP_MODE_FG_ALPHA(enum mdp_alpha_type val)
+{
+	return ((val) << MDP5_LM_BLEND_OP_MODE_FG_ALPHA__SHIFT) & MDP5_LM_BLEND_OP_MODE_FG_ALPHA__MASK;
+}
+#define MDP5_LM_BLEND_OP_MODE_FG_INV_ALPHA			0x00000004
+#define MDP5_LM_BLEND_OP_MODE_FG_MOD_ALPHA			0x00000008
+#define MDP5_LM_BLEND_OP_MODE_FG_INV_MOD_ALPHA			0x00000010
+#define MDP5_LM_BLEND_OP_MODE_FG_TRANSP_EN			0x00000020
+#define MDP5_LM_BLEND_OP_MODE_BG_ALPHA__MASK			0x00000300
+#define MDP5_LM_BLEND_OP_MODE_BG_ALPHA__SHIFT			8
+static inline uint32_t MDP5_LM_BLEND_OP_MODE_BG_ALPHA(enum mdp_alpha_type val)
+{
+	return ((val) << MDP5_LM_BLEND_OP_MODE_BG_ALPHA__SHIFT) & MDP5_LM_BLEND_OP_MODE_BG_ALPHA__MASK;
+}
+#define MDP5_LM_BLEND_OP_MODE_BG_INV_ALPHA			0x00000400
+#define MDP5_LM_BLEND_OP_MODE_BG_MOD_ALPHA			0x00000800
+#define MDP5_LM_BLEND_OP_MODE_BG_INV_MOD_ALPHA			0x00001000
+#define MDP5_LM_BLEND_OP_MODE_BG_TRANSP_EN			0x00002000
+
+static inline uint32_t REG_MDP5_LM_BLEND_FG_ALPHA(uint32_t i0, uint32_t i1) { return 0x00003224 + 0x400*i0 + 0x30*i1; }
+
+static inline uint32_t REG_MDP5_LM_BLEND_BG_ALPHA(uint32_t i0, uint32_t i1) { return 0x00003228 + 0x400*i0 + 0x30*i1; }
+
+static inline uint32_t REG_MDP5_LM_BLEND_FG_TRANSP_LOW0(uint32_t i0, uint32_t i1) { return 0x0000322c + 0x400*i0 + 0x30*i1; }
+
+static inline uint32_t REG_MDP5_LM_BLEND_FG_TRANSP_LOW1(uint32_t i0, uint32_t i1) { return 0x00003230 + 0x400*i0 + 0x30*i1; }
+
+static inline uint32_t REG_MDP5_LM_BLEND_FG_TRANSP_HIGH0(uint32_t i0, uint32_t i1) { return 0x00003234 + 0x400*i0 + 0x30*i1; }
+
+static inline uint32_t REG_MDP5_LM_BLEND_FG_TRANSP_HIGH1(uint32_t i0, uint32_t i1) { return 0x00003238 + 0x400*i0 + 0x30*i1; }
+
+static inline uint32_t REG_MDP5_LM_BLEND_BG_TRANSP_LOW0(uint32_t i0, uint32_t i1) { return 0x0000323c + 0x400*i0 + 0x30*i1; }
+
+static inline uint32_t REG_MDP5_LM_BLEND_BG_TRANSP_LOW1(uint32_t i0, uint32_t i1) { return 0x00003240 + 0x400*i0 + 0x30*i1; }
+
+static inline uint32_t REG_MDP5_LM_BLEND_BG_TRANSP_HIGH0(uint32_t i0, uint32_t i1) { return 0x00003244 + 0x400*i0 + 0x30*i1; }
+
+static inline uint32_t REG_MDP5_LM_BLEND_BG_TRANSP_HIGH1(uint32_t i0, uint32_t i1) { return 0x00003248 + 0x400*i0 + 0x30*i1; }
+
+static inline uint32_t REG_MDP5_LM_CURSOR_IMG_SIZE(uint32_t i0) { return 0x000032e0 + 0x400*i0; }
+
+static inline uint32_t REG_MDP5_LM_CURSOR_SIZE(uint32_t i0) { return 0x000032e4 + 0x400*i0; }
+
+static inline uint32_t REG_MDP5_LM_CURSOR_XY(uint32_t i0) { return 0x000032e8 + 0x400*i0; }
+
+static inline uint32_t REG_MDP5_LM_CURSOR_STRIDE(uint32_t i0) { return 0x000032dc + 0x400*i0; }
+
+static inline uint32_t REG_MDP5_LM_CURSOR_FORMAT(uint32_t i0) { return 0x000032ec + 0x400*i0; }
+
+static inline uint32_t REG_MDP5_LM_CURSOR_BASE_ADDR(uint32_t i0) { return 0x000032f0 + 0x400*i0; }
+
+static inline uint32_t REG_MDP5_LM_CURSOR_START_XY(uint32_t i0) { return 0x000032f4 + 0x400*i0; }
+
+static inline uint32_t REG_MDP5_LM_CURSOR_BLEND_CONFIG(uint32_t i0) { return 0x000032f8 + 0x400*i0; }
+
+static inline uint32_t REG_MDP5_LM_CURSOR_BLEND_PARAM(uint32_t i0) { return 0x000032fc + 0x400*i0; }
+
+static inline uint32_t REG_MDP5_LM_CURSOR_BLEND_TRANSP_LOW0(uint32_t i0) { return 0x00003300 + 0x400*i0; }
+
+static inline uint32_t REG_MDP5_LM_CURSOR_BLEND_TRANSP_LOW1(uint32_t i0) { return 0x00003304 + 0x400*i0; }
+
+static inline uint32_t REG_MDP5_LM_CURSOR_BLEND_TRANSP_HIGH0(uint32_t i0) { return 0x00003308 + 0x400*i0; }
+
+static inline uint32_t REG_MDP5_LM_CURSOR_BLEND_TRANSP_HIGH1(uint32_t i0) { return 0x0000330c + 0x400*i0; }
+
+static inline uint32_t REG_MDP5_LM_GC_LUT_BASE(uint32_t i0) { return 0x00003310 + 0x400*i0; }
+
+static inline uint32_t REG_MDP5_DSPP(uint32_t i0) { return 0x00004600 + 0x400*i0; }
+
+static inline uint32_t REG_MDP5_DSPP_OP_MODE(uint32_t i0) { return 0x00004600 + 0x400*i0; }
+#define MDP5_DSPP_OP_MODE_IGC_LUT_EN				0x00000001
+#define MDP5_DSPP_OP_MODE_IGC_TBL_IDX__MASK			0x0000000e
+#define MDP5_DSPP_OP_MODE_IGC_TBL_IDX__SHIFT			1
+static inline uint32_t MDP5_DSPP_OP_MODE_IGC_TBL_IDX(uint32_t val)
+{
+	return ((val) << MDP5_DSPP_OP_MODE_IGC_TBL_IDX__SHIFT) & MDP5_DSPP_OP_MODE_IGC_TBL_IDX__MASK;
+}
+#define MDP5_DSPP_OP_MODE_PCC_EN				0x00000010
+#define MDP5_DSPP_OP_MODE_DITHER_EN				0x00000100
+#define MDP5_DSPP_OP_MODE_HIST_EN				0x00010000
+#define MDP5_DSPP_OP_MODE_AUTO_CLEAR				0x00020000
+#define MDP5_DSPP_OP_MODE_HIST_LUT_EN				0x00080000
+#define MDP5_DSPP_OP_MODE_PA_EN					0x00100000
+#define MDP5_DSPP_OP_MODE_GAMUT_EN				0x00800000
+#define MDP5_DSPP_OP_MODE_GAMUT_ORDER				0x01000000
+
+static inline uint32_t REG_MDP5_DSPP_PCC_BASE(uint32_t i0) { return 0x00004630 + 0x400*i0; }
+
+static inline uint32_t REG_MDP5_DSPP_DITHER_DEPTH(uint32_t i0) { return 0x00004750 + 0x400*i0; }
+
+static inline uint32_t REG_MDP5_DSPP_HIST_CTL_BASE(uint32_t i0) { return 0x00004810 + 0x400*i0; }
+
+static inline uint32_t REG_MDP5_DSPP_HIST_LUT_BASE(uint32_t i0) { return 0x00004830 + 0x400*i0; }
+
+static inline uint32_t REG_MDP5_DSPP_HIST_LUT_SWAP(uint32_t i0) { return 0x00004834 + 0x400*i0; }
+
+static inline uint32_t REG_MDP5_DSPP_PA_BASE(uint32_t i0) { return 0x00004838 + 0x400*i0; }
+
+static inline uint32_t REG_MDP5_DSPP_GAMUT_BASE(uint32_t i0) { return 0x000048dc + 0x400*i0; }
+
+static inline uint32_t REG_MDP5_DSPP_GC_BASE(uint32_t i0) { return 0x000048b0 + 0x400*i0; }
+
+static inline uint32_t REG_MDP5_INTF(uint32_t i0) { return 0x00012500 + 0x200*i0; }
+
+static inline uint32_t REG_MDP5_INTF_TIMING_ENGINE_EN(uint32_t i0) { return 0x00012500 + 0x200*i0; }
+
+static inline uint32_t REG_MDP5_INTF_CONFIG(uint32_t i0) { return 0x00012504 + 0x200*i0; }
+
+static inline uint32_t REG_MDP5_INTF_HSYNC_CTL(uint32_t i0) { return 0x00012508 + 0x200*i0; }
+#define MDP5_INTF_HSYNC_CTL_PULSEW__MASK			0x0000ffff
+#define MDP5_INTF_HSYNC_CTL_PULSEW__SHIFT			0
+static inline uint32_t MDP5_INTF_HSYNC_CTL_PULSEW(uint32_t val)
+{
+	return ((val) << MDP5_INTF_HSYNC_CTL_PULSEW__SHIFT) & MDP5_INTF_HSYNC_CTL_PULSEW__MASK;
+}
+#define MDP5_INTF_HSYNC_CTL_PERIOD__MASK			0xffff0000
+#define MDP5_INTF_HSYNC_CTL_PERIOD__SHIFT			16
+static inline uint32_t MDP5_INTF_HSYNC_CTL_PERIOD(uint32_t val)
+{
+	return ((val) << MDP5_INTF_HSYNC_CTL_PERIOD__SHIFT) & MDP5_INTF_HSYNC_CTL_PERIOD__MASK;
+}
+
+static inline uint32_t REG_MDP5_INTF_VSYNC_PERIOD_F0(uint32_t i0) { return 0x0001250c + 0x200*i0; }
+
+static inline uint32_t REG_MDP5_INTF_VSYNC_PERIOD_F1(uint32_t i0) { return 0x00012510 + 0x200*i0; }
+
+static inline uint32_t REG_MDP5_INTF_VSYNC_LEN_F0(uint32_t i0) { return 0x00012514 + 0x200*i0; }
+
+static inline uint32_t REG_MDP5_INTF_VSYNC_LEN_F1(uint32_t i0) { return 0x00012518 + 0x200*i0; }
+
+static inline uint32_t REG_MDP5_INTF_DISPLAY_VSTART_F0(uint32_t i0) { return 0x0001251c + 0x200*i0; }
+
+static inline uint32_t REG_MDP5_INTF_DISPLAY_VSTART_F1(uint32_t i0) { return 0x00012520 + 0x200*i0; }
+
+static inline uint32_t REG_MDP5_INTF_DISPLAY_VEND_F0(uint32_t i0) { return 0x00012524 + 0x200*i0; }
+
+static inline uint32_t REG_MDP5_INTF_DISPLAY_VEND_F1(uint32_t i0) { return 0x00012528 + 0x200*i0; }
+
+static inline uint32_t REG_MDP5_INTF_ACTIVE_VSTART_F0(uint32_t i0) { return 0x0001252c + 0x200*i0; }
+#define MDP5_INTF_ACTIVE_VSTART_F0_VAL__MASK			0x7fffffff
+#define MDP5_INTF_ACTIVE_VSTART_F0_VAL__SHIFT			0
+static inline uint32_t MDP5_INTF_ACTIVE_VSTART_F0_VAL(uint32_t val)
+{
+	return ((val) << MDP5_INTF_ACTIVE_VSTART_F0_VAL__SHIFT) & MDP5_INTF_ACTIVE_VSTART_F0_VAL__MASK;
+}
+#define MDP5_INTF_ACTIVE_VSTART_F0_ACTIVE_V_ENABLE		0x80000000
+
+static inline uint32_t REG_MDP5_INTF_ACTIVE_VSTART_F1(uint32_t i0) { return 0x00012530 + 0x200*i0; }
+#define MDP5_INTF_ACTIVE_VSTART_F1_VAL__MASK			0x7fffffff
+#define MDP5_INTF_ACTIVE_VSTART_F1_VAL__SHIFT			0
+static inline uint32_t MDP5_INTF_ACTIVE_VSTART_F1_VAL(uint32_t val)
+{
+	return ((val) << MDP5_INTF_ACTIVE_VSTART_F1_VAL__SHIFT) & MDP5_INTF_ACTIVE_VSTART_F1_VAL__MASK;
+}
+
+static inline uint32_t REG_MDP5_INTF_ACTIVE_VEND_F0(uint32_t i0) { return 0x00012534 + 0x200*i0; }
+
+static inline uint32_t REG_MDP5_INTF_ACTIVE_VEND_F1(uint32_t i0) { return 0x00012538 + 0x200*i0; }
+
+static inline uint32_t REG_MDP5_INTF_DISPLAY_HCTL(uint32_t i0) { return 0x0001253c + 0x200*i0; }
+#define MDP5_INTF_DISPLAY_HCTL_START__MASK			0x0000ffff
+#define MDP5_INTF_DISPLAY_HCTL_START__SHIFT			0
+static inline uint32_t MDP5_INTF_DISPLAY_HCTL_START(uint32_t val)
+{
+	return ((val) << MDP5_INTF_DISPLAY_HCTL_START__SHIFT) & MDP5_INTF_DISPLAY_HCTL_START__MASK;
+}
+#define MDP5_INTF_DISPLAY_HCTL_END__MASK			0xffff0000
+#define MDP5_INTF_DISPLAY_HCTL_END__SHIFT			16
+static inline uint32_t MDP5_INTF_DISPLAY_HCTL_END(uint32_t val)
+{
+	return ((val) << MDP5_INTF_DISPLAY_HCTL_END__SHIFT) & MDP5_INTF_DISPLAY_HCTL_END__MASK;
+}
+
+static inline uint32_t REG_MDP5_INTF_ACTIVE_HCTL(uint32_t i0) { return 0x00012540 + 0x200*i0; }
+#define MDP5_INTF_ACTIVE_HCTL_START__MASK			0x00007fff
+#define MDP5_INTF_ACTIVE_HCTL_START__SHIFT			0
+static inline uint32_t MDP5_INTF_ACTIVE_HCTL_START(uint32_t val)
+{
+	return ((val) << MDP5_INTF_ACTIVE_HCTL_START__SHIFT) & MDP5_INTF_ACTIVE_HCTL_START__MASK;
+}
+#define MDP5_INTF_ACTIVE_HCTL_END__MASK				0x7fff0000
+#define MDP5_INTF_ACTIVE_HCTL_END__SHIFT			16
+static inline uint32_t MDP5_INTF_ACTIVE_HCTL_END(uint32_t val)
+{
+	return ((val) << MDP5_INTF_ACTIVE_HCTL_END__SHIFT) & MDP5_INTF_ACTIVE_HCTL_END__MASK;
+}
+#define MDP5_INTF_ACTIVE_HCTL_ACTIVE_H_ENABLE			0x80000000
+
+static inline uint32_t REG_MDP5_INTF_BORDER_COLOR(uint32_t i0) { return 0x00012544 + 0x200*i0; }
+
+static inline uint32_t REG_MDP5_INTF_UNDERFLOW_COLOR(uint32_t i0) { return 0x00012548 + 0x200*i0; }
+
+static inline uint32_t REG_MDP5_INTF_HSYNC_SKEW(uint32_t i0) { return 0x0001254c + 0x200*i0; }
+
+static inline uint32_t REG_MDP5_INTF_POLARITY_CTL(uint32_t i0) { return 0x00012550 + 0x200*i0; }
+#define MDP5_INTF_POLARITY_CTL_HSYNC_LOW			0x00000001
+#define MDP5_INTF_POLARITY_CTL_VSYNC_LOW			0x00000002
+#define MDP5_INTF_POLARITY_CTL_DATA_EN_LOW			0x00000004
+
+static inline uint32_t REG_MDP5_INTF_TEST_CTL(uint32_t i0) { return 0x00012554 + 0x200*i0; }
+
+static inline uint32_t REG_MDP5_INTF_TP_COLOR0(uint32_t i0) { return 0x00012558 + 0x200*i0; }
+
+static inline uint32_t REG_MDP5_INTF_TP_COLOR1(uint32_t i0) { return 0x0001255c + 0x200*i0; }
+
+static inline uint32_t REG_MDP5_INTF_DSI_CMD_MODE_TRIGGER_EN(uint32_t i0) { return 0x00012584 + 0x200*i0; }
+
+static inline uint32_t REG_MDP5_INTF_PANEL_FORMAT(uint32_t i0) { return 0x00012590 + 0x200*i0; }
+
+static inline uint32_t REG_MDP5_INTF_FRAME_LINE_COUNT_EN(uint32_t i0) { return 0x000125a8 + 0x200*i0; }
+
+static inline uint32_t REG_MDP5_INTF_FRAME_COUNT(uint32_t i0) { return 0x000125ac + 0x200*i0; }
+
+static inline uint32_t REG_MDP5_INTF_LINE_COUNT(uint32_t i0) { return 0x000125b0 + 0x200*i0; }
+
+static inline uint32_t REG_MDP5_INTF_DEFLICKER_CONFIG(uint32_t i0) { return 0x000125f0 + 0x200*i0; }
+
+static inline uint32_t REG_MDP5_INTF_DEFLICKER_STRNG_COEFF(uint32_t i0) { return 0x000125f4 + 0x200*i0; }
+
+static inline uint32_t REG_MDP5_INTF_DEFLICKER_WEAK_COEFF(uint32_t i0) { return 0x000125f8 + 0x200*i0; }
+
+static inline uint32_t REG_MDP5_INTF_TPG_ENABLE(uint32_t i0) { return 0x00012600 + 0x200*i0; }
+
+static inline uint32_t REG_MDP5_INTF_TPG_MAIN_CONTROL(uint32_t i0) { return 0x00012604 + 0x200*i0; }
+
+static inline uint32_t REG_MDP5_INTF_TPG_VIDEO_CONFIG(uint32_t i0) { return 0x00012608 + 0x200*i0; }
+
+static inline uint32_t REG_MDP5_INTF_TPG_COMPONENT_LIMITS(uint32_t i0) { return 0x0001260c + 0x200*i0; }
+
+static inline uint32_t REG_MDP5_INTF_TPG_RECTANGLE(uint32_t i0) { return 0x00012610 + 0x200*i0; }
+
+static inline uint32_t REG_MDP5_INTF_TPG_INITIAL_VALUE(uint32_t i0) { return 0x00012614 + 0x200*i0; }
+
+static inline uint32_t REG_MDP5_INTF_TPG_BLK_WHITE_PATTERN_FRAME(uint32_t i0) { return 0x00012618 + 0x200*i0; }
+
+static inline uint32_t REG_MDP5_INTF_TPG_RGB_MAPPING(uint32_t i0) { return 0x0001261c + 0x200*i0; }
+
+static inline uint32_t REG_MDP5_AD(uint32_t i0) { return 0x00013100 + 0x200*i0; }
+
+static inline uint32_t REG_MDP5_AD_BYPASS(uint32_t i0) { return 0x00013100 + 0x200*i0; }
+
+static inline uint32_t REG_MDP5_AD_CTRL_0(uint32_t i0) { return 0x00013104 + 0x200*i0; }
+
+static inline uint32_t REG_MDP5_AD_CTRL_1(uint32_t i0) { return 0x00013108 + 0x200*i0; }
+
+static inline uint32_t REG_MDP5_AD_FRAME_SIZE(uint32_t i0) { return 0x0001310c + 0x200*i0; }
+
+static inline uint32_t REG_MDP5_AD_CON_CTRL_0(uint32_t i0) { return 0x00013110 + 0x200*i0; }
+
+static inline uint32_t REG_MDP5_AD_CON_CTRL_1(uint32_t i0) { return 0x00013114 + 0x200*i0; }
+
+static inline uint32_t REG_MDP5_AD_STR_MAN(uint32_t i0) { return 0x00013118 + 0x200*i0; }
+
+static inline uint32_t REG_MDP5_AD_VAR(uint32_t i0) { return 0x0001311c + 0x200*i0; }
+
+static inline uint32_t REG_MDP5_AD_DITH(uint32_t i0) { return 0x00013120 + 0x200*i0; }
+
+static inline uint32_t REG_MDP5_AD_DITH_CTRL(uint32_t i0) { return 0x00013124 + 0x200*i0; }
+
+static inline uint32_t REG_MDP5_AD_AMP_LIM(uint32_t i0) { return 0x00013128 + 0x200*i0; }
+
+static inline uint32_t REG_MDP5_AD_SLOPE(uint32_t i0) { return 0x0001312c + 0x200*i0; }
+
+static inline uint32_t REG_MDP5_AD_BW_LVL(uint32_t i0) { return 0x00013130 + 0x200*i0; }
+
+static inline uint32_t REG_MDP5_AD_LOGO_POS(uint32_t i0) { return 0x00013134 + 0x200*i0; }
+
+static inline uint32_t REG_MDP5_AD_LUT_FI(uint32_t i0) { return 0x00013138 + 0x200*i0; }
+
+static inline uint32_t REG_MDP5_AD_LUT_CC(uint32_t i0) { return 0x0001317c + 0x200*i0; }
+
+static inline uint32_t REG_MDP5_AD_STR_LIM(uint32_t i0) { return 0x000131c8 + 0x200*i0; }
+
+static inline uint32_t REG_MDP5_AD_CALIB_AB(uint32_t i0) { return 0x000131cc + 0x200*i0; }
+
+static inline uint32_t REG_MDP5_AD_CALIB_CD(uint32_t i0) { return 0x000131d0 + 0x200*i0; }
+
+static inline uint32_t REG_MDP5_AD_MODE_SEL(uint32_t i0) { return 0x000131d4 + 0x200*i0; }
+
+static inline uint32_t REG_MDP5_AD_TFILT_CTRL(uint32_t i0) { return 0x000131d8 + 0x200*i0; }
+
+static inline uint32_t REG_MDP5_AD_BL_MINMAX(uint32_t i0) { return 0x000131dc + 0x200*i0; }
+
+static inline uint32_t REG_MDP5_AD_BL(uint32_t i0) { return 0x000131e0 + 0x200*i0; }
+
+static inline uint32_t REG_MDP5_AD_BL_MAX(uint32_t i0) { return 0x000131e8 + 0x200*i0; }
+
+static inline uint32_t REG_MDP5_AD_AL(uint32_t i0) { return 0x000131ec + 0x200*i0; }
+
+static inline uint32_t REG_MDP5_AD_AL_MIN(uint32_t i0) { return 0x000131f0 + 0x200*i0; }
+
+static inline uint32_t REG_MDP5_AD_AL_FILT(uint32_t i0) { return 0x000131f4 + 0x200*i0; }
+
+static inline uint32_t REG_MDP5_AD_CFG_BUF(uint32_t i0) { return 0x000131f8 + 0x200*i0; }
+
+static inline uint32_t REG_MDP5_AD_LUT_AL(uint32_t i0) { return 0x00013200 + 0x200*i0; }
+
+static inline uint32_t REG_MDP5_AD_TARG_STR(uint32_t i0) { return 0x00013244 + 0x200*i0; }
+
+static inline uint32_t REG_MDP5_AD_START_CALC(uint32_t i0) { return 0x00013248 + 0x200*i0; }
+
+static inline uint32_t REG_MDP5_AD_STR_OUT(uint32_t i0) { return 0x0001324c + 0x200*i0; }
+
+static inline uint32_t REG_MDP5_AD_BL_OUT(uint32_t i0) { return 0x00013254 + 0x200*i0; }
+
+static inline uint32_t REG_MDP5_AD_CALC_DONE(uint32_t i0) { return 0x00013258 + 0x200*i0; }
+
+
+#endif /* MDP5_XML */
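
A minimal usage sketch (not part of the patch) for the generated helpers above: each register field comes as a __MASK/__SHIFT pair plus a pack helper, and the REG_* helpers compute per-instance addresses. mdp5_write() is used by mdp5_crtc.c later in this patch; the mdp5_read() counterpart is assumed here, and lm/mode are placeholders:

	/* pack fields into a register value and write it */
	mdp5_write(mdp5_kms, REG_MDP5_LM_OUT_SIZE(lm),
			MDP5_LM_OUT_SIZE_WIDTH(mode->hdisplay) |
			MDP5_LM_OUT_SIZE_HEIGHT(mode->vdisplay));

	/* decode fields from a register read (mdp5_read() is an assumption) */
	uint32_t ver = mdp5_read(mdp5_kms, REG_MDP5_MDP_VERSION);
	uint32_t major = (ver & MDP5_MDP_VERSION_MAJOR__MASK) >> MDP5_MDP_VERSION_MAJOR__SHIFT;
	uint32_t minor = (ver & MDP5_MDP_VERSION_MINOR__MASK) >> MDP5_MDP_VERSION_MINOR__SHIFT;
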
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_crtc.c b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_crtc.c
new file mode 100644
index 0000000..f279402
--- /dev/null
+++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_crtc.c
@@ -0,0 +1,574 @@
+/*
+ * Copyright (C) 2013 Red Hat
+ * Author: Rob Clark <robdclark@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "mdp5_kms.h"
+
+#include <drm/drm_mode.h>
+#include "drm_crtc.h"
+#include "drm_crtc_helper.h"
+#include "drm_flip_work.h"
+
+struct mdp5_crtc {
+	struct drm_crtc base;
+	char name[8];
+	struct drm_plane *plane;
+	struct drm_plane *planes[8];
+	int id;
+	bool enabled;
+
+	/* which mixer/encoder we route output to: */
+	int mixer;
+
+	/* if there is a pending flip, these will be non-null: */
+	struct drm_pending_vblank_event *event;
+	struct msm_fence_cb pageflip_cb;
+
+#define PENDING_CURSOR 0x1
+#define PENDING_FLIP   0x2
+	atomic_t pending;
+
+	/* the fb that we logically (from the PoV of the KMS API) hold a
+	 * ref to, which we may not yet be scanning out (we may still be
+	 * scanning out the previous fb, in case of a page_flip while
+	 * waiting for gpu rendering to complete):
+	 */
+	struct drm_framebuffer *fb;
+
+	/* the fb that we currently hold a scanout ref to: */
+	struct drm_framebuffer *scanout_fb;
+
+	/* for unref'ing framebuffers after scanout completes: */
+	struct drm_flip_work unref_fb_work;
+
+	struct mdp_irq vblank;
+	struct mdp_irq err;
+};
+#define to_mdp5_crtc(x) container_of(x, struct mdp5_crtc, base)
+
+static struct mdp5_kms *get_kms(struct drm_crtc *crtc)
+{
+	struct msm_drm_private *priv = crtc->dev->dev_private;
+	return to_mdp5_kms(to_mdp_kms(priv->kms));
+}
+
+static void request_pending(struct drm_crtc *crtc, uint32_t pending)
+{
+	struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
+
+	atomic_or(pending, &mdp5_crtc->pending);
+	mdp_irq_register(&get_kms(crtc)->base, &mdp5_crtc->vblank);
+}
+
+static void crtc_flush(struct drm_crtc *crtc)
+{
+	struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
+	struct mdp5_kms *mdp5_kms = get_kms(crtc);
+	int id = mdp5_crtc->id;
+	uint32_t i, flush = 0;
+
+	for (i = 0; i < ARRAY_SIZE(mdp5_crtc->planes); i++) {
+		struct drm_plane *plane = mdp5_crtc->planes[i];
+		if (plane) {
+			enum mdp5_pipe pipe = mdp5_plane_pipe(plane);
+			flush |= pipe2flush(pipe);
+		}
+	}
+	flush |= mixer2flush(mdp5_crtc->id);
+	flush |= MDP5_CTL_FLUSH_CTL;
+
+	DBG("%s: flush=%08x", mdp5_crtc->name, flush);
+
+	mdp5_write(mdp5_kms, REG_MDP5_CTL_FLUSH(id), flush);
+}
+
+static void update_fb(struct drm_crtc *crtc, struct drm_framebuffer *new_fb)
+{
+	struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
+	struct drm_framebuffer *old_fb = mdp5_crtc->fb;
+
+	/* grab reference to incoming scanout fb: */
+	drm_framebuffer_reference(new_fb);
+	mdp5_crtc->base.fb = new_fb;
+	mdp5_crtc->fb = new_fb;
+
+	if (old_fb)
+		drm_flip_work_queue(&mdp5_crtc->unref_fb_work, old_fb);
+}
+
+/* Unlike update_fb(), take a ref to the new scanout fb *before* updating
+ * the plane, then call this.  Needed to ensure we don't unref the buffer
+ * that is actually still being scanned out.
+ *
+ * Note that this whole thing goes away with atomic.. since we can defer
+ * calling into the driver until rendering is done.
+ */
+static void update_scanout(struct drm_crtc *crtc, struct drm_framebuffer *fb)
+{
+	struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
+
+	/* flush updates, to make sure hw is updated to new scanout fb,
+	 * so that we can safely queue unref to current fb (ie. next
+	 * vblank we know hw is done w/ previous scanout_fb).
+	 */
+	crtc_flush(crtc);
+
+	if (mdp5_crtc->scanout_fb)
+		drm_flip_work_queue(&mdp5_crtc->unref_fb_work,
+				mdp5_crtc->scanout_fb);
+
+	mdp5_crtc->scanout_fb = fb;
+
+	/* enable vblank to complete flip: */
+	request_pending(crtc, PENDING_FLIP);
+}
+
+/* if file!=NULL, this is the potential cancel-flip path from preclose */
+static void complete_flip(struct drm_crtc *crtc, struct drm_file *file)
+{
+	struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
+	struct drm_device *dev = crtc->dev;
+	struct drm_pending_vblank_event *event;
+	unsigned long flags, i;
+
+	spin_lock_irqsave(&dev->event_lock, flags);
+	event = mdp5_crtc->event;
+	if (event) {
+		/* if regular vblank case (!file) or if cancel-flip from
+		 * preclose on file that requested flip, then send the
+		 * event:
+		 */
+		if (!file || (event->base.file_priv == file)) {
+			mdp5_crtc->event = NULL;
+			drm_send_vblank_event(dev, mdp5_crtc->id, event);
+		}
+	}
+	spin_unlock_irqrestore(&dev->event_lock, flags);
+
+	for (i = 0; i < ARRAY_SIZE(mdp5_crtc->planes); i++) {
+		struct drm_plane *plane = mdp5_crtc->planes[i];
+		if (plane)
+			mdp5_plane_complete_flip(plane);
+	}
+}
+
+static void pageflip_cb(struct msm_fence_cb *cb)
+{
+	struct mdp5_crtc *mdp5_crtc =
+		container_of(cb, struct mdp5_crtc, pageflip_cb);
+	struct drm_crtc *crtc = &mdp5_crtc->base;
+	struct drm_framebuffer *fb = mdp5_crtc->fb;
+
+	if (!fb)
+		return;
+
+	drm_framebuffer_reference(fb);
+	mdp5_plane_set_scanout(mdp5_crtc->plane, fb);
+	update_scanout(crtc, fb);
+}
+
+static void unref_fb_worker(struct drm_flip_work *work, void *val)
+{
+	struct mdp5_crtc *mdp5_crtc =
+		container_of(work, struct mdp5_crtc, unref_fb_work);
+	struct drm_device *dev = mdp5_crtc->base.dev;
+
+	mutex_lock(&dev->mode_config.mutex);
+	drm_framebuffer_unreference(val);
+	mutex_unlock(&dev->mode_config.mutex);
+}
+
+static void mdp5_crtc_destroy(struct drm_crtc *crtc)
+{
+	struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
+
+	mdp5_crtc->plane->funcs->destroy(mdp5_crtc->plane);
+
+	drm_crtc_cleanup(crtc);
+	drm_flip_work_cleanup(&mdp5_crtc->unref_fb_work);
+
+	kfree(mdp5_crtc);
+}
+
+static void mdp5_crtc_dpms(struct drm_crtc *crtc, int mode)
+{
+	struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
+	struct mdp5_kms *mdp5_kms = get_kms(crtc);
+	bool enabled = (mode == DRM_MODE_DPMS_ON);
+
+	DBG("%s: mode=%d", mdp5_crtc->name, mode);
+
+	if (enabled != mdp5_crtc->enabled) {
+		if (enabled) {
+			mdp5_enable(mdp5_kms);
+			mdp_irq_register(&mdp5_kms->base, &mdp5_crtc->err);
+		} else {
+			mdp_irq_unregister(&mdp5_kms->base, &mdp5_crtc->err);
+			mdp5_disable(mdp5_kms);
+		}
+		mdp5_crtc->enabled = enabled;
+	}
+}
+
+static bool mdp5_crtc_mode_fixup(struct drm_crtc *crtc,
+		const struct drm_display_mode *mode,
+		struct drm_display_mode *adjusted_mode)
+{
+	return true;
+}
+
+static void blend_setup(struct drm_crtc *crtc)
+{
+	struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
+	struct mdp5_kms *mdp5_kms = get_kms(crtc);
+	int id = mdp5_crtc->id;
+
+	/*
+	 * Hard-coded setup for now until I figure out how the
+	 * layer-mixer works
+	 */
+
+	/* LM[id]: */
+	mdp5_write(mdp5_kms, REG_MDP5_LM_BLEND_COLOR_OUT(id),
+			MDP5_LM_BLEND_COLOR_OUT_STAGE0_FG_ALPHA);
+	mdp5_write(mdp5_kms, REG_MDP5_LM_BLEND_OP_MODE(id, 0),
+			MDP5_LM_BLEND_OP_MODE_FG_ALPHA(FG_CONST) |
+			MDP5_LM_BLEND_OP_MODE_BG_ALPHA(FG_PIXEL) |
+			MDP5_LM_BLEND_OP_MODE_BG_INV_ALPHA);
+	mdp5_write(mdp5_kms, REG_MDP5_LM_BLEND_FG_ALPHA(id, 0), 0xff);
+	mdp5_write(mdp5_kms, REG_MDP5_LM_BLEND_BG_ALPHA(id, 0), 0x00);
+
+	/* NOTE: it seems that for LM[n] and CTL[m] we do not need n==m..
+	 * but we do want to be setting CTL[m].LAYER[n].  Not sure what
+	 * the point of having CTL[m].LAYER[o] (for o!=n) is.. maybe that
+	 * is used when chaining up mixers for high resolution displays?
+	 */
+
+	/* CTL[id]: */
+	mdp5_write(mdp5_kms, REG_MDP5_CTL_LAYER_REG(id, 0),
+			MDP5_CTL_LAYER_REG_RGB0(STAGE0) |
+			MDP5_CTL_LAYER_REG_BORDER_COLOR);
+	mdp5_write(mdp5_kms, REG_MDP5_CTL_LAYER_REG(id, 1), 0);
+	mdp5_write(mdp5_kms, REG_MDP5_CTL_LAYER_REG(id, 2), 0);
+	mdp5_write(mdp5_kms, REG_MDP5_CTL_LAYER_REG(id, 3), 0);
+	mdp5_write(mdp5_kms, REG_MDP5_CTL_LAYER_REG(id, 4), 0);
+}
+
+static int mdp5_crtc_mode_set(struct drm_crtc *crtc,
+		struct drm_display_mode *mode,
+		struct drm_display_mode *adjusted_mode,
+		int x, int y,
+		struct drm_framebuffer *old_fb)
+{
+	struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
+	struct mdp5_kms *mdp5_kms = get_kms(crtc);
+	int ret;
+
+	mode = adjusted_mode;
+
+	DBG("%s: set mode: %d:\"%s\" %d %d %d %d %d %d %d %d %d %d 0x%x 0x%x",
+			mdp5_crtc->name, mode->base.id, mode->name,
+			mode->vrefresh, mode->clock,
+			mode->hdisplay, mode->hsync_start,
+			mode->hsync_end, mode->htotal,
+			mode->vdisplay, mode->vsync_start,
+			mode->vsync_end, mode->vtotal,
+			mode->type, mode->flags);
+
+	/* grab extra ref for update_scanout() */
+	drm_framebuffer_reference(crtc->fb);
+
+	ret = mdp5_plane_mode_set(mdp5_crtc->plane, crtc, crtc->fb,
+			0, 0, mode->hdisplay, mode->vdisplay,
+			x << 16, y << 16,
+			mode->hdisplay << 16, mode->vdisplay << 16);
+	if (ret) {
+		drm_framebuffer_unreference(crtc->fb);
+		dev_err(crtc->dev->dev, "%s: failed to set mode on plane: %d\n",
+				mdp5_crtc->name, ret);
+		return ret;
+	}
+
+	mdp5_write(mdp5_kms, REG_MDP5_LM_OUT_SIZE(mdp5_crtc->id),
+			MDP5_LM_OUT_SIZE_WIDTH(mode->hdisplay) |
+			MDP5_LM_OUT_SIZE_HEIGHT(mode->vdisplay));
+
+	update_fb(crtc, crtc->fb);
+	update_scanout(crtc, crtc->fb);
+
+	return 0;
+}
+
+static void mdp5_crtc_prepare(struct drm_crtc *crtc)
+{
+	struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
+	DBG("%s", mdp5_crtc->name);
+	/* make sure we hold a ref to mdp clks while setting up mode: */
+	mdp5_enable(get_kms(crtc));
+	mdp5_crtc_dpms(crtc, DRM_MODE_DPMS_OFF);
+}
+
+static void mdp5_crtc_commit(struct drm_crtc *crtc)
+{
+	mdp5_crtc_dpms(crtc, DRM_MODE_DPMS_ON);
+	crtc_flush(crtc);
+	/* drop the ref to mdp clk's that we got in prepare: */
+	mdp5_disable(get_kms(crtc));
+}
+
+static int mdp5_crtc_mode_set_base(struct drm_crtc *crtc, int x, int y,
+		struct drm_framebuffer *old_fb)
+{
+	struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
+	struct drm_plane *plane = mdp5_crtc->plane;
+	struct drm_display_mode *mode = &crtc->mode;
+	int ret;
+
+	/* grab extra ref for update_scanout() */
+	drm_framebuffer_reference(crtc->fb);
+
+	ret = mdp5_plane_mode_set(plane, crtc, crtc->fb,
+			0, 0, mode->hdisplay, mode->vdisplay,
+			x << 16, y << 16,
+			mode->hdisplay << 16, mode->vdisplay << 16);
+	if (ret) {
+		drm_framebuffer_unreference(crtc->fb);
+		return ret;
+	}
+
+	update_fb(crtc, crtc->fb);
+	update_scanout(crtc, crtc->fb);
+
+	return 0;
+}
+
+static void mdp5_crtc_load_lut(struct drm_crtc *crtc)
+{
+}
+
+static int mdp5_crtc_page_flip(struct drm_crtc *crtc,
+		struct drm_framebuffer *new_fb,
+		struct drm_pending_vblank_event *event,
+		uint32_t page_flip_flags)
+{
+	struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
+	struct drm_device *dev = crtc->dev;
+	struct drm_gem_object *obj;
+	unsigned long flags;
+
+	if (mdp5_crtc->event) {
+		dev_err(dev->dev, "already pending flip!\n");
+		return -EBUSY;
+	}
+
+	obj = msm_framebuffer_bo(new_fb, 0);
+
+	spin_lock_irqsave(&dev->event_lock, flags);
+	mdp5_crtc->event = event;
+	spin_unlock_irqrestore(&dev->event_lock, flags);
+
+	update_fb(crtc, new_fb);
+
+	return msm_gem_queue_inactive_cb(obj, &mdp5_crtc->pageflip_cb);
+}
+
+static int mdp5_crtc_set_property(struct drm_crtc *crtc,
+		struct drm_property *property, uint64_t val)
+{
+	// XXX
+	return -EINVAL;
+}
+
+static const struct drm_crtc_funcs mdp5_crtc_funcs = {
+	.set_config = drm_crtc_helper_set_config,
+	.destroy = mdp5_crtc_destroy,
+	.page_flip = mdp5_crtc_page_flip,
+	.set_property = mdp5_crtc_set_property,
+};
+
+static const struct drm_crtc_helper_funcs mdp5_crtc_helper_funcs = {
+	.dpms = mdp5_crtc_dpms,
+	.mode_fixup = mdp5_crtc_mode_fixup,
+	.mode_set = mdp5_crtc_mode_set,
+	.prepare = mdp5_crtc_prepare,
+	.commit = mdp5_crtc_commit,
+	.mode_set_base = mdp5_crtc_mode_set_base,
+	.load_lut = mdp5_crtc_load_lut,
+};
+
+static void mdp5_crtc_vblank_irq(struct mdp_irq *irq, uint32_t irqstatus)
+{
+	struct mdp5_crtc *mdp5_crtc = container_of(irq, struct mdp5_crtc, vblank);
+	struct drm_crtc *crtc = &mdp5_crtc->base;
+	struct msm_drm_private *priv = crtc->dev->dev_private;
+	unsigned pending;
+
+	mdp_irq_unregister(&get_kms(crtc)->base, &mdp5_crtc->vblank);
+
+	pending = atomic_xchg(&mdp5_crtc->pending, 0);
+
+	if (pending & PENDING_FLIP) {
+		complete_flip(crtc, NULL);
+		drm_flip_work_commit(&mdp5_crtc->unref_fb_work, priv->wq);
+	}
+}
+
+static void mdp5_crtc_err_irq(struct mdp_irq *irq, uint32_t irqstatus)
+{
+	struct mdp5_crtc *mdp5_crtc = container_of(irq, struct mdp5_crtc, err);
+	struct drm_crtc *crtc = &mdp5_crtc->base;
+	DBG("%s: error: %08x", mdp5_crtc->name, irqstatus);
+	crtc_flush(crtc);
+}
+
+uint32_t mdp5_crtc_vblank(struct drm_crtc *crtc)
+{
+	struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
+	return mdp5_crtc->vblank.irqmask;
+}
+
+void mdp5_crtc_cancel_pending_flip(struct drm_crtc *crtc, struct drm_file *file)
+{
+	DBG("cancel: %p", file);
+	complete_flip(crtc, file);
+}
+
+/* set interface for routing crtc->encoder: */
+void mdp5_crtc_set_intf(struct drm_crtc *crtc, int intf,
+		enum mdp5_intf intf_id)
+{
+	struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
+	struct mdp5_kms *mdp5_kms = get_kms(crtc);
+	static const enum mdp5_intfnum intfnum[] = {
+			INTF0, INTF1, INTF2, INTF3,
+	};
+	uint32_t intf_sel;
+
+	/* now that we know what irq's we want: */
+	mdp5_crtc->err.irqmask = intf2err(intf);
+	mdp5_crtc->vblank.irqmask = intf2vblank(intf);
+
+	/* when called from modeset_init(), skip the rest until later: */
+	if (!mdp5_kms)
+		return;
+
+	intf_sel = mdp5_read(mdp5_kms, REG_MDP5_DISP_INTF_SEL);
+
+	switch (intf) {
+	case 0:
+		intf_sel &= ~MDP5_DISP_INTF_SEL_INTF0__MASK;
+		intf_sel |= MDP5_DISP_INTF_SEL_INTF0(intf_id);
+		break;
+	case 1:
+		intf_sel &= ~MDP5_DISP_INTF_SEL_INTF1__MASK;
+		intf_sel |= MDP5_DISP_INTF_SEL_INTF1(intf_id);
+		break;
+	case 2:
+		intf_sel &= ~MDP5_DISP_INTF_SEL_INTF2__MASK;
+		intf_sel |= MDP5_DISP_INTF_SEL_INTF2(intf_id);
+		break;
+	case 3:
+		intf_sel &= ~MDP5_DISP_INTF_SEL_INTF3__MASK;
+		intf_sel |= MDP5_DISP_INTF_SEL_INTF3(intf_id);
+		break;
+	default:
+		BUG();
+		break;
+	}
+
+	blend_setup(crtc);
+
+	DBG("%s: intf_sel=%08x", mdp5_crtc->name, intf_sel);
+
+	mdp5_write(mdp5_kms, REG_MDP5_DISP_INTF_SEL, intf_sel);
+	mdp5_write(mdp5_kms, REG_MDP5_CTL_OP(mdp5_crtc->id),
+			MDP5_CTL_OP_MODE(MODE_NONE) |
+			MDP5_CTL_OP_INTF_NUM(intfnum[intf]));
+
+	crtc_flush(crtc);
+}
+
+static void set_attach(struct drm_crtc *crtc, enum mdp5_pipe pipe_id,
+		struct drm_plane *plane)
+{
+	struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
+
+	BUG_ON(pipe_id >= ARRAY_SIZE(mdp5_crtc->planes));
+
+	if (mdp5_crtc->planes[pipe_id] == plane)
+		return;
+
+	mdp5_crtc->planes[pipe_id] = plane;
+	blend_setup(crtc);
+	if (mdp5_crtc->enabled && (plane != mdp5_crtc->plane))
+		crtc_flush(crtc);
+}
+
+void mdp5_crtc_attach(struct drm_crtc *crtc, struct drm_plane *plane)
+{
+	set_attach(crtc, mdp5_plane_pipe(plane), plane);
+}
+
+void mdp5_crtc_detach(struct drm_crtc *crtc, struct drm_plane *plane)
+{
+	set_attach(crtc, mdp5_plane_pipe(plane), NULL);
+}
+
+/* initialize crtc */
+struct drm_crtc *mdp5_crtc_init(struct drm_device *dev,
+		struct drm_plane *plane, int id)
+{
+	struct drm_crtc *crtc = NULL;
+	struct mdp5_crtc *mdp5_crtc;
+	int ret;
+
+	mdp5_crtc = kzalloc(sizeof(*mdp5_crtc), GFP_KERNEL);
+	if (!mdp5_crtc) {
+		ret = -ENOMEM;
+		goto fail;
+	}
+
+	crtc = &mdp5_crtc->base;
+
+	mdp5_crtc->plane = plane;
+	mdp5_crtc->id = id;
+
+	mdp5_crtc->vblank.irq = mdp5_crtc_vblank_irq;
+	mdp5_crtc->err.irq = mdp5_crtc_err_irq;
+
+	snprintf(mdp5_crtc->name, sizeof(mdp5_crtc->name), "%s:%d",
+			pipe2name(mdp5_plane_pipe(plane)), id);
+
+	ret = drm_flip_work_init(&mdp5_crtc->unref_fb_work, 16,
+			"unref fb", unref_fb_worker);
+	if (ret)
+		goto fail;
+
+	INIT_FENCE_CB(&mdp5_crtc->pageflip_cb, pageflip_cb);
+
+	drm_crtc_init(dev, crtc, &mdp5_crtc_funcs);
+	drm_crtc_helper_add(crtc, &mdp5_crtc_helper_funcs);
+
+	mdp5_plane_install_properties(mdp5_crtc->plane, &crtc->base);
+
+	return crtc;
+
+fail:
+	if (crtc)
+		mdp5_crtc_destroy(crtc);
+
+	return ERR_PTR(ret);
+}
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_encoder.c b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_encoder.c
new file mode 100644
index 0000000..edec7bf
--- /dev/null
+++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_encoder.c
@@ -0,0 +1,258 @@
+/*
+ * Copyright (C) 2013 Red Hat
+ * Author: Rob Clark <robdclark@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "mdp5_kms.h"
+
+#include "drm_crtc.h"
+#include "drm_crtc_helper.h"
+
+struct mdp5_encoder {
+	struct drm_encoder base;
+	int intf;
+	enum mdp5_intf intf_id;
+	bool enabled;
+	uint32_t bsc;
+};
+#define to_mdp5_encoder(x) container_of(x, struct mdp5_encoder, base)
+
+static struct mdp5_kms *get_kms(struct drm_encoder *encoder)
+{
+	struct msm_drm_private *priv = encoder->dev->dev_private;
+	return to_mdp5_kms(to_mdp_kms(priv->kms));
+}
+
+#ifdef CONFIG_MSM_BUS_SCALING
+#include <mach/board.h>
+#include <mach/msm_bus.h>
+#include <mach/msm_bus_board.h>
+#define MDP_BUS_VECTOR_ENTRY(ab_val, ib_val)		\
+	{						\
+		.src = MSM_BUS_MASTER_MDP_PORT0,	\
+		.dst = MSM_BUS_SLAVE_EBI_CH0,		\
+		.ab = (ab_val),				\
+		.ib = (ib_val),				\
+	}
+
+static struct msm_bus_vectors mdp_bus_vectors[] = {
+	MDP_BUS_VECTOR_ENTRY(0, 0),
+	MDP_BUS_VECTOR_ENTRY(2000000000, 2000000000),
+};
+static struct msm_bus_paths mdp_bus_usecases[] = { {
+		.num_paths = 1,
+		.vectors = &mdp_bus_vectors[0],
+}, {
+		.num_paths = 1,
+		.vectors = &mdp_bus_vectors[1],
+} };
+static struct msm_bus_scale_pdata mdp_bus_scale_table = {
+	.usecase = mdp_bus_usecases,
+	.num_usecases = ARRAY_SIZE(mdp_bus_usecases),
+	.name = "mdss_mdp",
+};
+
+static void bs_init(struct mdp5_encoder *mdp5_encoder)
+{
+	mdp5_encoder->bsc = msm_bus_scale_register_client(
+			&mdp_bus_scale_table);
+	DBG("bus scale client: %08x", mdp5_encoder->bsc);
+}
+
+static void bs_fini(struct mdp5_encoder *mdp5_encoder)
+{
+	if (mdp5_encoder->bsc) {
+		msm_bus_scale_unregister_client(mdp5_encoder->bsc);
+		mdp5_encoder->bsc = 0;
+	}
+}
+
+static void bs_set(struct mdp5_encoder *mdp5_encoder, int idx)
+{
+	if (mdp5_encoder->bsc) {
+		DBG("set bus scaling: %d", idx);
+		/* HACK: scaling down, and then immediately back up
+		 * seems to leave things broken (underflow).. so
+		 * never disable:
+		 */
+		idx = 1;
+		msm_bus_scale_client_update_request(mdp5_encoder->bsc, idx);
+	}
+}
+#else
+static void bs_init(struct mdp5_encoder *mdp5_encoder) {}
+static void bs_fini(struct mdp5_encoder *mdp5_encoder) {}
+static void bs_set(struct mdp5_encoder *mdp5_encoder, int idx) {}
+#endif
+
+static void mdp5_encoder_destroy(struct drm_encoder *encoder)
+{
+	struct mdp5_encoder *mdp5_encoder = to_mdp5_encoder(encoder);
+	bs_fini(mdp5_encoder);
+	drm_encoder_cleanup(encoder);
+	kfree(mdp5_encoder);
+}
+
+static const struct drm_encoder_funcs mdp5_encoder_funcs = {
+	.destroy = mdp5_encoder_destroy,
+};
+
+static void mdp5_encoder_dpms(struct drm_encoder *encoder, int mode)
+{
+	struct mdp5_encoder *mdp5_encoder = to_mdp5_encoder(encoder);
+	struct mdp5_kms *mdp5_kms = get_kms(encoder);
+	int intf = mdp5_encoder->intf;
+	bool enabled = (mode == DRM_MODE_DPMS_ON);
+
+	DBG("mode=%d", mode);
+
+	if (enabled == mdp5_encoder->enabled)
+		return;
+
+	if (enabled) {
+		bs_set(mdp5_encoder, 1);
+		mdp5_write(mdp5_kms, REG_MDP5_INTF_TIMING_ENGINE_EN(intf), 1);
+	} else {
+		mdp5_write(mdp5_kms, REG_MDP5_INTF_TIMING_ENGINE_EN(intf), 0);
+		bs_set(mdp5_encoder, 0);
+	}
+
+	mdp5_encoder->enabled = enabled;
+}
+
+static bool mdp5_encoder_mode_fixup(struct drm_encoder *encoder,
+		const struct drm_display_mode *mode,
+		struct drm_display_mode *adjusted_mode)
+{
+	return true;
+}
+
+static void mdp5_encoder_mode_set(struct drm_encoder *encoder,
+		struct drm_display_mode *mode,
+		struct drm_display_mode *adjusted_mode)
+{
+	struct mdp5_encoder *mdp5_encoder = to_mdp5_encoder(encoder);
+	struct mdp5_kms *mdp5_kms = get_kms(encoder);
+	int intf = mdp5_encoder->intf;
+	uint32_t dtv_hsync_skew, vsync_period, vsync_len, ctrl_pol;
+	uint32_t display_v_start, display_v_end;
+	uint32_t hsync_start_x, hsync_end_x;
+	uint32_t format;
+
+	mode = adjusted_mode;
+
+	DBG("set mode: %d:\"%s\" %d %d %d %d %d %d %d %d %d %d 0x%x 0x%x",
+			mode->base.id, mode->name,
+			mode->vrefresh, mode->clock,
+			mode->hdisplay, mode->hsync_start,
+			mode->hsync_end, mode->htotal,
+			mode->vdisplay, mode->vsync_start,
+			mode->vsync_end, mode->vtotal,
+			mode->type, mode->flags);
+
+	ctrl_pol = 0;
+	if (mode->flags & DRM_MODE_FLAG_NHSYNC)
+		ctrl_pol |= MDP5_INTF_POLARITY_CTL_HSYNC_LOW;
+	if (mode->flags & DRM_MODE_FLAG_NVSYNC)
+		ctrl_pol |= MDP5_INTF_POLARITY_CTL_VSYNC_LOW;
+	/* probably need to get DATA_EN polarity from panel.. */
+
+	dtv_hsync_skew = 0;  /* get this from panel? */
+	format = 0x213f;     /* get this from panel? */
+
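+	/* Note: the timings below are programmed relative to the start of
+	 * the sync pulse: horizontal values in pixels (sync pulse plus back
+	 * porch come before the first active pixel), vertical values in
+	 * pixel clocks (i.e. lines * htotal):
+	 */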
+	hsync_start_x = (mode->htotal - mode->hsync_start);
+	hsync_end_x = mode->htotal - (mode->hsync_start - mode->hdisplay) - 1;
+
+	vsync_period = mode->vtotal * mode->htotal;
+	vsync_len = (mode->vsync_end - mode->vsync_start) * mode->htotal;
+	display_v_start = (mode->vtotal - mode->vsync_start) * mode->htotal + dtv_hsync_skew;
+	display_v_end = vsync_period - ((mode->vsync_start - mode->vdisplay) * mode->htotal) + dtv_hsync_skew - 1;
+
+	mdp5_write(mdp5_kms, REG_MDP5_INTF_HSYNC_CTL(intf),
+			MDP5_INTF_HSYNC_CTL_PULSEW(mode->hsync_end - mode->hsync_start) |
+			MDP5_INTF_HSYNC_CTL_PERIOD(mode->htotal));
+	mdp5_write(mdp5_kms, REG_MDP5_INTF_VSYNC_PERIOD_F0(intf), vsync_period);
+	mdp5_write(mdp5_kms, REG_MDP5_INTF_VSYNC_LEN_F0(intf), vsync_len);
+	mdp5_write(mdp5_kms, REG_MDP5_INTF_DISPLAY_HCTL(intf),
+			MDP5_INTF_DISPLAY_HCTL_START(hsync_start_x) |
+			MDP5_INTF_DISPLAY_HCTL_END(hsync_end_x));
+	mdp5_write(mdp5_kms, REG_MDP5_INTF_DISPLAY_VSTART_F0(intf), display_v_start);
+	mdp5_write(mdp5_kms, REG_MDP5_INTF_DISPLAY_VEND_F0(intf), display_v_end);
+	mdp5_write(mdp5_kms, REG_MDP5_INTF_BORDER_COLOR(intf), 0);
+	mdp5_write(mdp5_kms, REG_MDP5_INTF_UNDERFLOW_COLOR(intf), 0xff);
+	mdp5_write(mdp5_kms, REG_MDP5_INTF_HSYNC_SKEW(intf), dtv_hsync_skew);
+	mdp5_write(mdp5_kms, REG_MDP5_INTF_POLARITY_CTL(intf), ctrl_pol);
+	mdp5_write(mdp5_kms, REG_MDP5_INTF_ACTIVE_HCTL(intf),
+			MDP5_INTF_ACTIVE_HCTL_START(0) |
+			MDP5_INTF_ACTIVE_HCTL_END(0));
+	mdp5_write(mdp5_kms, REG_MDP5_INTF_ACTIVE_VSTART_F0(intf), 0);
+	mdp5_write(mdp5_kms, REG_MDP5_INTF_ACTIVE_VEND_F0(intf), 0);
+	mdp5_write(mdp5_kms, REG_MDP5_INTF_PANEL_FORMAT(intf), format);
+	mdp5_write(mdp5_kms, REG_MDP5_INTF_FRAME_LINE_COUNT_EN(intf), 0x3);  /* frame+line? */
+}
+
+static void mdp5_encoder_prepare(struct drm_encoder *encoder)
+{
+	mdp5_encoder_dpms(encoder, DRM_MODE_DPMS_OFF);
+}
+
+static void mdp5_encoder_commit(struct drm_encoder *encoder)
+{
+	struct mdp5_encoder *mdp5_encoder = to_mdp5_encoder(encoder);
+	mdp5_crtc_set_intf(encoder->crtc, mdp5_encoder->intf,
+			mdp5_encoder->intf_id);
+	mdp5_encoder_dpms(encoder, DRM_MODE_DPMS_ON);
+}
+
+static const struct drm_encoder_helper_funcs mdp5_encoder_helper_funcs = {
+	.dpms = mdp5_encoder_dpms,
+	.mode_fixup = mdp5_encoder_mode_fixup,
+	.mode_set = mdp5_encoder_mode_set,
+	.prepare = mdp5_encoder_prepare,
+	.commit = mdp5_encoder_commit,
+};
+
+/* initialize encoder */
+struct drm_encoder *mdp5_encoder_init(struct drm_device *dev, int intf,
+		enum mdp5_intf intf_id)
+{
+	struct drm_encoder *encoder = NULL;
+	struct mdp5_encoder *mdp5_encoder;
+	int ret;
+
+	mdp5_encoder = kzalloc(sizeof(*mdp5_encoder), GFP_KERNEL);
+	if (!mdp5_encoder) {
+		ret = -ENOMEM;
+		goto fail;
+	}
+
+	mdp5_encoder->intf = intf;
+	mdp5_encoder->intf_id = intf_id;
+	encoder = &mdp5_encoder->base;
+
+	drm_encoder_init(dev, encoder, &mdp5_encoder_funcs,
+			 DRM_MODE_ENCODER_TMDS);
+	drm_encoder_helper_add(encoder, &mdp5_encoder_helper_funcs);
+
+	bs_init(mdp5_encoder);
+
+	return encoder;
+
+fail:
+	if (encoder)
+		mdp5_encoder_destroy(encoder);
+
+	return ERR_PTR(ret);
+}
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_irq.c b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_irq.c
new file mode 100644
index 0000000..353d494
--- /dev/null
+++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_irq.c
@@ -0,0 +1,111 @@
+/*
+ * Copyright (C) 2013 Red Hat
+ * Author: Rob Clark <robdclark@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+
+
+#include "msm_drv.h"
+#include "mdp5_kms.h"
+
+void mdp5_set_irqmask(struct mdp_kms *mdp_kms, uint32_t irqmask)
+{
+	mdp5_write(to_mdp5_kms(mdp_kms), REG_MDP5_INTR_EN, irqmask);
+}
+
+static void mdp5_irq_error_handler(struct mdp_irq *irq, uint32_t irqstatus)
+{
+	DRM_ERROR("errors: %08x\n", irqstatus);
+}
+
+void mdp5_irq_preinstall(struct msm_kms *kms)
+{
+	struct mdp5_kms *mdp5_kms = to_mdp5_kms(to_mdp_kms(kms));
+	mdp5_write(mdp5_kms, REG_MDP5_INTR_CLEAR, 0xffffffff);
+}
+
+int mdp5_irq_postinstall(struct msm_kms *kms)
+{
+	struct mdp_kms *mdp_kms = to_mdp_kms(kms);
+	struct mdp5_kms *mdp5_kms = to_mdp5_kms(mdp_kms);
+	struct mdp_irq *error_handler = &mdp5_kms->error_handler;
+
+	error_handler->irq = mdp5_irq_error_handler;
+	error_handler->irqmask = MDP5_IRQ_INTF0_UNDER_RUN |
+			MDP5_IRQ_INTF1_UNDER_RUN |
+			MDP5_IRQ_INTF2_UNDER_RUN |
+			MDP5_IRQ_INTF3_UNDER_RUN;
+
+	mdp_irq_register(mdp_kms, error_handler);
+
+	return 0;
+}
+
+void mdp5_irq_uninstall(struct msm_kms *kms)
+{
+	struct mdp5_kms *mdp5_kms = to_mdp5_kms(to_mdp_kms(kms));
+	mdp5_write(mdp5_kms, REG_MDP5_INTR_EN, 0x00000000);
+}
+
+static void mdp5_irq_mdp(struct mdp_kms *mdp_kms)
+{
+	struct mdp5_kms *mdp5_kms = to_mdp5_kms(mdp_kms);
+	struct drm_device *dev = mdp5_kms->dev;
+	struct msm_drm_private *priv = dev->dev_private;
+	unsigned int id;
+	uint32_t status;
+
+	status = mdp5_read(mdp5_kms, REG_MDP5_INTR_STATUS);
+	mdp5_write(mdp5_kms, REG_MDP5_INTR_CLEAR, status);
+
+	VERB("status=%08x", status);
+
+	for (id = 0; id < priv->num_crtcs; id++)
+		if (status & mdp5_crtc_vblank(priv->crtcs[id]))
+			drm_handle_vblank(dev, id);
+
+	mdp_dispatch_irqs(mdp_kms, status);
+}
+
+irqreturn_t mdp5_irq(struct msm_kms *kms)
+{
+	struct mdp_kms *mdp_kms = to_mdp_kms(kms);
+	struct mdp5_kms *mdp5_kms = to_mdp5_kms(mdp_kms);
+	uint32_t intr;
+
+	intr = mdp5_read(mdp5_kms, REG_MDP5_HW_INTR_STATUS);
+
+	VERB("intr=%08x", intr);
+
+	if (intr & MDP5_HW_INTR_STATUS_INTR_MDP)
+		mdp5_irq_mdp(mdp_kms);
+
+	if (intr & MDP5_HW_INTR_STATUS_INTR_HDMI)
+		hdmi_irq(0, mdp5_kms->hdmi);
+
+	return IRQ_HANDLED;
+}
+
+int mdp5_enable_vblank(struct msm_kms *kms, struct drm_crtc *crtc)
+{
+	mdp_update_vblank_mask(to_mdp_kms(kms),
+			mdp5_crtc_vblank(crtc), true);
+	return 0;
+}
+
+void mdp5_disable_vblank(struct msm_kms *kms, struct drm_crtc *crtc)
+{
+	mdp_update_vblank_mask(to_mdp_kms(kms),
+			mdp5_crtc_vblank(crtc), false);
+}
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.c b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.c
new file mode 100644
index 0000000..ee8446c
--- /dev/null
+++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.c
@@ -0,0 +1,350 @@
+/*
+ * Copyright (C) 2013 Red Hat
+ * Author: Rob Clark <robdclark@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+
+
+#include "msm_drv.h"
+#include "msm_mmu.h"
+#include "mdp5_kms.h"
+
+static struct mdp5_platform_config *mdp5_get_config(struct platform_device *dev);
+
+static int mdp5_hw_init(struct msm_kms *kms)
+{
+	struct mdp5_kms *mdp5_kms = to_mdp5_kms(to_mdp_kms(kms));
+	struct drm_device *dev = mdp5_kms->dev;
+	uint32_t version, major, minor;
+	int ret = 0;
+
+	pm_runtime_get_sync(dev->dev);
+
+	mdp5_enable(mdp5_kms);
+	version = mdp5_read(mdp5_kms, REG_MDP5_MDP_VERSION);
+	mdp5_disable(mdp5_kms);
+
+	major = FIELD(version, MDP5_MDP_VERSION_MAJOR);
+	minor = FIELD(version, MDP5_MDP_VERSION_MINOR);
+
+	DBG("found MDP5 version v%d.%d", major, minor);
+
+	if ((major != 1) || ((minor != 0) && (minor != 2))) {
+		dev_err(dev->dev, "unexpected MDP version: v%d.%d\n",
+				major, minor);
+		ret = -ENXIO;
+		goto out;
+	}
+
+	mdp5_kms->rev = minor;
+
+	/* Magic unknown register writes:
+	 *
+	 *    W VBIF:0x004 00000001      (mdss_mdp.c:839)
+	 *    W MDP5:0x2e0 0xe9          (mdss_mdp.c:839)
+	 *    W MDP5:0x2e4 0x55          (mdss_mdp.c:839)
+	 *    W MDP5:0x3ac 0xc0000ccc    (mdss_mdp.c:839)
+	 *    W MDP5:0x3b4 0xc0000ccc    (mdss_mdp.c:839)
+	 *    W MDP5:0x3bc 0xcccccc      (mdss_mdp.c:839)
+	 *    W MDP5:0x4a8 0xcccc0c0     (mdss_mdp.c:839)
+	 *    W MDP5:0x4b0 0xccccc0c0    (mdss_mdp.c:839)
+	 *    W MDP5:0x4b8 0xccccc000    (mdss_mdp.c:839)
+	 *
+	 * Downstream fbdev driver gets these register offsets/values
+	 * from DT.. not really sure what these registers are or whether
+	 * the values differ between boards/SoCs, etc.  I guess they are
+	 * the golden registers.
+	 *
+	 * Not setting these does not seem to cause any problem.  But
+	 * we may be getting lucky with the bootloader initializing
+	 * them for us.  OTOH, if we can always count on the bootloader
+	 * setting the golden registers, then perhaps we don't need to
+	 * care.
+	 */
+
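+	/* start from a clean slate: no interface routing, all CTLs idle: */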
+	mdp5_write(mdp5_kms, REG_MDP5_DISP_INTF_SEL, 0);
+	mdp5_write(mdp5_kms, REG_MDP5_CTL_OP(0), 0);
+	mdp5_write(mdp5_kms, REG_MDP5_CTL_OP(1), 0);
+	mdp5_write(mdp5_kms, REG_MDP5_CTL_OP(2), 0);
+	mdp5_write(mdp5_kms, REG_MDP5_CTL_OP(3), 0);
+
+out:
+	pm_runtime_put_sync(dev->dev);
+
+	return ret;
+}
+
+static long mdp5_round_pixclk(struct msm_kms *kms, unsigned long rate,
+		struct drm_encoder *encoder)
+{
+	return rate;
+}
+
+static void mdp5_preclose(struct msm_kms *kms, struct drm_file *file)
+{
+	struct mdp5_kms *mdp5_kms = to_mdp5_kms(to_mdp_kms(kms));
+	struct msm_drm_private *priv = mdp5_kms->dev->dev_private;
+	unsigned i;
+
+	for (i = 0; i < priv->num_crtcs; i++)
+		mdp5_crtc_cancel_pending_flip(priv->crtcs[i], file);
+}
+
+static void mdp5_destroy(struct msm_kms *kms)
+{
+	struct mdp5_kms *mdp5_kms = to_mdp5_kms(to_mdp_kms(kms));
+	kfree(mdp5_kms);
+}
+
+static const struct mdp_kms_funcs kms_funcs = {
+	.base = {
+		.hw_init         = mdp5_hw_init,
+		.irq_preinstall  = mdp5_irq_preinstall,
+		.irq_postinstall = mdp5_irq_postinstall,
+		.irq_uninstall   = mdp5_irq_uninstall,
+		.irq             = mdp5_irq,
+		.enable_vblank   = mdp5_enable_vblank,
+		.disable_vblank  = mdp5_disable_vblank,
+		.get_format      = mdp_get_format,
+		.round_pixclk    = mdp5_round_pixclk,
+		.preclose        = mdp5_preclose,
+		.destroy         = mdp5_destroy,
+	},
+	.set_irqmask         = mdp5_set_irqmask,
+};
+
+int mdp5_disable(struct mdp5_kms *mdp5_kms)
+{
+	DBG("");
+
+	clk_disable_unprepare(mdp5_kms->ahb_clk);
+	clk_disable_unprepare(mdp5_kms->axi_clk);
+	clk_disable_unprepare(mdp5_kms->core_clk);
+	clk_disable_unprepare(mdp5_kms->lut_clk);
+
+	return 0;
+}
+
+int mdp5_enable(struct mdp5_kms *mdp5_kms)
+{
+	DBG("");
+
+	clk_prepare_enable(mdp5_kms->ahb_clk);
+	clk_prepare_enable(mdp5_kms->axi_clk);
+	clk_prepare_enable(mdp5_kms->core_clk);
+	clk_prepare_enable(mdp5_kms->lut_clk);
+
+	return 0;
+}
+
+static int modeset_init(struct mdp5_kms *mdp5_kms)
+{
+	static const enum mdp5_pipe crtcs[] = {
+			SSPP_RGB0, SSPP_RGB1, SSPP_RGB2,
+	};
+	struct drm_device *dev = mdp5_kms->dev;
+	struct msm_drm_private *priv = dev->dev_private;
+	struct drm_encoder *encoder;
+	int i, ret;
+
+	/* construct CRTCs: */
+	for (i = 0; i < ARRAY_SIZE(crtcs); i++) {
+		struct drm_plane *plane;
+		struct drm_crtc *crtc;
+
+		plane = mdp5_plane_init(dev, crtcs[i], true);
+		if (IS_ERR(plane)) {
+			ret = PTR_ERR(plane);
+			dev_err(dev->dev, "failed to construct plane for %s (%d)\n",
+					pipe2name(crtcs[i]), ret);
+			goto fail;
+		}
+
+		crtc  = mdp5_crtc_init(dev, plane, i);
+		if (IS_ERR(crtc)) {
+			ret = PTR_ERR(crtc);
+			dev_err(dev->dev, "failed to construct crtc for %s (%d)\n",
+					pipe2name(crtcs[i]), ret);
+			goto fail;
+		}
+		priv->crtcs[priv->num_crtcs++] = crtc;
+	}
+
+	/* Construct encoder for HDMI: */
+	encoder = mdp5_encoder_init(dev, 3, INTF_HDMI);
+	if (IS_ERR(encoder)) {
+		dev_err(dev->dev, "failed to construct encoder\n");
+		ret = PTR_ERR(encoder);
+		goto fail;
+	}
+
+	/* NOTE: the vsync and error IRQs are actually associated with
+	 * the INTF/encoder.. the easiest way to deal with this (ie. what
+	 * we do now) is to assume a fixed relationship between CRTCs and
+	 * encoders.  I'm not sure if there is ever a need to assign
+	 * CRTCs to encoders more freely, but if there is then we need to
+	 * take care of the error and vblank IRQs that the CRTC has
+	 * registered, and also update the user-requested vblank_mask.
+	 */
+	encoder->possible_crtcs = BIT(0);
+	mdp5_crtc_set_intf(priv->crtcs[0], 3, INTF_HDMI);
+
+	priv->encoders[priv->num_encoders++] = encoder;
+
+	/* Construct bridge/connector for HDMI: */
+	mdp5_kms->hdmi = hdmi_init(dev, encoder);
+	if (IS_ERR(mdp5_kms->hdmi)) {
+		ret = PTR_ERR(mdp5_kms->hdmi);
+		dev_err(dev->dev, "failed to initialize HDMI: %d\n", ret);
+		goto fail;
+	}
+
+	return 0;
+
+fail:
+	return ret;
+}
+
+static const char *iommu_ports[] = {
+		"mdp_0",
+};
+
+static int get_clk(struct platform_device *pdev, struct clk **clkp,
+		const char *name)
+{
+	struct device *dev = &pdev->dev;
+	struct clk *clk = devm_clk_get(dev, name);
+	if (IS_ERR(clk)) {
+		dev_err(dev, "failed to get %s (%ld)\n", name, PTR_ERR(clk));
+		return PTR_ERR(clk);
+	}
+	*clkp = clk;
+	return 0;
+}
+
+struct msm_kms *mdp5_kms_init(struct drm_device *dev)
+{
+	struct platform_device *pdev = dev->platformdev;
+	struct mdp5_platform_config *config = mdp5_get_config(pdev);
+	struct mdp5_kms *mdp5_kms;
+	struct msm_kms *kms = NULL;
+	struct msm_mmu *mmu;
+	int ret;
+
+	mdp5_kms = kzalloc(sizeof(*mdp5_kms), GFP_KERNEL);
+	if (!mdp5_kms) {
+		dev_err(dev->dev, "failed to allocate kms\n");
+		ret = -ENOMEM;
+		goto fail;
+	}
+
+	mdp_kms_init(&mdp5_kms->base, &kms_funcs);
+
+	kms = &mdp5_kms->base.base;
+
+	mdp5_kms->dev = dev;
+	mdp5_kms->smp_blk_cnt = config->smp_blk_cnt;
+
+	mdp5_kms->mmio = msm_ioremap(pdev, "mdp_phys", "MDP5");
+	if (IS_ERR(mdp5_kms->mmio)) {
+		ret = PTR_ERR(mdp5_kms->mmio);
+		goto fail;
+	}
+
+	mdp5_kms->vbif = msm_ioremap(pdev, "vbif_phys", "VBIF");
+	if (IS_ERR(mdp5_kms->vbif)) {
+		ret = PTR_ERR(mdp5_kms->vbif);
+		goto fail;
+	}
+
+	mdp5_kms->vdd = devm_regulator_get(&pdev->dev, "vdd");
+	if (IS_ERR(mdp5_kms->vdd)) {
+		ret = PTR_ERR(mdp5_kms->vdd);
+		goto fail;
+	}
+
+	ret = regulator_enable(mdp5_kms->vdd);
+	if (ret) {
+		dev_err(dev->dev, "failed to enable regulator vdd: %d\n", ret);
+		goto fail;
+	}
+
+	ret = get_clk(pdev, &mdp5_kms->axi_clk, "bus_clk") ||
+			get_clk(pdev, &mdp5_kms->ahb_clk, "iface_clk") ||
+			get_clk(pdev, &mdp5_kms->src_clk, "core_clk_src") ||
+			get_clk(pdev, &mdp5_kms->core_clk, "core_clk") ||
+			get_clk(pdev, &mdp5_kms->lut_clk, "lut_clk") ||
+			get_clk(pdev, &mdp5_kms->vsync_clk, "vsync_clk");
+	if (ret)
+		goto fail;
+
+	ret = clk_set_rate(mdp5_kms->src_clk, config->max_clk);
+
+	/* make sure things are off before attaching iommu (bootloader could
+	 * have left things on, in which case we'll start getting faults if
+	 * we don't disable):
+	 */
+	mdp5_enable(mdp5_kms);
+	mdp5_write(mdp5_kms, REG_MDP5_INTF_TIMING_ENGINE_EN(0), 0);
+	mdp5_write(mdp5_kms, REG_MDP5_INTF_TIMING_ENGINE_EN(1), 0);
+	mdp5_write(mdp5_kms, REG_MDP5_INTF_TIMING_ENGINE_EN(2), 0);
+	mdp5_write(mdp5_kms, REG_MDP5_INTF_TIMING_ENGINE_EN(3), 0);
+	mdp5_disable(mdp5_kms);
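+	/* let the timing engines actually stop (roughly one frame at 60Hz): */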
+	mdelay(16);
+
+	if (config->iommu) {
+		mmu = msm_iommu_new(dev, config->iommu);
+		if (IS_ERR(mmu)) {
+			ret = PTR_ERR(mmu);
+			goto fail;
+		}
+		ret = mmu->funcs->attach(mmu, iommu_ports,
+				ARRAY_SIZE(iommu_ports));
+		if (ret)
+			goto fail;
+	} else {
+		dev_info(dev->dev, "no iommu, fallback to phys "
+				"contig buffers for scanout\n");
+		mmu = NULL;
+	}
+
+	mdp5_kms->id = msm_register_mmu(dev, mmu);
+	if (mdp5_kms->id < 0) {
+		ret = mdp5_kms->id;
+		dev_err(dev->dev, "failed to register mdp5 iommu: %d\n", ret);
+		goto fail;
+	}
+
+	ret = modeset_init(mdp5_kms);
+	if (ret) {
+		dev_err(dev->dev, "modeset_init failed: %d\n", ret);
+		goto fail;
+	}
+
+	return kms;
+
+fail:
+	if (kms)
+		mdp5_destroy(kms);
+	return ERR_PTR(ret);
+}
+
+static struct mdp5_platform_config *mdp5_get_config(struct platform_device *dev)
+{
+	static struct mdp5_platform_config config = {};
+#ifdef CONFIG_OF
+	/* TODO */
+#endif
+	return &config;
+}
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.h b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.h
new file mode 100644
index 0000000..c8b1a25
--- /dev/null
+++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.h
@@ -0,0 +1,213 @@
+/*
+ * Copyright (C) 2013 Red Hat
+ * Author: Rob Clark <robdclark@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef __MDP5_KMS_H__
+#define __MDP5_KMS_H__
+
+#include "msm_drv.h"
+#include "msm_kms.h"
+#include "mdp/mdp_kms.h"
+#include "mdp5.xml.h"
+#include "mdp5_smp.h"
+
+struct mdp5_kms {
+	struct mdp_kms base;
+
+	struct drm_device *dev;
+
+	int rev;
+
+	/* mapper-id used to request GEM buffer mapped for scanout: */
+	int id;
+
+	/* for tracking smp allocation amongst pipes: */
+	mdp5_smp_state_t smp_state;
+	struct mdp5_client_smp_state smp_client_state[CID_MAX];
+	int smp_blk_cnt;
+
+	/* io/register spaces: */
+	void __iomem *mmio, *vbif;
+
+	struct regulator *vdd;
+
+	struct clk *axi_clk;
+	struct clk *ahb_clk;
+	struct clk *src_clk;
+	struct clk *core_clk;
+	struct clk *lut_clk;
+	struct clk *vsync_clk;
+
+	struct hdmi *hdmi;
+
+	struct mdp_irq error_handler;
+};
+#define to_mdp5_kms(x) container_of(x, struct mdp5_kms, base)
+
+/* platform config data (ie. from DT, or pdata) */
+struct mdp5_platform_config {
+	struct iommu_domain *iommu;
+	uint32_t max_clk;
+	int smp_blk_cnt;
+};
+
+static inline void mdp5_write(struct mdp5_kms *mdp5_kms, u32 reg, u32 data)
+{
+	msm_writel(data, mdp5_kms->mmio + reg);
+}
+
+static inline u32 mdp5_read(struct mdp5_kms *mdp5_kms, u32 reg)
+{
+	return msm_readl(mdp5_kms->mmio + reg);
+}
+
+static inline const char *pipe2name(enum mdp5_pipe pipe)
+{
+	static const char *names[] = {
+#define NAME(n) [SSPP_ ## n] = #n
+		NAME(VIG0), NAME(VIG1), NAME(VIG2),
+		NAME(RGB0), NAME(RGB1), NAME(RGB2),
+		NAME(DMA0), NAME(DMA1),
+#undef NAME
+	};
+	return names[pipe];
+}
+
+static inline uint32_t pipe2flush(enum mdp5_pipe pipe)
+{
+	switch (pipe) {
+	case SSPP_VIG0: return MDP5_CTL_FLUSH_VIG0;
+	case SSPP_VIG1: return MDP5_CTL_FLUSH_VIG1;
+	case SSPP_VIG2: return MDP5_CTL_FLUSH_VIG2;
+	case SSPP_RGB0: return MDP5_CTL_FLUSH_RGB0;
+	case SSPP_RGB1: return MDP5_CTL_FLUSH_RGB1;
+	case SSPP_RGB2: return MDP5_CTL_FLUSH_RGB2;
+	case SSPP_DMA0: return MDP5_CTL_FLUSH_DMA0;
+	case SSPP_DMA1: return MDP5_CTL_FLUSH_DMA1;
+	default:        return 0;
+	}
+}
+
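+/* RGB pipes fetch a single plane, so they have one SMP client; VIG and
+ * DMA pipes have separate Y/CB/CR clients (see pipe2client() below):
+ */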
+static inline int pipe2nclients(enum mdp5_pipe pipe)
+{
+	switch (pipe) {
+	case SSPP_RGB0:
+	case SSPP_RGB1:
+	case SSPP_RGB2:
+		return 1;
+	default:
+		return 3;
+	}
+}
+
+static inline enum mdp5_client_id pipe2client(enum mdp5_pipe pipe, int plane)
+{
+	WARN_ON(plane >= pipe2nclients(pipe));
+	switch (pipe) {
+	case SSPP_VIG0: return CID_VIG0_Y + plane;
+	case SSPP_VIG1: return CID_VIG1_Y + plane;
+	case SSPP_VIG2: return CID_VIG2_Y + plane;
+	case SSPP_RGB0: return CID_RGB0;
+	case SSPP_RGB1: return CID_RGB1;
+	case SSPP_RGB2: return CID_RGB2;
+	case SSPP_DMA0: return CID_DMA0_Y + plane;
+	case SSPP_DMA1: return CID_DMA1_Y + plane;
+	default:        return CID_UNUSED;
+	}
+}
+
+static inline uint32_t mixer2flush(int lm)
+{
+	switch (lm) {
+	case 0:  return MDP5_CTL_FLUSH_LM0;
+	case 1:  return MDP5_CTL_FLUSH_LM1;
+	case 2:  return MDP5_CTL_FLUSH_LM2;
+	default: return 0;
+	}
+}
+
+static inline uint32_t intf2err(int intf)
+{
+	switch (intf) {
+	case 0:  return MDP5_IRQ_INTF0_UNDER_RUN;
+	case 1:  return MDP5_IRQ_INTF1_UNDER_RUN;
+	case 2:  return MDP5_IRQ_INTF2_UNDER_RUN;
+	case 3:  return MDP5_IRQ_INTF3_UNDER_RUN;
+	default: return 0;
+	}
+}
+
+static inline uint32_t intf2vblank(int intf)
+{
+	switch (intf) {
+	case 0:  return MDP5_IRQ_INTF0_VSYNC;
+	case 1:  return MDP5_IRQ_INTF1_VSYNC;
+	case 2:  return MDP5_IRQ_INTF2_VSYNC;
+	case 3:  return MDP5_IRQ_INTF3_VSYNC;
+	default: return 0;
+	}
+}
+
+int mdp5_disable(struct mdp5_kms *mdp5_kms);
+int mdp5_enable(struct mdp5_kms *mdp5_kms);
+
+void mdp5_set_irqmask(struct mdp_kms *mdp_kms, uint32_t irqmask);
+void mdp5_irq_preinstall(struct msm_kms *kms);
+int mdp5_irq_postinstall(struct msm_kms *kms);
+void mdp5_irq_uninstall(struct msm_kms *kms);
+irqreturn_t mdp5_irq(struct msm_kms *kms);
+int mdp5_enable_vblank(struct msm_kms *kms, struct drm_crtc *crtc);
+void mdp5_disable_vblank(struct msm_kms *kms, struct drm_crtc *crtc);
+
+static inline
+uint32_t mdp5_get_formats(enum mdp5_pipe pipe, uint32_t *pixel_formats,
+		uint32_t max_formats)
+{
+	/* TODO when we have YUV, we need to filter supported formats
+	 * based on pipe id..
+	 */
+	return mdp_get_formats(pixel_formats, max_formats);
+}
+
+void mdp5_plane_install_properties(struct drm_plane *plane,
+		struct drm_mode_object *obj);
+void mdp5_plane_set_scanout(struct drm_plane *plane,
+		struct drm_framebuffer *fb);
+int mdp5_plane_mode_set(struct drm_plane *plane,
+		struct drm_crtc *crtc, struct drm_framebuffer *fb,
+		int crtc_x, int crtc_y,
+		unsigned int crtc_w, unsigned int crtc_h,
+		uint32_t src_x, uint32_t src_y,
+		uint32_t src_w, uint32_t src_h);
+void mdp5_plane_complete_flip(struct drm_plane *plane);
+enum mdp5_pipe mdp5_plane_pipe(struct drm_plane *plane);
+struct drm_plane *mdp5_plane_init(struct drm_device *dev,
+		enum mdp5_pipe pipe, bool private_plane);
+
+uint32_t mdp5_crtc_vblank(struct drm_crtc *crtc);
+
+void mdp5_crtc_cancel_pending_flip(struct drm_crtc *crtc, struct drm_file *file);
+void mdp5_crtc_set_intf(struct drm_crtc *crtc, int intf,
+		enum mdp5_intf intf_id);
+void mdp5_crtc_attach(struct drm_crtc *crtc, struct drm_plane *plane);
+void mdp5_crtc_detach(struct drm_crtc *crtc, struct drm_plane *plane);
+struct drm_crtc *mdp5_crtc_init(struct drm_device *dev,
+		struct drm_plane *plane, int id);
+
+struct drm_encoder *mdp5_encoder_init(struct drm_device *dev, int intf,
+		enum mdp5_intf intf_id);
+
+#endif /* __MDP5_KMS_H__ */
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_plane.c b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_plane.c
new file mode 100644
index 0000000..0ac8bb5
--- /dev/null
+++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_plane.c
@@ -0,0 +1,389 @@
+/*
+ * Copyright (C) 2013 Red Hat
+ * Author: Rob Clark <robdclark@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "mdp5_kms.h"
+
+
+struct mdp5_plane {
+	struct drm_plane base;
+	const char *name;
+
+	enum mdp5_pipe pipe;
+
+	uint32_t nformats;
+	uint32_t formats[32];
+
+	bool enabled;
+};
+#define to_mdp5_plane(x) container_of(x, struct mdp5_plane, base)
+
+static struct mdp5_kms *get_kms(struct drm_plane *plane)
+{
+	struct msm_drm_private *priv = plane->dev->dev_private;
+	return to_mdp5_kms(to_mdp_kms(priv->kms));
+}
+
+static int mdp5_plane_update(struct drm_plane *plane,
+		struct drm_crtc *crtc, struct drm_framebuffer *fb,
+		int crtc_x, int crtc_y,
+		unsigned int crtc_w, unsigned int crtc_h,
+		uint32_t src_x, uint32_t src_y,
+		uint32_t src_w, uint32_t src_h)
+{
+	struct mdp5_plane *mdp5_plane = to_mdp5_plane(plane);
+
+	mdp5_plane->enabled = true;
+
+	if (plane->fb)
+		drm_framebuffer_unreference(plane->fb);
+
+	drm_framebuffer_reference(fb);
+
+	return mdp5_plane_mode_set(plane, crtc, fb,
+			crtc_x, crtc_y, crtc_w, crtc_h,
+			src_x, src_y, src_w, src_h);
+}
+
+static int mdp5_plane_disable(struct drm_plane *plane)
+{
+	struct mdp5_plane *mdp5_plane = to_mdp5_plane(plane);
+	struct mdp5_kms *mdp5_kms = get_kms(plane);
+	enum mdp5_pipe pipe = mdp5_plane->pipe;
+	int i;
+
+	DBG("%s: disable", mdp5_plane->name);
+
+	/* update our SMP request to zero (release all our blks): */
+	for (i = 0; i < pipe2nclients(pipe); i++)
+		mdp5_smp_request(mdp5_kms, pipe2client(pipe, i), 0);
+
+	/* TODO detaching now will cause us not to get the last
+	 * vblank and mdp5_smp_commit().. so other planes will
+	 * still see smp blocks previously allocated to us as
+	 * in-use..
+	 */
+	if (plane->crtc)
+		mdp5_crtc_detach(plane->crtc, plane);
+
+	return 0;
+}
+
+static void mdp5_plane_destroy(struct drm_plane *plane)
+{
+	struct mdp5_plane *mdp5_plane = to_mdp5_plane(plane);
+
+	mdp5_plane_disable(plane);
+	drm_plane_cleanup(plane);
+
+	kfree(mdp5_plane);
+}
+
+/* helper to install properties which are common to planes and crtcs */
+void mdp5_plane_install_properties(struct drm_plane *plane,
+		struct drm_mode_object *obj)
+{
+	// XXX
+}
+
+int mdp5_plane_set_property(struct drm_plane *plane,
+		struct drm_property *property, uint64_t val)
+{
+	// XXX
+	return -EINVAL;
+}
+
+static const struct drm_plane_funcs mdp5_plane_funcs = {
+		.update_plane = mdp5_plane_update,
+		.disable_plane = mdp5_plane_disable,
+		.destroy = mdp5_plane_destroy,
+		.set_property = mdp5_plane_set_property,
+};
+
+void mdp5_plane_set_scanout(struct drm_plane *plane,
+		struct drm_framebuffer *fb)
+{
+	struct mdp5_plane *mdp5_plane = to_mdp5_plane(plane);
+	struct mdp5_kms *mdp5_kms = get_kms(plane);
+	enum mdp5_pipe pipe = mdp5_plane->pipe;
+	uint32_t nplanes = drm_format_num_planes(fb->pixel_format);
+	uint32_t iova[4];
+	int i;
+
+	for (i = 0; i < nplanes; i++) {
+		struct drm_gem_object *bo = msm_framebuffer_bo(fb, i);
+		msm_gem_get_iova(bo, mdp5_kms->id, &iova[i]);
+	}
+	for (; i < 4; i++)
+		iova[i] = 0;
+
+	mdp5_write(mdp5_kms, REG_MDP5_PIPE_SRC_STRIDE_A(pipe),
+			MDP5_PIPE_SRC_STRIDE_A_P0(fb->pitches[0]) |
+			MDP5_PIPE_SRC_STRIDE_A_P1(fb->pitches[1]));
+
+	mdp5_write(mdp5_kms, REG_MDP5_PIPE_SRC_STRIDE_B(pipe),
+			MDP5_PIPE_SRC_STRIDE_B_P2(fb->pitches[2]) |
+			MDP5_PIPE_SRC_STRIDE_B_P3(fb->pitches[3]));
+
+	mdp5_write(mdp5_kms, REG_MDP5_PIPE_SRC0_ADDR(pipe), iova[0]);
+	mdp5_write(mdp5_kms, REG_MDP5_PIPE_SRC1_ADDR(pipe), iova[1]);
+	mdp5_write(mdp5_kms, REG_MDP5_PIPE_SRC2_ADDR(pipe), iova[2]);
+	mdp5_write(mdp5_kms, REG_MDP5_PIPE_SRC3_ADDR(pipe), iova[3]);
+
+	plane->fb = fb;
+}
+
+/* NOTE: looks like if horizontal decimation is used (if we supported that)
+ * then the width used to calculate SMP block requirements is the post-
+ * decimated width.  Ie. SMP buffering sits downstream of decimation (which
+ * presumably happens during the dma from scanout buffer).
+ */
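+/* For example, a 1920 pixel wide ARGB8888 plane fetches 1920 * 4 bytes
+ * per line, so with nlines=2 below it needs
+ * DIV_ROUND_UP(1920 * 4 * 2, SMP_BLK_SIZE) = 4 SMP blocks.
+ */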
+static int request_smp_blocks(struct drm_plane *plane, uint32_t format,
+		uint32_t nplanes, uint32_t width)
+{
+	struct drm_device *dev = plane->dev;
+	struct mdp5_plane *mdp5_plane = to_mdp5_plane(plane);
+	struct mdp5_kms *mdp5_kms = get_kms(plane);
+	enum mdp5_pipe pipe = mdp5_plane->pipe;
+	int i, hsub, nlines, nblks, ret;
+
+	hsub = drm_format_horz_chroma_subsampling(format);
+
+	/* different if BWC (compressed framebuffer?) enabled: */
+	nlines = 2;
+
+	for (i = 0, nblks = 0; i < nplanes; i++) {
+		int n, fetch_stride, cpp;
+
+		cpp = drm_format_plane_cpp(format, i);
+		fetch_stride = width * cpp / (i ? hsub : 1);
+
+		n = DIV_ROUND_UP(fetch_stride * nlines, SMP_BLK_SIZE);
+
+		/* for hw rev v1.00 */
+		if (mdp5_kms->rev == 0)
+			n = roundup_pow_of_two(n);
+
+		DBG("%s[%d]: request %d SMP blocks", mdp5_plane->name, i, n);
+		ret = mdp5_smp_request(mdp5_kms, pipe2client(pipe, i), n);
+		if (ret) {
+			dev_err(dev->dev, "Could not allocate %d SMP blocks: %d\n",
+					n, ret);
+			return ret;
+		}
+
+		nblks += n;
+	}
+
+	/* in success case, return total # of blocks allocated: */
+	return nblks;
+}
+
+static void set_fifo_thresholds(struct drm_plane *plane, int nblks)
+{
+	struct mdp5_plane *mdp5_plane = to_mdp5_plane(plane);
+	struct mdp5_kms *mdp5_kms = get_kms(plane);
+	enum mdp5_pipe pipe = mdp5_plane->pipe;
+	uint32_t val;
+
+	/* 1/4 of SMP pool that is being fetched */
+	val = (nblks * SMP_ENTRIES_PER_BLK) / 4;
+
+	mdp5_write(mdp5_kms, REG_MDP5_PIPE_REQPRIO_FIFO_WM_0(pipe), val * 1);
+	mdp5_write(mdp5_kms, REG_MDP5_PIPE_REQPRIO_FIFO_WM_1(pipe), val * 2);
+	mdp5_write(mdp5_kms, REG_MDP5_PIPE_REQPRIO_FIFO_WM_2(pipe), val * 3);
+}
+
+int mdp5_plane_mode_set(struct drm_plane *plane,
+		struct drm_crtc *crtc, struct drm_framebuffer *fb,
+		int crtc_x, int crtc_y,
+		unsigned int crtc_w, unsigned int crtc_h,
+		uint32_t src_x, uint32_t src_y,
+		uint32_t src_w, uint32_t src_h)
+{
+	struct mdp5_plane *mdp5_plane = to_mdp5_plane(plane);
+	struct mdp5_kms *mdp5_kms = get_kms(plane);
+	enum mdp5_pipe pipe = mdp5_plane->pipe;
+	const struct mdp_format *format;
+	uint32_t nplanes, config = 0;
+	uint32_t phasex_step = 0, phasey_step = 0;
+	uint32_t hdecm = 0, vdecm = 0;
+	int i, nblks;
+
+	nplanes = drm_format_num_planes(fb->pixel_format);
+
+	/* bad formats should already be rejected: */
+	if (WARN_ON(nplanes > pipe2nclients(pipe)))
+		return -EINVAL;
+
+	/* src values are in Q16 fixed point, convert to integer: */
+	src_x = src_x >> 16;
+	src_y = src_y >> 16;
+	src_w = src_w >> 16;
+	src_h = src_h >> 16;
+
+	DBG("%s: FB[%u] %u,%u,%u,%u -> CRTC[%u] %d,%d,%u,%u", mdp5_plane->name,
+			fb->base.id, src_x, src_y, src_w, src_h,
+			crtc->base.id, crtc_x, crtc_y, crtc_w, crtc_h);
+
+	/*
+	 * Calculate and request required # of smp blocks:
+	 */
+	nblks = request_smp_blocks(plane, fb->pixel_format, nplanes, src_w);
+	if (nblks < 0)
+		return nblks;
+
+	/*
+	 * Currently we update the hw for allocations/requests immediately,
+	 * but once atomic modeset/pageflip is in place, the allocation
+	 * would move into atomic->check_plane_state(), while updating the
+	 * hw would remain here:
+	 */
+	for (i = 0; i < pipe2nclients(pipe); i++)
+		mdp5_smp_configure(mdp5_kms, pipe2client(pipe, i));
+
+	if (src_w != crtc_w) {
+		config |= MDP5_PIPE_SCALE_CONFIG_SCALEX_EN;
+		/* TODO calc phasex_step, hdecm */
+	}
+
+	if (src_h != crtc_h) {
+		config |= MDP5_PIPE_SCALE_CONFIG_SCALEY_EN;
+		/* TODO calc phasey_step, vdecm */
+	}
+
+	mdp5_write(mdp5_kms, REG_MDP5_PIPE_SRC_IMG_SIZE(pipe),
+			MDP5_PIPE_SRC_IMG_SIZE_WIDTH(src_w) |
+			MDP5_PIPE_SRC_IMG_SIZE_HEIGHT(src_h));
+
+	mdp5_write(mdp5_kms, REG_MDP5_PIPE_SRC_SIZE(pipe),
+			MDP5_PIPE_SRC_SIZE_WIDTH(src_w) |
+			MDP5_PIPE_SRC_SIZE_HEIGHT(src_h));
+
+	mdp5_write(mdp5_kms, REG_MDP5_PIPE_SRC_XY(pipe),
+			MDP5_PIPE_SRC_XY_X(src_x) |
+			MDP5_PIPE_SRC_XY_Y(src_y));
+
+	mdp5_write(mdp5_kms, REG_MDP5_PIPE_OUT_SIZE(pipe),
+			MDP5_PIPE_OUT_SIZE_WIDTH(crtc_w) |
+			MDP5_PIPE_OUT_SIZE_HEIGHT(crtc_h));
+
+	mdp5_write(mdp5_kms, REG_MDP5_PIPE_OUT_XY(pipe),
+			MDP5_PIPE_OUT_XY_X(crtc_x) |
+			MDP5_PIPE_OUT_XY_Y(crtc_y));
+
+	mdp5_plane_set_scanout(plane, fb);
+
+	format = to_mdp_format(msm_framebuffer_format(fb));
+
+	mdp5_write(mdp5_kms, REG_MDP5_PIPE_SRC_FORMAT(pipe),
+			MDP5_PIPE_SRC_FORMAT_A_BPC(format->bpc_a) |
+			MDP5_PIPE_SRC_FORMAT_R_BPC(format->bpc_r) |
+			MDP5_PIPE_SRC_FORMAT_G_BPC(format->bpc_g) |
+			MDP5_PIPE_SRC_FORMAT_B_BPC(format->bpc_b) |
+			COND(format->alpha_enable, MDP5_PIPE_SRC_FORMAT_ALPHA_ENABLE) |
+			MDP5_PIPE_SRC_FORMAT_CPP(format->cpp - 1) |
+			MDP5_PIPE_SRC_FORMAT_UNPACK_COUNT(format->unpack_count - 1) |
+			COND(format->unpack_tight, MDP5_PIPE_SRC_FORMAT_UNPACK_TIGHT) |
+			MDP5_PIPE_SRC_FORMAT_NUM_PLANES(nplanes - 1) |
+			MDP5_PIPE_SRC_FORMAT_CHROMA_SAMP(CHROMA_RGB));
+
+	mdp5_write(mdp5_kms, REG_MDP5_PIPE_SRC_UNPACK(pipe),
+			MDP5_PIPE_SRC_UNPACK_ELEM0(format->unpack[0]) |
+			MDP5_PIPE_SRC_UNPACK_ELEM1(format->unpack[1]) |
+			MDP5_PIPE_SRC_UNPACK_ELEM2(format->unpack[2]) |
+			MDP5_PIPE_SRC_UNPACK_ELEM3(format->unpack[3]));
+
+	mdp5_write(mdp5_kms, REG_MDP5_PIPE_SRC_OP_MODE(pipe),
+			MDP5_PIPE_SRC_OP_MODE_BWC(BWC_LOSSLESS));
+
+	/* not using secure mode: */
+	mdp5_write(mdp5_kms, REG_MDP5_PIPE_SRC_ADDR_SW_STATUS(pipe), 0);
+
+	mdp5_write(mdp5_kms, REG_MDP5_PIPE_SCALE_PHASE_STEP_X(pipe), phasex_step);
+	mdp5_write(mdp5_kms, REG_MDP5_PIPE_SCALE_PHASE_STEP_Y(pipe), phasey_step);
+	mdp5_write(mdp5_kms, REG_MDP5_PIPE_DECIMATION(pipe),
+			MDP5_PIPE_DECIMATION_VERT(vdecm) |
+			MDP5_PIPE_DECIMATION_HORZ(hdecm));
+	mdp5_write(mdp5_kms, REG_MDP5_PIPE_SCALE_CONFIG(pipe),
+			MDP5_PIPE_SCALE_CONFIG_SCALEX_MIN_FILTER(SCALE_FILTER_NEAREST) |
+			MDP5_PIPE_SCALE_CONFIG_SCALEY_MIN_FILTER(SCALE_FILTER_NEAREST) |
+			MDP5_PIPE_SCALE_CONFIG_SCALEX_CR_FILTER(SCALE_FILTER_NEAREST) |
+			MDP5_PIPE_SCALE_CONFIG_SCALEY_CR_FILTER(SCALE_FILTER_NEAREST) |
+			MDP5_PIPE_SCALE_CONFIG_SCALEX_MAX_FILTER(SCALE_FILTER_NEAREST) |
+			MDP5_PIPE_SCALE_CONFIG_SCALEY_MAX_FILTER(SCALE_FILTER_NEAREST));
+
+	set_fifo_thresholds(plane, nblks);
+
+	/* TODO detach from old crtc (if we had more than one) */
+	mdp5_crtc_attach(crtc, plane);
+
+	return 0;
+}
+
+void mdp5_plane_complete_flip(struct drm_plane *plane)
+{
+	struct mdp5_kms *mdp5_kms = get_kms(plane);
+	enum mdp5_pipe pipe = to_mdp5_plane(plane)->pipe;
+	int i;
+
+	for (i = 0; i < pipe2nclients(pipe); i++)
+		mdp5_smp_commit(mdp5_kms, pipe2client(pipe, i));
+}
+
+enum mdp5_pipe mdp5_plane_pipe(struct drm_plane *plane)
+{
+	struct mdp5_plane *mdp5_plane = to_mdp5_plane(plane);
+	return mdp5_plane->pipe;
+}
+
+/* initialize plane */
+struct drm_plane *mdp5_plane_init(struct drm_device *dev,
+		enum mdp5_pipe pipe, bool private_plane)
+{
+	struct drm_plane *plane = NULL;
+	struct mdp5_plane *mdp5_plane;
+	int ret;
+
+	mdp5_plane = kzalloc(sizeof(*mdp5_plane), GFP_KERNEL);
+	if (!mdp5_plane) {
+		ret = -ENOMEM;
+		goto fail;
+	}
+
+	plane = &mdp5_plane->base;
+
+	mdp5_plane->pipe = pipe;
+	mdp5_plane->name = pipe2name(pipe);
+
+	mdp5_plane->nformats = mdp5_get_formats(pipe, mdp5_plane->formats,
+			ARRAY_SIZE(mdp5_plane->formats));
+
+	drm_plane_init(dev, plane, 0xff, &mdp5_plane_funcs,
+			mdp5_plane->formats, mdp5_plane->nformats,
+			private_plane);
+
+	mdp5_plane_install_properties(plane, &plane->base);
+
+	return plane;
+
+fail:
+	if (plane)
+		mdp5_plane_destroy(plane);
+
+	return ERR_PTR(ret);
+}
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_smp.c b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_smp.c
new file mode 100644
index 0000000..2d0236b
--- /dev/null
+++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_smp.c
@@ -0,0 +1,173 @@
+/*
+ * Copyright (C) 2013 Red Hat
+ * Author: Rob Clark <robdclark@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+
+
+#include "mdp5_kms.h"
+#include "mdp5_smp.h"
+
+
+/* SMP - Shared Memory Pool
+ *
+ * These are shared between all the clients, where each plane in a
+ * scanout buffer is an SMP client.  Ie. scanout of 3-plane I420 on
+ * pipe VIG0 => 3 clients: VIG0_Y, VIG0_CB, VIG0_CR.
+ *
+ * Based on the size of the attached scanout buffer, a certain # of
+ * blocks must be allocated to that client out of the shared pool.
+ *
+ * For each block, it can be either free, or pending/in-use by a
+ * client.  The updates happen in three steps:
+ *
+ *  1) mdp5_smp_request():
+ *     When plane scanout is set up, calculate the required number
+ *     of blocks needed per client, and request them.  Blocks not
+ *     in-use or pending for any other client are added to the
+ *     client's pending set.
+ *
+ *  2) mdp5_smp_configure():
+ *     As hw is programmed, before FLUSH, MDP5_SMP_ALLOC registers
+ *     are configured for the union(pending, inuse)
+ *
+ *  3) mdp5_smp_commit():
+ *     After next vblank, copy pending -> inuse.  Optionally update
+ *     MDP5_SMP_ALLOC registers if there are newly unused blocks
+ *
+ * On the next vblank after changes have been committed to hw, the
+ * client's pending blocks become its in-use blocks (and blocks that
+ * are no longer in use become available to other clients).
+ *
+ * btw, hurray for confusing overloaded acronyms!  :-/
+ *
+ * NOTE: for atomic modeset/pageflip NONBLOCK operations, step #1
+ * should happen at (or before?) atomic->check().  And we'd need
+ * an API to discard previous requests if the update is aborted or
+ * is test-only.
+ *
+ * TODO it would perhaps be nice to have debugfs to dump out the
+ * kernel's in-use and pending state of all clients..
+ */
+
+static DEFINE_SPINLOCK(smp_lock);
+
+
+/* step #1: update # of blocks pending for the client: */
+int mdp5_smp_request(struct mdp5_kms *mdp5_kms,
+		enum mdp5_client_id cid, int nblks)
+{
+	struct mdp5_client_smp_state *ps = &mdp5_kms->smp_client_state[cid];
+	int i, ret, avail, cur_nblks, cnt = mdp5_kms->smp_blk_cnt;
+	unsigned long flags;
+
+	spin_lock_irqsave(&smp_lock, flags);
+
+	avail = cnt - bitmap_weight(mdp5_kms->smp_state, cnt);
+	if (nblks > avail) {
+		ret = -ENOSPC;
+		goto fail;
+	}
+
+	cur_nblks = bitmap_weight(ps->pending, cnt);
+	if (nblks > cur_nblks) {
+		/* grow the existing pending reservation: */
+		for (i = cur_nblks; i < nblks; i++) {
+			int blk = find_first_zero_bit(mdp5_kms->smp_state, cnt);
+			set_bit(blk, ps->pending);
+			set_bit(blk, mdp5_kms->smp_state);
+		}
+	} else {
+		/* shrink the existing pending reservation: */
+		for (i = cur_nblks; i > nblks; i--) {
+			int blk = find_first_bit(ps->pending, cnt);
+			clear_bit(blk, ps->pending);
+			/* don't clear in global smp_state until _commit() */
+		}
+	}
+
+	ret = 0;
+
+fail:
+	spin_unlock_irqrestore(&smp_lock, flags);
+	return ret;
+}
+
+static void update_smp_state(struct mdp5_kms *mdp5_kms,
+		enum mdp5_client_id cid, mdp5_smp_state_t *assigned)
+{
+	int cnt = mdp5_kms->smp_blk_cnt;
+	uint32_t blk, val;
+
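+	/* each SMP_ALLOC register packs three client-id fields, so block
+	 * 'blk' lives in register blk / 3, field blk % 3:
+	 */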
+	for_each_set_bit(blk, *assigned, cnt) {
+		int idx = blk / 3;
+		int fld = blk % 3;
+
+		val = mdp5_read(mdp5_kms, REG_MDP5_SMP_ALLOC_W_REG(idx));
+
+		switch (fld) {
+		case 0:
+			val &= ~MDP5_SMP_ALLOC_W_REG_CLIENT0__MASK;
+			val |= MDP5_SMP_ALLOC_W_REG_CLIENT0(cid);
+			break;
+		case 1:
+			val &= ~MDP5_SMP_ALLOC_W_REG_CLIENT1__MASK;
+			val |= MDP5_SMP_ALLOC_W_REG_CLIENT1(cid);
+			break;
+		case 2:
+			val &= ~MDP5_SMP_ALLOC_W_REG_CLIENT2__MASK;
+			val |= MDP5_SMP_ALLOC_W_REG_CLIENT2(cid);
+			break;
+		}
+
+		mdp5_write(mdp5_kms, REG_MDP5_SMP_ALLOC_W_REG(idx), val);
+		mdp5_write(mdp5_kms, REG_MDP5_SMP_ALLOC_R_REG(idx), val);
+	}
+}
+
+/* step #2: configure hw for union(pending, inuse): */
+void mdp5_smp_configure(struct mdp5_kms *mdp5_kms, enum mdp5_client_id cid)
+{
+	struct mdp5_client_smp_state *ps = &mdp5_kms->smp_client_state[cid];
+	int cnt = mdp5_kms->smp_blk_cnt;
+	mdp5_smp_state_t assigned;
+
+	bitmap_or(assigned, ps->inuse, ps->pending, cnt);
+	update_smp_state(mdp5_kms, cid, &assigned);
+}
+
+/* step #3: after vblank, copy pending -> inuse: */
+void mdp5_smp_commit(struct mdp5_kms *mdp5_kms, enum mdp5_client_id cid)
+{
+	struct mdp5_client_smp_state *ps = &mdp5_kms->smp_client_state[cid];
+	int cnt = mdp5_kms->smp_blk_cnt;
+	mdp5_smp_state_t released;
+
+	/*
+	 * Figure out if there are any blocks we were previously
+	 * using, which can be released and made available to other
+	 * clients:
+	 */
+	if (bitmap_andnot(released, ps->inuse, ps->pending, cnt)) {
+		unsigned long flags;
+
+		spin_lock_irqsave(&smp_lock, flags);
+		/* clear released blocks: */
+		bitmap_andnot(mdp5_kms->smp_state, mdp5_kms->smp_state,
+				released, cnt);
+		spin_unlock_irqrestore(&smp_lock, flags);
+
+		update_smp_state(mdp5_kms, CID_UNUSED, &released);
+	}
+
+	bitmap_copy(ps->inuse, ps->pending, cnt);
+}
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_smp.h b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_smp.h
new file mode 100644
index 0000000..0ab739e
--- /dev/null
+++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_smp.h
@@ -0,0 +1,41 @@
+/*
+ * Copyright (C) 2013 Red Hat
+ * Author: Rob Clark <robdclark@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef __MDP5_SMP_H__
+#define __MDP5_SMP_H__
+
+#include "msm_drv.h"
+
+#define MAX_SMP_BLOCKS  22
+#define SMP_BLK_SIZE    4096
+#define SMP_ENTRIES_PER_BLK (SMP_BLK_SIZE / 16)
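+/* ie. 4096 byte blocks, with fifo watermarks programmed in 16 byte
+ * entries (256 entries per block):
+ */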
+
+typedef DECLARE_BITMAP(mdp5_smp_state_t, MAX_SMP_BLOCKS);
+
+struct mdp5_client_smp_state {
+	mdp5_smp_state_t inuse;
+	mdp5_smp_state_t pending;
+};
+
+struct mdp5_kms;
+
+int mdp5_smp_request(struct mdp5_kms *mdp5_kms, enum mdp5_client_id cid, int nblks);
+void mdp5_smp_configure(struct mdp5_kms *mdp5_kms, enum mdp5_client_id cid);
+void mdp5_smp_commit(struct mdp5_kms *mdp5_kms, enum mdp5_client_id cid);
+
+
+#endif /* __MDP5_SMP_H__ */
diff --git a/drivers/gpu/drm/msm/mdp/mdp_common.xml.h b/drivers/gpu/drm/msm/mdp/mdp_common.xml.h
new file mode 100644
index 0000000..a9629b8
--- /dev/null
+++ b/drivers/gpu/drm/msm/mdp/mdp_common.xml.h
@@ -0,0 +1,78 @@
+#ifndef MDP_COMMON_XML
+#define MDP_COMMON_XML
+
+/* Autogenerated file, DO NOT EDIT manually!
+
+This file was generated by the rules-ng-ng headergen tool in this git repository:
+http://github.com/freedreno/envytools/
+git clone https://github.com/freedreno/envytools.git
+
+The rules-ng-ng source files this header was generated from are:
+- /home/robclark/src/freedreno/envytools/rnndb/msm.xml                 (    647 bytes, from 2013-11-30 14:45:35)
+- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml (   1453 bytes, from 2013-03-31 16:51:27)
+- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp4.xml            (  17996 bytes, from 2013-12-01 19:10:31)
+- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp_common.xml      (   1615 bytes, from 2013-11-30 15:00:52)
+- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp5.xml            (  22517 bytes, from 2013-12-03 20:59:13)
+- /home/robclark/src/freedreno/envytools/rnndb/dsi/dsi.xml             (  11712 bytes, from 2013-08-17 17:13:43)
+- /home/robclark/src/freedreno/envytools/rnndb/dsi/sfpb.xml            (    344 bytes, from 2013-08-11 19:26:32)
+- /home/robclark/src/freedreno/envytools/rnndb/dsi/mmss_cc.xml         (   1544 bytes, from 2013-08-16 19:17:05)
+- /home/robclark/src/freedreno/envytools/rnndb/hdmi/qfprom.xml         (    600 bytes, from 2013-07-05 19:21:12)
+- /home/robclark/src/freedreno/envytools/rnndb/hdmi/hdmi.xml           (  20932 bytes, from 2013-12-01 15:13:04)
+
+Copyright (C) 2013 by the following authors:
+- Rob Clark <robdclark@gmail.com> (robclark)
+
+Permission is hereby granted, free of charge, to any person obtaining
+a copy of this software and associated documentation files (the
+"Software"), to deal in the Software without restriction, including
+without limitation the rights to use, copy, modify, merge, publish,
+distribute, sublicense, and/or sell copies of the Software, and to
+permit persons to whom the Software is furnished to do so, subject to
+the following conditions:
+
+The above copyright notice and this permission notice (including the
+next paragraph) shall be included in all copies or substantial
+portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
+LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/
+
+
+enum mdp_mixer_stage_id {
+	STAGE_UNUSED = 0,
+	STAGE_BASE = 1,
+	STAGE0 = 2,
+	STAGE1 = 3,
+	STAGE2 = 4,
+	STAGE3 = 5,
+};
+
+enum mdp_alpha_type {
+	FG_CONST = 0,
+	BG_CONST = 1,
+	FG_PIXEL = 2,
+	BG_PIXEL = 3,
+};
+
+enum mdp_bpc {
+	BPC1 = 0,
+	BPC5 = 1,
+	BPC6 = 2,
+	BPC8 = 3,
+};
+
+enum mdp_bpc_alpha {
+	BPC1A = 0,
+	BPC4A = 1,
+	BPC6A = 2,
+	BPC8A = 3,
+};
+
+
+#endif /* MDP_COMMON_XML */
diff --git a/drivers/gpu/drm/msm/mdp4/mdp4_format.c b/drivers/gpu/drm/msm/mdp/mdp_format.c
similarity index 85%
rename from drivers/gpu/drm/msm/mdp4/mdp4_format.c
rename to drivers/gpu/drm/msm/mdp/mdp_format.c
index 17330b0..e0a6ffb 100644
--- a/drivers/gpu/drm/msm/mdp4/mdp4_format.c
+++ b/drivers/gpu/drm/msm/mdp/mdp_format.c
@@ -17,7 +17,7 @@
 
 
 #include "msm_drv.h"
-#include "mdp4_kms.h"
+#include "mdp_kms.h"
 
 #define FMT(name, a, r, g, b, e0, e1, e2, e3, alpha, tight, c, cnt) { \
 		.base = { .pixel_format = DRM_FORMAT_ ## name }, \
@@ -34,7 +34,7 @@
 
 #define BPC0A 0
 
-static const struct mdp4_format formats[] = {
+static const struct mdp_format formats[] = {
 	/*  name      a  r  g  b   e0 e1 e2 e3  alpha   tight  cpp cnt */
 	FMT(ARGB8888, 8, 8, 8, 8,  1, 0, 2, 3,  true,   true,  4,  4),
 	FMT(XRGB8888, 8, 8, 8, 8,  1, 0, 2, 3,  false,  true,  4,  4),
@@ -44,12 +44,11 @@
 	FMT(BGR565,   0, 5, 6, 5,  2, 0, 1, 0,  false,  true,  2,  3),
 };
 
-uint32_t mdp4_get_formats(enum mdp4_pipe pipe_id, uint32_t *pixel_formats,
-		uint32_t max_formats)
+uint32_t mdp_get_formats(uint32_t *pixel_formats, uint32_t max_formats)
 {
 	uint32_t i;
 	for (i = 0; i < ARRAY_SIZE(formats); i++) {
-		const struct mdp4_format *f = &formats[i];
+		const struct mdp_format *f = &formats[i];
 
 		if (i == max_formats)
 			break;
@@ -60,11 +59,11 @@
 	return i;
 }
 
-const struct msm_format *mdp4_get_format(struct msm_kms *kms, uint32_t format)
+const struct msm_format *mdp_get_format(struct msm_kms *kms, uint32_t format)
 {
 	int i;
 	for (i = 0; i < ARRAY_SIZE(formats); i++) {
-		const struct mdp4_format *f = &formats[i];
+		const struct mdp_format *f = &formats[i];
 		if (f->base.pixel_format == format)
 			return &f->base;
 	}
diff --git a/drivers/gpu/drm/msm/mdp/mdp_kms.c b/drivers/gpu/drm/msm/mdp/mdp_kms.c
new file mode 100644
index 0000000..3be48f7
--- /dev/null
+++ b/drivers/gpu/drm/msm/mdp/mdp_kms.c
@@ -0,0 +1,144 @@
+/*
+ * Copyright (C) 2013 Red Hat
+ * Author: Rob Clark <robdclark@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+
+
+#include "msm_drv.h"
+#include "mdp_kms.h"
+
+
+struct mdp_irq_wait {
+	struct mdp_irq irq;
+	int count;
+};
+
+static DECLARE_WAIT_QUEUE_HEAD(wait_event);
+
+static DEFINE_SPINLOCK(list_lock);
+
+static void update_irq(struct mdp_kms *mdp_kms)
+{
+	struct mdp_irq *irq;
+	uint32_t irqmask = mdp_kms->vblank_mask;
+
+	BUG_ON(!spin_is_locked(&list_lock));
+
+	list_for_each_entry(irq, &mdp_kms->irq_list, node)
+		irqmask |= irq->irqmask;
+
+	mdp_kms->funcs->set_irqmask(mdp_kms, irqmask);
+}
+
+static void update_irq_unlocked(struct mdp_kms *mdp_kms)
+{
+	unsigned long flags;
+	spin_lock_irqsave(&list_lock, flags);
+	update_irq(mdp_kms);
+	spin_unlock_irqrestore(&list_lock, flags);
+}
+
+void mdp_dispatch_irqs(struct mdp_kms *mdp_kms, uint32_t status)
+{
+	struct mdp_irq *handler, *n;
+	unsigned long flags;
+
+	spin_lock_irqsave(&list_lock, flags);
+	mdp_kms->in_irq = true;
+	list_for_each_entry_safe(handler, n, &mdp_kms->irq_list, node) {
+		if (handler->irqmask & status) {
+			spin_unlock_irqrestore(&list_lock, flags);
+			handler->irq(handler, handler->irqmask & status);
+			spin_lock_irqsave(&list_lock, flags);
+		}
+	}
+	mdp_kms->in_irq = false;
+	update_irq(mdp_kms);
+	spin_unlock_irqrestore(&list_lock, flags);
+
+}
+
+void mdp_update_vblank_mask(struct mdp_kms *mdp_kms, uint32_t mask, bool enable)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&list_lock, flags);
+	if (enable)
+		mdp_kms->vblank_mask |= mask;
+	else
+		mdp_kms->vblank_mask &= ~mask;
+	update_irq(mdp_kms);
+	spin_unlock_irqrestore(&list_lock, flags);
+}
+
+static void wait_irq(struct mdp_irq *irq, uint32_t irqstatus)
+{
+	struct mdp_irq_wait *wait =
+			container_of(irq, struct mdp_irq_wait, irq);
+	wait->count--;
+	wake_up_all(&wait_event);
+}
+
+void mdp_irq_wait(struct mdp_kms *mdp_kms, uint32_t irqmask)
+{
+	struct mdp_irq_wait wait = {
+		.irq = {
+			.irq = wait_irq,
+			.irqmask = irqmask,
+		},
+		.count = 1,
+	};
+	mdp_irq_register(mdp_kms, &wait.irq);
+	wait_event(wait_event, (wait.count <= 0));
+	mdp_irq_unregister(mdp_kms, &wait.irq);
+}
+
+void mdp_irq_register(struct mdp_kms *mdp_kms, struct mdp_irq *irq)
+{
+	unsigned long flags;
+	bool needs_update = false;
+
+	spin_lock_irqsave(&list_lock, flags);
+
+	if (!irq->registered) {
+		irq->registered = true;
+		list_add(&irq->node, &mdp_kms->irq_list);
+		needs_update = !mdp_kms->in_irq;
+	}
+
+	spin_unlock_irqrestore(&list_lock, flags);
+
+	if (needs_update)
+		update_irq_unlocked(mdp_kms);
+}
+
+void mdp_irq_unregister(struct mdp_kms *mdp_kms, struct mdp_irq *irq)
+{
+	unsigned long flags;
+	bool needs_update = false;
+
+	spin_lock_irqsave(&list_lock, flags);
+
+	if (irq->registered) {
+		irq->registered = false;
+		list_del(&irq->node);
+		needs_update = !mdp_kms->in_irq;
+	}
+
+	spin_unlock_irqrestore(&list_lock, flags);
+
+	if (needs_update)
+		update_irq_unlocked(mdp_kms);
+}
diff --git a/drivers/gpu/drm/msm/mdp/mdp_kms.h b/drivers/gpu/drm/msm/mdp/mdp_kms.h
new file mode 100644
index 0000000..99557b5
--- /dev/null
+++ b/drivers/gpu/drm/msm/mdp/mdp_kms.h
@@ -0,0 +1,97 @@
+/*
+ * Copyright (C) 2013 Red Hat
+ * Author: Rob Clark <robdclark@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef __MDP_KMS_H__
+#define __MDP_KMS_H__
+
+#include <linux/clk.h>
+#include <linux/platform_device.h>
+#include <linux/regulator/consumer.h>
+
+#include "msm_drv.h"
+#include "msm_kms.h"
+#include "mdp_common.xml.h"
+
+struct mdp_kms;
+
+struct mdp_kms_funcs {
+	struct msm_kms_funcs base;
+	void (*set_irqmask)(struct mdp_kms *mdp_kms, uint32_t irqmask);
+};
+
+struct mdp_kms {
+	struct msm_kms base;
+
+	const struct mdp_kms_funcs *funcs;
+
+	/* irq handling: */
+	bool in_irq;
+	struct list_head irq_list;    /* list of mdp_irq */
+	uint32_t vblank_mask;         /* irq bits set for userspace vblank */
+};
+#define to_mdp_kms(x) container_of(x, struct mdp_kms, base)
+
+static inline void mdp_kms_init(struct mdp_kms *mdp_kms,
+		const struct mdp_kms_funcs *funcs)
+{
+	mdp_kms->funcs = funcs;
+	INIT_LIST_HEAD(&mdp_kms->irq_list);
+	msm_kms_init(&mdp_kms->base, &funcs->base);
+}
+
+/*
+ * irq helpers:
+ */
+
+/* For transiently registering for different MDP irqs that various parts
+ * of the KMS code need during setup/configuration.  These are not
+ * necessarily the same as what drm_vblank_get/put() are requesting, and
+ * the hysteresis in drm_vblank_put() is not necessarily desirable for
+ * internal housekeeping related irq usage.
+ */
+struct mdp_irq {
+	struct list_head node;
+	uint32_t irqmask;
+	bool registered;
+	void (*irq)(struct mdp_irq *irq, uint32_t irqstatus);
+};
+
+void mdp_dispatch_irqs(struct mdp_kms *mdp_kms, uint32_t status);
+void mdp_update_vblank_mask(struct mdp_kms *mdp_kms, uint32_t mask, bool enable);
+void mdp_irq_wait(struct mdp_kms *mdp_kms, uint32_t irqmask);
+void mdp_irq_register(struct mdp_kms *mdp_kms, struct mdp_irq *irq);
+void mdp_irq_unregister(struct mdp_kms *mdp_kms, struct mdp_irq *irq);
+
+
+/*
+ * pixel format helpers:
+ */
+
+struct mdp_format {
+	struct msm_format base;
+	enum mdp_bpc bpc_r, bpc_g, bpc_b;
+	enum mdp_bpc_alpha bpc_a;
+	uint8_t unpack[4];
+	bool alpha_enable, unpack_tight;
+	uint8_t cpp, unpack_count;
+};
+#define to_mdp_format(x) container_of(x, struct mdp_format, base)
+
+uint32_t mdp_get_formats(uint32_t *formats, uint32_t max_formats);
+const struct msm_format *mdp_get_format(struct msm_kms *kms, uint32_t format);
+
+#endif /* __MDP_KMS_H__ */
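
A minimal sketch of the transient-irq pattern described in the comment above, using only the helpers declared in this header (the irqmask values are hypothetical; real masks come from the generated register headers):

	static void example_error_irq(struct mdp_irq *irq, uint32_t irqstatus)
	{
		DRM_ERROR("errors: %08x\n", irqstatus);
	}

	static void example_setup(struct mdp_kms *mdp_kms, uint32_t err_mask,
			uint32_t done_mask)
	{
		static struct mdp_irq error_handler = {
			.irq = example_error_irq,
		};

		/* long-lived handler: stays on irq_list until unregistered */
		error_handler.irqmask = err_mask;
		mdp_irq_register(mdp_kms, &error_handler);

		/* one-shot: registers, sleeps until the irq fires, unregisters */
		mdp_irq_wait(mdp_kms, done_mask);
	}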
diff --git a/drivers/gpu/drm/msm/mdp4/mdp4_irq.c b/drivers/gpu/drm/msm/mdp4/mdp4_irq.c
deleted file mode 100644
index 5c6b7fc..0000000
--- a/drivers/gpu/drm/msm/mdp4/mdp4_irq.c
+++ /dev/null
@@ -1,203 +0,0 @@
-/*
- * Copyright (C) 2013 Red Hat
- * Author: Rob Clark <robdclark@gmail.com>
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 as published by
- * the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program.  If not, see <http://www.gnu.org/licenses/>.
- */
-
-
-#include "msm_drv.h"
-#include "mdp4_kms.h"
-
-
-struct mdp4_irq_wait {
-	struct mdp4_irq irq;
-	int count;
-};
-
-static DECLARE_WAIT_QUEUE_HEAD(wait_event);
-
-static DEFINE_SPINLOCK(list_lock);
-
-static void update_irq(struct mdp4_kms *mdp4_kms)
-{
-	struct mdp4_irq *irq;
-	uint32_t irqmask = mdp4_kms->vblank_mask;
-
-	BUG_ON(!spin_is_locked(&list_lock));
-
-	list_for_each_entry(irq, &mdp4_kms->irq_list, node)
-		irqmask |= irq->irqmask;
-
-	mdp4_write(mdp4_kms, REG_MDP4_INTR_ENABLE, irqmask);
-}
-
-static void update_irq_unlocked(struct mdp4_kms *mdp4_kms)
-{
-	unsigned long flags;
-	spin_lock_irqsave(&list_lock, flags);
-	update_irq(mdp4_kms);
-	spin_unlock_irqrestore(&list_lock, flags);
-}
-
-static void mdp4_irq_error_handler(struct mdp4_irq *irq, uint32_t irqstatus)
-{
-	DRM_ERROR("errors: %08x\n", irqstatus);
-}
-
-void mdp4_irq_preinstall(struct msm_kms *kms)
-{
-	struct mdp4_kms *mdp4_kms = to_mdp4_kms(kms);
-	mdp4_write(mdp4_kms, REG_MDP4_INTR_CLEAR, 0xffffffff);
-}
-
-int mdp4_irq_postinstall(struct msm_kms *kms)
-{
-	struct mdp4_kms *mdp4_kms = to_mdp4_kms(kms);
-	struct mdp4_irq *error_handler = &mdp4_kms->error_handler;
-
-	INIT_LIST_HEAD(&mdp4_kms->irq_list);
-
-	error_handler->irq = mdp4_irq_error_handler;
-	error_handler->irqmask = MDP4_IRQ_PRIMARY_INTF_UDERRUN |
-			MDP4_IRQ_EXTERNAL_INTF_UDERRUN;
-
-	mdp4_irq_register(mdp4_kms, error_handler);
-
-	return 0;
-}
-
-void mdp4_irq_uninstall(struct msm_kms *kms)
-{
-	struct mdp4_kms *mdp4_kms = to_mdp4_kms(kms);
-	mdp4_write(mdp4_kms, REG_MDP4_INTR_ENABLE, 0x00000000);
-}
-
-irqreturn_t mdp4_irq(struct msm_kms *kms)
-{
-	struct mdp4_kms *mdp4_kms = to_mdp4_kms(kms);
-	struct drm_device *dev = mdp4_kms->dev;
-	struct msm_drm_private *priv = dev->dev_private;
-	struct mdp4_irq *handler, *n;
-	unsigned long flags;
-	unsigned int id;
-	uint32_t status;
-
-	status = mdp4_read(mdp4_kms, REG_MDP4_INTR_STATUS);
-	mdp4_write(mdp4_kms, REG_MDP4_INTR_CLEAR, status);
-
-	VERB("status=%08x", status);
-
-	for (id = 0; id < priv->num_crtcs; id++)
-		if (status & mdp4_crtc_vblank(priv->crtcs[id]))
-			drm_handle_vblank(dev, id);
-
-	spin_lock_irqsave(&list_lock, flags);
-	mdp4_kms->in_irq = true;
-	list_for_each_entry_safe(handler, n, &mdp4_kms->irq_list, node) {
-		if (handler->irqmask & status) {
-			spin_unlock_irqrestore(&list_lock, flags);
-			handler->irq(handler, handler->irqmask & status);
-			spin_lock_irqsave(&list_lock, flags);
-		}
-	}
-	mdp4_kms->in_irq = false;
-	update_irq(mdp4_kms);
-	spin_unlock_irqrestore(&list_lock, flags);
-
-	return IRQ_HANDLED;
-}
-
-int mdp4_enable_vblank(struct msm_kms *kms, struct drm_crtc *crtc)
-{
-	struct mdp4_kms *mdp4_kms = to_mdp4_kms(kms);
-	unsigned long flags;
-
-	spin_lock_irqsave(&list_lock, flags);
-	mdp4_kms->vblank_mask |= mdp4_crtc_vblank(crtc);
-	update_irq(mdp4_kms);
-	spin_unlock_irqrestore(&list_lock, flags);
-
-	return 0;
-}
-
-void mdp4_disable_vblank(struct msm_kms *kms, struct drm_crtc *crtc)
-{
-	struct mdp4_kms *mdp4_kms = to_mdp4_kms(kms);
-	unsigned long flags;
-
-	spin_lock_irqsave(&list_lock, flags);
-	mdp4_kms->vblank_mask &= ~mdp4_crtc_vblank(crtc);
-	update_irq(mdp4_kms);
-	spin_unlock_irqrestore(&list_lock, flags);
-}
-
-static void wait_irq(struct mdp4_irq *irq, uint32_t irqstatus)
-{
-	struct mdp4_irq_wait *wait =
-			container_of(irq, struct mdp4_irq_wait, irq);
-	wait->count--;
-	wake_up_all(&wait_event);
-}
-
-void mdp4_irq_wait(struct mdp4_kms *mdp4_kms, uint32_t irqmask)
-{
-	struct mdp4_irq_wait wait = {
-		.irq = {
-			.irq = wait_irq,
-			.irqmask = irqmask,
-		},
-		.count = 1,
-	};
-	mdp4_irq_register(mdp4_kms, &wait.irq);
-	wait_event(wait_event, (wait.count <= 0));
-	mdp4_irq_unregister(mdp4_kms, &wait.irq);
-}
-
-void mdp4_irq_register(struct mdp4_kms *mdp4_kms, struct mdp4_irq *irq)
-{
-	unsigned long flags;
-	bool needs_update = false;
-
-	spin_lock_irqsave(&list_lock, flags);
-
-	if (!irq->registered) {
-		irq->registered = true;
-		list_add(&irq->node, &mdp4_kms->irq_list);
-		needs_update = !mdp4_kms->in_irq;
-	}
-
-	spin_unlock_irqrestore(&list_lock, flags);
-
-	if (needs_update)
-		update_irq_unlocked(mdp4_kms);
-}
-
-void mdp4_irq_unregister(struct mdp4_kms *mdp4_kms, struct mdp4_irq *irq)
-{
-	unsigned long flags;
-	bool needs_update = false;
-
-	spin_lock_irqsave(&list_lock, flags);
-
-	if (irq->registered) {
-		irq->registered = false;
-		list_del(&irq->node);
-		needs_update = !mdp4_kms->in_irq;
-	}
-
-	spin_unlock_irqrestore(&list_lock, flags);
-
-	if (needs_update)
-		update_irq_unlocked(mdp4_kms);
-}
diff --git a/drivers/gpu/drm/msm/msm_drv.c b/drivers/gpu/drm/msm/msm_drv.c
index 8653769..e6adafc 100644
--- a/drivers/gpu/drm/msm/msm_drv.c
+++ b/drivers/gpu/drm/msm/msm_drv.c
@@ -17,6 +17,7 @@
 
 #include "msm_drv.h"
 #include "msm_gpu.h"
+#include "msm_kms.h"
 
 static void msm_fb_output_poll_changed(struct drm_device *dev)
 {
@@ -30,50 +31,19 @@
 	.output_poll_changed = msm_fb_output_poll_changed,
 };
 
-static int msm_fault_handler(struct iommu_domain *iommu, struct device *dev,
-		unsigned long iova, int flags, void *arg)
-{
-	DBG("*** fault: iova=%08lx, flags=%d", iova, flags);
-	return 0;
-}
-
-int msm_register_iommu(struct drm_device *dev, struct iommu_domain *iommu)
+int msm_register_mmu(struct drm_device *dev, struct msm_mmu *mmu)
 {
 	struct msm_drm_private *priv = dev->dev_private;
-	int idx = priv->num_iommus++;
+	int idx = priv->num_mmus++;
 
-	if (WARN_ON(idx >= ARRAY_SIZE(priv->iommus)))
+	if (WARN_ON(idx >= ARRAY_SIZE(priv->mmus)))
 		return -EINVAL;
 
-	priv->iommus[idx] = iommu;
-
-	iommu_set_fault_handler(iommu, msm_fault_handler, dev);
-
-	/* need to iommu_attach_device() somewhere??  on resume?? */
+	priv->mmus[idx] = mmu;
 
 	return idx;
 }
 
-int msm_iommu_attach(struct drm_device *dev, struct iommu_domain *iommu,
-		const char **names, int cnt)
-{
-	int i, ret;
-
-	for (i = 0; i < cnt; i++) {
-		/* TODO maybe some day msm iommu won't require this hack: */
-		struct device *msm_iommu_get_ctx(const char *ctx_name);
-		struct device *ctx = msm_iommu_get_ctx(names[i]);
-		if (!ctx)
-			continue;
-		ret = iommu_attach_device(iommu, ctx);
-		if (ret) {
-			dev_warn(dev->dev, "could not attach iommu to %s", names[i]);
-			return ret;
-		}
-	}
-	return 0;
-}
-
 #ifdef CONFIG_DRM_MSM_REGISTER_LOGGING
 static bool reglog = false;
 MODULE_PARM_DESC(reglog, "Enable register read/write logging");
@@ -82,6 +52,10 @@
 #define reglog 0
 #endif
 
+static char *vram;
+MODULE_PARM_DESC(vram, "Configure VRAM size (for devices without IOMMU/GPUMMU)");
+module_param(vram, charp, 0);
+
 void __iomem *msm_ioremap(struct platform_device *pdev, const char *name,
 		const char *dbgname)
 {
@@ -161,6 +135,14 @@
 		mutex_unlock(&dev->struct_mutex);
 	}
 
+	if (priv->vram.paddr) {
+		DEFINE_DMA_ATTRS(attrs);
+		dma_set_attr(DMA_ATTR_NO_KERNEL_MAPPING, &attrs);
+		drm_mm_takedown(&priv->vram.mm);
+		dma_free_attrs(dev->dev, priv->vram.size, NULL,
+				priv->vram.paddr, &attrs);
+	}
+
 	dev->dev_private = NULL;
 
 	kfree(priv);
@@ -168,6 +150,24 @@
 	return 0;
 }
 
+static int get_mdp_ver(struct platform_device *pdev)
+{
+#ifdef CONFIG_OF
+	static const struct of_device_id match_types[] = { {
+		.compatible = "qcom,mdss_mdp",
+		.data	= (void	*)5,
+	}, {
+		/* end node */
+	} };
+	struct device *dev = &pdev->dev;
+	const struct of_device_id *match;
+	match = of_match_node(match_types, dev->of_node);
+	if (match)
+		return (int)match->data;
+#endif
+	return 4;
+}
+
 static int msm_load(struct drm_device *dev, unsigned long flags)
 {
 	struct platform_device *pdev = dev->platformdev;
@@ -191,7 +191,53 @@
 
 	drm_mode_config_init(dev);
 
-	kms = mdp4_kms_init(dev);
+	/* if we have no IOMMU, then we need to use the carveout allocator.
+	 * Grab the entire CMA chunk carved out in early startup in
+	 * mach-msm:
+	 */
+	if (!iommu_present(&platform_bus_type)) {
+		DEFINE_DMA_ATTRS(attrs);
+		unsigned long size;
+		void *p;
+
+		DBG("using %s VRAM carveout", vram);
+		size = memparse(vram, NULL);
+		priv->vram.size = size;
+
+		drm_mm_init(&priv->vram.mm, 0, (size >> PAGE_SHIFT) - 1);
+
+		dma_set_attr(DMA_ATTR_NO_KERNEL_MAPPING, &attrs);
+		dma_set_attr(DMA_ATTR_WRITE_COMBINE, &attrs);
+
+		/* note that for no-kernel-mapping, the vaddr returned
+		 * is bogus, but non-null if allocation succeeded:
+		 */
+		p = dma_alloc_attrs(dev->dev, size,
+				&priv->vram.paddr, 0, &attrs);
+		if (!p) {
+			dev_err(dev->dev, "failed to allocate VRAM\n");
+			priv->vram.paddr = 0;
+			ret = -ENOMEM;
+			goto fail;
+		}
+
+		dev_info(dev->dev, "VRAM: %08x->%08x\n",
+				(uint32_t)priv->vram.paddr,
+				(uint32_t)(priv->vram.paddr + size));
+	}
+
+	switch (get_mdp_ver(pdev)) {
+	case 4:
+		kms = mdp4_kms_init(dev);
+		break;
+	case 5:
+		kms = mdp5_kms_init(dev);
+		break;
+	default:
+		kms = ERR_PTR(-ENODEV);
+		break;
+	}
+
 	if (IS_ERR(kms)) {
 		/*
 		 * NOTE: once we have GPU support, having no kms should not
@@ -326,7 +372,7 @@
 	}
 }
 
-static irqreturn_t msm_irq(DRM_IRQ_ARGS)
+static irqreturn_t msm_irq(int irq, void *arg)
 {
 	struct drm_device *dev = arg;
 	struct msm_drm_private *priv = dev->dev_private;
@@ -415,7 +461,7 @@
 
 static int msm_mm_show(struct drm_device *dev, struct seq_file *m)
 {
-	return drm_mm_dump_table(m, dev->mm_private);
+	return drm_mm_dump_table(m, &dev->vma_offset_manager->vm_addr_space_mm);
 }
 
 static int msm_fb_show(struct drm_device *dev, struct seq_file *m)
@@ -778,12 +824,13 @@
 
 static int msm_pdev_probe(struct platform_device *pdev)
 {
+	pdev->dev.coherent_dma_mask = DMA_BIT_MASK(32);
 	return drm_platform_init(&msm_driver, pdev);
 }
 
 static int msm_pdev_remove(struct platform_device *pdev)
 {
-	drm_platform_exit(&msm_driver, pdev);
+	drm_put_dev(platform_get_drvdata(pdev));
 
 	return 0;
 }
@@ -793,12 +840,19 @@
 	{ }
 };
 
+static const struct of_device_id dt_match[] = {
+	{ .compatible = "qcom,mdss_mdp" },
+	{}
+};
+MODULE_DEVICE_TABLE(of, dt_match);
+
 static struct platform_driver msm_platform_driver = {
 	.probe      = msm_pdev_probe,
 	.remove     = msm_pdev_remove,
 	.driver     = {
 		.owner  = THIS_MODULE,
 		.name   = "msm",
+		.of_match_table = dt_match,
 		.pm     = &msm_pm_ops,
 	},
 	.id_table   = msm_id,
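
The no-IOMMU carveout path added above is driven entirely by the new "vram" module parameter, parsed with memparse() so the usual k/m/g suffixes work. As a usage sketch (assuming the parameter keeps the "msm" driver name as its prefix when built in):

	msm.vram=16m            # built-in driver, on the kernel command line
	modprobe msm vram=16m   # modular build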
diff --git a/drivers/gpu/drm/msm/msm_drv.h b/drivers/gpu/drm/msm/msm_drv.h
index d39f086..3d63269 100644
--- a/drivers/gpu/drm/msm/msm_drv.h
+++ b/drivers/gpu/drm/msm/msm_drv.h
@@ -31,6 +31,15 @@
 #include <linux/types.h>
 #include <asm/sizes.h>
 
+
+#if defined(CONFIG_COMPILE_TEST) && !defined(CONFIG_ARCH_MSM)
+/* stubs we need for compile-test: */
+static inline struct device *msm_iommu_get_ctx(const char *ctx_name)
+{
+	return NULL;
+}
+#endif
+
 #ifndef CONFIG_OF
 #include <mach/board.h>
 #include <mach/socinfo.h>
@@ -44,6 +53,7 @@
 
 struct msm_kms;
 struct msm_gpu;
+struct msm_mmu;
 
 #define NUM_DOMAINS 2    /* one for KMS, then one per gpu core (?) */
 
@@ -76,9 +86,9 @@
 	/* callbacks deferred until bo is inactive: */
 	struct list_head fence_cbs;
 
-	/* registered IOMMU domains: */
-	unsigned int num_iommus;
-	struct iommu_domain *iommus[NUM_DOMAINS];
+	/* registered MMUs: */
+	unsigned int num_mmus;
+	struct msm_mmu *mmus[NUM_DOMAINS];
 
 	unsigned int num_planes;
 	struct drm_plane *planes[8];
@@ -94,6 +104,16 @@
 
 	unsigned int num_connectors;
 	struct drm_connector *connectors[8];
+
+	/* VRAM carveout, used when no IOMMU: */
+	struct {
+		unsigned long size;
+		dma_addr_t paddr;
+		/* NOTE: mm managed at the page level, size is in # of pages
+		 * and position mm_node->start is in # of pages:
+		 */
+		struct drm_mm mm;
+	} vram;
 };
 
 struct msm_format {
@@ -114,39 +134,7 @@
 		(_cb)->func = _func;                         \
 	} while (0)
 
-/* As there are different display controller blocks depending on the
- * snapdragon version, the kms support is split out and the appropriate
- * implementation is loaded at runtime.  The kms module is responsible
- * for constructing the appropriate planes/crtcs/encoders/connectors.
- */
-struct msm_kms_funcs {
-	/* hw initialization: */
-	int (*hw_init)(struct msm_kms *kms);
-	/* irq handling: */
-	void (*irq_preinstall)(struct msm_kms *kms);
-	int (*irq_postinstall)(struct msm_kms *kms);
-	void (*irq_uninstall)(struct msm_kms *kms);
-	irqreturn_t (*irq)(struct msm_kms *kms);
-	int (*enable_vblank)(struct msm_kms *kms, struct drm_crtc *crtc);
-	void (*disable_vblank)(struct msm_kms *kms, struct drm_crtc *crtc);
-	/* misc: */
-	const struct msm_format *(*get_format)(struct msm_kms *kms, uint32_t format);
-	long (*round_pixclk)(struct msm_kms *kms, unsigned long rate,
-			struct drm_encoder *encoder);
-	/* cleanup: */
-	void (*preclose)(struct msm_kms *kms, struct drm_file *file);
-	void (*destroy)(struct msm_kms *kms);
-};
-
-struct msm_kms {
-	const struct msm_kms_funcs *funcs;
-};
-
-struct msm_kms *mdp4_kms_init(struct drm_device *dev);
-
-int msm_register_iommu(struct drm_device *dev, struct iommu_domain *iommu);
-int msm_iommu_attach(struct drm_device *dev, struct iommu_domain *iommu,
-		const char **names, int cnt);
+int msm_register_mmu(struct drm_device *dev, struct msm_mmu *mmu);
 
 int msm_wait_fence_interruptable(struct drm_device *dev, uint32_t fence,
 		struct timespec *timeout);
@@ -202,7 +190,9 @@
 
 struct drm_fb_helper *msm_fbdev_init(struct drm_device *dev);
 
-int hdmi_init(struct drm_device *dev, struct drm_encoder *encoder);
+struct hdmi;
+struct hdmi *hdmi_init(struct drm_device *dev, struct drm_encoder *encoder);
+irqreturn_t hdmi_irq(int irq, void *dev_id);
 void __init hdmi_register(void);
 void __exit hdmi_unregister(void);
 
diff --git a/drivers/gpu/drm/msm/msm_fb.c b/drivers/gpu/drm/msm/msm_fb.c
index 0286c0e..81bafdf 100644
--- a/drivers/gpu/drm/msm/msm_fb.c
+++ b/drivers/gpu/drm/msm/msm_fb.c
@@ -16,6 +16,7 @@
  */
 
 #include "msm_drv.h"
+#include "msm_kms.h"
 
 #include "drm_crtc.h"
 #include "drm_crtc_helper.h"
diff --git a/drivers/gpu/drm/msm/msm_gem.c b/drivers/gpu/drm/msm/msm_gem.c
index e587d25..3da8264 100644
--- a/drivers/gpu/drm/msm/msm_gem.c
+++ b/drivers/gpu/drm/msm/msm_gem.c
@@ -22,7 +22,45 @@
 #include "msm_drv.h"
 #include "msm_gem.h"
 #include "msm_gpu.h"
+#include "msm_mmu.h"
 
+static dma_addr_t physaddr(struct drm_gem_object *obj)
+{
+	struct msm_gem_object *msm_obj = to_msm_bo(obj);
+	struct msm_drm_private *priv = obj->dev->dev_private;
+	return (((dma_addr_t)msm_obj->vram_node->start) << PAGE_SHIFT) +
+			priv->vram.paddr;
+}
+
+/* allocate pages from VRAM carveout, used when no IOMMU: */
+static struct page **get_pages_vram(struct drm_gem_object *obj,
+		int npages)
+{
+	struct msm_gem_object *msm_obj = to_msm_bo(obj);
+	struct msm_drm_private *priv = obj->dev->dev_private;
+	dma_addr_t paddr;
+	struct page **p;
+	int ret, i;
+
+	p = drm_malloc_ab(npages, sizeof(struct page *));
+	if (!p)
+		return ERR_PTR(-ENOMEM);
+
+	ret = drm_mm_insert_node(&priv->vram.mm, msm_obj->vram_node,
+			npages, 0, DRM_MM_SEARCH_DEFAULT);
+	if (ret) {
+		drm_free_large(p);
+		return ERR_PTR(ret);
+	}
+
+	paddr = physaddr(obj);
+	for (i = 0; i < npages; i++) {
+		p[i] = phys_to_page(paddr);
+		paddr += PAGE_SIZE;
+	}
+
+	return p;
+}
 
 /* called with dev->struct_mutex held */
 static struct page **get_pages(struct drm_gem_object *obj)
@@ -31,9 +69,14 @@
 
 	if (!msm_obj->pages) {
 		struct drm_device *dev = obj->dev;
-		struct page **p = drm_gem_get_pages(obj, 0);
+		struct page **p;
 		int npages = obj->size >> PAGE_SHIFT;
 
+		if (iommu_present(&platform_bus_type))
+			p = drm_gem_get_pages(obj, 0);
+		else
+			p = get_pages_vram(obj, npages);
+
 		if (IS_ERR(p)) {
 			dev_err(dev->dev, "could not get pages: %ld\n",
 					PTR_ERR(p));
@@ -73,7 +116,11 @@
 		sg_free_table(msm_obj->sgt);
 		kfree(msm_obj->sgt);
 
-		drm_gem_put_pages(obj, msm_obj->pages, true, false);
+		if (iommu_present(&platform_bus_type))
+			drm_gem_put_pages(obj, msm_obj->pages, true, false);
+		else
+			drm_mm_remove_node(msm_obj->vram_node);
+
 		msm_obj->pages = NULL;
 	}
 }
@@ -138,7 +185,6 @@
 int msm_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
 {
 	struct drm_gem_object *obj = vma->vm_private_data;
-	struct msm_gem_object *msm_obj = to_msm_bo(obj);
 	struct drm_device *dev = obj->dev;
 	struct page **pages;
 	unsigned long pfn;
@@ -163,7 +209,7 @@
 	pgoff = ((unsigned long)vmf->virtual_address -
 			vma->vm_start) >> PAGE_SHIFT;
 
-	pfn = page_to_pfn(msm_obj->pages[pgoff]);
+	pfn = page_to_pfn(pages[pgoff]);
 
 	VERB("Inserting %p pfn %lx, pa %lx", vmf->virtual_address,
 			pfn, pfn << PAGE_SHIFT);
@@ -219,67 +265,6 @@
 	return offset;
 }
 
-/* helpers for dealing w/ iommu: */
-static int map_range(struct iommu_domain *domain, unsigned int iova,
-		struct sg_table *sgt, unsigned int len, int prot)
-{
-	struct scatterlist *sg;
-	unsigned int da = iova;
-	unsigned int i, j;
-	int ret;
-
-	if (!domain || !sgt)
-		return -EINVAL;
-
-	for_each_sg(sgt->sgl, sg, sgt->nents, i) {
-		u32 pa = sg_phys(sg) - sg->offset;
-		size_t bytes = sg->length + sg->offset;
-
-		VERB("map[%d]: %08x %08x(%x)", i, iova, pa, bytes);
-
-		ret = iommu_map(domain, da, pa, bytes, prot);
-		if (ret)
-			goto fail;
-
-		da += bytes;
-	}
-
-	return 0;
-
-fail:
-	da = iova;
-
-	for_each_sg(sgt->sgl, sg, i, j) {
-		size_t bytes = sg->length + sg->offset;
-		iommu_unmap(domain, da, bytes);
-		da += bytes;
-	}
-	return ret;
-}
-
-static void unmap_range(struct iommu_domain *domain, unsigned int iova,
-		struct sg_table *sgt, unsigned int len)
-{
-	struct scatterlist *sg;
-	unsigned int da = iova;
-	int i;
-
-	for_each_sg(sgt->sgl, sg, sgt->nents, i) {
-		size_t bytes = sg->length + sg->offset;
-		size_t unmapped;
-
-		unmapped = iommu_unmap(domain, da, bytes);
-		if (unmapped < bytes)
-			break;
-
-		VERB("unmap[%d]: %08x(%x)", i, iova, bytes);
-
-		BUG_ON(!IS_ALIGNED(bytes, PAGE_SIZE));
-
-		da += bytes;
-	}
-}
-
 /* should be called under struct_mutex.. although it can be called
  * from atomic context without struct_mutex to acquire an extra
  * iova ref if you know one is already held.
@@ -295,15 +280,20 @@
 
 	if (!msm_obj->domain[id].iova) {
 		struct msm_drm_private *priv = obj->dev->dev_private;
-		uint32_t offset = (uint32_t)mmap_offset(obj);
-		struct page **pages;
-		pages = get_pages(obj);
+		struct msm_mmu *mmu = priv->mmus[id];
+		struct page **pages = get_pages(obj);
+
 		if (IS_ERR(pages))
 			return PTR_ERR(pages);
-		// XXX ideally we would not map buffers writable when not needed...
-		ret = map_range(priv->iommus[id], offset, msm_obj->sgt,
-				obj->size, IOMMU_READ | IOMMU_WRITE);
-		msm_obj->domain[id].iova = offset;
+
+		if (iommu_present(&platform_bus_type)) {
+			uint32_t offset = (uint32_t)mmap_offset(obj);
+			ret = mmu->funcs->map(mmu, offset, msm_obj->sgt,
+					obj->size, IOMMU_READ | IOMMU_WRITE);
+			msm_obj->domain[id].iova = offset;
+		} else {
+			msm_obj->domain[id].iova = physaddr(obj);
+		}
 	}
 
 	if (!ret)
@@ -514,6 +504,7 @@
 void msm_gem_free_object(struct drm_gem_object *obj)
 {
 	struct drm_device *dev = obj->dev;
+	struct msm_drm_private *priv = obj->dev->dev_private;
 	struct msm_gem_object *msm_obj = to_msm_bo(obj);
 	int id;
 
@@ -525,11 +516,10 @@
 	list_del(&msm_obj->mm_list);
 
 	for (id = 0; id < ARRAY_SIZE(msm_obj->domain); id++) {
-		if (msm_obj->domain[id].iova) {
-			struct msm_drm_private *priv = obj->dev->dev_private;
+		struct msm_mmu *mmu = priv->mmus[id];
+		if (mmu && msm_obj->domain[id].iova) {
 			uint32_t offset = (uint32_t)mmap_offset(obj);
-			unmap_range(priv->iommus[id], offset,
-					msm_obj->sgt, obj->size);
+			mmu->funcs->unmap(mmu, offset, msm_obj->sgt, obj->size);
 		}
 	}
 
@@ -591,6 +581,7 @@
 {
 	struct msm_drm_private *priv = dev->dev_private;
 	struct msm_gem_object *msm_obj;
+	unsigned sz;
 
 	switch (flags & MSM_BO_CACHE_MASK) {
 	case MSM_BO_UNCACHED:
@@ -603,10 +594,17 @@
 		return -EINVAL;
 	}
 
-	msm_obj = kzalloc(sizeof(*msm_obj), GFP_KERNEL);
+	sz = sizeof(*msm_obj);
+	if (!iommu_present(&platform_bus_type))
+		sz += sizeof(struct drm_mm_node);
+
+	msm_obj = kzalloc(sz, GFP_KERNEL);
 	if (!msm_obj)
 		return -ENOMEM;
 
+	if (!iommu_present(&platform_bus_type))
+		msm_obj->vram_node = (void *)&msm_obj[1];
+
 	msm_obj->flags = flags;
 
 	msm_obj->resv = &msm_obj->_resv;
@@ -623,7 +621,7 @@
 struct drm_gem_object *msm_gem_new(struct drm_device *dev,
 		uint32_t size, uint32_t flags)
 {
-	struct drm_gem_object *obj;
+	struct drm_gem_object *obj = NULL;
 	int ret;
 
 	WARN_ON(!mutex_is_locked(&dev->struct_mutex));
@@ -634,15 +632,19 @@
 	if (ret)
 		goto fail;
 
-	ret = drm_gem_object_init(dev, obj, size);
-	if (ret)
-		goto fail;
+	if (iommu_present(&platform_bus_type)) {
+		ret = drm_gem_object_init(dev, obj, size);
+		if (ret)
+			goto fail;
+	} else {
+		drm_gem_private_object_init(dev, obj, size);
+	}
 
 	return obj;
 
 fail:
 	if (obj)
-		drm_gem_object_unreference_unlocked(obj);
+		drm_gem_object_unreference(obj);
 
 	return ERR_PTR(ret);
 }
@@ -654,6 +656,12 @@
 	struct drm_gem_object *obj;
 	int ret, npages;
 
+	/* if we don't have IOMMU, don't bother pretending we can import: */
+	if (!iommu_present(&platform_bus_type)) {
+		dev_err(dev->dev, "cannot import without IOMMU\n");
+		return ERR_PTR(-EINVAL);
+	}
+
 	size = PAGE_ALIGN(size);
 
 	ret = msm_gem_new_impl(dev, size, MSM_BO_WC, &obj);
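
The msm_gem_new_impl() change above sizes a single kzalloc() to carry a trailing drm_mm_node when no IOMMU is present, instead of doing a second allocation. A hedged, stand-alone sketch of the same trick with illustrative names only:

	struct example_obj {
		int flags;
		struct drm_mm_node *vram_node;	/* NULL when an IOMMU is used */
	};

	static struct example_obj *example_obj_new(bool have_iommu)
	{
		size_t sz = sizeof(struct example_obj);
		struct example_obj *obj;

		/* reserve room for the node right behind the object */
		if (!have_iommu)
			sz += sizeof(struct drm_mm_node);

		obj = kzalloc(sz, GFP_KERNEL);
		if (!obj)
			return NULL;

		/* point vram_node at the extra space appended above */
		if (!have_iommu)
			obj->vram_node = (struct drm_mm_node *)&obj[1];

		return obj;
	}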
diff --git a/drivers/gpu/drm/msm/msm_gem.h b/drivers/gpu/drm/msm/msm_gem.h
index f4f23a5..3246bb4 100644
--- a/drivers/gpu/drm/msm/msm_gem.h
+++ b/drivers/gpu/drm/msm/msm_gem.h
@@ -57,6 +57,11 @@
 	/* normally (resv == &_resv) except for imported bo's */
 	struct reservation_object *resv;
 	struct reservation_object _resv;
+
+	/* For physically contiguous buffers.  Used when we don't have
+	 * an IOMMU.
+	 */
+	struct drm_mm_node *vram_node;
 };
 #define to_msm_bo(x) container_of(x, struct msm_gem_object, base)
 
diff --git a/drivers/gpu/drm/msm/msm_gem_submit.c b/drivers/gpu/drm/msm/msm_gem_submit.c
index 5281d4b..5423e91 100644
--- a/drivers/gpu/drm/msm/msm_gem_submit.c
+++ b/drivers/gpu/drm/msm/msm_gem_submit.c
@@ -163,7 +163,7 @@
 
 
 		/* if locking succeeded, pin bo: */
-		ret = msm_gem_get_iova(&msm_obj->base,
+		ret = msm_gem_get_iova_locked(&msm_obj->base,
 				submit->gpu->id, &iova);
 
 		/* this would break the logic in the fail path.. there is no
@@ -247,7 +247,7 @@
 	/* For now, just map the entire thing.  Eventually we probably
 	 * want to do it page-by-page, w/ kmap() if not vmap()d..
 	 */
-	ptr = msm_gem_vaddr(&obj->base);
+	ptr = msm_gem_vaddr_locked(&obj->base);
 
 	if (IS_ERR(ptr)) {
 		ret = PTR_ERR(ptr);
@@ -307,14 +307,12 @@
 {
 	unsigned i;
 
-	mutex_lock(&submit->dev->struct_mutex);
 	for (i = 0; i < submit->nr_bos; i++) {
 		struct msm_gem_object *msm_obj = submit->bos[i].obj;
 		submit_unlock_unpin_bo(submit, i);
 		list_del_init(&msm_obj->submit_entry);
 		drm_gem_object_unreference(&msm_obj->base);
 	}
-	mutex_unlock(&submit->dev->struct_mutex);
 
 	ww_acquire_fini(&submit->ticket);
 	kfree(submit);
@@ -342,6 +340,8 @@
 	if (args->nr_cmds > MAX_CMDS)
 		return -EINVAL;
 
+	mutex_lock(&dev->struct_mutex);
+
 	submit = submit_create(dev, gpu, args->nr_bos);
 	if (!submit) {
 		ret = -ENOMEM;
@@ -410,5 +410,6 @@
 out:
 	if (submit)
 		submit_cleanup(submit, !!ret);
+	mutex_unlock(&dev->struct_mutex);
 	return ret;
 }
diff --git a/drivers/gpu/drm/msm/msm_gpu.c b/drivers/gpu/drm/msm/msm_gpu.c
index 4583d61..0cfe3f4 100644
--- a/drivers/gpu/drm/msm/msm_gpu.c
+++ b/drivers/gpu/drm/msm/msm_gpu.c
@@ -17,6 +17,7 @@
 
 #include "msm_gpu.h"
 #include "msm_gem.h"
+#include "msm_mmu.h"
 
 
 /*
@@ -25,20 +26,10 @@
 
 #ifdef CONFIG_MSM_BUS_SCALING
 #include <mach/board.h>
-#include <mach/kgsl.h>
-static void bs_init(struct msm_gpu *gpu, struct platform_device *pdev)
+static void bs_init(struct msm_gpu *gpu)
 {
-	struct drm_device *dev = gpu->dev;
-	struct kgsl_device_platform_data *pdata;
-
-	if (!pdev) {
-		dev_err(dev->dev, "could not find dtv pdata\n");
-		return;
-	}
-
-	pdata = pdev->dev.platform_data;
-	if (pdata->bus_scale_table) {
-		gpu->bsc = msm_bus_scale_register_client(pdata->bus_scale_table);
+	if (gpu->bus_scale_table) {
+		gpu->bsc = msm_bus_scale_register_client(gpu->bus_scale_table);
 		DBG("bus scale client: %08x", gpu->bsc);
 	}
 }
@@ -59,7 +50,7 @@
 	}
 }
 #else
-static void bs_init(struct msm_gpu *gpu, struct platform_device *pdev) {}
+static void bs_init(struct msm_gpu *gpu) {}
 static void bs_fini(struct msm_gpu *gpu) {}
 static void bs_set(struct msm_gpu *gpu, int idx) {}
 #endif
@@ -307,8 +298,6 @@
 	struct msm_drm_private *priv = dev->dev_private;
 	int i, ret;
 
-	mutex_lock(&dev->struct_mutex);
-
 	submit->fence = ++priv->next_fence;
 
 	gpu->submitted_fence = submit->fence;
@@ -340,7 +329,6 @@
 			msm_gem_move_to_active(&msm_obj->base, gpu, true, submit->fence);
 	}
 	hangcheck_timer_reset(gpu);
-	mutex_unlock(&dev->struct_mutex);
 
 	return ret;
 }
@@ -363,6 +351,7 @@
 		struct msm_gpu *gpu, const struct msm_gpu_funcs *funcs,
 		const char *name, const char *ioname, const char *irqname, int ringsz)
 {
+	struct iommu_domain *iommu;
 	int i, ret;
 
 	gpu->dev = drm;
@@ -428,13 +417,14 @@
 	 * and have separate page tables per context.  For now, to keep things
 	 * simple and to get something working, just use a single address space:
 	 */
-	gpu->iommu = iommu_domain_alloc(&platform_bus_type);
-	if (!gpu->iommu) {
-		dev_err(drm->dev, "failed to allocate IOMMU\n");
-		ret = -ENOMEM;
-		goto fail;
+	iommu = iommu_domain_alloc(&platform_bus_type);
+	if (iommu) {
+		dev_info(drm->dev, "%s: using IOMMU\n", name);
+		gpu->mmu = msm_iommu_new(drm, iommu);
+	} else {
+		dev_info(drm->dev, "%s: no IOMMU, fallback to VRAM carveout!\n", name);
 	}
-	gpu->id = msm_register_iommu(drm, gpu->iommu);
+	gpu->id = msm_register_mmu(drm, gpu->mmu);
 
 	/* Create ringbuffer: */
 	gpu->rb = msm_ringbuffer_new(gpu, ringsz);
@@ -452,7 +442,7 @@
 		goto fail;
 	}
 
-	bs_init(gpu, pdev);
+	bs_init(gpu);
 
 	return 0;
 
@@ -474,6 +464,6 @@
 		msm_ringbuffer_destroy(gpu->rb);
 	}
 
-	if (gpu->iommu)
-		iommu_domain_free(gpu->iommu);
+	if (gpu->mmu)
+		gpu->mmu->funcs->destroy(gpu->mmu);
 }
diff --git a/drivers/gpu/drm/msm/msm_gpu.h b/drivers/gpu/drm/msm/msm_gpu.h
index 8cd829e..458db8c 100644
--- a/drivers/gpu/drm/msm/msm_gpu.h
+++ b/drivers/gpu/drm/msm/msm_gpu.h
@@ -78,14 +78,18 @@
 	void __iomem *mmio;
 	int irq;
 
-	struct iommu_domain *iommu;
+	struct msm_mmu *mmu;
 	int id;
 
 	/* Power Control: */
 	struct regulator *gpu_reg, *gpu_cx;
 	struct clk *ebi1_clk, *grp_clks[5];
 	uint32_t fast_rate, slow_rate, bus_freq;
+
+#ifdef CONFIG_MSM_BUS_SCALING
+	struct msm_bus_scale_pdata *bus_scale_table;
 	uint32_t bsc;
+#endif
 
 	/* Hang Detection: */
 #define DRM_MSM_HANGCHECK_PERIOD 500 /* in ms */
diff --git a/drivers/gpu/drm/msm/msm_iommu.c b/drivers/gpu/drm/msm/msm_iommu.c
new file mode 100644
index 0000000..92b7459
--- /dev/null
+++ b/drivers/gpu/drm/msm/msm_iommu.c
@@ -0,0 +1,148 @@
+/*
+ * Copyright (C) 2013 Red Hat
+ * Author: Rob Clark <robdclark@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "msm_drv.h"
+#include "msm_mmu.h"
+
+struct msm_iommu {
+	struct msm_mmu base;
+	struct iommu_domain *domain;
+};
+#define to_msm_iommu(x) container_of(x, struct msm_iommu, base)
+
+static int msm_fault_handler(struct iommu_domain *iommu, struct device *dev,
+		unsigned long iova, int flags, void *arg)
+{
+	DBG("*** fault: iova=%08lx, flags=%d", iova, flags);
+	return 0;
+}
+
+static int msm_iommu_attach(struct msm_mmu *mmu, const char **names, int cnt)
+{
+	struct drm_device *dev = mmu->dev;
+	struct msm_iommu *iommu = to_msm_iommu(mmu);
+	int i, ret;
+
+	for (i = 0; i < cnt; i++) {
+		struct device *msm_iommu_get_ctx(const char *ctx_name);
+		struct device *ctx = msm_iommu_get_ctx(names[i]);
+		if (IS_ERR_OR_NULL(ctx))
+			continue;
+		ret = iommu_attach_device(iommu->domain, ctx);
+		if (ret) {
+			dev_warn(dev->dev, "could not attach iommu to %s", names[i]);
+			return ret;
+		}
+	}
+
+	return 0;
+}
+
+static int msm_iommu_map(struct msm_mmu *mmu, uint32_t iova,
+		struct sg_table *sgt, unsigned len, int prot)
+{
+	struct msm_iommu *iommu = to_msm_iommu(mmu);
+	struct iommu_domain *domain = iommu->domain;
+	struct scatterlist *sg;
+	unsigned int da = iova;
+	unsigned int i, j;
+	int ret;
+
+	if (!domain || !sgt)
+		return -EINVAL;
+
+	for_each_sg(sgt->sgl, sg, sgt->nents, i) {
+		u32 pa = sg_phys(sg) - sg->offset;
+		size_t bytes = sg->length + sg->offset;
+
+		VERB("map[%d]: %08x %08x(%x)", i, iova, pa, bytes);
+
+		ret = iommu_map(domain, da, pa, bytes, prot);
+		if (ret)
+			goto fail;
+
+		da += bytes;
+	}
+
+	return 0;
+
+fail:
+	da = iova;
+
+	for_each_sg(sgt->sgl, sg, i, j) {
+		size_t bytes = sg->length + sg->offset;
+		iommu_unmap(domain, da, bytes);
+		da += bytes;
+	}
+	return ret;
+}
+
+static int msm_iommu_unmap(struct msm_mmu *mmu, uint32_t iova,
+		struct sg_table *sgt, unsigned len)
+{
+	struct msm_iommu *iommu = to_msm_iommu(mmu);
+	struct iommu_domain *domain = iommu->domain;
+	struct scatterlist *sg;
+	unsigned int da = iova;
+	int i;
+
+	for_each_sg(sgt->sgl, sg, sgt->nents, i) {
+		size_t bytes = sg->length + sg->offset;
+		size_t unmapped;
+
+		unmapped = iommu_unmap(domain, da, bytes);
+		if (unmapped < bytes)
+			return unmapped;
+
+		VERB("unmap[%d]: %08x(%x)", i, iova, bytes);
+
+		BUG_ON(!IS_ALIGNED(bytes, PAGE_SIZE));
+
+		da += bytes;
+	}
+
+	return 0;
+}
+
+static void msm_iommu_destroy(struct msm_mmu *mmu)
+{
+	struct msm_iommu *iommu = to_msm_iommu(mmu);
+	iommu_domain_free(iommu->domain);
+	kfree(iommu);
+}
+
+static const struct msm_mmu_funcs funcs = {
+		.attach = msm_iommu_attach,
+		.map = msm_iommu_map,
+		.unmap = msm_iommu_unmap,
+		.destroy = msm_iommu_destroy,
+};
+
+struct msm_mmu *msm_iommu_new(struct drm_device *dev, struct iommu_domain *domain)
+{
+	struct msm_iommu *iommu;
+
+	iommu = kzalloc(sizeof(*iommu), GFP_KERNEL);
+	if (!iommu)
+		return ERR_PTR(-ENOMEM);
+
+	iommu->domain = domain;
+	msm_mmu_init(&iommu->base, dev, &funcs);
+	iommu_set_fault_handler(domain, msm_fault_handler, dev);
+
+	return &iommu->base;
+}
diff --git a/drivers/gpu/drm/msm/msm_kms.h b/drivers/gpu/drm/msm/msm_kms.h
new file mode 100644
index 0000000..0643774
--- /dev/null
+++ b/drivers/gpu/drm/msm/msm_kms.h
@@ -0,0 +1,68 @@
+/*
+ * Copyright (C) 2013 Red Hat
+ * Author: Rob Clark <robdclark@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef __MSM_KMS_H__
+#define __MSM_KMS_H__
+
+#include <linux/clk.h>
+#include <linux/regulator/consumer.h>
+
+#include "msm_drv.h"
+
+/* As there are different display controller blocks depending on the
+ * snapdragon version, the kms support is split out and the appropriate
+ * implementation is loaded at runtime.  The kms module is responsible
+ * for constructing the appropriate planes/crtcs/encoders/connectors.
+ */
+struct msm_kms_funcs {
+	/* hw initialization: */
+	int (*hw_init)(struct msm_kms *kms);
+	/* irq handling: */
+	void (*irq_preinstall)(struct msm_kms *kms);
+	int (*irq_postinstall)(struct msm_kms *kms);
+	void (*irq_uninstall)(struct msm_kms *kms);
+	irqreturn_t (*irq)(struct msm_kms *kms);
+	int (*enable_vblank)(struct msm_kms *kms, struct drm_crtc *crtc);
+	void (*disable_vblank)(struct msm_kms *kms, struct drm_crtc *crtc);
+	/* misc: */
+	const struct msm_format *(*get_format)(struct msm_kms *kms, uint32_t format);
+	long (*round_pixclk)(struct msm_kms *kms, unsigned long rate,
+			struct drm_encoder *encoder);
+	/* cleanup: */
+	void (*preclose)(struct msm_kms *kms, struct drm_file *file);
+	void (*destroy)(struct msm_kms *kms);
+};
+
+struct msm_kms {
+	const struct msm_kms_funcs *funcs;
+
+	/* irq handling: */
+	bool in_irq;
+	struct list_head irq_list;    /* list of mdp4_irq */
+	uint32_t vblank_mask;         /* irq bits set for userspace vblank */
+};
+
+static inline void msm_kms_init(struct msm_kms *kms,
+		const struct msm_kms_funcs *funcs)
+{
+	kms->funcs = funcs;
+}
+
+struct msm_kms *mdp4_kms_init(struct drm_device *dev);
+struct msm_kms *mdp5_kms_init(struct drm_device *dev);
+
+#endif /* __MSM_KMS_H__ */
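
A hedged skeleton of how a display-block backend plugs into this interface: embed struct msm_kms, fill in msm_kms_funcs, and return the base object from its <block>_kms_init() entry point, as mdp4/mdp5 do. All names below are illustrative, and only hw_init is shown:

	struct example_kms {
		struct msm_kms base;
		/* block-specific state would live here */
	};

	static int example_hw_init(struct msm_kms *kms)
	{
		return 0;
	}

	static const struct msm_kms_funcs example_kms_funcs = {
		.hw_init = example_hw_init,
		/* .irq*, .enable_vblank, .get_format, ... as required */
	};

	struct msm_kms *example_kms_init(struct drm_device *dev)
	{
		struct example_kms *kms = kzalloc(sizeof(*kms), GFP_KERNEL);

		if (!kms)
			return ERR_PTR(-ENOMEM);

		msm_kms_init(&kms->base, &example_kms_funcs);
		return &kms->base;
	}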
diff --git a/drivers/gpu/drm/msm/msm_mmu.h b/drivers/gpu/drm/msm/msm_mmu.h
new file mode 100644
index 0000000..0303244
--- /dev/null
+++ b/drivers/gpu/drm/msm/msm_mmu.h
@@ -0,0 +1,47 @@
+/*
+ * Copyright (C) 2013 Red Hat
+ * Author: Rob Clark <robdclark@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef __MSM_MMU_H__
+#define __MSM_MMU_H__
+
+#include <linux/iommu.h>
+
+struct msm_mmu_funcs {
+	int (*attach)(struct msm_mmu *mmu, const char **names, int cnt);
+	int (*map)(struct msm_mmu *mmu, uint32_t iova, struct sg_table *sgt,
+			unsigned len, int prot);
+	int (*unmap)(struct msm_mmu *mmu, uint32_t iova, struct sg_table *sgt,
+			unsigned len);
+	void (*destroy)(struct msm_mmu *mmu);
+};
+
+struct msm_mmu {
+	const struct msm_mmu_funcs *funcs;
+	struct drm_device *dev;
+};
+
+static inline void msm_mmu_init(struct msm_mmu *mmu, struct drm_device *dev,
+		const struct msm_mmu_funcs *funcs)
+{
+	mmu->dev = dev;
+	mmu->funcs = funcs;
+}
+
+struct msm_mmu *msm_iommu_new(struct drm_device *dev, struct iommu_domain *domain);
+struct msm_mmu *msm_gpummu_new(struct drm_device *dev, struct msm_gpu *gpu);
+
+#endif /* __MSM_MMU_H__ */
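
A short sketch of the intended flow, pieced together from the msm_drv.c, msm_gpu.c and msm_gem.c hunks above: a backend wraps its translation hardware in a struct msm_mmu, registers it to obtain an address-space id, and the GEM code later maps buffers through priv->mmus[id]->funcs->map(). The wrapper function below is hypothetical:

	static int example_setup_gpu_mmu(struct drm_device *drm,
			struct iommu_domain *domain)
	{
		struct msm_mmu *mmu = msm_iommu_new(drm, domain);

		if (IS_ERR(mmu))
			return PTR_ERR(mmu);

		/* the returned index is what msm_gem_get_iova*() takes as 'id' */
		return msm_register_mmu(drm, mmu);
	}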
diff --git a/drivers/gpu/drm/nouveau/Makefile b/drivers/gpu/drm/nouveau/Makefile
index b3fa1ba..d310c19 100644
--- a/drivers/gpu/drm/nouveau/Makefile
+++ b/drivers/gpu/drm/nouveau/Makefile
@@ -41,6 +41,7 @@
 nouveau-y += core/subdev/bios/mxm.o
 nouveau-y += core/subdev/bios/perf.o
 nouveau-y += core/subdev/bios/pll.o
+nouveau-y += core/subdev/bios/ramcfg.o
 nouveau-y += core/subdev/bios/rammap.o
 nouveau-y += core/subdev/bios/timing.o
 nouveau-y += core/subdev/bios/therm.o
@@ -71,7 +72,10 @@
 nouveau-y += core/subdev/devinit/nv1a.o
 nouveau-y += core/subdev/devinit/nv20.o
 nouveau-y += core/subdev/devinit/nv50.o
+nouveau-y += core/subdev/devinit/nv84.o
+nouveau-y += core/subdev/devinit/nv98.o
 nouveau-y += core/subdev/devinit/nva3.o
+nouveau-y += core/subdev/devinit/nvaf.o
 nouveau-y += core/subdev/devinit/nvc0.o
 nouveau-y += core/subdev/fb/base.o
 nouveau-y += core/subdev/fb/nv04.o
@@ -137,6 +141,7 @@
 nouveau-y += core/subdev/mc/nv04.o
 nouveau-y += core/subdev/mc/nv40.o
 nouveau-y += core/subdev/mc/nv44.o
+nouveau-y += core/subdev/mc/nv4c.o
 nouveau-y += core/subdev/mc/nv50.o
 nouveau-y += core/subdev/mc/nv94.o
 nouveau-y += core/subdev/mc/nv98.o
@@ -232,6 +237,7 @@
 nouveau-y += core/engine/fifo/nv84.o
 nouveau-y += core/engine/fifo/nvc0.o
 nouveau-y += core/engine/fifo/nve0.o
+nouveau-y += core/engine/fifo/nv108.o
 nouveau-y += core/engine/graph/ctxnv40.o
 nouveau-y += core/engine/graph/ctxnv50.o
 nouveau-y += core/engine/graph/ctxnvc0.o
@@ -242,6 +248,7 @@
 nouveau-y += core/engine/graph/ctxnvd9.o
 nouveau-y += core/engine/graph/ctxnve4.o
 nouveau-y += core/engine/graph/ctxnvf0.o
+nouveau-y += core/engine/graph/ctxnv108.o
 nouveau-y += core/engine/graph/nv04.o
 nouveau-y += core/engine/graph/nv10.o
 nouveau-y += core/engine/graph/nv20.o
@@ -260,6 +267,7 @@
 nouveau-y += core/engine/graph/nvd9.o
 nouveau-y += core/engine/graph/nve4.o
 nouveau-y += core/engine/graph/nvf0.o
+nouveau-y += core/engine/graph/nv108.o
 nouveau-y += core/engine/mpeg/nv31.o
 nouveau-y += core/engine/mpeg/nv40.o
 nouveau-y += core/engine/mpeg/nv44.o
diff --git a/drivers/gpu/drm/nouveau/core/core/engine.c b/drivers/gpu/drm/nouveau/core/core/engine.c
index c8bed4a..1f6954a 100644
--- a/drivers/gpu/drm/nouveau/core/core/engine.c
+++ b/drivers/gpu/drm/nouveau/core/core/engine.c
@@ -42,11 +42,24 @@
 	if (ret)
 		return ret;
 
-	if ( parent &&
-	    !nouveau_boolopt(nv_device(parent)->cfgopt, iname, enable)) {
-		if (!enable)
-			nv_warn(engine, "disabled, %s=1 to enable\n", iname);
-		return -ENODEV;
+	if (parent) {
+		struct nouveau_device *device = nv_device(parent);
+		int engidx = nv_engidx(nv_object(engine));
+
+		if (device->disable_mask & (1ULL << engidx)) {
+			if (!nouveau_boolopt(device->cfgopt, iname, false)) {
+				nv_debug(engine, "engine disabled by hw/fw\n");
+				return -ENODEV;
+			}
+
+			nv_warn(engine, "ignoring hw/fw engine disable\n");
+		}
+
+		if (!nouveau_boolopt(device->cfgopt, iname, enable)) {
+			if (!enable)
+				nv_warn(engine, "disabled, %s=1 to enable\n", iname);
+			return -ENODEV;
+		}
 	}
 
 	INIT_LIST_HEAD(&engine->contexts);
diff --git a/drivers/gpu/drm/nouveau/core/engine/copy/nvc0.c b/drivers/gpu/drm/nouveau/core/engine/copy/nvc0.c
index 993df09..ac3291f 100644
--- a/drivers/gpu/drm/nouveau/core/engine/copy/nvc0.c
+++ b/drivers/gpu/drm/nouveau/core/engine/copy/nvc0.c
@@ -105,9 +105,6 @@
 	struct nvc0_copy_priv *priv;
 	int ret;
 
-	if (nv_rd32(parent, 0x022500) & 0x00000100)
-		return -ENODEV;
-
 	ret = nouveau_falcon_create(parent, engine, oclass, 0x104000, true,
 				    "PCE0", "copy0", &priv);
 	*pobject = nv_object(priv);
@@ -133,9 +130,6 @@
 	struct nvc0_copy_priv *priv;
 	int ret;
 
-	if (nv_rd32(parent, 0x022500) & 0x00000200)
-		return -ENODEV;
-
 	ret = nouveau_falcon_create(parent, engine, oclass, 0x105000, true,
 				    "PCE1", "copy1", &priv);
 	*pobject = nv_object(priv);
diff --git a/drivers/gpu/drm/nouveau/core/engine/copy/nve0.c b/drivers/gpu/drm/nouveau/core/engine/copy/nve0.c
index 30f1ef1..748a61e 100644
--- a/drivers/gpu/drm/nouveau/core/engine/copy/nve0.c
+++ b/drivers/gpu/drm/nouveau/core/engine/copy/nve0.c
@@ -88,9 +88,6 @@
 	struct nve0_copy_priv *priv;
 	int ret;
 
-	if (nv_rd32(parent, 0x022500) & 0x00000100)
-		return -ENODEV;
-
 	ret = nouveau_engine_create(parent, engine, oclass, true,
 				    "PCE0", "copy0", &priv);
 	*pobject = nv_object(priv);
@@ -112,9 +109,6 @@
 	struct nve0_copy_priv *priv;
 	int ret;
 
-	if (nv_rd32(parent, 0x022500) & 0x00000200)
-		return -ENODEV;
-
 	ret = nouveau_engine_create(parent, engine, oclass, true,
 				    "PCE1", "copy1", &priv);
 	*pobject = nv_object(priv);
diff --git a/drivers/gpu/drm/nouveau/core/engine/device/nv04.c b/drivers/gpu/drm/nouveau/core/engine/device/nv04.c
index dbd2dde..32113b0 100644
--- a/drivers/gpu/drm/nouveau/core/engine/device/nv04.c
+++ b/drivers/gpu/drm/nouveau/core/engine/device/nv04.c
@@ -49,12 +49,12 @@
 		device->oclass[NVDEV_SUBDEV_VBIOS  ] = &nouveau_bios_oclass;
 		device->oclass[NVDEV_SUBDEV_I2C    ] = &nv04_i2c_oclass;
 		device->oclass[NVDEV_SUBDEV_CLOCK  ] = &nv04_clock_oclass;
-		device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv04_devinit_oclass;
+		device->oclass[NVDEV_SUBDEV_DEVINIT] =  nv04_devinit_oclass;
 		device->oclass[NVDEV_SUBDEV_MC     ] =  nv04_mc_oclass;
 		device->oclass[NVDEV_SUBDEV_BUS    ] =  nv04_bus_oclass;
 		device->oclass[NVDEV_SUBDEV_TIMER  ] = &nv04_timer_oclass;
 		device->oclass[NVDEV_SUBDEV_FB     ] =  nv04_fb_oclass;
-		device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv04_instmem_oclass;
+		device->oclass[NVDEV_SUBDEV_INSTMEM] =  nv04_instmem_oclass;
 		device->oclass[NVDEV_SUBDEV_VM     ] = &nv04_vmmgr_oclass;
 		device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass;
 		device->oclass[NVDEV_ENGINE_FIFO   ] =  nv04_fifo_oclass;
@@ -67,12 +67,12 @@
 		device->oclass[NVDEV_SUBDEV_VBIOS  ] = &nouveau_bios_oclass;
 		device->oclass[NVDEV_SUBDEV_I2C    ] = &nv04_i2c_oclass;
 		device->oclass[NVDEV_SUBDEV_CLOCK  ] = &nv04_clock_oclass;
-		device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv05_devinit_oclass;
+		device->oclass[NVDEV_SUBDEV_DEVINIT] =  nv05_devinit_oclass;
 		device->oclass[NVDEV_SUBDEV_MC     ] =  nv04_mc_oclass;
 		device->oclass[NVDEV_SUBDEV_BUS    ] =  nv04_bus_oclass;
 		device->oclass[NVDEV_SUBDEV_TIMER  ] = &nv04_timer_oclass;
 		device->oclass[NVDEV_SUBDEV_FB     ] =  nv04_fb_oclass;
-		device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv04_instmem_oclass;
+		device->oclass[NVDEV_SUBDEV_INSTMEM] =  nv04_instmem_oclass;
 		device->oclass[NVDEV_SUBDEV_VM     ] = &nv04_vmmgr_oclass;
 		device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass;
 		device->oclass[NVDEV_ENGINE_FIFO   ] =  nv04_fifo_oclass;
diff --git a/drivers/gpu/drm/nouveau/core/engine/device/nv10.c b/drivers/gpu/drm/nouveau/core/engine/device/nv10.c
index 6e03dd6..744f15d 100644
--- a/drivers/gpu/drm/nouveau/core/engine/device/nv10.c
+++ b/drivers/gpu/drm/nouveau/core/engine/device/nv10.c
@@ -51,12 +51,12 @@
 		device->oclass[NVDEV_SUBDEV_GPIO   ] = &nv10_gpio_oclass;
 		device->oclass[NVDEV_SUBDEV_I2C    ] = &nv04_i2c_oclass;
 		device->oclass[NVDEV_SUBDEV_CLOCK  ] = &nv04_clock_oclass;
-		device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv10_devinit_oclass;
+		device->oclass[NVDEV_SUBDEV_DEVINIT] =  nv10_devinit_oclass;
 		device->oclass[NVDEV_SUBDEV_MC     ] =  nv04_mc_oclass;
 		device->oclass[NVDEV_SUBDEV_BUS    ] =  nv04_bus_oclass;
 		device->oclass[NVDEV_SUBDEV_TIMER  ] = &nv04_timer_oclass;
 		device->oclass[NVDEV_SUBDEV_FB     ] =  nv10_fb_oclass;
-		device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv04_instmem_oclass;
+		device->oclass[NVDEV_SUBDEV_INSTMEM] =  nv04_instmem_oclass;
 		device->oclass[NVDEV_SUBDEV_VM     ] = &nv04_vmmgr_oclass;
 		device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass;
 		device->oclass[NVDEV_ENGINE_GR     ] = &nv10_graph_oclass;
@@ -68,12 +68,12 @@
 		device->oclass[NVDEV_SUBDEV_GPIO   ] = &nv10_gpio_oclass;
 		device->oclass[NVDEV_SUBDEV_I2C    ] = &nv04_i2c_oclass;
 		device->oclass[NVDEV_SUBDEV_CLOCK  ] = &nv04_clock_oclass;
-		device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv10_devinit_oclass;
+		device->oclass[NVDEV_SUBDEV_DEVINIT] =  nv10_devinit_oclass;
 		device->oclass[NVDEV_SUBDEV_MC     ] =  nv04_mc_oclass;
 		device->oclass[NVDEV_SUBDEV_BUS    ] =  nv04_bus_oclass;
 		device->oclass[NVDEV_SUBDEV_TIMER  ] = &nv04_timer_oclass;
 		device->oclass[NVDEV_SUBDEV_FB     ] =  nv10_fb_oclass;
-		device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv04_instmem_oclass;
+		device->oclass[NVDEV_SUBDEV_INSTMEM] =  nv04_instmem_oclass;
 		device->oclass[NVDEV_SUBDEV_VM     ] = &nv04_vmmgr_oclass;
 		device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass;
 		device->oclass[NVDEV_ENGINE_FIFO   ] =  nv10_fifo_oclass;
@@ -87,12 +87,12 @@
 		device->oclass[NVDEV_SUBDEV_GPIO   ] = &nv10_gpio_oclass;
 		device->oclass[NVDEV_SUBDEV_I2C    ] = &nv04_i2c_oclass;
 		device->oclass[NVDEV_SUBDEV_CLOCK  ] = &nv04_clock_oclass;
-		device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv10_devinit_oclass;
+		device->oclass[NVDEV_SUBDEV_DEVINIT] =  nv10_devinit_oclass;
 		device->oclass[NVDEV_SUBDEV_MC     ] =  nv04_mc_oclass;
 		device->oclass[NVDEV_SUBDEV_BUS    ] =  nv04_bus_oclass;
 		device->oclass[NVDEV_SUBDEV_TIMER  ] = &nv04_timer_oclass;
 		device->oclass[NVDEV_SUBDEV_FB     ] =  nv10_fb_oclass;
-		device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv04_instmem_oclass;
+		device->oclass[NVDEV_SUBDEV_INSTMEM] =  nv04_instmem_oclass;
 		device->oclass[NVDEV_SUBDEV_VM     ] = &nv04_vmmgr_oclass;
 		device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass;
 		device->oclass[NVDEV_ENGINE_FIFO   ] =  nv10_fifo_oclass;
@@ -106,12 +106,12 @@
 		device->oclass[NVDEV_SUBDEV_GPIO   ] = &nv10_gpio_oclass;
 		device->oclass[NVDEV_SUBDEV_I2C    ] = &nv04_i2c_oclass;
 		device->oclass[NVDEV_SUBDEV_CLOCK  ] = &nv04_clock_oclass;
-		device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv1a_devinit_oclass;
+		device->oclass[NVDEV_SUBDEV_DEVINIT] =  nv1a_devinit_oclass;
 		device->oclass[NVDEV_SUBDEV_MC     ] =  nv04_mc_oclass;
 		device->oclass[NVDEV_SUBDEV_BUS    ] =  nv04_bus_oclass;
 		device->oclass[NVDEV_SUBDEV_TIMER  ] = &nv04_timer_oclass;
 		device->oclass[NVDEV_SUBDEV_FB     ] =  nv1a_fb_oclass;
-		device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv04_instmem_oclass;
+		device->oclass[NVDEV_SUBDEV_INSTMEM] =  nv04_instmem_oclass;
 		device->oclass[NVDEV_SUBDEV_VM     ] = &nv04_vmmgr_oclass;
 		device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass;
 		device->oclass[NVDEV_ENGINE_FIFO   ] =  nv10_fifo_oclass;
@@ -125,12 +125,12 @@
 		device->oclass[NVDEV_SUBDEV_GPIO   ] = &nv10_gpio_oclass;
 		device->oclass[NVDEV_SUBDEV_I2C    ] = &nv04_i2c_oclass;
 		device->oclass[NVDEV_SUBDEV_CLOCK  ] = &nv04_clock_oclass;
-		device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv10_devinit_oclass;
+		device->oclass[NVDEV_SUBDEV_DEVINIT] =  nv10_devinit_oclass;
 		device->oclass[NVDEV_SUBDEV_MC     ] =  nv04_mc_oclass;
 		device->oclass[NVDEV_SUBDEV_BUS    ] =  nv04_bus_oclass;
 		device->oclass[NVDEV_SUBDEV_TIMER  ] = &nv04_timer_oclass;
 		device->oclass[NVDEV_SUBDEV_FB     ] =  nv10_fb_oclass;
-		device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv04_instmem_oclass;
+		device->oclass[NVDEV_SUBDEV_INSTMEM] =  nv04_instmem_oclass;
 		device->oclass[NVDEV_SUBDEV_VM     ] = &nv04_vmmgr_oclass;
 		device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass;
 		device->oclass[NVDEV_ENGINE_FIFO   ] =  nv10_fifo_oclass;
@@ -144,12 +144,12 @@
 		device->oclass[NVDEV_SUBDEV_GPIO   ] = &nv10_gpio_oclass;
 		device->oclass[NVDEV_SUBDEV_I2C    ] = &nv04_i2c_oclass;
 		device->oclass[NVDEV_SUBDEV_CLOCK  ] = &nv04_clock_oclass;
-		device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv10_devinit_oclass;
+		device->oclass[NVDEV_SUBDEV_DEVINIT] =  nv10_devinit_oclass;
 		device->oclass[NVDEV_SUBDEV_MC     ] =  nv04_mc_oclass;
 		device->oclass[NVDEV_SUBDEV_BUS    ] =  nv04_bus_oclass;
 		device->oclass[NVDEV_SUBDEV_TIMER  ] = &nv04_timer_oclass;
 		device->oclass[NVDEV_SUBDEV_FB     ] =  nv10_fb_oclass;
-		device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv04_instmem_oclass;
+		device->oclass[NVDEV_SUBDEV_INSTMEM] =  nv04_instmem_oclass;
 		device->oclass[NVDEV_SUBDEV_VM     ] = &nv04_vmmgr_oclass;
 		device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass;
 		device->oclass[NVDEV_ENGINE_FIFO   ] =  nv17_fifo_oclass;
@@ -163,12 +163,12 @@
 		device->oclass[NVDEV_SUBDEV_GPIO   ] = &nv10_gpio_oclass;
 		device->oclass[NVDEV_SUBDEV_I2C    ] = &nv04_i2c_oclass;
 		device->oclass[NVDEV_SUBDEV_CLOCK  ] = &nv04_clock_oclass;
-		device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv1a_devinit_oclass;
+		device->oclass[NVDEV_SUBDEV_DEVINIT] =  nv1a_devinit_oclass;
 		device->oclass[NVDEV_SUBDEV_MC     ] =  nv04_mc_oclass;
 		device->oclass[NVDEV_SUBDEV_BUS    ] =  nv04_bus_oclass;
 		device->oclass[NVDEV_SUBDEV_TIMER  ] = &nv04_timer_oclass;
 		device->oclass[NVDEV_SUBDEV_FB     ] =  nv1a_fb_oclass;
-		device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv04_instmem_oclass;
+		device->oclass[NVDEV_SUBDEV_INSTMEM] =  nv04_instmem_oclass;
 		device->oclass[NVDEV_SUBDEV_VM     ] = &nv04_vmmgr_oclass;
 		device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass;
 		device->oclass[NVDEV_ENGINE_FIFO   ] =  nv17_fifo_oclass;
@@ -182,12 +182,12 @@
 		device->oclass[NVDEV_SUBDEV_GPIO   ] = &nv10_gpio_oclass;
 		device->oclass[NVDEV_SUBDEV_I2C    ] = &nv04_i2c_oclass;
 		device->oclass[NVDEV_SUBDEV_CLOCK  ] = &nv04_clock_oclass;
-		device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv10_devinit_oclass;
+		device->oclass[NVDEV_SUBDEV_DEVINIT] =  nv10_devinit_oclass;
 		device->oclass[NVDEV_SUBDEV_MC     ] =  nv04_mc_oclass;
 		device->oclass[NVDEV_SUBDEV_BUS    ] =  nv04_bus_oclass;
 		device->oclass[NVDEV_SUBDEV_TIMER  ] = &nv04_timer_oclass;
 		device->oclass[NVDEV_SUBDEV_FB     ] =  nv10_fb_oclass;
-		device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv04_instmem_oclass;
+		device->oclass[NVDEV_SUBDEV_INSTMEM] =  nv04_instmem_oclass;
 		device->oclass[NVDEV_SUBDEV_VM     ] = &nv04_vmmgr_oclass;
 		device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass;
 		device->oclass[NVDEV_ENGINE_FIFO   ] =  nv17_fifo_oclass;
diff --git a/drivers/gpu/drm/nouveau/core/engine/device/nv20.c b/drivers/gpu/drm/nouveau/core/engine/device/nv20.c
index dcde53b..27ba61f 100644
--- a/drivers/gpu/drm/nouveau/core/engine/device/nv20.c
+++ b/drivers/gpu/drm/nouveau/core/engine/device/nv20.c
@@ -52,12 +52,12 @@
 		device->oclass[NVDEV_SUBDEV_GPIO   ] = &nv10_gpio_oclass;
 		device->oclass[NVDEV_SUBDEV_I2C    ] = &nv04_i2c_oclass;
 		device->oclass[NVDEV_SUBDEV_CLOCK  ] = &nv04_clock_oclass;
-		device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv20_devinit_oclass;
+		device->oclass[NVDEV_SUBDEV_DEVINIT] =  nv20_devinit_oclass;
 		device->oclass[NVDEV_SUBDEV_MC     ] =  nv04_mc_oclass;
 		device->oclass[NVDEV_SUBDEV_BUS    ] =  nv04_bus_oclass;
 		device->oclass[NVDEV_SUBDEV_TIMER  ] = &nv04_timer_oclass;
 		device->oclass[NVDEV_SUBDEV_FB     ] =  nv20_fb_oclass;
-		device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv04_instmem_oclass;
+		device->oclass[NVDEV_SUBDEV_INSTMEM] =  nv04_instmem_oclass;
 		device->oclass[NVDEV_SUBDEV_VM     ] = &nv04_vmmgr_oclass;
 		device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass;
 		device->oclass[NVDEV_ENGINE_FIFO   ] =  nv17_fifo_oclass;
@@ -71,12 +71,12 @@
 		device->oclass[NVDEV_SUBDEV_GPIO   ] = &nv10_gpio_oclass;
 		device->oclass[NVDEV_SUBDEV_I2C    ] = &nv04_i2c_oclass;
 		device->oclass[NVDEV_SUBDEV_CLOCK  ] = &nv04_clock_oclass;
-		device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv20_devinit_oclass;
+		device->oclass[NVDEV_SUBDEV_DEVINIT] =  nv20_devinit_oclass;
 		device->oclass[NVDEV_SUBDEV_MC     ] =  nv04_mc_oclass;
 		device->oclass[NVDEV_SUBDEV_BUS    ] =  nv04_bus_oclass;
 		device->oclass[NVDEV_SUBDEV_TIMER  ] = &nv04_timer_oclass;
 		device->oclass[NVDEV_SUBDEV_FB     ] =  nv25_fb_oclass;
-		device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv04_instmem_oclass;
+		device->oclass[NVDEV_SUBDEV_INSTMEM] =  nv04_instmem_oclass;
 		device->oclass[NVDEV_SUBDEV_VM     ] = &nv04_vmmgr_oclass;
 		device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass;
 		device->oclass[NVDEV_ENGINE_FIFO   ] =  nv17_fifo_oclass;
@@ -90,12 +90,12 @@
 		device->oclass[NVDEV_SUBDEV_GPIO   ] = &nv10_gpio_oclass;
 		device->oclass[NVDEV_SUBDEV_I2C    ] = &nv04_i2c_oclass;
 		device->oclass[NVDEV_SUBDEV_CLOCK  ] = &nv04_clock_oclass;
-		device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv20_devinit_oclass;
+		device->oclass[NVDEV_SUBDEV_DEVINIT] =  nv20_devinit_oclass;
 		device->oclass[NVDEV_SUBDEV_MC     ] =  nv04_mc_oclass;
 		device->oclass[NVDEV_SUBDEV_BUS    ] =  nv04_bus_oclass;
 		device->oclass[NVDEV_SUBDEV_TIMER  ] = &nv04_timer_oclass;
 		device->oclass[NVDEV_SUBDEV_FB     ] =  nv25_fb_oclass;
-		device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv04_instmem_oclass;
+		device->oclass[NVDEV_SUBDEV_INSTMEM] =  nv04_instmem_oclass;
 		device->oclass[NVDEV_SUBDEV_VM     ] = &nv04_vmmgr_oclass;
 		device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass;
 		device->oclass[NVDEV_ENGINE_FIFO   ] =  nv17_fifo_oclass;
@@ -109,12 +109,12 @@
 		device->oclass[NVDEV_SUBDEV_GPIO   ] = &nv10_gpio_oclass;
 		device->oclass[NVDEV_SUBDEV_I2C    ] = &nv04_i2c_oclass;
 		device->oclass[NVDEV_SUBDEV_CLOCK  ] = &nv04_clock_oclass;
-		device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv20_devinit_oclass;
+		device->oclass[NVDEV_SUBDEV_DEVINIT] =  nv20_devinit_oclass;
 		device->oclass[NVDEV_SUBDEV_MC     ] =  nv04_mc_oclass;
 		device->oclass[NVDEV_SUBDEV_BUS    ] =  nv04_bus_oclass;
 		device->oclass[NVDEV_SUBDEV_TIMER  ] = &nv04_timer_oclass;
 		device->oclass[NVDEV_SUBDEV_FB     ] =  nv25_fb_oclass;
-		device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv04_instmem_oclass;
+		device->oclass[NVDEV_SUBDEV_INSTMEM] =  nv04_instmem_oclass;
 		device->oclass[NVDEV_SUBDEV_VM     ] = &nv04_vmmgr_oclass;
 		device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass;
 		device->oclass[NVDEV_ENGINE_FIFO   ] =  nv17_fifo_oclass;
diff --git a/drivers/gpu/drm/nouveau/core/engine/device/nv30.c b/drivers/gpu/drm/nouveau/core/engine/device/nv30.c
index 7b8662e..fd47ace 100644
--- a/drivers/gpu/drm/nouveau/core/engine/device/nv30.c
+++ b/drivers/gpu/drm/nouveau/core/engine/device/nv30.c
@@ -52,12 +52,12 @@
 		device->oclass[NVDEV_SUBDEV_GPIO   ] = &nv10_gpio_oclass;
 		device->oclass[NVDEV_SUBDEV_I2C    ] = &nv04_i2c_oclass;
 		device->oclass[NVDEV_SUBDEV_CLOCK  ] = &nv04_clock_oclass;
-		device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv20_devinit_oclass;
+		device->oclass[NVDEV_SUBDEV_DEVINIT] =  nv20_devinit_oclass;
 		device->oclass[NVDEV_SUBDEV_MC     ] =  nv04_mc_oclass;
 		device->oclass[NVDEV_SUBDEV_BUS    ] =  nv04_bus_oclass;
 		device->oclass[NVDEV_SUBDEV_TIMER  ] = &nv04_timer_oclass;
 		device->oclass[NVDEV_SUBDEV_FB     ] =  nv30_fb_oclass;
-		device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv04_instmem_oclass;
+		device->oclass[NVDEV_SUBDEV_INSTMEM] =  nv04_instmem_oclass;
 		device->oclass[NVDEV_SUBDEV_VM     ] = &nv04_vmmgr_oclass;
 		device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass;
 		device->oclass[NVDEV_ENGINE_FIFO   ] =  nv17_fifo_oclass;
@@ -71,12 +71,12 @@
 		device->oclass[NVDEV_SUBDEV_GPIO   ] = &nv10_gpio_oclass;
 		device->oclass[NVDEV_SUBDEV_I2C    ] = &nv04_i2c_oclass;
 		device->oclass[NVDEV_SUBDEV_CLOCK  ] = &nv04_clock_oclass;
-		device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv20_devinit_oclass;
+		device->oclass[NVDEV_SUBDEV_DEVINIT] =  nv20_devinit_oclass;
 		device->oclass[NVDEV_SUBDEV_MC     ] =  nv04_mc_oclass;
 		device->oclass[NVDEV_SUBDEV_BUS    ] =  nv04_bus_oclass;
 		device->oclass[NVDEV_SUBDEV_TIMER  ] = &nv04_timer_oclass;
 		device->oclass[NVDEV_SUBDEV_FB     ] =  nv35_fb_oclass;
-		device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv04_instmem_oclass;
+		device->oclass[NVDEV_SUBDEV_INSTMEM] =  nv04_instmem_oclass;
 		device->oclass[NVDEV_SUBDEV_VM     ] = &nv04_vmmgr_oclass;
 		device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass;
 		device->oclass[NVDEV_ENGINE_FIFO   ] =  nv17_fifo_oclass;
@@ -90,12 +90,12 @@
 		device->oclass[NVDEV_SUBDEV_GPIO   ] = &nv10_gpio_oclass;
 		device->oclass[NVDEV_SUBDEV_I2C    ] = &nv04_i2c_oclass;
 		device->oclass[NVDEV_SUBDEV_CLOCK  ] = &nv04_clock_oclass;
-		device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv20_devinit_oclass;
+		device->oclass[NVDEV_SUBDEV_DEVINIT] =  nv20_devinit_oclass;
 		device->oclass[NVDEV_SUBDEV_MC     ] =  nv04_mc_oclass;
 		device->oclass[NVDEV_SUBDEV_BUS    ] =  nv31_bus_oclass;
 		device->oclass[NVDEV_SUBDEV_TIMER  ] = &nv04_timer_oclass;
 		device->oclass[NVDEV_SUBDEV_FB     ] =  nv30_fb_oclass;
-		device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv04_instmem_oclass;
+		device->oclass[NVDEV_SUBDEV_INSTMEM] =  nv04_instmem_oclass;
 		device->oclass[NVDEV_SUBDEV_VM     ] = &nv04_vmmgr_oclass;
 		device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass;
 		device->oclass[NVDEV_ENGINE_FIFO   ] =  nv17_fifo_oclass;
@@ -110,12 +110,12 @@
 		device->oclass[NVDEV_SUBDEV_GPIO   ] = &nv10_gpio_oclass;
 		device->oclass[NVDEV_SUBDEV_I2C    ] = &nv04_i2c_oclass;
 		device->oclass[NVDEV_SUBDEV_CLOCK  ] = &nv04_clock_oclass;
-		device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv20_devinit_oclass;
+		device->oclass[NVDEV_SUBDEV_DEVINIT] =  nv20_devinit_oclass;
 		device->oclass[NVDEV_SUBDEV_MC     ] =  nv04_mc_oclass;
 		device->oclass[NVDEV_SUBDEV_BUS    ] =  nv31_bus_oclass;
 		device->oclass[NVDEV_SUBDEV_TIMER  ] = &nv04_timer_oclass;
 		device->oclass[NVDEV_SUBDEV_FB     ] =  nv36_fb_oclass;
-		device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv04_instmem_oclass;
+		device->oclass[NVDEV_SUBDEV_INSTMEM] =  nv04_instmem_oclass;
 		device->oclass[NVDEV_SUBDEV_VM     ] = &nv04_vmmgr_oclass;
 		device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass;
 		device->oclass[NVDEV_ENGINE_FIFO   ] =  nv17_fifo_oclass;
@@ -130,12 +130,12 @@
 		device->oclass[NVDEV_SUBDEV_GPIO   ] = &nv10_gpio_oclass;
 		device->oclass[NVDEV_SUBDEV_I2C    ] = &nv04_i2c_oclass;
 		device->oclass[NVDEV_SUBDEV_CLOCK  ] = &nv04_clock_oclass;
-		device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv10_devinit_oclass;
+		device->oclass[NVDEV_SUBDEV_DEVINIT] =  nv10_devinit_oclass;
 		device->oclass[NVDEV_SUBDEV_MC     ] =  nv04_mc_oclass;
 		device->oclass[NVDEV_SUBDEV_BUS    ] =  nv31_bus_oclass;
 		device->oclass[NVDEV_SUBDEV_TIMER  ] = &nv04_timer_oclass;
 		device->oclass[NVDEV_SUBDEV_FB     ] =  nv10_fb_oclass;
-		device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv04_instmem_oclass;
+		device->oclass[NVDEV_SUBDEV_INSTMEM] =  nv04_instmem_oclass;
 		device->oclass[NVDEV_SUBDEV_VM     ] = &nv04_vmmgr_oclass;
 		device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass;
 		device->oclass[NVDEV_ENGINE_FIFO   ] =  nv17_fifo_oclass;
diff --git a/drivers/gpu/drm/nouveau/core/engine/device/nv40.c b/drivers/gpu/drm/nouveau/core/engine/device/nv40.c
index c8c41e9..08b8859 100644
--- a/drivers/gpu/drm/nouveau/core/engine/device/nv40.c
+++ b/drivers/gpu/drm/nouveau/core/engine/device/nv40.c
@@ -57,12 +57,12 @@
 		device->oclass[NVDEV_SUBDEV_I2C    ] = &nv04_i2c_oclass;
 		device->oclass[NVDEV_SUBDEV_CLOCK  ] = &nv40_clock_oclass;
 		device->oclass[NVDEV_SUBDEV_THERM  ] = &nv40_therm_oclass;
-		device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv1a_devinit_oclass;
+		device->oclass[NVDEV_SUBDEV_DEVINIT] =  nv1a_devinit_oclass;
 		device->oclass[NVDEV_SUBDEV_MC     ] =  nv40_mc_oclass;
 		device->oclass[NVDEV_SUBDEV_BUS    ] =  nv31_bus_oclass;
 		device->oclass[NVDEV_SUBDEV_TIMER  ] = &nv04_timer_oclass;
 		device->oclass[NVDEV_SUBDEV_FB     ] =  nv40_fb_oclass;
-		device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv40_instmem_oclass;
+		device->oclass[NVDEV_SUBDEV_INSTMEM] =  nv40_instmem_oclass;
 		device->oclass[NVDEV_SUBDEV_VM     ] = &nv04_vmmgr_oclass;
 		device->oclass[NVDEV_SUBDEV_VOLT   ] = &nv40_volt_oclass;
 		device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass;
@@ -80,12 +80,12 @@
 		device->oclass[NVDEV_SUBDEV_I2C    ] = &nv04_i2c_oclass;
 		device->oclass[NVDEV_SUBDEV_CLOCK  ] = &nv40_clock_oclass;
 		device->oclass[NVDEV_SUBDEV_THERM  ] = &nv40_therm_oclass;
-		device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv1a_devinit_oclass;
+		device->oclass[NVDEV_SUBDEV_DEVINIT] =  nv1a_devinit_oclass;
 		device->oclass[NVDEV_SUBDEV_MC     ] =  nv40_mc_oclass;
 		device->oclass[NVDEV_SUBDEV_BUS    ] =  nv31_bus_oclass;
 		device->oclass[NVDEV_SUBDEV_TIMER  ] = &nv04_timer_oclass;
 		device->oclass[NVDEV_SUBDEV_FB     ] =  nv41_fb_oclass;
-		device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv40_instmem_oclass;
+		device->oclass[NVDEV_SUBDEV_INSTMEM] =  nv40_instmem_oclass;
 		device->oclass[NVDEV_SUBDEV_VM     ] = &nv41_vmmgr_oclass;
 		device->oclass[NVDEV_SUBDEV_VOLT   ] = &nv40_volt_oclass;
 		device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass;
@@ -103,12 +103,12 @@
 		device->oclass[NVDEV_SUBDEV_I2C    ] = &nv04_i2c_oclass;
 		device->oclass[NVDEV_SUBDEV_CLOCK  ] = &nv40_clock_oclass;
 		device->oclass[NVDEV_SUBDEV_THERM  ] = &nv40_therm_oclass;
-		device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv1a_devinit_oclass;
+		device->oclass[NVDEV_SUBDEV_DEVINIT] =  nv1a_devinit_oclass;
 		device->oclass[NVDEV_SUBDEV_MC     ] =  nv40_mc_oclass;
 		device->oclass[NVDEV_SUBDEV_BUS    ] =  nv31_bus_oclass;
 		device->oclass[NVDEV_SUBDEV_TIMER  ] = &nv04_timer_oclass;
 		device->oclass[NVDEV_SUBDEV_FB     ] =  nv41_fb_oclass;
-		device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv40_instmem_oclass;
+		device->oclass[NVDEV_SUBDEV_INSTMEM] =  nv40_instmem_oclass;
 		device->oclass[NVDEV_SUBDEV_VM     ] = &nv41_vmmgr_oclass;
 		device->oclass[NVDEV_SUBDEV_VOLT   ] = &nv40_volt_oclass;
 		device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass;
@@ -126,12 +126,12 @@
 		device->oclass[NVDEV_SUBDEV_I2C    ] = &nv04_i2c_oclass;
 		device->oclass[NVDEV_SUBDEV_CLOCK  ] = &nv40_clock_oclass;
 		device->oclass[NVDEV_SUBDEV_THERM  ] = &nv40_therm_oclass;
-		device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv1a_devinit_oclass;
+		device->oclass[NVDEV_SUBDEV_DEVINIT] =  nv1a_devinit_oclass;
 		device->oclass[NVDEV_SUBDEV_MC     ] =  nv40_mc_oclass;
 		device->oclass[NVDEV_SUBDEV_BUS    ] =  nv31_bus_oclass;
 		device->oclass[NVDEV_SUBDEV_TIMER  ] = &nv04_timer_oclass;
 		device->oclass[NVDEV_SUBDEV_FB     ] =  nv41_fb_oclass;
-		device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv40_instmem_oclass;
+		device->oclass[NVDEV_SUBDEV_INSTMEM] =  nv40_instmem_oclass;
 		device->oclass[NVDEV_SUBDEV_VM     ] = &nv41_vmmgr_oclass;
 		device->oclass[NVDEV_SUBDEV_VOLT   ] = &nv40_volt_oclass;
 		device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass;
@@ -149,12 +149,12 @@
 		device->oclass[NVDEV_SUBDEV_I2C    ] = &nv04_i2c_oclass;
 		device->oclass[NVDEV_SUBDEV_CLOCK  ] = &nv40_clock_oclass;
 		device->oclass[NVDEV_SUBDEV_THERM  ] = &nv40_therm_oclass;
-		device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv1a_devinit_oclass;
+		device->oclass[NVDEV_SUBDEV_DEVINIT] =  nv1a_devinit_oclass;
 		device->oclass[NVDEV_SUBDEV_MC     ] =  nv40_mc_oclass;
 		device->oclass[NVDEV_SUBDEV_BUS    ] =  nv31_bus_oclass;
 		device->oclass[NVDEV_SUBDEV_TIMER  ] = &nv04_timer_oclass;
 		device->oclass[NVDEV_SUBDEV_FB     ] =  nv40_fb_oclass;
-		device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv40_instmem_oclass;
+		device->oclass[NVDEV_SUBDEV_INSTMEM] =  nv40_instmem_oclass;
 		device->oclass[NVDEV_SUBDEV_VM     ] = &nv04_vmmgr_oclass;
 		device->oclass[NVDEV_SUBDEV_VOLT   ] = &nv40_volt_oclass;
 		device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass;
@@ -172,12 +172,12 @@
 		device->oclass[NVDEV_SUBDEV_I2C    ] = &nv04_i2c_oclass;
 		device->oclass[NVDEV_SUBDEV_CLOCK  ] = &nv40_clock_oclass;
 		device->oclass[NVDEV_SUBDEV_THERM  ] = &nv40_therm_oclass;
-		device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv1a_devinit_oclass;
+		device->oclass[NVDEV_SUBDEV_DEVINIT] =  nv1a_devinit_oclass;
 		device->oclass[NVDEV_SUBDEV_MC     ] =  nv40_mc_oclass;
 		device->oclass[NVDEV_SUBDEV_BUS    ] =  nv31_bus_oclass;
 		device->oclass[NVDEV_SUBDEV_TIMER  ] = &nv04_timer_oclass;
 		device->oclass[NVDEV_SUBDEV_FB     ] =  nv47_fb_oclass;
-		device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv40_instmem_oclass;
+		device->oclass[NVDEV_SUBDEV_INSTMEM] =  nv40_instmem_oclass;
 		device->oclass[NVDEV_SUBDEV_VM     ] = &nv41_vmmgr_oclass;
 		device->oclass[NVDEV_SUBDEV_VOLT   ] = &nv40_volt_oclass;
 		device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass;
@@ -195,12 +195,12 @@
 		device->oclass[NVDEV_SUBDEV_I2C    ] = &nv04_i2c_oclass;
 		device->oclass[NVDEV_SUBDEV_CLOCK  ] = &nv40_clock_oclass;
 		device->oclass[NVDEV_SUBDEV_THERM  ] = &nv40_therm_oclass;
-		device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv1a_devinit_oclass;
+		device->oclass[NVDEV_SUBDEV_DEVINIT] =  nv1a_devinit_oclass;
 		device->oclass[NVDEV_SUBDEV_MC     ] =  nv40_mc_oclass;
 		device->oclass[NVDEV_SUBDEV_BUS    ] =  nv31_bus_oclass;
 		device->oclass[NVDEV_SUBDEV_TIMER  ] = &nv04_timer_oclass;
 		device->oclass[NVDEV_SUBDEV_FB     ] =  nv49_fb_oclass;
-		device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv40_instmem_oclass;
+		device->oclass[NVDEV_SUBDEV_INSTMEM] =  nv40_instmem_oclass;
 		device->oclass[NVDEV_SUBDEV_VM     ] = &nv41_vmmgr_oclass;
 		device->oclass[NVDEV_SUBDEV_VOLT   ] = &nv40_volt_oclass;
 		device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass;
@@ -218,12 +218,12 @@
 		device->oclass[NVDEV_SUBDEV_I2C    ] = &nv04_i2c_oclass;
 		device->oclass[NVDEV_SUBDEV_CLOCK  ] = &nv40_clock_oclass;
 		device->oclass[NVDEV_SUBDEV_THERM  ] = &nv40_therm_oclass;
-		device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv1a_devinit_oclass;
+		device->oclass[NVDEV_SUBDEV_DEVINIT] =  nv1a_devinit_oclass;
 		device->oclass[NVDEV_SUBDEV_MC     ] =  nv40_mc_oclass;
 		device->oclass[NVDEV_SUBDEV_BUS    ] =  nv31_bus_oclass;
 		device->oclass[NVDEV_SUBDEV_TIMER  ] = &nv04_timer_oclass;
 		device->oclass[NVDEV_SUBDEV_FB     ] =  nv49_fb_oclass;
-		device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv40_instmem_oclass;
+		device->oclass[NVDEV_SUBDEV_INSTMEM] =  nv40_instmem_oclass;
 		device->oclass[NVDEV_SUBDEV_VM     ] = &nv41_vmmgr_oclass;
 		device->oclass[NVDEV_SUBDEV_VOLT   ] = &nv40_volt_oclass;
 		device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass;
@@ -241,12 +241,12 @@
 		device->oclass[NVDEV_SUBDEV_I2C    ] = &nv04_i2c_oclass;
 		device->oclass[NVDEV_SUBDEV_CLOCK  ] = &nv40_clock_oclass;
 		device->oclass[NVDEV_SUBDEV_THERM  ] = &nv40_therm_oclass;
-		device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv1a_devinit_oclass;
+		device->oclass[NVDEV_SUBDEV_DEVINIT] =  nv1a_devinit_oclass;
 		device->oclass[NVDEV_SUBDEV_MC     ] =  nv44_mc_oclass;
 		device->oclass[NVDEV_SUBDEV_BUS    ] =  nv31_bus_oclass;
 		device->oclass[NVDEV_SUBDEV_TIMER  ] = &nv04_timer_oclass;
 		device->oclass[NVDEV_SUBDEV_FB     ] =  nv44_fb_oclass;
-		device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv40_instmem_oclass;
+		device->oclass[NVDEV_SUBDEV_INSTMEM] =  nv40_instmem_oclass;
 		device->oclass[NVDEV_SUBDEV_VM     ] = &nv44_vmmgr_oclass;
 		device->oclass[NVDEV_SUBDEV_VOLT   ] = &nv40_volt_oclass;
 		device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass;
@@ -264,12 +264,12 @@
 		device->oclass[NVDEV_SUBDEV_I2C    ] = &nv04_i2c_oclass;
 		device->oclass[NVDEV_SUBDEV_CLOCK  ] = &nv40_clock_oclass;
 		device->oclass[NVDEV_SUBDEV_THERM  ] = &nv40_therm_oclass;
-		device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv1a_devinit_oclass;
+		device->oclass[NVDEV_SUBDEV_DEVINIT] =  nv1a_devinit_oclass;
 		device->oclass[NVDEV_SUBDEV_MC     ] =  nv44_mc_oclass;
 		device->oclass[NVDEV_SUBDEV_BUS    ] =  nv31_bus_oclass;
 		device->oclass[NVDEV_SUBDEV_TIMER  ] = &nv04_timer_oclass;
 		device->oclass[NVDEV_SUBDEV_FB     ] =  nv46_fb_oclass;
-		device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv40_instmem_oclass;
+		device->oclass[NVDEV_SUBDEV_INSTMEM] =  nv40_instmem_oclass;
 		device->oclass[NVDEV_SUBDEV_VM     ] = &nv44_vmmgr_oclass;
 		device->oclass[NVDEV_SUBDEV_VOLT   ] = &nv40_volt_oclass;
 		device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass;
@@ -287,12 +287,12 @@
 		device->oclass[NVDEV_SUBDEV_I2C    ] = &nv04_i2c_oclass;
 		device->oclass[NVDEV_SUBDEV_CLOCK  ] = &nv40_clock_oclass;
 		device->oclass[NVDEV_SUBDEV_THERM  ] = &nv40_therm_oclass;
-		device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv1a_devinit_oclass;
+		device->oclass[NVDEV_SUBDEV_DEVINIT] =  nv1a_devinit_oclass;
 		device->oclass[NVDEV_SUBDEV_MC     ] =  nv44_mc_oclass;
 		device->oclass[NVDEV_SUBDEV_BUS    ] =  nv31_bus_oclass;
 		device->oclass[NVDEV_SUBDEV_TIMER  ] = &nv04_timer_oclass;
 		device->oclass[NVDEV_SUBDEV_FB     ] =  nv44_fb_oclass;
-		device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv40_instmem_oclass;
+		device->oclass[NVDEV_SUBDEV_INSTMEM] =  nv40_instmem_oclass;
 		device->oclass[NVDEV_SUBDEV_VM     ] = &nv44_vmmgr_oclass;
 		device->oclass[NVDEV_SUBDEV_VOLT   ] = &nv40_volt_oclass;
 		device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass;
@@ -310,12 +310,12 @@
 		device->oclass[NVDEV_SUBDEV_I2C    ] = &nv04_i2c_oclass;
 		device->oclass[NVDEV_SUBDEV_CLOCK  ] = &nv40_clock_oclass;
 		device->oclass[NVDEV_SUBDEV_THERM  ] = &nv40_therm_oclass;
-		device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv1a_devinit_oclass;
-		device->oclass[NVDEV_SUBDEV_MC     ] =  nv44_mc_oclass;
+		device->oclass[NVDEV_SUBDEV_DEVINIT] =  nv1a_devinit_oclass;
+		device->oclass[NVDEV_SUBDEV_MC     ] =  nv4c_mc_oclass;
 		device->oclass[NVDEV_SUBDEV_BUS    ] =  nv31_bus_oclass;
 		device->oclass[NVDEV_SUBDEV_TIMER  ] = &nv04_timer_oclass;
 		device->oclass[NVDEV_SUBDEV_FB     ] =  nv46_fb_oclass;
-		device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv40_instmem_oclass;
+		device->oclass[NVDEV_SUBDEV_INSTMEM] =  nv40_instmem_oclass;
 		device->oclass[NVDEV_SUBDEV_VM     ] = &nv44_vmmgr_oclass;
 		device->oclass[NVDEV_SUBDEV_VOLT   ] = &nv40_volt_oclass;
 		device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass;
@@ -333,12 +333,12 @@
 		device->oclass[NVDEV_SUBDEV_I2C    ] = &nv4e_i2c_oclass;
 		device->oclass[NVDEV_SUBDEV_CLOCK  ] = &nv40_clock_oclass;
 		device->oclass[NVDEV_SUBDEV_THERM  ] = &nv40_therm_oclass;
-		device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv1a_devinit_oclass;
-		device->oclass[NVDEV_SUBDEV_MC     ] =  nv44_mc_oclass;
+		device->oclass[NVDEV_SUBDEV_DEVINIT] =  nv1a_devinit_oclass;
+		device->oclass[NVDEV_SUBDEV_MC     ] =  nv4c_mc_oclass;
 		device->oclass[NVDEV_SUBDEV_BUS    ] =  nv31_bus_oclass;
 		device->oclass[NVDEV_SUBDEV_TIMER  ] = &nv04_timer_oclass;
 		device->oclass[NVDEV_SUBDEV_FB     ] =  nv4e_fb_oclass;
-		device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv40_instmem_oclass;
+		device->oclass[NVDEV_SUBDEV_INSTMEM] =  nv40_instmem_oclass;
 		device->oclass[NVDEV_SUBDEV_VM     ] = &nv44_vmmgr_oclass;
 		device->oclass[NVDEV_SUBDEV_VOLT   ] = &nv40_volt_oclass;
 		device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass;
@@ -356,12 +356,12 @@
 		device->oclass[NVDEV_SUBDEV_I2C    ] = &nv04_i2c_oclass;
 		device->oclass[NVDEV_SUBDEV_CLOCK  ] = &nv40_clock_oclass;
 		device->oclass[NVDEV_SUBDEV_THERM  ] = &nv40_therm_oclass;
-		device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv1a_devinit_oclass;
-		device->oclass[NVDEV_SUBDEV_MC     ] =  nv44_mc_oclass;
+		device->oclass[NVDEV_SUBDEV_DEVINIT] =  nv1a_devinit_oclass;
+		device->oclass[NVDEV_SUBDEV_MC     ] =  nv4c_mc_oclass;
 		device->oclass[NVDEV_SUBDEV_BUS    ] =  nv31_bus_oclass;
 		device->oclass[NVDEV_SUBDEV_TIMER  ] = &nv04_timer_oclass;
 		device->oclass[NVDEV_SUBDEV_FB     ] =  nv46_fb_oclass;
-		device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv40_instmem_oclass;
+		device->oclass[NVDEV_SUBDEV_INSTMEM] =  nv40_instmem_oclass;
 		device->oclass[NVDEV_SUBDEV_VM     ] = &nv44_vmmgr_oclass;
 		device->oclass[NVDEV_SUBDEV_VOLT   ] = &nv40_volt_oclass;
 		device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass;
@@ -379,12 +379,12 @@
 		device->oclass[NVDEV_SUBDEV_I2C    ] = &nv04_i2c_oclass;
 		device->oclass[NVDEV_SUBDEV_CLOCK  ] = &nv40_clock_oclass;
 		device->oclass[NVDEV_SUBDEV_THERM  ] = &nv40_therm_oclass;
-		device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv1a_devinit_oclass;
-		device->oclass[NVDEV_SUBDEV_MC     ] =  nv44_mc_oclass;
+		device->oclass[NVDEV_SUBDEV_DEVINIT] =  nv1a_devinit_oclass;
+		device->oclass[NVDEV_SUBDEV_MC     ] =  nv4c_mc_oclass;
 		device->oclass[NVDEV_SUBDEV_BUS    ] =  nv31_bus_oclass;
 		device->oclass[NVDEV_SUBDEV_TIMER  ] = &nv04_timer_oclass;
 		device->oclass[NVDEV_SUBDEV_FB     ] =  nv46_fb_oclass;
-		device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv40_instmem_oclass;
+		device->oclass[NVDEV_SUBDEV_INSTMEM] =  nv40_instmem_oclass;
 		device->oclass[NVDEV_SUBDEV_VM     ] = &nv44_vmmgr_oclass;
 		device->oclass[NVDEV_SUBDEV_VOLT   ] = &nv40_volt_oclass;
 		device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass;
@@ -402,12 +402,12 @@
 		device->oclass[NVDEV_SUBDEV_I2C    ] = &nv04_i2c_oclass;
 		device->oclass[NVDEV_SUBDEV_CLOCK  ] = &nv40_clock_oclass;
 		device->oclass[NVDEV_SUBDEV_THERM  ] = &nv40_therm_oclass;
-		device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv1a_devinit_oclass;
-		device->oclass[NVDEV_SUBDEV_MC     ] =  nv44_mc_oclass;
+		device->oclass[NVDEV_SUBDEV_DEVINIT] =  nv1a_devinit_oclass;
+		device->oclass[NVDEV_SUBDEV_MC     ] =  nv4c_mc_oclass;
 		device->oclass[NVDEV_SUBDEV_BUS    ] =  nv31_bus_oclass;
 		device->oclass[NVDEV_SUBDEV_TIMER  ] = &nv04_timer_oclass;
 		device->oclass[NVDEV_SUBDEV_FB     ] =  nv46_fb_oclass;
-		device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv40_instmem_oclass;
+		device->oclass[NVDEV_SUBDEV_INSTMEM] =  nv40_instmem_oclass;
 		device->oclass[NVDEV_SUBDEV_VM     ] = &nv44_vmmgr_oclass;
 		device->oclass[NVDEV_SUBDEV_VOLT   ] = &nv40_volt_oclass;
 		device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass;
diff --git a/drivers/gpu/drm/nouveau/core/engine/device/nv50.c b/drivers/gpu/drm/nouveau/core/engine/device/nv50.c
index db3fc7b..81d5c26 100644
--- a/drivers/gpu/drm/nouveau/core/engine/device/nv50.c
+++ b/drivers/gpu/drm/nouveau/core/engine/device/nv50.c
@@ -65,12 +65,12 @@
 		device->oclass[NVDEV_SUBDEV_CLOCK  ] =  nv50_clock_oclass;
 		device->oclass[NVDEV_SUBDEV_THERM  ] = &nv50_therm_oclass;
 		device->oclass[NVDEV_SUBDEV_MXM    ] = &nv50_mxm_oclass;
-		device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv50_devinit_oclass;
+		device->oclass[NVDEV_SUBDEV_DEVINIT] =  nv50_devinit_oclass;
 		device->oclass[NVDEV_SUBDEV_MC     ] =  nv50_mc_oclass;
 		device->oclass[NVDEV_SUBDEV_BUS    ] =  nv50_bus_oclass;
 		device->oclass[NVDEV_SUBDEV_TIMER  ] = &nv04_timer_oclass;
 		device->oclass[NVDEV_SUBDEV_FB     ] =  nv50_fb_oclass;
-		device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv50_instmem_oclass;
+		device->oclass[NVDEV_SUBDEV_INSTMEM] =  nv50_instmem_oclass;
 		device->oclass[NVDEV_SUBDEV_VM     ] = &nv50_vmmgr_oclass;
 		device->oclass[NVDEV_SUBDEV_BAR    ] = &nv50_bar_oclass;
 		device->oclass[NVDEV_SUBDEV_VOLT   ] = &nv40_volt_oclass;
@@ -90,12 +90,12 @@
 		device->oclass[NVDEV_SUBDEV_CLOCK  ] =  nv84_clock_oclass;
 		device->oclass[NVDEV_SUBDEV_THERM  ] = &nv84_therm_oclass;
 		device->oclass[NVDEV_SUBDEV_MXM    ] = &nv50_mxm_oclass;
-		device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv50_devinit_oclass;
+		device->oclass[NVDEV_SUBDEV_DEVINIT] =  nv84_devinit_oclass;
 		device->oclass[NVDEV_SUBDEV_MC     ] =  nv50_mc_oclass;
 		device->oclass[NVDEV_SUBDEV_BUS    ] =  nv50_bus_oclass;
 		device->oclass[NVDEV_SUBDEV_TIMER  ] = &nv04_timer_oclass;
 		device->oclass[NVDEV_SUBDEV_FB     ] =  nv84_fb_oclass;
-		device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv50_instmem_oclass;
+		device->oclass[NVDEV_SUBDEV_INSTMEM] =  nv50_instmem_oclass;
 		device->oclass[NVDEV_SUBDEV_VM     ] = &nv50_vmmgr_oclass;
 		device->oclass[NVDEV_SUBDEV_BAR    ] = &nv50_bar_oclass;
 		device->oclass[NVDEV_SUBDEV_VOLT   ] = &nv40_volt_oclass;
@@ -118,12 +118,12 @@
 		device->oclass[NVDEV_SUBDEV_CLOCK  ] =  nv84_clock_oclass;
 		device->oclass[NVDEV_SUBDEV_THERM  ] = &nv84_therm_oclass;
 		device->oclass[NVDEV_SUBDEV_MXM    ] = &nv50_mxm_oclass;
-		device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv50_devinit_oclass;
+		device->oclass[NVDEV_SUBDEV_DEVINIT] =  nv84_devinit_oclass;
 		device->oclass[NVDEV_SUBDEV_MC     ] =  nv50_mc_oclass;
 		device->oclass[NVDEV_SUBDEV_BUS    ] =  nv50_bus_oclass;
 		device->oclass[NVDEV_SUBDEV_TIMER  ] = &nv04_timer_oclass;
 		device->oclass[NVDEV_SUBDEV_FB     ] =  nv84_fb_oclass;
-		device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv50_instmem_oclass;
+		device->oclass[NVDEV_SUBDEV_INSTMEM] =  nv50_instmem_oclass;
 		device->oclass[NVDEV_SUBDEV_VM     ] = &nv50_vmmgr_oclass;
 		device->oclass[NVDEV_SUBDEV_BAR    ] = &nv50_bar_oclass;
 		device->oclass[NVDEV_SUBDEV_VOLT   ] = &nv40_volt_oclass;
@@ -146,12 +146,12 @@
 		device->oclass[NVDEV_SUBDEV_CLOCK  ] =  nv84_clock_oclass;
 		device->oclass[NVDEV_SUBDEV_THERM  ] = &nv84_therm_oclass;
 		device->oclass[NVDEV_SUBDEV_MXM    ] = &nv50_mxm_oclass;
-		device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv50_devinit_oclass;
+		device->oclass[NVDEV_SUBDEV_DEVINIT] =  nv84_devinit_oclass;
 		device->oclass[NVDEV_SUBDEV_MC     ] =  nv50_mc_oclass;
 		device->oclass[NVDEV_SUBDEV_BUS    ] =  nv50_bus_oclass;
 		device->oclass[NVDEV_SUBDEV_TIMER  ] = &nv04_timer_oclass;
 		device->oclass[NVDEV_SUBDEV_FB     ] =  nv84_fb_oclass;
-		device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv50_instmem_oclass;
+		device->oclass[NVDEV_SUBDEV_INSTMEM] =  nv50_instmem_oclass;
 		device->oclass[NVDEV_SUBDEV_VM     ] = &nv50_vmmgr_oclass;
 		device->oclass[NVDEV_SUBDEV_BAR    ] = &nv50_bar_oclass;
 		device->oclass[NVDEV_SUBDEV_VOLT   ] = &nv40_volt_oclass;
@@ -174,12 +174,12 @@
 		device->oclass[NVDEV_SUBDEV_CLOCK  ] =  nv84_clock_oclass;
 		device->oclass[NVDEV_SUBDEV_THERM  ] = &nv84_therm_oclass;
 		device->oclass[NVDEV_SUBDEV_MXM    ] = &nv50_mxm_oclass;
-		device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv50_devinit_oclass;
+		device->oclass[NVDEV_SUBDEV_DEVINIT] =  nv84_devinit_oclass;
 		device->oclass[NVDEV_SUBDEV_MC     ] =  nv94_mc_oclass;
 		device->oclass[NVDEV_SUBDEV_BUS    ] =  nv94_bus_oclass;
 		device->oclass[NVDEV_SUBDEV_TIMER  ] = &nv04_timer_oclass;
 		device->oclass[NVDEV_SUBDEV_FB     ] =  nv84_fb_oclass;
-		device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv50_instmem_oclass;
+		device->oclass[NVDEV_SUBDEV_INSTMEM] =  nv50_instmem_oclass;
 		device->oclass[NVDEV_SUBDEV_VM     ] = &nv50_vmmgr_oclass;
 		device->oclass[NVDEV_SUBDEV_BAR    ] = &nv50_bar_oclass;
 		device->oclass[NVDEV_SUBDEV_VOLT   ] = &nv40_volt_oclass;
@@ -202,12 +202,12 @@
 		device->oclass[NVDEV_SUBDEV_CLOCK  ] =  nv84_clock_oclass;
 		device->oclass[NVDEV_SUBDEV_THERM  ] = &nv84_therm_oclass;
 		device->oclass[NVDEV_SUBDEV_MXM    ] = &nv50_mxm_oclass;
-		device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv50_devinit_oclass;
+		device->oclass[NVDEV_SUBDEV_DEVINIT] =  nv84_devinit_oclass;
 		device->oclass[NVDEV_SUBDEV_MC     ] =  nv94_mc_oclass;
 		device->oclass[NVDEV_SUBDEV_BUS    ] =  nv94_bus_oclass;
 		device->oclass[NVDEV_SUBDEV_TIMER  ] = &nv04_timer_oclass;
 		device->oclass[NVDEV_SUBDEV_FB     ] =  nv84_fb_oclass;
-		device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv50_instmem_oclass;
+		device->oclass[NVDEV_SUBDEV_INSTMEM] =  nv50_instmem_oclass;
 		device->oclass[NVDEV_SUBDEV_VM     ] = &nv50_vmmgr_oclass;
 		device->oclass[NVDEV_SUBDEV_BAR    ] = &nv50_bar_oclass;
 		device->oclass[NVDEV_SUBDEV_VOLT   ] = &nv40_volt_oclass;
@@ -230,12 +230,12 @@
 		device->oclass[NVDEV_SUBDEV_CLOCK  ] =  nv84_clock_oclass;
 		device->oclass[NVDEV_SUBDEV_THERM  ] = &nv84_therm_oclass;
 		device->oclass[NVDEV_SUBDEV_MXM    ] = &nv50_mxm_oclass;
-		device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv50_devinit_oclass;
+		device->oclass[NVDEV_SUBDEV_DEVINIT] =  nv98_devinit_oclass;
 		device->oclass[NVDEV_SUBDEV_MC     ] =  nv98_mc_oclass;
 		device->oclass[NVDEV_SUBDEV_BUS    ] =  nv94_bus_oclass;
 		device->oclass[NVDEV_SUBDEV_TIMER  ] = &nv04_timer_oclass;
 		device->oclass[NVDEV_SUBDEV_FB     ] =  nv84_fb_oclass;
-		device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv50_instmem_oclass;
+		device->oclass[NVDEV_SUBDEV_INSTMEM] =  nv50_instmem_oclass;
 		device->oclass[NVDEV_SUBDEV_VM     ] = &nv50_vmmgr_oclass;
 		device->oclass[NVDEV_SUBDEV_BAR    ] = &nv50_bar_oclass;
 		device->oclass[NVDEV_SUBDEV_VOLT   ] = &nv40_volt_oclass;
@@ -258,12 +258,12 @@
 		device->oclass[NVDEV_SUBDEV_CLOCK  ] =  nv84_clock_oclass;
 		device->oclass[NVDEV_SUBDEV_THERM  ] = &nv84_therm_oclass;
 		device->oclass[NVDEV_SUBDEV_MXM    ] = &nv50_mxm_oclass;
-		device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv50_devinit_oclass;
+		device->oclass[NVDEV_SUBDEV_DEVINIT] =  nv84_devinit_oclass;
 		device->oclass[NVDEV_SUBDEV_MC     ] =  nv98_mc_oclass;
 		device->oclass[NVDEV_SUBDEV_BUS    ] =  nv94_bus_oclass;
 		device->oclass[NVDEV_SUBDEV_TIMER  ] = &nv04_timer_oclass;
 		device->oclass[NVDEV_SUBDEV_FB     ] =  nv84_fb_oclass;
-		device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv50_instmem_oclass;
+		device->oclass[NVDEV_SUBDEV_INSTMEM] =  nv50_instmem_oclass;
 		device->oclass[NVDEV_SUBDEV_VM     ] = &nv50_vmmgr_oclass;
 		device->oclass[NVDEV_SUBDEV_BAR    ] = &nv50_bar_oclass;
 		device->oclass[NVDEV_SUBDEV_VOLT   ] = &nv40_volt_oclass;
@@ -286,12 +286,12 @@
 		device->oclass[NVDEV_SUBDEV_CLOCK  ] =  nvaa_clock_oclass;
 		device->oclass[NVDEV_SUBDEV_THERM  ] = &nv84_therm_oclass;
 		device->oclass[NVDEV_SUBDEV_MXM    ] = &nv50_mxm_oclass;
-		device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv50_devinit_oclass;
+		device->oclass[NVDEV_SUBDEV_DEVINIT] =  nv98_devinit_oclass;
 		device->oclass[NVDEV_SUBDEV_MC     ] =  nv98_mc_oclass;
 		device->oclass[NVDEV_SUBDEV_BUS    ] =  nv94_bus_oclass;
 		device->oclass[NVDEV_SUBDEV_TIMER  ] = &nv04_timer_oclass;
 		device->oclass[NVDEV_SUBDEV_FB     ] =  nvaa_fb_oclass;
-		device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv50_instmem_oclass;
+		device->oclass[NVDEV_SUBDEV_INSTMEM] =  nv50_instmem_oclass;
 		device->oclass[NVDEV_SUBDEV_VM     ] = &nv50_vmmgr_oclass;
 		device->oclass[NVDEV_SUBDEV_BAR    ] = &nv50_bar_oclass;
 		device->oclass[NVDEV_SUBDEV_VOLT   ] = &nv40_volt_oclass;
@@ -314,12 +314,12 @@
 		device->oclass[NVDEV_SUBDEV_CLOCK  ] =  nvaa_clock_oclass;
 		device->oclass[NVDEV_SUBDEV_THERM  ] = &nv84_therm_oclass;
 		device->oclass[NVDEV_SUBDEV_MXM    ] = &nv50_mxm_oclass;
-		device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv50_devinit_oclass;
+		device->oclass[NVDEV_SUBDEV_DEVINIT] =  nv98_devinit_oclass;
 		device->oclass[NVDEV_SUBDEV_MC     ] =  nv98_mc_oclass;
 		device->oclass[NVDEV_SUBDEV_BUS    ] =  nv94_bus_oclass;
 		device->oclass[NVDEV_SUBDEV_TIMER  ] = &nv04_timer_oclass;
 		device->oclass[NVDEV_SUBDEV_FB     ] =  nvaa_fb_oclass;
-		device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv50_instmem_oclass;
+		device->oclass[NVDEV_SUBDEV_INSTMEM] =  nv50_instmem_oclass;
 		device->oclass[NVDEV_SUBDEV_VM     ] = &nv50_vmmgr_oclass;
 		device->oclass[NVDEV_SUBDEV_BAR    ] = &nv50_bar_oclass;
 		device->oclass[NVDEV_SUBDEV_VOLT   ] = &nv40_volt_oclass;
@@ -342,12 +342,12 @@
 		device->oclass[NVDEV_SUBDEV_CLOCK  ] = &nva3_clock_oclass;
 		device->oclass[NVDEV_SUBDEV_THERM  ] = &nva3_therm_oclass;
 		device->oclass[NVDEV_SUBDEV_MXM    ] = &nv50_mxm_oclass;
-		device->oclass[NVDEV_SUBDEV_DEVINIT] = &nva3_devinit_oclass;
+		device->oclass[NVDEV_SUBDEV_DEVINIT] =  nva3_devinit_oclass;
 		device->oclass[NVDEV_SUBDEV_MC     ] =  nv98_mc_oclass;
 		device->oclass[NVDEV_SUBDEV_BUS    ] =  nv94_bus_oclass;
 		device->oclass[NVDEV_SUBDEV_TIMER  ] = &nv04_timer_oclass;
 		device->oclass[NVDEV_SUBDEV_FB     ] =  nva3_fb_oclass;
-		device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv50_instmem_oclass;
+		device->oclass[NVDEV_SUBDEV_INSTMEM] =  nv50_instmem_oclass;
 		device->oclass[NVDEV_SUBDEV_VM     ] = &nv50_vmmgr_oclass;
 		device->oclass[NVDEV_SUBDEV_BAR    ] = &nv50_bar_oclass;
 		device->oclass[NVDEV_SUBDEV_PWR    ] = &nva3_pwr_oclass;
@@ -372,12 +372,12 @@
 		device->oclass[NVDEV_SUBDEV_CLOCK  ] = &nva3_clock_oclass;
 		device->oclass[NVDEV_SUBDEV_THERM  ] = &nva3_therm_oclass;
 		device->oclass[NVDEV_SUBDEV_MXM    ] = &nv50_mxm_oclass;
-		device->oclass[NVDEV_SUBDEV_DEVINIT] = &nva3_devinit_oclass;
+		device->oclass[NVDEV_SUBDEV_DEVINIT] =  nva3_devinit_oclass;
 		device->oclass[NVDEV_SUBDEV_MC     ] =  nv98_mc_oclass;
 		device->oclass[NVDEV_SUBDEV_BUS    ] =  nv94_bus_oclass;
 		device->oclass[NVDEV_SUBDEV_TIMER  ] = &nv04_timer_oclass;
 		device->oclass[NVDEV_SUBDEV_FB     ] =  nva3_fb_oclass;
-		device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv50_instmem_oclass;
+		device->oclass[NVDEV_SUBDEV_INSTMEM] =  nv50_instmem_oclass;
 		device->oclass[NVDEV_SUBDEV_VM     ] = &nv50_vmmgr_oclass;
 		device->oclass[NVDEV_SUBDEV_BAR    ] = &nv50_bar_oclass;
 		device->oclass[NVDEV_SUBDEV_PWR    ] = &nva3_pwr_oclass;
@@ -401,12 +401,12 @@
 		device->oclass[NVDEV_SUBDEV_CLOCK  ] = &nva3_clock_oclass;
 		device->oclass[NVDEV_SUBDEV_THERM  ] = &nva3_therm_oclass;
 		device->oclass[NVDEV_SUBDEV_MXM    ] = &nv50_mxm_oclass;
-		device->oclass[NVDEV_SUBDEV_DEVINIT] = &nva3_devinit_oclass;
+		device->oclass[NVDEV_SUBDEV_DEVINIT] =  nva3_devinit_oclass;
 		device->oclass[NVDEV_SUBDEV_MC     ] =  nv98_mc_oclass;
 		device->oclass[NVDEV_SUBDEV_BUS    ] =  nv94_bus_oclass;
 		device->oclass[NVDEV_SUBDEV_TIMER  ] = &nv04_timer_oclass;
 		device->oclass[NVDEV_SUBDEV_FB     ] =  nva3_fb_oclass;
-		device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv50_instmem_oclass;
+		device->oclass[NVDEV_SUBDEV_INSTMEM] =  nv50_instmem_oclass;
 		device->oclass[NVDEV_SUBDEV_VM     ] = &nv50_vmmgr_oclass;
 		device->oclass[NVDEV_SUBDEV_BAR    ] = &nv50_bar_oclass;
 		device->oclass[NVDEV_SUBDEV_PWR    ] = &nva3_pwr_oclass;
@@ -430,12 +430,12 @@
 		device->oclass[NVDEV_SUBDEV_CLOCK  ] = &nva3_clock_oclass;
 		device->oclass[NVDEV_SUBDEV_THERM  ] = &nva3_therm_oclass;
 		device->oclass[NVDEV_SUBDEV_MXM    ] = &nv50_mxm_oclass;
-		device->oclass[NVDEV_SUBDEV_DEVINIT] = &nva3_devinit_oclass;
+		device->oclass[NVDEV_SUBDEV_DEVINIT] =  nvaf_devinit_oclass;
 		device->oclass[NVDEV_SUBDEV_MC     ] =  nv98_mc_oclass;
 		device->oclass[NVDEV_SUBDEV_BUS    ] =  nv94_bus_oclass;
 		device->oclass[NVDEV_SUBDEV_TIMER  ] = &nv04_timer_oclass;
 		device->oclass[NVDEV_SUBDEV_FB     ] =  nvaf_fb_oclass;
-		device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv50_instmem_oclass;
+		device->oclass[NVDEV_SUBDEV_INSTMEM] =  nv50_instmem_oclass;
 		device->oclass[NVDEV_SUBDEV_VM     ] = &nv50_vmmgr_oclass;
 		device->oclass[NVDEV_SUBDEV_BAR    ] = &nv50_bar_oclass;
 		device->oclass[NVDEV_SUBDEV_PWR    ] = &nva3_pwr_oclass;
diff --git a/drivers/gpu/drm/nouveau/core/engine/device/nvc0.c b/drivers/gpu/drm/nouveau/core/engine/device/nvc0.c
index dbc5e33..b7d66b5 100644
--- a/drivers/gpu/drm/nouveau/core/engine/device/nvc0.c
+++ b/drivers/gpu/drm/nouveau/core/engine/device/nvc0.c
@@ -65,14 +65,14 @@
 		device->oclass[NVDEV_SUBDEV_CLOCK  ] = &nvc0_clock_oclass;
 		device->oclass[NVDEV_SUBDEV_THERM  ] = &nva3_therm_oclass;
 		device->oclass[NVDEV_SUBDEV_MXM    ] = &nv50_mxm_oclass;
-		device->oclass[NVDEV_SUBDEV_DEVINIT] = &nvc0_devinit_oclass;
+		device->oclass[NVDEV_SUBDEV_DEVINIT] =  nvc0_devinit_oclass;
 		device->oclass[NVDEV_SUBDEV_MC     ] =  nvc0_mc_oclass;
 		device->oclass[NVDEV_SUBDEV_BUS    ] =  nvc0_bus_oclass;
 		device->oclass[NVDEV_SUBDEV_TIMER  ] = &nv04_timer_oclass;
 		device->oclass[NVDEV_SUBDEV_FB     ] =  nvc0_fb_oclass;
 		device->oclass[NVDEV_SUBDEV_LTCG   ] = &nvc0_ltcg_oclass;
 		device->oclass[NVDEV_SUBDEV_IBUS   ] = &nvc0_ibus_oclass;
-		device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv50_instmem_oclass;
+		device->oclass[NVDEV_SUBDEV_INSTMEM] =  nv50_instmem_oclass;
 		device->oclass[NVDEV_SUBDEV_VM     ] = &nvc0_vmmgr_oclass;
 		device->oclass[NVDEV_SUBDEV_BAR    ] = &nvc0_bar_oclass;
 		device->oclass[NVDEV_SUBDEV_PWR    ] = &nvc0_pwr_oclass;
@@ -97,14 +97,14 @@
 		device->oclass[NVDEV_SUBDEV_CLOCK  ] = &nvc0_clock_oclass;
 		device->oclass[NVDEV_SUBDEV_THERM  ] = &nva3_therm_oclass;
 		device->oclass[NVDEV_SUBDEV_MXM    ] = &nv50_mxm_oclass;
-		device->oclass[NVDEV_SUBDEV_DEVINIT] = &nvc0_devinit_oclass;
+		device->oclass[NVDEV_SUBDEV_DEVINIT] =  nvc0_devinit_oclass;
 		device->oclass[NVDEV_SUBDEV_MC     ] =  nvc0_mc_oclass;
 		device->oclass[NVDEV_SUBDEV_BUS    ] =  nvc0_bus_oclass;
 		device->oclass[NVDEV_SUBDEV_TIMER  ] = &nv04_timer_oclass;
 		device->oclass[NVDEV_SUBDEV_FB     ] =  nvc0_fb_oclass;
 		device->oclass[NVDEV_SUBDEV_LTCG   ] = &nvc0_ltcg_oclass;
 		device->oclass[NVDEV_SUBDEV_IBUS   ] = &nvc0_ibus_oclass;
-		device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv50_instmem_oclass;
+		device->oclass[NVDEV_SUBDEV_INSTMEM] =  nv50_instmem_oclass;
 		device->oclass[NVDEV_SUBDEV_VM     ] = &nvc0_vmmgr_oclass;
 		device->oclass[NVDEV_SUBDEV_BAR    ] = &nvc0_bar_oclass;
 		device->oclass[NVDEV_SUBDEV_PWR    ] = &nvc0_pwr_oclass;
@@ -129,14 +129,14 @@
 		device->oclass[NVDEV_SUBDEV_CLOCK  ] = &nvc0_clock_oclass;
 		device->oclass[NVDEV_SUBDEV_THERM  ] = &nva3_therm_oclass;
 		device->oclass[NVDEV_SUBDEV_MXM    ] = &nv50_mxm_oclass;
-		device->oclass[NVDEV_SUBDEV_DEVINIT] = &nvc0_devinit_oclass;
+		device->oclass[NVDEV_SUBDEV_DEVINIT] =  nvc0_devinit_oclass;
 		device->oclass[NVDEV_SUBDEV_MC     ] =  nvc3_mc_oclass;
 		device->oclass[NVDEV_SUBDEV_BUS    ] =  nvc0_bus_oclass;
 		device->oclass[NVDEV_SUBDEV_TIMER  ] = &nv04_timer_oclass;
 		device->oclass[NVDEV_SUBDEV_FB     ] =  nvc0_fb_oclass;
 		device->oclass[NVDEV_SUBDEV_LTCG   ] = &nvc0_ltcg_oclass;
 		device->oclass[NVDEV_SUBDEV_IBUS   ] = &nvc0_ibus_oclass;
-		device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv50_instmem_oclass;
+		device->oclass[NVDEV_SUBDEV_INSTMEM] =  nv50_instmem_oclass;
 		device->oclass[NVDEV_SUBDEV_VM     ] = &nvc0_vmmgr_oclass;
 		device->oclass[NVDEV_SUBDEV_BAR    ] = &nvc0_bar_oclass;
 		device->oclass[NVDEV_SUBDEV_PWR    ] = &nvc0_pwr_oclass;
@@ -160,14 +160,14 @@
 		device->oclass[NVDEV_SUBDEV_CLOCK  ] = &nvc0_clock_oclass;
 		device->oclass[NVDEV_SUBDEV_THERM  ] = &nva3_therm_oclass;
 		device->oclass[NVDEV_SUBDEV_MXM    ] = &nv50_mxm_oclass;
-		device->oclass[NVDEV_SUBDEV_DEVINIT] = &nvc0_devinit_oclass;
+		device->oclass[NVDEV_SUBDEV_DEVINIT] =  nvc0_devinit_oclass;
 		device->oclass[NVDEV_SUBDEV_MC     ] =  nvc0_mc_oclass;
 		device->oclass[NVDEV_SUBDEV_BUS    ] =  nvc0_bus_oclass;
 		device->oclass[NVDEV_SUBDEV_TIMER  ] = &nv04_timer_oclass;
 		device->oclass[NVDEV_SUBDEV_FB     ] =  nvc0_fb_oclass;
 		device->oclass[NVDEV_SUBDEV_LTCG   ] = &nvc0_ltcg_oclass;
 		device->oclass[NVDEV_SUBDEV_IBUS   ] = &nvc0_ibus_oclass;
-		device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv50_instmem_oclass;
+		device->oclass[NVDEV_SUBDEV_INSTMEM] =  nv50_instmem_oclass;
 		device->oclass[NVDEV_SUBDEV_VM     ] = &nvc0_vmmgr_oclass;
 		device->oclass[NVDEV_SUBDEV_BAR    ] = &nvc0_bar_oclass;
 		device->oclass[NVDEV_SUBDEV_PWR    ] = &nvc0_pwr_oclass;
@@ -192,14 +192,14 @@
 		device->oclass[NVDEV_SUBDEV_CLOCK  ] = &nvc0_clock_oclass;
 		device->oclass[NVDEV_SUBDEV_THERM  ] = &nva3_therm_oclass;
 		device->oclass[NVDEV_SUBDEV_MXM    ] = &nv50_mxm_oclass;
-		device->oclass[NVDEV_SUBDEV_DEVINIT] = &nvc0_devinit_oclass;
+		device->oclass[NVDEV_SUBDEV_DEVINIT] =  nvc0_devinit_oclass;
 		device->oclass[NVDEV_SUBDEV_MC     ] =  nvc3_mc_oclass;
 		device->oclass[NVDEV_SUBDEV_BUS    ] =  nvc0_bus_oclass;
 		device->oclass[NVDEV_SUBDEV_TIMER  ] = &nv04_timer_oclass;
 		device->oclass[NVDEV_SUBDEV_FB     ] =  nvc0_fb_oclass;
 		device->oclass[NVDEV_SUBDEV_LTCG   ] = &nvc0_ltcg_oclass;
 		device->oclass[NVDEV_SUBDEV_IBUS   ] = &nvc0_ibus_oclass;
-		device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv50_instmem_oclass;
+		device->oclass[NVDEV_SUBDEV_INSTMEM] =  nv50_instmem_oclass;
 		device->oclass[NVDEV_SUBDEV_VM     ] = &nvc0_vmmgr_oclass;
 		device->oclass[NVDEV_SUBDEV_BAR    ] = &nvc0_bar_oclass;
 		device->oclass[NVDEV_SUBDEV_PWR    ] = &nvc0_pwr_oclass;
@@ -224,14 +224,14 @@
 		device->oclass[NVDEV_SUBDEV_CLOCK  ] = &nvc0_clock_oclass;
 		device->oclass[NVDEV_SUBDEV_THERM  ] = &nva3_therm_oclass;
 		device->oclass[NVDEV_SUBDEV_MXM    ] = &nv50_mxm_oclass;
-		device->oclass[NVDEV_SUBDEV_DEVINIT] = &nvc0_devinit_oclass;
+		device->oclass[NVDEV_SUBDEV_DEVINIT] =  nvc0_devinit_oclass;
 		device->oclass[NVDEV_SUBDEV_MC     ] =  nvc3_mc_oclass;
 		device->oclass[NVDEV_SUBDEV_BUS    ] =  nvc0_bus_oclass;
 		device->oclass[NVDEV_SUBDEV_TIMER  ] = &nv04_timer_oclass;
 		device->oclass[NVDEV_SUBDEV_FB     ] =  nvc0_fb_oclass;
 		device->oclass[NVDEV_SUBDEV_LTCG   ] = &nvc0_ltcg_oclass;
 		device->oclass[NVDEV_SUBDEV_IBUS   ] = &nvc0_ibus_oclass;
-		device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv50_instmem_oclass;
+		device->oclass[NVDEV_SUBDEV_INSTMEM] =  nv50_instmem_oclass;
 		device->oclass[NVDEV_SUBDEV_VM     ] = &nvc0_vmmgr_oclass;
 		device->oclass[NVDEV_SUBDEV_BAR    ] = &nvc0_bar_oclass;
 		device->oclass[NVDEV_SUBDEV_PWR    ] = &nvc0_pwr_oclass;
@@ -255,14 +255,14 @@
 		device->oclass[NVDEV_SUBDEV_CLOCK  ] = &nvc0_clock_oclass;
 		device->oclass[NVDEV_SUBDEV_THERM  ] = &nva3_therm_oclass;
 		device->oclass[NVDEV_SUBDEV_MXM    ] = &nv50_mxm_oclass;
-		device->oclass[NVDEV_SUBDEV_DEVINIT] = &nvc0_devinit_oclass;
+		device->oclass[NVDEV_SUBDEV_DEVINIT] =  nvc0_devinit_oclass;
 		device->oclass[NVDEV_SUBDEV_MC     ] =  nvc0_mc_oclass;
 		device->oclass[NVDEV_SUBDEV_BUS    ] =  nvc0_bus_oclass;
 		device->oclass[NVDEV_SUBDEV_TIMER  ] = &nv04_timer_oclass;
 		device->oclass[NVDEV_SUBDEV_FB     ] =  nvc0_fb_oclass;
 		device->oclass[NVDEV_SUBDEV_LTCG   ] = &nvc0_ltcg_oclass;
 		device->oclass[NVDEV_SUBDEV_IBUS   ] = &nvc0_ibus_oclass;
-		device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv50_instmem_oclass;
+		device->oclass[NVDEV_SUBDEV_INSTMEM] =  nv50_instmem_oclass;
 		device->oclass[NVDEV_SUBDEV_VM     ] = &nvc0_vmmgr_oclass;
 		device->oclass[NVDEV_SUBDEV_BAR    ] = &nvc0_bar_oclass;
 		device->oclass[NVDEV_SUBDEV_PWR    ] = &nvc0_pwr_oclass;
@@ -287,14 +287,14 @@
 		device->oclass[NVDEV_SUBDEV_CLOCK  ] = &nvc0_clock_oclass;
 		device->oclass[NVDEV_SUBDEV_THERM  ] = &nvd0_therm_oclass;
 		device->oclass[NVDEV_SUBDEV_MXM    ] = &nv50_mxm_oclass;
-		device->oclass[NVDEV_SUBDEV_DEVINIT] = &nvc0_devinit_oclass;
+		device->oclass[NVDEV_SUBDEV_DEVINIT] =  nvc0_devinit_oclass;
 		device->oclass[NVDEV_SUBDEV_MC     ] =  nvc3_mc_oclass;
 		device->oclass[NVDEV_SUBDEV_BUS    ] =  nvc0_bus_oclass;
 		device->oclass[NVDEV_SUBDEV_TIMER  ] = &nv04_timer_oclass;
 		device->oclass[NVDEV_SUBDEV_FB     ] =  nvc0_fb_oclass;
 		device->oclass[NVDEV_SUBDEV_LTCG   ] = &nvc0_ltcg_oclass;
 		device->oclass[NVDEV_SUBDEV_IBUS   ] = &nvc0_ibus_oclass;
-		device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv50_instmem_oclass;
+		device->oclass[NVDEV_SUBDEV_INSTMEM] =  nv50_instmem_oclass;
 		device->oclass[NVDEV_SUBDEV_VM     ] = &nvc0_vmmgr_oclass;
 		device->oclass[NVDEV_SUBDEV_BAR    ] = &nvc0_bar_oclass;
 		device->oclass[NVDEV_SUBDEV_PWR    ] = &nvd0_pwr_oclass;
@@ -318,14 +318,14 @@
 		device->oclass[NVDEV_SUBDEV_CLOCK  ] = &nvc0_clock_oclass;
 		device->oclass[NVDEV_SUBDEV_THERM  ] = &nvd0_therm_oclass;
 		device->oclass[NVDEV_SUBDEV_MXM    ] = &nv50_mxm_oclass;
-		device->oclass[NVDEV_SUBDEV_DEVINIT] = &nvc0_devinit_oclass;
+		device->oclass[NVDEV_SUBDEV_DEVINIT] =  nvc0_devinit_oclass;
 		device->oclass[NVDEV_SUBDEV_MC     ] =  nvc3_mc_oclass;
 		device->oclass[NVDEV_SUBDEV_BUS    ] =  nvc0_bus_oclass;
 		device->oclass[NVDEV_SUBDEV_TIMER  ] = &nv04_timer_oclass;
 		device->oclass[NVDEV_SUBDEV_FB     ] =  nvc0_fb_oclass;
 		device->oclass[NVDEV_SUBDEV_LTCG   ] = &nvc0_ltcg_oclass;
 		device->oclass[NVDEV_SUBDEV_IBUS   ] = &nvc0_ibus_oclass;
-		device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv50_instmem_oclass;
+		device->oclass[NVDEV_SUBDEV_INSTMEM] =  nv50_instmem_oclass;
 		device->oclass[NVDEV_SUBDEV_VM     ] = &nvc0_vmmgr_oclass;
 		device->oclass[NVDEV_SUBDEV_BAR    ] = &nvc0_bar_oclass;
 		device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nvd0_dmaeng_oclass;
diff --git a/drivers/gpu/drm/nouveau/core/engine/device/nve0.c b/drivers/gpu/drm/nouveau/core/engine/device/nve0.c
index 3900104..987edbc 100644
--- a/drivers/gpu/drm/nouveau/core/engine/device/nve0.c
+++ b/drivers/gpu/drm/nouveau/core/engine/device/nve0.c
@@ -65,14 +65,14 @@
 		device->oclass[NVDEV_SUBDEV_CLOCK  ] = &nve0_clock_oclass;
 		device->oclass[NVDEV_SUBDEV_THERM  ] = &nvd0_therm_oclass;
 		device->oclass[NVDEV_SUBDEV_MXM    ] = &nv50_mxm_oclass;
-		device->oclass[NVDEV_SUBDEV_DEVINIT] = &nvc0_devinit_oclass;
+		device->oclass[NVDEV_SUBDEV_DEVINIT] =  nvc0_devinit_oclass;
 		device->oclass[NVDEV_SUBDEV_MC     ] =  nvc3_mc_oclass;
 		device->oclass[NVDEV_SUBDEV_BUS    ] =  nvc0_bus_oclass;
 		device->oclass[NVDEV_SUBDEV_TIMER  ] = &nv04_timer_oclass;
 		device->oclass[NVDEV_SUBDEV_FB     ] =  nve0_fb_oclass;
 		device->oclass[NVDEV_SUBDEV_LTCG   ] = &nvc0_ltcg_oclass;
 		device->oclass[NVDEV_SUBDEV_IBUS   ] = &nve0_ibus_oclass;
-		device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv50_instmem_oclass;
+		device->oclass[NVDEV_SUBDEV_INSTMEM] =  nv50_instmem_oclass;
 		device->oclass[NVDEV_SUBDEV_VM     ] = &nvc0_vmmgr_oclass;
 		device->oclass[NVDEV_SUBDEV_BAR    ] = &nvc0_bar_oclass;
 		device->oclass[NVDEV_SUBDEV_PWR    ] = &nvd0_pwr_oclass;
@@ -98,14 +98,14 @@
 		device->oclass[NVDEV_SUBDEV_CLOCK  ] = &nve0_clock_oclass;
 		device->oclass[NVDEV_SUBDEV_THERM  ] = &nvd0_therm_oclass;
 		device->oclass[NVDEV_SUBDEV_MXM    ] = &nv50_mxm_oclass;
-		device->oclass[NVDEV_SUBDEV_DEVINIT] = &nvc0_devinit_oclass;
+		device->oclass[NVDEV_SUBDEV_DEVINIT] =  nvc0_devinit_oclass;
 		device->oclass[NVDEV_SUBDEV_MC     ] =  nvc3_mc_oclass;
 		device->oclass[NVDEV_SUBDEV_BUS    ] =  nvc0_bus_oclass;
 		device->oclass[NVDEV_SUBDEV_TIMER  ] = &nv04_timer_oclass;
 		device->oclass[NVDEV_SUBDEV_FB     ] =  nve0_fb_oclass;
 		device->oclass[NVDEV_SUBDEV_LTCG   ] = &nvc0_ltcg_oclass;
 		device->oclass[NVDEV_SUBDEV_IBUS   ] = &nve0_ibus_oclass;
-		device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv50_instmem_oclass;
+		device->oclass[NVDEV_SUBDEV_INSTMEM] =  nv50_instmem_oclass;
 		device->oclass[NVDEV_SUBDEV_VM     ] = &nvc0_vmmgr_oclass;
 		device->oclass[NVDEV_SUBDEV_BAR    ] = &nvc0_bar_oclass;
 		device->oclass[NVDEV_SUBDEV_PWR    ] = &nvd0_pwr_oclass;
@@ -131,14 +131,14 @@
 		device->oclass[NVDEV_SUBDEV_CLOCK  ] = &nve0_clock_oclass;
 		device->oclass[NVDEV_SUBDEV_THERM  ] = &nvd0_therm_oclass;
 		device->oclass[NVDEV_SUBDEV_MXM    ] = &nv50_mxm_oclass;
-		device->oclass[NVDEV_SUBDEV_DEVINIT] = &nvc0_devinit_oclass;
+		device->oclass[NVDEV_SUBDEV_DEVINIT] =  nvc0_devinit_oclass;
 		device->oclass[NVDEV_SUBDEV_MC     ] =  nvc3_mc_oclass;
 		device->oclass[NVDEV_SUBDEV_BUS    ] =  nvc0_bus_oclass;
 		device->oclass[NVDEV_SUBDEV_TIMER  ] = &nv04_timer_oclass;
 		device->oclass[NVDEV_SUBDEV_FB     ] =  nve0_fb_oclass;
 		device->oclass[NVDEV_SUBDEV_LTCG   ] = &nvc0_ltcg_oclass;
 		device->oclass[NVDEV_SUBDEV_IBUS   ] = &nve0_ibus_oclass;
-		device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv50_instmem_oclass;
+		device->oclass[NVDEV_SUBDEV_INSTMEM] =  nv50_instmem_oclass;
 		device->oclass[NVDEV_SUBDEV_VM     ] = &nvc0_vmmgr_oclass;
 		device->oclass[NVDEV_SUBDEV_BAR    ] = &nvc0_bar_oclass;
 		device->oclass[NVDEV_SUBDEV_PWR    ] = &nvd0_pwr_oclass;
@@ -164,14 +164,14 @@
 		device->oclass[NVDEV_SUBDEV_CLOCK  ] = &nve0_clock_oclass;
 		device->oclass[NVDEV_SUBDEV_THERM  ] = &nvd0_therm_oclass;
 		device->oclass[NVDEV_SUBDEV_MXM    ] = &nv50_mxm_oclass;
-		device->oclass[NVDEV_SUBDEV_DEVINIT] = &nvc0_devinit_oclass;
+		device->oclass[NVDEV_SUBDEV_DEVINIT] =  nvc0_devinit_oclass;
 		device->oclass[NVDEV_SUBDEV_MC     ] =  nvc3_mc_oclass;
 		device->oclass[NVDEV_SUBDEV_BUS    ] =  nvc0_bus_oclass;
 		device->oclass[NVDEV_SUBDEV_TIMER  ] = &nv04_timer_oclass;
 		device->oclass[NVDEV_SUBDEV_FB     ] =  nve0_fb_oclass;
 		device->oclass[NVDEV_SUBDEV_LTCG   ] = &nvc0_ltcg_oclass;
 		device->oclass[NVDEV_SUBDEV_IBUS   ] = &nve0_ibus_oclass;
-		device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv50_instmem_oclass;
+		device->oclass[NVDEV_SUBDEV_INSTMEM] =  nv50_instmem_oclass;
 		device->oclass[NVDEV_SUBDEV_VM     ] = &nvc0_vmmgr_oclass;
 		device->oclass[NVDEV_SUBDEV_BAR    ] = &nvc0_bar_oclass;
 		device->oclass[NVDEV_SUBDEV_PWR    ] = &nvd0_pwr_oclass;
@@ -199,29 +199,27 @@
 		device->oclass[NVDEV_SUBDEV_CLOCK  ] = &nve0_clock_oclass;
 		device->oclass[NVDEV_SUBDEV_THERM  ] = &nvd0_therm_oclass;
 		device->oclass[NVDEV_SUBDEV_MXM    ] = &nv50_mxm_oclass;
-		device->oclass[NVDEV_SUBDEV_DEVINIT] = &nvc0_devinit_oclass;
+		device->oclass[NVDEV_SUBDEV_DEVINIT] =  nvc0_devinit_oclass;
 		device->oclass[NVDEV_SUBDEV_MC     ] =  nvc3_mc_oclass;
 		device->oclass[NVDEV_SUBDEV_BUS    ] =  nvc0_bus_oclass;
 		device->oclass[NVDEV_SUBDEV_TIMER  ] = &nv04_timer_oclass;
 		device->oclass[NVDEV_SUBDEV_FB     ] =  nve0_fb_oclass;
 		device->oclass[NVDEV_SUBDEV_LTCG   ] = &nvc0_ltcg_oclass;
 		device->oclass[NVDEV_SUBDEV_IBUS   ] = &nve0_ibus_oclass;
-		device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv50_instmem_oclass;
+		device->oclass[NVDEV_SUBDEV_INSTMEM] =  nv50_instmem_oclass;
 		device->oclass[NVDEV_SUBDEV_VM     ] = &nvc0_vmmgr_oclass;
 		device->oclass[NVDEV_SUBDEV_BAR    ] = &nvc0_bar_oclass;
 		device->oclass[NVDEV_SUBDEV_PWR    ] = &nv108_pwr_oclass;
 		device->oclass[NVDEV_SUBDEV_VOLT   ] = &nv40_volt_oclass;
 		device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nvd0_dmaeng_oclass;
-#if 0
-		device->oclass[NVDEV_ENGINE_FIFO   ] =  nve0_fifo_oclass;
+		device->oclass[NVDEV_ENGINE_FIFO   ] =  nv108_fifo_oclass;
 		device->oclass[NVDEV_ENGINE_SW     ] =  nvc0_software_oclass;
-		device->oclass[NVDEV_ENGINE_GR     ] =  nvf0_graph_oclass;
-#endif
+		device->oclass[NVDEV_ENGINE_GR     ] =  nv108_graph_oclass;
 		device->oclass[NVDEV_ENGINE_DISP   ] = &nvf0_disp_oclass;
-#if 0
 		device->oclass[NVDEV_ENGINE_COPY0  ] = &nve0_copy0_oclass;
 		device->oclass[NVDEV_ENGINE_COPY1  ] = &nve0_copy1_oclass;
 		device->oclass[NVDEV_ENGINE_COPY2  ] = &nve0_copy2_oclass;
+#if 0
 		device->oclass[NVDEV_ENGINE_BSP    ] = &nve0_bsp_oclass;
 		device->oclass[NVDEV_ENGINE_VP     ] = &nve0_vp_oclass;
 		device->oclass[NVDEV_ENGINE_PPP    ] = &nvc0_ppp_oclass;
diff --git a/drivers/gpu/drm/nouveau/core/engine/disp/nv04.c b/drivers/gpu/drm/nouveau/core/engine/disp/nv04.c
index a0bc8a8..7cf8b13 100644
--- a/drivers/gpu/drm/nouveau/core/engine/disp/nv04.c
+++ b/drivers/gpu/drm/nouveau/core/engine/disp/nv04.c
@@ -31,9 +31,45 @@
 	struct nouveau_disp base;
 };
 
+static int
+nv04_disp_scanoutpos(struct nouveau_object *object, u32 mthd,
+		     void *data, u32 size)
+{
+	struct nv04_disp_priv *priv = (void *)object->engine;
+	struct nv04_display_scanoutpos *args = data;
+	const int head = (mthd & NV04_DISP_MTHD_HEAD);
+	u32 line;
+
+	if (size < sizeof(*args))
+		return -EINVAL;
+
+	args->vblanks = nv_rd32(priv, 0x680800 + (head * 0x2000)) & 0xffff;
+	args->vtotal  = nv_rd32(priv, 0x680804 + (head * 0x2000)) & 0xffff;
+	args->vblanke = args->vtotal - 1;
+
+	args->hblanks = nv_rd32(priv, 0x680820 + (head * 0x2000)) & 0xffff;
+	args->htotal  = nv_rd32(priv, 0x680824 + (head * 0x2000)) & 0xffff;
+	args->hblanke = args->htotal - 1;
+
+	args->time[0] = ktime_to_ns(ktime_get());
+	line = nv_rd32(priv, 0x600868 + (head * 0x2000));
+	args->time[1] = ktime_to_ns(ktime_get());
+	args->hline = (line & 0xffff0000) >> 16;
+	args->vline = (line & 0x0000ffff);
+	return 0;
+}
+
+#define HEAD_MTHD(n) (n), (n) + 0x01
+
+static struct nouveau_omthds
+nv04_disp_omthds[] = {
+	{ HEAD_MTHD(NV04_DISP_SCANOUTPOS), nv04_disp_scanoutpos },
+	{}
+};
+
 static struct nouveau_oclass
 nv04_disp_sclass[] = {
-	{ NV04_DISP_CLASS, &nouveau_object_ofuncs },
+	{ NV04_DISP_CLASS, &nouveau_object_ofuncs, nv04_disp_omthds },
 	{},
 };
 
diff --git a/drivers/gpu/drm/nouveau/core/engine/disp/nv50.c b/drivers/gpu/drm/nouveau/core/engine/disp/nv50.c
index c168ae3..9ad722e 100644
--- a/drivers/gpu/drm/nouveau/core/engine/disp/nv50.c
+++ b/drivers/gpu/drm/nouveau/core/engine/disp/nv50.c
@@ -541,6 +541,35 @@
  * Base display object
  ******************************************************************************/
 
+int
+nv50_disp_base_scanoutpos(struct nouveau_object *object, u32 mthd,
+			  void *data, u32 size)
+{
+	struct nv50_disp_priv *priv = (void *)object->engine;
+	struct nv04_display_scanoutpos *args = data;
+	const int head = (mthd & NV50_DISP_MTHD_HEAD);
+	u32 blanke, blanks, total;
+
+	if (size < sizeof(*args) || head >= priv->head.nr)
+		return -EINVAL;
+	blanke = nv_rd32(priv, 0x610aec + (head * 0x540));
+	blanks = nv_rd32(priv, 0x610af4 + (head * 0x540));
+	total  = nv_rd32(priv, 0x610afc + (head * 0x540));
+
+	args->vblanke = (blanke & 0xffff0000) >> 16;
+	args->hblanke = (blanke & 0x0000ffff);
+	args->vblanks = (blanks & 0xffff0000) >> 16;
+	args->hblanks = (blanks & 0x0000ffff);
+	args->vtotal  = ( total & 0xffff0000) >> 16;
+	args->htotal  = ( total & 0x0000ffff);
+
+	args->time[0] = ktime_to_ns(ktime_get());
+	args->vline   = nv_rd32(priv, 0x616340 + (head * 0x800)) & 0xffff;
+	args->time[1] = ktime_to_ns(ktime_get()); /* vline read locks hline */
+	args->hline   = nv_rd32(priv, 0x616344 + (head * 0x800)) & 0xffff;
+	return 0;
+}
+
 static void
 nv50_disp_base_vblank_enable(struct nouveau_event *event, int head)
 {
@@ -675,6 +704,7 @@
 
 static struct nouveau_omthds
 nv50_disp_base_omthds[] = {
+	{ HEAD_MTHD(NV50_DISP_SCANOUTPOS)     , nv50_disp_base_scanoutpos },
 	{ SOR_MTHD(NV50_DISP_SOR_PWR)         , nv50_sor_mthd },
 	{ SOR_MTHD(NV50_DISP_SOR_LVDS_SCRIPT) , nv50_sor_mthd },
 	{ DAC_MTHD(NV50_DISP_DAC_PWR)         , nv50_dac_mthd },
@@ -1112,7 +1142,7 @@
 	if (conf != ~0) {
 		if (outp.location == 0 && outp.type == DCB_OUTPUT_DP) {
 			u32 soff = (ffs(outp.or) - 1) * 0x08;
-			u32 ctrl = nv_rd32(priv, 0x610798 + soff);
+			u32 ctrl = nv_rd32(priv, 0x610794 + soff);
 			u32 datarate;
 
 			switch ((ctrl & 0x000f0000) >> 16) {
diff --git a/drivers/gpu/drm/nouveau/core/engine/disp/nv50.h b/drivers/gpu/drm/nouveau/core/engine/disp/nv50.h
index 1ae6ceb..d31d426 100644
--- a/drivers/gpu/drm/nouveau/core/engine/disp/nv50.h
+++ b/drivers/gpu/drm/nouveau/core/engine/disp/nv50.h
@@ -43,6 +43,10 @@
 	} pior;
 };
 
+#define HEAD_MTHD(n) (n), (n) + 0x03
+
+int nv50_disp_base_scanoutpos(struct nouveau_object *, u32, void *, u32);
+
 #define DAC_MTHD(n) (n), (n) + 0x03
 
 int nv50_dac_mthd(struct nouveau_object *, u32, void *, u32);
@@ -132,13 +136,12 @@
 
 extern struct nouveau_omthds nv84_disp_base_omthds[];
 
-extern struct nouveau_omthds nva3_disp_base_omthds[];
-
 extern struct nouveau_ofuncs nvd0_disp_mast_ofuncs;
 extern struct nouveau_ofuncs nvd0_disp_sync_ofuncs;
 extern struct nouveau_ofuncs nvd0_disp_ovly_ofuncs;
 extern struct nouveau_ofuncs nvd0_disp_oimm_ofuncs;
 extern struct nouveau_ofuncs nvd0_disp_curs_ofuncs;
+extern struct nouveau_omthds nvd0_disp_base_omthds[];
 extern struct nouveau_ofuncs nvd0_disp_base_ofuncs;
 extern struct nouveau_oclass nvd0_disp_cclass;
 void nvd0_disp_intr_supervisor(struct work_struct *);
diff --git a/drivers/gpu/drm/nouveau/core/engine/disp/nv84.c b/drivers/gpu/drm/nouveau/core/engine/disp/nv84.c
index d8c74c0..ef9ce30 100644
--- a/drivers/gpu/drm/nouveau/core/engine/disp/nv84.c
+++ b/drivers/gpu/drm/nouveau/core/engine/disp/nv84.c
@@ -41,6 +41,7 @@
 
 struct nouveau_omthds
 nv84_disp_base_omthds[] = {
+	{ HEAD_MTHD(NV50_DISP_SCANOUTPOS)     , nv50_disp_base_scanoutpos },
 	{ SOR_MTHD(NV50_DISP_SOR_PWR)         , nv50_sor_mthd },
 	{ SOR_MTHD(NV84_DISP_SOR_HDMI_PWR)    , nv50_sor_mthd },
 	{ SOR_MTHD(NV50_DISP_SOR_LVDS_SCRIPT) , nv50_sor_mthd },
diff --git a/drivers/gpu/drm/nouveau/core/engine/disp/nv94.c b/drivers/gpu/drm/nouveau/core/engine/disp/nv94.c
index a66f949..a518543 100644
--- a/drivers/gpu/drm/nouveau/core/engine/disp/nv94.c
+++ b/drivers/gpu/drm/nouveau/core/engine/disp/nv94.c
@@ -41,6 +41,7 @@
 
 static struct nouveau_omthds
 nv94_disp_base_omthds[] = {
+	{ HEAD_MTHD(NV50_DISP_SCANOUTPOS)     , nv50_disp_base_scanoutpos },
 	{ SOR_MTHD(NV50_DISP_SOR_PWR)         , nv50_sor_mthd },
 	{ SOR_MTHD(NV84_DISP_SOR_HDMI_PWR)    , nv50_sor_mthd },
 	{ SOR_MTHD(NV50_DISP_SOR_LVDS_SCRIPT) , nv50_sor_mthd },
diff --git a/drivers/gpu/drm/nouveau/core/engine/disp/nva3.c b/drivers/gpu/drm/nouveau/core/engine/disp/nva3.c
index b754131..6ad6dce 100644
--- a/drivers/gpu/drm/nouveau/core/engine/disp/nva3.c
+++ b/drivers/gpu/drm/nouveau/core/engine/disp/nva3.c
@@ -39,8 +39,9 @@
 	{}
 };
 
-struct nouveau_omthds
+static struct nouveau_omthds
 nva3_disp_base_omthds[] = {
+	{ HEAD_MTHD(NV50_DISP_SCANOUTPOS)     , nv50_disp_base_scanoutpos },
 	{ SOR_MTHD(NV50_DISP_SOR_PWR)         , nv50_sor_mthd },
 	{ SOR_MTHD(NVA3_DISP_SOR_HDA_ELD)     , nv50_sor_mthd },
 	{ SOR_MTHD(NV84_DISP_SOR_HDMI_PWR)    , nv50_sor_mthd },
diff --git a/drivers/gpu/drm/nouveau/core/engine/disp/nvd0.c b/drivers/gpu/drm/nouveau/core/engine/disp/nvd0.c
index 378a015..1c5e4e8 100644
--- a/drivers/gpu/drm/nouveau/core/engine/disp/nvd0.c
+++ b/drivers/gpu/drm/nouveau/core/engine/disp/nvd0.c
@@ -440,6 +440,36 @@
  * Base display object
  ******************************************************************************/
 
+static int
+nvd0_disp_base_scanoutpos(struct nouveau_object *object, u32 mthd,
+			  void *data, u32 size)
+{
+	struct nv50_disp_priv *priv = (void *)object->engine;
+	struct nv04_display_scanoutpos *args = data;
+	const int head = (mthd & NV50_DISP_MTHD_HEAD);
+	u32 blanke, blanks, total;
+
+	if (size < sizeof(*args) || head >= priv->head.nr)
+		return -EINVAL;
+
+	total  = nv_rd32(priv, 0x640414 + (head * 0x300));
+	blanke = nv_rd32(priv, 0x64041c + (head * 0x300));
+	blanks = nv_rd32(priv, 0x640420 + (head * 0x300));
+
+	args->vblanke = (blanke & 0xffff0000) >> 16;
+	args->hblanke = (blanke & 0x0000ffff);
+	args->vblanks = (blanks & 0xffff0000) >> 16;
+	args->hblanks = (blanks & 0x0000ffff);
+	args->vtotal  = ( total & 0xffff0000) >> 16;
+	args->htotal  = ( total & 0x0000ffff);
+
+	args->time[0] = ktime_to_ns(ktime_get());
+	args->vline   = nv_rd32(priv, 0x616340 + (head * 0x800)) & 0xffff;
+	args->time[1] = ktime_to_ns(ktime_get()); /* vline read locks hline */
+	args->hline   = nv_rd32(priv, 0x616344 + (head * 0x800)) & 0xffff;
+	return 0;
+}
+
 static void
 nvd0_disp_base_vblank_enable(struct nouveau_event *event, int head)
 {
@@ -573,9 +603,24 @@
 	.fini = nvd0_disp_base_fini,
 };
 
+struct nouveau_omthds
+nvd0_disp_base_omthds[] = {
+	{ HEAD_MTHD(NV50_DISP_SCANOUTPOS)     , nvd0_disp_base_scanoutpos },
+	{ SOR_MTHD(NV50_DISP_SOR_PWR)         , nv50_sor_mthd },
+	{ SOR_MTHD(NVA3_DISP_SOR_HDA_ELD)     , nv50_sor_mthd },
+	{ SOR_MTHD(NV84_DISP_SOR_HDMI_PWR)    , nv50_sor_mthd },
+	{ SOR_MTHD(NV50_DISP_SOR_LVDS_SCRIPT) , nv50_sor_mthd },
+	{ DAC_MTHD(NV50_DISP_DAC_PWR)         , nv50_dac_mthd },
+	{ DAC_MTHD(NV50_DISP_DAC_LOAD)        , nv50_dac_mthd },
+	{ PIOR_MTHD(NV50_DISP_PIOR_PWR)       , nv50_pior_mthd },
+	{ PIOR_MTHD(NV50_DISP_PIOR_TMDS_PWR)  , nv50_pior_mthd },
+	{ PIOR_MTHD(NV50_DISP_PIOR_DP_PWR)    , nv50_pior_mthd },
+	{},
+};
+
 static struct nouveau_oclass
 nvd0_disp_base_oclass[] = {
-	{ NVD0_DISP_CLASS, &nvd0_disp_base_ofuncs, nva3_disp_base_omthds },
+	{ NVD0_DISP_CLASS, &nvd0_disp_base_ofuncs, nvd0_disp_base_omthds },
 	{}
 };
 
@@ -967,9 +1012,6 @@
 	int heads = nv_rd32(parent, 0x022448);
 	int ret;
 
-	if (nv_rd32(parent, 0x022500) & 0x00000001)
-		return -ENODEV;
-
 	ret = nouveau_disp_create(parent, engine, oclass, heads,
 				  "PDISP", "display", &priv);
 	*pobject = nv_object(priv);
diff --git a/drivers/gpu/drm/nouveau/core/engine/disp/nve0.c b/drivers/gpu/drm/nouveau/core/engine/disp/nve0.c
index fb1fe6a..ab63f32 100644
--- a/drivers/gpu/drm/nouveau/core/engine/disp/nve0.c
+++ b/drivers/gpu/drm/nouveau/core/engine/disp/nve0.c
@@ -41,7 +41,7 @@
 
 static struct nouveau_oclass
 nve0_disp_base_oclass[] = {
-	{ NVE0_DISP_CLASS, &nvd0_disp_base_ofuncs, nva3_disp_base_omthds },
+	{ NVE0_DISP_CLASS, &nvd0_disp_base_ofuncs, nvd0_disp_base_omthds },
 	{}
 };
 
@@ -54,9 +54,6 @@
 	int heads = nv_rd32(parent, 0x022448);
 	int ret;
 
-	if (nv_rd32(parent, 0x022500) & 0x00000001)
-		return -ENODEV;
-
 	ret = nouveau_disp_create(parent, engine, oclass, heads,
 				  "PDISP", "display", &priv);
 	*pobject = nv_object(priv);
diff --git a/drivers/gpu/drm/nouveau/core/engine/disp/nvf0.c b/drivers/gpu/drm/nouveau/core/engine/disp/nvf0.c
index 42aa6b9..05fee10 100644
--- a/drivers/gpu/drm/nouveau/core/engine/disp/nvf0.c
+++ b/drivers/gpu/drm/nouveau/core/engine/disp/nvf0.c
@@ -41,7 +41,7 @@
 
 static struct nouveau_oclass
 nvf0_disp_base_oclass[] = {
-	{ NVF0_DISP_CLASS, &nvd0_disp_base_ofuncs, nva3_disp_base_omthds },
+	{ NVF0_DISP_CLASS, &nvd0_disp_base_ofuncs, nvd0_disp_base_omthds },
 	{}
 };
 
@@ -54,9 +54,6 @@
 	int heads = nv_rd32(parent, 0x022448);
 	int ret;
 
-	if (nv_rd32(parent, 0x022500) & 0x00000001)
-		return -ENODEV;
-
 	ret = nouveau_disp_create(parent, engine, oclass, heads,
 				  "PDISP", "display", &priv);
 	*pobject = nv_object(priv);
diff --git a/drivers/gpu/drm/nouveau/core/engine/disp/vga.c b/drivers/gpu/drm/nouveau/core/engine/disp/vga.c
index 5a1c684..8836c3c 100644
--- a/drivers/gpu/drm/nouveau/core/engine/disp/vga.c
+++ b/drivers/gpu/drm/nouveau/core/engine/disp/vga.c
@@ -138,10 +138,15 @@
 bool
 nv_lockvgac(void *obj, bool lock)
 {
+	struct nouveau_device *dev = nv_device(obj);
+
 	bool locked = !nv_rdvgac(obj, 0, 0x1f);
 	u8 data = lock ? 0x99 : 0x57;
-	nv_wrvgac(obj, 0, 0x1f, data);
-	if (nv_device(obj)->chipset == 0x11) {
+	if (dev->card_type < NV_50)
+		nv_wrvgac(obj, 0, 0x1f, data);
+	else
+		nv_wrvgac(obj, 0, 0x3f, data);
+	if (dev->chipset == 0x11) {
 		if (!(nv_rd32(obj, 0x001084) & 0x10000000))
 			nv_wrvgac(obj, 1, 0x1f, data);
 	}
diff --git a/drivers/gpu/drm/nouveau/core/engine/falcon.c b/drivers/gpu/drm/nouveau/core/engine/falcon.c
index e03fc8e..5e077e4 100644
--- a/drivers/gpu/drm/nouveau/core/engine/falcon.c
+++ b/drivers/gpu/drm/nouveau/core/engine/falcon.c
@@ -56,6 +56,16 @@
 	nv_wr32(falcon, falcon->addr + addr, data);
 }
 
+static void *
+vmemdup(const void *src, size_t len)
+{
+	void *p = vmalloc(len);
+
+	if (p)
+		memcpy(p, src, len);
+	return p;
+}
+
 int
 _nouveau_falcon_init(struct nouveau_object *object)
 {
@@ -111,7 +121,7 @@
 
 		ret = request_firmware(&fw, name, &device->pdev->dev);
 		if (ret == 0) {
-			falcon->code.data = kmemdup(fw->data, fw->size, GFP_KERNEL);
+			falcon->code.data = vmemdup(fw->data, fw->size);
 			falcon->code.size = fw->size;
 			falcon->data.data = NULL;
 			falcon->data.size = 0;
@@ -134,7 +144,7 @@
 			return ret;
 		}
 
-		falcon->data.data = kmemdup(fw->data, fw->size, GFP_KERNEL);
+		falcon->data.data = vmemdup(fw->data, fw->size);
 		falcon->data.size = fw->size;
 		release_firmware(fw);
 		if (!falcon->data.data)
@@ -149,7 +159,7 @@
 			return ret;
 		}
 
-		falcon->code.data = kmemdup(fw->data, fw->size, GFP_KERNEL);
+		falcon->code.data = vmemdup(fw->data, fw->size);
 		falcon->code.size = fw->size;
 		release_firmware(fw);
 		if (!falcon->code.data)
@@ -235,8 +245,8 @@
 	if (!suspend) {
 		nouveau_gpuobj_ref(NULL, &falcon->core);
 		if (falcon->external) {
-			kfree(falcon->data.data);
-			kfree(falcon->code.data);
+			vfree(falcon->data.data);
+			vfree(falcon->code.data);
 			falcon->code.data = NULL;
 		}
 	}
diff --git a/drivers/gpu/drm/nouveau/core/engine/fifo/nv108.c b/drivers/gpu/drm/nouveau/core/engine/fifo/nv108.c
new file mode 100644
index 0000000..09362a5
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/engine/fifo/nv108.c
@@ -0,0 +1,37 @@
+/*
+ * Copyright 2013 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#include "nve0.h"
+
+struct nouveau_oclass *
+nv108_fifo_oclass = &(struct nve0_fifo_impl) {
+	.base.handle = NV_ENGINE(FIFO, 0x08),
+	.base.ofuncs = &(struct nouveau_ofuncs) {
+		.ctor = nve0_fifo_ctor,
+		.dtor = nve0_fifo_dtor,
+		.init = nve0_fifo_init,
+		.fini = _nouveau_fifo_fini,
+	},
+	.channels = 1024,
+}.base;
diff --git a/drivers/gpu/drm/nouveau/core/engine/fifo/nvc0.c b/drivers/gpu/drm/nouveau/core/engine/fifo/nvc0.c
index 9ac94d4..b22a33f 100644
--- a/drivers/gpu/drm/nouveau/core/engine/fifo/nvc0.c
+++ b/drivers/gpu/drm/nouveau/core/engine/fifo/nvc0.c
@@ -33,6 +33,7 @@
 
 #include <subdev/timer.h>
 #include <subdev/bar.h>
+#include <subdev/fb.h>
 #include <subdev/vm.h>
 
 #include <engine/dmaobj.h>
diff --git a/drivers/gpu/drm/nouveau/core/engine/fifo/nve0.c b/drivers/gpu/drm/nouveau/core/engine/fifo/nve0.c
index 04f4129..54c1b5b 100644
--- a/drivers/gpu/drm/nouveau/core/engine/fifo/nve0.c
+++ b/drivers/gpu/drm/nouveau/core/engine/fifo/nve0.c
@@ -33,10 +33,12 @@
 
 #include <subdev/timer.h>
 #include <subdev/bar.h>
+#include <subdev/fb.h>
 #include <subdev/vm.h>
 
 #include <engine/dmaobj.h>
-#include <engine/fifo.h>
+
+#include "nve0.h"
 
 #define _(a,b) { (a), ((1ULL << (a)) | (b)) }
 static const struct {
@@ -56,8 +58,8 @@
 #define FIFO_ENGINE_NR ARRAY_SIZE(fifo_engine)
 
 struct nve0_fifo_engn {
-	struct nouveau_gpuobj *playlist[2];
-	int cur_playlist;
+	struct nouveau_gpuobj *runlist[2];
+	int cur_runlist;
 };
 
 struct nve0_fifo_priv {
@@ -86,7 +88,7 @@
  ******************************************************************************/
 
 static void
-nve0_fifo_playlist_update(struct nve0_fifo_priv *priv, u32 engine)
+nve0_fifo_runlist_update(struct nve0_fifo_priv *priv, u32 engine)
 {
 	struct nouveau_bar *bar = nouveau_bar(priv);
 	struct nve0_fifo_engn *engn = &priv->engine[engine];
@@ -95,8 +97,8 @@
 	int i, p;
 
 	mutex_lock(&nv_subdev(priv)->mutex);
-	cur = engn->playlist[engn->cur_playlist];
-	engn->cur_playlist = !engn->cur_playlist;
+	cur = engn->runlist[engn->cur_runlist];
+	engn->cur_runlist = !engn->cur_runlist;
 
 	for (i = 0, p = 0; i < priv->base.max; i++) {
 		u32 ctrl = nv_rd32(priv, 0x800004 + (i * 8)) & 0x001f0001;
@@ -110,8 +112,8 @@
 
 	nv_wr32(priv, 0x002270, cur->addr >> 12);
 	nv_wr32(priv, 0x002274, (engine << 20) | (p >> 3));
-	if (!nv_wait(priv, 0x002284 + (engine * 4), 0x00100000, 0x00000000))
-		nv_error(priv, "playlist %d update timeout\n", engine);
+	if (!nv_wait(priv, 0x002284 + (engine * 8), 0x00100000, 0x00000000))
+		nv_error(priv, "runlist %d update timeout\n", engine);
 	mutex_unlock(&nv_subdev(priv)->mutex);
 }
 
@@ -278,7 +280,7 @@
 	nv_mask(priv, 0x800004 + (chid * 8), 0x000f0000, chan->engine << 16);
 	nv_wr32(priv, 0x800000 + (chid * 8), 0x80000000 | base->addr >> 12);
 	nv_mask(priv, 0x800004 + (chid * 8), 0x00000400, 0x00000400);
-	nve0_fifo_playlist_update(priv, chan->engine);
+	nve0_fifo_runlist_update(priv, chan->engine);
 	nv_mask(priv, 0x800004 + (chid * 8), 0x00000400, 0x00000400);
 	return 0;
 }
@@ -291,7 +293,7 @@
 	u32 chid = chan->base.chid;
 
 	nv_mask(priv, 0x800004 + (chid * 8), 0x00000800, 0x00000800);
-	nve0_fifo_playlist_update(priv, chan->engine);
+	nve0_fifo_runlist_update(priv, chan->engine);
 	nv_wr32(priv, 0x800000 + (chid * 8), 0x00000000);
 
 	return nouveau_fifo_channel_fini(&chan->base, suspend);
@@ -375,54 +377,189 @@
  * PFIFO engine
  ******************************************************************************/
 
-static const struct nouveau_enum nve0_fifo_fault_unit[] = {
+static const struct nouveau_enum nve0_fifo_sched_reason[] = {
+	{ 0x0a, "CTXSW_TIMEOUT" },
+	{}
+};
+
+static const struct nouveau_enum nve0_fifo_fault_engine[] = {
+	{ 0x00, "GR", NULL, NVDEV_ENGINE_GR },
+	{ 0x03, "IFB" },
+	{ 0x04, "BAR1", NULL, NVDEV_SUBDEV_BAR },
+	{ 0x05, "BAR3", NULL, NVDEV_SUBDEV_INSTMEM },
+	{ 0x07, "PBDMA0", NULL, NVDEV_ENGINE_FIFO },
+	{ 0x08, "PBDMA1", NULL, NVDEV_ENGINE_FIFO },
+	{ 0x09, "PBDMA2", NULL, NVDEV_ENGINE_FIFO },
+	{ 0x10, "MSVLD", NULL, NVDEV_ENGINE_BSP },
+	{ 0x11, "MSPPP", NULL, NVDEV_ENGINE_PPP },
+	{ 0x13, "PERF" },
+	{ 0x14, "MSPDEC", NULL, NVDEV_ENGINE_VP },
+	{ 0x15, "CE0", NULL, NVDEV_ENGINE_COPY0 },
+	{ 0x16, "CE1", NULL, NVDEV_ENGINE_COPY1 },
+	{ 0x17, "PMU" },
+	{ 0x19, "MSENC", NULL, NVDEV_ENGINE_VENC },
+	{ 0x1b, "CE2", NULL, NVDEV_ENGINE_COPY2 },
 	{}
 };
 
 static const struct nouveau_enum nve0_fifo_fault_reason[] = {
-	{ 0x00, "PT_NOT_PRESENT" },
-	{ 0x01, "PT_TOO_SHORT" },
-	{ 0x02, "PAGE_NOT_PRESENT" },
-	{ 0x03, "VM_LIMIT_EXCEEDED" },
-	{ 0x04, "NO_CHANNEL" },
-	{ 0x05, "PAGE_SYSTEM_ONLY" },
-	{ 0x06, "PAGE_READ_ONLY" },
-	{ 0x0a, "COMPRESSED_SYSRAM" },
-	{ 0x0c, "INVALID_STORAGE_TYPE" },
+	{ 0x00, "PDE" },
+	{ 0x01, "PDE_SIZE" },
+	{ 0x02, "PTE" },
+	{ 0x03, "VA_LIMIT_VIOLATION" },
+	{ 0x04, "UNBOUND_INST_BLOCK" },
+	{ 0x05, "PRIV_VIOLATION" },
+	{ 0x06, "RO_VIOLATION" },
+	{ 0x07, "WO_VIOLATION" },
+	{ 0x08, "PITCH_MASK_VIOLATION" },
+	{ 0x09, "WORK_CREATION" },
+	{ 0x0a, "UNSUPPORTED_APERTURE" },
+	{ 0x0b, "COMPRESSION_FAILURE" },
+	{ 0x0c, "UNSUPPORTED_KIND" },
+	{ 0x0d, "REGION_VIOLATION" },
+	{ 0x0e, "BOTH_PTES_VALID" },
+	{ 0x0f, "INFO_TYPE_POISONED" },
 	{}
 };
 
 static const struct nouveau_enum nve0_fifo_fault_hubclient[] = {
+	{ 0x00, "VIP" },
+	{ 0x01, "CE0" },
+	{ 0x02, "CE1" },
+	{ 0x03, "DNISO" },
+	{ 0x04, "FE" },
+	{ 0x05, "FECS" },
+	{ 0x06, "HOST" },
+	{ 0x07, "HOST_CPU" },
+	{ 0x08, "HOST_CPU_NB" },
+	{ 0x09, "ISO" },
+	{ 0x0a, "MMU" },
+	{ 0x0b, "MSPDEC" },
+	{ 0x0c, "MSPPP" },
+	{ 0x0d, "MSVLD" },
+	{ 0x0e, "NISO" },
+	{ 0x0f, "P2P" },
+	{ 0x10, "PD" },
+	{ 0x11, "PERF" },
+	{ 0x12, "PMU" },
+	{ 0x13, "RASTERTWOD" },
+	{ 0x14, "SCC" },
+	{ 0x15, "SCC_NB" },
+	{ 0x16, "SEC" },
+	{ 0x17, "SSYNC" },
+	{ 0x18, "GR_COPY" },
+	{ 0x19, "CE2" },
+	{ 0x1a, "XV" },
+	{ 0x1b, "MMU_NB" },
+	{ 0x1c, "MSENC" },
+	{ 0x1d, "DFALCON" },
+	{ 0x1e, "SKED" },
+	{ 0x1f, "AFALCON" },
 	{}
 };
 
 static const struct nouveau_enum nve0_fifo_fault_gpcclient[] = {
+	{ 0x00, "L1_0" }, { 0x01, "T1_0" }, { 0x02, "PE_0" },
+	{ 0x03, "L1_1" }, { 0x04, "T1_1" }, { 0x05, "PE_1" },
+	{ 0x06, "L1_2" }, { 0x07, "T1_2" }, { 0x08, "PE_2" },
+	{ 0x09, "L1_3" }, { 0x0a, "T1_3" }, { 0x0b, "PE_3" },
+	{ 0x0c, "RAST" },
+	{ 0x0d, "GCC" },
+	{ 0x0e, "GPCCS" },
+	{ 0x0f, "PROP_0" },
+	{ 0x10, "PROP_1" },
+	{ 0x11, "PROP_2" },
+	{ 0x12, "PROP_3" },
+	{ 0x13, "L1_4" }, { 0x14, "T1_4" }, { 0x15, "PE_4" },
+	{ 0x16, "L1_5" }, { 0x17, "T1_5" }, { 0x18, "PE_5" },
+	{ 0x19, "L1_6" }, { 0x1a, "T1_6" }, { 0x1b, "PE_6" },
+	{ 0x1c, "L1_7" }, { 0x1d, "T1_7" }, { 0x1e, "PE_7" },
+	{ 0x1f, "GPM" },
+	{ 0x20, "LTP_UTLB_0" },
+	{ 0x21, "LTP_UTLB_1" },
+	{ 0x22, "LTP_UTLB_2" },
+	{ 0x23, "LTP_UTLB_3" },
+	{ 0x24, "GPC_RGG_UTLB" },
 	{}
 };
 
-static const struct nouveau_bitfield nve0_fifo_subfifo_intr[] = {
-	{ 0x00200000, "ILLEGAL_MTHD" },
-	{ 0x00800000, "EMPTY_SUBC" },
+static const struct nouveau_bitfield nve0_fifo_pbdma_intr[] = {
+	{ 0x00000001, "MEMREQ" },
+	{ 0x00000002, "MEMACK_TIMEOUT" },
+	{ 0x00000004, "MEMACK_EXTRA" },
+	{ 0x00000008, "MEMDAT_TIMEOUT" },
+	{ 0x00000010, "MEMDAT_EXTRA" },
+	{ 0x00000020, "MEMFLUSH" },
+	{ 0x00000040, "MEMOP" },
+	{ 0x00000080, "LBCONNECT" },
+	{ 0x00000100, "LBREQ" },
+	{ 0x00000200, "LBACK_TIMEOUT" },
+	{ 0x00000400, "LBACK_EXTRA" },
+	{ 0x00000800, "LBDAT_TIMEOUT" },
+	{ 0x00001000, "LBDAT_EXTRA" },
+	{ 0x00002000, "GPFIFO" },
+	{ 0x00004000, "GPPTR" },
+	{ 0x00008000, "GPENTRY" },
+	{ 0x00010000, "GPCRC" },
+	{ 0x00020000, "PBPTR" },
+	{ 0x00040000, "PBENTRY" },
+	{ 0x00080000, "PBCRC" },
+	{ 0x00100000, "XBARCONNECT" },
+	{ 0x00200000, "METHOD" },
+	{ 0x00400000, "METHODCRC" },
+	{ 0x00800000, "DEVICE" },
+	{ 0x02000000, "SEMAPHORE" },
+	{ 0x04000000, "ACQUIRE" },
+	{ 0x08000000, "PRI" },
+	{ 0x20000000, "NO_CTXSW_SEG" },
+	{ 0x40000000, "PBSEG" },
+	{ 0x80000000, "SIGNATURE" },
 	{}
 };
 
 static void
-nve0_fifo_isr_vm_fault(struct nve0_fifo_priv *priv, int unit)
+nve0_fifo_intr_sched(struct nve0_fifo_priv *priv)
+{
+	u32 intr = nv_rd32(priv, 0x00254c);
+	u32 code = intr & 0x000000ff;
+	nv_error(priv, "SCHED_ERROR [");
+	nouveau_enum_print(nve0_fifo_sched_reason, code);
+	pr_cont("]\n");
+}
+
+static void
+nve0_fifo_intr_chsw(struct nve0_fifo_priv *priv)
+{
+	u32 stat = nv_rd32(priv, 0x00256c);
+	nv_error(priv, "CHSW_ERROR 0x%08x\n", stat);
+	nv_wr32(priv, 0x00256c, stat);
+}
+
+static void
+nve0_fifo_intr_dropped_fault(struct nve0_fifo_priv *priv)
+{
+	u32 stat = nv_rd32(priv, 0x00259c);
+	nv_error(priv, "DROPPED_MMU_FAULT 0x%08x\n", stat);
+}
+
+static void
+nve0_fifo_intr_fault(struct nve0_fifo_priv *priv, int unit)
 {
 	u32 inst = nv_rd32(priv, 0x2800 + (unit * 0x10));
 	u32 valo = nv_rd32(priv, 0x2804 + (unit * 0x10));
 	u32 vahi = nv_rd32(priv, 0x2808 + (unit * 0x10));
 	u32 stat = nv_rd32(priv, 0x280c + (unit * 0x10));
 	u32 client = (stat & 0x00001f00) >> 8;
-	const struct nouveau_enum *en;
-	struct nouveau_engine *engine;
+	struct nouveau_engine *engine = NULL;
 	struct nouveau_object *engctx = NULL;
+	const struct nouveau_enum *en;
+	const char *name = "unknown";
 
 	nv_error(priv, "PFIFO: %s fault at 0x%010llx [", (stat & 0x00000080) ?
 		       "write" : "read", (u64)vahi << 32 | valo);
 	nouveau_enum_print(nve0_fifo_fault_reason, stat & 0x0000000f);
 	pr_cont("] from ");
-	en = nouveau_enum_print(nve0_fifo_fault_unit, unit);
+	en = nouveau_enum_print(nve0_fifo_fault_engine, unit);
 	if (stat & 0x00000040) {
 		pr_cont("/");
 		nouveau_enum_print(nve0_fifo_fault_hubclient, client);
@@ -432,14 +569,22 @@
 	}
 
 	if (en && en->data2) {
-		engine = nouveau_engine(priv, en->data2);
-		if (engine)
-			engctx = nouveau_engctx_get(engine, inst);
-
+		if (en->data2 == NVDEV_SUBDEV_BAR) {
+			nv_mask(priv, 0x001704, 0x00000000, 0x00000000);
+			name = "BAR1";
+		} else
+		if (en->data2 == NVDEV_SUBDEV_INSTMEM) {
+			nv_mask(priv, 0x001714, 0x00000000, 0x00000000);
+			name = "BAR3";
+		} else {
+			engine = nouveau_engine(priv, en->data2);
+			if (engine) {
+				engctx = nouveau_engctx_get(engine, inst);
+				name   = nouveau_client_name(engctx);
+			}
+		}
 	}
-
-	pr_cont(" on channel 0x%010llx [%s]\n", (u64)inst << 12,
-			nouveau_client_name(engctx));
+	pr_cont(" on channel 0x%010llx [%s]\n", (u64)inst << 12, name);
 
 	nouveau_engctx_put(engctx);
 }
@@ -471,7 +616,7 @@
 }
 
 static void
-nve0_fifo_isr_subfifo_intr(struct nve0_fifo_priv *priv, int unit)
+nve0_fifo_intr_pbdma(struct nve0_fifo_priv *priv, int unit)
 {
 	u32 stat = nv_rd32(priv, 0x040108 + (unit * 0x2000));
 	u32 addr = nv_rd32(priv, 0x0400c0 + (unit * 0x2000));
@@ -487,11 +632,11 @@
 	}
 
 	if (show) {
-		nv_error(priv, "SUBFIFO%d:", unit);
-		nouveau_bitfield_print(nve0_fifo_subfifo_intr, show);
+		nv_error(priv, "PBDMA%d:", unit);
+		nouveau_bitfield_print(nve0_fifo_pbdma_intr, show);
 		pr_cont("\n");
 		nv_error(priv,
-			 "SUBFIFO%d: ch %d [%s] subc %d mthd 0x%04x data 0x%08x\n",
+			 "PBDMA%d: ch %d [%s] subc %d mthd 0x%04x data 0x%08x\n",
 			 unit, chid,
 			 nouveau_client_name_for_fifo_chid(&priv->base, chid),
 			 subc, mthd, data);
@@ -508,19 +653,56 @@
 	u32 mask = nv_rd32(priv, 0x002140);
 	u32 stat = nv_rd32(priv, 0x002100) & mask;
 
+	if (stat & 0x00000001) {
+		u32 stat = nv_rd32(priv, 0x00252c);
+		nv_error(priv, "BIND_ERROR 0x%08x\n", stat);
+		nv_wr32(priv, 0x002100, 0x00000001);
+		stat &= ~0x00000001;
+	}
+
+	if (stat & 0x00000010) {
+		nv_error(priv, "PIO_ERROR\n");
+		nv_wr32(priv, 0x002100, 0x00000010);
+		stat &= ~0x00000010;
+	}
+
 	if (stat & 0x00000100) {
-		nv_warn(priv, "unknown status 0x00000100\n");
+		nve0_fifo_intr_sched(priv);
 		nv_wr32(priv, 0x002100, 0x00000100);
 		stat &= ~0x00000100;
 	}
 
+	if (stat & 0x00010000) {
+		nve0_fifo_intr_chsw(priv);
+		nv_wr32(priv, 0x002100, 0x00010000);
+		stat &= ~0x00010000;
+	}
+
+	if (stat & 0x00800000) {
+		nv_error(priv, "FB_FLUSH_TIMEOUT\n");
+		nv_wr32(priv, 0x002100, 0x00800000);
+		stat &= ~0x00800000;
+	}
+
+	if (stat & 0x01000000) {
+		nv_error(priv, "LB_ERROR\n");
+		nv_wr32(priv, 0x002100, 0x01000000);
+		stat &= ~0x01000000;
+	}
+
+	if (stat & 0x08000000) {
+		nve0_fifo_intr_dropped_fault(priv);
+		nv_wr32(priv, 0x002100, 0x08000000);
+		stat &= ~0x08000000;
+	}
+
 	if (stat & 0x10000000) {
 		u32 units = nv_rd32(priv, 0x00259c);
 		u32 u = units;
 
 		while (u) {
 			int i = ffs(u) - 1;
-			nve0_fifo_isr_vm_fault(priv, i);
+			nve0_fifo_intr_fault(priv, i);
 			u &= ~(1 << i);
 		}
 
@@ -529,22 +711,28 @@
 	}
 
 	if (stat & 0x20000000) {
-		u32 units = nv_rd32(priv, 0x0025a0);
-		u32 u = units;
+		u32 mask = nv_rd32(priv, 0x0025a0);
+		u32 temp = mask;
 
-		while (u) {
-			int i = ffs(u) - 1;
-			nve0_fifo_isr_subfifo_intr(priv, i);
-			u &= ~(1 << i);
+		while (temp) {
+			u32 unit = ffs(temp) - 1;
+			nve0_fifo_intr_pbdma(priv, unit);
+			temp &= ~(1 << unit);
 		}
 
-		nv_wr32(priv, 0x0025a0, units);
+		nv_wr32(priv, 0x0025a0, mask);
 		stat &= ~0x20000000;
 	}
 
 	if (stat & 0x40000000) {
-		nv_warn(priv, "unknown status 0x40000000\n");
-		nv_mask(priv, 0x002a00, 0x00000000, 0x00000000);
+		u32 mask = nv_mask(priv, 0x002a00, 0x00000000, 0x00000000);
+
+		while (mask) {
+			u32 engn = ffs(mask) - 1;
+			/* runlist event, not currently used */
+			mask &= ~(1 << engn);
+		}
+
 		stat &= ~0x40000000;
 	}
 
@@ -575,27 +763,91 @@
 	nv_mask(priv, 0x002140, 0x80000000, 0x00000000);
 }
 
-static int
+int
+nve0_fifo_fini(struct nouveau_object *object, bool suspend)
+{
+	struct nve0_fifo_priv *priv = (void *)object;
+	int ret;
+
+	ret = nouveau_fifo_fini(&priv->base, suspend);
+	if (ret)
+		return ret;
+
+	/* allow mmu fault interrupts, even when we're not using fifo */
+	nv_mask(priv, 0x002140, 0x10000000, 0x10000000);
+	return 0;
+}
+
+int
+nve0_fifo_init(struct nouveau_object *object)
+{
+	struct nve0_fifo_priv *priv = (void *)object;
+	int ret, i;
+
+	ret = nouveau_fifo_init(&priv->base);
+	if (ret)
+		return ret;
+
+	/* enable all available PBDMA units */
+	nv_wr32(priv, 0x000204, 0xffffffff);
+	priv->spoon_nr = hweight32(nv_rd32(priv, 0x000204));
+	nv_debug(priv, "%d PBDMA unit(s)\n", priv->spoon_nr);
+
+	/* PBDMA[n] */
+	for (i = 0; i < priv->spoon_nr; i++) {
+		nv_mask(priv, 0x04013c + (i * 0x2000), 0x10000100, 0x00000000);
+		nv_wr32(priv, 0x040108 + (i * 0x2000), 0xffffffff); /* INTR */
+		nv_wr32(priv, 0x04010c + (i * 0x2000), 0xfffffeff); /* INTREN */
+	}
+
+	nv_wr32(priv, 0x002254, 0x10000000 | priv->user.bar.offset >> 12);
+
+	nv_wr32(priv, 0x002a00, 0xffffffff);
+	nv_wr32(priv, 0x002100, 0xffffffff);
+	nv_wr32(priv, 0x002140, 0x3fffffff);
+	return 0;
+}
+
+void
+nve0_fifo_dtor(struct nouveau_object *object)
+{
+	struct nve0_fifo_priv *priv = (void *)object;
+	int i;
+
+	nouveau_gpuobj_unmap(&priv->user.bar);
+	nouveau_gpuobj_ref(NULL, &priv->user.mem);
+
+	for (i = 0; i < FIFO_ENGINE_NR; i++) {
+		nouveau_gpuobj_ref(NULL, &priv->engine[i].runlist[1]);
+		nouveau_gpuobj_ref(NULL, &priv->engine[i].runlist[0]);
+	}
+
+	nouveau_fifo_destroy(&priv->base);
+}
+
+int
 nve0_fifo_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
 	       struct nouveau_oclass *oclass, void *data, u32 size,
 	       struct nouveau_object **pobject)
 {
+	struct nve0_fifo_impl *impl = (void *)oclass;
 	struct nve0_fifo_priv *priv;
 	int ret, i;
 
-	ret = nouveau_fifo_create(parent, engine, oclass, 0, 4095, &priv);
+	ret = nouveau_fifo_create(parent, engine, oclass, 0,
+				  impl->channels - 1, &priv);
 	*pobject = nv_object(priv);
 	if (ret)
 		return ret;
 
 	for (i = 0; i < FIFO_ENGINE_NR; i++) {
 		ret = nouveau_gpuobj_new(nv_object(priv), NULL, 0x8000, 0x1000,
-					 0, &priv->engine[i].playlist[0]);
+					 0, &priv->engine[i].runlist[0]);
 		if (ret)
 			return ret;
 
 		ret = nouveau_gpuobj_new(nv_object(priv), NULL, 0x8000, 0x1000,
-					 0, &priv->engine[i].playlist[1]);
+					 0, &priv->engine[i].runlist[1]);
 		if (ret)
 			return ret;
 	}
@@ -621,60 +873,14 @@
 	return 0;
 }
 
-static void
-nve0_fifo_dtor(struct nouveau_object *object)
-{
-	struct nve0_fifo_priv *priv = (void *)object;
-	int i;
-
-	nouveau_gpuobj_unmap(&priv->user.bar);
-	nouveau_gpuobj_ref(NULL, &priv->user.mem);
-
-	for (i = 0; i < FIFO_ENGINE_NR; i++) {
-		nouveau_gpuobj_ref(NULL, &priv->engine[i].playlist[1]);
-		nouveau_gpuobj_ref(NULL, &priv->engine[i].playlist[0]);
-	}
-
-	nouveau_fifo_destroy(&priv->base);
-}
-
-static int
-nve0_fifo_init(struct nouveau_object *object)
-{
-	struct nve0_fifo_priv *priv = (void *)object;
-	int ret, i;
-
-	ret = nouveau_fifo_init(&priv->base);
-	if (ret)
-		return ret;
-
-	/* enable all available PSUBFIFOs */
-	nv_wr32(priv, 0x000204, 0xffffffff);
-	priv->spoon_nr = hweight32(nv_rd32(priv, 0x000204));
-	nv_debug(priv, "%d subfifo(s)\n", priv->spoon_nr);
-
-	/* PSUBFIFO[n] */
-	for (i = 0; i < priv->spoon_nr; i++) {
-		nv_mask(priv, 0x04013c + (i * 0x2000), 0x10000100, 0x00000000);
-		nv_wr32(priv, 0x040108 + (i * 0x2000), 0xffffffff); /* INTR */
-		nv_wr32(priv, 0x04010c + (i * 0x2000), 0xfffffeff); /* INTREN */
-	}
-
-	nv_wr32(priv, 0x002254, 0x10000000 | priv->user.bar.offset >> 12);
-
-	nv_wr32(priv, 0x002a00, 0xffffffff);
-	nv_wr32(priv, 0x002100, 0xffffffff);
-	nv_wr32(priv, 0x002140, 0x3fffffff);
-	return 0;
-}
-
 struct nouveau_oclass *
-nve0_fifo_oclass = &(struct nouveau_oclass) {
-	.handle = NV_ENGINE(FIFO, 0xe0),
-	.ofuncs = &(struct nouveau_ofuncs) {
+nve0_fifo_oclass = &(struct nve0_fifo_impl) {
+	.base.handle = NV_ENGINE(FIFO, 0xe0),
+	.base.ofuncs = &(struct nouveau_ofuncs) {
 		.ctor = nve0_fifo_ctor,
 		.dtor = nve0_fifo_dtor,
 		.init = nve0_fifo_init,
-		.fini = _nouveau_fifo_fini,
+		.fini = nve0_fifo_fini,
 	},
-};
+	.channels = 4096,
+}.base;
diff --git a/drivers/gpu/drm/nouveau/core/engine/fifo/nve0.h b/drivers/gpu/drm/nouveau/core/engine/fifo/nve0.h
new file mode 100644
index 0000000..014344e
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/engine/fifo/nve0.h
@@ -0,0 +1,17 @@
+#ifndef __NVKM_FIFO_NVE0_H__
+#define __NVKM_FIFO_NVE0_H__
+
+#include <engine/fifo.h>
+
+int  nve0_fifo_ctor(struct nouveau_object *, struct nouveau_object *,
+		    struct nouveau_oclass *, void *, u32,
+		    struct nouveau_object **);
+void nve0_fifo_dtor(struct nouveau_object *);
+int  nve0_fifo_init(struct nouveau_object *);
+
+struct nve0_fifo_impl {
+	struct nouveau_oclass base;
+	u32 channels;
+};
+
+#endif
diff --git a/drivers/gpu/drm/nouveau/core/engine/graph/ctxnv108.c b/drivers/gpu/drm/nouveau/core/engine/graph/ctxnv108.c
new file mode 100644
index 0000000..a86bd33
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/engine/graph/ctxnv108.c
@@ -0,0 +1,1408 @@
+/*
+ * Copyright 2013 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs <bskeggs@redhat.com>
+ */
+
+#include "nvc0.h"
+
+static struct nvc0_graph_init
+nv108_grctx_init_icmd[] = {
+	{ 0x001000,   1, 0x01, 0x00000004 },
+	{ 0x000039,   3, 0x01, 0x00000000 },
+	{ 0x0000a9,   1, 0x01, 0x0000ffff },
+	{ 0x000038,   1, 0x01, 0x0fac6881 },
+	{ 0x00003d,   1, 0x01, 0x00000001 },
+	{ 0x0000e8,   8, 0x01, 0x00000400 },
+	{ 0x000078,   8, 0x01, 0x00000300 },
+	{ 0x000050,   1, 0x01, 0x00000011 },
+	{ 0x000058,   8, 0x01, 0x00000008 },
+	{ 0x000208,   8, 0x01, 0x00000001 },
+	{ 0x000081,   1, 0x01, 0x00000001 },
+	{ 0x000085,   1, 0x01, 0x00000004 },
+	{ 0x000088,   1, 0x01, 0x00000400 },
+	{ 0x000090,   1, 0x01, 0x00000300 },
+	{ 0x000098,   1, 0x01, 0x00001001 },
+	{ 0x0000e3,   1, 0x01, 0x00000001 },
+	{ 0x0000da,   1, 0x01, 0x00000001 },
+	{ 0x0000f8,   1, 0x01, 0x00000003 },
+	{ 0x0000fa,   1, 0x01, 0x00000001 },
+	{ 0x00009f,   4, 0x01, 0x0000ffff },
+	{ 0x0000b1,   1, 0x01, 0x00000001 },
+	{ 0x0000ad,   1, 0x01, 0x0000013e },
+	{ 0x0000e1,   1, 0x01, 0x00000010 },
+	{ 0x000290,  16, 0x01, 0x00000000 },
+	{ 0x0003b0,  16, 0x01, 0x00000000 },
+	{ 0x0002a0,  16, 0x01, 0x00000000 },
+	{ 0x000420,  16, 0x01, 0x00000000 },
+	{ 0x0002b0,  16, 0x01, 0x00000000 },
+	{ 0x000430,  16, 0x01, 0x00000000 },
+	{ 0x0002c0,  16, 0x01, 0x00000000 },
+	{ 0x0004d0,  16, 0x01, 0x00000000 },
+	{ 0x000720,  16, 0x01, 0x00000000 },
+	{ 0x0008c0,  16, 0x01, 0x00000000 },
+	{ 0x000890,  16, 0x01, 0x00000000 },
+	{ 0x0008e0,  16, 0x01, 0x00000000 },
+	{ 0x0008a0,  16, 0x01, 0x00000000 },
+	{ 0x0008f0,  16, 0x01, 0x00000000 },
+	{ 0x00094c,   1, 0x01, 0x000000ff },
+	{ 0x00094d,   1, 0x01, 0xffffffff },
+	{ 0x00094e,   1, 0x01, 0x00000002 },
+	{ 0x0002ec,   1, 0x01, 0x00000001 },
+	{ 0x0002f2,   2, 0x01, 0x00000001 },
+	{ 0x0002f5,   1, 0x01, 0x00000001 },
+	{ 0x0002f7,   1, 0x01, 0x00000001 },
+	{ 0x000303,   1, 0x01, 0x00000001 },
+	{ 0x0002e6,   1, 0x01, 0x00000001 },
+	{ 0x000466,   1, 0x01, 0x00000052 },
+	{ 0x000301,   1, 0x01, 0x3f800000 },
+	{ 0x000304,   1, 0x01, 0x30201000 },
+	{ 0x000305,   1, 0x01, 0x70605040 },
+	{ 0x000306,   1, 0x01, 0xb8a89888 },
+	{ 0x000307,   1, 0x01, 0xf8e8d8c8 },
+	{ 0x00030a,   1, 0x01, 0x00ffff00 },
+	{ 0x00030b,   1, 0x01, 0x0000001a },
+	{ 0x00030c,   1, 0x01, 0x00000001 },
+	{ 0x000318,   1, 0x01, 0x00000001 },
+	{ 0x000340,   1, 0x01, 0x00000000 },
+	{ 0x000375,   1, 0x01, 0x00000001 },
+	{ 0x00037d,   1, 0x01, 0x00000006 },
+	{ 0x0003a0,   1, 0x01, 0x00000002 },
+	{ 0x0003aa,   1, 0x01, 0x00000001 },
+	{ 0x0003a9,   1, 0x01, 0x00000001 },
+	{ 0x000380,   1, 0x01, 0x00000001 },
+	{ 0x000383,   1, 0x01, 0x00000011 },
+	{ 0x000360,   1, 0x01, 0x00000040 },
+	{ 0x000366,   2, 0x01, 0x00000000 },
+	{ 0x000368,   1, 0x01, 0x00000fff },
+	{ 0x000370,   2, 0x01, 0x00000000 },
+	{ 0x000372,   1, 0x01, 0x000fffff },
+	{ 0x00037a,   1, 0x01, 0x00000012 },
+	{ 0x000619,   1, 0x01, 0x00000003 },
+	{ 0x000811,   1, 0x01, 0x00000003 },
+	{ 0x000812,   1, 0x01, 0x00000004 },
+	{ 0x000813,   1, 0x01, 0x00000006 },
+	{ 0x000814,   1, 0x01, 0x00000008 },
+	{ 0x000815,   1, 0x01, 0x0000000b },
+	{ 0x000800,   6, 0x01, 0x00000001 },
+	{ 0x000632,   1, 0x01, 0x00000001 },
+	{ 0x000633,   1, 0x01, 0x00000002 },
+	{ 0x000634,   1, 0x01, 0x00000003 },
+	{ 0x000635,   1, 0x01, 0x00000004 },
+	{ 0x000654,   1, 0x01, 0x3f800000 },
+	{ 0x000657,   1, 0x01, 0x3f800000 },
+	{ 0x000655,   2, 0x01, 0x3f800000 },
+	{ 0x0006cd,   1, 0x01, 0x3f800000 },
+	{ 0x0007f5,   1, 0x01, 0x3f800000 },
+	{ 0x0007dc,   1, 0x01, 0x39291909 },
+	{ 0x0007dd,   1, 0x01, 0x79695949 },
+	{ 0x0007de,   1, 0x01, 0xb9a99989 },
+	{ 0x0007df,   1, 0x01, 0xf9e9d9c9 },
+	{ 0x0007e8,   1, 0x01, 0x00003210 },
+	{ 0x0007e9,   1, 0x01, 0x00007654 },
+	{ 0x0007ea,   1, 0x01, 0x00000098 },
+	{ 0x0007ec,   1, 0x01, 0x39291909 },
+	{ 0x0007ed,   1, 0x01, 0x79695949 },
+	{ 0x0007ee,   1, 0x01, 0xb9a99989 },
+	{ 0x0007ef,   1, 0x01, 0xf9e9d9c9 },
+	{ 0x0007f0,   1, 0x01, 0x00003210 },
+	{ 0x0007f1,   1, 0x01, 0x00007654 },
+	{ 0x0007f2,   1, 0x01, 0x00000098 },
+	{ 0x0005a5,   1, 0x01, 0x00000001 },
+	{ 0x000980, 128, 0x01, 0x00000000 },
+	{ 0x000468,   1, 0x01, 0x00000004 },
+	{ 0x00046c,   1, 0x01, 0x00000001 },
+	{ 0x000470,  96, 0x01, 0x00000000 },
+	{ 0x000510,  16, 0x01, 0x3f800000 },
+	{ 0x000520,   1, 0x01, 0x000002b6 },
+	{ 0x000529,   1, 0x01, 0x00000001 },
+	{ 0x000530,  16, 0x01, 0xffff0000 },
+	{ 0x000585,   1, 0x01, 0x0000003f },
+	{ 0x000576,   1, 0x01, 0x00000003 },
+	{ 0x00057b,   1, 0x01, 0x00000059 },
+	{ 0x000586,   1, 0x01, 0x00000040 },
+	{ 0x000582,   2, 0x01, 0x00000080 },
+	{ 0x0005c2,   1, 0x01, 0x00000001 },
+	{ 0x000638,   2, 0x01, 0x00000001 },
+	{ 0x00063a,   1, 0x01, 0x00000002 },
+	{ 0x00063b,   2, 0x01, 0x00000001 },
+	{ 0x00063d,   1, 0x01, 0x00000002 },
+	{ 0x00063e,   1, 0x01, 0x00000001 },
+	{ 0x0008b8,   8, 0x01, 0x00000001 },
+	{ 0x000900,   8, 0x01, 0x00000001 },
+	{ 0x000908,   8, 0x01, 0x00000002 },
+	{ 0x000910,  16, 0x01, 0x00000001 },
+	{ 0x000920,   8, 0x01, 0x00000002 },
+	{ 0x000928,   8, 0x01, 0x00000001 },
+	{ 0x000662,   1, 0x01, 0x00000001 },
+	{ 0x000648,   9, 0x01, 0x00000001 },
+	{ 0x000658,   1, 0x01, 0x0000000f },
+	{ 0x0007ff,   1, 0x01, 0x0000000a },
+	{ 0x00066a,   1, 0x01, 0x40000000 },
+	{ 0x00066b,   1, 0x01, 0x10000000 },
+	{ 0x00066c,   2, 0x01, 0xffff0000 },
+	{ 0x0007af,   2, 0x01, 0x00000008 },
+	{ 0x0007f6,   1, 0x01, 0x00000001 },
+	{ 0x00080b,   1, 0x01, 0x00000002 },
+	{ 0x0006b2,   1, 0x01, 0x00000055 },
+	{ 0x0007ad,   1, 0x01, 0x00000003 },
+	{ 0x000937,   1, 0x01, 0x00000001 },
+	{ 0x000971,   1, 0x01, 0x00000008 },
+	{ 0x000972,   1, 0x01, 0x00000040 },
+	{ 0x000973,   1, 0x01, 0x0000012c },
+	{ 0x00097c,   1, 0x01, 0x00000040 },
+	{ 0x000979,   1, 0x01, 0x00000003 },
+	{ 0x000975,   1, 0x01, 0x00000020 },
+	{ 0x000976,   1, 0x01, 0x00000001 },
+	{ 0x000977,   1, 0x01, 0x00000020 },
+	{ 0x000978,   1, 0x01, 0x00000001 },
+	{ 0x000957,   1, 0x01, 0x00000003 },
+	{ 0x00095e,   1, 0x01, 0x20164010 },
+	{ 0x00095f,   1, 0x01, 0x00000020 },
+	{ 0x000a0d,   1, 0x01, 0x00000006 },
+	{ 0x00097d,   1, 0x01, 0x00000020 },
+	{ 0x000683,   1, 0x01, 0x00000006 },
+	{ 0x000685,   1, 0x01, 0x003fffff },
+	{ 0x000687,   1, 0x01, 0x003fffff },
+	{ 0x0006a0,   1, 0x01, 0x00000005 },
+	{ 0x000840,   1, 0x01, 0x00400008 },
+	{ 0x000841,   1, 0x01, 0x08000080 },
+	{ 0x000842,   1, 0x01, 0x00400008 },
+	{ 0x000843,   1, 0x01, 0x08000080 },
+	{ 0x0006aa,   1, 0x01, 0x00000001 },
+	{ 0x0006ab,   1, 0x01, 0x00000002 },
+	{ 0x0006ac,   1, 0x01, 0x00000080 },
+	{ 0x0006ad,   2, 0x01, 0x00000100 },
+	{ 0x0006b1,   1, 0x01, 0x00000011 },
+	{ 0x0006bb,   1, 0x01, 0x000000cf },
+	{ 0x0006ce,   1, 0x01, 0x2a712488 },
+	{ 0x000739,   1, 0x01, 0x4085c000 },
+	{ 0x00073a,   1, 0x01, 0x00000080 },
+	{ 0x000786,   1, 0x01, 0x80000100 },
+	{ 0x00073c,   1, 0x01, 0x00010100 },
+	{ 0x00073d,   1, 0x01, 0x02800000 },
+	{ 0x000787,   1, 0x01, 0x000000cf },
+	{ 0x00078c,   1, 0x01, 0x00000008 },
+	{ 0x000792,   1, 0x01, 0x00000001 },
+	{ 0x000794,   3, 0x01, 0x00000001 },
+	{ 0x000797,   1, 0x01, 0x000000cf },
+	{ 0x000836,   1, 0x01, 0x00000001 },
+	{ 0x00079a,   1, 0x01, 0x00000002 },
+	{ 0x000833,   1, 0x01, 0x04444480 },
+	{ 0x0007a1,   1, 0x01, 0x00000001 },
+	{ 0x0007a3,   3, 0x01, 0x00000001 },
+	{ 0x000831,   1, 0x01, 0x00000004 },
+	{ 0x000b07,   1, 0x01, 0x00000002 },
+	{ 0x000b08,   2, 0x01, 0x00000100 },
+	{ 0x000b0a,   1, 0x01, 0x00000001 },
+	{ 0x000a04,   1, 0x01, 0x000000ff },
+	{ 0x000a0b,   1, 0x01, 0x00000040 },
+	{ 0x00097f,   1, 0x01, 0x00000100 },
+	{ 0x000a02,   1, 0x01, 0x00000001 },
+	{ 0x000809,   1, 0x01, 0x00000007 },
+	{ 0x00c221,   1, 0x01, 0x00000040 },
+	{ 0x00c1b0,   8, 0x01, 0x0000000f },
+	{ 0x00c1b8,   1, 0x01, 0x0fac6881 },
+	{ 0x00c1b9,   1, 0x01, 0x00fac688 },
+	{ 0x00c401,   1, 0x01, 0x00000001 },
+	{ 0x00c402,   1, 0x01, 0x00010001 },
+	{ 0x00c403,   2, 0x01, 0x00000001 },
+	{ 0x00c40e,   1, 0x01, 0x00000020 },
+	{ 0x00c500,   1, 0x01, 0x00000003 },
+	{ 0x01e100,   1, 0x01, 0x00000001 },
+	{ 0x001000,   1, 0x01, 0x00000002 },
+	{ 0x0006aa,   1, 0x01, 0x00000001 },
+	{ 0x0006ad,   2, 0x01, 0x00000100 },
+	{ 0x0006b1,   1, 0x01, 0x00000011 },
+	{ 0x00078c,   1, 0x01, 0x00000008 },
+	{ 0x000792,   1, 0x01, 0x00000001 },
+	{ 0x000794,   3, 0x01, 0x00000001 },
+	{ 0x000797,   1, 0x01, 0x000000cf },
+	{ 0x00079a,   1, 0x01, 0x00000002 },
+	{ 0x0007a1,   1, 0x01, 0x00000001 },
+	{ 0x0007a3,   3, 0x01, 0x00000001 },
+	{ 0x000831,   1, 0x01, 0x00000004 },
+	{ 0x01e100,   1, 0x01, 0x00000001 },
+	{ 0x001000,   1, 0x01, 0x00000008 },
+	{ 0x000039,   3, 0x01, 0x00000000 },
+	{ 0x000380,   1, 0x01, 0x00000001 },
+	{ 0x000366,   2, 0x01, 0x00000000 },
+	{ 0x000368,   1, 0x01, 0x00000fff },
+	{ 0x000370,   2, 0x01, 0x00000000 },
+	{ 0x000372,   1, 0x01, 0x000fffff },
+	{ 0x000813,   1, 0x01, 0x00000006 },
+	{ 0x000814,   1, 0x01, 0x00000008 },
+	{ 0x000957,   1, 0x01, 0x00000003 },
+	{ 0x000b07,   1, 0x01, 0x00000002 },
+	{ 0x000b08,   2, 0x01, 0x00000100 },
+	{ 0x000b0a,   1, 0x01, 0x00000001 },
+	{ 0x000a04,   1, 0x01, 0x000000ff },
+	{ 0x000a0b,   1, 0x01, 0x00000040 },
+	{ 0x00097f,   1, 0x01, 0x00000100 },
+	{ 0x000a02,   1, 0x01, 0x00000001 },
+	{ 0x000809,   1, 0x01, 0x00000007 },
+	{ 0x00c221,   1, 0x01, 0x00000040 },
+	{ 0x00c401,   1, 0x01, 0x00000001 },
+	{ 0x00c402,   1, 0x01, 0x00010001 },
+	{ 0x00c403,   2, 0x01, 0x00000001 },
+	{ 0x00c40e,   1, 0x01, 0x00000020 },
+	{ 0x00c500,   1, 0x01, 0x00000003 },
+	{ 0x01e100,   1, 0x01, 0x00000001 },
+	{ 0x001000,   1, 0x01, 0x00000001 },
+	{ 0x000b07,   1, 0x01, 0x00000002 },
+	{ 0x000b08,   2, 0x01, 0x00000100 },
+	{ 0x000b0a,   1, 0x01, 0x00000001 },
+	{ 0x01e100,   1, 0x01, 0x00000001 },
+	{}
+};
+
+static struct nvc0_graph_init
+nv108_grctx_init_a197[] = {
+	{ 0x000800,   1, 0x04, 0x00000000 },
+	{ 0x000840,   1, 0x04, 0x00000000 },
+	{ 0x000880,   1, 0x04, 0x00000000 },
+	{ 0x0008c0,   1, 0x04, 0x00000000 },
+	{ 0x000900,   1, 0x04, 0x00000000 },
+	{ 0x000940,   1, 0x04, 0x00000000 },
+	{ 0x000980,   1, 0x04, 0x00000000 },
+	{ 0x0009c0,   1, 0x04, 0x00000000 },
+	{ 0x000804,   1, 0x04, 0x00000000 },
+	{ 0x000844,   1, 0x04, 0x00000000 },
+	{ 0x000884,   1, 0x04, 0x00000000 },
+	{ 0x0008c4,   1, 0x04, 0x00000000 },
+	{ 0x000904,   1, 0x04, 0x00000000 },
+	{ 0x000944,   1, 0x04, 0x00000000 },
+	{ 0x000984,   1, 0x04, 0x00000000 },
+	{ 0x0009c4,   1, 0x04, 0x00000000 },
+	{ 0x000808,   1, 0x04, 0x00000400 },
+	{ 0x000848,   1, 0x04, 0x00000400 },
+	{ 0x000888,   1, 0x04, 0x00000400 },
+	{ 0x0008c8,   1, 0x04, 0x00000400 },
+	{ 0x000908,   1, 0x04, 0x00000400 },
+	{ 0x000948,   1, 0x04, 0x00000400 },
+	{ 0x000988,   1, 0x04, 0x00000400 },
+	{ 0x0009c8,   1, 0x04, 0x00000400 },
+	{ 0x00080c,   1, 0x04, 0x00000300 },
+	{ 0x00084c,   1, 0x04, 0x00000300 },
+	{ 0x00088c,   1, 0x04, 0x00000300 },
+	{ 0x0008cc,   1, 0x04, 0x00000300 },
+	{ 0x00090c,   1, 0x04, 0x00000300 },
+	{ 0x00094c,   1, 0x04, 0x00000300 },
+	{ 0x00098c,   1, 0x04, 0x00000300 },
+	{ 0x0009cc,   1, 0x04, 0x00000300 },
+	{ 0x000810,   1, 0x04, 0x000000cf },
+	{ 0x000850,   1, 0x04, 0x00000000 },
+	{ 0x000890,   1, 0x04, 0x00000000 },
+	{ 0x0008d0,   1, 0x04, 0x00000000 },
+	{ 0x000910,   1, 0x04, 0x00000000 },
+	{ 0x000950,   1, 0x04, 0x00000000 },
+	{ 0x000990,   1, 0x04, 0x00000000 },
+	{ 0x0009d0,   1, 0x04, 0x00000000 },
+	{ 0x000814,   1, 0x04, 0x00000040 },
+	{ 0x000854,   1, 0x04, 0x00000040 },
+	{ 0x000894,   1, 0x04, 0x00000040 },
+	{ 0x0008d4,   1, 0x04, 0x00000040 },
+	{ 0x000914,   1, 0x04, 0x00000040 },
+	{ 0x000954,   1, 0x04, 0x00000040 },
+	{ 0x000994,   1, 0x04, 0x00000040 },
+	{ 0x0009d4,   1, 0x04, 0x00000040 },
+	{ 0x000818,   1, 0x04, 0x00000001 },
+	{ 0x000858,   1, 0x04, 0x00000001 },
+	{ 0x000898,   1, 0x04, 0x00000001 },
+	{ 0x0008d8,   1, 0x04, 0x00000001 },
+	{ 0x000918,   1, 0x04, 0x00000001 },
+	{ 0x000958,   1, 0x04, 0x00000001 },
+	{ 0x000998,   1, 0x04, 0x00000001 },
+	{ 0x0009d8,   1, 0x04, 0x00000001 },
+	{ 0x00081c,   1, 0x04, 0x00000000 },
+	{ 0x00085c,   1, 0x04, 0x00000000 },
+	{ 0x00089c,   1, 0x04, 0x00000000 },
+	{ 0x0008dc,   1, 0x04, 0x00000000 },
+	{ 0x00091c,   1, 0x04, 0x00000000 },
+	{ 0x00095c,   1, 0x04, 0x00000000 },
+	{ 0x00099c,   1, 0x04, 0x00000000 },
+	{ 0x0009dc,   1, 0x04, 0x00000000 },
+	{ 0x000820,   1, 0x04, 0x00000000 },
+	{ 0x000860,   1, 0x04, 0x00000000 },
+	{ 0x0008a0,   1, 0x04, 0x00000000 },
+	{ 0x0008e0,   1, 0x04, 0x00000000 },
+	{ 0x000920,   1, 0x04, 0x00000000 },
+	{ 0x000960,   1, 0x04, 0x00000000 },
+	{ 0x0009a0,   1, 0x04, 0x00000000 },
+	{ 0x0009e0,   1, 0x04, 0x00000000 },
+	{ 0x001c00,   1, 0x04, 0x00000000 },
+	{ 0x001c10,   1, 0x04, 0x00000000 },
+	{ 0x001c20,   1, 0x04, 0x00000000 },
+	{ 0x001c30,   1, 0x04, 0x00000000 },
+	{ 0x001c40,   1, 0x04, 0x00000000 },
+	{ 0x001c50,   1, 0x04, 0x00000000 },
+	{ 0x001c60,   1, 0x04, 0x00000000 },
+	{ 0x001c70,   1, 0x04, 0x00000000 },
+	{ 0x001c80,   1, 0x04, 0x00000000 },
+	{ 0x001c90,   1, 0x04, 0x00000000 },
+	{ 0x001ca0,   1, 0x04, 0x00000000 },
+	{ 0x001cb0,   1, 0x04, 0x00000000 },
+	{ 0x001cc0,   1, 0x04, 0x00000000 },
+	{ 0x001cd0,   1, 0x04, 0x00000000 },
+	{ 0x001ce0,   1, 0x04, 0x00000000 },
+	{ 0x001cf0,   1, 0x04, 0x00000000 },
+	{ 0x001c04,   1, 0x04, 0x00000000 },
+	{ 0x001c14,   1, 0x04, 0x00000000 },
+	{ 0x001c24,   1, 0x04, 0x00000000 },
+	{ 0x001c34,   1, 0x04, 0x00000000 },
+	{ 0x001c44,   1, 0x04, 0x00000000 },
+	{ 0x001c54,   1, 0x04, 0x00000000 },
+	{ 0x001c64,   1, 0x04, 0x00000000 },
+	{ 0x001c74,   1, 0x04, 0x00000000 },
+	{ 0x001c84,   1, 0x04, 0x00000000 },
+	{ 0x001c94,   1, 0x04, 0x00000000 },
+	{ 0x001ca4,   1, 0x04, 0x00000000 },
+	{ 0x001cb4,   1, 0x04, 0x00000000 },
+	{ 0x001cc4,   1, 0x04, 0x00000000 },
+	{ 0x001cd4,   1, 0x04, 0x00000000 },
+	{ 0x001ce4,   1, 0x04, 0x00000000 },
+	{ 0x001cf4,   1, 0x04, 0x00000000 },
+	{ 0x001c08,   1, 0x04, 0x00000000 },
+	{ 0x001c18,   1, 0x04, 0x00000000 },
+	{ 0x001c28,   1, 0x04, 0x00000000 },
+	{ 0x001c38,   1, 0x04, 0x00000000 },
+	{ 0x001c48,   1, 0x04, 0x00000000 },
+	{ 0x001c58,   1, 0x04, 0x00000000 },
+	{ 0x001c68,   1, 0x04, 0x00000000 },
+	{ 0x001c78,   1, 0x04, 0x00000000 },
+	{ 0x001c88,   1, 0x04, 0x00000000 },
+	{ 0x001c98,   1, 0x04, 0x00000000 },
+	{ 0x001ca8,   1, 0x04, 0x00000000 },
+	{ 0x001cb8,   1, 0x04, 0x00000000 },
+	{ 0x001cc8,   1, 0x04, 0x00000000 },
+	{ 0x001cd8,   1, 0x04, 0x00000000 },
+	{ 0x001ce8,   1, 0x04, 0x00000000 },
+	{ 0x001cf8,   1, 0x04, 0x00000000 },
+	{ 0x001c0c,   1, 0x04, 0x00000000 },
+	{ 0x001c1c,   1, 0x04, 0x00000000 },
+	{ 0x001c2c,   1, 0x04, 0x00000000 },
+	{ 0x001c3c,   1, 0x04, 0x00000000 },
+	{ 0x001c4c,   1, 0x04, 0x00000000 },
+	{ 0x001c5c,   1, 0x04, 0x00000000 },
+	{ 0x001c6c,   1, 0x04, 0x00000000 },
+	{ 0x001c7c,   1, 0x04, 0x00000000 },
+	{ 0x001c8c,   1, 0x04, 0x00000000 },
+	{ 0x001c9c,   1, 0x04, 0x00000000 },
+	{ 0x001cac,   1, 0x04, 0x00000000 },
+	{ 0x001cbc,   1, 0x04, 0x00000000 },
+	{ 0x001ccc,   1, 0x04, 0x00000000 },
+	{ 0x001cdc,   1, 0x04, 0x00000000 },
+	{ 0x001cec,   1, 0x04, 0x00000000 },
+	{ 0x001cfc,   2, 0x04, 0x00000000 },
+	{ 0x001d10,   1, 0x04, 0x00000000 },
+	{ 0x001d20,   1, 0x04, 0x00000000 },
+	{ 0x001d30,   1, 0x04, 0x00000000 },
+	{ 0x001d40,   1, 0x04, 0x00000000 },
+	{ 0x001d50,   1, 0x04, 0x00000000 },
+	{ 0x001d60,   1, 0x04, 0x00000000 },
+	{ 0x001d70,   1, 0x04, 0x00000000 },
+	{ 0x001d80,   1, 0x04, 0x00000000 },
+	{ 0x001d90,   1, 0x04, 0x00000000 },
+	{ 0x001da0,   1, 0x04, 0x00000000 },
+	{ 0x001db0,   1, 0x04, 0x00000000 },
+	{ 0x001dc0,   1, 0x04, 0x00000000 },
+	{ 0x001dd0,   1, 0x04, 0x00000000 },
+	{ 0x001de0,   1, 0x04, 0x00000000 },
+	{ 0x001df0,   1, 0x04, 0x00000000 },
+	{ 0x001d04,   1, 0x04, 0x00000000 },
+	{ 0x001d14,   1, 0x04, 0x00000000 },
+	{ 0x001d24,   1, 0x04, 0x00000000 },
+	{ 0x001d34,   1, 0x04, 0x00000000 },
+	{ 0x001d44,   1, 0x04, 0x00000000 },
+	{ 0x001d54,   1, 0x04, 0x00000000 },
+	{ 0x001d64,   1, 0x04, 0x00000000 },
+	{ 0x001d74,   1, 0x04, 0x00000000 },
+	{ 0x001d84,   1, 0x04, 0x00000000 },
+	{ 0x001d94,   1, 0x04, 0x00000000 },
+	{ 0x001da4,   1, 0x04, 0x00000000 },
+	{ 0x001db4,   1, 0x04, 0x00000000 },
+	{ 0x001dc4,   1, 0x04, 0x00000000 },
+	{ 0x001dd4,   1, 0x04, 0x00000000 },
+	{ 0x001de4,   1, 0x04, 0x00000000 },
+	{ 0x001df4,   1, 0x04, 0x00000000 },
+	{ 0x001d08,   1, 0x04, 0x00000000 },
+	{ 0x001d18,   1, 0x04, 0x00000000 },
+	{ 0x001d28,   1, 0x04, 0x00000000 },
+	{ 0x001d38,   1, 0x04, 0x00000000 },
+	{ 0x001d48,   1, 0x04, 0x00000000 },
+	{ 0x001d58,   1, 0x04, 0x00000000 },
+	{ 0x001d68,   1, 0x04, 0x00000000 },
+	{ 0x001d78,   1, 0x04, 0x00000000 },
+	{ 0x001d88,   1, 0x04, 0x00000000 },
+	{ 0x001d98,   1, 0x04, 0x00000000 },
+	{ 0x001da8,   1, 0x04, 0x00000000 },
+	{ 0x001db8,   1, 0x04, 0x00000000 },
+	{ 0x001dc8,   1, 0x04, 0x00000000 },
+	{ 0x001dd8,   1, 0x04, 0x00000000 },
+	{ 0x001de8,   1, 0x04, 0x00000000 },
+	{ 0x001df8,   1, 0x04, 0x00000000 },
+	{ 0x001d0c,   1, 0x04, 0x00000000 },
+	{ 0x001d1c,   1, 0x04, 0x00000000 },
+	{ 0x001d2c,   1, 0x04, 0x00000000 },
+	{ 0x001d3c,   1, 0x04, 0x00000000 },
+	{ 0x001d4c,   1, 0x04, 0x00000000 },
+	{ 0x001d5c,   1, 0x04, 0x00000000 },
+	{ 0x001d6c,   1, 0x04, 0x00000000 },
+	{ 0x001d7c,   1, 0x04, 0x00000000 },
+	{ 0x001d8c,   1, 0x04, 0x00000000 },
+	{ 0x001d9c,   1, 0x04, 0x00000000 },
+	{ 0x001dac,   1, 0x04, 0x00000000 },
+	{ 0x001dbc,   1, 0x04, 0x00000000 },
+	{ 0x001dcc,   1, 0x04, 0x00000000 },
+	{ 0x001ddc,   1, 0x04, 0x00000000 },
+	{ 0x001dec,   1, 0x04, 0x00000000 },
+	{ 0x001dfc,   1, 0x04, 0x00000000 },
+	{ 0x001f00,   1, 0x04, 0x00000000 },
+	{ 0x001f08,   1, 0x04, 0x00000000 },
+	{ 0x001f10,   1, 0x04, 0x00000000 },
+	{ 0x001f18,   1, 0x04, 0x00000000 },
+	{ 0x001f20,   1, 0x04, 0x00000000 },
+	{ 0x001f28,   1, 0x04, 0x00000000 },
+	{ 0x001f30,   1, 0x04, 0x00000000 },
+	{ 0x001f38,   1, 0x04, 0x00000000 },
+	{ 0x001f40,   1, 0x04, 0x00000000 },
+	{ 0x001f48,   1, 0x04, 0x00000000 },
+	{ 0x001f50,   1, 0x04, 0x00000000 },
+	{ 0x001f58,   1, 0x04, 0x00000000 },
+	{ 0x001f60,   1, 0x04, 0x00000000 },
+	{ 0x001f68,   1, 0x04, 0x00000000 },
+	{ 0x001f70,   1, 0x04, 0x00000000 },
+	{ 0x001f78,   1, 0x04, 0x00000000 },
+	{ 0x001f04,   1, 0x04, 0x00000000 },
+	{ 0x001f0c,   1, 0x04, 0x00000000 },
+	{ 0x001f14,   1, 0x04, 0x00000000 },
+	{ 0x001f1c,   1, 0x04, 0x00000000 },
+	{ 0x001f24,   1, 0x04, 0x00000000 },
+	{ 0x001f2c,   1, 0x04, 0x00000000 },
+	{ 0x001f34,   1, 0x04, 0x00000000 },
+	{ 0x001f3c,   1, 0x04, 0x00000000 },
+	{ 0x001f44,   1, 0x04, 0x00000000 },
+	{ 0x001f4c,   1, 0x04, 0x00000000 },
+	{ 0x001f54,   1, 0x04, 0x00000000 },
+	{ 0x001f5c,   1, 0x04, 0x00000000 },
+	{ 0x001f64,   1, 0x04, 0x00000000 },
+	{ 0x001f6c,   1, 0x04, 0x00000000 },
+	{ 0x001f74,   1, 0x04, 0x00000000 },
+	{ 0x001f7c,   2, 0x04, 0x00000000 },
+	{ 0x001f88,   1, 0x04, 0x00000000 },
+	{ 0x001f90,   1, 0x04, 0x00000000 },
+	{ 0x001f98,   1, 0x04, 0x00000000 },
+	{ 0x001fa0,   1, 0x04, 0x00000000 },
+	{ 0x001fa8,   1, 0x04, 0x00000000 },
+	{ 0x001fb0,   1, 0x04, 0x00000000 },
+	{ 0x001fb8,   1, 0x04, 0x00000000 },
+	{ 0x001fc0,   1, 0x04, 0x00000000 },
+	{ 0x001fc8,   1, 0x04, 0x00000000 },
+	{ 0x001fd0,   1, 0x04, 0x00000000 },
+	{ 0x001fd8,   1, 0x04, 0x00000000 },
+	{ 0x001fe0,   1, 0x04, 0x00000000 },
+	{ 0x001fe8,   1, 0x04, 0x00000000 },
+	{ 0x001ff0,   1, 0x04, 0x00000000 },
+	{ 0x001ff8,   1, 0x04, 0x00000000 },
+	{ 0x001f84,   1, 0x04, 0x00000000 },
+	{ 0x001f8c,   1, 0x04, 0x00000000 },
+	{ 0x001f94,   1, 0x04, 0x00000000 },
+	{ 0x001f9c,   1, 0x04, 0x00000000 },
+	{ 0x001fa4,   1, 0x04, 0x00000000 },
+	{ 0x001fac,   1, 0x04, 0x00000000 },
+	{ 0x001fb4,   1, 0x04, 0x00000000 },
+	{ 0x001fbc,   1, 0x04, 0x00000000 },
+	{ 0x001fc4,   1, 0x04, 0x00000000 },
+	{ 0x001fcc,   1, 0x04, 0x00000000 },
+	{ 0x001fd4,   1, 0x04, 0x00000000 },
+	{ 0x001fdc,   1, 0x04, 0x00000000 },
+	{ 0x001fe4,   1, 0x04, 0x00000000 },
+	{ 0x001fec,   1, 0x04, 0x00000000 },
+	{ 0x001ff4,   1, 0x04, 0x00000000 },
+	{ 0x001ffc,   2, 0x04, 0x00000000 },
+	{ 0x002040,   1, 0x04, 0x00000011 },
+	{ 0x002080,   1, 0x04, 0x00000020 },
+	{ 0x0020c0,   1, 0x04, 0x00000030 },
+	{ 0x002100,   1, 0x04, 0x00000040 },
+	{ 0x002140,   1, 0x04, 0x00000051 },
+	{ 0x00200c,   1, 0x04, 0x00000001 },
+	{ 0x00204c,   1, 0x04, 0x00000001 },
+	{ 0x00208c,   1, 0x04, 0x00000001 },
+	{ 0x0020cc,   1, 0x04, 0x00000001 },
+	{ 0x00210c,   1, 0x04, 0x00000001 },
+	{ 0x00214c,   1, 0x04, 0x00000001 },
+	{ 0x002010,   1, 0x04, 0x00000000 },
+	{ 0x002050,   1, 0x04, 0x00000000 },
+	{ 0x002090,   1, 0x04, 0x00000001 },
+	{ 0x0020d0,   1, 0x04, 0x00000002 },
+	{ 0x002110,   1, 0x04, 0x00000003 },
+	{ 0x002150,   1, 0x04, 0x00000004 },
+	{ 0x000380,   1, 0x04, 0x00000000 },
+	{ 0x0003a0,   1, 0x04, 0x00000000 },
+	{ 0x0003c0,   1, 0x04, 0x00000000 },
+	{ 0x0003e0,   1, 0x04, 0x00000000 },
+	{ 0x000384,   1, 0x04, 0x00000000 },
+	{ 0x0003a4,   1, 0x04, 0x00000000 },
+	{ 0x0003c4,   1, 0x04, 0x00000000 },
+	{ 0x0003e4,   1, 0x04, 0x00000000 },
+	{ 0x000388,   1, 0x04, 0x00000000 },
+	{ 0x0003a8,   1, 0x04, 0x00000000 },
+	{ 0x0003c8,   1, 0x04, 0x00000000 },
+	{ 0x0003e8,   1, 0x04, 0x00000000 },
+	{ 0x00038c,   1, 0x04, 0x00000000 },
+	{ 0x0003ac,   1, 0x04, 0x00000000 },
+	{ 0x0003cc,   1, 0x04, 0x00000000 },
+	{ 0x0003ec,   1, 0x04, 0x00000000 },
+	{ 0x000700,   1, 0x04, 0x00000000 },
+	{ 0x000710,   1, 0x04, 0x00000000 },
+	{ 0x000720,   1, 0x04, 0x00000000 },
+	{ 0x000730,   1, 0x04, 0x00000000 },
+	{ 0x000704,   1, 0x04, 0x00000000 },
+	{ 0x000714,   1, 0x04, 0x00000000 },
+	{ 0x000724,   1, 0x04, 0x00000000 },
+	{ 0x000734,   1, 0x04, 0x00000000 },
+	{ 0x000708,   1, 0x04, 0x00000000 },
+	{ 0x000718,   1, 0x04, 0x00000000 },
+	{ 0x000728,   1, 0x04, 0x00000000 },
+	{ 0x000738,   1, 0x04, 0x00000000 },
+	{ 0x002800, 128, 0x04, 0x00000000 },
+	{ 0x000a00,   1, 0x04, 0x00000000 },
+	{ 0x000a20,   1, 0x04, 0x00000000 },
+	{ 0x000a40,   1, 0x04, 0x00000000 },
+	{ 0x000a60,   1, 0x04, 0x00000000 },
+	{ 0x000a80,   1, 0x04, 0x00000000 },
+	{ 0x000aa0,   1, 0x04, 0x00000000 },
+	{ 0x000ac0,   1, 0x04, 0x00000000 },
+	{ 0x000ae0,   1, 0x04, 0x00000000 },
+	{ 0x000b00,   1, 0x04, 0x00000000 },
+	{ 0x000b20,   1, 0x04, 0x00000000 },
+	{ 0x000b40,   1, 0x04, 0x00000000 },
+	{ 0x000b60,   1, 0x04, 0x00000000 },
+	{ 0x000b80,   1, 0x04, 0x00000000 },
+	{ 0x000ba0,   1, 0x04, 0x00000000 },
+	{ 0x000bc0,   1, 0x04, 0x00000000 },
+	{ 0x000be0,   1, 0x04, 0x00000000 },
+	{ 0x000a04,   1, 0x04, 0x00000000 },
+	{ 0x000a24,   1, 0x04, 0x00000000 },
+	{ 0x000a44,   1, 0x04, 0x00000000 },
+	{ 0x000a64,   1, 0x04, 0x00000000 },
+	{ 0x000a84,   1, 0x04, 0x00000000 },
+	{ 0x000aa4,   1, 0x04, 0x00000000 },
+	{ 0x000ac4,   1, 0x04, 0x00000000 },
+	{ 0x000ae4,   1, 0x04, 0x00000000 },
+	{ 0x000b04,   1, 0x04, 0x00000000 },
+	{ 0x000b24,   1, 0x04, 0x00000000 },
+	{ 0x000b44,   1, 0x04, 0x00000000 },
+	{ 0x000b64,   1, 0x04, 0x00000000 },
+	{ 0x000b84,   1, 0x04, 0x00000000 },
+	{ 0x000ba4,   1, 0x04, 0x00000000 },
+	{ 0x000bc4,   1, 0x04, 0x00000000 },
+	{ 0x000be4,   1, 0x04, 0x00000000 },
+	{ 0x000a08,   1, 0x04, 0x00000000 },
+	{ 0x000a28,   1, 0x04, 0x00000000 },
+	{ 0x000a48,   1, 0x04, 0x00000000 },
+	{ 0x000a68,   1, 0x04, 0x00000000 },
+	{ 0x000a88,   1, 0x04, 0x00000000 },
+	{ 0x000aa8,   1, 0x04, 0x00000000 },
+	{ 0x000ac8,   1, 0x04, 0x00000000 },
+	{ 0x000ae8,   1, 0x04, 0x00000000 },
+	{ 0x000b08,   1, 0x04, 0x00000000 },
+	{ 0x000b28,   1, 0x04, 0x00000000 },
+	{ 0x000b48,   1, 0x04, 0x00000000 },
+	{ 0x000b68,   1, 0x04, 0x00000000 },
+	{ 0x000b88,   1, 0x04, 0x00000000 },
+	{ 0x000ba8,   1, 0x04, 0x00000000 },
+	{ 0x000bc8,   1, 0x04, 0x00000000 },
+	{ 0x000be8,   1, 0x04, 0x00000000 },
+	{ 0x000a0c,   1, 0x04, 0x00000000 },
+	{ 0x000a2c,   1, 0x04, 0x00000000 },
+	{ 0x000a4c,   1, 0x04, 0x00000000 },
+	{ 0x000a6c,   1, 0x04, 0x00000000 },
+	{ 0x000a8c,   1, 0x04, 0x00000000 },
+	{ 0x000aac,   1, 0x04, 0x00000000 },
+	{ 0x000acc,   1, 0x04, 0x00000000 },
+	{ 0x000aec,   1, 0x04, 0x00000000 },
+	{ 0x000b0c,   1, 0x04, 0x00000000 },
+	{ 0x000b2c,   1, 0x04, 0x00000000 },
+	{ 0x000b4c,   1, 0x04, 0x00000000 },
+	{ 0x000b6c,   1, 0x04, 0x00000000 },
+	{ 0x000b8c,   1, 0x04, 0x00000000 },
+	{ 0x000bac,   1, 0x04, 0x00000000 },
+	{ 0x000bcc,   1, 0x04, 0x00000000 },
+	{ 0x000bec,   1, 0x04, 0x00000000 },
+	{ 0x000a10,   1, 0x04, 0x00000000 },
+	{ 0x000a30,   1, 0x04, 0x00000000 },
+	{ 0x000a50,   1, 0x04, 0x00000000 },
+	{ 0x000a70,   1, 0x04, 0x00000000 },
+	{ 0x000a90,   1, 0x04, 0x00000000 },
+	{ 0x000ab0,   1, 0x04, 0x00000000 },
+	{ 0x000ad0,   1, 0x04, 0x00000000 },
+	{ 0x000af0,   1, 0x04, 0x00000000 },
+	{ 0x000b10,   1, 0x04, 0x00000000 },
+	{ 0x000b30,   1, 0x04, 0x00000000 },
+	{ 0x000b50,   1, 0x04, 0x00000000 },
+	{ 0x000b70,   1, 0x04, 0x00000000 },
+	{ 0x000b90,   1, 0x04, 0x00000000 },
+	{ 0x000bb0,   1, 0x04, 0x00000000 },
+	{ 0x000bd0,   1, 0x04, 0x00000000 },
+	{ 0x000bf0,   1, 0x04, 0x00000000 },
+	{ 0x000a14,   1, 0x04, 0x00000000 },
+	{ 0x000a34,   1, 0x04, 0x00000000 },
+	{ 0x000a54,   1, 0x04, 0x00000000 },
+	{ 0x000a74,   1, 0x04, 0x00000000 },
+	{ 0x000a94,   1, 0x04, 0x00000000 },
+	{ 0x000ab4,   1, 0x04, 0x00000000 },
+	{ 0x000ad4,   1, 0x04, 0x00000000 },
+	{ 0x000af4,   1, 0x04, 0x00000000 },
+	{ 0x000b14,   1, 0x04, 0x00000000 },
+	{ 0x000b34,   1, 0x04, 0x00000000 },
+	{ 0x000b54,   1, 0x04, 0x00000000 },
+	{ 0x000b74,   1, 0x04, 0x00000000 },
+	{ 0x000b94,   1, 0x04, 0x00000000 },
+	{ 0x000bb4,   1, 0x04, 0x00000000 },
+	{ 0x000bd4,   1, 0x04, 0x00000000 },
+	{ 0x000bf4,   1, 0x04, 0x00000000 },
+	{ 0x000c00,   1, 0x04, 0x00000000 },
+	{ 0x000c10,   1, 0x04, 0x00000000 },
+	{ 0x000c20,   1, 0x04, 0x00000000 },
+	{ 0x000c30,   1, 0x04, 0x00000000 },
+	{ 0x000c40,   1, 0x04, 0x00000000 },
+	{ 0x000c50,   1, 0x04, 0x00000000 },
+	{ 0x000c60,   1, 0x04, 0x00000000 },
+	{ 0x000c70,   1, 0x04, 0x00000000 },
+	{ 0x000c80,   1, 0x04, 0x00000000 },
+	{ 0x000c90,   1, 0x04, 0x00000000 },
+	{ 0x000ca0,   1, 0x04, 0x00000000 },
+	{ 0x000cb0,   1, 0x04, 0x00000000 },
+	{ 0x000cc0,   1, 0x04, 0x00000000 },
+	{ 0x000cd0,   1, 0x04, 0x00000000 },
+	{ 0x000ce0,   1, 0x04, 0x00000000 },
+	{ 0x000cf0,   1, 0x04, 0x00000000 },
+	{ 0x000c04,   1, 0x04, 0x00000000 },
+	{ 0x000c14,   1, 0x04, 0x00000000 },
+	{ 0x000c24,   1, 0x04, 0x00000000 },
+	{ 0x000c34,   1, 0x04, 0x00000000 },
+	{ 0x000c44,   1, 0x04, 0x00000000 },
+	{ 0x000c54,   1, 0x04, 0x00000000 },
+	{ 0x000c64,   1, 0x04, 0x00000000 },
+	{ 0x000c74,   1, 0x04, 0x00000000 },
+	{ 0x000c84,   1, 0x04, 0x00000000 },
+	{ 0x000c94,   1, 0x04, 0x00000000 },
+	{ 0x000ca4,   1, 0x04, 0x00000000 },
+	{ 0x000cb4,   1, 0x04, 0x00000000 },
+	{ 0x000cc4,   1, 0x04, 0x00000000 },
+	{ 0x000cd4,   1, 0x04, 0x00000000 },
+	{ 0x000ce4,   1, 0x04, 0x00000000 },
+	{ 0x000cf4,   1, 0x04, 0x00000000 },
+	{ 0x000c08,   1, 0x04, 0x00000000 },
+	{ 0x000c18,   1, 0x04, 0x00000000 },
+	{ 0x000c28,   1, 0x04, 0x00000000 },
+	{ 0x000c38,   1, 0x04, 0x00000000 },
+	{ 0x000c48,   1, 0x04, 0x00000000 },
+	{ 0x000c58,   1, 0x04, 0x00000000 },
+	{ 0x000c68,   1, 0x04, 0x00000000 },
+	{ 0x000c78,   1, 0x04, 0x00000000 },
+	{ 0x000c88,   1, 0x04, 0x00000000 },
+	{ 0x000c98,   1, 0x04, 0x00000000 },
+	{ 0x000ca8,   1, 0x04, 0x00000000 },
+	{ 0x000cb8,   1, 0x04, 0x00000000 },
+	{ 0x000cc8,   1, 0x04, 0x00000000 },
+	{ 0x000cd8,   1, 0x04, 0x00000000 },
+	{ 0x000ce8,   1, 0x04, 0x00000000 },
+	{ 0x000cf8,   1, 0x04, 0x00000000 },
+	{ 0x000c0c,   1, 0x04, 0x3f800000 },
+	{ 0x000c1c,   1, 0x04, 0x3f800000 },
+	{ 0x000c2c,   1, 0x04, 0x3f800000 },
+	{ 0x000c3c,   1, 0x04, 0x3f800000 },
+	{ 0x000c4c,   1, 0x04, 0x3f800000 },
+	{ 0x000c5c,   1, 0x04, 0x3f800000 },
+	{ 0x000c6c,   1, 0x04, 0x3f800000 },
+	{ 0x000c7c,   1, 0x04, 0x3f800000 },
+	{ 0x000c8c,   1, 0x04, 0x3f800000 },
+	{ 0x000c9c,   1, 0x04, 0x3f800000 },
+	{ 0x000cac,   1, 0x04, 0x3f800000 },
+	{ 0x000cbc,   1, 0x04, 0x3f800000 },
+	{ 0x000ccc,   1, 0x04, 0x3f800000 },
+	{ 0x000cdc,   1, 0x04, 0x3f800000 },
+	{ 0x000cec,   1, 0x04, 0x3f800000 },
+	{ 0x000cfc,   1, 0x04, 0x3f800000 },
+	{ 0x000d00,   1, 0x04, 0xffff0000 },
+	{ 0x000d08,   1, 0x04, 0xffff0000 },
+	{ 0x000d10,   1, 0x04, 0xffff0000 },
+	{ 0x000d18,   1, 0x04, 0xffff0000 },
+	{ 0x000d20,   1, 0x04, 0xffff0000 },
+	{ 0x000d28,   1, 0x04, 0xffff0000 },
+	{ 0x000d30,   1, 0x04, 0xffff0000 },
+	{ 0x000d38,   1, 0x04, 0xffff0000 },
+	{ 0x000d04,   1, 0x04, 0xffff0000 },
+	{ 0x000d0c,   1, 0x04, 0xffff0000 },
+	{ 0x000d14,   1, 0x04, 0xffff0000 },
+	{ 0x000d1c,   1, 0x04, 0xffff0000 },
+	{ 0x000d24,   1, 0x04, 0xffff0000 },
+	{ 0x000d2c,   1, 0x04, 0xffff0000 },
+	{ 0x000d34,   1, 0x04, 0xffff0000 },
+	{ 0x000d3c,   1, 0x04, 0xffff0000 },
+	{ 0x000e00,   1, 0x04, 0x00000000 },
+	{ 0x000e10,   1, 0x04, 0x00000000 },
+	{ 0x000e20,   1, 0x04, 0x00000000 },
+	{ 0x000e30,   1, 0x04, 0x00000000 },
+	{ 0x000e40,   1, 0x04, 0x00000000 },
+	{ 0x000e50,   1, 0x04, 0x00000000 },
+	{ 0x000e60,   1, 0x04, 0x00000000 },
+	{ 0x000e70,   1, 0x04, 0x00000000 },
+	{ 0x000e80,   1, 0x04, 0x00000000 },
+	{ 0x000e90,   1, 0x04, 0x00000000 },
+	{ 0x000ea0,   1, 0x04, 0x00000000 },
+	{ 0x000eb0,   1, 0x04, 0x00000000 },
+	{ 0x000ec0,   1, 0x04, 0x00000000 },
+	{ 0x000ed0,   1, 0x04, 0x00000000 },
+	{ 0x000ee0,   1, 0x04, 0x00000000 },
+	{ 0x000ef0,   1, 0x04, 0x00000000 },
+	{ 0x000e04,   1, 0x04, 0xffff0000 },
+	{ 0x000e14,   1, 0x04, 0xffff0000 },
+	{ 0x000e24,   1, 0x04, 0xffff0000 },
+	{ 0x000e34,   1, 0x04, 0xffff0000 },
+	{ 0x000e44,   1, 0x04, 0xffff0000 },
+	{ 0x000e54,   1, 0x04, 0xffff0000 },
+	{ 0x000e64,   1, 0x04, 0xffff0000 },
+	{ 0x000e74,   1, 0x04, 0xffff0000 },
+	{ 0x000e84,   1, 0x04, 0xffff0000 },
+	{ 0x000e94,   1, 0x04, 0xffff0000 },
+	{ 0x000ea4,   1, 0x04, 0xffff0000 },
+	{ 0x000eb4,   1, 0x04, 0xffff0000 },
+	{ 0x000ec4,   1, 0x04, 0xffff0000 },
+	{ 0x000ed4,   1, 0x04, 0xffff0000 },
+	{ 0x000ee4,   1, 0x04, 0xffff0000 },
+	{ 0x000ef4,   1, 0x04, 0xffff0000 },
+	{ 0x000e08,   1, 0x04, 0xffff0000 },
+	{ 0x000e18,   1, 0x04, 0xffff0000 },
+	{ 0x000e28,   1, 0x04, 0xffff0000 },
+	{ 0x000e38,   1, 0x04, 0xffff0000 },
+	{ 0x000e48,   1, 0x04, 0xffff0000 },
+	{ 0x000e58,   1, 0x04, 0xffff0000 },
+	{ 0x000e68,   1, 0x04, 0xffff0000 },
+	{ 0x000e78,   1, 0x04, 0xffff0000 },
+	{ 0x000e88,   1, 0x04, 0xffff0000 },
+	{ 0x000e98,   1, 0x04, 0xffff0000 },
+	{ 0x000ea8,   1, 0x04, 0xffff0000 },
+	{ 0x000eb8,   1, 0x04, 0xffff0000 },
+	{ 0x000ec8,   1, 0x04, 0xffff0000 },
+	{ 0x000ed8,   1, 0x04, 0xffff0000 },
+	{ 0x000ee8,   1, 0x04, 0xffff0000 },
+	{ 0x000ef8,   1, 0x04, 0xffff0000 },
+	{ 0x000d40,   1, 0x04, 0x00000000 },
+	{ 0x000d48,   1, 0x04, 0x00000000 },
+	{ 0x000d50,   1, 0x04, 0x00000000 },
+	{ 0x000d58,   1, 0x04, 0x00000000 },
+	{ 0x000d44,   1, 0x04, 0x00000000 },
+	{ 0x000d4c,   1, 0x04, 0x00000000 },
+	{ 0x000d54,   1, 0x04, 0x00000000 },
+	{ 0x000d5c,   1, 0x04, 0x00000000 },
+	{ 0x001e00,   1, 0x04, 0x00000001 },
+	{ 0x001e20,   1, 0x04, 0x00000001 },
+	{ 0x001e40,   1, 0x04, 0x00000001 },
+	{ 0x001e60,   1, 0x04, 0x00000001 },
+	{ 0x001e80,   1, 0x04, 0x00000001 },
+	{ 0x001ea0,   1, 0x04, 0x00000001 },
+	{ 0x001ec0,   1, 0x04, 0x00000001 },
+	{ 0x001ee0,   1, 0x04, 0x00000001 },
+	{ 0x001e04,   1, 0x04, 0x00000001 },
+	{ 0x001e24,   1, 0x04, 0x00000001 },
+	{ 0x001e44,   1, 0x04, 0x00000001 },
+	{ 0x001e64,   1, 0x04, 0x00000001 },
+	{ 0x001e84,   1, 0x04, 0x00000001 },
+	{ 0x001ea4,   1, 0x04, 0x00000001 },
+	{ 0x001ec4,   1, 0x04, 0x00000001 },
+	{ 0x001ee4,   1, 0x04, 0x00000001 },
+	{ 0x001e08,   1, 0x04, 0x00000002 },
+	{ 0x001e28,   1, 0x04, 0x00000002 },
+	{ 0x001e48,   1, 0x04, 0x00000002 },
+	{ 0x001e68,   1, 0x04, 0x00000002 },
+	{ 0x001e88,   1, 0x04, 0x00000002 },
+	{ 0x001ea8,   1, 0x04, 0x00000002 },
+	{ 0x001ec8,   1, 0x04, 0x00000002 },
+	{ 0x001ee8,   1, 0x04, 0x00000002 },
+	{ 0x001e0c,   1, 0x04, 0x00000001 },
+	{ 0x001e2c,   1, 0x04, 0x00000001 },
+	{ 0x001e4c,   1, 0x04, 0x00000001 },
+	{ 0x001e6c,   1, 0x04, 0x00000001 },
+	{ 0x001e8c,   1, 0x04, 0x00000001 },
+	{ 0x001eac,   1, 0x04, 0x00000001 },
+	{ 0x001ecc,   1, 0x04, 0x00000001 },
+	{ 0x001eec,   1, 0x04, 0x00000001 },
+	{ 0x001e10,   1, 0x04, 0x00000001 },
+	{ 0x001e30,   1, 0x04, 0x00000001 },
+	{ 0x001e50,   1, 0x04, 0x00000001 },
+	{ 0x001e70,   1, 0x04, 0x00000001 },
+	{ 0x001e90,   1, 0x04, 0x00000001 },
+	{ 0x001eb0,   1, 0x04, 0x00000001 },
+	{ 0x001ed0,   1, 0x04, 0x00000001 },
+	{ 0x001ef0,   1, 0x04, 0x00000001 },
+	{ 0x001e14,   1, 0x04, 0x00000002 },
+	{ 0x001e34,   1, 0x04, 0x00000002 },
+	{ 0x001e54,   1, 0x04, 0x00000002 },
+	{ 0x001e74,   1, 0x04, 0x00000002 },
+	{ 0x001e94,   1, 0x04, 0x00000002 },
+	{ 0x001eb4,   1, 0x04, 0x00000002 },
+	{ 0x001ed4,   1, 0x04, 0x00000002 },
+	{ 0x001ef4,   1, 0x04, 0x00000002 },
+	{ 0x001e18,   1, 0x04, 0x00000001 },
+	{ 0x001e38,   1, 0x04, 0x00000001 },
+	{ 0x001e58,   1, 0x04, 0x00000001 },
+	{ 0x001e78,   1, 0x04, 0x00000001 },
+	{ 0x001e98,   1, 0x04, 0x00000001 },
+	{ 0x001eb8,   1, 0x04, 0x00000001 },
+	{ 0x001ed8,   1, 0x04, 0x00000001 },
+	{ 0x001ef8,   1, 0x04, 0x00000001 },
+	{ 0x003400, 128, 0x04, 0x00000000 },
+	{ 0x00030c,   1, 0x04, 0x00000001 },
+	{ 0x001944,   1, 0x04, 0x00000000 },
+	{ 0x001514,   1, 0x04, 0x00000000 },
+	{ 0x000d68,   1, 0x04, 0x0000ffff },
+	{ 0x00121c,   1, 0x04, 0x0fac6881 },
+	{ 0x000fac,   1, 0x04, 0x00000001 },
+	{ 0x001538,   1, 0x04, 0x00000001 },
+	{ 0x000fe0,   2, 0x04, 0x00000000 },
+	{ 0x000fe8,   1, 0x04, 0x00000014 },
+	{ 0x000fec,   1, 0x04, 0x00000040 },
+	{ 0x000ff0,   1, 0x04, 0x00000000 },
+	{ 0x00179c,   1, 0x04, 0x00000000 },
+	{ 0x001228,   1, 0x04, 0x00000400 },
+	{ 0x00122c,   1, 0x04, 0x00000300 },
+	{ 0x001230,   1, 0x04, 0x00010001 },
+	{ 0x0007f8,   1, 0x04, 0x00000000 },
+	{ 0x0015b4,   1, 0x04, 0x00000001 },
+	{ 0x0015cc,   1, 0x04, 0x00000000 },
+	{ 0x001534,   1, 0x04, 0x00000000 },
+	{ 0x000fb0,   1, 0x04, 0x00000000 },
+	{ 0x0015d0,   1, 0x04, 0x00000000 },
+	{ 0x00153c,   1, 0x04, 0x00000000 },
+	{ 0x0016b4,   1, 0x04, 0x00000003 },
+	{ 0x000fbc,   4, 0x04, 0x0000ffff },
+	{ 0x000df8,   2, 0x04, 0x00000000 },
+	{ 0x001948,   1, 0x04, 0x00000000 },
+	{ 0x001970,   1, 0x04, 0x00000001 },
+	{ 0x00161c,   1, 0x04, 0x000009f0 },
+	{ 0x000dcc,   1, 0x04, 0x00000010 },
+	{ 0x00163c,   1, 0x04, 0x00000000 },
+	{ 0x0015e4,   1, 0x04, 0x00000000 },
+	{ 0x001160,  32, 0x04, 0x25e00040 },
+	{ 0x001880,  32, 0x04, 0x00000000 },
+	{ 0x000f84,   2, 0x04, 0x00000000 },
+	{ 0x0017c8,   2, 0x04, 0x00000000 },
+	{ 0x0017d0,   1, 0x04, 0x000000ff },
+	{ 0x0017d4,   1, 0x04, 0xffffffff },
+	{ 0x0017d8,   1, 0x04, 0x00000002 },
+	{ 0x0017dc,   1, 0x04, 0x00000000 },
+	{ 0x0015f4,   2, 0x04, 0x00000000 },
+	{ 0x001434,   2, 0x04, 0x00000000 },
+	{ 0x000d74,   1, 0x04, 0x00000000 },
+	{ 0x000dec,   1, 0x04, 0x00000001 },
+	{ 0x0013a4,   1, 0x04, 0x00000000 },
+	{ 0x001318,   1, 0x04, 0x00000001 },
+	{ 0x001644,   1, 0x04, 0x00000000 },
+	{ 0x000748,   1, 0x04, 0x00000000 },
+	{ 0x000de8,   1, 0x04, 0x00000000 },
+	{ 0x001648,   1, 0x04, 0x00000000 },
+	{ 0x0012a4,   1, 0x04, 0x00000000 },
+	{ 0x001120,   4, 0x04, 0x00000000 },
+	{ 0x001118,   1, 0x04, 0x00000000 },
+	{ 0x00164c,   1, 0x04, 0x00000000 },
+	{ 0x001658,   1, 0x04, 0x00000000 },
+	{ 0x001910,   1, 0x04, 0x00000290 },
+	{ 0x001518,   1, 0x04, 0x00000000 },
+	{ 0x00165c,   1, 0x04, 0x00000001 },
+	{ 0x001520,   1, 0x04, 0x00000000 },
+	{ 0x001604,   1, 0x04, 0x00000000 },
+	{ 0x001570,   1, 0x04, 0x00000000 },
+	{ 0x0013b0,   2, 0x04, 0x3f800000 },
+	{ 0x00020c,   1, 0x04, 0x00000000 },
+	{ 0x001670,   1, 0x04, 0x30201000 },
+	{ 0x001674,   1, 0x04, 0x70605040 },
+	{ 0x001678,   1, 0x04, 0xb8a89888 },
+	{ 0x00167c,   1, 0x04, 0xf8e8d8c8 },
+	{ 0x00166c,   1, 0x04, 0x00000000 },
+	{ 0x001680,   1, 0x04, 0x00ffff00 },
+	{ 0x0012d0,   1, 0x04, 0x00000003 },
+	{ 0x0012d4,   1, 0x04, 0x00000002 },
+	{ 0x001684,   2, 0x04, 0x00000000 },
+	{ 0x000dac,   2, 0x04, 0x00001b02 },
+	{ 0x000db4,   1, 0x04, 0x00000000 },
+	{ 0x00168c,   1, 0x04, 0x00000000 },
+	{ 0x0015bc,   1, 0x04, 0x00000000 },
+	{ 0x00156c,   1, 0x04, 0x00000000 },
+	{ 0x00187c,   1, 0x04, 0x00000000 },
+	{ 0x001110,   1, 0x04, 0x00000001 },
+	{ 0x000dc0,   3, 0x04, 0x00000000 },
+	{ 0x001234,   1, 0x04, 0x00000000 },
+	{ 0x001690,   1, 0x04, 0x00000000 },
+	{ 0x0012ac,   1, 0x04, 0x00000001 },
+	{ 0x0002c4,   1, 0x04, 0x00000000 },
+	{ 0x000790,   5, 0x04, 0x00000000 },
+	{ 0x00077c,   1, 0x04, 0x00000000 },
+	{ 0x001000,   1, 0x04, 0x00000010 },
+	{ 0x0010fc,   1, 0x04, 0x00000000 },
+	{ 0x001290,   1, 0x04, 0x00000000 },
+	{ 0x000218,   1, 0x04, 0x00000010 },
+	{ 0x0012d8,   1, 0x04, 0x00000000 },
+	{ 0x0012dc,   1, 0x04, 0x00000010 },
+	{ 0x000d94,   1, 0x04, 0x00000001 },
+	{ 0x00155c,   2, 0x04, 0x00000000 },
+	{ 0x001564,   1, 0x04, 0x00000fff },
+	{ 0x001574,   2, 0x04, 0x00000000 },
+	{ 0x00157c,   1, 0x04, 0x000fffff },
+	{ 0x001354,   1, 0x04, 0x00000000 },
+	{ 0x001610,   1, 0x04, 0x00000012 },
+	{ 0x001608,   2, 0x04, 0x00000000 },
+	{ 0x00260c,   1, 0x04, 0x00000000 },
+	{ 0x0007ac,   1, 0x04, 0x00000000 },
+	{ 0x00162c,   1, 0x04, 0x00000003 },
+	{ 0x000210,   1, 0x04, 0x00000000 },
+	{ 0x000320,   1, 0x04, 0x00000000 },
+	{ 0x000324,   6, 0x04, 0x3f800000 },
+	{ 0x000750,   1, 0x04, 0x00000000 },
+	{ 0x000760,   1, 0x04, 0x39291909 },
+	{ 0x000764,   1, 0x04, 0x79695949 },
+	{ 0x000768,   1, 0x04, 0xb9a99989 },
+	{ 0x00076c,   1, 0x04, 0xf9e9d9c9 },
+	{ 0x000770,   1, 0x04, 0x30201000 },
+	{ 0x000774,   1, 0x04, 0x70605040 },
+	{ 0x000778,   1, 0x04, 0x00009080 },
+	{ 0x000780,   1, 0x04, 0x39291909 },
+	{ 0x000784,   1, 0x04, 0x79695949 },
+	{ 0x000788,   1, 0x04, 0xb9a99989 },
+	{ 0x00078c,   1, 0x04, 0xf9e9d9c9 },
+	{ 0x0007d0,   1, 0x04, 0x30201000 },
+	{ 0x0007d4,   1, 0x04, 0x70605040 },
+	{ 0x0007d8,   1, 0x04, 0x00009080 },
+	{ 0x00037c,   1, 0x04, 0x00000001 },
+	{ 0x000740,   2, 0x04, 0x00000000 },
+	{ 0x002600,   1, 0x04, 0x00000000 },
+	{ 0x001918,   1, 0x04, 0x00000000 },
+	{ 0x00191c,   1, 0x04, 0x00000900 },
+	{ 0x001920,   1, 0x04, 0x00000405 },
+	{ 0x001308,   1, 0x04, 0x00000001 },
+	{ 0x001924,   1, 0x04, 0x00000000 },
+	{ 0x0013ac,   1, 0x04, 0x00000000 },
+	{ 0x00192c,   1, 0x04, 0x00000001 },
+	{ 0x00193c,   1, 0x04, 0x00002c1c },
+	{ 0x000d7c,   1, 0x04, 0x00000000 },
+	{ 0x000f8c,   1, 0x04, 0x00000000 },
+	{ 0x0002c0,   1, 0x04, 0x00000001 },
+	{ 0x001510,   1, 0x04, 0x00000000 },
+	{ 0x001940,   1, 0x04, 0x00000000 },
+	{ 0x000ff4,   2, 0x04, 0x00000000 },
+	{ 0x00194c,   2, 0x04, 0x00000000 },
+	{ 0x001968,   1, 0x04, 0x00000000 },
+	{ 0x001590,   1, 0x04, 0x0000003f },
+	{ 0x0007e8,   4, 0x04, 0x00000000 },
+	{ 0x00196c,   1, 0x04, 0x00000011 },
+	{ 0x0002e4,   1, 0x04, 0x0000b001 },
+	{ 0x00036c,   2, 0x04, 0x00000000 },
+	{ 0x00197c,   1, 0x04, 0x00000000 },
+	{ 0x000fcc,   2, 0x04, 0x00000000 },
+	{ 0x0002d8,   1, 0x04, 0x00000040 },
+	{ 0x001980,   1, 0x04, 0x00000080 },
+	{ 0x001504,   1, 0x04, 0x00000080 },
+	{ 0x001984,   1, 0x04, 0x00000000 },
+	{ 0x000300,   1, 0x04, 0x00000001 },
+	{ 0x0013a8,   1, 0x04, 0x00000000 },
+	{ 0x0012ec,   1, 0x04, 0x00000000 },
+	{ 0x001310,   1, 0x04, 0x00000000 },
+	{ 0x001314,   1, 0x04, 0x00000001 },
+	{ 0x001380,   1, 0x04, 0x00000000 },
+	{ 0x001384,   4, 0x04, 0x00000001 },
+	{ 0x001394,   1, 0x04, 0x00000000 },
+	{ 0x00139c,   1, 0x04, 0x00000000 },
+	{ 0x001398,   1, 0x04, 0x00000000 },
+	{ 0x001594,   1, 0x04, 0x00000000 },
+	{ 0x001598,   4, 0x04, 0x00000001 },
+	{ 0x000f54,   3, 0x04, 0x00000000 },
+	{ 0x0019bc,   1, 0x04, 0x00000000 },
+	{ 0x000f9c,   2, 0x04, 0x00000000 },
+	{ 0x0012cc,   1, 0x04, 0x00000000 },
+	{ 0x0012e8,   1, 0x04, 0x00000000 },
+	{ 0x00130c,   1, 0x04, 0x00000001 },
+	{ 0x001360,   8, 0x04, 0x00000000 },
+	{ 0x00133c,   2, 0x04, 0x00000001 },
+	{ 0x001344,   1, 0x04, 0x00000002 },
+	{ 0x001348,   2, 0x04, 0x00000001 },
+	{ 0x001350,   1, 0x04, 0x00000002 },
+	{ 0x001358,   1, 0x04, 0x00000001 },
+	{ 0x0012e4,   1, 0x04, 0x00000000 },
+	{ 0x00131c,   4, 0x04, 0x00000000 },
+	{ 0x0019c0,   1, 0x04, 0x00000000 },
+	{ 0x001140,   1, 0x04, 0x00000000 },
+	{ 0x0019c4,   1, 0x04, 0x00000000 },
+	{ 0x0019c8,   1, 0x04, 0x00001500 },
+	{ 0x00135c,   1, 0x04, 0x00000000 },
+	{ 0x000f90,   1, 0x04, 0x00000000 },
+	{ 0x0019e0,   8, 0x04, 0x00000001 },
+	{ 0x0019cc,   1, 0x04, 0x00000001 },
+	{ 0x0015b8,   1, 0x04, 0x00000000 },
+	{ 0x001a00,   1, 0x04, 0x00001111 },
+	{ 0x001a04,   7, 0x04, 0x00000000 },
+	{ 0x000d6c,   2, 0x04, 0xffff0000 },
+	{ 0x0010f8,   1, 0x04, 0x00001010 },
+	{ 0x000d80,   5, 0x04, 0x00000000 },
+	{ 0x000da0,   1, 0x04, 0x00000000 },
+	{ 0x0007a4,   2, 0x04, 0x00000000 },
+	{ 0x001508,   1, 0x04, 0x80000000 },
+	{ 0x00150c,   1, 0x04, 0x40000000 },
+	{ 0x001668,   1, 0x04, 0x00000000 },
+	{ 0x000318,   2, 0x04, 0x00000008 },
+	{ 0x000d9c,   1, 0x04, 0x00000001 },
+	{ 0x000ddc,   1, 0x04, 0x00000002 },
+	{ 0x000374,   1, 0x04, 0x00000000 },
+	{ 0x000378,   1, 0x04, 0x00000020 },
+	{ 0x0007dc,   1, 0x04, 0x00000000 },
+	{ 0x00074c,   1, 0x04, 0x00000055 },
+	{ 0x001420,   1, 0x04, 0x00000003 },
+	{ 0x0017bc,   2, 0x04, 0x00000000 },
+	{ 0x0017c4,   1, 0x04, 0x00000001 },
+	{ 0x001008,   1, 0x04, 0x00000008 },
+	{ 0x00100c,   1, 0x04, 0x00000040 },
+	{ 0x001010,   1, 0x04, 0x0000012c },
+	{ 0x000d60,   1, 0x04, 0x00000040 },
+	{ 0x00075c,   1, 0x04, 0x00000003 },
+	{ 0x001018,   1, 0x04, 0x00000020 },
+	{ 0x00101c,   1, 0x04, 0x00000001 },
+	{ 0x001020,   1, 0x04, 0x00000020 },
+	{ 0x001024,   1, 0x04, 0x00000001 },
+	{ 0x001444,   3, 0x04, 0x00000000 },
+	{ 0x000360,   1, 0x04, 0x20164010 },
+	{ 0x000364,   1, 0x04, 0x00000020 },
+	{ 0x000368,   1, 0x04, 0x00000000 },
+	{ 0x000de4,   1, 0x04, 0x00000000 },
+	{ 0x000204,   1, 0x04, 0x00000006 },
+	{ 0x000208,   1, 0x04, 0x00000000 },
+	{ 0x0002cc,   2, 0x04, 0x003fffff },
+	{ 0x001220,   1, 0x04, 0x00000005 },
+	{ 0x000fdc,   1, 0x04, 0x00000000 },
+	{ 0x000f98,   1, 0x04, 0x00400008 },
+	{ 0x001284,   1, 0x04, 0x08000080 },
+	{ 0x001450,   1, 0x04, 0x00400008 },
+	{ 0x001454,   1, 0x04, 0x08000080 },
+	{ 0x000214,   1, 0x04, 0x00000000 },
+	{}
+};
+
+static struct nvc0_graph_init
+nv108_grctx_init_unk40xx[] = {
+	{ 0x404004,   8, 0x04, 0x00000000 },
+	{ 0x404024,   1, 0x04, 0x0000e000 },
+	{ 0x404028,   8, 0x04, 0x00000000 },
+	{ 0x4040a8,   8, 0x04, 0x00000000 },
+	{ 0x4040c8,   1, 0x04, 0xf800008f },
+	{ 0x4040d0,   6, 0x04, 0x00000000 },
+	{ 0x4040e8,   1, 0x04, 0x00001000 },
+	{ 0x4040f8,   1, 0x04, 0x00000000 },
+	{ 0x404100,  10, 0x04, 0x00000000 },
+	{ 0x404130,   2, 0x04, 0x00000000 },
+	{ 0x404138,   1, 0x04, 0x20000040 },
+	{ 0x404150,   1, 0x04, 0x0000002e },
+	{ 0x404154,   1, 0x04, 0x00000400 },
+	{ 0x404158,   1, 0x04, 0x00000200 },
+	{ 0x404164,   1, 0x04, 0x00000055 },
+	{ 0x40417c,   2, 0x04, 0x00000000 },
+	{ 0x404194,   1, 0x04, 0x01000700 },
+	{ 0x4041a0,   4, 0x04, 0x00000000 },
+	{ 0x404200,   1, 0x04, 0x0000a197 },
+	{ 0x404204,   1, 0x04, 0x0000a1c0 },
+	{ 0x404208,   1, 0x04, 0x0000a140 },
+	{ 0x40420c,   1, 0x04, 0x0000902d },
+	{}
+};
+
+static struct nvc0_graph_init
+nv108_grctx_init_unk58xx[] = {
+	{ 0x405800,   1, 0x04, 0x0f8000bf },
+	{ 0x405830,   1, 0x04, 0x02180648 },
+	{ 0x405834,   1, 0x04, 0x08000000 },
+	{ 0x405838,   1, 0x04, 0x00000000 },
+	{ 0x405854,   1, 0x04, 0x00000000 },
+	{ 0x405870,   4, 0x04, 0x00000001 },
+	{ 0x405a00,   2, 0x04, 0x00000000 },
+	{ 0x405a18,   1, 0x04, 0x00000000 },
+	{ 0x405a1c,   1, 0x04, 0x000000ff },
+	{}
+};
+
+static struct nvc0_graph_init
+nv108_grctx_init_unk64xx[] = {
+	{ 0x4064a8,   1, 0x04, 0x00000000 },
+	{ 0x4064ac,   1, 0x04, 0x00003fff },
+	{ 0x4064b0,   3, 0x04, 0x00000000 },
+	{ 0x4064c0,   1, 0x04, 0x802000f0 },
+	{ 0x4064c4,   1, 0x04, 0x0192ffff },
+	{ 0x4064c8,   1, 0x04, 0x00c20200 },
+	{ 0x4064cc,   9, 0x04, 0x00000000 },
+	{ 0x4064fc,   1, 0x04, 0x0000022a },
+	{}
+};
+
+static struct nvc0_graph_init
+nv108_grctx_init_unk78xx[] = {
+	{ 0x407804,   1, 0x04, 0x00000063 },
+	{ 0x40780c,   1, 0x04, 0x0a418820 },
+	{ 0x407810,   1, 0x04, 0x062080e6 },
+	{ 0x407814,   1, 0x04, 0x020398a4 },
+	{ 0x407818,   1, 0x04, 0x0e629062 },
+	{ 0x40781c,   1, 0x04, 0x0a418820 },
+	{ 0x407820,   1, 0x04, 0x000000e6 },
+	{ 0x4078bc,   1, 0x04, 0x00000103 },
+	{}
+};
+
+static struct nvc0_graph_init
+nv108_grctx_init_unk88xx[] = {
+	{ 0x408800,   1, 0x04, 0x32802a3c },
+	{ 0x408804,   1, 0x04, 0x00000040 },
+	{ 0x408808,   1, 0x04, 0x1003e005 },
+	{ 0x408840,   1, 0x04, 0x0000000b },
+	{ 0x408900,   1, 0x04, 0xb080b801 },
+	{ 0x408904,   1, 0x04, 0x62000001 },
+	{ 0x408908,   1, 0x04, 0x02c8102f },
+	{ 0x408980,   1, 0x04, 0x0000011d },
+	{}
+};
+
+static struct nvc0_graph_init
+nv108_grctx_init_gpc_0[] = {
+	{ 0x418380,   1, 0x04, 0x00000016 },
+	{ 0x418400,   1, 0x04, 0x38005e00 },
+	{ 0x418404,   1, 0x04, 0x71e0ffff },
+	{ 0x41840c,   1, 0x04, 0x00001008 },
+	{ 0x418410,   1, 0x04, 0x0fff0fff },
+	{ 0x418414,   1, 0x04, 0x02200fff },
+	{ 0x418450,   6, 0x04, 0x00000000 },
+	{ 0x418468,   1, 0x04, 0x00000001 },
+	{ 0x41846c,   2, 0x04, 0x00000000 },
+	{ 0x418600,   1, 0x04, 0x0000007f },
+	{ 0x418684,   1, 0x04, 0x0000001f },
+	{ 0x418700,   1, 0x04, 0x00000002 },
+	{ 0x418704,   2, 0x04, 0x00000080 },
+	{ 0x41870c,   2, 0x04, 0x00000000 },
+	{ 0x418800,   1, 0x04, 0x7006863a },
+	{ 0x418808,   1, 0x04, 0x00000000 },
+	{ 0x41880c,   1, 0x04, 0x00000030 },
+	{ 0x418810,   1, 0x04, 0x00000000 },
+	{ 0x418828,   1, 0x04, 0x00000044 },
+	{ 0x418830,   1, 0x04, 0x10000001 },
+	{ 0x4188d8,   1, 0x04, 0x00000008 },
+	{ 0x4188e0,   1, 0x04, 0x01000000 },
+	{ 0x4188e8,   5, 0x04, 0x00000000 },
+	{ 0x4188fc,   1, 0x04, 0x20100058 },
+	{ 0x41891c,   1, 0x04, 0x00ff00ff },
+	{ 0x418924,   1, 0x04, 0x00000000 },
+	{ 0x418928,   1, 0x04, 0x00ffff00 },
+	{ 0x41892c,   1, 0x04, 0x0000ff00 },
+	{ 0x418b00,   1, 0x04, 0x0000001e },
+	{ 0x418b08,   1, 0x04, 0x0a418820 },
+	{ 0x418b0c,   1, 0x04, 0x062080e6 },
+	{ 0x418b10,   1, 0x04, 0x020398a4 },
+	{ 0x418b14,   1, 0x04, 0x0e629062 },
+	{ 0x418b18,   1, 0x04, 0x0a418820 },
+	{ 0x418b1c,   1, 0x04, 0x000000e6 },
+	{ 0x418bb8,   1, 0x04, 0x00000103 },
+	{ 0x418c08,   1, 0x04, 0x00000001 },
+	{ 0x418c10,   8, 0x04, 0x00000000 },
+	{ 0x418c40,   1, 0x04, 0xffffffff },
+	{ 0x418c6c,   1, 0x04, 0x00000001 },
+	{ 0x418c80,   1, 0x04, 0x2020000c },
+	{ 0x418c8c,   1, 0x04, 0x00000001 },
+	{ 0x418d24,   1, 0x04, 0x00000000 },
+	{ 0x419000,   1, 0x04, 0x00000780 },
+	{ 0x419004,   2, 0x04, 0x00000000 },
+	{ 0x419014,   1, 0x04, 0x00000004 },
+	{}
+};
+
+static struct nvc0_graph_init
+nv108_grctx_init_tpc[] = {
+	{ 0x419848,   1, 0x04, 0x00000000 },
+	{ 0x419864,   1, 0x04, 0x00000129 },
+	{ 0x419888,   1, 0x04, 0x00000000 },
+	{ 0x419a00,   1, 0x04, 0x000100f0 },
+	{ 0x419a04,   1, 0x04, 0x00000001 },
+	{ 0x419a08,   1, 0x04, 0x00000421 },
+	{ 0x419a0c,   1, 0x04, 0x00120000 },
+	{ 0x419a10,   1, 0x04, 0x00000000 },
+	{ 0x419a14,   1, 0x04, 0x00000200 },
+	{ 0x419a1c,   1, 0x04, 0x0000c000 },
+	{ 0x419a20,   1, 0x04, 0x00000800 },
+	{ 0x419a30,   1, 0x04, 0x00000001 },
+	{ 0x419ac4,   1, 0x04, 0x0037f440 },
+	{ 0x419c00,   1, 0x04, 0x0000001a },
+	{ 0x419c04,   1, 0x04, 0x80000006 },
+	{ 0x419c08,   1, 0x04, 0x00000002 },
+	{ 0x419c20,   1, 0x04, 0x00000000 },
+	{ 0x419c24,   1, 0x04, 0x00084210 },
+	{ 0x419c28,   1, 0x04, 0x3efbefbe },
+	{ 0x419ce8,   1, 0x04, 0x00000000 },
+	{ 0x419cf4,   1, 0x04, 0x00000203 },
+	{ 0x419e04,   1, 0x04, 0x00000000 },
+	{ 0x419e08,   1, 0x04, 0x0000001d },
+	{ 0x419e0c,   1, 0x04, 0x00000000 },
+	{ 0x419e10,   1, 0x04, 0x00001c02 },
+	{ 0x419e44,   1, 0x04, 0x0013eff2 },
+	{ 0x419e48,   1, 0x04, 0x00000000 },
+	{ 0x419e4c,   1, 0x04, 0x0000007f },
+	{ 0x419e50,   2, 0x04, 0x00000000 },
+	{ 0x419e58,   1, 0x04, 0x00000001 },
+	{ 0x419e5c,   3, 0x04, 0x00000000 },
+	{ 0x419e68,   1, 0x04, 0x00000002 },
+	{ 0x419e6c,  12, 0x04, 0x00000000 },
+	{ 0x419eac,   1, 0x04, 0x00001f8f },
+	{ 0x419eb0,   1, 0x04, 0x0db00da0 },
+	{ 0x419eb8,   1, 0x04, 0x00000000 },
+	{ 0x419ec8,   1, 0x04, 0x0001304f },
+	{ 0x419f30,   4, 0x04, 0x00000000 },
+	{ 0x419f40,   1, 0x04, 0x00000018 },
+	{ 0x419f44,   3, 0x04, 0x00000000 },
+	{ 0x419f58,   1, 0x04, 0x00000020 },
+	{ 0x419f70,   1, 0x04, 0x00000000 },
+	{ 0x419f78,   1, 0x04, 0x000001eb },
+	{ 0x419f7c,   1, 0x04, 0x00000404 },
+	{}
+};
+
+static struct nvc0_graph_init
+nv108_grctx_init_unk[] = {
+	{ 0x41be24,   1, 0x04, 0x00000006 },
+	{ 0x41bec0,   1, 0x04, 0x10000000 },
+	{ 0x41bec4,   1, 0x04, 0x00037f7f },
+	{ 0x41bee4,   1, 0x04, 0x00000000 },
+	{ 0x41bef0,   1, 0x04, 0x000003ff },
+	{ 0x41bf00,   1, 0x04, 0x0a418820 },
+	{ 0x41bf04,   1, 0x04, 0x062080e6 },
+	{ 0x41bf08,   1, 0x04, 0x020398a4 },
+	{ 0x41bf0c,   1, 0x04, 0x0e629062 },
+	{ 0x41bf10,   1, 0x04, 0x0a418820 },
+	{ 0x41bf14,   1, 0x04, 0x000000e6 },
+	{ 0x41bfd0,   1, 0x04, 0x00900103 },
+	{ 0x41bfe0,   1, 0x04, 0x00400001 },
+	{ 0x41bfe4,   1, 0x04, 0x00000000 },
+	{}
+};
+
+static void
+nv108_grctx_generate_mods(struct nvc0_graph_priv *priv, struct nvc0_grctx *info)
+{
+	u32 magic[GPC_MAX][2];
+	u32 offset;
+	int gpc;
+
+	mmio_data(0x003000, 0x0100, NV_MEM_ACCESS_RW | NV_MEM_ACCESS_SYS);
+	mmio_data(0x008000, 0x0100, NV_MEM_ACCESS_RW | NV_MEM_ACCESS_SYS);
+	mmio_data(0x060000, 0x1000, NV_MEM_ACCESS_RW);
+	mmio_list(0x40800c, 0x00000000,  8, 1);
+	mmio_list(0x408010, 0x80000000,  0, 0);
+	mmio_list(0x419004, 0x00000000,  8, 1);
+	mmio_list(0x419008, 0x00000000,  0, 0);
+	mmio_list(0x408004, 0x00000000,  8, 0);
+	mmio_list(0x408008, 0x80000030,  0, 0);
+	mmio_list(0x418808, 0x00000000,  8, 0);
+	mmio_list(0x41880c, 0x80000030,  0, 0);
+	mmio_list(0x418810, 0x80000000, 12, 2);
+	mmio_list(0x419848, 0x10000000, 12, 2);
+
+	mmio_list(0x405830, 0x02180648,  0, 0);
+	mmio_list(0x4064c4, 0x0192ffff,  0, 0);
+
+	for (gpc = 0, offset = 0; gpc < priv->gpc_nr; gpc++) {
+		u16 magic0 = 0x0218 * priv->tpc_nr[gpc];
+		u16 magic1 = 0x0648 * priv->tpc_nr[gpc];
+		magic[gpc][0]  = 0x10000000 | (magic0 << 16) | offset;
+		magic[gpc][1]  = 0x00000000 | (magic1 << 16);
+		offset += 0x0324 * priv->tpc_nr[gpc];
+	}
+
+	for (gpc = 0; gpc < priv->gpc_nr; gpc++) {
+		mmio_list(GPC_UNIT(gpc, 0x30c0), magic[gpc][0], 0, 0);
+		mmio_list(GPC_UNIT(gpc, 0x30e4), magic[gpc][1] | offset, 0, 0);
+		offset += 0x07ff * priv->tpc_nr[gpc];
+	}
+
+	mmio_list(0x17e91c, 0x0b040a0b, 0, 0);
+	mmio_list(0x17e920, 0x00090d08, 0, 0);
+}
+
+static struct nvc0_graph_init *
+nv108_grctx_init_hub[] = {
+	nvc0_grctx_init_base,
+	nv108_grctx_init_unk40xx,
+	nvf0_grctx_init_unk44xx,
+	nve4_grctx_init_unk46xx,
+	nve4_grctx_init_unk47xx,
+	nv108_grctx_init_unk58xx,
+	nvf0_grctx_init_unk5bxx,
+	nvf0_grctx_init_unk60xx,
+	nv108_grctx_init_unk64xx,
+	nv108_grctx_init_unk78xx,
+	nve4_grctx_init_unk80xx,
+	nv108_grctx_init_unk88xx,
+	NULL
+};
+
+struct nvc0_graph_init *
+nv108_grctx_init_gpc[] = {
+	nv108_grctx_init_gpc_0,
+	nvc0_grctx_init_gpc_1,
+	nv108_grctx_init_tpc,
+	nv108_grctx_init_unk,
+	NULL
+};
+
+struct nvc0_graph_init
+nv108_grctx_init_mthd_magic[] = {
+	{ 0x3410, 1, 0x04, 0x8e0e2006 },
+	{ 0x3414, 1, 0x04, 0x00000038 },
+	{}
+};
+
+static struct nvc0_graph_mthd
+nv108_grctx_init_mthd[] = {
+	{ 0xa197, nv108_grctx_init_a197, },
+	{ 0x902d, nvc0_grctx_init_902d, },
+	{ 0x902d, nv108_grctx_init_mthd_magic, },
+	{}
+};
+
+struct nouveau_oclass *
+nv108_grctx_oclass = &(struct nvc0_grctx_oclass) {
+	.base.handle = NV_ENGCTX(GR, 0x08),
+	.base.ofuncs = &(struct nouveau_ofuncs) {
+		.ctor = nvc0_graph_context_ctor,
+		.dtor = nvc0_graph_context_dtor,
+		.init = _nouveau_graph_context_init,
+		.fini = _nouveau_graph_context_fini,
+		.rd32 = _nouveau_graph_context_rd32,
+		.wr32 = _nouveau_graph_context_wr32,
+	},
+	.main = nve4_grctx_generate_main,
+	.mods = nv108_grctx_generate_mods,
+	.unkn = nve4_grctx_generate_unkn,
+	.hub  = nv108_grctx_init_hub,
+	.gpc  = nv108_grctx_init_gpc,
+	.icmd = nv108_grctx_init_icmd,
+	.mthd = nv108_grctx_init_mthd,
+}.base;
diff --git a/drivers/gpu/drm/nouveau/core/engine/graph/ctxnvf0.c b/drivers/gpu/drm/nouveau/core/engine/graph/ctxnvf0.c
index dcb2ebb..44012c3 100644
--- a/drivers/gpu/drm/nouveau/core/engine/graph/ctxnvf0.c
+++ b/drivers/gpu/drm/nouveau/core/engine/graph/ctxnvf0.c
@@ -50,7 +50,7 @@
 	{}
 };
 
-static struct nvc0_graph_init
+struct nvc0_graph_init
 nvf0_grctx_init_unk44xx[] = {
 	{ 0x404404,  12, 0x04, 0x00000000 },
 	{ 0x404438,   1, 0x04, 0x00000000 },
@@ -62,7 +62,7 @@
 	{}
 };
 
-static struct nvc0_graph_init
+struct nvc0_graph_init
 nvf0_grctx_init_unk5bxx[] = {
 	{ 0x405b00,   1, 0x04, 0x00000000 },
 	{ 0x405b10,   1, 0x04, 0x00001000 },
@@ -70,7 +70,7 @@
 	{}
 };
 
-static struct nvc0_graph_init
+struct nvc0_graph_init
 nvf0_grctx_init_unk60xx[] = {
 	{ 0x406020,   1, 0x04, 0x034103c1 },
 	{ 0x406028,   4, 0x04, 0x00000001 },
@@ -286,7 +286,6 @@
 	nvf0_grctx_init_unk64xx,
 	nve4_grctx_init_unk80xx,
 	nvf0_grctx_init_unk88xx,
-	nvd9_grctx_init_rop,
 	NULL
 };
 
diff --git a/drivers/gpu/drm/nouveau/core/engine/graph/fuc/com.fuc b/drivers/gpu/drm/nouveau/core/engine/graph/fuc/com.fuc
index 5d24b6d..e148961 100644
--- a/drivers/gpu/drm/nouveau/core/engine/graph/fuc/com.fuc
+++ b/drivers/gpu/drm/nouveau/core/engine/graph/fuc/com.fuc
@@ -38,7 +38,7 @@
 	cmpu b32 $r8 $r9
 	bra ne #queue_put_next
 		mov $r15 E_CMD_OVERFLOW
-		call #error
+		call(error)
 		ret
 
 	// store cmd/data on queue
@@ -92,18 +92,16 @@
 // Out: $r15 value
 //
 nv_rd32:
-	mov $r11 0x728
-	shl b32 $r11 6
 	mov b32 $r12 $r14
 	bset $r12 31			// MMIO_CTRL_PENDING
-	iowr I[$r11 + 0x000] $r12	// MMIO_CTRL
+	nv_iowr(NV_PGRAPH_FECS_MMIO_CTRL, 0, $r12)
 	nv_rd32_wait:
-		iord $r12 I[$r11 + 0x000]
+		nv_iord($r12, NV_PGRAPH_FECS_MMIO_CTRL, 0)
 		xbit $r12 $r12 31
 		bra ne #nv_rd32_wait
 	mov $r10 6			// DONE_MMIO_RD
-	call #wait_doneo
-	iord $r15 I[$r11 + 0x100]	// MMIO_RDVAL
+	call(wait_doneo)
+	nv_iord($r15, NV_PGRAPH_FECS_MMIO_RDVAL, 0)
 	ret
 
 // nv_wr32 - write 32-bit value to nv register
@@ -112,37 +110,17 @@
 //      $r15 value
 //
 nv_wr32:
-	mov $r11 0x728
-	shl b32 $r11 6
-	iowr I[$r11 + 0x200] $r15	// MMIO_WRVAL
+	nv_iowr(NV_PGRAPH_FECS_MMIO_WRVAL, 0, $r15)
 	mov b32 $r12 $r14
 	bset $r12 31			// MMIO_CTRL_PENDING
 	bset $r12 30			// MMIO_CTRL_WRITE
-	iowr I[$r11 + 0x000] $r12	// MMIO_CTRL
+	nv_iowr(NV_PGRAPH_FECS_MMIO_CTRL, 0, $r12)
 	nv_wr32_wait:
-		iord $r12 I[$r11 + 0x000]
+		nv_iord($r12, NV_PGRAPH_FECS_MMIO_CTRL, 0)
 		xbit $r12 $r12 31
 		bra ne #nv_wr32_wait
 	ret
 
-// (re)set watchdog timer
-//
-// In : $r15 timeout
-//
-watchdog_reset:
-	mov $r8 0x430
-	shl b32 $r8 6
-	bset $r15 31
-	iowr I[$r8 + 0x000] $r15
-	ret
-
-// clear watchdog timer
-watchdog_clear:
-	mov $r8 0x430
-	shl b32 $r8 6
-	iowr I[$r8 + 0x000] $r0
-	ret
-
 // wait_donez - wait on FUC_DONE bit to become clear
 //
 // In : $r10 bit to wait on
@@ -163,13 +141,9 @@
 //
 wait_doneo:
 	trace_set(T_WAIT);
-	mov $r8 0x818
-	shl b32 $r8 6
-	iowr I[$r8 + 0x000] $r10
+	nv_iowr(NV_PGRAPH_FECS_CC_SCRATCH_VAL(6), 0, $r10)
 	wait_doneo_e:
-		mov $r8 0x400
-		shl b32 $r8 6
-		iord $r8 I[$r8 + 0x000]
+		nv_iord($r8, NV_PGRAPH_FECS_SIGNAL, 0)
 		xbit $r8 $r8 $r10
 		bra e #wait_doneo_e
 	trace_clr(T_WAIT)
@@ -209,21 +183,18 @@
 //
 mmctx_xfer:
 	trace_set(T_MMCTX)
-	mov $r8 0x710
-	shl b32 $r8 6
 	clear b32 $r9
 	or $r11 $r11
 	bra e #mmctx_base_disabled
-		iowr I[$r8 + 0x000] $r11	// MMCTX_BASE
+		nv_iowr(NV_PGRAPH_FECS_MMCTX_BASE, 0, $r11)
 		bset $r9 0			// BASE_EN
 	mmctx_base_disabled:
 	or $r14 $r14
 	bra e #mmctx_multi_disabled
-		iowr I[$r8 + 0x200] $r14 	// MMCTX_MULTI_STRIDE
-		iowr I[$r8 + 0x300] $r15 	// MMCTX_MULTI_MASK
+		nv_iowr(NV_PGRAPH_FECS_MMCTX_MULTI_STRIDE, 0, $r14)
+		nv_iowr(NV_PGRAPH_FECS_MMCTX_MULTI_MASK, 0, $r15)
 		bset $r9 1			// MULTI_EN
 	mmctx_multi_disabled:
-	add b32 $r8 0x100
 
 	xbit $r11 $r10 0
 	shl b32 $r11 16			// DIR
@@ -231,20 +202,20 @@
 	xbit $r14 $r10 1
 	shl b32 $r14 17
 	or $r11 $r14			// START_TRIGGER
-	iowr I[$r8 + 0x000] $r11	// MMCTX_CTRL
+	nv_iowr(NV_PGRAPH_FECS_MMCTX_CTRL, 0, $r11)
 
 	// loop over the mmio list, and send requests to the hw
 	mmctx_exec_loop:
 		// wait for space in mmctx queue
 		mmctx_wait_free:
-			iord $r14 I[$r8 + 0x000] // MMCTX_CTRL
+			nv_iord($r14, NV_PGRAPH_FECS_MMCTX_CTRL, 0)
 			and $r14 0x1f
 			bra e #mmctx_wait_free
 
 		// queue up an entry
 		ld b32 $r14 D[$r12]
 		or $r14 $r9
-		iowr I[$r8 + 0x300] $r14
+		nv_iowr(NV_PGRAPH_FECS_MMCTX_QUEUE, 0, $r14)
 		add b32 $r12 4
 		cmpu b32 $r12 $r13
 		bra ne #mmctx_exec_loop
@@ -253,22 +224,22 @@
 	bra ne #mmctx_stop
 		// wait for queue to empty
 		mmctx_fini_wait:
-			iord $r11 I[$r8 + 0x000]	// MMCTX_CTRL
+			nv_iord($r11, NV_PGRAPH_FECS_MMCTX_CTRL, 0)
 			and $r11 0x1f
 			cmpu b32 $r11 0x10
 			bra ne #mmctx_fini_wait
 		mov $r10 2				// DONE_MMCTX
-		call #wait_donez
+		call(wait_donez)
 		bra #mmctx_done
 	mmctx_stop:
 		xbit $r11 $r10 0
 		shl b32 $r11 16			// DIR
 		bset $r11 12			// QLIMIT = 0x10
 		bset $r11 18			// STOP_TRIGGER
-		iowr I[$r8 + 0x000] $r11	// MMCTX_CTRL
+		nv_iowr(NV_PGRAPH_FECS_MMCTX_CTRL, 0, $r11)
 		mmctx_stop_wait:
 			// wait for STOP_TRIGGER to clear
-			iord $r11 I[$r8 + 0x000] // MMCTX_CTRL
+			nv_iord($r11, NV_PGRAPH_FECS_MMCTX_CTRL, 0)
 			xbit $r11 $r11 18
 			bra ne #mmctx_stop_wait
 	mmctx_done:
@@ -280,28 +251,24 @@
 strand_wait:
 	push $r10
 	mov $r10 2
-	call #wait_donez
+	call(wait_donez)
 	pop $r10
 	ret
 
 // unknown - call before issuing strand commands
 //
 strand_pre:
-	mov $r8 0x4afc
-	sethi $r8 0x20000
-	mov $r9 0xc
-	iowr I[$r8] $r9
-	call #strand_wait
+	mov $r9 NV_PGRAPH_FECS_STRAND_CMD_ENABLE
+	nv_iowr(NV_PGRAPH_FECS_STRAND_CMD, 0x3f, $r9)
+	call(strand_wait)
 	ret
 
 // unknown - call after issuing strand commands
 //
 strand_post:
-	mov $r8 0x4afc
-	sethi $r8 0x20000
-	mov $r9 0xd
-	iowr I[$r8] $r9
-	call #strand_wait
+	mov $r9 NV_PGRAPH_FECS_STRAND_CMD_DISABLE
+	nv_iowr(NV_PGRAPH_FECS_STRAND_CMD, 0x3f, $r9)
+	call(strand_wait)
 	ret
 
 // Selects strand set?!
@@ -309,18 +276,14 @@
 // In: $r14 id
 //
 strand_set:
-	mov $r10 0x4ffc
-	sethi $r10 0x20000
-	sub b32 $r11 $r10 0x500
 	mov $r12 0xf
-	iowr I[$r10 + 0x000] $r12		// 0x93c = 0xf
-	mov $r12 0xb
-	iowr I[$r11 + 0x000] $r12		// 0x928 = 0xb
-	call #strand_wait
-	iowr I[$r10 + 0x000] $r14		// 0x93c = <id>
-	mov $r12 0xa
-	iowr I[$r11 + 0x000] $r12		// 0x928 = 0xa
-	call #strand_wait
+	nv_iowr(NV_PGRAPH_FECS_STRAND_FILTER, 0x3f, $r12)
+	mov $r12 NV_PGRAPH_FECS_STRAND_CMD_DEACTIVATE_FILTER
+	nv_iowr(NV_PGRAPH_FECS_STRAND_CMD, 0x3f, $r12)
+	nv_iowr(NV_PGRAPH_FECS_STRAND_FILTER, 0x3f, $r14)
+	mov $r12 NV_PGRAPH_FECS_STRAND_CMD_ACTIVATE_FILTER
+	nv_iowr(NV_PGRAPH_FECS_STRAND_CMD, 0x3f, $r12)
+	call(strand_wait)
 	ret
 
 // Initialise strand context data
@@ -332,30 +295,27 @@
 //
 strand_ctx_init:
 	trace_set(T_STRINIT)
-	call #strand_pre
+	call(strand_pre)
 	mov $r14 3
-	call #strand_set
-	mov $r10 0x46fc
-	sethi $r10 0x20000
-	add b32 $r11 $r10 0x400
-	iowr I[$r10 + 0x100] $r0	// STRAND_FIRST_GENE = 0
-	mov $r12 1
-	iowr I[$r11 + 0x000] $r12	// STRAND_CMD = LATCH_FIRST_GENE
-	call #strand_wait
+	call(strand_set)
+
+	clear b32 $r12
+	nv_iowr(NV_PGRAPH_FECS_STRAND_SELECT, 0x3f, $r12)
+	mov $r12 NV_PGRAPH_FECS_STRAND_CMD_SEEK
+	nv_iowr(NV_PGRAPH_FECS_STRAND_CMD, 0x3f, $r12)
+	call(strand_wait)
 	sub b32 $r12 $r0 1
-	iowr I[$r10 + 0x000] $r12	// STRAND_GENE_CNT = 0xffffffff
-	mov $r12 2
-	iowr I[$r11 + 0x000] $r12	// STRAND_CMD = LATCH_GENE_CNT
-	call #strand_wait
-	call #strand_post
+	nv_iowr(NV_PGRAPH_FECS_STRAND_DATA, 0x3f, $r12)
+	mov $r12 NV_PGRAPH_FECS_STRAND_CMD_GET_INFO
+	nv_iowr(NV_PGRAPH_FECS_STRAND_CMD, 0x3f, $r12)
+	call(strand_wait)
+	call(strand_post)
 
 	// read the size of each strand, poke the context offset of
 	// each into STRAND_{SAVE,LOAD}_SWBASE now, no need to worry
 	// about it later then.
-	mov $r8 0x880
-	shl b32 $r8 6
-	iord $r9 I[$r8 + 0x000]		// STRANDS
-	add b32 $r8 0x2200
+	nv_mkio($r8, NV_PGRAPH_FECS_STRAND_SAVE_SWBASE, 0x00)
+	nv_iord($r9, NV_PGRAPH_FECS_STRANDS_CNT, 0x00)
 	shr b32 $r14 $r15 8
 	ctx_init_strand_loop:
 		iowr I[$r8 + 0x000] $r14	// STRAND_SAVE_SWBASE
diff --git a/drivers/gpu/drm/nouveau/core/engine/graph/fuc/gpc.fuc b/drivers/gpu/drm/nouveau/core/engine/graph/fuc/gpc.fuc
index 5547c1b..96cbcea 100644
--- a/drivers/gpu/drm/nouveau/core/engine/graph/fuc/gpc.fuc
+++ b/drivers/gpu/drm/nouveau/core/engine/graph/fuc/gpc.fuc
@@ -58,12 +58,9 @@
 //
 error:
 	push $r14
-	mov $r14 -0x67ec 	// 0x9814
-	sethi $r14 0x400000
-	call #nv_wr32		// HUB_CTXCTL_CC_SCRATCH[5] = error code
-	add b32 $r14 0x41c
+	nv_wr32(NV_PGRAPH_FECS_CC_SCRATCH_VAL(5), $r15)
 	mov $r15 1
-	call #nv_wr32		// HUB_CTXCTL_INTR_UP_SET
+	nv_wr32(NV_PGRAPH_FECS_INTR_UP_SET, $r15)
 	pop $r14
 	ret
 
@@ -84,46 +81,40 @@
 	mov $sp $r0
 
 	// enable fifo access
-	mov $r1 0x1200
-	mov $r2 2
-	iowr I[$r1 + 0x000] $r2		// FIFO_ENABLE
+	mov $r2 NV_PGRAPH_GPCX_GPCCS_ACCESS_FIFO
+	nv_iowr(NV_PGRAPH_GPCX_GPCCS_ACCESS, 0, $r2)
 
 	// setup i0 handler, and route all interrupts to it
 	mov $r1 #ih
 	mov $iv0 $r1
-	mov $r1 0x400
-	iowr I[$r1 + 0x300] $r0		// INTR_DISPATCH
+	nv_iowr(NV_PGRAPH_GPCX_GPCCS_INTR_ROUTE, 0, $r0)
 
 	// enable fifo interrupt
-	mov $r2 4
-	iowr I[$r1 + 0x000] $r2		// INTR_EN_SET
+	mov $r2 NV_PGRAPH_GPCX_GPCCS_INTR_EN_SET_FIFO
+	nv_iowr(NV_PGRAPH_GPCX_GPCCS_INTR_EN_SET, 0, $r2)
 
 	// enable interrupts
 	bset $flags ie0
 
 	// figure out which GPC we are, and how many TPCs we have
-	mov $r1 0x608
-	shl b32 $r1 6
-	iord $r2 I[$r1 + 0x000]		// UNITS
+	nv_iord($r2, NV_PGRAPH_GPCX_GPCCS_UNITS, 0)
 	mov $r3 1
 	and $r2 0x1f
 	shl b32 $r3 $r2
 	sub b32 $r3 1
 	st b32 D[$r0 + #tpc_count] $r2
 	st b32 D[$r0 + #tpc_mask] $r3
-	add b32 $r1 0x400
-	iord $r2 I[$r1 + 0x000]		// MYINDEX
+	nv_iord($r2, NV_PGRAPH_GPCX_GPCCS_MYINDEX, 0)
 	st b32 D[$r0 + #gpc_id] $r2
 
 #if NV_PGRAPH_GPCX_UNK__SIZE > 0
 	// figure out which, and how many, UNKs are actually present
-	mov $r14 0x0c30
-	sethi $r14 0x500000
+	imm32($r14, 0x500c30)
 	clear b32 $r2
 	clear b32 $r3
 	clear b32 $r4
 	init_unk_loop:
-		call #nv_rd32
+		call(nv_rd32)
 		cmp b32 $r15 0
 		bra z #init_unk_next
 			mov $r15 1
@@ -146,23 +137,21 @@
 
 	// set mmctx base addresses now so we don't have to do it later,
 	// they don't currently ever change
-	mov $r4 0x700
-	shl b32 $r4 6
 	shr b32 $r5 $r2 8
-	iowr I[$r4 + 0x000] $r5		// MMCTX_SAVE_SWBASE
-	iowr I[$r4 + 0x100] $r5		// MMCTX_LOAD_SWBASE
+	nv_iowr(NV_PGRAPH_GPCX_GPCCS_MMCTX_SAVE_SWBASE, 0, $r5)
+	nv_iowr(NV_PGRAPH_GPCX_GPCCS_MMCTX_LOAD_SWBASE, 0, $r5)
 
 	// calculate GPC mmio context size
 	ld b32 $r14 D[$r0 + #gpc_mmio_list_head]
 	ld b32 $r15 D[$r0 + #gpc_mmio_list_tail]
-	call #mmctx_size
+	call(mmctx_size)
 	add b32 $r2 $r15
 	add b32 $r3 $r15
 
 	// calculate per-TPC mmio context size
 	ld b32 $r14 D[$r0 + #tpc_mmio_list_head]
 	ld b32 $r15 D[$r0 + #tpc_mmio_list_tail]
-	call #mmctx_size
+	call(mmctx_size)
 	ld b32 $r14 D[$r0 + #tpc_count]
 	mulu $r14 $r15
 	add b32 $r2 $r14
@@ -172,7 +161,7 @@
 	// calculate per-UNK mmio context size
 	ld b32 $r14 D[$r0 + #unk_mmio_list_head]
 	ld b32 $r15 D[$r0 + #unk_mmio_list_tail]
-	call #mmctx_size
+	call(mmctx_size)
 	ld b32 $r14 D[$r0 + #unk_count]
 	mulu $r14 $r15
 	add b32 $r2 $r14
@@ -180,9 +169,8 @@
 #endif
 
 	// round up base/size to 256 byte boundary (for strand SWBASE)
-	add b32 $r4 0x1300
 	shr b32 $r3 2
-	iowr I[$r4 + 0x000] $r3		// MMCTX_LOAD_COUNT, wtf for?!?
+	nv_iowr(NV_PGRAPH_GPCX_GPCCS_MMCTX_LOAD_COUNT, 0, $r3) // wtf for?!
 	shr b32 $r2 8
 	shr b32 $r3 6
 	add b32 $r2 1
@@ -192,7 +180,7 @@
 
 	// calculate size of strand context data
 	mov b32 $r15 $r2
-	call #strand_ctx_init
+	call(strand_ctx_init)
 	add b32 $r3 $r15
 
 	// save context size, and tell HUB we're done
@@ -208,7 +196,7 @@
 	bset $flags $p0
 	sleep $p0
 	mov $r13 #cmd_queue
-	call #queue_get
+	call(queue_get)
 	bra $p1 #main
 
 	// 0x0000-0x0003 are all context transfers
@@ -224,13 +212,13 @@
 		or $r1 $r14
 		mov $flags $r1
 		// transfer context data
-		call #ctx_xfer
+		call(ctx_xfer)
 		bra #main
 
 	main_not_ctx_xfer:
 	shl b32 $r15 $r14 16
 	or $r15 E_BAD_COMMAND
-	call #error
+	call(error)
 	bra #main
 
 // interrupt handler
@@ -247,22 +235,20 @@
 	clear b32 $r0
 
 	// incoming fifo command?
-	iord $r10 I[$r0 + 0x200]	// INTR
-	and $r11 $r10 0x00000004
+	nv_iord($r10, NV_PGRAPH_GPCX_GPCCS_INTR, 0)
+	and $r11 $r10 NV_PGRAPH_GPCX_GPCCS_INTR_FIFO
 	bra e #ih_no_fifo
 		// queue incoming fifo command for later processing
-		mov $r11 0x1900
 		mov $r13 #cmd_queue
-		iord $r14 I[$r11 + 0x100]	// FIFO_CMD
-		iord $r15 I[$r11 + 0x000]	// FIFO_DATA
-		call #queue_put
-		add b32 $r11 0x400
+		nv_iord($r14, NV_PGRAPH_GPCX_GPCCS_FIFO_CMD, 0)
+		nv_iord($r15, NV_PGRAPH_GPCX_GPCCS_FIFO_DATA, 0)
+		call(queue_put)
 		mov $r14 1
-		iowr I[$r11 + 0x000] $r14	// FIFO_ACK
+		nv_iowr(NV_PGRAPH_GPCX_GPCCS_FIFO_ACK, 0, $r14)
 
 	// ack, and wake up main()
 	ih_no_fifo:
-	iowr I[$r0 + 0x100] $r10	// INTR_ACK
+	nv_iowr(NV_PGRAPH_GPCX_GPCCS_INTR_ACK, 0, $r10)
 
 	pop $r15
 	pop $r14
@@ -283,9 +269,7 @@
 	mov $r15 1
 	ld b32 $r14 D[$r0 + #gpc_id]
 	shl b32 $r15 $r14
-	mov $r14 -0x6be8 	// 0x409418 - HUB_BAR_SET
-	sethi $r14 0x400000
-	call #nv_wr32
+	nv_wr32(0x409418, $r15)	// 0x409418 - HUB_BAR_SET
 	ret
 
 // Disables various things, waits a bit, and re-enables them..
@@ -295,16 +279,15 @@
 // funny things happen.
 //
 ctx_redswitch:
-	mov $r14 0x614
-	shl b32 $r14 6
-	mov $r15 0x020
-	iowr I[$r14] $r15	// GPC_RED_SWITCH = POWER
-	mov $r15 8
+	mov $r15 NV_PGRAPH_GPCX_GPCCS_RED_SWITCH_POWER
+	nv_iowr(NV_PGRAPH_GPCX_GPCCS_RED_SWITCH, 0, $r15)
+	mov $r14 8
 	ctx_redswitch_delay:
-		sub b32 $r15 1
+		sub b32 $r14 1
 		bra ne #ctx_redswitch_delay
-	mov $r15 0xa20
-	iowr I[$r14] $r15	// GPC_RED_SWITCH = UNK11, ENABLE, POWER
+	or $r15 NV_PGRAPH_GPCX_GPCCS_RED_SWITCH_UNK11
+	or $r15 NV_PGRAPH_GPCX_GPCCS_RED_SWITCH_ENABLE
+	nv_iowr(NV_PGRAPH_GPCX_GPCCS_RED_SWITCH, 0, $r15)
 	ret
 
 // Transfer GPC context data between GPU and storage area
@@ -317,46 +300,37 @@
 //
 ctx_xfer:
 	// set context base address
-	mov $r1 0xa04
-	shl b32 $r1 6
-	iowr I[$r1 + 0x000] $r15// MEM_BASE
+	nv_iowr(NV_PGRAPH_GPCX_GPCCS_MEM_BASE, 0, $r15)
 	bra not $p1 #ctx_xfer_not_load
-		call #ctx_redswitch
+		call(ctx_redswitch)
 	ctx_xfer_not_load:
 
 	// strands
-	mov $r1 0x4afc
-	sethi $r1 0x20000
-	mov $r2 0xc
-	iowr I[$r1] $r2		// STRAND_CMD(0x3f) = 0x0c
-	call #strand_wait
-	mov $r2 0x47fc
-	sethi $r2 0x20000
-	iowr I[$r2] $r0		// STRAND_FIRST_GENE(0x3f) = 0x00
-	xbit $r2 $flags $p1
-	add b32 $r2 3
-	iowr I[$r1] $r2		// STRAND_CMD(0x3f) = 0x03/0x04 (SAVE/LOAD)
+	call(strand_pre)
+	clear b32 $r2
+	nv_iowr(NV_PGRAPH_GPCX_GPCCS_STRAND_SELECT, 0x3f, $r2)
+	xbit $r2 $flags $p1	// SAVE/LOAD
+	add b32 $r2 NV_PGRAPH_GPCX_GPCCS_STRAND_CMD_SAVE
+	nv_iowr(NV_PGRAPH_GPCX_GPCCS_STRAND_CMD, 0x3f, $r2)
 
 	// mmio context
 	xbit $r10 $flags $p1	// direction
 	or $r10 2		// first
-	mov $r11 0x0000
-	sethi $r11 0x500000
+	imm32($r11,0x500000)
 	ld b32 $r12 D[$r0 + #gpc_id]
 	shl b32 $r12 15
 	add b32 $r11 $r12	// base = NV_PGRAPH_GPCn
 	ld b32 $r12 D[$r0 + #gpc_mmio_list_head]
 	ld b32 $r13 D[$r0 + #gpc_mmio_list_tail]
 	mov $r14 0		// not multi
-	call #mmctx_xfer
+	call(mmctx_xfer)
 
 	// per-TPC mmio context
 	xbit $r10 $flags $p1	// direction
 #if !NV_PGRAPH_GPCX_UNK__SIZE
 	or $r10 4		// last
 #endif
-	mov $r11 0x4000
-	sethi $r11 0x500000	// base = NV_PGRAPH_GPC0_TPC0
+	imm32($r11, 0x504000)
 	ld b32 $r12 D[$r0 + #gpc_id]
 	shl b32 $r12 15
 	add b32 $r11 $r12	// base = NV_PGRAPH_GPCn_TPC0
@@ -364,14 +338,13 @@
 	ld b32 $r13 D[$r0 + #tpc_mmio_list_tail]
 	ld b32 $r15 D[$r0 + #tpc_mask]
 	mov $r14 0x800		// stride = 0x800
-	call #mmctx_xfer
+	call(mmctx_xfer)
 
 #if NV_PGRAPH_GPCX_UNK__SIZE > 0
 	// per-UNK mmio context
 	xbit $r10 $flags $p1	// direction
 	or $r10 4		// last
-	mov $r11 0x3000
-	sethi $r11 0x500000	// base = NV_PGRAPH_GPC0_UNK0
+	imm32($r11, 0x503000)
 	ld b32 $r12 D[$r0 + #gpc_id]
 	shl b32 $r12 15
 	add b32 $r11 $r12	// base = NV_PGRAPH_GPCn_UNK0
@@ -379,11 +352,11 @@
 	ld b32 $r13 D[$r0 + #unk_mmio_list_tail]
 	ld b32 $r15 D[$r0 + #unk_mask]
 	mov $r14 0x200		// stride = 0x200
-	call #mmctx_xfer
+	call(mmctx_xfer)
 #endif
 
 	// wait for strands to finish
-	call #strand_wait
+	call(strand_wait)
 
 	// if load, or a save without a load following, do some
 	// unknown stuff that's done after finishing a block of
@@ -391,14 +364,10 @@
 	bra $p1 #ctx_xfer_post
 	bra not $p2 #ctx_xfer_done
 	ctx_xfer_post:
-		mov $r1 0x4afc
-		sethi $r1 0x20000
-		mov $r2 0xd
-		iowr I[$r1] $r2		// STRAND_CMD(0x3f) = 0x0d
-		call #strand_wait
+		call(strand_post)
 
 	// mark completion in HUB's barrier
 	ctx_xfer_done:
-	call #hub_barrier_done
+	call(hub_barrier_done)
 	ret
 #endif
diff --git a/drivers/gpu/drm/nouveau/core/engine/graph/fuc/gpcnv108.fuc5 b/drivers/gpu/drm/nouveau/core/engine/graph/fuc/gpcnv108.fuc5
new file mode 100644
index 0000000..bd30262d
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/engine/graph/fuc/gpcnv108.fuc5
@@ -0,0 +1,42 @@
+/*
+ * Copyright 2013 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs <bskeggs@redhat.com>
+ */
+
+#define NV_PGRAPH_GPCX_UNK__SIZE                                     0x00000001
+
+#define CHIPSET GK208
+#include "macros.fuc"
+
+.section #nv108_grgpc_data
+#define INCLUDE_DATA
+#include "com.fuc"
+#include "gpc.fuc"
+#undef INCLUDE_DATA
+
+.section #nv108_grgpc_code
+#define INCLUDE_CODE
+bra #init
+#include "com.fuc"
+#include "gpc.fuc"
+.align 256
+#undef INCLUDE_CODE
diff --git a/drivers/gpu/drm/nouveau/core/engine/graph/fuc/gpcnv108.fuc5.h b/drivers/gpu/drm/nouveau/core/engine/graph/fuc/gpcnv108.fuc5.h
new file mode 100644
index 0000000..27dc128
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/engine/graph/fuc/gpcnv108.fuc5.h
@@ -0,0 +1,473 @@
+uint32_t nv108_grgpc_data[] = {
+/* 0x0000: gpc_mmio_list_head */
+	0x0000006c,
+/* 0x0004: gpc_mmio_list_tail */
+/* 0x0004: tpc_mmio_list_head */
+	0x0000006c,
+/* 0x0008: tpc_mmio_list_tail */
+/* 0x0008: unk_mmio_list_head */
+	0x0000006c,
+/* 0x000c: unk_mmio_list_tail */
+	0x0000006c,
+/* 0x0010: gpc_id */
+	0x00000000,
+/* 0x0014: tpc_count */
+	0x00000000,
+/* 0x0018: tpc_mask */
+	0x00000000,
+/* 0x001c: unk_count */
+	0x00000000,
+/* 0x0020: unk_mask */
+	0x00000000,
+/* 0x0024: cmd_queue */
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+};
+
+uint32_t nv108_grgpc_code[] = {
+	0x03140ef5,
+/* 0x0004: queue_put */
+	0x9800d898,
+	0x86f001d9,
+	0xf489a408,
+	0x020f0b1b,
+	0x0002f87e,
+/* 0x001a: queue_put_next */
+	0x98c400f8,
+	0x0384b607,
+	0xb6008dbb,
+	0x8eb50880,
+	0x018fb500,
+	0xf00190b6,
+	0xd9b50f94,
+/* 0x0037: queue_get */
+	0xf400f801,
+	0xd8980131,
+	0x01d99800,
+	0x0bf489a4,
+	0x0789c421,
+	0xbb0394b6,
+	0x90b6009d,
+	0x009e9808,
+	0xb6019f98,
+	0x84f00180,
+	0x00d8b50f,
+/* 0x0063: queue_get_done */
+	0xf80132f4,
+/* 0x0065: nv_rd32 */
+	0xf0ecb200,
+	0x00801fc9,
+	0x0cf601ca,
+/* 0x0073: nv_rd32_wait */
+	0x8c04bd00,
+	0xcf01ca00,
+	0xccc800cc,
+	0xf61bf41f,
+	0xec7e060a,
+	0x008f0000,
+	0xffcf01cb,
+/* 0x008f: nv_wr32 */
+	0x8000f800,
+	0xf601cc00,
+	0x04bd000f,
+	0xc9f0ecb2,
+	0x1ec9f01f,
+	0x01ca0080,
+	0xbd000cf6,
+/* 0x00a9: nv_wr32_wait */
+	0xca008c04,
+	0x00cccf01,
+	0xf41fccc8,
+	0x00f8f61b,
+/* 0x00b8: wait_donez */
+	0x99f094bd,
+	0x37008000,
+	0x0009f602,
+	0x008004bd,
+	0x0af60206,
+/* 0x00cf: wait_donez_ne */
+	0x8804bd00,
+	0xcf010000,
+	0x8aff0088,
+	0xf61bf488,
+	0x99f094bd,
+	0x17008000,
+	0x0009f602,
+	0x00f804bd,
+/* 0x00ec: wait_doneo */
+	0x99f094bd,
+	0x37008000,
+	0x0009f602,
+	0x008004bd,
+	0x0af60206,
+/* 0x0103: wait_doneo_e */
+	0x8804bd00,
+	0xcf010000,
+	0x8aff0088,
+	0xf60bf488,
+	0x99f094bd,
+	0x17008000,
+	0x0009f602,
+	0x00f804bd,
+/* 0x0120: mmctx_size */
+/* 0x0122: nv_mmctx_size_loop */
+	0xe89894bd,
+	0x1a85b600,
+	0xb60180b6,
+	0x98bb0284,
+	0x04e0b600,
+	0x1bf4efa4,
+	0xf89fb2ec,
+/* 0x013d: mmctx_xfer */
+	0xf094bd00,
+	0x00800199,
+	0x09f60237,
+	0xbd04bd00,
+	0x05bbfd94,
+	0x800f0bf4,
+	0xf601c400,
+	0x04bd000b,
+/* 0x015f: mmctx_base_disabled */
+	0xfd0099f0,
+	0x0bf405ee,
+	0xc6008018,
+	0x000ef601,
+	0x008004bd,
+	0x0ff601c7,
+	0xf004bd00,
+/* 0x017a: mmctx_multi_disabled */
+	0xabc80199,
+	0x10b4b600,
+	0xc80cb9f0,
+	0xe4b601ae,
+	0x05befd11,
+	0x01c50080,
+	0xbd000bf6,
+/* 0x0195: mmctx_exec_loop */
+/* 0x0195: mmctx_wait_free */
+	0xc5008e04,
+	0x00eecf01,
+	0xf41fe4f0,
+	0xce98f60b,
+	0x05e9fd00,
+	0x01c80080,
+	0xbd000ef6,
+	0x04c0b604,
+	0x1bf4cda4,
+	0x02abc8df,
+/* 0x01bf: mmctx_fini_wait */
+	0x8b1c1bf4,
+	0xcf01c500,
+	0xb4f000bb,
+	0x10b4b01f,
+	0x0af31bf4,
+	0x00b87e02,
+	0x250ef400,
+/* 0x01d8: mmctx_stop */
+	0xb600abc8,
+	0xb9f010b4,
+	0x12b9f00c,
+	0x01c50080,
+	0xbd000bf6,
+/* 0x01ed: mmctx_stop_wait */
+	0xc5008b04,
+	0x00bbcf01,
+	0xf412bbc8,
+/* 0x01fa: mmctx_done */
+	0x94bdf61b,
+	0x800199f0,
+	0xf6021700,
+	0x04bd0009,
+/* 0x020a: strand_wait */
+	0xa0f900f8,
+	0xb87e020a,
+	0xa0fc0000,
+/* 0x0216: strand_pre */
+	0x0c0900f8,
+	0x024afc80,
+	0xbd0009f6,
+	0x020a7e04,
+/* 0x0227: strand_post */
+	0x0900f800,
+	0x4afc800d,
+	0x0009f602,
+	0x0a7e04bd,
+	0x00f80002,
+/* 0x0238: strand_set */
+	0xfc800f0c,
+	0x0cf6024f,
+	0x0c04bd00,
+	0x4afc800b,
+	0x000cf602,
+	0xfc8004bd,
+	0x0ef6024f,
+	0x0c04bd00,
+	0x4afc800a,
+	0x000cf602,
+	0x0a7e04bd,
+	0x00f80002,
+/* 0x0268: strand_ctx_init */
+	0x99f094bd,
+	0x37008003,
+	0x0009f602,
+	0x167e04bd,
+	0x030e0002,
+	0x0002387e,
+	0xfc80c4bd,
+	0x0cf60247,
+	0x0c04bd00,
+	0x4afc8001,
+	0x000cf602,
+	0x0a7e04bd,
+	0x0c920002,
+	0x46fc8001,
+	0x000cf602,
+	0x020c04bd,
+	0x024afc80,
+	0xbd000cf6,
+	0x020a7e04,
+	0x02277e00,
+	0x42008800,
+	0x20008902,
+	0x0099cf02,
+/* 0x02c7: ctx_init_strand_loop */
+	0xf608fe95,
+	0x8ef6008e,
+	0x808acf40,
+	0xb606a5b6,
+	0xeabb01a0,
+	0x0480b600,
+	0xf40192b6,
+	0xe4b6e81b,
+	0xf2efbc08,
+	0x99f094bd,
+	0x17008003,
+	0x0009f602,
+	0x00f804bd,
+/* 0x02f8: error */
+	0xffb2e0f9,
+	0x4098148e,
+	0x00008f7e,
+	0xffb2010f,
+	0x409c1c8e,
+	0x00008f7e,
+	0x00f8e0fc,
+/* 0x0314: init */
+	0x04fe04bd,
+	0x40020200,
+	0x02f61200,
+	0x4104bd00,
+	0x10fe0465,
+	0x07004000,
+	0xbd0000f6,
+	0x40040204,
+	0x02f60400,
+	0xf404bd00,
+	0x00821031,
+	0x22cf0182,
+	0xf0010300,
+	0x32bb1f24,
+	0x0132b604,
+	0xb50502b5,
+	0x00820603,
+	0x22cf0186,
+	0x0402b500,
+	0x500c308e,
+	0x34bd24bd,
+/* 0x036a: init_unk_loop */
+	0x657e44bd,
+	0xf6b00000,
+	0x0e0bf400,
+	0xf2bb010f,
+	0x054ffd04,
+/* 0x037f: init_unk_next */
+	0xb60130b6,
+	0xe0b60120,
+	0x0126b004,
+/* 0x038b: init_unk_done */
+	0xb5e21bf4,
+	0x04b50703,
+	0x01008208,
+	0x0022cf02,
+	0x259534bd,
+	0xc0008008,
+	0x0005f601,
+	0x008004bd,
+	0x05f601c1,
+	0x9804bd00,
+	0x0f98000e,
+	0x01207e01,
+	0x002fbb00,
+	0x98003fbb,
+	0x0f98010e,
+	0x01207e02,
+	0x050e9800,
+	0xbb00effd,
+	0x3ebb002e,
+	0x020e9800,
+	0x7e030f98,
+	0x98000120,
+	0xeffd070e,
+	0x002ebb00,
+	0xb6003ebb,
+	0x00800235,
+	0x03f601d3,
+	0xb604bd00,
+	0x35b60825,
+	0x0120b606,
+	0xb60130b6,
+	0x34b60824,
+	0x7e2fb208,
+	0xbb000268,
+	0x0080003f,
+	0x03f60201,
+	0xbd04bd00,
+	0x1f29f024,
+	0x02300080,
+	0xbd0002f6,
+/* 0x0429: main */
+	0x0031f404,
+	0x0d0028f4,
+	0x00377e24,
+	0xf401f400,
+	0xf404e4b0,
+	0x81fe1d18,
+	0xbd060201,
+	0x0412fd20,
+	0xfd01e4b6,
+	0x18fe051e,
+	0x04fc7e00,
+	0xd40ef400,
+/* 0x0458: main_not_ctx_xfer */
+	0xf010ef94,
+	0xf87e01f5,
+	0x0ef40002,
+/* 0x0465: ih */
+	0xfe80f9c7,
+	0x80f90188,
+	0xa0f990f9,
+	0xd0f9b0f9,
+	0xf0f9e0f9,
+	0x004a04bd,
+	0x00aacf02,
+	0xf404abc4,
+	0x240d1f0b,
+	0xcf1a004e,
+	0x004f00ee,
+	0x00ffcf19,
+	0x0000047e,
+	0x0040010e,
+	0x000ef61d,
+/* 0x04a2: ih_no_fifo */
+	0x004004bd,
+	0x000af601,
+	0xf0fc04bd,
+	0xd0fce0fc,
+	0xa0fcb0fc,
+	0x80fc90fc,
+	0xfc0088fe,
+	0x0032f480,
+/* 0x04c2: hub_barrier_done */
+	0x010f01f8,
+	0xbb040e98,
+	0xffb204fe,
+	0x4094188e,
+	0x00008f7e,
+/* 0x04d6: ctx_redswitch */
+	0x200f00f8,
+	0x01850080,
+	0xbd000ff6,
+/* 0x04e3: ctx_redswitch_delay */
+	0xb6080e04,
+	0x1bf401e2,
+	0x00f5f1fd,
+	0x00f5f108,
+	0x85008002,
+	0x000ff601,
+	0x00f804bd,
+/* 0x04fc: ctx_xfer */
+	0x02810080,
+	0xbd000ff6,
+	0x0711f404,
+	0x0004d67e,
+/* 0x050c: ctx_xfer_not_load */
+	0x0002167e,
+	0xfc8024bd,
+	0x02f60247,
+	0xf004bd00,
+	0x20b6012c,
+	0x4afc8003,
+	0x0002f602,
+	0xacf004bd,
+	0x02a5f001,
+	0x5000008b,
+	0xb6040c98,
+	0xbcbb0fc4,
+	0x000c9800,
+	0x0e010d98,
+	0x013d7e00,
+	0x01acf000,
+	0x5040008b,
+	0xb6040c98,
+	0xbcbb0fc4,
+	0x010c9800,
+	0x98020d98,
+	0x004e060f,
+	0x013d7e08,
+	0x01acf000,
+	0x8b04a5f0,
+	0x98503000,
+	0xc4b6040c,
+	0x00bcbb0f,
+	0x98020c98,
+	0x0f98030d,
+	0x02004e08,
+	0x00013d7e,
+	0x00020a7e,
+	0xf40601f4,
+/* 0x0596: ctx_xfer_post */
+	0x277e0712,
+/* 0x059a: ctx_xfer_done */
+	0xc27e0002,
+	0x00f80004,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+};
diff --git a/drivers/gpu/drm/nouveau/core/engine/graph/fuc/gpcnvc0.fuc.h b/drivers/gpu/drm/nouveau/core/engine/graph/fuc/gpcnvc0.fuc.h
index f2b0dea..0e7b01e 100644
--- a/drivers/gpu/drm/nouveau/core/engine/graph/fuc/gpcnvc0.fuc.h
+++ b/drivers/gpu/drm/nouveau/core/engine/graph/fuc/gpcnvc0.fuc.h
@@ -37,14 +37,14 @@
 };
 
 uint32_t nvc0_grgpc_code[] = {
-	0x03180ef5,
+	0x03a10ef5,
 /* 0x0004: queue_put */
 	0x9800d898,
 	0x86f001d9,
 	0x0489b808,
 	0xf00c1bf4,
 	0x21f502f7,
-	0x00f802fe,
+	0x00f8037e,
 /* 0x001c: queue_put_next */
 	0xb60798c4,
 	0x8dbb0384,
@@ -68,184 +68,214 @@
 /* 0x0066: queue_get_done */
 	0x00f80132,
 /* 0x0068: nv_rd32 */
-	0x0728b7f1,
-	0xb906b4b6,
-	0xc9f002ec,
-	0x00bcd01f,
-/* 0x0078: nv_rd32_wait */
-	0xc800bccf,
-	0x1bf41fcc,
-	0x06a7f0fa,
-	0x010921f5,
-	0xf840bfcf,
-/* 0x008d: nv_wr32 */
-	0x28b7f100,
-	0x06b4b607,
-	0xb980bfd0,
-	0xc9f002ec,
-	0x1ec9f01f,
-/* 0x00a3: nv_wr32_wait */
-	0xcf00bcd0,
-	0xccc800bc,
-	0xfa1bf41f,
-/* 0x00ae: watchdog_reset */
-	0x87f100f8,
-	0x84b60430,
-	0x1ff9f006,
-	0xf8008fd0,
-/* 0x00bd: watchdog_clear */
-	0x3087f100,
-	0x0684b604,
-	0xf80080d0,
-/* 0x00c9: wait_donez */
-	0xf094bd00,
-	0x07f10099,
-	0x03f00f00,
-	0x0009d002,
-	0x07f104bd,
-	0x03f00600,
-	0x000ad002,
-/* 0x00e6: wait_donez_ne */
-	0x87f104bd,
-	0x83f00000,
-	0x0088cf01,
-	0xf4888aff,
-	0x94bdf31b,
-	0xf10099f0,
-	0xf0170007,
-	0x09d00203,
-	0xf804bd00,
-/* 0x0109: wait_doneo */
-	0xf094bd00,
-	0x07f10099,
-	0x03f00f00,
-	0x0009d002,
-	0x87f104bd,
-	0x84b60818,
-	0x008ad006,
-/* 0x0124: wait_doneo_e */
-	0x040087f1,
-	0xcf0684b6,
-	0x8aff0088,
-	0xf30bf488,
+	0xf002ecb9,
+	0x07f11fc9,
+	0x03f0ca00,
+	0x000cd001,
+/* 0x007a: nv_rd32_wait */
+	0xc7f104bd,
+	0xc3f0ca00,
+	0x00cccf01,
+	0xf41fccc8,
+	0xa7f0f31b,
+	0x1021f506,
+	0x00f7f101,
+	0x01f3f0cb,
+	0xf800ffcf,
+/* 0x009d: nv_wr32 */
+	0x0007f100,
+	0x0103f0cc,
+	0xbd000fd0,
+	0x02ecb904,
+	0xf01fc9f0,
+	0x07f11ec9,
+	0x03f0ca00,
+	0x000cd001,
+/* 0x00be: nv_wr32_wait */
+	0xc7f104bd,
+	0xc3f0ca00,
+	0x00cccf01,
+	0xf41fccc8,
+	0x00f8f31b,
+/* 0x00d0: wait_donez */
 	0x99f094bd,
 	0x0007f100,
-	0x0203f017,
+	0x0203f00f,
 	0xbd0009d0,
-/* 0x0147: mmctx_size */
-	0xbd00f804,
-/* 0x0149: nv_mmctx_size_loop */
-	0x00e89894,
-	0xb61a85b6,
-	0x84b60180,
-	0x0098bb02,
-	0xb804e0b6,
-	0x1bf404ef,
-	0x029fb9eb,
-/* 0x0166: mmctx_xfer */
-	0x94bd00f8,
-	0xf10199f0,
-	0xf00f0007,
-	0x09d00203,
-	0xf104bd00,
-	0xb6071087,
-	0x94bd0684,
-	0xf405bbfd,
-	0x8bd0090b,
-	0x0099f000,
-/* 0x018c: mmctx_base_disabled */
-	0xf405eefd,
-	0x8ed00c0b,
-	0xc08fd080,
-/* 0x019b: mmctx_multi_disabled */
-	0xb70199f0,
-	0xc8010080,
+	0x0007f104,
+	0x0203f006,
+	0xbd000ad0,
+/* 0x00ed: wait_donez_ne */
+	0x0087f104,
+	0x0183f000,
+	0xff0088cf,
+	0x1bf4888a,
+	0xf094bdf3,
+	0x07f10099,
+	0x03f01700,
+	0x0009d002,
+	0x00f804bd,
+/* 0x0110: wait_doneo */
+	0x99f094bd,
+	0x0007f100,
+	0x0203f00f,
+	0xbd0009d0,
+	0x0007f104,
+	0x0203f006,
+	0xbd000ad0,
+/* 0x012d: wait_doneo_e */
+	0x0087f104,
+	0x0183f000,
+	0xff0088cf,
+	0x0bf4888a,
+	0xf094bdf3,
+	0x07f10099,
+	0x03f01700,
+	0x0009d002,
+	0x00f804bd,
+/* 0x0150: mmctx_size */
+/* 0x0152: nv_mmctx_size_loop */
+	0xe89894bd,
+	0x1a85b600,
+	0xb60180b6,
+	0x98bb0284,
+	0x04e0b600,
+	0xf404efb8,
+	0x9fb9eb1b,
+/* 0x016f: mmctx_xfer */
+	0xbd00f802,
+	0x0199f094,
+	0x0f0007f1,
+	0xd00203f0,
+	0x04bd0009,
+	0xbbfd94bd,
+	0x120bf405,
+	0xc40007f1,
+	0xd00103f0,
+	0x04bd000b,
+/* 0x0197: mmctx_base_disabled */
+	0xfd0099f0,
+	0x0bf405ee,
+	0x0007f11e,
+	0x0103f0c6,
+	0xbd000ed0,
+	0x0007f104,
+	0x0103f0c7,
+	0xbd000fd0,
+	0x0199f004,
+/* 0x01b8: mmctx_multi_disabled */
+	0xb600abc8,
+	0xb9f010b4,
+	0x01aec80c,
+	0xfd11e4b6,
+	0x07f105be,
+	0x03f0c500,
+	0x000bd001,
+/* 0x01d6: mmctx_exec_loop */
+/* 0x01d6: mmctx_wait_free */
+	0xe7f104bd,
+	0xe3f0c500,
+	0x00eecf01,
+	0xf41fe4f0,
+	0xce98f30b,
+	0x05e9fd00,
+	0xc80007f1,
+	0xd00103f0,
+	0x04bd000e,
+	0xb804c0b6,
+	0x1bf404cd,
+	0x02abc8d8,
+/* 0x0207: mmctx_fini_wait */
+	0xf11f1bf4,
+	0xf0c500b7,
+	0xbbcf01b3,
+	0x1fb4f000,
+	0xf410b4b0,
+	0xa7f0f01b,
+	0xd021f402,
+/* 0x0223: mmctx_stop */
+	0xc82b0ef4,
 	0xb4b600ab,
 	0x0cb9f010,
-	0xb601aec8,
-	0xbefd11e4,
-	0x008bd005,
-/* 0x01b4: mmctx_exec_loop */
-/* 0x01b4: mmctx_wait_free */
-	0xf0008ecf,
-	0x0bf41fe4,
-	0x00ce98fa,
-	0xd005e9fd,
-	0xc0b6c08e,
-	0x04cdb804,
-	0xc8e81bf4,
-	0x1bf402ab,
-/* 0x01d5: mmctx_fini_wait */
-	0x008bcf18,
-	0xb01fb4f0,
-	0x1bf410b4,
-	0x02a7f0f7,
-	0xf4c921f4,
-/* 0x01ea: mmctx_stop */
-	0xabc81b0e,
-	0x10b4b600,
-	0xf00cb9f0,
-	0x8bd012b9,
-/* 0x01f9: mmctx_stop_wait */
-	0x008bcf00,
-	0xf412bbc8,
-/* 0x0202: mmctx_done */
-	0x94bdfa1b,
-	0xf10199f0,
-	0xf0170007,
-	0x09d00203,
-	0xf804bd00,
-/* 0x0215: strand_wait */
-	0xf0a0f900,
-	0x21f402a7,
-	0xf8a0fcc9,
-/* 0x0221: strand_pre */
-	0xfc87f100,
-	0x0283f04a,
-	0xd00c97f0,
-	0x21f50089,
-	0x00f80215,
-/* 0x0234: strand_post */
-	0x4afc87f1,
-	0xf00283f0,
-	0x89d00d97,
-	0x1521f500,
-/* 0x0247: strand_set */
-	0xf100f802,
-	0xf04ffca7,
-	0xaba202a3,
-	0xc7f00500,
-	0x00acd00f,
-	0xd00bc7f0,
-	0x21f500bc,
-	0xaed00215,
-	0x0ac7f000,
-	0xf500bcd0,
-	0xf8021521,
-/* 0x0271: strand_ctx_init */
-	0xf094bd00,
-	0x07f10399,
-	0x03f00f00,
+	0xf112b9f0,
+	0xf0c50007,
+	0x0bd00103,
+/* 0x023b: mmctx_stop_wait */
+	0xf104bd00,
+	0xf0c500b7,
+	0xbbcf01b3,
+	0x12bbc800,
+/* 0x024b: mmctx_done */
+	0xbdf31bf4,
+	0x0199f094,
+	0x170007f1,
+	0xd00203f0,
+	0x04bd0009,
+/* 0x025e: strand_wait */
+	0xa0f900f8,
+	0xf402a7f0,
+	0xa0fcd021,
+/* 0x026a: strand_pre */
+	0x97f000f8,
+	0xfc07f10c,
+	0x0203f04a,
+	0xbd0009d0,
+	0x5e21f504,
+/* 0x027f: strand_post */
+	0xf000f802,
+	0x07f10d97,
+	0x03f04afc,
 	0x0009d002,
 	0x21f504bd,
-	0xe7f00221,
-	0x4721f503,
-	0xfca7f102,
-	0x02a3f046,
-	0x0400aba0,
-	0xf040a0d0,
-	0xbcd001c7,
-	0x1521f500,
-	0x010c9202,
-	0xf000acd0,
-	0xbcd002c7,
-	0x1521f500,
-	0x3421f502,
-	0x8087f102,
-	0x0684b608,
-	0xb70089cf,
-	0x95220080,
-/* 0x02ca: ctx_init_strand_loop */
+	0x00f8025e,
+/* 0x0294: strand_set */
+	0xf10fc7f0,
+	0xf04ffc07,
+	0x0cd00203,
+	0xf004bd00,
+	0x07f10bc7,
+	0x03f04afc,
+	0x000cd002,
+	0x07f104bd,
+	0x03f04ffc,
+	0x000ed002,
+	0xc7f004bd,
+	0xfc07f10a,
+	0x0203f04a,
+	0xbd000cd0,
+	0x5e21f504,
+/* 0x02d3: strand_ctx_init */
+	0xbd00f802,
+	0x0399f094,
+	0x0f0007f1,
+	0xd00203f0,
+	0x04bd0009,
+	0x026a21f5,
+	0xf503e7f0,
+	0xbd029421,
+	0xfc07f1c4,
+	0x0203f047,
+	0xbd000cd0,
+	0x01c7f004,
+	0x4afc07f1,
+	0xd00203f0,
+	0x04bd000c,
+	0x025e21f5,
+	0xf1010c92,
+	0xf046fc07,
+	0x0cd00203,
+	0xf004bd00,
+	0x07f102c7,
+	0x03f04afc,
+	0x000cd002,
+	0x21f504bd,
+	0x21f5025e,
+	0x87f1027f,
+	0x83f04200,
+	0x0097f102,
+	0x0293f020,
+	0x950099cf,
+/* 0x034a: ctx_init_strand_loop */
 	0x8ed008fe,
 	0x408ed000,
 	0xb6808acf,
@@ -259,167 +289,199 @@
 	0x170007f1,
 	0xd00203f0,
 	0x04bd0009,
-/* 0x02fe: error */
+/* 0x037e: error */
 	0xe0f900f8,
-	0x9814e7f1,
-	0xf440e3f0,
-	0xe0b78d21,
-	0xf7f0041c,
-	0x8d21f401,
-	0x00f8e0fc,
-/* 0x0318: init */
-	0x04fe04bd,
-	0x0017f100,
-	0x0227f012,
-	0xf10012d0,
-	0xfe042617,
-	0x17f10010,
-	0x10d00400,
-	0x0427f0c0,
-	0xf40012d0,
-	0x17f11031,
-	0x14b60608,
-	0x0012cf06,
+	0xf102ffb9,
+	0xf09814e7,
+	0x21f440e3,
+	0x01f7f09d,
+	0xf102ffb9,
+	0xf09c1ce7,
+	0x21f440e3,
+	0xf8e0fc9d,
+/* 0x03a1: init */
+	0xfe04bd00,
+	0x27f00004,
+	0x0007f102,
+	0x0003f012,
+	0xbd0002d0,
+	0xd517f104,
+	0x0010fe04,
+	0x070007f1,
+	0xd00003f0,
+	0x04bd0000,
+	0xf10427f0,
+	0xf0040007,
+	0x02d00003,
+	0xf404bd00,
+	0x27f11031,
+	0x23f08200,
+	0x0022cf01,
 	0xf00137f0,
 	0x32bb1f24,
 	0x0132b604,
 	0x80050280,
-	0x10b70603,
-	0x12cf0400,
-	0x04028000,
-	0x010027f1,
-	0xcf0223f0,
-	0x34bd0022,
-	0x070047f1,
-	0x950644b6,
-	0x45d00825,
-	0x4045d000,
-	0x98000e98,
-	0x21f5010f,
-	0x2fbb0147,
-	0x003fbb00,
-	0x98010e98,
-	0x21f5020f,
-	0x0e980147,
-	0x00effd05,
-	0xbb002ebb,
-	0x40b7003e,
-	0x35b61300,
-	0x0043d002,
-	0xb60825b6,
-	0x20b60635,
-	0x0130b601,
-	0xb60824b6,
-	0x2fb90834,
-	0x7121f502,
-	0x003fbb02,
-	0x010007f1,
+	0x27f10603,
+	0x23f08600,
+	0x0022cf01,
+	0xf1040280,
+	0xf0010027,
+	0x22cf0223,
+	0x9534bd00,
+	0x07f10825,
+	0x03f0c000,
+	0x0005d001,
+	0x07f104bd,
+	0x03f0c100,
+	0x0005d001,
+	0x0e9804bd,
+	0x010f9800,
+	0x015021f5,
+	0xbb002fbb,
+	0x0e98003f,
+	0x020f9801,
+	0x015021f5,
+	0xfd050e98,
+	0x2ebb00ef,
+	0x003ebb00,
+	0xf10235b6,
+	0xf0d30007,
+	0x03d00103,
+	0xb604bd00,
+	0x35b60825,
+	0x0120b606,
+	0xb60130b6,
+	0x34b60824,
+	0x022fb908,
+	0x02d321f5,
+	0xf1003fbb,
+	0xf0010007,
+	0x03d00203,
+	0xbd04bd00,
+	0x1f29f024,
+	0x080007f1,
 	0xd00203f0,
-	0x04bd0003,
-	0x29f024bd,
-	0x0007f11f,
-	0x0203f008,
-	0xbd0002d0,
-/* 0x03e9: main */
-	0x0031f404,
-	0xf00028f4,
-	0x21f41cd7,
-	0xf401f439,
-	0xf404e4b0,
-	0x81fe1e18,
-	0x0627f001,
-	0x12fd20bd,
-	0x01e4b604,
-	0xfe051efd,
-	0x21f50018,
-	0x0ef404ad,
-/* 0x0419: main_not_ctx_xfer */
-	0x10ef94d3,
-	0xf501f5f0,
-	0xf402fe21,
-/* 0x0426: ih */
-	0x80f9c60e,
-	0xf90188fe,
-	0xf990f980,
-	0xf9b0f9a0,
-	0xf9e0f9d0,
-	0xcf04bdf0,
-	0xabc4800a,
-	0x1d0bf404,
-	0x1900b7f1,
-	0xcf1cd7f0,
-	0xbfcf40be,
+	0x04bd0002,
+/* 0x0498: main */
+	0xf40031f4,
+	0xd7f00028,
+	0x3921f41c,
+	0xb0f401f4,
+	0x18f404e4,
+	0x0181fe1e,
+	0xbd0627f0,
+	0x0412fd20,
+	0xfd01e4b6,
+	0x18fe051e,
+	0x8d21f500,
+	0xd30ef405,
+/* 0x04c8: main_not_ctx_xfer */
+	0xf010ef94,
+	0x21f501f5,
+	0x0ef4037e,
+/* 0x04d5: ih */
+	0xfe80f9c6,
+	0x80f90188,
+	0xa0f990f9,
+	0xd0f9b0f9,
+	0xf0f9e0f9,
+	0xa7f104bd,
+	0xa3f00200,
+	0x00aacf00,
+	0xf404abc4,
+	0xd7f02c0b,
+	0x00e7f11c,
+	0x00e3f01a,
+	0xf100eecf,
+	0xf01900f7,
+	0xffcf00f3,
 	0x0421f400,
-	0x0400b0b7,
-	0xd001e7f0,
-/* 0x045e: ih_no_fifo */
-	0x0ad000be,
-	0xfcf0fc40,
-	0xfcd0fce0,
-	0xfca0fcb0,
-	0xfe80fc90,
-	0x80fc0088,
-	0xf80032f4,
-/* 0x0479: hub_barrier_done */
-	0x01f7f001,
-	0xbb040e98,
-	0xe7f104fe,
-	0xe3f09418,
-	0x8d21f440,
-/* 0x048e: ctx_redswitch */
-	0xe7f100f8,
-	0xe4b60614,
-	0x20f7f006,
-	0xf000efd0,
-/* 0x049e: ctx_redswitch_delay */
-	0xf2b608f7,
-	0xfd1bf401,
-	0x0a20f7f1,
-	0xf800efd0,
-/* 0x04ad: ctx_xfer */
-	0x0417f100,
-	0x0614b60a,
-	0xf4001fd0,
-	0x21f50711,
-/* 0x04be: ctx_xfer_not_load */
-	0x17f1048e,
-	0x13f04afc,
-	0x0c27f002,
-	0xf50012d0,
-	0xf1021521,
-	0xf047fc27,
-	0x20d00223,
-	0x012cf000,
-	0xd00320b6,
-	0xacf00012,
-	0x02a5f001,
-	0xf000b7f0,
-	0x0c9850b3,
-	0x0fc4b604,
-	0x9800bcbb,
-	0x0d98000c,
-	0x00e7f001,
-	0x016621f5,
+	0xf101e7f0,
+	0xf01d0007,
+	0x0ed00003,
+/* 0x0523: ih_no_fifo */
+	0xf104bd00,
+	0xf0010007,
+	0x0ad00003,
+	0xfc04bd00,
+	0xfce0fcf0,
+	0xfcb0fcd0,
+	0xfc90fca0,
+	0x0088fe80,
+	0x32f480fc,
+/* 0x0547: hub_barrier_done */
+	0xf001f800,
+	0x0e9801f7,
+	0x04febb04,
+	0xf102ffb9,
+	0xf09418e7,
+	0x21f440e3,
+/* 0x055f: ctx_redswitch */
+	0xf000f89d,
+	0x07f120f7,
+	0x03f08500,
+	0x000fd001,
+	0xe7f004bd,
+/* 0x0571: ctx_redswitch_delay */
+	0x01e2b608,
+	0xf1fd1bf4,
+	0xf10800f5,
+	0xf10200f5,
+	0xf0850007,
+	0x0fd00103,
+	0xf804bd00,
+/* 0x058d: ctx_xfer */
+	0x0007f100,
+	0x0203f081,
+	0xbd000fd0,
+	0x0711f404,
+	0x055f21f5,
+/* 0x05a0: ctx_xfer_not_load */
+	0x026a21f5,
+	0x07f124bd,
+	0x03f047fc,
+	0x0002d002,
+	0x2cf004bd,
+	0x0320b601,
+	0x4afc07f1,
+	0xd00203f0,
+	0x04bd0002,
 	0xf001acf0,
-	0xb7f104a5,
-	0xb3f04000,
+	0xb7f102a5,
+	0xb3f00000,
 	0x040c9850,
 	0xbb0fc4b6,
 	0x0c9800bc,
-	0x020d9801,
-	0xf1060f98,
-	0xf50800e7,
-	0xf5016621,
-	0xf4021521,
-	0x12f40601,
-/* 0x0535: ctx_xfer_post */
-	0xfc17f114,
-	0x0213f04a,
-	0xd00d27f0,
-	0x21f50012,
-/* 0x0546: ctx_xfer_done */
-	0x21f50215,
-	0x00f80479,
+	0x010d9800,
+	0xf500e7f0,
+	0xf0016f21,
+	0xa5f001ac,
+	0x00b7f104,
+	0x50b3f040,
+	0xb6040c98,
+	0xbcbb0fc4,
+	0x010c9800,
+	0x98020d98,
+	0xe7f1060f,
+	0x21f50800,
+	0x21f5016f,
+	0x01f4025e,
+	0x0712f406,
+/* 0x0618: ctx_xfer_post */
+	0x027f21f5,
+/* 0x061c: ctx_xfer_done */
+	0x054721f5,
+	0x000000f8,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
 	0x00000000,
 	0x00000000,
 	0x00000000,
diff --git a/drivers/gpu/drm/nouveau/core/engine/graph/fuc/gpcnvd7.fuc.h b/drivers/gpu/drm/nouveau/core/engine/graph/fuc/gpcnvd7.fuc.h
index dd346c2..84dd32d 100644
--- a/drivers/gpu/drm/nouveau/core/engine/graph/fuc/gpcnvd7.fuc.h
+++ b/drivers/gpu/drm/nouveau/core/engine/graph/fuc/gpcnvd7.fuc.h
@@ -41,14 +41,14 @@
 };
 
 uint32_t nvd7_grgpc_code[] = {
-	0x03180ef5,
+	0x03a10ef5,
 /* 0x0004: queue_put */
 	0x9800d898,
 	0x86f001d9,
 	0x0489b808,
 	0xf00c1bf4,
 	0x21f502f7,
-	0x00f802fe,
+	0x00f8037e,
 /* 0x001c: queue_put_next */
 	0xb60798c4,
 	0x8dbb0384,
@@ -72,184 +72,214 @@
 /* 0x0066: queue_get_done */
 	0x00f80132,
 /* 0x0068: nv_rd32 */
-	0x0728b7f1,
-	0xb906b4b6,
-	0xc9f002ec,
-	0x00bcd01f,
-/* 0x0078: nv_rd32_wait */
-	0xc800bccf,
-	0x1bf41fcc,
-	0x06a7f0fa,
-	0x010921f5,
-	0xf840bfcf,
-/* 0x008d: nv_wr32 */
-	0x28b7f100,
-	0x06b4b607,
-	0xb980bfd0,
-	0xc9f002ec,
-	0x1ec9f01f,
-/* 0x00a3: nv_wr32_wait */
-	0xcf00bcd0,
-	0xccc800bc,
-	0xfa1bf41f,
-/* 0x00ae: watchdog_reset */
-	0x87f100f8,
-	0x84b60430,
-	0x1ff9f006,
-	0xf8008fd0,
-/* 0x00bd: watchdog_clear */
-	0x3087f100,
-	0x0684b604,
-	0xf80080d0,
-/* 0x00c9: wait_donez */
-	0xf094bd00,
-	0x07f10099,
-	0x03f00f00,
-	0x0009d002,
-	0x07f104bd,
-	0x03f00600,
-	0x000ad002,
-/* 0x00e6: wait_donez_ne */
-	0x87f104bd,
-	0x83f00000,
-	0x0088cf01,
-	0xf4888aff,
-	0x94bdf31b,
-	0xf10099f0,
-	0xf0170007,
-	0x09d00203,
-	0xf804bd00,
-/* 0x0109: wait_doneo */
-	0xf094bd00,
-	0x07f10099,
-	0x03f00f00,
-	0x0009d002,
-	0x87f104bd,
-	0x84b60818,
-	0x008ad006,
-/* 0x0124: wait_doneo_e */
-	0x040087f1,
-	0xcf0684b6,
-	0x8aff0088,
-	0xf30bf488,
+	0xf002ecb9,
+	0x07f11fc9,
+	0x03f0ca00,
+	0x000cd001,
+/* 0x007a: nv_rd32_wait */
+	0xc7f104bd,
+	0xc3f0ca00,
+	0x00cccf01,
+	0xf41fccc8,
+	0xa7f0f31b,
+	0x1021f506,
+	0x00f7f101,
+	0x01f3f0cb,
+	0xf800ffcf,
+/* 0x009d: nv_wr32 */
+	0x0007f100,
+	0x0103f0cc,
+	0xbd000fd0,
+	0x02ecb904,
+	0xf01fc9f0,
+	0x07f11ec9,
+	0x03f0ca00,
+	0x000cd001,
+/* 0x00be: nv_wr32_wait */
+	0xc7f104bd,
+	0xc3f0ca00,
+	0x00cccf01,
+	0xf41fccc8,
+	0x00f8f31b,
+/* 0x00d0: wait_donez */
 	0x99f094bd,
 	0x0007f100,
-	0x0203f017,
+	0x0203f00f,
 	0xbd0009d0,
-/* 0x0147: mmctx_size */
-	0xbd00f804,
-/* 0x0149: nv_mmctx_size_loop */
-	0x00e89894,
-	0xb61a85b6,
-	0x84b60180,
-	0x0098bb02,
-	0xb804e0b6,
-	0x1bf404ef,
-	0x029fb9eb,
-/* 0x0166: mmctx_xfer */
-	0x94bd00f8,
-	0xf10199f0,
-	0xf00f0007,
-	0x09d00203,
-	0xf104bd00,
-	0xb6071087,
-	0x94bd0684,
-	0xf405bbfd,
-	0x8bd0090b,
-	0x0099f000,
-/* 0x018c: mmctx_base_disabled */
-	0xf405eefd,
-	0x8ed00c0b,
-	0xc08fd080,
-/* 0x019b: mmctx_multi_disabled */
-	0xb70199f0,
-	0xc8010080,
+	0x0007f104,
+	0x0203f006,
+	0xbd000ad0,
+/* 0x00ed: wait_donez_ne */
+	0x0087f104,
+	0x0183f000,
+	0xff0088cf,
+	0x1bf4888a,
+	0xf094bdf3,
+	0x07f10099,
+	0x03f01700,
+	0x0009d002,
+	0x00f804bd,
+/* 0x0110: wait_doneo */
+	0x99f094bd,
+	0x0007f100,
+	0x0203f00f,
+	0xbd0009d0,
+	0x0007f104,
+	0x0203f006,
+	0xbd000ad0,
+/* 0x012d: wait_doneo_e */
+	0x0087f104,
+	0x0183f000,
+	0xff0088cf,
+	0x0bf4888a,
+	0xf094bdf3,
+	0x07f10099,
+	0x03f01700,
+	0x0009d002,
+	0x00f804bd,
+/* 0x0150: mmctx_size */
+/* 0x0152: nv_mmctx_size_loop */
+	0xe89894bd,
+	0x1a85b600,
+	0xb60180b6,
+	0x98bb0284,
+	0x04e0b600,
+	0xf404efb8,
+	0x9fb9eb1b,
+/* 0x016f: mmctx_xfer */
+	0xbd00f802,
+	0x0199f094,
+	0x0f0007f1,
+	0xd00203f0,
+	0x04bd0009,
+	0xbbfd94bd,
+	0x120bf405,
+	0xc40007f1,
+	0xd00103f0,
+	0x04bd000b,
+/* 0x0197: mmctx_base_disabled */
+	0xfd0099f0,
+	0x0bf405ee,
+	0x0007f11e,
+	0x0103f0c6,
+	0xbd000ed0,
+	0x0007f104,
+	0x0103f0c7,
+	0xbd000fd0,
+	0x0199f004,
+/* 0x01b8: mmctx_multi_disabled */
+	0xb600abc8,
+	0xb9f010b4,
+	0x01aec80c,
+	0xfd11e4b6,
+	0x07f105be,
+	0x03f0c500,
+	0x000bd001,
+/* 0x01d6: mmctx_exec_loop */
+/* 0x01d6: mmctx_wait_free */
+	0xe7f104bd,
+	0xe3f0c500,
+	0x00eecf01,
+	0xf41fe4f0,
+	0xce98f30b,
+	0x05e9fd00,
+	0xc80007f1,
+	0xd00103f0,
+	0x04bd000e,
+	0xb804c0b6,
+	0x1bf404cd,
+	0x02abc8d8,
+/* 0x0207: mmctx_fini_wait */
+	0xf11f1bf4,
+	0xf0c500b7,
+	0xbbcf01b3,
+	0x1fb4f000,
+	0xf410b4b0,
+	0xa7f0f01b,
+	0xd021f402,
+/* 0x0223: mmctx_stop */
+	0xc82b0ef4,
 	0xb4b600ab,
 	0x0cb9f010,
-	0xb601aec8,
-	0xbefd11e4,
-	0x008bd005,
-/* 0x01b4: mmctx_exec_loop */
-/* 0x01b4: mmctx_wait_free */
-	0xf0008ecf,
-	0x0bf41fe4,
-	0x00ce98fa,
-	0xd005e9fd,
-	0xc0b6c08e,
-	0x04cdb804,
-	0xc8e81bf4,
-	0x1bf402ab,
-/* 0x01d5: mmctx_fini_wait */
-	0x008bcf18,
-	0xb01fb4f0,
-	0x1bf410b4,
-	0x02a7f0f7,
-	0xf4c921f4,
-/* 0x01ea: mmctx_stop */
-	0xabc81b0e,
-	0x10b4b600,
-	0xf00cb9f0,
-	0x8bd012b9,
-/* 0x01f9: mmctx_stop_wait */
-	0x008bcf00,
-	0xf412bbc8,
-/* 0x0202: mmctx_done */
-	0x94bdfa1b,
-	0xf10199f0,
-	0xf0170007,
-	0x09d00203,
-	0xf804bd00,
-/* 0x0215: strand_wait */
-	0xf0a0f900,
-	0x21f402a7,
-	0xf8a0fcc9,
-/* 0x0221: strand_pre */
-	0xfc87f100,
-	0x0283f04a,
-	0xd00c97f0,
-	0x21f50089,
-	0x00f80215,
-/* 0x0234: strand_post */
-	0x4afc87f1,
-	0xf00283f0,
-	0x89d00d97,
-	0x1521f500,
-/* 0x0247: strand_set */
-	0xf100f802,
-	0xf04ffca7,
-	0xaba202a3,
-	0xc7f00500,
-	0x00acd00f,
-	0xd00bc7f0,
-	0x21f500bc,
-	0xaed00215,
-	0x0ac7f000,
-	0xf500bcd0,
-	0xf8021521,
-/* 0x0271: strand_ctx_init */
-	0xf094bd00,
-	0x07f10399,
-	0x03f00f00,
+	0xf112b9f0,
+	0xf0c50007,
+	0x0bd00103,
+/* 0x023b: mmctx_stop_wait */
+	0xf104bd00,
+	0xf0c500b7,
+	0xbbcf01b3,
+	0x12bbc800,
+/* 0x024b: mmctx_done */
+	0xbdf31bf4,
+	0x0199f094,
+	0x170007f1,
+	0xd00203f0,
+	0x04bd0009,
+/* 0x025e: strand_wait */
+	0xa0f900f8,
+	0xf402a7f0,
+	0xa0fcd021,
+/* 0x026a: strand_pre */
+	0x97f000f8,
+	0xfc07f10c,
+	0x0203f04a,
+	0xbd0009d0,
+	0x5e21f504,
+/* 0x027f: strand_post */
+	0xf000f802,
+	0x07f10d97,
+	0x03f04afc,
 	0x0009d002,
 	0x21f504bd,
-	0xe7f00221,
-	0x4721f503,
-	0xfca7f102,
-	0x02a3f046,
-	0x0400aba0,
-	0xf040a0d0,
-	0xbcd001c7,
-	0x1521f500,
-	0x010c9202,
-	0xf000acd0,
-	0xbcd002c7,
-	0x1521f500,
-	0x3421f502,
-	0x8087f102,
-	0x0684b608,
-	0xb70089cf,
-	0x95220080,
-/* 0x02ca: ctx_init_strand_loop */
+	0x00f8025e,
+/* 0x0294: strand_set */
+	0xf10fc7f0,
+	0xf04ffc07,
+	0x0cd00203,
+	0xf004bd00,
+	0x07f10bc7,
+	0x03f04afc,
+	0x000cd002,
+	0x07f104bd,
+	0x03f04ffc,
+	0x000ed002,
+	0xc7f004bd,
+	0xfc07f10a,
+	0x0203f04a,
+	0xbd000cd0,
+	0x5e21f504,
+/* 0x02d3: strand_ctx_init */
+	0xbd00f802,
+	0x0399f094,
+	0x0f0007f1,
+	0xd00203f0,
+	0x04bd0009,
+	0x026a21f5,
+	0xf503e7f0,
+	0xbd029421,
+	0xfc07f1c4,
+	0x0203f047,
+	0xbd000cd0,
+	0x01c7f004,
+	0x4afc07f1,
+	0xd00203f0,
+	0x04bd000c,
+	0x025e21f5,
+	0xf1010c92,
+	0xf046fc07,
+	0x0cd00203,
+	0xf004bd00,
+	0x07f102c7,
+	0x03f04afc,
+	0x000cd002,
+	0x21f504bd,
+	0x21f5025e,
+	0x87f1027f,
+	0x83f04200,
+	0x0097f102,
+	0x0293f020,
+	0x950099cf,
+/* 0x034a: ctx_init_strand_loop */
 	0x8ed008fe,
 	0x408ed000,
 	0xb6808acf,
@@ -263,198 +293,230 @@
 	0x170007f1,
 	0xd00203f0,
 	0x04bd0009,
-/* 0x02fe: error */
+/* 0x037e: error */
 	0xe0f900f8,
-	0x9814e7f1,
-	0xf440e3f0,
-	0xe0b78d21,
-	0xf7f0041c,
-	0x8d21f401,
-	0x00f8e0fc,
-/* 0x0318: init */
-	0x04fe04bd,
-	0x0017f100,
-	0x0227f012,
-	0xf10012d0,
-	0xfe047017,
-	0x17f10010,
-	0x10d00400,
-	0x0427f0c0,
-	0xf40012d0,
-	0x17f11031,
-	0x14b60608,
-	0x0012cf06,
+	0xf102ffb9,
+	0xf09814e7,
+	0x21f440e3,
+	0x01f7f09d,
+	0xf102ffb9,
+	0xf09c1ce7,
+	0x21f440e3,
+	0xf8e0fc9d,
+/* 0x03a1: init */
+	0xfe04bd00,
+	0x27f00004,
+	0x0007f102,
+	0x0003f012,
+	0xbd0002d0,
+	0x1f17f104,
+	0x0010fe05,
+	0x070007f1,
+	0xd00003f0,
+	0x04bd0000,
+	0xf10427f0,
+	0xf0040007,
+	0x02d00003,
+	0xf404bd00,
+	0x27f11031,
+	0x23f08200,
+	0x0022cf01,
 	0xf00137f0,
 	0x32bb1f24,
 	0x0132b604,
 	0x80050280,
-	0x10b70603,
-	0x12cf0400,
-	0x04028000,
-	0x0c30e7f1,
-	0xbd50e3f0,
-	0xbd34bd24,
-/* 0x0371: init_unk_loop */
-	0x6821f444,
-	0xf400f6b0,
-	0xf7f00f0b,
-	0x04f2bb01,
-	0xb6054ffd,
-/* 0x0386: init_unk_next */
-	0x20b60130,
-	0x04e0b601,
-	0xf40126b0,
-/* 0x0392: init_unk_done */
-	0x0380e21b,
-	0x08048007,
-	0x010027f1,
-	0xcf0223f0,
-	0x34bd0022,
-	0x070047f1,
-	0x950644b6,
-	0x45d00825,
-	0x4045d000,
-	0x98000e98,
-	0x21f5010f,
-	0x2fbb0147,
-	0x003fbb00,
-	0x98010e98,
-	0x21f5020f,
-	0x0e980147,
-	0x00effd05,
-	0xbb002ebb,
-	0x0e98003e,
-	0x030f9802,
-	0x014721f5,
-	0xfd070e98,
+	0x27f10603,
+	0x23f08600,
+	0x0022cf01,
+	0xf1040280,
+	0xf00c30e7,
+	0x24bd50e3,
+	0x44bd34bd,
+/* 0x0410: init_unk_loop */
+	0xb06821f4,
+	0x0bf400f6,
+	0x01f7f00f,
+	0xfd04f2bb,
+	0x30b6054f,
+/* 0x0425: init_unk_next */
+	0x0120b601,
+	0xb004e0b6,
+	0x1bf40126,
+/* 0x0431: init_unk_done */
+	0x070380e2,
+	0xf1080480,
+	0xf0010027,
+	0x22cf0223,
+	0x9534bd00,
+	0x07f10825,
+	0x03f0c000,
+	0x0005d001,
+	0x07f104bd,
+	0x03f0c100,
+	0x0005d001,
+	0x0e9804bd,
+	0x010f9800,
+	0x015021f5,
+	0xbb002fbb,
+	0x0e98003f,
+	0x020f9801,
+	0x015021f5,
+	0xfd050e98,
 	0x2ebb00ef,
 	0x003ebb00,
-	0x130040b7,
-	0xd00235b6,
-	0x25b60043,
-	0x0635b608,
-	0xb60120b6,
-	0x24b60130,
-	0x0834b608,
-	0xf5022fb9,
-	0xbb027121,
-	0x07f1003f,
-	0x03f00100,
-	0x0003d002,
-	0x24bd04bd,
-	0xf11f29f0,
-	0xf0080007,
-	0x02d00203,
-/* 0x0433: main */
+	0x98020e98,
+	0x21f5030f,
+	0x0e980150,
+	0x00effd07,
+	0xbb002ebb,
+	0x35b6003e,
+	0x0007f102,
+	0x0103f0d3,
+	0xbd0003d0,
+	0x0825b604,
+	0xb60635b6,
+	0x30b60120,
+	0x0824b601,
+	0xb90834b6,
+	0x21f5022f,
+	0x3fbb02d3,
+	0x0007f100,
+	0x0203f001,
+	0xbd0003d0,
+	0xf024bd04,
+	0x07f11f29,
+	0x03f00800,
+	0x0002d002,
+/* 0x04e2: main */
+	0x31f404bd,
+	0x0028f400,
+	0xf424d7f0,
+	0x01f43921,
+	0x04e4b0f4,
+	0xfe1e18f4,
+	0x27f00181,
+	0xfd20bd06,
+	0xe4b60412,
+	0x051efd01,
+	0xf50018fe,
+	0xf405d721,
+/* 0x0512: main_not_ctx_xfer */
+	0xef94d30e,
+	0x01f5f010,
+	0x037e21f5,
+/* 0x051f: ih */
+	0xf9c60ef4,
+	0x0188fe80,
+	0x90f980f9,
+	0xb0f9a0f9,
+	0xe0f9d0f9,
+	0x04bdf0f9,
+	0x0200a7f1,
+	0xcf00a3f0,
+	0xabc400aa,
+	0x2c0bf404,
+	0xf124d7f0,
+	0xf01a00e7,
+	0xeecf00e3,
+	0x00f7f100,
+	0x00f3f019,
+	0xf400ffcf,
+	0xe7f00421,
+	0x0007f101,
+	0x0003f01d,
+	0xbd000ed0,
+/* 0x056d: ih_no_fifo */
+	0x0007f104,
+	0x0003f001,
+	0xbd000ad0,
+	0xfcf0fc04,
+	0xfcd0fce0,
+	0xfca0fcb0,
+	0xfe80fc90,
+	0x80fc0088,
+	0xf80032f4,
+/* 0x0591: hub_barrier_done */
+	0x01f7f001,
+	0xbb040e98,
+	0xffb904fe,
+	0x18e7f102,
+	0x40e3f094,
+	0xf89d21f4,
+/* 0x05a9: ctx_redswitch */
+	0x20f7f000,
+	0x850007f1,
+	0xd00103f0,
+	0x04bd000f,
+/* 0x05bb: ctx_redswitch_delay */
+	0xb608e7f0,
+	0x1bf401e2,
+	0x00f5f1fd,
+	0x00f5f108,
+	0x0007f102,
+	0x0103f085,
+	0xbd000fd0,
+/* 0x05d7: ctx_xfer */
+	0xf100f804,
+	0xf0810007,
+	0x0fd00203,
 	0xf404bd00,
-	0x28f40031,
-	0x24d7f000,
-	0xf43921f4,
-	0xe4b0f401,
-	0x1e18f404,
-	0xf00181fe,
-	0x20bd0627,
-	0xb60412fd,
-	0x1efd01e4,
-	0x0018fe05,
-	0x04f721f5,
-/* 0x0463: main_not_ctx_xfer */
-	0x94d30ef4,
-	0xf5f010ef,
-	0xfe21f501,
-	0xc60ef402,
-/* 0x0470: ih */
-	0x88fe80f9,
-	0xf980f901,
-	0xf9a0f990,
-	0xf9d0f9b0,
-	0xbdf0f9e0,
-	0x800acf04,
-	0xf404abc4,
-	0xb7f11d0b,
-	0xd7f01900,
-	0x40becf24,
-	0xf400bfcf,
-	0xb0b70421,
-	0xe7f00400,
-	0x00bed001,
-/* 0x04a8: ih_no_fifo */
-	0xfc400ad0,
-	0xfce0fcf0,
-	0xfcb0fcd0,
-	0xfc90fca0,
-	0x0088fe80,
-	0x32f480fc,
-/* 0x04c3: hub_barrier_done */
-	0xf001f800,
-	0x0e9801f7,
-	0x04febb04,
-	0x9418e7f1,
-	0xf440e3f0,
-	0x00f88d21,
-/* 0x04d8: ctx_redswitch */
-	0x0614e7f1,
-	0xf006e4b6,
-	0xefd020f7,
-	0x08f7f000,
-/* 0x04e8: ctx_redswitch_delay */
-	0xf401f2b6,
-	0xf7f1fd1b,
-	0xefd00a20,
-/* 0x04f7: ctx_xfer */
-	0xf100f800,
-	0xb60a0417,
-	0x1fd00614,
-	0x0711f400,
-	0x04d821f5,
-/* 0x0508: ctx_xfer_not_load */
-	0x4afc17f1,
-	0xf00213f0,
-	0x12d00c27,
-	0x1521f500,
-	0xfc27f102,
-	0x0223f047,
-	0xf00020d0,
-	0x20b6012c,
-	0x0012d003,
-	0xf001acf0,
-	0xb7f002a5,
-	0x50b3f000,
-	0xb6040c98,
-	0xbcbb0fc4,
-	0x000c9800,
-	0xf0010d98,
-	0x21f500e7,
-	0xacf00166,
-	0x00b7f101,
-	0x50b3f040,
-	0xb6040c98,
-	0xbcbb0fc4,
-	0x010c9800,
-	0x98020d98,
-	0xe7f1060f,
-	0x21f50800,
-	0xacf00166,
-	0x04a5f001,
-	0x3000b7f1,
+	0x21f50711,
+/* 0x05ea: ctx_xfer_not_load */
+	0x21f505a9,
+	0x24bd026a,
+	0x47fc07f1,
+	0xd00203f0,
+	0x04bd0002,
+	0xb6012cf0,
+	0x07f10320,
+	0x03f04afc,
+	0x0002d002,
+	0xacf004bd,
+	0x02a5f001,
+	0x0000b7f1,
 	0x9850b3f0,
 	0xc4b6040c,
 	0x00bcbb0f,
-	0x98020c98,
-	0x0f98030d,
-	0x00e7f108,
-	0x6621f502,
-	0x1521f501,
-	0x0601f402,
-/* 0x05a3: ctx_xfer_post */
-	0xf11412f4,
-	0xf04afc17,
-	0x27f00213,
-	0x0012d00d,
-	0x021521f5,
-/* 0x05b4: ctx_xfer_done */
-	0x04c321f5,
-	0x000000f8,
+	0x98000c98,
+	0xe7f0010d,
+	0x6f21f500,
+	0x01acf001,
+	0x4000b7f1,
+	0x9850b3f0,
+	0xc4b6040c,
+	0x00bcbb0f,
+	0x98010c98,
+	0x0f98020d,
+	0x00e7f106,
+	0x6f21f508,
+	0x01acf001,
+	0xf104a5f0,
+	0xf03000b7,
+	0x0c9850b3,
+	0x0fc4b604,
+	0x9800bcbb,
+	0x0d98020c,
+	0x080f9803,
+	0x0200e7f1,
+	0x016f21f5,
+	0x025e21f5,
+	0xf40601f4,
+/* 0x0686: ctx_xfer_post */
+	0x21f50712,
+/* 0x068a: ctx_xfer_done */
+	0x21f5027f,
+	0x00f80591,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
 	0x00000000,
 	0x00000000,
 	0x00000000,
diff --git a/drivers/gpu/drm/nouveau/core/engine/graph/fuc/gpcnve0.fuc.h b/drivers/gpu/drm/nouveau/core/engine/graph/fuc/gpcnve0.fuc.h
index 7ff5ef6..b6da800 100644
--- a/drivers/gpu/drm/nouveau/core/engine/graph/fuc/gpcnve0.fuc.h
+++ b/drivers/gpu/drm/nouveau/core/engine/graph/fuc/gpcnve0.fuc.h
@@ -41,14 +41,14 @@
 };
 
 uint32_t nve0_grgpc_code[] = {
-	0x03180ef5,
+	0x03a10ef5,
 /* 0x0004: queue_put */
 	0x9800d898,
 	0x86f001d9,
 	0x0489b808,
 	0xf00c1bf4,
 	0x21f502f7,
-	0x00f802fe,
+	0x00f8037e,
 /* 0x001c: queue_put_next */
 	0xb60798c4,
 	0x8dbb0384,
@@ -72,184 +72,214 @@
 /* 0x0066: queue_get_done */
 	0x00f80132,
 /* 0x0068: nv_rd32 */
-	0x0728b7f1,
-	0xb906b4b6,
-	0xc9f002ec,
-	0x00bcd01f,
-/* 0x0078: nv_rd32_wait */
-	0xc800bccf,
-	0x1bf41fcc,
-	0x06a7f0fa,
-	0x010921f5,
-	0xf840bfcf,
-/* 0x008d: nv_wr32 */
-	0x28b7f100,
-	0x06b4b607,
-	0xb980bfd0,
-	0xc9f002ec,
-	0x1ec9f01f,
-/* 0x00a3: nv_wr32_wait */
-	0xcf00bcd0,
-	0xccc800bc,
-	0xfa1bf41f,
-/* 0x00ae: watchdog_reset */
-	0x87f100f8,
-	0x84b60430,
-	0x1ff9f006,
-	0xf8008fd0,
-/* 0x00bd: watchdog_clear */
-	0x3087f100,
-	0x0684b604,
-	0xf80080d0,
-/* 0x00c9: wait_donez */
-	0xf094bd00,
-	0x07f10099,
-	0x03f00f00,
-	0x0009d002,
-	0x07f104bd,
-	0x03f00600,
-	0x000ad002,
-/* 0x00e6: wait_donez_ne */
-	0x87f104bd,
-	0x83f00000,
-	0x0088cf01,
-	0xf4888aff,
-	0x94bdf31b,
-	0xf10099f0,
-	0xf0170007,
-	0x09d00203,
-	0xf804bd00,
-/* 0x0109: wait_doneo */
-	0xf094bd00,
-	0x07f10099,
-	0x03f00f00,
-	0x0009d002,
-	0x87f104bd,
-	0x84b60818,
-	0x008ad006,
-/* 0x0124: wait_doneo_e */
-	0x040087f1,
-	0xcf0684b6,
-	0x8aff0088,
-	0xf30bf488,
+	0xf002ecb9,
+	0x07f11fc9,
+	0x03f0ca00,
+	0x000cd001,
+/* 0x007a: nv_rd32_wait */
+	0xc7f104bd,
+	0xc3f0ca00,
+	0x00cccf01,
+	0xf41fccc8,
+	0xa7f0f31b,
+	0x1021f506,
+	0x00f7f101,
+	0x01f3f0cb,
+	0xf800ffcf,
+/* 0x009d: nv_wr32 */
+	0x0007f100,
+	0x0103f0cc,
+	0xbd000fd0,
+	0x02ecb904,
+	0xf01fc9f0,
+	0x07f11ec9,
+	0x03f0ca00,
+	0x000cd001,
+/* 0x00be: nv_wr32_wait */
+	0xc7f104bd,
+	0xc3f0ca00,
+	0x00cccf01,
+	0xf41fccc8,
+	0x00f8f31b,
+/* 0x00d0: wait_donez */
 	0x99f094bd,
 	0x0007f100,
-	0x0203f017,
+	0x0203f00f,
 	0xbd0009d0,
-/* 0x0147: mmctx_size */
-	0xbd00f804,
-/* 0x0149: nv_mmctx_size_loop */
-	0x00e89894,
-	0xb61a85b6,
-	0x84b60180,
-	0x0098bb02,
-	0xb804e0b6,
-	0x1bf404ef,
-	0x029fb9eb,
-/* 0x0166: mmctx_xfer */
-	0x94bd00f8,
-	0xf10199f0,
-	0xf00f0007,
-	0x09d00203,
-	0xf104bd00,
-	0xb6071087,
-	0x94bd0684,
-	0xf405bbfd,
-	0x8bd0090b,
-	0x0099f000,
-/* 0x018c: mmctx_base_disabled */
-	0xf405eefd,
-	0x8ed00c0b,
-	0xc08fd080,
-/* 0x019b: mmctx_multi_disabled */
-	0xb70199f0,
-	0xc8010080,
+	0x0007f104,
+	0x0203f006,
+	0xbd000ad0,
+/* 0x00ed: wait_donez_ne */
+	0x0087f104,
+	0x0183f000,
+	0xff0088cf,
+	0x1bf4888a,
+	0xf094bdf3,
+	0x07f10099,
+	0x03f01700,
+	0x0009d002,
+	0x00f804bd,
+/* 0x0110: wait_doneo */
+	0x99f094bd,
+	0x0007f100,
+	0x0203f00f,
+	0xbd0009d0,
+	0x0007f104,
+	0x0203f006,
+	0xbd000ad0,
+/* 0x012d: wait_doneo_e */
+	0x0087f104,
+	0x0183f000,
+	0xff0088cf,
+	0x0bf4888a,
+	0xf094bdf3,
+	0x07f10099,
+	0x03f01700,
+	0x0009d002,
+	0x00f804bd,
+/* 0x0150: mmctx_size */
+/* 0x0152: nv_mmctx_size_loop */
+	0xe89894bd,
+	0x1a85b600,
+	0xb60180b6,
+	0x98bb0284,
+	0x04e0b600,
+	0xf404efb8,
+	0x9fb9eb1b,
+/* 0x016f: mmctx_xfer */
+	0xbd00f802,
+	0x0199f094,
+	0x0f0007f1,
+	0xd00203f0,
+	0x04bd0009,
+	0xbbfd94bd,
+	0x120bf405,
+	0xc40007f1,
+	0xd00103f0,
+	0x04bd000b,
+/* 0x0197: mmctx_base_disabled */
+	0xfd0099f0,
+	0x0bf405ee,
+	0x0007f11e,
+	0x0103f0c6,
+	0xbd000ed0,
+	0x0007f104,
+	0x0103f0c7,
+	0xbd000fd0,
+	0x0199f004,
+/* 0x01b8: mmctx_multi_disabled */
+	0xb600abc8,
+	0xb9f010b4,
+	0x01aec80c,
+	0xfd11e4b6,
+	0x07f105be,
+	0x03f0c500,
+	0x000bd001,
+/* 0x01d6: mmctx_exec_loop */
+/* 0x01d6: mmctx_wait_free */
+	0xe7f104bd,
+	0xe3f0c500,
+	0x00eecf01,
+	0xf41fe4f0,
+	0xce98f30b,
+	0x05e9fd00,
+	0xc80007f1,
+	0xd00103f0,
+	0x04bd000e,
+	0xb804c0b6,
+	0x1bf404cd,
+	0x02abc8d8,
+/* 0x0207: mmctx_fini_wait */
+	0xf11f1bf4,
+	0xf0c500b7,
+	0xbbcf01b3,
+	0x1fb4f000,
+	0xf410b4b0,
+	0xa7f0f01b,
+	0xd021f402,
+/* 0x0223: mmctx_stop */
+	0xc82b0ef4,
 	0xb4b600ab,
 	0x0cb9f010,
-	0xb601aec8,
-	0xbefd11e4,
-	0x008bd005,
-/* 0x01b4: mmctx_exec_loop */
-/* 0x01b4: mmctx_wait_free */
-	0xf0008ecf,
-	0x0bf41fe4,
-	0x00ce98fa,
-	0xd005e9fd,
-	0xc0b6c08e,
-	0x04cdb804,
-	0xc8e81bf4,
-	0x1bf402ab,
-/* 0x01d5: mmctx_fini_wait */
-	0x008bcf18,
-	0xb01fb4f0,
-	0x1bf410b4,
-	0x02a7f0f7,
-	0xf4c921f4,
-/* 0x01ea: mmctx_stop */
-	0xabc81b0e,
-	0x10b4b600,
-	0xf00cb9f0,
-	0x8bd012b9,
-/* 0x01f9: mmctx_stop_wait */
-	0x008bcf00,
-	0xf412bbc8,
-/* 0x0202: mmctx_done */
-	0x94bdfa1b,
-	0xf10199f0,
-	0xf0170007,
-	0x09d00203,
-	0xf804bd00,
-/* 0x0215: strand_wait */
-	0xf0a0f900,
-	0x21f402a7,
-	0xf8a0fcc9,
-/* 0x0221: strand_pre */
-	0xfc87f100,
-	0x0283f04a,
-	0xd00c97f0,
-	0x21f50089,
-	0x00f80215,
-/* 0x0234: strand_post */
-	0x4afc87f1,
-	0xf00283f0,
-	0x89d00d97,
-	0x1521f500,
-/* 0x0247: strand_set */
-	0xf100f802,
-	0xf04ffca7,
-	0xaba202a3,
-	0xc7f00500,
-	0x00acd00f,
-	0xd00bc7f0,
-	0x21f500bc,
-	0xaed00215,
-	0x0ac7f000,
-	0xf500bcd0,
-	0xf8021521,
-/* 0x0271: strand_ctx_init */
-	0xf094bd00,
-	0x07f10399,
-	0x03f00f00,
+	0xf112b9f0,
+	0xf0c50007,
+	0x0bd00103,
+/* 0x023b: mmctx_stop_wait */
+	0xf104bd00,
+	0xf0c500b7,
+	0xbbcf01b3,
+	0x12bbc800,
+/* 0x024b: mmctx_done */
+	0xbdf31bf4,
+	0x0199f094,
+	0x170007f1,
+	0xd00203f0,
+	0x04bd0009,
+/* 0x025e: strand_wait */
+	0xa0f900f8,
+	0xf402a7f0,
+	0xa0fcd021,
+/* 0x026a: strand_pre */
+	0x97f000f8,
+	0xfc07f10c,
+	0x0203f04a,
+	0xbd0009d0,
+	0x5e21f504,
+/* 0x027f: strand_post */
+	0xf000f802,
+	0x07f10d97,
+	0x03f04afc,
 	0x0009d002,
 	0x21f504bd,
-	0xe7f00221,
-	0x4721f503,
-	0xfca7f102,
-	0x02a3f046,
-	0x0400aba0,
-	0xf040a0d0,
-	0xbcd001c7,
-	0x1521f500,
-	0x010c9202,
-	0xf000acd0,
-	0xbcd002c7,
-	0x1521f500,
-	0x3421f502,
-	0x8087f102,
-	0x0684b608,
-	0xb70089cf,
-	0x95220080,
-/* 0x02ca: ctx_init_strand_loop */
+	0x00f8025e,
+/* 0x0294: strand_set */
+	0xf10fc7f0,
+	0xf04ffc07,
+	0x0cd00203,
+	0xf004bd00,
+	0x07f10bc7,
+	0x03f04afc,
+	0x000cd002,
+	0x07f104bd,
+	0x03f04ffc,
+	0x000ed002,
+	0xc7f004bd,
+	0xfc07f10a,
+	0x0203f04a,
+	0xbd000cd0,
+	0x5e21f504,
+/* 0x02d3: strand_ctx_init */
+	0xbd00f802,
+	0x0399f094,
+	0x0f0007f1,
+	0xd00203f0,
+	0x04bd0009,
+	0x026a21f5,
+	0xf503e7f0,
+	0xbd029421,
+	0xfc07f1c4,
+	0x0203f047,
+	0xbd000cd0,
+	0x01c7f004,
+	0x4afc07f1,
+	0xd00203f0,
+	0x04bd000c,
+	0x025e21f5,
+	0xf1010c92,
+	0xf046fc07,
+	0x0cd00203,
+	0xf004bd00,
+	0x07f102c7,
+	0x03f04afc,
+	0x000cd002,
+	0x21f504bd,
+	0x21f5025e,
+	0x87f1027f,
+	0x83f04200,
+	0x0097f102,
+	0x0293f020,
+	0x950099cf,
+/* 0x034a: ctx_init_strand_loop */
 	0x8ed008fe,
 	0x408ed000,
 	0xb6808acf,
@@ -263,198 +293,230 @@
 	0x170007f1,
 	0xd00203f0,
 	0x04bd0009,
-/* 0x02fe: error */
+/* 0x037e: error */
 	0xe0f900f8,
-	0x9814e7f1,
-	0xf440e3f0,
-	0xe0b78d21,
-	0xf7f0041c,
-	0x8d21f401,
-	0x00f8e0fc,
-/* 0x0318: init */
-	0x04fe04bd,
-	0x0017f100,
-	0x0227f012,
-	0xf10012d0,
-	0xfe047017,
-	0x17f10010,
-	0x10d00400,
-	0x0427f0c0,
-	0xf40012d0,
-	0x17f11031,
-	0x14b60608,
-	0x0012cf06,
+	0xf102ffb9,
+	0xf09814e7,
+	0x21f440e3,
+	0x01f7f09d,
+	0xf102ffb9,
+	0xf09c1ce7,
+	0x21f440e3,
+	0xf8e0fc9d,
+/* 0x03a1: init */
+	0xfe04bd00,
+	0x27f00004,
+	0x0007f102,
+	0x0003f012,
+	0xbd0002d0,
+	0x1f17f104,
+	0x0010fe05,
+	0x070007f1,
+	0xd00003f0,
+	0x04bd0000,
+	0xf10427f0,
+	0xf0040007,
+	0x02d00003,
+	0xf404bd00,
+	0x27f11031,
+	0x23f08200,
+	0x0022cf01,
 	0xf00137f0,
 	0x32bb1f24,
 	0x0132b604,
 	0x80050280,
-	0x10b70603,
-	0x12cf0400,
-	0x04028000,
-	0x0c30e7f1,
-	0xbd50e3f0,
-	0xbd34bd24,
-/* 0x0371: init_unk_loop */
-	0x6821f444,
-	0xf400f6b0,
-	0xf7f00f0b,
-	0x04f2bb01,
-	0xb6054ffd,
-/* 0x0386: init_unk_next */
-	0x20b60130,
-	0x04e0b601,
-	0xf40126b0,
-/* 0x0392: init_unk_done */
-	0x0380e21b,
-	0x08048007,
-	0x010027f1,
-	0xcf0223f0,
-	0x34bd0022,
-	0x070047f1,
-	0x950644b6,
-	0x45d00825,
-	0x4045d000,
-	0x98000e98,
-	0x21f5010f,
-	0x2fbb0147,
-	0x003fbb00,
-	0x98010e98,
-	0x21f5020f,
-	0x0e980147,
-	0x00effd05,
-	0xbb002ebb,
-	0x0e98003e,
-	0x030f9802,
-	0x014721f5,
-	0xfd070e98,
+	0x27f10603,
+	0x23f08600,
+	0x0022cf01,
+	0xf1040280,
+	0xf00c30e7,
+	0x24bd50e3,
+	0x44bd34bd,
+/* 0x0410: init_unk_loop */
+	0xb06821f4,
+	0x0bf400f6,
+	0x01f7f00f,
+	0xfd04f2bb,
+	0x30b6054f,
+/* 0x0425: init_unk_next */
+	0x0120b601,
+	0xb004e0b6,
+	0x1bf40126,
+/* 0x0431: init_unk_done */
+	0x070380e2,
+	0xf1080480,
+	0xf0010027,
+	0x22cf0223,
+	0x9534bd00,
+	0x07f10825,
+	0x03f0c000,
+	0x0005d001,
+	0x07f104bd,
+	0x03f0c100,
+	0x0005d001,
+	0x0e9804bd,
+	0x010f9800,
+	0x015021f5,
+	0xbb002fbb,
+	0x0e98003f,
+	0x020f9801,
+	0x015021f5,
+	0xfd050e98,
 	0x2ebb00ef,
 	0x003ebb00,
-	0x130040b7,
-	0xd00235b6,
-	0x25b60043,
-	0x0635b608,
-	0xb60120b6,
-	0x24b60130,
-	0x0834b608,
-	0xf5022fb9,
-	0xbb027121,
-	0x07f1003f,
-	0x03f00100,
-	0x0003d002,
-	0x24bd04bd,
-	0xf11f29f0,
-	0xf0080007,
-	0x02d00203,
-/* 0x0433: main */
+	0x98020e98,
+	0x21f5030f,
+	0x0e980150,
+	0x00effd07,
+	0xbb002ebb,
+	0x35b6003e,
+	0x0007f102,
+	0x0103f0d3,
+	0xbd0003d0,
+	0x0825b604,
+	0xb60635b6,
+	0x30b60120,
+	0x0824b601,
+	0xb90834b6,
+	0x21f5022f,
+	0x3fbb02d3,
+	0x0007f100,
+	0x0203f001,
+	0xbd0003d0,
+	0xf024bd04,
+	0x07f11f29,
+	0x03f00800,
+	0x0002d002,
+/* 0x04e2: main */
+	0x31f404bd,
+	0x0028f400,
+	0xf424d7f0,
+	0x01f43921,
+	0x04e4b0f4,
+	0xfe1e18f4,
+	0x27f00181,
+	0xfd20bd06,
+	0xe4b60412,
+	0x051efd01,
+	0xf50018fe,
+	0xf405d721,
+/* 0x0512: main_not_ctx_xfer */
+	0xef94d30e,
+	0x01f5f010,
+	0x037e21f5,
+/* 0x051f: ih */
+	0xf9c60ef4,
+	0x0188fe80,
+	0x90f980f9,
+	0xb0f9a0f9,
+	0xe0f9d0f9,
+	0x04bdf0f9,
+	0x0200a7f1,
+	0xcf00a3f0,
+	0xabc400aa,
+	0x2c0bf404,
+	0xf124d7f0,
+	0xf01a00e7,
+	0xeecf00e3,
+	0x00f7f100,
+	0x00f3f019,
+	0xf400ffcf,
+	0xe7f00421,
+	0x0007f101,
+	0x0003f01d,
+	0xbd000ed0,
+/* 0x056d: ih_no_fifo */
+	0x0007f104,
+	0x0003f001,
+	0xbd000ad0,
+	0xfcf0fc04,
+	0xfcd0fce0,
+	0xfca0fcb0,
+	0xfe80fc90,
+	0x80fc0088,
+	0xf80032f4,
+/* 0x0591: hub_barrier_done */
+	0x01f7f001,
+	0xbb040e98,
+	0xffb904fe,
+	0x18e7f102,
+	0x40e3f094,
+	0xf89d21f4,
+/* 0x05a9: ctx_redswitch */
+	0x20f7f000,
+	0x850007f1,
+	0xd00103f0,
+	0x04bd000f,
+/* 0x05bb: ctx_redswitch_delay */
+	0xb608e7f0,
+	0x1bf401e2,
+	0x00f5f1fd,
+	0x00f5f108,
+	0x0007f102,
+	0x0103f085,
+	0xbd000fd0,
+/* 0x05d7: ctx_xfer */
+	0xf100f804,
+	0xf0810007,
+	0x0fd00203,
 	0xf404bd00,
-	0x28f40031,
-	0x24d7f000,
-	0xf43921f4,
-	0xe4b0f401,
-	0x1e18f404,
-	0xf00181fe,
-	0x20bd0627,
-	0xb60412fd,
-	0x1efd01e4,
-	0x0018fe05,
-	0x04f721f5,
-/* 0x0463: main_not_ctx_xfer */
-	0x94d30ef4,
-	0xf5f010ef,
-	0xfe21f501,
-	0xc60ef402,
-/* 0x0470: ih */
-	0x88fe80f9,
-	0xf980f901,
-	0xf9a0f990,
-	0xf9d0f9b0,
-	0xbdf0f9e0,
-	0x800acf04,
-	0xf404abc4,
-	0xb7f11d0b,
-	0xd7f01900,
-	0x40becf24,
-	0xf400bfcf,
-	0xb0b70421,
-	0xe7f00400,
-	0x00bed001,
-/* 0x04a8: ih_no_fifo */
-	0xfc400ad0,
-	0xfce0fcf0,
-	0xfcb0fcd0,
-	0xfc90fca0,
-	0x0088fe80,
-	0x32f480fc,
-/* 0x04c3: hub_barrier_done */
-	0xf001f800,
-	0x0e9801f7,
-	0x04febb04,
-	0x9418e7f1,
-	0xf440e3f0,
-	0x00f88d21,
-/* 0x04d8: ctx_redswitch */
-	0x0614e7f1,
-	0xf006e4b6,
-	0xefd020f7,
-	0x08f7f000,
-/* 0x04e8: ctx_redswitch_delay */
-	0xf401f2b6,
-	0xf7f1fd1b,
-	0xefd00a20,
-/* 0x04f7: ctx_xfer */
-	0xf100f800,
-	0xb60a0417,
-	0x1fd00614,
-	0x0711f400,
-	0x04d821f5,
-/* 0x0508: ctx_xfer_not_load */
-	0x4afc17f1,
-	0xf00213f0,
-	0x12d00c27,
-	0x1521f500,
-	0xfc27f102,
-	0x0223f047,
-	0xf00020d0,
-	0x20b6012c,
-	0x0012d003,
-	0xf001acf0,
-	0xb7f002a5,
-	0x50b3f000,
-	0xb6040c98,
-	0xbcbb0fc4,
-	0x000c9800,
-	0xf0010d98,
-	0x21f500e7,
-	0xacf00166,
-	0x00b7f101,
-	0x50b3f040,
-	0xb6040c98,
-	0xbcbb0fc4,
-	0x010c9800,
-	0x98020d98,
-	0xe7f1060f,
-	0x21f50800,
-	0xacf00166,
-	0x04a5f001,
-	0x3000b7f1,
+	0x21f50711,
+/* 0x05ea: ctx_xfer_not_load */
+	0x21f505a9,
+	0x24bd026a,
+	0x47fc07f1,
+	0xd00203f0,
+	0x04bd0002,
+	0xb6012cf0,
+	0x07f10320,
+	0x03f04afc,
+	0x0002d002,
+	0xacf004bd,
+	0x02a5f001,
+	0x0000b7f1,
 	0x9850b3f0,
 	0xc4b6040c,
 	0x00bcbb0f,
-	0x98020c98,
-	0x0f98030d,
-	0x00e7f108,
-	0x6621f502,
-	0x1521f501,
-	0x0601f402,
-/* 0x05a3: ctx_xfer_post */
-	0xf11412f4,
-	0xf04afc17,
-	0x27f00213,
-	0x0012d00d,
-	0x021521f5,
-/* 0x05b4: ctx_xfer_done */
-	0x04c321f5,
-	0x000000f8,
+	0x98000c98,
+	0xe7f0010d,
+	0x6f21f500,
+	0x01acf001,
+	0x4000b7f1,
+	0x9850b3f0,
+	0xc4b6040c,
+	0x00bcbb0f,
+	0x98010c98,
+	0x0f98020d,
+	0x00e7f106,
+	0x6f21f508,
+	0x01acf001,
+	0xf104a5f0,
+	0xf03000b7,
+	0x0c9850b3,
+	0x0fc4b604,
+	0x9800bcbb,
+	0x0d98020c,
+	0x080f9803,
+	0x0200e7f1,
+	0x016f21f5,
+	0x025e21f5,
+	0xf40601f4,
+/* 0x0686: ctx_xfer_post */
+	0x21f50712,
+/* 0x068a: ctx_xfer_done */
+	0x21f5027f,
+	0x00f80591,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
 	0x00000000,
 	0x00000000,
 	0x00000000,
diff --git a/drivers/gpu/drm/nouveau/core/engine/graph/fuc/gpcnvf0.fuc.h b/drivers/gpu/drm/nouveau/core/engine/graph/fuc/gpcnvf0.fuc.h
index f870507..6316eba 100644
--- a/drivers/gpu/drm/nouveau/core/engine/graph/fuc/gpcnvf0.fuc.h
+++ b/drivers/gpu/drm/nouveau/core/engine/graph/fuc/gpcnvf0.fuc.h
@@ -41,14 +41,14 @@
 };
 
 uint32_t nvf0_grgpc_code[] = {
-	0x03180ef5,
+	0x03a10ef5,
 /* 0x0004: queue_put */
 	0x9800d898,
 	0x86f001d9,
 	0x0489b808,
 	0xf00c1bf4,
 	0x21f502f7,
-	0x00f802fe,
+	0x00f8037e,
 /* 0x001c: queue_put_next */
 	0xb60798c4,
 	0x8dbb0384,
@@ -72,184 +72,214 @@
 /* 0x0066: queue_get_done */
 	0x00f80132,
 /* 0x0068: nv_rd32 */
-	0x0728b7f1,
-	0xb906b4b6,
-	0xc9f002ec,
-	0x00bcd01f,
-/* 0x0078: nv_rd32_wait */
-	0xc800bccf,
-	0x1bf41fcc,
-	0x06a7f0fa,
-	0x010921f5,
-	0xf840bfcf,
-/* 0x008d: nv_wr32 */
-	0x28b7f100,
-	0x06b4b607,
-	0xb980bfd0,
-	0xc9f002ec,
-	0x1ec9f01f,
-/* 0x00a3: nv_wr32_wait */
-	0xcf00bcd0,
-	0xccc800bc,
-	0xfa1bf41f,
-/* 0x00ae: watchdog_reset */
-	0x87f100f8,
-	0x84b60430,
-	0x1ff9f006,
-	0xf8008fd0,
-/* 0x00bd: watchdog_clear */
-	0x3087f100,
-	0x0684b604,
-	0xf80080d0,
-/* 0x00c9: wait_donez */
-	0xf094bd00,
-	0x07f10099,
-	0x03f03700,
-	0x0009d002,
-	0x07f104bd,
-	0x03f00600,
-	0x000ad002,
-/* 0x00e6: wait_donez_ne */
-	0x87f104bd,
-	0x83f00000,
-	0x0088cf01,
-	0xf4888aff,
-	0x94bdf31b,
-	0xf10099f0,
-	0xf0170007,
-	0x09d00203,
-	0xf804bd00,
-/* 0x0109: wait_doneo */
-	0xf094bd00,
-	0x07f10099,
-	0x03f03700,
-	0x0009d002,
-	0x87f104bd,
-	0x84b60818,
-	0x008ad006,
-/* 0x0124: wait_doneo_e */
-	0x040087f1,
-	0xcf0684b6,
-	0x8aff0088,
-	0xf30bf488,
+	0xf002ecb9,
+	0x07f11fc9,
+	0x03f0ca00,
+	0x000cd001,
+/* 0x007a: nv_rd32_wait */
+	0xc7f104bd,
+	0xc3f0ca00,
+	0x00cccf01,
+	0xf41fccc8,
+	0xa7f0f31b,
+	0x1021f506,
+	0x00f7f101,
+	0x01f3f0cb,
+	0xf800ffcf,
+/* 0x009d: nv_wr32 */
+	0x0007f100,
+	0x0103f0cc,
+	0xbd000fd0,
+	0x02ecb904,
+	0xf01fc9f0,
+	0x07f11ec9,
+	0x03f0ca00,
+	0x000cd001,
+/* 0x00be: nv_wr32_wait */
+	0xc7f104bd,
+	0xc3f0ca00,
+	0x00cccf01,
+	0xf41fccc8,
+	0x00f8f31b,
+/* 0x00d0: wait_donez */
 	0x99f094bd,
 	0x0007f100,
-	0x0203f017,
+	0x0203f037,
 	0xbd0009d0,
-/* 0x0147: mmctx_size */
-	0xbd00f804,
-/* 0x0149: nv_mmctx_size_loop */
-	0x00e89894,
-	0xb61a85b6,
-	0x84b60180,
-	0x0098bb02,
-	0xb804e0b6,
-	0x1bf404ef,
-	0x029fb9eb,
-/* 0x0166: mmctx_xfer */
-	0x94bd00f8,
-	0xf10199f0,
-	0xf0370007,
-	0x09d00203,
-	0xf104bd00,
-	0xb6071087,
-	0x94bd0684,
-	0xf405bbfd,
-	0x8bd0090b,
-	0x0099f000,
-/* 0x018c: mmctx_base_disabled */
-	0xf405eefd,
-	0x8ed00c0b,
-	0xc08fd080,
-/* 0x019b: mmctx_multi_disabled */
-	0xb70199f0,
-	0xc8010080,
+	0x0007f104,
+	0x0203f006,
+	0xbd000ad0,
+/* 0x00ed: wait_donez_ne */
+	0x0087f104,
+	0x0183f000,
+	0xff0088cf,
+	0x1bf4888a,
+	0xf094bdf3,
+	0x07f10099,
+	0x03f01700,
+	0x0009d002,
+	0x00f804bd,
+/* 0x0110: wait_doneo */
+	0x99f094bd,
+	0x0007f100,
+	0x0203f037,
+	0xbd0009d0,
+	0x0007f104,
+	0x0203f006,
+	0xbd000ad0,
+/* 0x012d: wait_doneo_e */
+	0x0087f104,
+	0x0183f000,
+	0xff0088cf,
+	0x0bf4888a,
+	0xf094bdf3,
+	0x07f10099,
+	0x03f01700,
+	0x0009d002,
+	0x00f804bd,
+/* 0x0150: mmctx_size */
+/* 0x0152: nv_mmctx_size_loop */
+	0xe89894bd,
+	0x1a85b600,
+	0xb60180b6,
+	0x98bb0284,
+	0x04e0b600,
+	0xf404efb8,
+	0x9fb9eb1b,
+/* 0x016f: mmctx_xfer */
+	0xbd00f802,
+	0x0199f094,
+	0x370007f1,
+	0xd00203f0,
+	0x04bd0009,
+	0xbbfd94bd,
+	0x120bf405,
+	0xc40007f1,
+	0xd00103f0,
+	0x04bd000b,
+/* 0x0197: mmctx_base_disabled */
+	0xfd0099f0,
+	0x0bf405ee,
+	0x0007f11e,
+	0x0103f0c6,
+	0xbd000ed0,
+	0x0007f104,
+	0x0103f0c7,
+	0xbd000fd0,
+	0x0199f004,
+/* 0x01b8: mmctx_multi_disabled */
+	0xb600abc8,
+	0xb9f010b4,
+	0x01aec80c,
+	0xfd11e4b6,
+	0x07f105be,
+	0x03f0c500,
+	0x000bd001,
+/* 0x01d6: mmctx_exec_loop */
+/* 0x01d6: mmctx_wait_free */
+	0xe7f104bd,
+	0xe3f0c500,
+	0x00eecf01,
+	0xf41fe4f0,
+	0xce98f30b,
+	0x05e9fd00,
+	0xc80007f1,
+	0xd00103f0,
+	0x04bd000e,
+	0xb804c0b6,
+	0x1bf404cd,
+	0x02abc8d8,
+/* 0x0207: mmctx_fini_wait */
+	0xf11f1bf4,
+	0xf0c500b7,
+	0xbbcf01b3,
+	0x1fb4f000,
+	0xf410b4b0,
+	0xa7f0f01b,
+	0xd021f402,
+/* 0x0223: mmctx_stop */
+	0xc82b0ef4,
 	0xb4b600ab,
 	0x0cb9f010,
-	0xb601aec8,
-	0xbefd11e4,
-	0x008bd005,
-/* 0x01b4: mmctx_exec_loop */
-/* 0x01b4: mmctx_wait_free */
-	0xf0008ecf,
-	0x0bf41fe4,
-	0x00ce98fa,
-	0xd005e9fd,
-	0xc0b6c08e,
-	0x04cdb804,
-	0xc8e81bf4,
-	0x1bf402ab,
-/* 0x01d5: mmctx_fini_wait */
-	0x008bcf18,
-	0xb01fb4f0,
-	0x1bf410b4,
-	0x02a7f0f7,
-	0xf4c921f4,
-/* 0x01ea: mmctx_stop */
-	0xabc81b0e,
-	0x10b4b600,
-	0xf00cb9f0,
-	0x8bd012b9,
-/* 0x01f9: mmctx_stop_wait */
-	0x008bcf00,
-	0xf412bbc8,
-/* 0x0202: mmctx_done */
-	0x94bdfa1b,
-	0xf10199f0,
-	0xf0170007,
-	0x09d00203,
-	0xf804bd00,
-/* 0x0215: strand_wait */
-	0xf0a0f900,
-	0x21f402a7,
-	0xf8a0fcc9,
-/* 0x0221: strand_pre */
-	0xfc87f100,
-	0x0283f04a,
-	0xd00c97f0,
-	0x21f50089,
-	0x00f80215,
-/* 0x0234: strand_post */
-	0x4afc87f1,
-	0xf00283f0,
-	0x89d00d97,
-	0x1521f500,
-/* 0x0247: strand_set */
-	0xf100f802,
-	0xf04ffca7,
-	0xaba202a3,
-	0xc7f00500,
-	0x00acd00f,
-	0xd00bc7f0,
-	0x21f500bc,
-	0xaed00215,
-	0x0ac7f000,
-	0xf500bcd0,
-	0xf8021521,
-/* 0x0271: strand_ctx_init */
-	0xf094bd00,
-	0x07f10399,
-	0x03f03700,
+	0xf112b9f0,
+	0xf0c50007,
+	0x0bd00103,
+/* 0x023b: mmctx_stop_wait */
+	0xf104bd00,
+	0xf0c500b7,
+	0xbbcf01b3,
+	0x12bbc800,
+/* 0x024b: mmctx_done */
+	0xbdf31bf4,
+	0x0199f094,
+	0x170007f1,
+	0xd00203f0,
+	0x04bd0009,
+/* 0x025e: strand_wait */
+	0xa0f900f8,
+	0xf402a7f0,
+	0xa0fcd021,
+/* 0x026a: strand_pre */
+	0x97f000f8,
+	0xfc07f10c,
+	0x0203f04a,
+	0xbd0009d0,
+	0x5e21f504,
+/* 0x027f: strand_post */
+	0xf000f802,
+	0x07f10d97,
+	0x03f04afc,
 	0x0009d002,
 	0x21f504bd,
-	0xe7f00221,
-	0x4721f503,
-	0xfca7f102,
-	0x02a3f046,
-	0x0400aba0,
-	0xf040a0d0,
-	0xbcd001c7,
-	0x1521f500,
-	0x010c9202,
-	0xf000acd0,
-	0xbcd002c7,
-	0x1521f500,
-	0x3421f502,
-	0x8087f102,
-	0x0684b608,
-	0xb70089cf,
-	0x95220080,
-/* 0x02ca: ctx_init_strand_loop */
+	0x00f8025e,
+/* 0x0294: strand_set */
+	0xf10fc7f0,
+	0xf04ffc07,
+	0x0cd00203,
+	0xf004bd00,
+	0x07f10bc7,
+	0x03f04afc,
+	0x000cd002,
+	0x07f104bd,
+	0x03f04ffc,
+	0x000ed002,
+	0xc7f004bd,
+	0xfc07f10a,
+	0x0203f04a,
+	0xbd000cd0,
+	0x5e21f504,
+/* 0x02d3: strand_ctx_init */
+	0xbd00f802,
+	0x0399f094,
+	0x370007f1,
+	0xd00203f0,
+	0x04bd0009,
+	0x026a21f5,
+	0xf503e7f0,
+	0xbd029421,
+	0xfc07f1c4,
+	0x0203f047,
+	0xbd000cd0,
+	0x01c7f004,
+	0x4afc07f1,
+	0xd00203f0,
+	0x04bd000c,
+	0x025e21f5,
+	0xf1010c92,
+	0xf046fc07,
+	0x0cd00203,
+	0xf004bd00,
+	0x07f102c7,
+	0x03f04afc,
+	0x000cd002,
+	0x21f504bd,
+	0x21f5025e,
+	0x87f1027f,
+	0x83f04200,
+	0x0097f102,
+	0x0293f020,
+	0x950099cf,
+/* 0x034a: ctx_init_strand_loop */
 	0x8ed008fe,
 	0x408ed000,
 	0xb6808acf,
@@ -263,198 +293,230 @@
 	0x170007f1,
 	0xd00203f0,
 	0x04bd0009,
-/* 0x02fe: error */
+/* 0x037e: error */
 	0xe0f900f8,
-	0x9814e7f1,
-	0xf440e3f0,
-	0xe0b78d21,
-	0xf7f0041c,
-	0x8d21f401,
-	0x00f8e0fc,
-/* 0x0318: init */
-	0x04fe04bd,
-	0x0017f100,
-	0x0227f012,
-	0xf10012d0,
-	0xfe047017,
-	0x17f10010,
-	0x10d00400,
-	0x0427f0c0,
-	0xf40012d0,
-	0x17f11031,
-	0x14b60608,
-	0x0012cf06,
+	0xf102ffb9,
+	0xf09814e7,
+	0x21f440e3,
+	0x01f7f09d,
+	0xf102ffb9,
+	0xf09c1ce7,
+	0x21f440e3,
+	0xf8e0fc9d,
+/* 0x03a1: init */
+	0xfe04bd00,
+	0x27f00004,
+	0x0007f102,
+	0x0003f012,
+	0xbd0002d0,
+	0x1f17f104,
+	0x0010fe05,
+	0x070007f1,
+	0xd00003f0,
+	0x04bd0000,
+	0xf10427f0,
+	0xf0040007,
+	0x02d00003,
+	0xf404bd00,
+	0x27f11031,
+	0x23f08200,
+	0x0022cf01,
 	0xf00137f0,
 	0x32bb1f24,
 	0x0132b604,
 	0x80050280,
-	0x10b70603,
-	0x12cf0400,
-	0x04028000,
-	0x0c30e7f1,
-	0xbd50e3f0,
-	0xbd34bd24,
-/* 0x0371: init_unk_loop */
-	0x6821f444,
-	0xf400f6b0,
-	0xf7f00f0b,
-	0x04f2bb01,
-	0xb6054ffd,
-/* 0x0386: init_unk_next */
-	0x20b60130,
-	0x04e0b601,
-	0xf40226b0,
-/* 0x0392: init_unk_done */
-	0x0380e21b,
-	0x08048007,
-	0x010027f1,
-	0xcf0223f0,
-	0x34bd0022,
-	0x070047f1,
-	0x950644b6,
-	0x45d00825,
-	0x4045d000,
-	0x98000e98,
-	0x21f5010f,
-	0x2fbb0147,
-	0x003fbb00,
-	0x98010e98,
-	0x21f5020f,
-	0x0e980147,
-	0x00effd05,
-	0xbb002ebb,
-	0x0e98003e,
-	0x030f9802,
-	0x014721f5,
-	0xfd070e98,
+	0x27f10603,
+	0x23f08600,
+	0x0022cf01,
+	0xf1040280,
+	0xf00c30e7,
+	0x24bd50e3,
+	0x44bd34bd,
+/* 0x0410: init_unk_loop */
+	0xb06821f4,
+	0x0bf400f6,
+	0x01f7f00f,
+	0xfd04f2bb,
+	0x30b6054f,
+/* 0x0425: init_unk_next */
+	0x0120b601,
+	0xb004e0b6,
+	0x1bf40226,
+/* 0x0431: init_unk_done */
+	0x070380e2,
+	0xf1080480,
+	0xf0010027,
+	0x22cf0223,
+	0x9534bd00,
+	0x07f10825,
+	0x03f0c000,
+	0x0005d001,
+	0x07f104bd,
+	0x03f0c100,
+	0x0005d001,
+	0x0e9804bd,
+	0x010f9800,
+	0x015021f5,
+	0xbb002fbb,
+	0x0e98003f,
+	0x020f9801,
+	0x015021f5,
+	0xfd050e98,
 	0x2ebb00ef,
 	0x003ebb00,
-	0x130040b7,
-	0xd00235b6,
-	0x25b60043,
-	0x0635b608,
-	0xb60120b6,
-	0x24b60130,
-	0x0834b608,
-	0xf5022fb9,
-	0xbb027121,
-	0x07f1003f,
-	0x03f00100,
-	0x0003d002,
-	0x24bd04bd,
-	0xf11f29f0,
-	0xf0300007,
-	0x02d00203,
-/* 0x0433: main */
+	0x98020e98,
+	0x21f5030f,
+	0x0e980150,
+	0x00effd07,
+	0xbb002ebb,
+	0x35b6003e,
+	0x0007f102,
+	0x0103f0d3,
+	0xbd0003d0,
+	0x0825b604,
+	0xb60635b6,
+	0x30b60120,
+	0x0824b601,
+	0xb90834b6,
+	0x21f5022f,
+	0x3fbb02d3,
+	0x0007f100,
+	0x0203f001,
+	0xbd0003d0,
+	0xf024bd04,
+	0x07f11f29,
+	0x03f03000,
+	0x0002d002,
+/* 0x04e2: main */
+	0x31f404bd,
+	0x0028f400,
+	0xf424d7f0,
+	0x01f43921,
+	0x04e4b0f4,
+	0xfe1e18f4,
+	0x27f00181,
+	0xfd20bd06,
+	0xe4b60412,
+	0x051efd01,
+	0xf50018fe,
+	0xf405d721,
+/* 0x0512: main_not_ctx_xfer */
+	0xef94d30e,
+	0x01f5f010,
+	0x037e21f5,
+/* 0x051f: ih */
+	0xf9c60ef4,
+	0x0188fe80,
+	0x90f980f9,
+	0xb0f9a0f9,
+	0xe0f9d0f9,
+	0x04bdf0f9,
+	0x0200a7f1,
+	0xcf00a3f0,
+	0xabc400aa,
+	0x2c0bf404,
+	0xf124d7f0,
+	0xf01a00e7,
+	0xeecf00e3,
+	0x00f7f100,
+	0x00f3f019,
+	0xf400ffcf,
+	0xe7f00421,
+	0x0007f101,
+	0x0003f01d,
+	0xbd000ed0,
+/* 0x056d: ih_no_fifo */
+	0x0007f104,
+	0x0003f001,
+	0xbd000ad0,
+	0xfcf0fc04,
+	0xfcd0fce0,
+	0xfca0fcb0,
+	0xfe80fc90,
+	0x80fc0088,
+	0xf80032f4,
+/* 0x0591: hub_barrier_done */
+	0x01f7f001,
+	0xbb040e98,
+	0xffb904fe,
+	0x18e7f102,
+	0x40e3f094,
+	0xf89d21f4,
+/* 0x05a9: ctx_redswitch */
+	0x20f7f000,
+	0x850007f1,
+	0xd00103f0,
+	0x04bd000f,
+/* 0x05bb: ctx_redswitch_delay */
+	0xb608e7f0,
+	0x1bf401e2,
+	0x00f5f1fd,
+	0x00f5f108,
+	0x0007f102,
+	0x0103f085,
+	0xbd000fd0,
+/* 0x05d7: ctx_xfer */
+	0xf100f804,
+	0xf0810007,
+	0x0fd00203,
 	0xf404bd00,
-	0x28f40031,
-	0x24d7f000,
-	0xf43921f4,
-	0xe4b0f401,
-	0x1e18f404,
-	0xf00181fe,
-	0x20bd0627,
-	0xb60412fd,
-	0x1efd01e4,
-	0x0018fe05,
-	0x04f721f5,
-/* 0x0463: main_not_ctx_xfer */
-	0x94d30ef4,
-	0xf5f010ef,
-	0xfe21f501,
-	0xc60ef402,
-/* 0x0470: ih */
-	0x88fe80f9,
-	0xf980f901,
-	0xf9a0f990,
-	0xf9d0f9b0,
-	0xbdf0f9e0,
-	0x800acf04,
-	0xf404abc4,
-	0xb7f11d0b,
-	0xd7f01900,
-	0x40becf24,
-	0xf400bfcf,
-	0xb0b70421,
-	0xe7f00400,
-	0x00bed001,
-/* 0x04a8: ih_no_fifo */
-	0xfc400ad0,
-	0xfce0fcf0,
-	0xfcb0fcd0,
-	0xfc90fca0,
-	0x0088fe80,
-	0x32f480fc,
-/* 0x04c3: hub_barrier_done */
-	0xf001f800,
-	0x0e9801f7,
-	0x04febb04,
-	0x9418e7f1,
-	0xf440e3f0,
-	0x00f88d21,
-/* 0x04d8: ctx_redswitch */
-	0x0614e7f1,
-	0xf006e4b6,
-	0xefd020f7,
-	0x08f7f000,
-/* 0x04e8: ctx_redswitch_delay */
-	0xf401f2b6,
-	0xf7f1fd1b,
-	0xefd00a20,
-/* 0x04f7: ctx_xfer */
-	0xf100f800,
-	0xb60a0417,
-	0x1fd00614,
-	0x0711f400,
-	0x04d821f5,
-/* 0x0508: ctx_xfer_not_load */
-	0x4afc17f1,
-	0xf00213f0,
-	0x12d00c27,
-	0x1521f500,
-	0xfc27f102,
-	0x0223f047,
-	0xf00020d0,
-	0x20b6012c,
-	0x0012d003,
-	0xf001acf0,
-	0xb7f002a5,
-	0x50b3f000,
-	0xb6040c98,
-	0xbcbb0fc4,
-	0x000c9800,
-	0xf0010d98,
-	0x21f500e7,
-	0xacf00166,
-	0x00b7f101,
-	0x50b3f040,
-	0xb6040c98,
-	0xbcbb0fc4,
-	0x010c9800,
-	0x98020d98,
-	0xe7f1060f,
-	0x21f50800,
-	0xacf00166,
-	0x04a5f001,
-	0x3000b7f1,
+	0x21f50711,
+/* 0x05ea: ctx_xfer_not_load */
+	0x21f505a9,
+	0x24bd026a,
+	0x47fc07f1,
+	0xd00203f0,
+	0x04bd0002,
+	0xb6012cf0,
+	0x07f10320,
+	0x03f04afc,
+	0x0002d002,
+	0xacf004bd,
+	0x02a5f001,
+	0x0000b7f1,
 	0x9850b3f0,
 	0xc4b6040c,
 	0x00bcbb0f,
-	0x98020c98,
-	0x0f98030d,
-	0x00e7f108,
-	0x6621f502,
-	0x1521f501,
-	0x0601f402,
-/* 0x05a3: ctx_xfer_post */
-	0xf11412f4,
-	0xf04afc17,
-	0x27f00213,
-	0x0012d00d,
-	0x021521f5,
-/* 0x05b4: ctx_xfer_done */
-	0x04c321f5,
-	0x000000f8,
+	0x98000c98,
+	0xe7f0010d,
+	0x6f21f500,
+	0x01acf001,
+	0x4000b7f1,
+	0x9850b3f0,
+	0xc4b6040c,
+	0x00bcbb0f,
+	0x98010c98,
+	0x0f98020d,
+	0x00e7f106,
+	0x6f21f508,
+	0x01acf001,
+	0xf104a5f0,
+	0xf03000b7,
+	0x0c9850b3,
+	0x0fc4b604,
+	0x9800bcbb,
+	0x0d98020c,
+	0x080f9803,
+	0x0200e7f1,
+	0x016f21f5,
+	0x025e21f5,
+	0xf40601f4,
+/* 0x0686: ctx_xfer_post */
+	0x21f50712,
+/* 0x068a: ctx_xfer_done */
+	0x21f5027f,
+	0x00f80591,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
 	0x00000000,
 	0x00000000,
 	0x00000000,
diff --git a/drivers/gpu/drm/nouveau/core/engine/graph/fuc/hub.fuc b/drivers/gpu/drm/nouveau/core/engine/graph/fuc/hub.fuc
index b82d2ae..c8ddb8d 100644
--- a/drivers/gpu/drm/nouveau/core/engine/graph/fuc/hub.fuc
+++ b/drivers/gpu/drm/nouveau/core/engine/graph/fuc/hub.fuc
@@ -68,60 +68,57 @@
 //
 init:
 	clear b32 $r0
-	mov $sp $r0
 	mov $xdbase $r0
 
+	// setup stack
+	nv_iord($r1, NV_PGRAPH_FECS_CAPS, 0)
+	extr $r1 $r1 9:17
+	shl b32 $r1 8
+	mov $sp $r1
+
 	// enable fifo access
-	mov $r1 0x1200
-	mov $r2 2
-	iowr I[$r1 + 0x000] $r2	// FIFO_ENABLE
+	mov $r2 NV_PGRAPH_FECS_ACCESS_FIFO
+	nv_iowr(NV_PGRAPH_FECS_ACCESS, 0, $r2)
 
 	// setup i0 handler, and route all interrupts to it
 	mov $r1 #ih
 	mov $iv0 $r1
-	mov $r1 0x400
-	iowr I[$r1 + 0x300] $r0	// INTR_DISPATCH
 
-	// route HUB_CHANNEL_SWITCH to fuc interrupt 8
-	mov $r3 0x404
-	shl b32 $r3 6
-	mov $r2 0x2003		// { HUB_CHANNEL_SWITCH, ZERO } -> intr 8
-	iowr I[$r3 + 0x000] $r2
+	clear b32 $r2
+	nv_iowr(NV_PGRAPH_FECS_INTR_ROUTE, 0, $r2)
+
+	// route HUB_CHSW_PULSE to fuc interrupt 8
+	mov $r2 0x2003		// { HUB_CHSW_PULSE, ZERO } -> intr 8
+	nv_iowr(NV_PGRAPH_FECS_IROUTE, 0, $r2)
 
 	// not sure what these are, route them because NVIDIA does, and
 	// the IRQ handler will signal the host if we ever get one.. we
 	// may find out if/why we need to handle these if so..
 	//
-	mov $r2 0x2004
-	iowr I[$r3 + 0x004] $r2 // { 0x04, ZERO } -> intr 9
-	mov $r2 0x200b
-	iowr I[$r3 + 0x008] $r2 // { 0x0b, ZERO } -> intr 10
-	mov $r2 0x200c
-	iowr I[$r3 + 0x01c] $r2 // { 0x0c, ZERO } -> intr 15
+	mov $r2 0x2004		// { 0x04, ZERO } -> intr 9
+	nv_iowr(NV_PGRAPH_FECS_IROUTE, 1, $r2)
+	mov $r2 0x200b		// { HUB_FIRMWARE_MTHD, ZERO } -> intr 10
+	nv_iowr(NV_PGRAPH_FECS_IROUTE, 2, $r2)
+	mov $r2 0x200c		// { 0x0c, ZERO } -> intr 15
+	nv_iowr(NV_PGRAPH_FECS_IROUTE, 7, $r2)
 
 	// enable all INTR_UP interrupts
-	mov $r2 0xc24
-	shl b32 $r2 6
-	not b32 $r3 $r0
-	iowr I[$r2] $r3
+	sub b32 $r3 $r0 1
+	nv_iowr(NV_PGRAPH_FECS_INTR_UP_EN, 0, $r3)
 
-	// enable fifo, ctxsw, 9, 10, 15 interrupts
-	mov $r2 -0x78fc		// 0x8704
-	sethi $r2 0
-	iowr I[$r1 + 0x000] $r2	// INTR_EN_SET
+	// enable fifo, ctxsw, 9, fwmthd, 15 interrupts
+	imm32($r2, 0x8704)
+	nv_iowr(NV_PGRAPH_FECS_INTR_EN_SET, 0, $r2)
 
 	// fifo level triggered, rest edge
-	sub b32 $r1 0x100
-	mov $r2 4
-	iowr I[$r1] $r2
+	mov $r2 NV_PGRAPH_FECS_INTR_MODE_FIFO_LEVEL
+	nv_iowr(NV_PGRAPH_FECS_INTR_MODE, 0, $r2)
 
 	// enable interrupts
 	bset $flags ie0
 
 	// fetch enabled GPC/ROP counts
-	mov $r14 -0x69fc	// 0x409604
-	sethi $r14 0x400000
-	call #nv_rd32
+	nv_rd32($r14, 0x409604)
 	extr $r1 $r15 16:20
 	st b32 D[$r0 + #rop_count] $r1
 	and $r15 0x1f
@@ -131,37 +128,40 @@
 	mov $r1 1
 	shl b32 $r1 $r15
 	sub b32 $r1 1
-	mov $r2 0x40c
-	shl b32 $r2 6
-	iowr I[$r2 + 0x000] $r1
-	iowr I[$r2 + 0x100] $r1
+	nv_iowr(NV_PGRAPH_FECS_BAR_MASK0, 0, $r1)
+	nv_iowr(NV_PGRAPH_FECS_BAR_MASK1, 0, $r1)
 
 	// context size calculation, reserve first 256 bytes for use by fuc
 	mov $r1 256
 
+	//
+	mov $r15 2
+	call(ctx_4170s)
+	call(ctx_4170w)
+	mov $r15 0x10
+	call(ctx_86c)
+
 	// calculate size of mmio context data
 	ld b32 $r14 D[$r0 + #hub_mmio_list_head]
 	ld b32 $r15 D[$r0 + #hub_mmio_list_tail]
-	call #mmctx_size
+	call(mmctx_size)
 
 	// set mmctx base addresses now so we don't have to do it later,
 	// they don't (currently) ever change
-	mov $r3 0x700
-	shl b32 $r3 6
 	shr b32 $r4 $r1 8
-	iowr I[$r3 + 0x000] $r4		// MMCTX_SAVE_SWBASE
-	iowr I[$r3 + 0x100] $r4		// MMCTX_LOAD_SWBASE
+	nv_iowr(NV_PGRAPH_FECS_MMCTX_SAVE_SWBASE, 0, $r4)
+	nv_iowr(NV_PGRAPH_FECS_MMCTX_LOAD_SWBASE, 0, $r4)
 	add b32 $r3 0x1300
 	add b32 $r1 $r15
 	shr b32 $r15 2
-	iowr I[$r3 + 0x000] $r15	// MMCTX_LOAD_COUNT, wtf for?!?
+	nv_iowr(NV_PGRAPH_FECS_MMCTX_LOAD_COUNT, 0, $r15) // wtf??
 
 	// strands, base offset needs to be aligned to 256 bytes
 	shr b32 $r1 8
 	add b32 $r1 1
 	shl b32 $r1 8
 	mov b32 $r15 $r1
-	call #strand_ctx_init
+	call(strand_ctx_init)
 	add b32 $r1 $r15
 
 	// initialise each GPC in sequence by passing in the offset of its
@@ -173,30 +173,29 @@
 	// in GPCn_CC_SCRATCH[1]
 	//
 	ld b32 $r3 D[$r0 + #gpc_count]
-	mov $r4 0x2000
-	sethi $r4 0x500000
+	imm32($r4, 0x502000)
 	init_gpc:
 		// setup, and start GPC ucode running
 		add b32 $r14 $r4 0x804
 		mov b32 $r15 $r1
-		call #nv_wr32			// CC_SCRATCH[1] = ctx offset
+		call(nv_wr32)			// CC_SCRATCH[1] = ctx offset
 		add b32 $r14 $r4 0x10c
 		clear b32 $r15
-		call #nv_wr32
+		call(nv_wr32)
 		add b32 $r14 $r4 0x104
-		call #nv_wr32			// ENTRY
+		call(nv_wr32)			// ENTRY
 		add b32 $r14 $r4 0x100
 		mov $r15 2			// CTRL_START_TRIGGER
-		call #nv_wr32			// CTRL
+		call(nv_wr32)			// CTRL
 
 		// wait for it to complete, and adjust context size
 		add b32 $r14 $r4 0x800
 		init_gpc_wait:
-			call #nv_rd32
+			call(nv_rd32)
 			xbit $r15 $r15 31
 			bra e #init_gpc_wait
 		add b32 $r14 $r4 0x804
-		call #nv_rd32
+		call(nv_rd32)
 		add b32 $r1 $r15
 
 		// next!
@@ -204,6 +203,12 @@
 		sub b32 $r3 1
 		bra ne #init_gpc
 
+	//
+	mov $r15 0
+	call(ctx_86c)
+	mov $r15 0
+	call(ctx_4170s)
+
 	// save context size, and tell host we're ready
 	nv_iowr(NV_PGRAPH_FECS_CC_SCRATCH_VAL(1), 0, $r1)
 	clear b32 $r1
@@ -218,17 +223,15 @@
 	bset $flags $p0
 	sleep $p0
 	mov $r13 #cmd_queue
-	call #queue_get
+	call(queue_get)
 	bra $p1 #main
 
 	// context switch, requested by GPU?
 	cmpu b32 $r14 0x4001
 	bra ne #main_not_ctx_switch
 		trace_set(T_AUTO)
-		mov $r1 0xb00
-		shl b32 $r1 6
-		iord $r2 I[$r1 + 0x100]		// CHAN_NEXT
-		iord $r1 I[$r1 + 0x000]		// CHAN_CUR
+		nv_iord($r1, NV_PGRAPH_FECS_CHAN_ADDR, 0)
+		nv_iord($r2, NV_PGRAPH_FECS_CHAN_NEXT, 0)
 
 		xbit $r3 $r1 31
 		bra e #chsw_no_prev
@@ -239,12 +242,12 @@
 				trace_set(T_SAVE)
 				bclr $flags $p1
 				bset $flags $p2
-				call #ctx_xfer
+				call(ctx_xfer)
 				trace_clr(T_SAVE);
 				pop $r2
 				trace_set(T_LOAD);
 				bset $flags $p1
-				call #ctx_xfer
+				call(ctx_xfer)
 				trace_clr(T_LOAD);
 				bra #chsw_done
 			chsw_prev_no_next:
@@ -252,25 +255,21 @@
 				mov b32 $r2 $r1
 				bclr $flags $p1
 				bclr $flags $p2
-				call #ctx_xfer
+				call(ctx_xfer)
 				pop $r2
-				mov $r1 0xb00
-				shl b32 $r1 6
-				iowr I[$r1] $r2
+				nv_iowr(NV_PGRAPH_FECS_CHAN_ADDR, 0, $r2)
 				bra #chsw_done
 		chsw_no_prev:
 			xbit $r3 $r2 31
 			bra e #chsw_done
 				bset $flags $p1
 				bclr $flags $p2
-				call #ctx_xfer
+				call(ctx_xfer)
 
 		// ack the context switch request
 		chsw_done:
-		mov $r1 0xb0c
-		shl b32 $r1 6
-		mov $r2 1
-		iowr I[$r1 + 0x000] $r2		// 0x409b0c
+		mov $r2 NV_PGRAPH_FECS_CHSW_ACK
+		nv_iowr(NV_PGRAPH_FECS_CHSW, 0, $r2)
 		trace_clr(T_AUTO)
 		bra #main
 
@@ -279,7 +278,7 @@
 	cmpu b32 $r14 0x0001
 	bra ne #main_not_ctx_chan
 		mov b32 $r2 $r15
-		call #ctx_chan
+		call(ctx_chan)
 		bra #main_done
 
 	// request to store current channel context?
@@ -289,14 +288,14 @@
 		trace_set(T_SAVE)
 		bclr $flags $p1
 		bclr $flags $p2
-		call #ctx_xfer
+		call(ctx_xfer)
 		trace_clr(T_SAVE)
 		bra #main_done
 
 	main_not_ctx_save:
 		shl b32 $r15 $r14 16
 		or $r15 E_BAD_COMMAND
-		call #error
+		call(error)
 		bra #main
 
 	main_done:
@@ -319,41 +318,46 @@
 	clear b32 $r0
 
 	// incoming fifo command?
-	iord $r10 I[$r0 + 0x200]	// INTR
-	and $r11 $r10 0x00000004
+	nv_iord($r10, NV_PGRAPH_FECS_INTR, 0)
+	and $r11 $r10 NV_PGRAPH_FECS_INTR_FIFO
 	bra e #ih_no_fifo
 		// queue incoming fifo command for later processing
-		mov $r11 0x1900
 		mov $r13 #cmd_queue
-		iord $r14 I[$r11 + 0x100]	// FIFO_CMD
-		iord $r15 I[$r11 + 0x000]	// FIFO_DATA
-		call #queue_put
+		nv_iord($r14, NV_PGRAPH_FECS_FIFO_CMD, 0)
+		nv_iord($r15, NV_PGRAPH_FECS_FIFO_DATA, 0)
+		call(queue_put)
 		add b32 $r11 0x400
 		mov $r14 1
-		iowr I[$r11 + 0x000] $r14	// FIFO_ACK
+		nv_iowr(NV_PGRAPH_FECS_FIFO_ACK, 0, $r14)
 
 	// context switch request?
 	ih_no_fifo:
-	and $r11 $r10 0x00000100
+	and $r11 $r10 NV_PGRAPH_FECS_INTR_CHSW
 	bra e #ih_no_ctxsw
 		// enqueue a context switch for later processing
 		mov $r13 #cmd_queue
 		mov $r14 0x4001
-		call #queue_put
+		call(queue_put)
+
+	// firmware method?
+	ih_no_ctxsw:
+	and $r11 $r10 NV_PGRAPH_FECS_INTR_FWMTHD
+	bra e #ih_no_fwmthd
+		// none we handle, ack, and fall-through to unhandled
+		mov $r11 0x100
+		nv_wr32(0x400144, $r11)
 
 	// anything we didn't handle, bring it to the host's attention
-	ih_no_ctxsw:
-	mov $r11 0x104
+	ih_no_fwmthd:
+	mov $r11 0x104 // FIFO | CHSW
 	not b32 $r11
 	and $r11 $r10 $r11
 	bra e #ih_no_other
-		mov $r10 0xc1c
-		shl b32 $r10 6
-		iowr I[$r10] $r11	// INTR_UP_SET
+		nv_iowr(NV_PGRAPH_FECS_INTR_UP_SET, 0, $r11)
 
 	// ack, and wake up main()
 	ih_no_other:
-	iowr I[$r0 + 0x100] $r10	// INTR_ACK
+	nv_iowr(NV_PGRAPH_FECS_INTR_ACK, 0, $r10)
 
 	pop $r15
 	pop $r14
@@ -370,12 +374,10 @@
 #if CHIPSET < GK100
 // Not real sure, but, MEM_CMD 7 will hang forever if this isn't done
 ctx_4160s:
-	mov $r14 0x4160
-	sethi $r14 0x400000
 	mov $r15 1
-	call #nv_wr32
+	nv_wr32(0x404160, $r15)
 	ctx_4160s_wait:
-		call #nv_rd32
+		nv_rd32($r15, 0x404160)
 		xbit $r15 $r15 4
 		bra e #ctx_4160s_wait
 	ret
@@ -384,10 +386,8 @@
 // to hang with STATUS=0x00000007 until it's cleared.. fbcon can
 // still function with it set however...
 ctx_4160c:
-	mov $r14 0x4160
-	sethi $r14 0x400000
 	clear b32 $r15
-	call #nv_wr32
+	nv_wr32(0x404160, $r15)
 	ret
 #endif
 
@@ -396,18 +396,14 @@
 // In: $r15 value to set 0x404170 to
 //
 ctx_4170s:
-	mov $r14 0x4170
-	sethi $r14 0x400000
 	or $r15 0x10
-	call #nv_wr32
+	nv_wr32(0x404170, $r15)
 	ret
 
 // Waits for a ctx_4170s() call to complete
 //
 ctx_4170w:
-	mov $r14 0x4170
-	sethi $r14 0x400000
-	call #nv_rd32
+	nv_rd32($r15, 0x404170)
 	and $r15 0x10
 	bra ne #ctx_4170w
 	ret
@@ -419,16 +415,18 @@
 // funny things happen.
 //
 ctx_redswitch:
-	mov $r14 0x614
-	shl b32 $r14 6
-	mov $r15 0x270
-	iowr I[$r14] $r15	// HUB_RED_SWITCH = ENABLE_GPC, POWER_ALL
+	mov $r14 NV_PGRAPH_FECS_RED_SWITCH_ENABLE_GPC
+	or  $r14 NV_PGRAPH_FECS_RED_SWITCH_POWER_ROP
+	or  $r14 NV_PGRAPH_FECS_RED_SWITCH_POWER_GPC
+	or  $r14 NV_PGRAPH_FECS_RED_SWITCH_POWER_MAIN
+	nv_iowr(NV_PGRAPH_FECS_RED_SWITCH, 0, $r14)
 	mov $r15 8
 	ctx_redswitch_delay:
 		sub b32 $r15 1
 		bra ne #ctx_redswitch_delay
-	mov $r15 0x770
-	iowr I[$r14] $r15	// HUB_RED_SWITCH = ENABLE_ALL, POWER_ALL
+	or  $r14 NV_PGRAPH_FECS_RED_SWITCH_ENABLE_ROP
+	or  $r14 NV_PGRAPH_FECS_RED_SWITCH_ENABLE_MAIN
+	nv_iowr(NV_PGRAPH_FECS_RED_SWITCH, 0, $r14)
 	ret
 
 // Not a clue what this is for, except that unless the value is 0x10, the
@@ -437,15 +435,18 @@
 // In: $r15 value to set to (0x00/0x10 are used)
 //
 ctx_86c:
-	mov $r14 0x86c
-	shl b32 $r14 6
-	iowr I[$r14] $r15	// HUB(0x86c) = val
-	mov $r14 -0x75ec
-	sethi $r14 0x400000
-	call #nv_wr32		// ROP(0xa14) = val
-	mov $r14 -0x5794
-	sethi $r14 0x410000
-	call #nv_wr32		// GPC(0x86c) = val
+	nv_iowr(NV_PGRAPH_FECS_UNK86C, 0, $r15)
+	nv_wr32(0x408a14, $r15)
+	nv_wr32(NV_PGRAPH_GPCX_GPCCS_UNK86C, $r15)
+	ret
+
+// In: $r15 NV_PGRAPH_FECS_MEM_CMD_*
+ctx_mem:
+	nv_iowr(NV_PGRAPH_FECS_MEM_CMD, 0, $r15)
+	ctx_mem_wait:
+		nv_iord($r15, NV_PGRAPH_FECS_MEM_CMD, 0)
+		or $r15 $r15
+		bra ne #ctx_mem_wait
 	ret
 
 // ctx_load - load's a channel's ctxctl data, and selects its vm
@@ -457,23 +458,14 @@
 
 	// switch to channel, somewhat magic in parts..
 	mov $r10 12		// DONE_UNK12
-	call #wait_donez
-	mov $r1 0xa24
-	shl b32 $r1 6
-	iowr I[$r1 + 0x000] $r0	// 0x409a24
-	mov $r3 0xb00
-	shl b32 $r3 6
-	iowr I[$r3 + 0x100] $r2	// CHAN_NEXT
-	mov $r1 0xa0c
-	shl b32 $r1 6
-	mov $r4 7
-	iowr I[$r1 + 0x000] $r2 // MEM_CHAN
-	iowr I[$r1 + 0x100] $r4	// MEM_CMD
-	ctx_chan_wait_0:
-		iord $r4 I[$r1 + 0x100]
-		and $r4 0x1f
-		bra ne #ctx_chan_wait_0
-	iowr I[$r3 + 0x000] $r2	// CHAN_CUR
+	call(wait_donez)
+	clear b32 $r15
+	nv_iowr(0x409a24, 0, $r15)
+	nv_iowr(NV_PGRAPH_FECS_CHAN_NEXT, 0, $r2)
+	nv_iowr(NV_PGRAPH_FECS_MEM_CHAN, 0, $r2)
+	mov $r15 NV_PGRAPH_FECS_MEM_CMD_LOAD_CHAN
+	call(ctx_mem)
+	nv_iowr(NV_PGRAPH_FECS_CHAN_ADDR, 0, $r2)
 
 	// load channel header, fetch PGRAPH context pointer
 	mov $xtargets $r0
@@ -482,14 +474,10 @@
 	add b32 $r2 2
 
 	trace_set(T_LCHAN)
-	mov $r1 0xa04
-	shl b32 $r1 6
-	iowr I[$r1 + 0x000] $r2		// MEM_BASE
-	mov $r1 0xa20
-	shl b32 $r1 6
-	mov $r2 0x0002
-	sethi $r2 0x80000000
-	iowr I[$r1 + 0x000] $r2		// MEM_TARGET = vram
+	nv_iowr(NV_PGRAPH_FECS_MEM_BASE, 0, $r2)
+	imm32($r2, NV_PGRAPH_FECS_MEM_TARGET_UNK31)
+	or  $r2 NV_PGRAPH_FECS_MEM_TARGET_AS_VRAM
+	nv_iowr(NV_PGRAPH_FECS_MEM_TARGET, 0, $r2)
 	mov $r1 0x10			// chan + 0x0210
 	mov $r2 #xfer_data
 	sethi $r2 0x00020000		// 16 bytes
@@ -507,13 +495,9 @@
 
 	// set transfer base to start of context, and fetch context header
 	trace_set(T_LCTXH)
-	mov $r2 0xa04
-	shl b32 $r2 6
-	iowr I[$r2 + 0x000] $r1		// MEM_BASE
-	mov $r2 1
-	mov $r1 0xa20
-	shl b32 $r1 6
-	iowr I[$r1 + 0x000] $r2		// MEM_TARGET = vm
+	nv_iowr(NV_PGRAPH_FECS_MEM_BASE, 0, $r1)
+	mov $r2 NV_PGRAPH_FECS_MEM_TARGET_AS_VM
+	nv_iowr(NV_PGRAPH_FECS_MEM_TARGET, 0, $r2)
 	mov $r1 #chan_data
 	sethi $r1 0x00060000		// 256 bytes
 	xdld $r0 $r1
@@ -532,21 +516,15 @@
 //
 ctx_chan:
 #if CHIPSET < GK100
-	call #ctx_4160s
+	call(ctx_4160s)
 #endif
-	call #ctx_load
+	call(ctx_load)
 	mov $r10 12			// DONE_UNK12
-	call #wait_donez
-	mov $r1 0xa10
-	shl b32 $r1 6
-	mov $r2 5
-	iowr I[$r1 + 0x000] $r2		// MEM_CMD = 5 (???)
-	ctx_chan_wait:
-		iord $r2 I[$r1 + 0x000]
-		or $r2 $r2
-		bra ne #ctx_chan_wait
+	call(wait_donez)
+	mov $r15 5 // MEM_CMD 5 ???
+	call(ctx_mem)
 #if CHIPSET < GK100
-	call #ctx_4160c
+	call(ctx_4160c)
 #endif
 	ret
 
@@ -562,9 +540,7 @@
 ctx_mmio_exec:
 	// set transfer base to be the mmio list
 	ld b32 $r3 D[$r0 + #chan_mmio_address]
-	mov $r2 0xa04
-	shl b32 $r2 6
-	iowr I[$r2 + 0x000] $r3		// MEM_BASE
+	nv_iowr(NV_PGRAPH_FECS_MEM_BASE, 0, $r3)
 
 	clear b32 $r3
 	ctx_mmio_loop:
@@ -580,7 +556,7 @@
 		ctx_mmio_pull:
 		ld b32 $r14 D[$r4 + #xfer_data + 0x00]
 		ld b32 $r15 D[$r4 + #xfer_data + 0x04]
-		call #nv_wr32
+		call(nv_wr32)
 
 		// next!
 		add b32 $r3 8
@@ -590,7 +566,7 @@
 	// set transfer base back to the current context
 	ctx_mmio_done:
 	ld b32 $r3 D[$r0 + #ctx_current]
-	iowr I[$r2 + 0x000] $r3		// MEM_BASE
+	nv_iowr(NV_PGRAPH_FECS_MEM_BASE, 0, $r3)
 
 	// disable the mmio list now, we don't need/want to execute it again
 	st b32 D[$r0 + #chan_mmio_count] $r0
@@ -610,12 +586,10 @@
 //
 ctx_xfer:
 	// according to mwk, some kind of wait for idle
-	mov $r15 0xc00
-	shl b32 $r15 6
 	mov $r14 4
-	iowr I[$r15 + 0x200] $r14
+	nv_iowr(0x409c08, 0, $r14)
 	ctx_xfer_idle:
-		iord $r14 I[$r15 + 0x000]
+		nv_iord($r14, 0x409c00, 0)
 		and $r14 0x2000
 		bra ne #ctx_xfer_idle
 
@@ -623,50 +597,42 @@
 	bra $p2 #ctx_xfer_pre_load
 	ctx_xfer_pre:
 		mov $r15 0x10
-		call #ctx_86c
+		call(ctx_86c)
 #if CHIPSET < GK100
-		call #ctx_4160s
+		call(ctx_4160s)
 #endif
 		bra not $p1 #ctx_xfer_exec
 
 	ctx_xfer_pre_load:
 		mov $r15 2
-		call #ctx_4170s
-		call #ctx_4170w
-		call #ctx_redswitch
+		call(ctx_4170s)
+		call(ctx_4170w)
+		call(ctx_redswitch)
 		clear b32 $r15
-		call #ctx_4170s
-		call #ctx_load
+		call(ctx_4170s)
+		call(ctx_load)
 
 	// fetch context pointer, and initiate xfer on all GPCs
 	ctx_xfer_exec:
 	ld b32 $r1 D[$r0 + #ctx_current]
-	mov $r2 0x414
-	shl b32 $r2 6
-	iowr I[$r2 + 0x000] $r0	// BAR_STATUS = reset
-	mov $r14 -0x5b00
-	sethi $r14 0x410000
-	mov b32 $r15 $r1
-	call #nv_wr32		// GPC_BCAST_WRCMD_DATA = ctx pointer
-	add b32 $r14 4
+
+	clear b32 $r2
+	nv_iowr(NV_PGRAPH_FECS_BAR, 0, $r2)
+
+	nv_wr32(0x41a500, $r1)	// GPC_BCAST_WRCMD_DATA = ctx pointer
 	xbit $r15 $flags $p1
 	xbit $r2 $flags $p2
 	shl b32 $r2 1
 	or $r15 $r2
-	call #nv_wr32		// GPC_BCAST_WRCMD_CMD = GPC_XFER(type)
+	nv_wr32(0x41a504, $r15)	// GPC_BCAST_WRCMD_CMD = GPC_XFER(type)
 
 	// strands
-	mov $r1 0x4afc
-	sethi $r1 0x20000
-	mov $r2 0xc
-	iowr I[$r1] $r2		// STRAND_CMD(0x3f) = 0x0c
-	call #strand_wait
-	mov $r2 0x47fc
-	sethi $r2 0x20000
-	iowr I[$r2] $r0		// STRAND_FIRST_GENE(0x3f) = 0x00
-	xbit $r2 $flags $p1
-	add b32 $r2 3
-	iowr I[$r1] $r2		// STRAND_CMD(0x3f) = 0x03/0x04 (SAVE/LOAD)
+	call(strand_pre)
+	clear b32 $r2
+	nv_iowr(NV_PGRAPH_FECS_STRAND_SELECT, 0x3f, $r2)
+	xbit $r2 $flags $p1	// SAVE/LOAD
+	add b32 $r2 NV_PGRAPH_FECS_STRAND_CMD_SAVE
+	nv_iowr(NV_PGRAPH_FECS_STRAND_CMD, 0x3f, $r2)
 
 	// mmio context
 	xbit $r10 $flags $p1	// direction
@@ -675,48 +641,42 @@
 	ld b32 $r12 D[$r0 + #hub_mmio_list_head]
 	ld b32 $r13 D[$r0 + #hub_mmio_list_tail]
 	mov $r14 0		// not multi
-	call #mmctx_xfer
+	call(mmctx_xfer)
 
 	// wait for GPCs to all complete
 	mov $r10 8		// DONE_BAR
-	call #wait_doneo
+	call(wait_doneo)
 
 	// wait for strand xfer to complete
-	call #strand_wait
+	call(strand_wait)
 
 	// post-op
 	bra $p1 #ctx_xfer_post
 		mov $r10 12		// DONE_UNK12
-		call #wait_donez
-		mov $r1 0xa10
-		shl b32 $r1 6
-		mov $r2 5
-		iowr I[$r1] $r2		// MEM_CMD
-		ctx_xfer_post_save_wait:
-			iord $r2 I[$r1]
-			or $r2 $r2
-			bra ne #ctx_xfer_post_save_wait
+		call(wait_donez)
+		mov $r15 5 // MEM_CMD 5 ???
+		call(ctx_mem)
 
 	bra $p2 #ctx_xfer_done
 	ctx_xfer_post:
 		mov $r15 2
-		call #ctx_4170s
+		call(ctx_4170s)
 		clear b32 $r15
-		call #ctx_86c
-		call #strand_post
-		call #ctx_4170w
+		call(ctx_86c)
+		call(strand_post)
+		call(ctx_4170w)
 		clear b32 $r15
-		call #ctx_4170s
+		call(ctx_4170s)
 
 		bra not $p1 #ctx_xfer_no_post_mmio
 		ld b32 $r1 D[$r0 + #chan_mmio_count]
 		or $r1 $r1
 		bra e #ctx_xfer_no_post_mmio
-			call #ctx_mmio_exec
+			call(ctx_mmio_exec)
 
 		ctx_xfer_no_post_mmio:
 #if CHIPSET < GK100
-		call #ctx_4160c
+		call(ctx_4160c)
 #endif
 
 	ctx_xfer_done:
diff --git a/drivers/gpu/drm/nouveau/core/engine/graph/fuc/hubnv108.fuc5 b/drivers/gpu/drm/nouveau/core/engine/graph/fuc/hubnv108.fuc5
new file mode 100644
index 0000000..7c5d256
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/engine/graph/fuc/hubnv108.fuc5
@@ -0,0 +1,40 @@
+/*
+ * Copyright 2013 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs <bskeggs@redhat.com>
+ */
+
+#define CHIPSET GK208
+#include "macros.fuc"
+
+.section #nv108_grhub_data
+#define INCLUDE_DATA
+#include "com.fuc"
+#include "hub.fuc"
+#undef INCLUDE_DATA
+
+.section #nv108_grhub_code
+#define INCLUDE_CODE
+bra #init
+#include "com.fuc"
+#include "hub.fuc"
+.align 256
+#undef INCLUDE_CODE
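
The new hubnv108.fuc5 above only selects CHIPSET GK208 and pulls the shared com.fuc/hub.fuc sources into the #nv108_grhub_data and #nv108_grhub_code sections; the hubnv108.fuc5.h file added below is the pre-assembled form of those sections, checked in as plain uint32_t arrays for the driver to consume. As a rough, hypothetical sketch (not the nouveau upload path; the register offsets and the write32() helper are placeholders), loading such an image amounts to streaming both arrays into the falcon word by word:

	/* Hypothetical sketch: stream a pre-built ucode image (e.g. the
	 * nv108_grhub_data[]/nv108_grhub_code[] arrays below) into a falcon.
	 * FALCON_*_PORT offsets and write32() are placeholders, not a real API. */
	#include <stdint.h>
	#include <stddef.h>
	#include <stdio.h>

	#define FALCON_DATA_PORT 0x04	/* placeholder offset */
	#define FALCON_CODE_PORT 0x08	/* placeholder offset */

	static void write32(uint32_t reg, uint32_t val)
	{
		printf("wr 0x%08x <- 0x%08x\n", reg, val);	/* stand-in for an MMIO write */
	}

	static void falcon_load(uint32_t base,
				const uint32_t *data, size_t data_words,
				const uint32_t *code, size_t code_words)
	{
		size_t i;

		for (i = 0; i < data_words; i++)	/* data segment first */
			write32(base + FALCON_DATA_PORT, data[i]);
		for (i = 0; i < code_words; i++)	/* then the code segment */
			write32(base + FALCON_CODE_PORT, code[i]);
	}
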
diff --git a/drivers/gpu/drm/nouveau/core/engine/graph/fuc/hubnv108.fuc5.h b/drivers/gpu/drm/nouveau/core/engine/graph/fuc/hubnv108.fuc5.h
new file mode 100644
index 0000000..4750984
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/engine/graph/fuc/hubnv108.fuc5.h
@@ -0,0 +1,916 @@
+uint32_t nv108_grhub_data[] = {
+/* 0x0000: hub_mmio_list_head */
+	0x00000300,
+/* 0x0004: hub_mmio_list_tail */
+	0x00000304,
+/* 0x0008: gpc_count */
+	0x00000000,
+/* 0x000c: rop_count */
+	0x00000000,
+/* 0x0010: cmd_queue */
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+/* 0x0058: ctx_current */
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+/* 0x0100: chan_data */
+/* 0x0100: chan_mmio_count */
+	0x00000000,
+/* 0x0104: chan_mmio_address */
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+/* 0x0200: xfer_data */
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+/* 0x0300: hub_mmio_list_base */
+	0x0417e91c,
+};
+
+uint32_t nv108_grhub_code[] = {
+	0x030e0ef5,
+/* 0x0004: queue_put */
+	0x9800d898,
+	0x86f001d9,
+	0xf489a408,
+	0x020f0b1b,
+	0x0002f87e,
+/* 0x001a: queue_put_next */
+	0x98c400f8,
+	0x0384b607,
+	0xb6008dbb,
+	0x8eb50880,
+	0x018fb500,
+	0xf00190b6,
+	0xd9b50f94,
+/* 0x0037: queue_get */
+	0xf400f801,
+	0xd8980131,
+	0x01d99800,
+	0x0bf489a4,
+	0x0789c421,
+	0xbb0394b6,
+	0x90b6009d,
+	0x009e9808,
+	0xb6019f98,
+	0x84f00180,
+	0x00d8b50f,
+/* 0x0063: queue_get_done */
+	0xf80132f4,
+/* 0x0065: nv_rd32 */
+	0xf0ecb200,
+	0x00801fc9,
+	0x0cf601ca,
+/* 0x0073: nv_rd32_wait */
+	0x8c04bd00,
+	0xcf01ca00,
+	0xccc800cc,
+	0xf61bf41f,
+	0xec7e060a,
+	0x008f0000,
+	0xffcf01cb,
+/* 0x008f: nv_wr32 */
+	0x8000f800,
+	0xf601cc00,
+	0x04bd000f,
+	0xc9f0ecb2,
+	0x1ec9f01f,
+	0x01ca0080,
+	0xbd000cf6,
+/* 0x00a9: nv_wr32_wait */
+	0xca008c04,
+	0x00cccf01,
+	0xf41fccc8,
+	0x00f8f61b,
+/* 0x00b8: wait_donez */
+	0x99f094bd,
+	0x37008000,
+	0x0009f602,
+	0x008004bd,
+	0x0af60206,
+/* 0x00cf: wait_donez_ne */
+	0x8804bd00,
+	0xcf010000,
+	0x8aff0088,
+	0xf61bf488,
+	0x99f094bd,
+	0x17008000,
+	0x0009f602,
+	0x00f804bd,
+/* 0x00ec: wait_doneo */
+	0x99f094bd,
+	0x37008000,
+	0x0009f602,
+	0x008004bd,
+	0x0af60206,
+/* 0x0103: wait_doneo_e */
+	0x8804bd00,
+	0xcf010000,
+	0x8aff0088,
+	0xf60bf488,
+	0x99f094bd,
+	0x17008000,
+	0x0009f602,
+	0x00f804bd,
+/* 0x0120: mmctx_size */
+/* 0x0122: nv_mmctx_size_loop */
+	0xe89894bd,
+	0x1a85b600,
+	0xb60180b6,
+	0x98bb0284,
+	0x04e0b600,
+	0x1bf4efa4,
+	0xf89fb2ec,
+/* 0x013d: mmctx_xfer */
+	0xf094bd00,
+	0x00800199,
+	0x09f60237,
+	0xbd04bd00,
+	0x05bbfd94,
+	0x800f0bf4,
+	0xf601c400,
+	0x04bd000b,
+/* 0x015f: mmctx_base_disabled */
+	0xfd0099f0,
+	0x0bf405ee,
+	0xc6008018,
+	0x000ef601,
+	0x008004bd,
+	0x0ff601c7,
+	0xf004bd00,
+/* 0x017a: mmctx_multi_disabled */
+	0xabc80199,
+	0x10b4b600,
+	0xc80cb9f0,
+	0xe4b601ae,
+	0x05befd11,
+	0x01c50080,
+	0xbd000bf6,
+/* 0x0195: mmctx_exec_loop */
+/* 0x0195: mmctx_wait_free */
+	0xc5008e04,
+	0x00eecf01,
+	0xf41fe4f0,
+	0xce98f60b,
+	0x05e9fd00,
+	0x01c80080,
+	0xbd000ef6,
+	0x04c0b604,
+	0x1bf4cda4,
+	0x02abc8df,
+/* 0x01bf: mmctx_fini_wait */
+	0x8b1c1bf4,
+	0xcf01c500,
+	0xb4f000bb,
+	0x10b4b01f,
+	0x0af31bf4,
+	0x00b87e02,
+	0x250ef400,
+/* 0x01d8: mmctx_stop */
+	0xb600abc8,
+	0xb9f010b4,
+	0x12b9f00c,
+	0x01c50080,
+	0xbd000bf6,
+/* 0x01ed: mmctx_stop_wait */
+	0xc5008b04,
+	0x00bbcf01,
+	0xf412bbc8,
+/* 0x01fa: mmctx_done */
+	0x94bdf61b,
+	0x800199f0,
+	0xf6021700,
+	0x04bd0009,
+/* 0x020a: strand_wait */
+	0xa0f900f8,
+	0xb87e020a,
+	0xa0fc0000,
+/* 0x0216: strand_pre */
+	0x0c0900f8,
+	0x024afc80,
+	0xbd0009f6,
+	0x020a7e04,
+/* 0x0227: strand_post */
+	0x0900f800,
+	0x4afc800d,
+	0x0009f602,
+	0x0a7e04bd,
+	0x00f80002,
+/* 0x0238: strand_set */
+	0xfc800f0c,
+	0x0cf6024f,
+	0x0c04bd00,
+	0x4afc800b,
+	0x000cf602,
+	0xfc8004bd,
+	0x0ef6024f,
+	0x0c04bd00,
+	0x4afc800a,
+	0x000cf602,
+	0x0a7e04bd,
+	0x00f80002,
+/* 0x0268: strand_ctx_init */
+	0x99f094bd,
+	0x37008003,
+	0x0009f602,
+	0x167e04bd,
+	0x030e0002,
+	0x0002387e,
+	0xfc80c4bd,
+	0x0cf60247,
+	0x0c04bd00,
+	0x4afc8001,
+	0x000cf602,
+	0x0a7e04bd,
+	0x0c920002,
+	0x46fc8001,
+	0x000cf602,
+	0x020c04bd,
+	0x024afc80,
+	0xbd000cf6,
+	0x020a7e04,
+	0x02277e00,
+	0x42008800,
+	0x20008902,
+	0x0099cf02,
+/* 0x02c7: ctx_init_strand_loop */
+	0xf608fe95,
+	0x8ef6008e,
+	0x808acf40,
+	0xb606a5b6,
+	0xeabb01a0,
+	0x0480b600,
+	0xf40192b6,
+	0xe4b6e81b,
+	0xf2efbc08,
+	0x99f094bd,
+	0x17008003,
+	0x0009f602,
+	0x00f804bd,
+/* 0x02f8: error */
+	0x02050080,
+	0xbd000ff6,
+	0x80010f04,
+	0xf6030700,
+	0x04bd000f,
+/* 0x030e: init */
+	0x04bd00f8,
+	0x410007fe,
+	0x11cf4200,
+	0x0911e700,
+	0x0814b601,
+	0x020014fe,
+	0x12004002,
+	0xbd0002f6,
+	0x05c94104,
+	0xbd0010fe,
+	0x07004024,
+	0xbd0002f6,
+	0x20034204,
+	0x01010080,
+	0xbd0002f6,
+	0x20044204,
+	0x01010480,
+	0xbd0002f6,
+	0x200b4204,
+	0x01010880,
+	0xbd0002f6,
+	0x200c4204,
+	0x01011c80,
+	0xbd0002f6,
+	0x01039204,
+	0x03090080,
+	0xbd0003f6,
+	0x87044204,
+	0xf6040040,
+	0x04bd0002,
+	0x00400402,
+	0x0002f603,
+	0x31f404bd,
+	0x96048e10,
+	0x00657e40,
+	0xc7feb200,
+	0x01b590f1,
+	0x1ff4f003,
+	0x01020fb5,
+	0x041fbb01,
+	0x800112b6,
+	0xf6010300,
+	0x04bd0001,
+	0x01040080,
+	0xbd0001f6,
+	0x01004104,
+	0x627e020f,
+	0x717e0006,
+	0x100f0006,
+	0x0006b37e,
+	0x98000e98,
+	0x207e010f,
+	0x14950001,
+	0xc0008008,
+	0x0004f601,
+	0x008004bd,
+	0x04f601c1,
+	0xb704bd00,
+	0xbb130030,
+	0xf5b6001f,
+	0xd3008002,
+	0x000ff601,
+	0x15b604bd,
+	0x0110b608,
+	0xb20814b6,
+	0x02687e1f,
+	0x001fbb00,
+	0x84020398,
+/* 0x041f: init_gpc */
+	0xb8502000,
+	0x0008044e,
+	0x8f7e1fb2,
+	0x4eb80000,
+	0xbd00010c,
+	0x008f7ef4,
+	0x044eb800,
+	0x8f7e0001,
+	0x4eb80000,
+	0x0f000100,
+	0x008f7e02,
+	0x004eb800,
+/* 0x044e: init_gpc_wait */
+	0x657e0008,
+	0xffc80000,
+	0xf90bf41f,
+	0x08044eb8,
+	0x00657e00,
+	0x001fbb00,
+	0x800040b7,
+	0xf40132b6,
+	0x000fb41b,
+	0x0006b37e,
+	0x627e000f,
+	0x00800006,
+	0x01f60201,
+	0xbd04bd00,
+	0x1f19f014,
+	0x02300080,
+	0xbd0001f6,
+/* 0x0491: main */
+	0x0031f404,
+	0x0d0028f4,
+	0x00377e10,
+	0xf401f400,
+	0x4001e4b1,
+	0x00c71bf5,
+	0x99f094bd,
+	0x37008004,
+	0x0009f602,
+	0x008104bd,
+	0x11cf02c0,
+	0xc1008200,
+	0x0022cf02,
+	0xf41f13c8,
+	0x23c8770b,
+	0x550bf41f,
+	0x12b220f9,
+	0x99f094bd,
+	0x37008007,
+	0x0009f602,
+	0x32f404bd,
+	0x0231f401,
+	0x0008367e,
+	0x99f094bd,
+	0x17008007,
+	0x0009f602,
+	0x20fc04bd,
+	0x99f094bd,
+	0x37008006,
+	0x0009f602,
+	0x31f404bd,
+	0x08367e01,
+	0xf094bd00,
+	0x00800699,
+	0x09f60217,
+	0xf404bd00,
+/* 0x0522: chsw_prev_no_next */
+	0x20f92f0e,
+	0x32f412b2,
+	0x0232f401,
+	0x0008367e,
+	0x008020fc,
+	0x02f602c0,
+	0xf404bd00,
+/* 0x053e: chsw_no_prev */
+	0x23c8130e,
+	0x0d0bf41f,
+	0xf40131f4,
+	0x367e0232,
+/* 0x054e: chsw_done */
+	0x01020008,
+	0x02c30080,
+	0xbd0002f6,
+	0xf094bd04,
+	0x00800499,
+	0x09f60217,
+	0xf504bd00,
+/* 0x056b: main_not_ctx_switch */
+	0xb0ff2a0e,
+	0x1bf401e4,
+	0x7ef2b20c,
+	0xf40007d6,
+/* 0x057a: main_not_ctx_chan */
+	0xe4b0400e,
+	0x2c1bf402,
+	0x99f094bd,
+	0x37008007,
+	0x0009f602,
+	0x32f404bd,
+	0x0232f401,
+	0x0008367e,
+	0x99f094bd,
+	0x17008007,
+	0x0009f602,
+	0x0ef404bd,
+/* 0x05a9: main_not_ctx_save */
+	0x10ef9411,
+	0x7e01f5f0,
+	0xf50002f8,
+/* 0x05b7: main_done */
+	0xbdfede0e,
+	0x1f29f024,
+	0x02300080,
+	0xbd0002f6,
+	0xcc0ef504,
+/* 0x05c9: ih */
+	0xfe80f9fe,
+	0x80f90188,
+	0xa0f990f9,
+	0xd0f9b0f9,
+	0xf0f9e0f9,
+	0x004a04bd,
+	0x00aacf02,
+	0xf404abc4,
+	0x100d230b,
+	0xcf1a004e,
+	0x004f00ee,
+	0x00ffcf19,
+	0x0000047e,
+	0x0400b0b7,
+	0x0040010e,
+	0x000ef61d,
+/* 0x060a: ih_no_fifo */
+	0xabe404bd,
+	0x0bf40100,
+	0x4e100d0c,
+	0x047e4001,
+/* 0x061a: ih_no_ctxsw */
+	0xabe40000,
+	0x0bf40400,
+	0x01004b10,
+	0x448ebfb2,
+	0x8f7e4001,
+/* 0x062e: ih_no_fwmthd */
+	0x044b0000,
+	0xffb0bd01,
+	0x0bf4b4ab,
+	0x0700800c,
+	0x000bf603,
+/* 0x0642: ih_no_other */
+	0x004004bd,
+	0x000af601,
+	0xf0fc04bd,
+	0xd0fce0fc,
+	0xa0fcb0fc,
+	0x80fc90fc,
+	0xfc0088fe,
+	0x0032f480,
+/* 0x0662: ctx_4170s */
+	0xf5f001f8,
+	0x8effb210,
+	0x7e404170,
+	0xf800008f,
+/* 0x0671: ctx_4170w */
+	0x41708e00,
+	0x00657e40,
+	0xf0ffb200,
+	0x1bf410f4,
+/* 0x0683: ctx_redswitch */
+	0x4e00f8f3,
+	0xe5f00200,
+	0x20e5f040,
+	0x8010e5f0,
+	0xf6018500,
+	0x04bd000e,
+/* 0x069a: ctx_redswitch_delay */
+	0xf2b6080f,
+	0xfd1bf401,
+	0x0400e5f1,
+	0x0100e5f1,
+	0x01850080,
+	0xbd000ef6,
+/* 0x06b3: ctx_86c */
+	0x8000f804,
+	0xf6022300,
+	0x04bd000f,
+	0x148effb2,
+	0x8f7e408a,
+	0xffb20000,
+	0x41a88c8e,
+	0x00008f7e,
+/* 0x06d2: ctx_mem */
+	0x008000f8,
+	0x0ff60284,
+/* 0x06db: ctx_mem_wait */
+	0x8f04bd00,
+	0xcf028400,
+	0xfffd00ff,
+	0xf61bf405,
+/* 0x06ea: ctx_load */
+	0x94bd00f8,
+	0x800599f0,
+	0xf6023700,
+	0x04bd0009,
+	0xb87e0c0a,
+	0xf4bd0000,
+	0x02890080,
+	0xbd000ff6,
+	0xc1008004,
+	0x0002f602,
+	0x008004bd,
+	0x02f60283,
+	0x0f04bd00,
+	0x06d27e07,
+	0xc0008000,
+	0x0002f602,
+	0x0bfe04bd,
+	0x1f2af000,
+	0xb60424b6,
+	0x94bd0220,
+	0x800899f0,
+	0xf6023700,
+	0x04bd0009,
+	0x02810080,
+	0xbd0002f6,
+	0x0000d204,
+	0x25f08000,
+	0x88008002,
+	0x0002f602,
+	0x100104bd,
+	0xf0020042,
+	0x12fa0223,
+	0xbd03f805,
+	0x0899f094,
+	0x02170080,
+	0xbd0009f6,
+	0x81019804,
+	0x981814b6,
+	0x25b68002,
+	0x0512fd08,
+	0xbd1601b5,
+	0x0999f094,
+	0x02370080,
+	0xbd0009f6,
+	0x81008004,
+	0x0001f602,
+	0x010204bd,
+	0x02880080,
+	0xbd0002f6,
+	0x01004104,
+	0xfa0613f0,
+	0x03f80501,
+	0x99f094bd,
+	0x17008009,
+	0x0009f602,
+	0x94bd04bd,
+	0x800599f0,
+	0xf6021700,
+	0x04bd0009,
+/* 0x07d6: ctx_chan */
+	0xea7e00f8,
+	0x0c0a0006,
+	0x0000b87e,
+	0xd27e050f,
+	0x00f80006,
+/* 0x07e8: ctx_mmio_exec */
+	0x80410398,
+	0xf6028100,
+	0x04bd0003,
+/* 0x07f6: ctx_mmio_loop */
+	0x34c434bd,
+	0x0e1bf4ff,
+	0xf0020045,
+	0x35fa0653,
+/* 0x0807: ctx_mmio_pull */
+	0x9803f805,
+	0x4f98804e,
+	0x008f7e81,
+	0x0830b600,
+	0xf40112b6,
+/* 0x081a: ctx_mmio_done */
+	0x0398df1b,
+	0x81008016,
+	0x0003f602,
+	0x00b504bd,
+	0x01004140,
+	0xfa0613f0,
+	0x03f80601,
+/* 0x0836: ctx_xfer */
+	0x040e00f8,
+	0x03020080,
+	0xbd000ef6,
+/* 0x0841: ctx_xfer_idle */
+	0x00008e04,
+	0x00eecf03,
+	0x2000e4f1,
+	0xf4f51bf4,
+	0x02f40611,
+/* 0x0855: ctx_xfer_pre */
+	0x7e100f0c,
+	0xf40006b3,
+/* 0x085e: ctx_xfer_pre_load */
+	0x020f1b11,
+	0x0006627e,
+	0x0006717e,
+	0x0006837e,
+	0x627ef4bd,
+	0xea7e0006,
+/* 0x0876: ctx_xfer_exec */
+	0x01980006,
+	0x8024bd16,
+	0xf6010500,
+	0x04bd0002,
+	0x008e1fb2,
+	0x8f7e41a5,
+	0xfcf00000,
+	0x022cf001,
+	0xfd0124b6,
+	0xffb205f2,
+	0x41a5048e,
+	0x00008f7e,
+	0x0002167e,
+	0xfc8024bd,
+	0x02f60247,
+	0xf004bd00,
+	0x20b6012c,
+	0x4afc8003,
+	0x0002f602,
+	0xacf004bd,
+	0x06a5f001,
+	0x0c98000b,
+	0x010d9800,
+	0x3d7e000e,
+	0x080a0001,
+	0x0000ec7e,
+	0x00020a7e,
+	0x0a1201f4,
+	0x00b87e0c,
+	0x7e050f00,
+	0xf40006d2,
+/* 0x08f2: ctx_xfer_post */
+	0x020f2d02,
+	0x0006627e,
+	0xb37ef4bd,
+	0x277e0006,
+	0x717e0002,
+	0xf4bd0006,
+	0x0006627e,
+	0x981011f4,
+	0x11fd4001,
+	0x070bf405,
+	0x0007e87e,
+/* 0x091c: ctx_xfer_no_post_mmio */
+/* 0x091c: ctx_xfer_done */
+	0x000000f8,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+};
diff --git a/drivers/gpu/drm/nouveau/core/engine/graph/fuc/hubnvc0.fuc.h b/drivers/gpu/drm/nouveau/core/engine/graph/fuc/hubnvc0.fuc.h
index b59f694..132f684 100644
--- a/drivers/gpu/drm/nouveau/core/engine/graph/fuc/hubnvc0.fuc.h
+++ b/drivers/gpu/drm/nouveau/core/engine/graph/fuc/hubnvc0.fuc.h
@@ -206,14 +206,14 @@
 };
 
 uint32_t nvc0_grhub_code[] = {
-	0x031b0ef5,
+	0x039b0ef5,
 /* 0x0004: queue_put */
 	0x9800d898,
 	0x86f001d9,
 	0x0489b808,
 	0xf00c1bf4,
 	0x21f502f7,
-	0x00f802fe,
+	0x00f8037e,
 /* 0x001c: queue_put_next */
 	0xb60798c4,
 	0x8dbb0384,
@@ -237,184 +237,214 @@
 /* 0x0066: queue_get_done */
 	0x00f80132,
 /* 0x0068: nv_rd32 */
-	0x0728b7f1,
-	0xb906b4b6,
-	0xc9f002ec,
-	0x00bcd01f,
-/* 0x0078: nv_rd32_wait */
-	0xc800bccf,
-	0x1bf41fcc,
-	0x06a7f0fa,
-	0x010921f5,
-	0xf840bfcf,
-/* 0x008d: nv_wr32 */
-	0x28b7f100,
-	0x06b4b607,
-	0xb980bfd0,
-	0xc9f002ec,
-	0x1ec9f01f,
-/* 0x00a3: nv_wr32_wait */
-	0xcf00bcd0,
-	0xccc800bc,
-	0xfa1bf41f,
-/* 0x00ae: watchdog_reset */
-	0x87f100f8,
-	0x84b60430,
-	0x1ff9f006,
-	0xf8008fd0,
-/* 0x00bd: watchdog_clear */
-	0x3087f100,
-	0x0684b604,
-	0xf80080d0,
-/* 0x00c9: wait_donez */
-	0xf094bd00,
-	0x07f10099,
-	0x03f00f00,
-	0x0009d002,
-	0x07f104bd,
-	0x03f00600,
-	0x000ad002,
-/* 0x00e6: wait_donez_ne */
-	0x87f104bd,
-	0x83f00000,
-	0x0088cf01,
-	0xf4888aff,
-	0x94bdf31b,
-	0xf10099f0,
-	0xf0170007,
-	0x09d00203,
-	0xf804bd00,
-/* 0x0109: wait_doneo */
-	0xf094bd00,
-	0x07f10099,
-	0x03f00f00,
-	0x0009d002,
-	0x87f104bd,
-	0x84b60818,
-	0x008ad006,
-/* 0x0124: wait_doneo_e */
-	0x040087f1,
-	0xcf0684b6,
-	0x8aff0088,
-	0xf30bf488,
+	0xf002ecb9,
+	0x07f11fc9,
+	0x03f0ca00,
+	0x000cd001,
+/* 0x007a: nv_rd32_wait */
+	0xc7f104bd,
+	0xc3f0ca00,
+	0x00cccf01,
+	0xf41fccc8,
+	0xa7f0f31b,
+	0x1021f506,
+	0x00f7f101,
+	0x01f3f0cb,
+	0xf800ffcf,
+/* 0x009d: nv_wr32 */
+	0x0007f100,
+	0x0103f0cc,
+	0xbd000fd0,
+	0x02ecb904,
+	0xf01fc9f0,
+	0x07f11ec9,
+	0x03f0ca00,
+	0x000cd001,
+/* 0x00be: nv_wr32_wait */
+	0xc7f104bd,
+	0xc3f0ca00,
+	0x00cccf01,
+	0xf41fccc8,
+	0x00f8f31b,
+/* 0x00d0: wait_donez */
 	0x99f094bd,
 	0x0007f100,
-	0x0203f017,
+	0x0203f00f,
 	0xbd0009d0,
-/* 0x0147: mmctx_size */
-	0xbd00f804,
-/* 0x0149: nv_mmctx_size_loop */
-	0x00e89894,
-	0xb61a85b6,
-	0x84b60180,
-	0x0098bb02,
-	0xb804e0b6,
-	0x1bf404ef,
-	0x029fb9eb,
-/* 0x0166: mmctx_xfer */
-	0x94bd00f8,
-	0xf10199f0,
-	0xf00f0007,
-	0x09d00203,
-	0xf104bd00,
-	0xb6071087,
-	0x94bd0684,
-	0xf405bbfd,
-	0x8bd0090b,
-	0x0099f000,
-/* 0x018c: mmctx_base_disabled */
-	0xf405eefd,
-	0x8ed00c0b,
-	0xc08fd080,
-/* 0x019b: mmctx_multi_disabled */
-	0xb70199f0,
-	0xc8010080,
+	0x0007f104,
+	0x0203f006,
+	0xbd000ad0,
+/* 0x00ed: wait_donez_ne */
+	0x0087f104,
+	0x0183f000,
+	0xff0088cf,
+	0x1bf4888a,
+	0xf094bdf3,
+	0x07f10099,
+	0x03f01700,
+	0x0009d002,
+	0x00f804bd,
+/* 0x0110: wait_doneo */
+	0x99f094bd,
+	0x0007f100,
+	0x0203f00f,
+	0xbd0009d0,
+	0x0007f104,
+	0x0203f006,
+	0xbd000ad0,
+/* 0x012d: wait_doneo_e */
+	0x0087f104,
+	0x0183f000,
+	0xff0088cf,
+	0x0bf4888a,
+	0xf094bdf3,
+	0x07f10099,
+	0x03f01700,
+	0x0009d002,
+	0x00f804bd,
+/* 0x0150: mmctx_size */
+/* 0x0152: nv_mmctx_size_loop */
+	0xe89894bd,
+	0x1a85b600,
+	0xb60180b6,
+	0x98bb0284,
+	0x04e0b600,
+	0xf404efb8,
+	0x9fb9eb1b,
+/* 0x016f: mmctx_xfer */
+	0xbd00f802,
+	0x0199f094,
+	0x0f0007f1,
+	0xd00203f0,
+	0x04bd0009,
+	0xbbfd94bd,
+	0x120bf405,
+	0xc40007f1,
+	0xd00103f0,
+	0x04bd000b,
+/* 0x0197: mmctx_base_disabled */
+	0xfd0099f0,
+	0x0bf405ee,
+	0x0007f11e,
+	0x0103f0c6,
+	0xbd000ed0,
+	0x0007f104,
+	0x0103f0c7,
+	0xbd000fd0,
+	0x0199f004,
+/* 0x01b8: mmctx_multi_disabled */
+	0xb600abc8,
+	0xb9f010b4,
+	0x01aec80c,
+	0xfd11e4b6,
+	0x07f105be,
+	0x03f0c500,
+	0x000bd001,
+/* 0x01d6: mmctx_exec_loop */
+/* 0x01d6: mmctx_wait_free */
+	0xe7f104bd,
+	0xe3f0c500,
+	0x00eecf01,
+	0xf41fe4f0,
+	0xce98f30b,
+	0x05e9fd00,
+	0xc80007f1,
+	0xd00103f0,
+	0x04bd000e,
+	0xb804c0b6,
+	0x1bf404cd,
+	0x02abc8d8,
+/* 0x0207: mmctx_fini_wait */
+	0xf11f1bf4,
+	0xf0c500b7,
+	0xbbcf01b3,
+	0x1fb4f000,
+	0xf410b4b0,
+	0xa7f0f01b,
+	0xd021f402,
+/* 0x0223: mmctx_stop */
+	0xc82b0ef4,
 	0xb4b600ab,
 	0x0cb9f010,
-	0xb601aec8,
-	0xbefd11e4,
-	0x008bd005,
-/* 0x01b4: mmctx_exec_loop */
-/* 0x01b4: mmctx_wait_free */
-	0xf0008ecf,
-	0x0bf41fe4,
-	0x00ce98fa,
-	0xd005e9fd,
-	0xc0b6c08e,
-	0x04cdb804,
-	0xc8e81bf4,
-	0x1bf402ab,
-/* 0x01d5: mmctx_fini_wait */
-	0x008bcf18,
-	0xb01fb4f0,
-	0x1bf410b4,
-	0x02a7f0f7,
-	0xf4c921f4,
-/* 0x01ea: mmctx_stop */
-	0xabc81b0e,
-	0x10b4b600,
-	0xf00cb9f0,
-	0x8bd012b9,
-/* 0x01f9: mmctx_stop_wait */
-	0x008bcf00,
-	0xf412bbc8,
-/* 0x0202: mmctx_done */
-	0x94bdfa1b,
-	0xf10199f0,
-	0xf0170007,
-	0x09d00203,
-	0xf804bd00,
-/* 0x0215: strand_wait */
-	0xf0a0f900,
-	0x21f402a7,
-	0xf8a0fcc9,
-/* 0x0221: strand_pre */
-	0xfc87f100,
-	0x0283f04a,
-	0xd00c97f0,
-	0x21f50089,
-	0x00f80215,
-/* 0x0234: strand_post */
-	0x4afc87f1,
-	0xf00283f0,
-	0x89d00d97,
-	0x1521f500,
-/* 0x0247: strand_set */
-	0xf100f802,
-	0xf04ffca7,
-	0xaba202a3,
-	0xc7f00500,
-	0x00acd00f,
-	0xd00bc7f0,
-	0x21f500bc,
-	0xaed00215,
-	0x0ac7f000,
-	0xf500bcd0,
-	0xf8021521,
-/* 0x0271: strand_ctx_init */
-	0xf094bd00,
-	0x07f10399,
-	0x03f00f00,
+	0xf112b9f0,
+	0xf0c50007,
+	0x0bd00103,
+/* 0x023b: mmctx_stop_wait */
+	0xf104bd00,
+	0xf0c500b7,
+	0xbbcf01b3,
+	0x12bbc800,
+/* 0x024b: mmctx_done */
+	0xbdf31bf4,
+	0x0199f094,
+	0x170007f1,
+	0xd00203f0,
+	0x04bd0009,
+/* 0x025e: strand_wait */
+	0xa0f900f8,
+	0xf402a7f0,
+	0xa0fcd021,
+/* 0x026a: strand_pre */
+	0x97f000f8,
+	0xfc07f10c,
+	0x0203f04a,
+	0xbd0009d0,
+	0x5e21f504,
+/* 0x027f: strand_post */
+	0xf000f802,
+	0x07f10d97,
+	0x03f04afc,
 	0x0009d002,
 	0x21f504bd,
-	0xe7f00221,
-	0x4721f503,
-	0xfca7f102,
-	0x02a3f046,
-	0x0400aba0,
-	0xf040a0d0,
-	0xbcd001c7,
-	0x1521f500,
-	0x010c9202,
-	0xf000acd0,
-	0xbcd002c7,
-	0x1521f500,
-	0x3421f502,
-	0x8087f102,
-	0x0684b608,
-	0xb70089cf,
-	0x95220080,
-/* 0x02ca: ctx_init_strand_loop */
+	0x00f8025e,
+/* 0x0294: strand_set */
+	0xf10fc7f0,
+	0xf04ffc07,
+	0x0cd00203,
+	0xf004bd00,
+	0x07f10bc7,
+	0x03f04afc,
+	0x000cd002,
+	0x07f104bd,
+	0x03f04ffc,
+	0x000ed002,
+	0xc7f004bd,
+	0xfc07f10a,
+	0x0203f04a,
+	0xbd000cd0,
+	0x5e21f504,
+/* 0x02d3: strand_ctx_init */
+	0xbd00f802,
+	0x0399f094,
+	0x0f0007f1,
+	0xd00203f0,
+	0x04bd0009,
+	0x026a21f5,
+	0xf503e7f0,
+	0xbd029421,
+	0xfc07f1c4,
+	0x0203f047,
+	0xbd000cd0,
+	0x01c7f004,
+	0x4afc07f1,
+	0xd00203f0,
+	0x04bd000c,
+	0x025e21f5,
+	0xf1010c92,
+	0xf046fc07,
+	0x0cd00203,
+	0xf004bd00,
+	0x07f102c7,
+	0x03f04afc,
+	0x000cd002,
+	0x21f504bd,
+	0x21f5025e,
+	0x87f1027f,
+	0x83f04200,
+	0x0097f102,
+	0x0293f020,
+	0x950099cf,
+/* 0x034a: ctx_init_strand_loop */
 	0x8ed008fe,
 	0x408ed000,
 	0xb6808acf,
@@ -428,7 +458,7 @@
 	0x170007f1,
 	0xd00203f0,
 	0x04bd0009,
-/* 0x02fe: error */
+/* 0x037e: error */
 	0x07f100f8,
 	0x03f00500,
 	0x000fd002,
@@ -436,82 +466,117 @@
 	0x0007f101,
 	0x0303f007,
 	0xbd000fd0,
-/* 0x031b: init */
+/* 0x039b: init */
 	0xbd00f804,
-	0x0004fe04,
-	0xf10007fe,
-	0xf0120017,
-	0x12d00227,
-	0xb117f100,
-	0x0010fe05,
-	0x040017f1,
-	0xf1c010d0,
-	0xb6040437,
-	0x27f10634,
-	0x32d02003,
-	0x0427f100,
-	0x0132d020,
+	0x0007fe04,
+	0x420017f1,
+	0xcf0013f0,
+	0x11e70011,
+	0x14b60109,
+	0x0014fe08,
+	0xf10227f0,
+	0xf0120007,
+	0x02d00003,
+	0xf104bd00,
+	0xfe06c817,
+	0x24bd0010,
+	0x070007f1,
+	0xd00003f0,
+	0x04bd0002,
+	0x200327f1,
+	0x010007f1,
+	0xd00103f0,
+	0x04bd0002,
+	0x200427f1,
+	0x010407f1,
+	0xd00103f0,
+	0x04bd0002,
 	0x200b27f1,
-	0xf10232d0,
-	0xd0200c27,
-	0x27f10732,
-	0x24b60c24,
-	0x0003b906,
-	0xf10023d0,
+	0x010807f1,
+	0xd00103f0,
+	0x04bd0002,
+	0x200c27f1,
+	0x011c07f1,
+	0xd00103f0,
+	0x04bd0002,
+	0xf1010392,
+	0xf0090007,
+	0x03d00303,
+	0xf104bd00,
 	0xf0870427,
-	0x12d00023,
-	0x0012b700,
-	0x0427f001,
-	0xf40012d0,
-	0xe7f11031,
-	0xe3f09604,
-	0x6821f440,
-	0x8090f1c7,
-	0xf4f00301,
-	0x020f801f,
-	0xbb0117f0,
-	0x12b6041f,
-	0x0c27f101,
-	0x0624b604,
-	0xd00021d0,
-	0x17f14021,
-	0x0e980100,
-	0x010f9800,
-	0x014721f5,
-	0x070037f1,
-	0x950634b6,
-	0x34d00814,
-	0x4034d000,
-	0x130030b7,
-	0xb6001fbb,
-	0x3fd002f5,
-	0x0815b600,
-	0xb60110b6,
-	0x1fb90814,
-	0x7121f502,
-	0x001fbb02,
-	0xf1020398,
-	0xf0200047,
-/* 0x03f6: init_gpc */
-	0x4ea05043,
-	0x1fb90804,
-	0x8d21f402,
-	0x010c4ea0,
-	0x21f4f4bd,
-	0x044ea08d,
-	0x8d21f401,
-	0x01004ea0,
-	0xf402f7f0,
-	0x4ea08d21,
-/* 0x041e: init_gpc_wait */
-	0x21f40800,
-	0x1fffc868,
-	0xa0fa0bf4,
-	0xf408044e,
-	0x1fbb6821,
-	0x0040b700,
-	0x0132b680,
-	0xf1be1bf4,
+	0x07f10023,
+	0x03f00400,
+	0x0002d000,
+	0x27f004bd,
+	0x0007f104,
+	0x0003f003,
+	0xbd0002d0,
+	0x1031f404,
+	0x9604e7f1,
+	0xf440e3f0,
+	0xfeb96821,
+	0x90f1c702,
+	0xf0030180,
+	0x0f801ff4,
+	0x0117f002,
+	0xb6041fbb,
+	0x07f10112,
+	0x03f00300,
+	0x0001d001,
+	0x07f104bd,
+	0x03f00400,
+	0x0001d001,
+	0x17f104bd,
+	0xf7f00100,
+	0xb521f502,
+	0xc721f507,
+	0x10f7f007,
+	0x081421f5,
+	0x98000e98,
+	0x21f5010f,
+	0x14950150,
+	0x0007f108,
+	0x0103f0c0,
+	0xbd0004d0,
+	0x0007f104,
+	0x0103f0c1,
+	0xbd0004d0,
+	0x0030b704,
+	0x001fbb13,
+	0xf102f5b6,
+	0xf0d30007,
+	0x0fd00103,
+	0xb604bd00,
+	0x10b60815,
+	0x0814b601,
+	0xf5021fb9,
+	0xbb02d321,
+	0x0398001f,
+	0x0047f102,
+	0x5043f020,
+/* 0x04f4: init_gpc */
+	0x08044ea0,
+	0xf4021fb9,
+	0x4ea09d21,
+	0xf4bd010c,
+	0xa09d21f4,
+	0xf401044e,
+	0x4ea09d21,
+	0xf7f00100,
+	0x9d21f402,
+	0x08004ea0,
+/* 0x051c: init_gpc_wait */
+	0xc86821f4,
+	0x0bf41fff,
+	0x044ea0fa,
+	0x6821f408,
+	0xb7001fbb,
+	0xb6800040,
+	0x1bf40132,
+	0x00f7f0be,
+	0x081421f5,
+	0xf500f7f0,
+	0xf107b521,
 	0xf0010007,
 	0x01d00203,
 	0xbd04bd00,
@@ -519,402 +584,399 @@
 	0x080007f1,
 	0xd00203f0,
 	0x04bd0001,
-/* 0x0458: main */
+/* 0x0564: main */
 	0xf40031f4,
 	0xd7f00028,
 	0x3921f410,
 	0xb1f401f4,
 	0xf54001e4,
-	0xbd00de1b,
+	0xbd00e91b,
 	0x0499f094,
 	0x0f0007f1,
 	0xd00203f0,
 	0x04bd0009,
-	0x0b0017f1,
-	0xcf0614b6,
-	0x11cf4012,
-	0x1f13c800,
-	0x00870bf5,
+	0xc00017f1,
+	0xcf0213f0,
+	0x27f10011,
+	0x23f0c100,
+	0x0022cf02,
+	0xf51f13c8,
+	0xc800890b,
+	0x0bf41f23,
+	0xb920f962,
+	0x94bd0212,
+	0xf10799f0,
+	0xf00f0007,
+	0x09d00203,
+	0xf404bd00,
+	0x31f40132,
+	0xe821f502,
+	0xf094bd09,
+	0x07f10799,
+	0x03f01700,
+	0x0009d002,
+	0x20fc04bd,
+	0x99f094bd,
+	0x0007f106,
+	0x0203f00f,
+	0xbd0009d0,
+	0x0131f404,
+	0x09e821f5,
+	0x99f094bd,
+	0x0007f106,
+	0x0203f017,
+	0xbd0009d0,
+	0x330ef404,
+/* 0x060c: chsw_prev_no_next */
+	0x12b920f9,
+	0x0132f402,
+	0xf50232f4,
+	0xfc09e821,
+	0x0007f120,
+	0x0203f0c0,
+	0xbd0002d0,
+	0x130ef404,
+/* 0x062c: chsw_no_prev */
 	0xf41f23c8,
-	0x20f9620b,
-	0xbd0212b9,
-	0x0799f094,
-	0x0f0007f1,
+	0x31f40d0b,
+	0x0232f401,
+	0x09e821f5,
+/* 0x063c: chsw_done */
+	0xf10127f0,
+	0xf0c30007,
+	0x02d00203,
+	0xbd04bd00,
+	0x0499f094,
+	0x170007f1,
 	0xd00203f0,
 	0x04bd0009,
-	0xf40132f4,
-	0x21f50231,
-	0x94bd082f,
+	0xff080ef5,
+/* 0x0660: main_not_ctx_switch */
+	0xf401e4b0,
+	0xf2b90d1b,
+	0x7821f502,
+	0x460ef409,
+/* 0x0670: main_not_ctx_chan */
+	0xf402e4b0,
+	0x94bd321b,
 	0xf10799f0,
-	0xf0170007,
+	0xf00f0007,
 	0x09d00203,
-	0xfc04bd00,
-	0xf094bd20,
-	0x07f10699,
-	0x03f00f00,
-	0x0009d002,
-	0x31f404bd,
-	0x2f21f501,
-	0xf094bd08,
-	0x07f10699,
+	0xf404bd00,
+	0x32f40132,
+	0xe821f502,
+	0xf094bd09,
+	0x07f10799,
 	0x03f01700,
 	0x0009d002,
 	0x0ef404bd,
-/* 0x04f9: chsw_prev_no_next */
-	0xb920f931,
-	0x32f40212,
-	0x0232f401,
-	0x082f21f5,
-	0x17f120fc,
-	0x14b60b00,
-	0x0012d006,
-/* 0x0517: chsw_no_prev */
-	0xc8130ef4,
-	0x0bf41f23,
-	0x0131f40d,
-	0xf50232f4,
-/* 0x0527: chsw_done */
-	0xf1082f21,
-	0xb60b0c17,
-	0x27f00614,
-	0x0012d001,
-	0x99f094bd,
-	0x0007f104,
-	0x0203f017,
-	0xbd0009d0,
-	0x130ef504,
-/* 0x0549: main_not_ctx_switch */
-	0x01e4b0ff,
-	0xb90d1bf4,
-	0x21f502f2,
-	0x0ef407bb,
-/* 0x0559: main_not_ctx_chan */
-	0x02e4b046,
-	0xbd321bf4,
-	0x0799f094,
-	0x0f0007f1,
+/* 0x06a5: main_not_ctx_save */
+	0x10ef9411,
+	0xf501f5f0,
+	0xf5037e21,
+/* 0x06b3: main_done */
+	0xbdfeb50e,
+	0x1f29f024,
+	0x080007f1,
 	0xd00203f0,
-	0x04bd0009,
-	0xf40132f4,
-	0x21f50232,
-	0x94bd082f,
-	0xf10799f0,
-	0xf0170007,
-	0x09d00203,
-	0xf404bd00,
-/* 0x058e: main_not_ctx_save */
-	0xef94110e,
-	0x01f5f010,
-	0x02fe21f5,
-	0xfec00ef5,
-/* 0x059c: main_done */
-	0x29f024bd,
-	0x0007f11f,
-	0x0203f008,
-	0xbd0002d0,
-	0xab0ef504,
-/* 0x05b1: ih */
-	0xfe80f9fe,
-	0x80f90188,
-	0xa0f990f9,
-	0xd0f9b0f9,
-	0xf0f9e0f9,
-	0x0acf04bd,
-	0x04abc480,
-	0xf11d0bf4,
-	0xf01900b7,
-	0xbecf10d7,
-	0x00bfcf40,
+	0x04bd0002,
+	0xfea00ef5,
+/* 0x06c8: ih */
+	0x88fe80f9,
+	0xf980f901,
+	0xf9a0f990,
+	0xf9d0f9b0,
+	0xbdf0f9e0,
+	0x00a7f104,
+	0x00a3f002,
+	0xc400aacf,
+	0x0bf404ab,
+	0x10d7f030,
+	0x1a00e7f1,
+	0xcf00e3f0,
+	0xf7f100ee,
+	0xf3f01900,
+	0x00ffcf00,
 	0xb70421f4,
 	0xf00400b0,
-	0xbed001e7,
-/* 0x05e9: ih_no_fifo */
-	0x00abe400,
-	0x0d0bf401,
-	0xf110d7f0,
-	0xf44001e7,
-/* 0x05fa: ih_no_ctxsw */
-	0xb7f10421,
-	0xb0bd0104,
-	0xf4b4abff,
-	0xa7f10d0b,
-	0xa4b60c1c,
-	0x00abd006,
-/* 0x0610: ih_no_other */
-	0xfc400ad0,
+	0x07f101e7,
+	0x03f01d00,
+	0x000ed000,
+/* 0x071a: ih_no_fifo */
+	0xabe404bd,
+	0x0bf40100,
+	0x10d7f00d,
+	0x4001e7f1,
+/* 0x072b: ih_no_ctxsw */
+	0xe40421f4,
+	0xf40400ab,
+	0xb7f1140b,
+	0xbfb90100,
+	0x44e7f102,
+	0x40e3f001,
+/* 0x0743: ih_no_fwmthd */
+	0xf19d21f4,
+	0xbd0104b7,
+	0xb4abffb0,
+	0xf10f0bf4,
+	0xf0070007,
+	0x0bd00303,
+/* 0x075b: ih_no_other */
+	0xf104bd00,
+	0xf0010007,
+	0x0ad00003,
+	0xfc04bd00,
 	0xfce0fcf0,
 	0xfcb0fcd0,
 	0xfc90fca0,
 	0x0088fe80,
 	0x32f480fc,
-/* 0x062b: ctx_4160s */
-	0xf101f800,
-	0xf04160e7,
-	0xf7f040e3,
-	0x8d21f401,
-/* 0x0638: ctx_4160s_wait */
-	0xc86821f4,
-	0x0bf404ff,
-/* 0x0643: ctx_4160c */
-	0xf100f8fa,
-	0xf04160e7,
-	0xf4bd40e3,
-	0xf88d21f4,
-/* 0x0651: ctx_4170s */
-	0x70e7f100,
+/* 0x077f: ctx_4160s */
+	0xf001f800,
+	0xffb901f7,
+	0x60e7f102,
 	0x40e3f041,
-	0xf410f5f0,
-	0x00f88d21,
-/* 0x0660: ctx_4170w */
-	0x4170e7f1,
-	0xf440e3f0,
-	0xf4f06821,
-	0xf31bf410,
-/* 0x0672: ctx_redswitch */
-	0xe7f100f8,
-	0xe4b60614,
-	0x70f7f106,
-	0x00efd002,
-/* 0x0683: ctx_redswitch_delay */
-	0xb608f7f0,
-	0x1bf401f2,
-	0x70f7f1fd,
-	0x00efd007,
-/* 0x0692: ctx_86c */
-	0xe7f100f8,
-	0xe4b6086c,
-	0x00efd006,
-	0x8a14e7f1,
-	0xf440e3f0,
-	0xe7f18d21,
-	0xe3f0a86c,
-	0x8d21f441,
-/* 0x06b2: ctx_load */
+/* 0x078f: ctx_4160s_wait */
+	0xf19d21f4,
+	0xf04160e7,
+	0x21f440e3,
+	0x02ffb968,
+	0xf404ffc8,
+	0x00f8f00b,
+/* 0x07a4: ctx_4160c */
+	0xffb9f4bd,
+	0x60e7f102,
+	0x40e3f041,
+	0xf89d21f4,
+/* 0x07b5: ctx_4170s */
+	0x10f5f000,
+	0xf102ffb9,
+	0xf04170e7,
+	0x21f440e3,
+/* 0x07c7: ctx_4170w */
+	0xf100f89d,
+	0xf04170e7,
+	0x21f440e3,
+	0x02ffb968,
+	0xf410f4f0,
+	0x00f8f01b,
+/* 0x07dc: ctx_redswitch */
+	0x0200e7f1,
+	0xf040e5f0,
+	0xe5f020e5,
+	0x0007f110,
+	0x0103f085,
+	0xbd000ed0,
+	0x08f7f004,
+/* 0x07f8: ctx_redswitch_delay */
+	0xf401f2b6,
+	0xe5f1fd1b,
+	0xe5f10400,
+	0x07f10100,
+	0x03f08500,
+	0x000ed001,
+	0x00f804bd,
+/* 0x0814: ctx_86c */
+	0x1b0007f1,
+	0xd00203f0,
+	0x04bd000f,
+	0xf102ffb9,
+	0xf08a14e7,
+	0x21f440e3,
+	0x02ffb99d,
+	0xa86ce7f1,
+	0xf441e3f0,
+	0x00f89d21,
+/* 0x083c: ctx_mem */
+	0x840007f1,
+	0xd00203f0,
+	0x04bd000f,
+/* 0x0848: ctx_mem_wait */
+	0x8400f7f1,
+	0xcf02f3f0,
+	0xfffd00ff,
+	0xf31bf405,
+/* 0x085a: ctx_load */
 	0x94bd00f8,
 	0xf10599f0,
 	0xf00f0007,
 	0x09d00203,
 	0xf004bd00,
 	0x21f40ca7,
-	0x2417f1c9,
-	0x0614b60a,
-	0xf10010d0,
-	0xb60b0037,
-	0x32d00634,
-	0x0c17f140,
-	0x0614b60a,
-	0xd00747f0,
-	0x14d00012,
-/* 0x06ed: ctx_chan_wait_0 */
-	0x4014cf40,
-	0xf41f44f0,
-	0x32d0fa1b,
-	0x000bfe00,
-	0xb61f2af0,
-	0x20b60424,
-	0xf094bd02,
+	0xf1f4bdd0,
+	0xf0890007,
+	0x0fd00203,
+	0xf104bd00,
+	0xf0c10007,
+	0x02d00203,
+	0xf104bd00,
+	0xf0830007,
+	0x02d00203,
+	0xf004bd00,
+	0x21f507f7,
+	0x07f1083c,
+	0x03f0c000,
+	0x0002d002,
+	0x0bfe04bd,
+	0x1f2af000,
+	0xb60424b6,
+	0x94bd0220,
+	0xf10899f0,
+	0xf00f0007,
+	0x09d00203,
+	0xf104bd00,
+	0xf0810007,
+	0x02d00203,
+	0xf104bd00,
+	0xf1000027,
+	0xf0800023,
+	0x07f10225,
+	0x03f08800,
+	0x0002d002,
+	0x17f004bd,
+	0x0027f110,
+	0x0223f002,
+	0xf80512fa,
+	0xf094bd03,
 	0x07f10899,
-	0x03f00f00,
+	0x03f01700,
 	0x0009d002,
-	0x17f104bd,
-	0x14b60a04,
-	0x0012d006,
-	0x0a2017f1,
-	0xf00614b6,
-	0x23f10227,
-	0x12d08000,
-	0x1017f000,
-	0x020027f1,
-	0xfa0223f0,
-	0x03f80512,
+	0x019804bd,
+	0x1814b681,
+	0xb6800298,
+	0x12fd0825,
+	0x16018005,
 	0x99f094bd,
-	0x0007f108,
+	0x0007f109,
+	0x0203f00f,
+	0xbd0009d0,
+	0x0007f104,
+	0x0203f081,
+	0xbd0001d0,
+	0x0127f004,
+	0x880007f1,
+	0xd00203f0,
+	0x04bd0002,
+	0x010017f1,
+	0xfa0613f0,
+	0x03f80501,
+	0x99f094bd,
+	0x0007f109,
 	0x0203f017,
 	0xbd0009d0,
-	0x81019804,
-	0x981814b6,
-	0x25b68002,
-	0x0512fd08,
-	0xbd160180,
-	0x0999f094,
-	0x0f0007f1,
-	0xd00203f0,
-	0x04bd0009,
-	0x0a0427f1,
-	0xd00624b6,
-	0x27f00021,
-	0x2017f101,
-	0x0614b60a,
-	0xf10012d0,
-	0xf0010017,
-	0x01fa0613,
-	0xbd03f805,
-	0x0999f094,
-	0x170007f1,
-	0xd00203f0,
-	0x04bd0009,
-	0x99f094bd,
-	0x0007f105,
-	0x0203f017,
-	0xbd0009d0,
-/* 0x07bb: ctx_chan */
-	0xf500f804,
-	0xf5062b21,
-	0xf006b221,
-	0x21f40ca7,
-	0x1017f1c9,
-	0x0614b60a,
-	0xd00527f0,
-/* 0x07d6: ctx_chan_wait */
-	0x12cf0012,
-	0x0522fd00,
-	0xf5fa1bf4,
-	0xf8064321,
-/* 0x07e5: ctx_mmio_exec */
-	0x41039800,
-	0x0a0427f1,
-	0xd00624b6,
-	0x34bd0023,
-/* 0x07f4: ctx_mmio_loop */
+	0xf094bd04,
+	0x07f10599,
+	0x03f01700,
+	0x0009d002,
+	0x00f804bd,
+/* 0x0978: ctx_chan */
+	0x077f21f5,
+	0x085a21f5,
+	0xf40ca7f0,
+	0xf7f0d021,
+	0x3c21f505,
+	0xa421f508,
+/* 0x0993: ctx_mmio_exec */
+	0x9800f807,
+	0x07f14103,
+	0x03f08100,
+	0x0003d002,
+	0x34bd04bd,
+/* 0x09a4: ctx_mmio_loop */
 	0xf4ff34c4,
 	0x57f10f1b,
 	0x53f00200,
 	0x0535fa06,
-/* 0x0806: ctx_mmio_pull */
+/* 0x09b6: ctx_mmio_pull */
 	0x4e9803f8,
 	0x814f9880,
-	0xb68d21f4,
+	0xb69d21f4,
 	0x12b60830,
 	0xdf1bf401,
-/* 0x0818: ctx_mmio_done */
-	0xd0160398,
-	0x00800023,
-	0x0017f140,
-	0x0613f001,
-	0xf80601fa,
-/* 0x082f: ctx_xfer */
-	0xf100f803,
-	0xb60c00f7,
-	0xe7f006f4,
-	0x80fed004,
-/* 0x083c: ctx_xfer_idle */
-	0xf100fecf,
-	0xf42000e4,
-	0x11f4f91b,
-	0x1102f406,
-/* 0x084c: ctx_xfer_pre */
-	0xf510f7f0,
-	0xf5069221,
-	0xf4062b21,
-/* 0x085a: ctx_xfer_pre_load */
-	0xf7f01c11,
-	0x5121f502,
-	0x6021f506,
-	0x7221f506,
-	0xf5f4bd06,
-	0xf5065121,
-/* 0x0873: ctx_xfer_exec */
-	0x9806b221,
-	0x27f11601,
-	0x24b60414,
-	0x0020d006,
-	0xa500e7f1,
-	0xb941e3f0,
-	0x21f4021f,
-	0x04e0b68d,
-	0xf001fcf0,
-	0x24b6022c,
-	0x05f2fd01,
-	0xf18d21f4,
-	0xf04afc17,
-	0x27f00213,
-	0x0012d00c,
-	0x021521f5,
-	0x47fc27f1,
-	0xd00223f0,
-	0x2cf00020,
+/* 0x09c8: ctx_mmio_done */
+	0xf1160398,
+	0xf0810007,
+	0x03d00203,
+	0x8004bd00,
+	0x17f14000,
+	0x13f00100,
+	0x0601fa06,
+	0x00f803f8,
+/* 0x09e8: ctx_xfer */
+	0xf104e7f0,
+	0xf0020007,
+	0x0ed00303,
+/* 0x09f7: ctx_xfer_idle */
+	0xf104bd00,
+	0xf00000e7,
+	0xeecf03e3,
+	0x00e4f100,
+	0xf21bf420,
+	0xf40611f4,
+/* 0x0a0e: ctx_xfer_pre */
+	0xf7f01102,
+	0x1421f510,
+	0x7f21f508,
+	0x1c11f407,
+/* 0x0a1c: ctx_xfer_pre_load */
+	0xf502f7f0,
+	0xf507b521,
+	0xf507c721,
+	0xbd07dc21,
+	0xb521f5f4,
+	0x5a21f507,
+/* 0x0a35: ctx_xfer_exec */
+	0x16019808,
+	0x07f124bd,
+	0x03f00500,
+	0x0002d001,
+	0x1fb904bd,
+	0x00e7f102,
+	0x41e3f0a5,
+	0xf09d21f4,
+	0x2cf001fc,
+	0x0124b602,
+	0xb905f2fd,
+	0xe7f102ff,
+	0xe3f0a504,
+	0x9d21f441,
+	0x026a21f5,
+	0x07f124bd,
+	0x03f047fc,
+	0x0002d002,
+	0x2cf004bd,
 	0x0320b601,
-	0xf00012d0,
-	0xa5f001ac,
-	0x00b7f006,
-	0x98000c98,
-	0xe7f0010d,
-	0x6621f500,
-	0x08a7f001,
-	0x010921f5,
-	0x021521f5,
-	0xf02201f4,
-	0x21f40ca7,
-	0x1017f1c9,
-	0x0614b60a,
-	0xd00527f0,
-/* 0x08fa: ctx_xfer_post_save_wait */
-	0x12cf0012,
-	0x0522fd00,
-	0xf4fa1bf4,
-/* 0x0906: ctx_xfer_post */
-	0xf7f03202,
-	0x5121f502,
-	0xf5f4bd06,
-	0xf5069221,
-	0xf5023421,
-	0xbd066021,
-	0x5121f5f4,
-	0x1011f406,
-	0xfd400198,
-	0x0bf40511,
-	0xe521f507,
-/* 0x0931: ctx_xfer_no_post_mmio */
-	0x4321f507,
-/* 0x0935: ctx_xfer_done */
-	0x0000f806,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
+	0x4afc07f1,
+	0xd00203f0,
+	0x04bd0002,
+	0xf001acf0,
+	0xb7f006a5,
+	0x000c9800,
+	0xf0010d98,
+	0x21f500e7,
+	0xa7f0016f,
+	0x1021f508,
+	0x5e21f501,
+	0x1301f402,
+	0xf40ca7f0,
+	0xf7f0d021,
+	0x3c21f505,
+	0x3202f408,
+/* 0x0ac4: ctx_xfer_post */
+	0xf502f7f0,
+	0xbd07b521,
+	0x1421f5f4,
+	0x7f21f508,
+	0xc721f502,
+	0xf5f4bd07,
+	0xf407b521,
+	0x01981011,
+	0x0511fd40,
+	0xf5070bf4,
+/* 0x0aef: ctx_xfer_no_post_mmio */
+	0xf5099321,
+/* 0x0af3: ctx_xfer_done */
+	0xf807a421,
 	0x00000000,
 	0x00000000,
 	0x00000000,
diff --git a/drivers/gpu/drm/nouveau/core/engine/graph/fuc/hubnvd7.fuc.h b/drivers/gpu/drm/nouveau/core/engine/graph/fuc/hubnvd7.fuc.h
index a1b9f76..84af824 100644
--- a/drivers/gpu/drm/nouveau/core/engine/graph/fuc/hubnvd7.fuc.h
+++ b/drivers/gpu/drm/nouveau/core/engine/graph/fuc/hubnvd7.fuc.h
@@ -206,14 +206,14 @@
 };
 
 uint32_t nvd7_grhub_code[] = {
-	0x031b0ef5,
+	0x039b0ef5,
 /* 0x0004: queue_put */
 	0x9800d898,
 	0x86f001d9,
 	0x0489b808,
 	0xf00c1bf4,
 	0x21f502f7,
-	0x00f802fe,
+	0x00f8037e,
 /* 0x001c: queue_put_next */
 	0xb60798c4,
 	0x8dbb0384,
@@ -237,184 +237,214 @@
 /* 0x0066: queue_get_done */
 	0x00f80132,
 /* 0x0068: nv_rd32 */
-	0x0728b7f1,
-	0xb906b4b6,
-	0xc9f002ec,
-	0x00bcd01f,
-/* 0x0078: nv_rd32_wait */
-	0xc800bccf,
-	0x1bf41fcc,
-	0x06a7f0fa,
-	0x010921f5,
-	0xf840bfcf,
-/* 0x008d: nv_wr32 */
-	0x28b7f100,
-	0x06b4b607,
-	0xb980bfd0,
-	0xc9f002ec,
-	0x1ec9f01f,
-/* 0x00a3: nv_wr32_wait */
-	0xcf00bcd0,
-	0xccc800bc,
-	0xfa1bf41f,
-/* 0x00ae: watchdog_reset */
-	0x87f100f8,
-	0x84b60430,
-	0x1ff9f006,
-	0xf8008fd0,
-/* 0x00bd: watchdog_clear */
-	0x3087f100,
-	0x0684b604,
-	0xf80080d0,
-/* 0x00c9: wait_donez */
-	0xf094bd00,
-	0x07f10099,
-	0x03f00f00,
-	0x0009d002,
-	0x07f104bd,
-	0x03f00600,
-	0x000ad002,
-/* 0x00e6: wait_donez_ne */
-	0x87f104bd,
-	0x83f00000,
-	0x0088cf01,
-	0xf4888aff,
-	0x94bdf31b,
-	0xf10099f0,
-	0xf0170007,
-	0x09d00203,
-	0xf804bd00,
-/* 0x0109: wait_doneo */
-	0xf094bd00,
-	0x07f10099,
-	0x03f00f00,
-	0x0009d002,
-	0x87f104bd,
-	0x84b60818,
-	0x008ad006,
-/* 0x0124: wait_doneo_e */
-	0x040087f1,
-	0xcf0684b6,
-	0x8aff0088,
-	0xf30bf488,
+	0xf002ecb9,
+	0x07f11fc9,
+	0x03f0ca00,
+	0x000cd001,
+/* 0x007a: nv_rd32_wait */
+	0xc7f104bd,
+	0xc3f0ca00,
+	0x00cccf01,
+	0xf41fccc8,
+	0xa7f0f31b,
+	0x1021f506,
+	0x00f7f101,
+	0x01f3f0cb,
+	0xf800ffcf,
+/* 0x009d: nv_wr32 */
+	0x0007f100,
+	0x0103f0cc,
+	0xbd000fd0,
+	0x02ecb904,
+	0xf01fc9f0,
+	0x07f11ec9,
+	0x03f0ca00,
+	0x000cd001,
+/* 0x00be: nv_wr32_wait */
+	0xc7f104bd,
+	0xc3f0ca00,
+	0x00cccf01,
+	0xf41fccc8,
+	0x00f8f31b,
+/* 0x00d0: wait_donez */
 	0x99f094bd,
 	0x0007f100,
-	0x0203f017,
+	0x0203f00f,
 	0xbd0009d0,
-/* 0x0147: mmctx_size */
-	0xbd00f804,
-/* 0x0149: nv_mmctx_size_loop */
-	0x00e89894,
-	0xb61a85b6,
-	0x84b60180,
-	0x0098bb02,
-	0xb804e0b6,
-	0x1bf404ef,
-	0x029fb9eb,
-/* 0x0166: mmctx_xfer */
-	0x94bd00f8,
-	0xf10199f0,
-	0xf00f0007,
-	0x09d00203,
-	0xf104bd00,
-	0xb6071087,
-	0x94bd0684,
-	0xf405bbfd,
-	0x8bd0090b,
-	0x0099f000,
-/* 0x018c: mmctx_base_disabled */
-	0xf405eefd,
-	0x8ed00c0b,
-	0xc08fd080,
-/* 0x019b: mmctx_multi_disabled */
-	0xb70199f0,
-	0xc8010080,
+	0x0007f104,
+	0x0203f006,
+	0xbd000ad0,
+/* 0x00ed: wait_donez_ne */
+	0x0087f104,
+	0x0183f000,
+	0xff0088cf,
+	0x1bf4888a,
+	0xf094bdf3,
+	0x07f10099,
+	0x03f01700,
+	0x0009d002,
+	0x00f804bd,
+/* 0x0110: wait_doneo */
+	0x99f094bd,
+	0x0007f100,
+	0x0203f00f,
+	0xbd0009d0,
+	0x0007f104,
+	0x0203f006,
+	0xbd000ad0,
+/* 0x012d: wait_doneo_e */
+	0x0087f104,
+	0x0183f000,
+	0xff0088cf,
+	0x0bf4888a,
+	0xf094bdf3,
+	0x07f10099,
+	0x03f01700,
+	0x0009d002,
+	0x00f804bd,
+/* 0x0150: mmctx_size */
+/* 0x0152: nv_mmctx_size_loop */
+	0xe89894bd,
+	0x1a85b600,
+	0xb60180b6,
+	0x98bb0284,
+	0x04e0b600,
+	0xf404efb8,
+	0x9fb9eb1b,
+/* 0x016f: mmctx_xfer */
+	0xbd00f802,
+	0x0199f094,
+	0x0f0007f1,
+	0xd00203f0,
+	0x04bd0009,
+	0xbbfd94bd,
+	0x120bf405,
+	0xc40007f1,
+	0xd00103f0,
+	0x04bd000b,
+/* 0x0197: mmctx_base_disabled */
+	0xfd0099f0,
+	0x0bf405ee,
+	0x0007f11e,
+	0x0103f0c6,
+	0xbd000ed0,
+	0x0007f104,
+	0x0103f0c7,
+	0xbd000fd0,
+	0x0199f004,
+/* 0x01b8: mmctx_multi_disabled */
+	0xb600abc8,
+	0xb9f010b4,
+	0x01aec80c,
+	0xfd11e4b6,
+	0x07f105be,
+	0x03f0c500,
+	0x000bd001,
+/* 0x01d6: mmctx_exec_loop */
+/* 0x01d6: mmctx_wait_free */
+	0xe7f104bd,
+	0xe3f0c500,
+	0x00eecf01,
+	0xf41fe4f0,
+	0xce98f30b,
+	0x05e9fd00,
+	0xc80007f1,
+	0xd00103f0,
+	0x04bd000e,
+	0xb804c0b6,
+	0x1bf404cd,
+	0x02abc8d8,
+/* 0x0207: mmctx_fini_wait */
+	0xf11f1bf4,
+	0xf0c500b7,
+	0xbbcf01b3,
+	0x1fb4f000,
+	0xf410b4b0,
+	0xa7f0f01b,
+	0xd021f402,
+/* 0x0223: mmctx_stop */
+	0xc82b0ef4,
 	0xb4b600ab,
 	0x0cb9f010,
-	0xb601aec8,
-	0xbefd11e4,
-	0x008bd005,
-/* 0x01b4: mmctx_exec_loop */
-/* 0x01b4: mmctx_wait_free */
-	0xf0008ecf,
-	0x0bf41fe4,
-	0x00ce98fa,
-	0xd005e9fd,
-	0xc0b6c08e,
-	0x04cdb804,
-	0xc8e81bf4,
-	0x1bf402ab,
-/* 0x01d5: mmctx_fini_wait */
-	0x008bcf18,
-	0xb01fb4f0,
-	0x1bf410b4,
-	0x02a7f0f7,
-	0xf4c921f4,
-/* 0x01ea: mmctx_stop */
-	0xabc81b0e,
-	0x10b4b600,
-	0xf00cb9f0,
-	0x8bd012b9,
-/* 0x01f9: mmctx_stop_wait */
-	0x008bcf00,
-	0xf412bbc8,
-/* 0x0202: mmctx_done */
-	0x94bdfa1b,
-	0xf10199f0,
-	0xf0170007,
-	0x09d00203,
-	0xf804bd00,
-/* 0x0215: strand_wait */
-	0xf0a0f900,
-	0x21f402a7,
-	0xf8a0fcc9,
-/* 0x0221: strand_pre */
-	0xfc87f100,
-	0x0283f04a,
-	0xd00c97f0,
-	0x21f50089,
-	0x00f80215,
-/* 0x0234: strand_post */
-	0x4afc87f1,
-	0xf00283f0,
-	0x89d00d97,
-	0x1521f500,
-/* 0x0247: strand_set */
-	0xf100f802,
-	0xf04ffca7,
-	0xaba202a3,
-	0xc7f00500,
-	0x00acd00f,
-	0xd00bc7f0,
-	0x21f500bc,
-	0xaed00215,
-	0x0ac7f000,
-	0xf500bcd0,
-	0xf8021521,
-/* 0x0271: strand_ctx_init */
-	0xf094bd00,
-	0x07f10399,
-	0x03f00f00,
+	0xf112b9f0,
+	0xf0c50007,
+	0x0bd00103,
+/* 0x023b: mmctx_stop_wait */
+	0xf104bd00,
+	0xf0c500b7,
+	0xbbcf01b3,
+	0x12bbc800,
+/* 0x024b: mmctx_done */
+	0xbdf31bf4,
+	0x0199f094,
+	0x170007f1,
+	0xd00203f0,
+	0x04bd0009,
+/* 0x025e: strand_wait */
+	0xa0f900f8,
+	0xf402a7f0,
+	0xa0fcd021,
+/* 0x026a: strand_pre */
+	0x97f000f8,
+	0xfc07f10c,
+	0x0203f04a,
+	0xbd0009d0,
+	0x5e21f504,
+/* 0x027f: strand_post */
+	0xf000f802,
+	0x07f10d97,
+	0x03f04afc,
 	0x0009d002,
 	0x21f504bd,
-	0xe7f00221,
-	0x4721f503,
-	0xfca7f102,
-	0x02a3f046,
-	0x0400aba0,
-	0xf040a0d0,
-	0xbcd001c7,
-	0x1521f500,
-	0x010c9202,
-	0xf000acd0,
-	0xbcd002c7,
-	0x1521f500,
-	0x3421f502,
-	0x8087f102,
-	0x0684b608,
-	0xb70089cf,
-	0x95220080,
-/* 0x02ca: ctx_init_strand_loop */
+	0x00f8025e,
+/* 0x0294: strand_set */
+	0xf10fc7f0,
+	0xf04ffc07,
+	0x0cd00203,
+	0xf004bd00,
+	0x07f10bc7,
+	0x03f04afc,
+	0x000cd002,
+	0x07f104bd,
+	0x03f04ffc,
+	0x000ed002,
+	0xc7f004bd,
+	0xfc07f10a,
+	0x0203f04a,
+	0xbd000cd0,
+	0x5e21f504,
+/* 0x02d3: strand_ctx_init */
+	0xbd00f802,
+	0x0399f094,
+	0x0f0007f1,
+	0xd00203f0,
+	0x04bd0009,
+	0x026a21f5,
+	0xf503e7f0,
+	0xbd029421,
+	0xfc07f1c4,
+	0x0203f047,
+	0xbd000cd0,
+	0x01c7f004,
+	0x4afc07f1,
+	0xd00203f0,
+	0x04bd000c,
+	0x025e21f5,
+	0xf1010c92,
+	0xf046fc07,
+	0x0cd00203,
+	0xf004bd00,
+	0x07f102c7,
+	0x03f04afc,
+	0x000cd002,
+	0x21f504bd,
+	0x21f5025e,
+	0x87f1027f,
+	0x83f04200,
+	0x0097f102,
+	0x0293f020,
+	0x950099cf,
+/* 0x034a: ctx_init_strand_loop */
 	0x8ed008fe,
 	0x408ed000,
 	0xb6808acf,
@@ -428,7 +458,7 @@
 	0x170007f1,
 	0xd00203f0,
 	0x04bd0009,
-/* 0x02fe: error */
+/* 0x037e: error */
 	0x07f100f8,
 	0x03f00500,
 	0x000fd002,
@@ -436,82 +466,117 @@
 	0x0007f101,
 	0x0303f007,
 	0xbd000fd0,
-/* 0x031b: init */
+/* 0x039b: init */
 	0xbd00f804,
-	0x0004fe04,
-	0xf10007fe,
-	0xf0120017,
-	0x12d00227,
-	0xb117f100,
-	0x0010fe05,
-	0x040017f1,
-	0xf1c010d0,
-	0xb6040437,
-	0x27f10634,
-	0x32d02003,
-	0x0427f100,
-	0x0132d020,
+	0x0007fe04,
+	0x420017f1,
+	0xcf0013f0,
+	0x11e70011,
+	0x14b60109,
+	0x0014fe08,
+	0xf10227f0,
+	0xf0120007,
+	0x02d00003,
+	0xf104bd00,
+	0xfe06c817,
+	0x24bd0010,
+	0x070007f1,
+	0xd00003f0,
+	0x04bd0002,
+	0x200327f1,
+	0x010007f1,
+	0xd00103f0,
+	0x04bd0002,
+	0x200427f1,
+	0x010407f1,
+	0xd00103f0,
+	0x04bd0002,
 	0x200b27f1,
-	0xf10232d0,
-	0xd0200c27,
-	0x27f10732,
-	0x24b60c24,
-	0x0003b906,
-	0xf10023d0,
+	0x010807f1,
+	0xd00103f0,
+	0x04bd0002,
+	0x200c27f1,
+	0x011c07f1,
+	0xd00103f0,
+	0x04bd0002,
+	0xf1010392,
+	0xf0090007,
+	0x03d00303,
+	0xf104bd00,
 	0xf0870427,
-	0x12d00023,
-	0x0012b700,
-	0x0427f001,
-	0xf40012d0,
-	0xe7f11031,
-	0xe3f09604,
-	0x6821f440,
-	0x8090f1c7,
-	0xf4f00301,
-	0x020f801f,
-	0xbb0117f0,
-	0x12b6041f,
-	0x0c27f101,
-	0x0624b604,
-	0xd00021d0,
-	0x17f14021,
-	0x0e980100,
-	0x010f9800,
-	0x014721f5,
-	0x070037f1,
-	0x950634b6,
-	0x34d00814,
-	0x4034d000,
-	0x130030b7,
-	0xb6001fbb,
-	0x3fd002f5,
-	0x0815b600,
-	0xb60110b6,
-	0x1fb90814,
-	0x7121f502,
-	0x001fbb02,
-	0xf1020398,
-	0xf0200047,
-/* 0x03f6: init_gpc */
-	0x4ea05043,
-	0x1fb90804,
-	0x8d21f402,
-	0x010c4ea0,
-	0x21f4f4bd,
-	0x044ea08d,
-	0x8d21f401,
-	0x01004ea0,
-	0xf402f7f0,
-	0x4ea08d21,
-/* 0x041e: init_gpc_wait */
-	0x21f40800,
-	0x1fffc868,
-	0xa0fa0bf4,
-	0xf408044e,
-	0x1fbb6821,
-	0x0040b700,
-	0x0132b680,
-	0xf1be1bf4,
+	0x07f10023,
+	0x03f00400,
+	0x0002d000,
+	0x27f004bd,
+	0x0007f104,
+	0x0003f003,
+	0xbd0002d0,
+	0x1031f404,
+	0x9604e7f1,
+	0xf440e3f0,
+	0xfeb96821,
+	0x90f1c702,
+	0xf0030180,
+	0x0f801ff4,
+	0x0117f002,
+	0xb6041fbb,
+	0x07f10112,
+	0x03f00300,
+	0x0001d001,
+	0x07f104bd,
+	0x03f00400,
+	0x0001d001,
+	0x17f104bd,
+	0xf7f00100,
+	0xb521f502,
+	0xc721f507,
+	0x10f7f007,
+	0x081421f5,
+	0x98000e98,
+	0x21f5010f,
+	0x14950150,
+	0x0007f108,
+	0x0103f0c0,
+	0xbd0004d0,
+	0x0007f104,
+	0x0103f0c1,
+	0xbd0004d0,
+	0x0030b704,
+	0x001fbb13,
+	0xf102f5b6,
+	0xf0d30007,
+	0x0fd00103,
+	0xb604bd00,
+	0x10b60815,
+	0x0814b601,
+	0xf5021fb9,
+	0xbb02d321,
+	0x0398001f,
+	0x0047f102,
+	0x5043f020,
+/* 0x04f4: init_gpc */
+	0x08044ea0,
+	0xf4021fb9,
+	0x4ea09d21,
+	0xf4bd010c,
+	0xa09d21f4,
+	0xf401044e,
+	0x4ea09d21,
+	0xf7f00100,
+	0x9d21f402,
+	0x08004ea0,
+/* 0x051c: init_gpc_wait */
+	0xc86821f4,
+	0x0bf41fff,
+	0x044ea0fa,
+	0x6821f408,
+	0xb7001fbb,
+	0xb6800040,
+	0x1bf40132,
+	0x00f7f0be,
+	0x081421f5,
+	0xf500f7f0,
+	0xf107b521,
 	0xf0010007,
 	0x01d00203,
 	0xbd04bd00,
@@ -519,402 +584,399 @@
 	0x080007f1,
 	0xd00203f0,
 	0x04bd0001,
-/* 0x0458: main */
+/* 0x0564: main */
 	0xf40031f4,
 	0xd7f00028,
 	0x3921f410,
 	0xb1f401f4,
 	0xf54001e4,
-	0xbd00de1b,
+	0xbd00e91b,
 	0x0499f094,
 	0x0f0007f1,
 	0xd00203f0,
 	0x04bd0009,
-	0x0b0017f1,
-	0xcf0614b6,
-	0x11cf4012,
-	0x1f13c800,
-	0x00870bf5,
+	0xc00017f1,
+	0xcf0213f0,
+	0x27f10011,
+	0x23f0c100,
+	0x0022cf02,
+	0xf51f13c8,
+	0xc800890b,
+	0x0bf41f23,
+	0xb920f962,
+	0x94bd0212,
+	0xf10799f0,
+	0xf00f0007,
+	0x09d00203,
+	0xf404bd00,
+	0x31f40132,
+	0xe821f502,
+	0xf094bd09,
+	0x07f10799,
+	0x03f01700,
+	0x0009d002,
+	0x20fc04bd,
+	0x99f094bd,
+	0x0007f106,
+	0x0203f00f,
+	0xbd0009d0,
+	0x0131f404,
+	0x09e821f5,
+	0x99f094bd,
+	0x0007f106,
+	0x0203f017,
+	0xbd0009d0,
+	0x330ef404,
+/* 0x060c: chsw_prev_no_next */
+	0x12b920f9,
+	0x0132f402,
+	0xf50232f4,
+	0xfc09e821,
+	0x0007f120,
+	0x0203f0c0,
+	0xbd0002d0,
+	0x130ef404,
+/* 0x062c: chsw_no_prev */
 	0xf41f23c8,
-	0x20f9620b,
-	0xbd0212b9,
-	0x0799f094,
-	0x0f0007f1,
+	0x31f40d0b,
+	0x0232f401,
+	0x09e821f5,
+/* 0x063c: chsw_done */
+	0xf10127f0,
+	0xf0c30007,
+	0x02d00203,
+	0xbd04bd00,
+	0x0499f094,
+	0x170007f1,
 	0xd00203f0,
 	0x04bd0009,
-	0xf40132f4,
-	0x21f50231,
-	0x94bd082f,
+	0xff080ef5,
+/* 0x0660: main_not_ctx_switch */
+	0xf401e4b0,
+	0xf2b90d1b,
+	0x7821f502,
+	0x460ef409,
+/* 0x0670: main_not_ctx_chan */
+	0xf402e4b0,
+	0x94bd321b,
 	0xf10799f0,
-	0xf0170007,
+	0xf00f0007,
 	0x09d00203,
-	0xfc04bd00,
-	0xf094bd20,
-	0x07f10699,
-	0x03f00f00,
-	0x0009d002,
-	0x31f404bd,
-	0x2f21f501,
-	0xf094bd08,
-	0x07f10699,
+	0xf404bd00,
+	0x32f40132,
+	0xe821f502,
+	0xf094bd09,
+	0x07f10799,
 	0x03f01700,
 	0x0009d002,
 	0x0ef404bd,
-/* 0x04f9: chsw_prev_no_next */
-	0xb920f931,
-	0x32f40212,
-	0x0232f401,
-	0x082f21f5,
-	0x17f120fc,
-	0x14b60b00,
-	0x0012d006,
-/* 0x0517: chsw_no_prev */
-	0xc8130ef4,
-	0x0bf41f23,
-	0x0131f40d,
-	0xf50232f4,
-/* 0x0527: chsw_done */
-	0xf1082f21,
-	0xb60b0c17,
-	0x27f00614,
-	0x0012d001,
-	0x99f094bd,
-	0x0007f104,
-	0x0203f017,
-	0xbd0009d0,
-	0x130ef504,
-/* 0x0549: main_not_ctx_switch */
-	0x01e4b0ff,
-	0xb90d1bf4,
-	0x21f502f2,
-	0x0ef407bb,
-/* 0x0559: main_not_ctx_chan */
-	0x02e4b046,
-	0xbd321bf4,
-	0x0799f094,
-	0x0f0007f1,
+/* 0x06a5: main_not_ctx_save */
+	0x10ef9411,
+	0xf501f5f0,
+	0xf5037e21,
+/* 0x06b3: main_done */
+	0xbdfeb50e,
+	0x1f29f024,
+	0x080007f1,
 	0xd00203f0,
-	0x04bd0009,
-	0xf40132f4,
-	0x21f50232,
-	0x94bd082f,
-	0xf10799f0,
-	0xf0170007,
-	0x09d00203,
-	0xf404bd00,
-/* 0x058e: main_not_ctx_save */
-	0xef94110e,
-	0x01f5f010,
-	0x02fe21f5,
-	0xfec00ef5,
-/* 0x059c: main_done */
-	0x29f024bd,
-	0x0007f11f,
-	0x0203f008,
-	0xbd0002d0,
-	0xab0ef504,
-/* 0x05b1: ih */
-	0xfe80f9fe,
-	0x80f90188,
-	0xa0f990f9,
-	0xd0f9b0f9,
-	0xf0f9e0f9,
-	0x0acf04bd,
-	0x04abc480,
-	0xf11d0bf4,
-	0xf01900b7,
-	0xbecf10d7,
-	0x00bfcf40,
+	0x04bd0002,
+	0xfea00ef5,
+/* 0x06c8: ih */
+	0x88fe80f9,
+	0xf980f901,
+	0xf9a0f990,
+	0xf9d0f9b0,
+	0xbdf0f9e0,
+	0x00a7f104,
+	0x00a3f002,
+	0xc400aacf,
+	0x0bf404ab,
+	0x10d7f030,
+	0x1a00e7f1,
+	0xcf00e3f0,
+	0xf7f100ee,
+	0xf3f01900,
+	0x00ffcf00,
 	0xb70421f4,
 	0xf00400b0,
-	0xbed001e7,
-/* 0x05e9: ih_no_fifo */
-	0x00abe400,
-	0x0d0bf401,
-	0xf110d7f0,
-	0xf44001e7,
-/* 0x05fa: ih_no_ctxsw */
-	0xb7f10421,
-	0xb0bd0104,
-	0xf4b4abff,
-	0xa7f10d0b,
-	0xa4b60c1c,
-	0x00abd006,
-/* 0x0610: ih_no_other */
-	0xfc400ad0,
+	0x07f101e7,
+	0x03f01d00,
+	0x000ed000,
+/* 0x071a: ih_no_fifo */
+	0xabe404bd,
+	0x0bf40100,
+	0x10d7f00d,
+	0x4001e7f1,
+/* 0x072b: ih_no_ctxsw */
+	0xe40421f4,
+	0xf40400ab,
+	0xb7f1140b,
+	0xbfb90100,
+	0x44e7f102,
+	0x40e3f001,
+/* 0x0743: ih_no_fwmthd */
+	0xf19d21f4,
+	0xbd0104b7,
+	0xb4abffb0,
+	0xf10f0bf4,
+	0xf0070007,
+	0x0bd00303,
+/* 0x075b: ih_no_other */
+	0xf104bd00,
+	0xf0010007,
+	0x0ad00003,
+	0xfc04bd00,
 	0xfce0fcf0,
 	0xfcb0fcd0,
 	0xfc90fca0,
 	0x0088fe80,
 	0x32f480fc,
-/* 0x062b: ctx_4160s */
-	0xf101f800,
-	0xf04160e7,
-	0xf7f040e3,
-	0x8d21f401,
-/* 0x0638: ctx_4160s_wait */
-	0xc86821f4,
-	0x0bf404ff,
-/* 0x0643: ctx_4160c */
-	0xf100f8fa,
-	0xf04160e7,
-	0xf4bd40e3,
-	0xf88d21f4,
-/* 0x0651: ctx_4170s */
-	0x70e7f100,
+/* 0x077f: ctx_4160s */
+	0xf001f800,
+	0xffb901f7,
+	0x60e7f102,
 	0x40e3f041,
-	0xf410f5f0,
-	0x00f88d21,
-/* 0x0660: ctx_4170w */
-	0x4170e7f1,
-	0xf440e3f0,
-	0xf4f06821,
-	0xf31bf410,
-/* 0x0672: ctx_redswitch */
-	0xe7f100f8,
-	0xe4b60614,
-	0x70f7f106,
-	0x00efd002,
-/* 0x0683: ctx_redswitch_delay */
-	0xb608f7f0,
-	0x1bf401f2,
-	0x70f7f1fd,
-	0x00efd007,
-/* 0x0692: ctx_86c */
-	0xe7f100f8,
-	0xe4b6086c,
-	0x00efd006,
-	0x8a14e7f1,
-	0xf440e3f0,
-	0xe7f18d21,
-	0xe3f0a86c,
-	0x8d21f441,
-/* 0x06b2: ctx_load */
+/* 0x078f: ctx_4160s_wait */
+	0xf19d21f4,
+	0xf04160e7,
+	0x21f440e3,
+	0x02ffb968,
+	0xf404ffc8,
+	0x00f8f00b,
+/* 0x07a4: ctx_4160c */
+	0xffb9f4bd,
+	0x60e7f102,
+	0x40e3f041,
+	0xf89d21f4,
+/* 0x07b5: ctx_4170s */
+	0x10f5f000,
+	0xf102ffb9,
+	0xf04170e7,
+	0x21f440e3,
+/* 0x07c7: ctx_4170w */
+	0xf100f89d,
+	0xf04170e7,
+	0x21f440e3,
+	0x02ffb968,
+	0xf410f4f0,
+	0x00f8f01b,
+/* 0x07dc: ctx_redswitch */
+	0x0200e7f1,
+	0xf040e5f0,
+	0xe5f020e5,
+	0x0007f110,
+	0x0103f085,
+	0xbd000ed0,
+	0x08f7f004,
+/* 0x07f8: ctx_redswitch_delay */
+	0xf401f2b6,
+	0xe5f1fd1b,
+	0xe5f10400,
+	0x07f10100,
+	0x03f08500,
+	0x000ed001,
+	0x00f804bd,
+/* 0x0814: ctx_86c */
+	0x1b0007f1,
+	0xd00203f0,
+	0x04bd000f,
+	0xf102ffb9,
+	0xf08a14e7,
+	0x21f440e3,
+	0x02ffb99d,
+	0xa86ce7f1,
+	0xf441e3f0,
+	0x00f89d21,
+/* 0x083c: ctx_mem */
+	0x840007f1,
+	0xd00203f0,
+	0x04bd000f,
+/* 0x0848: ctx_mem_wait */
+	0x8400f7f1,
+	0xcf02f3f0,
+	0xfffd00ff,
+	0xf31bf405,
+/* 0x085a: ctx_load */
 	0x94bd00f8,
 	0xf10599f0,
 	0xf00f0007,
 	0x09d00203,
 	0xf004bd00,
 	0x21f40ca7,
-	0x2417f1c9,
-	0x0614b60a,
-	0xf10010d0,
-	0xb60b0037,
-	0x32d00634,
-	0x0c17f140,
-	0x0614b60a,
-	0xd00747f0,
-	0x14d00012,
-/* 0x06ed: ctx_chan_wait_0 */
-	0x4014cf40,
-	0xf41f44f0,
-	0x32d0fa1b,
-	0x000bfe00,
-	0xb61f2af0,
-	0x20b60424,
-	0xf094bd02,
+	0xf1f4bdd0,
+	0xf0890007,
+	0x0fd00203,
+	0xf104bd00,
+	0xf0c10007,
+	0x02d00203,
+	0xf104bd00,
+	0xf0830007,
+	0x02d00203,
+	0xf004bd00,
+	0x21f507f7,
+	0x07f1083c,
+	0x03f0c000,
+	0x0002d002,
+	0x0bfe04bd,
+	0x1f2af000,
+	0xb60424b6,
+	0x94bd0220,
+	0xf10899f0,
+	0xf00f0007,
+	0x09d00203,
+	0xf104bd00,
+	0xf0810007,
+	0x02d00203,
+	0xf104bd00,
+	0xf1000027,
+	0xf0800023,
+	0x07f10225,
+	0x03f08800,
+	0x0002d002,
+	0x17f004bd,
+	0x0027f110,
+	0x0223f002,
+	0xf80512fa,
+	0xf094bd03,
 	0x07f10899,
-	0x03f00f00,
+	0x03f01700,
 	0x0009d002,
-	0x17f104bd,
-	0x14b60a04,
-	0x0012d006,
-	0x0a2017f1,
-	0xf00614b6,
-	0x23f10227,
-	0x12d08000,
-	0x1017f000,
-	0x020027f1,
-	0xfa0223f0,
-	0x03f80512,
+	0x019804bd,
+	0x1814b681,
+	0xb6800298,
+	0x12fd0825,
+	0x16018005,
 	0x99f094bd,
-	0x0007f108,
+	0x0007f109,
+	0x0203f00f,
+	0xbd0009d0,
+	0x0007f104,
+	0x0203f081,
+	0xbd0001d0,
+	0x0127f004,
+	0x880007f1,
+	0xd00203f0,
+	0x04bd0002,
+	0x010017f1,
+	0xfa0613f0,
+	0x03f80501,
+	0x99f094bd,
+	0x0007f109,
 	0x0203f017,
 	0xbd0009d0,
-	0x81019804,
-	0x981814b6,
-	0x25b68002,
-	0x0512fd08,
-	0xbd160180,
-	0x0999f094,
-	0x0f0007f1,
-	0xd00203f0,
-	0x04bd0009,
-	0x0a0427f1,
-	0xd00624b6,
-	0x27f00021,
-	0x2017f101,
-	0x0614b60a,
-	0xf10012d0,
-	0xf0010017,
-	0x01fa0613,
-	0xbd03f805,
-	0x0999f094,
-	0x170007f1,
-	0xd00203f0,
-	0x04bd0009,
-	0x99f094bd,
-	0x0007f105,
-	0x0203f017,
-	0xbd0009d0,
-/* 0x07bb: ctx_chan */
-	0xf500f804,
-	0xf5062b21,
-	0xf006b221,
-	0x21f40ca7,
-	0x1017f1c9,
-	0x0614b60a,
-	0xd00527f0,
-/* 0x07d6: ctx_chan_wait */
-	0x12cf0012,
-	0x0522fd00,
-	0xf5fa1bf4,
-	0xf8064321,
-/* 0x07e5: ctx_mmio_exec */
-	0x41039800,
-	0x0a0427f1,
-	0xd00624b6,
-	0x34bd0023,
-/* 0x07f4: ctx_mmio_loop */
+	0xf094bd04,
+	0x07f10599,
+	0x03f01700,
+	0x0009d002,
+	0x00f804bd,
+/* 0x0978: ctx_chan */
+	0x077f21f5,
+	0x085a21f5,
+	0xf40ca7f0,
+	0xf7f0d021,
+	0x3c21f505,
+	0xa421f508,
+/* 0x0993: ctx_mmio_exec */
+	0x9800f807,
+	0x07f14103,
+	0x03f08100,
+	0x0003d002,
+	0x34bd04bd,
+/* 0x09a4: ctx_mmio_loop */
 	0xf4ff34c4,
 	0x57f10f1b,
 	0x53f00200,
 	0x0535fa06,
-/* 0x0806: ctx_mmio_pull */
+/* 0x09b6: ctx_mmio_pull */
 	0x4e9803f8,
 	0x814f9880,
-	0xb68d21f4,
+	0xb69d21f4,
 	0x12b60830,
 	0xdf1bf401,
-/* 0x0818: ctx_mmio_done */
-	0xd0160398,
-	0x00800023,
-	0x0017f140,
-	0x0613f001,
-	0xf80601fa,
-/* 0x082f: ctx_xfer */
-	0xf100f803,
-	0xb60c00f7,
-	0xe7f006f4,
-	0x80fed004,
-/* 0x083c: ctx_xfer_idle */
-	0xf100fecf,
-	0xf42000e4,
-	0x11f4f91b,
-	0x1102f406,
-/* 0x084c: ctx_xfer_pre */
-	0xf510f7f0,
-	0xf5069221,
-	0xf4062b21,
-/* 0x085a: ctx_xfer_pre_load */
-	0xf7f01c11,
-	0x5121f502,
-	0x6021f506,
-	0x7221f506,
-	0xf5f4bd06,
-	0xf5065121,
-/* 0x0873: ctx_xfer_exec */
-	0x9806b221,
-	0x27f11601,
-	0x24b60414,
-	0x0020d006,
-	0xa500e7f1,
-	0xb941e3f0,
-	0x21f4021f,
-	0x04e0b68d,
-	0xf001fcf0,
-	0x24b6022c,
-	0x05f2fd01,
-	0xf18d21f4,
-	0xf04afc17,
-	0x27f00213,
-	0x0012d00c,
-	0x021521f5,
-	0x47fc27f1,
-	0xd00223f0,
-	0x2cf00020,
+/* 0x09c8: ctx_mmio_done */
+	0xf1160398,
+	0xf0810007,
+	0x03d00203,
+	0x8004bd00,
+	0x17f14000,
+	0x13f00100,
+	0x0601fa06,
+	0x00f803f8,
+/* 0x09e8: ctx_xfer */
+	0xf104e7f0,
+	0xf0020007,
+	0x0ed00303,
+/* 0x09f7: ctx_xfer_idle */
+	0xf104bd00,
+	0xf00000e7,
+	0xeecf03e3,
+	0x00e4f100,
+	0xf21bf420,
+	0xf40611f4,
+/* 0x0a0e: ctx_xfer_pre */
+	0xf7f01102,
+	0x1421f510,
+	0x7f21f508,
+	0x1c11f407,
+/* 0x0a1c: ctx_xfer_pre_load */
+	0xf502f7f0,
+	0xf507b521,
+	0xf507c721,
+	0xbd07dc21,
+	0xb521f5f4,
+	0x5a21f507,
+/* 0x0a35: ctx_xfer_exec */
+	0x16019808,
+	0x07f124bd,
+	0x03f00500,
+	0x0002d001,
+	0x1fb904bd,
+	0x00e7f102,
+	0x41e3f0a5,
+	0xf09d21f4,
+	0x2cf001fc,
+	0x0124b602,
+	0xb905f2fd,
+	0xe7f102ff,
+	0xe3f0a504,
+	0x9d21f441,
+	0x026a21f5,
+	0x07f124bd,
+	0x03f047fc,
+	0x0002d002,
+	0x2cf004bd,
 	0x0320b601,
-	0xf00012d0,
-	0xa5f001ac,
-	0x00b7f006,
-	0x98000c98,
-	0xe7f0010d,
-	0x6621f500,
-	0x08a7f001,
-	0x010921f5,
-	0x021521f5,
-	0xf02201f4,
-	0x21f40ca7,
-	0x1017f1c9,
-	0x0614b60a,
-	0xd00527f0,
-/* 0x08fa: ctx_xfer_post_save_wait */
-	0x12cf0012,
-	0x0522fd00,
-	0xf4fa1bf4,
-/* 0x0906: ctx_xfer_post */
-	0xf7f03202,
-	0x5121f502,
-	0xf5f4bd06,
-	0xf5069221,
-	0xf5023421,
-	0xbd066021,
-	0x5121f5f4,
-	0x1011f406,
-	0xfd400198,
-	0x0bf40511,
-	0xe521f507,
-/* 0x0931: ctx_xfer_no_post_mmio */
-	0x4321f507,
-/* 0x0935: ctx_xfer_done */
-	0x0000f806,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
+	0x4afc07f1,
+	0xd00203f0,
+	0x04bd0002,
+	0xf001acf0,
+	0xb7f006a5,
+	0x000c9800,
+	0xf0010d98,
+	0x21f500e7,
+	0xa7f0016f,
+	0x1021f508,
+	0x5e21f501,
+	0x1301f402,
+	0xf40ca7f0,
+	0xf7f0d021,
+	0x3c21f505,
+	0x3202f408,
+/* 0x0ac4: ctx_xfer_post */
+	0xf502f7f0,
+	0xbd07b521,
+	0x1421f5f4,
+	0x7f21f508,
+	0xc721f502,
+	0xf5f4bd07,
+	0xf407b521,
+	0x01981011,
+	0x0511fd40,
+	0xf5070bf4,
+/* 0x0aef: ctx_xfer_no_post_mmio */
+	0xf5099321,
+/* 0x0af3: ctx_xfer_done */
+	0xf807a421,
 	0x00000000,
 	0x00000000,
 	0x00000000,
diff --git a/drivers/gpu/drm/nouveau/core/engine/graph/fuc/hubnve0.fuc.h b/drivers/gpu/drm/nouveau/core/engine/graph/fuc/hubnve0.fuc.h
index eb7bc0e..1c179bdd 100644
--- a/drivers/gpu/drm/nouveau/core/engine/graph/fuc/hubnve0.fuc.h
+++ b/drivers/gpu/drm/nouveau/core/engine/graph/fuc/hubnve0.fuc.h
@@ -206,14 +206,14 @@
 };
 
 uint32_t nve0_grhub_code[] = {
-	0x031b0ef5,
+	0x039b0ef5,
 /* 0x0004: queue_put */
 	0x9800d898,
 	0x86f001d9,
 	0x0489b808,
 	0xf00c1bf4,
 	0x21f502f7,
-	0x00f802fe,
+	0x00f8037e,
 /* 0x001c: queue_put_next */
 	0xb60798c4,
 	0x8dbb0384,
@@ -237,184 +237,214 @@
 /* 0x0066: queue_get_done */
 	0x00f80132,
 /* 0x0068: nv_rd32 */
-	0x0728b7f1,
-	0xb906b4b6,
-	0xc9f002ec,
-	0x00bcd01f,
-/* 0x0078: nv_rd32_wait */
-	0xc800bccf,
-	0x1bf41fcc,
-	0x06a7f0fa,
-	0x010921f5,
-	0xf840bfcf,
-/* 0x008d: nv_wr32 */
-	0x28b7f100,
-	0x06b4b607,
-	0xb980bfd0,
-	0xc9f002ec,
-	0x1ec9f01f,
-/* 0x00a3: nv_wr32_wait */
-	0xcf00bcd0,
-	0xccc800bc,
-	0xfa1bf41f,
-/* 0x00ae: watchdog_reset */
-	0x87f100f8,
-	0x84b60430,
-	0x1ff9f006,
-	0xf8008fd0,
-/* 0x00bd: watchdog_clear */
-	0x3087f100,
-	0x0684b604,
-	0xf80080d0,
-/* 0x00c9: wait_donez */
-	0xf094bd00,
-	0x07f10099,
-	0x03f00f00,
-	0x0009d002,
-	0x07f104bd,
-	0x03f00600,
-	0x000ad002,
-/* 0x00e6: wait_donez_ne */
-	0x87f104bd,
-	0x83f00000,
-	0x0088cf01,
-	0xf4888aff,
-	0x94bdf31b,
-	0xf10099f0,
-	0xf0170007,
-	0x09d00203,
-	0xf804bd00,
-/* 0x0109: wait_doneo */
-	0xf094bd00,
-	0x07f10099,
-	0x03f00f00,
-	0x0009d002,
-	0x87f104bd,
-	0x84b60818,
-	0x008ad006,
-/* 0x0124: wait_doneo_e */
-	0x040087f1,
-	0xcf0684b6,
-	0x8aff0088,
-	0xf30bf488,
+	0xf002ecb9,
+	0x07f11fc9,
+	0x03f0ca00,
+	0x000cd001,
+/* 0x007a: nv_rd32_wait */
+	0xc7f104bd,
+	0xc3f0ca00,
+	0x00cccf01,
+	0xf41fccc8,
+	0xa7f0f31b,
+	0x1021f506,
+	0x00f7f101,
+	0x01f3f0cb,
+	0xf800ffcf,
+/* 0x009d: nv_wr32 */
+	0x0007f100,
+	0x0103f0cc,
+	0xbd000fd0,
+	0x02ecb904,
+	0xf01fc9f0,
+	0x07f11ec9,
+	0x03f0ca00,
+	0x000cd001,
+/* 0x00be: nv_wr32_wait */
+	0xc7f104bd,
+	0xc3f0ca00,
+	0x00cccf01,
+	0xf41fccc8,
+	0x00f8f31b,
+/* 0x00d0: wait_donez */
 	0x99f094bd,
 	0x0007f100,
-	0x0203f017,
+	0x0203f00f,
 	0xbd0009d0,
-/* 0x0147: mmctx_size */
-	0xbd00f804,
-/* 0x0149: nv_mmctx_size_loop */
-	0x00e89894,
-	0xb61a85b6,
-	0x84b60180,
-	0x0098bb02,
-	0xb804e0b6,
-	0x1bf404ef,
-	0x029fb9eb,
-/* 0x0166: mmctx_xfer */
-	0x94bd00f8,
-	0xf10199f0,
-	0xf00f0007,
-	0x09d00203,
-	0xf104bd00,
-	0xb6071087,
-	0x94bd0684,
-	0xf405bbfd,
-	0x8bd0090b,
-	0x0099f000,
-/* 0x018c: mmctx_base_disabled */
-	0xf405eefd,
-	0x8ed00c0b,
-	0xc08fd080,
-/* 0x019b: mmctx_multi_disabled */
-	0xb70199f0,
-	0xc8010080,
+	0x0007f104,
+	0x0203f006,
+	0xbd000ad0,
+/* 0x00ed: wait_donez_ne */
+	0x0087f104,
+	0x0183f000,
+	0xff0088cf,
+	0x1bf4888a,
+	0xf094bdf3,
+	0x07f10099,
+	0x03f01700,
+	0x0009d002,
+	0x00f804bd,
+/* 0x0110: wait_doneo */
+	0x99f094bd,
+	0x0007f100,
+	0x0203f00f,
+	0xbd0009d0,
+	0x0007f104,
+	0x0203f006,
+	0xbd000ad0,
+/* 0x012d: wait_doneo_e */
+	0x0087f104,
+	0x0183f000,
+	0xff0088cf,
+	0x0bf4888a,
+	0xf094bdf3,
+	0x07f10099,
+	0x03f01700,
+	0x0009d002,
+	0x00f804bd,
+/* 0x0150: mmctx_size */
+/* 0x0152: nv_mmctx_size_loop */
+	0xe89894bd,
+	0x1a85b600,
+	0xb60180b6,
+	0x98bb0284,
+	0x04e0b600,
+	0xf404efb8,
+	0x9fb9eb1b,
+/* 0x016f: mmctx_xfer */
+	0xbd00f802,
+	0x0199f094,
+	0x0f0007f1,
+	0xd00203f0,
+	0x04bd0009,
+	0xbbfd94bd,
+	0x120bf405,
+	0xc40007f1,
+	0xd00103f0,
+	0x04bd000b,
+/* 0x0197: mmctx_base_disabled */
+	0xfd0099f0,
+	0x0bf405ee,
+	0x0007f11e,
+	0x0103f0c6,
+	0xbd000ed0,
+	0x0007f104,
+	0x0103f0c7,
+	0xbd000fd0,
+	0x0199f004,
+/* 0x01b8: mmctx_multi_disabled */
+	0xb600abc8,
+	0xb9f010b4,
+	0x01aec80c,
+	0xfd11e4b6,
+	0x07f105be,
+	0x03f0c500,
+	0x000bd001,
+/* 0x01d6: mmctx_exec_loop */
+/* 0x01d6: mmctx_wait_free */
+	0xe7f104bd,
+	0xe3f0c500,
+	0x00eecf01,
+	0xf41fe4f0,
+	0xce98f30b,
+	0x05e9fd00,
+	0xc80007f1,
+	0xd00103f0,
+	0x04bd000e,
+	0xb804c0b6,
+	0x1bf404cd,
+	0x02abc8d8,
+/* 0x0207: mmctx_fini_wait */
+	0xf11f1bf4,
+	0xf0c500b7,
+	0xbbcf01b3,
+	0x1fb4f000,
+	0xf410b4b0,
+	0xa7f0f01b,
+	0xd021f402,
+/* 0x0223: mmctx_stop */
+	0xc82b0ef4,
 	0xb4b600ab,
 	0x0cb9f010,
-	0xb601aec8,
-	0xbefd11e4,
-	0x008bd005,
-/* 0x01b4: mmctx_exec_loop */
-/* 0x01b4: mmctx_wait_free */
-	0xf0008ecf,
-	0x0bf41fe4,
-	0x00ce98fa,
-	0xd005e9fd,
-	0xc0b6c08e,
-	0x04cdb804,
-	0xc8e81bf4,
-	0x1bf402ab,
-/* 0x01d5: mmctx_fini_wait */
-	0x008bcf18,
-	0xb01fb4f0,
-	0x1bf410b4,
-	0x02a7f0f7,
-	0xf4c921f4,
-/* 0x01ea: mmctx_stop */
-	0xabc81b0e,
-	0x10b4b600,
-	0xf00cb9f0,
-	0x8bd012b9,
-/* 0x01f9: mmctx_stop_wait */
-	0x008bcf00,
-	0xf412bbc8,
-/* 0x0202: mmctx_done */
-	0x94bdfa1b,
-	0xf10199f0,
-	0xf0170007,
-	0x09d00203,
-	0xf804bd00,
-/* 0x0215: strand_wait */
-	0xf0a0f900,
-	0x21f402a7,
-	0xf8a0fcc9,
-/* 0x0221: strand_pre */
-	0xfc87f100,
-	0x0283f04a,
-	0xd00c97f0,
-	0x21f50089,
-	0x00f80215,
-/* 0x0234: strand_post */
-	0x4afc87f1,
-	0xf00283f0,
-	0x89d00d97,
-	0x1521f500,
-/* 0x0247: strand_set */
-	0xf100f802,
-	0xf04ffca7,
-	0xaba202a3,
-	0xc7f00500,
-	0x00acd00f,
-	0xd00bc7f0,
-	0x21f500bc,
-	0xaed00215,
-	0x0ac7f000,
-	0xf500bcd0,
-	0xf8021521,
-/* 0x0271: strand_ctx_init */
-	0xf094bd00,
-	0x07f10399,
-	0x03f00f00,
+	0xf112b9f0,
+	0xf0c50007,
+	0x0bd00103,
+/* 0x023b: mmctx_stop_wait */
+	0xf104bd00,
+	0xf0c500b7,
+	0xbbcf01b3,
+	0x12bbc800,
+/* 0x024b: mmctx_done */
+	0xbdf31bf4,
+	0x0199f094,
+	0x170007f1,
+	0xd00203f0,
+	0x04bd0009,
+/* 0x025e: strand_wait */
+	0xa0f900f8,
+	0xf402a7f0,
+	0xa0fcd021,
+/* 0x026a: strand_pre */
+	0x97f000f8,
+	0xfc07f10c,
+	0x0203f04a,
+	0xbd0009d0,
+	0x5e21f504,
+/* 0x027f: strand_post */
+	0xf000f802,
+	0x07f10d97,
+	0x03f04afc,
 	0x0009d002,
 	0x21f504bd,
-	0xe7f00221,
-	0x4721f503,
-	0xfca7f102,
-	0x02a3f046,
-	0x0400aba0,
-	0xf040a0d0,
-	0xbcd001c7,
-	0x1521f500,
-	0x010c9202,
-	0xf000acd0,
-	0xbcd002c7,
-	0x1521f500,
-	0x3421f502,
-	0x8087f102,
-	0x0684b608,
-	0xb70089cf,
-	0x95220080,
-/* 0x02ca: ctx_init_strand_loop */
+	0x00f8025e,
+/* 0x0294: strand_set */
+	0xf10fc7f0,
+	0xf04ffc07,
+	0x0cd00203,
+	0xf004bd00,
+	0x07f10bc7,
+	0x03f04afc,
+	0x000cd002,
+	0x07f104bd,
+	0x03f04ffc,
+	0x000ed002,
+	0xc7f004bd,
+	0xfc07f10a,
+	0x0203f04a,
+	0xbd000cd0,
+	0x5e21f504,
+/* 0x02d3: strand_ctx_init */
+	0xbd00f802,
+	0x0399f094,
+	0x0f0007f1,
+	0xd00203f0,
+	0x04bd0009,
+	0x026a21f5,
+	0xf503e7f0,
+	0xbd029421,
+	0xfc07f1c4,
+	0x0203f047,
+	0xbd000cd0,
+	0x01c7f004,
+	0x4afc07f1,
+	0xd00203f0,
+	0x04bd000c,
+	0x025e21f5,
+	0xf1010c92,
+	0xf046fc07,
+	0x0cd00203,
+	0xf004bd00,
+	0x07f102c7,
+	0x03f04afc,
+	0x000cd002,
+	0x21f504bd,
+	0x21f5025e,
+	0x87f1027f,
+	0x83f04200,
+	0x0097f102,
+	0x0293f020,
+	0x950099cf,
+/* 0x034a: ctx_init_strand_loop */
 	0x8ed008fe,
 	0x408ed000,
 	0xb6808acf,
@@ -428,7 +458,7 @@
 	0x170007f1,
 	0xd00203f0,
 	0x04bd0009,
-/* 0x02fe: error */
+/* 0x037e: error */
 	0x07f100f8,
 	0x03f00500,
 	0x000fd002,
@@ -436,82 +466,117 @@
 	0x0007f101,
 	0x0303f007,
 	0xbd000fd0,
-/* 0x031b: init */
+/* 0x039b: init */
 	0xbd00f804,
-	0x0004fe04,
-	0xf10007fe,
-	0xf0120017,
-	0x12d00227,
-	0xb117f100,
-	0x0010fe05,
-	0x040017f1,
-	0xf1c010d0,
-	0xb6040437,
-	0x27f10634,
-	0x32d02003,
-	0x0427f100,
-	0x0132d020,
+	0x0007fe04,
+	0x420017f1,
+	0xcf0013f0,
+	0x11e70011,
+	0x14b60109,
+	0x0014fe08,
+	0xf10227f0,
+	0xf0120007,
+	0x02d00003,
+	0xf104bd00,
+	0xfe06c817,
+	0x24bd0010,
+	0x070007f1,
+	0xd00003f0,
+	0x04bd0002,
+	0x200327f1,
+	0x010007f1,
+	0xd00103f0,
+	0x04bd0002,
+	0x200427f1,
+	0x010407f1,
+	0xd00103f0,
+	0x04bd0002,
 	0x200b27f1,
-	0xf10232d0,
-	0xd0200c27,
-	0x27f10732,
-	0x24b60c24,
-	0x0003b906,
-	0xf10023d0,
+	0x010807f1,
+	0xd00103f0,
+	0x04bd0002,
+	0x200c27f1,
+	0x011c07f1,
+	0xd00103f0,
+	0x04bd0002,
+	0xf1010392,
+	0xf0090007,
+	0x03d00303,
+	0xf104bd00,
 	0xf0870427,
-	0x12d00023,
-	0x0012b700,
-	0x0427f001,
-	0xf40012d0,
-	0xe7f11031,
-	0xe3f09604,
-	0x6821f440,
-	0x8090f1c7,
-	0xf4f00301,
-	0x020f801f,
-	0xbb0117f0,
-	0x12b6041f,
-	0x0c27f101,
-	0x0624b604,
-	0xd00021d0,
-	0x17f14021,
-	0x0e980100,
-	0x010f9800,
-	0x014721f5,
-	0x070037f1,
-	0x950634b6,
-	0x34d00814,
-	0x4034d000,
-	0x130030b7,
-	0xb6001fbb,
-	0x3fd002f5,
-	0x0815b600,
-	0xb60110b6,
-	0x1fb90814,
-	0x7121f502,
-	0x001fbb02,
-	0xf1020398,
-	0xf0200047,
-/* 0x03f6: init_gpc */
-	0x4ea05043,
-	0x1fb90804,
-	0x8d21f402,
-	0x010c4ea0,
-	0x21f4f4bd,
-	0x044ea08d,
-	0x8d21f401,
-	0x01004ea0,
-	0xf402f7f0,
-	0x4ea08d21,
-/* 0x041e: init_gpc_wait */
-	0x21f40800,
-	0x1fffc868,
-	0xa0fa0bf4,
-	0xf408044e,
-	0x1fbb6821,
-	0x0040b700,
-	0x0132b680,
-	0xf1be1bf4,
+	0x07f10023,
+	0x03f00400,
+	0x0002d000,
+	0x27f004bd,
+	0x0007f104,
+	0x0003f003,
+	0xbd0002d0,
+	0x1031f404,
+	0x9604e7f1,
+	0xf440e3f0,
+	0xfeb96821,
+	0x90f1c702,
+	0xf0030180,
+	0x0f801ff4,
+	0x0117f002,
+	0xb6041fbb,
+	0x07f10112,
+	0x03f00300,
+	0x0001d001,
+	0x07f104bd,
+	0x03f00400,
+	0x0001d001,
+	0x17f104bd,
+	0xf7f00100,
+	0x7f21f502,
+	0x9121f507,
+	0x10f7f007,
+	0x07de21f5,
+	0x98000e98,
+	0x21f5010f,
+	0x14950150,
+	0x0007f108,
+	0x0103f0c0,
+	0xbd0004d0,
+	0x0007f104,
+	0x0103f0c1,
+	0xbd0004d0,
+	0x0030b704,
+	0x001fbb13,
+	0xf102f5b6,
+	0xf0d30007,
+	0x0fd00103,
+	0xb604bd00,
+	0x10b60815,
+	0x0814b601,
+	0xf5021fb9,
+	0xbb02d321,
+	0x0398001f,
+	0x0047f102,
+	0x5043f020,
+/* 0x04f4: init_gpc */
+	0x08044ea0,
+	0xf4021fb9,
+	0x4ea09d21,
+	0xf4bd010c,
+	0xa09d21f4,
+	0xf401044e,
+	0x4ea09d21,
+	0xf7f00100,
+	0x9d21f402,
+	0x08004ea0,
+/* 0x051c: init_gpc_wait */
+	0xc86821f4,
+	0x0bf41fff,
+	0x044ea0fa,
+	0x6821f408,
+	0xb7001fbb,
+	0xb6800040,
+	0x1bf40132,
+	0x00f7f0be,
+	0x07de21f5,
+	0xf500f7f0,
+	0xf1077f21,
 	0xf0010007,
 	0x01d00203,
 	0xbd04bd00,
@@ -519,382 +584,379 @@
 	0x080007f1,
 	0xd00203f0,
 	0x04bd0001,
-/* 0x0458: main */
+/* 0x0564: main */
 	0xf40031f4,
 	0xd7f00028,
 	0x3921f410,
 	0xb1f401f4,
 	0xf54001e4,
-	0xbd00de1b,
+	0xbd00e91b,
 	0x0499f094,
 	0x0f0007f1,
 	0xd00203f0,
 	0x04bd0009,
-	0x0b0017f1,
-	0xcf0614b6,
-	0x11cf4012,
-	0x1f13c800,
-	0x00870bf5,
+	0xc00017f1,
+	0xcf0213f0,
+	0x27f10011,
+	0x23f0c100,
+	0x0022cf02,
+	0xf51f13c8,
+	0xc800890b,
+	0x0bf41f23,
+	0xb920f962,
+	0x94bd0212,
+	0xf10799f0,
+	0xf00f0007,
+	0x09d00203,
+	0xf404bd00,
+	0x31f40132,
+	0xaa21f502,
+	0xf094bd09,
+	0x07f10799,
+	0x03f01700,
+	0x0009d002,
+	0x20fc04bd,
+	0x99f094bd,
+	0x0007f106,
+	0x0203f00f,
+	0xbd0009d0,
+	0x0131f404,
+	0x09aa21f5,
+	0x99f094bd,
+	0x0007f106,
+	0x0203f017,
+	0xbd0009d0,
+	0x330ef404,
+/* 0x060c: chsw_prev_no_next */
+	0x12b920f9,
+	0x0132f402,
+	0xf50232f4,
+	0xfc09aa21,
+	0x0007f120,
+	0x0203f0c0,
+	0xbd0002d0,
+	0x130ef404,
+/* 0x062c: chsw_no_prev */
 	0xf41f23c8,
-	0x20f9620b,
-	0xbd0212b9,
-	0x0799f094,
-	0x0f0007f1,
+	0x31f40d0b,
+	0x0232f401,
+	0x09aa21f5,
+/* 0x063c: chsw_done */
+	0xf10127f0,
+	0xf0c30007,
+	0x02d00203,
+	0xbd04bd00,
+	0x0499f094,
+	0x170007f1,
 	0xd00203f0,
 	0x04bd0009,
-	0xf40132f4,
-	0x21f50231,
-	0x94bd0801,
+	0xff080ef5,
+/* 0x0660: main_not_ctx_switch */
+	0xf401e4b0,
+	0xf2b90d1b,
+	0x4221f502,
+	0x460ef409,
+/* 0x0670: main_not_ctx_chan */
+	0xf402e4b0,
+	0x94bd321b,
 	0xf10799f0,
-	0xf0170007,
+	0xf00f0007,
 	0x09d00203,
-	0xfc04bd00,
-	0xf094bd20,
-	0x07f10699,
-	0x03f00f00,
-	0x0009d002,
-	0x31f404bd,
-	0x0121f501,
-	0xf094bd08,
-	0x07f10699,
+	0xf404bd00,
+	0x32f40132,
+	0xaa21f502,
+	0xf094bd09,
+	0x07f10799,
 	0x03f01700,
 	0x0009d002,
 	0x0ef404bd,
-/* 0x04f9: chsw_prev_no_next */
-	0xb920f931,
-	0x32f40212,
-	0x0232f401,
-	0x080121f5,
-	0x17f120fc,
-	0x14b60b00,
-	0x0012d006,
-/* 0x0517: chsw_no_prev */
-	0xc8130ef4,
-	0x0bf41f23,
-	0x0131f40d,
-	0xf50232f4,
-/* 0x0527: chsw_done */
-	0xf1080121,
-	0xb60b0c17,
-	0x27f00614,
-	0x0012d001,
-	0x99f094bd,
-	0x0007f104,
-	0x0203f017,
-	0xbd0009d0,
-	0x130ef504,
-/* 0x0549: main_not_ctx_switch */
-	0x01e4b0ff,
-	0xb90d1bf4,
-	0x21f502f2,
-	0x0ef40795,
-/* 0x0559: main_not_ctx_chan */
-	0x02e4b046,
-	0xbd321bf4,
-	0x0799f094,
-	0x0f0007f1,
+/* 0x06a5: main_not_ctx_save */
+	0x10ef9411,
+	0xf501f5f0,
+	0xf5037e21,
+/* 0x06b3: main_done */
+	0xbdfeb50e,
+	0x1f29f024,
+	0x080007f1,
 	0xd00203f0,
-	0x04bd0009,
-	0xf40132f4,
-	0x21f50232,
-	0x94bd0801,
-	0xf10799f0,
-	0xf0170007,
-	0x09d00203,
-	0xf404bd00,
-/* 0x058e: main_not_ctx_save */
-	0xef94110e,
-	0x01f5f010,
-	0x02fe21f5,
-	0xfec00ef5,
-/* 0x059c: main_done */
-	0x29f024bd,
-	0x0007f11f,
-	0x0203f008,
-	0xbd0002d0,
-	0xab0ef504,
-/* 0x05b1: ih */
-	0xfe80f9fe,
-	0x80f90188,
-	0xa0f990f9,
-	0xd0f9b0f9,
-	0xf0f9e0f9,
-	0x0acf04bd,
-	0x04abc480,
-	0xf11d0bf4,
-	0xf01900b7,
-	0xbecf10d7,
-	0x00bfcf40,
+	0x04bd0002,
+	0xfea00ef5,
+/* 0x06c8: ih */
+	0x88fe80f9,
+	0xf980f901,
+	0xf9a0f990,
+	0xf9d0f9b0,
+	0xbdf0f9e0,
+	0x00a7f104,
+	0x00a3f002,
+	0xc400aacf,
+	0x0bf404ab,
+	0x10d7f030,
+	0x1a00e7f1,
+	0xcf00e3f0,
+	0xf7f100ee,
+	0xf3f01900,
+	0x00ffcf00,
 	0xb70421f4,
 	0xf00400b0,
-	0xbed001e7,
-/* 0x05e9: ih_no_fifo */
-	0x00abe400,
-	0x0d0bf401,
-	0xf110d7f0,
-	0xf44001e7,
-/* 0x05fa: ih_no_ctxsw */
-	0xb7f10421,
-	0xb0bd0104,
-	0xf4b4abff,
-	0xa7f10d0b,
-	0xa4b60c1c,
-	0x00abd006,
-/* 0x0610: ih_no_other */
-	0xfc400ad0,
+	0x07f101e7,
+	0x03f01d00,
+	0x000ed000,
+/* 0x071a: ih_no_fifo */
+	0xabe404bd,
+	0x0bf40100,
+	0x10d7f00d,
+	0x4001e7f1,
+/* 0x072b: ih_no_ctxsw */
+	0xe40421f4,
+	0xf40400ab,
+	0xb7f1140b,
+	0xbfb90100,
+	0x44e7f102,
+	0x40e3f001,
+/* 0x0743: ih_no_fwmthd */
+	0xf19d21f4,
+	0xbd0104b7,
+	0xb4abffb0,
+	0xf10f0bf4,
+	0xf0070007,
+	0x0bd00303,
+/* 0x075b: ih_no_other */
+	0xf104bd00,
+	0xf0010007,
+	0x0ad00003,
+	0xfc04bd00,
 	0xfce0fcf0,
 	0xfcb0fcd0,
 	0xfc90fca0,
 	0x0088fe80,
 	0x32f480fc,
-/* 0x062b: ctx_4170s */
-	0xf101f800,
-	0xf04170e7,
-	0xf5f040e3,
-	0x8d21f410,
-/* 0x063a: ctx_4170w */
+/* 0x077f: ctx_4170s */
+	0xf001f800,
+	0xffb910f5,
+	0x70e7f102,
+	0x40e3f041,
+	0xf89d21f4,
+/* 0x0791: ctx_4170w */
+	0x70e7f100,
+	0x40e3f041,
+	0xb96821f4,
+	0xf4f002ff,
+	0xf01bf410,
+/* 0x07a6: ctx_redswitch */
 	0xe7f100f8,
-	0xe3f04170,
-	0x6821f440,
-	0xf410f4f0,
+	0xe5f00200,
+	0x20e5f040,
+	0xf110e5f0,
+	0xf0850007,
+	0x0ed00103,
+	0xf004bd00,
+/* 0x07c2: ctx_redswitch_delay */
+	0xf2b608f7,
+	0xfd1bf401,
+	0x0400e5f1,
+	0x0100e5f1,
+	0x850007f1,
+	0xd00103f0,
+	0x04bd000e,
+/* 0x07de: ctx_86c */
+	0x07f100f8,
+	0x03f01b00,
+	0x000fd002,
+	0xffb904bd,
+	0x14e7f102,
+	0x40e3f08a,
+	0xb99d21f4,
+	0xe7f102ff,
+	0xe3f0a86c,
+	0x9d21f441,
+/* 0x0806: ctx_mem */
+	0x07f100f8,
+	0x03f08400,
+	0x000fd002,
+/* 0x0812: ctx_mem_wait */
+	0xf7f104bd,
+	0xf3f08400,
+	0x00ffcf02,
+	0xf405fffd,
 	0x00f8f31b,
-/* 0x064c: ctx_redswitch */
-	0x0614e7f1,
-	0xf106e4b6,
-	0xd00270f7,
-	0xf7f000ef,
-/* 0x065d: ctx_redswitch_delay */
-	0x01f2b608,
-	0xf1fd1bf4,
-	0xd00770f7,
-	0x00f800ef,
-/* 0x066c: ctx_86c */
-	0x086ce7f1,
-	0xd006e4b6,
-	0xe7f100ef,
-	0xe3f08a14,
-	0x8d21f440,
-	0xa86ce7f1,
-	0xf441e3f0,
-	0x00f88d21,
-/* 0x068c: ctx_load */
+/* 0x0824: ctx_load */
 	0x99f094bd,
 	0x0007f105,
 	0x0203f00f,
 	0xbd0009d0,
 	0x0ca7f004,
-	0xf1c921f4,
-	0xb60a2417,
-	0x10d00614,
-	0x0037f100,
-	0x0634b60b,
-	0xf14032d0,
-	0xb60a0c17,
-	0x47f00614,
-	0x0012d007,
-/* 0x06c7: ctx_chan_wait_0 */
-	0xcf4014d0,
-	0x44f04014,
-	0xfa1bf41f,
-	0xfe0032d0,
-	0x2af0000b,
-	0x0424b61f,
-	0xbd0220b6,
+	0xbdd021f4,
+	0x0007f1f4,
+	0x0203f089,
+	0xbd000fd0,
+	0x0007f104,
+	0x0203f0c1,
+	0xbd0002d0,
+	0x0007f104,
+	0x0203f083,
+	0xbd0002d0,
+	0x07f7f004,
+	0x080621f5,
+	0xc00007f1,
+	0xd00203f0,
+	0x04bd0002,
+	0xf0000bfe,
+	0x24b61f2a,
+	0x0220b604,
+	0x99f094bd,
+	0x0007f108,
+	0x0203f00f,
+	0xbd0009d0,
+	0x0007f104,
+	0x0203f081,
+	0xbd0002d0,
+	0x0027f104,
+	0x0023f100,
+	0x0225f080,
+	0x880007f1,
+	0xd00203f0,
+	0x04bd0002,
+	0xf11017f0,
+	0xf0020027,
+	0x12fa0223,
+	0xbd03f805,
 	0x0899f094,
-	0x0f0007f1,
+	0x170007f1,
 	0xd00203f0,
 	0x04bd0009,
-	0x0a0417f1,
-	0xd00614b6,
-	0x17f10012,
-	0x14b60a20,
-	0x0227f006,
-	0x800023f1,
-	0xf00012d0,
-	0x27f11017,
-	0x23f00200,
-	0x0512fa02,
+	0xb6810198,
+	0x02981814,
+	0x0825b680,
+	0x800512fd,
+	0x94bd1601,
+	0xf10999f0,
+	0xf00f0007,
+	0x09d00203,
+	0xf104bd00,
+	0xf0810007,
+	0x01d00203,
+	0xf004bd00,
+	0x07f10127,
+	0x03f08800,
+	0x0002d002,
+	0x17f104bd,
+	0x13f00100,
+	0x0501fa06,
 	0x94bd03f8,
-	0xf10899f0,
+	0xf10999f0,
 	0xf0170007,
 	0x09d00203,
-	0x9804bd00,
-	0x14b68101,
-	0x80029818,
-	0xfd0825b6,
-	0x01800512,
-	0xf094bd16,
-	0x07f10999,
-	0x03f00f00,
-	0x0009d002,
-	0x27f104bd,
-	0x24b60a04,
-	0x0021d006,
-	0xf10127f0,
-	0xb60a2017,
-	0x12d00614,
-	0x0017f100,
-	0x0613f001,
-	0xf80501fa,
-	0xf094bd03,
-	0x07f10999,
-	0x03f01700,
-	0x0009d002,
-	0x94bd04bd,
-	0xf10599f0,
-	0xf0170007,
-	0x09d00203,
-	0xf804bd00,
-/* 0x0795: ctx_chan */
-	0x8c21f500,
-	0x0ca7f006,
-	0xf1c921f4,
-	0xb60a1017,
-	0x27f00614,
-	0x0012d005,
-/* 0x07ac: ctx_chan_wait */
-	0xfd0012cf,
-	0x1bf40522,
-/* 0x07b7: ctx_mmio_exec */
-	0x9800f8fa,
-	0x27f14103,
-	0x24b60a04,
-	0x0023d006,
-/* 0x07c6: ctx_mmio_loop */
+	0xbd04bd00,
+	0x0599f094,
+	0x170007f1,
+	0xd00203f0,
+	0x04bd0009,
+/* 0x0942: ctx_chan */
+	0x21f500f8,
+	0xa7f00824,
+	0xd021f40c,
+	0xf505f7f0,
+	0xf8080621,
+/* 0x0955: ctx_mmio_exec */
+	0x41039800,
+	0x810007f1,
+	0xd00203f0,
+	0x04bd0003,
+/* 0x0966: ctx_mmio_loop */
 	0x34c434bd,
 	0x0f1bf4ff,
 	0x020057f1,
 	0xfa0653f0,
 	0x03f80535,
-/* 0x07d8: ctx_mmio_pull */
+/* 0x0978: ctx_mmio_pull */
 	0x98804e98,
 	0x21f4814f,
-	0x0830b68d,
+	0x0830b69d,
 	0xf40112b6,
-/* 0x07ea: ctx_mmio_done */
+/* 0x098a: ctx_mmio_done */
 	0x0398df1b,
-	0x0023d016,
-	0xf1400080,
-	0xf0010017,
-	0x01fa0613,
-	0xf803f806,
-/* 0x0801: ctx_xfer */
-	0x00f7f100,
-	0x06f4b60c,
-	0xd004e7f0,
-/* 0x080e: ctx_xfer_idle */
-	0xfecf80fe,
-	0x00e4f100,
-	0xf91bf420,
-	0xf40611f4,
-/* 0x081e: ctx_xfer_pre */
-	0xf7f00d02,
-	0x6c21f510,
-	0x1c11f406,
-/* 0x0828: ctx_xfer_pre_load */
-	0xf502f7f0,
-	0xf5062b21,
-	0xf5063a21,
-	0xbd064c21,
-	0x2b21f5f4,
-	0x8c21f506,
-/* 0x0841: ctx_xfer_exec */
-	0x16019806,
-	0x041427f1,
-	0xd00624b6,
-	0xe7f10020,
-	0xe3f0a500,
-	0x021fb941,
-	0xb68d21f4,
-	0xfcf004e0,
-	0x022cf001,
-	0xfd0124b6,
-	0x21f405f2,
-	0xfc17f18d,
-	0x0213f04a,
-	0xd00c27f0,
-	0x21f50012,
-	0x27f10215,
-	0x23f047fc,
-	0x0020d002,
+	0x0007f116,
+	0x0203f081,
+	0xbd0003d0,
+	0x40008004,
+	0x010017f1,
+	0xfa0613f0,
+	0x03f80601,
+/* 0x09aa: ctx_xfer */
+	0xe7f000f8,
+	0x0007f104,
+	0x0303f002,
+	0xbd000ed0,
+/* 0x09b9: ctx_xfer_idle */
+	0x00e7f104,
+	0x03e3f000,
+	0xf100eecf,
+	0xf42000e4,
+	0x11f4f21b,
+	0x0d02f406,
+/* 0x09d0: ctx_xfer_pre */
+	0xf510f7f0,
+	0xf407de21,
+/* 0x09da: ctx_xfer_pre_load */
+	0xf7f01c11,
+	0x7f21f502,
+	0x9121f507,
+	0xa621f507,
+	0xf5f4bd07,
+	0xf5077f21,
+/* 0x09f3: ctx_xfer_exec */
+	0x98082421,
+	0x24bd1601,
+	0x050007f1,
+	0xd00103f0,
+	0x04bd0002,
+	0xf1021fb9,
+	0xf0a500e7,
+	0x21f441e3,
+	0x01fcf09d,
+	0xb6022cf0,
+	0xf2fd0124,
+	0x02ffb905,
+	0xa504e7f1,
+	0xf441e3f0,
+	0x21f59d21,
+	0x24bd026a,
+	0x47fc07f1,
+	0xd00203f0,
+	0x04bd0002,
 	0xb6012cf0,
-	0x12d00320,
-	0x01acf000,
-	0xf006a5f0,
-	0x0c9800b7,
-	0x010d9800,
-	0xf500e7f0,
-	0xf0016621,
-	0x21f508a7,
-	0x21f50109,
-	0x01f40215,
-	0x0ca7f022,
-	0xf1c921f4,
-	0xb60a1017,
-	0x27f00614,
-	0x0012d005,
-/* 0x08c8: ctx_xfer_post_save_wait */
-	0xfd0012cf,
-	0x1bf40522,
-	0x2e02f4fa,
-/* 0x08d4: ctx_xfer_post */
-	0xf502f7f0,
-	0xbd062b21,
-	0x6c21f5f4,
-	0x3421f506,
-	0x3a21f502,
-	0xf5f4bd06,
-	0xf4062b21,
-	0x01981011,
-	0x0511fd40,
-	0xf5070bf4,
-/* 0x08ff: ctx_xfer_no_post_mmio */
-/* 0x08ff: ctx_xfer_done */
-	0xf807b721,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
+	0x07f10320,
+	0x03f04afc,
+	0x0002d002,
+	0xacf004bd,
+	0x06a5f001,
+	0x9800b7f0,
+	0x0d98000c,
+	0x00e7f001,
+	0x016f21f5,
+	0xf508a7f0,
+	0xf5011021,
+	0xf4025e21,
+	0xa7f01301,
+	0xd021f40c,
+	0xf505f7f0,
+	0xf4080621,
+/* 0x0a82: ctx_xfer_post */
+	0xf7f02e02,
+	0x7f21f502,
+	0xf5f4bd07,
+	0xf507de21,
+	0xf5027f21,
+	0xbd079121,
+	0x7f21f5f4,
+	0x1011f407,
+	0xfd400198,
+	0x0bf40511,
+	0x5521f507,
+/* 0x0aad: ctx_xfer_no_post_mmio */
+/* 0x0aad: ctx_xfer_done */
+	0x0000f809,
 	0x00000000,
 	0x00000000,
 	0x00000000,
diff --git a/drivers/gpu/drm/nouveau/core/engine/graph/fuc/hubnvf0.fuc.h b/drivers/gpu/drm/nouveau/core/engine/graph/fuc/hubnvf0.fuc.h
index 438506d..229c0ae 100644
--- a/drivers/gpu/drm/nouveau/core/engine/graph/fuc/hubnvf0.fuc.h
+++ b/drivers/gpu/drm/nouveau/core/engine/graph/fuc/hubnvf0.fuc.h
@@ -206,14 +206,14 @@
 };
 
 uint32_t nvf0_grhub_code[] = {
-	0x031b0ef5,
+	0x039b0ef5,
 /* 0x0004: queue_put */
 	0x9800d898,
 	0x86f001d9,
 	0x0489b808,
 	0xf00c1bf4,
 	0x21f502f7,
-	0x00f802fe,
+	0x00f8037e,
 /* 0x001c: queue_put_next */
 	0xb60798c4,
 	0x8dbb0384,
@@ -237,184 +237,214 @@
 /* 0x0066: queue_get_done */
 	0x00f80132,
 /* 0x0068: nv_rd32 */
-	0x0728b7f1,
-	0xb906b4b6,
-	0xc9f002ec,
-	0x00bcd01f,
-/* 0x0078: nv_rd32_wait */
-	0xc800bccf,
-	0x1bf41fcc,
-	0x06a7f0fa,
-	0x010921f5,
-	0xf840bfcf,
-/* 0x008d: nv_wr32 */
-	0x28b7f100,
-	0x06b4b607,
-	0xb980bfd0,
-	0xc9f002ec,
-	0x1ec9f01f,
-/* 0x00a3: nv_wr32_wait */
-	0xcf00bcd0,
-	0xccc800bc,
-	0xfa1bf41f,
-/* 0x00ae: watchdog_reset */
-	0x87f100f8,
-	0x84b60430,
-	0x1ff9f006,
-	0xf8008fd0,
-/* 0x00bd: watchdog_clear */
-	0x3087f100,
-	0x0684b604,
-	0xf80080d0,
-/* 0x00c9: wait_donez */
-	0xf094bd00,
-	0x07f10099,
-	0x03f03700,
-	0x0009d002,
-	0x07f104bd,
-	0x03f00600,
-	0x000ad002,
-/* 0x00e6: wait_donez_ne */
-	0x87f104bd,
-	0x83f00000,
-	0x0088cf01,
-	0xf4888aff,
-	0x94bdf31b,
-	0xf10099f0,
-	0xf0170007,
-	0x09d00203,
-	0xf804bd00,
-/* 0x0109: wait_doneo */
-	0xf094bd00,
-	0x07f10099,
-	0x03f03700,
-	0x0009d002,
-	0x87f104bd,
-	0x84b60818,
-	0x008ad006,
-/* 0x0124: wait_doneo_e */
-	0x040087f1,
-	0xcf0684b6,
-	0x8aff0088,
-	0xf30bf488,
+	0xf002ecb9,
+	0x07f11fc9,
+	0x03f0ca00,
+	0x000cd001,
+/* 0x007a: nv_rd32_wait */
+	0xc7f104bd,
+	0xc3f0ca00,
+	0x00cccf01,
+	0xf41fccc8,
+	0xa7f0f31b,
+	0x1021f506,
+	0x00f7f101,
+	0x01f3f0cb,
+	0xf800ffcf,
+/* 0x009d: nv_wr32 */
+	0x0007f100,
+	0x0103f0cc,
+	0xbd000fd0,
+	0x02ecb904,
+	0xf01fc9f0,
+	0x07f11ec9,
+	0x03f0ca00,
+	0x000cd001,
+/* 0x00be: nv_wr32_wait */
+	0xc7f104bd,
+	0xc3f0ca00,
+	0x00cccf01,
+	0xf41fccc8,
+	0x00f8f31b,
+/* 0x00d0: wait_donez */
 	0x99f094bd,
 	0x0007f100,
-	0x0203f017,
+	0x0203f037,
 	0xbd0009d0,
-/* 0x0147: mmctx_size */
-	0xbd00f804,
-/* 0x0149: nv_mmctx_size_loop */
-	0x00e89894,
-	0xb61a85b6,
-	0x84b60180,
-	0x0098bb02,
-	0xb804e0b6,
-	0x1bf404ef,
-	0x029fb9eb,
-/* 0x0166: mmctx_xfer */
-	0x94bd00f8,
-	0xf10199f0,
-	0xf0370007,
-	0x09d00203,
-	0xf104bd00,
-	0xb6071087,
-	0x94bd0684,
-	0xf405bbfd,
-	0x8bd0090b,
-	0x0099f000,
-/* 0x018c: mmctx_base_disabled */
-	0xf405eefd,
-	0x8ed00c0b,
-	0xc08fd080,
-/* 0x019b: mmctx_multi_disabled */
-	0xb70199f0,
-	0xc8010080,
+	0x0007f104,
+	0x0203f006,
+	0xbd000ad0,
+/* 0x00ed: wait_donez_ne */
+	0x0087f104,
+	0x0183f000,
+	0xff0088cf,
+	0x1bf4888a,
+	0xf094bdf3,
+	0x07f10099,
+	0x03f01700,
+	0x0009d002,
+	0x00f804bd,
+/* 0x0110: wait_doneo */
+	0x99f094bd,
+	0x0007f100,
+	0x0203f037,
+	0xbd0009d0,
+	0x0007f104,
+	0x0203f006,
+	0xbd000ad0,
+/* 0x012d: wait_doneo_e */
+	0x0087f104,
+	0x0183f000,
+	0xff0088cf,
+	0x0bf4888a,
+	0xf094bdf3,
+	0x07f10099,
+	0x03f01700,
+	0x0009d002,
+	0x00f804bd,
+/* 0x0150: mmctx_size */
+/* 0x0152: nv_mmctx_size_loop */
+	0xe89894bd,
+	0x1a85b600,
+	0xb60180b6,
+	0x98bb0284,
+	0x04e0b600,
+	0xf404efb8,
+	0x9fb9eb1b,
+/* 0x016f: mmctx_xfer */
+	0xbd00f802,
+	0x0199f094,
+	0x370007f1,
+	0xd00203f0,
+	0x04bd0009,
+	0xbbfd94bd,
+	0x120bf405,
+	0xc40007f1,
+	0xd00103f0,
+	0x04bd000b,
+/* 0x0197: mmctx_base_disabled */
+	0xfd0099f0,
+	0x0bf405ee,
+	0x0007f11e,
+	0x0103f0c6,
+	0xbd000ed0,
+	0x0007f104,
+	0x0103f0c7,
+	0xbd000fd0,
+	0x0199f004,
+/* 0x01b8: mmctx_multi_disabled */
+	0xb600abc8,
+	0xb9f010b4,
+	0x01aec80c,
+	0xfd11e4b6,
+	0x07f105be,
+	0x03f0c500,
+	0x000bd001,
+/* 0x01d6: mmctx_exec_loop */
+/* 0x01d6: mmctx_wait_free */
+	0xe7f104bd,
+	0xe3f0c500,
+	0x00eecf01,
+	0xf41fe4f0,
+	0xce98f30b,
+	0x05e9fd00,
+	0xc80007f1,
+	0xd00103f0,
+	0x04bd000e,
+	0xb804c0b6,
+	0x1bf404cd,
+	0x02abc8d8,
+/* 0x0207: mmctx_fini_wait */
+	0xf11f1bf4,
+	0xf0c500b7,
+	0xbbcf01b3,
+	0x1fb4f000,
+	0xf410b4b0,
+	0xa7f0f01b,
+	0xd021f402,
+/* 0x0223: mmctx_stop */
+	0xc82b0ef4,
 	0xb4b600ab,
 	0x0cb9f010,
-	0xb601aec8,
-	0xbefd11e4,
-	0x008bd005,
-/* 0x01b4: mmctx_exec_loop */
-/* 0x01b4: mmctx_wait_free */
-	0xf0008ecf,
-	0x0bf41fe4,
-	0x00ce98fa,
-	0xd005e9fd,
-	0xc0b6c08e,
-	0x04cdb804,
-	0xc8e81bf4,
-	0x1bf402ab,
-/* 0x01d5: mmctx_fini_wait */
-	0x008bcf18,
-	0xb01fb4f0,
-	0x1bf410b4,
-	0x02a7f0f7,
-	0xf4c921f4,
-/* 0x01ea: mmctx_stop */
-	0xabc81b0e,
-	0x10b4b600,
-	0xf00cb9f0,
-	0x8bd012b9,
-/* 0x01f9: mmctx_stop_wait */
-	0x008bcf00,
-	0xf412bbc8,
-/* 0x0202: mmctx_done */
-	0x94bdfa1b,
-	0xf10199f0,
-	0xf0170007,
-	0x09d00203,
-	0xf804bd00,
-/* 0x0215: strand_wait */
-	0xf0a0f900,
-	0x21f402a7,
-	0xf8a0fcc9,
-/* 0x0221: strand_pre */
-	0xfc87f100,
-	0x0283f04a,
-	0xd00c97f0,
-	0x21f50089,
-	0x00f80215,
-/* 0x0234: strand_post */
-	0x4afc87f1,
-	0xf00283f0,
-	0x89d00d97,
-	0x1521f500,
-/* 0x0247: strand_set */
-	0xf100f802,
-	0xf04ffca7,
-	0xaba202a3,
-	0xc7f00500,
-	0x00acd00f,
-	0xd00bc7f0,
-	0x21f500bc,
-	0xaed00215,
-	0x0ac7f000,
-	0xf500bcd0,
-	0xf8021521,
-/* 0x0271: strand_ctx_init */
-	0xf094bd00,
-	0x07f10399,
-	0x03f03700,
+	0xf112b9f0,
+	0xf0c50007,
+	0x0bd00103,
+/* 0x023b: mmctx_stop_wait */
+	0xf104bd00,
+	0xf0c500b7,
+	0xbbcf01b3,
+	0x12bbc800,
+/* 0x024b: mmctx_done */
+	0xbdf31bf4,
+	0x0199f094,
+	0x170007f1,
+	0xd00203f0,
+	0x04bd0009,
+/* 0x025e: strand_wait */
+	0xa0f900f8,
+	0xf402a7f0,
+	0xa0fcd021,
+/* 0x026a: strand_pre */
+	0x97f000f8,
+	0xfc07f10c,
+	0x0203f04a,
+	0xbd0009d0,
+	0x5e21f504,
+/* 0x027f: strand_post */
+	0xf000f802,
+	0x07f10d97,
+	0x03f04afc,
 	0x0009d002,
 	0x21f504bd,
-	0xe7f00221,
-	0x4721f503,
-	0xfca7f102,
-	0x02a3f046,
-	0x0400aba0,
-	0xf040a0d0,
-	0xbcd001c7,
-	0x1521f500,
-	0x010c9202,
-	0xf000acd0,
-	0xbcd002c7,
-	0x1521f500,
-	0x3421f502,
-	0x8087f102,
-	0x0684b608,
-	0xb70089cf,
-	0x95220080,
-/* 0x02ca: ctx_init_strand_loop */
+	0x00f8025e,
+/* 0x0294: strand_set */
+	0xf10fc7f0,
+	0xf04ffc07,
+	0x0cd00203,
+	0xf004bd00,
+	0x07f10bc7,
+	0x03f04afc,
+	0x000cd002,
+	0x07f104bd,
+	0x03f04ffc,
+	0x000ed002,
+	0xc7f004bd,
+	0xfc07f10a,
+	0x0203f04a,
+	0xbd000cd0,
+	0x5e21f504,
+/* 0x02d3: strand_ctx_init */
+	0xbd00f802,
+	0x0399f094,
+	0x370007f1,
+	0xd00203f0,
+	0x04bd0009,
+	0x026a21f5,
+	0xf503e7f0,
+	0xbd029421,
+	0xfc07f1c4,
+	0x0203f047,
+	0xbd000cd0,
+	0x01c7f004,
+	0x4afc07f1,
+	0xd00203f0,
+	0x04bd000c,
+	0x025e21f5,
+	0xf1010c92,
+	0xf046fc07,
+	0x0cd00203,
+	0xf004bd00,
+	0x07f102c7,
+	0x03f04afc,
+	0x000cd002,
+	0x21f504bd,
+	0x21f5025e,
+	0x87f1027f,
+	0x83f04200,
+	0x0097f102,
+	0x0293f020,
+	0x950099cf,
+/* 0x034a: ctx_init_strand_loop */
 	0x8ed008fe,
 	0x408ed000,
 	0xb6808acf,
@@ -428,7 +458,7 @@
 	0x170007f1,
 	0xd00203f0,
 	0x04bd0009,
-/* 0x02fe: error */
+/* 0x037e: error */
 	0x07f100f8,
 	0x03f00500,
 	0x000fd002,
@@ -436,82 +466,117 @@
 	0x0007f101,
 	0x0303f007,
 	0xbd000fd0,
-/* 0x031b: init */
+/* 0x039b: init */
 	0xbd00f804,
-	0x0004fe04,
-	0xf10007fe,
-	0xf0120017,
-	0x12d00227,
-	0xb117f100,
-	0x0010fe05,
-	0x040017f1,
-	0xf1c010d0,
-	0xb6040437,
-	0x27f10634,
-	0x32d02003,
-	0x0427f100,
-	0x0132d020,
+	0x0007fe04,
+	0x420017f1,
+	0xcf0013f0,
+	0x11e70011,
+	0x14b60109,
+	0x0014fe08,
+	0xf10227f0,
+	0xf0120007,
+	0x02d00003,
+	0xf104bd00,
+	0xfe06c817,
+	0x24bd0010,
+	0x070007f1,
+	0xd00003f0,
+	0x04bd0002,
+	0x200327f1,
+	0x010007f1,
+	0xd00103f0,
+	0x04bd0002,
+	0x200427f1,
+	0x010407f1,
+	0xd00103f0,
+	0x04bd0002,
 	0x200b27f1,
-	0xf10232d0,
-	0xd0200c27,
-	0x27f10732,
-	0x24b60c24,
-	0x0003b906,
-	0xf10023d0,
+	0x010807f1,
+	0xd00103f0,
+	0x04bd0002,
+	0x200c27f1,
+	0x011c07f1,
+	0xd00103f0,
+	0x04bd0002,
+	0xf1010392,
+	0xf0090007,
+	0x03d00303,
+	0xf104bd00,
 	0xf0870427,
-	0x12d00023,
-	0x0012b700,
-	0x0427f001,
-	0xf40012d0,
-	0xe7f11031,
-	0xe3f09604,
-	0x6821f440,
-	0x8090f1c7,
-	0xf4f00301,
-	0x020f801f,
-	0xbb0117f0,
-	0x12b6041f,
-	0x0c27f101,
-	0x0624b604,
-	0xd00021d0,
-	0x17f14021,
-	0x0e980100,
-	0x010f9800,
-	0x014721f5,
-	0x070037f1,
-	0x950634b6,
-	0x34d00814,
-	0x4034d000,
-	0x130030b7,
-	0xb6001fbb,
-	0x3fd002f5,
-	0x0815b600,
-	0xb60110b6,
-	0x1fb90814,
-	0x7121f502,
-	0x001fbb02,
-	0xf1020398,
-	0xf0200047,
-/* 0x03f6: init_gpc */
-	0x4ea05043,
-	0x1fb90804,
-	0x8d21f402,
-	0x010c4ea0,
-	0x21f4f4bd,
-	0x044ea08d,
-	0x8d21f401,
-	0x01004ea0,
-	0xf402f7f0,
-	0x4ea08d21,
-/* 0x041e: init_gpc_wait */
-	0x21f40800,
-	0x1fffc868,
-	0xa0fa0bf4,
-	0xf408044e,
-	0x1fbb6821,
-	0x0040b700,
-	0x0132b680,
-	0xf1be1bf4,
+	0x07f10023,
+	0x03f00400,
+	0x0002d000,
+	0x27f004bd,
+	0x0007f104,
+	0x0003f003,
+	0xbd0002d0,
+	0x1031f404,
+	0x9604e7f1,
+	0xf440e3f0,
+	0xfeb96821,
+	0x90f1c702,
+	0xf0030180,
+	0x0f801ff4,
+	0x0117f002,
+	0xb6041fbb,
+	0x07f10112,
+	0x03f00300,
+	0x0001d001,
+	0x07f104bd,
+	0x03f00400,
+	0x0001d001,
+	0x17f104bd,
+	0xf7f00100,
+	0x7f21f502,
+	0x9121f507,
+	0x10f7f007,
+	0x07de21f5,
+	0x98000e98,
+	0x21f5010f,
+	0x14950150,
+	0x0007f108,
+	0x0103f0c0,
+	0xbd0004d0,
+	0x0007f104,
+	0x0103f0c1,
+	0xbd0004d0,
+	0x0030b704,
+	0x001fbb13,
+	0xf102f5b6,
+	0xf0d30007,
+	0x0fd00103,
+	0xb604bd00,
+	0x10b60815,
+	0x0814b601,
+	0xf5021fb9,
+	0xbb02d321,
+	0x0398001f,
+	0x0047f102,
+	0x5043f020,
+/* 0x04f4: init_gpc */
+	0x08044ea0,
+	0xf4021fb9,
+	0x4ea09d21,
+	0xf4bd010c,
+	0xa09d21f4,
+	0xf401044e,
+	0x4ea09d21,
+	0xf7f00100,
+	0x9d21f402,
+	0x08004ea0,
+/* 0x051c: init_gpc_wait */
+	0xc86821f4,
+	0x0bf41fff,
+	0x044ea0fa,
+	0x6821f408,
+	0xb7001fbb,
+	0xb6800040,
+	0x1bf40132,
+	0x00f7f0be,
+	0x07de21f5,
+	0xf500f7f0,
+	0xf1077f21,
 	0xf0010007,
 	0x01d00203,
 	0xbd04bd00,
@@ -519,382 +584,379 @@
 	0x300007f1,
 	0xd00203f0,
 	0x04bd0001,
-/* 0x0458: main */
+/* 0x0564: main */
 	0xf40031f4,
 	0xd7f00028,
 	0x3921f410,
 	0xb1f401f4,
 	0xf54001e4,
-	0xbd00de1b,
+	0xbd00e91b,
 	0x0499f094,
 	0x370007f1,
 	0xd00203f0,
 	0x04bd0009,
-	0x0b0017f1,
-	0xcf0614b6,
-	0x11cf4012,
-	0x1f13c800,
-	0x00870bf5,
+	0xc00017f1,
+	0xcf0213f0,
+	0x27f10011,
+	0x23f0c100,
+	0x0022cf02,
+	0xf51f13c8,
+	0xc800890b,
+	0x0bf41f23,
+	0xb920f962,
+	0x94bd0212,
+	0xf10799f0,
+	0xf0370007,
+	0x09d00203,
+	0xf404bd00,
+	0x31f40132,
+	0xaa21f502,
+	0xf094bd09,
+	0x07f10799,
+	0x03f01700,
+	0x0009d002,
+	0x20fc04bd,
+	0x99f094bd,
+	0x0007f106,
+	0x0203f037,
+	0xbd0009d0,
+	0x0131f404,
+	0x09aa21f5,
+	0x99f094bd,
+	0x0007f106,
+	0x0203f017,
+	0xbd0009d0,
+	0x330ef404,
+/* 0x060c: chsw_prev_no_next */
+	0x12b920f9,
+	0x0132f402,
+	0xf50232f4,
+	0xfc09aa21,
+	0x0007f120,
+	0x0203f0c0,
+	0xbd0002d0,
+	0x130ef404,
+/* 0x062c: chsw_no_prev */
 	0xf41f23c8,
-	0x20f9620b,
-	0xbd0212b9,
-	0x0799f094,
-	0x370007f1,
+	0x31f40d0b,
+	0x0232f401,
+	0x09aa21f5,
+/* 0x063c: chsw_done */
+	0xf10127f0,
+	0xf0c30007,
+	0x02d00203,
+	0xbd04bd00,
+	0x0499f094,
+	0x170007f1,
 	0xd00203f0,
 	0x04bd0009,
-	0xf40132f4,
-	0x21f50231,
-	0x94bd0801,
+	0xff080ef5,
+/* 0x0660: main_not_ctx_switch */
+	0xf401e4b0,
+	0xf2b90d1b,
+	0x4221f502,
+	0x460ef409,
+/* 0x0670: main_not_ctx_chan */
+	0xf402e4b0,
+	0x94bd321b,
 	0xf10799f0,
-	0xf0170007,
+	0xf0370007,
 	0x09d00203,
-	0xfc04bd00,
-	0xf094bd20,
-	0x07f10699,
-	0x03f03700,
-	0x0009d002,
-	0x31f404bd,
-	0x0121f501,
-	0xf094bd08,
-	0x07f10699,
+	0xf404bd00,
+	0x32f40132,
+	0xaa21f502,
+	0xf094bd09,
+	0x07f10799,
 	0x03f01700,
 	0x0009d002,
 	0x0ef404bd,
-/* 0x04f9: chsw_prev_no_next */
-	0xb920f931,
-	0x32f40212,
-	0x0232f401,
-	0x080121f5,
-	0x17f120fc,
-	0x14b60b00,
-	0x0012d006,
-/* 0x0517: chsw_no_prev */
-	0xc8130ef4,
-	0x0bf41f23,
-	0x0131f40d,
-	0xf50232f4,
-/* 0x0527: chsw_done */
-	0xf1080121,
-	0xb60b0c17,
-	0x27f00614,
-	0x0012d001,
-	0x99f094bd,
-	0x0007f104,
-	0x0203f017,
-	0xbd0009d0,
-	0x130ef504,
-/* 0x0549: main_not_ctx_switch */
-	0x01e4b0ff,
-	0xb90d1bf4,
-	0x21f502f2,
-	0x0ef40795,
-/* 0x0559: main_not_ctx_chan */
-	0x02e4b046,
-	0xbd321bf4,
-	0x0799f094,
-	0x370007f1,
+/* 0x06a5: main_not_ctx_save */
+	0x10ef9411,
+	0xf501f5f0,
+	0xf5037e21,
+/* 0x06b3: main_done */
+	0xbdfeb50e,
+	0x1f29f024,
+	0x300007f1,
 	0xd00203f0,
-	0x04bd0009,
-	0xf40132f4,
-	0x21f50232,
-	0x94bd0801,
-	0xf10799f0,
-	0xf0170007,
-	0x09d00203,
-	0xf404bd00,
-/* 0x058e: main_not_ctx_save */
-	0xef94110e,
-	0x01f5f010,
-	0x02fe21f5,
-	0xfec00ef5,
-/* 0x059c: main_done */
-	0x29f024bd,
-	0x0007f11f,
-	0x0203f030,
-	0xbd0002d0,
-	0xab0ef504,
-/* 0x05b1: ih */
-	0xfe80f9fe,
-	0x80f90188,
-	0xa0f990f9,
-	0xd0f9b0f9,
-	0xf0f9e0f9,
-	0x0acf04bd,
-	0x04abc480,
-	0xf11d0bf4,
-	0xf01900b7,
-	0xbecf10d7,
-	0x00bfcf40,
+	0x04bd0002,
+	0xfea00ef5,
+/* 0x06c8: ih */
+	0x88fe80f9,
+	0xf980f901,
+	0xf9a0f990,
+	0xf9d0f9b0,
+	0xbdf0f9e0,
+	0x00a7f104,
+	0x00a3f002,
+	0xc400aacf,
+	0x0bf404ab,
+	0x10d7f030,
+	0x1a00e7f1,
+	0xcf00e3f0,
+	0xf7f100ee,
+	0xf3f01900,
+	0x00ffcf00,
 	0xb70421f4,
 	0xf00400b0,
-	0xbed001e7,
-/* 0x05e9: ih_no_fifo */
-	0x00abe400,
-	0x0d0bf401,
-	0xf110d7f0,
-	0xf44001e7,
-/* 0x05fa: ih_no_ctxsw */
-	0xb7f10421,
-	0xb0bd0104,
-	0xf4b4abff,
-	0xa7f10d0b,
-	0xa4b60c1c,
-	0x00abd006,
-/* 0x0610: ih_no_other */
-	0xfc400ad0,
+	0x07f101e7,
+	0x03f01d00,
+	0x000ed000,
+/* 0x071a: ih_no_fifo */
+	0xabe404bd,
+	0x0bf40100,
+	0x10d7f00d,
+	0x4001e7f1,
+/* 0x072b: ih_no_ctxsw */
+	0xe40421f4,
+	0xf40400ab,
+	0xb7f1140b,
+	0xbfb90100,
+	0x44e7f102,
+	0x40e3f001,
+/* 0x0743: ih_no_fwmthd */
+	0xf19d21f4,
+	0xbd0104b7,
+	0xb4abffb0,
+	0xf10f0bf4,
+	0xf0070007,
+	0x0bd00303,
+/* 0x075b: ih_no_other */
+	0xf104bd00,
+	0xf0010007,
+	0x0ad00003,
+	0xfc04bd00,
 	0xfce0fcf0,
 	0xfcb0fcd0,
 	0xfc90fca0,
 	0x0088fe80,
 	0x32f480fc,
-/* 0x062b: ctx_4170s */
-	0xf101f800,
-	0xf04170e7,
-	0xf5f040e3,
-	0x8d21f410,
-/* 0x063a: ctx_4170w */
+/* 0x077f: ctx_4170s */
+	0xf001f800,
+	0xffb910f5,
+	0x70e7f102,
+	0x40e3f041,
+	0xf89d21f4,
+/* 0x0791: ctx_4170w */
+	0x70e7f100,
+	0x40e3f041,
+	0xb96821f4,
+	0xf4f002ff,
+	0xf01bf410,
+/* 0x07a6: ctx_redswitch */
 	0xe7f100f8,
-	0xe3f04170,
-	0x6821f440,
-	0xf410f4f0,
+	0xe5f00200,
+	0x20e5f040,
+	0xf110e5f0,
+	0xf0850007,
+	0x0ed00103,
+	0xf004bd00,
+/* 0x07c2: ctx_redswitch_delay */
+	0xf2b608f7,
+	0xfd1bf401,
+	0x0400e5f1,
+	0x0100e5f1,
+	0x850007f1,
+	0xd00103f0,
+	0x04bd000e,
+/* 0x07de: ctx_86c */
+	0x07f100f8,
+	0x03f02300,
+	0x000fd002,
+	0xffb904bd,
+	0x14e7f102,
+	0x40e3f08a,
+	0xb99d21f4,
+	0xe7f102ff,
+	0xe3f0a88c,
+	0x9d21f441,
+/* 0x0806: ctx_mem */
+	0x07f100f8,
+	0x03f08400,
+	0x000fd002,
+/* 0x0812: ctx_mem_wait */
+	0xf7f104bd,
+	0xf3f08400,
+	0x00ffcf02,
+	0xf405fffd,
 	0x00f8f31b,
-/* 0x064c: ctx_redswitch */
-	0x0614e7f1,
-	0xf106e4b6,
-	0xd00270f7,
-	0xf7f000ef,
-/* 0x065d: ctx_redswitch_delay */
-	0x01f2b608,
-	0xf1fd1bf4,
-	0xd00770f7,
-	0x00f800ef,
-/* 0x066c: ctx_86c */
-	0x086ce7f1,
-	0xd006e4b6,
-	0xe7f100ef,
-	0xe3f08a14,
-	0x8d21f440,
-	0xa86ce7f1,
-	0xf441e3f0,
-	0x00f88d21,
-/* 0x068c: ctx_load */
+/* 0x0824: ctx_load */
 	0x99f094bd,
 	0x0007f105,
 	0x0203f037,
 	0xbd0009d0,
 	0x0ca7f004,
-	0xf1c921f4,
-	0xb60a2417,
-	0x10d00614,
-	0x0037f100,
-	0x0634b60b,
-	0xf14032d0,
-	0xb60a0c17,
-	0x47f00614,
-	0x0012d007,
-/* 0x06c7: ctx_chan_wait_0 */
-	0xcf4014d0,
-	0x44f04014,
-	0xfa1bf41f,
-	0xfe0032d0,
-	0x2af0000b,
-	0x0424b61f,
-	0xbd0220b6,
+	0xbdd021f4,
+	0x0007f1f4,
+	0x0203f089,
+	0xbd000fd0,
+	0x0007f104,
+	0x0203f0c1,
+	0xbd0002d0,
+	0x0007f104,
+	0x0203f083,
+	0xbd0002d0,
+	0x07f7f004,
+	0x080621f5,
+	0xc00007f1,
+	0xd00203f0,
+	0x04bd0002,
+	0xf0000bfe,
+	0x24b61f2a,
+	0x0220b604,
+	0x99f094bd,
+	0x0007f108,
+	0x0203f037,
+	0xbd0009d0,
+	0x0007f104,
+	0x0203f081,
+	0xbd0002d0,
+	0x0027f104,
+	0x0023f100,
+	0x0225f080,
+	0x880007f1,
+	0xd00203f0,
+	0x04bd0002,
+	0xf11017f0,
+	0xf0020027,
+	0x12fa0223,
+	0xbd03f805,
 	0x0899f094,
-	0x370007f1,
+	0x170007f1,
 	0xd00203f0,
 	0x04bd0009,
-	0x0a0417f1,
-	0xd00614b6,
-	0x17f10012,
-	0x14b60a20,
-	0x0227f006,
-	0x800023f1,
-	0xf00012d0,
-	0x27f11017,
-	0x23f00200,
-	0x0512fa02,
+	0xb6810198,
+	0x02981814,
+	0x0825b680,
+	0x800512fd,
+	0x94bd1601,
+	0xf10999f0,
+	0xf0370007,
+	0x09d00203,
+	0xf104bd00,
+	0xf0810007,
+	0x01d00203,
+	0xf004bd00,
+	0x07f10127,
+	0x03f08800,
+	0x0002d002,
+	0x17f104bd,
+	0x13f00100,
+	0x0501fa06,
 	0x94bd03f8,
-	0xf10899f0,
+	0xf10999f0,
 	0xf0170007,
 	0x09d00203,
-	0x9804bd00,
-	0x14b68101,
-	0x80029818,
-	0xfd0825b6,
-	0x01800512,
-	0xf094bd16,
-	0x07f10999,
-	0x03f03700,
-	0x0009d002,
-	0x27f104bd,
-	0x24b60a04,
-	0x0021d006,
-	0xf10127f0,
-	0xb60a2017,
-	0x12d00614,
-	0x0017f100,
-	0x0613f001,
-	0xf80501fa,
-	0xf094bd03,
-	0x07f10999,
-	0x03f01700,
-	0x0009d002,
-	0x94bd04bd,
-	0xf10599f0,
-	0xf0170007,
-	0x09d00203,
-	0xf804bd00,
-/* 0x0795: ctx_chan */
-	0x8c21f500,
-	0x0ca7f006,
-	0xf1c921f4,
-	0xb60a1017,
-	0x27f00614,
-	0x0012d005,
-/* 0x07ac: ctx_chan_wait */
-	0xfd0012cf,
-	0x1bf40522,
-/* 0x07b7: ctx_mmio_exec */
-	0x9800f8fa,
-	0x27f14103,
-	0x24b60a04,
-	0x0023d006,
-/* 0x07c6: ctx_mmio_loop */
+	0xbd04bd00,
+	0x0599f094,
+	0x170007f1,
+	0xd00203f0,
+	0x04bd0009,
+/* 0x0942: ctx_chan */
+	0x21f500f8,
+	0xa7f00824,
+	0xd021f40c,
+	0xf505f7f0,
+	0xf8080621,
+/* 0x0955: ctx_mmio_exec */
+	0x41039800,
+	0x810007f1,
+	0xd00203f0,
+	0x04bd0003,
+/* 0x0966: ctx_mmio_loop */
 	0x34c434bd,
 	0x0f1bf4ff,
 	0x020057f1,
 	0xfa0653f0,
 	0x03f80535,
-/* 0x07d8: ctx_mmio_pull */
+/* 0x0978: ctx_mmio_pull */
 	0x98804e98,
 	0x21f4814f,
-	0x0830b68d,
+	0x0830b69d,
 	0xf40112b6,
-/* 0x07ea: ctx_mmio_done */
+/* 0x098a: ctx_mmio_done */
 	0x0398df1b,
-	0x0023d016,
-	0xf1400080,
-	0xf0010017,
-	0x01fa0613,
-	0xf803f806,
-/* 0x0801: ctx_xfer */
-	0x00f7f100,
-	0x06f4b60c,
-	0xd004e7f0,
-/* 0x080e: ctx_xfer_idle */
-	0xfecf80fe,
-	0x00e4f100,
-	0xf91bf420,
-	0xf40611f4,
-/* 0x081e: ctx_xfer_pre */
-	0xf7f00d02,
-	0x6c21f510,
-	0x1c11f406,
-/* 0x0828: ctx_xfer_pre_load */
-	0xf502f7f0,
-	0xf5062b21,
-	0xf5063a21,
-	0xbd064c21,
-	0x2b21f5f4,
-	0x8c21f506,
-/* 0x0841: ctx_xfer_exec */
-	0x16019806,
-	0x041427f1,
-	0xd00624b6,
-	0xe7f10020,
-	0xe3f0a500,
-	0x021fb941,
-	0xb68d21f4,
-	0xfcf004e0,
-	0x022cf001,
-	0xfd0124b6,
-	0x21f405f2,
-	0xfc17f18d,
-	0x0213f04a,
-	0xd00c27f0,
-	0x21f50012,
-	0x27f10215,
-	0x23f047fc,
-	0x0020d002,
+	0x0007f116,
+	0x0203f081,
+	0xbd0003d0,
+	0x40008004,
+	0x010017f1,
+	0xfa0613f0,
+	0x03f80601,
+/* 0x09aa: ctx_xfer */
+	0xe7f000f8,
+	0x0007f104,
+	0x0303f002,
+	0xbd000ed0,
+/* 0x09b9: ctx_xfer_idle */
+	0x00e7f104,
+	0x03e3f000,
+	0xf100eecf,
+	0xf42000e4,
+	0x11f4f21b,
+	0x0d02f406,
+/* 0x09d0: ctx_xfer_pre */
+	0xf510f7f0,
+	0xf407de21,
+/* 0x09da: ctx_xfer_pre_load */
+	0xf7f01c11,
+	0x7f21f502,
+	0x9121f507,
+	0xa621f507,
+	0xf5f4bd07,
+	0xf5077f21,
+/* 0x09f3: ctx_xfer_exec */
+	0x98082421,
+	0x24bd1601,
+	0x050007f1,
+	0xd00103f0,
+	0x04bd0002,
+	0xf1021fb9,
+	0xf0a500e7,
+	0x21f441e3,
+	0x01fcf09d,
+	0xb6022cf0,
+	0xf2fd0124,
+	0x02ffb905,
+	0xa504e7f1,
+	0xf441e3f0,
+	0x21f59d21,
+	0x24bd026a,
+	0x47fc07f1,
+	0xd00203f0,
+	0x04bd0002,
 	0xb6012cf0,
-	0x12d00320,
-	0x01acf000,
-	0xf006a5f0,
-	0x0c9800b7,
-	0x010d9800,
-	0xf500e7f0,
-	0xf0016621,
-	0x21f508a7,
-	0x21f50109,
-	0x01f40215,
-	0x0ca7f022,
-	0xf1c921f4,
-	0xb60a1017,
-	0x27f00614,
-	0x0012d005,
-/* 0x08c8: ctx_xfer_post_save_wait */
-	0xfd0012cf,
-	0x1bf40522,
-	0x2e02f4fa,
-/* 0x08d4: ctx_xfer_post */
-	0xf502f7f0,
-	0xbd062b21,
-	0x6c21f5f4,
-	0x3421f506,
-	0x3a21f502,
-	0xf5f4bd06,
-	0xf4062b21,
-	0x01981011,
-	0x0511fd40,
-	0xf5070bf4,
-/* 0x08ff: ctx_xfer_no_post_mmio */
-/* 0x08ff: ctx_xfer_done */
-	0xf807b721,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
+	0x07f10320,
+	0x03f04afc,
+	0x0002d002,
+	0xacf004bd,
+	0x06a5f001,
+	0x9800b7f0,
+	0x0d98000c,
+	0x00e7f001,
+	0x016f21f5,
+	0xf508a7f0,
+	0xf5011021,
+	0xf4025e21,
+	0xa7f01301,
+	0xd021f40c,
+	0xf505f7f0,
+	0xf4080621,
+/* 0x0a82: ctx_xfer_post */
+	0xf7f02e02,
+	0x7f21f502,
+	0xf5f4bd07,
+	0xf507de21,
+	0xf5027f21,
+	0xbd079121,
+	0x7f21f5f4,
+	0x1011f407,
+	0xfd400198,
+	0x0bf40511,
+	0x5521f507,
+/* 0x0aad: ctx_xfer_no_post_mmio */
+/* 0x0aad: ctx_xfer_done */
+	0x0000f809,
 	0x00000000,
 	0x00000000,
 	0x00000000,
diff --git a/drivers/gpu/drm/nouveau/core/engine/graph/fuc/macros.fuc b/drivers/gpu/drm/nouveau/core/engine/graph/fuc/macros.fuc
index 33a5a82..6ffe283 100644
--- a/drivers/gpu/drm/nouveau/core/engine/graph/fuc/macros.fuc
+++ b/drivers/gpu/drm/nouveau/core/engine/graph/fuc/macros.fuc
@@ -28,28 +28,135 @@
 #define GF117 0xd7
 #define GK100 0xe0
 #define GK110 0xf0
+#define GK208 0x108
 
+#define NV_PGRAPH_FECS_INTR_ACK                                        0x409004
+#define NV_PGRAPH_FECS_INTR                                            0x409008
+#define NV_PGRAPH_FECS_INTR_FWMTHD                                   0x00000400
+#define NV_PGRAPH_FECS_INTR_CHSW                                     0x00000100
+#define NV_PGRAPH_FECS_INTR_FIFO                                     0x00000004
+#define NV_PGRAPH_FECS_INTR_MODE                                       0x40900c
+#define NV_PGRAPH_FECS_INTR_MODE_FIFO                                0x00000004
+#define NV_PGRAPH_FECS_INTR_MODE_FIFO_LEVEL                          0x00000004
+#define NV_PGRAPH_FECS_INTR_MODE_FIFO_EDGE                           0x00000000
+#define NV_PGRAPH_FECS_INTR_EN_SET                                     0x409010
+#define NV_PGRAPH_FECS_INTR_EN_SET_FIFO                              0x00000004
+#define NV_PGRAPH_FECS_INTR_ROUTE                                      0x40901c
+#define NV_PGRAPH_FECS_ACCESS                                          0x409048
+#define NV_PGRAPH_FECS_ACCESS_FIFO                                   0x00000002
+#define NV_PGRAPH_FECS_FIFO_DATA                                       0x409064
+#define NV_PGRAPH_FECS_FIFO_CMD                                        0x409068
+#define NV_PGRAPH_FECS_FIFO_ACK                                        0x409074
+#define NV_PGRAPH_FECS_CAPS                                            0x409108
 #define NV_PGRAPH_FECS_SIGNAL                                          0x409400
+#define NV_PGRAPH_FECS_IROUTE                                          0x409404
+#define NV_PGRAPH_FECS_BAR_MASK0                                       0x40940c
+#define NV_PGRAPH_FECS_BAR_MASK1                                       0x409410
+#define NV_PGRAPH_FECS_BAR                                             0x409414
+#define NV_PGRAPH_FECS_BAR_SET                                         0x409418
+#define NV_PGRAPH_FECS_RED_SWITCH                                      0x409614
+#define NV_PGRAPH_FECS_RED_SWITCH_ENABLE_ROP                         0x00000400
+#define NV_PGRAPH_FECS_RED_SWITCH_ENABLE_GPC                         0x00000200
+#define NV_PGRAPH_FECS_RED_SWITCH_ENABLE_MAIN                        0x00000100
+#define NV_PGRAPH_FECS_RED_SWITCH_POWER_ROP                          0x00000040
+#define NV_PGRAPH_FECS_RED_SWITCH_POWER_GPC                          0x00000020
+#define NV_PGRAPH_FECS_RED_SWITCH_POWER_MAIN                         0x00000010
+#define NV_PGRAPH_FECS_RED_SWITCH_PAUSE_GPC                          0x00000002
+#define NV_PGRAPH_FECS_RED_SWITCH_PAUSE_MAIN                         0x00000001
+#define NV_PGRAPH_FECS_MMCTX_SAVE_SWBASE                               0x409700
+#define NV_PGRAPH_FECS_MMCTX_LOAD_SWBASE                               0x409704
+#define NV_PGRAPH_FECS_MMCTX_LOAD_COUNT                                0x40974c
+#define NV_PGRAPH_FECS_MMCTX_SAVE_SWBASE                               0x409700
+#define NV_PGRAPH_FECS_MMCTX_LOAD_SWBASE                               0x409704
+#define NV_PGRAPH_FECS_MMCTX_BASE                                      0x409710
+#define NV_PGRAPH_FECS_MMCTX_CTRL                                      0x409714
+#define NV_PGRAPH_FECS_MMCTX_MULTI_STRIDE                              0x409718
+#define NV_PGRAPH_FECS_MMCTX_MULTI_MASK                                0x40971c
+#define NV_PGRAPH_FECS_MMCTX_QUEUE                                     0x409720
+#define NV_PGRAPH_FECS_MMIO_CTRL                                       0x409728
+#define NV_PGRAPH_FECS_MMIO_RDVAL                                      0x40972c
+#define NV_PGRAPH_FECS_MMIO_WRVAL                                      0x409730
+#define NV_PGRAPH_FECS_MMCTX_LOAD_COUNT                                0x40974c
 #if CHIPSET < GK110
 #define NV_PGRAPH_FECS_CC_SCRATCH_VAL(n)                    ((n) * 4 + 0x409800)
 #define NV_PGRAPH_FECS_CC_SCRATCH_SET(n)                    ((n) * 4 + 0x409820)
 #define NV_PGRAPH_FECS_CC_SCRATCH_CLR(n)                    ((n) * 4 + 0x409840)
+#define NV_PGRAPH_FECS_UNK86C                                          0x40986c
 #else
 #define NV_PGRAPH_FECS_CC_SCRATCH_VAL(n)                    ((n) * 4 + 0x409800)
 #define NV_PGRAPH_FECS_CC_SCRATCH_CLR(n)                    ((n) * 4 + 0x409840)
+#define NV_PGRAPH_FECS_UNK86C                                          0x40988c
 #define NV_PGRAPH_FECS_CC_SCRATCH_SET(n)                    ((n) * 4 + 0x4098c0)
 #endif
+#define NV_PGRAPH_FECS_STRANDS_CNT                                     0x409880
+#define NV_PGRAPH_FECS_STRAND_SAVE_SWBASE                              0x409908
+#define NV_PGRAPH_FECS_STRAND_LOAD_SWBASE                              0x40990c
+#define NV_PGRAPH_FECS_STRAND_WORDS                                    0x409910
+#define NV_PGRAPH_FECS_STRAND_DATA                                     0x409918
+#define NV_PGRAPH_FECS_STRAND_SELECT                                   0x40991c
+#define NV_PGRAPH_FECS_STRAND_CMD                                      0x409928
+#define NV_PGRAPH_FECS_STRAND_CMD_SEEK                               0x00000001
+#define NV_PGRAPH_FECS_STRAND_CMD_GET_INFO                           0x00000002
+#define NV_PGRAPH_FECS_STRAND_CMD_SAVE                               0x00000003
+#define NV_PGRAPH_FECS_STRAND_CMD_LOAD                               0x00000004
+#define NV_PGRAPH_FECS_STRAND_CMD_ACTIVATE_FILTER                    0x0000000a
+#define NV_PGRAPH_FECS_STRAND_CMD_DEACTIVATE_FILTER                  0x0000000b
+#define NV_PGRAPH_FECS_STRAND_CMD_ENABLE                             0x0000000c
+#define NV_PGRAPH_FECS_STRAND_CMD_DISABLE                            0x0000000d
+#define NV_PGRAPH_FECS_STRAND_FILTER                                   0x40993c
+#define NV_PGRAPH_FECS_MEM_BASE                                        0x409a04
+#define NV_PGRAPH_FECS_MEM_CHAN                                        0x409a0c
+#define NV_PGRAPH_FECS_MEM_CMD                                         0x409a10
+#define NV_PGRAPH_FECS_MEM_CMD_LOAD_CHAN                             0x00000007
+#define NV_PGRAPH_FECS_MEM_TARGET                                      0x409a20
+#define NV_PGRAPH_FECS_MEM_TARGET_UNK31                              0x80000000
+#define NV_PGRAPH_FECS_MEM_TARGET_AS                                 0x0000001f
+#define NV_PGRAPH_FECS_MEM_TARGET_AS_VM                              0x00000001
+#define NV_PGRAPH_FECS_MEM_TARGET_AS_VRAM                            0x00000002
+#define NV_PGRAPH_FECS_CHAN_ADDR                                       0x409b00
+#define NV_PGRAPH_FECS_CHAN_NEXT                                       0x409b04
+#define NV_PGRAPH_FECS_CHSW                                            0x409b0c
+#define NV_PGRAPH_FECS_CHSW_ACK                                      0x00000001
 #define NV_PGRAPH_FECS_INTR_UP_SET                                     0x409c1c
+#define NV_PGRAPH_FECS_INTR_UP_EN                                      0x409c24
 
+#define NV_PGRAPH_GPCX_GPCCS_INTR_ACK                                  0x41a004
+#define NV_PGRAPH_GPCX_GPCCS_INTR                                      0x41a008
+#define NV_PGRAPH_GPCX_GPCCS_INTR_FIFO                               0x00000004
+#define NV_PGRAPH_GPCX_GPCCS_INTR_EN_SET                               0x41a010
+#define NV_PGRAPH_GPCX_GPCCS_INTR_EN_SET_FIFO                        0x00000004
+#define NV_PGRAPH_GPCX_GPCCS_INTR_ROUTE                                0x41a01c
+#define NV_PGRAPH_GPCX_GPCCS_ACCESS                                    0x41a048
+#define NV_PGRAPH_GPCX_GPCCS_ACCESS_FIFO                             0x00000002
+#define NV_PGRAPH_GPCX_GPCCS_FIFO_DATA                                 0x41a064
+#define NV_PGRAPH_GPCX_GPCCS_FIFO_CMD                                  0x41a068
+#define NV_PGRAPH_GPCX_GPCCS_FIFO_ACK                                  0x41a074
+#define NV_PGRAPH_GPCX_GPCCS_UNITS                                     0x41a608
+#define NV_PGRAPH_GPCX_GPCCS_RED_SWITCH                                0x41a614
+#define NV_PGRAPH_GPCX_GPCCS_RED_SWITCH_UNK11                        0x00000800
+#define NV_PGRAPH_GPCX_GPCCS_RED_SWITCH_ENABLE                       0x00000200
+#define NV_PGRAPH_GPCX_GPCCS_RED_SWITCH_POWER                        0x00000020
+#define NV_PGRAPH_GPCX_GPCCS_RED_SWITCH_PAUSE                        0x00000002
+#define NV_PGRAPH_GPCX_GPCCS_MYINDEX                                   0x41a618
+#define NV_PGRAPH_GPCX_GPCCS_MMCTX_SAVE_SWBASE                         0x41a700
+#define NV_PGRAPH_GPCX_GPCCS_MMCTX_LOAD_SWBASE                         0x41a704
+#define NV_PGRAPH_GPCX_GPCCS_MMCTX_LOAD_COUNT                          0x41a74c
 #if CHIPSET < GK110
 #define NV_PGRAPH_GPCX_GPCCS_CC_SCRATCH_VAL(n)              ((n) * 4 + 0x41a800)
 #define NV_PGRAPH_GPCX_GPCCS_CC_SCRATCH_SET(n)              ((n) * 4 + 0x41a820)
 #define NV_PGRAPH_GPCX_GPCCS_CC_SCRATCH_CLR(n)              ((n) * 4 + 0x41a840)
+#define NV_PGRAPH_GPCX_GPCCS_UNK86C                                    0x41a86c
 #else
 #define NV_PGRAPH_GPCX_GPCCS_CC_SCRATCH_VAL(n)              ((n) * 4 + 0x41a800)
 #define NV_PGRAPH_GPCX_GPCCS_CC_SCRATCH_CLR(n)              ((n) * 4 + 0x41a840)
+#define NV_PGRAPH_GPCX_GPCCS_UNK86C                                    0x41a88c
 #define NV_PGRAPH_GPCX_GPCCS_CC_SCRATCH_SET(n)              ((n) * 4 + 0x41a8c0)
 #endif
+#define NV_PGRAPH_GPCX_GPCCS_STRAND_SELECT                             0x41a91c
+#define NV_PGRAPH_GPCX_GPCCS_STRAND_CMD                                0x41a928
+#define NV_PGRAPH_GPCX_GPCCS_STRAND_CMD_SAVE                         0x00000003
+#define NV_PGRAPH_GPCX_GPCCS_STRAND_CMD_LOAD                         0x00000004
+#define NV_PGRAPH_GPCX_GPCCS_MEM_BASE                                  0x41aa04
 
 #define mmctx_data(r,c) .b32 (((c - 1) << 26) | r)
 #define queue_init      .skip 72 // (2 * 4) + ((8 * 4) * 2)
@@ -65,24 +172,50 @@
 #define T_LCHAN   8
 #define T_LCTXH   9
 
-#define nv_mkmm(rv,r) /*
-*/	movw rv  ((r) & 0x0000fffc) /*
-*/	sethi rv ((r) & 0x00ff0000)
+#if CHIPSET < GK208
+#define imm32(reg,val) /*
+*/	movw reg  ((val) & 0x0000ffff) /*
+*/	sethi reg ((val) & 0xffff0000)
+#else
+#define imm32(reg,val) /*
+*/	mov reg (val)
+#endif
+
 #define nv_mkio(rv,r,i) /*
-*/	nv_mkmm(rv, (((r) & 0xffc) << 6) | ((i) << 2))
+*/	imm32(rv, (((r) & 0xffc) << 6) | ((i) << 2))
+
+#define hash #
+#define fn(a) a
+#if CHIPSET < GK208
+#define call(a) call fn(hash)a
+#else
+#define call(a) lcall fn(hash)a
+#endif
 
 #define nv_iord(rv,r,i) /*
 */	nv_mkio(rv,r,i) /*
 */	iord rv I[rv]
+
 #define nv_iowr(r,i,rv) /*
 */	nv_mkio($r0,r,i) /*
 */	iowr I[$r0] rv /*
 */	clear b32 $r0
 
+#define nv_rd32(reg,addr) /*
+*/	imm32($r14, addr) /*
+*/	call(nv_rd32) /*
+*/	mov b32 reg $r15
+
+#define nv_wr32(addr,reg) /*
+*/	mov b32 $r15 reg /*
+*/	imm32($r14, addr) /*
+*/	call(nv_wr32)
+
 #define trace_set(bit) /*
 */	clear b32 $r9 /*
 */	bset $r9 bit /*
 */	nv_iowr(NV_PGRAPH_FECS_CC_SCRATCH_SET(7), 0, $r9)
+
 #define trace_clr(bit) /*
 */	clear b32 $r9 /*
 */	bset $r9 bit /*
diff --git a/drivers/gpu/drm/nouveau/core/engine/graph/nv108.c b/drivers/gpu/drm/nouveau/core/engine/graph/nv108.c
new file mode 100644
index 0000000..e1af65e
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/engine/graph/nv108.c
@@ -0,0 +1,236 @@
+/*
+ * Copyright 2013 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs <bskeggs@redhat.com>
+ */
+
+#include "nvc0.h"
+
+/*******************************************************************************
+ * Graphics object classes
+ ******************************************************************************/
+
+static struct nouveau_oclass
+nv108_graph_sclass[] = {
+	{ 0x902d, &nouveau_object_ofuncs },
+	{ 0xa140, &nouveau_object_ofuncs },
+	{ 0xa197, &nouveau_object_ofuncs },
+	{ 0xa1c0, &nouveau_object_ofuncs },
+	{}
+};
+
+/*******************************************************************************
+ * PGRAPH engine/subdev functions
+ ******************************************************************************/
+
+static struct nvc0_graph_init
+nv108_graph_init_regs[] = {
+	{ 0x400080,   1, 0x04, 0x003083c2 },
+	{ 0x400088,   1, 0x04, 0x0001bfe7 },
+	{ 0x40008c,   1, 0x04, 0x00000000 },
+	{ 0x400090,   1, 0x04, 0x00000030 },
+	{ 0x40013c,   1, 0x04, 0x003901f7 },
+	{ 0x400140,   1, 0x04, 0x00000100 },
+	{ 0x400144,   1, 0x04, 0x00000000 },
+	{ 0x400148,   1, 0x04, 0x00000110 },
+	{ 0x400138,   1, 0x04, 0x00000000 },
+	{ 0x400130,   2, 0x04, 0x00000000 },
+	{ 0x400124,   1, 0x04, 0x00000002 },
+	{}
+};
+
+struct nvc0_graph_init
+nv108_graph_init_unk58xx[] = {
+	{ 0x405844,   1, 0x04, 0x00ffffff },
+	{ 0x405850,   1, 0x04, 0x00000000 },
+	{ 0x405900,   1, 0x04, 0x00000000 },
+	{ 0x405908,   1, 0x04, 0x00000000 },
+	{ 0x405928,   1, 0x04, 0x00000000 },
+	{ 0x40592c,   1, 0x04, 0x00000000 },
+	{}
+};
+
+static struct nvc0_graph_init
+nv108_graph_init_gpc[] = {
+	{ 0x418408,   1, 0x04, 0x00000000 },
+	{ 0x4184a0,   3, 0x04, 0x00000000 },
+	{ 0x418604,   1, 0x04, 0x00000000 },
+	{ 0x418680,   1, 0x04, 0x00000000 },
+	{ 0x418714,   1, 0x04, 0x00000000 },
+	{ 0x418384,   2, 0x04, 0x00000000 },
+	{ 0x418814,   3, 0x04, 0x00000000 },
+	{ 0x418b04,   1, 0x04, 0x00000000 },
+	{ 0x4188c8,   2, 0x04, 0x00000000 },
+	{ 0x4188d0,   1, 0x04, 0x00010000 },
+	{ 0x4188d4,   1, 0x04, 0x00000201 },
+	{ 0x418910,   1, 0x04, 0x00010001 },
+	{ 0x418914,   1, 0x04, 0x00000301 },
+	{ 0x418918,   1, 0x04, 0x00800000 },
+	{ 0x418980,   1, 0x04, 0x77777770 },
+	{ 0x418984,   3, 0x04, 0x77777777 },
+	{ 0x418c04,   1, 0x04, 0x00000000 },
+	{ 0x418c64,   2, 0x04, 0x00000000 },
+	{ 0x418c88,   1, 0x04, 0x00000000 },
+	{ 0x418cb4,   2, 0x04, 0x00000000 },
+	{ 0x418d00,   1, 0x04, 0x00000000 },
+	{ 0x418d28,   2, 0x04, 0x00000000 },
+	{ 0x418f00,   1, 0x04, 0x00000400 },
+	{ 0x418f08,   1, 0x04, 0x00000000 },
+	{ 0x418f20,   2, 0x04, 0x00000000 },
+	{ 0x418e00,   1, 0x04, 0x00000000 },
+	{ 0x418e08,   1, 0x04, 0x00000000 },
+	{ 0x418e1c,   2, 0x04, 0x00000000 },
+	{ 0x41900c,   1, 0x04, 0x00000000 },
+	{ 0x419018,   1, 0x04, 0x00000000 },
+	{}
+};
+
+static struct nvc0_graph_init
+nv108_graph_init_tpc[] = {
+	{ 0x419d0c,   1, 0x04, 0x00000000 },
+	{ 0x419d10,   1, 0x04, 0x00000014 },
+	{ 0x419ab0,   1, 0x04, 0x00000000 },
+	{ 0x419ac8,   1, 0x04, 0x00000000 },
+	{ 0x419ab8,   1, 0x04, 0x000000e7 },
+	{ 0x419abc,   2, 0x04, 0x00000000 },
+	{ 0x419ab4,   1, 0x04, 0x00000000 },
+	{ 0x419aa8,   2, 0x04, 0x00000000 },
+	{ 0x41980c,   1, 0x04, 0x00000010 },
+	{ 0x419844,   1, 0x04, 0x00000000 },
+	{ 0x419850,   1, 0x04, 0x00000004 },
+	{ 0x419854,   2, 0x04, 0x00000000 },
+	{ 0x419c98,   1, 0x04, 0x00000000 },
+	{ 0x419ca8,   1, 0x04, 0x00000000 },
+	{ 0x419cb0,   1, 0x04, 0x01000000 },
+	{ 0x419cb4,   1, 0x04, 0x00000000 },
+	{ 0x419cb8,   1, 0x04, 0x00b08bea },
+	{ 0x419c84,   1, 0x04, 0x00010384 },
+	{ 0x419cbc,   1, 0x04, 0x281b3646 },
+	{ 0x419cc0,   2, 0x04, 0x00000000 },
+	{ 0x419c80,   1, 0x04, 0x00000230 },
+	{ 0x419ccc,   2, 0x04, 0x00000000 },
+	{ 0x419c0c,   1, 0x04, 0x00000000 },
+	{ 0x419e00,   1, 0x04, 0x00000080 },
+	{ 0x419ea0,   1, 0x04, 0x00000000 },
+	{ 0x419ee4,   1, 0x04, 0x00000000 },
+	{ 0x419ea4,   1, 0x04, 0x00000100 },
+	{ 0x419ea8,   1, 0x04, 0x00000000 },
+	{ 0x419eb4,   1, 0x04, 0x00000000 },
+	{ 0x419ebc,   2, 0x04, 0x00000000 },
+	{ 0x419edc,   1, 0x04, 0x00000000 },
+	{ 0x419f00,   1, 0x04, 0x00000000 },
+	{ 0x419ed0,   1, 0x04, 0x00003234 },
+	{ 0x419f74,   1, 0x04, 0x00015555 },
+	{ 0x419f80,   4, 0x04, 0x00000000 },
+	{}
+};
+
+static int
+nv108_graph_fini(struct nouveau_object *object, bool suspend)
+{
+	struct nvc0_graph_priv *priv = (void *)object;
+	static const struct {
+		u32 addr;
+		u32 data;
+	} magic[] = {
+		{ 0x020520, 0xfffffffc },
+		{ 0x020524, 0xfffffffe },
+		{ 0x020524, 0xfffffffc },
+		{ 0x020524, 0xfffffff8 },
+		{ 0x020524, 0xffffffe0 },
+		{ 0x020530, 0xfffffffe },
+		{ 0x02052c, 0xfffffffa },
+		{ 0x02052c, 0xfffffff0 },
+		{ 0x02052c, 0xffffffc0 },
+		{ 0x02052c, 0xffffff00 },
+		{ 0x02052c, 0xfffffc00 },
+		{ 0x02052c, 0xfffcfc00 },
+		{ 0x02052c, 0xfff0fc00 },
+		{ 0x02052c, 0xff80fc00 },
+		{ 0x020528, 0xfffffffe },
+		{ 0x020528, 0xfffffffc },
+	};
+	int i;
+
+	nv_mask(priv, 0x000200, 0x08001000, 0x00000000);
+	nv_mask(priv, 0x0206b4, 0x00000000, 0x00000000);
+	for (i = 0; i < ARRAY_SIZE(magic); i++) {
+		nv_wr32(priv, magic[i].addr, magic[i].data);
+		nv_wait(priv, magic[i].addr, 0x80000000, 0x00000000);
+	}
+
+	return nouveau_graph_fini(&priv->base, suspend);
+}
+
+static struct nvc0_graph_init *
+nv108_graph_init_mmio[] = {
+	nv108_graph_init_regs,
+	nvf0_graph_init_unk40xx,
+	nvc0_graph_init_unk44xx,
+	nvc0_graph_init_unk78xx,
+	nvc0_graph_init_unk60xx,
+	nvd9_graph_init_unk64xx,
+	nv108_graph_init_unk58xx,
+	nvc0_graph_init_unk80xx,
+	nvf0_graph_init_unk70xx,
+	nvf0_graph_init_unk5bxx,
+	nv108_graph_init_gpc,
+	nv108_graph_init_tpc,
+	nve4_graph_init_unk,
+	nve4_graph_init_unk88xx,
+	NULL
+};
+
+#include "fuc/hubnv108.fuc5.h"
+
+static struct nvc0_graph_ucode
+nv108_graph_fecs_ucode = {
+	.code.data = nv108_grhub_code,
+	.code.size = sizeof(nv108_grhub_code),
+	.data.data = nv108_grhub_data,
+	.data.size = sizeof(nv108_grhub_data),
+};
+
+#include "fuc/gpcnv108.fuc5.h"
+
+static struct nvc0_graph_ucode
+nv108_graph_gpccs_ucode = {
+	.code.data = nv108_grgpc_code,
+	.code.size = sizeof(nv108_grgpc_code),
+	.data.data = nv108_grgpc_data,
+	.data.size = sizeof(nv108_grgpc_data),
+};
+
+struct nouveau_oclass *
+nv108_graph_oclass = &(struct nvc0_graph_oclass) {
+	.base.handle = NV_ENGINE(GR, 0x08),
+	.base.ofuncs = &(struct nouveau_ofuncs) {
+		.ctor = nvc0_graph_ctor,
+		.dtor = nvc0_graph_dtor,
+		.init = nve4_graph_init,
+		.fini = nv108_graph_fini,
+	},
+	.cclass = &nv108_grctx_oclass,
+	.sclass =  nv108_graph_sclass,
+	.mmio = nv108_graph_init_mmio,
+	.fecs.ucode = &nv108_graph_fecs_ucode,
+	.gpccs.ucode = &nv108_graph_gpccs_ucode,
+}.base;
diff --git a/drivers/gpu/drm/nouveau/core/engine/graph/nv50.c b/drivers/gpu/drm/nouveau/core/engine/graph/nv50.c
index 03de517..7a367c4 100644
--- a/drivers/gpu/drm/nouveau/core/engine/graph/nv50.c
+++ b/drivers/gpu/drm/nouveau/core/engine/graph/nv50.c
@@ -304,12 +304,28 @@
 	return timeout ? -EBUSY : 0;
 }
 
-static const struct nouveau_enum nv50_mp_exec_error_names[] = {
-	{ 3, "STACK_UNDERFLOW", NULL },
-	{ 4, "QUADON_ACTIVE", NULL },
-	{ 8, "TIMEOUT", NULL },
-	{ 0x10, "INVALID_OPCODE", NULL },
-	{ 0x40, "BREAKPOINT", NULL },
+static const struct nouveau_bitfield nv50_mp_exec_errors[] = {
+	{ 0x01, "STACK_UNDERFLOW" },
+	{ 0x02, "STACK_MISMATCH" },
+	{ 0x04, "QUADON_ACTIVE" },
+	{ 0x08, "TIMEOUT" },
+	{ 0x10, "INVALID_OPCODE" },
+	{ 0x20, "PM_OVERFLOW" },
+	{ 0x40, "BREAKPOINT" },
+	{}
+};
+
+static const struct nouveau_bitfield nv50_mpc_traps[] = {
+	{ 0x0000001, "LOCAL_LIMIT_READ" },
+	{ 0x0000010, "LOCAL_LIMIT_WRITE" },
+	{ 0x0000040, "STACK_LIMIT" },
+	{ 0x0000100, "GLOBAL_LIMIT_READ" },
+	{ 0x0001000, "GLOBAL_LIMIT_WRITE" },
+	{ 0x0010000, "MP0" },
+	{ 0x0020000, "MP1" },
+	{ 0x0040000, "GLOBAL_LIMIT_RED" },
+	{ 0x0400000, "GLOBAL_LIMIT_ATOM" },
+	{ 0x4000000, "MP2" },
 	{}
 };
 
@@ -396,6 +412,60 @@
 	{}
 };
 
+static const struct nouveau_bitfield nv50_graph_trap_prop[] = {
+	{ 0x00000004, "SURF_WIDTH_OVERRUN" },
+	{ 0x00000008, "SURF_HEIGHT_OVERRUN" },
+	{ 0x00000010, "DST2D_FAULT" },
+	{ 0x00000020, "ZETA_FAULT" },
+	{ 0x00000040, "RT_FAULT" },
+	{ 0x00000080, "CUDA_FAULT" },
+	{ 0x00000100, "DST2D_STORAGE_TYPE_MISMATCH" },
+	{ 0x00000200, "ZETA_STORAGE_TYPE_MISMATCH" },
+	{ 0x00000400, "RT_STORAGE_TYPE_MISMATCH" },
+	{ 0x00000800, "DST2D_LINEAR_MISMATCH" },
+	{ 0x00001000, "RT_LINEAR_MISMATCH" },
+	{}
+};
+
+static void
+nv50_priv_prop_trap(struct nv50_graph_priv *priv,
+		    u32 ustatus_addr, u32 ustatus, u32 tp)
+{
+	u32 e0c = nv_rd32(priv, ustatus_addr + 0x04);
+	u32 e10 = nv_rd32(priv, ustatus_addr + 0x08);
+	u32 e14 = nv_rd32(priv, ustatus_addr + 0x0c);
+	u32 e18 = nv_rd32(priv, ustatus_addr + 0x10);
+	u32 e1c = nv_rd32(priv, ustatus_addr + 0x14);
+	u32 e20 = nv_rd32(priv, ustatus_addr + 0x18);
+	u32 e24 = nv_rd32(priv, ustatus_addr + 0x1c);
+
+	/* CUDA memory: l[], g[] or stack. */
+	if (ustatus & 0x00000080) {
+		if (e18 & 0x80000000) {
+			/* g[] read fault? */
+			nv_error(priv, "TRAP_PROP - TP %d - CUDA_FAULT - Global read fault at address %02x%08x\n",
+					 tp, e14, e10 | ((e18 >> 24) & 0x1f));
+			e18 &= ~0x1f000000;
+		} else if (e18 & 0xc) {
+			/* g[] write fault? */
+			nv_error(priv, "TRAP_PROP - TP %d - CUDA_FAULT - Global write fault at address %02x%08x\n",
+				 tp, e14, e10 | ((e18 >> 7) & 0x1f));
+			e18 &= ~0x00000f80;
+		} else {
+			nv_error(priv, "TRAP_PROP - TP %d - Unknown CUDA fault at address %02x%08x\n",
+				 tp, e14, e10);
+		}
+		ustatus &= ~0x00000080;
+	}
+	if (ustatus) {
+		nv_error(priv, "TRAP_PROP - TP %d -", tp);
+		nouveau_bitfield_print(nv50_graph_trap_prop, ustatus);
+		pr_cont(" - Address %02x%08x\n", e14, e10);
+	}
+	nv_error(priv, "TRAP_PROP - TP %d - e0c: %08x, e18: %08x, e1c: %08x, e20: %08x, e24: %08x\n",
+		 tp, e0c, e18, e1c, e20, e24);
+}
+
 static void
 nv50_priv_mp_trap(struct nv50_graph_priv *priv, int tpid, int display)
 {
@@ -420,8 +490,8 @@
 			oplow = nv_rd32(priv, addr + 0x70);
 			ophigh = nv_rd32(priv, addr + 0x74);
 			nv_error(priv, "TRAP_MP_EXEC - "
-					"TP %d MP %d: ", tpid, i);
-			nouveau_enum_print(nv50_mp_exec_error_names, status);
+					"TP %d MP %d:", tpid, i);
+			nouveau_bitfield_print(nv50_mp_exec_errors, status);
 			pr_cont(" at %06x warp %d, opcode %08x %08x\n",
 					pc&0xffffff, pc >> 24,
 					oplow, ophigh);
@@ -468,59 +538,18 @@
 				nv50_priv_mp_trap(priv, i, display);
 				ustatus &= ~0x04030000;
 			}
+			if (ustatus && display) {
+				nv_error(priv, "%s - TP%d:", name, i);
+				nouveau_bitfield_print(nv50_mpc_traps, ustatus);
+				pr_cont("\n");
+				ustatus = 0;
+			}
 			break;
-		case 8: /* TPDMA error */
-			{
-			u32 e0c = nv_rd32(priv, ustatus_addr + 4);
-			u32 e10 = nv_rd32(priv, ustatus_addr + 8);
-			u32 e14 = nv_rd32(priv, ustatus_addr + 0xc);
-			u32 e18 = nv_rd32(priv, ustatus_addr + 0x10);
-			u32 e1c = nv_rd32(priv, ustatus_addr + 0x14);
-			u32 e20 = nv_rd32(priv, ustatus_addr + 0x18);
-			u32 e24 = nv_rd32(priv, ustatus_addr + 0x1c);
-			/* 2d engine destination */
-			if (ustatus & 0x00000010) {
-				if (display) {
-					nv_error(priv, "TRAP_TPDMA_2D - TP %d - Unknown fault at address %02x%08x\n",
-							i, e14, e10);
-					nv_error(priv, "TRAP_TPDMA_2D - TP %d - e0c: %08x, e18: %08x, e1c: %08x, e20: %08x, e24: %08x\n",
-							i, e0c, e18, e1c, e20, e24);
-				}
-				ustatus &= ~0x00000010;
-			}
-			/* Render target */
-			if (ustatus & 0x00000040) {
-				if (display) {
-					nv_error(priv, "TRAP_TPDMA_RT - TP %d - Unknown fault at address %02x%08x\n",
-							i, e14, e10);
-					nv_error(priv, "TRAP_TPDMA_RT - TP %d - e0c: %08x, e18: %08x, e1c: %08x, e20: %08x, e24: %08x\n",
-							i, e0c, e18, e1c, e20, e24);
-				}
-				ustatus &= ~0x00000040;
-			}
-			/* CUDA memory: l[], g[] or stack. */
-			if (ustatus & 0x00000080) {
-				if (display) {
-					if (e18 & 0x80000000) {
-						/* g[] read fault? */
-						nv_error(priv, "TRAP_TPDMA - TP %d - Global read fault at address %02x%08x\n",
-								i, e14, e10 | ((e18 >> 24) & 0x1f));
-						e18 &= ~0x1f000000;
-					} else if (e18 & 0xc) {
-						/* g[] write fault? */
-						nv_error(priv, "TRAP_TPDMA - TP %d - Global write fault at address %02x%08x\n",
-								i, e14, e10 | ((e18 >> 7) & 0x1f));
-						e18 &= ~0x00000f80;
-					} else {
-						nv_error(priv, "TRAP_TPDMA - TP %d - Unknown CUDA fault at address %02x%08x\n",
-								i, e14, e10);
-					}
-					nv_error(priv, "TRAP_TPDMA - TP %d - e0c: %08x, e18: %08x, e1c: %08x, e20: %08x, e24: %08x\n",
-							i, e0c, e18, e1c, e20, e24);
-				}
-				ustatus &= ~0x00000080;
-			}
-			}
+		case 8: /* PROP error */
+			if (display)
+				nv50_priv_prop_trap(
+						priv, ustatus_addr, ustatus, i);
+			ustatus = 0;
 			break;
 		}
 		if (ustatus) {
@@ -727,11 +756,11 @@
 		status &= ~0x080;
 	}
 
-	/* TPDMA:  Handles TP-initiated uncached memory accesses:
+	/* PROP:  Handles TP-initiated uncached memory accesses:
 	 * l[], g[], stack, 2d surfaces, render targets. */
 	if (status & 0x100) {
 		nv50_priv_tp_trap(priv, 8, 0x408e08, 0x408708, display,
-				    "TRAP_TPDMA");
+				    "TRAP_PROP");
 		nv_wr32(priv, 0x400108, 0x100);
 		status &= ~0x100;
 	}
@@ -760,7 +789,7 @@
 	u32 mthd = (addr & 0x00001ffc);
 	u32 data = nv_rd32(priv, 0x400708);
 	u32 class = nv_rd32(priv, 0x400814);
-	u32 show = stat;
+	u32 show = stat, show_bitfield = stat;
 	int chid;
 
 	engctx = nouveau_engctx_get(engine, inst);
@@ -778,21 +807,26 @@
 		nv_error(priv, "DATA_ERROR ");
 		nouveau_enum_print(nv50_data_error_names, ecode);
 		pr_cont("\n");
+		show_bitfield &= ~0x00100000;
 	}
 
 	if (stat & 0x00200000) {
 		if (!nv50_graph_trap_handler(priv, show, chid, (u64)inst << 12,
 				engctx))
 			show &= ~0x00200000;
+		show_bitfield &= ~0x00200000;
 	}
 
 	nv_wr32(priv, 0x400100, stat);
 	nv_wr32(priv, 0x400500, 0x00010001);
 
 	if (show) {
-		nv_error(priv, "%s", "");
-		nouveau_bitfield_print(nv50_graph_intr_name, show);
-		pr_cont("\n");
+		show &= show_bitfield;
+		if (show) {
+			nv_error(priv, "%s", "");
+			nouveau_bitfield_print(nv50_graph_intr_name, show);
+			pr_cont("\n");
+		}
 		nv_error(priv,
 			 "ch %d [0x%010llx %s] subc %d class 0x%04x mthd 0x%04x data 0x%08x\n",
 			 chid, (u64)inst << 12, nouveau_client_name(engctx),
diff --git a/drivers/gpu/drm/nouveau/core/engine/graph/nvc0.c b/drivers/gpu/drm/nouveau/core/engine/graph/nvc0.c
index 5c8a63d..a73ab20 100644
--- a/drivers/gpu/drm/nouveau/core/engine/graph/nvc0.c
+++ b/drivers/gpu/drm/nouveau/core/engine/graph/nvc0.c
@@ -901,6 +901,9 @@
 		}
 
 		return 0;
+	} else
+	if (!oclass->fecs.ucode) {
+		return -ENOSYS;
 	}
 
 	/* load HUB microcode */
diff --git a/drivers/gpu/drm/nouveau/core/engine/graph/nvc0.h b/drivers/gpu/drm/nouveau/core/engine/graph/nvc0.h
index ea17a80..b0ab6de 100644
--- a/drivers/gpu/drm/nouveau/core/engine/graph/nvc0.h
+++ b/drivers/gpu/drm/nouveau/core/engine/graph/nvc0.h
@@ -205,6 +205,11 @@
 extern struct nvc0_graph_init nve4_graph_init_unk[];
 extern struct nvc0_graph_init nve4_graph_init_unk88xx[];
 
+extern struct nvc0_graph_init nvf0_graph_init_unk40xx[];
+extern struct nvc0_graph_init nvf0_graph_init_unk70xx[];
+extern struct nvc0_graph_init nvf0_graph_init_unk5bxx[];
+extern struct nvc0_graph_init nvf0_graph_init_tpc[];
+
 int  nvc0_grctx_generate(struct nvc0_graph_priv *);
 void nvc0_grctx_generate_main(struct nvc0_graph_priv *, struct nvc0_grctx *);
 void nvc0_grctx_generate_mods(struct nvc0_graph_priv *, struct nvc0_grctx *);
@@ -266,6 +271,11 @@
 extern struct nvc0_graph_init nve4_grctx_init_unk90xx[];
 
 extern struct nouveau_oclass *nvf0_grctx_oclass;
+extern struct nvc0_graph_init nvf0_grctx_init_unk44xx[];
+extern struct nvc0_graph_init nvf0_grctx_init_unk5bxx[];
+extern struct nvc0_graph_init nvf0_grctx_init_unk60xx[];
+
+extern struct nouveau_oclass *nv108_grctx_oclass;
 
 #define mmio_data(s,a,p) do {                                                  \
 	info->buffer[info->buffer_nr] = round_up(info->addr, (a));             \
diff --git a/drivers/gpu/drm/nouveau/core/engine/graph/nvf0.c b/drivers/gpu/drm/nouveau/core/engine/graph/nvf0.c
index 2f0ac78..b1acb99 100644
--- a/drivers/gpu/drm/nouveau/core/engine/graph/nvf0.c
+++ b/drivers/gpu/drm/nouveau/core/engine/graph/nvf0.c
@@ -41,7 +41,7 @@
  * PGRAPH engine/subdev functions
  ******************************************************************************/
 
-static struct nvc0_graph_init
+struct nvc0_graph_init
 nvf0_graph_init_unk40xx[] = {
 	{ 0x40415c,   1, 0x04, 0x00000000 },
 	{ 0x404170,   1, 0x04, 0x00000000 },
@@ -60,7 +60,7 @@
 	{}
 };
 
-static struct nvc0_graph_init
+struct nvc0_graph_init
 nvf0_graph_init_unk70xx[] = {
 	{ 0x407010,   1, 0x04, 0x00000000 },
 	{ 0x407040,   1, 0x04, 0x80440424 },
@@ -68,7 +68,7 @@
 	{}
 };
 
-static struct nvc0_graph_init
+struct nvc0_graph_init
 nvf0_graph_init_unk5bxx[] = {
 	{ 0x405b44,   1, 0x04, 0x00000000 },
 	{ 0x405b50,   1, 0x04, 0x00000000 },
@@ -114,7 +114,7 @@
 	{}
 };
 
-static struct nvc0_graph_init
+struct nvc0_graph_init
 nvf0_graph_init_tpc[] = {
 	{ 0x419d0c,   1, 0x04, 0x00000000 },
 	{ 0x419d10,   1, 0x04, 0x00000014 },
@@ -243,6 +243,6 @@
 	.cclass = &nvf0_grctx_oclass,
 	.sclass =  nvf0_graph_sclass,
 	.mmio = nvf0_graph_init_mmio,
-	.fecs.ucode = 0 ? &nvf0_graph_fecs_ucode : NULL,
+	.fecs.ucode = &nvf0_graph_fecs_ucode,
 	.gpccs.ucode = &nvf0_graph_gpccs_ucode,
 }.base;
diff --git a/drivers/gpu/drm/nouveau/core/include/core/class.h b/drivers/gpu/drm/nouveau/core/include/core/class.h
index 560c359..e71a432 100644
--- a/drivers/gpu/drm/nouveau/core/include/core/class.h
+++ b/drivers/gpu/drm/nouveau/core/include/core/class.h
@@ -230,9 +230,26 @@
 
 #define NV04_DISP_CLASS                                              0x00000046
 
+#define NV04_DISP_MTHD                                               0x00000000
+#define NV04_DISP_MTHD_HEAD                                          0x00000001
+
+#define NV04_DISP_SCANOUTPOS                                         0x00000000
+
 struct nv04_display_class {
 };
 
+struct nv04_display_scanoutpos {
+	s64 time[2];
+	u32 vblanks;
+	u32 vblanke;
+	u32 vtotal;
+	u32 vline;
+	u32 hblanks;
+	u32 hblanke;
+	u32 htotal;
+	u32 hline;
+};
+
 /* 5070: NV50_DISP
  * 8270: NV84_DISP
  * 8370: NVA0_DISP
@@ -252,6 +269,11 @@
 #define NVE0_DISP_CLASS                                              0x00009170
 #define NVF0_DISP_CLASS                                              0x00009270
 
+#define NV50_DISP_MTHD                                               0x00000000
+#define NV50_DISP_MTHD_HEAD                                          0x00000003
+
+#define NV50_DISP_SCANOUTPOS                                         0x00000000
+
 #define NV50_DISP_SOR_MTHD                                           0x00010000
 #define NV50_DISP_SOR_MTHD_TYPE                                      0x0000f000
 #define NV50_DISP_SOR_MTHD_HEAD                                      0x00000018
diff --git a/drivers/gpu/drm/nouveau/core/include/core/device.h b/drivers/gpu/drm/nouveau/core/include/core/device.h
index ac2881d..7b8ea22 100644
--- a/drivers/gpu/drm/nouveau/core/include/core/device.h
+++ b/drivers/gpu/drm/nouveau/core/include/core/device.h
@@ -38,7 +38,8 @@
 	NVDEV_SUBDEV_THERM,
 	NVDEV_SUBDEV_CLOCK,
 
-	NVDEV_ENGINE_DMAOBJ,
+	NVDEV_ENGINE_FIRST,
+	NVDEV_ENGINE_DMAOBJ = NVDEV_ENGINE_FIRST,
 	NVDEV_ENGINE_FIFO,
 	NVDEV_ENGINE_SW,
 	NVDEV_ENGINE_GR,
@@ -70,6 +71,7 @@
 	const char *dbgopt;
 	const char *name;
 	const char *cname;
+	u64 disable_mask;
 
 	enum {
 		NV_04    = 0x04,
diff --git a/drivers/gpu/drm/nouveau/core/include/engine/fifo.h b/drivers/gpu/drm/nouveau/core/include/engine/fifo.h
index 8c32cf4..26b6b2b 100644
--- a/drivers/gpu/drm/nouveau/core/include/engine/fifo.h
+++ b/drivers/gpu/drm/nouveau/core/include/engine/fifo.h
@@ -109,6 +109,7 @@
 extern struct nouveau_oclass *nv84_fifo_oclass;
 extern struct nouveau_oclass *nvc0_fifo_oclass;
 extern struct nouveau_oclass *nve0_fifo_oclass;
+extern struct nouveau_oclass *nv108_fifo_oclass;
 
 void nv04_fifo_intr(struct nouveau_subdev *);
 int  nv04_fifo_context_attach(struct nouveau_object *, struct nouveau_object *);
diff --git a/drivers/gpu/drm/nouveau/core/include/engine/graph.h b/drivers/gpu/drm/nouveau/core/include/engine/graph.h
index 8e1b523..9770561 100644
--- a/drivers/gpu/drm/nouveau/core/include/engine/graph.h
+++ b/drivers/gpu/drm/nouveau/core/include/engine/graph.h
@@ -69,6 +69,7 @@
 extern struct nouveau_oclass *nvd9_graph_oclass;
 extern struct nouveau_oclass *nve4_graph_oclass;
 extern struct nouveau_oclass *nvf0_graph_oclass;
+extern struct nouveau_oclass *nv108_graph_oclass;
 
 extern const struct nouveau_bitfield nv04_graph_nsource[];
 extern struct nouveau_ofuncs nv04_graph_ofuncs;
diff --git a/drivers/gpu/drm/nouveau/core/include/subdev/bar.h b/drivers/gpu/drm/nouveau/core/include/subdev/bar.h
index 4f4ff45..9faa98e 100644
--- a/drivers/gpu/drm/nouveau/core/include/subdev/bar.h
+++ b/drivers/gpu/drm/nouveau/core/include/subdev/bar.h
@@ -4,8 +4,7 @@
 #include <core/subdev.h>
 #include <core/device.h>
 
-#include <subdev/fb.h>
-
+struct nouveau_mem;
 struct nouveau_vma;
 
 struct nouveau_bar {
@@ -29,27 +28,7 @@
 	return (void *)nv_device(obj)->subdev[NVDEV_SUBDEV_BAR];
 }
 
-#define nouveau_bar_create(p,e,o,d)                                            \
-	nouveau_bar_create_((p), (e), (o), sizeof(**d), (void **)d)
-#define nouveau_bar_init(p)                                                    \
-	nouveau_subdev_init(&(p)->base)
-#define nouveau_bar_fini(p,s)                                                  \
-	nouveau_subdev_fini(&(p)->base, (s))
-
-int nouveau_bar_create_(struct nouveau_object *, struct nouveau_object *,
-			struct nouveau_oclass *, int, void **);
-void nouveau_bar_destroy(struct nouveau_bar *);
-
-void _nouveau_bar_dtor(struct nouveau_object *);
-#define _nouveau_bar_init _nouveau_subdev_init
-#define _nouveau_bar_fini _nouveau_subdev_fini
-
 extern struct nouveau_oclass nv50_bar_oclass;
 extern struct nouveau_oclass nvc0_bar_oclass;
 
-int nouveau_bar_alloc(struct nouveau_bar *, struct nouveau_object *,
-		      struct nouveau_mem *, struct nouveau_object **);
-
-void nv84_bar_flush(struct nouveau_bar *);
-
 #endif
diff --git a/drivers/gpu/drm/nouveau/core/include/subdev/bios/ramcfg.h b/drivers/gpu/drm/nouveau/core/include/subdev/bios/ramcfg.h
new file mode 100644
index 0000000..c5e6d1e
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/include/subdev/bios/ramcfg.h
@@ -0,0 +1,66 @@
+#ifndef __NVBIOS_RAMCFG_H__
+#define __NVBIOS_RAMCFG_H__
+
+struct nouveau_bios;
+
+struct nvbios_ramcfg {
+	unsigned rammap_11_08_01:1;
+	unsigned rammap_11_08_0c:2;
+	unsigned rammap_11_08_10:1;
+	unsigned rammap_11_11_0c:2;
+
+	unsigned ramcfg_11_01_01:1;
+	unsigned ramcfg_11_01_02:1;
+	unsigned ramcfg_11_01_04:1;
+	unsigned ramcfg_11_01_08:1;
+	unsigned ramcfg_11_01_10:1;
+	unsigned ramcfg_11_01_20:1;
+	unsigned ramcfg_11_01_40:1;
+	unsigned ramcfg_11_01_80:1;
+	unsigned ramcfg_11_02_03:2;
+	unsigned ramcfg_11_02_04:1;
+	unsigned ramcfg_11_02_08:1;
+	unsigned ramcfg_11_02_10:1;
+	unsigned ramcfg_11_02_40:1;
+	unsigned ramcfg_11_02_80:1;
+	unsigned ramcfg_11_03_0f:4;
+	unsigned ramcfg_11_03_30:2;
+	unsigned ramcfg_11_03_c0:2;
+	unsigned ramcfg_11_03_f0:4;
+	unsigned ramcfg_11_04:8;
+	unsigned ramcfg_11_06:8;
+	unsigned ramcfg_11_07_02:1;
+	unsigned ramcfg_11_07_04:1;
+	unsigned ramcfg_11_07_08:1;
+	unsigned ramcfg_11_07_10:1;
+	unsigned ramcfg_11_07_40:1;
+	unsigned ramcfg_11_07_80:1;
+	unsigned ramcfg_11_08_01:1;
+	unsigned ramcfg_11_08_02:1;
+	unsigned ramcfg_11_08_04:1;
+	unsigned ramcfg_11_08_08:1;
+	unsigned ramcfg_11_08_10:1;
+	unsigned ramcfg_11_08_20:1;
+	unsigned ramcfg_11_09:8;
+
+	unsigned timing[11];
+	unsigned timing_20_2e_03:2;
+	unsigned timing_20_2e_30:2;
+	unsigned timing_20_2e_c0:2;
+	unsigned timing_20_2f_03:2;
+	unsigned timing_20_2c_003f:6;
+	unsigned timing_20_2c_1fc0:7;
+	unsigned timing_20_30_f8:5;
+	unsigned timing_20_30_07:3;
+	unsigned timing_20_31_0007:3;
+	unsigned timing_20_31_0078:4;
+	unsigned timing_20_31_0780:4;
+	unsigned timing_20_31_0800:1;
+	unsigned timing_20_31_7000:3;
+	unsigned timing_20_31_8000:1;
+};
+
+u8 nvbios_ramcfg_count(struct nouveau_bios *);
+u8 nvbios_ramcfg_index(struct nouveau_bios *);
+
+#endif
diff --git a/drivers/gpu/drm/nouveau/core/include/subdev/bios/rammap.h b/drivers/gpu/drm/nouveau/core/include/subdev/bios/rammap.h
index bc15e03..5bdf8e4 100644
--- a/drivers/gpu/drm/nouveau/core/include/subdev/bios/rammap.h
+++ b/drivers/gpu/drm/nouveau/core/include/subdev/bios/rammap.h
@@ -1,11 +1,25 @@
 #ifndef __NVBIOS_RAMMAP_H__
 #define __NVBIOS_RAMMAP_H__
 
-u16 nvbios_rammap_table(struct nouveau_bios *, u8 *ver, u8 *hdr,
-			u8 *cnt, u8 *len, u8 *snr, u8 *ssz);
-u16 nvbios_rammap_entry(struct nouveau_bios *, int idx,
-			u8 *ver, u8 *hdr, u8 *cnt, u8 *len);
-u16 nvbios_rammap_match(struct nouveau_bios *, u16 khz,
-			u8 *ver, u8 *hdr, u8 *cnt, u8 *len);
+struct nvbios_ramcfg;
+
+u32 nvbios_rammapTe(struct nouveau_bios *, u8 *ver, u8 *hdr,
+		    u8 *cnt, u8 *len, u8 *snr, u8 *ssz);
+
+u32 nvbios_rammapEe(struct nouveau_bios *, int idx,
+		    u8 *ver, u8 *hdr, u8 *cnt, u8 *len);
+u32 nvbios_rammapEm(struct nouveau_bios *, u16 mhz,
+		    u8 *ver, u8 *hdr, u8 *cnt, u8 *len);
+u32 nvbios_rammapEp(struct nouveau_bios *, u16 mhz,
+		    u8 *ver, u8 *hdr, u8 *cnt, u8 *len,
+		    struct nvbios_ramcfg *);
+
+u32 nvbios_rammapSe(struct nouveau_bios *, u32 data,
+		    u8 ever, u8 ehdr, u8 ecnt, u8 elen, int idx,
+		    u8 *ver, u8 *hdr);
+u32 nvbios_rammapSp(struct nouveau_bios *, u32 data,
+		    u8 ever, u8 ehdr, u8 ecnt, u8 elen, int idx,
+		    u8 *ver, u8 *hdr,
+		    struct nvbios_ramcfg *);
 
 #endif
diff --git a/drivers/gpu/drm/nouveau/core/include/subdev/bios/timing.h b/drivers/gpu/drm/nouveau/core/include/subdev/bios/timing.h
index 963694b..76d914b 100644
--- a/drivers/gpu/drm/nouveau/core/include/subdev/bios/timing.h
+++ b/drivers/gpu/drm/nouveau/core/include/subdev/bios/timing.h
@@ -1,8 +1,14 @@
 #ifndef __NVBIOS_TIMING_H__
 #define __NVBIOS_TIMING_H__
 
-u16 nvbios_timing_table(struct nouveau_bios *,
-			u8 *ver, u8 *hdr, u8 *cnt, u8 *len);
-u16 nvbios_timing_entry(struct nouveau_bios *, int idx, u8 *ver, u8 *hdr);
+struct nvbios_ramcfg;
+
+u16 nvbios_timingTe(struct nouveau_bios *,
+		    u8 *ver, u8 *hdr, u8 *cnt, u8 *len, u8 *snr, u8 *ssz);
+u16 nvbios_timingEe(struct nouveau_bios *, int idx,
+		    u8 *ver, u8 *hdr, u8 *cnt, u8 *len);
+u16 nvbios_timingEp(struct nouveau_bios *, int idx,
+		    u8 *ver, u8 *hdr, u8 *cnt, u8 *len,
+		    struct nvbios_ramcfg *);
 
 #endif
diff --git a/drivers/gpu/drm/nouveau/core/include/subdev/devinit.h b/drivers/gpu/drm/nouveau/core/include/subdev/devinit.h
index 685c9b1..ed1ac68 100644
--- a/drivers/gpu/drm/nouveau/core/include/subdev/devinit.h
+++ b/drivers/gpu/drm/nouveau/core/include/subdev/devinit.h
@@ -9,7 +9,6 @@
 	bool post;
 	void (*meminit)(struct nouveau_devinit *);
 	int  (*pll_set)(struct nouveau_devinit *, u32 type, u32 freq);
-
 };
 
 static inline struct nouveau_devinit *
@@ -18,32 +17,16 @@
 	return (void *)nv_device(obj)->subdev[NVDEV_SUBDEV_DEVINIT];
 }
 
-#define nouveau_devinit_create(p,e,o,d)                                        \
-	nouveau_devinit_create_((p), (e), (o), sizeof(**d), (void **)d)
-#define nouveau_devinit_destroy(p)                                             \
-	nouveau_subdev_destroy(&(p)->base)
-#define nouveau_devinit_init(p) ({                                             \
-	struct nouveau_devinit *d = (p);                                       \
-	_nouveau_devinit_init(nv_object(d));                                   \
-})
-#define nouveau_devinit_fini(p,s) ({                                           \
-	struct nouveau_devinit *d = (p);                                       \
-	_nouveau_devinit_fini(nv_object(d), (s));                              \
-})
-
-int nouveau_devinit_create_(struct nouveau_object *, struct nouveau_object *,
-			    struct nouveau_oclass *, int, void **);
-#define _nouveau_devinit_dtor _nouveau_subdev_dtor
-int _nouveau_devinit_init(struct nouveau_object *);
-int _nouveau_devinit_fini(struct nouveau_object *, bool suspend);
-
-extern struct nouveau_oclass nv04_devinit_oclass;
-extern struct nouveau_oclass nv05_devinit_oclass;
-extern struct nouveau_oclass nv10_devinit_oclass;
-extern struct nouveau_oclass nv1a_devinit_oclass;
-extern struct nouveau_oclass nv20_devinit_oclass;
-extern struct nouveau_oclass nv50_devinit_oclass;
-extern struct nouveau_oclass nva3_devinit_oclass;
-extern struct nouveau_oclass nvc0_devinit_oclass;
+extern struct nouveau_oclass *nv04_devinit_oclass;
+extern struct nouveau_oclass *nv05_devinit_oclass;
+extern struct nouveau_oclass *nv10_devinit_oclass;
+extern struct nouveau_oclass *nv1a_devinit_oclass;
+extern struct nouveau_oclass *nv20_devinit_oclass;
+extern struct nouveau_oclass *nv50_devinit_oclass;
+extern struct nouveau_oclass *nv84_devinit_oclass;
+extern struct nouveau_oclass *nv98_devinit_oclass;
+extern struct nouveau_oclass *nva3_devinit_oclass;
+extern struct nouveau_oclass *nvaf_devinit_oclass;
+extern struct nouveau_oclass *nvc0_devinit_oclass;
 
 #endif
diff --git a/drivers/gpu/drm/nouveau/core/include/subdev/fb.h b/drivers/gpu/drm/nouveau/core/include/subdev/fb.h
index d89dbdf..d7ecafb 100644
--- a/drivers/gpu/drm/nouveau/core/include/subdev/fb.h
+++ b/drivers/gpu/drm/nouveau/core/include/subdev/fb.h
@@ -106,6 +106,13 @@
 extern struct nouveau_oclass *nvc0_fb_oclass;
 extern struct nouveau_oclass *nve0_fb_oclass;
 
+#include <subdev/bios/ramcfg.h>
+
+struct nouveau_ram_data {
+	struct nvbios_ramcfg bios;
+	u32 freq;
+};
+
 struct nouveau_ram {
 	struct nouveau_object base;
 	enum {
@@ -142,6 +149,12 @@
 	} rammap, ramcfg, timing;
 	u32 freq;
 	u32 mr[16];
+	u32 mr1_nuts;
+
+	struct nouveau_ram_data *next;
+	struct nouveau_ram_data former;
+	struct nouveau_ram_data xition;
+	struct nouveau_ram_data target;
 };
 
 #endif
diff --git a/drivers/gpu/drm/nouveau/core/include/subdev/instmem.h b/drivers/gpu/drm/nouveau/core/include/subdev/instmem.h
index 4aca338..c1df26f 100644
--- a/drivers/gpu/drm/nouveau/core/include/subdev/instmem.h
+++ b/drivers/gpu/drm/nouveau/core/include/subdev/instmem.h
@@ -23,21 +23,6 @@
 	return obj;
 }
 
-#define nouveau_instobj_create(p,e,o,d)                                        \
-	nouveau_instobj_create_((p), (e), (o), sizeof(**d), (void **)d)
-#define nouveau_instobj_init(p)                                                \
-	nouveau_object_init(&(p)->base)
-#define nouveau_instobj_fini(p,s)                                              \
-	nouveau_object_fini(&(p)->base, (s))
-
-int  nouveau_instobj_create_(struct nouveau_object *, struct nouveau_object *,
-			     struct nouveau_oclass *, int, void **);
-void nouveau_instobj_destroy(struct nouveau_instobj *);
-
-void _nouveau_instobj_dtor(struct nouveau_object *);
-#define _nouveau_instobj_init nouveau_object_init
-#define _nouveau_instobj_fini nouveau_object_fini
-
 struct nouveau_instmem {
 	struct nouveau_subdev base;
 	struct list_head list;
@@ -60,21 +45,8 @@
 	return (void *)nv_device(obj)->subdev[NVDEV_SUBDEV_INSTMEM];
 }
 
-#define nouveau_instmem_create(p,e,o,d)                                        \
-	nouveau_instmem_create_((p), (e), (o), sizeof(**d), (void **)d)
-#define nouveau_instmem_destroy(p)                                             \
-	nouveau_subdev_destroy(&(p)->base)
-int nouveau_instmem_create_(struct nouveau_object *, struct nouveau_object *,
-			    struct nouveau_oclass *, int, void **);
-int nouveau_instmem_init(struct nouveau_instmem *);
-int nouveau_instmem_fini(struct nouveau_instmem *, bool);
-
-#define _nouveau_instmem_dtor _nouveau_subdev_dtor
-int _nouveau_instmem_init(struct nouveau_object *);
-int _nouveau_instmem_fini(struct nouveau_object *, bool);
-
-extern struct nouveau_oclass nv04_instmem_oclass;
-extern struct nouveau_oclass nv40_instmem_oclass;
-extern struct nouveau_oclass nv50_instmem_oclass;
+extern struct nouveau_oclass *nv04_instmem_oclass;
+extern struct nouveau_oclass *nv40_instmem_oclass;
+extern struct nouveau_oclass *nv50_instmem_oclass;
 
 #endif
diff --git a/drivers/gpu/drm/nouveau/core/include/subdev/mc.h b/drivers/gpu/drm/nouveau/core/include/subdev/mc.h
index adc88b7..3c6738e 100644
--- a/drivers/gpu/drm/nouveau/core/include/subdev/mc.h
+++ b/drivers/gpu/drm/nouveau/core/include/subdev/mc.h
@@ -47,6 +47,7 @@
 extern struct nouveau_oclass *nv04_mc_oclass;
 extern struct nouveau_oclass *nv40_mc_oclass;
 extern struct nouveau_oclass *nv44_mc_oclass;
+extern struct nouveau_oclass *nv4c_mc_oclass;
 extern struct nouveau_oclass *nv50_mc_oclass;
 extern struct nouveau_oclass *nv94_mc_oclass;
 extern struct nouveau_oclass *nv98_mc_oclass;
diff --git a/drivers/gpu/drm/nouveau/core/include/subdev/vm.h b/drivers/gpu/drm/nouveau/core/include/subdev/vm.h
index fcf57fa..c950903 100644
--- a/drivers/gpu/drm/nouveau/core/include/subdev/vm.h
+++ b/drivers/gpu/drm/nouveau/core/include/subdev/vm.h
@@ -131,9 +131,5 @@
 void nouveau_vm_map_at(struct nouveau_vma *, u64 offset, struct nouveau_mem *);
 void nouveau_vm_unmap(struct nouveau_vma *);
 void nouveau_vm_unmap_at(struct nouveau_vma *, u64 offset, u64 length);
-void nouveau_vm_map_sg(struct nouveau_vma *, u64 offset, u64 length,
-		       struct nouveau_mem *);
-void nouveau_vm_map_sg_table(struct nouveau_vma *vma, u64 delta, u64 length,
-		     struct nouveau_mem *mem);
 
 #endif
diff --git a/drivers/gpu/drm/nouveau/core/subdev/bar/base.c b/drivers/gpu/drm/nouveau/core/subdev/bar/base.c
index d70ba34..7098ddd 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/bar/base.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/bar/base.c
@@ -23,7 +23,11 @@
  */
 
 #include <core/object.h>
-#include <subdev/bar.h>
+
+#include <subdev/fb.h>
+#include <subdev/vm.h>
+
+#include "priv.h"
 
 struct nouveau_barobj {
 	struct nouveau_object base;
diff --git a/drivers/gpu/drm/nouveau/core/subdev/bar/nv50.c b/drivers/gpu/drm/nouveau/core/subdev/bar/nv50.c
index 160d27f3..090d594 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/bar/nv50.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/bar/nv50.c
@@ -25,10 +25,11 @@
 #include <core/gpuobj.h>
 
 #include <subdev/timer.h>
-#include <subdev/bar.h>
 #include <subdev/fb.h>
 #include <subdev/vm.h>
 
+#include "priv.h"
+
 struct nv50_bar_priv {
 	struct nouveau_bar base;
 	spinlock_t lock;
diff --git a/drivers/gpu/drm/nouveau/core/subdev/bar/nvc0.c b/drivers/gpu/drm/nouveau/core/subdev/bar/nvc0.c
index b2ec741..bac5e75 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/bar/nvc0.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/bar/nvc0.c
@@ -25,10 +25,11 @@
 #include <core/gpuobj.h>
 
 #include <subdev/timer.h>
-#include <subdev/bar.h>
 #include <subdev/fb.h>
 #include <subdev/vm.h>
 
+#include "priv.h"
+
 struct nvc0_bar_priv {
 	struct nouveau_bar base;
 	spinlock_t lock;
diff --git a/drivers/gpu/drm/nouveau/core/subdev/bar/priv.h b/drivers/gpu/drm/nouveau/core/subdev/bar/priv.h
new file mode 100644
index 0000000..ffad8f3
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/subdev/bar/priv.h
@@ -0,0 +1,26 @@
+#ifndef __NVKM_BAR_PRIV_H__
+#define __NVKM_BAR_PRIV_H__
+
+#include <subdev/bar.h>
+
+#define nouveau_bar_create(p,e,o,d)                                            \
+	nouveau_bar_create_((p), (e), (o), sizeof(**d), (void **)d)
+#define nouveau_bar_init(p)                                                    \
+	nouveau_subdev_init(&(p)->base)
+#define nouveau_bar_fini(p,s)                                                  \
+	nouveau_subdev_fini(&(p)->base, (s))
+
+int nouveau_bar_create_(struct nouveau_object *, struct nouveau_object *,
+			struct nouveau_oclass *, int, void **);
+void nouveau_bar_destroy(struct nouveau_bar *);
+
+void _nouveau_bar_dtor(struct nouveau_object *);
+#define _nouveau_bar_init _nouveau_subdev_init
+#define _nouveau_bar_fini _nouveau_subdev_fini
+
+int  nouveau_bar_alloc(struct nouveau_bar *, struct nouveau_object *,
+		       struct nouveau_mem *, struct nouveau_object **);
+
+void nv84_bar_flush(struct nouveau_bar *);
+
+#endif
diff --git a/drivers/gpu/drm/nouveau/core/subdev/bios/base.c b/drivers/gpu/drm/nouveau/core/subdev/bios/base.c
index aa0fbbe..ef0c9c4 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/bios/base.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/bios/base.c
@@ -130,6 +130,10 @@
 	u16 pcir;
 	int i;
 
+	/* there is no prom on nv4x IGP's */
+	if (device->card_type == NV_40 && device->chipset >= 0x4c)
+		return;
+
 	/* enable access to rom */
 	if (device->card_type >= NV_50)
 		pcireg = 0x088050;
diff --git a/drivers/gpu/drm/nouveau/core/subdev/bios/init.c b/drivers/gpu/drm/nouveau/core/subdev/bios/init.c
index df1b1b4..de201ba 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/bios/init.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/bios/init.c
@@ -9,6 +9,7 @@
 #include <subdev/bios/dp.h>
 #include <subdev/bios/gpio.h>
 #include <subdev/bios/init.h>
+#include <subdev/bios/ramcfg.h>
 #include <subdev/devinit.h>
 #include <subdev/i2c.h>
 #include <subdev/vga.h>
@@ -391,43 +392,14 @@
 	return 0x0000;
 }
 
-static u16
-init_ram_restrict_table(struct nvbios_init *init)
-{
-	struct nouveau_bios *bios = init->bios;
-	struct bit_entry bit_M;
-	u16 data = 0x0000;
-
-	if (!bit_entry(bios, 'M', &bit_M)) {
-		if (bit_M.version == 1 && bit_M.length >= 5)
-			data = nv_ro16(bios, bit_M.offset + 3);
-		if (bit_M.version == 2 && bit_M.length >= 3)
-			data = nv_ro16(bios, bit_M.offset + 1);
-	}
-
-	if (data == 0x0000)
-		warn("ram restrict table not found\n");
-	return data;
-}
-
 static u8
 init_ram_restrict_group_count(struct nvbios_init *init)
 {
-	struct nouveau_bios *bios = init->bios;
-	struct bit_entry bit_M;
-
-	if (!bit_entry(bios, 'M', &bit_M)) {
-		if (bit_M.version == 1 && bit_M.length >= 5)
-			return nv_ro08(bios, bit_M.offset + 2);
-		if (bit_M.version == 2 && bit_M.length >= 3)
-			return nv_ro08(bios, bit_M.offset + 0);
-	}
-
-	return 0x00;
+	return nvbios_ramcfg_count(init->bios);
 }
 
 static u8
-init_ram_restrict_strap(struct nvbios_init *init)
+init_ram_restrict(struct nvbios_init *init)
 {
 	/* This appears to be the behaviour of the VBIOS parser, and *is*
 	 * important to cache the NV_PEXTDEV_BOOT0 on later chipsets to
@@ -438,18 +410,8 @@
 	 * in case *not* re-reading the strap causes similar breakage.
 	 */
 	if (!init->ramcfg || init->bios->version.major < 0x70)
-		init->ramcfg = init_rd32(init, 0x101000);
-	return (init->ramcfg & 0x00000003c) >> 2;
-}
-
-static u8
-init_ram_restrict(struct nvbios_init *init)
-{
-	u8  strap = init_ram_restrict_strap(init);
-	u16 table = init_ram_restrict_table(init);
-	if (table)
-		return nv_ro08(init->bios, table + strap);
-	return 0x00;
+		init->ramcfg = 0x80000000 | nvbios_ramcfg_index(init->bios);
+	return (init->ramcfg & 0x7fffffff);
 }
 
 static u8
diff --git a/drivers/gpu/drm/nouveau/core/subdev/bios/ramcfg.c b/drivers/gpu/drm/nouveau/core/subdev/bios/ramcfg.c
new file mode 100644
index 0000000..991aedd
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/subdev/bios/ramcfg.c
@@ -0,0 +1,67 @@
+/*
+ * Copyright 2013 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs <bskeggs@redhat.com>
+ */
+
+#include <subdev/bios.h>
+#include <subdev/bios/bit.h>
+#include <subdev/bios/ramcfg.h>
+
+static u8
+nvbios_ramcfg_strap(struct nouveau_bios *bios)
+{
+	return (nv_rd32(bios, 0x101000) & 0x0000003c) >> 2;
+}
+
+u8
+nvbios_ramcfg_count(struct nouveau_bios *bios)
+{
+	struct bit_entry bit_M;
+
+	if (!bit_entry(bios, 'M', &bit_M)) {
+		if (bit_M.version == 1 && bit_M.length >= 5)
+			return nv_ro08(bios, bit_M.offset + 2);
+		if (bit_M.version == 2 && bit_M.length >= 3)
+			return nv_ro08(bios, bit_M.offset + 0);
+	}
+
+	return 0x00;
+}
+
+u8
+nvbios_ramcfg_index(struct nouveau_bios *bios)
+{
+	u8 strap = nvbios_ramcfg_strap(bios);
+	u32 xlat = 0x00000000;
+	struct bit_entry bit_M;
+
+	if (!bit_entry(bios, 'M', &bit_M)) {
+		if (bit_M.version == 1 && bit_M.length >= 5)
+			xlat = nv_ro16(bios, bit_M.offset + 3);
+		if (bit_M.version == 2 && bit_M.length >= 3)
+			xlat = nv_ro16(bios, bit_M.offset + 1);
+	}
+
+	if (xlat)
+		strap = nv_ro08(bios, xlat + strap);
+	return strap;
+}
diff --git a/drivers/gpu/drm/nouveau/core/subdev/bios/rammap.c b/drivers/gpu/drm/nouveau/core/subdev/bios/rammap.c
index 916fa9d..1811b2c 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/bios/rammap.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/bios/rammap.c
@@ -24,11 +24,12 @@
 
 #include <subdev/bios.h>
 #include <subdev/bios/bit.h>
+#include <subdev/bios/ramcfg.h>
 #include <subdev/bios/rammap.h>
 
-u16
-nvbios_rammap_table(struct nouveau_bios *bios, u8 *ver, u8 *hdr,
-		    u8 *cnt, u8 *len, u8 *snr, u8 *ssz)
+u32
+nvbios_rammapTe(struct nouveau_bios *bios, u8 *ver, u8 *hdr,
+		u8 *cnt, u8 *len, u8 *snr, u8 *ssz)
 {
 	struct bit_entry bit_P;
 	u16 rammap = 0x0000;
@@ -57,12 +58,12 @@
 	return 0x0000;
 }
 
-u16
-nvbios_rammap_entry(struct nouveau_bios *bios, int idx,
-		    u8 *ver, u8 *hdr, u8 *cnt, u8 *len)
+u32
+nvbios_rammapEe(struct nouveau_bios *bios, int idx,
+		u8 *ver, u8 *hdr, u8 *cnt, u8 *len)
 {
 	u8  snr, ssz;
-	u16 rammap = nvbios_rammap_table(bios, ver, hdr, cnt, len, &snr, &ssz);
+	u16 rammap = nvbios_rammapTe(bios, ver, hdr, cnt, len, &snr, &ssz);
 	if (rammap && idx < *cnt) {
 		rammap = rammap + *hdr + (idx * (*len + (snr * ssz)));
 		*hdr = *len;
@@ -73,16 +74,100 @@
 	return 0x0000;
 }
 
-u16
-nvbios_rammap_match(struct nouveau_bios *bios, u16 khz,
-		    u8 *ver, u8 *hdr, u8 *cnt, u8 *len)
+u32
+nvbios_rammapEm(struct nouveau_bios *bios, u16 khz,
+		u8 *ver, u8 *hdr, u8 *cnt, u8 *len)
 {
 	int idx = 0;
 	u32 data;
-	while ((data = nvbios_rammap_entry(bios, idx++, ver, hdr, cnt, len))) {
+	while ((data = nvbios_rammapEe(bios, idx++, ver, hdr, cnt, len))) {
 		if (khz >= nv_ro16(bios, data + 0x00) &&
 		    khz <= nv_ro16(bios, data + 0x02))
 			break;
 	}
 	return data;
 }
+
+u32
+nvbios_rammapEp(struct nouveau_bios *bios, u16 khz,
+		u8 *ver, u8 *hdr, u8 *cnt, u8 *len,
+		struct nvbios_ramcfg *p)
+{
+	u32 data = nvbios_rammapEm(bios, khz, ver, hdr, cnt, len);
+	memset(p, 0x00, sizeof(*p));
+	switch (!!data * *ver) {
+	case 0x11:
+		p->rammap_11_08_01 = (nv_ro08(bios, data + 0x08) & 0x01) >> 0;
+		p->rammap_11_08_0c = (nv_ro08(bios, data + 0x08) & 0x0c) >> 2;
+		p->rammap_11_08_10 = (nv_ro08(bios, data + 0x08) & 0x10) >> 4;
+		p->rammap_11_11_0c = (nv_ro08(bios, data + 0x11) & 0x0c) >> 2;
+		break;
+	default:
+		data = 0;
+		break;
+	}
+	return data;
+}
+
+u32
+nvbios_rammapSe(struct nouveau_bios *bios, u32 data,
+		u8 ever, u8 ehdr, u8 ecnt, u8 elen, int idx,
+		u8 *ver, u8 *hdr)
+{
+	if (idx < ecnt) {
+		data = data + ehdr + (idx * elen);
+		*ver = ever;
+		*hdr = elen;
+		return data;
+	}
+	return 0;
+}
+
+u32
+nvbios_rammapSp(struct nouveau_bios *bios, u32 data,
+		u8 ever, u8 ehdr, u8 ecnt, u8 elen, int idx,
+		u8 *ver, u8 *hdr, struct nvbios_ramcfg *p)
+{
+	data = nvbios_rammapSe(bios, data, ever, ehdr, ecnt, elen, idx, ver, hdr);
+	switch (!!data * *ver) {
+	case 0x11:
+		p->ramcfg_11_01_01 = (nv_ro08(bios, data + 0x01) & 0x01) >> 0;
+		p->ramcfg_11_01_02 = (nv_ro08(bios, data + 0x01) & 0x02) >> 1;
+		p->ramcfg_11_01_04 = (nv_ro08(bios, data + 0x01) & 0x04) >> 2;
+		p->ramcfg_11_01_08 = (nv_ro08(bios, data + 0x01) & 0x08) >> 3;
+		p->ramcfg_11_01_10 = (nv_ro08(bios, data + 0x01) & 0x10) >> 4;
+		p->ramcfg_11_01_20 = (nv_ro08(bios, data + 0x01) & 0x20) >> 5;
+		p->ramcfg_11_01_40 = (nv_ro08(bios, data + 0x01) & 0x40) >> 6;
+		p->ramcfg_11_01_80 = (nv_ro08(bios, data + 0x01) & 0x80) >> 7;
+		p->ramcfg_11_02_03 = (nv_ro08(bios, data + 0x02) & 0x03) >> 0;
+		p->ramcfg_11_02_04 = (nv_ro08(bios, data + 0x02) & 0x04) >> 2;
+		p->ramcfg_11_02_08 = (nv_ro08(bios, data + 0x02) & 0x08) >> 3;
+		p->ramcfg_11_02_10 = (nv_ro08(bios, data + 0x02) & 0x10) >> 4;
+		p->ramcfg_11_02_40 = (nv_ro08(bios, data + 0x02) & 0x40) >> 6;
+		p->ramcfg_11_02_80 = (nv_ro08(bios, data + 0x02) & 0x80) >> 7;
+		p->ramcfg_11_03_0f = (nv_ro08(bios, data + 0x03) & 0x0f) >> 0;
+		p->ramcfg_11_03_30 = (nv_ro08(bios, data + 0x03) & 0x30) >> 4;
+		p->ramcfg_11_03_c0 = (nv_ro08(bios, data + 0x03) & 0xc0) >> 6;
+		p->ramcfg_11_03_f0 = (nv_ro08(bios, data + 0x03) & 0xf0) >> 4;
+		p->ramcfg_11_04    = (nv_ro08(bios, data + 0x04) & 0xff) >> 0;
+		p->ramcfg_11_06    = (nv_ro08(bios, data + 0x06) & 0xff) >> 0;
+		p->ramcfg_11_07_02 = (nv_ro08(bios, data + 0x07) & 0x02) >> 1;
+		p->ramcfg_11_07_04 = (nv_ro08(bios, data + 0x07) & 0x04) >> 2;
+		p->ramcfg_11_07_08 = (nv_ro08(bios, data + 0x07) & 0x08) >> 3;
+		p->ramcfg_11_07_10 = (nv_ro08(bios, data + 0x07) & 0x10) >> 4;
+		p->ramcfg_11_07_40 = (nv_ro08(bios, data + 0x07) & 0x40) >> 6;
+		p->ramcfg_11_07_80 = (nv_ro08(bios, data + 0x07) & 0x80) >> 7;
+		p->ramcfg_11_08_01 = (nv_ro08(bios, data + 0x08) & 0x01) >> 0;
+		p->ramcfg_11_08_02 = (nv_ro08(bios, data + 0x08) & 0x02) >> 1;
+		p->ramcfg_11_08_04 = (nv_ro08(bios, data + 0x08) & 0x04) >> 2;
+		p->ramcfg_11_08_08 = (nv_ro08(bios, data + 0x08) & 0x08) >> 3;
+		p->ramcfg_11_08_10 = (nv_ro08(bios, data + 0x08) & 0x10) >> 4;
+		p->ramcfg_11_08_20 = (nv_ro08(bios, data + 0x08) & 0x20) >> 5;
+		p->ramcfg_11_09    = (nv_ro08(bios, data + 0x09) & 0xff) >> 0;
+		break;
+	default:
+		data = 0;
+		break;
+	}
+	return data;
+}
diff --git a/drivers/gpu/drm/nouveau/core/subdev/bios/timing.c b/drivers/gpu/drm/nouveau/core/subdev/bios/timing.c
index 151c2d6..350d44a 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/bios/timing.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/bios/timing.c
@@ -24,11 +24,12 @@
 
 #include <subdev/bios.h>
 #include <subdev/bios/bit.h>
+#include <subdev/bios/ramcfg.h>
 #include <subdev/bios/timing.h>
 
 u16
-nvbios_timing_table(struct nouveau_bios *bios,
-		    u8 *ver, u8 *hdr, u8 *cnt, u8 *len)
+nvbios_timingTe(struct nouveau_bios *bios,
+		u8 *ver, u8 *hdr, u8 *cnt, u8 *len, u8 *snr, u8 *ssz)
 {
 	struct bit_entry bit_P;
 	u16 timing = 0x0000;
@@ -47,11 +48,15 @@
 				*hdr = nv_ro08(bios, timing + 1);
 				*cnt = nv_ro08(bios, timing + 2);
 				*len = nv_ro08(bios, timing + 3);
+				*snr = 0;
+				*ssz = 0;
 				return timing;
 			case 0x20:
 				*hdr = nv_ro08(bios, timing + 1);
-				*cnt = nv_ro08(bios, timing + 3);
+				*cnt = nv_ro08(bios, timing + 5);
 				*len = nv_ro08(bios, timing + 2);
+				*snr = nv_ro08(bios, timing + 4);
+				*ssz = nv_ro08(bios, timing + 3);
 				return timing;
 			default:
 				break;
@@ -63,11 +68,60 @@
 }
 
 u16
-nvbios_timing_entry(struct nouveau_bios *bios, int idx, u8 *ver, u8 *len)
+nvbios_timingEe(struct nouveau_bios *bios, int idx,
+		u8 *ver, u8 *hdr, u8 *cnt, u8 *len)
 {
-	u8  hdr, cnt;
-	u16 timing = nvbios_timing_table(bios, ver, &hdr, &cnt, len);
-	if (timing && idx < cnt)
-		return timing + hdr + (idx * *len);
+	u8  snr, ssz;
+	u16 timing = nvbios_timingTe(bios, ver, hdr, cnt, len, &snr, &ssz);
+	if (timing && idx < *cnt) {
+		timing += *hdr + idx * (*len + (snr * ssz));
+		*hdr = *len;
+		*cnt = snr;
+		*len = ssz;
+		return timing;
+	}
 	return 0x0000;
 }
+
+u16
+nvbios_timingEp(struct nouveau_bios *bios, int idx,
+		u8 *ver, u8 *hdr, u8 *cnt, u8 *len,
+		struct nvbios_ramcfg *p)
+{
+	u16 data = nvbios_timingEe(bios, idx, ver, hdr, cnt, len), temp;
+	switch (!!data * *ver) {
+	case 0x20:
+		p->timing[0] = nv_ro32(bios, data + 0x00);
+		p->timing[1] = nv_ro32(bios, data + 0x04);
+		p->timing[2] = nv_ro32(bios, data + 0x08);
+		p->timing[3] = nv_ro32(bios, data + 0x0c);
+		p->timing[4] = nv_ro32(bios, data + 0x10);
+		p->timing[5] = nv_ro32(bios, data + 0x14);
+		p->timing[6] = nv_ro32(bios, data + 0x18);
+		p->timing[7] = nv_ro32(bios, data + 0x1c);
+		p->timing[8] = nv_ro32(bios, data + 0x20);
+		p->timing[9] = nv_ro32(bios, data + 0x24);
+		p->timing[10] = nv_ro32(bios, data + 0x28);
+		p->timing_20_2e_03 = (nv_ro08(bios, data + 0x2e) & 0x03) >> 0;
+		p->timing_20_2e_30 = (nv_ro08(bios, data + 0x2e) & 0x30) >> 4;
+		p->timing_20_2e_c0 = (nv_ro08(bios, data + 0x2e) & 0xc0) >> 6;
+		p->timing_20_2f_03 = (nv_ro08(bios, data + 0x2f) & 0x03) >> 0;
+		temp = nv_ro16(bios, data + 0x2c);
+		p->timing_20_2c_003f = (temp & 0x003f) >> 0;
+		p->timing_20_2c_1fc0 = (temp & 0x1fc0) >> 6;
+		p->timing_20_30_07 = (nv_ro08(bios, data + 0x30) & 0x07) >> 0;
+		p->timing_20_30_f8 = (nv_ro08(bios, data + 0x30) & 0xf8) >> 3;
+		temp = nv_ro16(bios, data + 0x31);
+		p->timing_20_31_0007 = (temp & 0x0007) >> 0;
+		p->timing_20_31_0078 = (temp & 0x0078) >> 3;
+		p->timing_20_31_0780 = (temp & 0x0780) >> 7;
+		p->timing_20_31_0800 = (temp & 0x0800) >> 11;
+		p->timing_20_31_7000 = (temp & 0x7000) >> 12;
+		p->timing_20_31_8000 = (temp & 0x8000) >> 15;
+		break;
+	default:
+		data = 0;
+		break;
+	}
+	return data;
+}
diff --git a/drivers/gpu/drm/nouveau/core/subdev/clock/base.c b/drivers/gpu/drm/nouveau/core/subdev/clock/base.c
index e2938a2..dd62bae 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/clock/base.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/clock/base.c
@@ -182,9 +182,12 @@
 	clk->pstate = pstatei;
 
 	if (pfb->ram->calc) {
-		ret = pfb->ram->calc(pfb, pstate->base.domain[nv_clk_src_mem]);
-		if (ret == 0)
-			ret = pfb->ram->prog(pfb);
+		int khz = pstate->base.domain[nv_clk_src_mem];
+		do {
+			ret = pfb->ram->calc(pfb, khz);
+			if (ret == 0)
+				ret = pfb->ram->prog(pfb);
+		} while (ret > 0);
 		pfb->ram->tidy(pfb);
 	}
 
diff --git a/drivers/gpu/drm/nouveau/core/subdev/clock/nv04.c b/drivers/gpu/drm/nouveau/core/subdev/clock/nv04.c
index 30c1f3a..b74db6c 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/clock/nv04.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/clock/nv04.c
@@ -25,7 +25,7 @@
 #include <subdev/bios.h>
 #include <subdev/bios/pll.h>
 #include <subdev/clock.h>
-#include <subdev/devinit/priv.h>
+#include <subdev/devinit/nv04.h>
 
 #include "pll.h"
 
diff --git a/drivers/gpu/drm/nouveau/core/subdev/clock/nve0.c b/drivers/gpu/drm/nouveau/core/subdev/clock/nve0.c
index 4c62e84..d3c37c9 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/clock/nve0.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/clock/nve0.c
@@ -457,7 +457,7 @@
 	{ nv_clk_src_gpc    , 0x00, NVKM_CLK_DOM_FLAG_CORE, "core", 2000 },
 	{ nv_clk_src_hubk07 , 0x01, NVKM_CLK_DOM_FLAG_CORE },
 	{ nv_clk_src_rop    , 0x02, NVKM_CLK_DOM_FLAG_CORE },
-	{ nv_clk_src_mem    , 0x03, 0, "memory", 1000 },
+	{ nv_clk_src_mem    , 0x03, 0, "memory", 500 },
 	{ nv_clk_src_hubk06 , 0x04, NVKM_CLK_DOM_FLAG_CORE },
 	{ nv_clk_src_hubk01 , 0x05 },
 	{ nv_clk_src_vdec   , 0x06 },
diff --git a/drivers/gpu/drm/nouveau/core/subdev/devinit/base.c b/drivers/gpu/drm/nouveau/core/subdev/devinit/base.c
index 79c81d3..8fa34e8 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/devinit/base.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/devinit/base.c
@@ -24,9 +24,11 @@
 
 #include <core/option.h>
 
-#include <subdev/devinit.h>
 #include <subdev/bios.h>
 #include <subdev/bios/init.h>
+#include <subdev/vga.h>
+
+#include "priv.h"
 
 int
 _nouveau_devinit_fini(struct nouveau_object *object, bool suspend)
@@ -37,18 +39,41 @@
 	if (suspend)
 		devinit->post = true;
 
+	/* unlock the extended vga crtc regs */
+	nv_lockvgac(devinit, false);
+
 	return nouveau_subdev_fini(&devinit->base, suspend);
 }
 
 int
 _nouveau_devinit_init(struct nouveau_object *object)
 {
+	struct nouveau_devinit_impl *impl = (void *)object->oclass;
 	struct nouveau_devinit *devinit = (void *)object;
-	int ret = nouveau_subdev_init(&devinit->base);
+	int ret;
+
+	ret = nouveau_subdev_init(&devinit->base);
 	if (ret)
 		return ret;
 
-	return nvbios_init(&devinit->base, devinit->post);
+	ret = nvbios_init(&devinit->base, devinit->post);
+	if (ret)
+		return ret;
+
+	if (impl->disable)
+		nv_device(devinit)->disable_mask |= impl->disable(devinit);
+	return 0;
+}
+
+void
+_nouveau_devinit_dtor(struct nouveau_object *object)
+{
+	struct nouveau_devinit *devinit = (void *)object;
+
+	/* lock crtc regs */
+	nv_lockvgac(devinit, true);
+
+	nouveau_subdev_destroy(&devinit->base);
 }
 
 int
@@ -57,6 +82,7 @@
 			struct nouveau_oclass *oclass,
 			int size, void **pobject)
 {
+	struct nouveau_devinit_impl *impl = (void *)oclass;
 	struct nouveau_device *device = nv_device(parent);
 	struct nouveau_devinit *devinit;
 	int ret;
@@ -68,5 +94,7 @@
 		return ret;
 
 	devinit->post = nouveau_boolopt(device->cfgopt, "NvForcePost", false);
+	devinit->meminit = impl->meminit;
+	devinit->pll_set = impl->pll_set;
 	return 0;
 }
diff --git a/drivers/gpu/drm/nouveau/core/subdev/devinit/nv04.c b/drivers/gpu/drm/nouveau/core/subdev/devinit/nv04.c
index 27c8235..7037eae 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/devinit/nv04.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/devinit/nv04.c
@@ -27,12 +27,7 @@
 #include <subdev/vga.h>
 
 #include "fbmem.h"
-#include "priv.h"
-
-struct nv04_devinit_priv {
-	struct nouveau_devinit base;
-	int owner;
-};
+#include "nv04.h"
 
 static void
 nv04_devinit_meminit(struct nouveau_devinit *devinit)
@@ -393,17 +388,21 @@
 nv04_devinit_fini(struct nouveau_object *object, bool suspend)
 {
 	struct nv04_devinit_priv *priv = (void *)object;
+	int ret;
 
 	/* make i2c busses accessible */
 	nv_mask(priv, 0x000200, 0x00000001, 0x00000001);
 
-	/* unlock extended vga crtc regs, and unslave crtcs */
-	nv_lockvgac(priv, false);
+	ret = nouveau_devinit_fini(&priv->base, suspend);
+	if (ret)
+		return ret;
+
+	/* unslave crtcs */
 	if (priv->owner < 0)
 		priv->owner = nv_rdvgaowner(priv);
 	nv_wrvgaowner(priv, 0);
 
-	return nouveau_devinit_fini(&priv->base, suspend);
+	return 0;
 }
 
 int
@@ -431,14 +430,13 @@
 {
 	struct nv04_devinit_priv *priv = (void *)object;
 
-	/* restore vga owner saved at first init, and lock crtc regs  */
+	/* restore vga owner saved at first init */
 	nv_wrvgaowner(priv, priv->owner);
-	nv_lockvgac(priv, true);
 
 	nouveau_devinit_destroy(&priv->base);
 }
 
-static int
+int
 nv04_devinit_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
 		  struct nouveau_oclass *oclass, void *data, u32 size,
 		  struct nouveau_object **pobject)
@@ -451,19 +449,19 @@
 	if (ret)
 		return ret;
 
-	priv->base.meminit = nv04_devinit_meminit;
-	priv->base.pll_set = nv04_devinit_pll_set;
 	priv->owner = -1;
 	return 0;
 }
 
-struct nouveau_oclass
-nv04_devinit_oclass = {
-	.handle = NV_SUBDEV(DEVINIT, 0x04),
-	.ofuncs = &(struct nouveau_ofuncs) {
+struct nouveau_oclass *
+nv04_devinit_oclass = &(struct nouveau_devinit_impl) {
+	.base.handle = NV_SUBDEV(DEVINIT, 0x04),
+	.base.ofuncs = &(struct nouveau_ofuncs) {
 		.ctor = nv04_devinit_ctor,
 		.dtor = nv04_devinit_dtor,
 		.init = nv04_devinit_init,
 		.fini = nv04_devinit_fini,
 	},
-};
+	.meminit = nv04_devinit_meminit,
+	.pll_set = nv04_devinit_pll_set,
+}.base;
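
The per-chipset classes are now exported as pointers into file-scope compound literals of struct nouveau_devinit_impl, with the generic nouveau_oclass embedded as .base; code that holds the base pointer (as the ctor above does) casts back to the impl to reach the meminit/pll_set/disable hooks. Below is a self-contained sketch of that C idiom with made-up type names; it relies on file-scope compound literals (C99/GNU C), exactly as the driver does.

#include <stdio.h>

struct base_class {
	unsigned handle;
};

struct impl_class {
	struct base_class base;	/* must stay the first member */
	void (*meminit)(void);
};

static void fake_meminit(void)
{
	printf("meminit hook called\n");
}

/* the exported symbol points at the embedded base of the wider impl */
struct base_class *example_oclass = &(struct impl_class) {
	.base.handle = 0x04,
	.meminit = fake_meminit,
}.base;

int main(void)
{
	/* holders of the base pointer cast back to the impl for the hooks */
	struct impl_class *impl = (struct impl_class *)example_oclass;

	printf("handle %02x\n", example_oclass->handle);
	impl->meminit();
	return 0;
}
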
diff --git a/drivers/gpu/drm/nouveau/core/subdev/devinit/nv04.h b/drivers/gpu/drm/nouveau/core/subdev/devinit/nv04.h
new file mode 100644
index 0000000..23470a5
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/subdev/devinit/nv04.h
@@ -0,0 +1,23 @@
+#ifndef __NVKM_DEVINIT_NV04_H__
+#define __NVKM_DEVINIT_NV04_H__
+
+#include "priv.h"
+
+struct nv04_devinit_priv {
+	struct nouveau_devinit base;
+	int owner;
+};
+
+int  nv04_devinit_ctor(struct nouveau_object *, struct nouveau_object *,
+		       struct nouveau_oclass *, void *, u32,
+		       struct nouveau_object **);
+void nv04_devinit_dtor(struct nouveau_object *);
+int  nv04_devinit_init(struct nouveau_object *);
+int  nv04_devinit_fini(struct nouveau_object *, bool);
+int  nv04_devinit_pll_set(struct nouveau_devinit *, u32, u32);
+
+void setPLL_single(struct nouveau_devinit *, u32, struct nouveau_pll_vals *);
+void setPLL_double_highregs(struct nouveau_devinit *, u32, struct nouveau_pll_vals *);
+void setPLL_double_lowregs(struct nouveau_devinit *, u32, struct nouveau_pll_vals *);
+
+#endif
diff --git a/drivers/gpu/drm/nouveau/core/subdev/devinit/nv05.c b/drivers/gpu/drm/nouveau/core/subdev/devinit/nv05.c
index b1912a8a..98b7e67 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/devinit/nv05.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/devinit/nv05.c
@@ -29,12 +29,7 @@
 #include <subdev/vga.h>
 
 #include "fbmem.h"
-#include "priv.h"
-
-struct nv05_devinit_priv {
-	struct nouveau_devinit base;
-	u8 owner;
-};
+#include "nv04.h"
 
 static void
 nv05_devinit_meminit(struct nouveau_devinit *devinit)
@@ -49,7 +44,7 @@
 		{ 0x06, 0x00 },
 		{ 0x00, 0x00 }
 	};
-	struct nv05_devinit_priv *priv = (void *)devinit;
+	struct nv04_devinit_priv *priv = (void *)devinit;
 	struct nouveau_bios *bios = nouveau_bios(priv);
 	struct io_mapping *fb;
 	u32 patt = 0xdeadbeef;
@@ -130,31 +125,15 @@
 	fbmem_fini(fb);
 }
 
-static int
-nv05_devinit_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
-		  struct nouveau_oclass *oclass, void *data, u32 size,
-		  struct nouveau_object **pobject)
-{
-	struct nv05_devinit_priv *priv;
-	int ret;
-
-	ret = nouveau_devinit_create(parent, engine, oclass, &priv);
-	*pobject = nv_object(priv);
-	if (ret)
-		return ret;
-
-	priv->base.meminit = nv05_devinit_meminit;
-	priv->base.pll_set = nv04_devinit_pll_set;
-	return 0;
-}
-
-struct nouveau_oclass
-nv05_devinit_oclass = {
-	.handle = NV_SUBDEV(DEVINIT, 0x05),
-	.ofuncs = &(struct nouveau_ofuncs) {
-		.ctor = nv05_devinit_ctor,
+struct nouveau_oclass *
+nv05_devinit_oclass = &(struct nouveau_devinit_impl) {
+	.base.handle = NV_SUBDEV(DEVINIT, 0x05),
+	.base.ofuncs = &(struct nouveau_ofuncs) {
+		.ctor = nv04_devinit_ctor,
 		.dtor = nv04_devinit_dtor,
 		.init = nv04_devinit_init,
 		.fini = nv04_devinit_fini,
 	},
-};
+	.meminit = nv05_devinit_meminit,
+	.pll_set = nv04_devinit_pll_set,
+}.base;
diff --git a/drivers/gpu/drm/nouveau/core/subdev/devinit/nv10.c b/drivers/gpu/drm/nouveau/core/subdev/devinit/nv10.c
index 8d274db..32b3d21 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/devinit/nv10.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/devinit/nv10.c
@@ -27,17 +27,12 @@
 #include <subdev/vga.h>
 
 #include "fbmem.h"
-#include "priv.h"
-
-struct nv10_devinit_priv {
-	struct nouveau_devinit base;
-	u8 owner;
-};
+#include "nv04.h"
 
 static void
 nv10_devinit_meminit(struct nouveau_devinit *devinit)
 {
-	struct nv10_devinit_priv *priv = (void *)devinit;
+	struct nv04_devinit_priv *priv = (void *)devinit;
 	static const int mem_width[] = { 0x10, 0x00, 0x20 };
 	int mem_width_count;
 	uint32_t patt = 0xdeadbeef;
@@ -101,31 +96,15 @@
 	fbmem_fini(fb);
 }
 
-static int
-nv10_devinit_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
-		  struct nouveau_oclass *oclass, void *data, u32 size,
-		  struct nouveau_object **pobject)
-{
-	struct nv10_devinit_priv *priv;
-	int ret;
-
-	ret = nouveau_devinit_create(parent, engine, oclass, &priv);
-	*pobject = nv_object(priv);
-	if (ret)
-		return ret;
-
-	priv->base.meminit = nv10_devinit_meminit;
-	priv->base.pll_set = nv04_devinit_pll_set;
-	return 0;
-}
-
-struct nouveau_oclass
-nv10_devinit_oclass = {
-	.handle = NV_SUBDEV(DEVINIT, 0x10),
-	.ofuncs = &(struct nouveau_ofuncs) {
-		.ctor = nv10_devinit_ctor,
+struct nouveau_oclass *
+nv10_devinit_oclass = &(struct nouveau_devinit_impl) {
+	.base.handle = NV_SUBDEV(DEVINIT, 0x10),
+	.base.ofuncs = &(struct nouveau_ofuncs) {
+		.ctor = nv04_devinit_ctor,
 		.dtor = nv04_devinit_dtor,
 		.init = nv04_devinit_init,
 		.fini = nv04_devinit_fini,
 	},
-};
+	.meminit = nv10_devinit_meminit,
+	.pll_set = nv04_devinit_pll_set,
+}.base;
diff --git a/drivers/gpu/drm/nouveau/core/subdev/devinit/nv1a.c b/drivers/gpu/drm/nouveau/core/subdev/devinit/nv1a.c
index e9743cd..526d0c6 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/devinit/nv1a.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/devinit/nv1a.c
@@ -22,37 +22,16 @@
  * Authors: Ben Skeggs
  */
 
-#include "priv.h"
+#include "nv04.h"
 
-struct nv1a_devinit_priv {
-	struct nouveau_devinit base;
-	u8 owner;
-};
-
-static int
-nv1a_devinit_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
-		  struct nouveau_oclass *oclass, void *data, u32 size,
-		  struct nouveau_object **pobject)
-{
-	struct nv1a_devinit_priv *priv;
-	int ret;
-
-	ret = nouveau_devinit_create(parent, engine, oclass, &priv);
-	*pobject = nv_object(priv);
-	if (ret)
-		return ret;
-
-	priv->base.pll_set = nv04_devinit_pll_set;
-	return 0;
-}
-
-struct nouveau_oclass
-nv1a_devinit_oclass = {
-	.handle = NV_SUBDEV(DEVINIT, 0x1a),
-	.ofuncs = &(struct nouveau_ofuncs) {
-		.ctor = nv1a_devinit_ctor,
+struct nouveau_oclass *
+nv1a_devinit_oclass = &(struct nouveau_devinit_impl) {
+	.base.handle = NV_SUBDEV(DEVINIT, 0x1a),
+	.base.ofuncs = &(struct nouveau_ofuncs) {
+		.ctor = nv04_devinit_ctor,
 		.dtor = nv04_devinit_dtor,
 		.init = nv04_devinit_init,
 		.fini = nv04_devinit_fini,
 	},
-};
+	.pll_set = nv04_devinit_pll_set,
+}.base;
diff --git a/drivers/gpu/drm/nouveau/core/subdev/devinit/nv20.c b/drivers/gpu/drm/nouveau/core/subdev/devinit/nv20.c
index 6cc6080..4689ba3 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/devinit/nv20.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/devinit/nv20.c
@@ -24,18 +24,13 @@
  *
  */
 
-#include "priv.h"
+#include "nv04.h"
 #include "fbmem.h"
 
-struct nv20_devinit_priv {
-	struct nouveau_devinit base;
-	u8 owner;
-};
-
 static void
 nv20_devinit_meminit(struct nouveau_devinit *devinit)
 {
-	struct nv20_devinit_priv *priv = (void *)devinit;
+	struct nv04_devinit_priv *priv = (void *)devinit;
 	struct nouveau_device *device = nv_device(priv);
 	uint32_t mask = (device->chipset >= 0x25 ? 0x300 : 0x900);
 	uint32_t amount, off;
@@ -65,31 +60,15 @@
 	fbmem_fini(fb);
 }
 
-static int
-nv20_devinit_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
-		  struct nouveau_oclass *oclass, void *data, u32 size,
-		  struct nouveau_object **pobject)
-{
-	struct nv20_devinit_priv *priv;
-	int ret;
-
-	ret = nouveau_devinit_create(parent, engine, oclass, &priv);
-	*pobject = nv_object(priv);
-	if (ret)
-		return ret;
-
-	priv->base.meminit = nv20_devinit_meminit;
-	priv->base.pll_set = nv04_devinit_pll_set;
-	return 0;
-}
-
-struct nouveau_oclass
-nv20_devinit_oclass = {
-	.handle = NV_SUBDEV(DEVINIT, 0x20),
-	.ofuncs = &(struct nouveau_ofuncs) {
-		.ctor = nv20_devinit_ctor,
+struct nouveau_oclass *
+nv20_devinit_oclass = &(struct nouveau_devinit_impl) {
+	.base.handle = NV_SUBDEV(DEVINIT, 0x20),
+	.base.ofuncs = &(struct nouveau_ofuncs) {
+		.ctor = nv04_devinit_ctor,
 		.dtor = nv04_devinit_dtor,
 		.init = nv04_devinit_init,
 		.fini = nv04_devinit_fini,
 	},
-};
+	.meminit = nv20_devinit_meminit,
+	.pll_set = nv04_devinit_pll_set,
+}.base;
diff --git a/drivers/gpu/drm/nouveau/core/subdev/devinit/nv50.c b/drivers/gpu/drm/nouveau/core/subdev/devinit/nv50.c
index 6df7224..b46c62a 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/devinit/nv50.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/devinit/nv50.c
@@ -28,9 +28,9 @@
 #include <subdev/bios/init.h>
 #include <subdev/vga.h>
 
-#include "priv.h"
+#include "nv50.h"
 
-static int
+int
 nv50_devinit_pll_set(struct nouveau_devinit *devinit, u32 type, u32 freq)
 {
 	struct nv50_devinit_priv *priv = (void *)devinit;
@@ -74,6 +74,19 @@
 	return 0;
 }
 
+static u64
+nv50_devinit_disable(struct nouveau_devinit *devinit)
+{
+	struct nv50_devinit_priv *priv = (void *)devinit;
+	u32 r001540 = nv_rd32(priv, 0x001540);
+	u64 disable = 0ULL;
+
+	if (!(r001540 & 0x40000000))
+		disable |= (1ULL << NVDEV_ENGINE_MPEG);
+
+	return disable;
+}
+
 int
 nv50_devinit_init(struct nouveau_object *object)
 {
@@ -120,7 +133,7 @@
 	return 0;
 }
 
-static int
+int
 nv50_devinit_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
 		  struct nouveau_oclass *oclass, void *data, u32 size,
 		  struct nouveau_object **pobject)
@@ -133,17 +146,18 @@
 	if (ret)
 		return ret;
 
-	priv->base.pll_set = nv50_devinit_pll_set;
 	return 0;
 }
 
-struct nouveau_oclass
-nv50_devinit_oclass = {
-	.handle = NV_SUBDEV(DEVINIT, 0x50),
-	.ofuncs = &(struct nouveau_ofuncs) {
+struct nouveau_oclass *
+nv50_devinit_oclass = &(struct nouveau_devinit_impl) {
+	.base.handle = NV_SUBDEV(DEVINIT, 0x50),
+	.base.ofuncs = &(struct nouveau_ofuncs) {
 		.ctor = nv50_devinit_ctor,
 		.dtor = _nouveau_devinit_dtor,
 		.init = nv50_devinit_init,
 		.fini = _nouveau_devinit_fini,
 	},
-};
+	.pll_set = nv50_devinit_pll_set,
+	.disable = nv50_devinit_disable,
+}.base;
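
nv50_devinit_disable() above turns bits read back from the 0x001540 strap/fuse register into a 64-bit mask of engines the device should treat as absent; the nv84/nv98/nva3/nvaf/nvc0 variants that follow do the same with additional registers and engines. A hedged sketch of that idea follows, with the register values faked and illustrative engine indices rather than the real NVDEV_ENGINE_* numbering.

#include <stdint.h>
#include <stdio.h>

enum { ENG_MPEG = 1, ENG_VP = 2, ENG_DISP = 3 };

static uint64_t build_disable_mask(uint32_t r001540, uint32_t r00154c)
{
	uint64_t disable = 0;

	if (!(r001540 & 0x40000000))
		disable |= 1ULL << ENG_MPEG;
	if (!(r00154c & 0x00000004))
		disable |= 1ULL << ENG_DISP;
	return disable;
}

int main(void)
{
	/* pretend the MPEG fuse is blown but the display engine is present */
	uint64_t mask = build_disable_mask(0x00000000, 0x00000004);

	printf("disable mask %#llx\n", (unsigned long long)mask);
	return 0;
}
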
diff --git a/drivers/gpu/drm/nouveau/core/subdev/devinit/nv50.h b/drivers/gpu/drm/nouveau/core/subdev/devinit/nv50.h
new file mode 100644
index 0000000..141c27e
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/subdev/devinit/nv50.h
@@ -0,0 +1,18 @@
+#ifndef __NVKM_DEVINIT_NV50_H__
+#define __NVKM_DEVINIT_NV50_H__
+
+#include "priv.h"
+
+struct nv50_devinit_priv {
+	struct nouveau_devinit base;
+};
+
+int  nv50_devinit_ctor(struct nouveau_object *, struct nouveau_object *,
+		       struct nouveau_oclass *, void *, u32,
+		       struct nouveau_object **);
+int  nv50_devinit_init(struct nouveau_object *);
+int  nv50_devinit_pll_set(struct nouveau_devinit *, u32, u32);
+
+int  nva3_devinit_pll_set(struct nouveau_devinit *, u32, u32);
+
+#endif
diff --git a/drivers/gpu/drm/nouveau/core/subdev/devinit/nv84.c b/drivers/gpu/drm/nouveau/core/subdev/devinit/nv84.c
new file mode 100644
index 0000000..7874225
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/subdev/devinit/nv84.c
@@ -0,0 +1,63 @@
+/*
+ * Copyright 2013 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#include "nv50.h"
+
+static u64
+nv84_devinit_disable(struct nouveau_devinit *devinit)
+{
+	struct nv50_devinit_priv *priv = (void *)devinit;
+	u32 r001540 = nv_rd32(priv, 0x001540);
+	u32 r00154c = nv_rd32(priv, 0x00154c);
+	u64 disable = 0ULL;
+
+	if (!(r001540 & 0x40000000)) {
+		disable |= (1ULL << NVDEV_ENGINE_MPEG);
+		disable |= (1ULL << NVDEV_ENGINE_VP);
+		disable |= (1ULL << NVDEV_ENGINE_BSP);
+		disable |= (1ULL << NVDEV_ENGINE_CRYPT);
+	}
+
+	if (!(r00154c & 0x00000004))
+		disable |= (1ULL << NVDEV_ENGINE_DISP);
+	if (!(r00154c & 0x00000020))
+		disable |= (1ULL << NVDEV_ENGINE_BSP);
+	if (!(r00154c & 0x00000040))
+		disable |= (1ULL << NVDEV_ENGINE_CRYPT);
+
+	return disable;
+}
+
+struct nouveau_oclass *
+nv84_devinit_oclass = &(struct nouveau_devinit_impl) {
+	.base.handle = NV_SUBDEV(DEVINIT, 0x84),
+	.base.ofuncs = &(struct nouveau_ofuncs) {
+		.ctor = nv50_devinit_ctor,
+		.dtor = _nouveau_devinit_dtor,
+		.init = nv50_devinit_init,
+		.fini = _nouveau_devinit_fini,
+	},
+	.pll_set = nv50_devinit_pll_set,
+	.disable = nv84_devinit_disable,
+}.base;
diff --git a/drivers/gpu/drm/nouveau/core/subdev/devinit/nv98.c b/drivers/gpu/drm/nouveau/core/subdev/devinit/nv98.c
new file mode 100644
index 0000000..2b0e963
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/subdev/devinit/nv98.c
@@ -0,0 +1,62 @@
+/*
+ * Copyright 2013 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#include "nv50.h"
+
+static u64
+nv98_devinit_disable(struct nouveau_devinit *devinit)
+{
+	struct nv50_devinit_priv *priv = (void *)devinit;
+	u32 r001540 = nv_rd32(priv, 0x001540);
+	u32 r00154c = nv_rd32(priv, 0x00154c);
+	u64 disable = 0ULL;
+
+	if (!(r001540 & 0x40000000)) {
+		disable |= (1ULL << NVDEV_ENGINE_VP);
+		disable |= (1ULL << NVDEV_ENGINE_BSP);
+		disable |= (1ULL << NVDEV_ENGINE_PPP);
+	}
+
+	if (!(r00154c & 0x00000004))
+		disable |= (1ULL << NVDEV_ENGINE_DISP);
+	if (!(r00154c & 0x00000020))
+		disable |= (1ULL << NVDEV_ENGINE_BSP);
+	if (!(r00154c & 0x00000040))
+		disable |= (1ULL << NVDEV_ENGINE_CRYPT);
+
+	return disable;
+}
+
+struct nouveau_oclass *
+nv98_devinit_oclass = &(struct nouveau_devinit_impl) {
+	.base.handle = NV_SUBDEV(DEVINIT, 0x98),
+	.base.ofuncs = &(struct nouveau_ofuncs) {
+		.ctor = nv50_devinit_ctor,
+		.dtor = _nouveau_devinit_dtor,
+		.init = nv50_devinit_init,
+		.fini = _nouveau_devinit_fini,
+	},
+	.pll_set = nv50_devinit_pll_set,
+	.disable = nv98_devinit_disable,
+}.base;
diff --git a/drivers/gpu/drm/nouveau/core/subdev/devinit/nva3.c b/drivers/gpu/drm/nouveau/core/subdev/devinit/nva3.c
index 76a68b2..6dedf1d 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/devinit/nva3.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/devinit/nva3.c
@@ -22,12 +22,12 @@
  * Authors: Ben Skeggs
  */
 
-#include "priv.h"
+#include "nv50.h"
 
-static int
+int
 nva3_devinit_pll_set(struct nouveau_devinit *devinit, u32 type, u32 freq)
 {
-	struct nva3_devinit_priv *priv = (void *)devinit;
+	struct nv50_devinit_priv *priv = (void *)devinit;
 	struct nouveau_bios *bios = nouveau_bios(priv);
 	struct nvbios_pll info;
 	int N, fN, M, P;
@@ -58,30 +58,38 @@
 	return ret;
 }
 
-static int
-nva3_devinit_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
-		  struct nouveau_oclass *oclass, void *data, u32 size,
-		  struct nouveau_object **pobject)
+static u64
+nva3_devinit_disable(struct nouveau_devinit *devinit)
 {
-	struct nv50_devinit_priv *priv;
-	int ret;
+	struct nv50_devinit_priv *priv = (void *)devinit;
+	u32 r001540 = nv_rd32(priv, 0x001540);
+	u32 r00154c = nv_rd32(priv, 0x00154c);
+	u64 disable = 0ULL;
 
-	ret = nouveau_devinit_create(parent, engine, oclass, &priv);
-	*pobject = nv_object(priv);
-	if (ret)
-		return ret;
+	if (!(r001540 & 0x40000000)) {
+		disable |= (1ULL << NVDEV_ENGINE_VP);
+		disable |= (1ULL << NVDEV_ENGINE_PPP);
+	}
 
-	priv->base.pll_set = nva3_devinit_pll_set;
-	return 0;
+	if (!(r00154c & 0x00000004))
+		disable |= (1ULL << NVDEV_ENGINE_DISP);
+	if (!(r00154c & 0x00000020))
+		disable |= (1ULL << NVDEV_ENGINE_BSP);
+	if (!(r00154c & 0x00000200))
+		disable |= (1ULL << NVDEV_ENGINE_COPY0);
+
+	return disable;
 }
 
-struct nouveau_oclass
-nva3_devinit_oclass = {
-	.handle = NV_SUBDEV(DEVINIT, 0xa3),
-	.ofuncs = &(struct nouveau_ofuncs) {
-		.ctor = nva3_devinit_ctor,
+struct nouveau_oclass *
+nva3_devinit_oclass = &(struct nouveau_devinit_impl) {
+	.base.handle = NV_SUBDEV(DEVINIT, 0xa3),
+	.base.ofuncs = &(struct nouveau_ofuncs) {
+		.ctor = nv50_devinit_ctor,
 		.dtor = _nouveau_devinit_dtor,
 		.init = nv50_devinit_init,
 		.fini = _nouveau_devinit_fini,
 	},
-};
+	.pll_set = nva3_devinit_pll_set,
+	.disable = nva3_devinit_disable,
+}.base;
diff --git a/drivers/gpu/drm/nouveau/core/subdev/devinit/nvaf.c b/drivers/gpu/drm/nouveau/core/subdev/devinit/nvaf.c
new file mode 100644
index 0000000..4fc68d2
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/subdev/devinit/nvaf.c
@@ -0,0 +1,63 @@
+/*
+ * Copyright 2013 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#include "nv50.h"
+
+static u64
+nvaf_devinit_disable(struct nouveau_devinit *devinit)
+{
+	struct nv50_devinit_priv *priv = (void *)devinit;
+	u32 r001540 = nv_rd32(priv, 0x001540);
+	u32 r00154c = nv_rd32(priv, 0x00154c);
+	u64 disable = 0;
+
+	if (!(r001540 & 0x40000000)) {
+		disable |= (1ULL << NVDEV_ENGINE_VP);
+		disable |= (1ULL << NVDEV_ENGINE_PPP);
+	}
+
+	if (!(r00154c & 0x00000004))
+		disable |= (1ULL << NVDEV_ENGINE_DISP);
+	if (!(r00154c & 0x00000020))
+		disable |= (1ULL << NVDEV_ENGINE_BSP);
+	if (!(r00154c & 0x00000040))
+		disable |= (1ULL << NVDEV_ENGINE_VIC);
+	if (!(r00154c & 0x00000200))
+		disable |= (1ULL << NVDEV_ENGINE_COPY0);
+
+	return disable;
+}
+
+struct nouveau_oclass *
+nvaf_devinit_oclass = &(struct nouveau_devinit_impl) {
+	.base.handle = NV_SUBDEV(DEVINIT, 0xaf),
+	.base.ofuncs = &(struct nouveau_ofuncs) {
+		.ctor = nv50_devinit_ctor,
+		.dtor = _nouveau_devinit_dtor,
+		.init = nv50_devinit_init,
+		.fini = _nouveau_devinit_fini,
+	},
+	.pll_set = nva3_devinit_pll_set,
+	.disable = nvaf_devinit_disable,
+}.base;
diff --git a/drivers/gpu/drm/nouveau/core/subdev/devinit/nvc0.c b/drivers/gpu/drm/nouveau/core/subdev/devinit/nvc0.c
index 19e265b..fa7e637 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/devinit/nvc0.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/devinit/nvc0.c
@@ -22,12 +22,12 @@
  * Authors: Ben Skeggs
  */
 
-#include "priv.h"
+#include "nv50.h"
 
 static int
 nvc0_devinit_pll_set(struct nouveau_devinit *devinit, u32 type, u32 freq)
 {
-	struct nvc0_devinit_priv *priv = (void *)devinit;
+	struct nv50_devinit_priv *priv = (void *)devinit;
 	struct nouveau_bios *bios = nouveau_bios(priv);
 	struct nvbios_pll info;
 	int N, fN, M, P;
@@ -59,6 +59,33 @@
 	return ret;
 }
 
+static u64
+nvc0_devinit_disable(struct nouveau_devinit *devinit)
+{
+	struct nv50_devinit_priv *priv = (void *)devinit;
+	u32 r022500 = nv_rd32(priv, 0x022500);
+	u64 disable = 0ULL;
+
+	if (r022500 & 0x00000001)
+		disable |= (1ULL << NVDEV_ENGINE_DISP);
+
+	if (r022500 & 0x00000002) {
+		disable |= (1ULL << NVDEV_ENGINE_VP);
+		disable |= (1ULL << NVDEV_ENGINE_PPP);
+	}
+
+	if (r022500 & 0x00000004)
+		disable |= (1ULL << NVDEV_ENGINE_BSP);
+	if (r022500 & 0x00000008)
+		disable |= (1ULL << NVDEV_ENGINE_VENC);
+	if (r022500 & 0x00000100)
+		disable |= (1ULL << NVDEV_ENGINE_COPY0);
+	if (r022500 & 0x00000200)
+		disable |= (1ULL << NVDEV_ENGINE_COPY1);
+
+	return disable;
+}
+
 static int
 nvc0_devinit_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
 		  struct nouveau_oclass *oclass, void *data, u32 size,
@@ -72,19 +99,20 @@
 	if (ret)
 		return ret;
 
-	priv->base.pll_set = nvc0_devinit_pll_set;
 	if (nv_rd32(priv, 0x022500) & 0x00000001)
 		priv->base.post = true;
 	return 0;
 }
 
-struct nouveau_oclass
-nvc0_devinit_oclass = {
-	.handle = NV_SUBDEV(DEVINIT, 0xc0),
-	.ofuncs = &(struct nouveau_ofuncs) {
+struct nouveau_oclass *
+nvc0_devinit_oclass = &(struct nouveau_devinit_impl) {
+	.base.handle = NV_SUBDEV(DEVINIT, 0xc0),
+	.base.ofuncs = &(struct nouveau_ofuncs) {
 		.ctor = nvc0_devinit_ctor,
 		.dtor = _nouveau_devinit_dtor,
 		.init = nv50_devinit_init,
 		.fini = _nouveau_devinit_fini,
 	},
-};
+	.pll_set = nvc0_devinit_pll_set,
+	.disable = nvc0_devinit_disable,
+}.base;
diff --git a/drivers/gpu/drm/nouveau/core/subdev/devinit/priv.h b/drivers/gpu/drm/nouveau/core/subdev/devinit/priv.h
index 7d622e2..822a2fb 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/devinit/priv.h
+++ b/drivers/gpu/drm/nouveau/core/subdev/devinit/priv.h
@@ -6,20 +6,32 @@
 #include <subdev/clock/pll.h>
 #include <subdev/devinit.h>
 
-void nv04_devinit_dtor(struct nouveau_object *);
-int  nv04_devinit_init(struct nouveau_object *);
-int  nv04_devinit_fini(struct nouveau_object *, bool);
-int  nv04_devinit_pll_set(struct nouveau_devinit *, u32, u32);
-
-void setPLL_single(struct nouveau_devinit *, u32, struct nouveau_pll_vals *);
-void setPLL_double_highregs(struct nouveau_devinit *, u32, struct nouveau_pll_vals *);
-void setPLL_double_lowregs(struct nouveau_devinit *, u32, struct nouveau_pll_vals *);
-
-
-struct nv50_devinit_priv {
-	struct nouveau_devinit base;
+struct nouveau_devinit_impl {
+	struct nouveau_oclass base;
+	void (*meminit)(struct nouveau_devinit *);
+	int  (*pll_set)(struct nouveau_devinit *, u32 type, u32 freq);
+	u64  (*disable)(struct nouveau_devinit *);
 };
 
-int  nv50_devinit_init(struct nouveau_object *);
+#define nouveau_devinit_create(p,e,o,d)                                        \
+	nouveau_devinit_create_((p), (e), (o), sizeof(**d), (void **)d)
+#define nouveau_devinit_destroy(p) ({                                          \
+	struct nouveau_devinit *d = (p);                                       \
+	_nouveau_devinit_dtor(nv_object(d));                                   \
+})
+#define nouveau_devinit_init(p) ({                                             \
+	struct nouveau_devinit *d = (p);                                       \
+	_nouveau_devinit_init(nv_object(d));                                   \
+})
+#define nouveau_devinit_fini(p,s) ({                                           \
+	struct nouveau_devinit *d = (p);                                       \
+	_nouveau_devinit_fini(nv_object(d), (s));                              \
+})
+
+int nouveau_devinit_create_(struct nouveau_object *, struct nouveau_object *,
+			    struct nouveau_oclass *, int, void **);
+void _nouveau_devinit_dtor(struct nouveau_object *);
+int _nouveau_devinit_init(struct nouveau_object *);
+int _nouveau_devinit_fini(struct nouveau_object *, bool suspend);
 
 #endif
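
The new nouveau_devinit_destroy/init/fini wrappers above are GNU C statement expressions: each takes the (possibly subclass-typed) pointer, converts it to the generic object, and forwards to the corresponding _nouveau_devinit_*() handler while still being usable as an expression. A minimal sketch of the same construction with made-up widget/object names follows; ({ ... }) is a GCC/Clang extension, as used throughout the kernel.

#include <stdio.h>

struct object { int refs; };
struct widget { struct object base; };

static int object_fini(struct object *obj, int suspend)
{
	printf("fini obj=%p suspend=%d\n", (void *)obj, suspend);
	return 0;
}

/* statement expression: declare a typed local, still behaves like a call */
#define widget_fini(p, s) ({                                            \
	struct widget *_w = (p);                                         \
	object_fini(&_w->base, (s));                                     \
})

int main(void)
{
	struct widget w = { .base = { .refs = 1 } };

	return widget_fini(&w, 0);
}
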
diff --git a/drivers/gpu/drm/nouveau/core/subdev/fb/gddr5.c b/drivers/gpu/drm/nouveau/core/subdev/fb/gddr5.c
index 34f9605..66fe959 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/fb/gddr5.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/fb/gddr5.c
@@ -25,35 +25,44 @@
 #include <subdev/bios.h>
 #include "priv.h"
 
+/* binary driver only executes this path if the condition (a) is true
+ * for any configuration (combination of rammap+ramcfg+timing) that
+ * can be reached on a given card.  for now, we will execute the branch
+ * unconditionally in the hope that a "false everywhere" in the bios
+ * tables doesn't actually mean "don't touch this".
+ */
+#define NOTE00(a) 1
+
 int
-nouveau_gddr5_calc(struct nouveau_ram *ram)
+nouveau_gddr5_calc(struct nouveau_ram *ram, bool nuts)
 {
-	struct nouveau_bios *bios = nouveau_bios(ram);
-	int pd, lf, xd, vh, vr, vo;
-	int WL, CL, WR, at, dt, ds;
+	int pd, lf, xd, vh, vr, vo, l3;
+	int WL, CL, WR, at[2], dt, ds;
 	int rq = ram->freq < 1000000; /* XXX */
 
-	switch (!!ram->ramcfg.data * ram->ramcfg.version) {
+	switch (ram->ramcfg.version) {
 	case 0x11:
-		pd =  (nv_ro08(bios, ram->ramcfg.data + 0x01) & 0x80) >> 7;
-		lf =  (nv_ro08(bios, ram->ramcfg.data + 0x01) & 0x40) >> 6;
-		xd = !(nv_ro08(bios, ram->ramcfg.data + 0x01) & 0x20);
-		vh =  (nv_ro08(bios, ram->ramcfg.data + 0x02) & 0x10) >> 4;
-		vr =  (nv_ro08(bios, ram->ramcfg.data + 0x02) & 0x04) >> 2;
-		vo =   nv_ro08(bios, ram->ramcfg.data + 0x06) & 0xff;
+		pd =  ram->next->bios.ramcfg_11_01_80;
+		lf =  ram->next->bios.ramcfg_11_01_40;
+		xd = !ram->next->bios.ramcfg_11_01_20;
+		vh =  ram->next->bios.ramcfg_11_02_10;
+		vr =  ram->next->bios.ramcfg_11_02_04;
+		vo =  ram->next->bios.ramcfg_11_06;
+		l3 = !ram->next->bios.ramcfg_11_07_02;
 		break;
 	default:
 		return -ENOSYS;
 	}
 
-	switch (!!ram->timing.data * ram->timing.version) {
+	switch (ram->timing.version) {
 	case 0x20:
-		WL = (nv_ro16(bios, ram->timing.data + 0x04) & 0x0f80) >> 7;
-		CL =  nv_ro08(bios, ram->timing.data + 0x04) & 0x1f;
-		WR =  nv_ro08(bios, ram->timing.data + 0x0a) & 0x7f;
-		at = (nv_ro08(bios, ram->timing.data + 0x2e) & 0xc0) >> 6;
-		dt =  nv_ro08(bios, ram->timing.data + 0x2e) & 0x03;
-		ds =  nv_ro08(bios, ram->timing.data + 0x2f) & 0x03;
+		WL = (ram->next->bios.timing[1] & 0x00000f80) >> 7;
+		CL = (ram->next->bios.timing[1] & 0x0000001f);
+		WR = (ram->next->bios.timing[2] & 0x007f0000) >> 16;
+		at[0] = ram->next->bios.timing_20_2e_c0;
+		at[1] = ram->next->bios.timing_20_2e_30;
+		dt =  ram->next->bios.timing_20_2e_03;
+		ds =  ram->next->bios.timing_20_2f_03;
 		break;
 	default:
 		return -ENOSYS;
@@ -71,13 +80,25 @@
 
 	ram->mr[1] &= ~0x0bf;
 	ram->mr[1] |= (xd & 0x01) << 7;
-	ram->mr[1] |= (at & 0x03) << 4;
+	ram->mr[1] |= (at[0] & 0x03) << 4;
 	ram->mr[1] |= (dt & 0x03) << 2;
 	ram->mr[1] |= (ds & 0x03) << 0;
 
+	/* this seems wrong, alternate field used for the broadcast
+	 * on nuts vs non-nuts configs..  meh, it matches for now.
+	 */
+	ram->mr1_nuts = ram->mr[1];
+	if (nuts) {
+		ram->mr[1] &= ~0x030;
+		ram->mr[1] |= (at[1] & 0x03) << 4;
+	}
+
 	ram->mr[3] &= ~0x020;
 	ram->mr[3] |= (rq & 0x01) << 5;
 
+	ram->mr[5] &= ~0x004;
+	ram->mr[5] |= (l3 << 2);
+
 	if (!vo)
 		vo = (ram->mr[6] & 0xff0) >> 4;
 	if (ram->mr[6] & 0x001)
@@ -86,11 +107,16 @@
 	ram->mr[6] |= (vo & 0xff) << 4;
 	ram->mr[6] |= (pd & 0x01) << 0;
 
-	if (!(ram->mr[7] & 0x100))
-		vr = 0; /* binary driver does this.. bug? */
-	ram->mr[7] &= ~0x188;
-	ram->mr[7] |= (vr & 0x01) << 8;
+	if (NOTE00(vr)) {
+		ram->mr[7] &= ~0x300;
+		ram->mr[7] |= (vr & 0x03) << 8;
+	}
+	ram->mr[7] &= ~0x088;
 	ram->mr[7] |= (vh & 0x01) << 7;
 	ram->mr[7] |= (lf & 0x01) << 3;
+
+	ram->mr[8] &= ~0x003;
+	ram->mr[8] |= (WR & 0x10) >> 3;
+	ram->mr[8] |= (CL & 0x10) >> 4;
 	return 0;
 }
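
nouveau_gddr5_calc() above now spills the high bit of the write-recovery and CAS latencies into MR8 bits 1 and 0, so values above 15 still encode alongside the low bits carried elsewhere in the mode registers. A small worked example of just that MR8 packing follows; the latency values are arbitrary.

#include <stdio.h>

int main(void)
{
	int WR = 17, CL = 18;	/* example latencies, in clocks */
	unsigned mr8 = 0;

	mr8 &= ~0x003u;
	mr8 |= (WR & 0x10) >> 3;	/* WR bit 4 -> MR8 bit 1 */
	mr8 |= (CL & 0x10) >> 4;	/* CL bit 4 -> MR8 bit 0 */

	printf("MR8 extension bits: %#x\n", mr8);
	return 0;
}
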
diff --git a/drivers/gpu/drm/nouveau/core/subdev/fb/nv1a.c b/drivers/gpu/drm/nouveau/core/subdev/fb/nv1a.c
index 9159a5c..265d125 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/fb/nv1a.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/fb/nv1a.c
@@ -36,7 +36,7 @@
 		.fini = _nouveau_fb_fini,
 	},
 	.base.memtype = nv04_fb_memtype_valid,
-	.base.ram = &nv10_ram_oclass,
+	.base.ram = &nv1a_ram_oclass,
 	.tile.regions = 8,
 	.tile.init = nv10_fb_tile_init,
 	.tile.fini = nv10_fb_tile_fini,
diff --git a/drivers/gpu/drm/nouveau/core/subdev/fb/nvc0.c b/drivers/gpu/drm/nouveau/core/subdev/fb/nvc0.c
index e5fc37c..45470e1 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/fb/nvc0.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/fb/nvc0.c
@@ -33,6 +33,21 @@
 	return likely((nvc0_pte_storage_type_map[memtype] != 0xff));
 }
 
+static void
+nvc0_fb_intr(struct nouveau_subdev *subdev)
+{
+	struct nvc0_fb_priv *priv = (void *)subdev;
+	u32 intr = nv_rd32(priv, 0x000100);
+	if (intr & 0x08000000) {
+		nv_debug(priv, "PFFB intr\n");
+		intr &= ~0x08000000;
+	}
+	if (intr & 0x00002000) {
+		nv_debug(priv, "PBFB intr\n");
+		intr &= ~0x00002000;
+	}
+}
+
 int
 nvc0_fb_init(struct nouveau_object *object)
 {
@@ -86,6 +101,7 @@
 			return -EFAULT;
 	}
 
+	nv_subdev(priv)->intr = nvc0_fb_intr;
 	return 0;
 }
 
diff --git a/drivers/gpu/drm/nouveau/core/subdev/fb/priv.h b/drivers/gpu/drm/nouveau/core/subdev/fb/priv.h
index 4931252..edaf95d 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/fb/priv.h
+++ b/drivers/gpu/drm/nouveau/core/subdev/fb/priv.h
@@ -34,7 +34,7 @@
 extern struct nouveau_oclass nve0_ram_oclass;
 
 int nouveau_sddr3_calc(struct nouveau_ram *ram);
-int nouveau_gddr5_calc(struct nouveau_ram *ram);
+int nouveau_gddr5_calc(struct nouveau_ram *ram, bool nuts);
 
 #define nouveau_fb_create(p,e,c,d)                                             \
 	nouveau_fb_create_((p), (e), (c), sizeof(**d), (void **)d)
diff --git a/drivers/gpu/drm/nouveau/core/subdev/fb/ramnv50.c b/drivers/gpu/drm/nouveau/core/subdev/fb/ramnv50.c
index 76762a1..c7fdb3a 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/fb/ramnv50.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/fb/ramnv50.c
@@ -70,13 +70,11 @@
 	struct nv50_ramseq *hwsq = &ram->hwsq;
 	struct nvbios_perfE perfE;
 	struct nvbios_pll mpll;
-	struct bit_entry M;
 	struct {
 		u32 data;
 		u8  size;
 	} ramcfg, timing;
-	u8  ver, hdr, cnt, strap;
-	u32 data;
+	u8  ver, hdr, cnt, len, strap;
 	int N1, M1, N2, M2, P;
 	int ret, i;
 
@@ -93,16 +91,7 @@
 	} while (perfE.memory < freq);
 
 	/* locate specific data set for the attached memory */
-	if (bit_entry(bios, 'M', &M) || M.version != 1 || M.length < 5) {
-		nv_error(pfb, "invalid/missing memory table\n");
-		return -EINVAL;
-	}
-
-	strap = (nv_rd32(pfb, 0x101000) & 0x0000003c) >> 2;
-	data = nv_ro16(bios, M.offset + 3);
-	if (data)
-		strap = nv_ro08(bios, data + strap);
-
+	strap = nvbios_ramcfg_index(bios);
 	if (strap >= cnt) {
 		nv_error(pfb, "invalid ramcfg strap\n");
 		return -EINVAL;
@@ -113,7 +102,8 @@
 	/* lookup memory timings, if bios says they're present */
 	strap = nv_ro08(bios, ramcfg.data + 0x01);
 	if (strap != 0xff) {
-		timing.data = nvbios_timing_entry(bios, strap, &ver, &hdr);
+		timing.data = nvbios_timingEe(bios, strap, &ver, &hdr,
+					     &cnt, &len);
 		if (!timing.data || ver != 0x10 || hdr < 0x12) {
 			nv_error(pfb, "invalid/missing timing entry "
 				 "%02x %04x %02x %02x\n",
diff --git a/drivers/gpu/drm/nouveau/core/subdev/fb/ramnva3.c b/drivers/gpu/drm/nouveau/core/subdev/fb/ramnva3.c
index f6292cd..f4ae8aa 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/fb/ramnva3.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/fb/ramnva3.c
@@ -79,8 +79,7 @@
 	struct nva3_ram *ram = (void *)pfb->ram;
 	struct nva3_ramfuc *fuc = &ram->fuc;
 	struct nva3_clock_info mclk;
-	struct bit_entry M;
-	u8  ver, cnt, strap;
+	u8  ver, cnt, len, strap;
 	u32 data;
 	struct {
 		u32 data;
@@ -91,24 +90,15 @@
 	int ret;
 
 	/* lookup memory config data relevant to the target frequency */
-	rammap.data = nvbios_rammap_match(bios, freq / 1000, &ver, &rammap.size,
-					 &cnt, &ramcfg.size);
+	rammap.data = nvbios_rammapEm(bios, freq / 1000, &ver, &rammap.size,
+				     &cnt, &ramcfg.size);
 	if (!rammap.data || ver != 0x10 || rammap.size < 0x0e) {
 		nv_error(pfb, "invalid/missing rammap entry\n");
 		return -EINVAL;
 	}
 
 	/* locate specific data set for the attached memory */
-	if (bit_entry(bios, 'M', &M) || M.version != 2 || M.length < 3) {
-		nv_error(pfb, "invalid/missing memory table\n");
-		return -EINVAL;
-	}
-
-	strap = (nv_rd32(pfb, 0x101000) & 0x0000003c) >> 2;
-	data = nv_ro16(bios, M.offset + 1);
-	if (data)
-		strap = nv_ro08(bios, data + strap);
-
+	strap = nvbios_ramcfg_index(bios);
 	if (strap >= cnt) {
 		nv_error(pfb, "invalid ramcfg strap\n");
 		return -EINVAL;
@@ -123,8 +113,8 @@
 	/* lookup memory timings, if bios says they're present */
 	strap = nv_ro08(bios, ramcfg.data + 0x01);
 	if (strap != 0xff) {
-		timing.data = nvbios_timing_entry(bios, strap, &ver,
-						 &timing.size);
+		timing.data = nvbios_timingEe(bios, strap, &ver, &timing.size,
+					     &cnt, &len);
 		if (!timing.data || ver != 0x10 || timing.size < 0x19) {
 			nv_error(pfb, "invalid/missing timing entry\n");
 			return -EINVAL;
diff --git a/drivers/gpu/drm/nouveau/core/subdev/fb/ramnvc0.c b/drivers/gpu/drm/nouveau/core/subdev/fb/ramnvc0.c
index f464547..0391b82 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/fb/ramnvc0.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/fb/ramnvc0.c
@@ -23,7 +23,6 @@
  */
 
 #include <subdev/bios.h>
-#include <subdev/bios/bit.h>
 #include <subdev/bios/pll.h>
 #include <subdev/bios/rammap.h>
 #include <subdev/bios/timing.h>
@@ -134,9 +133,7 @@
 	struct nouveau_bios *bios = nouveau_bios(pfb);
 	struct nvc0_ram *ram = (void *)pfb->ram;
 	struct nvc0_ramfuc *fuc = &ram->fuc;
-	struct bit_entry M;
-	u8  ver, cnt, strap;
-	u32 data;
+	u8  ver, cnt, len, strap;
 	struct {
 		u32 data;
 		u8  size;
@@ -147,24 +144,15 @@
 	int ret;
 
 	/* lookup memory config data relevant to the target frequency */
-	rammap.data = nvbios_rammap_match(bios, freq / 1000, &ver, &rammap.size,
-					 &cnt, &ramcfg.size);
+	rammap.data = nvbios_rammapEm(bios, freq / 1000, &ver, &rammap.size,
+				     &cnt, &ramcfg.size);
 	if (!rammap.data || ver != 0x10 || rammap.size < 0x0e) {
 		nv_error(pfb, "invalid/missing rammap entry\n");
 		return -EINVAL;
 	}
 
 	/* locate specific data set for the attached memory */
-	if (bit_entry(bios, 'M', &M) || M.version != 2 || M.length < 3) {
-		nv_error(pfb, "invalid/missing memory table\n");
-		return -EINVAL;
-	}
-
-	strap = (nv_rd32(pfb, 0x101000) & 0x0000003c) >> 2;
-	data = nv_ro16(bios, M.offset + 1);
-	if (data)
-		strap = nv_ro08(bios, data + strap);
-
+	strap = nvbios_ramcfg_index(bios);
 	if (strap >= cnt) {
 		nv_error(pfb, "invalid ramcfg strap\n");
 		return -EINVAL;
@@ -179,8 +167,8 @@
 	/* lookup memory timings, if bios says they're present */
 	strap = nv_ro08(bios, ramcfg.data + 0x01);
 	if (strap != 0xff) {
-		timing.data = nvbios_timing_entry(bios, strap, &ver,
-						 &timing.size);
+		timing.data = nvbios_timingEe(bios, strap, &ver, &timing.size,
+					     &cnt, &len);
 		if (!timing.data || ver != 0x10 || timing.size < 0x19) {
 			nv_error(pfb, "invalid/missing timing entry\n");
 			return -EINVAL;
diff --git a/drivers/gpu/drm/nouveau/core/subdev/fb/ramnve0.c b/drivers/gpu/drm/nouveau/core/subdev/fb/ramnve0.c
index bc86cfd..3257c52 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/fb/ramnve0.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/fb/ramnve0.c
@@ -25,7 +25,6 @@
 #include <subdev/gpio.h>
 
 #include <subdev/bios.h>
-#include <subdev/bios/bit.h>
 #include <subdev/bios/pll.h>
 #include <subdev/bios/init.h>
 #include <subdev/bios/rammap.h>
@@ -42,6 +41,14 @@
 
 #include "ramfuc.h"
 
+/* binary driver only executes this path if the condition (a) is true
+ * for any configuration (combination of rammap+ramcfg+timing) that
+ * can be reached on a given card.  for now, we will execute the branch
+ * unconditionally in the hope that a "false everywhere" in the bios
+ * tables doesn't actually mean "don't touch this".
+ */
+#define NOTE00(a) 1
+
 struct nve0_ramfuc {
 	struct ramfuc base;
 
@@ -104,7 +111,9 @@
 	struct ramfuc_reg r_mr[16]; /* MR0 - MR8, MR15 */
 
 	struct ramfuc_reg r_0x62c000;
+
 	struct ramfuc_reg r_0x10f200;
+
 	struct ramfuc_reg r_0x10f210;
 	struct ramfuc_reg r_0x10f310;
 	struct ramfuc_reg r_0x10f314;
@@ -118,12 +127,17 @@
 	struct ramfuc_reg r_0x10f65c;
 	struct ramfuc_reg r_0x10f6bc;
 	struct ramfuc_reg r_0x100710;
-	struct ramfuc_reg r_0x10f750;
+	struct ramfuc_reg r_0x100750;
 };
 
 struct nve0_ram {
 	struct nouveau_ram base;
 	struct nve0_ramfuc fuc;
+
+	u32 parts;
+	u32 pmask;
+	u32 pnuts;
+
 	int from;
 	int mode;
 	int N1, fN1, M1, P1;
@@ -134,17 +148,17 @@
  * GDDR5
  ******************************************************************************/
 static void
-train(struct nve0_ramfuc *fuc, u32 magic)
+nve0_ram_train(struct nve0_ramfuc *fuc, u32 mask, u32 data)
 {
 	struct nve0_ram *ram = container_of(fuc, typeof(*ram), fuc);
-	struct nouveau_fb *pfb = nouveau_fb(ram);
-	const int mc = nv_rd32(pfb, 0x02243c);
-	int i;
+	u32 addr = 0x110974, i;
 
-	ram_mask(fuc, 0x10f910, 0xbc0e0000, magic);
-	ram_mask(fuc, 0x10f914, 0xbc0e0000, magic);
-	for (i = 0; i < mc; i++) {
-		const u32 addr = 0x110974 + (i * 0x1000);
+	ram_mask(fuc, 0x10f910, mask, data);
+	ram_mask(fuc, 0x10f914, mask, data);
+
+	for (i = 0; (data & 0x80000000) && i < ram->parts; addr += 0x1000, i++) {
+		if (ram->pmask & (1 << i))
+			continue;
 		ram_wait(fuc, addr, 0x0000000f, 0x00000000, 500000);
 	}
 }
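
nve0_ram_train() above replaces the old fixed training magic with a caller-supplied mask/data pair and, when bit 31 of data is set, polls one 0x110974 + N*0x1000 status register per memory partition, skipping partitions flagged in ram->pmask. A hedged sketch of that skip-and-poll loop follows, with the partition count, mask and the hardware wait all faked.

#include <stdint.h>
#include <stdio.h>

static void poll_status(uint32_t addr)
{
	printf("wait on %#x for 0x0000000f to clear\n", (unsigned)addr);
}

int main(void)
{
	uint32_t parts = 4, pmask = 0x00000002;	/* partition 1 not populated */
	uint32_t addr = 0x110974, data = 0xa4010000;
	uint32_t i;

	for (i = 0; (data & 0x80000000) && i < parts; addr += 0x1000, i++) {
		if (pmask & (1 << i))
			continue;
		poll_status(addr);
	}
	return 0;
}
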
@@ -199,12 +213,12 @@
 }
 
 static void
-r1373f4_fini(struct nve0_ramfuc *fuc, u32 ramcfg)
+r1373f4_fini(struct nve0_ramfuc *fuc)
 {
 	struct nve0_ram *ram = container_of(fuc, typeof(*ram), fuc);
-	struct nouveau_bios *bios = nouveau_bios(ram);
-	u8 v0 = (nv_ro08(bios, ramcfg + 0x03) & 0xc0) >> 6;
-	u8 v1 = (nv_ro08(bios, ramcfg + 0x03) & 0x30) >> 4;
+	struct nouveau_ram_data *next = ram->base.next;
+	u8 v0 = next->bios.ramcfg_11_03_c0;
+	u8 v1 = next->bios.ramcfg_11_03_30;
 	u32 tmp;
 
 	tmp = ram_rd32(fuc, 0x1373ec) & ~0x00030000;
@@ -220,25 +234,46 @@
 	ram_mask(fuc, 0x10f800, 0x00000030, (v0 ^ v1) << 4);
 }
 
+static void
+nve0_ram_nuts(struct nve0_ram *ram, struct ramfuc_reg *reg,
+	      u32 _mask, u32 _data, u32 _copy)
+{
+	struct nve0_fb_priv *priv = (void *)nouveau_fb(ram);
+	struct ramfuc *fuc = &ram->fuc.base;
+	u32 addr = 0x110000 + (reg->addr[0] & 0xfff);
+	u32 mask = _mask | _copy;
+	u32 data = (_data & _mask) | (reg->data & _copy);
+	u32 i;
+
+	for (i = 0; i < 16; i++, addr += 0x1000) {
+		if (ram->pnuts & (1 << i)) {
+			u32 prev = nv_rd32(priv, addr);
+			u32 next = (prev & ~mask) | data;
+			nouveau_memx_wr32(fuc->memx, addr, next);
+		}
+	}
+}
+#define ram_nuts(s,r,m,d,c)                                                    \
+	nve0_ram_nuts((s), &(s)->fuc.r_##r, (m), (d), (c))
+
 static int
 nve0_ram_calc_gddr5(struct nouveau_fb *pfb, u32 freq)
 {
-	struct nouveau_bios *bios = nouveau_bios(pfb);
 	struct nve0_ram *ram = (void *)pfb->ram;
 	struct nve0_ramfuc *fuc = &ram->fuc;
-	const u32 rammap = ram->base.rammap.data;
-	const u32 ramcfg = ram->base.ramcfg.data;
-	const u32 timing = ram->base.timing.data;
-	int vc = !(nv_ro08(bios, ramcfg + 0x02) & 0x08);
-	int mv = 1; /*XXX*/
+	struct nouveau_ram_data *next = ram->base.next;
+	int vc = !(next->bios.ramcfg_11_02_08);
+	int mv = !(next->bios.ramcfg_11_02_04);
 	u32 mask, data;
 
 	ram_mask(fuc, 0x10f808, 0x40000000, 0x40000000);
 	ram_wr32(fuc, 0x62c000, 0x0f0f0000);
 
 	/* MR1: turn termination on early, for some reason.. */
-	if ((ram->base.mr[1] & 0x03c) != 0x030)
+	if ((ram->base.mr[1] & 0x03c) != 0x030) {
 		ram_mask(fuc, mr[1], 0x03c, ram->base.mr[1] & 0x03c);
+		ram_nuts(ram, mr[1], 0x03c, ram->base.mr1_nuts & 0x03c, 0x000);
+	}
 
 	if (vc == 1 && ram_have(fuc, gpio2E)) {
 		u32 temp  = ram_mask(fuc, gpio2E, 0x3000, fuc->r_func2E[1]);
@@ -250,8 +285,7 @@
 
 	ram_mask(fuc, 0x10f200, 0x00000800, 0x00000000);
 
-	ram_mask(fuc, 0x10f914, 0x01020000, 0x000c0000);
-	ram_mask(fuc, 0x10f910, 0x01020000, 0x000c0000);
+	nve0_ram_train(fuc, 0x01020000, 0x000c0000);
 
 	ram_wr32(fuc, 0x10f210, 0x00000000); /* REFRESH_AUTO = 0 */
 	ram_nsec(fuc, 1000);
@@ -280,28 +314,28 @@
 
 	if (1) {
 		data |= 0x800807e0;
-		switch (nv_ro08(bios, ramcfg + 0x03) & 0xc0) {
-		case 0xc0: data &= ~0x00000040; break;
-		case 0x80: data &= ~0x00000100; break;
-		case 0x40: data &= ~0x80000000; break;
-		case 0x00: data &= ~0x00000400; break;
+		switch (next->bios.ramcfg_11_03_c0) {
+		case 3: data &= ~0x00000040; break;
+		case 2: data &= ~0x00000100; break;
+		case 1: data &= ~0x80000000; break;
+		case 0: data &= ~0x00000400; break;
 		}
 
-		switch (nv_ro08(bios, ramcfg + 0x03) & 0x30) {
-		case 0x30: data &= ~0x00000020; break;
-		case 0x20: data &= ~0x00000080; break;
-		case 0x10: data &= ~0x00080000; break;
-		case 0x00: data &= ~0x00000200; break;
+		switch (next->bios.ramcfg_11_03_30) {
+		case 3: data &= ~0x00000020; break;
+		case 2: data &= ~0x00000080; break;
+		case 1: data &= ~0x00080000; break;
+		case 0: data &= ~0x00000200; break;
 		}
 	}
 
-	if (nv_ro08(bios, ramcfg + 0x02) & 0x80)
+	if (next->bios.ramcfg_11_02_80)
 		mask |= 0x03000000;
-	if (nv_ro08(bios, ramcfg + 0x02) & 0x40)
+	if (next->bios.ramcfg_11_02_40)
 		mask |= 0x00002000;
-	if (nv_ro08(bios, ramcfg + 0x07) & 0x10)
+	if (next->bios.ramcfg_11_07_10)
 		mask |= 0x00004000;
-	if (nv_ro08(bios, ramcfg + 0x07) & 0x08)
+	if (next->bios.ramcfg_11_07_08)
 		mask |= 0x00000003;
 	else {
 		mask |= 0x34000000;
@@ -314,18 +348,18 @@
 
 	if (ram->from == 2 && ram->mode != 2) {
 		ram_mask(fuc, 0x10f808, 0x00080000, 0x00000000);
-		ram_mask(fuc, 0x10f200, 0x00008000, 0x00008000);
+		ram_mask(fuc, 0x10f200, 0x18008000, 0x00008000);
 		ram_mask(fuc, 0x10f800, 0x00000000, 0x00000004);
 		ram_mask(fuc, 0x10f830, 0x00008000, 0x01040010);
 		ram_mask(fuc, 0x10f830, 0x01000000, 0x00000000);
 		r1373f4_init(fuc);
 		ram_mask(fuc, 0x1373f0, 0x00000002, 0x00000001);
-		r1373f4_fini(fuc, ramcfg);
+		r1373f4_fini(fuc);
 		ram_mask(fuc, 0x10f830, 0x00c00000, 0x00240001);
 	} else
 	if (ram->from != 2 && ram->mode != 2) {
 		r1373f4_init(fuc);
-		r1373f4_fini(fuc, ramcfg);
+		r1373f4_fini(fuc);
 	}
 
 	if (ram_have(fuc, gpioMV)) {
@@ -336,49 +370,54 @@
 		}
 	}
 
-	if ( (nv_ro08(bios, ramcfg + 0x02) & 0x40) ||
-	     (nv_ro08(bios, ramcfg + 0x07) & 0x10)) {
+	if ( (next->bios.ramcfg_11_02_40) ||
+	     (next->bios.ramcfg_11_07_10)) {
 		ram_mask(fuc, 0x132040, 0x00010000, 0x00010000);
 		ram_nsec(fuc, 20000);
 	}
 
 	if (ram->from != 2 && ram->mode == 2) {
+		if (0 /*XXX: Titan */)
+			ram_mask(fuc, 0x10f200, 0x18000000, 0x18000000);
 		ram_mask(fuc, 0x10f800, 0x00000004, 0x00000000);
 		ram_mask(fuc, 0x1373f0, 0x00000000, 0x00000002);
 		ram_mask(fuc, 0x10f830, 0x00800001, 0x00408010);
 		r1373f4_init(fuc);
-		r1373f4_fini(fuc, ramcfg);
+		r1373f4_fini(fuc);
 		ram_mask(fuc, 0x10f808, 0x00000000, 0x00080000);
 		ram_mask(fuc, 0x10f200, 0x00808000, 0x00800000);
 	} else
 	if (ram->from == 2 && ram->mode == 2) {
 		ram_mask(fuc, 0x10f800, 0x00000004, 0x00000000);
 		r1373f4_init(fuc);
-		r1373f4_fini(fuc, ramcfg);
+		r1373f4_fini(fuc);
 	}
 
 	if (ram->mode != 2) /*XXX*/ {
-		if (nv_ro08(bios, ramcfg + 0x07) & 0x40)
+		if (next->bios.ramcfg_11_07_40)
 			ram_mask(fuc, 0x10f670, 0x80000000, 0x80000000);
 	}
 
-	data = (nv_ro08(bios, rammap + 0x11) & 0x0c) >> 2;
-	ram_wr32(fuc, 0x10f65c, 0x00000011 * data);
-	ram_wr32(fuc, 0x10f6b8, 0x01010101 * nv_ro08(bios, ramcfg + 0x09));
-	ram_wr32(fuc, 0x10f6bc, 0x01010101 * nv_ro08(bios, ramcfg + 0x09));
+	ram_wr32(fuc, 0x10f65c, 0x00000011 * next->bios.rammap_11_11_0c);
+	ram_wr32(fuc, 0x10f6b8, 0x01010101 * next->bios.ramcfg_11_09);
+	ram_wr32(fuc, 0x10f6bc, 0x01010101 * next->bios.ramcfg_11_09);
 
-	data = nv_ro08(bios, ramcfg + 0x04);
-	if (!(nv_ro08(bios, ramcfg + 0x07) & 0x08)) {
-		ram_wr32(fuc, 0x10f698, 0x01010101 * data);
-		ram_wr32(fuc, 0x10f69c, 0x01010101 * data);
+	if (!next->bios.ramcfg_11_07_08 && !next->bios.ramcfg_11_07_04) {
+		ram_wr32(fuc, 0x10f698, 0x01010101 * next->bios.ramcfg_11_04);
+		ram_wr32(fuc, 0x10f69c, 0x01010101 * next->bios.ramcfg_11_04);
+	} else
+	if (!next->bios.ramcfg_11_07_08) {
+		ram_wr32(fuc, 0x10f698, 0x00000000);
+		ram_wr32(fuc, 0x10f69c, 0x00000000);
 	}
 
 	if (ram->mode != 2) {
-		u32 temp = ram_rd32(fuc, 0x10f694) & ~0xff00ff00;
-		ram_wr32(fuc, 0x10f694, temp | (0x01000100 * data));
+		u32 data = 0x01000100 * next->bios.ramcfg_11_04;
+		ram_nuke(fuc, 0x10f694);
+		ram_mask(fuc, 0x10f694, 0xff00ff00, data);
 	}
 
-	if (ram->mode == 2 && (nv_ro08(bios, ramcfg + 0x08) & 0x10))
+	if (ram->mode == 2 && (next->bios.ramcfg_11_08_10))
 		data = 0x00000080;
 	else
 		data = 0x00000000;
@@ -386,19 +425,19 @@
 
 	mask = 0x00070000;
 	data = 0x00000000;
-	if (!(nv_ro08(bios, ramcfg + 0x02) & 0x80))
+	if (!(next->bios.ramcfg_11_02_80))
 		data |= 0x03000000;
-	if (!(nv_ro08(bios, ramcfg + 0x02) & 0x40))
+	if (!(next->bios.ramcfg_11_02_40))
 		data |= 0x00002000;
-	if (!(nv_ro08(bios, ramcfg + 0x07) & 0x10))
+	if (!(next->bios.ramcfg_11_07_10))
 		data |= 0x00004000;
-	if (!(nv_ro08(bios, ramcfg + 0x07) & 0x08))
+	if (!(next->bios.ramcfg_11_07_08))
 		data |= 0x00000003;
 	else
 		data |= 0x74000000;
 	ram_mask(fuc, 0x10f824, mask, data);
 
-	if (nv_ro08(bios, ramcfg + 0x01) & 0x08)
+	if (next->bios.ramcfg_11_01_08)
 		data = 0x00000000;
 	else
 		data = 0x00001000;
@@ -409,61 +448,90 @@
 		ram_mask(fuc, 0x10f670, 0x80000000, 0x00000000);
 	}
 
-	if (nv_ro08(bios, ramcfg + 0x08) & 0x01)
+	if (next->bios.ramcfg_11_08_01)
 		data = 0x00100000;
 	else
 		data = 0x00000000;
 	ram_mask(fuc, 0x10f82c, 0x00100000, data);
 
 	data = 0x00000000;
-	if (nv_ro08(bios, ramcfg + 0x08) & 0x08)
+	if (next->bios.ramcfg_11_08_08)
 		data |= 0x00002000;
-	if (nv_ro08(bios, ramcfg + 0x08) & 0x04)
+	if (next->bios.ramcfg_11_08_04)
 		data |= 0x00001000;
-	if (nv_ro08(bios, ramcfg + 0x08) & 0x02)
+	if (next->bios.ramcfg_11_08_02)
 		data |= 0x00004000;
 	ram_mask(fuc, 0x10f830, 0x00007000, data);
 
 	/* PFB timing */
-	ram_mask(fuc, 0x10f248, 0xffffffff, nv_ro32(bios, timing + 0x28));
-	ram_mask(fuc, 0x10f290, 0xffffffff, nv_ro32(bios, timing + 0x00));
-	ram_mask(fuc, 0x10f294, 0xffffffff, nv_ro32(bios, timing + 0x04));
-	ram_mask(fuc, 0x10f298, 0xffffffff, nv_ro32(bios, timing + 0x08));
-	ram_mask(fuc, 0x10f29c, 0xffffffff, nv_ro32(bios, timing + 0x0c));
-	ram_mask(fuc, 0x10f2a0, 0xffffffff, nv_ro32(bios, timing + 0x10));
-	ram_mask(fuc, 0x10f2a4, 0xffffffff, nv_ro32(bios, timing + 0x14));
-	ram_mask(fuc, 0x10f2a8, 0xffffffff, nv_ro32(bios, timing + 0x18));
-	ram_mask(fuc, 0x10f2ac, 0xffffffff, nv_ro32(bios, timing + 0x1c));
-	ram_mask(fuc, 0x10f2cc, 0xffffffff, nv_ro32(bios, timing + 0x20));
-	ram_mask(fuc, 0x10f2e8, 0xffffffff, nv_ro32(bios, timing + 0x24));
+	ram_mask(fuc, 0x10f248, 0xffffffff, next->bios.timing[10]);
+	ram_mask(fuc, 0x10f290, 0xffffffff, next->bios.timing[0]);
+	ram_mask(fuc, 0x10f294, 0xffffffff, next->bios.timing[1]);
+	ram_mask(fuc, 0x10f298, 0xffffffff, next->bios.timing[2]);
+	ram_mask(fuc, 0x10f29c, 0xffffffff, next->bios.timing[3]);
+	ram_mask(fuc, 0x10f2a0, 0xffffffff, next->bios.timing[4]);
+	ram_mask(fuc, 0x10f2a4, 0xffffffff, next->bios.timing[5]);
+	ram_mask(fuc, 0x10f2a8, 0xffffffff, next->bios.timing[6]);
+	ram_mask(fuc, 0x10f2ac, 0xffffffff, next->bios.timing[7]);
+	ram_mask(fuc, 0x10f2cc, 0xffffffff, next->bios.timing[8]);
+	ram_mask(fuc, 0x10f2e8, 0xffffffff, next->bios.timing[9]);
 
-	data = (nv_ro08(bios, ramcfg + 0x02) & 0x03) << 8;
-	if (nv_ro08(bios, ramcfg + 0x01) & 0x10)
-		data |= 0x70000000;
-	ram_mask(fuc, 0x10f604, 0x70000300, data);
+	data = mask = 0x00000000;
+	if (NOTE00(ramcfg_08_20)) {
+		if (next->bios.ramcfg_11_08_20)
+			data |= 0x01000000;
+		mask |= 0x01000000;
+	}
+	ram_mask(fuc, 0x10f200, mask, data);
 
-	data = (nv_ro08(bios, timing + 0x30) & 0x07) << 28;
-	if (nv_ro08(bios, ramcfg + 0x01) & 0x01)
-		data |= 0x00000100;
-	ram_mask(fuc, 0x10f614, 0x70000000, data);
+	data = mask = 0x00000000;
+	if (NOTE00(ramcfg_02_03 != 0)) {
+		data |= (next->bios.ramcfg_11_02_03) << 8;
+		mask |= 0x00000300;
+	}
+	if (NOTE00(ramcfg_01_10)) {
+		if (next->bios.ramcfg_11_01_10)
+			data |= 0x70000000;
+		mask |= 0x70000000;
+	}
+	ram_mask(fuc, 0x10f604, mask, data);
 
-	data = (nv_ro08(bios, timing + 0x30) & 0x07) << 28;
-	if (nv_ro08(bios, ramcfg + 0x01) & 0x02)
-		data |= 0x00000100;
-	ram_mask(fuc, 0x10f610, 0x70000000, data);
+	data = mask = 0x00000000;
+	if (NOTE00(timing_30_07 != 0)) {
+		data |= (next->bios.timing_20_30_07) << 28;
+		mask |= 0x70000000;
+	}
+	if (NOTE00(ramcfg_01_01)) {
+		if (next->bios.ramcfg_11_01_01)
+			data |= 0x00000100;
+		mask |= 0x00000100;
+	}
+	ram_mask(fuc, 0x10f614, mask, data);
+
+	data = mask = 0x00000000;
+	if (NOTE00(timing_30_07 != 0)) {
+		data |= (next->bios.timing_20_30_07) << 28;
+		mask |= 0x70000000;
+	}
+	if (NOTE00(ramcfg_01_02)) {
+		if (next->bios.ramcfg_11_01_02)
+			data |= 0x00000100;
+		mask |= 0x00000100;
+	}
+	ram_mask(fuc, 0x10f610, mask, data);
 
 	mask = 0x33f00000;
 	data = 0x00000000;
-	if (!(nv_ro08(bios, ramcfg + 0x01) & 0x04))
+	if (!(next->bios.ramcfg_11_01_04))
 		data |= 0x20200000;
-	if (!(nv_ro08(bios, ramcfg + 0x07) & 0x80))
+	if (!(next->bios.ramcfg_11_07_80))
 		data |= 0x12800000;
 	/*XXX: see note above about there probably being some condition
 	 *     for the 10f824 stuff that uses ramcfg 3...
 	 */
-	if ( (nv_ro08(bios, ramcfg + 0x03) & 0xf0)) {
-		if (nv_ro08(bios, rammap + 0x08) & 0x0c) {
-			if (!(nv_ro08(bios, ramcfg + 0x07) & 0x80))
+	if ( (next->bios.ramcfg_11_03_f0)) {
+		if (next->bios.rammap_11_08_0c) {
+			if (!(next->bios.ramcfg_11_07_80))
 				mask |= 0x00000020;
 			else
 				data |= 0x00000020;
@@ -476,49 +544,53 @@
 
 	ram_mask(fuc, 0x10f808, mask, data);
 
-	data = nv_ro08(bios, ramcfg + 0x03) & 0x0f;
-	ram_wr32(fuc, 0x10f870, 0x11111111 * data);
+	ram_wr32(fuc, 0x10f870, 0x11111111 * next->bios.ramcfg_11_03_0f);
 
-	data = nv_ro08(bios, ramcfg + 0x02) & 0x03;
-	if (nv_ro08(bios, ramcfg + 0x01) & 0x10)
-		data |= 0x00000004;
-	if ((nv_rd32(bios, 0x100770) & 0x00000004) != (data & 0x00000004)) {
-		ram_wr32(fuc, 0x10f750, 0x04000009);
+	data = mask = 0x00000000;
+	if (NOTE00(ramcfg_02_03 != 0)) {
+		data |= next->bios.ramcfg_11_02_03;
+		mask |= 0x00000003;
+	}
+	if (NOTE00(ramcfg_01_10)) {
+		if (next->bios.ramcfg_11_01_10)
+			data |= 0x00000004;
+		mask |= 0x00000004;
+	}
+
+	if ((ram_mask(fuc, 0x100770, mask, data) & mask & 4) != (data & 4)) {
+		ram_mask(fuc, 0x100750, 0x00000008, 0x00000008);
 		ram_wr32(fuc, 0x100710, 0x00000000);
 		ram_wait(fuc, 0x100710, 0x80000000, 0x80000000, 200000);
 	}
-	ram_mask(fuc, 0x100770, 0x00000007, data);
 
-	data = (nv_ro08(bios, timing + 0x30) & 0x07) << 8;
-	if (nv_ro08(bios, ramcfg + 0x01) & 0x01)
+	data = (next->bios.timing_20_30_07) << 8;
+	if (next->bios.ramcfg_11_01_01)
 		data |= 0x80000000;
 	ram_mask(fuc, 0x100778, 0x00000700, data);
 
-	data = nv_ro16(bios, timing + 0x2c);
-	ram_mask(fuc, 0x10f250, 0x000003f0, (data & 0x003f) <<  4);
-	ram_mask(fuc, 0x10f24c, 0x7f000000, (data & 0x1fc0) << 18);
+	ram_mask(fuc, 0x10f250, 0x000003f0, next->bios.timing_20_2c_003f << 4);
+	data = (next->bios.timing[10] & 0x7f000000) >> 24;
+	if (data < next->bios.timing_20_2c_1fc0)
+		data = next->bios.timing_20_2c_1fc0;
+	ram_mask(fuc, 0x10f24c, 0x7f000000, data << 24);
+	ram_mask(fuc, 0x10f224, 0x001f0000, next->bios.timing_20_30_f8 << 16);
 
-	data = nv_ro08(bios, timing + 0x30);
-	ram_mask(fuc, 0x10f224, 0x001f0000, (data & 0xf8) << 13);
-
-	data = nv_ro16(bios, timing + 0x31);
-	ram_mask(fuc, 0x10fec4, 0x041e0f07, (data & 0x0800) << 15 |
-					    (data & 0x0780) << 10 |
-					    (data & 0x0078) <<  5 |
-					    (data & 0x0007));
-	ram_mask(fuc, 0x10fec8, 0x00000027, (data & 0x8000) >> 10 |
-					    (data & 0x7000) >> 12);
+	ram_mask(fuc, 0x10fec4, 0x041e0f07, next->bios.timing_20_31_0800 << 26 |
+					    next->bios.timing_20_31_0780 << 17 |
+					    next->bios.timing_20_31_0078 << 8 |
+					    next->bios.timing_20_31_0007);
+	ram_mask(fuc, 0x10fec8, 0x00000027, next->bios.timing_20_31_8000 << 5 |
+					    next->bios.timing_20_31_7000);
 
 	ram_wr32(fuc, 0x10f090, 0x4000007e);
-	ram_nsec(fuc, 1000);
+	ram_nsec(fuc, 2000);
 	ram_wr32(fuc, 0x10f314, 0x00000001); /* PRECHARGE */
 	ram_wr32(fuc, 0x10f310, 0x00000001); /* REFRESH */
-	ram_nsec(fuc, 2000);
 	ram_wr32(fuc, 0x10f210, 0x80000000); /* REFRESH_AUTO = 1 */
 
-	if ((nv_ro08(bios, ramcfg + 0x08) & 0x10) && (ram->mode == 2) /*XXX*/) {
+	if ((next->bios.ramcfg_11_08_10) && (ram->mode == 2) /*XXX*/) {
 		u32 temp = ram_mask(fuc, 0x10f294, 0xff000000, 0x24000000);
-		train(fuc, 0xa4010000); /*XXX*/
+		nve0_ram_train(fuc, 0xbc0e0000, 0xa4010000); /*XXX*/
 		ram_nsec(fuc, 1000);
 		ram_wr32(fuc, 0x10f294, temp);
 	}
@@ -528,7 +600,7 @@
 	ram_mask(fuc, mr[8], 0xfff, ram->base.mr[8]);
 	ram_nsec(fuc, 1000);
 	ram_mask(fuc, mr[1], 0xfff, ram->base.mr[1]);
-	ram_mask(fuc, mr[5], 0xfff, ram->base.mr[5]);
+	ram_mask(fuc, mr[5], 0xfff, ram->base.mr[5] & ~0x004); /* LP3 later */
 	ram_mask(fuc, mr[6], 0xfff, ram->base.mr[6]);
 	ram_mask(fuc, mr[7], 0xfff, ram->base.mr[7]);
 
@@ -544,12 +616,13 @@
 	ram_wr32(fuc, 0x10f318, 0x00000001); /* NOP? */
 	ram_mask(fuc, 0x10f200, 0x80000000, 0x00000000);
 	ram_nsec(fuc, 1000);
+	ram_nuts(ram, 0x10f200, 0x18808800, 0x00000000, 0x18808800);
 
 	data  = ram_rd32(fuc, 0x10f978);
 	data &= ~0x00046144;
 	data |=  0x0000000b;
-	if (!(nv_ro08(bios, ramcfg + 0x07) & 0x08)) {
-		if (!(nv_ro08(bios, ramcfg + 0x07) & 0x04))
+	if (!(next->bios.ramcfg_11_07_08)) {
+		if (!(next->bios.ramcfg_11_07_04))
 			data |= 0x0000200c;
 		else
 			data |= 0x00000000;
@@ -563,44 +636,43 @@
 		ram_wr32(fuc, 0x10f830, data);
 	}
 
-	if (!(nv_ro08(bios, ramcfg + 0x07) & 0x08)) {
+	if (!(next->bios.ramcfg_11_07_08)) {
 		data = 0x88020000;
-		if ( (nv_ro08(bios, ramcfg + 0x07) & 0x04))
+		if ( (next->bios.ramcfg_11_07_04))
 			data |= 0x10000000;
-		if (!(nv_ro08(bios, rammap + 0x08) & 0x10))
+		if (!(next->bios.rammap_11_08_10))
 			data |= 0x00080000;
 	} else {
 		data = 0xa40e0000;
 	}
-	train(fuc, data);
-	ram_nsec(fuc, 1000);
+	nve0_ram_train(fuc, 0xbc0f0000, data);
+	if (1) /* XXX: not always? */
+		ram_nsec(fuc, 1000);
 
 	if (ram->mode == 2) { /*XXX*/
 		ram_mask(fuc, 0x10f800, 0x00000004, 0x00000004);
 	}
 
-	/* MR5: (re)enable LP3 if necessary
-	 * XXX: need to find the switch, keeping off for now
-	 */
-	ram_mask(fuc, mr[5], 0x00000004, 0x00000000);
+	/* LP3 */
+	if (ram_mask(fuc, mr[5], 0x004, ram->base.mr[5]) != ram->base.mr[5])
+		ram_nsec(fuc, 1000);
 
 	if (ram->mode != 2) {
 		ram_mask(fuc, 0x10f830, 0x01000000, 0x01000000);
 		ram_mask(fuc, 0x10f830, 0x01000000, 0x00000000);
 	}
 
-	if (nv_ro08(bios, ramcfg + 0x07) & 0x02) {
-		ram_mask(fuc, 0x10f910, 0x80020000, 0x01000000);
-		ram_mask(fuc, 0x10f914, 0x80020000, 0x01000000);
-	}
+	if (next->bios.ramcfg_11_07_02)
+		nve0_ram_train(fuc, 0x80020000, 0x01000000);
 
 	ram_wr32(fuc, 0x62c000, 0x0f0f0f00);
 
-	if (nv_ro08(bios, rammap + 0x08) & 0x01)
+	if (next->bios.rammap_11_08_01)
 		data = 0x00000800;
 	else
 		data = 0x00000000;
 	ram_mask(fuc, 0x10f200, 0x00000800, data);
+	ram_nuts(ram, 0x10f200, 0x18808800, data, 0x18808800);
 	return 0;
 }
 
@@ -611,17 +683,14 @@
 static int
 nve0_ram_calc_sddr3(struct nouveau_fb *pfb, u32 freq)
 {
-	struct nouveau_bios *bios = nouveau_bios(pfb);
 	struct nve0_ram *ram = (void *)pfb->ram;
 	struct nve0_ramfuc *fuc = &ram->fuc;
 	const u32 rcoef = ((  ram->P1 << 16) | (ram->N1 << 8) | ram->M1);
 	const u32 runk0 = ram->fN1 << 16;
 	const u32 runk1 = ram->fN1;
-	const u32 rammap = ram->base.rammap.data;
-	const u32 ramcfg = ram->base.ramcfg.data;
-	const u32 timing = ram->base.timing.data;
-	int vc = !(nv_ro08(bios, ramcfg + 0x02) & 0x08);
-	int mv = 1; /*XXX*/
+	struct nouveau_ram_data *next = ram->base.next;
+	int vc = !(next->bios.ramcfg_11_02_08);
+	int mv = !(next->bios.ramcfg_11_02_04);
 	u32 mask, data;
 
 	ram_mask(fuc, 0x10f808, 0x40000000, 0x40000000);
@@ -636,7 +705,7 @@
 	}
 
 	ram_mask(fuc, 0x10f200, 0x00000800, 0x00000000);
-	if ((nv_ro08(bios, ramcfg + 0x03) & 0xf0))
+	if ((next->bios.ramcfg_11_03_f0))
 		ram_mask(fuc, 0x10f808, 0x04000000, 0x04000000);
 
 	ram_wr32(fuc, 0x10f314, 0x00000001); /* PRECHARGE */
@@ -661,28 +730,28 @@
 	if (1) {
 		mask |= 0x800807e0;
 		data |= 0x800807e0;
-		switch (nv_ro08(bios, ramcfg + 0x03) & 0xc0) {
-		case 0xc0: data &= ~0x00000040; break;
-		case 0x80: data &= ~0x00000100; break;
-		case 0x40: data &= ~0x80000000; break;
-		case 0x00: data &= ~0x00000400; break;
+		switch (next->bios.ramcfg_11_03_c0) {
+		case 3: data &= ~0x00000040; break;
+		case 2: data &= ~0x00000100; break;
+		case 1: data &= ~0x80000000; break;
+		case 0: data &= ~0x00000400; break;
 		}
 
-		switch (nv_ro08(bios, ramcfg + 0x03) & 0x30) {
-		case 0x30: data &= ~0x00000020; break;
-		case 0x20: data &= ~0x00000080; break;
-		case 0x10: data &= ~0x00080000; break;
-		case 0x00: data &= ~0x00000200; break;
+		switch (next->bios.ramcfg_11_03_30) {
+		case 3: data &= ~0x00000020; break;
+		case 2: data &= ~0x00000080; break;
+		case 1: data &= ~0x00080000; break;
+		case 0: data &= ~0x00000200; break;
 		}
 	}
 
-	if (nv_ro08(bios, ramcfg + 0x02) & 0x80)
+	if (next->bios.ramcfg_11_02_80)
 		mask |= 0x03000000;
-	if (nv_ro08(bios, ramcfg + 0x02) & 0x40)
+	if (next->bios.ramcfg_11_02_40)
 		mask |= 0x00002000;
-	if (nv_ro08(bios, ramcfg + 0x07) & 0x10)
+	if (next->bios.ramcfg_11_07_10)
 		mask |= 0x00004000;
-	if (nv_ro08(bios, ramcfg + 0x07) & 0x08)
+	if (next->bios.ramcfg_11_07_08)
 		mask |= 0x00000003;
 	else
 		mask |= 0x14000000;
@@ -692,7 +761,7 @@
 
 	ram_mask(fuc, 0x1373f4, 0x00000000, 0x00010010);
 	data  = ram_rd32(fuc, 0x1373ec) & ~0x00030000;
-	data |= (nv_ro08(bios, ramcfg + 0x03) & 0x30) << 12;
+	data |= (next->bios.ramcfg_11_03_30) << 16;
 	ram_wr32(fuc, 0x1373ec, data);
 	ram_mask(fuc, 0x1373f4, 0x00000003, 0x00000000);
 	ram_mask(fuc, 0x1373f4, 0x00000010, 0x00000000);
@@ -724,68 +793,67 @@
 		}
 	}
 
-	if ( (nv_ro08(bios, ramcfg + 0x02) & 0x40) ||
-	     (nv_ro08(bios, ramcfg + 0x07) & 0x10)) {
+	if ( (next->bios.ramcfg_11_02_40) ||
+	     (next->bios.ramcfg_11_07_10)) {
 		ram_mask(fuc, 0x132040, 0x00010000, 0x00010000);
 		ram_nsec(fuc, 20000);
 	}
 
 	if (ram->mode != 2) /*XXX*/ {
-		if (nv_ro08(bios, ramcfg + 0x07) & 0x40)
+		if (next->bios.ramcfg_11_07_40)
 			ram_mask(fuc, 0x10f670, 0x80000000, 0x80000000);
 	}
 
-	data = (nv_ro08(bios, rammap + 0x11) & 0x0c) >> 2;
-	ram_wr32(fuc, 0x10f65c, 0x00000011 * data);
-	ram_wr32(fuc, 0x10f6b8, 0x01010101 * nv_ro08(bios, ramcfg + 0x09));
-	ram_wr32(fuc, 0x10f6bc, 0x01010101 * nv_ro08(bios, ramcfg + 0x09));
+	ram_wr32(fuc, 0x10f65c, 0x00000011 * next->bios.rammap_11_11_0c);
+	ram_wr32(fuc, 0x10f6b8, 0x01010101 * next->bios.ramcfg_11_09);
+	ram_wr32(fuc, 0x10f6bc, 0x01010101 * next->bios.ramcfg_11_09);
 
 	mask = 0x00010000;
 	data = 0x00000000;
-	if (!(nv_ro08(bios, ramcfg + 0x02) & 0x80))
+	if (!(next->bios.ramcfg_11_02_80))
 		data |= 0x03000000;
-	if (!(nv_ro08(bios, ramcfg + 0x02) & 0x40))
+	if (!(next->bios.ramcfg_11_02_40))
 		data |= 0x00002000;
-	if (!(nv_ro08(bios, ramcfg + 0x07) & 0x10))
+	if (!(next->bios.ramcfg_11_07_10))
 		data |= 0x00004000;
-	if (!(nv_ro08(bios, ramcfg + 0x07) & 0x08))
+	if (!(next->bios.ramcfg_11_07_08))
 		data |= 0x00000003;
 	else
 		data |= 0x14000000;
 	ram_mask(fuc, 0x10f824, mask, data);
 	ram_nsec(fuc, 1000);
 
-	if (nv_ro08(bios, ramcfg + 0x08) & 0x01)
+	if (next->bios.ramcfg_11_08_01)
 		data = 0x00100000;
 	else
 		data = 0x00000000;
 	ram_mask(fuc, 0x10f82c, 0x00100000, data);
 
 	/* PFB timing */
-	ram_mask(fuc, 0x10f248, 0xffffffff, nv_ro32(bios, timing + 0x28));
-	ram_mask(fuc, 0x10f290, 0xffffffff, nv_ro32(bios, timing + 0x00));
-	ram_mask(fuc, 0x10f294, 0xffffffff, nv_ro32(bios, timing + 0x04));
-	ram_mask(fuc, 0x10f298, 0xffffffff, nv_ro32(bios, timing + 0x08));
-	ram_mask(fuc, 0x10f29c, 0xffffffff, nv_ro32(bios, timing + 0x0c));
-	ram_mask(fuc, 0x10f2a0, 0xffffffff, nv_ro32(bios, timing + 0x10));
-	ram_mask(fuc, 0x10f2a4, 0xffffffff, nv_ro32(bios, timing + 0x14));
-	ram_mask(fuc, 0x10f2a8, 0xffffffff, nv_ro32(bios, timing + 0x18));
-	ram_mask(fuc, 0x10f2ac, 0xffffffff, nv_ro32(bios, timing + 0x1c));
-	ram_mask(fuc, 0x10f2cc, 0xffffffff, nv_ro32(bios, timing + 0x20));
-	ram_mask(fuc, 0x10f2e8, 0xffffffff, nv_ro32(bios, timing + 0x24));
+	ram_mask(fuc, 0x10f248, 0xffffffff, next->bios.timing[10]);
+	ram_mask(fuc, 0x10f290, 0xffffffff, next->bios.timing[0]);
+	ram_mask(fuc, 0x10f294, 0xffffffff, next->bios.timing[1]);
+	ram_mask(fuc, 0x10f298, 0xffffffff, next->bios.timing[2]);
+	ram_mask(fuc, 0x10f29c, 0xffffffff, next->bios.timing[3]);
+	ram_mask(fuc, 0x10f2a0, 0xffffffff, next->bios.timing[4]);
+	ram_mask(fuc, 0x10f2a4, 0xffffffff, next->bios.timing[5]);
+	ram_mask(fuc, 0x10f2a8, 0xffffffff, next->bios.timing[6]);
+	ram_mask(fuc, 0x10f2ac, 0xffffffff, next->bios.timing[7]);
+	ram_mask(fuc, 0x10f2cc, 0xffffffff, next->bios.timing[8]);
+	ram_mask(fuc, 0x10f2e8, 0xffffffff, next->bios.timing[9]);
 
 	mask = 0x33f00000;
 	data = 0x00000000;
-	if (!(nv_ro08(bios, ramcfg + 0x01) & 0x04))
+	if (!(next->bios.ramcfg_11_01_04))
 		data |= 0x20200000;
-	if (!(nv_ro08(bios, ramcfg + 0x07) & 0x80))
+	if (!(next->bios.ramcfg_11_07_80))
 		data |= 0x12800000;
 	/*XXX: see note above about there probably being some condition
 	 *     for the 10f824 stuff that uses ramcfg 3...
 	 */
-	if ( (nv_ro08(bios, ramcfg + 0x03) & 0xf0)) {
-		if (nv_ro08(bios, rammap + 0x08) & 0x0c) {
-			if (!(nv_ro08(bios, ramcfg + 0x07) & 0x80))
+	if ( (next->bios.ramcfg_11_03_f0)) {
+		if (next->bios.rammap_11_08_0c) {
+			if (!(next->bios.ramcfg_11_07_80))
 				mask |= 0x00000020;
 			else
 				data |= 0x00000020;
@@ -799,21 +867,16 @@
 
 	ram_mask(fuc, 0x10f808, mask, data);
 
-	data = nv_ro08(bios, ramcfg + 0x03) & 0x0f;
-	ram_wr32(fuc, 0x10f870, 0x11111111 * data);
+	ram_wr32(fuc, 0x10f870, 0x11111111 * next->bios.ramcfg_11_03_0f);
 
-	data = nv_ro16(bios, timing + 0x2c);
-	ram_mask(fuc, 0x10f250, 0x000003f0, (data & 0x003f) <<  4);
+	ram_mask(fuc, 0x10f250, 0x000003f0, next->bios.timing_20_2c_003f << 4);
 
-	if (((nv_ro32(bios, timing + 0x2c) & 0x00001fc0) >>  6) >
-	    ((nv_ro32(bios, timing + 0x28) & 0x7f000000) >> 24))
-		data = (nv_ro32(bios, timing + 0x2c) & 0x00001fc0) >>  6;
-	else
-		data = (nv_ro32(bios, timing + 0x28) & 0x1f000000) >> 24;
+	data = (next->bios.timing[10] & 0x7f000000) >> 24;
+	if (data < next->bios.timing_20_2c_1fc0)
+		data = next->bios.timing_20_2c_1fc0;
 	ram_mask(fuc, 0x10f24c, 0x7f000000, data << 24);
 
-	data = nv_ro08(bios, timing + 0x30);
-	ram_mask(fuc, 0x10f224, 0x001f0000, (data & 0xf8) << 13);
+	ram_mask(fuc, 0x10f224, 0x001f0000, next->bios.timing_20_30_f8 << 16);
 
 	ram_wr32(fuc, 0x10f090, 0x4000007f);
 	ram_nsec(fuc, 1000);
@@ -855,7 +918,7 @@
 
 	ram_wr32(fuc, 0x62c000, 0x0f0f0f00);
 
-	if (nv_ro08(bios, rammap + 0x08) & 0x01)
+	if (next->bios.rammap_11_08_01)
 		data = 0x00000800;
 	else
 		data = 0x00000000;
@@ -868,21 +931,18 @@
  ******************************************************************************/
 
 static int
-nve0_ram_calc(struct nouveau_fb *pfb, u32 freq)
+nve0_ram_calc_data(struct nouveau_fb *pfb, u32 freq,
+		   struct nouveau_ram_data *data)
 {
 	struct nouveau_bios *bios = nouveau_bios(pfb);
 	struct nve0_ram *ram = (void *)pfb->ram;
-	struct nve0_ramfuc *fuc = &ram->fuc;
-	struct bit_entry M;
-	int ret, refclk, strap, i;
-	u32 data;
-	u8  cnt;
+	u8 strap, cnt, len;
 
 	/* lookup memory config data relevant to the target frequency */
-	ram->base.rammap.data = nvbios_rammap_match(bios, freq / 1000,
-						   &ram->base.rammap.version,
-						   &ram->base.rammap.size, &cnt,
-						   &ram->base.ramcfg.size);
+	ram->base.rammap.data = nvbios_rammapEp(bios, freq / 1000,
+					       &ram->base.rammap.version,
+					       &ram->base.rammap.size,
+					       &cnt, &len, &data->bios);
 	if (!ram->base.rammap.data || ram->base.rammap.version != 0x11 ||
 	     ram->base.rammap.size < 0x09) {
 		nv_error(pfb, "invalid/missing rammap entry\n");
@@ -890,24 +950,13 @@
 	}
 
 	/* locate specific data set for the attached memory */
-	if (bit_entry(bios, 'M', &M) || M.version != 2 || M.length < 3) {
-		nv_error(pfb, "invalid/missing memory table\n");
-		return -EINVAL;
-	}
-
-	strap = (nv_rd32(pfb, 0x101000) & 0x0000003c) >> 2;
-	data = nv_ro16(bios, M.offset + 1);
-	if (data)
-		strap = nv_ro08(bios, data + strap);
-
-	if (strap >= cnt) {
-		nv_error(pfb, "invalid ramcfg strap\n");
-		return -EINVAL;
-	}
-
-	ram->base.ramcfg.version = ram->base.rammap.version;
-	ram->base.ramcfg.data = ram->base.rammap.data + ram->base.rammap.size +
-			       (ram->base.ramcfg.size * strap);
+	ram->base.ramcfg.data = nvbios_rammapSp(bios, ram->base.rammap.data,
+						ram->base.rammap.version,
+						ram->base.rammap.size, cnt, len,
+						nvbios_ramcfg_index(bios),
+						&ram->base.ramcfg.version,
+						&ram->base.ramcfg.size,
+						&data->bios);
 	if (!ram->base.ramcfg.data || ram->base.ramcfg.version != 0x11 ||
 	     ram->base.ramcfg.size < 0x08) {
 		nv_error(pfb, "invalid/missing ramcfg entry\n");
@@ -918,9 +967,9 @@
 	strap = nv_ro08(bios, ram->base.ramcfg.data + 0x00);
 	if (strap != 0xff) {
 		ram->base.timing.data =
-			nvbios_timing_entry(bios, strap,
-					   &ram->base.timing.version,
-					   &ram->base.timing.size);
+			nvbios_timingEp(bios, strap, &ram->base.timing.version,
+				       &ram->base.timing.size, &cnt, &len,
+				       &data->bios);
 		if (!ram->base.timing.data ||
 		     ram->base.timing.version != 0x20 ||
 		     ram->base.timing.size < 0x33) {
@@ -931,11 +980,23 @@
 		ram->base.timing.data = 0;
 	}
 
+	data->freq = freq;
+	return 0;
+}
+
+static int
+nve0_ram_calc_xits(struct nouveau_fb *pfb, struct nouveau_ram_data *next)
+{
+	struct nve0_ram *ram = (void *)pfb->ram;
+	struct nve0_ramfuc *fuc = &ram->fuc;
+	int refclk, i;
+	int ret;
+
 	ret = ram_init(fuc, pfb);
 	if (ret)
 		return ret;
 
-	ram->mode = (freq > fuc->refpll.vco1.max_freq) ? 2 : 1;
+	ram->mode = (next->freq > fuc->refpll.vco1.max_freq) ? 2 : 1;
 	ram->from = ram_rd32(fuc, 0x1373f4) & 0x0000000f;
 
 	/* XXX: this is *not* what nvidia do.  on fermi nvidia generally
@@ -946,7 +1007,7 @@
 	 * so far, i've seen very weird values being chosen by nvidia on
 	 * kepler boards, no idea how/why they're chosen.
 	 */
-	refclk = freq;
+	refclk = next->freq;
 	if (ram->mode == 2)
 		refclk = fuc->mempll.refclk;
 
@@ -968,7 +1029,7 @@
 		fuc->mempll.min_p = 1;
 		fuc->mempll.max_p = 2;
 
-		ret = nva3_pll_calc(nv_subdev(pfb), &fuc->mempll, freq,
+		ret = nva3_pll_calc(nv_subdev(pfb), &fuc->mempll, next->freq,
 				   &ram->N2, NULL, &ram->M2, &ram->P2);
 		if (ret <= 0) {
 			nv_error(pfb, "unable to calc mempll\n");
@@ -980,17 +1041,18 @@
 		if (ram_have(fuc, mr[i]))
 			ram->base.mr[i] = ram_rd32(fuc, mr[i]);
 	}
+	ram->base.freq = next->freq;
 
 	switch (ram->base.type) {
 	case NV_MEM_TYPE_DDR3:
 		ret = nouveau_sddr3_calc(&ram->base);
 		if (ret == 0)
-			ret = nve0_ram_calc_sddr3(pfb, freq);
+			ret = nve0_ram_calc_sddr3(pfb, next->freq);
 		break;
 	case NV_MEM_TYPE_GDDR5:
-		ret = nouveau_gddr5_calc(&ram->base);
+		ret = nouveau_gddr5_calc(&ram->base, ram->pnuts != 0);
 		if (ret == 0)
-			ret = nve0_ram_calc_gddr5(pfb, freq);
+			ret = nve0_ram_calc_gddr5(pfb, next->freq);
 		break;
 	default:
 		ret = -ENOSYS;
@@ -1001,13 +1063,55 @@
 }
 
 static int
+nve0_ram_calc(struct nouveau_fb *pfb, u32 freq)
+{
+	struct nouveau_clock *clk = nouveau_clock(pfb);
+	struct nve0_ram *ram = (void *)pfb->ram;
+	struct nouveau_ram_data *xits = &ram->base.xition;
+	struct nouveau_ram_data *copy;
+	int ret;
+
+	if (ram->base.next == NULL) {
+		ret = nve0_ram_calc_data(pfb, clk->read(clk, nv_clk_src_mem),
+					&ram->base.former);
+		if (ret)
+			return ret;
+
+		ret = nve0_ram_calc_data(pfb, freq, &ram->base.target);
+		if (ret)
+			return ret;
+
+		if (ram->base.target.freq < ram->base.former.freq) {
+			*xits = ram->base.target;
+			copy = &ram->base.former;
+		} else {
+			*xits = ram->base.former;
+			copy = &ram->base.target;
+		}
+
+		xits->bios.ramcfg_11_02_04 = copy->bios.ramcfg_11_02_04;
+		xits->bios.ramcfg_11_02_03 = copy->bios.ramcfg_11_02_03;
+		xits->bios.timing_20_30_07 = copy->bios.timing_20_30_07;
+
+		ram->base.next = &ram->base.target;
+		if (memcmp(xits, &ram->base.former, sizeof(xits->bios)))
+			ram->base.next = &ram->base.xition;
+	} else {
+		BUG_ON(ram->base.next != &ram->base.xition);
+		ram->base.next = &ram->base.target;
+	}
+
+	return nve0_ram_calc_xits(pfb, ram->base.next);
+}
+
+static int
 nve0_ram_prog(struct nouveau_fb *pfb)
 {
 	struct nouveau_device *device = nv_device(pfb);
 	struct nve0_ram *ram = (void *)pfb->ram;
 	struct nve0_ramfuc *fuc = &ram->fuc;
 	ram_exec(fuc, nouveau_boolopt(device->cfgopt, "NvMemExec", false));
-	return 0;
+	return (ram->base.next == &ram->base.xition);
 }
 
 static void
@@ -1015,6 +1119,7 @@
 {
 	struct nve0_ram *ram = (void *)pfb->ram;
 	struct nve0_ramfuc *fuc = &ram->fuc;
+	ram->base.next = NULL;
 	ram_exec(fuc, false);
 }
 
@@ -1055,7 +1160,7 @@
 	 * binary driver skips the one that's already been setup by
 	 * the init tables.
 	 */
-	data = nvbios_rammap_table(bios, &ver, &hdr, &cnt, &len, &snr, &ssz);
+	data = nvbios_rammapTe(bios, &ver, &hdr, &cnt, &len, &snr, &ssz);
 	if (!data || hdr < 0x15)
 		return -EINVAL;
 
@@ -1073,6 +1178,7 @@
 		data += 4;
 	}
 	nv_wr32(pfb, 0x10f65c, save);
+	nv_mask(pfb, 0x10f584, 0x11000000, 0x00000000);
 
 	switch (ram->base.type) {
 	case NV_MEM_TYPE_GDDR5:
@@ -1117,7 +1223,8 @@
 	struct nouveau_gpio *gpio = nouveau_gpio(pfb);
 	struct dcb_gpio_func func;
 	struct nve0_ram *ram;
-	int ret;
+	int ret, i;
+	u32 tmp;
 
 	ret = nvc0_ram_create(parent, engine, oclass, &ram);
 	*pobject = nv_object(ram);
@@ -1136,6 +1243,25 @@
 		break;
 	}
 
+	/* calculate a mask of differently configured memory partitions,
+	 * because, of course, reclocking wasn't complicated enough
+	 * already without having to treat some of them differently to
+	 * the others....
+	 */
+	ram->parts = nv_rd32(pfb, 0x022438);
+	ram->pmask = nv_rd32(pfb, 0x022554);
+	ram->pnuts = 0;
+	for (i = 0, tmp = 0; i < ram->parts; i++) {
+		if (!(ram->pmask & (1 << i))) {
+			u32 cfg1 = nv_rd32(pfb, 0x110204 + (i * 0x1000));
+			if (tmp && tmp != cfg1) {
+				ram->pnuts |= (1 << i);
+				continue;
+			}
+			tmp = cfg1;
+		}
+	}
+
 	// parse bios data for both pll's
 	ret = nvbios_pll_parse(bios, 0x0c, &ram->fuc.refpll);
 	if (ret) {
@@ -1248,7 +1374,7 @@
 	ram->fuc.r_0x10f65c = ramfuc_reg(0x10f65c);
 	ram->fuc.r_0x10f6bc = ramfuc_reg(0x10f6bc);
 	ram->fuc.r_0x100710 = ramfuc_reg(0x100710);
-	ram->fuc.r_0x10f750 = ramfuc_reg(0x10f750);
+	ram->fuc.r_0x100750 = ramfuc_reg(0x100750);
 	return 0;
 }
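/*
 * Illustrative sketch, not part of the patch: the hunks above lean on
 * ram_mask() behaving as a read-modify-write that hands back the register's
 * previous contents (the 0x100770 comparison and the 0x10f294 save/restore
 * around training both depend on that).  Under that assumption it is roughly
 * equivalent to the following; the mmio stub and names here are illustrative.
 */
#include <stdint.h>

static uint32_t mmio[1];                    /* stand-in for the register file */

static uint32_t sketch_rd32(uint32_t reg) { (void)reg; return mmio[0]; }
static void sketch_wr32(uint32_t reg, uint32_t val) { (void)reg; mmio[0] = val; }

static uint32_t
sketch_ram_mask(uint32_t reg, uint32_t mask, uint32_t data)
{
	uint32_t old = sketch_rd32(reg);                 /* read current value */
	sketch_wr32(reg, (old & ~mask) | (data & mask)); /* update masked bits */
	return old;                                      /* caller sees old bits */
}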
 
diff --git a/drivers/gpu/drm/nouveau/core/subdev/instmem/base.c b/drivers/gpu/drm/nouveau/core/subdev/instmem/base.c
index 6565f3d..14706d9 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/instmem/base.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/instmem/base.c
@@ -22,7 +22,24 @@
  * Authors: Ben Skeggs
  */
 
-#include <subdev/instmem.h>
+#include "priv.h"
+
+/******************************************************************************
+ * instmem object base implementation
+ *****************************************************************************/
+
+void
+_nouveau_instobj_dtor(struct nouveau_object *object)
+{
+	struct nouveau_instmem *imem = (void *)object->engine;
+	struct nouveau_instobj *iobj = (void *)object;
+
+	mutex_lock(&nv_subdev(imem)->mutex);
+	list_del(&iobj->head);
+	mutex_unlock(&nv_subdev(imem)->mutex);
+
+	return nouveau_object_destroy(&iobj->base);
+}
 
 int
 nouveau_instobj_create_(struct nouveau_object *parent,
@@ -46,73 +63,26 @@
 	return 0;
 }
 
-void
-nouveau_instobj_destroy(struct nouveau_instobj *iobj)
+/******************************************************************************
+ * instmem subdev base implementation
+ *****************************************************************************/
+
+static int
+nouveau_instmem_alloc(struct nouveau_instmem *imem,
+		      struct nouveau_object *parent, u32 size, u32 align,
+		      struct nouveau_object **pobject)
 {
-	struct nouveau_subdev *subdev = nv_subdev(iobj->base.engine);
-
-	mutex_lock(&subdev->mutex);
-	list_del(&iobj->head);
-	mutex_unlock(&subdev->mutex);
-
-	return nouveau_object_destroy(&iobj->base);
-}
-
-void
-_nouveau_instobj_dtor(struct nouveau_object *object)
-{
-	struct nouveau_instobj *iobj = (void *)object;
-	return nouveau_instobj_destroy(iobj);
+	struct nouveau_object *engine = nv_object(imem);
+	struct nouveau_instmem_impl *impl = (void *)engine->oclass;
+	struct nouveau_instobj_args args = { .size = size, .align = align };
+	return nouveau_object_ctor(parent, engine, impl->instobj, &args,
+				   sizeof(args), pobject);
 }
 
 int
-nouveau_instmem_create_(struct nouveau_object *parent,
-			struct nouveau_object *engine,
-			struct nouveau_oclass *oclass,
-			int length, void **pobject)
+_nouveau_instmem_fini(struct nouveau_object *object, bool suspend)
 {
-	struct nouveau_instmem *imem;
-	int ret;
-
-	ret = nouveau_subdev_create_(parent, engine, oclass, 0,
-				     "INSTMEM", "instmem", length, pobject);
-	imem = *pobject;
-	if (ret)
-		return ret;
-
-	INIT_LIST_HEAD(&imem->list);
-	return 0;
-}
-
-int
-nouveau_instmem_init(struct nouveau_instmem *imem)
-{
-	struct nouveau_instobj *iobj;
-	int ret, i;
-
-	ret = nouveau_subdev_init(&imem->base);
-	if (ret)
-		return ret;
-
-	mutex_lock(&imem->base.mutex);
-
-	list_for_each_entry(iobj, &imem->list, head) {
-		if (iobj->suspend) {
-			for (i = 0; i < iobj->size; i += 4)
-				nv_wo32(iobj, i, iobj->suspend[i / 4]);
-			vfree(iobj->suspend);
-			iobj->suspend = NULL;
-		}
-	}
-
-	mutex_unlock(&imem->base.mutex);
-
-	return 0;
-}
-
-int
-nouveau_instmem_fini(struct nouveau_instmem *imem, bool suspend)
-{
+	struct nouveau_instmem *imem = (void *)object;
 	struct nouveau_instobj *iobj;
 	int i, ret = 0;
 
@@ -143,12 +113,45 @@
 _nouveau_instmem_init(struct nouveau_object *object)
 {
 	struct nouveau_instmem *imem = (void *)object;
-	return nouveau_instmem_init(imem);
+	struct nouveau_instobj *iobj;
+	int ret, i;
+
+	ret = nouveau_subdev_init(&imem->base);
+	if (ret)
+		return ret;
+
+	mutex_lock(&imem->base.mutex);
+
+	list_for_each_entry(iobj, &imem->list, head) {
+		if (iobj->suspend) {
+			for (i = 0; i < iobj->size; i += 4)
+				nv_wo32(iobj, i, iobj->suspend[i / 4]);
+			vfree(iobj->suspend);
+			iobj->suspend = NULL;
+		}
+	}
+
+	mutex_unlock(&imem->base.mutex);
+
+	return 0;
 }
 
 int
-_nouveau_instmem_fini(struct nouveau_object *object, bool suspend)
+nouveau_instmem_create_(struct nouveau_object *parent,
+			struct nouveau_object *engine,
+			struct nouveau_oclass *oclass,
+			int length, void **pobject)
 {
-	struct nouveau_instmem *imem = (void *)object;
-	return nouveau_instmem_fini(imem, suspend);
+	struct nouveau_instmem *imem;
+	int ret;
+
+	ret = nouveau_subdev_create_(parent, engine, oclass, 0,
+				     "INSTMEM", "instmem", length, pobject);
+	imem = *pobject;
+	if (ret)
+		return ret;
+
+	INIT_LIST_HEAD(&imem->list);
+	imem->alloc = nouveau_instmem_alloc;
+	return 0;
 }
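/*
 * Illustrative usage sketch, not part of the patch: with this refactor every
 * chipset gets the generic nouveau_instmem_alloc() installed as imem->alloc
 * by nouveau_instmem_create_(), and the per-chipset object class comes from
 * nouveau_instmem_impl.instobj; size/align now travel in nouveau_instobj_args
 * instead of the old (void *)align cast.  A caller might look roughly like
 * this (the size and alignment are made-up values for illustration):
 */
static int
sketch_alloc_instobj(struct nouveau_instmem *imem, struct nouveau_object *parent,
		     struct nouveau_object **pobj)
{
	/* 4KiB object, 256-byte alignment; routed to the nv04/nv50 instobj class */
	return imem->alloc(imem, parent, 0x1000, 0x100, pobj);
}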
diff --git a/drivers/gpu/drm/nouveau/core/subdev/instmem/nv04.c b/drivers/gpu/drm/nouveau/core/subdev/instmem/nv04.c
index 795393d..7b64bef 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/instmem/nv04.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/instmem/nv04.c
@@ -22,45 +22,11 @@
  * Authors: Ben Skeggs
  */
 
-#include <subdev/fb.h>
-
 #include "nv04.h"
 
-static int
-nv04_instobj_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
-		  struct nouveau_oclass *oclass, void *data, u32 size,
-		  struct nouveau_object **pobject)
-{
-	struct nv04_instmem_priv *priv = (void *)engine;
-	struct nv04_instobj_priv *node;
-	int ret, align;
-
-	align = (unsigned long)data;
-	if (!align)
-		align = 1;
-
-	ret = nouveau_instobj_create(parent, engine, oclass, &node);
-	*pobject = nv_object(node);
-	if (ret)
-		return ret;
-
-	ret = nouveau_mm_head(&priv->heap, 1, size, size, align, &node->mem);
-	if (ret)
-		return ret;
-
-	node->base.addr = node->mem->offset;
-	node->base.size = node->mem->length;
-	return 0;
-}
-
-static void
-nv04_instobj_dtor(struct nouveau_object *object)
-{
-	struct nv04_instmem_priv *priv = (void *)object->engine;
-	struct nv04_instobj_priv *node = (void *)object;
-	nouveau_mm_free(&priv->heap, &node->mem);
-	nouveau_instobj_destroy(&node->base);
-}
+/******************************************************************************
+ * instmem object implementation
+ *****************************************************************************/
 
 static u32
 nv04_instobj_rd32(struct nouveau_object *object, u64 addr)
@@ -76,9 +42,46 @@
 	nv_wo32(object->engine, node->mem->offset + addr, data);
 }
 
-static struct nouveau_oclass
+static void
+nv04_instobj_dtor(struct nouveau_object *object)
+{
+	struct nv04_instmem_priv *priv = (void *)object->engine;
+	struct nv04_instobj_priv *node = (void *)object;
+	nouveau_mm_free(&priv->heap, &node->mem);
+	nouveau_instobj_destroy(&node->base);
+}
+
+static int
+nv04_instobj_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
+		  struct nouveau_oclass *oclass, void *data, u32 size,
+		  struct nouveau_object **pobject)
+{
+	struct nv04_instmem_priv *priv = (void *)engine;
+	struct nv04_instobj_priv *node;
+	struct nouveau_instobj_args *args = data;
+	int ret;
+
+	if (!args->align)
+		args->align = 1;
+
+	ret = nouveau_instobj_create(parent, engine, oclass, &node);
+	*pobject = nv_object(node);
+	if (ret)
+		return ret;
+
+	ret = nouveau_mm_head(&priv->heap, 1, args->size, args->size,
+			      args->align, &node->mem);
+	if (ret)
+		return ret;
+
+	node->base.addr = node->mem->offset;
+	node->base.size = node->mem->length;
+	return 0;
+}
+
+struct nouveau_instobj_impl
 nv04_instobj_oclass = {
-	.ofuncs = &(struct nouveau_ofuncs) {
+	.base.ofuncs = &(struct nouveau_ofuncs) {
 		.ctor = nv04_instobj_ctor,
 		.dtor = nv04_instobj_dtor,
 		.init = _nouveau_instobj_init,
@@ -88,19 +91,34 @@
 	},
 };
 
-int
-nv04_instmem_alloc(struct nouveau_instmem *imem, struct nouveau_object *parent,
-		   u32 size, u32 align, struct nouveau_object **pobject)
+/******************************************************************************
+ * instmem subdev implementation
+ *****************************************************************************/
+
+static u32
+nv04_instmem_rd32(struct nouveau_object *object, u64 addr)
 {
-	struct nouveau_object *engine = nv_object(imem);
-	int ret;
+	return nv_rd32(object, 0x700000 + addr);
+}
 
-	ret = nouveau_object_ctor(parent, engine, &nv04_instobj_oclass,
-				  (void *)(unsigned long)align, size, pobject);
-	if (ret)
-		return ret;
+static void
+nv04_instmem_wr32(struct nouveau_object *object, u64 addr, u32 data)
+{
+	return nv_wr32(object, 0x700000 + addr, data);
+}
 
-	return 0;
+void
+nv04_instmem_dtor(struct nouveau_object *object)
+{
+	struct nv04_instmem_priv *priv = (void *)object;
+	nouveau_gpuobj_ref(NULL, &priv->ramfc);
+	nouveau_gpuobj_ref(NULL, &priv->ramro);
+	nouveau_ramht_ref(NULL, &priv->ramht);
+	nouveau_gpuobj_ref(NULL, &priv->vbios);
+	nouveau_mm_fini(&priv->heap);
+	if (priv->iomem)
+		iounmap(priv->iomem);
+	nouveau_instmem_destroy(&priv->base);
 }
 
 static int
@@ -118,7 +136,6 @@
 
 	/* PRAMIN aperture maps over the end of VRAM, reserve it */
 	priv->base.reserved = 512 * 1024;
-	priv->base.alloc    = nv04_instmem_alloc;
 
 	ret = nouveau_mm_init(&priv->heap, 0, priv->base.reserved, 1);
 	if (ret)
@@ -150,36 +167,10 @@
 	return 0;
 }
 
-void
-nv04_instmem_dtor(struct nouveau_object *object)
-{
-	struct nv04_instmem_priv *priv = (void *)object;
-	nouveau_gpuobj_ref(NULL, &priv->ramfc);
-	nouveau_gpuobj_ref(NULL, &priv->ramro);
-	nouveau_ramht_ref(NULL, &priv->ramht);
-	nouveau_gpuobj_ref(NULL, &priv->vbios);
-	nouveau_mm_fini(&priv->heap);
-	if (priv->iomem)
-		iounmap(priv->iomem);
-	nouveau_instmem_destroy(&priv->base);
-}
-
-static u32
-nv04_instmem_rd32(struct nouveau_object *object, u64 addr)
-{
-	return nv_rd32(object, 0x700000 + addr);
-}
-
-static void
-nv04_instmem_wr32(struct nouveau_object *object, u64 addr, u32 data)
-{
-	return nv_wr32(object, 0x700000 + addr, data);
-}
-
-struct nouveau_oclass
-nv04_instmem_oclass = {
-	.handle = NV_SUBDEV(INSTMEM, 0x04),
-	.ofuncs = &(struct nouveau_ofuncs) {
+struct nouveau_oclass *
+nv04_instmem_oclass = &(struct nouveau_instmem_impl) {
+	.base.handle = NV_SUBDEV(INSTMEM, 0x04),
+	.base.ofuncs = &(struct nouveau_ofuncs) {
 		.ctor = nv04_instmem_ctor,
 		.dtor = nv04_instmem_dtor,
 		.init = _nouveau_instmem_init,
@@ -187,4 +178,5 @@
 		.rd32 = nv04_instmem_rd32,
 		.wr32 = nv04_instmem_wr32,
 	},
-};
+	.instobj = &nv04_instobj_oclass.base,
+}.base;
diff --git a/drivers/gpu/drm/nouveau/core/subdev/instmem/nv04.h b/drivers/gpu/drm/nouveau/core/subdev/instmem/nv04.h
index b15b613..095fbc6 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/instmem/nv04.h
+++ b/drivers/gpu/drm/nouveau/core/subdev/instmem/nv04.h
@@ -5,7 +5,9 @@
 #include <core/ramht.h>
 #include <core/mm.h>
 
-#include <subdev/instmem.h>
+#include "priv.h"
+
+extern struct nouveau_instobj_impl nv04_instobj_oclass;
 
 struct nv04_instmem_priv {
 	struct nouveau_instmem base;
diff --git a/drivers/gpu/drm/nouveau/core/subdev/instmem/nv40.c b/drivers/gpu/drm/nouveau/core/subdev/instmem/nv40.c
index b10a143..ec0b966 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/instmem/nv40.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/instmem/nv40.c
@@ -26,6 +26,24 @@
 
 #include "nv04.h"
 
+/******************************************************************************
+ * instmem subdev implementation
+ *****************************************************************************/
+
+static u32
+nv40_instmem_rd32(struct nouveau_object *object, u64 addr)
+{
+	struct nv04_instmem_priv *priv = (void *)object;
+	return ioread32_native(priv->iomem + addr);
+}
+
+static void
+nv40_instmem_wr32(struct nouveau_object *object, u64 addr, u32 data)
+{
+	struct nv04_instmem_priv *priv = (void *)object;
+	iowrite32_native(data, priv->iomem + addr);
+}
+
 static int
 nv40_instmem_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
 		  struct nouveau_oclass *oclass, void *data, u32 size,
@@ -69,7 +87,6 @@
 	priv->base.reserved += 512 * 1024;	/* object storage */
 
 	priv->base.reserved = round_up(priv->base.reserved, 4096);
-	priv->base.alloc    = nv04_instmem_alloc;
 
 	ret = nouveau_mm_init(&priv->heap, 0, priv->base.reserved, 1);
 	if (ret)
@@ -106,24 +123,10 @@
 	return 0;
 }
 
-static u32
-nv40_instmem_rd32(struct nouveau_object *object, u64 addr)
-{
-	struct nv04_instmem_priv *priv = (void *)object;
-	return ioread32_native(priv->iomem + addr);
-}
-
-static void
-nv40_instmem_wr32(struct nouveau_object *object, u64 addr, u32 data)
-{
-	struct nv04_instmem_priv *priv = (void *)object;
-	iowrite32_native(data, priv->iomem + addr);
-}
-
-struct nouveau_oclass
-nv40_instmem_oclass = {
-	.handle = NV_SUBDEV(INSTMEM, 0x40),
-	.ofuncs = &(struct nouveau_ofuncs) {
+struct nouveau_oclass *
+nv40_instmem_oclass = &(struct nouveau_instmem_impl) {
+	.base.handle = NV_SUBDEV(INSTMEM, 0x40),
+	.base.ofuncs = &(struct nouveau_ofuncs) {
 		.ctor = nv40_instmem_ctor,
 		.dtor = nv04_instmem_dtor,
 		.init = _nouveau_instmem_init,
@@ -131,4 +134,5 @@
 		.rd32 = nv40_instmem_rd32,
 		.wr32 = nv40_instmem_wr32,
 	},
-};
+	.instobj = &nv04_instobj_oclass.base,
+}.base;
diff --git a/drivers/gpu/drm/nouveau/core/subdev/instmem/nv50.c b/drivers/gpu/drm/nouveau/core/subdev/instmem/nv50.c
index 97bc5df..7cb3b09 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/instmem/nv50.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/instmem/nv50.c
@@ -22,11 +22,11 @@
  * Authors: Ben Skeggs
  */
 
-#include <subdev/instmem.h>
 #include <subdev/fb.h>
-
 #include <core/mm.h>
 
+#include "priv.h"
+
 struct nv50_instmem_priv {
 	struct nouveau_instmem base;
 	spinlock_t lock;
@@ -38,42 +38,9 @@
 	struct nouveau_mem *mem;
 };
 
-static int
-nv50_instobj_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
-		  struct nouveau_oclass *oclass, void *data, u32 size,
-		  struct nouveau_object **pobject)
-{
-	struct nouveau_fb *pfb = nouveau_fb(parent);
-	struct nv50_instobj_priv *node;
-	u32 align = (unsigned long)data;
-	int ret;
-
-	size  = max((size  + 4095) & ~4095, (u32)4096);
-	align = max((align + 4095) & ~4095, (u32)4096);
-
-	ret = nouveau_instobj_create(parent, engine, oclass, &node);
-	*pobject = nv_object(node);
-	if (ret)
-		return ret;
-
-	ret = pfb->ram->get(pfb, size, align, 0, 0x800, &node->mem);
-	if (ret)
-		return ret;
-
-	node->base.addr = node->mem->offset;
-	node->base.size = node->mem->size << 12;
-	node->mem->page_shift = 12;
-	return 0;
-}
-
-static void
-nv50_instobj_dtor(struct nouveau_object *object)
-{
-	struct nv50_instobj_priv *node = (void *)object;
-	struct nouveau_fb *pfb = nouveau_fb(object);
-	pfb->ram->put(pfb, &node->mem);
-	nouveau_instobj_destroy(&node->base);
-}
+/******************************************************************************
+ * instmem object implementation
+ *****************************************************************************/
 
 static u32
 nv50_instobj_rd32(struct nouveau_object *object, u64 offset)
@@ -113,9 +80,46 @@
 	spin_unlock_irqrestore(&priv->lock, flags);
 }
 
-static struct nouveau_oclass
+static void
+nv50_instobj_dtor(struct nouveau_object *object)
+{
+	struct nv50_instobj_priv *node = (void *)object;
+	struct nouveau_fb *pfb = nouveau_fb(object);
+	pfb->ram->put(pfb, &node->mem);
+	nouveau_instobj_destroy(&node->base);
+}
+
+static int
+nv50_instobj_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
+		  struct nouveau_oclass *oclass, void *data, u32 size,
+		  struct nouveau_object **pobject)
+{
+	struct nouveau_fb *pfb = nouveau_fb(parent);
+	struct nouveau_instobj_args *args = data;
+	struct nv50_instobj_priv *node;
+	int ret;
+
+	args->size  = max((args->size  + 4095) & ~4095, (u32)4096);
+	args->align = max((args->align + 4095) & ~4095, (u32)4096);
+
+	ret = nouveau_instobj_create(parent, engine, oclass, &node);
+	*pobject = nv_object(node);
+	if (ret)
+		return ret;
+
+	ret = pfb->ram->get(pfb, args->size, args->align, 0, 0x800, &node->mem);
+	if (ret)
+		return ret;
+
+	node->base.addr = node->mem->offset;
+	node->base.size = node->mem->size << 12;
+	node->mem->page_shift = 12;
+	return 0;
+}
+
+static struct nouveau_instobj_impl
 nv50_instobj_oclass = {
-	.ofuncs = &(struct nouveau_ofuncs) {
+	.base.ofuncs = &(struct nouveau_ofuncs) {
 		.ctor = nv50_instobj_ctor,
 		.dtor = nv50_instobj_dtor,
 		.init = _nouveau_instobj_init,
@@ -125,13 +129,16 @@
 	},
 };
 
+/******************************************************************************
+ * instmem subdev implementation
+ *****************************************************************************/
+
 static int
-nv50_instmem_alloc(struct nouveau_instmem *imem, struct nouveau_object *parent,
-		   u32 size, u32 align, struct nouveau_object **pobject)
+nv50_instmem_fini(struct nouveau_object *object, bool suspend)
 {
-	struct nouveau_object *engine = nv_object(imem);
-	return nouveau_object_ctor(parent, engine, &nv50_instobj_oclass,
-				   (void *)(unsigned long)align, size, pobject);
+	struct nv50_instmem_priv *priv = (void *)object;
+	priv->addr = ~0ULL;
+	return nouveau_instmem_fini(&priv->base, suspend);
 }
 
 static int
@@ -148,25 +155,17 @@
 		return ret;
 
 	spin_lock_init(&priv->lock);
-	priv->base.alloc = nv50_instmem_alloc;
 	return 0;
 }
 
-static int
-nv50_instmem_fini(struct nouveau_object *object, bool suspend)
-{
-	struct nv50_instmem_priv *priv = (void *)object;
-	priv->addr = ~0ULL;
-	return nouveau_instmem_fini(&priv->base, suspend);
-}
-
-struct nouveau_oclass
-nv50_instmem_oclass = {
-	.handle = NV_SUBDEV(INSTMEM, 0x50),
-	.ofuncs = &(struct nouveau_ofuncs) {
+struct nouveau_oclass *
+nv50_instmem_oclass = &(struct nouveau_instmem_impl) {
+	.base.handle = NV_SUBDEV(INSTMEM, 0x50),
+	.base.ofuncs = &(struct nouveau_ofuncs) {
 		.ctor = nv50_instmem_ctor,
 		.dtor = _nouveau_instmem_dtor,
 		.init = _nouveau_instmem_init,
 		.fini = nv50_instmem_fini,
 	},
-};
+	.instobj = &nv50_instobj_oclass.base,
+}.base;
diff --git a/drivers/gpu/drm/nouveau/core/subdev/instmem/priv.h b/drivers/gpu/drm/nouveau/core/subdev/instmem/priv.h
new file mode 100644
index 0000000..8d67ded
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/subdev/instmem/priv.h
@@ -0,0 +1,56 @@
+#ifndef __NVKM_INSTMEM_PRIV_H__
+#define __NVKM_INSTMEM_PRIV_H__
+
+#include <subdev/instmem.h>
+
+struct nouveau_instobj_impl {
+	struct nouveau_oclass base;
+};
+
+struct nouveau_instobj_args {
+	u32 size;
+	u32 align;
+};
+
+#define nouveau_instobj_create(p,e,o,d)                                        \
+	nouveau_instobj_create_((p), (e), (o), sizeof(**d), (void **)d)
+#define nouveau_instobj_destroy(p) ({                                          \
+	struct nouveau_instobj *iobj = (p);                                    \
+	_nouveau_instobj_dtor(nv_object(iobj));                                \
+})
+#define nouveau_instobj_init(p)                                                \
+	nouveau_object_init(&(p)->base)
+#define nouveau_instobj_fini(p,s)                                              \
+	nouveau_object_fini(&(p)->base, (s))
+
+int  nouveau_instobj_create_(struct nouveau_object *, struct nouveau_object *,
+			     struct nouveau_oclass *, int, void **);
+void _nouveau_instobj_dtor(struct nouveau_object *);
+#define _nouveau_instobj_init nouveau_object_init
+#define _nouveau_instobj_fini nouveau_object_fini
+
+struct nouveau_instmem_impl {
+	struct nouveau_oclass base;
+	struct nouveau_oclass *instobj;
+};
+
+#define nouveau_instmem_create(p,e,o,d)                                        \
+	nouveau_instmem_create_((p), (e), (o), sizeof(**d), (void **)d)
+#define nouveau_instmem_destroy(p)                                             \
+	nouveau_subdev_destroy(&(p)->base)
+#define nouveau_instmem_init(p) ({                                             \
+	struct nouveau_instmem *imem = (p);                                    \
+	_nouveau_instmem_init(nv_object(imem));                                \
+})
+#define nouveau_instmem_fini(p,s) ({                                           \
+	struct nouveau_instmem *imem = (p);                                    \
+	_nouveau_instmem_fini(nv_object(imem), (s));                           \
+})
+
+int nouveau_instmem_create_(struct nouveau_object *, struct nouveau_object *,
+			    struct nouveau_oclass *, int, void **);
+#define _nouveau_instmem_dtor _nouveau_subdev_dtor
+int _nouveau_instmem_init(struct nouveau_object *);
+int _nouveau_instmem_fini(struct nouveau_object *, bool);
+
+#endif
diff --git a/drivers/gpu/drm/nouveau/core/subdev/mc/nv04.h b/drivers/gpu/drm/nouveau/core/subdev/mc/nv04.h
index b0d5c31..81a408e 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/mc/nv04.h
+++ b/drivers/gpu/drm/nouveau/core/subdev/mc/nv04.h
@@ -14,6 +14,7 @@
 extern const struct nouveau_mc_intr nv04_mc_intr[];
 int  nv04_mc_init(struct nouveau_object *);
 void nv40_mc_msi_rearm(struct nouveau_mc *);
+int  nv44_mc_init(struct nouveau_object *object);
 int  nv50_mc_init(struct nouveau_object *);
 extern const struct nouveau_mc_intr nv50_mc_intr[];
 extern const struct nouveau_mc_intr nvc0_mc_intr[];
diff --git a/drivers/gpu/drm/nouveau/core/subdev/mc/nv44.c b/drivers/gpu/drm/nouveau/core/subdev/mc/nv44.c
index 3bfee5c..cc4d0d2 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/mc/nv44.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/mc/nv44.c
@@ -24,7 +24,7 @@
 
 #include "nv04.h"
 
-static int
+int
 nv44_mc_init(struct nouveau_object *object)
 {
 	struct nv04_mc_priv *priv = (void *)object;
diff --git a/drivers/gpu/drm/nouveau/core/subdev/mc/nv4c.c b/drivers/gpu/drm/nouveau/core/subdev/mc/nv4c.c
new file mode 100644
index 0000000..a75c35c
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/subdev/mc/nv4c.c
@@ -0,0 +1,45 @@
+/*
+ * Copyright 2014 Ilia Mirkin
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ilia Mirkin
+ */
+
+#include "nv04.h"
+
+static void
+nv4c_mc_msi_rearm(struct nouveau_mc *pmc)
+{
+	struct nv04_mc_priv *priv = (void *)pmc;
+	nv_wr08(priv, 0x088050, 0xff);
+}
+
+struct nouveau_oclass *
+nv4c_mc_oclass = &(struct nouveau_mc_oclass) {
+	.base.handle = NV_SUBDEV(MC, 0x4c),
+	.base.ofuncs = &(struct nouveau_ofuncs) {
+		.ctor = nv04_mc_ctor,
+		.dtor = _nouveau_mc_dtor,
+		.init = nv44_mc_init,
+		.fini = _nouveau_mc_fini,
+	},
+	.intr = nv04_mc_intr,
+	.msi_rearm = nv4c_mc_msi_rearm,
+}.base;
diff --git a/drivers/gpu/drm/nouveau/core/subdev/mc/nvc0.c b/drivers/gpu/drm/nouveau/core/subdev/mc/nvc0.c
index c02b476..34472d31 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/mc/nvc0.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/mc/nvc0.c
@@ -32,6 +32,7 @@
 	{ 0x00000080, NVDEV_ENGINE_COPY2 },
 	{ 0x00000100, NVDEV_ENGINE_FIFO },
 	{ 0x00001000, NVDEV_ENGINE_GR },
+	{ 0x00002000, NVDEV_SUBDEV_FB },
 	{ 0x00008000, NVDEV_ENGINE_BSP },
 	{ 0x00040000, NVDEV_SUBDEV_THERM },
 	{ 0x00020000, NVDEV_ENGINE_VP },
@@ -40,6 +41,7 @@
 	{ 0x01000000, NVDEV_SUBDEV_PWR },
 	{ 0x02000000, NVDEV_SUBDEV_LTCG },
 	{ 0x04000000, NVDEV_ENGINE_DISP },
+	{ 0x08000000, NVDEV_SUBDEV_FB },
 	{ 0x10000000, NVDEV_SUBDEV_BUS },
 	{ 0x40000000, NVDEV_SUBDEV_IBUS },
 	{ 0x80000000, NVDEV_ENGINE_SW },
diff --git a/drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/i2c_.fuc b/drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/i2c_.fuc
new file mode 100644
index 0000000..757dda7
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/i2c_.fuc
@@ -0,0 +1,393 @@
+/*
+ * Copyright 2013 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#define T_TIMEOUT  2200000
+#define T_RISEFALL 1000
+#define T_HOLD     5000
+
+#ifdef INCLUDE_PROC
+process(PROC_I2C_, #i2c_init, #i2c_recv)
+#endif
+
+/******************************************************************************
+ * I2C_ data segment
+ *****************************************************************************/
+#ifdef INCLUDE_DATA
+i2c_scl_map:
+.b32 NV_PPWR_OUTPUT_I2C_0_SCL
+.b32 NV_PPWR_OUTPUT_I2C_1_SCL
+.b32 NV_PPWR_OUTPUT_I2C_2_SCL
+.b32 NV_PPWR_OUTPUT_I2C_3_SCL
+.b32 NV_PPWR_OUTPUT_I2C_4_SCL
+.b32 NV_PPWR_OUTPUT_I2C_5_SCL
+.b32 NV_PPWR_OUTPUT_I2C_6_SCL
+.b32 NV_PPWR_OUTPUT_I2C_7_SCL
+.b32 NV_PPWR_OUTPUT_I2C_8_SCL
+.b32 NV_PPWR_OUTPUT_I2C_9_SCL
+i2c_sda_map:
+.b32 NV_PPWR_OUTPUT_I2C_0_SDA
+.b32 NV_PPWR_OUTPUT_I2C_1_SDA
+.b32 NV_PPWR_OUTPUT_I2C_2_SDA
+.b32 NV_PPWR_OUTPUT_I2C_3_SDA
+.b32 NV_PPWR_OUTPUT_I2C_4_SDA
+.b32 NV_PPWR_OUTPUT_I2C_5_SDA
+.b32 NV_PPWR_OUTPUT_I2C_6_SDA
+.b32 NV_PPWR_OUTPUT_I2C_7_SDA
+.b32 NV_PPWR_OUTPUT_I2C_8_SDA
+.b32 NV_PPWR_OUTPUT_I2C_9_SDA
+#if NVKM_PPWR_CHIPSET < GF119
+i2c_ctrl:
+.b32 0x00e138
+.b32 0x00e150
+.b32 0x00e168
+.b32 0x00e180
+.b32 0x00e254
+.b32 0x00e274
+.b32 0x00e764
+.b32 0x00e780
+.b32 0x00e79c
+.b32 0x00e7b8
+#endif
+#endif
+
+/******************************************************************************
+ * I2C_ code segment
+ *****************************************************************************/
+#ifdef INCLUDE_CODE
+
+// $r3  - value
+// $r2  - sda line
+// $r1  - scl line
+// $r0  - zero
+i2c_drive_scl:
+	cmp b32 $r3 0
+	bra e #i2c_drive_scl_lo
+	nv_iowr(NV_PPWR_OUTPUT_SET, $r1)
+	ret
+	i2c_drive_scl_lo:
+	nv_iowr(NV_PPWR_OUTPUT_CLR, $r1)
+	ret
+
+i2c_drive_sda:
+	cmp b32 $r3 0
+	bra e #i2c_drive_sda_lo
+	nv_iowr(NV_PPWR_OUTPUT_SET, $r2)
+	ret
+	i2c_drive_sda_lo:
+	nv_iowr(NV_PPWR_OUTPUT_CLR, $r2)
+	ret
+
+i2c_sense_scl:
+	bclr $flags $p1
+	nv_iord($r3, NV_PPWR_INPUT)
+	and $r3 $r1
+	bra z #i2c_sense_scl_done
+		bset $flags $p1
+	i2c_sense_scl_done:
+	ret
+
+i2c_sense_sda:
+	bclr $flags $p1
+	nv_iord($r3, NV_PPWR_INPUT)
+	and $r3 $r2
+	bra z #i2c_sense_sda_done
+		bset $flags $p1
+	i2c_sense_sda_done:
+	ret
+
+#define i2c_drive_scl(v) /*
+*/	mov $r3 (v) /*
+*/	call(i2c_drive_scl)
+#define i2c_drive_sda(v) /*
+*/	mov $r3 (v) /*
+*/	call(i2c_drive_sda)
+#define i2c_sense_scl() /*
+*/	call(i2c_sense_scl)
+#define i2c_sense_sda() /*
+*/	call(i2c_sense_sda)
+#define i2c_delay(v) /*
+*/	mov $r14 (v) /*
+*/	call(nsec)
+
+#define i2c_trace_init() /*
+*/	imm32($r6, 0x10000000) /*
+*/	sub b32 $r7 $r6 1 /*
+*/
+#define i2c_trace_down() /*
+*/	shr b32 $r6 4 /*
+*/	push $r5 /*
+*/	shl b32 $r5 $r6 4 /*
+*/	sub b32 $r5 $r6 /*
+*/	not b32 $r5 /*
+*/	and $r7 $r5 /*
+*/	pop $r5 /*
+*/
+#define i2c_trace_exit() /*
+*/	shl b32 $r6 4 /*
+*/
+#define i2c_trace_next() /*
+*/	add b32 $r7 $r6 /*
+*/
+#define i2c_trace_call(func) /*
+*/	i2c_trace_next() /*
+*/	i2c_trace_down() /*
+*/	call(func) /*
+*/	i2c_trace_exit() /*
+*/
+
+i2c_raise_scl:
+	push $r4
+	mov $r4 (T_TIMEOUT / T_RISEFALL)
+	i2c_drive_scl(1)
+	i2c_raise_scl_wait:
+		i2c_delay(T_RISEFALL)
+		i2c_sense_scl()
+		bra $p1 #i2c_raise_scl_done
+		sub b32 $r4 1
+		bra nz #i2c_raise_scl_wait
+	i2c_raise_scl_done:
+	pop $r4
+	ret
+
+i2c_start:
+	i2c_sense_scl()
+	bra not $p1 #i2c_start_rep
+	i2c_sense_sda()
+	bra not $p1 #i2c_start_rep
+	bra #i2c_start_send
+	i2c_start_rep:
+		i2c_drive_scl(0)
+		i2c_drive_sda(1)
+		i2c_trace_call(i2c_raise_scl)
+		bra not $p1 #i2c_start_out
+	i2c_start_send:
+	i2c_drive_sda(0)
+	i2c_delay(T_HOLD)
+	i2c_drive_scl(0)
+	i2c_delay(T_HOLD)
+	i2c_start_out:
+	ret
+
+i2c_stop:
+	i2c_drive_scl(0)
+	i2c_drive_sda(0)
+	i2c_delay(T_RISEFALL)
+	i2c_drive_scl(1)
+	i2c_delay(T_HOLD)
+	i2c_drive_sda(1)
+	i2c_delay(T_HOLD)
+	ret
+
+// $r3  - value
+// $r2  - sda line
+// $r1  - scl line
+// $r0  - zero
+i2c_bitw:
+	call(i2c_drive_sda)
+	i2c_delay(T_RISEFALL)
+	i2c_trace_call(i2c_raise_scl)
+	bra not $p1 #i2c_bitw_out
+	i2c_delay(T_HOLD)
+	i2c_drive_scl(0)
+	i2c_delay(T_HOLD)
+	i2c_bitw_out:
+	ret
+
+// $r3  - value (out)
+// $r2  - sda line
+// $r1  - scl line
+// $r0  - zero
+i2c_bitr:
+	i2c_drive_sda(1)
+	i2c_delay(T_RISEFALL)
+	i2c_trace_call(i2c_raise_scl)
+	bra not $p1 #i2c_bitr_done
+	i2c_sense_sda()
+	i2c_drive_scl(0)
+	i2c_delay(T_HOLD)
+	xbit $r3 $flags $p1
+	bset $flags $p1
+	i2c_bitr_done:
+	ret
+
+i2c_get_byte:
+	mov $r5 0
+	mov $r4 8
+	i2c_get_byte_next:
+		shl b32 $r5 1
+		i2c_trace_call(i2c_bitr)
+		bra not $p1 #i2c_get_byte_done
+		or $r5 $r3
+		sub b32 $r4 1
+		bra nz #i2c_get_byte_next
+	mov $r3 1
+	i2c_trace_call(i2c_bitw)
+	i2c_get_byte_done:
+	ret
+
+i2c_put_byte:
+	mov $r4 8
+	i2c_put_byte_next:
+		sub b32 $r4 1
+		xbit $r3 $r5 $r4
+		i2c_trace_call(i2c_bitw)
+		bra not $p1 #i2c_put_byte_done
+		cmp b32 $r4 0
+		bra ne #i2c_put_byte_next
+	i2c_trace_call(i2c_bitr)
+	bra not $p1 #i2c_put_byte_done
+	i2c_trace_next()
+	cmp b32 $r3 1
+	bra ne #i2c_put_byte_done
+	bclr $flags $p1	// nack
+	i2c_put_byte_done:
+	ret
+
+i2c_addr:
+	i2c_trace_call(i2c_start)
+	bra not $p1 #i2c_addr_done
+	extr $r3 $r12 I2C__MSG_DATA0_ADDR
+	shl b32 $r3 1
+	or $r5 $r3
+	i2c_trace_call(i2c_put_byte)
+	i2c_addr_done:
+	ret
+
+i2c_acquire_addr:
+	extr $r14 $r12 I2C__MSG_DATA0_PORT
+#if NVKM_PPWR_CHIPSET < GF119
+	shl b32 $r14 2
+	add b32 $r14 #i2c_ctrl
+	ld b32 $r14 D[$r14]
+#else
+	shl b32 $r14 5
+	add b32 $r14 0x00d014
+#endif
+	ret
+
+i2c_acquire:
+	call(i2c_acquire_addr)
+	call(rd32)
+	bset $r13 3
+	call(wr32)
+	ret
+
+i2c_release:
+	call(i2c_acquire_addr)
+	call(rd32)
+	bclr $r13 3
+	call(wr32)
+	ret
+
+// description
+//
+// $r15 - current (i2c)
+// $r14 - sender process name
+// $r13 - message
+// $r12 - data0
+// $r11 - data1
+// $r0  - zero
+i2c_recv:
+	bclr $flags $p1
+	extr $r1 $r12 I2C__MSG_DATA0_PORT
+	shl b32 $r1 2
+	cmp b32 $r1 (#i2c_sda_map - #i2c_scl_map)
+	bra ge #i2c_recv_done
+	add b32 $r3 $r1 #i2c_sda_map
+	ld b32 $r2 D[$r3]
+	add b32 $r3 $r1 #i2c_scl_map
+	ld b32 $r1 D[$r3]
+
+	bset $flags $p2
+	push $r13
+	push $r14
+
+	push $r13
+	i2c_trace_init()
+	i2c_trace_call(i2c_acquire)
+	pop $r13
+
+	cmp b32 $r13 I2C__MSG_RD08
+	bra ne #i2c_recv_not_rd08
+		mov $r5 0
+		i2c_trace_call(i2c_addr)
+		bra not $p1 #i2c_recv_done
+		extr $r5 $r12 I2C__MSG_DATA0_RD08_REG
+		i2c_trace_call(i2c_put_byte)
+		bra not $p1 #i2c_recv_done
+		mov $r5 1
+		i2c_trace_call(i2c_addr)
+		bra not $p1 #i2c_recv_done
+		i2c_trace_call(i2c_get_byte)
+		bra not $p1 #i2c_recv_done
+		ins $r11 $r5 I2C__MSG_DATA1_RD08_VAL
+		i2c_trace_call(i2c_stop)
+		mov b32 $r11 $r5
+		clear b32 $r7
+		bra #i2c_recv_done
+
+	i2c_recv_not_rd08:
+	cmp b32 $r13 I2C__MSG_WR08
+	bra ne #i2c_recv_not_wr08
+		mov $r5 0
+		call(i2c_addr)
+		bra not $p1 #i2c_recv_done
+		extr $r5 $r12 I2C__MSG_DATA0_WR08_REG
+		call(i2c_put_byte)
+		bra not $p1 #i2c_recv_done
+		mov $r5 0
+		call(i2c_addr)
+		bra not $p1 #i2c_recv_done
+		extr $r5 $r11 I2C__MSG_DATA1_WR08_VAL
+		call(i2c_put_byte)
+		bra not $p1 #i2c_recv_done
+		call(i2c_stop)
+		clear b32 $r7
+		extr $r5 $r12 I2C__MSG_DATA0_WR08_SYNC
+		bra nz #i2c_recv_done
+		bclr $flags $p2
+		bra #i2c_recv_done
+
+	i2c_recv_not_wr08:
+
+	i2c_recv_done:
+	extr $r14 $r12 I2C__MSG_DATA0_PORT
+	call(i2c_release)
+
+	pop $r14
+	pop $r13
+	bra not $p2 #i2c_recv_exit
+	mov b32 $r12 $r7
+	call(send)
+
+	i2c_recv_exit:
+	ret
+
+// description
+//
+// $r15 - current (i2c)
+// $r0  - zero
+i2c_init:
+	ret
+#endif
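/*
 * Illustrative C restatement, not part of the patch, of the bit-write path
 * the falcon routines above implement (i2c_bitw plus i2c_raise_scl): put the
 * bit on SDA, wait a rise/fall time, raise SCL while honouring clock
 * stretching within a T_TIMEOUT bound, hold, then drop SCL again.  The
 * drive_sda/drive_scl/sense_scl/delay_ns helpers are stand-ins for the
 * firmware routines; T_RISEFALL/T_HOLD/T_TIMEOUT are the nanosecond constants
 * defined at the top of i2c_.fuc.
 */
static int
sketch_i2c_bitw(int bit)
{
	int tries = T_TIMEOUT / T_RISEFALL;

	drive_sda(bit);			/* i2c_drive_sda             */
	delay_ns(T_RISEFALL);
	drive_scl(1);			/* i2c_raise_scl             */
	do {
		delay_ns(T_RISEFALL);
		if (sense_scl())	/* slave released the clock  */
			goto scl_high;
	} while (--tries > 0);
	return -1;			/* SCL never rose: give up   */

scl_high:
	delay_ns(T_HOLD);
	drive_scl(0);
	delay_ns(T_HOLD);
	return 0;
}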
diff --git a/drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/kernel.fuc b/drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/kernel.fuc
index 0a7b05f..8f29bad 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/kernel.fuc
+++ b/drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/kernel.fuc
@@ -51,12 +51,12 @@
 // $r0  - zero
 rd32:
 	nv_iowr(NV_PPWR_MMIO_ADDR, $r14)
-	mov $r14 NV_PPWR_MMIO_CTRL_OP_RD
-	sethi $r14 NV_PPWR_MMIO_CTRL_TRIGGER
-	nv_iowr(NV_PPWR_MMIO_CTRL, $r14)
+	mov $r13 NV_PPWR_MMIO_CTRL_OP_RD
+	sethi $r13 NV_PPWR_MMIO_CTRL_TRIGGER
+	nv_iowr(NV_PPWR_MMIO_CTRL, $r13)
 	rd32_wait:
-		nv_iord($r14, NV_PPWR_MMIO_CTRL)
-		and $r14 NV_PPWR_MMIO_CTRL_STATUS
+		nv_iord($r13, NV_PPWR_MMIO_CTRL)
+		and $r13 NV_PPWR_MMIO_CTRL_STATUS
 		bra nz #rd32_wait
 	nv_iord($r13, NV_PPWR_MMIO_DATA)
 	ret
@@ -70,23 +70,25 @@
 wr32:
 	nv_iowr(NV_PPWR_MMIO_ADDR, $r14)
 	nv_iowr(NV_PPWR_MMIO_DATA, $r13)
-	mov $r14 NV_PPWR_MMIO_CTRL_OP_WR
-	or $r14 NV_PPWR_MMIO_CTRL_MASK_B32_0
-	sethi $r14 NV_PPWR_MMIO_CTRL_TRIGGER
+	mov $r13 NV_PPWR_MMIO_CTRL_OP_WR
+	or $r13 NV_PPWR_MMIO_CTRL_MASK_B32_0
+	sethi $r13 NV_PPWR_MMIO_CTRL_TRIGGER
 
 #ifdef NVKM_FALCON_MMIO_TRAP
-	mov $r8 NV_PPWR_INTR_TRIGGER_USER1
-	nv_iowr(NV_PPWR_INTR_TRIGGER, $r8)
+	push $r13
+	mov $r13 NV_PPWR_INTR_TRIGGER_USER1
+	nv_iowr(NV_PPWR_INTR_TRIGGER, $r13)
 	wr32_host:
-		nv_iord($r8, NV_PPWR_INTR)
-		and $r8 NV_PPWR_INTR_USER1
+		nv_iord($r13, NV_PPWR_INTR)
+		and $r13 NV_PPWR_INTR_USER1
 		bra nz #wr32_host
+	pop $r13
 #endif
 
-	nv_iowr(NV_PPWR_MMIO_CTRL, $r14)
+	nv_iowr(NV_PPWR_MMIO_CTRL, $r13)
 	wr32_wait:
-		nv_iord($r14, NV_PPWR_MMIO_CTRL)
-		and $r14 NV_PPWR_MMIO_CTRL_STATUS
+		nv_iord($r13, NV_PPWR_MMIO_CTRL)
+		and $r13 NV_PPWR_MMIO_CTRL_STATUS
 		bra nz #wr32_wait
 	ret
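/*
 * A plausible reading, not stated in the patch: rd32/wr32 now stage
 * NV_PPWR_MMIO_CTRL through $r13 so that $r14 keeps holding the MMIO address
 * across the call, which the new i2c_acquire/i2c_release helpers rely on --
 * address computed once, contents read back, one bit flipped, same address
 * written.  In C terms, roughly (falcon_rd32/falcon_wr32 are stand-ins):
 */
static void
sketch_i2c_acquire(uint32_t addr)
{
	uint32_t data = falcon_rd32(addr);	/* rd32: data out, addr preserved */
	falcon_wr32(addr, data | 0x00000008);	/* bset $r13 3; wr32              */
}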
 
diff --git a/drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/macros.fuc b/drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/macros.fuc
index 2a74ea9..e2a63ac 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/macros.fuc
+++ b/drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/macros.fuc
@@ -83,6 +83,50 @@
 #define NV_PPWR_MMIO_CTRL_OP_WR                                      0x00000002
 #define NV_PPWR_OUTPUT                                                   0x07c0
 #define NV_PPWR_OUTPUT_FB_PAUSE                                      0x00000004
+#if NVKM_PPWR_CHIPSET < GF119
+#define NV_PPWR_OUTPUT_I2C_3_SCL                                     0x00000100
+#define NV_PPWR_OUTPUT_I2C_3_SDA                                     0x00000200
+#define NV_PPWR_OUTPUT_I2C_0_SCL                                     0x00001000
+#define NV_PPWR_OUTPUT_I2C_0_SDA                                     0x00002000
+#define NV_PPWR_OUTPUT_I2C_1_SCL                                     0x00004000
+#define NV_PPWR_OUTPUT_I2C_1_SDA                                     0x00008000
+#define NV_PPWR_OUTPUT_I2C_2_SCL                                     0x00010000
+#define NV_PPWR_OUTPUT_I2C_2_SDA                                     0x00020000
+#define NV_PPWR_OUTPUT_I2C_4_SCL                                     0x00040000
+#define NV_PPWR_OUTPUT_I2C_4_SDA                                     0x00080000
+#define NV_PPWR_OUTPUT_I2C_5_SCL                                     0x00100000
+#define NV_PPWR_OUTPUT_I2C_5_SDA                                     0x00200000
+#define NV_PPWR_OUTPUT_I2C_6_SCL                                     0x00400000
+#define NV_PPWR_OUTPUT_I2C_6_SDA                                     0x00800000
+#define NV_PPWR_OUTPUT_I2C_7_SCL                                     0x01000000
+#define NV_PPWR_OUTPUT_I2C_7_SDA                                     0x02000000
+#define NV_PPWR_OUTPUT_I2C_8_SCL                                     0x04000000
+#define NV_PPWR_OUTPUT_I2C_8_SDA                                     0x08000000
+#define NV_PPWR_OUTPUT_I2C_9_SCL                                     0x10000000
+#define NV_PPWR_OUTPUT_I2C_9_SDA                                     0x20000000
+#else
+#define NV_PPWR_OUTPUT_I2C_0_SCL                                     0x00000400
+#define NV_PPWR_OUTPUT_I2C_1_SCL                                     0x00000800
+#define NV_PPWR_OUTPUT_I2C_2_SCL                                     0x00001000
+#define NV_PPWR_OUTPUT_I2C_3_SCL                                     0x00002000
+#define NV_PPWR_OUTPUT_I2C_4_SCL                                     0x00004000
+#define NV_PPWR_OUTPUT_I2C_5_SCL                                     0x00008000
+#define NV_PPWR_OUTPUT_I2C_6_SCL                                     0x00010000
+#define NV_PPWR_OUTPUT_I2C_7_SCL                                     0x00020000
+#define NV_PPWR_OUTPUT_I2C_8_SCL                                     0x00040000
+#define NV_PPWR_OUTPUT_I2C_9_SCL                                     0x00080000
+#define NV_PPWR_OUTPUT_I2C_0_SDA                                     0x00100000
+#define NV_PPWR_OUTPUT_I2C_1_SDA                                     0x00200000
+#define NV_PPWR_OUTPUT_I2C_2_SDA                                     0x00400000
+#define NV_PPWR_OUTPUT_I2C_3_SDA                                     0x00800000
+#define NV_PPWR_OUTPUT_I2C_4_SDA                                     0x01000000
+#define NV_PPWR_OUTPUT_I2C_5_SDA                                     0x02000000
+#define NV_PPWR_OUTPUT_I2C_6_SDA                                     0x04000000
+#define NV_PPWR_OUTPUT_I2C_7_SDA                                     0x08000000
+#define NV_PPWR_OUTPUT_I2C_8_SDA                                     0x10000000
+#define NV_PPWR_OUTPUT_I2C_9_SDA                                     0x20000000
+#endif
+#define NV_PPWR_INPUT                                                    0x07c4
 #define NV_PPWR_OUTPUT_SET                                               0x07e0
 #define NV_PPWR_OUTPUT_SET_FB_PAUSE                                  0x00000004
 #define NV_PPWR_OUTPUT_CLR                                               0x07e4
@@ -125,6 +169,15 @@
 */	.b32 0 /*
 */	.skip 64
 
+#if NVKM_PPWR_CHIPSET < GK208
+#define imm32(reg,val) /*
+*/	movw reg  ((val) & 0x0000ffff) /*
+*/	sethi reg ((val) & 0xffff0000)
+#else
+#define imm32(reg,val) /*
+*/	mov reg (val)
+#endif
+
 #ifndef NVKM_FALCON_UNSHIFTED_IO
 #define nv_iord(reg,ior) /*
 */	mov reg ior /*
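/*
 * Illustrative note, not part of the patch: on chipsets before GK208 the
 * imm32() macro above builds a 32-bit constant from two halves -- a movw of
 * the low 16 bits followed by a sethi of the upper 16 bits -- presumably
 * because those falcons lack a single-instruction 32-bit immediate load,
 * while GK208 and later use one plain mov.  The split amounts to:
 */
static void
sketch_imm32_split(uint32_t val, uint32_t *lo, uint32_t *hi)
{
	*lo = val & 0x0000ffffu;	/* movw  reg, lo : low half   */
	*hi = val & 0xffff0000u;	/* sethi reg, hi : upper half */
}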
diff --git a/drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/nv108.fuc b/drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/nv108.fuc
index 947be53..17a8a38 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/nv108.fuc
+++ b/drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/nv108.fuc
@@ -37,6 +37,7 @@
 #include "host.fuc"
 #include "memx.fuc"
 #include "perf.fuc"
+#include "i2c_.fuc"
 #include "test.fuc"
 #include "idle.fuc"
 #undef INCLUDE_PROC
@@ -46,6 +47,7 @@
 #include "host.fuc"
 #include "memx.fuc"
 #include "perf.fuc"
+#include "i2c_.fuc"
 #include "test.fuc"
 #include "idle.fuc"
 #undef INCLUDE_DATA
@@ -57,6 +59,7 @@
 #include "host.fuc"
 #include "memx.fuc"
 #include "perf.fuc"
+#include "i2c_.fuc"
 #include "test.fuc"
 #include "idle.fuc"
 #undef INCLUDE_CODE
diff --git a/drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/nv108.fuc.h b/drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/nv108.fuc.h
index 9342e2d..4bd43a9 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/nv108.fuc.h
+++ b/drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/nv108.fuc.h
@@ -89,9 +89,31 @@
 	0x00000000,
 	0x00000000,
 	0x00000000,
+	0x5f433249,
+	0x00000877,
+	0x0000071e,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
 	0x54534554,
-	0x00000494,
-	0x00000475,
+	0x00000898,
+	0x00000879,
 	0x00000000,
 	0x00000000,
 	0x00000000,
@@ -112,8 +134,8 @@
 	0x00000000,
 	0x00000000,
 	0x454c4449,
-	0x0000049f,
-	0x0000049d,
+	0x000008a3,
+	0x000008a1,
 	0x00000000,
 	0x00000000,
 	0x00000000,
@@ -133,12 +155,12 @@
 	0x00000000,
 	0x00000000,
 	0x00000000,
-/* 0x0210: proc_list_tail */
-/* 0x0210: time_prev */
+/* 0x0268: proc_list_tail */
+/* 0x0268: time_prev */
 	0x00000000,
-/* 0x0214: time_next */
+/* 0x026c: time_next */
 	0x00000000,
-/* 0x0218: fifo_queue */
+/* 0x0270: fifo_queue */
 	0x00000000,
 	0x00000000,
 	0x00000000,
@@ -171,7 +193,7 @@
 	0x00000000,
 	0x00000000,
 	0x00000000,
-/* 0x0298: rfifo_queue */
+/* 0x02f0: rfifo_queue */
 	0x00000000,
 	0x00000000,
 	0x00000000,
@@ -204,11 +226,11 @@
 	0x00000000,
 	0x00000000,
 	0x00000000,
-/* 0x0318: memx_func_head */
+/* 0x0370: memx_func_head */
 	0x00010000,
 	0x00000000,
 	0x000003a9,
-/* 0x0324: memx_func_next */
+/* 0x037c: memx_func_next */
 	0x00000001,
 	0x00000000,
 	0x000003c7,
@@ -221,8 +243,8 @@
 	0x00010004,
 	0x00000000,
 	0x00000421,
-/* 0x0354: memx_func_tail */
-/* 0x0354: memx_data_head */
+/* 0x03ac: memx_func_tail */
+/* 0x03ac: memx_data_head */
 	0x00000000,
 	0x00000000,
 	0x00000000,
@@ -735,49 +757,29 @@
 	0x00000000,
 	0x00000000,
 	0x00000000,
-/* 0x0b54: memx_data_tail */
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
+/* 0x0bac: memx_data_tail */
+/* 0x0bac: i2c_scl_map */
+	0x00000400,
+	0x00000800,
+	0x00001000,
+	0x00002000,
+	0x00004000,
+	0x00008000,
+	0x00010000,
+	0x00020000,
+	0x00040000,
+	0x00080000,
+/* 0x0bd4: i2c_sda_map */
+	0x00100000,
+	0x00200000,
+	0x00400000,
+	0x00800000,
+	0x01000000,
+	0x02000000,
+	0x04000000,
+	0x08000000,
+	0x10000000,
+	0x20000000,
 	0x00000000,
 };
 
@@ -786,13 +788,13 @@
 /* 0x0004: rd32 */
 	0xf607a040,
 	0x04bd000e,
-	0xe3f0010e,
+	0xd3f0010d,
 	0x07ac4001,
-	0xbd000ef6,
+	0xbd000df6,
 /* 0x0019: rd32_wait */
-	0x07ac4e04,
-	0xf100eecf,
-	0xf47000e4,
+	0x07ac4d04,
+	0xf100ddcf,
+	0xf47000d4,
 	0xa44df61b,
 	0x00ddcf07,
 /* 0x002e: wr32 */
@@ -800,14 +802,14 @@
 	0x000ef607,
 	0xa44004bd,
 	0x000df607,
-	0x020e04bd,
-	0xf0f0e5f0,
-	0xac4001e3,
-	0x000ef607,
+	0x020d04bd,
+	0xf0f0d5f0,
+	0xac4001d3,
+	0x000df607,
 /* 0x004e: wr32_wait */
-	0xac4e04bd,
-	0x00eecf07,
-	0x7000e4f1,
+	0xac4d04bd,
+	0x00ddcf07,
+	0x7000d4f1,
 	0xf8f61bf4,
 /* 0x005d: nsec */
 	0xcf2c0800,
@@ -832,20 +834,20 @@
 	0x03e99800,
 	0xf40096b0,
 	0x0a98280b,
-	0x029abb84,
+	0x029abb9a,
 	0x0d0e1cf4,
 	0x01de7e01,
 	0xf494bd00,
 /* 0x00b2: intr_watchdog_next_time */
 	0x0a98140e,
-	0x00a6b085,
+	0x00a6b09b,
 	0xa6080bf4,
 	0x061cf49a,
 /* 0x00c0: intr_watchdog_next_time_set */
 /* 0x00c3: intr_watchdog_next_proc */
-	0xb58509b5,
+	0xb59b09b5,
 	0xe0b603e9,
-	0x10e6b158,
+	0x68e6b158,
 	0xc81bf402,
 /* 0x00d2: intr */
 	0x00f900f8,
@@ -862,15 +864,15 @@
 	0x080804bd,
 	0xc40088cf,
 	0x0bf40289,
-	0x8500b51f,
+	0x9b00b51f,
 	0x957e580e,
 	0x09980000,
-	0x0096b085,
+	0x0096b09b,
 	0x000d0bf4,
 	0x0009f634,
 	0x09b504bd,
 /* 0x0125: intr_skip_watchdog */
-	0x0089e484,
+	0x0089e49a,
 	0x360bf408,
 	0xcf068849,
 	0x9ac40099,
@@ -918,7 +920,7 @@
 /* 0x01c6: timer_reset */
 	0x3400161e,
 	0xbd000ef6,
-	0x840eb504,
+	0x9a0eb504,
 /* 0x01d0: timer_enable */
 	0x38000108,
 	0xbd0008f6,
@@ -949,7 +951,7 @@
 	0xa6008a98,
 	0x100bf4ae,
 	0xb15880b6,
-	0xf4021086,
+	0xf4026886,
 	0x32f4f11b,
 /* 0x0239: find_done */
 	0xfc8eb201,
@@ -1009,7 +1011,7 @@
 	0x0bf412a6,
 	0x071ec42e,
 	0xb704ee94,
-	0x980218e0,
+	0x980270e0,
 	0xec9803eb,
 	0x01ed9802,
 	0x7e00ee98,
@@ -1031,7 +1033,7 @@
 	0xf412a608,
 	0x23c4ef0b,
 	0x0434b607,
-	0x029830b7,
+	0x02f030b7,
 	0xb5033bb5,
 	0x3db5023c,
 	0x003eb501,
@@ -1044,11 +1046,11 @@
 /* 0x0379: host_init */
 	0x00804100,
 	0xf11014b6,
-	0x40021815,
+	0x40027015,
 	0x01f604d0,
 	0x4104bd00,
 	0x14b60080,
-	0x9815f110,
+	0xf015f110,
 	0x04dc4002,
 	0xbd0001f6,
 	0x40010104,
@@ -1101,13 +1103,13 @@
 	0x001398b2,
 	0x950410b6,
 	0x30f01034,
-	0xc835980c,
+	0xde35980c,
 	0x12a655f9,
 	0xfced1ef4,
 	0x7ee0fcd0,
 	0xf800023f,
 /* 0x0455: memx_info */
-	0x03544c00,
+	0x03ac4c00,
 	0x7e08004b,
 	0xf800023f,
 /* 0x0461: memx_recv */
@@ -1119,7 +1121,301 @@
 /* 0x0471: perf_recv */
 /* 0x0473: perf_init */
 	0xf800f800,
-/* 0x0475: test_recv */
+/* 0x0475: i2c_drive_scl */
+	0x0036b000,
+	0x400d0bf4,
+	0x01f607e0,
+	0xf804bd00,
+/* 0x0485: i2c_drive_scl_lo */
+	0x07e44000,
+	0xbd0001f6,
+/* 0x048f: i2c_drive_sda */
+	0xb000f804,
+	0x0bf40036,
+	0x07e0400d,
+	0xbd0002f6,
+/* 0x049f: i2c_drive_sda_lo */
+	0x4000f804,
+	0x02f607e4,
+	0xf804bd00,
+/* 0x04a9: i2c_sense_scl */
+	0x0132f400,
+	0xcf07c443,
+	0x31fd0033,
+	0x060bf404,
+/* 0x04bb: i2c_sense_scl_done */
+	0xf80131f4,
+/* 0x04bd: i2c_sense_sda */
+	0x0132f400,
+	0xcf07c443,
+	0x32fd0033,
+	0x060bf404,
+/* 0x04cf: i2c_sense_sda_done */
+	0xf80131f4,
+/* 0x04d1: i2c_raise_scl */
+	0x4440f900,
+	0x01030898,
+	0x0004757e,
+/* 0x04dc: i2c_raise_scl_wait */
+	0x7e03e84e,
+	0x7e00005d,
+	0xf40004a9,
+	0x42b60901,
+	0xef1bf401,
+/* 0x04f0: i2c_raise_scl_done */
+	0x00f840fc,
+/* 0x04f4: i2c_start */
+	0x0004a97e,
+	0x7e0d11f4,
+	0xf40004bd,
+	0x0ef40611,
+/* 0x0505: i2c_start_rep */
+	0x7e00032e,
+	0x03000475,
+	0x048f7e01,
+	0x0076bb00,
+	0xf90465b6,
+	0x04659450,
+	0xbd0256bb,
+	0x0475fd50,
+	0xd17e50fc,
+	0x64b60004,
+	0x1d11f404,
+/* 0x0530: i2c_start_send */
+	0x8f7e0003,
+	0x884e0004,
+	0x005d7e13,
+	0x7e000300,
+	0x4e000475,
+	0x5d7e1388,
+/* 0x054a: i2c_start_out */
+	0x00f80000,
+/* 0x054c: i2c_stop */
+	0x757e0003,
+	0x00030004,
+	0x00048f7e,
+	0x7e03e84e,
+	0x0300005d,
+	0x04757e01,
+	0x13884e00,
+	0x00005d7e,
+	0x8f7e0103,
+	0x884e0004,
+	0x005d7e13,
+/* 0x057b: i2c_bitw */
+	0x7e00f800,
+	0x4e00048f,
+	0x5d7e03e8,
+	0x76bb0000,
+	0x0465b600,
+	0x659450f9,
+	0x0256bb04,
+	0x75fd50bd,
+	0x7e50fc04,
+	0xb60004d1,
+	0x11f40464,
+	0x13884e17,
+	0x00005d7e,
+	0x757e0003,
+	0x884e0004,
+	0x005d7e13,
+/* 0x05b9: i2c_bitw_out */
+/* 0x05bb: i2c_bitr */
+	0x0300f800,
+	0x048f7e01,
+	0x03e84e00,
+	0x00005d7e,
+	0xb60076bb,
+	0x50f90465,
+	0xbb046594,
+	0x50bd0256,
+	0xfc0475fd,
+	0x04d17e50,
+	0x0464b600,
+	0x7e1a11f4,
+	0x030004bd,
+	0x04757e00,
+	0x13884e00,
+	0x00005d7e,
+	0xf4013cf0,
+/* 0x05fe: i2c_bitr_done */
+	0x00f80131,
+/* 0x0600: i2c_get_byte */
+	0x08040005,
+/* 0x0604: i2c_get_byte_next */
+	0xbb0154b6,
+	0x65b60076,
+	0x9450f904,
+	0x56bb0465,
+	0xfd50bd02,
+	0x50fc0475,
+	0x0005bb7e,
+	0xf40464b6,
+	0x53fd2a11,
+	0x0142b605,
+	0x03d81bf4,
+	0x0076bb01,
+	0xf90465b6,
+	0x04659450,
+	0xbd0256bb,
+	0x0475fd50,
+	0x7b7e50fc,
+	0x64b60005,
+/* 0x064d: i2c_get_byte_done */
+/* 0x064f: i2c_put_byte */
+	0x0400f804,
+/* 0x0651: i2c_put_byte_next */
+	0x0142b608,
+	0xbb3854ff,
+	0x65b60076,
+	0x9450f904,
+	0x56bb0465,
+	0xfd50bd02,
+	0x50fc0475,
+	0x00057b7e,
+	0xf40464b6,
+	0x46b03411,
+	0xd81bf400,
+	0xb60076bb,
+	0x50f90465,
+	0xbb046594,
+	0x50bd0256,
+	0xfc0475fd,
+	0x05bb7e50,
+	0x0464b600,
+	0xbb0f11f4,
+	0x36b00076,
+	0x061bf401,
+/* 0x06a7: i2c_put_byte_done */
+	0xf80132f4,
+/* 0x06a9: i2c_addr */
+	0x0076bb00,
+	0xf90465b6,
+	0x04659450,
+	0xbd0256bb,
+	0x0475fd50,
+	0xf47e50fc,
+	0x64b60004,
+	0x2911f404,
+	0x012ec3e7,
+	0xfd0134b6,
+	0x76bb0553,
+	0x0465b600,
+	0x659450f9,
+	0x0256bb04,
+	0x75fd50bd,
+	0x7e50fc04,
+	0xb600064f,
+/* 0x06ee: i2c_addr_done */
+	0x00f80464,
+/* 0x06f0: i2c_acquire_addr */
+	0xb6f8cec7,
+	0xe0b705e4,
+	0x00f8d014,
+/* 0x06fc: i2c_acquire */
+	0x0006f07e,
+	0x0000047e,
+	0x7e03d9f0,
+	0xf800002e,
+/* 0x070d: i2c_release */
+	0x06f07e00,
+	0x00047e00,
+	0x03daf000,
+	0x00002e7e,
+/* 0x071e: i2c_recv */
+	0x32f400f8,
+	0xf8c1c701,
+	0xb00214b6,
+	0x1ff52816,
+	0x13b80137,
+	0x98000bd4,
+	0x13b80032,
+	0x98000bac,
+	0x31f40031,
+	0xf9d0f902,
+	0xf1d0f9e0,
+	0xf1000067,
+	0x92100063,
+	0x76bb0167,
+	0x0465b600,
+	0x659450f9,
+	0x0256bb04,
+	0x75fd50bd,
+	0x7e50fc04,
+	0xb60006fc,
+	0xd0fc0464,
+	0xf500d6b0,
+	0x0500b01b,
+	0x0076bb00,
+	0xf90465b6,
+	0x04659450,
+	0xbd0256bb,
+	0x0475fd50,
+	0xa97e50fc,
+	0x64b60006,
+	0xcc11f504,
+	0xe0c5c700,
+	0xb60076bb,
+	0x50f90465,
+	0xbb046594,
+	0x50bd0256,
+	0xfc0475fd,
+	0x064f7e50,
+	0x0464b600,
+	0x00a911f5,
+	0x76bb0105,
+	0x0465b600,
+	0x659450f9,
+	0x0256bb04,
+	0x75fd50bd,
+	0x7e50fc04,
+	0xb60006a9,
+	0x11f50464,
+	0x76bb0087,
+	0x0465b600,
+	0x659450f9,
+	0x0256bb04,
+	0x75fd50bd,
+	0x7e50fc04,
+	0xb6000600,
+	0x11f40464,
+	0xe05bcb67,
+	0xb60076bb,
+	0x50f90465,
+	0xbb046594,
+	0x50bd0256,
+	0xfc0475fd,
+	0x054c7e50,
+	0x0464b600,
+	0x74bd5bb2,
+/* 0x0823: i2c_recv_not_rd08 */
+	0xb0410ef4,
+	0x1bf401d6,
+	0x7e00053b,
+	0xf40006a9,
+	0xc5c73211,
+	0x064f7ee0,
+	0x2811f400,
+	0xa97e0005,
+	0x11f40006,
+	0xe0b5c71f,
+	0x00064f7e,
+	0x7e1511f4,
+	0xbd00054c,
+	0x08c5c774,
+	0xf4091bf4,
+	0x0ef40232,
+/* 0x0861: i2c_recv_not_wr08 */
+/* 0x0861: i2c_recv_done */
+	0xf8cec703,
+	0x00070d7e,
+	0xd0fce0fc,
+	0xb20912f4,
+	0x023f7e7c,
+/* 0x0875: i2c_recv_exit */
+/* 0x0877: i2c_init */
+	0xf800f800,
+/* 0x0879: test_recv */
 	0x04584100,
 	0xb60011cf,
 	0x58400110,
@@ -1128,26 +1424,26 @@
 	0xe3f1d900,
 	0x967e134f,
 	0x00f80001,
-/* 0x0494: test_init */
+/* 0x0898: test_init */
 	0x7e08004e,
 	0xf8000196,
-/* 0x049d: idle_recv */
-/* 0x049f: idle */
+/* 0x08a1: idle_recv */
+/* 0x08a3: idle */
 	0xf400f800,
 	0x54410031,
 	0x0011cf04,
 	0x400110b6,
 	0x01f60454,
-/* 0x04b3: idle_loop */
+/* 0x08b7: idle_loop */
 	0x0104bd00,
 	0x0232f458,
-/* 0x04b8: idle_proc */
-/* 0x04b8: idle_proc_exec */
+/* 0x08bc: idle_proc */
+/* 0x08bc: idle_proc_exec */
 	0x1eb210f9,
 	0x0002487e,
 	0x11f410fc,
 	0x0231f409,
-/* 0x04cb: idle_proc_next */
+/* 0x08cf: idle_proc_next */
 	0xb6f00ef4,
 	0x1fa65810,
 	0xf4e81bf4,
@@ -1161,5 +1457,4 @@
 	0x00000000,
 	0x00000000,
 	0x00000000,
-	0x00000000,
 };
diff --git a/drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/nva3.fuc b/drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/nva3.fuc
index 6fde0b89..6744fcc 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/nva3.fuc
+++ b/drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/nva3.fuc
@@ -37,6 +37,7 @@
 #include "host.fuc"
 #include "memx.fuc"
 #include "perf.fuc"
+#include "i2c_.fuc"
 #include "test.fuc"
 #include "idle.fuc"
 #undef INCLUDE_PROC
@@ -46,6 +47,7 @@
 #include "host.fuc"
 #include "memx.fuc"
 #include "perf.fuc"
+#include "i2c_.fuc"
 #include "test.fuc"
 #include "idle.fuc"
 #undef INCLUDE_DATA
@@ -57,6 +59,7 @@
 #include "host.fuc"
 #include "memx.fuc"
 #include "perf.fuc"
+#include "i2c_.fuc"
 #include "test.fuc"
 #include "idle.fuc"
 #undef INCLUDE_CODE
diff --git a/drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/nva3.fuc.h b/drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/nva3.fuc.h
index 0fa4d7d..5a73fa6 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/nva3.fuc.h
+++ b/drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/nva3.fuc.h
@@ -89,9 +89,31 @@
 	0x00000000,
 	0x00000000,
 	0x00000000,
+	0x5f433249,
+	0x00000982,
+	0x00000825,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
 	0x54534554,
-	0x0000057b,
-	0x00000554,
+	0x000009ab,
+	0x00000984,
 	0x00000000,
 	0x00000000,
 	0x00000000,
@@ -112,8 +134,8 @@
 	0x00000000,
 	0x00000000,
 	0x454c4449,
-	0x00000587,
-	0x00000585,
+	0x000009b7,
+	0x000009b5,
 	0x00000000,
 	0x00000000,
 	0x00000000,
@@ -133,12 +155,12 @@
 	0x00000000,
 	0x00000000,
 	0x00000000,
-/* 0x0210: proc_list_tail */
-/* 0x0210: time_prev */
+/* 0x0268: proc_list_tail */
+/* 0x0268: time_prev */
 	0x00000000,
-/* 0x0214: time_next */
+/* 0x026c: time_next */
 	0x00000000,
-/* 0x0218: fifo_queue */
+/* 0x0270: fifo_queue */
 	0x00000000,
 	0x00000000,
 	0x00000000,
@@ -171,7 +193,7 @@
 	0x00000000,
 	0x00000000,
 	0x00000000,
-/* 0x0298: rfifo_queue */
+/* 0x02f0: rfifo_queue */
 	0x00000000,
 	0x00000000,
 	0x00000000,
@@ -204,11 +226,11 @@
 	0x00000000,
 	0x00000000,
 	0x00000000,
-/* 0x0318: memx_func_head */
+/* 0x0370: memx_func_head */
 	0x00010000,
 	0x00000000,
 	0x0000046f,
-/* 0x0324: memx_func_next */
+/* 0x037c: memx_func_next */
 	0x00000001,
 	0x00000000,
 	0x00000496,
@@ -221,8 +243,8 @@
 	0x00010004,
 	0x00000000,
 	0x000004fc,
-/* 0x0354: memx_func_tail */
-/* 0x0354: memx_data_head */
+/* 0x03ac: memx_func_tail */
+/* 0x03ac: memx_data_head */
 	0x00000000,
 	0x00000000,
 	0x00000000,
@@ -735,7 +757,52 @@
 	0x00000000,
 	0x00000000,
 	0x00000000,
+/* 0x0bac: memx_data_tail */
+/* 0x0bac: i2c_scl_map */
+	0x00001000,
+	0x00004000,
+	0x00010000,
+	0x00000100,
+	0x00040000,
+	0x00100000,
+	0x00400000,
+	0x01000000,
+	0x04000000,
+	0x10000000,
+/* 0x0bd4: i2c_sda_map */
+	0x00002000,
+	0x00008000,
+	0x00020000,
+	0x00000200,
+	0x00080000,
+	0x00200000,
+	0x00800000,
+	0x02000000,
+	0x08000000,
+	0x20000000,
+/* 0x0bfc: i2c_ctrl */
+	0x0000e138,
+	0x0000e150,
+	0x0000e168,
+	0x0000e180,
+	0x0000e254,
+	0x0000e274,
+	0x0000e764,
+	0x0000e780,
+	0x0000e79c,
+	0x0000e7b8,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
-/* 0x0b54: memx_data_tail */
 	0x00000000,
 	0x00000000,
 	0x00000000,
@@ -787,15 +854,15 @@
 	0x07a007f1,
 	0xd00604b6,
 	0x04bd000e,
-	0xf001e7f0,
-	0x07f101e3,
+	0xf001d7f0,
+	0x07f101d3,
 	0x04b607ac,
-	0x000ed006,
+	0x000dd006,
 /* 0x0022: rd32_wait */
-	0xe7f104bd,
-	0xe4b607ac,
-	0x00eecf06,
-	0x7000e4f1,
+	0xd7f104bd,
+	0xd4b607ac,
+	0x00ddcf06,
+	0x7000d4f1,
 	0xf1f21bf4,
 	0xb607a4d7,
 	0xddcf06d4,
@@ -807,15 +874,15 @@
 	0xb607a407,
 	0x0dd00604,
 	0xf004bd00,
-	0xe5f002e7,
-	0x01e3f0f0,
+	0xd5f002d7,
+	0x01d3f0f0,
 	0x07ac07f1,
 	0xd00604b6,
-	0x04bd000e,
+	0x04bd000d,
 /* 0x006c: wr32_wait */
-	0x07ace7f1,
-	0xcf06e4b6,
-	0xe4f100ee,
+	0x07acd7f1,
+	0xcf06d4b6,
+	0xd4f100dd,
 	0x1bf47000,
 /* 0x007f: nsec */
 	0xf000f8f2,
@@ -845,21 +912,21 @@
 	0x9800f8df,
 	0x96b003e9,
 	0x2a0bf400,
-	0xbb840a98,
+	0xbb9a0a98,
 	0x1cf4029a,
 	0x01d7f00f,
 	0x025421f5,
 	0x0ef494bd,
 /* 0x00e9: intr_watchdog_next_time */
-	0x850a9815,
+	0x9b0a9815,
 	0xf400a6b0,
 	0x9ab8090b,
 	0x061cf406,
 /* 0x00f8: intr_watchdog_next_time_set */
 /* 0x00fb: intr_watchdog_next_proc */
-	0x80850980,
+	0x809b0980,
 	0xe0b603e9,
-	0x10e6b158,
+	0x68e6b158,
 	0xc61bf402,
 /* 0x010a: intr */
 	0x00f900f8,
@@ -880,15 +947,15 @@
 	0x0088cf06,
 	0xf40289c4,
 	0x0080230b,
-	0x58e7f085,
+	0x58e7f09b,
 	0x98cb21f4,
-	0x96b08509,
+	0x96b09b09,
 	0x110bf400,
 	0xb63407f0,
 	0x09d00604,
 	0x8004bd00,
 /* 0x016e: intr_skip_watchdog */
-	0x89e48409,
+	0x89e49a09,
 	0x0bf40800,
 	0x8897f148,
 	0x0694b606,
@@ -948,7 +1015,7 @@
 	0x000ed006,
 	0x0e8004bd,
 /* 0x0241: timer_enable */
-	0x0187f084,
+	0x0187f09a,
 	0xb63807f0,
 	0x08d00604,
 /* 0x024f: timer_done */
@@ -979,7 +1046,7 @@
 	0xb8008a98,
 	0x0bf406ae,
 	0x5880b610,
-	0x021086b1,
+	0x026886b1,
 	0xf4f01bf4,
 /* 0x02b2: find_done */
 	0x8eb90132,
@@ -1049,7 +1116,7 @@
 	0x320bf406,
 	0x94071ec4,
 	0xe0b704ee,
-	0xeb980218,
+	0xeb980270,
 	0x02ec9803,
 	0x9801ed98,
 	0x21f500ee,
@@ -1075,7 +1142,7 @@
 	0xe60bf406,
 	0xb60723c4,
 	0x30b70434,
-	0x3b800298,
+	0x3b8002f0,
 	0x023c8003,
 	0x80013d80,
 	0x20b6003e,
@@ -1090,13 +1157,13 @@
 /* 0x0430: host_init */
 	0x008017f1,
 	0xf11014b6,
-	0xf1021815,
+	0xf1027015,
 	0xb604d007,
 	0x01d00604,
 	0xf104bd00,
 	0xb6008017,
 	0x15f11014,
-	0x07f10298,
+	0x07f102f0,
 	0x04b604dc,
 	0x0001d006,
 	0x17f004bd,
@@ -1156,14 +1223,14 @@
 	0x00139802,
 	0x950410b6,
 	0x30f01034,
-	0xc835980c,
+	0xde35980c,
 	0x12b855f9,
 	0xec1ef406,
 	0xe0fcd0fc,
 	0x02b921f5,
 /* 0x0532: memx_info */
 	0xc7f100f8,
-	0xb7f10354,
+	0xb7f103ac,
 	0x21f50800,
 	0x00f802b9,
 /* 0x0540: memx_recv */
@@ -1175,7 +1242,312 @@
 /* 0x0550: perf_recv */
 /* 0x0552: perf_init */
 	0x00f800f8,
-/* 0x0554: test_recv */
+/* 0x0554: i2c_drive_scl */
+	0xf40036b0,
+	0x07f1110b,
+	0x04b607e0,
+	0x0001d006,
+	0x00f804bd,
+/* 0x0568: i2c_drive_scl_lo */
+	0x07e407f1,
+	0xd00604b6,
+	0x04bd0001,
+/* 0x0576: i2c_drive_sda */
+	0x36b000f8,
+	0x110bf400,
+	0x07e007f1,
+	0xd00604b6,
+	0x04bd0002,
+/* 0x058a: i2c_drive_sda_lo */
+	0x07f100f8,
+	0x04b607e4,
+	0x0002d006,
+	0x00f804bd,
+/* 0x0598: i2c_sense_scl */
+	0xf10132f4,
+	0xb607c437,
+	0x33cf0634,
+	0x0431fd00,
+	0xf4060bf4,
+/* 0x05ae: i2c_sense_scl_done */
+	0x00f80131,
+/* 0x05b0: i2c_sense_sda */
+	0xf10132f4,
+	0xb607c437,
+	0x33cf0634,
+	0x0432fd00,
+	0xf4060bf4,
+/* 0x05c6: i2c_sense_sda_done */
+	0x00f80131,
+/* 0x05c8: i2c_raise_scl */
+	0x47f140f9,
+	0x37f00898,
+	0x5421f501,
+/* 0x05d5: i2c_raise_scl_wait */
+	0xe8e7f105,
+	0x7f21f403,
+	0x059821f5,
+	0xb60901f4,
+	0x1bf40142,
+/* 0x05e9: i2c_raise_scl_done */
+	0xf840fcef,
+/* 0x05ed: i2c_start */
+	0x9821f500,
+	0x0d11f405,
+	0x05b021f5,
+	0xf40611f4,
+/* 0x05fe: i2c_start_rep */
+	0x37f0300e,
+	0x5421f500,
+	0x0137f005,
+	0x057621f5,
+	0xb60076bb,
+	0x50f90465,
+	0xbb046594,
+	0x50bd0256,
+	0xfc0475fd,
+	0xc821f550,
+	0x0464b605,
+/* 0x062b: i2c_start_send */
+	0xf01f11f4,
+	0x21f50037,
+	0xe7f10576,
+	0x21f41388,
+	0x0037f07f,
+	0x055421f5,
+	0x1388e7f1,
+/* 0x0647: i2c_start_out */
+	0xf87f21f4,
+/* 0x0649: i2c_stop */
+	0x0037f000,
+	0x055421f5,
+	0xf50037f0,
+	0xf1057621,
+	0xf403e8e7,
+	0x37f07f21,
+	0x5421f501,
+	0x88e7f105,
+	0x7f21f413,
+	0xf50137f0,
+	0xf1057621,
+	0xf41388e7,
+	0x00f87f21,
+/* 0x067c: i2c_bitw */
+	0x057621f5,
+	0x03e8e7f1,
+	0xbb7f21f4,
+	0x65b60076,
+	0x9450f904,
+	0x56bb0465,
+	0xfd50bd02,
+	0x50fc0475,
+	0x05c821f5,
+	0xf40464b6,
+	0xe7f11811,
+	0x21f41388,
+	0x0037f07f,
+	0x055421f5,
+	0x1388e7f1,
+/* 0x06bb: i2c_bitw_out */
+	0xf87f21f4,
+/* 0x06bd: i2c_bitr */
+	0x0137f000,
+	0x057621f5,
+	0x03e8e7f1,
+	0xbb7f21f4,
+	0x65b60076,
+	0x9450f904,
+	0x56bb0465,
+	0xfd50bd02,
+	0x50fc0475,
+	0x05c821f5,
+	0xf40464b6,
+	0x21f51b11,
+	0x37f005b0,
+	0x5421f500,
+	0x88e7f105,
+	0x7f21f413,
+	0xf4013cf0,
+/* 0x0702: i2c_bitr_done */
+	0x00f80131,
+/* 0x0704: i2c_get_byte */
+	0xf00057f0,
+/* 0x070a: i2c_get_byte_next */
+	0x54b60847,
+	0x0076bb01,
+	0xf90465b6,
+	0x04659450,
+	0xbd0256bb,
+	0x0475fd50,
+	0x21f550fc,
+	0x64b606bd,
+	0x2b11f404,
+	0xb60553fd,
+	0x1bf40142,
+	0x0137f0d8,
+	0xb60076bb,
+	0x50f90465,
+	0xbb046594,
+	0x50bd0256,
+	0xfc0475fd,
+	0x7c21f550,
+	0x0464b606,
+/* 0x0754: i2c_get_byte_done */
+/* 0x0756: i2c_put_byte */
+	0x47f000f8,
+/* 0x0759: i2c_put_byte_next */
+	0x0142b608,
+	0xbb3854ff,
+	0x65b60076,
+	0x9450f904,
+	0x56bb0465,
+	0xfd50bd02,
+	0x50fc0475,
+	0x067c21f5,
+	0xf40464b6,
+	0x46b03411,
+	0xd81bf400,
+	0xb60076bb,
+	0x50f90465,
+	0xbb046594,
+	0x50bd0256,
+	0xfc0475fd,
+	0xbd21f550,
+	0x0464b606,
+	0xbb0f11f4,
+	0x36b00076,
+	0x061bf401,
+/* 0x07af: i2c_put_byte_done */
+	0xf80132f4,
+/* 0x07b1: i2c_addr */
+	0x0076bb00,
+	0xf90465b6,
+	0x04659450,
+	0xbd0256bb,
+	0x0475fd50,
+	0x21f550fc,
+	0x64b605ed,
+	0x2911f404,
+	0x012ec3e7,
+	0xfd0134b6,
+	0x76bb0553,
+	0x0465b600,
+	0x659450f9,
+	0x0256bb04,
+	0x75fd50bd,
+	0xf550fc04,
+	0xb6075621,
+/* 0x07f6: i2c_addr_done */
+	0x00f80464,
+/* 0x07f8: i2c_acquire_addr */
+	0xb6f8cec7,
+	0xe0b702e4,
+	0xee980bfc,
+/* 0x0807: i2c_acquire */
+	0xf500f800,
+	0xf407f821,
+	0xd9f00421,
+	0x3f21f403,
+/* 0x0816: i2c_release */
+	0x21f500f8,
+	0x21f407f8,
+	0x03daf004,
+	0xf83f21f4,
+/* 0x0825: i2c_recv */
+	0x0132f400,
+	0xb6f8c1c7,
+	0x16b00214,
+	0x3a1ff528,
+	0xd413a001,
+	0x0032980b,
+	0x0bac13a0,
+	0xf4003198,
+	0xd0f90231,
+	0xd0f9e0f9,
+	0x000067f1,
+	0x100063f1,
+	0xbb016792,
+	0x65b60076,
+	0x9450f904,
+	0x56bb0465,
+	0xfd50bd02,
+	0x50fc0475,
+	0x080721f5,
+	0xfc0464b6,
+	0x00d6b0d0,
+	0x00b31bf5,
+	0xbb0057f0,
+	0x65b60076,
+	0x9450f904,
+	0x56bb0465,
+	0xfd50bd02,
+	0x50fc0475,
+	0x07b121f5,
+	0xf50464b6,
+	0xc700d011,
+	0x76bbe0c5,
+	0x0465b600,
+	0x659450f9,
+	0x0256bb04,
+	0x75fd50bd,
+	0xf550fc04,
+	0xb6075621,
+	0x11f50464,
+	0x57f000ad,
+	0x0076bb01,
+	0xf90465b6,
+	0x04659450,
+	0xbd0256bb,
+	0x0475fd50,
+	0x21f550fc,
+	0x64b607b1,
+	0x8a11f504,
+	0x0076bb00,
+	0xf90465b6,
+	0x04659450,
+	0xbd0256bb,
+	0x0475fd50,
+	0x21f550fc,
+	0x64b60704,
+	0x6a11f404,
+	0xbbe05bcb,
+	0x65b60076,
+	0x9450f904,
+	0x56bb0465,
+	0xfd50bd02,
+	0x50fc0475,
+	0x064921f5,
+	0xb90464b6,
+	0x74bd025b,
+/* 0x092b: i2c_recv_not_rd08 */
+	0xb0430ef4,
+	0x1bf401d6,
+	0x0057f03d,
+	0x07b121f5,
+	0xc73311f4,
+	0x21f5e0c5,
+	0x11f40756,
+	0x0057f029,
+	0x07b121f5,
+	0xc71f11f4,
+	0x21f5e0b5,
+	0x11f40756,
+	0x4921f515,
+	0xc774bd06,
+	0x1bf408c5,
+	0x0232f409,
+/* 0x096b: i2c_recv_not_wr08 */
+/* 0x096b: i2c_recv_done */
+	0xc7030ef4,
+	0x21f5f8ce,
+	0xe0fc0816,
+	0x12f4d0fc,
+	0x027cb90a,
+	0x02b921f5,
+/* 0x0980: i2c_recv_exit */
+/* 0x0982: i2c_init */
+	0x00f800f8,
+/* 0x0984: test_recv */
 	0x05d817f1,
 	0xcf0614b6,
 	0x10b60011,
@@ -1185,12 +1557,12 @@
 	0x00e7f104,
 	0x4fe3f1d9,
 	0xf521f513,
-/* 0x057b: test_init */
+/* 0x09ab: test_init */
 	0xf100f801,
 	0xf50800e7,
 	0xf801f521,
-/* 0x0585: idle_recv */
-/* 0x0587: idle */
+/* 0x09b5: idle_recv */
+/* 0x09b7: idle */
 	0xf400f800,
 	0x17f10031,
 	0x14b605d4,
@@ -1198,32 +1570,20 @@
 	0xf10110b6,
 	0xb605d407,
 	0x01d00604,
-/* 0x05a3: idle_loop */
+/* 0x09d3: idle_loop */
 	0xf004bd00,
 	0x32f45817,
-/* 0x05a9: idle_proc */
-/* 0x05a9: idle_proc_exec */
+/* 0x09d9: idle_proc */
+/* 0x09d9: idle_proc_exec */
 	0xb910f902,
 	0x21f5021e,
 	0x10fc02c2,
 	0xf40911f4,
 	0x0ef40231,
-/* 0x05bd: idle_proc_next */
+/* 0x09ed: idle_proc_next */
 	0x5810b6ef,
 	0xf4061fb8,
 	0x02f4e61b,
 	0x0028f4dd,
 	0x00bb0ef4,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
 };
diff --git a/drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/nvc0.fuc b/drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/nvc0.fuc
index eaa64da..48f7943 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/nvc0.fuc
+++ b/drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/nvc0.fuc
@@ -37,6 +37,7 @@
 #include "host.fuc"
 #include "memx.fuc"
 #include "perf.fuc"
+#include "i2c_.fuc"
 #include "test.fuc"
 #include "idle.fuc"
 #undef INCLUDE_PROC
@@ -46,6 +47,7 @@
 #include "host.fuc"
 #include "memx.fuc"
 #include "perf.fuc"
+#include "i2c_.fuc"
 #include "test.fuc"
 #include "idle.fuc"
 #undef INCLUDE_DATA
@@ -57,6 +59,7 @@
 #include "host.fuc"
 #include "memx.fuc"
 #include "perf.fuc"
+#include "i2c_.fuc"
 #include "test.fuc"
 #include "idle.fuc"
 #undef INCLUDE_CODE
diff --git a/drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/nvc0.fuc.h b/drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/nvc0.fuc.h
index 82c8e8b..4dba00d 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/nvc0.fuc.h
+++ b/drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/nvc0.fuc.h
@@ -89,9 +89,31 @@
 	0x00000000,
 	0x00000000,
 	0x00000000,
+	0x5f433249,
+	0x00000982,
+	0x00000825,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
 	0x54534554,
-	0x0000057b,
-	0x00000554,
+	0x000009ab,
+	0x00000984,
 	0x00000000,
 	0x00000000,
 	0x00000000,
@@ -112,8 +134,8 @@
 	0x00000000,
 	0x00000000,
 	0x454c4449,
-	0x00000587,
-	0x00000585,
+	0x000009b7,
+	0x000009b5,
 	0x00000000,
 	0x00000000,
 	0x00000000,
@@ -133,12 +155,12 @@
 	0x00000000,
 	0x00000000,
 	0x00000000,
-/* 0x0210: proc_list_tail */
-/* 0x0210: time_prev */
+/* 0x0268: proc_list_tail */
+/* 0x0268: time_prev */
 	0x00000000,
-/* 0x0214: time_next */
+/* 0x026c: time_next */
 	0x00000000,
-/* 0x0218: fifo_queue */
+/* 0x0270: fifo_queue */
 	0x00000000,
 	0x00000000,
 	0x00000000,
@@ -171,7 +193,7 @@
 	0x00000000,
 	0x00000000,
 	0x00000000,
-/* 0x0298: rfifo_queue */
+/* 0x02f0: rfifo_queue */
 	0x00000000,
 	0x00000000,
 	0x00000000,
@@ -204,11 +226,11 @@
 	0x00000000,
 	0x00000000,
 	0x00000000,
-/* 0x0318: memx_func_head */
+/* 0x0370: memx_func_head */
 	0x00010000,
 	0x00000000,
 	0x0000046f,
-/* 0x0324: memx_func_next */
+/* 0x037c: memx_func_next */
 	0x00000001,
 	0x00000000,
 	0x00000496,
@@ -221,8 +243,8 @@
 	0x00010004,
 	0x00000000,
 	0x000004fc,
-/* 0x0354: memx_func_tail */
-/* 0x0354: memx_data_head */
+/* 0x03ac: memx_func_tail */
+/* 0x03ac: memx_data_head */
 	0x00000000,
 	0x00000000,
 	0x00000000,
@@ -735,7 +757,52 @@
 	0x00000000,
 	0x00000000,
 	0x00000000,
+/* 0x0bac: memx_data_tail */
+/* 0x0bac: i2c_scl_map */
+	0x00001000,
+	0x00004000,
+	0x00010000,
+	0x00000100,
+	0x00040000,
+	0x00100000,
+	0x00400000,
+	0x01000000,
+	0x04000000,
+	0x10000000,
+/* 0x0bd4: i2c_sda_map */
+	0x00002000,
+	0x00008000,
+	0x00020000,
+	0x00000200,
+	0x00080000,
+	0x00200000,
+	0x00800000,
+	0x02000000,
+	0x08000000,
+	0x20000000,
+/* 0x0bfc: i2c_ctrl */
+	0x0000e138,
+	0x0000e150,
+	0x0000e168,
+	0x0000e180,
+	0x0000e254,
+	0x0000e274,
+	0x0000e764,
+	0x0000e780,
+	0x0000e79c,
+	0x0000e7b8,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
-/* 0x0b54: memx_data_tail */
 	0x00000000,
 	0x00000000,
 	0x00000000,
@@ -787,15 +854,15 @@
 	0x07a007f1,
 	0xd00604b6,
 	0x04bd000e,
-	0xf001e7f0,
-	0x07f101e3,
+	0xf001d7f0,
+	0x07f101d3,
 	0x04b607ac,
-	0x000ed006,
+	0x000dd006,
 /* 0x0022: rd32_wait */
-	0xe7f104bd,
-	0xe4b607ac,
-	0x00eecf06,
-	0x7000e4f1,
+	0xd7f104bd,
+	0xd4b607ac,
+	0x00ddcf06,
+	0x7000d4f1,
 	0xf1f21bf4,
 	0xb607a4d7,
 	0xddcf06d4,
@@ -807,15 +874,15 @@
 	0xb607a407,
 	0x0dd00604,
 	0xf004bd00,
-	0xe5f002e7,
-	0x01e3f0f0,
+	0xd5f002d7,
+	0x01d3f0f0,
 	0x07ac07f1,
 	0xd00604b6,
-	0x04bd000e,
+	0x04bd000d,
 /* 0x006c: wr32_wait */
-	0x07ace7f1,
-	0xcf06e4b6,
-	0xe4f100ee,
+	0x07acd7f1,
+	0xcf06d4b6,
+	0xd4f100dd,
 	0x1bf47000,
 /* 0x007f: nsec */
 	0xf000f8f2,
@@ -845,21 +912,21 @@
 	0x9800f8df,
 	0x96b003e9,
 	0x2a0bf400,
-	0xbb840a98,
+	0xbb9a0a98,
 	0x1cf4029a,
 	0x01d7f00f,
 	0x025421f5,
 	0x0ef494bd,
 /* 0x00e9: intr_watchdog_next_time */
-	0x850a9815,
+	0x9b0a9815,
 	0xf400a6b0,
 	0x9ab8090b,
 	0x061cf406,
 /* 0x00f8: intr_watchdog_next_time_set */
 /* 0x00fb: intr_watchdog_next_proc */
-	0x80850980,
+	0x809b0980,
 	0xe0b603e9,
-	0x10e6b158,
+	0x68e6b158,
 	0xc61bf402,
 /* 0x010a: intr */
 	0x00f900f8,
@@ -880,15 +947,15 @@
 	0x0088cf06,
 	0xf40289c4,
 	0x0080230b,
-	0x58e7f085,
+	0x58e7f09b,
 	0x98cb21f4,
-	0x96b08509,
+	0x96b09b09,
 	0x110bf400,
 	0xb63407f0,
 	0x09d00604,
 	0x8004bd00,
 /* 0x016e: intr_skip_watchdog */
-	0x89e48409,
+	0x89e49a09,
 	0x0bf40800,
 	0x8897f148,
 	0x0694b606,
@@ -948,7 +1015,7 @@
 	0x000ed006,
 	0x0e8004bd,
 /* 0x0241: timer_enable */
-	0x0187f084,
+	0x0187f09a,
 	0xb63807f0,
 	0x08d00604,
 /* 0x024f: timer_done */
@@ -979,7 +1046,7 @@
 	0xb8008a98,
 	0x0bf406ae,
 	0x5880b610,
-	0x021086b1,
+	0x026886b1,
 	0xf4f01bf4,
 /* 0x02b2: find_done */
 	0x8eb90132,
@@ -1049,7 +1116,7 @@
 	0x320bf406,
 	0x94071ec4,
 	0xe0b704ee,
-	0xeb980218,
+	0xeb980270,
 	0x02ec9803,
 	0x9801ed98,
 	0x21f500ee,
@@ -1075,7 +1142,7 @@
 	0xe60bf406,
 	0xb60723c4,
 	0x30b70434,
-	0x3b800298,
+	0x3b8002f0,
 	0x023c8003,
 	0x80013d80,
 	0x20b6003e,
@@ -1090,13 +1157,13 @@
 /* 0x0430: host_init */
 	0x008017f1,
 	0xf11014b6,
-	0xf1021815,
+	0xf1027015,
 	0xb604d007,
 	0x01d00604,
 	0xf104bd00,
 	0xb6008017,
 	0x15f11014,
-	0x07f10298,
+	0x07f102f0,
 	0x04b604dc,
 	0x0001d006,
 	0x17f004bd,
@@ -1156,14 +1223,14 @@
 	0x00139802,
 	0x950410b6,
 	0x30f01034,
-	0xc835980c,
+	0xde35980c,
 	0x12b855f9,
 	0xec1ef406,
 	0xe0fcd0fc,
 	0x02b921f5,
 /* 0x0532: memx_info */
 	0xc7f100f8,
-	0xb7f10354,
+	0xb7f103ac,
 	0x21f50800,
 	0x00f802b9,
 /* 0x0540: memx_recv */
@@ -1175,7 +1242,312 @@
 /* 0x0550: perf_recv */
 /* 0x0552: perf_init */
 	0x00f800f8,
-/* 0x0554: test_recv */
+/* 0x0554: i2c_drive_scl */
+	0xf40036b0,
+	0x07f1110b,
+	0x04b607e0,
+	0x0001d006,
+	0x00f804bd,
+/* 0x0568: i2c_drive_scl_lo */
+	0x07e407f1,
+	0xd00604b6,
+	0x04bd0001,
+/* 0x0576: i2c_drive_sda */
+	0x36b000f8,
+	0x110bf400,
+	0x07e007f1,
+	0xd00604b6,
+	0x04bd0002,
+/* 0x058a: i2c_drive_sda_lo */
+	0x07f100f8,
+	0x04b607e4,
+	0x0002d006,
+	0x00f804bd,
+/* 0x0598: i2c_sense_scl */
+	0xf10132f4,
+	0xb607c437,
+	0x33cf0634,
+	0x0431fd00,
+	0xf4060bf4,
+/* 0x05ae: i2c_sense_scl_done */
+	0x00f80131,
+/* 0x05b0: i2c_sense_sda */
+	0xf10132f4,
+	0xb607c437,
+	0x33cf0634,
+	0x0432fd00,
+	0xf4060bf4,
+/* 0x05c6: i2c_sense_sda_done */
+	0x00f80131,
+/* 0x05c8: i2c_raise_scl */
+	0x47f140f9,
+	0x37f00898,
+	0x5421f501,
+/* 0x05d5: i2c_raise_scl_wait */
+	0xe8e7f105,
+	0x7f21f403,
+	0x059821f5,
+	0xb60901f4,
+	0x1bf40142,
+/* 0x05e9: i2c_raise_scl_done */
+	0xf840fcef,
+/* 0x05ed: i2c_start */
+	0x9821f500,
+	0x0d11f405,
+	0x05b021f5,
+	0xf40611f4,
+/* 0x05fe: i2c_start_rep */
+	0x37f0300e,
+	0x5421f500,
+	0x0137f005,
+	0x057621f5,
+	0xb60076bb,
+	0x50f90465,
+	0xbb046594,
+	0x50bd0256,
+	0xfc0475fd,
+	0xc821f550,
+	0x0464b605,
+/* 0x062b: i2c_start_send */
+	0xf01f11f4,
+	0x21f50037,
+	0xe7f10576,
+	0x21f41388,
+	0x0037f07f,
+	0x055421f5,
+	0x1388e7f1,
+/* 0x0647: i2c_start_out */
+	0xf87f21f4,
+/* 0x0649: i2c_stop */
+	0x0037f000,
+	0x055421f5,
+	0xf50037f0,
+	0xf1057621,
+	0xf403e8e7,
+	0x37f07f21,
+	0x5421f501,
+	0x88e7f105,
+	0x7f21f413,
+	0xf50137f0,
+	0xf1057621,
+	0xf41388e7,
+	0x00f87f21,
+/* 0x067c: i2c_bitw */
+	0x057621f5,
+	0x03e8e7f1,
+	0xbb7f21f4,
+	0x65b60076,
+	0x9450f904,
+	0x56bb0465,
+	0xfd50bd02,
+	0x50fc0475,
+	0x05c821f5,
+	0xf40464b6,
+	0xe7f11811,
+	0x21f41388,
+	0x0037f07f,
+	0x055421f5,
+	0x1388e7f1,
+/* 0x06bb: i2c_bitw_out */
+	0xf87f21f4,
+/* 0x06bd: i2c_bitr */
+	0x0137f000,
+	0x057621f5,
+	0x03e8e7f1,
+	0xbb7f21f4,
+	0x65b60076,
+	0x9450f904,
+	0x56bb0465,
+	0xfd50bd02,
+	0x50fc0475,
+	0x05c821f5,
+	0xf40464b6,
+	0x21f51b11,
+	0x37f005b0,
+	0x5421f500,
+	0x88e7f105,
+	0x7f21f413,
+	0xf4013cf0,
+/* 0x0702: i2c_bitr_done */
+	0x00f80131,
+/* 0x0704: i2c_get_byte */
+	0xf00057f0,
+/* 0x070a: i2c_get_byte_next */
+	0x54b60847,
+	0x0076bb01,
+	0xf90465b6,
+	0x04659450,
+	0xbd0256bb,
+	0x0475fd50,
+	0x21f550fc,
+	0x64b606bd,
+	0x2b11f404,
+	0xb60553fd,
+	0x1bf40142,
+	0x0137f0d8,
+	0xb60076bb,
+	0x50f90465,
+	0xbb046594,
+	0x50bd0256,
+	0xfc0475fd,
+	0x7c21f550,
+	0x0464b606,
+/* 0x0754: i2c_get_byte_done */
+/* 0x0756: i2c_put_byte */
+	0x47f000f8,
+/* 0x0759: i2c_put_byte_next */
+	0x0142b608,
+	0xbb3854ff,
+	0x65b60076,
+	0x9450f904,
+	0x56bb0465,
+	0xfd50bd02,
+	0x50fc0475,
+	0x067c21f5,
+	0xf40464b6,
+	0x46b03411,
+	0xd81bf400,
+	0xb60076bb,
+	0x50f90465,
+	0xbb046594,
+	0x50bd0256,
+	0xfc0475fd,
+	0xbd21f550,
+	0x0464b606,
+	0xbb0f11f4,
+	0x36b00076,
+	0x061bf401,
+/* 0x07af: i2c_put_byte_done */
+	0xf80132f4,
+/* 0x07b1: i2c_addr */
+	0x0076bb00,
+	0xf90465b6,
+	0x04659450,
+	0xbd0256bb,
+	0x0475fd50,
+	0x21f550fc,
+	0x64b605ed,
+	0x2911f404,
+	0x012ec3e7,
+	0xfd0134b6,
+	0x76bb0553,
+	0x0465b600,
+	0x659450f9,
+	0x0256bb04,
+	0x75fd50bd,
+	0xf550fc04,
+	0xb6075621,
+/* 0x07f6: i2c_addr_done */
+	0x00f80464,
+/* 0x07f8: i2c_acquire_addr */
+	0xb6f8cec7,
+	0xe0b702e4,
+	0xee980bfc,
+/* 0x0807: i2c_acquire */
+	0xf500f800,
+	0xf407f821,
+	0xd9f00421,
+	0x3f21f403,
+/* 0x0816: i2c_release */
+	0x21f500f8,
+	0x21f407f8,
+	0x03daf004,
+	0xf83f21f4,
+/* 0x0825: i2c_recv */
+	0x0132f400,
+	0xb6f8c1c7,
+	0x16b00214,
+	0x3a1ff528,
+	0xd413a001,
+	0x0032980b,
+	0x0bac13a0,
+	0xf4003198,
+	0xd0f90231,
+	0xd0f9e0f9,
+	0x000067f1,
+	0x100063f1,
+	0xbb016792,
+	0x65b60076,
+	0x9450f904,
+	0x56bb0465,
+	0xfd50bd02,
+	0x50fc0475,
+	0x080721f5,
+	0xfc0464b6,
+	0x00d6b0d0,
+	0x00b31bf5,
+	0xbb0057f0,
+	0x65b60076,
+	0x9450f904,
+	0x56bb0465,
+	0xfd50bd02,
+	0x50fc0475,
+	0x07b121f5,
+	0xf50464b6,
+	0xc700d011,
+	0x76bbe0c5,
+	0x0465b600,
+	0x659450f9,
+	0x0256bb04,
+	0x75fd50bd,
+	0xf550fc04,
+	0xb6075621,
+	0x11f50464,
+	0x57f000ad,
+	0x0076bb01,
+	0xf90465b6,
+	0x04659450,
+	0xbd0256bb,
+	0x0475fd50,
+	0x21f550fc,
+	0x64b607b1,
+	0x8a11f504,
+	0x0076bb00,
+	0xf90465b6,
+	0x04659450,
+	0xbd0256bb,
+	0x0475fd50,
+	0x21f550fc,
+	0x64b60704,
+	0x6a11f404,
+	0xbbe05bcb,
+	0x65b60076,
+	0x9450f904,
+	0x56bb0465,
+	0xfd50bd02,
+	0x50fc0475,
+	0x064921f5,
+	0xb90464b6,
+	0x74bd025b,
+/* 0x092b: i2c_recv_not_rd08 */
+	0xb0430ef4,
+	0x1bf401d6,
+	0x0057f03d,
+	0x07b121f5,
+	0xc73311f4,
+	0x21f5e0c5,
+	0x11f40756,
+	0x0057f029,
+	0x07b121f5,
+	0xc71f11f4,
+	0x21f5e0b5,
+	0x11f40756,
+	0x4921f515,
+	0xc774bd06,
+	0x1bf408c5,
+	0x0232f409,
+/* 0x096b: i2c_recv_not_wr08 */
+/* 0x096b: i2c_recv_done */
+	0xc7030ef4,
+	0x21f5f8ce,
+	0xe0fc0816,
+	0x12f4d0fc,
+	0x027cb90a,
+	0x02b921f5,
+/* 0x0980: i2c_recv_exit */
+/* 0x0982: i2c_init */
+	0x00f800f8,
+/* 0x0984: test_recv */
 	0x05d817f1,
 	0xcf0614b6,
 	0x10b60011,
@@ -1185,12 +1557,12 @@
 	0x00e7f104,
 	0x4fe3f1d9,
 	0xf521f513,
-/* 0x057b: test_init */
+/* 0x09ab: test_init */
 	0xf100f801,
 	0xf50800e7,
 	0xf801f521,
-/* 0x0585: idle_recv */
-/* 0x0587: idle */
+/* 0x09b5: idle_recv */
+/* 0x09b7: idle */
 	0xf400f800,
 	0x17f10031,
 	0x14b605d4,
@@ -1198,32 +1570,20 @@
 	0xf10110b6,
 	0xb605d407,
 	0x01d00604,
-/* 0x05a3: idle_loop */
+/* 0x09d3: idle_loop */
 	0xf004bd00,
 	0x32f45817,
-/* 0x05a9: idle_proc */
-/* 0x05a9: idle_proc_exec */
+/* 0x09d9: idle_proc */
+/* 0x09d9: idle_proc_exec */
 	0xb910f902,
 	0x21f5021e,
 	0x10fc02c2,
 	0xf40911f4,
 	0x0ef40231,
-/* 0x05bd: idle_proc_next */
+/* 0x09ed: idle_proc_next */
 	0x5810b6ef,
 	0xf4061fb8,
 	0x02f4e61b,
 	0x0028f4dd,
 	0x00bb0ef4,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
 };
diff --git a/drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/nvd0.fuc b/drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/nvd0.fuc
index 32d65ea..8a89dfe 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/nvd0.fuc
+++ b/drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/nvd0.fuc
@@ -37,6 +37,7 @@
 #include "host.fuc"
 #include "memx.fuc"
 #include "perf.fuc"
+#include "i2c_.fuc"
 #include "test.fuc"
 #include "idle.fuc"
 #undef INCLUDE_PROC
@@ -46,6 +47,7 @@
 #include "host.fuc"
 #include "memx.fuc"
 #include "perf.fuc"
+#include "i2c_.fuc"
 #include "test.fuc"
 #include "idle.fuc"
 #undef INCLUDE_DATA
@@ -57,6 +59,7 @@
 #include "host.fuc"
 #include "memx.fuc"
 #include "perf.fuc"
+#include "i2c_.fuc"
 #include "test.fuc"
 #include "idle.fuc"
 #undef INCLUDE_CODE
diff --git a/drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/nvd0.fuc.h b/drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/nvd0.fuc.h
index ce65e2a..5e24c6b 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/nvd0.fuc.h
+++ b/drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/nvd0.fuc.h
@@ -89,9 +89,31 @@
 	0x00000000,
 	0x00000000,
 	0x00000000,
+	0x5f433249,
+	0x000008e3,
+	0x00000786,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
 	0x54534554,
-	0x000004eb,
-	0x000004ca,
+	0x00000906,
+	0x000008e5,
 	0x00000000,
 	0x00000000,
 	0x00000000,
@@ -112,8 +134,8 @@
 	0x00000000,
 	0x00000000,
 	0x454c4449,
-	0x000004f7,
-	0x000004f5,
+	0x00000912,
+	0x00000910,
 	0x00000000,
 	0x00000000,
 	0x00000000,
@@ -133,12 +155,12 @@
 	0x00000000,
 	0x00000000,
 	0x00000000,
-/* 0x0210: proc_list_tail */
-/* 0x0210: time_prev */
+/* 0x0268: proc_list_tail */
+/* 0x0268: time_prev */
 	0x00000000,
-/* 0x0214: time_next */
+/* 0x026c: time_next */
 	0x00000000,
-/* 0x0218: fifo_queue */
+/* 0x0270: fifo_queue */
 	0x00000000,
 	0x00000000,
 	0x00000000,
@@ -171,7 +193,7 @@
 	0x00000000,
 	0x00000000,
 	0x00000000,
-/* 0x0298: rfifo_queue */
+/* 0x02f0: rfifo_queue */
 	0x00000000,
 	0x00000000,
 	0x00000000,
@@ -204,11 +226,11 @@
 	0x00000000,
 	0x00000000,
 	0x00000000,
-/* 0x0318: memx_func_head */
+/* 0x0370: memx_func_head */
 	0x00010000,
 	0x00000000,
 	0x000003f4,
-/* 0x0324: memx_func_next */
+/* 0x037c: memx_func_next */
 	0x00000001,
 	0x00000000,
 	0x00000415,
@@ -221,8 +243,8 @@
 	0x00010004,
 	0x00000000,
 	0x00000472,
-/* 0x0354: memx_func_tail */
-/* 0x0354: memx_data_head */
+/* 0x03ac: memx_func_tail */
+/* 0x03ac: memx_data_head */
 	0x00000000,
 	0x00000000,
 	0x00000000,
@@ -735,49 +757,29 @@
 	0x00000000,
 	0x00000000,
 	0x00000000,
-/* 0x0b54: memx_data_tail */
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
+/* 0x0bac: memx_data_tail */
+/* 0x0bac: i2c_scl_map */
+	0x00000400,
+	0x00000800,
+	0x00001000,
+	0x00002000,
+	0x00004000,
+	0x00008000,
+	0x00010000,
+	0x00020000,
+	0x00040000,
+	0x00080000,
+/* 0x0bd4: i2c_sda_map */
+	0x00100000,
+	0x00200000,
+	0x00400000,
+	0x00800000,
+	0x01000000,
+	0x02000000,
+	0x04000000,
+	0x08000000,
+	0x10000000,
+	0x20000000,
 	0x00000000,
 };
 
@@ -786,14 +788,14 @@
 /* 0x0004: rd32 */
 	0x07a007f1,
 	0xbd000ed0,
-	0x01e7f004,
-	0xf101e3f0,
+	0x01d7f004,
+	0xf101d3f0,
 	0xd007ac07,
-	0x04bd000e,
+	0x04bd000d,
 /* 0x001c: rd32_wait */
-	0x07ace7f1,
-	0xf100eecf,
-	0xf47000e4,
+	0x07acd7f1,
+	0xf100ddcf,
+	0xf47000d4,
 	0xd7f1f51b,
 	0xddcf07a4,
 /* 0x0033: wr32 */
@@ -802,14 +804,14 @@
 	0x04bd000e,
 	0x07a407f1,
 	0xbd000dd0,
-	0x02e7f004,
-	0xf0f0e5f0,
-	0x07f101e3,
-	0x0ed007ac,
+	0x02d7f004,
+	0xf0f0d5f0,
+	0x07f101d3,
+	0x0dd007ac,
 /* 0x0057: wr32_wait */
 	0xf104bd00,
-	0xcf07ace7,
-	0xe4f100ee,
+	0xcf07acd7,
+	0xd4f100dd,
 	0x1bf47000,
 /* 0x0067: nsec */
 	0xf000f8f5,
@@ -836,21 +838,21 @@
 	0x9800f8e2,
 	0x96b003e9,
 	0x2a0bf400,
-	0xbb840a98,
+	0xbb9a0a98,
 	0x1cf4029a,
 	0x01d7f00f,
 	0x020621f5,
 	0x0ef494bd,
 /* 0x00c5: intr_watchdog_next_time */
-	0x850a9815,
+	0x9b0a9815,
 	0xf400a6b0,
 	0x9ab8090b,
 	0x061cf406,
 /* 0x00d4: intr_watchdog_next_time_set */
 /* 0x00d7: intr_watchdog_next_proc */
-	0x80850980,
+	0x809b0980,
 	0xe0b603e9,
-	0x10e6b158,
+	0x68e6b158,
 	0xc61bf402,
 /* 0x00e6: intr */
 	0x00f900f8,
@@ -868,15 +870,15 @@
 	0x0887f004,
 	0xc40088cf,
 	0x0bf40289,
-	0x85008020,
+	0x9b008020,
 	0xf458e7f0,
 	0x0998a721,
-	0x0096b085,
+	0x0096b09b,
 	0xf00e0bf4,
 	0x09d03407,
 	0x8004bd00,
 /* 0x013e: intr_skip_watchdog */
-	0x89e48409,
+	0x89e49a09,
 	0x0bf40800,
 	0x8897f13c,
 	0x0099cf06,
@@ -929,7 +931,7 @@
 	0x0ed03407,
 	0x8004bd00,
 /* 0x01f6: timer_enable */
-	0x87f0840e,
+	0x87f09a0e,
 	0x3807f001,
 	0xbd0008d0,
 /* 0x0201: timer_done */
@@ -960,7 +962,7 @@
 	0x06aeb800,
 	0xb6100bf4,
 	0x86b15880,
-	0x1bf40210,
+	0x1bf40268,
 	0x0132f4f0,
 /* 0x0264: find_done */
 	0xfc028eb9,
@@ -1024,7 +1026,7 @@
 	0x0bf40612,
 	0x071ec42f,
 	0xb704ee94,
-	0x980218e0,
+	0x980270e0,
 	0xec9803eb,
 	0x01ed9802,
 	0xf500ee98,
@@ -1048,7 +1050,7 @@
 	0xec0bf406,
 	0xb60723c4,
 	0x30b70434,
-	0x3b800298,
+	0x3b8002f0,
 	0x023c8003,
 	0x80013d80,
 	0x20b6003e,
@@ -1061,12 +1063,12 @@
 /* 0x03be: host_init */
 	0x17f100f8,
 	0x14b60080,
-	0x1815f110,
+	0x7015f110,
 	0xd007f102,
 	0x0001d004,
 	0x17f104bd,
 	0x14b60080,
-	0x9815f110,
+	0xf015f110,
 	0xdc07f102,
 	0x0001d004,
 	0x17f004bd,
@@ -1122,13 +1124,13 @@
 	0x10b60013,
 	0x10349504,
 	0x980c30f0,
-	0x55f9c835,
+	0x55f9de35,
 	0xf40612b8,
 	0xd0fcec1e,
 	0x21f5e0fc,
 	0x00f8026b,
 /* 0x04a8: memx_info */
-	0x0354c7f1,
+	0x03acc7f1,
 	0x0800b7f1,
 	0x026b21f5,
 /* 0x04b6: memx_recv */
@@ -1140,49 +1142,342 @@
 /* 0x04c6: perf_recv */
 	0x00f800f8,
 /* 0x04c8: perf_init */
-/* 0x04ca: test_recv */
-	0x17f100f8,
-	0x11cf05d8,
-	0x0110b600,
-	0x05d807f1,
+/* 0x04ca: i2c_drive_scl */
+	0x36b000f8,
+	0x0e0bf400,
+	0x07e007f1,
 	0xbd0001d0,
-	0x00e7f104,
-	0x4fe3f1d9,
-	0xb621f513,
-/* 0x04eb: test_init */
-	0xf100f801,
-	0xf50800e7,
-	0xf801b621,
-/* 0x04f5: idle_recv */
-/* 0x04f7: idle */
-	0xf400f800,
-	0x17f10031,
-	0x11cf05d4,
-	0x0110b600,
-	0x05d407f1,
-	0xbd0001d0,
-/* 0x050d: idle_loop */
-	0x5817f004,
-/* 0x0513: idle_proc */
-/* 0x0513: idle_proc_exec */
-	0xf90232f4,
-	0x021eb910,
-	0x027421f5,
-	0x11f410fc,
-	0x0231f409,
-/* 0x0527: idle_proc_next */
-	0xb6ef0ef4,
-	0x1fb85810,
-	0xe61bf406,
-	0xf4dd02f4,
-	0x0ef40028,
-	0x000000c1,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
+/* 0x04db: i2c_drive_scl_lo */
+	0xf100f804,
+	0xd007e407,
+	0x04bd0001,
+/* 0x04e6: i2c_drive_sda */
+	0x36b000f8,
+	0x0e0bf400,
+	0x07e007f1,
+	0xbd0002d0,
+/* 0x04f7: i2c_drive_sda_lo */
+	0xf100f804,
+	0xd007e407,
+	0x04bd0002,
+/* 0x0502: i2c_sense_scl */
+	0x32f400f8,
+	0xc437f101,
+	0x0033cf07,
+	0xf40431fd,
+	0x31f4060b,
+/* 0x0515: i2c_sense_scl_done */
+/* 0x0517: i2c_sense_sda */
+	0xf400f801,
+	0x37f10132,
+	0x33cf07c4,
+	0x0432fd00,
+	0xf4060bf4,
+/* 0x052a: i2c_sense_sda_done */
+	0x00f80131,
+/* 0x052c: i2c_raise_scl */
+	0x47f140f9,
+	0x37f00898,
+	0xca21f501,
+/* 0x0539: i2c_raise_scl_wait */
+	0xe8e7f104,
+	0x6721f403,
+	0x050221f5,
+	0xb60901f4,
+	0x1bf40142,
+/* 0x054d: i2c_raise_scl_done */
+	0xf840fcef,
+/* 0x0551: i2c_start */
+	0x0221f500,
+	0x0d11f405,
+	0x051721f5,
+	0xf40611f4,
+/* 0x0562: i2c_start_rep */
+	0x37f0300e,
+	0xca21f500,
+	0x0137f004,
+	0x04e621f5,
+	0xb60076bb,
+	0x50f90465,
+	0xbb046594,
+	0x50bd0256,
+	0xfc0475fd,
+	0x2c21f550,
+	0x0464b605,
+/* 0x058f: i2c_start_send */
+	0xf01f11f4,
+	0x21f50037,
+	0xe7f104e6,
+	0x21f41388,
+	0x0037f067,
+	0x04ca21f5,
+	0x1388e7f1,
+/* 0x05ab: i2c_start_out */
+	0xf86721f4,
+/* 0x05ad: i2c_stop */
+	0x0037f000,
+	0x04ca21f5,
+	0xf50037f0,
+	0xf104e621,
+	0xf403e8e7,
+	0x37f06721,
+	0xca21f501,
+	0x88e7f104,
+	0x6721f413,
+	0xf50137f0,
+	0xf104e621,
+	0xf41388e7,
+	0x00f86721,
+/* 0x05e0: i2c_bitw */
+	0x04e621f5,
+	0x03e8e7f1,
+	0xbb6721f4,
+	0x65b60076,
+	0x9450f904,
+	0x56bb0465,
+	0xfd50bd02,
+	0x50fc0475,
+	0x052c21f5,
+	0xf40464b6,
+	0xe7f11811,
+	0x21f41388,
+	0x0037f067,
+	0x04ca21f5,
+	0x1388e7f1,
+/* 0x061f: i2c_bitw_out */
+	0xf86721f4,
+/* 0x0621: i2c_bitr */
+	0x0137f000,
+	0x04e621f5,
+	0x03e8e7f1,
+	0xbb6721f4,
+	0x65b60076,
+	0x9450f904,
+	0x56bb0465,
+	0xfd50bd02,
+	0x50fc0475,
+	0x052c21f5,
+	0xf40464b6,
+	0x21f51b11,
+	0x37f00517,
+	0xca21f500,
+	0x88e7f104,
+	0x6721f413,
+	0xf4013cf0,
+/* 0x0666: i2c_bitr_done */
+	0x00f80131,
+/* 0x0668: i2c_get_byte */
+	0xf00057f0,
+/* 0x066e: i2c_get_byte_next */
+	0x54b60847,
+	0x0076bb01,
+	0xf90465b6,
+	0x04659450,
+	0xbd0256bb,
+	0x0475fd50,
+	0x21f550fc,
+	0x64b60621,
+	0x2b11f404,
+	0xb60553fd,
+	0x1bf40142,
+	0x0137f0d8,
+	0xb60076bb,
+	0x50f90465,
+	0xbb046594,
+	0x50bd0256,
+	0xfc0475fd,
+	0xe021f550,
+	0x0464b605,
+/* 0x06b8: i2c_get_byte_done */
+/* 0x06ba: i2c_put_byte */
+	0x47f000f8,
+/* 0x06bd: i2c_put_byte_next */
+	0x0142b608,
+	0xbb3854ff,
+	0x65b60076,
+	0x9450f904,
+	0x56bb0465,
+	0xfd50bd02,
+	0x50fc0475,
+	0x05e021f5,
+	0xf40464b6,
+	0x46b03411,
+	0xd81bf400,
+	0xb60076bb,
+	0x50f90465,
+	0xbb046594,
+	0x50bd0256,
+	0xfc0475fd,
+	0x2121f550,
+	0x0464b606,
+	0xbb0f11f4,
+	0x36b00076,
+	0x061bf401,
+/* 0x0713: i2c_put_byte_done */
+	0xf80132f4,
+/* 0x0715: i2c_addr */
+	0x0076bb00,
+	0xf90465b6,
+	0x04659450,
+	0xbd0256bb,
+	0x0475fd50,
+	0x21f550fc,
+	0x64b60551,
+	0x2911f404,
+	0x012ec3e7,
+	0xfd0134b6,
+	0x76bb0553,
+	0x0465b600,
+	0x659450f9,
+	0x0256bb04,
+	0x75fd50bd,
+	0xf550fc04,
+	0xb606ba21,
+/* 0x075a: i2c_addr_done */
+	0x00f80464,
+/* 0x075c: i2c_acquire_addr */
+	0xb6f8cec7,
+	0xe0b705e4,
+	0x00f8d014,
+/* 0x0768: i2c_acquire */
+	0x075c21f5,
+	0xf00421f4,
+	0x21f403d9,
+/* 0x0777: i2c_release */
+	0xf500f833,
+	0xf4075c21,
+	0xdaf00421,
+	0x3321f403,
+/* 0x0786: i2c_recv */
+	0x32f400f8,
+	0xf8c1c701,
+	0xb00214b6,
+	0x1ff52816,
+	0x13a0013a,
+	0x32980bd4,
+	0xac13a000,
+	0x0031980b,
+	0xf90231f4,
+	0xf9e0f9d0,
+	0x0067f1d0,
+	0x0063f100,
+	0x01679210,
+	0xb60076bb,
+	0x50f90465,
+	0xbb046594,
+	0x50bd0256,
+	0xfc0475fd,
+	0x6821f550,
+	0x0464b607,
+	0xd6b0d0fc,
+	0xb31bf500,
+	0x0057f000,
+	0xb60076bb,
+	0x50f90465,
+	0xbb046594,
+	0x50bd0256,
+	0xfc0475fd,
+	0x1521f550,
+	0x0464b607,
+	0x00d011f5,
+	0xbbe0c5c7,
+	0x65b60076,
+	0x9450f904,
+	0x56bb0465,
+	0xfd50bd02,
+	0x50fc0475,
+	0x06ba21f5,
+	0xf50464b6,
+	0xf000ad11,
+	0x76bb0157,
+	0x0465b600,
+	0x659450f9,
+	0x0256bb04,
+	0x75fd50bd,
+	0xf550fc04,
+	0xb6071521,
+	0x11f50464,
+	0x76bb008a,
+	0x0465b600,
+	0x659450f9,
+	0x0256bb04,
+	0x75fd50bd,
+	0xf550fc04,
+	0xb6066821,
+	0x11f40464,
+	0xe05bcb6a,
+	0xb60076bb,
+	0x50f90465,
+	0xbb046594,
+	0x50bd0256,
+	0xfc0475fd,
+	0xad21f550,
+	0x0464b605,
+	0xbd025bb9,
+	0x430ef474,
+/* 0x088c: i2c_recv_not_rd08 */
+	0xf401d6b0,
+	0x57f03d1b,
+	0x1521f500,
+	0x3311f407,
+	0xf5e0c5c7,
+	0xf406ba21,
+	0x57f02911,
+	0x1521f500,
+	0x1f11f407,
+	0xf5e0b5c7,
+	0xf406ba21,
+	0x21f51511,
+	0x74bd05ad,
+	0xf408c5c7,
+	0x32f4091b,
+	0x030ef402,
+/* 0x08cc: i2c_recv_not_wr08 */
+/* 0x08cc: i2c_recv_done */
+	0xf5f8cec7,
+	0xfc077721,
+	0xf4d0fce0,
+	0x7cb90a12,
+	0x6b21f502,
+/* 0x08e1: i2c_recv_exit */
+/* 0x08e3: i2c_init */
+	0xf800f802,
+/* 0x08e5: test_recv */
+	0xd817f100,
+	0x0011cf05,
+	0xf10110b6,
+	0xd005d807,
+	0x04bd0001,
+	0xd900e7f1,
+	0x134fe3f1,
+	0x01b621f5,
+/* 0x0906: test_init */
+	0xe7f100f8,
+	0x21f50800,
+	0x00f801b6,
+/* 0x0910: idle_recv */
+/* 0x0912: idle */
+	0x31f400f8,
+	0xd417f100,
+	0x0011cf05,
+	0xf10110b6,
+	0xd005d407,
+	0x04bd0001,
+/* 0x0928: idle_loop */
+	0xf45817f0,
+/* 0x092e: idle_proc */
+/* 0x092e: idle_proc_exec */
+	0x10f90232,
+	0xf5021eb9,
+	0xfc027421,
+	0x0911f410,
+	0xf40231f4,
+/* 0x0942: idle_proc_next */
+	0x10b6ef0e,
+	0x061fb858,
+	0xf4e61bf4,
+	0x28f4dd02,
+	0xc10ef400,
 	0x00000000,
 	0x00000000,
 	0x00000000,
diff --git a/drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/os.h b/drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/os.h
index 5fb0ccc..574acfa 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/os.h
+++ b/drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/os.h
@@ -7,6 +7,7 @@
 #define PROC_HOST 0x54534f48
 #define PROC_MEMX 0x584d454d
 #define PROC_PERF 0x46524550
+#define PROC_I2C_ 0x5f433249
 #define PROC_TEST 0x54534554
 
 /* KERN: message identifiers */
@@ -24,4 +25,22 @@
 #define MEMX_WAIT   3
 #define MEMX_DELAY  4
 
+/* I2C_: message identifiers */
+#define I2C__MSG_RD08 0
+#define I2C__MSG_WR08 1
+
+#define I2C__MSG_DATA0_PORT 24:31
+#define I2C__MSG_DATA0_ADDR 14:23
+
+#define I2C__MSG_DATA0_RD08_PORT I2C__MSG_DATA0_PORT
+#define I2C__MSG_DATA0_RD08_ADDR I2C__MSG_DATA0_ADDR
+#define I2C__MSG_DATA0_RD08_REG 0:7
+#define I2C__MSG_DATA1_RD08_VAL 0:7
+
+#define I2C__MSG_DATA0_WR08_PORT I2C__MSG_DATA0_PORT
+#define I2C__MSG_DATA0_WR08_ADDR I2C__MSG_DATA0_ADDR
+#define I2C__MSG_DATA0_WR08_SYNC 8:8
+#define I2C__MSG_DATA0_WR08_REG 0:7
+#define I2C__MSG_DATA1_WR08_VAL 0:7
+
 #endif
diff --git a/drivers/gpu/drm/nouveau/core/subdev/vm/base.c b/drivers/gpu/drm/nouveau/core/subdev/vm/base.c
index ef3133e..7dd680f 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/vm/base.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/vm/base.c
@@ -72,13 +72,7 @@
 	vmm->flush(vm);
 }
 
-void
-nouveau_vm_map(struct nouveau_vma *vma, struct nouveau_mem *node)
-{
-	nouveau_vm_map_at(vma, 0, node);
-}
-
-void
+static void
 nouveau_vm_map_sg_table(struct nouveau_vma *vma, u64 delta, u64 length,
 			struct nouveau_mem *mem)
 {
@@ -136,7 +130,7 @@
 	vmm->flush(vm);
 }
 
-void
+static void
 nouveau_vm_map_sg(struct nouveau_vma *vma, u64 delta, u64 length,
 		  struct nouveau_mem *mem)
 {
@@ -175,6 +169,18 @@
 }
 
 void
+nouveau_vm_map(struct nouveau_vma *vma, struct nouveau_mem *node)
+{
+	if (node->sg)
+		nouveau_vm_map_sg_table(vma, 0, node->size << 12, node);
+	else
+	if (node->pages)
+		nouveau_vm_map_sg(vma, 0, node->size << 12, node);
+	else
+		nouveau_vm_map_at(vma, 0, node);
+}
+
+void
 nouveau_vm_unmap_at(struct nouveau_vma *vma, u64 delta, u64 length)
 {
 	struct nouveau_vm *vm = vma->vm;
diff --git a/drivers/gpu/drm/nouveau/dispnv04/disp.c b/drivers/gpu/drm/nouveau/dispnv04/disp.c
index b13ff0f..2f1ed61 100644
--- a/drivers/gpu/drm/nouveau/dispnv04/disp.c
+++ b/drivers/gpu/drm/nouveau/dispnv04/disp.c
@@ -77,11 +77,6 @@
 
 	nouveau_hw_save_vga_fonts(dev, 1);
 
-	ret = nouveau_object_new(nv_object(drm), NVDRM_DEVICE, 0xd1500000,
-				 NV04_DISP_CLASS, NULL, 0, &disp->core);
-	if (ret)
-		return ret;
-
 	nv04_crtc_create(dev, 0);
 	if (nv_two_heads(dev))
 		nv04_crtc_create(dev, 1);
diff --git a/drivers/gpu/drm/nouveau/dispnv04/disp.h b/drivers/gpu/drm/nouveau/dispnv04/disp.h
index 56a28db..4245fc3 100644
--- a/drivers/gpu/drm/nouveau/dispnv04/disp.h
+++ b/drivers/gpu/drm/nouveau/dispnv04/disp.h
@@ -80,7 +80,6 @@
 	struct nv04_mode_state saved_reg;
 	uint32_t saved_vga_font[4][16384];
 	uint32_t dac_users[4];
-	struct nouveau_object *core;
 	struct nouveau_bo *image[2];
 };
 
diff --git a/drivers/gpu/drm/nouveau/dispnv04/overlay.c b/drivers/gpu/drm/nouveau/dispnv04/overlay.c
index 32e7064..ab03f77 100644
--- a/drivers/gpu/drm/nouveau/dispnv04/overlay.c
+++ b/drivers/gpu/drm/nouveau/dispnv04/overlay.c
@@ -55,9 +55,12 @@
 	int hue;
 	int saturation;
 	int iturbt_709;
+
+	void (*set_params)(struct nouveau_plane *);
 };
 
 static uint32_t formats[] = {
+	DRM_FORMAT_YUYV,
 	DRM_FORMAT_UYVY,
 	DRM_FORMAT_NV12,
 };
@@ -140,10 +143,10 @@
 	nv_wr32(dev, NV_PVIDEO_POINT_OUT(flip), crtc_y << 16 | crtc_x);
 	nv_wr32(dev, NV_PVIDEO_SIZE_OUT(flip), crtc_h << 16 | crtc_w);
 
-	if (fb->pixel_format == DRM_FORMAT_NV12) {
+	if (fb->pixel_format != DRM_FORMAT_UYVY)
 		format |= NV_PVIDEO_FORMAT_COLOR_LE_CR8YB8CB8YA8;
+	if (fb->pixel_format == DRM_FORMAT_NV12)
 		format |= NV_PVIDEO_FORMAT_PLANAR;
-	}
 	if (nv_plane->iturbt_709)
 		format |= NV_PVIDEO_FORMAT_MATRIX_ITURBT709;
 	if (nv_plane->colorkey & (1 << 24))
@@ -182,9 +185,9 @@
 }
 
 static void
-nv10_destroy_plane(struct drm_plane *plane)
+nv_destroy_plane(struct drm_plane *plane)
 {
-	nv10_disable_plane(plane);
+	plane->funcs->disable_plane(plane);
 	drm_plane_cleanup(plane);
 	kfree(plane);
 }
@@ -217,9 +220,9 @@
 }
 
 static int
-nv10_set_property(struct drm_plane *plane,
-		  struct drm_property *property,
-		  uint64_t value)
+nv_set_property(struct drm_plane *plane,
+		struct drm_property *property,
+		uint64_t value)
 {
 	struct nouveau_plane *nv_plane = (struct nouveau_plane *)plane;
 
@@ -238,15 +241,16 @@
 	else
 		return -EINVAL;
 
-	nv10_set_params(nv_plane);
+	if (nv_plane->set_params)
+		nv_plane->set_params(nv_plane);
 	return 0;
 }
 
 static const struct drm_plane_funcs nv10_plane_funcs = {
 	.update_plane = nv10_update_plane,
 	.disable_plane = nv10_disable_plane,
-	.set_property = nv10_set_property,
-	.destroy = nv10_destroy_plane,
+	.set_property = nv_set_property,
+	.destroy = nv_destroy_plane,
 };
 
 static void
@@ -266,7 +270,7 @@
 	case 0x15:
 	case 0x1a:
 	case 0x20:
-		num_formats = 1;
+		num_formats = 2;
 		break;
 	}
 
@@ -321,8 +325,159 @@
 	drm_object_attach_property(&plane->base.base,
 				   plane->props.iturbt_709, plane->iturbt_709);
 
+	plane->set_params = nv10_set_params;
 	nv10_set_params(plane);
-	nv_wr32(dev, NV_PVIDEO_STOP, 1);
+	nv10_disable_plane(&plane->base);
+	return;
+cleanup:
+	drm_plane_cleanup(&plane->base);
+err:
+	kfree(plane);
+	nv_error(dev, "Failed to create plane\n");
+}
+
+static int
+nv04_update_plane(struct drm_plane *plane, struct drm_crtc *crtc,
+		  struct drm_framebuffer *fb, int crtc_x, int crtc_y,
+		  unsigned int crtc_w, unsigned int crtc_h,
+		  uint32_t src_x, uint32_t src_y,
+		  uint32_t src_w, uint32_t src_h)
+{
+	struct nouveau_device *dev = nouveau_dev(plane->dev);
+	struct nouveau_plane *nv_plane = (struct nouveau_plane *)plane;
+	struct nouveau_framebuffer *nv_fb = nouveau_framebuffer(fb);
+	struct nouveau_bo *cur = nv_plane->cur;
+	uint32_t overlay = 1;
+	int brightness = (nv_plane->brightness - 512) * 62 / 512;
+	int pitch, ret, i;
+
+	/* Source parameters given in 16.16 fixed point, ignore fractional. */
+	src_x >>= 16;
+	src_y >>= 16;
+	src_w >>= 16;
+	src_h >>= 16;
+
+	pitch = ALIGN(src_w * 4, 0x100);
+
+	if (pitch > 0xffff)
+		return -ERANGE;
+
+	/* TODO: Compute an offset? Not sure how to do this for YUYV. */
+	if (src_x != 0 || src_y != 0)
+		return -ERANGE;
+
+	if (crtc_w < src_w || crtc_h < src_h)
+		return -ERANGE;
+
+	ret = nouveau_bo_pin(nv_fb->nvbo, TTM_PL_FLAG_VRAM);
+	if (ret)
+		return ret;
+
+	nv_plane->cur = nv_fb->nvbo;
+
+	nv_wr32(dev, NV_PVIDEO_OE_STATE, 0);
+	nv_wr32(dev, NV_PVIDEO_SU_STATE, 0);
+	nv_wr32(dev, NV_PVIDEO_RM_STATE, 0);
+
+	for (i = 0; i < 2; i++) {
+		nv_wr32(dev, NV_PVIDEO_BUFF0_START_ADDRESS + 4 * i,
+			nv_fb->nvbo->bo.offset);
+		nv_wr32(dev, NV_PVIDEO_BUFF0_PITCH_LENGTH + 4 * i, pitch);
+		nv_wr32(dev, NV_PVIDEO_BUFF0_OFFSET + 4 * i, 0);
+	}
+	nv_wr32(dev, NV_PVIDEO_WINDOW_START, crtc_y << 16 | crtc_x);
+	nv_wr32(dev, NV_PVIDEO_WINDOW_SIZE, crtc_h << 16 | crtc_w);
+	nv_wr32(dev, NV_PVIDEO_STEP_SIZE,
+		(uint32_t)(((src_h - 1) << 11) / (crtc_h - 1)) << 16 | (uint32_t)(((src_w - 1) << 11) / (crtc_w - 1)));
+
+	/* It should be possible to convert hue/contrast to this */
+	nv_wr32(dev, NV_PVIDEO_RED_CSC_OFFSET, 0x69 - brightness);
+	nv_wr32(dev, NV_PVIDEO_GREEN_CSC_OFFSET, 0x3e + brightness);
+	nv_wr32(dev, NV_PVIDEO_BLUE_CSC_OFFSET, 0x89 - brightness);
+	nv_wr32(dev, NV_PVIDEO_CSC_ADJUST, 0);
+
+	nv_wr32(dev, NV_PVIDEO_CONTROL_Y, 0x001); /* (BLUR_ON, LINE_HALF) */
+	nv_wr32(dev, NV_PVIDEO_CONTROL_X, 0x111); /* (WEIGHT_HEAVY, SHARPENING_ON, SMOOTHING_ON) */
+
+	nv_wr32(dev, NV_PVIDEO_FIFO_BURST_LENGTH, 0x03);
+	nv_wr32(dev, NV_PVIDEO_FIFO_THRES_SIZE, 0x38);
+
+	nv_wr32(dev, NV_PVIDEO_KEY, nv_plane->colorkey);
+
+	if (nv_plane->colorkey & (1 << 24))
+		overlay |= 0x10;
+	if (fb->pixel_format == DRM_FORMAT_YUYV)
+		overlay |= 0x100;
+
+	nv_wr32(dev, NV_PVIDEO_OVERLAY, overlay);
+
+	nv_wr32(dev, NV_PVIDEO_SU_STATE, nv_rd32(dev, NV_PVIDEO_SU_STATE) ^ (1 << 16));
+
+	if (cur)
+		nouveau_bo_unpin(cur);
+
+	return 0;
+}
+
+static int
+nv04_disable_plane(struct drm_plane *plane)
+{
+	struct nouveau_device *dev = nouveau_dev(plane->dev);
+	struct nouveau_plane *nv_plane = (struct nouveau_plane *)plane;
+
+	nv_mask(dev, NV_PVIDEO_OVERLAY, 1, 0);
+	nv_wr32(dev, NV_PVIDEO_OE_STATE, 0);
+	nv_wr32(dev, NV_PVIDEO_SU_STATE, 0);
+	nv_wr32(dev, NV_PVIDEO_RM_STATE, 0);
+	if (nv_plane->cur) {
+		nouveau_bo_unpin(nv_plane->cur);
+		nv_plane->cur = NULL;
+	}
+
+	return 0;
+}
+
+static const struct drm_plane_funcs nv04_plane_funcs = {
+	.update_plane = nv04_update_plane,
+	.disable_plane = nv04_disable_plane,
+	.set_property = nv_set_property,
+	.destroy = nv_destroy_plane,
+};
+
+static void
+nv04_overlay_init(struct drm_device *device)
+{
+	struct nouveau_device *dev = nouveau_dev(device);
+	struct nouveau_plane *plane = kzalloc(sizeof(struct nouveau_plane), GFP_KERNEL);
+	int ret;
+
+	if (!plane)
+		return;
+
+	ret = drm_plane_init(device, &plane->base, 1 /* single crtc */,
+			     &nv04_plane_funcs,
+			     formats, 2, false);
+	if (ret)
+		goto err;
+
+	/* Set up the plane properties */
+	plane->props.colorkey = drm_property_create_range(
+			device, 0, "colorkey", 0, 0x01ffffff);
+	plane->props.brightness = drm_property_create_range(
+			device, 0, "brightness", 0, 1024);
+	if (!plane->props.colorkey ||
+	    !plane->props.brightness)
+		goto cleanup;
+
+	plane->colorkey = 0;
+	drm_object_attach_property(&plane->base.base,
+				   plane->props.colorkey, plane->colorkey);
+
+	plane->brightness = 512;
+	drm_object_attach_property(&plane->base.base,
+				   plane->props.brightness, plane->brightness);
+
+	nv04_disable_plane(&plane->base);
 	return;
 cleanup:
 	drm_plane_cleanup(&plane->base);
@@ -335,6 +490,8 @@
 nouveau_overlay_init(struct drm_device *device)
 {
 	struct nouveau_device *dev = nouveau_dev(device);
-	if (dev->chipset >= 0x10 && dev->chipset <= 0x40)
+	if (dev->chipset < 0x10)
+		nv04_overlay_init(device);
+	else if (dev->chipset <= 0x40)
 		nv10_overlay_init(device);
 }
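
The NV_PVIDEO_STEP_SIZE write in nv04_update_plane above packs two scaling ratios: the source rectangle arrives in 16.16 fixed point, and after the fraction is dropped each axis is scaled by an 11-bit fractional step of (src - 1)/(crtc - 1), with the vertical ratio in the high half-word. A standalone sketch of that packing (nv04_step_size is illustrative only, not part of the patch, and assumes crtc_w and crtc_h are greater than 1):

#include <stdint.h>

static uint32_t nv04_step_size(uint32_t src_w, uint32_t src_h,
			       uint32_t crtc_w, uint32_t crtc_h)
{
	/* 11-bit fractional scaling ratios, as written to the hardware */
	uint32_t hstep = ((src_w - 1) << 11) / (crtc_w - 1);
	uint32_t vstep = ((src_h - 1) << 11) / (crtc_h - 1);

	return vstep << 16 | hstep;	/* vertical ratio in the high half */
}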
diff --git a/drivers/gpu/drm/nouveau/nouveau_acpi.c b/drivers/gpu/drm/nouveau/nouveau_acpi.c
index 3c14961..4ef83df 100644
--- a/drivers/gpu/drm/nouveau/nouveau_acpi.c
+++ b/drivers/gpu/drm/nouveau/nouveau_acpi.c
@@ -61,6 +61,7 @@
 #define NOUVEAU_DSM_HAS_MUX 0x1
 #define NOUVEAU_DSM_HAS_OPT 0x2
 
+#ifdef CONFIG_VGA_SWITCHEROO
 static const char nouveau_dsm_muid[] = {
 	0xA0, 0xA0, 0x95, 0x9D, 0x60, 0x00, 0x48, 0x4D,
 	0xB3, 0x4D, 0x7E, 0x5F, 0xEA, 0x12, 0x9F, 0xD4,
@@ -326,6 +327,11 @@
 	if (nouveau_dsm_priv.optimus_detected || nouveau_dsm_priv.dsm_detected)
 		vga_switcheroo_unregister_handler();
 }
+#else
+void nouveau_register_dsm_handler(void) {}
+void nouveau_unregister_dsm_handler(void) {}
+void nouveau_switcheroo_optimus_dsm(void) {}
+#endif
 
 /* retrieve the ROM in 4k blocks */
 static int nouveau_rom_call(acpi_handle rom_handle, uint8_t *bios,
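
The new #ifdef CONFIG_VGA_SWITCHEROO block above compiles the DSM/Optimus handling only when switcheroo support is configured and supplies empty definitions otherwise, so call sites stay unconditional. The same pattern in its common header-side form, with a hypothetical config option and function names:

#ifdef CONFIG_EXAMPLE_FEATURE
void example_register_handler(void);
void example_unregister_handler(void);
#else
/* stubs: callers need no #ifdef when the feature is compiled out */
static inline void example_register_handler(void) {}
static inline void example_unregister_handler(void) {}
#endif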
diff --git a/drivers/gpu/drm/nouveau/nouveau_bo.c b/drivers/gpu/drm/nouveau/nouveau_bo.c
index c0fde6b..4aed171 100644
--- a/drivers/gpu/drm/nouveau/nouveau_bo.c
+++ b/drivers/gpu/drm/nouveau/nouveau_bo.c
@@ -560,28 +560,6 @@
 }
 
 
-/* GPU-assisted copy using NV_MEMORY_TO_MEMORY_FORMAT, can access
- * TTM_PL_{VRAM,TT} directly.
- */
-
-static int
-nouveau_bo_move_accel_cleanup(struct nouveau_channel *chan,
-			      struct nouveau_bo *nvbo, bool evict,
-			      bool no_wait_gpu, struct ttm_mem_reg *new_mem)
-{
-	struct nouveau_fence *fence = NULL;
-	int ret;
-
-	ret = nouveau_fence_new(chan, false, &fence);
-	if (ret)
-		return ret;
-
-	ret = ttm_bo_move_accel_cleanup(&nvbo->bo, fence, evict,
-					no_wait_gpu, new_mem);
-	nouveau_fence_unref(&fence);
-	return ret;
-}
-
 static int
 nve0_bo_move_init(struct nouveau_channel *chan, u32 handle)
 {
@@ -798,25 +776,25 @@
 		  struct ttm_mem_reg *old_mem, struct ttm_mem_reg *new_mem)
 {
 	struct nouveau_mem *node = old_mem->mm_node;
-	struct nouveau_bo *nvbo = nouveau_bo(bo);
 	u64 length = (new_mem->num_pages << PAGE_SHIFT);
 	u64 src_offset = node->vma[0].offset;
 	u64 dst_offset = node->vma[1].offset;
+	int src_tiled = !!node->memtype;
+	int dst_tiled = !!((struct nouveau_mem *)new_mem->mm_node)->memtype;
 	int ret;
 
 	while (length) {
 		u32 amount, stride, height;
 
+		ret = RING_SPACE(chan, 18 + 6 * (src_tiled + dst_tiled));
+		if (ret)
+			return ret;
+
 		amount  = min(length, (u64)(4 * 1024 * 1024));
 		stride  = 16 * 4;
 		height  = amount / stride;
 
-		if (old_mem->mem_type == TTM_PL_VRAM &&
-		    nouveau_bo_tile_layout(nvbo)) {
-			ret = RING_SPACE(chan, 8);
-			if (ret)
-				return ret;
-
+		if (src_tiled) {
 			BEGIN_NV04(chan, NvSubCopy, 0x0200, 7);
 			OUT_RING  (chan, 0);
 			OUT_RING  (chan, 0);
@@ -826,19 +804,10 @@
 			OUT_RING  (chan, 0);
 			OUT_RING  (chan, 0);
 		} else {
-			ret = RING_SPACE(chan, 2);
-			if (ret)
-				return ret;
-
 			BEGIN_NV04(chan, NvSubCopy, 0x0200, 1);
 			OUT_RING  (chan, 1);
 		}
-		if (new_mem->mem_type == TTM_PL_VRAM &&
-		    nouveau_bo_tile_layout(nvbo)) {
-			ret = RING_SPACE(chan, 8);
-			if (ret)
-				return ret;
-
+		if (dst_tiled) {
 			BEGIN_NV04(chan, NvSubCopy, 0x021c, 7);
 			OUT_RING  (chan, 0);
 			OUT_RING  (chan, 0);
@@ -848,18 +817,10 @@
 			OUT_RING  (chan, 0);
 			OUT_RING  (chan, 0);
 		} else {
-			ret = RING_SPACE(chan, 2);
-			if (ret)
-				return ret;
-
 			BEGIN_NV04(chan, NvSubCopy, 0x021c, 1);
 			OUT_RING  (chan, 1);
 		}
 
-		ret = RING_SPACE(chan, 14);
-		if (ret)
-			return ret;
-
 		BEGIN_NV04(chan, NvSubCopy, 0x0238, 2);
 		OUT_RING  (chan, upper_32_bits(src_offset));
 		OUT_RING  (chan, upper_32_bits(dst_offset));
@@ -953,23 +914,28 @@
 }
 
 static int
-nouveau_vma_getmap(struct nouveau_channel *chan, struct nouveau_bo *nvbo,
-		   struct ttm_mem_reg *mem, struct nouveau_vma *vma)
+nouveau_bo_move_prep(struct nouveau_drm *drm, struct ttm_buffer_object *bo,
+		     struct ttm_mem_reg *mem)
 {
-	struct nouveau_mem *node = mem->mm_node;
+	struct nouveau_mem *old_node = bo->mem.mm_node;
+	struct nouveau_mem *new_node = mem->mm_node;
+	u64 size = (u64)mem->num_pages << PAGE_SHIFT;
 	int ret;
 
-	ret = nouveau_vm_get(nv_client(chan->cli)->vm, mem->num_pages <<
-			     PAGE_SHIFT, node->page_shift,
-			     NV_MEM_ACCESS_RW, vma);
+	ret = nouveau_vm_get(nv_client(drm)->vm, size, old_node->page_shift,
+			     NV_MEM_ACCESS_RW, &old_node->vma[0]);
 	if (ret)
 		return ret;
 
-	if (mem->mem_type == TTM_PL_VRAM)
-		nouveau_vm_map(vma, node);
-	else
-		nouveau_vm_map_sg(vma, 0, mem->num_pages << PAGE_SHIFT, node);
+	ret = nouveau_vm_get(nv_client(drm)->vm, size, new_node->page_shift,
+			     NV_MEM_ACCESS_RW, &old_node->vma[1]);
+	if (ret) {
+		nouveau_vm_put(&old_node->vma[0]);
+		return ret;
+	}
 
+	nouveau_vm_map(&old_node->vma[0], old_node);
+	nouveau_vm_map(&old_node->vma[1], new_node);
 	return 0;
 }
 
@@ -979,35 +945,34 @@
 {
 	struct nouveau_drm *drm = nouveau_bdev(bo->bdev);
 	struct nouveau_channel *chan = drm->ttm.chan;
-	struct nouveau_bo *nvbo = nouveau_bo(bo);
-	struct ttm_mem_reg *old_mem = &bo->mem;
+	struct nouveau_fence *fence;
 	int ret;
 
-	mutex_lock_nested(&chan->cli->mutex, SINGLE_DEPTH_NESTING);
-
 	/* create temporary vmas for the transfer and attach them to the
 	 * old nouveau_mem node, these will get cleaned up after ttm has
 	 * destroyed the ttm_mem_reg
 	 */
 	if (nv_device(drm->device)->card_type >= NV_50) {
-		struct nouveau_mem *node = old_mem->mm_node;
-
-		ret = nouveau_vma_getmap(chan, nvbo, old_mem, &node->vma[0]);
+		ret = nouveau_bo_move_prep(drm, bo, new_mem);
 		if (ret)
-			goto out;
-
-		ret = nouveau_vma_getmap(chan, nvbo, new_mem, &node->vma[1]);
-		if (ret)
-			goto out;
+			return ret;
 	}
 
-	ret = drm->ttm.move(chan, bo, &bo->mem, new_mem);
+	mutex_lock_nested(&chan->cli->mutex, SINGLE_DEPTH_NESTING);
+	ret = nouveau_fence_sync(bo->sync_obj, chan);
 	if (ret == 0) {
-		ret = nouveau_bo_move_accel_cleanup(chan, nvbo, evict,
-						    no_wait_gpu, new_mem);
+		ret = drm->ttm.move(chan, bo, &bo->mem, new_mem);
+		if (ret == 0) {
+			ret = nouveau_fence_new(chan, false, &fence);
+			if (ret == 0) {
+				ret = ttm_bo_move_accel_cleanup(bo, fence,
+								evict,
+								no_wait_gpu,
+								new_mem);
+				nouveau_fence_unref(&fence);
+			}
+		}
 	}
-
-out:
 	mutex_unlock(&chan->cli->mutex);
 	return ret;
 }
@@ -1147,19 +1112,10 @@
 		return;
 
 	list_for_each_entry(vma, &nvbo->vma_list, head) {
-		if (new_mem && new_mem->mem_type == TTM_PL_VRAM) {
+		if (new_mem && new_mem->mem_type != TTM_PL_SYSTEM &&
+			      (new_mem->mem_type == TTM_PL_VRAM ||
+			       nvbo->page_shift != vma->vm->vmm->lpg_shift)) {
 			nouveau_vm_map(vma, new_mem->mm_node);
-		} else
-		if (new_mem && new_mem->mem_type == TTM_PL_TT &&
-		    nvbo->page_shift == vma->vm->vmm->spg_shift) {
-			if (((struct nouveau_mem *)new_mem->mm_node)->sg)
-				nouveau_vm_map_sg_table(vma, 0, new_mem->
-						  num_pages << PAGE_SHIFT,
-						  new_mem->mm_node);
-			else
-				nouveau_vm_map_sg(vma, 0, new_mem->
-						  num_pages << PAGE_SHIFT,
-						  new_mem->mm_node);
 		} else {
 			nouveau_vm_unmap(vma);
 		}
@@ -1224,28 +1180,27 @@
 		goto out;
 	}
 
-	/* CPU copy if we have no accelerated method available */
-	if (!drm->ttm.move) {
-		ret = ttm_bo_move_memcpy(bo, evict, no_wait_gpu, new_mem);
-		goto out;
+	/* Hardware assisted copy. */
+	if (drm->ttm.move) {
+		if (new_mem->mem_type == TTM_PL_SYSTEM)
+			ret = nouveau_bo_move_flipd(bo, evict, intr,
+						    no_wait_gpu, new_mem);
+		else if (old_mem->mem_type == TTM_PL_SYSTEM)
+			ret = nouveau_bo_move_flips(bo, evict, intr,
+						    no_wait_gpu, new_mem);
+		else
+			ret = nouveau_bo_move_m2mf(bo, evict, intr,
+						   no_wait_gpu, new_mem);
+		if (!ret)
+			goto out;
 	}
 
-	/* Hardware assisted copy. */
-	if (new_mem->mem_type == TTM_PL_SYSTEM)
-		ret = nouveau_bo_move_flipd(bo, evict, intr,
-					    no_wait_gpu, new_mem);
-	else if (old_mem->mem_type == TTM_PL_SYSTEM)
-		ret = nouveau_bo_move_flips(bo, evict, intr,
-					    no_wait_gpu, new_mem);
-	else
-		ret = nouveau_bo_move_m2mf(bo, evict, intr,
-					   no_wait_gpu, new_mem);
-
-	if (!ret)
-		goto out;
-
 	/* Fallback to software copy. */
-	ret = ttm_bo_move_memcpy(bo, evict, no_wait_gpu, new_mem);
+	spin_lock(&bo->bdev->fence_lock);
+	ret = ttm_bo_wait(bo, true, intr, no_wait_gpu);
+	spin_unlock(&bo->bdev->fence_lock);
+	if (ret == 0)
+		ret = ttm_bo_move_memcpy(bo, evict, no_wait_gpu, new_mem);
 
 out:
 	if (nv_device(drm->device)->card_type < NV_50) {
@@ -1271,6 +1226,7 @@
 {
 	struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];
 	struct nouveau_drm *drm = nouveau_bdev(bdev);
+	struct nouveau_mem *node = mem->mm_node;
 	struct drm_device *dev = drm->dev;
 	int ret;
 
@@ -1293,14 +1249,16 @@
 			mem->bus.is_iomem = !dev->agp->cant_use_aperture;
 		}
 #endif
-		break;
+		if (nv_device(drm->device)->card_type < NV_50 || !node->memtype)
+			/* untiled */
+			break;
+		/* fallthrough, tiled memory */
 	case TTM_PL_VRAM:
 		mem->bus.offset = mem->start << PAGE_SHIFT;
 		mem->bus.base = pci_resource_start(dev->pdev, 1);
 		mem->bus.is_iomem = true;
 		if (nv_device(drm->device)->card_type >= NV_50) {
 			struct nouveau_bar *bar = nouveau_bar(drm->device);
-			struct nouveau_mem *node = mem->mm_node;
 
 			ret = bar->umap(bar, node, NV_MEM_ACCESS_RW,
 					&node->bar_vma);
@@ -1336,6 +1294,7 @@
 	struct nouveau_bo *nvbo = nouveau_bo(bo);
 	struct nouveau_device *device = nv_device(drm->device);
 	u32 mappable = pci_resource_len(device->pdev, 1) >> PAGE_SHIFT;
+	int ret;
 
 	/* as long as the bo isn't in vram, and isn't tiled, we've got
 	 * nothing to do here.
@@ -1344,10 +1303,20 @@
 		if (nv_device(drm->device)->card_type < NV_50 ||
 		    !nouveau_bo_tile_layout(nvbo))
 			return 0;
+
+		if (bo->mem.mem_type == TTM_PL_SYSTEM) {
+			nouveau_bo_placement_set(nvbo, TTM_PL_TT, 0);
+
+			ret = nouveau_bo_validate(nvbo, false, false);
+			if (ret)
+				return ret;
+		}
+		return 0;
 	}
 
 	/* make sure bo is in mappable vram */
-	if (bo->mem.start + bo->mem.num_pages < mappable)
+	if (nv_device(drm->device)->card_type >= NV_50 ||
+	    bo->mem.start + bo->mem.num_pages < mappable)
 		return 0;
 
 
@@ -1535,7 +1504,6 @@
 		   struct nouveau_vma *vma)
 {
 	const u32 size = nvbo->bo.mem.num_pages << PAGE_SHIFT;
-	struct nouveau_mem *node = nvbo->bo.mem.mm_node;
 	int ret;
 
 	ret = nouveau_vm_get(vm, size, nvbo->page_shift,
@@ -1543,15 +1511,10 @@
 	if (ret)
 		return ret;
 
-	if (nvbo->bo.mem.mem_type == TTM_PL_VRAM)
+	if (nvbo->bo.mem.mem_type != TTM_PL_SYSTEM &&
+	    (nvbo->bo.mem.mem_type == TTM_PL_VRAM ||
+	     nvbo->page_shift != vma->vm->vmm->lpg_shift))
 		nouveau_vm_map(vma, nvbo->bo.mem.mm_node);
-	else if (nvbo->bo.mem.mem_type == TTM_PL_TT &&
-		 nvbo->page_shift == vma->vm->vmm->spg_shift) {
-		if (node->sg)
-			nouveau_vm_map_sg_table(vma, 0, size, node);
-		else
-			nouveau_vm_map_sg(vma, 0, size, node);
-	}
 
 	list_add_tail(&vma->head, &nvbo->vma_list);
 	vma->refcount = 1;
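
The copy loop above now reserves ring space once per iteration instead of piecemeal: 18 words cover the unconditional methods (2 + 2 + 14 in the removed code), and each tiled surface description costs 6 more (8 words instead of 2). The arithmetic as a trivial helper, for illustration only:

static inline int copy_ring_words(int src_tiled, int dst_tiled)
{
	/* 18 fixed words plus 6 extra per tiled surface, see above */
	return 18 + 6 * (src_tiled + dst_tiled);
}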
diff --git a/drivers/gpu/drm/nouveau/nouveau_display.c b/drivers/gpu/drm/nouveau/nouveau_display.c
index 25ea82f..2401159 100644
--- a/drivers/gpu/drm/nouveau/nouveau_display.c
+++ b/drivers/gpu/drm/nouveau/nouveau_display.c
@@ -68,20 +68,100 @@
 		nouveau_event_put(disp->vblank[head]);
 }
 
+static inline int
+calc(int blanks, int blanke, int total, int line)
+{
+	if (blanke >= blanks) {
+		if (line >= blanks)
+			line -= total;
+	} else {
+		if (line >= blanks)
+			line -= total;
+		line -= blanke + 1;
+	}
+	return line;
+}
+
+int
+nouveau_display_scanoutpos_head(struct drm_crtc *crtc, int *vpos, int *hpos,
+				ktime_t *stime, ktime_t *etime)
+{
+	const u32 mthd = NV04_DISP_SCANOUTPOS + nouveau_crtc(crtc)->index;
+	struct nouveau_display *disp = nouveau_display(crtc->dev);
+	struct nv04_display_scanoutpos args;
+	int ret, retry = 1;
+
+	do {
+		ret = nv_exec(disp->core, mthd, &args, sizeof(args));
+		if (ret != 0)
+			return 0;
+
+		if (args.vline) {
+			ret |= DRM_SCANOUTPOS_ACCURATE;
+			ret |= DRM_SCANOUTPOS_VALID;
+			break;
+		}
+
+		if (retry) ndelay(crtc->linedur_ns);
+	} while (retry--);
+
+	*hpos = calc(args.hblanks, args.hblanke, args.htotal, args.hline);
+	*vpos = calc(args.vblanks, args.vblanke, args.vtotal, args.vline);
+	if (stime) *stime = ns_to_ktime(args.time[0]);
+	if (etime) *etime = ns_to_ktime(args.time[1]);
+
+	if (*vpos < 0)
+		ret |= DRM_SCANOUTPOS_INVBL;
+	return ret;
+}
+
+int
+nouveau_display_scanoutpos(struct drm_device *dev, int head, unsigned int flags,
+			   int *vpos, int *hpos, ktime_t *stime, ktime_t *etime)
+{
+	struct drm_crtc *crtc;
+
+	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
+		if (nouveau_crtc(crtc)->index == head) {
+			return nouveau_display_scanoutpos_head(crtc, vpos, hpos,
+							       stime, etime);
+		}
+	}
+
+	return 0;
+}
+
+int
+nouveau_display_vblstamp(struct drm_device *dev, int head, int *max_error,
+			 struct timeval *time, unsigned flags)
+{
+	struct drm_crtc *crtc;
+
+	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
+		if (nouveau_crtc(crtc)->index == head) {
+			return drm_calc_vbltimestamp_from_scanoutpos(dev,
+					head, max_error, time, flags, crtc,
+					&crtc->hwmode);
+		}
+	}
+
+	return -EINVAL;
+}
+
 static void
 nouveau_display_vblank_fini(struct drm_device *dev)
 {
 	struct nouveau_display *disp = nouveau_display(dev);
 	int i;
 
+	drm_vblank_cleanup(dev);
+
 	if (disp->vblank) {
 		for (i = 0; i < dev->mode_config.num_crtc; i++)
 			nouveau_event_ref(NULL, &disp->vblank[i]);
 		kfree(disp->vblank);
 		disp->vblank = NULL;
 	}
-
-	drm_vblank_cleanup(dev);
 }
 
 static int
@@ -407,10 +487,31 @@
 	drm_kms_helper_poll_disable(dev);
 
 	if (drm->vbios.dcb.entries) {
-		if (nv_device(drm->device)->card_type < NV_50)
-			ret = nv04_display_create(dev);
-		else
-			ret = nv50_display_create(dev);
+		static const u16 oclass[] = {
+			NVF0_DISP_CLASS,
+			NVE0_DISP_CLASS,
+			NVD0_DISP_CLASS,
+			NVA3_DISP_CLASS,
+			NV94_DISP_CLASS,
+			NVA0_DISP_CLASS,
+			NV84_DISP_CLASS,
+			NV50_DISP_CLASS,
+			NV04_DISP_CLASS,
+		};
+		int i;
+
+		for (i = 0, ret = -ENODEV; ret && i < ARRAY_SIZE(oclass); i++) {
+			ret = nouveau_object_new(nv_object(drm), NVDRM_DEVICE,
+						 NVDRM_DISPLAY, oclass[i],
+						 NULL, 0, &disp->core);
+		}
+
+		if (ret == 0) {
+			if (nv_mclass(disp->core) < NV50_DISP_CLASS)
+				ret = nv04_display_create(dev);
+			else
+				ret = nv50_display_create(dev);
+		}
 	} else {
 		ret = 0;
 	}
@@ -439,6 +540,7 @@
 nouveau_display_destroy(struct drm_device *dev)
 {
 	struct nouveau_display *disp = nouveau_display(dev);
+	struct nouveau_drm *drm = nouveau_drm(dev);
 
 	nouveau_backlight_exit(dev);
 	nouveau_display_vblank_fini(dev);
@@ -449,6 +551,8 @@
 	if (disp->dtor)
 		disp->dtor(dev);
 
+	nouveau_object_del(nv_object(drm), NVDRM_DEVICE, NVDRM_DISPLAY);
+
 	nouveau_drm(dev)->display = NULL;
 	kfree(disp);
 }
@@ -603,15 +707,6 @@
 	if (!s)
 		return -ENOMEM;
 
-	/* synchronise rendering channel with the kernel's channel */
-	spin_lock(&new_bo->bo.bdev->fence_lock);
-	fence = nouveau_fence_ref(new_bo->bo.sync_obj);
-	spin_unlock(&new_bo->bo.bdev->fence_lock);
-	ret = nouveau_fence_sync(fence, chan);
-	nouveau_fence_unref(&fence);
-	if (ret)
-		goto fail_free;
-
 	if (new_bo != old_bo) {
 		ret = nouveau_bo_pin(new_bo, TTM_PL_FLAG_VRAM);
 		if (ret)
@@ -619,6 +714,16 @@
 	}
 
 	mutex_lock(&chan->cli->mutex);
+
+	/* synchronise rendering channel with the kernel's channel */
+	spin_lock(&new_bo->bo.bdev->fence_lock);
+	fence = nouveau_fence_ref(new_bo->bo.sync_obj);
+	spin_unlock(&new_bo->bo.bdev->fence_lock);
+	ret = nouveau_fence_sync(fence, chan);
+	nouveau_fence_unref(&fence);
+	if (ret)
+		goto fail_unpin;
+
 	ret = ttm_bo_reserve(&old_bo->bo, true, false, false, NULL);
 	if (ret)
 		goto fail_unpin;
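
The calc() helper added above converts a raw scanline counter into a position relative to the start of the active area, so lines inside the blanking interval come out negative and can be reported as in-vblank. A standalone copy with a worked example (the mode numbers are made up, roughly a 768-line panel):

static int scanout_calc(int blanks, int blanke, int total, int line)
{
	if (blanke >= blanks) {
		if (line >= blanks)
			line -= total;
	} else {
		if (line >= blanks)
			line -= total;
		line -= blanke + 1;
	}
	return line;
}

/* e.g. total=806, blanks=768, blanke=805:
 *   line=100 -> 100   (inside the active area)
 *   line=770 -> -36   (inside vblank, flagged DRM_SCANOUTPOS_INVBL)
 */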
diff --git a/drivers/gpu/drm/nouveau/nouveau_display.h b/drivers/gpu/drm/nouveau/nouveau_display.h
index 8bc8bab..a71cf77 100644
--- a/drivers/gpu/drm/nouveau/nouveau_display.h
+++ b/drivers/gpu/drm/nouveau/nouveau_display.h
@@ -36,6 +36,7 @@
 	int  (*init)(struct drm_device *);
 	void (*fini)(struct drm_device *);
 
+	struct nouveau_object *core;
 	struct nouveau_eventh **vblank;
 
 	struct drm_property *dithering_mode;
@@ -63,6 +64,10 @@
 void nouveau_display_resume(struct drm_device *dev);
 int  nouveau_display_vblank_enable(struct drm_device *, int);
 void nouveau_display_vblank_disable(struct drm_device *, int);
+int  nouveau_display_scanoutpos(struct drm_device *, int, unsigned int,
+				int *, int *, ktime_t *, ktime_t *);
+int  nouveau_display_vblstamp(struct drm_device *, int, int *,
+			      struct timeval *, unsigned);
 
 int  nouveau_crtc_page_flip(struct drm_crtc *crtc, struct drm_framebuffer *fb,
 			    struct drm_pending_vblank_event *event,
diff --git a/drivers/gpu/drm/nouveau/nouveau_dma.c b/drivers/gpu/drm/nouveau/nouveau_dma.c
index 40f91e1..c177272 100644
--- a/drivers/gpu/drm/nouveau/nouveau_dma.c
+++ b/drivers/gpu/drm/nouveau/nouveau_dma.c
@@ -100,7 +100,7 @@
 
 	chan->dma.ib_put = (chan->dma.ib_put + 1) & chan->dma.ib_max;
 
-	DRM_MEMORYBARRIER();
+	mb();
 	/* Flush writes. */
 	nouveau_bo_rd32(pb, 0);
 
diff --git a/drivers/gpu/drm/nouveau/nouveau_dma.h b/drivers/gpu/drm/nouveau/nouveau_dma.h
index 984004d..dc0e0c5 100644
--- a/drivers/gpu/drm/nouveau/nouveau_dma.h
+++ b/drivers/gpu/drm/nouveau/nouveau_dma.h
@@ -155,7 +155,7 @@
 }
 
 #define WRITE_PUT(val) do {                                                    \
-	DRM_MEMORYBARRIER();                                                   \
+	mb();                                                   \
 	nouveau_bo_rd32(chan->push.buffer, 0);                                 \
 	nv_wo32(chan->object, chan->user_put, ((val) << 2) + chan->push.vma.offset);  \
 } while (0)
diff --git a/drivers/gpu/drm/nouveau/nouveau_drm.c b/drivers/gpu/drm/nouveau/nouveau_drm.c
index 98a22e6..89c484d 100644
--- a/drivers/gpu/drm/nouveau/nouveau_drm.c
+++ b/drivers/gpu/drm/nouveau/nouveau_drm.c
@@ -376,6 +376,8 @@
 	if (ret)
 		goto fail_device;
 
+	dev->irq_enabled = true;
+
 	/* workaround an odd issue on nvc1 by disabling the device's
 	 * nosnoop capability.  hopefully won't cause issues until a
 	 * better fix is found - assuming there is one...
@@ -475,6 +477,7 @@
 	struct nouveau_drm *drm = nouveau_drm(dev);
 	struct nouveau_object *device;
 
+	dev->irq_enabled = false;
 	device = drm->client.base.device;
 	drm_put_dev(dev);
 
@@ -503,19 +506,21 @@
 	if (drm->cechan) {
 		ret = nouveau_channel_idle(drm->cechan);
 		if (ret)
-			return ret;
+			goto fail_display;
 	}
 
 	if (drm->channel) {
 		ret = nouveau_channel_idle(drm->channel);
 		if (ret)
-			return ret;
+			goto fail_display;
 	}
 
 	NV_INFO(drm, "suspending client object trees...\n");
 	if (drm->fence && nouveau_fence(drm)->suspend) {
-		if (!nouveau_fence(drm)->suspend(drm))
-			return -ENOMEM;
+		if (!nouveau_fence(drm)->suspend(drm)) {
+			ret = -ENOMEM;
+			goto fail_display;
+		}
 	}
 
 	list_for_each_entry(cli, &drm->clients, head) {
@@ -537,6 +542,10 @@
 		nouveau_client_init(&cli->base);
 	}
 
+	if (drm->fence && nouveau_fence(drm)->resume)
+		nouveau_fence(drm)->resume(drm);
+
+fail_display:
 	if (dev->mode_config.num_crtc) {
 		NV_INFO(drm, "resuming display...\n");
 		nouveau_display_resume(dev);
@@ -798,6 +807,8 @@
 	.get_vblank_counter = drm_vblank_count,
 	.enable_vblank = nouveau_display_vblank_enable,
 	.disable_vblank = nouveau_display_vblank_disable,
+	.get_scanout_position = nouveau_display_scanoutpos,
+	.get_vblank_timestamp = nouveau_display_vblstamp,
 
 	.ioctls = nouveau_ioctls,
 	.num_ioctls = ARRAY_SIZE(nouveau_ioctls),
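
The suspend path above no longer returns straight out when a channel fails to idle or the fences cannot be saved; it jumps to fail_display so the display, suspended earlier in the function, is brought back up first. A minimal sketch of that unwind shape, with placeholder helpers:

static int example_suspend(struct example_dev *dev)
{
	int ret;

	example_display_fini(dev);		/* step already completed */

	ret = example_idle_channels(dev);
	if (ret)
		goto fail_display;

	ret = example_save_fences(dev);
	if (ret)
		goto fail_display;

	return 0;

fail_display:
	example_display_resume(dev);		/* undo the teardown */
	return ret;
}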
diff --git a/drivers/gpu/drm/nouveau/nouveau_drm.h b/drivers/gpu/drm/nouveau/nouveau_drm.h
index 4b0fb6c..23ca7a5 100644
--- a/drivers/gpu/drm/nouveau/nouveau_drm.h
+++ b/drivers/gpu/drm/nouveau/nouveau_drm.h
@@ -54,6 +54,7 @@
 	NVDRM_CLIENT  = 0xffffffff,
 	NVDRM_DEVICE  = 0xdddddddd,
 	NVDRM_CONTROL = 0xdddddddc,
+	NVDRM_DISPLAY = 0xd1500000,
 	NVDRM_PUSH    = 0xbbbb0000, /* |= client chid */
 	NVDRM_CHAN    = 0xcccc0000, /* |= client chid */
 	NVDRM_NVSW    = 0x55550000,
diff --git a/drivers/gpu/drm/nouveau/nouveau_fence.c b/drivers/gpu/drm/nouveau/nouveau_fence.c
index 40cf52e..90074d6 100644
--- a/drivers/gpu/drm/nouveau/nouveau_fence.c
+++ b/drivers/gpu/drm/nouveau/nouveau_fence.c
@@ -143,7 +143,7 @@
 	int ret;
 
 	fence->channel  = chan;
-	fence->timeout  = jiffies + (15 * DRM_HZ);
+	fence->timeout  = jiffies + (15 * HZ);
 	fence->sequence = ++fctx->sequence;
 
 	ret = fctx->emit(fence);
diff --git a/drivers/gpu/drm/nouveau/nouveau_gem.c b/drivers/gpu/drm/nouveau/nouveau_gem.c
index 78a27f8..27c3fd8 100644
--- a/drivers/gpu/drm/nouveau/nouveau_gem.c
+++ b/drivers/gpu/drm/nouveau/nouveau_gem.c
@@ -463,12 +463,6 @@
 	list_for_each_entry(nvbo, list, entry) {
 		struct drm_nouveau_gem_pushbuf_bo *b = &pbbo[nvbo->pbbo_index];
 
-		ret = validate_sync(chan, nvbo);
-		if (unlikely(ret)) {
-			NV_ERROR(cli, "fail pre-validate sync\n");
-			return ret;
-		}
-
 		ret = nouveau_gem_set_domain(&nvbo->gem, b->read_domains,
 					     b->write_domains,
 					     b->valid_domains);
@@ -506,7 +500,7 @@
 			b->presumed.valid = 0;
 			relocs++;
 
-			if (DRM_COPY_TO_USER(&upbbo[nvbo->pbbo_index].presumed,
+			if (copy_to_user(&upbbo[nvbo->pbbo_index].presumed,
 					     &b->presumed, sizeof(b->presumed)))
 				return -EFAULT;
 		}
@@ -593,7 +587,7 @@
 	if (!mem)
 		return ERR_PTR(-ENOMEM);
 
-	if (DRM_COPY_FROM_USER(mem, userptr, size)) {
+	if (copy_from_user(mem, userptr, size)) {
 		u_free(mem);
 		return ERR_PTR(-EFAULT);
 	}
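
DRM_COPY_FROM_USER()/DRM_COPY_TO_USER() were thin wrappers around the kernel's copy_from_user()/copy_to_user(), which return the number of bytes left uncopied; the idiom in the conversions above treats any non-zero return as -EFAULT. A small sketch with a hypothetical ioctl argument struct:

struct example_args {
	__u32 handle;
	__u32 flags;
};

static int example_args_from_user(struct example_args *dst,
				  const void __user *src)
{
	if (copy_from_user(dst, src, sizeof(*dst)))
		return -EFAULT;	/* any partial copy counts as failure */
	return 0;
}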
diff --git a/drivers/gpu/drm/nouveau/nouveau_sgdma.c b/drivers/gpu/drm/nouveau/nouveau_sgdma.c
index 0843ebc..a4d22e5 100644
--- a/drivers/gpu/drm/nouveau/nouveau_sgdma.c
+++ b/drivers/gpu/drm/nouveau/nouveau_sgdma.c
@@ -31,16 +31,17 @@
 {
 	struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)ttm;
 	struct nouveau_mem *node = mem->mm_node;
-	u64 size = mem->num_pages << 12;
 
 	if (ttm->sg) {
-		node->sg = ttm->sg;
-		nouveau_vm_map_sg_table(&node->vma[0], 0, size, node);
+		node->sg    = ttm->sg;
+		node->pages = NULL;
 	} else {
+		node->sg    = NULL;
 		node->pages = nvbe->ttm.dma_address;
-		nouveau_vm_map_sg(&node->vma[0], 0, size, node);
 	}
+	node->size = (mem->num_pages << PAGE_SHIFT) >> 12;
 
+	nouveau_vm_map(&node->vma[0], node);
 	nvbe->node = node;
 	return 0;
 }
@@ -67,9 +68,13 @@
 
 	/* noop: bound in move_notify() */
 	if (ttm->sg) {
-		node->sg = ttm->sg;
-	} else
+		node->sg    = ttm->sg;
+		node->pages = NULL;
+	} else {
+		node->sg    = NULL;
 		node->pages = nvbe->ttm.dma_address;
+	}
+	node->size = (mem->num_pages << PAGE_SHIFT) >> 12;
 	return 0;
 }
 
diff --git a/drivers/gpu/drm/nouveau/nouveau_ttm.c b/drivers/gpu/drm/nouveau/nouveau_ttm.c
index 19e3757..d45d50d 100644
--- a/drivers/gpu/drm/nouveau/nouveau_ttm.c
+++ b/drivers/gpu/drm/nouveau/nouveau_ttm.c
@@ -171,6 +171,7 @@
 	node = kzalloc(sizeof(*node), GFP_KERNEL);
 	if (!node)
 		return -ENOMEM;
+
 	node->page_shift = 12;
 
 	switch (nv_device(drm->device)->card_type) {
diff --git a/drivers/gpu/drm/nouveau/nouveau_vga.c b/drivers/gpu/drm/nouveau/nouveau_vga.c
index 81638d7..471347e 100644
--- a/drivers/gpu/drm/nouveau/nouveau_vga.c
+++ b/drivers/gpu/drm/nouveau/nouveau_vga.c
@@ -14,7 +14,9 @@
 {
 	struct nouveau_device *device = nouveau_dev(priv);
 
-	if (device->chipset >= 0x40)
+	if (device->card_type == NV_40 && device->chipset >= 0x4c)
+		nv_wr32(device, 0x088060, state);
+	else if (device->chipset >= 0x40)
 		nv_wr32(device, 0x088054, state);
 	else
 		nv_wr32(device, 0x001854, state);
diff --git a/drivers/gpu/drm/nouveau/nv50_display.c b/drivers/gpu/drm/nouveau/nv50_display.c
index 4e384a2..2dccafc 100644
--- a/drivers/gpu/drm/nouveau/nv50_display.c
+++ b/drivers/gpu/drm/nouveau/nv50_display.c
@@ -1035,6 +1035,7 @@
 nv50_crtc_mode_fixup(struct drm_crtc *crtc, const struct drm_display_mode *mode,
 		     struct drm_display_mode *adjusted_mode)
 {
+	drm_mode_set_crtcinfo(adjusted_mode, CRTC_INTERLACE_HALVE_V);
 	return true;
 }
 
@@ -2199,16 +2200,6 @@
 int
 nv50_display_create(struct drm_device *dev)
 {
-	static const u16 oclass[] = {
-		NVF0_DISP_CLASS,
-		NVE0_DISP_CLASS,
-		NVD0_DISP_CLASS,
-		NVA3_DISP_CLASS,
-		NV94_DISP_CLASS,
-		NVA0_DISP_CLASS,
-		NV84_DISP_CLASS,
-		NV50_DISP_CLASS,
-	};
 	struct nouveau_device *device = nouveau_dev(dev);
 	struct nouveau_drm *drm = nouveau_drm(dev);
 	struct dcb_table *dcb = &drm->vbios.dcb;
@@ -2225,6 +2216,7 @@
 	nouveau_display(dev)->dtor = nv50_display_destroy;
 	nouveau_display(dev)->init = nv50_display_init;
 	nouveau_display(dev)->fini = nv50_display_fini;
+	disp->core = nouveau_display(dev)->core;
 
 	/* small shared memory area we use for notifiers and semaphores */
 	ret = nouveau_bo_new(dev, 4096, 0x1000, TTM_PL_FLAG_VRAM,
@@ -2243,17 +2235,6 @@
 	if (ret)
 		goto out;
 
-	/* attempt to allocate a supported evo display class */
-	ret = -ENODEV;
-	for (i = 0; ret && i < ARRAY_SIZE(oclass); i++) {
-		ret = nouveau_object_new(nv_object(drm), NVDRM_DEVICE,
-					 0xd1500000, oclass[i], NULL, 0,
-					 &disp->core);
-	}
-
-	if (ret)
-		goto out;
-
 	/* allocate master evo channel */
 	ret = nv50_dmac_create(disp->core, NV50_DISP_MAST_CLASS, 0,
 			      &(struct nv50_display_mast_class) {
diff --git a/drivers/gpu/drm/omapdrm/omap_crtc.c b/drivers/gpu/drm/omapdrm/omap_crtc.c
index 0fd2eb1..4313bb0 100644
--- a/drivers/gpu/drm/omapdrm/omap_crtc.c
+++ b/drivers/gpu/drm/omapdrm/omap_crtc.c
@@ -411,7 +411,7 @@
 	struct drm_crtc *crtc = &omap_crtc->base;
 	DRM_ERROR("%s: errors: %08x\n", omap_crtc->name, irqstatus);
 	/* avoid getting in a flood, unregister the irq until next vblank */
-	omap_irq_unregister(crtc->dev, &omap_crtc->error_irq);
+	__omap_irq_unregister(crtc->dev, &omap_crtc->error_irq);
 }
 
 static void omap_crtc_apply_irq(struct omap_drm_irq *irq, uint32_t irqstatus)
@@ -421,13 +421,13 @@
 	struct drm_crtc *crtc = &omap_crtc->base;
 
 	if (!omap_crtc->error_irq.registered)
-		omap_irq_register(crtc->dev, &omap_crtc->error_irq);
+		__omap_irq_register(crtc->dev, &omap_crtc->error_irq);
 
 	if (!dispc_mgr_go_busy(omap_crtc->channel)) {
 		struct omap_drm_private *priv =
 				crtc->dev->dev_private;
 		DBG("%s: apply done", omap_crtc->name);
-		omap_irq_unregister(crtc->dev, &omap_crtc->apply_irq);
+		__omap_irq_unregister(crtc->dev, &omap_crtc->apply_irq);
 		queue_work(priv->wq, &omap_crtc->apply_work);
 	}
 }
@@ -623,6 +623,11 @@
 	dss_install_mgr_ops(&mgr_ops);
 }
 
+void omap_crtc_pre_uninit(void)
+{
+	dss_uninstall_mgr_ops();
+}
+
 /* initialize crtc */
 struct drm_crtc *omap_crtc_init(struct drm_device *dev,
 		struct drm_plane *plane, enum omap_channel channel, int id)
diff --git a/drivers/gpu/drm/omapdrm/omap_debugfs.c b/drivers/gpu/drm/omapdrm/omap_debugfs.c
index c27f59d..d4c04d6 100644
--- a/drivers/gpu/drm/omapdrm/omap_debugfs.c
+++ b/drivers/gpu/drm/omapdrm/omap_debugfs.c
@@ -48,7 +48,7 @@
 {
 	struct drm_info_node *node = (struct drm_info_node *) m->private;
 	struct drm_device *dev = node->minor->dev;
-	return drm_mm_dump_table(m, dev->mm_private);
+	return drm_mm_dump_table(m, &dev->vma_offset_manager->vm_addr_space_mm);
 }
 
 static int fb_show(struct seq_file *m, void *arg)
diff --git a/drivers/gpu/drm/omapdrm/omap_dmm_tiler.c b/drivers/gpu/drm/omapdrm/omap_dmm_tiler.c
index 701c4c1..f926b4c 100644
--- a/drivers/gpu/drm/omapdrm/omap_dmm_tiler.c
+++ b/drivers/gpu/drm/omapdrm/omap_dmm_tiler.c
@@ -969,12 +969,21 @@
 };
 #endif
 
+#if defined(CONFIG_OF)
+static const struct of_device_id dmm_of_match[] = {
+	{ .compatible = "ti,omap4-dmm", },
+	{ .compatible = "ti,omap5-dmm", },
+	{},
+};
+#endif
+
 struct platform_driver omap_dmm_driver = {
 	.probe = omap_dmm_probe,
 	.remove = omap_dmm_remove,
 	.driver = {
 		.owner = THIS_MODULE,
 		.name = DMM_DRIVER_NAME,
+		.of_match_table = of_match_ptr(dmm_of_match),
 #ifdef CONFIG_PM
 		.pm = &omap_dmm_pm_ops,
 #endif
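
The dmm_of_match table above is hooked up through of_match_ptr(), which evaluates to the table when CONFIG_OF is set and to NULL otherwise, so the driver structure itself needs no #ifdef. The shape of the pattern with a hypothetical compatible string (probe/remove hooks omitted):

#if defined(CONFIG_OF)
static const struct of_device_id example_of_match[] = {
	{ .compatible = "vendor,example-device", },
	{ /* sentinel */ },
};
#endif

static struct platform_driver example_driver = {
	.driver = {
		.name = "example",
		.of_match_table = of_match_ptr(example_of_match),
	},
};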
diff --git a/drivers/gpu/drm/omapdrm/omap_drv.c b/drivers/gpu/drm/omapdrm/omap_drv.c
index e7fa3cd..bf39fcc 100644
--- a/drivers/gpu/drm/omapdrm/omap_drv.c
+++ b/drivers/gpu/drm/omapdrm/omap_drv.c
@@ -86,6 +86,47 @@
 
 	return false;
 }
+static void omap_disconnect_dssdevs(void)
+{
+	struct omap_dss_device *dssdev = NULL;
+
+	for_each_dss_dev(dssdev)
+		dssdev->driver->disconnect(dssdev);
+}
+
+static int omap_connect_dssdevs(void)
+{
+	int r;
+	struct omap_dss_device *dssdev = NULL;
+	bool no_displays = true;
+
+	for_each_dss_dev(dssdev) {
+		r = dssdev->driver->connect(dssdev);
+		if (r == -EPROBE_DEFER) {
+			omap_dss_put_device(dssdev);
+			goto cleanup;
+		} else if (r) {
+			dev_warn(dssdev->dev, "could not connect display: %s\n",
+				dssdev->name);
+		} else {
+			no_displays = false;
+		}
+	}
+
+	if (no_displays)
+		return -EPROBE_DEFER;
+
+	return 0;
+
+cleanup:
+	/*
+	 * if we are deferring probe, we disconnect the devices we previously
+	 * connected
+	 */
+	omap_disconnect_dssdevs();
+
+	return r;
+}
 
 static int omap_modeset_init(struct drm_device *dev)
 {
@@ -95,9 +136,6 @@
 	int num_mgrs = dss_feat_get_num_mgrs();
 	int num_crtcs;
 	int i, id = 0;
-	int r;
-
-	omap_crtc_pre_init();
 
 	drm_mode_config_init(dev);
 
@@ -119,26 +157,8 @@
 		enum omap_channel channel;
 		struct omap_overlay_manager *mgr;
 
-		if (!dssdev->driver) {
-			dev_warn(dev->dev, "%s has no driver.. skipping it\n",
-					dssdev->name);
+		if (!omapdss_device_is_connected(dssdev))
 			continue;
-		}
-
-		if (!(dssdev->driver->get_timings ||
-					dssdev->driver->read_edid)) {
-			dev_warn(dev->dev, "%s driver does not support "
-				"get_timings or read_edid.. skipping it!\n",
-				dssdev->name);
-			continue;
-		}
-
-		r = dssdev->driver->connect(dssdev);
-		if (r) {
-			dev_err(dev->dev, "could not connect display: %s\n",
-					dssdev->name);
-			continue;
-		}
 
 		encoder = omap_encoder_init(dev, dssdev);
 
@@ -497,16 +517,16 @@
 	DBG("unload: dev=%p", dev);
 
 	drm_kms_helper_poll_fini(dev);
-	drm_vblank_cleanup(dev);
-	omap_drm_irq_uninstall(dev);
 
 	omap_fbdev_free(dev);
 	omap_modeset_free(dev);
 	omap_gem_deinit(dev);
 
-	flush_workqueue(priv->wq);
 	destroy_workqueue(priv->wq);
 
+	drm_vblank_cleanup(dev);
+	omap_drm_irq_uninstall(dev);
+
 	kfree(dev->dev_private);
 	dev->dev_private = NULL;
 
@@ -655,9 +675,19 @@
 
 static int pdev_probe(struct platform_device *device)
 {
+	int r;
+
 	if (omapdss_is_initialized() == false)
 		return -EPROBE_DEFER;
 
+	omap_crtc_pre_init();
+
+	r = omap_connect_dssdevs();
+	if (r) {
+		omap_crtc_pre_uninit();
+		return r;
+	}
+
 	DBG("%s", device->name);
 	return drm_platform_init(&omap_drm_driver, device);
 }
@@ -665,9 +695,11 @@
 static int pdev_remove(struct platform_device *device)
 {
 	DBG("");
-	drm_platform_exit(&omap_drm_driver, device);
 
-	platform_driver_unregister(&omap_dmm_driver);
+	omap_disconnect_dssdevs();
+	omap_crtc_pre_uninit();
+
+	drm_put_dev(platform_get_drvdata(device));
 	return 0;
 }
 
diff --git a/drivers/gpu/drm/omapdrm/omap_drv.h b/drivers/gpu/drm/omapdrm/omap_drv.h
index 0784769..428b2981 100644
--- a/drivers/gpu/drm/omapdrm/omap_drv.h
+++ b/drivers/gpu/drm/omapdrm/omap_drv.h
@@ -141,10 +141,12 @@
 
 int omap_irq_enable_vblank(struct drm_device *dev, int crtc_id);
 void omap_irq_disable_vblank(struct drm_device *dev, int crtc_id);
-irqreturn_t omap_irq_handler(DRM_IRQ_ARGS);
+irqreturn_t omap_irq_handler(int irq, void *arg);
 void omap_irq_preinstall(struct drm_device *dev);
 int omap_irq_postinstall(struct drm_device *dev);
 void omap_irq_uninstall(struct drm_device *dev);
+void __omap_irq_register(struct drm_device *dev, struct omap_drm_irq *irq);
+void __omap_irq_unregister(struct drm_device *dev, struct omap_drm_irq *irq);
 void omap_irq_register(struct drm_device *dev, struct omap_drm_irq *irq);
 void omap_irq_unregister(struct drm_device *dev, struct omap_drm_irq *irq);
 int omap_drm_irq_uninstall(struct drm_device *dev);
@@ -158,6 +160,7 @@
 int omap_crtc_apply(struct drm_crtc *crtc,
 		struct omap_drm_apply *apply);
 void omap_crtc_pre_init(void);
+void omap_crtc_pre_uninit(void);
 struct drm_crtc *omap_crtc_init(struct drm_device *dev,
 		struct drm_plane *plane, enum omap_channel channel, int id);
 
diff --git a/drivers/gpu/drm/omapdrm/omap_encoder.c b/drivers/gpu/drm/omapdrm/omap_encoder.c
index 6a12e89..5290a88 100644
--- a/drivers/gpu/drm/omapdrm/omap_encoder.c
+++ b/drivers/gpu/drm/omapdrm/omap_encoder.c
@@ -51,6 +51,9 @@
 static void omap_encoder_destroy(struct drm_encoder *encoder)
 {
 	struct omap_encoder *omap_encoder = to_omap_encoder(encoder);
+
+	omap_encoder_set_enabled(encoder, false);
+
 	drm_encoder_cleanup(encoder);
 	kfree(omap_encoder);
 }
diff --git a/drivers/gpu/drm/omapdrm/omap_fb.c b/drivers/gpu/drm/omapdrm/omap_fb.c
index f2b8f06..f466c4a 100644
--- a/drivers/gpu/drm/omapdrm/omap_fb.c
+++ b/drivers/gpu/drm/omapdrm/omap_fb.c
@@ -123,12 +123,16 @@
 {
 	int i;
 
+	drm_modeset_lock_all(fb->dev);
+
 	for (i = 0; i < num_clips; i++) {
 		omap_framebuffer_flush(fb, clips[i].x1, clips[i].y1,
 					clips[i].x2 - clips[i].x1,
 					clips[i].y2 - clips[i].y1);
 	}
 
+	drm_modeset_unlock_all(fb->dev);
+
 	return 0;
 }
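
omap_framebuffer_dirty() above (and the qxl change further down) now brackets the flush with the global modeset locks so it cannot race with concurrent modesetting. A minimal sketch of the bracketing, with flush_one_clip() as a placeholder:

static int example_fb_dirty(struct drm_framebuffer *fb,
			    struct drm_clip_rect *clips,
			    unsigned int num_clips)
{
	unsigned int i;

	drm_modeset_lock_all(fb->dev);

	for (i = 0; i < num_clips; i++)
		flush_one_clip(fb, &clips[i]);	/* placeholder */

	drm_modeset_unlock_all(fb->dev);
	return 0;
}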
 
diff --git a/drivers/gpu/drm/omapdrm/omap_irq.c b/drivers/gpu/drm/omapdrm/omap_irq.c
index cb85860..f035d2b 100644
--- a/drivers/gpu/drm/omapdrm/omap_irq.c
+++ b/drivers/gpu/drm/omapdrm/omap_irq.c
@@ -45,12 +45,11 @@
 	dispc_read_irqenable();        /* flush posted write */
 }
 
-void omap_irq_register(struct drm_device *dev, struct omap_drm_irq *irq)
+void __omap_irq_register(struct drm_device *dev, struct omap_drm_irq *irq)
 {
 	struct omap_drm_private *priv = dev->dev_private;
 	unsigned long flags;
 
-	dispc_runtime_get();
 	spin_lock_irqsave(&list_lock, flags);
 
 	if (!WARN_ON(irq->registered)) {
@@ -60,14 +59,21 @@
 	}
 
 	spin_unlock_irqrestore(&list_lock, flags);
+}
+
+void omap_irq_register(struct drm_device *dev, struct omap_drm_irq *irq)
+{
+	dispc_runtime_get();
+
+	__omap_irq_register(dev, irq);
+
 	dispc_runtime_put();
 }
 
-void omap_irq_unregister(struct drm_device *dev, struct omap_drm_irq *irq)
+void __omap_irq_unregister(struct drm_device *dev, struct omap_drm_irq *irq)
 {
 	unsigned long flags;
 
-	dispc_runtime_get();
 	spin_lock_irqsave(&list_lock, flags);
 
 	if (!WARN_ON(!irq->registered)) {
@@ -77,6 +83,14 @@
 	}
 
 	spin_unlock_irqrestore(&list_lock, flags);
+}
+
+void omap_irq_unregister(struct drm_device *dev, struct omap_drm_irq *irq)
+{
+	dispc_runtime_get();
+
+	__omap_irq_unregister(dev, irq);
+
 	dispc_runtime_put();
 }
 
@@ -173,7 +187,7 @@
 	dispc_runtime_put();
 }
 
-irqreturn_t omap_irq_handler(DRM_IRQ_ARGS)
+irqreturn_t omap_irq_handler(int irq, void *arg)
 {
 	struct drm_device *dev = (struct drm_device *) arg;
 	struct omap_drm_private *priv = dev->dev_private;
@@ -308,7 +322,7 @@
 	if (dev->num_crtcs) {
 		spin_lock_irqsave(&dev->vbl_lock, irqflags);
 		for (i = 0; i < dev->num_crtcs; i++) {
-			DRM_WAKEUP(&dev->vblank[i].queue);
+			wake_up(&dev->vblank[i].queue);
 			dev->vblank[i].enabled = false;
 			dev->vblank[i].last =
 				dev->driver->get_vblank_counter(dev, i);
diff --git a/drivers/gpu/drm/panel/Kconfig b/drivers/gpu/drm/panel/Kconfig
new file mode 100644
index 0000000..3e0f13d
--- /dev/null
+++ b/drivers/gpu/drm/panel/Kconfig
@@ -0,0 +1,19 @@
+config DRM_PANEL
+	bool
+	depends on DRM
+	help
+	  Panel registration and lookup framework.
+
+menu "Display Panels"
+	depends on DRM_PANEL
+
+config DRM_PANEL_SIMPLE
+	tristate "support for simple panels"
+	depends on OF
+	help
+	  DRM panel driver for dumb panels that need at most a regulator and
+	  a GPIO to be powered up. Optionally a backlight can be attached so
+	  that it can be automatically turned off when the panel goes into a
+	  low power state.
+
+endmenu
diff --git a/drivers/gpu/drm/panel/Makefile b/drivers/gpu/drm/panel/Makefile
new file mode 100644
index 0000000..af9dfa2
--- /dev/null
+++ b/drivers/gpu/drm/panel/Makefile
@@ -0,0 +1 @@
+obj-$(CONFIG_DRM_PANEL_SIMPLE) += panel-simple.o
diff --git a/drivers/gpu/drm/panel/panel-simple.c b/drivers/gpu/drm/panel/panel-simple.c
new file mode 100644
index 0000000..59d52ca
--- /dev/null
+++ b/drivers/gpu/drm/panel/panel-simple.c
@@ -0,0 +1,548 @@
+/*
+ * Copyright (C) 2013, NVIDIA Corporation.  All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sub license,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+#include <linux/backlight.h>
+#include <linux/gpio.h>
+#include <linux/module.h>
+#include <linux/of_gpio.h>
+#include <linux/of_platform.h>
+#include <linux/platform_device.h>
+#include <linux/regulator/consumer.h>
+
+#include <drm/drmP.h>
+#include <drm/drm_crtc.h>
+#include <drm/drm_mipi_dsi.h>
+#include <drm/drm_panel.h>
+
+struct panel_desc {
+	const struct drm_display_mode *modes;
+	unsigned int num_modes;
+
+	struct {
+		unsigned int width;
+		unsigned int height;
+	} size;
+};
+
+/* TODO: convert to gpiod_*() API once it's been merged */
+#define GPIO_ACTIVE_LOW	(1 << 0)
+
+struct panel_simple {
+	struct drm_panel base;
+	bool enabled;
+
+	const struct panel_desc *desc;
+
+	struct backlight_device *backlight;
+	struct regulator *supply;
+	struct i2c_adapter *ddc;
+
+	unsigned long enable_gpio_flags;
+	int enable_gpio;
+};
+
+static inline struct panel_simple *to_panel_simple(struct drm_panel *panel)
+{
+	return container_of(panel, struct panel_simple, base);
+}
+
+static int panel_simple_get_fixed_modes(struct panel_simple *panel)
+{
+	struct drm_connector *connector = panel->base.connector;
+	struct drm_device *drm = panel->base.drm;
+	struct drm_display_mode *mode;
+	unsigned int i, num = 0;
+
+	if (!panel->desc)
+		return 0;
+
+	for (i = 0; i < panel->desc->num_modes; i++) {
+		const struct drm_display_mode *m = &panel->desc->modes[i];
+
+		mode = drm_mode_duplicate(drm, m);
+		if (!mode) {
+			dev_err(drm->dev, "failed to add mode %ux%u@%u\n",
+				m->hdisplay, m->vdisplay, m->vrefresh);
+			continue;
+		}
+
+		drm_mode_set_name(mode);
+
+		drm_mode_probed_add(connector, mode);
+		num++;
+	}
+
+	connector->display_info.width_mm = panel->desc->size.width;
+	connector->display_info.height_mm = panel->desc->size.height;
+
+	return num;
+}
+
+static int panel_simple_disable(struct drm_panel *panel)
+{
+	struct panel_simple *p = to_panel_simple(panel);
+
+	if (!p->enabled)
+		return 0;
+
+	if (p->backlight) {
+		p->backlight->props.power = FB_BLANK_POWERDOWN;
+		backlight_update_status(p->backlight);
+	}
+
+	if (gpio_is_valid(p->enable_gpio)) {
+		if (p->enable_gpio_flags & GPIO_ACTIVE_LOW)
+			gpio_set_value(p->enable_gpio, 1);
+		else
+			gpio_set_value(p->enable_gpio, 0);
+	}
+
+	regulator_disable(p->supply);
+	p->enabled = false;
+
+	return 0;
+}
+
+static int panel_simple_enable(struct drm_panel *panel)
+{
+	struct panel_simple *p = to_panel_simple(panel);
+	int err;
+
+	if (p->enabled)
+		return 0;
+
+	err = regulator_enable(p->supply);
+	if (err < 0) {
+		dev_err(panel->dev, "failed to enable supply: %d\n", err);
+		return err;
+	}
+
+	if (gpio_is_valid(p->enable_gpio)) {
+		if (p->enable_gpio_flags & GPIO_ACTIVE_LOW)
+			gpio_set_value(p->enable_gpio, 0);
+		else
+			gpio_set_value(p->enable_gpio, 1);
+	}
+
+	if (p->backlight) {
+		p->backlight->props.power = FB_BLANK_UNBLANK;
+		backlight_update_status(p->backlight);
+	}
+
+	p->enabled = true;
+
+	return 0;
+}
+
+static int panel_simple_get_modes(struct drm_panel *panel)
+{
+	struct panel_simple *p = to_panel_simple(panel);
+	int num = 0;
+
+	/* probe EDID if a DDC bus is available */
+	if (p->ddc) {
+		struct edid *edid = drm_get_edid(panel->connector, p->ddc);
+		drm_mode_connector_update_edid_property(panel->connector, edid);
+		if (edid) {
+			num += drm_add_edid_modes(panel->connector, edid);
+			kfree(edid);
+		}
+	}
+
+	/* add hard-coded panel modes */
+	num += panel_simple_get_fixed_modes(p);
+
+	return num;
+}
+
+static const struct drm_panel_funcs panel_simple_funcs = {
+	.disable = panel_simple_disable,
+	.enable = panel_simple_enable,
+	.get_modes = panel_simple_get_modes,
+};
+
+static int panel_simple_probe(struct device *dev, const struct panel_desc *desc)
+{
+	struct device_node *backlight, *ddc;
+	struct panel_simple *panel;
+	enum of_gpio_flags flags;
+	int err;
+
+	panel = devm_kzalloc(dev, sizeof(*panel), GFP_KERNEL);
+	if (!panel)
+		return -ENOMEM;
+
+	panel->enabled = false;
+	panel->desc = desc;
+
+	panel->supply = devm_regulator_get(dev, "power");
+	if (IS_ERR(panel->supply))
+		return PTR_ERR(panel->supply);
+
+	panel->enable_gpio = of_get_named_gpio_flags(dev->of_node,
+						     "enable-gpios", 0,
+						     &flags);
+	if (gpio_is_valid(panel->enable_gpio)) {
+		unsigned int value;
+
+		if (flags & OF_GPIO_ACTIVE_LOW)
+			panel->enable_gpio_flags |= GPIO_ACTIVE_LOW;
+
+		err = gpio_request(panel->enable_gpio, "enable");
+		if (err < 0) {
+			dev_err(dev, "failed to request GPIO#%u: %d\n",
+				panel->enable_gpio, err);
+			return err;
+		}
+
+		value = (panel->enable_gpio_flags & GPIO_ACTIVE_LOW) != 0;
+
+		err = gpio_direction_output(panel->enable_gpio, value);
+		if (err < 0) {
+			dev_err(dev, "failed to setup GPIO%u: %d\n",
+				panel->enable_gpio, err);
+			goto free_gpio;
+		}
+	}
+
+	backlight = of_parse_phandle(dev->of_node, "backlight", 0);
+	if (backlight) {
+		panel->backlight = of_find_backlight_by_node(backlight);
+		of_node_put(backlight);
+
+		if (!panel->backlight) {
+			err = -EPROBE_DEFER;
+			goto free_gpio;
+		}
+	}
+
+	ddc = of_parse_phandle(dev->of_node, "ddc-i2c-bus", 0);
+	if (ddc) {
+		panel->ddc = of_find_i2c_adapter_by_node(ddc);
+		of_node_put(ddc);
+
+		if (!panel->ddc) {
+			err = -EPROBE_DEFER;
+			goto free_backlight;
+		}
+	}
+
+	drm_panel_init(&panel->base);
+	panel->base.dev = dev;
+	panel->base.funcs = &panel_simple_funcs;
+
+	err = drm_panel_add(&panel->base);
+	if (err < 0)
+		goto free_ddc;
+
+	dev_set_drvdata(dev, panel);
+
+	return 0;
+
+free_ddc:
+	if (panel->ddc)
+		put_device(&panel->ddc->dev);
+free_backlight:
+	if (panel->backlight)
+		put_device(&panel->backlight->dev);
+free_gpio:
+	if (gpio_is_valid(panel->enable_gpio))
+		gpio_free(panel->enable_gpio);
+
+	return err;
+}
+
+static int panel_simple_remove(struct device *dev)
+{
+	struct panel_simple *panel = dev_get_drvdata(dev);
+
+	drm_panel_detach(&panel->base);
+	drm_panel_remove(&panel->base);
+
+	panel_simple_disable(&panel->base);
+
+	if (panel->ddc)
+		put_device(&panel->ddc->dev);
+
+	if (panel->backlight)
+		put_device(&panel->backlight->dev);
+
+	if (gpio_is_valid(panel->enable_gpio))
+		gpio_free(panel->enable_gpio);
+
+	regulator_disable(panel->supply);
+
+	return 0;
+}
+
+static const struct drm_display_mode auo_b101aw03_mode = {
+	.clock = 51450,
+	.hdisplay = 1024,
+	.hsync_start = 1024 + 156,
+	.hsync_end = 1024 + 156 + 8,
+	.htotal = 1024 + 156 + 8 + 156,
+	.vdisplay = 600,
+	.vsync_start = 600 + 16,
+	.vsync_end = 600 + 16 + 6,
+	.vtotal = 600 + 16 + 6 + 16,
+	.vrefresh = 60,
+};
+
+static const struct panel_desc auo_b101aw03 = {
+	.modes = &auo_b101aw03_mode,
+	.num_modes = 1,
+	.size = {
+		.width = 223,
+		.height = 125,
+	},
+};
+
+static const struct drm_display_mode chunghwa_claa101wa01a_mode = {
+	.clock = 72070,
+	.hdisplay = 1366,
+	.hsync_start = 1366 + 58,
+	.hsync_end = 1366 + 58 + 58,
+	.htotal = 1366 + 58 + 58 + 58,
+	.vdisplay = 768,
+	.vsync_start = 768 + 4,
+	.vsync_end = 768 + 4 + 4,
+	.vtotal = 768 + 4 + 4 + 4,
+	.vrefresh = 60,
+};
+
+static const struct panel_desc chunghwa_claa101wa01a = {
+	.modes = &chunghwa_claa101wa01a_mode,
+	.num_modes = 1,
+	.size = {
+		.width = 220,
+		.height = 120,
+	},
+};
+
+static const struct drm_display_mode chunghwa_claa101wb01_mode = {
+	.clock = 69300,
+	.hdisplay = 1366,
+	.hsync_start = 1366 + 48,
+	.hsync_end = 1366 + 48 + 32,
+	.htotal = 1366 + 48 + 32 + 20,
+	.vdisplay = 768,
+	.vsync_start = 768 + 16,
+	.vsync_end = 768 + 16 + 8,
+	.vtotal = 768 + 16 + 8 + 16,
+	.vrefresh = 60,
+};
+
+static const struct panel_desc chunghwa_claa101wb01 = {
+	.modes = &chunghwa_claa101wb01_mode,
+	.num_modes = 1,
+	.size = {
+		.width = 223,
+		.height = 125,
+	},
+};
+
+static const struct drm_display_mode samsung_ltn101nt05_mode = {
+	.clock = 54030,
+	.hdisplay = 1024,
+	.hsync_start = 1024 + 24,
+	.hsync_end = 1024 + 24 + 136,
+	.htotal = 1024 + 24 + 136 + 160,
+	.vdisplay = 600,
+	.vsync_start = 600 + 3,
+	.vsync_end = 600 + 3 + 6,
+	.vtotal = 600 + 3 + 6 + 61,
+	.vrefresh = 60,
+};
+
+static const struct panel_desc samsung_ltn101nt05 = {
+	.modes = &samsung_ltn101nt05_mode,
+	.num_modes = 1,
+	.size = {
+		.width = 1024,
+		.height = 600,
+	},
+};
+
+static const struct of_device_id platform_of_match[] = {
+	{
+		.compatible = "auo,b101aw03",
+		.data = &auo_b101aw03,
+	}, {
+		.compatible = "chunghwa,claa101wa01a",
+		.data = &chunghwa_claa101wa01a
+	}, {
+		.compatible = "chunghwa,claa101wb01",
+		.data = &chunghwa_claa101wb01
+	}, {
+		.compatible = "samsung,ltn101nt05",
+		.data = &samsung_ltn101nt05,
+	}, {
+		.compatible = "simple-panel",
+	}, {
+		/* sentinel */
+	}
+};
+MODULE_DEVICE_TABLE(of, platform_of_match);
+
+static int panel_simple_platform_probe(struct platform_device *pdev)
+{
+	const struct of_device_id *id;
+
+	id = of_match_node(platform_of_match, pdev->dev.of_node);
+	if (!id)
+		return -ENODEV;
+
+	return panel_simple_probe(&pdev->dev, id->data);
+}
+
+static int panel_simple_platform_remove(struct platform_device *pdev)
+{
+	return panel_simple_remove(&pdev->dev);
+}
+
+static struct platform_driver panel_simple_platform_driver = {
+	.driver = {
+		.name = "panel-simple",
+		.owner = THIS_MODULE,
+		.of_match_table = platform_of_match,
+	},
+	.probe = panel_simple_platform_probe,
+	.remove = panel_simple_platform_remove,
+};
+
+struct panel_desc_dsi {
+	struct panel_desc desc;
+
+	enum mipi_dsi_pixel_format format;
+	unsigned int lanes;
+};
+
+static const struct drm_display_mode panasonic_vvx10f004b00_mode = {
+	.clock = 157200,
+	.hdisplay = 1920,
+	.hsync_start = 1920 + 154,
+	.hsync_end = 1920 + 154 + 16,
+	.htotal = 1920 + 154 + 16 + 32,
+	.vdisplay = 1200,
+	.vsync_start = 1200 + 17,
+	.vsync_end = 1200 + 17 + 2,
+	.vtotal = 1200 + 17 + 2 + 16,
+	.vrefresh = 60,
+};
+
+static const struct panel_desc_dsi panasonic_vvx10f004b00 = {
+	.desc = {
+		.modes = &panasonic_vvx10f004b00_mode,
+		.num_modes = 1,
+		.size = {
+			.width = 217,
+			.height = 136,
+		},
+	},
+	.format = MIPI_DSI_FMT_RGB888,
+	.lanes = 4,
+};
+
+static const struct of_device_id dsi_of_match[] = {
+	{
+		.compatible = "panasonic,vvx10f004b00",
+		.data = &panasonic_vvx10f004b00
+	}, {
+		/* sentinel */
+	}
+};
+MODULE_DEVICE_TABLE(of, dsi_of_match);
+
+static int panel_simple_dsi_probe(struct mipi_dsi_device *dsi)
+{
+	const struct panel_desc_dsi *desc;
+	const struct of_device_id *id;
+	int err;
+
+	id = of_match_node(dsi_of_match, dsi->dev.of_node);
+	if (!id)
+		return -ENODEV;
+
+	desc = id->data;
+
+	err = panel_simple_probe(&dsi->dev, &desc->desc);
+	if (err < 0)
+		return err;
+
+	dsi->format = desc->format;
+	dsi->lanes = desc->lanes;
+
+	return mipi_dsi_attach(dsi);
+}
+
+static int panel_simple_dsi_remove(struct mipi_dsi_device *dsi)
+{
+	int err;
+
+	err = mipi_dsi_detach(dsi);
+	if (err < 0)
+		dev_err(&dsi->dev, "failed to detach from DSI host: %d\n", err);
+
+	return panel_simple_remove(&dsi->dev);
+}
+
+static struct mipi_dsi_driver panel_simple_dsi_driver = {
+	.driver = {
+		.name = "panel-simple-dsi",
+		.owner = THIS_MODULE,
+		.of_match_table = dsi_of_match,
+	},
+	.probe = panel_simple_dsi_probe,
+	.remove = panel_simple_dsi_remove,
+};
+
+static int __init panel_simple_init(void)
+{
+	int err;
+
+	err = platform_driver_register(&panel_simple_platform_driver);
+	if (err < 0)
+		return err;
+
+	if (IS_ENABLED(CONFIG_DRM_MIPI_DSI)) {
+		err = mipi_dsi_driver_register(&panel_simple_dsi_driver);
+		if (err < 0)
+			return err;
+	}
+
+	return 0;
+}
+module_init(panel_simple_init);
+
+static void __exit panel_simple_exit(void)
+{
+	if (IS_ENABLED(CONFIG_DRM_MIPI_DSI))
+		mipi_dsi_driver_unregister(&panel_simple_dsi_driver);
+
+	platform_driver_unregister(&panel_simple_platform_driver);
+}
+module_exit(panel_simple_exit);
+
+MODULE_AUTHOR("Thierry Reding <treding@nvidia.com>");
+MODULE_DESCRIPTION("DRM Driver for Simple Panels");
+MODULE_LICENSE("GPL and additional rights");
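
panel-simple drives its enable line through the legacy GPIO calls and folds the driver-local GPIO_ACTIVE_LOW flag into the value it writes, so a logical "panel on" becomes a physical 0 on an active-low line. The polarity handling above, condensed into one illustrative helper (not part of the driver):

static void example_panel_set_enabled(int gpio, unsigned long flags,
				      bool enable)
{
	int value = enable ? 1 : 0;

	if (flags & GPIO_ACTIVE_LOW)	/* driver-local flag, see above */
		value = !value;

	if (gpio_is_valid(gpio))
		gpio_set_value(gpio, value);
}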
diff --git a/drivers/gpu/drm/qxl/qxl_display.c b/drivers/gpu/drm/qxl/qxl_display.c
index d70aafb..798bde2 100644
--- a/drivers/gpu/drm/qxl/qxl_display.c
+++ b/drivers/gpu/drm/qxl/qxl_display.c
@@ -399,10 +399,14 @@
 	struct qxl_bo *qobj;
 	int inc = 1;
 
+	drm_modeset_lock_all(fb->dev);
+
 	qobj = gem_to_qxl_bo(qxl_fb->obj);
 	/* if we aren't primary surface ignore this */
-	if (!qobj->is_primary)
+	if (!qobj->is_primary) {
+		drm_modeset_unlock_all(fb->dev);
 		return 0;
+	}
 
 	if (!num_clips) {
 		num_clips = 1;
@@ -417,6 +421,9 @@
 
 	qxl_draw_dirty_fb(qdev, qxl_fb, qobj, flags, color,
 			  clips, num_clips, inc);
+
+	drm_modeset_unlock_all(fb->dev);
+
 	return 0;
 }
 
diff --git a/drivers/gpu/drm/qxl/qxl_drv.h b/drivers/gpu/drm/qxl/qxl_drv.h
index 7bda32f..36ed40b 100644
--- a/drivers/gpu/drm/qxl/qxl_drv.h
+++ b/drivers/gpu/drm/qxl/qxl_drv.h
@@ -534,7 +534,7 @@
 
 /* qxl_irq.c */
 int qxl_irq_init(struct qxl_device *qdev);
-irqreturn_t qxl_irq_handler(DRM_IRQ_ARGS);
+irqreturn_t qxl_irq_handler(int irq, void *arg);
 
 /* qxl_fb.c */
 int qxl_fb_init(struct qxl_device *qdev);
diff --git a/drivers/gpu/drm/qxl/qxl_ioctl.c b/drivers/gpu/drm/qxl/qxl_ioctl.c
index 7b95c75..0bb86e6 100644
--- a/drivers/gpu/drm/qxl/qxl_ioctl.c
+++ b/drivers/gpu/drm/qxl/qxl_ioctl.c
@@ -200,7 +200,7 @@
 	for (i = 0; i < cmd->relocs_num; ++i) {
 		struct drm_qxl_reloc reloc;
 
-		if (DRM_COPY_FROM_USER(&reloc,
+		if (copy_from_user(&reloc,
 				       &((struct drm_qxl_reloc *)(uintptr_t)cmd->relocs)[i],
 				       sizeof(reloc))) {
 			ret = -EFAULT;
@@ -297,7 +297,7 @@
 		struct drm_qxl_command *commands =
 			(struct drm_qxl_command *)(uintptr_t)execbuffer->commands;
 
-		if (DRM_COPY_FROM_USER(&user_cmd, &commands[cmd_num],
+		if (copy_from_user(&user_cmd, &commands[cmd_num],
 				       sizeof(user_cmd)))
 			return -EFAULT;
 
diff --git a/drivers/gpu/drm/qxl/qxl_irq.c b/drivers/gpu/drm/qxl/qxl_irq.c
index 21393dc..28f84b4 100644
--- a/drivers/gpu/drm/qxl/qxl_irq.c
+++ b/drivers/gpu/drm/qxl/qxl_irq.c
@@ -25,7 +25,7 @@
 
 #include "qxl_drv.h"
 
-irqreturn_t qxl_irq_handler(DRM_IRQ_ARGS)
+irqreturn_t qxl_irq_handler(int irq, void *arg)
 {
 	struct drm_device *dev = (struct drm_device *) arg;
 	struct qxl_device *qdev = (struct qxl_device *)dev->dev_private;
diff --git a/drivers/gpu/drm/qxl/qxl_kms.c b/drivers/gpu/drm/qxl/qxl_kms.c
index e5ca498..fd88eb4 100644
--- a/drivers/gpu/drm/qxl/qxl_kms.c
+++ b/drivers/gpu/drm/qxl/qxl_kms.c
@@ -115,7 +115,7 @@
 	qxl_garbage_collect(qdev);
 }
 
-int qxl_device_init(struct qxl_device *qdev,
+static int qxl_device_init(struct qxl_device *qdev,
 		    struct drm_device *ddev,
 		    struct pci_dev *pdev,
 		    unsigned long flags)
diff --git a/drivers/gpu/drm/r128/r128_cce.c b/drivers/gpu/drm/r128/r128_cce.c
index c451257..59459fe 100644
--- a/drivers/gpu/drm/r128/r128_cce.c
+++ b/drivers/gpu/drm/r128/r128_cce.c
@@ -892,10 +892,10 @@
 
 		buf->file_priv = file_priv;
 
-		if (DRM_COPY_TO_USER(&d->request_indices[i], &buf->idx,
+		if (copy_to_user(&d->request_indices[i], &buf->idx,
 				     sizeof(buf->idx)))
 			return -EFAULT;
-		if (DRM_COPY_TO_USER(&d->request_sizes[i], &buf->total,
+		if (copy_to_user(&d->request_sizes[i], &buf->total,
 				     sizeof(buf->total)))
 			return -EFAULT;
 
diff --git a/drivers/gpu/drm/r128/r128_drv.h b/drivers/gpu/drm/r128/r128_drv.h
index 56eb5e3..5bf3f5f 100644
--- a/drivers/gpu/drm/r128/r128_drv.h
+++ b/drivers/gpu/drm/r128/r128_drv.h
@@ -154,7 +154,7 @@
 extern int r128_enable_vblank(struct drm_device *dev, int crtc);
 extern void r128_disable_vblank(struct drm_device *dev, int crtc);
 extern u32 r128_get_vblank_counter(struct drm_device *dev, int crtc);
-extern irqreturn_t r128_driver_irq_handler(DRM_IRQ_ARGS);
+extern irqreturn_t r128_driver_irq_handler(int irq, void *arg);
 extern void r128_driver_irq_preinstall(struct drm_device *dev);
 extern int r128_driver_irq_postinstall(struct drm_device *dev);
 extern void r128_driver_irq_uninstall(struct drm_device *dev);
@@ -514,7 +514,7 @@
 	if (R128_VERBOSE)						\
 		DRM_INFO("COMMIT_RING() tail=0x%06x\n",			\
 			 dev_priv->ring.tail);				\
-	DRM_MEMORYBARRIER();						\
+	mb();						\
 	R128_WRITE(R128_PM4_BUFFER_DL_WPTR, dev_priv->ring.tail);	\
 	R128_READ(R128_PM4_BUFFER_DL_WPTR);				\
 } while (0)
diff --git a/drivers/gpu/drm/r128/r128_ioc32.c b/drivers/gpu/drm/r128/r128_ioc32.c
index a954c54..b0d0fd3 100644
--- a/drivers/gpu/drm/r128/r128_ioc32.c
+++ b/drivers/gpu/drm/r128/r128_ioc32.c
@@ -33,6 +33,7 @@
 
 #include <drm/drmP.h>
 #include <drm/r128_drm.h>
+#include "r128_drv.h"
 
 typedef struct drm_r128_init32 {
 	int func;
diff --git a/drivers/gpu/drm/r128/r128_irq.c b/drivers/gpu/drm/r128/r128_irq.c
index 2ea4f09..c2ae496 100644
--- a/drivers/gpu/drm/r128/r128_irq.c
+++ b/drivers/gpu/drm/r128/r128_irq.c
@@ -44,7 +44,7 @@
 	return atomic_read(&dev_priv->vbl_received);
 }
 
-irqreturn_t r128_driver_irq_handler(DRM_IRQ_ARGS)
+irqreturn_t r128_driver_irq_handler(int irq, void *arg)
 {
 	struct drm_device *dev = (struct drm_device *) arg;
 	drm_r128_private_t *dev_priv = (drm_r128_private_t *) dev->dev_private;
diff --git a/drivers/gpu/drm/r128/r128_state.c b/drivers/gpu/drm/r128/r128_state.c
index 01dd9ae..e806dac 100644
--- a/drivers/gpu/drm/r128/r128_state.c
+++ b/drivers/gpu/drm/r128/r128_state.c
@@ -895,31 +895,22 @@
 	if (count > 4096 || count <= 0)
 		return -EMSGSIZE;
 
-	if (DRM_COPY_FROM_USER(&x, depth->x, sizeof(x)))
+	if (copy_from_user(&x, depth->x, sizeof(x)))
 		return -EFAULT;
-	if (DRM_COPY_FROM_USER(&y, depth->y, sizeof(y)))
+	if (copy_from_user(&y, depth->y, sizeof(y)))
 		return -EFAULT;
 
 	buffer_size = depth->n * sizeof(u32);
-	buffer = kmalloc(buffer_size, GFP_KERNEL);
-	if (buffer == NULL)
-		return -ENOMEM;
-	if (DRM_COPY_FROM_USER(buffer, depth->buffer, buffer_size)) {
-		kfree(buffer);
-		return -EFAULT;
-	}
+	buffer = memdup_user(depth->buffer, buffer_size);
+	if (IS_ERR(buffer))
+		return PTR_ERR(buffer);
 
 	mask_size = depth->n * sizeof(u8);
 	if (depth->mask) {
-		mask = kmalloc(mask_size, GFP_KERNEL);
-		if (mask == NULL) {
+		mask = memdup_user(depth->mask, mask_size);
+		if (IS_ERR(mask)) {
 			kfree(buffer);
-			return -ENOMEM;
-		}
-		if (DRM_COPY_FROM_USER(mask, depth->mask, mask_size)) {
-			kfree(buffer);
-			kfree(mask);
-			return -EFAULT;
+			return PTR_ERR(mask);
 		}
 
 		for (i = 0; i < count; i++, x++) {
@@ -999,46 +990,33 @@
 		kfree(x);
 		return -ENOMEM;
 	}
-	if (DRM_COPY_FROM_USER(x, depth->x, xbuf_size)) {
+	if (copy_from_user(x, depth->x, xbuf_size)) {
 		kfree(x);
 		kfree(y);
 		return -EFAULT;
 	}
-	if (DRM_COPY_FROM_USER(y, depth->y, xbuf_size)) {
+	if (copy_from_user(y, depth->y, xbuf_size)) {
 		kfree(x);
 		kfree(y);
 		return -EFAULT;
 	}
 
 	buffer_size = depth->n * sizeof(u32);
-	buffer = kmalloc(buffer_size, GFP_KERNEL);
-	if (buffer == NULL) {
+	buffer = memdup_user(depth->buffer, buffer_size);
+	if (IS_ERR(buffer)) {
 		kfree(x);
 		kfree(y);
-		return -ENOMEM;
-	}
-	if (DRM_COPY_FROM_USER(buffer, depth->buffer, buffer_size)) {
-		kfree(x);
-		kfree(y);
-		kfree(buffer);
-		return -EFAULT;
+		return PTR_ERR(buffer);
 	}
 
 	if (depth->mask) {
 		mask_size = depth->n * sizeof(u8);
-		mask = kmalloc(mask_size, GFP_KERNEL);
-		if (mask == NULL) {
+		mask = memdup_user(depth->mask, mask_size);
+		if (IS_ERR(mask)) {
 			kfree(x);
 			kfree(y);
 			kfree(buffer);
-			return -ENOMEM;
-		}
-		if (DRM_COPY_FROM_USER(mask, depth->mask, mask_size)) {
-			kfree(x);
-			kfree(y);
-			kfree(buffer);
-			kfree(mask);
-			return -EFAULT;
+			return PTR_ERR(mask);
 		}
 
 		for (i = 0; i < count; i++) {
@@ -1107,9 +1085,9 @@
 	if (count > 4096 || count <= 0)
 		return -EMSGSIZE;
 
-	if (DRM_COPY_FROM_USER(&x, depth->x, sizeof(x)))
+	if (copy_from_user(&x, depth->x, sizeof(x)))
 		return -EFAULT;
-	if (DRM_COPY_FROM_USER(&y, depth->y, sizeof(y)))
+	if (copy_from_user(&y, depth->y, sizeof(y)))
 		return -EFAULT;
 
 	BEGIN_RING(7);
@@ -1162,12 +1140,12 @@
 		kfree(x);
 		return -ENOMEM;
 	}
-	if (DRM_COPY_FROM_USER(x, depth->x, xbuf_size)) {
+	if (copy_from_user(x, depth->x, xbuf_size)) {
 		kfree(x);
 		kfree(y);
 		return -EFAULT;
 	}
-	if (DRM_COPY_FROM_USER(y, depth->y, ybuf_size)) {
+	if (copy_from_user(y, depth->y, ybuf_size)) {
 		kfree(x);
 		kfree(y);
 		return -EFAULT;
@@ -1524,7 +1502,7 @@
 
 	DEV_INIT_TEST_WITH_RETURN(dev_priv);
 
-	if (DRM_COPY_FROM_USER(&mask, stipple->mask, 32 * sizeof(u32)))
+	if (copy_from_user(&mask, stipple->mask, 32 * sizeof(u32)))
 		return -EFAULT;
 
 	RING_SPACE_TEST_WITH_RETURN(dev_priv);
@@ -1622,7 +1600,7 @@
 		return -EINVAL;
 	}
 
-	if (DRM_COPY_TO_USER(param->value, &value, sizeof(int))) {
+	if (copy_to_user(param->value, &value, sizeof(int))) {
 		DRM_ERROR("copy_to_user\n");
 		return -EFAULT;
 	}
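
The r128_state.c hunks above collapse the repeated kmalloc()-plus-copy_from_user() pairs into memdup_user(), which either returns the freshly allocated kernel copy or an ERR_PTR() encoding the failure. A minimal sketch of the calling convention, assuming a kernel module context; the helper name and its parameters are hypothetical, while the API calls are the ones used in the patch.

#include <linux/err.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/uaccess.h>

/* Duplicate a user-supplied buffer into kernel memory.  A single
 * IS_ERR()/PTR_ERR() check replaces the separate allocation-failure and
 * copy-failure paths; the caller kfree()s the buffer when done. */
static int dup_user_buffer(void __user *ubuf, size_t len, u32 **out)
{
	u32 *buf = memdup_user(ubuf, len);

	if (IS_ERR(buf))
		return PTR_ERR(buf);	/* -ENOMEM or -EFAULT */

	*out = buf;
	return 0;
}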
diff --git a/drivers/gpu/drm/radeon/atombios_crtc.c b/drivers/gpu/drm/radeon/atombios_crtc.c
index 0b9621c..0d19f4f 100644
--- a/drivers/gpu/drm/radeon/atombios_crtc.c
+++ b/drivers/gpu/drm/radeon/atombios_crtc.c
@@ -209,6 +209,16 @@
 	atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
 }
 
+static const u32 vga_control_regs[6] =
+{
+	AVIVO_D1VGA_CONTROL,
+	AVIVO_D2VGA_CONTROL,
+	EVERGREEN_D3VGA_CONTROL,
+	EVERGREEN_D4VGA_CONTROL,
+	EVERGREEN_D5VGA_CONTROL,
+	EVERGREEN_D6VGA_CONTROL,
+};
+
 static void atombios_blank_crtc(struct drm_crtc *crtc, int state)
 {
 	struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
@@ -216,13 +226,23 @@
 	struct radeon_device *rdev = dev->dev_private;
 	int index = GetIndexIntoMasterTable(COMMAND, BlankCRTC);
 	BLANK_CRTC_PS_ALLOCATION args;
+	u32 vga_control = 0;
 
 	memset(&args, 0, sizeof(args));
 
+	if (ASIC_IS_DCE8(rdev)) {
+		vga_control = RREG32(vga_control_regs[radeon_crtc->crtc_id]);
+		WREG32(vga_control_regs[radeon_crtc->crtc_id], vga_control | 1);
+	}
+
 	args.ucCRTC = radeon_crtc->crtc_id;
 	args.ucBlanking = state;
 
 	atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
+
+	if (ASIC_IS_DCE8(rdev)) {
+		WREG32(vga_control_regs[radeon_crtc->crtc_id], vga_control);
+	}
 }
 
 static void atombios_powergate_crtc(struct drm_crtc *crtc, int state)
@@ -423,7 +443,17 @@
 	int index = GetIndexIntoMasterTable(COMMAND, EnableSpreadSpectrumOnPPLL);
 	union atom_enable_ss args;
 
-	if (!enable) {
+	if (enable) {
+		/* Don't mess with SS if percentage is 0 or external ss.
+		 * SS is already disabled previously, and disabling it
+		 * again can cause display problems if the pll is already
+		 * programmed.
+		 */
+		if (ss->percentage == 0)
+			return;
+		if (ss->type & ATOM_EXTERNAL_SS_MASK)
+			return;
+	} else {
 		for (i = 0; i < rdev->num_crtc; i++) {
 			if (rdev->mode_info.crtcs[i] &&
 			    rdev->mode_info.crtcs[i]->enabled &&
@@ -459,8 +489,6 @@
 		args.v3.usSpreadSpectrumAmount = cpu_to_le16(ss->amount);
 		args.v3.usSpreadSpectrumStep = cpu_to_le16(ss->step);
 		args.v3.ucEnable = enable;
-		if ((ss->percentage == 0) || (ss->type & ATOM_EXTERNAL_SS_MASK) || ASIC_IS_DCE61(rdev))
-			args.v3.ucEnable = ATOM_DISABLE;
 	} else if (ASIC_IS_DCE4(rdev)) {
 		args.v2.usSpreadSpectrumPercentage = cpu_to_le16(ss->percentage);
 		args.v2.ucSpreadSpectrumType = ss->type & ATOM_SS_CENTRE_SPREAD_MODE_MASK;
@@ -480,8 +508,6 @@
 		args.v2.usSpreadSpectrumAmount = cpu_to_le16(ss->amount);
 		args.v2.usSpreadSpectrumStep = cpu_to_le16(ss->step);
 		args.v2.ucEnable = enable;
-		if ((ss->percentage == 0) || (ss->type & ATOM_EXTERNAL_SS_MASK) || ASIC_IS_DCE41(rdev))
-			args.v2.ucEnable = ATOM_DISABLE;
 	} else if (ASIC_IS_DCE3(rdev)) {
 		args.v1.usSpreadSpectrumPercentage = cpu_to_le16(ss->percentage);
 		args.v1.ucSpreadSpectrumType = ss->type & ATOM_SS_CENTRE_SPREAD_MODE_MASK;
@@ -503,8 +529,7 @@
 		args.lvds_ss_2.ucSpreadSpectrumRange = ss->range;
 		args.lvds_ss_2.ucEnable = enable;
 	} else {
-		if ((enable == ATOM_DISABLE) || (ss->percentage == 0) ||
-		    (ss->type & ATOM_EXTERNAL_SS_MASK)) {
+		if (enable == ATOM_DISABLE) {
 			atombios_disable_ss(rdev, pll_id);
 			return;
 		}
@@ -534,7 +559,7 @@
 	u32 adjusted_clock = mode->clock;
 	int encoder_mode = atombios_get_encoder_mode(encoder);
 	u32 dp_clock = mode->clock;
-	int bpc = radeon_get_monitor_bpc(connector);
+	int bpc = radeon_crtc->bpc;
 	bool is_duallink = radeon_dig_monitor_is_duallink(encoder, mode->clock);
 
 	/* reset the pll flags */
@@ -938,11 +963,14 @@
 							radeon_atombios_get_ppll_ss_info(rdev,
 											 &radeon_crtc->ss,
 											 ATOM_DP_SS_ID1);
-				} else
+				} else {
 					radeon_crtc->ss_enabled =
 						radeon_atombios_get_ppll_ss_info(rdev,
 										 &radeon_crtc->ss,
 										 ATOM_DP_SS_ID1);
+				}
+				/* disable spread spectrum on DCE3 DP */
+				radeon_crtc->ss_enabled = false;
 			}
 			break;
 		case ATOM_ENCODER_MODE_LVDS:
@@ -1039,15 +1067,17 @@
 		/* calculate ss amount and step size */
 		if (ASIC_IS_DCE4(rdev)) {
 			u32 step_size;
-			u32 amount = (((fb_div * 10) + frac_fb_div) * radeon_crtc->ss.percentage) / 10000;
+			u32 amount = (((fb_div * 10) + frac_fb_div) *
+				      (u32)radeon_crtc->ss.percentage) /
+				(100 * (u32)radeon_crtc->ss.percentage_divider);
 			radeon_crtc->ss.amount = (amount / 10) & ATOM_PPLL_SS_AMOUNT_V2_FBDIV_MASK;
 			radeon_crtc->ss.amount |= ((amount - (amount / 10)) << ATOM_PPLL_SS_AMOUNT_V2_NFRAC_SHIFT) &
 				ATOM_PPLL_SS_AMOUNT_V2_NFRAC_MASK;
 			if (radeon_crtc->ss.type & ATOM_PPLL_SS_TYPE_V2_CENTRE_SPREAD)
-				step_size = (4 * amount * ref_div * (radeon_crtc->ss.rate * 2048)) /
+				step_size = (4 * amount * ref_div * ((u32)radeon_crtc->ss.rate * 2048)) /
 					(125 * 25 * pll->reference_freq / 100);
 			else
-				step_size = (2 * amount * ref_div * (radeon_crtc->ss.rate * 2048)) /
+				step_size = (2 * amount * ref_div * ((u32)radeon_crtc->ss.rate * 2048)) /
 					(125 * 25 * pll->reference_freq / 100);
 			radeon_crtc->ss.step = step_size;
 		}
@@ -1146,7 +1176,7 @@
 		evergreen_tiling_fields(tiling_flags, &bankw, &bankh, &mtaspect, &tile_split);
 
 		/* Set NUM_BANKS. */
-		if (rdev->family >= CHIP_BONAIRE) {
+		if (rdev->family >= CHIP_TAHITI) {
 			unsigned tileb, index, num_banks, tile_split_bytes;
 
 			/* Calculate the macrotile mode index. */
@@ -1164,13 +1194,14 @@
 				return -EINVAL;
 			}
 
-			num_banks = (rdev->config.cik.macrotile_mode_array[index] >> 6) & 0x3;
+			if (rdev->family >= CHIP_BONAIRE)
+				num_banks = (rdev->config.cik.macrotile_mode_array[index] >> 6) & 0x3;
+			else
+				num_banks = (rdev->config.si.tile_mode_array[index] >> 20) & 0x3;
 			fb_format |= EVERGREEN_GRPH_NUM_BANKS(num_banks);
 		} else {
-			/* SI and older. */
-			if (rdev->family >= CHIP_TAHITI)
-				tmp = rdev->config.si.tile_config;
-			else if (rdev->family >= CHIP_CAYMAN)
+			/* NI and older. */
+			if (rdev->family >= CHIP_CAYMAN)
 				tmp = rdev->config.cayman.tile_config;
 			else
 				tmp = rdev->config.evergreen.tile_config;
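
The last atombios_crtc.c hunk above selects NUM_BANKS from per-family tiling tables: CIK parts read a 2-bit field at bits 7:6 of the macrotile mode word, SI parts at bits 21:20 of the tile mode word. A small worked example of that extraction follows; the sample register words are made up, and the 2/4/8/16 mapping reflects the usual ADDR_SURF_*_BANK encoding rather than anything stated in the patch.

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t cik_macrotile_word = 0x000000c4;	/* hypothetical */
	uint32_t si_tile_word       = 0x00200000;	/* hypothetical */

	unsigned cik_banks = (cik_macrotile_word >> 6) & 0x3;	/* 3 */
	unsigned si_banks  = (si_tile_word >> 20) & 0x3;	/* 2 */

	printf("CIK: encoded %u -> %u banks\n", cik_banks, 2u << cik_banks);
	printf("SI:  encoded %u -> %u banks\n", si_banks, 2u << si_banks);
	return 0;
}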
diff --git a/drivers/gpu/drm/radeon/atombios_dp.c b/drivers/gpu/drm/radeon/atombios_dp.c
index fb3ae07..4ad7643 100644
--- a/drivers/gpu/drm/radeon/atombios_dp.c
+++ b/drivers/gpu/drm/radeon/atombios_dp.c
@@ -157,21 +157,22 @@
 
 	msg[0] = address;
 	msg[1] = address >> 8;
-	msg[2] = AUX_NATIVE_WRITE << 4;
+	msg[2] = DP_AUX_NATIVE_WRITE << 4;
 	msg[3] = (msg_bytes << 4) | (send_bytes - 1);
 	memcpy(&msg[4], send, send_bytes);
 
-	for (retry = 0; retry < 4; retry++) {
+	for (retry = 0; retry < 7; retry++) {
 		ret = radeon_process_aux_ch(dig_connector->dp_i2c_bus,
 					    msg, msg_bytes, NULL, 0, delay, &ack);
 		if (ret == -EBUSY)
 			continue;
 		else if (ret < 0)
 			return ret;
-		if ((ack & AUX_NATIVE_REPLY_MASK) == AUX_NATIVE_REPLY_ACK)
+		ack >>= 4;
+		if ((ack & DP_AUX_NATIVE_REPLY_MASK) == DP_AUX_NATIVE_REPLY_ACK)
 			return send_bytes;
-		else if ((ack & AUX_NATIVE_REPLY_MASK) == AUX_NATIVE_REPLY_DEFER)
-			udelay(400);
+		else if ((ack & DP_AUX_NATIVE_REPLY_MASK) == DP_AUX_NATIVE_REPLY_DEFER)
+			usleep_range(400, 500);
 		else
 			return -EIO;
 	}
@@ -191,20 +192,21 @@
 
 	msg[0] = address;
 	msg[1] = address >> 8;
-	msg[2] = AUX_NATIVE_READ << 4;
+	msg[2] = DP_AUX_NATIVE_READ << 4;
 	msg[3] = (msg_bytes << 4) | (recv_bytes - 1);
 
-	for (retry = 0; retry < 4; retry++) {
+	for (retry = 0; retry < 7; retry++) {
 		ret = radeon_process_aux_ch(dig_connector->dp_i2c_bus,
 					    msg, msg_bytes, recv, recv_bytes, delay, &ack);
 		if (ret == -EBUSY)
 			continue;
 		else if (ret < 0)
 			return ret;
-		if ((ack & AUX_NATIVE_REPLY_MASK) == AUX_NATIVE_REPLY_ACK)
+		ack >>= 4;
+		if ((ack & DP_AUX_NATIVE_REPLY_MASK) == DP_AUX_NATIVE_REPLY_ACK)
 			return ret;
-		else if ((ack & AUX_NATIVE_REPLY_MASK) == AUX_NATIVE_REPLY_DEFER)
-			udelay(400);
+		else if ((ack & DP_AUX_NATIVE_REPLY_MASK) == DP_AUX_NATIVE_REPLY_DEFER)
+			usleep_range(400, 500);
 		else if (ret == 0)
 			return -EPROTO;
 		else
@@ -246,12 +248,12 @@
 
 	/* Set up the command byte */
 	if (mode & MODE_I2C_READ)
-		msg[2] = AUX_I2C_READ << 4;
+		msg[2] = DP_AUX_I2C_READ << 4;
 	else
-		msg[2] = AUX_I2C_WRITE << 4;
+		msg[2] = DP_AUX_I2C_WRITE << 4;
 
 	if (!(mode & MODE_I2C_STOP))
-		msg[2] |= AUX_I2C_MOT << 4;
+		msg[2] |= DP_AUX_I2C_MOT << 4;
 
 	msg[0] = address;
 	msg[1] = address >> 8;
@@ -272,7 +274,7 @@
 		break;
 	}
 
-	for (retry = 0; retry < 4; retry++) {
+	for (retry = 0; retry < 7; retry++) {
 		ret = radeon_process_aux_ch(auxch,
 					    msg, msg_bytes, reply, reply_bytes, 0, &ack);
 		if (ret == -EBUSY)
@@ -282,35 +284,35 @@
 			return ret;
 		}
 
-		switch (ack & AUX_NATIVE_REPLY_MASK) {
-		case AUX_NATIVE_REPLY_ACK:
+		switch ((ack >> 4) & DP_AUX_NATIVE_REPLY_MASK) {
+		case DP_AUX_NATIVE_REPLY_ACK:
 			/* I2C-over-AUX Reply field is only valid
 			 * when paired with AUX ACK.
 			 */
 			break;
-		case AUX_NATIVE_REPLY_NACK:
+		case DP_AUX_NATIVE_REPLY_NACK:
 			DRM_DEBUG_KMS("aux_ch native nack\n");
 			return -EREMOTEIO;
-		case AUX_NATIVE_REPLY_DEFER:
+		case DP_AUX_NATIVE_REPLY_DEFER:
 			DRM_DEBUG_KMS("aux_ch native defer\n");
-			udelay(400);
+			usleep_range(500, 600);
 			continue;
 		default:
 			DRM_ERROR("aux_ch invalid native reply 0x%02x\n", ack);
 			return -EREMOTEIO;
 		}
 
-		switch (ack & AUX_I2C_REPLY_MASK) {
-		case AUX_I2C_REPLY_ACK:
+		switch ((ack >> 4) & DP_AUX_I2C_REPLY_MASK) {
+		case DP_AUX_I2C_REPLY_ACK:
 			if (mode == MODE_I2C_READ)
 				*read_byte = reply[0];
 			return ret;
-		case AUX_I2C_REPLY_NACK:
+		case DP_AUX_I2C_REPLY_NACK:
 			DRM_DEBUG_KMS("aux_i2c nack\n");
 			return -EREMOTEIO;
-		case AUX_I2C_REPLY_DEFER:
+		case DP_AUX_I2C_REPLY_DEFER:
 			DRM_DEBUG_KMS("aux_i2c defer\n");
-			udelay(400);
+			usleep_range(400, 500);
 			break;
 		default:
 			DRM_ERROR("aux_i2c invalid reply 0x%02x\n", ack);
@@ -671,9 +673,11 @@
 	u8 tmp;
 
 	/* power up the sink */
-	if (dp_info->dpcd[0] >= 0x11)
+	if (dp_info->dpcd[0] >= 0x11) {
 		radeon_write_dpcd_reg(dp_info->radeon_connector,
 				      DP_SET_POWER, DP_SET_POWER_D0);
+		usleep_range(1000, 2000);
+	}
 
 	/* possibly enable downspread on the sink */
 	if (dp_info->dpcd[3] & 0x1)
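
The atombios_dp.c hunks above move from the driver-local AUX_* reply constants to the shared DP_AUX_* ones, shift the hardware ack byte right by four before masking (the reply field sits in the high nibble of the returned byte, while the shared constants are defined against the low bits), raise the retry count from 4 to 7, and sleep with usleep_range() instead of spinning in udelay() on a defer. A small decode sketch follows; the constant values mirror include/drm/drm_dp_helper.h as used by the patch, and the sample ack bytes are made up.

#include <stdint.h>
#include <stdio.h>

#define DP_AUX_NATIVE_REPLY_ACK		(0x0 << 0)
#define DP_AUX_NATIVE_REPLY_NACK	(0x1 << 0)
#define DP_AUX_NATIVE_REPLY_DEFER	(0x2 << 0)
#define DP_AUX_NATIVE_REPLY_MASK	(0x3 << 0)

static const char *decode_native_reply(uint8_t ack)
{
	ack >>= 4;	/* move the hardware reply field down to bits 1:0 */

	switch (ack & DP_AUX_NATIVE_REPLY_MASK) {
	case DP_AUX_NATIVE_REPLY_ACK:	return "ack";
	case DP_AUX_NATIVE_REPLY_NACK:	return "nack";
	case DP_AUX_NATIVE_REPLY_DEFER:	return "defer";
	default:			return "invalid";
	}
}

int main(void)
{
	printf("0x00 -> %s, 0x20 -> %s\n",
	       decode_native_reply(0x00), decode_native_reply(0x20));
	return 0;
}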
diff --git a/drivers/gpu/drm/radeon/atombios_encoders.c b/drivers/gpu/drm/radeon/atombios_encoders.c
index a42d615..2cec2ab 100644
--- a/drivers/gpu/drm/radeon/atombios_encoders.c
+++ b/drivers/gpu/drm/radeon/atombios_encoders.c
@@ -464,11 +464,12 @@
 
 static u8 radeon_atom_get_bpc(struct drm_encoder *encoder)
 {
-	struct drm_connector *connector = radeon_get_connector_for_encoder(encoder);
 	int bpc = 8;
 
-	if (connector)
-		bpc = radeon_get_monitor_bpc(connector);
+	if (encoder->crtc) {
+		struct radeon_crtc *radeon_crtc = to_radeon_crtc(encoder->crtc);
+		bpc = radeon_crtc->bpc;
+	}
 
 	switch (bpc) {
 	case 0:
diff --git a/drivers/gpu/drm/radeon/atombios_i2c.c b/drivers/gpu/drm/radeon/atombios_i2c.c
index f685035dbe..b5162c3 100644
--- a/drivers/gpu/drm/radeon/atombios_i2c.c
+++ b/drivers/gpu/drm/radeon/atombios_i2c.c
@@ -27,8 +27,6 @@
 #include "radeon.h"
 #include "atom.h"
 
-extern void radeon_atom_copy_swap(u8 *dst, u8 *src, u8 num_bytes, bool to_le);
-
 #define TARGET_HW_I2C_CLOCK 50
 
 /* these are a limitation of ProcessI2cChannelTransaction not the hw */
diff --git a/drivers/gpu/drm/radeon/btc_dpm.c b/drivers/gpu/drm/radeon/btc_dpm.c
index 9b6950d..ea103cc 100644
--- a/drivers/gpu/drm/radeon/btc_dpm.c
+++ b/drivers/gpu/drm/radeon/btc_dpm.c
@@ -29,6 +29,7 @@
 #include "cypress_dpm.h"
 #include "btc_dpm.h"
 #include "atom.h"
+#include <linux/seq_file.h>
 
 #define MC_CG_ARB_FREQ_F0           0x0a
 #define MC_CG_ARB_FREQ_F1           0x0b
@@ -49,6 +50,7 @@
 struct rv7xx_power_info *rv770_get_pi(struct radeon_device *rdev);
 struct evergreen_power_info *evergreen_get_pi(struct radeon_device *rdev);
 
+extern int ni_mc_load_microcode(struct radeon_device *rdev);
 
 //********* BARTS **************//
 static const u32 barts_cgcg_cgls_default[] =
@@ -2510,21 +2512,6 @@
 	if (eg_pi->ls_clock_gating)
 		btc_ls_clock_gating_enable(rdev, true);
 
-	if (rdev->irq.installed &&
-	    r600_is_internal_thermal_sensor(rdev->pm.int_thermal_type)) {
-		PPSMC_Result result;
-
-		ret = rv770_set_thermal_temperature_range(rdev, R600_TEMP_RANGE_MIN, R600_TEMP_RANGE_MAX);
-		if (ret)
-			return ret;
-		rdev->irq.dpm_thermal = true;
-		radeon_irq_set(rdev);
-		result = rv770_send_msg_to_smc(rdev, PPSMC_MSG_EnableThermalInterrupt);
-
-		if (result != PPSMC_Result_OK)
-			DRM_DEBUG_KMS("Could not enable thermal interrupts.\n");
-	}
-
 	rv770_enable_auto_throttle_source(rdev, RADEON_DPM_AUTO_THROTTLE_SRC_THERMAL, true);
 
 	btc_init_stutter_mode(rdev);
@@ -2576,7 +2563,11 @@
 void btc_dpm_setup_asic(struct radeon_device *rdev)
 {
 	struct evergreen_power_info *eg_pi = evergreen_get_pi(rdev);
+	int r;
 
+	r = ni_mc_load_microcode(rdev);
+	if (r)
+		DRM_ERROR("Failed to load MC firmware!\n");
 	rv770_get_memory_type(rdev);
 	rv740_read_clock_registers(rdev);
 	btc_read_arb_registers(rdev);
@@ -2766,6 +2757,37 @@
 	r600_free_extended_power_table(rdev);
 }
 
+void btc_dpm_debugfs_print_current_performance_level(struct radeon_device *rdev,
+						     struct seq_file *m)
+{
+	struct evergreen_power_info *eg_pi = evergreen_get_pi(rdev);
+	struct radeon_ps *rps = &eg_pi->current_rps;
+	struct rv7xx_ps *ps = rv770_get_ps(rps);
+	struct rv7xx_pl *pl;
+	u32 current_index =
+		(RREG32(TARGET_AND_CURRENT_PROFILE_INDEX) & CURRENT_PROFILE_INDEX_MASK) >>
+		CURRENT_PROFILE_INDEX_SHIFT;
+
+	if (current_index > 2) {
+		seq_printf(m, "invalid dpm profile %d\n", current_index);
+	} else {
+		if (current_index == 0)
+			pl = &ps->low;
+		else if (current_index == 1)
+			pl = &ps->medium;
+		else /* current_index == 2 */
+			pl = &ps->high;
+		seq_printf(m, "uvd    vclk: %d dclk: %d\n", rps->vclk, rps->dclk);
+		if (rdev->family >= CHIP_CEDAR) {
+			seq_printf(m, "power level %d    sclk: %u mclk: %u vddc: %u vddci: %u\n",
+				   current_index, pl->sclk, pl->mclk, pl->vddc, pl->vddci);
+		} else {
+			seq_printf(m, "power level %d    sclk: %u mclk: %u vddc: %u\n",
+				   current_index, pl->sclk, pl->mclk, pl->vddc);
+		}
+	}
+}
+
 u32 btc_dpm_get_sclk(struct radeon_device *rdev, bool low)
 {
 	struct evergreen_power_info *eg_pi = evergreen_get_pi(rdev);
diff --git a/drivers/gpu/drm/radeon/btcd.h b/drivers/gpu/drm/radeon/btcd.h
index 29e32de..9c65be2 100644
--- a/drivers/gpu/drm/radeon/btcd.h
+++ b/drivers/gpu/drm/radeon/btcd.h
@@ -44,6 +44,10 @@
 #       define DYN_SPREAD_SPECTRUM_EN                   (1 << 23)
 #       define AC_DC_SW                                 (1 << 24)
 
+#define TARGET_AND_CURRENT_PROFILE_INDEX                  0x66c
+#       define CURRENT_PROFILE_INDEX_MASK                 (0xf << 4)
+#       define CURRENT_PROFILE_INDEX_SHIFT                4
+
 #define	CG_BIF_REQ_AND_RSP				0x7f4
 #define		CG_CLIENT_REQ(x)			((x) << 0)
 #define		CG_CLIENT_REQ_MASK			(0xff << 0)
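
The new btcd.h defines above back the btc_dpm.c debugfs helper: the current DPM profile index is a 4-bit field at bits 7:4 of TARGET_AND_CURRENT_PROFILE_INDEX, and values 0/1/2 select the low/medium/high performance level. A tiny worked example of the mask-and-shift; the register readback value is a made-up sample.

#include <stdint.h>
#include <stdio.h>

#define CURRENT_PROFILE_INDEX_MASK	(0xf << 4)
#define CURRENT_PROFILE_INDEX_SHIFT	4

int main(void)
{
	uint32_t reg = 0x00000021;	/* hypothetical readback */
	uint32_t idx = (reg & CURRENT_PROFILE_INDEX_MASK) >>
		       CURRENT_PROFILE_INDEX_SHIFT;

	printf("current dpm profile index: %u\n", (unsigned)idx);	/* prints 2 */
	return 0;
}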
diff --git a/drivers/gpu/drm/radeon/ci_dpm.c b/drivers/gpu/drm/radeon/ci_dpm.c
index 1ed4799..8d49104 100644
--- a/drivers/gpu/drm/radeon/ci_dpm.c
+++ b/drivers/gpu/drm/radeon/ci_dpm.c
@@ -171,8 +171,7 @@
 						     struct atom_voltage_table *voltage_table);
 extern void cik_enter_rlc_safe_mode(struct radeon_device *rdev);
 extern void cik_exit_rlc_safe_mode(struct radeon_device *rdev);
-extern void cik_update_cg(struct radeon_device *rdev,
-			  u32 block, bool enable);
+extern int ci_mc_load_microcode(struct radeon_device *rdev);
 
 static int ci_get_std_voltage_value_sidd(struct radeon_device *rdev,
 					 struct atom_voltage_table_entry *voltage_table,
@@ -4503,8 +4502,8 @@
 
 }
 
-void ci_update_current_ps(struct radeon_device *rdev,
-			  struct radeon_ps *rps)
+static void ci_update_current_ps(struct radeon_device *rdev,
+				 struct radeon_ps *rps)
 {
 	struct ci_ps *new_ps = ci_get_ps(rps);
 	struct ci_power_info *pi = ci_get_pi(rdev);
@@ -4514,8 +4513,8 @@
 	pi->current_rps.ps_priv = &pi->current_ps;
 }
 
-void ci_update_requested_ps(struct radeon_device *rdev,
-			    struct radeon_ps *rps)
+static void ci_update_requested_ps(struct radeon_device *rdev,
+				   struct radeon_ps *rps)
 {
 	struct ci_ps *new_ps = ci_get_ps(rps);
 	struct ci_power_info *pi = ci_get_pi(rdev);
@@ -4549,6 +4548,11 @@
 
 void ci_dpm_setup_asic(struct radeon_device *rdev)
 {
+	int r;
+
+	r = ci_mc_load_microcode(rdev);
+	if (r)
+		DRM_ERROR("Failed to load MC firmware!\n");
 	ci_read_clock_registers(rdev);
 	ci_get_memory_type(rdev);
 	ci_enable_acpi_power_management(rdev);
@@ -4561,13 +4565,6 @@
 	struct radeon_ps *boot_ps = rdev->pm.dpm.boot_ps;
 	int ret;
 
-	cik_update_cg(rdev, (RADEON_CG_BLOCK_GFX |
-			     RADEON_CG_BLOCK_MC |
-			     RADEON_CG_BLOCK_SDMA |
-			     RADEON_CG_BLOCK_BIF |
-			     RADEON_CG_BLOCK_UVD |
-			     RADEON_CG_BLOCK_HDP), false);
-
 	if (ci_is_smc_running(rdev))
 		return -EINVAL;
 	if (pi->voltage_control != CISLANDS_VOLTAGE_CONTROL_NONE) {
@@ -4665,6 +4662,18 @@
 		DRM_ERROR("ci_enable_power_containment failed\n");
 		return ret;
 	}
+
+	ci_enable_auto_throttle_source(rdev, RADEON_DPM_AUTO_THROTTLE_SRC_THERMAL, true);
+
+	ci_update_current_ps(rdev, boot_ps);
+
+	return 0;
+}
+
+int ci_dpm_late_enable(struct radeon_device *rdev)
+{
+	int ret;
+
 	if (rdev->irq.installed &&
 	    r600_is_internal_thermal_sensor(rdev->pm.int_thermal_type)) {
 #if 0
@@ -4685,19 +4694,8 @@
 #endif
 	}
 
-	ci_enable_auto_throttle_source(rdev, RADEON_DPM_AUTO_THROTTLE_SRC_THERMAL, true);
-
 	ci_dpm_powergate_uvd(rdev, true);
 
-	cik_update_cg(rdev, (RADEON_CG_BLOCK_GFX |
-			     RADEON_CG_BLOCK_MC |
-			     RADEON_CG_BLOCK_SDMA |
-			     RADEON_CG_BLOCK_BIF |
-			     RADEON_CG_BLOCK_UVD |
-			     RADEON_CG_BLOCK_HDP), true);
-
-	ci_update_current_ps(rdev, boot_ps);
-
 	return 0;
 }
 
@@ -4706,12 +4704,6 @@
 	struct ci_power_info *pi = ci_get_pi(rdev);
 	struct radeon_ps *boot_ps = rdev->pm.dpm.boot_ps;
 
-	cik_update_cg(rdev, (RADEON_CG_BLOCK_GFX |
-			     RADEON_CG_BLOCK_MC |
-			     RADEON_CG_BLOCK_SDMA |
-			     RADEON_CG_BLOCK_UVD |
-			     RADEON_CG_BLOCK_HDP), false);
-
 	ci_dpm_powergate_uvd(rdev, false);
 
 	if (!ci_is_smc_running(rdev))
@@ -4742,13 +4734,6 @@
 	struct radeon_ps *old_ps = &pi->current_rps;
 	int ret;
 
-	cik_update_cg(rdev, (RADEON_CG_BLOCK_GFX |
-			     RADEON_CG_BLOCK_MC |
-			     RADEON_CG_BLOCK_SDMA |
-			     RADEON_CG_BLOCK_BIF |
-			     RADEON_CG_BLOCK_UVD |
-			     RADEON_CG_BLOCK_HDP), false);
-
 	ci_find_dpm_states_clocks_in_dpm_table(rdev, new_ps);
 	if (pi->pcie_performance_request)
 		ci_request_link_speed_change_before_state_change(rdev, new_ps, old_ps);
@@ -4804,13 +4789,6 @@
 	if (pi->pcie_performance_request)
 		ci_notify_link_speed_change_after_state_change(rdev, new_ps, old_ps);
 
-	cik_update_cg(rdev, (RADEON_CG_BLOCK_GFX |
-			     RADEON_CG_BLOCK_MC |
-			     RADEON_CG_BLOCK_SDMA |
-			     RADEON_CG_BLOCK_BIF |
-			     RADEON_CG_BLOCK_UVD |
-			     RADEON_CG_BLOCK_HDP), true);
-
 	return 0;
 }
 
@@ -5023,8 +5001,8 @@
 	return 0;
 }
 
-int ci_get_vbios_boot_values(struct radeon_device *rdev,
-			     struct ci_vbios_boot_state *boot_state)
+static int ci_get_vbios_boot_values(struct radeon_device *rdev,
+				    struct ci_vbios_boot_state *boot_state)
 {
 	struct radeon_mode_info *mode_info = &rdev->mode_info;
 	int index = GetIndexIntoMasterTable(DATA, FirmwareInfo);
diff --git a/drivers/gpu/drm/radeon/ci_smc.c b/drivers/gpu/drm/radeon/ci_smc.c
index 9c745dd..8debc9d 100644
--- a/drivers/gpu/drm/radeon/ci_smc.c
+++ b/drivers/gpu/drm/radeon/ci_smc.c
@@ -28,6 +28,7 @@
 #include "cikd.h"
 #include "ppsmc.h"
 #include "radeon_ucode.h"
+#include "ci_dpm.h"
 
 static int ci_set_smc_sram_address(struct radeon_device *rdev,
 				   u32 smc_address, u32 limit)
diff --git a/drivers/gpu/drm/radeon/cik.c b/drivers/gpu/drm/radeon/cik.c
index e950fab..e6419ca 100644
--- a/drivers/gpu/drm/radeon/cik.c
+++ b/drivers/gpu/drm/radeon/cik.c
@@ -1697,7 +1697,7 @@
  * Load the GDDR MC ucode into the hw (CIK).
  * Returns 0 on success, error on failure.
  */
-static int ci_mc_load_microcode(struct radeon_device *rdev)
+int ci_mc_load_microcode(struct radeon_device *rdev)
 {
 	const __be32 *fw_data;
 	u32 running, blackout = 0;
@@ -3487,6 +3487,51 @@
 }
 
 /**
+ * cik_hdp_flush_cp_ring_emit - emit an hdp flush on the cp
+ *
+ * @rdev: radeon_device pointer
+ * @ridx: radeon ring index
+ *
+ * Emits an hdp flush on the cp.
+ */
+static void cik_hdp_flush_cp_ring_emit(struct radeon_device *rdev,
+				       int ridx)
+{
+	struct radeon_ring *ring = &rdev->ring[ridx];
+	u32 ref_and_mask;
+
+	switch (ring->idx) {
+	case CAYMAN_RING_TYPE_CP1_INDEX:
+	case CAYMAN_RING_TYPE_CP2_INDEX:
+	default:
+		switch (ring->me) {
+		case 0:
+			ref_and_mask = CP2 << ring->pipe;
+			break;
+		case 1:
+			ref_and_mask = CP6 << ring->pipe;
+			break;
+		default:
+			return;
+		}
+		break;
+	case RADEON_RING_TYPE_GFX_INDEX:
+		ref_and_mask = CP0;
+		break;
+	}
+
+	radeon_ring_write(ring, PACKET3(PACKET3_WAIT_REG_MEM, 5));
+	radeon_ring_write(ring, (WAIT_REG_MEM_OPERATION(1) | /* write, wait, write */
+				 WAIT_REG_MEM_FUNCTION(3) |  /* == */
+				 WAIT_REG_MEM_ENGINE(1)));   /* pfp */
+	radeon_ring_write(ring, GPU_HDP_FLUSH_REQ >> 2);
+	radeon_ring_write(ring, GPU_HDP_FLUSH_DONE >> 2);
+	radeon_ring_write(ring, ref_and_mask);
+	radeon_ring_write(ring, ref_and_mask);
+	radeon_ring_write(ring, 0x20); /* poll interval */
+}
+
+/**
  * cik_fence_gfx_ring_emit - emit a fence on the gfx ring
  *
  * @rdev: radeon_device pointer
@@ -3512,15 +3557,7 @@
 	radeon_ring_write(ring, fence->seq);
 	radeon_ring_write(ring, 0);
 	/* HDP flush */
-	/* We should be using the new WAIT_REG_MEM special op packet here
-	 * but it causes the CP to hang
-	 */
-	radeon_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
-	radeon_ring_write(ring, (WRITE_DATA_ENGINE_SEL(0) |
-				 WRITE_DATA_DST_SEL(0)));
-	radeon_ring_write(ring, HDP_MEM_COHERENCY_FLUSH_CNTL >> 2);
-	radeon_ring_write(ring, 0);
-	radeon_ring_write(ring, 0);
+	cik_hdp_flush_cp_ring_emit(rdev, fence->ring);
 }
 
 /**
@@ -3550,15 +3587,7 @@
 	radeon_ring_write(ring, fence->seq);
 	radeon_ring_write(ring, 0);
 	/* HDP flush */
-	/* We should be using the new WAIT_REG_MEM special op packet here
-	 * but it causes the CP to hang
-	 */
-	radeon_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
-	radeon_ring_write(ring, (WRITE_DATA_ENGINE_SEL(0) |
-				 WRITE_DATA_DST_SEL(0)));
-	radeon_ring_write(ring, HDP_MEM_COHERENCY_FLUSH_CNTL >> 2);
-	radeon_ring_write(ring, 0);
-	radeon_ring_write(ring, 0);
+	cik_hdp_flush_cp_ring_emit(rdev, fence->ring);
 }
 
 bool cik_semaphore_ring_emit(struct radeon_device *rdev,
@@ -3566,8 +3595,6 @@
 			     struct radeon_semaphore *semaphore,
 			     bool emit_wait)
 {
-/* TODO: figure out why semaphore cause lockups */
-#if 0
 	uint64_t addr = semaphore->gpu_addr;
 	unsigned sel = emit_wait ? PACKET3_SEM_SEL_WAIT : PACKET3_SEM_SEL_SIGNAL;
 
@@ -3576,9 +3603,6 @@
 	radeon_ring_write(ring, (upper_32_bits(addr) & 0xffff) | sel);
 
 	return true;
-#else
-	return false;
-#endif
 }
 
 /**
@@ -3816,6 +3840,8 @@
 	if (enable)
 		WREG32(CP_ME_CNTL, 0);
 	else {
+		if (rdev->asic->copy.copy_ring_index == RADEON_RING_TYPE_GFX_INDEX)
+			radeon_ttm_set_active_vram_size(rdev, rdev->mc.visible_vram_size);
 		WREG32(CP_ME_CNTL, (CP_ME_HALT | CP_PFP_HALT | CP_CE_HALT));
 		rdev->ring[RADEON_RING_TYPE_GFX_INDEX].ready = false;
 	}
@@ -4014,18 +4040,50 @@
 		rdev->ring[RADEON_RING_TYPE_GFX_INDEX].ready = false;
 		return r;
 	}
+
+	if (rdev->asic->copy.copy_ring_index == RADEON_RING_TYPE_GFX_INDEX)
+		radeon_ttm_set_active_vram_size(rdev, rdev->mc.real_vram_size);
+
 	return 0;
 }
 
-u32 cik_compute_ring_get_rptr(struct radeon_device *rdev,
-			      struct radeon_ring *ring)
+u32 cik_gfx_get_rptr(struct radeon_device *rdev,
+		     struct radeon_ring *ring)
 {
 	u32 rptr;
 
+	if (rdev->wb.enabled)
+		rptr = rdev->wb.wb[ring->rptr_offs/4];
+	else
+		rptr = RREG32(CP_RB0_RPTR);
 
+	return rptr;
+}
+
+u32 cik_gfx_get_wptr(struct radeon_device *rdev,
+		     struct radeon_ring *ring)
+{
+	u32 wptr;
+
+	wptr = RREG32(CP_RB0_WPTR);
+
+	return wptr;
+}
+
+void cik_gfx_set_wptr(struct radeon_device *rdev,
+		      struct radeon_ring *ring)
+{
+	WREG32(CP_RB0_WPTR, ring->wptr);
+	(void)RREG32(CP_RB0_WPTR);
+}
+
+u32 cik_compute_get_rptr(struct radeon_device *rdev,
+			 struct radeon_ring *ring)
+{
+	u32 rptr;
 
 	if (rdev->wb.enabled) {
-		rptr = le32_to_cpu(rdev->wb.wb[ring->rptr_offs/4]);
+		rptr = rdev->wb.wb[ring->rptr_offs/4];
 	} else {
 		mutex_lock(&rdev->srbm_mutex);
 		cik_srbm_select(rdev, ring->me, ring->pipe, ring->queue, 0);
@@ -4037,13 +4095,14 @@
 	return rptr;
 }
 
-u32 cik_compute_ring_get_wptr(struct radeon_device *rdev,
-			      struct radeon_ring *ring)
+u32 cik_compute_get_wptr(struct radeon_device *rdev,
+			 struct radeon_ring *ring)
 {
 	u32 wptr;
 
 	if (rdev->wb.enabled) {
-		wptr = le32_to_cpu(rdev->wb.wb[ring->wptr_offs/4]);
+		/* XXX check if swapping is necessary on BE */
+		wptr = rdev->wb.wb[ring->wptr_offs/4];
 	} else {
 		mutex_lock(&rdev->srbm_mutex);
 		cik_srbm_select(rdev, ring->me, ring->pipe, ring->queue, 0);
@@ -4055,10 +4114,11 @@
 	return wptr;
 }
 
-void cik_compute_ring_set_wptr(struct radeon_device *rdev,
-			       struct radeon_ring *ring)
+void cik_compute_set_wptr(struct radeon_device *rdev,
+			  struct radeon_ring *ring)
 {
-	rdev->wb.wb[ring->wptr_offs/4] = cpu_to_le32(ring->wptr);
+	/* XXX check if swapping is necessary on BE */
+	rdev->wb.wb[ring->wptr_offs/4] = ring->wptr;
 	WDOORBELL32(ring->doorbell_index, ring->wptr);
 }
 
@@ -4852,6 +4912,160 @@
 	cik_print_gpu_status_regs(rdev);
 }
 
+struct kv_reset_save_regs {
+	u32 gmcon_reng_execute;
+	u32 gmcon_misc;
+	u32 gmcon_misc3;
+};
+
+static void kv_save_regs_for_reset(struct radeon_device *rdev,
+				   struct kv_reset_save_regs *save)
+{
+	save->gmcon_reng_execute = RREG32(GMCON_RENG_EXECUTE);
+	save->gmcon_misc = RREG32(GMCON_MISC);
+	save->gmcon_misc3 = RREG32(GMCON_MISC3);
+
+	WREG32(GMCON_RENG_EXECUTE, save->gmcon_reng_execute & ~RENG_EXECUTE_ON_PWR_UP);
+	WREG32(GMCON_MISC, save->gmcon_misc & ~(RENG_EXECUTE_ON_REG_UPDATE |
+						STCTRL_STUTTER_EN));
+}
+
+static void kv_restore_regs_for_reset(struct radeon_device *rdev,
+				      struct kv_reset_save_regs *save)
+{
+	int i;
+
+	WREG32(GMCON_PGFSM_WRITE, 0);
+	WREG32(GMCON_PGFSM_CONFIG, 0x200010ff);
+
+	for (i = 0; i < 5; i++)
+		WREG32(GMCON_PGFSM_WRITE, 0);
+
+	WREG32(GMCON_PGFSM_WRITE, 0);
+	WREG32(GMCON_PGFSM_CONFIG, 0x300010ff);
+
+	for (i = 0; i < 5; i++)
+		WREG32(GMCON_PGFSM_WRITE, 0);
+
+	WREG32(GMCON_PGFSM_WRITE, 0x210000);
+	WREG32(GMCON_PGFSM_CONFIG, 0xa00010ff);
+
+	for (i = 0; i < 5; i++)
+		WREG32(GMCON_PGFSM_WRITE, 0);
+
+	WREG32(GMCON_PGFSM_WRITE, 0x21003);
+	WREG32(GMCON_PGFSM_CONFIG, 0xb00010ff);
+
+	for (i = 0; i < 5; i++)
+		WREG32(GMCON_PGFSM_WRITE, 0);
+
+	WREG32(GMCON_PGFSM_WRITE, 0x2b00);
+	WREG32(GMCON_PGFSM_CONFIG, 0xc00010ff);
+
+	for (i = 0; i < 5; i++)
+		WREG32(GMCON_PGFSM_WRITE, 0);
+
+	WREG32(GMCON_PGFSM_WRITE, 0);
+	WREG32(GMCON_PGFSM_CONFIG, 0xd00010ff);
+
+	for (i = 0; i < 5; i++)
+		WREG32(GMCON_PGFSM_WRITE, 0);
+
+	WREG32(GMCON_PGFSM_WRITE, 0x420000);
+	WREG32(GMCON_PGFSM_CONFIG, 0x100010ff);
+
+	for (i = 0; i < 5; i++)
+		WREG32(GMCON_PGFSM_WRITE, 0);
+
+	WREG32(GMCON_PGFSM_WRITE, 0x120202);
+	WREG32(GMCON_PGFSM_CONFIG, 0x500010ff);
+
+	for (i = 0; i < 5; i++)
+		WREG32(GMCON_PGFSM_WRITE, 0);
+
+	WREG32(GMCON_PGFSM_WRITE, 0x3e3e36);
+	WREG32(GMCON_PGFSM_CONFIG, 0x600010ff);
+
+	for (i = 0; i < 5; i++)
+		WREG32(GMCON_PGFSM_WRITE, 0);
+
+	WREG32(GMCON_PGFSM_WRITE, 0x373f3e);
+	WREG32(GMCON_PGFSM_CONFIG, 0x700010ff);
+
+	for (i = 0; i < 5; i++)
+		WREG32(GMCON_PGFSM_WRITE, 0);
+
+	WREG32(GMCON_PGFSM_WRITE, 0x3e1332);
+	WREG32(GMCON_PGFSM_CONFIG, 0xe00010ff);
+
+	WREG32(GMCON_MISC3, save->gmcon_misc3);
+	WREG32(GMCON_MISC, save->gmcon_misc);
+	WREG32(GMCON_RENG_EXECUTE, save->gmcon_reng_execute);
+}
+
+static void cik_gpu_pci_config_reset(struct radeon_device *rdev)
+{
+	struct evergreen_mc_save save;
+	struct kv_reset_save_regs kv_save = { 0 };
+	u32 tmp, i;
+
+	dev_info(rdev->dev, "GPU pci config reset\n");
+
+	/* disable dpm? */
+
+	/* disable cg/pg */
+	cik_fini_pg(rdev);
+	cik_fini_cg(rdev);
+
+	/* Disable GFX parsing/prefetching */
+	WREG32(CP_ME_CNTL, CP_ME_HALT | CP_PFP_HALT | CP_CE_HALT);
+
+	/* Disable MEC parsing/prefetching */
+	WREG32(CP_MEC_CNTL, MEC_ME1_HALT | MEC_ME2_HALT);
+
+	/* sdma0 */
+	tmp = RREG32(SDMA0_ME_CNTL + SDMA0_REGISTER_OFFSET);
+	tmp |= SDMA_HALT;
+	WREG32(SDMA0_ME_CNTL + SDMA0_REGISTER_OFFSET, tmp);
+	/* sdma1 */
+	tmp = RREG32(SDMA0_ME_CNTL + SDMA1_REGISTER_OFFSET);
+	tmp |= SDMA_HALT;
+	WREG32(SDMA0_ME_CNTL + SDMA1_REGISTER_OFFSET, tmp);
+	/* XXX other engines? */
+
+	/* halt the rlc, disable cp internal ints */
+	cik_rlc_stop(rdev);
+
+	udelay(50);
+
+	/* disable mem access */
+	evergreen_mc_stop(rdev, &save);
+	if (evergreen_mc_wait_for_idle(rdev)) {
+		dev_warn(rdev->dev, "Wait for MC idle timed out !\n");
+	}
+
+	if (rdev->flags & RADEON_IS_IGP)
+		kv_save_regs_for_reset(rdev, &kv_save);
+
+	/* disable BM */
+	pci_clear_master(rdev->pdev);
+	/* reset */
+	radeon_pci_config_reset(rdev);
+
+	udelay(100);
+
+	/* wait for asic to come out of reset */
+	for (i = 0; i < rdev->usec_timeout; i++) {
+		if (RREG32(CONFIG_MEMSIZE) != 0xffffffff)
+			break;
+		udelay(1);
+	}
+
+	/* does asic init need to be run first??? */
+	if (rdev->flags & RADEON_IS_IGP)
+		kv_restore_regs_for_reset(rdev, &kv_save);
+}
+
 /**
  * cik_asic_reset - soft reset GPU
  *
@@ -4870,10 +5084,17 @@
 	if (reset_mask)
 		r600_set_bios_scratch_engine_hung(rdev, true);
 
+	/* try soft reset */
 	cik_gpu_soft_reset(rdev, reset_mask);
 
 	reset_mask = cik_gpu_check_soft_reset(rdev);
 
+	/* try pci config reset */
+	if (reset_mask && radeon_hard_reset)
+		cik_gpu_pci_config_reset(rdev);
+
+	reset_mask = cik_gpu_check_soft_reset(rdev);
+
 	if (!reset_mask)
 		r600_set_bios_scratch_engine_hung(rdev, false);
 
@@ -5138,20 +5359,6 @@
 				WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT |
 				WRITE_PROTECTION_FAULT_ENABLE_DEFAULT);
 
-	/* TC cache setup ??? */
-	WREG32(TC_CFG_L1_LOAD_POLICY0, 0);
-	WREG32(TC_CFG_L1_LOAD_POLICY1, 0);
-	WREG32(TC_CFG_L1_STORE_POLICY, 0);
-
-	WREG32(TC_CFG_L2_LOAD_POLICY0, 0);
-	WREG32(TC_CFG_L2_LOAD_POLICY1, 0);
-	WREG32(TC_CFG_L2_STORE_POLICY0, 0);
-	WREG32(TC_CFG_L2_STORE_POLICY1, 0);
-	WREG32(TC_CFG_L2_ATOMIC_POLICY, 0);
-
-	WREG32(TC_CFG_L1_VOLATILE, 0);
-	WREG32(TC_CFG_L2_VOLATILE, 0);
-
 	if (rdev->family == CHIP_KAVERI) {
 		u32 tmp = RREG32(CHUB_CONTROL);
 		tmp &= ~BYPASS_VM;
@@ -5367,16 +5574,7 @@
 	radeon_ring_write(ring, VMID(0));
 
 	/* HDP flush */
-	/* We should be using the WAIT_REG_MEM packet here like in
-	 * cik_fence_ring_emit(), but it causes the CP to hang in this
-	 * context...
-	 */
-	radeon_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
-	radeon_ring_write(ring, (WRITE_DATA_ENGINE_SEL(0) |
-				 WRITE_DATA_DST_SEL(0)));
-	radeon_ring_write(ring, HDP_MEM_COHERENCY_FLUSH_CNTL >> 2);
-	radeon_ring_write(ring, 0);
-	radeon_ring_write(ring, 0);
+	cik_hdp_flush_cp_ring_emit(rdev, ridx);
 
 	/* bits 0-15 are the VM contexts0-15 */
 	radeon_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
@@ -7503,26 +7701,7 @@
 
 	cik_mc_program(rdev);
 
-	if (rdev->flags & RADEON_IS_IGP) {
-		if (!rdev->me_fw || !rdev->pfp_fw || !rdev->ce_fw ||
-		    !rdev->mec_fw || !rdev->sdma_fw || !rdev->rlc_fw) {
-			r = cik_init_microcode(rdev);
-			if (r) {
-				DRM_ERROR("Failed to load firmware!\n");
-				return r;
-			}
-		}
-	} else {
-		if (!rdev->me_fw || !rdev->pfp_fw || !rdev->ce_fw ||
-		    !rdev->mec_fw || !rdev->sdma_fw || !rdev->rlc_fw ||
-		    !rdev->mc_fw) {
-			r = cik_init_microcode(rdev);
-			if (r) {
-				DRM_ERROR("Failed to load firmware!\n");
-				return r;
-			}
-		}
-
+	if (!(rdev->flags & RADEON_IS_IGP) && !rdev->pm.dpm_enabled) {
 		r = ci_mc_load_microcode(rdev);
 		if (r) {
 			DRM_ERROR("Failed to load MC firmware!\n");
@@ -7627,7 +7806,6 @@
 
 	ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
 	r = radeon_ring_init(rdev, ring, ring->ring_size, RADEON_WB_CP_RPTR_OFFSET,
-			     CP_RB0_RPTR, CP_RB0_WPTR,
 			     PACKET3(PACKET3_NOP, 0x3FFF));
 	if (r)
 		return r;
@@ -7636,7 +7814,6 @@
 	/* type-2 packets are deprecated on MEC, use type-3 instead */
 	ring = &rdev->ring[CAYMAN_RING_TYPE_CP1_INDEX];
 	r = radeon_ring_init(rdev, ring, ring->ring_size, RADEON_WB_CP1_RPTR_OFFSET,
-			     CP_HQD_PQ_RPTR, CP_HQD_PQ_WPTR,
 			     PACKET3(PACKET3_NOP, 0x3FFF));
 	if (r)
 		return r;
@@ -7648,7 +7825,6 @@
 	/* type-2 packets are deprecated on MEC, use type-3 instead */
 	ring = &rdev->ring[CAYMAN_RING_TYPE_CP2_INDEX];
 	r = radeon_ring_init(rdev, ring, ring->ring_size, RADEON_WB_CP2_RPTR_OFFSET,
-			     CP_HQD_PQ_RPTR, CP_HQD_PQ_WPTR,
 			     PACKET3(PACKET3_NOP, 0x3FFF));
 	if (r)
 		return r;
@@ -7660,16 +7836,12 @@
 
 	ring = &rdev->ring[R600_RING_TYPE_DMA_INDEX];
 	r = radeon_ring_init(rdev, ring, ring->ring_size, R600_WB_DMA_RPTR_OFFSET,
-			     SDMA0_GFX_RB_RPTR + SDMA0_REGISTER_OFFSET,
-			     SDMA0_GFX_RB_WPTR + SDMA0_REGISTER_OFFSET,
 			     SDMA_PACKET(SDMA_OPCODE_NOP, 0, 0));
 	if (r)
 		return r;
 
 	ring = &rdev->ring[CAYMAN_RING_TYPE_DMA1_INDEX];
 	r = radeon_ring_init(rdev, ring, ring->ring_size, CAYMAN_WB_DMA1_RPTR_OFFSET,
-			     SDMA0_GFX_RB_RPTR + SDMA1_REGISTER_OFFSET,
-			     SDMA0_GFX_RB_WPTR + SDMA1_REGISTER_OFFSET,
 			     SDMA_PACKET(SDMA_OPCODE_NOP, 0, 0));
 	if (r)
 		return r;
@@ -7685,7 +7857,6 @@
 	ring = &rdev->ring[R600_RING_TYPE_UVD_INDEX];
 	if (ring->ring_size) {
 		r = radeon_ring_init(rdev, ring, ring->ring_size, 0,
-				     UVD_RBC_RB_RPTR, UVD_RBC_RB_WPTR,
 				     RADEON_CP_PACKET2);
 		if (!r)
 			r = uvd_v1_0_init(rdev);
@@ -7731,6 +7902,8 @@
 	/* init golden registers */
 	cik_init_golden_registers(rdev);
 
+	radeon_pm_resume(rdev);
+
 	rdev->accel_working = true;
 	r = cik_startup(rdev);
 	if (r) {
@@ -7754,6 +7927,7 @@
  */
 int cik_suspend(struct radeon_device *rdev)
 {
+	radeon_pm_suspend(rdev);
 	dce6_audio_fini(rdev);
 	radeon_vm_manager_fini(rdev);
 	cik_cp_enable(rdev, false);
@@ -7835,6 +8009,30 @@
 	if (r)
 		return r;
 
+	if (rdev->flags & RADEON_IS_IGP) {
+		if (!rdev->me_fw || !rdev->pfp_fw || !rdev->ce_fw ||
+		    !rdev->mec_fw || !rdev->sdma_fw || !rdev->rlc_fw) {
+			r = cik_init_microcode(rdev);
+			if (r) {
+				DRM_ERROR("Failed to load firmware!\n");
+				return r;
+			}
+		}
+	} else {
+		if (!rdev->me_fw || !rdev->pfp_fw || !rdev->ce_fw ||
+		    !rdev->mec_fw || !rdev->sdma_fw || !rdev->rlc_fw ||
+		    !rdev->mc_fw) {
+			r = cik_init_microcode(rdev);
+			if (r) {
+				DRM_ERROR("Failed to load firmware!\n");
+				return r;
+			}
+		}
+	}
+
+	/* Initialize power management */
+	radeon_pm_init(rdev);
+
 	ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
 	ring->ring_obj = NULL;
 	r600_ring_init(rdev, ring, 1024 * 1024);
@@ -7915,6 +8113,7 @@
  */
 void cik_fini(struct radeon_device *rdev)
 {
+	radeon_pm_fini(rdev);
 	cik_cp_fini(rdev);
 	cik_sdma_fini(rdev);
 	cik_fini_pg(rdev);
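
The kv_restore_regs_for_reset() block added above repeats one pattern eleven times: write a data word to GMCON_PGFSM_WRITE, write a config word to GMCON_PGFSM_CONFIG, then issue five zero writes, with the final pair taking no trailing writes. A table-driven equivalent is sketched below purely as a readability note, not as the patch's code; write_reg() and the register-address parameters are hypothetical stand-ins for WREG32() and the GMCON_PGFSM_* offsets.

#include <stdint.h>

#define ARRAY_SIZE(a)	(sizeof(a) / sizeof((a)[0]))

struct pgfsm_step {
	uint32_t write;		/* value for GMCON_PGFSM_WRITE */
	uint32_t config;	/* value for GMCON_PGFSM_CONFIG */
};

static const struct pgfsm_step pgfsm_restore_seq[] = {
	{ 0x000000, 0x200010ff }, { 0x000000, 0x300010ff },
	{ 0x210000, 0xa00010ff }, { 0x021003, 0xb00010ff },
	{ 0x002b00, 0xc00010ff }, { 0x000000, 0xd00010ff },
	{ 0x420000, 0x100010ff }, { 0x120202, 0x500010ff },
	{ 0x3e3e36, 0x600010ff }, { 0x373f3e, 0x700010ff },
	{ 0x3e1332, 0xe00010ff },
};

static void pgfsm_restore(void (*write_reg)(uint32_t reg, uint32_t val),
			  uint32_t write_reg_addr, uint32_t config_reg_addr)
{
	unsigned int i, j;

	for (i = 0; i < ARRAY_SIZE(pgfsm_restore_seq); i++) {
		write_reg(write_reg_addr, pgfsm_restore_seq[i].write);
		write_reg(config_reg_addr, pgfsm_restore_seq[i].config);
		if (i + 1 == ARRAY_SIZE(pgfsm_restore_seq))
			break;	/* the last step has no trailing writes */
		for (j = 0; j < 5; j++)
			write_reg(write_reg_addr, 0);
	}
}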
diff --git a/drivers/gpu/drm/radeon/cik_sdma.c b/drivers/gpu/drm/radeon/cik_sdma.c
index d08b83c..1ecb3f1 100644
--- a/drivers/gpu/drm/radeon/cik_sdma.c
+++ b/drivers/gpu/drm/radeon/cik_sdma.c
@@ -52,6 +52,75 @@
  */
 
 /**
+ * cik_sdma_get_rptr - get the current read pointer
+ *
+ * @rdev: radeon_device pointer
+ * @ring: radeon ring pointer
+ *
+ * Get the current rptr from the hardware (CIK+).
+ */
+uint32_t cik_sdma_get_rptr(struct radeon_device *rdev,
+			   struct radeon_ring *ring)
+{
+	u32 rptr, reg;
+
+	if (rdev->wb.enabled) {
+		rptr = rdev->wb.wb[ring->rptr_offs/4];
+	} else {
+		if (ring->idx == R600_RING_TYPE_DMA_INDEX)
+			reg = SDMA0_GFX_RB_RPTR + SDMA0_REGISTER_OFFSET;
+		else
+			reg = SDMA0_GFX_RB_RPTR + SDMA1_REGISTER_OFFSET;
+
+		rptr = RREG32(reg);
+	}
+
+	return (rptr & 0x3fffc) >> 2;
+}
+
+/**
+ * cik_sdma_get_wptr - get the current write pointer
+ *
+ * @rdev: radeon_device pointer
+ * @ring: radeon ring pointer
+ *
+ * Get the current wptr from the hardware (CIK+).
+ */
+uint32_t cik_sdma_get_wptr(struct radeon_device *rdev,
+			   struct radeon_ring *ring)
+{
+	u32 reg;
+
+	if (ring->idx == R600_RING_TYPE_DMA_INDEX)
+		reg = SDMA0_GFX_RB_WPTR + SDMA0_REGISTER_OFFSET;
+	else
+		reg = SDMA0_GFX_RB_WPTR + SDMA1_REGISTER_OFFSET;
+
+	return (RREG32(reg) & 0x3fffc) >> 2;
+}
+
+/**
+ * cik_sdma_set_wptr - commit the write pointer
+ *
+ * @rdev: radeon_device pointer
+ * @ring: radeon ring pointer
+ *
+ * Write the wptr back to the hardware (CIK+).
+ */
+void cik_sdma_set_wptr(struct radeon_device *rdev,
+		       struct radeon_ring *ring)
+{
+	u32 reg;
+
+	if (ring->idx == R600_RING_TYPE_DMA_INDEX)
+		reg = SDMA0_GFX_RB_WPTR + SDMA0_REGISTER_OFFSET;
+	else
+		reg = SDMA0_GFX_RB_WPTR + SDMA1_REGISTER_OFFSET;
+
+	WREG32(reg, (ring->wptr << 2) & 0x3fffc);
+}
+
+/**
  * cik_sdma_ring_ib_execute - Schedule an IB on the DMA engine
  *
  * @rdev: radeon_device pointer
@@ -88,6 +157,35 @@
 }
 
 /**
+ * cik_sdma_hdp_flush_ring_emit - emit an hdp flush on the DMA ring
+ *
+ * @rdev: radeon_device pointer
+ * @ridx: radeon ring index
+ *
+ * Emit an hdp flush packet on the requested DMA ring.
+ */
+static void cik_sdma_hdp_flush_ring_emit(struct radeon_device *rdev,
+					 int ridx)
+{
+	struct radeon_ring *ring = &rdev->ring[ridx];
+	u32 extra_bits = (SDMA_POLL_REG_MEM_EXTRA_OP(1) |
+			  SDMA_POLL_REG_MEM_EXTRA_FUNC(3)); /* == */
+	u32 ref_and_mask;
+
+	if (ridx == R600_RING_TYPE_DMA_INDEX)
+		ref_and_mask = SDMA0;
+	else
+		ref_and_mask = SDMA1;
+
+	radeon_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_POLL_REG_MEM, 0, extra_bits));
+	radeon_ring_write(ring, GPU_HDP_FLUSH_DONE);
+	radeon_ring_write(ring, GPU_HDP_FLUSH_REQ);
+	radeon_ring_write(ring, ref_and_mask); /* reference */
+	radeon_ring_write(ring, ref_and_mask); /* mask */
+	radeon_ring_write(ring, (0xfff << 16) | 10); /* retry count, poll interval */
+}
+
+/**
  * cik_sdma_fence_ring_emit - emit a fence on the DMA ring
  *
  * @rdev: radeon_device pointer
@@ -111,12 +209,7 @@
 	/* generate an interrupt */
 	radeon_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_TRAP, 0, 0));
 	/* flush HDP */
-	/* We should be using the new POLL_REG_MEM special op packet here
-	 * but it causes sDMA to hang sometimes
-	 */
-	radeon_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_SRBM_WRITE, 0, 0xf000));
-	radeon_ring_write(ring, HDP_MEM_COHERENCY_FLUSH_CNTL >> 2);
-	radeon_ring_write(ring, 0);
+	cik_sdma_hdp_flush_ring_emit(rdev, fence->ring);
 }
 
 /**
@@ -157,7 +250,9 @@
 	u32 rb_cntl, reg_offset;
 	int i;
 
-	radeon_ttm_set_active_vram_size(rdev, rdev->mc.visible_vram_size);
+	if ((rdev->asic->copy.copy_ring_index == R600_RING_TYPE_DMA_INDEX) ||
+	    (rdev->asic->copy.copy_ring_index == CAYMAN_RING_TYPE_DMA1_INDEX))
+		radeon_ttm_set_active_vram_size(rdev, rdev->mc.visible_vram_size);
 
 	for (i = 0; i < 2; i++) {
 		if (i == 0)
@@ -288,7 +383,9 @@
 		}
 	}
 
-	radeon_ttm_set_active_vram_size(rdev, rdev->mc.real_vram_size);
+	if ((rdev->asic->copy.copy_ring_index == R600_RING_TYPE_DMA_INDEX) ||
+	    (rdev->asic->copy.copy_ring_index == CAYMAN_RING_TYPE_DMA1_INDEX))
+		radeon_ttm_set_active_vram_size(rdev, rdev->mc.real_vram_size);
 
 	return 0;
 }
@@ -747,12 +844,7 @@
 	radeon_ring_write(ring, VMID(0));
 
 	/* flush HDP */
-	/* We should be using the new POLL_REG_MEM special op packet here
-	 * but it causes sDMA to hang sometimes
-	 */
-	radeon_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_SRBM_WRITE, 0, 0xf000));
-	radeon_ring_write(ring, HDP_MEM_COHERENCY_FLUSH_CNTL >> 2);
-	radeon_ring_write(ring, 0);
+	cik_sdma_hdp_flush_ring_emit(rdev, ridx);
 
 	/* flush TLB */
 	radeon_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_SRBM_WRITE, 0, 0xf000));
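
The new cik_sdma get/set pointer callbacks above convert between the driver's dword-indexed ring pointer and the byte-offset register format: reads mask to bits 17:2 and shift down, writes shift back up and mask. A small worked example of the round trip; the register value is a made-up sample.

#include <stdint.h>
#include <stdio.h>

static uint32_t reg_to_dw_index(uint32_t reg)
{
	return (reg & 0x3fffc) >> 2;	/* byte offset -> dword index */
}

static uint32_t dw_index_to_reg(uint32_t wptr)
{
	return (wptr << 2) & 0x3fffc;	/* dword index -> byte offset */
}

int main(void)
{
	uint32_t rb_wptr_reg = 0x140;	/* hypothetical: byte offset 320 */

	printf("dword index %u\n", (unsigned)reg_to_dw_index(rb_wptr_reg));	/* 80 */
	printf("register value 0x%x\n", (unsigned)dw_index_to_reg(80));	/* 0x140 */
	return 0;
}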
diff --git a/drivers/gpu/drm/radeon/cikd.h b/drivers/gpu/drm/radeon/cikd.h
index 5964af5..98bae9d7 100644
--- a/drivers/gpu/drm/radeon/cikd.h
+++ b/drivers/gpu/drm/radeon/cikd.h
@@ -724,6 +724,17 @@
 
 #define ATC_MISC_CG           				0x3350
 
+#define GMCON_RENG_EXECUTE				0x3508
+#define 	RENG_EXECUTE_ON_PWR_UP			(1 << 0)
+#define GMCON_MISC					0x350c
+#define 	RENG_EXECUTE_ON_REG_UPDATE		(1 << 11)
+#define 	STCTRL_STUTTER_EN			(1 << 16)
+
+#define GMCON_PGFSM_CONFIG				0x3538
+#define GMCON_PGFSM_WRITE				0x353c
+#define GMCON_PGFSM_READ				0x3540
+#define GMCON_MISC3					0x3544
+
 #define MC_SEQ_CNTL_3                                     0x3600
 #       define CAC_EN                                     (1 << 31)
 #define MC_SEQ_G5PDX_CTRL                                 0x3604
diff --git a/drivers/gpu/drm/radeon/cypress_dpm.c b/drivers/gpu/drm/radeon/cypress_dpm.c
index 920e1e4..cf783fc 100644
--- a/drivers/gpu/drm/radeon/cypress_dpm.c
+++ b/drivers/gpu/drm/radeon/cypress_dpm.c
@@ -1905,21 +1905,6 @@
 	if (pi->mg_clock_gating)
 		cypress_mg_clock_gating_enable(rdev, true);
 
-	if (rdev->irq.installed &&
-	    r600_is_internal_thermal_sensor(rdev->pm.int_thermal_type)) {
-		PPSMC_Result result;
-
-		ret = rv770_set_thermal_temperature_range(rdev, R600_TEMP_RANGE_MIN, R600_TEMP_RANGE_MAX);
-		if (ret)
-			return ret;
-		rdev->irq.dpm_thermal = true;
-		radeon_irq_set(rdev);
-		result = rv770_send_msg_to_smc(rdev, PPSMC_MSG_EnableThermalInterrupt);
-
-		if (result != PPSMC_Result_OK)
-			DRM_DEBUG_KMS("Could not enable thermal interrupts.\n");
-	}
-
 	rv770_enable_auto_throttle_source(rdev, RADEON_DPM_AUTO_THROTTLE_SRC_THERMAL, true);
 
 	return 0;
diff --git a/drivers/gpu/drm/radeon/evergreen.c b/drivers/gpu/drm/radeon/evergreen.c
index 9702e55..5623e75 100644
--- a/drivers/gpu/drm/radeon/evergreen.c
+++ b/drivers/gpu/drm/radeon/evergreen.c
@@ -146,6 +146,7 @@
 extern void si_get_csb_buffer(struct radeon_device *rdev, volatile u32 *buffer);
 extern u32 cik_get_csb_size(struct radeon_device *rdev);
 extern void cik_get_csb_buffer(struct radeon_device *rdev, volatile u32 *buffer);
+extern void rv770_set_clk_bypass_mode(struct radeon_device *rdev);
 
 static const u32 evergreen_golden_registers[] =
 {
@@ -1679,7 +1680,7 @@
 	case RADEON_HPD_6:
 		if (RREG32(DC_HPD6_INT_STATUS) & DC_HPDx_SENSE)
 			connected = true;
-			break;
+		break;
 	default:
 		break;
 	}
@@ -3867,6 +3868,48 @@
 	evergreen_print_gpu_status_regs(rdev);
 }
 
+void evergreen_gpu_pci_config_reset(struct radeon_device *rdev)
+{
+	struct evergreen_mc_save save;
+	u32 tmp, i;
+
+	dev_info(rdev->dev, "GPU pci config reset\n");
+
+	/* disable dpm? */
+
+	/* Disable CP parsing/prefetching */
+	WREG32(CP_ME_CNTL, CP_ME_HALT | CP_PFP_HALT);
+	udelay(50);
+	/* Disable DMA */
+	tmp = RREG32(DMA_RB_CNTL);
+	tmp &= ~DMA_RB_ENABLE;
+	WREG32(DMA_RB_CNTL, tmp);
+	/* XXX other engines? */
+
+	/* halt the rlc */
+	r600_rlc_stop(rdev);
+
+	udelay(50);
+
+	/* set mclk/sclk to bypass */
+	rv770_set_clk_bypass_mode(rdev);
+	/* disable BM */
+	pci_clear_master(rdev->pdev);
+	/* disable mem access */
+	evergreen_mc_stop(rdev, &save);
+	if (evergreen_mc_wait_for_idle(rdev)) {
+		dev_warn(rdev->dev, "Wait for MC idle timed out !\n");
+	}
+	/* reset */
+	radeon_pci_config_reset(rdev);
+	/* wait for asic to come out of reset */
+	for (i = 0; i < rdev->usec_timeout; i++) {
+		if (RREG32(CONFIG_MEMSIZE) != 0xffffffff)
+			break;
+		udelay(1);
+	}
+}
+
 int evergreen_asic_reset(struct radeon_device *rdev)
 {
 	u32 reset_mask;
@@ -3876,10 +3919,17 @@
 	if (reset_mask)
 		r600_set_bios_scratch_engine_hung(rdev, true);
 
+	/* try soft reset */
 	evergreen_gpu_soft_reset(rdev, reset_mask);
 
 	reset_mask = evergreen_gpu_check_soft_reset(rdev);
 
+	/* try pci config reset */
+	if (reset_mask && radeon_hard_reset)
+		evergreen_gpu_pci_config_reset(rdev);
+
+	reset_mask = evergreen_gpu_check_soft_reset(rdev);
+
 	if (!reset_mask)
 		r600_set_bios_scratch_engine_hung(rdev, false);
 
@@ -4298,8 +4348,8 @@
 		WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC5_REGISTER_OFFSET, 0);
 	}
 
-	/* only one DAC on DCE6 */
-	if (!ASIC_IS_DCE6(rdev))
+	/* only one DAC on DCE5 */
+	if (!ASIC_IS_DCE5(rdev))
 		WREG32(DACA_AUTODETECT_INT_CONTROL, 0);
 	WREG32(DACB_AUTODETECT_INT_CONTROL, 0);
 
@@ -5109,27 +5159,12 @@
 
 	evergreen_mc_program(rdev);
 
-	if (ASIC_IS_DCE5(rdev)) {
-		if (!rdev->me_fw || !rdev->pfp_fw || !rdev->rlc_fw || !rdev->mc_fw) {
-			r = ni_init_microcode(rdev);
-			if (r) {
-				DRM_ERROR("Failed to load firmware!\n");
-				return r;
-			}
-		}
+	if (ASIC_IS_DCE5(rdev) && !rdev->pm.dpm_enabled) {
 		r = ni_mc_load_microcode(rdev);
 		if (r) {
 			DRM_ERROR("Failed to load MC firmware!\n");
 			return r;
 		}
-	} else {
-		if (!rdev->me_fw || !rdev->pfp_fw || !rdev->rlc_fw) {
-			r = r600_init_microcode(rdev);
-			if (r) {
-				DRM_ERROR("Failed to load firmware!\n");
-				return r;
-			}
-		}
 	}
 
 	if (rdev->flags & RADEON_IS_AGP) {
@@ -5199,14 +5234,12 @@
 
 	ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
 	r = radeon_ring_init(rdev, ring, ring->ring_size, RADEON_WB_CP_RPTR_OFFSET,
-			     R600_CP_RB_RPTR, R600_CP_RB_WPTR,
 			     RADEON_CP_PACKET2);
 	if (r)
 		return r;
 
 	ring = &rdev->ring[R600_RING_TYPE_DMA_INDEX];
 	r = radeon_ring_init(rdev, ring, ring->ring_size, R600_WB_DMA_RPTR_OFFSET,
-			     DMA_RB_RPTR, DMA_RB_WPTR,
 			     DMA_PACKET(DMA_PACKET_NOP, 0, 0));
 	if (r)
 		return r;
@@ -5224,7 +5257,6 @@
 	ring = &rdev->ring[R600_RING_TYPE_UVD_INDEX];
 	if (ring->ring_size) {
 		r = radeon_ring_init(rdev, ring, ring->ring_size, 0,
-				     UVD_RBC_RB_RPTR, UVD_RBC_RB_WPTR,
 				     RADEON_CP_PACKET2);
 		if (!r)
 			r = uvd_v1_0_init(rdev);
@@ -5267,6 +5299,8 @@
 	/* init golden registers */
 	evergreen_init_golden_registers(rdev);
 
+	radeon_pm_resume(rdev);
+
 	rdev->accel_working = true;
 	r = evergreen_startup(rdev);
 	if (r) {
@@ -5281,6 +5315,7 @@
 
 int evergreen_suspend(struct radeon_device *rdev)
 {
+	radeon_pm_suspend(rdev);
 	r600_audio_fini(rdev);
 	uvd_v1_0_fini(rdev);
 	radeon_uvd_suspend(rdev);
@@ -5357,6 +5392,27 @@
 	if (r)
 		return r;
 
+	if (ASIC_IS_DCE5(rdev)) {
+		if (!rdev->me_fw || !rdev->pfp_fw || !rdev->rlc_fw || !rdev->mc_fw) {
+			r = ni_init_microcode(rdev);
+			if (r) {
+				DRM_ERROR("Failed to load firmware!\n");
+				return r;
+			}
+		}
+	} else {
+		if (!rdev->me_fw || !rdev->pfp_fw || !rdev->rlc_fw) {
+			r = r600_init_microcode(rdev);
+			if (r) {
+				DRM_ERROR("Failed to load firmware!\n");
+				return r;
+			}
+		}
+	}
+
+	/* Initialize power management */
+	radeon_pm_init(rdev);
+
 	rdev->ring[RADEON_RING_TYPE_GFX_INDEX].ring_obj = NULL;
 	r600_ring_init(rdev, &rdev->ring[RADEON_RING_TYPE_GFX_INDEX], 1024 * 1024);
 
@@ -5409,6 +5465,7 @@
 
 void evergreen_fini(struct radeon_device *rdev)
 {
+	radeon_pm_fini(rdev);
 	r600_audio_fini(rdev);
 	r700_cp_fini(rdev);
 	r600_dma_fini(rdev);
diff --git a/drivers/gpu/drm/radeon/evergreen_cs.c b/drivers/gpu/drm/radeon/evergreen_cs.c
index eb8ac31..c7cac07 100644
--- a/drivers/gpu/drm/radeon/evergreen_cs.c
+++ b/drivers/gpu/drm/radeon/evergreen_cs.c
@@ -967,7 +967,10 @@
 	if (track->cb_dirty) {
 		tmp = track->cb_target_mask;
 		for (i = 0; i < 8; i++) {
-			if ((tmp >> (i * 4)) & 0xF) {
+			u32 format = G_028C70_FORMAT(track->cb_color_info[i]);
+
+			if (format != V_028C70_COLOR_INVALID &&
+			    (tmp >> (i * 4)) & 0xF) {
 				/* at least one component is enabled */
 				if (track->cb_color_bo[i] == NULL) {
 					dev_warn(p->dev, "%s:%d mask 0x%08X | 0x%08X no cb for %d\n",
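
The evergreen_cs.c hunk above tightens the color-buffer check: the CB target mask carries one 4-bit component-enable field per render target, and a target whose programmed format is COLOR_INVALID is now skipped even when its mask bits are set. A small sketch of that per-target walk follows; the mask value and format array are made-up samples, and COLOR_INVALID mirroring V_028C70_COLOR_INVALID == 0 is an assumption taken from evergreend.h.

#include <stdint.h>
#include <stdio.h>

#define COLOR_INVALID	0

int main(void)
{
	uint32_t cb_target_mask = 0x000000ff;	/* hypothetical: RT0 and RT1 enabled */
	unsigned formats[8] = { 0x1a, COLOR_INVALID, 0, 0, 0, 0, 0, 0 };
	unsigned i;

	for (i = 0; i < 8; i++) {
		unsigned enabled = (cb_target_mask >> (i * 4)) & 0xF;

		/* only RT0 is reported: RT1 is enabled but its format is invalid */
		if (formats[i] != COLOR_INVALID && enabled)
			printf("RT%u needs a color buffer bound\n", i);
	}
	return 0;
}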
diff --git a/drivers/gpu/drm/radeon/evergreen_reg.h b/drivers/gpu/drm/radeon/evergreen_reg.h
index 8a4e641..a0f63ff 100644
--- a/drivers/gpu/drm/radeon/evergreen_reg.h
+++ b/drivers/gpu/drm/radeon/evergreen_reg.h
@@ -33,6 +33,7 @@
 #define EVERGREEN_PIF_PHY0_DATA                         0xc
 #define EVERGREEN_PIF_PHY1_INDEX                        0x10
 #define EVERGREEN_PIF_PHY1_DATA                         0x14
+#define EVERGREEN_MM_INDEX_HI                           0x18
 
 #define EVERGREEN_VGA_MEMORY_BASE_ADDRESS               0x310
 #define EVERGREEN_VGA_MEMORY_BASE_ADDRESS_HIGH          0x324
diff --git a/drivers/gpu/drm/radeon/evergreend.h b/drivers/gpu/drm/radeon/evergreend.h
index 17f9907..f9c7963 100644
--- a/drivers/gpu/drm/radeon/evergreend.h
+++ b/drivers/gpu/drm/radeon/evergreend.h
@@ -82,12 +82,16 @@
 #define	CG_SPLL_FUNC_CNTL_2				0x604
 #define		SCLK_MUX_SEL(x)				((x) << 0)
 #define		SCLK_MUX_SEL_MASK			(0x1ff << 0)
+#define		SCLK_MUX_UPDATE				(1 << 26)
 #define	CG_SPLL_FUNC_CNTL_3				0x608
 #define		SPLL_FB_DIV(x)				((x) << 0)
 #define		SPLL_FB_DIV_MASK			(0x3ffffff << 0)
 #define		SPLL_DITHEN				(1 << 28)
+#define	CG_SPLL_STATUS					0x60c
+#define		SPLL_CHG_STATUS				(1 << 1)
 
 #define MPLL_CNTL_MODE                                  0x61c
+#       define MPLL_MCLK_SEL                            (1 << 11)
 #       define SS_SSEN                                  (1 << 24)
 #       define SS_DSMODE_EN                             (1 << 25)
 
diff --git a/drivers/gpu/drm/radeon/kv_dpm.c b/drivers/gpu/drm/radeon/kv_dpm.c
index b419055..351db36 100644
--- a/drivers/gpu/drm/radeon/kv_dpm.c
+++ b/drivers/gpu/drm/radeon/kv_dpm.c
@@ -1126,11 +1126,6 @@
 	struct kv_power_info *pi = kv_get_pi(rdev);
 	int ret;
 
-	cik_update_cg(rdev, (RADEON_CG_BLOCK_GFX |
-			     RADEON_CG_BLOCK_SDMA |
-			     RADEON_CG_BLOCK_BIF |
-			     RADEON_CG_BLOCK_HDP), false);
-
 	ret = kv_process_firmware_header(rdev);
 	if (ret) {
 		DRM_ERROR("kv_process_firmware_header failed\n");
@@ -1215,6 +1210,21 @@
 
 	kv_reset_acp_boot_level(rdev);
 
+	ret = kv_smc_bapm_enable(rdev, false);
+	if (ret) {
+		DRM_ERROR("kv_smc_bapm_enable failed\n");
+		return ret;
+	}
+
+	kv_update_current_ps(rdev, rdev->pm.dpm.boot_ps);
+
+	return ret;
+}
+
+int kv_dpm_late_enable(struct radeon_device *rdev)
+{
+	int ret = 0;
+
 	if (rdev->irq.installed &&
 	    r600_is_internal_thermal_sensor(rdev->pm.int_thermal_type)) {
 		ret = kv_set_thermal_temperature_range(rdev, R600_TEMP_RANGE_MIN, R600_TEMP_RANGE_MAX);
@@ -1226,35 +1236,17 @@
 		radeon_irq_set(rdev);
 	}
 
-	ret = kv_smc_bapm_enable(rdev, false);
-	if (ret) {
-		DRM_ERROR("kv_smc_bapm_enable failed\n");
-		return ret;
-	}
-
 	/* powerdown unused blocks for now */
 	kv_dpm_powergate_acp(rdev, true);
 	kv_dpm_powergate_samu(rdev, true);
 	kv_dpm_powergate_vce(rdev, true);
 	kv_dpm_powergate_uvd(rdev, true);
 
-	cik_update_cg(rdev, (RADEON_CG_BLOCK_GFX |
-			     RADEON_CG_BLOCK_SDMA |
-			     RADEON_CG_BLOCK_BIF |
-			     RADEON_CG_BLOCK_HDP), true);
-
-	kv_update_current_ps(rdev, rdev->pm.dpm.boot_ps);
-
 	return ret;
 }
 
 void kv_dpm_disable(struct radeon_device *rdev)
 {
-	cik_update_cg(rdev, (RADEON_CG_BLOCK_GFX |
-			     RADEON_CG_BLOCK_SDMA |
-			     RADEON_CG_BLOCK_BIF |
-			     RADEON_CG_BLOCK_HDP), false);
-
 	kv_smc_bapm_enable(rdev, false);
 
 	/* powerup blocks */
@@ -1779,11 +1771,6 @@
 	/*struct radeon_ps *old_ps = &pi->current_rps;*/
 	int ret;
 
-	cik_update_cg(rdev, (RADEON_CG_BLOCK_GFX |
-			     RADEON_CG_BLOCK_SDMA |
-			     RADEON_CG_BLOCK_BIF |
-			     RADEON_CG_BLOCK_HDP), false);
-
 	if (pi->bapm_enable) {
 		ret = kv_smc_bapm_enable(rdev, rdev->pm.dpm.ac_power);
 		if (ret) {
@@ -1849,11 +1836,6 @@
 		}
 	}
 
-	cik_update_cg(rdev, (RADEON_CG_BLOCK_GFX |
-			     RADEON_CG_BLOCK_SDMA |
-			     RADEON_CG_BLOCK_BIF |
-			     RADEON_CG_BLOCK_HDP), true);
-
 	return 0;
 }
 
diff --git a/drivers/gpu/drm/radeon/ni.c b/drivers/gpu/drm/radeon/ni.c
index f59a9e9..ea932ac 100644
--- a/drivers/gpu/drm/radeon/ni.c
+++ b/drivers/gpu/drm/radeon/ni.c
@@ -174,6 +174,7 @@
 extern void evergreen_program_aspm(struct radeon_device *rdev);
 extern void sumo_rlc_fini(struct radeon_device *rdev);
 extern int sumo_rlc_init(struct radeon_device *rdev);
+extern void evergreen_gpu_pci_config_reset(struct radeon_device *rdev);
 
 /* Firmware Names */
 MODULE_FIRMWARE("radeon/BARTS_pfp.bin");
@@ -1330,13 +1331,12 @@
 {
 	struct radeon_ring *ring = &rdev->ring[fence->ring];
 	u64 addr = rdev->fence_drv[fence->ring].gpu_addr;
+	u32 cp_coher_cntl = PACKET3_FULL_CACHE_ENA | PACKET3_TC_ACTION_ENA |
+		PACKET3_SH_ACTION_ENA;
 
 	/* flush read cache over gart for this vmid */
-	radeon_ring_write(ring, PACKET3(PACKET3_SET_CONFIG_REG, 1));
-	radeon_ring_write(ring, (CP_COHER_CNTL2 - PACKET3_SET_CONFIG_REG_START) >> 2);
-	radeon_ring_write(ring, 0);
 	radeon_ring_write(ring, PACKET3(PACKET3_SURFACE_SYNC, 3));
-	radeon_ring_write(ring, PACKET3_TC_ACTION_ENA | PACKET3_SH_ACTION_ENA);
+	radeon_ring_write(ring, PACKET3_ENGINE_ME | cp_coher_cntl);
 	radeon_ring_write(ring, 0xFFFFFFFF);
 	radeon_ring_write(ring, 0);
 	radeon_ring_write(ring, 10); /* poll interval */
@@ -1352,6 +1352,8 @@
 void cayman_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib)
 {
 	struct radeon_ring *ring = &rdev->ring[ib->ring];
+	u32 cp_coher_cntl = PACKET3_FULL_CACHE_ENA | PACKET3_TC_ACTION_ENA |
+		PACKET3_SH_ACTION_ENA;
 
 	/* set to DX10/11 mode */
 	radeon_ring_write(ring, PACKET3(PACKET3_MODE_CONTROL, 0));
@@ -1376,14 +1378,11 @@
 			  (ib->vm ? (ib->vm->id << 24) : 0));
 
 	/* flush read cache over gart for this vmid */
-	radeon_ring_write(ring, PACKET3(PACKET3_SET_CONFIG_REG, 1));
-	radeon_ring_write(ring, (CP_COHER_CNTL2 - PACKET3_SET_CONFIG_REG_START) >> 2);
-	radeon_ring_write(ring, ib->vm ? ib->vm->id : 0);
 	radeon_ring_write(ring, PACKET3(PACKET3_SURFACE_SYNC, 3));
-	radeon_ring_write(ring, PACKET3_TC_ACTION_ENA | PACKET3_SH_ACTION_ENA);
+	radeon_ring_write(ring, PACKET3_ENGINE_ME | cp_coher_cntl);
 	radeon_ring_write(ring, 0xFFFFFFFF);
 	radeon_ring_write(ring, 0);
-	radeon_ring_write(ring, 10); /* poll interval */
+	radeon_ring_write(ring, ((ib->vm ? ib->vm->id : 0) << 24) | 10); /* poll interval */
 }
 
 static void cayman_cp_enable(struct radeon_device *rdev, bool enable)
@@ -1391,13 +1390,63 @@
 	if (enable)
 		WREG32(CP_ME_CNTL, 0);
 	else {
-		radeon_ttm_set_active_vram_size(rdev, rdev->mc.visible_vram_size);
+		if (rdev->asic->copy.copy_ring_index == RADEON_RING_TYPE_GFX_INDEX)
+			radeon_ttm_set_active_vram_size(rdev, rdev->mc.visible_vram_size);
 		WREG32(CP_ME_CNTL, (CP_ME_HALT | CP_PFP_HALT));
 		WREG32(SCRATCH_UMSK, 0);
 		rdev->ring[RADEON_RING_TYPE_GFX_INDEX].ready = false;
 	}
 }
 
+u32 cayman_gfx_get_rptr(struct radeon_device *rdev,
+			struct radeon_ring *ring)
+{
+	u32 rptr;
+
+	if (rdev->wb.enabled)
+		rptr = rdev->wb.wb[ring->rptr_offs/4];
+	else {
+		if (ring->idx == RADEON_RING_TYPE_GFX_INDEX)
+			rptr = RREG32(CP_RB0_RPTR);
+		else if (ring->idx == CAYMAN_RING_TYPE_CP1_INDEX)
+			rptr = RREG32(CP_RB1_RPTR);
+		else
+			rptr = RREG32(CP_RB2_RPTR);
+	}
+
+	return rptr;
+}
+
+u32 cayman_gfx_get_wptr(struct radeon_device *rdev,
+			struct radeon_ring *ring)
+{
+	u32 wptr;
+
+	if (ring->idx == RADEON_RING_TYPE_GFX_INDEX)
+		wptr = RREG32(CP_RB0_WPTR);
+	else if (ring->idx == CAYMAN_RING_TYPE_CP1_INDEX)
+		wptr = RREG32(CP_RB1_WPTR);
+	else
+		wptr = RREG32(CP_RB2_WPTR);
+
+	return wptr;
+}
+
+void cayman_gfx_set_wptr(struct radeon_device *rdev,
+			 struct radeon_ring *ring)
+{
+	if (ring->idx == RADEON_RING_TYPE_GFX_INDEX) {
+		WREG32(CP_RB0_WPTR, ring->wptr);
+		(void)RREG32(CP_RB0_WPTR);
+	} else if (ring->idx == CAYMAN_RING_TYPE_CP1_INDEX) {
+		WREG32(CP_RB1_WPTR, ring->wptr);
+		(void)RREG32(CP_RB1_WPTR);
+	} else {
+		WREG32(CP_RB2_WPTR, ring->wptr);
+		(void)RREG32(CP_RB2_WPTR);
+	}
+}
+
 static int cayman_cp_load_microcode(struct radeon_device *rdev)
 {
 	const __be32 *fw_data;
@@ -1526,6 +1575,16 @@
 		CP_RB1_BASE,
 		CP_RB2_BASE
 	};
+	static const unsigned cp_rb_rptr[] = {
+		CP_RB0_RPTR,
+		CP_RB1_RPTR,
+		CP_RB2_RPTR
+	};
+	static const unsigned cp_rb_wptr[] = {
+		CP_RB0_WPTR,
+		CP_RB1_WPTR,
+		CP_RB2_WPTR
+	};
 	struct radeon_ring *ring;
 	int i, r;
 
@@ -1584,8 +1643,8 @@
 		WREG32_P(cp_rb_cntl[i], RB_RPTR_WR_ENA, ~RB_RPTR_WR_ENA);
 
 		ring->rptr = ring->wptr = 0;
-		WREG32(ring->rptr_reg, ring->rptr);
-		WREG32(ring->wptr_reg, ring->wptr);
+		WREG32(cp_rb_rptr[i], ring->rptr);
+		WREG32(cp_rb_wptr[i], ring->wptr);
 
 		mdelay(1);
 		WREG32_P(cp_rb_cntl[i], 0, ~RB_RPTR_WR_ENA);
@@ -1605,6 +1664,9 @@
 		return r;
 	}
 
+	if (rdev->asic->copy.copy_ring_index == RADEON_RING_TYPE_GFX_INDEX)
+		radeon_ttm_set_active_vram_size(rdev, rdev->mc.real_vram_size);
+
 	return 0;
 }
 
@@ -1831,8 +1893,10 @@
 
 	reset_mask = cayman_gpu_check_soft_reset(rdev);
 
-	if (!reset_mask)
-		r600_set_bios_scratch_engine_hung(rdev, false);
+	if (reset_mask)
+		evergreen_gpu_pci_config_reset(rdev);
+
+	r600_set_bios_scratch_engine_hung(rdev, false);
 
 	return 0;
 }
@@ -1878,23 +1942,7 @@
 
 	evergreen_mc_program(rdev);
 
-	if (rdev->flags & RADEON_IS_IGP) {
-		if (!rdev->me_fw || !rdev->pfp_fw || !rdev->rlc_fw) {
-			r = ni_init_microcode(rdev);
-			if (r) {
-				DRM_ERROR("Failed to load firmware!\n");
-				return r;
-			}
-		}
-	} else {
-		if (!rdev->me_fw || !rdev->pfp_fw || !rdev->rlc_fw || !rdev->mc_fw) {
-			r = ni_init_microcode(rdev);
-			if (r) {
-				DRM_ERROR("Failed to load firmware!\n");
-				return r;
-			}
-		}
-
+	if (!(rdev->flags & RADEON_IS_IGP) && !rdev->pm.dpm_enabled) {
 		r = ni_mc_load_microcode(rdev);
 		if (r) {
 			DRM_ERROR("Failed to load MC firmware!\n");
@@ -1981,23 +2029,18 @@
 	evergreen_irq_set(rdev);
 
 	r = radeon_ring_init(rdev, ring, ring->ring_size, RADEON_WB_CP_RPTR_OFFSET,
-			     CP_RB0_RPTR, CP_RB0_WPTR,
 			     RADEON_CP_PACKET2);
 	if (r)
 		return r;
 
 	ring = &rdev->ring[R600_RING_TYPE_DMA_INDEX];
 	r = radeon_ring_init(rdev, ring, ring->ring_size, R600_WB_DMA_RPTR_OFFSET,
-			     DMA_RB_RPTR + DMA0_REGISTER_OFFSET,
-			     DMA_RB_WPTR + DMA0_REGISTER_OFFSET,
 			     DMA_PACKET(DMA_PACKET_NOP, 0, 0, 0));
 	if (r)
 		return r;
 
 	ring = &rdev->ring[CAYMAN_RING_TYPE_DMA1_INDEX];
 	r = radeon_ring_init(rdev, ring, ring->ring_size, CAYMAN_WB_DMA1_RPTR_OFFSET,
-			     DMA_RB_RPTR + DMA1_REGISTER_OFFSET,
-			     DMA_RB_WPTR + DMA1_REGISTER_OFFSET,
 			     DMA_PACKET(DMA_PACKET_NOP, 0, 0, 0));
 	if (r)
 		return r;
@@ -2016,7 +2059,6 @@
 	ring = &rdev->ring[R600_RING_TYPE_UVD_INDEX];
 	if (ring->ring_size) {
 		r = radeon_ring_init(rdev, ring, ring->ring_size, 0,
-				     UVD_RBC_RB_RPTR, UVD_RBC_RB_WPTR,
 				     RADEON_CP_PACKET2);
 		if (!r)
 			r = uvd_v1_0_init(rdev);
@@ -2063,6 +2105,8 @@
 	/* init golden registers */
 	ni_init_golden_registers(rdev);
 
+	radeon_pm_resume(rdev);
+
 	rdev->accel_working = true;
 	r = cayman_startup(rdev);
 	if (r) {
@@ -2075,6 +2119,7 @@
 
 int cayman_suspend(struct radeon_device *rdev)
 {
+	radeon_pm_suspend(rdev);
 	if (ASIC_IS_DCE6(rdev))
 		dce6_audio_fini(rdev);
 	else
@@ -2145,6 +2190,27 @@
 	if (r)
 		return r;
 
+	if (rdev->flags & RADEON_IS_IGP) {
+		if (!rdev->me_fw || !rdev->pfp_fw || !rdev->rlc_fw) {
+			r = ni_init_microcode(rdev);
+			if (r) {
+				DRM_ERROR("Failed to load firmware!\n");
+				return r;
+			}
+		}
+	} else {
+		if (!rdev->me_fw || !rdev->pfp_fw || !rdev->rlc_fw || !rdev->mc_fw) {
+			r = ni_init_microcode(rdev);
+			if (r) {
+				DRM_ERROR("Failed to load firmware!\n");
+				return r;
+			}
+		}
+	}
+
+	/* Initialize power management */
+	radeon_pm_init(rdev);
+
 	ring->ring_obj = NULL;
 	r600_ring_init(rdev, ring, 1024 * 1024);
 
@@ -2204,6 +2270,7 @@
 
 void cayman_fini(struct radeon_device *rdev)
 {
+	radeon_pm_fini(rdev);
 	cayman_cp_fini(rdev);
 	cayman_dma_fini(rdev);
 	r600_irq_fini(rdev);
diff --git a/drivers/gpu/drm/radeon/ni_dma.c b/drivers/gpu/drm/radeon/ni_dma.c
index bdeb65e..7cf96b1 100644
--- a/drivers/gpu/drm/radeon/ni_dma.c
+++ b/drivers/gpu/drm/radeon/ni_dma.c
@@ -43,6 +43,75 @@
  */
 
 /**
+ * cayman_dma_get_rptr - get the current read pointer
+ *
+ * @rdev: radeon_device pointer
+ * @ring: radeon ring pointer
+ *
+ * Get the current rptr from the hardware (cayman+).
+ */
+uint32_t cayman_dma_get_rptr(struct radeon_device *rdev,
+			     struct radeon_ring *ring)
+{
+	u32 rptr, reg;
+
+	if (rdev->wb.enabled) {
+		rptr = rdev->wb.wb[ring->rptr_offs/4];
+	} else {
+		if (ring->idx == R600_RING_TYPE_DMA_INDEX)
+			reg = DMA_RB_RPTR + DMA0_REGISTER_OFFSET;
+		else
+			reg = DMA_RB_RPTR + DMA1_REGISTER_OFFSET;
+
+		rptr = RREG32(reg);
+	}
+
+	return (rptr & 0x3fffc) >> 2;
+}
+
+/**
+ * cayman_dma_get_wptr - get the current write pointer
+ *
+ * @rdev: radeon_device pointer
+ * @ring: radeon ring pointer
+ *
+ * Get the current wptr from the hardware (cayman+).
+ */
+uint32_t cayman_dma_get_wptr(struct radeon_device *rdev,
+			   struct radeon_ring *ring)
+{
+	u32 reg;
+
+	if (ring->idx == R600_RING_TYPE_DMA_INDEX)
+		reg = DMA_RB_WPTR + DMA0_REGISTER_OFFSET;
+	else
+		reg = DMA_RB_WPTR + DMA1_REGISTER_OFFSET;
+
+	return (RREG32(reg) & 0x3fffc) >> 2;
+}
+
+/**
+ * cayman_dma_set_wptr - commit the write pointer
+ *
+ * @rdev: radeon_device pointer
+ * @ring: radeon ring pointer
+ *
+ * Write the wptr back to the hardware (cayman+).
+ */
+void cayman_dma_set_wptr(struct radeon_device *rdev,
+			 struct radeon_ring *ring)
+{
+	u32 reg;
+
+	if (ring->idx == R600_RING_TYPE_DMA_INDEX)
+		reg = DMA_RB_WPTR + DMA0_REGISTER_OFFSET;
+	else
+		reg = DMA_RB_WPTR + DMA1_REGISTER_OFFSET;
+
+	WREG32(reg, (ring->wptr << 2) & 0x3fffc);
+}
+
+/**
  * cayman_dma_ring_ib_execute - Schedule an IB on the DMA engine
  *
  * @rdev: radeon_device pointer
@@ -88,7 +157,9 @@
 {
 	u32 rb_cntl;
 
-	radeon_ttm_set_active_vram_size(rdev, rdev->mc.visible_vram_size);
+	if ((rdev->asic->copy.copy_ring_index == R600_RING_TYPE_DMA_INDEX) ||
+	    (rdev->asic->copy.copy_ring_index == CAYMAN_RING_TYPE_DMA1_INDEX))
+		radeon_ttm_set_active_vram_size(rdev, rdev->mc.visible_vram_size);
 
 	/* dma0 */
 	rb_cntl = RREG32(DMA_RB_CNTL + DMA0_REGISTER_OFFSET);
@@ -190,7 +261,9 @@
 		}
 	}
 
-	radeon_ttm_set_active_vram_size(rdev, rdev->mc.real_vram_size);
+	if ((rdev->asic->copy.copy_ring_index == R600_RING_TYPE_DMA_INDEX) ||
+	    (rdev->asic->copy.copy_ring_index == CAYMAN_RING_TYPE_DMA1_INDEX))
+		radeon_ttm_set_active_vram_size(rdev, rdev->mc.real_vram_size);
 
 	return 0;
 }
diff --git a/drivers/gpu/drm/radeon/ni_dpm.c b/drivers/gpu/drm/radeon/ni_dpm.c
index 49c4d48..ca81427 100644
--- a/drivers/gpu/drm/radeon/ni_dpm.c
+++ b/drivers/gpu/drm/radeon/ni_dpm.c
@@ -720,6 +720,8 @@
 struct rv7xx_power_info *rv770_get_pi(struct radeon_device *rdev);
 struct evergreen_power_info *evergreen_get_pi(struct radeon_device *rdev);
 
+extern int ni_mc_load_microcode(struct radeon_device *rdev);
+
 struct ni_power_info *ni_get_pi(struct radeon_device *rdev)
 {
         struct ni_power_info *pi = rdev->pm.dpm.priv;
@@ -2586,7 +2588,7 @@
 	if (NISLANDS_DPM2_SQ_RAMP_STI_SIZE > (STI_SIZE_MASK >> STI_SIZE_SHIFT))
 		enable_sq_ramping = false;
 
-	if (NISLANDS_DPM2_SQ_RAMP_LTI_RATIO <= (LTI_RATIO_MASK >> LTI_RATIO_SHIFT))
+	if (NISLANDS_DPM2_SQ_RAMP_LTI_RATIO > (LTI_RATIO_MASK >> LTI_RATIO_SHIFT))
 		enable_sq_ramping = false;
 
 	for (i = 0; i < state->performance_level_count; i++) {
@@ -3565,7 +3567,11 @@
 void ni_dpm_setup_asic(struct radeon_device *rdev)
 {
 	struct evergreen_power_info *eg_pi = evergreen_get_pi(rdev);
+	int r;
 
+	r = ni_mc_load_microcode(rdev);
+	if (r)
+		DRM_ERROR("Failed to load MC firmware!\n");
 	ni_read_clock_registers(rdev);
 	btc_read_arb_registers(rdev);
 	rv770_get_memory_type(rdev);
@@ -3710,21 +3716,6 @@
 	if (eg_pi->ls_clock_gating)
 		ni_ls_clockgating_enable(rdev, true);
 
-	if (rdev->irq.installed &&
-	    r600_is_internal_thermal_sensor(rdev->pm.int_thermal_type)) {
-		PPSMC_Result result;
-
-		ret = rv770_set_thermal_temperature_range(rdev, R600_TEMP_RANGE_MIN, 0xff * 1000);
-		if (ret)
-			return ret;
-		rdev->irq.dpm_thermal = true;
-		radeon_irq_set(rdev);
-		result = rv770_send_msg_to_smc(rdev, PPSMC_MSG_EnableThermalInterrupt);
-
-		if (result != PPSMC_Result_OK)
-			DRM_DEBUG_KMS("Could not enable thermal interrupts.\n");
-	}
-
 	rv770_enable_auto_throttle_source(rdev, RADEON_DPM_AUTO_THROTTLE_SRC_THERMAL, true);
 
 	ni_update_current_ps(rdev, boot_ps);
@@ -3954,7 +3945,6 @@
 	struct rv7xx_power_info *pi = rv770_get_pi(rdev);
 	struct evergreen_power_info *eg_pi = evergreen_get_pi(rdev);
 	struct ni_ps *ps = ni_get_ps(rps);
-	u16 vddc;
 	struct rv7xx_pl *pl = &ps->performance_levels[index];
 
 	ps->performance_level_count = index + 1;
@@ -3970,8 +3960,8 @@
 
 	/* patch up vddc if necessary */
 	if (pl->vddc == 0xff01) {
-		if (radeon_atom_get_max_vddc(rdev, 0, 0, &vddc) == 0)
-			pl->vddc = vddc;
+		if (pi->max_vddc)
+			pl->vddc = pi->max_vddc;
 	}
 
 	if (rps->class & ATOM_PPLIB_CLASSIFICATION_ACPI) {
@@ -4331,7 +4321,8 @@
 void ni_dpm_debugfs_print_current_performance_level(struct radeon_device *rdev,
 						    struct seq_file *m)
 {
-	struct radeon_ps *rps = rdev->pm.dpm.current_ps;
+	struct evergreen_power_info *eg_pi = evergreen_get_pi(rdev);
+	struct radeon_ps *rps = &eg_pi->current_rps;
 	struct ni_ps *ps = ni_get_ps(rps);
 	struct rv7xx_pl *pl;
 	u32 current_index =
diff --git a/drivers/gpu/drm/radeon/nid.h b/drivers/gpu/drm/radeon/nid.h
index 22421bc..d996033 100644
--- a/drivers/gpu/drm/radeon/nid.h
+++ b/drivers/gpu/drm/radeon/nid.h
@@ -1154,6 +1154,7 @@
 #              define PACKET3_DB_ACTION_ENA        (1 << 26)
 #              define PACKET3_SH_ACTION_ENA        (1 << 27)
 #              define PACKET3_SX_ACTION_ENA        (1 << 28)
+#              define PACKET3_ENGINE_ME            (1 << 31)
 #define	PACKET3_ME_INITIALIZE				0x44
 #define		PACKET3_ME_INITIALIZE_DEVICE_ID(x) ((x) << 16)
 #define	PACKET3_COND_WRITE				0x45
diff --git a/drivers/gpu/drm/radeon/pptable.h b/drivers/gpu/drm/radeon/pptable.h
index da43ab3..2d53299 100644
--- a/drivers/gpu/drm/radeon/pptable.h
+++ b/drivers/gpu/drm/radeon/pptable.h
@@ -23,7 +23,7 @@
 #ifndef _PPTABLE_H
 #define _PPTABLE_H
 
-#pragma pack(push, 1)
+#pragma pack(1)
 
 typedef struct _ATOM_PPLIB_THERMALCONTROLLER
 
@@ -677,6 +677,6 @@
       ULONG  ulTjmax;
 } ATOM_PPLIB_PPM_Table;
 
-#pragma pack(pop)
+#pragma pack()
 
 #endif
diff --git a/drivers/gpu/drm/radeon/r100.c b/drivers/gpu/drm/radeon/r100.c
index 10abc4d..ef024ce 100644
--- a/drivers/gpu/drm/radeon/r100.c
+++ b/drivers/gpu/drm/radeon/r100.c
@@ -1050,6 +1050,36 @@
 	return err;
 }
 
+u32 r100_gfx_get_rptr(struct radeon_device *rdev,
+		      struct radeon_ring *ring)
+{
+	u32 rptr;
+
+	if (rdev->wb.enabled)
+		rptr = le32_to_cpu(rdev->wb.wb[ring->rptr_offs/4]);
+	else
+		rptr = RREG32(RADEON_CP_RB_RPTR);
+
+	return rptr;
+}
+
+u32 r100_gfx_get_wptr(struct radeon_device *rdev,
+		      struct radeon_ring *ring)
+{
+	u32 wptr;
+
+	wptr = RREG32(RADEON_CP_RB_WPTR);
+
+	return wptr;
+}
+
+void r100_gfx_set_wptr(struct radeon_device *rdev,
+		       struct radeon_ring *ring)
+{
+	WREG32(RADEON_CP_RB_WPTR, ring->wptr);
+	(void)RREG32(RADEON_CP_RB_WPTR);
+}
+
 static void r100_cp_load_microcode(struct radeon_device *rdev)
 {
 	const __be32 *fw_data;
@@ -1102,7 +1132,6 @@
 	ring_size = (1 << (rb_bufsz + 1)) * 4;
 	r100_cp_load_microcode(rdev);
 	r = radeon_ring_init(rdev, ring, ring_size, RADEON_WB_CP_RPTR_OFFSET,
-			     RADEON_CP_RB_RPTR, RADEON_CP_RB_WPTR,
 			     RADEON_CP_PACKET2);
 	if (r) {
 		return r;
@@ -3913,6 +3942,8 @@
 	/* Initialize surface registers */
 	radeon_surface_init(rdev);
 
+	radeon_pm_resume(rdev);
+
 	rdev->accel_working = true;
 	r = r100_startup(rdev);
 	if (r) {
@@ -3923,6 +3954,7 @@
 
 int r100_suspend(struct radeon_device *rdev)
 {
+	radeon_pm_suspend(rdev);
 	r100_cp_disable(rdev);
 	radeon_wb_disable(rdev);
 	r100_irq_disable(rdev);
@@ -3933,6 +3965,7 @@
 
 void r100_fini(struct radeon_device *rdev)
 {
+	radeon_pm_fini(rdev);
 	r100_cp_fini(rdev);
 	radeon_wb_fini(rdev);
 	radeon_ib_pool_fini(rdev);
@@ -4039,6 +4072,9 @@
 	}
 	r100_set_safe_registers(rdev);
 
+	/* Initialize power management */
+	radeon_pm_init(rdev);
+
 	rdev->accel_working = true;
 	r = r100_startup(rdev);
 	if (r) {
diff --git a/drivers/gpu/drm/radeon/r300.c b/drivers/gpu/drm/radeon/r300.c
index d8dd269..7c63ef8 100644
--- a/drivers/gpu/drm/radeon/r300.c
+++ b/drivers/gpu/drm/radeon/r300.c
@@ -1430,6 +1430,8 @@
 	/* Initialize surface registers */
 	radeon_surface_init(rdev);
 
+	radeon_pm_resume(rdev);
+
 	rdev->accel_working = true;
 	r = r300_startup(rdev);
 	if (r) {
@@ -1440,6 +1442,7 @@
 
 int r300_suspend(struct radeon_device *rdev)
 {
+	radeon_pm_suspend(rdev);
 	r100_cp_disable(rdev);
 	radeon_wb_disable(rdev);
 	r100_irq_disable(rdev);
@@ -1452,6 +1455,7 @@
 
 void r300_fini(struct radeon_device *rdev)
 {
+	radeon_pm_fini(rdev);
 	r100_cp_fini(rdev);
 	radeon_wb_fini(rdev);
 	radeon_ib_pool_fini(rdev);
@@ -1538,6 +1542,9 @@
 	}
 	r300_set_reg_safe(rdev);
 
+	/* Initialize power management */
+	radeon_pm_init(rdev);
+
 	rdev->accel_working = true;
 	r = r300_startup(rdev);
 	if (r) {
diff --git a/drivers/gpu/drm/radeon/r300_cmdbuf.c b/drivers/gpu/drm/radeon/r300_cmdbuf.c
index 60170ea..84b1d53 100644
--- a/drivers/gpu/drm/radeon/r300_cmdbuf.c
+++ b/drivers/gpu/drm/radeon/r300_cmdbuf.c
@@ -75,7 +75,7 @@
 		OUT_RING(CP_PACKET0(R300_RE_CLIPRECT_TL_0, nr * 2 - 1));
 
 		for (i = 0; i < nr; ++i) {
-			if (DRM_COPY_FROM_USER
+			if (copy_from_user
 			    (&box, &cmdbuf->boxes[n + i], sizeof(box))) {
 				DRM_ERROR("copy cliprect faulted\n");
 				return -EFAULT;
@@ -928,12 +928,12 @@
 		buf_idx = drm_buffer_pointer_to_dword(cmdbuf->buffer, 0);
 		*buf_idx *= 2; /* 8 bytes per buf */
 
-		if (DRM_COPY_TO_USER(ref_age_base + *buf_idx,
+		if (copy_to_user(ref_age_base + *buf_idx,
 				&dev_priv->scratch_ages[header.scratch.reg],
 				sizeof(u32)))
 			return -EINVAL;
 
-		if (DRM_COPY_FROM_USER(&h_pending,
+		if (copy_from_user(&h_pending,
 				ref_age_base + *buf_idx + 1,
 				sizeof(u32)))
 			return -EINVAL;
@@ -943,7 +943,7 @@
 
 		h_pending--;
 
-		if (DRM_COPY_TO_USER(ref_age_base + *buf_idx + 1,
+		if (copy_to_user(ref_age_base + *buf_idx + 1,
 					&h_pending,
 					sizeof(u32)))
 			return -EINVAL;
diff --git a/drivers/gpu/drm/radeon/r420.c b/drivers/gpu/drm/radeon/r420.c
index 6edf2b3..3768aab 100644
--- a/drivers/gpu/drm/radeon/r420.c
+++ b/drivers/gpu/drm/radeon/r420.c
@@ -325,6 +325,8 @@
 	/* Initialize surface registers */
 	radeon_surface_init(rdev);
 
+	radeon_pm_resume(rdev);
+
 	rdev->accel_working = true;
 	r = r420_startup(rdev);
 	if (r) {
@@ -335,6 +337,7 @@
 
 int r420_suspend(struct radeon_device *rdev)
 {
+	radeon_pm_suspend(rdev);
 	r420_cp_errata_fini(rdev);
 	r100_cp_disable(rdev);
 	radeon_wb_disable(rdev);
@@ -348,6 +351,7 @@
 
 void r420_fini(struct radeon_device *rdev)
 {
+	radeon_pm_fini(rdev);
 	r100_cp_fini(rdev);
 	radeon_wb_fini(rdev);
 	radeon_ib_pool_fini(rdev);
@@ -444,6 +448,9 @@
 	}
 	r420_set_reg_safe(rdev);
 
+	/* Initialize power management */
+	radeon_pm_init(rdev);
+
 	rdev->accel_working = true;
 	r = r420_startup(rdev);
 	if (r) {
diff --git a/drivers/gpu/drm/radeon/r520.c b/drivers/gpu/drm/radeon/r520.c
index e1aece7..e209eb7 100644
--- a/drivers/gpu/drm/radeon/r520.c
+++ b/drivers/gpu/drm/radeon/r520.c
@@ -240,6 +240,8 @@
 	/* Initialize surface registers */
 	radeon_surface_init(rdev);
 
+	radeon_pm_resume(rdev);
+
 	rdev->accel_working = true;
 	r = r520_startup(rdev);
 	if (r) {
@@ -312,6 +314,9 @@
 		return r;
 	rv515_set_safe_registers(rdev);
 
+	/* Initialize power management */
+	radeon_pm_init(rdev);
+
 	rdev->accel_working = true;
 	r = r520_startup(rdev);
 	if (r) {
diff --git a/drivers/gpu/drm/radeon/r600.c b/drivers/gpu/drm/radeon/r600.c
index 9ad0673..cdbc417 100644
--- a/drivers/gpu/drm/radeon/r600.c
+++ b/drivers/gpu/drm/radeon/r600.c
@@ -105,6 +105,7 @@
 void r600_irq_disable(struct radeon_device *rdev);
 static void r600_pcie_gen2_enable(struct radeon_device *rdev);
 extern int evergreen_rlc_resume(struct radeon_device *rdev);
+extern void rv770_set_clk_bypass_mode(struct radeon_device *rdev);
 
 /**
  * r600_get_xclk - get the xclk
@@ -1644,6 +1645,67 @@
 	r600_print_gpu_status_regs(rdev);
 }
 
+static void r600_gpu_pci_config_reset(struct radeon_device *rdev)
+{
+	struct rv515_mc_save save;
+	u32 tmp, i;
+
+	dev_info(rdev->dev, "GPU pci config reset\n");
+
+	/* disable dpm? */
+
+	/* Disable CP parsing/prefetching */
+	if (rdev->family >= CHIP_RV770)
+		WREG32(R_0086D8_CP_ME_CNTL, S_0086D8_CP_ME_HALT(1) | S_0086D8_CP_PFP_HALT(1));
+	else
+		WREG32(R_0086D8_CP_ME_CNTL, S_0086D8_CP_ME_HALT(1));
+
+	/* disable the RLC */
+	WREG32(RLC_CNTL, 0);
+
+	/* Disable DMA */
+	tmp = RREG32(DMA_RB_CNTL);
+	tmp &= ~DMA_RB_ENABLE;
+	WREG32(DMA_RB_CNTL, tmp);
+
+	mdelay(50);
+
+	/* set mclk/sclk to bypass */
+	if (rdev->family >= CHIP_RV770)
+		rv770_set_clk_bypass_mode(rdev);
+	/* disable BM */
+	pci_clear_master(rdev->pdev);
+	/* disable mem access */
+	rv515_mc_stop(rdev, &save);
+	if (r600_mc_wait_for_idle(rdev)) {
+		dev_warn(rdev->dev, "Wait for MC idle timedout !\n");
+	}
+
+	/* BIF reset workaround.  Not sure if this is needed on 6xx */
+	tmp = RREG32(BUS_CNTL);
+	tmp |= VGA_COHE_SPEC_TIMER_DIS;
+	WREG32(BUS_CNTL, tmp);
+
+	tmp = RREG32(BIF_SCRATCH0);
+
+	/* reset */
+	radeon_pci_config_reset(rdev);
+	mdelay(1);
+
+	/* BIF reset workaround.  Not sure if this is needed on 6xx */
+	tmp = SOFT_RESET_BIF;
+	WREG32(SRBM_SOFT_RESET, tmp);
+	mdelay(1);
+	WREG32(SRBM_SOFT_RESET, 0);
+
+	/* wait for asic to come out of reset */
+	for (i = 0; i < rdev->usec_timeout; i++) {
+		if (RREG32(CONFIG_MEMSIZE) != 0xffffffff)
+			break;
+		udelay(1);
+	}
+}
+
 int r600_asic_reset(struct radeon_device *rdev)
 {
 	u32 reset_mask;
@@ -1653,10 +1715,17 @@
 	if (reset_mask)
 		r600_set_bios_scratch_engine_hung(rdev, true);
 
+	/* try soft reset */
 	r600_gpu_soft_reset(rdev, reset_mask);
 
 	reset_mask = r600_gpu_check_soft_reset(rdev);
 
+	/* try pci config reset */
+	if (reset_mask && radeon_hard_reset)
+		r600_gpu_pci_config_reset(rdev);
+
+	reset_mask = r600_gpu_check_soft_reset(rdev);
+
 	if (!reset_mask)
 		r600_set_bios_scratch_engine_hung(rdev, false);
 
@@ -2185,7 +2254,8 @@
  */
 void r600_cp_stop(struct radeon_device *rdev)
 {
-	radeon_ttm_set_active_vram_size(rdev, rdev->mc.visible_vram_size);
+	if (rdev->asic->copy.copy_ring_index == RADEON_RING_TYPE_GFX_INDEX)
+		radeon_ttm_set_active_vram_size(rdev, rdev->mc.visible_vram_size);
 	WREG32(R_0086D8_CP_ME_CNTL, S_0086D8_CP_ME_HALT(1));
 	WREG32(SCRATCH_UMSK, 0);
 	rdev->ring[RADEON_RING_TYPE_GFX_INDEX].ready = false;
@@ -2382,6 +2452,36 @@
 	return err;
 }
 
+u32 r600_gfx_get_rptr(struct radeon_device *rdev,
+		      struct radeon_ring *ring)
+{
+	u32 rptr;
+
+	if (rdev->wb.enabled)
+		rptr = rdev->wb.wb[ring->rptr_offs/4];
+	else
+		rptr = RREG32(R600_CP_RB_RPTR);
+
+	return rptr;
+}
+
+u32 r600_gfx_get_wptr(struct radeon_device *rdev,
+		      struct radeon_ring *ring)
+{
+	u32 wptr;
+
+	wptr = RREG32(R600_CP_RB_WPTR);
+
+	return wptr;
+}
+
+void r600_gfx_set_wptr(struct radeon_device *rdev,
+		       struct radeon_ring *ring)
+{
+	WREG32(R600_CP_RB_WPTR, ring->wptr);
+	(void)RREG32(R600_CP_RB_WPTR);
+}
+
 static int r600_cp_load_microcode(struct radeon_device *rdev)
 {
 	const __be32 *fw_data;
@@ -2513,6 +2613,10 @@
 		ring->ready = false;
 		return r;
 	}
+
+	if (rdev->asic->copy.copy_ring_index == RADEON_RING_TYPE_GFX_INDEX)
+		radeon_ttm_set_active_vram_size(rdev, rdev->mc.real_vram_size);
+
 	return 0;
 }
 
@@ -2607,14 +2711,17 @@
 			  struct radeon_fence *fence)
 {
 	struct radeon_ring *ring = &rdev->ring[fence->ring];
+	u32 cp_coher_cntl = PACKET3_TC_ACTION_ENA | PACKET3_VC_ACTION_ENA |
+		PACKET3_SH_ACTION_ENA;
+
+	if (rdev->family >= CHIP_RV770)
+		cp_coher_cntl |= PACKET3_FULL_CACHE_ENA;
 
 	if (rdev->wb.use_event) {
 		u64 addr = rdev->fence_drv[fence->ring].gpu_addr;
 		/* flush read cache over gart */
 		radeon_ring_write(ring, PACKET3(PACKET3_SURFACE_SYNC, 3));
-		radeon_ring_write(ring, PACKET3_TC_ACTION_ENA |
-					PACKET3_VC_ACTION_ENA |
-					PACKET3_SH_ACTION_ENA);
+		radeon_ring_write(ring, cp_coher_cntl);
 		radeon_ring_write(ring, 0xFFFFFFFF);
 		radeon_ring_write(ring, 0);
 		radeon_ring_write(ring, 10); /* poll interval */
@@ -2628,9 +2735,7 @@
 	} else {
 		/* flush read cache over gart */
 		radeon_ring_write(ring, PACKET3(PACKET3_SURFACE_SYNC, 3));
-		radeon_ring_write(ring, PACKET3_TC_ACTION_ENA |
-					PACKET3_VC_ACTION_ENA |
-					PACKET3_SH_ACTION_ENA);
+		radeon_ring_write(ring, cp_coher_cntl);
 		radeon_ring_write(ring, 0xFFFFFFFF);
 		radeon_ring_write(ring, 0);
 		radeon_ring_write(ring, 10); /* poll interval */
@@ -2775,14 +2880,6 @@
 
 	r600_mc_program(rdev);
 
-	if (!rdev->me_fw || !rdev->pfp_fw || !rdev->rlc_fw) {
-		r = r600_init_microcode(rdev);
-		if (r) {
-			DRM_ERROR("Failed to load firmware!\n");
-			return r;
-		}
-	}
-
 	if (rdev->flags & RADEON_IS_AGP) {
 		r600_agp_enable(rdev);
 	} else {
@@ -2803,12 +2900,6 @@
 		return r;
 	}
 
-	r = radeon_fence_driver_start_ring(rdev, R600_RING_TYPE_DMA_INDEX);
-	if (r) {
-		dev_err(rdev->dev, "failed initializing DMA fences (%d).\n", r);
-		return r;
-	}
-
 	/* Enable IRQ */
 	if (!rdev->irq.installed) {
 		r = radeon_irq_kms_init(rdev);
@@ -2826,18 +2917,10 @@
 
 	ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
 	r = radeon_ring_init(rdev, ring, ring->ring_size, RADEON_WB_CP_RPTR_OFFSET,
-			     R600_CP_RB_RPTR, R600_CP_RB_WPTR,
 			     RADEON_CP_PACKET2);
 	if (r)
 		return r;
 
-	ring = &rdev->ring[R600_RING_TYPE_DMA_INDEX];
-	r = radeon_ring_init(rdev, ring, ring->ring_size, R600_WB_DMA_RPTR_OFFSET,
-			     DMA_RB_RPTR, DMA_RB_WPTR,
-			     DMA_PACKET(DMA_PACKET_NOP, 0, 0, 0));
-	if (r)
-		return r;
-
 	r = r600_cp_load_microcode(rdev);
 	if (r)
 		return r;
@@ -2845,10 +2928,6 @@
 	if (r)
 		return r;
 
-	r = r600_dma_resume(rdev);
-	if (r)
-		return r;
-
 	r = radeon_ib_pool_init(rdev);
 	if (r) {
 		dev_err(rdev->dev, "IB initialization failed (%d).\n", r);
@@ -2889,6 +2968,8 @@
 	/* post card */
 	atom_asic_init(rdev->mode_info.atom_context);
 
+	radeon_pm_resume(rdev);
+
 	rdev->accel_working = true;
 	r = r600_startup(rdev);
 	if (r) {
@@ -2902,9 +2983,9 @@
 
 int r600_suspend(struct radeon_device *rdev)
 {
+	radeon_pm_suspend(rdev);
 	r600_audio_fini(rdev);
 	r600_cp_stop(rdev);
-	r600_dma_stop(rdev);
 	r600_irq_suspend(rdev);
 	radeon_wb_disable(rdev);
 	r600_pcie_gart_disable(rdev);
@@ -2970,12 +3051,20 @@
 	if (r)
 		return r;
 
+	if (!rdev->me_fw || !rdev->pfp_fw || !rdev->rlc_fw) {
+		r = r600_init_microcode(rdev);
+		if (r) {
+			DRM_ERROR("Failed to load firmware!\n");
+			return r;
+		}
+	}
+
+	/* Initialize power management */
+	radeon_pm_init(rdev);
+
 	rdev->ring[RADEON_RING_TYPE_GFX_INDEX].ring_obj = NULL;
 	r600_ring_init(rdev, &rdev->ring[RADEON_RING_TYPE_GFX_INDEX], 1024 * 1024);
 
-	rdev->ring[R600_RING_TYPE_DMA_INDEX].ring_obj = NULL;
-	r600_ring_init(rdev, &rdev->ring[R600_RING_TYPE_DMA_INDEX], 64 * 1024);
-
 	rdev->ih.ring_obj = NULL;
 	r600_ih_ring_init(rdev, 64 * 1024);
 
@@ -2988,7 +3077,6 @@
 	if (r) {
 		dev_err(rdev->dev, "disabling GPU acceleration\n");
 		r600_cp_fini(rdev);
-		r600_dma_fini(rdev);
 		r600_irq_fini(rdev);
 		radeon_wb_fini(rdev);
 		radeon_ib_pool_fini(rdev);
@@ -3002,9 +3090,9 @@
 
 void r600_fini(struct radeon_device *rdev)
 {
+	radeon_pm_fini(rdev);
 	r600_audio_fini(rdev);
 	r600_cp_fini(rdev);
-	r600_dma_fini(rdev);
 	r600_irq_fini(rdev);
 	radeon_wb_fini(rdev);
 	radeon_ib_pool_fini(rdev);
@@ -3903,6 +3991,10 @@
 				break;
 			}
 			break;
+		case 124: /* UVD */
+			DRM_DEBUG("IH: UVD int: 0x%08x\n", src_data);
+			radeon_fence_process(rdev, R600_RING_TYPE_UVD_INDEX);
+			break;
 		case 176: /* CP_INT in ring buffer */
 		case 177: /* CP_INT in IB1 */
 		case 178: /* CP_INT in IB2 */
diff --git a/drivers/gpu/drm/radeon/r600_cp.c b/drivers/gpu/drm/radeon/r600_cp.c
index d8eb48b..8c9b7e2 100644
--- a/drivers/gpu/drm/radeon/r600_cp.c
+++ b/drivers/gpu/drm/radeon/r600_cp.c
@@ -2515,7 +2515,7 @@
 		buf = radeon_freelist_get(dev);
 		if (!buf) {
 			DRM_DEBUG("EAGAIN\n");
-			if (DRM_COPY_TO_USER(tex->image, image, sizeof(*image)))
+			if (copy_to_user(tex->image, image, sizeof(*image)))
 				return -EFAULT;
 			return -EAGAIN;
 		}
@@ -2528,7 +2528,7 @@
 		buffer =
 		    (u32 *) ((char *)dev->agp_buffer_map->handle + buf->offset);
 
-		if (DRM_COPY_FROM_USER(buffer, data, pass_size)) {
+		if (copy_from_user(buffer, data, pass_size)) {
 			DRM_ERROR("EFAULT on pad, %d bytes\n", pass_size);
 			return -EFAULT;
 		}
diff --git a/drivers/gpu/drm/radeon/r600_cs.c b/drivers/gpu/drm/radeon/r600_cs.c
index 5dceea6..2812c7d1a 100644
--- a/drivers/gpu/drm/radeon/r600_cs.c
+++ b/drivers/gpu/drm/radeon/r600_cs.c
@@ -749,7 +749,10 @@
 		}
 
 		for (i = 0; i < 8; i++) {
-			if ((tmp >> (i * 4)) & 0xF) {
+			u32 format = G_0280A0_FORMAT(track->cb_color_info[i]);
+
+			if (format != V_0280A0_COLOR_INVALID &&
+			    (tmp >> (i * 4)) & 0xF) {
 				/* at least one component is enabled */
 				if (track->cb_color_bo[i] == NULL) {
 					dev_warn(p->dev, "%s:%d mask 0x%08X | 0x%08X no cb for %d\n",
@@ -1004,8 +1007,22 @@
 	case R_008C64_SQ_VSTMP_RING_SIZE:
 	case R_0288C8_SQ_GS_VERT_ITEMSIZE:
 		/* get value to populate the IB don't remove */
-		tmp =radeon_get_ib_value(p, idx);
-		ib[idx] = 0;
+		/*tmp =radeon_get_ib_value(p, idx);
+		  ib[idx] = 0;*/
+		break;
+	case SQ_ESGS_RING_BASE:
+	case SQ_GSVS_RING_BASE:
+	case SQ_ESTMP_RING_BASE:
+	case SQ_GSTMP_RING_BASE:
+	case SQ_PSTMP_RING_BASE:
+	case SQ_VSTMP_RING_BASE:
+		r = radeon_cs_packet_next_reloc(p, &reloc, 0);
+		if (r) {
+			dev_warn(p->dev, "bad SET_CONTEXT_REG "
+					"0x%04X\n", reg);
+			return -EINVAL;
+		}
+		ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
 		break;
 	case SQ_CONFIG:
 		track->sq_config = radeon_get_ib_value(p, idx);
@@ -2386,7 +2403,7 @@
 	ib_chunk = &parser.chunks[parser.chunk_ib_idx];
 	parser.ib.length_dw = ib_chunk->length_dw;
 	*l = parser.ib.length_dw;
-	if (DRM_COPY_FROM_USER(ib, ib_chunk->user_ptr, ib_chunk->length_dw * 4)) {
+	if (copy_from_user(ib, ib_chunk->user_ptr, ib_chunk->length_dw * 4)) {
 		r = -EFAULT;
 		r600_cs_parser_fini(&parser, r);
 		return r;
diff --git a/drivers/gpu/drm/radeon/r600_dma.c b/drivers/gpu/drm/radeon/r600_dma.c
index 7844d15..b2d4c91 100644
--- a/drivers/gpu/drm/radeon/r600_dma.c
+++ b/drivers/gpu/drm/radeon/r600_dma.c
@@ -51,7 +51,14 @@
 uint32_t r600_dma_get_rptr(struct radeon_device *rdev,
 			   struct radeon_ring *ring)
 {
-	return (radeon_ring_generic_get_rptr(rdev, ring) & 0x3fffc) >> 2;
+	u32 rptr;
+
+	if (rdev->wb.enabled)
+		rptr = rdev->wb.wb[ring->rptr_offs/4];
+	else
+		rptr = RREG32(DMA_RB_RPTR);
+
+	return (rptr & 0x3fffc) >> 2;
 }
 
 /**
@@ -65,7 +72,7 @@
 uint32_t r600_dma_get_wptr(struct radeon_device *rdev,
 			   struct radeon_ring *ring)
 {
-	return (RREG32(ring->wptr_reg) & 0x3fffc) >> 2;
+	return (RREG32(DMA_RB_WPTR) & 0x3fffc) >> 2;
 }
 
 /**
@@ -79,7 +86,7 @@
 void r600_dma_set_wptr(struct radeon_device *rdev,
 		       struct radeon_ring *ring)
 {
-	WREG32(ring->wptr_reg, (ring->wptr << 2) & 0x3fffc);
+	WREG32(DMA_RB_WPTR, (ring->wptr << 2) & 0x3fffc);
 }
 
 /**
@@ -93,7 +100,8 @@
 {
 	u32 rb_cntl = RREG32(DMA_RB_CNTL);
 
-	radeon_ttm_set_active_vram_size(rdev, rdev->mc.visible_vram_size);
+	if (rdev->asic->copy.copy_ring_index == R600_RING_TYPE_DMA_INDEX)
+		radeon_ttm_set_active_vram_size(rdev, rdev->mc.visible_vram_size);
 
 	rb_cntl &= ~DMA_RB_ENABLE;
 	WREG32(DMA_RB_CNTL, rb_cntl);
@@ -180,7 +188,8 @@
 		return r;
 	}
 
-	radeon_ttm_set_active_vram_size(rdev, rdev->mc.real_vram_size);
+	if (rdev->asic->copy.copy_ring_index == R600_RING_TYPE_DMA_INDEX)
+		radeon_ttm_set_active_vram_size(rdev, rdev->mc.real_vram_size);
 
 	return 0;
 }
diff --git a/drivers/gpu/drm/radeon/r600_dpm.c b/drivers/gpu/drm/radeon/r600_dpm.c
index 5513d8f..e4cc9b3 100644
--- a/drivers/gpu/drm/radeon/r600_dpm.c
+++ b/drivers/gpu/drm/radeon/r600_dpm.c
@@ -729,8 +729,8 @@
 	return false;
 }
 
-int r600_set_thermal_temperature_range(struct radeon_device *rdev,
-				       int min_temp, int max_temp)
+static int r600_set_thermal_temperature_range(struct radeon_device *rdev,
+					      int min_temp, int max_temp)
 {
 	int low_temp = 0 * 1000;
 	int high_temp = 255 * 1000;
@@ -777,6 +777,22 @@
 	}
 }
 
+int r600_dpm_late_enable(struct radeon_device *rdev)
+{
+	int ret;
+
+	if (rdev->irq.installed &&
+	    r600_is_internal_thermal_sensor(rdev->pm.int_thermal_type)) {
+		ret = r600_set_thermal_temperature_range(rdev, R600_TEMP_RANGE_MIN, R600_TEMP_RANGE_MAX);
+		if (ret)
+			return ret;
+		rdev->irq.dpm_thermal = true;
+		radeon_irq_set(rdev);
+	}
+
+	return 0;
+}
+
 union power_info {
 	struct _ATOM_POWERPLAY_INFO info;
 	struct _ATOM_POWERPLAY_INFO_V2 info_2;
diff --git a/drivers/gpu/drm/radeon/r600_dpm.h b/drivers/gpu/drm/radeon/r600_dpm.h
index 1000bf9..07eab2b 100644
--- a/drivers/gpu/drm/radeon/r600_dpm.h
+++ b/drivers/gpu/drm/radeon/r600_dpm.h
@@ -213,8 +213,6 @@
 void r600_start_dpm(struct radeon_device *rdev);
 void r600_stop_dpm(struct radeon_device *rdev);
 
-int r600_set_thermal_temperature_range(struct radeon_device *rdev,
-				       int min_temp, int max_temp);
 bool r600_is_internal_thermal_sensor(enum radeon_int_thermal_type sensor);
 
 int r600_parse_extended_power_table(struct radeon_device *rdev);
diff --git a/drivers/gpu/drm/radeon/r600_hdmi.c b/drivers/gpu/drm/radeon/r600_hdmi.c
index b7d3ecb..3016fc1 100644
--- a/drivers/gpu/drm/radeon/r600_hdmi.c
+++ b/drivers/gpu/drm/radeon/r600_hdmi.c
@@ -250,7 +250,7 @@
 		 value, ~HDMI0_AUDIO_TEST_EN);
 }
 
-void r600_audio_set_dto(struct drm_encoder *encoder, u32 clock)
+static void r600_audio_set_dto(struct drm_encoder *encoder, u32 clock)
 {
 	struct drm_device *dev = encoder->dev;
 	struct radeon_device *rdev = dev->dev_private;
diff --git a/drivers/gpu/drm/radeon/r600d.h b/drivers/gpu/drm/radeon/r600d.h
index ebe3872..37455f6 100644
--- a/drivers/gpu/drm/radeon/r600d.h
+++ b/drivers/gpu/drm/radeon/r600d.h
@@ -701,11 +701,18 @@
 #define RLC_UCODE_DATA                                    0x3f30
 
 #define SRBM_SOFT_RESET                                   0xe60
+#       define SOFT_RESET_BIF                             (1 << 1)
 #       define SOFT_RESET_DMA                             (1 << 12)
 #       define SOFT_RESET_RLC                             (1 << 13)
 #       define SOFT_RESET_UVD                             (1 << 18)
 #       define RV770_SOFT_RESET_DMA                       (1 << 20)
 
+#define BIF_SCRATCH0                                      0x5438
+
+#define BUS_CNTL                                          0x5420
+#       define BIOS_ROM_DIS                               (1 << 1)
+#       define VGA_COHE_SPEC_TIMER_DIS                    (1 << 9)
+
 #define CP_INT_CNTL                                       0xc124
 #       define CNTX_BUSY_INT_ENABLE                       (1 << 19)
 #       define CNTX_EMPTY_INT_ENABLE                      (1 << 20)
@@ -1575,6 +1582,7 @@
 #              define PACKET3_CP_DMA_CMD_DAIC      (1 << 29)
 #define	PACKET3_SURFACE_SYNC				0x43
 #              define PACKET3_CB0_DEST_BASE_ENA    (1 << 6)
+#              define PACKET3_FULL_CACHE_ENA       (1 << 20) /* r7xx+ only */
 #              define PACKET3_TC_ACTION_ENA        (1 << 23)
 #              define PACKET3_VC_ACTION_ENA        (1 << 24)
 #              define PACKET3_CB_ACTION_ENA        (1 << 25)
diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h
index 45e1f44..024db37 100644
--- a/drivers/gpu/drm/radeon/radeon.h
+++ b/drivers/gpu/drm/radeon/radeon.h
@@ -99,6 +99,7 @@
 extern int radeon_dpm;
 extern int radeon_aspm;
 extern int radeon_runtime_pm;
+extern int radeon_hard_reset;
 
 /*
  * Copy from radeon_drv.h so we don't have to include both and have conflicting
@@ -134,11 +135,17 @@
 /* R600+ */
 #define R600_RING_TYPE_UVD_INDEX	5
 
+/* number of hw syncs before falling back on blocking */
+#define RADEON_NUM_SYNCS			4
+
 /* hardcode those limit for now */
 #define RADEON_VA_IB_OFFSET			(1 << 20)
 #define RADEON_VA_RESERVED_SIZE			(8 << 20)
 #define RADEON_IB_VM_MAX_SIZE			(64 << 10)
 
+/* hard reset data */
+#define RADEON_ASIC_RESET_DATA                  0x39d5e86b
+
 /* reset flags */
 #define RADEON_RESET_GFX			(1 << 0)
 #define RADEON_RESET_COMPUTE			(1 << 1)
@@ -252,6 +259,7 @@
  * Power management
  */
 int radeon_pm_init(struct radeon_device *rdev);
+int radeon_pm_late_init(struct radeon_device *rdev);
 void radeon_pm_fini(struct radeon_device *rdev);
 void radeon_pm_compute_clocks(struct radeon_device *rdev);
 void radeon_pm_suspend(struct radeon_device *rdev);
@@ -413,6 +421,11 @@
 	struct ttm_bo_device		bdev;
 	bool				mem_global_referenced;
 	bool				initialized;
+
+#if defined(CONFIG_DEBUG_FS)
+	struct dentry			*vram;
+	struct dentry			*gtt;
+#endif
 };
 
 /* bo virtual address in a specific vm */
@@ -544,7 +557,6 @@
 /*
  * Semaphores.
  */
-/* everything here is constant */
 struct radeon_semaphore {
 	struct radeon_sa_bo		*sa_bo;
 	signed				waiters;
@@ -779,13 +791,11 @@
 	volatile uint32_t	*ring;
 	unsigned		rptr;
 	unsigned		rptr_offs;
-	unsigned		rptr_reg;
 	unsigned		rptr_save_reg;
 	u64			next_rptr_gpu_addr;
 	volatile u32		*next_rptr_cpu_addr;
 	unsigned		wptr;
 	unsigned		wptr_old;
-	unsigned		wptr_reg;
 	unsigned		ring_size;
 	unsigned		ring_free_dw;
 	int			count_dw;
@@ -859,6 +869,8 @@
 	struct radeon_fence		*fence;
 	/* last flush or NULL if we still need to flush */
 	struct radeon_fence		*last_flush;
+	/* last use of vmid */
+	struct radeon_fence		*last_id_use;
 };
 
 struct radeon_vm_manager {
@@ -949,7 +961,7 @@
 int radeon_ring_restore(struct radeon_device *rdev, struct radeon_ring *ring,
 			unsigned size, uint32_t *data);
 int radeon_ring_init(struct radeon_device *rdev, struct radeon_ring *cp, unsigned ring_size,
-		     unsigned rptr_offs, unsigned rptr_reg, unsigned wptr_reg, u32 nop);
+		     unsigned rptr_offs, u32 nop);
 void radeon_ring_fini(struct radeon_device *rdev, struct radeon_ring *cp);
 
 
@@ -1775,6 +1787,7 @@
 		int (*init)(struct radeon_device *rdev);
 		void (*setup_asic)(struct radeon_device *rdev);
 		int (*enable)(struct radeon_device *rdev);
+		int (*late_enable)(struct radeon_device *rdev);
 		void (*disable)(struct radeon_device *rdev);
 		int (*pre_set_power_state)(struct radeon_device *rdev);
 		int (*set_power_state)(struct radeon_device *rdev);
@@ -2650,6 +2663,7 @@
 #define radeon_dpm_init(rdev) rdev->asic->dpm.init((rdev))
 #define radeon_dpm_setup_asic(rdev) rdev->asic->dpm.setup_asic((rdev))
 #define radeon_dpm_enable(rdev) rdev->asic->dpm.enable((rdev))
+#define radeon_dpm_late_enable(rdev) rdev->asic->dpm.late_enable((rdev))
 #define radeon_dpm_disable(rdev) rdev->asic->dpm.disable((rdev))
 #define radeon_dpm_pre_set_power_state(rdev) rdev->asic->dpm.pre_set_power_state((rdev))
 #define radeon_dpm_set_power_state(rdev) rdev->asic->dpm.set_power_state((rdev))
@@ -2668,6 +2682,7 @@
 /* Common functions */
 /* AGP */
 extern int radeon_gpu_reset(struct radeon_device *rdev);
+extern void radeon_pci_config_reset(struct radeon_device *rdev);
 extern void r600_set_bios_scratch_engine_hung(struct radeon_device *rdev, bool hung);
 extern void radeon_agp_disable(struct radeon_device *rdev);
 extern int radeon_modeset_init(struct radeon_device *rdev);
diff --git a/drivers/gpu/drm/radeon/radeon_asic.c b/drivers/gpu/drm/radeon/radeon_asic.c
index c0425bb..dda02bf 100644
--- a/drivers/gpu/drm/radeon/radeon_asic.c
+++ b/drivers/gpu/drm/radeon/radeon_asic.c
@@ -182,9 +182,9 @@
 	.ring_test = &r100_ring_test,
 	.ib_test = &r100_ib_test,
 	.is_lockup = &r100_gpu_is_lockup,
-	.get_rptr = &radeon_ring_generic_get_rptr,
-	.get_wptr = &radeon_ring_generic_get_wptr,
-	.set_wptr = &radeon_ring_generic_set_wptr,
+	.get_rptr = &r100_gfx_get_rptr,
+	.get_wptr = &r100_gfx_get_wptr,
+	.set_wptr = &r100_gfx_set_wptr,
 };
 
 static struct radeon_asic r100_asic = {
@@ -330,9 +330,9 @@
 	.ring_test = &r100_ring_test,
 	.ib_test = &r100_ib_test,
 	.is_lockup = &r100_gpu_is_lockup,
-	.get_rptr = &radeon_ring_generic_get_rptr,
-	.get_wptr = &radeon_ring_generic_get_wptr,
-	.set_wptr = &radeon_ring_generic_set_wptr,
+	.get_rptr = &r100_gfx_get_rptr,
+	.get_wptr = &r100_gfx_get_wptr,
+	.set_wptr = &r100_gfx_set_wptr,
 };
 
 static struct radeon_asic r300_asic = {
@@ -883,9 +883,9 @@
 	.ring_test = &r600_ring_test,
 	.ib_test = &r600_ib_test,
 	.is_lockup = &r600_gfx_is_lockup,
-	.get_rptr = &radeon_ring_generic_get_rptr,
-	.get_wptr = &radeon_ring_generic_get_wptr,
-	.set_wptr = &radeon_ring_generic_set_wptr,
+	.get_rptr = &r600_gfx_get_rptr,
+	.get_wptr = &r600_gfx_get_wptr,
+	.set_wptr = &r600_gfx_set_wptr,
 };
 
 static struct radeon_asic_ring r600_dma_ring = {
@@ -1045,6 +1045,7 @@
 		.init = &rv6xx_dpm_init,
 		.setup_asic = &rv6xx_setup_asic,
 		.enable = &rv6xx_dpm_enable,
+		.late_enable = &r600_dpm_late_enable,
 		.disable = &rv6xx_dpm_disable,
 		.pre_set_power_state = &r600_dpm_pre_set_power_state,
 		.set_power_state = &rv6xx_dpm_set_power_state,
@@ -1135,6 +1136,7 @@
 		.init = &rs780_dpm_init,
 		.setup_asic = &rs780_dpm_setup_asic,
 		.enable = &rs780_dpm_enable,
+		.late_enable = &r600_dpm_late_enable,
 		.disable = &rs780_dpm_disable,
 		.pre_set_power_state = &r600_dpm_pre_set_power_state,
 		.set_power_state = &rs780_dpm_set_power_state,
@@ -1239,6 +1241,7 @@
 		.init = &rv770_dpm_init,
 		.setup_asic = &rv770_dpm_setup_asic,
 		.enable = &rv770_dpm_enable,
+		.late_enable = &rv770_dpm_late_enable,
 		.disable = &rv770_dpm_disable,
 		.pre_set_power_state = &r600_dpm_pre_set_power_state,
 		.set_power_state = &rv770_dpm_set_power_state,
@@ -1267,9 +1270,9 @@
 	.ring_test = &r600_ring_test,
 	.ib_test = &r600_ib_test,
 	.is_lockup = &evergreen_gfx_is_lockup,
-	.get_rptr = &radeon_ring_generic_get_rptr,
-	.get_wptr = &radeon_ring_generic_get_wptr,
-	.set_wptr = &radeon_ring_generic_set_wptr,
+	.get_rptr = &r600_gfx_get_rptr,
+	.get_wptr = &r600_gfx_get_wptr,
+	.set_wptr = &r600_gfx_set_wptr,
 };
 
 static struct radeon_asic_ring evergreen_dma_ring = {
@@ -1357,6 +1360,7 @@
 		.init = &cypress_dpm_init,
 		.setup_asic = &cypress_dpm_setup_asic,
 		.enable = &cypress_dpm_enable,
+		.late_enable = &rv770_dpm_late_enable,
 		.disable = &cypress_dpm_disable,
 		.pre_set_power_state = &r600_dpm_pre_set_power_state,
 		.set_power_state = &cypress_dpm_set_power_state,
@@ -1449,6 +1453,7 @@
 		.init = &sumo_dpm_init,
 		.setup_asic = &sumo_dpm_setup_asic,
 		.enable = &sumo_dpm_enable,
+		.late_enable = &sumo_dpm_late_enable,
 		.disable = &sumo_dpm_disable,
 		.pre_set_power_state = &sumo_dpm_pre_set_power_state,
 		.set_power_state = &sumo_dpm_set_power_state,
@@ -1540,6 +1545,7 @@
 		.init = &btc_dpm_init,
 		.setup_asic = &btc_dpm_setup_asic,
 		.enable = &btc_dpm_enable,
+		.late_enable = &rv770_dpm_late_enable,
 		.disable = &btc_dpm_disable,
 		.pre_set_power_state = &btc_dpm_pre_set_power_state,
 		.set_power_state = &btc_dpm_set_power_state,
@@ -1549,7 +1555,7 @@
 		.get_sclk = &btc_dpm_get_sclk,
 		.get_mclk = &btc_dpm_get_mclk,
 		.print_power_state = &rv770_dpm_print_power_state,
-		.debugfs_print_current_performance_level = &rv770_dpm_debugfs_print_current_performance_level,
+		.debugfs_print_current_performance_level = &btc_dpm_debugfs_print_current_performance_level,
 		.force_performance_level = &rv770_dpm_force_performance_level,
 		.vblank_too_short = &btc_dpm_vblank_too_short,
 	},
@@ -1570,9 +1576,9 @@
 	.ib_test = &r600_ib_test,
 	.is_lockup = &cayman_gfx_is_lockup,
 	.vm_flush = &cayman_vm_flush,
-	.get_rptr = &radeon_ring_generic_get_rptr,
-	.get_wptr = &radeon_ring_generic_get_wptr,
-	.set_wptr = &radeon_ring_generic_set_wptr,
+	.get_rptr = &cayman_gfx_get_rptr,
+	.get_wptr = &cayman_gfx_get_wptr,
+	.set_wptr = &cayman_gfx_set_wptr,
 };
 
 static struct radeon_asic_ring cayman_dma_ring = {
@@ -1585,9 +1591,9 @@
 	.ib_test = &r600_dma_ib_test,
 	.is_lockup = &cayman_dma_is_lockup,
 	.vm_flush = &cayman_dma_vm_flush,
-	.get_rptr = &r600_dma_get_rptr,
-	.get_wptr = &r600_dma_get_wptr,
-	.set_wptr = &r600_dma_set_wptr
+	.get_rptr = &cayman_dma_get_rptr,
+	.get_wptr = &cayman_dma_get_wptr,
+	.set_wptr = &cayman_dma_set_wptr
 };
 
 static struct radeon_asic_ring cayman_uvd_ring = {
@@ -1683,6 +1689,7 @@
 		.init = &ni_dpm_init,
 		.setup_asic = &ni_dpm_setup_asic,
 		.enable = &ni_dpm_enable,
+		.late_enable = &rv770_dpm_late_enable,
 		.disable = &ni_dpm_disable,
 		.pre_set_power_state = &ni_dpm_pre_set_power_state,
 		.set_power_state = &ni_dpm_set_power_state,
@@ -1783,6 +1790,7 @@
 		.init = &trinity_dpm_init,
 		.setup_asic = &trinity_dpm_setup_asic,
 		.enable = &trinity_dpm_enable,
+		.late_enable = &trinity_dpm_late_enable,
 		.disable = &trinity_dpm_disable,
 		.pre_set_power_state = &trinity_dpm_pre_set_power_state,
 		.set_power_state = &trinity_dpm_set_power_state,
@@ -1813,9 +1821,9 @@
 	.ib_test = &r600_ib_test,
 	.is_lockup = &si_gfx_is_lockup,
 	.vm_flush = &si_vm_flush,
-	.get_rptr = &radeon_ring_generic_get_rptr,
-	.get_wptr = &radeon_ring_generic_get_wptr,
-	.set_wptr = &radeon_ring_generic_set_wptr,
+	.get_rptr = &cayman_gfx_get_rptr,
+	.get_wptr = &cayman_gfx_get_wptr,
+	.set_wptr = &cayman_gfx_set_wptr,
 };
 
 static struct radeon_asic_ring si_dma_ring = {
@@ -1828,9 +1836,9 @@
 	.ib_test = &r600_dma_ib_test,
 	.is_lockup = &si_dma_is_lockup,
 	.vm_flush = &si_dma_vm_flush,
-	.get_rptr = &r600_dma_get_rptr,
-	.get_wptr = &r600_dma_get_wptr,
-	.set_wptr = &r600_dma_set_wptr,
+	.get_rptr = &cayman_dma_get_rptr,
+	.get_wptr = &cayman_dma_get_wptr,
+	.set_wptr = &cayman_dma_set_wptr,
 };
 
 static struct radeon_asic si_asic = {
@@ -1913,6 +1921,7 @@
 		.init = &si_dpm_init,
 		.setup_asic = &si_dpm_setup_asic,
 		.enable = &si_dpm_enable,
+		.late_enable = &si_dpm_late_enable,
 		.disable = &si_dpm_disable,
 		.pre_set_power_state = &si_dpm_pre_set_power_state,
 		.set_power_state = &si_dpm_set_power_state,
@@ -1943,9 +1952,9 @@
 	.ib_test = &cik_ib_test,
 	.is_lockup = &cik_gfx_is_lockup,
 	.vm_flush = &cik_vm_flush,
-	.get_rptr = &radeon_ring_generic_get_rptr,
-	.get_wptr = &radeon_ring_generic_get_wptr,
-	.set_wptr = &radeon_ring_generic_set_wptr,
+	.get_rptr = &cik_gfx_get_rptr,
+	.get_wptr = &cik_gfx_get_wptr,
+	.set_wptr = &cik_gfx_set_wptr,
 };
 
 static struct radeon_asic_ring ci_cp_ring = {
@@ -1958,9 +1967,9 @@
 	.ib_test = &cik_ib_test,
 	.is_lockup = &cik_gfx_is_lockup,
 	.vm_flush = &cik_vm_flush,
-	.get_rptr = &cik_compute_ring_get_rptr,
-	.get_wptr = &cik_compute_ring_get_wptr,
-	.set_wptr = &cik_compute_ring_set_wptr,
+	.get_rptr = &cik_compute_get_rptr,
+	.get_wptr = &cik_compute_get_wptr,
+	.set_wptr = &cik_compute_set_wptr,
 };
 
 static struct radeon_asic_ring ci_dma_ring = {
@@ -1973,9 +1982,9 @@
 	.ib_test = &cik_sdma_ib_test,
 	.is_lockup = &cik_sdma_is_lockup,
 	.vm_flush = &cik_dma_vm_flush,
-	.get_rptr = &r600_dma_get_rptr,
-	.get_wptr = &r600_dma_get_wptr,
-	.set_wptr = &r600_dma_set_wptr,
+	.get_rptr = &cik_sdma_get_rptr,
+	.get_wptr = &cik_sdma_get_wptr,
+	.set_wptr = &cik_sdma_set_wptr,
 };
 
 static struct radeon_asic ci_asic = {
@@ -2058,6 +2067,7 @@
 		.init = &ci_dpm_init,
 		.setup_asic = &ci_dpm_setup_asic,
 		.enable = &ci_dpm_enable,
+		.late_enable = &ci_dpm_late_enable,
 		.disable = &ci_dpm_disable,
 		.pre_set_power_state = &ci_dpm_pre_set_power_state,
 		.set_power_state = &ci_dpm_set_power_state,
@@ -2159,6 +2169,7 @@
 		.init = &kv_dpm_init,
 		.setup_asic = &kv_dpm_setup_asic,
 		.enable = &kv_dpm_enable,
+		.late_enable = &kv_dpm_late_enable,
 		.disable = &kv_dpm_disable,
 		.pre_set_power_state = &kv_dpm_pre_set_power_state,
 		.set_power_state = &kv_dpm_set_power_state,
@@ -2449,7 +2460,7 @@
 			rdev->cg_flags =
 				RADEON_CG_SUPPORT_GFX_MGCG |
 				RADEON_CG_SUPPORT_GFX_MGLS |
-				/*RADEON_CG_SUPPORT_GFX_CGCG |*/
+				RADEON_CG_SUPPORT_GFX_CGCG |
 				RADEON_CG_SUPPORT_GFX_CGLS |
 				RADEON_CG_SUPPORT_GFX_CGTS |
 				RADEON_CG_SUPPORT_GFX_CGTS_LS |
@@ -2468,7 +2479,7 @@
 			rdev->cg_flags =
 				RADEON_CG_SUPPORT_GFX_MGCG |
 				RADEON_CG_SUPPORT_GFX_MGLS |
-				/*RADEON_CG_SUPPORT_GFX_CGCG |*/
+				RADEON_CG_SUPPORT_GFX_CGCG |
 				RADEON_CG_SUPPORT_GFX_CGLS |
 				RADEON_CG_SUPPORT_GFX_CGTS |
 				RADEON_CG_SUPPORT_GFX_CP_LS |
@@ -2493,7 +2504,7 @@
 			rdev->cg_flags =
 				RADEON_CG_SUPPORT_GFX_MGCG |
 				RADEON_CG_SUPPORT_GFX_MGLS |
-				/*RADEON_CG_SUPPORT_GFX_CGCG |*/
+				RADEON_CG_SUPPORT_GFX_CGCG |
 				RADEON_CG_SUPPORT_GFX_CGLS |
 				RADEON_CG_SUPPORT_GFX_CGTS |
 				RADEON_CG_SUPPORT_GFX_CGTS_LS |
@@ -2521,7 +2532,7 @@
 			rdev->cg_flags =
 				RADEON_CG_SUPPORT_GFX_MGCG |
 				RADEON_CG_SUPPORT_GFX_MGLS |
-				/*RADEON_CG_SUPPORT_GFX_CGCG |*/
+				RADEON_CG_SUPPORT_GFX_CGCG |
 				RADEON_CG_SUPPORT_GFX_CGLS |
 				RADEON_CG_SUPPORT_GFX_CGTS |
 				RADEON_CG_SUPPORT_GFX_CGTS_LS |
diff --git a/drivers/gpu/drm/radeon/radeon_asic.h b/drivers/gpu/drm/radeon/radeon_asic.h
index c9fd97b..ae637cf 100644
--- a/drivers/gpu/drm/radeon/radeon_asic.h
+++ b/drivers/gpu/drm/radeon/radeon_asic.h
@@ -47,13 +47,6 @@
 void radeon_legacy_set_backlight_level(struct radeon_encoder *radeon_encoder, u8 level);
 u8 radeon_legacy_get_backlight_level(struct radeon_encoder *radeon_encoder);
 
-u32 radeon_ring_generic_get_rptr(struct radeon_device *rdev,
-				 struct radeon_ring *ring);
-u32 radeon_ring_generic_get_wptr(struct radeon_device *rdev,
-				 struct radeon_ring *ring);
-void radeon_ring_generic_set_wptr(struct radeon_device *rdev,
-				  struct radeon_ring *ring);
-
 /*
  * r100,rv100,rs100,rv200,rs200
  */
@@ -148,6 +141,13 @@
 extern void r100_wait_for_vblank(struct radeon_device *rdev, int crtc);
 extern int r100_mc_wait_for_idle(struct radeon_device *rdev);
 
+u32 r100_gfx_get_rptr(struct radeon_device *rdev,
+		      struct radeon_ring *ring);
+u32 r100_gfx_get_wptr(struct radeon_device *rdev,
+		      struct radeon_ring *ring);
+void r100_gfx_set_wptr(struct radeon_device *rdev,
+		       struct radeon_ring *ring);
+
 /*
  * r200,rv250,rs300,rv280
  */
@@ -368,6 +368,12 @@
 int r600_pcie_gart_init(struct radeon_device *rdev);
 void r600_scratch_init(struct radeon_device *rdev);
 int r600_init_microcode(struct radeon_device *rdev);
+u32 r600_gfx_get_rptr(struct radeon_device *rdev,
+		      struct radeon_ring *ring);
+u32 r600_gfx_get_wptr(struct radeon_device *rdev,
+		      struct radeon_ring *ring);
+void r600_gfx_set_wptr(struct radeon_device *rdev,
+		       struct radeon_ring *ring);
 /* r600 irq */
 int r600_irq_process(struct radeon_device *rdev);
 int r600_irq_init(struct radeon_device *rdev);
@@ -392,6 +398,7 @@
 int r600_set_uvd_clocks(struct radeon_device *rdev, u32 vclk, u32 dclk);
 int r600_dpm_pre_set_power_state(struct radeon_device *rdev);
 void r600_dpm_post_set_power_state(struct radeon_device *rdev);
+int r600_dpm_late_enable(struct radeon_device *rdev);
 /* r600 dma */
 uint32_t r600_dma_get_rptr(struct radeon_device *rdev,
 			   struct radeon_ring *ring);
@@ -454,6 +461,7 @@
 /* rv7xx pm */
 int rv770_dpm_init(struct radeon_device *rdev);
 int rv770_dpm_enable(struct radeon_device *rdev);
+int rv770_dpm_late_enable(struct radeon_device *rdev);
 void rv770_dpm_disable(struct radeon_device *rdev);
 int rv770_dpm_set_power_state(struct radeon_device *rdev);
 void rv770_dpm_setup_asic(struct radeon_device *rdev);
@@ -543,8 +551,11 @@
 u32 btc_dpm_get_sclk(struct radeon_device *rdev, bool low);
 u32 btc_dpm_get_mclk(struct radeon_device *rdev, bool low);
 bool btc_dpm_vblank_too_short(struct radeon_device *rdev);
+void btc_dpm_debugfs_print_current_performance_level(struct radeon_device *rdev,
+						     struct seq_file *m);
 int sumo_dpm_init(struct radeon_device *rdev);
 int sumo_dpm_enable(struct radeon_device *rdev);
+int sumo_dpm_late_enable(struct radeon_device *rdev);
 void sumo_dpm_disable(struct radeon_device *rdev);
 int sumo_dpm_pre_set_power_state(struct radeon_device *rdev);
 int sumo_dpm_set_power_state(struct radeon_device *rdev);
@@ -591,6 +602,19 @@
 
 void cayman_dma_vm_flush(struct radeon_device *rdev, int ridx, struct radeon_vm *vm);
 
+u32 cayman_gfx_get_rptr(struct radeon_device *rdev,
+			struct radeon_ring *ring);
+u32 cayman_gfx_get_wptr(struct radeon_device *rdev,
+			struct radeon_ring *ring);
+void cayman_gfx_set_wptr(struct radeon_device *rdev,
+			 struct radeon_ring *ring);
+uint32_t cayman_dma_get_rptr(struct radeon_device *rdev,
+			     struct radeon_ring *ring);
+uint32_t cayman_dma_get_wptr(struct radeon_device *rdev,
+			     struct radeon_ring *ring);
+void cayman_dma_set_wptr(struct radeon_device *rdev,
+			 struct radeon_ring *ring);
+
 int ni_dpm_init(struct radeon_device *rdev);
 void ni_dpm_setup_asic(struct radeon_device *rdev);
 int ni_dpm_enable(struct radeon_device *rdev);
@@ -610,6 +634,7 @@
 bool ni_dpm_vblank_too_short(struct radeon_device *rdev);
 int trinity_dpm_init(struct radeon_device *rdev);
 int trinity_dpm_enable(struct radeon_device *rdev);
+int trinity_dpm_late_enable(struct radeon_device *rdev);
 void trinity_dpm_disable(struct radeon_device *rdev);
 int trinity_dpm_pre_set_power_state(struct radeon_device *rdev);
 int trinity_dpm_set_power_state(struct radeon_device *rdev);
@@ -669,6 +694,7 @@
 int si_dpm_init(struct radeon_device *rdev);
 void si_dpm_setup_asic(struct radeon_device *rdev);
 int si_dpm_enable(struct radeon_device *rdev);
+int si_dpm_late_enable(struct radeon_device *rdev);
 void si_dpm_disable(struct radeon_device *rdev);
 int si_dpm_pre_set_power_state(struct radeon_device *rdev);
 int si_dpm_set_power_state(struct radeon_device *rdev);
@@ -739,17 +765,30 @@
 			  uint32_t incr, uint32_t flags);
 void cik_dma_vm_flush(struct radeon_device *rdev, int ridx, struct radeon_vm *vm);
 int cik_ib_parse(struct radeon_device *rdev, struct radeon_ib *ib);
-u32 cik_compute_ring_get_rptr(struct radeon_device *rdev,
-			      struct radeon_ring *ring);
-u32 cik_compute_ring_get_wptr(struct radeon_device *rdev,
-			      struct radeon_ring *ring);
-void cik_compute_ring_set_wptr(struct radeon_device *rdev,
-			       struct radeon_ring *ring);
+u32 cik_gfx_get_rptr(struct radeon_device *rdev,
+		     struct radeon_ring *ring);
+u32 cik_gfx_get_wptr(struct radeon_device *rdev,
+		     struct radeon_ring *ring);
+void cik_gfx_set_wptr(struct radeon_device *rdev,
+		      struct radeon_ring *ring);
+u32 cik_compute_get_rptr(struct radeon_device *rdev,
+			 struct radeon_ring *ring);
+u32 cik_compute_get_wptr(struct radeon_device *rdev,
+			 struct radeon_ring *ring);
+void cik_compute_set_wptr(struct radeon_device *rdev,
+			  struct radeon_ring *ring);
+u32 cik_sdma_get_rptr(struct radeon_device *rdev,
+		      struct radeon_ring *ring);
+u32 cik_sdma_get_wptr(struct radeon_device *rdev,
+		      struct radeon_ring *ring);
+void cik_sdma_set_wptr(struct radeon_device *rdev,
+		       struct radeon_ring *ring);
 int ci_get_temp(struct radeon_device *rdev);
 int kv_get_temp(struct radeon_device *rdev);
 
 int ci_dpm_init(struct radeon_device *rdev);
 int ci_dpm_enable(struct radeon_device *rdev);
+int ci_dpm_late_enable(struct radeon_device *rdev);
 void ci_dpm_disable(struct radeon_device *rdev);
 int ci_dpm_pre_set_power_state(struct radeon_device *rdev);
 int ci_dpm_set_power_state(struct radeon_device *rdev);
@@ -770,6 +809,7 @@
 
 int kv_dpm_init(struct radeon_device *rdev);
 int kv_dpm_enable(struct radeon_device *rdev);
+int kv_dpm_late_enable(struct radeon_device *rdev);
 void kv_dpm_disable(struct radeon_device *rdev);
 int kv_dpm_pre_set_power_state(struct radeon_device *rdev);
 int kv_dpm_set_power_state(struct radeon_device *rdev);
diff --git a/drivers/gpu/drm/radeon/radeon_atombios.c b/drivers/gpu/drm/radeon/radeon_atombios.c
index 5c39bf7..3084481 100644
--- a/drivers/gpu/drm/radeon/radeon_atombios.c
+++ b/drivers/gpu/drm/radeon/radeon_atombios.c
@@ -30,27 +30,10 @@
 #include "atom.h"
 #include "atom-bits.h"
 
-/* from radeon_encoder.c */
-extern uint32_t
-radeon_get_encoder_enum(struct drm_device *dev, uint32_t supported_device,
-			uint8_t dac);
-extern void radeon_link_encoder_connector(struct drm_device *dev);
 extern void
 radeon_add_atom_encoder(struct drm_device *dev, uint32_t encoder_enum,
 			uint32_t supported_device, u16 caps);
 
-/* from radeon_connector.c */
-extern void
-radeon_add_atom_connector(struct drm_device *dev,
-			  uint32_t connector_id,
-			  uint32_t supported_device,
-			  int connector_type,
-			  struct radeon_i2c_bus_rec *i2c_bus,
-			  uint32_t igp_lane_info,
-			  uint16_t connector_object_id,
-			  struct radeon_hpd *hpd,
-			  struct radeon_router *router);
-
 /* from radeon_legacy_encoder.c */
 extern void
 radeon_add_legacy_encoder(struct drm_device *dev, uint32_t encoder_enum,
@@ -1528,6 +1511,7 @@
 						le16_to_cpu(ss_assign->v1.usSpreadSpectrumPercentage);
 					ss->type = ss_assign->v1.ucSpreadSpectrumMode;
 					ss->rate = le16_to_cpu(ss_assign->v1.usSpreadRateInKhz);
+					ss->percentage_divider = 100;
 					return true;
 				}
 				ss_assign = (union asic_ss_assignment *)
@@ -1545,6 +1529,7 @@
 						le16_to_cpu(ss_assign->v2.usSpreadSpectrumPercentage);
 					ss->type = ss_assign->v2.ucSpreadSpectrumMode;
 					ss->rate = le16_to_cpu(ss_assign->v2.usSpreadRateIn10Hz);
+					ss->percentage_divider = 100;
 					if ((crev == 2) &&
 					    ((id == ASIC_INTERNAL_ENGINE_SS) ||
 					     (id == ASIC_INTERNAL_MEMORY_SS)))
@@ -1566,6 +1551,11 @@
 						le16_to_cpu(ss_assign->v3.usSpreadSpectrumPercentage);
 					ss->type = ss_assign->v3.ucSpreadSpectrumMode;
 					ss->rate = le16_to_cpu(ss_assign->v3.usSpreadRateIn10Hz);
+					if (ss_assign->v3.ucSpreadSpectrumMode &
+					    SS_MODE_V3_PERCENTAGE_DIV_BY_1000_MASK)
+						ss->percentage_divider = 1000;
+					else
+						ss->percentage_divider = 100;
 					if ((id == ASIC_INTERNAL_ENGINE_SS) ||
 					    (id == ASIC_INTERNAL_MEMORY_SS))
 						ss->rate /= 100;
@@ -1809,7 +1799,8 @@
 		if (misc & ATOM_DOUBLE_CLOCK_MODE)
 			mode->flags |= DRM_MODE_FLAG_DBLSCAN;
 
-		mode->clock = le16_to_cpu(tv_info->aModeTimings[index].usPixelClock) * 10;
+		mode->crtc_clock = mode->clock =
+			le16_to_cpu(tv_info->aModeTimings[index].usPixelClock) * 10;
 
 		if (index == 1) {
 			/* PAL timings appear to have wrong values for totals */
@@ -1852,7 +1843,8 @@
 		if (misc & ATOM_DOUBLE_CLOCK_MODE)
 			mode->flags |= DRM_MODE_FLAG_DBLSCAN;
 
-		mode->clock = le16_to_cpu(dtd_timings->usPixClk) * 10;
+		mode->crtc_clock = mode->clock =
+			le16_to_cpu(dtd_timings->usPixClk) * 10;
 		break;
 	}
 	return true;
@@ -3884,16 +3876,18 @@
 							((u8 *)format + sizeof(ATOM_INIT_REG_INDEX_FORMAT));
 					}
 					reg_table->last = i;
-					while ((*(u32 *)reg_data != END_OF_REG_DATA_BLOCK) &&
+					while ((le32_to_cpu(*(u32 *)reg_data) != END_OF_REG_DATA_BLOCK) &&
 					       (num_ranges < VBIOS_MAX_AC_TIMING_ENTRIES)) {
-						t_mem_id = (u8)((*(u32 *)reg_data & MEM_ID_MASK) >> MEM_ID_SHIFT);
+						t_mem_id = (u8)((le32_to_cpu(*(u32 *)reg_data) & MEM_ID_MASK)
+								>> MEM_ID_SHIFT);
 						if (module_index == t_mem_id) {
 							reg_table->mc_reg_table_entry[num_ranges].mclk_max =
-								(u32)((*(u32 *)reg_data & CLOCK_RANGE_MASK) >> CLOCK_RANGE_SHIFT);
+								(u32)((le32_to_cpu(*(u32 *)reg_data) & CLOCK_RANGE_MASK)
+								      >> CLOCK_RANGE_SHIFT);
 							for (i = 0, j = 1; i < reg_table->last; i++) {
 								if ((reg_table->mc_reg_address[i].pre_reg_data & LOW_NIBBLE_MASK) == DATA_FROM_TABLE) {
 									reg_table->mc_reg_table_entry[num_ranges].mc_data[i] =
-										(u32)*((u32 *)reg_data + j);
+										(u32)le32_to_cpu(*((u32 *)reg_data + j));
 									j++;
 								} else if ((reg_table->mc_reg_address[i].pre_reg_data & LOW_NIBBLE_MASK) == DATA_EQU_PREV) {
 									reg_table->mc_reg_table_entry[num_ranges].mc_data[i] =
@@ -3905,7 +3899,7 @@
 						reg_data = (ATOM_MEMORY_SETTING_DATA_BLOCK *)
 							((u8 *)reg_data + le16_to_cpu(reg_block->usRegDataBlkSize));
 					}
-					if (*(u32 *)reg_data != END_OF_REG_DATA_BLOCK)
+					if (le32_to_cpu(*(u32 *)reg_data) != END_OF_REG_DATA_BLOCK)
 						return -EINVAL;
 					reg_table->num_entries = num_ranges;
 				} else
@@ -3944,6 +3938,10 @@
 	/* tell the bios not to handle mode switching */
 	bios_6_scratch |= ATOM_S6_ACC_BLOCK_DISPLAY_SWITCH;
 
+	/* clear the vbios dpms state */
+	if (ASIC_IS_DCE4(rdev))
+		bios_2_scratch &= ~ATOM_S2_DEVICE_DPMS_STATE;
+
 	if (rdev->family >= CHIP_R600) {
 		WREG32(R600_BIOS_2_SCRATCH, bios_2_scratch);
 		WREG32(R600_BIOS_6_SCRATCH, bios_6_scratch);
diff --git a/drivers/gpu/drm/radeon/radeon_combios.c b/drivers/gpu/drm/radeon/radeon_combios.c
index 68ce360..6651177 100644
--- a/drivers/gpu/drm/radeon/radeon_combios.c
+++ b/drivers/gpu/drm/radeon/radeon_combios.c
@@ -37,22 +37,6 @@
 #include <asm/pci-bridge.h>
 #endif /* CONFIG_PPC_PMAC */
 
-/* from radeon_encoder.c */
-extern uint32_t
-radeon_get_encoder_enum(struct drm_device *dev, uint32_t supported_device,
-			uint8_t dac);
-extern void radeon_link_encoder_connector(struct drm_device *dev);
-
-/* from radeon_connector.c */
-extern void
-radeon_add_legacy_connector(struct drm_device *dev,
-			    uint32_t connector_id,
-			    uint32_t supported_device,
-			    int connector_type,
-			    struct radeon_i2c_bus_rec *i2c_bus,
-			    uint16_t connector_object_id,
-			    struct radeon_hpd *hpd);
-
 /* from radeon_legacy_encoder.c */
 extern void
 radeon_add_legacy_encoder(struct drm_device *dev, uint32_t encoder_enum,
diff --git a/drivers/gpu/drm/radeon/radeon_connectors.c b/drivers/gpu/drm/radeon/radeon_connectors.c
index 20a768a..82d4f86 100644
--- a/drivers/gpu/drm/radeon/radeon_connectors.c
+++ b/drivers/gpu/drm/radeon/radeon_connectors.c
@@ -33,15 +33,6 @@
 
 #include <linux/pm_runtime.h>
 
-extern void
-radeon_combios_connected_scratch_regs(struct drm_connector *connector,
-				      struct drm_encoder *encoder,
-				      bool connected);
-extern void
-radeon_atombios_connected_scratch_regs(struct drm_connector *connector,
-				       struct drm_encoder *encoder,
-				       bool connected);
-
 void radeon_connector_hotplug(struct drm_connector *connector)
 {
 	struct drm_device *dev = connector->dev;
diff --git a/drivers/gpu/drm/radeon/radeon_cp.c b/drivers/gpu/drm/radeon/radeon_cp.c
index 3cae2bb..bb0d5c3 100644
--- a/drivers/gpu/drm/radeon/radeon_cp.c
+++ b/drivers/gpu/drm/radeon/radeon_cp.c
@@ -2020,10 +2020,10 @@
 
 		buf->file_priv = file_priv;
 
-		if (DRM_COPY_TO_USER(&d->request_indices[i], &buf->idx,
+		if (copy_to_user(&d->request_indices[i], &buf->idx,
 				     sizeof(buf->idx)))
 			return -EFAULT;
-		if (DRM_COPY_TO_USER(&d->request_sizes[i], &buf->total,
+		if (copy_to_user(&d->request_sizes[i], &buf->total,
 				     sizeof(buf->total)))
 			return -EFAULT;
 
@@ -2228,7 +2228,7 @@
 
 	dev_priv->ring.tail &= dev_priv->ring.tail_mask;
 
-	DRM_MEMORYBARRIER();
+	mb();
 	GET_RING_HEAD( dev_priv );
 
 	if ((dev_priv->flags & RADEON_FAMILY_MASK) >= CHIP_R600) {
diff --git a/drivers/gpu/drm/radeon/radeon_cs.c b/drivers/gpu/drm/radeon/radeon_cs.c
index 0b36616..dfb5a1d 100644
--- a/drivers/gpu/drm/radeon/radeon_cs.c
+++ b/drivers/gpu/drm/radeon/radeon_cs.c
@@ -138,7 +138,7 @@
 				p->ring = R600_RING_TYPE_DMA_INDEX;
 			else
 				p->ring = CAYMAN_RING_TYPE_DMA1_INDEX;
-		} else if (p->rdev->family >= CHIP_R600) {
+		} else if (p->rdev->family >= CHIP_RV770) {
 			p->ring = R600_RING_TYPE_DMA_INDEX;
 		} else {
 			return -EINVAL;
@@ -192,7 +192,7 @@
 		return -ENOMEM;
 	}
 	chunk_array_ptr = (uint64_t *)(unsigned long)(cs->chunks);
-	if (DRM_COPY_FROM_USER(p->chunks_array, chunk_array_ptr,
+	if (copy_from_user(p->chunks_array, chunk_array_ptr,
 			       sizeof(uint64_t)*cs->num_chunks)) {
 		return -EFAULT;
 	}
@@ -208,7 +208,7 @@
 		uint32_t __user *cdata;
 
 		chunk_ptr = (void __user*)(unsigned long)p->chunks_array[i];
-		if (DRM_COPY_FROM_USER(&user_chunk, chunk_ptr,
+		if (copy_from_user(&user_chunk, chunk_ptr,
 				       sizeof(struct drm_radeon_cs_chunk))) {
 			return -EFAULT;
 		}
@@ -252,7 +252,7 @@
 		if (p->chunks[i].kdata == NULL) {
 			return -ENOMEM;
 		}
-		if (DRM_COPY_FROM_USER(p->chunks[i].kdata, cdata, size)) {
+		if (copy_from_user(p->chunks[i].kdata, cdata, size)) {
 			return -EFAULT;
 		}
 		if (p->chunks[i].chunk_id == RADEON_CHUNK_ID_FLAGS) {
@@ -472,7 +472,7 @@
 			}
 			parser->const_ib.is_const_ib = true;
 			parser->const_ib.length_dw = ib_chunk->length_dw;
-			if (DRM_COPY_FROM_USER(parser->const_ib.ptr,
+			if (copy_from_user(parser->const_ib.ptr,
 					       ib_chunk->user_ptr,
 					       ib_chunk->length_dw * 4))
 				return -EFAULT;
@@ -495,7 +495,7 @@
 	parser->ib.length_dw = ib_chunk->length_dw;
 	if (ib_chunk->kdata)
 		memcpy(parser->ib.ptr, ib_chunk->kdata, ib_chunk->length_dw * 4);
-	else if (DRM_COPY_FROM_USER(parser->ib.ptr, ib_chunk->user_ptr, ib_chunk->length_dw * 4))
+	else if (copy_from_user(parser->ib.ptr, ib_chunk->user_ptr, ib_chunk->length_dw * 4))
 		return -EFAULT;
 	return 0;
 }
diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c
index 39b033b..b012cbb 100644
--- a/drivers/gpu/drm/radeon/radeon_device.c
+++ b/drivers/gpu/drm/radeon/radeon_device.c
@@ -144,6 +144,11 @@
 	}
 }
 
+void radeon_pci_config_reset(struct radeon_device *rdev)
+{
+	pci_write_config_dword(rdev->pdev, 0x7c, RADEON_ASIC_RESET_DATA);
+}
+
 /**
  * radeon_surface_init - Clear GPU surface registers.
  *
@@ -249,7 +254,7 @@
  * Init doorbell driver information (CIK)
  * Returns 0 on success, error on failure.
  */
-int radeon_doorbell_init(struct radeon_device *rdev)
+static int radeon_doorbell_init(struct radeon_device *rdev)
 {
 	/* doorbell bar mapping */
 	rdev->doorbell.base = pci_resource_start(rdev->pdev, 2);
@@ -278,7 +283,7 @@
  *
  * Tear down doorbell driver information (CIK)
  */
-void radeon_doorbell_fini(struct radeon_device *rdev)
+static void radeon_doorbell_fini(struct radeon_device *rdev)
 {
 	iounmap(rdev->doorbell.ptr);
 	rdev->doorbell.ptr = NULL;
@@ -1330,6 +1335,7 @@
 		if (r)
 			return r;
 	}
+
 	if ((radeon_testing & 1)) {
 		if (rdev->accel_working)
 			radeon_test_moves(rdev);
@@ -1455,7 +1461,6 @@
 
 	radeon_save_bios_scratch_regs(rdev);
 
-	radeon_pm_suspend(rdev);
 	radeon_suspend(rdev);
 	radeon_hpd_fini(rdev);
 	/* evict remaining vram memory */
@@ -1516,14 +1521,22 @@
 	if (r)
 		DRM_ERROR("ib ring test failed (%d).\n", r);
 
-	radeon_pm_resume(rdev);
+	if (rdev->pm.dpm_enabled) {
+		/* do dpm late init */
+		r = radeon_pm_late_init(rdev);
+		if (r) {
+			rdev->pm.dpm_enabled = false;
+			DRM_ERROR("radeon_pm_late_init failed, disabling dpm\n");
+		}
+	}
+
 	radeon_restore_bios_scratch_regs(rdev);
 
 	if (fbcon) {
 		radeon_fbdev_set_suspend(rdev, 0);
 		console_unlock();
 	}
-       
+
 	/* init dig PHYs, disp eng pll */
 	if (rdev->is_atom_bios) {
 		radeon_atom_encoder_init(rdev);
diff --git a/drivers/gpu/drm/radeon/radeon_display.c b/drivers/gpu/drm/radeon/radeon_display.c
index 7b25381..fbd8b93 100644
--- a/drivers/gpu/drm/radeon/radeon_display.c
+++ b/drivers/gpu/drm/radeon/radeon_display.c
@@ -306,7 +306,7 @@
 	 * to complete in this vblank?
 	 */
 	if (update_pending &&
-	    (DRM_SCANOUTPOS_VALID & radeon_get_crtc_scanoutpos(rdev->ddev, crtc_id,
+	    (DRM_SCANOUTPOS_VALID & radeon_get_crtc_scanoutpos(rdev->ddev, crtc_id, 0,
 							       &vpos, &hpos, NULL, NULL)) &&
 	    ((vpos >= (99 * rdev->mode_info.crtcs[crtc_id]->base.hwmode.crtc_vdisplay)/100) ||
 	     (vpos < 0 && !ASIC_IS_AVIVO(rdev)))) {
@@ -571,6 +571,8 @@
 		radeon_crtc->max_cursor_width = CURSOR_WIDTH;
 		radeon_crtc->max_cursor_height = CURSOR_HEIGHT;
 	}
+	dev->mode_config.cursor_width = radeon_crtc->max_cursor_width;
+	dev->mode_config.cursor_height = radeon_crtc->max_cursor_height;
 
 #if 0
 	radeon_crtc->mode_set.crtc = &radeon_crtc->base;
@@ -1464,12 +1466,22 @@
 	/* setup afmt */
 	radeon_afmt_init(rdev);
 
-	/* Initialize power management */
-	radeon_pm_init(rdev);
-
 	radeon_fbdev_init(rdev);
 	drm_kms_helper_poll_init(rdev->ddev);
 
+	if (rdev->pm.dpm_enabled) {
+		/* do dpm late init */
+		ret = radeon_pm_late_init(rdev);
+		if (ret) {
+			rdev->pm.dpm_enabled = false;
+			DRM_ERROR("radeon_pm_late_init failed, disabling dpm\n");
+		}
+		/* set the dpm state for PX since there won't be
+		 * a modeset to call this.
+		 */
+		radeon_pm_compute_clocks(rdev);
+	}
+
 	return 0;
 }
 
@@ -1477,7 +1489,6 @@
 {
 	radeon_fbdev_fini(rdev);
 	kfree(rdev->mode_info.bios_hardcoded_edid);
-	radeon_pm_fini(rdev);
 
 	if (rdev->mode_info.mode_config_initialized) {
 		radeon_afmt_fini(rdev);
@@ -1601,6 +1612,7 @@
  *
  * \param dev Device to query.
  * \param crtc Crtc to query.
+ * \param flags Flags from caller (DRM_CALLED_FROM_VBLIRQ or 0).
  * \param *vpos Location where vertical scanout position should be stored.
  * \param *hpos Location where horizontal scanout position should go.
  * \param *stime Target location for timestamp taken immediately before
@@ -1622,8 +1634,8 @@
  * unknown small number of scanlines wrt. real scanout position.
  *
  */
-int radeon_get_crtc_scanoutpos(struct drm_device *dev, int crtc, int *vpos, int *hpos,
-			       ktime_t *stime, ktime_t *etime)
+int radeon_get_crtc_scanoutpos(struct drm_device *dev, int crtc, unsigned int flags,
+			       int *vpos, int *hpos, ktime_t *stime, ktime_t *etime)
 {
 	u32 stat_crtc = 0, vbl = 0, position = 0;
 	int vbl_start, vbl_end, vtotal, ret = 0;
@@ -1765,5 +1777,27 @@
 	if (in_vbl)
 		ret |= DRM_SCANOUTPOS_INVBL;
 
+	/* Is vpos outside nominal vblank area, but less than
+	 * 1/100 of a frame height away from start of vblank?
+	 * If so, assume this isn't a massively delayed vblank
+	 * interrupt, but a vblank interrupt that fired a few
+	 * microseconds before true start of vblank. Compensate
+	 * by adding a full frame duration to the final timestamp.
+	 * Happens, e.g., on ATI R500, R600.
+	 *
+	 * We only do this if DRM_CALLED_FROM_VBLIRQ.
+	 */
+	if ((flags & DRM_CALLED_FROM_VBLIRQ) && !in_vbl) {
+		vbl_start = rdev->mode_info.crtcs[crtc]->base.hwmode.crtc_vdisplay;
+		vtotal = rdev->mode_info.crtcs[crtc]->base.hwmode.crtc_vtotal;
+
+		if (vbl_start - *vpos < vtotal / 100) {
+			*vpos -= vtotal;
+
+			/* Signal this correction as "applied". */
+			ret |= 0x8;
+		}
+	}
+
 	return ret;
 }
diff --git a/drivers/gpu/drm/radeon/radeon_drv.c b/drivers/gpu/drm/radeon/radeon_drv.c
index db39ea3..84a1bbb7 100644
--- a/drivers/gpu/drm/radeon/radeon_drv.c
+++ b/drivers/gpu/drm/radeon/radeon_drv.c
@@ -78,9 +78,10 @@
  *   2.34.0 - Add CIK tiling mode array query
  *   2.35.0 - Add CIK macrotile mode array query
  *   2.36.0 - Fix CIK DCE tiling setup
+ *   2.37.0 - allow GS ring setup on r6xx/r7xx
  */
 #define KMS_DRIVER_MAJOR	2
-#define KMS_DRIVER_MINOR	36
+#define KMS_DRIVER_MINOR	37
 #define KMS_DRIVER_PATCHLEVEL	0
 int radeon_driver_load_kms(struct drm_device *dev, unsigned long flags);
 int radeon_driver_unload_kms(struct drm_device *dev);
@@ -102,13 +103,14 @@
 void radeon_driver_irq_preinstall_kms(struct drm_device *dev);
 int radeon_driver_irq_postinstall_kms(struct drm_device *dev);
 void radeon_driver_irq_uninstall_kms(struct drm_device *dev);
-irqreturn_t radeon_driver_irq_handler_kms(DRM_IRQ_ARGS);
+irqreturn_t radeon_driver_irq_handler_kms(int irq, void *arg);
 void radeon_gem_object_free(struct drm_gem_object *obj);
 int radeon_gem_object_open(struct drm_gem_object *obj,
 				struct drm_file *file_priv);
 void radeon_gem_object_close(struct drm_gem_object *obj,
 				struct drm_file *file_priv);
 extern int radeon_get_crtc_scanoutpos(struct drm_device *dev, int crtc,
+				      unsigned int flags,
 				      int *vpos, int *hpos, ktime_t *stime,
 				      ktime_t *etime);
 extern const struct drm_ioctl_desc radeon_ioctls_kms[];
@@ -168,6 +170,7 @@
 int radeon_dpm = -1;
 int radeon_aspm = -1;
 int radeon_runtime_pm = -1;
+int radeon_hard_reset = 0;
 
 MODULE_PARM_DESC(no_wb, "Disable AGP writeback for scratch registers");
 module_param_named(no_wb, radeon_no_wb, int, 0444);
@@ -232,6 +235,9 @@
 MODULE_PARM_DESC(runpm, "PX runtime pm (1 = force enable, 0 = disable, -1 = PX only default)");
 module_param_named(runpm, radeon_runtime_pm, int, 0444);
 
+MODULE_PARM_DESC(hard_reset, "PCI config reset (1 = force enable, 0 = disable (default))");
+module_param_named(hard_reset, radeon_hard_reset, int, 0444);
+
 static struct pci_device_id pciidlist[] = {
 	radeon_PCI_IDS
 };
@@ -400,6 +406,9 @@
 	if (radeon_runtime_pm == 0)
 		return -EINVAL;
 
+	if (radeon_runtime_pm == -1 && !radeon_is_px())
+		return -EINVAL;
+
 	drm_dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;
 	drm_kms_helper_poll_disable(drm_dev);
 	vga_switcheroo_set_dynamic_switch(pdev, VGA_SWITCHEROO_OFF);
@@ -422,6 +431,9 @@
 	if (radeon_runtime_pm == 0)
 		return -EINVAL;
 
+	if (radeon_runtime_pm == -1 && !radeon_is_px())
+		return -EINVAL;
+
 	drm_dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;
 
 	pci_set_power_state(pdev, PCI_D0);
diff --git a/drivers/gpu/drm/radeon/radeon_drv.h b/drivers/gpu/drm/radeon/radeon_drv.h
index 00e0d44..dafd812 100644
--- a/drivers/gpu/drm/radeon/radeon_drv.h
+++ b/drivers/gpu/drm/radeon/radeon_drv.h
@@ -405,7 +405,7 @@
 extern u32 radeon_get_vblank_counter(struct drm_device *dev, int crtc);
 extern int radeon_enable_vblank(struct drm_device *dev, int crtc);
 extern void radeon_disable_vblank(struct drm_device *dev, int crtc);
-extern irqreturn_t radeon_driver_irq_handler(DRM_IRQ_ARGS);
+extern irqreturn_t radeon_driver_irq_handler(int irq, void *arg);
 extern void radeon_driver_irq_preinstall(struct drm_device * dev);
 extern int radeon_driver_irq_postinstall(struct drm_device *dev);
 extern void radeon_driver_irq_uninstall(struct drm_device * dev);
diff --git a/drivers/gpu/drm/radeon/radeon_fence.c b/drivers/gpu/drm/radeon/radeon_fence.c
index d3a86e4..c37cb79 100644
--- a/drivers/gpu/drm/radeon/radeon_fence.c
+++ b/drivers/gpu/drm/radeon/radeon_fence.c
@@ -121,7 +121,7 @@
 	(*fence)->seq = ++rdev->fence_drv[ring].sync_seq[ring];
 	(*fence)->ring = ring;
 	radeon_fence_ring_emit(rdev, ring, *fence);
-	trace_radeon_fence_emit(rdev->ddev, (*fence)->seq);
+	trace_radeon_fence_emit(rdev->ddev, ring, (*fence)->seq);
 	return 0;
 }
 
@@ -313,7 +313,7 @@
 				continue;
 
 			last_seq[i] = atomic64_read(&rdev->fence_drv[i].last_seq);
-			trace_radeon_fence_wait_begin(rdev->ddev, target_seq[i]);
+			trace_radeon_fence_wait_begin(rdev->ddev, i, target_seq[i]);
 			radeon_irq_kms_sw_irq_get(rdev, i);
 		}
 
@@ -332,7 +332,7 @@
 				continue;
 
 			radeon_irq_kms_sw_irq_put(rdev, i);
-			trace_radeon_fence_wait_end(rdev->ddev, target_seq[i]);
+			trace_radeon_fence_wait_end(rdev->ddev, i, target_seq[i]);
 		}
 
 		if (unlikely(r < 0))
@@ -841,6 +841,8 @@
 		if (!rdev->fence_drv[i].initialized)
 			continue;
 
+		radeon_fence_process(rdev, i);
+
 		seq_printf(m, "--- ring %d ---\n", i);
 		seq_printf(m, "Last signaled fence 0x%016llx\n",
 			   (unsigned long long)atomic64_read(&rdev->fence_drv[i].last_seq));
diff --git a/drivers/gpu/drm/radeon/radeon_gart.c b/drivers/gpu/drm/radeon/radeon_gart.c
index 96e4400..a8f9b46 100644
--- a/drivers/gpu/drm/radeon/radeon_gart.c
+++ b/drivers/gpu/drm/radeon/radeon_gart.c
@@ -713,7 +713,7 @@
 	unsigned i;
 
 	/* check if the id is still valid */
-	if (vm->fence && vm->fence == rdev->vm_manager.active[vm->id])
+	if (vm->last_id_use && vm->last_id_use == rdev->vm_manager.active[vm->id])
 		return NULL;
 
 	/* we definately need to flush */
@@ -726,6 +726,7 @@
 		if (fence == NULL) {
 			/* found a free one */
 			vm->id = i;
+			trace_radeon_vm_grab_id(vm->id, ring);
 			return NULL;
 		}
 
@@ -769,6 +770,9 @@
 
 	radeon_fence_unref(&vm->fence);
 	vm->fence = radeon_fence_ref(fence);
+
+	radeon_fence_unref(&vm->last_id_use);
+	vm->last_id_use = radeon_fence_ref(fence);
 }
 
 /**
@@ -1303,6 +1307,8 @@
 {
 	vm->id = 0;
 	vm->fence = NULL;
+	vm->last_flush = NULL;
+	vm->last_id_use = NULL;
 	mutex_init(&vm->mutex);
 	INIT_LIST_HEAD(&vm->list);
 	INIT_LIST_HEAD(&vm->va);
@@ -1341,5 +1347,6 @@
 	}
 	radeon_fence_unref(&vm->fence);
 	radeon_fence_unref(&vm->last_flush);
+	radeon_fence_unref(&vm->last_id_use);
 	mutex_unlock(&vm->mutex);
 }
diff --git a/drivers/gpu/drm/radeon/radeon_gem.c b/drivers/gpu/drm/radeon/radeon_gem.c
index 805c5e5..b96c819 100644
--- a/drivers/gpu/drm/radeon/radeon_gem.c
+++ b/drivers/gpu/drm/radeon/radeon_gem.c
@@ -86,7 +86,7 @@
 	return 0;
 }
 
-int radeon_gem_set_domain(struct drm_gem_object *gobj,
+static int radeon_gem_set_domain(struct drm_gem_object *gobj,
 			  uint32_t rdomain, uint32_t wdomain)
 {
 	struct radeon_bo *robj;
diff --git a/drivers/gpu/drm/radeon/radeon_i2c.c b/drivers/gpu/drm/radeon/radeon_i2c.c
index fc60b74..e24ca6a 100644
--- a/drivers/gpu/drm/radeon/radeon_i2c.c
+++ b/drivers/gpu/drm/radeon/radeon_i2c.c
@@ -1020,6 +1020,9 @@
 /* Add the default buses */
 void radeon_i2c_init(struct radeon_device *rdev)
 {
+	if (radeon_hw_i2c)
+		DRM_INFO("hw_i2c forced on, you may experience display detection problems!\n");
+
 	if (rdev->is_atom_bios)
 		radeon_atombios_i2c_init(rdev);
 	else
diff --git a/drivers/gpu/drm/radeon/radeon_irq.c b/drivers/gpu/drm/radeon/radeon_irq.c
index 8d68e97..244b19b 100644
--- a/drivers/gpu/drm/radeon/radeon_irq.c
+++ b/drivers/gpu/drm/radeon/radeon_irq.c
@@ -181,7 +181,7 @@
  * tied to dma at all, this is just a hangover from dri prehistory.
  */
 
-irqreturn_t radeon_driver_irq_handler(DRM_IRQ_ARGS)
+irqreturn_t radeon_driver_irq_handler(int irq, void *arg)
 {
 	struct drm_device *dev = (struct drm_device *) arg;
 	drm_radeon_private_t *dev_priv =
@@ -203,7 +203,7 @@
 
 	/* SW interrupt */
 	if (stat & RADEON_SW_INT_TEST)
-		DRM_WAKEUP(&dev_priv->swi_queue);
+		wake_up(&dev_priv->swi_queue);
 
 	/* VBLANK interrupt */
 	if ((dev_priv->flags & RADEON_FAMILY_MASK) >= CHIP_RS600) {
@@ -249,7 +249,7 @@
 
 	dev_priv->stats.boxes |= RADEON_BOX_WAIT_IDLE;
 
-	DRM_WAIT_ON(ret, dev_priv->swi_queue, 3 * DRM_HZ,
+	DRM_WAIT_ON(ret, dev_priv->swi_queue, 3 * HZ,
 		    RADEON_READ(RADEON_LAST_SWI_REG) >= swi_nr);
 
 	return ret;
@@ -302,7 +302,7 @@
 
 	result = radeon_emit_irq(dev);
 
-	if (DRM_COPY_TO_USER(emit->irq_seq, &result, sizeof(int))) {
+	if (copy_to_user(emit->irq_seq, &result, sizeof(int))) {
 		DRM_ERROR("copy_to_user\n");
 		return -EFAULT;
 	}
@@ -354,7 +354,7 @@
 	    (drm_radeon_private_t *) dev->dev_private;
 
 	atomic_set(&dev_priv->swi_emitted, 0);
-	DRM_INIT_WAITQUEUE(&dev_priv->swi_queue);
+	init_waitqueue_head(&dev_priv->swi_queue);
 
 	dev->max_vblank_count = 0x001fffff;
 
diff --git a/drivers/gpu/drm/radeon/radeon_irq_kms.c b/drivers/gpu/drm/radeon/radeon_irq_kms.c
index ec6240b..089c9ff 100644
--- a/drivers/gpu/drm/radeon/radeon_irq_kms.c
+++ b/drivers/gpu/drm/radeon/radeon_irq_kms.c
@@ -39,13 +39,13 @@
 /**
  * radeon_driver_irq_handler_kms - irq handler for KMS
  *
- * @DRM_IRQ_ARGS: args
+ * @int irq, void *arg: args
  *
  * This is the irq handler for the radeon KMS driver (all asics).
  * radeon_irq_process is a macro that points to the per-asic
  * irq handler callback.
  */
-irqreturn_t radeon_driver_irq_handler_kms(DRM_IRQ_ARGS)
+irqreturn_t radeon_driver_irq_handler_kms(int irq, void *arg)
 {
 	struct drm_device *dev = (struct drm_device *) arg;
 	struct radeon_device *rdev = dev->dev_private;
diff --git a/drivers/gpu/drm/radeon/radeon_kms.c b/drivers/gpu/drm/radeon/radeon_kms.c
index 21d593c..114d167 100644
--- a/drivers/gpu/drm/radeon/radeon_kms.c
+++ b/drivers/gpu/drm/radeon/radeon_kms.c
@@ -191,7 +191,7 @@
  * etc. (all asics).
  * Returns 0 on success, -EINVAL on failure.
  */
-int radeon_info_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
+static int radeon_info_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
 {
 	struct radeon_device *rdev = dev->dev_private;
 	struct drm_radeon_info *info = data;
@@ -223,7 +223,7 @@
 			*value = rdev->accel_working;
 		break;
 	case RADEON_INFO_CRTC_FROM_ID:
-		if (DRM_COPY_FROM_USER(value, value_ptr, sizeof(uint32_t))) {
+		if (copy_from_user(value, value_ptr, sizeof(uint32_t))) {
 			DRM_ERROR("copy_from_user %s:%u\n", __func__, __LINE__);
 			return -EFAULT;
 		}
@@ -269,7 +269,7 @@
 		 *
 		 * When returning, the value is 1 if filp owns hyper-z access,
 		 * 0 otherwise. */
-		if (DRM_COPY_FROM_USER(value, value_ptr, sizeof(uint32_t))) {
+		if (copy_from_user(value, value_ptr, sizeof(uint32_t))) {
 			DRM_ERROR("copy_from_user %s:%u\n", __func__, __LINE__);
 			return -EFAULT;
 		}
@@ -281,7 +281,7 @@
 		break;
 	case RADEON_INFO_WANT_CMASK:
 		/* The same logic as Hyper-Z. */
-		if (DRM_COPY_FROM_USER(value, value_ptr, sizeof(uint32_t))) {
+		if (copy_from_user(value, value_ptr, sizeof(uint32_t))) {
 			DRM_ERROR("copy_from_user %s:%u\n", __func__, __LINE__);
 			return -EFAULT;
 		}
@@ -417,7 +417,7 @@
 		*value = rdev->fastfb_working;
 		break;
 	case RADEON_INFO_RING_WORKING:
-		if (DRM_COPY_FROM_USER(value, value_ptr, sizeof(uint32_t))) {
+		if (copy_from_user(value, value_ptr, sizeof(uint32_t))) {
 			DRM_ERROR("copy_from_user %s:%u\n", __func__, __LINE__);
 			return -EFAULT;
 		}
@@ -470,11 +470,18 @@
 			DRM_DEBUG_KMS("BACKEND_ENABLED_MASK is si+ only!\n");
 		}
 		break;
+	case RADEON_INFO_MAX_SCLK:
+		if ((rdev->pm.pm_method == PM_METHOD_DPM) &&
+		    rdev->pm.dpm_enabled)
+			*value = rdev->pm.dpm.dyn_state.max_clock_voltage_on_ac.sclk * 10;
+		else
+			*value = rdev->pm.default_sclk * 10;
+		break;
 	default:
 		DRM_DEBUG_KMS("Invalid request %d\n", info->request);
 		return -EINVAL;
 	}
-	if (DRM_COPY_TO_USER(value_ptr, (char*)value, value_size)) {
+	if (copy_to_user(value_ptr, (char*)value, value_size)) {
 		DRM_ERROR("copy_to_user %s:%u\n", __func__, __LINE__);
 		return -EFAULT;
 	}
@@ -712,11 +719,12 @@
 	/* Helper routine in DRM core does all the work: */
 	return drm_calc_vbltimestamp_from_scanoutpos(dev, crtc, max_error,
 						     vblank_time, flags,
-						     drmcrtc);
+						     drmcrtc, &drmcrtc->hwmode);
 }
 
 #define KMS_INVALID_IOCTL(name)						\
-int name(struct drm_device *dev, void *data, struct drm_file *file_priv)\
+static int name(struct drm_device *dev, void *data, struct drm_file	\
+		*file_priv)						\
 {									\
 	DRM_ERROR("invalid ioctl with kms %s\n", __func__);		\
 	return -EINVAL;							\
diff --git a/drivers/gpu/drm/radeon/radeon_mem.c b/drivers/gpu/drm/radeon/radeon_mem.c
index d54d2d7..146d253 100644
--- a/drivers/gpu/drm/radeon/radeon_mem.c
+++ b/drivers/gpu/drm/radeon/radeon_mem.c
@@ -243,7 +243,7 @@
 	if (!block)
 		return -ENOMEM;
 
-	if (DRM_COPY_TO_USER(alloc->region_offset, &block->start,
+	if (copy_to_user(alloc->region_offset, &block->start,
 			     sizeof(int))) {
 		DRM_ERROR("copy_to_user\n");
 		return -EFAULT;
diff --git a/drivers/gpu/drm/radeon/radeon_mode.h b/drivers/gpu/drm/radeon/radeon_mode.h
index 3f0dd66..402dbe32 100644
--- a/drivers/gpu/drm/radeon/radeon_mode.h
+++ b/drivers/gpu/drm/radeon/radeon_mode.h
@@ -291,6 +291,7 @@
 
 struct radeon_atom_ss {
 	uint16_t percentage;
+	uint16_t percentage_divider;
 	uint8_t type;
 	uint16_t step;
 	uint8_t delay;
@@ -624,6 +625,30 @@
 	struct atom_voltage_table_entry entries[MAX_VOLTAGE_ENTRIES];
 };
 
+
+extern void
+radeon_add_atom_connector(struct drm_device *dev,
+			  uint32_t connector_id,
+			  uint32_t supported_device,
+			  int connector_type,
+			  struct radeon_i2c_bus_rec *i2c_bus,
+			  uint32_t igp_lane_info,
+			  uint16_t connector_object_id,
+			  struct radeon_hpd *hpd,
+			  struct radeon_router *router);
+extern void
+radeon_add_legacy_connector(struct drm_device *dev,
+			    uint32_t connector_id,
+			    uint32_t supported_device,
+			    int connector_type,
+			    struct radeon_i2c_bus_rec *i2c_bus,
+			    uint16_t connector_object_id,
+			    struct radeon_hpd *hpd);
+extern uint32_t
+radeon_get_encoder_enum(struct drm_device *dev, uint32_t supported_device,
+			uint8_t dac);
+extern void radeon_link_encoder_connector(struct drm_device *dev);
+
 extern enum radeon_tv_std
 radeon_combios_get_tv_info(struct radeon_device *rdev);
 extern enum radeon_tv_std
@@ -631,6 +656,15 @@
 extern void radeon_atombios_get_default_voltages(struct radeon_device *rdev,
 						 u16 *vddc, u16 *vddci, u16 *mvdd);
 
+extern void
+radeon_combios_connected_scratch_regs(struct drm_connector *connector,
+				      struct drm_encoder *encoder,
+				      bool connected);
+extern void
+radeon_atombios_connected_scratch_regs(struct drm_connector *connector,
+				       struct drm_encoder *encoder,
+				       bool connected);
+
 extern struct drm_connector *
 radeon_get_connector_for_encoder(struct drm_encoder *encoder);
 extern struct drm_connector *
@@ -666,6 +700,7 @@
 extern struct drm_encoder *radeon_get_external_encoder(struct drm_encoder *encoder);
 extern int radeon_dp_i2c_aux_ch(struct i2c_adapter *adapter, int mode,
 				u8 write_byte, u8 *read_byte);
+void radeon_atom_copy_swap(u8 *dst, u8 *src, u8 num_bytes, bool to_le);
 
 extern void radeon_i2c_init(struct radeon_device *rdev);
 extern void radeon_i2c_fini(struct radeon_device *rdev);
@@ -766,6 +801,7 @@
 				   int x, int y);
 
 extern int radeon_get_crtc_scanoutpos(struct drm_device *dev, int crtc,
+				      unsigned int flags,
 				      int *vpos, int *hpos, ktime_t *stime,
 				      ktime_t *etime);
 
diff --git a/drivers/gpu/drm/radeon/radeon_object.c b/drivers/gpu/drm/radeon/radeon_object.c
index c0fa4aa9..08595cf 100644
--- a/drivers/gpu/drm/radeon/radeon_object.c
+++ b/drivers/gpu/drm/radeon/radeon_object.c
@@ -46,7 +46,7 @@
  * function are calling it.
  */
 
-void radeon_bo_clear_va(struct radeon_bo *bo)
+static void radeon_bo_clear_va(struct radeon_bo *bo)
 {
 	struct radeon_bo_va *bo_va, *tmp;
 
diff --git a/drivers/gpu/drm/radeon/radeon_pm.c b/drivers/gpu/drm/radeon/radeon_pm.c
index 984097b..8e8153e 100644
--- a/drivers/gpu/drm/radeon/radeon_pm.c
+++ b/drivers/gpu/drm/radeon/radeon_pm.c
@@ -924,6 +924,10 @@
 
 	if (rdev->asic->dpm.powergate_uvd) {
 		mutex_lock(&rdev->pm.mutex);
+		/* don't powergate anything if we
+		   have active but paused streams */
+		enable |= rdev->pm.dpm.sd > 0;
+		enable |= rdev->pm.dpm.hd > 0;
 		/* enable/disable UVD */
 		radeon_dpm_powergate_uvd(rdev, !enable);
 		mutex_unlock(&rdev->pm.mutex);
@@ -1010,8 +1014,10 @@
 	rdev->pm.current_clock_mode_index = 0;
 	rdev->pm.current_sclk = rdev->pm.default_sclk;
 	rdev->pm.current_mclk = rdev->pm.default_mclk;
-	rdev->pm.current_vddc = rdev->pm.power_state[rdev->pm.default_power_state_index].clock_info[0].voltage.voltage;
-	rdev->pm.current_vddci = rdev->pm.power_state[rdev->pm.default_power_state_index].clock_info[0].voltage.vddci;
+	if (rdev->pm.power_state) {
+		rdev->pm.current_vddc = rdev->pm.power_state[rdev->pm.default_power_state_index].clock_info[0].voltage.voltage;
+		rdev->pm.current_vddci = rdev->pm.power_state[rdev->pm.default_power_state_index].clock_info[0].voltage.vddci;
+	}
 	if (rdev->pm.pm_method == PM_METHOD_DYNPM
 	    && rdev->pm.dynpm_state == DYNPM_STATE_SUSPENDED) {
 		rdev->pm.dynpm_state = DYNPM_STATE_ACTIVE;
@@ -1032,25 +1038,27 @@
 	radeon_dpm_setup_asic(rdev);
 	ret = radeon_dpm_enable(rdev);
 	mutex_unlock(&rdev->pm.mutex);
-	if (ret) {
-		DRM_ERROR("radeon: dpm resume failed\n");
-		if ((rdev->family >= CHIP_BARTS) &&
-		    (rdev->family <= CHIP_CAYMAN) &&
-		    rdev->mc_fw) {
-			if (rdev->pm.default_vddc)
-				radeon_atom_set_voltage(rdev, rdev->pm.default_vddc,
-							SET_VOLTAGE_TYPE_ASIC_VDDC);
-			if (rdev->pm.default_vddci)
-				radeon_atom_set_voltage(rdev, rdev->pm.default_vddci,
-							SET_VOLTAGE_TYPE_ASIC_VDDCI);
-			if (rdev->pm.default_sclk)
-				radeon_set_engine_clock(rdev, rdev->pm.default_sclk);
-			if (rdev->pm.default_mclk)
-				radeon_set_memory_clock(rdev, rdev->pm.default_mclk);
-		}
-	} else {
-		rdev->pm.dpm_enabled = true;
-		radeon_pm_compute_clocks(rdev);
+	if (ret)
+		goto dpm_resume_fail;
+	rdev->pm.dpm_enabled = true;
+	radeon_pm_compute_clocks(rdev);
+	return;
+
+dpm_resume_fail:
+	DRM_ERROR("radeon: dpm resume failed\n");
+	if ((rdev->family >= CHIP_BARTS) &&
+	    (rdev->family <= CHIP_CAYMAN) &&
+	    rdev->mc_fw) {
+		if (rdev->pm.default_vddc)
+			radeon_atom_set_voltage(rdev, rdev->pm.default_vddc,
+						SET_VOLTAGE_TYPE_ASIC_VDDC);
+		if (rdev->pm.default_vddci)
+			radeon_atom_set_voltage(rdev, rdev->pm.default_vddci,
+						SET_VOLTAGE_TYPE_ASIC_VDDCI);
+		if (rdev->pm.default_sclk)
+			radeon_set_engine_clock(rdev, rdev->pm.default_sclk);
+		if (rdev->pm.default_mclk)
+			radeon_set_memory_clock(rdev, rdev->pm.default_mclk);
 	}
 }
 
@@ -1170,51 +1178,50 @@
 	radeon_dpm_setup_asic(rdev);
 	ret = radeon_dpm_enable(rdev);
 	mutex_unlock(&rdev->pm.mutex);
-	if (ret) {
-		rdev->pm.dpm_enabled = false;
-		if ((rdev->family >= CHIP_BARTS) &&
-		    (rdev->family <= CHIP_CAYMAN) &&
-		    rdev->mc_fw) {
-			if (rdev->pm.default_vddc)
-				radeon_atom_set_voltage(rdev, rdev->pm.default_vddc,
-							SET_VOLTAGE_TYPE_ASIC_VDDC);
-			if (rdev->pm.default_vddci)
-				radeon_atom_set_voltage(rdev, rdev->pm.default_vddci,
-							SET_VOLTAGE_TYPE_ASIC_VDDCI);
-			if (rdev->pm.default_sclk)
-				radeon_set_engine_clock(rdev, rdev->pm.default_sclk);
-			if (rdev->pm.default_mclk)
-				radeon_set_memory_clock(rdev, rdev->pm.default_mclk);
-		}
-		DRM_ERROR("radeon: dpm initialization failed\n");
-		return ret;
-	}
+	if (ret)
+		goto dpm_failed;
 	rdev->pm.dpm_enabled = true;
-	radeon_pm_compute_clocks(rdev);
 
-	if (rdev->pm.num_power_states > 1) {
-		ret = device_create_file(rdev->dev, &dev_attr_power_dpm_state);
-		if (ret)
-			DRM_ERROR("failed to create device file for dpm state\n");
-		ret = device_create_file(rdev->dev, &dev_attr_power_dpm_force_performance_level);
-		if (ret)
-			DRM_ERROR("failed to create device file for dpm state\n");
-		/* XXX: these are noops for dpm but are here for backwards compat */
-		ret = device_create_file(rdev->dev, &dev_attr_power_profile);
-		if (ret)
-			DRM_ERROR("failed to create device file for power profile\n");
-		ret = device_create_file(rdev->dev, &dev_attr_power_method);
-		if (ret)
-			DRM_ERROR("failed to create device file for power method\n");
+	ret = device_create_file(rdev->dev, &dev_attr_power_dpm_state);
+	if (ret)
+		DRM_ERROR("failed to create device file for dpm state\n");
+	ret = device_create_file(rdev->dev, &dev_attr_power_dpm_force_performance_level);
+	if (ret)
+		DRM_ERROR("failed to create device file for dpm state\n");
+	/* XXX: these are noops for dpm but are here for backwards compat */
+	ret = device_create_file(rdev->dev, &dev_attr_power_profile);
+	if (ret)
+		DRM_ERROR("failed to create device file for power profile\n");
+	ret = device_create_file(rdev->dev, &dev_attr_power_method);
+	if (ret)
+		DRM_ERROR("failed to create device file for power method\n");
 
-		if (radeon_debugfs_pm_init(rdev)) {
-			DRM_ERROR("Failed to register debugfs file for dpm!\n");
-		}
-
-		DRM_INFO("radeon: dpm initialized\n");
+	if (radeon_debugfs_pm_init(rdev)) {
+		DRM_ERROR("Failed to register debugfs file for dpm!\n");
 	}
 
+	DRM_INFO("radeon: dpm initialized\n");
+
 	return 0;
+
+dpm_failed:
+	rdev->pm.dpm_enabled = false;
+	if ((rdev->family >= CHIP_BARTS) &&
+	    (rdev->family <= CHIP_CAYMAN) &&
+	    rdev->mc_fw) {
+		if (rdev->pm.default_vddc)
+			radeon_atom_set_voltage(rdev, rdev->pm.default_vddc,
+						SET_VOLTAGE_TYPE_ASIC_VDDC);
+		if (rdev->pm.default_vddci)
+			radeon_atom_set_voltage(rdev, rdev->pm.default_vddci,
+						SET_VOLTAGE_TYPE_ASIC_VDDCI);
+		if (rdev->pm.default_sclk)
+			radeon_set_engine_clock(rdev, rdev->pm.default_sclk);
+		if (rdev->pm.default_mclk)
+			radeon_set_memory_clock(rdev, rdev->pm.default_mclk);
+	}
+	DRM_ERROR("radeon: dpm initialization failed\n");
+	return ret;
 }
 
 int radeon_pm_init(struct radeon_device *rdev)
@@ -1228,11 +1235,10 @@
 	case CHIP_RV670:
 	case CHIP_RS780:
 	case CHIP_RS880:
+	case CHIP_BARTS:
+	case CHIP_TURKS:
+	case CHIP_CAICOS:
 	case CHIP_CAYMAN:
-	case CHIP_BONAIRE:
-	case CHIP_KABINI:
-	case CHIP_KAVERI:
-	case CHIP_HAWAII:
 		/* DPM requires the RLC, RV770+ dGPU requires SMC */
 		if (!rdev->rlc_fw)
 			rdev->pm.pm_method = PM_METHOD_PROFILE;
@@ -1257,15 +1263,16 @@
 	case CHIP_PALM:
 	case CHIP_SUMO:
 	case CHIP_SUMO2:
-	case CHIP_BARTS:
-	case CHIP_TURKS:
-	case CHIP_CAICOS:
 	case CHIP_ARUBA:
 	case CHIP_TAHITI:
 	case CHIP_PITCAIRN:
 	case CHIP_VERDE:
 	case CHIP_OLAND:
 	case CHIP_HAINAN:
+	case CHIP_BONAIRE:
+	case CHIP_KABINI:
+	case CHIP_KAVERI:
+	case CHIP_HAWAII:
 		/* DPM requires the RLC, RV770+ dGPU requires SMC */
 		if (!rdev->rlc_fw)
 			rdev->pm.pm_method = PM_METHOD_PROFILE;
@@ -1290,6 +1297,18 @@
 		return radeon_pm_init_old(rdev);
 }
 
+int radeon_pm_late_init(struct radeon_device *rdev)
+{
+	int ret = 0;
+
+	if (rdev->pm.pm_method == PM_METHOD_DPM) {
+		mutex_lock(&rdev->pm.mutex);
+		ret = radeon_dpm_late_enable(rdev);
+		mutex_unlock(&rdev->pm.mutex);
+	}
+	return ret;
+}
+
 static void radeon_pm_fini_old(struct radeon_device *rdev)
 {
 	if (rdev->pm.num_power_states > 1) {
@@ -1420,6 +1439,9 @@
 	struct drm_crtc *crtc;
 	struct radeon_crtc *radeon_crtc;
 
+	if (!rdev->pm.dpm_enabled)
+		return;
+
 	mutex_lock(&rdev->pm.mutex);
 
 	/* update active crtc counts */
@@ -1464,7 +1486,7 @@
 	 */
 	for (crtc = 0; (crtc < rdev->num_crtc) && in_vbl; crtc++) {
 		if (rdev->pm.active_crtcs & (1 << crtc)) {
-			vbl_status = radeon_get_crtc_scanoutpos(rdev->ddev, crtc, &vpos, &hpos, NULL, NULL);
+			vbl_status = radeon_get_crtc_scanoutpos(rdev->ddev, crtc, 0, &vpos, &hpos, NULL, NULL);
 			if ((vbl_status & DRM_SCANOUTPOS_VALID) &&
 			    !(vbl_status & DRM_SCANOUTPOS_INVBL))
 				in_vbl = false;
diff --git a/drivers/gpu/drm/radeon/radeon_ring.c b/drivers/gpu/drm/radeon/radeon_ring.c
index 9214403..15e44a7 100644
--- a/drivers/gpu/drm/radeon/radeon_ring.c
+++ b/drivers/gpu/drm/radeon/radeon_ring.c
@@ -139,7 +139,7 @@
 	}
 
 	/* 64 dwords should be enough for fence too */
-	r = radeon_ring_lock(rdev, ring, 64 + RADEON_NUM_RINGS * 8);
+	r = radeon_ring_lock(rdev, ring, 64 + RADEON_NUM_SYNCS * 8);
 	if (r) {
 		dev_err(rdev->dev, "scheduling IB failed (%d).\n", r);
 		return r;
@@ -332,36 +332,6 @@
 	}
 }
 
-u32 radeon_ring_generic_get_rptr(struct radeon_device *rdev,
-				 struct radeon_ring *ring)
-{
-	u32 rptr;
-
-	if (rdev->wb.enabled)
-		rptr = le32_to_cpu(rdev->wb.wb[ring->rptr_offs/4]);
-	else
-		rptr = RREG32(ring->rptr_reg);
-
-	return rptr;
-}
-
-u32 radeon_ring_generic_get_wptr(struct radeon_device *rdev,
-				 struct radeon_ring *ring)
-{
-	u32 wptr;
-
-	wptr = RREG32(ring->wptr_reg);
-
-	return wptr;
-}
-
-void radeon_ring_generic_set_wptr(struct radeon_device *rdev,
-				  struct radeon_ring *ring)
-{
-	WREG32(ring->wptr_reg, ring->wptr);
-	(void)RREG32(ring->wptr_reg);
-}
-
 /**
  * radeon_ring_free_size - update the free size
  *
@@ -463,7 +433,7 @@
 	while (ring->wptr & ring->align_mask) {
 		radeon_ring_write(ring, ring->nop);
 	}
-	DRM_MEMORYBARRIER();
+	mb();
 	radeon_ring_set_wptr(rdev, ring);
 }
 
@@ -689,22 +659,18 @@
  * @ring: radeon_ring structure holding ring information
  * @ring_size: size of the ring
  * @rptr_offs: offset of the rptr writeback location in the WB buffer
- * @rptr_reg: MMIO offset of the rptr register
- * @wptr_reg: MMIO offset of the wptr register
  * @nop: nop packet for this ring
  *
  * Initialize the driver information for the selected ring (all asics).
  * Returns 0 on success, error on failure.
  */
 int radeon_ring_init(struct radeon_device *rdev, struct radeon_ring *ring, unsigned ring_size,
-		     unsigned rptr_offs, unsigned rptr_reg, unsigned wptr_reg, u32 nop)
+		     unsigned rptr_offs, u32 nop)
 {
 	int r;
 
 	ring->ring_size = ring_size;
 	ring->rptr_offs = rptr_offs;
-	ring->rptr_reg = rptr_reg;
-	ring->wptr_reg = wptr_reg;
 	ring->nop = nop;
 	/* Allocate ring buffer */
 	if (ring->ring_obj == NULL) {
@@ -790,34 +756,54 @@
 	struct radeon_device *rdev = dev->dev_private;
 	int ridx = *(int*)node->info_ent->data;
 	struct radeon_ring *ring = &rdev->ring[ridx];
+
+	uint32_t rptr, wptr, rptr_next;
 	unsigned count, i, j;
-	u32 tmp;
 
 	radeon_ring_free_size(rdev, ring);
 	count = (ring->ring_size / 4) - ring->ring_free_dw;
-	tmp = radeon_ring_get_wptr(rdev, ring);
-	seq_printf(m, "wptr(0x%04x): 0x%08x [%5d]\n", ring->wptr_reg, tmp, tmp);
-	tmp = radeon_ring_get_rptr(rdev, ring);
-	seq_printf(m, "rptr(0x%04x): 0x%08x [%5d]\n", ring->rptr_reg, tmp, tmp);
+
+	wptr = radeon_ring_get_wptr(rdev, ring);
+	seq_printf(m, "wptr: 0x%08x [%5d]\n",
+		   wptr, wptr);
+
+	rptr = radeon_ring_get_rptr(rdev, ring);
+	seq_printf(m, "rptr: 0x%08x [%5d]\n",
+		   rptr, rptr);
+
 	if (ring->rptr_save_reg) {
-		seq_printf(m, "rptr next(0x%04x): 0x%08x\n", ring->rptr_save_reg,
-			   RREG32(ring->rptr_save_reg));
-	}
-	seq_printf(m, "driver's copy of the wptr: 0x%08x [%5d]\n", ring->wptr, ring->wptr);
-	seq_printf(m, "driver's copy of the rptr: 0x%08x [%5d]\n", ring->rptr, ring->rptr);
-	seq_printf(m, "last semaphore signal addr : 0x%016llx\n", ring->last_semaphore_signal_addr);
-	seq_printf(m, "last semaphore wait addr   : 0x%016llx\n", ring->last_semaphore_wait_addr);
+		rptr_next = RREG32(ring->rptr_save_reg);
+		seq_printf(m, "rptr next(0x%04x): 0x%08x [%5d]\n",
+			   ring->rptr_save_reg, rptr_next, rptr_next);
+	} else
+		rptr_next = ~0;
+
+	seq_printf(m, "driver's copy of the wptr: 0x%08x [%5d]\n",
+		   ring->wptr, ring->wptr);
+	seq_printf(m, "driver's copy of the rptr: 0x%08x [%5d]\n",
+		   ring->rptr, ring->rptr);
+	seq_printf(m, "last semaphore signal addr : 0x%016llx\n",
+		   ring->last_semaphore_signal_addr);
+	seq_printf(m, "last semaphore wait addr   : 0x%016llx\n",
+		   ring->last_semaphore_wait_addr);
 	seq_printf(m, "%u free dwords in ring\n", ring->ring_free_dw);
 	seq_printf(m, "%u dwords in ring\n", count);
+
+	if (!ring->ready)
+		return 0;
+
 	/* print 8 dw before current rptr as often it's the last executed
 	 * packet that is the root issue
 	 */
-	i = (ring->rptr + ring->ptr_mask + 1 - 32) & ring->ptr_mask;
-	if (ring->ready) {
-		for (j = 0; j <= (count + 32); j++) {
-			seq_printf(m, "r[%5d]=0x%08x\n", i, ring->ring[i]);
-			i = (i + 1) & ring->ptr_mask;
-		}
+	i = (rptr + ring->ptr_mask + 1 - 32) & ring->ptr_mask;
+	for (j = 0; j <= (count + 32); j++) {
+		seq_printf(m, "r[%5d]=0x%08x", i, ring->ring[i]);
+		if (rptr == i)
+			seq_puts(m, " *");
+		if (rptr_next == i)
+			seq_puts(m, " #");
+		seq_puts(m, "\n");
+		i = (i + 1) & ring->ptr_mask;
 	}
 	return 0;
 }
diff --git a/drivers/gpu/drm/radeon/radeon_sa.c b/drivers/gpu/drm/radeon/radeon_sa.c
index f0bac68..c062580 100644
--- a/drivers/gpu/drm/radeon/radeon_sa.c
+++ b/drivers/gpu/drm/radeon/radeon_sa.c
@@ -402,13 +402,15 @@
 
 	spin_lock(&sa_manager->wq.lock);
 	list_for_each_entry(i, &sa_manager->olist, olist) {
+		uint64_t soffset = i->soffset + sa_manager->gpu_addr;
+		uint64_t eoffset = i->eoffset + sa_manager->gpu_addr;
 		if (&i->olist == sa_manager->hole) {
 			seq_printf(m, ">");
 		} else {
 			seq_printf(m, " ");
 		}
-		seq_printf(m, "[0x%08x 0x%08x] size %8d",
-			   i->soffset, i->eoffset, i->eoffset - i->soffset);
+		seq_printf(m, "[0x%010llx 0x%010llx] size %8lld",
+			   soffset, eoffset, eoffset - soffset);
 		if (i->fence) {
 			seq_printf(m, " protected by 0x%016llx on ring %d",
 				   i->fence->seq, i->fence->ring);
diff --git a/drivers/gpu/drm/radeon/radeon_semaphore.c b/drivers/gpu/drm/radeon/radeon_semaphore.c
index 2b42aa1..9006b32 100644
--- a/drivers/gpu/drm/radeon/radeon_semaphore.c
+++ b/drivers/gpu/drm/radeon/radeon_semaphore.c
@@ -34,14 +34,15 @@
 int radeon_semaphore_create(struct radeon_device *rdev,
 			    struct radeon_semaphore **semaphore)
 {
+	uint32_t *cpu_addr;
 	int i, r;
 
 	*semaphore = kmalloc(sizeof(struct radeon_semaphore), GFP_KERNEL);
 	if (*semaphore == NULL) {
 		return -ENOMEM;
 	}
-	r = radeon_sa_bo_new(rdev, &rdev->ring_tmp_bo,
-			     &(*semaphore)->sa_bo, 8, 8, true);
+	r = radeon_sa_bo_new(rdev, &rdev->ring_tmp_bo, &(*semaphore)->sa_bo,
+			     8 * RADEON_NUM_SYNCS, 8, true);
 	if (r) {
 		kfree(*semaphore);
 		*semaphore = NULL;
@@ -49,7 +50,10 @@
 	}
 	(*semaphore)->waiters = 0;
 	(*semaphore)->gpu_addr = radeon_sa_bo_gpu_addr((*semaphore)->sa_bo);
-	*((uint64_t*)radeon_sa_bo_cpu_addr((*semaphore)->sa_bo)) = 0;
+
+	cpu_addr = radeon_sa_bo_cpu_addr((*semaphore)->sa_bo);
+	for (i = 0; i < RADEON_NUM_SYNCS; ++i)
+		cpu_addr[i] = 0;
 
 	for (i = 0; i < RADEON_NUM_RINGS; ++i)
 		(*semaphore)->sync_to[i] = NULL;
@@ -125,6 +129,7 @@
 				struct radeon_semaphore *semaphore,
 				int ring)
 {
+	unsigned count = 0;
 	int i, r;
 
         for (i = 0; i < RADEON_NUM_RINGS; ++i) {
@@ -140,6 +145,12 @@
 			return -EINVAL;
 		}
 
+		if (++count > RADEON_NUM_SYNCS) {
+			/* not enough room, wait manually */
+			radeon_fence_wait_locked(fence);
+			continue;
+		}
+
 		/* allocate enough space for sync command */
 		r = radeon_ring_alloc(rdev, &rdev->ring[i], 16);
 		if (r) {
@@ -164,6 +175,8 @@
 
 		radeon_ring_commit(rdev, &rdev->ring[i]);
 		radeon_fence_note_sync(fence, ring);
+
+		semaphore->gpu_addr += 8;
 	}
 
 	return 0;
diff --git a/drivers/gpu/drm/radeon/radeon_state.c b/drivers/gpu/drm/radeon/radeon_state.c
index 4d20910..956ab7f 100644
--- a/drivers/gpu/drm/radeon/radeon_state.c
+++ b/drivers/gpu/drm/radeon/radeon_state.c
@@ -1810,7 +1810,7 @@
 		}
 		if (!buf) {
 			DRM_DEBUG("EAGAIN\n");
-			if (DRM_COPY_TO_USER(tex->image, image, sizeof(*image)))
+			if (copy_to_user(tex->image, image, sizeof(*image)))
 				return -EFAULT;
 			return -EAGAIN;
 		}
@@ -1823,7 +1823,7 @@
 
 #define RADEON_COPY_MT(_buf, _data, _width) \
 	do { \
-		if (DRM_COPY_FROM_USER(_buf, _data, (_width))) {\
+		if (copy_from_user(_buf, _data, (_width))) {\
 			DRM_ERROR("EFAULT on pad, %d bytes\n", (_width)); \
 			return -EFAULT; \
 		} \
@@ -2168,7 +2168,7 @@
 	if (sarea_priv->nbox > RADEON_NR_SAREA_CLIPRECTS)
 		sarea_priv->nbox = RADEON_NR_SAREA_CLIPRECTS;
 
-	if (DRM_COPY_FROM_USER(&depth_boxes, clear->depth_boxes,
+	if (copy_from_user(&depth_boxes, clear->depth_boxes,
 			       sarea_priv->nbox * sizeof(depth_boxes[0])))
 		return -EFAULT;
 
@@ -2436,7 +2436,7 @@
 		return -EINVAL;
 	}
 
-	if (DRM_COPY_FROM_USER(&image,
+	if (copy_from_user(&image,
 			       (drm_radeon_tex_image_t __user *) tex->image,
 			       sizeof(image)))
 		return -EFAULT;
@@ -2460,7 +2460,7 @@
 
 	LOCK_TEST_WITH_RETURN(dev, file_priv);
 
-	if (DRM_COPY_FROM_USER(&mask, stipple->mask, 32 * sizeof(u32)))
+	if (copy_from_user(&mask, stipple->mask, 32 * sizeof(u32)))
 		return -EFAULT;
 
 	RING_SPACE_TEST_WITH_RETURN(dev_priv);
@@ -2585,13 +2585,13 @@
 		drm_radeon_prim_t prim;
 		drm_radeon_tcl_prim_t tclprim;
 
-		if (DRM_COPY_FROM_USER(&prim, &vertex->prim[i], sizeof(prim)))
+		if (copy_from_user(&prim, &vertex->prim[i], sizeof(prim)))
 			return -EFAULT;
 
 		if (prim.stateidx != laststate) {
 			drm_radeon_state_t state;
 
-			if (DRM_COPY_FROM_USER(&state,
+			if (copy_from_user(&state,
 					       &vertex->state[prim.stateidx],
 					       sizeof(state)))
 				return -EFAULT;
@@ -2799,7 +2799,7 @@
 
 	do {
 		if (i < cmdbuf->nbox) {
-			if (DRM_COPY_FROM_USER(&box, &boxes[i], sizeof(box)))
+			if (copy_from_user(&box, &boxes[i], sizeof(box)))
 				return -EFAULT;
 			/* FIXME The second and subsequent times round
 			 * this loop, send a WAIT_UNTIL_3D_IDLE before
@@ -3116,7 +3116,7 @@
 		return -EINVAL;
 	}
 
-	if (DRM_COPY_TO_USER(param->value, &value, sizeof(int))) {
+	if (copy_to_user(param->value, &value, sizeof(int))) {
 		DRM_ERROR("copy_to_user\n");
 		return -EFAULT;
 	}
diff --git a/drivers/gpu/drm/radeon/radeon_trace.h b/drivers/gpu/drm/radeon/radeon_trace.h
index 0473257..f749f2c 100644
--- a/drivers/gpu/drm/radeon/radeon_trace.h
+++ b/drivers/gpu/drm/radeon/radeon_trace.h
@@ -106,42 +106,45 @@
 
 DECLARE_EVENT_CLASS(radeon_fence_request,
 
-	    TP_PROTO(struct drm_device *dev, u32 seqno),
+	    TP_PROTO(struct drm_device *dev, int ring, u32 seqno),
 
-	    TP_ARGS(dev, seqno),
+	    TP_ARGS(dev, ring, seqno),
 
 	    TP_STRUCT__entry(
 			     __field(u32, dev)
+			     __field(int, ring)
 			     __field(u32, seqno)
 			     ),
 
 	    TP_fast_assign(
 			   __entry->dev = dev->primary->index;
+			   __entry->ring = ring;
 			   __entry->seqno = seqno;
 			   ),
 
-	    TP_printk("dev=%u, seqno=%u", __entry->dev, __entry->seqno)
+	    TP_printk("dev=%u, ring=%d, seqno=%u",
+		      __entry->dev, __entry->ring, __entry->seqno)
 );
 
 DEFINE_EVENT(radeon_fence_request, radeon_fence_emit,
 
-	    TP_PROTO(struct drm_device *dev, u32 seqno),
+	    TP_PROTO(struct drm_device *dev, int ring, u32 seqno),
 
-	    TP_ARGS(dev, seqno)
+	    TP_ARGS(dev, ring, seqno)
 );
 
 DEFINE_EVENT(radeon_fence_request, radeon_fence_wait_begin,
 
-	    TP_PROTO(struct drm_device *dev, u32 seqno),
+	    TP_PROTO(struct drm_device *dev, int ring, u32 seqno),
 
-	    TP_ARGS(dev, seqno)
+	    TP_ARGS(dev, ring, seqno)
 );
 
 DEFINE_EVENT(radeon_fence_request, radeon_fence_wait_end,
 
-	    TP_PROTO(struct drm_device *dev, u32 seqno),
+	    TP_PROTO(struct drm_device *dev, int ring, u32 seqno),
 
-	    TP_ARGS(dev, seqno)
+	    TP_ARGS(dev, ring, seqno)
 );
 
 DECLARE_EVENT_CLASS(radeon_semaphore_request,
diff --git a/drivers/gpu/drm/radeon/radeon_ttm.c b/drivers/gpu/drm/radeon/radeon_ttm.c
index 71245d6..77f5b0c 100644
--- a/drivers/gpu/drm/radeon/radeon_ttm.c
+++ b/drivers/gpu/drm/radeon/radeon_ttm.c
@@ -39,12 +39,14 @@
 #include <linux/seq_file.h>
 #include <linux/slab.h>
 #include <linux/swiotlb.h>
+#include <linux/debugfs.h>
 #include "radeon_reg.h"
 #include "radeon.h"
 
 #define DRM_FILE_PAGE_OFFSET (0x100000000ULL >> PAGE_SHIFT)
 
 static int radeon_ttm_debugfs_init(struct radeon_device *rdev);
+static void radeon_ttm_debugfs_fini(struct radeon_device *rdev);
 
 static struct radeon_device *radeon_get_rdev(struct ttm_bo_device *bdev)
 {
@@ -142,7 +144,7 @@
 		man->flags = TTM_MEMTYPE_FLAG_MAPPABLE | TTM_MEMTYPE_FLAG_CMA;
 #if __OS_HAS_AGP
 		if (rdev->flags & RADEON_IS_AGP) {
-			if (!(drm_core_has_AGP(rdev->ddev) && rdev->ddev->agp)) {
+			if (!rdev->ddev->agp) {
 				DRM_ERROR("AGP is not enabled for memory type %u\n",
 					  (unsigned)type);
 				return -EINVAL;
@@ -753,6 +755,7 @@
 
 	if (!rdev->mman.initialized)
 		return;
+	radeon_ttm_debugfs_fini(rdev);
 	if (rdev->stollen_vga_memory) {
 		r = radeon_bo_reserve(rdev->stollen_vga_memory, false);
 		if (r == 0) {
@@ -832,16 +835,15 @@
 	return 0;
 }
 
-
-#define RADEON_DEBUGFS_MEM_TYPES 2
-
 #if defined(CONFIG_DEBUG_FS)
+
 static int radeon_mm_dump_table(struct seq_file *m, void *data)
 {
 	struct drm_info_node *node = (struct drm_info_node *)m->private;
-	struct drm_mm *mm = (struct drm_mm *)node->info_ent->data;
+	unsigned ttm_pl = *(int *)node->info_ent->data;
 	struct drm_device *dev = node->minor->dev;
 	struct radeon_device *rdev = dev->dev_private;
+	struct drm_mm *mm = (struct drm_mm *)rdev->mman.bdev.man[ttm_pl].priv;
 	int ret;
 	struct ttm_bo_global *glob = rdev->mman.bdev.glob;
 
@@ -850,46 +852,169 @@
 	spin_unlock(&glob->lru_lock);
 	return ret;
 }
+
+static int ttm_pl_vram = TTM_PL_VRAM;
+static int ttm_pl_tt = TTM_PL_TT;
+
+static struct drm_info_list radeon_ttm_debugfs_list[] = {
+	{"radeon_vram_mm", radeon_mm_dump_table, 0, &ttm_pl_vram},
+	{"radeon_gtt_mm", radeon_mm_dump_table, 0, &ttm_pl_tt},
+	{"ttm_page_pool", ttm_page_alloc_debugfs, 0, NULL},
+#ifdef CONFIG_SWIOTLB
+	{"ttm_dma_page_pool", ttm_dma_page_alloc_debugfs, 0, NULL}
+#endif
+};
+
+static int radeon_ttm_vram_open(struct inode *inode, struct file *filep)
+{
+	struct radeon_device *rdev = inode->i_private;
+	i_size_write(inode, rdev->mc.mc_vram_size);
+	filep->private_data = inode->i_private;
+	return 0;
+}
+
+static ssize_t radeon_ttm_vram_read(struct file *f, char __user *buf,
+				    size_t size, loff_t *pos)
+{
+	struct radeon_device *rdev = f->private_data;
+	ssize_t result = 0;
+	int r;
+
+	if (size & 0x3 || *pos & 0x3)
+		return -EINVAL;
+
+	while (size) {
+		unsigned long flags;
+		uint32_t value;
+
+		if (*pos >= rdev->mc.mc_vram_size)
+			return result;
+
+		spin_lock_irqsave(&rdev->mmio_idx_lock, flags);
+		WREG32(RADEON_MM_INDEX, ((uint32_t)*pos) | 0x80000000);
+		if (rdev->family >= CHIP_CEDAR)
+			WREG32(EVERGREEN_MM_INDEX_HI, *pos >> 31);
+		value = RREG32(RADEON_MM_DATA);
+		spin_unlock_irqrestore(&rdev->mmio_idx_lock, flags);
+
+		r = put_user(value, (uint32_t *)buf);
+		if (r)
+			return r;
+
+		result += 4;
+		buf += 4;
+		*pos += 4;
+		size -= 4;
+	}
+
+	return result;
+}
+
+static const struct file_operations radeon_ttm_vram_fops = {
+	.owner = THIS_MODULE,
+	.open = radeon_ttm_vram_open,
+	.read = radeon_ttm_vram_read,
+	.llseek = default_llseek
+};
+
+static int radeon_ttm_gtt_open(struct inode *inode, struct file *filep)
+{
+	struct radeon_device *rdev = inode->i_private;
+	i_size_write(inode, rdev->mc.gtt_size);
+	filep->private_data = inode->i_private;
+	return 0;
+}
+
+static ssize_t radeon_ttm_gtt_read(struct file *f, char __user *buf,
+				   size_t size, loff_t *pos)
+{
+	struct radeon_device *rdev = f->private_data;
+	ssize_t result = 0;
+	int r;
+
+	while (size) {
+		loff_t p = *pos / PAGE_SIZE;
+		unsigned off = *pos & ~PAGE_MASK;
+		ssize_t cur_size = min(size, PAGE_SIZE - off);
+		struct page *page;
+		void *ptr;
+
+		if (p >= rdev->gart.num_cpu_pages)
+			return result;
+
+		page = rdev->gart.pages[p];
+		if (page) {
+			ptr = kmap(page);
+			ptr += off;
+
+			r = copy_to_user(buf, ptr, cur_size);
+			kunmap(rdev->gart.pages[p]);
+		} else
+			r = clear_user(buf, cur_size);
+
+		if (r)
+			return -EFAULT;
+
+		result += cur_size;
+		buf += cur_size;
+		*pos += cur_size;
+		size -= cur_size;
+	}
+
+	return result;
+}
+
+static const struct file_operations radeon_ttm_gtt_fops = {
+	.owner = THIS_MODULE,
+	.open = radeon_ttm_gtt_open,
+	.read = radeon_ttm_gtt_read,
+	.llseek = default_llseek
+};
+
 #endif
 
 static int radeon_ttm_debugfs_init(struct radeon_device *rdev)
 {
 #if defined(CONFIG_DEBUG_FS)
-	static struct drm_info_list radeon_mem_types_list[RADEON_DEBUGFS_MEM_TYPES+2];
-	static char radeon_mem_types_names[RADEON_DEBUGFS_MEM_TYPES+2][32];
-	unsigned i;
+	unsigned count;
 
-	for (i = 0; i < RADEON_DEBUGFS_MEM_TYPES; i++) {
-		if (i == 0)
-			sprintf(radeon_mem_types_names[i], "radeon_vram_mm");
-		else
-			sprintf(radeon_mem_types_names[i], "radeon_gtt_mm");
-		radeon_mem_types_list[i].name = radeon_mem_types_names[i];
-		radeon_mem_types_list[i].show = &radeon_mm_dump_table;
-		radeon_mem_types_list[i].driver_features = 0;
-		if (i == 0)
-			radeon_mem_types_list[i].data = rdev->mman.bdev.man[TTM_PL_VRAM].priv;
-		else
-			radeon_mem_types_list[i].data = rdev->mman.bdev.man[TTM_PL_TT].priv;
+	struct drm_minor *minor = rdev->ddev->primary;
+	struct dentry *ent, *root = minor->debugfs_root;
 
-	}
-	/* Add ttm page pool to debugfs */
-	sprintf(radeon_mem_types_names[i], "ttm_page_pool");
-	radeon_mem_types_list[i].name = radeon_mem_types_names[i];
-	radeon_mem_types_list[i].show = &ttm_page_alloc_debugfs;
-	radeon_mem_types_list[i].driver_features = 0;
-	radeon_mem_types_list[i++].data = NULL;
+	ent = debugfs_create_file("radeon_vram", S_IFREG | S_IRUGO, root,
+				  rdev, &radeon_ttm_vram_fops);
+	if (IS_ERR(ent))
+		return PTR_ERR(ent);
+	rdev->mman.vram = ent;
+
+	ent = debugfs_create_file("radeon_gtt", S_IFREG | S_IRUGO, root,
+				  rdev, &radeon_ttm_gtt_fops);
+	if (IS_ERR(ent))
+		return PTR_ERR(ent);
+	rdev->mman.gtt = ent;
+
+	count = ARRAY_SIZE(radeon_ttm_debugfs_list);
+
 #ifdef CONFIG_SWIOTLB
-	if (swiotlb_nr_tbl()) {
-		sprintf(radeon_mem_types_names[i], "ttm_dma_page_pool");
-		radeon_mem_types_list[i].name = radeon_mem_types_names[i];
-		radeon_mem_types_list[i].show = &ttm_dma_page_alloc_debugfs;
-		radeon_mem_types_list[i].driver_features = 0;
-		radeon_mem_types_list[i++].data = NULL;
-	}
+	if (!swiotlb_nr_tbl())
+		--count;
 #endif
-	return radeon_debugfs_add_files(rdev, radeon_mem_types_list, i);
 
-#endif
+	return radeon_debugfs_add_files(rdev, radeon_ttm_debugfs_list, count);
+#else
+
 	return 0;
+#endif
+}
+
+static void radeon_ttm_debugfs_fini(struct radeon_device *rdev)
+{
+#if defined(CONFIG_DEBUG_FS)
+
+	debugfs_remove(rdev->mman.vram);
+	rdev->mman.vram = NULL;
+
+	debugfs_remove(rdev->mman.gtt);
+	rdev->mman.gtt = NULL;
+#endif
 }
diff --git a/drivers/gpu/drm/radeon/radeon_uvd.c b/drivers/gpu/drm/radeon/radeon_uvd.c
index b9c0529..6781fee 100644
--- a/drivers/gpu/drm/radeon/radeon_uvd.c
+++ b/drivers/gpu/drm/radeon/radeon_uvd.c
@@ -91,6 +91,7 @@
 	case CHIP_VERDE:
 	case CHIP_PITCAIRN:
 	case CHIP_ARUBA:
+	case CHIP_OLAND:
 		fw_name = FIRMWARE_TAHITI;
 		break;
 
@@ -778,6 +779,8 @@
 
 	if (radeon_fence_count_emitted(rdev, R600_RING_TYPE_UVD_INDEX) == 0) {
 		if ((rdev->pm.pm_method == PM_METHOD_DPM) && rdev->pm.dpm_enabled) {
+			radeon_uvd_count_handles(rdev, &rdev->pm.dpm.sd,
+						 &rdev->pm.dpm.hd);
 			radeon_dpm_enable_uvd(rdev, false);
 		} else {
 			radeon_set_uvd_clocks(rdev, 0, 0);
diff --git a/drivers/gpu/drm/radeon/reg_srcs/r600 b/drivers/gpu/drm/radeon/reg_srcs/r600
index 20bfbda..ec0c682 100644
--- a/drivers/gpu/drm/radeon/reg_srcs/r600
+++ b/drivers/gpu/drm/radeon/reg_srcs/r600
@@ -18,6 +18,7 @@
 0x00028A3C VGT_GROUP_VECT_1_FMT_CNTL
 0x00028A40 VGT_GS_MODE
 0x00028A6C VGT_GS_OUT_PRIM_TYPE
+0x00028B38 VGT_GS_MAX_VERT_OUT
 0x000088C8 VGT_GS_PER_ES
 0x000088E8 VGT_GS_PER_VS
 0x000088D4 VGT_GS_VERTEX_REUSE
diff --git a/drivers/gpu/drm/radeon/rs400.c b/drivers/gpu/drm/radeon/rs400.c
index 9566b59..b5c2369 100644
--- a/drivers/gpu/drm/radeon/rs400.c
+++ b/drivers/gpu/drm/radeon/rs400.c
@@ -474,6 +474,8 @@
 	/* Initialize surface registers */
 	radeon_surface_init(rdev);
 
+	radeon_pm_resume(rdev);
+
 	rdev->accel_working = true;
 	r = rs400_startup(rdev);
 	if (r) {
@@ -484,6 +486,7 @@
 
 int rs400_suspend(struct radeon_device *rdev)
 {
+	radeon_pm_suspend(rdev);
 	r100_cp_disable(rdev);
 	radeon_wb_disable(rdev);
 	r100_irq_disable(rdev);
@@ -493,6 +496,7 @@
 
 void rs400_fini(struct radeon_device *rdev)
 {
+	radeon_pm_fini(rdev);
 	r100_cp_fini(rdev);
 	radeon_wb_fini(rdev);
 	radeon_ib_pool_fini(rdev);
@@ -560,6 +564,9 @@
 		return r;
 	r300_set_reg_safe(rdev);
 
+	/* Initialize power management */
+	radeon_pm_init(rdev);
+
 	rdev->accel_working = true;
 	r = rs400_startup(rdev);
 	if (r) {
diff --git a/drivers/gpu/drm/radeon/rs600.c b/drivers/gpu/drm/radeon/rs600.c
index 76cc8d3..fdcde76 100644
--- a/drivers/gpu/drm/radeon/rs600.c
+++ b/drivers/gpu/drm/radeon/rs600.c
@@ -1048,6 +1048,8 @@
 	/* Initialize surface registers */
 	radeon_surface_init(rdev);
 
+	radeon_pm_resume(rdev);
+
 	rdev->accel_working = true;
 	r = rs600_startup(rdev);
 	if (r) {
@@ -1058,6 +1060,7 @@
 
 int rs600_suspend(struct radeon_device *rdev)
 {
+	radeon_pm_suspend(rdev);
 	r600_audio_fini(rdev);
 	r100_cp_disable(rdev);
 	radeon_wb_disable(rdev);
@@ -1068,6 +1071,7 @@
 
 void rs600_fini(struct radeon_device *rdev)
 {
+	radeon_pm_fini(rdev);
 	r600_audio_fini(rdev);
 	r100_cp_fini(rdev);
 	radeon_wb_fini(rdev);
@@ -1136,6 +1140,9 @@
 		return r;
 	rs600_set_safe_registers(rdev);
 
+	/* Initialize power management */
+	radeon_pm_init(rdev);
+
 	rdev->accel_working = true;
 	r = rs600_startup(rdev);
 	if (r) {
diff --git a/drivers/gpu/drm/radeon/rs690.c b/drivers/gpu/drm/radeon/rs690.c
index e7dab06..3595073 100644
--- a/drivers/gpu/drm/radeon/rs690.c
+++ b/drivers/gpu/drm/radeon/rs690.c
@@ -756,6 +756,8 @@
 	/* Initialize surface registers */
 	radeon_surface_init(rdev);
 
+	radeon_pm_resume(rdev);
+
 	rdev->accel_working = true;
 	r = rs690_startup(rdev);
 	if (r) {
@@ -766,6 +768,7 @@
 
 int rs690_suspend(struct radeon_device *rdev)
 {
+	radeon_pm_suspend(rdev);
 	r600_audio_fini(rdev);
 	r100_cp_disable(rdev);
 	radeon_wb_disable(rdev);
@@ -776,6 +779,7 @@
 
 void rs690_fini(struct radeon_device *rdev)
 {
+	radeon_pm_fini(rdev);
 	r600_audio_fini(rdev);
 	r100_cp_fini(rdev);
 	radeon_wb_fini(rdev);
@@ -845,6 +849,9 @@
 		return r;
 	rs600_set_safe_registers(rdev);
 
+	/* Initialize power management */
+	radeon_pm_init(rdev);
+
 	rdev->accel_working = true;
 	r = rs690_startup(rdev);
 	if (r) {
diff --git a/drivers/gpu/drm/radeon/rs780_dpm.c b/drivers/gpu/drm/radeon/rs780_dpm.c
index 6af8505..8512085 100644
--- a/drivers/gpu/drm/radeon/rs780_dpm.c
+++ b/drivers/gpu/drm/radeon/rs780_dpm.c
@@ -623,14 +623,6 @@
 	if (pi->gfx_clock_gating)
 		r600_gfx_clockgating_enable(rdev, true);
 
-	if (rdev->irq.installed && (rdev->pm.int_thermal_type == THERMAL_TYPE_RV6XX)) {
-		ret = r600_set_thermal_temperature_range(rdev, R600_TEMP_RANGE_MIN, R600_TEMP_RANGE_MAX);
-		if (ret)
-			return ret;
-		rdev->irq.dpm_thermal = true;
-		radeon_irq_set(rdev);
-	}
-
 	return 0;
 }
 
diff --git a/drivers/gpu/drm/radeon/rv515.c b/drivers/gpu/drm/radeon/rv515.c
index 5d1c316..98e8138 100644
--- a/drivers/gpu/drm/radeon/rv515.c
+++ b/drivers/gpu/drm/radeon/rv515.c
@@ -586,6 +586,8 @@
 	/* Initialize surface registers */
 	radeon_surface_init(rdev);
 
+	radeon_pm_resume(rdev);
+
 	rdev->accel_working = true;
 	r =  rv515_startup(rdev);
 	if (r) {
@@ -596,6 +598,7 @@
 
 int rv515_suspend(struct radeon_device *rdev)
 {
+	radeon_pm_suspend(rdev);
 	r100_cp_disable(rdev);
 	radeon_wb_disable(rdev);
 	rs600_irq_disable(rdev);
@@ -612,6 +615,7 @@
 
 void rv515_fini(struct radeon_device *rdev)
 {
+	radeon_pm_fini(rdev);
 	r100_cp_fini(rdev);
 	radeon_wb_fini(rdev);
 	radeon_ib_pool_fini(rdev);
@@ -685,6 +689,9 @@
 		return r;
 	rv515_set_safe_registers(rdev);
 
+	/* Initialize power management */
+	radeon_pm_init(rdev);
+
 	rdev->accel_working = true;
 	r = rv515_startup(rdev);
 	if (r) {
diff --git a/drivers/gpu/drm/radeon/rv6xx_dpm.c b/drivers/gpu/drm/radeon/rv6xx_dpm.c
index 26633a0..bebf31c 100644
--- a/drivers/gpu/drm/radeon/rv6xx_dpm.c
+++ b/drivers/gpu/drm/radeon/rv6xx_dpm.c
@@ -1546,7 +1546,6 @@
 {
 	struct rv6xx_power_info *pi = rv6xx_get_pi(rdev);
 	struct radeon_ps *boot_ps = rdev->pm.dpm.boot_ps;
-	int ret;
 
 	if (r600_dynamicpm_enabled(rdev))
 		return -EINVAL;
@@ -1594,15 +1593,6 @@
 	r600_power_level_enable(rdev, R600_POWER_LEVEL_MEDIUM, true);
 	r600_power_level_enable(rdev, R600_POWER_LEVEL_HIGH, true);
 
-	if (rdev->irq.installed &&
-	    r600_is_internal_thermal_sensor(rdev->pm.int_thermal_type)) {
-		ret = r600_set_thermal_temperature_range(rdev, R600_TEMP_RANGE_MIN, R600_TEMP_RANGE_MAX);
-		if (ret)
-			return ret;
-		rdev->irq.dpm_thermal = true;
-		radeon_irq_set(rdev);
-	}
-
 	rv6xx_enable_auto_throttle_source(rdev, RADEON_DPM_AUTO_THROTTLE_SRC_THERMAL, true);
 
 	r600_start_dpm(rdev);
diff --git a/drivers/gpu/drm/radeon/rv770.c b/drivers/gpu/drm/radeon/rv770.c
index 9f58467..6c772e5 100644
--- a/drivers/gpu/drm/radeon/rv770.c
+++ b/drivers/gpu/drm/radeon/rv770.c
@@ -1071,7 +1071,8 @@
  */
 void r700_cp_stop(struct radeon_device *rdev)
 {
-	radeon_ttm_set_active_vram_size(rdev, rdev->mc.visible_vram_size);
+	if (rdev->asic->copy.copy_ring_index == RADEON_RING_TYPE_GFX_INDEX)
+		radeon_ttm_set_active_vram_size(rdev, rdev->mc.visible_vram_size);
 	WREG32(CP_ME_CNTL, (CP_ME_HALT | CP_PFP_HALT));
 	WREG32(SCRATCH_UMSK, 0);
 	rdev->ring[RADEON_RING_TYPE_GFX_INDEX].ready = false;
@@ -1123,6 +1124,35 @@
 	radeon_scratch_free(rdev, ring->rptr_save_reg);
 }
 
+void rv770_set_clk_bypass_mode(struct radeon_device *rdev)
+{
+	u32 tmp, i;
+
+	if (rdev->flags & RADEON_IS_IGP)
+		return;
+
+	tmp = RREG32(CG_SPLL_FUNC_CNTL_2);
+	tmp &= SCLK_MUX_SEL_MASK;
+	tmp |= SCLK_MUX_SEL(1) | SCLK_MUX_UPDATE;
+	WREG32(CG_SPLL_FUNC_CNTL_2, tmp);
+
+	for (i = 0; i < rdev->usec_timeout; i++) {
+		if (RREG32(CG_SPLL_STATUS) & SPLL_CHG_STATUS)
+			break;
+		udelay(1);
+	}
+
+	tmp &= ~SCLK_MUX_UPDATE;
+	WREG32(CG_SPLL_FUNC_CNTL_2, tmp);
+
+	tmp = RREG32(MPLL_CNTL_MODE);
+	if ((rdev->family == CHIP_RV710) || (rdev->family == CHIP_RV730))
+		tmp &= ~RV730_MPLL_MCLK_SEL;
+	else
+		tmp &= ~MPLL_MCLK_SEL;
+	WREG32(MPLL_CNTL_MODE, tmp);
+}
+
 /*
  * Core functions
  */
@@ -1665,14 +1695,6 @@
 
 	rv770_mc_program(rdev);
 
-	if (!rdev->me_fw || !rdev->pfp_fw || !rdev->rlc_fw) {
-		r = r600_init_microcode(rdev);
-		if (r) {
-			DRM_ERROR("Failed to load firmware!\n");
-			return r;
-		}
-	}
-
 	if (rdev->flags & RADEON_IS_AGP) {
 		rv770_agp_enable(rdev);
 	} else {
@@ -1728,14 +1750,12 @@
 
 	ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
 	r = radeon_ring_init(rdev, ring, ring->ring_size, RADEON_WB_CP_RPTR_OFFSET,
-			     R600_CP_RB_RPTR, R600_CP_RB_WPTR,
 			     RADEON_CP_PACKET2);
 	if (r)
 		return r;
 
 	ring = &rdev->ring[R600_RING_TYPE_DMA_INDEX];
 	r = radeon_ring_init(rdev, ring, ring->ring_size, R600_WB_DMA_RPTR_OFFSET,
-			     DMA_RB_RPTR, DMA_RB_WPTR,
 			     DMA_PACKET(DMA_PACKET_NOP, 0, 0, 0));
 	if (r)
 		return r;
@@ -1754,7 +1774,6 @@
 	ring = &rdev->ring[R600_RING_TYPE_UVD_INDEX];
 	if (ring->ring_size) {
 		r = radeon_ring_init(rdev, ring, ring->ring_size, 0,
-				     UVD_RBC_RB_RPTR, UVD_RBC_RB_WPTR,
 				     RADEON_CP_PACKET2);
 		if (!r)
 			r = uvd_v1_0_init(rdev);
@@ -1792,6 +1811,8 @@
 	/* init golden registers */
 	rv770_init_golden_registers(rdev);
 
+	radeon_pm_resume(rdev);
+
 	rdev->accel_working = true;
 	r = rv770_startup(rdev);
 	if (r) {
@@ -1806,6 +1827,7 @@
 
 int rv770_suspend(struct radeon_device *rdev)
 {
+	radeon_pm_suspend(rdev);
 	r600_audio_fini(rdev);
 	uvd_v1_0_fini(rdev);
 	radeon_uvd_suspend(rdev);
@@ -1876,6 +1898,17 @@
 	if (r)
 		return r;
 
+	if (!rdev->me_fw || !rdev->pfp_fw || !rdev->rlc_fw) {
+		r = r600_init_microcode(rdev);
+		if (r) {
+			DRM_ERROR("Failed to load firmware!\n");
+			return r;
+		}
+	}
+
+	/* Initialize power management */
+	radeon_pm_init(rdev);
+
 	rdev->ring[RADEON_RING_TYPE_GFX_INDEX].ring_obj = NULL;
 	r600_ring_init(rdev, &rdev->ring[RADEON_RING_TYPE_GFX_INDEX], 1024 * 1024);
 
@@ -1915,6 +1948,7 @@
 
 void rv770_fini(struct radeon_device *rdev)
 {
+	radeon_pm_fini(rdev);
 	r700_cp_fini(rdev);
 	r600_dma_fini(rdev);
 	r600_irq_fini(rdev);
diff --git a/drivers/gpu/drm/radeon/rv770_dpm.c b/drivers/gpu/drm/radeon/rv770_dpm.c
index 374499d..b5f63f5 100644
--- a/drivers/gpu/drm/radeon/rv770_dpm.c
+++ b/drivers/gpu/drm/radeon/rv770_dpm.c
@@ -1863,8 +1863,8 @@
 	}
 }
 
-int rv770_set_thermal_temperature_range(struct radeon_device *rdev,
-					int min_temp, int max_temp)
+static int rv770_set_thermal_temperature_range(struct radeon_device *rdev,
+					       int min_temp, int max_temp)
 {
 	int low_temp = 0 * 1000;
 	int high_temp = 255 * 1000;
@@ -1966,6 +1966,15 @@
 	if (pi->mg_clock_gating)
 		rv770_mg_clock_gating_enable(rdev, true);
 
+	rv770_enable_auto_throttle_source(rdev, RADEON_DPM_AUTO_THROTTLE_SRC_THERMAL, true);
+
+	return 0;
+}
+
+int rv770_dpm_late_enable(struct radeon_device *rdev)
+{
+	int ret;
+
 	if (rdev->irq.installed &&
 	    r600_is_internal_thermal_sensor(rdev->pm.int_thermal_type)) {
 		PPSMC_Result result;
@@ -1981,8 +1990,6 @@
 			DRM_DEBUG_KMS("Could not enable thermal interrupts.\n");
 	}
 
-	rv770_enable_auto_throttle_source(rdev, RADEON_DPM_AUTO_THROTTLE_SRC_THERMAL, true);
-
 	return 0;
 }
 
@@ -2167,7 +2174,6 @@
 	struct evergreen_power_info *eg_pi = evergreen_get_pi(rdev);
 	struct rv7xx_ps *ps = rv770_get_ps(rps);
 	u32 sclk, mclk;
-	u16 vddc;
 	struct rv7xx_pl *pl;
 
 	switch (index) {
@@ -2207,8 +2213,8 @@
 
 	/* patch up vddc if necessary */
 	if (pl->vddc == 0xff01) {
-		if (radeon_atom_get_max_vddc(rdev, 0, 0, &vddc) == 0)
-			pl->vddc = vddc;
+		if (pi->max_vddc)
+			pl->vddc = pi->max_vddc;
 	}
 
 	if (rps->class & ATOM_PPLIB_CLASSIFICATION_ACPI) {
@@ -2244,14 +2250,12 @@
 		pl->vddci = vddci;
 	}
 
-	if (rdev->family >= CHIP_BARTS) {
-		if ((rps->class & ATOM_PPLIB_CLASSIFICATION_UI_MASK) ==
-		    ATOM_PPLIB_CLASSIFICATION_UI_PERFORMANCE) {
-			rdev->pm.dpm.dyn_state.max_clock_voltage_on_ac.sclk = pl->sclk;
-			rdev->pm.dpm.dyn_state.max_clock_voltage_on_ac.mclk = pl->mclk;
-			rdev->pm.dpm.dyn_state.max_clock_voltage_on_ac.vddc = pl->vddc;
-			rdev->pm.dpm.dyn_state.max_clock_voltage_on_ac.vddci = pl->vddci;
-		}
+	if ((rps->class & ATOM_PPLIB_CLASSIFICATION_UI_MASK) ==
+	    ATOM_PPLIB_CLASSIFICATION_UI_PERFORMANCE) {
+		rdev->pm.dpm.dyn_state.max_clock_voltage_on_ac.sclk = pl->sclk;
+		rdev->pm.dpm.dyn_state.max_clock_voltage_on_ac.mclk = pl->mclk;
+		rdev->pm.dpm.dyn_state.max_clock_voltage_on_ac.vddc = pl->vddc;
+		rdev->pm.dpm.dyn_state.max_clock_voltage_on_ac.vddci = pl->vddci;
 	}
 }
 
@@ -2522,14 +2526,13 @@
 bool rv770_dpm_vblank_too_short(struct radeon_device *rdev)
 {
 	u32 vblank_time = r600_dpm_get_vblank_time(rdev);
-	u32 switch_limit = 300;
+	u32 switch_limit = 200; /* 300 */
 
-	/* quirks */
-	/* ASUS K70AF */
-	if ((rdev->pdev->device == 0x9553) &&
-	    (rdev->pdev->subsystem_vendor == 0x1043) &&
-	    (rdev->pdev->subsystem_device == 0x1c42))
-		switch_limit = 200;
+	/* RV770 */
+	/* mclk switching doesn't seem to work reliably on desktop RV770s */
+	if ((rdev->family == CHIP_RV770) &&
+	    !(rdev->flags & RADEON_IS_MOBILITY))
+		switch_limit = 0xffffffff; /* disable mclk switching */
 
 	if (vblank_time < switch_limit)
 		return true;
diff --git a/drivers/gpu/drm/radeon/rv770_dpm.h b/drivers/gpu/drm/radeon/rv770_dpm.h
index 9244eff..f776634 100644
--- a/drivers/gpu/drm/radeon/rv770_dpm.h
+++ b/drivers/gpu/drm/radeon/rv770_dpm.h
@@ -283,8 +283,4 @@
 int rv770_write_smc_soft_register(struct radeon_device *rdev,
 				  u16 reg_offset, u32 value);
 
-/* thermal */
-int rv770_set_thermal_temperature_range(struct radeon_device *rdev,
-					int min_temp, int max_temp);
-
 #endif
diff --git a/drivers/gpu/drm/radeon/rv770d.h b/drivers/gpu/drm/radeon/rv770d.h
index 1ae2771..3cf1e29 100644
--- a/drivers/gpu/drm/radeon/rv770d.h
+++ b/drivers/gpu/drm/radeon/rv770d.h
@@ -100,14 +100,21 @@
 #define	CG_SPLL_FUNC_CNTL_2				0x604
 #define		SCLK_MUX_SEL(x)				((x) << 0)
 #define		SCLK_MUX_SEL_MASK			(0x1ff << 0)
+#define		SCLK_MUX_UPDATE				(1 << 26)
 #define	CG_SPLL_FUNC_CNTL_3				0x608
 #define		SPLL_FB_DIV(x)				((x) << 0)
 #define		SPLL_FB_DIV_MASK			(0x3ffffff << 0)
 #define		SPLL_DITHEN				(1 << 28)
+#define	CG_SPLL_STATUS					0x60c
+#define		SPLL_CHG_STATUS				(1 << 1)
 
 #define	SPLL_CNTL_MODE					0x610
 #define		SPLL_DIV_SYNC				(1 << 5)
 
+#define MPLL_CNTL_MODE                                  0x61c
+#       define MPLL_MCLK_SEL                            (1 << 11)
+#       define RV730_MPLL_MCLK_SEL                      (1 << 25)
+
 #define	MPLL_AD_FUNC_CNTL				0x624
 #define		CLKF(x)					((x) << 0)
 #define		CLKF_MASK				(0x7f << 0)
diff --git a/drivers/gpu/drm/radeon/si.c b/drivers/gpu/drm/radeon/si.c
index 85e1edf..8357832 100644
--- a/drivers/gpu/drm/radeon/si.c
+++ b/drivers/gpu/drm/radeon/si.c
@@ -80,6 +80,8 @@
 extern bool evergreen_is_display_hung(struct radeon_device *rdev);
 static void si_enable_gui_idle_interrupt(struct radeon_device *rdev,
 					 bool enable);
+static void si_init_pg(struct radeon_device *rdev);
+static void si_init_cg(struct radeon_device *rdev);
 static void si_fini_pg(struct radeon_device *rdev);
 static void si_fini_cg(struct radeon_device *rdev);
 static void si_rlc_stop(struct radeon_device *rdev);
@@ -1460,7 +1462,7 @@
 };
 
 /* ucode loading */
-static int si_mc_load_microcode(struct radeon_device *rdev)
+int si_mc_load_microcode(struct radeon_device *rdev)
 {
 	const __be32 *fw_data;
 	u32 running, blackout = 0;
@@ -3247,7 +3249,8 @@
 	if (enable)
 		WREG32(CP_ME_CNTL, 0);
 	else {
-		radeon_ttm_set_active_vram_size(rdev, rdev->mc.visible_vram_size);
+		if (rdev->asic->copy.copy_ring_index == RADEON_RING_TYPE_GFX_INDEX)
+			radeon_ttm_set_active_vram_size(rdev, rdev->mc.visible_vram_size);
 		WREG32(CP_ME_CNTL, (CP_ME_HALT | CP_PFP_HALT | CP_CE_HALT));
 		WREG32(SCRATCH_UMSK, 0);
 		rdev->ring[RADEON_RING_TYPE_GFX_INDEX].ready = false;
@@ -3508,6 +3511,9 @@
 
 	si_enable_gui_idle_interrupt(rdev, true);
 
+	if (rdev->asic->copy.copy_ring_index == RADEON_RING_TYPE_GFX_INDEX)
+		radeon_ttm_set_active_vram_size(rdev, rdev->mc.real_vram_size);
+
 	return 0;
 }
 
@@ -3724,6 +3730,106 @@
 	evergreen_print_gpu_status_regs(rdev);
 }
 
+static void si_set_clk_bypass_mode(struct radeon_device *rdev)
+{
+	u32 tmp, i;
+
+	tmp = RREG32(CG_SPLL_FUNC_CNTL);
+	tmp |= SPLL_BYPASS_EN;
+	WREG32(CG_SPLL_FUNC_CNTL, tmp);
+
+	tmp = RREG32(CG_SPLL_FUNC_CNTL_2);
+	tmp |= SPLL_CTLREQ_CHG;
+	WREG32(CG_SPLL_FUNC_CNTL_2, tmp);
+
+	for (i = 0; i < rdev->usec_timeout; i++) {
+		if (RREG32(SPLL_STATUS) & SPLL_CHG_STATUS)
+			break;
+		udelay(1);
+	}
+
+	tmp = RREG32(CG_SPLL_FUNC_CNTL_2);
+	tmp &= ~(SPLL_CTLREQ_CHG | SCLK_MUX_UPDATE);
+	WREG32(CG_SPLL_FUNC_CNTL_2, tmp);
+
+	tmp = RREG32(MPLL_CNTL_MODE);
+	tmp &= ~MPLL_MCLK_SEL;
+	WREG32(MPLL_CNTL_MODE, tmp);
+}
+
+static void si_spll_powerdown(struct radeon_device *rdev)
+{
+	u32 tmp;
+
+	tmp = RREG32(SPLL_CNTL_MODE);
+	tmp |= SPLL_SW_DIR_CONTROL;
+	WREG32(SPLL_CNTL_MODE, tmp);
+
+	tmp = RREG32(CG_SPLL_FUNC_CNTL);
+	tmp |= SPLL_RESET;
+	WREG32(CG_SPLL_FUNC_CNTL, tmp);
+
+	tmp = RREG32(CG_SPLL_FUNC_CNTL);
+	tmp |= SPLL_SLEEP;
+	WREG32(CG_SPLL_FUNC_CNTL, tmp);
+
+	tmp = RREG32(SPLL_CNTL_MODE);
+	tmp &= ~SPLL_SW_DIR_CONTROL;
+	WREG32(SPLL_CNTL_MODE, tmp);
+}
+
+static void si_gpu_pci_config_reset(struct radeon_device *rdev)
+{
+	struct evergreen_mc_save save;
+	u32 tmp, i;
+
+	dev_info(rdev->dev, "GPU pci config reset\n");
+
+	/* disable dpm? */
+
+	/* disable cg/pg */
+	si_fini_pg(rdev);
+	si_fini_cg(rdev);
+
+	/* Disable CP parsing/prefetching */
+	WREG32(CP_ME_CNTL, CP_ME_HALT | CP_PFP_HALT | CP_CE_HALT);
+	/* dma0 */
+	tmp = RREG32(DMA_RB_CNTL + DMA0_REGISTER_OFFSET);
+	tmp &= ~DMA_RB_ENABLE;
+	WREG32(DMA_RB_CNTL + DMA0_REGISTER_OFFSET, tmp);
+	/* dma1 */
+	tmp = RREG32(DMA_RB_CNTL + DMA1_REGISTER_OFFSET);
+	tmp &= ~DMA_RB_ENABLE;
+	WREG32(DMA_RB_CNTL + DMA1_REGISTER_OFFSET, tmp);
+	/* XXX other engines? */
+
+	/* halt the rlc, disable cp internal ints */
+	si_rlc_stop(rdev);
+
+	udelay(50);
+
+	/* disable mem access */
+	evergreen_mc_stop(rdev, &save);
+	if (evergreen_mc_wait_for_idle(rdev)) {
+		dev_warn(rdev->dev, "Wait for MC idle timed out !\n");
+	}
+
+	/* set mclk/sclk to bypass */
+	si_set_clk_bypass_mode(rdev);
+	/* powerdown spll */
+	si_spll_powerdown(rdev);
+	/* disable BM */
+	pci_clear_master(rdev->pdev);
+	/* reset */
+	radeon_pci_config_reset(rdev);
+	/* wait for asic to come out of reset */
+	for (i = 0; i < rdev->usec_timeout; i++) {
+		if (RREG32(CONFIG_MEMSIZE) != 0xffffffff)
+			break;
+		udelay(1);
+	}
+}
+
 int si_asic_reset(struct radeon_device *rdev)
 {
 	u32 reset_mask;
@@ -3733,10 +3839,17 @@
 	if (reset_mask)
 		r600_set_bios_scratch_engine_hung(rdev, true);
 
+	/* try soft reset */
 	si_gpu_soft_reset(rdev, reset_mask);
 
 	reset_mask = si_gpu_check_soft_reset(rdev);
 
+	/* try pci config reset */
+	if (reset_mask && radeon_hard_reset)
+		si_gpu_pci_config_reset(rdev);
+
+	reset_mask = si_gpu_check_soft_reset(rdev);
+
 	if (!reset_mask)
 		r600_set_bios_scratch_engine_hung(rdev, false);
 
@@ -5212,8 +5325,8 @@
 		WREG32(HDP_MEM_POWER_LS, data);
 }
 
-void si_update_cg(struct radeon_device *rdev,
-		  u32 block, bool enable)
+static void si_update_cg(struct radeon_device *rdev,
+			 u32 block, bool enable)
 {
 	if (block & RADEON_CG_BLOCK_GFX) {
 		si_enable_gui_idle_interrupt(rdev, false);
@@ -5379,6 +5492,9 @@
 		si_init_ao_cu_mask(rdev);
 		if (rdev->pg_flags & RADEON_PG_SUPPORT_GFX_PG) {
 			si_init_gfx_cgpg(rdev);
+		} else {
+			WREG32(RLC_SAVE_AND_RESTORE_BASE, rdev->rlc.save_restore_gpu_addr >> 8);
+			WREG32(RLC_CLEAR_STATE_RESTORE_BASE, rdev->rlc.clear_state_gpu_addr >> 8);
 		}
 		si_enable_dma_pg(rdev, true);
 		si_enable_gfx_cgpg(rdev, true);
@@ -5566,7 +5682,7 @@
 	}
 
 	if (!ASIC_IS_NODCE(rdev)) {
-		WREG32(DACA_AUTODETECT_INT_CONTROL, 0);
+		WREG32(DAC_AUTODETECT_INT_CONTROL, 0);
 
 		tmp = RREG32(DC_HPD1_INT_CONTROL) & DC_HPDx_INT_POLARITY;
 		WREG32(DC_HPD1_INT_CONTROL, tmp);
@@ -6222,6 +6338,10 @@
 				break;
 			}
 			break;
+		case 124: /* UVD */
+			DRM_DEBUG("IH: UVD int: 0x%08x\n", src_data);
+			radeon_fence_process(rdev, R600_RING_TYPE_UVD_INDEX);
+			break;
 		case 146:
 		case 147:
 			addr = RREG32(VM_CONTEXT1_PROTECTION_FAULT_ADDR);
@@ -6324,21 +6444,14 @@
 
 	si_mc_program(rdev);
 
-	if (!rdev->me_fw || !rdev->pfp_fw || !rdev->ce_fw ||
-	    !rdev->rlc_fw || !rdev->mc_fw) {
-		r = si_init_microcode(rdev);
+	if (!rdev->pm.dpm_enabled) {
+		r = si_mc_load_microcode(rdev);
 		if (r) {
-			DRM_ERROR("Failed to load firmware!\n");
+			DRM_ERROR("Failed to load MC firmware!\n");
 			return r;
 		}
 	}
 
-	r = si_mc_load_microcode(rdev);
-	if (r) {
-		DRM_ERROR("Failed to load MC firmware!\n");
-		return r;
-	}
-
 	r = si_pcie_gart_enable(rdev);
 	if (r)
 		return r;
@@ -6421,37 +6534,30 @@
 
 	ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
 	r = radeon_ring_init(rdev, ring, ring->ring_size, RADEON_WB_CP_RPTR_OFFSET,
-			     CP_RB0_RPTR, CP_RB0_WPTR,
 			     RADEON_CP_PACKET2);
 	if (r)
 		return r;
 
 	ring = &rdev->ring[CAYMAN_RING_TYPE_CP1_INDEX];
 	r = radeon_ring_init(rdev, ring, ring->ring_size, RADEON_WB_CP1_RPTR_OFFSET,
-			     CP_RB1_RPTR, CP_RB1_WPTR,
 			     RADEON_CP_PACKET2);
 	if (r)
 		return r;
 
 	ring = &rdev->ring[CAYMAN_RING_TYPE_CP2_INDEX];
 	r = radeon_ring_init(rdev, ring, ring->ring_size, RADEON_WB_CP2_RPTR_OFFSET,
-			     CP_RB2_RPTR, CP_RB2_WPTR,
 			     RADEON_CP_PACKET2);
 	if (r)
 		return r;
 
 	ring = &rdev->ring[R600_RING_TYPE_DMA_INDEX];
 	r = radeon_ring_init(rdev, ring, ring->ring_size, R600_WB_DMA_RPTR_OFFSET,
-			     DMA_RB_RPTR + DMA0_REGISTER_OFFSET,
-			     DMA_RB_WPTR + DMA0_REGISTER_OFFSET,
 			     DMA_PACKET(DMA_PACKET_NOP, 0, 0, 0, 0));
 	if (r)
 		return r;
 
 	ring = &rdev->ring[CAYMAN_RING_TYPE_DMA1_INDEX];
 	r = radeon_ring_init(rdev, ring, ring->ring_size, CAYMAN_WB_DMA1_RPTR_OFFSET,
-			     DMA_RB_RPTR + DMA1_REGISTER_OFFSET,
-			     DMA_RB_WPTR + DMA1_REGISTER_OFFSET,
 			     DMA_PACKET(DMA_PACKET_NOP, 0, 0, 0, 0));
 	if (r)
 		return r;
@@ -6471,7 +6577,6 @@
 		ring = &rdev->ring[R600_RING_TYPE_UVD_INDEX];
 		if (ring->ring_size) {
 			r = radeon_ring_init(rdev, ring, ring->ring_size, 0,
-					     UVD_RBC_RB_RPTR, UVD_RBC_RB_WPTR,
 					     RADEON_CP_PACKET2);
 			if (!r)
 				r = uvd_v1_0_init(rdev);
@@ -6513,6 +6618,8 @@
 	/* init golden registers */
 	si_init_golden_registers(rdev);
 
+	radeon_pm_resume(rdev);
+
 	rdev->accel_working = true;
 	r = si_startup(rdev);
 	if (r) {
@@ -6527,6 +6634,7 @@
 
 int si_suspend(struct radeon_device *rdev)
 {
+	radeon_pm_suspend(rdev);
 	dce6_audio_fini(rdev);
 	radeon_vm_manager_fini(rdev);
 	si_cp_enable(rdev, false);
@@ -6600,6 +6708,18 @@
 	if (r)
 		return r;
 
+	if (!rdev->me_fw || !rdev->pfp_fw || !rdev->ce_fw ||
+	    !rdev->rlc_fw || !rdev->mc_fw) {
+		r = si_init_microcode(rdev);
+		if (r) {
+			DRM_ERROR("Failed to load firmware!\n");
+			return r;
+		}
+	}
+
+	/* Initialize power management */
+	radeon_pm_init(rdev);
+
 	ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
 	ring->ring_obj = NULL;
 	r600_ring_init(rdev, ring, 1024 * 1024);
@@ -6666,6 +6786,7 @@
 
 void si_fini(struct radeon_device *rdev)
 {
+	radeon_pm_fini(rdev);
 	si_cp_fini(rdev);
 	cayman_dma_fini(rdev);
 	si_fini_pg(rdev);
diff --git a/drivers/gpu/drm/radeon/si_dpm.c b/drivers/gpu/drm/radeon/si_dpm.c
index 0b00c79..0a2f5b4 100644
--- a/drivers/gpu/drm/radeon/si_dpm.c
+++ b/drivers/gpu/drm/radeon/si_dpm.c
@@ -1738,6 +1738,8 @@
 struct ni_power_info *ni_get_pi(struct radeon_device *rdev);
 struct ni_ps *ni_get_ps(struct radeon_ps *rps);
 
+extern int si_mc_load_microcode(struct radeon_device *rdev);
+
 static int si_populate_voltage_value(struct radeon_device *rdev,
 				     const struct atom_voltage_table *table,
 				     u16 value, SISLANDS_SMC_VOLTAGE_VALUE *voltage);
@@ -1753,9 +1755,6 @@
 				    u32 engine_clock,
 				    SISLANDS_SMC_SCLK_VALUE *sclk);
 
-extern void si_update_cg(struct radeon_device *rdev,
-			 u32 block, bool enable);
-
 static struct si_power_info *si_get_pi(struct radeon_device *rdev)
 {
         struct si_power_info *pi = rdev->pm.dpm.priv;
@@ -2396,7 +2395,7 @@
 	if (SISLANDS_DPM2_SQ_RAMP_STI_SIZE > (STI_SIZE_MASK >> STI_SIZE_SHIFT))
 		enable_sq_ramping = false;
 
-	if (NISLANDS_DPM2_SQ_RAMP_LTI_RATIO <= (LTI_RATIO_MASK >> LTI_RATIO_SHIFT))
+	if (SISLANDS_DPM2_SQ_RAMP_LTI_RATIO > (LTI_RATIO_MASK >> LTI_RATIO_SHIFT))
 		enable_sq_ramping = false;
 
 	for (i = 0; i < state->performance_level_count; i++) {
@@ -3591,10 +3590,9 @@
 
 	/* Setting this to false forces the performance state to low if the crtcs are disabled.
 	 * This can be a problem on PowerXpress systems or if you want to use the card
-	 * for offscreen rendering or compute if there are no crtcs enabled.  Set it to
-	 * true for now so that performance scales even if the displays are off.
+	 * for offscreen rendering or compute if there are no crtcs enabled.
 	 */
-	si_notify_smc_display_change(rdev, true /*rdev->pm.dpm.new_active_crtc_count > 0*/);
+	si_notify_smc_display_change(rdev, rdev->pm.dpm.new_active_crtc_count > 0);
 }
 
 static void si_enable_spread_spectrum(struct radeon_device *rdev, bool enable)
@@ -5414,7 +5412,7 @@
 
 	for (i = 0, j = 0; j < si_pi->mc_reg_table.last; j++) {
 		if (si_pi->mc_reg_table.valid_flag & (1 << j)) {
-			if (i >= SMC_NISLANDS_MC_REGISTER_ARRAY_SIZE)
+			if (i >= SMC_SISLANDS_MC_REGISTER_ARRAY_SIZE)
 				break;
 			mc_reg_table->address[i].s0 =
 				cpu_to_be16(si_pi->mc_reg_table.mc_reg_address[j].s0);
@@ -5754,6 +5752,11 @@
 
 void si_dpm_setup_asic(struct radeon_device *rdev)
 {
+	int r;
+
+	r = si_mc_load_microcode(rdev);
+	if (r)
+		DRM_ERROR("Failed to load MC firmware!\n");
 	rv770_get_memory_type(rdev);
 	si_read_clock_registers(rdev);
 	si_enable_acpi_power_management(rdev);
@@ -5791,13 +5794,6 @@
 	struct radeon_ps *boot_ps = rdev->pm.dpm.boot_ps;
 	int ret;
 
-	si_update_cg(rdev, (RADEON_CG_BLOCK_GFX |
-			    RADEON_CG_BLOCK_MC |
-			    RADEON_CG_BLOCK_SDMA |
-			    RADEON_CG_BLOCK_BIF |
-			    RADEON_CG_BLOCK_UVD |
-			    RADEON_CG_BLOCK_HDP), false);
-
 	if (si_is_smc_running(rdev))
 		return -EINVAL;
 	if (pi->voltage_control)
@@ -5900,6 +5896,17 @@
 	si_enable_sclk_control(rdev, true);
 	si_start_dpm(rdev);
 
+	si_enable_auto_throttle_source(rdev, RADEON_DPM_AUTO_THROTTLE_SRC_THERMAL, true);
+
+	ni_update_current_ps(rdev, boot_ps);
+
+	return 0;
+}
+
+int si_dpm_late_enable(struct radeon_device *rdev)
+{
+	int ret;
+
 	if (rdev->irq.installed &&
 	    r600_is_internal_thermal_sensor(rdev->pm.int_thermal_type)) {
 		PPSMC_Result result;
@@ -5915,17 +5922,6 @@
 			DRM_DEBUG_KMS("Could not enable thermal interrupts.\n");
 	}
 
-	si_enable_auto_throttle_source(rdev, RADEON_DPM_AUTO_THROTTLE_SRC_THERMAL, true);
-
-	si_update_cg(rdev, (RADEON_CG_BLOCK_GFX |
-			    RADEON_CG_BLOCK_MC |
-			    RADEON_CG_BLOCK_SDMA |
-			    RADEON_CG_BLOCK_BIF |
-			    RADEON_CG_BLOCK_UVD |
-			    RADEON_CG_BLOCK_HDP), true);
-
-	ni_update_current_ps(rdev, boot_ps);
-
 	return 0;
 }
 
@@ -5934,13 +5930,6 @@
 	struct rv7xx_power_info *pi = rv770_get_pi(rdev);
 	struct radeon_ps *boot_ps = rdev->pm.dpm.boot_ps;
 
-	si_update_cg(rdev, (RADEON_CG_BLOCK_GFX |
-			    RADEON_CG_BLOCK_MC |
-			    RADEON_CG_BLOCK_SDMA |
-			    RADEON_CG_BLOCK_BIF |
-			    RADEON_CG_BLOCK_UVD |
-			    RADEON_CG_BLOCK_HDP), false);
-
 	if (!si_is_smc_running(rdev))
 		return;
 	si_disable_ulv(rdev);
@@ -6005,13 +5994,6 @@
 	struct radeon_ps *old_ps = &eg_pi->current_rps;
 	int ret;
 
-	si_update_cg(rdev, (RADEON_CG_BLOCK_GFX |
-			    RADEON_CG_BLOCK_MC |
-			    RADEON_CG_BLOCK_SDMA |
-			    RADEON_CG_BLOCK_BIF |
-			    RADEON_CG_BLOCK_UVD |
-			    RADEON_CG_BLOCK_HDP), false);
-
 	ret = si_disable_ulv(rdev);
 	if (ret) {
 		DRM_ERROR("si_disable_ulv failed\n");
@@ -6104,13 +6086,6 @@
 		return ret;
 	}
 
-	si_update_cg(rdev, (RADEON_CG_BLOCK_GFX |
-			    RADEON_CG_BLOCK_MC |
-			    RADEON_CG_BLOCK_SDMA |
-			    RADEON_CG_BLOCK_BIF |
-			    RADEON_CG_BLOCK_UVD |
-			    RADEON_CG_BLOCK_HDP), true);
-
 	return 0;
 }
 
@@ -6497,7 +6472,8 @@
 void si_dpm_debugfs_print_current_performance_level(struct radeon_device *rdev,
 						    struct seq_file *m)
 {
-	struct radeon_ps *rps = rdev->pm.dpm.current_ps;
+	struct evergreen_power_info *eg_pi = evergreen_get_pi(rdev);
+	struct radeon_ps *rps = &eg_pi->current_rps;
 	struct ni_ps *ps = ni_get_ps(rps);
 	struct rv7xx_pl *pl;
 	u32 current_index =
diff --git a/drivers/gpu/drm/radeon/si_smc.c b/drivers/gpu/drm/radeon/si_smc.c
index d422a1c..e80efcf 100644
--- a/drivers/gpu/drm/radeon/si_smc.c
+++ b/drivers/gpu/drm/radeon/si_smc.c
@@ -28,6 +28,7 @@
 #include "sid.h"
 #include "ppsmc.h"
 #include "radeon_ucode.h"
+#include "sislands_smc.h"
 
 static int si_set_smc_sram_address(struct radeon_device *rdev,
 				   u32 smc_address, u32 limit)
diff --git a/drivers/gpu/drm/radeon/sid.h b/drivers/gpu/drm/radeon/sid.h
index b322acc..9239a6d 100644
--- a/drivers/gpu/drm/radeon/sid.h
+++ b/drivers/gpu/drm/radeon/sid.h
@@ -94,6 +94,8 @@
 #define	CG_SPLL_FUNC_CNTL_2				0x604
 #define		SCLK_MUX_SEL(x)				((x) << 0)
 #define		SCLK_MUX_SEL_MASK			(0x1ff << 0)
+#define		SPLL_CTLREQ_CHG				(1 << 23)
+#define		SCLK_MUX_UPDATE				(1 << 26)
 #define	CG_SPLL_FUNC_CNTL_3				0x608
 #define		SPLL_FB_DIV(x)				((x) << 0)
 #define		SPLL_FB_DIV_MASK			(0x3ffffff << 0)
@@ -101,7 +103,10 @@
 #define		SPLL_DITHEN				(1 << 28)
 #define	CG_SPLL_FUNC_CNTL_4				0x60c
 
+#define	SPLL_STATUS					0x614
+#define		SPLL_CHG_STATUS				(1 << 1)
 #define	SPLL_CNTL_MODE					0x618
+#define		SPLL_SW_DIR_CONTROL			(1 << 0)
 #	define SPLL_REFCLK_SEL(x)			((x) << 8)
 #	define SPLL_REFCLK_SEL_MASK			0xFF00
 
@@ -559,6 +564,8 @@
 #       define MRDCK0_BYPASS                            (1 << 24)
 #       define MRDCK1_BYPASS                            (1 << 25)
 
+#define	MPLL_CNTL_MODE					0x2bb0
+#       define MPLL_MCLK_SEL                            (1 << 11)
 #define	MPLL_FUNC_CNTL					0x2bb4
 #define		BWCTRL(x)				((x) << 20)
 #define		BWCTRL_MASK				(0xff << 20)
@@ -815,7 +822,7 @@
 #       define GRPH_PFLIP_INT_MASK                      (1 << 0)
 #       define GRPH_PFLIP_INT_TYPE                      (1 << 8)
 
-#define	DACA_AUTODETECT_INT_CONTROL			0x66c8
+#define	DAC_AUTODETECT_INT_CONTROL			0x67c8
 
 #define DC_HPD1_INT_STATUS                              0x601c
 #define DC_HPD2_INT_STATUS                              0x6028
diff --git a/drivers/gpu/drm/radeon/sislands_smc.h b/drivers/gpu/drm/radeon/sislands_smc.h
index 5578e98..10e945a 100644
--- a/drivers/gpu/drm/radeon/sislands_smc.h
+++ b/drivers/gpu/drm/radeon/sislands_smc.h
@@ -374,8 +374,6 @@
 
 #pragma pack(pop)
 
-int si_set_smc_sram_address(struct radeon_device *rdev,
-			    u32 smc_address, u32 limit);
 int si_copy_bytes_to_smc(struct radeon_device *rdev,
 			 u32 smc_start_address,
 			 const u8 *src, u32 byte_count, u32 limit);
diff --git a/drivers/gpu/drm/radeon/sumo_dpm.c b/drivers/gpu/drm/radeon/sumo_dpm.c
index 96ea6db8..8b47b3cd 100644
--- a/drivers/gpu/drm/radeon/sumo_dpm.c
+++ b/drivers/gpu/drm/radeon/sumo_dpm.c
@@ -71,7 +71,7 @@
 	SUMO_DTC_DFLT_14,
 };
 
-struct sumo_ps *sumo_get_ps(struct radeon_ps *rps)
+static struct sumo_ps *sumo_get_ps(struct radeon_ps *rps)
 {
 	struct sumo_ps *ps = rps->ps_priv;
 
@@ -1202,14 +1202,10 @@
 int sumo_dpm_enable(struct radeon_device *rdev)
 {
 	struct sumo_power_info *pi = sumo_get_pi(rdev);
-	int ret;
 
 	if (sumo_dpm_enabled(rdev))
 		return -EINVAL;
 
-	ret = sumo_enable_clock_power_gating(rdev);
-	if (ret)
-		return ret;
 	sumo_program_bootup_state(rdev);
 	sumo_init_bsp(rdev);
 	sumo_reset_am(rdev);
@@ -1233,6 +1229,19 @@
 	if (pi->enable_boost)
 		sumo_enable_boost_timer(rdev);
 
+	sumo_update_current_ps(rdev, rdev->pm.dpm.boot_ps);
+
+	return 0;
+}
+
+int sumo_dpm_late_enable(struct radeon_device *rdev)
+{
+	int ret;
+
+	ret = sumo_enable_clock_power_gating(rdev);
+	if (ret)
+		return ret;
+
 	if (rdev->irq.installed &&
 	    r600_is_internal_thermal_sensor(rdev->pm.int_thermal_type)) {
 		ret = sumo_set_thermal_temperature_range(rdev, R600_TEMP_RANGE_MIN, R600_TEMP_RANGE_MAX);
@@ -1242,8 +1251,6 @@
 		radeon_irq_set(rdev);
 	}
 
-	sumo_update_current_ps(rdev, rdev->pm.dpm.boot_ps);
-
 	return 0;
 }
 
@@ -1800,7 +1807,7 @@
 						      struct seq_file *m)
 {
 	struct sumo_power_info *pi = sumo_get_pi(rdev);
-	struct radeon_ps *rps = rdev->pm.dpm.current_ps;
+	struct radeon_ps *rps = &pi->current_rps;
 	struct sumo_ps *ps = sumo_get_ps(rps);
 	struct sumo_pl *pl;
 	u32 current_index =
diff --git a/drivers/gpu/drm/radeon/sumo_smc.c b/drivers/gpu/drm/radeon/sumo_smc.c
index 18abba5..fb081d2 100644
--- a/drivers/gpu/drm/radeon/sumo_smc.c
+++ b/drivers/gpu/drm/radeon/sumo_smc.c
@@ -31,7 +31,6 @@
 #define SUMO_SMU_SERVICE_ROUTINE_ALTVDDNB_NOTIFY  27
 #define SUMO_SMU_SERVICE_ROUTINE_GFX_SRV_ID_20  20
 
-struct sumo_ps *sumo_get_ps(struct radeon_ps *rps);
 struct sumo_power_info *sumo_get_pi(struct radeon_device *rdev);
 
 static void sumo_send_msg_to_smu(struct radeon_device *rdev, u32 id)
diff --git a/drivers/gpu/drm/radeon/trinity_dpm.c b/drivers/gpu/drm/radeon/trinity_dpm.c
index d700698..2da0e17 100644
--- a/drivers/gpu/drm/radeon/trinity_dpm.c
+++ b/drivers/gpu/drm/radeon/trinity_dpm.c
@@ -342,14 +342,14 @@
 					     struct radeon_ps *new_rps,
 					     struct radeon_ps *old_rps);
 
-struct trinity_ps *trinity_get_ps(struct radeon_ps *rps)
+static struct trinity_ps *trinity_get_ps(struct radeon_ps *rps)
 {
 	struct trinity_ps *ps = rps->ps_priv;
 
 	return ps;
 }
 
-struct trinity_power_info *trinity_get_pi(struct radeon_device *rdev)
+static struct trinity_power_info *trinity_get_pi(struct radeon_device *rdev)
 {
 	struct trinity_power_info *pi = rdev->pm.dpm.priv;
 
@@ -1082,7 +1082,6 @@
 int trinity_dpm_enable(struct radeon_device *rdev)
 {
 	struct trinity_power_info *pi = trinity_get_pi(rdev);
-	int ret;
 
 	trinity_acquire_mutex(rdev);
 
@@ -1091,7 +1090,6 @@
 		return -EINVAL;
 	}
 
-	trinity_enable_clock_power_gating(rdev);
 	trinity_program_bootup_state(rdev);
 	sumo_program_vc(rdev, 0x00C00033);
 	trinity_start_am(rdev);
@@ -1105,6 +1103,18 @@
 	trinity_dpm_bapm_enable(rdev, false);
 	trinity_release_mutex(rdev);
 
+	trinity_update_current_ps(rdev, rdev->pm.dpm.boot_ps);
+
+	return 0;
+}
+
+int trinity_dpm_late_enable(struct radeon_device *rdev)
+{
+	int ret;
+
+	trinity_acquire_mutex(rdev);
+	trinity_enable_clock_power_gating(rdev);
+
 	if (rdev->irq.installed &&
 	    r600_is_internal_thermal_sensor(rdev->pm.int_thermal_type)) {
 		ret = trinity_set_thermal_temperature_range(rdev, R600_TEMP_RANGE_MIN, R600_TEMP_RANGE_MAX);
@@ -1115,8 +1125,7 @@
 		rdev->irq.dpm_thermal = true;
 		radeon_irq_set(rdev);
 	}
-
-	trinity_update_current_ps(rdev, rdev->pm.dpm.boot_ps);
+	trinity_release_mutex(rdev);
 
 	return 0;
 }
@@ -1917,7 +1926,8 @@
 void trinity_dpm_debugfs_print_current_performance_level(struct radeon_device *rdev,
 							 struct seq_file *m)
 {
-	struct radeon_ps *rps = rdev->pm.dpm.current_ps;
+	struct trinity_power_info *pi = trinity_get_pi(rdev);
+	struct radeon_ps *rps = &pi->current_rps;
 	struct trinity_ps *ps = trinity_get_ps(rps);
 	struct trinity_pl *pl;
 	u32 current_index =
diff --git a/drivers/gpu/drm/radeon/trinity_smc.c b/drivers/gpu/drm/radeon/trinity_smc.c
index 9672bcb..99dd045 100644
--- a/drivers/gpu/drm/radeon/trinity_smc.c
+++ b/drivers/gpu/drm/radeon/trinity_smc.c
@@ -27,9 +27,6 @@
 #include "trinity_dpm.h"
 #include "ppsmc.h"
 
-struct trinity_ps *trinity_get_ps(struct radeon_ps *rps);
-struct trinity_power_info *trinity_get_pi(struct radeon_device *rdev);
-
 static int trinity_notify_message_to_smu(struct radeon_device *rdev, u32 id)
 {
 	int i;
diff --git a/drivers/gpu/drm/radeon/uvd_v2_2.c b/drivers/gpu/drm/radeon/uvd_v2_2.c
index b19ef49..d177100 100644
--- a/drivers/gpu/drm/radeon/uvd_v2_2.c
+++ b/drivers/gpu/drm/radeon/uvd_v2_2.c
@@ -57,7 +57,6 @@
 	radeon_ring_write(ring, 0);
 	radeon_ring_write(ring, PACKET0(UVD_GPCOM_VCPU_CMD, 0));
 	radeon_ring_write(ring, 2);
-	return;
 }
 
 /**
@@ -153,6 +152,7 @@
 		chip_id = 0x01000015;
 		break;
 	case CHIP_PITCAIRN:
+	case CHIP_OLAND:
 		chip_id = 0x01000016;
 		break;
 	case CHIP_ARUBA:
diff --git a/drivers/gpu/drm/rcar-du/rcar_du_crtc.c b/drivers/gpu/drm/rcar-du/rcar_du_crtc.c
index a9d24e4..fbf4be3 100644
--- a/drivers/gpu/drm/rcar-du/rcar_du_crtc.c
+++ b/drivers/gpu/drm/rcar-du/rcar_du_crtc.c
@@ -371,7 +371,6 @@
 		goto error;
 
 	rcrtc->plane->format = format;
-	rcrtc->plane->pitch = crtc->fb->pitches[0];
 
 	rcrtc->plane->src_x = x;
 	rcrtc->plane->src_y = y;
@@ -413,7 +412,7 @@
 	rcrtc->plane->src_x = x;
 	rcrtc->plane->src_y = y;
 
-	rcar_du_crtc_update_base(to_rcar_crtc(crtc));
+	rcar_du_crtc_update_base(rcrtc);
 
 	return 0;
 }
diff --git a/drivers/gpu/drm/rcar-du/rcar_du_drv.c b/drivers/gpu/drm/rcar-du/rcar_du_drv.c
index 0023f97..792fd1d 100644
--- a/drivers/gpu/drm/rcar-du/rcar_du_drv.c
+++ b/drivers/gpu/drm/rcar-du/rcar_du_drv.c
@@ -224,7 +224,9 @@
 
 static int rcar_du_remove(struct platform_device *pdev)
 {
-	drm_platform_exit(&rcar_du_driver, pdev);
+	struct rcar_du_device *rcdu = platform_get_drvdata(pdev);
+
+	drm_put_dev(rcdu->ddev);
 
 	return 0;
 }
@@ -249,8 +251,8 @@
 };
 
 static const struct rcar_du_device_info rcar_du_r8a7790_info = {
-	.features = RCAR_DU_FEATURE_CRTC_IRQ_CLOCK | RCAR_DU_FEATURE_ALIGN_128B
-		  | RCAR_DU_FEATURE_DEFR8,
+	.features = RCAR_DU_FEATURE_CRTC_IRQ_CLOCK | RCAR_DU_FEATURE_DEFR8,
+	.quirks = RCAR_DU_QUIRK_ALIGN_128B | RCAR_DU_QUIRK_LVDS_LANES,
 	.num_crtcs = 3,
 	.routes = {
 		/* R8A7790 has one RGB output, two LVDS outputs and one
@@ -272,9 +274,29 @@
 	.num_lvds = 2,
 };
 
+static const struct rcar_du_device_info rcar_du_r8a7791_info = {
+	.features = RCAR_DU_FEATURE_CRTC_IRQ_CLOCK | RCAR_DU_FEATURE_DEFR8,
+	.num_crtcs = 2,
+	.routes = {
+		/* R8A7791 has one RGB output, one LVDS output and one
+		 * (currently unsupported) TCON output.
+		 */
+		[RCAR_DU_OUTPUT_DPAD0] = {
+			.possible_crtcs = BIT(1),
+			.encoder_type = DRM_MODE_ENCODER_NONE,
+		},
+		[RCAR_DU_OUTPUT_LVDS0] = {
+			.possible_crtcs = BIT(0),
+			.encoder_type = DRM_MODE_ENCODER_LVDS,
+		},
+	},
+	.num_lvds = 1,
+};
+
 static const struct platform_device_id rcar_du_id_table[] = {
 	{ "rcar-du-r8a7779", (kernel_ulong_t)&rcar_du_r8a7779_info },
 	{ "rcar-du-r8a7790", (kernel_ulong_t)&rcar_du_r8a7790_info },
+	{ "rcar-du-r8a7791", (kernel_ulong_t)&rcar_du_r8a7791_info },
 	{ }
 };
 
diff --git a/drivers/gpu/drm/rcar-du/rcar_du_drv.h b/drivers/gpu/drm/rcar-du/rcar_du_drv.h
index 65d2d63..e31b735 100644
--- a/drivers/gpu/drm/rcar-du/rcar_du_drv.h
+++ b/drivers/gpu/drm/rcar-du/rcar_du_drv.h
@@ -28,8 +28,10 @@
 struct rcar_du_lvdsenc;
 
 #define RCAR_DU_FEATURE_CRTC_IRQ_CLOCK	(1 << 0)	/* Per-CRTC IRQ and clock */
-#define RCAR_DU_FEATURE_ALIGN_128B	(1 << 1)	/* Align pitches to 128 bytes */
-#define RCAR_DU_FEATURE_DEFR8		(1 << 2)	/* Has DEFR8 register */
+#define RCAR_DU_FEATURE_DEFR8		(1 << 1)	/* Has DEFR8 register */
+
+#define RCAR_DU_QUIRK_ALIGN_128B	(1 << 0)	/* Align pitches to 128 bytes */
+#define RCAR_DU_QUIRK_LVDS_LANES	(1 << 1)	/* LVDS lanes 1 and 3 inverted */
 
 /*
  * struct rcar_du_output_routing - Output routing specification
@@ -48,12 +50,14 @@
 /*
  * struct rcar_du_device_info - DU model-specific information
  * @features: device features (RCAR_DU_FEATURE_*)
+ * @quirks: device quirks (RCAR_DU_QUIRK_*)
  * @num_crtcs: total number of CRTCs
  * @routes: array of CRTC to output routes, indexed by output (RCAR_DU_OUTPUT_*)
  * @num_lvds: number of internal LVDS encoders
  */
 struct rcar_du_device_info {
 	unsigned int features;
+	unsigned int quirks;
 	unsigned int num_crtcs;
 	struct rcar_du_output_routing routes[RCAR_DU_OUTPUT_MAX];
 	unsigned int num_lvds;
@@ -84,6 +88,12 @@
 	return rcdu->info->features & feature;
 }
 
+static inline bool rcar_du_needs(struct rcar_du_device *rcdu,
+				 unsigned int quirk)
+{
+	return rcdu->info->quirks & quirk;
+}
+
 static inline u32 rcar_du_read(struct rcar_du_device *rcdu, u32 reg)
 {
 	return ioread32(rcdu->mmio + reg);
diff --git a/drivers/gpu/drm/rcar-du/rcar_du_kms.c b/drivers/gpu/drm/rcar-du/rcar_du_kms.c
index b31ac08..fbeabd9 100644
--- a/drivers/gpu/drm/rcar-du/rcar_du_kms.c
+++ b/drivers/gpu/drm/rcar-du/rcar_du_kms.c
@@ -119,7 +119,7 @@
 	/* The R8A7779 DU requires a 16 pixels pitch alignment as documented,
 	 * but the R8A7790 DU seems to require a 128 bytes pitch alignment.
 	 */
-	if (rcar_du_has(rcdu, RCAR_DU_FEATURE_ALIGN_128B))
+	if (rcar_du_needs(rcdu, RCAR_DU_QUIRK_ALIGN_128B))
 		align = 128;
 	else
 		align = 16 * args->bpp / 8;
@@ -144,7 +144,7 @@
 		return ERR_PTR(-EINVAL);
 	}
 
-	if (rcar_du_has(rcdu, RCAR_DU_FEATURE_ALIGN_128B))
+	if (rcar_du_needs(rcdu, RCAR_DU_QUIRK_ALIGN_128B))
 		align = 128;
 	else
 		align = 16 * format->bpp / 8;
diff --git a/drivers/gpu/drm/rcar-du/rcar_du_lvdsenc.c b/drivers/gpu/drm/rcar-du/rcar_du_lvdsenc.c
index a0f6a17..df30a07 100644
--- a/drivers/gpu/drm/rcar-du/rcar_du_lvdsenc.c
+++ b/drivers/gpu/drm/rcar-du/rcar_du_lvdsenc.c
@@ -44,6 +44,7 @@
 	const struct drm_display_mode *mode = &rcrtc->crtc.mode;
 	unsigned int freq = mode->clock;
 	u32 lvdcr0;
+	u32 lvdhcr;
 	u32 pllcr;
 	int ret;
 
@@ -72,15 +73,19 @@
 	 * VSYNC -> CTRL1
 	 * DISP  -> CTRL2
 	 * 0     -> CTRL3
-	 *
-	 * Channels 1 and 3 are switched on ES1.
 	 */
 	rcar_lvds_write(lvds, LVDCTRCR, LVDCTRCR_CTR3SEL_ZERO |
 			LVDCTRCR_CTR2SEL_DISP | LVDCTRCR_CTR1SEL_VSYNC |
 			LVDCTRCR_CTR0SEL_HSYNC);
-	rcar_lvds_write(lvds, LVDCHCR,
-			LVDCHCR_CHSEL_CH(0, 0) | LVDCHCR_CHSEL_CH(1, 3) |
-			LVDCHCR_CHSEL_CH(2, 2) | LVDCHCR_CHSEL_CH(3, 1));
+
+	if (rcar_du_needs(lvds->dev, RCAR_DU_QUIRK_LVDS_LANES))
+		lvdhcr = LVDCHCR_CHSEL_CH(0, 0) | LVDCHCR_CHSEL_CH(1, 3)
+		       | LVDCHCR_CHSEL_CH(2, 2) | LVDCHCR_CHSEL_CH(3, 1);
+	else
+		lvdhcr = LVDCHCR_CHSEL_CH(0, 0) | LVDCHCR_CHSEL_CH(1, 1)
+		       | LVDCHCR_CHSEL_CH(2, 2) | LVDCHCR_CHSEL_CH(3, 3);
+
+	rcar_lvds_write(lvds, LVDCHCR, lvdhcr);
 
 	/* Select the input, hardcode mode 0, enable LVDS operation and turn
 	 * bias circuitry on.
@@ -144,18 +149,9 @@
 	sprintf(name, "lvds.%u", lvds->index);
 
 	mem = platform_get_resource_byname(pdev, IORESOURCE_MEM, name);
-	if (mem == NULL) {
-		dev_err(&pdev->dev, "failed to get memory resource for %s\n",
-			name);
-		return -EINVAL;
-	}
-
 	lvds->mmio = devm_ioremap_resource(&pdev->dev, mem);
-	if (lvds->mmio == NULL) {
-		dev_err(&pdev->dev, "failed to remap memory resource for %s\n",
-			name);
-		return -ENOMEM;
-	}
+	if (IS_ERR(lvds->mmio))
+		return PTR_ERR(lvds->mmio);
 
 	lvds->clock = devm_clk_get(&pdev->dev, name);
 	if (IS_ERR(lvds->clock)) {
diff --git a/drivers/gpu/drm/rcar-du/rcar_du_plane.c b/drivers/gpu/drm/rcar-du/rcar_du_plane.c
index 5300064..3fb69d9 100644
--- a/drivers/gpu/drm/rcar-du/rcar_du_plane.c
+++ b/drivers/gpu/drm/rcar-du/rcar_du_plane.c
@@ -104,6 +104,15 @@
 {
 	struct rcar_du_group *rgrp = plane->group;
 	unsigned int index = plane->hwindex;
+	u32 mwr;
+
+	/* Memory pitch (expressed in pixels) */
+	if (plane->format->planes == 2)
+		mwr = plane->pitch;
+	else
+		mwr = plane->pitch * 8 / plane->format->bpp;
+
+	rcar_du_plane_write(rgrp, index, PnMWR, mwr);
 
 	/* The Y position is expressed in raster line units and must be doubled
 	 * for 32bpp formats, according to the R8A7790 datasheet. No mention of
@@ -133,6 +142,8 @@
 {
 	struct drm_gem_cma_object *gem;
 
+	plane->pitch = fb->pitches[0];
+
 	gem = drm_fb_cma_get_gem_obj(fb, 0);
 	plane->dma[0] = gem->paddr + fb->offsets[0];
 
@@ -209,7 +220,6 @@
 	struct rcar_du_group *rgrp = plane->group;
 	u32 ddcr2 = PnDDCR2_CODE;
 	u32 ddcr4;
-	u32 mwr;
 
 	/* Data format
 	 *
@@ -240,14 +250,6 @@
 	rcar_du_plane_write(rgrp, index, PnDDCR2, ddcr2);
 	rcar_du_plane_write(rgrp, index, PnDDCR4, ddcr4);
 
-	/* Memory pitch (expressed in pixels) */
-	if (plane->format->planes == 2)
-		mwr = plane->pitch;
-	else
-		mwr = plane->pitch * 8 / plane->format->bpp;
-
-	rcar_du_plane_write(rgrp, index, PnMWR, mwr);
-
 	/* Destination position and size */
 	rcar_du_plane_write(rgrp, index, PnDSXR, plane->width);
 	rcar_du_plane_write(rgrp, index, PnDSYR, plane->height);
@@ -309,7 +311,6 @@
 
 	rplane->crtc = crtc;
 	rplane->format = format;
-	rplane->pitch = fb->pitches[0];
 
 	rplane->src_x = src_x >> 16;
 	rplane->src_y = src_y >> 16;
diff --git a/drivers/gpu/drm/savage/savage_bci.c b/drivers/gpu/drm/savage/savage_bci.c
index b17d071..d2b2df9 100644
--- a/drivers/gpu/drm/savage/savage_bci.c
+++ b/drivers/gpu/drm/savage/savage_bci.c
@@ -49,7 +49,7 @@
 #endif
 
 	for (i = 0; i < SAVAGE_DEFAULT_USEC_TIMEOUT; i++) {
-		DRM_MEMORYBARRIER();
+		mb();
 		status = dev_priv->status_ptr[0];
 		if ((status & mask) < threshold)
 			return 0;
@@ -123,7 +123,7 @@
 	int i;
 
 	for (i = 0; i < SAVAGE_EVENT_USEC_TIMEOUT; i++) {
-		DRM_MEMORYBARRIER();
+		mb();
 		status = dev_priv->status_ptr[1];
 		if ((((status & 0xffff) - e) & 0xffff) <= 0x7fff ||
 		    (status & 0xffff) == 0)
@@ -449,7 +449,7 @@
 		}
 	}
 
-	DRM_MEMORYBARRIER();
+	mb();
 
 	/* do flush ... */
 	phys_addr = dev_priv->cmd_dma->offset +
@@ -990,10 +990,10 @@
 
 		buf->file_priv = file_priv;
 
-		if (DRM_COPY_TO_USER(&d->request_indices[i],
+		if (copy_to_user(&d->request_indices[i],
 				     &buf->idx, sizeof(buf->idx)))
 			return -EFAULT;
-		if (DRM_COPY_TO_USER(&d->request_sizes[i],
+		if (copy_to_user(&d->request_sizes[i],
 				     &buf->total, sizeof(buf->total)))
 			return -EFAULT;
 
diff --git a/drivers/gpu/drm/savage/savage_state.c b/drivers/gpu/drm/savage/savage_state.c
index b35e75e..c01ad0a 100644
--- a/drivers/gpu/drm/savage/savage_state.c
+++ b/drivers/gpu/drm/savage/savage_state.c
@@ -992,7 +992,7 @@
 		if (kcmd_addr == NULL)
 			return -ENOMEM;
 
-		if (DRM_COPY_FROM_USER(kcmd_addr, cmdbuf->cmd_addr,
+		if (copy_from_user(kcmd_addr, cmdbuf->cmd_addr,
 				       cmdbuf->size * 8))
 		{
 			kfree(kcmd_addr);
@@ -1007,7 +1007,7 @@
 			goto done;
 		}
 
-		if (DRM_COPY_FROM_USER(kvb_addr, cmdbuf->vb_addr,
+		if (copy_from_user(kvb_addr, cmdbuf->vb_addr,
 				       cmdbuf->vb_size)) {
 			ret = -EFAULT;
 			goto done;
@@ -1022,7 +1022,7 @@
 			goto done;
 		}
 
-		if (DRM_COPY_FROM_USER(kbox_addr, cmdbuf->box_addr,
+		if (copy_from_user(kbox_addr, cmdbuf->box_addr,
 				       cmdbuf->nbox * sizeof(struct drm_clip_rect))) {
 			ret = -EFAULT;
 			goto done;
@@ -1032,7 +1032,7 @@
 
 	/* Make sure writes to DMA buffers are finished before sending
 	 * DMA commands to the graphics hardware. */
-	DRM_MEMORYBARRIER();
+	mb();
 
 	/* Coming from user space. Don't know if the Xserver has
 	 * emitted wait commands. Assuming the worst. */
diff --git a/drivers/gpu/drm/shmobile/shmob_drm_crtc.c b/drivers/gpu/drm/shmobile/shmob_drm_crtc.c
index 562f9a4..0428076 100644
--- a/drivers/gpu/drm/shmobile/shmob_drm_crtc.c
+++ b/drivers/gpu/drm/shmobile/shmob_drm_crtc.c
@@ -37,14 +37,21 @@
  * Clock management
  */
 
-static void shmob_drm_clk_on(struct shmob_drm_device *sdev)
+static int shmob_drm_clk_on(struct shmob_drm_device *sdev)
 {
-	if (sdev->clock)
-		clk_prepare_enable(sdev->clock);
+	int ret;
+
+	if (sdev->clock) {
+		ret = clk_prepare_enable(sdev->clock);
+		if (ret < 0)
+			return ret;
+	}
 #if 0
 	if (sdev->meram_dev && sdev->meram_dev->pdev)
 		pm_runtime_get_sync(&sdev->meram_dev->pdev->dev);
 #endif
+
+	return 0;
 }
 
 static void shmob_drm_clk_off(struct shmob_drm_device *sdev)
@@ -161,6 +168,7 @@
 	struct drm_device *dev = sdev->ddev;
 	struct drm_plane *plane;
 	u32 value;
+	int ret;
 
 	if (scrtc->started)
 		return;
@@ -170,7 +178,9 @@
 		return;
 
 	/* Enable clocks before accessing the hardware. */
-	shmob_drm_clk_on(sdev);
+	ret = shmob_drm_clk_on(sdev);
+	if (ret < 0)
+		return;
 
 	/* Reset and enable the LCDC. */
 	lcdc_write(sdev, LDCNT2R, lcdc_read(sdev, LDCNT2R) | LDCNT2R_BR);
diff --git a/drivers/gpu/drm/shmobile/shmob_drm_drv.c b/drivers/gpu/drm/shmobile/shmob_drm_drv.c
index 0155518..c839c9c 100644
--- a/drivers/gpu/drm/shmobile/shmob_drm_drv.c
+++ b/drivers/gpu/drm/shmobile/shmob_drm_drv.c
@@ -336,7 +336,9 @@
 
 static int shmob_drm_remove(struct platform_device *pdev)
 {
-	drm_platform_exit(&shmob_drm_driver, pdev);
+	struct shmob_drm_device *sdev = platform_get_drvdata(pdev);
+
+	drm_put_dev(sdev->ddev);
 
 	return 0;
 }
diff --git a/drivers/gpu/drm/sis/sis_drv.c b/drivers/gpu/drm/sis/sis_drv.c
index 4383b74..756f787 100644
--- a/drivers/gpu/drm/sis/sis_drv.c
+++ b/drivers/gpu/drm/sis/sis_drv.c
@@ -94,7 +94,7 @@
 	return 0;
 }
 
-void sis_driver_postclose(struct drm_device *dev, struct drm_file *file)
+static void sis_driver_postclose(struct drm_device *dev, struct drm_file *file)
 {
 	struct sis_file_private *file_priv = file->driver_priv;
 
diff --git a/drivers/gpu/drm/sis/sis_mm.c b/drivers/gpu/drm/sis/sis_mm.c
index 01857d8..0573be0 100644
--- a/drivers/gpu/drm/sis/sis_mm.c
+++ b/drivers/gpu/drm/sis/sis_mm.c
@@ -266,7 +266,7 @@
 	 * because its polling frequency is too low.
 	 */
 
-	end = jiffies + (DRM_HZ * 3);
+	end = jiffies + (HZ * 3);
 
 	for (i = 0; i < 4; ++i) {
 		do {
diff --git a/drivers/gpu/drm/tegra/Kconfig b/drivers/gpu/drm/tegra/Kconfig
index 8db9b3b..354ddb2 100644
--- a/drivers/gpu/drm/tegra/Kconfig
+++ b/drivers/gpu/drm/tegra/Kconfig
@@ -1,14 +1,12 @@
 config DRM_TEGRA
-	bool "NVIDIA Tegra DRM"
-	depends on ARCH_TEGRA || ARCH_MULTIPLATFORM
+	tristate "NVIDIA Tegra DRM"
+	depends on ARCH_TEGRA || (ARM && COMPILE_TEST)
 	depends on DRM
 	depends on RESET_CONTROLLER
-	select TEGRA_HOST1X
 	select DRM_KMS_HELPER
-	select DRM_KMS_FB_HELPER
-	select FB_SYS_FILLRECT
-	select FB_SYS_COPYAREA
-	select FB_SYS_IMAGEBLIT
+	select DRM_MIPI_DSI
+	select DRM_PANEL
+	select TEGRA_HOST1X
 	help
 	  Choose this option if you have an NVIDIA Tegra SoC.
 
@@ -17,6 +15,18 @@
 
 if DRM_TEGRA
 
+config DRM_TEGRA_FBDEV
+	bool "Enable legacy fbdev support"
+	select DRM_KMS_FB_HELPER
+	select FB_SYS_FILLRECT
+	select FB_SYS_COPYAREA
+	select FB_SYS_IMAGEBLIT
+	default y
+	help
+	  Choose this option if you have a need for the legacy fbdev support.
+	  Note that this support also provides the Linux console on top of
+	  the Tegra modesetting driver.
+
 config DRM_TEGRA_DEBUG
 	bool "NVIDIA Tegra DRM debug support"
 	help
diff --git a/drivers/gpu/drm/tegra/Makefile b/drivers/gpu/drm/tegra/Makefile
index edc76ab..8d220af 100644
--- a/drivers/gpu/drm/tegra/Makefile
+++ b/drivers/gpu/drm/tegra/Makefile
@@ -9,6 +9,8 @@
 	output.o \
 	rgb.o \
 	hdmi.o \
+	mipi-phy.o \
+	dsi.o \
 	gr2d.o \
 	gr3d.o
 
diff --git a/drivers/gpu/drm/tegra/bus.c b/drivers/gpu/drm/tegra/bus.c
index 565f8f7..e38e596 100644
--- a/drivers/gpu/drm/tegra/bus.c
+++ b/drivers/gpu/drm/tegra/bus.c
@@ -46,7 +46,6 @@
 	struct drm_device *drm;
 	int ret;
 
-	INIT_LIST_HEAD(&driver->device_list);
 	driver->bus = &drm_host1x_bus;
 
 	drm = drm_dev_alloc(driver, &device->dev);
diff --git a/drivers/gpu/drm/tegra/dc.c b/drivers/gpu/drm/tegra/dc.c
index cd7f1e4..9336006 100644
--- a/drivers/gpu/drm/tegra/dc.c
+++ b/drivers/gpu/drm/tegra/dc.c
@@ -15,6 +15,10 @@
 #include "drm.h"
 #include "gem.h"
 
+struct tegra_dc_soc_info {
+	bool supports_interlacing;
+};
+
 struct tegra_plane {
 	struct drm_plane base;
 	unsigned int index;
@@ -658,19 +662,12 @@
 	/* program display mode */
 	tegra_dc_set_timings(dc, mode);
 
-	value = DE_SELECT_ACTIVE | DE_CONTROL_NORMAL;
-	tegra_dc_writel(dc, value, DC_DISP_DATA_ENABLE_OPTIONS);
-
-	value = tegra_dc_readl(dc, DC_COM_PIN_OUTPUT_POLARITY(1));
-	value &= ~LVS_OUTPUT_POLARITY_LOW;
-	value &= ~LHS_OUTPUT_POLARITY_LOW;
-	tegra_dc_writel(dc, value, DC_COM_PIN_OUTPUT_POLARITY(1));
-
-	value = DISP_DATA_FORMAT_DF1P1C | DISP_ALIGNMENT_MSB |
-		DISP_ORDER_RED_BLUE;
-	tegra_dc_writel(dc, value, DC_DISP_DISP_INTERFACE_CONTROL);
-
-	tegra_dc_writel(dc, 0x00010001, DC_DISP_SHIFT_CLOCK_OPTIONS);
+	/* interlacing isn't supported yet, so disable it */
+	if (dc->soc->supports_interlacing) {
+		value = tegra_dc_readl(dc, DC_DISP_INTERLACE_CONTROL);
+		value &= ~INTERLACE_ENABLE;
+		tegra_dc_writel(dc, value, DC_DISP_INTERLACE_CONTROL);
+	}
 
 	value = SHIFT_CLK_DIVIDER(div) | PIXEL_CLK_DIVIDER_PCD1;
 	tegra_dc_writel(dc, value, DC_DISP_DISP_CLOCK_CONTROL);
@@ -735,10 +732,6 @@
 		PW4_ENABLE | PM0_ENABLE | PM1_ENABLE;
 	tegra_dc_writel(dc, value, DC_CMD_DISPLAY_POWER_CONTROL);
 
-	value = tegra_dc_readl(dc, DC_CMD_DISPLAY_COMMAND);
-	value |= DISP_CTRL_MODE_C_DISPLAY;
-	tegra_dc_writel(dc, value, DC_CMD_DISPLAY_COMMAND);
-
 	/* initialize timer */
 	value = CURSOR_THRESHOLD(0) | WINDOW_A_THRESHOLD(0x20) |
 		WINDOW_B_THRESHOLD(0x20) | WINDOW_C_THRESHOLD(0x20);
@@ -1107,8 +1100,6 @@
 	struct tegra_dc *dc = host1x_client_to_dc(client);
 	int err;
 
-	dc->pipe = tegra->drm->mode_config.num_crtc;
-
 	drm_crtc_init(tegra->drm, &dc->base, &tegra_crtc_funcs);
 	drm_mode_crtc_set_gamma_size(&dc->base, 256);
 	drm_crtc_helper_add(&dc->base, &tegra_crtc_helper_funcs);
@@ -1167,8 +1158,71 @@
 	.exit = tegra_dc_exit,
 };
 
+static const struct tegra_dc_soc_info tegra20_dc_soc_info = {
+	.supports_interlacing = false,
+};
+
+static const struct tegra_dc_soc_info tegra30_dc_soc_info = {
+	.supports_interlacing = false,
+};
+
+static const struct tegra_dc_soc_info tegra124_dc_soc_info = {
+	.supports_interlacing = true,
+};
+
+static const struct of_device_id tegra_dc_of_match[] = {
+	{
+		.compatible = "nvidia,tegra124-dc",
+		.data = &tegra124_dc_soc_info,
+	}, {
+		.compatible = "nvidia,tegra30-dc",
+		.data = &tegra30_dc_soc_info,
+	}, {
+		.compatible = "nvidia,tegra20-dc",
+		.data = &tegra20_dc_soc_info,
+	}, {
+		/* sentinel */
+	}
+};
+
+static int tegra_dc_parse_dt(struct tegra_dc *dc)
+{
+	struct device_node *np;
+	u32 value = 0;
+	int err;
+
+	err = of_property_read_u32(dc->dev->of_node, "nvidia,head", &value);
+	if (err < 0) {
+		dev_err(dc->dev, "missing \"nvidia,head\" property\n");
+
+		/*
+		 * If the nvidia,head property isn't present, try to find the
+		 * correct head number by looking up the position of this
+		 * display controller's node within the device tree. Assuming
+		 * that the nodes are ordered properly in the DTS file and
+		 * that the translation into a flattened device tree blob
+		 * preserves that ordering this will actually yield the right
+		 * head number.
+		 *
+		 * If those assumptions don't hold, this will still work for
+		 * cases where only a single display controller is used.
+		 */
+		for_each_matching_node(np, tegra_dc_of_match) {
+			if (np == dc->dev->of_node)
+				break;
+
+			value++;
+		}
+	}
+
+	dc->pipe = value;
+
+	return 0;
+}
+
 static int tegra_dc_probe(struct platform_device *pdev)
 {
+	const struct of_device_id *id;
 	struct resource *regs;
 	struct tegra_dc *dc;
 	int err;
@@ -1177,9 +1231,18 @@
 	if (!dc)
 		return -ENOMEM;
 
+	id = of_match_node(tegra_dc_of_match, pdev->dev.of_node);
+	if (!id)
+		return -ENODEV;
+
 	spin_lock_init(&dc->lock);
 	INIT_LIST_HEAD(&dc->list);
 	dc->dev = &pdev->dev;
+	dc->soc = id->data;
+
+	err = tegra_dc_parse_dt(dc);
+	if (err < 0)
+		return err;
 
 	dc->clk = devm_clk_get(&pdev->dev, NULL);
 	if (IS_ERR(dc->clk)) {
@@ -1253,12 +1316,6 @@
 	return 0;
 }
 
-static struct of_device_id tegra_dc_of_match[] = {
-	{ .compatible = "nvidia,tegra30-dc", },
-	{ .compatible = "nvidia,tegra20-dc", },
-	{ },
-};
-
 struct platform_driver tegra_dc_driver = {
 	.driver = {
 		.name = "tegra-dc",
diff --git a/drivers/gpu/drm/tegra/dc.h b/drivers/gpu/drm/tegra/dc.h
index 91bbda2..3c2c0ea 100644
--- a/drivers/gpu/drm/tegra/dc.h
+++ b/drivers/gpu/drm/tegra/dc.h
@@ -28,6 +28,7 @@
 #define DISP_CTRL_MODE_STOP (0 << 5)
 #define DISP_CTRL_MODE_C_DISPLAY (1 << 5)
 #define DISP_CTRL_MODE_NC_DISPLAY (2 << 5)
+#define DISP_CTRL_MODE_MASK (3 << 5)
 #define DC_CMD_SIGNAL_RAISE			0x033
 #define DC_CMD_DISPLAY_POWER_CONTROL		0x036
 #define PW0_ENABLE (1 <<  0)
@@ -116,6 +117,7 @@
 
 #define DC_DISP_DISP_WIN_OPTIONS		0x402
 #define HDMI_ENABLE (1 << 30)
+#define DSI_ENABLE  (1 << 29)
 
 #define DC_DISP_DISP_MEM_HIGH_PRIORITY		0x403
 #define CURSOR_THRESHOLD(x)   (((x) & 0x03) << 24)
@@ -238,6 +240,8 @@
 #define DITHER_CONTROL_ERRDIFF (3 << 8)
 
 #define DC_DISP_SHIFT_CLOCK_OPTIONS		0x431
+#define  SC1_H_QUALIFIER_NONE	(1 << 16)
+#define  SC0_H_QUALIFIER_NONE	(1 <<  0)
 
 #define DC_DISP_DATA_ENABLE_OPTIONS		0x432
 #define DE_SELECT_ACTIVE_BLANK  (0 << 0)
@@ -292,6 +296,11 @@
 #define DC_DISP_SD_HW_K_VALUES			0x4dd
 #define DC_DISP_SD_MAN_K_VALUES			0x4de
 
+#define DC_DISP_INTERLACE_CONTROL		0x4e5
+#define  INTERLACE_STATUS (1 << 2)
+#define  INTERLACE_START  (1 << 1)
+#define  INTERLACE_ENABLE (1 << 0)
+
 #define DC_WIN_CSC_YOF				0x611
 #define DC_WIN_CSC_KYRGB			0x612
 #define DC_WIN_CSC_KUR				0x613
diff --git a/drivers/gpu/drm/tegra/drm.c b/drivers/gpu/drm/tegra/drm.c
index 07eba59..88a5290 100644
--- a/drivers/gpu/drm/tegra/drm.c
+++ b/drivers/gpu/drm/tegra/drm.c
@@ -104,9 +104,11 @@
 
 static void tegra_drm_lastclose(struct drm_device *drm)
 {
+#ifdef CONFIG_TEGRA_DRM_FBDEV
 	struct tegra_drm *tegra = drm->dev_private;
 
 	tegra_fbdev_restore_mode(tegra->fbdev);
+#endif
 }
 
 static struct host1x_bo *
@@ -578,7 +580,7 @@
 #endif
 
 static struct drm_driver tegra_drm_driver = {
-	.driver_features = DRIVER_MODESET | DRIVER_GEM,
+	.driver_features = DRIVER_MODESET | DRIVER_GEM | DRIVER_PRIME,
 	.load = tegra_drm_load,
 	.unload = tegra_drm_unload,
 	.open = tegra_drm_open,
@@ -596,6 +598,12 @@
 
 	.gem_free_object = tegra_bo_free_object,
 	.gem_vm_ops = &tegra_bo_vm_ops,
+
+	.prime_handle_to_fd = drm_gem_prime_handle_to_fd,
+	.prime_fd_to_handle = drm_gem_prime_fd_to_handle,
+	.gem_prime_export = tegra_gem_prime_export,
+	.gem_prime_import = tegra_gem_prime_import,
+
 	.dumb_create = tegra_bo_dumb_create,
 	.dumb_map_offset = tegra_bo_dumb_map_offset,
 	.dumb_destroy = drm_gem_dumb_destroy,
@@ -653,8 +661,10 @@
 	{ .compatible = "nvidia,tegra30-hdmi", },
 	{ .compatible = "nvidia,tegra30-gr2d", },
 	{ .compatible = "nvidia,tegra30-gr3d", },
+	{ .compatible = "nvidia,tegra114-dsi", },
 	{ .compatible = "nvidia,tegra114-hdmi", },
 	{ .compatible = "nvidia,tegra114-gr3d", },
+	{ .compatible = "nvidia,tegra124-dc", },
 	{ /* sentinel */ }
 };
 
@@ -677,10 +687,14 @@
 	if (err < 0)
 		goto unregister_host1x;
 
-	err = platform_driver_register(&tegra_hdmi_driver);
+	err = platform_driver_register(&tegra_dsi_driver);
 	if (err < 0)
 		goto unregister_dc;
 
+	err = platform_driver_register(&tegra_hdmi_driver);
+	if (err < 0)
+		goto unregister_dsi;
+
 	err = platform_driver_register(&tegra_gr2d_driver);
 	if (err < 0)
 		goto unregister_hdmi;
@@ -695,6 +709,8 @@
 	platform_driver_unregister(&tegra_gr2d_driver);
 unregister_hdmi:
 	platform_driver_unregister(&tegra_hdmi_driver);
+unregister_dsi:
+	platform_driver_unregister(&tegra_dsi_driver);
 unregister_dc:
 	platform_driver_unregister(&tegra_dc_driver);
 unregister_host1x:
@@ -708,6 +724,7 @@
 	platform_driver_unregister(&tegra_gr3d_driver);
 	platform_driver_unregister(&tegra_gr2d_driver);
 	platform_driver_unregister(&tegra_hdmi_driver);
+	platform_driver_unregister(&tegra_dsi_driver);
 	platform_driver_unregister(&tegra_dc_driver);
 	host1x_driver_unregister(&host1x_drm_driver);
 }
diff --git a/drivers/gpu/drm/tegra/drm.h b/drivers/gpu/drm/tegra/drm.h
index 266aae0..bf1cac7 100644
--- a/drivers/gpu/drm/tegra/drm.h
+++ b/drivers/gpu/drm/tegra/drm.h
@@ -27,10 +27,12 @@
 	unsigned int num_planes;
 };
 
+#ifdef CONFIG_DRM_TEGRA_FBDEV
 struct tegra_fbdev {
 	struct drm_fb_helper base;
 	struct tegra_fb *fb;
 };
+#endif
 
 struct tegra_drm {
 	struct drm_device *drm;
@@ -38,7 +40,9 @@
 	struct mutex clients_lock;
 	struct list_head clients;
 
+#ifdef CONFIG_DRM_TEGRA_FBDEV
 	struct tegra_fbdev *fbdev;
+#endif
 };
 
 struct tegra_drm_client;
@@ -84,6 +88,7 @@
 extern int tegra_drm_init(struct tegra_drm *tegra, struct drm_device *drm);
 extern int tegra_drm_exit(struct tegra_drm *tegra);
 
+struct tegra_dc_soc_info;
 struct tegra_output;
 
 struct tegra_dc {
@@ -109,6 +114,8 @@
 
 	/* page-flip handling */
 	struct drm_pending_vblank_event *event;
+
+	const struct tegra_dc_soc_info *soc;
 };
 
 static inline struct tegra_dc *
@@ -177,6 +184,7 @@
 enum tegra_output_type {
 	TEGRA_OUTPUT_RGB,
 	TEGRA_OUTPUT_HDMI,
+	TEGRA_OUTPUT_DSI,
 };
 
 struct tegra_output {
@@ -186,6 +194,7 @@
 	const struct tegra_output_ops *ops;
 	enum tegra_output_type type;
 
+	struct drm_panel *panel;
 	struct i2c_adapter *ddc;
 	const struct edid *edid;
 	unsigned int hpd_irq;
@@ -263,9 +272,12 @@
 bool tegra_fb_is_tiled(struct drm_framebuffer *framebuffer);
 extern int tegra_drm_fb_init(struct drm_device *drm);
 extern void tegra_drm_fb_exit(struct drm_device *drm);
+#ifdef CONFIG_DRM_TEGRA_FBDEV
 extern void tegra_fbdev_restore_mode(struct tegra_fbdev *fbdev);
+#endif
 
 extern struct platform_driver tegra_dc_driver;
+extern struct platform_driver tegra_dsi_driver;
 extern struct platform_driver tegra_hdmi_driver;
 extern struct platform_driver tegra_gr2d_driver;
 extern struct platform_driver tegra_gr3d_driver;
diff --git a/drivers/gpu/drm/tegra/dsi.c b/drivers/gpu/drm/tegra/dsi.c
new file mode 100644
index 0000000..d452faab
--- /dev/null
+++ b/drivers/gpu/drm/tegra/dsi.c
@@ -0,0 +1,971 @@
+/*
+ * Copyright (C) 2013 NVIDIA Corporation
+ *
+ * Permission to use, copy, modify, distribute, and sell this software and its
+ * documentation for any purpose is hereby granted without fee, provided that
+ * the above copyright notice appear in all copies and that both that copyright
+ * notice and this permission notice appear in supporting documentation, and
+ * that the name of the copyright holders not be used in advertising or
+ * publicity pertaining to distribution of the software without specific,
+ * written prior permission.  The copyright holders make no representations
+ * about the suitability of this software for any purpose.  It is provided "as
+ * is" without express or implied warranty.
+ *
+ * THE COPYRIGHT HOLDERS DISCLAIM ALL WARRANTIES WITH REGARD TO THIS SOFTWARE,
+ * INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN NO
+ * EVENT SHALL THE COPYRIGHT HOLDERS BE LIABLE FOR ANY SPECIAL, INDIRECT OR
+ * CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE,
+ * DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
+ * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
+ * OF THIS SOFTWARE.
+ */
+
+#include <linux/clk.h>
+#include <linux/debugfs.h>
+#include <linux/host1x.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/reset.h>
+
+#include <drm/drm_mipi_dsi.h>
+#include <drm/drm_panel.h>
+
+#include <video/mipi_display.h>
+
+#include "dc.h"
+#include "drm.h"
+#include "dsi.h"
+#include "mipi-phy.h"
+
+#define DSI_VIDEO_FIFO_DEPTH (1920 / 4)
+#define DSI_HOST_FIFO_DEPTH 64
+
+struct tegra_dsi {
+	struct host1x_client client;
+	struct tegra_output output;
+	struct device *dev;
+
+	void __iomem *regs;
+
+	struct reset_control *rst;
+	struct clk *clk_parent;
+	struct clk *clk_lp;
+	struct clk *clk;
+
+	struct drm_info_list *debugfs_files;
+	struct drm_minor *minor;
+	struct dentry *debugfs;
+
+	enum mipi_dsi_pixel_format format;
+	unsigned int lanes;
+
+	struct tegra_mipi_device *mipi;
+	struct mipi_dsi_host host;
+};
+
+static inline struct tegra_dsi *
+host1x_client_to_dsi(struct host1x_client *client)
+{
+	return container_of(client, struct tegra_dsi, client);
+}
+
+static inline struct tegra_dsi *host_to_tegra(struct mipi_dsi_host *host)
+{
+	return container_of(host, struct tegra_dsi, host);
+}
+
+static inline struct tegra_dsi *to_dsi(struct tegra_output *output)
+{
+	return container_of(output, struct tegra_dsi, output);
+}
+
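+/*
+ * Register offsets are 32-bit word indices, so shift them by two to obtain
+ * the byte offset expected by readl()/writel().
+ */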
+static inline unsigned long tegra_dsi_readl(struct tegra_dsi *dsi,
+					    unsigned long reg)
+{
+	return readl(dsi->regs + (reg << 2));
+}
+
+static inline void tegra_dsi_writel(struct tegra_dsi *dsi, unsigned long value,
+				    unsigned long reg)
+{
+	writel(value, dsi->regs + (reg << 2));
+}
+
+static int tegra_dsi_show_regs(struct seq_file *s, void *data)
+{
+	struct drm_info_node *node = s->private;
+	struct tegra_dsi *dsi = node->info_ent->data;
+
+#define DUMP_REG(name)						\
+	seq_printf(s, "%-32s %#05x %08lx\n", #name, name,	\
+		   tegra_dsi_readl(dsi, name))
+
+	DUMP_REG(DSI_INCR_SYNCPT);
+	DUMP_REG(DSI_INCR_SYNCPT_CONTROL);
+	DUMP_REG(DSI_INCR_SYNCPT_ERROR);
+	DUMP_REG(DSI_CTXSW);
+	DUMP_REG(DSI_RD_DATA);
+	DUMP_REG(DSI_WR_DATA);
+	DUMP_REG(DSI_POWER_CONTROL);
+	DUMP_REG(DSI_INT_ENABLE);
+	DUMP_REG(DSI_INT_STATUS);
+	DUMP_REG(DSI_INT_MASK);
+	DUMP_REG(DSI_HOST_CONTROL);
+	DUMP_REG(DSI_CONTROL);
+	DUMP_REG(DSI_SOL_DELAY);
+	DUMP_REG(DSI_MAX_THRESHOLD);
+	DUMP_REG(DSI_TRIGGER);
+	DUMP_REG(DSI_TX_CRC);
+	DUMP_REG(DSI_STATUS);
+
+	DUMP_REG(DSI_INIT_SEQ_CONTROL);
+	DUMP_REG(DSI_INIT_SEQ_DATA_0);
+	DUMP_REG(DSI_INIT_SEQ_DATA_1);
+	DUMP_REG(DSI_INIT_SEQ_DATA_2);
+	DUMP_REG(DSI_INIT_SEQ_DATA_3);
+	DUMP_REG(DSI_INIT_SEQ_DATA_4);
+	DUMP_REG(DSI_INIT_SEQ_DATA_5);
+	DUMP_REG(DSI_INIT_SEQ_DATA_6);
+	DUMP_REG(DSI_INIT_SEQ_DATA_7);
+
+	DUMP_REG(DSI_PKT_SEQ_0_LO);
+	DUMP_REG(DSI_PKT_SEQ_0_HI);
+	DUMP_REG(DSI_PKT_SEQ_1_LO);
+	DUMP_REG(DSI_PKT_SEQ_1_HI);
+	DUMP_REG(DSI_PKT_SEQ_2_LO);
+	DUMP_REG(DSI_PKT_SEQ_2_HI);
+	DUMP_REG(DSI_PKT_SEQ_3_LO);
+	DUMP_REG(DSI_PKT_SEQ_3_HI);
+	DUMP_REG(DSI_PKT_SEQ_4_LO);
+	DUMP_REG(DSI_PKT_SEQ_4_HI);
+	DUMP_REG(DSI_PKT_SEQ_5_LO);
+	DUMP_REG(DSI_PKT_SEQ_5_HI);
+
+	DUMP_REG(DSI_DCS_CMDS);
+
+	DUMP_REG(DSI_PKT_LEN_0_1);
+	DUMP_REG(DSI_PKT_LEN_2_3);
+	DUMP_REG(DSI_PKT_LEN_4_5);
+	DUMP_REG(DSI_PKT_LEN_6_7);
+
+	DUMP_REG(DSI_PHY_TIMING_0);
+	DUMP_REG(DSI_PHY_TIMING_1);
+	DUMP_REG(DSI_PHY_TIMING_2);
+	DUMP_REG(DSI_BTA_TIMING);
+
+	DUMP_REG(DSI_TIMEOUT_0);
+	DUMP_REG(DSI_TIMEOUT_1);
+	DUMP_REG(DSI_TO_TALLY);
+
+	DUMP_REG(DSI_PAD_CONTROL_0);
+	DUMP_REG(DSI_PAD_CONTROL_CD);
+	DUMP_REG(DSI_PAD_CD_STATUS);
+	DUMP_REG(DSI_VIDEO_MODE_CONTROL);
+	DUMP_REG(DSI_PAD_CONTROL_1);
+	DUMP_REG(DSI_PAD_CONTROL_2);
+	DUMP_REG(DSI_PAD_CONTROL_3);
+	DUMP_REG(DSI_PAD_CONTROL_4);
+
+	DUMP_REG(DSI_GANGED_MODE_CONTROL);
+	DUMP_REG(DSI_GANGED_MODE_START);
+	DUMP_REG(DSI_GANGED_MODE_SIZE);
+
+	DUMP_REG(DSI_RAW_DATA_BYTE_COUNT);
+	DUMP_REG(DSI_ULTRA_LOW_POWER_CONTROL);
+
+	DUMP_REG(DSI_INIT_SEQ_DATA_8);
+	DUMP_REG(DSI_INIT_SEQ_DATA_9);
+	DUMP_REG(DSI_INIT_SEQ_DATA_10);
+	DUMP_REG(DSI_INIT_SEQ_DATA_11);
+	DUMP_REG(DSI_INIT_SEQ_DATA_12);
+	DUMP_REG(DSI_INIT_SEQ_DATA_13);
+	DUMP_REG(DSI_INIT_SEQ_DATA_14);
+	DUMP_REG(DSI_INIT_SEQ_DATA_15);
+
+#undef DUMP_REG
+
+	return 0;
+}
+
+static struct drm_info_list debugfs_files[] = {
+	{ "regs", tegra_dsi_show_regs, 0, NULL },
+};
+
+static int tegra_dsi_debugfs_init(struct tegra_dsi *dsi,
+				  struct drm_minor *minor)
+{
+	const char *name = dev_name(dsi->dev);
+	unsigned int i;
+	int err;
+
+	dsi->debugfs = debugfs_create_dir(name, minor->debugfs_root);
+	if (!dsi->debugfs)
+		return -ENOMEM;
+
+	dsi->debugfs_files = kmemdup(debugfs_files, sizeof(debugfs_files),
+				     GFP_KERNEL);
+	if (!dsi->debugfs_files) {
+		err = -ENOMEM;
+		goto remove;
+	}
+
+	for (i = 0; i < ARRAY_SIZE(debugfs_files); i++)
+		dsi->debugfs_files[i].data = dsi;
+
+	err = drm_debugfs_create_files(dsi->debugfs_files,
+				       ARRAY_SIZE(debugfs_files),
+				       dsi->debugfs, minor);
+	if (err < 0)
+		goto free;
+
+	dsi->minor = minor;
+
+	return 0;
+
+free:
+	kfree(dsi->debugfs_files);
+	dsi->debugfs_files = NULL;
+remove:
+	debugfs_remove(dsi->debugfs);
+	dsi->debugfs = NULL;
+
+	return err;
+}
+
+static int tegra_dsi_debugfs_exit(struct tegra_dsi *dsi)
+{
+	drm_debugfs_remove_files(dsi->debugfs_files, ARRAY_SIZE(debugfs_files),
+				 dsi->minor);
+	dsi->minor = NULL;
+
+	kfree(dsi->debugfs_files);
+	dsi->debugfs_files = NULL;
+
+	debugfs_remove(dsi->debugfs);
+	dsi->debugfs = NULL;
+
+	return 0;
+}
+
+#define PKT_ID0(id)	((((id) & 0x3f) <<  3) | (1 <<  9))
+#define PKT_LEN0(len)	(((len) & 0x07) <<  0)
+#define PKT_ID1(id)	((((id) & 0x3f) << 13) | (1 << 19))
+#define PKT_LEN1(len)	(((len) & 0x07) << 10)
+#define PKT_ID2(id)	((((id) & 0x3f) << 23) | (1 << 29))
+#define PKT_LEN2(len)	(((len) & 0x07) << 20)
+
+#define PKT_LP		(1 << 30)
+#define NUM_PKT_SEQ	12
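+
+/*
+ * Each 32-bit packet-sequence word describes up to three packet slots:
+ * PKT_IDn() selects the MIPI DSI data type for slot n (and sets what appears
+ * to be an enable bit), while PKT_LENn() appears to index into the packed
+ * 16-bit lengths programmed via the DSI_PKT_LEN_* registers. PKT_LP
+ * presumably marks the line as transmitted in low-power mode.
+ */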
+
+/* non-burst mode with sync-end */
+static const u32 pkt_seq_vnb_syne[NUM_PKT_SEQ] = {
+	[ 0] = PKT_ID0(MIPI_DSI_V_SYNC_START) | PKT_LEN0(0) |
+	       PKT_ID1(MIPI_DSI_BLANKING_PACKET) | PKT_LEN1(1) |
+	       PKT_ID2(MIPI_DSI_H_SYNC_END) | PKT_LEN2(0) |
+	       PKT_LP,
+	[ 1] = 0,
+	[ 2] = PKT_ID0(MIPI_DSI_V_SYNC_END) | PKT_LEN0(0) |
+	       PKT_ID1(MIPI_DSI_BLANKING_PACKET) | PKT_LEN1(1) |
+	       PKT_ID2(MIPI_DSI_H_SYNC_END) | PKT_LEN2(0) |
+	       PKT_LP,
+	[ 3] = 0,
+	[ 4] = PKT_ID0(MIPI_DSI_H_SYNC_START) | PKT_LEN0(0) |
+	       PKT_ID1(MIPI_DSI_BLANKING_PACKET) | PKT_LEN1(1) |
+	       PKT_ID2(MIPI_DSI_H_SYNC_END) | PKT_LEN2(0) |
+	       PKT_LP,
+	[ 5] = 0,
+	[ 6] = PKT_ID0(MIPI_DSI_H_SYNC_START) | PKT_LEN0(0) |
+	       PKT_ID1(MIPI_DSI_BLANKING_PACKET) | PKT_LEN1(1) |
+	       PKT_ID2(MIPI_DSI_H_SYNC_END) | PKT_LEN2(0),
+	[ 7] = PKT_ID0(MIPI_DSI_BLANKING_PACKET) | PKT_LEN0(2) |
+	       PKT_ID1(MIPI_DSI_PACKED_PIXEL_STREAM_24) | PKT_LEN1(3) |
+	       PKT_ID2(MIPI_DSI_BLANKING_PACKET) | PKT_LEN2(4),
+	[ 8] = PKT_ID0(MIPI_DSI_H_SYNC_START) | PKT_LEN0(0) |
+	       PKT_ID1(MIPI_DSI_BLANKING_PACKET) | PKT_LEN1(1) |
+	       PKT_ID2(MIPI_DSI_H_SYNC_END) | PKT_LEN2(0) |
+	       PKT_LP,
+	[ 9] = 0,
+	[10] = PKT_ID0(MIPI_DSI_H_SYNC_START) | PKT_LEN0(0) |
+	       PKT_ID1(MIPI_DSI_BLANKING_PACKET) | PKT_LEN1(1) |
+	       PKT_ID2(MIPI_DSI_H_SYNC_END) | PKT_LEN2(0),
+	[11] = PKT_ID0(MIPI_DSI_BLANKING_PACKET) | PKT_LEN0(2) |
+	       PKT_ID1(MIPI_DSI_PACKED_PIXEL_STREAM_24) | PKT_LEN1(3) |
+	       PKT_ID2(MIPI_DSI_BLANKING_PACKET) | PKT_LEN2(4),
+};
+
+static int tegra_dsi_set_phy_timing(struct tegra_dsi *dsi)
+{
+	struct mipi_dphy_timing timing;
+	unsigned long value, period;
+	long rate;
+	int err;
+
+	rate = clk_get_rate(dsi->clk);
+	if (rate < 0)
+		return rate;
+
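+	/*
+	 * The period computed here is the bit period in nanoseconds; the
+	 * factor of two presumably accounts for the double-data-rate D-PHY
+	 * clock (the bit rate being twice the clock rate).
+	 */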
+	period = DIV_ROUND_CLOSEST(1000000000UL, rate * 2);
+
+	err = mipi_dphy_timing_get_default(&timing, period);
+	if (err < 0)
+		return err;
+
+	err = mipi_dphy_timing_validate(&timing, period);
+	if (err < 0) {
+		dev_err(dsi->dev, "failed to validate D-PHY timing: %d\n", err);
+		return err;
+	}
+
+	/*
+	 * The D-PHY timing fields below are expressed in byte-clock cycles,
+	 * so multiply the period by 8.
+	 */
+	period *= 8;
+
+	value = DSI_TIMING_FIELD(timing.hsexit, period, 1) << 24 |
+		DSI_TIMING_FIELD(timing.hstrail, period, 0) << 16 |
+		DSI_TIMING_FIELD(timing.hszero, period, 3) << 8 |
+		DSI_TIMING_FIELD(timing.hsprepare, period, 1);
+	tegra_dsi_writel(dsi, value, DSI_PHY_TIMING_0);
+
+	value = DSI_TIMING_FIELD(timing.clktrail, period, 1) << 24 |
+		DSI_TIMING_FIELD(timing.clkpost, period, 1) << 16 |
+		DSI_TIMING_FIELD(timing.clkzero, period, 1) << 8 |
+		DSI_TIMING_FIELD(timing.lpx, period, 1);
+	tegra_dsi_writel(dsi, value, DSI_PHY_TIMING_1);
+
+	value = DSI_TIMING_FIELD(timing.clkprepare, period, 1) << 16 |
+		DSI_TIMING_FIELD(timing.clkpre, period, 1) << 8 |
+		DSI_TIMING_FIELD(0xff * period, period, 0) << 0;
+	tegra_dsi_writel(dsi, value, DSI_PHY_TIMING_2);
+
+	value = DSI_TIMING_FIELD(timing.taget, period, 1) << 16 |
+		DSI_TIMING_FIELD(timing.tasure, period, 1) << 8 |
+		DSI_TIMING_FIELD(timing.tago, period, 1);
+	tegra_dsi_writel(dsi, value, DSI_BTA_TIMING);
+
+	return 0;
+}
+
+static int tegra_dsi_get_muldiv(enum mipi_dsi_pixel_format format,
+				unsigned int *mulp, unsigned int *divp)
+{
+	switch (format) {
+	case MIPI_DSI_FMT_RGB666_PACKED:
+	case MIPI_DSI_FMT_RGB888:
+		*mulp = 3;
+		*divp = 1;
+		break;
+
+	case MIPI_DSI_FMT_RGB565:
+		*mulp = 2;
+		*divp = 1;
+		break;
+
+	case MIPI_DSI_FMT_RGB666:
+		*mulp = 9;
+		*divp = 4;
+		break;
+
+	default:
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+static int tegra_output_dsi_enable(struct tegra_output *output)
+{
+	struct tegra_dc *dc = to_tegra_dc(output->encoder.crtc);
+	struct drm_display_mode *mode = &dc->base.mode;
+	unsigned int hact, hsw, hbp, hfp, i, mul, div;
+	struct tegra_dsi *dsi = to_dsi(output);
+	/* FIXME: don't hardcode this */
+	const u32 *pkt_seq = pkt_seq_vnb_syne;
+	unsigned long value;
+	int err;
+
+	err = tegra_dsi_get_muldiv(dsi->format, &mul, &div);
+	if (err < 0)
+		return err;
+
+	err = clk_enable(dsi->clk);
+	if (err < 0)
+		return err;
+
+	reset_control_deassert(dsi->rst);
+
+	value = DSI_CONTROL_CHANNEL(0) | DSI_CONTROL_FORMAT(dsi->format) |
+		DSI_CONTROL_LANES(dsi->lanes - 1) |
+		DSI_CONTROL_SOURCE(dc->pipe);
+	tegra_dsi_writel(dsi, value, DSI_CONTROL);
+
+	tegra_dsi_writel(dsi, DSI_VIDEO_FIFO_DEPTH, DSI_MAX_THRESHOLD);
+
+	value = DSI_HOST_CONTROL_HS | DSI_HOST_CONTROL_CS |
+		DSI_HOST_CONTROL_ECC;
+	tegra_dsi_writel(dsi, value, DSI_HOST_CONTROL);
+
+	value = tegra_dsi_readl(dsi, DSI_CONTROL);
+	value |= DSI_CONTROL_HS_CLK_CTRL;
+	value &= ~DSI_CONTROL_TX_TRIG(3);
+	value &= ~DSI_CONTROL_DCS_ENABLE;
+	value |= DSI_CONTROL_VIDEO_ENABLE;
+	value &= ~DSI_CONTROL_HOST_ENABLE;
+	tegra_dsi_writel(dsi, value, DSI_CONTROL);
+
+	err = tegra_dsi_set_phy_timing(dsi);
+	if (err < 0)
+		return err;
+
+	for (i = 0; i < NUM_PKT_SEQ; i++)
+		tegra_dsi_writel(dsi, pkt_seq[i], DSI_PKT_SEQ_0_LO + i);
+
+	/* horizontal active pixels */
+	hact = mode->hdisplay * mul / div;
+
+	/* horizontal sync width */
+	hsw = (mode->hsync_end - mode->hsync_start) * mul / div;
+	hsw -= 10;
+
+	/* horizontal back porch */
+	hbp = (mode->htotal - mode->hsync_end) * mul / div;
+	hbp -= 14;
+
+	/* horizontal front porch */
+	hfp = (mode->hsync_start - mode->hdisplay) * mul / div;
+	hfp -= 8;
+
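+	/*
+	 * Each DSI_PKT_LEN_* register packs two 16-bit packet lengths. The
+	 * fixed values subtracted from hsw, hbp and hfp above presumably
+	 * account for per-line packet header and checksum overhead.
+	 */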
+	tegra_dsi_writel(dsi, hsw << 16 | 0, DSI_PKT_LEN_0_1);
+	tegra_dsi_writel(dsi, hact << 16 | hbp, DSI_PKT_LEN_2_3);
+	tegra_dsi_writel(dsi, hfp, DSI_PKT_LEN_4_5);
+	tegra_dsi_writel(dsi, 0x0f0f << 16, DSI_PKT_LEN_6_7);
+
+	/* set SOL delay */
+	tegra_dsi_writel(dsi, 8 * mul / div, DSI_SOL_DELAY);
+
+	/* enable display controller */
+	value = tegra_dc_readl(dc, DC_DISP_DISP_WIN_OPTIONS);
+	value |= DSI_ENABLE;
+	tegra_dc_writel(dc, value, DC_DISP_DISP_WIN_OPTIONS);
+
+	value = tegra_dc_readl(dc, DC_CMD_DISPLAY_COMMAND);
+	value &= ~DISP_CTRL_MODE_MASK;
+	value |= DISP_CTRL_MODE_C_DISPLAY;
+	tegra_dc_writel(dc, value, DC_CMD_DISPLAY_COMMAND);
+
+	value = tegra_dc_readl(dc, DC_CMD_DISPLAY_POWER_CONTROL);
+	value |= PW0_ENABLE | PW1_ENABLE | PW2_ENABLE | PW3_ENABLE |
+		 PW4_ENABLE | PM0_ENABLE | PM1_ENABLE;
+	tegra_dc_writel(dc, value, DC_CMD_DISPLAY_POWER_CONTROL);
+
+	tegra_dc_writel(dc, GENERAL_ACT_REQ << 8, DC_CMD_STATE_CONTROL);
+	tegra_dc_writel(dc, GENERAL_ACT_REQ, DC_CMD_STATE_CONTROL);
+
+	/* enable DSI controller */
+	value = tegra_dsi_readl(dsi, DSI_POWER_CONTROL);
+	value |= DSI_POWER_CONTROL_ENABLE;
+	tegra_dsi_writel(dsi, value, DSI_POWER_CONTROL);
+
+	return 0;
+}
+
+static int tegra_output_dsi_disable(struct tegra_output *output)
+{
+	struct tegra_dc *dc = to_tegra_dc(output->encoder.crtc);
+	struct tegra_dsi *dsi = to_dsi(output);
+	unsigned long value;
+
+	/* disable DSI controller */
+	value = tegra_dsi_readl(dsi, DSI_POWER_CONTROL);
+	value &= ~DSI_POWER_CONTROL_ENABLE;
+	tegra_dsi_writel(dsi, value, DSI_POWER_CONTROL);
+
+	/*
+	 * The following accesses registers of the display controller, so make
+	 * sure it's only executed when the output is attached to one.
+	 */
+	if (dc) {
+		value = tegra_dc_readl(dc, DC_CMD_DISPLAY_POWER_CONTROL);
+		value &= ~(PW0_ENABLE | PW1_ENABLE | PW2_ENABLE | PW3_ENABLE |
+			   PW4_ENABLE | PM0_ENABLE | PM1_ENABLE);
+		tegra_dc_writel(dc, value, DC_CMD_DISPLAY_POWER_CONTROL);
+
+		value = tegra_dc_readl(dc, DC_CMD_DISPLAY_COMMAND);
+		value &= ~DISP_CTRL_MODE_MASK;
+		tegra_dc_writel(dc, value, DC_CMD_DISPLAY_COMMAND);
+
+		value = tegra_dc_readl(dc, DC_DISP_DISP_WIN_OPTIONS);
+		value &= ~DSI_ENABLE;
+		tegra_dc_writel(dc, value, DC_DISP_DISP_WIN_OPTIONS);
+
+		tegra_dc_writel(dc, GENERAL_ACT_REQ << 8, DC_CMD_STATE_CONTROL);
+		tegra_dc_writel(dc, GENERAL_ACT_REQ, DC_CMD_STATE_CONTROL);
+	}
+
+	clk_disable(dsi->clk);
+
+	return 0;
+}
+
+static int tegra_output_dsi_setup_clock(struct tegra_output *output,
+					struct clk *clk, unsigned long pclk)
+{
+	struct tegra_dc *dc = to_tegra_dc(output->encoder.crtc);
+	struct drm_display_mode *mode = &dc->base.mode;
+	unsigned int timeout, mul, div, vrefresh;
+	struct tegra_dsi *dsi = to_dsi(output);
+	unsigned long bclk, plld, value;
+	struct clk *base;
+	int err;
+
+	err = tegra_dsi_get_muldiv(dsi->format, &mul, &div);
+	if (err < 0)
+		return err;
+
+	vrefresh = drm_mode_vrefresh(mode);
+
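+	/*
+	 * bclk is the per-lane byte clock derived from the pixel clock and
+	 * the bytes-per-pixel ratio (mul/div); plld is the corresponding bit
+	 * clock (8x the byte clock) in MHz, rounded up. The pixel clock is
+	 * then recomputed as half the PLL rate to match the /2 output
+	 * dividers mentioned below.
+	 */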
+	pclk = mode->htotal * mode->vtotal * vrefresh;
+	bclk = (pclk * mul) / (div * dsi->lanes);
+	plld = DIV_ROUND_UP(bclk * 8, 1000000);
+	pclk = (plld * 1000000) / 2;
+
+	err = clk_set_parent(clk, dsi->clk_parent);
+	if (err < 0) {
+		dev_err(dsi->dev, "failed to set parent clock: %d\n", err);
+		return err;
+	}
+
+	base = clk_get_parent(dsi->clk_parent);
+
+	/*
+	 * This assumes that the parent clock is pll_d_out0 or pll_d2_out
+	 * respectively, each of which divides the base pll_d by 2.
+	 */
+	err = clk_set_rate(base, pclk * 2);
+	if (err < 0) {
+		dev_err(dsi->dev, "failed to set base clock rate to %lu Hz\n",
+			pclk * 2);
+		return err;
+	}
+
+	/*
+	 * XXX: Move the below somewhere else so that we don't need to have
+	 * access to the vrefresh in this function?
+	 */
+
+	/* one frame high-speed transmission timeout */
+	timeout = (bclk / vrefresh) / 512;
+	value = DSI_TIMEOUT_LRX(0x2000) | DSI_TIMEOUT_HTX(timeout);
+	tegra_dsi_writel(dsi, value, DSI_TIMEOUT_0);
+
+	/* 2 ms peripheral timeout for panel */
+	timeout = 2 * bclk / 512 * 1000;
+	value = DSI_TIMEOUT_PR(timeout) | DSI_TIMEOUT_TA(0x2000);
+	tegra_dsi_writel(dsi, value, DSI_TIMEOUT_1);
+
+	value = DSI_TALLY_TA(0) | DSI_TALLY_LRX(0) | DSI_TALLY_HTX(0);
+	tegra_dsi_writel(dsi, value, DSI_TO_TALLY);
+
+	return 0;
+}
+
+static int tegra_output_dsi_check_mode(struct tegra_output *output,
+				       struct drm_display_mode *mode,
+				       enum drm_mode_status *status)
+{
+	/*
+	 * FIXME: For now, always assume that the mode is okay.
+	 */
+
+	*status = MODE_OK;
+
+	return 0;
+}
+
+static const struct tegra_output_ops dsi_ops = {
+	.enable = tegra_output_dsi_enable,
+	.disable = tegra_output_dsi_disable,
+	.setup_clock = tegra_output_dsi_setup_clock,
+	.check_mode = tegra_output_dsi_check_mode,
+};
+
+static int tegra_dsi_pad_enable(struct tegra_dsi *dsi)
+{
+	unsigned long value;
+
+	value = DSI_PAD_CONTROL_VS1_PULLDN(0) | DSI_PAD_CONTROL_VS1_PDIO(0);
+	tegra_dsi_writel(dsi, value, DSI_PAD_CONTROL_0);
+
+	return 0;
+}
+
+static int tegra_dsi_pad_calibrate(struct tegra_dsi *dsi)
+{
+	unsigned long value;
+
+	tegra_dsi_writel(dsi, 0, DSI_PAD_CONTROL_0);
+	tegra_dsi_writel(dsi, 0, DSI_PAD_CONTROL_1);
+	tegra_dsi_writel(dsi, 0, DSI_PAD_CONTROL_2);
+	tegra_dsi_writel(dsi, 0, DSI_PAD_CONTROL_3);
+	tegra_dsi_writel(dsi, 0, DSI_PAD_CONTROL_4);
+
+	/* start calibration */
+	tegra_dsi_pad_enable(dsi);
+
+	value = DSI_PAD_SLEW_UP(0x7) | DSI_PAD_SLEW_DN(0x7) |
+		DSI_PAD_LP_UP(0x1) | DSI_PAD_LP_DN(0x1) |
+		DSI_PAD_OUT_CLK(0x0);
+	tegra_dsi_writel(dsi, value, DSI_PAD_CONTROL_2);
+
+	return tegra_mipi_calibrate(dsi->mipi);
+}
+
+static int tegra_dsi_init(struct host1x_client *client)
+{
+	struct tegra_drm *tegra = dev_get_drvdata(client->parent);
+	struct tegra_dsi *dsi = host1x_client_to_dsi(client);
+	unsigned long value, i;
+	int err;
+
+	dsi->output.type = TEGRA_OUTPUT_DSI;
+	dsi->output.dev = client->dev;
+	dsi->output.ops = &dsi_ops;
+
+	err = tegra_output_init(tegra->drm, &dsi->output);
+	if (err < 0) {
+		dev_err(client->dev, "output setup failed: %d\n", err);
+		return err;
+	}
+
+	if (IS_ENABLED(CONFIG_DEBUG_FS)) {
+		err = tegra_dsi_debugfs_init(dsi, tegra->drm->primary);
+		if (err < 0)
+			dev_err(dsi->dev, "debugfs setup failed: %d\n", err);
+	}
+
+	/*
+	 * enable high-speed mode, checksum generation, ECC generation and
+	 * disable raw mode
+	 */
+	value = tegra_dsi_readl(dsi, DSI_HOST_CONTROL);
+	value |= DSI_HOST_CONTROL_ECC | DSI_HOST_CONTROL_CS |
+		 DSI_HOST_CONTROL_HS;
+	value &= ~DSI_HOST_CONTROL_RAW;
+	tegra_dsi_writel(dsi, value, DSI_HOST_CONTROL);
+
+	tegra_dsi_writel(dsi, 0, DSI_SOL_DELAY);
+	tegra_dsi_writel(dsi, 0, DSI_MAX_THRESHOLD);
+
+	tegra_dsi_writel(dsi, 0, DSI_INIT_SEQ_CONTROL);
+
+	for (i = 0; i < 8; i++) {
+		tegra_dsi_writel(dsi, 0, DSI_INIT_SEQ_DATA_0 + i);
+		tegra_dsi_writel(dsi, 0, DSI_INIT_SEQ_DATA_8 + i);
+	}
+
+	for (i = 0; i < 12; i++)
+		tegra_dsi_writel(dsi, 0, DSI_PKT_SEQ_0_LO + i);
+
+	tegra_dsi_writel(dsi, 0, DSI_DCS_CMDS);
+
+	err = tegra_dsi_pad_calibrate(dsi);
+	if (err < 0) {
+		dev_err(dsi->dev, "MIPI calibration failed: %d\n", err);
+		return err;
+	}
+
+	tegra_dsi_writel(dsi, DSI_POWER_CONTROL_ENABLE, DSI_POWER_CONTROL);
+	usleep_range(300, 1000);
+
+	return 0;
+}
+
+static int tegra_dsi_exit(struct host1x_client *client)
+{
+	struct tegra_dsi *dsi = host1x_client_to_dsi(client);
+	int err;
+
+	if (IS_ENABLED(CONFIG_DEBUG_FS)) {
+		err = tegra_dsi_debugfs_exit(dsi);
+		if (err < 0)
+			dev_err(dsi->dev, "debugfs cleanup failed: %d\n", err);
+	}
+
+	err = tegra_output_disable(&dsi->output);
+	if (err < 0) {
+		dev_err(client->dev, "output failed to disable: %d\n", err);
+		return err;
+	}
+
+	err = tegra_output_exit(&dsi->output);
+	if (err < 0) {
+		dev_err(client->dev, "output cleanup failed: %d\n", err);
+		return err;
+	}
+
+	return 0;
+}
+
+static const struct host1x_client_ops dsi_client_ops = {
+	.init = tegra_dsi_init,
+	.exit = tegra_dsi_exit,
+};
+
+static int tegra_dsi_setup_clocks(struct tegra_dsi *dsi)
+{
+	struct clk *parent;
+	int err;
+
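+	/*
+	 * Reparent the immediate parent of the module clock (presumably a
+	 * mux) to the "parent" clock obtained during probe().
+	 */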
+	parent = clk_get_parent(dsi->clk);
+	if (!parent)
+		return -EINVAL;
+
+	err = clk_set_parent(parent, dsi->clk_parent);
+	if (err < 0)
+		return err;
+
+	return 0;
+}
+
+static void tegra_dsi_initialize(struct tegra_dsi *dsi)
+{
+	unsigned int i;
+
+	tegra_dsi_writel(dsi, 0, DSI_POWER_CONTROL);
+
+	tegra_dsi_writel(dsi, 0, DSI_INT_ENABLE);
+	tegra_dsi_writel(dsi, 0, DSI_INT_STATUS);
+	tegra_dsi_writel(dsi, 0, DSI_INT_MASK);
+
+	tegra_dsi_writel(dsi, 0, DSI_HOST_CONTROL);
+	tegra_dsi_writel(dsi, 0, DSI_CONTROL);
+
+	tegra_dsi_writel(dsi, 0, DSI_SOL_DELAY);
+	tegra_dsi_writel(dsi, 0, DSI_MAX_THRESHOLD);
+
+	tegra_dsi_writel(dsi, 0, DSI_INIT_SEQ_CONTROL);
+
+	for (i = 0; i < 8; i++) {
+		tegra_dsi_writel(dsi, 0, DSI_INIT_SEQ_DATA_0 + i);
+		tegra_dsi_writel(dsi, 0, DSI_INIT_SEQ_DATA_8 + i);
+	}
+
+	for (i = 0; i < 12; i++)
+		tegra_dsi_writel(dsi, 0, DSI_PKT_SEQ_0_LO + i);
+
+	tegra_dsi_writel(dsi, 0, DSI_DCS_CMDS);
+
+	for (i = 0; i < 4; i++)
+		tegra_dsi_writel(dsi, 0, DSI_PKT_LEN_0_1 + i);
+
+	tegra_dsi_writel(dsi, 0x00000000, DSI_PHY_TIMING_0);
+	tegra_dsi_writel(dsi, 0x00000000, DSI_PHY_TIMING_1);
+	tegra_dsi_writel(dsi, 0x000000ff, DSI_PHY_TIMING_2);
+	tegra_dsi_writel(dsi, 0x00000000, DSI_BTA_TIMING);
+
+	tegra_dsi_writel(dsi, 0, DSI_TIMEOUT_0);
+	tegra_dsi_writel(dsi, 0, DSI_TIMEOUT_1);
+	tegra_dsi_writel(dsi, 0, DSI_TO_TALLY);
+
+	tegra_dsi_writel(dsi, 0, DSI_PAD_CONTROL_0);
+	tegra_dsi_writel(dsi, 0, DSI_PAD_CONTROL_CD);
+	tegra_dsi_writel(dsi, 0, DSI_PAD_CD_STATUS);
+	tegra_dsi_writel(dsi, 0, DSI_VIDEO_MODE_CONTROL);
+	tegra_dsi_writel(dsi, 0, DSI_PAD_CONTROL_1);
+	tegra_dsi_writel(dsi, 0, DSI_PAD_CONTROL_2);
+	tegra_dsi_writel(dsi, 0, DSI_PAD_CONTROL_3);
+	tegra_dsi_writel(dsi, 0, DSI_PAD_CONTROL_4);
+
+	tegra_dsi_writel(dsi, 0, DSI_GANGED_MODE_CONTROL);
+	tegra_dsi_writel(dsi, 0, DSI_GANGED_MODE_START);
+	tegra_dsi_writel(dsi, 0, DSI_GANGED_MODE_SIZE);
+}
+
+static int tegra_dsi_host_attach(struct mipi_dsi_host *host,
+				 struct mipi_dsi_device *device)
+{
+	struct tegra_dsi *dsi = host_to_tegra(host);
+	struct tegra_output *output = &dsi->output;
+
+	dsi->format = device->format;
+	dsi->lanes = device->lanes;
+
+	output->panel = of_drm_find_panel(device->dev.of_node);
+	if (output->panel) {
+		if (output->connector.dev)
+			drm_helper_hpd_irq_event(output->connector.dev);
+	}
+
+	return 0;
+}
+
+static int tegra_dsi_host_detach(struct mipi_dsi_host *host,
+				 struct mipi_dsi_device *device)
+{
+	struct tegra_dsi *dsi = host_to_tegra(host);
+	struct tegra_output *output = &dsi->output;
+
+	if (output->panel && &device->dev == output->panel->dev) {
+		if (output->connector.dev)
+			drm_helper_hpd_irq_event(output->connector.dev);
+
+		output->panel = NULL;
+	}
+
+	return 0;
+}
+
+static const struct mipi_dsi_host_ops tegra_dsi_host_ops = {
+	.attach = tegra_dsi_host_attach,
+	.detach = tegra_dsi_host_detach,
+};
+
+static int tegra_dsi_probe(struct platform_device *pdev)
+{
+	struct tegra_dsi *dsi;
+	struct resource *regs;
+	int err;
+
+	dsi = devm_kzalloc(&pdev->dev, sizeof(*dsi), GFP_KERNEL);
+	if (!dsi)
+		return -ENOMEM;
+
+	dsi->output.dev = dsi->dev = &pdev->dev;
+
+	err = tegra_output_probe(&dsi->output);
+	if (err < 0)
+		return err;
+
+	/*
+	 * Assume these values by default. When a DSI peripheral driver
+	 * attaches to the DSI host, the parameters will be taken from
+	 * the attached device.
+	 */
+	dsi->format = MIPI_DSI_FMT_RGB888;
+	dsi->lanes = 4;
+
+	dsi->rst = devm_reset_control_get(&pdev->dev, "dsi");
+	if (IS_ERR(dsi->rst))
+		return PTR_ERR(dsi->rst);
+
+	dsi->clk = devm_clk_get(&pdev->dev, NULL);
+	if (IS_ERR(dsi->clk)) {
+		dev_err(&pdev->dev, "cannot get DSI clock\n");
+		return PTR_ERR(dsi->clk);
+	}
+
+	err = clk_prepare_enable(dsi->clk);
+	if (err < 0) {
+		dev_err(&pdev->dev, "cannot enable DSI clock\n");
+		return err;
+	}
+
+	dsi->clk_lp = devm_clk_get(&pdev->dev, "lp");
+	if (IS_ERR(dsi->clk_lp)) {
+		dev_err(&pdev->dev, "cannot get low-power clock\n");
+		return PTR_ERR(dsi->clk_lp);
+	}
+
+	err = clk_prepare_enable(dsi->clk_lp);
+	if (err < 0) {
+		dev_err(&pdev->dev, "cannot enable low-power clock\n");
+		return err;
+	}
+
+	dsi->clk_parent = devm_clk_get(&pdev->dev, "parent");
+	if (IS_ERR(dsi->clk_parent)) {
+		dev_err(&pdev->dev, "cannot get parent clock\n");
+		return PTR_ERR(dsi->clk_parent);
+	}
+
+	err = clk_prepare_enable(dsi->clk_parent);
+	if (err < 0) {
+		dev_err(&pdev->dev, "cannot enable parent clock\n");
+		return err;
+	}
+
+	err = tegra_dsi_setup_clocks(dsi);
+	if (err < 0) {
+		dev_err(&pdev->dev, "cannot setup clocks\n");
+		return err;
+	}
+
+	regs = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	dsi->regs = devm_ioremap_resource(&pdev->dev, regs);
+	if (IS_ERR(dsi->regs))
+		return PTR_ERR(dsi->regs);
+
+	tegra_dsi_initialize(dsi);
+
+	dsi->mipi = tegra_mipi_request(&pdev->dev);
+	if (IS_ERR(dsi->mipi))
+		return PTR_ERR(dsi->mipi);
+
+	dsi->host.ops = &tegra_dsi_host_ops;
+	dsi->host.dev = &pdev->dev;
+
+	err = mipi_dsi_host_register(&dsi->host);
+	if (err < 0) {
+		dev_err(&pdev->dev, "failed to register DSI host: %d\n", err);
+		return err;
+	}
+
+	INIT_LIST_HEAD(&dsi->client.list);
+	dsi->client.ops = &dsi_client_ops;
+	dsi->client.dev = &pdev->dev;
+
+	err = host1x_client_register(&dsi->client);
+	if (err < 0) {
+		dev_err(&pdev->dev, "failed to register host1x client: %d\n",
+			err);
+		return err;
+	}
+
+	platform_set_drvdata(pdev, dsi);
+
+	return 0;
+}
+
+static int tegra_dsi_remove(struct platform_device *pdev)
+{
+	struct tegra_dsi *dsi = platform_get_drvdata(pdev);
+	int err;
+
+	err = host1x_client_unregister(&dsi->client);
+	if (err < 0) {
+		dev_err(&pdev->dev, "failed to unregister host1x client: %d\n",
+			err);
+		return err;
+	}
+
+	mipi_dsi_host_unregister(&dsi->host);
+	tegra_mipi_free(dsi->mipi);
+
+	clk_disable_unprepare(dsi->clk_parent);
+	clk_disable_unprepare(dsi->clk_lp);
+	clk_disable_unprepare(dsi->clk);
+
+	err = tegra_output_remove(&dsi->output);
+	if (err < 0) {
+		dev_err(&pdev->dev, "failed to remove output: %d\n", err);
+		return err;
+	}
+
+	return 0;
+}
+
+static const struct of_device_id tegra_dsi_of_match[] = {
+	{ .compatible = "nvidia,tegra114-dsi", },
+	{ },
+};
+
+struct platform_driver tegra_dsi_driver = {
+	.driver = {
+		.name = "tegra-dsi",
+		.of_match_table = tegra_dsi_of_match,
+	},
+	.probe = tegra_dsi_probe,
+	.remove = tegra_dsi_remove,
+};
diff --git a/drivers/gpu/drm/tegra/dsi.h b/drivers/gpu/drm/tegra/dsi.h
new file mode 100644
index 0000000..00e79c1
--- /dev/null
+++ b/drivers/gpu/drm/tegra/dsi.h
@@ -0,0 +1,134 @@
+/*
+ * Copyright (C) 2013 NVIDIA Corporation
+ *
+ * Permission to use, copy, modify, distribute, and sell this software and its
+ * documentation for any purpose is hereby granted without fee, provided that
+ * the above copyright notice appear in all copies and that both that copyright
+ * notice and this permission notice appear in supporting documentation, and
+ * that the name of the copyright holders not be used in advertising or
+ * publicity pertaining to distribution of the software without specific,
+ * written prior permission.  The copyright holders make no representations
+ * about the suitability of this software for any purpose.  It is provided "as
+ * is" without express or implied warranty.
+ *
+ * THE COPYRIGHT HOLDERS DISCLAIM ALL WARRANTIES WITH REGARD TO THIS SOFTWARE,
+ * INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN NO
+ * EVENT SHALL THE COPYRIGHT HOLDERS BE LIABLE FOR ANY SPECIAL, INDIRECT OR
+ * CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE,
+ * DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
+ * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
+ * OF THIS SOFTWARE.
+ */
+
+#ifndef DRM_TEGRA_DSI_H
+#define DRM_TEGRA_DSI_H
+
+#define DSI_INCR_SYNCPT			0x00
+#define DSI_INCR_SYNCPT_CONTROL		0x01
+#define DSI_INCR_SYNCPT_ERROR		0x02
+#define DSI_CTXSW			0x08
+#define DSI_RD_DATA			0x09
+#define DSI_WR_DATA			0x0a
+#define DSI_POWER_CONTROL		0x0b
+#define DSI_POWER_CONTROL_ENABLE	(1 << 0)
+#define DSI_INT_ENABLE			0x0c
+#define DSI_INT_STATUS			0x0d
+#define DSI_INT_MASK			0x0e
+#define DSI_HOST_CONTROL		0x0f
+#define DSI_HOST_CONTROL_RAW		(1 << 6)
+#define DSI_HOST_CONTROL_HS		(1 << 5)
+#define DSI_HOST_CONTROL_BTA		(1 << 2)
+#define DSI_HOST_CONTROL_CS		(1 << 1)
+#define DSI_HOST_CONTROL_ECC		(1 << 0)
+#define DSI_CONTROL			0x10
+#define DSI_CONTROL_HS_CLK_CTRL		(1 << 20)
+#define DSI_CONTROL_CHANNEL(c)		(((c) & 0x3) << 16)
+#define DSI_CONTROL_FORMAT(f)		(((f) & 0x3) << 12)
+#define DSI_CONTROL_TX_TRIG(x)		(((x) & 0x3) <<  8)
+#define DSI_CONTROL_LANES(n)		(((n) & 0x3) <<  4)
+#define DSI_CONTROL_DCS_ENABLE		(1 << 3)
+#define DSI_CONTROL_SOURCE(s)		(((s) & 0x1) <<  2)
+#define DSI_CONTROL_VIDEO_ENABLE	(1 << 1)
+#define DSI_CONTROL_HOST_ENABLE		(1 << 0)
+#define DSI_SOL_DELAY			0x11
+#define DSI_MAX_THRESHOLD		0x12
+#define DSI_TRIGGER			0x13
+#define DSI_TX_CRC			0x14
+#define DSI_STATUS			0x15
+#define DSI_STATUS_IDLE			(1 << 10)
+#define DSI_INIT_SEQ_CONTROL		0x1a
+#define DSI_INIT_SEQ_DATA_0		0x1b
+#define DSI_INIT_SEQ_DATA_1		0x1c
+#define DSI_INIT_SEQ_DATA_2		0x1d
+#define DSI_INIT_SEQ_DATA_3		0x1e
+#define DSI_INIT_SEQ_DATA_4		0x1f
+#define DSI_INIT_SEQ_DATA_5		0x20
+#define DSI_INIT_SEQ_DATA_6		0x21
+#define DSI_INIT_SEQ_DATA_7		0x22
+#define DSI_PKT_SEQ_0_LO		0x23
+#define DSI_PKT_SEQ_0_HI		0x24
+#define DSI_PKT_SEQ_1_LO		0x25
+#define DSI_PKT_SEQ_1_HI		0x26
+#define DSI_PKT_SEQ_2_LO		0x27
+#define DSI_PKT_SEQ_2_HI		0x28
+#define DSI_PKT_SEQ_3_LO		0x29
+#define DSI_PKT_SEQ_3_HI		0x2a
+#define DSI_PKT_SEQ_4_LO		0x2b
+#define DSI_PKT_SEQ_4_HI		0x2c
+#define DSI_PKT_SEQ_5_LO		0x2d
+#define DSI_PKT_SEQ_5_HI		0x2e
+#define DSI_DCS_CMDS			0x33
+#define DSI_PKT_LEN_0_1			0x34
+#define DSI_PKT_LEN_2_3			0x35
+#define DSI_PKT_LEN_4_5			0x36
+#define DSI_PKT_LEN_6_7			0x37
+#define DSI_PHY_TIMING_0		0x3c
+#define DSI_PHY_TIMING_1		0x3d
+#define DSI_PHY_TIMING_2		0x3e
+#define DSI_BTA_TIMING			0x3f
+
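+/*
+ * Convert a timing value given in nanoseconds into units of the given
+ * period, compensating for the cycles (hwinc) that the hardware is assumed
+ * to add implicitly.
+ */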
+#define DSI_TIMING_FIELD(value, period, hwinc) \
+	((DIV_ROUND_CLOSEST(value, period) - (hwinc)) & 0xff)
+
+#define DSI_TIMEOUT_0			0x44
+#define DSI_TIMEOUT_LRX(x)		(((x) & 0xffff) << 16)
+#define DSI_TIMEOUT_HTX(x)		(((x) & 0xffff) <<  0)
+#define DSI_TIMEOUT_1			0x45
+#define DSI_TIMEOUT_PR(x)		(((x) & 0xffff) << 16)
+#define DSI_TIMEOUT_TA(x)		(((x) & 0xffff) <<  0)
+#define DSI_TO_TALLY			0x46
+#define DSI_TALLY_TA(x)			(((x) & 0xff) << 16)
+#define DSI_TALLY_LRX(x)		(((x) & 0xff) <<  8)
+#define DSI_TALLY_HTX(x)		(((x) & 0xff) <<  0)
+#define DSI_PAD_CONTROL_0		0x4b
+#define DSI_PAD_CONTROL_VS1_PDIO(x)	(((x) & 0xf) <<  0)
+#define DSI_PAD_CONTROL_VS1_PDIO_CLK	(1 <<  8)
+#define DSI_PAD_CONTROL_VS1_PULLDN(x)	(((x) & 0xf) << 16)
+#define DSI_PAD_CONTROL_VS1_PULLDN_CLK	(1 << 24)
+#define DSI_PAD_CONTROL_CD		0x4c
+#define DSI_PAD_CD_STATUS		0x4d
+#define DSI_VIDEO_MODE_CONTROL		0x4e
+#define DSI_PAD_CONTROL_1		0x4f
+#define DSI_PAD_CONTROL_2		0x50
+#define DSI_PAD_OUT_CLK(x)		(((x) & 0x7) <<  0)
+#define DSI_PAD_LP_DN(x)		(((x) & 0x7) <<  4)
+#define DSI_PAD_LP_UP(x)		(((x) & 0x7) <<  8)
+#define DSI_PAD_SLEW_DN(x)		(((x) & 0x7) << 12)
+#define DSI_PAD_SLEW_UP(x)		(((x) & 0x7) << 16)
+#define DSI_PAD_CONTROL_3		0x51
+#define DSI_PAD_CONTROL_4		0x52
+#define DSI_GANGED_MODE_CONTROL		0x53
+#define DSI_GANGED_MODE_START		0x54
+#define DSI_GANGED_MODE_SIZE		0x55
+#define DSI_RAW_DATA_BYTE_COUNT		0x56
+#define DSI_ULTRA_LOW_POWER_CONTROL	0x57
+#define DSI_INIT_SEQ_DATA_8		0x58
+#define DSI_INIT_SEQ_DATA_9		0x59
+#define DSI_INIT_SEQ_DATA_10		0x5a
+#define DSI_INIT_SEQ_DATA_11		0x5b
+#define DSI_INIT_SEQ_DATA_12		0x5c
+#define DSI_INIT_SEQ_DATA_13		0x5d
+#define DSI_INIT_SEQ_DATA_14		0x5e
+#define DSI_INIT_SEQ_DATA_15		0x5f
+
+#endif
diff --git a/drivers/gpu/drm/tegra/fb.c b/drivers/gpu/drm/tegra/fb.c
index a3835e7..f7fca09 100644
--- a/drivers/gpu/drm/tegra/fb.c
+++ b/drivers/gpu/drm/tegra/fb.c
@@ -18,10 +18,12 @@
 	return container_of(fb, struct tegra_fb, base);
 }
 
+#ifdef CONFIG_DRM_TEGRA_FBDEV
 static inline struct tegra_fbdev *to_tegra_fbdev(struct drm_fb_helper *helper)
 {
 	return container_of(helper, struct tegra_fbdev, base);
 }
+#endif
 
 struct tegra_bo *tegra_fb_get_plane(struct drm_framebuffer *framebuffer,
 				    unsigned int index)
@@ -98,8 +100,10 @@
 		return ERR_PTR(-ENOMEM);
 
 	fb->planes = kzalloc(num_planes * sizeof(*planes), GFP_KERNEL);
-	if (!fb->planes)
+	if (!fb->planes) {
+		kfree(fb);
 		return ERR_PTR(-ENOMEM);
+	}
 
 	fb->num_planes = num_planes;
 
@@ -172,6 +176,7 @@
 	return ERR_PTR(err);
 }
 
+#ifdef CONFIG_DRM_TEGRA_FBDEV
 static struct fb_ops tegra_fb_ops = {
 	.owner = THIS_MODULE,
 	.fb_fillrect = sys_fillrect,
@@ -339,6 +344,15 @@
 	kfree(fbdev);
 }
 
+void tegra_fbdev_restore_mode(struct tegra_fbdev *fbdev)
+{
+	if (fbdev) {
+		drm_modeset_lock_all(fbdev->base.dev);
+		drm_fb_helper_restore_fbdev_mode(&fbdev->base);
+		drm_modeset_unlock_all(fbdev->base.dev);
+	}
+}
+
 static void tegra_fb_output_poll_changed(struct drm_device *drm)
 {
 	struct tegra_drm *tegra = drm->dev_private;
@@ -346,16 +360,20 @@
 	if (tegra->fbdev)
 		drm_fb_helper_hotplug_event(&tegra->fbdev->base);
 }
+#endif
 
 static const struct drm_mode_config_funcs tegra_drm_mode_funcs = {
 	.fb_create = tegra_fb_create,
+#ifdef CONFIG_DRM_TEGRA_FBDEV
 	.output_poll_changed = tegra_fb_output_poll_changed,
+#endif
 };
 
 int tegra_drm_fb_init(struct drm_device *drm)
 {
+#ifdef CONFIG_DRM_TEGRA_FBDEV
 	struct tegra_drm *tegra = drm->dev_private;
-	struct tegra_fbdev *fbdev;
+#endif
 
 	drm->mode_config.min_width = 0;
 	drm->mode_config.min_height = 0;
@@ -365,28 +383,21 @@
 
 	drm->mode_config.funcs = &tegra_drm_mode_funcs;
 
-	fbdev = tegra_fbdev_create(drm, 32, drm->mode_config.num_crtc,
-				   drm->mode_config.num_connector);
-	if (IS_ERR(fbdev))
-		return PTR_ERR(fbdev);
-
-	tegra->fbdev = fbdev;
+#ifdef CONFIG_DRM_TEGRA_FBDEV
+	tegra->fbdev = tegra_fbdev_create(drm, 32, drm->mode_config.num_crtc,
+					  drm->mode_config.num_connector);
+	if (IS_ERR(tegra->fbdev))
+		return PTR_ERR(tegra->fbdev);
+#endif
 
 	return 0;
 }
 
 void tegra_drm_fb_exit(struct drm_device *drm)
 {
+#ifdef CONFIG_DRM_TEGRA_FBDEV
 	struct tegra_drm *tegra = drm->dev_private;
 
 	tegra_fbdev_free(tegra->fbdev);
-}
-
-void tegra_fbdev_restore_mode(struct tegra_fbdev *fbdev)
-{
-	if (fbdev) {
-		drm_modeset_lock_all(fbdev->base.dev);
-		drm_fb_helper_restore_fbdev_mode(&fbdev->base);
-		drm_modeset_unlock_all(fbdev->base.dev);
-	}
+#endif
 }
diff --git a/drivers/gpu/drm/tegra/gem.c b/drivers/gpu/drm/tegra/gem.c
index 28a9cbc..ef853e5 100644
--- a/drivers/gpu/drm/tegra/gem.c
+++ b/drivers/gpu/drm/tegra/gem.c
@@ -18,6 +18,7 @@
  * GNU General Public License for more details.
  */
 
+#include <linux/dma-buf.h>
 #include <drm/tegra_drm.h>
 
 #include "gem.h"
@@ -83,7 +84,7 @@
 	return bo;
 }
 
-const struct host1x_bo_ops tegra_bo_ops = {
+static const struct host1x_bo_ops tegra_bo_ops = {
 	.get = tegra_bo_get,
 	.put = tegra_bo_put,
 	.pin = tegra_bo_pin,
@@ -145,7 +146,6 @@
 	kfree(bo);
 
 	return ERR_PTR(err);
-
 }
 
 struct tegra_bo *tegra_bo_create_with_handle(struct drm_file *file,
@@ -174,13 +174,87 @@
 	return ERR_PTR(ret);
 }
 
+struct tegra_bo *tegra_bo_import(struct drm_device *drm, struct dma_buf *buf)
+{
+	struct dma_buf_attachment *attach;
+	struct tegra_bo *bo;
+	ssize_t size;
+	int err;
+
+	bo = kzalloc(sizeof(*bo), GFP_KERNEL);
+	if (!bo)
+		return ERR_PTR(-ENOMEM);
+
+	host1x_bo_init(&bo->base, &tegra_bo_ops);
+	size = round_up(buf->size, PAGE_SIZE);
+
+	err = drm_gem_object_init(drm, &bo->gem, size);
+	if (err < 0)
+		goto free;
+
+	err = drm_gem_create_mmap_offset(&bo->gem);
+	if (err < 0)
+		goto release;
+
+	attach = dma_buf_attach(buf, drm->dev);
+	if (IS_ERR(attach)) {
+		err = PTR_ERR(attach);
+		goto free_mmap;
+	}
+
+	get_dma_buf(buf);
+
+	bo->sgt = dma_buf_map_attachment(attach, DMA_TO_DEVICE);
+	if (!bo->sgt) {
+		err = -ENOMEM;
+		goto detach;
+	}
+
+	if (IS_ERR(bo->sgt)) {
+		err = PTR_ERR(bo->sgt);
+		goto detach;
+	}
+
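+	/*
+	 * Only buffers that map to a single, physically contiguous chunk can
+	 * be imported, since only one DMA address is stored below.
+	 */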
+	if (bo->sgt->nents > 1) {
+		err = -EINVAL;
+		goto detach;
+	}
+
+	bo->paddr = sg_dma_address(bo->sgt->sgl);
+	bo->gem.import_attach = attach;
+
+	return bo;
+
+detach:
+	if (!IS_ERR_OR_NULL(bo->sgt))
+		dma_buf_unmap_attachment(attach, bo->sgt, DMA_TO_DEVICE);
+
+	dma_buf_detach(buf, attach);
+	dma_buf_put(buf);
+free_mmap:
+	drm_gem_free_mmap_offset(&bo->gem);
+release:
+	drm_gem_object_release(&bo->gem);
+free:
+	kfree(bo);
+
+	return ERR_PTR(err);
+}
+
 void tegra_bo_free_object(struct drm_gem_object *gem)
 {
 	struct tegra_bo *bo = to_tegra_bo(gem);
 
+	if (gem->import_attach) {
+		dma_buf_unmap_attachment(gem->import_attach, bo->sgt,
+					 DMA_TO_DEVICE);
+		drm_prime_gem_destroy(gem, NULL);
+	} else {
+		tegra_bo_destroy(gem->dev, bo);
+	}
+
 	drm_gem_free_mmap_offset(gem);
 	drm_gem_object_release(gem);
-	tegra_bo_destroy(gem->dev, bo);
 
 	kfree(bo);
 }
@@ -256,3 +330,106 @@
 
 	return ret;
 }
+
+static struct sg_table *
+tegra_gem_prime_map_dma_buf(struct dma_buf_attachment *attach,
+			    enum dma_data_direction dir)
+{
+	struct drm_gem_object *gem = attach->dmabuf->priv;
+	struct tegra_bo *bo = to_tegra_bo(gem);
+	struct sg_table *sgt;
+
+	sgt = kmalloc(sizeof(*sgt), GFP_KERNEL);
+	if (!sgt)
+		return NULL;
+
+	if (sg_alloc_table(sgt, 1, GFP_KERNEL)) {
+		kfree(sgt);
+		return NULL;
+	}
+
+	sg_dma_address(sgt->sgl) = bo->paddr;
+	sg_dma_len(sgt->sgl) = gem->size;
+
+	return sgt;
+}
+
+static void tegra_gem_prime_unmap_dma_buf(struct dma_buf_attachment *attach,
+					  struct sg_table *sgt,
+					  enum dma_data_direction dir)
+{
+	sg_free_table(sgt);
+	kfree(sgt);
+}
+
+static void tegra_gem_prime_release(struct dma_buf *buf)
+{
+	drm_gem_dmabuf_release(buf);
+}
+
+static void *tegra_gem_prime_kmap_atomic(struct dma_buf *buf,
+					 unsigned long page)
+{
+	return NULL;
+}
+
+static void tegra_gem_prime_kunmap_atomic(struct dma_buf *buf,
+					  unsigned long page,
+					  void *addr)
+{
+}
+
+static void *tegra_gem_prime_kmap(struct dma_buf *buf, unsigned long page)
+{
+	return NULL;
+}
+
+static void tegra_gem_prime_kunmap(struct dma_buf *buf, unsigned long page,
+				   void *addr)
+{
+}
+
+static int tegra_gem_prime_mmap(struct dma_buf *buf, struct vm_area_struct *vma)
+{
+	return -EINVAL;
+}
+
+static const struct dma_buf_ops tegra_gem_prime_dmabuf_ops = {
+	.map_dma_buf = tegra_gem_prime_map_dma_buf,
+	.unmap_dma_buf = tegra_gem_prime_unmap_dma_buf,
+	.release = tegra_gem_prime_release,
+	.kmap_atomic = tegra_gem_prime_kmap_atomic,
+	.kunmap_atomic = tegra_gem_prime_kunmap_atomic,
+	.kmap = tegra_gem_prime_kmap,
+	.kunmap = tegra_gem_prime_kunmap,
+	.mmap = tegra_gem_prime_mmap,
+};
+
+struct dma_buf *tegra_gem_prime_export(struct drm_device *drm,
+				       struct drm_gem_object *gem,
+				       int flags)
+{
+	return dma_buf_export(gem, &tegra_gem_prime_dmabuf_ops, gem->size,
+			      flags);
+}
+
+struct drm_gem_object *tegra_gem_prime_import(struct drm_device *drm,
+					      struct dma_buf *buf)
+{
+	struct tegra_bo *bo;
+
+	if (buf->ops == &tegra_gem_prime_dmabuf_ops) {
+		struct drm_gem_object *gem = buf->priv;
+
+		if (gem->dev == drm) {
+			drm_gem_object_reference(gem);
+			return gem;
+		}
+	}
+
+	bo = tegra_bo_import(drm, buf);
+	if (IS_ERR(bo))
+		return ERR_CAST(bo);
+
+	return &bo->gem;
+}
diff --git a/drivers/gpu/drm/tegra/gem.h b/drivers/gpu/drm/tegra/gem.h
index 7674000..ffd4f792 100644
--- a/drivers/gpu/drm/tegra/gem.h
+++ b/drivers/gpu/drm/tegra/gem.h
@@ -31,6 +31,7 @@
 	struct drm_gem_object gem;
 	struct host1x_bo base;
 	unsigned long flags;
+	struct sg_table *sgt;
 	dma_addr_t paddr;
 	void *vaddr;
 };
@@ -40,8 +41,6 @@
 	return container_of(gem, struct tegra_bo, gem);
 }
 
-extern const struct host1x_bo_ops tegra_bo_ops;
-
 struct tegra_bo *tegra_bo_create(struct drm_device *drm, unsigned int size,
 				 unsigned long flags);
 struct tegra_bo *tegra_bo_create_with_handle(struct drm_file *file,
@@ -59,4 +58,10 @@
 
 extern const struct vm_operations_struct tegra_bo_vm_ops;
 
+struct dma_buf *tegra_gem_prime_export(struct drm_device *drm,
+				       struct drm_gem_object *gem,
+				       int flags);
+struct drm_gem_object *tegra_gem_prime_import(struct drm_device *drm,
+					      struct dma_buf *buf);
+
 #endif
diff --git a/drivers/gpu/drm/tegra/hdmi.c b/drivers/gpu/drm/tegra/hdmi.c
index 7f6253e..6928015 100644
--- a/drivers/gpu/drm/tegra/hdmi.c
+++ b/drivers/gpu/drm/tegra/hdmi.c
@@ -40,6 +40,7 @@
 	struct host1x_client client;
 	struct tegra_output output;
 	struct device *dev;
+	bool enabled;
 
 	struct regulator *vdd;
 	struct regulator *pll;
@@ -379,7 +380,7 @@
 
 		if (f > 96000)
 			delta = 2;
-		else if (f > 480000)
+		else if (f > 48000)
 			delta = 6;
 		else
 			delta = 9;
@@ -699,6 +700,9 @@
 	int retries = 1000;
 	int err;
 
+	if (hdmi->enabled)
+		return 0;
+
 	hdmi->dvi = !tegra_output_is_hdmi(output);
 
 	pclk = mode->clock * 1000;
@@ -839,10 +843,6 @@
 	value |= SOR_CSTM_ROTCLK(2);
 	tegra_hdmi_writel(hdmi, value, HDMI_NV_PDISP_SOR_CSTM);
 
-	tegra_dc_writel(dc, DISP_CTRL_MODE_STOP, DC_CMD_DISPLAY_COMMAND);
-	tegra_dc_writel(dc, GENERAL_ACT_REQ << 8, DC_CMD_STATE_CONTROL);
-	tegra_dc_writel(dc, GENERAL_ACT_REQ, DC_CMD_STATE_CONTROL);
-
 	/* start SOR */
 	tegra_hdmi_writel(hdmi,
 			  SOR_PWR_NORMAL_STATE_PU |
@@ -892,31 +892,67 @@
 			  HDMI_NV_PDISP_SOR_STATE1);
 	tegra_hdmi_writel(hdmi, 0, HDMI_NV_PDISP_SOR_STATE0);
 
-	tegra_dc_writel(dc, HDMI_ENABLE, DC_DISP_DISP_WIN_OPTIONS);
+	value = tegra_dc_readl(dc, DC_DISP_DISP_WIN_OPTIONS);
+	value |= HDMI_ENABLE;
+	tegra_dc_writel(dc, value, DC_DISP_DISP_WIN_OPTIONS);
 
-	value = PW0_ENABLE | PW1_ENABLE | PW2_ENABLE | PW3_ENABLE |
-		PW4_ENABLE | PM0_ENABLE | PM1_ENABLE;
-	tegra_dc_writel(dc, value, DC_CMD_DISPLAY_POWER_CONTROL);
-
-	value = DISP_CTRL_MODE_C_DISPLAY;
+	value = tegra_dc_readl(dc, DC_CMD_DISPLAY_COMMAND);
+	value &= ~DISP_CTRL_MODE_MASK;
+	value |= DISP_CTRL_MODE_C_DISPLAY;
 	tegra_dc_writel(dc, value, DC_CMD_DISPLAY_COMMAND);
 
+	value = tegra_dc_readl(dc, DC_CMD_DISPLAY_POWER_CONTROL);
+	value |= PW0_ENABLE | PW1_ENABLE | PW2_ENABLE | PW3_ENABLE |
+		 PW4_ENABLE | PM0_ENABLE | PM1_ENABLE;
+	tegra_dc_writel(dc, value, DC_CMD_DISPLAY_POWER_CONTROL);
+
 	tegra_dc_writel(dc, GENERAL_ACT_REQ << 8, DC_CMD_STATE_CONTROL);
 	tegra_dc_writel(dc, GENERAL_ACT_REQ, DC_CMD_STATE_CONTROL);
 
 	/* TODO: add HDCP support */
 
+	hdmi->enabled = true;
+
 	return 0;
 }
 
 static int tegra_output_hdmi_disable(struct tegra_output *output)
 {
+	struct tegra_dc *dc = to_tegra_dc(output->encoder.crtc);
 	struct tegra_hdmi *hdmi = to_hdmi(output);
+	unsigned long value;
+
+	if (!hdmi->enabled)
+		return 0;
+
+	/*
+	 * The following accesses registers of the display controller, so make
+	 * sure it's only executed when the output is attached to one.
+	 */
+	if (dc) {
+		value = tegra_dc_readl(dc, DC_CMD_DISPLAY_POWER_CONTROL);
+		value &= ~(PW0_ENABLE | PW1_ENABLE | PW2_ENABLE | PW3_ENABLE |
+			   PW4_ENABLE | PM0_ENABLE | PM1_ENABLE);
+		tegra_dc_writel(dc, value, DC_CMD_DISPLAY_POWER_CONTROL);
+
+		value = tegra_dc_readl(dc, DC_CMD_DISPLAY_COMMAND);
+		value &= ~DISP_CTRL_MODE_MASK;
+		tegra_dc_writel(dc, value, DC_CMD_DISPLAY_COMMAND);
+
+		value = tegra_dc_readl(dc, DC_DISP_DISP_WIN_OPTIONS);
+		value &= ~HDMI_ENABLE;
+		tegra_dc_writel(dc, value, DC_DISP_DISP_WIN_OPTIONS);
+
+		tegra_dc_writel(dc, GENERAL_ACT_REQ << 8, DC_CMD_STATE_CONTROL);
+		tegra_dc_writel(dc, GENERAL_ACT_REQ, DC_CMD_STATE_CONTROL);
+	}
 
 	reset_control_assert(hdmi->rst);
 	clk_disable(hdmi->clk);
 	regulator_disable(hdmi->pll);
 
+	hdmi->enabled = false;
+
 	return 0;
 }
 
@@ -960,7 +996,7 @@
 	parent = clk_get_parent(hdmi->clk_parent);
 
 	err = clk_round_rate(parent, pclk * 4);
-	if (err < 0)
+	if (err <= 0)
 		*status = MODE_NOCLOCK;
 	else
 		*status = MODE_OK;
@@ -1382,9 +1418,6 @@
 		return err;
 
 	regs = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-	if (!regs)
-		return -ENXIO;
-
 	hdmi->regs = devm_ioremap_resource(&pdev->dev, regs);
 	if (IS_ERR(hdmi->regs))
 		return PTR_ERR(hdmi->regs);
diff --git a/drivers/gpu/drm/tegra/mipi-phy.c b/drivers/gpu/drm/tegra/mipi-phy.c
new file mode 100644
index 0000000..e2c4aed
--- /dev/null
+++ b/drivers/gpu/drm/tegra/mipi-phy.c
@@ -0,0 +1,138 @@
+/*
+ * Copyright (C) 2013 NVIDIA Corporation
+ *
+ * Permission to use, copy, modify, distribute, and sell this software and its
+ * documentation for any purpose is hereby granted without fee, provided that
+ * the above copyright notice appear in all copies and that both that copyright
+ * notice and this permission notice appear in supporting documentation, and
+ * that the name of the copyright holders not be used in advertising or
+ * publicity pertaining to distribution of the software without specific,
+ * written prior permission.  The copyright holders make no representations
+ * about the suitability of this software for any purpose.  It is provided "as
+ * is" without express or implied warranty.
+ *
+ * THE COPYRIGHT HOLDERS DISCLAIM ALL WARRANTIES WITH REGARD TO THIS SOFTWARE,
+ * INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN NO
+ * EVENT SHALL THE COPYRIGHT HOLDERS BE LIABLE FOR ANY SPECIAL, INDIRECT OR
+ * CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE,
+ * DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
+ * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
+ * OF THIS SOFTWARE.
+ */
+
+#include <linux/errno.h>
+#include <linux/kernel.h>
+
+#include "mipi-phy.h"
+
+/*
+ * Default D-PHY timings based on MIPI D-PHY specification. Derived from
+ * the valid ranges specified in Section 5.9 of the D-PHY specification
+ * with minor adjustments.
+ */
+int mipi_dphy_timing_get_default(struct mipi_dphy_timing *timing,
+				 unsigned long period)
+{
+	timing->clkmiss = 0;
+	timing->clkpost = 70 + 52 * period;
+	timing->clkpre = 8;
+	timing->clkprepare = 65;
+	timing->clksettle = 95;
+	timing->clktermen = 0;
+	timing->clktrail = 80;
+	timing->clkzero = 260;
+	timing->dtermen = 0;
+	timing->eot = 0;
+	timing->hsexit = 120;
+	timing->hsprepare = 65 + 5 * period;
+	timing->hszero = 145 + 5 * period;
+	timing->hssettle = 85 + 6 * period;
+	timing->hsskip = 40;
+	timing->hstrail = max(8 * period, 60 + 4 * period);
+	timing->init = 100000;
+	timing->lpx = 60;
+	timing->taget = 5 * timing->lpx;
+	timing->tago = 4 * timing->lpx;
+	timing->tasure = 2 * timing->lpx;
+	timing->wakeup = 1000000;
+
+	return 0;
+}
+
+/*
+ * Validate D-PHY timing according to MIPI Alliance Specification for D-PHY,
+ * Section 5.9 "Global Operation Timing Parameters".
+ */
+int mipi_dphy_timing_validate(struct mipi_dphy_timing *timing,
+			      unsigned long period)
+{
+	if (timing->clkmiss > 60)
+		return -EINVAL;
+
+	if (timing->clkpost < (60 + 52 * period))
+		return -EINVAL;
+
+	if (timing->clkpre < 8)
+		return -EINVAL;
+
+	if (timing->clkprepare < 38 || timing->clkprepare > 95)
+		return -EINVAL;
+
+	if (timing->clksettle < 95 || timing->clksettle > 300)
+		return -EINVAL;
+
+	if (timing->clktermen > 38)
+		return -EINVAL;
+
+	if (timing->clktrail < 60)
+		return -EINVAL;
+
+	if (timing->clkprepare + timing->clkzero < 300)
+		return -EINVAL;
+
+	if (timing->dtermen > 35 + 4 * period)
+		return -EINVAL;
+
+	if (timing->eot > 105 + 12 * period)
+		return -EINVAL;
+
+	if (timing->hsexit < 100)
+		return -EINVAL;
+
+	if (timing->hsprepare < 40 + 4 * period ||
+	    timing->hsprepare > 85 + 6 * period)
+		return -EINVAL;
+
+	if (timing->hsprepare + timing->hszero < 145 + 10 * period)
+		return -EINVAL;
+
+	if ((timing->hssettle < 85 + 6 * period) ||
+	    (timing->hssettle > 145 + 10 * period))
+		return -EINVAL;
+
+	if (timing->hsskip < 40 || timing->hsskip > 55 + 4 * period)
+		return -EINVAL;
+
+	if (timing->hstrail < max(8 * period, 60 + 4 * period))
+		return -EINVAL;
+
+	if (timing->init < 100000)
+		return -EINVAL;
+
+	if (timing->lpx < 50)
+		return -EINVAL;
+
+	if (timing->taget != 5 * timing->lpx)
+		return -EINVAL;
+
+	if (timing->tago != 4 * timing->lpx)
+		return -EINVAL;
+
+	if (timing->tasure < timing->lpx || timing->tasure > 2 * timing->lpx)
+		return -EINVAL;
+
+	if (timing->wakeup < 1000000)
+		return -EINVAL;
+
+	return 0;
+}
diff --git a/drivers/gpu/drm/tegra/mipi-phy.h b/drivers/gpu/drm/tegra/mipi-phy.h
new file mode 100644
index 0000000..d359169
--- /dev/null
+++ b/drivers/gpu/drm/tegra/mipi-phy.h
@@ -0,0 +1,65 @@
+/*
+ * Copyright (C) 2013 NVIDIA Corporation
+ *
+ * Permission to use, copy, modify, distribute, and sell this software and its
+ * documentation for any purpose is hereby granted without fee, provided that
+ * the above copyright notice appear in all copies and that both that copyright
+ * notice and this permission notice appear in supporting documentation, and
+ * that the name of the copyright holders not be used in advertising or
+ * publicity pertaining to distribution of the software without specific,
+ * written prior permission.  The copyright holders make no representations
+ * about the suitability of this software for any purpose.  It is provided "as
+ * is" without express or implied warranty.
+ *
+ * THE COPYRIGHT HOLDERS DISCLAIM ALL WARRANTIES WITH REGARD TO THIS SOFTWARE,
+ * INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN NO
+ * EVENT SHALL THE COPYRIGHT HOLDERS BE LIABLE FOR ANY SPECIAL, INDIRECT OR
+ * CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE,
+ * DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
+ * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
+ * OF THIS SOFTWARE.
+ */
+
+#ifndef DRM_TEGRA_MIPI_PHY_H
+#define DRM_TEGRA_MIPI_PHY_H
+
+/*
+ * D-PHY timing parameters
+ *
+ * A detailed description of these parameters can be found in the MIPI
+ * Alliance Specification for D-PHY, Section 5.9 "Global Operation Timing
+ * Parameters".
+ *
+ * All parameters are specified in nanoseconds.
+ */
+struct mipi_dphy_timing {
+	unsigned int clkmiss;
+	unsigned int clkpost;
+	unsigned int clkpre;
+	unsigned int clkprepare;
+	unsigned int clksettle;
+	unsigned int clktermen;
+	unsigned int clktrail;
+	unsigned int clkzero;
+	unsigned int dtermen;
+	unsigned int eot;
+	unsigned int hsexit;
+	unsigned int hsprepare;
+	unsigned int hszero;
+	unsigned int hssettle;
+	unsigned int hsskip;
+	unsigned int hstrail;
+	unsigned int init;
+	unsigned int lpx;
+	unsigned int taget;
+	unsigned int tago;
+	unsigned int tasure;
+	unsigned int wakeup;
+};
+
+int mipi_dphy_timing_get_default(struct mipi_dphy_timing *timing,
+				 unsigned long period);
+int mipi_dphy_timing_validate(struct mipi_dphy_timing *timing,
+			      unsigned long period);
+
+#endif
diff --git a/drivers/gpu/drm/tegra/output.c b/drivers/gpu/drm/tegra/output.c
index 2cb0065..57cecbd 100644
--- a/drivers/gpu/drm/tegra/output.c
+++ b/drivers/gpu/drm/tegra/output.c
@@ -9,6 +9,7 @@
 
 #include <linux/of_gpio.h>
 
+#include <drm/drm_panel.h>
 #include "drm.h"
 
 static int tegra_connector_get_modes(struct drm_connector *connector)
@@ -17,6 +18,16 @@
 	struct edid *edid = NULL;
 	int err = 0;
 
+	/*
+	 * If the panel provides one or more modes, use them exclusively and
+	 * ignore any other means of obtaining a mode.
+	 */
+	if (output->panel) {
+		err = output->panel->funcs->get_modes(output->panel);
+		if (err > 0)
+			return err;
+	}
+
 	if (output->edid)
 		edid = kmemdup(output->edid, sizeof(*edid), GFP_KERNEL);
 	else if (output->ddc)
@@ -72,6 +83,11 @@
 		else
 			status = connector_status_connected;
 	} else {
+		if (!output->panel)
+			status = connector_status_disconnected;
+		else
+			status = connector_status_connected;
+
 		if (connector->connector_type == DRM_MODE_CONNECTOR_LVDS)
 			status = connector_status_connected;
 	}
@@ -115,6 +131,16 @@
 
 static void tegra_encoder_dpms(struct drm_encoder *encoder, int mode)
 {
+	struct tegra_output *output = encoder_to_output(encoder);
+	struct drm_panel *panel = output->panel;
+
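+	/*
+	 * The panel is disabled before the output and enabled only after it,
+	 * presumably so that it is never driven without a valid video signal.
+	 */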
+	if (mode != DRM_MODE_DPMS_ON) {
+		drm_panel_disable(panel);
+		tegra_output_disable(output);
+	} else {
+		tegra_output_enable(output);
+		drm_panel_enable(panel);
+	}
 }
 
 static bool tegra_encoder_mode_fixup(struct drm_encoder *encoder,
@@ -163,14 +189,22 @@
 
 int tegra_output_probe(struct tegra_output *output)
 {
+	struct device_node *ddc, *panel;
 	enum of_gpio_flags flags;
-	struct device_node *ddc;
-	size_t size;
-	int err;
+	int err, size;
 
 	if (!output->of_node)
 		output->of_node = output->dev->of_node;
 
+	panel = of_parse_phandle(output->of_node, "nvidia,panel", 0);
+	if (panel) {
+		output->panel = of_drm_find_panel(panel);
+		if (!output->panel)
+			return -EPROBE_DEFER;
+
+		of_node_put(panel);
+	}
+
 	output->edid = of_get_property(output->of_node, "nvidia,edid", &size);
 
 	ddc = of_parse_phandle(output->of_node, "nvidia,ddc-i2c-bus", 0);
@@ -185,9 +219,6 @@
 		of_node_put(ddc);
 	}
 
-	if (!output->edid && !output->ddc)
-		return -ENODEV;
-
 	output->hpd_gpio = of_get_named_gpio_flags(output->of_node,
 						   "nvidia,hpd-gpio", 0,
 						   &flags);
@@ -256,6 +287,11 @@
 		encoder = DRM_MODE_ENCODER_TMDS;
 		break;
 
+	case TEGRA_OUTPUT_DSI:
+		connector = DRM_MODE_CONNECTOR_DSI;
+		encoder = DRM_MODE_ENCODER_DSI;
+		break;
+
 	default:
 		connector = DRM_MODE_CONNECTOR_Unknown;
 		encoder = DRM_MODE_ENCODER_NONE;
@@ -267,6 +303,9 @@
 	drm_connector_helper_add(&output->connector, &connector_helper_funcs);
 	output->connector.dpms = DRM_MODE_DPMS_OFF;
 
+	if (output->panel)
+		drm_panel_attach(output->panel, &output->connector);
+
 	drm_encoder_init(drm, &output->encoder, &encoder_funcs, encoder);
 	drm_encoder_helper_add(&output->encoder, &encoder_helper_funcs);
 
diff --git a/drivers/gpu/drm/tegra/rgb.c b/drivers/gpu/drm/tegra/rgb.c
index 3b29018..338f7f6 100644
--- a/drivers/gpu/drm/tegra/rgb.c
+++ b/drivers/gpu/drm/tegra/rgb.c
@@ -87,15 +87,60 @@
 static int tegra_output_rgb_enable(struct tegra_output *output)
 {
 	struct tegra_rgb *rgb = to_rgb(output);
+	unsigned long value;
 
 	tegra_dc_write_regs(rgb->dc, rgb_enable, ARRAY_SIZE(rgb_enable));
 
+	value = DE_SELECT_ACTIVE | DE_CONTROL_NORMAL;
+	tegra_dc_writel(rgb->dc, value, DC_DISP_DATA_ENABLE_OPTIONS);
+
+	/* XXX: parameterize? */
+	value = tegra_dc_readl(rgb->dc, DC_COM_PIN_OUTPUT_POLARITY(1));
+	value &= ~LVS_OUTPUT_POLARITY_LOW;
+	value &= ~LHS_OUTPUT_POLARITY_LOW;
+	tegra_dc_writel(rgb->dc, value, DC_COM_PIN_OUTPUT_POLARITY(1));
+
+	/* XXX: parameterize? */
+	value = DISP_DATA_FORMAT_DF1P1C | DISP_ALIGNMENT_MSB |
+		DISP_ORDER_RED_BLUE;
+	tegra_dc_writel(rgb->dc, value, DC_DISP_DISP_INTERFACE_CONTROL);
+
+	/* XXX: parameterize? */
+	value = SC0_H_QUALIFIER_NONE | SC1_H_QUALIFIER_NONE;
+	tegra_dc_writel(rgb->dc, value, DC_DISP_SHIFT_CLOCK_OPTIONS);
+
+	value = tegra_dc_readl(rgb->dc, DC_CMD_DISPLAY_COMMAND);
+	value &= ~DISP_CTRL_MODE_MASK;
+	value |= DISP_CTRL_MODE_C_DISPLAY;
+	tegra_dc_writel(rgb->dc, value, DC_CMD_DISPLAY_COMMAND);
+
+	value = tegra_dc_readl(rgb->dc, DC_CMD_DISPLAY_POWER_CONTROL);
+	value |= PW0_ENABLE | PW1_ENABLE | PW2_ENABLE | PW3_ENABLE |
+		 PW4_ENABLE | PM0_ENABLE | PM1_ENABLE;
+	tegra_dc_writel(rgb->dc, value, DC_CMD_DISPLAY_POWER_CONTROL);
+
+	tegra_dc_writel(rgb->dc, GENERAL_ACT_REQ << 8, DC_CMD_STATE_CONTROL);
+	tegra_dc_writel(rgb->dc, GENERAL_ACT_REQ, DC_CMD_STATE_CONTROL);
+
 	return 0;
 }
 
 static int tegra_output_rgb_disable(struct tegra_output *output)
 {
 	struct tegra_rgb *rgb = to_rgb(output);
+	unsigned long value;
+
+	value = tegra_dc_readl(rgb->dc, DC_CMD_DISPLAY_POWER_CONTROL);
+	value &= ~(PW0_ENABLE | PW1_ENABLE | PW2_ENABLE | PW3_ENABLE |
+		   PW4_ENABLE | PM0_ENABLE | PM1_ENABLE);
+	tegra_dc_writel(rgb->dc, value, DC_CMD_DISPLAY_POWER_CONTROL);
+
+	value = tegra_dc_readl(rgb->dc, DC_CMD_DISPLAY_COMMAND);
+	value &= ~DISP_CTRL_MODE_MASK;
+	tegra_dc_writel(rgb->dc, value, DC_CMD_DISPLAY_COMMAND);
+
+	tegra_dc_writel(rgb->dc, GENERAL_ACT_REQ << 8, DC_CMD_STATE_CONTROL);
+	tegra_dc_writel(rgb->dc, GENERAL_ACT_REQ, DC_CMD_STATE_CONTROL);
 
 	tegra_dc_write_regs(rgb->dc, rgb_disable, ARRAY_SIZE(rgb_disable));
 
@@ -213,7 +258,7 @@
 	 * RGB outputs are an exception, so we make sure they can be attached
 	 * to only their parent display controller.
 	 */
-	rgb->output.encoder.possible_crtcs = 1 << dc->pipe;
+	rgb->output.encoder.possible_crtcs = drm_crtc_mask(&dc->base);
 
 	return 0;
 }
diff --git a/drivers/gpu/drm/tilcdc/tilcdc_drv.c b/drivers/gpu/drm/tilcdc/tilcdc_drv.c
index 116da19..171a820 100644
--- a/drivers/gpu/drm/tilcdc/tilcdc_drv.c
+++ b/drivers/gpu/drm/tilcdc/tilcdc_drv.c
@@ -311,7 +311,7 @@
 	drm_fbdev_cma_restore_mode(priv->fbdev);
 }
 
-static irqreturn_t tilcdc_irq(DRM_IRQ_ARGS)
+static irqreturn_t tilcdc_irq(int irq, void *arg)
 {
 	struct drm_device *dev = arg;
 	struct tilcdc_drm_private *priv = dev->dev_private;
@@ -444,7 +444,7 @@
 {
 	struct drm_info_node *node = (struct drm_info_node *) m->private;
 	struct drm_device *dev = node->minor->dev;
-	return drm_mm_dump_table(m, dev->mm_private);
+	return drm_mm_dump_table(m, &dev->vma_offset_manager->vm_addr_space_mm);
 }
 
 static struct drm_info_list tilcdc_debugfs_list[] = {
@@ -594,7 +594,7 @@
 
 static int tilcdc_pdev_remove(struct platform_device *pdev)
 {
-	drm_platform_exit(&tilcdc_driver, pdev);
+	drm_put_dev(platform_get_drvdata(pdev));
 
 	return 0;
 }
diff --git a/drivers/gpu/drm/ttm/ttm_agp_backend.c b/drivers/gpu/drm/ttm/ttm_agp_backend.c
index 3302f99e..764be36 100644
--- a/drivers/gpu/drm/ttm/ttm_agp_backend.c
+++ b/drivers/gpu/drm/ttm/ttm_agp_backend.c
@@ -126,6 +126,7 @@
 	agp_be->ttm.func = &ttm_agp_func;
 
 	if (ttm_tt_init(&agp_be->ttm, bdev, size, page_flags, dummy_read_page)) {
+		kfree(agp_be);
 		return NULL;
 	}
 
diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c
index 07e02c4..a066513 100644
--- a/drivers/gpu/drm/ttm/ttm_bo.c
+++ b/drivers/gpu/drm/ttm/ttm_bo.c
@@ -957,7 +957,7 @@
 }
 EXPORT_SYMBOL(ttm_bo_mem_space);
 
-int ttm_bo_move_buffer(struct ttm_buffer_object *bo,
+static int ttm_bo_move_buffer(struct ttm_buffer_object *bo,
 			struct ttm_placement *placement,
 			bool interruptible,
 			bool no_wait_gpu)
diff --git a/drivers/gpu/drm/ttm/ttm_bo_util.c b/drivers/gpu/drm/ttm/ttm_bo_util.c
index 4061521..1df856f 100644
--- a/drivers/gpu/drm/ttm/ttm_bo_util.c
+++ b/drivers/gpu/drm/ttm/ttm_bo_util.c
@@ -187,7 +187,7 @@
 	}
 }
 
-int ttm_mem_reg_ioremap(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem,
+static int ttm_mem_reg_ioremap(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem,
 			void **virtual)
 {
 	struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];
@@ -219,7 +219,7 @@
 	return 0;
 }
 
-void ttm_mem_reg_iounmap(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem,
+static void ttm_mem_reg_iounmap(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem,
 			 void *virtual)
 {
 	struct ttm_mem_type_manager *man;
@@ -594,7 +594,7 @@
 	if (start_page > bo->num_pages)
 		return -EINVAL;
 #if 0
-	if (num_pages > 1 && !DRM_SUSER(DRM_CURPROC))
+	if (num_pages > 1 && !capable(CAP_SYS_ADMIN))
 		return -EPERM;
 #endif
 	(void) ttm_mem_io_lock(man, false);
diff --git a/drivers/gpu/drm/ttm/ttm_bo_vm.c b/drivers/gpu/drm/ttm/ttm_bo_vm.c
index 6440eea..801231c 100644
--- a/drivers/gpu/drm/ttm/ttm_bo_vm.c
+++ b/drivers/gpu/drm/ttm/ttm_bo_vm.c
@@ -132,6 +132,15 @@
 		return VM_FAULT_NOPAGE;
 	}
 
+	/*
+	 * Refuse to fault imported pages. This should be handled
+	 * (if at all) by redirecting mmap to the exporter.
+	 */
+	if (bo->ttm && (bo->ttm->page_flags & TTM_PAGE_FLAG_SG)) {
+		retval = VM_FAULT_SIGBUS;
+		goto out_unlock;
+	}
+
 	if (bdev->driver->fault_reserve_notify) {
 		ret = bdev->driver->fault_reserve_notify(bo);
 		switch (ret) {
@@ -217,10 +226,17 @@
 			} else if (unlikely(!page)) {
 				break;
 			}
+			page->mapping = vma->vm_file->f_mapping;
+			page->index = drm_vma_node_start(&bo->vma_node) +
+				page_offset;
 			pfn = page_to_pfn(page);
 		}
 
-		ret = vm_insert_mixed(&cvma, address, pfn);
+		if (vma->vm_flags & VM_MIXEDMAP)
+			ret = vm_insert_mixed(&cvma, address, pfn);
+		else
+			ret = vm_insert_pfn(&cvma, address, pfn);
+
 		/*
 		 * Somebody beat us to this PTE or prefaulting to
 		 * an already populated PTE, or prefaulting error.
@@ -250,6 +266,8 @@
 	struct ttm_buffer_object *bo =
 	    (struct ttm_buffer_object *)vma->vm_private_data;
 
+	WARN_ON(bo->bdev->dev_mapping != vma->vm_file->f_mapping);
+
 	(void)ttm_bo_reference(bo);
 }
 
@@ -319,7 +337,14 @@
 	 */
 
 	vma->vm_private_data = bo;
-	vma->vm_flags |= VM_IO | VM_MIXEDMAP | VM_DONTEXPAND | VM_DONTDUMP;
+
+	/*
+	 * PFNMAP is faster than MIXEDMAP due to reduced page
+	 * administration. So use MIXEDMAP only if private VMA, where
+	 * we need to support COW.
+	 */
+	vma->vm_flags |= (vma->vm_flags & VM_SHARED) ? VM_PFNMAP : VM_MIXEDMAP;
+	vma->vm_flags |= VM_IO | VM_DONTEXPAND | VM_DONTDUMP;
 	return 0;
 out_unref:
 	ttm_bo_unref(&bo);
@@ -334,7 +359,8 @@
 
 	vma->vm_ops = &ttm_bo_vm_ops;
 	vma->vm_private_data = ttm_bo_reference(bo);
-	vma->vm_flags |= VM_IO | VM_MIXEDMAP | VM_DONTEXPAND;
+	vma->vm_flags |= (vma->vm_flags & VM_SHARED) ? VM_PFNMAP : VM_MIXEDMAP;
+	vma->vm_flags |= VM_IO | VM_DONTEXPAND;
 	return 0;
 }
 EXPORT_SYMBOL(ttm_fbdev_mmap);
diff --git a/drivers/gpu/drm/ttm/ttm_lock.c b/drivers/gpu/drm/ttm/ttm_lock.c
index 3daa9a3..6a95454 100644
--- a/drivers/gpu/drm/ttm/ttm_lock.c
+++ b/drivers/gpu/drm/ttm/ttm_lock.c
@@ -186,14 +186,6 @@
 }
 EXPORT_SYMBOL(ttm_write_lock);
 
-void ttm_write_lock_downgrade(struct ttm_lock *lock)
-{
-	spin_lock(&lock->lock);
-	lock->rw = 1;
-	wake_up_all(&lock->queue);
-	spin_unlock(&lock->lock);
-}
-
 static int __ttm_vt_unlock(struct ttm_lock *lock)
 {
 	int ret = 0;
diff --git a/drivers/gpu/drm/ttm/ttm_object.c b/drivers/gpu/drm/ttm/ttm_object.c
index 6fe7b92..53b51c4 100644
--- a/drivers/gpu/drm/ttm/ttm_object.c
+++ b/drivers/gpu/drm/ttm/ttm_object.c
@@ -68,7 +68,7 @@
 
 struct ttm_object_file {
 	struct ttm_object_device *tdev;
-	rwlock_t lock;
+	spinlock_t lock;
 	struct list_head ref_list;
 	struct drm_open_hash ref_hash[TTM_REF_NUM];
 	struct kref refcount;
@@ -118,6 +118,7 @@
  */
 
 struct ttm_ref_object {
+	struct rcu_head rcu_head;
 	struct drm_hash_item hash;
 	struct list_head head;
 	struct kref kref;
@@ -210,10 +211,9 @@
 	 * call_rcu() or ttm_base_object_kfree().
 	 */
 
-	if (base->refcount_release) {
-		ttm_object_file_unref(&base->tfile);
+	ttm_object_file_unref(&base->tfile);
+	if (base->refcount_release)
 		base->refcount_release(&base);
-	}
 }
 
 void ttm_base_object_unref(struct ttm_base_object **p_base)
@@ -229,33 +229,47 @@
 struct ttm_base_object *ttm_base_object_lookup(struct ttm_object_file *tfile,
 					       uint32_t key)
 {
-	struct ttm_object_device *tdev = tfile->tdev;
-	struct ttm_base_object *uninitialized_var(base);
+	struct ttm_base_object *base = NULL;
 	struct drm_hash_item *hash;
+	struct drm_open_hash *ht = &tfile->ref_hash[TTM_REF_USAGE];
 	int ret;
 
 	rcu_read_lock();
-	ret = drm_ht_find_item_rcu(&tdev->object_hash, key, &hash);
+	ret = drm_ht_find_item_rcu(ht, key, &hash);
 
 	if (likely(ret == 0)) {
-		base = drm_hash_entry(hash, struct ttm_base_object, hash);
-		ret = kref_get_unless_zero(&base->refcount) ? 0 : -EINVAL;
+		base = drm_hash_entry(hash, struct ttm_ref_object, hash)->obj;
+		if (!kref_get_unless_zero(&base->refcount))
+			base = NULL;
 	}
 	rcu_read_unlock();
 
-	if (unlikely(ret != 0))
-		return NULL;
-
-	if (tfile != base->tfile && !base->shareable) {
-		pr_err("Attempted access of non-shareable object\n");
-		ttm_base_object_unref(&base);
-		return NULL;
-	}
-
 	return base;
 }
 EXPORT_SYMBOL(ttm_base_object_lookup);
 
+struct ttm_base_object *
+ttm_base_object_lookup_for_ref(struct ttm_object_device *tdev, uint32_t key)
+{
+	struct ttm_base_object *base = NULL;
+	struct drm_hash_item *hash;
+	struct drm_open_hash *ht = &tdev->object_hash;
+	int ret;
+
+	rcu_read_lock();
+	ret = drm_ht_find_item_rcu(ht, key, &hash);
+
+	if (likely(ret == 0)) {
+		base = drm_hash_entry(hash, struct ttm_base_object, hash);
+		if (!kref_get_unless_zero(&base->refcount))
+			base = NULL;
+	}
+	rcu_read_unlock();
+
+	return base;
+}
+EXPORT_SYMBOL(ttm_base_object_lookup_for_ref);
+
 int ttm_ref_object_add(struct ttm_object_file *tfile,
 		       struct ttm_base_object *base,
 		       enum ttm_ref_type ref_type, bool *existed)
@@ -266,21 +280,25 @@
 	struct ttm_mem_global *mem_glob = tfile->tdev->mem_glob;
 	int ret = -EINVAL;
 
+	if (base->tfile != tfile && !base->shareable)
+		return -EPERM;
+
 	if (existed != NULL)
 		*existed = true;
 
 	while (ret == -EINVAL) {
-		read_lock(&tfile->lock);
-		ret = drm_ht_find_item(ht, base->hash.key, &hash);
+		rcu_read_lock();
+		ret = drm_ht_find_item_rcu(ht, base->hash.key, &hash);
 
 		if (ret == 0) {
 			ref = drm_hash_entry(hash, struct ttm_ref_object, hash);
-			kref_get(&ref->kref);
-			read_unlock(&tfile->lock);
-			break;
+			if (kref_get_unless_zero(&ref->kref)) {
+				rcu_read_unlock();
+				break;
+			}
 		}
 
-		read_unlock(&tfile->lock);
+		rcu_read_unlock();
 		ret = ttm_mem_global_alloc(mem_glob, sizeof(*ref),
 					   false, false);
 		if (unlikely(ret != 0))
@@ -297,19 +315,19 @@
 		ref->ref_type = ref_type;
 		kref_init(&ref->kref);
 
-		write_lock(&tfile->lock);
-		ret = drm_ht_insert_item(ht, &ref->hash);
+		spin_lock(&tfile->lock);
+		ret = drm_ht_insert_item_rcu(ht, &ref->hash);
 
 		if (likely(ret == 0)) {
 			list_add_tail(&ref->head, &tfile->ref_list);
 			kref_get(&base->refcount);
-			write_unlock(&tfile->lock);
+			spin_unlock(&tfile->lock);
 			if (existed != NULL)
 				*existed = false;
 			break;
 		}
 
-		write_unlock(&tfile->lock);
+		spin_unlock(&tfile->lock);
 		BUG_ON(ret != -EINVAL);
 
 		ttm_mem_global_free(mem_glob, sizeof(*ref));
@@ -330,17 +348,17 @@
 	struct ttm_mem_global *mem_glob = tfile->tdev->mem_glob;
 
 	ht = &tfile->ref_hash[ref->ref_type];
-	(void)drm_ht_remove_item(ht, &ref->hash);
+	(void)drm_ht_remove_item_rcu(ht, &ref->hash);
 	list_del(&ref->head);
-	write_unlock(&tfile->lock);
+	spin_unlock(&tfile->lock);
 
 	if (ref->ref_type != TTM_REF_USAGE && base->ref_obj_release)
 		base->ref_obj_release(base, ref->ref_type);
 
 	ttm_base_object_unref(&ref->obj);
 	ttm_mem_global_free(mem_glob, sizeof(*ref));
-	kfree(ref);
-	write_lock(&tfile->lock);
+	kfree_rcu(ref, rcu_head);
+	spin_lock(&tfile->lock);
 }
 
 int ttm_ref_object_base_unref(struct ttm_object_file *tfile,
@@ -351,15 +369,15 @@
 	struct drm_hash_item *hash;
 	int ret;
 
-	write_lock(&tfile->lock);
+	spin_lock(&tfile->lock);
 	ret = drm_ht_find_item(ht, key, &hash);
 	if (unlikely(ret != 0)) {
-		write_unlock(&tfile->lock);
+		spin_unlock(&tfile->lock);
 		return -EINVAL;
 	}
 	ref = drm_hash_entry(hash, struct ttm_ref_object, hash);
 	kref_put(&ref->kref, ttm_ref_object_release);
-	write_unlock(&tfile->lock);
+	spin_unlock(&tfile->lock);
 	return 0;
 }
 EXPORT_SYMBOL(ttm_ref_object_base_unref);
@@ -372,7 +390,7 @@
 	struct ttm_object_file *tfile = *p_tfile;
 
 	*p_tfile = NULL;
-	write_lock(&tfile->lock);
+	spin_lock(&tfile->lock);
 
 	/*
 	 * Since we release the lock within the loop, we have to
@@ -388,7 +406,7 @@
 	for (i = 0; i < TTM_REF_NUM; ++i)
 		drm_ht_remove(&tfile->ref_hash[i]);
 
-	write_unlock(&tfile->lock);
+	spin_unlock(&tfile->lock);
 	ttm_object_file_unref(&tfile);
 }
 EXPORT_SYMBOL(ttm_object_file_release);
@@ -404,7 +422,7 @@
 	if (unlikely(tfile == NULL))
 		return NULL;
 
-	rwlock_init(&tfile->lock);
+	spin_lock_init(&tfile->lock);
 	tfile->tdev = tdev;
 	kref_init(&tfile->refcount);
 	INIT_LIST_HEAD(&tfile->ref_list);
diff --git a/drivers/gpu/drm/ttm/ttm_tt.c b/drivers/gpu/drm/ttm/ttm_tt.c
index 210d503..75f3190 100644
--- a/drivers/gpu/drm/ttm/ttm_tt.c
+++ b/drivers/gpu/drm/ttm/ttm_tt.c
@@ -170,9 +170,8 @@
 		ttm_tt_unbind(ttm);
 	}
 
-	if (ttm->state == tt_unbound) {
-		ttm->bdev->driver->ttm_tt_unpopulate(ttm);
-	}
+	if (ttm->state == tt_unbound)
+		ttm_tt_unpopulate(ttm);
 
 	if (!(ttm->page_flags & TTM_PAGE_FLAG_PERSISTENT_SWAP) &&
 	    ttm->swap_storage)
@@ -362,7 +361,7 @@
 		page_cache_release(to_page);
 	}
 
-	ttm->bdev->driver->ttm_tt_unpopulate(ttm);
+	ttm_tt_unpopulate(ttm);
 	ttm->swap_storage = swap_storage;
 	ttm->page_flags |= TTM_PAGE_FLAG_SWAPPED;
 	if (persistent_swap_storage)
@@ -375,3 +374,26 @@
 
 	return ret;
 }
+
+static void ttm_tt_clear_mapping(struct ttm_tt *ttm)
+{
+	pgoff_t i;
+	struct page **page = ttm->pages;
+
+	if (ttm->page_flags & TTM_PAGE_FLAG_SG)
+		return;
+
+	for (i = 0; i < ttm->num_pages; ++i) {
+		(*page)->mapping = NULL;
+		(*page++)->index = 0;
+	}
+}
+
+void ttm_tt_unpopulate(struct ttm_tt *ttm)
+{
+	if (ttm->state == tt_unpopulated)
+		return;
+
+	ttm_tt_clear_mapping(ttm);
+	ttm->bdev->driver->ttm_tt_unpopulate(ttm);
+}
diff --git a/drivers/gpu/drm/udl/udl_fb.c b/drivers/gpu/drm/udl/udl_fb.c
index 97e9d61..dbadd49 100644
--- a/drivers/gpu/drm/udl/udl_fb.c
+++ b/drivers/gpu/drm/udl/udl_fb.c
@@ -403,15 +403,17 @@
 	int i;
 	int ret = 0;
 
+	drm_modeset_lock_all(fb->dev);
+
 	if (!ufb->active_16)
-		return 0;
+		goto unlock;
 
 	if (ufb->obj->base.import_attach) {
 		ret = dma_buf_begin_cpu_access(ufb->obj->base.import_attach->dmabuf,
 					       0, ufb->obj->base.size,
 					       DMA_FROM_DEVICE);
 		if (ret)
-			return ret;
+			goto unlock;
 	}
 
 	for (i = 0; i < num_clips; i++) {
@@ -419,7 +421,7 @@
 				  clips[i].x2 - clips[i].x1,
 				  clips[i].y2 - clips[i].y1);
 		if (ret)
-			break;
+			goto unlock;
 	}
 
 	if (ufb->obj->base.import_attach) {
@@ -427,6 +429,10 @@
 				       0, ufb->obj->base.size,
 				       DMA_FROM_DEVICE);
 	}
+
+ unlock:
+	drm_modeset_unlock_all(fb->dev);
+
 	return ret;
 }
 
diff --git a/drivers/gpu/drm/via/via_dma.c b/drivers/gpu/drm/via/via_dma.c
index 652f9b4..a18479c 100644
--- a/drivers/gpu/drm/via/via_dma.c
+++ b/drivers/gpu/drm/via/via_dma.c
@@ -60,7 +60,7 @@
 	dev_priv->dma_low += 8;					\
 }
 
-#define via_flush_write_combine() DRM_MEMORYBARRIER()
+#define via_flush_write_combine() mb()
 
 #define VIA_OUT_RING_QW(w1, w2)	do {		\
 	*vb++ = (w1);				\
@@ -234,13 +234,13 @@
 
 	switch (init->func) {
 	case VIA_INIT_DMA:
-		if (!DRM_SUSER(DRM_CURPROC))
+		if (!capable(CAP_SYS_ADMIN))
 			retcode = -EPERM;
 		else
 			retcode = via_initialize(dev, dev_priv, init);
 		break;
 	case VIA_CLEANUP_DMA:
-		if (!DRM_SUSER(DRM_CURPROC))
+		if (!capable(CAP_SYS_ADMIN))
 			retcode = -EPERM;
 		else
 			retcode = via_dma_cleanup(dev);
@@ -273,7 +273,7 @@
 	if (cmd->size > VIA_PCI_BUF_SIZE)
 		return -ENOMEM;
 
-	if (DRM_COPY_FROM_USER(dev_priv->pci_buf, cmd->buf, cmd->size))
+	if (copy_from_user(dev_priv->pci_buf, cmd->buf, cmd->size))
 		return -EFAULT;
 
 	/*
@@ -346,7 +346,7 @@
 
 	if (cmd->size > VIA_PCI_BUF_SIZE)
 		return -ENOMEM;
-	if (DRM_COPY_FROM_USER(dev_priv->pci_buf, cmd->buf, cmd->size))
+	if (copy_from_user(dev_priv->pci_buf, cmd->buf, cmd->size))
 		return -EFAULT;
 
 	if ((ret =
@@ -543,7 +543,7 @@
 
 	VIA_WRITE(VIA_REG_TRANSPACE, pause_addr_hi);
 	VIA_WRITE(VIA_REG_TRANSPACE, pause_addr_lo);
-	DRM_WRITEMEMORYBARRIER();
+	wmb();
 	VIA_WRITE(VIA_REG_TRANSPACE, command | HC_HAGPCMNT_MASK);
 	VIA_READ(VIA_REG_TRANSPACE);
 
diff --git a/drivers/gpu/drm/via/via_dmablit.c b/drivers/gpu/drm/via/via_dmablit.c
index 8b0f259..ba33cf6 100644
--- a/drivers/gpu/drm/via/via_dmablit.c
+++ b/drivers/gpu/drm/via/via_dmablit.c
@@ -217,7 +217,7 @@
 	VIA_WRITE(VIA_PCI_DMA_MR0  + engine*0x04, VIA_DMA_MR_CM | VIA_DMA_MR_TDIE);
 	VIA_WRITE(VIA_PCI_DMA_BCR0 + engine*0x10, 0);
 	VIA_WRITE(VIA_PCI_DMA_DPR0 + engine*0x10, vsg->chain_start);
-	DRM_WRITEMEMORYBARRIER();
+	wmb();
 	VIA_WRITE(VIA_PCI_DMA_CSR0 + engine*0x04, VIA_DMA_CSR_DE | VIA_DMA_CSR_TS);
 	VIA_READ(VIA_PCI_DMA_CSR0 + engine*0x04);
 }
@@ -338,7 +338,7 @@
 
 		blitq->blits[cur]->aborted = blitq->aborting;
 		blitq->done_blit_handle++;
-		DRM_WAKEUP(blitq->blit_queue + cur);
+		wake_up(blitq->blit_queue + cur);
 
 		cur++;
 		if (cur >= VIA_NUM_BLIT_SLOTS)
@@ -363,7 +363,7 @@
 
 		via_abort_dmablit(dev, engine);
 		blitq->aborting = 1;
-		blitq->end = jiffies + DRM_HZ;
+		blitq->end = jiffies + HZ;
 	}
 
 	if (!blitq->is_active) {
@@ -372,7 +372,7 @@
 			blitq->is_active = 1;
 			blitq->cur = cur;
 			blitq->num_outstanding--;
-			blitq->end = jiffies + DRM_HZ;
+			blitq->end = jiffies + HZ;
 			if (!timer_pending(&blitq->poll_timer))
 				mod_timer(&blitq->poll_timer, jiffies + 1);
 		} else {
@@ -436,7 +436,7 @@
 	int ret = 0;
 
 	if (via_dmablit_active(blitq, engine, handle, &queue)) {
-		DRM_WAIT_ON(ret, *queue, 3 * DRM_HZ,
+		DRM_WAIT_ON(ret, *queue, 3 * HZ,
 			    !via_dmablit_active(blitq, engine, handle, NULL));
 	}
 	DRM_DEBUG("DMA blit sync handle 0x%x engine %d returned %d\n",
@@ -521,7 +521,7 @@
 
 		spin_unlock_irqrestore(&blitq->blit_lock, irqsave);
 
-		DRM_WAKEUP(&blitq->busy_queue);
+		wake_up(&blitq->busy_queue);
 
 		via_free_sg_info(dev->pdev, cur_sg);
 		kfree(cur_sg);
@@ -561,8 +561,8 @@
 		blitq->aborting = 0;
 		spin_lock_init(&blitq->blit_lock);
 		for (j = 0; j < VIA_NUM_BLIT_SLOTS; ++j)
-			DRM_INIT_WAITQUEUE(blitq->blit_queue + j);
-		DRM_INIT_WAITQUEUE(&blitq->busy_queue);
+			init_waitqueue_head(blitq->blit_queue + j);
+		init_waitqueue_head(&blitq->busy_queue);
 		INIT_WORK(&blitq->wq, via_dmablit_workqueue);
 		setup_timer(&blitq->poll_timer, via_dmablit_timer,
 				(unsigned long)blitq);
@@ -688,7 +688,7 @@
 	while (blitq->num_free == 0) {
 		spin_unlock_irqrestore(&blitq->blit_lock, irqsave);
 
-		DRM_WAIT_ON(ret, blitq->busy_queue, DRM_HZ, blitq->num_free > 0);
+		DRM_WAIT_ON(ret, blitq->busy_queue, HZ, blitq->num_free > 0);
 		if (ret)
 			return (-EINTR == ret) ? -EAGAIN : ret;
 
@@ -713,7 +713,7 @@
 	spin_lock_irqsave(&blitq->blit_lock, irqsave);
 	blitq->num_free++;
 	spin_unlock_irqrestore(&blitq->blit_lock, irqsave);
-	DRM_WAKEUP(&blitq->busy_queue);
+	wake_up(&blitq->busy_queue);
 }
 
 /*
diff --git a/drivers/gpu/drm/via/via_drv.c b/drivers/gpu/drm/via/via_drv.c
index 92684a9..50abc2a 100644
--- a/drivers/gpu/drm/via/via_drv.c
+++ b/drivers/gpu/drm/via/via_drv.c
@@ -46,7 +46,7 @@
 	return 0;
 }
 
-void via_driver_postclose(struct drm_device *dev, struct drm_file *file)
+static void via_driver_postclose(struct drm_device *dev, struct drm_file *file)
 {
 	struct via_file_private *file_priv = file->driver_priv;
 
diff --git a/drivers/gpu/drm/via/via_drv.h b/drivers/gpu/drm/via/via_drv.h
index a811ef2..ad02732 100644
--- a/drivers/gpu/drm/via/via_drv.h
+++ b/drivers/gpu/drm/via/via_drv.h
@@ -138,7 +138,7 @@
 extern int via_enable_vblank(struct drm_device *dev, int crtc);
 extern void via_disable_vblank(struct drm_device *dev, int crtc);
 
-extern irqreturn_t via_driver_irq_handler(DRM_IRQ_ARGS);
+extern irqreturn_t via_driver_irq_handler(int irq, void *arg);
 extern void via_driver_irq_preinstall(struct drm_device *dev);
 extern int via_driver_irq_postinstall(struct drm_device *dev);
 extern void via_driver_irq_uninstall(struct drm_device *dev);
diff --git a/drivers/gpu/drm/via/via_irq.c b/drivers/gpu/drm/via/via_irq.c
index ac98964..1319433 100644
--- a/drivers/gpu/drm/via/via_irq.c
+++ b/drivers/gpu/drm/via/via_irq.c
@@ -104,7 +104,7 @@
 	return atomic_read(&dev_priv->vbl_received);
 }
 
-irqreturn_t via_driver_irq_handler(DRM_IRQ_ARGS)
+irqreturn_t via_driver_irq_handler(int irq, void *arg)
 {
 	struct drm_device *dev = (struct drm_device *) arg;
 	drm_via_private_t *dev_priv = (drm_via_private_t *) dev->dev_private;
@@ -138,7 +138,7 @@
 	for (i = 0; i < dev_priv->num_irqs; ++i) {
 		if (status & cur_irq->pending_mask) {
 			atomic_inc(&cur_irq->irq_received);
-			DRM_WAKEUP(&cur_irq->irq_queue);
+			wake_up(&cur_irq->irq_queue);
 			handled = 1;
 			if (dev_priv->irq_map[drm_via_irq_dma0_td] == i)
 				via_dmablit_handler(dev, 0, 1);
@@ -239,12 +239,12 @@
 	cur_irq = dev_priv->via_irqs + real_irq;
 
 	if (masks[real_irq][2] && !force_sequence) {
-		DRM_WAIT_ON(ret, cur_irq->irq_queue, 3 * DRM_HZ,
+		DRM_WAIT_ON(ret, cur_irq->irq_queue, 3 * HZ,
 			    ((VIA_READ(masks[irq][2]) & masks[irq][3]) ==
 			     masks[irq][4]));
 		cur_irq_sequence = atomic_read(&cur_irq->irq_received);
 	} else {
-		DRM_WAIT_ON(ret, cur_irq->irq_queue, 3 * DRM_HZ,
+		DRM_WAIT_ON(ret, cur_irq->irq_queue, 3 * HZ,
 			    (((cur_irq_sequence =
 			       atomic_read(&cur_irq->irq_received)) -
 			      *sequence) <= (1 << 23)));
@@ -287,7 +287,7 @@
 			atomic_set(&cur_irq->irq_received, 0);
 			cur_irq->enable_mask = dev_priv->irq_masks[i][0];
 			cur_irq->pending_mask = dev_priv->irq_masks[i][1];
-			DRM_INIT_WAITQUEUE(&cur_irq->irq_queue);
+			init_waitqueue_head(&cur_irq->irq_queue);
 			dev_priv->irq_enable_mask |= cur_irq->enable_mask;
 			dev_priv->irq_pending_mask |= cur_irq->pending_mask;
 			cur_irq++;
diff --git a/drivers/gpu/drm/via/via_video.c b/drivers/gpu/drm/via/via_video.c
index 6569efa..a9ffbad 100644
--- a/drivers/gpu/drm/via/via_video.c
+++ b/drivers/gpu/drm/via/via_video.c
@@ -36,7 +36,7 @@
 	DRM_DEBUG("\n");
 
 	for (i = 0; i < VIA_NR_XVMC_LOCKS; ++i) {
-		DRM_INIT_WAITQUEUE(&(dev_priv->decoder_queue[i]));
+		init_waitqueue_head(&(dev_priv->decoder_queue[i]));
 		XVMCLOCKPTR(dev_priv->sarea_priv, i)->lock = 0;
 	}
 }
@@ -58,7 +58,7 @@
 		if ((_DRM_LOCKING_CONTEXT(*lock) == context)) {
 			if (_DRM_LOCK_IS_HELD(*lock)
 			    && (*lock & _DRM_LOCK_CONT)) {
-				DRM_WAKEUP(&(dev_priv->decoder_queue[i]));
+				wake_up(&(dev_priv->decoder_queue[i]));
 			}
 			*lock = 0;
 		}
@@ -83,10 +83,10 @@
 	switch (fx->func) {
 	case VIA_FUTEX_WAIT:
 		DRM_WAIT_ON(ret, dev_priv->decoder_queue[fx->lock],
-			    (fx->ms / 10) * (DRM_HZ / 100), *lock != fx->val);
+			    (fx->ms / 10) * (HZ / 100), *lock != fx->val);
 		return ret;
 	case VIA_FUTEX_WAKE:
-		DRM_WAKEUP(&(dev_priv->decoder_queue[fx->lock]));
+		wake_up(&(dev_priv->decoder_queue[fx->lock]));
 		return 0;
 	}
 	return 0;
diff --git a/drivers/gpu/drm/vmwgfx/Makefile b/drivers/gpu/drm/vmwgfx/Makefile
index 9f8b690..458cdf6 100644
--- a/drivers/gpu/drm/vmwgfx/Makefile
+++ b/drivers/gpu/drm/vmwgfx/Makefile
@@ -6,6 +6,6 @@
 	    vmwgfx_fifo.o vmwgfx_irq.o vmwgfx_ldu.o vmwgfx_ttm_glue.o \
 	    vmwgfx_overlay.o vmwgfx_marker.o vmwgfx_gmrid_manager.o \
 	    vmwgfx_fence.o vmwgfx_dmabuf.o vmwgfx_scrn.o vmwgfx_context.o \
-	    vmwgfx_surface.o vmwgfx_prime.o
+	    vmwgfx_surface.o vmwgfx_prime.o vmwgfx_mob.o vmwgfx_shader.o
 
 obj-$(CONFIG_DRM_VMWGFX) := vmwgfx.o
diff --git a/drivers/gpu/drm/vmwgfx/svga3d_reg.h b/drivers/gpu/drm/vmwgfx/svga3d_reg.h
index d0e085e..bb594c1 100644
--- a/drivers/gpu/drm/vmwgfx/svga3d_reg.h
+++ b/drivers/gpu/drm/vmwgfx/svga3d_reg.h
@@ -34,6 +34,8 @@
 
 #include "svga_reg.h"
 
+typedef uint32 PPN;
+typedef __le64 PPN64;
 
 /*
  * 3D Hardware Version
@@ -71,6 +73,9 @@
 #define SVGA3D_MAX_CONTEXT_IDS                  256
 #define SVGA3D_MAX_SURFACE_IDS                  (32 * 1024)
 
+#define SVGA3D_NUM_TEXTURE_UNITS                32
+#define SVGA3D_NUM_LIGHTS                       8
+
 /*
  * Surface formats.
  *
@@ -81,6 +86,7 @@
  */
 
 typedef enum SVGA3dSurfaceFormat {
+   SVGA3D_FORMAT_MIN                   = 0,
    SVGA3D_FORMAT_INVALID               = 0,
 
    SVGA3D_X8R8G8B8                     = 1,
@@ -134,12 +140,6 @@
    SVGA3D_RG_S10E5                     = 35,
    SVGA3D_RG_S23E8                     = 36,
 
-   /*
-    * Any surface can be used as a buffer object, but SVGA3D_BUFFER is
-    * the most efficient format to use when creating new surfaces
-    * expressly for index or vertex data.
-    */
-
    SVGA3D_BUFFER                       = 37,
 
    SVGA3D_Z_D24X8                      = 38,
@@ -159,15 +159,114 @@
    /* Video format with alpha */
    SVGA3D_AYUV                         = 45,
 
+   SVGA3D_R32G32B32A32_TYPELESS        = 46,
+   SVGA3D_R32G32B32A32_FLOAT           = 25,
+   SVGA3D_R32G32B32A32_UINT            = 47,
+   SVGA3D_R32G32B32A32_SINT            = 48,
+   SVGA3D_R32G32B32_TYPELESS           = 49,
+   SVGA3D_R32G32B32_FLOAT              = 50,
+   SVGA3D_R32G32B32_UINT               = 51,
+   SVGA3D_R32G32B32_SINT               = 52,
+   SVGA3D_R16G16B16A16_TYPELESS        = 53,
+   SVGA3D_R16G16B16A16_FLOAT           = 24,
+   SVGA3D_R16G16B16A16_UNORM           = 41,
+   SVGA3D_R16G16B16A16_UINT            = 54,
+   SVGA3D_R16G16B16A16_SNORM           = 55,
+   SVGA3D_R16G16B16A16_SINT            = 56,
+   SVGA3D_R32G32_TYPELESS              = 57,
+   SVGA3D_R32G32_FLOAT                 = 36,
+   SVGA3D_R32G32_UINT                  = 58,
+   SVGA3D_R32G32_SINT                  = 59,
+   SVGA3D_R32G8X24_TYPELESS            = 60,
+   SVGA3D_D32_FLOAT_S8X24_UINT         = 61,
+   SVGA3D_R32_FLOAT_X8X24_TYPELESS     = 62,
+   SVGA3D_X32_TYPELESS_G8X24_UINT      = 63,
+   SVGA3D_R10G10B10A2_TYPELESS         = 64,
+   SVGA3D_R10G10B10A2_UNORM            = 26,
+   SVGA3D_R10G10B10A2_UINT             = 65,
+   SVGA3D_R11G11B10_FLOAT              = 66,
+   SVGA3D_R8G8B8A8_TYPELESS            = 67,
+   SVGA3D_R8G8B8A8_UNORM               = 68,
+   SVGA3D_R8G8B8A8_UNORM_SRGB          = 69,
+   SVGA3D_R8G8B8A8_UINT                = 70,
+   SVGA3D_R8G8B8A8_SNORM               = 28,
+   SVGA3D_R8G8B8A8_SINT                = 71,
+   SVGA3D_R16G16_TYPELESS              = 72,
+   SVGA3D_R16G16_FLOAT                 = 35,
+   SVGA3D_R16G16_UNORM                 = 40,
+   SVGA3D_R16G16_UINT                  = 73,
+   SVGA3D_R16G16_SNORM                 = 39,
+   SVGA3D_R16G16_SINT                  = 74,
+   SVGA3D_R32_TYPELESS                 = 75,
+   SVGA3D_D32_FLOAT                    = 76,
+   SVGA3D_R32_FLOAT                    = 34,
+   SVGA3D_R32_UINT                     = 77,
+   SVGA3D_R32_SINT                     = 78,
+   SVGA3D_R24G8_TYPELESS               = 79,
+   SVGA3D_D24_UNORM_S8_UINT            = 80,
+   SVGA3D_R24_UNORM_X8_TYPELESS        = 81,
+   SVGA3D_X24_TYPELESS_G8_UINT         = 82,
+   SVGA3D_R8G8_TYPELESS                = 83,
+   SVGA3D_R8G8_UNORM                   = 84,
+   SVGA3D_R8G8_UINT                    = 85,
+   SVGA3D_R8G8_SNORM                   = 27,
+   SVGA3D_R8G8_SINT                    = 86,
+   SVGA3D_R16_TYPELESS                 = 87,
+   SVGA3D_R16_FLOAT                    = 33,
+   SVGA3D_D16_UNORM                    = 8,
+   SVGA3D_R16_UNORM                    = 88,
+   SVGA3D_R16_UINT                     = 89,
+   SVGA3D_R16_SNORM                    = 90,
+   SVGA3D_R16_SINT                     = 91,
+   SVGA3D_R8_TYPELESS                  = 92,
+   SVGA3D_R8_UNORM                     = 93,
+   SVGA3D_R8_UINT                      = 94,
+   SVGA3D_R8_SNORM                     = 95,
+   SVGA3D_R8_SINT                      = 96,
+   SVGA3D_A8_UNORM                     = 32,
+   SVGA3D_R1_UNORM                     = 97,
+   SVGA3D_R9G9B9E5_SHAREDEXP           = 98,
+   SVGA3D_R8G8_B8G8_UNORM              = 99,
+   SVGA3D_G8R8_G8B8_UNORM              = 100,
+   SVGA3D_BC1_TYPELESS                 = 101,
+   SVGA3D_BC1_UNORM                    = 15,
+   SVGA3D_BC1_UNORM_SRGB               = 102,
+   SVGA3D_BC2_TYPELESS                 = 103,
+   SVGA3D_BC2_UNORM                    = 17,
+   SVGA3D_BC2_UNORM_SRGB               = 104,
+   SVGA3D_BC3_TYPELESS                 = 105,
+   SVGA3D_BC3_UNORM                    = 19,
+   SVGA3D_BC3_UNORM_SRGB               = 106,
+   SVGA3D_BC4_TYPELESS                 = 107,
    SVGA3D_BC4_UNORM                    = 108,
+   SVGA3D_BC4_SNORM                    = 109,
+   SVGA3D_BC5_TYPELESS                 = 110,
    SVGA3D_BC5_UNORM                    = 111,
+   SVGA3D_BC5_SNORM                    = 112,
+   SVGA3D_B5G6R5_UNORM                 = 3,
+   SVGA3D_B5G5R5A1_UNORM               = 5,
+   SVGA3D_B8G8R8A8_UNORM               = 2,
+   SVGA3D_B8G8R8X8_UNORM               = 1,
+   SVGA3D_R10G10B10_XR_BIAS_A2_UNORM   = 113,
+   SVGA3D_B8G8R8A8_TYPELESS            = 114,
+   SVGA3D_B8G8R8A8_UNORM_SRGB          = 115,
+   SVGA3D_B8G8R8X8_TYPELESS            = 116,
+   SVGA3D_B8G8R8X8_UNORM_SRGB          = 117,
 
    /* Advanced D3D9 depth formats. */
    SVGA3D_Z_DF16                       = 118,
    SVGA3D_Z_DF24                       = 119,
    SVGA3D_Z_D24S8_INT                  = 120,
 
-   SVGA3D_FORMAT_MAX
+   /* Planar video formats. */
+   SVGA3D_YV12                         = 121,
+
+   /* Shader constant formats. */
+   SVGA3D_SURFACE_SHADERCONST_FLOAT    = 122,
+   SVGA3D_SURFACE_SHADERCONST_INT      = 123,
+   SVGA3D_SURFACE_SHADERCONST_BOOL     = 124,
+
+   SVGA3D_FORMAT_MAX                   = 125,
 } SVGA3dSurfaceFormat;
 
 typedef uint32 SVGA3dColor; /* a, r, g, b */
@@ -957,15 +1056,21 @@
 } SVGA3dCubeFace;
 
 typedef enum {
+   SVGA3D_SHADERTYPE_INVALID                    = 0,
+   SVGA3D_SHADERTYPE_MIN                        = 1,
    SVGA3D_SHADERTYPE_VS                         = 1,
    SVGA3D_SHADERTYPE_PS                         = 2,
-   SVGA3D_SHADERTYPE_MAX
+   SVGA3D_SHADERTYPE_MAX                        = 3,
+   SVGA3D_SHADERTYPE_GS                         = 3,
 } SVGA3dShaderType;
 
+#define SVGA3D_NUM_SHADERTYPE (SVGA3D_SHADERTYPE_MAX - SVGA3D_SHADERTYPE_MIN)
+
 typedef enum {
    SVGA3D_CONST_TYPE_FLOAT                      = 0,
    SVGA3D_CONST_TYPE_INT                        = 1,
    SVGA3D_CONST_TYPE_BOOL                       = 2,
+   SVGA3D_CONST_TYPE_MAX
 } SVGA3dShaderConstType;
 
 #define SVGA3D_MAX_SURFACE_FACES                6
@@ -1056,9 +1161,84 @@
 #define SVGA_3D_CMD_GENERATE_MIPMAPS       SVGA_3D_CMD_BASE + 31
 #define SVGA_3D_CMD_ACTIVATE_SURFACE       SVGA_3D_CMD_BASE + 40
 #define SVGA_3D_CMD_DEACTIVATE_SURFACE     SVGA_3D_CMD_BASE + 41
-#define SVGA_3D_CMD_MAX                    SVGA_3D_CMD_BASE + 42
+#define SVGA_3D_CMD_SCREEN_DMA               1082
+#define SVGA_3D_CMD_SET_UNITY_SURFACE_COOKIE 1083
+#define SVGA_3D_CMD_OPEN_CONTEXT_SURFACE     1084
 
-#define SVGA_3D_CMD_FUTURE_MAX             2000
+#define SVGA_3D_CMD_LOGICOPS_BITBLT          1085
+#define SVGA_3D_CMD_LOGICOPS_TRANSBLT        1086
+#define SVGA_3D_CMD_LOGICOPS_STRETCHBLT      1087
+#define SVGA_3D_CMD_LOGICOPS_COLORFILL       1088
+#define SVGA_3D_CMD_LOGICOPS_ALPHABLEND      1089
+#define SVGA_3D_CMD_LOGICOPS_CLEARTYPEBLEND  1090
+
+#define SVGA_3D_CMD_SET_OTABLE_BASE          1091
+#define SVGA_3D_CMD_READBACK_OTABLE          1092
+
+#define SVGA_3D_CMD_DEFINE_GB_MOB            1093
+#define SVGA_3D_CMD_DESTROY_GB_MOB           1094
+#define SVGA_3D_CMD_REDEFINE_GB_MOB          1095
+#define SVGA_3D_CMD_UPDATE_GB_MOB_MAPPING    1096
+
+#define SVGA_3D_CMD_DEFINE_GB_SURFACE        1097
+#define SVGA_3D_CMD_DESTROY_GB_SURFACE       1098
+#define SVGA_3D_CMD_BIND_GB_SURFACE          1099
+#define SVGA_3D_CMD_COND_BIND_GB_SURFACE     1100
+#define SVGA_3D_CMD_UPDATE_GB_IMAGE          1101
+#define SVGA_3D_CMD_UPDATE_GB_SURFACE        1102
+#define SVGA_3D_CMD_READBACK_GB_IMAGE        1103
+#define SVGA_3D_CMD_READBACK_GB_SURFACE      1104
+#define SVGA_3D_CMD_INVALIDATE_GB_IMAGE      1105
+#define SVGA_3D_CMD_INVALIDATE_GB_SURFACE    1106
+
+#define SVGA_3D_CMD_DEFINE_GB_CONTEXT        1107
+#define SVGA_3D_CMD_DESTROY_GB_CONTEXT       1108
+#define SVGA_3D_CMD_BIND_GB_CONTEXT          1109
+#define SVGA_3D_CMD_READBACK_GB_CONTEXT      1110
+#define SVGA_3D_CMD_INVALIDATE_GB_CONTEXT    1111
+
+#define SVGA_3D_CMD_DEFINE_GB_SHADER         1112
+#define SVGA_3D_CMD_DESTROY_GB_SHADER        1113
+#define SVGA_3D_CMD_BIND_GB_SHADER           1114
+
+#define SVGA_3D_CMD_SET_OTABLE_BASE64        1115
+
+#define SVGA_3D_CMD_BEGIN_GB_QUERY           1116
+#define SVGA_3D_CMD_END_GB_QUERY             1117
+#define SVGA_3D_CMD_WAIT_FOR_GB_QUERY        1118
+
+#define SVGA_3D_CMD_NOP                      1119
+
+#define SVGA_3D_CMD_ENABLE_GART              1120
+#define SVGA_3D_CMD_DISABLE_GART             1121
+#define SVGA_3D_CMD_MAP_MOB_INTO_GART        1122
+#define SVGA_3D_CMD_UNMAP_GART_RANGE         1123
+
+#define SVGA_3D_CMD_DEFINE_GB_SCREENTARGET   1124
+#define SVGA_3D_CMD_DESTROY_GB_SCREENTARGET  1125
+#define SVGA_3D_CMD_BIND_GB_SCREENTARGET     1126
+#define SVGA_3D_CMD_UPDATE_GB_SCREENTARGET   1127
+
+#define SVGA_3D_CMD_READBACK_GB_IMAGE_PARTIAL   1128
+#define SVGA_3D_CMD_INVALIDATE_GB_IMAGE_PARTIAL 1129
+
+#define SVGA_3D_CMD_SET_GB_SHADERCONSTS_INLINE  1130
+#define SVGA_3D_CMD_GB_SCREEN_DMA               1131
+#define SVGA_3D_CMD_BIND_GB_SURFACE_WITH_PITCH  1132
+#define SVGA_3D_CMD_GB_MOB_FENCE                1133
+#define SVGA_3D_CMD_DEFINE_GB_SURFACE_V2        1134
+#define SVGA_3D_CMD_DEFINE_GB_MOB64          1135
+#define SVGA_3D_CMD_REDEFINE_GB_MOB64        1136
+#define SVGA_3D_CMD_NOP_ERROR                1137
+
+#define SVGA_3D_CMD_RESERVED1                1138
+#define SVGA_3D_CMD_RESERVED2                1139
+#define SVGA_3D_CMD_RESERVED3                1140
+#define SVGA_3D_CMD_RESERVED4                1141
+#define SVGA_3D_CMD_RESERVED5                1142
+
+#define SVGA_3D_CMD_MAX                      1142
+#define SVGA_3D_CMD_FUTURE_MAX               3000
 
 /*
  * Common substructures used in multiple FIFO commands:
@@ -1750,6 +1930,507 @@
 
 
 /*
+ * Guest-backed surface definitions.
+ */
+
+typedef uint32 SVGAMobId;
+
+typedef enum SVGAMobFormat {
+   SVGA3D_MOBFMT_INVALID = SVGA3D_INVALID_ID,
+   SVGA3D_MOBFMT_PTDEPTH_0 = 0,
+   SVGA3D_MOBFMT_PTDEPTH_1 = 1,
+   SVGA3D_MOBFMT_PTDEPTH_2 = 2,
+   SVGA3D_MOBFMT_RANGE     = 3,
+   SVGA3D_MOBFMT_PTDEPTH64_0 = 4,
+   SVGA3D_MOBFMT_PTDEPTH64_1 = 5,
+   SVGA3D_MOBFMT_PTDEPTH64_2 = 6,
+   SVGA3D_MOBFMT_MAX,
+} SVGAMobFormat;
+
+/*
+ * Sizes of opaque types.
+ */
+
+#define SVGA3D_OTABLE_MOB_ENTRY_SIZE 16
+#define SVGA3D_OTABLE_CONTEXT_ENTRY_SIZE 8
+#define SVGA3D_OTABLE_SURFACE_ENTRY_SIZE 64
+#define SVGA3D_OTABLE_SHADER_ENTRY_SIZE 16
+#define SVGA3D_OTABLE_SCREEN_TARGET_ENTRY_SIZE 64
+#define SVGA3D_CONTEXT_DATA_SIZE 16384
+
+/*
+ * SVGA3dCmdSetOTableBase --
+ *
+ * This command allows the guest to specify the base PPN of the
+ * specified object table.
+ */
+
+typedef enum {
+   SVGA_OTABLE_MOB           = 0,
+   SVGA_OTABLE_MIN           = 0,
+   SVGA_OTABLE_SURFACE       = 1,
+   SVGA_OTABLE_CONTEXT       = 2,
+   SVGA_OTABLE_SHADER        = 3,
+   SVGA_OTABLE_SCREEN_TARGET = 4,
+   SVGA_OTABLE_DX9_MAX       = 5,
+   SVGA_OTABLE_MAX           = 8
+} SVGAOTableType;
+
+typedef
+struct {
+   SVGAOTableType type;
+   PPN baseAddress;
+   uint32 sizeInBytes;
+   uint32 validSizeInBytes;
+   SVGAMobFormat ptDepth;
+} __packed
+SVGA3dCmdSetOTableBase;  /* SVGA_3D_CMD_SET_OTABLE_BASE */
+
+typedef
+struct {
+   SVGAOTableType type;
+   PPN64 baseAddress;
+   uint32 sizeInBytes;
+   uint32 validSizeInBytes;
+   SVGAMobFormat ptDepth;
+} __packed
+SVGA3dCmdSetOTableBase64;  /* SVGA_3D_CMD_SET_OTABLE_BASE64 */
+
+typedef
+struct {
+   SVGAOTableType type;
+} __packed
+SVGA3dCmdReadbackOTable;  /* SVGA_3D_CMD_READBACK_OTABLE */
+
+/*
+ * Define a memory object (Mob) in the OTable.
+ */
+
+typedef
+struct SVGA3dCmdDefineGBMob {
+   SVGAMobId mobid;
+   SVGAMobFormat ptDepth;
+   PPN base;
+   uint32 sizeInBytes;
+} __packed
+SVGA3dCmdDefineGBMob;   /* SVGA_3D_CMD_DEFINE_GB_MOB */
+
+
+/*
+ * Destroys an object in the OTable.
+ */
+
+typedef
+struct SVGA3dCmdDestroyGBMob {
+   SVGAMobId mobid;
+} __packed
+SVGA3dCmdDestroyGBMob;   /* SVGA_3D_CMD_DESTROY_GB_MOB */
+
+/*
+ * Redefine an object in the OTable.
+ */
+
+typedef
+struct SVGA3dCmdRedefineGBMob {
+   SVGAMobId mobid;
+   SVGAMobFormat ptDepth;
+   PPN base;
+   uint32 sizeInBytes;
+} __packed
+SVGA3dCmdRedefineGBMob;   /* SVGA_3D_CMD_REDEFINE_GB_MOB */
+
+/*
+ * Define a memory object (Mob) in the OTable with a PPN64 base.
+ */
+
+typedef
+struct SVGA3dCmdDefineGBMob64 {
+   SVGAMobId mobid;
+   SVGAMobFormat ptDepth;
+   PPN64 base;
+   uint32 sizeInBytes;
+} __packed
+SVGA3dCmdDefineGBMob64;   /* SVGA_3D_CMD_DEFINE_GB_MOB64 */
+
+/*
+ * Redefine an object in the OTable with PPN64 base.
+ */
+
+typedef
+struct SVGA3dCmdRedefineGBMob64 {
+   SVGAMobId mobid;
+   SVGAMobFormat ptDepth;
+   PPN64 base;
+   uint32 sizeInBytes;
+} __packed
+SVGA3dCmdRedefineGBMob64;   /* SVGA_3D_CMD_REDEFINE_GB_MOB64 */
+
+/*
+ * Notification that the page tables have been modified.
+ */
+
+typedef
+struct SVGA3dCmdUpdateGBMobMapping {
+   SVGAMobId mobid;
+} __packed
+SVGA3dCmdUpdateGBMobMapping;   /* SVGA_3D_CMD_UPDATE_GB_MOB_MAPPING */
+
+/*
+ * Define a guest-backed surface.
+ */
+
+typedef
+struct SVGA3dCmdDefineGBSurface {
+   uint32 sid;
+   SVGA3dSurfaceFlags surfaceFlags;
+   SVGA3dSurfaceFormat format;
+   uint32 numMipLevels;
+   uint32 multisampleCount;
+   SVGA3dTextureFilter autogenFilter;
+   SVGA3dSize size;
+} __packed
+SVGA3dCmdDefineGBSurface;   /* SVGA_3D_CMD_DEFINE_GB_SURFACE */
+
+/*
+ * Destroy a guest-backed surface.
+ */
+
+typedef
+struct SVGA3dCmdDestroyGBSurface {
+   uint32 sid;
+} __packed
+SVGA3dCmdDestroyGBSurface;   /* SVGA_3D_CMD_DESTROY_GB_SURFACE */
+
+/*
+ * Bind a guest-backed surface to an object.
+ */
+
+typedef
+struct SVGA3dCmdBindGBSurface {
+   uint32 sid;
+   SVGAMobId mobid;
+} __packed
+SVGA3dCmdBindGBSurface;   /* SVGA_3D_CMD_BIND_GB_SURFACE */
+
+/*
+ * Conditionally bind a mob to a guest backed surface if testMobid
+ * matches the currently bound mob.  Optionally issue a readback on
+ * the surface while it is still bound to the old mobid if the mobid
+ * is changed by this command.
+ */
+
+#define SVGA3D_COND_BIND_GB_SURFACE_FLAG_READBACK (1 << 0)
+
+typedef
+struct{
+   uint32 sid;
+   SVGAMobId testMobid;
+   SVGAMobId mobid;
+   uint32 flags;
+} __packed
+SVGA3dCmdCondBindGBSurface;          /* SVGA_3D_CMD_COND_BIND_GB_SURFACE */
+
+/*
+ * Update an image in a guest-backed surface.
+ * (Inform the device that the guest-contents have been updated.)
+ */
+
+typedef
+struct SVGA3dCmdUpdateGBImage {
+   SVGA3dSurfaceImageId image;
+   SVGA3dBox box;
+} __packed
+SVGA3dCmdUpdateGBImage;   /* SVGA_3D_CMD_UPDATE_GB_IMAGE */
+
+/*
+ * Update an entire guest-backed surface.
+ * (Inform the device that the guest-contents have been updated.)
+ */
+
+typedef
+struct SVGA3dCmdUpdateGBSurface {
+   uint32 sid;
+} __packed
+SVGA3dCmdUpdateGBSurface;   /* SVGA_3D_CMD_UPDATE_GB_SURFACE */
+
+/*
+ * Readback an image in a guest-backed surface.
+ * (Request the device to flush the dirty contents into the guest.)
+ */
+
+typedef
+struct SVGA3dCmdReadbackGBImage {
+   SVGA3dSurfaceImageId image;
+} __packed
+SVGA3dCmdReadbackGBImage;   /* SVGA_3D_CMD_READBACK_GB_IMAGE*/
+
+/*
+ * Readback an entire guest-backed surface.
+ * (Request the device to flush the dirty contents into the guest.)
+ */
+
+typedef
+struct SVGA3dCmdReadbackGBSurface {
+   uint32 sid;
+} __packed
+SVGA3dCmdReadbackGBSurface;   /* SVGA_3D_CMD_READBACK_GB_SURFACE */
+
+/*
+ * Readback a sub rect of an image in a guest-backed surface.  After
+ * issuing this command the driver is required to issue an update call
+ * of the same region before issuing any other commands that reference
+ * this surface, or rendering is not guaranteed.
+ */
+
+typedef
+struct SVGA3dCmdReadbackGBImagePartial {
+   SVGA3dSurfaceImageId image;
+   SVGA3dBox box;
+   uint32 invertBox;
+} __packed
+SVGA3dCmdReadbackGBImagePartial; /* SVGA_3D_CMD_READBACK_GB_IMAGE_PARTIAL */
+
+/*
+ * Invalidate an image in a guest-backed surface.
+ * (Notify the device that the contents can be lost.)
+ */
+
+typedef
+struct SVGA3dCmdInvalidateGBImage {
+   SVGA3dSurfaceImageId image;
+} __packed
+SVGA3dCmdInvalidateGBImage;   /* SVGA_3D_CMD_INVALIDATE_GB_IMAGE */
+
+/*
+ * Invalidate an entire guest-backed surface.
+ * (Notify the device that the contents of all images can be lost.)
+ */
+
+typedef
+struct SVGA3dCmdInvalidateGBSurface {
+   uint32 sid;
+} __packed
+SVGA3dCmdInvalidateGBSurface; /* SVGA_3D_CMD_INVALIDATE_GB_SURFACE */
+
+/*
+ * Invalidate a sub rect of an image in a guest-backed surface.  After
+ * issuing this command the driver is required to issue an update call
+ * of the same region before issuing any other commands that reference
+ * this surface, or rendering is not guaranteed.
+ */
+
+typedef
+struct SVGA3dCmdInvalidateGBImagePartial {
+   SVGA3dSurfaceImageId image;
+   SVGA3dBox box;
+   uint32 invertBox;
+} __packed
+SVGA3dCmdInvalidateGBImagePartial; /* SVGA_3D_CMD_INVALIDATE_GB_IMAGE_PARTIAL */
+
+/*
+ * Define a guest-backed context.
+ */
+
+typedef
+struct SVGA3dCmdDefineGBContext {
+   uint32 cid;
+} __packed
+SVGA3dCmdDefineGBContext;   /* SVGA_3D_CMD_DEFINE_GB_CONTEXT */
+
+/*
+ * Destroy a guest-backed context.
+ */
+
+typedef
+struct SVGA3dCmdDestroyGBContext {
+   uint32 cid;
+} __packed
+SVGA3dCmdDestroyGBContext;   /* SVGA_3D_CMD_DESTROY_GB_CONTEXT */
+
+/*
+ * Bind a guest-backed context.
+ *
+ * validContents should be set to 0 for new contexts,
+ * and 1 if this is an old context which is getting paged
+ * back onto the device.
+ *
+ * For new contexts, it is recommended that the driver
+ * issue commands to initialize all interesting state
+ * prior to rendering.
+ */
+
+typedef
+struct SVGA3dCmdBindGBContext {
+   uint32 cid;
+   SVGAMobId mobid;
+   uint32 validContents;
+} __packed
+SVGA3dCmdBindGBContext;   /* SVGA_3D_CMD_BIND_GB_CONTEXT */
+
+/*
+ * Readback a guest-backed context.
+ * (Request that the device flush the contents back into guest memory.)
+ */
+
+typedef
+struct SVGA3dCmdReadbackGBContext {
+   uint32 cid;
+} __packed
+SVGA3dCmdReadbackGBContext;   /* SVGA_3D_CMD_READBACK_GB_CONTEXT */
+
+/*
+ * Invalidate a guest-backed context.
+ */
+typedef
+struct SVGA3dCmdInvalidateGBContext {
+   uint32 cid;
+} __packed
+SVGA3dCmdInvalidateGBContext;   /* SVGA_3D_CMD_INVALIDATE_GB_CONTEXT */
+
+/*
+ * Define a guest-backed shader.
+ */
+
+typedef
+struct SVGA3dCmdDefineGBShader {
+   uint32 shid;
+   SVGA3dShaderType type;
+   uint32 sizeInBytes;
+} __packed
+SVGA3dCmdDefineGBShader;   /* SVGA_3D_CMD_DEFINE_GB_SHADER */
+
+/*
+ * Bind a guest-backed shader.
+ */
+
+typedef struct SVGA3dCmdBindGBShader {
+   uint32 shid;
+   SVGAMobId mobid;
+   uint32 offsetInBytes;
+} __packed
+SVGA3dCmdBindGBShader;   /* SVGA_3D_CMD_BIND_GB_SHADER */
+
+/*
+ * Destroy a guest-backed shader.
+ */
+
+typedef struct SVGA3dCmdDestroyGBShader {
+   uint32 shid;
+} __packed
+SVGA3dCmdDestroyGBShader;   /* SVGA_3D_CMD_DESTROY_GB_SHADER */
+
+typedef
+struct {
+   uint32                  cid;
+   uint32                  regStart;
+   SVGA3dShaderType        shaderType;
+   SVGA3dShaderConstType   constType;
+
+   /*
+    * Followed by a variable number of shader constants.
+    *
+    * Note that FLOAT and INT constants are 4-dwords in length, while
+    * BOOL constants are 1-dword in length.
+    */
+} __packed
+SVGA3dCmdSetGBShaderConstInline;
+/* SVGA_3D_CMD_SET_GB_SHADERCONSTS_INLINE */
+
+typedef
+struct {
+   uint32               cid;
+   SVGA3dQueryType      type;
+} __packed
+SVGA3dCmdBeginGBQuery;           /* SVGA_3D_CMD_BEGIN_GB_QUERY */
+
+typedef
+struct {
+   uint32               cid;
+   SVGA3dQueryType      type;
+   SVGAMobId mobid;
+   uint32 offset;
+} __packed
+SVGA3dCmdEndGBQuery;                  /* SVGA_3D_CMD_END_GB_QUERY */
+
+
+/*
+ * SVGA_3D_CMD_WAIT_FOR_GB_QUERY --
+ *
+ *    The semantics of this command are identical to the
+ *    SVGA_3D_CMD_WAIT_FOR_QUERY except that the results are written
+ *    to a Mob instead of a GMR.
+ */
+
+typedef
+struct {
+   uint32               cid;
+   SVGA3dQueryType      type;
+   SVGAMobId mobid;
+   uint32 offset;
+} __packed
+SVGA3dCmdWaitForGBQuery;          /* SVGA_3D_CMD_WAIT_FOR_GB_QUERY */
+
+typedef
+struct {
+   SVGAMobId mobid;
+   uint32 fbOffset;
+   uint32 initalized;
+} __packed
+SVGA3dCmdEnableGart;              /* SVGA_3D_CMD_ENABLE_GART */
+
+typedef
+struct {
+   SVGAMobId mobid;
+   uint32 gartOffset;
+} __packed
+SVGA3dCmdMapMobIntoGart;          /* SVGA_3D_CMD_MAP_MOB_INTO_GART */
+
+
+typedef
+struct {
+   uint32 gartOffset;
+   uint32 numPages;
+} __packed
+SVGA3dCmdUnmapGartRange;          /* SVGA_3D_CMD_UNMAP_GART_RANGE */
+
+
+/*
+ * Screen Targets
+ */
+#define SVGA_STFLAG_PRIMARY (1 << 0)
+
+typedef
+struct {
+   uint32 stid;
+   uint32 width;
+   uint32 height;
+   int32 xRoot;
+   int32 yRoot;
+   uint32 flags;
+} __packed
+SVGA3dCmdDefineGBScreenTarget;    /* SVGA_3D_CMD_DEFINE_GB_SCREENTARGET */
+
+typedef
+struct {
+   uint32 stid;
+} __packed
+SVGA3dCmdDestroyGBScreenTarget;  /* SVGA_3D_CMD_DESTROY_GB_SCREENTARGET */
+
+typedef
+struct {
+   uint32 stid;
+   SVGA3dSurfaceImageId image;
+} __packed
+SVGA3dCmdBindGBScreenTarget;  /* SVGA_3D_CMD_BIND_GB_SCREENTARGET */
+
+typedef
+struct {
+   uint32 stid;
+   SVGA3dBox box;
+} __packed
+SVGA3dCmdUpdateGBScreenTarget;  /* SVGA_3D_CMD_UPDATE_GB_SCREENTARGET */
+
+/*
  * Capability query index.
  *
  * Notes:
@@ -1879,10 +2560,41 @@
    SVGA3D_DEVCAP_SURFACEFMT_BC5_UNORM              = 83,
 
    /*
-    * Don't add new caps into the previous section; the values in this
-    * enumeration must not change. You can put new values right before
-    * SVGA3D_DEVCAP_MAX.
+    * Deprecated.
     */
+   SVGA3D_DEVCAP_VGPU10                            = 84,
+
+   /*
+    * This contains several SVGA_3D_CAPS_VIDEO_DECODE elements
+    * ored together, one for every type of video decoding supported.
+    */
+   SVGA3D_DEVCAP_VIDEO_DECODE                      = 85,
+
+   /*
+    * This contains several SVGA_3D_CAPS_VIDEO_PROCESS elements
+    * ored together, one for every type of video processing supported.
+    */
+   SVGA3D_DEVCAP_VIDEO_PROCESS                     = 86,
+
+   SVGA3D_DEVCAP_LINE_AA                           = 87,  /* boolean */
+   SVGA3D_DEVCAP_LINE_STIPPLE                      = 88,  /* boolean */
+   SVGA3D_DEVCAP_MAX_LINE_WIDTH                    = 89,  /* float */
+   SVGA3D_DEVCAP_MAX_AA_LINE_WIDTH                 = 90,  /* float */
+
+   SVGA3D_DEVCAP_SURFACEFMT_YV12                   = 91,
+
+   /*
+    * Does the host support the SVGA logic ops commands?
+    */
+   SVGA3D_DEVCAP_LOGICOPS                          = 92,
+
+   /*
+    * What support does the host have for screen targets?
+    *
+    * See the SVGA3D_SCREENTARGET_CAP bits below.
+    */
+   SVGA3D_DEVCAP_SCREENTARGETS                     = 93,
+
    SVGA3D_DEVCAP_MAX                                  /* This must be the last index. */
 } SVGA3dDevCapIndex;
 
@@ -1893,4 +2605,28 @@
    float  f;
 } SVGA3dDevCapResult;
 
+typedef enum {
+   SVGA3DCAPS_RECORD_UNKNOWN        = 0,
+   SVGA3DCAPS_RECORD_DEVCAPS_MIN    = 0x100,
+   SVGA3DCAPS_RECORD_DEVCAPS        = 0x100,
+   SVGA3DCAPS_RECORD_DEVCAPS_MAX    = 0x1ff,
+} SVGA3dCapsRecordType;
+
+typedef
+struct SVGA3dCapsRecordHeader {
+   uint32 length;
+   SVGA3dCapsRecordType type;
+}
+SVGA3dCapsRecordHeader;
+
+typedef
+struct SVGA3dCapsRecord {
+   SVGA3dCapsRecordHeader header;
+   uint32 data[1];
+}
+SVGA3dCapsRecord;
+
+
+typedef uint32 SVGA3dCapPair[2];
+
 #endif /* _SVGA3D_REG_H_ */
diff --git a/drivers/gpu/drm/vmwgfx/svga3d_surfacedefs.h b/drivers/gpu/drm/vmwgfx/svga3d_surfacedefs.h
index 8369c3b..ef33850 100644
--- a/drivers/gpu/drm/vmwgfx/svga3d_surfacedefs.h
+++ b/drivers/gpu/drm/vmwgfx/svga3d_surfacedefs.h
@@ -38,8 +38,11 @@
 
 #define DIV_ROUND_UP(x, y)  (((x) + (y) - 1) / (y))
 #define max_t(type, x, y)  ((x) > (y) ? (x) : (y))
+#define min_t(type, x, y)  ((x) < (y) ? (x) : (y))
 #define surf_size_struct SVGA3dSize
 #define u32 uint32
+#define u64 uint64_t
+#define U32_MAX ((u32)~0U)
 
 #endif /* __KERNEL__ */
 
@@ -704,8 +707,8 @@
 
 static inline u32 clamped_umul32(u32 a, u32 b)
 {
-	uint64_t tmp = (uint64_t) a*b;
-	return (tmp > (uint64_t) ((u32) -1)) ? (u32) -1 : tmp;
+	u64 tmp = (u64) a*b;
+	return (tmp > (u64) U32_MAX) ? U32_MAX : tmp;
 }
 
 static inline const struct svga3d_surface_desc *
@@ -834,7 +837,7 @@
 				  bool cubemap)
 {
 	const struct svga3d_surface_desc *desc = svga3dsurface_get_desc(format);
-	u32 total_size = 0;
+	u64 total_size = 0;
 	u32 mip;
 
 	for (mip = 0; mip < num_mip_levels; mip++) {
@@ -847,7 +850,7 @@
 	if (cubemap)
 		total_size *= SVGA3D_MAX_SURFACE_FACES;
 
-	return total_size;
+	return (u32) min_t(u64, total_size, (u64) U32_MAX);
 }
 
 
diff --git a/drivers/gpu/drm/vmwgfx/svga_reg.h b/drivers/gpu/drm/vmwgfx/svga_reg.h
index 01f63cb..11323dd 100644
--- a/drivers/gpu/drm/vmwgfx/svga_reg.h
+++ b/drivers/gpu/drm/vmwgfx/svga_reg.h
@@ -169,7 +169,17 @@
    SVGA_REG_TRACES = 45,            /* Enable trace-based updates even when FIFO is on */
    SVGA_REG_GMRS_MAX_PAGES = 46,    /* Maximum number of 4KB pages for all GMRs */
    SVGA_REG_MEMORY_SIZE = 47,       /* Total dedicated device memory excluding FIFO */
-   SVGA_REG_TOP = 48,               /* Must be 1 more than the last register */
+   SVGA_REG_COMMAND_LOW = 48,       /* Lower 32 bits and submits commands */
+   SVGA_REG_COMMAND_HIGH = 49,      /* Upper 32 bits of command buffer PA */
+   SVGA_REG_MAX_PRIMARY_BOUNDING_BOX_MEM = 50,   /* Max primary memory */
+   SVGA_REG_SUGGESTED_GBOBJECT_MEM_SIZE_KB = 51, /* Suggested limit on mob mem */
+   SVGA_REG_DEV_CAP = 52,           /* Write dev cap index, read value */
+   SVGA_REG_CMD_PREPEND_LOW = 53,
+   SVGA_REG_CMD_PREPEND_HIGH = 54,
+   SVGA_REG_SCREENTARGET_MAX_WIDTH = 55,
+   SVGA_REG_SCREENTARGET_MAX_HEIGHT = 56,
+   SVGA_REG_MOB_MAX_SIZE = 57,
+   SVGA_REG_TOP = 58,               /* Must be 1 more than the last register */
 
    SVGA_PALETTE_BASE = 1024,        /* Base of SVGA color map */
    /* Next 768 (== 256*3) registers exist for colormap */
@@ -431,7 +441,10 @@
 #define SVGA_CAP_TRACES             0x00200000
 #define SVGA_CAP_GMR2               0x00400000
 #define SVGA_CAP_SCREEN_OBJECT_2    0x00800000
-
+#define SVGA_CAP_COMMAND_BUFFERS    0x01000000
+#define SVGA_CAP_DEAD1              0x02000000
+#define SVGA_CAP_CMD_BUFFERS_2      0x04000000
+#define SVGA_CAP_GBOBJECTS          0x08000000
 
 /*
  * FIFO register indices.
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_buffer.c b/drivers/gpu/drm/vmwgfx/vmwgfx_buffer.c
index 0489c61..6327cfc 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_buffer.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_buffer.c
@@ -40,6 +40,10 @@
 static uint32_t sys_placement_flags = TTM_PL_FLAG_SYSTEM |
 	TTM_PL_FLAG_CACHED;
 
+static uint32_t sys_ne_placement_flags = TTM_PL_FLAG_SYSTEM |
+	TTM_PL_FLAG_CACHED |
+	TTM_PL_FLAG_NO_EVICT;
+
 static uint32_t gmr_placement_flags = VMW_PL_FLAG_GMR |
 	TTM_PL_FLAG_CACHED;
 
@@ -47,6 +51,9 @@
 	TTM_PL_FLAG_CACHED |
 	TTM_PL_FLAG_NO_EVICT;
 
+static uint32_t mob_placement_flags = VMW_PL_FLAG_MOB |
+	TTM_PL_FLAG_CACHED;
+
 struct ttm_placement vmw_vram_placement = {
 	.fpfn = 0,
 	.lpfn = 0,
@@ -116,16 +123,26 @@
 	.busy_placement = &sys_placement_flags
 };
 
+struct ttm_placement vmw_sys_ne_placement = {
+	.fpfn = 0,
+	.lpfn = 0,
+	.num_placement = 1,
+	.placement = &sys_ne_placement_flags,
+	.num_busy_placement = 1,
+	.busy_placement = &sys_ne_placement_flags
+};
+
 static uint32_t evictable_placement_flags[] = {
 	TTM_PL_FLAG_SYSTEM | TTM_PL_FLAG_CACHED,
 	TTM_PL_FLAG_VRAM | TTM_PL_FLAG_CACHED,
-	VMW_PL_FLAG_GMR | TTM_PL_FLAG_CACHED
+	VMW_PL_FLAG_GMR | TTM_PL_FLAG_CACHED,
+	VMW_PL_FLAG_MOB | TTM_PL_FLAG_CACHED
 };
 
 struct ttm_placement vmw_evictable_placement = {
 	.fpfn = 0,
 	.lpfn = 0,
-	.num_placement = 3,
+	.num_placement = 4,
 	.placement = evictable_placement_flags,
 	.num_busy_placement = 1,
 	.busy_placement = &sys_placement_flags
@@ -140,10 +157,21 @@
 	.busy_placement = gmr_vram_placement_flags
 };
 
+struct ttm_placement vmw_mob_placement = {
+	.fpfn = 0,
+	.lpfn = 0,
+	.num_placement = 1,
+	.num_busy_placement = 1,
+	.placement = &mob_placement_flags,
+	.busy_placement = &mob_placement_flags
+};
+
 struct vmw_ttm_tt {
 	struct ttm_dma_tt dma_ttm;
 	struct vmw_private *dev_priv;
 	int gmr_id;
+	struct vmw_mob *mob;
+	int mem_type;
 	struct sg_table sgt;
 	struct vmw_sg_table vsgt;
 	uint64_t sg_alloc_size;
@@ -244,6 +272,7 @@
 		viter->dma_address = &__vmw_piter_dma_addr;
 		viter->page = &__vmw_piter_non_sg_page;
 		viter->addrs = vsgt->addrs;
+		viter->pages = vsgt->pages;
 		break;
 	case vmw_dma_map_populate:
 	case vmw_dma_map_bind:
@@ -424,6 +453,63 @@
 	vmw_tt->mapped = false;
 }
 
+
+/**
+ * vmw_bo_map_dma - Make sure buffer object pages are visible to the device
+ *
+ * @bo: Pointer to a struct ttm_buffer_object
+ *
+ * Wrapper around vmw_ttm_map_dma, that takes a TTM buffer object pointer
+ * instead of a pointer to a struct vmw_ttm_backend as argument.
+ * Note that the buffer object must be either pinned or reserved before
+ * calling this function.
+ */
+int vmw_bo_map_dma(struct ttm_buffer_object *bo)
+{
+	struct vmw_ttm_tt *vmw_tt =
+		container_of(bo->ttm, struct vmw_ttm_tt, dma_ttm.ttm);
+
+	return vmw_ttm_map_dma(vmw_tt);
+}
+
+
+/**
+ * vmw_bo_unmap_dma - Tear down the device mappings of buffer object pages
+ *
+ * @bo: Pointer to a struct ttm_buffer_object
+ *
+ * Wrapper around vmw_ttm_unmap_dma, that takes a TTM buffer object pointer
+ * instead of a pointer to a struct vmw_ttm_backend as argument.
+ */
+void vmw_bo_unmap_dma(struct ttm_buffer_object *bo)
+{
+	struct vmw_ttm_tt *vmw_tt =
+		container_of(bo->ttm, struct vmw_ttm_tt, dma_ttm.ttm);
+
+	vmw_ttm_unmap_dma(vmw_tt);
+}
+
+
+/**
+ * vmw_bo_sg_table - Return a struct vmw_sg_table object for a
+ * TTM buffer object
+ *
+ * @bo: Pointer to a struct ttm_buffer_object
+ *
+ * Returns a pointer to a struct vmw_sg_table object. The object should
+ * not be freed after use.
+ * Note that for the device addresses to be valid, the buffer object must
+ * either be reserved or pinned.
+ */
+const struct vmw_sg_table *vmw_bo_sg_table(struct ttm_buffer_object *bo)
+{
+	struct vmw_ttm_tt *vmw_tt =
+		container_of(bo->ttm, struct vmw_ttm_tt, dma_ttm.ttm);
+
+	return &vmw_tt->vsgt;
+}
+
+
 static int vmw_ttm_bind(struct ttm_tt *ttm, struct ttm_mem_reg *bo_mem)
 {
 	struct vmw_ttm_tt *vmw_be =
@@ -435,9 +521,27 @@
 		return ret;
 
 	vmw_be->gmr_id = bo_mem->start;
+	vmw_be->mem_type = bo_mem->mem_type;
 
-	return vmw_gmr_bind(vmw_be->dev_priv, &vmw_be->vsgt,
-			    ttm->num_pages, vmw_be->gmr_id);
+	switch (bo_mem->mem_type) {
+	case VMW_PL_GMR:
+		return vmw_gmr_bind(vmw_be->dev_priv, &vmw_be->vsgt,
+				    ttm->num_pages, vmw_be->gmr_id);
+	case VMW_PL_MOB:
+		if (unlikely(vmw_be->mob == NULL)) {
+			vmw_be->mob =
+				vmw_mob_create(ttm->num_pages);
+			if (unlikely(vmw_be->mob == NULL))
+				return -ENOMEM;
+		}
+
+		return vmw_mob_bind(vmw_be->dev_priv, vmw_be->mob,
+				    &vmw_be->vsgt, ttm->num_pages,
+				    vmw_be->gmr_id);
+	default:
+		BUG();
+	}
+	return 0;
 }
 
 static int vmw_ttm_unbind(struct ttm_tt *ttm)
@@ -445,7 +549,16 @@
 	struct vmw_ttm_tt *vmw_be =
 		container_of(ttm, struct vmw_ttm_tt, dma_ttm.ttm);
 
-	vmw_gmr_unbind(vmw_be->dev_priv, vmw_be->gmr_id);
+	switch (vmw_be->mem_type) {
+	case VMW_PL_GMR:
+		vmw_gmr_unbind(vmw_be->dev_priv, vmw_be->gmr_id);
+		break;
+	case VMW_PL_MOB:
+		vmw_mob_unbind(vmw_be->dev_priv, vmw_be->mob);
+		break;
+	default:
+		BUG();
+	}
 
 	if (vmw_be->dev_priv->map_mode == vmw_dma_map_bind)
 		vmw_ttm_unmap_dma(vmw_be);
@@ -453,6 +566,7 @@
 	return 0;
 }
 
+
 static void vmw_ttm_destroy(struct ttm_tt *ttm)
 {
 	struct vmw_ttm_tt *vmw_be =
@@ -463,9 +577,14 @@
 		ttm_dma_tt_fini(&vmw_be->dma_ttm);
 	else
 		ttm_tt_fini(ttm);
+
+	if (vmw_be->mob)
+		vmw_mob_destroy(vmw_be->mob);
+
 	kfree(vmw_be);
 }
 
+
 static int vmw_ttm_populate(struct ttm_tt *ttm)
 {
 	struct vmw_ttm_tt *vmw_tt =
@@ -500,6 +619,12 @@
 	struct vmw_private *dev_priv = vmw_tt->dev_priv;
 	struct ttm_mem_global *glob = vmw_mem_glob(dev_priv);
 
+
+	if (vmw_tt->mob) {
+		vmw_mob_destroy(vmw_tt->mob);
+		vmw_tt->mob = NULL;
+	}
+
 	vmw_ttm_unmap_dma(vmw_tt);
 	if (dev_priv->map_mode == vmw_dma_alloc_coherent) {
 		size_t size =
@@ -517,7 +642,7 @@
 	.destroy = vmw_ttm_destroy,
 };
 
-struct ttm_tt *vmw_ttm_tt_create(struct ttm_bo_device *bdev,
+static struct ttm_tt *vmw_ttm_tt_create(struct ttm_bo_device *bdev,
 				 unsigned long size, uint32_t page_flags,
 				 struct page *dummy_read_page)
 {
@@ -530,6 +655,7 @@
 
 	vmw_be->dma_ttm.ttm.func = &vmw_ttm_func;
 	vmw_be->dev_priv = container_of(bdev, struct vmw_private, bdev);
+	vmw_be->mob = NULL;
 
 	if (vmw_be->dev_priv->map_mode == vmw_dma_alloc_coherent)
 		ret = ttm_dma_tt_init(&vmw_be->dma_ttm, bdev, size, page_flags,
@@ -546,12 +672,12 @@
 	return NULL;
 }
 
-int vmw_invalidate_caches(struct ttm_bo_device *bdev, uint32_t flags)
+static int vmw_invalidate_caches(struct ttm_bo_device *bdev, uint32_t flags)
 {
 	return 0;
 }
 
-int vmw_init_mem_type(struct ttm_bo_device *bdev, uint32_t type,
+static int vmw_init_mem_type(struct ttm_bo_device *bdev, uint32_t type,
 		      struct ttm_mem_type_manager *man)
 {
 	switch (type) {
@@ -571,6 +697,7 @@
 		man->default_caching = TTM_PL_FLAG_CACHED;
 		break;
 	case VMW_PL_GMR:
+	case VMW_PL_MOB:
 		/*
 		 * "Guest Memory Regions" is an aperture like feature with
 		 *  one slot per bo. There is an upper limit of the number of
@@ -589,7 +716,7 @@
 	return 0;
 }
 
-void vmw_evict_flags(struct ttm_buffer_object *bo,
+static void vmw_evict_flags(struct ttm_buffer_object *bo,
 		     struct ttm_placement *placement)
 {
 	*placement = vmw_sys_placement;
@@ -618,6 +745,7 @@
 	switch (mem->mem_type) {
 	case TTM_PL_SYSTEM:
 	case VMW_PL_GMR:
+	case VMW_PL_MOB:
 		return 0;
 	case TTM_PL_VRAM:
 		mem->bus.offset = mem->start << PAGE_SHIFT;
@@ -677,6 +805,38 @@
 				  VMW_FENCE_WAIT_TIMEOUT);
 }
 
+/**
+ * vmw_move_notify - TTM move_notify_callback
+ *
+ * @bo:             The TTM buffer object about to move.
+ * @mem:            The struct ttm_mem_reg indicating to what memory
+ *                  region the move is taking place.
+ *
+ * Calls move_notify for all subsystems needing it.
+ * (currently only resources).
+ */
+static void vmw_move_notify(struct ttm_buffer_object *bo,
+			    struct ttm_mem_reg *mem)
+{
+	vmw_resource_move_notify(bo, mem);
+}
+
+
+/**
+ * vmw_swap_notify - TTM swap_notify callback
+ *
+ * @bo:             The TTM buffer object about to be swapped out.
+ */
+static void vmw_swap_notify(struct ttm_buffer_object *bo)
+{
+	struct ttm_bo_device *bdev = bo->bdev;
+
+	spin_lock(&bdev->fence_lock);
+	ttm_bo_wait(bo, false, false, false);
+	spin_unlock(&bdev->fence_lock);
+}
+
+
 struct ttm_bo_driver vmw_bo_driver = {
 	.ttm_tt_create = &vmw_ttm_tt_create,
 	.ttm_tt_populate = &vmw_ttm_populate,
@@ -691,8 +851,8 @@
 	.sync_obj_flush = vmw_sync_obj_flush,
 	.sync_obj_unref = vmw_sync_obj_unref,
 	.sync_obj_ref = vmw_sync_obj_ref,
-	.move_notify = NULL,
-	.swap_notify = NULL,
+	.move_notify = vmw_move_notify,
+	.swap_notify = vmw_swap_notify,
 	.fault_reserve_notify = &vmw_ttm_fault_reserve_notify,
 	.io_mem_reserve = &vmw_ttm_io_mem_reserve,
 	.io_mem_free = &vmw_ttm_io_mem_free,
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_context.c b/drivers/gpu/drm/vmwgfx/vmwgfx_context.c
index 00ae092..1e80152 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_context.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_context.c
@@ -32,12 +32,30 @@
 struct vmw_user_context {
 	struct ttm_base_object base;
 	struct vmw_resource res;
+	struct vmw_ctx_binding_state cbs;
 };
 
+
+
+typedef int (*vmw_scrub_func)(struct vmw_ctx_bindinfo *, bool);
+
 static void vmw_user_context_free(struct vmw_resource *res);
 static struct vmw_resource *
 vmw_user_context_base_to_res(struct ttm_base_object *base);
 
+static int vmw_gb_context_create(struct vmw_resource *res);
+static int vmw_gb_context_bind(struct vmw_resource *res,
+			       struct ttm_validate_buffer *val_buf);
+static int vmw_gb_context_unbind(struct vmw_resource *res,
+				 bool readback,
+				 struct ttm_validate_buffer *val_buf);
+static int vmw_gb_context_destroy(struct vmw_resource *res);
+static int vmw_context_scrub_shader(struct vmw_ctx_bindinfo *bi, bool rebind);
+static int vmw_context_scrub_render_target(struct vmw_ctx_bindinfo *bi,
+					   bool rebind);
+static int vmw_context_scrub_texture(struct vmw_ctx_bindinfo *bi, bool rebind);
+static void vmw_context_binding_state_scrub(struct vmw_ctx_binding_state *cbs);
+static void vmw_context_binding_state_kill(struct vmw_ctx_binding_state *cbs);
 static uint64_t vmw_user_context_size;
 
 static const struct vmw_user_resource_conv user_context_conv = {
@@ -62,6 +80,23 @@
 	.unbind = NULL
 };
 
+static const struct vmw_res_func vmw_gb_context_func = {
+	.res_type = vmw_res_context,
+	.needs_backup = true,
+	.may_evict = true,
+	.type_name = "guest backed contexts",
+	.backup_placement = &vmw_mob_placement,
+	.create = vmw_gb_context_create,
+	.destroy = vmw_gb_context_destroy,
+	.bind = vmw_gb_context_bind,
+	.unbind = vmw_gb_context_unbind
+};
+
+static const vmw_scrub_func vmw_scrub_funcs[vmw_ctx_binding_max] = {
+	[vmw_ctx_binding_shader] = vmw_context_scrub_shader,
+	[vmw_ctx_binding_rt] = vmw_context_scrub_render_target,
+	[vmw_ctx_binding_tex] = vmw_context_scrub_texture };
+
 /**
  * Context management:
  */
@@ -76,6 +111,20 @@
 	} *cmd;
 
 
+	if (res->func->destroy == vmw_gb_context_destroy) {
+		mutex_lock(&dev_priv->cmdbuf_mutex);
+		mutex_lock(&dev_priv->binding_mutex);
+		(void) vmw_context_binding_state_kill
+			(&container_of(res, struct vmw_user_context, res)->cbs);
+		(void) vmw_gb_context_destroy(res);
+		if (dev_priv->pinned_bo != NULL &&
+		    !dev_priv->query_cid_valid)
+			__vmw_execbuf_release_pinned_bo(dev_priv, NULL);
+		mutex_unlock(&dev_priv->binding_mutex);
+		mutex_unlock(&dev_priv->cmdbuf_mutex);
+		return;
+	}
+
 	vmw_execbuf_release_pinned_bo(dev_priv);
 	cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
 	if (unlikely(cmd == NULL)) {
@@ -92,6 +141,33 @@
 	vmw_3d_resource_dec(dev_priv, false);
 }
 
+static int vmw_gb_context_init(struct vmw_private *dev_priv,
+			       struct vmw_resource *res,
+			       void (*res_free) (struct vmw_resource *res))
+{
+	int ret;
+	struct vmw_user_context *uctx =
+		container_of(res, struct vmw_user_context, res);
+
+	ret = vmw_resource_init(dev_priv, res, true,
+				res_free, &vmw_gb_context_func);
+	res->backup_size = SVGA3D_CONTEXT_DATA_SIZE;
+
+	if (unlikely(ret != 0)) {
+		if (res_free)
+			res_free(res);
+		else
+			kfree(res);
+		return ret;
+	}
+
+	memset(&uctx->cbs, 0, sizeof(uctx->cbs));
+	INIT_LIST_HEAD(&uctx->cbs.list);
+
+	vmw_resource_activate(res, vmw_hw_context_destroy);
+	return 0;
+}
+
 static int vmw_context_init(struct vmw_private *dev_priv,
 			    struct vmw_resource *res,
 			    void (*res_free) (struct vmw_resource *res))
@@ -103,6 +179,9 @@
 		SVGA3dCmdDefineContext body;
 	} *cmd;
 
+	if (dev_priv->has_mob)
+		return vmw_gb_context_init(dev_priv, res, res_free);
+
 	ret = vmw_resource_init(dev_priv, res, false,
 				res_free, &vmw_legacy_context_func);
 
@@ -154,6 +233,180 @@
 	return (ret == 0) ? res : NULL;
 }
 
+
+static int vmw_gb_context_create(struct vmw_resource *res)
+{
+	struct vmw_private *dev_priv = res->dev_priv;
+	int ret;
+	struct {
+		SVGA3dCmdHeader header;
+		SVGA3dCmdDefineGBContext body;
+	} *cmd;
+
+	if (likely(res->id != -1))
+		return 0;
+
+	ret = vmw_resource_alloc_id(res);
+	if (unlikely(ret != 0)) {
+		DRM_ERROR("Failed to allocate a context id.\n");
+		goto out_no_id;
+	}
+
+	if (unlikely(res->id >= VMWGFX_NUM_GB_CONTEXT)) {
+		ret = -EBUSY;
+		goto out_no_fifo;
+	}
+
+	cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
+	if (unlikely(cmd == NULL)) {
+		DRM_ERROR("Failed reserving FIFO space for context "
+			  "creation.\n");
+		ret = -ENOMEM;
+		goto out_no_fifo;
+	}
+
+	cmd->header.id = SVGA_3D_CMD_DEFINE_GB_CONTEXT;
+	cmd->header.size = sizeof(cmd->body);
+	cmd->body.cid = res->id;
+	vmw_fifo_commit(dev_priv, sizeof(*cmd));
+	(void) vmw_3d_resource_inc(dev_priv, false);
+
+	return 0;
+
+out_no_fifo:
+	vmw_resource_release_id(res);
+out_no_id:
+	return ret;
+}
+
+static int vmw_gb_context_bind(struct vmw_resource *res,
+			       struct ttm_validate_buffer *val_buf)
+{
+	struct vmw_private *dev_priv = res->dev_priv;
+	struct {
+		SVGA3dCmdHeader header;
+		SVGA3dCmdBindGBContext body;
+	} *cmd;
+	struct ttm_buffer_object *bo = val_buf->bo;
+
+	BUG_ON(bo->mem.mem_type != VMW_PL_MOB);
+
+	cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
+	if (unlikely(cmd == NULL)) {
+		DRM_ERROR("Failed reserving FIFO space for context "
+			  "binding.\n");
+		return -ENOMEM;
+	}
+
+	cmd->header.id = SVGA_3D_CMD_BIND_GB_CONTEXT;
+	cmd->header.size = sizeof(cmd->body);
+	cmd->body.cid = res->id;
+	cmd->body.mobid = bo->mem.start;
+	cmd->body.validContents = res->backup_dirty;
+	res->backup_dirty = false;
+	vmw_fifo_commit(dev_priv, sizeof(*cmd));
+
+	return 0;
+}
+
+static int vmw_gb_context_unbind(struct vmw_resource *res,
+				 bool readback,
+				 struct ttm_validate_buffer *val_buf)
+{
+	struct vmw_private *dev_priv = res->dev_priv;
+	struct ttm_buffer_object *bo = val_buf->bo;
+	struct vmw_fence_obj *fence;
+	struct vmw_user_context *uctx =
+		container_of(res, struct vmw_user_context, res);
+
+	struct {
+		SVGA3dCmdHeader header;
+		SVGA3dCmdReadbackGBContext body;
+	} *cmd1;
+	struct {
+		SVGA3dCmdHeader header;
+		SVGA3dCmdBindGBContext body;
+	} *cmd2;
+	uint32_t submit_size;
+	uint8_t *cmd;
+
+
+	BUG_ON(bo->mem.mem_type != VMW_PL_MOB);
+
+	mutex_lock(&dev_priv->binding_mutex);
+	vmw_context_binding_state_scrub(&uctx->cbs);
+
+	submit_size = sizeof(*cmd2) + (readback ? sizeof(*cmd1) : 0);
+
+	cmd = vmw_fifo_reserve(dev_priv, submit_size);
+	if (unlikely(cmd == NULL)) {
+		DRM_ERROR("Failed reserving FIFO space for context "
+			  "unbinding.\n");
+		mutex_unlock(&dev_priv->binding_mutex);
+		return -ENOMEM;
+	}
+
+	cmd2 = (void *) cmd;
+	if (readback) {
+		cmd1 = (void *) cmd;
+		cmd1->header.id = SVGA_3D_CMD_READBACK_GB_CONTEXT;
+		cmd1->header.size = sizeof(cmd1->body);
+		cmd1->body.cid = res->id;
+		cmd2 = (void *) (&cmd1[1]);
+	}
+	cmd2->header.id = SVGA_3D_CMD_BIND_GB_CONTEXT;
+	cmd2->header.size = sizeof(cmd2->body);
+	cmd2->body.cid = res->id;
+	cmd2->body.mobid = SVGA3D_INVALID_ID;
+
+	vmw_fifo_commit(dev_priv, submit_size);
+	mutex_unlock(&dev_priv->binding_mutex);
+
+	/*
+	 * Create a fence object and fence the backup buffer.
+	 */
+
+	(void) vmw_execbuf_fence_commands(NULL, dev_priv,
+					  &fence, NULL);
+
+	vmw_fence_single_bo(bo, fence);
+
+	if (likely(fence != NULL))
+		vmw_fence_obj_unreference(&fence);
+
+	return 0;
+}
+
+static int vmw_gb_context_destroy(struct vmw_resource *res)
+{
+	struct vmw_private *dev_priv = res->dev_priv;
+	struct {
+		SVGA3dCmdHeader header;
+		SVGA3dCmdDestroyGBContext body;
+	} *cmd;
+
+	if (likely(res->id == -1))
+		return 0;
+
+	cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
+	if (unlikely(cmd == NULL)) {
+		DRM_ERROR("Failed reserving FIFO space for context "
+			  "destruction.\n");
+		return -ENOMEM;
+	}
+
+	cmd->header.id = SVGA_3D_CMD_DESTROY_GB_CONTEXT;
+	cmd->header.size = sizeof(cmd->body);
+	cmd->body.cid = res->id;
+	vmw_fifo_commit(dev_priv, sizeof(*cmd));
+	if (dev_priv->query_cid == res->id)
+		dev_priv->query_cid_valid = false;
+	vmw_resource_release_id(res);
+	vmw_3d_resource_dec(dev_priv, false);
+
+	return 0;
+}
+
 /**
  * User-space context management:
  */
@@ -272,3 +525,380 @@
 	return ret;
 
 }
+
+/**
+ * vmw_context_scrub_shader - scrub a shader binding from a context.
+ *
+ * @bi: single binding information.
+ * @rebind: Whether to issue a bind instead of scrub command.
+ */
+static int vmw_context_scrub_shader(struct vmw_ctx_bindinfo *bi, bool rebind)
+{
+	struct vmw_private *dev_priv = bi->ctx->dev_priv;
+	struct {
+		SVGA3dCmdHeader header;
+		SVGA3dCmdSetShader body;
+	} *cmd;
+
+	cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
+	if (unlikely(cmd == NULL)) {
+		DRM_ERROR("Failed reserving FIFO space for shader "
+			  "unbinding.\n");
+		return -ENOMEM;
+	}
+
+	cmd->header.id = SVGA_3D_CMD_SET_SHADER;
+	cmd->header.size = sizeof(cmd->body);
+	cmd->body.cid = bi->ctx->id;
+	cmd->body.type = bi->i1.shader_type;
+	cmd->body.shid = ((rebind) ? bi->res->id : SVGA3D_INVALID_ID);
+	vmw_fifo_commit(dev_priv, sizeof(*cmd));
+
+	return 0;
+}
+
+/**
+ * vmw_context_scrub_render_target - scrub a render target binding
+ * from a context.
+ *
+ * @bi: single binding information.
+ * @rebind: Whether to issue a bind instead of scrub command.
+ */
+static int vmw_context_scrub_render_target(struct vmw_ctx_bindinfo *bi,
+					   bool rebind)
+{
+	struct vmw_private *dev_priv = bi->ctx->dev_priv;
+	struct {
+		SVGA3dCmdHeader header;
+		SVGA3dCmdSetRenderTarget body;
+	} *cmd;
+
+	cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
+	if (unlikely(cmd == NULL)) {
+		DRM_ERROR("Failed reserving FIFO space for render target "
+			  "unbinding.\n");
+		return -ENOMEM;
+	}
+
+	cmd->header.id = SVGA_3D_CMD_SETRENDERTARGET;
+	cmd->header.size = sizeof(cmd->body);
+	cmd->body.cid = bi->ctx->id;
+	cmd->body.type = bi->i1.rt_type;
+	cmd->body.target.sid = ((rebind) ? bi->res->id : SVGA3D_INVALID_ID);
+	cmd->body.target.face = 0;
+	cmd->body.target.mipmap = 0;
+	vmw_fifo_commit(dev_priv, sizeof(*cmd));
+
+	return 0;
+}
+
+/**
+ * vmw_context_scrub_texture - scrub a texture binding from a context.
+ *
+ * @bi: single binding information.
+ * @rebind: Whether to issue a bind instead of scrub command.
+ *
+ * TODO: Possibly complement this function with a function that takes
+ * a list of texture bindings and combines them to a single command.
+ */
+static int vmw_context_scrub_texture(struct vmw_ctx_bindinfo *bi,
+				     bool rebind)
+{
+	struct vmw_private *dev_priv = bi->ctx->dev_priv;
+	struct {
+		SVGA3dCmdHeader header;
+		struct {
+			SVGA3dCmdSetTextureState c;
+			SVGA3dTextureState s1;
+		} body;
+	} *cmd;
+
+	cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
+	if (unlikely(cmd == NULL)) {
+		DRM_ERROR("Failed reserving FIFO space for texture "
+			  "unbinding.\n");
+		return -ENOMEM;
+	}
+
+
+	cmd->header.id = SVGA_3D_CMD_SETTEXTURESTATE;
+	cmd->header.size = sizeof(cmd->body);
+	cmd->body.c.cid = bi->ctx->id;
+	cmd->body.s1.stage = bi->i1.texture_stage;
+	cmd->body.s1.name = SVGA3D_TS_BIND_TEXTURE;
+	cmd->body.s1.value = ((rebind) ? bi->res->id : SVGA3D_INVALID_ID);
+	vmw_fifo_commit(dev_priv, sizeof(*cmd));
+
+	return 0;
+}
+
+/**
+ * vmw_context_binding_drop: Stop tracking a context binding
+ *
+ * @cb: Pointer to binding tracker storage.
+ *
+ * Stops tracking a context binding, and re-initializes its storage.
+ * Typically used when the context binding is replaced with a binding to
+ * another (or the same, for that matter) resource.
+ */
+static void vmw_context_binding_drop(struct vmw_ctx_binding *cb)
+{
+	list_del(&cb->ctx_list);
+	if (!list_empty(&cb->res_list))
+		list_del(&cb->res_list);
+	cb->bi.ctx = NULL;
+}
+
+/**
+ * vmw_context_binding_add: Start tracking a context binding
+ *
+ * @cbs: Pointer to the context binding state tracker.
+ * @bi: Information about the binding to track.
+ *
+ * Performs basic checks on the binding to make sure arguments are within
+ * bounds and then starts tracking the binding in the context binding
+ * state structure @cbs.
+ */
+int vmw_context_binding_add(struct vmw_ctx_binding_state *cbs,
+			    const struct vmw_ctx_bindinfo *bi)
+{
+	struct vmw_ctx_binding *loc;
+
+	switch (bi->bt) {
+	case vmw_ctx_binding_rt:
+		if (unlikely((unsigned)bi->i1.rt_type >= SVGA3D_RT_MAX)) {
+			DRM_ERROR("Illegal render target type %u.\n",
+				  (unsigned) bi->i1.rt_type);
+			return -EINVAL;
+		}
+		loc = &cbs->render_targets[bi->i1.rt_type];
+		break;
+	case vmw_ctx_binding_tex:
+		if (unlikely((unsigned)bi->i1.texture_stage >=
+			     SVGA3D_NUM_TEXTURE_UNITS)) {
+			DRM_ERROR("Illegal texture/sampler unit %u.\n",
+				  (unsigned) bi->i1.texture_stage);
+			return -EINVAL;
+		}
+		loc = &cbs->texture_units[bi->i1.texture_stage];
+		break;
+	case vmw_ctx_binding_shader:
+		if (unlikely((unsigned)bi->i1.shader_type >=
+			     SVGA3D_SHADERTYPE_MAX)) {
+			DRM_ERROR("Illegal shader type %u.\n",
+				  (unsigned) bi->i1.shader_type);
+			return -EINVAL;
+		}
+		loc = &cbs->shaders[bi->i1.shader_type];
+		break;
+	default:
+		BUG();
+	}
+
+	if (loc->bi.ctx != NULL)
+		vmw_context_binding_drop(loc);
+
+	loc->bi = *bi;
+	loc->bi.scrubbed = false;
+	list_add_tail(&loc->ctx_list, &cbs->list);
+	INIT_LIST_HEAD(&loc->res_list);
+
+	return 0;
+}
+
+/**
+ * vmw_context_binding_transfer: Transfer a context binding tracking entry.
+ *
+ * @cbs: Pointer to the persistent context binding state tracker.
+ * @bi: Information about the binding to track.
+ *
+ */
+static void vmw_context_binding_transfer(struct vmw_ctx_binding_state *cbs,
+					 const struct vmw_ctx_bindinfo *bi)
+{
+	struct vmw_ctx_binding *loc;
+
+	switch (bi->bt) {
+	case vmw_ctx_binding_rt:
+		loc = &cbs->render_targets[bi->i1.rt_type];
+		break;
+	case vmw_ctx_binding_tex:
+		loc = &cbs->texture_units[bi->i1.texture_stage];
+		break;
+	case vmw_ctx_binding_shader:
+		loc = &cbs->shaders[bi->i1.shader_type];
+		break;
+	default:
+		BUG();
+	}
+
+	if (loc->bi.ctx != NULL)
+		vmw_context_binding_drop(loc);
+
+	if (bi->res != NULL) {
+		loc->bi = *bi;
+		list_add_tail(&loc->ctx_list, &cbs->list);
+		list_add_tail(&loc->res_list, &bi->res->binding_head);
+	}
+}
+
+/**
+ * vmw_context_binding_kill - Kill a binding on the device
+ * and stop tracking it.
+ *
+ * @cb: Pointer to binding tracker storage.
+ *
+ * Emits FIFO commands to scrub a binding represented by @cb.
+ * Then stops tracking the binding and re-initializes its storage.
+ */
+static void vmw_context_binding_kill(struct vmw_ctx_binding *cb)
+{
+	if (!cb->bi.scrubbed) {
+		(void) vmw_scrub_funcs[cb->bi.bt](&cb->bi, false);
+		cb->bi.scrubbed = true;
+	}
+	vmw_context_binding_drop(cb);
+}
+
+/**
+ * vmw_context_binding_state_kill - Kill all bindings associated with a
+ * struct vmw_ctx_binding_state structure, and re-initialize the structure.
+ *
+ * @cbs: Pointer to the context binding state tracker.
+ *
+ * Emits commands to scrub all bindings associated with the
+ * context binding state tracker. Then re-initializes the whole structure.
+ */
+static void vmw_context_binding_state_kill(struct vmw_ctx_binding_state *cbs)
+{
+	struct vmw_ctx_binding *entry, *next;
+
+	list_for_each_entry_safe(entry, next, &cbs->list, ctx_list)
+		vmw_context_binding_kill(entry);
+}
+
+/**
+ * vmw_context_binding_state_scrub - Scrub all bindings associated with a
+ * struct vmw_ctx_binding_state structure.
+ *
+ * @cbs: Pointer to the context binding state tracker.
+ *
+ * Emits commands to scrub all bindings associated with the
+ * context binding state tracker.
+ */
+static void vmw_context_binding_state_scrub(struct vmw_ctx_binding_state *cbs)
+{
+	struct vmw_ctx_binding *entry;
+
+	list_for_each_entry(entry, &cbs->list, ctx_list) {
+		if (!entry->bi.scrubbed) {
+			(void) vmw_scrub_funcs[entry->bi.bt](&entry->bi, false);
+			entry->bi.scrubbed = true;
+		}
+	}
+}
+
+/**
+ * vmw_context_binding_res_list_kill - Kill all bindings on a
+ * resource binding list
+ *
+ * @head: list head of resource binding list
+ *
+ * Kills all bindings associated with a specific resource. Typically
+ * called before the resource is destroyed.
+ */
+void vmw_context_binding_res_list_kill(struct list_head *head)
+{
+	struct vmw_ctx_binding *entry, *next;
+
+	list_for_each_entry_safe(entry, next, head, res_list)
+		vmw_context_binding_kill(entry);
+}
+
+/**
+ * vmw_context_binding_res_list_scrub - Scrub all bindings on a
+ * resource binding list
+ *
+ * @head: list head of resource binding list
+ *
+ * Scrub all bindings associated with a specific resource. Typically
+ * called before the resource is evicted.
+ */
+void vmw_context_binding_res_list_scrub(struct list_head *head)
+{
+	struct vmw_ctx_binding *entry;
+
+	list_for_each_entry(entry, head, res_list) {
+		if (!entry->bi.scrubbed) {
+			(void) vmw_scrub_funcs[entry->bi.bt](&entry->bi, false);
+			entry->bi.scrubbed = true;
+		}
+	}
+}
+
+/**
+ * vmw_context_binding_state_transfer - Commit staged binding info
+ *
+ * @ctx: Pointer to context to commit the staged binding info to.
+ * @from: Staged binding info built during execbuf.
+ *
+ * Transfers binding info from a temporary structure to the persistent
+ * structure in the context. This can be done once commands have been submitted.
+ */
+void vmw_context_binding_state_transfer(struct vmw_resource *ctx,
+					struct vmw_ctx_binding_state *from)
+{
+	struct vmw_user_context *uctx =
+		container_of(ctx, struct vmw_user_context, res);
+	struct vmw_ctx_binding *entry, *next;
+
+	list_for_each_entry_safe(entry, next, &from->list, ctx_list)
+		vmw_context_binding_transfer(&uctx->cbs, &entry->bi);
+}
+
+/**
+ * vmw_context_rebind_all - Rebind all scrubbed bindings of a context
+ *
+ * @ctx: The context resource
+ *
+ * Walks through the context binding list and rebinds all scrubbed
+ * resources.
+ */
+int vmw_context_rebind_all(struct vmw_resource *ctx)
+{
+	struct vmw_ctx_binding *entry;
+	struct vmw_user_context *uctx =
+		container_of(ctx, struct vmw_user_context, res);
+	struct vmw_ctx_binding_state *cbs = &uctx->cbs;
+	int ret;
+
+	list_for_each_entry(entry, &cbs->list, ctx_list) {
+		if (likely(!entry->bi.scrubbed))
+			continue;
+
+		if (WARN_ON(entry->bi.res == NULL || entry->bi.res->id ==
+			    SVGA3D_INVALID_ID))
+			continue;
+
+		ret = vmw_scrub_funcs[entry->bi.bt](&entry->bi, true);
+		if (unlikely(ret != 0))
+			return ret;
+
+		entry->bi.scrubbed = false;
+	}
+
+	return 0;
+}
+
+/**
+ * vmw_context_binding_list - Return a list of context bindings
+ *
+ * @ctx: The context resource
+ *
+ * Returns the current list of bindings of the given context. Note that
+ * this list becomes stale as soon as the dev_priv::binding_mutex is unlocked.
+ */
+struct list_head *vmw_context_binding_list(struct vmw_resource *ctx)
+{
+	return &(container_of(ctx, struct vmw_user_context, res)->cbs.list);
+}
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_dmabuf.c b/drivers/gpu/drm/vmwgfx/vmwgfx_dmabuf.c
index d4e54fc..a758402 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_dmabuf.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_dmabuf.c
@@ -290,8 +290,7 @@
 /**
  * vmw_bo_pin - Pin or unpin a buffer object without moving it.
  *
- * @bo: The buffer object. Must be reserved, and present either in VRAM
- * or GMR memory.
+ * @bo: The buffer object. Must be reserved.
  * @pin: Whether to pin or unpin.
  *
  */
@@ -303,10 +302,9 @@
 	int ret;
 
 	lockdep_assert_held(&bo->resv->lock.base);
-	BUG_ON(old_mem_type != TTM_PL_VRAM &&
-	       old_mem_type != VMW_PL_GMR);
 
-	pl_flags = TTM_PL_FLAG_VRAM | VMW_PL_FLAG_GMR | TTM_PL_FLAG_CACHED;
+	pl_flags = TTM_PL_FLAG_VRAM | VMW_PL_FLAG_GMR | VMW_PL_FLAG_MOB
+		| TTM_PL_FLAG_SYSTEM | TTM_PL_FLAG_CACHED;
 	if (pin)
 		pl_flags |= TTM_PL_FLAG_NO_EVICT;
 
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
index c7a5496..0083cbf 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
@@ -112,6 +112,21 @@
 #define DRM_IOCTL_VMW_UPDATE_LAYOUT				\
 	DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_UPDATE_LAYOUT,	\
 		 struct drm_vmw_update_layout_arg)
+#define DRM_IOCTL_VMW_CREATE_SHADER				\
+	DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_CREATE_SHADER,	\
+		 struct drm_vmw_shader_create_arg)
+#define DRM_IOCTL_VMW_UNREF_SHADER				\
+	DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_UNREF_SHADER,	\
+		 struct drm_vmw_shader_arg)
+#define DRM_IOCTL_VMW_GB_SURFACE_CREATE				\
+	DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_GB_SURFACE_CREATE,	\
+		 union drm_vmw_gb_surface_create_arg)
+#define DRM_IOCTL_VMW_GB_SURFACE_REF				\
+	DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_GB_SURFACE_REF,	\
+		 union drm_vmw_gb_surface_reference_arg)
+#define DRM_IOCTL_VMW_SYNCCPU					\
+	DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_SYNCCPU,		\
+		 struct drm_vmw_synccpu_arg)
 
 /**
  * The core DRM version of this macro doesn't account for
@@ -177,6 +192,21 @@
 	VMW_IOCTL_DEF(VMW_UPDATE_LAYOUT,
 		      vmw_kms_update_layout_ioctl,
 		      DRM_MASTER | DRM_UNLOCKED),
+	VMW_IOCTL_DEF(VMW_CREATE_SHADER,
+		      vmw_shader_define_ioctl,
+		      DRM_AUTH | DRM_UNLOCKED),
+	VMW_IOCTL_DEF(VMW_UNREF_SHADER,
+		      vmw_shader_destroy_ioctl,
+		      DRM_AUTH | DRM_UNLOCKED),
+	VMW_IOCTL_DEF(VMW_GB_SURFACE_CREATE,
+		      vmw_gb_surface_define_ioctl,
+		      DRM_AUTH | DRM_UNLOCKED),
+	VMW_IOCTL_DEF(VMW_GB_SURFACE_REF,
+		      vmw_gb_surface_reference_ioctl,
+		      DRM_AUTH | DRM_UNLOCKED),
+	VMW_IOCTL_DEF(VMW_SYNCCPU,
+		      vmw_user_dmabuf_synccpu_ioctl,
+		      DRM_AUTH | DRM_UNLOCKED),
 };
 
 static struct pci_device_id vmw_pci_id_list[] = {
@@ -189,6 +219,7 @@
 static int vmw_force_iommu;
 static int vmw_restrict_iommu;
 static int vmw_force_coherent;
+static int vmw_restrict_dma_mask;
 
 static int vmw_probe(struct pci_dev *, const struct pci_device_id *);
 static void vmw_master_init(struct vmw_master *);
@@ -203,6 +234,8 @@
 module_param_named(restrict_iommu, vmw_restrict_iommu, int, 0600);
 MODULE_PARM_DESC(force_coherent, "Force coherent TTM pages");
 module_param_named(force_coherent, vmw_force_coherent, int, 0600);
+MODULE_PARM_DESC(restrict_dma_mask, "Restrict DMA mask to 44 bits with IOMMU");
+module_param_named(restrict_dma_mask, vmw_restrict_dma_mask, int, 0600);
 
 
 static void vmw_print_capabilities(uint32_t capabilities)
@@ -240,52 +273,14 @@
 		DRM_INFO("  GMR2.\n");
 	if (capabilities & SVGA_CAP_SCREEN_OBJECT_2)
 		DRM_INFO("  Screen Object 2.\n");
+	if (capabilities & SVGA_CAP_COMMAND_BUFFERS)
+		DRM_INFO("  Command Buffers.\n");
+	if (capabilities & SVGA_CAP_CMD_BUFFERS_2)
+		DRM_INFO("  Command Buffers 2.\n");
+	if (capabilities & SVGA_CAP_GBOBJECTS)
+		DRM_INFO("  Guest Backed Resources.\n");
 }
 
-
-/**
- * vmw_execbuf_prepare_dummy_query - Initialize a query result structure at
- * the start of a buffer object.
- *
- * @dev_priv: The device private structure.
- *
- * This function will idle the buffer using an uninterruptible wait, then
- * map the first page and initialize a pending occlusion query result structure,
- * Finally it will unmap the buffer.
- *
- * TODO: Since we're only mapping a single page, we should optimize the map
- * to use kmap_atomic / iomap_atomic.
- */
-static void vmw_dummy_query_bo_prepare(struct vmw_private *dev_priv)
-{
-	struct ttm_bo_kmap_obj map;
-	volatile SVGA3dQueryResult *result;
-	bool dummy;
-	int ret;
-	struct ttm_bo_device *bdev = &dev_priv->bdev;
-	struct ttm_buffer_object *bo = dev_priv->dummy_query_bo;
-
-	ttm_bo_reserve(bo, false, false, false, 0);
-	spin_lock(&bdev->fence_lock);
-	ret = ttm_bo_wait(bo, false, false, false);
-	spin_unlock(&bdev->fence_lock);
-	if (unlikely(ret != 0))
-		(void) vmw_fallback_wait(dev_priv, false, true, 0, false,
-					 10*HZ);
-
-	ret = ttm_bo_kmap(bo, 0, 1, &map);
-	if (likely(ret == 0)) {
-		result = ttm_kmap_obj_virtual(&map, &dummy);
-		result->totalSize = sizeof(*result);
-		result->state = SVGA3D_QUERYSTATE_PENDING;
-		result->result32 = 0xff;
-		ttm_bo_kunmap(&map);
-	} else
-		DRM_ERROR("Dummy query buffer map failed.\n");
-	ttm_bo_unreserve(bo);
-}
-
-
 /**
  * vmw_dummy_query_bo_create - create a bo to hold a dummy query result
  *
@@ -293,20 +288,57 @@
  *
  * This function creates a small buffer object that holds the query
  * result for dummy queries emitted as query barriers.
+ * The function will then map the first page and initialize a pending
+ * occlusion query result structure. Finally it will unmap the buffer.
  * No interruptible waits are done within this function.
  *
- * Returns an error if bo creation fails.
+ * Returns an error if bo creation or initialization fails.
  */
 static int vmw_dummy_query_bo_create(struct vmw_private *dev_priv)
 {
-	return ttm_bo_create(&dev_priv->bdev,
-			     PAGE_SIZE,
-			     ttm_bo_type_device,
-			     &vmw_vram_sys_placement,
-			     0, false, NULL,
-			     &dev_priv->dummy_query_bo);
-}
+	int ret;
+	struct ttm_buffer_object *bo;
+	struct ttm_bo_kmap_obj map;
+	volatile SVGA3dQueryResult *result;
+	bool dummy;
 
+	/*
+	 * Create the bo as pinned, so that a tryreserve will
+	 * immediately succeed. This is because we're the only
+	 * user of the bo currently.
+	 */
+	ret = ttm_bo_create(&dev_priv->bdev,
+			    PAGE_SIZE,
+			    ttm_bo_type_device,
+			    &vmw_sys_ne_placement,
+			    0, false, NULL,
+			    &bo);
+
+	if (unlikely(ret != 0))
+		return ret;
+
+	ret = ttm_bo_reserve(bo, false, true, false, 0);
+	BUG_ON(ret != 0);
+
+	ret = ttm_bo_kmap(bo, 0, 1, &map);
+	if (likely(ret == 0)) {
+		result = ttm_kmap_obj_virtual(&map, &dummy);
+		result->totalSize = sizeof(*result);
+		result->state = SVGA3D_QUERYSTATE_PENDING;
+		result->result32 = 0xff;
+		ttm_bo_kunmap(&map);
+	}
+	vmw_bo_pin(bo, false);
+	ttm_bo_unreserve(bo);
+
+	if (unlikely(ret != 0)) {
+		DRM_ERROR("Dummy query buffer map failed.\n");
+		ttm_bo_unref(&bo);
+	} else
+		dev_priv->dummy_query_bo = bo;
+
+	return ret;
+}
 
 static int vmw_request_device(struct vmw_private *dev_priv)
 {
@@ -318,14 +350,24 @@
 		return ret;
 	}
 	vmw_fence_fifo_up(dev_priv->fman);
+	if (dev_priv->has_mob) {
+		ret = vmw_otables_setup(dev_priv);
+		if (unlikely(ret != 0)) {
+			DRM_ERROR("Unable to initialize "
+				  "guest Memory OBjects.\n");
+			goto out_no_mob;
+		}
+	}
 	ret = vmw_dummy_query_bo_create(dev_priv);
 	if (unlikely(ret != 0))
 		goto out_no_query_bo;
-	vmw_dummy_query_bo_prepare(dev_priv);
 
 	return 0;
 
 out_no_query_bo:
+	if (dev_priv->has_mob)
+		vmw_otables_takedown(dev_priv);
+out_no_mob:
 	vmw_fence_fifo_down(dev_priv->fman);
 	vmw_fifo_release(dev_priv, &dev_priv->fifo);
 	return ret;
@@ -341,10 +383,13 @@
 	BUG_ON(dev_priv->pinned_bo != NULL);
 
 	ttm_bo_unref(&dev_priv->dummy_query_bo);
+	if (dev_priv->has_mob)
+		vmw_otables_takedown(dev_priv);
 	vmw_fence_fifo_down(dev_priv->fman);
 	vmw_fifo_release(dev_priv, &dev_priv->fifo);
 }
 
+
 /**
  * Increase the 3d resource refcount.
  * If the count was previously zero, initialize the fifo, switching to svga
@@ -510,6 +555,33 @@
 	return 0;
 }
 
+/**
+ * vmw_dma_masks - set required page- and dma masks
+ *
+ * @dev_priv: Pointer to the struct vmw_private.
+ *
+ * With a 32-bit kernel we can only handle 32-bit PFNs. Optionally set that
+ * restriction also for 64-bit systems.
+ */
+#ifdef CONFIG_INTEL_IOMMU
+static int vmw_dma_masks(struct vmw_private *dev_priv)
+{
+	struct drm_device *dev = dev_priv->dev;
+
+	if (intel_iommu_enabled &&
+	    (sizeof(unsigned long) == 4 || vmw_restrict_dma_mask)) {
+		DRM_INFO("Restricting DMA addresses to 44 bits.\n");
+		return dma_set_mask(dev->dev, DMA_BIT_MASK(44));
+	}
+	return 0;
+}
+#else
+static int vmw_dma_masks(struct vmw_private *dev_priv)
+{
+	return 0;
+}
+#endif
+
 static int vmw_driver_load(struct drm_device *dev, unsigned long chipset)
 {
 	struct vmw_private *dev_priv;
@@ -532,6 +604,7 @@
 	mutex_init(&dev_priv->hw_mutex);
 	mutex_init(&dev_priv->cmdbuf_mutex);
 	mutex_init(&dev_priv->release_mutex);
+	mutex_init(&dev_priv->binding_mutex);
 	rwlock_init(&dev_priv->resource_lock);
 
 	for (i = vmw_res_context; i < vmw_res_max; ++i) {
@@ -578,14 +651,9 @@
 
 	vmw_get_initial_size(dev_priv);
 
-	if (dev_priv->capabilities & SVGA_CAP_GMR) {
-		dev_priv->max_gmr_descriptors =
-			vmw_read(dev_priv,
-				 SVGA_REG_GMR_MAX_DESCRIPTOR_LENGTH);
+	if (dev_priv->capabilities & SVGA_CAP_GMR2) {
 		dev_priv->max_gmr_ids =
 			vmw_read(dev_priv, SVGA_REG_GMR_MAX_IDS);
-	}
-	if (dev_priv->capabilities & SVGA_CAP_GMR2) {
 		dev_priv->max_gmr_pages =
 			vmw_read(dev_priv, SVGA_REG_GMRS_MAX_PAGES);
 		dev_priv->memory_size =
@@ -598,23 +666,45 @@
 		 */
 		dev_priv->memory_size = 512*1024*1024;
 	}
+	dev_priv->max_mob_pages = 0;
+	dev_priv->max_mob_size = 0;
+	if (dev_priv->capabilities & SVGA_CAP_GBOBJECTS) {
+		uint64_t mem_size =
+			vmw_read(dev_priv,
+				 SVGA_REG_SUGGESTED_GBOBJECT_MEM_SIZE_KB);
+
+		dev_priv->max_mob_pages = mem_size * 1024 / PAGE_SIZE;
+		dev_priv->prim_bb_mem =
+			vmw_read(dev_priv,
+				 SVGA_REG_MAX_PRIMARY_BOUNDING_BOX_MEM);
+		dev_priv->max_mob_size =
+			vmw_read(dev_priv, SVGA_REG_MOB_MAX_SIZE);
+	} else
+		dev_priv->prim_bb_mem = dev_priv->vram_size;
+
+	ret = vmw_dma_masks(dev_priv);
+	if (unlikely(ret != 0)) {
+		mutex_unlock(&dev_priv->hw_mutex);
+		goto out_err0;
+	}
+
+	if (unlikely(dev_priv->prim_bb_mem < dev_priv->vram_size))
+		dev_priv->prim_bb_mem = dev_priv->vram_size;
 
 	mutex_unlock(&dev_priv->hw_mutex);
 
 	vmw_print_capabilities(dev_priv->capabilities);
 
-	if (dev_priv->capabilities & SVGA_CAP_GMR) {
+	if (dev_priv->capabilities & SVGA_CAP_GMR2) {
 		DRM_INFO("Max GMR ids is %u\n",
 			 (unsigned)dev_priv->max_gmr_ids);
-		DRM_INFO("Max GMR descriptors is %u\n",
-			 (unsigned)dev_priv->max_gmr_descriptors);
-	}
-	if (dev_priv->capabilities & SVGA_CAP_GMR2) {
 		DRM_INFO("Max number of GMR pages is %u\n",
 			 (unsigned)dev_priv->max_gmr_pages);
 		DRM_INFO("Max dedicated hypervisor surface memory is %u kiB\n",
 			 (unsigned)dev_priv->memory_size / 1024);
 	}
+	DRM_INFO("Maximum display memory size is %u kiB\n",
+		 dev_priv->prim_bb_mem / 1024);
 	DRM_INFO("VRAM at 0x%08x size is %u kiB\n",
 		 dev_priv->vram_start, dev_priv->vram_size / 1024);
 	DRM_INFO("MMIO at 0x%08x size is %u kiB\n",
@@ -649,12 +739,22 @@
 	dev_priv->has_gmr = true;
 	if (((dev_priv->capabilities & (SVGA_CAP_GMR | SVGA_CAP_GMR2)) == 0) ||
 	    refuse_dma || ttm_bo_init_mm(&dev_priv->bdev, VMW_PL_GMR,
-					 dev_priv->max_gmr_ids) != 0) {
+					 VMW_PL_GMR) != 0) {
 		DRM_INFO("No GMR memory available. "
 			 "Graphics memory resources are very limited.\n");
 		dev_priv->has_gmr = false;
 	}
 
+	if (dev_priv->capabilities & SVGA_CAP_GBOBJECTS) {
+		dev_priv->has_mob = true;
+		if (ttm_bo_init_mm(&dev_priv->bdev, VMW_PL_MOB,
+				   VMW_PL_MOB) != 0) {
+			DRM_INFO("No MOB memory available. "
+				 "3D will be disabled.\n");
+			dev_priv->has_mob = false;
+		}
+	}
+
 	dev_priv->mmio_mtrr = arch_phys_wc_add(dev_priv->mmio_start,
 					       dev_priv->mmio_size);
 
@@ -757,6 +857,8 @@
 	iounmap(dev_priv->mmio_virt);
 out_err3:
 	arch_phys_wc_del(dev_priv->mmio_mtrr);
+	if (dev_priv->has_mob)
+		(void) ttm_bo_clean_mm(&dev_priv->bdev, VMW_PL_MOB);
 	if (dev_priv->has_gmr)
 		(void) ttm_bo_clean_mm(&dev_priv->bdev, VMW_PL_GMR);
 	(void)ttm_bo_clean_mm(&dev_priv->bdev, TTM_PL_VRAM);
@@ -801,6 +903,8 @@
 	ttm_object_device_release(&dev_priv->tdev);
 	iounmap(dev_priv->mmio_virt);
 	arch_phys_wc_del(dev_priv->mmio_mtrr);
+	if (dev_priv->has_mob)
+		(void) ttm_bo_clean_mm(&dev_priv->bdev, VMW_PL_MOB);
 	if (dev_priv->has_gmr)
 		(void)ttm_bo_clean_mm(&dev_priv->bdev, VMW_PL_GMR);
 	(void)ttm_bo_clean_mm(&dev_priv->bdev, TTM_PL_VRAM);
@@ -840,6 +944,7 @@
 		drm_master_put(&vmw_fp->locked_master);
 	}
 
+	vmw_compat_shader_man_destroy(vmw_fp->shman);
 	ttm_object_file_release(&vmw_fp->tfile);
 	kfree(vmw_fp);
 }
@@ -859,11 +964,17 @@
 	if (unlikely(vmw_fp->tfile == NULL))
 		goto out_no_tfile;
 
+	vmw_fp->shman = vmw_compat_shader_man_create(dev_priv);
+	if (IS_ERR(vmw_fp->shman))
+		goto out_no_shman;
+
 	file_priv->driver_priv = vmw_fp;
 	dev_priv->bdev.dev_mapping = dev->dev_mapping;
 
 	return 0;
 
+out_no_shman:
+	ttm_object_file_release(&vmw_fp->tfile);
 out_no_tfile:
 	kfree(vmw_fp);
 	return ret;
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
index 20890ad..9e4be17 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
@@ -40,9 +40,9 @@
 #include <drm/ttm/ttm_module.h>
 #include "vmwgfx_fence.h"
 
-#define VMWGFX_DRIVER_DATE "20120209"
+#define VMWGFX_DRIVER_DATE "20121114"
 #define VMWGFX_DRIVER_MAJOR 2
-#define VMWGFX_DRIVER_MINOR 4
+#define VMWGFX_DRIVER_MINOR 5
 #define VMWGFX_DRIVER_PATCHLEVEL 0
 #define VMWGFX_FILE_PAGE_OFFSET 0x00100000
 #define VMWGFX_FIFO_STATIC_SIZE (1024*1024)
@@ -50,19 +50,39 @@
 #define VMWGFX_MAX_VALIDATIONS 2048
 #define VMWGFX_MAX_DISPLAYS 16
 #define VMWGFX_CMD_BOUNCE_INIT_SIZE 32768
+#define VMWGFX_ENABLE_SCREEN_TARGET_OTABLE 0
+
+/*
+ * Perhaps we should have sysfs entries for these.
+ */
+#define VMWGFX_NUM_GB_CONTEXT 256
+#define VMWGFX_NUM_GB_SHADER 20000
+#define VMWGFX_NUM_GB_SURFACE 32768
+#define VMWGFX_NUM_GB_SCREEN_TARGET VMWGFX_MAX_DISPLAYS
+#define VMWGFX_NUM_MOB (VMWGFX_NUM_GB_CONTEXT +\
+			VMWGFX_NUM_GB_SHADER +\
+			VMWGFX_NUM_GB_SURFACE +\
+			VMWGFX_NUM_GB_SCREEN_TARGET)
 
 #define VMW_PL_GMR TTM_PL_PRIV0
 #define VMW_PL_FLAG_GMR TTM_PL_FLAG_PRIV0
+#define VMW_PL_MOB TTM_PL_PRIV1
+#define VMW_PL_FLAG_MOB TTM_PL_FLAG_PRIV1
 
 #define VMW_RES_CONTEXT ttm_driver_type0
 #define VMW_RES_SURFACE ttm_driver_type1
 #define VMW_RES_STREAM ttm_driver_type2
 #define VMW_RES_FENCE ttm_driver_type3
+#define VMW_RES_SHADER ttm_driver_type4
+
+struct vmw_compat_shader_manager;
 
 struct vmw_fpriv {
 	struct drm_master *locked_master;
 	struct ttm_object_file *tfile;
 	struct list_head fence_events;
+	bool gb_aware;
+	struct vmw_compat_shader_manager *shman;
 };
 
 struct vmw_dma_buffer {
@@ -82,6 +102,7 @@
 struct vmw_validate_buffer {
 	struct ttm_validate_buffer base;
 	struct drm_hash_item hash;
+	bool validate_as_mob;
 };
 
 struct vmw_res_func;
@@ -98,6 +119,7 @@
 	const struct vmw_res_func *func;
 	struct list_head lru_head; /* Protected by the resource lock */
 	struct list_head mob_head; /* Protected by @backup reserved */
+	struct list_head binding_head; /* Protected by binding_mutex */
 	void (*res_free) (struct vmw_resource *res);
 	void (*hw_destroy) (struct vmw_resource *res);
 };
@@ -106,6 +128,7 @@
 	vmw_res_context,
 	vmw_res_surface,
 	vmw_res_stream,
+	vmw_res_shader,
 	vmw_res_max
 };
 
@@ -154,6 +177,7 @@
 };
 
 struct vmw_relocation {
+	SVGAMobId *mob_loc;
 	SVGAGuestPtr *location;
 	uint32_t index;
 };
@@ -229,11 +253,77 @@
 	struct page *(*page)(struct vmw_piter *);
 };
 
+/*
+ * enum vmw_ctx_binding_type - abstract resource to context binding types
+ */
+enum vmw_ctx_binding_type {
+	vmw_ctx_binding_shader,
+	vmw_ctx_binding_rt,
+	vmw_ctx_binding_tex,
+	vmw_ctx_binding_max
+};
+
+/**
+ * struct vmw_ctx_bindinfo - structure representing a single context binding
+ *
+ * @ctx: Pointer to the context structure. NULL means the binding is not
+ * active.
+ * @res: Non ref-counted pointer to the bound resource.
+ * @bt: The binding type.
+ * @i1: Union of information needed to unbind.
+ */
+struct vmw_ctx_bindinfo {
+	struct vmw_resource *ctx;
+	struct vmw_resource *res;
+	enum vmw_ctx_binding_type bt;
+	bool scrubbed;
+	union {
+		SVGA3dShaderType shader_type;
+		SVGA3dRenderTargetType rt_type;
+		uint32 texture_stage;
+	} i1;
+};
+
+/**
+ * struct vmw_ctx_binding - structure representing a single context binding
+ *                        - suitable for tracking in a context
+ *
+ * @ctx_list: List head for context.
+ * @res_list: List head for bound resource.
+ * @bi: Binding info
+ */
+struct vmw_ctx_binding {
+	struct list_head ctx_list;
+	struct list_head res_list;
+	struct vmw_ctx_bindinfo bi;
+};
+
+
+/**
+ * struct vmw_ctx_binding_state - context binding state
+ *
+ * @list: linked list of individual bindings.
+ * @render_targets: Render target bindings.
+ * @texture_units: Texture units/samplers bindings.
+ * @shaders: Shader bindings.
+ *
+ * Note that this structure also provides storage space for the individual
+ * struct vmw_ctx_binding objects, so that no dynamic allocation is needed
+ * for individual bindings.
+ *
+ */
+struct vmw_ctx_binding_state {
+	struct list_head list;
+	struct vmw_ctx_binding render_targets[SVGA3D_RT_MAX];
+	struct vmw_ctx_binding texture_units[SVGA3D_NUM_TEXTURE_UNITS];
+	struct vmw_ctx_binding shaders[SVGA3D_SHADERTYPE_MAX];
+};
+
 struct vmw_sw_context{
 	struct drm_open_hash res_ht;
 	bool res_ht_initialized;
 	bool kernel; /**< is the call made from the kernel */
-	struct ttm_object_file *tfile;
+	struct vmw_fpriv *fp;
 	struct list_head validate_nodes;
 	struct vmw_relocation relocs[VMWGFX_MAX_RELOCATIONS];
 	uint32_t cur_reloc;
@@ -250,6 +340,8 @@
 	struct vmw_resource *last_query_ctx;
 	bool needs_post_query_barrier;
 	struct vmw_resource *error_resource;
+	struct vmw_ctx_binding_state staged_bindings;
+	struct list_head staged_shaders;
 };
 
 struct vmw_legacy_display;
@@ -281,6 +373,7 @@
 	unsigned int io_start;
 	uint32_t vram_start;
 	uint32_t vram_size;
+	uint32_t prim_bb_mem;
 	uint32_t mmio_start;
 	uint32_t mmio_size;
 	uint32_t fb_max_width;
@@ -290,11 +383,13 @@
 	__le32 __iomem *mmio_virt;
 	int mmio_mtrr;
 	uint32_t capabilities;
-	uint32_t max_gmr_descriptors;
 	uint32_t max_gmr_ids;
 	uint32_t max_gmr_pages;
+	uint32_t max_mob_pages;
+	uint32_t max_mob_size;
 	uint32_t memory_size;
 	bool has_gmr;
+	bool has_mob;
 	struct mutex hw_mutex;
 
 	/*
@@ -370,6 +465,7 @@
 
 	struct vmw_sw_context ctx;
 	struct mutex cmdbuf_mutex;
+	struct mutex binding_mutex;
 
 	/**
 	 * Operating mode.
@@ -415,6 +511,12 @@
 	 * DMA mapping stuff.
 	 */
 	enum vmw_dma_map_mode map_mode;
+
+	/*
+	 * Guest Backed stuff
+	 */
+	struct ttm_buffer_object *otable_bo;
+	struct vmw_otable *otables;
 };
 
 static inline struct vmw_surface *vmw_res_to_srf(struct vmw_resource *res)
@@ -471,23 +573,14 @@
  * Resource utilities - vmwgfx_resource.c
  */
 struct vmw_user_resource_conv;
-extern const struct vmw_user_resource_conv *user_surface_converter;
-extern const struct vmw_user_resource_conv *user_context_converter;
 
-extern struct vmw_resource *vmw_context_alloc(struct vmw_private *dev_priv);
 extern void vmw_resource_unreference(struct vmw_resource **p_res);
 extern struct vmw_resource *vmw_resource_reference(struct vmw_resource *res);
+extern struct vmw_resource *
+vmw_resource_reference_unless_doomed(struct vmw_resource *res);
 extern int vmw_resource_validate(struct vmw_resource *res);
 extern int vmw_resource_reserve(struct vmw_resource *res, bool no_backup);
 extern bool vmw_resource_needs_backup(const struct vmw_resource *res);
-extern int vmw_context_destroy_ioctl(struct drm_device *dev, void *data,
-				     struct drm_file *file_priv);
-extern int vmw_context_define_ioctl(struct drm_device *dev, void *data,
-				    struct drm_file *file_priv);
-extern int vmw_context_check(struct vmw_private *dev_priv,
-			     struct ttm_object_file *tfile,
-			     int id,
-			     struct vmw_resource **p_res);
 extern int vmw_user_lookup_handle(struct vmw_private *dev_priv,
 				  struct ttm_object_file *tfile,
 				  uint32_t handle,
@@ -499,18 +592,6 @@
 	uint32_t handle,
 	const struct vmw_user_resource_conv *converter,
 	struct vmw_resource **p_res);
-extern void vmw_surface_res_free(struct vmw_resource *res);
-extern int vmw_surface_destroy_ioctl(struct drm_device *dev, void *data,
-				     struct drm_file *file_priv);
-extern int vmw_surface_define_ioctl(struct drm_device *dev, void *data,
-				    struct drm_file *file_priv);
-extern int vmw_surface_reference_ioctl(struct drm_device *dev, void *data,
-				       struct drm_file *file_priv);
-extern int vmw_surface_check(struct vmw_private *dev_priv,
-			     struct ttm_object_file *tfile,
-			     uint32_t handle, int *id);
-extern int vmw_surface_validate(struct vmw_private *dev_priv,
-				struct vmw_surface *srf);
 extern void vmw_dmabuf_bo_free(struct ttm_buffer_object *bo);
 extern int vmw_dmabuf_init(struct vmw_private *dev_priv,
 			   struct vmw_dma_buffer *vmw_bo,
@@ -519,10 +600,21 @@
 			   void (*bo_free) (struct ttm_buffer_object *bo));
 extern int vmw_user_dmabuf_verify_access(struct ttm_buffer_object *bo,
 				  struct ttm_object_file *tfile);
+extern int vmw_user_dmabuf_alloc(struct vmw_private *dev_priv,
+				 struct ttm_object_file *tfile,
+				 uint32_t size,
+				 bool shareable,
+				 uint32_t *handle,
+				 struct vmw_dma_buffer **p_dma_buf);
+extern int vmw_user_dmabuf_reference(struct ttm_object_file *tfile,
+				     struct vmw_dma_buffer *dma_buf,
+				     uint32_t *handle);
 extern int vmw_dmabuf_alloc_ioctl(struct drm_device *dev, void *data,
 				  struct drm_file *file_priv);
 extern int vmw_dmabuf_unref_ioctl(struct drm_device *dev, void *data,
 				  struct drm_file *file_priv);
+extern int vmw_user_dmabuf_synccpu_ioctl(struct drm_device *dev, void *data,
+					 struct drm_file *file_priv);
 extern uint32_t vmw_dmabuf_validate_node(struct ttm_buffer_object *bo,
 					 uint32_t cur_validate_node);
 extern void vmw_dmabuf_validate_clear(struct ttm_buffer_object *bo);
@@ -622,10 +714,16 @@
 extern struct ttm_placement vmw_vram_gmr_placement;
 extern struct ttm_placement vmw_vram_gmr_ne_placement;
 extern struct ttm_placement vmw_sys_placement;
+extern struct ttm_placement vmw_sys_ne_placement;
 extern struct ttm_placement vmw_evictable_placement;
 extern struct ttm_placement vmw_srf_placement;
+extern struct ttm_placement vmw_mob_placement;
 extern struct ttm_bo_driver vmw_bo_driver;
 extern int vmw_dma_quiescent(struct drm_device *dev);
+extern int vmw_bo_map_dma(struct ttm_buffer_object *bo);
+extern void vmw_bo_unmap_dma(struct ttm_buffer_object *bo);
+extern const struct vmw_sg_table *
+vmw_bo_sg_table(struct ttm_buffer_object *bo);
 extern void vmw_piter_start(struct vmw_piter *viter,
 			    const struct vmw_sg_table *vsgt,
 			    unsigned long p_offs);
@@ -701,7 +799,7 @@
  * IRQs and waiting - vmwgfx_irq.c
  */
 
-extern irqreturn_t vmw_irq_handler(DRM_IRQ_ARGS);
+extern irqreturn_t vmw_irq_handler(int irq, void *arg);
 extern int vmw_wait_seqno(struct vmw_private *dev_priv, bool lazy,
 			     uint32_t seqno, bool interruptible,
 			     unsigned long timeout);
@@ -832,6 +930,101 @@
 				  uint32_t handle, uint32_t flags,
 				  int *prime_fd);
 
+/*
+ * MemoryOBject management -  vmwgfx_mob.c
+ */
+struct vmw_mob;
+extern int vmw_mob_bind(struct vmw_private *dev_priv, struct vmw_mob *mob,
+			const struct vmw_sg_table *vsgt,
+			unsigned long num_data_pages, int32_t mob_id);
+extern void vmw_mob_unbind(struct vmw_private *dev_priv,
+			   struct vmw_mob *mob);
+extern void vmw_mob_destroy(struct vmw_mob *mob);
+extern struct vmw_mob *vmw_mob_create(unsigned long data_pages);
+extern int vmw_otables_setup(struct vmw_private *dev_priv);
+extern void vmw_otables_takedown(struct vmw_private *dev_priv);
+
+/*
+ * Context management - vmwgfx_context.c
+ */
+
+extern const struct vmw_user_resource_conv *user_context_converter;
+
+extern struct vmw_resource *vmw_context_alloc(struct vmw_private *dev_priv);
+
+extern int vmw_context_check(struct vmw_private *dev_priv,
+			     struct ttm_object_file *tfile,
+			     int id,
+			     struct vmw_resource **p_res);
+extern int vmw_context_define_ioctl(struct drm_device *dev, void *data,
+				    struct drm_file *file_priv);
+extern int vmw_context_destroy_ioctl(struct drm_device *dev, void *data,
+				     struct drm_file *file_priv);
+extern int vmw_context_binding_add(struct vmw_ctx_binding_state *cbs,
+				   const struct vmw_ctx_bindinfo *ci);
+extern void
+vmw_context_binding_state_transfer(struct vmw_resource *res,
+				   struct vmw_ctx_binding_state *cbs);
+extern void vmw_context_binding_res_list_kill(struct list_head *head);
+extern void vmw_context_binding_res_list_scrub(struct list_head *head);
+extern int vmw_context_rebind_all(struct vmw_resource *ctx);
+extern struct list_head *vmw_context_binding_list(struct vmw_resource *ctx);
+
+/*
+ * Surface management - vmwgfx_surface.c
+ */
+
+extern const struct vmw_user_resource_conv *user_surface_converter;
+
+extern void vmw_surface_res_free(struct vmw_resource *res);
+extern int vmw_surface_destroy_ioctl(struct drm_device *dev, void *data,
+				     struct drm_file *file_priv);
+extern int vmw_surface_define_ioctl(struct drm_device *dev, void *data,
+				    struct drm_file *file_priv);
+extern int vmw_surface_reference_ioctl(struct drm_device *dev, void *data,
+				       struct drm_file *file_priv);
+extern int vmw_gb_surface_define_ioctl(struct drm_device *dev, void *data,
+				       struct drm_file *file_priv);
+extern int vmw_gb_surface_reference_ioctl(struct drm_device *dev, void *data,
+					  struct drm_file *file_priv);
+extern int vmw_surface_check(struct vmw_private *dev_priv,
+			     struct ttm_object_file *tfile,
+			     uint32_t handle, int *id);
+extern int vmw_surface_validate(struct vmw_private *dev_priv,
+				struct vmw_surface *srf);
+
+/*
+ * Shader management - vmwgfx_shader.c
+ */
+
+extern const struct vmw_user_resource_conv *user_shader_converter;
+
+extern int vmw_shader_define_ioctl(struct drm_device *dev, void *data,
+				   struct drm_file *file_priv);
+extern int vmw_shader_destroy_ioctl(struct drm_device *dev, void *data,
+				    struct drm_file *file_priv);
+extern int vmw_compat_shader_lookup(struct vmw_compat_shader_manager *man,
+				    SVGA3dShaderType shader_type,
+				    u32 *user_key);
+extern void vmw_compat_shaders_commit(struct vmw_compat_shader_manager *man,
+				      struct list_head *list);
+extern void vmw_compat_shaders_revert(struct vmw_compat_shader_manager *man,
+				      struct list_head *list);
+extern int vmw_compat_shader_remove(struct vmw_compat_shader_manager *man,
+				    u32 user_key,
+				    SVGA3dShaderType shader_type,
+				    struct list_head *list);
+extern int vmw_compat_shader_add(struct vmw_compat_shader_manager *man,
+				 u32 user_key, const void *bytecode,
+				 SVGA3dShaderType shader_type,
+				 size_t size,
+				 struct ttm_object_file *tfile,
+				 struct list_head *list);
+extern struct vmw_compat_shader_manager *
+vmw_compat_shader_man_create(struct vmw_private *dev_priv);
+extern void
+vmw_compat_shader_man_destroy(struct vmw_compat_shader_manager *man);
+
 
 /**
  * Inline helper functions
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
index 599f646..efb575a 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
@@ -54,6 +54,8 @@
  * @res: Ref-counted pointer to the resource.
  * @switch_backup: Boolean whether to switch backup buffer on unreserve.
  * @new_backup: Refcounted pointer to the new backup buffer.
+ * @staged_bindings: If @res is a context, tracks bindings set up during
+ * the command batch. Otherwise NULL.
  * @new_backup_offset: New backup buffer offset if @new_backup is non-NULL.
  * @first_usage: Set to true the first time the resource is referenced in
  * the command stream.
@@ -65,12 +67,32 @@
 	struct drm_hash_item hash;
 	struct vmw_resource *res;
 	struct vmw_dma_buffer *new_backup;
+	struct vmw_ctx_binding_state *staged_bindings;
 	unsigned long new_backup_offset;
 	bool first_usage;
 	bool no_buffer_needed;
 };
 
 /**
+ * struct vmw_cmd_entry - Describe a command for the verifier
+ *
+ * @user_allow: Whether allowed from the execbuf ioctl.
+ * @gb_disable: Whether disabled if guest-backed objects are available.
+ * @gb_enable: Whether enabled iff guest-backed objects are available.
+ */
+struct vmw_cmd_entry {
+	int (*func) (struct vmw_private *, struct vmw_sw_context *,
+		     SVGA3dCmdHeader *);
+	bool user_allow;
+	bool gb_disable;
+	bool gb_enable;
+};
+
+#define VMW_CMD_DEF(_cmd, _func, _user_allow, _gb_disable, _gb_enable)	\
+	[(_cmd) - SVGA_3D_CMD_BASE] = {(_func), (_user_allow),\
+				       (_gb_disable), (_gb_enable)}
+
+/**
  * vmw_resource_unreserve - unreserve resources previously reserved for
  * command submission.
  *
@@ -87,6 +109,18 @@
 		struct vmw_dma_buffer *new_backup =
 			backoff ? NULL : val->new_backup;
 
+		/*
+		 * Transfer staged context bindings to the
+		 * persistent context binding tracker.
+		 */
+		if (unlikely(val->staged_bindings)) {
+			if (!backoff) {
+				vmw_context_binding_state_transfer
+					(val->res, val->staged_bindings);
+			}
+			kfree(val->staged_bindings);
+			val->staged_bindings = NULL;
+		}
 		vmw_resource_unreserve(res, new_backup,
 			val->new_backup_offset);
 		vmw_dmabuf_unreference(&val->new_backup);
@@ -146,6 +180,44 @@
 }
 
 /**
+ * vmw_resource_context_res_add - Put resources previously bound to a context on
+ * the validation list
+ *
+ * @dev_priv: Pointer to a device private structure
+ * @sw_context: Pointer to a software context used for this command submission
+ * @ctx: Pointer to the context resource
+ *
+ * This function puts all resources that were previously bound to @ctx on
+ * the resource validation list. This is part of the context state re-emission.
+ */
+static int vmw_resource_context_res_add(struct vmw_private *dev_priv,
+					struct vmw_sw_context *sw_context,
+					struct vmw_resource *ctx)
+{
+	struct list_head *binding_list;
+	struct vmw_ctx_binding *entry;
+	int ret = 0;
+	struct vmw_resource *res;
+
+	mutex_lock(&dev_priv->binding_mutex);
+	binding_list = vmw_context_binding_list(ctx);
+
+	list_for_each_entry(entry, binding_list, ctx_list) {
+		res = vmw_resource_reference_unless_doomed(entry->bi.res);
+		if (unlikely(res == NULL))
+			continue;
+
+		ret = vmw_resource_val_add(sw_context, entry->bi.res, NULL);
+		vmw_resource_unreference(&res);
+		if (unlikely(ret != 0))
+			break;
+	}
+
+	mutex_unlock(&dev_priv->binding_mutex);
+	return ret;
+}
+
+/**
  * vmw_resource_relocation_add - Add a relocation to the relocation list
  *
  * @list: Pointer to head of relocation list.
@@ -201,8 +273,12 @@
 {
 	struct vmw_resource_relocation *rel;
 
-	list_for_each_entry(rel, list, head)
-		cb[rel->offset] = rel->res->id;
+	list_for_each_entry(rel, list, head) {
+		if (likely(rel->res != NULL))
+			cb[rel->offset] = rel->res->id;
+		else
+			cb[rel->offset] = SVGA_3D_CMD_NOP;
+	}
 }
 
 static int vmw_cmd_invalid(struct vmw_private *dev_priv,
@@ -224,6 +300,7 @@
  *
  * @sw_context: The software context used for this command submission batch.
  * @bo: The buffer object to add.
+ * @validate_as_mob: Validate this buffer as a MOB.
  * @p_val_node: If non-NULL, will be updated with the validate node number
  * on return.
  *
@@ -232,6 +309,7 @@
  */
 static int vmw_bo_to_validate_list(struct vmw_sw_context *sw_context,
 				   struct ttm_buffer_object *bo,
+				   bool validate_as_mob,
 				   uint32_t *p_val_node)
 {
 	uint32_t val_node;
@@ -244,6 +322,10 @@
 				    &hash) == 0)) {
 		vval_buf = container_of(hash, struct vmw_validate_buffer,
 					hash);
+		if (unlikely(vval_buf->validate_as_mob != validate_as_mob)) {
+			DRM_ERROR("Inconsistent buffer usage.\n");
+			return -EINVAL;
+		}
 		val_buf = &vval_buf->base;
 		val_node = vval_buf - sw_context->val_bufs;
 	} else {
@@ -266,6 +348,7 @@
 		val_buf->bo = ttm_bo_reference(bo);
 		val_buf->reserved = false;
 		list_add_tail(&val_buf->head, &sw_context->validate_nodes);
+		vval_buf->validate_as_mob = validate_as_mob;
 	}
 
 	sw_context->fence_flags |= DRM_VMW_FENCE_FLAG_EXEC;
@@ -302,7 +385,8 @@
 			struct ttm_buffer_object *bo = &res->backup->base;
 
 			ret = vmw_bo_to_validate_list
-				(sw_context, bo, NULL);
+				(sw_context, bo,
+				 vmw_resource_needs_backup(res), NULL);
 
 			if (unlikely(ret != 0))
 				return ret;
@@ -339,22 +423,27 @@
 }
 
 /**
- * vmw_cmd_res_check - Check that a resource is present and if so, put it
+ * vmw_cmd_compat_res_check - Check that a resource is present and if so, put it
  * on the resource validate list unless it's already there.
  *
  * @dev_priv: Pointer to a device private structure.
  * @sw_context: Pointer to the software context.
  * @res_type: Resource type.
  * @converter: User-space visible type specific information.
- * @id: Pointer to the location in the command buffer currently being
+ * @id: user-space resource id handle.
+ * @id_loc: Pointer to the location in the command buffer currently being
  * parsed from where the user-space resource id handle is located.
+ * @p_val: Pointer to pointer to resource validation node. Populated
+ * on exit.
  */
-static int vmw_cmd_res_check(struct vmw_private *dev_priv,
-			     struct vmw_sw_context *sw_context,
-			     enum vmw_res_type res_type,
-			     const struct vmw_user_resource_conv *converter,
-			     uint32_t *id,
-			     struct vmw_resource_val_node **p_val)
+static int
+vmw_cmd_compat_res_check(struct vmw_private *dev_priv,
+			 struct vmw_sw_context *sw_context,
+			 enum vmw_res_type res_type,
+			 const struct vmw_user_resource_conv *converter,
+			 uint32_t id,
+			 uint32_t *id_loc,
+			 struct vmw_resource_val_node **p_val)
 {
 	struct vmw_res_cache_entry *rcache =
 		&sw_context->res_cache[res_type];
@@ -362,15 +451,22 @@
 	struct vmw_resource_val_node *node;
 	int ret;
 
-	if (*id == SVGA3D_INVALID_ID)
+	if (id == SVGA3D_INVALID_ID) {
+		if (p_val)
+			*p_val = NULL;
+		if (res_type == vmw_res_context) {
+			DRM_ERROR("Illegal context invalid id.\n");
+			return -EINVAL;
+		}
 		return 0;
+	}
 
 	/*
 	 * Fastpath in case of repeated commands referencing the same
 	 * resource
 	 */
 
-	if (likely(rcache->valid && *id == rcache->handle)) {
+	if (likely(rcache->valid && id == rcache->handle)) {
 		const struct vmw_resource *res = rcache->res;
 
 		rcache->node->first_usage = false;
@@ -379,28 +475,28 @@
 
 		return vmw_resource_relocation_add
 			(&sw_context->res_relocations, res,
-			 id - sw_context->buf_start);
+			 id_loc - sw_context->buf_start);
 	}
 
 	ret = vmw_user_resource_lookup_handle(dev_priv,
-					      sw_context->tfile,
-					      *id,
+					      sw_context->fp->tfile,
+					      id,
 					      converter,
 					      &res);
 	if (unlikely(ret != 0)) {
 		DRM_ERROR("Could not find or use resource 0x%08x.\n",
-			  (unsigned) *id);
+			  (unsigned) id);
 		dump_stack();
 		return ret;
 	}
 
 	rcache->valid = true;
 	rcache->res = res;
-	rcache->handle = *id;
+	rcache->handle = id;
 
 	ret = vmw_resource_relocation_add(&sw_context->res_relocations,
 					  res,
-					  id - sw_context->buf_start);
+					  id_loc - sw_context->buf_start);
 	if (unlikely(ret != 0))
 		goto out_no_reloc;
 
@@ -411,6 +507,22 @@
 	rcache->node = node;
 	if (p_val)
 		*p_val = node;
+
+	if (dev_priv->has_mob && node->first_usage &&
+	    res_type == vmw_res_context) {
+		ret = vmw_resource_context_res_add(dev_priv, sw_context, res);
+		if (unlikely(ret != 0))
+			goto out_no_reloc;
+		node->staged_bindings =
+			kzalloc(sizeof(*node->staged_bindings), GFP_KERNEL);
+		if (node->staged_bindings == NULL) {
+			DRM_ERROR("Failed to allocate context binding "
+				  "information.\n");
+			goto out_no_reloc;
+		}
+		INIT_LIST_HEAD(&node->staged_bindings->list);
+	}
+
 	vmw_resource_unreference(&res);
 	return 0;
 
@@ -422,6 +534,59 @@
 }
 
 /**
+ * vmw_cmd_res_check - Check that a resource is present and if so, put it
+ * on the resource validate list unless it's already there.
+ *
+ * @dev_priv: Pointer to a device private structure.
+ * @sw_context: Pointer to the software context.
+ * @res_type: Resource type.
+ * @converter: User-space visible type specific information.
+ * @id_loc: Pointer to the location in the command buffer currently being
+ * parsed from where the user-space resource id handle is located.
+ * @p_val: Pointer to pointer to resource validation node. Populated
+ * on exit.
+ */
+static int
+vmw_cmd_res_check(struct vmw_private *dev_priv,
+		  struct vmw_sw_context *sw_context,
+		  enum vmw_res_type res_type,
+		  const struct vmw_user_resource_conv *converter,
+		  uint32_t *id_loc,
+		  struct vmw_resource_val_node **p_val)
+{
+	return vmw_cmd_compat_res_check(dev_priv, sw_context, res_type,
+					converter, *id_loc, id_loc, p_val);
+}
+
+/**
+ * vmw_rebind_contexts - Rebind all resources previously bound to
+ * referenced contexts.
+ *
+ * @sw_context: Pointer to the software context.
+ *
+ * Rebind context binding points that have been scrubbed because of eviction.
+ */
+static int vmw_rebind_contexts(struct vmw_sw_context *sw_context)
+{
+	struct vmw_resource_val_node *val;
+	int ret;
+
+	list_for_each_entry(val, &sw_context->resource_list, head) {
+		if (likely(!val->staged_bindings))
+			continue;
+
+		ret = vmw_context_rebind_all(val->res);
+		if (unlikely(ret != 0)) {
+			if (ret != -ERESTARTSYS)
+				DRM_ERROR("Failed to rebind context.\n");
+			return ret;
+		}
+	}
+
+	return 0;
+}
+
+/**
  * vmw_cmd_cid_check - Check a command header for valid context information.
  *
  * @dev_priv: Pointer to a device private structure.
@@ -437,7 +602,7 @@
 {
 	struct vmw_cid_cmd {
 		SVGA3dCmdHeader header;
-		__le32 cid;
+		uint32_t cid;
 	} *cmd;
 
 	cmd = container_of(header, struct vmw_cid_cmd, header);
@@ -453,17 +618,35 @@
 		SVGA3dCmdHeader header;
 		SVGA3dCmdSetRenderTarget body;
 	} *cmd;
+	struct vmw_resource_val_node *ctx_node;
+	struct vmw_resource_val_node *res_node;
 	int ret;
 
-	ret = vmw_cmd_cid_check(dev_priv, sw_context, header);
+	cmd = container_of(header, struct vmw_sid_cmd, header);
+
+	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
+				user_context_converter, &cmd->body.cid,
+				&ctx_node);
 	if (unlikely(ret != 0))
 		return ret;
 
-	cmd = container_of(header, struct vmw_sid_cmd, header);
 	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
 				user_surface_converter,
-				&cmd->body.target.sid, NULL);
-	return ret;
+				&cmd->body.target.sid, &res_node);
+	if (unlikely(ret != 0))
+		return ret;
+
+	if (dev_priv->has_mob) {
+		struct vmw_ctx_bindinfo bi;
+
+		bi.ctx = ctx_node->res;
+		bi.res = res_node ? res_node->res : NULL;
+		bi.bt = vmw_ctx_binding_rt;
+		bi.i1.rt_type = cmd->body.type;
+		return vmw_context_binding_add(ctx_node->staged_bindings, &bi);
+	}
+
+	return 0;
 }
 
 static int vmw_cmd_surface_copy_check(struct vmw_private *dev_priv,
@@ -519,11 +702,6 @@
 
 	cmd = container_of(header, struct vmw_sid_cmd, header);
 
-	if (unlikely(!sw_context->kernel)) {
-		DRM_ERROR("Kernel only SVGA3d command: %u.\n", cmd->header.id);
-		return -EPERM;
-	}
-
 	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
 				 user_surface_converter,
 				 &cmd->body.srcImage.sid, NULL);
@@ -541,11 +719,6 @@
 
 	cmd = container_of(header, struct vmw_sid_cmd, header);
 
-	if (unlikely(!sw_context->kernel)) {
-		DRM_ERROR("Kernel only SVGA3d command: %u.\n", cmd->header.id);
-		return -EPERM;
-	}
-
 	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
 				 user_surface_converter, &cmd->body.sid,
 				 NULL);
@@ -586,7 +759,7 @@
 			sw_context->needs_post_query_barrier = true;
 			ret = vmw_bo_to_validate_list(sw_context,
 						      sw_context->cur_query_bo,
-						      NULL);
+						      dev_priv->has_mob, NULL);
 			if (unlikely(ret != 0))
 				return ret;
 		}
@@ -594,7 +767,7 @@
 
 		ret = vmw_bo_to_validate_list(sw_context,
 					      dev_priv->dummy_query_bo,
-					      NULL);
+					      dev_priv->has_mob, NULL);
 		if (unlikely(ret != 0))
 			return ret;
 
@@ -672,6 +845,66 @@
 }
 
 /**
+ * vmw_translate_mob_ptr - Prepare to translate a user-space buffer
+ * handle to a MOB id.
+ *
+ * @dev_priv: Pointer to a device private structure.
+ * @sw_context: The software context used for this command batch validation.
+ * @id: Pointer to the user-space handle to be translated.
+ * @vmw_bo_p: Points to a location that, on successful return, will carry
+ * a reference-counted pointer to the DMA buffer identified by the
+ * user-space handle in @id.
+ *
+ * This function saves information needed to translate a user-space buffer
+ * handle to a MOB id. The translation does not take place immediately, but
+ * during a call to vmw_apply_relocations(). This function builds a relocation
+ * list and a list of buffers to validate. The former needs to be freed using
+ * either vmw_apply_relocations() or vmw_free_relocations(). The latter
+ * needs to be freed using vmw_clear_validations().
+ */
+static int vmw_translate_mob_ptr(struct vmw_private *dev_priv,
+				 struct vmw_sw_context *sw_context,
+				 SVGAMobId *id,
+				 struct vmw_dma_buffer **vmw_bo_p)
+{
+	struct vmw_dma_buffer *vmw_bo = NULL;
+	struct ttm_buffer_object *bo;
+	uint32_t handle = *id;
+	struct vmw_relocation *reloc;
+	int ret;
+
+	ret = vmw_user_dmabuf_lookup(sw_context->fp->tfile, handle, &vmw_bo);
+	if (unlikely(ret != 0)) {
+		DRM_ERROR("Could not find or use MOB buffer.\n");
+		return -EINVAL;
+	}
+	bo = &vmw_bo->base;
+
+	if (unlikely(sw_context->cur_reloc >= VMWGFX_MAX_RELOCATIONS)) {
+		DRM_ERROR("Max number of relocations per submission"
+			  " exceeded\n");
+		ret = -EINVAL;
+		goto out_no_reloc;
+	}
+
+	reloc = &sw_context->relocs[sw_context->cur_reloc++];
+	reloc->mob_loc = id;
+	reloc->location = NULL;
+
+	ret = vmw_bo_to_validate_list(sw_context, bo, true, &reloc->index);
+	if (unlikely(ret != 0))
+		goto out_no_reloc;
+
+	*vmw_bo_p = vmw_bo;
+	return 0;
+
+out_no_reloc:
+	vmw_dmabuf_unreference(&vmw_bo);
+	*vmw_bo_p = NULL;
+	return ret;
+}
+
+/**
  * vmw_translate_guest_pointer - Prepare to translate a user-space buffer
  * handle to a valid SVGAGuestPtr
  *
@@ -701,7 +934,7 @@
 	struct vmw_relocation *reloc;
 	int ret;
 
-	ret = vmw_user_dmabuf_lookup(sw_context->tfile, handle, &vmw_bo);
+	ret = vmw_user_dmabuf_lookup(sw_context->fp->tfile, handle, &vmw_bo);
 	if (unlikely(ret != 0)) {
 		DRM_ERROR("Could not find or use GMR region.\n");
 		return -EINVAL;
@@ -718,7 +951,7 @@
 	reloc = &sw_context->relocs[sw_context->cur_reloc++];
 	reloc->location = ptr;
 
-	ret = vmw_bo_to_validate_list(sw_context, bo, &reloc->index);
+	ret = vmw_bo_to_validate_list(sw_context, bo, false, &reloc->index);
 	if (unlikely(ret != 0))
 		goto out_no_reloc;
 
@@ -732,6 +965,30 @@
 }
 
 /**
+ * vmw_cmd_begin_gb_query - validate a  SVGA_3D_CMD_BEGIN_GB_QUERY command.
+ *
+ * @dev_priv: Pointer to a device private struct.
+ * @sw_context: The software context used for this command submission.
+ * @header: Pointer to the command header in the command stream.
+ */
+static int vmw_cmd_begin_gb_query(struct vmw_private *dev_priv,
+				  struct vmw_sw_context *sw_context,
+				  SVGA3dCmdHeader *header)
+{
+	struct vmw_begin_gb_query_cmd {
+		SVGA3dCmdHeader header;
+		SVGA3dCmdBeginGBQuery q;
+	} *cmd;
+
+	cmd = container_of(header, struct vmw_begin_gb_query_cmd,
+			   header);
+
+	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
+				 user_context_converter, &cmd->q.cid,
+				 NULL);
+}
+
+/**
  * vmw_cmd_begin_query - validate a  SVGA_3D_CMD_BEGIN_QUERY command.
  *
  * @dev_priv: Pointer to a device private struct.
@@ -750,12 +1007,64 @@
 	cmd = container_of(header, struct vmw_begin_query_cmd,
 			   header);
 
+	if (unlikely(dev_priv->has_mob)) {
+		struct {
+			SVGA3dCmdHeader header;
+			SVGA3dCmdBeginGBQuery q;
+		} gb_cmd;
+
+		BUG_ON(sizeof(gb_cmd) != sizeof(*cmd));
+
+		gb_cmd.header.id = SVGA_3D_CMD_BEGIN_GB_QUERY;
+		gb_cmd.header.size = cmd->header.size;
+		gb_cmd.q.cid = cmd->q.cid;
+		gb_cmd.q.type = cmd->q.type;
+
+		memcpy(cmd, &gb_cmd, sizeof(*cmd));
+		return vmw_cmd_begin_gb_query(dev_priv, sw_context, header);
+	}
+
 	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
 				 user_context_converter, &cmd->q.cid,
 				 NULL);
 }
 
 /**
+ * vmw_cmd_end_gb_query - validate a  SVGA_3D_CMD_END_GB_QUERY command.
+ *
+ * @dev_priv: Pointer to a device private struct.
+ * @sw_context: The software context used for this command submission.
+ * @header: Pointer to the command header in the command stream.
+ */
+static int vmw_cmd_end_gb_query(struct vmw_private *dev_priv,
+				struct vmw_sw_context *sw_context,
+				SVGA3dCmdHeader *header)
+{
+	struct vmw_dma_buffer *vmw_bo;
+	struct vmw_query_cmd {
+		SVGA3dCmdHeader header;
+		SVGA3dCmdEndGBQuery q;
+	} *cmd;
+	int ret;
+
+	cmd = container_of(header, struct vmw_query_cmd, header);
+	ret = vmw_cmd_cid_check(dev_priv, sw_context, header);
+	if (unlikely(ret != 0))
+		return ret;
+
+	ret = vmw_translate_mob_ptr(dev_priv, sw_context,
+				    &cmd->q.mobid,
+				    &vmw_bo);
+	if (unlikely(ret != 0))
+		return ret;
+
+	ret = vmw_query_bo_switch_prepare(dev_priv, &vmw_bo->base, sw_context);
+
+	vmw_dmabuf_unreference(&vmw_bo);
+	return ret;
+}
+
+/**
  * vmw_cmd_end_query - validate a  SVGA_3D_CMD_END_QUERY command.
  *
  * @dev_priv: Pointer to a device private struct.
@@ -774,6 +1083,25 @@
 	int ret;
 
 	cmd = container_of(header, struct vmw_query_cmd, header);
+	if (dev_priv->has_mob) {
+		struct {
+			SVGA3dCmdHeader header;
+			SVGA3dCmdEndGBQuery q;
+		} gb_cmd;
+
+		BUG_ON(sizeof(gb_cmd) != sizeof(*cmd));
+
+		gb_cmd.header.id = SVGA_3D_CMD_END_GB_QUERY;
+		gb_cmd.header.size = cmd->header.size;
+		gb_cmd.q.cid = cmd->q.cid;
+		gb_cmd.q.type = cmd->q.type;
+		gb_cmd.q.mobid = cmd->q.guestResult.gmrId;
+		gb_cmd.q.offset = cmd->q.guestResult.offset;
+
+		memcpy(cmd, &gb_cmd, sizeof(*cmd));
+		return vmw_cmd_end_gb_query(dev_priv, sw_context, header);
+	}
+
 	ret = vmw_cmd_cid_check(dev_priv, sw_context, header);
 	if (unlikely(ret != 0))
 		return ret;
@@ -790,7 +1118,40 @@
 	return ret;
 }
 
-/*
+/**
+ * vmw_cmd_wait_gb_query - validate a  SVGA_3D_CMD_WAIT_GB_QUERY command.
+ *
+ * @dev_priv: Pointer to a device private struct.
+ * @sw_context: The software context used for this command submission.
+ * @header: Pointer to the command header in the command stream.
+ */
+static int vmw_cmd_wait_gb_query(struct vmw_private *dev_priv,
+				 struct vmw_sw_context *sw_context,
+				 SVGA3dCmdHeader *header)
+{
+	struct vmw_dma_buffer *vmw_bo;
+	struct vmw_query_cmd {
+		SVGA3dCmdHeader header;
+		SVGA3dCmdWaitForGBQuery q;
+	} *cmd;
+	int ret;
+
+	cmd = container_of(header, struct vmw_query_cmd, header);
+	ret = vmw_cmd_cid_check(dev_priv, sw_context, header);
+	if (unlikely(ret != 0))
+		return ret;
+
+	ret = vmw_translate_mob_ptr(dev_priv, sw_context,
+				    &cmd->q.mobid,
+				    &vmw_bo);
+	if (unlikely(ret != 0))
+		return ret;
+
+	vmw_dmabuf_unreference(&vmw_bo);
+	return 0;
+}
+
+/**
  * vmw_cmd_wait_query - validate a  SVGA_3D_CMD_WAIT_QUERY command.
  *
  * @dev_priv: Pointer to a device private struct.
@@ -809,6 +1170,25 @@
 	int ret;
 
 	cmd = container_of(header, struct vmw_query_cmd, header);
+	if (dev_priv->has_mob) {
+		struct {
+			SVGA3dCmdHeader header;
+			SVGA3dCmdWaitForGBQuery q;
+		} gb_cmd;
+
+		BUG_ON(sizeof(gb_cmd) != sizeof(*cmd));
+
+		gb_cmd.header.id = SVGA_3D_CMD_WAIT_FOR_GB_QUERY;
+		gb_cmd.header.size = cmd->header.size;
+		gb_cmd.q.cid = cmd->q.cid;
+		gb_cmd.q.type = cmd->q.type;
+		gb_cmd.q.mobid = cmd->q.guestResult.gmrId;
+		gb_cmd.q.offset = cmd->q.guestResult.offset;
+
+		memcpy(cmd, &gb_cmd, sizeof(*cmd));
+		return vmw_cmd_wait_gb_query(dev_priv, sw_context, header);
+	}
+
 	ret = vmw_cmd_cid_check(dev_priv, sw_context, header);
 	if (unlikely(ret != 0))
 		return ret;
@@ -853,7 +1233,8 @@
 
 	srf = vmw_res_to_srf(sw_context->res_cache[vmw_res_surface].res);
 
-	vmw_kms_cursor_snoop(srf, sw_context->tfile, &vmw_bo->base, header);
+	vmw_kms_cursor_snoop(srf, sw_context->fp->tfile, &vmw_bo->base,
+			     header);
 
 out_no_surface:
 	vmw_dmabuf_unreference(&vmw_bo);
@@ -921,15 +1302,22 @@
 	struct vmw_tex_state_cmd {
 		SVGA3dCmdHeader header;
 		SVGA3dCmdSetTextureState state;
-	};
+	} *cmd;
 
 	SVGA3dTextureState *last_state = (SVGA3dTextureState *)
 	  ((unsigned long) header + header->size + sizeof(header));
 	SVGA3dTextureState *cur_state = (SVGA3dTextureState *)
 		((unsigned long) header + sizeof(struct vmw_tex_state_cmd));
+	struct vmw_resource_val_node *ctx_node;
+	struct vmw_resource_val_node *res_node;
 	int ret;
 
-	ret = vmw_cmd_cid_check(dev_priv, sw_context, header);
+	cmd = container_of(header, struct vmw_tex_state_cmd,
+			   header);
+
+	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
+				user_context_converter, &cmd->state.cid,
+				&ctx_node);
 	if (unlikely(ret != 0))
 		return ret;
 
@@ -939,9 +1327,20 @@
 
 		ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
 					user_surface_converter,
-					&cur_state->value, NULL);
+					&cur_state->value, &res_node);
 		if (unlikely(ret != 0))
 			return ret;
+
+		if (dev_priv->has_mob) {
+			struct vmw_ctx_bindinfo bi;
+
+			bi.ctx = ctx_node->res;
+			bi.res = res_node ? res_node->res : NULL;
+			bi.bt = vmw_ctx_binding_tex;
+			bi.i1.texture_stage = cur_state->stage;
+			vmw_context_binding_add(ctx_node->staged_bindings,
+						&bi);
+		}
 	}
 
 	return 0;
@@ -971,6 +1370,314 @@
 }
 
 /**
+ * vmw_cmd_switch_backup - Utility function to handle backup buffer switching
+ *
+ * @dev_priv: Pointer to a device private struct.
+ * @sw_context: The software context being used for this batch.
+ * @res_type: The resource type.
+ * @converter: Information about user-space binding for this resource type.
+ * @res_id: Pointer to the user-space resource handle in the command stream.
+ * @buf_id: Pointer to the user-space backup buffer handle in the command
+ * stream.
+ * @backup_offset: Offset of backup into MOB.
+ *
+ * This function prepares for registering a switch of backup buffers
+ * in the resource metadata just prior to unreserving.
+ */
+static int vmw_cmd_switch_backup(struct vmw_private *dev_priv,
+				 struct vmw_sw_context *sw_context,
+				 enum vmw_res_type res_type,
+				 const struct vmw_user_resource_conv
+				 *converter,
+				 uint32_t *res_id,
+				 uint32_t *buf_id,
+				 unsigned long backup_offset)
+{
+	int ret;
+	struct vmw_dma_buffer *dma_buf;
+	struct vmw_resource_val_node *val_node;
+
+	ret = vmw_cmd_res_check(dev_priv, sw_context, res_type,
+				converter, res_id, &val_node);
+	if (unlikely(ret != 0))
+		return ret;
+
+	ret = vmw_translate_mob_ptr(dev_priv, sw_context, buf_id, &dma_buf);
+	if (unlikely(ret != 0))
+		return ret;
+
+	if (val_node->first_usage)
+		val_node->no_buffer_needed = true;
+
+	vmw_dmabuf_unreference(&val_node->new_backup);
+	val_node->new_backup = dma_buf;
+	val_node->new_backup_offset = backup_offset;
+
+	return 0;
+}
+
+/**
+ * vmw_cmd_bind_gb_surface - Validate an SVGA_3D_CMD_BIND_GB_SURFACE
+ * command
+ *
+ * @dev_priv: Pointer to a device private struct.
+ * @sw_context: The software context being used for this batch.
+ * @header: Pointer to the command header in the command stream.
+ */
+static int vmw_cmd_bind_gb_surface(struct vmw_private *dev_priv,
+				   struct vmw_sw_context *sw_context,
+				   SVGA3dCmdHeader *header)
+{
+	struct vmw_bind_gb_surface_cmd {
+		SVGA3dCmdHeader header;
+		SVGA3dCmdBindGBSurface body;
+	} *cmd;
+
+	cmd = container_of(header, struct vmw_bind_gb_surface_cmd, header);
+
+	return vmw_cmd_switch_backup(dev_priv, sw_context, vmw_res_surface,
+				     user_surface_converter,
+				     &cmd->body.sid, &cmd->body.mobid,
+				     0);
+}
+
+/**
+ * vmw_cmd_update_gb_image - Validate an SVGA_3D_CMD_UPDATE_GB_IMAGE
+ * command
+ *
+ * @dev_priv: Pointer to a device private struct.
+ * @sw_context: The software context being used for this batch.
+ * @header: Pointer to the command header in the command stream.
+ */
+static int vmw_cmd_update_gb_image(struct vmw_private *dev_priv,
+				   struct vmw_sw_context *sw_context,
+				   SVGA3dCmdHeader *header)
+{
+	struct vmw_gb_surface_cmd {
+		SVGA3dCmdHeader header;
+		SVGA3dCmdUpdateGBImage body;
+	} *cmd;
+
+	cmd = container_of(header, struct vmw_gb_surface_cmd, header);
+
+	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
+				 user_surface_converter,
+				 &cmd->body.image.sid, NULL);
+}
+
+/**
+ * vmw_cmd_update_gb_surface - Validate an SVGA_3D_CMD_UPDATE_GB_SURFACE
+ * command
+ *
+ * @dev_priv: Pointer to a device private struct.
+ * @sw_context: The software context being used for this batch.
+ * @header: Pointer to the command header in the command stream.
+ */
+static int vmw_cmd_update_gb_surface(struct vmw_private *dev_priv,
+				     struct vmw_sw_context *sw_context,
+				     SVGA3dCmdHeader *header)
+{
+	struct vmw_gb_surface_cmd {
+		SVGA3dCmdHeader header;
+		SVGA3dCmdUpdateGBSurface body;
+	} *cmd;
+
+	cmd = container_of(header, struct vmw_gb_surface_cmd, header);
+
+	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
+				 user_surface_converter,
+				 &cmd->body.sid, NULL);
+}
+
+/**
+ * vmw_cmd_readback_gb_image - Validate an SVGA_3D_CMD_READBACK_GB_IMAGE
+ * command
+ *
+ * @dev_priv: Pointer to a device private struct.
+ * @sw_context: The software context being used for this batch.
+ * @header: Pointer to the command header in the command stream.
+ */
+static int vmw_cmd_readback_gb_image(struct vmw_private *dev_priv,
+				     struct vmw_sw_context *sw_context,
+				     SVGA3dCmdHeader *header)
+{
+	struct vmw_gb_surface_cmd {
+		SVGA3dCmdHeader header;
+		SVGA3dCmdReadbackGBImage body;
+	} *cmd;
+
+	cmd = container_of(header, struct vmw_gb_surface_cmd, header);
+
+	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
+				 user_surface_converter,
+				 &cmd->body.image.sid, NULL);
+}
+
+/**
+ * vmw_cmd_readback_gb_surface - Validate an SVGA_3D_CMD_READBACK_GB_SURFACE
+ * command
+ *
+ * @dev_priv: Pointer to a device private struct.
+ * @sw_context: The software context being used for this batch.
+ * @header: Pointer to the command header in the command stream.
+ */
+static int vmw_cmd_readback_gb_surface(struct vmw_private *dev_priv,
+				       struct vmw_sw_context *sw_context,
+				       SVGA3dCmdHeader *header)
+{
+	struct vmw_gb_surface_cmd {
+		SVGA3dCmdHeader header;
+		SVGA3dCmdReadbackGBSurface body;
+	} *cmd;
+
+	cmd = container_of(header, struct vmw_gb_surface_cmd, header);
+
+	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
+				 user_surface_converter,
+				 &cmd->body.sid, NULL);
+}
+
+/**
+ * vmw_cmd_invalidate_gb_image - Validate an SVGA_3D_CMD_INVALIDATE_GB_IMAGE
+ * command
+ *
+ * @dev_priv: Pointer to a device private struct.
+ * @sw_context: The software context being used for this batch.
+ * @header: Pointer to the command header in the command stream.
+ */
+static int vmw_cmd_invalidate_gb_image(struct vmw_private *dev_priv,
+				       struct vmw_sw_context *sw_context,
+				       SVGA3dCmdHeader *header)
+{
+	struct vmw_gb_surface_cmd {
+		SVGA3dCmdHeader header;
+		SVGA3dCmdInvalidateGBImage body;
+	} *cmd;
+
+	cmd = container_of(header, struct vmw_gb_surface_cmd, header);
+
+	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
+				 user_surface_converter,
+				 &cmd->body.image.sid, NULL);
+}
+
+/**
+ * vmw_cmd_invalidate_gb_surface - Validate an
+ * SVGA_3D_CMD_INVALIDATE_GB_SURFACE command
+ *
+ * @dev_priv: Pointer to a device private struct.
+ * @sw_context: The software context being used for this batch.
+ * @header: Pointer to the command header in the command stream.
+ */
+static int vmw_cmd_invalidate_gb_surface(struct vmw_private *dev_priv,
+					 struct vmw_sw_context *sw_context,
+					 SVGA3dCmdHeader *header)
+{
+	struct vmw_gb_surface_cmd {
+		SVGA3dCmdHeader header;
+		SVGA3dCmdInvalidateGBSurface body;
+	} *cmd;
+
+	cmd = container_of(header, struct vmw_gb_surface_cmd, header);
+
+	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
+				 user_surface_converter,
+				 &cmd->body.sid, NULL);
+}
+
+
+/**
+ * vmw_cmd_shader_define - Validate an SVGA_3D_CMD_SHADER_DEFINE
+ * command
+ *
+ * @dev_priv: Pointer to a device private struct.
+ * @sw_context: The software context being used for this batch.
+ * @header: Pointer to the command header in the command stream.
+ */
+static int vmw_cmd_shader_define(struct vmw_private *dev_priv,
+				 struct vmw_sw_context *sw_context,
+				 SVGA3dCmdHeader *header)
+{
+	struct vmw_shader_define_cmd {
+		SVGA3dCmdHeader header;
+		SVGA3dCmdDefineShader body;
+	} *cmd;
+	int ret;
+	size_t size;
+
+	cmd = container_of(header, struct vmw_shader_define_cmd,
+			   header);
+
+	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
+				user_context_converter, &cmd->body.cid,
+				NULL);
+	if (unlikely(ret != 0))
+		return ret;
+
+	if (unlikely(!dev_priv->has_mob))
+		return 0;
+
+	size = cmd->header.size - sizeof(cmd->body);
+	ret = vmw_compat_shader_add(sw_context->fp->shman,
+				    cmd->body.shid, cmd + 1,
+				    cmd->body.type, size,
+				    sw_context->fp->tfile,
+				    &sw_context->staged_shaders);
+	if (unlikely(ret != 0))
+		return ret;
+
+	return vmw_resource_relocation_add(&sw_context->res_relocations,
+					   NULL, &cmd->header.id -
+					   sw_context->buf_start);
+
+	return 0;
+}
+
+/**
+ * vmw_cmd_shader_destroy - Validate an SVGA_3D_CMD_SHADER_DESTROY
+ * command
+ *
+ * @dev_priv: Pointer to a device private struct.
+ * @sw_context: The software context being used for this batch.
+ * @header: Pointer to the command header in the command stream.
+ */
+static int vmw_cmd_shader_destroy(struct vmw_private *dev_priv,
+				  struct vmw_sw_context *sw_context,
+				  SVGA3dCmdHeader *header)
+{
+	struct vmw_shader_destroy_cmd {
+		SVGA3dCmdHeader header;
+		SVGA3dCmdDestroyShader body;
+	} *cmd;
+	int ret;
+
+	cmd = container_of(header, struct vmw_shader_destroy_cmd,
+			   header);
+
+	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
+				user_context_converter, &cmd->body.cid,
+				NULL);
+	if (unlikely(ret != 0))
+		return ret;
+
+	if (unlikely(!dev_priv->has_mob))
+		return 0;
+
+	ret = vmw_compat_shader_remove(sw_context->fp->shman,
+				       cmd->body.shid,
+				       cmd->body.type,
+				       &sw_context->staged_shaders);
+	if (unlikely(ret != 0))
+		return ret;
+
+	return vmw_resource_relocation_add(&sw_context->res_relocations,
+					   NULL, &cmd->header.id -
+					   sw_context->buf_start);
+
+	return 0;
+}
+
+/**
  * vmw_cmd_set_shader - Validate an SVGA_3D_CMD_SET_SHADER
  * command
  *
@@ -986,18 +1693,105 @@
 		SVGA3dCmdHeader header;
 		SVGA3dCmdSetShader body;
 	} *cmd;
+	struct vmw_resource_val_node *ctx_node;
 	int ret;
 
 	cmd = container_of(header, struct vmw_set_shader_cmd,
 			   header);
 
-	ret = vmw_cmd_cid_check(dev_priv, sw_context, header);
+	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
+				user_context_converter, &cmd->body.cid,
+				&ctx_node);
 	if (unlikely(ret != 0))
 		return ret;
 
+	if (dev_priv->has_mob) {
+		struct vmw_ctx_bindinfo bi;
+		struct vmw_resource_val_node *res_node;
+		u32 shid = cmd->body.shid;
+
+		if (shid != SVGA3D_INVALID_ID)
+			(void) vmw_compat_shader_lookup(sw_context->fp->shman,
+							cmd->body.type,
+							&shid);
+
+		ret = vmw_cmd_compat_res_check(dev_priv, sw_context,
+					       vmw_res_shader,
+					       user_shader_converter,
+					       shid,
+					       &cmd->body.shid, &res_node);
+		if (unlikely(ret != 0))
+			return ret;
+
+		bi.ctx = ctx_node->res;
+		bi.res = res_node ? res_node->res : NULL;
+		bi.bt = vmw_ctx_binding_shader;
+		bi.i1.shader_type = cmd->body.type;
+		return vmw_context_binding_add(ctx_node->staged_bindings, &bi);
+	}
+
 	return 0;
 }
 
+/**
+ * vmw_cmd_set_shader_const - Validate an SVGA_3D_CMD_SET_SHADER_CONST
+ * command
+ *
+ * @dev_priv: Pointer to a device private struct.
+ * @sw_context: The software context being used for this batch.
+ * @header: Pointer to the command header in the command stream.
+ */
+static int vmw_cmd_set_shader_const(struct vmw_private *dev_priv,
+				    struct vmw_sw_context *sw_context,
+				    SVGA3dCmdHeader *header)
+{
+	struct vmw_set_shader_const_cmd {
+		SVGA3dCmdHeader header;
+		SVGA3dCmdSetShaderConst body;
+	} *cmd;
+	int ret;
+
+	cmd = container_of(header, struct vmw_set_shader_const_cmd,
+			   header);
+
+	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
+				user_context_converter, &cmd->body.cid,
+				NULL);
+	if (unlikely(ret != 0))
+		return ret;
+
+	if (dev_priv->has_mob)
+		header->id = SVGA_3D_CMD_SET_GB_SHADERCONSTS_INLINE;
+
+	return 0;
+}
+
+/**
+ * vmw_cmd_bind_gb_shader - Validate an SVGA_3D_CMD_BIND_GB_SHADER
+ * command
+ *
+ * @dev_priv: Pointer to a device private struct.
+ * @sw_context: The software context being used for this batch.
+ * @header: Pointer to the command header in the command stream.
+ */
+static int vmw_cmd_bind_gb_shader(struct vmw_private *dev_priv,
+				  struct vmw_sw_context *sw_context,
+				  SVGA3dCmdHeader *header)
+{
+	struct vmw_bind_gb_shader_cmd {
+		SVGA3dCmdHeader header;
+		SVGA3dCmdBindGBShader body;
+	} *cmd;
+
+	cmd = container_of(header, struct vmw_bind_gb_shader_cmd,
+			   header);
+
+	return vmw_cmd_switch_backup(dev_priv, sw_context, vmw_res_shader,
+				     user_shader_converter,
+				     &cmd->body.shid, &cmd->body.mobid,
+				     cmd->body.offsetInBytes);
+}
+
 static int vmw_cmd_check_not_3d(struct vmw_private *dev_priv,
 				struct vmw_sw_context *sw_context,
 				void *buf, uint32_t *size)
@@ -1041,50 +1835,173 @@
 	return 0;
 }
 
-typedef int (*vmw_cmd_func) (struct vmw_private *,
-			     struct vmw_sw_context *,
-			     SVGA3dCmdHeader *);
-
-#define VMW_CMD_DEF(cmd, func) \
-	[cmd - SVGA_3D_CMD_BASE] = func
-
-static vmw_cmd_func vmw_cmd_funcs[SVGA_3D_CMD_MAX] = {
-	VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_DEFINE, &vmw_cmd_invalid),
-	VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_DESTROY, &vmw_cmd_invalid),
-	VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_COPY, &vmw_cmd_surface_copy_check),
-	VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_STRETCHBLT, &vmw_cmd_stretch_blt_check),
-	VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_DMA, &vmw_cmd_dma),
-	VMW_CMD_DEF(SVGA_3D_CMD_CONTEXT_DEFINE, &vmw_cmd_invalid),
-	VMW_CMD_DEF(SVGA_3D_CMD_CONTEXT_DESTROY, &vmw_cmd_invalid),
-	VMW_CMD_DEF(SVGA_3D_CMD_SETTRANSFORM, &vmw_cmd_cid_check),
-	VMW_CMD_DEF(SVGA_3D_CMD_SETZRANGE, &vmw_cmd_cid_check),
-	VMW_CMD_DEF(SVGA_3D_CMD_SETRENDERSTATE, &vmw_cmd_cid_check),
+static const struct vmw_cmd_entry vmw_cmd_entries[SVGA_3D_CMD_MAX] = {
+	VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_DEFINE, &vmw_cmd_invalid,
+		    false, false, false),
+	VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_DESTROY, &vmw_cmd_invalid,
+		    false, false, false),
+	VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_COPY, &vmw_cmd_surface_copy_check,
+		    true, false, false),
+	VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_STRETCHBLT, &vmw_cmd_stretch_blt_check,
+		    true, false, false),
+	VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_DMA, &vmw_cmd_dma,
+		    true, false, false),
+	VMW_CMD_DEF(SVGA_3D_CMD_CONTEXT_DEFINE, &vmw_cmd_invalid,
+		    false, false, false),
+	VMW_CMD_DEF(SVGA_3D_CMD_CONTEXT_DESTROY, &vmw_cmd_invalid,
+		    false, false, false),
+	VMW_CMD_DEF(SVGA_3D_CMD_SETTRANSFORM, &vmw_cmd_cid_check,
+		    true, false, false),
+	VMW_CMD_DEF(SVGA_3D_CMD_SETZRANGE, &vmw_cmd_cid_check,
+		    true, false, false),
+	VMW_CMD_DEF(SVGA_3D_CMD_SETRENDERSTATE, &vmw_cmd_cid_check,
+		    true, false, false),
 	VMW_CMD_DEF(SVGA_3D_CMD_SETRENDERTARGET,
-		    &vmw_cmd_set_render_target_check),
-	VMW_CMD_DEF(SVGA_3D_CMD_SETTEXTURESTATE, &vmw_cmd_tex_state),
-	VMW_CMD_DEF(SVGA_3D_CMD_SETMATERIAL, &vmw_cmd_cid_check),
-	VMW_CMD_DEF(SVGA_3D_CMD_SETLIGHTDATA, &vmw_cmd_cid_check),
-	VMW_CMD_DEF(SVGA_3D_CMD_SETLIGHTENABLED, &vmw_cmd_cid_check),
-	VMW_CMD_DEF(SVGA_3D_CMD_SETVIEWPORT, &vmw_cmd_cid_check),
-	VMW_CMD_DEF(SVGA_3D_CMD_SETCLIPPLANE, &vmw_cmd_cid_check),
-	VMW_CMD_DEF(SVGA_3D_CMD_CLEAR, &vmw_cmd_cid_check),
-	VMW_CMD_DEF(SVGA_3D_CMD_PRESENT, &vmw_cmd_present_check),
-	VMW_CMD_DEF(SVGA_3D_CMD_SHADER_DEFINE, &vmw_cmd_cid_check),
-	VMW_CMD_DEF(SVGA_3D_CMD_SHADER_DESTROY, &vmw_cmd_cid_check),
-	VMW_CMD_DEF(SVGA_3D_CMD_SET_SHADER, &vmw_cmd_set_shader),
-	VMW_CMD_DEF(SVGA_3D_CMD_SET_SHADER_CONST, &vmw_cmd_cid_check),
-	VMW_CMD_DEF(SVGA_3D_CMD_DRAW_PRIMITIVES, &vmw_cmd_draw),
-	VMW_CMD_DEF(SVGA_3D_CMD_SETSCISSORRECT, &vmw_cmd_cid_check),
-	VMW_CMD_DEF(SVGA_3D_CMD_BEGIN_QUERY, &vmw_cmd_begin_query),
-	VMW_CMD_DEF(SVGA_3D_CMD_END_QUERY, &vmw_cmd_end_query),
-	VMW_CMD_DEF(SVGA_3D_CMD_WAIT_FOR_QUERY, &vmw_cmd_wait_query),
-	VMW_CMD_DEF(SVGA_3D_CMD_PRESENT_READBACK, &vmw_cmd_ok),
+		    &vmw_cmd_set_render_target_check, true, false, false),
+	VMW_CMD_DEF(SVGA_3D_CMD_SETTEXTURESTATE, &vmw_cmd_tex_state,
+		    true, false, false),
+	VMW_CMD_DEF(SVGA_3D_CMD_SETMATERIAL, &vmw_cmd_cid_check,
+		    true, false, false),
+	VMW_CMD_DEF(SVGA_3D_CMD_SETLIGHTDATA, &vmw_cmd_cid_check,
+		    true, false, false),
+	VMW_CMD_DEF(SVGA_3D_CMD_SETLIGHTENABLED, &vmw_cmd_cid_check,
+		    true, false, false),
+	VMW_CMD_DEF(SVGA_3D_CMD_SETVIEWPORT, &vmw_cmd_cid_check,
+		    true, false, false),
+	VMW_CMD_DEF(SVGA_3D_CMD_SETCLIPPLANE, &vmw_cmd_cid_check,
+		    true, false, false),
+	VMW_CMD_DEF(SVGA_3D_CMD_CLEAR, &vmw_cmd_cid_check,
+		    true, false, false),
+	VMW_CMD_DEF(SVGA_3D_CMD_PRESENT, &vmw_cmd_present_check,
+		    false, false, false),
+	VMW_CMD_DEF(SVGA_3D_CMD_SHADER_DEFINE, &vmw_cmd_shader_define,
+		    true, false, false),
+	VMW_CMD_DEF(SVGA_3D_CMD_SHADER_DESTROY, &vmw_cmd_shader_destroy,
+		    true, false, false),
+	VMW_CMD_DEF(SVGA_3D_CMD_SET_SHADER, &vmw_cmd_set_shader,
+		    true, false, false),
+	VMW_CMD_DEF(SVGA_3D_CMD_SET_SHADER_CONST, &vmw_cmd_set_shader_const,
+		    true, false, false),
+	VMW_CMD_DEF(SVGA_3D_CMD_DRAW_PRIMITIVES, &vmw_cmd_draw,
+		    true, false, false),
+	VMW_CMD_DEF(SVGA_3D_CMD_SETSCISSORRECT, &vmw_cmd_cid_check,
+		    true, false, false),
+	VMW_CMD_DEF(SVGA_3D_CMD_BEGIN_QUERY, &vmw_cmd_begin_query,
+		    true, false, false),
+	VMW_CMD_DEF(SVGA_3D_CMD_END_QUERY, &vmw_cmd_end_query,
+		    true, false, false),
+	VMW_CMD_DEF(SVGA_3D_CMD_WAIT_FOR_QUERY, &vmw_cmd_wait_query,
+		    true, false, false),
+	VMW_CMD_DEF(SVGA_3D_CMD_PRESENT_READBACK, &vmw_cmd_ok,
+		    true, false, false),
 	VMW_CMD_DEF(SVGA_3D_CMD_BLIT_SURFACE_TO_SCREEN,
-		    &vmw_cmd_blt_surf_screen_check),
-	VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_DEFINE_V2, &vmw_cmd_invalid),
-	VMW_CMD_DEF(SVGA_3D_CMD_GENERATE_MIPMAPS, &vmw_cmd_invalid),
-	VMW_CMD_DEF(SVGA_3D_CMD_ACTIVATE_SURFACE, &vmw_cmd_invalid),
-	VMW_CMD_DEF(SVGA_3D_CMD_DEACTIVATE_SURFACE, &vmw_cmd_invalid),
+		    &vmw_cmd_blt_surf_screen_check, false, false, false),
+	VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_DEFINE_V2, &vmw_cmd_invalid,
+		    false, false, false),
+	VMW_CMD_DEF(SVGA_3D_CMD_GENERATE_MIPMAPS, &vmw_cmd_invalid,
+		    false, false, false),
+	VMW_CMD_DEF(SVGA_3D_CMD_ACTIVATE_SURFACE, &vmw_cmd_invalid,
+		    false, false, false),
+	VMW_CMD_DEF(SVGA_3D_CMD_DEACTIVATE_SURFACE, &vmw_cmd_invalid,
+		    false, false, false),
+	VMW_CMD_DEF(SVGA_3D_CMD_SCREEN_DMA, &vmw_cmd_invalid,
+		    false, false, false),
+	VMW_CMD_DEF(SVGA_3D_CMD_SET_UNITY_SURFACE_COOKIE, &vmw_cmd_invalid,
+		    false, false, false),
+	VMW_CMD_DEF(SVGA_3D_CMD_OPEN_CONTEXT_SURFACE, &vmw_cmd_invalid,
+		    false, false, false),
+	VMW_CMD_DEF(SVGA_3D_CMD_LOGICOPS_BITBLT, &vmw_cmd_invalid,
+		    false, false, false),
+	VMW_CMD_DEF(SVGA_3D_CMD_LOGICOPS_TRANSBLT, &vmw_cmd_invalid,
+		    false, false, false),
+	VMW_CMD_DEF(SVGA_3D_CMD_LOGICOPS_STRETCHBLT, &vmw_cmd_invalid,
+		    false, false, false),
+	VMW_CMD_DEF(SVGA_3D_CMD_LOGICOPS_COLORFILL, &vmw_cmd_invalid,
+		    false, false, false),
+	VMW_CMD_DEF(SVGA_3D_CMD_LOGICOPS_ALPHABLEND, &vmw_cmd_invalid,
+		    false, false, false),
+	VMW_CMD_DEF(SVGA_3D_CMD_LOGICOPS_CLEARTYPEBLEND, &vmw_cmd_invalid,
+		    false, false, false),
+	VMW_CMD_DEF(SVGA_3D_CMD_SET_OTABLE_BASE, &vmw_cmd_invalid,
+		    false, false, true),
+	VMW_CMD_DEF(SVGA_3D_CMD_READBACK_OTABLE, &vmw_cmd_invalid,
+		    false, false, true),
+	VMW_CMD_DEF(SVGA_3D_CMD_DEFINE_GB_MOB, &vmw_cmd_invalid,
+		    false, false, true),
+	VMW_CMD_DEF(SVGA_3D_CMD_DESTROY_GB_MOB, &vmw_cmd_invalid,
+		    false, false, true),
+	VMW_CMD_DEF(SVGA_3D_CMD_REDEFINE_GB_MOB, &vmw_cmd_invalid,
+		    false, false, true),
+	VMW_CMD_DEF(SVGA_3D_CMD_UPDATE_GB_MOB_MAPPING, &vmw_cmd_invalid,
+		    false, false, true),
+	VMW_CMD_DEF(SVGA_3D_CMD_DEFINE_GB_SURFACE, &vmw_cmd_invalid,
+		    false, false, true),
+	VMW_CMD_DEF(SVGA_3D_CMD_DESTROY_GB_SURFACE, &vmw_cmd_invalid,
+		    false, false, true),
+	VMW_CMD_DEF(SVGA_3D_CMD_BIND_GB_SURFACE, &vmw_cmd_bind_gb_surface,
+		    true, false, true),
+	VMW_CMD_DEF(SVGA_3D_CMD_COND_BIND_GB_SURFACE, &vmw_cmd_invalid,
+		    false, false, true),
+	VMW_CMD_DEF(SVGA_3D_CMD_UPDATE_GB_IMAGE, &vmw_cmd_update_gb_image,
+		    true, false, true),
+	VMW_CMD_DEF(SVGA_3D_CMD_UPDATE_GB_SURFACE,
+		    &vmw_cmd_update_gb_surface, true, false, true),
+	VMW_CMD_DEF(SVGA_3D_CMD_READBACK_GB_IMAGE,
+		    &vmw_cmd_readback_gb_image, true, false, true),
+	VMW_CMD_DEF(SVGA_3D_CMD_READBACK_GB_SURFACE,
+		    &vmw_cmd_readback_gb_surface, true, false, true),
+	VMW_CMD_DEF(SVGA_3D_CMD_INVALIDATE_GB_IMAGE,
+		    &vmw_cmd_invalidate_gb_image, true, false, true),
+	VMW_CMD_DEF(SVGA_3D_CMD_INVALIDATE_GB_SURFACE,
+		    &vmw_cmd_invalidate_gb_surface, true, false, true),
+	VMW_CMD_DEF(SVGA_3D_CMD_DEFINE_GB_CONTEXT, &vmw_cmd_invalid,
+		    false, false, true),
+	VMW_CMD_DEF(SVGA_3D_CMD_DESTROY_GB_CONTEXT, &vmw_cmd_invalid,
+		    false, false, true),
+	VMW_CMD_DEF(SVGA_3D_CMD_BIND_GB_CONTEXT, &vmw_cmd_invalid,
+		    false, false, true),
+	VMW_CMD_DEF(SVGA_3D_CMD_READBACK_GB_CONTEXT, &vmw_cmd_invalid,
+		    false, false, true),
+	VMW_CMD_DEF(SVGA_3D_CMD_INVALIDATE_GB_CONTEXT, &vmw_cmd_invalid,
+		    false, false, true),
+	VMW_CMD_DEF(SVGA_3D_CMD_DEFINE_GB_SHADER, &vmw_cmd_invalid,
+		    false, false, true),
+	VMW_CMD_DEF(SVGA_3D_CMD_BIND_GB_SHADER, &vmw_cmd_bind_gb_shader,
+		    true, false, true),
+	VMW_CMD_DEF(SVGA_3D_CMD_DESTROY_GB_SHADER, &vmw_cmd_invalid,
+		    false, false, true),
+	VMW_CMD_DEF(SVGA_3D_CMD_SET_OTABLE_BASE64, &vmw_cmd_invalid,
+		    false, false, false),
+	VMW_CMD_DEF(SVGA_3D_CMD_BEGIN_GB_QUERY, &vmw_cmd_begin_gb_query,
+		    true, false, true),
+	VMW_CMD_DEF(SVGA_3D_CMD_END_GB_QUERY, &vmw_cmd_end_gb_query,
+		    true, false, true),
+	VMW_CMD_DEF(SVGA_3D_CMD_WAIT_FOR_GB_QUERY, &vmw_cmd_wait_gb_query,
+		    true, false, true),
+	VMW_CMD_DEF(SVGA_3D_CMD_NOP, &vmw_cmd_ok,
+		    true, false, true),
+	VMW_CMD_DEF(SVGA_3D_CMD_ENABLE_GART, &vmw_cmd_invalid,
+		    false, false, true),
+	VMW_CMD_DEF(SVGA_3D_CMD_DISABLE_GART, &vmw_cmd_invalid,
+		    false, false, true),
+	VMW_CMD_DEF(SVGA_3D_CMD_MAP_MOB_INTO_GART, &vmw_cmd_invalid,
+		    false, false, true),
+	VMW_CMD_DEF(SVGA_3D_CMD_UNMAP_GART_RANGE, &vmw_cmd_invalid,
+		    false, false, true),
+	VMW_CMD_DEF(SVGA_3D_CMD_DEFINE_GB_SCREENTARGET, &vmw_cmd_invalid,
+		    false, false, true),
+	VMW_CMD_DEF(SVGA_3D_CMD_DESTROY_GB_SCREENTARGET, &vmw_cmd_invalid,
+		    false, false, true),
+	VMW_CMD_DEF(SVGA_3D_CMD_BIND_GB_SCREENTARGET, &vmw_cmd_invalid,
+		    false, false, true),
+	VMW_CMD_DEF(SVGA_3D_CMD_UPDATE_GB_SCREENTARGET, &vmw_cmd_invalid,
+		    false, false, true),
+	VMW_CMD_DEF(SVGA_3D_CMD_READBACK_GB_IMAGE_PARTIAL, &vmw_cmd_invalid,
+		    false, false, true),
+	VMW_CMD_DEF(SVGA_3D_CMD_INVALIDATE_GB_IMAGE_PARTIAL, &vmw_cmd_invalid,
+		    false, false, true),
+	VMW_CMD_DEF(SVGA_3D_CMD_SET_GB_SHADERCONSTS_INLINE, &vmw_cmd_cid_check,
+		    true, false, true)
 };
 
 static int vmw_cmd_check(struct vmw_private *dev_priv,
@@ -1095,6 +2012,8 @@
 	uint32_t size_remaining = *size;
 	SVGA3dCmdHeader *header = (SVGA3dCmdHeader *) buf;
 	int ret;
+	const struct vmw_cmd_entry *entry;
+	bool gb = dev_priv->capabilities & SVGA_CAP_GBOBJECTS;
 
 	cmd_id = le32_to_cpu(((uint32_t *)buf)[0]);
 	/* Handle any none 3D commands */
@@ -1107,18 +2026,43 @@
 
 	cmd_id -= SVGA_3D_CMD_BASE;
 	if (unlikely(*size > size_remaining))
-		goto out_err;
+		goto out_invalid;
 
 	if (unlikely(cmd_id >= SVGA_3D_CMD_MAX - SVGA_3D_CMD_BASE))
-		goto out_err;
+		goto out_invalid;
 
-	ret = vmw_cmd_funcs[cmd_id](dev_priv, sw_context, header);
+	entry = &vmw_cmd_entries[cmd_id];
+	if (unlikely(!entry->func))
+		goto out_invalid;
+
+	if (unlikely(!entry->user_allow && !sw_context->kernel))
+		goto out_privileged;
+
+	if (unlikely(entry->gb_disable && gb))
+		goto out_old;
+
+	if (unlikely(entry->gb_enable && !gb))
+		goto out_new;
+
+	ret = entry->func(dev_priv, sw_context, header);
 	if (unlikely(ret != 0))
-		goto out_err;
+		goto out_invalid;
 
 	return 0;
-out_err:
-	DRM_ERROR("Illegal / Invalid SVGA3D command: %d\n",
+out_invalid:
+	DRM_ERROR("Invalid SVGA3D command: %d\n",
+		  cmd_id + SVGA_3D_CMD_BASE);
+	return -EINVAL;
+out_privileged:
+	DRM_ERROR("Privileged SVGA3D command: %d\n",
+		  cmd_id + SVGA_3D_CMD_BASE);
+	return -EPERM;
+out_old:
+	DRM_ERROR("Deprecated (disallowed) SVGA3D command: %d\n",
+		  cmd_id + SVGA_3D_CMD_BASE);
+	return -EINVAL;
+out_new:
+	DRM_ERROR("SVGA3D command: %d not supported by virtual hardware.\n",
 		  cmd_id + SVGA_3D_CMD_BASE);
 	return -EINVAL;
 }
@@ -1174,6 +2118,9 @@
 		case VMW_PL_GMR:
 			reloc->location->gmrId = bo->mem.start;
 			break;
+		case VMW_PL_MOB:
+			*reloc->mob_loc = bo->mem.start;
+			break;
 		default:
 			BUG();
 		}
@@ -1198,6 +2145,8 @@
 	list_for_each_entry_safe(val, val_next, list, head) {
 		list_del_init(&val->head);
 		vmw_resource_unreference(&val->res);
+		if (unlikely(val->staged_bindings))
+			kfree(val->staged_bindings);
 		kfree(val);
 	}
 }
@@ -1224,7 +2173,8 @@
 }
 
 static int vmw_validate_single_buffer(struct vmw_private *dev_priv,
-				      struct ttm_buffer_object *bo)
+				      struct ttm_buffer_object *bo,
+				      bool validate_as_mob)
 {
 	int ret;
 
@@ -1238,6 +2188,9 @@
 	     dev_priv->dummy_query_bo_pinned))
 		return 0;
 
+	if (validate_as_mob)
+		return ttm_bo_validate(bo, &vmw_mob_placement, true, false);
+
 	/**
 	 * Put BO in VRAM if there is space, otherwise as a GMR.
 	 * If there is no space in VRAM and GMR ids are all used up,
@@ -1259,7 +2212,6 @@
 	return ret;
 }
 
-
 static int vmw_validate_buffers(struct vmw_private *dev_priv,
 				struct vmw_sw_context *sw_context)
 {
@@ -1267,7 +2219,8 @@
 	int ret;
 
 	list_for_each_entry(entry, &sw_context->validate_nodes, base.head) {
-		ret = vmw_validate_single_buffer(dev_priv, entry->base.bo);
+		ret = vmw_validate_single_buffer(dev_priv, entry->base.bo,
+						 entry->validate_as_mob);
 		if (unlikely(ret != 0))
 			return ret;
 	}
@@ -1461,7 +2414,7 @@
 	} else
 		sw_context->kernel = true;
 
-	sw_context->tfile = vmw_fpriv(file_priv)->tfile;
+	sw_context->fp = vmw_fpriv(file_priv);
 	sw_context->cur_reloc = 0;
 	sw_context->cur_val_buf = 0;
 	sw_context->fence_flags = 0;
@@ -1478,16 +2431,17 @@
 			goto out_unlock;
 		sw_context->res_ht_initialized = true;
 	}
+	INIT_LIST_HEAD(&sw_context->staged_shaders);
 
 	INIT_LIST_HEAD(&resource_list);
 	ret = vmw_cmd_check_all(dev_priv, sw_context, kernel_commands,
 				command_size);
 	if (unlikely(ret != 0))
-		goto out_err;
+		goto out_err_nores;
 
 	ret = vmw_resources_reserve(sw_context);
 	if (unlikely(ret != 0))
-		goto out_err;
+		goto out_err_nores;
 
 	ret = ttm_eu_reserve_buffers(&ticket, &sw_context->validate_nodes);
 	if (unlikely(ret != 0))
@@ -1509,11 +2463,23 @@
 			goto out_err;
 	}
 
+	ret = mutex_lock_interruptible(&dev_priv->binding_mutex);
+	if (unlikely(ret != 0)) {
+		ret = -ERESTARTSYS;
+		goto out_err;
+	}
+
+	if (dev_priv->has_mob) {
+		ret = vmw_rebind_contexts(sw_context);
+		if (unlikely(ret != 0))
+			goto out_unlock_binding;
+	}
+
 	cmd = vmw_fifo_reserve(dev_priv, command_size);
 	if (unlikely(cmd == NULL)) {
 		DRM_ERROR("Failed reserving fifo space for commands.\n");
 		ret = -ENOMEM;
-		goto out_err;
+		goto out_unlock_binding;
 	}
 
 	vmw_apply_relocations(sw_context);
@@ -1538,6 +2504,8 @@
 		DRM_ERROR("Fence submission error. Syncing.\n");
 
 	vmw_resource_list_unreserve(&sw_context->resource_list, false);
+	mutex_unlock(&dev_priv->binding_mutex);
+
 	ttm_eu_fence_buffer_objects(&ticket, &sw_context->validate_nodes,
 				    (void *) fence);
 
@@ -1558,6 +2526,8 @@
 	}
 
 	list_splice_init(&sw_context->resource_list, &resource_list);
+	vmw_compat_shaders_commit(sw_context->fp->shman,
+				  &sw_context->staged_shaders);
 	mutex_unlock(&dev_priv->cmdbuf_mutex);
 
 	/*
@@ -1568,11 +2538,14 @@
 
 	return 0;
 
+out_unlock_binding:
+	mutex_unlock(&dev_priv->binding_mutex);
 out_err:
+	ttm_eu_backoff_reservation(&ticket, &sw_context->validate_nodes);
+out_err_nores:
+	vmw_resource_list_unreserve(&sw_context->resource_list, true);
 	vmw_resource_relocations_free(&sw_context->res_relocations);
 	vmw_free_relocations(sw_context);
-	ttm_eu_backoff_reservation(&ticket, &sw_context->validate_nodes);
-	vmw_resource_list_unreserve(&sw_context->resource_list, true);
 	vmw_clear_validations(sw_context);
 	if (unlikely(dev_priv->pinned_bo != NULL &&
 		     !dev_priv->query_cid_valid))
@@ -1581,6 +2554,8 @@
 	list_splice_init(&sw_context->resource_list, &resource_list);
 	error_resource = sw_context->error_resource;
 	sw_context->error_resource = NULL;
+	vmw_compat_shaders_revert(sw_context->fp->shman,
+				  &sw_context->staged_shaders);
 	mutex_unlock(&dev_priv->cmdbuf_mutex);
 
 	/*
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c b/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c
index c62d20e..436b013 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c
@@ -271,7 +271,7 @@
 	spin_unlock_irq(&fman->lock);
 }
 
-void vmw_fences_perform_actions(struct vmw_fence_manager *fman,
+static void vmw_fences_perform_actions(struct vmw_fence_manager *fman,
 				struct list_head *list)
 {
 	struct vmw_fence_action *action, *next_action;
@@ -897,7 +897,7 @@
  * Note that the action callbacks may be executed before this function
  * returns.
  */
-void vmw_fence_obj_add_action(struct vmw_fence_obj *fence,
+static void vmw_fence_obj_add_action(struct vmw_fence_obj *fence,
 			      struct vmw_fence_action *action)
 {
 	struct vmw_fence_manager *fman = fence->fman;
@@ -993,7 +993,7 @@
 	struct drm_vmw_event_fence event;
 };
 
-int vmw_event_fence_action_create(struct drm_file *file_priv,
+static int vmw_event_fence_action_create(struct drm_file *file_priv,
 				  struct vmw_fence_obj *fence,
 				  uint32_t flags,
 				  uint64_t user_data,
@@ -1080,7 +1080,8 @@
 	 */
 	if (arg->handle) {
 		struct ttm_base_object *base =
-			ttm_base_object_lookup(vmw_fp->tfile, arg->handle);
+			ttm_base_object_lookup_for_ref(dev_priv->tdev,
+						       arg->handle);
 
 		if (unlikely(base == NULL)) {
 			DRM_ERROR("Fence event invalid fence object handle "
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c b/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c
index 3eb1486..6ccd993 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c
@@ -35,6 +35,23 @@
 	uint32_t fifo_min, hwversion;
 	const struct vmw_fifo_state *fifo = &dev_priv->fifo;
 
+	if (!(dev_priv->capabilities & SVGA_CAP_3D))
+		return false;
+
+	if (dev_priv->capabilities & SVGA_CAP_GBOBJECTS) {
+		uint32_t result;
+
+		if (!dev_priv->has_mob)
+			return false;
+
+		mutex_lock(&dev_priv->hw_mutex);
+		vmw_write(dev_priv, SVGA_REG_DEV_CAP, SVGA3D_DEVCAP_3D);
+		result = vmw_read(dev_priv, SVGA_REG_DEV_CAP);
+		mutex_unlock(&dev_priv->hw_mutex);
+
+		return (result != 0);
+	}
+
 	if (!(dev_priv->capabilities & SVGA_CAP_EXTENDED_FIFO))
 		return false;
 
@@ -511,24 +528,16 @@
 }
 
 /**
- * vmw_fifo_emit_dummy_query - emits a dummy query to the fifo.
+ * vmw_fifo_emit_dummy_legacy_query - emits a dummy query to the fifo using
+ * legacy query commands.
  *
  * @dev_priv: The device private structure.
  * @cid: The hardware context id used for the query.
  *
- * This function is used to emit a dummy occlusion query with
- * no primitives rendered between query begin and query end.
- * It's used to provide a query barrier, in order to know that when
- * this query is finished, all preceding queries are also finished.
- *
- * A Query results structure should have been initialized at the start
- * of the dev_priv->dummy_query_bo buffer object. And that buffer object
- * must also be either reserved or pinned when this function is called.
- *
- * Returns -ENOMEM on failure to reserve fifo space.
+ * See the vmw_fifo_emit_dummy_query documentation.
  */
-int vmw_fifo_emit_dummy_query(struct vmw_private *dev_priv,
-			      uint32_t cid)
+static int vmw_fifo_emit_dummy_legacy_query(struct vmw_private *dev_priv,
+					    uint32_t cid)
 {
 	/*
 	 * A query wait without a preceding query end will
@@ -566,3 +575,75 @@
 
 	return 0;
 }
+
+/**
+ * vmw_fifo_emit_dummy_gb_query - emits a dummy query to the fifo using
+ * guest-backed resource query commands.
+ *
+ * @dev_priv: The device private structure.
+ * @cid: The hardware context id used for the query.
+ *
+ * See the vmw_fifo_emit_dummy_query documentation.
+ */
+static int vmw_fifo_emit_dummy_gb_query(struct vmw_private *dev_priv,
+					uint32_t cid)
+{
+	/*
+	 * A query wait without a preceding query end will
+	 * actually finish all queries for this cid
+	 * without writing to the query result structure.
+	 */
+
+	struct ttm_buffer_object *bo = dev_priv->dummy_query_bo;
+	struct {
+		SVGA3dCmdHeader header;
+		SVGA3dCmdWaitForGBQuery body;
+	} *cmd;
+
+	cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
+
+	if (unlikely(cmd == NULL)) {
+		DRM_ERROR("Out of fifo space for dummy query.\n");
+		return -ENOMEM;
+	}
+
+	cmd->header.id = SVGA_3D_CMD_WAIT_FOR_GB_QUERY;
+	cmd->header.size = sizeof(cmd->body);
+	cmd->body.cid = cid;
+	cmd->body.type = SVGA3D_QUERYTYPE_OCCLUSION;
+	BUG_ON(bo->mem.mem_type != VMW_PL_MOB);
+	cmd->body.mobid = bo->mem.start;
+	cmd->body.offset = 0;
+
+	vmw_fifo_commit(dev_priv, sizeof(*cmd));
+
+	return 0;
+}
+
+
+/**
+ * vmw_fifo_emit_dummy_query - emits a dummy query to the fifo using
+ * appropriate resource query commands.
+ *
+ * @dev_priv: The device private structure.
+ * @cid: The hardware context id used for the query.
+ *
+ * This function is used to emit a dummy occlusion query with
+ * no primitives rendered between query begin and query end.
+ * It's used to provide a query barrier, in order to know that when
+ * this query is finished, all preceding queries are also finished.
+ *
+ * A Query results structure should have been initialized at the start
+ * of the dev_priv->dummy_query_bo buffer object. And that buffer object
+ * must also be either reserved or pinned when this function is called.
+ *
+ * Returns -ENOMEM on failure to reserve fifo space.
+ */
+int vmw_fifo_emit_dummy_query(struct vmw_private *dev_priv,
+			      uint32_t cid)
+{
+	if (dev_priv->has_mob)
+		return vmw_fifo_emit_dummy_gb_query(dev_priv, cid);
+
+	return vmw_fifo_emit_dummy_legacy_query(dev_priv, cid);
+}
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_gmr.c b/drivers/gpu/drm/vmwgfx/vmwgfx_gmr.c
index 6ef0b03..61d8d80 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_gmr.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_gmr.c
@@ -125,181 +125,27 @@
 }
 
 
-static void vmw_gmr_free_descriptors(struct device *dev, dma_addr_t desc_dma,
-				     struct list_head *desc_pages)
-{
-	struct page *page, *next;
-	struct svga_guest_mem_descriptor *page_virtual;
-	unsigned int desc_per_page = PAGE_SIZE /
-		sizeof(struct svga_guest_mem_descriptor) - 1;
-
-	if (list_empty(desc_pages))
-		return;
-
-	list_for_each_entry_safe(page, next, desc_pages, lru) {
-		list_del_init(&page->lru);
-
-		if (likely(desc_dma != DMA_ADDR_INVALID)) {
-			dma_unmap_page(dev, desc_dma, PAGE_SIZE,
-				       DMA_TO_DEVICE);
-		}
-
-		page_virtual = kmap_atomic(page);
-		desc_dma = (dma_addr_t)
-			le32_to_cpu(page_virtual[desc_per_page].ppn) <<
-			PAGE_SHIFT;
-		kunmap_atomic(page_virtual);
-
-		__free_page(page);
-	}
-}
-
-/**
- * FIXME: Adjust to the ttm lowmem / highmem storage to minimize
- * the number of used descriptors.
- *
- */
-
-static int vmw_gmr_build_descriptors(struct device *dev,
-				     struct list_head *desc_pages,
-				     struct vmw_piter *iter,
-				     unsigned long num_pages,
-				     dma_addr_t *first_dma)
-{
-	struct page *page;
-	struct svga_guest_mem_descriptor *page_virtual = NULL;
-	struct svga_guest_mem_descriptor *desc_virtual = NULL;
-	unsigned int desc_per_page;
-	unsigned long prev_pfn;
-	unsigned long pfn;
-	int ret;
-	dma_addr_t desc_dma;
-
-	desc_per_page = PAGE_SIZE /
-	    sizeof(struct svga_guest_mem_descriptor) - 1;
-
-	while (likely(num_pages != 0)) {
-		page = alloc_page(__GFP_HIGHMEM);
-		if (unlikely(page == NULL)) {
-			ret = -ENOMEM;
-			goto out_err;
-		}
-
-		list_add_tail(&page->lru, desc_pages);
-		page_virtual = kmap_atomic(page);
-		desc_virtual = page_virtual - 1;
-		prev_pfn = ~(0UL);
-
-		while (likely(num_pages != 0)) {
-			pfn = vmw_piter_dma_addr(iter) >> PAGE_SHIFT;
-
-			if (pfn != prev_pfn + 1) {
-
-				if (desc_virtual - page_virtual ==
-				    desc_per_page - 1)
-					break;
-
-				(++desc_virtual)->ppn = cpu_to_le32(pfn);
-				desc_virtual->num_pages = cpu_to_le32(1);
-			} else {
-				uint32_t tmp =
-				    le32_to_cpu(desc_virtual->num_pages);
-				desc_virtual->num_pages = cpu_to_le32(tmp + 1);
-			}
-			prev_pfn = pfn;
-			--num_pages;
-			vmw_piter_next(iter);
-		}
-
-		(++desc_virtual)->ppn = DMA_PAGE_INVALID;
-		desc_virtual->num_pages = cpu_to_le32(0);
-		kunmap_atomic(page_virtual);
-	}
-
-	desc_dma = 0;
-	list_for_each_entry_reverse(page, desc_pages, lru) {
-		page_virtual = kmap_atomic(page);
-		page_virtual[desc_per_page].ppn = cpu_to_le32
-			(desc_dma >> PAGE_SHIFT);
-		kunmap_atomic(page_virtual);
-		desc_dma = dma_map_page(dev, page, 0, PAGE_SIZE,
-					DMA_TO_DEVICE);
-
-		if (unlikely(dma_mapping_error(dev, desc_dma)))
-			goto out_err;
-	}
-	*first_dma = desc_dma;
-
-	return 0;
-out_err:
-	vmw_gmr_free_descriptors(dev, DMA_ADDR_INVALID, desc_pages);
-	return ret;
-}
-
-static void vmw_gmr_fire_descriptors(struct vmw_private *dev_priv,
-				     int gmr_id, dma_addr_t desc_dma)
-{
-	mutex_lock(&dev_priv->hw_mutex);
-
-	vmw_write(dev_priv, SVGA_REG_GMR_ID, gmr_id);
-	wmb();
-	vmw_write(dev_priv, SVGA_REG_GMR_DESCRIPTOR, desc_dma >> PAGE_SHIFT);
-	mb();
-
-	mutex_unlock(&dev_priv->hw_mutex);
-
-}
-
 int vmw_gmr_bind(struct vmw_private *dev_priv,
 		 const struct vmw_sg_table *vsgt,
 		 unsigned long num_pages,
 		 int gmr_id)
 {
-	struct list_head desc_pages;
-	dma_addr_t desc_dma = 0;
-	struct device *dev = dev_priv->dev->dev;
 	struct vmw_piter data_iter;
-	int ret;
 
 	vmw_piter_start(&data_iter, vsgt, 0);
 
 	if (unlikely(!vmw_piter_next(&data_iter)))
 		return 0;
 
-	if (likely(dev_priv->capabilities & SVGA_CAP_GMR2))
-		return vmw_gmr2_bind(dev_priv, &data_iter, num_pages, gmr_id);
-
-	if (unlikely(!(dev_priv->capabilities & SVGA_CAP_GMR)))
+	if (unlikely(!(dev_priv->capabilities & SVGA_CAP_GMR2)))
 		return -EINVAL;
 
-	if (vsgt->num_regions > dev_priv->max_gmr_descriptors)
-		return -EINVAL;
-
-	INIT_LIST_HEAD(&desc_pages);
-
-	ret = vmw_gmr_build_descriptors(dev, &desc_pages, &data_iter,
-					num_pages, &desc_dma);
-	if (unlikely(ret != 0))
-		return ret;
-
-	vmw_gmr_fire_descriptors(dev_priv, gmr_id, desc_dma);
-	vmw_gmr_free_descriptors(dev, desc_dma, &desc_pages);
-
-	return 0;
+	return vmw_gmr2_bind(dev_priv, &data_iter, num_pages, gmr_id);
 }
 
 
 void vmw_gmr_unbind(struct vmw_private *dev_priv, int gmr_id)
 {
-	if (likely(dev_priv->capabilities & SVGA_CAP_GMR2)) {
+	if (likely(dev_priv->capabilities & SVGA_CAP_GMR2))
 		vmw_gmr2_unbind(dev_priv, gmr_id);
-		return;
-	}
-
-	mutex_lock(&dev_priv->hw_mutex);
-	vmw_write(dev_priv, SVGA_REG_GMR_ID, gmr_id);
-	wmb();
-	vmw_write(dev_priv, SVGA_REG_GMR_DESCRIPTOR, 0);
-	mb();
-	mutex_unlock(&dev_priv->hw_mutex);
 }
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_gmrid_manager.c b/drivers/gpu/drm/vmwgfx/vmwgfx_gmrid_manager.c
index c5c054a..b1273e8 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_gmrid_manager.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_gmrid_manager.c
@@ -125,10 +125,21 @@
 		return -ENOMEM;
 
 	spin_lock_init(&gman->lock);
-	gman->max_gmr_pages = dev_priv->max_gmr_pages;
 	gman->used_gmr_pages = 0;
 	ida_init(&gman->gmr_ida);
-	gman->max_gmr_ids = p_size;
+
+	switch (p_size) {
+	case VMW_PL_GMR:
+		gman->max_gmr_ids = dev_priv->max_gmr_ids;
+		gman->max_gmr_pages = dev_priv->max_gmr_pages;
+		break;
+	case VMW_PL_MOB:
+		gman->max_gmr_ids = VMWGFX_NUM_MOB;
+		gman->max_gmr_pages = dev_priv->max_mob_pages;
+		break;
+	default:
+		BUG();
+	}
 	man->priv = (void *) gman;
 	return 0;
 }
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c b/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c
index 45d5b5a..47b7094 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c
@@ -29,12 +29,18 @@
 #include <drm/vmwgfx_drm.h>
 #include "vmwgfx_kms.h"
 
+struct svga_3d_compat_cap {
+	SVGA3dCapsRecordHeader header;
+	SVGA3dCapPair pairs[SVGA3D_DEVCAP_MAX];
+};
+
 int vmw_getparam_ioctl(struct drm_device *dev, void *data,
 		       struct drm_file *file_priv)
 {
 	struct vmw_private *dev_priv = vmw_priv(dev);
 	struct drm_vmw_getparam_arg *param =
 	    (struct drm_vmw_getparam_arg *)data;
+	struct vmw_fpriv *vmw_fp = vmw_fpriv(file_priv);
 
 	switch (param->param) {
 	case DRM_VMW_PARAM_NUM_STREAMS:
@@ -53,13 +59,18 @@
 		param->value = dev_priv->fifo.capabilities;
 		break;
 	case DRM_VMW_PARAM_MAX_FB_SIZE:
-		param->value = dev_priv->vram_size;
+		param->value = dev_priv->prim_bb_mem;
 		break;
 	case DRM_VMW_PARAM_FIFO_HW_VERSION:
 	{
 		__le32 __iomem *fifo_mem = dev_priv->mmio_virt;
 		const struct vmw_fifo_state *fifo = &dev_priv->fifo;
 
+		if ((dev_priv->capabilities & SVGA_CAP_GBOBJECTS)) {
+			param->value = SVGA3D_HWVERSION_WS8_B1;
+			break;
+		}
+
 		param->value =
 			ioread32(fifo_mem +
 				 ((fifo->capabilities &
@@ -69,7 +80,30 @@
 		break;
 	}
 	case DRM_VMW_PARAM_MAX_SURF_MEMORY:
-		param->value = dev_priv->memory_size;
+		if ((dev_priv->capabilities & SVGA_CAP_GBOBJECTS) &&
+		    !vmw_fp->gb_aware)
+			param->value = dev_priv->max_mob_pages * PAGE_SIZE / 2;
+		else
+			param->value = dev_priv->memory_size;
+		break;
+	case DRM_VMW_PARAM_3D_CAPS_SIZE:
+		if ((dev_priv->capabilities & SVGA_CAP_GBOBJECTS) &&
+		    vmw_fp->gb_aware)
+			param->value = SVGA3D_DEVCAP_MAX * sizeof(uint32_t);
+		else if (dev_priv->capabilities & SVGA_CAP_GBOBJECTS)
+			param->value = sizeof(struct svga_3d_compat_cap) +
+				sizeof(uint32_t);
+		else
+			param->value = (SVGA_FIFO_3D_CAPS_LAST -
+					SVGA_FIFO_3D_CAPS + 1) *
+				sizeof(uint32_t);
+		break;
+	case DRM_VMW_PARAM_MAX_MOB_MEMORY:
+		vmw_fp->gb_aware = true;
+		param->value = dev_priv->max_mob_pages * PAGE_SIZE;
+		break;
+	case DRM_VMW_PARAM_MAX_MOB_SIZE:
+		param->value = dev_priv->max_mob_size;
 		break;
 	default:
 		DRM_ERROR("Illegal vmwgfx get param request: %d\n",
@@ -80,6 +114,38 @@
 	return 0;
 }
 
+static int vmw_fill_compat_cap(struct vmw_private *dev_priv, void *bounce,
+			       size_t size)
+{
+	struct svga_3d_compat_cap *compat_cap =
+		(struct svga_3d_compat_cap *) bounce;
+	unsigned int i;
+	size_t pair_offset = offsetof(struct svga_3d_compat_cap, pairs);
+	unsigned int max_size;
+
+	if (size < pair_offset)
+		return -EINVAL;
+
+	max_size = (size - pair_offset) / sizeof(SVGA3dCapPair);
+
+	if (max_size > SVGA3D_DEVCAP_MAX)
+		max_size = SVGA3D_DEVCAP_MAX;
+
+	compat_cap->header.length =
+		(pair_offset + max_size * sizeof(SVGA3dCapPair)) / sizeof(u32);
+	compat_cap->header.type = SVGA3DCAPS_RECORD_DEVCAPS;
+
+	mutex_lock(&dev_priv->hw_mutex);
+	for (i = 0; i < max_size; ++i) {
+		vmw_write(dev_priv, SVGA_REG_DEV_CAP, i);
+		compat_cap->pairs[i][0] = i;
+		compat_cap->pairs[i][1] = vmw_read(dev_priv, SVGA_REG_DEV_CAP);
+	}
+	mutex_unlock(&dev_priv->hw_mutex);
+
+	return 0;
+}
+
 
 int vmw_get_cap_3d_ioctl(struct drm_device *dev, void *data,
 			 struct drm_file *file_priv)
@@ -92,29 +158,58 @@
 	void __user *buffer = (void __user *)((unsigned long)(arg->buffer));
 	void *bounce;
 	int ret;
+	bool gb_objects = !!(dev_priv->capabilities & SVGA_CAP_GBOBJECTS);
+	struct vmw_fpriv *vmw_fp = vmw_fpriv(file_priv);
 
 	if (unlikely(arg->pad64 != 0)) {
 		DRM_ERROR("Illegal GET_3D_CAP argument.\n");
 		return -EINVAL;
 	}
 
-	size = (SVGA_FIFO_3D_CAPS_LAST - SVGA_FIFO_3D_CAPS + 1) << 2;
+	if (gb_objects && vmw_fp->gb_aware)
+		size = SVGA3D_DEVCAP_MAX * sizeof(uint32_t);
+	else if (gb_objects)
+		size = sizeof(struct svga_3d_compat_cap) + sizeof(uint32_t);
+	else
+		size = (SVGA_FIFO_3D_CAPS_LAST - SVGA_FIFO_3D_CAPS + 1) *
+			sizeof(uint32_t);
 
 	if (arg->max_size < size)
 		size = arg->max_size;
 
-	bounce = vmalloc(size);
+	bounce = vzalloc(size);
 	if (unlikely(bounce == NULL)) {
 		DRM_ERROR("Failed to allocate bounce buffer for 3D caps.\n");
 		return -ENOMEM;
 	}
 
-	fifo_mem = dev_priv->mmio_virt;
-	memcpy_fromio(bounce, &fifo_mem[SVGA_FIFO_3D_CAPS], size);
+	if (gb_objects && vmw_fp->gb_aware) {
+		int i, num;
+		uint32_t *bounce32 = (uint32_t *) bounce;
+
+		num = size / sizeof(uint32_t);
+		if (num > SVGA3D_DEVCAP_MAX)
+			num = SVGA3D_DEVCAP_MAX;
+
+		mutex_lock(&dev_priv->hw_mutex);
+		for (i = 0; i < num; ++i) {
+			vmw_write(dev_priv, SVGA_REG_DEV_CAP, i);
+			*bounce32++ = vmw_read(dev_priv, SVGA_REG_DEV_CAP);
+		}
+		mutex_unlock(&dev_priv->hw_mutex);
+	} else if (gb_objects) {
+		ret = vmw_fill_compat_cap(dev_priv, bounce, size);
+		if (unlikely(ret != 0))
+			goto out_err;
+	} else {
+		fifo_mem = dev_priv->mmio_virt;
+		memcpy_fromio(bounce, &fifo_mem[SVGA_FIFO_3D_CAPS], size);
+	}
 
 	ret = copy_to_user(buffer, bounce, size);
 	if (ret)
 		ret = -EFAULT;
+out_err:
 	vfree(bounce);
 
 	if (unlikely(ret != 0))
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c b/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c
index 4640adb..0c42376 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c
@@ -30,7 +30,7 @@
 
 #define VMW_FENCE_WRAP (1 << 24)
 
-irqreturn_t vmw_irq_handler(DRM_IRQ_ARGS)
+irqreturn_t vmw_irq_handler(int irq, void *arg)
 {
 	struct drm_device *dev = (struct drm_device *)arg;
 	struct vmw_private *dev_priv = vmw_priv(dev);
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
index 03f1c20..8a65041 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
@@ -40,7 +40,7 @@
  * Clip @num_rects number of @rects against @clip storing the
  * results in @out_rects and the number of passed rects in @out_num.
  */
-void vmw_clip_cliprects(struct drm_clip_rect *rects,
+static void vmw_clip_cliprects(struct drm_clip_rect *rects,
 			int num_rects,
 			struct vmw_clip_rect clip,
 			SVGASignedRect *out_rects,
@@ -423,7 +423,7 @@
 	struct drm_master *master;
 };
 
-void vmw_framebuffer_surface_destroy(struct drm_framebuffer *framebuffer)
+static void vmw_framebuffer_surface_destroy(struct drm_framebuffer *framebuffer)
 {
 	struct vmw_framebuffer_surface *vfbs =
 		vmw_framebuffer_to_vfbs(framebuffer);
@@ -589,7 +589,7 @@
 	return ret;
 }
 
-int vmw_framebuffer_surface_dirty(struct drm_framebuffer *framebuffer,
+static int vmw_framebuffer_surface_dirty(struct drm_framebuffer *framebuffer,
 				  struct drm_file *file_priv,
 				  unsigned flags, unsigned color,
 				  struct drm_clip_rect *clips,
@@ -609,9 +609,13 @@
 	if (!dev_priv->sou_priv)
 		return -EINVAL;
 
+	drm_modeset_lock_all(dev_priv->dev);
+
 	ret = ttm_read_lock(&vmaster->lock, true);
-	if (unlikely(ret != 0))
+	if (unlikely(ret != 0)) {
+		drm_modeset_unlock_all(dev_priv->dev);
 		return ret;
+	}
 
 	if (!num_clips) {
 		num_clips = 1;
@@ -629,6 +633,9 @@
 				   clips, num_clips, inc, NULL);
 
 	ttm_read_unlock(&vmaster->lock);
+
+	drm_modeset_unlock_all(dev_priv->dev);
+
 	return 0;
 }
 
@@ -665,9 +672,9 @@
 
 	if (unlikely(surface->mip_levels[0] != 1 ||
 		     surface->num_sizes != 1 ||
-		     surface->sizes[0].width < mode_cmd->width ||
-		     surface->sizes[0].height < mode_cmd->height ||
-		     surface->sizes[0].depth != 1)) {
+		     surface->base_size.width < mode_cmd->width ||
+		     surface->base_size.height < mode_cmd->height ||
+		     surface->base_size.depth != 1)) {
 		DRM_ERROR("Incompatible surface dimensions "
 			  "for requested mode.\n");
 		return -EINVAL;
@@ -754,7 +761,7 @@
 	struct vmw_dma_buffer *buffer;
 };
 
-void vmw_framebuffer_dmabuf_destroy(struct drm_framebuffer *framebuffer)
+static void vmw_framebuffer_dmabuf_destroy(struct drm_framebuffer *framebuffer)
 {
 	struct vmw_framebuffer_dmabuf *vfbd =
 		vmw_framebuffer_to_vfbd(framebuffer);
@@ -940,7 +947,7 @@
 	return ret;
 }
 
-int vmw_framebuffer_dmabuf_dirty(struct drm_framebuffer *framebuffer,
+static int vmw_framebuffer_dmabuf_dirty(struct drm_framebuffer *framebuffer,
 				 struct drm_file *file_priv,
 				 unsigned flags, unsigned color,
 				 struct drm_clip_rect *clips,
@@ -953,9 +960,13 @@
 	struct drm_clip_rect norect;
 	int ret, increment = 1;
 
+	drm_modeset_lock_all(dev_priv->dev);
+
 	ret = ttm_read_lock(&vmaster->lock, true);
-	if (unlikely(ret != 0))
+	if (unlikely(ret != 0)) {
+		drm_modeset_unlock_all(dev_priv->dev);
 		return ret;
+	}
 
 	if (!num_clips) {
 		num_clips = 1;
@@ -979,6 +990,9 @@
 	}
 
 	ttm_read_unlock(&vmaster->lock);
+
+	drm_modeset_unlock_all(dev_priv->dev);
+
 	return ret;
 }
 
@@ -1631,7 +1645,7 @@
 				uint32_t pitch,
 				uint32_t height)
 {
-	return ((u64) pitch * (u64) height) < (u64) dev_priv->vram_size;
+	return ((u64) pitch * (u64) height) < (u64) dev_priv->prim_bb_mem;
 }
 
 
@@ -1663,7 +1677,7 @@
  * Small shared kms functions.
  */
 
-int vmw_du_update_layout(struct vmw_private *dev_priv, unsigned num,
+static int vmw_du_update_layout(struct vmw_private *dev_priv, unsigned num,
 			 struct drm_vmw_rect *rects)
 {
 	struct drm_device *dev = dev_priv->dev;
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_mob.c b/drivers/gpu/drm/vmwgfx/vmwgfx_mob.c
new file mode 100644
index 0000000..d4a5a19
--- /dev/null
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_mob.c
@@ -0,0 +1,653 @@
+/**************************************************************************
+ *
+ * Copyright © 2012 VMware, Inc., Palo Alto, CA., USA
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+#include "vmwgfx_drv.h"
+
+/*
+ * If we set up the screen target otable, screen objects stop working.
+ */
+
+#define VMW_OTABLE_SETUP_SUB ((VMWGFX_ENABLE_SCREEN_TARGET_OTABLE) ? 0 : 1)
+
+#ifdef CONFIG_64BIT
+#define VMW_PPN_SIZE 8
+#define VMW_MOBFMT_PTDEPTH_0 SVGA3D_MOBFMT_PTDEPTH64_0
+#define VMW_MOBFMT_PTDEPTH_1 SVGA3D_MOBFMT_PTDEPTH64_1
+#define VMW_MOBFMT_PTDEPTH_2 SVGA3D_MOBFMT_PTDEPTH64_2
+#else
+#define VMW_PPN_SIZE 4
+#define VMW_MOBFMT_PTDEPTH_0 SVGA3D_MOBFMT_PTDEPTH_0
+#define VMW_MOBFMT_PTDEPTH_1 SVGA3D_MOBFMT_PTDEPTH_1
+#define VMW_MOBFMT_PTDEPTH_2 SVGA3D_MOBFMT_PTDEPTH_2
+#endif
+
+/*
+ * struct vmw_mob - Structure containing page table and metadata for a
+ * Guest Memory OBject.
+ *
+ * @pt_bo:           Buffer object holding the page table pages.
+ * @num_pages:       Number of pages that make up the page table.
+ * @pt_level:        The indirection level of the page table. 0-2.
+ * @pt_root_page:    DMA address of the level 0 page of the page table.
+ * @id:              Device id of the mob.
+ */
+struct vmw_mob {
+	struct ttm_buffer_object *pt_bo;
+	unsigned long num_pages;
+	unsigned pt_level;
+	dma_addr_t pt_root_page;
+	uint32_t id;
+};
+
+/*
+ * struct vmw_otable - Guest Memory OBject table metadata
+ *
+ * @size:           Size of the table (page-aligned).
+ * @page_table:     Pointer to a struct vmw_mob holding the page table.
+ */
+struct vmw_otable {
+	unsigned long size;
+	struct vmw_mob *page_table;
+};
+
+static int vmw_mob_pt_populate(struct vmw_private *dev_priv,
+			       struct vmw_mob *mob);
+static void vmw_mob_pt_setup(struct vmw_mob *mob,
+			     struct vmw_piter data_iter,
+			     unsigned long num_data_pages);
+
+/*
+ * vmw_setup_otable_base - Issue an object table base setup command to
+ * the device
+ *
+ * @dev_priv:       Pointer to a device private structure
+ * @type:           Type of object table base
+ * @offset:         Offset into dev_priv::otable_bo at which the table starts.
+ * @otable:         Pointer to otable metadata.
+ *
+ * This function returns -ENOMEM if it fails to reserve fifo space,
+ * and may block waiting for fifo space.
+ */
+static int vmw_setup_otable_base(struct vmw_private *dev_priv,
+				 SVGAOTableType type,
+				 unsigned long offset,
+				 struct vmw_otable *otable)
+{
+	struct {
+		SVGA3dCmdHeader header;
+		SVGA3dCmdSetOTableBase64 body;
+	} *cmd;
+	struct vmw_mob *mob;
+	const struct vmw_sg_table *vsgt;
+	struct vmw_piter iter;
+	int ret;
+
+	BUG_ON(otable->page_table != NULL);
+
+	vsgt = vmw_bo_sg_table(dev_priv->otable_bo);
+	vmw_piter_start(&iter, vsgt, offset >> PAGE_SHIFT);
+	WARN_ON(!vmw_piter_next(&iter));
+
+	mob = vmw_mob_create(otable->size >> PAGE_SHIFT);
+	if (unlikely(mob == NULL)) {
+		DRM_ERROR("Failed creating OTable page table.\n");
+		return -ENOMEM;
+	}
+
+	if (otable->size <= PAGE_SIZE) {
+		mob->pt_level = VMW_MOBFMT_PTDEPTH_0;
+		mob->pt_root_page = vmw_piter_dma_addr(&iter);
+	} else if (vsgt->num_regions == 1) {
+		mob->pt_level = SVGA3D_MOBFMT_RANGE;
+		mob->pt_root_page = vmw_piter_dma_addr(&iter);
+	} else {
+		ret = vmw_mob_pt_populate(dev_priv, mob);
+		if (unlikely(ret != 0))
+			goto out_no_populate;
+
+		vmw_mob_pt_setup(mob, iter, otable->size >> PAGE_SHIFT);
+		mob->pt_level += VMW_MOBFMT_PTDEPTH_1 - SVGA3D_MOBFMT_PTDEPTH_1;
+	}
+
+	cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
+	if (unlikely(cmd == NULL)) {
+		DRM_ERROR("Failed reserving FIFO space for OTable setup.\n");
+		ret = -ENOMEM;
+		goto out_no_fifo;
+	}
+
+	memset(cmd, 0, sizeof(*cmd));
+	cmd->header.id = SVGA_3D_CMD_SET_OTABLE_BASE64;
+	cmd->header.size = sizeof(cmd->body);
+	cmd->body.type = type;
+	cmd->body.baseAddress = cpu_to_le64(mob->pt_root_page >> PAGE_SHIFT);
+	cmd->body.sizeInBytes = otable->size;
+	cmd->body.validSizeInBytes = 0;
+	cmd->body.ptDepth = mob->pt_level;
+
+	/*
+	 * The device doesn't support page-table depth 2, but the otable
+	 * size is determined at compile-time, so this BUG shouldn't
+	 * trigger randomly.
+	 */
+	BUG_ON(mob->pt_level == VMW_MOBFMT_PTDEPTH_2);
+
+	vmw_fifo_commit(dev_priv, sizeof(*cmd));
+	otable->page_table = mob;
+
+	return 0;
+
+out_no_fifo:
+out_no_populate:
+	vmw_mob_destroy(mob);
+	return ret;
+}
+
+/*
+ * vmw_takedown_otable_base - Issue an object table base takedown command
+ * to the device
+ *
+ * @dev_priv:       Pointer to a device private structure
+ * @type:           Type of object table base
+ * @otable:         Pointer to otable metadata.
+ */
+static void vmw_takedown_otable_base(struct vmw_private *dev_priv,
+				     SVGAOTableType type,
+				     struct vmw_otable *otable)
+{
+	struct {
+		SVGA3dCmdHeader header;
+		SVGA3dCmdSetOTableBase body;
+	} *cmd;
+	struct ttm_buffer_object *bo;
+
+	if (otable->page_table == NULL)
+		return;
+
+	bo = otable->page_table->pt_bo;
+	cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
+	if (unlikely(cmd == NULL)) {
+		DRM_ERROR("Failed reserving FIFO space for OTable "
+			  "takedown.\n");
+	} else {
+		memset(cmd, 0, sizeof(*cmd));
+		cmd->header.id = SVGA_3D_CMD_SET_OTABLE_BASE;
+		cmd->header.size = sizeof(cmd->body);
+		cmd->body.type = type;
+		cmd->body.baseAddress = 0;
+		cmd->body.sizeInBytes = 0;
+		cmd->body.validSizeInBytes = 0;
+		cmd->body.ptDepth = SVGA3D_MOBFMT_INVALID;
+		vmw_fifo_commit(dev_priv, sizeof(*cmd));
+	}
+
+	if (bo) {
+		int ret;
+
+		ret = ttm_bo_reserve(bo, false, true, false, NULL);
+		BUG_ON(ret != 0);
+
+		vmw_fence_single_bo(bo, NULL);
+		ttm_bo_unreserve(bo);
+	}
+
+	vmw_mob_destroy(otable->page_table);
+	otable->page_table = NULL;
+}
+
+/*
+ * vmw_otables_setup - Set up guest backed memory object tables
+ *
+ * @dev_priv:       Pointer to a device private structure
+ *
+ * Takes care of the device guest backed surface
+ * initialization, by setting up the guest backed memory object tables.
+ * Returns 0 on success and various error codes on failure. A successful return
+ * means the object tables can be taken down using the vmw_otables_takedown
+ * function.
+ */
+int vmw_otables_setup(struct vmw_private *dev_priv)
+{
+	unsigned long offset;
+	unsigned long bo_size;
+	struct vmw_otable *otables;
+	SVGAOTableType i;
+	int ret;
+
+	otables = kzalloc(SVGA_OTABLE_DX9_MAX * sizeof(*otables),
+			  GFP_KERNEL);
+	if (unlikely(otables == NULL)) {
+		DRM_ERROR("Failed to allocate space for otable "
+			  "metadata.\n");
+		return -ENOMEM;
+	}
+
+	otables[SVGA_OTABLE_MOB].size =
+		VMWGFX_NUM_MOB * SVGA3D_OTABLE_MOB_ENTRY_SIZE;
+	otables[SVGA_OTABLE_SURFACE].size =
+		VMWGFX_NUM_GB_SURFACE * SVGA3D_OTABLE_SURFACE_ENTRY_SIZE;
+	otables[SVGA_OTABLE_CONTEXT].size =
+		VMWGFX_NUM_GB_CONTEXT * SVGA3D_OTABLE_CONTEXT_ENTRY_SIZE;
+	otables[SVGA_OTABLE_SHADER].size =
+		VMWGFX_NUM_GB_SHADER * SVGA3D_OTABLE_SHADER_ENTRY_SIZE;
+	otables[SVGA_OTABLE_SCREEN_TARGET].size =
+		VMWGFX_NUM_GB_SCREEN_TARGET *
+		SVGA3D_OTABLE_SCREEN_TARGET_ENTRY_SIZE;
+
+	bo_size = 0;
+	for (i = 0; i < SVGA_OTABLE_DX9_MAX; ++i) {
+		otables[i].size =
+			(otables[i].size + PAGE_SIZE - 1) & PAGE_MASK;
+		bo_size += otables[i].size;
+	}
+
+	ret = ttm_bo_create(&dev_priv->bdev, bo_size,
+			    ttm_bo_type_device,
+			    &vmw_sys_ne_placement,
+			    0, false, NULL,
+			    &dev_priv->otable_bo);
+
+	if (unlikely(ret != 0))
+		goto out_no_bo;
+
+	ret = ttm_bo_reserve(dev_priv->otable_bo, false, true, false, NULL);
+	BUG_ON(ret != 0);
+	ret = vmw_bo_driver.ttm_tt_populate(dev_priv->otable_bo->ttm);
+	if (unlikely(ret != 0))
+		goto out_unreserve;
+	ret = vmw_bo_map_dma(dev_priv->otable_bo);
+	if (unlikely(ret != 0))
+		goto out_unreserve;
+
+	ttm_bo_unreserve(dev_priv->otable_bo);
+
+	offset = 0;
+	for (i = 0; i < SVGA_OTABLE_DX9_MAX - VMW_OTABLE_SETUP_SUB; ++i) {
+		ret = vmw_setup_otable_base(dev_priv, i, offset,
+					    &otables[i]);
+		if (unlikely(ret != 0))
+			goto out_no_setup;
+		offset += otables[i].size;
+	}
+
+	dev_priv->otables = otables;
+	return 0;
+
+out_unreserve:
+	ttm_bo_unreserve(dev_priv->otable_bo);
+out_no_setup:
+	for (i = 0; i < SVGA_OTABLE_DX9_MAX - VMW_OTABLE_SETUP_SUB; ++i)
+		vmw_takedown_otable_base(dev_priv, i, &otables[i]);
+
+	ttm_bo_unref(&dev_priv->otable_bo);
+out_no_bo:
+	kfree(otables);
+	return ret;
+}
+
+
+/*
+ * vmw_otables_takedown - Take down guest backed memory object tables
+ *
+ * @dev_priv:       Pointer to a device private structure
+ *
+ * Take down the Guest Memory Object tables.
+ */
+void vmw_otables_takedown(struct vmw_private *dev_priv)
+{
+	SVGAOTableType i;
+	struct ttm_buffer_object *bo = dev_priv->otable_bo;
+	int ret;
+
+	for (i = 0; i < SVGA_OTABLE_DX9_MAX - VMW_OTABLE_SETUP_SUB; ++i)
+		vmw_takedown_otable_base(dev_priv, i,
+					 &dev_priv->otables[i]);
+
+	ret = ttm_bo_reserve(bo, false, true, false, NULL);
+	BUG_ON(ret != 0);
+
+	vmw_fence_single_bo(bo, NULL);
+	ttm_bo_unreserve(bo);
+
+	ttm_bo_unref(&dev_priv->otable_bo);
+	kfree(dev_priv->otables);
+	dev_priv->otables = NULL;
+}
+
+
+/*
+ * vmw_mob_calculate_pt_pages - Calculate the number of page table pages
+ * needed for a guest backed memory object.
+ *
+ * @data_pages:  Number of data pages in the memory object buffer.
+ */
+static unsigned long vmw_mob_calculate_pt_pages(unsigned long data_pages)
+{
+	unsigned long data_size = data_pages * PAGE_SIZE;
+	unsigned long tot_size = 0;
+
+	while (likely(data_size > PAGE_SIZE)) {
+		data_size = DIV_ROUND_UP(data_size, PAGE_SIZE);
+		data_size *= VMW_PPN_SIZE;
+		tot_size += (data_size + PAGE_SIZE - 1) & PAGE_MASK;
+	}
+
+	return tot_size >> PAGE_SHIFT;
+}
+
+/*
+ * vmw_mob_create - Create a mob, but don't populate it.
+ *
+ * @data_pages:  Number of data pages of the underlying buffer object.
+ */
+struct vmw_mob *vmw_mob_create(unsigned long data_pages)
+{
+	struct vmw_mob *mob = kzalloc(sizeof(*mob), GFP_KERNEL);
+
+	if (unlikely(mob == NULL))
+		return NULL;
+
+	mob->num_pages = vmw_mob_calculate_pt_pages(data_pages);
+
+	return mob;
+}
+
+/*
+ * vmw_mob_pt_populate - Populate the mob pagetable
+ *
+ * @mob:         Pointer to the mob the pagetable of which we want to
+ *               populate.
+ *
+ * This function allocates memory to be used for the pagetable, and
+ * adjusts TTM memory accounting accordingly. Returns -ENOMEM if
+ * memory resources aren't sufficient. Going through the TTM memory
+ * accounting may cause other TTM buffer objects to be swapped out.
+ */
+static int vmw_mob_pt_populate(struct vmw_private *dev_priv,
+			       struct vmw_mob *mob)
+{
+	int ret;
+	BUG_ON(mob->pt_bo != NULL);
+
+	ret = ttm_bo_create(&dev_priv->bdev, mob->num_pages * PAGE_SIZE,
+			    ttm_bo_type_device,
+			    &vmw_sys_ne_placement,
+			    0, false, NULL, &mob->pt_bo);
+	if (unlikely(ret != 0))
+		return ret;
+
+	ret = ttm_bo_reserve(mob->pt_bo, false, true, false, NULL);
+
+	BUG_ON(ret != 0);
+	ret = vmw_bo_driver.ttm_tt_populate(mob->pt_bo->ttm);
+	if (unlikely(ret != 0))
+		goto out_unreserve;
+	ret = vmw_bo_map_dma(mob->pt_bo);
+	if (unlikely(ret != 0))
+		goto out_unreserve;
+
+	ttm_bo_unreserve(mob->pt_bo);
+
+	return 0;
+
+out_unreserve:
+	ttm_bo_unreserve(mob->pt_bo);
+	ttm_bo_unref(&mob->pt_bo);
+
+	return ret;
+}
+
+/**
+ * vmw_mob_assign_ppn - Assign a value to a page table entry
+ *
+ * @addr: Pointer to pointer to page table entry.
+ * @val: The page table entry
+ *
+ * Assigns a value to a page table entry pointed to by *@addr and increments
+ * *@addr according to the page table entry size.
+ */
+#if (VMW_PPN_SIZE == 8)
+static void vmw_mob_assign_ppn(__le32 **addr, dma_addr_t val)
+{
+	*((__le64 *) *addr) = cpu_to_le64(val >> PAGE_SHIFT);
+	*addr += 2;
+}
+#else
+static void vmw_mob_assign_ppn(__le32 **addr, dma_addr_t val)
+{
+	*(*addr)++ = cpu_to_le32(val >> PAGE_SHIFT);
+}
+#endif
+
+/*
+ * vmw_mob_build_pt - Build a pagetable
+ *
+ * @data_iter:      Page iterator over the underlying buffer object's
+ *                  data pages.
+ * @num_data_pages: Number of buffer object data pages.
+ * @pt_iter:        Page iterator over the page table pages.
+ *
+ * Returns the number of page table pages actually used.
+ * Uses atomic kmaps of highmem pages to avoid TLB thrashing.
+ */
+static unsigned long vmw_mob_build_pt(struct vmw_piter *data_iter,
+				      unsigned long num_data_pages,
+				      struct vmw_piter *pt_iter)
+{
+	unsigned long pt_size = num_data_pages * VMW_PPN_SIZE;
+	unsigned long num_pt_pages = DIV_ROUND_UP(pt_size, PAGE_SIZE);
+	unsigned long pt_page;
+	__le32 *addr, *save_addr;
+	unsigned long i;
+	struct page *page;
+
+	for (pt_page = 0; pt_page < num_pt_pages; ++pt_page) {
+		page = vmw_piter_page(pt_iter);
+
+		save_addr = addr = kmap_atomic(page);
+
+		for (i = 0; i < PAGE_SIZE / VMW_PPN_SIZE; ++i) {
+			vmw_mob_assign_ppn(&addr,
+					   vmw_piter_dma_addr(data_iter));
+			if (unlikely(--num_data_pages == 0))
+				break;
+			WARN_ON(!vmw_piter_next(data_iter));
+		}
+		kunmap_atomic(save_addr);
+		vmw_piter_next(pt_iter);
+	}
+
+	return num_pt_pages;
+}
+
+/*
+ * vmw_mob_pt_setup - Set up a multilevel mob pagetable
+ *
+ * @mob:            Pointer to a mob whose page table needs setting up.
+ * @data_iter:      Page iterator over the buffer object's data
+ *                  pages.
+ * @num_data_pages: Number of buffer object data pages.
+ *
+ * Iteratively builds the page-table levels, bottom up, until a single
+ * root page remains.
+ */
+static void vmw_mob_pt_setup(struct vmw_mob *mob,
+			     struct vmw_piter data_iter,
+			     unsigned long num_data_pages)
+{
+	unsigned long num_pt_pages = 0;
+	struct ttm_buffer_object *bo = mob->pt_bo;
+	struct vmw_piter save_pt_iter;
+	struct vmw_piter pt_iter;
+	const struct vmw_sg_table *vsgt;
+	int ret;
+
+	ret = ttm_bo_reserve(bo, false, true, false, NULL);
+	BUG_ON(ret != 0);
+
+	vsgt = vmw_bo_sg_table(bo);
+	vmw_piter_start(&pt_iter, vsgt, 0);
+	BUG_ON(!vmw_piter_next(&pt_iter));
+	mob->pt_level = 0;
+	while (likely(num_data_pages > 1)) {
+		++mob->pt_level;
+		BUG_ON(mob->pt_level > 2);
+		save_pt_iter = pt_iter;
+		num_pt_pages = vmw_mob_build_pt(&data_iter, num_data_pages,
+						&pt_iter);
+		data_iter = save_pt_iter;
+		num_data_pages = num_pt_pages;
+	}
+
+	mob->pt_root_page = vmw_piter_dma_addr(&save_pt_iter);
+	ttm_bo_unreserve(bo);
+}
+
+/*
+ * vmw_mob_destroy - Destroy a mob, unpopulating first if necessary.
+ *
+ * @mob:            Pointer to a mob to destroy.
+ */
+void vmw_mob_destroy(struct vmw_mob *mob)
+{
+	if (mob->pt_bo)
+		ttm_bo_unref(&mob->pt_bo);
+	kfree(mob);
+}
+
+/*
+ * vmw_mob_unbind - Hide a mob from the device.
+ *
+ * @dev_priv:       Pointer to a device private.
+ * @mob:            Pointer to the mob to unbind.
+ */
+void vmw_mob_unbind(struct vmw_private *dev_priv,
+		    struct vmw_mob *mob)
+{
+	struct {
+		SVGA3dCmdHeader header;
+		SVGA3dCmdDestroyGBMob body;
+	} *cmd;
+	int ret;
+	struct ttm_buffer_object *bo = mob->pt_bo;
+
+	if (bo) {
+		ret = ttm_bo_reserve(bo, false, true, false, NULL);
+		/*
+		 * No one else should be using this buffer.
+		 */
+		BUG_ON(ret != 0);
+	}
+
+	cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
+	if (unlikely(cmd == NULL)) {
+		DRM_ERROR("Failed reserving FIFO space for Memory "
+			  "Object unbinding.\n");
+	} else {
+		cmd->header.id = SVGA_3D_CMD_DESTROY_GB_MOB;
+		cmd->header.size = sizeof(cmd->body);
+		cmd->body.mobid = mob->id;
+		vmw_fifo_commit(dev_priv, sizeof(*cmd));
+	}
+	if (bo) {
+		vmw_fence_single_bo(bo, NULL);
+		ttm_bo_unreserve(bo);
+	}
+	vmw_3d_resource_dec(dev_priv, false);
+}
+
+/*
+ * vmw_mob_bind - Make a mob visible to the device after first
+ *                populating it if necessary.
+ *
+ * @dev_priv:       Pointer to a device private.
+ * @mob:            Pointer to the mob we're making visible.
+ * @vsgt:           Pointer to a struct vmw_sg_table describing the data pages
+ *                  of the underlying buffer object.
+ * @num_data_pages: Number of data pages of the underlying buffer
+ *                  object.
+ * @mob_id:         Device id of the mob to bind
+ *
+ * This function is intended to be interfaced with the ttm_tt backend
+ * code.
+ */
+int vmw_mob_bind(struct vmw_private *dev_priv,
+		 struct vmw_mob *mob,
+		 const struct vmw_sg_table *vsgt,
+		 unsigned long num_data_pages,
+		 int32_t mob_id)
+{
+	int ret;
+	bool pt_set_up = false;
+	struct vmw_piter data_iter;
+	struct {
+		SVGA3dCmdHeader header;
+		SVGA3dCmdDefineGBMob64 body;
+	} *cmd;
+
+	mob->id = mob_id;
+	vmw_piter_start(&data_iter, vsgt, 0);
+	if (unlikely(!vmw_piter_next(&data_iter)))
+		return 0;
+
+	if (likely(num_data_pages == 1)) {
+		mob->pt_level = VMW_MOBFMT_PTDEPTH_0;
+		mob->pt_root_page = vmw_piter_dma_addr(&data_iter);
+	} else if (vsgt->num_regions == 1) {
+		mob->pt_level = SVGA3D_MOBFMT_RANGE;
+		mob->pt_root_page = vmw_piter_dma_addr(&data_iter);
+	} else if (unlikely(mob->pt_bo == NULL)) {
+		ret = vmw_mob_pt_populate(dev_priv, mob);
+		if (unlikely(ret != 0))
+			return ret;
+
+		vmw_mob_pt_setup(mob, data_iter, num_data_pages);
+		pt_set_up = true;
+		mob->pt_level += VMW_MOBFMT_PTDEPTH_1 - SVGA3D_MOBFMT_PTDEPTH_1;
+	}
+
+	(void) vmw_3d_resource_inc(dev_priv, false);
+
+	cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
+	if (unlikely(cmd == NULL)) {
+		DRM_ERROR("Failed reserving FIFO space for Memory "
+			  "Object binding.\n");
+		goto out_no_cmd_space;
+	}
+
+	cmd->header.id = SVGA_3D_CMD_DEFINE_GB_MOB64;
+	cmd->header.size = sizeof(cmd->body);
+	cmd->body.mobid = mob_id;
+	cmd->body.ptDepth = mob->pt_level;
+	cmd->body.base = cpu_to_le64(mob->pt_root_page >> PAGE_SHIFT);
+	cmd->body.sizeInBytes = num_data_pages * PAGE_SIZE;
+
+	vmw_fifo_commit(dev_priv, sizeof(*cmd));
+
+	return 0;
+
+out_no_cmd_space:
+	vmw_3d_resource_dec(dev_priv, false);
+	if (pt_set_up)
+		ttm_bo_unref(&mob->pt_bo);
+
+	return -ENOMEM;
+}
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c b/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
index 9b5ea2a..2aa4bc6 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
@@ -88,6 +88,11 @@
 	return res;
 }
 
+struct vmw_resource *
+vmw_resource_reference_unless_doomed(struct vmw_resource *res)
+{
+	return kref_get_unless_zero(&res->kref) ? res : NULL;
+}
 
 /**
  * vmw_resource_release_id - release a resource id to the id manager.
@@ -136,8 +141,12 @@
 		vmw_dmabuf_unreference(&res->backup);
 	}
 
-	if (likely(res->hw_destroy != NULL))
+	if (likely(res->hw_destroy != NULL)) {
 		res->hw_destroy(res);
+		mutex_lock(&dev_priv->binding_mutex);
+		vmw_context_binding_res_list_kill(&res->binding_head);
+		mutex_unlock(&dev_priv->binding_mutex);
+	}
 
 	id = res->id;
 	if (res->res_free != NULL)
@@ -215,6 +224,7 @@
 	res->func = func;
 	INIT_LIST_HEAD(&res->lru_head);
 	INIT_LIST_HEAD(&res->mob_head);
+	INIT_LIST_HEAD(&res->binding_head);
 	res->id = -1;
 	res->backup = NULL;
 	res->backup_offset = 0;
@@ -441,6 +451,21 @@
 	ttm_bo_unref(&bo);
 }
 
+static void vmw_user_dmabuf_ref_obj_release(struct ttm_base_object *base,
+					    enum ttm_ref_type ref_type)
+{
+	struct vmw_user_dma_buffer *user_bo;
+	user_bo = container_of(base, struct vmw_user_dma_buffer, prime.base);
+
+	switch (ref_type) {
+	case TTM_REF_SYNCCPU_WRITE:
+		ttm_bo_synccpu_write_release(&user_bo->dma.base);
+		break;
+	default:
+		BUG();
+	}
+}
+
 /**
  * vmw_user_dmabuf_alloc - Allocate a user dma buffer
  *
@@ -471,6 +496,8 @@
 	}
 
 	ret = vmw_dmabuf_init(dev_priv, &user_bo->dma, size,
+			      (dev_priv->has_mob) ?
+			      &vmw_sys_placement :
 			      &vmw_vram_sys_placement, true,
 			      &vmw_user_dmabuf_destroy);
 	if (unlikely(ret != 0))
@@ -482,7 +509,8 @@
 				    &user_bo->prime,
 				    shareable,
 				    ttm_buffer_type,
-				    &vmw_user_dmabuf_release, NULL);
+				    &vmw_user_dmabuf_release,
+				    &vmw_user_dmabuf_ref_obj_release);
 	if (unlikely(ret != 0)) {
 		ttm_bo_unref(&tmp);
 		goto out_no_base_object;
@@ -515,6 +543,130 @@
 		vmw_user_bo->prime.base.shareable) ? 0 : -EPERM;
 }
 
+/**
+ * vmw_user_dmabuf_synccpu_grab - Grab a struct vmw_user_dma_buffer for cpu
+ * access, idling previous GPU operations on the buffer and optionally
+ * blocking it for further command submissions.
+ *
+ * @user_bo: Pointer to the buffer object being grabbed for CPU access
+ * @tfile: Identifying the caller.
+ * @flags: Flags indicating how the grab should be performed.
+ *
+ * A blocking grab will be automatically released when @tfile is closed.
+ */
+static int vmw_user_dmabuf_synccpu_grab(struct vmw_user_dma_buffer *user_bo,
+					struct ttm_object_file *tfile,
+					uint32_t flags)
+{
+	struct ttm_buffer_object *bo = &user_bo->dma.base;
+	bool existed;
+	int ret;
+
+	if (flags & drm_vmw_synccpu_allow_cs) {
+		struct ttm_bo_device *bdev = bo->bdev;
+
+		spin_lock(&bdev->fence_lock);
+		ret = ttm_bo_wait(bo, false, true,
+				  !!(flags & drm_vmw_synccpu_dontblock));
+		spin_unlock(&bdev->fence_lock);
+		return ret;
+	}
+
+	ret = ttm_bo_synccpu_write_grab
+		(bo, !!(flags & drm_vmw_synccpu_dontblock));
+	if (unlikely(ret != 0))
+		return ret;
+
+	ret = ttm_ref_object_add(tfile, &user_bo->prime.base,
+				 TTM_REF_SYNCCPU_WRITE, &existed);
+	if (ret != 0 || existed)
+		ttm_bo_synccpu_write_release(&user_bo->dma.base);
+
+	return ret;
+}
+
+/**
+ * vmw_user_dmabuf_synccpu_release - Release a previous grab for CPU access,
+ * and unblock command submission on the buffer if blocked.
+ *
+ * @handle: Handle identifying the buffer object.
+ * @tfile: Identifying the caller.
+ * @flags: Flags indicating the type of release.
+ */
+static int vmw_user_dmabuf_synccpu_release(uint32_t handle,
+					   struct ttm_object_file *tfile,
+					   uint32_t flags)
+{
+	if (!(flags & drm_vmw_synccpu_allow_cs))
+		return ttm_ref_object_base_unref(tfile, handle,
+						 TTM_REF_SYNCCPU_WRITE);
+
+	return 0;
+}
+
+/**
+ * vmw_user_dmabuf_synccpu_ioctl - ioctl function implementing the synccpu
+ * functionality.
+ *
+ * @dev: Identifies the drm device.
+ * @data: Pointer to the ioctl argument.
+ * @file_priv: Identifies the caller.
+ *
+ * This function checks the ioctl arguments for validity and calls the
+ * relevant synccpu functions.
+ */
+int vmw_user_dmabuf_synccpu_ioctl(struct drm_device *dev, void *data,
+				  struct drm_file *file_priv)
+{
+	struct drm_vmw_synccpu_arg *arg =
+		(struct drm_vmw_synccpu_arg *) data;
+	struct vmw_dma_buffer *dma_buf;
+	struct vmw_user_dma_buffer *user_bo;
+	struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
+	int ret;
+
+	if ((arg->flags & (drm_vmw_synccpu_read | drm_vmw_synccpu_write)) == 0
+	    || (arg->flags & ~(drm_vmw_synccpu_read | drm_vmw_synccpu_write |
+			       drm_vmw_synccpu_dontblock |
+			       drm_vmw_synccpu_allow_cs)) != 0) {
+		DRM_ERROR("Illegal synccpu flags.\n");
+		return -EINVAL;
+	}
+
+	switch (arg->op) {
+	case drm_vmw_synccpu_grab:
+		ret = vmw_user_dmabuf_lookup(tfile, arg->handle, &dma_buf);
+		if (unlikely(ret != 0))
+			return ret;
+
+		user_bo = container_of(dma_buf, struct vmw_user_dma_buffer,
+				       dma);
+		ret = vmw_user_dmabuf_synccpu_grab(user_bo, tfile, arg->flags);
+		vmw_dmabuf_unreference(&dma_buf);
+		if (unlikely(ret != 0 && ret != -ERESTARTSYS &&
+			     ret != -EBUSY)) {
+			DRM_ERROR("Failed synccpu grab on handle 0x%08x.\n",
+				  (unsigned int) arg->handle);
+			return ret;
+		}
+		break;
+	case drm_vmw_synccpu_release:
+		ret = vmw_user_dmabuf_synccpu_release(arg->handle, tfile,
+						      arg->flags);
+		if (unlikely(ret != 0)) {
+			DRM_ERROR("Failed synccpu release on handle 0x%08x.\n",
+				  (unsigned int) arg->handle);
+			return ret;
+		}
+		break;
+	default:
+		DRM_ERROR("Invalid synccpu operation.\n");
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
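A hypothetical user-space sketch of the grab/release flow this ioctl implements, assuming the drm_vmw_synccpu_arg layout and the DRM_VMW_SYNCCPU command number from the accompanying vmwgfx_drm.h changes (not shown in this hunk); helper name and error handling are illustrative only.

#include <stdint.h>
#include <string.h>
#include <xf86drm.h>
#include "vmwgfx_drm.h"

static int example_cpu_write(int fd, uint32_t bo_handle)
{
	struct drm_vmw_synccpu_arg arg;
	int ret;

	/* Grab the buffer for CPU write access, blocking further
	 * command submission until released. */
	memset(&arg, 0, sizeof(arg));
	arg.op = drm_vmw_synccpu_grab;
	arg.flags = drm_vmw_synccpu_write;
	arg.handle = bo_handle;
	ret = drmCommandWrite(fd, DRM_VMW_SYNCCPU, &arg, sizeof(arg));
	if (ret)
		return ret;

	/* ... mmap the buffer and write to it here ... */

	arg.op = drm_vmw_synccpu_release;
	return drmCommandWrite(fd, DRM_VMW_SYNCCPU, &arg, sizeof(arg));
}
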
 int vmw_dmabuf_alloc_ioctl(struct drm_device *dev, void *data,
 			   struct drm_file *file_priv)
 {
@@ -591,7 +743,8 @@
 }
 
 int vmw_user_dmabuf_reference(struct ttm_object_file *tfile,
-			      struct vmw_dma_buffer *dma_buf)
+			      struct vmw_dma_buffer *dma_buf,
+			      uint32_t *handle)
 {
 	struct vmw_user_dma_buffer *user_bo;
 
@@ -599,6 +752,8 @@
 		return -EINVAL;
 
 	user_bo = container_of(dma_buf, struct vmw_user_dma_buffer, dma);
+
+	*handle = user_bo->prime.base.hash.key;
 	return ttm_ref_object_add(tfile, &user_bo->prime.base,
 				  TTM_REF_USAGE, NULL);
 }
@@ -1291,11 +1446,54 @@
  * @mem:            The struct ttm_mem_reg indicating to what memory
  *                  region the move is taking place.
  *
- * For now does nothing.
+ * Evicts the Guest Backed hardware resource if the backup
+ * buffer is being moved out of MOB memory.
+ * Note that this function should not race with the resource
+ * validation code as long as it accesses only members of struct
+ * resource that remain static while bo::res is !NULL and
+ * while we have @bo reserved. struct resource::backup is *not* a
+ * static member. The resource validation code will take care
+ * to set @bo::res to NULL, while having @bo reserved when the
+ * buffer is no longer bound to the resource, so @bo:res can be
+ * used to determine whether there is a need to unbind and whether
+ * it is safe to unbind.
  */
 void vmw_resource_move_notify(struct ttm_buffer_object *bo,
 			      struct ttm_mem_reg *mem)
 {
+	struct vmw_dma_buffer *dma_buf;
+
+	if (mem == NULL)
+		return;
+
+	if (bo->destroy != vmw_dmabuf_bo_free &&
+	    bo->destroy != vmw_user_dmabuf_destroy)
+		return;
+
+	dma_buf = container_of(bo, struct vmw_dma_buffer, base);
+
+	if (mem->mem_type != VMW_PL_MOB) {
+		struct vmw_resource *res, *n;
+		struct ttm_bo_device *bdev = bo->bdev;
+		struct ttm_validate_buffer val_buf;
+
+		val_buf.bo = bo;
+
+		list_for_each_entry_safe(res, n, &dma_buf->res_list, mob_head) {
+
+			if (unlikely(res->func->unbind == NULL))
+				continue;
+
+			(void) res->func->unbind(res, true, &val_buf);
+			res->backup_dirty = true;
+			res->res_dirty = false;
+			list_del_init(&res->mob_head);
+		}
+
+		spin_lock(&bdev->fence_lock);
+		(void) ttm_bo_wait(bo, false, false, false);
+		spin_unlock(&bdev->fence_lock);
+	}
 }
 
 /**
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_shader.c b/drivers/gpu/drm/vmwgfx/vmwgfx_shader.c
new file mode 100644
index 0000000..ee38565
--- /dev/null
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_shader.c
@@ -0,0 +1,812 @@
+/**************************************************************************
+ *
+ * Copyright © 2009-2012 VMware, Inc., Palo Alto, CA., USA
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+#include "vmwgfx_drv.h"
+#include "vmwgfx_resource_priv.h"
+#include "ttm/ttm_placement.h"
+
+#define VMW_COMPAT_SHADER_HT_ORDER 12
+
+struct vmw_shader {
+	struct vmw_resource res;
+	SVGA3dShaderType type;
+	uint32_t size;
+};
+
+struct vmw_user_shader {
+	struct ttm_base_object base;
+	struct vmw_shader shader;
+};
+
+/**
+ * enum vmw_compat_shader_state - Staging state for compat shaders
+ */
+enum vmw_compat_shader_state {
+	VMW_COMPAT_COMMITED,
+	VMW_COMPAT_ADD,
+	VMW_COMPAT_DEL
+};
+
+/**
+ * struct vmw_compat_shader - Metadata for compat shaders.
+ *
+ * @handle: The TTM handle of the guest backed shader.
+ * @tfile: The struct ttm_object_file the guest backed shader is registered
+ * with.
+ * @hash: Hash item for lookup.
+ * @head: List head for staging lists or the compat shader manager list.
+ * @state: Staging state.
+ *
+ * The structure is protected by the cmdbuf lock.
+ */
+struct vmw_compat_shader {
+	u32 handle;
+	struct ttm_object_file *tfile;
+	struct drm_hash_item hash;
+	struct list_head head;
+	enum vmw_compat_shader_state state;
+};
+
+/**
+ * struct vmw_compat_shader_manager - Compat shader manager.
+ *
+ * @shaders: Hash table containing staged and committed compat shaders
+ * @list: List of committed shaders.
+ * @dev_priv: Pointer to a device private structure.
+ *
+ * @shaders and @list are protected by the cmdbuf mutex for now.
+ */
+struct vmw_compat_shader_manager {
+	struct drm_open_hash shaders;
+	struct list_head list;
+	struct vmw_private *dev_priv;
+};
+
+static void vmw_user_shader_free(struct vmw_resource *res);
+static struct vmw_resource *
+vmw_user_shader_base_to_res(struct ttm_base_object *base);
+
+static int vmw_gb_shader_create(struct vmw_resource *res);
+static int vmw_gb_shader_bind(struct vmw_resource *res,
+			       struct ttm_validate_buffer *val_buf);
+static int vmw_gb_shader_unbind(struct vmw_resource *res,
+				 bool readback,
+				 struct ttm_validate_buffer *val_buf);
+static int vmw_gb_shader_destroy(struct vmw_resource *res);
+
+static uint64_t vmw_user_shader_size;
+
+static const struct vmw_user_resource_conv user_shader_conv = {
+	.object_type = VMW_RES_SHADER,
+	.base_obj_to_res = vmw_user_shader_base_to_res,
+	.res_free = vmw_user_shader_free
+};
+
+const struct vmw_user_resource_conv *user_shader_converter =
+	&user_shader_conv;
+
+
+static const struct vmw_res_func vmw_gb_shader_func = {
+	.res_type = vmw_res_shader,
+	.needs_backup = true,
+	.may_evict = true,
+	.type_name = "guest backed shaders",
+	.backup_placement = &vmw_mob_placement,
+	.create = vmw_gb_shader_create,
+	.destroy = vmw_gb_shader_destroy,
+	.bind = vmw_gb_shader_bind,
+	.unbind = vmw_gb_shader_unbind
+};
+
+/**
+ * Shader management:
+ */
+
+static inline struct vmw_shader *
+vmw_res_to_shader(struct vmw_resource *res)
+{
+	return container_of(res, struct vmw_shader, res);
+}
+
+static void vmw_hw_shader_destroy(struct vmw_resource *res)
+{
+	(void) vmw_gb_shader_destroy(res);
+}
+
+static int vmw_gb_shader_init(struct vmw_private *dev_priv,
+			      struct vmw_resource *res,
+			      uint32_t size,
+			      uint64_t offset,
+			      SVGA3dShaderType type,
+			      struct vmw_dma_buffer *byte_code,
+			      void (*res_free) (struct vmw_resource *res))
+{
+	struct vmw_shader *shader = vmw_res_to_shader(res);
+	int ret;
+
+	ret = vmw_resource_init(dev_priv, res, true,
+				res_free, &vmw_gb_shader_func);
+
+	if (unlikely(ret != 0)) {
+		if (res_free)
+			res_free(res);
+		else
+			kfree(res);
+		return ret;
+	}
+
+	res->backup_size = size;
+	if (byte_code) {
+		res->backup = vmw_dmabuf_reference(byte_code);
+		res->backup_offset = offset;
+	}
+	shader->size = size;
+	shader->type = type;
+
+	vmw_resource_activate(res, vmw_hw_shader_destroy);
+	return 0;
+}
+
+static int vmw_gb_shader_create(struct vmw_resource *res)
+{
+	struct vmw_private *dev_priv = res->dev_priv;
+	struct vmw_shader *shader = vmw_res_to_shader(res);
+	int ret;
+	struct {
+		SVGA3dCmdHeader header;
+		SVGA3dCmdDefineGBShader body;
+	} *cmd;
+
+	if (likely(res->id != -1))
+		return 0;
+
+	ret = vmw_resource_alloc_id(res);
+	if (unlikely(ret != 0)) {
+		DRM_ERROR("Failed to allocate a shader id.\n");
+		goto out_no_id;
+	}
+
+	if (unlikely(res->id >= VMWGFX_NUM_GB_SHADER)) {
+		ret = -EBUSY;
+		goto out_no_fifo;
+	}
+
+	cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
+	if (unlikely(cmd == NULL)) {
+		DRM_ERROR("Failed reserving FIFO space for shader "
+			  "creation.\n");
+		ret = -ENOMEM;
+		goto out_no_fifo;
+	}
+
+	cmd->header.id = SVGA_3D_CMD_DEFINE_GB_SHADER;
+	cmd->header.size = sizeof(cmd->body);
+	cmd->body.shid = res->id;
+	cmd->body.type = shader->type;
+	cmd->body.sizeInBytes = shader->size;
+	vmw_fifo_commit(dev_priv, sizeof(*cmd));
+	(void) vmw_3d_resource_inc(dev_priv, false);
+
+	return 0;
+
+out_no_fifo:
+	vmw_resource_release_id(res);
+out_no_id:
+	return ret;
+}
+
+static int vmw_gb_shader_bind(struct vmw_resource *res,
+			      struct ttm_validate_buffer *val_buf)
+{
+	struct vmw_private *dev_priv = res->dev_priv;
+	struct {
+		SVGA3dCmdHeader header;
+		SVGA3dCmdBindGBShader body;
+	} *cmd;
+	struct ttm_buffer_object *bo = val_buf->bo;
+
+	BUG_ON(bo->mem.mem_type != VMW_PL_MOB);
+
+	cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
+	if (unlikely(cmd == NULL)) {
+		DRM_ERROR("Failed reserving FIFO space for shader "
+			  "binding.\n");
+		return -ENOMEM;
+	}
+
+	cmd->header.id = SVGA_3D_CMD_BIND_GB_SHADER;
+	cmd->header.size = sizeof(cmd->body);
+	cmd->body.shid = res->id;
+	cmd->body.mobid = bo->mem.start;
+	cmd->body.offsetInBytes = 0;
+	res->backup_dirty = false;
+	vmw_fifo_commit(dev_priv, sizeof(*cmd));
+
+	return 0;
+}
+
+static int vmw_gb_shader_unbind(struct vmw_resource *res,
+				bool readback,
+				struct ttm_validate_buffer *val_buf)
+{
+	struct vmw_private *dev_priv = res->dev_priv;
+	struct {
+		SVGA3dCmdHeader header;
+		SVGA3dCmdBindGBShader body;
+	} *cmd;
+	struct vmw_fence_obj *fence;
+
+	BUG_ON(res->backup->base.mem.mem_type != VMW_PL_MOB);
+
+	cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
+	if (unlikely(cmd == NULL)) {
+		DRM_ERROR("Failed reserving FIFO space for shader "
+			  "unbinding.\n");
+		return -ENOMEM;
+	}
+
+	cmd->header.id = SVGA_3D_CMD_BIND_GB_SHADER;
+	cmd->header.size = sizeof(cmd->body);
+	cmd->body.shid = res->id;
+	cmd->body.mobid = SVGA3D_INVALID_ID;
+	cmd->body.offsetInBytes = 0;
+	vmw_fifo_commit(dev_priv, sizeof(*cmd));
+
+	/*
+	 * Create a fence object and fence the backup buffer.
+	 */
+
+	(void) vmw_execbuf_fence_commands(NULL, dev_priv,
+					  &fence, NULL);
+
+	vmw_fence_single_bo(val_buf->bo, fence);
+
+	if (likely(fence != NULL))
+		vmw_fence_obj_unreference(&fence);
+
+	return 0;
+}
+
+static int vmw_gb_shader_destroy(struct vmw_resource *res)
+{
+	struct vmw_private *dev_priv = res->dev_priv;
+	struct {
+		SVGA3dCmdHeader header;
+		SVGA3dCmdDestroyGBShader body;
+	} *cmd;
+
+	if (likely(res->id == -1))
+		return 0;
+
+	mutex_lock(&dev_priv->binding_mutex);
+	vmw_context_binding_res_list_scrub(&res->binding_head);
+
+	cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
+	if (unlikely(cmd == NULL)) {
+		DRM_ERROR("Failed reserving FIFO space for shader "
+			  "destruction.\n");
+		mutex_unlock(&dev_priv->binding_mutex);
+		return -ENOMEM;
+	}
+
+	cmd->header.id = SVGA_3D_CMD_DESTROY_GB_SHADER;
+	cmd->header.size = sizeof(cmd->body);
+	cmd->body.shid = res->id;
+	vmw_fifo_commit(dev_priv, sizeof(*cmd));
+	mutex_unlock(&dev_priv->binding_mutex);
+	vmw_resource_release_id(res);
+	vmw_3d_resource_dec(dev_priv, false);
+
+	return 0;
+}
+
+/**
+ * User-space shader management:
+ */
+
+static struct vmw_resource *
+vmw_user_shader_base_to_res(struct ttm_base_object *base)
+{
+	return &(container_of(base, struct vmw_user_shader, base)->
+		 shader.res);
+}
+
+static void vmw_user_shader_free(struct vmw_resource *res)
+{
+	struct vmw_user_shader *ushader =
+		container_of(res, struct vmw_user_shader, shader.res);
+	struct vmw_private *dev_priv = res->dev_priv;
+
+	ttm_base_object_kfree(ushader, base);
+	ttm_mem_global_free(vmw_mem_glob(dev_priv),
+			    vmw_user_shader_size);
+}
+
+/**
+ * This function is called when user space has no more references on the
+ * base object. It releases the base-object's reference on the resource object.
+ */
+
+static void vmw_user_shader_base_release(struct ttm_base_object **p_base)
+{
+	struct ttm_base_object *base = *p_base;
+	struct vmw_resource *res = vmw_user_shader_base_to_res(base);
+
+	*p_base = NULL;
+	vmw_resource_unreference(&res);
+}
+
+int vmw_shader_destroy_ioctl(struct drm_device *dev, void *data,
+			      struct drm_file *file_priv)
+{
+	struct drm_vmw_shader_arg *arg = (struct drm_vmw_shader_arg *)data;
+	struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
+
+	return ttm_ref_object_base_unref(tfile, arg->handle,
+					 TTM_REF_USAGE);
+}
+
+static int vmw_shader_alloc(struct vmw_private *dev_priv,
+			    struct vmw_dma_buffer *buffer,
+			    size_t shader_size,
+			    size_t offset,
+			    SVGA3dShaderType shader_type,
+			    struct ttm_object_file *tfile,
+			    u32 *handle)
+{
+	struct vmw_user_shader *ushader;
+	struct vmw_resource *res, *tmp;
+	int ret;
+
+	/*
+	 * Approximate idr memory usage with 128 bytes. It will be limited
+	 * by the maximum number of shaders anyway.
+	 */
+	if (unlikely(vmw_user_shader_size == 0))
+		vmw_user_shader_size =
+			ttm_round_pot(sizeof(struct vmw_user_shader)) + 128;
+
+	ret = ttm_mem_global_alloc(vmw_mem_glob(dev_priv),
+				   vmw_user_shader_size,
+				   false, true);
+	if (unlikely(ret != 0)) {
+		if (ret != -ERESTARTSYS)
+			DRM_ERROR("Out of graphics memory for shader "
+				  "creation.\n");
+		goto out;
+	}
+
+	ushader = kzalloc(sizeof(*ushader), GFP_KERNEL);
+	if (unlikely(ushader == NULL)) {
+		ttm_mem_global_free(vmw_mem_glob(dev_priv),
+				    vmw_user_shader_size);
+		ret = -ENOMEM;
+		goto out;
+	}
+
+	res = &ushader->shader.res;
+	ushader->base.shareable = false;
+	ushader->base.tfile = NULL;
+
+	/*
+	 * From here on, the destructor takes over resource freeing.
+	 */
+
+	ret = vmw_gb_shader_init(dev_priv, res, shader_size,
+				 offset, shader_type, buffer,
+				 vmw_user_shader_free);
+	if (unlikely(ret != 0))
+		goto out;
+
+	tmp = vmw_resource_reference(res);
+	ret = ttm_base_object_init(tfile, &ushader->base, false,
+				   VMW_RES_SHADER,
+				   &vmw_user_shader_base_release, NULL);
+
+	if (unlikely(ret != 0)) {
+		vmw_resource_unreference(&tmp);
+		goto out_err;
+	}
+
+	if (handle)
+		*handle = ushader->base.hash.key;
+out_err:
+	vmw_resource_unreference(&res);
+out:
+	return ret;
+}
+
+
+int vmw_shader_define_ioctl(struct drm_device *dev, void *data,
+			     struct drm_file *file_priv)
+{
+	struct vmw_private *dev_priv = vmw_priv(dev);
+	struct drm_vmw_shader_create_arg *arg =
+		(struct drm_vmw_shader_create_arg *)data;
+	struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
+	struct vmw_master *vmaster = vmw_master(file_priv->master);
+	struct vmw_dma_buffer *buffer = NULL;
+	SVGA3dShaderType shader_type;
+	int ret;
+
+	if (arg->buffer_handle != SVGA3D_INVALID_ID) {
+		ret = vmw_user_dmabuf_lookup(tfile, arg->buffer_handle,
+					     &buffer);
+		if (unlikely(ret != 0)) {
+			DRM_ERROR("Could not find buffer for shader "
+				  "creation.\n");
+			return ret;
+		}
+
+		if ((u64)buffer->base.num_pages * PAGE_SIZE <
+		    (u64)arg->size + (u64)arg->offset) {
+			DRM_ERROR("Illegal buffer- or shader size.\n");
+			ret = -EINVAL;
+			goto out_bad_arg;
+		}
+	}
+
+	switch (arg->shader_type) {
+	case drm_vmw_shader_type_vs:
+		shader_type = SVGA3D_SHADERTYPE_VS;
+		break;
+	case drm_vmw_shader_type_ps:
+		shader_type = SVGA3D_SHADERTYPE_PS;
+		break;
+	case drm_vmw_shader_type_gs:
+		shader_type = SVGA3D_SHADERTYPE_GS;
+		break;
+	default:
+		DRM_ERROR("Illegal shader type.\n");
+		ret = -EINVAL;
+		goto out_bad_arg;
+	}
+
+	ret = ttm_read_lock(&vmaster->lock, true);
+	if (unlikely(ret != 0))
+		goto out_bad_arg;
+
+	ret = vmw_shader_alloc(dev_priv, buffer, arg->size, arg->offset,
+			       shader_type, tfile, &arg->shader_handle);
+
+	ttm_read_unlock(&vmaster->lock);
+out_bad_arg:
+	vmw_dmabuf_unreference(&buffer);
+	return ret;
+}
+
+/**
+ * vmw_compat_shader_lookup - Look up a compat shader
+ *
+ * @man: Pointer to the compat shader manager.
+ * @shader_type: The shader type, that combined with the user_key identifies
+ * the shader.
+ * @user_key: On entry, this should be a pointer to the user_key.
+ * On successful exit, it will contain the guest-backed shader's TTM handle.
+ *
+ * Returns 0 on success. Non-zero on failure, in which case the value pointed
+ * to by @user_key is unmodified.
+ */
+int vmw_compat_shader_lookup(struct vmw_compat_shader_manager *man,
+			     SVGA3dShaderType shader_type,
+			     u32 *user_key)
+{
+	struct drm_hash_item *hash;
+	int ret;
+	unsigned long key = *user_key | (shader_type << 24);
+
+	ret = drm_ht_find_item(&man->shaders, key, &hash);
+	if (unlikely(ret != 0))
+		return ret;
+
+	*user_key = drm_hash_entry(hash, struct vmw_compat_shader,
+				   hash)->handle;
+
+	return 0;
+}
+
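The lookup above relies on packing the user key and shader type into a single hash key; a minimal sketch of that packing (illustrative helper name only), consistent with the user_key > ((1 << 24) - 1) check in vmw_compat_shader_add() below:

static inline unsigned long example_compat_shader_key(u32 user_key,
						      SVGA3dShaderType shader_type)
{
	/* Low 24 bits carry the user key, the bits above carry the type. */
	return (unsigned long)user_key | ((unsigned long)shader_type << 24);
}
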
+/**
+ * vmw_compat_shader_free - Free a compat shader.
+ *
+ * @man: Pointer to the compat shader manager.
+ * @entry: Pointer to a struct vmw_compat_shader.
+ *
+ * Frees a struct vmw_compat_shader entry and drops its reference to the
+ * guest backed shader.
+ */
+static void vmw_compat_shader_free(struct vmw_compat_shader_manager *man,
+				   struct vmw_compat_shader *entry)
+{
+	list_del(&entry->head);
+	WARN_ON(drm_ht_remove_item(&man->shaders, &entry->hash));
+	WARN_ON(ttm_ref_object_base_unref(entry->tfile, entry->handle,
+					  TTM_REF_USAGE));
+	kfree(entry);
+}
+
+/**
+ * vmw_compat_shaders_commit - Commit a list of compat shader actions.
+ *
+ * @man: Pointer to the compat shader manager.
+ * @list: Caller's list of compat shader actions.
+ *
+ * This function commits a list of compat shader additions or removals.
+ * It is typically called when the execbuf ioctl call triggering these
+ * actions has committed the fifo contents to the device.
+ */
+void vmw_compat_shaders_commit(struct vmw_compat_shader_manager *man,
+			       struct list_head *list)
+{
+	struct vmw_compat_shader *entry, *next;
+
+	list_for_each_entry_safe(entry, next, list, head) {
+		list_del(&entry->head);
+		switch (entry->state) {
+		case VMW_COMPAT_ADD:
+			entry->state = VMW_COMPAT_COMMITED;
+			list_add_tail(&entry->head, &man->list);
+			break;
+		case VMW_COMPAT_DEL:
+			ttm_ref_object_base_unref(entry->tfile, entry->handle,
+						  TTM_REF_USAGE);
+			kfree(entry);
+			break;
+		default:
+			BUG();
+			break;
+		}
+	}
+}
+
+/**
+ * vmw_compat_shaders_revert - Revert a list of compat shader actions
+ *
+ * @man: Pointer to the compat shader manager.
+ * @list: Caller's list of compat shader actions.
+ *
+ * This function reverts a list of compat shader additions or removals.
+ * It is typically called when the execbuf ioctl call triggering these
+ * actions failed for some reason, and the command stream was never
+ * submitted.
+ */
+void vmw_compat_shaders_revert(struct vmw_compat_shader_manager *man,
+			       struct list_head *list)
+{
+	struct vmw_compat_shader *entry, *next;
+	int ret;
+
+	list_for_each_entry_safe(entry, next, list, head) {
+		switch (entry->state) {
+		case VMW_COMPAT_ADD:
+			vmw_compat_shader_free(man, entry);
+			break;
+		case VMW_COMPAT_DEL:
+			ret = drm_ht_insert_item(&man->shaders, &entry->hash);
+			list_del(&entry->head);
+			list_add_tail(&entry->head, &man->list);
+			entry->state = VMW_COMPAT_COMMITED;
+			break;
+		default:
+			BUG();
+			break;
+		}
+	}
+}
+
+/**
+ * vmw_compat_shader_remove - Stage a compat shader for removal.
+ *
+ * @man: Pointer to the compat shader manager
+ * @user_key: The key that is used to identify the shader. The key is
+ * unique to the shader type.
+ * @shader_type: Shader type.
+ * @list: Caller's list of staged shader actions.
+ *
+ * This function stages a compat shader for removal and removes the key from
+ * the shader manager's hash table. If the shader was previously only staged
+ * for addition it is completely removed (but the execbuf code may keep a
+ * reference if it was bound to a context between addition and removal). If
+ * it was previously committed to the manager, it is staged for removal.
+ */
+int vmw_compat_shader_remove(struct vmw_compat_shader_manager *man,
+			     u32 user_key, SVGA3dShaderType shader_type,
+			     struct list_head *list)
+{
+	struct vmw_compat_shader *entry;
+	struct drm_hash_item *hash;
+	int ret;
+
+	ret = drm_ht_find_item(&man->shaders, user_key | (shader_type << 24),
+			       &hash);
+	if (likely(ret != 0))
+		return -EINVAL;
+
+	entry = drm_hash_entry(hash, struct vmw_compat_shader, hash);
+
+	switch (entry->state) {
+	case VMW_COMPAT_ADD:
+		vmw_compat_shader_free(man, entry);
+		break;
+	case VMW_COMPAT_COMMITED:
+		(void) drm_ht_remove_item(&man->shaders, &entry->hash);
+		list_del(&entry->head);
+		entry->state = VMW_COMPAT_DEL;
+		list_add_tail(&entry->head, list);
+		break;
+	default:
+		BUG();
+		break;
+	}
+
+	return 0;
+}
+
+/**
+ * vmw_compat_shader_add - Create a compat shader and add the
+ * key to the manager
+ *
+ * @man: Pointer to the compat shader manager
+ * @user_key: The key that is used to identify the shader. The key is
+ * unique to the shader type.
+ * @bytecode: Pointer to the bytecode of the shader.
+ * @shader_type: Shader type.
+ * @tfile: Pointer to a struct ttm_object_file that the guest-backed shader is
+ * to be created with.
+ * @list: Caller's list of staged shader actions.
+ *
+ * Note that only the key is added to the shader manager's hash table.
+ * The shader is not yet added to the shader manager's list of shaders.
+ */
+int vmw_compat_shader_add(struct vmw_compat_shader_manager *man,
+			  u32 user_key, const void *bytecode,
+			  SVGA3dShaderType shader_type,
+			  size_t size,
+			  struct ttm_object_file *tfile,
+			  struct list_head *list)
+{
+	struct vmw_dma_buffer *buf;
+	struct ttm_bo_kmap_obj map;
+	bool is_iomem;
+	struct vmw_compat_shader *compat;
+	u32 handle;
+	int ret;
+
+	if (user_key > ((1 << 24) - 1) || (unsigned) shader_type > 16)
+		return -EINVAL;
+
+	/* Allocate and pin a DMA buffer */
+	buf = kzalloc(sizeof(*buf), GFP_KERNEL);
+	if (unlikely(buf == NULL))
+		return -ENOMEM;
+
+	ret = vmw_dmabuf_init(man->dev_priv, buf, size, &vmw_sys_ne_placement,
+			      true, vmw_dmabuf_bo_free);
+	if (unlikely(ret != 0))
+		goto out;
+
+	ret = ttm_bo_reserve(&buf->base, false, true, false, NULL);
+	if (unlikely(ret != 0))
+		goto no_reserve;
+
+	/* Map and copy shader bytecode. */
+	ret = ttm_bo_kmap(&buf->base, 0, PAGE_ALIGN(size) >> PAGE_SHIFT,
+			  &map);
+	if (unlikely(ret != 0)) {
+		ttm_bo_unreserve(&buf->base);
+		goto no_reserve;
+	}
+
+	memcpy(ttm_kmap_obj_virtual(&map, &is_iomem), bytecode, size);
+	WARN_ON(is_iomem);
+
+	ttm_bo_kunmap(&map);
+	ret = ttm_bo_validate(&buf->base, &vmw_sys_placement, false, true);
+	WARN_ON(ret != 0);
+	ttm_bo_unreserve(&buf->base);
+
+	/* Create a guest-backed shader container backed by the dma buffer */
+	ret = vmw_shader_alloc(man->dev_priv, buf, size, 0, shader_type,
+			       tfile, &handle);
+	vmw_dmabuf_unreference(&buf);
+	if (unlikely(ret != 0))
+		goto no_reserve;
+	/*
+	 * Create a compat shader structure and stage it for insertion
+	 * in the manager
+	 */
+	compat = kzalloc(sizeof(*compat), GFP_KERNEL);
+	if (compat == NULL) {
+		ret = -ENOMEM;
+		goto no_compat;
+	}
+
+	compat->hash.key = user_key |  (shader_type << 24);
+	ret = drm_ht_insert_item(&man->shaders, &compat->hash);
+	if (unlikely(ret != 0))
+		goto out_invalid_key;
+
+	compat->state = VMW_COMPAT_ADD;
+	compat->handle = handle;
+	compat->tfile = tfile;
+	list_add_tail(&compat->head, list);
+
+	return 0;
+
+out_invalid_key:
+	kfree(compat);
+no_compat:
+	ttm_ref_object_base_unref(tfile, handle, TTM_REF_USAGE);
+no_reserve:
+out:
+	return ret;
+}
+
+/**
+ * vmw_compat_shader_man_create - Create a compat shader manager
+ *
+ * @dev_priv: Pointer to a device private structure.
+ *
+ * Typically done at file open time. If successful returns a pointer to a
+ * compat shader manager. Otherwise returns an error pointer.
+ */
+struct vmw_compat_shader_manager *
+vmw_compat_shader_man_create(struct vmw_private *dev_priv)
+{
+	struct vmw_compat_shader_manager *man;
+	int ret;
+
+	man = kzalloc(sizeof(*man), GFP_KERNEL);
+	if (man == NULL)
+		return ERR_PTR(-ENOMEM);
+
+	man->dev_priv = dev_priv;
+	INIT_LIST_HEAD(&man->list);
+	ret = drm_ht_create(&man->shaders, VMW_COMPAT_SHADER_HT_ORDER);
+	if (ret == 0)
+		return man;
+
+	kfree(man);
+	return ERR_PTR(ret);
+}
+
+/**
+ * vmw_compat_shader_man_destroy - Destroy a compat shader manager
+ *
+ * @man: Pointer to the shader manager to destroy.
+ *
+ * Typically done at file close time.
+ */
+void vmw_compat_shader_man_destroy(struct vmw_compat_shader_manager *man)
+{
+	struct vmw_compat_shader *entry, *next;
+
+	mutex_lock(&man->dev_priv->cmdbuf_mutex);
+	list_for_each_entry_safe(entry, next, &man->list, head)
+		vmw_compat_shader_free(man, entry);
+
+	mutex_unlock(&man->dev_priv->cmdbuf_mutex);
+	kfree(man);
+}
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c b/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c
index 7de2ea8..82468d9 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c
@@ -41,7 +41,6 @@
 	struct ttm_prime_object prime;
 	struct vmw_surface srf;
 	uint32_t size;
-	uint32_t backup_handle;
 };
 
 /**
@@ -68,6 +67,14 @@
 				 struct ttm_validate_buffer *val_buf);
 static int vmw_legacy_srf_create(struct vmw_resource *res);
 static int vmw_legacy_srf_destroy(struct vmw_resource *res);
+static int vmw_gb_surface_create(struct vmw_resource *res);
+static int vmw_gb_surface_bind(struct vmw_resource *res,
+			       struct ttm_validate_buffer *val_buf);
+static int vmw_gb_surface_unbind(struct vmw_resource *res,
+				 bool readback,
+				 struct ttm_validate_buffer *val_buf);
+static int vmw_gb_surface_destroy(struct vmw_resource *res);
+
 
 static const struct vmw_user_resource_conv user_surface_conv = {
 	.object_type = VMW_RES_SURFACE,
@@ -93,6 +100,18 @@
 	.unbind = &vmw_legacy_srf_unbind
 };
 
+static const struct vmw_res_func vmw_gb_surface_func = {
+	.res_type = vmw_res_surface,
+	.needs_backup = true,
+	.may_evict = true,
+	.type_name = "guest backed surfaces",
+	.backup_placement = &vmw_mob_placement,
+	.create = vmw_gb_surface_create,
+	.destroy = vmw_gb_surface_destroy,
+	.bind = vmw_gb_surface_bind,
+	.unbind = vmw_gb_surface_unbind
+};
+
 /**
  * struct vmw_surface_dma - SVGA3D DMA command
  */
@@ -291,6 +310,11 @@
 	struct vmw_surface *srf;
 	void *cmd;
 
+	if (res->func->destroy == vmw_gb_surface_destroy) {
+		(void) vmw_gb_surface_destroy(res);
+		return;
+	}
+
 	if (res->id != -1) {
 
 		cmd = vmw_fifo_reserve(dev_priv, vmw_surface_destroy_size());
@@ -549,12 +573,15 @@
 	struct vmw_resource *res = &srf->res;
 
 	BUG_ON(res_free == NULL);
-	(void) vmw_3d_resource_inc(dev_priv, false);
+	if (!dev_priv->has_mob)
+		(void) vmw_3d_resource_inc(dev_priv, false);
 	ret = vmw_resource_init(dev_priv, res, true, res_free,
+				(dev_priv->has_mob) ? &vmw_gb_surface_func :
 				&vmw_legacy_surface_func);
 
 	if (unlikely(ret != 0)) {
-		vmw_3d_resource_dec(dev_priv, false);
+		if (!dev_priv->has_mob)
+			vmw_3d_resource_dec(dev_priv, false);
 		res_free(res);
 		return ret;
 	}
@@ -750,7 +777,7 @@
 
 	srf->base_size = *srf->sizes;
 	srf->autogen_filter = SVGA3D_TEX_FILTER_NONE;
-	srf->multisample_count = 1;
+	srf->multisample_count = 0;
 
 	cur_bo_offset = 0;
 	cur_offset = srf->offsets;
@@ -843,6 +870,7 @@
 int vmw_surface_reference_ioctl(struct drm_device *dev, void *data,
 				struct drm_file *file_priv)
 {
+	struct vmw_private *dev_priv = vmw_priv(dev);
 	union drm_vmw_surface_reference_arg *arg =
 	    (union drm_vmw_surface_reference_arg *)data;
 	struct drm_vmw_surface_arg *req = &arg->req;
@@ -854,7 +882,7 @@
 	struct ttm_base_object *base;
 	int ret = -EINVAL;
 
-	base = ttm_base_object_lookup(tfile, req->sid);
+	base = ttm_base_object_lookup_for_ref(dev_priv->tdev, req->sid);
 	if (unlikely(base == NULL)) {
 		DRM_ERROR("Could not find surface to reference.\n");
 		return -EINVAL;
@@ -880,8 +908,8 @@
 	    rep->size_addr;
 
 	if (user_sizes)
-		ret = copy_to_user(user_sizes, srf->sizes,
-				   srf->num_sizes * sizeof(*srf->sizes));
+		ret = copy_to_user(user_sizes, &srf->base_size,
+				   sizeof(srf->base_size));
 	if (unlikely(ret != 0)) {
 		DRM_ERROR("copy_to_user failed %p %u\n",
 			  user_sizes, srf->num_sizes);
@@ -893,3 +921,436 @@
 
 	return ret;
 }
+
+/**
+ * vmw_gb_surface_create - Encode a surface define command and submit it
+ * to the device.
+ *
+ * @res: Pointer to the struct vmw_resource embedded in the surface.
+ */
+static int vmw_gb_surface_create(struct vmw_resource *res)
+{
+	struct vmw_private *dev_priv = res->dev_priv;
+	struct vmw_surface *srf = vmw_res_to_srf(res);
+	uint32_t cmd_len, submit_len;
+	int ret;
+	struct {
+		SVGA3dCmdHeader header;
+		SVGA3dCmdDefineGBSurface body;
+	} *cmd;
+
+	if (likely(res->id != -1))
+		return 0;
+
+	(void) vmw_3d_resource_inc(dev_priv, false);
+	ret = vmw_resource_alloc_id(res);
+	if (unlikely(ret != 0)) {
+		DRM_ERROR("Failed to allocate a surface id.\n");
+		goto out_no_id;
+	}
+
+	if (unlikely(res->id >= VMWGFX_NUM_GB_SURFACE)) {
+		ret = -EBUSY;
+		goto out_no_fifo;
+	}
+
+	cmd_len = sizeof(cmd->body);
+	submit_len = sizeof(*cmd);
+	cmd = vmw_fifo_reserve(dev_priv, submit_len);
+	if (unlikely(cmd == NULL)) {
+		DRM_ERROR("Failed reserving FIFO space for surface "
+			  "creation.\n");
+		ret = -ENOMEM;
+		goto out_no_fifo;
+	}
+
+	cmd->header.id = SVGA_3D_CMD_DEFINE_GB_SURFACE;
+	cmd->header.size = cmd_len;
+	cmd->body.sid = srf->res.id;
+	cmd->body.surfaceFlags = srf->flags;
+	cmd->body.format = cpu_to_le32(srf->format);
+	cmd->body.numMipLevels = srf->mip_levels[0];
+	cmd->body.multisampleCount = srf->multisample_count;
+	cmd->body.autogenFilter = srf->autogen_filter;
+	cmd->body.size.width = srf->base_size.width;
+	cmd->body.size.height = srf->base_size.height;
+	cmd->body.size.depth = srf->base_size.depth;
+	vmw_fifo_commit(dev_priv, submit_len);
+
+	return 0;
+
+out_no_fifo:
+	vmw_resource_release_id(res);
+out_no_id:
+	vmw_3d_resource_dec(dev_priv, false);
+	return ret;
+}
+
+
+static int vmw_gb_surface_bind(struct vmw_resource *res,
+			       struct ttm_validate_buffer *val_buf)
+{
+	struct vmw_private *dev_priv = res->dev_priv;
+	struct {
+		SVGA3dCmdHeader header;
+		SVGA3dCmdBindGBSurface body;
+	} *cmd1;
+	struct {
+		SVGA3dCmdHeader header;
+		SVGA3dCmdUpdateGBSurface body;
+	} *cmd2;
+	uint32_t submit_size;
+	struct ttm_buffer_object *bo = val_buf->bo;
+
+	BUG_ON(bo->mem.mem_type != VMW_PL_MOB);
+
+	submit_size = sizeof(*cmd1) + (res->backup_dirty ? sizeof(*cmd2) : 0);
+
+	cmd1 = vmw_fifo_reserve(dev_priv, submit_size);
+	if (unlikely(cmd1 == NULL)) {
+		DRM_ERROR("Failed reserving FIFO space for surface "
+			  "binding.\n");
+		return -ENOMEM;
+	}
+
+	cmd1->header.id = SVGA_3D_CMD_BIND_GB_SURFACE;
+	cmd1->header.size = sizeof(cmd1->body);
+	cmd1->body.sid = res->id;
+	cmd1->body.mobid = bo->mem.start;
+	if (res->backup_dirty) {
+		cmd2 = (void *) &cmd1[1];
+		cmd2->header.id = SVGA_3D_CMD_UPDATE_GB_SURFACE;
+		cmd2->header.size = sizeof(cmd2->body);
+		cmd2->body.sid = res->id;
+		res->backup_dirty = false;
+	}
+	vmw_fifo_commit(dev_priv, submit_size);
+
+	return 0;
+}
+
+static int vmw_gb_surface_unbind(struct vmw_resource *res,
+				 bool readback,
+				 struct ttm_validate_buffer *val_buf)
+{
+	struct vmw_private *dev_priv = res->dev_priv;
+	struct ttm_buffer_object *bo = val_buf->bo;
+	struct vmw_fence_obj *fence;
+
+	struct {
+		SVGA3dCmdHeader header;
+		SVGA3dCmdReadbackGBSurface body;
+	} *cmd1;
+	struct {
+		SVGA3dCmdHeader header;
+		SVGA3dCmdInvalidateGBSurface body;
+	} *cmd2;
+	struct {
+		SVGA3dCmdHeader header;
+		SVGA3dCmdBindGBSurface body;
+	} *cmd3;
+	uint32_t submit_size;
+	uint8_t *cmd;
+
+
+	BUG_ON(bo->mem.mem_type != VMW_PL_MOB);
+
+	submit_size = sizeof(*cmd3) + (readback ? sizeof(*cmd1) : sizeof(*cmd2));
+	cmd = vmw_fifo_reserve(dev_priv, submit_size);
+	if (unlikely(cmd == NULL)) {
+		DRM_ERROR("Failed reserving FIFO space for surface "
+			  "unbinding.\n");
+		return -ENOMEM;
+	}
+
+	if (readback) {
+		cmd1 = (void *) cmd;
+		cmd1->header.id = SVGA_3D_CMD_READBACK_GB_SURFACE;
+		cmd1->header.size = sizeof(cmd1->body);
+		cmd1->body.sid = res->id;
+		cmd3 = (void *) &cmd1[1];
+	} else {
+		cmd2 = (void *) cmd;
+		cmd2->header.id = SVGA_3D_CMD_INVALIDATE_GB_SURFACE;
+		cmd2->header.size = sizeof(cmd2->body);
+		cmd2->body.sid = res->id;
+		cmd3 = (void *) &cmd2[1];
+	}
+
+	cmd3->header.id = SVGA_3D_CMD_BIND_GB_SURFACE;
+	cmd3->header.size = sizeof(cmd3->body);
+	cmd3->body.sid = res->id;
+	cmd3->body.mobid = SVGA3D_INVALID_ID;
+
+	vmw_fifo_commit(dev_priv, submit_size);
+
+	/*
+	 * Create a fence object and fence the backup buffer.
+	 */
+
+	(void) vmw_execbuf_fence_commands(NULL, dev_priv,
+					  &fence, NULL);
+
+	vmw_fence_single_bo(val_buf->bo, fence);
+
+	if (likely(fence != NULL))
+		vmw_fence_obj_unreference(&fence);
+
+	return 0;
+}
+
+static int vmw_gb_surface_destroy(struct vmw_resource *res)
+{
+	struct vmw_private *dev_priv = res->dev_priv;
+	struct {
+		SVGA3dCmdHeader header;
+		SVGA3dCmdDestroyGBSurface body;
+	} *cmd;
+
+	if (likely(res->id == -1))
+		return 0;
+
+	mutex_lock(&dev_priv->binding_mutex);
+	vmw_context_binding_res_list_scrub(&res->binding_head);
+
+	cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
+	if (unlikely(cmd == NULL)) {
+		DRM_ERROR("Failed reserving FIFO space for surface "
+			  "destruction.\n");
+		mutex_unlock(&dev_priv->binding_mutex);
+		return -ENOMEM;
+	}
+
+	cmd->header.id = SVGA_3D_CMD_DESTROY_GB_SURFACE;
+	cmd->header.size = sizeof(cmd->body);
+	cmd->body.sid = res->id;
+	vmw_fifo_commit(dev_priv, sizeof(*cmd));
+	mutex_unlock(&dev_priv->binding_mutex);
+	vmw_resource_release_id(res);
+	vmw_3d_resource_dec(dev_priv, false);
+
+	return 0;
+}
+
+/**
+ * vmw_gb_surface_define_ioctl - Ioctl function implementing
+ *                               the user surface define functionality.
+ *
+ * @dev:            Pointer to a struct drm_device.
+ * @data:           Pointer to data copied from / to user-space.
+ * @file_priv:      Pointer to a drm file private structure.
+ */
+int vmw_gb_surface_define_ioctl(struct drm_device *dev, void *data,
+				struct drm_file *file_priv)
+{
+	struct vmw_private *dev_priv = vmw_priv(dev);
+	struct vmw_user_surface *user_srf;
+	struct vmw_surface *srf;
+	struct vmw_resource *res;
+	struct vmw_resource *tmp;
+	union drm_vmw_gb_surface_create_arg *arg =
+	    (union drm_vmw_gb_surface_create_arg *)data;
+	struct drm_vmw_gb_surface_create_req *req = &arg->req;
+	struct drm_vmw_gb_surface_create_rep *rep = &arg->rep;
+	struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
+	int ret;
+	uint32_t size;
+	struct vmw_master *vmaster = vmw_master(file_priv->master);
+	const struct svga3d_surface_desc *desc;
+	uint32_t backup_handle;
+
+	if (unlikely(vmw_user_surface_size == 0))
+		vmw_user_surface_size = ttm_round_pot(sizeof(*user_srf)) +
+			128;
+
+	size = vmw_user_surface_size + 128;
+
+	desc = svga3dsurface_get_desc(req->format);
+	if (unlikely(desc->block_desc == SVGA3DBLOCKDESC_NONE)) {
+		DRM_ERROR("Invalid surface format for surface creation.\n");
+		return -EINVAL;
+	}
+
+	ret = ttm_read_lock(&vmaster->lock, true);
+	if (unlikely(ret != 0))
+		return ret;
+
+	ret = ttm_mem_global_alloc(vmw_mem_glob(dev_priv),
+				   size, false, true);
+	if (unlikely(ret != 0)) {
+		if (ret != -ERESTARTSYS)
+			DRM_ERROR("Out of graphics memory for surface"
+				  " creation.\n");
+		goto out_unlock;
+	}
+
+	user_srf = kzalloc(sizeof(*user_srf), GFP_KERNEL);
+	if (unlikely(user_srf == NULL)) {
+		ret = -ENOMEM;
+		goto out_no_user_srf;
+	}
+
+	srf = &user_srf->srf;
+	res = &srf->res;
+
+	srf->flags = req->svga3d_flags;
+	srf->format = req->format;
+	srf->scanout = req->drm_surface_flags & drm_vmw_surface_flag_scanout;
+	srf->mip_levels[0] = req->mip_levels;
+	srf->num_sizes = 1;
+	srf->sizes = NULL;
+	srf->offsets = NULL;
+	user_srf->size = size;
+	srf->base_size = req->base_size;
+	srf->autogen_filter = SVGA3D_TEX_FILTER_NONE;
+	srf->multisample_count = req->multisample_count;
+	res->backup_size = svga3dsurface_get_serialized_size
+	  (srf->format, srf->base_size, srf->mip_levels[0],
+	   srf->flags & SVGA3D_SURFACE_CUBEMAP);
+
+	user_srf->prime.base.shareable = false;
+	user_srf->prime.base.tfile = NULL;
+
+	/*
+	 * From this point, the generic resource management functions
+	 * destroy the object on failure.
+	 */
+
+	ret = vmw_surface_init(dev_priv, srf, vmw_user_surface_free);
+	if (unlikely(ret != 0))
+		goto out_unlock;
+
+	if (req->buffer_handle != SVGA3D_INVALID_ID) {
+		ret = vmw_user_dmabuf_lookup(tfile, req->buffer_handle,
+					     &res->backup);
+	} else if (req->drm_surface_flags &
+		   drm_vmw_surface_flag_create_buffer)
+		ret = vmw_user_dmabuf_alloc(dev_priv, tfile,
+					    res->backup_size,
+					    req->drm_surface_flags &
+					    drm_vmw_surface_flag_shareable,
+					    &backup_handle,
+					    &res->backup);
+
+	if (unlikely(ret != 0)) {
+		vmw_resource_unreference(&res);
+		goto out_unlock;
+	}
+
+	tmp = vmw_resource_reference(&srf->res);
+	ret = ttm_prime_object_init(tfile, res->backup_size, &user_srf->prime,
+				    req->drm_surface_flags &
+				    drm_vmw_surface_flag_shareable,
+				    VMW_RES_SURFACE,
+				    &vmw_user_surface_base_release, NULL);
+
+	if (unlikely(ret != 0)) {
+		vmw_resource_unreference(&tmp);
+		vmw_resource_unreference(&res);
+		goto out_unlock;
+	}
+
+	rep->handle = user_srf->prime.base.hash.key;
+	rep->backup_size = res->backup_size;
+	if (res->backup) {
+		rep->buffer_map_handle =
+			drm_vma_node_offset_addr(&res->backup->base.vma_node);
+		rep->buffer_size = res->backup->base.num_pages * PAGE_SIZE;
+		rep->buffer_handle = backup_handle;
+	} else {
+		rep->buffer_map_handle = 0;
+		rep->buffer_size = 0;
+		rep->buffer_handle = SVGA3D_INVALID_ID;
+	}
+
+	vmw_resource_unreference(&res);
+
+	ttm_read_unlock(&vmaster->lock);
+	return 0;
+out_no_user_srf:
+	ttm_mem_global_free(vmw_mem_glob(dev_priv), size);
+out_unlock:
+	ttm_read_unlock(&vmaster->lock);
+	return ret;
+}
+
+/**
+ * vmw_gb_surface_reference_ioctl - Ioctl function implementing
+ *                                  the user surface reference functionality.
+ *
+ * @dev:            Pointer to a struct drm_device.
+ * @data:           Pointer to data copied from / to user-space.
+ * @file_priv:      Pointer to a drm file private structure.
+ */
+int vmw_gb_surface_reference_ioctl(struct drm_device *dev, void *data,
+				   struct drm_file *file_priv)
+{
+	struct vmw_private *dev_priv = vmw_priv(dev);
+	union drm_vmw_gb_surface_reference_arg *arg =
+	    (union drm_vmw_gb_surface_reference_arg *)data;
+	struct drm_vmw_surface_arg *req = &arg->req;
+	struct drm_vmw_gb_surface_ref_rep *rep = &arg->rep;
+	struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
+	struct vmw_surface *srf;
+	struct vmw_user_surface *user_srf;
+	struct ttm_base_object *base;
+	uint32_t backup_handle;
+	int ret = -EINVAL;
+
+	base = ttm_base_object_lookup_for_ref(dev_priv->tdev, req->sid);
+	if (unlikely(base == NULL)) {
+		DRM_ERROR("Could not find surface to reference.\n");
+		return -EINVAL;
+	}
+
+	if (unlikely(ttm_base_object_type(base) != VMW_RES_SURFACE))
+		goto out_bad_resource;
+
+	user_srf = container_of(base, struct vmw_user_surface, prime.base);
+	srf = &user_srf->srf;
+	if (srf->res.backup == NULL) {
+		DRM_ERROR("Shared GB surface is missing a backup buffer.\n");
+		goto out_bad_resource;
+	}
+
+	ret = ttm_ref_object_add(tfile, &user_srf->prime.base,
+				 TTM_REF_USAGE, NULL);
+	if (unlikely(ret != 0)) {
+		DRM_ERROR("Could not add a reference to a GB surface.\n");
+		goto out_bad_resource;
+	}
+
+	mutex_lock(&dev_priv->cmdbuf_mutex); /* Protect res->backup */
+	ret = vmw_user_dmabuf_reference(tfile, srf->res.backup,
+					&backup_handle);
+	mutex_unlock(&dev_priv->cmdbuf_mutex);
+
+	if (unlikely(ret != 0)) {
+		DRM_ERROR("Could not add a reference to a GB surface "
+			  "backup buffer.\n");
+		(void) ttm_ref_object_base_unref(vmw_fpriv(file_priv)->tfile,
+						 req->sid,
+						 TTM_REF_USAGE);
+		goto out_bad_resource;
+	}
+
+	rep->creq.svga3d_flags = srf->flags;
+	rep->creq.format = srf->format;
+	rep->creq.mip_levels = srf->mip_levels[0];
+	rep->creq.drm_surface_flags = 0;
+	rep->creq.multisample_count = srf->multisample_count;
+	rep->creq.autogen_filter = srf->autogen_filter;
+	rep->creq.buffer_handle = backup_handle;
+	rep->creq.base_size = srf->base_size;
+	rep->crep.handle = user_srf->prime.base.hash.key;
+	rep->crep.backup_size = srf->res.backup_size;
+	rep->crep.buffer_handle = backup_handle;
+	rep->crep.buffer_map_handle =
+		drm_vma_node_offset_addr(&srf->res.backup->base.vma_node);
+	rep->crep.buffer_size = srf->res.backup->base.num_pages * PAGE_SIZE;
+
+out_bad_resource:
+	ttm_base_object_unref(&base);
+
+	return ret;
+}
diff --git a/drivers/gpu/host1x/Kconfig b/drivers/gpu/host1x/Kconfig
index 7d6bed2..b2fd029 100644
--- a/drivers/gpu/host1x/Kconfig
+++ b/drivers/gpu/host1x/Kconfig
@@ -1,6 +1,6 @@
 config TEGRA_HOST1X
 	tristate "NVIDIA Tegra host1x driver"
-	depends on ARCH_TEGRA || ARCH_MULTIPLATFORM
+	depends on ARCH_TEGRA || (ARM && COMPILE_TEST)
 	help
 	  Driver for the NVIDIA Tegra host1x hardware.
 
diff --git a/drivers/gpu/host1x/Makefile b/drivers/gpu/host1x/Makefile
index afa1e9e..c1189f0 100644
--- a/drivers/gpu/host1x/Makefile
+++ b/drivers/gpu/host1x/Makefile
@@ -7,7 +7,9 @@
 	channel.o \
 	job.o \
 	debug.o \
+	mipi.o \
 	hw/host1x01.o \
-	hw/host1x02.o
+	hw/host1x02.o \
+	hw/host1x04.o
 
 obj-$(CONFIG_TEGRA_HOST1X) += host1x.o
diff --git a/drivers/gpu/host1x/bus.c b/drivers/gpu/host1x/bus.c
index 6a92959..ccdd2e6 100644
--- a/drivers/gpu/host1x/bus.c
+++ b/drivers/gpu/host1x/bus.c
@@ -188,6 +188,7 @@
 
 	return 0;
 }
+EXPORT_SYMBOL(host1x_device_init);
 
 int host1x_device_exit(struct host1x_device *device)
 {
@@ -213,6 +214,7 @@
 
 	return 0;
 }
+EXPORT_SYMBOL(host1x_device_exit);
 
 static int host1x_register_client(struct host1x *host1x,
 				  struct host1x_client *client)
diff --git a/drivers/gpu/host1x/channel.c b/drivers/gpu/host1x/channel.c
index 83ea51b..b4ae3af 100644
--- a/drivers/gpu/host1x/channel.c
+++ b/drivers/gpu/host1x/channel.c
@@ -43,6 +43,7 @@
 
 	return host1x_hw_channel_submit(host, job);
 }
+EXPORT_SYMBOL(host1x_job_submit);
 
 struct host1x_channel *host1x_channel_get(struct host1x_channel *channel)
 {
@@ -60,6 +61,7 @@
 
 	return err ? NULL : channel;
 }
+EXPORT_SYMBOL(host1x_channel_get);
 
 void host1x_channel_put(struct host1x_channel *channel)
 {
@@ -76,6 +78,7 @@
 
 	mutex_unlock(&channel->reflock);
 }
+EXPORT_SYMBOL(host1x_channel_put);
 
 struct host1x_channel *host1x_channel_request(struct device *dev)
 {
@@ -115,6 +118,7 @@
 	mutex_unlock(&host->chlist_mutex);
 	return NULL;
 }
+EXPORT_SYMBOL(host1x_channel_request);
 
 void host1x_channel_free(struct host1x_channel *channel)
 {
@@ -124,3 +128,4 @@
 	list_del(&channel->list);
 	kfree(channel);
 }
+EXPORT_SYMBOL(host1x_channel_free);
diff --git a/drivers/gpu/host1x/debug.c b/drivers/gpu/host1x/debug.c
index 3ec7d77..ee3d12b 100644
--- a/drivers/gpu/host1x/debug.c
+++ b/drivers/gpu/host1x/debug.c
@@ -96,7 +96,6 @@
 		show_channels(ch, o, true);
 }
 
-#ifdef CONFIG_DEBUG_FS
 static void show_all_no_fifo(struct host1x *host1x, struct output *o)
 {
 	struct host1x_channel *ch;
@@ -153,7 +152,7 @@
 	.release	= single_release,
 };
 
-void host1x_debug_init(struct host1x *host1x)
+static void host1x_debugfs_init(struct host1x *host1x)
 {
 	struct dentry *de = debugfs_create_dir("tegra-host1x", NULL);
 
@@ -180,18 +179,22 @@
 			   &host1x_debug_force_timeout_channel);
 }
 
-void host1x_debug_deinit(struct host1x *host1x)
+static void host1x_debugfs_exit(struct host1x *host1x)
 {
 	debugfs_remove_recursive(host1x->debugfs);
 }
-#else
+
 void host1x_debug_init(struct host1x *host1x)
 {
+	if (IS_ENABLED(CONFIG_DEBUG_FS))
+		host1x_debugfs_init(host1x);
 }
+
 void host1x_debug_deinit(struct host1x *host1x)
 {
+	if (IS_ENABLED(CONFIG_DEBUG_FS))
+		host1x_debugfs_exit(host1x);
 }
-#endif
 
 void host1x_debug_dump(struct host1x *host1x)
 {
diff --git a/drivers/gpu/host1x/dev.c b/drivers/gpu/host1x/dev.c
index 80da003..2529908 100644
--- a/drivers/gpu/host1x/dev.c
+++ b/drivers/gpu/host1x/dev.c
@@ -34,6 +34,7 @@
 #include "debug.h"
 #include "hw/host1x01.h"
 #include "hw/host1x02.h"
+#include "hw/host1x04.h"
 
 void host1x_sync_writel(struct host1x *host1x, u32 v, u32 r)
 {
@@ -77,7 +78,17 @@
 	.sync_offset = 0x3000,
 };
 
+static const struct host1x_info host1x04_info = {
+	.nb_channels = 12,
+	.nb_pts = 192,
+	.nb_mlocks = 16,
+	.nb_bases = 64,
+	.init = host1x04_init,
+	.sync_offset = 0x2100,
+};
+
 static struct of_device_id host1x_of_match[] = {
+	{ .compatible = "nvidia,tegra124-host1x", .data = &host1x04_info, },
 	{ .compatible = "nvidia,tegra114-host1x", .data = &host1x02_info, },
 	{ .compatible = "nvidia,tegra30-host1x", .data = &host1x01_info, },
 	{ .compatible = "nvidia,tegra20-host1x", .data = &host1x01_info, },
@@ -210,17 +221,26 @@
 		return err;
 
 	err = platform_driver_register(&tegra_host1x_driver);
-	if (err < 0) {
-		host1x_bus_exit();
-		return err;
-	}
+	if (err < 0)
+		goto unregister_bus;
+
+	err = platform_driver_register(&tegra_mipi_driver);
+	if (err < 0)
+		goto unregister_host1x;
 
 	return 0;
+
+unregister_host1x:
+	platform_driver_unregister(&tegra_host1x_driver);
+unregister_bus:
+	host1x_bus_exit();
+	return err;
 }
 module_init(tegra_host1x_init);
 
 static void __exit tegra_host1x_exit(void)
 {
+	platform_driver_unregister(&tegra_mipi_driver);
 	platform_driver_unregister(&tegra_host1x_driver);
 	host1x_bus_exit();
 }
diff --git a/drivers/gpu/host1x/dev.h b/drivers/gpu/host1x/dev.h
index a61a976..0b6e8e9 100644
--- a/drivers/gpu/host1x/dev.h
+++ b/drivers/gpu/host1x/dev.h
@@ -306,4 +306,6 @@
 	host->debug_op->show_mlocks(host, o);
 }
 
+extern struct platform_driver tegra_mipi_driver;
+
 #endif
diff --git a/drivers/gpu/host1x/hw/host1x02.c b/drivers/gpu/host1x/hw/host1x02.c
index e98caca..928946c 100644
--- a/drivers/gpu/host1x/hw/host1x02.c
+++ b/drivers/gpu/host1x/hw/host1x02.c
@@ -17,8 +17,8 @@
  */
 
 /* include hw specification */
-#include "host1x01.h"
-#include "host1x01_hardware.h"
+#include "host1x02.h"
+#include "host1x02_hardware.h"
 
 /* include code */
 #include "cdma_hw.c"
diff --git a/drivers/gpu/host1x/hw/host1x02_hardware.h b/drivers/gpu/host1x/hw/host1x02_hardware.h
new file mode 100644
index 0000000..1549018
--- /dev/null
+++ b/drivers/gpu/host1x/hw/host1x02_hardware.h
@@ -0,0 +1,142 @@
+/*
+ * Tegra host1x Register Offsets for Tegra114
+ *
+ * Copyright (c) 2010-2013 NVIDIA Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef __HOST1X_HOST1X02_HARDWARE_H
+#define __HOST1X_HOST1X02_HARDWARE_H
+
+#include <linux/types.h>
+#include <linux/bitops.h>
+
+#include "hw_host1x02_channel.h"
+#include "hw_host1x02_sync.h"
+#include "hw_host1x02_uclass.h"
+
+static inline u32 host1x_class_host_wait_syncpt(
+	unsigned indx, unsigned threshold)
+{
+	return host1x_uclass_wait_syncpt_indx_f(indx)
+		| host1x_uclass_wait_syncpt_thresh_f(threshold);
+}
+
+static inline u32 host1x_class_host_load_syncpt_base(
+	unsigned indx, unsigned threshold)
+{
+	return host1x_uclass_load_syncpt_base_base_indx_f(indx)
+		| host1x_uclass_load_syncpt_base_value_f(threshold);
+}
+
+static inline u32 host1x_class_host_wait_syncpt_base(
+	unsigned indx, unsigned base_indx, unsigned offset)
+{
+	return host1x_uclass_wait_syncpt_base_indx_f(indx)
+		| host1x_uclass_wait_syncpt_base_base_indx_f(base_indx)
+		| host1x_uclass_wait_syncpt_base_offset_f(offset);
+}
+
+static inline u32 host1x_class_host_incr_syncpt_base(
+	unsigned base_indx, unsigned offset)
+{
+	return host1x_uclass_incr_syncpt_base_base_indx_f(base_indx)
+		| host1x_uclass_incr_syncpt_base_offset_f(offset);
+}
+
+static inline u32 host1x_class_host_incr_syncpt(
+	unsigned cond, unsigned indx)
+{
+	return host1x_uclass_incr_syncpt_cond_f(cond)
+		| host1x_uclass_incr_syncpt_indx_f(indx);
+}
+
+static inline u32 host1x_class_host_indoff_reg_write(
+	unsigned mod_id, unsigned offset, bool auto_inc)
+{
+	u32 v = host1x_uclass_indoff_indbe_f(0xf)
+		| host1x_uclass_indoff_indmodid_f(mod_id)
+		| host1x_uclass_indoff_indroffset_f(offset);
+	if (auto_inc)
+		v |= host1x_uclass_indoff_autoinc_f(1);
+	return v;
+}
+
+static inline u32 host1x_class_host_indoff_reg_read(
+	unsigned mod_id, unsigned offset, bool auto_inc)
+{
+	u32 v = host1x_uclass_indoff_indmodid_f(mod_id)
+		| host1x_uclass_indoff_indroffset_f(offset)
+		| host1x_uclass_indoff_rwn_read_v();
+	if (auto_inc)
+		v |= host1x_uclass_indoff_autoinc_f(1);
+	return v;
+}
+
+/* cdma opcodes */
+static inline u32 host1x_opcode_setclass(
+	unsigned class_id, unsigned offset, unsigned mask)
+{
+	return (0 << 28) | (offset << 16) | (class_id << 6) | mask;
+}
+
+static inline u32 host1x_opcode_incr(unsigned offset, unsigned count)
+{
+	return (1 << 28) | (offset << 16) | count;
+}
+
+static inline u32 host1x_opcode_nonincr(unsigned offset, unsigned count)
+{
+	return (2 << 28) | (offset << 16) | count;
+}
+
+static inline u32 host1x_opcode_mask(unsigned offset, unsigned mask)
+{
+	return (3 << 28) | (offset << 16) | mask;
+}
+
+static inline u32 host1x_opcode_imm(unsigned offset, unsigned value)
+{
+	return (4 << 28) | (offset << 16) | value;
+}
+
+static inline u32 host1x_opcode_imm_incr_syncpt(unsigned cond, unsigned indx)
+{
+	return host1x_opcode_imm(host1x_uclass_incr_syncpt_r(),
+		host1x_class_host_incr_syncpt(cond, indx));
+}
+
+static inline u32 host1x_opcode_restart(unsigned address)
+{
+	return (5 << 28) | (address >> 4);
+}
+
+static inline u32 host1x_opcode_gather(unsigned count)
+{
+	return (6 << 28) | count;
+}
+
+static inline u32 host1x_opcode_gather_nonincr(unsigned offset, unsigned count)
+{
+	return (6 << 28) | (offset << 16) | BIT(15) | count;
+}
+
+static inline u32 host1x_opcode_gather_incr(unsigned offset, unsigned count)
+{
+	return (6 << 28) | (offset << 16) | BIT(15) | BIT(14) | count;
+}
+
+#define HOST1X_OPCODE_NOP host1x_opcode_nonincr(0, 0)
+
+#endif
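
The cdma opcode helpers above pack an opcode number, a register offset and a count or mask into single 32-bit command words. The following standalone sketch mirrors host1x_opcode_setclass() and host1x_opcode_incr() from this header to show how a minimal command sequence is encoded; the class id and register offset used here are made-up illustration values, not real hardware ids.

#include <stdint.h>
#include <stdio.h>

/* Same bit layout as host1x_opcode_setclass() in host1x02_hardware.h. */
static uint32_t opcode_setclass(unsigned class_id, unsigned offset,
				unsigned mask)
{
	return (0u << 28) | (offset << 16) | (class_id << 6) | mask;
}

/* Same bit layout as host1x_opcode_incr(). */
static uint32_t opcode_incr(unsigned offset, unsigned count)
{
	return (1u << 28) | (offset << 16) | count;
}

int main(void)
{
	/* Hypothetical class 0x51 and register 0x09: illustration only. */
	uint32_t stream[] = {
		opcode_setclass(0x51, 0, 0),	/* switch to client class */
		opcode_incr(0x09, 1),		/* write one word at 0x09 */
		0x12345678,			/* the register value     */
	};

	for (unsigned i = 0; i < sizeof(stream) / sizeof(stream[0]); i++)
		printf("word %u: 0x%08x\n", i, stream[i]);

	return 0;
}
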
diff --git a/drivers/gpu/host1x/hw/host1x04.c b/drivers/gpu/host1x/hw/host1x04.c
new file mode 100644
index 0000000..8007c70
--- /dev/null
+++ b/drivers/gpu/host1x/hw/host1x04.c
@@ -0,0 +1,42 @@
+/*
+ * Host1x init for Tegra124 SoCs
+ *
+ * Copyright (c) 2013 NVIDIA Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+
+/* include hw specification */
+#include "host1x04.h"
+#include "host1x04_hardware.h"
+
+/* include code */
+#include "cdma_hw.c"
+#include "channel_hw.c"
+#include "debug_hw.c"
+#include "intr_hw.c"
+#include "syncpt_hw.c"
+
+#include "../dev.h"
+
+int host1x04_init(struct host1x *host)
+{
+	host->channel_op = &host1x_channel_ops;
+	host->cdma_op = &host1x_cdma_ops;
+	host->cdma_pb_op = &host1x_pushbuffer_ops;
+	host->syncpt_op = &host1x_syncpt_ops;
+	host->intr_op = &host1x_intr_ops;
+	host->debug_op = &host1x_debug_ops;
+
+	return 0;
+}
diff --git a/drivers/gpu/host1x/hw/host1x04.h b/drivers/gpu/host1x/hw/host1x04.h
new file mode 100644
index 0000000..a9ab749
--- /dev/null
+++ b/drivers/gpu/host1x/hw/host1x04.h
@@ -0,0 +1,26 @@
+/*
+ * Host1x init for Tegra124 SoCs
+ *
+ * Copyright (c) 2013 NVIDIA Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef HOST1X_HOST1X04_H
+#define HOST1X_HOST1X04_H
+
+struct host1x;
+
+int host1x04_init(struct host1x *host);
+
+#endif
diff --git a/drivers/gpu/host1x/hw/host1x04_hardware.h b/drivers/gpu/host1x/hw/host1x04_hardware.h
new file mode 100644
index 0000000..de1a381
--- /dev/null
+++ b/drivers/gpu/host1x/hw/host1x04_hardware.h
@@ -0,0 +1,142 @@
+/*
+ * Tegra host1x Register Offsets for Tegra124
+ *
+ * Copyright (c) 2010-2013 NVIDIA Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef __HOST1X_HOST1X04_HARDWARE_H
+#define __HOST1X_HOST1X04_HARDWARE_H
+
+#include <linux/types.h>
+#include <linux/bitops.h>
+
+#include "hw_host1x04_channel.h"
+#include "hw_host1x04_sync.h"
+#include "hw_host1x04_uclass.h"
+
+static inline u32 host1x_class_host_wait_syncpt(
+	unsigned indx, unsigned threshold)
+{
+	return host1x_uclass_wait_syncpt_indx_f(indx)
+		| host1x_uclass_wait_syncpt_thresh_f(threshold);
+}
+
+static inline u32 host1x_class_host_load_syncpt_base(
+	unsigned indx, unsigned threshold)
+{
+	return host1x_uclass_load_syncpt_base_base_indx_f(indx)
+		| host1x_uclass_load_syncpt_base_value_f(threshold);
+}
+
+static inline u32 host1x_class_host_wait_syncpt_base(
+	unsigned indx, unsigned base_indx, unsigned offset)
+{
+	return host1x_uclass_wait_syncpt_base_indx_f(indx)
+		| host1x_uclass_wait_syncpt_base_base_indx_f(base_indx)
+		| host1x_uclass_wait_syncpt_base_offset_f(offset);
+}
+
+static inline u32 host1x_class_host_incr_syncpt_base(
+	unsigned base_indx, unsigned offset)
+{
+	return host1x_uclass_incr_syncpt_base_base_indx_f(base_indx)
+		| host1x_uclass_incr_syncpt_base_offset_f(offset);
+}
+
+static inline u32 host1x_class_host_incr_syncpt(
+	unsigned cond, unsigned indx)
+{
+	return host1x_uclass_incr_syncpt_cond_f(cond)
+		| host1x_uclass_incr_syncpt_indx_f(indx);
+}
+
+static inline u32 host1x_class_host_indoff_reg_write(
+	unsigned mod_id, unsigned offset, bool auto_inc)
+{
+	u32 v = host1x_uclass_indoff_indbe_f(0xf)
+		| host1x_uclass_indoff_indmodid_f(mod_id)
+		| host1x_uclass_indoff_indroffset_f(offset);
+	if (auto_inc)
+		v |= host1x_uclass_indoff_autoinc_f(1);
+	return v;
+}
+
+static inline u32 host1x_class_host_indoff_reg_read(
+	unsigned mod_id, unsigned offset, bool auto_inc)
+{
+	u32 v = host1x_uclass_indoff_indmodid_f(mod_id)
+		| host1x_uclass_indoff_indroffset_f(offset)
+		| host1x_uclass_indoff_rwn_read_v();
+	if (auto_inc)
+		v |= host1x_uclass_indoff_autoinc_f(1);
+	return v;
+}
+
+/* cdma opcodes */
+static inline u32 host1x_opcode_setclass(
+	unsigned class_id, unsigned offset, unsigned mask)
+{
+	return (0 << 28) | (offset << 16) | (class_id << 6) | mask;
+}
+
+static inline u32 host1x_opcode_incr(unsigned offset, unsigned count)
+{
+	return (1 << 28) | (offset << 16) | count;
+}
+
+static inline u32 host1x_opcode_nonincr(unsigned offset, unsigned count)
+{
+	return (2 << 28) | (offset << 16) | count;
+}
+
+static inline u32 host1x_opcode_mask(unsigned offset, unsigned mask)
+{
+	return (3 << 28) | (offset << 16) | mask;
+}
+
+static inline u32 host1x_opcode_imm(unsigned offset, unsigned value)
+{
+	return (4 << 28) | (offset << 16) | value;
+}
+
+static inline u32 host1x_opcode_imm_incr_syncpt(unsigned cond, unsigned indx)
+{
+	return host1x_opcode_imm(host1x_uclass_incr_syncpt_r(),
+		host1x_class_host_incr_syncpt(cond, indx));
+}
+
+static inline u32 host1x_opcode_restart(unsigned address)
+{
+	return (5 << 28) | (address >> 4);
+}
+
+static inline u32 host1x_opcode_gather(unsigned count)
+{
+	return (6 << 28) | count;
+}
+
+static inline u32 host1x_opcode_gather_nonincr(unsigned offset, unsigned count)
+{
+	return (6 << 28) | (offset << 16) | BIT(15) | count;
+}
+
+static inline u32 host1x_opcode_gather_incr(unsigned offset, unsigned count)
+{
+	return (6 << 28) | (offset << 16) | BIT(15) | BIT(14) | count;
+}
+
+#define HOST1X_OPCODE_NOP host1x_opcode_nonincr(0, 0)
+
+#endif
diff --git a/drivers/gpu/host1x/hw/hw_host1x02_uclass.h b/drivers/gpu/host1x/hw/hw_host1x02_uclass.h
index a3b3c98..028e49d 100644
--- a/drivers/gpu/host1x/hw/hw_host1x02_uclass.h
+++ b/drivers/gpu/host1x/hw/hw_host1x02_uclass.h
@@ -111,6 +111,12 @@
 }
 #define HOST1X_UCLASS_WAIT_SYNCPT_BASE_OFFSET_F(v) \
 	host1x_uclass_wait_syncpt_base_offset_f(v)
+static inline u32 host1x_uclass_load_syncpt_base_r(void)
+{
+	return 0xb;
+}
+#define HOST1X_UCLASS_LOAD_SYNCPT_BASE \
+	host1x_uclass_load_syncpt_base_r()
 static inline u32 host1x_uclass_load_syncpt_base_base_indx_f(u32 v)
 {
 	return (v & 0xff) << 24;
diff --git a/drivers/gpu/host1x/hw/hw_host1x04_channel.h b/drivers/gpu/host1x/hw/hw_host1x04_channel.h
new file mode 100644
index 0000000..95e6f96
--- /dev/null
+++ b/drivers/gpu/host1x/hw/hw_host1x04_channel.h
@@ -0,0 +1,121 @@
+/*
+ * Copyright (c) 2013 NVIDIA Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program.  If not, see <http://www.gnu.org/licenses/>.
+ *
+ */
+
+ /*
+  * Function naming determines intended use:
+  *
+  *     <x>_r(void) : Returns the offset for register <x>.
+  *
+  *     <x>_w(void) : Returns the word offset for word (4 byte) element <x>.
+  *
+  *     <x>_<y>_s(void) : Returns size of field <y> of register <x> in bits.
+  *
+  *     <x>_<y>_f(u32 v) : Returns a value based on 'v' which has been shifted
+  *         and masked to place it at field <y> of register <x>.  This value
+  *         can be |'d with others to produce a full register value for
+  *         register <x>.
+  *
+  *     <x>_<y>_m(void) : Returns a mask for field <y> of register <x>.  This
+  *         value can be ~'d and then &'d to clear the value of field <y> for
+  *         register <x>.
+  *
+  *     <x>_<y>_<z>_f(void) : Returns the constant value <z> after being shifted
+  *         to place it at field <y> of register <x>.  This value can be |'d
+  *         with others to produce a full register value for <x>.
+  *
+  *     <x>_<y>_v(u32 r) : Returns the value of field <y> from a full register
+  *         <x> value 'r' after being shifted to place its LSB at bit 0.
+  *         This value is suitable for direct comparison with other unshifted
+  *         values appropriate for use in field <y> of register <x>.
+  *
+  *     <x>_<y>_<z>_v(void) : Returns the constant value for <z> defined for
+  *         field <y> of register <x>.  This value is suitable for direct
+  *         comparison with unshifted values appropriate for use in field <y>
+  *         of register <x>.
+  */
+
+#ifndef HOST1X_HW_HOST1X04_CHANNEL_H
+#define HOST1X_HW_HOST1X04_CHANNEL_H
+
+static inline u32 host1x_channel_fifostat_r(void)
+{
+	return 0x0;
+}
+#define HOST1X_CHANNEL_FIFOSTAT \
+	host1x_channel_fifostat_r()
+static inline u32 host1x_channel_fifostat_cfempty_v(u32 r)
+{
+	return (r >> 11) & 0x1;
+}
+#define HOST1X_CHANNEL_FIFOSTAT_CFEMPTY_V(r) \
+	host1x_channel_fifostat_cfempty_v(r)
+static inline u32 host1x_channel_dmastart_r(void)
+{
+	return 0x14;
+}
+#define HOST1X_CHANNEL_DMASTART \
+	host1x_channel_dmastart_r()
+static inline u32 host1x_channel_dmaput_r(void)
+{
+	return 0x18;
+}
+#define HOST1X_CHANNEL_DMAPUT \
+	host1x_channel_dmaput_r()
+static inline u32 host1x_channel_dmaget_r(void)
+{
+	return 0x1c;
+}
+#define HOST1X_CHANNEL_DMAGET \
+	host1x_channel_dmaget_r()
+static inline u32 host1x_channel_dmaend_r(void)
+{
+	return 0x20;
+}
+#define HOST1X_CHANNEL_DMAEND \
+	host1x_channel_dmaend_r()
+static inline u32 host1x_channel_dmactrl_r(void)
+{
+	return 0x24;
+}
+#define HOST1X_CHANNEL_DMACTRL \
+	host1x_channel_dmactrl_r()
+static inline u32 host1x_channel_dmactrl_dmastop(void)
+{
+	return 1 << 0;
+}
+#define HOST1X_CHANNEL_DMACTRL_DMASTOP \
+	host1x_channel_dmactrl_dmastop()
+static inline u32 host1x_channel_dmactrl_dmastop_v(u32 r)
+{
+	return (r >> 0) & 0x1;
+}
+#define HOST1X_CHANNEL_DMACTRL_DMASTOP_V(r) \
+	host1x_channel_dmactrl_dmastop_v(r)
+static inline u32 host1x_channel_dmactrl_dmagetrst(void)
+{
+	return 1 << 1;
+}
+#define HOST1X_CHANNEL_DMACTRL_DMAGETRST \
+	host1x_channel_dmactrl_dmagetrst()
+static inline u32 host1x_channel_dmactrl_dmainitget(void)
+{
+	return 1 << 2;
+}
+#define HOST1X_CHANNEL_DMACTRL_DMAINITGET \
+	host1x_channel_dmactrl_dmainitget()
+
+#endif
diff --git a/drivers/gpu/host1x/hw/hw_host1x04_sync.h b/drivers/gpu/host1x/hw/hw_host1x04_sync.h
new file mode 100644
index 0000000..ef2275b
--- /dev/null
+++ b/drivers/gpu/host1x/hw/hw_host1x04_sync.h
@@ -0,0 +1,243 @@
+/*
+ * Copyright (c) 2013 NVIDIA Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program.  If not, see <http://www.gnu.org/licenses/>.
+ *
+ */
+
+ /*
+  * Function naming determines intended use:
+  *
+  *     <x>_r(void) : Returns the offset for register <x>.
+  *
+  *     <x>_w(void) : Returns the word offset for word (4 byte) element <x>.
+  *
+  *     <x>_<y>_s(void) : Returns size of field <y> of register <x> in bits.
+  *
+  *     <x>_<y>_f(u32 v) : Returns a value based on 'v' which has been shifted
+  *         and masked to place it at field <y> of register <x>.  This value
+  *         can be |'d with others to produce a full register value for
+  *         register <x>.
+  *
+  *     <x>_<y>_m(void) : Returns a mask for field <y> of register <x>.  This
+  *         value can be ~'d and then &'d to clear the value of field <y> for
+  *         register <x>.
+  *
+  *     <x>_<y>_<z>_f(void) : Returns the constant value <z> after being shifted
+  *         to place it at field <y> of register <x>.  This value can be |'d
+  *         with others to produce a full register value for <x>.
+  *
+  *     <x>_<y>_v(u32 r) : Returns the value of field <y> from a full register
+  *         <x> value 'r' after being shifted to place its LSB at bit 0.
+  *         This value is suitable for direct comparison with other unshifted
+  *         values appropriate for use in field <y> of register <x>.
+  *
+  *     <x>_<y>_<z>_v(void) : Returns the constant value for <z> defined for
+  *         field <y> of register <x>.  This value is suitable for direct
+  *         comparison with unshifted values appropriate for use in field <y>
+  *         of register <x>.
+  */
+
+#ifndef HOST1X_HW_HOST1X04_SYNC_H
+#define HOST1X_HW_HOST1X04_SYNC_H
+
+#define REGISTER_STRIDE	4
+
+static inline u32 host1x_sync_syncpt_r(unsigned int id)
+{
+	return 0xf80 + id * REGISTER_STRIDE;
+}
+#define HOST1X_SYNC_SYNCPT(id) \
+	host1x_sync_syncpt_r(id)
+static inline u32 host1x_sync_syncpt_thresh_cpu0_int_status_r(unsigned int id)
+{
+	return 0xe80 + id * REGISTER_STRIDE;
+}
+#define HOST1X_SYNC_SYNCPT_THRESH_CPU0_INT_STATUS(id) \
+	host1x_sync_syncpt_thresh_cpu0_int_status_r(id)
+static inline u32 host1x_sync_syncpt_thresh_int_disable_r(unsigned int id)
+{
+	return 0xf00 + id * REGISTER_STRIDE;
+}
+#define HOST1X_SYNC_SYNCPT_THRESH_INT_DISABLE(id) \
+	host1x_sync_syncpt_thresh_int_disable_r(id)
+static inline u32 host1x_sync_syncpt_thresh_int_enable_cpu0_r(unsigned int id)
+{
+	return 0xf20 + id * REGISTER_STRIDE;
+}
+#define HOST1X_SYNC_SYNCPT_THRESH_INT_ENABLE_CPU0(id) \
+	host1x_sync_syncpt_thresh_int_enable_cpu0_r(id)
+static inline u32 host1x_sync_cf_setup_r(unsigned int channel)
+{
+	return 0xc00 + channel * REGISTER_STRIDE;
+}
+#define HOST1X_SYNC_CF_SETUP(channel) \
+	host1x_sync_cf_setup_r(channel)
+static inline u32 host1x_sync_cf_setup_base_v(u32 r)
+{
+	return (r >> 0) & 0x3ff;
+}
+#define HOST1X_SYNC_CF_SETUP_BASE_V(r) \
+	host1x_sync_cf_setup_base_v(r)
+static inline u32 host1x_sync_cf_setup_limit_v(u32 r)
+{
+	return (r >> 16) & 0x3ff;
+}
+#define HOST1X_SYNC_CF_SETUP_LIMIT_V(r) \
+	host1x_sync_cf_setup_limit_v(r)
+static inline u32 host1x_sync_cmdproc_stop_r(void)
+{
+	return 0xac;
+}
+#define HOST1X_SYNC_CMDPROC_STOP \
+	host1x_sync_cmdproc_stop_r()
+static inline u32 host1x_sync_ch_teardown_r(void)
+{
+	return 0xb0;
+}
+#define HOST1X_SYNC_CH_TEARDOWN \
+	host1x_sync_ch_teardown_r()
+static inline u32 host1x_sync_usec_clk_r(void)
+{
+	return 0x1a4;
+}
+#define HOST1X_SYNC_USEC_CLK \
+	host1x_sync_usec_clk_r()
+static inline u32 host1x_sync_ctxsw_timeout_cfg_r(void)
+{
+	return 0x1a8;
+}
+#define HOST1X_SYNC_CTXSW_TIMEOUT_CFG \
+	host1x_sync_ctxsw_timeout_cfg_r()
+static inline u32 host1x_sync_ip_busy_timeout_r(void)
+{
+	return 0x1bc;
+}
+#define HOST1X_SYNC_IP_BUSY_TIMEOUT \
+	host1x_sync_ip_busy_timeout_r()
+static inline u32 host1x_sync_mlock_owner_r(unsigned int id)
+{
+	return 0x340 + id * REGISTER_STRIDE;
+}
+#define HOST1X_SYNC_MLOCK_OWNER(id) \
+	host1x_sync_mlock_owner_r(id)
+static inline u32 host1x_sync_mlock_owner_chid_f(u32 v)
+{
+	return (v & 0xf) << 8;
+}
+#define HOST1X_SYNC_MLOCK_OWNER_CHID_F(v) \
+	host1x_sync_mlock_owner_chid_f(v)
+static inline u32 host1x_sync_mlock_owner_cpu_owns_v(u32 r)
+{
+	return (r >> 1) & 0x1;
+}
+#define HOST1X_SYNC_MLOCK_OWNER_CPU_OWNS_V(r) \
+	host1x_sync_mlock_owner_cpu_owns_v(r)
+static inline u32 host1x_sync_mlock_owner_ch_owns_v(u32 r)
+{
+	return (r >> 0) & 0x1;
+}
+#define HOST1X_SYNC_MLOCK_OWNER_CH_OWNS_V(r) \
+	host1x_sync_mlock_owner_ch_owns_v(r)
+static inline u32 host1x_sync_syncpt_int_thresh_r(unsigned int id)
+{
+	return 0x1380 + id * REGISTER_STRIDE;
+}
+#define HOST1X_SYNC_SYNCPT_INT_THRESH(id) \
+	host1x_sync_syncpt_int_thresh_r(id)
+static inline u32 host1x_sync_syncpt_base_r(unsigned int id)
+{
+	return 0x600 + id * REGISTER_STRIDE;
+}
+#define HOST1X_SYNC_SYNCPT_BASE(id) \
+	host1x_sync_syncpt_base_r(id)
+static inline u32 host1x_sync_syncpt_cpu_incr_r(unsigned int id)
+{
+	return 0xf60 + id * REGISTER_STRIDE;
+}
+#define HOST1X_SYNC_SYNCPT_CPU_INCR(id) \
+	host1x_sync_syncpt_cpu_incr_r(id)
+static inline u32 host1x_sync_cbread_r(unsigned int channel)
+{
+	return 0xc80 + channel * REGISTER_STRIDE;
+}
+#define HOST1X_SYNC_CBREAD(channel) \
+	host1x_sync_cbread_r(channel)
+static inline u32 host1x_sync_cfpeek_ctrl_r(void)
+{
+	return 0x74c;
+}
+#define HOST1X_SYNC_CFPEEK_CTRL \
+	host1x_sync_cfpeek_ctrl_r()
+static inline u32 host1x_sync_cfpeek_ctrl_addr_f(u32 v)
+{
+	return (v & 0x3ff) << 0;
+}
+#define HOST1X_SYNC_CFPEEK_CTRL_ADDR_F(v) \
+	host1x_sync_cfpeek_ctrl_addr_f(v)
+static inline u32 host1x_sync_cfpeek_ctrl_channr_f(u32 v)
+{
+	return (v & 0xf) << 16;
+}
+#define HOST1X_SYNC_CFPEEK_CTRL_CHANNR_F(v) \
+	host1x_sync_cfpeek_ctrl_channr_f(v)
+static inline u32 host1x_sync_cfpeek_ctrl_ena_f(u32 v)
+{
+	return (v & 0x1) << 31;
+}
+#define HOST1X_SYNC_CFPEEK_CTRL_ENA_F(v) \
+	host1x_sync_cfpeek_ctrl_ena_f(v)
+static inline u32 host1x_sync_cfpeek_read_r(void)
+{
+	return 0x750;
+}
+#define HOST1X_SYNC_CFPEEK_READ \
+	host1x_sync_cfpeek_read_r()
+static inline u32 host1x_sync_cfpeek_ptrs_r(void)
+{
+	return 0x754;
+}
+#define HOST1X_SYNC_CFPEEK_PTRS \
+	host1x_sync_cfpeek_ptrs_r()
+static inline u32 host1x_sync_cfpeek_ptrs_cf_rd_ptr_v(u32 r)
+{
+	return (r >> 0) & 0x3ff;
+}
+#define HOST1X_SYNC_CFPEEK_PTRS_CF_RD_PTR_V(r) \
+	host1x_sync_cfpeek_ptrs_cf_rd_ptr_v(r)
+static inline u32 host1x_sync_cfpeek_ptrs_cf_wr_ptr_v(u32 r)
+{
+	return (r >> 16) & 0x3ff;
+}
+#define HOST1X_SYNC_CFPEEK_PTRS_CF_WR_PTR_V(r) \
+	host1x_sync_cfpeek_ptrs_cf_wr_ptr_v(r)
+static inline u32 host1x_sync_cbstat_r(unsigned int channel)
+{
+	return 0xcc0 + channel * REGISTER_STRIDE;
+}
+#define HOST1X_SYNC_CBSTAT(channel) \
+	host1x_sync_cbstat_r(channel)
+static inline u32 host1x_sync_cbstat_cboffset_v(u32 r)
+{
+	return (r >> 0) & 0xffff;
+}
+#define HOST1X_SYNC_CBSTAT_CBOFFSET_V(r) \
+	host1x_sync_cbstat_cboffset_v(r)
+static inline u32 host1x_sync_cbstat_cbclass_v(u32 r)
+{
+	return (r >> 16) & 0x3ff;
+}
+#define HOST1X_SYNC_CBSTAT_CBCLASS_V(r) \
+	host1x_sync_cbstat_cbclass_v(r)
+
+#endif
diff --git a/drivers/gpu/host1x/hw/hw_host1x04_uclass.h b/drivers/gpu/host1x/hw/hw_host1x04_uclass.h
new file mode 100644
index 0000000..d1460e9
--- /dev/null
+++ b/drivers/gpu/host1x/hw/hw_host1x04_uclass.h
@@ -0,0 +1,181 @@
+/*
+ * Copyright (c) 2013 NVIDIA Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program.  If not, see <http://www.gnu.org/licenses/>.
+ *
+ */
+
+ /*
+  * Function naming determines intended use:
+  *
+  *     <x>_r(void) : Returns the offset for register <x>.
+  *
+  *     <x>_w(void) : Returns the word offset for word (4 byte) element <x>.
+  *
+  *     <x>_<y>_s(void) : Returns size of field <y> of register <x> in bits.
+  *
+  *     <x>_<y>_f(u32 v) : Returns a value based on 'v' which has been shifted
+  *         and masked to place it at field <y> of register <x>.  This value
+  *         can be |'d with others to produce a full register value for
+  *         register <x>.
+  *
+  *     <x>_<y>_m(void) : Returns a mask for field <y> of register <x>.  This
+  *         value can be ~'d and then &'d to clear the value of field <y> for
+  *         register <x>.
+  *
+  *     <x>_<y>_<z>_f(void) : Returns the constant value <z> after being shifted
+  *         to place it at field <y> of register <x>.  This value can be |'d
+  *         with others to produce a full register value for <x>.
+  *
+  *     <x>_<y>_v(u32 r) : Returns the value of field <y> from a full register
+  *         <x> value 'r' after being shifted to place its LSB at bit 0.
+  *         This value is suitable for direct comparison with other unshifted
+  *         values appropriate for use in field <y> of register <x>.
+  *
+  *     <x>_<y>_<z>_v(void) : Returns the constant value for <z> defined for
+  *         field <y> of register <x>.  This value is suitable for direct
+  *         comparison with unshifted values appropriate for use in field <y>
+  *         of register <x>.
+  */
+
+#ifndef HOST1X_HW_HOST1X04_UCLASS_H
+#define HOST1X_HW_HOST1X04_UCLASS_H
+
+static inline u32 host1x_uclass_incr_syncpt_r(void)
+{
+	return 0x0;
+}
+#define HOST1X_UCLASS_INCR_SYNCPT \
+	host1x_uclass_incr_syncpt_r()
+static inline u32 host1x_uclass_incr_syncpt_cond_f(u32 v)
+{
+	return (v & 0xff) << 8;
+}
+#define HOST1X_UCLASS_INCR_SYNCPT_COND_F(v) \
+	host1x_uclass_incr_syncpt_cond_f(v)
+static inline u32 host1x_uclass_incr_syncpt_indx_f(u32 v)
+{
+	return (v & 0xff) << 0;
+}
+#define HOST1X_UCLASS_INCR_SYNCPT_INDX_F(v) \
+	host1x_uclass_incr_syncpt_indx_f(v)
+static inline u32 host1x_uclass_wait_syncpt_r(void)
+{
+	return 0x8;
+}
+#define HOST1X_UCLASS_WAIT_SYNCPT \
+	host1x_uclass_wait_syncpt_r()
+static inline u32 host1x_uclass_wait_syncpt_indx_f(u32 v)
+{
+	return (v & 0xff) << 24;
+}
+#define HOST1X_UCLASS_WAIT_SYNCPT_INDX_F(v) \
+	host1x_uclass_wait_syncpt_indx_f(v)
+static inline u32 host1x_uclass_wait_syncpt_thresh_f(u32 v)
+{
+	return (v & 0xffffff) << 0;
+}
+#define HOST1X_UCLASS_WAIT_SYNCPT_THRESH_F(v) \
+	host1x_uclass_wait_syncpt_thresh_f(v)
+static inline u32 host1x_uclass_wait_syncpt_base_r(void)
+{
+	return 0x9;
+}
+#define HOST1X_UCLASS_WAIT_SYNCPT_BASE \
+	host1x_uclass_wait_syncpt_base_r()
+static inline u32 host1x_uclass_wait_syncpt_base_indx_f(u32 v)
+{
+	return (v & 0xff) << 24;
+}
+#define HOST1X_UCLASS_WAIT_SYNCPT_BASE_INDX_F(v) \
+	host1x_uclass_wait_syncpt_base_indx_f(v)
+static inline u32 host1x_uclass_wait_syncpt_base_base_indx_f(u32 v)
+{
+	return (v & 0xff) << 16;
+}
+#define HOST1X_UCLASS_WAIT_SYNCPT_BASE_BASE_INDX_F(v) \
+	host1x_uclass_wait_syncpt_base_base_indx_f(v)
+static inline u32 host1x_uclass_wait_syncpt_base_offset_f(u32 v)
+{
+	return (v & 0xffff) << 0;
+}
+#define HOST1X_UCLASS_WAIT_SYNCPT_BASE_OFFSET_F(v) \
+	host1x_uclass_wait_syncpt_base_offset_f(v)
+static inline u32 host1x_uclass_load_syncpt_base_r(void)
+{
+	return 0xb;
+}
+#define HOST1X_UCLASS_LOAD_SYNCPT_BASE \
+	host1x_uclass_load_syncpt_base_r()
+static inline u32 host1x_uclass_load_syncpt_base_base_indx_f(u32 v)
+{
+	return (v & 0xff) << 24;
+}
+#define HOST1X_UCLASS_LOAD_SYNCPT_BASE_BASE_INDX_F(v) \
+	host1x_uclass_load_syncpt_base_base_indx_f(v)
+static inline u32 host1x_uclass_load_syncpt_base_value_f(u32 v)
+{
+	return (v & 0xffffff) << 0;
+}
+#define HOST1X_UCLASS_LOAD_SYNCPT_BASE_VALUE_F(v) \
+	host1x_uclass_load_syncpt_base_value_f(v)
+static inline u32 host1x_uclass_incr_syncpt_base_base_indx_f(u32 v)
+{
+	return (v & 0xff) << 24;
+}
+#define HOST1X_UCLASS_INCR_SYNCPT_BASE_BASE_INDX_F(v) \
+	host1x_uclass_incr_syncpt_base_base_indx_f(v)
+static inline u32 host1x_uclass_incr_syncpt_base_offset_f(u32 v)
+{
+	return (v & 0xffffff) << 0;
+}
+#define HOST1X_UCLASS_INCR_SYNCPT_BASE_OFFSET_F(v) \
+	host1x_uclass_incr_syncpt_base_offset_f(v)
+static inline u32 host1x_uclass_indoff_r(void)
+{
+	return 0x2d;
+}
+#define HOST1X_UCLASS_INDOFF \
+	host1x_uclass_indoff_r()
+static inline u32 host1x_uclass_indoff_indbe_f(u32 v)
+{
+	return (v & 0xf) << 28;
+}
+#define HOST1X_UCLASS_INDOFF_INDBE_F(v) \
+	host1x_uclass_indoff_indbe_f(v)
+static inline u32 host1x_uclass_indoff_autoinc_f(u32 v)
+{
+	return (v & 0x1) << 27;
+}
+#define HOST1X_UCLASS_INDOFF_AUTOINC_F(v) \
+	host1x_uclass_indoff_autoinc_f(v)
+static inline u32 host1x_uclass_indoff_indmodid_f(u32 v)
+{
+	return (v & 0xff) << 18;
+}
+#define HOST1X_UCLASS_INDOFF_INDMODID_F(v) \
+	host1x_uclass_indoff_indmodid_f(v)
+static inline u32 host1x_uclass_indoff_indroffset_f(u32 v)
+{
+	return (v & 0xffff) << 2;
+}
+#define HOST1X_UCLASS_INDOFF_INDROFFSET_F(v) \
+	host1x_uclass_indoff_indroffset_f(v)
+static inline u32 host1x_uclass_indoff_rwn_read_v(void)
+{
+	return 1;
+}
+#define HOST1X_UCLASS_INDOFF_RWN_READ_V \
+	host1x_uclass_indoff_rwn_read_v()
+
+#endif
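
As a quick illustration of the <x>_<y>_f() / <x>_<y>_v() convention documented at the top of this header, the sketch below composes a WAIT_SYNCPT payload from the index and threshold fields and reads the index back out; the _v() accessor here is written for the example only and is not part of the patch.

#include <stdint.h>
#include <stdio.h>

/* Field packers, same layout as host1x_uclass_wait_syncpt_*_f() above. */
static uint32_t wait_syncpt_indx_f(uint32_t v)
{
	return (v & 0xff) << 24;
}

static uint32_t wait_syncpt_thresh_f(uint32_t v)
{
	return (v & 0xffffff) << 0;
}

/* Example-only _v() accessor: extracts the index field back out. */
static uint32_t wait_syncpt_indx_v(uint32_t r)
{
	return (r >> 24) & 0xff;
}

int main(void)
{
	/* Wait for syncpoint 3 to reach threshold 42. */
	uint32_t payload = wait_syncpt_indx_f(3) | wait_syncpt_thresh_f(42);

	printf("payload = 0x%08x, index = %u\n",
	       payload, wait_syncpt_indx_v(payload));
	return 0;
}
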
diff --git a/drivers/gpu/host1x/hw/intr_hw.c b/drivers/gpu/host1x/hw/intr_hw.c
index b26dcc8..db9017a 100644
--- a/drivers/gpu/host1x/hw/intr_hw.c
+++ b/drivers/gpu/host1x/hw/intr_hw.c
@@ -20,7 +20,6 @@
 #include <linux/interrupt.h>
 #include <linux/irq.h>
 #include <linux/io.h>
-#include <asm/mach/irq.h>
 
 #include "../intr.h"
 #include "../dev.h"
diff --git a/drivers/gpu/host1x/job.c b/drivers/gpu/host1x/job.c
index de5ec33..1146e3b 100644
--- a/drivers/gpu/host1x/job.c
+++ b/drivers/gpu/host1x/job.c
@@ -75,12 +75,14 @@
 
 	return job;
 }
+EXPORT_SYMBOL(host1x_job_alloc);
 
 struct host1x_job *host1x_job_get(struct host1x_job *job)
 {
 	kref_get(&job->ref);
 	return job;
 }
+EXPORT_SYMBOL(host1x_job_get);
 
 static void job_free(struct kref *ref)
 {
@@ -93,6 +95,7 @@
 {
 	kref_put(&job->ref, job_free);
 }
+EXPORT_SYMBOL(host1x_job_put);
 
 void host1x_job_add_gather(struct host1x_job *job, struct host1x_bo *bo,
 			   u32 words, u32 offset)
@@ -104,6 +107,7 @@
 	cur_gather->offset = offset;
 	job->num_gathers++;
 }
+EXPORT_SYMBOL(host1x_job_add_gather);
 
 /*
  * NULL an already satisfied WAIT_SYNCPT host method, by patching its
@@ -560,6 +564,7 @@
 
 	return err;
 }
+EXPORT_SYMBOL(host1x_job_pin);
 
 void host1x_job_unpin(struct host1x_job *job)
 {
@@ -577,6 +582,7 @@
 				      job->gather_copy_mapped,
 				      job->gather_copy);
 }
+EXPORT_SYMBOL(host1x_job_unpin);
 
 /*
  * Debug routine used to dump job entries
diff --git a/drivers/gpu/host1x/mipi.c b/drivers/gpu/host1x/mipi.c
new file mode 100644
index 0000000..9882ea1
--- /dev/null
+++ b/drivers/gpu/host1x/mipi.c
@@ -0,0 +1,275 @@
+/*
+ * Copyright (C) 2013 NVIDIA Corporation
+ *
+ * Permission to use, copy, modify, distribute, and sell this software and its
+ * documentation for any purpose is hereby granted without fee, provided that
+ * the above copyright notice appear in all copies and that both that copyright
+ * notice and this permission notice appear in supporting documentation, and
+ * that the name of the copyright holders not be used in advertising or
+ * publicity pertaining to distribution of the software without specific,
+ * written prior permission.  The copyright holders make no representations
+ * about the suitability of this software for any purpose.  It is provided "as
+ * is" without express or implied warranty.
+ *
+ * THE COPYRIGHT HOLDERS DISCLAIM ALL WARRANTIES WITH REGARD TO THIS SOFTWARE,
+ * INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN NO
+ * EVENT SHALL THE COPYRIGHT HOLDERS BE LIABLE FOR ANY SPECIAL, INDIRECT OR
+ * CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE,
+ * DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
+ * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
+ * OF THIS SOFTWARE.
+ */
+
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/host1x.h>
+#include <linux/io.h>
+#include <linux/of_platform.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+
+#include "dev.h"
+
+#define MIPI_CAL_CTRL			0x00
+#define MIPI_CAL_CTRL_START		(1 << 0)
+
+#define MIPI_CAL_AUTOCAL_CTRL		0x01
+
+#define MIPI_CAL_STATUS			0x02
+#define MIPI_CAL_STATUS_DONE		(1 << 16)
+#define MIPI_CAL_STATUS_ACTIVE		(1 <<  0)
+
+#define MIPI_CAL_CONFIG_CSIA		0x05
+#define MIPI_CAL_CONFIG_CSIB		0x06
+#define MIPI_CAL_CONFIG_CSIC		0x07
+#define MIPI_CAL_CONFIG_CSID		0x08
+#define MIPI_CAL_CONFIG_CSIE		0x09
+#define MIPI_CAL_CONFIG_DSIA		0x0e
+#define MIPI_CAL_CONFIG_DSIB		0x0f
+#define MIPI_CAL_CONFIG_DSIC		0x10
+#define MIPI_CAL_CONFIG_DSID		0x11
+
+#define MIPI_CAL_CONFIG_SELECT		(1 << 21)
+#define MIPI_CAL_CONFIG_HSPDOS(x)	(((x) & 0x1f) << 16)
+#define MIPI_CAL_CONFIG_HSPUOS(x)	(((x) & 0x1f) <<  8)
+#define MIPI_CAL_CONFIG_TERMOS(x)	(((x) & 0x1f) <<  0)
+
+#define MIPI_CAL_BIAS_PAD_CFG0		0x16
+#define MIPI_CAL_BIAS_PAD_PDVCLAMP	(1 << 1)
+#define MIPI_CAL_BIAS_PAD_E_VCLAMP_REF	(1 << 0)
+
+#define MIPI_CAL_BIAS_PAD_CFG1		0x17
+
+#define MIPI_CAL_BIAS_PAD_CFG2		0x18
+#define MIPI_CAL_BIAS_PAD_PDVREG	(1 << 1)
+
+static const struct module {
+	unsigned long reg;
+} modules[] = {
+	{ .reg = MIPI_CAL_CONFIG_CSIA },
+	{ .reg = MIPI_CAL_CONFIG_CSIB },
+	{ .reg = MIPI_CAL_CONFIG_CSIC },
+	{ .reg = MIPI_CAL_CONFIG_CSID },
+	{ .reg = MIPI_CAL_CONFIG_CSIE },
+	{ .reg = MIPI_CAL_CONFIG_DSIA },
+	{ .reg = MIPI_CAL_CONFIG_DSIB },
+	{ .reg = MIPI_CAL_CONFIG_DSIC },
+	{ .reg = MIPI_CAL_CONFIG_DSID },
+};
+
+struct tegra_mipi {
+	void __iomem *regs;
+	struct mutex lock;
+	struct clk *clk;
+};
+
+struct tegra_mipi_device {
+	struct platform_device *pdev;
+	struct tegra_mipi *mipi;
+	struct device *device;
+	unsigned long pads;
+};
+
+static inline unsigned long tegra_mipi_readl(struct tegra_mipi *mipi,
+					     unsigned long reg)
+{
+	return readl(mipi->regs + (reg << 2));
+}
+
+static inline void tegra_mipi_writel(struct tegra_mipi *mipi,
+				     unsigned long value, unsigned long reg)
+{
+	writel(value, mipi->regs + (reg << 2));
+}
+
+struct tegra_mipi_device *tegra_mipi_request(struct device *device)
+{
+	struct device_node *np = device->of_node;
+	struct tegra_mipi_device *dev;
+	struct of_phandle_args args;
+	int err;
+
+	err = of_parse_phandle_with_args(np, "nvidia,mipi-calibrate",
+					 "#nvidia,mipi-calibrate-cells", 0,
+					 &args);
+	if (err < 0)
+		return ERR_PTR(err);
+
+	dev = kzalloc(sizeof(*dev), GFP_KERNEL);
+	if (!dev) {
+		of_node_put(args.np);
+		err = -ENOMEM;
+		goto out;
+	}
+
+	dev->pdev = of_find_device_by_node(args.np);
+	if (!dev->pdev) {
+		of_node_put(args.np);
+		err = -ENODEV;
+		goto free;
+	}
+
+	of_node_put(args.np);
+
+	dev->mipi = platform_get_drvdata(dev->pdev);
+	if (!dev->mipi) {
+		err = -EPROBE_DEFER;
+		goto pdev_put;
+	}
+
+	dev->pads = args.args[0];
+	dev->device = device;
+
+	return dev;
+
+pdev_put:
+	platform_device_put(dev->pdev);
+free:
+	kfree(dev);
+out:
+	return ERR_PTR(err);
+}
+EXPORT_SYMBOL(tegra_mipi_request);
+
+void tegra_mipi_free(struct tegra_mipi_device *device)
+{
+	platform_device_put(device->pdev);
+	kfree(device);
+}
+EXPORT_SYMBOL(tegra_mipi_free);
+
+static int tegra_mipi_wait(struct tegra_mipi *mipi)
+{
+	unsigned long timeout = jiffies + msecs_to_jiffies(250);
+	unsigned long value;
+
+	while (time_before(jiffies, timeout)) {
+		value = tegra_mipi_readl(mipi, MIPI_CAL_STATUS);
+		if ((value & MIPI_CAL_STATUS_ACTIVE) == 0 &&
+		    (value & MIPI_CAL_STATUS_DONE) != 0)
+			return 0;
+
+		usleep_range(10, 50);
+	}
+
+	return -ETIMEDOUT;
+}
+
+int tegra_mipi_calibrate(struct tegra_mipi_device *device)
+{
+	unsigned long value;
+	unsigned int i;
+	int err;
+
+	err = clk_enable(device->mipi->clk);
+	if (err < 0)
+		return err;
+
+	mutex_lock(&device->mipi->lock);
+
+	value = tegra_mipi_readl(device->mipi, MIPI_CAL_BIAS_PAD_CFG0);
+	value &= ~MIPI_CAL_BIAS_PAD_PDVCLAMP;
+	value |= MIPI_CAL_BIAS_PAD_E_VCLAMP_REF;
+	tegra_mipi_writel(device->mipi, value, MIPI_CAL_BIAS_PAD_CFG0);
+
+	value = tegra_mipi_readl(device->mipi, MIPI_CAL_BIAS_PAD_CFG2);
+	value &= ~MIPI_CAL_BIAS_PAD_PDVREG;
+	tegra_mipi_writel(device->mipi, value, MIPI_CAL_BIAS_PAD_CFG2);
+
+	for (i = 0; i < ARRAY_SIZE(modules); i++) {
+		if (device->pads & BIT(i))
+			value = MIPI_CAL_CONFIG_SELECT |
+				MIPI_CAL_CONFIG_HSPDOS(0) |
+				MIPI_CAL_CONFIG_HSPUOS(4) |
+				MIPI_CAL_CONFIG_TERMOS(5);
+		else
+			value = 0;
+
+		tegra_mipi_writel(device->mipi, value, modules[i].reg);
+	}
+
+	tegra_mipi_writel(device->mipi, MIPI_CAL_CTRL_START, MIPI_CAL_CTRL);
+
+	err = tegra_mipi_wait(device->mipi);
+
+	mutex_unlock(&device->mipi->lock);
+	clk_disable(device->mipi->clk);
+
+	return err;
+}
+EXPORT_SYMBOL(tegra_mipi_calibrate);
+
+static int tegra_mipi_probe(struct platform_device *pdev)
+{
+	struct tegra_mipi *mipi;
+	struct resource *res;
+	int err;
+
+	mipi = devm_kzalloc(&pdev->dev, sizeof(*mipi), GFP_KERNEL);
+	if (!mipi)
+		return -ENOMEM;
+
+	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	mipi->regs = devm_ioremap_resource(&pdev->dev, res);
+	if (IS_ERR(mipi->regs))
+		return PTR_ERR(mipi->regs);
+
+	mutex_init(&mipi->lock);
+
+	mipi->clk = devm_clk_get(&pdev->dev, NULL);
+	if (IS_ERR(mipi->clk)) {
+		dev_err(&pdev->dev, "failed to get clock\n");
+		return PTR_ERR(mipi->clk);
+	}
+
+	err = clk_prepare(mipi->clk);
+	if (err < 0)
+		return err;
+
+	platform_set_drvdata(pdev, mipi);
+
+	return 0;
+}
+
+static int tegra_mipi_remove(struct platform_device *pdev)
+{
+	struct tegra_mipi *mipi = platform_get_drvdata(pdev);
+
+	clk_unprepare(mipi->clk);
+
+	return 0;
+}
+
+static struct of_device_id tegra_mipi_of_match[] = {
+	{ .compatible = "nvidia,tegra114-mipi", },
+	{ },
+};
+
+struct platform_driver tegra_mipi_driver = {
+	.driver = {
+		.name = "tegra-mipi",
+		.of_match_table = tegra_mipi_of_match,
+	},
+	.probe = tegra_mipi_probe,
+	.remove = tegra_mipi_remove,
+};
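
For context, a consumer of this calibration helper is expected to look roughly like the sketch below: a hypothetical DSI or CSI driver whose device-tree node carries the nvidia,mipi-calibrate phandle requests a tegra_mipi_device at probe time and triggers pad calibration before bringing the link up. The driver name and structure are illustrative, and the prototypes are assumed to be exposed through <linux/host1x.h>; only tegra_mipi_request(), tegra_mipi_calibrate() and tegra_mipi_free() come from this patch.

#include <linux/err.h>
#include <linux/host1x.h>
#include <linux/platform_device.h>

/* Hypothetical consumer driver: not part of this patch. */
static int example_dsi_probe(struct platform_device *pdev)
{
	struct tegra_mipi_device *mipi;
	int err;

	/* Resolves the "nvidia,mipi-calibrate" phandle of this device. */
	mipi = tegra_mipi_request(&pdev->dev);
	if (IS_ERR(mipi))
		return PTR_ERR(mipi);

	/* Run pad calibration before enabling the DSI/CSI link. */
	err = tegra_mipi_calibrate(mipi);
	if (err < 0) {
		tegra_mipi_free(mipi);
		return err;
	}

	platform_set_drvdata(pdev, mipi);
	return 0;
}
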
diff --git a/drivers/gpu/host1x/syncpt.c b/drivers/gpu/host1x/syncpt.c
index 159c479..bfb09d8 100644
--- a/drivers/gpu/host1x/syncpt.c
+++ b/drivers/gpu/host1x/syncpt.c
@@ -93,6 +93,7 @@
 {
 	return sp->id;
 }
+EXPORT_SYMBOL(host1x_syncpt_id);
 
 /*
  * Updates the value sent to hardware.
@@ -168,6 +169,7 @@
 {
 	return host1x_hw_syncpt_cpu_incr(sp->host, sp);
 }
+EXPORT_SYMBOL(host1x_syncpt_incr);
 
 /*
 * Updates sync point from hardware, and returns true if syncpoint is expired,
@@ -377,6 +379,7 @@
 	struct host1x *host = dev_get_drvdata(dev->parent);
 	return host1x_syncpt_alloc(host, dev, flags);
 }
+EXPORT_SYMBOL(host1x_syncpt_request);
 
 void host1x_syncpt_free(struct host1x_syncpt *sp)
 {
@@ -390,6 +393,7 @@
 	sp->name = NULL;
 	sp->client_managed = false;
 }
+EXPORT_SYMBOL(host1x_syncpt_free);
 
 void host1x_syncpt_deinit(struct host1x *host)
 {
@@ -408,6 +412,7 @@
 	smp_rmb();
 	return (u32)atomic_read(&sp->max_val);
 }
+EXPORT_SYMBOL(host1x_syncpt_read_max);
 
 /*
  * Read min, which is a shadow of the current sync point value in hardware.
@@ -417,6 +422,7 @@
 	smp_rmb();
 	return (u32)atomic_read(&sp->min_val);
 }
+EXPORT_SYMBOL(host1x_syncpt_read_min);
 
 int host1x_syncpt_nb_pts(struct host1x *host)
 {
@@ -439,13 +445,16 @@
 		return NULL;
 	return host->syncpt + id;
 }
+EXPORT_SYMBOL(host1x_syncpt_get);
 
 struct host1x_syncpt_base *host1x_syncpt_get_base(struct host1x_syncpt *sp)
 {
 	return sp ? sp->base : NULL;
 }
+EXPORT_SYMBOL(host1x_syncpt_get_base);
 
 u32 host1x_syncpt_base_id(struct host1x_syncpt_base *base)
 {
 	return base->id;
 }
+EXPORT_SYMBOL(host1x_syncpt_base_id);
diff --git a/drivers/hid/hid-apple.c b/drivers/hid/hid-apple.c
index 4975581..f822fd2 100644
--- a/drivers/hid/hid-apple.c
+++ b/drivers/hid/hid-apple.c
@@ -469,6 +469,9 @@
 	{ HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE,
 				USB_DEVICE_ID_APPLE_ALU_WIRELESS_2011_ANSI),
 		.driver_data = APPLE_NUMLOCK_EMULATION | APPLE_HAS_FN },
+	{ HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE,
+				USB_DEVICE_ID_APPLE_ALU_WIRELESS_2011_JIS),
+		.driver_data = APPLE_NUMLOCK_EMULATION | APPLE_HAS_FN },
 	{ HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_WIRELESS_JIS),
 		.driver_data = APPLE_NUMLOCK_EMULATION | APPLE_HAS_FN },
 	{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING_ANSI),
diff --git a/drivers/hid/hid-core.c b/drivers/hid/hid-core.c
index 026ab0f..cc32a6f 100644
--- a/drivers/hid/hid-core.c
+++ b/drivers/hid/hid-core.c
@@ -1679,6 +1679,7 @@
 	{ HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_WIRELESS_2009_JIS) },
 	{ HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_WIRELESS_2011_ANSI) },
 	{ HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_WIRELESS_2011_ISO) },
+	{ HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_WIRELESS_2011_JIS) },
 	{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_FOUNTAIN_TP_ONLY) },
 	{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_GEYSER1_TP_ONLY) },
 	{ HID_USB_DEVICE(USB_VENDOR_ID_AUREAL, USB_DEVICE_ID_AUREAL_W01RN) },
@@ -1779,6 +1780,8 @@
 	{ HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_PRESENTER_8K_USB) },
 	{ HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_DIGITAL_MEDIA_3K) },
 	{ HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_WIRELESS_OPTICAL_DESKTOP_3_0) },
+	{ HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_TYPE_COVER_2) },
+	{ HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_TOUCH_COVER_2) },
 	{ HID_USB_DEVICE(USB_VENDOR_ID_MONTEREY, USB_DEVICE_ID_GENIUS_KB29E) },
 	{ HID_USB_DEVICE(USB_VENDOR_ID_NTRIG, USB_DEVICE_ID_NTRIG_TOUCH_SCREEN) },
 	{ HID_USB_DEVICE(USB_VENDOR_ID_NTRIG, USB_DEVICE_ID_NTRIG_TOUCH_SCREEN_1) },
@@ -2118,6 +2121,7 @@
 	{ HID_USB_DEVICE(USB_VENDOR_ID_BERKSHIRE, USB_DEVICE_ID_BERKSHIRE_PCWD) },
 	{ HID_USB_DEVICE(USB_VENDOR_ID_CIDC, 0x0103) },
 	{ HID_USB_DEVICE(USB_VENDOR_ID_CYGNAL, USB_DEVICE_ID_CYGNAL_RADIO_SI470X) },
+	{ HID_USB_DEVICE(USB_VENDOR_ID_CYGNAL, USB_DEVICE_ID_CYGNAL_RADIO_SI4713) },
 	{ HID_USB_DEVICE(USB_VENDOR_ID_CMEDIA, USB_DEVICE_ID_CM109) },
 	{ HID_USB_DEVICE(USB_VENDOR_ID_CYPRESS, USB_DEVICE_ID_CYPRESS_HIDCOM) },
 	{ HID_USB_DEVICE(USB_VENDOR_ID_CYPRESS, USB_DEVICE_ID_CYPRESS_ULTRAMOUSE) },
diff --git a/drivers/hid/hid-hyperv.c b/drivers/hid/hid-hyperv.c
index 8fae6d1..c24908f 100644
--- a/drivers/hid/hid-hyperv.c
+++ b/drivers/hid/hid-hyperv.c
@@ -157,6 +157,7 @@
 	u32			report_desc_size;
 	struct hv_input_dev_info hid_dev_info;
 	struct hid_device       *hid_device;
+	u8			input_buf[HID_MAX_BUFFER_SIZE];
 };
 
 
@@ -256,6 +257,7 @@
 	struct synthhid_msg *hid_msg;
 	struct mousevsc_dev *input_dev = hv_get_drvdata(device);
 	struct synthhid_input_report *input_report;
+	size_t len;
 
 	pipe_msg = (struct pipe_prt_msg *)((unsigned long)packet +
 						(packet->offset8 << 3));
@@ -300,9 +302,12 @@
 			(struct synthhid_input_report *)pipe_msg->data;
 		if (!input_dev->init_complete)
 			break;
-		hid_input_report(input_dev->hid_device,
-				HID_INPUT_REPORT, input_report->buffer,
-				input_report->header.size, 1);
+
+		len = min(input_report->header.size,
+			  (u32)sizeof(input_dev->input_buf));
+		memcpy(input_dev->input_buf, input_report->buffer, len);
+		hid_input_report(input_dev->hid_device, HID_INPUT_REPORT,
+				 input_dev->input_buf, len, 1);
 		break;
 	default:
 		pr_err("unsupported hid msg type - type %d len %d",
diff --git a/drivers/hid/hid-ids.h b/drivers/hid/hid-ids.h
index 92b40c0..22f28d6 100644
--- a/drivers/hid/hid-ids.h
+++ b/drivers/hid/hid-ids.h
@@ -135,6 +135,7 @@
 #define USB_DEVICE_ID_APPLE_ALU_WIRELESS_2009_JIS   0x023b
 #define USB_DEVICE_ID_APPLE_ALU_WIRELESS_2011_ANSI  0x0255
 #define USB_DEVICE_ID_APPLE_ALU_WIRELESS_2011_ISO   0x0256
+#define USB_DEVICE_ID_APPLE_ALU_WIRELESS_2011_JIS   0x0257
 #define USB_DEVICE_ID_APPLE_WELLSPRING8_ANSI	0x0290
 #define USB_DEVICE_ID_APPLE_WELLSPRING8_ISO	0x0291
 #define USB_DEVICE_ID_APPLE_WELLSPRING8_JIS	0x0292
@@ -240,6 +241,9 @@
 
 #define USB_VENDOR_ID_CYGNAL		0x10c4
 #define USB_DEVICE_ID_CYGNAL_RADIO_SI470X	0x818a
+#define USB_DEVICE_ID_FOCALTECH_FTXXXX_MULTITOUCH	0x81b9
+
+#define USB_DEVICE_ID_CYGNAL_RADIO_SI4713       0x8244
 
 #define USB_VENDOR_ID_CYPRESS		0x04b4
 #define USB_DEVICE_ID_CYPRESS_MOUSE	0x0001
@@ -449,6 +453,9 @@
 #define USB_VENDOR_ID_INTEL_1		0x8087
 #define USB_DEVICE_ID_INTEL_HID_SENSOR	0x09fa
 
+#define USB_VENDOR_ID_STM_0             0x0483
+#define USB_DEVICE_ID_STM_HID_SENSOR    0x91d1
+
 #define USB_VENDOR_ID_ION		0x15e4
 #define USB_DEVICE_ID_ICADE		0x0132
 
@@ -617,6 +624,8 @@
 #define USB_DEVICE_ID_MS_PRESENTER_8K_USB	0x0713
 #define USB_DEVICE_ID_MS_DIGITAL_MEDIA_3K	0x0730
 #define USB_DEVICE_ID_MS_COMFORT_MOUSE_4500	0x076c
+#define USB_DEVICE_ID_MS_TOUCH_COVER_2	0x07a7
+#define USB_DEVICE_ID_MS_TYPE_COVER_2	0x07a9
 
 #define USB_VENDOR_ID_MOJO		0x8282
 #define USB_DEVICE_ID_RETRO_ADAPTER	0x3201
@@ -642,6 +651,7 @@
 
 #define USB_VENDOR_ID_NEXIO		0x1870
 #define USB_DEVICE_ID_NEXIO_MULTITOUCH_420	0x010d
+#define USB_DEVICE_ID_NEXIO_MULTITOUCH_PTI0750	0x0110
 
 #define USB_VENDOR_ID_NEXTWINDOW	0x1926
 #define USB_DEVICE_ID_NEXTWINDOW_TOUCHSCREEN	0x0003
diff --git a/drivers/hid/hid-input.c b/drivers/hid/hid-input.c
index d50e731..a713e62 100644
--- a/drivers/hid/hid-input.c
+++ b/drivers/hid/hid-input.c
@@ -1178,7 +1178,7 @@
 
 	/* fall back to generic raw-output-report */
 	len = ((report->size - 1) >> 3) + 1 + (report->id > 0);
-	buf = kmalloc(len, GFP_KERNEL);
+	buf = hid_alloc_report_buf(report, GFP_KERNEL);
 	if (!buf)
 		return;
 
diff --git a/drivers/hid/hid-microsoft.c b/drivers/hid/hid-microsoft.c
index c6ef6ee..404a3a8 100644
--- a/drivers/hid/hid-microsoft.c
+++ b/drivers/hid/hid-microsoft.c
@@ -208,6 +208,10 @@
 		.driver_data = MS_NOGET },
 	{ HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_COMFORT_MOUSE_4500),
 		.driver_data = MS_DUPLICATE_USAGES },
+	{ HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_TYPE_COVER_2),
+		.driver_data = 0 },
+	{ HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_TOUCH_COVER_2),
+		.driver_data = 0 },
 
 	{ HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_PRESENTER_8K_BT),
 		.driver_data = MS_PRESENTER },
diff --git a/drivers/hid/hid-multitouch.c b/drivers/hid/hid-multitouch.c
index f134d73..221d503 100644
--- a/drivers/hid/hid-multitouch.c
+++ b/drivers/hid/hid-multitouch.c
@@ -1166,6 +1166,11 @@
 		MT_USB_DEVICE(USB_VENDOR_ID_FLATFROG,
 			USB_DEVICE_ID_MULTITOUCH_3200) },
 
+	/* FocalTech Panels */
+	{ .driver_data = MT_CLS_SERIAL,
+		MT_USB_DEVICE(USB_VENDOR_ID_CYGNAL,
+			USB_DEVICE_ID_FOCALTECH_FTXXXX_MULTITOUCH) },
+
 	/* GeneralTouch panel */
 	{ .driver_data = MT_CLS_GENERALTOUCH_TWOFINGERS,
 		MT_USB_DEVICE(USB_VENDOR_ID_GENERAL_TOUCH,
diff --git a/drivers/hid/hid-sensor-hub.c b/drivers/hid/hid-sensor-hub.c
index 46f4480..9c22e14 100644
--- a/drivers/hid/hid-sensor-hub.c
+++ b/drivers/hid/hid-sensor-hub.c
@@ -665,6 +665,9 @@
 	{ HID_DEVICE(HID_BUS_ANY, HID_GROUP_SENSOR_HUB, USB_VENDOR_ID_INTEL_1,
 			USB_DEVICE_ID_INTEL_HID_SENSOR),
 			.driver_data = HID_SENSOR_HUB_ENUM_QUIRK},
+	{ HID_DEVICE(HID_BUS_ANY, HID_GROUP_SENSOR_HUB, USB_VENDOR_ID_STM_0,
+			USB_DEVICE_ID_STM_HID_SENSOR),
+			.driver_data = HID_SENSOR_HUB_ENUM_QUIRK},
 	{ HID_DEVICE(HID_BUS_ANY, HID_GROUP_SENSOR_HUB, HID_ANY_ID,
 		     HID_ANY_ID) },
 	{ }
diff --git a/drivers/hid/i2c-hid/i2c-hid.c b/drivers/hid/i2c-hid/i2c-hid.c
index d1f81f5..42eebd1 100644
--- a/drivers/hid/i2c-hid/i2c-hid.c
+++ b/drivers/hid/i2c-hid/i2c-hid.c
@@ -582,7 +582,7 @@
 	int ret;
 	int len = i2c_hid_get_report_length(rep) - 2;
 
-	buf = kzalloc(len, GFP_KERNEL);
+	buf = hid_alloc_report_buf(rep, GFP_KERNEL);
 	if (!buf)
 		return;
 
diff --git a/drivers/hid/usbhid/hid-quirks.c b/drivers/hid/usbhid/hid-quirks.c
index 175ec0a..dbd8387 100644
--- a/drivers/hid/usbhid/hid-quirks.c
+++ b/drivers/hid/usbhid/hid-quirks.c
@@ -74,6 +74,7 @@
 	{ USB_VENDOR_ID_FREESCALE, USB_DEVICE_ID_FREESCALE_MX28, HID_QUIRK_NOGET },
 	{ USB_VENDOR_ID_MGE, USB_DEVICE_ID_MGE_UPS, HID_QUIRK_NOGET },
 	{ USB_VENDOR_ID_MSI, USB_DEVICE_ID_MSI_GX680R_LED_PANEL, HID_QUIRK_NO_INIT_REPORTS },
+	{ USB_VENDOR_ID_NEXIO, USB_DEVICE_ID_NEXIO_MULTITOUCH_PTI0750, HID_QUIRK_NO_INIT_REPORTS },
 	{ USB_VENDOR_ID_NOVATEK, USB_DEVICE_ID_NOVATEK_MOUSE, HID_QUIRK_NO_INIT_REPORTS },
 	{ USB_VENDOR_ID_PIXART, USB_DEVICE_ID_PIXART_OPTICAL_TOUCH_SCREEN, HID_QUIRK_NO_INIT_REPORTS },
 	{ USB_VENDOR_ID_PIXART, USB_DEVICE_ID_PIXART_OPTICAL_TOUCH_SCREEN1, HID_QUIRK_NO_INIT_REPORTS },
diff --git a/drivers/hv/channel.c b/drivers/hv/channel.c
index cea623c..69ea36f 100644
--- a/drivers/hv/channel.c
+++ b/drivers/hv/channel.c
@@ -209,7 +209,6 @@
 {
 	int i;
 	int pagecount;
-	unsigned long long pfn;
 	struct vmbus_channel_gpadl_header *gpadl_header;
 	struct vmbus_channel_gpadl_body *gpadl_body;
 	struct vmbus_channel_msginfo *msgheader;
@@ -219,7 +218,6 @@
 	int pfnsum, pfncount, pfnleft, pfncurr, pfnsize;
 
 	pagecount = size >> PAGE_SHIFT;
-	pfn = virt_to_phys(kbuffer) >> PAGE_SHIFT;
 
 	/* do we need a gpadl body msg */
 	pfnsize = MAX_SIZE_CHANNEL_MESSAGE -
@@ -248,7 +246,8 @@
 		gpadl_header->range[0].byte_offset = 0;
 		gpadl_header->range[0].byte_count = size;
 		for (i = 0; i < pfncount; i++)
-			gpadl_header->range[0].pfn_array[i] = pfn+i;
+			gpadl_header->range[0].pfn_array[i] = slow_virt_to_phys(
+				kbuffer + PAGE_SIZE * i) >> PAGE_SHIFT;
 		*msginfo = msgheader;
 		*messagecount = 1;
 
@@ -301,7 +300,9 @@
 			 * so the hypervisor guarantees that this is ok.
 			 */
 			for (i = 0; i < pfncurr; i++)
-				gpadl_body->pfn[i] = pfn + pfnsum + i;
+				gpadl_body->pfn[i] = slow_virt_to_phys(
+					kbuffer + PAGE_SIZE * (pfnsum + i)) >>
+					PAGE_SHIFT;
 
 			/* add to msg header */
 			list_add_tail(&msgbody->msglistentry,
@@ -327,7 +328,8 @@
 		gpadl_header->range[0].byte_offset = 0;
 		gpadl_header->range[0].byte_count = size;
 		for (i = 0; i < pagecount; i++)
-			gpadl_header->range[0].pfn_array[i] = pfn+i;
+			gpadl_header->range[0].pfn_array[i] = slow_virt_to_phys(
+				kbuffer + PAGE_SIZE * i) >> PAGE_SHIFT;
 
 		*msginfo = msgheader;
 		*messagecount = 1;
@@ -344,7 +346,7 @@
  * vmbus_establish_gpadl - Establish a GPADL for the specified buffer
  *
  * @channel: a channel
- * @kbuffer: from kmalloc
+ * @kbuffer: from kmalloc or vmalloc
  * @size: page-size multiple
  * @gpadl_handle: some funky thing
  */
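
Replacing pfn + i with a per-page slow_virt_to_phys() lookup above matters because the GPADL buffer may now come from vmalloc(): such memory is virtually contiguous but not physically contiguous, so each page's PFN has to be resolved individually. A toy illustration of why the first-PFN-plus-offset shortcut breaks (the page map here is invented):

#include <stdint.h>
#include <stdio.h>

#define PAGE_SHIFT	12
#define PAGE_SIZE	(1UL << PAGE_SHIFT)

/* toy virtual-to-physical page map: deliberately non-contiguous */
static const uint64_t toy_phys_page[4] = { 0x9000, 0x3000, 0x7000, 0x2000 };

static uint64_t toy_virt_to_phys(uint64_t vaddr)
{
	return toy_phys_page[(vaddr >> PAGE_SHIFT) & 3] | (vaddr & (PAGE_SIZE - 1));
}

int main(void)
{
	uint64_t kbuffer = 0;		/* start of a 4-page "vmalloc" area */
	uint64_t first_pfn = toy_virt_to_phys(kbuffer) >> PAGE_SHIFT;

	for (unsigned int i = 0; i < 4; i++) {
		uint64_t pfn = toy_virt_to_phys(kbuffer + PAGE_SIZE * i) >> PAGE_SHIFT;

		printf("page %u: real pfn 0x%llx, first_pfn+i 0x%llx\n", i,
		       (unsigned long long)pfn,
		       (unsigned long long)(first_pfn + i));
	}
	return 0;
}
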
diff --git a/drivers/hv/connection.c b/drivers/hv/connection.c
index af6edf9..f2d7bf9 100644
--- a/drivers/hv/connection.c
+++ b/drivers/hv/connection.c
@@ -67,7 +67,6 @@
 	int ret = 0;
 	struct vmbus_channel_initiate_contact *msg;
 	unsigned long flags;
-	int t;
 
 	init_completion(&msginfo->waitevent);
 
@@ -78,6 +77,8 @@
 	msg->interrupt_page = virt_to_phys(vmbus_connection.int_page);
 	msg->monitor_page1 = virt_to_phys(vmbus_connection.monitor_pages[0]);
 	msg->monitor_page2 = virt_to_phys(vmbus_connection.monitor_pages[1]);
+	if (version == VERSION_WIN8)
+		msg->target_vcpu = hv_context.vp_index[smp_processor_id()];
 
 	/*
 	 * Add to list before we send the request since we may
@@ -100,15 +101,7 @@
 	}
 
 	/* Wait for the connection response */
-	t =  wait_for_completion_timeout(&msginfo->waitevent, 5*HZ);
-	if (t == 0) {
-		spin_lock_irqsave(&vmbus_connection.channelmsg_lock,
-				flags);
-		list_del(&msginfo->msglistentry);
-		spin_unlock_irqrestore(&vmbus_connection.channelmsg_lock,
-					flags);
-		return -ETIMEDOUT;
-	}
+	wait_for_completion(&msginfo->waitevent);
 
 	spin_lock_irqsave(&vmbus_connection.channelmsg_lock, flags);
 	list_del(&msginfo->msglistentry);
diff --git a/drivers/hwmon/Kconfig b/drivers/hwmon/Kconfig
index 52d548f1..5ce43d8 100644
--- a/drivers/hwmon/Kconfig
+++ b/drivers/hwmon/Kconfig
@@ -573,8 +573,8 @@
 	help
 	  If you say yes here you get support for ITE IT8705F, IT8712F,
 	  IT8716F, IT8718F, IT8720F, IT8721F, IT8726F, IT8728F, IT8758E,
-	  IT8771E, IT8772E, IT8782F, and IT8783E/F sensor chips, and the
-	  SiS950 clone.
+	  IT8771E, IT8772E, IT8782F, IT8783E/F and IT8603E sensor chips,
+	  and the SiS950 clone.
 
 	  This driver can also be built as a module.  If so, the module
 	  will be called it87.
@@ -650,6 +650,7 @@
 config SENSORS_LM75
 	tristate "National Semiconductor LM75 and compatibles"
 	depends on I2C
+	depends on THERMAL || !THERMAL_OF
 	help
 	  If you say yes here you get support for one common type of
 	  temperature sensor chip, with models including:
@@ -1285,6 +1286,7 @@
 config SENSORS_TMP102
 	tristate "Texas Instruments TMP102"
 	depends on I2C
+	depends on THERMAL || !THERMAL_OF
 	help
 	  If you say yes here you get support for Texas Instruments TMP102
 	  sensor chips.
diff --git a/drivers/hwmon/adm1025.c b/drivers/hwmon/adm1025.c
index 7e16e5d..9ffc4c8 100644
--- a/drivers/hwmon/adm1025.c
+++ b/drivers/hwmon/adm1025.c
@@ -2,7 +2,7 @@
  * adm1025.c
  *
  * Copyright (C) 2000       Chen-Yuan Wu <gwu@esoft.com>
- * Copyright (C) 2003-2009  Jean Delvare <khali@linux-fr.org>
+ * Copyright (C) 2003-2009  Jean Delvare <jdelvare@suse.de>
  *
  * The ADM1025 is a sensor chip made by Analog Devices. It reports up to 6
  * voltages (including its own power source) and up to two temperatures
@@ -615,6 +615,6 @@
 
 module_i2c_driver(adm1025_driver);
 
-MODULE_AUTHOR("Jean Delvare <khali@linux-fr.org>");
+MODULE_AUTHOR("Jean Delvare <jdelvare@suse.de>");
 MODULE_DESCRIPTION("ADM1025 driver");
 MODULE_LICENSE("GPL");
diff --git a/drivers/hwmon/adm1029.c b/drivers/hwmon/adm1029.c
index 9ee5e06..d19c790 100644
--- a/drivers/hwmon/adm1029.c
+++ b/drivers/hwmon/adm1029.c
@@ -3,7 +3,7 @@
  *
  * Copyright (C) 2006 Corentin LABBE <corentin.labbe@geomatys.fr>
  *
- * Based on LM83 Driver by Jean Delvare <khali@linux-fr.org>
+ * Based on LM83 Driver by Jean Delvare <jdelvare@suse.de>
  *
  * Give only processor, motherboard temperatures and fan tachs
  * Very rare chip please let me know if you use it
diff --git a/drivers/hwmon/adm1031.c b/drivers/hwmon/adm1031.c
index 253ea39..a8a540c 100644
--- a/drivers/hwmon/adm1031.c
+++ b/drivers/hwmon/adm1031.c
@@ -4,7 +4,7 @@
  * Based on lm75.c and lm85.c
  * Supports adm1030 / adm1031
  * Copyright (C) 2004 Alexandre d'Alton <alex@alexdalton.org>
- * Reworked by Jean Delvare <khali@linux-fr.org>
+ * Reworked by Jean Delvare <jdelvare@suse.de>
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License as published by
diff --git a/drivers/hwmon/adt7475.c b/drivers/hwmon/adt7475.c
index 22d008b..3cefd1a 100644
--- a/drivers/hwmon/adt7475.c
+++ b/drivers/hwmon/adt7475.c
@@ -3,7 +3,7 @@
  * Copyright (C) 2007-2008, Advanced Micro Devices, Inc.
  * Copyright (C) 2008 Jordan Crouse <jordan@cosmicpenguin.net>
  * Copyright (C) 2008 Hans de Goede <hdegoede@redhat.com>
- * Copyright (C) 2009 Jean Delvare <khali@linux-fr.org>
+ * Copyright (C) 2009 Jean Delvare <jdelvare@suse.de>
  *
  * Derived from the lm83 driver by Jean Delvare
  *
diff --git a/drivers/hwmon/da9055-hwmon.c b/drivers/hwmon/da9055-hwmon.c
index 029ecab..73b3865 100644
--- a/drivers/hwmon/da9055-hwmon.c
+++ b/drivers/hwmon/da9055-hwmon.c
@@ -278,10 +278,6 @@
 	if (hwmon_irq < 0)
 		return hwmon_irq;
 
-	hwmon_irq = regmap_irq_get_virq(hwmon->da9055->irq_data, hwmon_irq);
-	if (hwmon_irq < 0)
-		return hwmon_irq;
-
 	ret = devm_request_threaded_irq(&pdev->dev, hwmon_irq,
 					NULL, da9055_auxadc_irq,
 					IRQF_TRIGGER_HIGH | IRQF_ONESHOT,
diff --git a/drivers/hwmon/ds1621.c b/drivers/hwmon/ds1621.c
index 872d767..fc6f5d5 100644
--- a/drivers/hwmon/ds1621.c
+++ b/drivers/hwmon/ds1621.c
@@ -4,7 +4,7 @@
  * Christian W. Zuckschwerdt  <zany@triq.net>  2000-11-23
  * based on lm75.c by Frodo Looijaard <frodol@dds.nl>
  * Ported to Linux 2.6 by Aurelien Jarno <aurelien@aurel32.net> with
- * the help of Jean Delvare <khali@linux-fr.org>
+ * the help of Jean Delvare <jdelvare@suse.de>
  *
  * The DS1621 device is a digital temperature/thermometer with 9-bit
  * resolution, a thermal alarm output (Tout), and user-defined minimum
diff --git a/drivers/hwmon/emc6w201.c b/drivers/hwmon/emc6w201.c
index 82e661e..f76a74c 100644
--- a/drivers/hwmon/emc6w201.c
+++ b/drivers/hwmon/emc6w201.c
@@ -1,6 +1,6 @@
 /*
  * emc6w201.c - Hardware monitoring driver for the SMSC EMC6W201
- * Copyright (C) 2011  Jean Delvare <khali@linux-fr.org>
+ * Copyright (C) 2011  Jean Delvare <jdelvare@suse.de>
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License as published by
@@ -548,6 +548,6 @@
 
 module_i2c_driver(emc6w201_driver);
 
-MODULE_AUTHOR("Jean Delvare <khali@linux-fr.org>");
+MODULE_AUTHOR("Jean Delvare <jdelvare@suse.de>");
 MODULE_DESCRIPTION("SMSC EMC6W201 hardware monitoring driver");
 MODULE_LICENSE("GPL");
diff --git a/drivers/hwmon/f71805f.c b/drivers/hwmon/f71805f.c
index 15b7f52..1a8aa12 100644
--- a/drivers/hwmon/f71805f.c
+++ b/drivers/hwmon/f71805f.c
@@ -1,7 +1,7 @@
 /*
  * f71805f.c - driver for the Fintek F71805F/FG and F71872F/FG Super-I/O
  *             chips integrated hardware monitoring features
- * Copyright (C) 2005-2006  Jean Delvare <khali@linux-fr.org>
+ * Copyright (C) 2005-2006  Jean Delvare <jdelvare@suse.de>
  *
  * The F71805F/FG is a LPC Super-I/O chip made by Fintek. It integrates
  * complete hardware monitoring features: voltage, fan and temperature
diff --git a/drivers/hwmon/gl518sm.c b/drivers/hwmon/gl518sm.c
index 95257a5..1e98305 100644
--- a/drivers/hwmon/gl518sm.c
+++ b/drivers/hwmon/gl518sm.c
@@ -4,7 +4,7 @@
  * Copyright (C) 1998, 1999 Frodo Looijaard <frodol@dds.nl> and
  * Kyosti Malkki <kmalkki@cc.hut.fi>
  * Copyright (C) 2004 Hong-Gunn Chew <hglinux@gunnet.org> and
- * Jean Delvare <khali@linux-fr.org>
+ * Jean Delvare <jdelvare@suse.de>
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License as published by
diff --git a/drivers/hwmon/it87.c b/drivers/hwmon/it87.c
index 29ffa27..70749fc 100644
--- a/drivers/hwmon/it87.c
+++ b/drivers/hwmon/it87.c
@@ -10,7 +10,8 @@
  *  This driver supports only the Environment Controller in the IT8705F and
  *  similar parts.  The other devices are supported by different drivers.
  *
- *  Supports: IT8705F  Super I/O chip w/LPC interface
+ *  Supports: IT8603E  Super I/O chip w/LPC interface
+ *            IT8705F  Super I/O chip w/LPC interface
  *            IT8712F  Super I/O chip w/LPC interface
  *            IT8716F  Super I/O chip w/LPC interface
  *            IT8718F  Super I/O chip w/LPC interface
@@ -26,7 +27,7 @@
  *            Sis950   A clone of the IT8705F
  *
  *  Copyright (C) 2001 Chris Gauthron
- *  Copyright (C) 2005-2010 Jean Delvare <khali@linux-fr.org>
+ *  Copyright (C) 2005-2010 Jean Delvare <jdelvare@suse.de>
  *
  *  This program is free software; you can redistribute it and/or modify
  *  it under the terms of the GNU General Public License as published by
@@ -64,7 +65,7 @@
 #define DRVNAME "it87"
 
 enum chips { it87, it8712, it8716, it8718, it8720, it8721, it8728, it8771,
-	     it8772, it8782, it8783 };
+	     it8772, it8782, it8783, it8603 };
 
 static unsigned short force_id;
 module_param(force_id, ushort, 0);
@@ -146,6 +147,7 @@
 #define IT8772E_DEVID 0x8772
 #define IT8782F_DEVID 0x8782
 #define IT8783E_DEVID 0x8783
+#define IT8306E_DEVID 0x8603
 #define IT87_ACT_REG  0x30
 #define IT87_BASE_REG 0x60
 
@@ -315,6 +317,12 @@
 		  | FEAT_TEMP_OLD_PECI,
 		.old_peci_mask = 0x4,
 	},
+	[it8603] = {
+		.name = "it8603",
+		.features = FEAT_NEWER_AUTOPWM | FEAT_12MV_ADC | FEAT_16BIT_FANS
+		  | FEAT_TEMP_OFFSET | FEAT_TEMP_PECI,
+		.peci_mask = 0x07,
+	},
 };
 
 #define has_16bit_fans(data)	((data)->features & FEAT_16BIT_FANS)
@@ -361,7 +369,7 @@
 	unsigned long last_updated;	/* In jiffies */
 
 	u16 in_scaled;		/* Internal voltage sensors are scaled */
-	u8 in[9][3];		/* [nr][0]=in, [1]=min, [2]=max */
+	u8 in[10][3];		/* [nr][0]=in, [1]=min, [2]=max */
 	u8 has_fan;		/* Bitfield, fans enabled */
 	u16 fan[5][2];		/* Register values, [nr][0]=fan, [1]=min */
 	u8 has_temp;		/* Bitfield, temp sensors enabled */
@@ -578,6 +586,7 @@
 			    7, 2);
 
 static SENSOR_DEVICE_ATTR_2(in8_input, S_IRUGO, show_in, NULL, 8, 0);
+static SENSOR_DEVICE_ATTR_2(in9_input, S_IRUGO, show_in, NULL, 9, 0);
 
 /* 3 temperatures */
 static ssize_t show_temp(struct device *dev, struct device_attribute *attr,
@@ -734,7 +743,7 @@
 {
 	int ctrl = data->fan_main_ctrl & (1 << nr);
 
-	if (ctrl == 0)					/* Full speed */
+	if (ctrl == 0 && data->type != it8603)		/* Full speed */
 		return 0;
 	if (data->pwm_ctrl[nr] & 0x80)			/* Automatic mode */
 		return 2;
@@ -929,6 +938,10 @@
 			return -EINVAL;
 	}
 
+	/* IT8603E does not have on/off mode */
+	if (val == 0 && data->type == it8603)
+		return -EINVAL;
+
 	mutex_lock(&data->update_lock);
 
 	if (val == 0) {
@@ -948,10 +961,13 @@
 		else					/* Automatic mode */
 			data->pwm_ctrl[nr] = 0x80 | data->pwm_temp_map[nr];
 		it87_write_value(data, IT87_REG_PWM(nr), data->pwm_ctrl[nr]);
-		/* set SmartGuardian mode */
-		data->fan_main_ctrl |= (1 << nr);
-		it87_write_value(data, IT87_REG_FAN_MAIN_CTRL,
-				 data->fan_main_ctrl);
+
+		if (data->type != it8603) {
+			/* set SmartGuardian mode */
+			data->fan_main_ctrl |= (1 << nr);
+			it87_write_value(data, IT87_REG_FAN_MAIN_CTRL,
+					 data->fan_main_ctrl);
+		}
 	}
 
 	mutex_unlock(&data->update_lock);
@@ -1415,6 +1431,8 @@
 static SENSOR_DEVICE_ATTR(in3_label, S_IRUGO, show_label, NULL, 0);
 static SENSOR_DEVICE_ATTR(in7_label, S_IRUGO, show_label, NULL, 1);
 static SENSOR_DEVICE_ATTR(in8_label, S_IRUGO, show_label, NULL, 2);
+/* special AVCC3 IT8306E in9 */
+static SENSOR_DEVICE_ATTR(in9_label, S_IRUGO, show_label, NULL, 0);
 
 static ssize_t show_name(struct device *dev, struct device_attribute
 			 *devattr, char *buf)
@@ -1424,7 +1442,7 @@
 }
 static DEVICE_ATTR(name, S_IRUGO, show_name, NULL);
 
-static struct attribute *it87_attributes_in[9][5] = {
+static struct attribute *it87_attributes_in[10][5] = {
 {
 	&sensor_dev_attr_in0_input.dev_attr.attr,
 	&sensor_dev_attr_in0_min.dev_attr.attr,
@@ -1476,9 +1494,12 @@
 }, {
 	&sensor_dev_attr_in8_input.dev_attr.attr,
 	NULL
+}, {
+	&sensor_dev_attr_in9_input.dev_attr.attr,
+	NULL
 } };
 
-static const struct attribute_group it87_group_in[9] = {
+static const struct attribute_group it87_group_in[10] = {
 	{ .attrs = it87_attributes_in[0] },
 	{ .attrs = it87_attributes_in[1] },
 	{ .attrs = it87_attributes_in[2] },
@@ -1488,6 +1509,7 @@
 	{ .attrs = it87_attributes_in[6] },
 	{ .attrs = it87_attributes_in[7] },
 	{ .attrs = it87_attributes_in[8] },
+	{ .attrs = it87_attributes_in[9] },
 };
 
 static struct attribute *it87_attributes_temp[3][6] = {
@@ -1546,7 +1568,8 @@
 	&sensor_dev_attr_in5_beep.dev_attr.attr,
 	&sensor_dev_attr_in6_beep.dev_attr.attr,
 	&sensor_dev_attr_in7_beep.dev_attr.attr,
-	NULL
+	NULL,
+	NULL,
 };
 
 static struct attribute *it87_attributes_temp_beep[] = {
@@ -1685,6 +1708,7 @@
 	&sensor_dev_attr_in3_label.dev_attr.attr,
 	&sensor_dev_attr_in7_label.dev_attr.attr,
 	&sensor_dev_attr_in8_label.dev_attr.attr,
+	&sensor_dev_attr_in9_label.dev_attr.attr,
 	NULL
 };
 
@@ -1742,6 +1766,9 @@
 	case IT8783E_DEVID:
 		sio_data->type = it8783;
 		break;
+	case IT8306E_DEVID:
+		sio_data->type = it8603;
+		break;
 	case 0xffff:	/* No device at all */
 		goto exit;
 	default:
@@ -1763,11 +1790,16 @@
 
 	err = 0;
 	sio_data->revision = superio_inb(DEVREV) & 0x0f;
-	pr_info("Found IT%04xF chip at 0x%x, revision %d\n",
-		chip_type, *address, sio_data->revision);
+	pr_info("Found IT%04x%c chip at 0x%x, revision %d\n", chip_type,
+		chip_type == 0x8771 || chip_type == 0x8772 ||
+		chip_type == 0x8603 ? 'E' : 'F', *address,
+		sio_data->revision);
 
 	/* in8 (Vbat) is always internal */
 	sio_data->internal = (1 << 2);
+	/* Only the IT8603E has in9 */
+	if (sio_data->type != it8603)
+		sio_data->skip_in |= (1 << 9);
 
 	/* Read GPIO config and VID value from LDN 7 (GPIO) */
 	if (sio_data->type == it87) {
@@ -1844,7 +1876,38 @@
 			sio_data->internal |= (1 << 1);
 
 		sio_data->beep_pin = superio_inb(IT87_SIO_BEEP_PIN_REG) & 0x3f;
+	} else if (sio_data->type == it8603) {
+		int reg27, reg29;
 
+		sio_data->skip_vid = 1;	/* No VID */
+		superio_select(GPIO);
+
+		reg27 = superio_inb(IT87_SIO_GPIO3_REG);
+
+		/* Check if fan3 is there or not */
+		if (reg27 & (1 << 6))
+			sio_data->skip_pwm |= (1 << 2);
+		if (reg27 & (1 << 7))
+			sio_data->skip_fan |= (1 << 2);
+
+		/* Check if fan2 is there or not */
+		reg29 = superio_inb(IT87_SIO_GPIO5_REG);
+		if (reg29 & (1 << 1))
+			sio_data->skip_pwm |= (1 << 1);
+		if (reg29 & (1 << 2))
+			sio_data->skip_fan |= (1 << 1);
+
+		sio_data->skip_in |= (1 << 5); /* No VIN5 */
+		sio_data->skip_in |= (1 << 6); /* No VIN6 */
+
+		/* no fan4 */
+		sio_data->skip_pwm |= (1 << 3);
+		sio_data->skip_fan |= (1 << 3);
+
+		sio_data->internal |= (1 << 1); /* in7 is VSB */
+		sio_data->internal |= (1 << 3); /* in9 is AVCC */
+
+		sio_data->beep_pin = superio_inb(IT87_SIO_BEEP_PIN_REG) & 0x3f;
 	} else {
 		int reg;
 		bool uart6;
@@ -1966,7 +2029,7 @@
 	int i;
 
 	sysfs_remove_group(&dev->kobj, &it87_group);
-	for (i = 0; i < 9; i++) {
+	for (i = 0; i < 10; i++) {
 		if (sio_data->skip_in & (1 << i))
 			continue;
 		sysfs_remove_group(&dev->kobj, &it87_group_in[i]);
@@ -2080,6 +2143,8 @@
 			data->in_scaled |= (1 << 7);	/* in7 is VSB */
 		if (sio_data->internal & (1 << 2))
 			data->in_scaled |= (1 << 8);	/* in8 is Vbat */
+		if (sio_data->internal & (1 << 3))
+			data->in_scaled |= (1 << 9);	/* in9 is AVCC */
 	} else if (sio_data->type == it8782 || sio_data->type == it8783) {
 		if (sio_data->internal & (1 << 0))
 			data->in_scaled |= (1 << 3);	/* in3 is VCC5V */
@@ -2102,7 +2167,7 @@
 	if (err)
 		return err;
 
-	for (i = 0; i < 9; i++) {
+	for (i = 0; i < 10; i++) {
 		if (sio_data->skip_in & (1 << i))
 			continue;
 		err = sysfs_create_group(&dev->kobj, &it87_group_in[i]);
@@ -2202,7 +2267,7 @@
 	}
 
 	/* Export labels for internal sensors */
-	for (i = 0; i < 3; i++) {
+	for (i = 0; i < 4; i++) {
 		if (!(sio_data->internal & (1 << i)))
 			continue;
 		err = sysfs_create_file(&dev->kobj,
@@ -2383,8 +2448,9 @@
 	}
 	data->has_fan = (data->fan_main_ctrl >> 4) & 0x07;
 
-	/* Set tachometers to 16-bit mode if needed */
-	if (has_16bit_fans(data)) {
+	/* Set tachometers to 16-bit mode if needed, IT8603E (and IT8728F?)
+	 * has it by default */
+	if (has_16bit_fans(data) && data->type != it8603) {
 		tmp = it87_read_value(data, IT87_REG_FAN_16BIT);
 		if (~tmp & 0x07 & data->has_fan) {
 			dev_dbg(&pdev->dev,
@@ -2464,6 +2530,8 @@
 		}
 		/* in8 (battery) has no limit registers */
 		data->in[8][0] = it87_read_value(data, IT87_REG_VIN(8));
+		if (data->type == it8603)
+			data->in[9][0] = it87_read_value(data, 0x2f);
 
 		for (i = 0; i < 5; i++) {
 			/* Skip disabled fans */
@@ -2620,7 +2688,7 @@
 }
 
 
-MODULE_AUTHOR("Chris Gauthron, Jean Delvare <khali@linux-fr.org>");
+MODULE_AUTHOR("Chris Gauthron, Jean Delvare <jdelvare@suse.de>");
 MODULE_DESCRIPTION("IT8705F/IT871xF/IT872xF hardware monitoring driver");
 module_param(update_vbat, bool, 0);
 MODULE_PARM_DESC(update_vbat, "Update vbat if set else return powerup value");
diff --git a/drivers/hwmon/lm63.c b/drivers/hwmon/lm63.c
index d0def50..b4ad598 100644
--- a/drivers/hwmon/lm63.c
+++ b/drivers/hwmon/lm63.c
@@ -1,7 +1,7 @@
 /*
  * lm63.c - driver for the National Semiconductor LM63 temperature sensor
  *          with integrated fan control
- * Copyright (C) 2004-2008  Jean Delvare <khali@linux-fr.org>
+ * Copyright (C) 2004-2008  Jean Delvare <jdelvare@suse.de>
  * Based on the lm90 driver.
  *
  * The LM63 is a sensor chip made by National Semiconductor. It measures
@@ -1202,6 +1202,6 @@
 
 module_i2c_driver(lm63_driver);
 
-MODULE_AUTHOR("Jean Delvare <khali@linux-fr.org>");
+MODULE_AUTHOR("Jean Delvare <jdelvare@suse.de>");
 MODULE_DESCRIPTION("LM63 driver");
 MODULE_LICENSE("GPL");
diff --git a/drivers/hwmon/lm78.c b/drivers/hwmon/lm78.c
index a2f3b4a..9efadfc 100644
--- a/drivers/hwmon/lm78.c
+++ b/drivers/hwmon/lm78.c
@@ -2,7 +2,7 @@
  * lm78.c - Part of lm_sensors, Linux kernel modules for hardware
  *	    monitoring
  * Copyright (c) 1998, 1999  Frodo Looijaard <frodol@dds.nl>
- * Copyright (c) 2007, 2011  Jean Delvare <khali@linux-fr.org>
+ * Copyright (c) 2007, 2011  Jean Delvare <jdelvare@suse.de>
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License as published by
@@ -1108,7 +1108,7 @@
 	i2c_del_driver(&lm78_driver);
 }
 
-MODULE_AUTHOR("Frodo Looijaard, Jean Delvare <khali@linux-fr.org>");
+MODULE_AUTHOR("Frodo Looijaard, Jean Delvare <jdelvare@suse.de>");
 MODULE_DESCRIPTION("LM78/LM79 driver");
 MODULE_LICENSE("GPL");
 
diff --git a/drivers/hwmon/lm83.c b/drivers/hwmon/lm83.c
index e998034..abd2702 100644
--- a/drivers/hwmon/lm83.c
+++ b/drivers/hwmon/lm83.c
@@ -1,7 +1,7 @@
 /*
  * lm83.c - Part of lm_sensors, Linux kernel modules for hardware
  *          monitoring
- * Copyright (C) 2003-2009  Jean Delvare <khali@linux-fr.org>
+ * Copyright (C) 2003-2009  Jean Delvare <jdelvare@suse.de>
  *
  * Heavily inspired from the lm78, lm75 and adm1021 drivers. The LM83 is
  * a sensor chip made by National Semiconductor. It reports up to four
@@ -427,6 +427,6 @@
 
 module_i2c_driver(lm83_driver);
 
-MODULE_AUTHOR("Jean Delvare <khali@linux-fr.org>");
+MODULE_AUTHOR("Jean Delvare <jdelvare@suse.de>");
 MODULE_DESCRIPTION("LM83 driver");
 MODULE_LICENSE("GPL");
diff --git a/drivers/hwmon/lm85.c b/drivers/hwmon/lm85.c
index 3894c40..bed4af35 100644
--- a/drivers/hwmon/lm85.c
+++ b/drivers/hwmon/lm85.c
@@ -5,7 +5,7 @@
  * Copyright (c) 2002, 2003  Philip Pokorny <ppokorny@penguincomputing.com>
  * Copyright (c) 2003        Margit Schubert-While <margitsw@t-online.de>
  * Copyright (c) 2004        Justin Thiessen <jthiessen@penguincomputing.com>
- * Copyright (C) 2007--2009  Jean Delvare <khali@linux-fr.org>
+ * Copyright (C) 2007--2009  Jean Delvare <jdelvare@suse.de>
  *
  * Chip details at	      <http://www.national.com/ds/LM/LM85.pdf>
  *
diff --git a/drivers/hwmon/lm87.c b/drivers/hwmon/lm87.c
index 333092c..4c5f202 100644
--- a/drivers/hwmon/lm87.c
+++ b/drivers/hwmon/lm87.c
@@ -5,7 +5,7 @@
  *                          Philip Edelbrock <phil@netroedge.com>
  *                          Stephen Rousset <stephen.rousset@rocketlogix.com>
  *                          Dan Eaton <dan.eaton@rocketlogix.com>
- * Copyright (C) 2004-2008  Jean Delvare <khali@linux-fr.org>
+ * Copyright (C) 2004-2008  Jean Delvare <jdelvare@suse.de>
  *
  * Original port to Linux 2.6 by Jeff Oliver.
  *
@@ -1011,6 +1011,6 @@
 
 module_i2c_driver(lm87_driver);
 
-MODULE_AUTHOR("Jean Delvare <khali@linux-fr.org> and others");
+MODULE_AUTHOR("Jean Delvare <jdelvare@suse.de> and others");
 MODULE_DESCRIPTION("LM87 driver");
 MODULE_LICENSE("GPL");
diff --git a/drivers/hwmon/lm90.c b/drivers/hwmon/lm90.c
index 8b8f3aa..701e952 100644
--- a/drivers/hwmon/lm90.c
+++ b/drivers/hwmon/lm90.c
@@ -1,7 +1,7 @@
 /*
  * lm90.c - Part of lm_sensors, Linux kernel modules for hardware
  *          monitoring
- * Copyright (C) 2003-2010  Jean Delvare <khali@linux-fr.org>
+ * Copyright (C) 2003-2010  Jean Delvare <jdelvare@suse.de>
  *
  * Based on the lm83 driver. The LM90 is a sensor chip made by National
  * Semiconductor. It reports up to two temperatures (its own plus up to
@@ -1679,6 +1679,6 @@
 
 module_i2c_driver(lm90_driver);
 
-MODULE_AUTHOR("Jean Delvare <khali@linux-fr.org>");
+MODULE_AUTHOR("Jean Delvare <jdelvare@suse.de>");
 MODULE_DESCRIPTION("LM90/ADM1032 driver");
 MODULE_LICENSE("GPL");
diff --git a/drivers/hwmon/lm92.c b/drivers/hwmon/lm92.c
index 71626f3..9d0e87a 100644
--- a/drivers/hwmon/lm92.c
+++ b/drivers/hwmon/lm92.c
@@ -1,6 +1,6 @@
 /*
  * lm92 - Hardware monitoring driver
- * Copyright (C) 2005-2008  Jean Delvare <khali@linux-fr.org>
+ * Copyright (C) 2005-2008  Jean Delvare <jdelvare@suse.de>
  *
  * Based on the lm90 driver, with some ideas taken from the lm_sensors
  * lm92 driver as well.
@@ -440,6 +440,6 @@
 
 module_i2c_driver(lm92_driver);
 
-MODULE_AUTHOR("Jean Delvare <khali@linux-fr.org>");
+MODULE_AUTHOR("Jean Delvare <jdelvare@suse.de>");
 MODULE_DESCRIPTION("LM92/MAX6635 driver");
 MODULE_LICENSE("GPL");
diff --git a/drivers/hwmon/lm93.c b/drivers/hwmon/lm93.c
index a6f4605..6f1c6c0 100644
--- a/drivers/hwmon/lm93.c
+++ b/drivers/hwmon/lm93.c
@@ -12,7 +12,7 @@
  *	Copyright (c) 2003       Margit Schubert-While <margitsw@t-online.de>
  *
  * derived in part from w83l785ts.c:
- *	Copyright (c) 2003-2004 Jean Delvare <khali@linux-fr.org>
+ *	Copyright (c) 2003-2004 Jean Delvare <jdelvare@suse.de>
  *
  * Ported to Linux 2.6 by Eric J. Bowersox <ericb@aspsys.com>
  *	Copyright (c) 2005 Aspen Systems, Inc.
diff --git a/drivers/hwmon/max1619.c b/drivers/hwmon/max1619.c
index 445e5d4..6638e99 100644
--- a/drivers/hwmon/max1619.c
+++ b/drivers/hwmon/max1619.c
@@ -2,7 +2,7 @@
  * max1619.c - Part of lm_sensors, Linux kernel modules for hardware
  *             monitoring
  * Copyright (C) 2003-2004 Oleksij Rempel <bug-track@fisher-privat.net>
- *                         Jean Delvare <khali@linux-fr.org>
+ *                         Jean Delvare <jdelvare@suse.de>
  *
  * Based on the lm90 driver. The MAX1619 is a sensor chip made by Maxim.
  * It reports up to two temperatures (its own plus up to
@@ -357,7 +357,6 @@
 
 module_i2c_driver(max1619_driver);
 
-MODULE_AUTHOR("Oleksij Rempel <bug-track@fisher-privat.net> and "
-	"Jean Delvare <khali@linux-fr.org>");
+MODULE_AUTHOR("Oleksij Rempel <bug-track@fisher-privat.net>, Jean Delvare <jdelvare@suse.de>");
 MODULE_DESCRIPTION("MAX1619 sensor driver");
 MODULE_LICENSE("GPL");
diff --git a/drivers/hwmon/max6642.c b/drivers/hwmon/max6642.c
index 8326fbd..6520bc5 100644
--- a/drivers/hwmon/max6642.c
+++ b/drivers/hwmon/max6642.c
@@ -8,7 +8,7 @@
  *
  *  Based on the max1619 driver.
  *  Copyright (C) 2003-2004 Oleksij Rempel <bug-track@fisher-privat.net>
- *                          Jean Delvare <khali@linux-fr.org>
+ *                          Jean Delvare <jdelvare@suse.de>
  *
  * The MAX6642 is a sensor chip made by Maxim.
  * It reports up to two temperatures (its own plus up to
diff --git a/drivers/hwmon/nct6775.c b/drivers/hwmon/nct6775.c
index 8686e96..38d5a63 100644
--- a/drivers/hwmon/nct6775.c
+++ b/drivers/hwmon/nct6775.c
@@ -5,7 +5,7 @@
  * Copyright (C) 2012  Guenter Roeck <linux@roeck-us.net>
  *
  * Derived from w83627ehf driver
- * Copyright (C) 2005-2012  Jean Delvare <khali@linux-fr.org>
+ * Copyright (C) 2005-2012  Jean Delvare <jdelvare@suse.de>
  * Copyright (C) 2006  Yuan Mu (Winbond),
  *		       Rudolf Marek <r.marek@assembler.cz>
  *		       David Hubbard <david.c.hubbard@gmail.com>
diff --git a/drivers/hwmon/ntc_thermistor.c b/drivers/hwmon/ntc_thermistor.c
index 8c232039..8a17f01 100644
--- a/drivers/hwmon/ntc_thermistor.c
+++ b/drivers/hwmon/ntc_thermistor.c
@@ -145,7 +145,7 @@
 static int ntc_adc_iio_read(struct ntc_thermistor_platform_data *pdata)
 {
 	struct iio_channel *channel = pdata->chan;
-	unsigned int result;
+	s64 result;
 	int val, ret;
 
 	ret = iio_read_channel_raw(channel, &val);
@@ -155,10 +155,10 @@
 	}
 
 	/* unit: mV */
-	result = pdata->pullup_uv * val;
+	result = pdata->pullup_uv * (s64) val;
 	result >>= 12;
 
-	return result;
+	return (int)result;
 }
 
 static const struct of_device_id ntc_match[] = {
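
Widening the multiplication to s64 above avoids a 32-bit overflow: a pull-up voltage expressed in microvolts times a full-scale 12-bit ADC code easily exceeds 2^32, so the product must be formed in 64 bits before the >> 12. A small demonstration with made-up values:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t pullup_uv = 5000000;	/* 5 V pull-up, in microvolts */
	int val = 4095;			/* full-scale 12-bit ADC code */

	uint32_t narrow = (uint32_t)(pullup_uv * val) >> 12;	/* product wraps */
	int64_t wide = ((int64_t)pullup_uv * val) >> 12;	/* widened first */

	printf("32-bit result: %u, 64-bit result: %lld\n",
	       narrow, (long long)wide);
	return 0;
}
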
diff --git a/drivers/hwmon/pc87360.c b/drivers/hwmon/pc87360.c
index aa615ba..330fe11 100644
--- a/drivers/hwmon/pc87360.c
+++ b/drivers/hwmon/pc87360.c
@@ -1,7 +1,7 @@
 /*
  *  pc87360.c - Part of lm_sensors, Linux kernel modules
  *              for hardware monitoring
- *  Copyright (C) 2004, 2007 Jean Delvare <khali@linux-fr.org>
+ *  Copyright (C) 2004, 2007 Jean Delvare <jdelvare@suse.de>
  *
  *  Copied from smsc47m1.c:
  *  Copyright (C) 2002 Mark D. Studebaker <mdsxyz123@yahoo.com>
@@ -1808,7 +1808,7 @@
 }
 
 
-MODULE_AUTHOR("Jean Delvare <khali@linux-fr.org>");
+MODULE_AUTHOR("Jean Delvare <jdelvare@suse.de>");
 MODULE_DESCRIPTION("PC8736x hardware monitor");
 MODULE_LICENSE("GPL");
 
diff --git a/drivers/hwmon/pc87427.c b/drivers/hwmon/pc87427.c
index 6e6ea44..d847e0a 100644
--- a/drivers/hwmon/pc87427.c
+++ b/drivers/hwmon/pc87427.c
@@ -1,7 +1,7 @@
 /*
  *  pc87427.c - hardware monitoring driver for the
  *              National Semiconductor PC87427 Super-I/O chip
- *  Copyright (C) 2006, 2008, 2010  Jean Delvare <khali@linux-fr.org>
+ *  Copyright (C) 2006, 2008, 2010  Jean Delvare <jdelvare@suse.de>
  *
  *  This program is free software; you can redistribute it and/or modify
  *  it under the terms of the GNU General Public License version 2 as
@@ -1347,7 +1347,7 @@
 	platform_driver_unregister(&pc87427_driver);
 }
 
-MODULE_AUTHOR("Jean Delvare <khali@linux-fr.org>");
+MODULE_AUTHOR("Jean Delvare <jdelvare@suse.de>");
 MODULE_DESCRIPTION("PC87427 hardware monitoring driver");
 MODULE_LICENSE("GPL");
 
diff --git a/drivers/hwmon/pcf8591.c b/drivers/hwmon/pcf8591.c
index 825883d..5740888 100644
--- a/drivers/hwmon/pcf8591.c
+++ b/drivers/hwmon/pcf8591.c
@@ -1,7 +1,7 @@
 /*
  * Copyright (C) 2001-2004 Aurelien Jarno <aurelien@aurel32.net>
  * Ported to Linux 2.6 by Aurelien Jarno <aurelien@aurel32.net> with
- * the help of Jean Delvare <khali@linux-fr.org>
+ * the help of Jean Delvare <jdelvare@suse.de>
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License as published by
diff --git a/drivers/hwmon/pmbus/pmbus_core.c b/drivers/hwmon/pmbus/pmbus_core.c
index 3cbf66e..291d11f 100644
--- a/drivers/hwmon/pmbus/pmbus_core.c
+++ b/drivers/hwmon/pmbus/pmbus_core.c
@@ -90,7 +90,8 @@
 
 	u32 flags;		/* from platform data */
 
-	int exponent;		/* linear mode: exponent for output voltages */
+	int exponent[PMBUS_PAGES];
+				/* linear mode: exponent for output voltages */
 
 	const struct pmbus_driver_info *info;
 
@@ -410,7 +411,7 @@
 	long val;
 
 	if (sensor->class == PSC_VOLTAGE_OUT) {	/* LINEAR16 */
-		exponent = data->exponent;
+		exponent = data->exponent[sensor->page];
 		mantissa = (u16) sensor->data;
 	} else {				/* LINEAR11 */
 		exponent = ((s16)sensor->data) >> 11;
@@ -516,7 +517,7 @@
 #define MIN_MANTISSA	(511 * 1000)
 
 static u16 pmbus_data2reg_linear(struct pmbus_data *data,
-				 enum pmbus_sensor_classes class, long val)
+				 struct pmbus_sensor *sensor, long val)
 {
 	s16 exponent = 0, mantissa;
 	bool negative = false;
@@ -525,7 +526,7 @@
 	if (val == 0)
 		return 0;
 
-	if (class == PSC_VOLTAGE_OUT) {
+	if (sensor->class == PSC_VOLTAGE_OUT) {
 		/* LINEAR16 does not support negative voltages */
 		if (val < 0)
 			return 0;
@@ -534,10 +535,10 @@
 		 * For a static exponent, we don't have a choice
 		 * but to adjust the value to it.
 		 */
-		if (data->exponent < 0)
-			val <<= -data->exponent;
+		if (data->exponent[sensor->page] < 0)
+			val <<= -data->exponent[sensor->page];
 		else
-			val >>= data->exponent;
+			val >>= data->exponent[sensor->page];
 		val = DIV_ROUND_CLOSEST(val, 1000);
 		return val & 0xffff;
 	}
@@ -548,14 +549,14 @@
 	}
 
 	/* Power is in uW. Convert to mW before converting. */
-	if (class == PSC_POWER)
+	if (sensor->class == PSC_POWER)
 		val = DIV_ROUND_CLOSEST(val, 1000L);
 
 	/*
 	 * For simplicity, convert fan data to milli-units
 	 * before calculating the exponent.
 	 */
-	if (class == PSC_FAN)
+	if (sensor->class == PSC_FAN)
 		val = val * 1000;
 
 	/* Reduce large mantissa until it fits into 10 bit */
@@ -585,22 +586,22 @@
 }
 
 static u16 pmbus_data2reg_direct(struct pmbus_data *data,
-				 enum pmbus_sensor_classes class, long val)
+				 struct pmbus_sensor *sensor, long val)
 {
 	long m, b, R;
 
-	m = data->info->m[class];
-	b = data->info->b[class];
-	R = data->info->R[class];
+	m = data->info->m[sensor->class];
+	b = data->info->b[sensor->class];
+	R = data->info->R[sensor->class];
 
 	/* Power is in uW. Adjust R and b. */
-	if (class == PSC_POWER) {
+	if (sensor->class == PSC_POWER) {
 		R -= 3;
 		b *= 1000;
 	}
 
 	/* Calculate Y = (m * X + b) * 10^R */
-	if (class != PSC_FAN) {
+	if (sensor->class != PSC_FAN) {
 		R -= 3;		/* Adjust R and b for data in milli-units */
 		b *= 1000;
 	}
@@ -619,7 +620,7 @@
 }
 
 static u16 pmbus_data2reg_vid(struct pmbus_data *data,
-			      enum pmbus_sensor_classes class, long val)
+			      struct pmbus_sensor *sensor, long val)
 {
 	val = clamp_val(val, 500, 1600);
 
@@ -627,20 +628,20 @@
 }
 
 static u16 pmbus_data2reg(struct pmbus_data *data,
-			  enum pmbus_sensor_classes class, long val)
+			  struct pmbus_sensor *sensor, long val)
 {
 	u16 regval;
 
-	switch (data->info->format[class]) {
+	switch (data->info->format[sensor->class]) {
 	case direct:
-		regval = pmbus_data2reg_direct(data, class, val);
+		regval = pmbus_data2reg_direct(data, sensor, val);
 		break;
 	case vid:
-		regval = pmbus_data2reg_vid(data, class, val);
+		regval = pmbus_data2reg_vid(data, sensor, val);
 		break;
 	case linear:
 	default:
-		regval = pmbus_data2reg_linear(data, class, val);
+		regval = pmbus_data2reg_linear(data, sensor, val);
 		break;
 	}
 	return regval;
@@ -746,7 +747,7 @@
 		return -EINVAL;
 
 	mutex_lock(&data->update_lock);
-	regval = pmbus_data2reg(data, sensor->class, val);
+	regval = pmbus_data2reg(data, sensor, val);
 	ret = _pmbus_write_word_data(client, sensor->page, sensor->reg, regval);
 	if (ret < 0)
 		rv = ret;
@@ -1643,12 +1644,13 @@
  * This function is called for all chips.
  */
 static int pmbus_identify_common(struct i2c_client *client,
-				 struct pmbus_data *data)
+				 struct pmbus_data *data, int page)
 {
 	int vout_mode = -1;
 
-	if (pmbus_check_byte_register(client, 0, PMBUS_VOUT_MODE))
-		vout_mode = _pmbus_read_byte_data(client, 0, PMBUS_VOUT_MODE);
+	if (pmbus_check_byte_register(client, page, PMBUS_VOUT_MODE))
+		vout_mode = _pmbus_read_byte_data(client, page,
+						  PMBUS_VOUT_MODE);
 	if (vout_mode >= 0 && vout_mode != 0xff) {
 		/*
 		 * Not all chips support the VOUT_MODE command,
@@ -1659,7 +1661,7 @@
 			if (data->info->format[PSC_VOLTAGE_OUT] != linear)
 				return -ENODEV;
 
-			data->exponent = ((s8)(vout_mode << 3)) >> 3;
+			data->exponent[page] = ((s8)(vout_mode << 3)) >> 3;
 			break;
 		case 1: /* VID mode         */
 			if (data->info->format[PSC_VOLTAGE_OUT] != vid)
@@ -1674,7 +1676,7 @@
 		}
 	}
 
-	pmbus_clear_fault_page(client, 0);
+	pmbus_clear_fault_page(client, page);
 	return 0;
 }
 
@@ -1682,7 +1684,7 @@
 			     struct pmbus_driver_info *info)
 {
 	struct device *dev = &client->dev;
-	int ret;
+	int page, ret;
 
 	/*
 	 * Some PMBus chips don't support PMBUS_STATUS_BYTE, so try
@@ -1715,10 +1717,12 @@
 		return -ENODEV;
 	}
 
-	ret = pmbus_identify_common(client, data);
-	if (ret < 0) {
-		dev_err(dev, "Failed to identify chip capabilities\n");
-		return ret;
+	for (page = 0; page < info->pages; page++) {
+		ret = pmbus_identify_common(client, data, page);
+		if (ret < 0) {
+			dev_err(dev, "Failed to identify chip capabilities\n");
+			return ret;
+		}
 	}
 	return 0;
 }
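
Turning exponent into a per-page array above reflects how PMBus LINEAR16 output-voltage readings are decoded: value = mantissa * 2^exponent, with the exponent taken from each page's VOUT_MODE register, and on multi-page chips the pages need not share one value. A minimal sketch of that decode (the values and helper name are invented):

#include <stdint.h>
#include <stdio.h>

/* decode a LINEAR16 register value into milli-units (mV) */
static long linear16_to_mv(uint16_t mantissa, int exponent)
{
	long val = (long)mantissa * 1000L;	/* scale first to keep precision */

	if (exponent >= 0)
		val <<= exponent;
	else
		val >>= -exponent;
	return val;
}

int main(void)
{
	/* two pages of one chip may report different VOUT_MODE exponents */
	int exponent[2] = { -12, -9 };
	uint16_t raw = 0x3000;

	printf("page 0: %ld mV, page 1: %ld mV\n",
	       linear16_to_mv(raw, exponent[0]),
	       linear16_to_mv(raw, exponent[1]));
	return 0;
}
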
diff --git a/drivers/hwmon/sis5595.c b/drivers/hwmon/sis5595.c
index e74bd7e..3532026 100644
--- a/drivers/hwmon/sis5595.c
+++ b/drivers/hwmon/sis5595.c
@@ -6,7 +6,7 @@
  *			     Kyösti Mälkki <kmalkki@cc.hut.fi>, and
  *			     Mark D. Studebaker <mdsxyz123@yahoo.com>
  * Ported to Linux 2.6 by Aurelien Jarno <aurelien@aurel32.net> with
- * the help of Jean Delvare <khali@linux-fr.org>
+ * the help of Jean Delvare <jdelvare@suse.de>
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License as published by
diff --git a/drivers/hwmon/smsc47b397.c b/drivers/hwmon/smsc47b397.c
index 81348fa..bd89e87 100644
--- a/drivers/hwmon/smsc47b397.c
+++ b/drivers/hwmon/smsc47b397.c
@@ -9,7 +9,7 @@
  *
  * derived in part from smsc47m1.c:
  * Copyright (C) 2002 Mark D. Studebaker <mdsxyz123@yahoo.com>
- * Copyright (C) 2004 Jean Delvare <khali@linux-fr.org>
+ * Copyright (C) 2004 Jean Delvare <jdelvare@suse.de>
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License as published by
diff --git a/drivers/hwmon/smsc47m1.c b/drivers/hwmon/smsc47m1.c
index 05cb814..23a22c4 100644
--- a/drivers/hwmon/smsc47m1.c
+++ b/drivers/hwmon/smsc47m1.c
@@ -7,7 +7,7 @@
  * Super-I/O chips.
  *
  * Copyright (C) 2002 Mark D. Studebaker <mdsxyz123@yahoo.com>
- * Copyright (C) 2004-2007 Jean Delvare <khali@linux-fr.org>
+ * Copyright (C) 2004-2007 Jean Delvare <jdelvare@suse.de>
  * Ported to Linux 2.6 by Gabriele Gorla <gorlik@yahoo.com>
  *			and Jean Delvare
  *
diff --git a/drivers/hwmon/w83627ehf.c b/drivers/hwmon/w83627ehf.c
index 23ff210..f0ab61d 100644
--- a/drivers/hwmon/w83627ehf.c
+++ b/drivers/hwmon/w83627ehf.c
@@ -1,7 +1,7 @@
 /*
  *  w83627ehf - Driver for the hardware monitoring functionality of
  *		the Winbond W83627EHF Super-I/O chip
- *  Copyright (C) 2005-2012  Jean Delvare <khali@linux-fr.org>
+ *  Copyright (C) 2005-2012  Jean Delvare <jdelvare@suse.de>
  *  Copyright (C) 2006  Yuan Mu (Winbond),
  *			Rudolf Marek <r.marek@assembler.cz>
  *			David Hubbard <david.c.hubbard@gmail.com>
@@ -2889,7 +2889,7 @@
 	platform_driver_unregister(&w83627ehf_driver);
 }
 
-MODULE_AUTHOR("Jean Delvare <khali@linux-fr.org>");
+MODULE_AUTHOR("Jean Delvare <jdelvare@suse.de>");
 MODULE_DESCRIPTION("W83627EHF driver");
 MODULE_LICENSE("GPL");
 
diff --git a/drivers/hwmon/w83627hf.c b/drivers/hwmon/w83627hf.c
index cb9cd32..c1726be 100644
--- a/drivers/hwmon/w83627hf.c
+++ b/drivers/hwmon/w83627hf.c
@@ -5,7 +5,7 @@
  *			      Philip Edelbrock <phil@netroedge.com>,
  *			      and Mark Studebaker <mdsxyz123@yahoo.com>
  * Ported to 2.6 by Bernhard C. Schrenk <clemy@clemy.org>
- * Copyright (c) 2007 - 1012  Jean Delvare <khali@linux-fr.org>
+ * Copyright (c) 2007 - 1012  Jean Delvare <jdelvare@suse.de>
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License as published by
diff --git a/drivers/hwmon/w83781d.c b/drivers/hwmon/w83781d.c
index f9d5139..8491161 100644
--- a/drivers/hwmon/w83781d.c
+++ b/drivers/hwmon/w83781d.c
@@ -4,7 +4,7 @@
  * Copyright (c) 1998 - 2001  Frodo Looijaard <frodol@dds.nl>,
  *			      Philip Edelbrock <phil@netroedge.com>,
  *			      and Mark Studebaker <mdsxyz123@yahoo.com>
- * Copyright (c) 2007 - 2008  Jean Delvare <khali@linux-fr.org>
+ * Copyright (c) 2007 - 2008  Jean Delvare <jdelvare@suse.de>
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License as published by
diff --git a/drivers/hwmon/w83795.c b/drivers/hwmon/w83795.c
index 908209d..2189413 100644
--- a/drivers/hwmon/w83795.c
+++ b/drivers/hwmon/w83795.c
@@ -2,7 +2,7 @@
  *  w83795.c - Linux kernel driver for hardware monitoring
  *  Copyright (C) 2008 Nuvoton Technology Corp.
  *                Wei Song
- *  Copyright (C) 2010 Jean Delvare <khali@linux-fr.org>
+ *  Copyright (C) 2010 Jean Delvare <jdelvare@suse.de>
  *
  *  This program is free software; you can redistribute it and/or modify
  *  it under the terms of the GNU General Public License as published by
@@ -2282,6 +2282,6 @@
 
 module_i2c_driver(w83795_driver);
 
-MODULE_AUTHOR("Wei Song, Jean Delvare <khali@linux-fr.org>");
+MODULE_AUTHOR("Wei Song, Jean Delvare <jdelvare@suse.de>");
 MODULE_DESCRIPTION("W83795G/ADG hardware monitoring driver");
 MODULE_LICENSE("GPL");
diff --git a/drivers/hwmon/w83l785ts.c b/drivers/hwmon/w83l785ts.c
index 39dbe99..6384b26 100644
--- a/drivers/hwmon/w83l785ts.c
+++ b/drivers/hwmon/w83l785ts.c
@@ -1,7 +1,7 @@
 /*
  * w83l785ts.c - Part of lm_sensors, Linux kernel modules for hardware
  *               monitoring
- * Copyright (C) 2003-2009  Jean Delvare <khali@linux-fr.org>
+ * Copyright (C) 2003-2009  Jean Delvare <jdelvare@suse.de>
  *
  * Inspired from the lm83 driver. The W83L785TS-S is a sensor chip made
  * by Winbond. It reports a single external temperature with a 1 deg
@@ -10,7 +10,7 @@
  *   http://www.winbond-usa.com/products/winbond_products/pdfs/PCIC/W83L785TS-S.pdf
  *
  * Ported to Linux 2.6 by Wolfgang Ziegler <nuppla@gmx.at> and Jean Delvare
- * <khali@linux-fr.org>.
+ * <jdelvare@suse.de>.
  *
  * Thanks to James Bolt <james@evilpenguin.com> for benchmarking the read
  * error handling mechanism.
@@ -299,6 +299,6 @@
 
 module_i2c_driver(w83l785ts_driver);
 
-MODULE_AUTHOR("Jean Delvare <khali@linux-fr.org>");
+MODULE_AUTHOR("Jean Delvare <jdelvare@suse.de>");
 MODULE_DESCRIPTION("W83L785TS-S driver");
 MODULE_LICENSE("GPL");
diff --git a/drivers/i2c/algos/i2c-algo-bit.c b/drivers/i2c/algos/i2c-algo-bit.c
index fad22b0..65ef966 100644
--- a/drivers/i2c/algos/i2c-algo-bit.c
+++ b/drivers/i2c/algos/i2c-algo-bit.c
@@ -20,12 +20,11 @@
  * ------------------------------------------------------------------------- */
 
 /* With some changes from Frodo Looijaard <frodol@dds.nl>, Kyösti Mälkki
-   <kmalkki@cc.hut.fi> and Jean Delvare <khali@linux-fr.org> */
+   <kmalkki@cc.hut.fi> and Jean Delvare <jdelvare@suse.de> */
 
 #include <linux/kernel.h>
 #include <linux/module.h>
 #include <linux/delay.h>
-#include <linux/init.h>
 #include <linux/errno.h>
 #include <linux/sched.h>
 #include <linux/i2c.h>
diff --git a/drivers/i2c/algos/i2c-algo-pca.c b/drivers/i2c/algos/i2c-algo-pca.c
index f892a42..8b10f88 100644
--- a/drivers/i2c/algos/i2c-algo-pca.c
+++ b/drivers/i2c/algos/i2c-algo-pca.c
@@ -24,7 +24,6 @@
 #include <linux/moduleparam.h>
 #include <linux/delay.h>
 #include <linux/jiffies.h>
-#include <linux/init.h>
 #include <linux/errno.h>
 #include <linux/i2c.h>
 #include <linux/i2c-algo-pca.h>
diff --git a/drivers/i2c/algos/i2c-algo-pcf.c b/drivers/i2c/algos/i2c-algo-pcf.c
index 5c23795..3437009 100644
--- a/drivers/i2c/algos/i2c-algo-pcf.c
+++ b/drivers/i2c/algos/i2c-algo-pcf.c
@@ -30,7 +30,6 @@
 #include <linux/kernel.h>
 #include <linux/module.h>
 #include <linux/delay.h>
-#include <linux/init.h>
 #include <linux/errno.h>
 #include <linux/i2c.h>
 #include <linux/i2c-algo-pcf.h>
diff --git a/drivers/i2c/busses/Kconfig b/drivers/i2c/busses/Kconfig
index 6bcdea5..f5ed031 100644
--- a/drivers/i2c/busses/Kconfig
+++ b/drivers/i2c/busses/Kconfig
@@ -152,6 +152,7 @@
 	    ATI SB700/SP5100
 	    ATI SB800
 	    AMD Hudson-2
+	    AMD ML
 	    AMD CZ
 	    Serverworks OSB4
 	    Serverworks CSB5
diff --git a/drivers/i2c/busses/i2c-acorn.c b/drivers/i2c/busses/i2c-acorn.c
index ed9f48d..9d7be5a 100644
--- a/drivers/i2c/busses/i2c-acorn.c
+++ b/drivers/i2c/busses/i2c-acorn.c
@@ -12,7 +12,7 @@
  *  On Acorn machines, the following i2c devices are on the bus:
  *	- PCF8583 real time clock & static RAM
  */
-#include <linux/init.h>
+#include <linux/module.h>
 #include <linux/i2c.h>
 #include <linux/i2c-algo-bit.h>
 #include <linux/io.h>
diff --git a/drivers/i2c/busses/i2c-ali1535.c b/drivers/i2c/busses/i2c-ali1535.c
index 3f49181..7d60d3a 100644
--- a/drivers/i2c/busses/i2c-ali1535.c
+++ b/drivers/i2c/busses/i2c-ali1535.c
@@ -58,7 +58,6 @@
 #include <linux/delay.h>
 #include <linux/ioport.h>
 #include <linux/i2c.h>
-#include <linux/init.h>
 #include <linux/acpi.h>
 #include <linux/io.h>
 
diff --git a/drivers/i2c/busses/i2c-ali1563.c b/drivers/i2c/busses/i2c-ali1563.c
index 84ccd94..4611e47 100644
--- a/drivers/i2c/busses/i2c-ali1563.c
+++ b/drivers/i2c/busses/i2c-ali1563.c
@@ -20,7 +20,6 @@
 #include <linux/delay.h>
 #include <linux/i2c.h>
 #include <linux/pci.h>
-#include <linux/init.h>
 #include <linux/acpi.h>
 
 #define ALI1563_MAX_TIMEOUT	500
diff --git a/drivers/i2c/busses/i2c-ali15x3.c b/drivers/i2c/busses/i2c-ali15x3.c
index 26bcc61..4823206 100644
--- a/drivers/i2c/busses/i2c-ali15x3.c
+++ b/drivers/i2c/busses/i2c-ali15x3.c
@@ -65,7 +65,6 @@
 #include <linux/ioport.h>
 #include <linux/delay.h>
 #include <linux/i2c.h>
-#include <linux/init.h>
 #include <linux/acpi.h>
 #include <linux/io.h>
 
diff --git a/drivers/i2c/busses/i2c-amd756-s4882.c b/drivers/i2c/busses/i2c-amd756-s4882.c
index 07f01ac..41fc683 100644
--- a/drivers/i2c/busses/i2c-amd756-s4882.c
+++ b/drivers/i2c/busses/i2c-amd756-s4882.c
@@ -1,7 +1,7 @@
 /*
  * i2c-amd756-s4882.c - i2c-amd756 extras for the Tyan S4882 motherboard
  *
- * Copyright (C) 2004, 2008 Jean Delvare <khali@linux-fr.org>
+ * Copyright (C) 2004, 2008 Jean Delvare <jdelvare@suse.de>
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License as published by
@@ -250,7 +250,7 @@
 		       "Physical bus restoration failed\n");
 }
 
-MODULE_AUTHOR("Jean Delvare <khali@linux-fr.org>");
+MODULE_AUTHOR("Jean Delvare <jdelvare@suse.de>");
 MODULE_DESCRIPTION("S4882 SMBus multiplexing");
 MODULE_LICENSE("GPL");
 
diff --git a/drivers/i2c/busses/i2c-amd756.c b/drivers/i2c/busses/i2c-amd756.c
index e13e2aa..819d3c1 100644
--- a/drivers/i2c/busses/i2c-amd756.c
+++ b/drivers/i2c/busses/i2c-amd756.c
@@ -41,7 +41,6 @@
 #include <linux/stddef.h>
 #include <linux/ioport.h>
 #include <linux/i2c.h>
-#include <linux/init.h>
 #include <linux/acpi.h>
 #include <linux/io.h>
 
diff --git a/drivers/i2c/busses/i2c-amd8111.c b/drivers/i2c/busses/i2c-amd8111.c
index a44e6e7..f3d4d79 100644
--- a/drivers/i2c/busses/i2c-amd8111.c
+++ b/drivers/i2c/busses/i2c-amd8111.c
@@ -13,7 +13,6 @@
 #include <linux/kernel.h>
 #include <linux/stddef.h>
 #include <linux/ioport.h>
-#include <linux/init.h>
 #include <linux/i2c.h>
 #include <linux/delay.h>
 #include <linux/acpi.h>
diff --git a/drivers/i2c/busses/i2c-au1550.c b/drivers/i2c/busses/i2c-au1550.c
index b5b8923..8762458 100644
--- a/drivers/i2c/busses/i2c-au1550.c
+++ b/drivers/i2c/busses/i2c-au1550.c
@@ -31,7 +31,6 @@
 #include <linux/kernel.h>
 #include <linux/module.h>
 #include <linux/platform_device.h>
-#include <linux/init.h>
 #include <linux/errno.h>
 #include <linux/i2c.h>
 #include <linux/slab.h>
diff --git a/drivers/i2c/busses/i2c-cbus-gpio.c b/drivers/i2c/busses/i2c-cbus-gpio.c
index ce7ffba..bdf040f 100644
--- a/drivers/i2c/busses/i2c-cbus-gpio.c
+++ b/drivers/i2c/busses/i2c-cbus-gpio.c
@@ -19,7 +19,6 @@
 #include <linux/io.h>
 #include <linux/i2c.h>
 #include <linux/gpio.h>
-#include <linux/init.h>
 #include <linux/slab.h>
 #include <linux/delay.h>
 #include <linux/errno.h>
diff --git a/drivers/i2c/busses/i2c-cpm.c b/drivers/i2c/busses/i2c-cpm.c
index 3e5ea2c..be7f0a2 100644
--- a/drivers/i2c/busses/i2c-cpm.c
+++ b/drivers/i2c/busses/i2c-cpm.c
@@ -33,7 +33,6 @@
 #include <linux/module.h>
 #include <linux/delay.h>
 #include <linux/slab.h>
-#include <linux/init.h>
 #include <linux/interrupt.h>
 #include <linux/errno.h>
 #include <linux/stddef.h>
diff --git a/drivers/i2c/busses/i2c-eg20t.c b/drivers/i2c/busses/i2c-eg20t.c
index ff15ae9..e08e458 100644
--- a/drivers/i2c/busses/i2c-eg20t.c
+++ b/drivers/i2c/busses/i2c-eg20t.c
@@ -18,7 +18,6 @@
 #include <linux/module.h>
 #include <linux/kernel.h>
 #include <linux/delay.h>
-#include <linux/init.h>
 #include <linux/errno.h>
 #include <linux/i2c.h>
 #include <linux/fs.h>
diff --git a/drivers/i2c/busses/i2c-exynos5.c b/drivers/i2c/busses/i2c-exynos5.c
index 044f85b..9fd711c 100644
--- a/drivers/i2c/busses/i2c-exynos5.c
+++ b/drivers/i2c/busses/i2c-exynos5.c
@@ -12,7 +12,6 @@
 #include <linux/module.h>
 
 #include <linux/i2c.h>
-#include <linux/init.h>
 #include <linux/time.h>
 #include <linux/interrupt.h>
 #include <linux/delay.h>
diff --git a/drivers/i2c/busses/i2c-highlander.c b/drivers/i2c/busses/i2c-highlander.c
index 436b0f2..512fcfa 100644
--- a/drivers/i2c/busses/i2c-highlander.c
+++ b/drivers/i2c/busses/i2c-highlander.c
@@ -12,7 +12,6 @@
  * of this archive for more details.
  */
 #include <linux/module.h>
-#include <linux/init.h>
 #include <linux/interrupt.h>
 #include <linux/i2c.h>
 #include <linux/platform_device.h>
diff --git a/drivers/i2c/busses/i2c-hydra.c b/drivers/i2c/busses/i2c-hydra.c
index 79c3d90..e248257 100644
--- a/drivers/i2c/busses/i2c-hydra.c
+++ b/drivers/i2c/busses/i2c-hydra.c
@@ -27,7 +27,6 @@
 #include <linux/types.h>
 #include <linux/i2c.h>
 #include <linux/i2c-algo-bit.h>
-#include <linux/init.h>
 #include <linux/io.h>
 #include <asm/hydra.h>
 
diff --git a/drivers/i2c/busses/i2c-i801.c b/drivers/i2c/busses/i2c-i801.c
index 737e298..349c2d3 100644
--- a/drivers/i2c/busses/i2c-i801.c
+++ b/drivers/i2c/busses/i2c-i801.c
@@ -2,7 +2,7 @@
     Copyright (c) 1998 - 2002  Frodo Looijaard <frodol@dds.nl>,
     Philip Edelbrock <phil@netroedge.com>, and Mark D. Studebaker
     <mdsxyz123@yahoo.com>
-    Copyright (C) 2007 - 2012  Jean Delvare <khali@linux-fr.org>
+    Copyright (C) 2007 - 2012  Jean Delvare <jdelvare@suse.de>
     Copyright (C) 2010         Intel Corporation,
                                David Woodhouse <dwmw2@infradead.org>
 
@@ -1312,8 +1312,7 @@
 	pci_unregister_driver(&i801_driver);
 }
 
-MODULE_AUTHOR("Mark D. Studebaker <mdsxyz123@yahoo.com>, "
-	      "Jean Delvare <khali@linux-fr.org>");
+MODULE_AUTHOR("Mark D. Studebaker <mdsxyz123@yahoo.com>, Jean Delvare <jdelvare@suse.de>");
 MODULE_DESCRIPTION("I801 SMBus driver");
 MODULE_LICENSE("GPL");
 
diff --git a/drivers/i2c/busses/i2c-ibm_iic.c b/drivers/i2c/busses/i2c-ibm_iic.c
index f744410..274312c 100644
--- a/drivers/i2c/busses/i2c-ibm_iic.c
+++ b/drivers/i2c/busses/i2c-ibm_iic.c
@@ -36,7 +36,6 @@
 #include <linux/ioport.h>
 #include <linux/delay.h>
 #include <linux/slab.h>
-#include <linux/init.h>
 #include <linux/interrupt.h>
 #include <asm/irq.h>
 #include <linux/io.h>
diff --git a/drivers/i2c/busses/i2c-iop3xx.c b/drivers/i2c/busses/i2c-iop3xx.c
index dd24aa0..3d16c2f 100644
--- a/drivers/i2c/busses/i2c-iop3xx.c
+++ b/drivers/i2c/busses/i2c-iop3xx.c
@@ -34,7 +34,6 @@
 #include <linux/module.h>
 #include <linux/delay.h>
 #include <linux/slab.h>
-#include <linux/init.h>
 #include <linux/errno.h>
 #include <linux/platform_device.h>
 #include <linux/i2c.h>
diff --git a/drivers/i2c/busses/i2c-isch.c b/drivers/i2c/busses/i2c-isch.c
index af21304..cf99dbf 100644
--- a/drivers/i2c/busses/i2c-isch.c
+++ b/drivers/i2c/busses/i2c-isch.c
@@ -33,7 +33,6 @@
 #include <linux/stddef.h>
 #include <linux/ioport.h>
 #include <linux/i2c.h>
-#include <linux/init.h>
 #include <linux/io.h>
 #include <linux/acpi.h>
 
diff --git a/drivers/i2c/busses/i2c-ismt.c b/drivers/i2c/busses/i2c-ismt.c
index bb132ea..8ce4f51 100644
--- a/drivers/i2c/busses/i2c-ismt.c
+++ b/drivers/i2c/busses/i2c-ismt.c
@@ -62,7 +62,6 @@
  */
 
 #include <linux/module.h>
-#include <linux/init.h>
 #include <linux/pci.h>
 #include <linux/kernel.h>
 #include <linux/stddef.h>
diff --git a/drivers/i2c/busses/i2c-mpc.c b/drivers/i2c/busses/i2c-mpc.c
index b6a741c..f539163 100644
--- a/drivers/i2c/busses/i2c-mpc.c
+++ b/drivers/i2c/busses/i2c-mpc.c
@@ -16,7 +16,6 @@
 #include <linux/kernel.h>
 #include <linux/module.h>
 #include <linux/sched.h>
-#include <linux/init.h>
 #include <linux/of_address.h>
 #include <linux/of_irq.h>
 #include <linux/of_platform.h>
diff --git a/drivers/i2c/busses/i2c-mv64xxx.c b/drivers/i2c/busses/i2c-mv64xxx.c
index b8c5187..d52d849 100644
--- a/drivers/i2c/busses/i2c-mv64xxx.c
+++ b/drivers/i2c/busses/i2c-mv64xxx.c
@@ -97,7 +97,6 @@
 enum {
 	MV64XXX_I2C_ACTION_INVALID,
 	MV64XXX_I2C_ACTION_CONTINUE,
-	MV64XXX_I2C_ACTION_OFFLOAD_SEND_START,
 	MV64XXX_I2C_ACTION_SEND_START,
 	MV64XXX_I2C_ACTION_SEND_RESTART,
 	MV64XXX_I2C_ACTION_OFFLOAD_RESTART,
@@ -204,6 +203,9 @@
 	unsigned long ctrl_reg;
 	struct i2c_msg *msg = drv_data->msgs;
 
+	if (!drv_data->offload_enabled)
+		return -EOPNOTSUPP;
+
 	drv_data->msg = msg;
 	drv_data->byte_posn = 0;
 	drv_data->bytes_left = msg->len;
@@ -433,8 +435,7 @@
 
 		drv_data->msgs++;
 		drv_data->num_msgs--;
-		if (!(drv_data->offload_enabled &&
-				mv64xxx_i2c_offload_msg(drv_data))) {
+		if (mv64xxx_i2c_offload_msg(drv_data) < 0) {
 			drv_data->cntl_bits |= MV64XXX_I2C_REG_CONTROL_START;
 			writel(drv_data->cntl_bits,
 			drv_data->reg_base + drv_data->reg_offsets.control);
@@ -458,15 +459,14 @@
 			drv_data->reg_base + drv_data->reg_offsets.control);
 		break;
 
-	case MV64XXX_I2C_ACTION_OFFLOAD_SEND_START:
-		if (!mv64xxx_i2c_offload_msg(drv_data))
-			break;
-		else
-			drv_data->action = MV64XXX_I2C_ACTION_SEND_START;
-		/* FALLTHRU */
 	case MV64XXX_I2C_ACTION_SEND_START:
-		writel(drv_data->cntl_bits | MV64XXX_I2C_REG_CONTROL_START,
-			drv_data->reg_base + drv_data->reg_offsets.control);
+		/* Can we offload this msg ? */
+		if (mv64xxx_i2c_offload_msg(drv_data) < 0) {
+			/* No, switch to standard path */
+			mv64xxx_i2c_prepare_for_io(drv_data, drv_data->msgs);
+			writel(drv_data->cntl_bits | MV64XXX_I2C_REG_CONTROL_START,
+				drv_data->reg_base + drv_data->reg_offsets.control);
+		}
 		break;
 
 	case MV64XXX_I2C_ACTION_SEND_ADDR_1:
@@ -625,15 +625,10 @@
 	unsigned long	flags;
 
 	spin_lock_irqsave(&drv_data->lock, flags);
-	if (drv_data->offload_enabled) {
-		drv_data->action = MV64XXX_I2C_ACTION_OFFLOAD_SEND_START;
-		drv_data->state = MV64XXX_I2C_STATE_WAITING_FOR_START_COND;
-	} else {
-		mv64xxx_i2c_prepare_for_io(drv_data, msg);
 
-		drv_data->action = MV64XXX_I2C_ACTION_SEND_START;
-		drv_data->state = MV64XXX_I2C_STATE_WAITING_FOR_START_COND;
-	}
+	drv_data->action = MV64XXX_I2C_ACTION_SEND_START;
+	drv_data->state = MV64XXX_I2C_STATE_WAITING_FOR_START_COND;
+
 	drv_data->send_stop = is_last;
 	drv_data->block = 1;
 	mv64xxx_i2c_do_action(drv_data);
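
The rework above has mv64xxx_i2c_offload_msg() return a negative errno (-EOPNOTSUPP when offload is disabled), so the SEND_START path simply tries the offload engine first and drops back to the register-driven transfer on failure. A minimal stand-alone sketch of that try-then-fall-back pattern, with hypothetical names (not the driver code itself):

	#include <errno.h>
	#include <stdio.h>

	struct xfer {
		int offload_enabled;	/* stand-in for drv_data->offload_enabled */
	};

	static int offload_msg(struct xfer *x)
	{
		if (!x->offload_enabled)
			return -EOPNOTSUPP;	/* caller must use the standard path */
		/* ... program the offload registers here ... */
		return 0;
	}

	static void send_start(struct xfer *x)
	{
		if (offload_msg(x) < 0) {
			/* no offload possible: prepare PIO and issue START by hand */
			printf("falling back to standard path\n");
		}
	}

	int main(void)
	{
		struct xfer x = { .offload_enabled = 0 };
		send_start(&x);
		return 0;
	}
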
diff --git a/drivers/i2c/busses/i2c-nforce2-s4985.c b/drivers/i2c/busses/i2c-nforce2-s4985.c
index 2ca268d..b170bdf 100644
--- a/drivers/i2c/busses/i2c-nforce2-s4985.c
+++ b/drivers/i2c/busses/i2c-nforce2-s4985.c
@@ -1,7 +1,7 @@
 /*
  * i2c-nforce2-s4985.c - i2c-nforce2 extras for the Tyan S4985 motherboard
  *
- * Copyright (C) 2008 Jean Delvare <khali@linux-fr.org>
+ * Copyright (C) 2008 Jean Delvare <jdelvare@suse.de>
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License as published by
@@ -245,7 +245,7 @@
 		       "Physical bus restoration failed\n");
 }
 
-MODULE_AUTHOR("Jean Delvare <khali@linux-fr.org>");
+MODULE_AUTHOR("Jean Delvare <jdelvare@suse.de>");
 MODULE_DESCRIPTION("S4985 SMBus multiplexing");
 MODULE_LICENSE("GPL");
 
diff --git a/drivers/i2c/busses/i2c-nforce2.c b/drivers/i2c/busses/i2c-nforce2.c
index ac88f40..0038c45 100644
--- a/drivers/i2c/busses/i2c-nforce2.c
+++ b/drivers/i2c/busses/i2c-nforce2.c
@@ -51,7 +51,6 @@
 #include <linux/kernel.h>
 #include <linux/stddef.h>
 #include <linux/ioport.h>
-#include <linux/init.h>
 #include <linux/i2c.h>
 #include <linux/delay.h>
 #include <linux/dmi.h>
diff --git a/drivers/i2c/busses/i2c-ocores.c b/drivers/i2c/busses/i2c-ocores.c
index c61f37a..80e06fa 100644
--- a/drivers/i2c/busses/i2c-ocores.c
+++ b/drivers/i2c/busses/i2c-ocores.c
@@ -15,7 +15,6 @@
 #include <linux/err.h>
 #include <linux/kernel.h>
 #include <linux/module.h>
-#include <linux/init.h>
 #include <linux/errno.h>
 #include <linux/platform_device.h>
 #include <linux/i2c.h>
diff --git a/drivers/i2c/busses/i2c-octeon.c b/drivers/i2c/busses/i2c-octeon.c
index b929ba2..81042b0 100644
--- a/drivers/i2c/busses/i2c-octeon.c
+++ b/drivers/i2c/busses/i2c-octeon.c
@@ -18,7 +18,6 @@
 #include <linux/delay.h>
 #include <linux/sched.h>
 #include <linux/slab.h>
-#include <linux/init.h>
 #include <linux/i2c.h>
 #include <linux/io.h>
 #include <linux/of.h>
diff --git a/drivers/i2c/busses/i2c-parport-light.c b/drivers/i2c/busses/i2c-parport-light.c
index aa95778..62f55fe 100644
--- a/drivers/i2c/busses/i2c-parport-light.c
+++ b/drivers/i2c/busses/i2c-parport-light.c
@@ -1,7 +1,7 @@
 /* ------------------------------------------------------------------------ *
  * i2c-parport-light.c I2C bus over parallel port                           *
  * ------------------------------------------------------------------------ *
-   Copyright (C) 2003-2010 Jean Delvare <khali@linux-fr.org>
+   Copyright (C) 2003-2010 Jean Delvare <jdelvare@suse.de>
 
    Based on older i2c-velleman.c driver
    Copyright (C) 1995-2000 Simon G. Vogl
@@ -273,7 +273,7 @@
 	release_region(base, 3);
 }
 
-MODULE_AUTHOR("Jean Delvare <khali@linux-fr.org>");
+MODULE_AUTHOR("Jean Delvare <jdelvare@suse.de>");
 MODULE_DESCRIPTION("I2C bus over parallel port (light)");
 MODULE_LICENSE("GPL");
 
diff --git a/drivers/i2c/busses/i2c-parport.c b/drivers/i2c/busses/i2c-parport.c
index 81d8878..a27aae2 100644
--- a/drivers/i2c/busses/i2c-parport.c
+++ b/drivers/i2c/busses/i2c-parport.c
@@ -1,7 +1,7 @@
 /* ------------------------------------------------------------------------ *
  * i2c-parport.c I2C bus over parallel port                                 *
  * ------------------------------------------------------------------------ *
-   Copyright (C) 2003-2011 Jean Delvare <khali@linux-fr.org>
+   Copyright (C) 2003-2011 Jean Delvare <jdelvare@suse.de>
 
    Based on older i2c-philips-par.c driver
    Copyright (C) 1995-2000 Simon G. Vogl
@@ -298,7 +298,7 @@
 	parport_unregister_driver(&i2c_parport_driver);
 }
 
-MODULE_AUTHOR("Jean Delvare <khali@linux-fr.org>");
+MODULE_AUTHOR("Jean Delvare <jdelvare@suse.de>");
 MODULE_DESCRIPTION("I2C bus over parallel port");
 MODULE_LICENSE("GPL");
 
diff --git a/drivers/i2c/busses/i2c-parport.h b/drivers/i2c/busses/i2c-parport.h
index 3fe6523..e572f3a 100644
--- a/drivers/i2c/busses/i2c-parport.h
+++ b/drivers/i2c/busses/i2c-parport.h
@@ -1,7 +1,7 @@
 /* ------------------------------------------------------------------------ *
  * i2c-parport.h I2C bus over parallel port                                 *
  * ------------------------------------------------------------------------ *
-   Copyright (C) 2003-2010 Jean Delvare <khali@linux-fr.org>
+   Copyright (C) 2003-2010 Jean Delvare <jdelvare@suse.de>
 
    This program is free software; you can redistribute it and/or modify
    it under the terms of the GNU General Public License as published by
diff --git a/drivers/i2c/busses/i2c-pca-platform.c b/drivers/i2c/busses/i2c-pca-platform.c
index 39e2755..845f125 100644
--- a/drivers/i2c/busses/i2c-pca-platform.c
+++ b/drivers/i2c/busses/i2c-pca-platform.c
@@ -12,7 +12,6 @@
  */
 #include <linux/kernel.h>
 #include <linux/module.h>
-#include <linux/init.h>
 #include <linux/slab.h>
 #include <linux/delay.h>
 #include <linux/jiffies.h>
diff --git a/drivers/i2c/busses/i2c-piix4.c b/drivers/i2c/busses/i2c-piix4.c
index a028617..39dd8ec 100644
--- a/drivers/i2c/busses/i2c-piix4.c
+++ b/drivers/i2c/busses/i2c-piix4.c
@@ -22,7 +22,7 @@
 	Intel PIIX4, 440MX
 	Serverworks OSB4, CSB5, CSB6, HT-1000, HT-1100
 	ATI IXP200, IXP300, IXP400, SB600, SB700/SP5100, SB800
-	AMD Hudson-2, CZ
+	AMD Hudson-2, ML, CZ
 	SMSC Victory66
 
    Note: we assume there can only be one device, with one or more
@@ -38,7 +38,6 @@
 #include <linux/ioport.h>
 #include <linux/i2c.h>
 #include <linux/slab.h>
-#include <linux/init.h>
 #include <linux/dmi.h>
 #include <linux/acpi.h>
 #include <linux/io.h>
@@ -208,16 +207,16 @@
 				   "WARNING: SMBus interface has been FORCEFULLY ENABLED!\n");
 		} else {
 			dev_err(&PIIX4_dev->dev,
-				"Host SMBus controller not enabled!\n");
+				"SMBus Host Controller not enabled!\n");
 			release_region(piix4_smba, SMBIOSIZE);
 			return -ENODEV;
 		}
 	}
 
 	if (((temp & 0x0E) == 8) || ((temp & 0x0E) == 2))
-		dev_dbg(&PIIX4_dev->dev, "Using Interrupt 9 for SMBus.\n");
+		dev_dbg(&PIIX4_dev->dev, "Using IRQ for SMBus\n");
 	else if ((temp & 0x0E) == 0)
-		dev_dbg(&PIIX4_dev->dev, "Using Interrupt SMI# for SMBus.\n");
+		dev_dbg(&PIIX4_dev->dev, "Using SMI# for SMBus\n");
 	else
 		dev_err(&PIIX4_dev->dev, "Illegal Interrupt configuration "
 			"(or code out of date)!\n");
@@ -235,7 +234,8 @@
 {
 	unsigned short piix4_smba;
 	unsigned short smba_idx = 0xcd6;
-	u8 smba_en_lo, smba_en_hi, i2ccfg, i2ccfg_offset = 0x10, smb_en;
+	u8 smba_en_lo, smba_en_hi, smb_en, smb_en_status;
+	u8 i2ccfg, i2ccfg_offset = 0x10;
 
 	/* SB800 and later SMBus does not support forcing address */
 	if (force || force_addr) {
@@ -245,7 +245,15 @@
 	}
 
 	/* Determine the address of the SMBus areas */
-	smb_en = (aux) ? 0x28 : 0x2c;
+	if ((PIIX4_dev->vendor == PCI_VENDOR_ID_AMD &&
+	     PIIX4_dev->device == PCI_DEVICE_ID_AMD_HUDSON2_SMBUS &&
+	     PIIX4_dev->revision >= 0x41) ||
+	    (PIIX4_dev->vendor == PCI_VENDOR_ID_AMD &&
+	     PIIX4_dev->device == 0x790b &&
+	     PIIX4_dev->revision >= 0x49))
+		smb_en = 0x00;
+	else
+		smb_en = (aux) ? 0x28 : 0x2c;
 
 	if (!request_region(smba_idx, 2, "smba_idx")) {
 		dev_err(&PIIX4_dev->dev, "SMBus base address index region "
@@ -258,13 +266,22 @@
 	smba_en_hi = inb_p(smba_idx + 1);
 	release_region(smba_idx, 2);
 
-	if ((smba_en_lo & 1) == 0) {
+	if (!smb_en) {
+		smb_en_status = smba_en_lo & 0x10;
+		piix4_smba = smba_en_hi << 8;
+		if (aux)
+			piix4_smba |= 0x20;
+	} else {
+		smb_en_status = smba_en_lo & 0x01;
+		piix4_smba = ((smba_en_hi << 8) | smba_en_lo) & 0xffe0;
+	}
+
+	if (!smb_en_status) {
 		dev_err(&PIIX4_dev->dev,
-			"Host SMBus controller not enabled!\n");
+			"SMBus Host Controller not enabled!\n");
 		return -ENODEV;
 	}
 
-	piix4_smba = ((smba_en_hi << 8) | smba_en_lo) & 0xffe0;
 	if (acpi_check_region(piix4_smba, SMBIOSIZE, piix4_driver.name))
 		return -ENODEV;
 
@@ -277,7 +294,8 @@
 	/* Aux SMBus does not support IRQ information */
 	if (aux) {
 		dev_info(&PIIX4_dev->dev,
-			 "SMBus Host Controller at 0x%x\n", piix4_smba);
+			 "Auxiliary SMBus Host Controller at 0x%x\n",
+			 piix4_smba);
 		return piix4_smba;
 	}
 
@@ -292,9 +310,9 @@
 	release_region(piix4_smba + i2ccfg_offset, 1);
 
 	if (i2ccfg & 1)
-		dev_dbg(&PIIX4_dev->dev, "Using IRQ for SMBus.\n");
+		dev_dbg(&PIIX4_dev->dev, "Using IRQ for SMBus\n");
 	else
-		dev_dbg(&PIIX4_dev->dev, "Using SMI# for SMBus.\n");
+		dev_dbg(&PIIX4_dev->dev, "Using SMI# for SMBus\n");
 
 	dev_info(&PIIX4_dev->dev,
 		 "SMBus Host Controller at 0x%x, revision %d\n",
diff --git a/drivers/i2c/busses/i2c-pmcmsp.c b/drivers/i2c/busses/i2c-pmcmsp.c
index f6389e2..8564768 100644
--- a/drivers/i2c/busses/i2c-pmcmsp.c
+++ b/drivers/i2c/busses/i2c-pmcmsp.c
@@ -26,7 +26,6 @@
 
 #include <linux/kernel.h>
 #include <linux/module.h>
-#include <linux/init.h>
 #include <linux/platform_device.h>
 #include <linux/i2c.h>
 #include <linux/interrupt.h>
diff --git a/drivers/i2c/busses/i2c-powermac.c b/drivers/i2c/busses/i2c-powermac.c
index 8c87f4a..01e9677 100644
--- a/drivers/i2c/busses/i2c-powermac.c
+++ b/drivers/i2c/busses/i2c-powermac.c
@@ -24,7 +24,6 @@
 #include <linux/kernel.h>
 #include <linux/types.h>
 #include <linux/i2c.h>
-#include <linux/init.h>
 #include <linux/device.h>
 #include <linux/platform_device.h>
 #include <linux/of_irq.h>
diff --git a/drivers/i2c/busses/i2c-puv3.c b/drivers/i2c/busses/i2c-puv3.c
index ac80199..c83fc3c 100644
--- a/drivers/i2c/busses/i2c-puv3.c
+++ b/drivers/i2c/busses/i2c-puv3.c
@@ -17,7 +17,6 @@
 #include <linux/types.h>
 #include <linux/delay.h>
 #include <linux/i2c.h>
-#include <linux/init.h>
 #include <linux/clk.h>
 #include <linux/platform_device.h>
 #include <linux/io.h>
diff --git a/drivers/i2c/busses/i2c-rcar.c b/drivers/i2c/busses/i2c-rcar.c
index 2c2fd7c..0282d4d 100644
--- a/drivers/i2c/busses/i2c-rcar.c
+++ b/drivers/i2c/busses/i2c-rcar.c
@@ -26,7 +26,6 @@
 #include <linux/clk.h>
 #include <linux/delay.h>
 #include <linux/err.h>
-#include <linux/init.h>
 #include <linux/interrupt.h>
 #include <linux/io.h>
 #include <linux/i2c.h>
@@ -111,6 +110,7 @@
 	void __iomem *io;
 	struct i2c_adapter adap;
 	struct i2c_msg	*msg;
+	struct clk *clk;
 
 	spinlock_t lock;
 	wait_queue_head_t wait;
@@ -227,18 +227,12 @@
 				    u32 bus_speed,
 				    struct device *dev)
 {
-	struct clk *clkp = clk_get(dev, NULL);
 	u32 scgd, cdf;
 	u32 round, ick;
 	u32 scl;
 	u32 cdf_width;
 	unsigned long rate;
 
-	if (IS_ERR(clkp)) {
-		dev_err(dev, "couldn't get clock\n");
-		return PTR_ERR(clkp);
-	}
-
 	switch (priv->devtype) {
 	case I2C_RCAR_GEN1:
 		cdf_width = 2;
@@ -266,7 +260,7 @@
 	 * clkp : peripheral_clk
 	 * F[]  : integer up-valuation
 	 */
-	rate = clk_get_rate(clkp);
+	rate = clk_get_rate(priv->clk);
 	cdf = rate / 20000000;
 	if (cdf >= 1 << cdf_width) {
 		dev_err(dev, "Input clock %lu too high\n", rate);
@@ -308,7 +302,7 @@
 
 scgd_find:
 	dev_dbg(dev, "clk %d/%d(%lu), round %u, CDF:0x%x, SCGD: 0x%x\n",
-		scl, bus_speed, clk_get_rate(clkp), round, cdf, scgd);
+		scl, bus_speed, clk_get_rate(priv->clk), round, cdf, scgd);
 
 	/*
 	 * keep icccr value
@@ -604,7 +598,7 @@
 		 * error handling
 		 */
 		if (rcar_i2c_flags_has(priv, ID_NACK)) {
-			ret = -EREMOTEIO;
+			ret = -ENXIO;
 			break;
 		}
 
@@ -623,7 +617,7 @@
 
 	pm_runtime_put(dev);
 
-	if (ret < 0)
+	if (ret < 0 && ret != -ENXIO)
 		dev_err(dev, "error %d : %x\n", ret, priv->flags);
 
 	return ret;
@@ -664,6 +658,12 @@
 		return -ENOMEM;
 	}
 
+	priv->clk = devm_clk_get(dev, NULL);
+	if (IS_ERR(priv->clk)) {
+		dev_err(dev, "cannot get clock\n");
+		return PTR_ERR(priv->clk);
+	}
+
 	bus_speed = 100000; /* default 100 kHz */
 	ret = of_property_read_u32(dev->of_node, "clock-frequency", &bus_speed);
 	if (ret < 0 && pdata && pdata->bus_speed)
diff --git a/drivers/i2c/busses/i2c-scmi.c b/drivers/i2c/busses/i2c-scmi.c
index 5992355..dfc98df 100644
--- a/drivers/i2c/busses/i2c-scmi.c
+++ b/drivers/i2c/busses/i2c-scmi.c
@@ -12,7 +12,6 @@
 #include <linux/slab.h>
 #include <linux/kernel.h>
 #include <linux/stddef.h>
-#include <linux/init.h>
 #include <linux/i2c.h>
 #include <linux/acpi.h>
 
diff --git a/drivers/i2c/busses/i2c-sh7760.c b/drivers/i2c/busses/i2c-sh7760.c
index 5e8f136..d76f3d9 100644
--- a/drivers/i2c/busses/i2c-sh7760.c
+++ b/drivers/i2c/busses/i2c-sh7760.c
@@ -11,7 +11,6 @@
 #include <linux/delay.h>
 #include <linux/err.h>
 #include <linux/i2c.h>
-#include <linux/init.h>
 #include <linux/interrupt.h>
 #include <linux/ioport.h>
 #include <linux/platform_device.h>
diff --git a/drivers/i2c/busses/i2c-simtec.c b/drivers/i2c/busses/i2c-simtec.c
index 4fc87e7..294c80f 100644
--- a/drivers/i2c/busses/i2c-simtec.c
+++ b/drivers/i2c/busses/i2c-simtec.c
@@ -20,7 +20,6 @@
 
 #include <linux/kernel.h>
 #include <linux/module.h>
-#include <linux/init.h>
 #include <linux/delay.h>
 #include <linux/platform_device.h>
 #include <linux/slab.h>
diff --git a/drivers/i2c/busses/i2c-sis630.c b/drivers/i2c/busses/i2c-sis630.c
index 36a9556..19b8505 100644
--- a/drivers/i2c/busses/i2c-sis630.c
+++ b/drivers/i2c/busses/i2c-sis630.c
@@ -45,7 +45,6 @@
 #include <linux/delay.h>
 #include <linux/pci.h>
 #include <linux/ioport.h>
-#include <linux/init.h>
 #include <linux/i2c.h>
 #include <linux/acpi.h>
 #include <linux/io.h>
diff --git a/drivers/i2c/busses/i2c-sis96x.c b/drivers/i2c/busses/i2c-sis96x.c
index b9faf9b..f8aa0c2 100644
--- a/drivers/i2c/busses/i2c-sis96x.c
+++ b/drivers/i2c/busses/i2c-sis96x.c
@@ -36,7 +36,6 @@
 #include <linux/stddef.h>
 #include <linux/ioport.h>
 #include <linux/i2c.h>
-#include <linux/init.h>
 #include <linux/acpi.h>
 #include <linux/io.h>
 
diff --git a/drivers/i2c/busses/i2c-taos-evm.c b/drivers/i2c/busses/i2c-taos-evm.c
index 6ffa56e0..0576026 100644
--- a/drivers/i2c/busses/i2c-taos-evm.c
+++ b/drivers/i2c/busses/i2c-taos-evm.c
@@ -3,7 +3,7 @@
  * These devices include an I2C master which can be controlled over the
  * serial port.
  *
- * Copyright (C) 2007 Jean Delvare <khali@linux-fr.org>
+ * Copyright (C) 2007 Jean Delvare <jdelvare@suse.de>
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License as published by
@@ -321,7 +321,7 @@
 	serio_unregister_driver(&taos_drv);
 }
 
-MODULE_AUTHOR("Jean Delvare <khali@linux-fr.org>");
+MODULE_AUTHOR("Jean Delvare <jdelvare@suse.de>");
 MODULE_DESCRIPTION("TAOS evaluation module driver");
 MODULE_LICENSE("GPL");
 
diff --git a/drivers/i2c/busses/i2c-via.c b/drivers/i2c/busses/i2c-via.c
index be66251..49d7f14 100644
--- a/drivers/i2c/busses/i2c-via.c
+++ b/drivers/i2c/busses/i2c-via.c
@@ -22,7 +22,6 @@
 #include <linux/module.h>
 #include <linux/pci.h>
 #include <linux/ioport.h>
-#include <linux/init.h>
 #include <linux/i2c.h>
 #include <linux/i2c-algo-bit.h>
 #include <linux/io.h>
diff --git a/drivers/i2c/busses/i2c-viapro.c b/drivers/i2c/busses/i2c-viapro.c
index b2d90e1..40d36df 100644
--- a/drivers/i2c/busses/i2c-viapro.c
+++ b/drivers/i2c/busses/i2c-viapro.c
@@ -2,7 +2,7 @@
     Copyright (c) 1998 - 2002  Frodo Looijaard <frodol@dds.nl>,
     Philip Edelbrock <phil@netroedge.com>, Kyösti Mälkki <kmalkki@cc.hut.fi>,
     Mark D. Studebaker <mdsxyz123@yahoo.com>
-    Copyright (C) 2005 - 2008  Jean Delvare <khali@linux-fr.org>
+    Copyright (C) 2005 - 2008  Jean Delvare <jdelvare@suse.de>
 
     This program is free software; you can redistribute it and/or modify
     it under the terms of the GNU General Public License as published by
@@ -503,7 +503,7 @@
 
 MODULE_AUTHOR("Kyosti Malkki <kmalkki@cc.hut.fi>, "
 	      "Mark D. Studebaker <mdsxyz123@yahoo.com> and "
-	      "Jean Delvare <khali@linux-fr.org>");
+	      "Jean Delvare <jdelvare@suse.de>");
 MODULE_DESCRIPTION("vt82c596 SMBus driver");
 MODULE_LICENSE("GPL");
 
diff --git a/drivers/i2c/busses/i2c-xiic.c b/drivers/i2c/busses/i2c-xiic.c
index 6f9918f..2810750 100644
--- a/drivers/i2c/busses/i2c-xiic.c
+++ b/drivers/i2c/busses/i2c-xiic.c
@@ -30,7 +30,6 @@
  */
 #include <linux/kernel.h>
 #include <linux/module.h>
-#include <linux/init.h>
 #include <linux/errno.h>
 #include <linux/err.h>
 #include <linux/delay.h>
diff --git a/drivers/i2c/busses/i2c-xlr.c b/drivers/i2c/busses/i2c-xlr.c
index 7945b05..17f7352 100644
--- a/drivers/i2c/busses/i2c-xlr.c
+++ b/drivers/i2c/busses/i2c-xlr.c
@@ -11,7 +11,6 @@
 #include <linux/kernel.h>
 #include <linux/module.h>
 #include <linux/slab.h>
-#include <linux/init.h>
 #include <linux/ioport.h>
 #include <linux/delay.h>
 #include <linux/errno.h>
diff --git a/drivers/i2c/busses/scx200_i2c.c b/drivers/i2c/busses/scx200_i2c.c
index ae1258b..8eadf0f4 100644
--- a/drivers/i2c/busses/scx200_i2c.c
+++ b/drivers/i2c/busses/scx200_i2c.c
@@ -26,7 +26,6 @@
 #include <linux/module.h>
 #include <linux/errno.h>
 #include <linux/kernel.h>
-#include <linux/init.h>
 #include <linux/i2c.h>
 #include <linux/i2c-algo-bit.h>
 #include <linux/io.h>
diff --git a/drivers/i2c/i2c-core.c b/drivers/i2c/i2c-core.c
index c4c5588..5fb80b8 100644
--- a/drivers/i2c/i2c-core.c
+++ b/drivers/i2c/i2c-core.c
@@ -21,7 +21,7 @@
 /* With some changes from Kyösti Mälkki <kmalkki@cc.hut.fi>.
    All SMBus-related things are written by Frodo Looijaard <frodol@dds.nl>
    SMBus 2.0 support by Mark Studebaker <mdsxyz123@yahoo.com> and
-   Jean Delvare <khali@linux-fr.org>
+   Jean Delvare <jdelvare@suse.de>
    Mux support by Rodolfo Giometti <giometti@enneenne.com> and
    Michael Lawnick <michael.lawnick.ext@nsn.com>
    OF support is copyright (c) 2008 Jochen Friedrich <jochen@scram.de>
@@ -261,10 +261,9 @@
 
 	acpi_dev_pm_attach(&client->dev, true);
 	status = driver->probe(client, i2c_match_id(driver->id_table, client));
-	if (status) {
-		i2c_set_clientdata(client, NULL);
+	if (status)
 		acpi_dev_pm_detach(&client->dev, true);
-	}
+
 	return status;
 }
 
@@ -272,7 +271,7 @@
 {
 	struct i2c_client	*client = i2c_verify_client(dev);
 	struct i2c_driver	*driver;
-	int			status;
+	int status = 0;
 
 	if (!client || !dev->driver)
 		return 0;
@@ -281,12 +280,8 @@
 	if (driver->remove) {
 		dev_dbg(dev, "remove\n");
 		status = driver->remove(client);
-	} else {
-		dev->driver = NULL;
-		status = 0;
 	}
-	if (status == 0)
-		i2c_set_clientdata(client, NULL);
+
 	acpi_dev_pm_detach(&client->dev, true);
 	return status;
 }
diff --git a/drivers/i2c/i2c-smbus.c b/drivers/i2c/i2c-smbus.c
index c99b229..fc99f0d 100644
--- a/drivers/i2c/i2c-smbus.c
+++ b/drivers/i2c/i2c-smbus.c
@@ -2,7 +2,7 @@
  * i2c-smbus.c - SMBus extensions to the I2C protocol
  *
  * Copyright (C) 2008 David Brownell
- * Copyright (C) 2010 Jean Delvare <khali@linux-fr.org>
+ * Copyright (C) 2010 Jean Delvare <jdelvare@suse.de>
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License as published by
@@ -246,6 +246,6 @@
 
 module_i2c_driver(smbalert_driver);
 
-MODULE_AUTHOR("Jean Delvare <khali@linux-fr.org>");
+MODULE_AUTHOR("Jean Delvare <jdelvare@suse.de>");
 MODULE_DESCRIPTION("SMBus protocol extensions support");
 MODULE_LICENSE("GPL");
diff --git a/drivers/i2c/i2c-stub.c b/drivers/i2c/i2c-stub.c
index d0a9c59..77e4849 100644
--- a/drivers/i2c/i2c-stub.c
+++ b/drivers/i2c/i2c-stub.c
@@ -2,7 +2,7 @@
     i2c-stub.c - I2C/SMBus chip emulator
 
     Copyright (c) 2004 Mark M. Hoffman <mhoffman@lightlink.com>
-    Copyright (C) 2007, 2012 Jean Delvare <khali@linux-fr.org>
+    Copyright (C) 2007, 2012 Jean Delvare <jdelvare@suse.de>
 
     This program is free software; you can redistribute it and/or modify
     it under the terms of the GNU General Public License as published by
diff --git a/drivers/i2c/muxes/i2c-arb-gpio-challenge.c b/drivers/i2c/muxes/i2c-arb-gpio-challenge.c
index c58e093..69afffa 100644
--- a/drivers/i2c/muxes/i2c-arb-gpio-challenge.c
+++ b/drivers/i2c/muxes/i2c-arb-gpio-challenge.c
@@ -19,7 +19,6 @@
 #include <linux/kernel.h>
 #include <linux/i2c.h>
 #include <linux/i2c-mux.h>
-#include <linux/init.h>
 #include <linux/module.h>
 #include <linux/of_gpio.h>
 #include <linux/platform_device.h>
diff --git a/drivers/i2c/muxes/i2c-mux-gpio.c b/drivers/i2c/muxes/i2c-mux-gpio.c
index 8a8c56f..d8989c8 100644
--- a/drivers/i2c/muxes/i2c-mux-gpio.c
+++ b/drivers/i2c/muxes/i2c-mux-gpio.c
@@ -12,7 +12,6 @@
 #include <linux/i2c-mux.h>
 #include <linux/i2c-mux-gpio.h>
 #include <linux/platform_device.h>
-#include <linux/init.h>
 #include <linux/module.h>
 #include <linux/slab.h>
 #include <linux/gpio.h>
diff --git a/drivers/i2c/muxes/i2c-mux-pca9541.c b/drivers/i2c/muxes/i2c-mux-pca9541.c
index c4f08ad..cb77277 100644
--- a/drivers/i2c/muxes/i2c-mux-pca9541.c
+++ b/drivers/i2c/muxes/i2c-mux-pca9541.c
@@ -17,7 +17,6 @@
  */
 
 #include <linux/module.h>
-#include <linux/init.h>
 #include <linux/jiffies.h>
 #include <linux/delay.h>
 #include <linux/slab.h>
diff --git a/drivers/i2c/muxes/i2c-mux-pca954x.c b/drivers/i2c/muxes/i2c-mux-pca954x.c
index e835304..550bd36 100644
--- a/drivers/i2c/muxes/i2c-mux-pca954x.c
+++ b/drivers/i2c/muxes/i2c-mux-pca954x.c
@@ -28,7 +28,7 @@
  * Based on:
  *	i2c-virtual_cb.c from Brian Kuschak <bkuschak@yahoo.com>
  * and
- *	pca9540.c from Jean Delvare <khali@linux-fr.org>.
+ *	pca9540.c from Jean Delvare <jdelvare@suse.de>.
  *
  * This file is licensed under the terms of the GNU General Public
  * License version 2. This program is licensed "as is" without any
@@ -40,7 +40,6 @@
 #include <linux/i2c.h>
 #include <linux/i2c-mux.h>
 #include <linux/i2c/pca954x.h>
-#include <linux/init.h>
 #include <linux/module.h>
 #include <linux/of_gpio.h>
 #include <linux/slab.h>
diff --git a/drivers/i2c/muxes/i2c-mux-pinctrl.c b/drivers/i2c/muxes/i2c-mux-pinctrl.c
index d7978dc..4ff0ef3 100644
--- a/drivers/i2c/muxes/i2c-mux-pinctrl.c
+++ b/drivers/i2c/muxes/i2c-mux-pinctrl.c
@@ -18,7 +18,6 @@
 
 #include <linux/i2c.h>
 #include <linux/i2c-mux.h>
-#include <linux/init.h>
 #include <linux/module.h>
 #include <linux/pinctrl/consumer.h>
 #include <linux/i2c-mux-pinctrl.h>
diff --git a/drivers/ide/ide-cd_verbose.c b/drivers/ide/ide-cd_verbose.c
index 6490a2d..f079ca2 100644
--- a/drivers/ide/ide-cd_verbose.c
+++ b/drivers/ide/ide-cd_verbose.c
@@ -9,7 +9,9 @@
 #include <linux/kernel.h>
 #include <linux/blkdev.h>
 #include <linux/cdrom.h>
+#include <linux/ide.h>
 #include <scsi/scsi.h>
+#include "ide-cd.h"
 
 #ifndef CONFIG_BLK_DEV_IDECD_VERBOSE_ERRORS
 void ide_cd_log_error(const char *name, struct request *failed_command,
diff --git a/drivers/ide/ide-pio-blacklist.c b/drivers/ide/ide-pio-blacklist.c
index a8c2c8f8..40e683a 100644
--- a/drivers/ide/ide-pio-blacklist.c
+++ b/drivers/ide/ide-pio-blacklist.c
@@ -7,6 +7,7 @@
  */
 
 #include <linux/string.h>
+#include <linux/ide.h>
 
 static struct ide_pio_info {
 	const char	*name;
diff --git a/drivers/iio/accel/bma180.c b/drivers/iio/accel/bma180.c
index 3bec922..bfec313 100644
--- a/drivers/iio/accel/bma180.c
+++ b/drivers/iio/accel/bma180.c
@@ -447,14 +447,14 @@
 	{ },
 };
 
-#define BMA180_CHANNEL(_index) {					\
+#define BMA180_CHANNEL(_axis) {					\
 	.type = IIO_ACCEL,						\
-	.indexed = 1,							\
-	.channel = (_index),						\
+	.modified = 1,							\
+	.channel2 = IIO_MOD_##_axis,					\
 	.info_mask_separate = BIT(IIO_CHAN_INFO_RAW) |			\
 		BIT(IIO_CHAN_INFO_LOW_PASS_FILTER_3DB_FREQUENCY),	\
 	.info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE),		\
-	.scan_index = (_index),						\
+	.scan_index = AXIS_##_axis,					\
 	.scan_type = {							\
 		.sign = 's',						\
 		.realbits = 14,						\
@@ -465,10 +465,10 @@
 }
 
 static const struct iio_chan_spec bma180_channels[] = {
-	BMA180_CHANNEL(AXIS_X),
-	BMA180_CHANNEL(AXIS_Y),
-	BMA180_CHANNEL(AXIS_Z),
-	IIO_CHAN_SOFT_TIMESTAMP(4),
+	BMA180_CHANNEL(X),
+	BMA180_CHANNEL(Y),
+	BMA180_CHANNEL(Z),
+	IIO_CHAN_SOFT_TIMESTAMP(3),
 };
 
 static irqreturn_t bma180_trigger_handler(int irq, void *p)
diff --git a/drivers/iio/adc/max1363.c b/drivers/iio/adc/max1363.c
index e283f2f..3602592 100644
--- a/drivers/iio/adc/max1363.c
+++ b/drivers/iio/adc/max1363.c
@@ -1560,7 +1560,7 @@
 	st->client = client;
 
 	st->vref_uv = st->chip_info->int_vref_mv * 1000;
-	vref = devm_regulator_get(&client->dev, "vref");
+	vref = devm_regulator_get_optional(&client->dev, "vref");
 	if (!IS_ERR(vref)) {
 		int vref_uv;
 
diff --git a/drivers/iio/imu/adis16400.h b/drivers/iio/imu/adis16400.h
index 2f8f9d6..0916bf6 100644
--- a/drivers/iio/imu/adis16400.h
+++ b/drivers/iio/imu/adis16400.h
@@ -189,6 +189,7 @@
 	ADIS16300_SCAN_INCLI_X,
 	ADIS16300_SCAN_INCLI_Y,
 	ADIS16400_SCAN_ADC,
+	ADIS16400_SCAN_TIMESTAMP,
 };
 
 #ifdef CONFIG_IIO_BUFFER
diff --git a/drivers/iio/imu/adis16400_core.c b/drivers/iio/imu/adis16400_core.c
index 368660d..7c582f7 100644
--- a/drivers/iio/imu/adis16400_core.c
+++ b/drivers/iio/imu/adis16400_core.c
@@ -632,7 +632,7 @@
 	ADIS16400_MAGN_CHAN(Z, ADIS16400_ZMAGN_OUT, 14),
 	ADIS16400_TEMP_CHAN(ADIS16400_TEMP_OUT, 12),
 	ADIS16400_AUX_ADC_CHAN(ADIS16400_AUX_ADC, 12),
-	IIO_CHAN_SOFT_TIMESTAMP(12)
+	IIO_CHAN_SOFT_TIMESTAMP(ADIS16400_SCAN_TIMESTAMP),
 };
 
 static const struct iio_chan_spec adis16448_channels[] = {
@@ -659,7 +659,7 @@
 		},
 	},
 	ADIS16400_TEMP_CHAN(ADIS16448_TEMP_OUT, 12),
-	IIO_CHAN_SOFT_TIMESTAMP(11)
+	IIO_CHAN_SOFT_TIMESTAMP(ADIS16400_SCAN_TIMESTAMP),
 };
 
 static const struct iio_chan_spec adis16350_channels[] = {
@@ -677,7 +677,7 @@
 	ADIS16400_MOD_TEMP_CHAN(X, ADIS16350_XTEMP_OUT, 12),
 	ADIS16400_MOD_TEMP_CHAN(Y, ADIS16350_YTEMP_OUT, 12),
 	ADIS16400_MOD_TEMP_CHAN(Z, ADIS16350_ZTEMP_OUT, 12),
-	IIO_CHAN_SOFT_TIMESTAMP(11)
+	IIO_CHAN_SOFT_TIMESTAMP(ADIS16400_SCAN_TIMESTAMP),
 };
 
 static const struct iio_chan_spec adis16300_channels[] = {
@@ -690,7 +690,7 @@
 	ADIS16400_AUX_ADC_CHAN(ADIS16300_AUX_ADC, 12),
 	ADIS16400_INCLI_CHAN(X, ADIS16300_PITCH_OUT, 13),
 	ADIS16400_INCLI_CHAN(Y, ADIS16300_ROLL_OUT, 13),
-	IIO_CHAN_SOFT_TIMESTAMP(14)
+	IIO_CHAN_SOFT_TIMESTAMP(ADIS16400_SCAN_TIMESTAMP),
 };
 
 static const struct iio_chan_spec adis16334_channels[] = {
@@ -701,7 +701,7 @@
 	ADIS16400_ACCEL_CHAN(Y, ADIS16400_YACCL_OUT, 14),
 	ADIS16400_ACCEL_CHAN(Z, ADIS16400_ZACCL_OUT, 14),
 	ADIS16400_TEMP_CHAN(ADIS16350_XTEMP_OUT, 12),
-	IIO_CHAN_SOFT_TIMESTAMP(8)
+	IIO_CHAN_SOFT_TIMESTAMP(ADIS16400_SCAN_TIMESTAMP),
 };
 
 static struct attribute *adis16400_attributes[] = {
diff --git a/drivers/iio/light/tsl2563.c b/drivers/iio/light/tsl2563.c
index 3d81101..94daa9f 100644
--- a/drivers/iio/light/tsl2563.c
+++ b/drivers/iio/light/tsl2563.c
@@ -460,10 +460,14 @@
 {
 	struct tsl2563_chip *chip = iio_priv(indio_dev);
 
-	if (chan->channel == IIO_MOD_LIGHT_BOTH)
+	if (mask != IIO_CHAN_INFO_CALIBSCALE)
+		return -EINVAL;
+	if (chan->channel2 == IIO_MOD_LIGHT_BOTH)
 		chip->calib0 = calib_from_sysfs(val);
-	else
+	else if (chan->channel2 == IIO_MOD_LIGHT_IR)
 		chip->calib1 = calib_from_sysfs(val);
+	else
+		return -EINVAL;
 
 	return 0;
 }
@@ -472,14 +476,14 @@
 			    struct iio_chan_spec const *chan,
 			    int *val,
 			    int *val2,
-			    long m)
+			    long mask)
 {
 	int ret = -EINVAL;
 	u32 calib0, calib1;
 	struct tsl2563_chip *chip = iio_priv(indio_dev);
 
 	mutex_lock(&chip->lock);
-	switch (m) {
+	switch (mask) {
 	case IIO_CHAN_INFO_RAW:
 	case IIO_CHAN_INFO_PROCESSED:
 		switch (chan->type) {
@@ -498,7 +502,7 @@
 			ret = tsl2563_get_adc(chip);
 			if (ret)
 				goto error_ret;
-			if (chan->channel == 0)
+			if (chan->channel2 == IIO_MOD_LIGHT_BOTH)
 				*val = chip->data0;
 			else
 				*val = chip->data1;
@@ -510,7 +514,7 @@
 		break;
 
 	case IIO_CHAN_INFO_CALIBSCALE:
-		if (chan->channel == 0)
+		if (chan->channel2 == IIO_MOD_LIGHT_BOTH)
 			*val = calib_to_sysfs(chip->calib0);
 		else
 			*val = calib_to_sysfs(chip->calib1);
diff --git a/drivers/iio/magnetometer/ak8975.c b/drivers/iio/magnetometer/ak8975.c
index ff284e5..0542354 100644
--- a/drivers/iio/magnetometer/ak8975.c
+++ b/drivers/iio/magnetometer/ak8975.c
@@ -85,6 +85,7 @@
 #define AK8975_MAX_CONVERSION_TIMEOUT	500
 #define AK8975_CONVERSION_DONE_POLL_TIME 10
 #define AK8975_DATA_READY_TIMEOUT	((100*HZ)/1000)
+#define RAW_TO_GAUSS(asa) ((((asa) + 128) * 3000) / 256)
 
 /*
  * Per-instance context data for the device.
@@ -265,15 +266,15 @@
  *
  * Since 1uT = 0.01 gauss, our final scale factor becomes:
  *
- * Hadj = H * ((ASA + 128) / 256) * 3/10 * 100
- * Hadj = H * ((ASA + 128) * 30 / 256
+ * Hadj = H * ((ASA + 128) / 256) * 3/10 * 1/100
+ * Hadj = H * ((ASA + 128) * 0.003) / 256
  *
  * Since ASA doesn't change, we cache the resultant scale factor into the
  * device context in ak8975_setup().
  */
-	data->raw_to_gauss[0] = ((data->asa[0] + 128) * 30) >> 8;
-	data->raw_to_gauss[1] = ((data->asa[1] + 128) * 30) >> 8;
-	data->raw_to_gauss[2] = ((data->asa[2] + 128) * 30) >> 8;
+	data->raw_to_gauss[0] = RAW_TO_GAUSS(data->asa[0]);
+	data->raw_to_gauss[1] = RAW_TO_GAUSS(data->asa[1]);
+	data->raw_to_gauss[2] = RAW_TO_GAUSS(data->asa[2]);
 
 	return 0;
 }
@@ -428,8 +429,9 @@
 	case IIO_CHAN_INFO_RAW:
 		return ak8975_read_axis(indio_dev, chan->address, val);
 	case IIO_CHAN_INFO_SCALE:
-		*val = data->raw_to_gauss[chan->address];
-		return IIO_VAL_INT;
+		*val = 0;
+		*val2 = data->raw_to_gauss[chan->address];
+		return IIO_VAL_INT_PLUS_MICRO;
 	}
 	return -EINVAL;
 }
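
Worked example for the scale change above (a stand-alone check of the RAW_TO_GAUSS() macro, not driver code): for, say, asa = 128 the macro yields 3000, i.e. 0.003000 Gauss per raw LSB, which read_raw now reports as val = 0, val2 = 3000 with IIO_VAL_INT_PLUS_MICRO instead of a bare integer.

	#include <stdio.h>

	#define RAW_TO_GAUSS(asa) ((((asa) + 128) * 3000) / 256)

	int main(void)
	{
		int asa = 128;			/* example sensitivity adjustment value */
		int micro = RAW_TO_GAUSS(asa);	/* 3000 */

		/* reported to IIO as val = 0, val2 = 3000 (IIO_VAL_INT_PLUS_MICRO),
		 * i.e. 0.003000 Gauss per raw LSB */
		printf("scale = 0.%06d Gauss/LSB\n", micro);
		return 0;
	}
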
diff --git a/drivers/iio/magnetometer/mag3110.c b/drivers/iio/magnetometer/mag3110.c
index 4b65b6d..f66955f 100644
--- a/drivers/iio/magnetometer/mag3110.c
+++ b/drivers/iio/magnetometer/mag3110.c
@@ -106,7 +106,7 @@
 
 	while (n-- > 0)
 		len += scnprintf(buf + len, PAGE_SIZE - len,
-			"%d.%d ", vals[n][0], vals[n][1]);
+			"%d.%06d ", vals[n][0], vals[n][1]);
 
 	/* replace trailing space by newline */
 	buf[len - 1] = '\n';
@@ -154,6 +154,9 @@
 
 	switch (mask) {
 	case IIO_CHAN_INFO_RAW:
+		if (iio_buffer_enabled(indio_dev))
+			return -EBUSY;
+
 		switch (chan->type) {
 		case IIO_MAGN: /* in 0.1 uT / LSB */
 			ret = mag3110_read(data, buffer);
@@ -199,6 +202,9 @@
 	struct mag3110_data *data = iio_priv(indio_dev);
 	int rate;
 
+	if (iio_buffer_enabled(indio_dev))
+		return -EBUSY;
+
 	switch (mask) {
 	case IIO_CHAN_INFO_SAMP_FREQ:
 		rate = mag3110_get_samp_freq_index(data, val, val2);
diff --git a/drivers/infiniband/hw/amso1100/c2.c b/drivers/infiniband/hw/amso1100/c2.c
index d53cf51..00400c3 100644
--- a/drivers/infiniband/hw/amso1100/c2.c
+++ b/drivers/infiniband/hw/amso1100/c2.c
@@ -1082,6 +1082,7 @@
 
 	/* Initialize network device */
 	if ((netdev = c2_devinit(c2dev, mmio_regs)) == NULL) {
+		ret = -ENOMEM;
 		iounmap(mmio_regs);
 		goto bail4;
 	}
@@ -1151,7 +1152,8 @@
 		goto bail10;
 	}
 
-	if (c2_register_device(c2dev))
+	ret = c2_register_device(c2dev);
+	if (ret)
 		goto bail10;
 
 	return 0;
diff --git a/drivers/infiniband/hw/amso1100/c2_rnic.c b/drivers/infiniband/hw/amso1100/c2_rnic.c
index b7c9869..d2a6d96 100644
--- a/drivers/infiniband/hw/amso1100/c2_rnic.c
+++ b/drivers/infiniband/hw/amso1100/c2_rnic.c
@@ -576,7 +576,8 @@
 		goto bail4;
 
 	/* Initialize cached the adapter limits */
-	if (c2_rnic_query(c2dev, &c2dev->props))
+	err = c2_rnic_query(c2dev, &c2dev->props);
+	if (err)
 		goto bail5;
 
 	/* Initialize the PD pool */
diff --git a/drivers/infiniband/hw/cxgb4/cm.c b/drivers/infiniband/hw/cxgb4/cm.c
index 4512687..d286bde 100644
--- a/drivers/infiniband/hw/cxgb4/cm.c
+++ b/drivers/infiniband/hw/cxgb4/cm.c
@@ -3352,6 +3352,7 @@
 		goto free_dst;
 	}
 
+	neigh_release(neigh);
 	step = dev->rdev.lldi.nrxq / dev->rdev.lldi.nchan;
 	rss_qid = dev->rdev.lldi.rxq_ids[pi->port_id * step];
 	window = (__force u16) htons((__force u16)tcph->window);
diff --git a/drivers/infiniband/hw/mlx4/main.c b/drivers/infiniband/hw/mlx4/main.c
index c2702f5..e81c554 100644
--- a/drivers/infiniband/hw/mlx4/main.c
+++ b/drivers/infiniband/hw/mlx4/main.c
@@ -347,7 +347,7 @@
 	props->active_width	=  (((u8 *)mailbox->buf)[5] == 0x40) ?
 						IB_WIDTH_4X : IB_WIDTH_1X;
 	props->active_speed	= IB_SPEED_QDR;
-	props->port_cap_flags	= IB_PORT_CM_SUP;
+	props->port_cap_flags	= IB_PORT_CM_SUP | IB_PORT_IP_BASED_GIDS;
 	props->gid_tbl_len	= mdev->dev->caps.gid_table_len[port];
 	props->max_msg_sz	= mdev->dev->caps.max_msg_sz;
 	props->pkey_tbl_len	= 1;
@@ -1357,6 +1357,21 @@
 	&dev_attr_board_id
 };
 
+static void mlx4_addrconf_ifid_eui48(u8 *eui, u16 vlan_id,
+				     struct net_device *dev)
+{
+	memcpy(eui, dev->dev_addr, 3);
+	memcpy(eui + 5, dev->dev_addr + 3, 3);
+	if (vlan_id < 0x1000) {
+		eui[3] = vlan_id >> 8;
+		eui[4] = vlan_id & 0xff;
+	} else {
+		eui[3] = 0xff;
+		eui[4] = 0xfe;
+	}
+	eui[0] ^= 2;
+}
+
 static void update_gids_task(struct work_struct *work)
 {
 	struct update_gid_work *gw = container_of(work, struct update_gid_work, work);
@@ -1393,7 +1408,6 @@
 	struct mlx4_cmd_mailbox *mailbox;
 	union ib_gid *gids;
 	int err;
-	int i;
 	struct mlx4_dev	*dev = gw->dev->dev;
 
 	mailbox = mlx4_alloc_cmd_mailbox(dev);
@@ -1405,18 +1419,16 @@
 	gids = mailbox->buf;
 	memcpy(gids, gw->gids, sizeof(gw->gids));
 
-	for (i = 1; i < gw->dev->num_ports + 1; i++) {
-		if (mlx4_ib_port_link_layer(&gw->dev->ib_dev, i) ==
-					    IB_LINK_LAYER_ETHERNET) {
-			err = mlx4_cmd(dev, mailbox->dma,
-				       MLX4_SET_PORT_GID_TABLE << 8 | i,
-				       1, MLX4_CMD_SET_PORT,
-				       MLX4_CMD_TIME_CLASS_B,
-				       MLX4_CMD_WRAPPED);
-			if (err)
-				pr_warn(KERN_WARNING
-					"set port %d command failed\n", i);
-		}
+	if (mlx4_ib_port_link_layer(&gw->dev->ib_dev, gw->port) ==
+				    IB_LINK_LAYER_ETHERNET) {
+		err = mlx4_cmd(dev, mailbox->dma,
+			       MLX4_SET_PORT_GID_TABLE << 8 | gw->port,
+			       1, MLX4_CMD_SET_PORT,
+			       MLX4_CMD_TIME_CLASS_B,
+			       MLX4_CMD_WRAPPED);
+		if (err)
+			pr_warn(KERN_WARNING
+				"set port %d command failed\n", gw->port);
 	}
 
 	mlx4_free_cmd_mailbox(dev, mailbox);
@@ -1425,7 +1437,8 @@
 }
 
 static int update_gid_table(struct mlx4_ib_dev *dev, int port,
-			    union ib_gid *gid, int clear)
+			    union ib_gid *gid, int clear,
+			    int default_gid)
 {
 	struct update_gid_work *work;
 	int i;
@@ -1434,26 +1447,31 @@
 	int found = -1;
 	int max_gids;
 
-	max_gids = dev->dev->caps.gid_table_len[port];
-	for (i = 0; i < max_gids; ++i) {
-		if (!memcmp(&dev->iboe.gid_table[port - 1][i], gid,
-			    sizeof(*gid)))
-			found = i;
-
-		if (clear) {
-			if (found >= 0) {
-				need_update = 1;
-				dev->iboe.gid_table[port - 1][found] = zgid;
-				break;
-			}
-		} else {
-			if (found >= 0)
-				break;
-
-			if (free < 0 &&
-			    !memcmp(&dev->iboe.gid_table[port - 1][i], &zgid,
+	if (default_gid) {
+		free = 0;
+	} else {
+		max_gids = dev->dev->caps.gid_table_len[port];
+		for (i = 1; i < max_gids; ++i) {
+			if (!memcmp(&dev->iboe.gid_table[port - 1][i], gid,
 				    sizeof(*gid)))
-				free = i;
+				found = i;
+
+			if (clear) {
+				if (found >= 0) {
+					need_update = 1;
+					dev->iboe.gid_table[port - 1][found] =
+						zgid;
+					break;
+				}
+			} else {
+				if (found >= 0)
+					break;
+
+				if (free < 0 &&
+				    !memcmp(&dev->iboe.gid_table[port - 1][i],
+					    &zgid, sizeof(*gid)))
+					free = i;
+			}
 		}
 	}
 
@@ -1478,18 +1496,26 @@
 	return 0;
 }
 
-static int reset_gid_table(struct mlx4_ib_dev *dev)
+static void mlx4_make_default_gid(struct  net_device *dev, union ib_gid *gid)
+{
+	gid->global.subnet_prefix = cpu_to_be64(0xfe80000000000000LL);
+	mlx4_addrconf_ifid_eui48(&gid->raw[8], 0xffff, dev);
+}
+
+
+static int reset_gid_table(struct mlx4_ib_dev *dev, u8 port)
 {
 	struct update_gid_work *work;
 
-
 	work = kzalloc(sizeof(*work), GFP_ATOMIC);
 	if (!work)
 		return -ENOMEM;
-	memset(dev->iboe.gid_table, 0, sizeof(dev->iboe.gid_table));
+
+	memset(dev->iboe.gid_table[port - 1], 0, sizeof(work->gids));
 	memset(work->gids, 0, sizeof(work->gids));
 	INIT_WORK(&work->work, reset_gids_task);
 	work->dev = dev;
+	work->port = port;
 	queue_work(wq, &work->work);
 	return 0;
 }
@@ -1502,6 +1528,12 @@
 	struct net_device *real_dev = rdma_vlan_dev_real_dev(event_netdev) ?
 				rdma_vlan_dev_real_dev(event_netdev) :
 				event_netdev;
+	union ib_gid default_gid;
+
+	mlx4_make_default_gid(real_dev, &default_gid);
+
+	if (!memcmp(gid, &default_gid, sizeof(*gid)))
+		return 0;
 
 	if (event != NETDEV_DOWN && event != NETDEV_UP)
 		return 0;
@@ -1520,7 +1552,7 @@
 		     (!netif_is_bond_master(real_dev) &&
 		     (real_dev == iboe->netdevs[port - 1])))
 			update_gid_table(ibdev, port, gid,
-					 event == NETDEV_DOWN);
+					 event == NETDEV_DOWN, 0);
 
 	spin_unlock(&iboe->lock);
 	return 0;
@@ -1536,7 +1568,6 @@
 				rdma_vlan_dev_real_dev(dev) : dev;
 
 	iboe = &ibdev->iboe;
-	spin_lock(&iboe->lock);
 
 	for (port = 1; port <= MLX4_MAX_PORTS; ++port)
 		if ((netif_is_bond_master(real_dev) &&
@@ -1545,8 +1576,6 @@
 		     (real_dev == iboe->netdevs[port - 1])))
 			break;
 
-	spin_unlock(&iboe->lock);
-
 	if ((port == 0) || (port > MLX4_MAX_PORTS))
 		return 0;
 	else
@@ -1607,7 +1636,7 @@
 			/*ifa->ifa_address;*/
 			ipv6_addr_set_v4mapped(ifa->ifa_address,
 					       (struct in6_addr *)&gid);
-			update_gid_table(ibdev, port, &gid, 0);
+			update_gid_table(ibdev, port, &gid, 0, 0);
 		}
 		endfor_ifa(in_dev);
 		in_dev_put(in_dev);
@@ -1619,7 +1648,7 @@
 		read_lock_bh(&in6_dev->lock);
 		list_for_each_entry(ifp, &in6_dev->addr_list, if_list) {
 			pgid = (union ib_gid *)&ifp->addr;
-			update_gid_table(ibdev, port, pgid, 0);
+			update_gid_table(ibdev, port, pgid, 0, 0);
 		}
 		read_unlock_bh(&in6_dev->lock);
 		in6_dev_put(in6_dev);
@@ -1627,14 +1656,26 @@
 #endif
 }
 
+static void mlx4_ib_set_default_gid(struct mlx4_ib_dev *ibdev,
+				 struct  net_device *dev, u8 port)
+{
+	union ib_gid gid;
+	mlx4_make_default_gid(dev, &gid);
+	update_gid_table(ibdev, port, &gid, 0, 1);
+}
+
 static int mlx4_ib_init_gid_table(struct mlx4_ib_dev *ibdev)
 {
 	struct	net_device *dev;
+	struct mlx4_ib_iboe *iboe = &ibdev->iboe;
+	int i;
 
-	if (reset_gid_table(ibdev))
-		return -1;
+	for (i = 1; i <= ibdev->num_ports; ++i)
+		if (reset_gid_table(ibdev, i))
+			return -1;
 
 	read_lock(&dev_base_lock);
+	spin_lock(&iboe->lock);
 
 	for_each_netdev(&init_net, dev) {
 		u8 port = mlx4_ib_get_dev_port(dev, ibdev);
@@ -1642,6 +1683,7 @@
 			mlx4_ib_get_dev_addr(dev, ibdev, port);
 	}
 
+	spin_unlock(&iboe->lock);
 	read_unlock(&dev_base_lock);
 
 	return 0;
@@ -1656,25 +1698,57 @@
 
 	spin_lock(&iboe->lock);
 	mlx4_foreach_ib_transport_port(port, ibdev->dev) {
+		enum ib_port_state	port_state = IB_PORT_NOP;
 		struct net_device *old_master = iboe->masters[port - 1];
+		struct net_device *curr_netdev;
 		struct net_device *curr_master;
+
 		iboe->netdevs[port - 1] =
 			mlx4_get_protocol_dev(ibdev->dev, MLX4_PROT_ETH, port);
+		if (iboe->netdevs[port - 1])
+			mlx4_ib_set_default_gid(ibdev,
+						iboe->netdevs[port - 1], port);
+		curr_netdev = iboe->netdevs[port - 1];
 
 		if (iboe->netdevs[port - 1] &&
 		    netif_is_bond_slave(iboe->netdevs[port - 1])) {
-			rtnl_lock();
 			iboe->masters[port - 1] = netdev_master_upper_dev_get(
 				iboe->netdevs[port - 1]);
-			rtnl_unlock();
+		} else {
+			iboe->masters[port - 1] = NULL;
 		}
 		curr_master = iboe->masters[port - 1];
 
+		if (curr_netdev) {
+			port_state = (netif_running(curr_netdev) && netif_carrier_ok(curr_netdev)) ?
+						IB_PORT_ACTIVE : IB_PORT_DOWN;
+			mlx4_ib_set_default_gid(ibdev, curr_netdev, port);
+		} else {
+			reset_gid_table(ibdev, port);
+		}
+		/* if using bonding/team and a slave port is down, we don't want the bond IP
+		 * based gids in the table since flows that select port by gid may get
+		 * the down port.
+		 */
+		if (curr_master && (port_state == IB_PORT_DOWN)) {
+			reset_gid_table(ibdev, port);
+			mlx4_ib_set_default_gid(ibdev, curr_netdev, port);
+		}
 		/* if bonding is used it is possible that we add it to masters
-		    only after IP address is assigned to the net bonding
-		    interface */
-		if (curr_master && (old_master != curr_master))
+		 * only after IP address is assigned to the net bonding
+		 * interface.
+		*/
+		if (curr_master && (old_master != curr_master)) {
+			reset_gid_table(ibdev, port);
+			mlx4_ib_set_default_gid(ibdev, curr_netdev, port);
 			mlx4_ib_get_dev_addr(curr_master, ibdev, port);
+		}
+
+		if (!curr_master && (old_master != curr_master)) {
+			reset_gid_table(ibdev, port);
+			mlx4_ib_set_default_gid(ibdev, curr_netdev, port);
+			mlx4_ib_get_dev_addr(curr_netdev, ibdev, port);
+		}
 	}
 
 	spin_unlock(&iboe->lock);
@@ -1810,6 +1884,7 @@
 	int i, j;
 	int err;
 	struct mlx4_ib_iboe *iboe;
+	int ib_num_ports = 0;
 
 	pr_info_once("%s", mlx4_ib_version);
 
@@ -1985,10 +2060,14 @@
 				ibdev->counters[i] = -1;
 	}
 
+	mlx4_foreach_port(i, dev, MLX4_PORT_TYPE_IB)
+		ib_num_ports++;
+
 	spin_lock_init(&ibdev->sm_lock);
 	mutex_init(&ibdev->cap_mask_mutex);
 
-	if (ibdev->steering_support == MLX4_STEERING_MODE_DEVICE_MANAGED) {
+	if (ibdev->steering_support == MLX4_STEERING_MODE_DEVICE_MANAGED &&
+	    ib_num_ports) {
 		ibdev->steer_qpn_count = MLX4_IB_UC_MAX_NUM_QPS;
 		err = mlx4_qp_reserve_range(dev, ibdev->steer_qpn_count,
 					    MLX4_IB_UC_STEER_QPN_ALIGN,
@@ -2051,7 +2130,11 @@
 			}
 		}
 #endif
+		for (i = 1 ; i <= ibdev->num_ports ; ++i)
+			reset_gid_table(ibdev, i);
+		rtnl_lock();
 		mlx4_ib_scan_netdevs(ibdev);
+		rtnl_unlock();
 		mlx4_ib_init_gid_table(ibdev);
 	}
 
diff --git a/drivers/infiniband/hw/mlx5/Kconfig b/drivers/infiniband/hw/mlx5/Kconfig
index 8e6aebf..10df386 100644
--- a/drivers/infiniband/hw/mlx5/Kconfig
+++ b/drivers/infiniband/hw/mlx5/Kconfig
@@ -1,6 +1,6 @@
 config MLX5_INFINIBAND
 	tristate "Mellanox Connect-IB HCA support"
-	depends on NETDEVICES && ETHERNET && PCI && X86
+	depends on NETDEVICES && ETHERNET && PCI
 	select NET_VENDOR_MELLANOX
 	select MLX5_CORE
 	---help---
diff --git a/drivers/infiniband/hw/mlx5/main.c b/drivers/infiniband/hw/mlx5/main.c
index 9660d09..aa03e73 100644
--- a/drivers/infiniband/hw/mlx5/main.c
+++ b/drivers/infiniband/hw/mlx5/main.c
@@ -261,8 +261,7 @@
 	props->device_cap_flags    = IB_DEVICE_CHANGE_PHY_PORT |
 		IB_DEVICE_PORT_ACTIVE_EVENT		|
 		IB_DEVICE_SYS_IMAGE_GUID		|
-		IB_DEVICE_RC_RNR_NAK_GEN		|
-		IB_DEVICE_BLOCK_MULTICAST_LOOPBACK;
+		IB_DEVICE_RC_RNR_NAK_GEN;
 	flags = dev->mdev.caps.flags;
 	if (flags & MLX5_DEV_CAP_FLAG_BAD_PKEY_CNTR)
 		props->device_cap_flags |= IB_DEVICE_BAD_PKEY_CNTR;
@@ -536,24 +535,38 @@
 						  struct ib_udata *udata)
 {
 	struct mlx5_ib_dev *dev = to_mdev(ibdev);
-	struct mlx5_ib_alloc_ucontext_req req;
+	struct mlx5_ib_alloc_ucontext_req_v2 req;
 	struct mlx5_ib_alloc_ucontext_resp resp;
 	struct mlx5_ib_ucontext *context;
 	struct mlx5_uuar_info *uuari;
 	struct mlx5_uar *uars;
 	int gross_uuars;
 	int num_uars;
+	int ver;
 	int uuarn;
 	int err;
 	int i;
+	int reqlen;
 
 	if (!dev->ib_active)
 		return ERR_PTR(-EAGAIN);
 
-	err = ib_copy_from_udata(&req, udata, sizeof(req));
+	memset(&req, 0, sizeof(req));
+	reqlen = udata->inlen - sizeof(struct ib_uverbs_cmd_hdr);
+	if (reqlen == sizeof(struct mlx5_ib_alloc_ucontext_req))
+		ver = 0;
+	else if (reqlen == sizeof(struct mlx5_ib_alloc_ucontext_req_v2))
+		ver = 2;
+	else
+		return ERR_PTR(-EINVAL);
+
+	err = ib_copy_from_udata(&req, udata, reqlen);
 	if (err)
 		return ERR_PTR(err);
 
+	if (req.flags || req.reserved)
+		return ERR_PTR(-EINVAL);
+
 	if (req.total_num_uuars > MLX5_MAX_UUARS)
 		return ERR_PTR(-ENOMEM);
 
@@ -626,6 +639,7 @@
 	if (err)
 		goto out_uars;
 
+	uuari->ver = ver;
 	uuari->num_low_latency_uuars = req.num_low_latency_uuars;
 	uuari->uars = uars;
 	uuari->num_uars = num_uars;
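
The alloc_ucontext change above distinguishes old and v2 userspace requests purely by the size of the copied-in structure and rejects anything else (as well as nonzero flags/reserved fields). A stand-alone sketch of that size-based ABI negotiation, with made-up struct names (illustrative, not the driver ABI):

	#include <errno.h>
	#include <stddef.h>
	#include <stdio.h>

	struct req_v0 { unsigned int total_num_uuars, num_low_latency_uuars; };
	struct req_v2 { unsigned int total_num_uuars, num_low_latency_uuars,
			flags, reserved; };

	static int detect_ver(size_t reqlen)
	{
		if (reqlen == sizeof(struct req_v0))
			return 0;
		if (reqlen == sizeof(struct req_v2))
			return 2;
		return -EINVAL;		/* unknown ABI size: refuse the context */
	}

	int main(void)
	{
		printf("%d %d %d\n", detect_ver(sizeof(struct req_v0)),
		       detect_ver(sizeof(struct req_v2)), detect_ver(12));
		return 0;
	}
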
diff --git a/drivers/infiniband/hw/mlx5/qp.c b/drivers/infiniband/hw/mlx5/qp.c
index ae37fb9..7dfe8a1 100644
--- a/drivers/infiniband/hw/mlx5/qp.c
+++ b/drivers/infiniband/hw/mlx5/qp.c
@@ -216,7 +216,9 @@
 
 	case IB_QPT_UC:
 		size += sizeof(struct mlx5_wqe_ctrl_seg) +
-			sizeof(struct mlx5_wqe_raddr_seg);
+			sizeof(struct mlx5_wqe_raddr_seg) +
+			sizeof(struct mlx5_wqe_umr_ctrl_seg) +
+			sizeof(struct mlx5_mkey_seg);
 		break;
 
 	case IB_QPT_UD:
@@ -428,11 +430,17 @@
 		break;
 
 	case MLX5_IB_LATENCY_CLASS_MEDIUM:
-		uuarn = alloc_med_class_uuar(uuari);
+		if (uuari->ver < 2)
+			uuarn = -ENOMEM;
+		else
+			uuarn = alloc_med_class_uuar(uuari);
 		break;
 
 	case MLX5_IB_LATENCY_CLASS_HIGH:
-		uuarn = alloc_high_class_uuar(uuari);
+		if (uuari->ver < 2)
+			uuarn = -ENOMEM;
+		else
+			uuarn = alloc_high_class_uuar(uuari);
 		break;
 
 	case MLX5_IB_LATENCY_CLASS_FAST_PATH:
@@ -657,8 +665,8 @@
 	int err;
 
 	uuari = &dev->mdev.priv.uuari;
-	if (init_attr->create_flags & IB_QP_CREATE_BLOCK_MULTICAST_LOOPBACK)
-		qp->flags |= MLX5_IB_QP_BLOCK_MULTICAST_LOOPBACK;
+	if (init_attr->create_flags)
+		return -EINVAL;
 
 	if (init_attr->qp_type == MLX5_IB_QPT_REG_UMR)
 		lc = MLX5_IB_LATENCY_CLASS_FAST_PATH;
diff --git a/drivers/infiniband/hw/mlx5/user.h b/drivers/infiniband/hw/mlx5/user.h
index 32a2a5d..0f4f8e4 100644
--- a/drivers/infiniband/hw/mlx5/user.h
+++ b/drivers/infiniband/hw/mlx5/user.h
@@ -62,6 +62,13 @@
 	__u32	num_low_latency_uuars;
 };
 
+struct mlx5_ib_alloc_ucontext_req_v2 {
+	__u32	total_num_uuars;
+	__u32	num_low_latency_uuars;
+	__u32	flags;
+	__u32	reserved;
+};
+
 struct mlx5_ib_alloc_ucontext_resp {
 	__u32	qp_tab_size;
 	__u32	bf_reg_size;
diff --git a/drivers/infiniband/hw/nes/nes.c b/drivers/infiniband/hw/nes/nes.c
index 4291410..353c7b0 100644
--- a/drivers/infiniband/hw/nes/nes.c
+++ b/drivers/infiniband/hw/nes/nes.c
@@ -675,8 +675,11 @@
 	INIT_DELAYED_WORK(&nesdev->work, nes_recheck_link_status);
 
 	/* Initialize network devices */
-	if ((netdev = nes_netdev_init(nesdev, mmio_regs)) == NULL)
+	netdev = nes_netdev_init(nesdev, mmio_regs);
+	if (netdev == NULL) {
+		ret = -ENOMEM;
 		goto bail7;
+	}
 
 	/* Register network device */
 	ret = register_netdev(netdev);
diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_main.c b/drivers/infiniband/hw/ocrdma/ocrdma_main.c
index 2ca86ca..1a8a945 100644
--- a/drivers/infiniband/hw/ocrdma/ocrdma_main.c
+++ b/drivers/infiniband/hw/ocrdma/ocrdma_main.c
@@ -127,7 +127,7 @@
 
 	is_vlan = netdev->priv_flags & IFF_802_1Q_VLAN;
 	if (is_vlan)
-		netdev = vlan_dev_real_dev(netdev);
+		netdev = rdma_vlan_dev_real_dev(netdev);
 
 	rcu_read_lock();
 	list_for_each_entry_rcu(dev, &ocrdma_dev_list, entry) {
diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c b/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c
index aa92f40..e0cc201 100644
--- a/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c
+++ b/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c
@@ -176,7 +176,7 @@
 	props->port_cap_flags =
 	    IB_PORT_CM_SUP |
 	    IB_PORT_REINIT_SUP |
-	    IB_PORT_DEVICE_MGMT_SUP | IB_PORT_VENDOR_CLASS_SUP;
+	    IB_PORT_DEVICE_MGMT_SUP | IB_PORT_VENDOR_CLASS_SUP | IB_PORT_IP_BASED_GIDS;
 	props->gid_tbl_len = OCRDMA_MAX_SGID;
 	props->pkey_tbl_len = 1;
 	props->bad_pkey_cntr = 0;
@@ -1416,7 +1416,7 @@
 					  OCRDMA_QP_PARAMS_HOP_LMT_MASK) >>
 						OCRDMA_QP_PARAMS_HOP_LMT_SHIFT;
 	qp_attr->ah_attr.grh.traffic_class = (params.tclass_sq_psn &
-					      OCRDMA_QP_PARAMS_SQ_PSN_MASK) >>
+					      OCRDMA_QP_PARAMS_TCLASS_MASK) >>
 						OCRDMA_QP_PARAMS_TCLASS_SHIFT;
 
 	qp_attr->ah_attr.ah_flags = IB_AH_GRH;
diff --git a/drivers/infiniband/hw/qib/qib_iba7322.c b/drivers/infiniband/hw/qib/qib_iba7322.c
index 5bfc02f..d1bd213 100644
--- a/drivers/infiniband/hw/qib/qib_iba7322.c
+++ b/drivers/infiniband/hw/qib/qib_iba7322.c
@@ -2395,6 +2395,11 @@
 	qib_write_kreg_port(ppd, krp_ibcctrl_a, ppd->cpspec->ibcctrl_a);
 	qib_write_kreg(dd, kr_scratch, 0ULL);
 
+	/* ensure previous Tx parameters are not still forced */
+	qib_write_kreg_port(ppd, krp_tx_deemph_override,
+		SYM_MASK(IBSD_TX_DEEMPHASIS_OVERRIDE_0,
+		reset_tx_deemphasis_override));
+
 	if (qib_compat_ddr_negotiate) {
 		ppd->cpspec->ibdeltainprog = 1;
 		ppd->cpspec->ibsymsnap = read_7322_creg32_port(ppd,
diff --git a/drivers/infiniband/hw/usnic/usnic_ib_qp_grp.c b/drivers/infiniband/hw/usnic/usnic_ib_qp_grp.c
index 7ecc606..f8dfd76 100644
--- a/drivers/infiniband/hw/usnic/usnic_ib_qp_grp.c
+++ b/drivers/infiniband/hw/usnic/usnic_ib_qp_grp.c
@@ -629,6 +629,7 @@
 {
 	enum usnic_transport_type trans_type = qp_flow->trans_type;
 	int err;
+	uint16_t port_num = 0;
 
 	switch (trans_type) {
 	case USNIC_TRANSPORT_ROCE_CUSTOM:
@@ -637,9 +638,15 @@
 	case USNIC_TRANSPORT_IPV4_UDP:
 		err = usnic_transport_sock_get_addr(qp_flow->udp.sock,
 							NULL, NULL,
-							(uint16_t *) id);
+							&port_num);
 		if (err)
 			return err;
+		/*
+		 * Copy port_num to stack first and then to *id,
+		 * so that the short to int cast works for little
+		 * and big endian systems.
+		 */
+		*id = port_num;
 		break;
 	default:
 		usnic_err("Unsupported transport %u\n", trans_type);
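
The comment added above explains why the u16 is staged on the stack: writing a 16-bit port number straight into the storage of an int only lands in the low-order bytes on little-endian machines; on big-endian it lands in the high-order bytes and corrupts the value. A small demonstration (memcpy stands in for the old pointer cast; illustrative only):

	#include <stdint.h>
	#include <stdio.h>
	#include <string.h>

	int main(void)
	{
		int id = 0;
		uint16_t port_num = 0x1234;

		/* old style: store the u16 into the first two bytes of the int;
		 * correct only if those happen to be the low-order bytes */
		memcpy(&id, &port_num, sizeof(port_num));
		printf("raw copy : 0x%08x\n", id);

		/* new style: ordinary integer conversion, portable everywhere */
		id = port_num;
		printf("assign   : 0x%08x\n", id);	/* always 0x00001234 */
		return 0;
	}
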
diff --git a/drivers/infiniband/ulp/iser/iser_initiator.c b/drivers/infiniband/ulp/iser/iser_initiator.c
index 5388226..334f34b 100644
--- a/drivers/infiniband/ulp/iser/iser_initiator.c
+++ b/drivers/infiniband/ulp/iser/iser_initiator.c
@@ -610,11 +610,12 @@
 		ib_dma_unmap_single(device->ib_device, tx_desc->dma_addr,
 					ISER_HEADERS_LEN, DMA_TO_DEVICE);
 		kmem_cache_free(ig.desc_cache, tx_desc);
+		tx_desc = NULL;
 	}
 
 	atomic_dec(&ib_conn->post_send_buf_count);
 
-	if (tx_desc->type == ISCSI_TX_CONTROL) {
+	if (tx_desc && tx_desc->type == ISCSI_TX_CONTROL) {
 		/* this arithmetic is legal by libiscsi dd_data allocation */
 		task = (void *) ((long)(void *)tx_desc -
 				  sizeof(struct iscsi_task));
diff --git a/drivers/infiniband/ulp/iser/iser_verbs.c b/drivers/infiniband/ulp/iser/iser_verbs.c
index afe9567..ca37ede 100644
--- a/drivers/infiniband/ulp/iser/iser_verbs.c
+++ b/drivers/infiniband/ulp/iser/iser_verbs.c
@@ -652,9 +652,13 @@
 	/* getting here when the state is UP means that the conn is being *
 	 * terminated asynchronously from the iSCSI layer's perspective.  */
 	if (iser_conn_state_comp_exch(ib_conn, ISER_CONN_UP,
-				      ISER_CONN_TERMINATING))
-		iscsi_conn_failure(ib_conn->iser_conn->iscsi_conn,
-				   ISCSI_ERR_CONN_FAILED);
+					ISER_CONN_TERMINATING)){
+		if (ib_conn->iser_conn)
+			iscsi_conn_failure(ib_conn->iser_conn->iscsi_conn,
+					   ISCSI_ERR_CONN_FAILED);
+		else
+			iser_err("iscsi_iser connection isn't bound\n");
+	}
 
 	/* Complete the termination process if no posts are pending */
 	if (ib_conn->post_recv_buf_count == 0 &&
diff --git a/drivers/infiniband/ulp/isert/ib_isert.c b/drivers/infiniband/ulp/isert/ib_isert.c
index 9804fca..d18d08a 100644
--- a/drivers/infiniband/ulp/isert/ib_isert.c
+++ b/drivers/infiniband/ulp/isert/ib_isert.c
@@ -47,10 +47,10 @@
 isert_map_rdma(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
 	       struct isert_rdma_wr *wr);
 static void
-isert_unreg_rdma_frwr(struct isert_cmd *isert_cmd, struct isert_conn *isert_conn);
+isert_unreg_rdma(struct isert_cmd *isert_cmd, struct isert_conn *isert_conn);
 static int
-isert_reg_rdma_frwr(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
-		    struct isert_rdma_wr *wr);
+isert_reg_rdma(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
+	       struct isert_rdma_wr *wr);
 
 static void
 isert_qp_event_callback(struct ib_event *e, void *context)
@@ -227,11 +227,11 @@
 
 	/* assign function handlers */
 	if (dev_attr->device_cap_flags & IB_DEVICE_MEM_MGT_EXTENSIONS) {
-		device->use_frwr = 1;
-		device->reg_rdma_mem = isert_reg_rdma_frwr;
-		device->unreg_rdma_mem = isert_unreg_rdma_frwr;
+		device->use_fastreg = 1;
+		device->reg_rdma_mem = isert_reg_rdma;
+		device->unreg_rdma_mem = isert_unreg_rdma;
 	} else {
-		device->use_frwr = 0;
+		device->use_fastreg = 0;
 		device->reg_rdma_mem = isert_map_rdma;
 		device->unreg_rdma_mem = isert_unmap_cmd;
 	}
@@ -239,9 +239,10 @@
 	device->cqs_used = min_t(int, num_online_cpus(),
 				 device->ib_device->num_comp_vectors);
 	device->cqs_used = min(ISERT_MAX_CQ, device->cqs_used);
-	pr_debug("Using %d CQs, device %s supports %d vectors support FRWR %d\n",
+	pr_debug("Using %d CQs, device %s supports %d vectors support "
+		 "Fast registration %d\n",
 		 device->cqs_used, device->ib_device->name,
-		 device->ib_device->num_comp_vectors, device->use_frwr);
+		 device->ib_device->num_comp_vectors, device->use_fastreg);
 	device->cq_desc = kzalloc(sizeof(struct isert_cq_desc) *
 				device->cqs_used, GFP_KERNEL);
 	if (!device->cq_desc) {
@@ -250,13 +251,6 @@
 	}
 	cq_desc = device->cq_desc;
 
-	device->dev_pd = ib_alloc_pd(ib_dev);
-	if (IS_ERR(device->dev_pd)) {
-		ret = PTR_ERR(device->dev_pd);
-		pr_err("ib_alloc_pd failed for dev_pd: %d\n", ret);
-		goto out_cq_desc;
-	}
-
 	for (i = 0; i < device->cqs_used; i++) {
 		cq_desc[i].device = device;
 		cq_desc[i].cq_index = i;
@@ -294,13 +288,6 @@
 			goto out_cq;
 	}
 
-	device->dev_mr = ib_get_dma_mr(device->dev_pd, IB_ACCESS_LOCAL_WRITE);
-	if (IS_ERR(device->dev_mr)) {
-		ret = PTR_ERR(device->dev_mr);
-		pr_err("ib_get_dma_mr failed for dev_mr: %d\n", ret);
-		goto out_cq;
-	}
-
 	return 0;
 
 out_cq:
@@ -316,9 +303,6 @@
 			ib_destroy_cq(device->dev_tx_cq[j]);
 		}
 	}
-	ib_dealloc_pd(device->dev_pd);
-
-out_cq_desc:
 	kfree(device->cq_desc);
 
 	return ret;
@@ -341,8 +325,6 @@
 		device->dev_tx_cq[i] = NULL;
 	}
 
-	ib_dereg_mr(device->dev_mr);
-	ib_dealloc_pd(device->dev_pd);
 	kfree(device->cq_desc);
 }
 
@@ -398,18 +380,18 @@
 }
 
 static void
-isert_conn_free_frwr_pool(struct isert_conn *isert_conn)
+isert_conn_free_fastreg_pool(struct isert_conn *isert_conn)
 {
 	struct fast_reg_descriptor *fr_desc, *tmp;
 	int i = 0;
 
-	if (list_empty(&isert_conn->conn_frwr_pool))
+	if (list_empty(&isert_conn->conn_fr_pool))
 		return;
 
-	pr_debug("Freeing conn %p frwr pool", isert_conn);
+	pr_debug("Freeing conn %p fastreg pool", isert_conn);
 
 	list_for_each_entry_safe(fr_desc, tmp,
-				 &isert_conn->conn_frwr_pool, list) {
+				 &isert_conn->conn_fr_pool, list) {
 		list_del(&fr_desc->list);
 		ib_free_fast_reg_page_list(fr_desc->data_frpl);
 		ib_dereg_mr(fr_desc->data_mr);
@@ -417,20 +399,47 @@
 		++i;
 	}
 
-	if (i < isert_conn->conn_frwr_pool_size)
+	if (i < isert_conn->conn_fr_pool_size)
 		pr_warn("Pool still has %d regions registered\n",
-			isert_conn->conn_frwr_pool_size - i);
+			isert_conn->conn_fr_pool_size - i);
 }
 
 static int
-isert_conn_create_frwr_pool(struct isert_conn *isert_conn)
+isert_create_fr_desc(struct ib_device *ib_device, struct ib_pd *pd,
+		     struct fast_reg_descriptor *fr_desc)
+{
+	fr_desc->data_frpl = ib_alloc_fast_reg_page_list(ib_device,
+							 ISCSI_ISER_SG_TABLESIZE);
+	if (IS_ERR(fr_desc->data_frpl)) {
+		pr_err("Failed to allocate data frpl err=%ld\n",
+		       PTR_ERR(fr_desc->data_frpl));
+		return PTR_ERR(fr_desc->data_frpl);
+	}
+
+	fr_desc->data_mr = ib_alloc_fast_reg_mr(pd, ISCSI_ISER_SG_TABLESIZE);
+	if (IS_ERR(fr_desc->data_mr)) {
+		pr_err("Failed to allocate data frmr err=%ld\n",
+		       PTR_ERR(fr_desc->data_mr));
+		ib_free_fast_reg_page_list(fr_desc->data_frpl);
+		return PTR_ERR(fr_desc->data_mr);
+	}
+	pr_debug("Create fr_desc %p page_list %p\n",
+		 fr_desc, fr_desc->data_frpl->page_list);
+
+	fr_desc->valid = true;
+
+	return 0;
+}
+
+static int
+isert_conn_create_fastreg_pool(struct isert_conn *isert_conn)
 {
 	struct fast_reg_descriptor *fr_desc;
 	struct isert_device *device = isert_conn->conn_device;
 	int i, ret;
 
-	INIT_LIST_HEAD(&isert_conn->conn_frwr_pool);
-	isert_conn->conn_frwr_pool_size = 0;
+	INIT_LIST_HEAD(&isert_conn->conn_fr_pool);
+	isert_conn->conn_fr_pool_size = 0;
 	for (i = 0; i < ISCSI_DEF_XMIT_CMDS_MAX; i++) {
 		fr_desc = kzalloc(sizeof(*fr_desc), GFP_KERNEL);
 		if (!fr_desc) {
@@ -439,40 +448,26 @@
 			goto err;
 		}
 
-		fr_desc->data_frpl =
-			ib_alloc_fast_reg_page_list(device->ib_device,
-						    ISCSI_ISER_SG_TABLESIZE);
-		if (IS_ERR(fr_desc->data_frpl)) {
-			pr_err("Failed to allocate fr_pg_list err=%ld\n",
-			       PTR_ERR(fr_desc->data_frpl));
-			ret = PTR_ERR(fr_desc->data_frpl);
+		ret = isert_create_fr_desc(device->ib_device,
+					   isert_conn->conn_pd, fr_desc);
+		if (ret) {
+			pr_err("Failed to create fastreg descriptor err=%d\n",
+			       ret);
+			kfree(fr_desc);
 			goto err;
 		}
 
-		fr_desc->data_mr = ib_alloc_fast_reg_mr(device->dev_pd,
-					ISCSI_ISER_SG_TABLESIZE);
-		if (IS_ERR(fr_desc->data_mr)) {
-			pr_err("Failed to allocate frmr err=%ld\n",
-			       PTR_ERR(fr_desc->data_mr));
-			ret = PTR_ERR(fr_desc->data_mr);
-			ib_free_fast_reg_page_list(fr_desc->data_frpl);
-			goto err;
-		}
-		pr_debug("Create fr_desc %p page_list %p\n",
-			 fr_desc, fr_desc->data_frpl->page_list);
-
-		fr_desc->valid = true;
-		list_add_tail(&fr_desc->list, &isert_conn->conn_frwr_pool);
-		isert_conn->conn_frwr_pool_size++;
+		list_add_tail(&fr_desc->list, &isert_conn->conn_fr_pool);
+		isert_conn->conn_fr_pool_size++;
 	}
 
-	pr_debug("Creating conn %p frwr pool size=%d",
-		 isert_conn, isert_conn->conn_frwr_pool_size);
+	pr_debug("Creating conn %p fastreg pool size=%d",
+		 isert_conn, isert_conn->conn_fr_pool_size);
 
 	return 0;
 
 err:
-	isert_conn_free_frwr_pool(isert_conn);
+	isert_conn_free_fastreg_pool(isert_conn);
 	return ret;
 }
 
@@ -558,14 +553,29 @@
 	}
 
 	isert_conn->conn_device = device;
-	isert_conn->conn_pd = device->dev_pd;
-	isert_conn->conn_mr = device->dev_mr;
+	isert_conn->conn_pd = ib_alloc_pd(isert_conn->conn_device->ib_device);
+	if (IS_ERR(isert_conn->conn_pd)) {
+		ret = PTR_ERR(isert_conn->conn_pd);
+		pr_err("ib_alloc_pd failed for conn %p: ret=%d\n",
+		       isert_conn, ret);
+		goto out_pd;
+	}
 
-	if (device->use_frwr) {
-		ret = isert_conn_create_frwr_pool(isert_conn);
+	isert_conn->conn_mr = ib_get_dma_mr(isert_conn->conn_pd,
+					   IB_ACCESS_LOCAL_WRITE);
+	if (IS_ERR(isert_conn->conn_mr)) {
+		ret = PTR_ERR(isert_conn->conn_mr);
+		pr_err("ib_get_dma_mr failed for conn %p: ret=%d\n",
+		       isert_conn, ret);
+		goto out_mr;
+	}
+
+	if (device->use_fastreg) {
+		ret = isert_conn_create_fastreg_pool(isert_conn);
 		if (ret) {
-			pr_err("Conn: %p failed to create frwr_pool\n", isert_conn);
-			goto out_frwr;
+			pr_err("Conn: %p failed to create fastreg pool\n",
+			       isert_conn);
+			goto out_fastreg;
 		}
 	}
 
@@ -582,9 +592,13 @@
 	return 0;
 
 out_conn_dev:
-	if (device->use_frwr)
-		isert_conn_free_frwr_pool(isert_conn);
-out_frwr:
+	if (device->use_fastreg)
+		isert_conn_free_fastreg_pool(isert_conn);
+out_fastreg:
+	ib_dereg_mr(isert_conn->conn_mr);
+out_mr:
+	ib_dealloc_pd(isert_conn->conn_pd);
+out_pd:
 	isert_device_try_release(device);
 out_rsp_dma_map:
 	ib_dma_unmap_single(ib_dev, isert_conn->login_rsp_dma,
@@ -608,8 +622,8 @@
 
 	pr_debug("Entering isert_connect_release(): >>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>\n");
 
-	if (device && device->use_frwr)
-		isert_conn_free_frwr_pool(isert_conn);
+	if (device && device->use_fastreg)
+		isert_conn_free_fastreg_pool(isert_conn);
 
 	if (isert_conn->conn_qp) {
 		cq_index = ((struct isert_cq_desc *)
@@ -623,6 +637,9 @@
 	isert_free_rx_descriptors(isert_conn);
 	rdma_destroy_id(isert_conn->conn_cm_id);
 
+	ib_dereg_mr(isert_conn->conn_mr);
+	ib_dealloc_pd(isert_conn->conn_pd);
+
 	if (isert_conn->login_buf) {
 		ib_dma_unmap_single(ib_dev, isert_conn->login_rsp_dma,
 				    ISER_RX_LOGIN_SIZE, DMA_TO_DEVICE);
@@ -1024,13 +1041,13 @@
 }
 
 static struct iscsi_cmd
-*isert_allocate_cmd(struct iscsi_conn *conn, gfp_t gfp)
+*isert_allocate_cmd(struct iscsi_conn *conn)
 {
 	struct isert_conn *isert_conn = (struct isert_conn *)conn->context;
 	struct isert_cmd *isert_cmd;
 	struct iscsi_cmd *cmd;
 
-	cmd = iscsit_allocate_cmd(conn, gfp);
+	cmd = iscsit_allocate_cmd(conn, TASK_INTERRUPTIBLE);
 	if (!cmd) {
 		pr_err("Unable to allocate iscsi_cmd + isert_cmd\n");
 		return NULL;
@@ -1219,7 +1236,7 @@
 
 	switch (opcode) {
 	case ISCSI_OP_SCSI_CMD:
-		cmd = isert_allocate_cmd(conn, GFP_KERNEL);
+		cmd = isert_allocate_cmd(conn);
 		if (!cmd)
 			break;
 
@@ -1233,7 +1250,7 @@
 					rx_desc, (unsigned char *)hdr);
 		break;
 	case ISCSI_OP_NOOP_OUT:
-		cmd = isert_allocate_cmd(conn, GFP_KERNEL);
+		cmd = isert_allocate_cmd(conn);
 		if (!cmd)
 			break;
 
@@ -1246,7 +1263,7 @@
 						(unsigned char *)hdr);
 		break;
 	case ISCSI_OP_SCSI_TMFUNC:
-		cmd = isert_allocate_cmd(conn, GFP_KERNEL);
+		cmd = isert_allocate_cmd(conn);
 		if (!cmd)
 			break;
 
@@ -1254,7 +1271,7 @@
 						(unsigned char *)hdr);
 		break;
 	case ISCSI_OP_LOGOUT:
-		cmd = isert_allocate_cmd(conn, GFP_KERNEL);
+		cmd = isert_allocate_cmd(conn);
 		if (!cmd)
 			break;
 
@@ -1265,7 +1282,7 @@
 						    HZ);
 		break;
 	case ISCSI_OP_TEXT:
-		cmd = isert_allocate_cmd(conn, GFP_KERNEL);
+		cmd = isert_allocate_cmd(conn);
 		if (!cmd)
 			break;
 
@@ -1404,25 +1421,25 @@
 }
 
 static void
-isert_unreg_rdma_frwr(struct isert_cmd *isert_cmd, struct isert_conn *isert_conn)
+isert_unreg_rdma(struct isert_cmd *isert_cmd, struct isert_conn *isert_conn)
 {
 	struct isert_rdma_wr *wr = &isert_cmd->rdma_wr;
 	struct ib_device *ib_dev = isert_conn->conn_cm_id->device;
 	LIST_HEAD(unmap_list);
 
-	pr_debug("unreg_frwr_cmd: %p\n", isert_cmd);
+	pr_debug("unreg_fastreg_cmd: %p\n", isert_cmd);
 
 	if (wr->fr_desc) {
-		pr_debug("unreg_frwr_cmd: %p free fr_desc %p\n",
+		pr_debug("unreg_fastreg_cmd: %p free fr_desc %p\n",
 			 isert_cmd, wr->fr_desc);
 		spin_lock_bh(&isert_conn->conn_lock);
-		list_add_tail(&wr->fr_desc->list, &isert_conn->conn_frwr_pool);
+		list_add_tail(&wr->fr_desc->list, &isert_conn->conn_fr_pool);
 		spin_unlock_bh(&isert_conn->conn_lock);
 		wr->fr_desc = NULL;
 	}
 
 	if (wr->sge) {
-		pr_debug("unreg_frwr_cmd: %p unmap_sg op\n", isert_cmd);
+		pr_debug("unreg_fastreg_cmd: %p unmap_sg op\n", isert_cmd);
 		ib_dma_unmap_sg(ib_dev, wr->sge, wr->num_sge,
 				(wr->iser_ib_op == ISER_IB_RDMA_WRITE) ?
 				DMA_TO_DEVICE : DMA_FROM_DEVICE);
@@ -2163,26 +2180,22 @@
 
 static int
 isert_fast_reg_mr(struct fast_reg_descriptor *fr_desc,
-		  struct isert_cmd *isert_cmd, struct isert_conn *isert_conn,
-		  struct ib_sge *ib_sge, u32 offset, unsigned int data_len)
+		  struct isert_conn *isert_conn, struct scatterlist *sg_start,
+		  struct ib_sge *ib_sge, u32 sg_nents, u32 offset,
+		  unsigned int data_len)
 {
-	struct iscsi_cmd *cmd = isert_cmd->iscsi_cmd;
 	struct ib_device *ib_dev = isert_conn->conn_cm_id->device;
-	struct scatterlist *sg_start;
-	u32 sg_off, page_off;
 	struct ib_send_wr fr_wr, inv_wr;
 	struct ib_send_wr *bad_wr, *wr = NULL;
+	int ret, pagelist_len;
+	u32 page_off;
 	u8 key;
-	int ret, sg_nents, pagelist_len;
 
-	sg_off = offset / PAGE_SIZE;
-	sg_start = &cmd->se_cmd.t_data_sg[sg_off];
-	sg_nents = min_t(unsigned int, cmd->se_cmd.t_data_nents - sg_off,
-			 ISCSI_ISER_SG_TABLESIZE);
+	sg_nents = min_t(unsigned int, sg_nents, ISCSI_ISER_SG_TABLESIZE);
 	page_off = offset % PAGE_SIZE;
 
-	pr_debug("Cmd: %p use fr_desc %p sg_nents %d sg_off %d offset %u\n",
-		 isert_cmd, fr_desc, sg_nents, sg_off, offset);
+	pr_debug("Use fr_desc %p sg_nents %d offset %u\n",
+		 fr_desc, sg_nents, offset);
 
 	pagelist_len = isert_map_fr_pagelist(ib_dev, sg_start, sg_nents,
 					     &fr_desc->data_frpl->page_list[0]);
@@ -2232,8 +2245,8 @@
 }
 
 static int
-isert_reg_rdma_frwr(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
-		    struct isert_rdma_wr *wr)
+isert_reg_rdma(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
+	       struct isert_rdma_wr *wr)
 {
 	struct se_cmd *se_cmd = &cmd->se_cmd;
 	struct isert_cmd *isert_cmd = iscsit_priv_cmd(cmd);
@@ -2251,9 +2264,9 @@
 	if (wr->iser_ib_op == ISER_IB_RDMA_WRITE) {
 		data_left = se_cmd->data_length;
 	} else {
-		sg_off = cmd->write_data_done / PAGE_SIZE;
-		data_left = se_cmd->data_length - cmd->write_data_done;
 		offset = cmd->write_data_done;
+		sg_off = offset / PAGE_SIZE;
+		data_left = se_cmd->data_length - cmd->write_data_done;
 		isert_cmd->tx_desc.isert_cmd = isert_cmd;
 	}
 
@@ -2311,16 +2324,16 @@
 		wr->fr_desc = NULL;
 	} else {
 		spin_lock_irqsave(&isert_conn->conn_lock, flags);
-		fr_desc = list_first_entry(&isert_conn->conn_frwr_pool,
+		fr_desc = list_first_entry(&isert_conn->conn_fr_pool,
 					   struct fast_reg_descriptor, list);
 		list_del(&fr_desc->list);
 		spin_unlock_irqrestore(&isert_conn->conn_lock, flags);
 		wr->fr_desc = fr_desc;
 
-		ret = isert_fast_reg_mr(fr_desc, isert_cmd, isert_conn,
-				  ib_sge, offset, data_len);
+		ret = isert_fast_reg_mr(fr_desc, isert_conn, sg_start,
+					ib_sge, sg_nents, offset, data_len);
 		if (ret) {
-			list_add_tail(&fr_desc->list, &isert_conn->conn_frwr_pool);
+			list_add_tail(&fr_desc->list, &isert_conn->conn_fr_pool);
 			goto unmap_sg;
 		}
 	}
diff --git a/drivers/infiniband/ulp/isert/ib_isert.h b/drivers/infiniband/ulp/isert/ib_isert.h
index 691f90f..708a069 100644
--- a/drivers/infiniband/ulp/isert/ib_isert.h
+++ b/drivers/infiniband/ulp/isert/ib_isert.h
@@ -119,9 +119,9 @@
 	wait_queue_head_t	conn_wait;
 	wait_queue_head_t	conn_wait_comp_err;
 	struct kref		conn_kref;
-	struct list_head	conn_frwr_pool;
-	int			conn_frwr_pool_size;
-	/* lock to protect frwr_pool */
+	struct list_head	conn_fr_pool;
+	int			conn_fr_pool_size;
+	/* lock to protect fastreg pool */
 	spinlock_t		conn_lock;
 #define ISERT_COMP_BATCH_COUNT	8
 	int			conn_comp_batch;
@@ -139,13 +139,11 @@
 };
 
 struct isert_device {
-	int			use_frwr;
+	int			use_fastreg;
 	int			cqs_used;
 	int			refcount;
 	int			cq_active_qps[ISERT_MAX_CQ];
 	struct ib_device	*ib_device;
-	struct ib_pd		*dev_pd;
-	struct ib_mr		*dev_mr;
 	struct ib_cq		*dev_rx_cq[ISERT_MAX_CQ];
 	struct ib_cq		*dev_tx_cq[ISERT_MAX_CQ];
 	struct isert_cq_desc	*cq_desc;
diff --git a/drivers/infiniband/ulp/srpt/ib_srpt.c b/drivers/infiniband/ulp/srpt/ib_srpt.c
index 520a7e5..0e537d8 100644
--- a/drivers/infiniband/ulp/srpt/ib_srpt.c
+++ b/drivers/infiniband/ulp/srpt/ib_srpt.c
@@ -3666,9 +3666,9 @@
 	unsigned long val;
 	int ret;
 
-	ret = strict_strtoul(page, 0, &val);
+	ret = kstrtoul(page, 0, &val);
 	if (ret < 0) {
-		pr_err("strict_strtoul() failed with ret: %d\n", ret);
+		pr_err("kstrtoul() failed with ret: %d\n", ret);
 		return -EINVAL;
 	}
 	if (val > MAX_SRPT_RDMA_SIZE) {
@@ -3706,9 +3706,9 @@
 	unsigned long val;
 	int ret;
 
-	ret = strict_strtoul(page, 0, &val);
+	ret = kstrtoul(page, 0, &val);
 	if (ret < 0) {
-		pr_err("strict_strtoul() failed with ret: %d\n", ret);
+		pr_err("kstrtoul() failed with ret: %d\n", ret);
 		return -EINVAL;
 	}
 	if (val > MAX_SRPT_RSP_SIZE) {
@@ -3746,9 +3746,9 @@
 	unsigned long val;
 	int ret;
 
-	ret = strict_strtoul(page, 0, &val);
+	ret = kstrtoul(page, 0, &val);
 	if (ret < 0) {
-		pr_err("strict_strtoul() failed with ret: %d\n", ret);
+		pr_err("kstrtoul() failed with ret: %d\n", ret);
 		return -EINVAL;
 	}
 	if (val > MAX_SRPT_SRQ_SIZE) {
@@ -3793,7 +3793,7 @@
 	unsigned long tmp;
         int ret;
 
-	ret = strict_strtoul(page, 0, &tmp);
+	ret = kstrtoul(page, 0, &tmp);
 	if (ret < 0) {
 		printk(KERN_ERR "Unable to extract srpt_tpg_store_enable\n");
 		return -EINVAL;
diff --git a/drivers/iommu/Kconfig b/drivers/iommu/Kconfig
index 3e7fdbb..79bbc21 100644
--- a/drivers/iommu/Kconfig
+++ b/drivers/iommu/Kconfig
@@ -207,6 +207,7 @@
 	bool "IOMMU for Renesas IPMMU/IPMMUI"
 	default n
 	depends on ARM
+	depends on SH_MOBILE || COMPILE_TEST
 	select IOMMU_API
 	select ARM_DMA_USE_IOMMU
 	select SHMOBILE_IPMMU
diff --git a/drivers/iommu/amd_iommu.c b/drivers/iommu/amd_iommu.c
index 72531f0..faf0da4 100644
--- a/drivers/iommu/amd_iommu.c
+++ b/drivers/iommu/amd_iommu.c
@@ -248,8 +248,8 @@
 	if (!dev || !dev->dma_mask)
 		return false;
 
-	/* No device or no PCI device */
-	if (dev->bus != &pci_bus_type)
+	/* No PCI device */
+	if (!dev_is_pci(dev))
 		return false;
 
 	devid = get_device_id(dev);
diff --git a/drivers/iommu/arm-smmu.c b/drivers/iommu/arm-smmu.c
index e46a8870..8911850 100644
--- a/drivers/iommu/arm-smmu.c
+++ b/drivers/iommu/arm-smmu.c
@@ -24,7 +24,7 @@
  *	- v7/v8 long-descriptor format
  *	- Non-secure access to the SMMU
  *	- 4k and 64k pages, with contiguous pte hints.
- *	- Up to 39-bit addressing
+ *	- Up to 42-bit addressing (dependent on VA_BITS)
  *	- Context fault reporting
  */
 
@@ -61,12 +61,13 @@
 #define ARM_SMMU_GR1(smmu)		((smmu)->base + (smmu)->pagesize)
 
 /* Page table bits */
-#define ARM_SMMU_PTE_PAGE		(((pteval_t)3) << 0)
+#define ARM_SMMU_PTE_XN			(((pteval_t)3) << 53)
 #define ARM_SMMU_PTE_CONT		(((pteval_t)1) << 52)
 #define ARM_SMMU_PTE_AF			(((pteval_t)1) << 10)
 #define ARM_SMMU_PTE_SH_NS		(((pteval_t)0) << 8)
 #define ARM_SMMU_PTE_SH_OS		(((pteval_t)2) << 8)
 #define ARM_SMMU_PTE_SH_IS		(((pteval_t)3) << 8)
+#define ARM_SMMU_PTE_PAGE		(((pteval_t)3) << 0)
 
 #if PAGE_SIZE == SZ_4K
 #define ARM_SMMU_PTE_CONT_ENTRIES	16
@@ -1205,7 +1206,7 @@
 				   unsigned long pfn, int flags, int stage)
 {
 	pte_t *pte, *start;
-	pteval_t pteval = ARM_SMMU_PTE_PAGE | ARM_SMMU_PTE_AF;
+	pteval_t pteval = ARM_SMMU_PTE_PAGE | ARM_SMMU_PTE_AF | ARM_SMMU_PTE_XN;
 
 	if (pmd_none(*pmd)) {
 		/* Allocate a new set of tables */
@@ -1244,7 +1245,9 @@
 	}
 
 	/* If no access, create a faulting entry to avoid TLB fills */
-	if (!(flags & (IOMMU_READ | IOMMU_WRITE)))
+	if (flags & IOMMU_EXEC)
+		pteval &= ~ARM_SMMU_PTE_XN;
+	else if (!(flags & (IOMMU_READ | IOMMU_WRITE)))
 		pteval &= ~ARM_SMMU_PTE_PAGE;
 
 	pteval |= ARM_SMMU_PTE_SH_IS;
@@ -1494,6 +1497,13 @@
 {
 	struct arm_smmu_device *child, *parent, *smmu;
 	struct arm_smmu_master *master = NULL;
+	struct iommu_group *group;
+	int ret;
+
+	if (dev->archdata.iommu) {
+		dev_warn(dev, "IOMMU driver already assigned to device\n");
+		return -EINVAL;
+	}
 
 	spin_lock(&arm_smmu_devices_lock);
 	list_for_each_entry(parent, &arm_smmu_devices, list) {
@@ -1526,13 +1536,23 @@
 	if (!master)
 		return -ENODEV;
 
+	group = iommu_group_alloc();
+	if (IS_ERR(group)) {
+		dev_err(dev, "Failed to allocate IOMMU group\n");
+		return PTR_ERR(group);
+	}
+
+	ret = iommu_group_add_device(group, dev);
+	iommu_group_put(group);
 	dev->archdata.iommu = smmu;
-	return 0;
+
+	return ret;
 }
 
 static void arm_smmu_remove_device(struct device *dev)
 {
 	dev->archdata.iommu = NULL;
+	iommu_group_remove_device(dev);
 }
 
 static struct iommu_ops arm_smmu_ops = {
@@ -1730,7 +1750,6 @@
 	 * allocation (PTRS_PER_PGD).
 	 */
 #ifdef CONFIG_64BIT
-	/* Current maximum output size of 39 bits */
 	smmu->s1_output_size = min(39UL, size);
 #else
 	smmu->s1_output_size = min(32UL, size);
@@ -1745,7 +1764,7 @@
 	} else {
 #ifdef CONFIG_64BIT
 		size = (id >> ID2_UBS_SHIFT) & ID2_UBS_MASK;
-		size = min(39, arm_smmu_id_size_to_bits(size));
+		size = min(VA_BITS, arm_smmu_id_size_to_bits(size));
 #else
 		size = 32;
 #endif
diff --git a/drivers/iommu/dmar.c b/drivers/iommu/dmar.c
index 8b452c9..1581565 100644
--- a/drivers/iommu/dmar.c
+++ b/drivers/iommu/dmar.c
@@ -52,6 +52,9 @@
 struct acpi_table_header * __initdata dmar_tbl;
 static acpi_size dmar_tbl_size;
 
+static int alloc_iommu(struct dmar_drhd_unit *drhd);
+static void free_iommu(struct intel_iommu *iommu);
+
 static void __init dmar_register_drhd_unit(struct dmar_drhd_unit *drhd)
 {
 	/*
@@ -100,7 +103,6 @@
 	if (!pdev) {
 		pr_warn("Device scope device [%04x:%02x:%02x.%02x] not found\n",
 			segment, scope->bus, path->device, path->function);
-		*dev = NULL;
 		return 0;
 	}
 	if ((scope->entry_type == ACPI_DMAR_SCOPE_TYPE_ENDPOINT && \
@@ -151,7 +153,7 @@
 			ret = dmar_parse_one_dev_scope(scope,
 				&(*devices)[index], segment);
 			if (ret) {
-				kfree(*devices);
+				dmar_free_dev_scope(devices, cnt);
 				return ret;
 			}
 			index ++;
@@ -162,6 +164,17 @@
 	return 0;
 }
 
+void dmar_free_dev_scope(struct pci_dev ***devices, int *cnt)
+{
+	if (*devices && *cnt) {
+		while (--*cnt >= 0)
+			pci_dev_put((*devices)[*cnt]);
+		kfree(*devices);
+		*devices = NULL;
+		*cnt = 0;
+	}
+}
+
 /**
  * dmar_parse_one_drhd - parses exactly one DMA remapping hardware definition
  * structure which uniquely represent one DMA remapping hardware unit
@@ -193,25 +206,28 @@
 	return 0;
 }
 
+static void dmar_free_drhd(struct dmar_drhd_unit *dmaru)
+{
+	if (dmaru->devices && dmaru->devices_cnt)
+		dmar_free_dev_scope(&dmaru->devices, &dmaru->devices_cnt);
+	if (dmaru->iommu)
+		free_iommu(dmaru->iommu);
+	kfree(dmaru);
+}
+
 static int __init dmar_parse_dev(struct dmar_drhd_unit *dmaru)
 {
 	struct acpi_dmar_hardware_unit *drhd;
-	int ret = 0;
 
 	drhd = (struct acpi_dmar_hardware_unit *) dmaru->hdr;
 
 	if (dmaru->include_all)
 		return 0;
 
-	ret = dmar_parse_dev_scope((void *)(drhd + 1),
-				((void *)drhd) + drhd->header.length,
-				&dmaru->devices_cnt, &dmaru->devices,
-				drhd->segment);
-	if (ret) {
-		list_del(&dmaru->list);
-		kfree(dmaru);
-	}
-	return ret;
+	return dmar_parse_dev_scope((void *)(drhd + 1),
+				    ((void *)drhd) + drhd->header.length,
+				    &dmaru->devices_cnt, &dmaru->devices,
+				    drhd->segment);
 }
 
 #ifdef CONFIG_ACPI_NUMA
@@ -423,7 +439,7 @@
 int __init dmar_dev_scope_init(void)
 {
 	static int dmar_dev_scope_initialized;
-	struct dmar_drhd_unit *drhd, *drhd_n;
+	struct dmar_drhd_unit *drhd;
 	int ret = -ENODEV;
 
 	if (dmar_dev_scope_initialized)
@@ -432,7 +448,7 @@
 	if (list_empty(&dmar_drhd_units))
 		goto fail;
 
-	list_for_each_entry_safe(drhd, drhd_n, &dmar_drhd_units, list) {
+	list_for_each_entry(drhd, &dmar_drhd_units, list) {
 		ret = dmar_parse_dev(drhd);
 		if (ret)
 			goto fail;
@@ -456,24 +472,23 @@
 	static int dmar_table_initialized;
 	int ret;
 
-	if (dmar_table_initialized)
-		return 0;
+	if (dmar_table_initialized == 0) {
+		ret = parse_dmar_table();
+		if (ret < 0) {
+			if (ret != -ENODEV)
+				pr_info("parse DMAR table failure.\n");
+		} else  if (list_empty(&dmar_drhd_units)) {
+			pr_info("No DMAR devices found\n");
+			ret = -ENODEV;
+		}
 
-	dmar_table_initialized = 1;
-
-	ret = parse_dmar_table();
-	if (ret) {
-		if (ret != -ENODEV)
-			pr_info("parse DMAR table failure.\n");
-		return ret;
+		if (ret < 0)
+			dmar_table_initialized = ret;
+		else
+			dmar_table_initialized = 1;
 	}
 
-	if (list_empty(&dmar_drhd_units)) {
-		pr_info("No DMAR devices found\n");
-		return -ENODEV;
-	}
-
-	return 0;
+	return dmar_table_initialized < 0 ? dmar_table_initialized : 0;
 }
 
 static void warn_invalid_dmar(u64 addr, const char *message)
@@ -488,7 +503,7 @@
 		dmi_get_system_info(DMI_PRODUCT_VERSION));
 }
 
-int __init check_zero_address(void)
+static int __init check_zero_address(void)
 {
 	struct acpi_table_dmar *dmar;
 	struct acpi_dmar_header *entry_header;
@@ -546,14 +561,6 @@
 	if (ret)
 		ret = check_zero_address();
 	{
-		struct acpi_table_dmar *dmar;
-
-		dmar = (struct acpi_table_dmar *) dmar_tbl;
-
-		if (ret && irq_remapping_enabled && cpu_has_x2apic &&
-		    dmar->flags & 0x1)
-			pr_info("Queued invalidation will be enabled to support x2apic and Intr-remapping.\n");
-
 		if (ret && !no_iommu && !iommu_detected && !dmar_disabled) {
 			iommu_detected = 1;
 			/* Make sure ACS will be enabled */
@@ -565,7 +572,7 @@
 			x86_init.iommu.iommu_init = intel_iommu_init;
 #endif
 	}
-	early_acpi_os_unmap_memory(dmar_tbl, dmar_tbl_size);
+	early_acpi_os_unmap_memory((void __iomem *)dmar_tbl, dmar_tbl_size);
 	dmar_tbl = NULL;
 
 	return ret ? 1 : -ENODEV;
@@ -647,7 +654,7 @@
 	return err;
 }
 
-int alloc_iommu(struct dmar_drhd_unit *drhd)
+static int alloc_iommu(struct dmar_drhd_unit *drhd)
 {
 	struct intel_iommu *iommu;
 	u32 ver, sts;
@@ -721,12 +728,19 @@
 	return err;
 }
 
-void free_iommu(struct intel_iommu *iommu)
+static void free_iommu(struct intel_iommu *iommu)
 {
-	if (!iommu)
-		return;
+	if (iommu->irq) {
+		free_irq(iommu->irq, iommu);
+		irq_set_handler_data(iommu->irq, NULL);
+		destroy_irq(iommu->irq);
+	}
 
-	free_dmar_iommu(iommu);
+	if (iommu->qi) {
+		free_page((unsigned long)iommu->qi->desc);
+		kfree(iommu->qi->desc_status);
+		kfree(iommu->qi);
+	}
 
 	if (iommu->reg)
 		unmap_iommu(iommu);
@@ -1050,7 +1064,7 @@
 	desc_page = alloc_pages_node(iommu->node, GFP_ATOMIC | __GFP_ZERO, 0);
 	if (!desc_page) {
 		kfree(qi);
-		iommu->qi = 0;
+		iommu->qi = NULL;
 		return -ENOMEM;
 	}
 
@@ -1060,7 +1074,7 @@
 	if (!qi->desc_status) {
 		free_page((unsigned long) qi->desc);
 		kfree(qi);
-		iommu->qi = 0;
+		iommu->qi = NULL;
 		return -ENOMEM;
 	}
 
@@ -1111,9 +1125,7 @@
 	"Blocked an interrupt request due to source-id verification failure",
 };
 
-#define MAX_FAULT_REASON_IDX 	(ARRAY_SIZE(fault_reason_strings) - 1)
-
-const char *dmar_get_fault_reason(u8 fault_reason, int *fault_type)
+static const char *dmar_get_fault_reason(u8 fault_reason, int *fault_type)
 {
 	if (fault_reason >= 0x20 && (fault_reason - 0x20 <
 					ARRAY_SIZE(irq_remap_fault_reasons))) {
@@ -1303,15 +1315,14 @@
 int __init enable_drhd_fault_handling(void)
 {
 	struct dmar_drhd_unit *drhd;
+	struct intel_iommu *iommu;
 
 	/*
 	 * Enable fault control interrupt.
 	 */
-	for_each_drhd_unit(drhd) {
-		int ret;
-		struct intel_iommu *iommu = drhd->iommu;
+	for_each_iommu(iommu, drhd) {
 		u32 fault_status;
-		ret = dmar_set_interrupt(iommu);
+		int ret = dmar_set_interrupt(iommu);
 
 		if (ret) {
 			pr_err("DRHD %Lx: failed to enable fault, interrupt, ret %d\n",
@@ -1366,4 +1377,22 @@
 		return 0;
 	return dmar->flags & 0x1;
 }
+
+static int __init dmar_free_unused_resources(void)
+{
+	struct dmar_drhd_unit *dmaru, *dmaru_n;
+
+	/* DMAR units are in use */
+	if (irq_remapping_enabled || intel_iommu_enabled)
+		return 0;
+
+	list_for_each_entry_safe(dmaru, dmaru_n, &dmar_drhd_units, list) {
+		list_del(&dmaru->list);
+		dmar_free_drhd(dmaru);
+	}
+
+	return 0;
+}
+
+late_initcall(dmar_free_unused_resources);
 IOMMU_INIT_POST(detect_intel_iommu);
diff --git a/drivers/iommu/fsl_pamu_domain.c b/drivers/iommu/fsl_pamu_domain.c
index c857c30..93072ba 100644
--- a/drivers/iommu/fsl_pamu_domain.c
+++ b/drivers/iommu/fsl_pamu_domain.c
@@ -691,7 +691,7 @@
 	 * Use LIODN of the PCI controller while attaching a
 	 * PCI device.
 	 */
-	if (dev->bus == &pci_bus_type) {
+	if (dev_is_pci(dev)) {
 		pdev = to_pci_dev(dev);
 		pci_ctl = pci_bus_to_host(pdev->bus);
 		/*
@@ -729,7 +729,7 @@
 	 * Use LIODN of the PCI controller while detaching a
 	 * PCI device.
 	 */
-	if (dev->bus == &pci_bus_type) {
+	if (dev_is_pci(dev)) {
 		pdev = to_pci_dev(dev);
 		pci_ctl = pci_bus_to_host(pdev->bus);
 		/*
@@ -1056,7 +1056,7 @@
 	 * For platform devices we allocate a separate group for
 	 * each of the devices.
 	 */
-	if (dev->bus == &pci_bus_type) {
+	if (dev_is_pci(dev)) {
 		pdev = to_pci_dev(dev);
 		/* Don't create device groups for virtual PCI bridges */
 		if (pdev->subordinate)
diff --git a/drivers/iommu/intel-iommu.c b/drivers/iommu/intel-iommu.c
index 59779e1..a22c86c 100644
--- a/drivers/iommu/intel-iommu.c
+++ b/drivers/iommu/intel-iommu.c
@@ -63,6 +63,7 @@
 #define DEFAULT_DOMAIN_ADDRESS_WIDTH 48
 
 #define MAX_AGAW_WIDTH 64
+#define MAX_AGAW_PFN_WIDTH	(MAX_AGAW_WIDTH - VTD_PAGE_SHIFT)
 
 #define __DOMAIN_MAX_PFN(gaw)  ((((uint64_t)1) << (gaw-VTD_PAGE_SHIFT)) - 1)
 #define __DOMAIN_MAX_ADDR(gaw) ((((uint64_t)1) << gaw) - 1)
@@ -106,12 +107,12 @@
 
 static inline int agaw_to_width(int agaw)
 {
-	return 30 + agaw * LEVEL_STRIDE;
+	return min_t(int, 30 + agaw * LEVEL_STRIDE, MAX_AGAW_WIDTH);
 }
 
 static inline int width_to_agaw(int width)
 {
-	return (width - 30) / LEVEL_STRIDE;
+	return DIV_ROUND_UP(width - 30, LEVEL_STRIDE);
 }
 
 static inline unsigned int level_to_offset_bits(int level)
@@ -141,7 +142,7 @@
 
 static inline unsigned long lvl_to_nr_pages(unsigned int lvl)
 {
-	return  1 << ((lvl - 1) * LEVEL_STRIDE);
+	return  1 << min_t(int, (lvl - 1) * LEVEL_STRIDE, MAX_AGAW_PFN_WIDTH);
 }
 
 /* VT-d pages must always be _smaller_ than MM pages. Otherwise things
@@ -288,26 +289,6 @@
 	pte->val = 0;
 }
 
-static inline void dma_set_pte_readable(struct dma_pte *pte)
-{
-	pte->val |= DMA_PTE_READ;
-}
-
-static inline void dma_set_pte_writable(struct dma_pte *pte)
-{
-	pte->val |= DMA_PTE_WRITE;
-}
-
-static inline void dma_set_pte_snp(struct dma_pte *pte)
-{
-	pte->val |= DMA_PTE_SNP;
-}
-
-static inline void dma_set_pte_prot(struct dma_pte *pte, unsigned long prot)
-{
-	pte->val = (pte->val & ~3) | (prot & 3);
-}
-
 static inline u64 dma_pte_addr(struct dma_pte *pte)
 {
 #ifdef CONFIG_64BIT
@@ -318,11 +299,6 @@
 #endif
 }
 
-static inline void dma_set_pte_pfn(struct dma_pte *pte, unsigned long pfn)
-{
-	pte->val |= (uint64_t)pfn << VTD_PAGE_SHIFT;
-}
-
 static inline bool dma_pte_present(struct dma_pte *pte)
 {
 	return (pte->val & 3) != 0;
@@ -406,7 +382,7 @@
 
 static void flush_unmaps_timeout(unsigned long data);
 
-DEFINE_TIMER(unmap_timer,  flush_unmaps_timeout, 0, 0);
+static DEFINE_TIMER(unmap_timer,  flush_unmaps_timeout, 0, 0);
 
 #define HIGH_WATER_MARK 250
 struct deferred_flush_tables {
@@ -652,9 +628,7 @@
 	struct dmar_drhd_unit *drhd = NULL;
 	int i;
 
-	for_each_drhd_unit(drhd) {
-		if (drhd->ignored)
-			continue;
+	for_each_active_drhd_unit(drhd) {
 		if (segment != drhd->segment)
 			continue;
 
@@ -865,7 +839,6 @@
 	int addr_width = agaw_to_width(domain->agaw) - VTD_PAGE_SHIFT;
 	unsigned int large_page = 1;
 	struct dma_pte *first_pte, *pte;
-	int order;
 
 	BUG_ON(addr_width < BITS_PER_LONG && start_pfn >> addr_width);
 	BUG_ON(addr_width < BITS_PER_LONG && last_pfn >> addr_width);
@@ -890,8 +863,7 @@
 
 	} while (start_pfn && start_pfn <= last_pfn);
 
-	order = (large_page - 1) * 9;
-	return order;
+	return min_t(int, (large_page - 1) * 9, MAX_AGAW_PFN_WIDTH);
 }
 
 static void dma_pte_free_level(struct dmar_domain *domain, int level,
@@ -1255,8 +1227,8 @@
 	unsigned long nlongs;
 
 	ndomains = cap_ndoms(iommu->cap);
-	pr_debug("IOMMU %d: Number of Domains supported <%ld>\n", iommu->seq_id,
-			ndomains);
+	pr_debug("IOMMU%d: Number of Domains supported <%ld>\n",
+		 iommu->seq_id, ndomains);
 	nlongs = BITS_TO_LONGS(ndomains);
 
 	spin_lock_init(&iommu->lock);
@@ -1266,13 +1238,17 @@
 	 */
 	iommu->domain_ids = kcalloc(nlongs, sizeof(unsigned long), GFP_KERNEL);
 	if (!iommu->domain_ids) {
-		printk(KERN_ERR "Allocating domain id array failed\n");
+		pr_err("IOMMU%d: allocating domain id array failed\n",
+		       iommu->seq_id);
 		return -ENOMEM;
 	}
 	iommu->domains = kcalloc(ndomains, sizeof(struct dmar_domain *),
 			GFP_KERNEL);
 	if (!iommu->domains) {
-		printk(KERN_ERR "Allocating domain array failed\n");
+		pr_err("IOMMU%d: allocating domain array failed\n",
+		       iommu->seq_id);
+		kfree(iommu->domain_ids);
+		iommu->domain_ids = NULL;
 		return -ENOMEM;
 	}
 
@@ -1289,10 +1265,10 @@
 static void domain_exit(struct dmar_domain *domain);
 static void vm_domain_exit(struct dmar_domain *domain);
 
-void free_dmar_iommu(struct intel_iommu *iommu)
+static void free_dmar_iommu(struct intel_iommu *iommu)
 {
 	struct dmar_domain *domain;
-	int i;
+	int i, count;
 	unsigned long flags;
 
 	if ((iommu->domains) && (iommu->domain_ids)) {
@@ -1301,28 +1277,24 @@
 			clear_bit(i, iommu->domain_ids);
 
 			spin_lock_irqsave(&domain->iommu_lock, flags);
-			if (--domain->iommu_count == 0) {
+			count = --domain->iommu_count;
+			spin_unlock_irqrestore(&domain->iommu_lock, flags);
+			if (count == 0) {
 				if (domain->flags & DOMAIN_FLAG_VIRTUAL_MACHINE)
 					vm_domain_exit(domain);
 				else
 					domain_exit(domain);
 			}
-			spin_unlock_irqrestore(&domain->iommu_lock, flags);
 		}
 	}
 
 	if (iommu->gcmd & DMA_GCMD_TE)
 		iommu_disable_translation(iommu);
 
-	if (iommu->irq) {
-		irq_set_handler_data(iommu->irq, NULL);
-		/* This will mask the irq */
-		free_irq(iommu->irq, iommu);
-		destroy_irq(iommu->irq);
-	}
-
 	kfree(iommu->domains);
 	kfree(iommu->domain_ids);
+	iommu->domains = NULL;
+	iommu->domain_ids = NULL;
 
 	g_iommus[iommu->seq_id] = NULL;
 
@@ -2245,8 +2217,6 @@
 	if (!si_domain)
 		return -EFAULT;
 
-	pr_debug("Identity mapping domain is domain %d\n", si_domain->id);
-
 	for_each_active_iommu(iommu, drhd) {
 		ret = iommu_attach_domain(si_domain, iommu);
 		if (ret) {
@@ -2261,6 +2231,8 @@
 	}
 
 	si_domain->flags = DOMAIN_FLAG_STATIC_IDENTITY;
+	pr_debug("IOMMU: identity mapping domain is domain %d\n",
+		 si_domain->id);
 
 	if (hw)
 		return 0;
@@ -2492,11 +2464,7 @@
 		goto error;
 	}
 
-	for_each_drhd_unit(drhd) {
-		if (drhd->ignored)
-			continue;
-
-		iommu = drhd->iommu;
+	for_each_active_iommu(iommu, drhd) {
 		g_iommus[iommu->seq_id] = iommu;
 
 		ret = iommu_init_domains(iommu);
@@ -2520,12 +2488,7 @@
 	/*
 	 * Start from the sane iommu hardware state.
 	 */
-	for_each_drhd_unit(drhd) {
-		if (drhd->ignored)
-			continue;
-
-		iommu = drhd->iommu;
-
+	for_each_active_iommu(iommu, drhd) {
 		/*
 		 * If the queued invalidation is already initialized by us
 		 * (for example, while enabling interrupt-remapping) then
@@ -2545,12 +2508,7 @@
 		dmar_disable_qi(iommu);
 	}
 
-	for_each_drhd_unit(drhd) {
-		if (drhd->ignored)
-			continue;
-
-		iommu = drhd->iommu;
-
+	for_each_active_iommu(iommu, drhd) {
 		if (dmar_enable_qi(iommu)) {
 			/*
 			 * Queued Invalidate not enabled, use Register Based
@@ -2633,17 +2591,16 @@
 	 *   global invalidate iotlb
 	 *   enable translation
 	 */
-	for_each_drhd_unit(drhd) {
+	for_each_iommu(iommu, drhd) {
 		if (drhd->ignored) {
 			/*
 			 * we always have to disable PMRs or DMA may fail on
 			 * this device
 			 */
 			if (force_on)
-				iommu_disable_protect_mem_regions(drhd->iommu);
+				iommu_disable_protect_mem_regions(iommu);
 			continue;
 		}
-		iommu = drhd->iommu;
 
 		iommu_flush_write_buffer(iommu);
 
@@ -2665,12 +2622,9 @@
 
 	return 0;
 error:
-	for_each_drhd_unit(drhd) {
-		if (drhd->ignored)
-			continue;
-		iommu = drhd->iommu;
-		free_iommu(iommu);
-	}
+	for_each_active_iommu(iommu, drhd)
+		free_dmar_iommu(iommu);
+	kfree(deferred_flush);
 	kfree(g_iommus);
 	return ret;
 }
@@ -2758,7 +2712,7 @@
 	struct pci_dev *pdev;
 	int found;
 
-	if (unlikely(dev->bus != &pci_bus_type))
+	if (unlikely(!dev_is_pci(dev)))
 		return 1;
 
 	pdev = to_pci_dev(dev);
@@ -3318,9 +3272,9 @@
 		}
 	}
 
-	for_each_drhd_unit(drhd) {
+	for_each_active_drhd_unit(drhd) {
 		int i;
-		if (drhd->ignored || drhd->include_all)
+		if (drhd->include_all)
 			continue;
 
 		for (i = 0; i < drhd->devices_cnt; i++)
@@ -3514,18 +3468,12 @@
 rmrr_parse_dev(struct dmar_rmrr_unit *rmrru)
 {
 	struct acpi_dmar_reserved_memory *rmrr;
-	int ret;
 
 	rmrr = (struct acpi_dmar_reserved_memory *) rmrru->hdr;
-	ret = dmar_parse_dev_scope((void *)(rmrr + 1),
-		((void *)rmrr) + rmrr->header.length,
-		&rmrru->devices_cnt, &rmrru->devices, rmrr->segment);
-
-	if (ret || (rmrru->devices_cnt == 0)) {
-		list_del(&rmrru->list);
-		kfree(rmrru);
-	}
-	return ret;
+	return dmar_parse_dev_scope((void *)(rmrr + 1),
+				    ((void *)rmrr) + rmrr->header.length,
+				    &rmrru->devices_cnt, &rmrru->devices,
+				    rmrr->segment);
 }
 
 static LIST_HEAD(dmar_atsr_units);
@@ -3550,23 +3498,39 @@
 
 static int __init atsr_parse_dev(struct dmar_atsr_unit *atsru)
 {
-	int rc;
 	struct acpi_dmar_atsr *atsr;
 
 	if (atsru->include_all)
 		return 0;
 
 	atsr = container_of(atsru->hdr, struct acpi_dmar_atsr, header);
-	rc = dmar_parse_dev_scope((void *)(atsr + 1),
-				(void *)atsr + atsr->header.length,
-				&atsru->devices_cnt, &atsru->devices,
-				atsr->segment);
-	if (rc || !atsru->devices_cnt) {
-		list_del(&atsru->list);
-		kfree(atsru);
+	return dmar_parse_dev_scope((void *)(atsr + 1),
+				    (void *)atsr + atsr->header.length,
+				    &atsru->devices_cnt, &atsru->devices,
+				    atsr->segment);
+}
+
+static void intel_iommu_free_atsr(struct dmar_atsr_unit *atsru)
+{
+	dmar_free_dev_scope(&atsru->devices, &atsru->devices_cnt);
+	kfree(atsru);
+}
+
+static void intel_iommu_free_dmars(void)
+{
+	struct dmar_rmrr_unit *rmrru, *rmrr_n;
+	struct dmar_atsr_unit *atsru, *atsr_n;
+
+	list_for_each_entry_safe(rmrru, rmrr_n, &dmar_rmrr_units, list) {
+		list_del(&rmrru->list);
+		dmar_free_dev_scope(&rmrru->devices, &rmrru->devices_cnt);
+		kfree(rmrru);
 	}
 
-	return rc;
+	list_for_each_entry_safe(atsru, atsr_n, &dmar_atsr_units, list) {
+		list_del(&atsru->list);
+		intel_iommu_free_atsr(atsru);
+	}
 }
 
 int dmar_find_matched_atsr_unit(struct pci_dev *dev)
@@ -3610,17 +3574,17 @@
 
 int __init dmar_parse_rmrr_atsr_dev(void)
 {
-	struct dmar_rmrr_unit *rmrr, *rmrr_n;
-	struct dmar_atsr_unit *atsr, *atsr_n;
+	struct dmar_rmrr_unit *rmrr;
+	struct dmar_atsr_unit *atsr;
 	int ret = 0;
 
-	list_for_each_entry_safe(rmrr, rmrr_n, &dmar_rmrr_units, list) {
+	list_for_each_entry(rmrr, &dmar_rmrr_units, list) {
 		ret = rmrr_parse_dev(rmrr);
 		if (ret)
 			return ret;
 	}
 
-	list_for_each_entry_safe(atsr, atsr_n, &dmar_atsr_units, list) {
+	list_for_each_entry(atsr, &dmar_atsr_units, list) {
 		ret = atsr_parse_dev(atsr);
 		if (ret)
 			return ret;
@@ -3667,8 +3631,9 @@
 
 int __init intel_iommu_init(void)
 {
-	int ret = 0;
+	int ret = -ENODEV;
 	struct dmar_drhd_unit *drhd;
+	struct intel_iommu *iommu;
 
 	/* VT-d is required for a TXT/tboot launch, so enforce that */
 	force_on = tboot_force_iommu();
@@ -3676,36 +3641,29 @@
 	if (dmar_table_init()) {
 		if (force_on)
 			panic("tboot: Failed to initialize DMAR table\n");
-		return 	-ENODEV;
+		goto out_free_dmar;
 	}
 
 	/*
 	 * Disable translation if already enabled prior to OS handover.
 	 */
-	for_each_drhd_unit(drhd) {
-		struct intel_iommu *iommu;
-
-		if (drhd->ignored)
-			continue;
-
-		iommu = drhd->iommu;
+	for_each_active_iommu(iommu, drhd)
 		if (iommu->gcmd & DMA_GCMD_TE)
 			iommu_disable_translation(iommu);
-	}
 
 	if (dmar_dev_scope_init() < 0) {
 		if (force_on)
 			panic("tboot: Failed to initialize DMAR device scope\n");
-		return 	-ENODEV;
+		goto out_free_dmar;
 	}
 
 	if (no_iommu || dmar_disabled)
-		return -ENODEV;
+		goto out_free_dmar;
 
 	if (iommu_init_mempool()) {
 		if (force_on)
 			panic("tboot: Failed to initialize iommu memory\n");
-		return 	-ENODEV;
+		goto out_free_dmar;
 	}
 
 	if (list_empty(&dmar_rmrr_units))
@@ -3717,7 +3675,7 @@
 	if (dmar_init_reserved_ranges()) {
 		if (force_on)
 			panic("tboot: Failed to reserve iommu ranges\n");
-		return 	-ENODEV;
+		goto out_free_mempool;
 	}
 
 	init_no_remapping_devices();
@@ -3727,9 +3685,7 @@
 		if (force_on)
 			panic("tboot: Failed to initialize DMARs\n");
 		printk(KERN_ERR "IOMMU: dmar init failed\n");
-		put_iova_domain(&reserved_iova_list);
-		iommu_exit_mempool();
-		return ret;
+		goto out_free_reserved_range;
 	}
 	printk(KERN_INFO
 	"PCI-DMA: Intel(R) Virtualization Technology for Directed I/O\n");
@@ -3749,6 +3705,14 @@
 	intel_iommu_enabled = 1;
 
 	return 0;
+
+out_free_reserved_range:
+	put_iova_domain(&reserved_iova_list);
+out_free_mempool:
+	iommu_exit_mempool();
+out_free_dmar:
+	intel_iommu_free_dmars();
+	return ret;
 }
 
 static void iommu_detach_dependent_devices(struct intel_iommu *iommu,
@@ -3877,7 +3841,7 @@
 }
 
 /* domain id for virtual machine, it won't be set in context */
-static unsigned long vm_domid;
+static atomic_t vm_domid = ATOMIC_INIT(0);
 
 static struct dmar_domain *iommu_alloc_vm_domain(void)
 {
@@ -3887,7 +3851,7 @@
 	if (!domain)
 		return NULL;
 
-	domain->id = vm_domid++;
+	domain->id = atomic_inc_return(&vm_domid);
 	domain->nid = -1;
 	memset(domain->iommu_bmp, 0, sizeof(domain->iommu_bmp));
 	domain->flags = DOMAIN_FLAG_VIRTUAL_MACHINE;
@@ -3934,11 +3898,7 @@
 	unsigned long i;
 	unsigned long ndomains;
 
-	for_each_drhd_unit(drhd) {
-		if (drhd->ignored)
-			continue;
-		iommu = drhd->iommu;
-
+	for_each_active_iommu(iommu, drhd) {
 		ndomains = cap_ndoms(iommu->cap);
 		for_each_set_bit(i, iommu->domain_ids, ndomains) {
 			if (iommu->domains[i] == domain) {
diff --git a/drivers/iommu/intel_irq_remapping.c b/drivers/iommu/intel_irq_remapping.c
index 0cb7528..ef5f65d 100644
--- a/drivers/iommu/intel_irq_remapping.c
+++ b/drivers/iommu/intel_irq_remapping.c
@@ -40,13 +40,15 @@
 
 static DEFINE_RAW_SPINLOCK(irq_2_ir_lock);
 
+static int __init parse_ioapics_under_ir(void);
+
 static struct irq_2_iommu *irq_2_iommu(unsigned int irq)
 {
 	struct irq_cfg *cfg = irq_get_chip_data(irq);
 	return cfg ? &cfg->irq_2_iommu : NULL;
 }
 
-int get_irte(int irq, struct irte *entry)
+static int get_irte(int irq, struct irte *entry)
 {
 	struct irq_2_iommu *irq_iommu = irq_2_iommu(irq);
 	unsigned long flags;
@@ -69,19 +71,13 @@
 	struct ir_table *table = iommu->ir_table;
 	struct irq_2_iommu *irq_iommu = irq_2_iommu(irq);
 	struct irq_cfg *cfg = irq_get_chip_data(irq);
-	u16 index, start_index;
 	unsigned int mask = 0;
 	unsigned long flags;
-	int i;
+	int index;
 
 	if (!count || !irq_iommu)
 		return -1;
 
-	/*
-	 * start the IRTE search from index 0.
-	 */
-	index = start_index = 0;
-
 	if (count > 1) {
 		count = __roundup_pow_of_two(count);
 		mask = ilog2(count);
@@ -96,32 +92,17 @@
 	}
 
 	raw_spin_lock_irqsave(&irq_2_ir_lock, flags);
-	do {
-		for (i = index; i < index + count; i++)
-			if  (table->base[i].present)
-				break;
-		/* empty index found */
-		if (i == index + count)
-			break;
-
-		index = (index + count) % INTR_REMAP_TABLE_ENTRIES;
-
-		if (index == start_index) {
-			raw_spin_unlock_irqrestore(&irq_2_ir_lock, flags);
-			printk(KERN_ERR "can't allocate an IRTE\n");
-			return -1;
-		}
-	} while (1);
-
-	for (i = index; i < index + count; i++)
-		table->base[i].present = 1;
-
-	cfg->remapped = 1;
-	irq_iommu->iommu = iommu;
-	irq_iommu->irte_index =  index;
-	irq_iommu->sub_handle = 0;
-	irq_iommu->irte_mask = mask;
-
+	index = bitmap_find_free_region(table->bitmap,
+					INTR_REMAP_TABLE_ENTRIES, mask);
+	if (index < 0) {
+		pr_warn("IR%d: can't allocate an IRTE\n", iommu->seq_id);
+	} else {
+		cfg->remapped = 1;
+		irq_iommu->iommu = iommu;
+		irq_iommu->irte_index =  index;
+		irq_iommu->sub_handle = 0;
+		irq_iommu->irte_mask = mask;
+	}
 	raw_spin_unlock_irqrestore(&irq_2_ir_lock, flags);
 
 	return index;
@@ -254,6 +235,8 @@
 		set_64bit(&entry->low, 0);
 		set_64bit(&entry->high, 0);
 	}
+	bitmap_release_region(iommu->ir_table->bitmap, index,
+			      irq_iommu->irte_mask);
 
 	return qi_flush_iec(iommu, index, irq_iommu->irte_mask);
 }
@@ -336,7 +319,7 @@
 		return -1;
 	}
 
-	set_irte_sid(irte, 1, 0, sid);
+	set_irte_sid(irte, SVT_VERIFY_SID_SQ, SQ_ALL_16, sid);
 
 	return 0;
 }
@@ -453,6 +436,7 @@
 {
 	struct ir_table *ir_table;
 	struct page *pages;
+	unsigned long *bitmap;
 
 	ir_table = iommu->ir_table = kzalloc(sizeof(struct ir_table),
 					     GFP_ATOMIC);
@@ -464,13 +448,23 @@
 				 INTR_REMAP_PAGE_ORDER);
 
 	if (!pages) {
-		printk(KERN_ERR "failed to allocate pages of order %d\n",
-		       INTR_REMAP_PAGE_ORDER);
+		pr_err("IR%d: failed to allocate pages of order %d\n",
+		       iommu->seq_id, INTR_REMAP_PAGE_ORDER);
 		kfree(iommu->ir_table);
 		return -ENOMEM;
 	}
 
+	bitmap = kcalloc(BITS_TO_LONGS(INTR_REMAP_TABLE_ENTRIES),
+			 sizeof(long), GFP_ATOMIC);
+	if (bitmap == NULL) {
+		pr_err("IR%d: failed to allocate bitmap\n", iommu->seq_id);
+		__free_pages(pages, INTR_REMAP_PAGE_ORDER);
+		kfree(ir_table);
+		return -ENOMEM;
+	}
+
 	ir_table->base = page_address(pages);
+	ir_table->bitmap = bitmap;
 
 	iommu_set_irq_remapping(iommu, mode);
 	return 0;
@@ -521,6 +515,7 @@
 static int __init intel_irq_remapping_supported(void)
 {
 	struct dmar_drhd_unit *drhd;
+	struct intel_iommu *iommu;
 
 	if (disable_irq_remap)
 		return 0;
@@ -539,12 +534,9 @@
 	if (!dmar_ir_support())
 		return 0;
 
-	for_each_drhd_unit(drhd) {
-		struct intel_iommu *iommu = drhd->iommu;
-
+	for_each_iommu(iommu, drhd)
 		if (!ecap_ir_support(iommu->ecap))
 			return 0;
-	}
 
 	return 1;
 }
@@ -552,6 +544,7 @@
 static int __init intel_enable_irq_remapping(void)
 {
 	struct dmar_drhd_unit *drhd;
+	struct intel_iommu *iommu;
 	bool x2apic_present;
 	int setup = 0;
 	int eim = 0;
@@ -564,6 +557,8 @@
 	}
 
 	if (x2apic_present) {
+		pr_info("Queued invalidation will be enabled to support x2apic and Intr-remapping.\n");
+
 		eim = !dmar_x2apic_optout();
 		if (!eim)
 			printk(KERN_WARNING
@@ -572,9 +567,7 @@
 				"Use 'intremap=no_x2apic_optout' to override BIOS request.\n");
 	}
 
-	for_each_drhd_unit(drhd) {
-		struct intel_iommu *iommu = drhd->iommu;
-
+	for_each_iommu(iommu, drhd) {
 		/*
 		 * If the queued invalidation is already initialized,
 		 * we shouldn't disable it.
@@ -599,9 +592,7 @@
 	/*
 	 * check for the Interrupt-remapping support
 	 */
-	for_each_drhd_unit(drhd) {
-		struct intel_iommu *iommu = drhd->iommu;
-
+	for_each_iommu(iommu, drhd) {
 		if (!ecap_ir_support(iommu->ecap))
 			continue;
 
@@ -615,10 +606,8 @@
 	/*
 	 * Enable queued invalidation for all the DRHD's.
 	 */
-	for_each_drhd_unit(drhd) {
-		int ret;
-		struct intel_iommu *iommu = drhd->iommu;
-		ret = dmar_enable_qi(iommu);
+	for_each_iommu(iommu, drhd) {
+		int ret = dmar_enable_qi(iommu);
 
 		if (ret) {
 			printk(KERN_ERR "DRHD %Lx: failed to enable queued, "
@@ -631,9 +620,7 @@
 	/*
 	 * Setup Interrupt-remapping for all the DRHD's now.
 	 */
-	for_each_drhd_unit(drhd) {
-		struct intel_iommu *iommu = drhd->iommu;
-
+	for_each_iommu(iommu, drhd) {
 		if (!ecap_ir_support(iommu->ecap))
 			continue;
 
@@ -774,22 +761,20 @@
  * Finds the association between IOAPICs and their Interrupt-remapping
  * hardware unit.
  */
-int __init parse_ioapics_under_ir(void)
+static int __init parse_ioapics_under_ir(void)
 {
 	struct dmar_drhd_unit *drhd;
+	struct intel_iommu *iommu;
 	int ir_supported = 0;
 	int ioapic_idx;
 
-	for_each_drhd_unit(drhd) {
-		struct intel_iommu *iommu = drhd->iommu;
-
+	for_each_iommu(iommu, drhd)
 		if (ecap_ir_support(iommu->ecap)) {
 			if (ir_parse_ioapic_hpet_scope(drhd->hdr, iommu))
 				return -1;
 
 			ir_supported = 1;
 		}
-	}
 
 	if (!ir_supported)
 		return 0;
@@ -807,7 +792,7 @@
 	return 1;
 }
 
-int __init ir_dev_scope_init(void)
+static int __init ir_dev_scope_init(void)
 {
 	if (!irq_remapping_enabled)
 		return 0;
diff --git a/drivers/iommu/irq_remapping.c b/drivers/iommu/irq_remapping.c
index 39f81ae..228632c9 100644
--- a/drivers/iommu/irq_remapping.c
+++ b/drivers/iommu/irq_remapping.c
@@ -150,7 +150,7 @@
 		return do_setup_msix_irqs(dev, nvec);
 }
 
-void eoi_ioapic_pin_remapped(int apic, int pin, int vector)
+static void eoi_ioapic_pin_remapped(int apic, int pin, int vector)
 {
 	/*
 	 * Intr-remapping uses pin number as the virtual vector
@@ -295,8 +295,8 @@
 					     vector, attr);
 }
 
-int set_remapped_irq_affinity(struct irq_data *data, const struct cpumask *mask,
-			      bool force)
+static int set_remapped_irq_affinity(struct irq_data *data,
+				     const struct cpumask *mask, bool force)
 {
 	if (!config_enabled(CONFIG_SMP) || !remap_ops ||
 	    !remap_ops->set_affinity)
diff --git a/drivers/iommu/of_iommu.c b/drivers/iommu/of_iommu.c
index ee249bc..e550ccb 100644
--- a/drivers/iommu/of_iommu.c
+++ b/drivers/iommu/of_iommu.c
@@ -20,6 +20,7 @@
 #include <linux/export.h>
 #include <linux/limits.h>
 #include <linux/of.h>
+#include <linux/of_iommu.h>
 
 /**
  * of_get_dma_window - Parse the *dma-window property and return 0 if found.
diff --git a/drivers/iommu/shmobile-iommu.c b/drivers/iommu/shmobile-iommu.c
index d572863..7a3b928 100644
--- a/drivers/iommu/shmobile-iommu.c
+++ b/drivers/iommu/shmobile-iommu.c
@@ -380,14 +380,13 @@
 		kmem_cache_destroy(l1cache);
 		return -ENOMEM;
 	}
-	archdata = kmalloc(sizeof(*archdata), GFP_KERNEL);
+	archdata = kzalloc(sizeof(*archdata), GFP_KERNEL);
 	if (!archdata) {
 		kmem_cache_destroy(l1cache);
 		kmem_cache_destroy(l2cache);
 		return -ENOMEM;
 	}
 	spin_lock_init(&archdata->attach_lock);
-	archdata->attached = NULL;
 	archdata->ipmmu = ipmmu;
 	ipmmu_archdata = archdata;
 	bus_set_iommu(&platform_bus_type, &shmobile_iommu_ops);
diff --git a/drivers/iommu/shmobile-ipmmu.c b/drivers/iommu/shmobile-ipmmu.c
index 8321f89..e3bc2e1 100644
--- a/drivers/iommu/shmobile-ipmmu.c
+++ b/drivers/iommu/shmobile-ipmmu.c
@@ -35,12 +35,12 @@
 	if (!ipmmu)
 		return;
 
-	mutex_lock(&ipmmu->flush_lock);
+	spin_lock(&ipmmu->flush_lock);
 	if (ipmmu->tlb_enabled)
 		ipmmu_reg_write(ipmmu, IMCTR1, IMCTR1_FLUSH | IMCTR1_TLBEN);
 	else
 		ipmmu_reg_write(ipmmu, IMCTR1, IMCTR1_FLUSH);
-	mutex_unlock(&ipmmu->flush_lock);
+	spin_unlock(&ipmmu->flush_lock);
 }
 
 void ipmmu_tlb_set(struct shmobile_ipmmu *ipmmu, unsigned long phys, int size,
@@ -49,7 +49,7 @@
 	if (!ipmmu)
 		return;
 
-	mutex_lock(&ipmmu->flush_lock);
+	spin_lock(&ipmmu->flush_lock);
 	switch (size) {
 	default:
 		ipmmu->tlb_enabled = 0;
@@ -85,7 +85,7 @@
 	}
 	ipmmu_reg_write(ipmmu, IMTTBR, phys);
 	ipmmu_reg_write(ipmmu, IMASID, asid);
-	mutex_unlock(&ipmmu->flush_lock);
+	spin_unlock(&ipmmu->flush_lock);
 }
 
 static int ipmmu_probe(struct platform_device *pdev)
@@ -104,7 +104,7 @@
 		dev_err(&pdev->dev, "cannot allocate device data\n");
 		return -ENOMEM;
 	}
-	mutex_init(&ipmmu->flush_lock);
+	spin_lock_init(&ipmmu->flush_lock);
 	ipmmu->dev = &pdev->dev;
 	ipmmu->ipmmu_base = devm_ioremap_nocache(&pdev->dev, res->start,
 						resource_size(res));
diff --git a/drivers/iommu/shmobile-ipmmu.h b/drivers/iommu/shmobile-ipmmu.h
index 4d53684..9524743 100644
--- a/drivers/iommu/shmobile-ipmmu.h
+++ b/drivers/iommu/shmobile-ipmmu.h
@@ -14,7 +14,7 @@
 	struct device *dev;
 	void __iomem *ipmmu_base;
 	int tlb_enabled;
-	struct mutex flush_lock;
+	spinlock_t flush_lock;
 	const char * const *dev_names;
 	unsigned int num_dev_names;
 };
diff --git a/drivers/irqchip/Makefile b/drivers/irqchip/Makefile
index 86b484c..5194afb 100644
--- a/drivers/irqchip/Makefile
+++ b/drivers/irqchip/Makefile
@@ -21,6 +21,7 @@
 obj-$(CONFIG_RENESAS_INTC_IRQPIN)	+= irq-renesas-intc-irqpin.o
 obj-$(CONFIG_RENESAS_IRQC)		+= irq-renesas-irqc.o
 obj-$(CONFIG_VERSATILE_FPGA_IRQ)	+= irq-versatile-fpga.o
+obj-$(CONFIG_ARCH_NSPIRE)		+= irq-zevio.o
 obj-$(CONFIG_ARCH_VT8500)		+= irq-vt8500.o
 obj-$(CONFIG_TB10X_IRQC)		+= irq-tb10x.o
 obj-$(CONFIG_XTENSA)			+= irq-xtensa-pic.o
diff --git a/drivers/irqchip/irq-armada-370-xp.c b/drivers/irqchip/irq-armada-370-xp.c
index 433cc85..5409564 100644
--- a/drivers/irqchip/irq-armada-370-xp.c
+++ b/drivers/irqchip/irq-armada-370-xp.c
@@ -59,8 +59,6 @@
 #define PCI_MSI_DOORBELL_END                    (32)
 #define PCI_MSI_DOORBELL_MASK                   0xFFFF0000
 
-static DEFINE_RAW_SPINLOCK(irq_controller_lock);
-
 static void __iomem *per_cpu_int_base;
 static void __iomem *main_int_base;
 static struct irq_domain *armada_370_xp_mpic_domain;
@@ -239,6 +237,8 @@
 #endif
 
 #ifdef CONFIG_SMP
+static DEFINE_RAW_SPINLOCK(irq_controller_lock);
+
 static int armada_xp_set_affinity(struct irq_data *d,
 				  const struct cpumask *mask_val, bool force)
 {
@@ -381,7 +381,7 @@
 						ARMADA_370_XP_IN_DRBEL_CAUSE_OFFS)
 				& PCI_MSI_DOORBELL_MASK;
 
-			writel(~PCI_MSI_DOORBELL_MASK, per_cpu_int_base +
+			writel(~msimask, per_cpu_int_base +
 			       ARMADA_370_XP_IN_DRBEL_CAUSE_OFFS);
 
 			for (msinr = PCI_MSI_DOORBELL_START;
@@ -407,7 +407,7 @@
 						ARMADA_370_XP_IN_DRBEL_CAUSE_OFFS)
 				& IPI_DOORBELL_MASK;
 
-			writel(~IPI_DOORBELL_MASK, per_cpu_int_base +
+			writel(~ipimask, per_cpu_int_base +
 				ARMADA_370_XP_IN_DRBEL_CAUSE_OFFS);
 
 			/* Handle all pending doorbells */
diff --git a/drivers/irqchip/irq-zevio.c b/drivers/irqchip/irq-zevio.c
new file mode 100644
index 0000000..8ed04c4
--- /dev/null
+++ b/drivers/irqchip/irq-zevio.c
@@ -0,0 +1,127 @@
+/*
+ *  linux/drivers/irqchip/irq-zevio.c
+ *
+ *  Copyright (C) 2013 Daniel Tang <tangrs@tangrs.id.au>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2, as
+ * published by the Free Software Foundation.
+ *
+ */
+
+#include <linux/io.h>
+#include <linux/irq.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
+
+#include <asm/mach/irq.h>
+#include <asm/exception.h>
+
+#include "irqchip.h"
+
+#define IO_STATUS	0x000
+#define IO_RAW_STATUS	0x004
+#define IO_ENABLE	0x008
+#define IO_DISABLE	0x00C
+#define IO_CURRENT	0x020
+#define IO_RESET	0x028
+#define IO_MAX_PRIOTY	0x02C
+
+#define IO_IRQ_BASE	0x000
+#define IO_FIQ_BASE	0x100
+
+#define IO_INVERT_SEL	0x200
+#define IO_STICKY_SEL	0x204
+#define IO_PRIORITY_SEL	0x300
+
+#define MAX_INTRS	32
+#define FIQ_START	MAX_INTRS
+
+static struct irq_domain *zevio_irq_domain;
+static void __iomem *zevio_irq_io;
+
+static void zevio_irq_ack(struct irq_data *irqd)
+{
+	struct irq_chip_generic *gc = irq_data_get_irq_chip_data(irqd);
+	struct irq_chip_regs *regs =
+		&container_of(irqd->chip, struct irq_chip_type, chip)->regs;
+
+	readl(gc->reg_base + regs->ack);
+}
+
+static asmlinkage void __exception_irq_entry zevio_handle_irq(struct pt_regs *regs)
+{
+	int irqnr;
+
+	while (readl(zevio_irq_io + IO_STATUS)) {
+		irqnr = readl(zevio_irq_io + IO_CURRENT);
+		irqnr = irq_find_mapping(zevio_irq_domain, irqnr);
+		handle_IRQ(irqnr, regs);
+	}
+}
+
+static void __init zevio_init_irq_base(void __iomem *base)
+{
+	/* Disable all interrupts */
+	writel(~0, base + IO_DISABLE);
+
+	/* Accept interrupts of all priorities */
+	writel(0xF, base + IO_MAX_PRIOTY);
+
+	/* Reset existing interrupts */
+	readl(base + IO_RESET);
+}
+
+static int __init zevio_of_init(struct device_node *node,
+				struct device_node *parent)
+{
+	unsigned int clr = IRQ_NOREQUEST | IRQ_NOPROBE | IRQ_NOAUTOEN;
+	struct irq_chip_generic *gc;
+	int ret;
+
+	if (WARN_ON(zevio_irq_io || zevio_irq_domain))
+		return -EBUSY;
+
+	zevio_irq_io = of_iomap(node, 0);
+	BUG_ON(!zevio_irq_io);
+
+	/* Do not invert interrupt status bits */
+	writel(~0, zevio_irq_io + IO_INVERT_SEL);
+
+	/* Disable sticky interrupts */
+	writel(0, zevio_irq_io + IO_STICKY_SEL);
+
+	/* We don't use IRQ priorities. Set each IRQ to highest priority. */
+	memset_io(zevio_irq_io + IO_PRIORITY_SEL, 0, MAX_INTRS * sizeof(u32));
+
+	/* Init IRQ and FIQ */
+	zevio_init_irq_base(zevio_irq_io + IO_IRQ_BASE);
+	zevio_init_irq_base(zevio_irq_io + IO_FIQ_BASE);
+
+	zevio_irq_domain = irq_domain_add_linear(node, MAX_INTRS,
+						 &irq_generic_chip_ops, NULL);
+	BUG_ON(!zevio_irq_domain);
+
+	ret = irq_alloc_domain_generic_chips(zevio_irq_domain, MAX_INTRS, 1,
+					     "zevio_intc", handle_level_irq,
+					     clr, 0, IRQ_GC_INIT_MASK_CACHE);
+	BUG_ON(ret);
+
+	gc = irq_get_domain_generic_chip(zevio_irq_domain, 0);
+	gc->reg_base				= zevio_irq_io;
+	gc->chip_types[0].chip.irq_ack		= zevio_irq_ack;
+	gc->chip_types[0].chip.irq_mask		= irq_gc_mask_disable_reg;
+	gc->chip_types[0].chip.irq_unmask	= irq_gc_unmask_enable_reg;
+	gc->chip_types[0].regs.mask		= IO_IRQ_BASE + IO_ENABLE;
+	gc->chip_types[0].regs.enable		= IO_IRQ_BASE + IO_ENABLE;
+	gc->chip_types[0].regs.disable		= IO_IRQ_BASE + IO_DISABLE;
+	gc->chip_types[0].regs.ack		= IO_IRQ_BASE + IO_RESET;
+
+	set_handle_irq(zevio_handle_irq);
+
+	pr_info("TI-NSPIRE classic IRQ controller\n");
+	return 0;
+}
+
+IRQCHIP_DECLARE(zevio_irq, "lsi,zevio-intc", zevio_of_init);
diff --git a/drivers/isdn/hisax/q931.c b/drivers/isdn/hisax/q931.c
index af1b020..b420f8b 100644
--- a/drivers/isdn/hisax/q931.c
+++ b/drivers/isdn/hisax/q931.c
@@ -810,7 +810,7 @@
 	dp += sprintf(dp, "    octet 3  ");
 	dp += prbits(dp, *p, 8, 8);
 	*dp++ = '\n';
-	if (!(*p++ & 80)) {
+	if (!(*p++ & 0x80)) {
 		dp += sprintf(dp, "    octet 4  ");
 		dp += prbits(dp, *p++, 8, 8);
 		*dp++ = '\n';
diff --git a/drivers/md/bcache/Makefile b/drivers/md/bcache/Makefile
index 0e9c825..c488b84 100644
--- a/drivers/md/bcache/Makefile
+++ b/drivers/md/bcache/Makefile
@@ -1,7 +1,8 @@
 
 obj-$(CONFIG_BCACHE)	+= bcache.o
 
-bcache-y		:= alloc.o btree.o bset.o io.o journal.o writeback.o\
-	movinggc.o request.o super.o sysfs.o debug.o util.o trace.o stats.o closure.o
+bcache-y		:= alloc.o bset.o btree.o closure.o debug.o extents.o\
+	io.o journal.o movinggc.o request.o stats.o super.o sysfs.o trace.o\
+	util.o writeback.o
 
 CFLAGS_request.o	+= -Iblock
diff --git a/drivers/md/bcache/alloc.c b/drivers/md/bcache/alloc.c
index 4c9852d..c0d37d0 100644
--- a/drivers/md/bcache/alloc.c
+++ b/drivers/md/bcache/alloc.c
@@ -132,10 +132,16 @@
 {
 	BUG_ON(GC_MARK(b) || GC_SECTORS_USED(b));
 
-	if (fifo_used(&ca->free) > ca->watermark[WATERMARK_MOVINGGC] &&
-	    CACHE_REPLACEMENT(&ca->sb) == CACHE_REPLACEMENT_FIFO)
-		return false;
+	if (CACHE_REPLACEMENT(&ca->sb) == CACHE_REPLACEMENT_FIFO) {
+		unsigned i;
 
+		for (i = 0; i < RESERVE_NONE; i++)
+			if (!fifo_full(&ca->free[i]))
+				goto add;
+
+		return false;
+	}
+add:
 	b->prio = 0;
 
 	if (can_inc_bucket_gen(b) &&
@@ -162,8 +168,21 @@
 	fifo_push(&ca->free_inc, b - ca->buckets);
 }
 
-#define bucket_prio(b)				\
-	(((unsigned) (b->prio - ca->set->min_prio)) * GC_SECTORS_USED(b))
+/*
+ * Determines what order we're going to reuse buckets, smallest bucket_prio()
+ * first: we also take into account the number of sectors of live data in that
+ * bucket, and for that multiply to make sense we have to scale bucket prios.
+ *
+ * Thus, we scale the bucket priorities so that the bucket with the smallest
+ * prio is worth 1/8th of what INITIAL_PRIO is worth.
+ */
+
+#define bucket_prio(b)							\
+({									\
+	unsigned min_prio = (INITIAL_PRIO - ca->set->min_prio) / 8;	\
+									\
+	(b->prio - ca->set->min_prio + min_prio) * GC_SECTORS_USED(b);	\
+})
 
 #define bucket_max_cmp(l, r)	(bucket_prio(l) < bucket_prio(r))
 #define bucket_min_cmp(l, r)	(bucket_prio(l) > bucket_prio(r))
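To make the 1/8th scaling above concrete: with INITIAL_PRIO at 32768 and a cache
set whose min_prio happens to be 0, the scale term is 32768 / 8 = 4096, so even a
bucket sitting at the minimum prio weighs 4096 per used sector instead of 0. A
minimal standalone sketch of the same arithmetic (illustrative only, not part of
the patch):

static unsigned long example_bucket_prio(unsigned prio, unsigned set_min_prio,
					 unsigned sectors_used)
{
	/* mirrors bucket_prio() above, assuming INITIAL_PRIO == 32768 */
	unsigned scale = (32768U - set_min_prio) / 8;

	return (unsigned long)(prio - set_min_prio + scale) * sectors_used;
}

A minimum-prio bucket with one live sector then scores 4096, one eighth of
INITIAL_PRIO, matching the comment in the hunk above.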
@@ -304,6 +323,21 @@
 	__set_current_state(TASK_RUNNING);				\
 } while (0)
 
+static int bch_allocator_push(struct cache *ca, long bucket)
+{
+	unsigned i;
+
+	/* Prios/gens are actually the most important reserve */
+	if (fifo_push(&ca->free[RESERVE_PRIO], bucket))
+		return true;
+
+	for (i = 0; i < RESERVE_NR; i++)
+		if (fifo_push(&ca->free[i], bucket))
+			return true;
+
+	return false;
+}
+
 static int bch_allocator_thread(void *arg)
 {
 	struct cache *ca = arg;
@@ -336,9 +370,7 @@
 				mutex_lock(&ca->set->bucket_lock);
 			}
 
-			allocator_wait(ca, !fifo_full(&ca->free));
-
-			fifo_push(&ca->free, bucket);
+			allocator_wait(ca, bch_allocator_push(ca, bucket));
 			wake_up(&ca->set->bucket_wait);
 		}
 
@@ -365,34 +397,29 @@
 	}
 }
 
-long bch_bucket_alloc(struct cache *ca, unsigned watermark, bool wait)
+long bch_bucket_alloc(struct cache *ca, unsigned reserve, bool wait)
 {
 	DEFINE_WAIT(w);
 	struct bucket *b;
 	long r;
 
 	/* fastpath */
-	if (fifo_used(&ca->free) > ca->watermark[watermark]) {
-		fifo_pop(&ca->free, r);
+	if (fifo_pop(&ca->free[RESERVE_NONE], r) ||
+	    fifo_pop(&ca->free[reserve], r))
 		goto out;
-	}
 
 	if (!wait)
 		return -1;
 
-	while (1) {
-		if (fifo_used(&ca->free) > ca->watermark[watermark]) {
-			fifo_pop(&ca->free, r);
-			break;
-		}
-
+	do {
 		prepare_to_wait(&ca->set->bucket_wait, &w,
 				TASK_UNINTERRUPTIBLE);
 
 		mutex_unlock(&ca->set->bucket_lock);
 		schedule();
 		mutex_lock(&ca->set->bucket_lock);
-	}
+	} while (!fifo_pop(&ca->free[RESERVE_NONE], r) &&
+		 !fifo_pop(&ca->free[reserve], r));
 
 	finish_wait(&ca->set->bucket_wait, &w);
 out:
@@ -401,12 +428,14 @@
 	if (expensive_debug_checks(ca->set)) {
 		size_t iter;
 		long i;
+		unsigned j;
 
 		for (iter = 0; iter < prio_buckets(ca) * 2; iter++)
 			BUG_ON(ca->prio_buckets[iter] == (uint64_t) r);
 
-		fifo_for_each(i, &ca->free, iter)
-			BUG_ON(i == r);
+		for (j = 0; j < RESERVE_NR; j++)
+			fifo_for_each(i, &ca->free[j], iter)
+				BUG_ON(i == r);
 		fifo_for_each(i, &ca->free_inc, iter)
 			BUG_ON(i == r);
 		fifo_for_each(i, &ca->unused, iter)
@@ -419,7 +448,7 @@
 
 	SET_GC_SECTORS_USED(b, ca->sb.bucket_size);
 
-	if (watermark <= WATERMARK_METADATA) {
+	if (reserve <= RESERVE_PRIO) {
 		SET_GC_MARK(b, GC_MARK_METADATA);
 		SET_GC_MOVE(b, 0);
 		b->prio = BTREE_PRIO;
@@ -445,7 +474,7 @@
 	}
 }
 
-int __bch_bucket_alloc_set(struct cache_set *c, unsigned watermark,
+int __bch_bucket_alloc_set(struct cache_set *c, unsigned reserve,
 			   struct bkey *k, int n, bool wait)
 {
 	int i;
@@ -459,7 +488,7 @@
 
 	for (i = 0; i < n; i++) {
 		struct cache *ca = c->cache_by_alloc[i];
-		long b = bch_bucket_alloc(ca, watermark, wait);
+		long b = bch_bucket_alloc(ca, reserve, wait);
 
 		if (b == -1)
 			goto err;
@@ -478,12 +507,12 @@
 	return -1;
 }
 
-int bch_bucket_alloc_set(struct cache_set *c, unsigned watermark,
+int bch_bucket_alloc_set(struct cache_set *c, unsigned reserve,
 			 struct bkey *k, int n, bool wait)
 {
 	int ret;
 	mutex_lock(&c->bucket_lock);
-	ret = __bch_bucket_alloc_set(c, watermark, k, n, wait);
+	ret = __bch_bucket_alloc_set(c, reserve, k, n, wait);
 	mutex_unlock(&c->bucket_lock);
 	return ret;
 }
@@ -573,8 +602,8 @@
 
 	while (!(b = pick_data_bucket(c, k, write_point, &alloc.key))) {
 		unsigned watermark = write_prio
-			? WATERMARK_MOVINGGC
-			: WATERMARK_NONE;
+			? RESERVE_MOVINGGC
+			: RESERVE_NONE;
 
 		spin_unlock(&c->data_bucket_lock);
 
@@ -689,7 +718,7 @@
 	 * Then 8 for btree allocations
 	 * Then half for the moving garbage collector
 	 */
-
+#if 0
 	ca->watermark[WATERMARK_PRIO] = 0;
 
 	ca->watermark[WATERMARK_METADATA] = prio_buckets(ca);
@@ -699,6 +728,6 @@
 
 	ca->watermark[WATERMARK_NONE] = ca->free.size / 2 +
 		ca->watermark[WATERMARK_MOVINGGC];
-
+#endif
 	return 0;
 }
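Summing up the new scheme in this file: each cache keeps one free FIFO per
reserve, bch_bucket_alloc() drains the general RESERVE_NONE FIFO before falling
back to the caller's reserve, and bch_allocator_push() refills RESERVE_PRIO
ahead of the rest. A toy standalone model of the pop order (illustrative only;
the enum names mirror the patch, the FIFOs here are fakes):

#include <stdbool.h>
#include <stdio.h>

enum reserve { RESERVE_BTREE, RESERVE_PRIO, RESERVE_MOVINGGC, RESERVE_NONE, RESERVE_NR };

struct toy_fifo { long bucket; bool full; };	/* stand-in for DECLARE_FIFO(long, free) */

static bool toy_pop(struct toy_fifo *f, long *out)
{
	if (!f->full)
		return false;
	*out = f->bucket;
	f->full = false;
	return true;
}

/* mirrors the fastpath of bch_bucket_alloc(): general pool first, then the
 * caller's reserve */
static long toy_alloc(struct toy_fifo *fifos, enum reserve reserve)
{
	long r;

	if (toy_pop(&fifos[RESERVE_NONE], &r) || toy_pop(&fifos[reserve], &r))
		return r;

	return -1;
}

int main(void)
{
	struct toy_fifo fifos[RESERVE_NR] = { [RESERVE_PRIO] = { 42, true } };

	/* RESERVE_NONE is empty, so the allocation falls back to RESERVE_PRIO */
	printf("%ld\n", toy_alloc(fifos, RESERVE_PRIO));
	return 0;
}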
diff --git a/drivers/md/bcache/bcache.h b/drivers/md/bcache/bcache.h
index 754f4317..a4c7306 100644
--- a/drivers/md/bcache/bcache.h
+++ b/drivers/md/bcache/bcache.h
@@ -187,6 +187,7 @@
 #include <linux/types.h>
 #include <linux/workqueue.h>
 
+#include "bset.h"
 #include "util.h"
 #include "closure.h"
 
@@ -209,7 +210,9 @@
 #define GC_MARK_RECLAIMABLE	0
 #define GC_MARK_DIRTY		1
 #define GC_MARK_METADATA	2
-BITMASK(GC_SECTORS_USED, struct bucket, gc_mark, 2, 13);
+#define GC_SECTORS_USED_SIZE	13
+#define MAX_GC_SECTORS_USED	(~(~0ULL << GC_SECTORS_USED_SIZE))
+BITMASK(GC_SECTORS_USED, struct bucket, gc_mark, 2, GC_SECTORS_USED_SIZE);
 BITMASK(GC_MOVE, struct bucket, gc_mark, 15, 1);
 
 #include "journal.h"
@@ -280,7 +283,6 @@
 	unsigned long		sectors_dirty_last;
 	long			sectors_dirty_derivative;
 
-	mempool_t		*unaligned_bvec;
 	struct bio_set		*bio_split;
 
 	unsigned		data_csum:1;
@@ -310,7 +312,8 @@
 	struct cache_sb		sb;
 	struct bio		sb_bio;
 	struct bio_vec		sb_bv[1];
-	struct closure_with_waitlist sb_write;
+	struct closure		sb_write;
+	struct semaphore	sb_write_mutex;
 
 	/* Refcount on the cache set. Always nonzero when we're caching. */
 	atomic_t		count;
@@ -383,12 +386,12 @@
 	unsigned		writeback_rate_p_term_inverse;
 };
 
-enum alloc_watermarks {
-	WATERMARK_PRIO,
-	WATERMARK_METADATA,
-	WATERMARK_MOVINGGC,
-	WATERMARK_NONE,
-	WATERMARK_MAX
+enum alloc_reserve {
+	RESERVE_BTREE,
+	RESERVE_PRIO,
+	RESERVE_MOVINGGC,
+	RESERVE_NONE,
+	RESERVE_NR,
 };
 
 struct cache {
@@ -400,8 +403,6 @@
 	struct kobject		kobj;
 	struct block_device	*bdev;
 
-	unsigned		watermark[WATERMARK_MAX];
-
 	struct task_struct	*alloc_thread;
 
 	struct closure		prio;
@@ -430,7 +431,7 @@
 	 * because all the data they contained was overwritten), so we only
 	 * need to discard them before they can be moved to the free list.
 	 */
-	DECLARE_FIFO(long, free);
+	DECLARE_FIFO(long, free)[RESERVE_NR];
 	DECLARE_FIFO(long, free_inc);
 	DECLARE_FIFO(long, unused);
 
@@ -515,7 +516,8 @@
 	uint64_t		cached_dev_sectors;
 	struct closure		caching;
 
-	struct closure_with_waitlist sb_write;
+	struct closure		sb_write;
+	struct semaphore	sb_write_mutex;
 
 	mempool_t		*search;
 	mempool_t		*bio_meta;
@@ -630,13 +632,15 @@
 
 #ifdef CONFIG_BCACHE_DEBUG
 	struct btree		*verify_data;
+	struct bset		*verify_ondisk;
 	struct mutex		verify_lock;
 #endif
 
 	unsigned		nr_uuids;
 	struct uuid_entry	*uuids;
 	BKEY_PADDED(uuid_bucket);
-	struct closure_with_waitlist uuid_write;
+	struct closure		uuid_write;
+	struct semaphore	uuid_write_mutex;
 
 	/*
 	 * A btree node on disk could have too many bsets for an iterator to fit
@@ -644,13 +648,7 @@
 	 */
 	mempool_t		*fill_iter;
 
-	/*
-	 * btree_sort() is a merge sort and requires temporary space - single
-	 * element mempool
-	 */
-	struct mutex		sort_lock;
-	struct bset		*sort;
-	unsigned		sort_crit_factor;
+	struct bset_sort_state	sort;
 
 	/* List of buckets we're currently writing data to */
 	struct list_head	data_buckets;
@@ -666,7 +664,6 @@
 	unsigned		congested_read_threshold_us;
 	unsigned		congested_write_threshold_us;
 
-	struct time_stats	sort_time;
 	struct time_stats	btree_gc_time;
 	struct time_stats	btree_split_time;
 	struct time_stats	btree_read_time;
@@ -684,9 +681,9 @@
 	unsigned		error_decay;
 
 	unsigned short		journal_delay_ms;
+	bool			expensive_debug_checks;
 	unsigned		verify:1;
 	unsigned		key_merging_disabled:1;
-	unsigned		expensive_debug_checks:1;
 	unsigned		gc_always_rewrite:1;
 	unsigned		shrinker_disabled:1;
 	unsigned		copy_gc_enabled:1;
@@ -708,13 +705,8 @@
 	struct bio		bio;
 };
 
-static inline unsigned local_clock_us(void)
-{
-	return local_clock() >> 10;
-}
-
 #define BTREE_PRIO		USHRT_MAX
-#define INITIAL_PRIO		32768
+#define INITIAL_PRIO		32768U
 
 #define btree_bytes(c)		((c)->btree_pages * PAGE_SIZE)
 #define btree_blocks(b)							\
@@ -727,21 +719,6 @@
 #define bucket_bytes(c)		((c)->sb.bucket_size << 9)
 #define block_bytes(c)		((c)->sb.block_size << 9)
 
-#define __set_bytes(i, k)	(sizeof(*(i)) + (k) * sizeof(uint64_t))
-#define set_bytes(i)		__set_bytes(i, i->keys)
-
-#define __set_blocks(i, k, c)	DIV_ROUND_UP(__set_bytes(i, k), block_bytes(c))
-#define set_blocks(i, c)	__set_blocks(i, (i)->keys, c)
-
-#define node(i, j)		((struct bkey *) ((i)->d + (j)))
-#define end(i)			node(i, (i)->keys)
-
-#define index(i, b)							\
-	((size_t) (((void *) i - (void *) (b)->sets[0].data) /		\
-		   block_bytes(b->c)))
-
-#define btree_data_space(b)	(PAGE_SIZE << (b)->page_order)
-
 #define prios_per_bucket(c)				\
 	((bucket_bytes(c) - sizeof(struct prio_set)) /	\
 	 sizeof(struct bucket_disk))
@@ -784,20 +761,34 @@
 	return PTR_CACHE(c, k, ptr)->buckets + PTR_BUCKET_NR(c, k, ptr);
 }
 
-/* Btree key macros */
-
-static inline void bkey_init(struct bkey *k)
+static inline uint8_t gen_after(uint8_t a, uint8_t b)
 {
-	*k = ZERO_KEY;
+	uint8_t r = a - b;
+	return r > 128U ? 0 : r;
 }
 
+static inline uint8_t ptr_stale(struct cache_set *c, const struct bkey *k,
+				unsigned i)
+{
+	return gen_after(PTR_BUCKET(c, k, i)->gen, PTR_GEN(k, i));
+}
+
+static inline bool ptr_available(struct cache_set *c, const struct bkey *k,
+				 unsigned i)
+{
+	return (PTR_DEV(k, i) < MAX_CACHES_PER_SET) && PTR_CACHE(c, k, i);
+}
+
+/* Btree key macros */
+
 /*
  * This is used for various on disk data structures - cache_sb, prio_set, bset,
  * jset: The checksum is _always_ the first 8 bytes of these structs
  */
 #define csum_set(i)							\
 	bch_crc64(((void *) (i)) + sizeof(uint64_t),			\
-	      ((void *) end(i)) - (((void *) (i)) + sizeof(uint64_t)))
+		  ((void *) bset_bkey_last(i)) -			\
+		  (((void *) (i)) + sizeof(uint64_t)))
 
 /* Error handling macros */
 
@@ -902,7 +893,6 @@
 void bch_bbio_free(struct bio *, struct cache_set *);
 struct bio *bch_bbio_alloc(struct cache_set *);
 
-struct bio *bch_bio_split(struct bio *, int, gfp_t, struct bio_set *);
 void bch_generic_make_request(struct bio *, struct bio_split_pool *);
 void __bch_submit_bbio(struct bio *, struct cache_set *);
 void bch_submit_bbio(struct bio *, struct cache_set *, struct bkey *, unsigned);
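The gen_after()/ptr_stale() helpers moved into this header treat the 8-bit
generation counters as wrapping: a difference greater than 128 means the key's
gen is actually ahead of the bucket's, so the pointer is not considered stale.
A standalone sanity check of that wrap-around handling (illustrative only, not
part of the patch):

#include <assert.h>
#include <stdint.h>

static uint8_t gen_after(uint8_t a, uint8_t b)
{
	uint8_t r = a - b;	/* wraps modulo 256 */

	return r > 128 ? 0 : r;
}

int main(void)
{
	assert(gen_after(2, 250) == 8);	/* bucket gen lapped the key's gen: stale by 8 */
	assert(gen_after(250, 2) == 0);	/* key gen is ahead: not stale */
	return 0;
}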
diff --git a/drivers/md/bcache/bset.c b/drivers/md/bcache/bset.c
index 7d388b8..3f74b4b 100644
--- a/drivers/md/bcache/bset.c
+++ b/drivers/md/bcache/bset.c
@@ -5,30 +5,134 @@
  * Copyright 2012 Google, Inc.
  */
 
-#include "bcache.h"
-#include "btree.h"
-#include "debug.h"
+#define pr_fmt(fmt) "bcache: %s() " fmt "\n", __func__
 
+#include "util.h"
+#include "bset.h"
+
+#include <linux/console.h>
 #include <linux/random.h>
 #include <linux/prefetch.h>
 
+#ifdef CONFIG_BCACHE_DEBUG
+
+void bch_dump_bset(struct btree_keys *b, struct bset *i, unsigned set)
+{
+	struct bkey *k, *next;
+
+	for (k = i->start; k < bset_bkey_last(i); k = next) {
+		next = bkey_next(k);
+
+		printk(KERN_ERR "block %u key %li/%u: ", set,
+		       (uint64_t *) k - i->d, i->keys);
+
+		if (b->ops->key_dump)
+			b->ops->key_dump(b, k);
+		else
+			printk("%llu:%llu\n", KEY_INODE(k), KEY_OFFSET(k));
+
+		if (next < bset_bkey_last(i) &&
+		    bkey_cmp(k, b->ops->is_extents ?
+			     &START_KEY(next) : next) > 0)
+			printk(KERN_ERR "Key skipped backwards\n");
+	}
+}
+
+void bch_dump_bucket(struct btree_keys *b)
+{
+	unsigned i;
+
+	console_lock();
+	for (i = 0; i <= b->nsets; i++)
+		bch_dump_bset(b, b->set[i].data,
+			      bset_sector_offset(b, b->set[i].data));
+	console_unlock();
+}
+
+int __bch_count_data(struct btree_keys *b)
+{
+	unsigned ret = 0;
+	struct btree_iter iter;
+	struct bkey *k;
+
+	if (b->ops->is_extents)
+		for_each_key(b, k, &iter)
+			ret += KEY_SIZE(k);
+	return ret;
+}
+
+void __bch_check_keys(struct btree_keys *b, const char *fmt, ...)
+{
+	va_list args;
+	struct bkey *k, *p = NULL;
+	struct btree_iter iter;
+	const char *err;
+
+	for_each_key(b, k, &iter) {
+		if (b->ops->is_extents) {
+			err = "Keys out of order";
+			if (p && bkey_cmp(&START_KEY(p), &START_KEY(k)) > 0)
+				goto bug;
+
+			if (bch_ptr_invalid(b, k))
+				continue;
+
+			err =  "Overlapping keys";
+			if (p && bkey_cmp(p, &START_KEY(k)) > 0)
+				goto bug;
+		} else {
+			if (bch_ptr_bad(b, k))
+				continue;
+
+			err = "Duplicate keys";
+			if (p && !bkey_cmp(p, k))
+				goto bug;
+		}
+		p = k;
+	}
+#if 0
+	err = "Key larger than btree node key";
+	if (p && bkey_cmp(p, &b->key) > 0)
+		goto bug;
+#endif
+	return;
+bug:
+	bch_dump_bucket(b);
+
+	va_start(args, fmt);
+	vprintk(fmt, args);
+	va_end(args);
+
+	panic("bch_check_keys error:  %s:\n", err);
+}
+
+static void bch_btree_iter_next_check(struct btree_iter *iter)
+{
+	struct bkey *k = iter->data->k, *next = bkey_next(k);
+
+	if (next < iter->data->end &&
+	    bkey_cmp(k, iter->b->ops->is_extents ?
+		     &START_KEY(next) : next) > 0) {
+		bch_dump_bucket(iter->b);
+		panic("Key skipped backwards\n");
+	}
+}
+
+#else
+
+static inline void bch_btree_iter_next_check(struct btree_iter *iter) {}
+
+#endif
+
 /* Keylists */
 
-int bch_keylist_realloc(struct keylist *l, int nptrs, struct cache_set *c)
+int __bch_keylist_realloc(struct keylist *l, unsigned u64s)
 {
 	size_t oldsize = bch_keylist_nkeys(l);
-	size_t newsize = oldsize + 2 + nptrs;
+	size_t newsize = oldsize + u64s;
 	uint64_t *old_keys = l->keys_p == l->inline_keys ? NULL : l->keys_p;
 	uint64_t *new_keys;
 
-	/* The journalling code doesn't handle the case where the keys to insert
-	 * is bigger than an empty write: If we just return -ENOMEM here,
-	 * bio_insert() and bio_invalidate() will insert the keys created so far
-	 * and finish the rest when the keylist is empty.
-	 */
-	if (newsize * sizeof(uint64_t) > block_bytes(c) - sizeof(struct jset))
-		return -ENOMEM;
-
 	newsize = roundup_pow_of_two(newsize);
 
 	if (newsize <= KEYLIST_INLINE ||
@@ -71,136 +175,6 @@
 		bch_keylist_bytes(l));
 }
 
-/* Pointer validation */
-
-static bool __ptr_invalid(struct cache_set *c, const struct bkey *k)
-{
-	unsigned i;
-
-	for (i = 0; i < KEY_PTRS(k); i++)
-		if (ptr_available(c, k, i)) {
-			struct cache *ca = PTR_CACHE(c, k, i);
-			size_t bucket = PTR_BUCKET_NR(c, k, i);
-			size_t r = bucket_remainder(c, PTR_OFFSET(k, i));
-
-			if (KEY_SIZE(k) + r > c->sb.bucket_size ||
-			    bucket <  ca->sb.first_bucket ||
-			    bucket >= ca->sb.nbuckets)
-				return true;
-		}
-
-	return false;
-}
-
-bool bch_btree_ptr_invalid(struct cache_set *c, const struct bkey *k)
-{
-	char buf[80];
-
-	if (!KEY_PTRS(k) || !KEY_SIZE(k) || KEY_DIRTY(k))
-		goto bad;
-
-	if (__ptr_invalid(c, k))
-		goto bad;
-
-	return false;
-bad:
-	bch_bkey_to_text(buf, sizeof(buf), k);
-	cache_bug(c, "spotted btree ptr %s: %s", buf, bch_ptr_status(c, k));
-	return true;
-}
-
-bool bch_extent_ptr_invalid(struct cache_set *c, const struct bkey *k)
-{
-	char buf[80];
-
-	if (!KEY_SIZE(k))
-		return true;
-
-	if (KEY_SIZE(k) > KEY_OFFSET(k))
-		goto bad;
-
-	if (__ptr_invalid(c, k))
-		goto bad;
-
-	return false;
-bad:
-	bch_bkey_to_text(buf, sizeof(buf), k);
-	cache_bug(c, "spotted extent %s: %s", buf, bch_ptr_status(c, k));
-	return true;
-}
-
-static bool ptr_bad_expensive_checks(struct btree *b, const struct bkey *k,
-				     unsigned ptr)
-{
-	struct bucket *g = PTR_BUCKET(b->c, k, ptr);
-	char buf[80];
-
-	if (mutex_trylock(&b->c->bucket_lock)) {
-		if (b->level) {
-			if (KEY_DIRTY(k) ||
-			    g->prio != BTREE_PRIO ||
-			    (b->c->gc_mark_valid &&
-			     GC_MARK(g) != GC_MARK_METADATA))
-				goto err;
-
-		} else {
-			if (g->prio == BTREE_PRIO)
-				goto err;
-
-			if (KEY_DIRTY(k) &&
-			    b->c->gc_mark_valid &&
-			    GC_MARK(g) != GC_MARK_DIRTY)
-				goto err;
-		}
-		mutex_unlock(&b->c->bucket_lock);
-	}
-
-	return false;
-err:
-	mutex_unlock(&b->c->bucket_lock);
-	bch_bkey_to_text(buf, sizeof(buf), k);
-	btree_bug(b,
-"inconsistent pointer %s: bucket %zu pin %i prio %i gen %i last_gc %i mark %llu gc_gen %i",
-		  buf, PTR_BUCKET_NR(b->c, k, ptr), atomic_read(&g->pin),
-		  g->prio, g->gen, g->last_gc, GC_MARK(g), g->gc_gen);
-	return true;
-}
-
-bool bch_ptr_bad(struct btree *b, const struct bkey *k)
-{
-	struct bucket *g;
-	unsigned i, stale;
-
-	if (!bkey_cmp(k, &ZERO_KEY) ||
-	    !KEY_PTRS(k) ||
-	    bch_ptr_invalid(b, k))
-		return true;
-
-	for (i = 0; i < KEY_PTRS(k); i++) {
-		if (!ptr_available(b->c, k, i))
-			return true;
-
-		g = PTR_BUCKET(b->c, k, i);
-		stale = ptr_stale(b->c, k, i);
-
-		btree_bug_on(stale > 96, b,
-			     "key too stale: %i, need_gc %u",
-			     stale, b->c->need_gc);
-
-		btree_bug_on(stale && KEY_DIRTY(k) && KEY_SIZE(k),
-			     b, "stale dirty pointer");
-
-		if (stale)
-			return true;
-
-		if (expensive_debug_checks(b->c) &&
-		    ptr_bad_expensive_checks(b, k, i))
-			return true;
-	}
-
-	return false;
-}
-
 /* Key/pointer manipulation */
 
 void bch_bkey_copy_single_ptr(struct bkey *dest, const struct bkey *src,
@@ -255,56 +229,138 @@
 	return true;
 }
 
-static uint64_t merge_chksums(struct bkey *l, struct bkey *r)
+/* Auxiliary search trees */
+
+/* 32 bits total: */
+#define BKEY_MID_BITS		3
+#define BKEY_EXPONENT_BITS	7
+#define BKEY_MANTISSA_BITS	(32 - BKEY_MID_BITS - BKEY_EXPONENT_BITS)
+#define BKEY_MANTISSA_MASK	((1 << BKEY_MANTISSA_BITS) - 1)
+
+struct bkey_float {
+	unsigned	exponent:BKEY_EXPONENT_BITS;
+	unsigned	m:BKEY_MID_BITS;
+	unsigned	mantissa:BKEY_MANTISSA_BITS;
+} __packed;
+
+/*
+ * BSET_CACHELINE was originally intended to match the hardware cacheline size -
+ * it used to be 64, but I realized the lookup code would touch slightly less
+ * memory if it was 128.
+ *
+ * It defines the number of bytes (in struct bset) per struct bkey_float in
+ * the auxiliary search tree - when we're done searching the bset_float tree we
+ * have this many bytes left that we do a linear search over.
+ *
+ * Since (after level 5) every level of the bset_tree is on a new cacheline,
+ * we're touching one fewer cacheline in the bset tree in exchange for one more
+ * cacheline in the linear search - but the linear search might stop before it
+ * gets to the second cacheline.
+ */
+
+#define BSET_CACHELINE		128
+
+/* Space required for the btree node keys */
+static inline size_t btree_keys_bytes(struct btree_keys *b)
 {
-	return (l->ptr[KEY_PTRS(l)] + r->ptr[KEY_PTRS(r)]) &
-		~((uint64_t)1 << 63);
+	return PAGE_SIZE << b->page_order;
 }
 
-/* Tries to merge l and r: l should be lower than r
- * Returns true if we were able to merge. If we did merge, l will be the merged
- * key, r will be untouched.
- */
-bool bch_bkey_try_merge(struct btree *b, struct bkey *l, struct bkey *r)
+static inline size_t btree_keys_cachelines(struct btree_keys *b)
+{
+	return btree_keys_bytes(b) / BSET_CACHELINE;
+}
+
+/* Space required for the auxiliary search trees */
+static inline size_t bset_tree_bytes(struct btree_keys *b)
+{
+	return btree_keys_cachelines(b) * sizeof(struct bkey_float);
+}
+
+/* Space required for the prev pointers */
+static inline size_t bset_prev_bytes(struct btree_keys *b)
+{
+	return btree_keys_cachelines(b) * sizeof(uint8_t);
+}
+
+/* Memory allocation */
+
+void bch_btree_keys_free(struct btree_keys *b)
+{
+	struct bset_tree *t = b->set;
+
+	if (bset_prev_bytes(b) < PAGE_SIZE)
+		kfree(t->prev);
+	else
+		free_pages((unsigned long) t->prev,
+			   get_order(bset_prev_bytes(b)));
+
+	if (bset_tree_bytes(b) < PAGE_SIZE)
+		kfree(t->tree);
+	else
+		free_pages((unsigned long) t->tree,
+			   get_order(bset_tree_bytes(b)));
+
+	free_pages((unsigned long) t->data, b->page_order);
+
+	t->prev = NULL;
+	t->tree = NULL;
+	t->data = NULL;
+}
+EXPORT_SYMBOL(bch_btree_keys_free);
+
+int bch_btree_keys_alloc(struct btree_keys *b, unsigned page_order, gfp_t gfp)
+{
+	struct bset_tree *t = b->set;
+
+	BUG_ON(t->data);
+
+	b->page_order = page_order;
+
+	t->data = (void *) __get_free_pages(gfp, b->page_order);
+	if (!t->data)
+		goto err;
+
+	t->tree = bset_tree_bytes(b) < PAGE_SIZE
+		? kmalloc(bset_tree_bytes(b), gfp)
+		: (void *) __get_free_pages(gfp, get_order(bset_tree_bytes(b)));
+	if (!t->tree)
+		goto err;
+
+	t->prev = bset_prev_bytes(b) < PAGE_SIZE
+		? kmalloc(bset_prev_bytes(b), gfp)
+		: (void *) __get_free_pages(gfp, get_order(bset_prev_bytes(b)));
+	if (!t->prev)
+		goto err;
+
+	return 0;
+err:
+	bch_btree_keys_free(b);
+	return -ENOMEM;
+}
+EXPORT_SYMBOL(bch_btree_keys_alloc);
+
+void bch_btree_keys_init(struct btree_keys *b, const struct btree_keys_ops *ops,
+			 bool *expensive_debug_checks)
 {
 	unsigned i;
 
-	if (key_merging_disabled(b->c))
-		return false;
+	b->ops = ops;
+	b->expensive_debug_checks = expensive_debug_checks;
+	b->nsets = 0;
+	b->last_set_unwritten = 0;
 
-	if (KEY_PTRS(l) != KEY_PTRS(r) ||
-	    KEY_DIRTY(l) != KEY_DIRTY(r) ||
-	    bkey_cmp(l, &START_KEY(r)))
-		return false;
-
-	for (i = 0; i < KEY_PTRS(l); i++)
-		if (l->ptr[i] + PTR(0, KEY_SIZE(l), 0) != r->ptr[i] ||
-		    PTR_BUCKET_NR(b->c, l, i) != PTR_BUCKET_NR(b->c, r, i))
-			return false;
-
-	/* Keys with no pointers aren't restricted to one bucket and could
-	 * overflow KEY_SIZE
+	/* XXX: shouldn't be needed */
+	for (i = 0; i < MAX_BSETS; i++)
+		b->set[i].size = 0;
+	/*
+	 * Second loop starts at 1 because b->set[0].data is the memory we
+	 * allocated
 	 */
-	if (KEY_SIZE(l) + KEY_SIZE(r) > USHRT_MAX) {
-		SET_KEY_OFFSET(l, KEY_OFFSET(l) + USHRT_MAX - KEY_SIZE(l));
-		SET_KEY_SIZE(l, USHRT_MAX);
-
-		bch_cut_front(l, r);
-		return false;
-	}
-
-	if (KEY_CSUM(l)) {
-		if (KEY_CSUM(r))
-			l->ptr[KEY_PTRS(l)] = merge_chksums(l, r);
-		else
-			SET_KEY_CSUM(l, 0);
-	}
-
-	SET_KEY_OFFSET(l, KEY_OFFSET(l) + KEY_SIZE(r));
-	SET_KEY_SIZE(l, KEY_SIZE(l) + KEY_SIZE(r));
-
-	return true;
+	for (i = 1; i < MAX_BSETS; i++)
+		b->set[i].data = NULL;
 }
+EXPORT_SYMBOL(bch_btree_keys_init);
 
 /* Binary tree stuff for auxiliary search trees */
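For a feel of the sizes the helpers above work out to: with 4 KiB pages, a
page_order of 2 (a 16 KiB btree node) gives 128 cachelines of BSET_CACHELINE
bytes each, so the auxiliary tree costs 512 bytes of 4-byte bkey_floats plus
128 bytes of prev offsets. A standalone sketch of that arithmetic (assumptions
as stated, not part of the patch):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	unsigned page_order = 2;			/* 16 KiB node on 4 KiB pages */
	size_t keys_bytes  = 4096UL << page_order;	/* btree_keys_bytes()  */
	size_t cachelines  = keys_bytes / 128;		/* BSET_CACHELINE      */
	size_t tree_bytes  = cachelines * 4;		/* sizeof(bkey_float)  */
	size_t prev_bytes  = cachelines * sizeof(uint8_t);

	printf("%zu %zu %zu %zu\n", keys_bytes, cachelines, tree_bytes, prev_bytes);
	return 0;
}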
 
@@ -455,9 +511,11 @@
 	return ((void *) k - (void *) t->data) / BSET_CACHELINE;
 }
 
-static unsigned bkey_to_cacheline_offset(struct bkey *k)
+static unsigned bkey_to_cacheline_offset(struct bset_tree *t,
+					 unsigned cacheline,
+					 struct bkey *k)
 {
-	return ((size_t) k & (BSET_CACHELINE - 1)) / sizeof(uint64_t);
+	return (u64 *) k - (u64 *) cacheline_to_bkey(t, cacheline, 0);
 }
 
 static struct bkey *tree_to_bkey(struct bset_tree *t, unsigned j)
@@ -504,7 +562,7 @@
 		: tree_to_prev_bkey(t, j >> ffs(j));
 
 	struct bkey *r = is_power_of_2(j + 1)
-		? node(t->data, t->data->keys - bkey_u64s(&t->end))
+		? bset_bkey_idx(t->data, t->data->keys - bkey_u64s(&t->end))
 		: tree_to_bkey(t, j >> (ffz(j) + 1));
 
 	BUG_ON(m < l || m > r);
@@ -528,9 +586,9 @@
 		f->exponent = 127;
 }
 
-static void bset_alloc_tree(struct btree *b, struct bset_tree *t)
+static void bset_alloc_tree(struct btree_keys *b, struct bset_tree *t)
 {
-	if (t != b->sets) {
+	if (t != b->set) {
 		unsigned j = roundup(t[-1].size,
 				     64 / sizeof(struct bkey_float));
 
@@ -538,33 +596,54 @@
 		t->prev = t[-1].prev + j;
 	}
 
-	while (t < b->sets + MAX_BSETS)
+	while (t < b->set + MAX_BSETS)
 		t++->size = 0;
 }
 
-static void bset_build_unwritten_tree(struct btree *b)
+static void bch_bset_build_unwritten_tree(struct btree_keys *b)
 {
-	struct bset_tree *t = b->sets + b->nsets;
+	struct bset_tree *t = bset_tree_last(b);
+
+	BUG_ON(b->last_set_unwritten);
+	b->last_set_unwritten = 1;
 
 	bset_alloc_tree(b, t);
 
-	if (t->tree != b->sets->tree + bset_tree_space(b)) {
-		t->prev[0] = bkey_to_cacheline_offset(t->data->start);
+	if (t->tree != b->set->tree + btree_keys_cachelines(b)) {
+		t->prev[0] = bkey_to_cacheline_offset(t, 0, t->data->start);
 		t->size = 1;
 	}
 }
 
-static void bset_build_written_tree(struct btree *b)
+void bch_bset_init_next(struct btree_keys *b, struct bset *i, uint64_t magic)
 {
-	struct bset_tree *t = b->sets + b->nsets;
-	struct bkey *k = t->data->start;
+	if (i != b->set->data) {
+		b->set[++b->nsets].data = i;
+		i->seq = b->set->data->seq;
+	} else
+		get_random_bytes(&i->seq, sizeof(uint64_t));
+
+	i->magic	= magic;
+	i->version	= 0;
+	i->keys		= 0;
+
+	bch_bset_build_unwritten_tree(b);
+}
+EXPORT_SYMBOL(bch_bset_init_next);
+
+void bch_bset_build_written_tree(struct btree_keys *b)
+{
+	struct bset_tree *t = bset_tree_last(b);
+	struct bkey *prev = NULL, *k = t->data->start;
 	unsigned j, cacheline = 1;
 
+	b->last_set_unwritten = 0;
+
 	bset_alloc_tree(b, t);
 
 	t->size = min_t(unsigned,
-			bkey_to_cacheline(t, end(t->data)),
-			b->sets->tree + bset_tree_space(b) - t->tree);
+			bkey_to_cacheline(t, bset_bkey_last(t->data)),
+			b->set->tree + btree_keys_cachelines(b) - t->tree);
 
 	if (t->size < 2) {
 		t->size = 0;
@@ -577,16 +656,14 @@
 	for (j = inorder_next(0, t->size);
 	     j;
 	     j = inorder_next(j, t->size)) {
-		while (bkey_to_cacheline(t, k) != cacheline)
-			k = bkey_next(k);
+		while (bkey_to_cacheline(t, k) < cacheline)
+			prev = k, k = bkey_next(k);
 
-		t->prev[j] = bkey_u64s(k);
-		k = bkey_next(k);
-		cacheline++;
-		t->tree[j].m = bkey_to_cacheline_offset(k);
+		t->prev[j] = bkey_u64s(prev);
+		t->tree[j].m = bkey_to_cacheline_offset(t, cacheline++, k);
 	}
 
-	while (bkey_next(k) != end(t->data))
+	while (bkey_next(k) != bset_bkey_last(t->data))
 		k = bkey_next(k);
 
 	t->end = *k;
@@ -597,14 +674,17 @@
 	     j = inorder_next(j, t->size))
 		make_bfloat(t, j);
 }
+EXPORT_SYMBOL(bch_bset_build_written_tree);
 
-void bch_bset_fix_invalidated_key(struct btree *b, struct bkey *k)
+/* Insert */
+
+void bch_bset_fix_invalidated_key(struct btree_keys *b, struct bkey *k)
 {
 	struct bset_tree *t;
 	unsigned inorder, j = 1;
 
-	for (t = b->sets; t <= &b->sets[b->nsets]; t++)
-		if (k < end(t->data))
+	for (t = b->set; t <= bset_tree_last(b); t++)
+		if (k < bset_bkey_last(t->data))
 			goto found_set;
 
 	BUG();
@@ -617,7 +697,7 @@
 	if (k == t->data->start)
 		goto fix_left;
 
-	if (bkey_next(k) == end(t->data)) {
+	if (bkey_next(k) == bset_bkey_last(t->data)) {
 		t->end = *k;
 		goto fix_right;
 	}
@@ -642,10 +722,12 @@
 			j = j * 2 + 1;
 		} while (j < t->size);
 }
+EXPORT_SYMBOL(bch_bset_fix_invalidated_key);
 
-void bch_bset_fix_lookup_table(struct btree *b, struct bkey *k)
+static void bch_bset_fix_lookup_table(struct btree_keys *b,
+				      struct bset_tree *t,
+				      struct bkey *k)
 {
-	struct bset_tree *t = &b->sets[b->nsets];
 	unsigned shift = bkey_u64s(k);
 	unsigned j = bkey_to_cacheline(t, k);
 
@@ -657,8 +739,8 @@
 	 * lookup table for the first key that is strictly greater than k:
 	 * it's either k's cacheline or the next one
 	 */
-	if (j < t->size &&
-	    table_to_bkey(t, j) <= k)
+	while (j < t->size &&
+	       table_to_bkey(t, j) <= k)
 		j++;
 
 	/* Adjust all the lookup table entries, and find a new key for any that
@@ -673,54 +755,124 @@
 			while (k < cacheline_to_bkey(t, j, 0))
 				k = bkey_next(k);
 
-			t->prev[j] = bkey_to_cacheline_offset(k);
+			t->prev[j] = bkey_to_cacheline_offset(t, j, k);
 		}
 	}
 
-	if (t->size == b->sets->tree + bset_tree_space(b) - t->tree)
+	if (t->size == b->set->tree + btree_keys_cachelines(b) - t->tree)
 		return;
 
 	/* Possibly add a new entry to the end of the lookup table */
 
 	for (k = table_to_bkey(t, t->size - 1);
-	     k != end(t->data);
+	     k != bset_bkey_last(t->data);
 	     k = bkey_next(k))
 		if (t->size == bkey_to_cacheline(t, k)) {
-			t->prev[t->size] = bkey_to_cacheline_offset(k);
+			t->prev[t->size] = bkey_to_cacheline_offset(t, t->size, k);
 			t->size++;
 		}
 }
 
-void bch_bset_init_next(struct btree *b)
+/*
+ * Tries to merge l and r: l should be lower than r
+ * Returns true if we were able to merge. If we did merge, l will be the merged
+ * key, r will be untouched.
+ */
+bool bch_bkey_try_merge(struct btree_keys *b, struct bkey *l, struct bkey *r)
 {
-	struct bset *i = write_block(b);
+	if (!b->ops->key_merge)
+		return false;
 
-	if (i != b->sets[0].data) {
-		b->sets[++b->nsets].data = i;
-		i->seq = b->sets[0].data->seq;
-	} else
-		get_random_bytes(&i->seq, sizeof(uint64_t));
+	/*
+	 * Generic header checks
+	 * Assumes left and right are in order
+	 * Left and right must be exactly aligned
+	 */
+	if (!bch_bkey_equal_header(l, r) ||
+	     bkey_cmp(l, &START_KEY(r)))
+		return false;
 
-	i->magic	= bset_magic(&b->c->sb);
-	i->version	= 0;
-	i->keys		= 0;
-
-	bset_build_unwritten_tree(b);
+	return b->ops->key_merge(b, l, r);
 }
+EXPORT_SYMBOL(bch_bkey_try_merge);
+
+void bch_bset_insert(struct btree_keys *b, struct bkey *where,
+		     struct bkey *insert)
+{
+	struct bset_tree *t = bset_tree_last(b);
+
+	BUG_ON(!b->last_set_unwritten);
+	BUG_ON(bset_byte_offset(b, t->data) +
+	       __set_bytes(t->data, t->data->keys + bkey_u64s(insert)) >
+	       PAGE_SIZE << b->page_order);
+
+	memmove((uint64_t *) where + bkey_u64s(insert),
+		where,
+		(void *) bset_bkey_last(t->data) - (void *) where);
+
+	t->data->keys += bkey_u64s(insert);
+	bkey_copy(where, insert);
+	bch_bset_fix_lookup_table(b, t, where);
+}
+EXPORT_SYMBOL(bch_bset_insert);
+
+unsigned bch_btree_insert_key(struct btree_keys *b, struct bkey *k,
+			      struct bkey *replace_key)
+{
+	unsigned status = BTREE_INSERT_STATUS_NO_INSERT;
+	struct bset *i = bset_tree_last(b)->data;
+	struct bkey *m, *prev = NULL;
+	struct btree_iter iter;
+
+	BUG_ON(b->ops->is_extents && !KEY_SIZE(k));
+
+	m = bch_btree_iter_init(b, &iter, b->ops->is_extents
+				? PRECEDING_KEY(&START_KEY(k))
+				: PRECEDING_KEY(k));
+
+	if (b->ops->insert_fixup(b, k, &iter, replace_key))
+		return status;
+
+	status = BTREE_INSERT_STATUS_INSERT;
+
+	while (m != bset_bkey_last(i) &&
+	       bkey_cmp(k, b->ops->is_extents ? &START_KEY(m) : m) > 0)
+		prev = m, m = bkey_next(m);
+
+	/* prev is in the tree, if we merge we're done */
+	status = BTREE_INSERT_STATUS_BACK_MERGE;
+	if (prev &&
+	    bch_bkey_try_merge(b, prev, k))
+		goto merged;
+#if 0
+	status = BTREE_INSERT_STATUS_OVERWROTE;
+	if (m != bset_bkey_last(i) &&
+	    KEY_PTRS(m) == KEY_PTRS(k) && !KEY_SIZE(m))
+		goto copy;
+#endif
+	status = BTREE_INSERT_STATUS_FRONT_MERGE;
+	if (m != bset_bkey_last(i) &&
+	    bch_bkey_try_merge(b, k, m))
+		goto copy;
+
+	bch_bset_insert(b, m, k);
+copy:	bkey_copy(m, k);
+merged:
+	return status;
+}
+EXPORT_SYMBOL(bch_btree_insert_key);
+
+/* Lookup */
 
 struct bset_search_iter {
 	struct bkey *l, *r;
 };
 
-static struct bset_search_iter bset_search_write_set(struct btree *b,
-						     struct bset_tree *t,
+static struct bset_search_iter bset_search_write_set(struct bset_tree *t,
 						     const struct bkey *search)
 {
 	unsigned li = 0, ri = t->size;
 
-	BUG_ON(!b->nsets &&
-	       t->size < bkey_to_cacheline(t, end(t->data)));
-
 	while (li + 1 != ri) {
 		unsigned m = (li + ri) >> 1;
 
@@ -732,12 +884,11 @@
 
 	return (struct bset_search_iter) {
 		table_to_bkey(t, li),
-		ri < t->size ? table_to_bkey(t, ri) : end(t->data)
+		ri < t->size ? table_to_bkey(t, ri) : bset_bkey_last(t->data)
 	};
 }
 
-static struct bset_search_iter bset_search_tree(struct btree *b,
-						struct bset_tree *t,
+static struct bset_search_iter bset_search_tree(struct bset_tree *t,
 						const struct bkey *search)
 {
 	struct bkey *l, *r;
@@ -784,7 +935,7 @@
 			f = &t->tree[inorder_next(j, t->size)];
 			r = cacheline_to_bkey(t, inorder, f->m);
 		} else
-			r = end(t->data);
+			r = bset_bkey_last(t->data);
 	} else {
 		r = cacheline_to_bkey(t, inorder, f->m);
 
@@ -798,7 +949,7 @@
 	return (struct bset_search_iter) {l, r};
 }
 
-struct bkey *__bch_bset_search(struct btree *b, struct bset_tree *t,
+struct bkey *__bch_bset_search(struct btree_keys *b, struct bset_tree *t,
 			       const struct bkey *search)
 {
 	struct bset_search_iter i;
@@ -820,7 +971,7 @@
 
 	if (unlikely(!t->size)) {
 		i.l = t->data->start;
-		i.r = end(t->data);
+		i.r = bset_bkey_last(t->data);
 	} else if (bset_written(b, t)) {
 		/*
 		 * Each node in the auxiliary search tree covers a certain range
@@ -830,23 +981,27 @@
 		 */
 
 		if (unlikely(bkey_cmp(search, &t->end) >= 0))
-			return end(t->data);
+			return bset_bkey_last(t->data);
 
 		if (unlikely(bkey_cmp(search, t->data->start) < 0))
 			return t->data->start;
 
-		i = bset_search_tree(b, t, search);
-	} else
-		i = bset_search_write_set(b, t, search);
+		i = bset_search_tree(t, search);
+	} else {
+		BUG_ON(!b->nsets &&
+		       t->size < bkey_to_cacheline(t, bset_bkey_last(t->data)));
 
-	if (expensive_debug_checks(b->c)) {
+		i = bset_search_write_set(t, search);
+	}
+
+	if (btree_keys_expensive_checks(b)) {
 		BUG_ON(bset_written(b, t) &&
 		       i.l != t->data->start &&
 		       bkey_cmp(tree_to_prev_bkey(t,
 			  inorder_to_tree(bkey_to_cacheline(t, i.l), t)),
 				search) > 0);
 
-		BUG_ON(i.r != end(t->data) &&
+		BUG_ON(i.r != bset_bkey_last(t->data) &&
 		       bkey_cmp(i.r, search) <= 0);
 	}
 
@@ -856,22 +1011,17 @@
 
 	return i.l;
 }
+EXPORT_SYMBOL(__bch_bset_search);
 
 /* Btree iterator */
 
-/*
- * Returns true if l > r - unless l == r, in which case returns true if l is
- * older than r.
- *
- * Necessary for btree_sort_fixup() - if there are multiple keys that compare
- * equal in different sets, we have to process them newest to oldest.
- */
+typedef bool (btree_iter_cmp_fn)(struct btree_iter_set,
+				 struct btree_iter_set);
+
 static inline bool btree_iter_cmp(struct btree_iter_set l,
 				  struct btree_iter_set r)
 {
-	int64_t c = bkey_cmp(&START_KEY(l.k), &START_KEY(r.k));
-
-	return c ? c > 0 : l.k < r.k;
+	return bkey_cmp(l.k, r.k) > 0;
 }
 
 static inline bool btree_iter_end(struct btree_iter *iter)
@@ -888,8 +1038,10 @@
 				 btree_iter_cmp));
 }
 
-struct bkey *__bch_btree_iter_init(struct btree *b, struct btree_iter *iter,
-				   struct bkey *search, struct bset_tree *start)
+static struct bkey *__bch_btree_iter_init(struct btree_keys *b,
+					  struct btree_iter *iter,
+					  struct bkey *search,
+					  struct bset_tree *start)
 {
 	struct bkey *ret = NULL;
 	iter->size = ARRAY_SIZE(iter->data);
@@ -899,15 +1051,24 @@
 	iter->b = b;
 #endif
 
-	for (; start <= &b->sets[b->nsets]; start++) {
+	for (; start <= bset_tree_last(b); start++) {
 		ret = bch_bset_search(b, start, search);
-		bch_btree_iter_push(iter, ret, end(start->data));
+		bch_btree_iter_push(iter, ret, bset_bkey_last(start->data));
 	}
 
 	return ret;
 }
 
-struct bkey *bch_btree_iter_next(struct btree_iter *iter)
+struct bkey *bch_btree_iter_init(struct btree_keys *b,
+				 struct btree_iter *iter,
+				 struct bkey *search)
+{
+	return __bch_btree_iter_init(b, iter, search, b->set);
+}
+EXPORT_SYMBOL(bch_btree_iter_init);
+
+static inline struct bkey *__bch_btree_iter_next(struct btree_iter *iter,
+						 btree_iter_cmp_fn *cmp)
 {
 	struct btree_iter_set unused;
 	struct bkey *ret = NULL;
@@ -924,16 +1085,23 @@
 		}
 
 		if (iter->data->k == iter->data->end)
-			heap_pop(iter, unused, btree_iter_cmp);
+			heap_pop(iter, unused, cmp);
 		else
-			heap_sift(iter, 0, btree_iter_cmp);
+			heap_sift(iter, 0, cmp);
 	}
 
 	return ret;
 }
 
+struct bkey *bch_btree_iter_next(struct btree_iter *iter)
+{
+	return __bch_btree_iter_next(iter, btree_iter_cmp);
+
+}
+EXPORT_SYMBOL(bch_btree_iter_next);
+
 struct bkey *bch_btree_iter_next_filter(struct btree_iter *iter,
-					struct btree *b, ptr_filter_fn fn)
+					struct btree_keys *b, ptr_filter_fn fn)
 {
 	struct bkey *ret;
 
@@ -946,70 +1114,58 @@
 
 /* Mergesort */
 
-static void sort_key_next(struct btree_iter *iter,
-			  struct btree_iter_set *i)
+void bch_bset_sort_state_free(struct bset_sort_state *state)
 {
-	i->k = bkey_next(i->k);
-
-	if (i->k == i->end)
-		*i = iter->data[--iter->used];
+	if (state->pool)
+		mempool_destroy(state->pool);
 }
 
-static void btree_sort_fixup(struct btree_iter *iter)
+int bch_bset_sort_state_init(struct bset_sort_state *state, unsigned page_order)
 {
-	while (iter->used > 1) {
-		struct btree_iter_set *top = iter->data, *i = top + 1;
+	spin_lock_init(&state->time.lock);
 
-		if (iter->used > 2 &&
-		    btree_iter_cmp(i[0], i[1]))
-			i++;
+	state->page_order = page_order;
+	state->crit_factor = int_sqrt(1 << page_order);
 
-		if (bkey_cmp(top->k, &START_KEY(i->k)) <= 0)
-			break;
+	state->pool = mempool_create_page_pool(1, page_order);
+	if (!state->pool)
+		return -ENOMEM;
 
-		if (!KEY_SIZE(i->k)) {
-			sort_key_next(iter, i);
-			heap_sift(iter, i - top, btree_iter_cmp);
-			continue;
-		}
-
-		if (top->k > i->k) {
-			if (bkey_cmp(top->k, i->k) >= 0)
-				sort_key_next(iter, i);
-			else
-				bch_cut_front(top->k, i->k);
-
-			heap_sift(iter, i - top, btree_iter_cmp);
-		} else {
-			/* can't happen because of comparison func */
-			BUG_ON(!bkey_cmp(&START_KEY(top->k), &START_KEY(i->k)));
-			bch_cut_back(&START_KEY(i->k), top->k);
-		}
-	}
+	return 0;
 }
+EXPORT_SYMBOL(bch_bset_sort_state_init);
 
-static void btree_mergesort(struct btree *b, struct bset *out,
+static void btree_mergesort(struct btree_keys *b, struct bset *out,
 			    struct btree_iter *iter,
 			    bool fixup, bool remove_stale)
 {
+	int i;
 	struct bkey *k, *last = NULL;
-	bool (*bad)(struct btree *, const struct bkey *) = remove_stale
+	BKEY_PADDED(k) tmp;
+	bool (*bad)(struct btree_keys *, const struct bkey *) = remove_stale
 		? bch_ptr_bad
 		: bch_ptr_invalid;
 
-	while (!btree_iter_end(iter)) {
-		if (fixup && !b->level)
-			btree_sort_fixup(iter);
+	/* Heapify the iterator, using our comparison function */
+	for (i = iter->used / 2 - 1; i >= 0; --i)
+		heap_sift(iter, i, b->ops->sort_cmp);
 
-		k = bch_btree_iter_next(iter);
+	while (!btree_iter_end(iter)) {
+		if (b->ops->sort_fixup && fixup)
+			k = b->ops->sort_fixup(iter, &tmp.k);
+		else
+			k = NULL;
+
+		if (!k)
+			k = __bch_btree_iter_next(iter, b->ops->sort_cmp);
+
 		if (bad(b, k))
 			continue;
 
 		if (!last) {
 			last = out->start;
 			bkey_copy(last, k);
-		} else if (b->level ||
-			   !bch_bkey_try_merge(b, last, k)) {
+		} else if (!bch_bkey_try_merge(b, last, k)) {
 			last = bkey_next(last);
 			bkey_copy(last, k);
 		}
@@ -1020,27 +1176,30 @@
 	pr_debug("sorted %i keys", out->keys);
 }
 
-static void __btree_sort(struct btree *b, struct btree_iter *iter,
-			 unsigned start, unsigned order, bool fixup)
+static void __btree_sort(struct btree_keys *b, struct btree_iter *iter,
+			 unsigned start, unsigned order, bool fixup,
+			 struct bset_sort_state *state)
 {
 	uint64_t start_time;
-	bool remove_stale = !b->written;
+	bool used_mempool = false;
 	struct bset *out = (void *) __get_free_pages(__GFP_NOWARN|GFP_NOIO,
 						     order);
 	if (!out) {
-		mutex_lock(&b->c->sort_lock);
-		out = b->c->sort;
-		order = ilog2(bucket_pages(b->c));
+		struct page *outp;
+
+		BUG_ON(order > state->page_order);
+
+		outp = mempool_alloc(state->pool, GFP_NOIO);
+		out = page_address(outp);
+		used_mempool = true;
+		order = state->page_order;
 	}
 
 	start_time = local_clock();
 
-	btree_mergesort(b, out, iter, fixup, remove_stale);
+	btree_mergesort(b, out, iter, fixup, false);
 	b->nsets = start;
 
-	if (!fixup && !start && b->written)
-		bch_btree_verify(b, out);
-
 	if (!start && order == b->page_order) {
 		/*
 		 * Our temporary buffer is the same size as the btree node's
@@ -1048,84 +1207,76 @@
 		 * memcpy()
 		 */
 
-		out->magic	= bset_magic(&b->c->sb);
-		out->seq	= b->sets[0].data->seq;
-		out->version	= b->sets[0].data->version;
-		swap(out, b->sets[0].data);
-
-		if (b->c->sort == b->sets[0].data)
-			b->c->sort = out;
+		out->magic	= b->set->data->magic;
+		out->seq	= b->set->data->seq;
+		out->version	= b->set->data->version;
+		swap(out, b->set->data);
 	} else {
-		b->sets[start].data->keys = out->keys;
-		memcpy(b->sets[start].data->start, out->start,
-		       (void *) end(out) - (void *) out->start);
+		b->set[start].data->keys = out->keys;
+		memcpy(b->set[start].data->start, out->start,
+		       (void *) bset_bkey_last(out) - (void *) out->start);
 	}
 
-	if (out == b->c->sort)
-		mutex_unlock(&b->c->sort_lock);
+	if (used_mempool)
+		mempool_free(virt_to_page(out), state->pool);
 	else
 		free_pages((unsigned long) out, order);
 
-	if (b->written)
-		bset_build_written_tree(b);
+	bch_bset_build_written_tree(b);
 
 	if (!start)
-		bch_time_stats_update(&b->c->sort_time, start_time);
+		bch_time_stats_update(&state->time, start_time);
 }
 
-void bch_btree_sort_partial(struct btree *b, unsigned start)
+void bch_btree_sort_partial(struct btree_keys *b, unsigned start,
+			    struct bset_sort_state *state)
 {
 	size_t order = b->page_order, keys = 0;
 	struct btree_iter iter;
 	int oldsize = bch_count_data(b);
 
-	__bch_btree_iter_init(b, &iter, NULL, &b->sets[start]);
-
-	BUG_ON(b->sets[b->nsets].data == write_block(b) &&
-	       (b->sets[b->nsets].size || b->nsets));
-
+	__bch_btree_iter_init(b, &iter, NULL, &b->set[start]);
 
 	if (start) {
 		unsigned i;
 
 		for (i = start; i <= b->nsets; i++)
-			keys += b->sets[i].data->keys;
+			keys += b->set[i].data->keys;
 
-		order = roundup_pow_of_two(__set_bytes(b->sets->data,
-						       keys)) / PAGE_SIZE;
-		if (order)
-			order = ilog2(order);
+		order = get_order(__set_bytes(b->set->data, keys));
 	}
 
-	__btree_sort(b, &iter, start, order, false);
+	__btree_sort(b, &iter, start, order, false, state);
 
-	EBUG_ON(b->written && oldsize >= 0 && bch_count_data(b) != oldsize);
+	EBUG_ON(oldsize >= 0 && bch_count_data(b) != oldsize);
 }
+EXPORT_SYMBOL(bch_btree_sort_partial);
 
-void bch_btree_sort_and_fix_extents(struct btree *b, struct btree_iter *iter)
+void bch_btree_sort_and_fix_extents(struct btree_keys *b,
+				    struct btree_iter *iter,
+				    struct bset_sort_state *state)
 {
-	BUG_ON(!b->written);
-	__btree_sort(b, iter, 0, b->page_order, true);
+	__btree_sort(b, iter, 0, b->page_order, true, state);
 }
 
-void bch_btree_sort_into(struct btree *b, struct btree *new)
+void bch_btree_sort_into(struct btree_keys *b, struct btree_keys *new,
+			 struct bset_sort_state *state)
 {
 	uint64_t start_time = local_clock();
 
 	struct btree_iter iter;
 	bch_btree_iter_init(b, &iter, NULL);
 
-	btree_mergesort(b, new->sets->data, &iter, false, true);
+	btree_mergesort(b, new->set->data, &iter, false, true);
 
-	bch_time_stats_update(&b->c->sort_time, start_time);
+	bch_time_stats_update(&state->time, start_time);
 
-	bkey_copy_key(&new->key, &b->key);
-	new->sets->size = 0;
+	new->set->size = 0; // XXX: why?
 }
 
 #define SORT_CRIT	(4096 / sizeof(uint64_t))
 
-void bch_btree_sort_lazy(struct btree *b)
+void bch_btree_sort_lazy(struct btree_keys *b, struct bset_sort_state *state)
 {
 	unsigned crit = SORT_CRIT;
 	int i;
@@ -1134,50 +1285,32 @@
 	if (!b->nsets)
 		goto out;
 
-	/* If not a leaf node, always sort */
-	if (b->level) {
-		bch_btree_sort(b);
-		return;
-	}
-
 	for (i = b->nsets - 1; i >= 0; --i) {
-		crit *= b->c->sort_crit_factor;
+		crit *= state->crit_factor;
 
-		if (b->sets[i].data->keys < crit) {
-			bch_btree_sort_partial(b, i);
+		if (b->set[i].data->keys < crit) {
+			bch_btree_sort_partial(b, i, state);
 			return;
 		}
 	}
 
 	/* Sort if we'd overflow */
 	if (b->nsets + 1 == MAX_BSETS) {
-		bch_btree_sort(b);
+		bch_btree_sort(b, state);
 		return;
 	}
 
 out:
-	bset_build_written_tree(b);
+	bch_bset_build_written_tree(b);
 }
+EXPORT_SYMBOL(bch_btree_sort_lazy);
 
-/* Sysfs stuff */
-
-struct bset_stats {
-	struct btree_op op;
-	size_t nodes;
-	size_t sets_written, sets_unwritten;
-	size_t bytes_written, bytes_unwritten;
-	size_t floats, failed;
-};
-
-static int btree_bset_stats(struct btree_op *op, struct btree *b)
+void bch_btree_keys_stats(struct btree_keys *b, struct bset_stats *stats)
 {
-	struct bset_stats *stats = container_of(op, struct bset_stats, op);
 	unsigned i;
 
-	stats->nodes++;
-
 	for (i = 0; i <= b->nsets; i++) {
-		struct bset_tree *t = &b->sets[i];
+		struct bset_tree *t = &b->set[i];
 		size_t bytes = t->data->keys * sizeof(uint64_t);
 		size_t j;
 
@@ -1195,32 +1328,4 @@
 			stats->bytes_unwritten += bytes;
 		}
 	}
-
-	return MAP_CONTINUE;
-}
-
-int bch_bset_print_stats(struct cache_set *c, char *buf)
-{
-	struct bset_stats t;
-	int ret;
-
-	memset(&t, 0, sizeof(struct bset_stats));
-	bch_btree_op_init(&t.op, -1);
-
-	ret = bch_btree_map_nodes(&t.op, c, &ZERO_KEY, btree_bset_stats);
-	if (ret < 0)
-		return ret;
-
-	return snprintf(buf, PAGE_SIZE,
-			"btree nodes:		%zu\n"
-			"written sets:		%zu\n"
-			"unwritten sets:		%zu\n"
-			"written key bytes:	%zu\n"
-			"unwritten key bytes:	%zu\n"
-			"floats:			%zu\n"
-			"failed:			%zu\n",
-			t.nodes,
-			t.sets_written, t.sets_unwritten,
-			t.bytes_written, t.bytes_unwritten,
-			t.floats, t.failed);
 }
diff --git a/drivers/md/bcache/bset.h b/drivers/md/bcache/bset.h
index 1d3c24f..003260f 100644
--- a/drivers/md/bcache/bset.h
+++ b/drivers/md/bcache/bset.h
@@ -1,7 +1,11 @@
 #ifndef _BCACHE_BSET_H
 #define _BCACHE_BSET_H
 
-#include <linux/slab.h>
+#include <linux/bcache.h>
+#include <linux/kernel.h>
+#include <linux/types.h>
+
+#include "util.h" /* for time_stats */
 
 /*
  * BKEYS:
@@ -142,20 +146,13 @@
  * first key in that range of bytes again.
  */
 
-/* Btree key comparison/iteration */
+struct btree_keys;
+struct btree_iter;
+struct btree_iter_set;
+struct bkey_float;
 
 #define MAX_BSETS		4U
 
-struct btree_iter {
-	size_t size, used;
-#ifdef CONFIG_BCACHE_DEBUG
-	struct btree *b;
-#endif
-	struct btree_iter_set {
-		struct bkey *k, *end;
-	} data[MAX_BSETS];
-};
-
 struct bset_tree {
 	/*
 	 * We construct a binary tree in an array as if the array
@@ -165,14 +162,14 @@
 	 */
 
 	/* size of the binary tree and prev array */
-	unsigned	size;
+	unsigned		size;
 
 	/* function of size - precalculated for to_inorder() */
-	unsigned	extra;
+	unsigned		extra;
 
 	/* copy of the last key in the set */
-	struct bkey	end;
-	struct bkey_float *tree;
+	struct bkey		end;
+	struct bkey_float	*tree;
 
 	/*
 	 * The nodes in the bset tree point to specific keys - this
@@ -182,12 +179,219 @@
 	 * to keep bkey_float to 4 bytes and prev isn't used in the fast
 	 * path.
 	 */
-	uint8_t		*prev;
+	uint8_t			*prev;
 
 	/* The actual btree node, with pointers to each sorted set */
-	struct bset	*data;
+	struct bset		*data;
 };
 
+struct btree_keys_ops {
+	bool		(*sort_cmp)(struct btree_iter_set,
+				    struct btree_iter_set);
+	struct bkey	*(*sort_fixup)(struct btree_iter *, struct bkey *);
+	bool		(*insert_fixup)(struct btree_keys *, struct bkey *,
+					struct btree_iter *, struct bkey *);
+	bool		(*key_invalid)(struct btree_keys *,
+				       const struct bkey *);
+	bool		(*key_bad)(struct btree_keys *, const struct bkey *);
+	bool		(*key_merge)(struct btree_keys *,
+				     struct bkey *, struct bkey *);
+	void		(*key_to_text)(char *, size_t, const struct bkey *);
+	void		(*key_dump)(struct btree_keys *, const struct bkey *);
+
+	/*
+	 * Only used for deciding whether to use START_KEY(k) or just the key
+	 * itself in a couple places
+	 */
+	bool		is_extents;
+};
+
+struct btree_keys {
+	const struct btree_keys_ops	*ops;
+	uint8_t			page_order;
+	uint8_t			nsets;
+	unsigned		last_set_unwritten:1;
+	bool			*expensive_debug_checks;
+
+	/*
+	 * Sets of sorted keys - the real btree node - plus a binary search tree
+	 *
+	 * set[0] is special; set[0]->tree, set[0]->prev and set[0]->data point
+	 * to the memory we have allocated for this btree node. Additionally,
+	 * set[0]->data points to the entire btree node as it exists on disk.
+	 */
+	struct bset_tree	set[MAX_BSETS];
+};
+
+static inline struct bset_tree *bset_tree_last(struct btree_keys *b)
+{
+	return b->set + b->nsets;
+}
+
+static inline bool bset_written(struct btree_keys *b, struct bset_tree *t)
+{
+	return t <= b->set + b->nsets - b->last_set_unwritten;
+}
+
+static inline bool bkey_written(struct btree_keys *b, struct bkey *k)
+{
+	return !b->last_set_unwritten || k < b->set[b->nsets].data->start;
+}
+
+static inline unsigned bset_byte_offset(struct btree_keys *b, struct bset *i)
+{
+	return ((size_t) i) - ((size_t) b->set->data);
+}
+
+static inline unsigned bset_sector_offset(struct btree_keys *b, struct bset *i)
+{
+	return bset_byte_offset(b, i) >> 9;
+}
+
+#define __set_bytes(i, k)	(sizeof(*(i)) + (k) * sizeof(uint64_t))
+#define set_bytes(i)		__set_bytes(i, i->keys)
+
+#define __set_blocks(i, k, block_bytes)				\
+	DIV_ROUND_UP(__set_bytes(i, k), block_bytes)
+#define set_blocks(i, block_bytes)				\
+	__set_blocks(i, (i)->keys, block_bytes)
+
+static inline size_t bch_btree_keys_u64s_remaining(struct btree_keys *b)
+{
+	struct bset_tree *t = bset_tree_last(b);
+
+	BUG_ON((PAGE_SIZE << b->page_order) <
+	       (bset_byte_offset(b, t->data) + set_bytes(t->data)));
+
+	if (!b->last_set_unwritten)
+		return 0;
+
+	return ((PAGE_SIZE << b->page_order) -
+		(bset_byte_offset(b, t->data) + set_bytes(t->data))) /
+		sizeof(u64);
+}
+
+static inline struct bset *bset_next_set(struct btree_keys *b,
+					 unsigned block_bytes)
+{
+	struct bset *i = bset_tree_last(b)->data;
+
+	return ((void *) i) + roundup(set_bytes(i), block_bytes);
+}
+
+void bch_btree_keys_free(struct btree_keys *);
+int bch_btree_keys_alloc(struct btree_keys *, unsigned, gfp_t);
+void bch_btree_keys_init(struct btree_keys *, const struct btree_keys_ops *,
+			 bool *);
+
+void bch_bset_init_next(struct btree_keys *, struct bset *, uint64_t);
+void bch_bset_build_written_tree(struct btree_keys *);
+void bch_bset_fix_invalidated_key(struct btree_keys *, struct bkey *);
+bool bch_bkey_try_merge(struct btree_keys *, struct bkey *, struct bkey *);
+void bch_bset_insert(struct btree_keys *, struct bkey *, struct bkey *);
+unsigned bch_btree_insert_key(struct btree_keys *, struct bkey *,
+			      struct bkey *);
+
+enum {
+	BTREE_INSERT_STATUS_NO_INSERT = 0,
+	BTREE_INSERT_STATUS_INSERT,
+	BTREE_INSERT_STATUS_BACK_MERGE,
+	BTREE_INSERT_STATUS_OVERWROTE,
+	BTREE_INSERT_STATUS_FRONT_MERGE,
+};
+
+/* Btree key iteration */
+
+struct btree_iter {
+	size_t size, used;
+#ifdef CONFIG_BCACHE_DEBUG
+	struct btree_keys *b;
+#endif
+	struct btree_iter_set {
+		struct bkey *k, *end;
+	} data[MAX_BSETS];
+};
+
+typedef bool (*ptr_filter_fn)(struct btree_keys *, const struct bkey *);
+
+struct bkey *bch_btree_iter_next(struct btree_iter *);
+struct bkey *bch_btree_iter_next_filter(struct btree_iter *,
+					struct btree_keys *, ptr_filter_fn);
+
+void bch_btree_iter_push(struct btree_iter *, struct bkey *, struct bkey *);
+struct bkey *bch_btree_iter_init(struct btree_keys *, struct btree_iter *,
+				 struct bkey *);
+
+struct bkey *__bch_bset_search(struct btree_keys *, struct bset_tree *,
+			       const struct bkey *);
+
+/*
+ * Returns the first key that is strictly greater than search
+ */
+static inline struct bkey *bch_bset_search(struct btree_keys *b,
+					   struct bset_tree *t,
+					   const struct bkey *search)
+{
+	return search ? __bch_bset_search(b, t, search) : t->data->start;
+}
+
+#define for_each_key_filter(b, k, iter, filter)				\
+	for (bch_btree_iter_init((b), (iter), NULL);			\
+	     ((k) = bch_btree_iter_next_filter((iter), (b), filter));)
+
+#define for_each_key(b, k, iter)					\
+	for (bch_btree_iter_init((b), (iter), NULL);			\
+	     ((k) = bch_btree_iter_next(iter));)
+
+/* Sorting */
+
+struct bset_sort_state {
+	mempool_t		*pool;
+
+	unsigned		page_order;
+	unsigned		crit_factor;
+
+	struct time_stats	time;
+};
+
+void bch_bset_sort_state_free(struct bset_sort_state *);
+int bch_bset_sort_state_init(struct bset_sort_state *, unsigned);
+void bch_btree_sort_lazy(struct btree_keys *, struct bset_sort_state *);
+void bch_btree_sort_into(struct btree_keys *, struct btree_keys *,
+			 struct bset_sort_state *);
+void bch_btree_sort_and_fix_extents(struct btree_keys *, struct btree_iter *,
+				    struct bset_sort_state *);
+void bch_btree_sort_partial(struct btree_keys *, unsigned,
+			    struct bset_sort_state *);
+
+static inline void bch_btree_sort(struct btree_keys *b,
+				  struct bset_sort_state *state)
+{
+	bch_btree_sort_partial(b, 0, state);
+}
+
+struct bset_stats {
+	size_t sets_written, sets_unwritten;
+	size_t bytes_written, bytes_unwritten;
+	size_t floats, failed;
+};
+
+void bch_btree_keys_stats(struct btree_keys *, struct bset_stats *);
+
+/* Bkey utility code */
+
+#define bset_bkey_last(i)	bkey_idx((struct bkey *) (i)->d, (i)->keys)
+
+static inline struct bkey *bset_bkey_idx(struct bset *i, unsigned idx)
+{
+	return bkey_idx(i->start, idx);
+}
+
+static inline void bkey_init(struct bkey *k)
+{
+	*k = ZERO_KEY;
+}
+
 static __always_inline int64_t bkey_cmp(const struct bkey *l,
 					const struct bkey *r)
 {
@@ -196,6 +400,62 @@
 		: (int64_t) KEY_OFFSET(l) - (int64_t) KEY_OFFSET(r);
 }
 
+void bch_bkey_copy_single_ptr(struct bkey *, const struct bkey *,
+			      unsigned);
+bool __bch_cut_front(const struct bkey *, struct bkey *);
+bool __bch_cut_back(const struct bkey *, struct bkey *);
+
+static inline bool bch_cut_front(const struct bkey *where, struct bkey *k)
+{
+	BUG_ON(bkey_cmp(where, k) > 0);
+	return __bch_cut_front(where, k);
+}
+
+static inline bool bch_cut_back(const struct bkey *where, struct bkey *k)
+{
+	BUG_ON(bkey_cmp(where, &START_KEY(k)) < 0);
+	return __bch_cut_back(where, k);
+}
+
+#define PRECEDING_KEY(_k)					\
+({								\
+	struct bkey *_ret = NULL;				\
+								\
+	if (KEY_INODE(_k) || KEY_OFFSET(_k)) {			\
+		_ret = &KEY(KEY_INODE(_k), KEY_OFFSET(_k), 0);	\
+								\
+		if (!_ret->low)					\
+			_ret->high--;				\
+		_ret->low--;					\
+	}							\
+								\
+	_ret;							\
+})
+
+static inline bool bch_ptr_invalid(struct btree_keys *b, const struct bkey *k)
+{
+	return b->ops->key_invalid(b, k);
+}
+
+static inline bool bch_ptr_bad(struct btree_keys *b, const struct bkey *k)
+{
+	return b->ops->key_bad(b, k);
+}
+
+static inline void bch_bkey_to_text(struct btree_keys *b, char *buf,
+				    size_t size, const struct bkey *k)
+{
+	return b->ops->key_to_text(buf, size, k);
+}
+
+static inline bool bch_bkey_equal_header(const struct bkey *l,
+					 const struct bkey *r)
+{
+	return (KEY_DIRTY(l) == KEY_DIRTY(r) &&
+		KEY_PTRS(l) == KEY_PTRS(r) &&
+		KEY_CSUM(l) == KEY_CSUM(r));
+}
+
 /* Keylists */
 
 struct keylist {
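PRECEDING_KEY() above is effectively a 128-bit decrement over the constructed
key's (high, low) words, so bch_btree_insert_key() can start its iterator
strictly before the key being inserted; the zero key yields NULL since nothing
precedes it. The same borrow logic in a standalone sketch (hypothetical helper,
not part of the patch):

#include <stdbool.h>
#include <stdint.h>

struct k128 { uint64_t high, low; };

/* decrement (high, low) by one, mirroring the borrow in PRECEDING_KEY() */
static bool preceding(struct k128 *k)
{
	if (!k->high && !k->low)
		return false;		/* the zero key has no predecessor */

	if (!k->low)
		k->high--;		/* borrow from the high word */
	k->low--;

	return true;
}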
@@ -257,136 +517,44 @@
 
 struct bkey *bch_keylist_pop(struct keylist *);
 void bch_keylist_pop_front(struct keylist *);
-int bch_keylist_realloc(struct keylist *, int, struct cache_set *);
+int __bch_keylist_realloc(struct keylist *, unsigned);
 
-void bch_bkey_copy_single_ptr(struct bkey *, const struct bkey *,
-			      unsigned);
-bool __bch_cut_front(const struct bkey *, struct bkey *);
-bool __bch_cut_back(const struct bkey *, struct bkey *);
+/* Debug stuff */
 
-static inline bool bch_cut_front(const struct bkey *where, struct bkey *k)
+#ifdef CONFIG_BCACHE_DEBUG
+
+int __bch_count_data(struct btree_keys *);
+void __bch_check_keys(struct btree_keys *, const char *, ...);
+void bch_dump_bset(struct btree_keys *, struct bset *, unsigned);
+void bch_dump_bucket(struct btree_keys *);
+
+#else
+
+static inline int __bch_count_data(struct btree_keys *b) { return -1; }
+static inline void __bch_check_keys(struct btree_keys *b, const char *fmt, ...) {}
+static inline void bch_dump_bucket(struct btree_keys *b) {}
+void bch_dump_bset(struct btree_keys *, struct bset *, unsigned);
+
+#endif
+
+static inline bool btree_keys_expensive_checks(struct btree_keys *b)
 {
-	BUG_ON(bkey_cmp(where, k) > 0);
-	return __bch_cut_front(where, k);
+#ifdef CONFIG_BCACHE_DEBUG
+	return *b->expensive_debug_checks;
+#else
+	return false;
+#endif
 }
 
-static inline bool bch_cut_back(const struct bkey *where, struct bkey *k)
+static inline int bch_count_data(struct btree_keys *b)
 {
-	BUG_ON(bkey_cmp(where, &START_KEY(k)) < 0);
-	return __bch_cut_back(where, k);
+	return btree_keys_expensive_checks(b) ? __bch_count_data(b) : -1;
 }
 
-const char *bch_ptr_status(struct cache_set *, const struct bkey *);
-bool bch_btree_ptr_invalid(struct cache_set *, const struct bkey *);
-bool bch_extent_ptr_invalid(struct cache_set *, const struct bkey *);
-
-bool bch_ptr_bad(struct btree *, const struct bkey *);
-
-static inline uint8_t gen_after(uint8_t a, uint8_t b)
-{
-	uint8_t r = a - b;
-	return r > 128U ? 0 : r;
-}
-
-static inline uint8_t ptr_stale(struct cache_set *c, const struct bkey *k,
-				unsigned i)
-{
-	return gen_after(PTR_BUCKET(c, k, i)->gen, PTR_GEN(k, i));
-}
-
-static inline bool ptr_available(struct cache_set *c, const struct bkey *k,
-				 unsigned i)
-{
-	return (PTR_DEV(k, i) < MAX_CACHES_PER_SET) && PTR_CACHE(c, k, i);
-}
-
-
-typedef bool (*ptr_filter_fn)(struct btree *, const struct bkey *);
-
-struct bkey *bch_btree_iter_next(struct btree_iter *);
-struct bkey *bch_btree_iter_next_filter(struct btree_iter *,
-					struct btree *, ptr_filter_fn);
-
-void bch_btree_iter_push(struct btree_iter *, struct bkey *, struct bkey *);
-struct bkey *__bch_btree_iter_init(struct btree *, struct btree_iter *,
-				   struct bkey *, struct bset_tree *);
-
-/* 32 bits total: */
-#define BKEY_MID_BITS		3
-#define BKEY_EXPONENT_BITS	7
-#define BKEY_MANTISSA_BITS	22
-#define BKEY_MANTISSA_MASK	((1 << BKEY_MANTISSA_BITS) - 1)
-
-struct bkey_float {
-	unsigned	exponent:BKEY_EXPONENT_BITS;
-	unsigned	m:BKEY_MID_BITS;
-	unsigned	mantissa:BKEY_MANTISSA_BITS;
-} __packed;
-
-/*
- * BSET_CACHELINE was originally intended to match the hardware cacheline size -
- * it used to be 64, but I realized the lookup code would touch slightly less
- * memory if it was 128.
- *
- * It definites the number of bytes (in struct bset) per struct bkey_float in
- * the auxiliar search tree - when we're done searching the bset_float tree we
- * have this many bytes left that we do a linear search over.
- *
- * Since (after level 5) every level of the bset_tree is on a new cacheline,
- * we're touching one fewer cacheline in the bset tree in exchange for one more
- * cacheline in the linear search - but the linear search might stop before it
- * gets to the second cacheline.
- */
-
-#define BSET_CACHELINE		128
-#define bset_tree_space(b)	(btree_data_space(b) / BSET_CACHELINE)
-
-#define bset_tree_bytes(b)	(bset_tree_space(b) * sizeof(struct bkey_float))
-#define bset_prev_bytes(b)	(bset_tree_space(b) * sizeof(uint8_t))
-
-void bch_bset_init_next(struct btree *);
-
-void bch_bset_fix_invalidated_key(struct btree *, struct bkey *);
-void bch_bset_fix_lookup_table(struct btree *, struct bkey *);
-
-struct bkey *__bch_bset_search(struct btree *, struct bset_tree *,
-			   const struct bkey *);
-
-/*
- * Returns the first key that is strictly greater than search
- */
-static inline struct bkey *bch_bset_search(struct btree *b, struct bset_tree *t,
-					   const struct bkey *search)
-{
-	return search ? __bch_bset_search(b, t, search) : t->data->start;
-}
-
-#define PRECEDING_KEY(_k)					\
-({								\
-	struct bkey *_ret = NULL;				\
-								\
-	if (KEY_INODE(_k) || KEY_OFFSET(_k)) {			\
-		_ret = &KEY(KEY_INODE(_k), KEY_OFFSET(_k), 0);	\
-								\
-		if (!_ret->low)					\
-			_ret->high--;				\
-		_ret->low--;					\
-	}							\
-								\
-	_ret;							\
-})
-
-bool bch_bkey_try_merge(struct btree *, struct bkey *, struct bkey *);
-void bch_btree_sort_lazy(struct btree *);
-void bch_btree_sort_into(struct btree *, struct btree *);
-void bch_btree_sort_and_fix_extents(struct btree *, struct btree_iter *);
-void bch_btree_sort_partial(struct btree *, unsigned);
-
-static inline void bch_btree_sort(struct btree *b)
-{
-	bch_btree_sort_partial(b, 0);
-}
-
-int bch_bset_print_stats(struct cache_set *, char *);
+#define bch_check_keys(b, ...)						\
+do {									\
+	if (btree_keys_expensive_checks(b))				\
+		__bch_check_keys(b, __VA_ARGS__);			\
+} while (0)
 
 #endif
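
The bch_count_data()/bch_check_keys() wrappers added above follow one pattern throughout this series: a CONFIG_BCACHE_DEBUG compile-time switch combined with a per-cache-set runtime flag read through a pointer, so the expensive consistency checks vanish entirely from production builds but can still be toggled at runtime on debug builds. A minimal standalone sketch of the same idiom (names and numbers here are illustrative, not part of the patch):

	#include <stdbool.h>
	#include <stdio.h>

	/* stand-in for CONFIG_BCACHE_DEBUG; set to 0 to compile the checks away */
	#define MY_DEBUG 1

	struct keys {
		bool *expensive_debug_checks;	/* runtime toggle owned elsewhere */
	};

	static inline bool expensive_checks(struct keys *k)
	{
	#if MY_DEBUG
		return *k->expensive_debug_checks;
	#else
		return false;			/* constant, so callers' branches fold away */
	#endif
	}

	static void __check_keys(struct keys *k, const char *msg)
	{
		(void) k;
		printf("running expensive check: %s\n", msg);	/* stand-in for real verification */
	}

	#define check_keys(k, msg)				\
	do {							\
		if (expensive_checks(k))			\
			__check_keys(k, msg);			\
	} while (0)

	int main(void)
	{
		bool enabled = true;
		struct keys k = { .expensive_debug_checks = &enabled };

		check_keys(&k, "writing");	/* runs only when both knobs are on */
		return 0;
	}
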
diff --git a/drivers/md/bcache/btree.c b/drivers/md/bcache/btree.c
index 31bb53f..5f9c2a6 100644
--- a/drivers/md/bcache/btree.c
+++ b/drivers/md/bcache/btree.c
@@ -23,7 +23,7 @@
 #include "bcache.h"
 #include "btree.h"
 #include "debug.h"
-#include "writeback.h"
+#include "extents.h"
 
 #include <linux/slab.h>
 #include <linux/bitops.h>
@@ -89,13 +89,6 @@
  * Test module load/unload
  */
 
-enum {
-	BTREE_INSERT_STATUS_INSERT,
-	BTREE_INSERT_STATUS_BACK_MERGE,
-	BTREE_INSERT_STATUS_OVERWROTE,
-	BTREE_INSERT_STATUS_FRONT_MERGE,
-};
-
 #define MAX_NEED_GC		64
 #define MAX_SAVE_PRIO		72
 
@@ -106,14 +99,6 @@
 
 static struct workqueue_struct *btree_io_wq;
 
-static inline bool should_split(struct btree *b)
-{
-	struct bset *i = write_block(b);
-	return b->written >= btree_blocks(b) ||
-		(b->written + __set_blocks(i, i->keys + 15, b->c)
-		 > btree_blocks(b));
-}
-
 #define insert_lock(s, b)	((b)->level <= (s)->lock)
 
 /*
@@ -167,6 +152,8 @@
 			_r = bch_btree_ ## fn(_b, op, ##__VA_ARGS__);	\
 		}							\
 		rw_unlock(_w, _b);					\
+		if (_r == -EINTR)					\
+			schedule();					\
 		bch_cannibalize_unlock(c);				\
 		if (_r == -ENOSPC) {					\
 			wait_event((c)->try_wait,			\
@@ -175,9 +162,15 @@
 		}							\
 	} while (_r == -EINTR);						\
 									\
+	finish_wait(&(c)->bucket_wait, &(op)->wait);			\
 	_r;								\
 })
 
+static inline struct bset *write_block(struct btree *b)
+{
+	return ((void *) btree_bset_first(b)) + b->written * block_bytes(b->c);
+}
+
 /* Btree key manipulation */
 
 void bkey_put(struct cache_set *c, struct bkey *k)
@@ -194,16 +187,16 @@
 static uint64_t btree_csum_set(struct btree *b, struct bset *i)
 {
 	uint64_t crc = b->key.ptr[0];
-	void *data = (void *) i + 8, *end = end(i);
+	void *data = (void *) i + 8, *end = bset_bkey_last(i);
 
 	crc = bch_crc64_update(crc, data, end - data);
 	return crc ^ 0xffffffffffffffffULL;
 }
 
-static void bch_btree_node_read_done(struct btree *b)
+void bch_btree_node_read_done(struct btree *b)
 {
 	const char *err = "bad btree header";
-	struct bset *i = b->sets[0].data;
+	struct bset *i = btree_bset_first(b);
 	struct btree_iter *iter;
 
 	iter = mempool_alloc(b->c->fill_iter, GFP_NOWAIT);
@@ -211,21 +204,22 @@
 	iter->used = 0;
 
 #ifdef CONFIG_BCACHE_DEBUG
-	iter->b = b;
+	iter->b = &b->keys;
 #endif
 
 	if (!i->seq)
 		goto err;
 
 	for (;
-	     b->written < btree_blocks(b) && i->seq == b->sets[0].data->seq;
+	     b->written < btree_blocks(b) && i->seq == b->keys.set[0].data->seq;
 	     i = write_block(b)) {
 		err = "unsupported bset version";
 		if (i->version > BCACHE_BSET_VERSION)
 			goto err;
 
 		err = "bad btree header";
-		if (b->written + set_blocks(i, b->c) > btree_blocks(b))
+		if (b->written + set_blocks(i, block_bytes(b->c)) >
+		    btree_blocks(b))
 			goto err;
 
 		err = "bad magic";
@@ -245,39 +239,40 @@
 		}
 
 		err = "empty set";
-		if (i != b->sets[0].data && !i->keys)
+		if (i != b->keys.set[0].data && !i->keys)
 			goto err;
 
-		bch_btree_iter_push(iter, i->start, end(i));
+		bch_btree_iter_push(iter, i->start, bset_bkey_last(i));
 
-		b->written += set_blocks(i, b->c);
+		b->written += set_blocks(i, block_bytes(b->c));
 	}
 
 	err = "corrupted btree";
 	for (i = write_block(b);
-	     index(i, b) < btree_blocks(b);
+	     bset_sector_offset(&b->keys, i) < KEY_SIZE(&b->key);
 	     i = ((void *) i) + block_bytes(b->c))
-		if (i->seq == b->sets[0].data->seq)
+		if (i->seq == b->keys.set[0].data->seq)
 			goto err;
 
-	bch_btree_sort_and_fix_extents(b, iter);
+	bch_btree_sort_and_fix_extents(&b->keys, iter, &b->c->sort);
 
-	i = b->sets[0].data;
+	i = b->keys.set[0].data;
 	err = "short btree key";
-	if (b->sets[0].size &&
-	    bkey_cmp(&b->key, &b->sets[0].end) < 0)
+	if (b->keys.set[0].size &&
+	    bkey_cmp(&b->key, &b->keys.set[0].end) < 0)
 		goto err;
 
 	if (b->written < btree_blocks(b))
-		bch_bset_init_next(b);
+		bch_bset_init_next(&b->keys, write_block(b),
+				   bset_magic(&b->c->sb));
 out:
 	mempool_free(iter, b->c->fill_iter);
 	return;
 err:
 	set_btree_node_io_error(b);
-	bch_cache_set_error(b->c, "%s at bucket %zu, block %zu, %u keys",
+	bch_cache_set_error(b->c, "%s at bucket %zu, block %u, %u keys",
 			    err, PTR_BUCKET_NR(b->c, &b->key, 0),
-			    index(i, b), i->keys);
+			    bset_block_offset(b, i), i->keys);
 	goto out;
 }
 
@@ -287,7 +282,7 @@
 	closure_put(cl);
 }
 
-void bch_btree_node_read(struct btree *b)
+static void bch_btree_node_read(struct btree *b)
 {
 	uint64_t start_time = local_clock();
 	struct closure cl;
@@ -299,11 +294,11 @@
 
 	bio = bch_bbio_alloc(b->c);
 	bio->bi_rw	= REQ_META|READ_SYNC;
-	bio->bi_size	= KEY_SIZE(&b->key) << 9;
+	bio->bi_iter.bi_size = KEY_SIZE(&b->key) << 9;
 	bio->bi_end_io	= btree_node_read_endio;
 	bio->bi_private	= &cl;
 
-	bch_bio_map(bio, b->sets[0].data);
+	bch_bio_map(bio, b->keys.set[0].data);
 
 	bch_submit_bbio(bio, b->c, &b->key, 0);
 	closure_sync(&cl);
@@ -340,9 +335,16 @@
 	w->journal	= NULL;
 }
 
+static void btree_node_write_unlock(struct closure *cl)
+{
+	struct btree *b = container_of(cl, struct btree, io);
+
+	up(&b->io_mutex);
+}
+
 static void __btree_node_write_done(struct closure *cl)
 {
-	struct btree *b = container_of(cl, struct btree, io.cl);
+	struct btree *b = container_of(cl, struct btree, io);
 	struct btree_write *w = btree_prev_write(b);
 
 	bch_bbio_free(b->bio, b->c);
@@ -353,16 +355,16 @@
 		queue_delayed_work(btree_io_wq, &b->work,
 				   msecs_to_jiffies(30000));
 
-	closure_return(cl);
+	closure_return_with_destructor(cl, btree_node_write_unlock);
 }
 
 static void btree_node_write_done(struct closure *cl)
 {
-	struct btree *b = container_of(cl, struct btree, io.cl);
+	struct btree *b = container_of(cl, struct btree, io);
 	struct bio_vec *bv;
 	int n;
 
-	__bio_for_each_segment(bv, b->bio, n, 0)
+	bio_for_each_segment_all(bv, b->bio, n)
 		__free_page(bv->bv_page);
 
 	__btree_node_write_done(cl);
@@ -371,7 +373,7 @@
 static void btree_node_write_endio(struct bio *bio, int error)
 {
 	struct closure *cl = bio->bi_private;
-	struct btree *b = container_of(cl, struct btree, io.cl);
+	struct btree *b = container_of(cl, struct btree, io);
 
 	if (error)
 		set_btree_node_io_error(b);
@@ -382,8 +384,8 @@
 
 static void do_btree_node_write(struct btree *b)
 {
-	struct closure *cl = &b->io.cl;
-	struct bset *i = b->sets[b->nsets].data;
+	struct closure *cl = &b->io;
+	struct bset *i = btree_bset_last(b);
 	BKEY_PADDED(key) k;
 
 	i->version	= BCACHE_BSET_VERSION;
@@ -395,7 +397,7 @@
 	b->bio->bi_end_io	= btree_node_write_endio;
 	b->bio->bi_private	= cl;
 	b->bio->bi_rw		= REQ_META|WRITE_SYNC|REQ_FUA;
-	b->bio->bi_size		= set_blocks(i, b->c) * block_bytes(b->c);
+	b->bio->bi_iter.bi_size	= roundup(set_bytes(i), block_bytes(b->c));
 	bch_bio_map(b->bio, i);
 
 	/*
@@ -414,14 +416,15 @@
 	 */
 
 	bkey_copy(&k.key, &b->key);
-	SET_PTR_OFFSET(&k.key, 0, PTR_OFFSET(&k.key, 0) + bset_offset(b, i));
+	SET_PTR_OFFSET(&k.key, 0, PTR_OFFSET(&k.key, 0) +
+		       bset_sector_offset(&b->keys, i));
 
 	if (!bio_alloc_pages(b->bio, GFP_NOIO)) {
 		int j;
 		struct bio_vec *bv;
 		void *base = (void *) ((unsigned long) i & ~(PAGE_SIZE - 1));
 
-		bio_for_each_segment(bv, b->bio, j)
+		bio_for_each_segment_all(bv, b->bio, j)
 			memcpy(page_address(bv->bv_page),
 			       base + j * PAGE_SIZE, PAGE_SIZE);
 
@@ -435,40 +438,54 @@
 		bch_submit_bbio(b->bio, b->c, &k.key, 0);
 
 		closure_sync(cl);
-		__btree_node_write_done(cl);
+		continue_at_nobarrier(cl, __btree_node_write_done, NULL);
 	}
 }
 
 void bch_btree_node_write(struct btree *b, struct closure *parent)
 {
-	struct bset *i = b->sets[b->nsets].data;
+	struct bset *i = btree_bset_last(b);
 
 	trace_bcache_btree_write(b);
 
 	BUG_ON(current->bio_list);
 	BUG_ON(b->written >= btree_blocks(b));
 	BUG_ON(b->written && !i->keys);
-	BUG_ON(b->sets->data->seq != i->seq);
-	bch_check_keys(b, "writing");
+	BUG_ON(btree_bset_first(b)->seq != i->seq);
+	bch_check_keys(&b->keys, "writing");
 
 	cancel_delayed_work(&b->work);
 
 	/* If caller isn't waiting for write, parent refcount is cache set */
-	closure_lock(&b->io, parent ?: &b->c->cl);
+	down(&b->io_mutex);
+	closure_init(&b->io, parent ?: &b->c->cl);
 
 	clear_bit(BTREE_NODE_dirty,	 &b->flags);
 	change_bit(BTREE_NODE_write_idx, &b->flags);
 
 	do_btree_node_write(b);
 
-	b->written += set_blocks(i, b->c);
-	atomic_long_add(set_blocks(i, b->c) * b->c->sb.block_size,
+	atomic_long_add(set_blocks(i, block_bytes(b->c)) * b->c->sb.block_size,
 			&PTR_CACHE(b->c, &b->key, 0)->btree_sectors_written);
 
-	bch_btree_sort_lazy(b);
+	b->written += set_blocks(i, block_bytes(b->c));
+
+	/* If not a leaf node, always sort */
+	if (b->level && b->keys.nsets)
+		bch_btree_sort(&b->keys, &b->c->sort);
+	else
+		bch_btree_sort_lazy(&b->keys, &b->c->sort);
+
+	/*
+	 * do verify if there was more than one set initially (i.e. we did a
+	 * sort) and we sorted down to a single set:
+	 */
+	if (i != b->keys.set->data && !b->keys.nsets)
+		bch_btree_verify(b);
 
 	if (b->written < btree_blocks(b))
-		bch_bset_init_next(b);
+		bch_bset_init_next(&b->keys, write_block(b),
+				   bset_magic(&b->c->sb));
 }
 
 static void bch_btree_node_write_sync(struct btree *b)
@@ -493,7 +510,7 @@
 
 static void bch_btree_leaf_dirty(struct btree *b, atomic_t *journal_ref)
 {
-	struct bset *i = b->sets[b->nsets].data;
+	struct bset *i = btree_bset_last(b);
 	struct btree_write *w = btree_current_write(b);
 
 	BUG_ON(!b->written);
@@ -528,24 +545,6 @@
  * mca -> memory cache
  */
 
-static void mca_reinit(struct btree *b)
-{
-	unsigned i;
-
-	b->flags	= 0;
-	b->written	= 0;
-	b->nsets	= 0;
-
-	for (i = 0; i < MAX_BSETS; i++)
-		b->sets[i].size = 0;
-	/*
-	 * Second loop starts at 1 because b->sets[0]->data is the memory we
-	 * allocated
-	 */
-	for (i = 1; i < MAX_BSETS; i++)
-		b->sets[i].data = NULL;
-}
-
 #define mca_reserve(c)	(((c->root && c->root->level)		\
 			  ? c->root->level : 1) * 8 + 16)
 #define mca_can_free(c)						\
@@ -553,28 +552,12 @@
 
 static void mca_data_free(struct btree *b)
 {
-	struct bset_tree *t = b->sets;
-	BUG_ON(!closure_is_unlocked(&b->io.cl));
+	BUG_ON(b->io_mutex.count != 1);
 
-	if (bset_prev_bytes(b) < PAGE_SIZE)
-		kfree(t->prev);
-	else
-		free_pages((unsigned long) t->prev,
-			   get_order(bset_prev_bytes(b)));
+	bch_btree_keys_free(&b->keys);
 
-	if (bset_tree_bytes(b) < PAGE_SIZE)
-		kfree(t->tree);
-	else
-		free_pages((unsigned long) t->tree,
-			   get_order(bset_tree_bytes(b)));
-
-	free_pages((unsigned long) t->data, b->page_order);
-
-	t->prev = NULL;
-	t->tree = NULL;
-	t->data = NULL;
-	list_move(&b->list, &b->c->btree_cache_freed);
 	b->c->bucket_cache_used--;
+	list_move(&b->list, &b->c->btree_cache_freed);
 }
 
 static void mca_bucket_free(struct btree *b)
@@ -593,34 +576,16 @@
 
 static void mca_data_alloc(struct btree *b, struct bkey *k, gfp_t gfp)
 {
-	struct bset_tree *t = b->sets;
-	BUG_ON(t->data);
-
-	b->page_order = max_t(unsigned,
-			      ilog2(b->c->btree_pages),
-			      btree_order(k));
-
-	t->data = (void *) __get_free_pages(gfp, b->page_order);
-	if (!t->data)
-		goto err;
-
-	t->tree = bset_tree_bytes(b) < PAGE_SIZE
-		? kmalloc(bset_tree_bytes(b), gfp)
-		: (void *) __get_free_pages(gfp, get_order(bset_tree_bytes(b)));
-	if (!t->tree)
-		goto err;
-
-	t->prev = bset_prev_bytes(b) < PAGE_SIZE
-		? kmalloc(bset_prev_bytes(b), gfp)
-		: (void *) __get_free_pages(gfp, get_order(bset_prev_bytes(b)));
-	if (!t->prev)
-		goto err;
-
-	list_move(&b->list, &b->c->btree_cache);
-	b->c->bucket_cache_used++;
-	return;
-err:
-	mca_data_free(b);
+	if (!bch_btree_keys_alloc(&b->keys,
+				  max_t(unsigned,
+					ilog2(b->c->btree_pages),
+					btree_order(k)),
+				  gfp)) {
+		b->c->bucket_cache_used++;
+		list_move(&b->list, &b->c->btree_cache);
+	} else {
+		list_move(&b->list, &b->c->btree_cache_freed);
+	}
 }
 
 static struct btree *mca_bucket_alloc(struct cache_set *c,
@@ -635,7 +600,7 @@
 	INIT_LIST_HEAD(&b->list);
 	INIT_DELAYED_WORK(&b->work, btree_node_write_work);
 	b->c = c;
-	closure_init_unlocked(&b->io);
+	sema_init(&b->io_mutex, 1);
 
 	mca_data_alloc(b, k, gfp);
 	return b;
@@ -651,24 +616,31 @@
 	if (!down_write_trylock(&b->lock))
 		return -ENOMEM;
 
-	BUG_ON(btree_node_dirty(b) && !b->sets[0].data);
+	BUG_ON(btree_node_dirty(b) && !b->keys.set[0].data);
 
-	if (b->page_order < min_order ||
-	    (!flush &&
-	     (btree_node_dirty(b) ||
-	      atomic_read(&b->io.cl.remaining) != -1))) {
-		rw_unlock(true, b);
-		return -ENOMEM;
+	if (b->keys.page_order < min_order)
+		goto out_unlock;
+
+	if (!flush) {
+		if (btree_node_dirty(b))
+			goto out_unlock;
+
+		if (down_trylock(&b->io_mutex))
+			goto out_unlock;
+		up(&b->io_mutex);
 	}
 
 	if (btree_node_dirty(b))
 		bch_btree_node_write_sync(b);
 
 	/* wait for any in flight btree write */
-	closure_wait_event(&b->io.wait, &cl,
-			   atomic_read(&b->io.cl.remaining) == -1);
+	down(&b->io_mutex);
+	up(&b->io_mutex);
 
 	return 0;
+out_unlock:
+	rw_unlock(true, b);
+	return -ENOMEM;
 }
 
 static unsigned long bch_mca_scan(struct shrinker *shrink,
@@ -714,14 +686,10 @@
 		}
 	}
 
-	/*
-	 * Can happen right when we first start up, before we've read in any
-	 * btree nodes
-	 */
-	if (list_empty(&c->btree_cache))
-		goto out;
-
 	for (i = 0; (nr--) && i < c->bucket_cache_used; i++) {
+		if (list_empty(&c->btree_cache))
+			goto out;
+
 		b = list_first_entry(&c->btree_cache, struct btree, list);
 		list_rotate_left(&c->btree_cache);
 
@@ -767,6 +735,8 @@
 #ifdef CONFIG_BCACHE_DEBUG
 	if (c->verify_data)
 		list_move(&c->verify_data->list, &c->btree_cache);
+
+	free_pages((unsigned long) c->verify_ondisk, ilog2(bucket_pages(c)));
 #endif
 
 	list_splice(&c->btree_cache_freeable,
@@ -807,10 +777,13 @@
 #ifdef CONFIG_BCACHE_DEBUG
 	mutex_init(&c->verify_lock);
 
+	c->verify_ondisk = (void *)
+		__get_free_pages(GFP_KERNEL, ilog2(bucket_pages(c)));
+
 	c->verify_data = mca_bucket_alloc(c, &ZERO_KEY, GFP_KERNEL);
 
 	if (c->verify_data &&
-	    c->verify_data->sets[0].data)
+	    c->verify_data->keys.set->data)
 		list_del_init(&c->verify_data->list);
 	else
 		c->verify_data = NULL;
@@ -908,7 +881,7 @@
 	list_for_each_entry(b, &c->btree_cache_freed, list)
 		if (!mca_reap(b, 0, false)) {
 			mca_data_alloc(b, k, __GFP_NOWARN|GFP_NOIO);
-			if (!b->sets[0].data)
+			if (!b->keys.set[0].data)
 				goto err;
 			else
 				goto out;
@@ -919,10 +892,10 @@
 		goto err;
 
 	BUG_ON(!down_write_trylock(&b->lock));
-	if (!b->sets->data)
+	if (!b->keys.set->data)
 		goto err;
 out:
-	BUG_ON(!closure_is_unlocked(&b->io.cl));
+	BUG_ON(b->io_mutex.count != 1);
 
 	bkey_copy(&b->key, k);
 	list_move(&b->list, &c->btree_cache);
@@ -930,10 +903,17 @@
 	hlist_add_head_rcu(&b->hash, mca_hash(c, k));
 
 	lock_set_subclass(&b->lock.dep_map, level + 1, _THIS_IP_);
-	b->level	= level;
 	b->parent	= (void *) ~0UL;
+	b->flags	= 0;
+	b->written	= 0;
+	b->level	= level;
 
-	mca_reinit(b);
+	if (!b->level)
+		bch_btree_keys_init(&b->keys, &bch_extent_keys_ops,
+				    &b->c->expensive_debug_checks);
+	else
+		bch_btree_keys_init(&b->keys, &bch_btree_keys_ops,
+				    &b->c->expensive_debug_checks);
 
 	return b;
 err:
@@ -994,13 +974,13 @@
 
 	b->accessed = 1;
 
-	for (; i <= b->nsets && b->sets[i].size; i++) {
-		prefetch(b->sets[i].tree);
-		prefetch(b->sets[i].data);
+	for (; i <= b->keys.nsets && b->keys.set[i].size; i++) {
+		prefetch(b->keys.set[i].tree);
+		prefetch(b->keys.set[i].data);
 	}
 
-	for (; i <= b->nsets; i++)
-		prefetch(b->sets[i].data);
+	for (; i <= b->keys.nsets; i++)
+		prefetch(b->keys.set[i].data);
 
 	if (btree_node_io_error(b)) {
 		rw_unlock(write, b);
@@ -1063,7 +1043,7 @@
 
 	mutex_lock(&c->bucket_lock);
 retry:
-	if (__bch_bucket_alloc_set(c, WATERMARK_METADATA, &k.key, 1, wait))
+	if (__bch_bucket_alloc_set(c, RESERVE_BTREE, &k.key, 1, wait))
 		goto err;
 
 	bkey_put(c, &k.key);
@@ -1080,7 +1060,7 @@
 	}
 
 	b->accessed = 1;
-	bch_bset_init_next(b);
+	bch_bset_init_next(&b->keys, b->keys.set->data, bset_magic(&b->c->sb));
 
 	mutex_unlock(&c->bucket_lock);
 
@@ -1098,8 +1078,10 @@
 static struct btree *btree_node_alloc_replacement(struct btree *b, bool wait)
 {
 	struct btree *n = bch_btree_node_alloc(b->c, b->level, wait);
-	if (!IS_ERR_OR_NULL(n))
-		bch_btree_sort_into(b, n);
+	if (!IS_ERR_OR_NULL(n)) {
+		bch_btree_sort_into(&b->keys, &n->keys, &b->c->sort);
+		bkey_copy_key(&n->key, &b->key);
+	}
 
 	return n;
 }
@@ -1120,6 +1102,28 @@
 	atomic_inc(&b->c->prio_blocked);
 }
 
+static int btree_check_reserve(struct btree *b, struct btree_op *op)
+{
+	struct cache_set *c = b->c;
+	struct cache *ca;
+	unsigned i, reserve = c->root->level * 2 + 1;
+	int ret = 0;
+
+	mutex_lock(&c->bucket_lock);
+
+	for_each_cache(ca, c, i)
+		if (fifo_used(&ca->free[RESERVE_BTREE]) < reserve) {
+			if (op)
+				prepare_to_wait(&c->bucket_wait, &op->wait,
+						TASK_UNINTERRUPTIBLE);
+			ret = -EINTR;
+			break;
+		}
+
+	mutex_unlock(&c->bucket_lock);
+	return ret;
+}
+
 /* Garbage collection */
 
 uint8_t __bch_btree_mark_key(struct cache_set *c, int level, struct bkey *k)
@@ -1163,7 +1167,7 @@
 		/* guard against overflow */
 		SET_GC_SECTORS_USED(g, min_t(unsigned,
 					     GC_SECTORS_USED(g) + KEY_SIZE(k),
-					     (1 << 14) - 1));
+					     MAX_GC_SECTORS_USED));
 
 		BUG_ON(!GC_SECTORS_USED(g));
 	}
@@ -1183,11 +1187,11 @@
 
 	gc->nodes++;
 
-	for_each_key_filter(b, k, &iter, bch_ptr_invalid) {
+	for_each_key_filter(&b->keys, k, &iter, bch_ptr_invalid) {
 		stale = max(stale, btree_mark_key(b, k));
 		keys++;
 
-		if (bch_ptr_bad(b, k))
+		if (bch_ptr_bad(&b->keys, k))
 			continue;
 
 		gc->key_bytes += bkey_u64s(k);
@@ -1197,9 +1201,9 @@
 		gc->data += KEY_SIZE(k);
 	}
 
-	for (t = b->sets; t <= &b->sets[b->nsets]; t++)
+	for (t = b->keys.set; t <= &b->keys.set[b->keys.nsets]; t++)
 		btree_bug_on(t->size &&
-			     bset_written(b, t) &&
+			     bset_written(&b->keys, t) &&
 			     bkey_cmp(&b->key, &t->end) < 0,
 			     b, "found short btree key in gc");
 
@@ -1243,7 +1247,8 @@
 	blocks = btree_default_blocks(b->c) * 2 / 3;
 
 	if (nodes < 2 ||
-	    __set_blocks(b->sets[0].data, keys, b->c) > blocks * (nodes - 1))
+	    __set_blocks(b->keys.set[0].data, keys,
+			 block_bytes(b->c)) > blocks * (nodes - 1))
 		return 0;
 
 	for (i = 0; i < nodes; i++) {
@@ -1253,18 +1258,19 @@
 	}
 
 	for (i = nodes - 1; i > 0; --i) {
-		struct bset *n1 = new_nodes[i]->sets->data;
-		struct bset *n2 = new_nodes[i - 1]->sets->data;
+		struct bset *n1 = btree_bset_first(new_nodes[i]);
+		struct bset *n2 = btree_bset_first(new_nodes[i - 1]);
 		struct bkey *k, *last = NULL;
 
 		keys = 0;
 
 		if (i > 1) {
 			for (k = n2->start;
-			     k < end(n2);
+			     k < bset_bkey_last(n2);
 			     k = bkey_next(k)) {
 				if (__set_blocks(n1, n1->keys + keys +
-						 bkey_u64s(k), b->c) > blocks)
+						 bkey_u64s(k),
+						 block_bytes(b->c)) > blocks)
 					break;
 
 				last = k;
@@ -1280,7 +1286,8 @@
 			 * though)
 			 */
 			if (__set_blocks(n1, n1->keys + n2->keys,
-					 b->c) > btree_blocks(new_nodes[i]))
+					 block_bytes(b->c)) >
+			    btree_blocks(new_nodes[i]))
 				goto out_nocoalesce;
 
 			keys = n2->keys;
@@ -1288,27 +1295,28 @@
 			last = &r->b->key;
 		}
 
-		BUG_ON(__set_blocks(n1, n1->keys + keys,
-				    b->c) > btree_blocks(new_nodes[i]));
+		BUG_ON(__set_blocks(n1, n1->keys + keys, block_bytes(b->c)) >
+		       btree_blocks(new_nodes[i]));
 
 		if (last)
 			bkey_copy_key(&new_nodes[i]->key, last);
 
-		memcpy(end(n1),
+		memcpy(bset_bkey_last(n1),
 		       n2->start,
-		       (void *) node(n2, keys) - (void *) n2->start);
+		       (void *) bset_bkey_idx(n2, keys) - (void *) n2->start);
 
 		n1->keys += keys;
 		r[i].keys = n1->keys;
 
 		memmove(n2->start,
-			node(n2, keys),
-			(void *) end(n2) - (void *) node(n2, keys));
+			bset_bkey_idx(n2, keys),
+			(void *) bset_bkey_last(n2) -
+			(void *) bset_bkey_idx(n2, keys));
 
 		n2->keys -= keys;
 
-		if (bch_keylist_realloc(keylist,
-					KEY_PTRS(&new_nodes[i]->key), b->c))
+		if (__bch_keylist_realloc(keylist,
+					  bkey_u64s(&new_nodes[i]->key)))
 			goto out_nocoalesce;
 
 		bch_btree_node_write(new_nodes[i], &cl);
@@ -1316,7 +1324,7 @@
 	}
 
 	for (i = 0; i < nodes; i++) {
-		if (bch_keylist_realloc(keylist, KEY_PTRS(&r[i].b->key), b->c))
+		if (__bch_keylist_realloc(keylist, bkey_u64s(&r[i].b->key)))
 			goto out_nocoalesce;
 
 		make_btree_freeing_key(r[i].b, keylist->top);
@@ -1324,7 +1332,7 @@
 	}
 
 	/* We emptied out this node */
-	BUG_ON(new_nodes[0]->sets->data->keys);
+	BUG_ON(btree_bset_first(new_nodes[0])->keys);
 	btree_node_free(new_nodes[0]);
 	rw_unlock(true, new_nodes[0]);
 
@@ -1370,7 +1378,7 @@
 	struct btree_iter iter;
 	unsigned ret = 0;
 
-	for_each_key_filter(b, k, &iter, bch_ptr_bad)
+	for_each_key_filter(&b->keys, k, &iter, bch_ptr_bad)
 		ret += bkey_u64s(k);
 
 	return ret;
@@ -1390,13 +1398,13 @@
 	struct gc_merge_info *last = r + GC_MERGE_NODES - 1;
 
 	bch_keylist_init(&keys);
-	bch_btree_iter_init(b, &iter, &b->c->gc_done);
+	bch_btree_iter_init(&b->keys, &iter, &b->c->gc_done);
 
 	for (i = 0; i < GC_MERGE_NODES; i++)
 		r[i].b = ERR_PTR(-EINTR);
 
 	while (1) {
-		k = bch_btree_iter_next_filter(&iter, b, bch_ptr_bad);
+		k = bch_btree_iter_next_filter(&iter, &b->keys, bch_ptr_bad);
 		if (k) {
 			r->b = bch_btree_node_get(b->c, k, b->level - 1, true);
 			if (IS_ERR(r->b)) {
@@ -1416,7 +1424,8 @@
 
 		if (!IS_ERR(last->b)) {
 			should_rewrite = btree_gc_mark_node(last->b, gc);
-			if (should_rewrite) {
+			if (should_rewrite &&
+			    !btree_check_reserve(b, NULL)) {
 				n = btree_node_alloc_replacement(last->b,
 								 false);
 
@@ -1705,7 +1714,7 @@
 	struct bucket *g;
 	struct btree_iter iter;
 
-	for_each_key_filter(b, k, &iter, bch_ptr_invalid) {
+	for_each_key_filter(&b->keys, k, &iter, bch_ptr_invalid) {
 		for (i = 0; i < KEY_PTRS(k); i++) {
 			if (!ptr_available(b->c, k, i))
 				continue;
@@ -1728,10 +1737,11 @@
 	}
 
 	if (b->level) {
-		bch_btree_iter_init(b, &iter, NULL);
+		bch_btree_iter_init(&b->keys, &iter, NULL);
 
 		do {
-			k = bch_btree_iter_next_filter(&iter, b, bch_ptr_bad);
+			k = bch_btree_iter_next_filter(&iter, &b->keys,
+						       bch_ptr_bad);
 			if (k)
 				btree_node_prefetch(b->c, k, b->level - 1);
 
@@ -1774,235 +1784,36 @@
 
 /* Btree insertion */
 
-static void shift_keys(struct btree *b, struct bkey *where, struct bkey *insert)
+static bool btree_insert_key(struct btree *b, struct bkey *k,
+			     struct bkey *replace_key)
 {
-	struct bset *i = b->sets[b->nsets].data;
-
-	memmove((uint64_t *) where + bkey_u64s(insert),
-		where,
-		(void *) end(i) - (void *) where);
-
-	i->keys += bkey_u64s(insert);
-	bkey_copy(where, insert);
-	bch_bset_fix_lookup_table(b, where);
-}
-
-static bool fix_overlapping_extents(struct btree *b, struct bkey *insert,
-				    struct btree_iter *iter,
-				    struct bkey *replace_key)
-{
-	void subtract_dirty(struct bkey *k, uint64_t offset, int sectors)
-	{
-		if (KEY_DIRTY(k))
-			bcache_dev_sectors_dirty_add(b->c, KEY_INODE(k),
-						     offset, -sectors);
-	}
-
-	uint64_t old_offset;
-	unsigned old_size, sectors_found = 0;
-
-	while (1) {
-		struct bkey *k = bch_btree_iter_next(iter);
-		if (!k ||
-		    bkey_cmp(&START_KEY(k), insert) >= 0)
-			break;
-
-		if (bkey_cmp(k, &START_KEY(insert)) <= 0)
-			continue;
-
-		old_offset = KEY_START(k);
-		old_size = KEY_SIZE(k);
-
-		/*
-		 * We might overlap with 0 size extents; we can't skip these
-		 * because if they're in the set we're inserting to we have to
-		 * adjust them so they don't overlap with the key we're
-		 * inserting. But we don't want to check them for replace
-		 * operations.
-		 */
-
-		if (replace_key && KEY_SIZE(k)) {
-			/*
-			 * k might have been split since we inserted/found the
-			 * key we're replacing
-			 */
-			unsigned i;
-			uint64_t offset = KEY_START(k) -
-				KEY_START(replace_key);
-
-			/* But it must be a subset of the replace key */
-			if (KEY_START(k) < KEY_START(replace_key) ||
-			    KEY_OFFSET(k) > KEY_OFFSET(replace_key))
-				goto check_failed;
-
-			/* We didn't find a key that we were supposed to */
-			if (KEY_START(k) > KEY_START(insert) + sectors_found)
-				goto check_failed;
-
-			if (KEY_PTRS(k) != KEY_PTRS(replace_key) ||
-			    KEY_DIRTY(k) != KEY_DIRTY(replace_key))
-				goto check_failed;
-
-			/* skip past gen */
-			offset <<= 8;
-
-			BUG_ON(!KEY_PTRS(replace_key));
-
-			for (i = 0; i < KEY_PTRS(replace_key); i++)
-				if (k->ptr[i] != replace_key->ptr[i] + offset)
-					goto check_failed;
-
-			sectors_found = KEY_OFFSET(k) - KEY_START(insert);
-		}
-
-		if (bkey_cmp(insert, k) < 0 &&
-		    bkey_cmp(&START_KEY(insert), &START_KEY(k)) > 0) {
-			/*
-			 * We overlapped in the middle of an existing key: that
-			 * means we have to split the old key. But we have to do
-			 * slightly different things depending on whether the
-			 * old key has been written out yet.
-			 */
-
-			struct bkey *top;
-
-			subtract_dirty(k, KEY_START(insert), KEY_SIZE(insert));
-
-			if (bkey_written(b, k)) {
-				/*
-				 * We insert a new key to cover the top of the
-				 * old key, and the old key is modified in place
-				 * to represent the bottom split.
-				 *
-				 * It's completely arbitrary whether the new key
-				 * is the top or the bottom, but it has to match
-				 * up with what btree_sort_fixup() does - it
-				 * doesn't check for this kind of overlap, it
-				 * depends on us inserting a new key for the top
-				 * here.
-				 */
-				top = bch_bset_search(b, &b->sets[b->nsets],
-						      insert);
-				shift_keys(b, top, k);
-			} else {
-				BKEY_PADDED(key) temp;
-				bkey_copy(&temp.key, k);
-				shift_keys(b, k, &temp.key);
-				top = bkey_next(k);
-			}
-
-			bch_cut_front(insert, top);
-			bch_cut_back(&START_KEY(insert), k);
-			bch_bset_fix_invalidated_key(b, k);
-			return false;
-		}
-
-		if (bkey_cmp(insert, k) < 0) {
-			bch_cut_front(insert, k);
-		} else {
-			if (bkey_cmp(&START_KEY(insert), &START_KEY(k)) > 0)
-				old_offset = KEY_START(insert);
-
-			if (bkey_written(b, k) &&
-			    bkey_cmp(&START_KEY(insert), &START_KEY(k)) <= 0) {
-				/*
-				 * Completely overwrote, so we don't have to
-				 * invalidate the binary search tree
-				 */
-				bch_cut_front(k, k);
-			} else {
-				__bch_cut_back(&START_KEY(insert), k);
-				bch_bset_fix_invalidated_key(b, k);
-			}
-		}
-
-		subtract_dirty(k, old_offset, old_size - KEY_SIZE(k));
-	}
-
-check_failed:
-	if (replace_key) {
-		if (!sectors_found) {
-			return true;
-		} else if (sectors_found < KEY_SIZE(insert)) {
-			SET_KEY_OFFSET(insert, KEY_OFFSET(insert) -
-				       (KEY_SIZE(insert) - sectors_found));
-			SET_KEY_SIZE(insert, sectors_found);
-		}
-	}
-
-	return false;
-}
-
-static bool btree_insert_key(struct btree *b, struct btree_op *op,
-			     struct bkey *k, struct bkey *replace_key)
-{
-	struct bset *i = b->sets[b->nsets].data;
-	struct bkey *m, *prev;
-	unsigned status = BTREE_INSERT_STATUS_INSERT;
+	unsigned status;
 
 	BUG_ON(bkey_cmp(k, &b->key) > 0);
-	BUG_ON(b->level && !KEY_PTRS(k));
-	BUG_ON(!b->level && !KEY_OFFSET(k));
 
-	if (!b->level) {
-		struct btree_iter iter;
+	status = bch_btree_insert_key(&b->keys, k, replace_key);
+	if (status != BTREE_INSERT_STATUS_NO_INSERT) {
+		bch_check_keys(&b->keys, "%u for %s", status,
+			       replace_key ? "replace" : "insert");
 
-		/*
-		 * bset_search() returns the first key that is strictly greater
-		 * than the search key - but for back merging, we want to find
-		 * the previous key.
-		 */
-		prev = NULL;
-		m = bch_btree_iter_init(b, &iter, PRECEDING_KEY(&START_KEY(k)));
+		trace_bcache_btree_insert_key(b, k, replace_key != NULL,
+					      status);
+		return true;
+	} else
+		return false;
+}
 
-		if (fix_overlapping_extents(b, k, &iter, replace_key)) {
-			op->insert_collision = true;
-			return false;
-		}
+static size_t insert_u64s_remaining(struct btree *b)
+{
+	long ret = bch_btree_keys_u64s_remaining(&b->keys);
 
-		if (KEY_DIRTY(k))
-			bcache_dev_sectors_dirty_add(b->c, KEY_INODE(k),
-						     KEY_START(k), KEY_SIZE(k));
+	/*
+	 * Might land in the middle of an existing extent and have to split it
+	 */
+	if (b->keys.ops->is_extents)
+		ret -= KEY_MAX_U64S;
 
-		while (m != end(i) &&
-		       bkey_cmp(k, &START_KEY(m)) > 0)
-			prev = m, m = bkey_next(m);
-
-		if (key_merging_disabled(b->c))
-			goto insert;
-
-		/* prev is in the tree, if we merge we're done */
-		status = BTREE_INSERT_STATUS_BACK_MERGE;
-		if (prev &&
-		    bch_bkey_try_merge(b, prev, k))
-			goto merged;
-
-		status = BTREE_INSERT_STATUS_OVERWROTE;
-		if (m != end(i) &&
-		    KEY_PTRS(m) == KEY_PTRS(k) && !KEY_SIZE(m))
-			goto copy;
-
-		status = BTREE_INSERT_STATUS_FRONT_MERGE;
-		if (m != end(i) &&
-		    bch_bkey_try_merge(b, k, m))
-			goto copy;
-	} else {
-		BUG_ON(replace_key);
-		m = bch_bset_search(b, &b->sets[b->nsets], k);
-	}
-
-insert:	shift_keys(b, m, k);
-copy:	bkey_copy(m, k);
-merged:
-	bch_check_keys(b, "%u for %s", status,
-		       replace_key ? "replace" : "insert");
-
-	if (b->level && !KEY_OFFSET(k))
-		btree_current_write(b)->prio_blocked++;
-
-	trace_bcache_btree_insert_key(b, k, replace_key != NULL, status);
-
-	return true;
+	return max(ret, 0L);
 }
 
 static bool bch_btree_insert_keys(struct btree *b, struct btree_op *op,
@@ -2010,21 +1821,19 @@
 				  struct bkey *replace_key)
 {
 	bool ret = false;
-	int oldsize = bch_count_data(b);
+	int oldsize = bch_count_data(&b->keys);
 
 	while (!bch_keylist_empty(insert_keys)) {
-		struct bset *i = write_block(b);
 		struct bkey *k = insert_keys->keys;
 
-		if (b->written + __set_blocks(i, i->keys + bkey_u64s(k), b->c)
-		    > btree_blocks(b))
+		if (bkey_u64s(k) > insert_u64s_remaining(b))
 			break;
 
 		if (bkey_cmp(k, &b->key) <= 0) {
 			if (!b->level)
 				bkey_put(b->c, k);
 
-			ret |= btree_insert_key(b, op, k, replace_key);
+			ret |= btree_insert_key(b, k, replace_key);
 			bch_keylist_pop_front(insert_keys);
 		} else if (bkey_cmp(&START_KEY(k), &b->key) < 0) {
 			BKEY_PADDED(key) temp;
@@ -2033,16 +1842,19 @@
 			bch_cut_back(&b->key, &temp.key);
 			bch_cut_front(&b->key, insert_keys->keys);
 
-			ret |= btree_insert_key(b, op, &temp.key, replace_key);
+			ret |= btree_insert_key(b, &temp.key, replace_key);
 			break;
 		} else {
 			break;
 		}
 	}
 
+	if (!ret)
+		op->insert_collision = true;
+
 	BUG_ON(!bch_keylist_empty(insert_keys) && b->level);
 
-	BUG_ON(bch_count_data(b) < oldsize);
+	BUG_ON(bch_count_data(&b->keys) < oldsize);
 	return ret;
 }
 
@@ -2059,16 +1871,21 @@
 	closure_init_stack(&cl);
 	bch_keylist_init(&parent_keys);
 
+	if (!b->level &&
+	    btree_check_reserve(b, op))
+		return -EINTR;
+
 	n1 = btree_node_alloc_replacement(b, true);
 	if (IS_ERR(n1))
 		goto err;
 
-	split = set_blocks(n1->sets[0].data, n1->c) > (btree_blocks(b) * 4) / 5;
+	split = set_blocks(btree_bset_first(n1),
+			   block_bytes(n1->c)) > (btree_blocks(b) * 4) / 5;
 
 	if (split) {
 		unsigned keys = 0;
 
-		trace_bcache_btree_node_split(b, n1->sets[0].data->keys);
+		trace_bcache_btree_node_split(b, btree_bset_first(n1)->keys);
 
 		n2 = bch_btree_node_alloc(b->c, b->level, true);
 		if (IS_ERR(n2))
@@ -2087,18 +1904,20 @@
 		 * search tree yet
 		 */
 
-		while (keys < (n1->sets[0].data->keys * 3) / 5)
-			keys += bkey_u64s(node(n1->sets[0].data, keys));
+		while (keys < (btree_bset_first(n1)->keys * 3) / 5)
+			keys += bkey_u64s(bset_bkey_idx(btree_bset_first(n1),
+							keys));
 
-		bkey_copy_key(&n1->key, node(n1->sets[0].data, keys));
-		keys += bkey_u64s(node(n1->sets[0].data, keys));
+		bkey_copy_key(&n1->key,
+			      bset_bkey_idx(btree_bset_first(n1), keys));
+		keys += bkey_u64s(bset_bkey_idx(btree_bset_first(n1), keys));
 
-		n2->sets[0].data->keys = n1->sets[0].data->keys - keys;
-		n1->sets[0].data->keys = keys;
+		btree_bset_first(n2)->keys = btree_bset_first(n1)->keys - keys;
+		btree_bset_first(n1)->keys = keys;
 
-		memcpy(n2->sets[0].data->start,
-		       end(n1->sets[0].data),
-		       n2->sets[0].data->keys * sizeof(uint64_t));
+		memcpy(btree_bset_first(n2)->start,
+		       bset_bkey_last(btree_bset_first(n1)),
+		       btree_bset_first(n2)->keys * sizeof(uint64_t));
 
 		bkey_copy_key(&n2->key, &b->key);
 
@@ -2106,7 +1925,7 @@
 		bch_btree_node_write(n2, &cl);
 		rw_unlock(true, n2);
 	} else {
-		trace_bcache_btree_node_compact(b, n1->sets[0].data->keys);
+		trace_bcache_btree_node_compact(b, btree_bset_first(n1)->keys);
 
 		bch_btree_insert_keys(n1, op, insert_keys, replace_key);
 	}
@@ -2149,18 +1968,21 @@
 
 	return 0;
 err_free2:
+	bkey_put(b->c, &n2->key);
 	btree_node_free(n2);
 	rw_unlock(true, n2);
 err_free1:
+	bkey_put(b->c, &n1->key);
 	btree_node_free(n1);
 	rw_unlock(true, n1);
 err:
+	WARN(1, "bcache: btree split failed");
+
 	if (n3 == ERR_PTR(-EAGAIN) ||
 	    n2 == ERR_PTR(-EAGAIN) ||
 	    n1 == ERR_PTR(-EAGAIN))
 		return -EAGAIN;
 
-	pr_warn("couldn't split");
 	return -ENOMEM;
 }
 
@@ -2171,7 +1993,7 @@
 {
 	BUG_ON(b->level && replace_key);
 
-	if (should_split(b)) {
+	if (bch_keylist_nkeys(insert_keys) > insert_u64s_remaining(b)) {
 		if (current->bio_list) {
 			op->lock = b->c->root->level + 1;
 			return -EAGAIN;
@@ -2180,11 +2002,13 @@
 			return -EINTR;
 		} else {
 			/* Invalidated all iterators */
-			return btree_split(b, op, insert_keys, replace_key) ?:
-				-EINTR;
+			int ret = btree_split(b, op, insert_keys, replace_key);
+
+			return bch_keylist_empty(insert_keys) ?
+				0 : ret ?: -EINTR;
 		}
 	} else {
-		BUG_ON(write_block(b) != b->sets[b->nsets].data);
+		BUG_ON(write_block(b) != btree_bset_last(b));
 
 		if (bch_btree_insert_keys(b, op, insert_keys, replace_key)) {
 			if (!b->level)
@@ -2323,9 +2147,9 @@
 		struct bkey *k;
 		struct btree_iter iter;
 
-		bch_btree_iter_init(b, &iter, from);
+		bch_btree_iter_init(&b->keys, &iter, from);
 
-		while ((k = bch_btree_iter_next_filter(&iter, b,
+		while ((k = bch_btree_iter_next_filter(&iter, &b->keys,
 						       bch_ptr_bad))) {
 			ret = btree(map_nodes_recurse, k, b,
 				    op, from, fn, flags);
@@ -2356,9 +2180,9 @@
 	struct bkey *k;
 	struct btree_iter iter;
 
-	bch_btree_iter_init(b, &iter, from);
+	bch_btree_iter_init(&b->keys, &iter, from);
 
-	while ((k = bch_btree_iter_next_filter(&iter, b, bch_ptr_bad))) {
+	while ((k = bch_btree_iter_next_filter(&iter, &b->keys, bch_ptr_bad))) {
 		ret = !b->level
 			? fn(op, b, k)
 			: btree(map_keys_recurse, k, b, op, from, fn, flags);
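
insert_u64s_remaining() above replaces the old should_split() heuristic: rather than asking whether the node looks nearly full, the caller compares the keys it wants to insert against the space actually left, minus headroom for one worst-case key in case the insert lands inside an existing extent and forces a split. A rough standalone sketch of that capacity check (illustrative names, arbitrary sizes, not the kernel code):

	#include <stdbool.h>
	#include <stdio.h>

	#define NODE_CAPACITY_U64S	256	/* arbitrary node size for the sketch */
	#define KEY_MAX_U64S		8	/* worst-case size of a single key */

	struct node {
		unsigned used_u64s;	/* u64s already occupied by keys */
		bool holds_extents;	/* an insert may split an existing extent */
	};

	static long insert_u64s_remaining(const struct node *n)
	{
		long ret = (long) NODE_CAPACITY_U64S - n->used_u64s;

		/* an insert landing inside an extent needs room for the split */
		if (n->holds_extents)
			ret -= KEY_MAX_U64S;

		return ret > 0 ? ret : 0;
	}

	int main(void)
	{
		struct node n = { .used_u64s = 250, .holds_extents = true };
		unsigned batch = 4;	/* u64s we would like to insert */

		if ((long) batch > insert_u64s_remaining(&n))
			printf("split the node first\n");
		else
			printf("insert fits\n");
		return 0;
	}
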
diff --git a/drivers/md/bcache/btree.h b/drivers/md/bcache/btree.h
index 767e755..af065e9 100644
--- a/drivers/md/bcache/btree.h
+++ b/drivers/md/bcache/btree.h
@@ -130,20 +130,12 @@
 	unsigned long		flags;
 	uint16_t		written;	/* would be nice to kill */
 	uint8_t			level;
-	uint8_t			nsets;
-	uint8_t			page_order;
 
-	/*
-	 * Set of sorted keys - the real btree node - plus a binary search tree
-	 *
-	 * sets[0] is special; set[0]->tree, set[0]->prev and set[0]->data point
-	 * to the memory we have allocated for this btree node. Additionally,
-	 * set[0]->data points to the entire btree node as it exists on disk.
-	 */
-	struct bset_tree	sets[MAX_BSETS];
+	struct btree_keys	keys;
 
 	/* For outstanding btree writes, used as a lock - protects write_idx */
-	struct closure_with_waitlist	io;
+	struct closure		io;
+	struct semaphore	io_mutex;
 
 	struct list_head	list;
 	struct delayed_work	work;
@@ -179,24 +171,19 @@
 	return b->writes + (btree_node_write_idx(b) ^ 1);
 }
 
-static inline unsigned bset_offset(struct btree *b, struct bset *i)
+static inline struct bset *btree_bset_first(struct btree *b)
 {
-	return (((size_t) i) - ((size_t) b->sets->data)) >> 9;
+	return b->keys.set->data;
 }
 
-static inline struct bset *write_block(struct btree *b)
+static inline struct bset *btree_bset_last(struct btree *b)
 {
-	return ((void *) b->sets[0].data) + b->written * block_bytes(b->c);
+	return bset_tree_last(&b->keys)->data;
 }
 
-static inline bool bset_written(struct btree *b, struct bset_tree *t)
+static inline unsigned bset_block_offset(struct btree *b, struct bset *i)
 {
-	return t->data < write_block(b);
-}
-
-static inline bool bkey_written(struct btree *b, struct bkey *k)
-{
-	return k < write_block(b)->start;
+	return bset_sector_offset(&b->keys, i) >> b->c->block_bits;
 }
 
 static inline void set_gc_sectors(struct cache_set *c)
@@ -204,21 +191,6 @@
 	atomic_set(&c->sectors_to_gc, c->sb.bucket_size * c->nbuckets / 16);
 }
 
-static inline struct bkey *bch_btree_iter_init(struct btree *b,
-					       struct btree_iter *iter,
-					       struct bkey *search)
-{
-	return __bch_btree_iter_init(b, iter, search, b->sets);
-}
-
-static inline bool bch_ptr_invalid(struct btree *b, const struct bkey *k)
-{
-	if (b->level)
-		return bch_btree_ptr_invalid(b->c, k);
-	else
-		return bch_extent_ptr_invalid(b->c, k);
-}
-
 void bkey_put(struct cache_set *c, struct bkey *k);
 
 /* Looping macros */
@@ -229,17 +201,12 @@
 	     iter++)							\
 		hlist_for_each_entry_rcu((b), (c)->bucket_hash + iter, hash)
 
-#define for_each_key_filter(b, k, iter, filter)				\
-	for (bch_btree_iter_init((b), (iter), NULL);			\
-	     ((k) = bch_btree_iter_next_filter((iter), b, filter));)
-
-#define for_each_key(b, k, iter)					\
-	for (bch_btree_iter_init((b), (iter), NULL);			\
-	     ((k) = bch_btree_iter_next(iter));)
-
 /* Recursing down the btree */
 
 struct btree_op {
+	/* for waiting on btree reserve in btree_split() */
+	wait_queue_t		wait;
+
 	/* Btree level at which we start taking write locks */
 	short			lock;
 
@@ -249,6 +216,7 @@
 static inline void bch_btree_op_init(struct btree_op *op, int write_lock_level)
 {
 	memset(op, 0, sizeof(struct btree_op));
+	init_wait(&op->wait);
 	op->lock = write_lock_level;
 }
 
@@ -267,7 +235,7 @@
 	(w ? up_write : up_read)(&b->lock);
 }
 
-void bch_btree_node_read(struct btree *);
+void bch_btree_node_read_done(struct btree *);
 void bch_btree_node_write(struct btree *, struct closure *);
 
 void bch_btree_set_root(struct btree *);
diff --git a/drivers/md/bcache/closure.c b/drivers/md/bcache/closure.c
index dfff241..7a228de 100644
--- a/drivers/md/bcache/closure.c
+++ b/drivers/md/bcache/closure.c
@@ -11,19 +11,6 @@
 
 #include "closure.h"
 
-#define CL_FIELD(type, field)					\
-	case TYPE_ ## type:					\
-	return &container_of(cl, struct type, cl)->field
-
-static struct closure_waitlist *closure_waitlist(struct closure *cl)
-{
-	switch (cl->type) {
-		CL_FIELD(closure_with_waitlist, wait);
-	default:
-		return NULL;
-	}
-}
-
 static inline void closure_put_after_sub(struct closure *cl, int flags)
 {
 	int r = flags & CLOSURE_REMAINING_MASK;
@@ -42,17 +29,10 @@
 			closure_queue(cl);
 		} else {
 			struct closure *parent = cl->parent;
-			struct closure_waitlist *wait = closure_waitlist(cl);
 			closure_fn *destructor = cl->fn;
 
 			closure_debug_destroy(cl);
 
-			smp_mb();
-			atomic_set(&cl->remaining, -1);
-
-			if (wait)
-				closure_wake_up(wait);
-
 			if (destructor)
 				destructor(cl);
 
@@ -69,19 +49,18 @@
 }
 EXPORT_SYMBOL(closure_sub);
 
+/**
+ * closure_put - decrement a closure's refcount
+ */
 void closure_put(struct closure *cl)
 {
 	closure_put_after_sub(cl, atomic_dec_return(&cl->remaining));
 }
 EXPORT_SYMBOL(closure_put);
 
-static void set_waiting(struct closure *cl, unsigned long f)
-{
-#ifdef CONFIG_BCACHE_CLOSURES_DEBUG
-	cl->waiting_on = f;
-#endif
-}
-
+/**
+ * __closure_wake_up - wake up all closures on a wait list, without memory barrier
+ */
 void __closure_wake_up(struct closure_waitlist *wait_list)
 {
 	struct llist_node *list;
@@ -106,27 +85,34 @@
 		cl = container_of(reverse, struct closure, list);
 		reverse = llist_next(reverse);
 
-		set_waiting(cl, 0);
+		closure_set_waiting(cl, 0);
 		closure_sub(cl, CLOSURE_WAITING + 1);
 	}
 }
 EXPORT_SYMBOL(__closure_wake_up);
 
-bool closure_wait(struct closure_waitlist *list, struct closure *cl)
+/**
+ * closure_wait - add a closure to a waitlist
+ *
+ * @waitlist will own a ref on @cl, which will be released when
+ * closure_wake_up() is called on @waitlist.
+ *
+ */
+bool closure_wait(struct closure_waitlist *waitlist, struct closure *cl)
 {
 	if (atomic_read(&cl->remaining) & CLOSURE_WAITING)
 		return false;
 
-	set_waiting(cl, _RET_IP_);
+	closure_set_waiting(cl, _RET_IP_);
 	atomic_add(CLOSURE_WAITING + 1, &cl->remaining);
-	llist_add(&cl->list, &list->list);
+	llist_add(&cl->list, &waitlist->list);
 
 	return true;
 }
 EXPORT_SYMBOL(closure_wait);
 
 /**
- * closure_sync() - sleep until a closure a closure has nothing left to wait on
+ * closure_sync - sleep until a closure has nothing left to wait on
  *
  * Sleeps until the refcount hits 1 - the thread that's running the closure owns
  * the last refcount.
@@ -148,46 +134,6 @@
 }
 EXPORT_SYMBOL(closure_sync);
 
-/**
- * closure_trylock() - try to acquire the closure, without waiting
- * @cl:		closure to lock
- *
- * Returns true if the closure was succesfully locked.
- */
-bool closure_trylock(struct closure *cl, struct closure *parent)
-{
-	if (atomic_cmpxchg(&cl->remaining, -1,
-			   CLOSURE_REMAINING_INITIALIZER) != -1)
-		return false;
-
-	smp_mb();
-
-	cl->parent = parent;
-	if (parent)
-		closure_get(parent);
-
-	closure_set_ret_ip(cl);
-	closure_debug_create(cl);
-	return true;
-}
-EXPORT_SYMBOL(closure_trylock);
-
-void __closure_lock(struct closure *cl, struct closure *parent,
-		    struct closure_waitlist *wait_list)
-{
-	struct closure wait;
-	closure_init_stack(&wait);
-
-	while (1) {
-		if (closure_trylock(cl, parent))
-			return;
-
-		closure_wait_event(wait_list, &wait,
-				   atomic_read(&cl->remaining) == -1);
-	}
-}
-EXPORT_SYMBOL(__closure_lock);
-
 #ifdef CONFIG_BCACHE_CLOSURES_DEBUG
 
 static LIST_HEAD(closure_list);
diff --git a/drivers/md/bcache/closure.h b/drivers/md/bcache/closure.h
index 9762f1b..7ef7461 100644
--- a/drivers/md/bcache/closure.h
+++ b/drivers/md/bcache/closure.h
@@ -72,30 +72,6 @@
  * closure - _always_ use continue_at(). Doing so consistently will help
  * eliminate an entire class of particularly pernicious races.
  *
- * For a closure to wait on an arbitrary event, we need to introduce waitlists:
- *
- * struct closure_waitlist list;
- * closure_wait_event(list, cl, condition);
- * closure_wake_up(wait_list);
- *
- * These work analagously to wait_event() and wake_up() - except that instead of
- * operating on the current thread (for wait_event()) and lists of threads, they
- * operate on an explicit closure and lists of closures.
- *
- * Because it's a closure we can now wait either synchronously or
- * asynchronously. closure_wait_event() returns the current value of the
- * condition, and if it returned false continue_at() or closure_sync() can be
- * used to wait for it to become true.
- *
- * It's useful for waiting on things when you can't sleep in the context in
- * which you must check the condition (perhaps a spinlock held, or you might be
- * beneath generic_make_request() - in which case you can't sleep on IO).
- *
- * closure_wait_event() will wait either synchronously or asynchronously,
- * depending on whether the closure is in blocking mode or not. You can pick a
- * mode explicitly with closure_wait_event_sync() and
- * closure_wait_event_async(), which do just what you might expect.
- *
  * Lastly, you might have a wait list dedicated to a specific event, and have no
  * need for specifying the condition - you just want to wait until someone runs
  * closure_wake_up() on the appropriate wait list. In that case, just use
@@ -121,40 +97,6 @@
  * All this implies that a closure should typically be embedded in a particular
  * struct (which its refcount will normally control the lifetime of), and that
  * struct can very much be thought of as a stack frame.
- *
- * Locking:
- *
- * Closures are based on work items but they can be thought of as more like
- * threads - in that like threads and unlike work items they have a well
- * defined lifetime; they are created (with closure_init()) and eventually
- * complete after a continue_at(cl, NULL, NULL).
- *
- * Suppose you've got some larger structure with a closure embedded in it that's
- * used for periodically doing garbage collection. You only want one garbage
- * collection happening at a time, so the natural thing to do is protect it with
- * a lock. However, it's difficult to use a lock protecting a closure correctly
- * because the unlock should come after the last continue_to() (additionally, if
- * you're using the closure asynchronously a mutex won't work since a mutex has
- * to be unlocked by the same process that locked it).
- *
- * So to make it less error prone and more efficient, we also have the ability
- * to use closures as locks:
- *
- * closure_init_unlocked();
- * closure_trylock();
- *
- * That's all we need for trylock() - the last closure_put() implicitly unlocks
- * it for you.  But for closure_lock(), we also need a wait list:
- *
- * struct closure_with_waitlist frobnicator_cl;
- *
- * closure_init_unlocked(&frobnicator_cl);
- * closure_lock(&frobnicator_cl);
- *
- * A closure_with_waitlist embeds a closure and a wait list - much like struct
- * delayed_work embeds a work item and a timer_list. The important thing is, use
- * it exactly like you would a regular closure and closure_put() will magically
- * handle everything for you.
  */
 
 struct closure;
@@ -164,12 +106,6 @@
 	struct llist_head	list;
 };
 
-enum closure_type {
-	TYPE_closure				= 0,
-	TYPE_closure_with_waitlist		= 1,
-	MAX_CLOSURE_TYPE			= 1,
-};
-
 enum closure_state {
 	/*
 	 * CLOSURE_WAITING: Set iff the closure is on a waitlist. Must be set by
@@ -224,8 +160,6 @@
 
 	atomic_t		remaining;
 
-	enum closure_type	type;
-
 #ifdef CONFIG_BCACHE_CLOSURES_DEBUG
 #define CLOSURE_MAGIC_DEAD	0xc054dead
 #define CLOSURE_MAGIC_ALIVE	0xc054a11e
@@ -237,34 +171,12 @@
 #endif
 };
 
-struct closure_with_waitlist {
-	struct closure		cl;
-	struct closure_waitlist	wait;
-};
-
-extern unsigned invalid_closure_type(void);
-
-#define __CLOSURE_TYPE(cl, _t)						\
-	  __builtin_types_compatible_p(typeof(cl), struct _t)		\
-		? TYPE_ ## _t :						\
-
-#define __closure_type(cl)						\
-(									\
-	__CLOSURE_TYPE(cl, closure)					\
-	__CLOSURE_TYPE(cl, closure_with_waitlist)			\
-	invalid_closure_type()						\
-)
-
 void closure_sub(struct closure *cl, int v);
 void closure_put(struct closure *cl);
 void __closure_wake_up(struct closure_waitlist *list);
 bool closure_wait(struct closure_waitlist *list, struct closure *cl);
 void closure_sync(struct closure *cl);
 
-bool closure_trylock(struct closure *cl, struct closure *parent);
-void __closure_lock(struct closure *cl, struct closure *parent,
-		    struct closure_waitlist *wait_list);
-
 #ifdef CONFIG_BCACHE_CLOSURES_DEBUG
 
 void closure_debug_init(void);
@@ -293,114 +205,13 @@
 #endif
 }
 
-static inline void closure_get(struct closure *cl)
+static inline void closure_set_waiting(struct closure *cl, unsigned long f)
 {
 #ifdef CONFIG_BCACHE_CLOSURES_DEBUG
-	BUG_ON((atomic_inc_return(&cl->remaining) &
-		CLOSURE_REMAINING_MASK) <= 1);
-#else
-	atomic_inc(&cl->remaining);
+	cl->waiting_on = f;
 #endif
 }
 
-static inline void closure_set_stopped(struct closure *cl)
-{
-	atomic_sub(CLOSURE_RUNNING, &cl->remaining);
-}
-
-static inline bool closure_is_unlocked(struct closure *cl)
-{
-	return atomic_read(&cl->remaining) == -1;
-}
-
-static inline void do_closure_init(struct closure *cl, struct closure *parent,
-				   bool running)
-{
-	cl->parent = parent;
-	if (parent)
-		closure_get(parent);
-
-	if (running) {
-		closure_debug_create(cl);
-		atomic_set(&cl->remaining, CLOSURE_REMAINING_INITIALIZER);
-	} else
-		atomic_set(&cl->remaining, -1);
-
-	closure_set_ip(cl);
-}
-
-/*
- * Hack to get at the embedded closure if there is one, by doing an unsafe cast:
- * the result of __closure_type() is thrown away, it's used merely for type
- * checking.
- */
-#define __to_internal_closure(cl)				\
-({								\
-	BUILD_BUG_ON(__closure_type(*cl) > MAX_CLOSURE_TYPE);	\
-	(struct closure *) cl;					\
-})
-
-#define closure_init_type(cl, parent, running)			\
-do {								\
-	struct closure *_cl = __to_internal_closure(cl);	\
-	_cl->type = __closure_type(*(cl));			\
-	do_closure_init(_cl, parent, running);			\
-} while (0)
-
-/**
- * __closure_init() - Initialize a closure, skipping the memset()
- *
- * May be used instead of closure_init() when memory has already been zeroed.
- */
-#define __closure_init(cl, parent)				\
-	closure_init_type(cl, parent, true)
-
-/**
- * closure_init() - Initialize a closure, setting the refcount to 1
- * @cl:		closure to initialize
- * @parent:	parent of the new closure. cl will take a refcount on it for its
- *		lifetime; may be NULL.
- */
-#define closure_init(cl, parent)				\
-do {								\
-	memset((cl), 0, sizeof(*(cl)));				\
-	__closure_init(cl, parent);				\
-} while (0)
-
-static inline void closure_init_stack(struct closure *cl)
-{
-	memset(cl, 0, sizeof(struct closure));
-	atomic_set(&cl->remaining, CLOSURE_REMAINING_INITIALIZER|CLOSURE_STACK);
-}
-
-/**
- * closure_init_unlocked() - Initialize a closure but leave it unlocked.
- * @cl:		closure to initialize
- *
- * For when the closure will be used as a lock. The closure may not be used
- * until after a closure_lock() or closure_trylock().
- */
-#define closure_init_unlocked(cl)				\
-do {								\
-	memset((cl), 0, sizeof(*(cl)));				\
-	closure_init_type(cl, NULL, false);			\
-} while (0)
-
-/**
- * closure_lock() - lock and initialize a closure.
- * @cl:		the closure to lock
- * @parent:	the new parent for this closure
- *
- * The closure must be of one of the types that has a waitlist (otherwise we
- * wouldn't be able to sleep on contention).
- *
- * @parent has exactly the same meaning as in closure_init(); if non null, the
- * closure will take a reference on @parent which will be released when it is
- * unlocked.
- */
-#define closure_lock(cl, parent)				\
-	__closure_lock(__to_internal_closure(cl), parent, &(cl)->wait)
-
 static inline void __closure_end_sleep(struct closure *cl)
 {
 	__set_current_state(TASK_RUNNING);
@@ -419,65 +230,9 @@
 		atomic_add(CLOSURE_SLEEPING, &cl->remaining);
 }
 
-/**
- * closure_wake_up() - wake up all closures on a wait list.
- */
-static inline void closure_wake_up(struct closure_waitlist *list)
+static inline void closure_set_stopped(struct closure *cl)
 {
-	smp_mb();
-	__closure_wake_up(list);
-}
-
-/*
- * Wait on an event, synchronously or asynchronously - analogous to wait_event()
- * but for closures.
- *
- * The loop is oddly structured so as to avoid a race; we must check the
- * condition again after we've added ourself to the waitlist. We know if we were
- * already on the waitlist because closure_wait() returns false; thus, we only
- * schedule or break if closure_wait() returns false. If it returns true, we
- * just loop again - rechecking the condition.
- *
- * The __closure_wake_up() is necessary because we may race with the event
- * becoming true; i.e. we see event false -> wait -> recheck condition, but the
- * thread that made the event true may have called closure_wake_up() before we
- * added ourself to the wait list.
- *
- * We have to call closure_sync() at the end instead of just
- * __closure_end_sleep() because a different thread might've called
- * closure_wake_up() before us and gotten preempted before they dropped the
- * refcount on our closure. If this was a stack allocated closure, that would be
- * bad.
- */
-#define closure_wait_event(list, cl, condition)				\
-({									\
-	typeof(condition) ret;						\
-									\
-	while (1) {							\
-		ret = (condition);					\
-		if (ret) {						\
-			__closure_wake_up(list);			\
-			closure_sync(cl);				\
-			break;						\
-		}							\
-									\
-		__closure_start_sleep(cl);				\
-									\
-		if (!closure_wait(list, cl))				\
-			schedule();					\
-	}								\
-									\
-	ret;								\
-})
-
-static inline void closure_queue(struct closure *cl)
-{
-	struct workqueue_struct *wq = cl->wq;
-	if (wq) {
-		INIT_WORK(&cl->work, cl->work.func);
-		BUG_ON(!queue_work(wq, &cl->work));
-	} else
-		cl->fn(cl);
+	atomic_sub(CLOSURE_RUNNING, &cl->remaining);
 }
 
 static inline void set_closure_fn(struct closure *cl, closure_fn *fn,
@@ -491,6 +246,76 @@
 	smp_mb__before_atomic_dec();
 }
 
+static inline void closure_queue(struct closure *cl)
+{
+	struct workqueue_struct *wq = cl->wq;
+	if (wq) {
+		INIT_WORK(&cl->work, cl->work.func);
+		BUG_ON(!queue_work(wq, &cl->work));
+	} else
+		cl->fn(cl);
+}
+
+/**
+ * closure_get - increment a closure's refcount
+ */
+static inline void closure_get(struct closure *cl)
+{
+#ifdef CONFIG_BCACHE_CLOSURES_DEBUG
+	BUG_ON((atomic_inc_return(&cl->remaining) &
+		CLOSURE_REMAINING_MASK) <= 1);
+#else
+	atomic_inc(&cl->remaining);
+#endif
+}
+
+/**
+ * closure_init - Initialize a closure, setting the refcount to 1
+ * @cl:		closure to initialize
+ * @parent:	parent of the new closure. cl will take a refcount on it for its
+ *		lifetime; may be NULL.
+ */
+static inline void closure_init(struct closure *cl, struct closure *parent)
+{
+	memset(cl, 0, sizeof(struct closure));
+	cl->parent = parent;
+	if (parent)
+		closure_get(parent);
+
+	atomic_set(&cl->remaining, CLOSURE_REMAINING_INITIALIZER);
+
+	closure_debug_create(cl);
+	closure_set_ip(cl);
+}
+
+static inline void closure_init_stack(struct closure *cl)
+{
+	memset(cl, 0, sizeof(struct closure));
+	atomic_set(&cl->remaining, CLOSURE_REMAINING_INITIALIZER|CLOSURE_STACK);
+}
+
+/**
+ * closure_wake_up - wake up all closures on a wait list.
+ */
+static inline void closure_wake_up(struct closure_waitlist *list)
+{
+	smp_mb();
+	__closure_wake_up(list);
+}
+
+/**
+ * continue_at - jump to another function with barrier
+ *
+ * After @cl is no longer waiting on anything (i.e. all outstanding refs have
+ * been dropped with closure_put()), it will resume execution at @fn running out
+ * of @wq (or, if @wq is NULL, @fn will be called by closure_put() directly).
+ *
+ * NOTE: This macro expands to a return in the calling function!
+ *
+ * This is because after calling continue_at() you no longer have a ref on @cl,
+ * and whatever @cl owns may be freed out from under you - a running closure fn
+ * has a ref on its own closure which continue_at() drops.
+ */
 #define continue_at(_cl, _fn, _wq)					\
 do {									\
 	set_closure_fn(_cl, _fn, _wq);					\
@@ -498,8 +323,28 @@
 	return;								\
 } while (0)
 
+/**
+ * closure_return - finish execution of a closure
+ *
+ * This is used to indicate that @cl is finished: when all outstanding refs on
+ * @cl have been dropped @cl's ref on its parent closure (as passed to
+ * closure_init()) will be dropped, if one was specified - thus this can be
+ * thought of as returning to the parent closure.
+ */
 #define closure_return(_cl)	continue_at((_cl), NULL, NULL)
 
+/**
+ * continue_at_nobarrier - jump to another function without barrier
+ *
+ * Causes @fn to be executed out of @cl, in @wq context (or called directly if
+ * @wq is NULL).
+ *
+ * NOTE: like continue_at(), this macro expands to a return in the caller!
+ *
+ * The ref the caller of continue_at_nobarrier() had on @cl is now owned by @fn,
+ * thus it's not safe to touch anything protected by @cl after a
+ * continue_at_nobarrier().
+ */
 #define continue_at_nobarrier(_cl, _fn, _wq)				\
 do {									\
 	set_closure_fn(_cl, _fn, _wq);					\
@@ -507,6 +352,15 @@
 	return;								\
 } while (0)
 
+/**
+ * closure_return_with_destructor - finish execution of a closure, with destructor
+ *
+ * Works like closure_return(), except @destructor will be called when all
+ * outstanding refs on @cl have been dropped; @destructor may be used to safely
+ * free the memory occupied by @cl, and it is called with the ref on the parent
+ * closure still held - so @destructor could safely return an item to a
+ * freelist protected by @cl's parent.
+ */
 #define closure_return_with_destructor(_cl, _destructor)		\
 do {									\
 	set_closure_fn(_cl, _destructor, NULL);				\
@@ -514,6 +368,13 @@
 	return;								\
 } while (0)
 
+/**
+ * closure_call - execute @fn out of a new, uninitialized closure
+ *
+ * Typically used when running out of one closure, and we want to run @fn
+ * asynchronously out of a new closure - @parent will then wait for @cl to
+ * finish.
+ */
 static inline void closure_call(struct closure *cl, closure_fn fn,
 				struct workqueue_struct *wq,
 				struct closure *parent)
@@ -522,12 +383,4 @@
 	continue_at_nobarrier(cl, fn, wq);
 }
 
-static inline void closure_trylock_call(struct closure *cl, closure_fn fn,
-					struct workqueue_struct *wq,
-					struct closure *parent)
-{
-	if (closure_trylock(cl, parent))
-		continue_at_nobarrier(cl, fn, wq);
-}
-
 #endif /* _LINUX_CLOSURE_H */
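
For orientation, a minimal sketch of how the closure primitives documented
above fit together; struct foo, foo_read(), foo_read_done() and foo_free()
are invented names, not bcache code:

struct foo {
	struct closure	cl;
	/* ... */
};

static void foo_free(struct closure *cl)
{
	/* destructor: every ref is gone; the ref on the parent is still held */
	kfree(container_of(cl, struct foo, cl));
}

static void foo_read_done(struct closure *cl)
{
	/* runs out of @wq once every ref taken in foo_read() has been put */
	closure_return_with_destructor(cl, foo_free);
}

static void foo_read(struct foo *f, struct workqueue_struct *wq,
		     struct closure *parent)
{
	closure_init(&f->cl, parent);	/* refcount 1, takes a ref on parent */

	closure_get(&f->cl);		/* e.g. one ref per outstanding bio */
	/* ... submit I/O whose completion calls closure_put(&f->cl) ... */

	continue_at(&f->cl, foo_read_done, wq);	/* drops our ref and returns */
}
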
diff --git a/drivers/md/bcache/debug.c b/drivers/md/bcache/debug.c
index 264fcfb..8b1f1d5 100644
--- a/drivers/md/bcache/debug.c
+++ b/drivers/md/bcache/debug.c
@@ -8,6 +8,7 @@
 #include "bcache.h"
 #include "btree.h"
 #include "debug.h"
+#include "extents.h"
 
 #include <linux/console.h>
 #include <linux/debugfs.h>
@@ -17,163 +18,96 @@
 
 static struct dentry *debug;
 
-const char *bch_ptr_status(struct cache_set *c, const struct bkey *k)
-{
-	unsigned i;
-
-	for (i = 0; i < KEY_PTRS(k); i++)
-		if (ptr_available(c, k, i)) {
-			struct cache *ca = PTR_CACHE(c, k, i);
-			size_t bucket = PTR_BUCKET_NR(c, k, i);
-			size_t r = bucket_remainder(c, PTR_OFFSET(k, i));
-
-			if (KEY_SIZE(k) + r > c->sb.bucket_size)
-				return "bad, length too big";
-			if (bucket <  ca->sb.first_bucket)
-				return "bad, short offset";
-			if (bucket >= ca->sb.nbuckets)
-				return "bad, offset past end of device";
-			if (ptr_stale(c, k, i))
-				return "stale";
-		}
-
-	if (!bkey_cmp(k, &ZERO_KEY))
-		return "bad, null key";
-	if (!KEY_PTRS(k))
-		return "bad, no pointers";
-	if (!KEY_SIZE(k))
-		return "zeroed key";
-	return "";
-}
-
-int bch_bkey_to_text(char *buf, size_t size, const struct bkey *k)
-{
-	unsigned i = 0;
-	char *out = buf, *end = buf + size;
-
-#define p(...)	(out += scnprintf(out, end - out, __VA_ARGS__))
-
-	p("%llu:%llu len %llu -> [", KEY_INODE(k), KEY_OFFSET(k), KEY_SIZE(k));
-
-	if (KEY_PTRS(k))
-		while (1) {
-			p("%llu:%llu gen %llu",
-			  PTR_DEV(k, i), PTR_OFFSET(k, i), PTR_GEN(k, i));
-
-			if (++i == KEY_PTRS(k))
-				break;
-
-			p(", ");
-		}
-
-	p("]");
-
-	if (KEY_DIRTY(k))
-		p(" dirty");
-	if (KEY_CSUM(k))
-		p(" cs%llu %llx", KEY_CSUM(k), k->ptr[1]);
-#undef p
-	return out - buf;
-}
-
 #ifdef CONFIG_BCACHE_DEBUG
 
-static void dump_bset(struct btree *b, struct bset *i)
-{
-	struct bkey *k, *next;
-	unsigned j;
-	char buf[80];
+#define for_each_written_bset(b, start, i)				\
+	for (i = (start);						\
+	     (void *) i < (void *) (start) + (KEY_SIZE(&b->key) << 9) &&\
+	     i->seq == (start)->seq;					\
+	     i = (void *) i + set_blocks(i, block_bytes(b->c)) *	\
+		 block_bytes(b->c))
 
-	for (k = i->start; k < end(i); k = next) {
-		next = bkey_next(k);
-
-		bch_bkey_to_text(buf, sizeof(buf), k);
-		printk(KERN_ERR "block %zu key %zi/%u: %s", index(i, b),
-		       (uint64_t *) k - i->d, i->keys, buf);
-
-		for (j = 0; j < KEY_PTRS(k); j++) {
-			size_t n = PTR_BUCKET_NR(b->c, k, j);
-			printk(" bucket %zu", n);
-
-			if (n >= b->c->sb.first_bucket && n < b->c->sb.nbuckets)
-				printk(" prio %i",
-				       PTR_BUCKET(b->c, k, j)->prio);
-		}
-
-		printk(" %s\n", bch_ptr_status(b->c, k));
-
-		if (next < end(i) &&
-		    bkey_cmp(k, !b->level ? &START_KEY(next) : next) > 0)
-			printk(KERN_ERR "Key skipped backwards\n");
-	}
-}
-
-static void bch_dump_bucket(struct btree *b)
-{
-	unsigned i;
-
-	console_lock();
-	for (i = 0; i <= b->nsets; i++)
-		dump_bset(b, b->sets[i].data);
-	console_unlock();
-}
-
-void bch_btree_verify(struct btree *b, struct bset *new)
+void bch_btree_verify(struct btree *b)
 {
 	struct btree *v = b->c->verify_data;
-	struct closure cl;
-	closure_init_stack(&cl);
+	struct bset *ondisk, *sorted, *inmemory;
+	struct bio *bio;
 
-	if (!b->c->verify)
+	if (!b->c->verify || !b->c->verify_ondisk)
 		return;
 
-	closure_wait_event(&b->io.wait, &cl,
-			   atomic_read(&b->io.cl.remaining) == -1);
-
+	down(&b->io_mutex);
 	mutex_lock(&b->c->verify_lock);
 
+	ondisk = b->c->verify_ondisk;
+	sorted = b->c->verify_data->keys.set->data;
+	inmemory = b->keys.set->data;
+
 	bkey_copy(&v->key, &b->key);
 	v->written = 0;
 	v->level = b->level;
+	v->keys.ops = b->keys.ops;
 
-	bch_btree_node_read(v);
-	closure_wait_event(&v->io.wait, &cl,
-			   atomic_read(&b->io.cl.remaining) == -1);
+	bio = bch_bbio_alloc(b->c);
+	bio->bi_bdev		= PTR_CACHE(b->c, &b->key, 0)->bdev;
+	bio->bi_iter.bi_sector	= PTR_OFFSET(&b->key, 0);
+	bio->bi_iter.bi_size	= KEY_SIZE(&v->key) << 9;
+	bch_bio_map(bio, sorted);
 
-	if (new->keys != v->sets[0].data->keys ||
-	    memcmp(new->start,
-		   v->sets[0].data->start,
-		   (void *) end(new) - (void *) new->start)) {
-		unsigned i, j;
+	submit_bio_wait(REQ_META|READ_SYNC, bio);
+	bch_bbio_free(bio, b->c);
+
+	memcpy(ondisk, sorted, KEY_SIZE(&v->key) << 9);
+
+	bch_btree_node_read_done(v);
+	sorted = v->keys.set->data;
+
+	if (inmemory->keys != sorted->keys ||
+	    memcmp(inmemory->start,
+		   sorted->start,
+		   (void *) bset_bkey_last(inmemory) - (void *) inmemory->start)) {
+		struct bset *i;
+		unsigned j;
 
 		console_lock();
 
-		printk(KERN_ERR "*** original memory node:\n");
-		for (i = 0; i <= b->nsets; i++)
-			dump_bset(b, b->sets[i].data);
+		printk(KERN_ERR "*** in memory:\n");
+		bch_dump_bset(&b->keys, inmemory, 0);
 
-		printk(KERN_ERR "*** sorted memory node:\n");
-		dump_bset(b, new);
+		printk(KERN_ERR "*** read back in:\n");
+		bch_dump_bset(&v->keys, sorted, 0);
 
-		printk(KERN_ERR "*** on disk node:\n");
-		dump_bset(v, v->sets[0].data);
+		for_each_written_bset(b, ondisk, i) {
+			unsigned block = ((void *) i - (void *) ondisk) /
+				block_bytes(b->c);
 
-		for (j = 0; j < new->keys; j++)
-			if (new->d[j] != v->sets[0].data->d[j])
+			printk(KERN_ERR "*** on disk block %u:\n", block);
+			bch_dump_bset(&b->keys, i, block);
+		}
+
+		printk(KERN_ERR "*** block %zu not written\n",
+		       ((void *) i - (void *) ondisk) / block_bytes(b->c));
+
+		for (j = 0; j < inmemory->keys; j++)
+			if (inmemory->d[j] != sorted->d[j])
 				break;
 
+		printk(KERN_ERR "b->written %u\n", b->written);
+
 		console_unlock();
 		panic("verify failed at %u\n", j);
 	}
 
 	mutex_unlock(&b->c->verify_lock);
+	up(&b->io_mutex);
 }
 
 void bch_data_verify(struct cached_dev *dc, struct bio *bio)
 {
 	char name[BDEVNAME_SIZE];
 	struct bio *check;
-	struct bio_vec *bv;
+	struct bio_vec bv, *bv2;
+	struct bvec_iter iter;
 	int i;
 
 	check = bio_clone(bio, GFP_NOIO);
@@ -185,95 +119,27 @@
 
 	submit_bio_wait(READ_SYNC, check);
 
-	bio_for_each_segment(bv, bio, i) {
-		void *p1 = kmap_atomic(bv->bv_page);
-		void *p2 = page_address(check->bi_io_vec[i].bv_page);
+	bio_for_each_segment(bv, bio, iter) {
+		void *p1 = kmap_atomic(bv.bv_page);
+		void *p2 = page_address(check->bi_io_vec[iter.bi_idx].bv_page);
 
-		cache_set_err_on(memcmp(p1 + bv->bv_offset,
-					p2 + bv->bv_offset,
-					bv->bv_len),
+		cache_set_err_on(memcmp(p1 + bv.bv_offset,
+					p2 + bv.bv_offset,
+					bv.bv_len),
 				 dc->disk.c,
 				 "verify failed at dev %s sector %llu",
 				 bdevname(dc->bdev, name),
-				 (uint64_t) bio->bi_sector);
+				 (uint64_t) bio->bi_iter.bi_sector);
 
 		kunmap_atomic(p1);
 	}
 
-	bio_for_each_segment_all(bv, check, i)
-		__free_page(bv->bv_page);
+	bio_for_each_segment_all(bv2, check, i)
+		__free_page(bv2->bv_page);
 out_put:
 	bio_put(check);
 }
 
-int __bch_count_data(struct btree *b)
-{
-	unsigned ret = 0;
-	struct btree_iter iter;
-	struct bkey *k;
-
-	if (!b->level)
-		for_each_key(b, k, &iter)
-			ret += KEY_SIZE(k);
-	return ret;
-}
-
-void __bch_check_keys(struct btree *b, const char *fmt, ...)
-{
-	va_list args;
-	struct bkey *k, *p = NULL;
-	struct btree_iter iter;
-	const char *err;
-
-	for_each_key(b, k, &iter) {
-		if (!b->level) {
-			err = "Keys out of order";
-			if (p && bkey_cmp(&START_KEY(p), &START_KEY(k)) > 0)
-				goto bug;
-
-			if (bch_ptr_invalid(b, k))
-				continue;
-
-			err =  "Overlapping keys";
-			if (p && bkey_cmp(p, &START_KEY(k)) > 0)
-				goto bug;
-		} else {
-			if (bch_ptr_bad(b, k))
-				continue;
-
-			err = "Duplicate keys";
-			if (p && !bkey_cmp(p, k))
-				goto bug;
-		}
-		p = k;
-	}
-
-	err = "Key larger than btree node key";
-	if (p && bkey_cmp(p, &b->key) > 0)
-		goto bug;
-
-	return;
-bug:
-	bch_dump_bucket(b);
-
-	va_start(args, fmt);
-	vprintk(fmt, args);
-	va_end(args);
-
-	panic("bcache error: %s:\n", err);
-}
-
-void bch_btree_iter_next_check(struct btree_iter *iter)
-{
-	struct bkey *k = iter->data->k, *next = bkey_next(k);
-
-	if (next < iter->data->end &&
-	    bkey_cmp(k, iter->b->level ? next : &START_KEY(next)) > 0) {
-		bch_dump_bucket(iter->b);
-		panic("Key skipped backwards\n");
-	}
-}
-
 #endif
 
 #ifdef CONFIG_DEBUG_FS
@@ -320,7 +186,7 @@
 		if (!w)
 			break;
 
-		bch_bkey_to_text(kbuf, sizeof(kbuf), &w->key);
+		bch_extent_to_text(kbuf, sizeof(kbuf), &w->key);
 		i->bytes = snprintf(i->buf, PAGE_SIZE, "%s\n", kbuf);
 		bch_keybuf_del(&i->keys, w);
 	}
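
The bio walk in bch_data_verify() above uses the immutable-biovec style this
series converts bcache to: each segment is copied into a struct bio_vec by
value and the position is carried in a struct bvec_iter instead of an integer
index. A minimal sketch of the pattern (count_bio_bytes() is an invented
helper, not part of the patch):

static unsigned count_bio_bytes(struct bio *bio)
{
	struct bio_vec bv;	/* segment copied by value */
	struct bvec_iter iter;	/* replaces the old int index */
	unsigned bytes = 0;

	bio_for_each_segment(bv, bio, iter)
		bytes += bv.bv_len;	/* note '.' rather than '->' */

	return bytes;
}
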
diff --git a/drivers/md/bcache/debug.h b/drivers/md/bcache/debug.h
index 2ede60e..1f63c19 100644
--- a/drivers/md/bcache/debug.h
+++ b/drivers/md/bcache/debug.h
@@ -1,47 +1,30 @@
 #ifndef _BCACHE_DEBUG_H
 #define _BCACHE_DEBUG_H
 
-/* Btree/bkey debug printing */
-
-int bch_bkey_to_text(char *buf, size_t size, const struct bkey *k);
+struct bio;
+struct cached_dev;
+struct cache_set;
 
 #ifdef CONFIG_BCACHE_DEBUG
 
-void bch_btree_verify(struct btree *, struct bset *);
+void bch_btree_verify(struct btree *);
 void bch_data_verify(struct cached_dev *, struct bio *);
-int __bch_count_data(struct btree *);
-void __bch_check_keys(struct btree *, const char *, ...);
-void bch_btree_iter_next_check(struct btree_iter *);
 
-#define EBUG_ON(cond)			BUG_ON(cond)
 #define expensive_debug_checks(c)	((c)->expensive_debug_checks)
 #define key_merging_disabled(c)		((c)->key_merging_disabled)
 #define bypass_torture_test(d)		((d)->bypass_torture_test)
 
 #else /* DEBUG */
 
-static inline void bch_btree_verify(struct btree *b, struct bset *i) {}
+static inline void bch_btree_verify(struct btree *b) {}
 static inline void bch_data_verify(struct cached_dev *dc, struct bio *bio) {}
-static inline int __bch_count_data(struct btree *b) { return -1; }
-static inline void __bch_check_keys(struct btree *b, const char *fmt, ...) {}
-static inline void bch_btree_iter_next_check(struct btree_iter *iter) {}
 
-#define EBUG_ON(cond)			do { if (cond); } while (0)
 #define expensive_debug_checks(c)	0
 #define key_merging_disabled(c)		0
 #define bypass_torture_test(d)		0
 
 #endif
 
-#define bch_count_data(b)						\
-	(expensive_debug_checks((b)->c) ? __bch_count_data(b) : -1)
-
-#define bch_check_keys(b, ...)						\
-do {									\
-	if (expensive_debug_checks((b)->c))				\
-		__bch_check_keys(b, __VA_ARGS__);			\
-} while (0)
-
 #ifdef CONFIG_DEBUG_FS
 void bch_debug_init_cache_set(struct cache_set *);
 #else
diff --git a/drivers/md/bcache/extents.c b/drivers/md/bcache/extents.c
new file mode 100644
index 0000000..416d1a3
--- /dev/null
+++ b/drivers/md/bcache/extents.c
@@ -0,0 +1,616 @@
+/*
+ * Copyright (C) 2010 Kent Overstreet <kent.overstreet@gmail.com>
+ *
+ * Uses a block device as cache for other block devices; optimized for SSDs.
+ * All allocation is done in buckets, which should match the erase block size
+ * of the device.
+ *
+ * Buckets containing cached data are kept on a heap sorted by priority;
+ * bucket priority is increased on cache hit, and periodically all the buckets
+ * on the heap have their priority scaled down. This currently is just used as
+ * an LRU but in the future should allow for more intelligent heuristics.
+ *
+ * Buckets have an 8 bit counter; freeing is accomplished by incrementing the
+ * counter. Garbage collection is used to remove stale pointers.
+ *
+ * Indexing is done via a btree; nodes are not necessarily fully sorted, rather
+ * as keys are inserted we only sort the pages that have not yet been written.
+ * When garbage collection is run, we resort the entire node.
+ *
+ * All configuration is done via sysfs; see Documentation/bcache.txt.
+ */
+
+#include "bcache.h"
+#include "btree.h"
+#include "debug.h"
+#include "extents.h"
+#include "writeback.h"
+
+static void sort_key_next(struct btree_iter *iter,
+			  struct btree_iter_set *i)
+{
+	i->k = bkey_next(i->k);
+
+	if (i->k == i->end)
+		*i = iter->data[--iter->used];
+}
+
+static bool bch_key_sort_cmp(struct btree_iter_set l,
+			     struct btree_iter_set r)
+{
+	int64_t c = bkey_cmp(l.k, r.k);
+
+	return c ? c > 0 : l.k < r.k;
+}
+
+static bool __ptr_invalid(struct cache_set *c, const struct bkey *k)
+{
+	unsigned i;
+
+	for (i = 0; i < KEY_PTRS(k); i++)
+		if (ptr_available(c, k, i)) {
+			struct cache *ca = PTR_CACHE(c, k, i);
+			size_t bucket = PTR_BUCKET_NR(c, k, i);
+			size_t r = bucket_remainder(c, PTR_OFFSET(k, i));
+
+			if (KEY_SIZE(k) + r > c->sb.bucket_size ||
+			    bucket <  ca->sb.first_bucket ||
+			    bucket >= ca->sb.nbuckets)
+				return true;
+		}
+
+	return false;
+}
+
+/* Common among btree and extent ptrs */
+
+static const char *bch_ptr_status(struct cache_set *c, const struct bkey *k)
+{
+	unsigned i;
+
+	for (i = 0; i < KEY_PTRS(k); i++)
+		if (ptr_available(c, k, i)) {
+			struct cache *ca = PTR_CACHE(c, k, i);
+			size_t bucket = PTR_BUCKET_NR(c, k, i);
+			size_t r = bucket_remainder(c, PTR_OFFSET(k, i));
+
+			if (KEY_SIZE(k) + r > c->sb.bucket_size)
+				return "bad, length too big";
+			if (bucket <  ca->sb.first_bucket)
+				return "bad, short offset";
+			if (bucket >= ca->sb.nbuckets)
+				return "bad, offset past end of device";
+			if (ptr_stale(c, k, i))
+				return "stale";
+		}
+
+	if (!bkey_cmp(k, &ZERO_KEY))
+		return "bad, null key";
+	if (!KEY_PTRS(k))
+		return "bad, no pointers";
+	if (!KEY_SIZE(k))
+		return "zeroed key";
+	return "";
+}
+
+void bch_extent_to_text(char *buf, size_t size, const struct bkey *k)
+{
+	unsigned i = 0;
+	char *out = buf, *end = buf + size;
+
+#define p(...)	(out += scnprintf(out, end - out, __VA_ARGS__))
+
+	p("%llu:%llu len %llu -> [", KEY_INODE(k), KEY_START(k), KEY_SIZE(k));
+
+	for (i = 0; i < KEY_PTRS(k); i++) {
+		if (i)
+			p(", ");
+
+		if (PTR_DEV(k, i) == PTR_CHECK_DEV)
+			p("check dev");
+		else
+			p("%llu:%llu gen %llu", PTR_DEV(k, i),
+			  PTR_OFFSET(k, i), PTR_GEN(k, i));
+	}
+
+	p("]");
+
+	if (KEY_DIRTY(k))
+		p(" dirty");
+	if (KEY_CSUM(k))
+		p(" cs%llu %llx", KEY_CSUM(k), k->ptr[1]);
+#undef p
+}
+
+static void bch_bkey_dump(struct btree_keys *keys, const struct bkey *k)
+{
+	struct btree *b = container_of(keys, struct btree, keys);
+	unsigned j;
+	char buf[80];
+
+	bch_extent_to_text(buf, sizeof(buf), k);
+	printk(" %s", buf);
+
+	for (j = 0; j < KEY_PTRS(k); j++) {
+		size_t n = PTR_BUCKET_NR(b->c, k, j);
+		printk(" bucket %zu", n);
+
+		if (n >= b->c->sb.first_bucket && n < b->c->sb.nbuckets)
+			printk(" prio %i",
+			       PTR_BUCKET(b->c, k, j)->prio);
+	}
+
+	printk(" %s\n", bch_ptr_status(b->c, k));
+}
+
+/* Btree ptrs */
+
+bool __bch_btree_ptr_invalid(struct cache_set *c, const struct bkey *k)
+{
+	char buf[80];
+
+	if (!KEY_PTRS(k) || !KEY_SIZE(k) || KEY_DIRTY(k))
+		goto bad;
+
+	if (__ptr_invalid(c, k))
+		goto bad;
+
+	return false;
+bad:
+	bch_extent_to_text(buf, sizeof(buf), k);
+	cache_bug(c, "spotted btree ptr %s: %s", buf, bch_ptr_status(c, k));
+	return true;
+}
+
+static bool bch_btree_ptr_invalid(struct btree_keys *bk, const struct bkey *k)
+{
+	struct btree *b = container_of(bk, struct btree, keys);
+	return __bch_btree_ptr_invalid(b->c, k);
+}
+
+static bool btree_ptr_bad_expensive(struct btree *b, const struct bkey *k)
+{
+	unsigned i;
+	char buf[80];
+	struct bucket *g;
+
+	if (mutex_trylock(&b->c->bucket_lock)) {
+		for (i = 0; i < KEY_PTRS(k); i++)
+			if (ptr_available(b->c, k, i)) {
+				g = PTR_BUCKET(b->c, k, i);
+
+				if (KEY_DIRTY(k) ||
+				    g->prio != BTREE_PRIO ||
+				    (b->c->gc_mark_valid &&
+				     GC_MARK(g) != GC_MARK_METADATA))
+					goto err;
+			}
+
+		mutex_unlock(&b->c->bucket_lock);
+	}
+
+	return false;
+err:
+	mutex_unlock(&b->c->bucket_lock);
+	bch_extent_to_text(buf, sizeof(buf), k);
+	btree_bug(b,
+"inconsistent btree pointer %s: bucket %zi pin %i prio %i gen %i last_gc %i mark %llu gc_gen %i",
+		  buf, PTR_BUCKET_NR(b->c, k, i), atomic_read(&g->pin),
+		  g->prio, g->gen, g->last_gc, GC_MARK(g), g->gc_gen);
+	return true;
+}
+
+static bool bch_btree_ptr_bad(struct btree_keys *bk, const struct bkey *k)
+{
+	struct btree *b = container_of(bk, struct btree, keys);
+	unsigned i;
+
+	if (!bkey_cmp(k, &ZERO_KEY) ||
+	    !KEY_PTRS(k) ||
+	    bch_ptr_invalid(bk, k))
+		return true;
+
+	for (i = 0; i < KEY_PTRS(k); i++)
+		if (!ptr_available(b->c, k, i) ||
+		    ptr_stale(b->c, k, i))
+			return true;
+
+	if (expensive_debug_checks(b->c) &&
+	    btree_ptr_bad_expensive(b, k))
+		return true;
+
+	return false;
+}
+
+static bool bch_btree_ptr_insert_fixup(struct btree_keys *bk,
+				       struct bkey *insert,
+				       struct btree_iter *iter,
+				       struct bkey *replace_key)
+{
+	struct btree *b = container_of(bk, struct btree, keys);
+
+	if (!KEY_OFFSET(insert))
+		btree_current_write(b)->prio_blocked++;
+
+	return false;
+}
+
+const struct btree_keys_ops bch_btree_keys_ops = {
+	.sort_cmp	= bch_key_sort_cmp,
+	.insert_fixup	= bch_btree_ptr_insert_fixup,
+	.key_invalid	= bch_btree_ptr_invalid,
+	.key_bad	= bch_btree_ptr_bad,
+	.key_to_text	= bch_extent_to_text,
+	.key_dump	= bch_bkey_dump,
+};
+
+/* Extents */
+
+/*
+ * Returns true if l > r - unless l == r, in which case returns true if l is
+ * older than r.
+ *
+ * Necessary for btree_sort_fixup() - if there are multiple keys that compare
+ * equal in different sets, we have to process them newest to oldest.
+ */
+static bool bch_extent_sort_cmp(struct btree_iter_set l,
+				struct btree_iter_set r)
+{
+	int64_t c = bkey_cmp(&START_KEY(l.k), &START_KEY(r.k));
+
+	return c ? c > 0 : l.k < r.k;
+}
+
+static struct bkey *bch_extent_sort_fixup(struct btree_iter *iter,
+					  struct bkey *tmp)
+{
+	while (iter->used > 1) {
+		struct btree_iter_set *top = iter->data, *i = top + 1;
+
+		if (iter->used > 2 &&
+		    bch_extent_sort_cmp(i[0], i[1]))
+			i++;
+
+		if (bkey_cmp(top->k, &START_KEY(i->k)) <= 0)
+			break;
+
+		if (!KEY_SIZE(i->k)) {
+			sort_key_next(iter, i);
+			heap_sift(iter, i - top, bch_extent_sort_cmp);
+			continue;
+		}
+
+		if (top->k > i->k) {
+			if (bkey_cmp(top->k, i->k) >= 0)
+				sort_key_next(iter, i);
+			else
+				bch_cut_front(top->k, i->k);
+
+			heap_sift(iter, i - top, bch_extent_sort_cmp);
+		} else {
+			/* can't happen because of comparison func */
+			BUG_ON(!bkey_cmp(&START_KEY(top->k), &START_KEY(i->k)));
+
+			if (bkey_cmp(i->k, top->k) < 0) {
+				bkey_copy(tmp, top->k);
+
+				bch_cut_back(&START_KEY(i->k), tmp);
+				bch_cut_front(i->k, top->k);
+				heap_sift(iter, 0, bch_extent_sort_cmp);
+
+				return tmp;
+			} else {
+				bch_cut_back(&START_KEY(i->k), top->k);
+			}
+		}
+	}
+
+	return NULL;
+}
+
+static bool bch_extent_insert_fixup(struct btree_keys *b,
+				    struct bkey *insert,
+				    struct btree_iter *iter,
+				    struct bkey *replace_key)
+{
+	struct cache_set *c = container_of(b, struct btree, keys)->c;
+
+	void subtract_dirty(struct bkey *k, uint64_t offset, int sectors)
+	{
+		if (KEY_DIRTY(k))
+			bcache_dev_sectors_dirty_add(c, KEY_INODE(k),
+						     offset, -sectors);
+	}
+
+	uint64_t old_offset;
+	unsigned old_size, sectors_found = 0;
+
+	BUG_ON(!KEY_OFFSET(insert));
+	BUG_ON(!KEY_SIZE(insert));
+
+	while (1) {
+		struct bkey *k = bch_btree_iter_next(iter);
+		if (!k)
+			break;
+
+		if (bkey_cmp(&START_KEY(k), insert) >= 0) {
+			if (KEY_SIZE(k))
+				break;
+			else
+				continue;
+		}
+
+		if (bkey_cmp(k, &START_KEY(insert)) <= 0)
+			continue;
+
+		old_offset = KEY_START(k);
+		old_size = KEY_SIZE(k);
+
+		/*
+		 * We might overlap with 0 size extents; we can't skip these
+		 * because if they're in the set we're inserting to we have to
+		 * adjust them so they don't overlap with the key we're
+		 * inserting. But we don't want to check them for replace
+		 * operations.
+		 */
+
+		if (replace_key && KEY_SIZE(k)) {
+			/*
+			 * k might have been split since we inserted/found the
+			 * key we're replacing
+			 */
+			unsigned i;
+			uint64_t offset = KEY_START(k) -
+				KEY_START(replace_key);
+
+			/* But it must be a subset of the replace key */
+			if (KEY_START(k) < KEY_START(replace_key) ||
+			    KEY_OFFSET(k) > KEY_OFFSET(replace_key))
+				goto check_failed;
+
+			/* We didn't find a key that we were supposed to */
+			if (KEY_START(k) > KEY_START(insert) + sectors_found)
+				goto check_failed;
+
+			if (!bch_bkey_equal_header(k, replace_key))
+				goto check_failed;
+
+			/* skip past gen */
+			offset <<= 8;
+
+			BUG_ON(!KEY_PTRS(replace_key));
+
+			for (i = 0; i < KEY_PTRS(replace_key); i++)
+				if (k->ptr[i] != replace_key->ptr[i] + offset)
+					goto check_failed;
+
+			sectors_found = KEY_OFFSET(k) - KEY_START(insert);
+		}
+
+		if (bkey_cmp(insert, k) < 0 &&
+		    bkey_cmp(&START_KEY(insert), &START_KEY(k)) > 0) {
+			/*
+			 * We overlapped in the middle of an existing key: that
+			 * means we have to split the old key. But we have to do
+			 * slightly different things depending on whether the
+			 * old key has been written out yet.
+			 */
+
+			struct bkey *top;
+
+			subtract_dirty(k, KEY_START(insert), KEY_SIZE(insert));
+
+			if (bkey_written(b, k)) {
+				/*
+				 * We insert a new key to cover the top of the
+				 * old key, and the old key is modified in place
+				 * to represent the bottom split.
+				 *
+				 * It's completely arbitrary whether the new key
+				 * is the top or the bottom, but it has to match
+				 * up with what btree_sort_fixup() does - it
+				 * doesn't check for this kind of overlap, it
+				 * depends on us inserting a new key for the top
+				 * here.
+				 */
+				top = bch_bset_search(b, bset_tree_last(b),
+						      insert);
+				bch_bset_insert(b, top, k);
+			} else {
+				BKEY_PADDED(key) temp;
+				bkey_copy(&temp.key, k);
+				bch_bset_insert(b, k, &temp.key);
+				top = bkey_next(k);
+			}
+
+			bch_cut_front(insert, top);
+			bch_cut_back(&START_KEY(insert), k);
+			bch_bset_fix_invalidated_key(b, k);
+			goto out;
+		}
+
+		if (bkey_cmp(insert, k) < 0) {
+			bch_cut_front(insert, k);
+		} else {
+			if (bkey_cmp(&START_KEY(insert), &START_KEY(k)) > 0)
+				old_offset = KEY_START(insert);
+
+			if (bkey_written(b, k) &&
+			    bkey_cmp(&START_KEY(insert), &START_KEY(k)) <= 0) {
+				/*
+				 * Completely overwrote, so we don't have to
+				 * invalidate the binary search tree
+				 */
+				bch_cut_front(k, k);
+			} else {
+				__bch_cut_back(&START_KEY(insert), k);
+				bch_bset_fix_invalidated_key(b, k);
+			}
+		}
+
+		subtract_dirty(k, old_offset, old_size - KEY_SIZE(k));
+	}
+
+check_failed:
+	if (replace_key) {
+		if (!sectors_found) {
+			return true;
+		} else if (sectors_found < KEY_SIZE(insert)) {
+			SET_KEY_OFFSET(insert, KEY_OFFSET(insert) -
+				       (KEY_SIZE(insert) - sectors_found));
+			SET_KEY_SIZE(insert, sectors_found);
+		}
+	}
+out:
+	if (KEY_DIRTY(insert))
+		bcache_dev_sectors_dirty_add(c, KEY_INODE(insert),
+					     KEY_START(insert),
+					     KEY_SIZE(insert));
+
+	return false;
+}
+
+static bool bch_extent_invalid(struct btree_keys *bk, const struct bkey *k)
+{
+	struct btree *b = container_of(bk, struct btree, keys);
+	char buf[80];
+
+	if (!KEY_SIZE(k))
+		return true;
+
+	if (KEY_SIZE(k) > KEY_OFFSET(k))
+		goto bad;
+
+	if (__ptr_invalid(b->c, k))
+		goto bad;
+
+	return false;
+bad:
+	bch_extent_to_text(buf, sizeof(buf), k);
+	cache_bug(b->c, "spotted extent %s: %s", buf, bch_ptr_status(b->c, k));
+	return true;
+}
+
+static bool bch_extent_bad_expensive(struct btree *b, const struct bkey *k,
+				     unsigned ptr)
+{
+	struct bucket *g = PTR_BUCKET(b->c, k, ptr);
+	char buf[80];
+
+	if (mutex_trylock(&b->c->bucket_lock)) {
+		if (b->c->gc_mark_valid &&
+		    ((GC_MARK(g) != GC_MARK_DIRTY &&
+		      KEY_DIRTY(k)) ||
+		     GC_MARK(g) == GC_MARK_METADATA))
+			goto err;
+
+		if (g->prio == BTREE_PRIO)
+			goto err;
+
+		mutex_unlock(&b->c->bucket_lock);
+	}
+
+	return false;
+err:
+	mutex_unlock(&b->c->bucket_lock);
+	bch_extent_to_text(buf, sizeof(buf), k);
+	btree_bug(b,
+"inconsistent extent pointer %s:\nbucket %zu pin %i prio %i gen %i last_gc %i mark %llu gc_gen %i",
+		  buf, PTR_BUCKET_NR(b->c, k, ptr), atomic_read(&g->pin),
+		  g->prio, g->gen, g->last_gc, GC_MARK(g), g->gc_gen);
+	return true;
+}
+
+static bool bch_extent_bad(struct btree_keys *bk, const struct bkey *k)
+{
+	struct btree *b = container_of(bk, struct btree, keys);
+	struct bucket *g;
+	unsigned i, stale;
+
+	if (!KEY_PTRS(k) ||
+	    bch_extent_invalid(bk, k))
+		return true;
+
+	for (i = 0; i < KEY_PTRS(k); i++)
+		if (!ptr_available(b->c, k, i))
+			return true;
+
+	if (!expensive_debug_checks(b->c) && KEY_DIRTY(k))
+		return false;
+
+	for (i = 0; i < KEY_PTRS(k); i++) {
+		g = PTR_BUCKET(b->c, k, i);
+		stale = ptr_stale(b->c, k, i);
+
+		btree_bug_on(stale > 96, b,
+			     "key too stale: %i, need_gc %u",
+			     stale, b->c->need_gc);
+
+		btree_bug_on(stale && KEY_DIRTY(k) && KEY_SIZE(k),
+			     b, "stale dirty pointer");
+
+		if (stale)
+			return true;
+
+		if (expensive_debug_checks(b->c) &&
+		    bch_extent_bad_expensive(b, k, i))
+			return true;
+	}
+
+	return false;
+}
+
+static uint64_t merge_chksums(struct bkey *l, struct bkey *r)
+{
+	return (l->ptr[KEY_PTRS(l)] + r->ptr[KEY_PTRS(r)]) &
+		~((uint64_t)1 << 63);
+}
+
+static bool bch_extent_merge(struct btree_keys *bk, struct bkey *l, struct bkey *r)
+{
+	struct btree *b = container_of(bk, struct btree, keys);
+	unsigned i;
+
+	if (key_merging_disabled(b->c))
+		return false;
+
+	for (i = 0; i < KEY_PTRS(l); i++)
+		if (l->ptr[i] + PTR(0, KEY_SIZE(l), 0) != r->ptr[i] ||
+		    PTR_BUCKET_NR(b->c, l, i) != PTR_BUCKET_NR(b->c, r, i))
+			return false;
+
+	/* Keys with no pointers aren't restricted to one bucket and could
+	 * overflow KEY_SIZE
+	 */
+	if (KEY_SIZE(l) + KEY_SIZE(r) > USHRT_MAX) {
+		SET_KEY_OFFSET(l, KEY_OFFSET(l) + USHRT_MAX - KEY_SIZE(l));
+		SET_KEY_SIZE(l, USHRT_MAX);
+
+		bch_cut_front(l, r);
+		return false;
+	}
+
+	if (KEY_CSUM(l)) {
+		if (KEY_CSUM(r))
+			l->ptr[KEY_PTRS(l)] = merge_chksums(l, r);
+		else
+			SET_KEY_CSUM(l, 0);
+	}
+
+	SET_KEY_OFFSET(l, KEY_OFFSET(l) + KEY_SIZE(r));
+	SET_KEY_SIZE(l, KEY_SIZE(l) + KEY_SIZE(r));
+
+	return true;
+}
+
+const struct btree_keys_ops bch_extent_keys_ops = {
+	.sort_cmp	= bch_extent_sort_cmp,
+	.sort_fixup	= bch_extent_sort_fixup,
+	.insert_fixup	= bch_extent_insert_fixup,
+	.key_invalid	= bch_extent_invalid,
+	.key_bad	= bch_extent_bad,
+	.key_merge	= bch_extent_merge,
+	.key_to_text	= bch_extent_to_text,
+	.key_dump	= bch_bkey_dump,
+	.is_extents	= true,
+};
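
The two ops tables above are consumed by the generic bset code through thin
wrappers; roughly (the helper below is a simplified sketch, not the exact
upstream definition):

static inline bool key_invalid(struct btree_keys *b, const struct bkey *k)
{
	return b->ops->key_invalid(b, k);
}

The idea is that interior btree nodes are set up with bch_btree_keys_ops and
leaf nodes holding extents with bch_extent_keys_ops (bch_btree_verify() above
copies b->keys.ops into the verify node for the same reason), so one sort and
iterate path picks up the right comparison, fixup and merge behaviour per
node type.
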
diff --git a/drivers/md/bcache/extents.h b/drivers/md/bcache/extents.h
new file mode 100644
index 0000000..e4e2340
--- /dev/null
+++ b/drivers/md/bcache/extents.h
@@ -0,0 +1,13 @@
+#ifndef _BCACHE_EXTENTS_H
+#define _BCACHE_EXTENTS_H
+
+extern const struct btree_keys_ops bch_btree_keys_ops;
+extern const struct btree_keys_ops bch_extent_keys_ops;
+
+struct bkey;
+struct cache_set;
+
+void bch_extent_to_text(char *, size_t, const struct bkey *);
+bool __bch_btree_ptr_invalid(struct cache_set *, const struct bkey *);
+
+#endif /* _BCACHE_EXTENTS_H */
diff --git a/drivers/md/bcache/io.c b/drivers/md/bcache/io.c
index 9056632..fa028fa 100644
--- a/drivers/md/bcache/io.c
+++ b/drivers/md/bcache/io.c
@@ -11,178 +11,40 @@
 
 #include <linux/blkdev.h>
 
-static void bch_bi_idx_hack_endio(struct bio *bio, int error)
-{
-	struct bio *p = bio->bi_private;
-
-	bio_endio(p, error);
-	bio_put(bio);
-}
-
-static void bch_generic_make_request_hack(struct bio *bio)
-{
-	if (bio->bi_idx) {
-		struct bio *clone = bio_alloc(GFP_NOIO, bio_segments(bio));
-
-		memcpy(clone->bi_io_vec,
-		       bio_iovec(bio),
-		       bio_segments(bio) * sizeof(struct bio_vec));
-
-		clone->bi_sector	= bio->bi_sector;
-		clone->bi_bdev		= bio->bi_bdev;
-		clone->bi_rw		= bio->bi_rw;
-		clone->bi_vcnt		= bio_segments(bio);
-		clone->bi_size		= bio->bi_size;
-
-		clone->bi_private	= bio;
-		clone->bi_end_io	= bch_bi_idx_hack_endio;
-
-		bio = clone;
-	}
-
-	/*
-	 * Hack, since drivers that clone bios clone up to bi_max_vecs, but our
-	 * bios might have had more than that (before we split them per device
-	 * limitations).
-	 *
-	 * To be taken out once immutable bvec stuff is in.
-	 */
-	bio->bi_max_vecs = bio->bi_vcnt;
-
-	generic_make_request(bio);
-}
-
-/**
- * bch_bio_split - split a bio
- * @bio:	bio to split
- * @sectors:	number of sectors to split from the front of @bio
- * @gfp:	gfp mask
- * @bs:		bio set to allocate from
- *
- * Allocates and returns a new bio which represents @sectors from the start of
- * @bio, and updates @bio to represent the remaining sectors.
- *
- * If bio_sectors(@bio) was less than or equal to @sectors, returns @bio
- * unchanged.
- *
- * The newly allocated bio will point to @bio's bi_io_vec, if the split was on a
- * bvec boundry; it is the caller's responsibility to ensure that @bio is not
- * freed before the split.
- */
-struct bio *bch_bio_split(struct bio *bio, int sectors,
-			  gfp_t gfp, struct bio_set *bs)
-{
-	unsigned idx = bio->bi_idx, vcnt = 0, nbytes = sectors << 9;
-	struct bio_vec *bv;
-	struct bio *ret = NULL;
-
-	BUG_ON(sectors <= 0);
-
-	if (sectors >= bio_sectors(bio))
-		return bio;
-
-	if (bio->bi_rw & REQ_DISCARD) {
-		ret = bio_alloc_bioset(gfp, 1, bs);
-		if (!ret)
-			return NULL;
-		idx = 0;
-		goto out;
-	}
-
-	bio_for_each_segment(bv, bio, idx) {
-		vcnt = idx - bio->bi_idx;
-
-		if (!nbytes) {
-			ret = bio_alloc_bioset(gfp, vcnt, bs);
-			if (!ret)
-				return NULL;
-
-			memcpy(ret->bi_io_vec, bio_iovec(bio),
-			       sizeof(struct bio_vec) * vcnt);
-
-			break;
-		} else if (nbytes < bv->bv_len) {
-			ret = bio_alloc_bioset(gfp, ++vcnt, bs);
-			if (!ret)
-				return NULL;
-
-			memcpy(ret->bi_io_vec, bio_iovec(bio),
-			       sizeof(struct bio_vec) * vcnt);
-
-			ret->bi_io_vec[vcnt - 1].bv_len = nbytes;
-			bv->bv_offset	+= nbytes;
-			bv->bv_len	-= nbytes;
-			break;
-		}
-
-		nbytes -= bv->bv_len;
-	}
-out:
-	ret->bi_bdev	= bio->bi_bdev;
-	ret->bi_sector	= bio->bi_sector;
-	ret->bi_size	= sectors << 9;
-	ret->bi_rw	= bio->bi_rw;
-	ret->bi_vcnt	= vcnt;
-	ret->bi_max_vecs = vcnt;
-
-	bio->bi_sector	+= sectors;
-	bio->bi_size	-= sectors << 9;
-	bio->bi_idx	 = idx;
-
-	if (bio_integrity(bio)) {
-		if (bio_integrity_clone(ret, bio, gfp)) {
-			bio_put(ret);
-			return NULL;
-		}
-
-		bio_integrity_trim(ret, 0, bio_sectors(ret));
-		bio_integrity_trim(bio, bio_sectors(ret), bio_sectors(bio));
-	}
-
-	return ret;
-}
-
 static unsigned bch_bio_max_sectors(struct bio *bio)
 {
-	unsigned ret = bio_sectors(bio);
 	struct request_queue *q = bdev_get_queue(bio->bi_bdev);
-	unsigned max_segments = min_t(unsigned, BIO_MAX_PAGES,
-				      queue_max_segments(q));
+	struct bio_vec bv;
+	struct bvec_iter iter;
+	unsigned ret = 0, seg = 0;
 
 	if (bio->bi_rw & REQ_DISCARD)
-		return min(ret, q->limits.max_discard_sectors);
+		return min(bio_sectors(bio), q->limits.max_discard_sectors);
 
-	if (bio_segments(bio) > max_segments ||
-	    q->merge_bvec_fn) {
-		struct bio_vec *bv;
-		int i, seg = 0;
+	bio_for_each_segment(bv, bio, iter) {
+		struct bvec_merge_data bvm = {
+			.bi_bdev	= bio->bi_bdev,
+			.bi_sector	= bio->bi_iter.bi_sector,
+			.bi_size	= ret << 9,
+			.bi_rw		= bio->bi_rw,
+		};
 
-		ret = 0;
+		if (seg == min_t(unsigned, BIO_MAX_PAGES,
+				 queue_max_segments(q)))
+			break;
 
-		bio_for_each_segment(bv, bio, i) {
-			struct bvec_merge_data bvm = {
-				.bi_bdev	= bio->bi_bdev,
-				.bi_sector	= bio->bi_sector,
-				.bi_size	= ret << 9,
-				.bi_rw		= bio->bi_rw,
-			};
+		if (q->merge_bvec_fn &&
+		    q->merge_bvec_fn(q, &bvm, &bv) < (int) bv.bv_len)
+			break;
 
-			if (seg == max_segments)
-				break;
-
-			if (q->merge_bvec_fn &&
-			    q->merge_bvec_fn(q, &bvm, bv) < (int) bv->bv_len)
-				break;
-
-			seg++;
-			ret += bv->bv_len >> 9;
-		}
+		seg++;
+		ret += bv.bv_len >> 9;
 	}
 
 	ret = min(ret, queue_max_sectors(q));
 
 	WARN_ON(!ret);
-	ret = max_t(int, ret, bio_iovec(bio)->bv_len >> 9);
+	ret = max_t(int, ret, bio_iovec(bio).bv_len >> 9);
 
 	return ret;
 }
@@ -193,7 +55,7 @@
 
 	s->bio->bi_end_io = s->bi_end_io;
 	s->bio->bi_private = s->bi_private;
-	bio_endio(s->bio, 0);
+	bio_endio_nodec(s->bio, 0);
 
 	closure_debug_destroy(&s->cl);
 	mempool_free(s, s->p->bio_split_hook);
@@ -232,19 +94,19 @@
 	bio_get(bio);
 
 	do {
-		n = bch_bio_split(bio, bch_bio_max_sectors(bio),
-				  GFP_NOIO, s->p->bio_split);
+		n = bio_next_split(bio, bch_bio_max_sectors(bio),
+				   GFP_NOIO, s->p->bio_split);
 
 		n->bi_end_io	= bch_bio_submit_split_endio;
 		n->bi_private	= &s->cl;
 
 		closure_get(&s->cl);
-		bch_generic_make_request_hack(n);
+		generic_make_request(n);
 	} while (n != bio);
 
 	continue_at(&s->cl, bch_bio_submit_split_done, NULL);
 submit:
-	bch_generic_make_request_hack(bio);
+	generic_make_request(bio);
 }
 
 /* Bios with headers */
@@ -272,8 +134,8 @@
 {
 	struct bbio *b = container_of(bio, struct bbio, bio);
 
-	bio->bi_sector	= PTR_OFFSET(&b->key, 0);
-	bio->bi_bdev	= PTR_CACHE(c, &b->key, 0)->bdev;
+	bio->bi_iter.bi_sector	= PTR_OFFSET(&b->key, 0);
+	bio->bi_bdev		= PTR_CACHE(c, &b->key, 0)->bdev;
 
 	b->submit_time_us = local_clock_us();
 	closure_bio_submit(bio, bio->bi_private, PTR_CACHE(c, &b->key, 0));
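
The split loop above now relies on the block layer's bio_next_split() instead
of the removed bch_bio_split(); the convention it depends on (paraphrased
here, not the exact block-layer source) is that once the requested sector
count covers the whole remaining bio, the original bio is handed back
unchanged, which is what terminates the do/while loop in
bch_bio_submit_split():

/* approximate semantics of bio_next_split(), for reading the loop above */
static struct bio *next_split_sketch(struct bio *bio, int sectors,
				     gfp_t gfp, struct bio_set *bs)
{
	if (sectors >= bio_sectors(bio))
		return bio;		/* nothing left to split off */

	return bio_split(bio, sectors, gfp, bs);	/* front part as a new bio */
}
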
diff --git a/drivers/md/bcache/journal.c b/drivers/md/bcache/journal.c
index ecdaa67..18039af 100644
--- a/drivers/md/bcache/journal.c
+++ b/drivers/md/bcache/journal.c
@@ -44,17 +44,17 @@
 
 	closure_init_stack(&cl);
 
-	pr_debug("reading %llu", (uint64_t) bucket);
+	pr_debug("reading %u", bucket_index);
 
 	while (offset < ca->sb.bucket_size) {
 reread:		left = ca->sb.bucket_size - offset;
-		len = min_t(unsigned, left, PAGE_SECTORS * 8);
+		len = min_t(unsigned, left, PAGE_SECTORS << JSET_BITS);
 
 		bio_reset(bio);
-		bio->bi_sector	= bucket + offset;
+		bio->bi_iter.bi_sector	= bucket + offset;
 		bio->bi_bdev	= ca->bdev;
 		bio->bi_rw	= READ;
-		bio->bi_size	= len << 9;
+		bio->bi_iter.bi_size	= len << 9;
 
 		bio->bi_end_io	= journal_read_endio;
 		bio->bi_private = &cl;
@@ -74,19 +74,28 @@
 			struct list_head *where;
 			size_t blocks, bytes = set_bytes(j);
 
-			if (j->magic != jset_magic(&ca->sb))
+			if (j->magic != jset_magic(&ca->sb)) {
+				pr_debug("%u: bad magic", bucket_index);
 				return ret;
+			}
 
-			if (bytes > left << 9)
+			if (bytes > left << 9 ||
+			    bytes > PAGE_SIZE << JSET_BITS) {
+				pr_info("%u: too big, %zu bytes, offset %u",
+					bucket_index, bytes, offset);
 				return ret;
+			}
 
 			if (bytes > len << 9)
 				goto reread;
 
-			if (j->csum != csum_set(j))
+			if (j->csum != csum_set(j)) {
+				pr_info("%u: bad csum, %zu bytes, offset %u",
+					bucket_index, bytes, offset);
 				return ret;
+			}
 
-			blocks = set_blocks(j, ca->set);
+			blocks = set_blocks(j, block_bytes(ca->set));
 
 			while (!list_empty(list)) {
 				i = list_first_entry(list,
@@ -275,7 +284,7 @@
 		}
 
 		for (k = i->j.start;
-		     k < end(&i->j);
+		     k < bset_bkey_last(&i->j);
 		     k = bkey_next(k)) {
 			unsigned j;
 
@@ -313,7 +322,7 @@
 				 n, i->j.seq - 1, start, end);
 
 		for (k = i->j.start;
-		     k < end(&i->j);
+		     k < bset_bkey_last(&i->j);
 		     k = bkey_next(k)) {
 			trace_bcache_journal_replay_key(k);
 
@@ -437,13 +446,13 @@
 		atomic_set(&ja->discard_in_flight, DISCARD_IN_FLIGHT);
 
 		bio_init(bio);
-		bio->bi_sector		= bucket_to_sector(ca->set,
+		bio->bi_iter.bi_sector	= bucket_to_sector(ca->set,
 						ca->sb.d[ja->discard_idx]);
 		bio->bi_bdev		= ca->bdev;
 		bio->bi_rw		= REQ_WRITE|REQ_DISCARD;
 		bio->bi_max_vecs	= 1;
 		bio->bi_io_vec		= bio->bi_inline_vecs;
-		bio->bi_size		= bucket_bytes(ca);
+		bio->bi_iter.bi_size	= bucket_bytes(ca);
 		bio->bi_end_io		= journal_discard_endio;
 
 		closure_get(&ca->set->cl);
@@ -555,6 +564,14 @@
 	continue_at_nobarrier(cl, journal_write, system_wq);
 }
 
+static void journal_write_unlock(struct closure *cl)
+{
+	struct cache_set *c = container_of(cl, struct cache_set, journal.io);
+
+	c->journal.io_in_flight = 0;
+	spin_unlock(&c->journal.lock);
+}
+
 static void journal_write_unlocked(struct closure *cl)
 	__releases(c->journal.lock)
 {
@@ -562,22 +579,15 @@
 	struct cache *ca;
 	struct journal_write *w = c->journal.cur;
 	struct bkey *k = &c->journal.key;
-	unsigned i, sectors = set_blocks(w->data, c) * c->sb.block_size;
+	unsigned i, sectors = set_blocks(w->data, block_bytes(c)) *
+		c->sb.block_size;
 
 	struct bio *bio;
 	struct bio_list list;
 	bio_list_init(&list);
 
 	if (!w->need_write) {
-		/*
-		 * XXX: have to unlock closure before we unlock journal lock,
-		 * else we race with bch_journal(). But this way we race
-		 * against cache set unregister. Doh.
-		 */
-		set_closure_fn(cl, NULL, NULL);
-		closure_sub(cl, CLOSURE_RUNNING + 1);
-		spin_unlock(&c->journal.lock);
-		return;
+		closure_return_with_destructor(cl, journal_write_unlock);
 	} else if (journal_full(&c->journal)) {
 		journal_reclaim(c);
 		spin_unlock(&c->journal.lock);
@@ -586,7 +596,7 @@
 		continue_at(cl, journal_write, system_wq);
 	}
 
-	c->journal.blocks_free -= set_blocks(w->data, c);
+	c->journal.blocks_free -= set_blocks(w->data, block_bytes(c));
 
 	w->data->btree_level = c->root->level;
 
@@ -608,10 +618,10 @@
 		atomic_long_add(sectors, &ca->meta_sectors_written);
 
 		bio_reset(bio);
-		bio->bi_sector	= PTR_OFFSET(k, i);
+		bio->bi_iter.bi_sector	= PTR_OFFSET(k, i);
 		bio->bi_bdev	= ca->bdev;
 		bio->bi_rw	= REQ_WRITE|REQ_SYNC|REQ_META|REQ_FLUSH|REQ_FUA;
-		bio->bi_size	= sectors << 9;
+		bio->bi_iter.bi_size = sectors << 9;
 
 		bio->bi_end_io	= journal_write_endio;
 		bio->bi_private = w;
@@ -653,10 +663,12 @@
 
 	w->need_write = true;
 
-	if (closure_trylock(cl, &c->cl))
-		journal_write_unlocked(cl);
-	else
+	if (!c->journal.io_in_flight) {
+		c->journal.io_in_flight = 1;
+		closure_call(cl, journal_write_unlocked, NULL, &c->cl);
+	} else {
 		spin_unlock(&c->journal.lock);
+	}
 }
 
 static struct journal_write *journal_wait_for_write(struct cache_set *c,
@@ -664,6 +676,7 @@
 {
 	size_t sectors;
 	struct closure cl;
+	bool wait = false;
 
 	closure_init_stack(&cl);
 
@@ -673,16 +686,19 @@
 		struct journal_write *w = c->journal.cur;
 
 		sectors = __set_blocks(w->data, w->data->keys + nkeys,
-				       c) * c->sb.block_size;
+				       block_bytes(c)) * c->sb.block_size;
 
 		if (sectors <= min_t(size_t,
 				     c->journal.blocks_free * c->sb.block_size,
 				     PAGE_SECTORS << JSET_BITS))
 			return w;
 
-		/* XXX: tracepoint */
+		if (wait)
+			closure_wait(&c->journal.wait, &cl);
+
 		if (!journal_full(&c->journal)) {
-			trace_bcache_journal_entry_full(c);
+			if (wait)
+				trace_bcache_journal_entry_full(c);
 
 			/*
 			 * XXX: If we were inserting so many keys that they
@@ -692,12 +708,11 @@
 			 */
 			BUG_ON(!w->data->keys);
 
-			closure_wait(&w->wait, &cl);
 			journal_try_write(c); /* unlocks */
 		} else {
-			trace_bcache_journal_full(c);
+			if (wait)
+				trace_bcache_journal_full(c);
 
-			closure_wait(&c->journal.wait, &cl);
 			journal_reclaim(c);
 			spin_unlock(&c->journal.lock);
 
@@ -706,6 +721,7 @@
 
 		closure_sync(&cl);
 		spin_lock(&c->journal.lock);
+		wait = true;
 	}
 }
 
@@ -736,7 +752,7 @@
 
 	w = journal_wait_for_write(c, bch_keylist_nkeys(keys));
 
-	memcpy(end(w->data), keys->keys, bch_keylist_bytes(keys));
+	memcpy(bset_bkey_last(w->data), keys->keys, bch_keylist_bytes(keys));
 	w->data->keys += bch_keylist_nkeys(keys);
 
 	ret = &fifo_back(&c->journal.pin);
@@ -780,7 +796,6 @@
 {
 	struct journal *j = &c->journal;
 
-	closure_init_unlocked(&j->io);
 	spin_lock_init(&j->lock);
 	INIT_DELAYED_WORK(&j->work, journal_write_work);
 
diff --git a/drivers/md/bcache/journal.h b/drivers/md/bcache/journal.h
index a6472fd..9180c44 100644
--- a/drivers/md/bcache/journal.h
+++ b/drivers/md/bcache/journal.h
@@ -104,6 +104,7 @@
 	/* used when waiting because the journal was full */
 	struct closure_waitlist	wait;
 	struct closure		io;
+	int			io_in_flight;
 	struct delayed_work	work;
 
 	/* Number of blocks free in the bucket(s) we're currently writing to */
diff --git a/drivers/md/bcache/movinggc.c b/drivers/md/bcache/movinggc.c
index f2f0998..9eb60d1 100644
--- a/drivers/md/bcache/movinggc.c
+++ b/drivers/md/bcache/movinggc.c
@@ -86,7 +86,7 @@
 	bio_get(bio);
 	bio_set_prio(bio, IOPRIO_PRIO_VALUE(IOPRIO_CLASS_IDLE, 0));
 
-	bio->bi_size		= KEY_SIZE(&io->w->key) << 9;
+	bio->bi_iter.bi_size	= KEY_SIZE(&io->w->key) << 9;
 	bio->bi_max_vecs	= DIV_ROUND_UP(KEY_SIZE(&io->w->key),
 					       PAGE_SECTORS);
 	bio->bi_private		= &io->cl;
@@ -102,7 +102,7 @@
 	if (!op->error) {
 		moving_init(io);
 
-		io->bio.bio.bi_sector = KEY_START(&io->w->key);
+		io->bio.bio.bi_iter.bi_sector = KEY_START(&io->w->key);
 		op->write_prio		= 1;
 		op->bio			= &io->bio.bio;
 
@@ -211,7 +211,7 @@
 	for_each_cache(ca, c, i) {
 		unsigned sectors_to_move = 0;
 		unsigned reserve_sectors = ca->sb.bucket_size *
-			min(fifo_used(&ca->free), ca->free.size / 2);
+			fifo_used(&ca->free[RESERVE_MOVINGGC]);
 
 		ca->heap.used = 0;
 
diff --git a/drivers/md/bcache/request.c b/drivers/md/bcache/request.c
index 61bcfc2..5d5d031 100644
--- a/drivers/md/bcache/request.c
+++ b/drivers/md/bcache/request.c
@@ -197,14 +197,14 @@
 
 static void bio_csum(struct bio *bio, struct bkey *k)
 {
-	struct bio_vec *bv;
+	struct bio_vec bv;
+	struct bvec_iter iter;
 	uint64_t csum = 0;
-	int i;
 
-	bio_for_each_segment(bv, bio, i) {
-		void *d = kmap(bv->bv_page) + bv->bv_offset;
-		csum = bch_crc64_update(csum, d, bv->bv_len);
-		kunmap(bv->bv_page);
+	bio_for_each_segment(bv, bio, iter) {
+		void *d = kmap(bv.bv_page) + bv.bv_offset;
+		csum = bch_crc64_update(csum, d, bv.bv_len);
+		kunmap(bv.bv_page);
 	}
 
 	k->ptr[KEY_PTRS(k)] = csum & (~0ULL >> 1);
@@ -254,26 +254,44 @@
 	closure_return(cl);
 }
 
+static int bch_keylist_realloc(struct keylist *l, unsigned u64s,
+			       struct cache_set *c)
+{
+	size_t oldsize = bch_keylist_nkeys(l);
+	size_t newsize = oldsize + u64s;
+
+	/*
+	 * The journalling code doesn't handle the case where the keys to insert
+	 * are bigger than an empty write: if we just return -ENOMEM here,
+	 * bio_insert() and bio_invalidate() will insert the keys created so far
+	 * and finish the rest when the keylist is empty.
+	 */
+	if (newsize * sizeof(uint64_t) > block_bytes(c) - sizeof(struct jset))
+		return -ENOMEM;
+
+	return __bch_keylist_realloc(l, u64s);
+}
+
 static void bch_data_invalidate(struct closure *cl)
 {
 	struct data_insert_op *op = container_of(cl, struct data_insert_op, cl);
 	struct bio *bio = op->bio;
 
 	pr_debug("invalidating %i sectors from %llu",
-		 bio_sectors(bio), (uint64_t) bio->bi_sector);
+		 bio_sectors(bio), (uint64_t) bio->bi_iter.bi_sector);
 
 	while (bio_sectors(bio)) {
 		unsigned sectors = min(bio_sectors(bio),
 				       1U << (KEY_SIZE_BITS - 1));
 
-		if (bch_keylist_realloc(&op->insert_keys, 0, op->c))
+		if (bch_keylist_realloc(&op->insert_keys, 2, op->c))
 			goto out;
 
-		bio->bi_sector	+= sectors;
-		bio->bi_size	-= sectors << 9;
+		bio->bi_iter.bi_sector	+= sectors;
+		bio->bi_iter.bi_size	-= sectors << 9;
 
 		bch_keylist_add(&op->insert_keys,
-				&KEY(op->inode, bio->bi_sector, sectors));
+				&KEY(op->inode, bio->bi_iter.bi_sector, sectors));
 	}
 
 	op->insert_data_done = true;
@@ -335,14 +353,14 @@
 	struct data_insert_op *op = container_of(cl, struct data_insert_op, cl);
 	struct bio *bio = op->bio, *n;
 
-	if (op->bypass)
-		return bch_data_invalidate(cl);
-
 	if (atomic_sub_return(bio_sectors(bio), &op->c->sectors_to_gc) < 0) {
 		set_gc_sectors(op->c);
 		wake_up_gc(op->c);
 	}
 
+	if (op->bypass)
+		return bch_data_invalidate(cl);
+
 	/*
 	 * Journal writes are marked REQ_FLUSH; if the original write was a
 	 * flush, it'll wait on the journal write.
@@ -356,21 +374,21 @@
 
 		/* 1 for the device pointer and 1 for the chksum */
 		if (bch_keylist_realloc(&op->insert_keys,
-					1 + (op->csum ? 1 : 0),
+					3 + (op->csum ? 1 : 0),
 					op->c))
 			continue_at(cl, bch_data_insert_keys, bcache_wq);
 
 		k = op->insert_keys.top;
 		bkey_init(k);
 		SET_KEY_INODE(k, op->inode);
-		SET_KEY_OFFSET(k, bio->bi_sector);
+		SET_KEY_OFFSET(k, bio->bi_iter.bi_sector);
 
 		if (!bch_alloc_sectors(op->c, k, bio_sectors(bio),
 				       op->write_point, op->write_prio,
 				       op->writeback))
 			goto err;
 
-		n = bch_bio_split(bio, KEY_SIZE(k), GFP_NOIO, split);
+		n = bio_next_split(bio, KEY_SIZE(k), GFP_NOIO, split);
 
 		n->bi_end_io	= bch_data_insert_endio;
 		n->bi_private	= cl;
@@ -521,7 +539,7 @@
 	     (bio->bi_rw & REQ_WRITE)))
 		goto skip;
 
-	if (bio->bi_sector & (c->sb.block_size - 1) ||
+	if (bio->bi_iter.bi_sector & (c->sb.block_size - 1) ||
 	    bio_sectors(bio) & (c->sb.block_size - 1)) {
 		pr_debug("skipping unaligned io");
 		goto skip;
@@ -545,8 +563,8 @@
 
 	spin_lock(&dc->io_lock);
 
-	hlist_for_each_entry(i, iohash(dc, bio->bi_sector), hash)
-		if (i->last == bio->bi_sector &&
+	hlist_for_each_entry(i, iohash(dc, bio->bi_iter.bi_sector), hash)
+		if (i->last == bio->bi_iter.bi_sector &&
 		    time_before(jiffies, i->jiffies))
 			goto found;
 
@@ -555,8 +573,8 @@
 	add_sequential(task);
 	i->sequential = 0;
 found:
-	if (i->sequential + bio->bi_size > i->sequential)
-		i->sequential	+= bio->bi_size;
+	if (i->sequential + bio->bi_iter.bi_size > i->sequential)
+		i->sequential	+= bio->bi_iter.bi_size;
 
 	i->last			 = bio_end_sector(bio);
 	i->jiffies		 = jiffies + msecs_to_jiffies(5000);
@@ -596,16 +614,13 @@
 	/* Stack frame for bio_complete */
 	struct closure		cl;
 
-	struct bcache_device	*d;
-
 	struct bbio		bio;
 	struct bio		*orig_bio;
 	struct bio		*cache_miss;
+	struct bcache_device	*d;
 
 	unsigned		insert_bio_sectors;
-
 	unsigned		recoverable:1;
-	unsigned		unaligned_bvec:1;
 	unsigned		write:1;
 	unsigned		read_dirty_data:1;
 
@@ -630,7 +645,8 @@
 
 	if (error)
 		s->iop.error = error;
-	else if (ptr_stale(s->iop.c, &b->key, 0)) {
+	else if (!KEY_DIRTY(&b->key) &&
+		 ptr_stale(s->iop.c, &b->key, 0)) {
 		atomic_long_inc(&s->iop.c->cache_read_races);
 		s->iop.error = -EINTR;
 	}
@@ -649,15 +665,15 @@
 	struct bkey *bio_key;
 	unsigned ptr;
 
-	if (bkey_cmp(k, &KEY(s->iop.inode, bio->bi_sector, 0)) <= 0)
+	if (bkey_cmp(k, &KEY(s->iop.inode, bio->bi_iter.bi_sector, 0)) <= 0)
 		return MAP_CONTINUE;
 
 	if (KEY_INODE(k) != s->iop.inode ||
-	    KEY_START(k) > bio->bi_sector) {
+	    KEY_START(k) > bio->bi_iter.bi_sector) {
 		unsigned bio_sectors = bio_sectors(bio);
 		unsigned sectors = KEY_INODE(k) == s->iop.inode
 			? min_t(uint64_t, INT_MAX,
-				KEY_START(k) - bio->bi_sector)
+				KEY_START(k) - bio->bi_iter.bi_sector)
 			: INT_MAX;
 
 		int ret = s->d->cache_miss(b, s, bio, sectors);
@@ -679,14 +695,14 @@
 	if (KEY_DIRTY(k))
 		s->read_dirty_data = true;
 
-	n = bch_bio_split(bio, min_t(uint64_t, INT_MAX,
-				     KEY_OFFSET(k) - bio->bi_sector),
-			  GFP_NOIO, s->d->bio_split);
+	n = bio_next_split(bio, min_t(uint64_t, INT_MAX,
+				      KEY_OFFSET(k) - bio->bi_iter.bi_sector),
+			   GFP_NOIO, s->d->bio_split);
 
 	bio_key = &container_of(n, struct bbio, bio)->key;
 	bch_bkey_copy_single_ptr(bio_key, k, ptr);
 
-	bch_cut_front(&KEY(s->iop.inode, n->bi_sector, 0), bio_key);
+	bch_cut_front(&KEY(s->iop.inode, n->bi_iter.bi_sector, 0), bio_key);
 	bch_cut_back(&KEY(s->iop.inode, bio_end_sector(n), 0), bio_key);
 
 	n->bi_end_io	= bch_cache_read_endio;
@@ -711,10 +727,13 @@
 {
 	struct search *s = container_of(cl, struct search, iop.cl);
 	struct bio *bio = &s->bio.bio;
+	int ret;
 
-	int ret = bch_btree_map_keys(&s->op, s->iop.c,
-				     &KEY(s->iop.inode, bio->bi_sector, 0),
-				     cache_lookup_fn, MAP_END_KEY);
+	bch_btree_op_init(&s->op, -1);
+
+	ret = bch_btree_map_keys(&s->op, s->iop.c,
+				 &KEY(s->iop.inode, bio->bi_iter.bi_sector, 0),
+				 cache_lookup_fn, MAP_END_KEY);
 	if (ret == -EAGAIN)
 		continue_at(cl, cache_lookup, bcache_wq);
 
@@ -755,13 +774,15 @@
 	}
 }
 
-static void do_bio_hook(struct search *s)
+static void do_bio_hook(struct search *s, struct bio *orig_bio)
 {
 	struct bio *bio = &s->bio.bio;
-	memcpy(bio, s->orig_bio, sizeof(struct bio));
 
+	bio_init(bio);
+	__bio_clone_fast(bio, orig_bio);
 	bio->bi_end_io		= request_endio;
 	bio->bi_private		= &s->cl;
+
 	atomic_set(&bio->bi_cnt, 3);
 }
 
@@ -773,43 +794,36 @@
 	if (s->iop.bio)
 		bio_put(s->iop.bio);
 
-	if (s->unaligned_bvec)
-		mempool_free(s->bio.bio.bi_io_vec, s->d->unaligned_bvec);
-
 	closure_debug_destroy(cl);
 	mempool_free(s, s->d->c->search);
 }
 
-static struct search *search_alloc(struct bio *bio, struct bcache_device *d)
+static inline struct search *search_alloc(struct bio *bio,
+					  struct bcache_device *d)
 {
 	struct search *s;
-	struct bio_vec *bv;
 
 	s = mempool_alloc(d->c->search, GFP_NOIO);
-	memset(s, 0, offsetof(struct search, iop.insert_keys));
 
-	__closure_init(&s->cl, NULL);
+	closure_init(&s->cl, NULL);
+	do_bio_hook(s, bio);
 
-	s->iop.inode		= d->id;
-	s->iop.c		= d->c;
-	s->d			= d;
-	s->op.lock		= -1;
-	s->iop.write_point	= hash_long((unsigned long) current, 16);
 	s->orig_bio		= bio;
-	s->write		= (bio->bi_rw & REQ_WRITE) != 0;
-	s->iop.flush_journal	= (bio->bi_rw & (REQ_FLUSH|REQ_FUA)) != 0;
+	s->cache_miss		= NULL;
+	s->d			= d;
 	s->recoverable		= 1;
+	s->write		= (bio->bi_rw & REQ_WRITE) != 0;
+	s->read_dirty_data	= 0;
 	s->start_time		= jiffies;
-	do_bio_hook(s);
 
-	if (bio->bi_size != bio_segments(bio) * PAGE_SIZE) {
-		bv = mempool_alloc(d->unaligned_bvec, GFP_NOIO);
-		memcpy(bv, bio_iovec(bio),
-		       sizeof(struct bio_vec) * bio_segments(bio));
-
-		s->bio.bio.bi_io_vec	= bv;
-		s->unaligned_bvec	= 1;
-	}
+	s->iop.c		= d->c;
+	s->iop.bio		= NULL;
+	s->iop.inode		= d->id;
+	s->iop.write_point	= hash_long((unsigned long) current, 16);
+	s->iop.write_prio	= 0;
+	s->iop.error		= 0;
+	s->iop.flags		= 0;
+	s->iop.flush_journal	= (bio->bi_rw & (REQ_FLUSH|REQ_FUA)) != 0;
 
 	return s;
 }
@@ -849,26 +863,13 @@
 {
 	struct search *s = container_of(cl, struct search, cl);
 	struct bio *bio = &s->bio.bio;
-	struct bio_vec *bv;
-	int i;
 
 	if (s->recoverable) {
 		/* Retry from the backing device: */
 		trace_bcache_read_retry(s->orig_bio);
 
 		s->iop.error = 0;
-		bv = s->bio.bio.bi_io_vec;
-		do_bio_hook(s);
-		s->bio.bio.bi_io_vec = bv;
-
-		if (!s->unaligned_bvec)
-			bio_for_each_segment(bv, s->orig_bio, i)
-				bv->bv_offset = 0, bv->bv_len = PAGE_SIZE;
-		else
-			memcpy(s->bio.bio.bi_io_vec,
-			       bio_iovec(s->orig_bio),
-			       sizeof(struct bio_vec) *
-			       bio_segments(s->orig_bio));
+		do_bio_hook(s, s->orig_bio);
 
 		/* XXX: invalidate cache */
 
@@ -893,9 +894,9 @@
 
 	if (s->iop.bio) {
 		bio_reset(s->iop.bio);
-		s->iop.bio->bi_sector = s->cache_miss->bi_sector;
+		s->iop.bio->bi_iter.bi_sector = s->cache_miss->bi_iter.bi_sector;
 		s->iop.bio->bi_bdev = s->cache_miss->bi_bdev;
-		s->iop.bio->bi_size = s->insert_bio_sectors << 9;
+		s->iop.bio->bi_iter.bi_size = s->insert_bio_sectors << 9;
 		bch_bio_map(s->iop.bio, NULL);
 
 		bio_copy_data(s->cache_miss, s->iop.bio);
@@ -904,8 +905,7 @@
 		s->cache_miss = NULL;
 	}
 
-	if (verify(dc, &s->bio.bio) && s->recoverable &&
-	    !s->unaligned_bvec && !s->read_dirty_data)
+	if (verify(dc, &s->bio.bio) && s->recoverable && !s->read_dirty_data)
 		bch_data_verify(dc, s->orig_bio);
 
 	bio_complete(s);
@@ -945,7 +945,7 @@
 	struct bio *miss, *cache_bio;
 
 	if (s->cache_miss || s->iop.bypass) {
-		miss = bch_bio_split(bio, sectors, GFP_NOIO, s->d->bio_split);
+		miss = bio_next_split(bio, sectors, GFP_NOIO, s->d->bio_split);
 		ret = miss == bio ? MAP_DONE : MAP_CONTINUE;
 		goto out_submit;
 	}
@@ -959,7 +959,7 @@
 	s->insert_bio_sectors = min(sectors, bio_sectors(bio) + reada);
 
 	s->iop.replace_key = KEY(s->iop.inode,
-				 bio->bi_sector + s->insert_bio_sectors,
+				 bio->bi_iter.bi_sector + s->insert_bio_sectors,
 				 s->insert_bio_sectors);
 
 	ret = bch_btree_insert_check_key(b, &s->op, &s->iop.replace_key);
@@ -968,7 +968,7 @@
 
 	s->iop.replace = true;
 
-	miss = bch_bio_split(bio, sectors, GFP_NOIO, s->d->bio_split);
+	miss = bio_next_split(bio, sectors, GFP_NOIO, s->d->bio_split);
 
 	/* btree_search_recurse()'s btree iterator is no good anymore */
 	ret = miss == bio ? MAP_DONE : -EINTR;
@@ -979,9 +979,9 @@
 	if (!cache_bio)
 		goto out_submit;
 
-	cache_bio->bi_sector	= miss->bi_sector;
-	cache_bio->bi_bdev	= miss->bi_bdev;
-	cache_bio->bi_size	= s->insert_bio_sectors << 9;
+	cache_bio->bi_iter.bi_sector	= miss->bi_iter.bi_sector;
+	cache_bio->bi_bdev		= miss->bi_bdev;
+	cache_bio->bi_iter.bi_size	= s->insert_bio_sectors << 9;
 
 	cache_bio->bi_end_io	= request_endio;
 	cache_bio->bi_private	= &s->cl;
@@ -1031,7 +1031,7 @@
 {
 	struct closure *cl = &s->cl;
 	struct bio *bio = &s->bio.bio;
-	struct bkey start = KEY(dc->disk.id, bio->bi_sector, 0);
+	struct bkey start = KEY(dc->disk.id, bio->bi_iter.bi_sector, 0);
 	struct bkey end = KEY(dc->disk.id, bio_end_sector(bio), 0);
 
 	bch_keybuf_check_overlapping(&s->iop.c->moving_gc_keys, &start, &end);
@@ -1087,8 +1087,7 @@
 			closure_bio_submit(flush, cl, s->d);
 		}
 	} else {
-		s->iop.bio = bio_clone_bioset(bio, GFP_NOIO,
-					      dc->disk.bio_split);
+		s->iop.bio = bio_clone_fast(bio, GFP_NOIO, dc->disk.bio_split);
 
 		closure_bio_submit(bio, cl, s->d);
 	}
@@ -1126,13 +1125,13 @@
 	part_stat_unlock();
 
 	bio->bi_bdev = dc->bdev;
-	bio->bi_sector += dc->sb.data_offset;
+	bio->bi_iter.bi_sector += dc->sb.data_offset;
 
 	if (cached_dev_get(dc)) {
 		s = search_alloc(bio, d);
 		trace_bcache_request_start(s->d, bio);
 
-		if (!bio->bi_size) {
+		if (!bio->bi_iter.bi_size) {
 			/*
 			 * can't call bch_journal_meta from under
 			 * generic_make_request
@@ -1204,24 +1203,24 @@
 static int flash_dev_cache_miss(struct btree *b, struct search *s,
 				struct bio *bio, unsigned sectors)
 {
-	struct bio_vec *bv;
-	int i;
+	struct bio_vec bv;
+	struct bvec_iter iter;
 
 	/* Zero fill bio */
 
-	bio_for_each_segment(bv, bio, i) {
-		unsigned j = min(bv->bv_len >> 9, sectors);
+	bio_for_each_segment(bv, bio, iter) {
+		unsigned j = min(bv.bv_len >> 9, sectors);
 
-		void *p = kmap(bv->bv_page);
-		memset(p + bv->bv_offset, 0, j << 9);
-		kunmap(bv->bv_page);
+		void *p = kmap(bv.bv_page);
+		memset(p + bv.bv_offset, 0, j << 9);
+		kunmap(bv.bv_page);
 
 		sectors	-= j;
 	}
 
-	bio_advance(bio, min(sectors << 9, bio->bi_size));
+	bio_advance(bio, min(sectors << 9, bio->bi_iter.bi_size));
 
-	if (!bio->bi_size)
+	if (!bio->bi_iter.bi_size)
 		return MAP_DONE;
 
 	return MAP_CONTINUE;
@@ -1255,7 +1254,7 @@
 
 	trace_bcache_request_start(s->d, bio);
 
-	if (!bio->bi_size) {
+	if (!bio->bi_iter.bi_size) {
 		/*
 		 * can't call bch_journal_meta from under
 		 * generic_make_request
@@ -1265,7 +1264,7 @@
 				      bcache_wq);
 	} else if (rw) {
 		bch_keybuf_check_overlapping(&s->iop.c->moving_gc_keys,
-					&KEY(d->id, bio->bi_sector, 0),
+					&KEY(d->id, bio->bi_iter.bi_sector, 0),
 					&KEY(d->id, bio_end_sector(bio), 0));
 
 		s->iop.bypass		= (bio->bi_rw & REQ_DISCARD) != 0;
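
The hunks above are the immutable-biovec conversion in miniature: a bio's position now
lives in bio->bi_iter (bi_sector, bi_size), and segment iteration hands out a struct
bio_vec by value while keeping its state in a caller-owned struct bvec_iter, as in the
reworked flash_dev_cache_miss(). A minimal sketch of that iteration pattern, using only
the 3.14 API visible in this patch (the function name is made up for illustration):

	#include <linux/bio.h>
	#include <linux/highmem.h>
	#include <linux/string.h>

	/* Illustrative only: zero every segment of a bio with the new iterator. */
	static void example_zero_bio(struct bio *bio)
	{
		struct bio_vec bv;	/* each segment is copied out by value */
		struct bvec_iter iter;	/* iteration state lives here, not in the bio */

		bio_for_each_segment(bv, bio, iter) {
			void *p = kmap(bv.bv_page);

			memset(p + bv.bv_offset, 0, bv.bv_len);
			kunmap(bv.bv_page);
		}
	}

Because the iterator is just a value on the caller's stack, the same bio can be walked
again later, which is why the old bio_vec save/restore around do_bio_hook() could be
dropped above.
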
diff --git a/drivers/md/bcache/request.h b/drivers/md/bcache/request.h
index 2cd65bf..39f21db 100644
--- a/drivers/md/bcache/request.h
+++ b/drivers/md/bcache/request.h
@@ -13,17 +13,22 @@
 	uint16_t		write_prio;
 	short			error;
 
-	unsigned		bypass:1;
-	unsigned		writeback:1;
-	unsigned		flush_journal:1;
-	unsigned		csum:1;
+	union {
+		uint16_t	flags;
 
-	unsigned		replace:1;
-	unsigned		replace_collision:1;
+	struct {
+		unsigned	bypass:1;
+		unsigned	writeback:1;
+		unsigned	flush_journal:1;
+		unsigned	csum:1;
 
-	unsigned		insert_data_done:1;
+		unsigned	replace:1;
+		unsigned	replace_collision:1;
 
-	/* Anything past this point won't get zeroed in search_alloc() */
+		unsigned	insert_data_done:1;
+	};
+	};
+
 	struct keylist		insert_keys;
 	BKEY_PADDED(replace_key);
 };
diff --git a/drivers/md/bcache/super.c b/drivers/md/bcache/super.c
index c57bfa0..24a3a15 100644
--- a/drivers/md/bcache/super.c
+++ b/drivers/md/bcache/super.c
@@ -9,6 +9,7 @@
 #include "bcache.h"
 #include "btree.h"
 #include "debug.h"
+#include "extents.h"
 #include "request.h"
 #include "writeback.h"
 
@@ -225,7 +226,7 @@
 	struct cached_dev *dc = bio->bi_private;
 	/* XXX: error checking */
 
-	closure_put(&dc->sb_write.cl);
+	closure_put(&dc->sb_write);
 }
 
 static void __write_super(struct cache_sb *sb, struct bio *bio)
@@ -233,9 +234,9 @@
 	struct cache_sb *out = page_address(bio->bi_io_vec[0].bv_page);
 	unsigned i;
 
-	bio->bi_sector	= SB_SECTOR;
-	bio->bi_rw	= REQ_SYNC|REQ_META;
-	bio->bi_size	= SB_SIZE;
+	bio->bi_iter.bi_sector	= SB_SECTOR;
+	bio->bi_rw		= REQ_SYNC|REQ_META;
+	bio->bi_iter.bi_size	= SB_SIZE;
 	bch_bio_map(bio, NULL);
 
 	out->offset		= cpu_to_le64(sb->offset);
@@ -263,12 +264,20 @@
 	submit_bio(REQ_WRITE, bio);
 }
 
+static void bch_write_bdev_super_unlock(struct closure *cl)
+{
+	struct cached_dev *dc = container_of(cl, struct cached_dev, sb_write);
+
+	up(&dc->sb_write_mutex);
+}
+
 void bch_write_bdev_super(struct cached_dev *dc, struct closure *parent)
 {
-	struct closure *cl = &dc->sb_write.cl;
+	struct closure *cl = &dc->sb_write;
 	struct bio *bio = &dc->sb_bio;
 
-	closure_lock(&dc->sb_write, parent);
+	down(&dc->sb_write_mutex);
+	closure_init(cl, parent);
 
 	bio_reset(bio);
 	bio->bi_bdev	= dc->bdev;
@@ -278,7 +287,7 @@
 	closure_get(cl);
 	__write_super(&dc->sb, bio);
 
-	closure_return(cl);
+	closure_return_with_destructor(cl, bch_write_bdev_super_unlock);
 }
 
 static void write_super_endio(struct bio *bio, int error)
@@ -286,16 +295,24 @@
 	struct cache *ca = bio->bi_private;
 
 	bch_count_io_errors(ca, error, "writing superblock");
-	closure_put(&ca->set->sb_write.cl);
+	closure_put(&ca->set->sb_write);
+}
+
+static void bcache_write_super_unlock(struct closure *cl)
+{
+	struct cache_set *c = container_of(cl, struct cache_set, sb_write);
+
+	up(&c->sb_write_mutex);
 }
 
 void bcache_write_super(struct cache_set *c)
 {
-	struct closure *cl = &c->sb_write.cl;
+	struct closure *cl = &c->sb_write;
 	struct cache *ca;
 	unsigned i;
 
-	closure_lock(&c->sb_write, &c->cl);
+	down(&c->sb_write_mutex);
+	closure_init(cl, &c->cl);
 
 	c->sb.seq++;
 
@@ -317,7 +334,7 @@
 		__write_super(&ca->sb, bio);
 	}
 
-	closure_return(cl);
+	closure_return_with_destructor(cl, bcache_write_super_unlock);
 }
 
 /* UUID io */
@@ -325,29 +342,37 @@
 static void uuid_endio(struct bio *bio, int error)
 {
 	struct closure *cl = bio->bi_private;
-	struct cache_set *c = container_of(cl, struct cache_set, uuid_write.cl);
+	struct cache_set *c = container_of(cl, struct cache_set, uuid_write);
 
 	cache_set_err_on(error, c, "accessing uuids");
 	bch_bbio_free(bio, c);
 	closure_put(cl);
 }
 
+static void uuid_io_unlock(struct closure *cl)
+{
+	struct cache_set *c = container_of(cl, struct cache_set, uuid_write);
+
+	up(&c->uuid_write_mutex);
+}
+
 static void uuid_io(struct cache_set *c, unsigned long rw,
 		    struct bkey *k, struct closure *parent)
 {
-	struct closure *cl = &c->uuid_write.cl;
+	struct closure *cl = &c->uuid_write;
 	struct uuid_entry *u;
 	unsigned i;
 	char buf[80];
 
 	BUG_ON(!parent);
-	closure_lock(&c->uuid_write, parent);
+	down(&c->uuid_write_mutex);
+	closure_init(cl, parent);
 
 	for (i = 0; i < KEY_PTRS(k); i++) {
 		struct bio *bio = bch_bbio_alloc(c);
 
 		bio->bi_rw	= REQ_SYNC|REQ_META|rw;
-		bio->bi_size	= KEY_SIZE(k) << 9;
+		bio->bi_iter.bi_size = KEY_SIZE(k) << 9;
 
 		bio->bi_end_io	= uuid_endio;
 		bio->bi_private = cl;
@@ -359,7 +384,7 @@
 			break;
 	}
 
-	bch_bkey_to_text(buf, sizeof(buf), k);
+	bch_extent_to_text(buf, sizeof(buf), k);
 	pr_debug("%s UUIDs at %s", rw & REQ_WRITE ? "wrote" : "read", buf);
 
 	for (u = c->uuids; u < c->uuids + c->nr_uuids; u++)
@@ -368,14 +393,14 @@
 				 u - c->uuids, u->uuid, u->label,
 				 u->first_reg, u->last_reg, u->invalidated);
 
-	closure_return(cl);
+	closure_return_with_destructor(cl, uuid_io_unlock);
 }
 
 static char *uuid_read(struct cache_set *c, struct jset *j, struct closure *cl)
 {
 	struct bkey *k = &j->uuid_bucket;
 
-	if (bch_btree_ptr_invalid(c, k))
+	if (__bch_btree_ptr_invalid(c, k))
 		return "bad uuid pointer";
 
 	bkey_copy(&c->uuid_bucket, k);
@@ -420,7 +445,7 @@
 
 	lockdep_assert_held(&bch_register_lock);
 
-	if (bch_bucket_alloc_set(c, WATERMARK_METADATA, &k.key, 1, true))
+	if (bch_bucket_alloc_set(c, RESERVE_BTREE, &k.key, 1, true))
 		return 1;
 
 	SET_KEY_SIZE(&k.key, c->sb.bucket_size);
@@ -503,10 +528,10 @@
 
 	closure_init_stack(cl);
 
-	bio->bi_sector	= bucket * ca->sb.bucket_size;
-	bio->bi_bdev	= ca->bdev;
-	bio->bi_rw	= REQ_SYNC|REQ_META|rw;
-	bio->bi_size	= bucket_bytes(ca);
+	bio->bi_iter.bi_sector	= bucket * ca->sb.bucket_size;
+	bio->bi_bdev		= ca->bdev;
+	bio->bi_rw		= REQ_SYNC|REQ_META|rw;
+	bio->bi_iter.bi_size	= bucket_bytes(ca);
 
 	bio->bi_end_io	= prio_endio;
 	bio->bi_private = ca;
@@ -538,8 +563,8 @@
 	atomic_long_add(ca->sb.bucket_size * prio_buckets(ca),
 			&ca->meta_sectors_written);
 
-	pr_debug("free %zu, free_inc %zu, unused %zu", fifo_used(&ca->free),
-		 fifo_used(&ca->free_inc), fifo_used(&ca->unused));
+	//pr_debug("free %zu, free_inc %zu, unused %zu", fifo_used(&ca->free),
+	//	 fifo_used(&ca->free_inc), fifo_used(&ca->unused));
 
 	for (i = prio_buckets(ca) - 1; i >= 0; --i) {
 		long bucket;
@@ -558,7 +583,7 @@
 		p->magic	= pset_magic(&ca->sb);
 		p->csum		= bch_crc64(&p->magic, bucket_bytes(ca) - 8);
 
-		bucket = bch_bucket_alloc(ca, WATERMARK_PRIO, true);
+		bucket = bch_bucket_alloc(ca, RESERVE_PRIO, true);
 		BUG_ON(bucket == -1);
 
 		mutex_unlock(&ca->set->bucket_lock);
@@ -739,8 +764,6 @@
 	}
 
 	bio_split_pool_free(&d->bio_split_hook);
-	if (d->unaligned_bvec)
-		mempool_destroy(d->unaligned_bvec);
 	if (d->bio_split)
 		bioset_free(d->bio_split);
 	if (is_vmalloc_addr(d->full_dirty_stripes))
@@ -793,8 +816,6 @@
 		return minor;
 
 	if (!(d->bio_split = bioset_create(4, offsetof(struct bbio, bio))) ||
-	    !(d->unaligned_bvec = mempool_create_kmalloc_pool(1,
-				sizeof(struct bio_vec) * BIO_MAX_PAGES)) ||
 	    bio_split_pool_init(&d->bio_split_hook) ||
 	    !(d->disk = alloc_disk(1))) {
 		ida_simple_remove(&bcache_minor, minor);
@@ -1102,7 +1123,7 @@
 	set_closure_fn(&dc->disk.cl, cached_dev_flush, system_wq);
 	kobject_init(&dc->disk.kobj, &bch_cached_dev_ktype);
 	INIT_WORK(&dc->detach, cached_dev_detach_finish);
-	closure_init_unlocked(&dc->sb_write);
+	sema_init(&dc->sb_write_mutex, 1);
 	INIT_LIST_HEAD(&dc->io_lru);
 	spin_lock_init(&dc->io_lock);
 	bch_cache_accounting_init(&dc->accounting, &dc->disk.cl);
@@ -1114,6 +1135,12 @@
 		hlist_add_head(&io->hash, dc->io_hash + RECENT_IO);
 	}
 
+	dc->disk.stripe_size = q->limits.io_opt >> 9;
+
+	if (dc->disk.stripe_size)
+		dc->partial_stripes_expensive =
+			q->limits.raid_partial_stripes_expensive;
+
 	ret = bcache_device_init(&dc->disk, block_size,
 			 dc->bdev->bd_part->nr_sects - dc->sb.data_offset);
 	if (ret)
@@ -1325,8 +1352,8 @@
 		if (ca)
 			kobject_put(&ca->kobj);
 
+	bch_bset_sort_state_free(&c->sort);
 	free_pages((unsigned long) c->uuids, ilog2(bucket_pages(c)));
-	free_pages((unsigned long) c->sort, ilog2(bucket_pages(c)));
 
 	if (c->bio_split)
 		bioset_free(c->bio_split);
@@ -1451,21 +1478,17 @@
 	c->block_bits		= ilog2(sb->block_size);
 	c->nr_uuids		= bucket_bytes(c) / sizeof(struct uuid_entry);
 
-	c->btree_pages		= c->sb.bucket_size / PAGE_SECTORS;
+	c->btree_pages		= bucket_pages(c);
 	if (c->btree_pages > BTREE_MAX_PAGES)
 		c->btree_pages = max_t(int, c->btree_pages / 4,
 				       BTREE_MAX_PAGES);
 
-	c->sort_crit_factor = int_sqrt(c->btree_pages);
-
-	closure_init_unlocked(&c->sb_write);
+	sema_init(&c->sb_write_mutex, 1);
 	mutex_init(&c->bucket_lock);
 	init_waitqueue_head(&c->try_wait);
 	init_waitqueue_head(&c->bucket_wait);
-	closure_init_unlocked(&c->uuid_write);
-	mutex_init(&c->sort_lock);
+	sema_init(&c->uuid_write_mutex, 1);
 
-	spin_lock_init(&c->sort_time.lock);
 	spin_lock_init(&c->btree_gc_time.lock);
 	spin_lock_init(&c->btree_split_time.lock);
 	spin_lock_init(&c->btree_read_time.lock);
@@ -1493,11 +1516,11 @@
 				bucket_pages(c))) ||
 	    !(c->fill_iter = mempool_create_kmalloc_pool(1, iter_size)) ||
 	    !(c->bio_split = bioset_create(4, offsetof(struct bbio, bio))) ||
-	    !(c->sort = alloc_bucket_pages(GFP_KERNEL, c)) ||
 	    !(c->uuids = alloc_bucket_pages(GFP_KERNEL, c)) ||
 	    bch_journal_alloc(c) ||
 	    bch_btree_cache_alloc(c) ||
-	    bch_open_buckets_alloc(c))
+	    bch_open_buckets_alloc(c) ||
+	    bch_bset_sort_state_init(&c->sort, ilog2(c->btree_pages)))
 		goto err;
 
 	c->congested_read_threshold_us	= 2000;
@@ -1553,7 +1576,7 @@
 		k = &j->btree_root;
 
 		err = "bad btree root";
-		if (bch_btree_ptr_invalid(c, k))
+		if (__bch_btree_ptr_invalid(c, k))
 			goto err;
 
 		err = "error reading btree root";
@@ -1747,6 +1770,7 @@
 void bch_cache_release(struct kobject *kobj)
 {
 	struct cache *ca = container_of(kobj, struct cache, kobj);
+	unsigned i;
 
 	if (ca->set)
 		ca->set->cache[ca->sb.nr_this_dev] = NULL;
@@ -1760,7 +1784,9 @@
 	free_heap(&ca->heap);
 	free_fifo(&ca->unused);
 	free_fifo(&ca->free_inc);
-	free_fifo(&ca->free);
+
+	for (i = 0; i < RESERVE_NR; i++)
+		free_fifo(&ca->free[i]);
 
 	if (ca->sb_bio.bi_inline_vecs[0].bv_page)
 		put_page(ca->sb_bio.bi_io_vec[0].bv_page);
@@ -1786,10 +1812,12 @@
 	ca->journal.bio.bi_max_vecs = 8;
 	ca->journal.bio.bi_io_vec = ca->journal.bio.bi_inline_vecs;
 
-	free = roundup_pow_of_two(ca->sb.nbuckets) >> 9;
-	free = max_t(size_t, free, (prio_buckets(ca) + 8) * 2);
+	free = roundup_pow_of_two(ca->sb.nbuckets) >> 10;
 
-	if (!init_fifo(&ca->free,	free, GFP_KERNEL) ||
+	if (!init_fifo(&ca->free[RESERVE_BTREE], 8, GFP_KERNEL) ||
+	    !init_fifo(&ca->free[RESERVE_PRIO], prio_buckets(ca), GFP_KERNEL) ||
+	    !init_fifo(&ca->free[RESERVE_MOVINGGC], free, GFP_KERNEL) ||
+	    !init_fifo(&ca->free[RESERVE_NONE], free, GFP_KERNEL) ||
 	    !init_fifo(&ca->free_inc,	free << 2, GFP_KERNEL) ||
 	    !init_fifo(&ca->unused,	free << 2, GFP_KERNEL) ||
 	    !init_heap(&ca->heap,	free << 3, GFP_KERNEL) ||
@@ -2034,7 +2062,8 @@
 		kobject_put(bcache_kobj);
 	if (bcache_wq)
 		destroy_workqueue(bcache_wq);
-	unregister_blkdev(bcache_major, "bcache");
+	if (bcache_major)
+		unregister_blkdev(bcache_major, "bcache");
 	unregister_reboot_notifier(&reboot);
 }
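
The sb_write and uuid_write changes above all follow one pattern: the old
closure_lock()/closure_return() pair becomes an ordinary semaphore taken before
closure_init() and released from a destructor handed to
closure_return_with_destructor(). A hedged sketch of that shape with made-up names
(example_*); the real instances are bch_write_bdev_super(), bcache_write_super() and
uuid_io():

	#include <linux/semaphore.h>
	#include "closure.h"			/* bcache-local closure API */

	/* Hypothetical device with one serialized metadata writer. */
	struct example_dev {
		struct semaphore	write_mutex;	/* sema_init(&write_mutex, 1) at setup */
		struct closure		write_cl;
	};

	static void example_write_unlock(struct closure *cl)
	{
		struct example_dev *d = container_of(cl, struct example_dev, write_cl);

		up(&d->write_mutex);	/* runs once every child closure has finished */
	}

	static void example_write(struct example_dev *d, struct closure *parent)
	{
		struct closure *cl = &d->write_cl;

		down(&d->write_mutex);		/* what closure_lock() used to provide */
		closure_init(cl, parent);

		/* take refs with closure_get() and submit the bios here ... */

		closure_return_with_destructor(cl, example_write_unlock);
	}
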
 
diff --git a/drivers/md/bcache/sysfs.c b/drivers/md/bcache/sysfs.c
index a1f8561..d8458d4 100644
--- a/drivers/md/bcache/sysfs.c
+++ b/drivers/md/bcache/sysfs.c
@@ -102,7 +102,6 @@
 rw_attribute(key_merging_disabled);
 rw_attribute(gc_always_rewrite);
 rw_attribute(expensive_debug_checks);
-rw_attribute(freelist_percent);
 rw_attribute(cache_replacement_policy);
 rw_attribute(btree_shrinker_disabled);
 rw_attribute(copy_gc_enabled);
@@ -401,6 +400,48 @@
 };
 KTYPE(bch_flash_dev);
 
+struct bset_stats_op {
+	struct btree_op op;
+	size_t nodes;
+	struct bset_stats stats;
+};
+
+static int btree_bset_stats(struct btree_op *b_op, struct btree *b)
+{
+	struct bset_stats_op *op = container_of(b_op, struct bset_stats_op, op);
+
+	op->nodes++;
+	bch_btree_keys_stats(&b->keys, &op->stats);
+
+	return MAP_CONTINUE;
+}
+
+static int bch_bset_print_stats(struct cache_set *c, char *buf)
+{
+	struct bset_stats_op op;
+	int ret;
+
+	memset(&op, 0, sizeof(op));
+	bch_btree_op_init(&op.op, -1);
+
+	ret = bch_btree_map_nodes(&op.op, c, &ZERO_KEY, btree_bset_stats);
+	if (ret < 0)
+		return ret;
+
+	return snprintf(buf, PAGE_SIZE,
+			"btree nodes:		%zu\n"
+			"written sets:		%zu\n"
+			"unwritten sets:		%zu\n"
+			"written key bytes:	%zu\n"
+			"unwritten key bytes:	%zu\n"
+			"floats:			%zu\n"
+			"failed:			%zu\n",
+			op.nodes,
+			op.stats.sets_written, op.stats.sets_unwritten,
+			op.stats.bytes_written, op.stats.bytes_unwritten,
+			op.stats.floats, op.stats.failed);
+}
+
 SHOW(__bch_cache_set)
 {
 	unsigned root_usage(struct cache_set *c)
@@ -419,7 +460,7 @@
 			rw_lock(false, b, b->level);
 		} while (b != c->root);
 
-		for_each_key_filter(b, k, &iter, bch_ptr_bad)
+		for_each_key_filter(&b->keys, k, &iter, bch_ptr_bad)
 			bytes += bkey_bytes(k);
 
 		rw_unlock(false, b);
@@ -434,7 +475,7 @@
 
 		mutex_lock(&c->bucket_lock);
 		list_for_each_entry(b, &c->btree_cache, list)
-			ret += 1 << (b->page_order + PAGE_SHIFT);
+			ret += 1 << (b->keys.page_order + PAGE_SHIFT);
 
 		mutex_unlock(&c->bucket_lock);
 		return ret;
@@ -491,7 +532,7 @@
 
 	sysfs_print_time_stats(&c->btree_gc_time,	btree_gc, sec, ms);
 	sysfs_print_time_stats(&c->btree_split_time,	btree_split, sec, us);
-	sysfs_print_time_stats(&c->sort_time,		btree_sort, ms, us);
+	sysfs_print_time_stats(&c->sort.time,		btree_sort, ms, us);
 	sysfs_print_time_stats(&c->btree_read_time,	btree_read, ms, us);
 	sysfs_print_time_stats(&c->try_harder_time,	try_harder, ms, us);
 
@@ -711,9 +752,6 @@
 	sysfs_print(io_errors,
 		    atomic_read(&ca->io_errors) >> IO_ERROR_SHIFT);
 
-	sysfs_print(freelist_percent, ca->free.size * 100 /
-		    ((size_t) ca->sb.nbuckets));
-
 	if (attr == &sysfs_cache_replacement_policy)
 		return bch_snprint_string_list(buf, PAGE_SIZE,
 					       cache_replacement_policies,
@@ -820,32 +858,6 @@
 		}
 	}
 
-	if (attr == &sysfs_freelist_percent) {
-		DECLARE_FIFO(long, free);
-		long i;
-		size_t p = strtoul_or_return(buf);
-
-		p = clamp_t(size_t,
-			    ((size_t) ca->sb.nbuckets * p) / 100,
-			    roundup_pow_of_two(ca->sb.nbuckets) >> 9,
-			    ca->sb.nbuckets / 2);
-
-		if (!init_fifo_exact(&free, p, GFP_KERNEL))
-			return -ENOMEM;
-
-		mutex_lock(&ca->set->bucket_lock);
-
-		fifo_move(&free, &ca->free);
-		fifo_swap(&free, &ca->free);
-
-		mutex_unlock(&ca->set->bucket_lock);
-
-		while (fifo_pop(&free, i))
-			atomic_dec(&ca->buckets[i].pin);
-
-		free_fifo(&free);
-	}
-
 	if (attr == &sysfs_clear_stats) {
 		atomic_long_set(&ca->sectors_written, 0);
 		atomic_long_set(&ca->btree_sectors_written, 0);
@@ -869,7 +881,6 @@
 	&sysfs_metadata_written,
 	&sysfs_io_errors,
 	&sysfs_clear_stats,
-	&sysfs_freelist_percent,
 	&sysfs_cache_replacement_policy,
 	NULL
 };
diff --git a/drivers/md/bcache/util.c b/drivers/md/bcache/util.c
index bb37618..db3ae4c 100644
--- a/drivers/md/bcache/util.c
+++ b/drivers/md/bcache/util.c
@@ -224,10 +224,10 @@
 
 void bch_bio_map(struct bio *bio, void *base)
 {
-	size_t size = bio->bi_size;
+	size_t size = bio->bi_iter.bi_size;
 	struct bio_vec *bv = bio->bi_io_vec;
 
-	BUG_ON(!bio->bi_size);
+	BUG_ON(!bio->bi_iter.bi_size);
 	BUG_ON(bio->bi_vcnt);
 
 	bv->bv_offset = base ? ((unsigned long) base) % PAGE_SIZE : 0;
diff --git a/drivers/md/bcache/util.h b/drivers/md/bcache/util.h
index 1030c60..ac7d0d1 100644
--- a/drivers/md/bcache/util.h
+++ b/drivers/md/bcache/util.h
@@ -2,6 +2,7 @@
 #ifndef _BCACHE_UTIL_H
 #define _BCACHE_UTIL_H
 
+#include <linux/blkdev.h>
 #include <linux/errno.h>
 #include <linux/kernel.h>
 #include <linux/llist.h>
@@ -17,11 +18,13 @@
 
 #ifdef CONFIG_BCACHE_DEBUG
 
+#define EBUG_ON(cond)			BUG_ON(cond)
 #define atomic_dec_bug(v)	BUG_ON(atomic_dec_return(v) < 0)
 #define atomic_inc_bug(v, i)	BUG_ON(atomic_inc_return(v) <= i)
 
 #else /* DEBUG */
 
+#define EBUG_ON(cond)			do { if (cond); } while (0)
 #define atomic_dec_bug(v)	atomic_dec(v)
 #define atomic_inc_bug(v, i)	atomic_inc(v)
 
@@ -391,6 +394,11 @@
 
 void bch_time_stats_update(struct time_stats *stats, uint64_t time);
 
+static inline unsigned local_clock_us(void)
+{
+	return local_clock() >> 10;
+}
+
 #define NSEC_PER_ns			1L
 #define NSEC_PER_us			NSEC_PER_USEC
 #define NSEC_PER_ms			NSEC_PER_MSEC
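
Two small util.h additions deserve a note: EBUG_ON() only expands to BUG_ON() under
CONFIG_BCACHE_DEBUG, and local_clock_us() approximates microseconds by shifting the
nanosecond clock right by 10 bits. A shift divides by 1024 rather than 1000, so the
result reads about 2.3% low, e.g.:

	local_clock() = 1 000 000 ns   ->   1 000 000 >> 10 = 976 "us"   (exact: 1000)

presumably an acceptable trade for avoiding a 64-bit division on a hot path.
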
diff --git a/drivers/md/bcache/writeback.c b/drivers/md/bcache/writeback.c
index 6c44fe0..f4300e4 100644
--- a/drivers/md/bcache/writeback.c
+++ b/drivers/md/bcache/writeback.c
@@ -111,7 +111,7 @@
 	if (!io->dc->writeback_percent)
 		bio_set_prio(bio, IOPRIO_PRIO_VALUE(IOPRIO_CLASS_IDLE, 0));
 
-	bio->bi_size		= KEY_SIZE(&w->key) << 9;
+	bio->bi_iter.bi_size	= KEY_SIZE(&w->key) << 9;
 	bio->bi_max_vecs	= DIV_ROUND_UP(KEY_SIZE(&w->key), PAGE_SECTORS);
 	bio->bi_private		= w;
 	bio->bi_io_vec		= bio->bi_inline_vecs;
@@ -184,7 +184,7 @@
 
 	dirty_init(w);
 	io->bio.bi_rw		= WRITE;
-	io->bio.bi_sector	= KEY_START(&w->key);
+	io->bio.bi_iter.bi_sector = KEY_START(&w->key);
 	io->bio.bi_bdev		= io->dc->bdev;
 	io->bio.bi_end_io	= dirty_endio;
 
@@ -253,7 +253,7 @@
 		io->dc		= dc;
 
 		dirty_init(w);
-		io->bio.bi_sector	= PTR_OFFSET(&w->key, 0);
+		io->bio.bi_iter.bi_sector = PTR_OFFSET(&w->key, 0);
 		io->bio.bi_bdev		= PTR_CACHE(dc->disk.c,
 						    &w->key, 0)->bdev;
 		io->bio.bi_rw		= READ;
diff --git a/drivers/md/bcache/writeback.h b/drivers/md/bcache/writeback.h
index c9ddcf4..e2f8598 100644
--- a/drivers/md/bcache/writeback.h
+++ b/drivers/md/bcache/writeback.h
@@ -50,7 +50,7 @@
 		return false;
 
 	if (dc->partial_stripes_expensive &&
-	    bcache_dev_stripe_dirty(dc, bio->bi_sector,
+	    bcache_dev_stripe_dirty(dc, bio->bi_iter.bi_sector,
 				    bio_sectors(bio)))
 		return true;
 
diff --git a/drivers/md/dm-bio-record.h b/drivers/md/dm-bio-record.h
index 3a8cfa2..dd36461 100644
--- a/drivers/md/dm-bio-record.h
+++ b/drivers/md/dm-bio-record.h
@@ -17,55 +17,24 @@
  * original bio state.
  */
 
-struct dm_bio_vec_details {
-#if PAGE_SIZE < 65536
-	__u16 bv_len;
-	__u16 bv_offset;
-#else
-	unsigned bv_len;
-	unsigned bv_offset;
-#endif
-};
-
 struct dm_bio_details {
-	sector_t bi_sector;
 	struct block_device *bi_bdev;
-	unsigned int bi_size;
-	unsigned short bi_idx;
 	unsigned long bi_flags;
-	struct dm_bio_vec_details bi_io_vec[BIO_MAX_PAGES];
+	struct bvec_iter bi_iter;
 };
 
 static inline void dm_bio_record(struct dm_bio_details *bd, struct bio *bio)
 {
-	unsigned i;
-
-	bd->bi_sector = bio->bi_sector;
 	bd->bi_bdev = bio->bi_bdev;
-	bd->bi_size = bio->bi_size;
-	bd->bi_idx = bio->bi_idx;
 	bd->bi_flags = bio->bi_flags;
-
-	for (i = 0; i < bio->bi_vcnt; i++) {
-		bd->bi_io_vec[i].bv_len = bio->bi_io_vec[i].bv_len;
-		bd->bi_io_vec[i].bv_offset = bio->bi_io_vec[i].bv_offset;
-	}
+	bd->bi_iter = bio->bi_iter;
 }
 
 static inline void dm_bio_restore(struct dm_bio_details *bd, struct bio *bio)
 {
-	unsigned i;
-
-	bio->bi_sector = bd->bi_sector;
 	bio->bi_bdev = bd->bi_bdev;
-	bio->bi_size = bd->bi_size;
-	bio->bi_idx = bd->bi_idx;
 	bio->bi_flags = bd->bi_flags;
-
-	for (i = 0; i < bio->bi_vcnt; i++) {
-		bio->bi_io_vec[i].bv_len = bd->bi_io_vec[i].bv_len;
-		bio->bi_io_vec[i].bv_offset = bd->bi_io_vec[i].bv_offset;
-	}
+	bio->bi_iter = bd->bi_iter;
 }
 
 #endif
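
With the whole position folded into bi_iter, recording a bio for a possible retry no
longer needs a private copy of up to BIO_MAX_PAGES bio_vecs: dm_bio_record() and
dm_bio_restore() are now three field copies each. A minimal usage sketch of the usual
record-before-remap, restore-before-retry pattern; everything named example_* is
hypothetical:

	#include <linux/blkdev.h>
	#include "dm-bio-record.h"

	struct example_io {
		struct dm_bio_details details;	/* snapshot of the original bio */
	};

	static void example_submit(struct example_io *io, struct bio *bio)
	{
		dm_bio_record(&io->details, bio);	/* saves bi_bdev, bi_flags, bi_iter */
		/* ... remap bio->bi_bdev and bio->bi_iter.bi_sector ... */
		generic_make_request(bio);
	}

	/* Later, if the first attempt failed and a retry is wanted. */
	static void example_retry(struct example_io *io, struct bio *bio)
	{
		dm_bio_restore(&io->details, bio);	/* rewind to the recorded position */
		generic_make_request(bio);
	}
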
diff --git a/drivers/md/dm-bufio.c b/drivers/md/dm-bufio.c
index 9ed4212..66c5d13 100644
--- a/drivers/md/dm-bufio.c
+++ b/drivers/md/dm-bufio.c
@@ -540,7 +540,7 @@
 	bio_init(&b->bio);
 	b->bio.bi_io_vec = b->bio_vec;
 	b->bio.bi_max_vecs = DM_BUFIO_INLINE_VECS;
-	b->bio.bi_sector = block << b->c->sectors_per_block_bits;
+	b->bio.bi_iter.bi_sector = block << b->c->sectors_per_block_bits;
 	b->bio.bi_bdev = b->c->bdev;
 	b->bio.bi_end_io = end_io;
 
diff --git a/drivers/md/dm-cache-policy-mq.c b/drivers/md/dm-cache-policy-mq.c
index 930e8c3..1e018e9 100644
--- a/drivers/md/dm-cache-policy-mq.c
+++ b/drivers/md/dm-cache-policy-mq.c
@@ -72,7 +72,7 @@
 
 static void iot_update_stats(struct io_tracker *t, struct bio *bio)
 {
-	if (bio->bi_sector == from_oblock(t->last_end_oblock) + 1)
+	if (bio->bi_iter.bi_sector == from_oblock(t->last_end_oblock) + 1)
 		t->nr_seq_samples++;
 	else {
 		/*
@@ -87,7 +87,7 @@
 		t->nr_rand_samples++;
 	}
 
-	t->last_end_oblock = to_oblock(bio->bi_sector + bio_sectors(bio) - 1);
+	t->last_end_oblock = to_oblock(bio_end_sector(bio) - 1);
 }
 
 static void iot_check_for_pattern_switch(struct io_tracker *t)
diff --git a/drivers/md/dm-cache-target.c b/drivers/md/dm-cache-target.c
index 09334c2..ffd472e 100644
--- a/drivers/md/dm-cache-target.c
+++ b/drivers/md/dm-cache-target.c
@@ -85,6 +85,12 @@
 {
 	bio->bi_end_io = h->bi_end_io;
 	bio->bi_private = h->bi_private;
+
+	/*
+	 * Must bump bi_remaining to allow bio to complete with
+	 * restored bi_end_io.
+	 */
+	atomic_inc(&bio->bi_remaining);
 }
 
 /*----------------------------------------------------------------*/
@@ -664,15 +670,17 @@
 static void remap_to_cache(struct cache *cache, struct bio *bio,
 			   dm_cblock_t cblock)
 {
-	sector_t bi_sector = bio->bi_sector;
+	sector_t bi_sector = bio->bi_iter.bi_sector;
 
 	bio->bi_bdev = cache->cache_dev->bdev;
 	if (!block_size_is_power_of_two(cache))
-		bio->bi_sector = (from_cblock(cblock) * cache->sectors_per_block) +
-				sector_div(bi_sector, cache->sectors_per_block);
+		bio->bi_iter.bi_sector =
+			(from_cblock(cblock) * cache->sectors_per_block) +
+			sector_div(bi_sector, cache->sectors_per_block);
 	else
-		bio->bi_sector = (from_cblock(cblock) << cache->sectors_per_block_shift) |
-				(bi_sector & (cache->sectors_per_block - 1));
+		bio->bi_iter.bi_sector =
+			(from_cblock(cblock) << cache->sectors_per_block_shift) |
+			(bi_sector & (cache->sectors_per_block - 1));
 }
 
 static void check_if_tick_bio_needed(struct cache *cache, struct bio *bio)
@@ -712,7 +720,7 @@
 
 static dm_oblock_t get_bio_block(struct cache *cache, struct bio *bio)
 {
-	sector_t block_nr = bio->bi_sector;
+	sector_t block_nr = bio->bi_iter.bi_sector;
 
 	if (!block_size_is_power_of_two(cache))
 		(void) sector_div(block_nr, cache->sectors_per_block);
@@ -1027,7 +1035,7 @@
 static bool bio_writes_complete_block(struct cache *cache, struct bio *bio)
 {
 	return (bio_data_dir(bio) == WRITE) &&
-		(bio->bi_size == (cache->sectors_per_block << SECTOR_SHIFT));
+		(bio->bi_iter.bi_size == (cache->sectors_per_block << SECTOR_SHIFT));
 }
 
 static void avoid_copy(struct dm_cache_migration *mg)
@@ -1252,7 +1260,7 @@
 	size_t pb_data_size = get_per_bio_data_size(cache);
 	struct per_bio_data *pb = get_per_bio_data(bio, pb_data_size);
 
-	BUG_ON(bio->bi_size);
+	BUG_ON(bio->bi_iter.bi_size);
 	if (!pb->req_nr)
 		remap_to_origin(cache, bio);
 	else
@@ -1275,9 +1283,9 @@
  */
 static void process_discard_bio(struct cache *cache, struct bio *bio)
 {
-	dm_block_t start_block = dm_sector_div_up(bio->bi_sector,
+	dm_block_t start_block = dm_sector_div_up(bio->bi_iter.bi_sector,
 						  cache->discard_block_size);
-	dm_block_t end_block = bio->bi_sector + bio_sectors(bio);
+	dm_block_t end_block = bio_end_sector(bio);
 	dm_block_t b;
 
 	end_block = block_div(end_block, cache->discard_block_size);
diff --git a/drivers/md/dm-crypt.c b/drivers/md/dm-crypt.c
index 81b0fa6..784695d 100644
--- a/drivers/md/dm-crypt.c
+++ b/drivers/md/dm-crypt.c
@@ -39,10 +39,8 @@
 	struct completion restart;
 	struct bio *bio_in;
 	struct bio *bio_out;
-	unsigned int offset_in;
-	unsigned int offset_out;
-	unsigned int idx_in;
-	unsigned int idx_out;
+	struct bvec_iter iter_in;
+	struct bvec_iter iter_out;
 	sector_t cc_sector;
 	atomic_t cc_pending;
 };
@@ -826,10 +824,10 @@
 {
 	ctx->bio_in = bio_in;
 	ctx->bio_out = bio_out;
-	ctx->offset_in = 0;
-	ctx->offset_out = 0;
-	ctx->idx_in = bio_in ? bio_in->bi_idx : 0;
-	ctx->idx_out = bio_out ? bio_out->bi_idx : 0;
+	if (bio_in)
+		ctx->iter_in = bio_in->bi_iter;
+	if (bio_out)
+		ctx->iter_out = bio_out->bi_iter;
 	ctx->cc_sector = sector + cc->iv_offset;
 	init_completion(&ctx->restart);
 }
@@ -857,8 +855,8 @@
 			       struct convert_context *ctx,
 			       struct ablkcipher_request *req)
 {
-	struct bio_vec *bv_in = bio_iovec_idx(ctx->bio_in, ctx->idx_in);
-	struct bio_vec *bv_out = bio_iovec_idx(ctx->bio_out, ctx->idx_out);
+	struct bio_vec bv_in = bio_iter_iovec(ctx->bio_in, ctx->iter_in);
+	struct bio_vec bv_out = bio_iter_iovec(ctx->bio_out, ctx->iter_out);
 	struct dm_crypt_request *dmreq;
 	u8 *iv;
 	int r;
@@ -869,24 +867,15 @@
 	dmreq->iv_sector = ctx->cc_sector;
 	dmreq->ctx = ctx;
 	sg_init_table(&dmreq->sg_in, 1);
-	sg_set_page(&dmreq->sg_in, bv_in->bv_page, 1 << SECTOR_SHIFT,
-		    bv_in->bv_offset + ctx->offset_in);
+	sg_set_page(&dmreq->sg_in, bv_in.bv_page, 1 << SECTOR_SHIFT,
+		    bv_in.bv_offset);
 
 	sg_init_table(&dmreq->sg_out, 1);
-	sg_set_page(&dmreq->sg_out, bv_out->bv_page, 1 << SECTOR_SHIFT,
-		    bv_out->bv_offset + ctx->offset_out);
+	sg_set_page(&dmreq->sg_out, bv_out.bv_page, 1 << SECTOR_SHIFT,
+		    bv_out.bv_offset);
 
-	ctx->offset_in += 1 << SECTOR_SHIFT;
-	if (ctx->offset_in >= bv_in->bv_len) {
-		ctx->offset_in = 0;
-		ctx->idx_in++;
-	}
-
-	ctx->offset_out += 1 << SECTOR_SHIFT;
-	if (ctx->offset_out >= bv_out->bv_len) {
-		ctx->offset_out = 0;
-		ctx->idx_out++;
-	}
+	bio_advance_iter(ctx->bio_in, &ctx->iter_in, 1 << SECTOR_SHIFT);
+	bio_advance_iter(ctx->bio_out, &ctx->iter_out, 1 << SECTOR_SHIFT);
 
 	if (cc->iv_gen_ops) {
 		r = cc->iv_gen_ops->generator(cc, iv, dmreq);
@@ -937,8 +926,7 @@
 
 	atomic_set(&ctx->cc_pending, 1);
 
-	while(ctx->idx_in < ctx->bio_in->bi_vcnt &&
-	      ctx->idx_out < ctx->bio_out->bi_vcnt) {
+	while (ctx->iter_in.bi_size && ctx->iter_out.bi_size) {
 
 		crypt_alloc_req(cc, ctx);
 
@@ -1021,7 +1009,7 @@
 		size -= len;
 	}
 
-	if (!clone->bi_size) {
+	if (!clone->bi_iter.bi_size) {
 		bio_put(clone);
 		return NULL;
 	}
@@ -1161,7 +1149,7 @@
 	crypt_inc_pending(io);
 
 	clone_init(io, clone);
-	clone->bi_sector = cc->start + io->sector;
+	clone->bi_iter.bi_sector = cc->start + io->sector;
 
 	generic_make_request(clone);
 	return 0;
@@ -1207,9 +1195,9 @@
 	}
 
 	/* crypt_convert should have filled the clone bio */
-	BUG_ON(io->ctx.idx_out < clone->bi_vcnt);
+	BUG_ON(io->ctx.iter_out.bi_size);
 
-	clone->bi_sector = cc->start + io->sector;
+	clone->bi_iter.bi_sector = cc->start + io->sector;
 
 	if (async)
 		kcryptd_queue_io(io);
@@ -1224,7 +1212,7 @@
 	struct dm_crypt_io *new_io;
 	int crypt_finished;
 	unsigned out_of_pages = 0;
-	unsigned remaining = io->base_bio->bi_size;
+	unsigned remaining = io->base_bio->bi_iter.bi_size;
 	sector_t sector = io->sector;
 	int r;
 
@@ -1246,9 +1234,9 @@
 		}
 
 		io->ctx.bio_out = clone;
-		io->ctx.idx_out = 0;
+		io->ctx.iter_out = clone->bi_iter;
 
-		remaining -= clone->bi_size;
+		remaining -= clone->bi_iter.bi_size;
 		sector += bio_sectors(clone);
 
 		crypt_inc_pending(io);
@@ -1290,8 +1278,7 @@
 			crypt_inc_pending(new_io);
 			crypt_convert_init(cc, &new_io->ctx, NULL,
 					   io->base_bio, sector);
-			new_io->ctx.idx_in = io->ctx.idx_in;
-			new_io->ctx.offset_in = io->ctx.offset_in;
+			new_io->ctx.iter_in = io->ctx.iter_in;
 
 			/*
 			 * Fragments after the first use the base_io
@@ -1869,11 +1856,12 @@
 	if (unlikely(bio->bi_rw & (REQ_FLUSH | REQ_DISCARD))) {
 		bio->bi_bdev = cc->dev->bdev;
 		if (bio_sectors(bio))
-			bio->bi_sector = cc->start + dm_target_offset(ti, bio->bi_sector);
+			bio->bi_iter.bi_sector = cc->start +
+				dm_target_offset(ti, bio->bi_iter.bi_sector);
 		return DM_MAPIO_REMAPPED;
 	}
 
-	io = crypt_io_alloc(cc, bio, dm_target_offset(ti, bio->bi_sector));
+	io = crypt_io_alloc(cc, bio, dm_target_offset(ti, bio->bi_iter.bi_sector));
 
 	if (bio_data_dir(io->base_bio) == READ) {
 		if (kcryptd_io_read(io, GFP_NOWAIT))
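
crypt_convert_block() above replaces the hand-maintained idx/offset pairs with two
private bvec_iters: bio_iter_iovec() materialises the current segment and
bio_advance_iter() steps forward one 512-byte sector at a time, crossing page
boundaries without ever touching the bio itself. The same walk, stripped of the crypto
plumbing (the function name is illustrative):

	#include <linux/bio.h>

	/* Illustrative only: visit the payloads of two bios in 512-byte steps. */
	static void example_walk_pair(struct bio *in, struct bio *out)
	{
		struct bvec_iter iter_in = in->bi_iter;		/* private copies; the bios */
		struct bvec_iter iter_out = out->bi_iter;	/* themselves are not modified */

		while (iter_in.bi_size && iter_out.bi_size) {
			struct bio_vec bv_in = bio_iter_iovec(in, iter_in);
			struct bio_vec bv_out = bio_iter_iovec(out, iter_out);

			/* bv_in.bv_page + bv_in.bv_offset and bv_out.bv_page +
			 * bv_out.bv_offset address the current sector of each bio */

			bio_advance_iter(in, &iter_in, 512);
			bio_advance_iter(out, &iter_out, 512);
		}
	}
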
diff --git a/drivers/md/dm-delay.c b/drivers/md/dm-delay.c
index a8a511c..42c3a27 100644
--- a/drivers/md/dm-delay.c
+++ b/drivers/md/dm-delay.c
@@ -277,14 +277,15 @@
 	if ((bio_data_dir(bio) == WRITE) && (dc->dev_write)) {
 		bio->bi_bdev = dc->dev_write->bdev;
 		if (bio_sectors(bio))
-			bio->bi_sector = dc->start_write +
-					 dm_target_offset(ti, bio->bi_sector);
+			bio->bi_iter.bi_sector = dc->start_write +
+				dm_target_offset(ti, bio->bi_iter.bi_sector);
 
 		return delay_bio(dc, dc->write_delay, bio);
 	}
 
 	bio->bi_bdev = dc->dev_read->bdev;
-	bio->bi_sector = dc->start_read + dm_target_offset(ti, bio->bi_sector);
+	bio->bi_iter.bi_sector = dc->start_read +
+		dm_target_offset(ti, bio->bi_iter.bi_sector);
 
 	return delay_bio(dc, dc->read_delay, bio);
 }
diff --git a/drivers/md/dm-flakey.c b/drivers/md/dm-flakey.c
index c80a0ec..b257e46 100644
--- a/drivers/md/dm-flakey.c
+++ b/drivers/md/dm-flakey.c
@@ -248,7 +248,8 @@
 
 	bio->bi_bdev = fc->dev->bdev;
 	if (bio_sectors(bio))
-		bio->bi_sector = flakey_map_sector(ti, bio->bi_sector);
+		bio->bi_iter.bi_sector =
+			flakey_map_sector(ti, bio->bi_iter.bi_sector);
 }
 
 static void corrupt_bio_data(struct bio *bio, struct flakey_c *fc)
@@ -265,8 +266,8 @@
 		DMDEBUG("Corrupting data bio=%p by writing %u to byte %u "
 			"(rw=%c bi_rw=%lu bi_sector=%llu cur_bytes=%u)\n",
 			bio, fc->corrupt_bio_value, fc->corrupt_bio_byte,
-			(bio_data_dir(bio) == WRITE) ? 'w' : 'r',
-			bio->bi_rw, (unsigned long long)bio->bi_sector, bio_bytes);
+			(bio_data_dir(bio) == WRITE) ? 'w' : 'r', bio->bi_rw,
+			(unsigned long long)bio->bi_iter.bi_sector, bio_bytes);
 	}
 }
 
diff --git a/drivers/md/dm-io.c b/drivers/md/dm-io.c
index 2a20986..b2b8a10 100644
--- a/drivers/md/dm-io.c
+++ b/drivers/md/dm-io.c
@@ -201,26 +201,29 @@
 /*
  * Functions for getting the pages from a bvec.
  */
-static void bvec_get_page(struct dpages *dp,
+static void bio_get_page(struct dpages *dp,
 		  struct page **p, unsigned long *len, unsigned *offset)
 {
-	struct bio_vec *bvec = (struct bio_vec *) dp->context_ptr;
-	*p = bvec->bv_page;
-	*len = bvec->bv_len;
-	*offset = bvec->bv_offset;
+	struct bio *bio = dp->context_ptr;
+	struct bio_vec bvec = bio_iovec(bio);
+	*p = bvec.bv_page;
+	*len = bvec.bv_len;
+	*offset = bvec.bv_offset;
 }
 
-static void bvec_next_page(struct dpages *dp)
+static void bio_next_page(struct dpages *dp)
 {
-	struct bio_vec *bvec = (struct bio_vec *) dp->context_ptr;
-	dp->context_ptr = bvec + 1;
+	struct bio *bio = dp->context_ptr;
+	struct bio_vec bvec = bio_iovec(bio);
+
+	bio_advance(bio, bvec.bv_len);
 }
 
-static void bvec_dp_init(struct dpages *dp, struct bio_vec *bvec)
+static void bio_dp_init(struct dpages *dp, struct bio *bio)
 {
-	dp->get_page = bvec_get_page;
-	dp->next_page = bvec_next_page;
-	dp->context_ptr = bvec;
+	dp->get_page = bio_get_page;
+	dp->next_page = bio_next_page;
+	dp->context_ptr = bio;
 }
 
 /*
@@ -304,14 +307,14 @@
 					  dm_sector_div_up(remaining, (PAGE_SIZE >> SECTOR_SHIFT)));
 
 		bio = bio_alloc_bioset(GFP_NOIO, num_bvecs, io->client->bios);
-		bio->bi_sector = where->sector + (where->count - remaining);
+		bio->bi_iter.bi_sector = where->sector + (where->count - remaining);
 		bio->bi_bdev = where->bdev;
 		bio->bi_end_io = endio;
 		store_io_and_region_in_bio(bio, io, region);
 
 		if (rw & REQ_DISCARD) {
 			num_sectors = min_t(sector_t, q->limits.max_discard_sectors, remaining);
-			bio->bi_size = num_sectors << SECTOR_SHIFT;
+			bio->bi_iter.bi_size = num_sectors << SECTOR_SHIFT;
 			remaining -= num_sectors;
 		} else if (rw & REQ_WRITE_SAME) {
 			/*
@@ -320,7 +323,7 @@
 			dp->get_page(dp, &page, &len, &offset);
 			bio_add_page(bio, page, logical_block_size, offset);
 			num_sectors = min_t(sector_t, q->limits.max_write_same_sectors, remaining);
-			bio->bi_size = num_sectors << SECTOR_SHIFT;
+			bio->bi_iter.bi_size = num_sectors << SECTOR_SHIFT;
 
 			offset = 0;
 			remaining -= num_sectors;
@@ -457,8 +460,8 @@
 		list_dp_init(dp, io_req->mem.ptr.pl, io_req->mem.offset);
 		break;
 
-	case DM_IO_BVEC:
-		bvec_dp_init(dp, io_req->mem.ptr.bvec);
+	case DM_IO_BIO:
+		bio_dp_init(dp, io_req->mem.ptr.bio);
 		break;
 
 	case DM_IO_VMA:
diff --git a/drivers/md/dm-linear.c b/drivers/md/dm-linear.c
index 4f99d26..53e848c 100644
--- a/drivers/md/dm-linear.c
+++ b/drivers/md/dm-linear.c
@@ -85,7 +85,8 @@
 
 	bio->bi_bdev = lc->dev->bdev;
 	if (bio_sectors(bio))
-		bio->bi_sector = linear_map_sector(ti, bio->bi_sector);
+		bio->bi_iter.bi_sector =
+			linear_map_sector(ti, bio->bi_iter.bi_sector);
 }
 
 static int linear_map(struct dm_target *ti, struct bio *bio)
diff --git a/drivers/md/dm-raid1.c b/drivers/md/dm-raid1.c
index 9584443..f284e0b 100644
--- a/drivers/md/dm-raid1.c
+++ b/drivers/md/dm-raid1.c
@@ -432,7 +432,7 @@
 	region_t region = dm_rh_bio_to_region(ms->rh, bio);
 
 	if (log->type->in_sync(log, region, 0))
-		return choose_mirror(ms,  bio->bi_sector) ? 1 : 0;
+		return choose_mirror(ms,  bio->bi_iter.bi_sector) ? 1 : 0;
 
 	return 0;
 }
@@ -442,15 +442,15 @@
  */
 static sector_t map_sector(struct mirror *m, struct bio *bio)
 {
-	if (unlikely(!bio->bi_size))
+	if (unlikely(!bio->bi_iter.bi_size))
 		return 0;
-	return m->offset + dm_target_offset(m->ms->ti, bio->bi_sector);
+	return m->offset + dm_target_offset(m->ms->ti, bio->bi_iter.bi_sector);
 }
 
 static void map_bio(struct mirror *m, struct bio *bio)
 {
 	bio->bi_bdev = m->dev->bdev;
-	bio->bi_sector = map_sector(m, bio);
+	bio->bi_iter.bi_sector = map_sector(m, bio);
 }
 
 static void map_region(struct dm_io_region *io, struct mirror *m,
@@ -526,8 +526,8 @@
 	struct dm_io_region io;
 	struct dm_io_request io_req = {
 		.bi_rw = READ,
-		.mem.type = DM_IO_BVEC,
-		.mem.ptr.bvec = bio->bi_io_vec + bio->bi_idx,
+		.mem.type = DM_IO_BIO,
+		.mem.ptr.bio = bio,
 		.notify.fn = read_callback,
 		.notify.context = bio,
 		.client = m->ms->io_client,
@@ -559,7 +559,7 @@
 		 * We can only read balance if the region is in sync.
 		 */
 		if (likely(region_in_sync(ms, region, 1)))
-			m = choose_mirror(ms, bio->bi_sector);
+			m = choose_mirror(ms, bio->bi_iter.bi_sector);
 		else if (m && atomic_read(&m->error_count))
 			m = NULL;
 
@@ -629,8 +629,8 @@
 	struct mirror *m;
 	struct dm_io_request io_req = {
 		.bi_rw = WRITE | (bio->bi_rw & WRITE_FLUSH_FUA),
-		.mem.type = DM_IO_BVEC,
-		.mem.ptr.bvec = bio->bi_io_vec + bio->bi_idx,
+		.mem.type = DM_IO_BIO,
+		.mem.ptr.bio = bio,
 		.notify.fn = write_callback,
 		.notify.context = bio,
 		.client = ms->io_client,
@@ -1181,7 +1181,7 @@
 	 * The region is in-sync and we can perform reads directly.
 	 * Store enough information so we can retry if it fails.
 	 */
-	m = choose_mirror(ms, bio->bi_sector);
+	m = choose_mirror(ms, bio->bi_iter.bi_sector);
 	if (unlikely(!m))
 		return -EIO;
 
diff --git a/drivers/md/dm-region-hash.c b/drivers/md/dm-region-hash.c
index 69732e0..b929fd5 100644
--- a/drivers/md/dm-region-hash.c
+++ b/drivers/md/dm-region-hash.c
@@ -126,7 +126,8 @@
 
 region_t dm_rh_bio_to_region(struct dm_region_hash *rh, struct bio *bio)
 {
-	return dm_rh_sector_to_region(rh, bio->bi_sector - rh->target_begin);
+	return dm_rh_sector_to_region(rh, bio->bi_iter.bi_sector -
+				      rh->target_begin);
 }
 EXPORT_SYMBOL_GPL(dm_rh_bio_to_region);
 
diff --git a/drivers/md/dm-snap.c b/drivers/md/dm-snap.c
index 7177185..ebddef5 100644
--- a/drivers/md/dm-snap.c
+++ b/drivers/md/dm-snap.c
@@ -1438,6 +1438,7 @@
 	if (full_bio) {
 		full_bio->bi_end_io = pe->full_bio_end_io;
 		full_bio->bi_private = pe->full_bio_private;
+		atomic_inc(&full_bio->bi_remaining);
 	}
 	free_pending_exception(pe);
 
@@ -1619,11 +1620,10 @@
 			    struct bio *bio, chunk_t chunk)
 {
 	bio->bi_bdev = s->cow->bdev;
-	bio->bi_sector = chunk_to_sector(s->store,
-					 dm_chunk_number(e->new_chunk) +
-					 (chunk - e->old_chunk)) +
-					 (bio->bi_sector &
-					  s->store->chunk_mask);
+	bio->bi_iter.bi_sector =
+		chunk_to_sector(s->store, dm_chunk_number(e->new_chunk) +
+				(chunk - e->old_chunk)) +
+		(bio->bi_iter.bi_sector & s->store->chunk_mask);
 }
 
 static int snapshot_map(struct dm_target *ti, struct bio *bio)
@@ -1641,7 +1641,7 @@
 		return DM_MAPIO_REMAPPED;
 	}
 
-	chunk = sector_to_chunk(s->store, bio->bi_sector);
+	chunk = sector_to_chunk(s->store, bio->bi_iter.bi_sector);
 
 	/* Full snapshots are not usable */
 	/* To get here the table must be live so s->active is always set. */
@@ -1702,7 +1702,8 @@
 		r = DM_MAPIO_SUBMITTED;
 
 		if (!pe->started &&
-		    bio->bi_size == (s->store->chunk_size << SECTOR_SHIFT)) {
+		    bio->bi_iter.bi_size ==
+		    (s->store->chunk_size << SECTOR_SHIFT)) {
 			pe->started = 1;
 			up_write(&s->lock);
 			start_full_bio(pe, bio);
@@ -1758,7 +1759,7 @@
 		return DM_MAPIO_REMAPPED;
 	}
 
-	chunk = sector_to_chunk(s->store, bio->bi_sector);
+	chunk = sector_to_chunk(s->store, bio->bi_iter.bi_sector);
 
 	down_write(&s->lock);
 
@@ -2095,7 +2096,7 @@
 	down_read(&_origins_lock);
 	o = __lookup_origin(origin->bdev);
 	if (o)
-		r = __origin_write(&o->snapshots, bio->bi_sector, bio);
+		r = __origin_write(&o->snapshots, bio->bi_iter.bi_sector, bio);
 	up_read(&_origins_lock);
 
 	return r;
diff --git a/drivers/md/dm-stripe.c b/drivers/md/dm-stripe.c
index 73c1712..d1600d2 100644
--- a/drivers/md/dm-stripe.c
+++ b/drivers/md/dm-stripe.c
@@ -259,13 +259,15 @@
 {
 	sector_t begin, end;
 
-	stripe_map_range_sector(sc, bio->bi_sector, target_stripe, &begin);
+	stripe_map_range_sector(sc, bio->bi_iter.bi_sector,
+				target_stripe, &begin);
 	stripe_map_range_sector(sc, bio_end_sector(bio),
 				target_stripe, &end);
 	if (begin < end) {
 		bio->bi_bdev = sc->stripe[target_stripe].dev->bdev;
-		bio->bi_sector = begin + sc->stripe[target_stripe].physical_start;
-		bio->bi_size = to_bytes(end - begin);
+		bio->bi_iter.bi_sector = begin +
+			sc->stripe[target_stripe].physical_start;
+		bio->bi_iter.bi_size = to_bytes(end - begin);
 		return DM_MAPIO_REMAPPED;
 	} else {
 		/* The range doesn't map to the target stripe */
@@ -293,9 +295,10 @@
 		return stripe_map_range(sc, bio, target_bio_nr);
 	}
 
-	stripe_map_sector(sc, bio->bi_sector, &stripe, &bio->bi_sector);
+	stripe_map_sector(sc, bio->bi_iter.bi_sector,
+			  &stripe, &bio->bi_iter.bi_sector);
 
-	bio->bi_sector += sc->stripe[stripe].physical_start;
+	bio->bi_iter.bi_sector += sc->stripe[stripe].physical_start;
 	bio->bi_bdev = sc->stripe[stripe].dev->bdev;
 
 	return DM_MAPIO_REMAPPED;
diff --git a/drivers/md/dm-switch.c b/drivers/md/dm-switch.c
index ff9ac4b..09a688b 100644
--- a/drivers/md/dm-switch.c
+++ b/drivers/md/dm-switch.c
@@ -311,11 +311,11 @@
 static int switch_map(struct dm_target *ti, struct bio *bio)
 {
 	struct switch_ctx *sctx = ti->private;
-	sector_t offset = dm_target_offset(ti, bio->bi_sector);
+	sector_t offset = dm_target_offset(ti, bio->bi_iter.bi_sector);
 	unsigned path_nr = switch_get_path_nr(sctx, offset);
 
 	bio->bi_bdev = sctx->path_list[path_nr].dmdev->bdev;
-	bio->bi_sector = sctx->path_list[path_nr].start + offset;
+	bio->bi_iter.bi_sector = sctx->path_list[path_nr].start + offset;
 
 	return DM_MAPIO_REMAPPED;
 }
diff --git a/drivers/md/dm-thin.c b/drivers/md/dm-thin.c
index 726228b..faaf944 100644
--- a/drivers/md/dm-thin.c
+++ b/drivers/md/dm-thin.c
@@ -414,7 +414,7 @@
 static dm_block_t get_bio_block(struct thin_c *tc, struct bio *bio)
 {
 	struct pool *pool = tc->pool;
-	sector_t block_nr = bio->bi_sector;
+	sector_t block_nr = bio->bi_iter.bi_sector;
 
 	if (block_size_is_power_of_two(pool))
 		block_nr >>= pool->sectors_per_block_shift;
@@ -427,14 +427,15 @@
 static void remap(struct thin_c *tc, struct bio *bio, dm_block_t block)
 {
 	struct pool *pool = tc->pool;
-	sector_t bi_sector = bio->bi_sector;
+	sector_t bi_sector = bio->bi_iter.bi_sector;
 
 	bio->bi_bdev = tc->pool_dev->bdev;
 	if (block_size_is_power_of_two(pool))
-		bio->bi_sector = (block << pool->sectors_per_block_shift) |
-				(bi_sector & (pool->sectors_per_block - 1));
+		bio->bi_iter.bi_sector =
+			(block << pool->sectors_per_block_shift) |
+			(bi_sector & (pool->sectors_per_block - 1));
 	else
-		bio->bi_sector = (block * pool->sectors_per_block) +
+		bio->bi_iter.bi_sector = (block * pool->sectors_per_block) +
 				 sector_div(bi_sector, pool->sectors_per_block);
 }
 
@@ -612,8 +613,10 @@
 
 static void process_prepared_mapping_fail(struct dm_thin_new_mapping *m)
 {
-	if (m->bio)
+	if (m->bio) {
 		m->bio->bi_end_io = m->saved_bi_end_io;
+		atomic_inc(&m->bio->bi_remaining);
+	}
 	cell_error(m->tc->pool, m->cell);
 	list_del(&m->list);
 	mempool_free(m, m->tc->pool->mapping_pool);
@@ -627,8 +630,10 @@
 	int r;
 
 	bio = m->bio;
-	if (bio)
+	if (bio) {
 		bio->bi_end_io = m->saved_bi_end_io;
+		atomic_inc(&bio->bi_remaining);
+	}
 
 	if (m->err) {
 		cell_error(pool, m->cell);
@@ -731,7 +736,8 @@
  */
 static int io_overlaps_block(struct pool *pool, struct bio *bio)
 {
-	return bio->bi_size == (pool->sectors_per_block << SECTOR_SHIFT);
+	return bio->bi_iter.bi_size ==
+		(pool->sectors_per_block << SECTOR_SHIFT);
 }
 
 static int io_overwrites_block(struct pool *pool, struct bio *bio)
@@ -1136,7 +1142,7 @@
 	if (bio_detain(pool, &key, bio, &cell))
 		return;
 
-	if (bio_data_dir(bio) == WRITE && bio->bi_size)
+	if (bio_data_dir(bio) == WRITE && bio->bi_iter.bi_size)
 		break_sharing(tc, bio, block, &key, lookup_result, cell);
 	else {
 		struct dm_thin_endio_hook *h = dm_per_bio_data(bio, sizeof(struct dm_thin_endio_hook));
@@ -1159,7 +1165,7 @@
 	/*
 	 * Remap empty bios (flushes) immediately, without provisioning.
 	 */
-	if (!bio->bi_size) {
+	if (!bio->bi_iter.bi_size) {
 		inc_all_io_entry(pool, bio);
 		cell_defer_no_holder(tc, cell);
 
@@ -1258,7 +1264,7 @@
 	r = dm_thin_find_block(tc->td, block, 1, &lookup_result);
 	switch (r) {
 	case 0:
-		if (lookup_result.shared && (rw == WRITE) && bio->bi_size)
+		if (lookup_result.shared && (rw == WRITE) && bio->bi_iter.bi_size)
 			handle_unserviceable_bio(tc->pool, bio);
 		else {
 			inc_all_io_entry(tc->pool, bio);
@@ -2939,7 +2945,7 @@
 
 static int thin_map(struct dm_target *ti, struct bio *bio)
 {
-	bio->bi_sector = dm_target_offset(ti, bio->bi_sector);
+	bio->bi_iter.bi_sector = dm_target_offset(ti, bio->bi_iter.bi_sector);
 
 	return thin_bio_map(ti, bio);
 }
diff --git a/drivers/md/dm-verity.c b/drivers/md/dm-verity.c
index 4b7941d..796007a 100644
--- a/drivers/md/dm-verity.c
+++ b/drivers/md/dm-verity.c
@@ -73,15 +73,10 @@
 	sector_t block;
 	unsigned n_blocks;
 
-	/* saved bio vector */
-	struct bio_vec *io_vec;
-	unsigned io_vec_size;
+	struct bvec_iter iter;
 
 	struct work_struct work;
 
-	/* A space for short vectors; longer vectors are allocated separately. */
-	struct bio_vec io_vec_inline[DM_VERITY_IO_VEC_INLINE];
-
 	/*
 	 * Three variably-size fields follow this struct:
 	 *
@@ -284,9 +279,10 @@
 static int verity_verify_io(struct dm_verity_io *io)
 {
 	struct dm_verity *v = io->v;
+	struct bio *bio = dm_bio_from_per_bio_data(io,
+						   v->ti->per_bio_data_size);
 	unsigned b;
 	int i;
-	unsigned vector = 0, offset = 0;
 
 	for (b = 0; b < io->n_blocks; b++) {
 		struct shash_desc *desc;
@@ -336,31 +332,22 @@
 		}
 
 		todo = 1 << v->data_dev_block_bits;
-		do {
-			struct bio_vec *bv;
+		while (io->iter.bi_size) {
 			u8 *page;
-			unsigned len;
+			struct bio_vec bv = bio_iter_iovec(bio, io->iter);
 
-			BUG_ON(vector >= io->io_vec_size);
-			bv = &io->io_vec[vector];
-			page = kmap_atomic(bv->bv_page);
-			len = bv->bv_len - offset;
-			if (likely(len >= todo))
-				len = todo;
-			r = crypto_shash_update(desc,
-					page + bv->bv_offset + offset, len);
+			page = kmap_atomic(bv.bv_page);
+			r = crypto_shash_update(desc, page + bv.bv_offset,
+						bv.bv_len);
 			kunmap_atomic(page);
+
 			if (r < 0) {
 				DMERR("crypto_shash_update failed: %d", r);
 				return r;
 			}
-			offset += len;
-			if (likely(offset == bv->bv_len)) {
-				offset = 0;
-				vector++;
-			}
-			todo -= len;
-		} while (todo);
+
+			bio_advance_iter(bio, &io->iter, bv.bv_len);
+		}
 
 		if (!v->version) {
 			r = crypto_shash_update(desc, v->salt, v->salt_size);
@@ -383,8 +370,6 @@
 			return -EIO;
 		}
 	}
-	BUG_ON(vector != io->io_vec_size);
-	BUG_ON(offset);
 
 	return 0;
 }
@@ -400,10 +385,7 @@
 	bio->bi_end_io = io->orig_bi_end_io;
 	bio->bi_private = io->orig_bi_private;
 
-	if (io->io_vec != io->io_vec_inline)
-		mempool_free(io->io_vec, v->vec_mempool);
-
-	bio_endio(bio, error);
+	bio_endio_nodec(bio, error);
 }
 
 static void verity_work(struct work_struct *w)
@@ -493,9 +475,9 @@
 	struct dm_verity_io *io;
 
 	bio->bi_bdev = v->data_dev->bdev;
-	bio->bi_sector = verity_map_sector(v, bio->bi_sector);
+	bio->bi_iter.bi_sector = verity_map_sector(v, bio->bi_iter.bi_sector);
 
-	if (((unsigned)bio->bi_sector | bio_sectors(bio)) &
+	if (((unsigned)bio->bi_iter.bi_sector | bio_sectors(bio)) &
 	    ((1 << (v->data_dev_block_bits - SECTOR_SHIFT)) - 1)) {
 		DMERR_LIMIT("unaligned io");
 		return -EIO;
@@ -514,18 +496,12 @@
 	io->v = v;
 	io->orig_bi_end_io = bio->bi_end_io;
 	io->orig_bi_private = bio->bi_private;
-	io->block = bio->bi_sector >> (v->data_dev_block_bits - SECTOR_SHIFT);
-	io->n_blocks = bio->bi_size >> v->data_dev_block_bits;
+	io->block = bio->bi_iter.bi_sector >> (v->data_dev_block_bits - SECTOR_SHIFT);
+	io->n_blocks = bio->bi_iter.bi_size >> v->data_dev_block_bits;
 
 	bio->bi_end_io = verity_end_io;
 	bio->bi_private = io;
-	io->io_vec_size = bio_segments(bio);
-	if (io->io_vec_size < DM_VERITY_IO_VEC_INLINE)
-		io->io_vec = io->io_vec_inline;
-	else
-		io->io_vec = mempool_alloc(v->vec_mempool, GFP_NOIO);
-	memcpy(io->io_vec, bio_iovec(bio),
-	       io->io_vec_size * sizeof(struct bio_vec));
+	io->iter = bio->bi_iter;
 
 	verity_submit_prefetch(v, io);
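
dm-verity's per-request state shrinks the same way: instead of copying the bio_vec
array into io_vec (sometimes from a mempool), verity_map() stores bio->bi_iter and the
worker re-walks the payload later with the bio_iter_iovec()/bio_advance_iter() loop
shown after the dm-crypt hunk above. The stored state is just (hypothetical names):

	struct example_io {
		struct bvec_iter iter;		/* replaces io_vec / io_vec_size */
	};

	static void example_map(struct example_io *io, struct bio *bio)
	{
		io->iter = bio->bi_iter;	/* a few words copied, no allocation */
	}
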
 
diff --git a/drivers/md/dm.c b/drivers/md/dm.c
index b49c762..8c53b09 100644
--- a/drivers/md/dm.c
+++ b/drivers/md/dm.c
@@ -575,7 +575,7 @@
 		atomic_inc_return(&md->pending[rw]));
 
 	if (unlikely(dm_stats_used(&md->stats)))
-		dm_stats_account_io(&md->stats, bio->bi_rw, bio->bi_sector,
+		dm_stats_account_io(&md->stats, bio->bi_rw, bio->bi_iter.bi_sector,
 				    bio_sectors(bio), false, 0, &io->stats_aux);
 }
 
@@ -593,7 +593,7 @@
 	part_stat_unlock();
 
 	if (unlikely(dm_stats_used(&md->stats)))
-		dm_stats_account_io(&md->stats, bio->bi_rw, bio->bi_sector,
+		dm_stats_account_io(&md->stats, bio->bi_rw, bio->bi_iter.bi_sector,
 				    bio_sectors(bio), true, duration, &io->stats_aux);
 
 	/*
@@ -742,7 +742,7 @@
 		if (io_error == DM_ENDIO_REQUEUE)
 			return;
 
-		if ((bio->bi_rw & REQ_FLUSH) && bio->bi_size) {
+		if ((bio->bi_rw & REQ_FLUSH) && bio->bi_iter.bi_size) {
 			/*
 			 * Preflush done for flush with data, reissue
 			 * without REQ_FLUSH.
@@ -797,7 +797,7 @@
 	struct dm_rq_clone_bio_info *info = clone->bi_private;
 	struct dm_rq_target_io *tio = info->tio;
 	struct bio *bio = info->orig;
-	unsigned int nr_bytes = info->orig->bi_size;
+	unsigned int nr_bytes = info->orig->bi_iter.bi_size;
 
 	bio_put(clone);
 
@@ -1128,7 +1128,7 @@
 	 * this io.
 	 */
 	atomic_inc(&tio->io->io_count);
-	sector = clone->bi_sector;
+	sector = clone->bi_iter.bi_sector;
 	r = ti->type->map(ti, clone);
 	if (r == DM_MAPIO_REMAPPED) {
 		/* the bio has been remapped so dispatch it */
@@ -1155,76 +1155,32 @@
 	struct dm_io *io;
 	sector_t sector;
 	sector_t sector_count;
-	unsigned short idx;
 };
 
 static void bio_setup_sector(struct bio *bio, sector_t sector, sector_t len)
 {
-	bio->bi_sector = sector;
-	bio->bi_size = to_bytes(len);
-}
-
-static void bio_setup_bv(struct bio *bio, unsigned short idx, unsigned short bv_count)
-{
-	bio->bi_idx = idx;
-	bio->bi_vcnt = idx + bv_count;
-	bio->bi_flags &= ~(1 << BIO_SEG_VALID);
-}
-
-static void clone_bio_integrity(struct bio *bio, struct bio *clone,
-				unsigned short idx, unsigned len, unsigned offset,
-				unsigned trim)
-{
-	if (!bio_integrity(bio))
-		return;
-
-	bio_integrity_clone(clone, bio, GFP_NOIO);
-
-	if (trim)
-		bio_integrity_trim(clone, bio_sector_offset(bio, idx, offset), len);
-}
-
-/*
- * Creates a little bio that just does part of a bvec.
- */
-static void clone_split_bio(struct dm_target_io *tio, struct bio *bio,
-			    sector_t sector, unsigned short idx,
-			    unsigned offset, unsigned len)
-{
-	struct bio *clone = &tio->clone;
-	struct bio_vec *bv = bio->bi_io_vec + idx;
-
-	*clone->bi_io_vec = *bv;
-
-	bio_setup_sector(clone, sector, len);
-
-	clone->bi_bdev = bio->bi_bdev;
-	clone->bi_rw = bio->bi_rw;
-	clone->bi_vcnt = 1;
-	clone->bi_io_vec->bv_offset = offset;
-	clone->bi_io_vec->bv_len = clone->bi_size;
-	clone->bi_flags |= 1 << BIO_CLONED;
-
-	clone_bio_integrity(bio, clone, idx, len, offset, 1);
+	bio->bi_iter.bi_sector = sector;
+	bio->bi_iter.bi_size = to_bytes(len);
 }
 
 /*
  * Creates a bio that consists of range of complete bvecs.
  */
 static void clone_bio(struct dm_target_io *tio, struct bio *bio,
-		      sector_t sector, unsigned short idx,
-		      unsigned short bv_count, unsigned len)
+		      sector_t sector, unsigned len)
 {
 	struct bio *clone = &tio->clone;
-	unsigned trim = 0;
 
-	__bio_clone(clone, bio);
-	bio_setup_sector(clone, sector, len);
-	bio_setup_bv(clone, idx, bv_count);
+	__bio_clone_fast(clone, bio);
 
-	if (idx != bio->bi_idx || clone->bi_size < bio->bi_size)
-		trim = 1;
-	clone_bio_integrity(bio, clone, idx, len, 0, trim);
+	if (bio_integrity(bio))
+		bio_integrity_clone(clone, bio, GFP_NOIO);
+
+	bio_advance(clone, to_bytes(sector - clone->bi_iter.bi_sector));
+	clone->bi_iter.bi_size = to_bytes(len);
+
+	if (bio_integrity(bio))
+		bio_integrity_trim(clone, 0, len);
 }
 
 static struct dm_target_io *alloc_tio(struct clone_info *ci,
@@ -1257,7 +1213,7 @@
 	 * ci->bio->bi_max_vecs is BIO_INLINE_VECS anyway, for both flush
 	 * and discard, so no need for concern about wasted bvec allocations.
 	 */
-	 __bio_clone(clone, ci->bio);
+	 __bio_clone_fast(clone, ci->bio);
 	if (len)
 		bio_setup_sector(clone, ci->sector, len);
 
@@ -1286,10 +1242,7 @@
 }
 
 static void __clone_and_map_data_bio(struct clone_info *ci, struct dm_target *ti,
-				     sector_t sector, int nr_iovecs,
-				     unsigned short idx, unsigned short bv_count,
-				     unsigned offset, unsigned len,
-				     unsigned split_bvec)
+				     sector_t sector, unsigned len)
 {
 	struct bio *bio = ci->bio;
 	struct dm_target_io *tio;
@@ -1303,11 +1256,8 @@
 		num_target_bios = ti->num_write_bios(ti, bio);
 
 	for (target_bio_nr = 0; target_bio_nr < num_target_bios; target_bio_nr++) {
-		tio = alloc_tio(ci, ti, nr_iovecs, target_bio_nr);
-		if (split_bvec)
-			clone_split_bio(tio, bio, sector, idx, offset, len);
-		else
-			clone_bio(tio, bio, sector, idx, bv_count, len);
+		tio = alloc_tio(ci, ti, 0, target_bio_nr);
+		clone_bio(tio, bio, sector, len);
 		__map_bio(tio);
 	}
 }
@@ -1379,68 +1329,13 @@
 }
 
 /*
- * Find maximum number of sectors / bvecs we can process with a single bio.
- */
-static sector_t __len_within_target(struct clone_info *ci, sector_t max, int *idx)
-{
-	struct bio *bio = ci->bio;
-	sector_t bv_len, total_len = 0;
-
-	for (*idx = ci->idx; max && (*idx < bio->bi_vcnt); (*idx)++) {
-		bv_len = to_sector(bio->bi_io_vec[*idx].bv_len);
-
-		if (bv_len > max)
-			break;
-
-		max -= bv_len;
-		total_len += bv_len;
-	}
-
-	return total_len;
-}
-
-static int __split_bvec_across_targets(struct clone_info *ci,
-				       struct dm_target *ti, sector_t max)
-{
-	struct bio *bio = ci->bio;
-	struct bio_vec *bv = bio->bi_io_vec + ci->idx;
-	sector_t remaining = to_sector(bv->bv_len);
-	unsigned offset = 0;
-	sector_t len;
-
-	do {
-		if (offset) {
-			ti = dm_table_find_target(ci->map, ci->sector);
-			if (!dm_target_is_valid(ti))
-				return -EIO;
-
-			max = max_io_len(ci->sector, ti);
-		}
-
-		len = min(remaining, max);
-
-		__clone_and_map_data_bio(ci, ti, ci->sector, 1, ci->idx, 0,
-					 bv->bv_offset + offset, len, 1);
-
-		ci->sector += len;
-		ci->sector_count -= len;
-		offset += to_bytes(len);
-	} while (remaining -= len);
-
-	ci->idx++;
-
-	return 0;
-}
-
-/*
  * Select the correct strategy for processing a non-flush bio.
  */
 static int __split_and_process_non_flush(struct clone_info *ci)
 {
 	struct bio *bio = ci->bio;
 	struct dm_target *ti;
-	sector_t len, max;
-	int idx;
+	unsigned len;
 
 	if (unlikely(bio->bi_rw & REQ_DISCARD))
 		return __send_discard(ci);
@@ -1451,41 +1346,14 @@
 	if (!dm_target_is_valid(ti))
 		return -EIO;
 
-	max = max_io_len(ci->sector, ti);
+	len = min_t(sector_t, max_io_len(ci->sector, ti), ci->sector_count);
 
-	/*
-	 * Optimise for the simple case where we can do all of
-	 * the remaining io with a single clone.
-	 */
-	if (ci->sector_count <= max) {
-		__clone_and_map_data_bio(ci, ti, ci->sector, bio->bi_max_vecs,
-					 ci->idx, bio->bi_vcnt - ci->idx, 0,
-					 ci->sector_count, 0);
-		ci->sector_count = 0;
-		return 0;
-	}
+	__clone_and_map_data_bio(ci, ti, ci->sector, len);
 
-	/*
-	 * There are some bvecs that don't span targets.
-	 * Do as many of these as possible.
-	 */
-	if (to_sector(bio->bi_io_vec[ci->idx].bv_len) <= max) {
-		len = __len_within_target(ci, max, &idx);
+	ci->sector += len;
+	ci->sector_count -= len;
 
-		__clone_and_map_data_bio(ci, ti, ci->sector, bio->bi_max_vecs,
-					 ci->idx, idx - ci->idx, 0, len, 0);
-
-		ci->sector += len;
-		ci->sector_count -= len;
-		ci->idx = idx;
-
-		return 0;
-	}
-
-	/*
-	 * Handle a bvec that must be split between two or more targets.
-	 */
-	return __split_bvec_across_targets(ci, ti, max);
+	return 0;
 }
 
 /*
@@ -1510,8 +1378,7 @@
 	ci.io->bio = bio;
 	ci.io->md = md;
 	spin_lock_init(&ci.io->endio_lock);
-	ci.sector = bio->bi_sector;
-	ci.idx = bio->bi_idx;
+	ci.sector = bio->bi_iter.bi_sector;
 
 	start_io_acct(ci.io);
 
diff --git a/drivers/md/faulty.c b/drivers/md/faulty.c
index 3193aef..e8b4574 100644
--- a/drivers/md/faulty.c
+++ b/drivers/md/faulty.c
@@ -74,8 +74,8 @@
 {
 	struct bio *b = bio->bi_private;
 
-	b->bi_size = bio->bi_size;
-	b->bi_sector = bio->bi_sector;
+	b->bi_iter.bi_size = bio->bi_iter.bi_size;
+	b->bi_iter.bi_sector = bio->bi_iter.bi_sector;
 
 	bio_put(bio);
 
@@ -185,26 +185,31 @@
 			return;
 		}
 
-		if (check_sector(conf, bio->bi_sector, bio_end_sector(bio), WRITE))
+		if (check_sector(conf, bio->bi_iter.bi_sector,
+				 bio_end_sector(bio), WRITE))
 			failit = 1;
 		if (check_mode(conf, WritePersistent)) {
-			add_sector(conf, bio->bi_sector, WritePersistent);
+			add_sector(conf, bio->bi_iter.bi_sector,
+				   WritePersistent);
 			failit = 1;
 		}
 		if (check_mode(conf, WriteTransient))
 			failit = 1;
 	} else {
 		/* read request */
-		if (check_sector(conf, bio->bi_sector, bio_end_sector(bio), READ))
+		if (check_sector(conf, bio->bi_iter.bi_sector,
+				 bio_end_sector(bio), READ))
 			failit = 1;
 		if (check_mode(conf, ReadTransient))
 			failit = 1;
 		if (check_mode(conf, ReadPersistent)) {
-			add_sector(conf, bio->bi_sector, ReadPersistent);
+			add_sector(conf, bio->bi_iter.bi_sector,
+				   ReadPersistent);
 			failit = 1;
 		}
 		if (check_mode(conf, ReadFixable)) {
-			add_sector(conf, bio->bi_sector, ReadFixable);
+			add_sector(conf, bio->bi_iter.bi_sector,
+				   ReadFixable);
 			failit = 1;
 		}
 	}
diff --git a/drivers/md/linear.c b/drivers/md/linear.c
index f03fabd..56f534b 100644
--- a/drivers/md/linear.c
+++ b/drivers/md/linear.c
@@ -288,65 +288,65 @@
 
 static void linear_make_request(struct mddev *mddev, struct bio *bio)
 {
+	char b[BDEVNAME_SIZE];
 	struct dev_info *tmp_dev;
-	sector_t start_sector;
+	struct bio *split;
+	sector_t start_sector, end_sector, data_offset;
 
 	if (unlikely(bio->bi_rw & REQ_FLUSH)) {
 		md_flush_request(mddev, bio);
 		return;
 	}
 
-	rcu_read_lock();
-	tmp_dev = which_dev(mddev, bio->bi_sector);
-	start_sector = tmp_dev->end_sector - tmp_dev->rdev->sectors;
+	do {
+		rcu_read_lock();
 
-
-	if (unlikely(bio->bi_sector >= (tmp_dev->end_sector)
-		     || (bio->bi_sector < start_sector))) {
-		char b[BDEVNAME_SIZE];
-
-		printk(KERN_ERR
-		       "md/linear:%s: make_request: Sector %llu out of bounds on "
-		       "dev %s: %llu sectors, offset %llu\n",
-		       mdname(mddev),
-		       (unsigned long long)bio->bi_sector,
-		       bdevname(tmp_dev->rdev->bdev, b),
-		       (unsigned long long)tmp_dev->rdev->sectors,
-		       (unsigned long long)start_sector);
-		rcu_read_unlock();
-		bio_io_error(bio);
-		return;
-	}
-	if (unlikely(bio_end_sector(bio) > tmp_dev->end_sector)) {
-		/* This bio crosses a device boundary, so we have to
-		 * split it.
-		 */
-		struct bio_pair *bp;
-		sector_t end_sector = tmp_dev->end_sector;
+		tmp_dev = which_dev(mddev, bio->bi_iter.bi_sector);
+		start_sector = tmp_dev->end_sector - tmp_dev->rdev->sectors;
+		end_sector = tmp_dev->end_sector;
+		data_offset = tmp_dev->rdev->data_offset;
+		bio->bi_bdev = tmp_dev->rdev->bdev;
 
 		rcu_read_unlock();
 
-		bp = bio_split(bio, end_sector - bio->bi_sector);
+		if (unlikely(bio->bi_iter.bi_sector >= end_sector ||
+			     bio->bi_iter.bi_sector < start_sector))
+			goto out_of_bounds;
 
-		linear_make_request(mddev, &bp->bio1);
-		linear_make_request(mddev, &bp->bio2);
-		bio_pair_release(bp);
-		return;
-	}
-		    
-	bio->bi_bdev = tmp_dev->rdev->bdev;
-	bio->bi_sector = bio->bi_sector - start_sector
-		+ tmp_dev->rdev->data_offset;
-	rcu_read_unlock();
+		if (unlikely(bio_end_sector(bio) > end_sector)) {
+			/* This bio crosses a device boundary, so we have to
+			 * split it.
+			 */
+			split = bio_split(bio, end_sector -
+					  bio->bi_iter.bi_sector,
+					  GFP_NOIO, fs_bio_set);
+			bio_chain(split, bio);
+		} else {
+			split = bio;
+		}
 
-	if (unlikely((bio->bi_rw & REQ_DISCARD) &&
-		     !blk_queue_discard(bdev_get_queue(bio->bi_bdev)))) {
-		/* Just ignore it */
-		bio_endio(bio, 0);
-		return;
-	}
+		split->bi_iter.bi_sector = split->bi_iter.bi_sector -
+			start_sector + data_offset;
 
-	generic_make_request(bio);
+		if (unlikely((split->bi_rw & REQ_DISCARD) &&
+			 !blk_queue_discard(bdev_get_queue(split->bi_bdev)))) {
+			/* Just ignore it */
+			bio_endio(split, 0);
+		} else
+			generic_make_request(split);
+	} while (split != bio);
+	return;
+
+out_of_bounds:
+	printk(KERN_ERR
+	       "md/linear:%s: make_request: Sector %llu out of bounds on "
+	       "dev %s: %llu sectors, offset %llu\n",
+	       mdname(mddev),
+	       (unsigned long long)bio->bi_iter.bi_sector,
+	       bdevname(tmp_dev->rdev->bdev, b),
+	       (unsigned long long)tmp_dev->rdev->sectors,
+	       (unsigned long long)start_sector);
+	bio_io_error(bio);
 }
 
 static void linear_status (struct seq_file *seq, struct mddev *mddev)
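linear_make_request now copes with arbitrarily large bios by looping instead of recursing: anything that would cross the device boundary is carved off with bio_split(), chained back to the parent with bio_chain() so the parent only completes once every fragment has, and submitted; the loop ends when the split is the bio itself. The skeleton of that pattern, shared with raid0 further down (sectors_to_boundary() and remap() are shorthand, not real helpers):

        struct bio *split;

        do {
                unsigned max = sectors_to_boundary(bio);        /* shorthand */

                if (max < bio_sectors(bio)) {
                        split = bio_split(bio, max, GFP_NOIO, fs_bio_set);
                        bio_chain(split, bio);          /* parent completes last */
                } else {
                        split = bio;
                }
                remap(split);                           /* set bi_bdev and bi_iter.bi_sector */
                generic_make_request(split);
        } while (split != bio);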
diff --git a/drivers/md/md.c b/drivers/md/md.c
index 40c5313..4ad5cc4 100644
--- a/drivers/md/md.c
+++ b/drivers/md/md.c
@@ -393,7 +393,7 @@
 	struct mddev *mddev = container_of(ws, struct mddev, flush_work);
 	struct bio *bio = mddev->flush_bio;
 
-	if (bio->bi_size == 0)
+	if (bio->bi_iter.bi_size == 0)
 		/* an empty barrier - all done */
 		bio_endio(bio, 0);
 	else {
@@ -754,7 +754,7 @@
 	struct bio *bio = bio_alloc_mddev(GFP_NOIO, 1, mddev);
 
 	bio->bi_bdev = rdev->meta_bdev ? rdev->meta_bdev : rdev->bdev;
-	bio->bi_sector = sector;
+	bio->bi_iter.bi_sector = sector;
 	bio_add_page(bio, page, size, 0);
 	bio->bi_private = rdev;
 	bio->bi_end_io = super_written;
@@ -782,18 +782,16 @@
 	struct bio *bio = bio_alloc_mddev(GFP_NOIO, 1, rdev->mddev);
 	int ret;
 
-	rw |= REQ_SYNC;
-
 	bio->bi_bdev = (metadata_op && rdev->meta_bdev) ?
 		rdev->meta_bdev : rdev->bdev;
 	if (metadata_op)
-		bio->bi_sector = sector + rdev->sb_start;
+		bio->bi_iter.bi_sector = sector + rdev->sb_start;
 	else if (rdev->mddev->reshape_position != MaxSector &&
 		 (rdev->mddev->reshape_backwards ==
 		  (sector >= rdev->mddev->reshape_position)))
-		bio->bi_sector = sector + rdev->new_data_offset;
+		bio->bi_iter.bi_sector = sector + rdev->new_data_offset;
 	else
-		bio->bi_sector = sector + rdev->data_offset;
+		bio->bi_iter.bi_sector = sector + rdev->data_offset;
 	bio_add_page(bio, page, size, 0);
 	submit_bio_wait(rw, bio);
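In sync_page_io() the explicit REQ_SYNC is dropped because submit_bio_wait() takes care of marking the request synchronous itself; the rest of the hunk is the usual bi_iter conversion. A minimal synchronous one-page read in the same style (rdev, page, size and sector are assumed to come from the caller):

        struct bio *bio = bio_alloc(GFP_NOIO, 1);
        int err;

        bio->bi_bdev = rdev->bdev;
        bio->bi_iter.bi_sector = sector + rdev->data_offset;
        bio_add_page(bio, page, size, 0);
        err = submit_bio_wait(READ, bio);       /* blocks until completion */
        bio_put(bio);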
 
diff --git a/drivers/md/multipath.c b/drivers/md/multipath.c
index 1642eae..849ad39 100644
--- a/drivers/md/multipath.c
+++ b/drivers/md/multipath.c
@@ -100,7 +100,7 @@
 		md_error (mp_bh->mddev, rdev);
 		printk(KERN_ERR "multipath: %s: rescheduling sector %llu\n", 
 		       bdevname(rdev->bdev,b), 
-		       (unsigned long long)bio->bi_sector);
+		       (unsigned long long)bio->bi_iter.bi_sector);
 		multipath_reschedule_retry(mp_bh);
 	} else
 		multipath_end_bh_io(mp_bh, error);
@@ -132,7 +132,7 @@
 	multipath = conf->multipaths + mp_bh->path;
 
 	mp_bh->bio = *bio;
-	mp_bh->bio.bi_sector += multipath->rdev->data_offset;
+	mp_bh->bio.bi_iter.bi_sector += multipath->rdev->data_offset;
 	mp_bh->bio.bi_bdev = multipath->rdev->bdev;
 	mp_bh->bio.bi_rw |= REQ_FAILFAST_TRANSPORT;
 	mp_bh->bio.bi_end_io = multipath_end_request;
@@ -355,21 +355,22 @@
 		spin_unlock_irqrestore(&conf->device_lock, flags);
 
 		bio = &mp_bh->bio;
-		bio->bi_sector = mp_bh->master_bio->bi_sector;
+		bio->bi_iter.bi_sector = mp_bh->master_bio->bi_iter.bi_sector;
 		
 		if ((mp_bh->path = multipath_map (conf))<0) {
 			printk(KERN_ALERT "multipath: %s: unrecoverable IO read"
 				" error for block %llu\n",
 				bdevname(bio->bi_bdev,b),
-				(unsigned long long)bio->bi_sector);
+				(unsigned long long)bio->bi_iter.bi_sector);
 			multipath_end_bh_io(mp_bh, -EIO);
 		} else {
 			printk(KERN_ERR "multipath: %s: redirecting sector %llu"
 				" to another IO path\n",
 				bdevname(bio->bi_bdev,b),
-				(unsigned long long)bio->bi_sector);
+				(unsigned long long)bio->bi_iter.bi_sector);
 			*bio = *(mp_bh->master_bio);
-			bio->bi_sector += conf->multipaths[mp_bh->path].rdev->data_offset;
+			bio->bi_iter.bi_sector +=
+				conf->multipaths[mp_bh->path].rdev->data_offset;
 			bio->bi_bdev = conf->multipaths[mp_bh->path].rdev->bdev;
 			bio->bi_rw |= REQ_FAILFAST_TRANSPORT;
 			bio->bi_end_io = multipath_end_request;
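The multipath retry path re-clones the master bio before applying the new path's data_offset, so the failed path's offset is never applied on top of an already-remapped sector; only the field names change here, the logic stays the same. Condensed (sketch, using the names from the hunk above):

        *bio = *(mp_bh->master_bio);                    /* start from the original sector */
        bio->bi_iter.bi_sector +=
                conf->multipaths[mp_bh->path].rdev->data_offset;
        bio->bi_bdev = conf->multipaths[mp_bh->path].rdev->bdev;
        generic_make_request(bio);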
diff --git a/drivers/md/raid0.c b/drivers/md/raid0.c
index c4d420b..407a99e 100644
--- a/drivers/md/raid0.c
+++ b/drivers/md/raid0.c
@@ -501,10 +501,11 @@
 			unsigned int chunk_sects, struct bio *bio)
 {
 	if (likely(is_power_of_2(chunk_sects))) {
-		return chunk_sects >= ((bio->bi_sector & (chunk_sects-1))
+		return chunk_sects >=
+			((bio->bi_iter.bi_sector & (chunk_sects-1))
 					+ bio_sectors(bio));
 	} else{
-		sector_t sector = bio->bi_sector;
+		sector_t sector = bio->bi_iter.bi_sector;
 		return chunk_sects >= (sector_div(sector, chunk_sects)
 						+ bio_sectors(bio));
 	}
@@ -512,64 +513,44 @@
 
 static void raid0_make_request(struct mddev *mddev, struct bio *bio)
 {
-	unsigned int chunk_sects;
-	sector_t sector_offset;
 	struct strip_zone *zone;
 	struct md_rdev *tmp_dev;
+	struct bio *split;
 
 	if (unlikely(bio->bi_rw & REQ_FLUSH)) {
 		md_flush_request(mddev, bio);
 		return;
 	}
 
-	chunk_sects = mddev->chunk_sectors;
-	if (unlikely(!is_io_in_chunk_boundary(mddev, chunk_sects, bio))) {
-		sector_t sector = bio->bi_sector;
-		struct bio_pair *bp;
-		/* Sanity check -- queue functions should prevent this happening */
-		if (bio_segments(bio) > 1)
-			goto bad_map;
-		/* This is a one page bio that upper layers
-		 * refuse to split for us, so we need to split it.
-		 */
-		if (likely(is_power_of_2(chunk_sects)))
-			bp = bio_split(bio, chunk_sects - (sector &
-							   (chunk_sects-1)));
-		else
-			bp = bio_split(bio, chunk_sects -
-				       sector_div(sector, chunk_sects));
-		raid0_make_request(mddev, &bp->bio1);
-		raid0_make_request(mddev, &bp->bio2);
-		bio_pair_release(bp);
-		return;
-	}
+	do {
+		sector_t sector = bio->bi_iter.bi_sector;
+		unsigned chunk_sects = mddev->chunk_sectors;
 
-	sector_offset = bio->bi_sector;
-	zone = find_zone(mddev->private, &sector_offset);
-	tmp_dev = map_sector(mddev, zone, bio->bi_sector,
-			     &sector_offset);
-	bio->bi_bdev = tmp_dev->bdev;
-	bio->bi_sector = sector_offset + zone->dev_start +
-		tmp_dev->data_offset;
+		unsigned sectors = chunk_sects -
+			(likely(is_power_of_2(chunk_sects))
+			 ? (sector & (chunk_sects-1))
+			 : sector_div(sector, chunk_sects));
 
-	if (unlikely((bio->bi_rw & REQ_DISCARD) &&
-		     !blk_queue_discard(bdev_get_queue(bio->bi_bdev)))) {
-		/* Just ignore it */
-		bio_endio(bio, 0);
-		return;
-	}
+		if (sectors < bio_sectors(bio)) {
+			split = bio_split(bio, sectors, GFP_NOIO, fs_bio_set);
+			bio_chain(split, bio);
+		} else {
+			split = bio;
+		}
 
-	generic_make_request(bio);
-	return;
+		zone = find_zone(mddev->private, &sector);
+		tmp_dev = map_sector(mddev, zone, sector, &sector);
+		split->bi_bdev = tmp_dev->bdev;
+		split->bi_iter.bi_sector = sector + zone->dev_start +
+			tmp_dev->data_offset;
 
-bad_map:
-	printk("md/raid0:%s: make_request bug: can't convert block across chunks"
-	       " or bigger than %dk %llu %d\n",
-	       mdname(mddev), chunk_sects / 2,
-	       (unsigned long long)bio->bi_sector, bio_sectors(bio) / 2);
-
-	bio_io_error(bio);
-	return;
+		if (unlikely((split->bi_rw & REQ_DISCARD) &&
+			 !blk_queue_discard(bdev_get_queue(split->bi_bdev)))) {
+			/* Just ignore it */
+			bio_endio(split, 0);
+		} else
+			generic_make_request(split);
+	} while (split != bio);
 }
 
 static void raid0_status(struct seq_file *seq, struct mddev *mddev)
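raid0 uses the same split/chain loop as linear; the split point is the distance to the next chunk boundary, computed with a mask for power-of-two chunk sizes and with sector_div() otherwise. A hypothetical helper expressing that calculation (sector_div() divides in place and returns the remainder):

        static unsigned sectors_to_chunk_end(sector_t sector, unsigned chunk_sects)
        {
                if (likely(is_power_of_2(chunk_sects)))
                        return chunk_sects - (sector & (chunk_sects - 1));
                return chunk_sects - sector_div(sector, chunk_sects);
        }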
diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
index a49cfcc..4a6ca1c 100644
--- a/drivers/md/raid1.c
+++ b/drivers/md/raid1.c
@@ -229,7 +229,7 @@
 	int done;
 	struct r1conf *conf = r1_bio->mddev->private;
 	sector_t start_next_window = r1_bio->start_next_window;
-	sector_t bi_sector = bio->bi_sector;
+	sector_t bi_sector = bio->bi_iter.bi_sector;
 
 	if (bio->bi_phys_segments) {
 		unsigned long flags;
@@ -265,9 +265,8 @@
 	if (!test_and_set_bit(R1BIO_Returned, &r1_bio->state)) {
 		pr_debug("raid1: sync end %s on sectors %llu-%llu\n",
 			 (bio_data_dir(bio) == WRITE) ? "write" : "read",
-			 (unsigned long long) bio->bi_sector,
-			 (unsigned long long) bio->bi_sector +
-			 bio_sectors(bio) - 1);
+			 (unsigned long long) bio->bi_iter.bi_sector,
+			 (unsigned long long) bio_end_sector(bio) - 1);
 
 		call_bio_endio(r1_bio);
 	}
@@ -466,9 +465,8 @@
 				struct bio *mbio = r1_bio->master_bio;
 				pr_debug("raid1: behind end write sectors"
 					 " %llu-%llu\n",
-					 (unsigned long long) mbio->bi_sector,
-					 (unsigned long long) mbio->bi_sector +
-					 bio_sectors(mbio) - 1);
+					 (unsigned long long) mbio->bi_iter.bi_sector,
+					 (unsigned long long) bio_end_sector(mbio) - 1);
 				call_bio_endio(r1_bio);
 			}
 		}
@@ -875,7 +873,7 @@
 		else if ((conf->next_resync - RESYNC_WINDOW_SECTORS
 				>= bio_end_sector(bio)) ||
 			 (conf->next_resync + NEXT_NORMALIO_DISTANCE
-				<= bio->bi_sector))
+				<= bio->bi_iter.bi_sector))
 			wait = false;
 		else
 			wait = true;
@@ -913,14 +911,14 @@
 
 	if (bio && bio_data_dir(bio) == WRITE) {
 		if (conf->next_resync + NEXT_NORMALIO_DISTANCE
-		    <= bio->bi_sector) {
+		    <= bio->bi_iter.bi_sector) {
 			if (conf->start_next_window == MaxSector)
 				conf->start_next_window =
 					conf->next_resync +
 					NEXT_NORMALIO_DISTANCE;
 
 			if ((conf->start_next_window + NEXT_NORMALIO_DISTANCE)
-			    <= bio->bi_sector)
+			    <= bio->bi_iter.bi_sector)
 				conf->next_window_requests++;
 			else
 				conf->current_window_requests++;
@@ -1027,7 +1025,8 @@
 		if (bvecs[i].bv_page)
 			put_page(bvecs[i].bv_page);
 	kfree(bvecs);
-	pr_debug("%dB behind alloc failed, doing sync I/O\n", bio->bi_size);
+	pr_debug("%dB behind alloc failed, doing sync I/O\n",
+		 bio->bi_iter.bi_size);
 }
 
 struct raid1_plug_cb {
@@ -1107,7 +1106,7 @@
 
 	if (bio_data_dir(bio) == WRITE &&
 	    bio_end_sector(bio) > mddev->suspend_lo &&
-	    bio->bi_sector < mddev->suspend_hi) {
+	    bio->bi_iter.bi_sector < mddev->suspend_hi) {
 		/* As the suspend_* range is controlled by
 		 * userspace, we want an interruptible
 		 * wait.
@@ -1118,7 +1117,7 @@
 			prepare_to_wait(&conf->wait_barrier,
 					&w, TASK_INTERRUPTIBLE);
 			if (bio_end_sector(bio) <= mddev->suspend_lo ||
-			    bio->bi_sector >= mddev->suspend_hi)
+			    bio->bi_iter.bi_sector >= mddev->suspend_hi)
 				break;
 			schedule();
 		}
@@ -1140,7 +1139,7 @@
 	r1_bio->sectors = bio_sectors(bio);
 	r1_bio->state = 0;
 	r1_bio->mddev = mddev;
-	r1_bio->sector = bio->bi_sector;
+	r1_bio->sector = bio->bi_iter.bi_sector;
 
 	/* We might need to issue multiple reads to different
 	 * devices if there are bad blocks around, so we keep
@@ -1180,12 +1179,13 @@
 		r1_bio->read_disk = rdisk;
 
 		read_bio = bio_clone_mddev(bio, GFP_NOIO, mddev);
-		bio_trim(read_bio, r1_bio->sector - bio->bi_sector,
+		bio_trim(read_bio, r1_bio->sector - bio->bi_iter.bi_sector,
 			 max_sectors);
 
 		r1_bio->bios[rdisk] = read_bio;
 
-		read_bio->bi_sector = r1_bio->sector + mirror->rdev->data_offset;
+		read_bio->bi_iter.bi_sector = r1_bio->sector +
+			mirror->rdev->data_offset;
 		read_bio->bi_bdev = mirror->rdev->bdev;
 		read_bio->bi_end_io = raid1_end_read_request;
 		read_bio->bi_rw = READ | do_sync;
@@ -1197,7 +1197,7 @@
 			 */
 
 			sectors_handled = (r1_bio->sector + max_sectors
-					   - bio->bi_sector);
+					   - bio->bi_iter.bi_sector);
 			r1_bio->sectors = max_sectors;
 			spin_lock_irq(&conf->device_lock);
 			if (bio->bi_phys_segments == 0)
@@ -1218,7 +1218,8 @@
 			r1_bio->sectors = bio_sectors(bio) - sectors_handled;
 			r1_bio->state = 0;
 			r1_bio->mddev = mddev;
-			r1_bio->sector = bio->bi_sector + sectors_handled;
+			r1_bio->sector = bio->bi_iter.bi_sector +
+				sectors_handled;
 			goto read_again;
 		} else
 			generic_make_request(read_bio);
@@ -1321,7 +1322,7 @@
 			if (r1_bio->bios[j])
 				rdev_dec_pending(conf->mirrors[j].rdev, mddev);
 		r1_bio->state = 0;
-		allow_barrier(conf, start_next_window, bio->bi_sector);
+		allow_barrier(conf, start_next_window, bio->bi_iter.bi_sector);
 		md_wait_for_blocked_rdev(blocked_rdev, mddev);
 		start_next_window = wait_barrier(conf, bio);
 		/*
@@ -1348,7 +1349,7 @@
 			bio->bi_phys_segments++;
 		spin_unlock_irq(&conf->device_lock);
 	}
-	sectors_handled = r1_bio->sector + max_sectors - bio->bi_sector;
+	sectors_handled = r1_bio->sector + max_sectors - bio->bi_iter.bi_sector;
 
 	atomic_set(&r1_bio->remaining, 1);
 	atomic_set(&r1_bio->behind_remaining, 0);
@@ -1360,7 +1361,7 @@
 			continue;
 
 		mbio = bio_clone_mddev(bio, GFP_NOIO, mddev);
-		bio_trim(mbio, r1_bio->sector - bio->bi_sector, max_sectors);
+		bio_trim(mbio, r1_bio->sector - bio->bi_iter.bi_sector, max_sectors);
 
 		if (first_clone) {
 			/* do behind I/O ?
@@ -1394,7 +1395,7 @@
 
 		r1_bio->bios[i] = mbio;
 
-		mbio->bi_sector	= (r1_bio->sector +
+		mbio->bi_iter.bi_sector	= (r1_bio->sector +
 				   conf->mirrors[i].rdev->data_offset);
 		mbio->bi_bdev = conf->mirrors[i].rdev->bdev;
 		mbio->bi_end_io	= raid1_end_write_request;
@@ -1434,7 +1435,7 @@
 		r1_bio->sectors = bio_sectors(bio) - sectors_handled;
 		r1_bio->state = 0;
 		r1_bio->mddev = mddev;
-		r1_bio->sector = bio->bi_sector + sectors_handled;
+		r1_bio->sector = bio->bi_iter.bi_sector + sectors_handled;
 		goto retry_write;
 	}
 
@@ -1952,20 +1953,24 @@
 	for (i = 0; i < conf->raid_disks * 2; i++) {
 		int j;
 		int size;
+		int uptodate;
 		struct bio *b = r1_bio->bios[i];
 		if (b->bi_end_io != end_sync_read)
 			continue;
-		/* fixup the bio for reuse */
+		/* fixup the bio for reuse, but preserve BIO_UPTODATE */
+		uptodate = test_bit(BIO_UPTODATE, &b->bi_flags);
 		bio_reset(b);
+		if (!uptodate)
+			clear_bit(BIO_UPTODATE, &b->bi_flags);
 		b->bi_vcnt = vcnt;
-		b->bi_size = r1_bio->sectors << 9;
-		b->bi_sector = r1_bio->sector +
+		b->bi_iter.bi_size = r1_bio->sectors << 9;
+		b->bi_iter.bi_sector = r1_bio->sector +
 			conf->mirrors[i].rdev->data_offset;
 		b->bi_bdev = conf->mirrors[i].rdev->bdev;
 		b->bi_end_io = end_sync_read;
 		b->bi_private = r1_bio;
 
-		size = b->bi_size;
+		size = b->bi_iter.bi_size;
 		for (j = 0; j < vcnt ; j++) {
 			struct bio_vec *bi;
 			bi = &b->bi_io_vec[j];
@@ -1989,11 +1994,14 @@
 		int j;
 		struct bio *pbio = r1_bio->bios[primary];
 		struct bio *sbio = r1_bio->bios[i];
+		int uptodate = test_bit(BIO_UPTODATE, &sbio->bi_flags);
 
 		if (sbio->bi_end_io != end_sync_read)
 			continue;
+		/* Now we can 'fixup' the BIO_UPTODATE flag */
+		set_bit(BIO_UPTODATE, &sbio->bi_flags);
 
-		if (test_bit(BIO_UPTODATE, &sbio->bi_flags)) {
+		if (uptodate) {
 			for (j = vcnt; j-- ; ) {
 				struct page *p, *s;
 				p = pbio->bi_io_vec[j].bv_page;
@@ -2008,7 +2016,7 @@
 		if (j >= 0)
 			atomic64_add(r1_bio->sectors, &mddev->resync_mismatches);
 		if (j < 0 || (test_bit(MD_RECOVERY_CHECK, &mddev->recovery)
-			      && test_bit(BIO_UPTODATE, &sbio->bi_flags))) {
+			      && uptodate)) {
 			/* No need to write to this device. */
 			sbio->bi_end_io = NULL;
 			rdev_dec_pending(conf->mirrors[i].rdev, mddev);
@@ -2220,11 +2228,11 @@
 		}
 
 		wbio->bi_rw = WRITE;
-		wbio->bi_sector = r1_bio->sector;
-		wbio->bi_size = r1_bio->sectors << 9;
+		wbio->bi_iter.bi_sector = r1_bio->sector;
+		wbio->bi_iter.bi_size = r1_bio->sectors << 9;
 
 		bio_trim(wbio, sector - r1_bio->sector, sectors);
-		wbio->bi_sector += rdev->data_offset;
+		wbio->bi_iter.bi_sector += rdev->data_offset;
 		wbio->bi_bdev = rdev->bdev;
 		if (submit_bio_wait(WRITE, wbio) == 0)
 			/* failure! */
@@ -2338,7 +2346,8 @@
 		}
 		r1_bio->read_disk = disk;
 		bio = bio_clone_mddev(r1_bio->master_bio, GFP_NOIO, mddev);
-		bio_trim(bio, r1_bio->sector - bio->bi_sector, max_sectors);
+		bio_trim(bio, r1_bio->sector - bio->bi_iter.bi_sector,
+			 max_sectors);
 		r1_bio->bios[r1_bio->read_disk] = bio;
 		rdev = conf->mirrors[disk].rdev;
 		printk_ratelimited(KERN_ERR
@@ -2347,7 +2356,7 @@
 				   mdname(mddev),
 				   (unsigned long long)r1_bio->sector,
 				   bdevname(rdev->bdev, b));
-		bio->bi_sector = r1_bio->sector + rdev->data_offset;
+		bio->bi_iter.bi_sector = r1_bio->sector + rdev->data_offset;
 		bio->bi_bdev = rdev->bdev;
 		bio->bi_end_io = raid1_end_read_request;
 		bio->bi_rw = READ | do_sync;
@@ -2356,7 +2365,7 @@
 			/* Drat - have to split this up more */
 			struct bio *mbio = r1_bio->master_bio;
 			int sectors_handled = (r1_bio->sector + max_sectors
-					       - mbio->bi_sector);
+					       - mbio->bi_iter.bi_sector);
 			r1_bio->sectors = max_sectors;
 			spin_lock_irq(&conf->device_lock);
 			if (mbio->bi_phys_segments == 0)
@@ -2374,7 +2383,8 @@
 			r1_bio->state = 0;
 			set_bit(R1BIO_ReadError, &r1_bio->state);
 			r1_bio->mddev = mddev;
-			r1_bio->sector = mbio->bi_sector + sectors_handled;
+			r1_bio->sector = mbio->bi_iter.bi_sector +
+				sectors_handled;
 
 			goto read_more;
 		} else
@@ -2598,7 +2608,7 @@
 		}
 		if (bio->bi_end_io) {
 			atomic_inc(&rdev->nr_pending);
-			bio->bi_sector = sector_nr + rdev->data_offset;
+			bio->bi_iter.bi_sector = sector_nr + rdev->data_offset;
 			bio->bi_bdev = rdev->bdev;
 			bio->bi_private = r1_bio;
 		}
@@ -2698,7 +2708,7 @@
 							continue;
 						/* remove last page from this bio */
 						bio->bi_vcnt--;
-						bio->bi_size -= len;
+						bio->bi_iter.bi_size -= len;
 						bio->bi_flags &= ~(1<< BIO_SEG_VALID);
 					}
 					goto bio_full;
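Besides the field renames, the raid1 resync fixup now snapshots BIO_UPTODATE before bio_reset(): bio_reset() re-initialises bi_flags with the bio marked up-to-date, so without the snapshot a failed resync read would be indistinguishable from a good one when the copies are compared. The save/restore idiom:

        /* Preserve read status across bio_reset(), which re-arms BIO_UPTODATE. */
        int uptodate = test_bit(BIO_UPTODATE, &b->bi_flags);

        bio_reset(b);
        if (!uptodate)
                clear_bit(BIO_UPTODATE, &b->bi_flags);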
diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c
index 8d39d63..33fc408 100644
--- a/drivers/md/raid10.c
+++ b/drivers/md/raid10.c
@@ -1152,14 +1152,12 @@
 	kfree(plug);
 }
 
-static void make_request(struct mddev *mddev, struct bio * bio)
+static void __make_request(struct mddev *mddev, struct bio *bio)
 {
 	struct r10conf *conf = mddev->private;
 	struct r10bio *r10_bio;
 	struct bio *read_bio;
 	int i;
-	sector_t chunk_mask = (conf->geo.chunk_mask & conf->prev.chunk_mask);
-	int chunk_sects = chunk_mask + 1;
 	const int rw = bio_data_dir(bio);
 	const unsigned long do_sync = (bio->bi_rw & REQ_SYNC);
 	const unsigned long do_fua = (bio->bi_rw & REQ_FUA);
@@ -1174,88 +1172,27 @@
 	int max_sectors;
 	int sectors;
 
-	if (unlikely(bio->bi_rw & REQ_FLUSH)) {
-		md_flush_request(mddev, bio);
-		return;
-	}
-
-	/* If this request crosses a chunk boundary, we need to
-	 * split it.  This will only happen for 1 PAGE (or less) requests.
-	 */
-	if (unlikely((bio->bi_sector & chunk_mask) + bio_sectors(bio)
-		     > chunk_sects
-		     && (conf->geo.near_copies < conf->geo.raid_disks
-			 || conf->prev.near_copies < conf->prev.raid_disks))) {
-		struct bio_pair *bp;
-		/* Sanity check -- queue functions should prevent this happening */
-		if (bio_segments(bio) > 1)
-			goto bad_map;
-		/* This is a one page bio that upper layers
-		 * refuse to split for us, so we need to split it.
-		 */
-		bp = bio_split(bio,
-			       chunk_sects - (bio->bi_sector & (chunk_sects - 1)) );
-
-		/* Each of these 'make_request' calls will call 'wait_barrier'.
-		 * If the first succeeds but the second blocks due to the resync
-		 * thread raising the barrier, we will deadlock because the
-		 * IO to the underlying device will be queued in generic_make_request
-		 * and will never complete, so will never reduce nr_pending.
-		 * So increment nr_waiting here so no new raise_barriers will
-		 * succeed, and so the second wait_barrier cannot block.
-		 */
-		spin_lock_irq(&conf->resync_lock);
-		conf->nr_waiting++;
-		spin_unlock_irq(&conf->resync_lock);
-
-		make_request(mddev, &bp->bio1);
-		make_request(mddev, &bp->bio2);
-
-		spin_lock_irq(&conf->resync_lock);
-		conf->nr_waiting--;
-		wake_up(&conf->wait_barrier);
-		spin_unlock_irq(&conf->resync_lock);
-
-		bio_pair_release(bp);
-		return;
-	bad_map:
-		printk("md/raid10:%s: make_request bug: can't convert block across chunks"
-		       " or bigger than %dk %llu %d\n", mdname(mddev), chunk_sects/2,
-		       (unsigned long long)bio->bi_sector, bio_sectors(bio) / 2);
-
-		bio_io_error(bio);
-		return;
-	}
-
-	md_write_start(mddev, bio);
-
-	/*
-	 * Register the new request and wait if the reconstruction
-	 * thread has put up a bar for new requests.
-	 * Continue immediately if no resync is active currently.
-	 */
-	wait_barrier(conf);
-
 	sectors = bio_sectors(bio);
 	while (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery) &&
-	    bio->bi_sector < conf->reshape_progress &&
-	    bio->bi_sector + sectors > conf->reshape_progress) {
+	    bio->bi_iter.bi_sector < conf->reshape_progress &&
+	    bio->bi_iter.bi_sector + sectors > conf->reshape_progress) {
 		/* IO spans the reshape position.  Need to wait for
 		 * reshape to pass
 		 */
 		allow_barrier(conf);
 		wait_event(conf->wait_barrier,
-			   conf->reshape_progress <= bio->bi_sector ||
-			   conf->reshape_progress >= bio->bi_sector + sectors);
+			   conf->reshape_progress <= bio->bi_iter.bi_sector ||
+			   conf->reshape_progress >= bio->bi_iter.bi_sector +
+			   sectors);
 		wait_barrier(conf);
 	}
 	if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery) &&
 	    bio_data_dir(bio) == WRITE &&
 	    (mddev->reshape_backwards
-	     ? (bio->bi_sector < conf->reshape_safe &&
-		bio->bi_sector + sectors > conf->reshape_progress)
-	     : (bio->bi_sector + sectors > conf->reshape_safe &&
-		bio->bi_sector < conf->reshape_progress))) {
+	     ? (bio->bi_iter.bi_sector < conf->reshape_safe &&
+		bio->bi_iter.bi_sector + sectors > conf->reshape_progress)
+	     : (bio->bi_iter.bi_sector + sectors > conf->reshape_safe &&
+		bio->bi_iter.bi_sector < conf->reshape_progress))) {
 		/* Need to update reshape_position in metadata */
 		mddev->reshape_position = conf->reshape_progress;
 		set_bit(MD_CHANGE_DEVS, &mddev->flags);
@@ -1273,7 +1210,7 @@
 	r10_bio->sectors = sectors;
 
 	r10_bio->mddev = mddev;
-	r10_bio->sector = bio->bi_sector;
+	r10_bio->sector = bio->bi_iter.bi_sector;
 	r10_bio->state = 0;
 
 	/* We might need to issue multiple reads to different
@@ -1302,13 +1239,13 @@
 		slot = r10_bio->read_slot;
 
 		read_bio = bio_clone_mddev(bio, GFP_NOIO, mddev);
-		bio_trim(read_bio, r10_bio->sector - bio->bi_sector,
+		bio_trim(read_bio, r10_bio->sector - bio->bi_iter.bi_sector,
 			 max_sectors);
 
 		r10_bio->devs[slot].bio = read_bio;
 		r10_bio->devs[slot].rdev = rdev;
 
-		read_bio->bi_sector = r10_bio->devs[slot].addr +
+		read_bio->bi_iter.bi_sector = r10_bio->devs[slot].addr +
 			choose_data_offset(r10_bio, rdev);
 		read_bio->bi_bdev = rdev->bdev;
 		read_bio->bi_end_io = raid10_end_read_request;
@@ -1320,7 +1257,7 @@
 			 * need another r10_bio.
 			 */
 			sectors_handled = (r10_bio->sector + max_sectors
-					   - bio->bi_sector);
+					   - bio->bi_iter.bi_sector);
 			r10_bio->sectors = max_sectors;
 			spin_lock_irq(&conf->device_lock);
 			if (bio->bi_phys_segments == 0)
@@ -1341,7 +1278,8 @@
 			r10_bio->sectors = bio_sectors(bio) - sectors_handled;
 			r10_bio->state = 0;
 			r10_bio->mddev = mddev;
-			r10_bio->sector = bio->bi_sector + sectors_handled;
+			r10_bio->sector = bio->bi_iter.bi_sector +
+				sectors_handled;
 			goto read_again;
 		} else
 			generic_make_request(read_bio);
@@ -1499,7 +1437,8 @@
 			bio->bi_phys_segments++;
 		spin_unlock_irq(&conf->device_lock);
 	}
-	sectors_handled = r10_bio->sector + max_sectors - bio->bi_sector;
+	sectors_handled = r10_bio->sector + max_sectors -
+		bio->bi_iter.bi_sector;
 
 	atomic_set(&r10_bio->remaining, 1);
 	bitmap_startwrite(mddev->bitmap, r10_bio->sector, r10_bio->sectors, 0);
@@ -1510,11 +1449,11 @@
 		if (r10_bio->devs[i].bio) {
 			struct md_rdev *rdev = conf->mirrors[d].rdev;
 			mbio = bio_clone_mddev(bio, GFP_NOIO, mddev);
-			bio_trim(mbio, r10_bio->sector - bio->bi_sector,
+			bio_trim(mbio, r10_bio->sector - bio->bi_iter.bi_sector,
 				 max_sectors);
 			r10_bio->devs[i].bio = mbio;
 
-			mbio->bi_sector	= (r10_bio->devs[i].addr+
+			mbio->bi_iter.bi_sector	= (r10_bio->devs[i].addr+
 					   choose_data_offset(r10_bio,
 							      rdev));
 			mbio->bi_bdev = rdev->bdev;
@@ -1553,11 +1492,11 @@
 				rdev = conf->mirrors[d].rdev;
 			}
 			mbio = bio_clone_mddev(bio, GFP_NOIO, mddev);
-			bio_trim(mbio, r10_bio->sector - bio->bi_sector,
+			bio_trim(mbio, r10_bio->sector - bio->bi_iter.bi_sector,
 				 max_sectors);
 			r10_bio->devs[i].repl_bio = mbio;
 
-			mbio->bi_sector	= (r10_bio->devs[i].addr +
+			mbio->bi_iter.bi_sector	= (r10_bio->devs[i].addr +
 					   choose_data_offset(
 						   r10_bio, rdev));
 			mbio->bi_bdev = rdev->bdev;
@@ -1591,11 +1530,57 @@
 		r10_bio->sectors = bio_sectors(bio) - sectors_handled;
 
 		r10_bio->mddev = mddev;
-		r10_bio->sector = bio->bi_sector + sectors_handled;
+		r10_bio->sector = bio->bi_iter.bi_sector + sectors_handled;
 		r10_bio->state = 0;
 		goto retry_write;
 	}
 	one_write_done(r10_bio);
+}
+
+static void make_request(struct mddev *mddev, struct bio *bio)
+{
+	struct r10conf *conf = mddev->private;
+	sector_t chunk_mask = (conf->geo.chunk_mask & conf->prev.chunk_mask);
+	int chunk_sects = chunk_mask + 1;
+
+	struct bio *split;
+
+	if (unlikely(bio->bi_rw & REQ_FLUSH)) {
+		md_flush_request(mddev, bio);
+		return;
+	}
+
+	md_write_start(mddev, bio);
+
+	/*
+	 * Register the new request and wait if the reconstruction
+	 * thread has put up a bar for new requests.
+	 * Continue immediately if no resync is active currently.
+	 */
+	wait_barrier(conf);
+
+	do {
+
+		/*
+		 * If this request crosses a chunk boundary, we need to split
+		 * it.
+		 */
+		if (unlikely((bio->bi_iter.bi_sector & chunk_mask) +
+			     bio_sectors(bio) > chunk_sects
+			     && (conf->geo.near_copies < conf->geo.raid_disks
+				 || conf->prev.near_copies <
+				 conf->prev.raid_disks))) {
+			split = bio_split(bio, chunk_sects -
+					  (bio->bi_iter.bi_sector &
+					   (chunk_sects - 1)),
+					  GFP_NOIO, fs_bio_set);
+			bio_chain(split, bio);
+		} else {
+			split = bio;
+		}
+
+		__make_request(mddev, split);
+	} while (split != bio);
 
 	/* In case raid10d snuck in to freeze_array */
 	wake_up(&conf->wait_barrier);
@@ -2124,10 +2109,10 @@
 		bio_reset(tbio);
 
 		tbio->bi_vcnt = vcnt;
-		tbio->bi_size = r10_bio->sectors << 9;
+		tbio->bi_iter.bi_size = r10_bio->sectors << 9;
 		tbio->bi_rw = WRITE;
 		tbio->bi_private = r10_bio;
-		tbio->bi_sector = r10_bio->devs[i].addr;
+		tbio->bi_iter.bi_sector = r10_bio->devs[i].addr;
 
 		for (j=0; j < vcnt ; j++) {
 			tbio->bi_io_vec[j].bv_offset = 0;
@@ -2144,7 +2129,7 @@
 		atomic_inc(&r10_bio->remaining);
 		md_sync_acct(conf->mirrors[d].rdev->bdev, bio_sectors(tbio));
 
-		tbio->bi_sector += conf->mirrors[d].rdev->data_offset;
+		tbio->bi_iter.bi_sector += conf->mirrors[d].rdev->data_offset;
 		tbio->bi_bdev = conf->mirrors[d].rdev->bdev;
 		generic_make_request(tbio);
 	}
@@ -2614,8 +2599,8 @@
 			sectors = sect_to_write;
 		/* Write at 'sector' for 'sectors' */
 		wbio = bio_clone_mddev(bio, GFP_NOIO, mddev);
-		bio_trim(wbio, sector - bio->bi_sector, sectors);
-		wbio->bi_sector = (r10_bio->devs[i].addr+
+		bio_trim(wbio, sector - bio->bi_iter.bi_sector, sectors);
+		wbio->bi_iter.bi_sector = (r10_bio->devs[i].addr+
 				   choose_data_offset(r10_bio, rdev) +
 				   (sector - r10_bio->sector));
 		wbio->bi_bdev = rdev->bdev;
@@ -2687,10 +2672,10 @@
 		(unsigned long long)r10_bio->sector);
 	bio = bio_clone_mddev(r10_bio->master_bio,
 			      GFP_NOIO, mddev);
-	bio_trim(bio, r10_bio->sector - bio->bi_sector, max_sectors);
+	bio_trim(bio, r10_bio->sector - bio->bi_iter.bi_sector, max_sectors);
 	r10_bio->devs[slot].bio = bio;
 	r10_bio->devs[slot].rdev = rdev;
-	bio->bi_sector = r10_bio->devs[slot].addr
+	bio->bi_iter.bi_sector = r10_bio->devs[slot].addr
 		+ choose_data_offset(r10_bio, rdev);
 	bio->bi_bdev = rdev->bdev;
 	bio->bi_rw = READ | do_sync;
@@ -2701,7 +2686,7 @@
 		struct bio *mbio = r10_bio->master_bio;
 		int sectors_handled =
 			r10_bio->sector + max_sectors
-			- mbio->bi_sector;
+			- mbio->bi_iter.bi_sector;
 		r10_bio->sectors = max_sectors;
 		spin_lock_irq(&conf->device_lock);
 		if (mbio->bi_phys_segments == 0)
@@ -2719,7 +2704,7 @@
 		set_bit(R10BIO_ReadError,
 			&r10_bio->state);
 		r10_bio->mddev = mddev;
-		r10_bio->sector = mbio->bi_sector
+		r10_bio->sector = mbio->bi_iter.bi_sector
 			+ sectors_handled;
 
 		goto read_more;
@@ -3157,7 +3142,8 @@
 				bio->bi_end_io = end_sync_read;
 				bio->bi_rw = READ;
 				from_addr = r10_bio->devs[j].addr;
-				bio->bi_sector = from_addr + rdev->data_offset;
+				bio->bi_iter.bi_sector = from_addr +
+					rdev->data_offset;
 				bio->bi_bdev = rdev->bdev;
 				atomic_inc(&rdev->nr_pending);
 				/* and we write to 'i' (if not in_sync) */
@@ -3181,7 +3167,7 @@
 					bio->bi_private = r10_bio;
 					bio->bi_end_io = end_sync_write;
 					bio->bi_rw = WRITE;
-					bio->bi_sector = to_addr
+					bio->bi_iter.bi_sector = to_addr
 						+ rdev->data_offset;
 					bio->bi_bdev = rdev->bdev;
 					atomic_inc(&r10_bio->remaining);
@@ -3210,7 +3196,8 @@
 				bio->bi_private = r10_bio;
 				bio->bi_end_io = end_sync_write;
 				bio->bi_rw = WRITE;
-				bio->bi_sector = to_addr + rdev->data_offset;
+				bio->bi_iter.bi_sector = to_addr +
+					rdev->data_offset;
 				bio->bi_bdev = rdev->bdev;
 				atomic_inc(&r10_bio->remaining);
 				break;
@@ -3328,7 +3315,7 @@
 			bio->bi_private = r10_bio;
 			bio->bi_end_io = end_sync_read;
 			bio->bi_rw = READ;
-			bio->bi_sector = sector +
+			bio->bi_iter.bi_sector = sector +
 				conf->mirrors[d].rdev->data_offset;
 			bio->bi_bdev = conf->mirrors[d].rdev->bdev;
 			count++;
@@ -3350,7 +3337,7 @@
 			bio->bi_private = r10_bio;
 			bio->bi_end_io = end_sync_write;
 			bio->bi_rw = WRITE;
-			bio->bi_sector = sector +
+			bio->bi_iter.bi_sector = sector +
 				conf->mirrors[d].replacement->data_offset;
 			bio->bi_bdev = conf->mirrors[d].replacement->bdev;
 			count++;
@@ -3397,7 +3384,7 @@
 			     bio2 = bio2->bi_next) {
 				/* remove last page from this bio */
 				bio2->bi_vcnt--;
-				bio2->bi_size -= len;
+				bio2->bi_iter.bi_size -= len;
 				bio2->bi_flags &= ~(1<< BIO_SEG_VALID);
 			}
 			goto bio_full;
@@ -4418,7 +4405,7 @@
 	read_bio = bio_alloc_mddev(GFP_KERNEL, RESYNC_PAGES, mddev);
 
 	read_bio->bi_bdev = rdev->bdev;
-	read_bio->bi_sector = (r10_bio->devs[r10_bio->read_slot].addr
+	read_bio->bi_iter.bi_sector = (r10_bio->devs[r10_bio->read_slot].addr
 			       + rdev->data_offset);
 	read_bio->bi_private = r10_bio;
 	read_bio->bi_end_io = end_sync_read;
@@ -4426,7 +4413,7 @@
 	read_bio->bi_flags &= ~(BIO_POOL_MASK - 1);
 	read_bio->bi_flags |= 1 << BIO_UPTODATE;
 	read_bio->bi_vcnt = 0;
-	read_bio->bi_size = 0;
+	read_bio->bi_iter.bi_size = 0;
 	r10_bio->master_bio = read_bio;
 	r10_bio->read_slot = r10_bio->devs[r10_bio->read_slot].devnum;
 
@@ -4452,7 +4439,8 @@
 
 		bio_reset(b);
 		b->bi_bdev = rdev2->bdev;
-		b->bi_sector = r10_bio->devs[s/2].addr + rdev2->new_data_offset;
+		b->bi_iter.bi_sector = r10_bio->devs[s/2].addr +
+			rdev2->new_data_offset;
 		b->bi_private = r10_bio;
 		b->bi_end_io = end_reshape_write;
 		b->bi_rw = WRITE;
@@ -4479,7 +4467,7 @@
 			     bio2 = bio2->bi_next) {
 				/* Remove last page from this bio */
 				bio2->bi_vcnt--;
-				bio2->bi_size -= len;
+				bio2->bi_iter.bi_size -= len;
 				bio2->bi_flags &= ~(1<<BIO_SEG_VALID);
 			}
 			goto bio_full;
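raid10 splits its entry point: __make_request() keeps the per-request logic, and a thin make_request() takes the resync barrier once and then feeds it chunk-aligned pieces via bio_split()/bio_chain(), replacing the old recursive bio_pair path and its nr_waiting workaround for the barrier deadlock. The wrapper's shape (crosses_chunk() and to_boundary() are shorthand for the test and length calculation in the hunk above):

        struct bio *split;

        md_write_start(mddev, bio);
        wait_barrier(conf);                             /* taken once for all pieces */
        do {
                if (crosses_chunk(conf, bio))           /* shorthand */
                        split = bio_split(bio, to_boundary(conf, bio),
                                          GFP_NOIO, fs_bio_set);
                else
                        split = bio;
                if (split != bio)
                        bio_chain(split, bio);
                __make_request(mddev, split);
        } while (split != bio);
        wake_up(&conf->wait_barrier);                   /* in case raid10d froze the array */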
diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
index 03f82ab..16f5c21 100644
--- a/drivers/md/raid5.c
+++ b/drivers/md/raid5.c
@@ -133,7 +133,7 @@
 static inline struct bio *r5_next_bio(struct bio *bio, sector_t sector)
 {
 	int sectors = bio_sectors(bio);
-	if (bio->bi_sector + sectors < sector + STRIPE_SECTORS)
+	if (bio->bi_iter.bi_sector + sectors < sector + STRIPE_SECTORS)
 		return bio->bi_next;
 	else
 		return NULL;
@@ -225,7 +225,7 @@
 
 		return_bi = bi->bi_next;
 		bi->bi_next = NULL;
-		bi->bi_size = 0;
+		bi->bi_iter.bi_size = 0;
 		trace_block_bio_complete(bdev_get_queue(bi->bi_bdev),
 					 bi, 0);
 		bio_endio(bi, 0);
@@ -852,10 +852,10 @@
 				bi->bi_rw, i);
 			atomic_inc(&sh->count);
 			if (use_new_offset(conf, sh))
-				bi->bi_sector = (sh->sector
+				bi->bi_iter.bi_sector = (sh->sector
 						 + rdev->new_data_offset);
 			else
-				bi->bi_sector = (sh->sector
+				bi->bi_iter.bi_sector = (sh->sector
 						 + rdev->data_offset);
 			if (test_bit(R5_ReadNoMerge, &sh->dev[i].flags))
 				bi->bi_rw |= REQ_NOMERGE;
@@ -863,7 +863,7 @@
 			bi->bi_vcnt = 1;
 			bi->bi_io_vec[0].bv_len = STRIPE_SIZE;
 			bi->bi_io_vec[0].bv_offset = 0;
-			bi->bi_size = STRIPE_SIZE;
+			bi->bi_iter.bi_size = STRIPE_SIZE;
 			/*
 			 * If this is discard request, set bi_vcnt 0. We don't
 			 * want to confuse SCSI because SCSI will replace payload
@@ -899,15 +899,15 @@
 				rbi->bi_rw, i);
 			atomic_inc(&sh->count);
 			if (use_new_offset(conf, sh))
-				rbi->bi_sector = (sh->sector
+				rbi->bi_iter.bi_sector = (sh->sector
 						  + rrdev->new_data_offset);
 			else
-				rbi->bi_sector = (sh->sector
+				rbi->bi_iter.bi_sector = (sh->sector
 						  + rrdev->data_offset);
 			rbi->bi_vcnt = 1;
 			rbi->bi_io_vec[0].bv_len = STRIPE_SIZE;
 			rbi->bi_io_vec[0].bv_offset = 0;
-			rbi->bi_size = STRIPE_SIZE;
+			rbi->bi_iter.bi_size = STRIPE_SIZE;
 			/*
 			 * If this is discard request, set bi_vcnt 0. We don't
 			 * want to confuse SCSI because SCSI will replace payload
@@ -935,24 +935,24 @@
 async_copy_data(int frombio, struct bio *bio, struct page *page,
 	sector_t sector, struct dma_async_tx_descriptor *tx)
 {
-	struct bio_vec *bvl;
+	struct bio_vec bvl;
+	struct bvec_iter iter;
 	struct page *bio_page;
-	int i;
 	int page_offset;
 	struct async_submit_ctl submit;
 	enum async_tx_flags flags = 0;
 
-	if (bio->bi_sector >= sector)
-		page_offset = (signed)(bio->bi_sector - sector) * 512;
+	if (bio->bi_iter.bi_sector >= sector)
+		page_offset = (signed)(bio->bi_iter.bi_sector - sector) * 512;
 	else
-		page_offset = (signed)(sector - bio->bi_sector) * -512;
+		page_offset = (signed)(sector - bio->bi_iter.bi_sector) * -512;
 
 	if (frombio)
 		flags |= ASYNC_TX_FENCE;
 	init_async_submit(&submit, flags, tx, NULL, NULL, NULL);
 
-	bio_for_each_segment(bvl, bio, i) {
-		int len = bvl->bv_len;
+	bio_for_each_segment(bvl, bio, iter) {
+		int len = bvl.bv_len;
 		int clen;
 		int b_offset = 0;
 
@@ -968,8 +968,8 @@
 			clen = len;
 
 		if (clen > 0) {
-			b_offset += bvl->bv_offset;
-			bio_page = bvl->bv_page;
+			b_offset += bvl.bv_offset;
+			bio_page = bvl.bv_page;
 			if (frombio)
 				tx = async_memcpy(page, bio_page, page_offset,
 						  b_offset, clen, &submit);
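async_copy_data() switches from indexing bi_io_vec directly to the iterator form of bio_for_each_segment(): the loop variable is now a struct bio_vec by value and the cursor is an on-stack struct bvec_iter, which also makes the walk correct for bios whose front has already been advanced. Minimal usage sketch:

        struct bio_vec bvec;
        struct bvec_iter iter;

        bio_for_each_segment(bvec, bio, iter) {
                char *addr = kmap_atomic(bvec.bv_page);
                /* ... consume bvec.bv_len bytes at addr + bvec.bv_offset ... */
                kunmap_atomic(addr);
        }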
@@ -1012,7 +1012,7 @@
 			BUG_ON(!dev->read);
 			rbi = dev->read;
 			dev->read = NULL;
-			while (rbi && rbi->bi_sector <
+			while (rbi && rbi->bi_iter.bi_sector <
 				dev->sector + STRIPE_SECTORS) {
 				rbi2 = r5_next_bio(rbi, dev->sector);
 				if (!raid5_dec_bi_active_stripes(rbi)) {
@@ -1048,7 +1048,7 @@
 			dev->read = rbi = dev->toread;
 			dev->toread = NULL;
 			spin_unlock_irq(&sh->stripe_lock);
-			while (rbi && rbi->bi_sector <
+			while (rbi && rbi->bi_iter.bi_sector <
 				dev->sector + STRIPE_SECTORS) {
 				tx = async_copy_data(0, rbi, dev->page,
 					dev->sector, tx);
@@ -1390,7 +1390,7 @@
 			wbi = dev->written = chosen;
 			spin_unlock_irq(&sh->stripe_lock);
 
-			while (wbi && wbi->bi_sector <
+			while (wbi && wbi->bi_iter.bi_sector <
 				dev->sector + STRIPE_SECTORS) {
 				if (wbi->bi_rw & REQ_FUA)
 					set_bit(R5_WantFUA, &dev->flags);
@@ -2615,7 +2615,7 @@
 	int firstwrite=0;
 
 	pr_debug("adding bi b#%llu to stripe s#%llu\n",
-		(unsigned long long)bi->bi_sector,
+		(unsigned long long)bi->bi_iter.bi_sector,
 		(unsigned long long)sh->sector);
 
 	/*
@@ -2633,12 +2633,12 @@
 			firstwrite = 1;
 	} else
 		bip = &sh->dev[dd_idx].toread;
-	while (*bip && (*bip)->bi_sector < bi->bi_sector) {
-		if (bio_end_sector(*bip) > bi->bi_sector)
+	while (*bip && (*bip)->bi_iter.bi_sector < bi->bi_iter.bi_sector) {
+		if (bio_end_sector(*bip) > bi->bi_iter.bi_sector)
 			goto overlap;
 		bip = & (*bip)->bi_next;
 	}
-	if (*bip && (*bip)->bi_sector < bio_end_sector(bi))
+	if (*bip && (*bip)->bi_iter.bi_sector < bio_end_sector(bi))
 		goto overlap;
 
 	BUG_ON(*bip && bi->bi_next && (*bip) != bi->bi_next);
@@ -2652,7 +2652,7 @@
 		sector_t sector = sh->dev[dd_idx].sector;
 		for (bi=sh->dev[dd_idx].towrite;
 		     sector < sh->dev[dd_idx].sector + STRIPE_SECTORS &&
-			     bi && bi->bi_sector <= sector;
+			     bi && bi->bi_iter.bi_sector <= sector;
 		     bi = r5_next_bio(bi, sh->dev[dd_idx].sector)) {
 			if (bio_end_sector(bi) >= sector)
 				sector = bio_end_sector(bi);
@@ -2662,7 +2662,7 @@
 	}
 
 	pr_debug("added bi b#%llu to stripe s#%llu, disk %d.\n",
-		(unsigned long long)(*bip)->bi_sector,
+		(unsigned long long)(*bip)->bi_iter.bi_sector,
 		(unsigned long long)sh->sector, dd_idx);
 	spin_unlock_irq(&sh->stripe_lock);
 
@@ -2737,7 +2737,7 @@
 		if (test_and_clear_bit(R5_Overlap, &sh->dev[i].flags))
 			wake_up(&conf->wait_for_overlap);
 
-		while (bi && bi->bi_sector <
+		while (bi && bi->bi_iter.bi_sector <
 			sh->dev[i].sector + STRIPE_SECTORS) {
 			struct bio *nextbi = r5_next_bio(bi, sh->dev[i].sector);
 			clear_bit(BIO_UPTODATE, &bi->bi_flags);
@@ -2756,7 +2756,7 @@
 		bi = sh->dev[i].written;
 		sh->dev[i].written = NULL;
 		if (bi) bitmap_end = 1;
-		while (bi && bi->bi_sector <
+		while (bi && bi->bi_iter.bi_sector <
 		       sh->dev[i].sector + STRIPE_SECTORS) {
 			struct bio *bi2 = r5_next_bio(bi, sh->dev[i].sector);
 			clear_bit(BIO_UPTODATE, &bi->bi_flags);
@@ -2780,7 +2780,7 @@
 			spin_unlock_irq(&sh->stripe_lock);
 			if (test_and_clear_bit(R5_Overlap, &sh->dev[i].flags))
 				wake_up(&conf->wait_for_overlap);
-			while (bi && bi->bi_sector <
+			while (bi && bi->bi_iter.bi_sector <
 			       sh->dev[i].sector + STRIPE_SECTORS) {
 				struct bio *nextbi =
 					r5_next_bio(bi, sh->dev[i].sector);
@@ -3004,7 +3004,7 @@
 					clear_bit(R5_UPTODATE, &dev->flags);
 				wbi = dev->written;
 				dev->written = NULL;
-				while (wbi && wbi->bi_sector <
+				while (wbi && wbi->bi_iter.bi_sector <
 					dev->sector + STRIPE_SECTORS) {
 					wbi2 = r5_next_bio(wbi, dev->sector);
 					if (!raid5_dec_bi_active_stripes(wbi)) {
@@ -4096,7 +4096,7 @@
 
 static int in_chunk_boundary(struct mddev *mddev, struct bio *bio)
 {
-	sector_t sector = bio->bi_sector + get_start_sect(bio->bi_bdev);
+	sector_t sector = bio->bi_iter.bi_sector + get_start_sect(bio->bi_bdev);
 	unsigned int chunk_sectors = mddev->chunk_sectors;
 	unsigned int bio_sectors = bio_sectors(bio);
 
@@ -4233,9 +4233,9 @@
 	/*
 	 *	compute position
 	 */
-	align_bi->bi_sector =  raid5_compute_sector(conf, raid_bio->bi_sector,
-						    0,
-						    &dd_idx, NULL);
+	align_bi->bi_iter.bi_sector =
+		raid5_compute_sector(conf, raid_bio->bi_iter.bi_sector,
+				     0, &dd_idx, NULL);
 
 	end_sector = bio_end_sector(align_bi);
 	rcu_read_lock();
@@ -4260,7 +4260,8 @@
 		align_bi->bi_flags &= ~(1 << BIO_SEG_VALID);
 
 		if (!bio_fits_rdev(align_bi) ||
-		    is_badblock(rdev, align_bi->bi_sector, bio_sectors(align_bi),
+		    is_badblock(rdev, align_bi->bi_iter.bi_sector,
+				bio_sectors(align_bi),
 				&first_bad, &bad_sectors)) {
 			/* too big in some way, or has a known bad block */
 			bio_put(align_bi);
@@ -4269,7 +4270,7 @@
 		}
 
 		/* No reshape active, so we can trust rdev->data_offset */
-		align_bi->bi_sector += rdev->data_offset;
+		align_bi->bi_iter.bi_sector += rdev->data_offset;
 
 		spin_lock_irq(&conf->device_lock);
 		wait_event_lock_irq(conf->wait_for_stripe,
@@ -4281,7 +4282,7 @@
 		if (mddev->gendisk)
 			trace_block_bio_remap(bdev_get_queue(align_bi->bi_bdev),
 					      align_bi, disk_devt(mddev->gendisk),
-					      raid_bio->bi_sector);
+					      raid_bio->bi_iter.bi_sector);
 		generic_make_request(align_bi);
 		return 1;
 	} else {
@@ -4464,8 +4465,8 @@
 		/* Skip discard while reshape is happening */
 		return;
 
-	logical_sector = bi->bi_sector & ~((sector_t)STRIPE_SECTORS-1);
-	last_sector = bi->bi_sector + (bi->bi_size>>9);
+	logical_sector = bi->bi_iter.bi_sector & ~((sector_t)STRIPE_SECTORS-1);
+	last_sector = bi->bi_iter.bi_sector + (bi->bi_iter.bi_size>>9);
 
 	bi->bi_next = NULL;
 	bi->bi_phys_segments = 1; /* over-loaded to count active stripes */
@@ -4569,7 +4570,7 @@
 		return;
 	}
 
-	logical_sector = bi->bi_sector & ~((sector_t)STRIPE_SECTORS-1);
+	logical_sector = bi->bi_iter.bi_sector & ~((sector_t)STRIPE_SECTORS-1);
 	last_sector = bio_end_sector(bi);
 	bi->bi_next = NULL;
 	bi->bi_phys_segments = 1;	/* over-loaded to count active stripes */
@@ -5053,7 +5054,8 @@
 	int remaining;
 	int handled = 0;
 
-	logical_sector = raid_bio->bi_sector & ~((sector_t)STRIPE_SECTORS-1);
+	logical_sector = raid_bio->bi_iter.bi_sector &
+		~((sector_t)STRIPE_SECTORS-1);
 	sector = raid5_compute_sector(conf, logical_sector,
 				      0, &dd_idx, NULL);
 	last_sector = bio_end_sector(raid_bio);
@@ -5512,23 +5514,43 @@
 	return sectors * (raid_disks - conf->max_degraded);
 }
 
+static void free_scratch_buffer(struct r5conf *conf, struct raid5_percpu *percpu)
+{
+	safe_put_page(percpu->spare_page);
+	kfree(percpu->scribble);
+	percpu->spare_page = NULL;
+	percpu->scribble = NULL;
+}
+
+static int alloc_scratch_buffer(struct r5conf *conf, struct raid5_percpu *percpu)
+{
+	if (conf->level == 6 && !percpu->spare_page)
+		percpu->spare_page = alloc_page(GFP_KERNEL);
+	if (!percpu->scribble)
+		percpu->scribble = kmalloc(conf->scribble_len, GFP_KERNEL);
+
+	if (!percpu->scribble || (conf->level == 6 && !percpu->spare_page)) {
+		free_scratch_buffer(conf, percpu);
+		return -ENOMEM;
+	}
+
+	return 0;
+}
+
 static void raid5_free_percpu(struct r5conf *conf)
 {
-	struct raid5_percpu *percpu;
 	unsigned long cpu;
 
 	if (!conf->percpu)
 		return;
 
-	get_online_cpus();
-	for_each_possible_cpu(cpu) {
-		percpu = per_cpu_ptr(conf->percpu, cpu);
-		safe_put_page(percpu->spare_page);
-		kfree(percpu->scribble);
-	}
 #ifdef CONFIG_HOTPLUG_CPU
 	unregister_cpu_notifier(&conf->cpu_notify);
 #endif
+
+	get_online_cpus();
+	for_each_possible_cpu(cpu)
+		free_scratch_buffer(conf, per_cpu_ptr(conf->percpu, cpu));
 	put_online_cpus();
 
 	free_percpu(conf->percpu);
@@ -5555,15 +5577,7 @@
 	switch (action) {
 	case CPU_UP_PREPARE:
 	case CPU_UP_PREPARE_FROZEN:
-		if (conf->level == 6 && !percpu->spare_page)
-			percpu->spare_page = alloc_page(GFP_KERNEL);
-		if (!percpu->scribble)
-			percpu->scribble = kmalloc(conf->scribble_len, GFP_KERNEL);
-
-		if (!percpu->scribble ||
-		    (conf->level == 6 && !percpu->spare_page)) {
-			safe_put_page(percpu->spare_page);
-			kfree(percpu->scribble);
+		if (alloc_scratch_buffer(conf, percpu)) {
 			pr_err("%s: failed memory allocation for cpu%ld\n",
 			       __func__, cpu);
 			return notifier_from_errno(-ENOMEM);
@@ -5571,10 +5585,7 @@
 		break;
 	case CPU_DEAD:
 	case CPU_DEAD_FROZEN:
-		safe_put_page(percpu->spare_page);
-		kfree(percpu->scribble);
-		percpu->spare_page = NULL;
-		percpu->scribble = NULL;
+		free_scratch_buffer(conf, per_cpu_ptr(conf->percpu, cpu));
 		break;
 	default:
 		break;
@@ -5586,40 +5597,29 @@
 static int raid5_alloc_percpu(struct r5conf *conf)
 {
 	unsigned long cpu;
-	struct page *spare_page;
-	struct raid5_percpu __percpu *allcpus;
-	void *scribble;
-	int err;
+	int err = 0;
 
-	allcpus = alloc_percpu(struct raid5_percpu);
-	if (!allcpus)
+	conf->percpu = alloc_percpu(struct raid5_percpu);
+	if (!conf->percpu)
 		return -ENOMEM;
-	conf->percpu = allcpus;
 
-	get_online_cpus();
-	err = 0;
-	for_each_present_cpu(cpu) {
-		if (conf->level == 6) {
-			spare_page = alloc_page(GFP_KERNEL);
-			if (!spare_page) {
-				err = -ENOMEM;
-				break;
-			}
-			per_cpu_ptr(conf->percpu, cpu)->spare_page = spare_page;
-		}
-		scribble = kmalloc(conf->scribble_len, GFP_KERNEL);
-		if (!scribble) {
-			err = -ENOMEM;
-			break;
-		}
-		per_cpu_ptr(conf->percpu, cpu)->scribble = scribble;
-	}
 #ifdef CONFIG_HOTPLUG_CPU
 	conf->cpu_notify.notifier_call = raid456_cpu_notify;
 	conf->cpu_notify.priority = 0;
-	if (err == 0)
-		err = register_cpu_notifier(&conf->cpu_notify);
+	err = register_cpu_notifier(&conf->cpu_notify);
+	if (err)
+		return err;
 #endif
+
+	get_online_cpus();
+	for_each_present_cpu(cpu) {
+		err = alloc_scratch_buffer(conf, per_cpu_ptr(conf->percpu, cpu));
+		if (err) {
+			pr_err("%s: failed memory allocation for cpu%ld\n",
+			       __func__, cpu);
+			break;
+		}
+	}
 	put_online_cpus();
 
 	return err;
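The per-CPU scratch handling is factored into alloc_scratch_buffer()/free_scratch_buffer() so the hotplug notifier, the initial allocation loop and raid5_free_percpu() share one code path, and the notifier is registered before the present-CPU loop so a CPU that comes up in between is still covered; teardown likewise unregisters the notifier before freeing. Condensed setup order (error handling trimmed):

        conf->percpu = alloc_percpu(struct raid5_percpu);
        err = register_cpu_notifier(&conf->cpu_notify); /* under CONFIG_HOTPLUG_CPU */
        get_online_cpus();
        for_each_present_cpu(cpu)
                err = alloc_scratch_buffer(conf, per_cpu_ptr(conf->percpu, cpu));
        put_online_cpus();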
@@ -6101,6 +6101,7 @@
 		blk_queue_io_min(mddev->queue, chunk_size);
 		blk_queue_io_opt(mddev->queue, chunk_size *
 				 (conf->raid_disks - conf->max_degraded));
+		mddev->queue->limits.raid_partial_stripes_expensive = 1;
 		/*
 		 * We can only discard a whole stripe. It doesn't make sense to
 		 * discard data disk but write parity disk
diff --git a/drivers/media/Kconfig b/drivers/media/Kconfig
index 8270388..1d0758a 100644
--- a/drivers/media/Kconfig
+++ b/drivers/media/Kconfig
@@ -172,6 +172,9 @@
 config MEDIA_SUBDRV_AUTOSELECT
 	bool "Autoselect ancillary drivers (tuners, sensors, i2c, frontends)"
 	depends on MEDIA_ANALOG_TV_SUPPORT || MEDIA_DIGITAL_TV_SUPPORT || MEDIA_CAMERA_SUPPORT
+	depends on HAS_IOMEM
+	select I2C
+	select I2C_MUX
 	default y
 	help
 	  By default, a media driver auto-selects all possible ancillary
diff --git a/drivers/media/dvb-core/dvb-usb-ids.h b/drivers/media/dvb-core/dvb-usb-ids.h
index 419a2d6..f19a2cc 100644
--- a/drivers/media/dvb-core/dvb-usb-ids.h
+++ b/drivers/media/dvb-core/dvb-usb-ids.h
@@ -239,6 +239,7 @@
 #define USB_PID_AVERMEDIA_A835B_4835			0x4835
 #define USB_PID_AVERMEDIA_1867				0x1867
 #define USB_PID_AVERMEDIA_A867				0xa867
+#define USB_PID_AVERMEDIA_H335				0x0335
 #define USB_PID_AVERMEDIA_TWINSTAR			0x0825
 #define USB_PID_TECHNOTREND_CONNECT_S2400               0x3006
 #define USB_PID_TECHNOTREND_CONNECT_S2400_8KEEPROM	0x3009
@@ -317,6 +318,7 @@
 #define USB_PID_WINFAST_DTV_DONGLE_H			0x60f6
 #define USB_PID_WINFAST_DTV_DONGLE_STK7700P_2		0x6f01
 #define USB_PID_WINFAST_DTV_DONGLE_GOLD			0x6029
+#define USB_PID_WINFAST_DTV_DONGLE_MINID		0x6f0f
 #define USB_PID_GENPIX_8PSK_REV_1_COLD			0x0200
 #define USB_PID_GENPIX_8PSK_REV_1_WARM			0x0201
 #define USB_PID_GENPIX_8PSK_REV_2			0x0202
@@ -365,6 +367,7 @@
 #define USB_PID_TERRATEC_DVBS2CI_V2			0x10ac
 #define USB_PID_TECHNISAT_USB2_HDCI_V1			0x0001
 #define USB_PID_TECHNISAT_USB2_HDCI_V2			0x0002
+#define USB_PID_TECHNISAT_USB2_CABLESTAR_HDCI		0x0003
 #define USB_PID_TECHNISAT_AIRSTAR_TELESTICK_2		0x0004
 #define USB_PID_TECHNISAT_USB2_DVB_S2			0x0500
 #define USB_PID_CPYTO_REDI_PC50A			0xa803
diff --git a/drivers/media/dvb-frontends/Kconfig b/drivers/media/dvb-frontends/Kconfig
index bddbab4..dd12a1e 100644
--- a/drivers/media/dvb-frontends/Kconfig
+++ b/drivers/media/dvb-frontends/Kconfig
@@ -35,6 +35,13 @@
 	help
 	  A Silicon tuner that supports DVB-S and DVB-S2 modes
 
+config DVB_M88DS3103
+	tristate "Montage M88DS3103"
+	depends on DVB_CORE && I2C && I2C_MUX
+	default m if !MEDIA_SUBDRV_AUTOSELECT
+	help
+	  Say Y when you want to support this frontend.
+
 comment "Multistandard (cable + terrestrial) frontends"
 	depends on DVB_CORE
 
diff --git a/drivers/media/dvb-frontends/Makefile b/drivers/media/dvb-frontends/Makefile
index f9cb43d..0c75a6a 100644
--- a/drivers/media/dvb-frontends/Makefile
+++ b/drivers/media/dvb-frontends/Makefile
@@ -85,6 +85,7 @@
 obj-$(CONFIG_DVB_STV0900) += stv0900.o
 obj-$(CONFIG_DVB_STV090x) += stv090x.o
 obj-$(CONFIG_DVB_STV6110x) += stv6110x.o
+obj-$(CONFIG_DVB_M88DS3103) += m88ds3103.o
 obj-$(CONFIG_DVB_ISL6423) += isl6423.o
 obj-$(CONFIG_DVB_EC100) += ec100.o
 obj-$(CONFIG_DVB_HD29L2) += hd29l2.o
diff --git a/drivers/media/dvb-frontends/a8293.c b/drivers/media/dvb-frontends/a8293.c
index 74fbb5d..780da58 100644
--- a/drivers/media/dvb-frontends/a8293.c
+++ b/drivers/media/dvb-frontends/a8293.c
@@ -96,6 +96,8 @@
 	if (ret)
 		goto err;
 
+	usleep_range(1500, 50000);
+
 	return ret;
 err:
 	dev_dbg(&priv->i2c->dev, "%s: failed=%d\n", __func__, ret);
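The a8293 fix adds a settle delay after the LNB supply is switched; usleep_range() is used rather than msleep() or udelay() because this path may sleep and the wide range lets the timer code coalesce the wakeup. As in the hunk:

        usleep_range(1500, 50000);      /* >= 1.5 ms, with generous slack */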
diff --git a/drivers/media/dvb-frontends/cx24117.c b/drivers/media/dvb-frontends/cx24117.c
index 476b422..a6c3c9e 100644
--- a/drivers/media/dvb-frontends/cx24117.c
+++ b/drivers/media/dvb-frontends/cx24117.c
@@ -135,15 +135,33 @@
 
 
 enum cmds {
-	CMD_SET_VCO     = 0x10,
-	CMD_TUNEREQUEST = 0x11,
-	CMD_MPEGCONFIG  = 0x13,
-	CMD_TUNERINIT   = 0x14,
-	CMD_LNBSEND     = 0x21, /* Formerly CMD_SEND_DISEQC */
-	CMD_LNBDCLEVEL  = 0x22,
-	CMD_SET_TONE    = 0x23,
-	CMD_UPDFWVERS   = 0x35,
-	CMD_TUNERSLEEP  = 0x36,
+	CMD_SET_VCOFREQ    = 0x10,
+	CMD_TUNEREQUEST    = 0x11,
+	CMD_GLOBAL_MPEGCFG = 0x13,
+	CMD_MPEGCFG        = 0x14,
+	CMD_TUNERINIT      = 0x15,
+	CMD_GET_SRATE      = 0x18,
+	CMD_SET_GOLDCODE   = 0x19,
+	CMD_GET_AGCACC     = 0x1a,
+	CMD_DEMODINIT      = 0x1b,
+	CMD_GETCTLACC      = 0x1c,
+
+	CMD_LNBCONFIG      = 0x20,
+	CMD_LNBSEND        = 0x21,
+	CMD_LNBDCLEVEL     = 0x22,
+	CMD_LNBPCBCONFIG   = 0x23,
+	CMD_LNBSENDTONEBST = 0x24,
+	CMD_LNBUPDREPLY    = 0x25,
+
+	CMD_SET_GPIOMODE   = 0x30,
+	CMD_SET_GPIOEN     = 0x31,
+	CMD_SET_GPIODIR    = 0x32,
+	CMD_SET_GPIOOUT    = 0x33,
+	CMD_ENABLERSCORR   = 0x34,
+	CMD_FWVERSION      = 0x35,
+	CMD_SET_SLEEPMODE  = 0x36,
+	CMD_BERCTRL        = 0x3c,
+	CMD_EVENTCTRL      = 0x3d,
 };
 
 static LIST_HEAD(hybrid_tuner_instance_list);
@@ -619,8 +637,8 @@
 	cx24117_writereg(state, 0xf7, 0x0c);
 	cx24117_writereg(state, 0xe0, 0x00);
 
-	/* CMD 1B */
-	cmd.args[0] = 0x1b;
+	/* Init demodulator */
+	cmd.args[0] = CMD_DEMODINIT;
 	cmd.args[1] = 0x00;
 	cmd.args[2] = 0x01;
 	cmd.args[3] = 0x00;
@@ -629,8 +647,8 @@
 	if (ret != 0)
 		goto error;
 
-	/* CMD 10 */
-	cmd.args[0] = CMD_SET_VCO;
+	/* Set VCO frequency */
+	cmd.args[0] = CMD_SET_VCOFREQ;
 	cmd.args[1] = 0x06;
 	cmd.args[2] = 0x2b;
 	cmd.args[3] = 0xd8;
@@ -648,8 +666,8 @@
 	if (ret != 0)
 		goto error;
 
-	/* CMD 15 */
-	cmd.args[0] = 0x15;
+	/* Tuner init */
+	cmd.args[0] = CMD_TUNERINIT;
 	cmd.args[1] = 0x00;
 	cmd.args[2] = 0x01;
 	cmd.args[3] = 0x00;
@@ -667,8 +685,8 @@
 	if (ret != 0)
 		goto error;
 
-	/* CMD 13 */
-	cmd.args[0] = CMD_MPEGCONFIG;
+	/* Global MPEG config */
+	cmd.args[0] = CMD_GLOBAL_MPEGCFG;
 	cmd.args[1] = 0x00;
 	cmd.args[2] = 0x00;
 	cmd.args[3] = 0x00;
@@ -679,9 +697,9 @@
 	if (ret != 0)
 		goto error;
 
-	/* CMD 14 */
+	/* MPEG config for each demod */
 	for (i = 0; i < 2; i++) {
-		cmd.args[0] = CMD_TUNERINIT;
+		cmd.args[0] = CMD_MPEGCFG;
 		cmd.args[1] = (u8) i;
 		cmd.args[2] = 0x00;
 		cmd.args[3] = 0x05;
@@ -699,8 +717,8 @@
 	cx24117_writereg(state, 0xcf, 0x00);
 	cx24117_writereg(state, 0xe5, 0x04);
 
-	/* Firmware CMD 35: Get firmware version */
-	cmd.args[0] = CMD_UPDFWVERS;
+	/* Get firmware version */
+	cmd.args[0] = CMD_FWVERSION;
 	cmd.len = 2;
 	for (i = 0; i < 4; i++) {
 		cmd.args[1] = i;
@@ -779,8 +797,8 @@
 	u8 reg = (state->demod == 0) ?
 		CX24117_REG_SSTATUS0 : CX24117_REG_SSTATUS1;
 
-	/* Firmware CMD 1A */
-	cmd.args[0] = 0x1a;
+	/* Read AGC accumulator register */
+	cmd.args[0] = CMD_GET_AGCACC;
 	cmd.args[1] = (u8) state->demod;
 	cmd.len = 2;
 	ret = cx24117_cmd_execute(fe, &cmd);
@@ -899,22 +917,15 @@
 		voltage == SEC_VOLTAGE_18 ? "SEC_VOLTAGE_18" :
 		"SEC_VOLTAGE_OFF");
 
-	/* CMD 32 */
-	cmd.args[0] = 0x32;
-	cmd.args[1] = reg;
-	cmd.args[2] = reg;
+	/* Prepare a set GPIO logic level CMD */
+	cmd.args[0] = CMD_SET_GPIOOUT;
+	cmd.args[2] = reg; /* mask */
 	cmd.len = 3;
-	ret = cx24117_cmd_execute(fe, &cmd);
-	if (ret)
-		return ret;
 
 	if ((voltage == SEC_VOLTAGE_13) ||
 	    (voltage == SEC_VOLTAGE_18)) {
-		/* CMD 33 */
-		cmd.args[0] = 0x33;
+		/* power on LNB */
 		cmd.args[1] = reg;
-		cmd.args[2] = reg;
-		cmd.len = 3;
 		ret = cx24117_cmd_execute(fe, &cmd);
 		if (ret != 0)
 			return ret;
@@ -926,22 +937,22 @@
 		/* Wait for voltage/min repeat delay */
 		msleep(100);
 
-		/* CMD 22 - CMD_LNBDCLEVEL */
+		/* Set 13V/18V select pin */
 		cmd.args[0] = CMD_LNBDCLEVEL;
 		cmd.args[1] = state->demod ? 0 : 1;
 		cmd.args[2] = (voltage == SEC_VOLTAGE_18 ? 0x01 : 0x00);
 		cmd.len = 3;
+		ret = cx24117_cmd_execute(fe, &cmd);
 
 		/* Min delay time before DiSEqC send */
 		msleep(20);
 	} else {
-		cmd.args[0] = 0x33;
+		/* power off LNB */
 		cmd.args[1] = 0x00;
-		cmd.args[2] = reg;
-		cmd.len = 3;
+		ret = cx24117_cmd_execute(fe, &cmd);
 	}
 
-	return cx24117_cmd_execute(fe, &cmd);
+	return ret;
 }
 
 static int cx24117_set_tone(struct dvb_frontend *fe,
@@ -968,8 +979,7 @@
 	msleep(20);
 
 	/* Set the tone */
-	/* CMD 23 - CMD_SET_TONE */
-	cmd.args[0] = CMD_SET_TONE;
+	cmd.args[0] = CMD_LNBPCBCONFIG;
 	cmd.args[1] = (state->demod ? 0 : 1);
 	cmd.args[2] = 0x00;
 	cmd.args[3] = 0x00;
@@ -1166,7 +1176,7 @@
 
 	switch (demod) {
 	case 0:
-		dev_err(&state->priv->i2c->dev,
+		dev_err(&i2c->dev,
 			"%s: Error attaching frontend %d\n",
 			KBUILD_MODNAME, demod);
 		goto error1;
@@ -1190,12 +1200,6 @@
 	state->demod = demod - 1;
 	state->priv = priv;
 
-	/* test i2c bus for ack */
-	if (demod == 0) {
-		if (cx24117_readreg(state, 0x00) < 0)
-			goto error3;
-	}
-
 	dev_info(&state->priv->i2c->dev,
 		"%s: Attaching frontend %d\n",
 		KBUILD_MODNAME, state->demod);
@@ -1206,8 +1210,6 @@
 	state->frontend.demodulator_priv = state;
 	return &state->frontend;
 
-error3:
-	kfree(state);
 error2:
 	cx24117_release_priv(priv);
 error1:
@@ -1231,8 +1233,8 @@
 
 	mutex_lock(&state->priv->fe_lock);
 
-	/* Firmware CMD 36: Power config */
-	cmd.args[0] = CMD_TUNERSLEEP;
+	/* Set sleep mode off */
+	cmd.args[0] = CMD_SET_SLEEPMODE;
 	cmd.args[1] = (state->demod ? 1 : 0);
 	cmd.args[2] = 0;
 	cmd.len = 3;
@@ -1244,8 +1246,8 @@
 	if (ret != 0)
 		goto exit;
 
-	/* CMD 3C */
-	cmd.args[0] = 0x3c;
+	/* Set BER control */
+	cmd.args[0] = CMD_BERCTRL;
 	cmd.args[1] = (state->demod ? 1 : 0);
 	cmd.args[2] = 0x10;
 	cmd.args[3] = 0x10;
@@ -1254,12 +1256,22 @@
 	if (ret != 0)
 		goto exit;
 
-	/* CMD 34 */
-	cmd.args[0] = 0x34;
+	/* Set RS correction (enable/disable) */
+	cmd.args[0] = CMD_ENABLERSCORR;
 	cmd.args[1] = (state->demod ? 1 : 0);
 	cmd.args[2] = CX24117_OCC;
 	cmd.len = 3;
 	ret = cx24117_cmd_execute_nolock(fe, &cmd);
+	if (ret != 0)
+		goto exit;
+
+	/* Set GPIO direction */
+	/* Set as output - controls LNB power on/off */
+	cmd.args[0] = CMD_SET_GPIODIR;
+	cmd.args[1] = 0x30;
+	cmd.args[2] = 0x30;
+	cmd.len = 3;
+	ret = cx24117_cmd_execute_nolock(fe, &cmd);
 
 exit:
 	mutex_unlock(&state->priv->fe_lock);
@@ -1278,8 +1290,8 @@
 	dev_dbg(&state->priv->i2c->dev, "%s() demod%d\n",
 		__func__, state->demod);
 
-	/* Firmware CMD 36: Power config */
-	cmd.args[0] = CMD_TUNERSLEEP;
+	/* Set sleep mode on */
+	cmd.args[0] = CMD_SET_SLEEPMODE;
 	cmd.args[1] = (state->demod ? 1 : 0);
 	cmd.args[2] = 1;
 	cmd.len = 3;
@@ -1558,7 +1570,8 @@
 
 	u8 buf[0x1f-4];
 
-	cmd.args[0] = 0x1c;
+	/* Read current tune parameters */
+	cmd.args[0] = CMD_GETCTLACC;
 	cmd.args[1] = (u8) state->demod;
 	cmd.len = 2;
 	ret = cx24117_cmd_execute(fe, &cmd);
diff --git a/drivers/media/dvb-frontends/dib8000.c b/drivers/media/dvb-frontends/dib8000.c
index 6dbbee4..1632d78 100644
--- a/drivers/media/dvb-frontends/dib8000.c
+++ b/drivers/media/dvb-frontends/dib8000.c
@@ -11,6 +11,7 @@
 #include <linux/slab.h>
 #include <linux/i2c.h>
 #include <linux/mutex.h>
+#include <asm/div64.h>
 
 #include "dvb_math.h"
 
@@ -118,6 +119,12 @@
 	u8 longest_intlv_layer;
 	u16 output_mode;
 
+	/* for DVBv5 stats */
+	s64 init_ucb;
+	unsigned long per_jiffies_stats;
+	unsigned long ber_jiffies_stats;
+	unsigned long ber_jiffies_stats_layer[3];
+
 #ifdef DIB8000_AGC_FREEZE
 	u16 agc1_max;
 	u16 agc1_min;
@@ -157,15 +164,10 @@
 	return ret;
 }
 
-static u16 dib8000_read_word(struct dib8000_state *state, u16 reg)
+static u16 __dib8000_read_word(struct dib8000_state *state, u16 reg)
 {
 	u16 ret;
 
-	if (mutex_lock_interruptible(&state->i2c_buffer_lock) < 0) {
-		dprintk("could not acquire lock");
-		return 0;
-	}
-
 	state->i2c_write_buffer[0] = reg >> 8;
 	state->i2c_write_buffer[1] = reg & 0xff;
 
@@ -183,6 +185,21 @@
 		dprintk("i2c read error on %d", reg);
 
 	ret = (state->i2c_read_buffer[0] << 8) | state->i2c_read_buffer[1];
+
+	return ret;
+}
+
+static u16 dib8000_read_word(struct dib8000_state *state, u16 reg)
+{
+	u16 ret;
+
+	if (mutex_lock_interruptible(&state->i2c_buffer_lock) < 0) {
+		dprintk("could not acquire lock");
+		return 0;
+	}
+
+	ret = __dib8000_read_word(state, reg);
+
 	mutex_unlock(&state->i2c_buffer_lock);
 
 	return ret;
@@ -192,8 +209,15 @@
 {
 	u16 rw[2];
 
-	rw[0] = dib8000_read_word(state, reg + 0);
-	rw[1] = dib8000_read_word(state, reg + 1);
+	if (mutex_lock_interruptible(&state->i2c_buffer_lock) < 0) {
+		dprintk("could not acquire lock");
+		return 0;
+	}
+
+	rw[0] = __dib8000_read_word(state, reg + 0);
+	rw[1] = __dib8000_read_word(state, reg + 1);
+
+	mutex_unlock(&state->i2c_buffer_lock);
 
 	return ((rw[0] << 16) | (rw[1]));
 }
@@ -787,7 +811,7 @@
 			dprintk("PLL: Update ratio (prediv: %d, ratio: %d)", state->cfg.pll->pll_prediv, ratio);
 			dib8000_write_word(state, 901, (state->cfg.pll->pll_prediv << 8) | (ratio << 0)); /* only the PLL ratio is updated. */
 		}
-}
+	}
 
 	return 0;
 }
@@ -966,6 +990,45 @@
 	return value;
 }
 
+static int dib8000_read_unc_blocks(struct dvb_frontend *fe, u32 *unc);
+
+static void dib8000_reset_stats(struct dvb_frontend *fe)
+{
+	struct dib8000_state *state = fe->demodulator_priv;
+	struct dtv_frontend_properties *c = &state->fe[0]->dtv_property_cache;
+	u32 ucb;
+
+	memset(&c->strength, 0, sizeof(c->strength));
+	memset(&c->cnr, 0, sizeof(c->cnr));
+	memset(&c->post_bit_error, 0, sizeof(c->post_bit_error));
+	memset(&c->post_bit_count, 0, sizeof(c->post_bit_count));
+	memset(&c->block_error, 0, sizeof(c->block_error));
+
+	c->strength.len = 1;
+	c->cnr.len = 1;
+	c->block_error.len = 1;
+	c->block_count.len = 1;
+	c->post_bit_error.len = 1;
+	c->post_bit_count.len = 1;
+
+	c->strength.stat[0].scale = FE_SCALE_DECIBEL;
+	c->strength.stat[0].uvalue = 0;
+
+	c->cnr.stat[0].scale = FE_SCALE_NOT_AVAILABLE;
+	c->block_error.stat[0].scale = FE_SCALE_NOT_AVAILABLE;
+	c->block_count.stat[0].scale = FE_SCALE_NOT_AVAILABLE;
+	c->post_bit_error.stat[0].scale = FE_SCALE_NOT_AVAILABLE;
+	c->post_bit_count.stat[0].scale = FE_SCALE_NOT_AVAILABLE;
+
+	dib8000_read_unc_blocks(fe, &ucb);
+
+	state->init_ucb = -ucb;
+	state->ber_jiffies_stats = 0;
+	state->per_jiffies_stats = 0;
+	memset(&state->ber_jiffies_stats_layer, 0,
+	       sizeof(state->ber_jiffies_stats_layer));
+}
+
 static int dib8000_reset(struct dvb_frontend *fe)
 {
 	struct dib8000_state *state = fe->demodulator_priv;
@@ -1071,6 +1134,8 @@
 
 	dib8000_set_power_mode(state, DIB8000_POWER_INTERFACE_ONLY);
 
+	dib8000_reset_stats(fe);
+
 	return 0;
 }
 
@@ -2445,7 +2510,8 @@
 	if (state->revision == 0x8090)
 		internal = dib8000_read32(state, 23) / 1000;
 
-	if (state->autosearch_state == AS_SEARCHING_FFT) {
+	if ((state->revision >= 0x8002) &&
+	    (state->autosearch_state == AS_SEARCHING_FFT)) {
 		dib8000_write_word(state,  37, 0x0065); /* P_ctrl_pha_off_max default values */
 		dib8000_write_word(state, 116, 0x0000); /* P_ana_gain to 0 */
 
@@ -2481,7 +2547,8 @@
 		dib8000_write_word(state, 770, (dib8000_read_word(state, 770) & 0xdfff) | (1 << 13)); /* P_restart_ccg = 1 */
 		dib8000_write_word(state, 770, (dib8000_read_word(state, 770) & 0xdfff) | (0 << 13)); /* P_restart_ccg = 0 */
 		dib8000_write_word(state, 0, (dib8000_read_word(state, 0) & 0x7ff) | (0 << 15) | (1 << 13)); /* P_restart_search = 0; */
-	} else if (state->autosearch_state == AS_SEARCHING_GUARD) {
+	} else if ((state->revision >= 0x8002) &&
+		   (state->autosearch_state == AS_SEARCHING_GUARD)) {
 		c->transmission_mode = TRANSMISSION_MODE_8K;
 		c->guard_interval = GUARD_INTERVAL_1_8;
 		c->inversion = 0;
@@ -2583,7 +2650,8 @@
 	struct dib8000_state *state = fe->demodulator_priv;
 	u16 irq_pending = dib8000_read_word(state, 1284);
 
-	if (state->autosearch_state == AS_SEARCHING_FFT) {
+	if ((state->revision >= 0x8002) &&
+	    (state->autosearch_state == AS_SEARCHING_FFT)) {
 		if (irq_pending & 0x1) {
 			dprintk("dib8000_autosearch_irq: max correlation result available");
 			return 3;
@@ -2853,6 +2921,91 @@
 	return 0;
 }
 
+/**
+ * is_manual_mode - Check if TMCC should be used for parameter settings
+ * @c:	struct dtv_frontend_properties
+ *
+ * By default, the TMCC table should be used for parameter settings in most
+ * use cases. However, sometimes it is desirable to lock the demod to
+ * use the manual parameters.
+ *
+ * In manual mode, the current dib8000_tune state machine is very restrictive:
+ * it requires both per-layer and per-transponder parameters to be
+ * properly specified, otherwise the device won't lock.
+ *
+ * Check if all those conditions are properly satisfied before allowing
+ * the device to use the manual frequency lock mode.
+ */
+static int is_manual_mode(struct dtv_frontend_properties *c)
+{
+	int i, n_segs = 0;
+
+	/* Use auto mode on DVB-T compat mode */
+	if (c->delivery_system != SYS_ISDBT)
+		return 0;
+
+	/*
+	 * Currently, the transmission mode is only detected in auto mode
+	 */
+	if (c->transmission_mode == TRANSMISSION_MODE_AUTO) {
+		dprintk("transmission mode auto");
+		return 0;
+	}
+
+	/*
+	 * Currently, the guard interval is only detected in auto mode
+	 */
+	if (c->guard_interval == GUARD_INTERVAL_AUTO) {
+		dprintk("guard interval auto");
+		return 0;
+	}
+
+	/*
+	 * If no layer is enabled, assume auto mode, as at least one
+	 * layer should be enabled
+	 */
+	if (!c->isdbt_layer_enabled) {
+		dprintk("no layer modulation specified");
+		return 0;
+	}
+
+	/*
+	 * Check if the per-layer parameters aren't auto and
+	 * disable a layer if segment count is 0 or invalid.
+	 */
+	for (i = 0; i < 3; i++) {
+		if (!(c->isdbt_layer_enabled & 1 << i))
+			continue;
+
+		if ((c->layer[i].segment_count > 13) ||
+		    (c->layer[i].segment_count == 0)) {
+			c->isdbt_layer_enabled &= ~(1 << i);
+			continue;
+		}
+
+		n_segs += c->layer[i].segment_count;
+
+		if ((c->layer[i].modulation == QAM_AUTO) ||
+		    (c->layer[i].fec == FEC_AUTO)) {
+			dprintk("layer %c has either modulation or FEC auto",
+				'A' + i);
+			return 0;
+		}
+	}
+
+	/*
+	 * Userspace specified a wrong number of segments;
+	 * fall back to auto mode.
+	 */
+	if (n_segs == 0 || n_segs > 13) {
+		dprintk("number of segments is invalid");
+		return 0;
+	}
+
+	/* Everything looks ok for manual mode */
+	return 1;
+}
+
 static int dib8000_tune(struct dvb_frontend *fe)
 {
 	struct dib8000_state *state = fe->demodulator_priv;
@@ -2878,40 +3031,19 @@
 
 	switch (*tune_state) {
 	case CT_DEMOD_START: /* 30 */
+			dib8000_reset_stats(fe);
+
 			if (state->revision == 0x8090)
 				dib8090p_init_sdram(state);
 			state->status = FE_STATUS_TUNE_PENDING;
-			if ((c->delivery_system != SYS_ISDBT) ||
-					(c->inversion == INVERSION_AUTO) ||
-					(c->transmission_mode == TRANSMISSION_MODE_AUTO) ||
-					(c->guard_interval == GUARD_INTERVAL_AUTO) ||
-					(((c->isdbt_layer_enabled & (1 << 0)) != 0) &&
-					 (c->layer[0].segment_count != 0xff) &&
-					 (c->layer[0].segment_count != 0) &&
-					 ((c->layer[0].modulation == QAM_AUTO) ||
-					  (c->layer[0].fec == FEC_AUTO))) ||
-					(((c->isdbt_layer_enabled & (1 << 1)) != 0) &&
-					 (c->layer[1].segment_count != 0xff) &&
-					 (c->layer[1].segment_count != 0) &&
-					 ((c->layer[1].modulation == QAM_AUTO) ||
-					  (c->layer[1].fec == FEC_AUTO))) ||
-					(((c->isdbt_layer_enabled & (1 << 2)) != 0) &&
-					 (c->layer[2].segment_count != 0xff) &&
-					 (c->layer[2].segment_count != 0) &&
-					 ((c->layer[2].modulation == QAM_AUTO) ||
-					  (c->layer[2].fec == FEC_AUTO))) ||
-					(((c->layer[0].segment_count == 0) ||
-					  ((c->isdbt_layer_enabled & (1 << 0)) == 0)) &&
-					 ((c->layer[1].segment_count == 0) ||
-					  ((c->isdbt_layer_enabled & (2 << 0)) == 0)) &&
-					 ((c->layer[2].segment_count == 0) || ((c->isdbt_layer_enabled & (3 << 0)) == 0))))
-				state->channel_parameters_set = 0; /* auto search */
-			else
-				state->channel_parameters_set = 1; /* channel parameters are known */
+			state->channel_parameters_set = is_manual_mode(c);
+
+			dprintk("Tuning channel on %s search mode",
+				state->channel_parameters_set ? "manual" : "auto");
 
 			dib8000_viterbi_state(state, 0); /* force chan dec in restart */
 
-			/* Layer monit */
+			/* Layer monitor */
 			dib8000_write_word(state, 285, dib8000_read_word(state, 285) & 0x60);
 
 			dib8000_set_frequency_offset(state);
@@ -3256,15 +3388,27 @@
 	return dib8000_set_adc_state(state, DIBX000_SLOW_ADC_OFF) | dib8000_set_adc_state(state, DIBX000_ADC_OFF);
 }
 
+static int dib8000_read_status(struct dvb_frontend *fe, fe_status_t * stat);
+
 static int dib8000_get_frontend(struct dvb_frontend *fe)
 {
 	struct dib8000_state *state = fe->demodulator_priv;
 	u16 i, val = 0;
-	fe_status_t stat;
+	fe_status_t stat = 0;
 	u8 index_frontend, sub_index_frontend;
 
 	fe->dtv_property_cache.bandwidth_hz = 6000000;
 
+	/*
+	 * If called too early, get_frontend makes dib8000_tune either
+	 * not lock or not sync. This causes dvbv5-scan/dvbv5-zap to fail.
+	 * So, let's just return if frontend 0 has not locked.
+	 */
+	dib8000_read_status(fe, &stat);
+	if (!(stat & FE_HAS_SYNC))
+		return 0;
+
+	dprintk("TMCC lock");
 	for (index_frontend = 1; (index_frontend < MAX_NUMBER_OF_FRONTENDS) && (state->fe[index_frontend] != NULL); index_frontend++) {
 		state->fe[index_frontend]->ops.read_status(state->fe[index_frontend], &stat);
 		if (stat&FE_HAS_SYNC) {
@@ -3335,9 +3479,13 @@
 		fe->dtv_property_cache.layer[i].segment_count = val & 0x0F;
 		dprintk("dib8000_get_frontend : Layer %d segments = %d ", i, fe->dtv_property_cache.layer[i].segment_count);
 
-		val = dib8000_read_word(state, 499 + i);
-		fe->dtv_property_cache.layer[i].interleaving = val & 0x3;
-		dprintk("dib8000_get_frontend : Layer %d time_intlv = %d ", i, fe->dtv_property_cache.layer[i].interleaving);
+		val = dib8000_read_word(state, 499 + i) & 0x3;
+		/* Interleaving can be 0, 1, 2 or 4 */
+		if (val == 3)
+			val = 4;
+		fe->dtv_property_cache.layer[i].interleaving = val;
+		dprintk("dib8000_get_frontend : Layer %d time_intlv = %d ",
+			i, fe->dtv_property_cache.layer[i].interleaving);
 
 		val = dib8000_read_word(state, 481 + i);
 		switch (val & 0x7) {
@@ -3556,6 +3704,8 @@
 	return 0;
 }
 
+static int dib8000_get_stats(struct dvb_frontend *fe, fe_status_t stat);
+
 static int dib8000_read_status(struct dvb_frontend *fe, fe_status_t * stat)
 {
 	struct dib8000_state *state = fe->demodulator_priv;
@@ -3593,6 +3743,7 @@
 		if (lock & 0x01)
 			*stat |= FE_HAS_VITERBI;
 	}
+	dib8000_get_stats(fe, *stat);
 
 	return 0;
 }
@@ -3699,6 +3850,357 @@
 	return 0;
 }
 
+struct per_layer_regs {
+	u16 lock, ber, per;
+};
+
+static const struct per_layer_regs per_layer_regs[] = {
+	{ 554, 560, 562 },
+	{ 555, 576, 578 },
+	{ 556, 581, 583 },
+};
+
+struct linear_segments {
+	unsigned x;
+	signed y;
+};
+
+/*
+ * Table to estimate signal strength in dBm.
+ * This table was empirically determined by measuring the signal
+ * strength generated by a DTA-2111 RF generator directly connected to
+ * a dib8076 device (a PixelView PV-D231U stick), using a good quality
+ * 3-meter RC6 cable and good RC6 connectors.
+ * The real value can actually be different on other devices, depending
+ * on several factors, like whether the LNA is enabled or not, whether
+ * diversity is enabled, the type of connectors, etc.
+ * Yet, it is better to use this measurement in dB than a random non-linear
+ * percentage value, especially for antenna adjustments.
+ * In my tests, the precision of the measurement using this table is about
+ * 0.5 dB, which sounds reasonable enough.
+ */
+static struct linear_segments strength_to_db_table[] = {
+	{ 55953, 108500 },	/* -22.5 dBm */
+	{ 55394, 108000 },
+	{ 53834, 107000 },
+	{ 52863, 106000 },
+	{ 52239, 105000 },
+	{ 52012, 104000 },
+	{ 51803, 103000 },
+	{ 51566, 102000 },
+	{ 51356, 101000 },
+	{ 51112, 100000 },
+	{ 50869,  99000 },
+	{ 50600,  98000 },
+	{ 50363,  97000 },
+	{ 50117,  96000 },	/* -35 dBm */
+	{ 49889,  95000 },
+	{ 49680,  94000 },
+	{ 49493,  93000 },
+	{ 49302,  92000 },
+	{ 48929,  91000 },
+	{ 48416,  90000 },
+	{ 48035,  89000 },
+	{ 47593,  88000 },
+	{ 47282,  87000 },
+	{ 46953,  86000 },
+	{ 46698,  85000 },
+	{ 45617,  84000 },
+	{ 44773,  83000 },
+	{ 43845,  82000 },
+	{ 43020,  81000 },
+	{ 42010,  80000 },	/* -51 dBm */
+	{     0,      0 },
+};
+
+static u32 interpolate_value(u32 value, struct linear_segments *segments,
+			     unsigned len)
+{
+	u64 tmp64;
+	u32 dx;
+	s32 dy;
+	int i, ret;
+
+	if (value >= segments[0].x)
+		return segments[0].y;
+	if (value < segments[len-1].x)
+		return segments[len-1].y;
+
+	for (i = 1; i < len - 1; i++) {
+		/* If value is identical, no need to interpolate */
+		if (value == segments[i].x)
+			return segments[i].y;
+		if (value > segments[i].x)
+			break;
+	}
+
+	/* Linear interpolation between the two (x,y) points */
+	dy = segments[i - 1].y - segments[i].y;
+	dx = segments[i - 1].x - segments[i].x;
+
+	tmp64 = value - segments[i].x;
+	tmp64 *= dy;
+	do_div(tmp64, dx);
+	ret = segments[i].y + tmp64;
+
+	return ret;
+}
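+
+/*
+ * Worked example of the interpolation above: a raw strength reading of
+ * 50000 falls between { 50117, 96000 } and { 49889, 95000 }, so
+ * dy = 1000, dx = 228 and the result is 95000 + 111 * 1000 / 228 = 95486.
+ * After the -131000 offset applied in dib8000_get_stats() this reports
+ * roughly -35.5 dBm (svalue is expressed in units of 0.001 dB).
+ */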
+
+static u32 dib8000_get_time_us(struct dvb_frontend *fe, int layer)
+{
+	struct dib8000_state *state = fe->demodulator_priv;
+	struct dtv_frontend_properties *c = &state->fe[0]->dtv_property_cache;
+	int ini_layer, end_layer, i;
+	u64 time_us, tmp64;
+	u32 tmp, denom;
+	int guard, rate_num, rate_denum = 1, bits_per_symbol, nsegs;
+	int interleaving = 0, fft_div;
+
+	if (layer >= 0) {
+		ini_layer = layer;
+		end_layer = layer + 1;
+	} else {
+		ini_layer = 0;
+		end_layer = 3;
+	}
+
+	switch (c->guard_interval) {
+	case GUARD_INTERVAL_1_4:
+		guard = 4;
+		break;
+	case GUARD_INTERVAL_1_8:
+		guard = 8;
+		break;
+	case GUARD_INTERVAL_1_16:
+		guard = 16;
+		break;
+	default:
+	case GUARD_INTERVAL_1_32:
+		guard = 32;
+		break;
+	}
+
+	switch (c->transmission_mode) {
+	case TRANSMISSION_MODE_2K:
+		fft_div = 4;
+		break;
+	case TRANSMISSION_MODE_4K:
+		fft_div = 2;
+		break;
+	default:
+	case TRANSMISSION_MODE_8K:
+		fft_div = 1;
+		break;
+	}
+
+	denom = 0;
+	for (i = ini_layer; i < end_layer; i++) {
+		nsegs = c->layer[i].segment_count;
+		if (nsegs == 0 || nsegs > 13)
+			continue;
+
+		switch (c->layer[i].modulation) {
+		case DQPSK:
+		case QPSK:
+			bits_per_symbol = 2;
+			break;
+		case QAM_16:
+			bits_per_symbol = 4;
+			break;
+		default:
+		case QAM_64:
+			bits_per_symbol = 6;
+			break;
+		}
+
+		switch (c->layer[i].fec) {
+		case FEC_1_2:
+			rate_num = 1;
+			rate_denum = 2;
+			break;
+		case FEC_2_3:
+			rate_num = 2;
+			rate_denum = 3;
+			break;
+		case FEC_3_4:
+			rate_num = 3;
+			rate_denum = 4;
+			break;
+		case FEC_5_6:
+			rate_num = 5;
+			rate_denum = 6;
+			break;
+		default:
+		case FEC_7_8:
+			rate_num = 7;
+			rate_denum = 8;
+			break;
+		}
+
+		interleaving = c->layer[i].interleaving;
+
+		denom += bits_per_symbol * rate_num * fft_div * nsegs * 384;
+	}
+
+	/* If all goes wrong, wait for 1s for the next stats */
+	if (!denom)
+		return 0;
+
+	/* Estimate the period for the total bit rate */
+	time_us = rate_denum * (1008 * 1562500L);
+	tmp64 = time_us;
+	do_div(tmp64, guard);
+	time_us = time_us + tmp64;
+	time_us += denom / 2;
+	do_div(time_us, denom);
+
+	tmp = 1008 * 96 * interleaving;
+	time_us += tmp + tmp / guard;
+
+	return time_us;
+}
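+
+/*
+ * Example with the constants above: a full 13-segment layer in 8K mode
+ * (fft_div = 1), guard interval 1/8, 64QAM (6 bits/symbol), FEC 3/4 and
+ * time interleaving 2 gives denom = 6 * 3 * 1 * 13 * 384 = 89856 and
+ * time_us = (4 * 1008 * 1562500 * 9/8 + denom/2) / denom ~= 78876 us,
+ * plus the interleaving term 1008 * 96 * 2 * 9/8 = 217728 us, i.e.
+ * roughly 0.3 s between the per-layer BER statistics updates.
+ */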
+
+static int dib8000_get_stats(struct dvb_frontend *fe, fe_status_t stat)
+{
+	struct dib8000_state *state = fe->demodulator_priv;
+	struct dtv_frontend_properties *c = &state->fe[0]->dtv_property_cache;
+	int i;
+	int show_per_stats = 0;
+	u32 time_us = 0, snr, val;
+	u64 blocks;
+	s32 db;
+	u16 strength;
+
+	/* Get Signal strength */
+	dib8000_read_signal_strength(fe, &strength);
+	val = strength;
+	db = interpolate_value(val,
+			       strength_to_db_table,
+			       ARRAY_SIZE(strength_to_db_table)) - 131000;
+	c->strength.stat[0].svalue = db;
+
+	/* UCB/BER/CNR measures require lock */
+	if (!(stat & FE_HAS_LOCK)) {
+		c->cnr.len = 1;
+		c->block_count.len = 1;
+		c->block_error.len = 1;
+		c->post_bit_error.len = 1;
+		c->post_bit_count.len = 1;
+		c->cnr.stat[0].scale = FE_SCALE_NOT_AVAILABLE;
+		c->post_bit_error.stat[0].scale = FE_SCALE_NOT_AVAILABLE;
+		c->post_bit_count.stat[0].scale = FE_SCALE_NOT_AVAILABLE;
+		c->block_error.stat[0].scale = FE_SCALE_NOT_AVAILABLE;
+		c->block_count.stat[0].scale = FE_SCALE_NOT_AVAILABLE;
+		return 0;
+	}
+
+	/* Check if the time for stats has elapsed */
+	if (time_after(jiffies, state->per_jiffies_stats)) {
+		state->per_jiffies_stats = jiffies + msecs_to_jiffies(1000);
+
+		/* Get SNR */
+		snr = dib8000_get_snr(fe);
+		for (i = 1; i < MAX_NUMBER_OF_FRONTENDS; i++) {
+			if (state->fe[i])
+				snr += dib8000_get_snr(state->fe[i]);
+		}
+		snr = snr >> 16;
+
+		if (snr) {
+			snr = 10 * intlog10(snr);
+			snr = (1000L * snr) >> 24;
+		} else {
+			snr = 0;
+		}
+		c->cnr.stat[0].svalue = snr;
+		c->cnr.stat[0].scale = FE_SCALE_DECIBEL;
+
+		/* Get UCB measures */
+		dib8000_read_unc_blocks(fe, &val);
+		if (val < state->init_ucb)
+			state->init_ucb += 0x100000000LL;
+
+		c->block_error.stat[0].scale = FE_SCALE_COUNTER;
+		c->block_error.stat[0].uvalue = val + state->init_ucb;
+
+		/* Estimate the number of packets based on bitrate */
+		if (!time_us)
+			time_us = dib8000_get_time_us(fe, -1);
+
+		if (time_us) {
+			blocks = 1250000ULL * 1000000ULL;
+			do_div(blocks, time_us * 8 * 204);
+			c->block_count.stat[0].scale = FE_SCALE_COUNTER;
+			c->block_count.stat[0].uvalue += blocks;
+		}
+
+		show_per_stats = 1;
+	}
+
+	/* Get post-BER measures */
+	if (time_after(jiffies, state->ber_jiffies_stats)) {
+		time_us = dib8000_get_time_us(fe, -1);
+		state->ber_jiffies_stats = jiffies + msecs_to_jiffies((time_us + 500) / 1000);
+
+		dprintk("Next all layers stats available in %u us.", time_us);
+
+		dib8000_read_ber(fe, &val);
+		c->post_bit_error.stat[0].scale = FE_SCALE_COUNTER;
+		c->post_bit_error.stat[0].uvalue += val;
+
+		c->post_bit_count.stat[0].scale = FE_SCALE_COUNTER;
+		c->post_bit_count.stat[0].uvalue += 100000000;
+	}
+
+	if (state->revision < 0x8002)
+		return 0;
+
+	c->block_error.len = 4;
+	c->post_bit_error.len = 4;
+	c->post_bit_count.len = 4;
+
+	for (i = 0; i < 3; i++) {
+		unsigned nsegs = c->layer[i].segment_count;
+
+		if (nsegs == 0 || nsegs > 13)
+			continue;
+
+		time_us = 0;
+
+		if (time_after(jiffies, state->ber_jiffies_stats_layer[i])) {
+			time_us = dib8000_get_time_us(fe, i);
+
+			state->ber_jiffies_stats_layer[i] = jiffies + msecs_to_jiffies((time_us + 500) / 1000);
+			dprintk("Next layer %c stats will be available in %u us\n",
+				'A' + i, time_us);
+
+			val = dib8000_read_word(state, per_layer_regs[i].ber);
+			c->post_bit_error.stat[1 + i].scale = FE_SCALE_COUNTER;
+			c->post_bit_error.stat[1 + i].uvalue += val;
+
+			c->post_bit_count.stat[1 + i].scale = FE_SCALE_COUNTER;
+			c->post_bit_count.stat[1 + i].uvalue += 100000000;
+		}
+
+		if (show_per_stats) {
+			val = dib8000_read_word(state, per_layer_regs[i].per);
+
+			c->block_error.stat[1 + i].scale = FE_SCALE_COUNTER;
+			c->block_error.stat[1 + i].uvalue += val;
+
+			if (!time_us)
+				time_us = dib8000_get_time_us(fe, i);
+			if (time_us) {
+				blocks = 1250000ULL * 1000000ULL;
+				do_div(blocks, time_us * 8 * 204);
+				c->block_count.stat[0].scale = FE_SCALE_COUNTER;
+				c->block_count.stat[0].uvalue += blocks;
+			}
+		}
+	}
+	return 0;
+}
+
 int dib8000_set_slave_frontend(struct dvb_frontend *fe, struct dvb_frontend *fe_slave)
 {
 	struct dib8000_state *state = fe->demodulator_priv;
diff --git a/drivers/media/dvb-frontends/drxk.h b/drivers/media/dvb-frontends/drxk.h
index f22eb9f..f6cb346 100644
--- a/drivers/media/dvb-frontends/drxk.h
+++ b/drivers/media/dvb-frontends/drxk.h
@@ -29,7 +29,6 @@
  *				A value of 0 (default) or lower indicates that
  *				the correct number of parameters will be
  *				automatically detected.
- * @load_firmware_sync:		Force the firmware load to be synchronous.
  *
  * On the *_gpio vars, bit 0 is UIO-1, bit 1 is UIO-2 and bit 2 is
  * UIO-3.
@@ -41,7 +40,6 @@
 	bool	parallel_ts;
 	bool	dynamic_clk;
 	bool	enable_merr_cfg;
-	bool	load_firmware_sync;
 
 	bool	antenna_dvbt;
 	u16	antenna_gpio;
diff --git a/drivers/media/dvb-frontends/drxk_hard.c b/drivers/media/dvb-frontends/drxk_hard.c
index bf29a3f..cce94a7 100644
--- a/drivers/media/dvb-frontends/drxk_hard.c
+++ b/drivers/media/dvb-frontends/drxk_hard.c
@@ -6830,25 +6830,13 @@
 
 	/* Load firmware and initialize DRX-K */
 	if (state->microcode_name) {
-		if (config->load_firmware_sync) {
-			const struct firmware *fw = NULL;
+		const struct firmware *fw = NULL;
 
-			status = request_firmware(&fw, state->microcode_name,
-						  state->i2c->dev.parent);
-			if (status < 0)
-				fw = NULL;
-			load_firmware_cb(fw, state);
-		} else {
-			status = request_firmware_nowait(THIS_MODULE, 1,
-					      state->microcode_name,
-					      state->i2c->dev.parent,
-					      GFP_KERNEL,
-					      state, load_firmware_cb);
-			if (status < 0) {
-				pr_err("failed to request a firmware\n");
-				return NULL;
-			}
-		}
+		status = request_firmware(&fw, state->microcode_name,
+					  state->i2c->dev.parent);
+		if (status < 0)
+			fw = NULL;
+		load_firmware_cb(fw, state);
 	} else if (init_drxk(state) < 0)
 		goto error;
 
diff --git a/drivers/media/dvb-frontends/m88ds3103.c b/drivers/media/dvb-frontends/m88ds3103.c
new file mode 100644
index 0000000..b8a7897
--- /dev/null
+++ b/drivers/media/dvb-frontends/m88ds3103.c
@@ -0,0 +1,1311 @@
+/*
+ * Montage M88DS3103 demodulator driver
+ *
+ * Copyright (C) 2013 Antti Palosaari <crope@iki.fi>
+ *
+ *    This program is free software; you can redistribute it and/or modify
+ *    it under the terms of the GNU General Public License as published by
+ *    the Free Software Foundation; either version 2 of the License, or
+ *    (at your option) any later version.
+ *
+ *    This program is distributed in the hope that it will be useful,
+ *    but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *    GNU General Public License for more details.
+ */
+
+#include "m88ds3103_priv.h"
+
+static struct dvb_frontend_ops m88ds3103_ops;
+
+/* write multiple registers */
+static int m88ds3103_wr_regs(struct m88ds3103_priv *priv,
+		u8 reg, const u8 *val, int len)
+{
+#define MAX_WR_LEN 32
+#define MAX_WR_XFER_LEN (MAX_WR_LEN + 1)
+	int ret;
+	u8 buf[MAX_WR_XFER_LEN];
+	struct i2c_msg msg[1] = {
+		{
+			.addr = priv->cfg->i2c_addr,
+			.flags = 0,
+			.len = 1 + len,
+			.buf = buf,
+		}
+	};
+
+	if (WARN_ON(len > MAX_WR_LEN))
+		return -EINVAL;
+
+	buf[0] = reg;
+	memcpy(&buf[1], val, len);
+
+	mutex_lock(&priv->i2c_mutex);
+	ret = i2c_transfer(priv->i2c, msg, 1);
+	mutex_unlock(&priv->i2c_mutex);
+	if (ret == 1) {
+		ret = 0;
+	} else {
+		dev_warn(&priv->i2c->dev,
+				"%s: i2c wr failed=%d reg=%02x len=%d\n",
+				KBUILD_MODNAME, ret, reg, len);
+		ret = -EREMOTEIO;
+	}
+
+	return ret;
+}
+
+/* read multiple registers */
+static int m88ds3103_rd_regs(struct m88ds3103_priv *priv,
+		u8 reg, u8 *val, int len)
+{
+#define MAX_RD_LEN 3
+#define MAX_RD_XFER_LEN (MAX_RD_LEN)
+	int ret;
+	u8 buf[MAX_RD_XFER_LEN];
+	struct i2c_msg msg[2] = {
+		{
+			.addr = priv->cfg->i2c_addr,
+			.flags = 0,
+			.len = 1,
+			.buf = &reg,
+		}, {
+			.addr = priv->cfg->i2c_addr,
+			.flags = I2C_M_RD,
+			.len = len,
+			.buf = buf,
+		}
+	};
+
+	if (WARN_ON(len > MAX_RD_LEN))
+		return -EINVAL;
+
+	mutex_lock(&priv->i2c_mutex);
+	ret = i2c_transfer(priv->i2c, msg, 2);
+	mutex_unlock(&priv->i2c_mutex);
+	if (ret == 2) {
+		memcpy(val, buf, len);
+		ret = 0;
+	} else {
+		dev_warn(&priv->i2c->dev,
+				"%s: i2c rd failed=%d reg=%02x len=%d\n",
+				KBUILD_MODNAME, ret, reg, len);
+		ret = -EREMOTEIO;
+	}
+
+	return ret;
+}
+
+/* write single register */
+static int m88ds3103_wr_reg(struct m88ds3103_priv *priv, u8 reg, u8 val)
+{
+	return m88ds3103_wr_regs(priv, reg, &val, 1);
+}
+
+/* read single register */
+static int m88ds3103_rd_reg(struct m88ds3103_priv *priv, u8 reg, u8 *val)
+{
+	return m88ds3103_rd_regs(priv, reg, val, 1);
+}
+
+/* write single register with mask */
+static int m88ds3103_wr_reg_mask(struct m88ds3103_priv *priv,
+		u8 reg, u8 val, u8 mask)
+{
+	int ret;
+	u8 u8tmp;
+
+	/* no need for read if whole reg is written */
+	if (mask != 0xff) {
+		ret = m88ds3103_rd_regs(priv, reg, &u8tmp, 1);
+		if (ret)
+			return ret;
+
+		val &= mask;
+		u8tmp &= ~mask;
+		val |= u8tmp;
+	}
+
+	return m88ds3103_wr_regs(priv, reg, &val, 1);
+}
+
+/* read single register with mask */
+static int m88ds3103_rd_reg_mask(struct m88ds3103_priv *priv,
+		u8 reg, u8 *val, u8 mask)
+{
+	int ret, i;
+	u8 u8tmp;
+
+	ret = m88ds3103_rd_regs(priv, reg, &u8tmp, 1);
+	if (ret)
+		return ret;
+
+	u8tmp &= mask;
+
+	/* find position of the first bit */
+	for (i = 0; i < 8; i++) {
+		if ((mask >> i) & 0x01)
+			break;
+	}
+	*val = u8tmp >> i;
+
+	return 0;
+}
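+
+/*
+ * The value read is right-aligned to the lowest set bit of the mask: with
+ * a hypothetical mask of 0x30, a register holding 0x5a yields *val = 0x01.
+ */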
+
+/* write reg val table using reg addr auto increment */
+static int m88ds3103_wr_reg_val_tab(struct m88ds3103_priv *priv,
+		const struct m88ds3103_reg_val *tab, int tab_len)
+{
+	int ret, i, j;
+	u8 buf[83];
+	dev_dbg(&priv->i2c->dev, "%s: tab_len=%d\n", __func__, tab_len);
+
+	if (tab_len > 83) {
+		ret = -EINVAL;
+		goto err;
+	}
+
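+	/*
+	 * Values for consecutive registers are collected in buf[] and flushed
+	 * as a single auto-increment write whenever the register run breaks,
+	 * the table ends, or i2c_wr_max - 1 values are pending.
+	 */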
+	for (i = 0, j = 0; i < tab_len; i++, j++) {
+		buf[j] = tab[i].val;
+
+		if (i == tab_len - 1 || tab[i].reg != tab[i + 1].reg - 1 ||
+				!((j + 1) % (priv->cfg->i2c_wr_max - 1))) {
+			ret = m88ds3103_wr_regs(priv, tab[i].reg - j, buf, j + 1);
+			if (ret)
+				goto err;
+
+			j = -1;
+		}
+	}
+
+	return 0;
+err:
+	dev_dbg(&priv->i2c->dev, "%s: failed=%d\n", __func__, ret);
+	return ret;
+}
+
+static int m88ds3103_read_status(struct dvb_frontend *fe, fe_status_t *status)
+{
+	struct m88ds3103_priv *priv = fe->demodulator_priv;
+	struct dtv_frontend_properties *c = &fe->dtv_property_cache;
+	int ret;
+	u8 u8tmp;
+
+	*status = 0;
+
+	if (!priv->warm) {
+		ret = -EAGAIN;
+		goto err;
+	}
+
+	switch (c->delivery_system) {
+	case SYS_DVBS:
+		ret = m88ds3103_rd_reg_mask(priv, 0xd1, &u8tmp, 0x07);
+		if (ret)
+			goto err;
+
+		if (u8tmp == 0x07)
+			*status = FE_HAS_SIGNAL | FE_HAS_CARRIER |
+					FE_HAS_VITERBI | FE_HAS_SYNC |
+					FE_HAS_LOCK;
+		break;
+	case SYS_DVBS2:
+		ret = m88ds3103_rd_reg_mask(priv, 0x0d, &u8tmp, 0x8f);
+		if (ret)
+			goto err;
+
+		if (u8tmp == 0x8f)
+			*status = FE_HAS_SIGNAL | FE_HAS_CARRIER |
+					FE_HAS_VITERBI | FE_HAS_SYNC |
+					FE_HAS_LOCK;
+		break;
+	default:
+		dev_dbg(&priv->i2c->dev, "%s: invalid delivery_system\n",
+				__func__);
+		ret = -EINVAL;
+		goto err;
+	}
+
+	priv->fe_status = *status;
+
+	dev_dbg(&priv->i2c->dev, "%s: lock=%02x status=%02x\n",
+			__func__, u8tmp, *status);
+
+	return 0;
+err:
+	dev_dbg(&priv->i2c->dev, "%s: failed=%d\n", __func__, ret);
+	return ret;
+}
+
+static int m88ds3103_set_frontend(struct dvb_frontend *fe)
+{
+	struct m88ds3103_priv *priv = fe->demodulator_priv;
+	struct dtv_frontend_properties *c = &fe->dtv_property_cache;
+	int ret, len;
+	const struct m88ds3103_reg_val *init;
+	u8 u8tmp, u8tmp1, u8tmp2;
+	u8 buf[2];
+	u16 u16tmp, divide_ratio;
+	u32 tuner_frequency, target_mclk, ts_clk;
+	s32 s32tmp;
+	dev_dbg(&priv->i2c->dev,
+			"%s: delivery_system=%d modulation=%d frequency=%d symbol_rate=%d inversion=%d pilot=%d rolloff=%d\n",
+			__func__, c->delivery_system,
+			c->modulation, c->frequency, c->symbol_rate,
+			c->inversion, c->pilot, c->rolloff);
+
+	if (!priv->warm) {
+		ret = -EAGAIN;
+		goto err;
+	}
+
+	/* program tuner */
+	if (fe->ops.tuner_ops.set_params) {
+		ret = fe->ops.tuner_ops.set_params(fe);
+		if (ret)
+			goto err;
+	}
+
+	if (fe->ops.tuner_ops.get_frequency) {
+		ret = fe->ops.tuner_ops.get_frequency(fe, &tuner_frequency);
+		if (ret)
+			goto err;
+	}
+
+	/* reset */
+	ret = m88ds3103_wr_reg(priv, 0x07, 0x80);
+	if (ret)
+		goto err;
+
+	ret = m88ds3103_wr_reg(priv, 0x07, 0x00);
+	if (ret)
+		goto err;
+
+	ret = m88ds3103_wr_reg(priv, 0xb2, 0x01);
+	if (ret)
+		goto err;
+
+	ret = m88ds3103_wr_reg(priv, 0x00, 0x01);
+	if (ret)
+		goto err;
+
+	switch (c->delivery_system) {
+	case SYS_DVBS:
+		len = ARRAY_SIZE(m88ds3103_dvbs_init_reg_vals);
+		init = m88ds3103_dvbs_init_reg_vals;
+		target_mclk = 96000;
+		break;
+	case SYS_DVBS2:
+		len = ARRAY_SIZE(m88ds3103_dvbs2_init_reg_vals);
+		init = m88ds3103_dvbs2_init_reg_vals;
+
+		switch (priv->cfg->ts_mode) {
+		case M88DS3103_TS_SERIAL:
+		case M88DS3103_TS_SERIAL_D7:
+			if (c->symbol_rate < 18000000)
+				target_mclk = 96000;
+			else
+				target_mclk = 144000;
+			break;
+		case M88DS3103_TS_PARALLEL:
+		case M88DS3103_TS_PARALLEL_12:
+		case M88DS3103_TS_PARALLEL_16:
+		case M88DS3103_TS_PARALLEL_19_2:
+		case M88DS3103_TS_CI:
+			if (c->symbol_rate < 18000000)
+				target_mclk = 96000;
+			else if (c->symbol_rate < 28000000)
+				target_mclk = 144000;
+			else
+				target_mclk = 192000;
+			break;
+		default:
+			dev_dbg(&priv->i2c->dev, "%s: invalid ts_mode\n",
+					__func__);
+			ret = -EINVAL;
+			goto err;
+		}
+		break;
+	default:
+		dev_dbg(&priv->i2c->dev, "%s: invalid delivery_system\n",
+				__func__);
+		ret = -EINVAL;
+		goto err;
+	}
+
+	/* program init table */
+	if (c->delivery_system != priv->delivery_system) {
+		ret = m88ds3103_wr_reg_val_tab(priv, init, len);
+		if (ret)
+			goto err;
+	}
+
+	u8tmp1 = 0; /* silence compiler warning */
+	switch (priv->cfg->ts_mode) {
+	case M88DS3103_TS_SERIAL:
+		u8tmp1 = 0x00;
+		ts_clk = 0;
+		u8tmp = 0x46;
+		break;
+	case M88DS3103_TS_SERIAL_D7:
+		u8tmp1 = 0x20;
+		ts_clk = 0;
+		u8tmp = 0x46;
+		break;
+	case M88DS3103_TS_PARALLEL:
+		ts_clk = 24000;
+		u8tmp = 0x42;
+		break;
+	case M88DS3103_TS_PARALLEL_12:
+		ts_clk = 12000;
+		u8tmp = 0x42;
+		break;
+	case M88DS3103_TS_PARALLEL_16:
+		ts_clk = 16000;
+		u8tmp = 0x42;
+		break;
+	case M88DS3103_TS_PARALLEL_19_2:
+		ts_clk = 19200;
+		u8tmp = 0x42;
+		break;
+	case M88DS3103_TS_CI:
+		ts_clk = 6000;
+		u8tmp = 0x43;
+		break;
+	default:
+		dev_dbg(&priv->i2c->dev, "%s: invalid ts_mode\n", __func__);
+		ret = -EINVAL;
+		goto err;
+	}
+
+	/* TS mode */
+	ret = m88ds3103_wr_reg(priv, 0xfd, u8tmp);
+	if (ret)
+		goto err;
+
+	switch (priv->cfg->ts_mode) {
+	case M88DS3103_TS_SERIAL:
+	case M88DS3103_TS_SERIAL_D7:
+		ret = m88ds3103_wr_reg_mask(priv, 0x29, u8tmp1, 0x20);
+		if (ret)
+			goto err;
+	}
+
+	if (ts_clk) {
+		divide_ratio = DIV_ROUND_UP(target_mclk, ts_clk);
+		u8tmp1 = divide_ratio / 2;
+		u8tmp2 = DIV_ROUND_UP(divide_ratio, 2);
+	} else {
+		divide_ratio = 0;
+		u8tmp1 = 0;
+		u8tmp2 = 0;
+	}
+
+	dev_dbg(&priv->i2c->dev,
+			"%s: target_mclk=%d ts_clk=%d divide_ratio=%d\n",
+			__func__, target_mclk, ts_clk, divide_ratio);
+
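+	/*
+	 * Example: target_mclk = 144000 and ts_clk = 24000 give
+	 * divide_ratio = 6, hence u8tmp1 = u8tmp2 = 3; both are decremented
+	 * below and packed into registers 0xfe and 0xea as described.
+	 */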
+	u8tmp1--;
+	u8tmp2--;
+	/* u8tmp1[5:2] => fe[3:0], u8tmp1[1:0] => ea[7:6] */
+	u8tmp1 &= 0x3f;
+	/* u8tmp2[5:0] => ea[5:0] */
+	u8tmp2 &= 0x3f;
+
+	ret = m88ds3103_rd_reg(priv, 0xfe, &u8tmp);
+	if (ret)
+		goto err;
+
+	u8tmp = ((u8tmp  & 0xf0) << 0) | u8tmp1 >> 2;
+	ret = m88ds3103_wr_reg(priv, 0xfe, u8tmp);
+	if (ret)
+		goto err;
+
+	u8tmp = ((u8tmp1 & 0x03) << 6) | u8tmp2 >> 0;
+	ret = m88ds3103_wr_reg(priv, 0xea, u8tmp);
+	if (ret)
+		goto err;
+
+	switch (target_mclk) {
+	case 72000:
+		u8tmp1 = 0x00; /* 0b00 */
+		u8tmp2 = 0x03; /* 0b11 */
+		break;
+	case 96000:
+		u8tmp1 = 0x02; /* 0b10 */
+		u8tmp2 = 0x01; /* 0b01 */
+		break;
+	case 115200:
+		u8tmp1 = 0x01; /* 0b01 */
+		u8tmp2 = 0x01; /* 0b01 */
+		break;
+	case 144000:
+		u8tmp1 = 0x00; /* 0b00 */
+		u8tmp2 = 0x01; /* 0b01 */
+		break;
+	case 192000:
+		u8tmp1 = 0x03; /* 0b11 */
+		u8tmp2 = 0x00; /* 0b00 */
+		break;
+	default:
+		dev_dbg(&priv->i2c->dev, "%s: invalid target_mclk\n", __func__);
+		ret = -EINVAL;
+		goto err;
+	}
+
+	ret = m88ds3103_wr_reg_mask(priv, 0x22, u8tmp1 << 6, 0xc0);
+	if (ret)
+		goto err;
+
+	ret = m88ds3103_wr_reg_mask(priv, 0x24, u8tmp2 << 6, 0xc0);
+	if (ret)
+		goto err;
+
+	if (c->symbol_rate <= 3000000)
+		u8tmp = 0x20;
+	else if (c->symbol_rate <= 10000000)
+		u8tmp = 0x10;
+	else
+		u8tmp = 0x06;
+
+	ret = m88ds3103_wr_reg(priv, 0xc3, 0x08);
+	if (ret)
+		goto err;
+
+	ret = m88ds3103_wr_reg(priv, 0xc8, u8tmp);
+	if (ret)
+		goto err;
+
+	ret = m88ds3103_wr_reg(priv, 0xc4, 0x08);
+	if (ret)
+		goto err;
+
+	ret = m88ds3103_wr_reg(priv, 0xc7, 0x00);
+	if (ret)
+		goto err;
+
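+	/*
+	 * The symbol rate register holds sr_kSps * 2^15 / (MCLK_kHz / 2),
+	 * e.g. 27500 kSps with the 96000 kHz MCLK gives
+	 * round(27500 * 32768 / 48000) = 18773 (0x4955).
+	 */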
+	u16tmp = DIV_ROUND_CLOSEST((c->symbol_rate / 1000) << 15, M88DS3103_MCLK_KHZ / 2);
+	buf[0] = (u16tmp >> 0) & 0xff;
+	buf[1] = (u16tmp >> 8) & 0xff;
+	ret = m88ds3103_wr_regs(priv, 0x61, buf, 2);
+	if (ret)
+		goto err;
+
+	ret = m88ds3103_wr_reg_mask(priv, 0x4d, priv->cfg->spec_inv << 1, 0x02);
+	if (ret)
+		goto err;
+
+	ret = m88ds3103_wr_reg_mask(priv, 0x30, priv->cfg->agc_inv << 4, 0x10);
+	if (ret)
+		goto err;
+
+	ret = m88ds3103_wr_reg(priv, 0x33, priv->cfg->agc);
+	if (ret)
+		goto err;
+
+	dev_dbg(&priv->i2c->dev, "%s: carrier offset=%d\n", __func__,
+			(tuner_frequency - c->frequency));
+
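+	/*
+	 * The carrier offset is programmed in units of MCLK / 2^16: e.g. a
+	 * +500 kHz offset with the 96000 kHz MCLK gives
+	 * round(500 * 65536 / 96000) = 341 (0x0155); negative offsets wrap
+	 * into the upper half of the 16-bit range.
+	 */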
+	s32tmp = 0x10000 * (tuner_frequency - c->frequency);
+	s32tmp = DIV_ROUND_CLOSEST(s32tmp, M88DS3103_MCLK_KHZ);
+	if (s32tmp < 0)
+		s32tmp += 0x10000;
+
+	buf[0] = (s32tmp >> 0) & 0xff;
+	buf[1] = (s32tmp >> 8) & 0xff;
+	ret = m88ds3103_wr_regs(priv, 0x5e, buf, 2);
+	if (ret)
+		goto err;
+
+	ret = m88ds3103_wr_reg(priv, 0x00, 0x00);
+	if (ret)
+		goto err;
+
+	ret = m88ds3103_wr_reg(priv, 0xb2, 0x00);
+	if (ret)
+		goto err;
+
+	priv->delivery_system = c->delivery_system;
+
+	return 0;
+err:
+	dev_dbg(&priv->i2c->dev, "%s: failed=%d\n", __func__, ret);
+	return ret;
+}
+
+static int m88ds3103_init(struct dvb_frontend *fe)
+{
+	struct m88ds3103_priv *priv = fe->demodulator_priv;
+	int ret, len, remaining;
+	const struct firmware *fw = NULL;
+	u8 *fw_file = M88DS3103_FIRMWARE;
+	u8 u8tmp;
+	dev_dbg(&priv->i2c->dev, "%s:\n", __func__);
+
+	/* set cold state by default */
+	priv->warm = false;
+
+	/* wake up device from sleep */
+	ret = m88ds3103_wr_reg_mask(priv, 0x08, 0x01, 0x01);
+	if (ret)
+		goto err;
+
+	ret = m88ds3103_wr_reg_mask(priv, 0x04, 0x00, 0x01);
+	if (ret)
+		goto err;
+
+	ret = m88ds3103_wr_reg_mask(priv, 0x23, 0x00, 0x10);
+	if (ret)
+		goto err;
+
+	/* reset */
+	ret = m88ds3103_wr_reg(priv, 0x07, 0x60);
+	if (ret)
+		goto err;
+
+	ret = m88ds3103_wr_reg(priv, 0x07, 0x00);
+	if (ret)
+		goto err;
+
+	/* firmware status */
+	ret = m88ds3103_rd_reg(priv, 0xb9, &u8tmp);
+	if (ret)
+		goto err;
+
+	dev_dbg(&priv->i2c->dev, "%s: firmware=%02x\n", __func__, u8tmp);
+
+	if (u8tmp)
+		goto skip_fw_download;
+
+	/* cold state - try to download firmware */
+	dev_info(&priv->i2c->dev, "%s: found a '%s' in cold state\n",
+			KBUILD_MODNAME, m88ds3103_ops.info.name);
+
+	/* request the firmware, this will block and timeout */
+	ret = request_firmware(&fw, fw_file, priv->i2c->dev.parent);
+	if (ret) {
+		dev_err(&priv->i2c->dev, "%s: firmware file '%s' not found\n",
+				KBUILD_MODNAME, fw_file);
+		goto err;
+	}
+
+	dev_info(&priv->i2c->dev, "%s: downloading firmware from file '%s'\n",
+			KBUILD_MODNAME, fw_file);
+
+	ret = m88ds3103_wr_reg(priv, 0xb2, 0x01);
+	if (ret)
+		goto err;
+
+	for (remaining = fw->size; remaining > 0;
+			remaining -= (priv->cfg->i2c_wr_max - 1)) {
+		len = remaining;
+		if (len > (priv->cfg->i2c_wr_max - 1))
+			len = (priv->cfg->i2c_wr_max - 1);
+
+		ret = m88ds3103_wr_regs(priv, 0xb0,
+				&fw->data[fw->size - remaining], len);
+		if (ret) {
+			dev_err(&priv->i2c->dev,
+					"%s: firmware download failed=%d\n",
+					KBUILD_MODNAME, ret);
+			goto err;
+		}
+	}
+
+	ret = m88ds3103_wr_reg(priv, 0xb2, 0x00);
+	if (ret)
+		goto err;
+
+	release_firmware(fw);
+	fw = NULL;
+
+	ret = m88ds3103_rd_reg(priv, 0xb9, &u8tmp);
+	if (ret)
+		goto err;
+
+	if (!u8tmp) {
+		dev_info(&priv->i2c->dev, "%s: firmware did not run\n",
+				KBUILD_MODNAME);
+		ret = -EFAULT;
+		goto err;
+	}
+
+	dev_info(&priv->i2c->dev, "%s: found a '%s' in warm state\n",
+			KBUILD_MODNAME, m88ds3103_ops.info.name);
+	dev_info(&priv->i2c->dev, "%s: firmware version %X.%X\n",
+			KBUILD_MODNAME, (u8tmp >> 4) & 0xf, (u8tmp >> 0 & 0xf));
+
+skip_fw_download:
+	/* warm state */
+	priv->warm = true;
+
+	return 0;
+err:
+	if (fw)
+		release_firmware(fw);
+
+	dev_dbg(&priv->i2c->dev, "%s: failed=%d\n", __func__, ret);
+	return ret;
+}
+
+static int m88ds3103_sleep(struct dvb_frontend *fe)
+{
+	struct m88ds3103_priv *priv = fe->demodulator_priv;
+	int ret;
+	dev_dbg(&priv->i2c->dev, "%s:\n", __func__);
+
+	priv->delivery_system = SYS_UNDEFINED;
+
+	/* TS Hi-Z */
+	ret = m88ds3103_wr_reg_mask(priv, 0x27, 0x00, 0x01);
+	if (ret)
+		goto err;
+
+	/* sleep */
+	ret = m88ds3103_wr_reg_mask(priv, 0x08, 0x00, 0x01);
+	if (ret)
+		goto err;
+
+	ret = m88ds3103_wr_reg_mask(priv, 0x04, 0x01, 0x01);
+	if (ret)
+		goto err;
+
+	ret = m88ds3103_wr_reg_mask(priv, 0x23, 0x10, 0x10);
+	if (ret)
+		goto err;
+
+	return 0;
+err:
+	dev_dbg(&priv->i2c->dev, "%s: failed=%d\n", __func__, ret);
+	return ret;
+}
+
+static int m88ds3103_get_frontend(struct dvb_frontend *fe)
+{
+	struct m88ds3103_priv *priv = fe->demodulator_priv;
+	struct dtv_frontend_properties *c = &fe->dtv_property_cache;
+	int ret;
+	u8 buf[3];
+	dev_dbg(&priv->i2c->dev, "%s:\n", __func__);
+
+	if (!priv->warm || !(priv->fe_status & FE_HAS_LOCK)) {
+		ret = -EAGAIN;
+		goto err;
+	}
+
+	switch (c->delivery_system) {
+	case SYS_DVBS:
+		ret = m88ds3103_rd_reg(priv, 0xe0, &buf[0]);
+		if (ret)
+			goto err;
+
+		ret = m88ds3103_rd_reg(priv, 0xe6, &buf[1]);
+		if (ret)
+			goto err;
+
+		switch ((buf[0] >> 2) & 0x01) {
+		case 0:
+			c->inversion = INVERSION_OFF;
+			break;
+		case 1:
+			c->inversion = INVERSION_ON;
+			break;
+		default:
+			dev_dbg(&priv->i2c->dev, "%s: invalid inversion\n",
+					__func__);
+		}
+
+		switch ((buf[1] >> 5) & 0x07) {
+		case 0:
+			c->fec_inner = FEC_7_8;
+			break;
+		case 1:
+			c->fec_inner = FEC_5_6;
+			break;
+		case 2:
+			c->fec_inner = FEC_3_4;
+			break;
+		case 3:
+			c->fec_inner = FEC_2_3;
+			break;
+		case 4:
+			c->fec_inner = FEC_1_2;
+			break;
+		default:
+			dev_dbg(&priv->i2c->dev, "%s: invalid fec_inner\n",
+					__func__);
+		}
+
+		c->modulation = QPSK;
+
+		break;
+	case SYS_DVBS2:
+		ret = m88ds3103_rd_reg(priv, 0x7e, &buf[0]);
+		if (ret)
+			goto err;
+
+		ret = m88ds3103_rd_reg(priv, 0x89, &buf[1]);
+		if (ret)
+			goto err;
+
+		ret = m88ds3103_rd_reg(priv, 0xf2, &buf[2]);
+		if (ret)
+			goto err;
+
+		switch ((buf[0] >> 0) & 0x0f) {
+		case 2:
+			c->fec_inner = FEC_2_5;
+			break;
+		case 3:
+			c->fec_inner = FEC_1_2;
+			break;
+		case 4:
+			c->fec_inner = FEC_3_5;
+			break;
+		case 5:
+			c->fec_inner = FEC_2_3;
+			break;
+		case 6:
+			c->fec_inner = FEC_3_4;
+			break;
+		case 7:
+			c->fec_inner = FEC_4_5;
+			break;
+		case 8:
+			c->fec_inner = FEC_5_6;
+			break;
+		case 9:
+			c->fec_inner = FEC_8_9;
+			break;
+		case 10:
+			c->fec_inner = FEC_9_10;
+			break;
+		default:
+			dev_dbg(&priv->i2c->dev, "%s: invalid fec_inner\n",
+					__func__);
+		}
+
+		switch ((buf[0] >> 5) & 0x01) {
+		case 0:
+			c->pilot = PILOT_OFF;
+			break;
+		case 1:
+			c->pilot = PILOT_ON;
+			break;
+		default:
+			dev_dbg(&priv->i2c->dev, "%s: invalid pilot\n",
+					__func__);
+		}
+
+		switch ((buf[0] >> 6) & 0x07) {
+		case 0:
+			c->modulation = QPSK;
+			break;
+		case 1:
+			c->modulation = PSK_8;
+			break;
+		case 2:
+			c->modulation = APSK_16;
+			break;
+		case 3:
+			c->modulation = APSK_32;
+			break;
+		default:
+			dev_dbg(&priv->i2c->dev, "%s: invalid modulation\n",
+					__func__);
+		}
+
+		switch ((buf[1] >> 7) & 0x01) {
+		case 0:
+			c->inversion = INVERSION_OFF;
+			break;
+		case 1:
+			c->inversion = INVERSION_ON;
+			break;
+		default:
+			dev_dbg(&priv->i2c->dev, "%s: invalid inversion\n",
+					__func__);
+		}
+
+		switch ((buf[2] >> 0) & 0x03) {
+		case 0:
+			c->rolloff = ROLLOFF_35;
+			break;
+		case 1:
+			c->rolloff = ROLLOFF_25;
+			break;
+		case 2:
+			c->rolloff = ROLLOFF_20;
+			break;
+		default:
+			dev_dbg(&priv->i2c->dev, "%s: invalid rolloff\n",
+					__func__);
+		}
+		break;
+	default:
+		dev_dbg(&priv->i2c->dev, "%s: invalid delivery_system\n",
+				__func__);
+		ret = -EINVAL;
+		goto err;
+	}
+
+	ret = m88ds3103_rd_regs(priv, 0x6d, buf, 2);
+	if (ret)
+		goto err;
+
+	c->symbol_rate = 1ull * ((buf[1] << 8) | (buf[0] << 0)) *
+			M88DS3103_MCLK_KHZ * 1000 / 0x10000;
+
+	return 0;
+err:
+	dev_dbg(&priv->i2c->dev, "%s: failed=%d\n", __func__, ret);
+	return ret;
+}
+
+static int m88ds3103_read_snr(struct dvb_frontend *fe, u16 *snr)
+{
+	struct m88ds3103_priv *priv = fe->demodulator_priv;
+	struct dtv_frontend_properties *c = &fe->dtv_property_cache;
+	int ret, i, tmp;
+	u8 buf[3];
+	u16 noise, signal;
+	u32 noise_tot, signal_tot;
+	dev_dbg(&priv->i2c->dev, "%s:\n", __func__);
+	/* SNR is reported with a resolution of 0.1 dB */
+
+	/* more iterations for more accurate estimation */
+	#define M88DS3103_SNR_ITERATIONS 3
+
+	switch (c->delivery_system) {
+	case SYS_DVBS:
+		tmp = 0;
+
+		for (i = 0; i < M88DS3103_SNR_ITERATIONS; i++) {
+			ret = m88ds3103_rd_reg(priv, 0xff, &buf[0]);
+			if (ret)
+				goto err;
+
+			tmp += buf[0];
+		}
+
+		/* use of one register limits max value to 15 dB */
+		/* SNR(X) dB = 10 * ln(X) / ln(10) dB */
+		tmp = DIV_ROUND_CLOSEST(tmp, 8 * M88DS3103_SNR_ITERATIONS);
+		if (tmp)
+			*snr = 100ul * intlog2(tmp) / intlog2(10);
+		else
+			*snr = 0;
+		break;
+	case SYS_DVBS2:
+		noise_tot = 0;
+		signal_tot = 0;
+
+		for (i = 0; i < M88DS3103_SNR_ITERATIONS; i++) {
+			ret = m88ds3103_rd_regs(priv, 0x8c, buf, 3);
+			if (ret)
+				goto err;
+
+			noise = buf[1] << 6;    /* [13:6] */
+			noise |= buf[0] & 0x3f; /*  [5:0] */
+			noise >>= 2;
+			signal = buf[2] * buf[2];
+			signal >>= 1;
+
+			noise_tot += noise;
+			signal_tot += signal;
+		}
+
+		noise = noise_tot / M88DS3103_SNR_ITERATIONS;
+		signal = signal_tot / M88DS3103_SNR_ITERATIONS;
+
+		/* SNR(X) dB = 10 * log10(X) dB */
+		if (signal > noise) {
+			tmp = signal / noise;
+			*snr = 100ul * intlog10(tmp) / (1 << 24);
+		} else {
+			*snr = 0;
+		}
+		break;
+	default:
+		dev_dbg(&priv->i2c->dev, "%s: invalid delivery_system\n",
+				__func__);
+		ret = -EINVAL;
+		goto err;
+	}
+
+	return 0;
+err:
+	dev_dbg(&priv->i2c->dev, "%s: failed=%d\n", __func__, ret);
+	return ret;
+}
+
+
+static int m88ds3103_set_tone(struct dvb_frontend *fe,
+	fe_sec_tone_mode_t fe_sec_tone_mode)
+{
+	struct m88ds3103_priv *priv = fe->demodulator_priv;
+	int ret;
+	u8 u8tmp, tone, reg_a1_mask;
+	dev_dbg(&priv->i2c->dev, "%s: fe_sec_tone_mode=%d\n", __func__,
+			fe_sec_tone_mode);
+
+	if (!priv->warm) {
+		ret = -EAGAIN;
+		goto err;
+	}
+
+	switch (fe_sec_tone_mode) {
+	case SEC_TONE_ON:
+		tone = 0;
+		reg_a1_mask = 0x87;
+		break;
+	case SEC_TONE_OFF:
+		tone = 1;
+		reg_a1_mask = 0x00;
+		break;
+	default:
+		dev_dbg(&priv->i2c->dev, "%s: invalid fe_sec_tone_mode\n",
+				__func__);
+		ret = -EINVAL;
+		goto err;
+	}
+
+	u8tmp = tone << 7 | priv->cfg->envelope_mode << 5;
+	ret = m88ds3103_wr_reg_mask(priv, 0xa2, u8tmp, 0xe0);
+	if (ret)
+		goto err;
+
+	u8tmp = 1 << 2;
+	ret = m88ds3103_wr_reg_mask(priv, 0xa1, u8tmp, reg_a1_mask);
+	if (ret)
+		goto err;
+
+	return 0;
+err:
+	dev_dbg(&priv->i2c->dev, "%s: failed=%d\n", __func__, ret);
+	return ret;
+}
+
+static int m88ds3103_diseqc_send_master_cmd(struct dvb_frontend *fe,
+		struct dvb_diseqc_master_cmd *diseqc_cmd)
+{
+	struct m88ds3103_priv *priv = fe->demodulator_priv;
+	int ret, i;
+	u8 u8tmp;
+	dev_dbg(&priv->i2c->dev, "%s: msg=%*ph\n", __func__,
+			diseqc_cmd->msg_len, diseqc_cmd->msg);
+
+	if (!priv->warm) {
+		ret = -EAGAIN;
+		goto err;
+	}
+
+	if (diseqc_cmd->msg_len < 3 || diseqc_cmd->msg_len > 6) {
+		ret = -EINVAL;
+		goto err;
+	}
+
+	u8tmp = priv->cfg->envelope_mode << 5;
+	ret = m88ds3103_wr_reg_mask(priv, 0xa2, u8tmp, 0xe0);
+	if (ret)
+		goto err;
+
+	ret = m88ds3103_wr_regs(priv, 0xa3, diseqc_cmd->msg,
+			diseqc_cmd->msg_len);
+	if (ret)
+		goto err;
+
+	ret = m88ds3103_wr_reg(priv, 0xa1,
+			(diseqc_cmd->msg_len - 1) << 3 | 0x07);
+	if (ret)
+		goto err;
+
+	/* DiSEqC message typical period is 54 ms */
+	usleep_range(40000, 60000);
+
+	/* wait DiSEqC TX ready */
+	for (i = 20, u8tmp = 1; i && u8tmp; i--) {
+		usleep_range(5000, 10000);
+
+		ret = m88ds3103_rd_reg_mask(priv, 0xa1, &u8tmp, 0x40);
+		if (ret)
+			goto err;
+	}
+
+	dev_dbg(&priv->i2c->dev, "%s: loop=%d\n", __func__, i);
+
+	if (i == 0) {
+		dev_dbg(&priv->i2c->dev, "%s: diseqc tx timeout\n", __func__);
+
+		ret = m88ds3103_wr_reg_mask(priv, 0xa1, 0x40, 0xc0);
+		if (ret)
+			goto err;
+	}
+
+	ret = m88ds3103_wr_reg_mask(priv, 0xa2, 0x80, 0xc0);
+	if (ret)
+		goto err;
+
+	if (i == 0) {
+		ret = -ETIMEDOUT;
+		goto err;
+	}
+
+	return 0;
+err:
+	dev_dbg(&priv->i2c->dev, "%s: failed=%d\n", __func__, ret);
+	return ret;
+}
+
+static int m88ds3103_diseqc_send_burst(struct dvb_frontend *fe,
+	fe_sec_mini_cmd_t fe_sec_mini_cmd)
+{
+	struct m88ds3103_priv *priv = fe->demodulator_priv;
+	int ret, i;
+	u8 u8tmp, burst;
+	dev_dbg(&priv->i2c->dev, "%s: fe_sec_mini_cmd=%d\n", __func__,
+			fe_sec_mini_cmd);
+
+	if (!priv->warm) {
+		ret = -EAGAIN;
+		goto err;
+	}
+
+	u8tmp = priv->cfg->envelope_mode << 5;
+	ret = m88ds3103_wr_reg_mask(priv, 0xa2, u8tmp, 0xe0);
+	if (ret)
+		goto err;
+
+	switch (fe_sec_mini_cmd) {
+	case SEC_MINI_A:
+		burst = 0x02;
+		break;
+	case SEC_MINI_B:
+		burst = 0x01;
+		break;
+	default:
+		dev_dbg(&priv->i2c->dev, "%s: invalid fe_sec_mini_cmd\n",
+				__func__);
+		ret = -EINVAL;
+		goto err;
+	}
+
+	ret = m88ds3103_wr_reg(priv, 0xa1, burst);
+	if (ret)
+		goto err;
+
+	/* DiSEqC ToneBurst period is 12.5 ms */
+	usleep_range(11000, 20000);
+
+	/* wait DiSEqC TX ready */
+	for (i = 5, u8tmp = 1; i && u8tmp; i--) {
+		usleep_range(800, 2000);
+
+		ret = m88ds3103_rd_reg_mask(priv, 0xa1, &u8tmp, 0x40);
+		if (ret)
+			goto err;
+	}
+
+	dev_dbg(&priv->i2c->dev, "%s: loop=%d\n", __func__, i);
+
+	ret = m88ds3103_wr_reg_mask(priv, 0xa2, 0x80, 0xc0);
+	if (ret)
+		goto err;
+
+	if (i == 0) {
+		dev_dbg(&priv->i2c->dev, "%s: diseqc tx timeout\n", __func__);
+		ret = -ETIMEDOUT;
+		goto err;
+	}
+
+	return 0;
+err:
+	dev_dbg(&priv->i2c->dev, "%s: failed=%d\n", __func__, ret);
+	return ret;
+}
+
+static int m88ds3103_get_tune_settings(struct dvb_frontend *fe,
+	struct dvb_frontend_tune_settings *s)
+{
+	s->min_delay_ms = 3000;
+
+	return 0;
+}
+
+static void m88ds3103_release(struct dvb_frontend *fe)
+{
+	struct m88ds3103_priv *priv = fe->demodulator_priv;
+	i2c_del_mux_adapter(priv->i2c_adapter);
+	kfree(priv);
+}
+
+static int m88ds3103_select(struct i2c_adapter *adap, void *mux_priv, u32 chan)
+{
+	struct m88ds3103_priv *priv = mux_priv;
+	int ret;
+	struct i2c_msg gate_open_msg[1] = {
+		{
+			.addr = priv->cfg->i2c_addr,
+			.flags = 0,
+			.len = 2,
+			.buf = "\x03\x11",
+		}
+	};
+
+	mutex_lock(&priv->i2c_mutex);
+
+	/* open tuner I2C repeater for 1 xfer, closes automatically */
+	ret = __i2c_transfer(priv->i2c, gate_open_msg, 1);
+	if (ret != 1) {
+		dev_warn(&priv->i2c->dev, "%s: i2c wr failed=%d\n",
+				KBUILD_MODNAME, ret);
+		if (ret >= 0)
+			ret = -EREMOTEIO;
+
+		return ret;
+	}
+
+	return 0;
+}
+
+static int m88ds3103_deselect(struct i2c_adapter *adap, void *mux_priv,
+		u32 chan)
+{
+	struct m88ds3103_priv *priv = mux_priv;
+
+	mutex_unlock(&priv->i2c_mutex);
+
+	return 0;
+}
+
+struct dvb_frontend *m88ds3103_attach(const struct m88ds3103_config *cfg,
+		struct i2c_adapter *i2c, struct i2c_adapter **tuner_i2c_adapter)
+{
+	int ret;
+	struct m88ds3103_priv *priv;
+	u8 chip_id, u8tmp;
+
+	/* allocate memory for the internal priv */
+	priv = kzalloc(sizeof(*priv), GFP_KERNEL);
+	if (!priv) {
+		ret = -ENOMEM;
+		dev_err(&i2c->dev, "%s: kzalloc() failed\n", KBUILD_MODNAME);
+		goto err;
+	}
+
+	priv->cfg = cfg;
+	priv->i2c = i2c;
+	mutex_init(&priv->i2c_mutex);
+
+	ret = m88ds3103_rd_reg(priv, 0x01, &chip_id);
+	if (ret)
+		goto err;
+
+	dev_dbg(&priv->i2c->dev, "%s: chip_id=%02x\n", __func__, chip_id);
+
+	switch (chip_id) {
+	case 0xd0:
+		break;
+	default:
+		goto err;
+	}
+
+	switch (priv->cfg->clock_out) {
+	case M88DS3103_CLOCK_OUT_DISABLED:
+		u8tmp = 0x80;
+		break;
+	case M88DS3103_CLOCK_OUT_ENABLED:
+		u8tmp = 0x00;
+		break;
+	case M88DS3103_CLOCK_OUT_ENABLED_DIV2:
+		u8tmp = 0x10;
+		break;
+	default:
+		goto err;
+	}
+
+	ret = m88ds3103_wr_reg(priv, 0x29, u8tmp);
+	if (ret)
+		goto err;
+
+	/* sleep */
+	ret = m88ds3103_wr_reg_mask(priv, 0x08, 0x00, 0x01);
+	if (ret)
+		goto err;
+
+	ret = m88ds3103_wr_reg_mask(priv, 0x04, 0x01, 0x01);
+	if (ret)
+		goto err;
+
+	ret = m88ds3103_wr_reg_mask(priv, 0x23, 0x10, 0x10);
+	if (ret)
+		goto err;
+
+	/* create mux i2c adapter for tuner */
+	priv->i2c_adapter = i2c_add_mux_adapter(i2c, &i2c->dev, priv, 0, 0, 0,
+			m88ds3103_select, m88ds3103_deselect);
+	if (priv->i2c_adapter == NULL)
+		goto err;
+
+	*tuner_i2c_adapter = priv->i2c_adapter;
+
+	/* create dvb_frontend */
+	memcpy(&priv->fe.ops, &m88ds3103_ops, sizeof(struct dvb_frontend_ops));
+	priv->fe.demodulator_priv = priv;
+
+	return &priv->fe;
+err:
+	dev_dbg(&i2c->dev, "%s: failed=%d\n", __func__, ret);
+	kfree(priv);
+	return NULL;
+}
+EXPORT_SYMBOL(m88ds3103_attach);
+
+static struct dvb_frontend_ops m88ds3103_ops = {
+	.delsys = { SYS_DVBS, SYS_DVBS2 },
+	.info = {
+		.name = "Montage M88DS3103",
+		.frequency_min =  950000,
+		.frequency_max = 2150000,
+		.frequency_tolerance = 5000,
+		.symbol_rate_min =  1000000,
+		.symbol_rate_max = 45000000,
+		.caps = FE_CAN_INVERSION_AUTO |
+			FE_CAN_FEC_1_2 |
+			FE_CAN_FEC_2_3 |
+			FE_CAN_FEC_3_4 |
+			FE_CAN_FEC_4_5 |
+			FE_CAN_FEC_5_6 |
+			FE_CAN_FEC_6_7 |
+			FE_CAN_FEC_7_8 |
+			FE_CAN_FEC_8_9 |
+			FE_CAN_FEC_AUTO |
+			FE_CAN_QPSK |
+			FE_CAN_RECOVER |
+			FE_CAN_2G_MODULATION
+	},
+
+	.release = m88ds3103_release,
+
+	.get_tune_settings = m88ds3103_get_tune_settings,
+
+	.init = m88ds3103_init,
+	.sleep = m88ds3103_sleep,
+
+	.set_frontend = m88ds3103_set_frontend,
+	.get_frontend = m88ds3103_get_frontend,
+
+	.read_status = m88ds3103_read_status,
+	.read_snr = m88ds3103_read_snr,
+
+	.diseqc_send_master_cmd = m88ds3103_diseqc_send_master_cmd,
+	.diseqc_send_burst = m88ds3103_diseqc_send_burst,
+
+	.set_tone = m88ds3103_set_tone,
+};
+
+MODULE_AUTHOR("Antti Palosaari <crope@iki.fi>");
+MODULE_DESCRIPTION("Montage M88DS3103 DVB-S/S2 demodulator driver");
+MODULE_LICENSE("GPL");
+MODULE_FIRMWARE(M88DS3103_FIRMWARE);
diff --git a/drivers/media/dvb-frontends/m88ds3103.h b/drivers/media/dvb-frontends/m88ds3103.h
new file mode 100644
index 0000000..bbb7e3a
--- /dev/null
+++ b/drivers/media/dvb-frontends/m88ds3103.h
@@ -0,0 +1,114 @@
+/*
+ * Montage M88DS3103 demodulator driver
+ *
+ * Copyright (C) 2013 Antti Palosaari <crope@iki.fi>
+ *
+ *    This program is free software; you can redistribute it and/or modify
+ *    it under the terms of the GNU General Public License as published by
+ *    the Free Software Foundation; either version 2 of the License, or
+ *    (at your option) any later version.
+ *
+ *    This program is distributed in the hope that it will be useful,
+ *    but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *    GNU General Public License for more details.
+ */
+
+#ifndef M88DS3103_H
+#define M88DS3103_H
+
+#include <linux/dvb/frontend.h>
+
+struct m88ds3103_config {
+	/*
+	 * I2C address
+	 * Default: none, must set
+	 * 0x68, ...
+	 */
+	u8 i2c_addr;
+
+	/*
+	 * clock
+	 * Default: none, must set
+	 * 27000000
+	 */
+	u32 clock;
+
+	/*
+	 * max bytes I2C provider is asked to write at once
+	 * Default: none, must set
+	 * 33, 65, ...
+	 */
+	u16 i2c_wr_max;
+
+	/*
+	 * TS output mode
+	 * Default: M88DS3103_TS_SERIAL
+	 */
+#define M88DS3103_TS_SERIAL             0 /* TS output pin D0, normal */
+#define M88DS3103_TS_SERIAL_D7          1 /* TS output pin D7 */
+#define M88DS3103_TS_PARALLEL           2 /* 24 MHz, normal */
+#define M88DS3103_TS_PARALLEL_12        3 /* 12 MHz */
+#define M88DS3103_TS_PARALLEL_16        4 /* 16 MHz */
+#define M88DS3103_TS_PARALLEL_19_2      5 /* 19.2 MHz */
+#define M88DS3103_TS_CI                 6 /* 6 MHz */
+	u8 ts_mode;
+
+	/*
+	 * spectrum inversion
+	 * Default: 0
+	 */
+	u8 spec_inv:1;
+
+	/*
+	 * AGC polarity
+	 * Default: 0
+	 */
+	u8 agc_inv:1;
+
+	/*
+	 * clock output
+	 * Default: M88DS3103_CLOCK_OUT_DISABLED
+	 */
+#define M88DS3103_CLOCK_OUT_DISABLED        0
+#define M88DS3103_CLOCK_OUT_ENABLED         1
+#define M88DS3103_CLOCK_OUT_ENABLED_DIV2    2
+	u8 clock_out;
+
+	/*
+	 * DiSEqC envelope mode
+	 * Default: 0
+	 */
+	u8 envelope_mode:1;
+
+	/*
+	 * AGC configuration
+	 * Default: none, must set
+	 */
+	u8 agc;
+};
+
+/*
+ * The driver implements its own I2C adapter for tuner I2C access, since the
+ * chip has an I2C-gate control which closes the gate automatically after an
+ * I2C transfer. Using our own I2C adapter we can work around that.
+ */
+
+#if defined(CONFIG_DVB_M88DS3103) || \
+		(defined(CONFIG_DVB_M88DS3103_MODULE) && defined(MODULE))
+extern struct dvb_frontend *m88ds3103_attach(
+		const struct m88ds3103_config *config,
+		struct i2c_adapter *i2c,
+		struct i2c_adapter **tuner_i2c);
+#else
+static inline struct dvb_frontend *m88ds3103_attach(
+		const struct m88ds3103_config *config,
+		struct i2c_adapter *i2c,
+		struct i2c_adapter **tuner_i2c)
+{
+	pr_warn("%s: driver disabled by Kconfig\n", __func__);
+	return NULL;
+}
+#endif
+
+#endif
diff --git a/drivers/media/dvb-frontends/m88ds3103_priv.h b/drivers/media/dvb-frontends/m88ds3103_priv.h
new file mode 100644
index 0000000..84c3c06
--- /dev/null
+++ b/drivers/media/dvb-frontends/m88ds3103_priv.h
@@ -0,0 +1,215 @@
+/*
+ * Montage M88DS3103 demodulator driver
+ *
+ * Copyright (C) 2013 Antti Palosaari <crope@iki.fi>
+ *
+ *    This program is free software; you can redistribute it and/or modify
+ *    it under the terms of the GNU General Public License as published by
+ *    the Free Software Foundation; either version 2 of the License, or
+ *    (at your option) any later version.
+ *
+ *    This program is distributed in the hope that it will be useful,
+ *    but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *    GNU General Public License for more details.
+ */
+
+#ifndef M88DS3103_PRIV_H
+#define M88DS3103_PRIV_H
+
+#include "dvb_frontend.h"
+#include "m88ds3103.h"
+#include "dvb_math.h"
+#include <linux/firmware.h>
+#include <linux/i2c-mux.h>
+
+#define M88DS3103_FIRMWARE "dvb-demod-m88ds3103.fw"
+#define M88DS3103_MCLK_KHZ 96000
+
+struct m88ds3103_priv {
+	struct i2c_adapter *i2c;
+	/* mutex needed due to own tuner I2C adapter */
+	struct mutex i2c_mutex;
+	const struct m88ds3103_config *cfg;
+	struct dvb_frontend fe;
+	fe_delivery_system_t delivery_system;
+	fe_status_t fe_status;
+	bool warm; /* FW running */
+	struct i2c_adapter *i2c_adapter;
+};
+
+struct m88ds3103_reg_val {
+	u8 reg;
+	u8 val;
+};
+
+static const struct m88ds3103_reg_val m88ds3103_dvbs_init_reg_vals[] = {
+	{0x23, 0x07},
+	{0x08, 0x03},
+	{0x0c, 0x02},
+	{0x21, 0x54},
+	{0x25, 0x8a},
+	{0x27, 0x31},
+	{0x30, 0x08},
+	{0x31, 0x40},
+	{0x32, 0x32},
+	{0x35, 0xff},
+	{0x3a, 0x00},
+	{0x37, 0x10},
+	{0x38, 0x10},
+	{0x39, 0x02},
+	{0x42, 0x60},
+	{0x4a, 0x80},
+	{0x4b, 0x04},
+	{0x4d, 0x91},
+	{0x5d, 0xc8},
+	{0x50, 0x36},
+	{0x51, 0x36},
+	{0x52, 0x36},
+	{0x53, 0x36},
+	{0x56, 0x01},
+	{0x63, 0x0f},
+	{0x64, 0x30},
+	{0x65, 0x40},
+	{0x68, 0x26},
+	{0x69, 0x4c},
+	{0x70, 0x20},
+	{0x71, 0x70},
+	{0x72, 0x04},
+	{0x73, 0x00},
+	{0x70, 0x40},
+	{0x71, 0x70},
+	{0x72, 0x04},
+	{0x73, 0x00},
+	{0x70, 0x60},
+	{0x71, 0x70},
+	{0x72, 0x04},
+	{0x73, 0x00},
+	{0x70, 0x80},
+	{0x71, 0x70},
+	{0x72, 0x04},
+	{0x73, 0x00},
+	{0x70, 0xa0},
+	{0x71, 0x70},
+	{0x72, 0x04},
+	{0x73, 0x00},
+	{0x70, 0x1f},
+	{0x76, 0x38},
+	{0x77, 0xa6},
+	{0x78, 0x0c},
+	{0x79, 0x80},
+	{0x7f, 0x14},
+	{0x7c, 0x00},
+	{0xae, 0x82},
+	{0x80, 0x64},
+	{0x81, 0x66},
+	{0x82, 0x44},
+	{0x85, 0x04},
+	{0xcd, 0xf4},
+	{0x90, 0x33},
+	{0xa0, 0x44},
+	{0xc0, 0x08},
+	{0xc3, 0x10},
+	{0xc4, 0x08},
+	{0xc5, 0xf0},
+	{0xc6, 0xff},
+	{0xc7, 0x00},
+	{0xc8, 0x1a},
+	{0xc9, 0x80},
+	{0xe0, 0xf8},
+	{0xe6, 0x8b},
+	{0xd0, 0x40},
+	{0xf8, 0x20},
+	{0xfa, 0x0f},
+	{0x00, 0x00},
+	{0xbd, 0x01},
+	{0xb8, 0x00},
+};
+
+static const struct m88ds3103_reg_val m88ds3103_dvbs2_init_reg_vals[] = {
+	{0x23, 0x07},
+	{0x08, 0x07},
+	{0x0c, 0x02},
+	{0x21, 0x54},
+	{0x25, 0x8a},
+	{0x27, 0x31},
+	{0x30, 0x08},
+	{0x32, 0x32},
+	{0x35, 0xff},
+	{0x3a, 0x00},
+	{0x37, 0x10},
+	{0x38, 0x10},
+	{0x39, 0x02},
+	{0x42, 0x60},
+	{0x4a, 0x80},
+	{0x4b, 0x04},
+	{0x4d, 0x91},
+	{0x5d, 0xc8},
+	{0x50, 0x36},
+	{0x51, 0x36},
+	{0x52, 0x36},
+	{0x53, 0x36},
+	{0x56, 0x01},
+	{0x63, 0x0f},
+	{0x64, 0x10},
+	{0x65, 0x20},
+	{0x68, 0x46},
+	{0x69, 0xcd},
+	{0x70, 0x20},
+	{0x71, 0x70},
+	{0x72, 0x04},
+	{0x73, 0x00},
+	{0x70, 0x40},
+	{0x71, 0x70},
+	{0x72, 0x04},
+	{0x73, 0x00},
+	{0x70, 0x60},
+	{0x71, 0x70},
+	{0x72, 0x04},
+	{0x73, 0x00},
+	{0x70, 0x80},
+	{0x71, 0x70},
+	{0x72, 0x04},
+	{0x73, 0x00},
+	{0x70, 0xa0},
+	{0x71, 0x70},
+	{0x72, 0x04},
+	{0x73, 0x00},
+	{0x70, 0x1f},
+	{0x76, 0x38},
+	{0x77, 0xa6},
+	{0x78, 0x0c},
+	{0x79, 0x80},
+	{0x7f, 0x14},
+	{0x85, 0x08},
+	{0xcd, 0xf4},
+	{0x90, 0x33},
+	{0x86, 0x00},
+	{0x87, 0x0f},
+	{0x89, 0x00},
+	{0x8b, 0x44},
+	{0x8c, 0x66},
+	{0x9d, 0xc1},
+	{0x8a, 0x10},
+	{0xad, 0x40},
+	{0xa0, 0x44},
+	{0xc0, 0x08},
+	{0xc1, 0x10},
+	{0xc2, 0x08},
+	{0xc3, 0x10},
+	{0xc4, 0x08},
+	{0xc5, 0xf0},
+	{0xc6, 0xff},
+	{0xc7, 0x00},
+	{0xc8, 0x1a},
+	{0xc9, 0x80},
+	{0xca, 0x23},
+	{0xcb, 0x24},
+	{0xcc, 0xf4},
+	{0xce, 0x74},
+	{0x00, 0x00},
+	{0xbd, 0x01},
+	{0xb8, 0x00},
+};
+
+#endif
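
The two tables above are plain register/value lists; the driver's .c file (not part of this hunk) presumably walks them once at init time with a loop along these lines. The write helper name below is a placeholder, not the driver's actual function:

/* sketch only: m88ds3103_wr_reg() stands in for the driver's real
 * register-write helper */
for (i = 0; i < ARRAY_SIZE(m88ds3103_dvbs_init_reg_vals); i++) {
	ret = m88ds3103_wr_reg(priv,
			       m88ds3103_dvbs_init_reg_vals[i].reg,
			       m88ds3103_dvbs_init_reg_vals[i].val);
	if (ret)
		goto err;
}
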
diff --git a/drivers/media/dvb-frontends/m88rs2000.c b/drivers/media/dvb-frontends/m88rs2000.c
index 4da5272..b235146 100644
--- a/drivers/media/dvb-frontends/m88rs2000.c
+++ b/drivers/media/dvb-frontends/m88rs2000.c
@@ -110,28 +110,94 @@
 	return b1[0];
 }
 
+static u32 m88rs2000_get_mclk(struct dvb_frontend *fe)
+{
+	struct m88rs2000_state *state = fe->demodulator_priv;
+	u32 mclk;
+	u8 reg;
+	/* Must not be 0x00 or 0xff */
+	reg = m88rs2000_readreg(state, 0x86);
+	if (!reg || reg == 0xff)
+		return 0;
+
+	reg /= 2;
+	reg += 1;
+
+	mclk = (u32)(reg * RS2000_FE_CRYSTAL_KHZ + 28 / 2) / 28;
+
+	return mclk;
+}
+
+static int m88rs2000_set_carrieroffset(struct dvb_frontend *fe, s16 offset)
+{
+	struct m88rs2000_state *state = fe->demodulator_priv;
+	u32 mclk;
+	s32 tmp;
+	u8 reg;
+	int ret;
+
+	mclk = m88rs2000_get_mclk(fe);
+	if (!mclk)
+		return -EINVAL;
+
+	tmp = (offset * 4096 + (s32)mclk / 2) / (s32)mclk;
+	if (tmp < 0)
+		tmp += 4096;
+
+	/* Carrier Offset */
+	ret = m88rs2000_writereg(state, 0x9c, (u8)(tmp >> 4));
+
+	reg = m88rs2000_readreg(state, 0x9d);
+	reg &= 0xf;
+	reg |= (u8)(tmp & 0xf) << 4;
+
+	ret |= m88rs2000_writereg(state, 0x9d, reg);
+
+	return ret;
+}
+
 static int m88rs2000_set_symbolrate(struct dvb_frontend *fe, u32 srate)
 {
 	struct m88rs2000_state *state = fe->demodulator_priv;
 	int ret;
-	u32 temp;
+	u64 temp;
+	u32 mclk;
 	u8 b[3];
 
 	if ((srate < 1000000) || (srate > 45000000))
 		return -EINVAL;
 
+	mclk = m88rs2000_get_mclk(fe);
+	if (!mclk)
+		return -EINVAL;
+
 	temp = srate / 1000;
-	temp *= 11831;
-	temp /= 68;
-	temp -= 3;
+	temp *= 1 << 24;
+
+	do_div(temp, mclk);
 
 	b[0] = (u8) (temp >> 16) & 0xff;
 	b[1] = (u8) (temp >> 8) & 0xff;
 	b[2] = (u8) temp & 0xff;
+
 	ret = m88rs2000_writereg(state, 0x93, b[2]);
 	ret |= m88rs2000_writereg(state, 0x94, b[1]);
 	ret |= m88rs2000_writereg(state, 0x95, b[0]);
 
+	if (srate > 10000000)
+		ret |= m88rs2000_writereg(state, 0xa0, 0x20);
+	else
+		ret |= m88rs2000_writereg(state, 0xa0, 0x60);
+
+	ret |= m88rs2000_writereg(state, 0xa1, 0xe0);
+
+	if (srate > 12000000)
+		ret |= m88rs2000_writereg(state, 0xa3, 0x20);
+	else if (srate > 2800000)
+		ret |= m88rs2000_writereg(state, 0xa3, 0x98);
+	else
+		ret |= m88rs2000_writereg(state, 0xa3, 0x90);
+
 	deb_info("m88rs2000: m88rs2000_set_symbolrate\n");
 	return ret;
 }
@@ -261,8 +327,6 @@
 
 struct inittab fe_reset[] = {
 	{DEMOD_WRITE, 0x00, 0x01},
-	{DEMOD_WRITE, 0xf1, 0xbf},
-	{DEMOD_WRITE, 0x00, 0x01},
 	{DEMOD_WRITE, 0x20, 0x81},
 	{DEMOD_WRITE, 0x21, 0x80},
 	{DEMOD_WRITE, 0x10, 0x33},
@@ -305,9 +369,6 @@
 	{DEMOD_WRITE, 0x9b, 0x64},
 	{DEMOD_WRITE, 0x9e, 0x00},
 	{DEMOD_WRITE, 0x9f, 0xf8},
-	{DEMOD_WRITE, 0xa0, 0x20},
-	{DEMOD_WRITE, 0xa1, 0xe0},
-	{DEMOD_WRITE, 0xa3, 0x38},
 	{DEMOD_WRITE, 0x98, 0xff},
 	{DEMOD_WRITE, 0xc0, 0x0f},
 	{DEMOD_WRITE, 0x89, 0x01},
@@ -408,7 +469,7 @@
 
 	*status = 0;
 
-	if ((reg & 0x7) == 0x7) {
+	if ((reg & 0xee) == 0xee) {
 		*status = FE_HAS_CARRIER | FE_HAS_SIGNAL | FE_HAS_VITERBI
 			| FE_HAS_SYNC | FE_HAS_LOCK;
 		if (state->config->set_ts_params)
@@ -480,34 +541,39 @@
 static int m88rs2000_set_fec(struct m88rs2000_state *state,
 		fe_code_rate_t fec)
 {
-	u16 fec_set;
+	u8 fec_set, reg;
+	int ret;
+
 	switch (fec) {
-	/* This is not confirmed kept for reference */
-/*	case FEC_1_2:
-		fec_set = 0x88;
+	case FEC_1_2:
+		fec_set = 0x8;
 		break;
 	case FEC_2_3:
-		fec_set = 0x68;
+		fec_set = 0x10;
 		break;
 	case FEC_3_4:
-		fec_set = 0x48;
+		fec_set = 0x20;
 		break;
 	case FEC_5_6:
-		fec_set = 0x28;
+		fec_set = 0x40;
 		break;
 	case FEC_7_8:
-		fec_set = 0x18;
-		break; */
+		fec_set = 0x80;
+		break;
 	case FEC_AUTO:
 	default:
-		fec_set = 0x08;
+		fec_set = 0x0;
 	}
-	m88rs2000_writereg(state, 0x76, fec_set);
 
-	return 0;
+	reg = m88rs2000_readreg(state, 0x70);
+	reg &= 0x7;
+	ret = m88rs2000_writereg(state, 0x70, reg | fec_set);
+
+	ret |= m88rs2000_writereg(state, 0x76, 0x8);
+
+	return ret;
 }
 
-
 static fe_code_rate_t m88rs2000_get_fec(struct m88rs2000_state *state)
 {
 	u8 reg;
@@ -515,18 +581,20 @@
 	reg = m88rs2000_readreg(state, 0x76);
 	m88rs2000_writereg(state, 0x9a, 0xb0);
 
+	reg &= 0xf0;
+	reg >>= 5;
+
 	switch (reg) {
-	case 0x88:
+	case 0x4:
 		return FEC_1_2;
-	case 0x68:
+	case 0x3:
 		return FEC_2_3;
-	case 0x48:
+	case 0x2:
 		return FEC_3_4;
-	case 0x28:
+	case 0x1:
 		return FEC_5_6;
-	case 0x18:
+	case 0x0:
 		return FEC_7_8;
-	case 0x08:
 	default:
 		break;
 	}
@@ -540,9 +608,8 @@
 	struct dtv_frontend_properties *c = &fe->dtv_property_cache;
 	fe_status_t status;
 	int i, ret = 0;
-	s32 tmp;
 	u32 tuner_freq;
-	u16 offset = 0;
+	s16 offset = 0;
 	u8 reg;
 
 	state->no_lock_count = 0;
@@ -567,38 +634,31 @@
 	if (ret < 0)
 		return -ENODEV;
 
-	offset = tuner_freq - c->frequency;
+	offset = (s16)((s32)tuner_freq - c->frequency);
 
-	/* calculate offset assuming 96000kHz*/
-	tmp = offset;
-	tmp *= 65536;
+	/* default mclk value 96.4285 * 2 * 1000 = 192857 */
+	if (((c->frequency % 192857) >= (192857 - 3000)) ||
+				(c->frequency % 192857) <= 3000)
+		ret = m88rs2000_writereg(state, 0x86, 0xc2);
+	else
+		ret = m88rs2000_writereg(state, 0x86, 0xc6);
 
-	tmp = (2 * tmp + 96000) / (2 * 96000);
-	if (tmp < 0)
-		tmp += 65536;
-
-	offset = tmp & 0xffff;
-
-	ret = m88rs2000_writereg(state, 0x9a, 0x30);
-	/* Unknown usually 0xc6 sometimes 0xc1 */
-	reg = m88rs2000_readreg(state, 0x86);
-	ret |= m88rs2000_writereg(state, 0x86, reg);
-	/* Offset lower nibble always 0 */
-	ret |= m88rs2000_writereg(state, 0x9c, (offset >> 8));
-	ret |= m88rs2000_writereg(state, 0x9d, offset & 0xf0);
-
-
-	/* Reset Demod */
-	ret = m88rs2000_tab_set(state, fe_reset);
+	ret |= m88rs2000_set_carrieroffset(fe, offset);
 	if (ret < 0)
 		return -ENODEV;
 
-	/* Unknown */
-	reg = m88rs2000_readreg(state, 0x70);
-	ret = m88rs2000_writereg(state, 0x70, reg);
+	/* Reset demod by symbol rate */
+	if (c->symbol_rate > 27500000)
+		ret = m88rs2000_writereg(state, 0xf1, 0xa4);
+	else
+		ret = m88rs2000_writereg(state, 0xf1, 0xbf);
+
+	ret |= m88rs2000_tab_set(state, fe_reset);
+	if (ret < 0)
+		return -ENODEV;
 
 	/* Set FEC */
-	ret |= m88rs2000_set_fec(state, c->fec_inner);
+	ret = m88rs2000_set_fec(state, c->fec_inner);
 	ret |= m88rs2000_writereg(state, 0x85, 0x1);
 	ret |= m88rs2000_writereg(state, 0x8a, 0xbf);
 	ret |= m88rs2000_writereg(state, 0x8d, 0x1e);
@@ -620,7 +680,7 @@
 
 	for (i = 0; i < 25; i++) {
 		reg = m88rs2000_readreg(state, 0x8c);
-		if ((reg & 0x7) == 0x7) {
+		if ((reg & 0xee) == 0xee) {
 			status = FE_HAS_LOCK;
 			break;
 		}
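
To make the new arithmetic above easier to check, here is a standalone user-space sketch of the same calculations: the master clock derived from register 0x86 in m88rs2000_get_mclk(), and the 24-bit symbol-rate word written to registers 0x93..0x95. It only mirrors the math and is not part of the patch.

#include <stdint.h>
#include <stdio.h>

#define RS2000_FE_CRYSTAL_KHZ 27000

/* mirrors m88rs2000_get_mclk(): register 0x86 encodes the PLL ratio */
static uint32_t mclk_khz(uint8_t reg)
{
	if (reg == 0x00 || reg == 0xff)
		return 0;
	reg = reg / 2 + 1;
	return (reg * RS2000_FE_CRYSTAL_KHZ + 28 / 2) / 28;
}

/* mirrors m88rs2000_set_symbolrate(): srate scaled by 2^24 / mclk */
static uint32_t srate_word(uint32_t srate_hz, uint32_t mclk)
{
	uint64_t temp = srate_hz / 1000;

	temp *= 1 << 24;
	return (uint32_t)(temp / mclk);		/* do_div() in the kernel */
}

int main(void)
{
	uint32_t mclk = mclk_khz(0xc6);		/* the "usually 0xc6" case */
	uint32_t word = srate_word(27500000, mclk);

	printf("mclk = %u kHz, symbol-rate word = 0x%06x\n", mclk, word);
	return 0;
}
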
diff --git a/drivers/media/dvb-frontends/m88rs2000.h b/drivers/media/dvb-frontends/m88rs2000.h
index 14ce31e..0a50ea9 100644
--- a/drivers/media/dvb-frontends/m88rs2000.h
+++ b/drivers/media/dvb-frontends/m88rs2000.h
@@ -53,6 +53,8 @@
 }
 #endif /* CONFIG_DVB_M88RS2000 */
 
+#define RS2000_FE_CRYSTAL_KHZ 27000
+
 enum {
 	DEMOD_WRITE = 0x1,
 	WRITE_DELAY = 0x10,
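
Similarly, the carrier-offset path added in m88rs2000_set_carrieroffset() above reduces to scaling a signed kHz offset into a 12-bit value relative to mclk and splitting it across register 0x9c and the upper nibble of 0x9d. A standalone sketch of just that arithmetic (the input values are examples):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	int16_t offset = -750;		/* example tuner error, kHz */
	uint32_t mclk = 96000;		/* kHz */
	int32_t tmp;
	uint8_t reg_9c, reg_9d_hi;

	tmp = (offset * 4096 + (int32_t)mclk / 2) / (int32_t)mclk;
	if (tmp < 0)
		tmp += 4096;		/* two's-complement wrap to 12 bits */

	reg_9c = (uint8_t)(tmp >> 4);
	reg_9d_hi = (uint8_t)(tmp & 0xf) << 4;

	printf("reg 0x9c = 0x%02x, reg 0x9d[7:4] = 0x%02x\n",
	       reg_9c, reg_9d_hi);
	return 0;
}
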
diff --git a/drivers/media/dvb-frontends/nxt200x.c b/drivers/media/dvb-frontends/nxt200x.c
index fbca985..8a8e1ec 100644
--- a/drivers/media/dvb-frontends/nxt200x.c
+++ b/drivers/media/dvb-frontends/nxt200x.c
@@ -2,7 +2,7 @@
  *    Support for NXT2002 and NXT2004 - VSB/QAM
  *
  *    Copyright (C) 2005 Kirk Lapray <kirk.lapray@gmail.com>
- *    Copyright (C) 2006 Michael Krufky <mkrufky@m1k.net>
+ *    Copyright (C) 2006-2014 Michael Krufky <mkrufky@linuxtv.org>
  *    based on nxt2002 by Taylor Jacob <rtjacob@earthlink.net>
  *    and nxt2004 by Jean-Francois Thibert <jeanfrancois@sagetv.com>
  *
@@ -40,7 +40,7 @@
 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
 
 /* Max transfer size done by I2C transfer functions */
-#define MAX_XFER_SIZE  64
+#define MAX_XFER_SIZE  256
 
 #define NXT2002_DEFAULT_FIRMWARE "dvb-fe-nxt2002.fw"
 #define NXT2004_DEFAULT_FIRMWARE "dvb-fe-nxt2004.fw"
diff --git a/drivers/media/i2c/Kconfig b/drivers/media/i2c/Kconfig
index 842654d..4aa9c53 100644
--- a/drivers/media/i2c/Kconfig
+++ b/drivers/media/i2c/Kconfig
@@ -555,14 +555,6 @@
 	  This is a Video4Linux2 sensor-level driver for the Micron
 	  MT9V032 752x480 CMOS sensor.
 
-config VIDEO_TCM825X
-	tristate "TCM825x camera sensor support"
-	depends on I2C && VIDEO_V4L2 && VIDEO_V4L2_INT_DEVICE
-	depends on MEDIA_CAMERA_SUPPORT
-	---help---
-	  This is a driver for the Toshiba TCM825x VGA camera sensor.
-	  It is used for example in Nokia N800.
-
 config VIDEO_SR030PC30
 	tristate "Siliconfile SR030PC30 sensor support"
 	depends on I2C && VIDEO_V4L2
@@ -594,6 +586,13 @@
           This is a V4L2 sensor-level driver for Samsung S5K4ECGX 5M
           camera sensor with an embedded SoC image signal processor.
 
+config VIDEO_S5K5BAF
+	tristate "Samsung S5K5BAF sensor support"
+	depends on I2C && VIDEO_V4L2 && VIDEO_V4L2_SUBDEV_API
+	---help---
+	  This is a V4L2 sensor-level driver for Samsung S5K5BAF 2M
+	  camera sensor with an embedded SoC image signal processor.
+
 source "drivers/media/i2c/smiapp/Kconfig"
 
 config VIDEO_S5C73M3
@@ -655,6 +654,18 @@
 	  To compile this driver as a module, choose M here: the
 	  module will be called upd64083.
 
+comment "Audio/Video compression chips"
+
+config VIDEO_SAA6752HS
+	tristate "Philips SAA6752HS MPEG-2 Audio/Video Encoder"
+	depends on VIDEO_V4L2 && I2C
+	---help---
+	  Support for the Philips SAA6752HS MPEG-2 video and MPEG-audio/AC-3
+	  audio encoder with multiplexer.
+
+	  To compile this driver as a module, choose M here: the
+	  module will be called saa6752hs.
+
 comment "Miscellaneous helper chips"
 
 config VIDEO_THS7303
diff --git a/drivers/media/i2c/Makefile b/drivers/media/i2c/Makefile
index e03f177..48888ae 100644
--- a/drivers/media/i2c/Makefile
+++ b/drivers/media/i2c/Makefile
@@ -19,6 +19,7 @@
 obj-$(CONFIG_VIDEO_SAA7127) += saa7127.o
 obj-$(CONFIG_VIDEO_SAA7185) += saa7185.o
 obj-$(CONFIG_VIDEO_SAA7191) += saa7191.o
+obj-$(CONFIG_VIDEO_SAA6752HS) += saa6752hs.o
 obj-$(CONFIG_VIDEO_ADV7170) += adv7170.o
 obj-$(CONFIG_VIDEO_ADV7175) += adv7175.o
 obj-$(CONFIG_VIDEO_ADV7180) += adv7180.o
@@ -57,7 +58,6 @@
 obj-$(CONFIG_VIDEO_OV7640) += ov7640.o
 obj-$(CONFIG_VIDEO_OV7670) += ov7670.o
 obj-$(CONFIG_VIDEO_OV9650) += ov9650.o
-obj-$(CONFIG_VIDEO_TCM825X) += tcm825x.o
 obj-$(CONFIG_VIDEO_MT9M032) += mt9m032.o
 obj-$(CONFIG_VIDEO_MT9P031) += mt9p031.o
 obj-$(CONFIG_VIDEO_MT9T001) += mt9t001.o
@@ -67,6 +67,7 @@
 obj-$(CONFIG_VIDEO_NOON010PC30)	+= noon010pc30.o
 obj-$(CONFIG_VIDEO_S5K6AA)	+= s5k6aa.o
 obj-$(CONFIG_VIDEO_S5K4ECGX)	+= s5k4ecgx.o
+obj-$(CONFIG_VIDEO_S5K5BAF)	+= s5k5baf.o
 obj-$(CONFIG_VIDEO_S5C73M3)	+= s5c73m3/
 obj-$(CONFIG_VIDEO_ADP1653)	+= adp1653.o
 obj-$(CONFIG_VIDEO_AS3645A)	+= as3645a.o
diff --git a/drivers/media/i2c/ad9389b.c b/drivers/media/i2c/ad9389b.c
index b06a7e5..83225d6 100644
--- a/drivers/media/i2c/ad9389b.c
+++ b/drivers/media/i2c/ad9389b.c
@@ -66,11 +66,6 @@
 **********************************************************************
 */
 
-struct i2c_reg_value {
-	u8 reg;
-	u8 value;
-};
-
 struct ad9389b_state_edid {
 	/* total number of blocks */
 	u32 blocks;
@@ -143,14 +138,14 @@
 		if (ret == 0)
 			return 0;
 	}
-	v4l2_err(sd, "I2C Write Problem\n");
+	v4l2_err(sd, "%s: failed reg 0x%x, val 0x%x\n", __func__, reg, val);
 	return ret;
 }
 
 /* To set specific bits in the register, a clear-mask is given (to be AND-ed),
    and then the value-mask (to be OR-ed). */
 static inline void ad9389b_wr_and_or(struct v4l2_subdev *sd, u8 reg,
-						u8 clr_mask, u8 val_mask)
+				     u8 clr_mask, u8 val_mask)
 {
 	ad9389b_wr(sd, reg, (ad9389b_rd(sd, reg) & clr_mask) | val_mask);
 }
@@ -321,12 +316,12 @@
 	struct ad9389b_state *state = get_ad9389b_state(sd);
 
 	v4l2_dbg(1, debug, sd,
-		"%s: ctrl id: %d, ctrl->val %d\n", __func__, ctrl->id, ctrl->val);
+		 "%s: ctrl id: %d, ctrl->val %d\n", __func__, ctrl->id, ctrl->val);
 
 	if (state->hdmi_mode_ctrl == ctrl) {
 		/* Set HDMI or DVI-D */
 		ad9389b_wr_and_or(sd, 0xaf, 0xfd,
-				ctrl->val == V4L2_DV_TX_MODE_HDMI ? 0x02 : 0x00);
+				  ctrl->val == V4L2_DV_TX_MODE_HDMI ? 0x02 : 0x00);
 		return 0;
 	}
 	if (state->rgb_quantization_range_ctrl == ctrl)
@@ -387,61 +382,57 @@
 	v4l2_info(sd, "chip revision %d\n", state->chip_revision);
 	v4l2_info(sd, "power %s\n", state->power_on ? "on" : "off");
 	v4l2_info(sd, "%s hotplug, %s Rx Sense, %s EDID (%d block(s))\n",
-			(ad9389b_rd(sd, 0x42) & MASK_AD9389B_HPD_DETECT) ?
-							"detected" : "no",
-			(ad9389b_rd(sd, 0x42) & MASK_AD9389B_MSEN_DETECT) ?
-							"detected" : "no",
-			edid->segments ? "found" : "no", edid->blocks);
-	if (state->have_monitor) {
-		v4l2_info(sd, "%s output %s\n",
-				  (ad9389b_rd(sd, 0xaf) & 0x02) ?
-				  "HDMI" : "DVI-D",
-				  (ad9389b_rd(sd, 0xa1) & 0x3c) ?
-				  "disabled" : "enabled");
-	}
+		  (ad9389b_rd(sd, 0x42) & MASK_AD9389B_HPD_DETECT) ?
+		  "detected" : "no",
+		  (ad9389b_rd(sd, 0x42) & MASK_AD9389B_MSEN_DETECT) ?
+		  "detected" : "no",
+		  edid->segments ? "found" : "no", edid->blocks);
+	v4l2_info(sd, "%s output %s\n",
+		  (ad9389b_rd(sd, 0xaf) & 0x02) ?
+		  "HDMI" : "DVI-D",
+		  (ad9389b_rd(sd, 0xa1) & 0x3c) ?
+		  "disabled" : "enabled");
 	v4l2_info(sd, "ad9389b: %s\n", (ad9389b_rd(sd, 0xb8) & 0x40) ?
-					"encrypted" : "no encryption");
+		  "encrypted" : "no encryption");
 	v4l2_info(sd, "state: %s, error: %s, detect count: %u, msk/irq: %02x/%02x\n",
-			states[ad9389b_rd(sd, 0xc8) & 0xf],
-			errors[ad9389b_rd(sd, 0xc8) >> 4],
-			state->edid_detect_counter,
-			ad9389b_rd(sd, 0x94), ad9389b_rd(sd, 0x96));
+		  states[ad9389b_rd(sd, 0xc8) & 0xf],
+		  errors[ad9389b_rd(sd, 0xc8) >> 4],
+		  state->edid_detect_counter,
+		  ad9389b_rd(sd, 0x94), ad9389b_rd(sd, 0x96));
 	manual_gear = ad9389b_rd(sd, 0x98) & 0x80;
 	v4l2_info(sd, "ad9389b: RGB quantization: %s range\n",
-			ad9389b_rd(sd, 0x3b) & 0x01 ? "limited" : "full");
+		  ad9389b_rd(sd, 0x3b) & 0x01 ? "limited" : "full");
 	v4l2_info(sd, "ad9389b: %s gear %d\n",
 		  manual_gear ? "manual" : "automatic",
 		  manual_gear ? ((ad9389b_rd(sd, 0x98) & 0x70) >> 4) :
-				((ad9389b_rd(sd, 0x9e) & 0x0e) >> 1));
-	if (state->have_monitor) {
-		if (ad9389b_rd(sd, 0xaf) & 0x02) {
-			/* HDMI only */
-			u8 manual_cts = ad9389b_rd(sd, 0x0a) & 0x80;
-			u32 N = (ad9389b_rd(sd, 0x01) & 0xf) << 16 |
-				 ad9389b_rd(sd, 0x02) << 8 |
-				 ad9389b_rd(sd, 0x03);
-			u8 vic_detect = ad9389b_rd(sd, 0x3e) >> 2;
-			u8 vic_sent = ad9389b_rd(sd, 0x3d) & 0x3f;
-			u32 CTS;
+		  ((ad9389b_rd(sd, 0x9e) & 0x0e) >> 1));
+	if (ad9389b_rd(sd, 0xaf) & 0x02) {
+		/* HDMI only */
+		u8 manual_cts = ad9389b_rd(sd, 0x0a) & 0x80;
+		u32 N = (ad9389b_rd(sd, 0x01) & 0xf) << 16 |
+			ad9389b_rd(sd, 0x02) << 8 |
+			ad9389b_rd(sd, 0x03);
+		u8 vic_detect = ad9389b_rd(sd, 0x3e) >> 2;
+		u8 vic_sent = ad9389b_rd(sd, 0x3d) & 0x3f;
+		u32 CTS;
 
-			if (manual_cts)
-				CTS = (ad9389b_rd(sd, 0x07) & 0xf) << 16 |
-				       ad9389b_rd(sd, 0x08) << 8 |
-				       ad9389b_rd(sd, 0x09);
-			else
-				CTS = (ad9389b_rd(sd, 0x04) & 0xf) << 16 |
-				       ad9389b_rd(sd, 0x05) << 8 |
-				       ad9389b_rd(sd, 0x06);
-			N = (ad9389b_rd(sd, 0x01) & 0xf) << 16 |
-			     ad9389b_rd(sd, 0x02) << 8 |
-			     ad9389b_rd(sd, 0x03);
+		if (manual_cts)
+			CTS = (ad9389b_rd(sd, 0x07) & 0xf) << 16 |
+			      ad9389b_rd(sd, 0x08) << 8 |
+			      ad9389b_rd(sd, 0x09);
+		else
+			CTS = (ad9389b_rd(sd, 0x04) & 0xf) << 16 |
+			      ad9389b_rd(sd, 0x05) << 8 |
+			      ad9389b_rd(sd, 0x06);
+		N = (ad9389b_rd(sd, 0x01) & 0xf) << 16 |
+		    ad9389b_rd(sd, 0x02) << 8 |
+		    ad9389b_rd(sd, 0x03);
 
-			v4l2_info(sd, "ad9389b: CTS %s mode: N %d, CTS %d\n",
-				manual_cts ? "manual" : "automatic", N, CTS);
+		v4l2_info(sd, "ad9389b: CTS %s mode: N %d, CTS %d\n",
+			  manual_cts ? "manual" : "automatic", N, CTS);
 
-			v4l2_info(sd, "ad9389b: VIC: detected %d, sent %d\n",
-				vic_detect, vic_sent);
-		}
+		v4l2_info(sd, "ad9389b: VIC: detected %d, sent %d\n",
+			  vic_detect, vic_sent);
 	}
 	if (state->dv_timings.type == V4L2_DV_BT_656_1120)
 		v4l2_print_dv_timings(sd->name, "timings: ",
@@ -486,7 +477,7 @@
 	}
 	if (i > 1)
 		v4l2_dbg(1, debug, sd,
-			"needed %d retries to powerup the ad9389b\n", i);
+			 "needed %d retries to powerup the ad9389b\n", i);
 
 	/* Select chip: AD9389B */
 	ad9389b_wr_and_or(sd, 0xba, 0xef, 0x10);
@@ -556,14 +547,16 @@
 	irq_status = ad9389b_rd(sd, 0x96);
 	/* clear detected interrupts */
 	ad9389b_wr(sd, 0x96, irq_status);
+	/* enable interrupts */
+	ad9389b_set_isr(sd, true);
 
-	if (irq_status & (MASK_AD9389B_HPD_INT | MASK_AD9389B_MSEN_INT))
+	v4l2_dbg(1, debug, sd, "%s: irq_status 0x%x\n", __func__, irq_status);
+
+	if (irq_status & (MASK_AD9389B_HPD_INT))
 		ad9389b_check_monitor_present_status(sd);
 	if (irq_status & MASK_AD9389B_EDID_RDY_INT)
 		ad9389b_check_edid_status(sd);
 
-	/* enable interrupts */
-	ad9389b_set_isr(sd, true);
 	*handled = true;
 	return 0;
 }
@@ -599,7 +592,7 @@
 	if (edid->blocks + edid->start_block >= state->edid.segments * 2)
 		edid->blocks = state->edid.segments * 2 - edid->start_block;
 	memcpy(edid->edid, &state->edid.data[edid->start_block * 128],
-				128 * edid->blocks);
+	       128 * edid->blocks);
 	return 0;
 }
 
@@ -612,8 +605,6 @@
 /* Enable/disable ad9389b output */
 static int ad9389b_s_stream(struct v4l2_subdev *sd, int enable)
 {
-	struct ad9389b_state *state = get_ad9389b_state(sd);
-
 	v4l2_dbg(1, debug, sd, "%s: %sable\n", __func__, (enable ? "en" : "dis"));
 
 	ad9389b_wr_and_or(sd, 0xa1, ~0x3c, (enable ? 0 : 0x3c));
@@ -621,7 +612,6 @@
 		ad9389b_check_monitor_present_status(sd);
 	} else {
 		ad9389b_s_power(sd, 0);
-		state->have_monitor = false;
 	}
 	return 0;
 }
@@ -686,14 +676,14 @@
 }
 
 static int ad9389b_enum_dv_timings(struct v4l2_subdev *sd,
-			struct v4l2_enum_dv_timings *timings)
+				   struct v4l2_enum_dv_timings *timings)
 {
 	return v4l2_enum_dv_timings_cap(timings, &ad9389b_timings_cap,
 			NULL, NULL);
 }
 
 static int ad9389b_dv_timings_cap(struct v4l2_subdev *sd,
-			struct v4l2_dv_timings_cap *cap)
+				  struct v4l2_dv_timings_cap *cap)
 {
 	*cap = ad9389b_timings_cap;
 	return 0;
@@ -724,15 +714,15 @@
 	u32 N;
 
 	switch (freq) {
-	case 32000: N = 4096; break;
-	case 44100: N = 6272; break;
-	case 48000: N = 6144; break;
-	case 88200: N = 12544; break;
-	case 96000: N = 12288; break;
+	case 32000:  N = 4096;  break;
+	case 44100:  N = 6272;  break;
+	case 48000:  N = 6144;  break;
+	case 88200:  N = 12544; break;
+	case 96000:  N = 12288; break;
 	case 176400: N = 25088; break;
 	case 192000: N = 24576; break;
 	default:
-		return -EINVAL;
+	     return -EINVAL;
 	}
 
 	/* Set N (used with CTS to regenerate the audio clock) */
@@ -748,15 +738,15 @@
 	u32 i2s_sf;
 
 	switch (freq) {
-	case 32000: i2s_sf = 0x30; break;
-	case 44100: i2s_sf = 0x00; break;
-	case 48000: i2s_sf = 0x20; break;
-	case 88200: i2s_sf = 0x80; break;
-	case 96000: i2s_sf = 0xa0; break;
+	case 32000:  i2s_sf = 0x30; break;
+	case 44100:  i2s_sf = 0x00; break;
+	case 48000:  i2s_sf = 0x20; break;
+	case 88200:  i2s_sf = 0x80; break;
+	case 96000:  i2s_sf = 0xa0; break;
 	case 176400: i2s_sf = 0xc0; break;
 	case 192000: i2s_sf = 0xe0; break;
 	default:
-		return -EINVAL;
+	     return -EINVAL;
 	}
 
 	/* Set sampling frequency for I2S audio to 48 kHz */
@@ -800,7 +790,7 @@
 
 /* ----------------------------------------------------------------------- */
 static void ad9389b_dbg_dump_edid(int lvl, int debug, struct v4l2_subdev *sd,
-							int segment, u8 *buf)
+				  int segment, u8 *buf)
 {
 	int i, j;
 
@@ -826,8 +816,8 @@
 static void ad9389b_edid_handler(struct work_struct *work)
 {
 	struct delayed_work *dwork = to_delayed_work(work);
-	struct ad9389b_state *state = container_of(dwork,
-			struct ad9389b_state, edid_handler);
+	struct ad9389b_state *state =
+		container_of(dwork, struct ad9389b_state, edid_handler);
 	struct v4l2_subdev *sd = &state->sd;
 	struct ad9389b_edid_detect ed;
 
@@ -845,11 +835,10 @@
 		if (state->edid.read_retries) {
 			state->edid.read_retries--;
 			v4l2_dbg(1, debug, sd, "%s: edid read failed\n", __func__);
-			state->have_monitor = false;
 			ad9389b_s_power(sd, false);
 			ad9389b_s_power(sd, true);
 			queue_delayed_work(state->work_queue,
-					&state->edid_handler, EDID_DELAY);
+					   &state->edid_handler, EDID_DELAY);
 			return;
 		}
 	}
@@ -915,49 +904,35 @@
 	v4l2_subdev_notify(sd, AD9389B_MONITOR_DETECT, (void *)&mdt);
 }
 
-static void ad9389b_check_monitor_present_status(struct v4l2_subdev *sd)
+static void ad9389b_update_monitor_present_status(struct v4l2_subdev *sd)
 {
 	struct ad9389b_state *state = get_ad9389b_state(sd);
 	/* read hotplug and rx-sense state */
 	u8 status = ad9389b_rd(sd, 0x42);
 
 	v4l2_dbg(1, debug, sd, "%s: status: 0x%x%s%s\n",
-			 __func__,
-			 status,
-			 status & MASK_AD9389B_HPD_DETECT ? ", hotplug" : "",
-			 status & MASK_AD9389B_MSEN_DETECT ? ", rx-sense" : "");
+		 __func__,
+		 status,
+		 status & MASK_AD9389B_HPD_DETECT ? ", hotplug" : "",
+		 status & MASK_AD9389B_MSEN_DETECT ? ", rx-sense" : "");
 
-	if ((status & MASK_AD9389B_HPD_DETECT) &&
-	    ((status & MASK_AD9389B_MSEN_DETECT) || state->edid.segments)) {
-		v4l2_dbg(1, debug, sd,
-				"%s: hotplug and (rx-sense or edid)\n", __func__);
-		if (!state->have_monitor) {
-			v4l2_dbg(1, debug, sd, "%s: monitor detected\n", __func__);
-			state->have_monitor = true;
-			ad9389b_set_isr(sd, true);
-			if (!ad9389b_s_power(sd, true)) {
-				v4l2_dbg(1, debug, sd,
-					"%s: monitor detected, powerup failed\n", __func__);
-				return;
-			}
-			ad9389b_setup(sd);
-			ad9389b_notify_monitor_detect(sd);
-			state->edid.read_retries = EDID_MAX_RETRIES;
-			queue_delayed_work(state->work_queue,
-					&state->edid_handler, EDID_DELAY);
-		}
-	} else if (status & MASK_AD9389B_HPD_DETECT) {
+	if (status & MASK_AD9389B_HPD_DETECT) {
 		v4l2_dbg(1, debug, sd, "%s: hotplug detected\n", __func__);
+		state->have_monitor = true;
+		if (!ad9389b_s_power(sd, true)) {
+			v4l2_dbg(1, debug, sd,
+				 "%s: monitor detected, powerup failed\n", __func__);
+			return;
+		}
+		ad9389b_setup(sd);
+		ad9389b_notify_monitor_detect(sd);
 		state->edid.read_retries = EDID_MAX_RETRIES;
 		queue_delayed_work(state->work_queue,
-				&state->edid_handler, EDID_DELAY);
+				   &state->edid_handler, EDID_DELAY);
 	} else if (!(status & MASK_AD9389B_HPD_DETECT)) {
 		v4l2_dbg(1, debug, sd, "%s: hotplug not detected\n", __func__);
-		if (state->have_monitor) {
-			v4l2_dbg(1, debug, sd, "%s: monitor not detected\n", __func__);
-			state->have_monitor = false;
-			ad9389b_notify_monitor_detect(sd);
-		}
+		state->have_monitor = false;
+		ad9389b_notify_monitor_detect(sd);
 		ad9389b_s_power(sd, false);
 		memset(&state->edid, 0, sizeof(struct ad9389b_state_edid));
 	}
@@ -966,6 +941,35 @@
 	v4l2_ctrl_s_ctrl(state->hotplug_ctrl, ad9389b_have_hotplug(sd) ? 0x1 : 0x0);
 	v4l2_ctrl_s_ctrl(state->rx_sense_ctrl, ad9389b_have_rx_sense(sd) ? 0x1 : 0x0);
 	v4l2_ctrl_s_ctrl(state->have_edid0_ctrl, state->edid.segments ? 0x1 : 0x0);
+
+	/* update with setting from ctrls */
+	ad9389b_s_ctrl(state->rgb_quantization_range_ctrl);
+	ad9389b_s_ctrl(state->hdmi_mode_ctrl);
+}
+
+static void ad9389b_check_monitor_present_status(struct v4l2_subdev *sd)
+{
+	struct ad9389b_state *state = get_ad9389b_state(sd);
+	int retry = 0;
+
+	ad9389b_update_monitor_present_status(sd);
+
+	/*
+	 * Rapid toggling of the hotplug may leave the chip powered off,
+	 * even if we think it is on. In that case reset and power up again.
+	 */
+	while (state->power_on && (ad9389b_rd(sd, 0x41) & 0x40)) {
+		if (++retry > 5) {
+			v4l2_err(sd, "retried %d times, give up\n", retry);
+			return;
+		}
+		v4l2_dbg(1, debug, sd, "%s: reset and re-check status (%d)\n", __func__, retry);
+		ad9389b_notify_monitor_detect(sd);
+		cancel_delayed_work_sync(&state->edid_handler);
+		memset(&state->edid, 0, sizeof(struct ad9389b_state_edid));
+		ad9389b_s_power(sd, false);
+		ad9389b_update_monitor_present_status(sd);
+	}
 }
 
 static bool edid_block_verify_crc(u8 *edid_block)
@@ -978,7 +982,7 @@
 	return sum == 0;
 }
 
-static bool edid_segment_verify_crc(struct v4l2_subdev *sd, u32 segment)
+static bool edid_verify_crc(struct v4l2_subdev *sd, u32 segment)
 {
 	struct ad9389b_state *state = get_ad9389b_state(sd);
 	u32 blocks = state->edid.blocks;
@@ -992,6 +996,25 @@
 	return false;
 }
 
+static bool edid_verify_header(struct v4l2_subdev *sd, u32 segment)
+{
+	static const u8 hdmi_header[] = {
+		0x00, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x00
+	};
+	struct ad9389b_state *state = get_ad9389b_state(sd);
+	u8 *data = state->edid.data;
+	int i;
+
+	if (segment)
+		return true;
+
+	for (i = 0; i < ARRAY_SIZE(hdmi_header); i++)
+		if (data[i] != hdmi_header[i])
+			return false;
+
+	return true;
+}
+
 static bool ad9389b_check_edid_status(struct v4l2_subdev *sd)
 {
 	struct ad9389b_state *state = get_ad9389b_state(sd);
@@ -1000,7 +1023,7 @@
 	u8 edidRdy = ad9389b_rd(sd, 0xc5);
 
 	v4l2_dbg(1, debug, sd, "%s: edid ready (retries: %d)\n",
-			 __func__, EDID_MAX_RETRIES - state->edid.read_retries);
+		 __func__, EDID_MAX_RETRIES - state->edid.read_retries);
 
 	if (!(edidRdy & MASK_AD9389B_EDID_RDY))
 		return false;
@@ -1013,16 +1036,16 @@
 	v4l2_dbg(1, debug, sd, "%s: got segment %d\n", __func__, segment);
 	ad9389b_edid_rd(sd, 256, &state->edid.data[segment * 256]);
 	ad9389b_dbg_dump_edid(2, debug, sd, segment,
-			&state->edid.data[segment * 256]);
+			      &state->edid.data[segment * 256]);
 	if (segment == 0) {
 		state->edid.blocks = state->edid.data[0x7e] + 1;
 		v4l2_dbg(1, debug, sd, "%s: %d blocks in total\n",
-				__func__, state->edid.blocks);
+			 __func__, state->edid.blocks);
 	}
-	if (!edid_segment_verify_crc(sd, segment)) {
+	if (!edid_verify_crc(sd, segment) ||
+	    !edid_verify_header(sd, segment)) {
 		/* edid crc error, force reread of edid segment */
-		v4l2_err(sd, "%s: edid crc error\n", __func__);
-		state->have_monitor = false;
+		v4l2_err(sd, "%s: edid crc or header error\n", __func__);
 		ad9389b_s_power(sd, false);
 		ad9389b_s_power(sd, true);
 		return false;
@@ -1032,12 +1055,12 @@
 	if (((state->edid.data[0x7e] >> 1) + 1) > state->edid.segments) {
 		/* Request next EDID segment */
 		v4l2_dbg(1, debug, sd, "%s: request segment %d\n",
-				__func__, state->edid.segments);
+			 __func__, state->edid.segments);
 		ad9389b_wr(sd, 0xc9, 0xf);
 		ad9389b_wr(sd, 0xc4, state->edid.segments);
 		state->edid.read_retries = EDID_MAX_RETRIES;
 		queue_delayed_work(state->work_queue,
-				&state->edid_handler, EDID_DELAY);
+				   &state->edid_handler, EDID_DELAY);
 		return false;
 	}
 
@@ -1081,7 +1104,7 @@
 		return -EIO;
 
 	v4l_dbg(1, debug, client, "detecting ad9389b client on address 0x%x\n",
-			client->addr << 1);
+		client->addr << 1);
 
 	state = devm_kzalloc(&client->dev, sizeof(*state), GFP_KERNEL);
 	if (!state)
@@ -1140,7 +1163,7 @@
 		goto err_entity;
 	}
 	v4l2_dbg(1, debug, sd, "reg 0x41 0x%x, chip version (reg 0x00) 0x%x\n",
-			ad9389b_rd(sd, 0x41), state->chip_revision);
+		 ad9389b_rd(sd, 0x41), state->chip_revision);
 
 	state->edid_i2c_client = i2c_new_dummy(client->adapter, (0x7e>>1));
 	if (state->edid_i2c_client == NULL) {
@@ -1163,7 +1186,7 @@
 	ad9389b_set_isr(sd, true);
 
 	v4l2_info(sd, "%s found @ 0x%x (%s)\n", client->name,
-			  client->addr << 1, client->adapter->name);
+		  client->addr << 1, client->adapter->name);
 	return 0;
 
 err_unreg:
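
Both the ad9389b changes above and the adv7511 changes below add the same two EDID sanity checks: every 128-byte block must sum to zero modulo 256, and block 0 must start with the fixed 8-byte EDID header. A standalone sketch of those checks against synthetic test data:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

static bool edid_block_verify_crc(const uint8_t *block)
{
	uint8_t sum = 0;
	int i;

	for (i = 0; i < 128; i++)
		sum += block[i];
	return sum == 0;
}

static bool edid_verify_header(const uint8_t *block0)
{
	static const uint8_t hdr[] = {
		0x00, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x00
	};

	return memcmp(block0, hdr, sizeof(hdr)) == 0;
}

int main(void)
{
	uint8_t edid[128] = { 0x00, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x00 };
	uint8_t sum = 0;
	int i;

	/* fix up the checksum byte so the block verifies */
	for (i = 0; i < 127; i++)
		sum += edid[i];
	edid[127] = (uint8_t)(0 - sum);

	printf("crc ok: %d, header ok: %d\n",
	       edid_block_verify_crc(edid), edid_verify_header(edid));
	return 0;
}
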
diff --git a/drivers/media/i2c/adv7511.c b/drivers/media/i2c/adv7511.c
index 7c8d971..ee61894 100644
--- a/drivers/media/i2c/adv7511.c
+++ b/drivers/media/i2c/adv7511.c
@@ -452,6 +452,29 @@
 			  errors[adv7511_rd(sd, 0xc8) >> 4], state->edid_detect_counter,
 			  adv7511_rd(sd, 0x94), adv7511_rd(sd, 0x96));
 	v4l2_info(sd, "RGB quantization: %s range\n", adv7511_rd(sd, 0x18) & 0x80 ? "limited" : "full");
+	if (adv7511_rd(sd, 0xaf) & 0x02) {
+		/* HDMI only */
+		u8 manual_cts = adv7511_rd(sd, 0x0a) & 0x80;
+		u32 N = (adv7511_rd(sd, 0x01) & 0xf) << 16 |
+			adv7511_rd(sd, 0x02) << 8 |
+			adv7511_rd(sd, 0x03);
+		u8 vic_detect = adv7511_rd(sd, 0x3e) >> 2;
+		u8 vic_sent = adv7511_rd(sd, 0x3d) & 0x3f;
+		u32 CTS;
+
+		if (manual_cts)
+			CTS = (adv7511_rd(sd, 0x07) & 0xf) << 16 |
+			      adv7511_rd(sd, 0x08) << 8 |
+			      adv7511_rd(sd, 0x09);
+		else
+			CTS = (adv7511_rd(sd, 0x04) & 0xf) << 16 |
+			      adv7511_rd(sd, 0x05) << 8 |
+			      adv7511_rd(sd, 0x06);
+		v4l2_info(sd, "CTS %s mode: N %d, CTS %d\n",
+			  manual_cts ? "manual" : "automatic", N, CTS);
+		v4l2_info(sd, "VIC: detected %d, sent %d\n",
+			  vic_detect, vic_sent);
+	}
 	if (state->dv_timings.type == V4L2_DV_BT_656_1120)
 		v4l2_print_dv_timings(sd->name, "timings: ",
 				&state->dv_timings, false);
@@ -942,26 +965,38 @@
 
 static bool edid_block_verify_crc(uint8_t *edid_block)
 {
-	int i;
 	uint8_t sum = 0;
+	int i;
 
 	for (i = 0; i < 128; i++)
-		sum += *(edid_block + i);
-	return (sum == 0);
+		sum += edid_block[i];
+	return sum == 0;
 }
 
-static bool edid_segment_verify_crc(struct v4l2_subdev *sd, u32 segment)
+static bool edid_verify_crc(struct v4l2_subdev *sd, u32 segment)
 {
 	struct adv7511_state *state = get_adv7511_state(sd);
 	u32 blocks = state->edid.blocks;
 	uint8_t *data = state->edid.data;
 
-	if (edid_block_verify_crc(&data[segment * 256])) {
-		if ((segment + 1) * 2 <= blocks)
-			return edid_block_verify_crc(&data[segment * 256 + 128]);
+	if (!edid_block_verify_crc(&data[segment * 256]))
+		return false;
+	if ((segment + 1) * 2 <= blocks)
+		return edid_block_verify_crc(&data[segment * 256 + 128]);
+	return true;
+}
+
+static bool edid_verify_header(struct v4l2_subdev *sd, u32 segment)
+{
+	static const u8 hdmi_header[] = {
+		0x00, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x00
+	};
+	struct adv7511_state *state = get_adv7511_state(sd);
+	u8 *data = state->edid.data;
+
+	if (segment != 0)
 		return true;
-	}
-	return false;
+	return !memcmp(data, hdmi_header, sizeof(hdmi_header));
 }
 
 static bool adv7511_check_edid_status(struct v4l2_subdev *sd)
@@ -990,9 +1025,10 @@
 			state->edid.blocks = state->edid.data[0x7e] + 1;
 			v4l2_dbg(1, debug, sd, "%s: %d blocks in total\n", __func__, state->edid.blocks);
 		}
-		if (!edid_segment_verify_crc(sd, segment)) {
+		if (!edid_verify_crc(sd, segment) ||
+		    !edid_verify_header(sd, segment)) {
 			/* edid crc error, force reread of edid segment */
-			v4l2_dbg(1, debug, sd, "%s: edid crc error\n", __func__);
+			v4l2_err(sd, "%s: edid crc or header error\n", __func__);
 			state->have_monitor = false;
 			adv7511_s_power(sd, false);
 			adv7511_s_power(sd, true);
@@ -1038,6 +1074,12 @@
 
 	/* clear all interrupts */
 	adv7511_wr(sd, 0x96, 0xff);
+	/*
+	 * Stop HPD from resetting a lot of registers.
+	 * It might leave the chip in a partly uninitialized state,
+	 * in particular with regard to hotplug bounces.
+	 */
+	adv7511_wr_and_or(sd, 0xd6, 0x3f, 0xc0);
 	memset(edid, 0, sizeof(struct adv7511_state_edid));
 	state->have_monitor = false;
 	adv7511_set_isr(sd, false);
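
The N/CTS pairs that the new ad9389b and adv7511 status-logging code prints are related by the standard HDMI audio clock regeneration equation CTS = f_TMDS * N / (128 * fs); the N values in the sampling-frequency switch tables above are the spec-recommended ones (e.g. N = 6144 for 48 kHz). A standalone sketch of that relationship (the formula comes from the HDMI spec, not from this patch):

#include <stdint.h>
#include <stdio.h>

static uint32_t hdmi_cts(uint64_t tmds_hz, uint32_t n, uint32_t fs_hz)
{
	return (uint32_t)(tmds_hz * n / (128ULL * fs_hz));
}

int main(void)
{
	/* 1080p60: 148.5 MHz TMDS clock, 48 kHz audio, N = 6144 */
	printf("CTS = %u\n", hdmi_cts(148500000ULL, 6144, 48000));
	return 0;
}
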
diff --git a/drivers/media/i2c/adv7604.c b/drivers/media/i2c/adv7604.c
index a324106b..71c8570 100644
--- a/drivers/media/i2c/adv7604.c
+++ b/drivers/media/i2c/adv7604.c
@@ -53,8 +53,6 @@
 /* ADV7604 system clock frequency */
 #define ADV7604_fsc (28636360)
 
-#define DIGITAL_INPUT (state->mode == ADV7604_MODE_HDMI)
-
 /*
  **********************************************************************
  *
@@ -67,17 +65,19 @@
 	struct v4l2_subdev sd;
 	struct media_pad pad;
 	struct v4l2_ctrl_handler hdl;
-	enum adv7604_mode mode;
+	enum adv7604_input_port selected_input;
 	struct v4l2_dv_timings timings;
-	u8 edid[256];
-	unsigned edid_blocks;
+	struct {
+		u8 edid[256];
+		u32 present;
+		unsigned blocks;
+	} edid;
+	u16 spa_port_a[2];
 	struct v4l2_fract aspect_ratio;
 	u32 rgb_quantization_range;
 	struct workqueue_struct *work_queues;
 	struct delayed_work delayed_work_enable_hotplug;
-	bool connector_hdmi;
 	bool restart_stdi_once;
-	u32 prev_input_status;
 
 	/* i2c clients */
 	struct i2c_client *i2c_avlink;
@@ -160,6 +160,7 @@
 	V4L2_DV_BT_DMT_1792X1344P60,
 	V4L2_DV_BT_DMT_1856X1392P60,
 	V4L2_DV_BT_DMT_1920X1200P60_RB,
+	V4L2_DV_BT_DMT_1366X768P60_RB,
 	V4L2_DV_BT_DMT_1366X768P60,
 	V4L2_DV_BT_DMT_1920X1080P60,
 	{ },
@@ -507,6 +508,21 @@
 	return 0;
 }
 
+static inline int edid_write_block(struct v4l2_subdev *sd,
+					unsigned len, const u8 *val)
+{
+	struct adv7604_state *state = to_state(sd);
+	int err = 0;
+	int i;
+
+	v4l2_dbg(2, debug, sd, "%s: write EDID block (%d byte)\n", __func__, len);
+
+	for (i = 0; !err && i < len; i += I2C_SMBUS_BLOCK_MAX)
+		err = adv_smbus_write_i2c_block_data(state->i2c_edid, i,
+				I2C_SMBUS_BLOCK_MAX, val + i);
+	return err;
+}
+
 static void adv7604_delayed_work_enable_hotplug(struct work_struct *work)
 {
 	struct delayed_work *dwork = to_delayed_work(work);
@@ -516,48 +532,7 @@
 
 	v4l2_dbg(2, debug, sd, "%s: enable hotplug\n", __func__);
 
-	v4l2_subdev_notify(sd, ADV7604_HOTPLUG, (void *)1);
-}
-
-static inline int edid_write_block(struct v4l2_subdev *sd,
-					unsigned len, const u8 *val)
-{
-	struct i2c_client *client = v4l2_get_subdevdata(sd);
-	struct adv7604_state *state = to_state(sd);
-	int err = 0;
-	int i;
-
-	v4l2_dbg(2, debug, sd, "%s: write EDID block (%d byte)\n", __func__, len);
-
-	v4l2_subdev_notify(sd, ADV7604_HOTPLUG, (void *)0);
-
-	/* Disables I2C access to internal EDID ram from DDC port */
-	rep_write_and_or(sd, 0x77, 0xf0, 0x0);
-
-	for (i = 0; !err && i < len; i += I2C_SMBUS_BLOCK_MAX)
-		err = adv_smbus_write_i2c_block_data(state->i2c_edid, i,
-				I2C_SMBUS_BLOCK_MAX, val + i);
-	if (err)
-		return err;
-
-	/* adv7604 calculates the checksums and enables I2C access to internal
-	   EDID ram from DDC port. */
-	rep_write_and_or(sd, 0x77, 0xf0, 0x1);
-
-	for (i = 0; i < 1000; i++) {
-		if (rep_read(sd, 0x7d) & 1)
-			break;
-		mdelay(1);
-	}
-	if (i == 1000) {
-		v4l_err(client, "error enabling edid\n");
-		return -EIO;
-	}
-
-	/* enable hotplug after 100 ms */
-	queue_delayed_work(state->work_queues,
-			&state->delayed_work_enable_hotplug, HZ / 10);
-	return 0;
+	v4l2_subdev_notify(sd, ADV7604_HOTPLUG, (void *)&state->edid.present);
 }
 
 static inline int hdmi_read(struct v4l2_subdev *sd, u8 reg)
@@ -574,6 +549,11 @@
 	return adv_smbus_write_byte_data(state->i2c_hdmi, reg, val);
 }
 
+static inline int hdmi_write_and_or(struct v4l2_subdev *sd, u8 reg, u8 mask, u8 val)
+{
+	return hdmi_write(sd, reg, (hdmi_read(sd, reg) & mask) | val);
+}
+
 static inline int test_read(struct v4l2_subdev *sd, u8 reg)
 {
 	struct adv7604_state *state = to_state(sd);
@@ -623,6 +603,26 @@
 
 /* ----------------------------------------------------------------------- */
 
+static inline bool is_analog_input(struct v4l2_subdev *sd)
+{
+	struct adv7604_state *state = to_state(sd);
+
+	return state->selected_input == ADV7604_INPUT_VGA_RGB ||
+	       state->selected_input == ADV7604_INPUT_VGA_COMP;
+}
+
+static inline bool is_digital_input(struct v4l2_subdev *sd)
+{
+	struct adv7604_state *state = to_state(sd);
+
+	return state->selected_input == ADV7604_INPUT_HDMI_PORT_A ||
+	       state->selected_input == ADV7604_INPUT_HDMI_PORT_B ||
+	       state->selected_input == ADV7604_INPUT_HDMI_PORT_C ||
+	       state->selected_input == ADV7604_INPUT_HDMI_PORT_D;
+}
+
+/* ----------------------------------------------------------------------- */
+
 #ifdef CONFIG_VIDEO_ADV_DEBUG
 static void adv7604_inv_register(struct v4l2_subdev *sd)
 {
@@ -696,45 +696,47 @@
 static int adv7604_s_register(struct v4l2_subdev *sd,
 					const struct v4l2_dbg_register *reg)
 {
+	u8 val = reg->val & 0xff;
+
 	switch (reg->reg >> 8) {
 	case 0:
-		io_write(sd, reg->reg & 0xff, reg->val & 0xff);
+		io_write(sd, reg->reg & 0xff, val);
 		break;
 	case 1:
-		avlink_write(sd, reg->reg & 0xff, reg->val & 0xff);
+		avlink_write(sd, reg->reg & 0xff, val);
 		break;
 	case 2:
-		cec_write(sd, reg->reg & 0xff, reg->val & 0xff);
+		cec_write(sd, reg->reg & 0xff, val);
 		break;
 	case 3:
-		infoframe_write(sd, reg->reg & 0xff, reg->val & 0xff);
+		infoframe_write(sd, reg->reg & 0xff, val);
 		break;
 	case 4:
-		esdp_write(sd, reg->reg & 0xff, reg->val & 0xff);
+		esdp_write(sd, reg->reg & 0xff, val);
 		break;
 	case 5:
-		dpp_write(sd, reg->reg & 0xff, reg->val & 0xff);
+		dpp_write(sd, reg->reg & 0xff, val);
 		break;
 	case 6:
-		afe_write(sd, reg->reg & 0xff, reg->val & 0xff);
+		afe_write(sd, reg->reg & 0xff, val);
 		break;
 	case 7:
-		rep_write(sd, reg->reg & 0xff, reg->val & 0xff);
+		rep_write(sd, reg->reg & 0xff, val);
 		break;
 	case 8:
-		edid_write(sd, reg->reg & 0xff, reg->val & 0xff);
+		edid_write(sd, reg->reg & 0xff, val);
 		break;
 	case 9:
-		hdmi_write(sd, reg->reg & 0xff, reg->val & 0xff);
+		hdmi_write(sd, reg->reg & 0xff, val);
 		break;
 	case 0xa:
-		test_write(sd, reg->reg & 0xff, reg->val & 0xff);
+		test_write(sd, reg->reg & 0xff, val);
 		break;
 	case 0xb:
-		cp_write(sd, reg->reg & 0xff, reg->val & 0xff);
+		cp_write(sd, reg->reg & 0xff, val);
 		break;
 	case 0xc:
-		vdp_write(sd, reg->reg & 0xff, reg->val & 0xff);
+		vdp_write(sd, reg->reg & 0xff, val);
 		break;
 	default:
 		v4l2_info(sd, "Register %03llx not supported\n", reg->reg);
@@ -748,10 +750,13 @@
 static int adv7604_s_detect_tx_5v_ctrl(struct v4l2_subdev *sd)
 {
 	struct adv7604_state *state = to_state(sd);
+	u8 reg_io_6f = io_read(sd, 0x6f);
 
-	/* port A only */
 	return v4l2_ctrl_s_ctrl(state->detect_tx_5v_ctrl,
-				((io_read(sd, 0x6f) & 0x10) >> 4));
+			((reg_io_6f & 0x10) >> 4) |
+			((reg_io_6f & 0x08) >> 2) |
+			(reg_io_6f & 0x04) |
+			((reg_io_6f & 0x02) << 2));
 }
 
 static int find_and_set_predefined_video_timings(struct v4l2_subdev *sd,
@@ -759,12 +764,11 @@
 		const struct adv7604_video_standards *predef_vid_timings,
 		const struct v4l2_dv_timings *timings)
 {
-	struct adv7604_state *state = to_state(sd);
 	int i;
 
 	for (i = 0; predef_vid_timings[i].timings.bt.width; i++) {
 		if (!v4l2_match_dv_timings(timings, &predef_vid_timings[i].timings,
-					DIGITAL_INPUT ? 250000 : 1000000))
+					is_digital_input(sd) ? 250000 : 1000000))
 			continue;
 		io_write(sd, 0x00, predef_vid_timings[i].vid_std); /* video std */
 		io_write(sd, 0x01, (predef_vid_timings[i].v_freq << 4) +
@@ -799,27 +803,22 @@
 	cp_write(sd, 0xab, 0x00);
 	cp_write(sd, 0xac, 0x00);
 
-	switch (state->mode) {
-	case ADV7604_MODE_COMP:
-	case ADV7604_MODE_GR:
+	if (is_analog_input(sd)) {
 		err = find_and_set_predefined_video_timings(sd,
 				0x01, adv7604_prim_mode_comp, timings);
 		if (err)
 			err = find_and_set_predefined_video_timings(sd,
 					0x02, adv7604_prim_mode_gr, timings);
-		break;
-	case ADV7604_MODE_HDMI:
+	} else if (is_digital_input(sd)) {
 		err = find_and_set_predefined_video_timings(sd,
 				0x05, adv7604_prim_mode_hdmi_comp, timings);
 		if (err)
 			err = find_and_set_predefined_video_timings(sd,
 					0x06, adv7604_prim_mode_hdmi_gr, timings);
-		break;
-	default:
-		v4l2_dbg(2, debug, sd, "%s: Unknown mode %d\n",
-				__func__, state->mode);
+	} else {
+		v4l2_dbg(2, debug, sd, "%s: Unknown port %d selected\n",
+				__func__, state->selected_input);
 		err = -1;
-		break;
 	}
 
 
@@ -846,9 +845,7 @@
 
 	v4l2_dbg(2, debug, sd, "%s\n", __func__);
 
-	switch (state->mode) {
-	case ADV7604_MODE_COMP:
-	case ADV7604_MODE_GR:
+	if (is_analog_input(sd)) {
 		/* auto graphics */
 		io_write(sd, 0x00, 0x07); /* video std */
 		io_write(sd, 0x01, 0x02); /* prim mode */
@@ -858,33 +855,28 @@
 		/* Should only be set in auto-graphics mode [REF_02, p. 91-92] */
 		/* setup PLL_DIV_MAN_EN and PLL_DIV_RATIO */
 		/* IO-map reg. 0x16 and 0x17 should be written in sequence */
-		if (adv_smbus_write_i2c_block_data(client, 0x16, 2, pll)) {
+		if (adv_smbus_write_i2c_block_data(client, 0x16, 2, pll))
 			v4l2_err(sd, "writing to reg 0x16 and 0x17 failed\n");
-			break;
-		}
 
 		/* active video - horizontal timing */
 		cp_write(sd, 0xa2, (cp_start_sav >> 4) & 0xff);
 		cp_write(sd, 0xa3, ((cp_start_sav & 0x0f) << 4) |
-					((cp_start_eav >> 8) & 0x0f));
+				   ((cp_start_eav >> 8) & 0x0f));
 		cp_write(sd, 0xa4, cp_start_eav & 0xff);
 
 		/* active video - vertical timing */
 		cp_write(sd, 0xa5, (cp_start_vbi >> 4) & 0xff);
 		cp_write(sd, 0xa6, ((cp_start_vbi & 0xf) << 4) |
-					((cp_end_vbi >> 8) & 0xf));
+				   ((cp_end_vbi >> 8) & 0xf));
 		cp_write(sd, 0xa7, cp_end_vbi & 0xff);
-		break;
-	case ADV7604_MODE_HDMI:
+	} else if (is_digital_input(sd)) {
 		/* set default prim_mode/vid_std for HDMI
 		   according to [REF_03, c. 4.2] */
 		io_write(sd, 0x00, 0x02); /* video std */
 		io_write(sd, 0x01, 0x06); /* prim mode */
-		break;
-	default:
-		v4l2_dbg(2, debug, sd, "%s: Unknown mode %d\n",
-				__func__, state->mode);
-		break;
+	} else {
+		v4l2_dbg(2, debug, sd, "%s: Unknown port %d selected\n",
+				__func__, state->selected_input);
 	}
 
 	cp_write(sd, 0x8f, (ch1_fr_ll >> 8) & 0x7);
@@ -893,43 +885,149 @@
 	cp_write(sd, 0xac, (height & 0x0f) << 4);
 }
 
+static void adv7604_set_offset(struct v4l2_subdev *sd, bool auto_offset, u16 offset_a, u16 offset_b, u16 offset_c)
+{
+	struct adv7604_state *state = to_state(sd);
+	u8 offset_buf[4];
+
+	if (auto_offset) {
+		offset_a = 0x3ff;
+		offset_b = 0x3ff;
+		offset_c = 0x3ff;
+	}
+
+	v4l2_dbg(2, debug, sd, "%s: %s offset: a = 0x%x, b = 0x%x, c = 0x%x\n",
+			__func__, auto_offset ? "Auto" : "Manual",
+			offset_a, offset_b, offset_c);
+
+	offset_buf[0] = (cp_read(sd, 0x77) & 0xc0) | ((offset_a & 0x3f0) >> 4);
+	offset_buf[1] = ((offset_a & 0x00f) << 4) | ((offset_b & 0x3c0) >> 6);
+	offset_buf[2] = ((offset_b & 0x03f) << 2) | ((offset_c & 0x300) >> 8);
+	offset_buf[3] = offset_c & 0x0ff;
+
+	/* Registers must be written in this order with no i2c access in between */
+	if (adv_smbus_write_i2c_block_data(state->i2c_cp, 0x77, 4, offset_buf))
+		v4l2_err(sd, "%s: i2c error writing to CP reg 0x77, 0x78, 0x79, 0x7a\n", __func__);
+}
+
+static void adv7604_set_gain(struct v4l2_subdev *sd, bool auto_gain, u16 gain_a, u16 gain_b, u16 gain_c)
+{
+	struct adv7604_state *state = to_state(sd);
+	u8 gain_buf[4];
+	u8 gain_man = 1;
+	u8 agc_mode_man = 1;
+
+	if (auto_gain) {
+		gain_man = 0;
+		agc_mode_man = 0;
+		gain_a = 0x100;
+		gain_b = 0x100;
+		gain_c = 0x100;
+	}
+
+	v4l2_dbg(2, debug, sd, "%s: %s gain: a = 0x%x, b = 0x%x, c = 0x%x\n",
+			__func__, auto_gain ? "Auto" : "Manual",
+			gain_a, gain_b, gain_c);
+
+	gain_buf[0] = ((gain_man << 7) | (agc_mode_man << 6) | ((gain_a & 0x3f0) >> 4));
+	gain_buf[1] = (((gain_a & 0x00f) << 4) | ((gain_b & 0x3c0) >> 6));
+	gain_buf[2] = (((gain_b & 0x03f) << 2) | ((gain_c & 0x300) >> 8));
+	gain_buf[3] = ((gain_c & 0x0ff));
+
+	/* Registers must be written in this order with no i2c access in between */
+	if (adv_smbus_write_i2c_block_data(state->i2c_cp, 0x73, 4, gain_buf))
+		v4l2_err(sd, "%s: i2c error writing to CP reg 0x73, 0x74, 0x75, 0x76\n", __func__);
+}
+
 static void set_rgb_quantization_range(struct v4l2_subdev *sd)
 {
 	struct adv7604_state *state = to_state(sd);
+	bool rgb_output = io_read(sd, 0x02) & 0x02;
+	bool hdmi_signal = hdmi_read(sd, 0x05) & 0x80;
+
+	v4l2_dbg(2, debug, sd, "%s: RGB quantization range: %d, RGB out: %d, HDMI: %d\n",
+			__func__, state->rgb_quantization_range,
+			rgb_output, hdmi_signal);
+
+	adv7604_set_gain(sd, true, 0x0, 0x0, 0x0);
+	adv7604_set_offset(sd, true, 0x0, 0x0, 0x0);
 
 	switch (state->rgb_quantization_range) {
 	case V4L2_DV_RGB_RANGE_AUTO:
-		/* automatic */
-		if (DIGITAL_INPUT && !(hdmi_read(sd, 0x05) & 0x80)) {
-			/* receiving DVI-D signal */
+		if (state->selected_input == ADV7604_INPUT_VGA_RGB) {
+			/* Receiving analog RGB signal
+			 * Set RGB full range (0-255) */
+			io_write_and_or(sd, 0x02, 0x0f, 0x10);
+			break;
+		}
 
-			/* ADV7604 selects RGB limited range regardless of
-			   input format (CE/IT) in automatic mode */
-			if (state->timings.bt.standards & V4L2_DV_BT_STD_CEA861) {
-				/* RGB limited range (16-235) */
-				io_write_and_or(sd, 0x02, 0x0f, 0x00);
-
-			} else {
-				/* RGB full range (0-255) */
-				io_write_and_or(sd, 0x02, 0x0f, 0x10);
-			}
-		} else {
-			/* receiving HDMI or analog signal, set automode */
+		if (state->selected_input == ADV7604_INPUT_VGA_COMP) {
+			/* Receiving analog YPbPr signal
+			 * Set automode */
 			io_write_and_or(sd, 0x02, 0x0f, 0xf0);
+			break;
+		}
+
+		if (hdmi_signal) {
+			/* Receiving HDMI signal
+			 * Set automode */
+			io_write_and_or(sd, 0x02, 0x0f, 0xf0);
+			break;
+		}
+
+		/* Receiving DVI-D signal
+		 * ADV7604 selects RGB limited range regardless of
+		 * input format (CE/IT) in automatic mode */
+		if (state->timings.bt.standards & V4L2_DV_BT_STD_CEA861) {
+			/* RGB limited range (16-235) */
+			io_write_and_or(sd, 0x02, 0x0f, 0x00);
+		} else {
+			/* RGB full range (0-255) */
+			io_write_and_or(sd, 0x02, 0x0f, 0x10);
+
+			if (is_digital_input(sd) && rgb_output) {
+				adv7604_set_offset(sd, false, 0x40, 0x40, 0x40);
+			} else {
+				adv7604_set_gain(sd, false, 0xe0, 0xe0, 0xe0);
+				adv7604_set_offset(sd, false, 0x70, 0x70, 0x70);
+			}
 		}
 		break;
 	case V4L2_DV_RGB_RANGE_LIMITED:
+		if (state->selected_input == ADV7604_INPUT_VGA_COMP) {
+			/* YCrCb limited range (16-235) */
+			io_write_and_or(sd, 0x02, 0x0f, 0x20);
+			break;
+		}
+
 		/* RGB limited range (16-235) */
 		io_write_and_or(sd, 0x02, 0x0f, 0x00);
+
 		break;
 	case V4L2_DV_RGB_RANGE_FULL:
+		if (state->selected_input == ADV7604_INPUT_VGA_COMP) {
+			/* YCrCb full range (0-255) */
+			io_write_and_or(sd, 0x02, 0x0f, 0x60);
+			break;
+		}
+
 		/* RGB full range (0-255) */
 		io_write_and_or(sd, 0x02, 0x0f, 0x10);
+
+		if (is_analog_input(sd) || hdmi_signal)
+			break;
+
+		/* Adjust gain/offset for DVI-D signals only */
+		if (rgb_output) {
+			adv7604_set_offset(sd, false, 0x40, 0x40, 0x40);
+		} else {
+			adv7604_set_gain(sd, false, 0xe0, 0xe0, 0xe0);
+			adv7604_set_offset(sd, false, 0x70, 0x70, 0x70);
+		}
 		break;
 	}
 }
 
-
 static int adv7604_s_ctrl(struct v4l2_ctrl *ctrl)
 {
 	struct v4l2_subdev *sd = to_sd(ctrl);
@@ -983,8 +1081,9 @@
 
 static inline bool no_signal_tmds(struct v4l2_subdev *sd)
 {
-	/* TODO port B, C and D */
-	return !(io_read(sd, 0x6a) & 0x10);
+	struct adv7604_state *state = to_state(sd);
+
+	return !(io_read(sd, 0x6a) & (0x10 >> state->selected_input));
 }
 
 static inline bool no_lock_tmds(struct v4l2_subdev *sd)
@@ -1011,7 +1110,6 @@
 
 static inline bool no_signal(struct v4l2_subdev *sd)
 {
-	struct adv7604_state *state = to_state(sd);
 	bool ret;
 
 	ret = no_power(sd);
@@ -1019,7 +1117,7 @@
 	ret |= no_lock_stdi(sd);
 	ret |= no_lock_sspd(sd);
 
-	if (DIGITAL_INPUT) {
+	if (is_digital_input(sd)) {
 		ret |= no_lock_tmds(sd);
 		ret |= no_signal_tmds(sd);
 	}
@@ -1036,13 +1134,11 @@
 
 static int adv7604_g_input_status(struct v4l2_subdev *sd, u32 *status)
 {
-	struct adv7604_state *state = to_state(sd);
-
 	*status = 0;
 	*status |= no_power(sd) ? V4L2_IN_ST_NO_POWER : 0;
 	*status |= no_signal(sd) ? V4L2_IN_ST_NO_SIGNAL : 0;
 	if (no_lock_cp(sd))
-		*status |= DIGITAL_INPUT ? V4L2_IN_ST_NO_SYNC : V4L2_IN_ST_NO_H_LOCK;
+		*status |= is_digital_input(sd) ? V4L2_IN_ST_NO_SYNC : V4L2_IN_ST_NO_H_LOCK;
 
 	v4l2_dbg(1, debug, sd, "%s: status = 0x%x\n", __func__, *status);
 
@@ -1157,13 +1253,11 @@
 static int adv7604_dv_timings_cap(struct v4l2_subdev *sd,
 			struct v4l2_dv_timings_cap *cap)
 {
-	struct adv7604_state *state = to_state(sd);
-
 	cap->type = V4L2_DV_BT_656_1120;
 	cap->bt.max_width = 1920;
 	cap->bt.max_height = 1200;
 	cap->bt.min_pixelclock = 25000000;
-	if (DIGITAL_INPUT)
+	if (is_digital_input(sd))
 		cap->bt.max_pixelclock = 225000000;
 	else
 		cap->bt.max_pixelclock = 170000000;
@@ -1179,12 +1273,11 @@
 static void adv7604_fill_optional_dv_timings_fields(struct v4l2_subdev *sd,
 		struct v4l2_dv_timings *timings)
 {
-	struct adv7604_state *state = to_state(sd);
 	int i;
 
 	for (i = 0; adv7604_timings[i].bt.width; i++) {
 		if (v4l2_match_dv_timings(timings, &adv7604_timings[i],
-					DIGITAL_INPUT ? 250000 : 1000000)) {
+					is_digital_input(sd) ? 250000 : 1000000)) {
 			*timings = adv7604_timings[i];
 			break;
 		}
@@ -1204,6 +1297,7 @@
 	memset(timings, 0, sizeof(struct v4l2_dv_timings));
 
 	if (no_signal(sd)) {
+		state->restart_stdi_once = true;
 		v4l2_dbg(1, debug, sd, "%s: no valid signal\n", __func__);
 		return -ENOLINK;
 	}
@@ -1216,7 +1310,7 @@
 	bt->interlaced = stdi.interlaced ?
 		V4L2_DV_INTERLACED : V4L2_DV_PROGRESSIVE;
 
-	if (DIGITAL_INPUT) {
+	if (is_digital_input(sd)) {
 		uint32_t freq;
 
 		timings->type = V4L2_DV_BT_656_1120;
@@ -1305,8 +1399,8 @@
 		return -ENOLINK;
 	}
 
-	if ((!DIGITAL_INPUT && bt->pixelclock > 170000000) ||
-			(DIGITAL_INPUT && bt->pixelclock > 225000000)) {
+	if ((is_analog_input(sd) && bt->pixelclock > 170000000) ||
+			(is_digital_input(sd) && bt->pixelclock > 225000000)) {
 		v4l2_dbg(1, debug, sd, "%s: pixelclock out of range %d\n",
 				__func__, (u32)bt->pixelclock);
 		return -ERANGE;
@@ -1329,10 +1423,15 @@
 	if (!timings)
 		return -EINVAL;
 
+	if (v4l2_match_dv_timings(&state->timings, timings, 0)) {
+		v4l2_dbg(1, debug, sd, "%s: no change\n", __func__);
+		return 0;
+	}
+
 	bt = &timings->bt;
 
-	if ((!DIGITAL_INPUT && bt->pixelclock > 170000000) ||
-			(DIGITAL_INPUT && bt->pixelclock > 225000000)) {
+	if ((is_analog_input(sd) && bt->pixelclock > 170000000) ||
+			(is_digital_input(sd) && bt->pixelclock > 225000000)) {
 		v4l2_dbg(1, debug, sd, "%s: pixelclock out of range %d\n",
 				__func__, (u32)bt->pixelclock);
 		return -ERANGE;
@@ -1354,7 +1453,6 @@
 
 	set_rgb_quantization_range(sd);
 
-
 	if (debug > 1)
 		v4l2_print_dv_timings(sd->name, "adv7604_s_dv_timings: ",
 				      timings, true);
@@ -1374,30 +1472,24 @@
 {
 	struct adv7604_state *state = to_state(sd);
 
-	switch (state->mode) {
-	case ADV7604_MODE_COMP:
-	case ADV7604_MODE_GR:
-		/* enable */
+	if (is_analog_input(sd)) {
 		io_write(sd, 0x15, 0xb0);   /* Disable Tristate of Pins (no audio) */
-		break;
-	case ADV7604_MODE_HDMI:
-		/* enable */
-		hdmi_write(sd, 0x1a, 0x0a); /* Unmute audio */
+	} else if (is_digital_input(sd)) {
+		hdmi_write_and_or(sd, 0x00, 0xfc, state->selected_input);
 		hdmi_write(sd, 0x01, 0x00); /* Enable HDMI clock terminators */
 		io_write(sd, 0x15, 0xa0);   /* Disable Tristate of Pins */
-		break;
-	default:
-		v4l2_dbg(2, debug, sd, "%s: Unknown mode %d\n",
-				__func__, state->mode);
-		break;
+		hdmi_write_and_or(sd, 0x1a, 0xef, 0x00); /* Unmute audio */
+	} else {
+		v4l2_dbg(2, debug, sd, "%s: Unknown port %d selected\n",
+				__func__, state->selected_input);
 	}
 }
 
 static void disable_input(struct v4l2_subdev *sd)
 {
-	/* disable */
+	hdmi_write_and_or(sd, 0x1a, 0xef, 0x10); /* Mute audio */
+	msleep(16); /* 512 samples with >= 32 kHz sample rate [REF_03, c. 7.16.10] */
 	io_write(sd, 0x15, 0xbe);   /* Tristate all outputs from video core */
-	hdmi_write(sd, 0x1a, 0x1a); /* Mute audio */
 	hdmi_write(sd, 0x01, 0x78); /* Disable HDMI clock terminators */
 }
 
@@ -1405,9 +1497,7 @@
 {
 	struct adv7604_state *state = to_state(sd);
 
-	switch (state->mode) {
-	case ADV7604_MODE_COMP:
-	case ADV7604_MODE_GR:
+	if (is_analog_input(sd)) {
 		/* reset ADI recommended settings for HDMI: */
 		/* "ADV7604 Register Settings Recommendations (rev. 2.5, June 2010)" p. 4. */
 		hdmi_write(sd, 0x0d, 0x04); /* HDMI filter optimization */
@@ -1433,9 +1523,9 @@
 		cp_write(sd, 0x3e, 0x04); /* CP core pre-gain control */
 		cp_write(sd, 0xc3, 0x39); /* CP coast control. Graphics mode */
 		cp_write(sd, 0x40, 0x5c); /* CP core pre-gain control. Graphics mode */
-		break;
+	} else if (is_digital_input(sd)) {
+		hdmi_write(sd, 0x00, state->selected_input & 0x03);
 
-	case ADV7604_MODE_HDMI:
 		/* set ADI recommended settings for HDMI: */
 		/* "ADV7604 Register Settings Recommendations (rev. 2.5, June 2010)" p. 4. */
 		hdmi_write(sd, 0x0d, 0x84); /* HDMI filter optimization */
@@ -1461,12 +1551,9 @@
 		cp_write(sd, 0x3e, 0x00); /* CP core pre-gain control */
 		cp_write(sd, 0xc3, 0x39); /* CP coast control. Graphics mode */
 		cp_write(sd, 0x40, 0x80); /* CP core pre-gain control. Graphics mode */
-
-		break;
-	default:
-		v4l2_dbg(2, debug, sd, "%s: Unknown mode %d\n",
-				__func__, state->mode);
-		break;
+	} else {
+		v4l2_dbg(2, debug, sd, "%s: Unknown port %d selected\n",
+				__func__, state->selected_input);
 	}
 }
 
@@ -1475,9 +1562,13 @@
 {
 	struct adv7604_state *state = to_state(sd);
 
-	v4l2_dbg(2, debug, sd, "%s: input %d", __func__, input);
+	v4l2_dbg(2, debug, sd, "%s: input %d, selected input %d",
+			__func__, input, state->selected_input);
 
-	state->mode = input;
+	if (input == state->selected_input)
+		return 0;
+
+	state->selected_input = input;
 
 	disable_input(sd);
 
@@ -1516,36 +1607,47 @@
 
 static int adv7604_isr(struct v4l2_subdev *sd, u32 status, bool *handled)
 {
-	struct adv7604_state *state = to_state(sd);
-	u8 fmt_change, fmt_change_digital, tx_5v;
-	u32 input_status;
+	const u8 irq_reg_0x43 = io_read(sd, 0x43);
+	const u8 irq_reg_0x6b = io_read(sd, 0x6b);
+	const u8 irq_reg_0x70 = io_read(sd, 0x70);
+	u8 fmt_change_digital;
+	u8 fmt_change;
+	u8 tx_5v;
+
+	if (irq_reg_0x43)
+		io_write(sd, 0x44, irq_reg_0x43);
+	if (irq_reg_0x70)
+		io_write(sd, 0x71, irq_reg_0x70);
+	if (irq_reg_0x6b)
+		io_write(sd, 0x6c, irq_reg_0x6b);
+
+	v4l2_dbg(2, debug, sd, "%s: ", __func__);
 
 	/* format change */
-	fmt_change = io_read(sd, 0x43) & 0x98;
-	if (fmt_change)
-		io_write(sd, 0x44, fmt_change);
-	fmt_change_digital = DIGITAL_INPUT ? (io_read(sd, 0x6b) & 0xc0) : 0;
-	if (fmt_change_digital)
-		io_write(sd, 0x6c, fmt_change_digital);
+	fmt_change = irq_reg_0x43 & 0x98;
+	fmt_change_digital = is_digital_input(sd) ? (irq_reg_0x6b & 0xc0) : 0;
+
 	if (fmt_change || fmt_change_digital) {
 		v4l2_dbg(1, debug, sd,
 			"%s: fmt_change = 0x%x, fmt_change_digital = 0x%x\n",
 			__func__, fmt_change, fmt_change_digital);
 
-		adv7604_g_input_status(sd, &input_status);
-		if (input_status != state->prev_input_status) {
-			v4l2_dbg(1, debug, sd,
-				"%s: input_status = 0x%x, prev_input_status = 0x%x\n",
-				__func__, input_status, state->prev_input_status);
-			state->prev_input_status = input_status;
-			v4l2_subdev_notify(sd, ADV7604_FMT_CHANGE, NULL);
-		}
+		v4l2_subdev_notify(sd, ADV7604_FMT_CHANGE, NULL);
 
 		if (handled)
 			*handled = true;
 	}
+	/* HDMI/DVI mode */
+	if (irq_reg_0x6b & 0x01) {
+		v4l2_dbg(1, debug, sd, "%s: irq %s mode\n", __func__,
+			(io_read(sd, 0x6a) & 0x01) ? "HDMI" : "DVI");
+		set_rgb_quantization_range(sd);
+		if (handled)
+			*handled = true;
+	}
+
 	/* tx 5v detect */
-	tx_5v = io_read(sd, 0x70) & 0x10;
+	tx_5v = io_read(sd, 0x70) & 0x1e;
 	if (tx_5v) {
 		v4l2_dbg(1, debug, sd, "%s: tx_5v: 0x%x\n", __func__, tx_5v);
 		io_write(sd, 0x71, tx_5v);
@@ -1559,55 +1661,178 @@
 static int adv7604_get_edid(struct v4l2_subdev *sd, struct v4l2_subdev_edid *edid)
 {
 	struct adv7604_state *state = to_state(sd);
+	u8 *data = NULL;
 
-	if (edid->pad != 0)
+	if (edid->pad > ADV7604_EDID_PORT_D)
 		return -EINVAL;
 	if (edid->blocks == 0)
 		return -EINVAL;
-	if (edid->start_block >= state->edid_blocks)
+	if (edid->blocks > 2)
 		return -EINVAL;
-	if (edid->start_block + edid->blocks > state->edid_blocks)
-		edid->blocks = state->edid_blocks - edid->start_block;
+	if (edid->start_block > 1)
+		return -EINVAL;
+	if (edid->start_block == 1)
+		edid->blocks = 1;
 	if (!edid->edid)
 		return -EINVAL;
-	memcpy(edid->edid + edid->start_block * 128,
-	       state->edid + edid->start_block * 128,
+
+	if (edid->blocks > state->edid.blocks)
+		edid->blocks = state->edid.blocks;
+
+	switch (edid->pad) {
+	case ADV7604_EDID_PORT_A:
+	case ADV7604_EDID_PORT_B:
+	case ADV7604_EDID_PORT_C:
+	case ADV7604_EDID_PORT_D:
+		if (state->edid.present & (1 << edid->pad))
+			data = state->edid.edid;
+		break;
+	default:
+		return -EINVAL;
+	}
+	if (!data)
+		return -ENODATA;
+
+	memcpy(edid->edid,
+	       data + edid->start_block * 128,
 	       edid->blocks * 128);
 	return 0;
 }
 
+static int get_edid_spa_location(const u8 *edid)
+{
+	u8 d;
+
+	if ((edid[0x7e] != 1) ||
+	    (edid[0x80] != 0x02) ||
+	    (edid[0x81] != 0x03)) {
+		return -1;
+	}
+
+	/* search Vendor Specific Data Block (tag 3) */
+	d = edid[0x82] & 0x7f;
+	if (d > 4) {
+		int i = 0x84;
+		int end = 0x80 + d;
+
+		do {
+			u8 tag = edid[i] >> 5;
+			u8 len = edid[i] & 0x1f;
+
+			if ((tag == 3) && (len >= 5))
+				return i + 4;
+			i += len + 1;
+		} while (i < end);
+	}
+	return -1;
+}
+
 static int adv7604_set_edid(struct v4l2_subdev *sd, struct v4l2_subdev_edid *edid)
 {
 	struct adv7604_state *state = to_state(sd);
+	int spa_loc;
+	int tmp = 0;
 	int err;
+	int i;
 
-	if (edid->pad != 0)
+	if (edid->pad > ADV7604_EDID_PORT_D)
 		return -EINVAL;
 	if (edid->start_block != 0)
 		return -EINVAL;
 	if (edid->blocks == 0) {
-		/* Pull down the hotplug pin */
-		v4l2_subdev_notify(sd, ADV7604_HOTPLUG, (void *)0);
-		/* Disables I2C access to internal EDID ram from DDC port */
-		rep_write_and_or(sd, 0x77, 0xf0, 0x0);
-		state->edid_blocks = 0;
+		/* Disable hotplug and I2C access to EDID RAM from DDC port */
+		state->edid.present &= ~(1 << edid->pad);
+		v4l2_subdev_notify(sd, ADV7604_HOTPLUG, (void *)&state->edid.present);
+		rep_write_and_or(sd, 0x77, 0xf0, state->edid.present);
+
 		/* Fall back to a 16:9 aspect ratio */
 		state->aspect_ratio.numerator = 16;
 		state->aspect_ratio.denominator = 9;
+
+		if (!state->edid.present)
+			state->edid.blocks = 0;
+
+		v4l2_dbg(2, debug, sd, "%s: clear EDID pad %d, edid.present = 0x%x\n",
+				__func__, edid->pad, state->edid.present);
 		return 0;
 	}
-	if (edid->blocks > 2)
+	if (edid->blocks > 2) {
+		edid->blocks = 2;
 		return -E2BIG;
+	}
 	if (!edid->edid)
 		return -EINVAL;
-	memcpy(state->edid, edid->edid, 128 * edid->blocks);
-	state->edid_blocks = edid->blocks;
+
+	v4l2_dbg(2, debug, sd, "%s: write EDID pad %d, edid.present = 0x%x\n",
+			__func__, edid->pad, state->edid.present);
+
+	/* Disable hotplug and I2C access to EDID RAM from DDC port */
+	cancel_delayed_work_sync(&state->delayed_work_enable_hotplug);
+	v4l2_subdev_notify(sd, ADV7604_HOTPLUG, (void *)&tmp);
+	rep_write_and_or(sd, 0x77, 0xf0, 0x00);
+
+	spa_loc = get_edid_spa_location(edid->edid);
+	if (spa_loc < 0)
+		spa_loc = 0xc0; /* Default value [REF_02, p. 116] */
+
+	switch (edid->pad) {
+	case ADV7604_EDID_PORT_A:
+		state->spa_port_a[0] = edid->edid[spa_loc];
+		state->spa_port_a[1] = edid->edid[spa_loc + 1];
+		break;
+	case ADV7604_EDID_PORT_B:
+		rep_write(sd, 0x70, edid->edid[spa_loc]);
+		rep_write(sd, 0x71, edid->edid[spa_loc + 1]);
+		break;
+	case ADV7604_EDID_PORT_C:
+		rep_write(sd, 0x72, edid->edid[spa_loc]);
+		rep_write(sd, 0x73, edid->edid[spa_loc + 1]);
+		break;
+	case ADV7604_EDID_PORT_D:
+		rep_write(sd, 0x74, edid->edid[spa_loc]);
+		rep_write(sd, 0x75, edid->edid[spa_loc + 1]);
+		break;
+	default:
+		return -EINVAL;
+	}
+	rep_write(sd, 0x76, spa_loc & 0xff);
+	rep_write_and_or(sd, 0x77, 0xbf, (spa_loc >> 2) & 0x40);
+
+	edid->edid[spa_loc] = state->spa_port_a[0];
+	edid->edid[spa_loc + 1] = state->spa_port_a[1];
+
+	memcpy(state->edid.edid, edid->edid, 128 * edid->blocks);
+	state->edid.blocks = edid->blocks;
 	state->aspect_ratio = v4l2_calc_aspect_ratio(edid->edid[0x15],
 			edid->edid[0x16]);
-	err = edid_write_block(sd, 128 * edid->blocks, state->edid);
-	if (err < 0)
-		v4l2_err(sd, "error %d writing edid\n", err);
-	return err;
+	state->edid.present |= 1 << edid->pad;
+
+	err = edid_write_block(sd, 128 * edid->blocks, state->edid.edid);
+	if (err < 0) {
+		v4l2_err(sd, "error %d writing edid pad %d\n", err, edid->pad);
+		return err;
+	}
+
+	/* adv7604 calculates the checksums and enables I2C access to internal
+	   EDID RAM from DDC port. */
+	rep_write_and_or(sd, 0x77, 0xf0, state->edid.present);
+
+	for (i = 0; i < 1000; i++) {
+		if (rep_read(sd, 0x7d) & state->edid.present)
+			break;
+		mdelay(1);
+	}
+	if (i == 1000) {
+		v4l2_err(sd, "error enabling edid (0x%x)\n", state->edid.present);
+		return -EIO;
+	}
+
+	/* enable hotplug after 100 ms */
+	queue_delayed_work(state->work_queues,
+			&state->delayed_work_enable_hotplug, HZ / 10);
+	return 0;
 }
 
 /*********** avi info frame CEA-861-E **************/
@@ -1670,7 +1895,7 @@
 	char *input_color_space_txt[16] = {
 		"RGB limited range (16-235)", "RGB full range (0-255)",
 		"YCbCr Bt.601 (16-235)", "YCbCr Bt.709 (16-235)",
-		"XvYCC Bt.601", "XvYCC Bt.709",
+		"xvYCC Bt.601", "xvYCC Bt.709",
 		"YCbCr Bt.601 (0-255)", "YCbCr Bt.709 (0-255)",
 		"invalid", "invalid", "invalid", "invalid", "invalid",
 		"invalid", "invalid", "automatic"
@@ -1689,16 +1914,20 @@
 
 	v4l2_info(sd, "-----Chip status-----\n");
 	v4l2_info(sd, "Chip power: %s\n", no_power(sd) ? "off" : "on");
-	v4l2_info(sd, "Connector type: %s\n", state->connector_hdmi ?
-			"HDMI" : (DIGITAL_INPUT ? "DVI-D" : "DVI-A"));
-	v4l2_info(sd, "EDID: %s\n", ((rep_read(sd, 0x7d) & 0x01) &&
-			(rep_read(sd, 0x77) & 0x01)) ? "enabled" : "disabled ");
+	v4l2_info(sd, "EDID enabled port A: %s, B: %s, C: %s, D: %s\n",
+			((rep_read(sd, 0x7d) & 0x01) ? "Yes" : "No"),
+			((rep_read(sd, 0x7d) & 0x02) ? "Yes" : "No"),
+			((rep_read(sd, 0x7d) & 0x04) ? "Yes" : "No"),
+			((rep_read(sd, 0x7d) & 0x08) ? "Yes" : "No"));
 	v4l2_info(sd, "CEC: %s\n", !!(cec_read(sd, 0x2a) & 0x01) ?
 			"enabled" : "disabled");
 
 	v4l2_info(sd, "-----Signal status-----\n");
-	v4l2_info(sd, "Cable detected (+5V power): %s\n",
-			(io_read(sd, 0x6f) & 0x10) ? "true" : "false");
+	v4l2_info(sd, "Cable detected (+5V power) port A: %s, B: %s, C: %s, D: %s\n",
+			((io_read(sd, 0x6f) & 0x10) ? "Yes" : "No"),
+			((io_read(sd, 0x6f) & 0x08) ? "Yes" : "No"),
+			((io_read(sd, 0x6f) & 0x04) ? "Yes" : "No"),
+			((io_read(sd, 0x6f) & 0x02) ? "Yes" : "No"));
 	v4l2_info(sd, "TMDS signal detected: %s\n",
 			no_signal_tmds(sd) ? "false" : "true");
 	v4l2_info(sd, "TMDS signal locked: %s\n",
@@ -1744,11 +1973,14 @@
 	v4l2_info(sd, "Color space conversion: %s\n",
 			csc_coeff_sel_rb[cp_read(sd, 0xfc) >> 4]);
 
-	if (!DIGITAL_INPUT)
+	if (!is_digital_input(sd))
 		return 0;
 
 	v4l2_info(sd, "-----%s status-----\n", is_hdmi(sd) ? "HDMI" : "DVI-D");
-	v4l2_info(sd, "HDCP encrypted content: %s\n", (hdmi_read(sd, 0x05) & 0x40) ? "true" : "false");
+	v4l2_info(sd, "Digital video port selected: %c\n",
+			(hdmi_read(sd, 0x00) & 0x03) + 'A');
+	v4l2_info(sd, "HDCP encrypted content: %s\n",
+			(hdmi_read(sd, 0x05) & 0x40) ? "true" : "false");
 	v4l2_info(sd, "HDCP keys read: %s%s\n",
 			(hdmi_read(sd, 0x04) & 0x20) ? "yes" : "no",
 			(hdmi_read(sd, 0x04) & 0x10) ? "ERROR" : "");
@@ -1894,10 +2126,16 @@
 					pdata->replicate_av_codes << 1 |
 					pdata->invert_cbcr << 0);
 
-	/* TODO from platform data */
 	cp_write(sd, 0x69, 0x30);   /* Enable CP CSC */
-	io_write(sd, 0x06, 0xa6);   /* positive VS and HS */
-	io_write(sd, 0x14, 0x7f);   /* Drive strength adjusted to max */
+
+	/* VS, HS polarities */
+	io_write(sd, 0x06, 0xa0 | pdata->inv_vs_pol << 2 | pdata->inv_hs_pol << 1);
+
+	/* Adjust drive strength */
+	io_write(sd, 0x14, 0x40 | pdata->dr_str_data << 4 |
+				pdata->dr_str_clk << 2 |
+				pdata->dr_str_sync);
+
 	cp_write(sd, 0xba, (pdata->hdmi_free_run_mode << 1) | 0x01); /* HDMI free run */
 	cp_write(sd, 0xf3, 0xdc); /* Low threshold to enter/exit free run mode */
 	cp_write(sd, 0xf9, 0x23); /*  STDI ch. 1 - LCVS change threshold -
@@ -1907,6 +2145,11 @@
 	cp_write(sd, 0xc9, 0x2d); /* use prim_mode and vid_std as free run resolution
 				     for digital formats */
 
+	/* HDMI audio */
+	hdmi_write_and_or(sd, 0x15, 0xfc, 0x03); /* Mute on FIFO over-/underflow [REF_01, c. 1.2.18] */
+	hdmi_write_and_or(sd, 0x1a, 0xf1, 0x08); /* Wait 1 s before unmute */
+	hdmi_write_and_or(sd, 0x68, 0xf9, 0x06); /* FIFO reset on over-/underflow [REF_01, c. 1.2.19] */
+
 	/* TODO from platform data */
 	afe_write(sd, 0xb5, 0x01);  /* Setting MCLK to 256Fs */
 
@@ -1917,8 +2160,8 @@
 	io_write(sd, 0x40, 0xc2); /* Configure INT1 */
 	io_write(sd, 0x41, 0xd7); /* STDI irq for any change, disable INT2 */
 	io_write(sd, 0x46, 0x98); /* Enable SSPD, STDI and CP unlocked interrupts */
-	io_write(sd, 0x6e, 0xc0); /* Enable V_LOCKED and DE_REGEN_LCK interrupts */
-	io_write(sd, 0x73, 0x10); /* Enable CABLE_DET_A_ST (+5v) interrupt */
+	io_write(sd, 0x6e, 0xc1); /* Enable V_LOCKED, DE_REGEN_LCK, HDMI_MODE interrupts */
+	io_write(sd, 0x73, 0x1e); /* Enable CABLE_DET_{A,B,C,D}_ST (+5v) interrupts */
 
 	return v4l2_ctrl_handler_setup(sd->ctrl_handler);
 }
@@ -1964,6 +2207,8 @@
 static int adv7604_probe(struct i2c_client *client,
 			 const struct i2c_device_id *id)
 {
+	static const struct v4l2_dv_timings cea640x480 =
+		V4L2_DV_BT_CEA_640X480P59_94;
 	struct adv7604_state *state;
 	struct adv7604_platform_data *pdata = client->dev.platform_data;
 	struct v4l2_ctrl_handler *hdl;
@@ -1984,19 +2229,19 @@
 
 	/* initialize variables */
 	state->restart_stdi_once = true;
-	state->prev_input_status = ~0;
+	state->selected_input = ~0;
 
 	/* platform data */
 	if (!pdata) {
 		v4l_err(client, "No platform data!\n");
 		return -ENODEV;
 	}
-	memcpy(&state->pdata, pdata, sizeof(state->pdata));
+	state->pdata = *pdata;
+	state->timings = cea640x480;
 
 	sd = &state->sd;
 	v4l2_i2c_subdev_init(sd, client, &adv7604_ops);
 	sd->flags |= V4L2_SUBDEV_FL_HAS_DEVNODE;
-	state->connector_hdmi = pdata->connector_hdmi;
 
 	/* i2c access to adv7604? */
 	if (adv_smbus_read_byte_data_check(client, 0xfb, false) != 0x68) {
@@ -2020,7 +2265,7 @@
 
 	/* private controls */
 	state->detect_tx_5v_ctrl = v4l2_ctrl_new_std(hdl, NULL,
-			V4L2_CID_DV_RX_POWER_PRESENT, 0, 1, 0, 0);
+			V4L2_CID_DV_RX_POWER_PRESENT, 0, 0x0f, 0, 0);
 	state->rgb_quantization_range_ctrl =
 		v4l2_ctrl_new_std_menu(hdl, &adv7604_ctrl_ops,
 			V4L2_CID_DV_RX_RGB_RANGE, V4L2_DV_RGB_RANGE_FULL,
diff --git a/drivers/media/i2c/adv7842.c b/drivers/media/i2c/adv7842.c
index b154f36..9bbd665 100644
--- a/drivers/media/i2c/adv7842.c
+++ b/drivers/media/i2c/adv7842.c
@@ -20,10 +20,13 @@
 
 /*
  * References (c = chapter, p = page):
- * REF_01 - Analog devices, ADV7842, Register Settings Recommendations,
- *		Revision 2.5, June 2010
- * REF_02 - Analog devices, Register map documentation, Documentation of
- *		the register maps, Software manual, Rev. F, June 2010
+ * REF_01 - Analog Devices, ADV7842,
+ *		Register Settings Recommendations, Rev. 1.9, April 2011
+ * REF_02 - Analog Devices, Software User Guide, UG-206,
+ *		ADV7842 I2C Register Maps, Rev. 0, November 2010
+ * REF_03 - Analog Devices, Hardware User Guide, UG-214,
+ *		ADV7842 Fast Switching 2:1 HDMI 1.4 Receiver with 3D-Comb
+ *		Decoder and Digitizer, Rev. 0, January 2011
  */
 
 
@@ -61,6 +64,7 @@
 */
 
 struct adv7842_state {
+	struct adv7842_platform_data pdata;
 	struct v4l2_subdev sd;
 	struct media_pad pad;
 	struct v4l2_ctrl_handler hdl;
@@ -81,7 +85,7 @@
 	bool is_cea_format;
 	struct workqueue_struct *work_queues;
 	struct delayed_work delayed_work_enable_hotplug;
-	bool connector_hdmi;
+	bool restart_stdi_once;
 	bool hdmi_port_a;
 
 	/* i2c clients */
@@ -491,6 +495,11 @@
 	return adv_smbus_write_byte_data(state->i2c_hdmi, reg, val);
 }
 
+static inline int hdmi_write_and_or(struct v4l2_subdev *sd, u8 reg, u8 mask, u8 val)
+{
+	return hdmi_write(sd, reg, (hdmi_read(sd, reg) & mask) | val);
+}
+
 static inline int cp_read(struct v4l2_subdev *sd, u8 reg)
 {
 	struct adv7842_state *state = to_state(sd);
@@ -532,7 +541,7 @@
 
 	adv_smbus_write_byte_no_check(client, 0xff, 0x80);
 
-	mdelay(2);
+	mdelay(5);
 }
 
 /* ----------------------------------------------------------------------- */
@@ -587,10 +596,10 @@
 	v4l2_dbg(2, debug, sd, "%s: enable hotplug on ports: 0x%x\n",
 			__func__, present);
 
-	if (present & 0x1)
-		mask |= 0x20; /* port A */
-	if (present & 0x2)
-		mask |= 0x10; /* port B */
+	if (present & (0x04 << ADV7842_EDID_PORT_A))
+		mask |= 0x20;
+	if (present & (0x04 << ADV7842_EDID_PORT_B))
+		mask |= 0x10;
 	io_write_and_or(sd, 0x20, 0xcf, mask);
 }
 
@@ -679,14 +688,12 @@
 	struct i2c_client *client = v4l2_get_subdevdata(sd);
 	struct adv7842_state *state = to_state(sd);
 	const u8 *val = state->hdmi_edid.edid;
-	u8 cur_mask = rep_read(sd, 0x77) & 0x0c;
-	u8 mask = port == 0 ? 0x4 : 0x8;
 	int spa_loc = edid_spa_location(val);
 	int err = 0;
 	int i;
 
-	v4l2_dbg(2, debug, sd, "%s: write EDID on port %d (spa at 0x%x)\n",
-			__func__, port, spa_loc);
+	v4l2_dbg(2, debug, sd, "%s: write EDID on port %c (spa at 0x%x)\n",
+			__func__, (port == ADV7842_EDID_PORT_A) ? 'A' : 'B', spa_loc);
 
 	/* HPA disable on port A and B */
 	io_write_and_or(sd, 0x20, 0xcf, 0x00);
@@ -694,6 +701,9 @@
 	/* Disable I2C access to internal EDID ram from HDMI DDC ports */
 	rep_write_and_or(sd, 0x77, 0xf3, 0x00);
 
+	if (!state->hdmi_edid.present)
+		return 0;
+
 	/* edid segment pointer '0' for HDMI ports */
 	rep_write_and_or(sd, 0x77, 0xef, 0x00);
 
@@ -703,44 +713,32 @@
 	if (err)
 		return err;
 
-	if (spa_loc > 0) {
-		if (port == 0) {
-			/* port A SPA */
-			rep_write(sd, 0x72, val[spa_loc]);
-			rep_write(sd, 0x73, val[spa_loc + 1]);
-		} else {
-			/* port B SPA */
-			rep_write(sd, 0x74, val[spa_loc]);
-			rep_write(sd, 0x75, val[spa_loc + 1]);
-		}
-		rep_write(sd, 0x76, spa_loc);
+	if (spa_loc < 0)
+		spa_loc = 0xc0; /* Default value [REF_02, p. 199] */
+
+	if (port == ADV7842_EDID_PORT_A) {
+		rep_write(sd, 0x72, val[spa_loc]);
+		rep_write(sd, 0x73, val[spa_loc + 1]);
 	} else {
-		/* default register values for SPA */
-		if (port == 0) {
-			/* port A SPA */
-			rep_write(sd, 0x72, 0);
-			rep_write(sd, 0x73, 0);
-		} else {
-			/* port B SPA */
-			rep_write(sd, 0x74, 0);
-			rep_write(sd, 0x75, 0);
-		}
-		rep_write(sd, 0x76, 0xc0);
+		rep_write(sd, 0x74, val[spa_loc]);
+		rep_write(sd, 0x75, val[spa_loc + 1]);
 	}
-	rep_write_and_or(sd, 0x77, 0xbf, 0x00);
+	rep_write(sd, 0x76, spa_loc & 0xff);
+	rep_write_and_or(sd, 0x77, 0xbf, (spa_loc >> 2) & 0x40);
 
 	/* Calculates the checksums and enables I2C access to internal
 	 * EDID ram from HDMI DDC ports
 	 */
-	rep_write_and_or(sd, 0x77, 0xf3, mask | cur_mask);
+	rep_write_and_or(sd, 0x77, 0xf3, state->hdmi_edid.present);
 
 	for (i = 0; i < 1000; i++) {
-		if (rep_read(sd, 0x7d) & mask)
+		if (rep_read(sd, 0x7d) & state->hdmi_edid.present)
 			break;
 		mdelay(1);
 	}
 	if (i == 1000) {
-		v4l_err(client, "error enabling edid on port %d\n", port);
+		v4l_err(client, "error enabling edid on port %c\n",
+				(port == ADV7842_EDID_PORT_A) ? 'A' : 'B');
 		return -EIO;
 	}
 
@@ -927,7 +925,7 @@
 	cp_write(sd, 0x27, 0x00);
 	cp_write(sd, 0x28, 0x00);
 	cp_write(sd, 0x29, 0x00);
-	cp_write(sd, 0x8f, 0x00);
+	cp_write(sd, 0x8f, 0x40);
 	cp_write(sd, 0x90, 0x00);
 	cp_write(sd, 0xa5, 0x00);
 	cp_write(sd, 0xa6, 0x00);
@@ -1033,34 +1031,60 @@
 {
 	struct adv7842_state *state = to_state(sd);
 
+	v4l2_dbg(2, debug, sd, "%s: rgb_quantization_range = %d\n",
+		       __func__, state->rgb_quantization_range);
+
 	switch (state->rgb_quantization_range) {
 	case V4L2_DV_RGB_RANGE_AUTO:
-		/* automatic */
-		if (is_digital_input(sd) && !(hdmi_read(sd, 0x05) & 0x80)) {
-			/* receiving DVI-D signal */
+		if (state->mode == ADV7842_MODE_RGB) {
+			/* Receiving analog RGB signal
+			 * Set RGB full range (0-255) */
+			io_write_and_or(sd, 0x02, 0x0f, 0x10);
+			break;
+		}
 
-			/* ADV7842 selects RGB limited range regardless of
-			   input format (CE/IT) in automatic mode */
-			if (state->timings.bt.standards & V4L2_DV_BT_STD_CEA861) {
-				/* RGB limited range (16-235) */
-				io_write_and_or(sd, 0x02, 0x0f, 0x00);
-
-			} else {
-				/* RGB full range (0-255) */
-				io_write_and_or(sd, 0x02, 0x0f, 0x10);
-			}
-		} else {
-			/* receiving HDMI or analog signal, set automode */
+		if (state->mode == ADV7842_MODE_COMP) {
+			/* Receiving analog YPbPr signal
+			 * Set automode */
 			io_write_and_or(sd, 0x02, 0x0f, 0xf0);
+			break;
+		}
+
+		if (hdmi_read(sd, 0x05) & 0x80) {
+			/* Receiving HDMI signal
+			 * Set automode */
+			io_write_and_or(sd, 0x02, 0x0f, 0xf0);
+			break;
+		}
+
+		/* Receiving DVI-D signal
+		 * ADV7842 selects RGB limited range regardless of
+		 * input format (CE/IT) in automatic mode */
+		if (state->timings.bt.standards & V4L2_DV_BT_STD_CEA861) {
+			/* RGB limited range (16-235) */
+			io_write_and_or(sd, 0x02, 0x0f, 0x00);
+		} else {
+			/* RGB full range (0-255) */
+			io_write_and_or(sd, 0x02, 0x0f, 0x10);
 		}
 		break;
 	case V4L2_DV_RGB_RANGE_LIMITED:
-		/* RGB limited range (16-235) */
-		io_write_and_or(sd, 0x02, 0x0f, 0x00);
+		if (state->mode == ADV7842_MODE_COMP) {
+			/* YCrCb limited range (16-235) */
+			io_write_and_or(sd, 0x02, 0x0f, 0x20);
+		} else {
+			/* RGB limited range (16-235) */
+			io_write_and_or(sd, 0x02, 0x0f, 0x00);
+		}
 		break;
 	case V4L2_DV_RGB_RANGE_FULL:
-		/* RGB full range (0-255) */
-		io_write_and_or(sd, 0x02, 0x0f, 0x10);
+		if (state->mode == ADV7842_MODE_COMP) {
+			/* YCrCb full range (0-255) */
+			io_write_and_or(sd, 0x02, 0x0f, 0x60);
+		} else {
+			/* RGB full range (0-255) */
+			io_write_and_or(sd, 0x02, 0x0f, 0x10);
+		}
 		break;
 	}
 }
@@ -1298,7 +1322,7 @@
 }
 
 /* Fill the optional fields .standards and .flags in struct v4l2_dv_timings
-   if the format is listed in adv7604_timings[] */
+   if the format is listed in adv7842_timings[] */
 static void adv7842_fill_optional_dv_timings_fields(struct v4l2_subdev *sd,
 		struct v4l2_dv_timings *timings)
 {
@@ -1314,119 +1338,106 @@
 	struct v4l2_bt_timings *bt = &timings->bt;
 	struct stdi_readback stdi = { 0 };
 
+	v4l2_dbg(1, debug, sd, "%s:\n", __func__);
+
 	/* SDP block */
 	if (state->mode == ADV7842_MODE_SDP)
 		return -ENODATA;
 
 	/* read STDI */
 	if (read_stdi(sd, &stdi)) {
+		state->restart_stdi_once = true;
 		v4l2_dbg(1, debug, sd, "%s: no valid signal\n", __func__);
 		return -ENOLINK;
 	}
 	bt->interlaced = stdi.interlaced ?
 		V4L2_DV_INTERLACED : V4L2_DV_PROGRESSIVE;
-	bt->polarities = ((hdmi_read(sd, 0x05) & 0x10) ? V4L2_DV_VSYNC_POS_POL : 0) |
-		((hdmi_read(sd, 0x05) & 0x20) ? V4L2_DV_HSYNC_POS_POL : 0);
-	bt->vsync = stdi.lcvs;
 
 	if (is_digital_input(sd)) {
-		bool lock = hdmi_read(sd, 0x04) & 0x02;
-		bool interlaced = hdmi_read(sd, 0x0b) & 0x20;
-		unsigned w = (hdmi_read(sd, 0x07) & 0x1f) * 256 + hdmi_read(sd, 0x08);
-		unsigned h = (hdmi_read(sd, 0x09) & 0x1f) * 256 + hdmi_read(sd, 0x0a);
-		unsigned w_total = (hdmi_read(sd, 0x1e) & 0x3f) * 256 +
-			hdmi_read(sd, 0x1f);
-		unsigned h_total = ((hdmi_read(sd, 0x26) & 0x3f) * 256 +
-				    hdmi_read(sd, 0x27)) / 2;
-		unsigned freq = (((hdmi_read(sd, 0x51) << 1) +
-					(hdmi_read(sd, 0x52) >> 7)) * 1000000) +
-			((hdmi_read(sd, 0x52) & 0x7f) * 1000000) / 128;
-		int i;
-
-		if (is_hdmi(sd)) {
-			/* adjust for deep color mode */
-			freq = freq * 8 / (((hdmi_read(sd, 0x0b) & 0xc0)>>6) * 2 + 8);
-		}
-
-		/* No lock? */
-		if (!lock) {
-			v4l2_dbg(1, debug, sd, "%s: no lock on TMDS signal\n", __func__);
-			return -ENOLCK;
-		}
-		/* Interlaced? */
-		if (interlaced) {
-			v4l2_dbg(1, debug, sd, "%s: interlaced video not supported\n", __func__);
-			return -ERANGE;
-		}
-
-		for (i = 0; v4l2_dv_timings_presets[i].bt.width; i++) {
-			const struct v4l2_bt_timings *bt = &v4l2_dv_timings_presets[i].bt;
-
-			if (!v4l2_valid_dv_timings(&v4l2_dv_timings_presets[i],
-						   adv7842_get_dv_timings_cap(sd),
-						   adv7842_check_dv_timings, NULL))
-				continue;
-			if (w_total != htotal(bt) || h_total != vtotal(bt))
-				continue;
-
-			if (w != bt->width || h != bt->height)
-				continue;
-
-			if (abs(freq - bt->pixelclock) > 1000000)
-				continue;
-			*timings = v4l2_dv_timings_presets[i];
-			return 0;
-		}
+		uint32_t freq;
 
 		timings->type = V4L2_DV_BT_656_1120;
 
-		bt->width = w;
-		bt->height = h;
-		bt->interlaced = (hdmi_read(sd, 0x0b) & 0x20) ?
-			V4L2_DV_INTERLACED : V4L2_DV_PROGRESSIVE;
-		bt->polarities = ((hdmi_read(sd, 0x05) & 0x10) ?
-			V4L2_DV_VSYNC_POS_POL : 0) | ((hdmi_read(sd, 0x05) & 0x20) ?
-			V4L2_DV_HSYNC_POS_POL : 0);
-		bt->pixelclock = (((hdmi_read(sd, 0x51) << 1) +
-				   (hdmi_read(sd, 0x52) >> 7)) * 1000000) +
-				 ((hdmi_read(sd, 0x52) & 0x7f) * 1000000) / 128;
-		bt->hfrontporch = (hdmi_read(sd, 0x20) & 0x1f) * 256 +
-			hdmi_read(sd, 0x21);
-		bt->hsync = (hdmi_read(sd, 0x22) & 0x1f) * 256 +
-			hdmi_read(sd, 0x23);
-		bt->hbackporch = (hdmi_read(sd, 0x24) & 0x1f) * 256 +
-			hdmi_read(sd, 0x25);
-		bt->vfrontporch = ((hdmi_read(sd, 0x2a) & 0x3f) * 256 +
-				   hdmi_read(sd, 0x2b)) / 2;
-		bt->il_vfrontporch = ((hdmi_read(sd, 0x2c) & 0x3f) * 256 +
-				      hdmi_read(sd, 0x2d)) / 2;
-		bt->vsync = ((hdmi_read(sd, 0x2e) & 0x3f) * 256 +
-			     hdmi_read(sd, 0x2f)) / 2;
-		bt->il_vsync = ((hdmi_read(sd, 0x30) & 0x3f) * 256 +
-				hdmi_read(sd, 0x31)) / 2;
-		bt->vbackporch = ((hdmi_read(sd, 0x32) & 0x3f) * 256 +
-				  hdmi_read(sd, 0x33)) / 2;
-		bt->il_vbackporch = ((hdmi_read(sd, 0x34) & 0x3f) * 256 +
-				     hdmi_read(sd, 0x35)) / 2;
+		bt->width = (hdmi_read(sd, 0x07) & 0x0f) * 256 + hdmi_read(sd, 0x08);
+		bt->height = (hdmi_read(sd, 0x09) & 0x0f) * 256 + hdmi_read(sd, 0x0a);
+		freq = (hdmi_read(sd, 0x06) * 1000000) +
+		       ((hdmi_read(sd, 0x3b) & 0x30) >> 4) * 250000;
 
-		bt->standards = 0;
-		bt->flags = 0;
-	} else {
-		/* Interlaced? */
-		if (stdi.interlaced) {
-			v4l2_dbg(1, debug, sd, "%s: interlaced video not supported\n", __func__);
-			return -ERANGE;
+		if (is_hdmi(sd)) {
+			/* adjust for deep color mode */
+			freq = freq * 8 / (((hdmi_read(sd, 0x0b) & 0xc0) >> 5) + 8);
 		}
-
+		bt->pixelclock = freq;
+		bt->hfrontporch = (hdmi_read(sd, 0x20) & 0x03) * 256 +
+			hdmi_read(sd, 0x21);
+		bt->hsync = (hdmi_read(sd, 0x22) & 0x03) * 256 +
+			hdmi_read(sd, 0x23);
+		bt->hbackporch = (hdmi_read(sd, 0x24) & 0x03) * 256 +
+			hdmi_read(sd, 0x25);
+		bt->vfrontporch = ((hdmi_read(sd, 0x2a) & 0x1f) * 256 +
+			hdmi_read(sd, 0x2b)) / 2;
+		bt->vsync = ((hdmi_read(sd, 0x2e) & 0x1f) * 256 +
+			hdmi_read(sd, 0x2f)) / 2;
+		bt->vbackporch = ((hdmi_read(sd, 0x32) & 0x1f) * 256 +
+			hdmi_read(sd, 0x33)) / 2;
+		bt->polarities = ((hdmi_read(sd, 0x05) & 0x10) ? V4L2_DV_VSYNC_POS_POL : 0) |
+			((hdmi_read(sd, 0x05) & 0x20) ? V4L2_DV_HSYNC_POS_POL : 0);
+		if (bt->interlaced == V4L2_DV_INTERLACED) {
+			bt->height += (hdmi_read(sd, 0x0b) & 0x0f) * 256 +
+					hdmi_read(sd, 0x0c);
+			bt->il_vfrontporch = ((hdmi_read(sd, 0x2c) & 0x1f) * 256 +
+					hdmi_read(sd, 0x2d)) / 2;
+			bt->il_vsync = ((hdmi_read(sd, 0x30) & 0x1f) * 256 +
+					hdmi_read(sd, 0x31)) / 2;
+			bt->vbackporch = ((hdmi_read(sd, 0x34) & 0x1f) * 256 +
+					hdmi_read(sd, 0x35)) / 2;
+		}
+		adv7842_fill_optional_dv_timings_fields(sd, timings);
+	} else {
+		/* find format
+		 * Since LCVS values are inaccurate [REF_03, p. 339-340],
+		 * stdi2dv_timings() is called with lcvs +-1 if the first attempt fails.
+		 */
+		if (!stdi2dv_timings(sd, &stdi, timings))
+			goto found;
+		stdi.lcvs += 1;
+		v4l2_dbg(1, debug, sd, "%s: lcvs + 1 = %d\n", __func__, stdi.lcvs);
+		if (!stdi2dv_timings(sd, &stdi, timings))
+			goto found;
+		stdi.lcvs -= 2;
+		v4l2_dbg(1, debug, sd, "%s: lcvs - 1 = %d\n", __func__, stdi.lcvs);
 		if (stdi2dv_timings(sd, &stdi, timings)) {
+			/*
+			 * The STDI block may measure wrong values, especially
+			 * for lcvs and lcf. If the driver cannot find any
+			 * valid timing, the STDI block is restarted to measure
+			 * the video timings again. The function will return an
+			 * error, but the restart of STDI will generate a new
+			 * STDI interrupt and the format detection process will
+			 * restart.
+			 */
+			if (state->restart_stdi_once) {
+				v4l2_dbg(1, debug, sd, "%s: restart STDI\n", __func__);
+				/* TODO restart STDI for Sync Channel 2 */
+				/* enter one-shot mode */
+				cp_write_and_or(sd, 0x86, 0xf9, 0x00);
+				/* trigger STDI restart */
+				cp_write_and_or(sd, 0x86, 0xf9, 0x04);
+				/* reset to continuous mode */
+				cp_write_and_or(sd, 0x86, 0xf9, 0x02);
+				state->restart_stdi_once = false;
+				return -ENOLINK;
+			}
 			v4l2_dbg(1, debug, sd, "%s: format not supported\n", __func__);
 			return -ERANGE;
 		}
+		state->restart_stdi_once = true;
 	}
+found:
 
 	if (debug > 1)
-		v4l2_print_dv_timings(sd->name, "adv7842_query_dv_timings: ",
-				      timings, true);
+		v4l2_print_dv_timings(sd->name, "adv7842_query_dv_timings:",
+				timings, true);
 	return 0;
 }
 
@@ -1437,9 +1448,16 @@
 	struct v4l2_bt_timings *bt;
 	int err;
 
+	v4l2_dbg(1, debug, sd, "%s:\n", __func__);
+
 	if (state->mode == ADV7842_MODE_SDP)
 		return -ENODATA;
 
+	if (v4l2_match_dv_timings(&state->timings, timings, 0)) {
+		v4l2_dbg(1, debug, sd, "%s: no change\n", __func__);
+		return 0;
+	}
+
 	bt = &timings->bt;
 
 	if (!v4l2_valid_dv_timings(timings, adv7842_get_dv_timings_cap(sd),
@@ -1450,7 +1468,7 @@
 
 	state->timings = *timings;
 
-	cp_write(sd, 0x91, bt->interlaced ? 0x50 : 0x10);
+	cp_write(sd, 0x91, bt->interlaced ? 0x40 : 0x00);
 
 	/* Use prim_mode and vid_std when available */
 	err = configure_predefined_video_timings(sd, timings);
@@ -1483,18 +1501,18 @@
 static void enable_input(struct v4l2_subdev *sd)
 {
 	struct adv7842_state *state = to_state(sd);
+
+	set_rgb_quantization_range(sd);
 	switch (state->mode) {
 	case ADV7842_MODE_SDP:
 	case ADV7842_MODE_COMP:
 	case ADV7842_MODE_RGB:
-		/* enable */
 		io_write(sd, 0x15, 0xb0);   /* Disable Tristate of Pins (no audio) */
 		break;
 	case ADV7842_MODE_HDMI:
-		/* enable */
-		hdmi_write(sd, 0x1a, 0x0a); /* Unmute audio */
 		hdmi_write(sd, 0x01, 0x00); /* Enable HDMI clock terminators */
 		io_write(sd, 0x15, 0xa0);   /* Disable Tristate of Pins */
+		hdmi_write_and_or(sd, 0x1a, 0xef, 0x00); /* Unmute audio */
 		break;
 	default:
 		v4l2_dbg(2, debug, sd, "%s: Unknown mode %d\n",
@@ -1505,9 +1523,9 @@
 
 static void disable_input(struct v4l2_subdev *sd)
 {
-	/* disable */
+	hdmi_write_and_or(sd, 0x1a, 0xef, 0x10); /* Mute audio [REF_01, c. 2.2.2] */
+	msleep(16); /* 512 samples with >= 32 kHz sample rate [REF_03, c. 8.29] */
 	io_write(sd, 0x15, 0xbe);   /* Tristate all outputs from video core */
-	hdmi_write(sd, 0x1a, 0x1a); /* Mute audio */
 	hdmi_write(sd, 0x01, 0x78); /* Disable HDMI clock terminators */
 }
 
@@ -1575,9 +1593,6 @@
 		afe_write(sd, 0x00, 0x00); /* power up ADC */
 		afe_write(sd, 0xc8, 0x00); /* phase control */
 
-		io_write(sd, 0x19, 0x83); /* LLC DLL phase */
-		io_write(sd, 0x33, 0x40); /* LLC DLL enable */
-
 		io_write(sd, 0xdd, 0x90); /* Manual 2x output clock */
 		/* script says register 0xde, which don't exist in manual */
 
@@ -1611,8 +1626,6 @@
 		/* deinterlacer enabled and 3D comb */
 		sdp_write_and_or(sd, 0x12, 0xf6, 0x09);
 
-		sdp_write(sd, 0xdd, 0x08); /* free run auto */
-
 		break;
 
 	case ADV7842_MODE_COMP:
@@ -1627,6 +1640,13 @@
 
 		afe_write(sd, 0x00, 0x00); /* power up ADC */
 		afe_write(sd, 0xc8, 0x00); /* phase control */
+		if (state->mode == ADV7842_MODE_COMP) {
+			/* force to YCrCb */
+			io_write_and_or(sd, 0x02, 0x0f, 0x60);
+		} else {
+			/* force to RGB */
+			io_write_and_or(sd, 0x02, 0x0f, 0x10);
+		}
 
 		/* set ADI recommended settings for digitizer */
 		/* "ADV7842 Register Settings Recommendations
@@ -1722,19 +1742,19 @@
 
 	switch (input) {
 	case ADV7842_SELECT_HDMI_PORT_A:
-		/* TODO select HDMI_COMP or HDMI_GR */
 		state->mode = ADV7842_MODE_HDMI;
 		state->vid_std_select = ADV7842_HDMI_COMP_VID_STD_HD_1250P;
 		state->hdmi_port_a = true;
 		break;
 	case ADV7842_SELECT_HDMI_PORT_B:
-		/* TODO select HDMI_COMP or HDMI_GR */
 		state->mode = ADV7842_MODE_HDMI;
 		state->vid_std_select = ADV7842_HDMI_COMP_VID_STD_HD_1250P;
 		state->hdmi_port_a = false;
 		break;
 	case ADV7842_SELECT_VGA_COMP:
-		v4l2_info(sd, "%s: VGA component: todo\n", __func__);
+		state->mode = ADV7842_MODE_COMP;
+		state->vid_std_select = ADV7842_RGB_VID_STD_AUTO_GRAPH_MODE;
+		break;
 	case ADV7842_SELECT_VGA_RGB:
 		state->mode = ADV7842_MODE_RGB;
 		state->vid_std_select = ADV7842_RGB_VID_STD_AUTO_GRAPH_MODE;
@@ -1814,12 +1834,15 @@
 		io_write(sd, 0x78, 0x03);
 		/* Enable SDP Standard Detection Change and SDP Video Detected */
 		io_write(sd, 0xa0, 0x09);
+		/* Enable HDMI_MODE interrupt */
+		io_write(sd, 0x69, 0x08);
 	} else {
 		io_write(sd, 0x46, 0x0);
 		io_write(sd, 0x5a, 0x0);
 		io_write(sd, 0x73, 0x0);
 		io_write(sd, 0x78, 0x0);
 		io_write(sd, 0xa0, 0x0);
+		io_write(sd, 0x69, 0x0);
 	}
 }
 
@@ -1827,11 +1850,9 @@
 {
 	struct adv7842_state *state = to_state(sd);
 	u8 fmt_change_cp, fmt_change_digital, fmt_change_sdp;
-	u8 irq_status[5];
-	u8 irq_cfg = io_read(sd, 0x40);
+	u8 irq_status[6];
 
-	/* disable irq-pin output */
-	io_write(sd, 0x40, irq_cfg | 0x3);
+	adv7842_irq_enable(sd, false);
 
 	/* read status */
 	irq_status[0] = io_read(sd, 0x43);
@@ -1839,6 +1860,7 @@
 	irq_status[2] = io_read(sd, 0x70);
 	irq_status[3] = io_read(sd, 0x75);
 	irq_status[4] = io_read(sd, 0x9d);
+	irq_status[5] = io_read(sd, 0x66);
 
 	/* and clear */
 	if (irq_status[0])
@@ -1851,10 +1873,14 @@
 		io_write(sd, 0x76, irq_status[3]);
 	if (irq_status[4])
 		io_write(sd, 0x9e, irq_status[4]);
+	if (irq_status[5])
+		io_write(sd, 0x67, irq_status[5]);
 
-	v4l2_dbg(1, debug, sd, "%s: irq %x, %x, %x, %x, %x\n", __func__,
+	adv7842_irq_enable(sd, true);
+
+	v4l2_dbg(1, debug, sd, "%s: irq %x, %x, %x, %x, %x, %x\n", __func__,
 		 irq_status[0], irq_status[1], irq_status[2],
-		 irq_status[3], irq_status[4]);
+		 irq_status[3], irq_status[4], irq_status[5]);
 
 	/* format change CP */
 	fmt_change_cp = irq_status[0] & 0x9c;
@@ -1871,25 +1897,72 @@
 	else
 		fmt_change_digital = 0;
 
-	/* notify */
+	/* format change */
 	if (fmt_change_cp || fmt_change_digital || fmt_change_sdp) {
 		v4l2_dbg(1, debug, sd,
 			 "%s: fmt_change_cp = 0x%x, fmt_change_digital = 0x%x, fmt_change_sdp = 0x%x\n",
 			 __func__, fmt_change_cp, fmt_change_digital,
 			 fmt_change_sdp);
 		v4l2_subdev_notify(sd, ADV7842_FMT_CHANGE, NULL);
+		if (handled)
+			*handled = true;
 	}
 
-	/* 5v cable detect */
-	if (irq_status[2])
+	/* HDMI/DVI mode */
+	if (irq_status[5] & 0x08) {
+		v4l2_dbg(1, debug, sd, "%s: irq %s mode\n", __func__,
+			 (io_read(sd, 0x65) & 0x08) ? "HDMI" : "DVI");
+		if (handled)
+			*handled = true;
+	}
+
+	/* tx 5v detect */
+	if (irq_status[2] & 0x3) {
+		v4l2_dbg(1, debug, sd, "%s: irq tx_5v\n", __func__);
 		adv7842_s_detect_tx_5v_ctrl(sd);
+		if (handled)
+			*handled = true;
+	}
+	return 0;
+}
 
-	if (handled)
-		*handled = true;
+static int adv7842_get_edid(struct v4l2_subdev *sd, struct v4l2_subdev_edid *edid)
+{
+	struct adv7842_state *state = to_state(sd);
+	u8 *data = NULL;
 
-	/* re-enable irq-pin output */
-	io_write(sd, 0x40, irq_cfg);
+	if (edid->pad > ADV7842_EDID_PORT_VGA)
+		return -EINVAL;
+	if (edid->blocks == 0)
+		return -EINVAL;
+	if (edid->blocks > 2)
+		return -EINVAL;
+	if (edid->start_block > 1)
+		return -EINVAL;
+	if (edid->start_block == 1)
+		edid->blocks = 1;
+	if (!edid->edid)
+		return -EINVAL;
 
+	switch (edid->pad) {
+	case ADV7842_EDID_PORT_A:
+	case ADV7842_EDID_PORT_B:
+		if (state->hdmi_edid.present & (0x04 << edid->pad))
+			data = state->hdmi_edid.edid;
+		break;
+	case ADV7842_EDID_PORT_VGA:
+		if (state->vga_edid.present)
+			data = state->vga_edid.edid;
+		break;
+	default:
+		return -EINVAL;
+	}
+	if (!data)
+		return -ENODATA;
+
+	memcpy(edid->edid,
+	       data + edid->start_block * 128,
+	       edid->blocks * 128);
 	return 0;
 }
 
@@ -1898,7 +1971,7 @@
 	struct adv7842_state *state = to_state(sd);
 	int err = 0;
 
-	if (e->pad > 2)
+	if (e->pad > ADV7842_EDID_PORT_VGA)
 		return -EINVAL;
 	if (e->start_block != 0)
 		return -EINVAL;
@@ -1911,20 +1984,25 @@
 	state->aspect_ratio = v4l2_calc_aspect_ratio(e->edid[0x15],
 			e->edid[0x16]);
 
-	if (e->pad == 2) {
+	switch (e->pad) {
+	case ADV7842_EDID_PORT_VGA:
 		memset(&state->vga_edid.edid, 0, 256);
 		state->vga_edid.present = e->blocks ? 0x1 : 0x0;
 		memcpy(&state->vga_edid.edid, e->edid, 128 * e->blocks);
 		err = edid_write_vga_segment(sd);
-	} else {
-		u32 mask = 0x1<<e->pad;
+		break;
+	case ADV7842_EDID_PORT_A:
+	case ADV7842_EDID_PORT_B:
 		memset(&state->hdmi_edid.edid, 0, 256);
 		if (e->blocks)
-			state->hdmi_edid.present |= mask;
+			state->hdmi_edid.present |= 0x04 << e->pad;
 		else
-			state->hdmi_edid.present &= ~mask;
-		memcpy(&state->hdmi_edid.edid, e->edid, 128*e->blocks);
+			state->hdmi_edid.present &= ~(0x04 << e->pad);
+		memcpy(&state->hdmi_edid.edid, e->edid, 128 * e->blocks);
 		err = edid_write_hdmi_segment(sd, e->pad);
+		break;
+	default:
+		return -EINVAL;
 	}
 	if (err < 0)
 		v4l2_err(sd, "error %d writing edid on port %d\n", err, e->pad);
@@ -2156,7 +2234,7 @@
 	static const char * const input_color_space_txt[16] = {
 		"RGB limited range (16-235)", "RGB full range (0-255)",
 		"YCbCr Bt.601 (16-235)", "YCbCr Bt.709 (16-235)",
-		"XvYCC Bt.601", "XvYCC Bt.709",
+		"xvYCC Bt.601", "xvYCC Bt.709",
 		"YCbCr Bt.601 (0-255)", "YCbCr Bt.709 (0-255)",
 		"invalid", "invalid", "invalid", "invalid", "invalid",
 		"invalid", "invalid", "automatic"
@@ -2175,8 +2253,6 @@
 
 	v4l2_info(sd, "-----Chip status-----\n");
 	v4l2_info(sd, "Chip power: %s\n", no_power(sd) ? "off" : "on");
-	v4l2_info(sd, "Connector type: %s\n", state->connector_hdmi ?
-			"HDMI" : (is_digital_input(sd) ? "DVI-D" : "DVI-A"));
 	v4l2_info(sd, "HDMI/DVI-D port selected: %s\n",
 			state->hdmi_port_a ? "A" : "B");
 	v4l2_info(sd, "EDID A %s, B %s\n",
@@ -2354,15 +2430,63 @@
 	return 0;
 }
 
+static void adv7842_s_sdp_io(struct v4l2_subdev *sd, struct adv7842_sdp_io_sync_adjustment *s)
+{
+	if (s && s->adjust) {
+		sdp_io_write(sd, 0x94, (s->hs_beg >> 8) & 0xf);
+		sdp_io_write(sd, 0x95, s->hs_beg & 0xff);
+		sdp_io_write(sd, 0x96, (s->hs_width >> 8) & 0xf);
+		sdp_io_write(sd, 0x97, s->hs_width & 0xff);
+		sdp_io_write(sd, 0x98, (s->de_beg >> 8) & 0xf);
+		sdp_io_write(sd, 0x99, s->de_beg & 0xff);
+		sdp_io_write(sd, 0x9a, (s->de_end >> 8) & 0xf);
+		sdp_io_write(sd, 0x9b, s->de_end & 0xff);
+		sdp_io_write(sd, 0xa8, s->vs_beg_o);
+		sdp_io_write(sd, 0xa9, s->vs_beg_e);
+		sdp_io_write(sd, 0xaa, s->vs_end_o);
+		sdp_io_write(sd, 0xab, s->vs_end_e);
+		sdp_io_write(sd, 0xac, s->de_v_beg_o);
+		sdp_io_write(sd, 0xad, s->de_v_beg_e);
+		sdp_io_write(sd, 0xae, s->de_v_end_o);
+		sdp_io_write(sd, 0xaf, s->de_v_end_e);
+	} else {
+		/* set to default */
+		sdp_io_write(sd, 0x94, 0x00);
+		sdp_io_write(sd, 0x95, 0x00);
+		sdp_io_write(sd, 0x96, 0x00);
+		sdp_io_write(sd, 0x97, 0x20);
+		sdp_io_write(sd, 0x98, 0x00);
+		sdp_io_write(sd, 0x99, 0x00);
+		sdp_io_write(sd, 0x9a, 0x00);
+		sdp_io_write(sd, 0x9b, 0x00);
+		sdp_io_write(sd, 0xa8, 0x04);
+		sdp_io_write(sd, 0xa9, 0x04);
+		sdp_io_write(sd, 0xaa, 0x04);
+		sdp_io_write(sd, 0xab, 0x04);
+		sdp_io_write(sd, 0xac, 0x04);
+		sdp_io_write(sd, 0xad, 0x04);
+		sdp_io_write(sd, 0xae, 0x04);
+		sdp_io_write(sd, 0xaf, 0x04);
+	}
+}
+
 static int adv7842_s_std(struct v4l2_subdev *sd, v4l2_std_id norm)
 {
 	struct adv7842_state *state = to_state(sd);
+	struct adv7842_platform_data *pdata = &state->pdata;
 
 	v4l2_dbg(1, debug, sd, "%s:\n", __func__);
 
 	if (state->mode != ADV7842_MODE_SDP)
 		return -ENODATA;
 
+	if (norm & V4L2_STD_625_50)
+		adv7842_s_sdp_io(sd, &pdata->sdp_io_sync_625);
+	else if (norm & V4L2_STD_525_60)
+		adv7842_s_sdp_io(sd, &pdata->sdp_io_sync_525);
+	else
+		adv7842_s_sdp_io(sd, NULL);
+
 	if (norm & V4L2_STD_ALL) {
 		state->norm = norm;
 		return 0;
@@ -2385,9 +2509,10 @@
 
 /* ----------------------------------------------------------------------- */
 
-static int adv7842_core_init(struct v4l2_subdev *sd,
-		const struct adv7842_platform_data *pdata)
+static int adv7842_core_init(struct v4l2_subdev *sd)
 {
+	struct adv7842_state *state = to_state(sd);
+	struct adv7842_platform_data *pdata = &state->pdata;
 	hdmi_write(sd, 0x48,
 		   (pdata->disable_pwrdnb ? 0x80 : 0) |
 		   (pdata->disable_cable_det_rst ? 0x40 : 0));
@@ -2400,7 +2525,7 @@
 
 	/* video format */
 	io_write(sd, 0x02,
-		 pdata->inp_color_space << 4 |
+		 0xf0 |
 		 pdata->alt_gamma << 3 |
 		 pdata->op_656_range << 2 |
 		 pdata->rgb_out << 1 |
@@ -2412,13 +2537,24 @@
 			pdata->replicate_av_codes << 1 |
 			pdata->invert_cbcr << 0);
 
+	/* HDMI audio */
+	hdmi_write_and_or(sd, 0x1a, 0xf1, 0x08); /* Wait 1 s before unmute */
+
 	/* Drive strength */
-	io_write_and_or(sd, 0x14, 0xc0, pdata->drive_strength.data<<4 |
-			pdata->drive_strength.clock<<2 |
-			pdata->drive_strength.sync);
+	io_write_and_or(sd, 0x14, 0xc0,
+			pdata->dr_str_data << 4 |
+			pdata->dr_str_clk << 2 |
+			pdata->dr_str_sync);
 
 	/* HDMI free run */
-	cp_write(sd, 0xba, (pdata->hdmi_free_run_mode << 1) | 0x01);
+	cp_write_and_or(sd, 0xba, 0xfc, pdata->hdmi_free_run_enable |
+					(pdata->hdmi_free_run_mode << 1));
+
+	/* SPD free run */
+	sdp_write_and_or(sd, 0xdd, 0xf0, pdata->sdp_free_run_force |
+					 (pdata->sdp_free_run_cbar_en << 1) |
+					 (pdata->sdp_free_run_man_col_en << 2) |
+					 (pdata->sdp_free_run_auto << 3));
 
 	/* TODO from platform data */
 	cp_write(sd, 0x69, 0x14);   /* Enable CP CSC */
@@ -2431,18 +2567,6 @@
 
 	sdp_csc_coeff(sd, &pdata->sdp_csc_coeff);
 
-	if (pdata->sdp_io_sync.adjust) {
-		const struct adv7842_sdp_io_sync_adjustment *s = &pdata->sdp_io_sync;
-		sdp_io_write(sd, 0x94, (s->hs_beg>>8) & 0xf);
-		sdp_io_write(sd, 0x95, s->hs_beg & 0xff);
-		sdp_io_write(sd, 0x96, (s->hs_width>>8) & 0xf);
-		sdp_io_write(sd, 0x97, s->hs_width & 0xff);
-		sdp_io_write(sd, 0x98, (s->de_beg>>8) & 0xf);
-		sdp_io_write(sd, 0x99, s->de_beg & 0xff);
-		sdp_io_write(sd, 0x9a, (s->de_end>>8) & 0xf);
-		sdp_io_write(sd, 0x9b, s->de_end & 0xff);
-	}
-
 	/* todo, improve settings for sdram */
 	if (pdata->sd_ram_size >= 128) {
 		sdp_write(sd, 0x12, 0x0d); /* Frame TBC,3D comb enabled */
@@ -2483,12 +2607,11 @@
 	io_write_and_or(sd, 0x20, 0xcf, 0x00);
 
 	/* LLC */
-	/* Set phase to 16. TODO: get this from platform_data */
-	io_write(sd, 0x19, 0x90);
+	io_write(sd, 0x19, 0x80 | pdata->llc_dll_phase);
 	io_write(sd, 0x33, 0x40);
 
 	/* interrupts */
-	io_write(sd, 0x40, 0xe2); /* Configure INT1 */
+	io_write(sd, 0x40, 0xf2); /* Configure INT1 */
 
 	adv7842_irq_enable(sd, true);
 
@@ -2588,6 +2711,7 @@
 	struct i2c_client *client = v4l2_get_subdevdata(sd);
 	struct adv7842_state *state = to_state(sd);
 	struct adv7842_platform_data *pdata = client->dev.platform_data;
+	struct v4l2_dv_timings timings;
 	int ret = 0;
 
 	if (!pdata)
@@ -2610,7 +2734,7 @@
 	adv7842_rewrite_i2c_addresses(sd, pdata);
 
 	/* and re-init chip and state */
-	adv7842_core_init(sd, pdata);
+	adv7842_core_init(sd);
 
 	disable_input(sd);
 
@@ -2618,11 +2742,15 @@
 
 	enable_input(sd);
 
-	adv7842_s_dv_timings(sd, &state->timings);
-
 	edid_write_vga_segment(sd);
-	edid_write_hdmi_segment(sd, 0);
-	edid_write_hdmi_segment(sd, 1);
+	edid_write_hdmi_segment(sd, ADV7842_EDID_PORT_A);
+	edid_write_hdmi_segment(sd, ADV7842_EDID_PORT_B);
+
+	timings = state->timings;
+
+	memset(&state->timings, 0, sizeof(struct v4l2_dv_timings));
+
+	adv7842_s_dv_timings(sd, &timings);
 
 	return ret;
 }
@@ -2670,6 +2798,7 @@
 };
 
 static const struct v4l2_subdev_pad_ops adv7842_pad_ops = {
+	.get_edid = adv7842_get_edid,
 	.set_edid = adv7842_set_edid,
 };
 
@@ -2712,8 +2841,9 @@
 };
 
 
-static void adv7842_unregister_clients(struct adv7842_state *state)
+static void adv7842_unregister_clients(struct v4l2_subdev *sd)
 {
+	struct adv7842_state *state = to_state(sd);
 	if (state->i2c_avlink)
 		i2c_unregister_device(state->i2c_avlink);
 	if (state->i2c_cec)
@@ -2736,21 +2866,79 @@
 		i2c_unregister_device(state->i2c_cp);
 	if (state->i2c_vdp)
 		i2c_unregister_device(state->i2c_vdp);
+
+	state->i2c_avlink = NULL;
+	state->i2c_cec = NULL;
+	state->i2c_infoframe = NULL;
+	state->i2c_sdp_io = NULL;
+	state->i2c_sdp = NULL;
+	state->i2c_afe = NULL;
+	state->i2c_repeater = NULL;
+	state->i2c_edid = NULL;
+	state->i2c_hdmi = NULL;
+	state->i2c_cp = NULL;
+	state->i2c_vdp = NULL;
 }
 
-static struct i2c_client *adv7842_dummy_client(struct v4l2_subdev *sd,
+static struct i2c_client *adv7842_dummy_client(struct v4l2_subdev *sd, const char *desc,
 					       u8 addr, u8 io_reg)
 {
 	struct i2c_client *client = v4l2_get_subdevdata(sd);
+	struct i2c_client *cp;
 
 	io_write(sd, io_reg, addr << 1);
-	return i2c_new_dummy(client->adapter, io_read(sd, io_reg) >> 1);
+
+	if (addr == 0) {
+		v4l2_err(sd, "no %s i2c addr configured\n", desc);
+		return NULL;
+	}
+
+	cp = i2c_new_dummy(client->adapter, io_read(sd, io_reg) >> 1);
+	if (!cp)
+		v4l2_err(sd, "register %s on i2c addr 0x%x failed\n", desc, addr);
+
+	return cp;
+}
+
+static int adv7842_register_clients(struct v4l2_subdev *sd)
+{
+	struct adv7842_state *state = to_state(sd);
+	struct adv7842_platform_data *pdata = &state->pdata;
+
+	state->i2c_avlink = adv7842_dummy_client(sd, "avlink", pdata->i2c_avlink, 0xf3);
+	state->i2c_cec = adv7842_dummy_client(sd, "cec", pdata->i2c_cec, 0xf4);
+	state->i2c_infoframe = adv7842_dummy_client(sd, "infoframe", pdata->i2c_infoframe, 0xf5);
+	state->i2c_sdp_io = adv7842_dummy_client(sd, "sdp_io", pdata->i2c_sdp_io, 0xf2);
+	state->i2c_sdp = adv7842_dummy_client(sd, "sdp", pdata->i2c_sdp, 0xf1);
+	state->i2c_afe = adv7842_dummy_client(sd, "afe", pdata->i2c_afe, 0xf8);
+	state->i2c_repeater = adv7842_dummy_client(sd, "repeater", pdata->i2c_repeater, 0xf9);
+	state->i2c_edid = adv7842_dummy_client(sd, "edid", pdata->i2c_edid, 0xfa);
+	state->i2c_hdmi = adv7842_dummy_client(sd, "hdmi", pdata->i2c_hdmi, 0xfb);
+	state->i2c_cp = adv7842_dummy_client(sd, "cp", pdata->i2c_cp, 0xfd);
+	state->i2c_vdp = adv7842_dummy_client(sd, "vdp", pdata->i2c_vdp, 0xfe);
+
+	if (!state->i2c_avlink ||
+	    !state->i2c_cec ||
+	    !state->i2c_infoframe ||
+	    !state->i2c_sdp_io ||
+	    !state->i2c_sdp ||
+	    !state->i2c_afe ||
+	    !state->i2c_repeater ||
+	    !state->i2c_edid ||
+	    !state->i2c_hdmi ||
+	    !state->i2c_cp ||
+	    !state->i2c_vdp)
+		return -1;
+
+	return 0;
 }
 
 static int adv7842_probe(struct i2c_client *client,
 			 const struct i2c_device_id *id)
 {
 	struct adv7842_state *state;
+	static const struct v4l2_dv_timings cea640x480 =
+		V4L2_DV_BT_CEA_640X480P59_94;
 	struct adv7842_platform_data *pdata = client->dev.platform_data;
 	struct v4l2_ctrl_handler *hdl;
 	struct v4l2_subdev *sd;
@@ -2775,13 +2963,17 @@
 		return -ENOMEM;
 	}
 
+	/* platform data */
+	state->pdata = *pdata;
+	state->timings = cea640x480;
+
 	sd = &state->sd;
 	v4l2_i2c_subdev_init(sd, client, &adv7842_ops);
 	sd->flags |= V4L2_SUBDEV_FL_HAS_DEVNODE;
-	state->connector_hdmi = pdata->connector_hdmi;
 	state->mode = pdata->mode;
 
-	state->hdmi_port_a = true;
+	state->hdmi_port_a = pdata->input == ADV7842_SELECT_HDMI_PORT_A;
+	state->restart_stdi_once = true;
 
 	/* i2c access to adv7842? */
 	rev = adv_smbus_read_byte_data_check(client, 0xea, false) << 8 |
@@ -2843,21 +3035,7 @@
 		goto err_hdl;
 	}
 
-	state->i2c_avlink = adv7842_dummy_client(sd, pdata->i2c_avlink, 0xf3);
-	state->i2c_cec = adv7842_dummy_client(sd, pdata->i2c_cec, 0xf4);
-	state->i2c_infoframe = adv7842_dummy_client(sd, pdata->i2c_infoframe, 0xf5);
-	state->i2c_sdp_io = adv7842_dummy_client(sd, pdata->i2c_sdp_io, 0xf2);
-	state->i2c_sdp = adv7842_dummy_client(sd, pdata->i2c_sdp, 0xf1);
-	state->i2c_afe = adv7842_dummy_client(sd, pdata->i2c_afe, 0xf8);
-	state->i2c_repeater = adv7842_dummy_client(sd, pdata->i2c_repeater, 0xf9);
-	state->i2c_edid = adv7842_dummy_client(sd, pdata->i2c_edid, 0xfa);
-	state->i2c_hdmi = adv7842_dummy_client(sd, pdata->i2c_hdmi, 0xfb);
-	state->i2c_cp = adv7842_dummy_client(sd, pdata->i2c_cp, 0xfd);
-	state->i2c_vdp = adv7842_dummy_client(sd, pdata->i2c_vdp, 0xfe);
-	if (!state->i2c_avlink || !state->i2c_cec || !state->i2c_infoframe ||
-	    !state->i2c_sdp_io || !state->i2c_sdp || !state->i2c_afe ||
-	    !state->i2c_repeater || !state->i2c_edid || !state->i2c_hdmi ||
-	    !state->i2c_cp || !state->i2c_vdp) {
+	if (adv7842_register_clients(sd) < 0) {
 		err = -ENOMEM;
 		v4l2_err(sd, "failed to create all i2c clients\n");
 		goto err_i2c;
@@ -2879,7 +3057,7 @@
 	if (err)
 		goto err_work_queues;
 
-	err = adv7842_core_init(sd, pdata);
+	err = adv7842_core_init(sd);
 	if (err)
 		goto err_entity;
 
@@ -2893,7 +3071,7 @@
 	cancel_delayed_work(&state->delayed_work_enable_hotplug);
 	destroy_workqueue(state->work_queues);
 err_i2c:
-	adv7842_unregister_clients(state);
+	adv7842_unregister_clients(sd);
 err_hdl:
 	v4l2_ctrl_handler_free(hdl);
 	return err;
@@ -2912,7 +3090,7 @@
 	destroy_workqueue(state->work_queues);
 	v4l2_device_unregister_subdev(sd);
 	media_entity_cleanup(&sd->entity);
-	adv7842_unregister_clients(to_state(sd));
+	adv7842_unregister_clients(sd);
 	v4l2_ctrl_handler_free(sd->ctrl_handler);
 	return 0;
 }
diff --git a/drivers/media/i2c/lm3560.c b/drivers/media/i2c/lm3560.c
index 3317a9a..d98ca3a 100644
--- a/drivers/media/i2c/lm3560.c
+++ b/drivers/media/i2c/lm3560.c
@@ -172,28 +172,28 @@
 static int lm3560_get_ctrl(struct v4l2_ctrl *ctrl, enum lm3560_led_id led_no)
 {
 	struct lm3560_flash *flash = to_lm3560_flash(ctrl, led_no);
+	int rval = -EINVAL;
 
 	mutex_lock(&flash->lock);
 
 	if (ctrl->id == V4L2_CID_FLASH_FAULT) {
-		int rval;
 		s32 fault = 0;
 		unsigned int reg_val;
 		rval = regmap_read(flash->regmap, REG_FLAG, &reg_val);
 		if (rval < 0)
-			return rval;
-		if (rval & FAULT_SHORT_CIRCUIT)
+			goto out;
+		if (reg_val & FAULT_SHORT_CIRCUIT)
 			fault |= V4L2_FLASH_FAULT_SHORT_CIRCUIT;
-		if (rval & FAULT_OVERTEMP)
+		if (reg_val & FAULT_OVERTEMP)
 			fault |= V4L2_FLASH_FAULT_OVER_TEMPERATURE;
-		if (rval & FAULT_TIMEOUT)
+		if (reg_val & FAULT_TIMEOUT)
 			fault |= V4L2_FLASH_FAULT_TIMEOUT;
 		ctrl->cur.val = fault;
-		return 0;
 	}
 
+out:
 	mutex_unlock(&flash->lock);
-	return -EINVAL;
+	return rval;
 }
 
 static int lm3560_set_ctrl(struct v4l2_ctrl *ctrl, enum lm3560_led_id led_no)
@@ -219,15 +219,19 @@
 		break;
 
 	case V4L2_CID_FLASH_STROBE:
-		if (flash->led_mode != V4L2_FLASH_LED_MODE_FLASH)
-			return -EBUSY;
+		if (flash->led_mode != V4L2_FLASH_LED_MODE_FLASH) {
+			rval = -EBUSY;
+			goto err_out;
+		}
 		flash->led_mode = V4L2_FLASH_LED_MODE_FLASH;
 		rval = lm3560_mode_ctrl(flash);
 		break;
 
 	case V4L2_CID_FLASH_STROBE_STOP:
-		if (flash->led_mode != V4L2_FLASH_LED_MODE_FLASH)
-			return -EBUSY;
+		if (flash->led_mode != V4L2_FLASH_LED_MODE_FLASH) {
+			rval = -EBUSY;
+			goto err_out;
+		}
 		flash->led_mode = V4L2_FLASH_LED_MODE_NONE;
 		rval = lm3560_mode_ctrl(flash);
 		break;
@@ -247,8 +251,8 @@
 		break;
 	}
 
-	mutex_unlock(&flash->lock);
 err_out:
+	mutex_unlock(&flash->lock);
 	return rval;
 }
 
@@ -444,14 +448,14 @@
 	if (rval < 0)
 		return rval;
 
+	i2c_set_clientdata(client, flash);
+
 	return 0;
 }
 
 static int lm3560_remove(struct i2c_client *client)
 {
-	struct v4l2_subdev *subdev = i2c_get_clientdata(client);
-	struct lm3560_flash *flash = container_of(subdev, struct lm3560_flash,
-						  subdev_led[LM3560_LED_MAX]);
+	struct lm3560_flash *flash = i2c_get_clientdata(client);
 	unsigned int i;
 
 	for (i = LM3560_LED0; i < LM3560_LED_MAX; i++) {
diff --git a/drivers/media/i2c/mt9m032.c b/drivers/media/i2c/mt9m032.c
index 846b15f..85ec3ba 100644
--- a/drivers/media/i2c/mt9m032.c
+++ b/drivers/media/i2c/mt9m032.c
@@ -459,13 +459,15 @@
 			  MT9M032_COLUMN_START_MAX);
 	rect.top = clamp(ALIGN(crop->rect.top, 2), MT9M032_ROW_START_MIN,
 			 MT9M032_ROW_START_MAX);
-	rect.width = clamp(ALIGN(crop->rect.width, 2), MT9M032_COLUMN_SIZE_MIN,
-			   MT9M032_COLUMN_SIZE_MAX);
-	rect.height = clamp(ALIGN(crop->rect.height, 2), MT9M032_ROW_SIZE_MIN,
-			    MT9M032_ROW_SIZE_MAX);
+	rect.width = clamp_t(unsigned int, ALIGN(crop->rect.width, 2),
+			     MT9M032_COLUMN_SIZE_MIN, MT9M032_COLUMN_SIZE_MAX);
+	rect.height = clamp_t(unsigned int, ALIGN(crop->rect.height, 2),
+			      MT9M032_ROW_SIZE_MIN, MT9M032_ROW_SIZE_MAX);
 
-	rect.width = min(rect.width, MT9M032_PIXEL_ARRAY_WIDTH - rect.left);
-	rect.height = min(rect.height, MT9M032_PIXEL_ARRAY_HEIGHT - rect.top);
+	rect.width = min_t(unsigned int, rect.width,
+			   MT9M032_PIXEL_ARRAY_WIDTH - rect.left);
+	rect.height = min_t(unsigned int, rect.height,
+			    MT9M032_PIXEL_ARRAY_HEIGHT - rect.top);
 
 	__crop = __mt9m032_get_pad_crop(sensor, fh, crop->which);
 
diff --git a/drivers/media/i2c/mt9p031.c b/drivers/media/i2c/mt9p031.c
index 1c2303d..e5ddf47 100644
--- a/drivers/media/i2c/mt9p031.c
+++ b/drivers/media/i2c/mt9p031.c
@@ -519,11 +519,13 @@
 
 	/* Clamp the width and height to avoid dividing by zero. */
 	width = clamp_t(unsigned int, ALIGN(format->format.width, 2),
-			max(__crop->width / 7, MT9P031_WINDOW_WIDTH_MIN),
+			max_t(unsigned int, __crop->width / 7,
+			      MT9P031_WINDOW_WIDTH_MIN),
 			__crop->width);
 	height = clamp_t(unsigned int, ALIGN(format->format.height, 2),
-			max(__crop->height / 8, MT9P031_WINDOW_HEIGHT_MIN),
-			__crop->height);
+			 max_t(unsigned int, __crop->height / 8,
+			       MT9P031_WINDOW_HEIGHT_MIN),
+			 __crop->height);
 
 	hratio = DIV_ROUND_CLOSEST(__crop->width, width);
 	vratio = DIV_ROUND_CLOSEST(__crop->height, height);
@@ -565,15 +567,17 @@
 			  MT9P031_COLUMN_START_MAX);
 	rect.top = clamp(ALIGN(crop->rect.top, 2), MT9P031_ROW_START_MIN,
 			 MT9P031_ROW_START_MAX);
-	rect.width = clamp(ALIGN(crop->rect.width, 2),
-			   MT9P031_WINDOW_WIDTH_MIN,
-			   MT9P031_WINDOW_WIDTH_MAX);
-	rect.height = clamp(ALIGN(crop->rect.height, 2),
-			    MT9P031_WINDOW_HEIGHT_MIN,
-			    MT9P031_WINDOW_HEIGHT_MAX);
+	rect.width = clamp_t(unsigned int, ALIGN(crop->rect.width, 2),
+			     MT9P031_WINDOW_WIDTH_MIN,
+			     MT9P031_WINDOW_WIDTH_MAX);
+	rect.height = clamp_t(unsigned int, ALIGN(crop->rect.height, 2),
+			      MT9P031_WINDOW_HEIGHT_MIN,
+			      MT9P031_WINDOW_HEIGHT_MAX);
 
-	rect.width = min(rect.width, MT9P031_PIXEL_ARRAY_WIDTH - rect.left);
-	rect.height = min(rect.height, MT9P031_PIXEL_ARRAY_HEIGHT - rect.top);
+	rect.width = min_t(unsigned int, rect.width,
+			   MT9P031_PIXEL_ARRAY_WIDTH - rect.left);
+	rect.height = min_t(unsigned int, rect.height,
+			    MT9P031_PIXEL_ARRAY_HEIGHT - rect.top);
 
 	__crop = __mt9p031_get_pad_crop(mt9p031, fh, crop->pad, crop->which);
 
diff --git a/drivers/media/i2c/mt9t001.c b/drivers/media/i2c/mt9t001.c
index 7964634..d41c70e 100644
--- a/drivers/media/i2c/mt9t001.c
+++ b/drivers/media/i2c/mt9t001.c
@@ -291,10 +291,12 @@
 
 	/* Clamp the width and height to avoid dividing by zero. */
 	width = clamp_t(unsigned int, ALIGN(format->format.width, 2),
-			max(__crop->width / 8, MT9T001_WINDOW_HEIGHT_MIN + 1),
+			max_t(unsigned int, __crop->width / 8,
+			      MT9T001_WINDOW_HEIGHT_MIN + 1),
 			__crop->width);
 	height = clamp_t(unsigned int, ALIGN(format->format.height, 2),
-			 max(__crop->height / 8, MT9T001_WINDOW_HEIGHT_MIN + 1),
+			 max_t(unsigned int, __crop->height / 8,
+			       MT9T001_WINDOW_HEIGHT_MIN + 1),
 			 __crop->height);
 
 	hratio = DIV_ROUND_CLOSEST(__crop->width, width);
@@ -339,15 +341,17 @@
 	rect.top = clamp(ALIGN(crop->rect.top, 2),
 			 MT9T001_ROW_START_MIN,
 			 MT9T001_ROW_START_MAX);
-	rect.width = clamp(ALIGN(crop->rect.width, 2),
-			   MT9T001_WINDOW_WIDTH_MIN + 1,
-			   MT9T001_WINDOW_WIDTH_MAX + 1);
-	rect.height = clamp(ALIGN(crop->rect.height, 2),
-			    MT9T001_WINDOW_HEIGHT_MIN + 1,
-			    MT9T001_WINDOW_HEIGHT_MAX + 1);
+	rect.width = clamp_t(unsigned int, ALIGN(crop->rect.width, 2),
+			     MT9T001_WINDOW_WIDTH_MIN + 1,
+			     MT9T001_WINDOW_WIDTH_MAX + 1);
+	rect.height = clamp_t(unsigned int, ALIGN(crop->rect.height, 2),
+			      MT9T001_WINDOW_HEIGHT_MIN + 1,
+			      MT9T001_WINDOW_HEIGHT_MAX + 1);
 
-	rect.width = min(rect.width, MT9T001_PIXEL_ARRAY_WIDTH - rect.left);
-	rect.height = min(rect.height, MT9T001_PIXEL_ARRAY_HEIGHT - rect.top);
+	rect.width = min_t(unsigned int, rect.width,
+			   MT9T001_PIXEL_ARRAY_WIDTH - rect.left);
+	rect.height = min_t(unsigned int, rect.height,
+			    MT9T001_PIXEL_ARRAY_HEIGHT - rect.top);
 
 	__crop = __mt9t001_get_pad_crop(mt9t001, fh, crop->pad, crop->which);
 
diff --git a/drivers/media/i2c/mt9v032.c b/drivers/media/i2c/mt9v032.c
index 2c50eff..36c504b 100644
--- a/drivers/media/i2c/mt9v032.c
+++ b/drivers/media/i2c/mt9v032.c
@@ -27,14 +27,16 @@
 #include <media/v4l2-device.h>
 #include <media/v4l2-subdev.h>
 
-#define MT9V032_PIXEL_ARRAY_HEIGHT			492
-#define MT9V032_PIXEL_ARRAY_WIDTH			782
+/* The first four rows are black rows. The active area spans 753x481 pixels. */
+#define MT9V032_PIXEL_ARRAY_HEIGHT			485
+#define MT9V032_PIXEL_ARRAY_WIDTH			753
 
 #define MT9V032_SYSCLK_FREQ_DEF				26600000
 
 #define MT9V032_CHIP_VERSION				0x00
 #define		MT9V032_CHIP_ID_REV1			0x1311
 #define		MT9V032_CHIP_ID_REV3			0x1313
+#define		MT9V034_CHIP_ID_REV1			0x1324
 #define MT9V032_COLUMN_START				0x01
 #define		MT9V032_COLUMN_START_MIN		1
 #define		MT9V032_COLUMN_START_DEF		1
@@ -53,12 +55,15 @@
 #define		MT9V032_WINDOW_WIDTH_MAX		752
 #define MT9V032_HORIZONTAL_BLANKING			0x05
 #define		MT9V032_HORIZONTAL_BLANKING_MIN		43
+#define		MT9V034_HORIZONTAL_BLANKING_MIN		61
 #define		MT9V032_HORIZONTAL_BLANKING_DEF		94
 #define		MT9V032_HORIZONTAL_BLANKING_MAX		1023
 #define MT9V032_VERTICAL_BLANKING			0x06
 #define		MT9V032_VERTICAL_BLANKING_MIN		4
+#define		MT9V034_VERTICAL_BLANKING_MIN		2
 #define		MT9V032_VERTICAL_BLANKING_DEF		45
 #define		MT9V032_VERTICAL_BLANKING_MAX		3000
+#define		MT9V034_VERTICAL_BLANKING_MAX		32288
 #define MT9V032_CHIP_CONTROL				0x07
 #define		MT9V032_CHIP_CONTROL_MASTER_MODE	(1 << 3)
 #define		MT9V032_CHIP_CONTROL_DOUT_ENABLE	(1 << 7)
@@ -68,8 +73,10 @@
 #define MT9V032_SHUTTER_WIDTH_CONTROL			0x0a
 #define MT9V032_TOTAL_SHUTTER_WIDTH			0x0b
 #define		MT9V032_TOTAL_SHUTTER_WIDTH_MIN		1
+#define		MT9V034_TOTAL_SHUTTER_WIDTH_MIN		0
 #define		MT9V032_TOTAL_SHUTTER_WIDTH_DEF		480
 #define		MT9V032_TOTAL_SHUTTER_WIDTH_MAX		32767
+#define		MT9V034_TOTAL_SHUTTER_WIDTH_MAX		32765
 #define MT9V032_RESET					0x0c
 #define MT9V032_READ_MODE				0x0d
 #define		MT9V032_READ_MODE_ROW_BIN_MASK		(3 << 0)
@@ -81,6 +88,8 @@
 #define		MT9V032_READ_MODE_DARK_COLUMNS		(1 << 6)
 #define		MT9V032_READ_MODE_DARK_ROWS		(1 << 7)
 #define MT9V032_PIXEL_OPERATION_MODE			0x0f
+#define		MT9V034_PIXEL_OPERATION_MODE_HDR	(1 << 0)
+#define		MT9V034_PIXEL_OPERATION_MODE_COLOR	(1 << 1)
 #define		MT9V032_PIXEL_OPERATION_MODE_COLOR	(1 << 2)
 #define		MT9V032_PIXEL_OPERATION_MODE_HDR	(1 << 6)
 #define MT9V032_ANALOG_GAIN				0x35
@@ -96,9 +105,12 @@
 #define		MT9V032_DARK_AVG_HIGH_THRESH_MASK	(255 << 8)
 #define		MT9V032_DARK_AVG_HIGH_THRESH_SHIFT	8
 #define MT9V032_ROW_NOISE_CORR_CONTROL			0x70
+#define		MT9V034_ROW_NOISE_CORR_ENABLE		(1 << 0)
+#define		MT9V034_ROW_NOISE_CORR_USE_BLK_AVG	(1 << 1)
 #define		MT9V032_ROW_NOISE_CORR_ENABLE		(1 << 5)
 #define		MT9V032_ROW_NOISE_CORR_USE_BLK_AVG	(1 << 7)
 #define MT9V032_PIXEL_CLOCK				0x74
+#define MT9V034_PIXEL_CLOCK				0x72
 #define		MT9V032_PIXEL_CLOCK_INV_LINE		(1 << 0)
 #define		MT9V032_PIXEL_CLOCK_INV_FRAME		(1 << 1)
 #define		MT9V032_PIXEL_CLOCK_XOR_LINE		(1 << 2)
@@ -120,12 +132,88 @@
 #define		MT9V032_AGC_ENABLE			(1 << 1)
 #define MT9V032_THERMAL_INFO				0xc1
 
+enum mt9v032_model {
+	MT9V032_MODEL_V032_COLOR,
+	MT9V032_MODEL_V032_MONO,
+	MT9V032_MODEL_V034_COLOR,
+	MT9V032_MODEL_V034_MONO,
+};
+
+struct mt9v032_model_version {
+	unsigned int version;
+	const char *name;
+};
+
+struct mt9v032_model_data {
+	unsigned int min_row_time;
+	unsigned int min_hblank;
+	unsigned int min_vblank;
+	unsigned int max_vblank;
+	unsigned int min_shutter;
+	unsigned int max_shutter;
+	unsigned int pclk_reg;
+};
+
+struct mt9v032_model_info {
+	const struct mt9v032_model_data *data;
+	bool color;
+};
+
+static const struct mt9v032_model_version mt9v032_versions[] = {
+	{ MT9V032_CHIP_ID_REV1, "MT9V032 rev1/2" },
+	{ MT9V032_CHIP_ID_REV3, "MT9V032 rev3" },
+	{ MT9V034_CHIP_ID_REV1, "MT9V034 rev1" },
+};
+
+static const struct mt9v032_model_data mt9v032_model_data[] = {
+	{
+		/* MT9V032 revisions 1/2/3 */
+		.min_row_time = 660,
+		.min_hblank = MT9V032_HORIZONTAL_BLANKING_MIN,
+		.min_vblank = MT9V032_VERTICAL_BLANKING_MIN,
+		.max_vblank = MT9V032_VERTICAL_BLANKING_MAX,
+		.min_shutter = MT9V032_TOTAL_SHUTTER_WIDTH_MIN,
+		.max_shutter = MT9V032_TOTAL_SHUTTER_WIDTH_MAX,
+		.pclk_reg = MT9V032_PIXEL_CLOCK,
+	}, {
+		/* MT9V034 */
+		.min_row_time = 690,
+		.min_hblank = MT9V034_HORIZONTAL_BLANKING_MIN,
+		.min_vblank = MT9V034_VERTICAL_BLANKING_MIN,
+		.max_vblank = MT9V034_VERTICAL_BLANKING_MAX,
+		.min_shutter = MT9V034_TOTAL_SHUTTER_WIDTH_MIN,
+		.max_shutter = MT9V034_TOTAL_SHUTTER_WIDTH_MAX,
+		.pclk_reg = MT9V034_PIXEL_CLOCK,
+	},
+};
+
+static const struct mt9v032_model_info mt9v032_models[] = {
+	[MT9V032_MODEL_V032_COLOR] = {
+		.data = &mt9v032_model_data[0],
+		.color = true,
+	},
+	[MT9V032_MODEL_V032_MONO] = {
+		.data = &mt9v032_model_data[0],
+		.color = false,
+	},
+	[MT9V032_MODEL_V034_COLOR] = {
+		.data = &mt9v032_model_data[1],
+		.color = true,
+	},
+	[MT9V032_MODEL_V034_MONO] = {
+		.data = &mt9v032_model_data[1],
+		.color = false,
+	},
+};
+
 struct mt9v032 {
 	struct v4l2_subdev subdev;
 	struct media_pad pad;
 
 	struct v4l2_mbus_framefmt format;
 	struct v4l2_rect crop;
+	unsigned int hratio;
+	unsigned int vratio;
 
 	struct v4l2_ctrl_handler ctrls;
 	struct {
@@ -139,6 +227,8 @@
 	struct clk *clk;
 
 	struct mt9v032_platform_data *pdata;
+	const struct mt9v032_model_info *model;
+	const struct mt9v032_model_version *version;
 
 	u32 sysclk;
 	u16 chip_control;
@@ -210,13 +300,18 @@
 {
 	struct i2c_client *client = v4l2_get_subdevdata(&mt9v032->subdev);
 	struct v4l2_rect *crop = &mt9v032->crop;
+	unsigned int min_hblank = mt9v032->model->data->min_hblank;
+	unsigned int hblank;
 
-	return mt9v032_write(client, MT9V032_HORIZONTAL_BLANKING,
-			     max_t(s32, mt9v032->hblank, 660 - crop->width));
+	if (mt9v032->version->version == MT9V034_CHIP_ID_REV1)
+		min_hblank += (mt9v032->hratio - 1) * 10;
+	min_hblank = max_t(int, mt9v032->model->data->min_row_time - crop->width,
+			   min_hblank);
+	hblank = max_t(unsigned int, mt9v032->hblank, min_hblank);
+
+	return mt9v032_write(client, MT9V032_HORIZONTAL_BLANKING, hblank);
 }
 
-#define EXT_CLK		25000000
-
 static int mt9v032_power_on(struct mt9v032 *mt9v032)
 {
 	struct i2c_client *client = v4l2_get_subdevdata(&mt9v032->subdev);
@@ -259,7 +354,7 @@
 
 	/* Configure the pixel clock polarity */
 	if (mt9v032->pdata && mt9v032->pdata->clk_pol) {
-		ret = mt9v032_write(client, MT9V032_PIXEL_CLOCK,
+		ret = mt9v032_write(client, mt9v032->model->data->pclk_reg,
 				MT9V032_PIXEL_CLOCK_INV_PXL_CLK);
 		if (ret < 0)
 			return ret;
@@ -312,22 +407,20 @@
 		       | MT9V032_CHIP_CONTROL_SEQUENTIAL;
 	struct i2c_client *client = v4l2_get_subdevdata(subdev);
 	struct mt9v032 *mt9v032 = to_mt9v032(subdev);
-	struct v4l2_mbus_framefmt *format = &mt9v032->format;
 	struct v4l2_rect *crop = &mt9v032->crop;
-	unsigned int hratio;
-	unsigned int vratio;
+	unsigned int hbin;
+	unsigned int vbin;
 	int ret;
 
 	if (!enable)
 		return mt9v032_set_chip_control(mt9v032, mode, 0);
 
 	/* Configure the window size and row/column bin */
-	hratio = DIV_ROUND_CLOSEST(crop->width, format->width);
-	vratio = DIV_ROUND_CLOSEST(crop->height, format->height);
-
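+	/* The bin fields hold log2 of the binning ratio (1, 2 or 4). */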
+	hbin = fls(mt9v032->hratio) - 1;
+	vbin = fls(mt9v032->vratio) - 1;
 	ret = mt9v032_write(client, MT9V032_READ_MODE,
-		    (hratio - 1) << MT9V032_READ_MODE_ROW_BIN_SHIFT |
-		    (vratio - 1) << MT9V032_READ_MODE_COLUMN_BIN_SHIFT);
+			    hbin << MT9V032_READ_MODE_COLUMN_BIN_SHIFT |
+			    vbin << MT9V032_READ_MODE_ROW_BIN_SHIFT);
 	if (ret < 0)
 		return ret;
 
@@ -370,12 +463,12 @@
 				   struct v4l2_subdev_fh *fh,
 				   struct v4l2_subdev_frame_size_enum *fse)
 {
-	if (fse->index >= 8 || fse->code != V4L2_MBUS_FMT_SGRBG10_1X10)
+	if (fse->index >= 3 || fse->code != V4L2_MBUS_FMT_SGRBG10_1X10)
 		return -EINVAL;
 
-	fse->min_width = MT9V032_WINDOW_WIDTH_DEF / fse->index;
+	fse->min_width = MT9V032_WINDOW_WIDTH_DEF / (1 << fse->index);
 	fse->max_width = fse->min_width;
-	fse->min_height = MT9V032_WINDOW_HEIGHT_DEF / fse->index;
+	fse->min_height = MT9V032_WINDOW_HEIGHT_DEF / (1 << fse->index);
 	fse->max_height = fse->min_height;
 
 	return 0;
@@ -392,18 +485,30 @@
 	return 0;
 }
 
-static void mt9v032_configure_pixel_rate(struct mt9v032 *mt9v032,
-					 unsigned int hratio)
+static void mt9v032_configure_pixel_rate(struct mt9v032 *mt9v032)
 {
 	struct i2c_client *client = v4l2_get_subdevdata(&mt9v032->subdev);
 	int ret;
 
 	ret = v4l2_ctrl_s_ctrl_int64(mt9v032->pixel_rate,
-				     mt9v032->sysclk / hratio);
+				     mt9v032->sysclk / mt9v032->hratio);
 	if (ret < 0)
 		dev_warn(&client->dev, "failed to set pixel rate (%d)\n", ret);
 }
 
+static unsigned int mt9v032_calc_ratio(unsigned int input, unsigned int output)
+{
+	/* Compute the power-of-two binning factor closest to the input size to
+	 * output size ratio. Given that the output size is bounded by input/4
+	 * and input, a generic implementation would be an ineffective luxury.
+	 */
+	if (output * 3 > input * 2)
+		return 1;
+	if (output * 3 > input)
+		return 2;
+	return 4;
+}
+
 static int mt9v032_set_format(struct v4l2_subdev *subdev,
 			      struct v4l2_subdev_fh *fh,
 			      struct v4l2_subdev_format *format)
@@ -420,22 +525,28 @@
 					format->which);
 
 	/* Clamp the width and height to avoid dividing by zero. */
-	width = clamp_t(unsigned int, ALIGN(format->format.width, 2),
-			max(__crop->width / 8, MT9V032_WINDOW_WIDTH_MIN),
-			__crop->width);
-	height = clamp_t(unsigned int, ALIGN(format->format.height, 2),
-			 max(__crop->height / 8, MT9V032_WINDOW_HEIGHT_MIN),
-			 __crop->height);
+	width = clamp(ALIGN(format->format.width, 2),
+		      max_t(unsigned int, __crop->width / 4,
+			    MT9V032_WINDOW_WIDTH_MIN),
+		      __crop->width);
+	height = clamp(ALIGN(format->format.height, 2),
+		       max_t(unsigned int, __crop->height / 4,
+			     MT9V032_WINDOW_HEIGHT_MIN),
+		       __crop->height);
 
-	hratio = DIV_ROUND_CLOSEST(__crop->width, width);
-	vratio = DIV_ROUND_CLOSEST(__crop->height, height);
+	hratio = mt9v032_calc_ratio(__crop->width, width);
+	vratio = mt9v032_calc_ratio(__crop->height, height);
 
 	__format = __mt9v032_get_pad_format(mt9v032, fh, format->pad,
 					    format->which);
 	__format->width = __crop->width / hratio;
 	__format->height = __crop->height / vratio;
-	if (format->which == V4L2_SUBDEV_FORMAT_ACTIVE)
-		mt9v032_configure_pixel_rate(mt9v032, hratio);
+
+	if (format->which == V4L2_SUBDEV_FORMAT_ACTIVE) {
+		mt9v032->hratio = hratio;
+		mt9v032->vratio = vratio;
+		mt9v032_configure_pixel_rate(mt9v032);
+	}
 
 	format->format = *__format;
 
@@ -471,15 +582,17 @@
 	rect.top = clamp(ALIGN(crop->rect.top + 1, 2) - 1,
 			 MT9V032_ROW_START_MIN,
 			 MT9V032_ROW_START_MAX);
-	rect.width = clamp(ALIGN(crop->rect.width, 2),
-			   MT9V032_WINDOW_WIDTH_MIN,
-			   MT9V032_WINDOW_WIDTH_MAX);
-	rect.height = clamp(ALIGN(crop->rect.height, 2),
-			    MT9V032_WINDOW_HEIGHT_MIN,
-			    MT9V032_WINDOW_HEIGHT_MAX);
+	rect.width = clamp_t(unsigned int, ALIGN(crop->rect.width, 2),
+			     MT9V032_WINDOW_WIDTH_MIN,
+			     MT9V032_WINDOW_WIDTH_MAX);
+	rect.height = clamp_t(unsigned int, ALIGN(crop->rect.height, 2),
+			      MT9V032_WINDOW_HEIGHT_MIN,
+			      MT9V032_WINDOW_HEIGHT_MAX);
 
-	rect.width = min(rect.width, MT9V032_PIXEL_ARRAY_WIDTH - rect.left);
-	rect.height = min(rect.height, MT9V032_PIXEL_ARRAY_HEIGHT - rect.top);
+	rect.width = min_t(unsigned int,
+			   rect.width, MT9V032_PIXEL_ARRAY_WIDTH - rect.left);
+	rect.height = min_t(unsigned int,
+			    rect.height, MT9V032_PIXEL_ARRAY_HEIGHT - rect.top);
 
 	__crop = __mt9v032_get_pad_crop(mt9v032, fh, crop->pad, crop->which);
 
@@ -491,8 +604,11 @@
 						    crop->which);
 		__format->width = rect.width;
 		__format->height = rect.height;
-		if (crop->which == V4L2_SUBDEV_FORMAT_ACTIVE)
-			mt9v032_configure_pixel_rate(mt9v032, 1);
+		if (crop->which == V4L2_SUBDEV_FORMAT_ACTIVE) {
+			mt9v032->hratio = 1;
+			mt9v032->vratio = 1;
+			mt9v032_configure_pixel_rate(mt9v032);
+		}
 	}
 
 	*__crop = rect;
@@ -641,7 +757,8 @@
 {
 	struct i2c_client *client = v4l2_get_subdevdata(subdev);
 	struct mt9v032 *mt9v032 = to_mt9v032(subdev);
-	s32 data;
+	unsigned int i;
+	s32 version;
 	int ret;
 
 	dev_info(&client->dev, "Probing MT9V032 at address 0x%02x\n",
@@ -654,25 +771,38 @@
 	}
 
 	/* Read and check the sensor version */
-	data = mt9v032_read(client, MT9V032_CHIP_VERSION);
-	if (data != MT9V032_CHIP_ID_REV1 && data != MT9V032_CHIP_ID_REV3) {
-		dev_err(&client->dev, "MT9V032 not detected, wrong version "
-				"0x%04x\n", data);
+	version = mt9v032_read(client, MT9V032_CHIP_VERSION);
+	if (version < 0) {
+		dev_err(&client->dev, "Failed reading chip version\n");
+		return version;
+	}
+
+	for (i = 0; i < ARRAY_SIZE(mt9v032_versions); ++i) {
+		if (mt9v032_versions[i].version == version) {
+			mt9v032->version = &mt9v032_versions[i];
+			break;
+		}
+	}
+
+	if (mt9v032->version == NULL) {
+		dev_err(&client->dev, "Unsupported chip version 0x%04x\n",
+			version);
 		return -ENODEV;
 	}
 
 	mt9v032_power_off(mt9v032);
 
-	dev_info(&client->dev, "MT9V032 detected at address 0x%02x\n",
-			client->addr);
+	dev_info(&client->dev, "%s detected at address 0x%02x\n",
+		 mt9v032->version->name, client->addr);
 
-	mt9v032_configure_pixel_rate(mt9v032, 1);
+	mt9v032_configure_pixel_rate(mt9v032);
 
 	return ret;
 }
 
 static int mt9v032_open(struct v4l2_subdev *subdev, struct v4l2_subdev_fh *fh)
 {
+	struct mt9v032 *mt9v032 = to_mt9v032(subdev);
 	struct v4l2_mbus_framefmt *format;
 	struct v4l2_rect *crop;
 
@@ -683,7 +813,12 @@
 	crop->height = MT9V032_WINDOW_HEIGHT_DEF;
 
 	format = v4l2_subdev_get_try_format(fh, 0);
-	format->code = V4L2_MBUS_FMT_SGRBG10_1X10;
+
+	if (mt9v032->model->color)
+		format->code = V4L2_MBUS_FMT_SGRBG10_1X10;
+	else
+		format->code = V4L2_MBUS_FMT_Y10_1X10;
+
 	format->width = MT9V032_WINDOW_WIDTH_DEF;
 	format->height = MT9V032_WINDOW_HEIGHT_DEF;
 	format->field = V4L2_FIELD_NONE;
@@ -755,6 +890,7 @@
 
 	mutex_init(&mt9v032->power_lock);
 	mt9v032->pdata = pdata;
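+	/* The matched i2c_device_id carries a pointer to the model info. */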
+	mt9v032->model = (const void *)did->driver_data;
 
 	v4l2_ctrl_handler_init(&mt9v032->ctrls, 10);
 
@@ -767,16 +903,16 @@
 			       V4L2_CID_EXPOSURE_AUTO, V4L2_EXPOSURE_MANUAL, 0,
 			       V4L2_EXPOSURE_AUTO);
 	v4l2_ctrl_new_std(&mt9v032->ctrls, &mt9v032_ctrl_ops,
-			  V4L2_CID_EXPOSURE, MT9V032_TOTAL_SHUTTER_WIDTH_MIN,
-			  MT9V032_TOTAL_SHUTTER_WIDTH_MAX, 1,
+			  V4L2_CID_EXPOSURE, mt9v032->model->data->min_shutter,
+			  mt9v032->model->data->max_shutter, 1,
 			  MT9V032_TOTAL_SHUTTER_WIDTH_DEF);
 	v4l2_ctrl_new_std(&mt9v032->ctrls, &mt9v032_ctrl_ops,
-			  V4L2_CID_HBLANK, MT9V032_HORIZONTAL_BLANKING_MIN,
+			  V4L2_CID_HBLANK, mt9v032->model->data->min_hblank,
 			  MT9V032_HORIZONTAL_BLANKING_MAX, 1,
 			  MT9V032_HORIZONTAL_BLANKING_DEF);
 	v4l2_ctrl_new_std(&mt9v032->ctrls, &mt9v032_ctrl_ops,
-			  V4L2_CID_VBLANK, MT9V032_VERTICAL_BLANKING_MIN,
-			  MT9V032_VERTICAL_BLANKING_MAX, 1,
+			  V4L2_CID_VBLANK, mt9v032->model->data->min_vblank,
+			  mt9v032->model->data->max_vblank, 1,
 			  MT9V032_VERTICAL_BLANKING_DEF);
 	mt9v032->test_pattern = v4l2_ctrl_new_std_menu_items(&mt9v032->ctrls,
 				&mt9v032_ctrl_ops, V4L2_CID_TEST_PATTERN,
@@ -819,12 +955,19 @@
 	mt9v032->crop.width = MT9V032_WINDOW_WIDTH_DEF;
 	mt9v032->crop.height = MT9V032_WINDOW_HEIGHT_DEF;
 
-	mt9v032->format.code = V4L2_MBUS_FMT_SGRBG10_1X10;
+	if (mt9v032->model->color)
+		mt9v032->format.code = V4L2_MBUS_FMT_SGRBG10_1X10;
+	else
+		mt9v032->format.code = V4L2_MBUS_FMT_Y10_1X10;
+
 	mt9v032->format.width = MT9V032_WINDOW_WIDTH_DEF;
 	mt9v032->format.height = MT9V032_WINDOW_HEIGHT_DEF;
 	mt9v032->format.field = V4L2_FIELD_NONE;
 	mt9v032->format.colorspace = V4L2_COLORSPACE_SRGB;
 
+	mt9v032->hratio = 1;
+	mt9v032->vratio = 1;
+
 	mt9v032->aec_agc = MT9V032_AEC_ENABLE | MT9V032_AGC_ENABLE;
 	mt9v032->hblank = MT9V032_HORIZONTAL_BLANKING_DEF;
 	mt9v032->sysclk = MT9V032_SYSCLK_FREQ_DEF;
@@ -855,7 +998,10 @@
 }
 
 static const struct i2c_device_id mt9v032_id[] = {
-	{ "mt9v032", 0 },
+	{ "mt9v032", (kernel_ulong_t)&mt9v032_models[MT9V032_MODEL_V032_COLOR] },
+	{ "mt9v032m", (kernel_ulong_t)&mt9v032_models[MT9V032_MODEL_V032_MONO] },
+	{ "mt9v034", (kernel_ulong_t)&mt9v032_models[MT9V032_MODEL_V034_COLOR] },
+	{ "mt9v034m", (kernel_ulong_t)&mt9v032_models[MT9V032_MODEL_V034_MONO] },
 	{ }
 };
 MODULE_DEVICE_TABLE(i2c, mt9v032_id);
diff --git a/drivers/media/i2c/s5k5baf.c b/drivers/media/i2c/s5k5baf.c
new file mode 100644
index 0000000..77e10e0
--- /dev/null
+++ b/drivers/media/i2c/s5k5baf.c
@@ -0,0 +1,2053 @@
+/*
+ * Driver for Samsung S5K5BAF UXGA 1/5" 2M CMOS Image Sensor
+ * with embedded SoC ISP.
+ *
+ * Copyright (C) 2013, Samsung Electronics Co., Ltd.
+ * Andrzej Hajda <a.hajda@samsung.com>
+ *
+ * Based on S5K6AA driver authored by Sylwester Nawrocki
+ * Copyright (C) 2013, Samsung Electronics Co., Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/firmware.h>
+#include <linux/gpio.h>
+#include <linux/i2c.h>
+#include <linux/media.h>
+#include <linux/module.h>
+#include <linux/of_gpio.h>
+#include <linux/regulator/consumer.h>
+#include <linux/slab.h>
+
+#include <media/media-entity.h>
+#include <media/v4l2-ctrls.h>
+#include <media/v4l2-device.h>
+#include <media/v4l2-subdev.h>
+#include <media/v4l2-mediabus.h>
+#include <media/v4l2-of.h>
+
+static int debug;
+module_param(debug, int, 0644);
+
+#define S5K5BAF_DRIVER_NAME		"s5k5baf"
+#define S5K5BAF_DEFAULT_MCLK_FREQ	24000000U
+#define S5K5BAF_CLK_NAME		"mclk"
+
+#define S5K5BAF_FW_FILENAME		"s5k5baf-cfg.bin"
+#define S5K5BAF_FW_TAG			"SF00"
+#define S5K5BAG_FW_TAG_LEN		2
+#define S5K5BAG_FW_MAX_COUNT		16
+
+#define S5K5BAF_CIS_WIDTH		1600
+#define S5K5BAF_CIS_HEIGHT		1200
+#define S5K5BAF_WIN_WIDTH_MIN		8
+#define S5K5BAF_WIN_HEIGHT_MIN		8
+#define S5K5BAF_GAIN_RED_DEF		127
+#define S5K5BAF_GAIN_GREEN_DEF		95
+#define S5K5BAF_GAIN_BLUE_DEF		180
+/* Default number of MIPI CSI-2 data lanes used */
+#define S5K5BAF_DEF_NUM_LANES		1
+
+#define AHB_MSB_ADDR_PTR		0xfcfc
+
+/*
+ * Register interface pages (the most significant word of the address)
+ */
+#define PAGE_IF_HW			0xd000
+#define PAGE_IF_SW			0x7000
+
+/*
+ * H/W register Interface (PAGE_IF_HW)
+ */
+#define REG_SW_LOAD_COMPLETE		0x0014
+#define REG_CMDWR_PAGE			0x0028
+#define REG_CMDWR_ADDR			0x002a
+#define REG_CMDRD_PAGE			0x002c
+#define REG_CMDRD_ADDR			0x002e
+#define REG_CMD_BUF			0x0f12
+#define REG_SET_HOST_INT		0x1000
+#define REG_CLEAR_HOST_INT		0x1030
+#define REG_PATTERN_SET			0x3100
+#define REG_PATTERN_WIDTH		0x3118
+#define REG_PATTERN_HEIGHT		0x311a
+#define REG_PATTERN_PARAM		0x311c
+
+/*
+ * S/W register interface (PAGE_IF_SW)
+ */
+
+/* Firmware revision information */
+#define REG_FW_APIVER			0x012e
+#define  S5K5BAF_FW_APIVER		0x0001
+#define REG_FW_REVISION			0x0130
+#define REG_FW_SENSOR_ID		0x0152
+
+/* Initialization parameters */
+/* Master clock frequency in KHz */
+#define REG_I_INCLK_FREQ_L		0x01b8
+#define REG_I_INCLK_FREQ_H		0x01ba
+#define  MIN_MCLK_FREQ_KHZ		6000U
+#define  MAX_MCLK_FREQ_KHZ		48000U
+#define REG_I_USE_NPVI_CLOCKS		0x01c6
+#define  NPVI_CLOCKS			1
+#define REG_I_USE_NMIPI_CLOCKS		0x01c8
+#define  NMIPI_CLOCKS			1
+#define REG_I_BLOCK_INTERNAL_PLL_CALC	0x01ca
+
+/* Clock configurations, n = 0..2. REG_I_* frequency unit is 4 kHz. */
+#define REG_I_OPCLK_4KHZ(n)		((n) * 6 + 0x01cc)
+#define REG_I_MIN_OUTRATE_4KHZ(n)	((n) * 6 + 0x01ce)
+#define REG_I_MAX_OUTRATE_4KHZ(n)	((n) * 6 + 0x01d0)
+#define  SCLK_PVI_FREQ			24000
+#define  SCLK_MIPI_FREQ			48000
+#define  PCLK_MIN_FREQ			6000
+#define  PCLK_MAX_FREQ			48000
+#define REG_I_USE_REGS_API		0x01de
+#define REG_I_INIT_PARAMS_UPDATED	0x01e0
+#define REG_I_ERROR_INFO		0x01e2
+
+/* General purpose parameters */
+#define REG_USER_BRIGHTNESS		0x01e4
+#define REG_USER_CONTRAST		0x01e6
+#define REG_USER_SATURATION		0x01e8
+#define REG_USER_SHARPBLUR		0x01ea
+
+#define REG_G_SPEC_EFFECTS		0x01ee
+#define REG_G_ENABLE_PREV		0x01f0
+#define REG_G_ENABLE_PREV_CHG		0x01f2
+#define REG_G_NEW_CFG_SYNC		0x01f8
+#define REG_G_PREVREQ_IN_WIDTH		0x01fa
+#define REG_G_PREVREQ_IN_HEIGHT		0x01fc
+#define REG_G_PREVREQ_IN_XOFFS		0x01fe
+#define REG_G_PREVREQ_IN_YOFFS		0x0200
+#define REG_G_PREVZOOM_IN_WIDTH		0x020a
+#define REG_G_PREVZOOM_IN_HEIGHT	0x020c
+#define REG_G_PREVZOOM_IN_XOFFS		0x020e
+#define REG_G_PREVZOOM_IN_YOFFS		0x0210
+#define REG_G_INPUTS_CHANGE_REQ		0x021a
+#define REG_G_ACTIVE_PREV_CFG		0x021c
+#define REG_G_PREV_CFG_CHG		0x021e
+#define REG_G_PREV_OPEN_AFTER_CH	0x0220
+#define REG_G_PREV_CFG_ERROR		0x0222
+#define  CFG_ERROR_RANGE		0x0b
+#define REG_G_PREV_CFG_BYPASS_CHANGED	0x022a
+#define REG_G_ACTUAL_P_FR_TIME		0x023a
+#define REG_G_ACTUAL_P_OUT_RATE		0x023c
+#define REG_G_ACTUAL_C_FR_TIME		0x023e
+#define REG_G_ACTUAL_C_OUT_RATE		0x0240
+
+/* Preview control section. n = 0...4. */
+#define PREG(n, x)			((n) * 0x26 + x)
+#define REG_P_OUT_WIDTH(n)		PREG(n, 0x0242)
+#define REG_P_OUT_HEIGHT(n)		PREG(n, 0x0244)
+#define REG_P_FMT(n)			PREG(n, 0x0246)
+#define REG_P_MAX_OUT_RATE(n)		PREG(n, 0x0248)
+#define REG_P_MIN_OUT_RATE(n)		PREG(n, 0x024a)
+#define REG_P_PVI_MASK(n)		PREG(n, 0x024c)
+#define  PVI_MASK_MIPI			0x52
+#define REG_P_CLK_INDEX(n)		PREG(n, 0x024e)
+#define  CLK_PVI_INDEX			0
+#define  CLK_MIPI_INDEX			NPVI_CLOCKS
+#define REG_P_FR_RATE_TYPE(n)		PREG(n, 0x0250)
+#define  FR_RATE_DYNAMIC		0
+#define  FR_RATE_FIXED			1
+#define  FR_RATE_FIXED_ACCURATE		2
+#define REG_P_FR_RATE_Q_TYPE(n)		PREG(n, 0x0252)
+#define  FR_RATE_Q_DYNAMIC		0
+#define  FR_RATE_Q_BEST_FRRATE		1 /* Binning enabled */
+#define  FR_RATE_Q_BEST_QUALITY		2 /* Binning disabled */
+/* Frame period in 0.1 ms units */
+#define REG_P_MAX_FR_TIME(n)		PREG(n, 0x0254)
+#define REG_P_MIN_FR_TIME(n)		PREG(n, 0x0256)
+#define  S5K5BAF_MIN_FR_TIME		333  /* x100 us */
+#define  S5K5BAF_MAX_FR_TIME		6500 /* x100 us */
+/* The below 5 registers are for "device correction" values */
+#define REG_P_SATURATION(n)		PREG(n, 0x0258)
+#define REG_P_SHARP_BLUR(n)		PREG(n, 0x025a)
+#define REG_P_GLAMOUR(n)		PREG(n, 0x025c)
+#define REG_P_COLORTEMP(n)		PREG(n, 0x025e)
+#define REG_P_GAMMA_INDEX(n)		PREG(n, 0x0260)
+#define REG_P_PREV_MIRROR(n)		PREG(n, 0x0262)
+#define REG_P_CAP_MIRROR(n)		PREG(n, 0x0264)
+#define REG_P_CAP_ROTATION(n)		PREG(n, 0x0266)
+
+/* Extended image property controls */
+/* Exposure time in 10 us units */
+#define REG_SF_USR_EXPOSURE_L		0x03bc
+#define REG_SF_USR_EXPOSURE_H		0x03be
+#define REG_SF_USR_EXPOSURE_CHG		0x03c0
+#define REG_SF_USR_TOT_GAIN		0x03c2
+#define REG_SF_USR_TOT_GAIN_CHG		0x03c4
+#define REG_SF_RGAIN			0x03c6
+#define REG_SF_RGAIN_CHG		0x03c8
+#define REG_SF_GGAIN			0x03ca
+#define REG_SF_GGAIN_CHG		0x03cc
+#define REG_SF_BGAIN			0x03ce
+#define REG_SF_BGAIN_CHG		0x03d0
+#define REG_SF_WBGAIN_CHG		0x03d2
+#define REG_SF_FLICKER_QUANT		0x03d4
+#define REG_SF_FLICKER_QUANT_CHG	0x03d6
+
+/* Output interface (parallel/MIPI) setup */
+#define REG_OIF_EN_MIPI_LANES		0x03f2
+#define REG_OIF_EN_PACKETS		0x03f4
+#define  EN_PACKETS_CSI2		0xc3
+#define REG_OIF_CFG_CHG			0x03f6
+
+/* Auto-algorithms enable mask */
+#define REG_DBG_AUTOALG_EN		0x03f8
+#define  AALG_ALL_EN			BIT(0)
+#define  AALG_AE_EN			BIT(1)
+#define  AALG_DIVLEI_EN			BIT(2)
+#define  AALG_WB_EN			BIT(3)
+#define  AALG_USE_WB_FOR_ISP		BIT(4)
+#define  AALG_FLICKER_EN		BIT(5)
+#define  AALG_FIT_EN			BIT(6)
+#define  AALG_WRHW_EN			BIT(7)
+
+/* Pointers to color correction matrices */
+#define REG_PTR_CCM_HORIZON		0x06d0
+#define REG_PTR_CCM_INCANDESCENT	0x06d4
+#define REG_PTR_CCM_WARM_WHITE		0x06d8
+#define REG_PTR_CCM_COOL_WHITE		0x06dc
+#define REG_PTR_CCM_DL50		0x06e0
+#define REG_PTR_CCM_DL65		0x06e4
+#define REG_PTR_CCM_OUTDOOR		0x06ec
+
+#define REG_ARR_CCM(n)			(0x2800 + 36 * (n))
+
+static const char * const s5k5baf_supply_names[] = {
+	"vdda",		/* Analog power supply 2.8V (2.6V to 3.0V) */
+	"vddreg",	/* Regulator input power supply 1.8V (1.7V to 1.9V)
+			   or 2.8V (2.6V to 3.0V) */
+	"vddio",	/* I/O power supply 1.8V (1.65V to 1.95V)
+			   or 2.8V (2.5V to 3.1V) */
+};
+#define S5K5BAF_NUM_SUPPLIES ARRAY_SIZE(s5k5baf_supply_names)
+
+struct s5k5baf_gpio {
+	int gpio;
+	int level;
+};
+
+enum s5k5baf_gpio_id {
+	STBY,
+	RST,
+	NUM_GPIOS,
+};
+
+#define PAD_CIS 0
+#define PAD_OUT 1
+#define NUM_CIS_PADS 1
+#define NUM_ISP_PADS 2
+
+struct s5k5baf_pixfmt {
+	enum v4l2_mbus_pixelcode code;
+	u32 colorspace;
+	/* REG_P_FMT(x) register value */
+	u16 reg_p_fmt;
+};
+
+struct s5k5baf_ctrls {
+	struct v4l2_ctrl_handler handler;
+	struct { /* Auto / manual white balance cluster */
+		struct v4l2_ctrl *awb;
+		struct v4l2_ctrl *gain_red;
+		struct v4l2_ctrl *gain_blue;
+	};
+	struct { /* Mirror cluster */
+		struct v4l2_ctrl *hflip;
+		struct v4l2_ctrl *vflip;
+	};
+	struct { /* Auto exposure / manual exposure and gain cluster */
+		struct v4l2_ctrl *auto_exp;
+		struct v4l2_ctrl *exposure;
+		struct v4l2_ctrl *gain;
+	};
+};
+
+enum {
+	S5K5BAF_FW_ID_PATCH,
+	S5K5BAF_FW_ID_CCM,
+	S5K5BAF_FW_ID_CIS,
+};
+
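+/*
+ * seq[] and data[] overlay the same trailing storage; the data area
+ * effectively starts past the sequence headers, at fw->data + 2 * fw->count.
+ */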
+struct s5k5baf_fw {
+	u16 count;
+	struct {
+		u16 id;
+		u16 offset;
+	} seq[0];
+	u16 data[0];
+};
+
+struct s5k5baf {
+	struct s5k5baf_gpio gpios[NUM_GPIOS];
+	enum v4l2_mbus_type bus_type;
+	u8 nlanes;
+	struct regulator_bulk_data supplies[S5K5BAF_NUM_SUPPLIES];
+
+	struct clk *clock;
+	u32 mclk_frequency;
+
+	struct s5k5baf_fw *fw;
+
+	struct v4l2_subdev cis_sd;
+	struct media_pad cis_pad;
+
+	struct v4l2_subdev sd;
+	struct media_pad pads[NUM_ISP_PADS];
+
+	/* protects the struct members below */
+	struct mutex lock;
+
+	int error;
+
+	struct v4l2_rect crop_sink;
+	struct v4l2_rect compose;
+	struct v4l2_rect crop_source;
+	/* index to s5k5baf_formats array */
+	int pixfmt;
+	/* actual frame interval in 100us */
+	u16 fiv;
+	/* requested frame interval in 100us */
+	u16 req_fiv;
+	/* cache for REG_DBG_AUTOALG_EN register */
+	u16 auto_alg;
+
+	struct s5k5baf_ctrls ctrls;
+
+	unsigned int streaming:1;
+	unsigned int apply_cfg:1;
+	unsigned int apply_crop:1;
+	unsigned int valid_auto_alg:1;
+	unsigned int power;
+};
+
+static const struct s5k5baf_pixfmt s5k5baf_formats[] = {
+	{ V4L2_MBUS_FMT_VYUY8_2X8,	V4L2_COLORSPACE_JPEG,	5 },
+	/* range 16-240 */
+	{ V4L2_MBUS_FMT_VYUY8_2X8,	V4L2_COLORSPACE_REC709,	6 },
+	{ V4L2_MBUS_FMT_RGB565_2X8_BE,	V4L2_COLORSPACE_JPEG,	0 },
+};
+
+static struct v4l2_rect s5k5baf_cis_rect = {
+	0, 0, S5K5BAF_CIS_WIDTH, S5K5BAF_CIS_HEIGHT
+};
+
+/* The setfile contains a set of I2C command sequences, each with its own ID.
+ * Setfile format:
+ *	u8 magic[4];
+ *	u16 count;		number of sequences
+ *	struct {
+ *		u16 id;		sequence id
+ *		u16 offset;	sequence offset in data array
+ *	} seq[count];
+ *	u16 data[*];		array containing sequences
+ *
+ */
+static int s5k5baf_fw_parse(struct device *dev, struct s5k5baf_fw **fw,
+			    size_t count, const u16 *data)
+{
+	struct s5k5baf_fw *f;
+	u16 *d, i, *end;
+	int ret;
+
+	if (count < S5K5BAG_FW_TAG_LEN + 1) {
+		dev_err(dev, "firmware file too short (%zu)\n", count);
+		return -EINVAL;
+	}
+
+	ret = memcmp(data, S5K5BAF_FW_TAG, S5K5BAG_FW_TAG_LEN * sizeof(u16));
+	if (ret != 0) {
+		dev_err(dev, "invalid firmware magic number\n");
+		return -EINVAL;
+	}
+
+	data += S5K5BAG_FW_TAG_LEN;
+	count -= S5K5BAG_FW_TAG_LEN;
+
+	d = devm_kzalloc(dev, count * sizeof(u16), GFP_KERNEL);
+	if (!d)
+		return -ENOMEM;
+
+	for (i = 0; i < count; ++i)
+		d[i] = le16_to_cpu(data[i]);
+
+	f = (struct s5k5baf_fw *)d;
+	if (count < 1 + 2 * f->count) {
+		dev_err(dev, "invalid firmware header (count=%d size=%zu)\n",
+			f->count, 2 * (count + S5K5BAG_FW_TAG_LEN));
+		return -EINVAL;
+	}
+	end = d + count;
+	d += 1 + 2 * f->count;
+
+	for (i = 0; i < f->count; ++i) {
+		if (f->seq[i].offset + d <= end)
+			continue;
+		dev_err(dev, "invalid firmware header (seq=%d)\n", i);
+		return -EINVAL;
+	}
+
+	*fw = f;
+
+	return 0;
+}
+
+static inline struct v4l2_subdev *ctrl_to_sd(struct v4l2_ctrl *ctrl)
+{
+	return &container_of(ctrl->handler, struct s5k5baf, ctrls.handler)->sd;
+}
+
+static inline bool s5k5baf_is_cis_subdev(struct v4l2_subdev *sd)
+{
+	return sd->entity.type == MEDIA_ENT_T_V4L2_SUBDEV_SENSOR;
+}
+
+static inline struct s5k5baf *to_s5k5baf(struct v4l2_subdev *sd)
+{
+	if (s5k5baf_is_cis_subdev(sd))
+		return container_of(sd, struct s5k5baf, cis_sd);
+	else
+		return container_of(sd, struct s5k5baf, sd);
+}
+
+static u16 s5k5baf_i2c_read(struct s5k5baf *state, u16 addr)
+{
+	struct i2c_client *c = v4l2_get_subdevdata(&state->sd);
+	__be16 w, r;
+	struct i2c_msg msg[] = {
+		{ .addr = c->addr, .flags = 0,
+		  .len = 2, .buf = (u8 *)&w },
+		{ .addr = c->addr, .flags = I2C_M_RD,
+		  .len = 2, .buf = (u8 *)&r },
+	};
+	int ret;
+
+	if (state->error)
+		return 0;
+
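+	/* Send the 16-bit offset, then read a big-endian 16-bit value. */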
+	w = cpu_to_be16(addr);
+	ret = i2c_transfer(c->adapter, msg, 2);
+	r = be16_to_cpu(r);
+
+	v4l2_dbg(3, debug, c, "i2c_read: 0x%04x : 0x%04x\n", addr, r);
+
+	if (ret != 2) {
+		v4l2_err(c, "i2c_read: error during transfer (%d)\n", ret);
+		state->error = ret;
+	}
+	return r;
+}
+
+static void s5k5baf_i2c_write(struct s5k5baf *state, u16 addr, u16 val)
+{
+	u8 buf[4] = { addr >> 8, addr & 0xFF, val >> 8, val & 0xFF };
+	struct i2c_client *c = v4l2_get_subdevdata(&state->sd);
+	int ret;
+
+	if (state->error)
+		return;
+
+	ret = i2c_master_send(c, buf, 4);
+	v4l2_dbg(3, debug, c, "i2c_write: 0x%04x : 0x%04x\n", addr, val);
+
+	if (ret != 4) {
+		v4l2_err(c, "i2c_write: error during transfer (%d)\n", ret);
+		state->error = ret;
+	}
+}
+
+static u16 s5k5baf_read(struct s5k5baf *state, u16 addr)
+{
+	s5k5baf_i2c_write(state, REG_CMDRD_ADDR, addr);
+	return s5k5baf_i2c_read(state, REG_CMD_BUF);
+}
+
+static void s5k5baf_write(struct s5k5baf *state, u16 addr, u16 val)
+{
+	s5k5baf_i2c_write(state, REG_CMDWR_ADDR, addr);
+	s5k5baf_i2c_write(state, REG_CMD_BUF, val);
+}
+
+static void s5k5baf_write_arr_seq(struct s5k5baf *state, u16 addr,
+				  u16 count, const u16 *seq)
+{
+	struct i2c_client *c = v4l2_get_subdevdata(&state->sd);
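+	/* One address word followed by up to 64 data words per transfer. */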
+	__be16 buf[65];
+
+	s5k5baf_i2c_write(state, REG_CMDWR_ADDR, addr);
+	if (state->error)
+		return;
+
+	v4l2_dbg(3, debug, c, "i2c_write_seq(count=%d): %*ph\n", count,
+		 min(2 * count, 64), seq);
+
+	buf[0] = __constant_cpu_to_be16(REG_CMD_BUF);
+
+	while (count > 0) {
+		int n = min_t(int, count, ARRAY_SIZE(buf) - 1);
+		int ret, i;
+
+		for (i = 1; i <= n; ++i)
+			buf[i] = cpu_to_be16(*seq++);
+
+		i *= 2;
+		ret = i2c_master_send(c, (char *)buf, i);
+		if (ret != i) {
+			v4l2_err(c, "i2c_write_seq: error during transfer (%d)\n", ret);
+			state->error = ret;
+			break;
+		}
+
+		count -= n;
+	}
+}
+
+#define s5k5baf_write_seq(state, addr, seq...) \
+	s5k5baf_write_arr_seq(state, addr, sizeof((char[]){ seq }), \
+			      (const u16 []){ seq });
+
+/* prepend the number of items to the sequence */
+#define NSEQ(seq...) sizeof((char[]){ seq }), seq
+
+/*
+ * s5k5baf_write_nseq() - Writes sequences of values to sensor memory via i2c
+ * @nseq: sequence of u16 words in format:
+ *	(N, address, value[1]...value[N-1])*,0
+ * Ex.:
+ *	u16 seq[] = { NSEQ(0x4000, 1, 1), NSEQ(0x4010, 640, 480), 0 };
+ *	ret = s5k5baf_write_nseq(c, seq);
+ */
+static void s5k5baf_write_nseq(struct s5k5baf *state, const u16 *nseq)
+{
+	int count;
+
+	while ((count = *nseq++)) {
+		u16 addr = *nseq++;
+		--count;
+
+		s5k5baf_write_arr_seq(state, addr, count, nseq);
+		nseq += count;
+	}
+}
+
+static void s5k5baf_synchronize(struct s5k5baf *state, int timeout, u16 addr)
+{
+	unsigned long end = jiffies + msecs_to_jiffies(timeout);
+	u16 reg;
+
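+	/* Write 1, then poll until the firmware clears the register again. */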
+	s5k5baf_write(state, addr, 1);
+	do {
+		reg = s5k5baf_read(state, addr);
+		if (state->error || !reg)
+			return;
+		usleep_range(5000, 10000);
+	} while (time_is_after_jiffies(end));
+
+	v4l2_err(&state->sd, "timeout on register synchronize (%#x)\n", addr);
+	state->error = -ETIMEDOUT;
+}
+
+static u16 *s5k5baf_fw_get_seq(struct s5k5baf *state, u16 seq_id)
+{
+	struct s5k5baf_fw *fw = state->fw;
+	u16 *data;
+	int i;
+
+	if (fw == NULL)
+		return NULL;
+
+	data = fw->data + 2 * fw->count;
+
+	for (i = 0; i < fw->count; ++i) {
+		if (fw->seq[i].id == seq_id)
+			return data + fw->seq[i].offset;
+	}
+
+	return NULL;
+}
+
+static void s5k5baf_hw_patch(struct s5k5baf *state)
+{
+	u16 *seq = s5k5baf_fw_get_seq(state, S5K5BAF_FW_ID_PATCH);
+
+	if (seq)
+		s5k5baf_write_nseq(state, seq);
+}
+
+static void s5k5baf_hw_set_clocks(struct s5k5baf *state)
+{
+	unsigned long mclk = state->mclk_frequency / 1000;
+	u16 status;
+	static const u16 nseq_clk_cfg[] = {
+		NSEQ(REG_I_USE_NPVI_CLOCKS,
+		  NPVI_CLOCKS, NMIPI_CLOCKS, 0,
+		  SCLK_PVI_FREQ / 4, PCLK_MIN_FREQ / 4, PCLK_MAX_FREQ / 4,
+		  SCLK_MIPI_FREQ / 4, PCLK_MIN_FREQ / 4, PCLK_MAX_FREQ / 4),
+		NSEQ(REG_I_USE_REGS_API, 1),
+		0
+	};
+
+	s5k5baf_write_seq(state, REG_I_INCLK_FREQ_L, mclk & 0xffff, mclk >> 16);
+	s5k5baf_write_nseq(state, nseq_clk_cfg);
+
+	s5k5baf_synchronize(state, 250, REG_I_INIT_PARAMS_UPDATED);
+	status = s5k5baf_read(state, REG_I_ERROR_INFO);
+	if (!state->error && status) {
+		v4l2_err(&state->sd, "error configuring PLL (%d)\n", status);
+		state->error = -EINVAL;
+	}
+}
+
+/* set custom color correction matrices for various illuminations */
+static void s5k5baf_hw_set_ccm(struct s5k5baf *state)
+{
+	u16 *seq = s5k5baf_fw_get_seq(state, S5K5BAF_FW_ID_CCM);
+
+	if (seq)
+		s5k5baf_write_nseq(state, seq);
+}
+
+/* CIS sensor tuning, based on undocumented android driver code */
+static void s5k5baf_hw_set_cis(struct s5k5baf *state)
+{
+	u16 *seq = s5k5baf_fw_get_seq(state, S5K5BAF_FW_ID_CIS);
+
+	if (!seq)
+		return;
+
+	s5k5baf_i2c_write(state, REG_CMDWR_PAGE, PAGE_IF_HW);
+	s5k5baf_write_nseq(state, seq);
+	s5k5baf_i2c_write(state, REG_CMDWR_PAGE, PAGE_IF_SW);
+}
+
+static void s5k5baf_hw_sync_cfg(struct s5k5baf *state)
+{
+	s5k5baf_write(state, REG_G_PREV_CFG_CHG, 1);
+	if (state->apply_crop) {
+		s5k5baf_write(state, REG_G_INPUTS_CHANGE_REQ, 1);
+		s5k5baf_write(state, REG_G_PREV_CFG_BYPASS_CHANGED, 1);
+	}
+	s5k5baf_synchronize(state, 500, REG_G_NEW_CFG_SYNC);
+}
+
+/* Set horizontal and vertical image flipping */
+static void s5k5baf_hw_set_mirror(struct s5k5baf *state)
+{
+	u16 flip = state->ctrls.hflip->val | (state->ctrls.vflip->val << 1);
+
+	s5k5baf_write(state, REG_P_PREV_MIRROR(0), flip);
+	if (state->streaming)
+		s5k5baf_hw_sync_cfg(state);
+}
+
+static void s5k5baf_hw_set_alg(struct s5k5baf *state, u16 alg, bool enable)
+{
+	u16 cur_alg, new_alg;
+
+	if (!state->valid_auto_alg)
+		cur_alg = s5k5baf_read(state, REG_DBG_AUTOALG_EN);
+	else
+		cur_alg = state->auto_alg;
+
+	new_alg = enable ? (cur_alg | alg) : (cur_alg & ~alg);
+
+	if (new_alg != cur_alg)
+		s5k5baf_write(state, REG_DBG_AUTOALG_EN, new_alg);
+
+	if (state->error)
+		return;
+
+	state->valid_auto_alg = 1;
+	state->auto_alg = new_alg;
+}
+
+/* Configure auto/manual white balance and R/G/B gains */
+static void s5k5baf_hw_set_awb(struct s5k5baf *state, int awb)
+{
+	struct s5k5baf_ctrls *ctrls = &state->ctrls;
+
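+	/* Each manual gain is followed by a 1 in its *_CHG register. */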
+	if (!awb)
+		s5k5baf_write_seq(state, REG_SF_RGAIN,
+				  ctrls->gain_red->val, 1,
+				  S5K5BAF_GAIN_GREEN_DEF, 1,
+				  ctrls->gain_blue->val, 1,
+				  1);
+
+	s5k5baf_hw_set_alg(state, AALG_WB_EN, awb);
+}
+
+/* Program FW with exposure time, 'exposure' in us units */
+static void s5k5baf_hw_set_user_exposure(struct s5k5baf *state, int exposure)
+{
+	unsigned int time = exposure / 10;
+
+	s5k5baf_write_seq(state, REG_SF_USR_EXPOSURE_L,
+			  time & 0xffff, time >> 16, 1);
+}
+
+static void s5k5baf_hw_set_user_gain(struct s5k5baf *state, int gain)
+{
+	s5k5baf_write_seq(state, REG_SF_USR_TOT_GAIN, gain, 1);
+}
+
+/* Set auto/manual exposure and total gain */
+static void s5k5baf_hw_set_auto_exposure(struct s5k5baf *state, int value)
+{
+	if (value == V4L2_EXPOSURE_AUTO) {
+		s5k5baf_hw_set_alg(state, AALG_AE_EN | AALG_DIVLEI_EN, true);
+	} else {
+		unsigned int exp_time = state->ctrls.exposure->val;
+
+		s5k5baf_hw_set_user_exposure(state, exp_time);
+		s5k5baf_hw_set_user_gain(state, state->ctrls.gain->val);
+		s5k5baf_hw_set_alg(state, AALG_AE_EN | AALG_DIVLEI_EN, false);
+	}
+}
+
+static void s5k5baf_hw_set_anti_flicker(struct s5k5baf *state, int v)
+{
+	if (v == V4L2_CID_POWER_LINE_FREQUENCY_AUTO) {
+		s5k5baf_hw_set_alg(state, AALG_FLICKER_EN, true);
+	} else {
+		/* The V4L2_CID_POWER_LINE_FREQUENCY control values match
+		 * the register values */
+		s5k5baf_write_seq(state, REG_SF_FLICKER_QUANT, v, 1);
+		s5k5baf_hw_set_alg(state, AALG_FLICKER_EN, false);
+	}
+}
+
+static void s5k5baf_hw_set_colorfx(struct s5k5baf *state, int val)
+{
+	static const u16 colorfx[] = {
+		[V4L2_COLORFX_NONE] = 0,
+		[V4L2_COLORFX_BW] = 1,
+		[V4L2_COLORFX_NEGATIVE] = 2,
+		[V4L2_COLORFX_SEPIA] = 3,
+		[V4L2_COLORFX_SKY_BLUE] = 4,
+		[V4L2_COLORFX_SKETCH] = 5,
+	};
+
+	s5k5baf_write(state, REG_G_SPEC_EFFECTS, colorfx[val]);
+}
+
+static int s5k5baf_find_pixfmt(struct v4l2_mbus_framefmt *mf)
+{
+	int i, c = -1;
+
+	for (i = 0; i < ARRAY_SIZE(s5k5baf_formats); i++) {
+		if (mf->colorspace != s5k5baf_formats[i].colorspace)
+			continue;
+		if (mf->code == s5k5baf_formats[i].code)
+			return i;
+		if (c < 0)
+			c = i;
+	}
+	return (c < 0) ? 0 : c;
+}
+
+static int s5k5baf_clear_error(struct s5k5baf *state)
+{
+	int ret = state->error;
+
+	state->error = 0;
+	return ret;
+}
+
+static int s5k5baf_hw_set_video_bus(struct s5k5baf *state)
+{
+	u16 en_pkts;
+
+	if (state->bus_type == V4L2_MBUS_CSI2)
+		en_pkts = EN_PACKETS_CSI2;
+	else
+		en_pkts = 0;
+
+	s5k5baf_write_seq(state, REG_OIF_EN_MIPI_LANES,
+			  state->nlanes, en_pkts, 1);
+
+	return s5k5baf_clear_error(state);
+}
+
+static u16 s5k5baf_get_cfg_error(struct s5k5baf *state)
+{
+	u16 err = s5k5baf_read(state, REG_G_PREV_CFG_ERROR);
+	if (err)
+		s5k5baf_write(state, REG_G_PREV_CFG_ERROR, 0);
+	return err;
+}
+
+static void s5k5baf_hw_set_fiv(struct s5k5baf *state, u16 fiv)
+{
+	s5k5baf_write(state, REG_P_MAX_FR_TIME(0), fiv);
+	s5k5baf_hw_sync_cfg(state);
+}
+
+static void s5k5baf_hw_find_min_fiv(struct s5k5baf *state)
+{
+	u16 err, fiv;
+	int n;
+
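+	/* Start from the frame interval the firmware reports as actual and
+	 * increase it until the configuration is accepted (5 attempts max). */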
+	fiv = s5k5baf_read(state,  REG_G_ACTUAL_P_FR_TIME);
+	if (state->error)
+		return;
+
+	for (n = 5; n > 0; --n) {
+		s5k5baf_hw_set_fiv(state, fiv);
+		err = s5k5baf_get_cfg_error(state);
+		if (state->error)
+			return;
+		switch (err) {
+		case CFG_ERROR_RANGE:
+			++fiv;
+			break;
+		case 0:
+			state->fiv = fiv;
+			v4l2_info(&state->sd,
+				  "found valid frame interval: %d00us\n", fiv);
+			return;
+		default:
+			v4l2_err(&state->sd,
+				 "error setting frame interval: %d\n", err);
+			state->error = -EINVAL;
+		}
+	}
+	v4l2_err(&state->sd, "cannot find correct frame interval\n");
+	state->error = -ERANGE;
+}
+
+static void s5k5baf_hw_validate_cfg(struct s5k5baf *state)
+{
+	u16 err;
+
+	err = s5k5baf_get_cfg_error(state);
+	if (state->error)
+		return;
+
+	switch (err) {
+	case 0:
+		state->apply_cfg = 1;
+		return;
+	case CFG_ERROR_RANGE:
+		s5k5baf_hw_find_min_fiv(state);
+		if (!state->error)
+			state->apply_cfg = 1;
+		return;
+	default:
+		v4l2_err(&state->sd,
+			 "error setting format: %d\n", err);
+		state->error = -EINVAL;
+	}
+}
+
+static void s5k5baf_rescale(struct v4l2_rect *r, const struct v4l2_rect *v,
+			    const struct v4l2_rect *n,
+			    const struct v4l2_rect *d)
+{
+	r->left = v->left * n->width / d->width;
+	r->top = v->top * n->height / d->height;
+	r->width = v->width * n->width / d->width;
+	r->height = v->height * n->height / d->height;
+}
+
+static int s5k5baf_hw_set_crop_rects(struct s5k5baf *state)
+{
+	struct v4l2_rect *p, r;
+	u16 err;
+	int ret;
+
+	p = &state->crop_sink;
+	s5k5baf_write_seq(state, REG_G_PREVREQ_IN_WIDTH, p->width, p->height,
+			  p->left, p->top);
+
+	s5k5baf_rescale(&r, &state->crop_source, &state->crop_sink,
+			&state->compose);
+	s5k5baf_write_seq(state, REG_G_PREVZOOM_IN_WIDTH, r.width, r.height,
+			  r.left, r.top);
+
+	s5k5baf_synchronize(state, 500, REG_G_INPUTS_CHANGE_REQ);
+	s5k5baf_synchronize(state, 500, REG_G_PREV_CFG_BYPASS_CHANGED);
+	err = s5k5baf_get_cfg_error(state);
+	ret = s5k5baf_clear_error(state);
+	if (ret < 0)
+		return ret;
+
+	switch (err) {
+	case 0:
+		break;
+	case CFG_ERROR_RANGE:
+		/* retry crop with frame interval set to max */
+		s5k5baf_hw_set_fiv(state, S5K5BAF_MAX_FR_TIME);
+		err = s5k5baf_get_cfg_error(state);
+		ret = s5k5baf_clear_error(state);
+		if (ret < 0)
+			return ret;
+		if (err) {
+			v4l2_err(&state->sd,
+				 "crop error on max frame interval: %d\n", err);
+			state->error = -EINVAL;
+		}
+		s5k5baf_hw_set_fiv(state, state->req_fiv);
+		s5k5baf_hw_validate_cfg(state);
+		break;
+	default:
+		v4l2_err(&state->sd, "crop error: %d\n", err);
+		return -EINVAL;
+	}
+
+	if (!state->apply_cfg)
+		return 0;
+
+	p = &state->crop_source;
+	s5k5baf_write_seq(state, REG_P_OUT_WIDTH(0), p->width, p->height);
+	s5k5baf_hw_set_fiv(state, state->req_fiv);
+	s5k5baf_hw_validate_cfg(state);
+
+	return s5k5baf_clear_error(state);
+}
+
+static void s5k5baf_hw_set_config(struct s5k5baf *state)
+{
+	u16 reg_fmt = s5k5baf_formats[state->pixfmt].reg_p_fmt;
+	struct v4l2_rect *r = &state->crop_source;
+
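+	/* Preview config 0 registers are consecutive, written in one burst. */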
+	s5k5baf_write_seq(state, REG_P_OUT_WIDTH(0),
+			  r->width, r->height, reg_fmt,
+			  PCLK_MAX_FREQ >> 2, PCLK_MIN_FREQ >> 2,
+			  PVI_MASK_MIPI, CLK_MIPI_INDEX,
+			  FR_RATE_FIXED, FR_RATE_Q_DYNAMIC,
+			  state->req_fiv, S5K5BAF_MIN_FR_TIME);
+	s5k5baf_hw_sync_cfg(state);
+	s5k5baf_hw_validate_cfg(state);
+}
+
+static void s5k5baf_hw_set_test_pattern(struct s5k5baf *state, int id)
+{
+	s5k5baf_i2c_write(state, REG_PATTERN_WIDTH, 800);
+	s5k5baf_i2c_write(state, REG_PATTERN_HEIGHT, 511);
+	s5k5baf_i2c_write(state, REG_PATTERN_PARAM, 0);
+	s5k5baf_i2c_write(state, REG_PATTERN_SET, id);
+}
+
+static void s5k5baf_gpio_assert(struct s5k5baf *state, int id)
+{
+	struct s5k5baf_gpio *gpio = &state->gpios[id];
+
+	gpio_set_value(gpio->gpio, gpio->level);
+}
+
+static void s5k5baf_gpio_deassert(struct s5k5baf *state, int id)
+{
+	struct s5k5baf_gpio *gpio = &state->gpios[id];
+
+	gpio_set_value(gpio->gpio, !gpio->level);
+}
+
+static int s5k5baf_power_on(struct s5k5baf *state)
+{
+	int ret;
+
+	ret = regulator_bulk_enable(S5K5BAF_NUM_SUPPLIES, state->supplies);
+	if (ret < 0)
+		goto err;
+
+	ret = clk_set_rate(state->clock, state->mclk_frequency);
+	if (ret < 0)
+		goto err_reg_dis;
+
+	ret = clk_prepare_enable(state->clock);
+	if (ret < 0)
+		goto err_reg_dis;
+
+	v4l2_dbg(1, debug, &state->sd, "clock frequency: %ld\n",
+		 clk_get_rate(state->clock));
+
+	s5k5baf_gpio_deassert(state, STBY);
+	usleep_range(50, 100);
+	s5k5baf_gpio_deassert(state, RST);
+	return 0;
+
+err_reg_dis:
+	regulator_bulk_disable(S5K5BAF_NUM_SUPPLIES, state->supplies);
+err:
+	v4l2_err(&state->sd, "%s() failed (%d)\n", __func__, ret);
+	return ret;
+}
+
+static int s5k5baf_power_off(struct s5k5baf *state)
+{
+	int ret;
+
+	state->streaming = 0;
+	state->apply_cfg = 0;
+	state->apply_crop = 0;
+
+	s5k5baf_gpio_assert(state, RST);
+	s5k5baf_gpio_assert(state, STBY);
+
+	if (!IS_ERR(state->clock))
+		clk_disable_unprepare(state->clock);
+
+	ret = regulator_bulk_disable(S5K5BAF_NUM_SUPPLIES,
+					state->supplies);
+	if (ret < 0)
+		v4l2_err(&state->sd, "failed to disable regulators\n");
+
+	return 0;
+}
+
+static void s5k5baf_hw_init(struct s5k5baf *state)
+{
+	s5k5baf_i2c_write(state, AHB_MSB_ADDR_PTR, PAGE_IF_HW);
+	s5k5baf_i2c_write(state, REG_CLEAR_HOST_INT, 0);
+	s5k5baf_i2c_write(state, REG_SW_LOAD_COMPLETE, 1);
+	s5k5baf_i2c_write(state, REG_CMDRD_PAGE, PAGE_IF_SW);
+	s5k5baf_i2c_write(state, REG_CMDWR_PAGE, PAGE_IF_SW);
+}
+
+/*
+ * V4L2 subdev core and video operations
+ */
+
+static void s5k5baf_initialize_data(struct s5k5baf *state)
+{
+	state->pixfmt = 0;
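+	/* Default to ~15 fps; frame intervals are kept in 100 us units. */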
+	state->req_fiv = 10000 / 15;
+	state->fiv = state->req_fiv;
+	state->valid_auto_alg = 0;
+}
+
+static int s5k5baf_load_setfile(struct s5k5baf *state)
+{
+	struct i2c_client *c = v4l2_get_subdevdata(&state->sd);
+	const struct firmware *fw;
+	int ret;
+
+	ret = request_firmware(&fw, S5K5BAF_FW_FILENAME, &c->dev);
+	if (ret < 0) {
+		dev_warn(&c->dev, "firmware file (%s) not loaded\n",
+			 S5K5BAF_FW_FILENAME);
+		return ret;
+	}
+
+	ret = s5k5baf_fw_parse(&c->dev, &state->fw, fw->size / 2,
+			       (u16 *)fw->data);
+
+	release_firmware(fw);
+
+	return ret;
+}
+
+static int s5k5baf_set_power(struct v4l2_subdev *sd, int on)
+{
+	struct s5k5baf *state = to_s5k5baf(sd);
+	int ret = 0;
+
+	mutex_lock(&state->lock);
+
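+	/* Nothing to do if the device is already in the requested state. */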
+	if (!on != state->power)
+		goto out;
+
+	if (on) {
+		if (state->fw == NULL)
+			s5k5baf_load_setfile(state);
+
+		s5k5baf_initialize_data(state);
+		ret = s5k5baf_power_on(state);
+		if (ret < 0)
+			goto out;
+
+		s5k5baf_hw_init(state);
+		s5k5baf_hw_patch(state);
+		s5k5baf_i2c_write(state, REG_SET_HOST_INT, 1);
+		s5k5baf_hw_set_clocks(state);
+
+		ret = s5k5baf_hw_set_video_bus(state);
+		if (ret < 0)
+			goto out;
+
+		s5k5baf_hw_set_cis(state);
+		s5k5baf_hw_set_ccm(state);
+
+		ret = s5k5baf_clear_error(state);
+		if (!ret)
+			state->power++;
+	} else {
+		s5k5baf_power_off(state);
+		state->power--;
+	}
+
+out:
+	mutex_unlock(&state->lock);
+
+	if (!ret && on)
+		ret = v4l2_ctrl_handler_setup(&state->ctrls.handler);
+
+	return ret;
+}
+
+static void s5k5baf_hw_set_stream(struct s5k5baf *state, int enable)
+{
+	s5k5baf_write_seq(state, REG_G_ENABLE_PREV, enable, 1);
+}
+
+static int s5k5baf_s_stream(struct v4l2_subdev *sd, int on)
+{
+	struct s5k5baf *state = to_s5k5baf(sd);
+	int ret;
+
+	mutex_lock(&state->lock);
+
+	if (state->streaming == !!on) {
+		ret = 0;
+		goto out;
+	}
+
+	if (on) {
+		s5k5baf_hw_set_config(state);
+		ret = s5k5baf_hw_set_crop_rects(state);
+		if (ret < 0)
+			goto out;
+		s5k5baf_hw_set_stream(state, 1);
+		s5k5baf_i2c_write(state, 0xb0cc, 0x000b);
+	} else {
+		s5k5baf_hw_set_stream(state, 0);
+	}
+	ret = s5k5baf_clear_error(state);
+	if (!ret)
+		state->streaming = !state->streaming;
+
+out:
+	mutex_unlock(&state->lock);
+
+	return ret;
+}
+
+static int s5k5baf_g_frame_interval(struct v4l2_subdev *sd,
+				   struct v4l2_subdev_frame_interval *fi)
+{
+	struct s5k5baf *state = to_s5k5baf(sd);
+
+	mutex_lock(&state->lock);
+	fi->interval.numerator = state->fiv;
+	fi->interval.denominator = 10000;
+	mutex_unlock(&state->lock);
+
+	return 0;
+}
+
+static void s5k5baf_set_frame_interval(struct s5k5baf *state,
+				       struct v4l2_subdev_frame_interval *fi)
+{
+	struct v4l2_fract *i = &fi->interval;
+
+	if (fi->interval.denominator == 0)
+		state->req_fiv = S5K5BAF_MAX_FR_TIME;
+	else
+		state->req_fiv = clamp_t(u32,
+					 i->numerator * 10000 / i->denominator,
+					 S5K5BAF_MIN_FR_TIME,
+					 S5K5BAF_MAX_FR_TIME);
+
+	state->fiv = state->req_fiv;
+	if (state->apply_cfg) {
+		s5k5baf_hw_set_fiv(state, state->req_fiv);
+		s5k5baf_hw_validate_cfg(state);
+	}
+	*i = (struct v4l2_fract){ state->fiv, 10000 };
+	if (state->fiv == state->req_fiv)
+		v4l2_info(&state->sd, "frame interval changed to %d00us\n",
+			  state->fiv);
+}
+
+static int s5k5baf_s_frame_interval(struct v4l2_subdev *sd,
+				   struct v4l2_subdev_frame_interval *fi)
+{
+	struct s5k5baf *state = to_s5k5baf(sd);
+
+	mutex_lock(&state->lock);
+	s5k5baf_set_frame_interval(state, fi);
+	mutex_unlock(&state->lock);
+	return 0;
+}
+
+/*
+ * V4L2 subdev pad level and video operations
+ */
+static int s5k5baf_enum_frame_interval(struct v4l2_subdev *sd,
+			      struct v4l2_subdev_fh *fh,
+			      struct v4l2_subdev_frame_interval_enum *fie)
+{
+	if (fie->index > S5K5BAF_MAX_FR_TIME - S5K5BAF_MIN_FR_TIME ||
+	    fie->pad != PAD_CIS)
+		return -EINVAL;
+
+	v4l_bound_align_image(&fie->width, S5K5BAF_WIN_WIDTH_MIN,
+			      S5K5BAF_CIS_WIDTH, 1,
+			      &fie->height, S5K5BAF_WIN_HEIGHT_MIN,
+			      S5K5BAF_CIS_HEIGHT, 1, 0);
+
+	fie->interval.numerator = S5K5BAF_MIN_FR_TIME + fie->index;
+	fie->interval.denominator = 10000;
+
+	return 0;
+}
+
+static int s5k5baf_enum_mbus_code(struct v4l2_subdev *sd,
+				 struct v4l2_subdev_fh *fh,
+				 struct v4l2_subdev_mbus_code_enum *code)
+{
+	if (code->pad == PAD_CIS) {
+		if (code->index > 0)
+			return -EINVAL;
+		code->code = V4L2_MBUS_FMT_FIXED;
+		return 0;
+	}
+
+	if (code->index >= ARRAY_SIZE(s5k5baf_formats))
+		return -EINVAL;
+
+	code->code = s5k5baf_formats[code->index].code;
+	return 0;
+}
+
+static int s5k5baf_enum_frame_size(struct v4l2_subdev *sd,
+				  struct v4l2_subdev_fh *fh,
+				  struct v4l2_subdev_frame_size_enum *fse)
+{
+	int i;
+
+	if (fse->index > 0)
+		return -EINVAL;
+
+	if (fse->pad == PAD_CIS) {
+		fse->code = V4L2_MBUS_FMT_FIXED;
+		fse->min_width = S5K5BAF_CIS_WIDTH;
+		fse->max_width = S5K5BAF_CIS_WIDTH;
+		fse->min_height = S5K5BAF_CIS_HEIGHT;
+		fse->max_height = S5K5BAF_CIS_HEIGHT;
+		return 0;
+	}
+
+	i = ARRAY_SIZE(s5k5baf_formats);
+	while (--i)
+		if (fse->code == s5k5baf_formats[i].code)
+			break;
+	fse->code = s5k5baf_formats[i].code;
+	fse->min_width = S5K5BAF_WIN_WIDTH_MIN;
+	fse->max_width = S5K5BAF_CIS_WIDTH;
+	fse->min_height = S5K5BAF_WIN_HEIGHT_MIN;
+	fse->max_height = S5K5BAF_CIS_HEIGHT;
+
+	return 0;
+}
+
+static void s5k5baf_try_cis_format(struct v4l2_mbus_framefmt *mf)
+{
+	mf->width = S5K5BAF_CIS_WIDTH;
+	mf->height = S5K5BAF_CIS_HEIGHT;
+	mf->code = V4L2_MBUS_FMT_FIXED;
+	mf->colorspace = V4L2_COLORSPACE_JPEG;
+	mf->field = V4L2_FIELD_NONE;
+}
+
+static int s5k5baf_try_isp_format(struct v4l2_mbus_framefmt *mf)
+{
+	int pixfmt;
+
+	v4l_bound_align_image(&mf->width, S5K5BAF_WIN_WIDTH_MIN,
+			      S5K5BAF_CIS_WIDTH, 1,
+			      &mf->height, S5K5BAF_WIN_HEIGHT_MIN,
+			      S5K5BAF_CIS_HEIGHT, 1, 0);
+
+	pixfmt = s5k5baf_find_pixfmt(mf);
+
+	mf->colorspace = s5k5baf_formats[pixfmt].colorspace;
+	mf->code = s5k5baf_formats[pixfmt].code;
+	mf->field = V4L2_FIELD_NONE;
+
+	return pixfmt;
+}
+
+static int s5k5baf_get_fmt(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh,
+			  struct v4l2_subdev_format *fmt)
+{
+	struct s5k5baf *state = to_s5k5baf(sd);
+	const struct s5k5baf_pixfmt *pixfmt;
+	struct v4l2_mbus_framefmt *mf;
+
+	if (fmt->which == V4L2_SUBDEV_FORMAT_TRY) {
+		mf = v4l2_subdev_get_try_format(fh, fmt->pad);
+		fmt->format = *mf;
+		return 0;
+	}
+
+	mf = &fmt->format;
+	if (fmt->pad == PAD_CIS) {
+		s5k5baf_try_cis_format(mf);
+		return 0;
+	}
+	mf->field = V4L2_FIELD_NONE;
+	mutex_lock(&state->lock);
+	pixfmt = &s5k5baf_formats[state->pixfmt];
+	mf->width = state->crop_source.width;
+	mf->height = state->crop_source.height;
+	mf->code = pixfmt->code;
+	mf->colorspace = pixfmt->colorspace;
+	mutex_unlock(&state->lock);
+
+	return 0;
+}
+
+static int s5k5baf_set_fmt(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh,
+			  struct v4l2_subdev_format *fmt)
+{
+	struct v4l2_mbus_framefmt *mf = &fmt->format;
+	struct s5k5baf *state = to_s5k5baf(sd);
+	const struct s5k5baf_pixfmt *pixfmt;
+	int ret = 0;
+
+	if (fmt->which == V4L2_SUBDEV_FORMAT_TRY) {
+		*v4l2_subdev_get_try_format(fh, fmt->pad) = *mf;
+		return 0;
+	}
+
+	if (fmt->pad == PAD_CIS) {
+		s5k5baf_try_cis_format(mf);
+		return 0;
+	}
+
+	mutex_lock(&state->lock);
+
+	if (state->streaming) {
+		mutex_unlock(&state->lock);
+		return -EBUSY;
+	}
+
+	state->pixfmt = s5k5baf_try_isp_format(mf);
+	pixfmt = &s5k5baf_formats[state->pixfmt];
+	mf->code = pixfmt->code;
+	mf->colorspace = pixfmt->colorspace;
+	mf->width = state->crop_source.width;
+	mf->height = state->crop_source.height;
+
+	mutex_unlock(&state->lock);
+	return ret;
+}
+
+enum selection_rect { R_CIS, R_CROP_SINK, R_COMPOSE, R_CROP_SOURCE, R_INVALID };
+
+static enum selection_rect s5k5baf_get_sel_rect(u32 pad, u32 target)
+{
+	switch (target) {
+	case V4L2_SEL_TGT_CROP_BOUNDS:
+		return pad ? R_COMPOSE : R_CIS;
+	case V4L2_SEL_TGT_CROP:
+		return pad ? R_CROP_SOURCE : R_CROP_SINK;
+	case V4L2_SEL_TGT_COMPOSE_BOUNDS:
+		return pad ? R_INVALID : R_CROP_SINK;
+	case V4L2_SEL_TGT_COMPOSE:
+		return pad ? R_INVALID : R_COMPOSE;
+	default:
+		return R_INVALID;
+	}
+}
+
+static int s5k5baf_is_bound_target(u32 target)
+{
+	return target == V4L2_SEL_TGT_CROP_BOUNDS ||
+		target == V4L2_SEL_TGT_COMPOSE_BOUNDS;
+}
+
+static int s5k5baf_get_selection(struct v4l2_subdev *sd,
+				 struct v4l2_subdev_fh *fh,
+				 struct v4l2_subdev_selection *sel)
+{
+	enum selection_rect rtype;
+	struct s5k5baf *state = to_s5k5baf(sd);
+
+	rtype = s5k5baf_get_sel_rect(sel->pad, sel->target);
+
+	switch (rtype) {
+	case R_INVALID:
+		return -EINVAL;
+	case R_CIS:
+		sel->r = s5k5baf_cis_rect;
+		return 0;
+	default:
+		break;
+	}
+
+	if (sel->which == V4L2_SUBDEV_FORMAT_TRY) {
+		if (rtype == R_COMPOSE)
+			sel->r = *v4l2_subdev_get_try_compose(fh, sel->pad);
+		else
+			sel->r = *v4l2_subdev_get_try_crop(fh, sel->pad);
+		return 0;
+	}
+
+	mutex_lock(&state->lock);
+	switch (rtype) {
+	case R_CROP_SINK:
+		sel->r = state->crop_sink;
+		break;
+	case R_COMPOSE:
+		sel->r = state->compose;
+		break;
+	case R_CROP_SOURCE:
+		sel->r = state->crop_source;
+		break;
+	default:
+		break;
+	}
+	if (s5k5baf_is_bound_target(sel->target)) {
+		sel->r.left = 0;
+		sel->r.top = 0;
+	}
+	mutex_unlock(&state->lock);
+
+	return 0;
+}
+
+/* bounds range [start, start+len) to [0, max) and aligns to 2 */
+static void s5k5baf_bound_range(u32 *start, u32 *len, u32 max)
+{
+	if (*len > max)
+		*len = max;
+	if (*start + *len > max)
+		*start = max - *len;
+	*start &= ~1;
+	*len &= ~1;
+	if (*len < S5K5BAF_WIN_WIDTH_MIN)
+		*len = S5K5BAF_WIN_WIDTH_MIN;
+}
+
+static void s5k5baf_bound_rect(struct v4l2_rect *r, u32 width, u32 height)
+{
+	s5k5baf_bound_range(&r->left, &r->width, width);
+	s5k5baf_bound_range(&r->top, &r->height, height);
+}
+
+static void s5k5baf_set_rect_and_adjust(struct v4l2_rect **rects,
+					enum selection_rect first,
+					struct v4l2_rect *v)
+{
+	struct v4l2_rect *r, *br;
+	enum selection_rect i = first;
+
+	*rects[first] = *v;
+	do {
+		r = rects[i];
+		br = rects[i - 1];
+		s5k5baf_bound_rect(r, br->width, br->height);
+	} while (++i != R_INVALID);
+	*v = *rects[first];
+}
+
+static bool s5k5baf_cmp_rect(const struct v4l2_rect *r1,
+			     const struct v4l2_rect *r2)
+{
+	return !memcmp(r1, r2, sizeof(*r1));
+}
+
+static int s5k5baf_set_selection(struct v4l2_subdev *sd,
+				 struct v4l2_subdev_fh *fh,
+				 struct v4l2_subdev_selection *sel)
+{
+	enum selection_rect rtype;
+	struct s5k5baf *state = to_s5k5baf(sd);
+	struct v4l2_rect **rects;
+	int ret = 0;
+
+	rtype = s5k5baf_get_sel_rect(sel->pad, sel->target);
+	if (rtype == R_INVALID || s5k5baf_is_bound_target(sel->target))
+		return -EINVAL;
+
+	/* allow only scaling on compose */
+	if (rtype == R_COMPOSE) {
+		sel->r.left = 0;
+		sel->r.top = 0;
+	}
+
+	if (sel->which == V4L2_SUBDEV_FORMAT_TRY) {
+		rects = (struct v4l2_rect * []) {
+				&s5k5baf_cis_rect,
+				v4l2_subdev_get_try_crop(fh, PAD_CIS),
+				v4l2_subdev_get_try_compose(fh, PAD_CIS),
+				v4l2_subdev_get_try_crop(fh, PAD_OUT)
+			};
+		s5k5baf_set_rect_and_adjust(rects, rtype, &sel->r);
+		return 0;
+	}
+
+	rects = (struct v4l2_rect * []) {
+			&s5k5baf_cis_rect,
+			&state->crop_sink,
+			&state->compose,
+			&state->crop_source
+		};
+	mutex_lock(&state->lock);
+	if (state->streaming) {
+		/* adjust sel->r to avoid output resolution change */
+		if (rtype < R_CROP_SOURCE) {
+			if (sel->r.width < state->crop_source.width)
+				sel->r.width = state->crop_source.width;
+			if (sel->r.height < state->crop_source.height)
+				sel->r.height = state->crop_source.height;
+		} else {
+			sel->r.width = state->crop_source.width;
+			sel->r.height = state->crop_source.height;
+		}
+	}
+	s5k5baf_set_rect_and_adjust(rects, rtype, &sel->r);
+	if (!s5k5baf_cmp_rect(&state->crop_sink, &s5k5baf_cis_rect) ||
+	    !s5k5baf_cmp_rect(&state->compose, &s5k5baf_cis_rect))
+		state->apply_crop = 1;
+	if (state->streaming)
+		ret = s5k5baf_hw_set_crop_rects(state);
+	mutex_unlock(&state->lock);
+
+	return ret;
+}
+
+static const struct v4l2_subdev_pad_ops s5k5baf_cis_pad_ops = {
+	.enum_mbus_code		= s5k5baf_enum_mbus_code,
+	.enum_frame_size	= s5k5baf_enum_frame_size,
+	.get_fmt		= s5k5baf_get_fmt,
+	.set_fmt		= s5k5baf_set_fmt,
+};
+
+static const struct v4l2_subdev_pad_ops s5k5baf_pad_ops = {
+	.enum_mbus_code		= s5k5baf_enum_mbus_code,
+	.enum_frame_size	= s5k5baf_enum_frame_size,
+	.enum_frame_interval	= s5k5baf_enum_frame_interval,
+	.get_fmt		= s5k5baf_get_fmt,
+	.set_fmt		= s5k5baf_set_fmt,
+	.get_selection		= s5k5baf_get_selection,
+	.set_selection		= s5k5baf_set_selection,
+};
+
+static const struct v4l2_subdev_video_ops s5k5baf_video_ops = {
+	.g_frame_interval	= s5k5baf_g_frame_interval,
+	.s_frame_interval	= s5k5baf_s_frame_interval,
+	.s_stream		= s5k5baf_s_stream,
+};
+
+/*
+ * V4L2 subdev controls
+ */
+
+static int s5k5baf_s_ctrl(struct v4l2_ctrl *ctrl)
+{
+	struct v4l2_subdev *sd = ctrl_to_sd(ctrl);
+	struct s5k5baf *state = to_s5k5baf(sd);
+	int ret;
+
+	v4l2_dbg(1, debug, sd, "ctrl: %s, value: %d\n", ctrl->name, ctrl->val);
+
+	mutex_lock(&state->lock);
+
+	if (state->power == 0)
+		goto unlock;
+
+	switch (ctrl->id) {
+	case V4L2_CID_AUTO_WHITE_BALANCE:
+		s5k5baf_hw_set_awb(state, ctrl->val);
+		break;
+
+	case V4L2_CID_BRIGHTNESS:
+		s5k5baf_write(state, REG_USER_BRIGHTNESS, ctrl->val);
+		break;
+
+	case V4L2_CID_COLORFX:
+		s5k5baf_hw_set_colorfx(state, ctrl->val);
+		break;
+
+	case V4L2_CID_CONTRAST:
+		s5k5baf_write(state, REG_USER_CONTRAST, ctrl->val);
+		break;
+
+	case V4L2_CID_EXPOSURE_AUTO:
+		s5k5baf_hw_set_auto_exposure(state, ctrl->val);
+		break;
+
+	case V4L2_CID_HFLIP:
+		s5k5baf_hw_set_mirror(state);
+		break;
+
+	case V4L2_CID_POWER_LINE_FREQUENCY:
+		s5k5baf_hw_set_anti_flicker(state, ctrl->val);
+		break;
+
+	case V4L2_CID_SATURATION:
+		s5k5baf_write(state, REG_USER_SATURATION, ctrl->val);
+		break;
+
+	case V4L2_CID_SHARPNESS:
+		s5k5baf_write(state, REG_USER_SHARPBLUR, ctrl->val);
+		break;
+
+	case V4L2_CID_WHITE_BALANCE_TEMPERATURE:
+		s5k5baf_write(state, REG_P_COLORTEMP(0), ctrl->val);
+		if (state->apply_cfg)
+			s5k5baf_hw_sync_cfg(state);
+		break;
+
+	case V4L2_CID_TEST_PATTERN:
+		s5k5baf_hw_set_test_pattern(state, ctrl->val);
+		break;
+	}
+unlock:
+	ret = s5k5baf_clear_error(state);
+	mutex_unlock(&state->lock);
+	return ret;
+}
+
+static const struct v4l2_ctrl_ops s5k5baf_ctrl_ops = {
+	.s_ctrl	= s5k5baf_s_ctrl,
+};
+
+static const char * const s5k5baf_test_pattern_menu[] = {
+	"Disabled",
+	"Blank",
+	"Bars",
+	"Gradients",
+	"Textile",
+	"Textile2",
+	"Squares"
+};
+
+static int s5k5baf_initialize_ctrls(struct s5k5baf *state)
+{
+	const struct v4l2_ctrl_ops *ops = &s5k5baf_ctrl_ops;
+	struct s5k5baf_ctrls *ctrls = &state->ctrls;
+	struct v4l2_ctrl_handler *hdl = &ctrls->handler;
+	int ret;
+
+	ret = v4l2_ctrl_handler_init(hdl, 16);
+	if (ret < 0) {
+		v4l2_err(&state->sd, "cannot init ctrl handler (%d)\n", ret);
+		return ret;
+	}
+
+	/* Auto white balance cluster */
+	ctrls->awb = v4l2_ctrl_new_std(hdl, ops, V4L2_CID_AUTO_WHITE_BALANCE,
+				       0, 1, 1, 1);
+	ctrls->gain_red = v4l2_ctrl_new_std(hdl, ops, V4L2_CID_RED_BALANCE,
+					    0, 255, 1, S5K5BAF_GAIN_RED_DEF);
+	ctrls->gain_blue = v4l2_ctrl_new_std(hdl, ops, V4L2_CID_BLUE_BALANCE,
+					     0, 255, 1, S5K5BAF_GAIN_BLUE_DEF);
+	v4l2_ctrl_auto_cluster(3, &ctrls->awb, 0, false);
+
+	ctrls->hflip = v4l2_ctrl_new_std(hdl, ops, V4L2_CID_HFLIP, 0, 1, 1, 0);
+	ctrls->vflip = v4l2_ctrl_new_std(hdl, ops, V4L2_CID_VFLIP, 0, 1, 1, 0);
+	v4l2_ctrl_cluster(2, &ctrls->hflip);
+
+	ctrls->auto_exp = v4l2_ctrl_new_std_menu(hdl, ops,
+				V4L2_CID_EXPOSURE_AUTO,
+				V4L2_EXPOSURE_MANUAL, 0, V4L2_EXPOSURE_AUTO);
+	/* Exposure time: x 1 us */
+	ctrls->exposure = v4l2_ctrl_new_std(hdl, ops, V4L2_CID_EXPOSURE,
+					    0, 6000000U, 1, 100000U);
+	/* Total gain: 256 <=> 1x */
+	ctrls->gain = v4l2_ctrl_new_std(hdl, ops, V4L2_CID_GAIN,
+					0, 256, 1, 256);
+	v4l2_ctrl_auto_cluster(3, &ctrls->auto_exp, 0, false);
+
+	v4l2_ctrl_new_std_menu(hdl, ops, V4L2_CID_POWER_LINE_FREQUENCY,
+			       V4L2_CID_POWER_LINE_FREQUENCY_AUTO, 0,
+			       V4L2_CID_POWER_LINE_FREQUENCY_AUTO);
+
+	v4l2_ctrl_new_std_menu(hdl, ops, V4L2_CID_COLORFX,
+			       V4L2_COLORFX_SKY_BLUE, ~0x6f, V4L2_COLORFX_NONE);
+
+	v4l2_ctrl_new_std(hdl, ops, V4L2_CID_WHITE_BALANCE_TEMPERATURE,
+			  0, 256, 1, 0);
+
+	v4l2_ctrl_new_std(hdl, ops, V4L2_CID_SATURATION, -127, 127, 1, 0);
+	v4l2_ctrl_new_std(hdl, ops, V4L2_CID_BRIGHTNESS, -127, 127, 1, 0);
+	v4l2_ctrl_new_std(hdl, ops, V4L2_CID_CONTRAST, -127, 127, 1, 0);
+	v4l2_ctrl_new_std(hdl, ops, V4L2_CID_SHARPNESS, -127, 127, 1, 0);
+
+	v4l2_ctrl_new_std_menu_items(hdl, ops, V4L2_CID_TEST_PATTERN,
+				     ARRAY_SIZE(s5k5baf_test_pattern_menu) - 1,
+				     0, 0, s5k5baf_test_pattern_menu);
+
+	if (hdl->error) {
+		v4l2_err(&state->sd, "error creating controls (%d)\n",
+			 hdl->error);
+		ret = hdl->error;
+		v4l2_ctrl_handler_free(hdl);
+		return ret;
+	}
+
+	state->sd.ctrl_handler = hdl;
+	return 0;
+}
+
+/*
+ * V4L2 subdev internal operations
+ */
+static int s5k5baf_open(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh)
+{
+	struct v4l2_mbus_framefmt *mf;
+
+	mf = v4l2_subdev_get_try_format(fh, PAD_CIS);
+	s5k5baf_try_cis_format(mf);
+
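+	/*
+	 * Only the ISP subdev gets a default output format and crop/compose
+	 * rectangles; the CIS subdev exposes just the sensor pad.
+	 */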
+	if (s5k5baf_is_cis_subdev(sd))
+		return 0;
+
+	mf = v4l2_subdev_get_try_format(fh, PAD_OUT);
+	mf->colorspace = s5k5baf_formats[0].colorspace;
+	mf->code = s5k5baf_formats[0].code;
+	mf->width = s5k5baf_cis_rect.width;
+	mf->height = s5k5baf_cis_rect.height;
+	mf->field = V4L2_FIELD_NONE;
+
+	*v4l2_subdev_get_try_crop(fh, PAD_CIS) = s5k5baf_cis_rect;
+	*v4l2_subdev_get_try_compose(fh, PAD_CIS) = s5k5baf_cis_rect;
+	*v4l2_subdev_get_try_crop(fh, PAD_OUT) = s5k5baf_cis_rect;
+
+	return 0;
+}
+
+static int s5k5baf_check_fw_revision(struct s5k5baf *state)
+{
+	u16 api_ver = 0, fw_rev = 0, s_id = 0;
+	int ret;
+
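+	/* read errors are accumulated and checked once below */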
+	api_ver = s5k5baf_read(state, REG_FW_APIVER);
+	fw_rev = s5k5baf_read(state, REG_FW_REVISION) & 0xff;
+	s_id = s5k5baf_read(state, REG_FW_SENSOR_ID);
+	ret = s5k5baf_clear_error(state);
+	if (ret < 0)
+		return ret;
+
+	v4l2_info(&state->sd, "FW API=%#x, revision=%#x sensor_id=%#x\n",
+		  api_ver, fw_rev, s_id);
+
+	if (api_ver != S5K5BAF_FW_APIVER) {
+		v4l2_err(&state->sd, "FW API version not supported\n");
+		return -ENODEV;
+	}
+
+	return 0;
+}
+
+static int s5k5baf_registered(struct v4l2_subdev *sd)
+{
+	struct s5k5baf *state = to_s5k5baf(sd);
+	int ret;
+
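+	/* register the internal CIS subdev and link it to the ISP sink pad */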
+	ret = v4l2_device_register_subdev(sd->v4l2_dev, &state->cis_sd);
+	if (ret < 0)
+		v4l2_err(sd, "failed to register subdev %s\n",
+			 state->cis_sd.name);
+	else
+		ret = media_entity_create_link(&state->cis_sd.entity, PAD_CIS,
+					       &state->sd.entity, PAD_CIS,
+					       MEDIA_LNK_FL_IMMUTABLE |
+					       MEDIA_LNK_FL_ENABLED);
+	return ret;
+}
+
+static void s5k5baf_unregistered(struct v4l2_subdev *sd)
+{
+	struct s5k5baf *state = to_s5k5baf(sd);
+	v4l2_device_unregister_subdev(&state->cis_sd);
+}
+
+static const struct v4l2_subdev_ops s5k5baf_cis_subdev_ops = {
+	.pad	= &s5k5baf_cis_pad_ops,
+};
+
+static const struct v4l2_subdev_internal_ops s5k5baf_cis_subdev_internal_ops = {
+	.open = s5k5baf_open,
+};
+
+static const struct v4l2_subdev_internal_ops s5k5baf_subdev_internal_ops = {
+	.registered = s5k5baf_registered,
+	.unregistered = s5k5baf_unregistered,
+	.open = s5k5baf_open,
+};
+
+static const struct v4l2_subdev_core_ops s5k5baf_core_ops = {
+	.s_power = s5k5baf_set_power,
+	.log_status = v4l2_ctrl_subdev_log_status,
+};
+
+static const struct v4l2_subdev_ops s5k5baf_subdev_ops = {
+	.core = &s5k5baf_core_ops,
+	.pad = &s5k5baf_pad_ops,
+	.video = &s5k5baf_video_ops,
+};
+
+static int s5k5baf_configure_gpios(struct s5k5baf *state)
+{
+	static const char * const name[] = { "S5K5BAF_STBY", "S5K5BAF_RST" };
+	struct i2c_client *c = v4l2_get_subdevdata(&state->sd);
+	struct s5k5baf_gpio *g = state->gpios;
+	int ret, i;
+
+	for (i = 0; i < NUM_GPIOS; ++i) {
+		int flags = GPIOF_DIR_OUT;
+		if (g[i].level)
+			flags |= GPIOF_INIT_HIGH;
+		ret = devm_gpio_request_one(&c->dev, g[i].gpio, flags, name[i]);
+		if (ret < 0) {
+			v4l2_err(c, "failed to request gpio %s\n", name[i]);
+			return ret;
+		}
+	}
+	return 0;
+}
+
+static int s5k5baf_parse_gpios(struct s5k5baf_gpio *gpios, struct device *dev)
+{
+	static const char * const names[] = {
+		"stbyn-gpios",
+		"rstn-gpios",
+	};
+	struct device_node *node = dev->of_node;
+	enum of_gpio_flags flags;
+	int ret, i;
+
+	for (i = 0; i < NUM_GPIOS; ++i) {
+		ret = of_get_named_gpio_flags(node, names[i], 0, &flags);
+		if (ret < 0) {
+			dev_err(dev, "no %s GPIO pin provided\n", names[i]);
+			return ret;
+		}
+		gpios[i].gpio = ret;
+		gpios[i].level = !(flags & OF_GPIO_ACTIVE_LOW);
+	}
+
+	return 0;
+}
+
+static int s5k5baf_parse_device_node(struct s5k5baf *state, struct device *dev)
+{
+	struct device_node *node = dev->of_node;
+	struct device_node *node_ep;
+	struct v4l2_of_endpoint ep;
+	int ret;
+
+	if (!node) {
+		dev_err(dev, "no device-tree node provided\n");
+		return -EINVAL;
+	}
+
+	ret = of_property_read_u32(node, "clock-frequency",
+				   &state->mclk_frequency);
+	if (ret < 0) {
+		state->mclk_frequency = S5K5BAF_DEFAULT_MCLK_FREQ;
+		dev_info(dev, "using default %u Hz clock frequency\n",
+			 state->mclk_frequency);
+	}
+
+	ret = s5k5baf_parse_gpios(state->gpios, dev);
+	if (ret < 0)
+		return ret;
+
+	node_ep = v4l2_of_get_next_endpoint(node, NULL);
+	if (!node_ep) {
+		dev_err(dev, "no endpoint defined at node %s\n",
+			node->full_name);
+		return -EINVAL;
+	}
+
+	v4l2_of_parse_endpoint(node_ep, &ep);
+	of_node_put(node_ep);
+	state->bus_type = ep.bus_type;
+
+	switch (state->bus_type) {
+	case V4L2_MBUS_CSI2:
+		state->nlanes = ep.bus.mipi_csi2.num_data_lanes;
+		break;
+	case V4L2_MBUS_PARALLEL:
+		break;
+	default:
+		dev_err(dev, "unsupported bus in endpoint defined at node %s\n",
+			node->full_name);
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+static int s5k5baf_configure_subdevs(struct s5k5baf *state,
+				     struct i2c_client *c)
+{
+	struct v4l2_subdev *sd;
+	int ret;
+
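+	/* the sensor is exposed as two subdevs: the bare CIS and the ISP */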
+	sd = &state->cis_sd;
+	v4l2_subdev_init(sd, &s5k5baf_cis_subdev_ops);
+	sd->owner = THIS_MODULE;
+	v4l2_set_subdevdata(sd, state);
+	snprintf(sd->name, sizeof(sd->name), "S5K5BAF-CIS %d-%04x",
+		 i2c_adapter_id(c->adapter), c->addr);
+
+	sd->internal_ops = &s5k5baf_cis_subdev_internal_ops;
+	sd->flags |= V4L2_SUBDEV_FL_HAS_DEVNODE;
+
+	state->cis_pad.flags = MEDIA_PAD_FL_SOURCE;
+	sd->entity.type = MEDIA_ENT_T_V4L2_SUBDEV_SENSOR;
+	ret = media_entity_init(&sd->entity, NUM_CIS_PADS, &state->cis_pad, 0);
+	if (ret < 0)
+		goto err;
+
+	sd = &state->sd;
+	v4l2_i2c_subdev_init(sd, c, &s5k5baf_subdev_ops);
+	snprintf(sd->name, sizeof(sd->name), "S5K5BAF-ISP %d-%04x",
+		 i2c_adapter_id(c->adapter), c->addr);
+
+	sd->internal_ops = &s5k5baf_subdev_internal_ops;
+	sd->flags |= V4L2_SUBDEV_FL_HAS_DEVNODE;
+
+	state->pads[PAD_CIS].flags = MEDIA_PAD_FL_SINK;
+	state->pads[PAD_OUT].flags = MEDIA_PAD_FL_SOURCE;
+	sd->entity.type = MEDIA_ENT_T_V4L2_SUBDEV;
+	ret = media_entity_init(&sd->entity, NUM_ISP_PADS, state->pads, 0);
+
+	if (!ret)
+		return 0;
+
+	media_entity_cleanup(&state->cis_sd.entity);
+err:
+	dev_err(&c->dev, "cannot init media entity %s\n", sd->name);
+	return ret;
+}
+
+static int s5k5baf_configure_regulators(struct s5k5baf *state)
+{
+	struct i2c_client *c = v4l2_get_subdevdata(&state->sd);
+	int ret;
+	int i;
+
+	for (i = 0; i < S5K5BAF_NUM_SUPPLIES; i++)
+		state->supplies[i].supply = s5k5baf_supply_names[i];
+
+	ret = devm_regulator_bulk_get(&c->dev, S5K5BAF_NUM_SUPPLIES,
+				      state->supplies);
+	if (ret < 0)
+		v4l2_err(c, "failed to get regulators\n");
+	return ret;
+}
+
+static int s5k5baf_probe(struct i2c_client *c,
+			const struct i2c_device_id *id)
+{
+	struct s5k5baf *state;
+	int ret;
+
+	state = devm_kzalloc(&c->dev, sizeof(*state), GFP_KERNEL);
+	if (!state)
+		return -ENOMEM;
+
+	mutex_init(&state->lock);
+	state->crop_sink = s5k5baf_cis_rect;
+	state->compose = s5k5baf_cis_rect;
+	state->crop_source = s5k5baf_cis_rect;
+
+	ret = s5k5baf_parse_device_node(state, &c->dev);
+	if (ret < 0)
+		return ret;
+
+	ret = s5k5baf_configure_subdevs(state, c);
+	if (ret < 0)
+		return ret;
+
+	ret = s5k5baf_configure_gpios(state);
+	if (ret < 0)
+		goto err_me;
+
+	ret = s5k5baf_configure_regulators(state);
+	if (ret < 0)
+		goto err_me;
+
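+	/* treat any failure to get the clock as a reason to defer probing */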
+	state->clock = devm_clk_get(state->sd.dev, S5K5BAF_CLK_NAME);
+	if (IS_ERR(state->clock)) {
+		ret = -EPROBE_DEFER;
+		goto err_me;
+	}
+
+	ret = s5k5baf_power_on(state);
+	if (ret < 0) {
+		ret = -EPROBE_DEFER;
+		goto err_me;
+	}
+	s5k5baf_hw_init(state);
+	ret = s5k5baf_check_fw_revision(state);
+
+	s5k5baf_power_off(state);
+	if (ret < 0)
+		goto err_me;
+
+	ret = s5k5baf_initialize_ctrls(state);
+	if (ret < 0)
+		goto err_me;
+
+	ret = v4l2_async_register_subdev(&state->sd);
+	if (ret < 0)
+		goto err_ctrl;
+
+	return 0;
+
+err_ctrl:
+	v4l2_ctrl_handler_free(state->sd.ctrl_handler);
+err_me:
+	media_entity_cleanup(&state->sd.entity);
+	media_entity_cleanup(&state->cis_sd.entity);
+	return ret;
+}
+
+static int s5k5baf_remove(struct i2c_client *c)
+{
+	struct v4l2_subdev *sd = i2c_get_clientdata(c);
+	struct s5k5baf *state = to_s5k5baf(sd);
+
+	v4l2_async_unregister_subdev(sd);
+	v4l2_ctrl_handler_free(sd->ctrl_handler);
+	media_entity_cleanup(&sd->entity);
+
+	sd = &state->cis_sd;
+	v4l2_device_unregister_subdev(sd);
+	media_entity_cleanup(&sd->entity);
+
+	return 0;
+}
+
+static const struct i2c_device_id s5k5baf_id[] = {
+	{ S5K5BAF_DRIVER_NAME, 0 },
+	{ },
+};
+MODULE_DEVICE_TABLE(i2c, s5k5baf_id);
+
+static const struct of_device_id s5k5baf_of_match[] = {
+	{ .compatible = "samsung,s5k5baf" },
+	{ }
+};
+MODULE_DEVICE_TABLE(of, s5k5baf_of_match);
+
+static struct i2c_driver s5k5baf_i2c_driver = {
+	.driver = {
+		.of_match_table = s5k5baf_of_match,
+		.name = S5K5BAF_DRIVER_NAME
+	},
+	.probe		= s5k5baf_probe,
+	.remove		= s5k5baf_remove,
+	.id_table	= s5k5baf_id,
+};
+
+module_i2c_driver(s5k5baf_i2c_driver);
+
+MODULE_DESCRIPTION("Samsung S5K5BAF(X) UXGA camera driver");
+MODULE_AUTHOR("Andrzej Hajda <a.hajda@samsung.com>");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/media/i2c/saa6588.c b/drivers/media/i2c/saa6588.c
index 70bc72e..2960b5a 100644
--- a/drivers/media/i2c/saa6588.c
+++ b/drivers/media/i2c/saa6588.c
@@ -150,14 +150,14 @@
 
 /* ---------------------------------------------------------------------- */
 
-static int block_to_user_buf(struct saa6588 *s, unsigned char __user *user_buf)
+static bool block_from_buf(struct saa6588 *s, unsigned char *buf)
 {
 	int i;
 
 	if (s->rd_index == s->wr_index) {
 		if (debug > 2)
 			dprintk(PREFIX "Read: buffer empty.\n");
-		return 0;
+		return false;
 	}
 
 	if (debug > 2) {
@@ -166,8 +166,7 @@
 			dprintk("0x%02x ", s->buffer[i]);
 	}
 
-	if (copy_to_user(user_buf, &s->buffer[s->rd_index], 3))
-		return -EFAULT;
+	memcpy(buf, &s->buffer[s->rd_index], 3);
 
 	s->rd_index += 3;
 	if (s->rd_index >= s->buf_size)
@@ -177,22 +176,22 @@
 	if (debug > 2)
 		dprintk("%d blocks total.\n", s->block_count);
 
-	return 1;
+	return true;
 }
 
 static void read_from_buf(struct saa6588 *s, struct saa6588_command *a)
 {
-	unsigned long flags;
-
 	unsigned char __user *buf_ptr = a->buffer;
-	unsigned int i;
+	unsigned char buf[3];
+	unsigned long flags;
 	unsigned int rd_blocks;
+	unsigned int i;
 
 	a->result = 0;
 	if (!a->buffer)
 		return;
 
-	while (!s->data_available_for_read) {
+	while (!a->nonblocking && !s->data_available_for_read) {
 		int ret = wait_event_interruptible(s->read_queue,
 					     s->data_available_for_read);
 		if (ret == -ERESTARTSYS) {
@@ -201,24 +200,31 @@
 		}
 	}
 
-	spin_lock_irqsave(&s->lock, flags);
 	rd_blocks = a->block_count;
+	spin_lock_irqsave(&s->lock, flags);
 	if (rd_blocks > s->block_count)
 		rd_blocks = s->block_count;
+	spin_unlock_irqrestore(&s->lock, flags);
 
-	if (!rd_blocks) {
-		spin_unlock_irqrestore(&s->lock, flags);
+	if (!rd_blocks)
 		return;
-	}
 
 	for (i = 0; i < rd_blocks; i++) {
-		if (block_to_user_buf(s, buf_ptr)) {
-			buf_ptr += 3;
-			a->result++;
-		} else
+		bool got_block;
+
+		spin_lock_irqsave(&s->lock, flags);
+		got_block = block_from_buf(s, buf);
+		spin_unlock_irqrestore(&s->lock, flags);
+		if (!got_block)
 			break;
+		if (copy_to_user(buf_ptr, buf, 3)) {
+			a->result = -EFAULT;
+			return;
+		}
+		buf_ptr += 3;
+		a->result += 3;
 	}
-	a->result *= 3;
+	spin_lock_irqsave(&s->lock, flags);
 	s->data_available_for_read = (s->block_count > 0);
 	spin_unlock_irqrestore(&s->lock, flags);
 }
@@ -394,14 +400,11 @@
 	struct saa6588_command *a = arg;
 
 	switch (cmd) {
-		/* --- open() for /dev/radio --- */
-	case SAA6588_CMD_OPEN:
-		a->result = 0;	/* return error if chip doesn't work ??? */
-		break;
 		/* --- close() for /dev/radio --- */
 	case SAA6588_CMD_CLOSE:
 		s->data_available_for_read = 1;
 		wake_up_interruptible(&s->read_queue);
+		s->data_available_for_read = 0;
 		a->result = 0;
 		break;
 		/* --- read() for /dev/radio --- */
@@ -411,9 +414,8 @@
 		/* --- poll() for /dev/radio --- */
 	case SAA6588_CMD_POLL:
 		a->result = 0;
-		if (s->data_available_for_read) {
+		if (s->data_available_for_read)
 			a->result |= POLLIN | POLLRDNORM;
-		}
 		poll_wait(a->instance, &s->read_queue, a->event_list);
 		break;
 
diff --git a/drivers/media/pci/saa7134/saa6752hs.c b/drivers/media/i2c/saa6752hs.c
similarity index 97%
rename from drivers/media/pci/saa7134/saa6752hs.c
rename to drivers/media/i2c/saa6752hs.c
index 8ac4b1f..8272c0b 100644
--- a/drivers/media/pci/saa7134/saa6752hs.c
+++ b/drivers/media/i2c/saa6752hs.c
@@ -33,11 +33,11 @@
 #include <linux/i2c.h>
 #include <linux/types.h>
 #include <linux/videodev2.h>
+#include <linux/init.h>
+#include <linux/crc32.h>
 #include <media/v4l2-device.h>
 #include <media/v4l2-ctrls.h>
 #include <media/v4l2-common.h>
-#include <linux/init.h>
-#include <linux/crc32.h>
 
 #define MPEG_VIDEO_TARGET_BITRATE_MAX  27000
 #define MPEG_VIDEO_MAX_BITRATE_MAX     27000
@@ -124,7 +124,7 @@
 
 /* ---------------------------------------------------------------------- */
 
-static u8 PAT[] = {
+static const u8 PAT[] = {
 	0xc2, /* i2c register */
 	0x00, /* table number for encoder */
 
@@ -150,7 +150,7 @@
 	0x00, 0x00, 0x00, 0x00 /* CRC32 */
 };
 
-static u8 PMT[] = {
+static const u8 PMT[] = {
 	0xc2, /* i2c register */
 	0x01, /* table number for encoder */
 
@@ -179,7 +179,7 @@
 	0x00, 0x00, 0x00, 0x00 /* CRC32 */
 };
 
-static u8 PMT_AC3[] = {
+static const u8 PMT_AC3[] = {
 	0xc2, /* i2c register */
 	0x01, /* table number for encoder(1) */
 	0x47, /* sync */
@@ -212,7 +212,7 @@
 	0xED, 0xDE, 0x2D, 0xF3 /* CRC32 BE */
 };
 
-static struct saa6752hs_mpeg_params param_defaults =
+static const struct saa6752hs_mpeg_params param_defaults =
 {
 	.ts_pid_pmt      = 16,
 	.ts_pid_video    = 260,
@@ -643,13 +643,6 @@
 
 static const struct v4l2_subdev_core_ops saa6752hs_core_ops = {
 	.init = saa6752hs_init,
-	.g_ext_ctrls = v4l2_subdev_g_ext_ctrls,
-	.try_ext_ctrls = v4l2_subdev_try_ext_ctrls,
-	.s_ext_ctrls = v4l2_subdev_s_ext_ctrls,
-	.g_ctrl = v4l2_subdev_g_ctrl,
-	.s_ctrl = v4l2_subdev_s_ctrl,
-	.queryctrl = v4l2_subdev_queryctrl,
-	.querymenu = v4l2_subdev_querymenu,
 	.s_std = saa6752hs_s_std,
 };
 
diff --git a/drivers/media/i2c/smiapp/smiapp-core.c b/drivers/media/i2c/smiapp/smiapp-core.c
index ae66d91..8741cae 100644
--- a/drivers/media/i2c/smiapp/smiapp-core.c
+++ b/drivers/media/i2c/smiapp/smiapp-core.c
@@ -399,7 +399,6 @@
 
 	BUG_ON(max(internal_csi_format_idx, csi_format_idx) + pixel_order
 	       >= ARRAY_SIZE(smiapp_csi_data_formats));
-	BUG_ON(min(internal_csi_format_idx, csi_format_idx) < 0);
 
 	dev_dbg(&client->dev, "new pixel order %s\n",
 		pixel_order_str[pixel_order]);
@@ -2028,8 +2027,8 @@
 	sel->r.width = min(sel->r.width, src_size->width);
 	sel->r.height = min(sel->r.height, src_size->height);
 
-	sel->r.left = min(sel->r.left, src_size->width - sel->r.width);
-	sel->r.top = min(sel->r.top, src_size->height - sel->r.height);
+	sel->r.left = min_t(int, sel->r.left, src_size->width - sel->r.width);
+	sel->r.top = min_t(int, sel->r.top, src_size->height - sel->r.height);
 
 	*crops[sel->pad] = sel->r;
 
@@ -2121,8 +2120,8 @@
 
 	sel->r.left = max(0, sel->r.left & ~1);
 	sel->r.top = max(0, sel->r.top & ~1);
-	sel->r.width = max(0, SMIAPP_ALIGN_DIM(sel->r.width, sel->flags));
-	sel->r.height = max(0, SMIAPP_ALIGN_DIM(sel->r.height, sel->flags));
+	sel->r.width = SMIAPP_ALIGN_DIM(sel->r.width, sel->flags);
+	sel->r.height =	SMIAPP_ALIGN_DIM(sel->r.height, sel->flags);
 
 	sel->r.width = max_t(unsigned int,
 			     sensor->limits[SMIAPP_LIMIT_MIN_X_OUTPUT_SIZE],
diff --git a/drivers/media/i2c/soc_camera/mt9m111.c b/drivers/media/i2c/soc_camera/mt9m111.c
index 6f40566..ccf5940 100644
--- a/drivers/media/i2c/soc_camera/mt9m111.c
+++ b/drivers/media/i2c/soc_camera/mt9m111.c
@@ -208,8 +208,8 @@
 	struct mt9m111_context *ctx;
 	struct v4l2_rect rect;	/* cropping rectangle */
 	struct v4l2_clk *clk;
-	int width;		/* output */
-	int height;		/* sizes */
+	unsigned int width;	/* output */
+	unsigned int height;	/* sizes */
 	struct mutex power_lock; /* lock to protect power_count */
 	int power_count;
 	const struct mt9m111_datafmt *fmt;
diff --git a/drivers/media/i2c/tvp5150.c b/drivers/media/i2c/tvp5150.c
index 89c0b13..542d252 100644
--- a/drivers/media/i2c/tvp5150.c
+++ b/drivers/media/i2c/tvp5150.c
@@ -58,21 +58,17 @@
 	struct i2c_client *c = v4l2_get_subdevdata(sd);
 	unsigned char buffer[1];
 	int rc;
+	struct i2c_msg msg[] = {
+		{ .addr = c->addr, .flags = 0,
+		  .buf = &addr, .len = 1 },
+		{ .addr = c->addr, .flags = I2C_M_RD,
+		  .buf = buffer, .len = 1 }
+	};
 
-	buffer[0] = addr;
-
-	rc = i2c_master_send(c, buffer, 1);
-	if (rc < 0) {
-		v4l2_err(sd, "i2c i/o error: rc == %d (should be 1)\n", rc);
-		return rc;
-	}
-
-	msleep(10);
-
-	rc = i2c_master_recv(c, buffer, 1);
-	if (rc < 0) {
-		v4l2_err(sd, "i2c i/o error: rc == %d (should be 1)\n", rc);
-		return rc;
+	rc = i2c_transfer(c->adapter, msg, 2);
+	if (rc < 0 || rc != 2) {
+		v4l2_err(sd, "i2c i/o error: rc == %d (should be 2)\n", rc);
+		return rc < 0 ? rc : -EIO;
 	}
 
 	v4l2_dbg(2, debug, sd, "tvp5150: read 0x%02x = 0x%02x\n", addr, buffer[0]);
@@ -867,7 +863,7 @@
 	struct v4l2_rect rect = a->c;
 	struct tvp5150 *decoder = to_tvp5150(sd);
 	v4l2_std_id std;
-	int hmax;
+	unsigned int hmax;
 
 	v4l2_dbg(1, debug, sd, "%s left=%d, top=%d, width=%d, height=%d\n",
 		__func__, rect.left, rect.top, rect.width, rect.height);
@@ -877,9 +873,9 @@
 
 	/* tvp5150 has some special limits */
 	rect.left = clamp(rect.left, 0, TVP5150_MAX_CROP_LEFT);
-	rect.width = clamp(rect.width,
-			   TVP5150_H_MAX - TVP5150_MAX_CROP_LEFT - rect.left,
-			   TVP5150_H_MAX - rect.left);
+	rect.width = clamp_t(unsigned int, rect.width,
+			     TVP5150_H_MAX - TVP5150_MAX_CROP_LEFT - rect.left,
+			     TVP5150_H_MAX - rect.left);
 	rect.top = clamp(rect.top, 0, TVP5150_MAX_CROP_TOP);
 
 	/* Calculate height based on current standard */
@@ -893,9 +889,9 @@
 	else
 		hmax = TVP5150_V_MAX_OTHERS;
 
-	rect.height = clamp(rect.height,
-			    hmax - TVP5150_MAX_CROP_TOP - rect.top,
-			    hmax - rect.top);
+	rect.height = clamp_t(unsigned int, rect.height,
+			      hmax - TVP5150_MAX_CROP_TOP - rect.top,
+			      hmax - rect.top);
 
 	tvp5150_write(sd, TVP5150_VERT_BLANKING_START, rect.top);
 	tvp5150_write(sd, TVP5150_VERT_BLANKING_STOP,
diff --git a/drivers/media/i2c/vs6624.c b/drivers/media/i2c/vs6624.c
index 25bdd93..23f4f65 100644
--- a/drivers/media/i2c/vs6624.c
+++ b/drivers/media/i2c/vs6624.c
@@ -503,6 +503,7 @@
 	return &container_of(ctrl->handler, struct vs6624, hdl)->sd;
 }
 
+#ifdef CONFIG_VIDEO_ADV_DEBUG
 static int vs6624_read(struct v4l2_subdev *sd, u16 index)
 {
 	struct i2c_client *client = v4l2_get_subdevdata(sd);
@@ -515,6 +516,7 @@
 
 	return buf[0];
 }
+#endif
 
 static int vs6624_write(struct v4l2_subdev *sd, u16 index,
 				u8 value)
diff --git a/drivers/media/media-entity.c b/drivers/media/media-entity.c
index 2c286c3..37c334e 100644
--- a/drivers/media/media-entity.c
+++ b/drivers/media/media-entity.c
@@ -235,6 +235,8 @@
 	media_entity_graph_walk_start(&graph, entity);
 
 	while ((entity = media_entity_graph_walk_next(&graph))) {
+		DECLARE_BITMAP(active, entity->num_pads);
+		DECLARE_BITMAP(has_no_links, entity->num_pads);
 		unsigned int i;
 
 		entity->stream_count++;
@@ -248,21 +250,46 @@
 		if (!entity->ops || !entity->ops->link_validate)
 			continue;
 
+		bitmap_zero(active, entity->num_pads);
+		bitmap_fill(has_no_links, entity->num_pads);
+
 		for (i = 0; i < entity->num_links; i++) {
 			struct media_link *link = &entity->links[i];
+			struct media_pad *pad = link->sink->entity == entity
+						? link->sink : link->source;
 
-			/* Is this pad part of an enabled link? */
-			if (!(link->flags & MEDIA_LNK_FL_ENABLED))
-				continue;
+			/* Mark that a pad is connected by a link. */
+			bitmap_clear(has_no_links, pad->index, 1);
 
-			/* Are we the sink or not? */
-			if (link->sink->entity != entity)
+			/*
+			 * Pads that either do not need to connect or
+			 * are connected through an enabled link are
+			 * fine.
+			 */
+			if (!(pad->flags & MEDIA_PAD_FL_MUST_CONNECT) ||
+			    link->flags & MEDIA_LNK_FL_ENABLED)
+				bitmap_set(active, pad->index, 1);
+
+			/*
+			 * Link validation will only take place for
+			 * sink ends of the link that are enabled.
+			 */
+			if (link->sink != pad ||
+			    !(link->flags & MEDIA_LNK_FL_ENABLED))
 				continue;
 
 			ret = entity->ops->link_validate(link);
 			if (ret < 0 && ret != -ENOIOCTLCMD)
 				goto error;
 		}
+
+		/* Either no links or validated links are fine. */
+		bitmap_or(active, active, has_no_links, entity->num_pads);
+
+		if (!bitmap_full(active, entity->num_pads)) {
+			ret = -EPIPE;
+			goto error;
+		}
 	}
 
 	mutex_unlock(&mdev->graph_mutex);
diff --git a/drivers/media/pci/bt8xx/bttv-cards.c b/drivers/media/pci/bt8xx/bttv-cards.c
index d85cb0a..6662b49 100644
--- a/drivers/media/pci/bt8xx/bttv-cards.c
+++ b/drivers/media/pci/bt8xx/bttv-cards.c
@@ -2426,7 +2426,7 @@
 	},
 		/* ---- card 0x87---------------------------------- */
 	[BTTV_BOARD_DVICO_FUSIONHDTV_5_LITE] = {
-		/* Michael Krufky <mkrufky@m1k.net> */
+		/* Michael Krufky <mkrufky@linuxtv.org> */
 		.name           = "DViCO FusionHDTV 5 Lite",
 		.tuner_type     = TUNER_LG_TDVS_H06XF, /* TDVS-H064F */
 		.tuner_addr	= ADDR_UNSET,
diff --git a/drivers/media/pci/bt8xx/bttv-driver.c b/drivers/media/pci/bt8xx/bttv-driver.c
index 92a06fd..afcd53b 100644
--- a/drivers/media/pci/bt8xx/bttv-driver.c
+++ b/drivers/media/pci/bt8xx/bttv-driver.c
@@ -1126,9 +1126,9 @@
 		c->min_scaled_height = 32;
 	} else {
 		c->min_scaled_width =
-			(max(48, c->rect.width >> 4) + 3) & ~3;
+			(max_t(unsigned int, 48, c->rect.width >> 4) + 3) & ~3;
 		c->min_scaled_height =
-			max(32, c->rect.height >> 4);
+			max_t(unsigned int, 32, c->rect.height >> 4);
 	}
 
 	c->max_scaled_width  = c->rect.width & ~3;
@@ -2024,7 +2024,7 @@
 		/* We cannot scale up. When the scaled image is larger
 		   than crop.rect we adjust the crop.rect as required
 		   by the V4L2 spec, hence cropcap.bounds are our limit. */
-		max_width = min(b->width, (__s32) MAX_HACTIVE);
+		max_width = min_t(unsigned int, b->width, MAX_HACTIVE);
 		max_height = b->height;
 
 		/* We cannot capture the same line as video and VBI data.
@@ -3266,7 +3266,9 @@
 	struct bttv_fh *fh = file->private_data;
 	struct bttv *btv = fh->btv;
 	struct saa6588_command cmd;
-	cmd.block_count = count/3;
+
+	cmd.block_count = count / 3;
+	cmd.nonblocking = file->f_flags & O_NONBLOCK;
 	cmd.buffer = data;
 	cmd.instance = file;
 	cmd.result = -ENODEV;
diff --git a/drivers/media/pci/bt8xx/bttv-gpio.c b/drivers/media/pci/bt8xx/bttv-gpio.c
index 922e823..3f364b7 100644
--- a/drivers/media/pci/bt8xx/bttv-gpio.c
+++ b/drivers/media/pci/bt8xx/bttv-gpio.c
@@ -98,7 +98,7 @@
 
 	err = device_register(&sub->dev);
 	if (0 != err) {
-		kfree(sub);
+		put_device(&sub->dev);
 		return err;
 	}
 	pr_info("%d: add subdevice \"%s\"\n", core->nr, dev_name(&sub->dev));
diff --git a/drivers/media/pci/cx18/cx18-driver.c b/drivers/media/pci/cx18/cx18-driver.c
index c1f8cc6..716bdc5 100644
--- a/drivers/media/pci/cx18/cx18-driver.c
+++ b/drivers/media/pci/cx18/cx18-driver.c
@@ -327,13 +327,16 @@
 	struct i2c_client *c;
 	u8 eedata[256];
 
+	memset(tv, 0, sizeof(*tv));
+
 	c = kzalloc(sizeof(*c), GFP_KERNEL);
+	if (!c)
+		return;
 
 	strlcpy(c->name, "cx18 tveeprom tmp", sizeof(c->name));
 	c->adapter = &cx->i2c_adap[0];
 	c->addr = 0xa0 >> 1;
 
-	memset(tv, 0, sizeof(*tv));
 	if (tveeprom_read(c, eedata, sizeof(eedata)))
 		goto ret;
 
diff --git a/drivers/media/pci/cx25821/cx25821-alsa.c b/drivers/media/pci/cx25821/cx25821-alsa.c
index 6e91e84..b1e08c3 100644
--- a/drivers/media/pci/cx25821/cx25821-alsa.c
+++ b/drivers/media/pci/cx25821/cx25821-alsa.c
@@ -618,7 +618,7 @@
  * Only boards with eeprom and byte 1 at eeprom=1 have it
  */
 
-static DEFINE_PCI_DEVICE_TABLE(cx25821_audio_pci_tbl) = {
+static const struct pci_device_id cx25821_audio_pci_tbl[] = {
 	{0x14f1, 0x0920, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
 	{0,}
 };
diff --git a/drivers/media/pci/cx25821/cx25821-core.c b/drivers/media/pci/cx25821/cx25821-core.c
index b762c5b..e81173c 100644
--- a/drivers/media/pci/cx25821/cx25821-core.c
+++ b/drivers/media/pci/cx25821/cx25821-core.c
@@ -1361,7 +1361,7 @@
 	kfree(dev);
 }
 
-static DEFINE_PCI_DEVICE_TABLE(cx25821_pci_tbl) = {
+static const struct pci_device_id cx25821_pci_tbl[] = {
 	{
 		/* CX25821 Athena */
 		.vendor = 0x14f1,
diff --git a/drivers/media/pci/cx88/cx88-alsa.c b/drivers/media/pci/cx88/cx88-alsa.c
index 400eb1c..d014206e 100644
--- a/drivers/media/pci/cx88/cx88-alsa.c
+++ b/drivers/media/pci/cx88/cx88-alsa.c
@@ -931,9 +931,9 @@
  */
 static void cx88_audio_finidev(struct pci_dev *pci)
 {
-	struct cx88_audio_dev *card = pci_get_drvdata(pci);
+	struct snd_card *card = pci_get_drvdata(pci);
 
-	snd_card_free((void *)card);
+	snd_card_free(card);
 
 	devno--;
 }
diff --git a/drivers/media/pci/saa7134/Kconfig b/drivers/media/pci/saa7134/Kconfig
index 15b90d6..7883393 100644
--- a/drivers/media/pci/saa7134/Kconfig
+++ b/drivers/media/pci/saa7134/Kconfig
@@ -6,6 +6,7 @@
 	select VIDEO_TVEEPROM
 	select CRC32
 	select VIDEO_SAA6588 if MEDIA_SUBDRV_AUTOSELECT
+	select VIDEO_SAA6752HS if MEDIA_SUBDRV_AUTOSELECT
 	---help---
 	  This is a video4linux driver for Philips SAA713x based
 	  TV cards.
diff --git a/drivers/media/pci/saa7134/Makefile b/drivers/media/pci/saa7134/Makefile
index 3537548..58de9b0 100644
--- a/drivers/media/pci/saa7134/Makefile
+++ b/drivers/media/pci/saa7134/Makefile
@@ -4,7 +4,7 @@
 saa7134-y +=	saa7134-video.o
 saa7134-$(CONFIG_VIDEO_SAA7134_RC) += saa7134-input.o
 
-obj-$(CONFIG_VIDEO_SAA7134) +=  saa6752hs.o saa7134.o saa7134-empress.o
+obj-$(CONFIG_VIDEO_SAA7134) +=  saa7134.o saa7134-empress.o
 
 obj-$(CONFIG_VIDEO_SAA7134_ALSA) += saa7134-alsa.o
 
diff --git a/drivers/media/pci/saa7134/saa7134-cards.c b/drivers/media/pci/saa7134/saa7134-cards.c
index d45e7f6..c9b2350 100644
--- a/drivers/media/pci/saa7134/saa7134-cards.c
+++ b/drivers/media/pci/saa7134/saa7134-cards.c
@@ -2590,7 +2590,7 @@
 		}},
 	},
 	[SAA7134_BOARD_AVERMEDIA_AVERTVHD_A180] = {
-		/* Michael Krufky <mkrufky@m1k.net>
+		/* Michael Krufky <mkrufky@linuxtv.org>
 		 * Uses Alps Electric TDHU2, containing NXT2004 ATSC Decoder
 		 * AFAIK, there is no analog demod, thus,
 		 * no support for analog television.
diff --git a/drivers/media/pci/saa7134/saa7134-core.c b/drivers/media/pci/saa7134/saa7134-core.c
index 27d7ee7..1362b4a 100644
--- a/drivers/media/pci/saa7134/saa7134-core.c
+++ b/drivers/media/pci/saa7134/saa7134-core.c
@@ -751,6 +751,7 @@
 	saa7134_input_fini(dev);
 	saa7134_vbi_fini(dev);
 	saa7134_tvaudio_fini(dev);
+	saa7134_video_fini(dev);
 	return 0;
 }
 
@@ -802,7 +803,6 @@
 	*vfd = *template;
 	vfd->v4l2_dev  = &dev->v4l2_dev;
 	vfd->release = video_device_release;
-	vfd->debug   = video_debug;
 	snprintf(vfd->name, sizeof(vfd->name), "%s %s (%s)",
 		 dev->name, type, saa7134_boards[dev->board].name);
 	set_bit(V4L2_FL_USE_FH_PRIO, &vfd->flags);
@@ -1008,13 +1008,13 @@
 
 	/* load i2c helpers */
 	if (card_is_empress(dev)) {
-		struct v4l2_subdev *sd =
+		dev->empress_sd =
 			v4l2_i2c_new_subdev(&dev->v4l2_dev, &dev->i2c_adap,
 				"saa6752hs",
 				saa7134_boards[dev->board].empress_addr, NULL);
 
-		if (sd)
-			sd->grp_id = GRP_EMPRESS;
+		if (dev->empress_sd)
+			dev->empress_sd->grp_id = GRP_EMPRESS;
 	}
 
 	if (saa7134_boards[dev->board].rds_addr) {
@@ -1046,6 +1046,7 @@
 		printk(KERN_INFO "%s: Overlay support disabled.\n", dev->name);
 
 	dev->video_dev = vdev_init(dev,&saa7134_video_template,"video");
+	dev->video_dev->ctrl_handler = &dev->ctrl_handler;
 	err = video_register_device(dev->video_dev,VFL_TYPE_GRABBER,
 				    video_nr[dev->nr]);
 	if (err < 0) {
@@ -1057,6 +1058,7 @@
 	       dev->name, video_device_node_name(dev->video_dev));
 
 	dev->vbi_dev = vdev_init(dev, &saa7134_video_template, "vbi");
+	dev->vbi_dev->ctrl_handler = &dev->ctrl_handler;
 
 	err = video_register_device(dev->vbi_dev,VFL_TYPE_VBI,
 				    vbi_nr[dev->nr]);
@@ -1067,6 +1069,7 @@
 
 	if (card_has_radio(dev)) {
 		dev->radio_dev = vdev_init(dev,&saa7134_radio_template,"radio");
+		dev->radio_dev->ctrl_handler = &dev->radio_ctrl_handler;
 		err = video_register_device(dev->radio_dev,VFL_TYPE_RADIO,
 					    radio_nr[dev->nr]);
 		if (err < 0)
diff --git a/drivers/media/pci/saa7134/saa7134-empress.c b/drivers/media/pci/saa7134/saa7134-empress.c
index 3022eb2..0a9047e 100644
--- a/drivers/media/pci/saa7134/saa7134-empress.c
+++ b/drivers/media/pci/saa7134/saa7134-empress.c
@@ -23,12 +23,12 @@
 #include <linux/kernel.h>
 #include <linux/delay.h>
 
+#include <media/v4l2-common.h>
+#include <media/v4l2-event.h>
+
 #include "saa7134-reg.h"
 #include "saa7134.h"
 
-#include <media/saa6752hs.h>
-#include <media/v4l2-common.h>
-
 /* ------------------------------------------------------------------ */
 
 MODULE_AUTHOR("Gerd Knorr <kraxel@bytesex.org> [SuSE Labs]");
@@ -85,52 +85,54 @@
 {
 	struct video_device *vdev = video_devdata(file);
 	struct saa7134_dev *dev = video_drvdata(file);
-	int err;
+	struct saa7134_fh *fh;
 
-	dprintk("open dev=%s\n", video_device_node_name(vdev));
-	err = -EBUSY;
-	if (!mutex_trylock(&dev->empress_tsq.vb_lock))
-		return err;
-	if (atomic_read(&dev->empress_users))
-		goto done;
+	/* allocate + initialize per filehandle data */
+	fh = kzalloc(sizeof(*fh), GFP_KERNEL);
+	if (NULL == fh)
+		return -ENOMEM;
+
+	v4l2_fh_init(&fh->fh, vdev);
+	file->private_data = fh;
+	fh->is_empress = true;
+	v4l2_fh_add(&fh->fh);
 
 	/* Unmute audio */
 	saa_writeb(SAA7134_AUDIO_MUTE_CTRL,
 		saa_readb(SAA7134_AUDIO_MUTE_CTRL) & ~(1 << 6));
 
-	atomic_inc(&dev->empress_users);
-	file->private_data = dev;
-	err = 0;
-
-done:
-	mutex_unlock(&dev->empress_tsq.vb_lock);
-	return err;
+	return 0;
 }
 
 static int ts_release(struct file *file)
 {
-	struct saa7134_dev *dev = file->private_data;
+	struct saa7134_dev *dev = video_drvdata(file);
+	struct saa7134_fh *fh = file->private_data;
 
-	videobuf_stop(&dev->empress_tsq);
-	videobuf_mmap_free(&dev->empress_tsq);
+	if (res_check(fh, RESOURCE_EMPRESS)) {
+		videobuf_stop(&dev->empress_tsq);
+		videobuf_mmap_free(&dev->empress_tsq);
 
-	/* stop the encoder */
-	ts_reset_encoder(dev);
+		/* stop the encoder */
+		ts_reset_encoder(dev);
 
-	/* Mute audio */
-	saa_writeb(SAA7134_AUDIO_MUTE_CTRL,
-		saa_readb(SAA7134_AUDIO_MUTE_CTRL) | (1 << 6));
+		/* Mute audio */
+		saa_writeb(SAA7134_AUDIO_MUTE_CTRL,
+				saa_readb(SAA7134_AUDIO_MUTE_CTRL) | (1 << 6));
+	}
 
-	atomic_dec(&dev->empress_users);
-
+	v4l2_fh_del(&fh->fh);
+	v4l2_fh_exit(&fh->fh);
 	return 0;
 }
 
 static ssize_t
 ts_read(struct file *file, char __user *data, size_t count, loff_t *ppos)
 {
-	struct saa7134_dev *dev = file->private_data;
+	struct saa7134_dev *dev = video_drvdata(file);
 
+	if (res_locked(dev, RESOURCE_EMPRESS))
+		return -EBUSY;
 	if (!dev->empress_started)
 		ts_init_encoder(dev);
 
@@ -142,68 +144,27 @@
 static unsigned int
 ts_poll(struct file *file, struct poll_table_struct *wait)
 {
-	struct saa7134_dev *dev = file->private_data;
+	unsigned long req_events = poll_requested_events(wait);
+	struct saa7134_dev *dev = video_drvdata(file);
+	struct saa7134_fh *fh = file->private_data;
+	unsigned int rc = 0;
 
-	return videobuf_poll_stream(file, &dev->empress_tsq, wait);
+	if (v4l2_event_pending(&fh->fh))
+		rc = POLLPRI;
+	else if (req_events & POLLPRI)
+		poll_wait(file, &fh->fh.wait, wait);
+	return rc | videobuf_poll_stream(file, &dev->empress_tsq, wait);
 }
 
 
 static int
 ts_mmap(struct file *file, struct vm_area_struct * vma)
 {
-	struct saa7134_dev *dev = file->private_data;
+	struct saa7134_dev *dev = video_drvdata(file);
 
 	return videobuf_mmap_mapper(&dev->empress_tsq, vma);
 }
 
-/*
- * This function is _not_ called directly, but from
- * video_generic_ioctl (and maybe others).  userspace
- * copying is done already, arg is a kernel pointer.
- */
-
-static int empress_querycap(struct file *file, void  *priv,
-					struct v4l2_capability *cap)
-{
-	struct saa7134_dev *dev = file->private_data;
-
-	strcpy(cap->driver, "saa7134");
-	strlcpy(cap->card, saa7134_boards[dev->board].name,
-		sizeof(cap->card));
-	sprintf(cap->bus_info, "PCI:%s", pci_name(dev->pci));
-	cap->capabilities =
-		V4L2_CAP_VIDEO_CAPTURE |
-		V4L2_CAP_READWRITE |
-		V4L2_CAP_STREAMING;
-	return 0;
-}
-
-static int empress_enum_input(struct file *file, void *priv,
-					struct v4l2_input *i)
-{
-	if (i->index != 0)
-		return -EINVAL;
-
-	i->type = V4L2_INPUT_TYPE_CAMERA;
-	strcpy(i->name, "CCIR656");
-
-	return 0;
-}
-
-static int empress_g_input(struct file *file, void *priv, unsigned int *i)
-{
-	*i = 0;
-	return 0;
-}
-
-static int empress_s_input(struct file *file, void *priv, unsigned int i)
-{
-	if (i != 0)
-		return -EINVAL;
-
-	return 0;
-}
-
 static int empress_enum_fmt_vid_cap(struct file *file, void  *priv,
 					struct v4l2_fmtdesc *f)
 {
@@ -219,7 +180,7 @@
 static int empress_g_fmt_vid_cap(struct file *file, void *priv,
 				struct v4l2_format *f)
 {
-	struct saa7134_dev *dev = file->private_data;
+	struct saa7134_dev *dev = video_drvdata(file);
 	struct v4l2_mbus_framefmt mbus_fmt;
 
 	saa_call_all(dev, video, g_mbus_fmt, &mbus_fmt);
@@ -236,7 +197,7 @@
 static int empress_s_fmt_vid_cap(struct file *file, void *priv,
 				struct v4l2_format *f)
 {
-	struct saa7134_dev *dev = file->private_data;
+	struct saa7134_dev *dev = video_drvdata(file);
 	struct v4l2_mbus_framefmt mbus_fmt;
 
 	v4l2_fill_mbus_format(&mbus_fmt, &f->fmt.pix, V4L2_MBUS_FMT_FIXED);
@@ -254,7 +215,7 @@
 static int empress_try_fmt_vid_cap(struct file *file, void *priv,
 				struct v4l2_format *f)
 {
-	struct saa7134_dev *dev = file->private_data;
+	struct saa7134_dev *dev = video_drvdata(file);
 	struct v4l2_mbus_framefmt mbus_fmt;
 
 	v4l2_fill_mbus_format(&mbus_fmt, &f->fmt.pix, V4L2_MBUS_FMT_FIXED);
@@ -269,175 +230,6 @@
 	return 0;
 }
 
-static int empress_reqbufs(struct file *file, void *priv,
-					struct v4l2_requestbuffers *p)
-{
-	struct saa7134_dev *dev = file->private_data;
-
-	return videobuf_reqbufs(&dev->empress_tsq, p);
-}
-
-static int empress_querybuf(struct file *file, void *priv,
-					struct v4l2_buffer *b)
-{
-	struct saa7134_dev *dev = file->private_data;
-
-	return videobuf_querybuf(&dev->empress_tsq, b);
-}
-
-static int empress_qbuf(struct file *file, void *priv, struct v4l2_buffer *b)
-{
-	struct saa7134_dev *dev = file->private_data;
-
-	return videobuf_qbuf(&dev->empress_tsq, b);
-}
-
-static int empress_dqbuf(struct file *file, void *priv, struct v4l2_buffer *b)
-{
-	struct saa7134_dev *dev = file->private_data;
-
-	return videobuf_dqbuf(&dev->empress_tsq, b,
-				file->f_flags & O_NONBLOCK);
-}
-
-static int empress_streamon(struct file *file, void *priv,
-					enum v4l2_buf_type type)
-{
-	struct saa7134_dev *dev = file->private_data;
-
-	return videobuf_streamon(&dev->empress_tsq);
-}
-
-static int empress_streamoff(struct file *file, void *priv,
-					enum v4l2_buf_type type)
-{
-	struct saa7134_dev *dev = file->private_data;
-
-	return videobuf_streamoff(&dev->empress_tsq);
-}
-
-static int empress_s_ext_ctrls(struct file *file, void *priv,
-			       struct v4l2_ext_controls *ctrls)
-{
-	struct saa7134_dev *dev = file->private_data;
-	int err;
-
-	/* count == 0 is abused in saa6752hs.c, so that special
-		case is handled here explicitly. */
-	if (ctrls->count == 0)
-		return 0;
-
-	if (ctrls->ctrl_class != V4L2_CTRL_CLASS_MPEG)
-		return -EINVAL;
-
-	err = saa_call_empress(dev, core, s_ext_ctrls, ctrls);
-	ts_init_encoder(dev);
-
-	return err;
-}
-
-static int empress_g_ext_ctrls(struct file *file, void *priv,
-			       struct v4l2_ext_controls *ctrls)
-{
-	struct saa7134_dev *dev = file->private_data;
-
-	if (ctrls->ctrl_class != V4L2_CTRL_CLASS_MPEG)
-		return -EINVAL;
-	return saa_call_empress(dev, core, g_ext_ctrls, ctrls);
-}
-
-static int empress_g_ctrl(struct file *file, void *priv,
-					struct v4l2_control *c)
-{
-	struct saa7134_dev *dev = file->private_data;
-
-	return saa7134_g_ctrl_internal(dev, NULL, c);
-}
-
-static int empress_s_ctrl(struct file *file, void *priv,
-					struct v4l2_control *c)
-{
-	struct saa7134_dev *dev = file->private_data;
-
-	return saa7134_s_ctrl_internal(dev, NULL, c);
-}
-
-static int empress_queryctrl(struct file *file, void *priv,
-					struct v4l2_queryctrl *c)
-{
-	/* Must be sorted from low to high control ID! */
-	static const u32 user_ctrls[] = {
-		V4L2_CID_USER_CLASS,
-		V4L2_CID_BRIGHTNESS,
-		V4L2_CID_CONTRAST,
-		V4L2_CID_SATURATION,
-		V4L2_CID_HUE,
-		V4L2_CID_AUDIO_VOLUME,
-		V4L2_CID_AUDIO_MUTE,
-		V4L2_CID_HFLIP,
-		0
-	};
-
-	/* Must be sorted from low to high control ID! */
-	static const u32 mpeg_ctrls[] = {
-		V4L2_CID_MPEG_CLASS,
-		V4L2_CID_MPEG_STREAM_TYPE,
-		V4L2_CID_MPEG_STREAM_PID_PMT,
-		V4L2_CID_MPEG_STREAM_PID_AUDIO,
-		V4L2_CID_MPEG_STREAM_PID_VIDEO,
-		V4L2_CID_MPEG_STREAM_PID_PCR,
-		V4L2_CID_MPEG_AUDIO_SAMPLING_FREQ,
-		V4L2_CID_MPEG_AUDIO_ENCODING,
-		V4L2_CID_MPEG_AUDIO_L2_BITRATE,
-		V4L2_CID_MPEG_VIDEO_ENCODING,
-		V4L2_CID_MPEG_VIDEO_ASPECT,
-		V4L2_CID_MPEG_VIDEO_BITRATE_MODE,
-		V4L2_CID_MPEG_VIDEO_BITRATE,
-		V4L2_CID_MPEG_VIDEO_BITRATE_PEAK,
-		0
-	};
-	static const u32 *ctrl_classes[] = {
-		user_ctrls,
-		mpeg_ctrls,
-		NULL
-	};
-	struct saa7134_dev *dev = file->private_data;
-
-	c->id = v4l2_ctrl_next(ctrl_classes, c->id);
-	if (c->id == 0)
-		return -EINVAL;
-	if (c->id == V4L2_CID_USER_CLASS || c->id == V4L2_CID_MPEG_CLASS)
-		return v4l2_ctrl_query_fill(c, 0, 0, 0, 0);
-	if (V4L2_CTRL_ID2CLASS(c->id) != V4L2_CTRL_CLASS_MPEG)
-		return saa7134_queryctrl(file, priv, c);
-	return saa_call_empress(dev, core, queryctrl, c);
-}
-
-static int empress_querymenu(struct file *file, void *priv,
-					struct v4l2_querymenu *c)
-{
-	struct saa7134_dev *dev = file->private_data;
-
-	if (V4L2_CTRL_ID2CLASS(c->id) != V4L2_CTRL_CLASS_MPEG)
-		return -EINVAL;
-	return saa_call_empress(dev, core, querymenu, c);
-}
-
-static int empress_s_std(struct file *file, void *priv, v4l2_std_id id)
-{
-	struct saa7134_dev *dev = file->private_data;
-
-	return saa7134_s_std_internal(dev, NULL, id);
-}
-
-static int empress_g_std(struct file *file, void *priv, v4l2_std_id *id)
-{
-	struct saa7134_dev *dev = file->private_data;
-
-	*id = dev->tvnorm->id;
-	return 0;
-}
-
 static const struct v4l2_file_operations ts_fops =
 {
 	.owner	  = THIS_MODULE,
@@ -450,28 +242,29 @@
 };
 
 static const struct v4l2_ioctl_ops ts_ioctl_ops = {
-	.vidioc_querycap		= empress_querycap,
+	.vidioc_querycap		= saa7134_querycap,
 	.vidioc_enum_fmt_vid_cap	= empress_enum_fmt_vid_cap,
 	.vidioc_try_fmt_vid_cap		= empress_try_fmt_vid_cap,
 	.vidioc_s_fmt_vid_cap		= empress_s_fmt_vid_cap,
 	.vidioc_g_fmt_vid_cap		= empress_g_fmt_vid_cap,
-	.vidioc_reqbufs			= empress_reqbufs,
-	.vidioc_querybuf		= empress_querybuf,
-	.vidioc_qbuf			= empress_qbuf,
-	.vidioc_dqbuf			= empress_dqbuf,
-	.vidioc_streamon		= empress_streamon,
-	.vidioc_streamoff		= empress_streamoff,
-	.vidioc_s_ext_ctrls		= empress_s_ext_ctrls,
-	.vidioc_g_ext_ctrls		= empress_g_ext_ctrls,
-	.vidioc_enum_input		= empress_enum_input,
-	.vidioc_g_input			= empress_g_input,
-	.vidioc_s_input			= empress_s_input,
-	.vidioc_queryctrl		= empress_queryctrl,
-	.vidioc_querymenu		= empress_querymenu,
-	.vidioc_g_ctrl			= empress_g_ctrl,
-	.vidioc_s_ctrl			= empress_s_ctrl,
-	.vidioc_s_std			= empress_s_std,
-	.vidioc_g_std			= empress_g_std,
+	.vidioc_reqbufs			= saa7134_reqbufs,
+	.vidioc_querybuf		= saa7134_querybuf,
+	.vidioc_qbuf			= saa7134_qbuf,
+	.vidioc_dqbuf			= saa7134_dqbuf,
+	.vidioc_streamon		= saa7134_streamon,
+	.vidioc_streamoff		= saa7134_streamoff,
+	.vidioc_g_frequency		= saa7134_g_frequency,
+	.vidioc_s_frequency		= saa7134_s_frequency,
+	.vidioc_g_tuner			= saa7134_g_tuner,
+	.vidioc_s_tuner			= saa7134_s_tuner,
+	.vidioc_enum_input		= saa7134_enum_input,
+	.vidioc_g_input			= saa7134_g_input,
+	.vidioc_s_input			= saa7134_s_input,
+	.vidioc_s_std			= saa7134_s_std,
+	.vidioc_g_std			= saa7134_g_std,
+	.vidioc_log_status		= v4l2_ctrl_log_status,
+	.vidioc_subscribe_event		= v4l2_ctrl_subscribe_event,
+	.vidioc_unsubscribe_event	= v4l2_event_unsubscribe,
 };
 
 /* ----------------------------------------------------------- */
@@ -501,9 +294,26 @@
 	schedule_work(&dev->empress_workqueue);
 }
 
+static bool empress_ctrl_filter(const struct v4l2_ctrl *ctrl)
+{
+	switch (ctrl->id) {
+	case V4L2_CID_BRIGHTNESS:
+	case V4L2_CID_HUE:
+	case V4L2_CID_CONTRAST:
+	case V4L2_CID_SATURATION:
+	case V4L2_CID_AUDIO_MUTE:
+	case V4L2_CID_AUDIO_VOLUME:
+	case V4L2_CID_PRIVATE_INVERT:
+	case V4L2_CID_PRIVATE_AUTOMUTE:
+		return true;
+	default:
+		return false;
+	}
+}
 
 static int empress_init(struct saa7134_dev *dev)
 {
+	struct v4l2_ctrl_handler *hdl = &dev->empress_ctrl_handler;
 	int err;
 
 	dprintk("%s: %s\n",dev->name,__func__);
@@ -516,6 +326,16 @@
 	snprintf(dev->empress_dev->name, sizeof(dev->empress_dev->name),
 		 "%s empress (%s)", dev->name,
 		 saa7134_boards[dev->board].name);
+	set_bit(V4L2_FL_USE_FH_PRIO, &dev->empress_dev->flags);
+	v4l2_ctrl_handler_init(hdl, 21);
+	v4l2_ctrl_add_handler(hdl, &dev->ctrl_handler, empress_ctrl_filter);
+	if (dev->empress_sd)
+		v4l2_ctrl_add_handler(hdl, dev->empress_sd->ctrl_handler, NULL);
+	if (hdl->error) {
+		video_device_release(dev->empress_dev);
+		return hdl->error;
+	}
+	dev->empress_dev->ctrl_handler = hdl;
 
 	INIT_WORK(&dev->empress_workqueue, empress_signal_update);
 
@@ -551,6 +371,7 @@
 		return 0;
 	flush_work(&dev->empress_workqueue);
 	video_unregister_device(dev->empress_dev);
+	v4l2_ctrl_handler_free(&dev->empress_ctrl_handler);
 	dev->empress_dev = NULL;
 	return 0;
 }
diff --git a/drivers/media/pci/saa7134/saa7134-vbi.c b/drivers/media/pci/saa7134/saa7134-vbi.c
index e9aa94b..d4da18d 100644
--- a/drivers/media/pci/saa7134/saa7134-vbi.c
+++ b/drivers/media/pci/saa7134/saa7134-vbi.c
@@ -117,8 +117,7 @@
 			  struct videobuf_buffer *vb,
 			  enum v4l2_field field)
 {
-	struct saa7134_fh *fh   = q->priv_data;
-	struct saa7134_dev *dev = fh->dev;
+	struct saa7134_dev *dev = q->priv_data;
 	struct saa7134_buf *buf = container_of(vb,struct saa7134_buf,vb);
 	struct saa7134_tvnorm *norm = dev->tvnorm;
 	unsigned int lines, llength, size;
@@ -141,7 +140,7 @@
 		buf->vb.width  = llength;
 		buf->vb.height = lines;
 		buf->vb.size   = size;
-		buf->pt        = &fh->pt_vbi;
+		buf->pt        = &dev->pt_vbi;
 
 		err = videobuf_iolock(q,&buf->vb,NULL);
 		if (err)
@@ -166,8 +165,7 @@
 static int
 buffer_setup(struct videobuf_queue *q, unsigned int *count, unsigned int *size)
 {
-	struct saa7134_fh *fh   = q->priv_data;
-	struct saa7134_dev *dev = fh->dev;
+	struct saa7134_dev *dev = q->priv_data;
 	int llength,lines;
 
 	lines   = dev->tvnorm->vbi_v_stop_0 - dev->tvnorm->vbi_v_start_0 +1;
@@ -181,8 +179,7 @@
 
 static void buffer_queue(struct videobuf_queue *q, struct videobuf_buffer *vb)
 {
-	struct saa7134_fh *fh = q->priv_data;
-	struct saa7134_dev *dev = fh->dev;
+	struct saa7134_dev *dev = q->priv_data;
 	struct saa7134_buf *buf = container_of(vb,struct saa7134_buf,vb);
 
 	saa7134_buffer_queue(dev,&dev->vbi_q,buf);
diff --git a/drivers/media/pci/saa7134/saa7134-video.c b/drivers/media/pci/saa7134/saa7134-video.c
index fb60da8..eb472b5 100644
--- a/drivers/media/pci/saa7134/saa7134-video.c
+++ b/drivers/media/pci/saa7134/saa7134-video.c
@@ -27,10 +27,12 @@
 #include <linux/slab.h>
 #include <linux/sort.h>
 
+#include <media/v4l2-common.h>
+#include <media/v4l2-event.h>
+#include <media/saa6588.h>
+
 #include "saa7134-reg.h"
 #include "saa7134.h"
-#include <media/v4l2-common.h>
-#include <media/saa6588.h>
 
 /* ------------------------------------------------------------------ */
 
@@ -369,117 +371,6 @@
 };
 #define TVNORMS ARRAY_SIZE(tvnorms)
 
-#define V4L2_CID_PRIVATE_INVERT      (V4L2_CID_PRIVATE_BASE + 0)
-#define V4L2_CID_PRIVATE_Y_ODD       (V4L2_CID_PRIVATE_BASE + 1)
-#define V4L2_CID_PRIVATE_Y_EVEN      (V4L2_CID_PRIVATE_BASE + 2)
-#define V4L2_CID_PRIVATE_AUTOMUTE    (V4L2_CID_PRIVATE_BASE + 3)
-#define V4L2_CID_PRIVATE_LASTP1      (V4L2_CID_PRIVATE_BASE + 4)
-
-static const struct v4l2_queryctrl no_ctrl = {
-	.name  = "42",
-	.flags = V4L2_CTRL_FLAG_DISABLED,
-};
-static const struct v4l2_queryctrl video_ctrls[] = {
-	/* --- video --- */
-	{
-		.id            = V4L2_CID_BRIGHTNESS,
-		.name          = "Brightness",
-		.minimum       = 0,
-		.maximum       = 255,
-		.step          = 1,
-		.default_value = 128,
-		.type          = V4L2_CTRL_TYPE_INTEGER,
-	},{
-		.id            = V4L2_CID_CONTRAST,
-		.name          = "Contrast",
-		.minimum       = 0,
-		.maximum       = 127,
-		.step          = 1,
-		.default_value = 68,
-		.type          = V4L2_CTRL_TYPE_INTEGER,
-	},{
-		.id            = V4L2_CID_SATURATION,
-		.name          = "Saturation",
-		.minimum       = 0,
-		.maximum       = 127,
-		.step          = 1,
-		.default_value = 64,
-		.type          = V4L2_CTRL_TYPE_INTEGER,
-	},{
-		.id            = V4L2_CID_HUE,
-		.name          = "Hue",
-		.minimum       = -128,
-		.maximum       = 127,
-		.step          = 1,
-		.default_value = 0,
-		.type          = V4L2_CTRL_TYPE_INTEGER,
-	},{
-		.id            = V4L2_CID_HFLIP,
-		.name          = "Mirror",
-		.minimum       = 0,
-		.maximum       = 1,
-		.type          = V4L2_CTRL_TYPE_BOOLEAN,
-	},
-	/* --- audio --- */
-	{
-		.id            = V4L2_CID_AUDIO_MUTE,
-		.name          = "Mute",
-		.minimum       = 0,
-		.maximum       = 1,
-		.type          = V4L2_CTRL_TYPE_BOOLEAN,
-	},{
-		.id            = V4L2_CID_AUDIO_VOLUME,
-		.name          = "Volume",
-		.minimum       = -15,
-		.maximum       = 15,
-		.step          = 1,
-		.default_value = 0,
-		.type          = V4L2_CTRL_TYPE_INTEGER,
-	},
-	/* --- private --- */
-	{
-		.id            = V4L2_CID_PRIVATE_INVERT,
-		.name          = "Invert",
-		.minimum       = 0,
-		.maximum       = 1,
-		.type          = V4L2_CTRL_TYPE_BOOLEAN,
-	},{
-		.id            = V4L2_CID_PRIVATE_Y_ODD,
-		.name          = "y offset odd field",
-		.minimum       = 0,
-		.maximum       = 128,
-		.step          = 1,
-		.default_value = 0,
-		.type          = V4L2_CTRL_TYPE_INTEGER,
-	},{
-		.id            = V4L2_CID_PRIVATE_Y_EVEN,
-		.name          = "y offset even field",
-		.minimum       = 0,
-		.maximum       = 128,
-		.step          = 1,
-		.default_value = 0,
-		.type          = V4L2_CTRL_TYPE_INTEGER,
-	},{
-		.id            = V4L2_CID_PRIVATE_AUTOMUTE,
-		.name          = "automute",
-		.minimum       = 0,
-		.maximum       = 1,
-		.default_value = 1,
-		.type          = V4L2_CTRL_TYPE_BOOLEAN,
-	}
-};
-static const unsigned int CTRLS = ARRAY_SIZE(video_ctrls);
-
-static const struct v4l2_queryctrl* ctrl_by_id(unsigned int id)
-{
-	unsigned int i;
-
-	for (i = 0; i < CTRLS; i++)
-		if (video_ctrls[i].id == id)
-			return video_ctrls+i;
-	return NULL;
-}
-
 static struct saa7134_format* format_by_fourcc(unsigned int fourcc)
 {
 	unsigned int i;
@@ -514,16 +405,6 @@
 	return 1;
 }
 
-static int res_check(struct saa7134_fh *fh, unsigned int bit)
-{
-	return (fh->resources & bit);
-}
-
-static int res_locked(struct saa7134_dev *dev, unsigned int bit)
-{
-	return (dev->resources & bit);
-}
-
 static
 void res_free(struct saa7134_dev *dev, struct saa7134_fh *fh, unsigned int bits)
 {
@@ -868,7 +749,7 @@
 	return 0;
 }
 
-static int start_preview(struct saa7134_dev *dev, struct saa7134_fh *fh)
+static int start_preview(struct saa7134_dev *dev)
 {
 	unsigned long base,control,bpl;
 	int err;
@@ -923,7 +804,7 @@
 	return 0;
 }
 
-static int stop_preview(struct saa7134_dev *dev, struct saa7134_fh *fh)
+static int stop_preview(struct saa7134_dev *dev)
 {
 	dev->ovenable = 0;
 	saa7134_set_dmabits(dev);
@@ -1018,8 +899,7 @@
 			  struct videobuf_buffer *vb,
 			  enum v4l2_field field)
 {
-	struct saa7134_fh *fh = q->priv_data;
-	struct saa7134_dev *dev = fh->dev;
+	struct saa7134_dev *dev = q->priv_data;
 	struct saa7134_buf *buf = container_of(vb,struct saa7134_buf,vb);
 	unsigned int size;
 	int err;
@@ -1057,7 +937,7 @@
 		buf->vb.size   = size;
 		buf->vb.field  = field;
 		buf->fmt       = dev->fmt;
-		buf->pt        = &fh->pt_cap;
+		buf->pt        = &dev->pt_cap;
 		dev->video_q.curr = NULL;
 
 		err = videobuf_iolock(q,&buf->vb,&dev->ovbuf);
@@ -1082,8 +962,7 @@
 static int
 buffer_setup(struct videobuf_queue *q, unsigned int *count, unsigned int *size)
 {
-	struct saa7134_fh *fh = q->priv_data;
-	struct saa7134_dev *dev = fh->dev;
+	struct saa7134_dev *dev = q->priv_data;
 
 	*size = dev->fmt->depth * dev->width * dev->height >> 3;
 	if (0 == *count)
@@ -1094,10 +973,10 @@
 
 static void buffer_queue(struct videobuf_queue *q, struct videobuf_buffer *vb)
 {
-	struct saa7134_fh *fh = q->priv_data;
+	struct saa7134_dev *dev = q->priv_data;
 	struct saa7134_buf *buf = container_of(vb,struct saa7134_buf,vb);
 
-	saa7134_buffer_queue(fh->dev,&fh->dev->video_q,buf);
+	saa7134_buffer_queue(dev, &dev->video_q, buf);
 }
 
 static void buffer_release(struct videobuf_queue *q, struct videobuf_buffer *vb)
@@ -1116,133 +995,56 @@
 
 /* ------------------------------------------------------------------ */
 
-int saa7134_g_ctrl_internal(struct saa7134_dev *dev, struct saa7134_fh *fh, struct v4l2_control *c)
+static int saa7134_s_ctrl(struct v4l2_ctrl *ctrl)
 {
-	const struct v4l2_queryctrl* ctrl;
-
-	ctrl = ctrl_by_id(c->id);
-	if (NULL == ctrl)
-		return -EINVAL;
-	switch (c->id) {
-	case V4L2_CID_BRIGHTNESS:
-		c->value = dev->ctl_bright;
-		break;
-	case V4L2_CID_HUE:
-		c->value = dev->ctl_hue;
-		break;
-	case V4L2_CID_CONTRAST:
-		c->value = dev->ctl_contrast;
-		break;
-	case V4L2_CID_SATURATION:
-		c->value = dev->ctl_saturation;
-		break;
-	case V4L2_CID_AUDIO_MUTE:
-		c->value = dev->ctl_mute;
-		break;
-	case V4L2_CID_AUDIO_VOLUME:
-		c->value = dev->ctl_volume;
-		break;
-	case V4L2_CID_PRIVATE_INVERT:
-		c->value = dev->ctl_invert;
-		break;
-	case V4L2_CID_HFLIP:
-		c->value = dev->ctl_mirror;
-		break;
-	case V4L2_CID_PRIVATE_Y_EVEN:
-		c->value = dev->ctl_y_even;
-		break;
-	case V4L2_CID_PRIVATE_Y_ODD:
-		c->value = dev->ctl_y_odd;
-		break;
-	case V4L2_CID_PRIVATE_AUTOMUTE:
-		c->value = dev->ctl_automute;
-		break;
-	default:
-		return -EINVAL;
-	}
-	return 0;
-}
-EXPORT_SYMBOL_GPL(saa7134_g_ctrl_internal);
-
-static int saa7134_g_ctrl(struct file *file, void *priv, struct v4l2_control *c)
-{
-	struct saa7134_fh *fh = priv;
-
-	return saa7134_g_ctrl_internal(fh->dev, fh, c);
-}
-
-int saa7134_s_ctrl_internal(struct saa7134_dev *dev,  struct saa7134_fh *fh, struct v4l2_control *c)
-{
-	const struct v4l2_queryctrl* ctrl;
+	struct saa7134_dev *dev = container_of(ctrl->handler, struct saa7134_dev, ctrl_handler);
 	unsigned long flags;
 	int restart_overlay = 0;
-	int err;
 
-	err = -EINVAL;
-
-	mutex_lock(&dev->lock);
-
-	ctrl = ctrl_by_id(c->id);
-	if (NULL == ctrl)
-		goto error;
-
-	dprintk("set_control name=%s val=%d\n",ctrl->name,c->value);
-	switch (ctrl->type) {
-	case V4L2_CTRL_TYPE_BOOLEAN:
-	case V4L2_CTRL_TYPE_MENU:
-	case V4L2_CTRL_TYPE_INTEGER:
-		if (c->value < ctrl->minimum)
-			c->value = ctrl->minimum;
-		if (c->value > ctrl->maximum)
-			c->value = ctrl->maximum;
-		break;
-	default:
-		/* nothing */;
-	}
-	switch (c->id) {
+	switch (ctrl->id) {
 	case V4L2_CID_BRIGHTNESS:
-		dev->ctl_bright = c->value;
-		saa_writeb(SAA7134_DEC_LUMA_BRIGHT, dev->ctl_bright);
+		dev->ctl_bright = ctrl->val;
+		saa_writeb(SAA7134_DEC_LUMA_BRIGHT, ctrl->val);
 		break;
 	case V4L2_CID_HUE:
-		dev->ctl_hue = c->value;
-		saa_writeb(SAA7134_DEC_CHROMA_HUE, dev->ctl_hue);
+		dev->ctl_hue = ctrl->val;
+		saa_writeb(SAA7134_DEC_CHROMA_HUE, ctrl->val);
 		break;
 	case V4L2_CID_CONTRAST:
-		dev->ctl_contrast = c->value;
+		dev->ctl_contrast = ctrl->val;
 		saa_writeb(SAA7134_DEC_LUMA_CONTRAST,
 			   dev->ctl_invert ? -dev->ctl_contrast : dev->ctl_contrast);
 		break;
 	case V4L2_CID_SATURATION:
-		dev->ctl_saturation = c->value;
+		dev->ctl_saturation = ctrl->val;
 		saa_writeb(SAA7134_DEC_CHROMA_SATURATION,
 			   dev->ctl_invert ? -dev->ctl_saturation : dev->ctl_saturation);
 		break;
 	case V4L2_CID_AUDIO_MUTE:
-		dev->ctl_mute = c->value;
+		dev->ctl_mute = ctrl->val;
 		saa7134_tvaudio_setmute(dev);
 		break;
 	case V4L2_CID_AUDIO_VOLUME:
-		dev->ctl_volume = c->value;
+		dev->ctl_volume = ctrl->val;
 		saa7134_tvaudio_setvolume(dev,dev->ctl_volume);
 		break;
 	case V4L2_CID_PRIVATE_INVERT:
-		dev->ctl_invert = c->value;
+		dev->ctl_invert = ctrl->val;
 		saa_writeb(SAA7134_DEC_LUMA_CONTRAST,
 			   dev->ctl_invert ? -dev->ctl_contrast : dev->ctl_contrast);
 		saa_writeb(SAA7134_DEC_CHROMA_SATURATION,
 			   dev->ctl_invert ? -dev->ctl_saturation : dev->ctl_saturation);
 		break;
 	case V4L2_CID_HFLIP:
-		dev->ctl_mirror = c->value;
+		dev->ctl_mirror = ctrl->val;
 		restart_overlay = 1;
 		break;
 	case V4L2_CID_PRIVATE_Y_EVEN:
-		dev->ctl_y_even = c->value;
+		dev->ctl_y_even = ctrl->val;
 		restart_overlay = 1;
 		break;
 	case V4L2_CID_PRIVATE_Y_ODD:
-		dev->ctl_y_odd = c->value;
+		dev->ctl_y_odd = ctrl->val;
 		restart_overlay = 1;
 		break;
 	case V4L2_CID_PRIVATE_AUTOMUTE:
@@ -1252,7 +1054,7 @@
 		tda9887_cfg.tuner = TUNER_TDA9887;
 		tda9887_cfg.priv = &dev->tda9887_conf;
 
-		dev->ctl_automute = c->value;
+		dev->ctl_automute = ctrl->val;
 		if (dev->tda9887_conf) {
 			if (dev->ctl_automute)
 				dev->tda9887_conf |= TDA9887_AUTOMUTE;
@@ -1264,27 +1066,15 @@
 		break;
 	}
 	default:
-		goto error;
+		return -EINVAL;
 	}
-	if (restart_overlay && fh && res_check(fh, RESOURCE_OVERLAY)) {
-		spin_lock_irqsave(&dev->slock,flags);
-		stop_preview(dev,fh);
-		start_preview(dev,fh);
-		spin_unlock_irqrestore(&dev->slock,flags);
+	if (restart_overlay && res_locked(dev, RESOURCE_OVERLAY)) {
+		spin_lock_irqsave(&dev->slock, flags);
+		stop_preview(dev);
+		start_preview(dev);
+		spin_unlock_irqrestore(&dev->slock, flags);
 	}
-	err = 0;
-
-error:
-	mutex_unlock(&dev->lock);
-	return err;
-}
-EXPORT_SYMBOL_GPL(saa7134_s_ctrl_internal);
-
-static int saa7134_s_ctrl(struct file *file, void *f, struct v4l2_control *c)
-{
-	struct saa7134_fh *fh = f;
-
-	return saa7134_s_ctrl_internal(fh->dev, fh, c);
+	return 0;
 }
 
 /* ------------------------------------------------------------------ */
@@ -1292,15 +1082,16 @@
 static struct videobuf_queue *saa7134_queue(struct file *file)
 {
 	struct video_device *vdev = video_devdata(file);
+	struct saa7134_dev *dev = video_drvdata(file);
 	struct saa7134_fh *fh = file->private_data;
 	struct videobuf_queue *q = NULL;
 
 	switch (vdev->vfl_type) {
 	case VFL_TYPE_GRABBER:
-		q = &fh->cap;
+		q = fh->is_empress ? &dev->empress_tsq : &dev->cap;
 		break;
 	case VFL_TYPE_VBI:
-		q = &fh->vbi;
+		q = &dev->vbi;
 		break;
 	default:
 		BUG();
@@ -1311,9 +1102,10 @@
 static int saa7134_resource(struct file *file)
 {
 	struct video_device *vdev = video_devdata(file);
+	struct saa7134_fh *fh = file->private_data;
 
 	if (vdev->vfl_type == VFL_TYPE_GRABBER)
-		return RESOURCE_VIDEO;
+		return fh->is_empress ? RESOURCE_EMPRESS : RESOURCE_VIDEO;
 
 	if (vdev->vfl_type == VFL_TYPE_VBI)
 		return RESOURCE_VBI;
@@ -1335,22 +1127,6 @@
 
 	v4l2_fh_init(&fh->fh, vdev);
 	file->private_data = fh;
-	fh->dev      = dev;
-
-	videobuf_queue_sg_init(&fh->cap, &video_qops,
-			    &dev->pci->dev, &dev->slock,
-			    V4L2_BUF_TYPE_VIDEO_CAPTURE,
-			    V4L2_FIELD_INTERLACED,
-			    sizeof(struct saa7134_buf),
-			    fh, NULL);
-	videobuf_queue_sg_init(&fh->vbi, &saa7134_vbi_qops,
-			    &dev->pci->dev, &dev->slock,
-			    V4L2_BUF_TYPE_VBI_CAPTURE,
-			    V4L2_FIELD_SEQ_TB,
-			    sizeof(struct saa7134_buf),
-			    fh, NULL);
-	saa7134_pgtable_alloc(dev->pci,&fh->pt_cap);
-	saa7134_pgtable_alloc(dev->pci,&fh->pt_vbi);
 
 	if (vdev->vfl_type == VFL_TYPE_RADIO) {
 		/* switch to radio mode */
@@ -1369,17 +1145,18 @@
 video_read(struct file *file, char __user *data, size_t count, loff_t *ppos)
 {
 	struct video_device *vdev = video_devdata(file);
+	struct saa7134_dev *dev = video_drvdata(file);
 	struct saa7134_fh *fh = file->private_data;
 
 	switch (vdev->vfl_type) {
 	case VFL_TYPE_GRABBER:
-		if (res_locked(fh->dev,RESOURCE_VIDEO))
+		if (res_locked(dev, RESOURCE_VIDEO))
 			return -EBUSY;
 		return videobuf_read_one(saa7134_queue(file),
 					 data, count, ppos,
 					 file->f_flags & O_NONBLOCK);
 	case VFL_TYPE_VBI:
-		if (!res_get(fh->dev,fh,RESOURCE_VBI))
+		if (!res_get(dev, fh, RESOURCE_VBI))
 			return -EBUSY;
 		return videobuf_read_stream(saa7134_queue(file),
 					    data, count, ppos, 1,
@@ -1394,52 +1171,59 @@
 static unsigned int
 video_poll(struct file *file, struct poll_table_struct *wait)
 {
+	unsigned long req_events = poll_requested_events(wait);
 	struct video_device *vdev = video_devdata(file);
+	struct saa7134_dev *dev = video_drvdata(file);
 	struct saa7134_fh *fh = file->private_data;
 	struct videobuf_buffer *buf = NULL;
 	unsigned int rc = 0;
 
-	if (vdev->vfl_type == VFL_TYPE_VBI)
-		return videobuf_poll_stream(file, &fh->vbi, wait);
+	if (v4l2_event_pending(&fh->fh))
+		rc = POLLPRI;
+	else if (req_events & POLLPRI)
+		poll_wait(file, &fh->fh.wait, wait);
 
-	if (res_check(fh,RESOURCE_VIDEO)) {
-		mutex_lock(&fh->cap.vb_lock);
-		if (!list_empty(&fh->cap.stream))
-			buf = list_entry(fh->cap.stream.next, struct videobuf_buffer, stream);
+	if (vdev->vfl_type == VFL_TYPE_VBI)
+		return rc | videobuf_poll_stream(file, &dev->vbi, wait);
+
+	if (res_check(fh, RESOURCE_VIDEO)) {
+		mutex_lock(&dev->cap.vb_lock);
+		if (!list_empty(&dev->cap.stream))
+			buf = list_entry(dev->cap.stream.next, struct videobuf_buffer, stream);
 	} else {
-		mutex_lock(&fh->cap.vb_lock);
-		if (UNSET == fh->cap.read_off) {
+		mutex_lock(&dev->cap.vb_lock);
+		if (UNSET == dev->cap.read_off) {
 			/* need to capture a new frame */
-			if (res_locked(fh->dev,RESOURCE_VIDEO))
+			if (res_locked(dev, RESOURCE_VIDEO))
 				goto err;
-			if (0 != fh->cap.ops->buf_prepare(&fh->cap,fh->cap.read_buf,fh->cap.field))
+			if (0 != dev->cap.ops->buf_prepare(&dev->cap,
+					dev->cap.read_buf, dev->cap.field))
 				goto err;
-			fh->cap.ops->buf_queue(&fh->cap,fh->cap.read_buf);
-			fh->cap.read_off = 0;
+			dev->cap.ops->buf_queue(&dev->cap, dev->cap.read_buf);
+			dev->cap.read_off = 0;
 		}
-		buf = fh->cap.read_buf;
+		buf = dev->cap.read_buf;
 	}
 
 	if (!buf)
 		goto err;
 
 	poll_wait(file, &buf->done, wait);
-	if (buf->state == VIDEOBUF_DONE ||
-	    buf->state == VIDEOBUF_ERROR)
-		rc = POLLIN|POLLRDNORM;
-	mutex_unlock(&fh->cap.vb_lock);
+	if (buf->state == VIDEOBUF_DONE || buf->state == VIDEOBUF_ERROR)
+		rc |= POLLIN | POLLRDNORM;
+	mutex_unlock(&dev->cap.vb_lock);
 	return rc;
 
 err:
-	mutex_unlock(&fh->cap.vb_lock);
-	return POLLERR;
+	mutex_unlock(&dev->cap.vb_lock);
+	return rc | POLLERR;
 }
 
 static int video_release(struct file *file)
 {
 	struct video_device *vdev = video_devdata(file);
-	struct saa7134_fh  *fh  = file->private_data;
-	struct saa7134_dev *dev = fh->dev;
+	struct saa7134_dev *dev = video_drvdata(file);
+	struct saa7134_fh *fh = file->private_data;
 	struct saa6588_command cmd;
 	unsigned long flags;
 
@@ -1448,26 +1232,28 @@
 	/* turn off overlay */
 	if (res_check(fh, RESOURCE_OVERLAY)) {
 		spin_lock_irqsave(&dev->slock,flags);
-		stop_preview(dev,fh);
+		stop_preview(dev);
 		spin_unlock_irqrestore(&dev->slock,flags);
-		res_free(dev,fh,RESOURCE_OVERLAY);
+		res_free(dev, fh, RESOURCE_OVERLAY);
 	}
 
 	/* stop video capture */
 	if (res_check(fh, RESOURCE_VIDEO)) {
 		pm_qos_remove_request(&dev->qos_request);
-		videobuf_streamoff(&fh->cap);
-		res_free(dev,fh,RESOURCE_VIDEO);
+		videobuf_streamoff(&dev->cap);
+		res_free(dev, fh, RESOURCE_VIDEO);
+		videobuf_mmap_free(&dev->cap);
 	}
-	if (fh->cap.read_buf) {
-		buffer_release(&fh->cap,fh->cap.read_buf);
-		kfree(fh->cap.read_buf);
+	if (dev->cap.read_buf) {
+		buffer_release(&dev->cap, dev->cap.read_buf);
+		kfree(dev->cap.read_buf);
 	}
 
 	/* stop vbi capture */
 	if (res_check(fh, RESOURCE_VBI)) {
-		videobuf_stop(&fh->vbi);
-		res_free(dev,fh,RESOURCE_VBI);
+		videobuf_stop(&dev->vbi);
+		res_free(dev, fh, RESOURCE_VBI);
+		videobuf_mmap_free(&dev->vbi);
 	}
 
 	/* ts-capture will not work in planar mode, so turn it off Hac: 04.05*/
@@ -1480,12 +1266,6 @@
 	if (vdev->vfl_type == VFL_TYPE_RADIO)
 		saa_call_all(dev, core, ioctl, SAA6588_CMD_CLOSE, &cmd);
 
-	/* free stuff */
-	videobuf_mmap_free(&fh->cap);
-	videobuf_mmap_free(&fh->vbi);
-	saa7134_pgtable_free(dev->pci,&fh->pt_cap);
-	saa7134_pgtable_free(dev->pci,&fh->pt_vbi);
-
 	v4l2_fh_del(&fh->fh);
 	v4l2_fh_exit(&fh->fh);
 	file->private_data = NULL;
@@ -1501,11 +1281,11 @@
 static ssize_t radio_read(struct file *file, char __user *data,
 			 size_t count, loff_t *ppos)
 {
-	struct saa7134_fh *fh = file->private_data;
-	struct saa7134_dev *dev = fh->dev;
+	struct saa7134_dev *dev = video_drvdata(file);
 	struct saa6588_command cmd;
 
 	cmd.block_count = count/3;
+	cmd.nonblocking = file->f_flags & O_NONBLOCK;
 	cmd.buffer = data;
 	cmd.instance = file;
 	cmd.result = -ENODEV;
@@ -1517,16 +1297,16 @@
 
 static unsigned int radio_poll(struct file *file, poll_table *wait)
 {
-	struct saa7134_fh *fh = file->private_data;
-	struct saa7134_dev *dev = fh->dev;
+	struct saa7134_dev *dev = video_drvdata(file);
 	struct saa6588_command cmd;
+	unsigned int rc = v4l2_ctrl_poll(file, wait);
 
 	cmd.instance = file;
 	cmd.event_list = wait;
-	cmd.result = -ENODEV;
+	cmd.result = 0;
 	saa_call_all(dev, core, ioctl, SAA6588_CMD_POLL, &cmd);
 
-	return cmd.result;
+	return rc | cmd.result;
 }
 
 /* ------------------------------------------------------------------ */
@@ -1534,8 +1314,7 @@
 static int saa7134_try_get_set_fmt_vbi_cap(struct file *file, void *priv,
 						struct v4l2_format *f)
 {
-	struct saa7134_fh *fh = priv;
-	struct saa7134_dev *dev = fh->dev;
+	struct saa7134_dev *dev = video_drvdata(file);
 	struct saa7134_tvnorm *norm = dev->tvnorm;
 
 	memset(&f->fmt.vbi.reserved, 0, sizeof(f->fmt.vbi.reserved));
@@ -1555,12 +1334,11 @@
 static int saa7134_g_fmt_vid_cap(struct file *file, void *priv,
 				struct v4l2_format *f)
 {
-	struct saa7134_fh *fh = priv;
-	struct saa7134_dev *dev = fh->dev;
+	struct saa7134_dev *dev = video_drvdata(file);
 
 	f->fmt.pix.width        = dev->width;
 	f->fmt.pix.height       = dev->height;
-	f->fmt.pix.field        = fh->cap.field;
+	f->fmt.pix.field        = dev->cap.field;
 	f->fmt.pix.pixelformat  = dev->fmt->fourcc;
 	f->fmt.pix.bytesperline =
 		(f->fmt.pix.width * dev->fmt->depth) >> 3;
@@ -1574,8 +1352,7 @@
 static int saa7134_g_fmt_vid_overlay(struct file *file, void *priv,
 				struct v4l2_format *f)
 {
-	struct saa7134_fh *fh = priv;
-	struct saa7134_dev *dev = fh->dev;
+	struct saa7134_dev *dev = video_drvdata(file);
 	struct v4l2_clip __user *clips = f->fmt.win.clips;
 	u32 clipcount = f->fmt.win.clipcount;
 	int err = 0;
@@ -1607,8 +1384,7 @@
 static int saa7134_try_fmt_vid_cap(struct file *file, void *priv,
 						struct v4l2_format *f)
 {
-	struct saa7134_fh *fh = priv;
-	struct saa7134_dev *dev = fh->dev;
+	struct saa7134_dev *dev = video_drvdata(file);
 	struct saa7134_format *fmt;
 	enum v4l2_field field;
 	unsigned int maxw, maxh;
@@ -1659,8 +1435,7 @@
 static int saa7134_try_fmt_vid_overlay(struct file *file, void *priv,
 						struct v4l2_format *f)
 {
-	struct saa7134_fh *fh = priv;
-	struct saa7134_dev *dev = fh->dev;
+	struct saa7134_dev *dev = video_drvdata(file);
 
 	if (saa7134_no_overlay > 0) {
 		printk(KERN_ERR "V4L2_BUF_TYPE_VIDEO_OVERLAY: no_overlay\n");
@@ -1675,8 +1450,7 @@
 static int saa7134_s_fmt_vid_cap(struct file *file, void *priv,
 					struct v4l2_format *f)
 {
-	struct saa7134_fh *fh = priv;
-	struct saa7134_dev *dev = fh->dev;
+	struct saa7134_dev *dev = video_drvdata(file);
 	int err;
 
 	err = saa7134_try_fmt_vid_cap(file, priv, f);
@@ -1686,15 +1460,14 @@
 	dev->fmt       = format_by_fourcc(f->fmt.pix.pixelformat);
 	dev->width     = f->fmt.pix.width;
 	dev->height    = f->fmt.pix.height;
-	fh->cap.field = f->fmt.pix.field;
+	dev->cap.field = f->fmt.pix.field;
 	return 0;
 }
 
 static int saa7134_s_fmt_vid_overlay(struct file *file, void *priv,
 					struct v4l2_format *f)
 {
-	struct saa7134_fh *fh = priv;
-	struct saa7134_dev *dev = fh->dev;
+	struct saa7134_dev *dev = video_drvdata(file);
 	int err;
 	unsigned long flags;
 
@@ -1719,10 +1492,10 @@
 		return -EFAULT;
 	}
 
-	if (res_check(fh, RESOURCE_OVERLAY)) {
+	if (res_check(priv, RESOURCE_OVERLAY)) {
 		spin_lock_irqsave(&dev->slock, flags);
-		stop_preview(dev, fh);
-		start_preview(dev, fh);
+		stop_preview(dev);
+		start_preview(dev);
 		spin_unlock_irqrestore(&dev->slock, flags);
 	}
 
@@ -1730,26 +1503,9 @@
 	return 0;
 }
 
-int saa7134_queryctrl(struct file *file, void *priv, struct v4l2_queryctrl *c)
+int saa7134_enum_input(struct file *file, void *priv, struct v4l2_input *i)
 {
-	const struct v4l2_queryctrl *ctrl;
-
-	if ((c->id <  V4L2_CID_BASE ||
-	     c->id >= V4L2_CID_LASTP1) &&
-	    (c->id <  V4L2_CID_PRIVATE_BASE ||
-	     c->id >= V4L2_CID_PRIVATE_LASTP1))
-		return -EINVAL;
-	ctrl = ctrl_by_id(c->id);
-	*c = (NULL != ctrl) ? *ctrl : no_ctrl;
-	return 0;
-}
-EXPORT_SYMBOL_GPL(saa7134_queryctrl);
-
-static int saa7134_enum_input(struct file *file, void *priv,
-					struct v4l2_input *i)
-{
-	struct saa7134_fh *fh = priv;
-	struct saa7134_dev *dev = fh->dev;
+	struct saa7134_dev *dev = video_drvdata(file);
 	unsigned int n;
 
 	n = i->index;
@@ -1769,27 +1525,27 @@
 		if (0 != (v1 & 0x40))
 			i->status |= V4L2_IN_ST_NO_H_LOCK;
 		if (0 != (v2 & 0x40))
-			i->status |= V4L2_IN_ST_NO_SYNC;
+			i->status |= V4L2_IN_ST_NO_SIGNAL;
 		if (0 != (v2 & 0x0e))
 			i->status |= V4L2_IN_ST_MACROVISION;
 	}
 	i->std = SAA7134_NORMS;
 	return 0;
 }
+EXPORT_SYMBOL_GPL(saa7134_enum_input);
 
-static int saa7134_g_input(struct file *file, void *priv, unsigned int *i)
+int saa7134_g_input(struct file *file, void *priv, unsigned int *i)
 {
-	struct saa7134_fh *fh = priv;
-	struct saa7134_dev *dev = fh->dev;
+	struct saa7134_dev *dev = video_drvdata(file);
 
 	*i = dev->ctl_input;
 	return 0;
 }
+EXPORT_SYMBOL_GPL(saa7134_g_input);
 
-static int saa7134_s_input(struct file *file, void *priv, unsigned int i)
+int saa7134_s_input(struct file *file, void *priv, unsigned int i)
 {
-	struct saa7134_fh *fh = priv;
-	struct saa7134_dev *dev = fh->dev;
+	struct saa7134_dev *dev = video_drvdata(file);
 
 	if (i >= SAA7134_INPUT_MAX)
 		return -EINVAL;
@@ -1800,13 +1556,14 @@
 	mutex_unlock(&dev->lock);
 	return 0;
 }
+EXPORT_SYMBOL_GPL(saa7134_s_input);
 
-static int saa7134_querycap(struct file *file, void  *priv,
+int saa7134_querycap(struct file *file, void *priv,
 					struct v4l2_capability *cap)
 {
-	struct saa7134_fh *fh = priv;
-	struct saa7134_dev *dev = fh->dev;
+	struct saa7134_dev *dev = video_drvdata(file);
 	struct video_device *vdev = video_devdata(file);
+	struct saa7134_fh *fh = priv;
 	u32 radio_caps, video_caps, vbi_caps;
 
 	unsigned int tuner_type = dev->tuner_type;
@@ -1825,7 +1582,7 @@
 		radio_caps |= V4L2_CAP_RDS_CAPTURE;
 
 	video_caps = V4L2_CAP_VIDEO_CAPTURE;
-	if (saa7134_no_overlay <= 0)
+	if (saa7134_no_overlay <= 0 && !fh->is_empress)
 		video_caps |= V4L2_CAP_VIDEO_OVERLAY;
 
 	vbi_caps = V4L2_CAP_VBI_CAPTURE;
@@ -1851,14 +1608,17 @@
 
 	return 0;
 }
+EXPORT_SYMBOL_GPL(saa7134_querycap);
 
-int saa7134_s_std_internal(struct saa7134_dev *dev, struct saa7134_fh *fh, v4l2_std_id id)
+int saa7134_s_std(struct file *file, void *priv, v4l2_std_id id)
 {
+	struct saa7134_dev *dev = video_drvdata(file);
+	struct saa7134_fh *fh = priv;
 	unsigned long flags;
 	unsigned int i;
 	v4l2_std_id fixup;
 
-	if (!fh && res_locked(dev, RESOURCE_OVERLAY)) {
+	if (fh->is_empress && res_locked(dev, RESOURCE_OVERLAY)) {
 		/* Don't change the std from the mpeg device
 		   if overlay is active. */
 		return -EBUSY;
@@ -1898,15 +1658,15 @@
 	id = tvnorms[i].id;
 
 	mutex_lock(&dev->lock);
-	if (fh && res_check(fh, RESOURCE_OVERLAY)) {
+	if (!fh->is_empress && res_check(fh, RESOURCE_OVERLAY)) {
 		spin_lock_irqsave(&dev->slock, flags);
-		stop_preview(dev, fh);
+		stop_preview(dev);
 		spin_unlock_irqrestore(&dev->slock, flags);
 
 		set_tvnorm(dev, &tvnorms[i]);
 
 		spin_lock_irqsave(&dev->slock, flags);
-		start_preview(dev, fh);
+		start_preview(dev);
 		spin_unlock_irqrestore(&dev->slock, flags);
 	} else
 		set_tvnorm(dev, &tvnorms[i]);
@@ -1915,29 +1675,21 @@
 	mutex_unlock(&dev->lock);
 	return 0;
 }
-EXPORT_SYMBOL_GPL(saa7134_s_std_internal);
+EXPORT_SYMBOL_GPL(saa7134_s_std);
 
-static int saa7134_s_std(struct file *file, void *priv, v4l2_std_id id)
+int saa7134_g_std(struct file *file, void *priv, v4l2_std_id *id)
 {
-	struct saa7134_fh *fh = priv;
-
-	return saa7134_s_std_internal(fh->dev, fh, id);
-}
-
-static int saa7134_g_std(struct file *file, void *priv, v4l2_std_id *id)
-{
-	struct saa7134_fh *fh = priv;
-	struct saa7134_dev *dev = fh->dev;
+	struct saa7134_dev *dev = video_drvdata(file);
 
 	*id = dev->tvnorm->id;
 	return 0;
 }
+EXPORT_SYMBOL_GPL(saa7134_g_std);
 
 static int saa7134_cropcap(struct file *file, void *priv,
 					struct v4l2_cropcap *cap)
 {
-	struct saa7134_fh *fh = priv;
-	struct saa7134_dev *dev = fh->dev;
+	struct saa7134_dev *dev = video_drvdata(file);
 
 	if (cap->type != V4L2_BUF_TYPE_VIDEO_CAPTURE &&
 	    cap->type != V4L2_BUF_TYPE_VIDEO_OVERLAY)
@@ -1959,8 +1711,7 @@
 
 static int saa7134_g_crop(struct file *file, void *f, struct v4l2_crop *crop)
 {
-	struct saa7134_fh *fh = f;
-	struct saa7134_dev *dev = fh->dev;
+	struct saa7134_dev *dev = video_drvdata(file);
 
 	if (crop->type != V4L2_BUF_TYPE_VIDEO_CAPTURE &&
 	    crop->type != V4L2_BUF_TYPE_VIDEO_OVERLAY)
@@ -1971,22 +1722,17 @@
 
 static int saa7134_s_crop(struct file *file, void *f, const struct v4l2_crop *crop)
 {
-	struct saa7134_fh *fh = f;
-	struct saa7134_dev *dev = fh->dev;
+	struct saa7134_dev *dev = video_drvdata(file);
 	struct v4l2_rect *b = &dev->crop_bounds;
 	struct v4l2_rect *c = &dev->crop_current;
 
 	if (crop->type != V4L2_BUF_TYPE_VIDEO_CAPTURE &&
 	    crop->type != V4L2_BUF_TYPE_VIDEO_OVERLAY)
 		return -EINVAL;
-	if (crop->c.height < 0)
-		return -EINVAL;
-	if (crop->c.width < 0)
-		return -EINVAL;
 
-	if (res_locked(fh->dev, RESOURCE_OVERLAY))
+	if (res_locked(dev, RESOURCE_OVERLAY))
 		return -EBUSY;
-	if (res_locked(fh->dev, RESOURCE_VIDEO))
+	if (res_locked(dev, RESOURCE_VIDEO))
 		return -EBUSY;
 
 	*c = crop->c;
@@ -2006,11 +1752,10 @@
 	return 0;
 }
 
-static int saa7134_g_tuner(struct file *file, void *priv,
+int saa7134_g_tuner(struct file *file, void *priv,
 					struct v4l2_tuner *t)
 {
-	struct saa7134_fh *fh = priv;
-	struct saa7134_dev *dev = fh->dev;
+	struct saa7134_dev *dev = video_drvdata(file);
 	int n;
 
 	if (0 != t->index)
@@ -2037,12 +1782,12 @@
 		t->signal = 0xffff;
 	return 0;
 }
+EXPORT_SYMBOL_GPL(saa7134_g_tuner);
 
-static int saa7134_s_tuner(struct file *file, void *priv,
+int saa7134_s_tuner(struct file *file, void *priv,
 					const struct v4l2_tuner *t)
 {
-	struct saa7134_fh *fh = priv;
-	struct saa7134_dev *dev = fh->dev;
+	struct saa7134_dev *dev = video_drvdata(file);
 	int rx, mode;
 
 	if (0 != t->index)
@@ -2058,12 +1803,12 @@
 
 	return 0;
 }
+EXPORT_SYMBOL_GPL(saa7134_s_tuner);
 
-static int saa7134_g_frequency(struct file *file, void *priv,
+int saa7134_g_frequency(struct file *file, void *priv,
 					struct v4l2_frequency *f)
 {
-	struct saa7134_fh *fh = priv;
-	struct saa7134_dev *dev = fh->dev;
+	struct saa7134_dev *dev = video_drvdata(file);
 
 	if (0 != f->tuner)
 		return -EINVAL;
@@ -2072,12 +1817,12 @@
 
 	return 0;
 }
+EXPORT_SYMBOL_GPL(saa7134_g_frequency);
 
-static int saa7134_s_frequency(struct file *file, void *priv,
+int saa7134_s_frequency(struct file *file, void *priv,
 					const struct v4l2_frequency *f)
 {
-	struct saa7134_fh *fh = priv;
-	struct saa7134_dev *dev = fh->dev;
+	struct saa7134_dev *dev = video_drvdata(file);
 
 	if (0 != f->tuner)
 		return -EINVAL;
@@ -2089,6 +1834,7 @@
 	mutex_unlock(&dev->lock);
 	return 0;
 }
+EXPORT_SYMBOL_GPL(saa7134_s_frequency);
 
 static int saa7134_enum_fmt_vid_cap(struct file *file, void  *priv,
 					struct v4l2_fmtdesc *f)
@@ -2126,8 +1872,7 @@
 static int saa7134_g_fbuf(struct file *file, void *f,
 				struct v4l2_framebuffer *fb)
 {
-	struct saa7134_fh *fh = f;
-	struct saa7134_dev *dev = fh->dev;
+	struct saa7134_dev *dev = video_drvdata(file);
 
 	*fb = dev->ovbuf;
 	fb->capability = V4L2_FBUF_CAP_LIST_CLIPPING;
@@ -2138,8 +1883,7 @@
 static int saa7134_s_fbuf(struct file *file, void *f,
 					const struct v4l2_framebuffer *fb)
 {
-	struct saa7134_fh *fh = f;
-	struct saa7134_dev *dev = fh->dev;
+	struct saa7134_dev *dev = video_drvdata(file);
 	struct saa7134_format *fmt;
 
 	if (!capable(CAP_SYS_ADMIN) &&
@@ -2160,10 +1904,9 @@
 	return 0;
 }
 
-static int saa7134_overlay(struct file *file, void *f, unsigned int on)
+static int saa7134_overlay(struct file *file, void *priv, unsigned int on)
 {
-	struct saa7134_fh *fh = f;
-	struct saa7134_dev *dev = fh->dev;
+	struct saa7134_dev *dev = video_drvdata(file);
 	unsigned long flags;
 
 	if (on) {
@@ -2172,54 +1915,57 @@
 			return -EINVAL;
 		}
 
-		if (!res_get(dev, fh, RESOURCE_OVERLAY))
+		if (!res_get(dev, priv, RESOURCE_OVERLAY))
 			return -EBUSY;
 		spin_lock_irqsave(&dev->slock, flags);
-		start_preview(dev, fh);
+		start_preview(dev);
 		spin_unlock_irqrestore(&dev->slock, flags);
 	}
 	if (!on) {
-		if (!res_check(fh, RESOURCE_OVERLAY))
+		if (!res_check(priv, RESOURCE_OVERLAY))
 			return -EINVAL;
 		spin_lock_irqsave(&dev->slock, flags);
-		stop_preview(dev, fh);
+		stop_preview(dev);
 		spin_unlock_irqrestore(&dev->slock, flags);
-		res_free(dev, fh, RESOURCE_OVERLAY);
+		res_free(dev, priv, RESOURCE_OVERLAY);
 	}
 	return 0;
 }
 
-static int saa7134_reqbufs(struct file *file, void *priv,
+int saa7134_reqbufs(struct file *file, void *priv,
 					struct v4l2_requestbuffers *p)
 {
 	return videobuf_reqbufs(saa7134_queue(file), p);
 }
+EXPORT_SYMBOL_GPL(saa7134_reqbufs);
 
-static int saa7134_querybuf(struct file *file, void *priv,
+int saa7134_querybuf(struct file *file, void *priv,
 					struct v4l2_buffer *b)
 {
 	return videobuf_querybuf(saa7134_queue(file), b);
 }
+EXPORT_SYMBOL_GPL(saa7134_querybuf);
 
-static int saa7134_qbuf(struct file *file, void *priv, struct v4l2_buffer *b)
+int saa7134_qbuf(struct file *file, void *priv, struct v4l2_buffer *b)
 {
 	return videobuf_qbuf(saa7134_queue(file), b);
 }
+EXPORT_SYMBOL_GPL(saa7134_qbuf);
 
-static int saa7134_dqbuf(struct file *file, void *priv, struct v4l2_buffer *b)
+int saa7134_dqbuf(struct file *file, void *priv, struct v4l2_buffer *b)
 {
 	return videobuf_dqbuf(saa7134_queue(file), b,
 				file->f_flags & O_NONBLOCK);
 }
+EXPORT_SYMBOL_GPL(saa7134_dqbuf);
 
-static int saa7134_streamon(struct file *file, void *priv,
+int saa7134_streamon(struct file *file, void *priv,
 					enum v4l2_buf_type type)
 {
-	struct saa7134_fh *fh = priv;
-	struct saa7134_dev *dev = fh->dev;
+	struct saa7134_dev *dev = video_drvdata(file);
 	int res = saa7134_resource(file);
 
-	if (!res_get(dev, fh, res))
+	if (!res_get(dev, priv, res))
 		return -EBUSY;
 
 	/* The SAA7134 has a 1K FIFO; the datasheet suggests that when
@@ -2229,36 +1975,37 @@
 	 * Unfortunately, I lack register-level documentation to check the
 	 * Linux FIFO setup and confirm the perfect value.
 	 */
-	pm_qos_add_request(&dev->qos_request,
-			   PM_QOS_CPU_DMA_LATENCY,
-			   20);
+	if (res != RESOURCE_EMPRESS)
+		pm_qos_add_request(&dev->qos_request,
+			   PM_QOS_CPU_DMA_LATENCY, 20);
 
 	return videobuf_streamon(saa7134_queue(file));
 }
+EXPORT_SYMBOL_GPL(saa7134_streamon);
 
-static int saa7134_streamoff(struct file *file, void *priv,
+int saa7134_streamoff(struct file *file, void *priv,
 					enum v4l2_buf_type type)
 {
+	struct saa7134_dev *dev = video_drvdata(file);
 	int err;
-	struct saa7134_fh *fh = priv;
-	struct saa7134_dev *dev = fh->dev;
 	int res = saa7134_resource(file);
 
-	pm_qos_remove_request(&dev->qos_request);
+	if (res != RESOURCE_EMPRESS)
+		pm_qos_remove_request(&dev->qos_request);
 
 	err = videobuf_streamoff(saa7134_queue(file));
 	if (err < 0)
 		return err;
-	res_free(dev, fh, res);
+	res_free(dev, priv, res);
 	return 0;
 }
+EXPORT_SYMBOL_GPL(saa7134_streamoff);
 
 #ifdef CONFIG_VIDEO_ADV_DEBUG
 static int vidioc_g_register (struct file *file, void *priv,
 			      struct v4l2_dbg_register *reg)
 {
-	struct saa7134_fh *fh = priv;
-	struct saa7134_dev *dev = fh->dev;
+	struct saa7134_dev *dev = video_drvdata(file);
 
 	reg->val = saa_readb(reg->reg & 0xffffff);
 	reg->size = 1;
@@ -2268,8 +2015,7 @@
 static int vidioc_s_register (struct file *file, void *priv,
 				const struct v4l2_dbg_register *reg)
 {
-	struct saa7134_fh *fh = priv;
-	struct saa7134_dev *dev = fh->dev;
+	struct saa7134_dev *dev = video_drvdata(file);
 
 	saa_writeb(reg->reg & 0xffffff, reg->val);
 	return 0;
@@ -2279,8 +2025,7 @@
 static int radio_g_tuner(struct file *file, void *priv,
 					struct v4l2_tuner *t)
 {
-	struct saa7134_fh *fh = file->private_data;
-	struct saa7134_dev *dev = fh->dev;
+	struct saa7134_dev *dev = video_drvdata(file);
 
 	if (0 != t->index)
 		return -EINVAL;
@@ -2299,8 +2044,7 @@
 static int radio_s_tuner(struct file *file, void *priv,
 					const struct v4l2_tuner *t)
 {
-	struct saa7134_fh *fh = file->private_data;
-	struct saa7134_dev *dev = fh->dev;
+	struct saa7134_dev *dev = video_drvdata(file);
 
 	if (0 != t->index)
 		return -EINVAL;
@@ -2309,50 +2053,6 @@
 	return 0;
 }
 
-static int radio_enum_input(struct file *file, void *priv,
-					struct v4l2_input *i)
-{
-	if (i->index != 0)
-		return -EINVAL;
-
-	strcpy(i->name, "Radio");
-	i->type = V4L2_INPUT_TYPE_TUNER;
-
-	return 0;
-}
-
-static int radio_g_input(struct file *filp, void *priv, unsigned int *i)
-{
-	*i = 0;
-	return 0;
-}
-
-static int radio_s_input(struct file *filp, void *priv, unsigned int i)
-{
-	return 0;
-}
-
-static int radio_s_std(struct file *file, void *fh, v4l2_std_id norm)
-{
-	return 0;
-}
-
-static int radio_queryctrl(struct file *file, void *priv,
-					struct v4l2_queryctrl *c)
-{
-	const struct v4l2_queryctrl *ctrl;
-
-	if (c->id <  V4L2_CID_BASE ||
-	    c->id >= V4L2_CID_LASTP1)
-		return -EINVAL;
-	if (c->id == V4L2_CID_AUDIO_MUTE) {
-		ctrl = ctrl_by_id(c->id);
-		*c = *ctrl;
-	} else
-		*c = no_ctrl;
-	return 0;
-}
-
 static const struct v4l2_file_operations video_fops =
 {
 	.owner	  = THIS_MODULE,
@@ -2387,9 +2087,6 @@
 	.vidioc_enum_input		= saa7134_enum_input,
 	.vidioc_g_input			= saa7134_g_input,
 	.vidioc_s_input			= saa7134_s_input,
-	.vidioc_queryctrl		= saa7134_queryctrl,
-	.vidioc_g_ctrl			= saa7134_g_ctrl,
-	.vidioc_s_ctrl			= saa7134_s_ctrl,
 	.vidioc_streamon		= saa7134_streamon,
 	.vidioc_streamoff		= saa7134_streamoff,
 	.vidioc_g_tuner			= saa7134_g_tuner,
@@ -2405,6 +2102,9 @@
 	.vidioc_g_register              = vidioc_g_register,
 	.vidioc_s_register              = vidioc_s_register,
 #endif
+	.vidioc_log_status		= v4l2_ctrl_log_status,
+	.vidioc_subscribe_event		= v4l2_ctrl_subscribe_event,
+	.vidioc_unsubscribe_event	= v4l2_event_unsubscribe,
 };
 
 static const struct v4l2_file_operations radio_fops = {
@@ -2419,16 +2119,11 @@
 static const struct v4l2_ioctl_ops radio_ioctl_ops = {
 	.vidioc_querycap	= saa7134_querycap,
 	.vidioc_g_tuner		= radio_g_tuner,
-	.vidioc_enum_input	= radio_enum_input,
 	.vidioc_s_tuner		= radio_s_tuner,
-	.vidioc_s_input		= radio_s_input,
-	.vidioc_s_std		= radio_s_std,
-	.vidioc_queryctrl	= radio_queryctrl,
-	.vidioc_g_input		= radio_g_input,
-	.vidioc_g_ctrl		= saa7134_g_ctrl,
-	.vidioc_s_ctrl		= saa7134_s_ctrl,
 	.vidioc_g_frequency	= saa7134_g_frequency,
 	.vidioc_s_frequency	= saa7134_s_frequency,
+	.vidioc_subscribe_event	= v4l2_ctrl_subscribe_event,
+	.vidioc_unsubscribe_event = v4l2_event_unsubscribe,
 };
 
 /* ----------------------------------------------------------- */
@@ -2447,8 +2142,55 @@
 	.ioctl_ops 		= &radio_ioctl_ops,
 };
 
+static const struct v4l2_ctrl_ops saa7134_ctrl_ops = {
+	.s_ctrl = saa7134_s_ctrl,
+};
+
+static const struct v4l2_ctrl_config saa7134_ctrl_invert = {
+	.ops = &saa7134_ctrl_ops,
+	.id = V4L2_CID_PRIVATE_INVERT,
+	.name = "Invert",
+	.type = V4L2_CTRL_TYPE_BOOLEAN,
+	.min = 0,
+	.max = 1,
+	.step = 1,
+};
+
+static const struct v4l2_ctrl_config saa7134_ctrl_y_odd = {
+	.ops = &saa7134_ctrl_ops,
+	.id = V4L2_CID_PRIVATE_Y_ODD,
+	.name = "Y Offset Odd Field",
+	.type = V4L2_CTRL_TYPE_INTEGER,
+	.min = 0,
+	.max = 128,
+	.step = 1,
+};
+
+static const struct v4l2_ctrl_config saa7134_ctrl_y_even = {
+	.ops = &saa7134_ctrl_ops,
+	.id = V4L2_CID_PRIVATE_Y_EVEN,
+	.name = "Y Offset Even Field",
+	.type = V4L2_CTRL_TYPE_INTEGER,
+	.min = 0,
+	.max = 128,
+	.step = 1,
+};
+
+static const struct v4l2_ctrl_config saa7134_ctrl_automute = {
+	.ops = &saa7134_ctrl_ops,
+	.id = V4L2_CID_PRIVATE_AUTOMUTE,
+	.name = "Automute",
+	.type = V4L2_CTRL_TYPE_BOOLEAN,
+	.min = 0,
+	.max = 1,
+	.step = 1,
+	.def = 1,
+};
+
 int saa7134_video_init1(struct saa7134_dev *dev)
 {
+	struct v4l2_ctrl_handler *hdl = &dev->ctrl_handler;
+
 	/* sanitycheck insmod options */
 	if (gbuffers < 2 || gbuffers > VIDEO_MAX_FRAME)
 		gbuffers = 2;
@@ -2456,17 +2198,38 @@
 		gbufsize = gbufsize_max;
 	gbufsize = (gbufsize + PAGE_SIZE - 1) & PAGE_MASK;
 
-	/* put some sensible defaults into the data structures ... */
-	dev->ctl_bright     = ctrl_by_id(V4L2_CID_BRIGHTNESS)->default_value;
-	dev->ctl_contrast   = ctrl_by_id(V4L2_CID_CONTRAST)->default_value;
-	dev->ctl_hue        = ctrl_by_id(V4L2_CID_HUE)->default_value;
-	dev->ctl_saturation = ctrl_by_id(V4L2_CID_SATURATION)->default_value;
-	dev->ctl_volume     = ctrl_by_id(V4L2_CID_AUDIO_VOLUME)->default_value;
-	dev->ctl_mute       = 1; // ctrl_by_id(V4L2_CID_AUDIO_MUTE)->default_value;
-	dev->ctl_invert     = ctrl_by_id(V4L2_CID_PRIVATE_INVERT)->default_value;
-	dev->ctl_automute   = ctrl_by_id(V4L2_CID_PRIVATE_AUTOMUTE)->default_value;
+	v4l2_ctrl_handler_init(hdl, 11);
+	v4l2_ctrl_new_std(hdl, &saa7134_ctrl_ops,
+			V4L2_CID_BRIGHTNESS, 0, 255, 1, 128);
+	v4l2_ctrl_new_std(hdl, &saa7134_ctrl_ops,
+			V4L2_CID_CONTRAST, 0, 127, 1, 68);
+	v4l2_ctrl_new_std(hdl, &saa7134_ctrl_ops,
+			V4L2_CID_SATURATION, 0, 127, 1, 64);
+	v4l2_ctrl_new_std(hdl, &saa7134_ctrl_ops,
+			V4L2_CID_HUE, -128, 127, 1, 0);
+	v4l2_ctrl_new_std(hdl, &saa7134_ctrl_ops,
+			V4L2_CID_HFLIP, 0, 1, 1, 0);
+	v4l2_ctrl_new_std(hdl, &saa7134_ctrl_ops,
+			V4L2_CID_AUDIO_MUTE, 0, 1, 1, 0);
+	v4l2_ctrl_new_std(hdl, &saa7134_ctrl_ops,
+			V4L2_CID_AUDIO_VOLUME, -15, 15, 1, 0);
+	v4l2_ctrl_new_custom(hdl, &saa7134_ctrl_invert, NULL);
+	v4l2_ctrl_new_custom(hdl, &saa7134_ctrl_y_odd, NULL);
+	v4l2_ctrl_new_custom(hdl, &saa7134_ctrl_y_even, NULL);
+	v4l2_ctrl_new_custom(hdl, &saa7134_ctrl_automute, NULL);
+	if (hdl->error)
+		return hdl->error;
+	if (card_has_radio(dev)) {
+		hdl = &dev->radio_ctrl_handler;
+		v4l2_ctrl_handler_init(hdl, 2);
+		v4l2_ctrl_add_handler(hdl, &dev->ctrl_handler,
+				v4l2_ctrl_radio_filter);
+		if (hdl->error)
+			return hdl->error;
+	}
+	dev->ctl_mute       = 1;
 
-	if (dev->tda9887_conf && dev->ctl_automute)
+	if (dev->tda9887_conf && saa7134_ctrl_automute.def)
 		dev->tda9887_conf |= TDA9887_AUTOMUTE;
 	dev->automute       = 0;
 
@@ -2489,9 +2252,34 @@
 	if (saa7134_boards[dev->board].video_out)
 		saa7134_videoport_init(dev);
 
+	videobuf_queue_sg_init(&dev->cap, &video_qops,
+			    &dev->pci->dev, &dev->slock,
+			    V4L2_BUF_TYPE_VIDEO_CAPTURE,
+			    V4L2_FIELD_INTERLACED,
+			    sizeof(struct saa7134_buf),
+			    dev, NULL);
+	videobuf_queue_sg_init(&dev->vbi, &saa7134_vbi_qops,
+			    &dev->pci->dev, &dev->slock,
+			    V4L2_BUF_TYPE_VBI_CAPTURE,
+			    V4L2_FIELD_SEQ_TB,
+			    sizeof(struct saa7134_buf),
+			    dev, NULL);
+	saa7134_pgtable_alloc(dev->pci, &dev->pt_cap);
+	saa7134_pgtable_alloc(dev->pci, &dev->pt_vbi);
+
 	return 0;
 }
 
+void saa7134_video_fini(struct saa7134_dev *dev)
+{
+	/* free stuff */
+	saa7134_pgtable_free(dev->pci, &dev->pt_cap);
+	saa7134_pgtable_free(dev->pci, &dev->pt_vbi);
+	v4l2_ctrl_handler_free(&dev->ctrl_handler);
+	if (card_has_radio(dev))
+		v4l2_ctrl_handler_free(&dev->radio_ctrl_handler);
+}
+
 int saa7134_videoport_init(struct saa7134_dev *dev)
 {
 	/* enable video output */
@@ -2533,6 +2321,7 @@
 	/* init video hw */
 	set_tvnorm(dev,&tvnorms[0]);
 	video_mux(dev,0);
+	v4l2_ctrl_handler_setup(&dev->ctrl_handler);
 	saa7134_tvaudio_setmute(dev);
 	saa7134_tvaudio_setvolume(dev,dev->ctl_volume);
 	return 0;
diff --git a/drivers/media/pci/saa7134/saa7134.h b/drivers/media/pci/saa7134/saa7134.h
index 8d1453a..2474e84 100644
--- a/drivers/media/pci/saa7134/saa7134.h
+++ b/drivers/media/pci/saa7134/saa7134.h
@@ -37,6 +37,7 @@
 #include <media/v4l2-ioctl.h>
 #include <media/v4l2-device.h>
 #include <media/v4l2-fh.h>
+#include <media/v4l2-ctrls.h>
 #include <media/tuner.h>
 #include <media/rc-core.h>
 #include <media/ir-kbd-i2c.h>
@@ -410,12 +411,18 @@
 #define card(dev)             (saa7134_boards[dev->board])
 #define card_in(dev,n)        (saa7134_boards[dev->board].inputs[n])
 
+#define V4L2_CID_PRIVATE_INVERT      (V4L2_CID_USER_SAA7134_BASE + 0)
+#define V4L2_CID_PRIVATE_Y_ODD       (V4L2_CID_USER_SAA7134_BASE + 1)
+#define V4L2_CID_PRIVATE_Y_EVEN      (V4L2_CID_USER_SAA7134_BASE + 2)
+#define V4L2_CID_PRIVATE_AUTOMUTE    (V4L2_CID_USER_SAA7134_BASE + 3)
+
 /* ----------------------------------------------------------- */
 /* device / file handle status                                 */
 
 #define RESOURCE_OVERLAY       1
 #define RESOURCE_VIDEO         2
 #define RESOURCE_VBI           4
+#define RESOURCE_EMPRESS       8
 
 #define INTERLACE_AUTO         0
 #define INTERLACE_ON           1
@@ -470,16 +477,8 @@
 /* video filehandle status */
 struct saa7134_fh {
 	struct v4l2_fh             fh;
-	struct saa7134_dev         *dev;
+	bool			   is_empress;
 	unsigned int               resources;
-
-	/* video capture */
-	struct videobuf_queue      cap;
-	struct saa7134_pgtable     pt_cap;
-
-	/* vbi capture */
-	struct videobuf_queue      vbi;
-	struct saa7134_pgtable     pt_vbi;
 };
 
 /* dmasound dsp status */
@@ -589,7 +588,11 @@
 
 	/* video+ts+vbi capture */
 	struct saa7134_dmaqueue    video_q;
+	struct videobuf_queue      cap;
+	struct saa7134_pgtable     pt_cap;
 	struct saa7134_dmaqueue    vbi_q;
+	struct videobuf_queue      vbi;
+	struct saa7134_pgtable     pt_vbi;
 	unsigned int               video_fieldcount;
 	unsigned int               vbi_fieldcount;
 	struct saa7134_format      *fmt;
@@ -599,6 +602,7 @@
 	/* various v4l controls */
 	struct saa7134_tvnorm      *tvnorm;              /* video */
 	struct saa7134_tvaudio     *tvaudio;
+	struct v4l2_ctrl_handler   ctrl_handler;
 	unsigned int               ctl_input;
 	int                        ctl_bright;
 	int                        ctl_contrast;
@@ -626,6 +630,7 @@
 	int                        last_carrier;
 	int                        nosignal;
 	unsigned int               insuspend;
+	struct v4l2_ctrl_handler   radio_ctrl_handler;
 
 	/* I2C keyboard data */
 	struct IR_i2c_init_data    init_data;
@@ -638,10 +643,11 @@
 
 	/* SAA7134_MPEG_EMPRESS only */
 	struct video_device        *empress_dev;
+	struct v4l2_subdev	   *empress_sd;
 	struct videobuf_queue      empress_tsq;
-	atomic_t 		   empress_users;
 	struct work_struct         empress_workqueue;
 	int                        empress_started;
+	struct v4l2_ctrl_handler   empress_ctrl_handler;
 
 #if IS_ENABLED(CONFIG_VIDEO_SAA7134_DVB)
 	/* SAA7134_MPEG_DVB only */
@@ -699,6 +705,16 @@
 	_rc;								\
 })
 
+static inline int res_check(struct saa7134_fh *fh, unsigned int bit)
+{
+	return fh->resources & bit;
+}
+
+static inline int res_locked(struct saa7134_dev *dev, unsigned int bit)
+{
+	return dev->resources & bit;
+}
+
 /* ----------------------------------------------------------- */
 /* saa7134-core.c                                              */
 
@@ -761,10 +777,31 @@
 extern struct video_device saa7134_video_template;
 extern struct video_device saa7134_radio_template;
 
-int saa7134_s_ctrl_internal(struct saa7134_dev *dev,  struct saa7134_fh *fh, struct v4l2_control *c);
-int saa7134_g_ctrl_internal(struct saa7134_dev *dev,  struct saa7134_fh *fh, struct v4l2_control *c);
-int saa7134_queryctrl(struct file *file, void *priv, struct v4l2_queryctrl *c);
-int saa7134_s_std_internal(struct saa7134_dev *dev,  struct saa7134_fh *fh, v4l2_std_id id);
+int saa7134_s_std(struct file *file, void *priv, v4l2_std_id id);
+int saa7134_g_std(struct file *file, void *priv, v4l2_std_id *id);
+int saa7134_enum_input(struct file *file, void *priv, struct v4l2_input *i);
+int saa7134_g_input(struct file *file, void *priv, unsigned int *i);
+int saa7134_s_input(struct file *file, void *priv, unsigned int i);
+int saa7134_querycap(struct file *file, void  *priv,
+					struct v4l2_capability *cap);
+int saa7134_g_tuner(struct file *file, void *priv,
+					struct v4l2_tuner *t);
+int saa7134_s_tuner(struct file *file, void *priv,
+					const struct v4l2_tuner *t);
+int saa7134_g_frequency(struct file *file, void *priv,
+					struct v4l2_frequency *f);
+int saa7134_s_frequency(struct file *file, void *priv,
+					const struct v4l2_frequency *f);
+int saa7134_reqbufs(struct file *file, void *priv,
+					struct v4l2_requestbuffers *p);
+int saa7134_querybuf(struct file *file, void *priv,
+					struct v4l2_buffer *b);
+int saa7134_qbuf(struct file *file, void *priv, struct v4l2_buffer *b);
+int saa7134_dqbuf(struct file *file, void *priv, struct v4l2_buffer *b);
+int saa7134_streamon(struct file *file, void *priv,
+					enum v4l2_buf_type type);
+int saa7134_streamoff(struct file *file, void *priv,
+					enum v4l2_buf_type type);
 
 int saa7134_videoport_init(struct saa7134_dev *dev);
 void saa7134_set_tvnorm_hw(struct saa7134_dev *dev);
@@ -773,6 +810,7 @@
 int saa7134_video_init2(struct saa7134_dev *dev);
 void saa7134_irq_video_signalchange(struct saa7134_dev *dev);
 void saa7134_irq_video_done(struct saa7134_dev *dev, unsigned long status);
+void saa7134_video_fini(struct saa7134_dev *dev);
 
 
 /* ----------------------------------------------------------- */
diff --git a/drivers/media/pci/sta2x11/sta2x11_vip.c b/drivers/media/pci/sta2x11/sta2x11_vip.c
index 77edc11..e5cfb6c 100644
--- a/drivers/media/pci/sta2x11/sta2x11_vip.c
+++ b/drivers/media/pci/sta2x11/sta2x11_vip.c
@@ -1303,7 +1303,7 @@
 
 #endif
 
-static DEFINE_PCI_DEVICE_TABLE(sta2x11_vip_pci_tbl) = {
+static const struct pci_device_id sta2x11_vip_pci_tbl[] = {
 	{PCI_DEVICE(PCI_VENDOR_ID_STMICRO, PCI_DEVICE_ID_STMICRO_VIP)},
 	{0,}
 };
diff --git a/drivers/media/platform/Kconfig b/drivers/media/platform/Kconfig
index d7f0249..b2a4403 100644
--- a/drivers/media/platform/Kconfig
+++ b/drivers/media/platform/Kconfig
@@ -36,7 +36,8 @@
 config VIDEO_SH_VOU
 	tristate "SuperH VOU video output driver"
 	depends on MEDIA_CAMERA_SUPPORT
-	depends on VIDEO_DEV && ARCH_SHMOBILE && I2C
+	depends on VIDEO_DEV && I2C
+	depends on ARCH_SHMOBILE || COMPILE_TEST
 	select VIDEOBUF_DMA_CONTIG
 	help
 	  Support for the Video Output Unit (VOU) on SuperH SoCs.
@@ -90,13 +91,6 @@
 	  To compile this driver as a module, choose M here: the
 	  module will be called arv.
 
-config VIDEO_OMAP2
-	tristate "OMAP2 Camera Capture Interface driver"
-	depends on VIDEO_DEV && ARCH_OMAP2 && VIDEO_V4L2_INT_DEVICE
-	select VIDEOBUF_DMA_SG
-	---help---
-	  This is a v4l2 driver for the TI OMAP2 camera capture interface
-
 config VIDEO_OMAP3
 	tristate "OMAP 3 Camera support"
 	depends on OMAP_IOVMM && VIDEO_V4L2 && I2C && VIDEO_V4L2_SUBDEV_API && ARCH_OMAP3
diff --git a/drivers/media/platform/Makefile b/drivers/media/platform/Makefile
index 1348ba1..e5269da 100644
--- a/drivers/media/platform/Makefile
+++ b/drivers/media/platform/Makefile
@@ -2,8 +2,6 @@
 # Makefile for the video capture/playback device drivers.
 #
 
-omap2cam-objs	:=	omap24xxcam.o omap24xxcam-dma.o
-
 obj-$(CONFIG_VIDEO_VINO) += indycam.o
 obj-$(CONFIG_VIDEO_VINO) += vino.o
 
@@ -14,7 +12,6 @@
 obj-$(CONFIG_VIDEO_CAFE_CCIC) += marvell-ccic/
 obj-$(CONFIG_VIDEO_MMP_CAMERA) += marvell-ccic/
 
-obj-$(CONFIG_VIDEO_OMAP2)		+= omap2cam.o
 obj-$(CONFIG_VIDEO_OMAP3)	+= omap3isp/
 
 obj-$(CONFIG_VIDEO_VIU) += fsl-viu.o
diff --git a/drivers/media/platform/davinci/vpbe_display.c b/drivers/media/platform/davinci/vpbe_display.c
index eac472b..b02aba4 100644
--- a/drivers/media/platform/davinci/vpbe_display.c
+++ b/drivers/media/platform/davinci/vpbe_display.c
@@ -347,7 +347,7 @@
 	/* If buffer queue is empty, return error */
 	if (list_empty(&layer->dma_queue)) {
 		v4l2_err(&vpbe_dev->v4l2_dev, "buffer queue is empty\n");
-		return -EINVAL;
+		return -ENOBUFS;
 	}
 	/* Get the next frame from the buffer queue */
 	layer->next_frm = layer->cur_frm = list_entry(layer->dma_queue.next,
diff --git a/drivers/media/platform/davinci/vpif_capture.c b/drivers/media/platform/davinci/vpif_capture.c
index 52ac5e6..735ec47 100644
--- a/drivers/media/platform/davinci/vpif_capture.c
+++ b/drivers/media/platform/davinci/vpif_capture.c
@@ -277,7 +277,7 @@
 	if (list_empty(&common->dma_queue)) {
 		spin_unlock_irqrestore(&common->irqlock, flags);
 		vpif_dbg(1, debug, "buffer queue is empty\n");
-		return -EIO;
+		return -ENOBUFS;
 	}
 
 	/* Get the next frame from the buffer queue */
diff --git a/drivers/media/platform/davinci/vpif_display.c b/drivers/media/platform/davinci/vpif_display.c
index c31bcf1..9d115cd 100644
--- a/drivers/media/platform/davinci/vpif_display.c
+++ b/drivers/media/platform/davinci/vpif_display.c
@@ -239,7 +239,7 @@
 	if (list_empty(&common->dma_queue)) {
 		spin_unlock_irqrestore(&common->irqlock, flags);
 		vpif_err("buffer queue is empty\n");
-		return -EIO;
+		return -ENOBUFS;
 	}
 
 	/* Get the next frame from the buffer queue */
diff --git a/drivers/media/platform/exynos4-is/Kconfig b/drivers/media/platform/exynos4-is/Kconfig
index d2d3b4b..01ed1ecd 100644
--- a/drivers/media/platform/exynos4-is/Kconfig
+++ b/drivers/media/platform/exynos4-is/Kconfig
@@ -1,7 +1,7 @@
 
 config VIDEO_SAMSUNG_EXYNOS4_IS
 	bool "Samsung S5P/EXYNOS4 SoC series Camera Subsystem driver"
-	depends on VIDEO_V4L2 && VIDEO_V4L2_SUBDEV_API && PM_RUNTIME
+	depends on VIDEO_V4L2 && VIDEO_V4L2_SUBDEV_API
 	depends on (PLAT_S5P || ARCH_EXYNOS)
 	help
 	  Say Y here to enable camera host interface devices for
diff --git a/drivers/media/platform/exynos4-is/fimc-capture.c b/drivers/media/platform/exynos4-is/fimc-capture.c
index fb27ff7..8a712ca 100644
--- a/drivers/media/platform/exynos4-is/fimc-capture.c
+++ b/drivers/media/platform/exynos4-is/fimc-capture.c
@@ -549,7 +549,7 @@
 		vc->streaming = false;
 	}
 
-	ret = vb2_fop_release(file);
+	ret = _vb2_fop_release(file, NULL);
 
 	if (close) {
 		clear_bit(ST_CAPT_BUSY, &fimc->state);
diff --git a/drivers/media/platform/exynos4-is/fimc-core.c b/drivers/media/platform/exynos4-is/fimc-core.c
index f791569..da2fc86 100644
--- a/drivers/media/platform/exynos4-is/fimc-core.c
+++ b/drivers/media/platform/exynos4-is/fimc-core.c
@@ -998,41 +998,46 @@
 
 	ret = devm_request_irq(dev, res->start, fimc_irq_handler,
 			       0, dev_name(dev), fimc);
-	if (ret) {
+	if (ret < 0) {
 		dev_err(dev, "failed to install irq (%d)\n", ret);
-		goto err_clk;
+		goto err_sclk;
 	}
 
 	ret = fimc_initialize_capture_subdev(fimc);
-	if (ret)
-		goto err_clk;
+	if (ret < 0)
+		goto err_sclk;
 
 	platform_set_drvdata(pdev, fimc);
 	pm_runtime_enable(dev);
-	ret = pm_runtime_get_sync(dev);
-	if (ret < 0)
-		goto err_sd;
+
+	if (!pm_runtime_enabled(dev)) {
+		ret = clk_enable(fimc->clock[CLK_GATE]);
+		if (ret < 0)
+			goto err_sd;
+	}
+
 	/* Initialize contiguous memory allocator */
 	fimc->alloc_ctx = vb2_dma_contig_init_ctx(dev);
 	if (IS_ERR(fimc->alloc_ctx)) {
 		ret = PTR_ERR(fimc->alloc_ctx);
-		goto err_pm;
+		goto err_gclk;
 	}
 
 	dev_dbg(dev, "FIMC.%d registered successfully\n", fimc->id);
-
-	pm_runtime_put(dev);
 	return 0;
-err_pm:
-	pm_runtime_put(dev);
+
+err_gclk:
+	if (!pm_runtime_enabled(dev))
+		clk_disable(fimc->clock[CLK_GATE]);
 err_sd:
 	fimc_unregister_capture_subdev(fimc);
-err_clk:
+err_sclk:
 	clk_disable(fimc->clock[CLK_BUS]);
 	fimc_clk_put(fimc);
 	return ret;
 }
 
+#ifdef CONFIG_PM_RUNTIME
 static int fimc_runtime_resume(struct device *dev)
 {
 	struct fimc_dev *fimc =	dev_get_drvdata(dev);
@@ -1065,6 +1070,7 @@
 	dbg("fimc%d: state: 0x%lx", fimc->id, fimc->state);
 	return ret;
 }
+#endif
 
 #ifdef CONFIG_PM_SLEEP
 static int fimc_resume(struct device *dev)
diff --git a/drivers/media/platform/exynos4-is/fimc-core.h b/drivers/media/platform/exynos4-is/fimc-core.h
index 3d376fa..1790fb4 100644
--- a/drivers/media/platform/exynos4-is/fimc-core.h
+++ b/drivers/media/platform/exynos4-is/fimc-core.h
@@ -481,7 +481,6 @@
  * @flags:		additional flags for image conversion
  * @state:		flags to keep track of user configuration
  * @fimc_dev:		the FIMC device this context applies to
- * @m2m_ctx:		memory-to-memory device context
  * @fh:			v4l2 file handle
  * @ctrls:		v4l2 controls structure
  */
@@ -502,7 +501,6 @@
 	u32			flags;
 	u32			state;
 	struct fimc_dev		*fimc_dev;
-	struct v4l2_m2m_ctx	*m2m_ctx;
 	struct v4l2_fh		fh;
 	struct fimc_ctrls	ctrls;
 };
diff --git a/drivers/media/platform/exynos4-is/fimc-is-regs.c b/drivers/media/platform/exynos4-is/fimc-is-regs.c
index f758e26..2628733 100644
--- a/drivers/media/platform/exynos4-is/fimc-is-regs.c
+++ b/drivers/media/platform/exynos4-is/fimc-is-regs.c
@@ -33,47 +33,23 @@
 	mcuctl_write(INTGR0_INTGD(0), is, MCUCTL_REG_INTGR0);
 }
 
-int fimc_is_hw_wait_intsr0_intsd0(struct fimc_is *is)
-{
-	unsigned int timeout = 2000;
-	u32 cfg, status;
-
-	cfg = mcuctl_read(is, MCUCTL_REG_INTSR0);
-	status = INTSR0_GET_INTSD(0, cfg);
-
-	while (status) {
-		cfg = mcuctl_read(is, MCUCTL_REG_INTSR0);
-		status = INTSR0_GET_INTSD(0, cfg);
-		if (timeout == 0) {
-			dev_warn(&is->pdev->dev, "%s timeout\n",
-				 __func__);
-			return -ETIME;
-		}
-		timeout--;
-		udelay(1);
-	}
-	return 0;
-}
-
 int fimc_is_hw_wait_intmsr0_intmsd0(struct fimc_is *is)
 {
 	unsigned int timeout = 2000;
 	u32 cfg, status;
 
-	cfg = mcuctl_read(is, MCUCTL_REG_INTMSR0);
-	status = INTMSR0_GET_INTMSD(0, cfg);
-
-	while (status) {
+	do {
 		cfg = mcuctl_read(is, MCUCTL_REG_INTMSR0);
 		status = INTMSR0_GET_INTMSD(0, cfg);
-		if (timeout == 0) {
+
+		if (--timeout == 0) {
 			dev_warn(&is->pdev->dev, "%s timeout\n",
 				 __func__);
-			return -ETIME;
+			return -ETIMEDOUT;
 		}
-		timeout--;
 		udelay(1);
-	}
+	} while (status != 0);
+
 	return 0;
 }
 
diff --git a/drivers/media/platform/exynos4-is/fimc-is-regs.h b/drivers/media/platform/exynos4-is/fimc-is-regs.h
index 5fa2fda..1d9d4ff 100644
--- a/drivers/media/platform/exynos4-is/fimc-is-regs.h
+++ b/drivers/media/platform/exynos4-is/fimc-is-regs.h
@@ -145,7 +145,6 @@
 int fimc_is_hw_get_params(struct fimc_is *is, unsigned int num);
 
 void fimc_is_hw_set_intgr0_gd0(struct fimc_is *is);
-int fimc_is_hw_wait_intsr0_intsd0(struct fimc_is *is);
 int fimc_is_hw_wait_intmsr0_intmsd0(struct fimc_is *is);
 void fimc_is_hw_set_sensor_num(struct fimc_is *is);
 void fimc_is_hw_stream_on(struct fimc_is *is);
diff --git a/drivers/media/platform/exynos4-is/fimc-is.c b/drivers/media/platform/exynos4-is/fimc-is.c
index 9770fa9..13a4228 100644
--- a/drivers/media/platform/exynos4-is/fimc-is.c
+++ b/drivers/media/platform/exynos4-is/fimc-is.c
@@ -781,6 +781,9 @@
 	return is->debugfs_entry == NULL ? -EIO : 0;
 }
 
+static int fimc_is_runtime_resume(struct device *dev);
+static int fimc_is_runtime_suspend(struct device *dev);
+
 static int fimc_is_probe(struct platform_device *pdev)
 {
 	struct device *dev = &pdev->dev;
@@ -835,14 +838,20 @@
 	}
 	pm_runtime_enable(dev);
 
+	if (!pm_runtime_enabled(dev)) {
+		ret = fimc_is_runtime_resume(dev);
+		if (ret < 0)
+			goto err_irq;
+	}
+
 	ret = pm_runtime_get_sync(dev);
 	if (ret < 0)
-		goto err_irq;
+		goto err_pm;
 
 	is->alloc_ctx = vb2_dma_contig_init_ctx(dev);
 	if (IS_ERR(is->alloc_ctx)) {
 		ret = PTR_ERR(is->alloc_ctx);
-		goto err_irq;
+		goto err_pm;
 	}
 	/*
 	 * Register FIMC-IS V4L2 subdevs to this driver. The video nodes
@@ -867,10 +876,13 @@
 
 err_dfs:
 	fimc_is_debugfs_remove(is);
-err_vb:
-	vb2_dma_contig_cleanup_ctx(is->alloc_ctx);
 err_sd:
 	fimc_is_unregister_subdevs(is);
+err_vb:
+	vb2_dma_contig_cleanup_ctx(is->alloc_ctx);
+err_pm:
+	if (!pm_runtime_enabled(dev))
+		fimc_is_runtime_suspend(dev);
 err_irq:
 	free_irq(is->irq, is);
 err_clk:
@@ -919,10 +931,13 @@
 
 static int fimc_is_remove(struct platform_device *pdev)
 {
-	struct fimc_is *is = platform_get_drvdata(pdev);
+	struct device *dev = &pdev->dev;
+	struct fimc_is *is = dev_get_drvdata(dev);
 
-	pm_runtime_disable(&pdev->dev);
-	pm_runtime_set_suspended(&pdev->dev);
+	pm_runtime_disable(dev);
+	pm_runtime_set_suspended(dev);
+	if (!pm_runtime_status_suspended(dev))
+		fimc_is_runtime_suspend(dev);
 	free_irq(is->irq, is);
 	fimc_is_unregister_subdevs(is);
 	vb2_dma_contig_cleanup_ctx(is->alloc_ctx);
diff --git a/drivers/media/platform/exynos4-is/fimc-lite-reg.c b/drivers/media/platform/exynos4-is/fimc-lite-reg.c
index 72a343e3b..d0dc7ee 100644
--- a/drivers/media/platform/exynos4-is/fimc-lite-reg.c
+++ b/drivers/media/platform/exynos4-is/fimc-lite-reg.c
@@ -133,7 +133,7 @@
 	int i = ARRAY_SIZE(src_pixfmt_map);
 	u32 cfg;
 
-	while (--i >= 0) {
+	while (--i) {
 		if (src_pixfmt_map[i][0] == pixelcode)
 			break;
 	}
@@ -240,7 +240,7 @@
 	u32 cfg = readl(dev->regs + FLITE_REG_CIODMAFMT);
 	int i = ARRAY_SIZE(pixcode);
 
-	while (--i >= 0)
+	while (--i)
 		if (pixcode[i][0] == f->fmt->mbus_code)
 			break;
 	cfg &= ~FLITE_REG_CIODMAFMT_YCBCR_ORDER_MASK;
diff --git a/drivers/media/platform/exynos4-is/fimc-lite.c b/drivers/media/platform/exynos4-is/fimc-lite.c
index e5798f7..779ec3c 100644
--- a/drivers/media/platform/exynos4-is/fimc-lite.c
+++ b/drivers/media/platform/exynos4-is/fimc-lite.c
@@ -546,7 +546,7 @@
 		mutex_unlock(&entity->parent->graph_mutex);
 	}
 
-	vb2_fop_release(file);
+	_vb2_fop_release(file, NULL);
 	pm_runtime_put(&fimc->pdev->dev);
 	clear_bit(ST_FLITE_SUSPENDED, &fimc->state);
 
@@ -1549,42 +1549,46 @@
 			       0, dev_name(dev), fimc);
 	if (ret) {
 		dev_err(dev, "Failed to install irq (%d)\n", ret);
-		goto err_clk;
+		goto err_clk_put;
 	}
 
 	/* The video node will be created within the subdev's registered() op */
 	ret = fimc_lite_create_capture_subdev(fimc);
 	if (ret)
-		goto err_clk;
+		goto err_clk_put;
 
 	platform_set_drvdata(pdev, fimc);
 	pm_runtime_enable(dev);
-	ret = pm_runtime_get_sync(dev);
-	if (ret < 0)
-		goto err_sd;
+
+	if (!pm_runtime_enabled(dev)) {
+		ret = clk_enable(fimc->clock);
+		if (ret < 0)
+			goto err_sd;
+	}
 
 	fimc->alloc_ctx = vb2_dma_contig_init_ctx(dev);
 	if (IS_ERR(fimc->alloc_ctx)) {
 		ret = PTR_ERR(fimc->alloc_ctx);
-		goto err_pm;
+		goto err_clk_dis;
 	}
 
-	pm_runtime_put(dev);
-
 	fimc_lite_set_default_config(fimc);
 
 	dev_dbg(dev, "FIMC-LITE.%d registered successfully\n",
 		fimc->index);
 	return 0;
-err_pm:
-	pm_runtime_put(dev);
+
+err_clk_dis:
+	if (!pm_runtime_enabled(dev))
+		clk_disable(fimc->clock);
 err_sd:
 	fimc_lite_unregister_capture_subdev(fimc);
-err_clk:
+err_clk_put:
 	fimc_lite_clk_put(fimc);
 	return ret;
 }
 
+#ifdef CONFIG_PM_RUNTIME
 static int fimc_lite_runtime_resume(struct device *dev)
 {
 	struct fimc_lite *fimc = dev_get_drvdata(dev);
@@ -1600,6 +1604,7 @@
 	clk_disable(fimc->clock);
 	return 0;
 }
+#endif
 
 #ifdef CONFIG_PM_SLEEP
 static int fimc_lite_resume(struct device *dev)
diff --git a/drivers/media/platform/exynos4-is/fimc-m2m.c b/drivers/media/platform/exynos4-is/fimc-m2m.c
index 8d33b68..9da95bd 100644
--- a/drivers/media/platform/exynos4-is/fimc-m2m.c
+++ b/drivers/media/platform/exynos4-is/fimc-m2m.c
@@ -44,17 +44,17 @@
 {
 	struct vb2_buffer *src_vb, *dst_vb;
 
-	if (!ctx || !ctx->m2m_ctx)
+	if (!ctx || !ctx->fh.m2m_ctx)
 		return;
 
-	src_vb = v4l2_m2m_src_buf_remove(ctx->m2m_ctx);
-	dst_vb = v4l2_m2m_dst_buf_remove(ctx->m2m_ctx);
+	src_vb = v4l2_m2m_src_buf_remove(ctx->fh.m2m_ctx);
+	dst_vb = v4l2_m2m_dst_buf_remove(ctx->fh.m2m_ctx);
 
 	if (src_vb && dst_vb) {
 		v4l2_m2m_buf_done(src_vb, vb_state);
 		v4l2_m2m_buf_done(dst_vb, vb_state);
 		v4l2_m2m_job_finish(ctx->fimc_dev->m2m.m2m_dev,
-				    ctx->m2m_ctx);
+				    ctx->fh.m2m_ctx);
 	}
 }
 
@@ -123,12 +123,12 @@
 		fimc_prepare_dma_offset(ctx, df);
 	}
 
-	src_vb = v4l2_m2m_next_src_buf(ctx->m2m_ctx);
+	src_vb = v4l2_m2m_next_src_buf(ctx->fh.m2m_ctx);
 	ret = fimc_prepare_addr(ctx, src_vb, sf, &sf->paddr);
 	if (ret)
 		goto dma_unlock;
 
-	dst_vb = v4l2_m2m_next_dst_buf(ctx->m2m_ctx);
+	dst_vb = v4l2_m2m_next_dst_buf(ctx->fh.m2m_ctx);
 	ret = fimc_prepare_addr(ctx, dst_vb, df, &df->paddr);
 	if (ret)
 		goto dma_unlock;
@@ -219,31 +219,15 @@
 static void fimc_buf_queue(struct vb2_buffer *vb)
 {
 	struct fimc_ctx *ctx = vb2_get_drv_priv(vb->vb2_queue);
-
-	dbg("ctx: %p, ctx->state: 0x%x", ctx, ctx->state);
-
-	if (ctx->m2m_ctx)
-		v4l2_m2m_buf_queue(ctx->m2m_ctx, vb);
-}
-
-static void fimc_lock(struct vb2_queue *vq)
-{
-	struct fimc_ctx *ctx = vb2_get_drv_priv(vq);
-	mutex_lock(&ctx->fimc_dev->lock);
-}
-
-static void fimc_unlock(struct vb2_queue *vq)
-{
-	struct fimc_ctx *ctx = vb2_get_drv_priv(vq);
-	mutex_unlock(&ctx->fimc_dev->lock);
+	v4l2_m2m_buf_queue(ctx->fh.m2m_ctx, vb);
 }
 
 static struct vb2_ops fimc_qops = {
 	.queue_setup	 = fimc_queue_setup,
 	.buf_prepare	 = fimc_buf_prepare,
 	.buf_queue	 = fimc_buf_queue,
-	.wait_prepare	 = fimc_unlock,
-	.wait_finish	 = fimc_lock,
+	.wait_prepare	 = vb2_ops_wait_prepare,
+	.wait_finish	 = vb2_ops_wait_finish,
 	.stop_streaming	 = stop_streaming,
 	.start_streaming = start_streaming,
 };
@@ -385,7 +369,7 @@
 	if (ret)
 		return ret;
 
-	vq = v4l2_m2m_get_vq(ctx->m2m_ctx, f->type);
+	vq = v4l2_m2m_get_vq(ctx->fh.m2m_ctx, f->type);
 
 	if (vb2_is_busy(vq)) {
 		v4l2_err(&fimc->m2m.vfd, "queue (%d) busy\n", f->type);
@@ -410,56 +394,6 @@
 	return 0;
 }
 
-static int fimc_m2m_reqbufs(struct file *file, void *fh,
-			    struct v4l2_requestbuffers *reqbufs)
-{
-	struct fimc_ctx *ctx = fh_to_ctx(fh);
-	return v4l2_m2m_reqbufs(file, ctx->m2m_ctx, reqbufs);
-}
-
-static int fimc_m2m_querybuf(struct file *file, void *fh,
-			     struct v4l2_buffer *buf)
-{
-	struct fimc_ctx *ctx = fh_to_ctx(fh);
-	return v4l2_m2m_querybuf(file, ctx->m2m_ctx, buf);
-}
-
-static int fimc_m2m_qbuf(struct file *file, void *fh,
-			 struct v4l2_buffer *buf)
-{
-	struct fimc_ctx *ctx = fh_to_ctx(fh);
-	return v4l2_m2m_qbuf(file, ctx->m2m_ctx, buf);
-}
-
-static int fimc_m2m_dqbuf(struct file *file, void *fh,
-			  struct v4l2_buffer *buf)
-{
-	struct fimc_ctx *ctx = fh_to_ctx(fh);
-	return v4l2_m2m_dqbuf(file, ctx->m2m_ctx, buf);
-}
-
-static int fimc_m2m_expbuf(struct file *file, void *fh,
-			    struct v4l2_exportbuffer *eb)
-{
-	struct fimc_ctx *ctx = fh_to_ctx(fh);
-	return v4l2_m2m_expbuf(file, ctx->m2m_ctx, eb);
-}
-
-
-static int fimc_m2m_streamon(struct file *file, void *fh,
-			     enum v4l2_buf_type type)
-{
-	struct fimc_ctx *ctx = fh_to_ctx(fh);
-	return v4l2_m2m_streamon(file, ctx->m2m_ctx, type);
-}
-
-static int fimc_m2m_streamoff(struct file *file, void *fh,
-			    enum v4l2_buf_type type)
-{
-	struct fimc_ctx *ctx = fh_to_ctx(fh);
-	return v4l2_m2m_streamoff(file, ctx->m2m_ctx, type);
-}
-
 static int fimc_m2m_cropcap(struct file *file, void *fh,
 			    struct v4l2_cropcap *cr)
 {
@@ -598,13 +532,13 @@
 	.vidioc_try_fmt_vid_out_mplane	= fimc_m2m_try_fmt_mplane,
 	.vidioc_s_fmt_vid_cap_mplane	= fimc_m2m_s_fmt_mplane,
 	.vidioc_s_fmt_vid_out_mplane	= fimc_m2m_s_fmt_mplane,
-	.vidioc_reqbufs			= fimc_m2m_reqbufs,
-	.vidioc_querybuf		= fimc_m2m_querybuf,
-	.vidioc_qbuf			= fimc_m2m_qbuf,
-	.vidioc_dqbuf			= fimc_m2m_dqbuf,
-	.vidioc_expbuf			= fimc_m2m_expbuf,
-	.vidioc_streamon		= fimc_m2m_streamon,
-	.vidioc_streamoff		= fimc_m2m_streamoff,
+	.vidioc_reqbufs			= v4l2_m2m_ioctl_reqbufs,
+	.vidioc_querybuf		= v4l2_m2m_ioctl_querybuf,
+	.vidioc_qbuf			= v4l2_m2m_ioctl_qbuf,
+	.vidioc_dqbuf			= v4l2_m2m_ioctl_dqbuf,
+	.vidioc_expbuf			= v4l2_m2m_ioctl_expbuf,
+	.vidioc_streamon		= v4l2_m2m_ioctl_streamon,
+	.vidioc_streamoff		= v4l2_m2m_ioctl_streamoff,
 	.vidioc_g_crop			= fimc_m2m_g_crop,
 	.vidioc_s_crop			= fimc_m2m_s_crop,
 	.vidioc_cropcap			= fimc_m2m_cropcap
@@ -624,6 +558,7 @@
 	src_vq->mem_ops = &vb2_dma_contig_memops;
 	src_vq->buf_struct_size = sizeof(struct v4l2_m2m_buffer);
 	src_vq->timestamp_type = V4L2_BUF_FLAG_TIMESTAMP_COPY;
+	src_vq->lock = &ctx->fimc_dev->lock;
 
 	ret = vb2_queue_init(src_vq);
 	if (ret)
@@ -636,6 +571,7 @@
 	dst_vq->mem_ops = &vb2_dma_contig_memops;
 	dst_vq->buf_struct_size = sizeof(struct v4l2_m2m_buffer);
 	dst_vq->timestamp_type = V4L2_BUF_FLAG_TIMESTAMP_COPY;
+	dst_vq->lock = &ctx->fimc_dev->lock;
 
 	return vb2_queue_init(dst_vq);
 }
@@ -708,9 +644,9 @@
 	ctx->out_path = FIMC_IO_DMA;
 	ctx->scaler.enabled = 1;
 
-	ctx->m2m_ctx = v4l2_m2m_ctx_init(fimc->m2m.m2m_dev, ctx, queue_init);
-	if (IS_ERR(ctx->m2m_ctx)) {
-		ret = PTR_ERR(ctx->m2m_ctx);
+	ctx->fh.m2m_ctx = v4l2_m2m_ctx_init(fimc->m2m.m2m_dev, ctx, queue_init);
+	if (IS_ERR(ctx->fh.m2m_ctx)) {
+		ret = PTR_ERR(ctx->fh.m2m_ctx);
 		goto error_c;
 	}
 
@@ -725,7 +661,7 @@
 	return 0;
 
 error_m2m_ctx:
-	v4l2_m2m_ctx_release(ctx->m2m_ctx);
+	v4l2_m2m_ctx_release(ctx->fh.m2m_ctx);
 error_c:
 	fimc_ctrls_delete(ctx);
 error_fh:
@@ -747,7 +683,7 @@
 
 	mutex_lock(&fimc->lock);
 
-	v4l2_m2m_ctx_release(ctx->m2m_ctx);
+	v4l2_m2m_ctx_release(ctx->fh.m2m_ctx);
 	fimc_ctrls_delete(ctx);
 	v4l2_fh_del(&ctx->fh);
 	v4l2_fh_exit(&ctx->fh);
@@ -760,45 +696,13 @@
 	return 0;
 }
 
-static unsigned int fimc_m2m_poll(struct file *file,
-				  struct poll_table_struct *wait)
-{
-	struct fimc_ctx *ctx = fh_to_ctx(file->private_data);
-	struct fimc_dev *fimc = ctx->fimc_dev;
-	int ret;
-
-	if (mutex_lock_interruptible(&fimc->lock))
-		return -ERESTARTSYS;
-
-	ret = v4l2_m2m_poll(file, ctx->m2m_ctx, wait);
-	mutex_unlock(&fimc->lock);
-
-	return ret;
-}
-
-
-static int fimc_m2m_mmap(struct file *file, struct vm_area_struct *vma)
-{
-	struct fimc_ctx *ctx = fh_to_ctx(file->private_data);
-	struct fimc_dev *fimc = ctx->fimc_dev;
-	int ret;
-
-	if (mutex_lock_interruptible(&fimc->lock))
-		return -ERESTARTSYS;
-
-	ret = v4l2_m2m_mmap(file, ctx->m2m_ctx, vma);
-	mutex_unlock(&fimc->lock);
-
-	return ret;
-}
-
 static const struct v4l2_file_operations fimc_m2m_fops = {
 	.owner		= THIS_MODULE,
 	.open		= fimc_m2m_open,
 	.release	= fimc_m2m_release,
-	.poll		= fimc_m2m_poll,
+	.poll		= v4l2_m2m_fop_poll,
 	.unlocked_ioctl	= video_ioctl2,
-	.mmap		= fimc_m2m_mmap,
+	.mmap		= v4l2_m2m_fop_mmap,
 };
 
 static struct v4l2_m2m_ops m2m_ops = {
diff --git a/drivers/media/platform/exynos4-is/mipi-csis.c b/drivers/media/platform/exynos4-is/mipi-csis.c
index 9fc2af6..f3c3591 100644
--- a/drivers/media/platform/exynos4-is/mipi-csis.c
+++ b/drivers/media/platform/exynos4-is/mipi-csis.c
@@ -91,7 +91,7 @@
 #define S5PCSIS_INTSRC_ODD_BEFORE	(1 << 29)
 #define S5PCSIS_INTSRC_ODD_AFTER	(1 << 28)
 #define S5PCSIS_INTSRC_ODD		(0x3 << 28)
-#define S5PCSIS_INTSRC_NON_IMAGE_DATA	(0xff << 28)
+#define S5PCSIS_INTSRC_NON_IMAGE_DATA	(0xf << 28)
 #define S5PCSIS_INTSRC_FRAME_START	(1 << 27)
 #define S5PCSIS_INTSRC_FRAME_END	(1 << 26)
 #define S5PCSIS_INTSRC_ERR_SOT_HS	(0xf << 12)
@@ -790,6 +790,7 @@
 #define s5pcsis_parse_dt(pdev, state) (-ENOSYS)
 #endif
 
+static int s5pcsis_pm_resume(struct device *dev, bool runtime);
 static const struct of_device_id s5pcsis_of_match[];
 
 static int s5pcsis_probe(struct platform_device *pdev)
@@ -902,13 +903,21 @@
 	/* .. and a pointer to the subdev. */
 	platform_set_drvdata(pdev, &state->sd);
 	memcpy(state->events, s5pcsis_events, sizeof(state->events));
+
 	pm_runtime_enable(dev);
+	if (!pm_runtime_enabled(dev)) {
+		ret = s5pcsis_pm_resume(dev, true);
+		if (ret < 0)
+			goto e_m_ent;
+	}
 
 	dev_info(&pdev->dev, "lanes: %d, hs_settle: %d, wclk: %d, freq: %u\n",
 		 state->num_lanes, state->hs_settle, state->wclk_ext,
 		 state->clk_frequency);
 	return 0;
 
+e_m_ent:
+	media_entity_cleanup(&state->sd.entity);
 e_clkdis:
 	clk_disable(state->clock[CSIS_CLK_MUX]);
 e_clkput:
@@ -1014,7 +1023,7 @@
 	struct csis_state *state = sd_to_csis_state(sd);
 
 	pm_runtime_disable(&pdev->dev);
-	s5pcsis_pm_suspend(&pdev->dev, false);
+	s5pcsis_pm_suspend(&pdev->dev, true);
 	clk_disable(state->clock[CSIS_CLK_MUX]);
 	pm_runtime_set_suspended(&pdev->dev);
 	s5pcsis_clk_put(state);
diff --git a/drivers/media/platform/fsl-viu.c b/drivers/media/platform/fsl-viu.c
index 6a23223..dbf0ce3 100644
--- a/drivers/media/platform/fsl-viu.c
+++ b/drivers/media/platform/fsl-viu.c
@@ -1580,7 +1580,7 @@
 	}
 
 	/* enable VIU clock */
-	clk = devm_clk_get(&op->dev, "viu_clk");
+	clk = devm_clk_get(&op->dev, "ipg");
 	if (IS_ERR(clk)) {
 		dev_err(&op->dev, "failed to lookup the clock!\n");
 		ret = PTR_ERR(clk);
diff --git a/drivers/media/platform/m2m-deinterlace.c b/drivers/media/platform/m2m-deinterlace.c
index 65cab70..6bb86b5 100644
--- a/drivers/media/platform/m2m-deinterlace.c
+++ b/drivers/media/platform/m2m-deinterlace.c
@@ -918,7 +918,7 @@
 		return ret;
 	}
 
-	ctx->xt = kzalloc(sizeof(struct dma_async_tx_descriptor) +
+	ctx->xt = kzalloc(sizeof(struct dma_interleaved_template) +
 				sizeof(struct data_chunk), GFP_KERNEL);
 	if (!ctx->xt) {
 		kfree(ctx);
diff --git a/drivers/media/platform/mem2mem_testdev.c b/drivers/media/platform/mem2mem_testdev.c
index 8df5975..08e2437 100644
--- a/drivers/media/platform/mem2mem_testdev.c
+++ b/drivers/media/platform/mem2mem_testdev.c
@@ -177,8 +177,6 @@
 
 	enum v4l2_colorspace	colorspace;
 
-	struct v4l2_m2m_ctx	*m2m_ctx;
-
 	/* Source and destination queue data */
 	struct m2mtest_q_data   q_data[2];
 };
@@ -342,8 +340,8 @@
 {
 	struct m2mtest_ctx *ctx = priv;
 
-	if (v4l2_m2m_num_src_bufs_ready(ctx->m2m_ctx) < ctx->translen
-	    || v4l2_m2m_num_dst_bufs_ready(ctx->m2m_ctx) < ctx->translen) {
+	if (v4l2_m2m_num_src_bufs_ready(ctx->fh.m2m_ctx) < ctx->translen
+	    || v4l2_m2m_num_dst_bufs_ready(ctx->fh.m2m_ctx) < ctx->translen) {
 		dprintk(ctx->dev, "Not enough buffers available\n");
 		return 0;
 	}
@@ -359,21 +357,6 @@
 	ctx->aborting = 1;
 }
 
-static void m2mtest_lock(void *priv)
-{
-	struct m2mtest_ctx *ctx = priv;
-	struct m2mtest_dev *dev = ctx->dev;
-	mutex_lock(&dev->dev_mutex);
-}
-
-static void m2mtest_unlock(void *priv)
-{
-	struct m2mtest_ctx *ctx = priv;
-	struct m2mtest_dev *dev = ctx->dev;
-	mutex_unlock(&dev->dev_mutex);
-}
-
-
 /* device_run() - prepares and starts the device
  *
  * This simulates all the immediate preparations required before starting
@@ -386,8 +369,8 @@
 	struct m2mtest_dev *dev = ctx->dev;
 	struct vb2_buffer *src_buf, *dst_buf;
 
-	src_buf = v4l2_m2m_next_src_buf(ctx->m2m_ctx);
-	dst_buf = v4l2_m2m_next_dst_buf(ctx->m2m_ctx);
+	src_buf = v4l2_m2m_next_src_buf(ctx->fh.m2m_ctx);
+	dst_buf = v4l2_m2m_next_dst_buf(ctx->fh.m2m_ctx);
 
 	device_process(ctx, src_buf, dst_buf);
 
@@ -409,8 +392,8 @@
 		return;
 	}
 
-	src_vb = v4l2_m2m_src_buf_remove(curr_ctx->m2m_ctx);
-	dst_vb = v4l2_m2m_dst_buf_remove(curr_ctx->m2m_ctx);
+	src_vb = v4l2_m2m_src_buf_remove(curr_ctx->fh.m2m_ctx);
+	dst_vb = v4l2_m2m_dst_buf_remove(curr_ctx->fh.m2m_ctx);
 
 	curr_ctx->num_processed++;
 
@@ -423,7 +406,7 @@
 	    || curr_ctx->aborting) {
 		dprintk(curr_ctx->dev, "Finishing transaction\n");
 		curr_ctx->num_processed = 0;
-		v4l2_m2m_job_finish(m2mtest_dev->m2m_dev, curr_ctx->m2m_ctx);
+		v4l2_m2m_job_finish(m2mtest_dev->m2m_dev, curr_ctx->fh.m2m_ctx);
 	} else {
 		device_run(curr_ctx);
 	}
@@ -491,7 +474,7 @@
 	struct vb2_queue *vq;
 	struct m2mtest_q_data *q_data;
 
-	vq = v4l2_m2m_get_vq(ctx->m2m_ctx, f->type);
+	vq = v4l2_m2m_get_vq(ctx->fh.m2m_ctx, f->type);
 	if (!vq)
 		return -EINVAL;
 
@@ -594,7 +577,7 @@
 	struct m2mtest_q_data *q_data;
 	struct vb2_queue *vq;
 
-	vq = v4l2_m2m_get_vq(ctx->m2m_ctx, f->type);
+	vq = v4l2_m2m_get_vq(ctx->fh.m2m_ctx, f->type);
 	if (!vq)
 		return -EINVAL;
 
@@ -648,52 +631,6 @@
 	return ret;
 }
 
-static int vidioc_reqbufs(struct file *file, void *priv,
-			  struct v4l2_requestbuffers *reqbufs)
-{
-	struct m2mtest_ctx *ctx = file2ctx(file);
-
-	return v4l2_m2m_reqbufs(file, ctx->m2m_ctx, reqbufs);
-}
-
-static int vidioc_querybuf(struct file *file, void *priv,
-			   struct v4l2_buffer *buf)
-{
-	struct m2mtest_ctx *ctx = file2ctx(file);
-
-	return v4l2_m2m_querybuf(file, ctx->m2m_ctx, buf);
-}
-
-static int vidioc_qbuf(struct file *file, void *priv, struct v4l2_buffer *buf)
-{
-	struct m2mtest_ctx *ctx = file2ctx(file);
-
-	return v4l2_m2m_qbuf(file, ctx->m2m_ctx, buf);
-}
-
-static int vidioc_dqbuf(struct file *file, void *priv, struct v4l2_buffer *buf)
-{
-	struct m2mtest_ctx *ctx = file2ctx(file);
-
-	return v4l2_m2m_dqbuf(file, ctx->m2m_ctx, buf);
-}
-
-static int vidioc_streamon(struct file *file, void *priv,
-			   enum v4l2_buf_type type)
-{
-	struct m2mtest_ctx *ctx = file2ctx(file);
-
-	return v4l2_m2m_streamon(file, ctx->m2m_ctx, type);
-}
-
-static int vidioc_streamoff(struct file *file, void *priv,
-			    enum v4l2_buf_type type)
-{
-	struct m2mtest_ctx *ctx = file2ctx(file);
-
-	return v4l2_m2m_streamoff(file, ctx->m2m_ctx, type);
-}
-
 static int m2mtest_s_ctrl(struct v4l2_ctrl *ctrl)
 {
 	struct m2mtest_ctx *ctx =
@@ -748,14 +685,14 @@
 	.vidioc_try_fmt_vid_out	= vidioc_try_fmt_vid_out,
 	.vidioc_s_fmt_vid_out	= vidioc_s_fmt_vid_out,
 
-	.vidioc_reqbufs		= vidioc_reqbufs,
-	.vidioc_querybuf	= vidioc_querybuf,
+	.vidioc_reqbufs		= v4l2_m2m_ioctl_reqbufs,
+	.vidioc_querybuf	= v4l2_m2m_ioctl_querybuf,
+	.vidioc_qbuf		= v4l2_m2m_ioctl_qbuf,
+	.vidioc_dqbuf		= v4l2_m2m_ioctl_dqbuf,
 
-	.vidioc_qbuf		= vidioc_qbuf,
-	.vidioc_dqbuf		= vidioc_dqbuf,
+	.vidioc_streamon	= v4l2_m2m_ioctl_streamon,
+	.vidioc_streamoff	= v4l2_m2m_ioctl_streamoff,
 
-	.vidioc_streamon	= vidioc_streamon,
-	.vidioc_streamoff	= vidioc_streamoff,
 	.vidioc_subscribe_event = v4l2_ctrl_subscribe_event,
 	.vidioc_unsubscribe_event = v4l2_event_unsubscribe,
 };
@@ -818,27 +755,15 @@
 static void m2mtest_buf_queue(struct vb2_buffer *vb)
 {
 	struct m2mtest_ctx *ctx = vb2_get_drv_priv(vb->vb2_queue);
-	v4l2_m2m_buf_queue(ctx->m2m_ctx, vb);
-}
-
-static void m2mtest_wait_prepare(struct vb2_queue *q)
-{
-	struct m2mtest_ctx *ctx = vb2_get_drv_priv(q);
-	m2mtest_unlock(ctx);
-}
-
-static void m2mtest_wait_finish(struct vb2_queue *q)
-{
-	struct m2mtest_ctx *ctx = vb2_get_drv_priv(q);
-	m2mtest_lock(ctx);
+	v4l2_m2m_buf_queue(ctx->fh.m2m_ctx, vb);
 }
 
 static struct vb2_ops m2mtest_qops = {
 	.queue_setup	 = m2mtest_queue_setup,
 	.buf_prepare	 = m2mtest_buf_prepare,
 	.buf_queue	 = m2mtest_buf_queue,
-	.wait_prepare	 = m2mtest_wait_prepare,
-	.wait_finish	 = m2mtest_wait_finish,
+	.wait_prepare	 = vb2_ops_wait_prepare,
+	.wait_finish	 = vb2_ops_wait_finish,
 };
 
 static int queue_init(void *priv, struct vb2_queue *src_vq, struct vb2_queue *dst_vq)
@@ -853,6 +778,7 @@
 	src_vq->ops = &m2mtest_qops;
 	src_vq->mem_ops = &vb2_vmalloc_memops;
 	src_vq->timestamp_type = V4L2_BUF_FLAG_TIMESTAMP_COPY;
+	src_vq->lock = &ctx->dev->dev_mutex;
 
 	ret = vb2_queue_init(src_vq);
 	if (ret)
@@ -865,6 +791,7 @@
 	dst_vq->ops = &m2mtest_qops;
 	dst_vq->mem_ops = &vb2_vmalloc_memops;
 	dst_vq->timestamp_type = V4L2_BUF_FLAG_TIMESTAMP_COPY;
+	dst_vq->lock = &ctx->dev->dev_mutex;
 
 	return vb2_queue_init(dst_vq);
 }
@@ -936,10 +863,10 @@
 	ctx->q_data[V4L2_M2M_DST] = ctx->q_data[V4L2_M2M_SRC];
 	ctx->colorspace = V4L2_COLORSPACE_REC709;
 
-	ctx->m2m_ctx = v4l2_m2m_ctx_init(dev->m2m_dev, ctx, &queue_init);
+	ctx->fh.m2m_ctx = v4l2_m2m_ctx_init(dev->m2m_dev, ctx, &queue_init);
 
-	if (IS_ERR(ctx->m2m_ctx)) {
-		rc = PTR_ERR(ctx->m2m_ctx);
+	if (IS_ERR(ctx->fh.m2m_ctx)) {
+		rc = PTR_ERR(ctx->fh.m2m_ctx);
 
 		v4l2_ctrl_handler_free(hdl);
 		kfree(ctx);
@@ -949,7 +876,8 @@
 	v4l2_fh_add(&ctx->fh);
 	atomic_inc(&dev->num_inst);
 
-	dprintk(dev, "Created instance %p, m2m_ctx: %p\n", ctx, ctx->m2m_ctx);
+	dprintk(dev, "Created instance: %p, m2m_ctx: %p\n",
+		ctx, ctx->fh.m2m_ctx);
 
 open_unlock:
 	mutex_unlock(&dev->dev_mutex);
@@ -967,7 +895,7 @@
 	v4l2_fh_exit(&ctx->fh);
 	v4l2_ctrl_handler_free(&ctx->hdl);
 	mutex_lock(&dev->dev_mutex);
-	v4l2_m2m_ctx_release(ctx->m2m_ctx);
+	v4l2_m2m_ctx_release(ctx->fh.m2m_ctx);
 	mutex_unlock(&dev->dev_mutex);
 	kfree(ctx);
 
@@ -976,34 +904,13 @@
 	return 0;
 }
 
-static unsigned int m2mtest_poll(struct file *file,
-				 struct poll_table_struct *wait)
-{
-	struct m2mtest_ctx *ctx = file2ctx(file);
-
-	return v4l2_m2m_poll(file, ctx->m2m_ctx, wait);
-}
-
-static int m2mtest_mmap(struct file *file, struct vm_area_struct *vma)
-{
-	struct m2mtest_dev *dev = video_drvdata(file);
-	struct m2mtest_ctx *ctx = file2ctx(file);
-	int res;
-
-	if (mutex_lock_interruptible(&dev->dev_mutex))
-		return -ERESTARTSYS;
-	res = v4l2_m2m_mmap(file, ctx->m2m_ctx, vma);
-	mutex_unlock(&dev->dev_mutex);
-	return res;
-}
-
 static const struct v4l2_file_operations m2mtest_fops = {
 	.owner		= THIS_MODULE,
 	.open		= m2mtest_open,
 	.release	= m2mtest_release,
-	.poll		= m2mtest_poll,
+	.poll		= v4l2_m2m_fop_poll,
 	.unlocked_ioctl	= video_ioctl2,
-	.mmap		= m2mtest_mmap,
+	.mmap		= v4l2_m2m_fop_mmap,
 };
 
 static struct video_device m2mtest_videodev = {
@@ -1019,8 +926,6 @@
 	.device_run	= device_run,
 	.job_ready	= job_ready,
 	.job_abort	= job_abort,
-	.lock		= m2mtest_lock,
-	.unlock		= m2mtest_unlock,
 };
 
 static int m2mtest_probe(struct platform_device *pdev)
@@ -1133,4 +1038,3 @@
 
 module_init(m2mtest_init);
 module_exit(m2mtest_exit);
-
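
The mem2mem_testdev conversion above is the same pattern repeated for g2d and s5p-jpeg further down: the driver keeps its m2m context in the struct v4l2_fh (ctx->fh.m2m_ctx), points vq->lock at the device mutex, and then drops its hand-rolled poll/mmap/reqbufs wrappers and lock/unlock callbacks in favour of the v4l2_m2m_fop_*, v4l2_m2m_ioctl_* and vb2_ops_wait_* helpers. A minimal sketch of that wiring, with hypothetical my_* names and assuming the fh is initialised and added in open() as in the hunks above:

struct my_ctx {
	struct v4l2_fh fh;		/* file->private_data points here */
	struct my_dev *dev;
};

static int my_queue_init(void *priv, struct vb2_queue *src_vq,
			 struct vb2_queue *dst_vq)
{
	struct my_ctx *ctx = priv;
	int ret;

	/* vb2_ops_wait_prepare/finish drop and retake vq->lock for us */
	src_vq->lock = &ctx->dev->dev_mutex;
	dst_vq->lock = &ctx->dev->dev_mutex;

	ret = vb2_queue_init(src_vq);
	if (ret)
		return ret;
	return vb2_queue_init(dst_vq);
}

static const struct v4l2_file_operations my_fops = {
	.owner		= THIS_MODULE,
	.open		= my_open,		/* sets ctx->fh.m2m_ctx via v4l2_m2m_ctx_init() */
	.release	= my_release,
	.poll		= v4l2_m2m_fop_poll,	/* helpers locate the context through fh.m2m_ctx */
	.unlocked_ioctl	= video_ioctl2,
	.mmap		= v4l2_m2m_fop_mmap,
};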
diff --git a/drivers/media/platform/omap3isp/isp.c b/drivers/media/platform/omap3isp/isp.c
index fdbdeae..5807185 100644
--- a/drivers/media/platform/omap3isp/isp.c
+++ b/drivers/media/platform/omap3isp/isp.c
@@ -873,15 +873,12 @@
 	unsigned long flags;
 	int ret;
 
-	/* If the preview engine crashed it might not respond to read/write
-	 * operations on the L4 bus. This would result in a bus fault and a
-	 * kernel oops. Refuse to start streaming in that case. This check must
-	 * be performed before the loop below to avoid starting entities if the
-	 * pipeline won't start anyway (those entities would then likely fail to
-	 * stop, making the problem worse).
+	/* Refuse to start streaming if an entity included in the pipeline has
+	 * crashed. This check must be performed before the loop below to avoid
+	 * starting entities if the pipeline won't start anyway (those entities
+	 * would then likely fail to stop, making the problem worse).
 	 */
-	if ((pipe->entities & isp->crashed) &
-	    (1U << isp->isp_prev.subdev.entity.id))
+	if (pipe->entities & isp->crashed)
 		return -EIO;
 
 	spin_lock_irqsave(&pipe->lock, flags);
@@ -1014,13 +1011,23 @@
 		else
 			ret = 0;
 
+		/* Handle stop failures. An entity that fails to stop can
+		 * usually just be restarted. Flag the stop failure nonetheless
+		 * to trigger an ISP reset the next time the device is released,
+		 * just in case.
+		 *
+		 * The preview engine is a special case. A failure to stop can
+		 * mean a hardware crash. When that happens the preview engine
+		 * won't respond to read/write operations on the L4 bus anymore,
+		 * resulting in a bus fault and a kernel oops next time it gets
+		 * accessed. Mark it as crashed to prevent pipelines including
+		 * it from being started.
+		 */
 		if (ret) {
 			dev_info(isp->dev, "Unable to stop %s\n", subdev->name);
-			/* If the entity failed to stopped, assume it has
-			 * crashed. Mark it as such, the ISP will be reset when
-			 * applications will release it.
-			 */
-			isp->crashed |= 1U << subdev->entity.id;
+			isp->stop_failure = true;
+			if (subdev == &isp->isp_prev.subdev)
+				isp->crashed |= 1U << subdev->entity.id;
 			failure = -ETIMEDOUT;
 		}
 	}
@@ -1057,6 +1064,23 @@
 }
 
 /*
+ * omap3isp_pipeline_cancel_stream - Cancel stream on a pipeline
+ * @pipe: ISP pipeline
+ *
+ * Cancelling a stream marks all buffers on all video nodes in the pipeline as
+ * erroneous and makes sure no new buffer can be queued. This function is called
+ * when a fatal error that prevents any further operation on the pipeline
+ * occurs.
+ */
+void omap3isp_pipeline_cancel_stream(struct isp_pipeline *pipe)
+{
+	if (pipe->input)
+		omap3isp_video_cancel_stream(pipe->input);
+	if (pipe->output)
+		omap3isp_video_cancel_stream(pipe->output);
+}
+
+/*
  * isp_pipeline_resume - Resume streaming on a pipeline
  * @pipe: ISP pipeline
  *
@@ -1208,6 +1232,7 @@
 		udelay(1);
 	}
 
+	isp->stop_failure = false;
 	isp->crashed = 0;
 	return 0;
 }
@@ -1619,7 +1644,7 @@
 		/* Reset the ISP if an entity has failed to stop. This is the
 		 * only way to recover from such conditions.
 		 */
-		if (isp->crashed)
+		if (isp->crashed || isp->stop_failure)
 			isp_reset(isp);
 		isp_disable_clocks(isp);
 	}
@@ -2130,28 +2155,13 @@
 	/* request the mem region for the camera registers */
 
 	mem = platform_get_resource(pdev, IORESOURCE_MEM, res);
-	if (!mem) {
-		dev_err(isp->dev, "no mem resource?\n");
-		return -ENODEV;
-	}
-
-	if (!devm_request_mem_region(isp->dev, mem->start, resource_size(mem),
-				     pdev->name)) {
-		dev_err(isp->dev,
-			"cannot reserve camera register I/O region\n");
-		return -ENODEV;
-	}
-	isp->mmio_base_phys[res] = mem->start;
-	isp->mmio_size[res] = resource_size(mem);
 
 	/* map the region */
-	isp->mmio_base[res] = devm_ioremap_nocache(isp->dev,
-						   isp->mmio_base_phys[res],
-						   isp->mmio_size[res]);
-	if (!isp->mmio_base[res]) {
-		dev_err(isp->dev, "cannot map camera register I/O region\n");
-		return -ENODEV;
-	}
+	isp->mmio_base[res] = devm_ioremap_resource(isp->dev, mem);
+	if (IS_ERR(isp->mmio_base[res]))
+		return PTR_ERR(isp->mmio_base[res]);
+
+	isp->mmio_base_phys[res] = mem->start;
 
 	return 0;
 }
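
For reference, the devm_ioremap_resource() conversion above collapses the request_mem_region/ioremap/error-printing sequence into one call; the helper also rejects a NULL resource, which is why the explicit !mem check disappears. A stand-alone sketch of the idiom (hypothetical my_probe(), not the omap3isp code):

static int my_probe(struct platform_device *pdev)
{
	struct resource *mem;
	void __iomem *base;

	mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	/* Requests the region, maps it, and prints its own error messages;
	 * a NULL resource is reported back as an ERR_PTR as well. */
	base = devm_ioremap_resource(&pdev->dev, mem);
	if (IS_ERR(base))
		return PTR_ERR(base);

	return 0;
}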
diff --git a/drivers/media/platform/omap3isp/isp.h b/drivers/media/platform/omap3isp/isp.h
index d1e857e..081f5ec 100644
--- a/drivers/media/platform/omap3isp/isp.h
+++ b/drivers/media/platform/omap3isp/isp.h
@@ -152,9 +152,9 @@
  *             regions.
  * @mmio_base_phys: Array with physical L4 bus addresses for ISP register
  *                  regions.
- * @mmio_size: Array with ISP register regions size in bytes.
  * @stat_lock: Spinlock for handling statistics
  * @isp_mutex: Mutex for serializing requests to ISP.
+ * @stop_failure: Indicates that an entity failed to stop.
  * @crashed: Bitmask of crashed entities (indexed by entity ID)
  * @has_context: Context has been saved at least once and can be restored.
  * @ref_count: Reference count for handling multiple ISP requests.
@@ -188,11 +188,11 @@
 
 	void __iomem *mmio_base[OMAP3_ISP_IOMEM_LAST];
 	unsigned long mmio_base_phys[OMAP3_ISP_IOMEM_LAST];
-	resource_size_t mmio_size[OMAP3_ISP_IOMEM_LAST];
 
 	/* ISP Obj */
 	spinlock_t stat_lock;	/* common lock for statistic drivers */
 	struct mutex isp_mutex;	/* For handling ref_count field */
+	bool stop_failure;
 	u32 crashed;
 	int has_context;
 	int ref_count;
@@ -238,6 +238,7 @@
 
 int omap3isp_pipeline_set_stream(struct isp_pipeline *pipe,
 				 enum isp_pipeline_stream_state state);
+void omap3isp_pipeline_cancel_stream(struct isp_pipeline *pipe);
 void omap3isp_configure_bridge(struct isp_device *isp,
 			       enum ccdc_input_entity input,
 			       const struct isp_parallel_platform_data *pdata,
diff --git a/drivers/media/platform/omap3isp/ispccdc.c b/drivers/media/platform/omap3isp/ispccdc.c
index 907a205..5db2c88 100644
--- a/drivers/media/platform/omap3isp/ispccdc.c
+++ b/drivers/media/platform/omap3isp/ispccdc.c
@@ -1516,6 +1516,8 @@
 
 	if (ccdc_sbl_wait_idle(ccdc, 1000)) {
 		dev_info(isp->dev, "CCDC won't become idle!\n");
+		isp->crashed |= 1U << ccdc->subdev.entity.id;
+		omap3isp_pipeline_cancel_stream(pipe);
 		goto done;
 	}
 
@@ -2484,7 +2486,8 @@
 	v4l2_set_subdevdata(sd, ccdc);
 	sd->flags |= V4L2_SUBDEV_FL_HAS_EVENTS | V4L2_SUBDEV_FL_HAS_DEVNODE;
 
-	pads[CCDC_PAD_SINK].flags = MEDIA_PAD_FL_SINK;
+	pads[CCDC_PAD_SINK].flags = MEDIA_PAD_FL_SINK
+				    | MEDIA_PAD_FL_MUST_CONNECT;
 	pads[CCDC_PAD_SOURCE_VP].flags = MEDIA_PAD_FL_SOURCE;
 	pads[CCDC_PAD_SOURCE_OF].flags = MEDIA_PAD_FL_SOURCE;
 
diff --git a/drivers/media/platform/omap3isp/ispccp2.c b/drivers/media/platform/omap3isp/ispccp2.c
index e716514..e84fe05 100644
--- a/drivers/media/platform/omap3isp/ispccp2.c
+++ b/drivers/media/platform/omap3isp/ispccp2.c
@@ -1076,7 +1076,8 @@
 	v4l2_set_subdevdata(sd, ccp2);
 	sd->flags |= V4L2_SUBDEV_FL_HAS_DEVNODE;
 
-	pads[CCP2_PAD_SINK].flags = MEDIA_PAD_FL_SINK;
+	pads[CCP2_PAD_SINK].flags = MEDIA_PAD_FL_SINK
+				    | MEDIA_PAD_FL_MUST_CONNECT;
 	pads[CCP2_PAD_SOURCE].flags = MEDIA_PAD_FL_SOURCE;
 
 	me->ops = &ccp2_media_ops;
diff --git a/drivers/media/platform/omap3isp/ispcsi2.c b/drivers/media/platform/omap3isp/ispcsi2.c
index 6db245d..62056082 100644
--- a/drivers/media/platform/omap3isp/ispcsi2.c
+++ b/drivers/media/platform/omap3isp/ispcsi2.c
@@ -1245,7 +1245,8 @@
 	sd->flags |= V4L2_SUBDEV_FL_HAS_DEVNODE;
 
 	pads[CSI2_PAD_SOURCE].flags = MEDIA_PAD_FL_SOURCE;
-	pads[CSI2_PAD_SINK].flags = MEDIA_PAD_FL_SINK;
+	pads[CSI2_PAD_SINK].flags = MEDIA_PAD_FL_SINK
+				    | MEDIA_PAD_FL_MUST_CONNECT;
 
 	me->ops = &csi2_media_ops;
 	ret = media_entity_init(me, CSI2_PADS_NUM, pads, 0);
diff --git a/drivers/media/platform/omap3isp/isppreview.c b/drivers/media/platform/omap3isp/isppreview.c
index cd8831a..1c776c1 100644
--- a/drivers/media/platform/omap3isp/isppreview.c
+++ b/drivers/media/platform/omap3isp/isppreview.c
@@ -2283,7 +2283,8 @@
 	v4l2_ctrl_handler_setup(&prev->ctrls);
 	sd->ctrl_handler = &prev->ctrls;
 
-	pads[PREV_PAD_SINK].flags = MEDIA_PAD_FL_SINK;
+	pads[PREV_PAD_SINK].flags = MEDIA_PAD_FL_SINK
+				    | MEDIA_PAD_FL_MUST_CONNECT;
 	pads[PREV_PAD_SOURCE].flags = MEDIA_PAD_FL_SOURCE;
 
 	me->ops = &preview_media_ops;
diff --git a/drivers/media/platform/omap3isp/ispqueue.c b/drivers/media/platform/omap3isp/ispqueue.c
index e15f013..5f0f8fa 100644
--- a/drivers/media/platform/omap3isp/ispqueue.c
+++ b/drivers/media/platform/omap3isp/ispqueue.c
@@ -553,8 +553,10 @@
 	switch (buf->state) {
 	case ISP_BUF_STATE_ERROR:
 		vbuf->flags |= V4L2_BUF_FLAG_ERROR;
+		/* Fallthrough */
 	case ISP_BUF_STATE_DONE:
 		vbuf->flags |= V4L2_BUF_FLAG_DONE;
+		break;
 	case ISP_BUF_STATE_QUEUED:
 	case ISP_BUF_STATE_ACTIVE:
 		vbuf->flags |= V4L2_BUF_FLAG_QUEUED;
diff --git a/drivers/media/platform/omap3isp/ispresizer.c b/drivers/media/platform/omap3isp/ispresizer.c
index d11fb26..0d36b8b 100644
--- a/drivers/media/platform/omap3isp/ispresizer.c
+++ b/drivers/media/platform/omap3isp/ispresizer.c
@@ -1532,6 +1532,20 @@
 	return 0;
 }
 
+static int resizer_link_validate(struct v4l2_subdev *sd,
+				 struct media_link *link,
+				 struct v4l2_subdev_format *source_fmt,
+				 struct v4l2_subdev_format *sink_fmt)
+{
+	struct isp_res_device *res = v4l2_get_subdevdata(sd);
+	struct isp_pipeline *pipe = to_isp_pipeline(&sd->entity);
+
+	omap3isp_resizer_max_rate(res, &pipe->max_rate);
+
+	return v4l2_subdev_link_validate_default(sd, link,
+						 source_fmt, sink_fmt);
+}
+
 /*
  * resizer_init_formats - Initialize formats on all pads
  * @sd: ISP resizer V4L2 subdevice
@@ -1570,6 +1584,7 @@
 	.set_fmt = resizer_set_format,
 	.get_selection = resizer_get_selection,
 	.set_selection = resizer_set_selection,
+	.link_validate = resizer_link_validate,
 };
 
 /* subdev operations */
@@ -1701,7 +1716,8 @@
 	v4l2_set_subdevdata(sd, res);
 	sd->flags |= V4L2_SUBDEV_FL_HAS_DEVNODE;
 
-	pads[RESZ_PAD_SINK].flags = MEDIA_PAD_FL_SINK;
+	pads[RESZ_PAD_SINK].flags = MEDIA_PAD_FL_SINK
+				    | MEDIA_PAD_FL_MUST_CONNECT;
 	pads[RESZ_PAD_SOURCE].flags = MEDIA_PAD_FL_SOURCE;
 
 	me->ops = &resizer_media_ops;
diff --git a/drivers/media/platform/omap3isp/ispstat.c b/drivers/media/platform/omap3isp/ispstat.c
index 61e17f9..a75407c 100644
--- a/drivers/media/platform/omap3isp/ispstat.c
+++ b/drivers/media/platform/omap3isp/ispstat.c
@@ -1067,7 +1067,7 @@
 	subdev->flags |= V4L2_SUBDEV_FL_HAS_EVENTS | V4L2_SUBDEV_FL_HAS_DEVNODE;
 	v4l2_set_subdevdata(subdev, stat);
 
-	stat->pad.flags = MEDIA_PAD_FL_SINK;
+	stat->pad.flags = MEDIA_PAD_FL_SINK | MEDIA_PAD_FL_MUST_CONNECT;
 	me->ops = NULL;
 
 	return media_entity_init(me, 1, &stat->pad, 0);
diff --git a/drivers/media/platform/omap3isp/ispvideo.c b/drivers/media/platform/omap3isp/ispvideo.c
index f6304bb..856fdf5 100644
--- a/drivers/media/platform/omap3isp/ispvideo.c
+++ b/drivers/media/platform/omap3isp/ispvideo.c
@@ -278,55 +278,6 @@
 	return 0;
 }
 
-/*
- * Validate a pipeline by checking both ends of all links for format
- * discrepancies.
- *
- * Compute the minimum time per frame value as the maximum of time per frame
- * limits reported by every block in the pipeline.
- *
- * Return 0 if all formats match, or -EPIPE if at least one link is found with
- * different formats on its two ends or if the pipeline doesn't start with a
- * video source (either a subdev with no input pad, or a non-subdev entity).
- */
-static int isp_video_validate_pipeline(struct isp_pipeline *pipe)
-{
-	struct isp_device *isp = pipe->output->isp;
-	struct media_pad *pad;
-	struct v4l2_subdev *subdev;
-
-	subdev = isp_video_remote_subdev(pipe->output, NULL);
-	if (subdev == NULL)
-		return -EPIPE;
-
-	while (1) {
-		/* Retrieve the sink format */
-		pad = &subdev->entity.pads[0];
-		if (!(pad->flags & MEDIA_PAD_FL_SINK))
-			break;
-
-		/* Update the maximum frame rate */
-		if (subdev == &isp->isp_res.subdev)
-			omap3isp_resizer_max_rate(&isp->isp_res,
-						  &pipe->max_rate);
-
-		/* Retrieve the source format. Return an error if no source
-		 * entity can be found, and stop checking the pipeline if the
-		 * source entity isn't a subdev.
-		 */
-		pad = media_entity_remote_pad(pad);
-		if (pad == NULL)
-			return -EPIPE;
-
-		if (media_entity_type(pad->entity) != MEDIA_ENT_T_V4L2_SUBDEV)
-			break;
-
-		subdev = media_entity_to_v4l2_subdev(pad->entity);
-	}
-
-	return 0;
-}
-
 static int
 __isp_video_get_format(struct isp_video *video, struct v4l2_format *format)
 {
@@ -460,6 +411,15 @@
 	struct isp_video *video = vfh->video;
 	unsigned long addr;
 
+	/* Refuse to prepare the buffer if the video node has registered an
+	 * error. We don't need to take any lock here as the operation is
+	 * inherently racy. The authoritative check will be performed in the
+	 * queue handler, which can't return an error; this check is just a best
+	 * effort to notify userspace as early as possible.
+	 */
+	if (unlikely(video->error))
+		return -EIO;
+
 	addr = ispmmu_vmap(video->isp, buf->sglist, buf->sglen);
 	if (IS_ERR_VALUE(addr))
 		return -EIO;
@@ -496,6 +456,12 @@
 	unsigned int empty;
 	unsigned int start;
 
+	if (unlikely(video->error)) {
+		buf->state = ISP_BUF_STATE_ERROR;
+		wake_up(&buf->wait);
+		return;
+	}
+
 	empty = list_empty(&video->dmaqueue);
 	list_add_tail(&buffer->buffer.irqlist, &video->dmaqueue);
 
@@ -618,6 +584,36 @@
 }
 
 /*
+ * omap3isp_video_cancel_stream - Cancel stream on a video node
+ * @video: ISP video object
+ *
+ * Cancelling a stream marks all buffers on the video node as erroneous and makes
+ * sure no new buffer can be queued.
+ */
+void omap3isp_video_cancel_stream(struct isp_video *video)
+{
+	struct isp_video_queue *queue = video->queue;
+	unsigned long flags;
+
+	spin_lock_irqsave(&queue->irqlock, flags);
+
+	while (!list_empty(&video->dmaqueue)) {
+		struct isp_video_buffer *buf;
+
+		buf = list_first_entry(&video->dmaqueue,
+				       struct isp_video_buffer, irqlist);
+		list_del(&buf->irqlist);
+
+		buf->state = ISP_BUF_STATE_ERROR;
+		wake_up(&buf->wait);
+	}
+
+	video->error = true;
+
+	spin_unlock_irqrestore(&queue->irqlock, flags);
+}
+
+/*
  * omap3isp_video_resume - Perform resume operation on the buffers
  * @video: ISP video object
  * @continuous: Pipeline is in single shot mode if 0 or continuous mode otherwise
@@ -1051,11 +1047,6 @@
 	if (ret < 0)
 		goto err_check_format;
 
-	/* Validate the pipeline and update its state. */
-	ret = isp_video_validate_pipeline(pipe);
-	if (ret < 0)
-		goto err_check_format;
-
 	pipe->error = false;
 
 	spin_lock_irqsave(&pipe->lock, flags);
@@ -1159,6 +1150,7 @@
 	omap3isp_video_queue_streamoff(&vfh->queue);
 	video->queue = NULL;
 	video->streaming = 0;
+	video->error = false;
 
 	if (video->isp->pdata->set_constraints)
 		video->isp->pdata->set_constraints(video->isp, false);
@@ -1332,11 +1324,13 @@
 	switch (video->type) {
 	case V4L2_BUF_TYPE_VIDEO_CAPTURE:
 		direction = "output";
-		video->pad.flags = MEDIA_PAD_FL_SINK;
+		video->pad.flags = MEDIA_PAD_FL_SINK
+				   | MEDIA_PAD_FL_MUST_CONNECT;
 		break;
 	case V4L2_BUF_TYPE_VIDEO_OUTPUT:
 		direction = "input";
-		video->pad.flags = MEDIA_PAD_FL_SOURCE;
+		video->pad.flags = MEDIA_PAD_FL_SOURCE
+				   | MEDIA_PAD_FL_MUST_CONNECT;
 		video->video.vfl_dir = VFL_DIR_TX;
 		break;
 
diff --git a/drivers/media/platform/omap3isp/ispvideo.h b/drivers/media/platform/omap3isp/ispvideo.h
index 1ad470e..4e19407 100644
--- a/drivers/media/platform/omap3isp/ispvideo.h
+++ b/drivers/media/platform/omap3isp/ispvideo.h
@@ -178,6 +178,7 @@
 	/* Pipeline state */
 	struct isp_pipeline pipe;
 	struct mutex stream_lock;	/* pipeline and stream states */
+	bool error;
 
 	/* Video buffers queue */
 	struct isp_video_queue *queue;
@@ -207,6 +208,7 @@
 			    struct v4l2_device *vdev);
 void omap3isp_video_unregister(struct isp_video *video);
 struct isp_buffer *omap3isp_video_buffer_next(struct isp_video *video);
+void omap3isp_video_cancel_stream(struct isp_video *video);
 void omap3isp_video_resume(struct isp_video *video, int continuous);
 struct media_pad *omap3isp_video_remote_pad(struct isp_video *video);
 
diff --git a/drivers/media/platform/s5p-g2d/g2d.c b/drivers/media/platform/s5p-g2d/g2d.c
index 0b29483..0fcf7d7 100644
--- a/drivers/media/platform/s5p-g2d/g2d.c
+++ b/drivers/media/platform/s5p-g2d/g2d.c
@@ -136,10 +136,9 @@
 static void g2d_buf_queue(struct vb2_buffer *vb)
 {
 	struct g2d_ctx *ctx = vb2_get_drv_priv(vb->vb2_queue);
-	v4l2_m2m_buf_queue(ctx->m2m_ctx, vb);
+	v4l2_m2m_buf_queue(ctx->fh.m2m_ctx, vb);
 }
 
-
 static struct vb2_ops g2d_qops = {
 	.queue_setup	= g2d_queue_setup,
 	.buf_prepare	= g2d_buf_prepare,
@@ -159,6 +158,7 @@
 	src_vq->mem_ops = &vb2_dma_contig_memops;
 	src_vq->buf_struct_size = sizeof(struct v4l2_m2m_buffer);
 	src_vq->timestamp_type = V4L2_BUF_FLAG_TIMESTAMP_COPY;
+	src_vq->lock = &ctx->dev->mutex;
 
 	ret = vb2_queue_init(src_vq);
 	if (ret)
@@ -171,6 +171,7 @@
 	dst_vq->mem_ops = &vb2_dma_contig_memops;
 	dst_vq->buf_struct_size = sizeof(struct v4l2_m2m_buffer);
 	dst_vq->timestamp_type = V4L2_BUF_FLAG_TIMESTAMP_COPY;
+	dst_vq->lock = &ctx->dev->mutex;
 
 	return vb2_queue_init(dst_vq);
 }
@@ -253,9 +254,9 @@
 		kfree(ctx);
 		return -ERESTARTSYS;
 	}
-	ctx->m2m_ctx = v4l2_m2m_ctx_init(dev->m2m_dev, ctx, &queue_init);
-	if (IS_ERR(ctx->m2m_ctx)) {
-		ret = PTR_ERR(ctx->m2m_ctx);
+	ctx->fh.m2m_ctx = v4l2_m2m_ctx_init(dev->m2m_dev, ctx, &queue_init);
+	if (IS_ERR(ctx->fh.m2m_ctx)) {
+		ret = PTR_ERR(ctx->fh.m2m_ctx);
 		mutex_unlock(&dev->mutex);
 		kfree(ctx);
 		return ret;
@@ -324,7 +325,7 @@
 	struct vb2_queue *vq;
 	struct g2d_frame *frm;
 
-	vq = v4l2_m2m_get_vq(ctx->m2m_ctx, f->type);
+	vq = v4l2_m2m_get_vq(ctx->fh.m2m_ctx, f->type);
 	if (!vq)
 		return -EINVAL;
 	frm = get_frame(ctx, f->type);
@@ -384,7 +385,7 @@
 	ret = vidioc_try_fmt(file, prv, f);
 	if (ret)
 		return ret;
-	vq = v4l2_m2m_get_vq(ctx->m2m_ctx, f->type);
+	vq = v4l2_m2m_get_vq(ctx->fh.m2m_ctx, f->type);
 	if (vb2_is_busy(vq)) {
 		v4l2_err(&dev->v4l2_dev, "queue (%d) bust\n", f->type);
 		return -EBUSY;
@@ -410,72 +411,6 @@
 	return 0;
 }
 
-static unsigned int g2d_poll(struct file *file, struct poll_table_struct *wait)
-{
-	struct g2d_ctx *ctx = fh2ctx(file->private_data);
-	struct g2d_dev *dev = ctx->dev;
-	unsigned int res;
-
-	mutex_lock(&dev->mutex);
-	res = v4l2_m2m_poll(file, ctx->m2m_ctx, wait);
-	mutex_unlock(&dev->mutex);
-	return res;
-}
-
-static int g2d_mmap(struct file *file, struct vm_area_struct *vma)
-{
-	struct g2d_ctx *ctx = fh2ctx(file->private_data);
-	struct g2d_dev *dev = ctx->dev;
-	int ret;
-
-	if (mutex_lock_interruptible(&dev->mutex))
-		return -ERESTARTSYS;
-	ret = v4l2_m2m_mmap(file, ctx->m2m_ctx, vma);
-	mutex_unlock(&dev->mutex);
-	return ret;
-}
-
-static int vidioc_reqbufs(struct file *file, void *priv,
-			struct v4l2_requestbuffers *reqbufs)
-{
-	struct g2d_ctx *ctx = priv;
-	return v4l2_m2m_reqbufs(file, ctx->m2m_ctx, reqbufs);
-}
-
-static int vidioc_querybuf(struct file *file, void *priv,
-			struct v4l2_buffer *buf)
-{
-	struct g2d_ctx *ctx = priv;
-	return v4l2_m2m_querybuf(file, ctx->m2m_ctx, buf);
-}
-
-static int vidioc_qbuf(struct file *file, void *priv, struct v4l2_buffer *buf)
-{
-	struct g2d_ctx *ctx = priv;
-	return v4l2_m2m_qbuf(file, ctx->m2m_ctx, buf);
-}
-
-static int vidioc_dqbuf(struct file *file, void *priv, struct v4l2_buffer *buf)
-{
-	struct g2d_ctx *ctx = priv;
-	return v4l2_m2m_dqbuf(file, ctx->m2m_ctx, buf);
-}
-
-
-static int vidioc_streamon(struct file *file, void *priv,
-					enum v4l2_buf_type type)
-{
-	struct g2d_ctx *ctx = priv;
-	return v4l2_m2m_streamon(file, ctx->m2m_ctx, type);
-}
-
-static int vidioc_streamoff(struct file *file, void *priv,
-					enum v4l2_buf_type type)
-{
-	struct g2d_ctx *ctx = priv;
-	return v4l2_m2m_streamoff(file, ctx->m2m_ctx, type);
-}
-
 static int vidioc_cropcap(struct file *file, void *priv,
 					struct v4l2_cropcap *cr)
 {
@@ -551,20 +486,6 @@
 	return 0;
 }
 
-static void g2d_lock(void *prv)
-{
-	struct g2d_ctx *ctx = prv;
-	struct g2d_dev *dev = ctx->dev;
-	mutex_lock(&dev->mutex);
-}
-
-static void g2d_unlock(void *prv)
-{
-	struct g2d_ctx *ctx = prv;
-	struct g2d_dev *dev = ctx->dev;
-	mutex_unlock(&dev->mutex);
-}
-
 static void job_abort(void *prv)
 {
 	struct g2d_ctx *ctx = prv;
@@ -589,8 +510,8 @@
 
 	dev->curr = ctx;
 
-	src = v4l2_m2m_next_src_buf(ctx->m2m_ctx);
-	dst = v4l2_m2m_next_dst_buf(ctx->m2m_ctx);
+	src = v4l2_m2m_next_src_buf(ctx->fh.m2m_ctx);
+	dst = v4l2_m2m_next_dst_buf(ctx->fh.m2m_ctx);
 
 	clk_enable(dev->gate);
 	g2d_reset(dev);
@@ -631,8 +552,8 @@
 
 	BUG_ON(ctx == NULL);
 
-	src = v4l2_m2m_src_buf_remove(ctx->m2m_ctx);
-	dst = v4l2_m2m_dst_buf_remove(ctx->m2m_ctx);
+	src = v4l2_m2m_src_buf_remove(ctx->fh.m2m_ctx);
+	dst = v4l2_m2m_dst_buf_remove(ctx->fh.m2m_ctx);
 
 	BUG_ON(src == NULL);
 	BUG_ON(dst == NULL);
@@ -642,7 +563,7 @@
 
 	v4l2_m2m_buf_done(src, VB2_BUF_STATE_DONE);
 	v4l2_m2m_buf_done(dst, VB2_BUF_STATE_DONE);
-	v4l2_m2m_job_finish(dev->m2m_dev, ctx->m2m_ctx);
+	v4l2_m2m_job_finish(dev->m2m_dev, ctx->fh.m2m_ctx);
 
 	dev->curr = NULL;
 	wake_up(&dev->irq_queue);
@@ -653,9 +574,9 @@
 	.owner		= THIS_MODULE,
 	.open		= g2d_open,
 	.release	= g2d_release,
-	.poll		= g2d_poll,
+	.poll		= v4l2_m2m_fop_poll,
 	.unlocked_ioctl	= video_ioctl2,
-	.mmap		= g2d_mmap,
+	.mmap		= v4l2_m2m_fop_mmap,
 };
 
 static const struct v4l2_ioctl_ops g2d_ioctl_ops = {
@@ -671,14 +592,13 @@
 	.vidioc_try_fmt_vid_out		= vidioc_try_fmt,
 	.vidioc_s_fmt_vid_out		= vidioc_s_fmt,
 
-	.vidioc_reqbufs			= vidioc_reqbufs,
-	.vidioc_querybuf		= vidioc_querybuf,
+	.vidioc_reqbufs			= v4l2_m2m_ioctl_reqbufs,
+	.vidioc_querybuf		= v4l2_m2m_ioctl_querybuf,
+	.vidioc_qbuf			= v4l2_m2m_ioctl_qbuf,
+	.vidioc_dqbuf			= v4l2_m2m_ioctl_dqbuf,
 
-	.vidioc_qbuf			= vidioc_qbuf,
-	.vidioc_dqbuf			= vidioc_dqbuf,
-
-	.vidioc_streamon		= vidioc_streamon,
-	.vidioc_streamoff		= vidioc_streamoff,
+	.vidioc_streamon		= v4l2_m2m_ioctl_streamon,
+	.vidioc_streamoff		= v4l2_m2m_ioctl_streamoff,
 
 	.vidioc_g_crop			= vidioc_g_crop,
 	.vidioc_s_crop			= vidioc_s_crop,
@@ -697,8 +617,6 @@
 static struct v4l2_m2m_ops g2d_m2m_ops = {
 	.device_run	= device_run,
 	.job_abort	= job_abort,
-	.lock		= g2d_lock,
-	.unlock		= g2d_unlock,
 };
 
 static const struct of_device_id exynos_g2d_match[];
diff --git a/drivers/media/platform/s5p-g2d/g2d.h b/drivers/media/platform/s5p-g2d/g2d.h
index 300ca05..b0e52ab 100644
--- a/drivers/media/platform/s5p-g2d/g2d.h
+++ b/drivers/media/platform/s5p-g2d/g2d.h
@@ -57,7 +57,6 @@
 struct g2d_ctx {
 	struct v4l2_fh fh;
 	struct g2d_dev		*dev;
-	struct v4l2_m2m_ctx	*m2m_ctx;
 	struct g2d_frame	in;
 	struct g2d_frame	out;
 	struct v4l2_ctrl	*ctrl_hflip;
diff --git a/drivers/media/platform/s5p-jpeg/Makefile b/drivers/media/platform/s5p-jpeg/Makefile
index d18cb5e..a1a9169 100644
--- a/drivers/media/platform/s5p-jpeg/Makefile
+++ b/drivers/media/platform/s5p-jpeg/Makefile
@@ -1,2 +1,2 @@
-s5p-jpeg-objs := jpeg-core.o
+s5p-jpeg-objs := jpeg-core.o jpeg-hw-exynos4.o jpeg-hw-s5p.o
 obj-$(CONFIG_VIDEO_SAMSUNG_S5P_JPEG) += s5p-jpeg.o
diff --git a/drivers/media/platform/s5p-jpeg/jpeg-core.c b/drivers/media/platform/s5p-jpeg/jpeg-core.c
index 9b88a460..7d68d0b 100644
--- a/drivers/media/platform/s5p-jpeg/jpeg-core.c
+++ b/drivers/media/platform/s5p-jpeg/jpeg-core.c
@@ -1,9 +1,10 @@
 /* linux/drivers/media/platform/s5p-jpeg/jpeg-core.c
  *
- * Copyright (c) 2011 Samsung Electronics Co., Ltd.
+ * Copyright (c) 2011-2013 Samsung Electronics Co., Ltd.
  *		http://www.samsung.com
  *
  * Author: Andrzej Pietrasiewicz <andrzej.p@samsung.com>
+ * Author: Jacek Anaszewski <j.anaszewski@samsung.com>
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 as
@@ -17,6 +18,7 @@
 #include <linux/io.h>
 #include <linux/kernel.h>
 #include <linux/module.h>
+#include <linux/of.h>
 #include <linux/platform_device.h>
 #include <linux/pm_runtime.h>
 #include <linux/slab.h>
@@ -28,41 +30,18 @@
 #include <media/videobuf2-dma-contig.h>
 
 #include "jpeg-core.h"
-#include "jpeg-hw.h"
+#include "jpeg-hw-s5p.h"
+#include "jpeg-hw-exynos4.h"
+#include "jpeg-regs.h"
 
-static struct s5p_jpeg_fmt formats_enc[] = {
+static struct s5p_jpeg_fmt sjpeg_formats[] = {
 	{
 		.name		= "JPEG JFIF",
 		.fourcc		= V4L2_PIX_FMT_JPEG,
-		.colplanes	= 1,
-		.types		= MEM2MEM_CAPTURE,
-	},
-	{
-		.name		= "YUV 4:2:2 packed, YCbYCr",
-		.fourcc		= V4L2_PIX_FMT_YUYV,
-		.depth		= 16,
-		.colplanes	= 1,
-		.types		= MEM2MEM_OUTPUT,
-	},
-	{
-		.name		= "RGB565",
-		.fourcc		= V4L2_PIX_FMT_RGB565,
-		.depth		= 16,
-		.colplanes	= 1,
-		.types		= MEM2MEM_OUTPUT,
-	},
-};
-#define NUM_FORMATS_ENC ARRAY_SIZE(formats_enc)
-
-static struct s5p_jpeg_fmt formats_dec[] = {
-	{
-		.name		= "YUV 4:2:0 planar, YCbCr",
-		.fourcc		= V4L2_PIX_FMT_YUV420,
-		.depth		= 12,
-		.colplanes	= 3,
-		.h_align	= 4,
-		.v_align	= 4,
-		.types		= MEM2MEM_CAPTURE,
+		.flags		= SJPEG_FMT_FLAG_ENC_CAPTURE |
+				  SJPEG_FMT_FLAG_DEC_OUTPUT |
+				  SJPEG_FMT_FLAG_S5P |
+				  SJPEG_FMT_FLAG_EXYNOS4,
 	},
 	{
 		.name		= "YUV 4:2:2 packed, YCbYCr",
@@ -71,27 +50,214 @@
 		.colplanes	= 1,
 		.h_align	= 4,
 		.v_align	= 3,
-		.types		= MEM2MEM_CAPTURE,
+		.flags		= SJPEG_FMT_FLAG_ENC_OUTPUT |
+				  SJPEG_FMT_FLAG_DEC_CAPTURE |
+				  SJPEG_FMT_FLAG_S5P |
+				  SJPEG_FMT_NON_RGB,
+		.subsampling	= V4L2_JPEG_CHROMA_SUBSAMPLING_422,
 	},
 	{
-		.name		= "JPEG JFIF",
-		.fourcc		= V4L2_PIX_FMT_JPEG,
+		.name		= "YUV 4:2:2 packed, YCbYCr",
+		.fourcc		= V4L2_PIX_FMT_YUYV,
+		.depth		= 16,
 		.colplanes	= 1,
-		.types		= MEM2MEM_OUTPUT,
+		.h_align	= 1,
+		.v_align	= 0,
+		.flags		= SJPEG_FMT_FLAG_ENC_OUTPUT |
+				  SJPEG_FMT_FLAG_DEC_CAPTURE |
+				  SJPEG_FMT_FLAG_EXYNOS4 |
+				  SJPEG_FMT_NON_RGB,
+		.subsampling	= V4L2_JPEG_CHROMA_SUBSAMPLING_422,
+	},
+	{
+		.name		= "YUV 4:2:2 packed, YCrYCb",
+		.fourcc		= V4L2_PIX_FMT_YVYU,
+		.depth		= 16,
+		.colplanes	= 1,
+		.h_align	= 1,
+		.v_align	= 0,
+		.flags		= SJPEG_FMT_FLAG_ENC_OUTPUT |
+				  SJPEG_FMT_FLAG_DEC_CAPTURE |
+				  SJPEG_FMT_FLAG_EXYNOS4 |
+				  SJPEG_FMT_NON_RGB,
+		.subsampling	= V4L2_JPEG_CHROMA_SUBSAMPLING_422,
+	},
+	{
+		.name		= "RGB565",
+		.fourcc		= V4L2_PIX_FMT_RGB565,
+		.depth		= 16,
+		.colplanes	= 1,
+		.h_align	= 0,
+		.v_align	= 0,
+		.flags		= SJPEG_FMT_FLAG_ENC_OUTPUT |
+				  SJPEG_FMT_FLAG_DEC_CAPTURE |
+				  SJPEG_FMT_FLAG_EXYNOS4 |
+				  SJPEG_FMT_RGB,
+		.subsampling	= V4L2_JPEG_CHROMA_SUBSAMPLING_444,
+	},
+	{
+		.name		= "RGB565",
+		.fourcc		= V4L2_PIX_FMT_RGB565,
+		.depth		= 16,
+		.colplanes	= 1,
+		.h_align	= 0,
+		.v_align	= 0,
+		.flags		= SJPEG_FMT_FLAG_ENC_OUTPUT |
+				  SJPEG_FMT_FLAG_S5P |
+				  SJPEG_FMT_RGB,
+		.subsampling	= V4L2_JPEG_CHROMA_SUBSAMPLING_444,
+	},
+	{
+		.name		= "ARGB8888, 32 bpp",
+		.fourcc		= V4L2_PIX_FMT_RGB32,
+		.depth		= 32,
+		.colplanes	= 1,
+		.h_align	= 0,
+		.v_align	= 0,
+		.flags		= SJPEG_FMT_FLAG_ENC_OUTPUT |
+				  SJPEG_FMT_FLAG_DEC_CAPTURE |
+				  SJPEG_FMT_FLAG_EXYNOS4 |
+				  SJPEG_FMT_RGB,
+		.subsampling	= V4L2_JPEG_CHROMA_SUBSAMPLING_444,
+	},
+	{
+		.name		= "YUV 4:4:4 planar, Y/CbCr",
+		.fourcc		= V4L2_PIX_FMT_NV24,
+		.depth		= 24,
+		.colplanes	= 2,
+		.h_align	= 0,
+		.v_align	= 0,
+		.flags		= SJPEG_FMT_FLAG_ENC_OUTPUT |
+				  SJPEG_FMT_FLAG_DEC_CAPTURE |
+				  SJPEG_FMT_FLAG_EXYNOS4 |
+				  SJPEG_FMT_NON_RGB,
+		.subsampling	= V4L2_JPEG_CHROMA_SUBSAMPLING_444,
+	},
+	{
+		.name		= "YUV 4:4:4 planar, Y/CrCb",
+		.fourcc		= V4L2_PIX_FMT_NV42,
+		.depth		= 24,
+		.colplanes	= 2,
+		.h_align	= 0,
+		.v_align	= 0,
+		.flags		= SJPEG_FMT_FLAG_ENC_OUTPUT |
+				  SJPEG_FMT_FLAG_DEC_CAPTURE |
+				  SJPEG_FMT_FLAG_EXYNOS4 |
+				  SJPEG_FMT_NON_RGB,
+		.subsampling	= V4L2_JPEG_CHROMA_SUBSAMPLING_444,
+	},
+	{
+		.name		= "YUV 4:2:2 planar, Y/CrCb",
+		.fourcc		= V4L2_PIX_FMT_NV61,
+		.depth		= 16,
+		.colplanes	= 2,
+		.h_align	= 1,
+		.v_align	= 0,
+		.flags		= SJPEG_FMT_FLAG_ENC_OUTPUT |
+				  SJPEG_FMT_FLAG_DEC_CAPTURE |
+				  SJPEG_FMT_FLAG_EXYNOS4 |
+				  SJPEG_FMT_NON_RGB,
+		.subsampling	= V4L2_JPEG_CHROMA_SUBSAMPLING_422,
+	},
+	{
+		.name		= "YUV 4:2:2 planar, Y/CbCr",
+		.fourcc		= V4L2_PIX_FMT_NV16,
+		.depth		= 16,
+		.colplanes	= 2,
+		.h_align	= 1,
+		.v_align	= 0,
+		.flags		= SJPEG_FMT_FLAG_ENC_OUTPUT |
+				  SJPEG_FMT_FLAG_DEC_CAPTURE |
+				  SJPEG_FMT_FLAG_EXYNOS4 |
+				  SJPEG_FMT_NON_RGB,
+		.subsampling	= V4L2_JPEG_CHROMA_SUBSAMPLING_422,
+	},
+	{
+		.name		= "YUV 4:2:0 planar, Y/CbCr",
+		.fourcc		= V4L2_PIX_FMT_NV12,
+		.depth		= 12,
+		.colplanes	= 2,
+		.h_align	= 1,
+		.v_align	= 1,
+		.flags		= SJPEG_FMT_FLAG_ENC_OUTPUT |
+				  SJPEG_FMT_FLAG_DEC_CAPTURE |
+				  SJPEG_FMT_FLAG_EXYNOS4 |
+				  SJPEG_FMT_NON_RGB,
+		.subsampling	= V4L2_JPEG_CHROMA_SUBSAMPLING_420,
+	},
+	{
+		.name		= "YUV 4:2:0 planar, Y/CbCr",
+		.fourcc		= V4L2_PIX_FMT_NV12,
+		.depth		= 12,
+		.colplanes	= 2,
+		.h_align	= 4,
+		.v_align	= 4,
+		.flags		= SJPEG_FMT_FLAG_ENC_OUTPUT |
+				  SJPEG_FMT_FLAG_DEC_CAPTURE |
+				  SJPEG_FMT_FLAG_S5P |
+				  SJPEG_FMT_NON_RGB,
+		.subsampling	= V4L2_JPEG_CHROMA_SUBSAMPLING_420,
+	},
+	{
+		.name		= "YUV 4:2:0 planar, Y/CrCb",
+		.fourcc		= V4L2_PIX_FMT_NV21,
+		.depth		= 12,
+		.colplanes	= 2,
+		.h_align	= 1,
+		.v_align	= 1,
+		.flags		= SJPEG_FMT_FLAG_ENC_OUTPUT |
+				  SJPEG_FMT_FLAG_DEC_CAPTURE |
+				  SJPEG_FMT_FLAG_EXYNOS4 |
+				  SJPEG_FMT_NON_RGB,
+		.subsampling	= V4L2_JPEG_CHROMA_SUBSAMPLING_420,
+	},
+	{
+		.name		= "YUV 4:2:0 contiguous 3-planar, Y/Cb/Cr",
+		.fourcc		= V4L2_PIX_FMT_YUV420,
+		.depth		= 12,
+		.colplanes	= 3,
+		.h_align	= 1,
+		.v_align	= 1,
+		.flags		= SJPEG_FMT_FLAG_ENC_OUTPUT |
+				  SJPEG_FMT_FLAG_DEC_CAPTURE |
+				  SJPEG_FMT_FLAG_EXYNOS4 |
+				  SJPEG_FMT_NON_RGB,
+		.subsampling	= V4L2_JPEG_CHROMA_SUBSAMPLING_420,
+	},
+	{
+		.name		= "Gray",
+		.fourcc		= V4L2_PIX_FMT_GREY,
+		.depth		= 8,
+		.colplanes	= 1,
+		.flags		= SJPEG_FMT_FLAG_ENC_OUTPUT |
+				  SJPEG_FMT_FLAG_DEC_CAPTURE |
+				  SJPEG_FMT_FLAG_EXYNOS4 |
+				  SJPEG_FMT_NON_RGB,
+		.subsampling	= V4L2_JPEG_CHROMA_SUBSAMPLING_GRAY,
 	},
 };
-#define NUM_FORMATS_DEC ARRAY_SIZE(formats_dec)
+#define SJPEG_NUM_FORMATS ARRAY_SIZE(sjpeg_formats)
 
 static const unsigned char qtbl_luminance[4][64] = {
-	{/* level 1 - high quality */
-		 8,  6,  6,  8, 12, 14, 16, 17,
-		 6,  6,  6,  8, 10, 13, 12, 15,
-		 6,  6,  7,  8, 13, 14, 18, 24,
-		 8,  8,  8, 14, 13, 19, 24, 35,
-		12, 10, 13, 13, 20, 26, 34, 39,
-		14, 13, 14, 19, 26, 34, 39, 39,
-		16, 12, 18, 24, 34, 39, 39, 39,
-		17, 15, 24, 35, 39, 39, 39, 39
+	{/*level 0 - high compression quality */
+		20, 16, 25, 39, 50, 46, 62, 68,
+		16, 18, 23, 38, 38, 53, 65, 68,
+		25, 23, 31, 38, 53, 65, 68, 68,
+		39, 38, 38, 53, 65, 68, 68, 68,
+		50, 38, 53, 65, 68, 68, 68, 68,
+		46, 53, 65, 68, 68, 68, 68, 68,
+		62, 65, 68, 68, 68, 68, 68, 68,
+		68, 68, 68, 68, 68, 68, 68, 68
+	},
+	{/* level 1 */
+		16, 11, 11, 16, 23, 27, 31, 30,
+		11, 12, 12, 15, 20, 23, 23, 30,
+		11, 12, 13, 16, 23, 26, 35, 47,
+		16, 15, 16, 23, 26, 37, 47, 64,
+		23, 20, 23, 26, 39, 51, 64, 64,
+		27, 23, 26, 37, 51, 64, 64, 64,
+		31, 23, 35, 47, 64, 64, 64, 64,
+		30, 30, 47, 64, 64, 64, 64, 64
 	},
 	{/* level 2 */
 		12,  8,  8, 12, 17, 21, 24, 23,
@@ -103,38 +269,38 @@
 		24, 18, 27, 36, 51, 59, 59, 59,
 		23, 23, 36, 53, 59, 59, 59, 59
 	},
-	{/* level 3 */
-		16, 11, 11, 16, 23, 27, 31, 30,
-		11, 12, 12, 15, 20, 23, 23, 30,
-		11, 12, 13, 16, 23, 26, 35, 47,
-		16, 15, 16, 23, 26, 37, 47, 64,
-		23, 20, 23, 26, 39, 51, 64, 64,
-		27, 23, 26, 37, 51, 64, 64, 64,
-		31, 23, 35, 47, 64, 64, 64, 64,
-		30, 30, 47, 64, 64, 64, 64, 64
-	},
-	{/*level 4 - low quality */
-		20, 16, 25, 39, 50, 46, 62, 68,
-		16, 18, 23, 38, 38, 53, 65, 68,
-		25, 23, 31, 38, 53, 65, 68, 68,
-		39, 38, 38, 53, 65, 68, 68, 68,
-		50, 38, 53, 65, 68, 68, 68, 68,
-		46, 53, 65, 68, 68, 68, 68, 68,
-		62, 65, 68, 68, 68, 68, 68, 68,
-		68, 68, 68, 68, 68, 68, 68, 68
+	{/* level 3 - low compression quality */
+		 8,  6,  6,  8, 12, 14, 16, 17,
+		 6,  6,  6,  8, 10, 13, 12, 15,
+		 6,  6,  7,  8, 13, 14, 18, 24,
+		 8,  8,  8, 14, 13, 19, 24, 35,
+		12, 10, 13, 13, 20, 26, 34, 39,
+		14, 13, 14, 19, 26, 34, 39, 39,
+		16, 12, 18, 24, 34, 39, 39, 39,
+		17, 15, 24, 35, 39, 39, 39, 39
 	}
 };
 
 static const unsigned char qtbl_chrominance[4][64] = {
-	{/* level 1 - high quality */
-		 9,  8,  9, 11, 14, 17, 19, 24,
-		 8, 10,  9, 11, 14, 13, 17, 22,
-		 9,  9, 13, 14, 13, 15, 23, 26,
-		11, 11, 14, 14, 15, 20, 26, 33,
-		14, 14, 13, 15, 20, 24, 33, 39,
-		17, 13, 15, 20, 24, 32, 39, 39,
-		19, 17, 23, 26, 33, 39, 39, 39,
-		24, 22, 26, 33, 39, 39, 39, 39
+	{/*level 0 - high compression quality */
+		21, 25, 32, 38, 54, 68, 68, 68,
+		25, 28, 24, 38, 54, 68, 68, 68,
+		32, 24, 32, 43, 66, 68, 68, 68,
+		38, 38, 43, 53, 68, 68, 68, 68,
+		54, 54, 66, 68, 68, 68, 68, 68,
+		68, 68, 68, 68, 68, 68, 68, 68,
+		68, 68, 68, 68, 68, 68, 68, 68,
+		68, 68, 68, 68, 68, 68, 68, 68
+	},
+	{/* level 1 */
+		17, 15, 17, 21, 20, 26, 38, 48,
+		15, 19, 18, 17, 20, 26, 35, 43,
+		17, 18, 20, 22, 26, 30, 46, 53,
+		21, 17, 22, 28, 30, 39, 53, 64,
+		20, 20, 26, 30, 39, 48, 64, 64,
+		26, 26, 30, 39, 48, 63, 64, 64,
+		38, 35, 46, 53, 64, 64, 64, 64,
+		48, 43, 53, 64, 64, 64, 64, 64
 	},
 	{/* level 2 */
 		13, 11, 13, 16, 20, 20, 29, 37,
@@ -146,25 +312,15 @@
 		29, 26, 35, 40, 50, 59, 59, 59,
 		37, 32, 40, 50, 59, 59, 59, 59
 	},
-	{/* level 3 */
-		17, 15, 17, 21, 20, 26, 38, 48,
-		15, 19, 18, 17, 20, 26, 35, 43,
-		17, 18, 20, 22, 26, 30, 46, 53,
-		21, 17, 22, 28, 30, 39, 53, 64,
-		20, 20, 26, 30, 39, 48, 64, 64,
-		26, 26, 30, 39, 48, 63, 64, 64,
-		38, 35, 46, 53, 64, 64, 64, 64,
-		48, 43, 53, 64, 64, 64, 64, 64
-	},
-	{/*level 4 - low quality */
-		21, 25, 32, 38, 54, 68, 68, 68,
-		25, 28, 24, 38, 54, 68, 68, 68,
-		32, 24, 32, 43, 66, 68, 68, 68,
-		38, 38, 43, 53, 68, 68, 68, 68,
-		54, 54, 66, 68, 68, 68, 68, 68,
-		68, 68, 68, 68, 68, 68, 68, 68,
-		68, 68, 68, 68, 68, 68, 68, 68,
-		68, 68, 68, 68, 68, 68, 68, 68
+	{/* level 3 - low compression quality */
+		 9,  8,  9, 11, 14, 17, 19, 24,
+		 8, 10,  9, 11, 14, 13, 17, 22,
+		 9,  9, 13, 14, 13, 15, 23, 26,
+		11, 11, 14, 14, 15, 20, 26, 33,
+		14, 14, 13, 15, 20, 24, 33, 39,
+		17, 13, 15, 20, 24, 32, 39, 39,
+		19, 17, 23, 26, 33, 39, 39, 39,
+		24, 22, 26, 33, 39, 39, 39, 39
 	}
 };
 
@@ -202,6 +358,106 @@
 	0xf9, 0xfa
 };
 
+/*
+ * Fourcc downgrade schema lookup tables for 422 and 420
+ * chroma subsampling - the fourcc at each position maps to the
+ * fourcc at the same index in fourcc_to_dwngrd_schema_id, which allows
+ * picking the most suitable fourcc counterpart for the given
+ * downgraded subsampling property.
+ */
+static const u32 subs422_fourcc_dwngrd_schema[] = {
+	V4L2_PIX_FMT_NV16,
+	V4L2_PIX_FMT_NV61,
+};
+
+static const u32 subs420_fourcc_dwngrd_schema[] = {
+	V4L2_PIX_FMT_NV12,
+	V4L2_PIX_FMT_NV21,
+	V4L2_PIX_FMT_NV12,
+	V4L2_PIX_FMT_NV21,
+	V4L2_PIX_FMT_NV12,
+	V4L2_PIX_FMT_NV21,
+	V4L2_PIX_FMT_GREY,
+	V4L2_PIX_FMT_GREY,
+	V4L2_PIX_FMT_GREY,
+	V4L2_PIX_FMT_GREY,
+};
+
+/*
+ * Lookup table for translation of a fourcc to the position
+ * of its downgraded counterpart in the *fourcc_dwngrd_schema
+ * tables.
+ */
+static const u32 fourcc_to_dwngrd_schema_id[] = {
+	V4L2_PIX_FMT_NV24,
+	V4L2_PIX_FMT_NV42,
+	V4L2_PIX_FMT_NV16,
+	V4L2_PIX_FMT_NV61,
+	V4L2_PIX_FMT_YUYV,
+	V4L2_PIX_FMT_YVYU,
+	V4L2_PIX_FMT_NV12,
+	V4L2_PIX_FMT_NV21,
+	V4L2_PIX_FMT_YUV420,
+	V4L2_PIX_FMT_GREY,
+};
+
+static int s5p_jpeg_get_dwngrd_sch_id_by_fourcc(u32 fourcc)
+{
+	int i;
+	for (i = 0; i < ARRAY_SIZE(fourcc_to_dwngrd_schema_id); ++i) {
+		if (fourcc_to_dwngrd_schema_id[i] == fourcc)
+			return i;
+	}
+
+	return -EINVAL;
+}
+
+static int s5p_jpeg_adjust_fourcc_to_subsampling(
+					enum v4l2_jpeg_chroma_subsampling subs,
+					u32 in_fourcc,
+					u32 *out_fourcc,
+					struct s5p_jpeg_ctx *ctx)
+{
+	int dwngrd_sch_id;
+
+	if (ctx->subsampling != V4L2_JPEG_CHROMA_SUBSAMPLING_GRAY) {
+		dwngrd_sch_id =
+			s5p_jpeg_get_dwngrd_sch_id_by_fourcc(in_fourcc);
+		if (dwngrd_sch_id < 0)
+			return -EINVAL;
+	}
+
+	switch (ctx->subsampling) {
+	case V4L2_JPEG_CHROMA_SUBSAMPLING_GRAY:
+		*out_fourcc = V4L2_PIX_FMT_GREY;
+		break;
+	case V4L2_JPEG_CHROMA_SUBSAMPLING_420:
+		if (dwngrd_sch_id >
+				ARRAY_SIZE(subs420_fourcc_dwngrd_schema) - 1)
+			return -EINVAL;
+		*out_fourcc = subs420_fourcc_dwngrd_schema[dwngrd_sch_id];
+		break;
+	case V4L2_JPEG_CHROMA_SUBSAMPLING_422:
+		if (dwngrd_sch_id >
+				ARRAY_SIZE(subs422_fourcc_dwngrd_schema) - 1)
+			return -EINVAL;
+		*out_fourcc = subs422_fourcc_dwngrd_schema[dwngrd_sch_id];
+		break;
+	default:
+		*out_fourcc = V4L2_PIX_FMT_GREY;
+		break;
+	}
+
+	return 0;
+}
+
+static int exynos4x12_decoded_subsampling[] = {
+	V4L2_JPEG_CHROMA_SUBSAMPLING_GRAY,
+	V4L2_JPEG_CHROMA_SUBSAMPLING_444,
+	V4L2_JPEG_CHROMA_SUBSAMPLING_422,
+	V4L2_JPEG_CHROMA_SUBSAMPLING_420,
+};
+
 static inline struct s5p_jpeg_ctx *ctrl_to_ctx(struct v4l2_ctrl *c)
 {
 	return container_of(c->handler, struct s5p_jpeg_ctx, ctrl_handler);
@@ -212,8 +468,24 @@
 	return container_of(fh, struct s5p_jpeg_ctx, fh);
 }
 
-static inline void jpeg_set_qtbl(void __iomem *regs, const unsigned char *qtbl,
-		   unsigned long tab, int len)
+static int s5p_jpeg_to_user_subsampling(struct s5p_jpeg_ctx *ctx)
+{
+	WARN_ON(ctx->subsampling > 3);
+
+	if (ctx->jpeg->variant->version == SJPEG_S5P) {
+		if (ctx->subsampling > 2)
+			return V4L2_JPEG_CHROMA_SUBSAMPLING_GRAY;
+		return ctx->subsampling;
+	} else {
+		if (ctx->subsampling > 2)
+			return V4L2_JPEG_CHROMA_SUBSAMPLING_420;
+		return exynos4x12_decoded_subsampling[ctx->subsampling];
+	}
+}
+
+static inline void s5p_jpeg_set_qtbl(void __iomem *regs,
+				     const unsigned char *qtbl,
+				     unsigned long tab, int len)
 {
 	int i;
 
@@ -221,22 +493,25 @@
 		writel((unsigned int)qtbl[i], regs + tab + (i * 0x04));
 }
 
-static inline void jpeg_set_qtbl_lum(void __iomem *regs, int quality)
+static inline void s5p_jpeg_set_qtbl_lum(void __iomem *regs, int quality)
 {
 	/* this driver fills quantisation table 0 with data for luma */
-	jpeg_set_qtbl(regs, qtbl_luminance[quality], S5P_JPG_QTBL_CONTENT(0),
-		      ARRAY_SIZE(qtbl_luminance[quality]));
+	s5p_jpeg_set_qtbl(regs, qtbl_luminance[quality],
+			  S5P_JPG_QTBL_CONTENT(0),
+			  ARRAY_SIZE(qtbl_luminance[quality]));
 }
 
-static inline void jpeg_set_qtbl_chr(void __iomem *regs, int quality)
+static inline void s5p_jpeg_set_qtbl_chr(void __iomem *regs, int quality)
 {
 	/* this driver fills quantisation table 1 with data for chroma */
-	jpeg_set_qtbl(regs, qtbl_chrominance[quality], S5P_JPG_QTBL_CONTENT(1),
-		      ARRAY_SIZE(qtbl_chrominance[quality]));
+	s5p_jpeg_set_qtbl(regs, qtbl_chrominance[quality],
+			  S5P_JPG_QTBL_CONTENT(1),
+			  ARRAY_SIZE(qtbl_chrominance[quality]));
 }
 
-static inline void jpeg_set_htbl(void __iomem *regs, const unsigned char *htbl,
-		   unsigned long tab, int len)
+static inline void s5p_jpeg_set_htbl(void __iomem *regs,
+				     const unsigned char *htbl,
+				     unsigned long tab, int len)
 {
 	int i;
 
@@ -244,28 +519,84 @@
 		writel((unsigned int)htbl[i], regs + tab + (i * 0x04));
 }
 
-static inline void jpeg_set_hdctbl(void __iomem *regs)
+static inline void s5p_jpeg_set_hdctbl(void __iomem *regs)
 {
 	/* this driver fills table 0 for this component */
-	jpeg_set_htbl(regs, hdctbl0, S5P_JPG_HDCTBL(0), ARRAY_SIZE(hdctbl0));
+	s5p_jpeg_set_htbl(regs, hdctbl0, S5P_JPG_HDCTBL(0),
+						ARRAY_SIZE(hdctbl0));
 }
 
-static inline void jpeg_set_hdctblg(void __iomem *regs)
+static inline void s5p_jpeg_set_hdctblg(void __iomem *regs)
 {
 	/* this driver fills table 0 for this component */
-	jpeg_set_htbl(regs, hdctblg0, S5P_JPG_HDCTBLG(0), ARRAY_SIZE(hdctblg0));
+	s5p_jpeg_set_htbl(regs, hdctblg0, S5P_JPG_HDCTBLG(0),
+						ARRAY_SIZE(hdctblg0));
 }
 
-static inline void jpeg_set_hactbl(void __iomem *regs)
+static inline void s5p_jpeg_set_hactbl(void __iomem *regs)
 {
 	/* this driver fills table 0 for this component */
-	jpeg_set_htbl(regs, hactbl0, S5P_JPG_HACTBL(0), ARRAY_SIZE(hactbl0));
+	s5p_jpeg_set_htbl(regs, hactbl0, S5P_JPG_HACTBL(0),
+						ARRAY_SIZE(hactbl0));
 }
 
-static inline void jpeg_set_hactblg(void __iomem *regs)
+static inline void s5p_jpeg_set_hactblg(void __iomem *regs)
 {
 	/* this driver fills table 0 for this component */
-	jpeg_set_htbl(regs, hactblg0, S5P_JPG_HACTBLG(0), ARRAY_SIZE(hactblg0));
+	s5p_jpeg_set_htbl(regs, hactblg0, S5P_JPG_HACTBLG(0),
+						ARRAY_SIZE(hactblg0));
+}
+
+static inline void exynos4_jpeg_set_tbl(void __iomem *regs,
+					const unsigned char *tbl,
+					unsigned long tab, int len)
+{
+	int i;
+	unsigned int dword;
+
+	for (i = 0; i < len; i += 4) {
+		dword = tbl[i] |
+			(tbl[i + 1] << 8) |
+			(tbl[i + 2] << 16) |
+			(tbl[i + 3] << 24);
+		writel(dword, regs + tab + i);
+	}
+}
+
+static inline void exynos4_jpeg_set_qtbl_lum(void __iomem *regs, int quality)
+{
+	/* this driver fills quantisation table 0 with data for luma */
+	exynos4_jpeg_set_tbl(regs, qtbl_luminance[quality],
+			     EXYNOS4_QTBL_CONTENT(0),
+			     ARRAY_SIZE(qtbl_luminance[quality]));
+}
+
+static inline void exynos4_jpeg_set_qtbl_chr(void __iomem *regs, int quality)
+{
+	/* this driver fills quantisation table 1 with data for chroma */
+	exynos4_jpeg_set_tbl(regs, qtbl_chrominance[quality],
+			     EXYNOS4_QTBL_CONTENT(1),
+			     ARRAY_SIZE(qtbl_chrominance[quality]));
+}
+
+void exynos4_jpeg_set_huff_tbl(void __iomem *base)
+{
+	exynos4_jpeg_set_tbl(base, hdctbl0, EXYNOS4_HUFF_TBL_HDCLL,
+							ARRAY_SIZE(hdctbl0));
+	exynos4_jpeg_set_tbl(base, hdctbl0, EXYNOS4_HUFF_TBL_HDCCL,
+							ARRAY_SIZE(hdctbl0));
+	exynos4_jpeg_set_tbl(base, hdctblg0, EXYNOS4_HUFF_TBL_HDCLV,
+							ARRAY_SIZE(hdctblg0));
+	exynos4_jpeg_set_tbl(base, hdctblg0, EXYNOS4_HUFF_TBL_HDCCV,
+							ARRAY_SIZE(hdctblg0));
+	exynos4_jpeg_set_tbl(base, hactbl0, EXYNOS4_HUFF_TBL_HACLL,
+							ARRAY_SIZE(hactbl0));
+	exynos4_jpeg_set_tbl(base, hactbl0, EXYNOS4_HUFF_TBL_HACCL,
+							ARRAY_SIZE(hactbl0));
+	exynos4_jpeg_set_tbl(base, hactblg0, EXYNOS4_HUFF_TBL_HACLV,
+							ARRAY_SIZE(hactblg0));
+	exynos4_jpeg_set_tbl(base, hactblg0, EXYNOS4_HUFF_TBL_HACCV,
+							ARRAY_SIZE(hactblg0));
 }
 
 /*
@@ -276,8 +607,8 @@
 
 static int queue_init(void *priv, struct vb2_queue *src_vq,
 		      struct vb2_queue *dst_vq);
-static struct s5p_jpeg_fmt *s5p_jpeg_find_format(unsigned int mode,
-						 __u32 pixelformat);
+static struct s5p_jpeg_fmt *s5p_jpeg_find_format(struct s5p_jpeg_ctx *ctx,
+				__u32 pixelformat, unsigned int fmt_type);
 static int s5p_jpeg_controls_create(struct s5p_jpeg_ctx *ctx);
 
 static int s5p_jpeg_open(struct file *file)
@@ -285,7 +616,7 @@
 	struct s5p_jpeg *jpeg = video_drvdata(file);
 	struct video_device *vfd = video_devdata(file);
 	struct s5p_jpeg_ctx *ctx;
-	struct s5p_jpeg_fmt *out_fmt;
+	struct s5p_jpeg_fmt *out_fmt, *cap_fmt;
 	int ret = 0;
 
 	ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
@@ -306,24 +637,31 @@
 	ctx->jpeg = jpeg;
 	if (vfd == jpeg->vfd_encoder) {
 		ctx->mode = S5P_JPEG_ENCODE;
-		out_fmt = s5p_jpeg_find_format(ctx->mode, V4L2_PIX_FMT_RGB565);
+		out_fmt = s5p_jpeg_find_format(ctx, V4L2_PIX_FMT_RGB565,
+							FMT_TYPE_OUTPUT);
+		cap_fmt = s5p_jpeg_find_format(ctx, V4L2_PIX_FMT_JPEG,
+							FMT_TYPE_CAPTURE);
 	} else {
 		ctx->mode = S5P_JPEG_DECODE;
-		out_fmt = s5p_jpeg_find_format(ctx->mode, V4L2_PIX_FMT_JPEG);
+		out_fmt = s5p_jpeg_find_format(ctx, V4L2_PIX_FMT_JPEG,
+							FMT_TYPE_OUTPUT);
+		cap_fmt = s5p_jpeg_find_format(ctx, V4L2_PIX_FMT_YUYV,
+							FMT_TYPE_CAPTURE);
 	}
 
+	ctx->fh.m2m_ctx = v4l2_m2m_ctx_init(jpeg->m2m_dev, ctx, queue_init);
+	if (IS_ERR(ctx->fh.m2m_ctx)) {
+		ret = PTR_ERR(ctx->fh.m2m_ctx);
+		goto error;
+	}
+
+	ctx->out_q.fmt = out_fmt;
+	ctx->cap_q.fmt = cap_fmt;
+
 	ret = s5p_jpeg_controls_create(ctx);
 	if (ret < 0)
 		goto error;
 
-	ctx->m2m_ctx = v4l2_m2m_ctx_init(jpeg->m2m_dev, ctx, queue_init);
-	if (IS_ERR(ctx->m2m_ctx)) {
-		ret = PTR_ERR(ctx->m2m_ctx);
-		goto error;
-	}
-
-	ctx->out_q.fmt = out_fmt;
-	ctx->cap_q.fmt = s5p_jpeg_find_format(ctx->mode, V4L2_PIX_FMT_YUYV);
 	mutex_unlock(&jpeg->lock);
 	return 0;
 
@@ -342,49 +680,23 @@
 	struct s5p_jpeg_ctx *ctx = fh_to_ctx(file->private_data);
 
 	mutex_lock(&jpeg->lock);
-	v4l2_m2m_ctx_release(ctx->m2m_ctx);
-	mutex_unlock(&jpeg->lock);
+	v4l2_m2m_ctx_release(ctx->fh.m2m_ctx);
 	v4l2_ctrl_handler_free(&ctx->ctrl_handler);
 	v4l2_fh_del(&ctx->fh);
 	v4l2_fh_exit(&ctx->fh);
 	kfree(ctx);
+	mutex_unlock(&jpeg->lock);
 
 	return 0;
 }
 
-static unsigned int s5p_jpeg_poll(struct file *file,
-				 struct poll_table_struct *wait)
-{
-	struct s5p_jpeg *jpeg = video_drvdata(file);
-	struct s5p_jpeg_ctx *ctx = fh_to_ctx(file->private_data);
-	unsigned int res;
-
-	mutex_lock(&jpeg->lock);
-	res = v4l2_m2m_poll(file, ctx->m2m_ctx, wait);
-	mutex_unlock(&jpeg->lock);
-	return res;
-}
-
-static int s5p_jpeg_mmap(struct file *file, struct vm_area_struct *vma)
-{
-	struct s5p_jpeg *jpeg = video_drvdata(file);
-	struct s5p_jpeg_ctx *ctx = fh_to_ctx(file->private_data);
-	int ret;
-
-	if (mutex_lock_interruptible(&jpeg->lock))
-		return -ERESTARTSYS;
-	ret = v4l2_m2m_mmap(file, ctx->m2m_ctx, vma);
-	mutex_unlock(&jpeg->lock);
-	return ret;
-}
-
 static const struct v4l2_file_operations s5p_jpeg_fops = {
 	.owner		= THIS_MODULE,
 	.open		= s5p_jpeg_open,
 	.release	= s5p_jpeg_release,
-	.poll		= s5p_jpeg_poll,
+	.poll		= v4l2_m2m_fop_poll,
 	.unlocked_ioctl	= video_ioctl2,
-	.mmap		= s5p_jpeg_mmap,
+	.mmap		= v4l2_m2m_fop_mmap,
 };
 
 /*
@@ -427,10 +739,11 @@
 }
 
 static bool s5p_jpeg_parse_hdr(struct s5p_jpeg_q_data *result,
-			       unsigned long buffer, unsigned long size)
+			       unsigned long buffer, unsigned long size,
+			       struct s5p_jpeg_ctx *ctx)
 {
 	int c, components, notfound;
-	unsigned int height, width, word;
+	unsigned int height, width, word, subsampling = 0;
 	long length;
 	struct s5p_jpeg_buffer jpeg_buffer;
 
@@ -469,7 +782,15 @@
 				break;
 			notfound = 0;
 
-			skip(&jpeg_buffer, components * 3);
+			if (components == 1) {
+				subsampling = 0x33;
+			} else {
+				skip(&jpeg_buffer, 1);
+				subsampling = get_byte(&jpeg_buffer);
+				skip(&jpeg_buffer, 1);
+			}
+
+			skip(&jpeg_buffer, components * 2);
 			break;
 
 		/* skip payload-less markers */
@@ -491,6 +812,24 @@
 	result->w = width;
 	result->h = height;
 	result->size = components;
+
+	switch (subsampling) {
+	case 0x11:
+		ctx->subsampling = V4L2_JPEG_CHROMA_SUBSAMPLING_444;
+		break;
+	case 0x21:
+		ctx->subsampling = V4L2_JPEG_CHROMA_SUBSAMPLING_422;
+		break;
+	case 0x22:
+		ctx->subsampling = V4L2_JPEG_CHROMA_SUBSAMPLING_420;
+		break;
+	case 0x33:
+		ctx->subsampling = V4L2_JPEG_CHROMA_SUBSAMPLING_GRAY;
+		break;
+	default:
+		return false;
+	}
+
 	return !notfound;
 }
 
@@ -521,13 +860,13 @@
 	return 0;
 }
 
-static int enum_fmt(struct s5p_jpeg_fmt *formats, int n,
+static int enum_fmt(struct s5p_jpeg_fmt *sjpeg_formats, int n,
 		    struct v4l2_fmtdesc *f, u32 type)
 {
 	int i, num = 0;
 
 	for (i = 0; i < n; ++i) {
-		if (formats[i].types & type) {
+		if (sjpeg_formats[i].flags & type) {
 			/* index-th format of type type found ? */
 			if (num == f->index)
 				break;
@@ -541,8 +880,8 @@
 	if (i >= n)
 		return -EINVAL;
 
-	strlcpy(f->description, formats[i].name, sizeof(f->description));
-	f->pixelformat = formats[i].fourcc;
+	strlcpy(f->description, sjpeg_formats[i].name, sizeof(f->description));
+	f->pixelformat = sjpeg_formats[i].fourcc;
 
 	return 0;
 }
@@ -553,10 +892,11 @@
 	struct s5p_jpeg_ctx *ctx = fh_to_ctx(priv);
 
 	if (ctx->mode == S5P_JPEG_ENCODE)
-		return enum_fmt(formats_enc, NUM_FORMATS_ENC, f,
-				MEM2MEM_CAPTURE);
+		return enum_fmt(sjpeg_formats, SJPEG_NUM_FORMATS, f,
+				SJPEG_FMT_FLAG_ENC_CAPTURE);
 
-	return enum_fmt(formats_dec, NUM_FORMATS_DEC, f, MEM2MEM_CAPTURE);
+	return enum_fmt(sjpeg_formats, SJPEG_NUM_FORMATS, f,
+					SJPEG_FMT_FLAG_DEC_CAPTURE);
 }
 
 static int s5p_jpeg_enum_fmt_vid_out(struct file *file, void *priv,
@@ -565,10 +905,11 @@
 	struct s5p_jpeg_ctx *ctx = fh_to_ctx(priv);
 
 	if (ctx->mode == S5P_JPEG_ENCODE)
-		return enum_fmt(formats_enc, NUM_FORMATS_ENC, f,
-				MEM2MEM_OUTPUT);
+		return enum_fmt(sjpeg_formats, SJPEG_NUM_FORMATS, f,
+				SJPEG_FMT_FLAG_ENC_OUTPUT);
 
-	return enum_fmt(formats_dec, NUM_FORMATS_DEC, f, MEM2MEM_OUTPUT);
+	return enum_fmt(sjpeg_formats, SJPEG_NUM_FORMATS, f,
+					SJPEG_FMT_FLAG_DEC_OUTPUT);
 }
 
 static struct s5p_jpeg_q_data *get_q_data(struct s5p_jpeg_ctx *ctx,
@@ -589,7 +930,7 @@
 	struct v4l2_pix_format *pix = &f->fmt.pix;
 	struct s5p_jpeg_ctx *ct = fh_to_ctx(priv);
 
-	vq = v4l2_m2m_get_vq(ct->m2m_ctx, f->type);
+	vq = v4l2_m2m_get_vq(ct->fh.m2m_ctx, f->type);
 	if (!vq)
 		return -EINVAL;
 
@@ -615,29 +956,35 @@
 	return 0;
 }
 
-static struct s5p_jpeg_fmt *s5p_jpeg_find_format(unsigned int mode,
-						 u32 pixelformat)
+static struct s5p_jpeg_fmt *s5p_jpeg_find_format(struct s5p_jpeg_ctx *ctx,
+				u32 pixelformat, unsigned int fmt_type)
 {
-	unsigned int k;
-	struct s5p_jpeg_fmt *formats;
-	int n;
+	unsigned int k, fmt_flag, ver_flag;
 
-	if (mode == S5P_JPEG_ENCODE) {
-		formats = formats_enc;
-		n = NUM_FORMATS_ENC;
-	} else {
-		formats = formats_dec;
-		n = NUM_FORMATS_DEC;
-	}
+	if (ctx->mode == S5P_JPEG_ENCODE)
+		fmt_flag = (fmt_type == FMT_TYPE_OUTPUT) ?
+				SJPEG_FMT_FLAG_ENC_OUTPUT :
+				SJPEG_FMT_FLAG_ENC_CAPTURE;
+	else
+		fmt_flag = (fmt_type == FMT_TYPE_OUTPUT) ?
+				SJPEG_FMT_FLAG_DEC_OUTPUT :
+				SJPEG_FMT_FLAG_DEC_CAPTURE;
 
-	for (k = 0; k < n; k++) {
-		struct s5p_jpeg_fmt *fmt = &formats[k];
-		if (fmt->fourcc == pixelformat)
+	if (ctx->jpeg->variant->version == SJPEG_S5P)
+		ver_flag = SJPEG_FMT_FLAG_S5P;
+	else
+		ver_flag = SJPEG_FMT_FLAG_EXYNOS4;
+
+	for (k = 0; k < ARRAY_SIZE(sjpeg_formats); k++) {
+		struct s5p_jpeg_fmt *fmt = &sjpeg_formats[k];
+		if (fmt->fourcc == pixelformat &&
+		    fmt->flags & fmt_flag &&
+		    fmt->flags & ver_flag) {
 			return fmt;
+		}
 	}
 
 	return NULL;
-
 }
 
 static void jpeg_bound_align_image(u32 *w, unsigned int wmin, unsigned int wmax,
@@ -673,7 +1020,7 @@
 
 	/* V4L2 specification suggests the driver corrects the format struct
 	 * if any of the dimensions is unsupported */
-	if (q_type == MEM2MEM_OUTPUT)
+	if (q_type == FMT_TYPE_OUTPUT)
 		jpeg_bound_align_image(&pix->width, S5P_JPEG_MIN_WIDTH,
 				       S5P_JPEG_MAX_WIDTH, 0,
 				       &pix->height, S5P_JPEG_MIN_HEIGHT,
@@ -695,7 +1042,7 @@
 			bpl = pix->width; /* planar */
 
 		if (fmt->colplanes == 1 && /* packed */
-		    (bpl << 3) * fmt->depth < pix->width)
+		    (bpl << 3) / fmt->depth < pix->width)
 			bpl = (pix->width * fmt->depth) >> 3;
 
 		pix->bytesperline = bpl;
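
A quick sanity check on the corrected packed-format test above: bpl is in bytes and fmt->depth in bits per pixel, so (bpl << 3) / depth is the number of pixels a line of bpl bytes can hold; when that is smaller than the requested width, bytesperline is bumped to width * depth / 8. A worked example with illustrative values:

/* RGB565: depth = 16 bits/pixel, width = 640, caller passes bpl = 1000.
 * (1000 << 3) / 16 = 500 pixels < 640, so:
 * bpl = (640 * 16) >> 3 = 1280 bytes per line.
 * The old test multiplied by depth instead of dividing, comparing
 * mismatched units (a bit count times depth against a pixel count).
 */
if ((bpl << 3) / depth < width)
	bpl = (width * depth) >> 3;
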
@@ -709,17 +1056,41 @@
 				  struct v4l2_format *f)
 {
 	struct s5p_jpeg_ctx *ctx = fh_to_ctx(priv);
+	struct v4l2_pix_format *pix = &f->fmt.pix;
 	struct s5p_jpeg_fmt *fmt;
+	int ret;
 
-	fmt = s5p_jpeg_find_format(ctx->mode, f->fmt.pix.pixelformat);
-	if (!fmt || !(fmt->types & MEM2MEM_CAPTURE)) {
+	fmt = s5p_jpeg_find_format(ctx, f->fmt.pix.pixelformat,
+						FMT_TYPE_CAPTURE);
+	if (!fmt) {
 		v4l2_err(&ctx->jpeg->v4l2_dev,
 			 "Fourcc format (0x%08x) invalid.\n",
 			 f->fmt.pix.pixelformat);
 		return -EINVAL;
 	}
 
-	return vidioc_try_fmt(f, fmt, ctx, MEM2MEM_CAPTURE);
+	/*
+	 * The exynos4x12 device requires resulting YUV image
+	 * subsampling not to be lower than the input jpeg subsampling.
+	 * If this requirement is not met then downgrade the requested
+	 * capture format to the one with subsampling equal to the input jpeg.
+	 */
+	if ((ctx->jpeg->variant->version != SJPEG_S5P) &&
+	    (ctx->mode == S5P_JPEG_DECODE) &&
+	    (fmt->flags & SJPEG_FMT_NON_RGB) &&
+	    (fmt->subsampling < ctx->subsampling)) {
+		ret = s5p_jpeg_adjust_fourcc_to_subsampling(ctx->subsampling,
+							    fmt->fourcc,
+							    &pix->pixelformat,
+							    ctx);
+		if (ret < 0)
+			pix->pixelformat = V4L2_PIX_FMT_GREY;
+
+		fmt = s5p_jpeg_find_format(ctx, pix->pixelformat,
+							FMT_TYPE_CAPTURE);
+	}
+
+	return vidioc_try_fmt(f, fmt, ctx, FMT_TYPE_CAPTURE);
 }
 
 static int s5p_jpeg_try_fmt_vid_out(struct file *file, void *priv,
@@ -728,15 +1099,16 @@
 	struct s5p_jpeg_ctx *ctx = fh_to_ctx(priv);
 	struct s5p_jpeg_fmt *fmt;
 
-	fmt = s5p_jpeg_find_format(ctx->mode, f->fmt.pix.pixelformat);
-	if (!fmt || !(fmt->types & MEM2MEM_OUTPUT)) {
+	fmt = s5p_jpeg_find_format(ctx, f->fmt.pix.pixelformat,
+						FMT_TYPE_OUTPUT);
+	if (!fmt) {
 		v4l2_err(&ctx->jpeg->v4l2_dev,
 			 "Fourcc format (0x%08x) invalid.\n",
 			 f->fmt.pix.pixelformat);
 		return -EINVAL;
 	}
 
-	return vidioc_try_fmt(f, fmt, ctx, MEM2MEM_OUTPUT);
+	return vidioc_try_fmt(f, fmt, ctx, FMT_TYPE_OUTPUT);
 }
 
 static int s5p_jpeg_s_fmt(struct s5p_jpeg_ctx *ct, struct v4l2_format *f)
@@ -744,8 +1116,10 @@
 	struct vb2_queue *vq;
 	struct s5p_jpeg_q_data *q_data = NULL;
 	struct v4l2_pix_format *pix = &f->fmt.pix;
+	struct v4l2_ctrl *ctrl_subs;
+	unsigned int f_type;
 
-	vq = v4l2_m2m_get_vq(ct->m2m_ctx, f->type);
+	vq = v4l2_m2m_get_vq(ct->fh.m2m_ctx, f->type);
 	if (!vq)
 		return -EINVAL;
 
@@ -757,7 +1131,10 @@
 		return -EBUSY;
 	}
 
-	q_data->fmt = s5p_jpeg_find_format(ct->mode, pix->pixelformat);
+	f_type = V4L2_TYPE_IS_OUTPUT(f->type) ?
+			FMT_TYPE_OUTPUT : FMT_TYPE_CAPTURE;
+
+	q_data->fmt = s5p_jpeg_find_format(ct, pix->pixelformat, f_type);
 	q_data->w = pix->width;
 	q_data->h = pix->height;
 	if (q_data->fmt->fourcc != V4L2_PIX_FMT_JPEG)
@@ -765,6 +1142,13 @@
 	else
 		q_data->size = pix->sizeimage;
 
+	if (f_type == FMT_TYPE_OUTPUT) {
+		ctrl_subs = v4l2_ctrl_find(&ct->ctrl_handler,
+					V4L2_CID_JPEG_CHROMA_SUBSAMPLING);
+		if (ctrl_subs)
+			v4l2_ctrl_s_ctrl(ctrl_subs, q_data->fmt->subsampling);
+	}
+
 	return 0;
 }
 
@@ -792,60 +1176,14 @@
 	return s5p_jpeg_s_fmt(fh_to_ctx(priv), f);
 }
 
-static int s5p_jpeg_reqbufs(struct file *file, void *priv,
-			  struct v4l2_requestbuffers *reqbufs)
-{
-	struct s5p_jpeg_ctx *ctx = fh_to_ctx(priv);
-
-	return v4l2_m2m_reqbufs(file, ctx->m2m_ctx, reqbufs);
-}
-
-static int s5p_jpeg_querybuf(struct file *file, void *priv,
-			   struct v4l2_buffer *buf)
-{
-	struct s5p_jpeg_ctx *ctx = fh_to_ctx(priv);
-
-	return v4l2_m2m_querybuf(file, ctx->m2m_ctx, buf);
-}
-
-static int s5p_jpeg_qbuf(struct file *file, void *priv, struct v4l2_buffer *buf)
-{
-	struct s5p_jpeg_ctx *ctx = fh_to_ctx(priv);
-
-	return v4l2_m2m_qbuf(file, ctx->m2m_ctx, buf);
-}
-
-static int s5p_jpeg_dqbuf(struct file *file, void *priv,
-			  struct v4l2_buffer *buf)
-{
-	struct s5p_jpeg_ctx *ctx = fh_to_ctx(priv);
-
-	return v4l2_m2m_dqbuf(file, ctx->m2m_ctx, buf);
-}
-
-static int s5p_jpeg_streamon(struct file *file, void *priv,
-			   enum v4l2_buf_type type)
-{
-	struct s5p_jpeg_ctx *ctx = fh_to_ctx(priv);
-
-	return v4l2_m2m_streamon(file, ctx->m2m_ctx, type);
-}
-
-static int s5p_jpeg_streamoff(struct file *file, void *priv,
-			    enum v4l2_buf_type type)
-{
-	struct s5p_jpeg_ctx *ctx = fh_to_ctx(priv);
-
-	return v4l2_m2m_streamoff(file, ctx->m2m_ctx, type);
-}
-
 static int s5p_jpeg_g_selection(struct file *file, void *priv,
 			 struct v4l2_selection *s)
 {
 	struct s5p_jpeg_ctx *ctx = fh_to_ctx(priv);
 
 	if (s->type != V4L2_BUF_TYPE_VIDEO_OUTPUT &&
-	    s->type != V4L2_BUF_TYPE_VIDEO_CAPTURE)
+	    s->type != V4L2_BUF_TYPE_VIDEO_CAPTURE &&
+	    ctx->jpeg->variant->version != SJPEG_S5P)
 		return -EINVAL;
 
 	/* For JPEG blob active == default == bounds */
@@ -884,12 +1222,7 @@
 	switch (ctrl->id) {
 	case V4L2_CID_JPEG_CHROMA_SUBSAMPLING:
 		spin_lock_irqsave(&jpeg->slock, flags);
-
-		WARN_ON(ctx->subsampling > S5P_SUBSAMPLING_MODE_GRAY);
-		if (ctx->subsampling > 2)
-			ctrl->val = V4L2_JPEG_CHROMA_SUBSAMPLING_GRAY;
-		else
-			ctrl->val = ctx->subsampling;
+		ctrl->val = s5p_jpeg_to_user_subsampling(ctx);
 		spin_unlock_irqrestore(&jpeg->slock, flags);
 		break;
 	}
@@ -897,6 +1230,40 @@
 	return 0;
 }
 
+static int s5p_jpeg_try_ctrl(struct v4l2_ctrl *ctrl)
+{
+	struct s5p_jpeg_ctx *ctx = ctrl_to_ctx(ctrl);
+	unsigned long flags;
+	int ret = 0;
+
+	spin_lock_irqsave(&ctx->jpeg->slock, flags);
+
+	if (ctrl->id == V4L2_CID_JPEG_CHROMA_SUBSAMPLING) {
+		if (ctx->jpeg->variant->version == SJPEG_S5P)
+			goto error_free;
+		/*
+		 * The exynos4x12 device requires input raw image fourcc
+		 * to be V4L2_PIX_FMT_GREY if gray jpeg format
+		 * is to be set.
+		 */
+		if (ctx->out_q.fmt->fourcc != V4L2_PIX_FMT_GREY &&
+		    ctrl->val == V4L2_JPEG_CHROMA_SUBSAMPLING_GRAY) {
+			ret = -EINVAL;
+			goto error_free;
+		}
+		/*
+		 * The exynos4x12 device requires resulting jpeg subsampling
+		 * not to be lower than the input raw image subsampling.
+		 */
+		if (ctx->out_q.fmt->subsampling > ctrl->val)
+			ctrl->val = ctx->out_q.fmt->subsampling;
+	}
+
+error_free:
+	spin_unlock_irqrestore(&ctx->jpeg->slock, flags);
+	return ret;
+}
+
 static int s5p_jpeg_s_ctrl(struct v4l2_ctrl *ctrl)
 {
 	struct s5p_jpeg_ctx *ctx = ctrl_to_ctx(ctrl);
@@ -906,7 +1273,7 @@
 
 	switch (ctrl->id) {
 	case V4L2_CID_JPEG_COMPRESSION_QUALITY:
-		ctx->compr_quality = S5P_JPEG_COMPR_QUAL_WORST - ctrl->val;
+		ctx->compr_quality = ctrl->val;
 		break;
 	case V4L2_CID_JPEG_RESTART_INTERVAL:
 		ctx->restart_interval = ctrl->val;
@@ -922,6 +1289,7 @@
 
 static const struct v4l2_ctrl_ops s5p_jpeg_ctrl_ops = {
 	.g_volatile_ctrl	= s5p_jpeg_g_volatile_ctrl,
+	.try_ctrl		= s5p_jpeg_try_ctrl,
 	.s_ctrl			= s5p_jpeg_s_ctrl,
 };
 
@@ -929,18 +1297,20 @@
 {
 	unsigned int mask = ~0x27; /* 444, 422, 420, GRAY */
 	struct v4l2_ctrl *ctrl;
+	int ret;
 
 	v4l2_ctrl_handler_init(&ctx->ctrl_handler, 3);
 
 	if (ctx->mode == S5P_JPEG_ENCODE) {
 		v4l2_ctrl_new_std(&ctx->ctrl_handler, &s5p_jpeg_ctrl_ops,
 				  V4L2_CID_JPEG_COMPRESSION_QUALITY,
-				  0, 3, 1, 3);
+				  0, 3, 1, S5P_JPEG_COMPR_QUAL_WORST);
 
 		v4l2_ctrl_new_std(&ctx->ctrl_handler, &s5p_jpeg_ctrl_ops,
 				  V4L2_CID_JPEG_RESTART_INTERVAL,
 				  0, 3, 0xffff, 0);
-		mask = ~0x06; /* 422, 420 */
+		if (ctx->jpeg->variant->version == SJPEG_S5P)
+			mask = ~0x06; /* 422, 420 */
 	}
 
 	ctrl = v4l2_ctrl_new_std_menu(&ctx->ctrl_handler, &s5p_jpeg_ctrl_ops,
@@ -948,13 +1318,24 @@
 				      V4L2_JPEG_CHROMA_SUBSAMPLING_GRAY, mask,
 				      V4L2_JPEG_CHROMA_SUBSAMPLING_422);
 
-	if (ctx->ctrl_handler.error)
-		return ctx->ctrl_handler.error;
+	if (ctx->ctrl_handler.error) {
+		ret = ctx->ctrl_handler.error;
+		goto error_free;
+	}
 
 	if (ctx->mode == S5P_JPEG_DECODE)
 		ctrl->flags |= V4L2_CTRL_FLAG_VOLATILE |
 			V4L2_CTRL_FLAG_READ_ONLY;
-	return 0;
+
+	ret = v4l2_ctrl_handler_setup(&ctx->ctrl_handler);
+	if (ret < 0)
+		goto error_free;
+
+	return ret;
+
+error_free:
+	v4l2_ctrl_handler_free(&ctx->ctrl_handler);
+	return ret;
 }
 
 static const struct v4l2_ioctl_ops s5p_jpeg_ioctl_ops = {
@@ -972,14 +1353,13 @@
 	.vidioc_s_fmt_vid_cap		= s5p_jpeg_s_fmt_vid_cap,
 	.vidioc_s_fmt_vid_out		= s5p_jpeg_s_fmt_vid_out,
 
-	.vidioc_reqbufs			= s5p_jpeg_reqbufs,
-	.vidioc_querybuf		= s5p_jpeg_querybuf,
+	.vidioc_reqbufs			= v4l2_m2m_ioctl_reqbufs,
+	.vidioc_querybuf		= v4l2_m2m_ioctl_querybuf,
+	.vidioc_qbuf			= v4l2_m2m_ioctl_qbuf,
+	.vidioc_dqbuf			= v4l2_m2m_ioctl_dqbuf,
 
-	.vidioc_qbuf			= s5p_jpeg_qbuf,
-	.vidioc_dqbuf			= s5p_jpeg_dqbuf,
-
-	.vidioc_streamon		= s5p_jpeg_streamon,
-	.vidioc_streamoff		= s5p_jpeg_streamoff,
+	.vidioc_streamon		= v4l2_m2m_ioctl_streamon,
+	.vidioc_streamoff		= v4l2_m2m_ioctl_streamoff,
 
 	.vidioc_g_selection		= s5p_jpeg_g_selection,
 };
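
The per-driver reqbufs/querybuf/qbuf/dqbuf/streamon/streamoff wrappers removed earlier in this file only forwarded to the mem2mem helpers; the generic v4l2_m2m_ioctl_* handlers can stand in because they reach the mem2mem context through the struct v4l2_fh stored in file->private_data, which is also why the driver switches from ctx->m2m_ctx to the fh-embedded ctx->fh.m2m_ctx throughout. A condensed sketch of the open()-time wiring this relies on (illustrative, not a verbatim copy of the driver's open(); queue_init stands for the driver's two-queue init callback, and error unwinding is omitted):

/* Sketch only: what the generic v4l2_m2m_ioctl_* handlers expect. */
v4l2_fh_init(&ctx->fh, vfd);
file->private_data = &ctx->fh;		/* helpers find the fh here */
v4l2_fh_add(&ctx->fh);

ctx->fh.m2m_ctx = v4l2_m2m_ctx_init(jpeg->m2m_dev, ctx, queue_init);
if (IS_ERR(ctx->fh.m2m_ctx))
	return PTR_ERR(ctx->fh.m2m_ctx);
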
@@ -995,74 +1375,181 @@
 	struct s5p_jpeg_ctx *ctx = priv;
 	struct s5p_jpeg *jpeg = ctx->jpeg;
 	struct vb2_buffer *src_buf, *dst_buf;
-	unsigned long src_addr, dst_addr;
+	unsigned long src_addr, dst_addr, flags;
 
-	src_buf = v4l2_m2m_next_src_buf(ctx->m2m_ctx);
-	dst_buf = v4l2_m2m_next_dst_buf(ctx->m2m_ctx);
+	spin_lock_irqsave(&ctx->jpeg->slock, flags);
+
+	src_buf = v4l2_m2m_next_src_buf(ctx->fh.m2m_ctx);
+	dst_buf = v4l2_m2m_next_dst_buf(ctx->fh.m2m_ctx);
 	src_addr = vb2_dma_contig_plane_dma_addr(src_buf, 0);
 	dst_addr = vb2_dma_contig_plane_dma_addr(dst_buf, 0);
 
-	jpeg_reset(jpeg->regs);
-	jpeg_poweron(jpeg->regs);
-	jpeg_proc_mode(jpeg->regs, ctx->mode);
+	s5p_jpeg_reset(jpeg->regs);
+	s5p_jpeg_poweron(jpeg->regs);
+	s5p_jpeg_proc_mode(jpeg->regs, ctx->mode);
 	if (ctx->mode == S5P_JPEG_ENCODE) {
 		if (ctx->out_q.fmt->fourcc == V4L2_PIX_FMT_RGB565)
-			jpeg_input_raw_mode(jpeg->regs, S5P_JPEG_RAW_IN_565);
+			s5p_jpeg_input_raw_mode(jpeg->regs,
+							S5P_JPEG_RAW_IN_565);
 		else
-			jpeg_input_raw_mode(jpeg->regs, S5P_JPEG_RAW_IN_422);
-		jpeg_subsampling_mode(jpeg->regs, ctx->subsampling);
-		jpeg_dri(jpeg->regs, ctx->restart_interval);
-		jpeg_x(jpeg->regs, ctx->out_q.w);
-		jpeg_y(jpeg->regs, ctx->out_q.h);
-		jpeg_imgadr(jpeg->regs, src_addr);
-		jpeg_jpgadr(jpeg->regs, dst_addr);
+			s5p_jpeg_input_raw_mode(jpeg->regs,
+							S5P_JPEG_RAW_IN_422);
+		s5p_jpeg_subsampling_mode(jpeg->regs, ctx->subsampling);
+		s5p_jpeg_dri(jpeg->regs, ctx->restart_interval);
+		s5p_jpeg_x(jpeg->regs, ctx->out_q.w);
+		s5p_jpeg_y(jpeg->regs, ctx->out_q.h);
+		s5p_jpeg_imgadr(jpeg->regs, src_addr);
+		s5p_jpeg_jpgadr(jpeg->regs, dst_addr);
 
 		/* ultimately comes from sizeimage from userspace */
-		jpeg_enc_stream_int(jpeg->regs, ctx->cap_q.size);
+		s5p_jpeg_enc_stream_int(jpeg->regs, ctx->cap_q.size);
 
 		/* JPEG RGB to YCbCr conversion matrix */
-		jpeg_coef(jpeg->regs, 1, 1, S5P_JPEG_COEF11);
-		jpeg_coef(jpeg->regs, 1, 2, S5P_JPEG_COEF12);
-		jpeg_coef(jpeg->regs, 1, 3, S5P_JPEG_COEF13);
-		jpeg_coef(jpeg->regs, 2, 1, S5P_JPEG_COEF21);
-		jpeg_coef(jpeg->regs, 2, 2, S5P_JPEG_COEF22);
-		jpeg_coef(jpeg->regs, 2, 3, S5P_JPEG_COEF23);
-		jpeg_coef(jpeg->regs, 3, 1, S5P_JPEG_COEF31);
-		jpeg_coef(jpeg->regs, 3, 2, S5P_JPEG_COEF32);
-		jpeg_coef(jpeg->regs, 3, 3, S5P_JPEG_COEF33);
+		s5p_jpeg_coef(jpeg->regs, 1, 1, S5P_JPEG_COEF11);
+		s5p_jpeg_coef(jpeg->regs, 1, 2, S5P_JPEG_COEF12);
+		s5p_jpeg_coef(jpeg->regs, 1, 3, S5P_JPEG_COEF13);
+		s5p_jpeg_coef(jpeg->regs, 2, 1, S5P_JPEG_COEF21);
+		s5p_jpeg_coef(jpeg->regs, 2, 2, S5P_JPEG_COEF22);
+		s5p_jpeg_coef(jpeg->regs, 2, 3, S5P_JPEG_COEF23);
+		s5p_jpeg_coef(jpeg->regs, 3, 1, S5P_JPEG_COEF31);
+		s5p_jpeg_coef(jpeg->regs, 3, 2, S5P_JPEG_COEF32);
+		s5p_jpeg_coef(jpeg->regs, 3, 3, S5P_JPEG_COEF33);
 
 		/*
 		 * JPEG IP allows storing 4 quantization tables
 		 * We fill table 0 for luma and table 1 for chroma
 		 */
-		jpeg_set_qtbl_lum(jpeg->regs, ctx->compr_quality);
-		jpeg_set_qtbl_chr(jpeg->regs, ctx->compr_quality);
+		s5p_jpeg_set_qtbl_lum(jpeg->regs, ctx->compr_quality);
+		s5p_jpeg_set_qtbl_chr(jpeg->regs, ctx->compr_quality);
 		/* use table 0 for Y */
-		jpeg_qtbl(jpeg->regs, 1, 0);
+		s5p_jpeg_qtbl(jpeg->regs, 1, 0);
 		/* use table 1 for Cb and Cr*/
-		jpeg_qtbl(jpeg->regs, 2, 1);
-		jpeg_qtbl(jpeg->regs, 3, 1);
+		s5p_jpeg_qtbl(jpeg->regs, 2, 1);
+		s5p_jpeg_qtbl(jpeg->regs, 3, 1);
 
 		/* Y, Cb, Cr use Huffman table 0 */
-		jpeg_htbl_ac(jpeg->regs, 1);
-		jpeg_htbl_dc(jpeg->regs, 1);
-		jpeg_htbl_ac(jpeg->regs, 2);
-		jpeg_htbl_dc(jpeg->regs, 2);
-		jpeg_htbl_ac(jpeg->regs, 3);
-		jpeg_htbl_dc(jpeg->regs, 3);
+		s5p_jpeg_htbl_ac(jpeg->regs, 1);
+		s5p_jpeg_htbl_dc(jpeg->regs, 1);
+		s5p_jpeg_htbl_ac(jpeg->regs, 2);
+		s5p_jpeg_htbl_dc(jpeg->regs, 2);
+		s5p_jpeg_htbl_ac(jpeg->regs, 3);
+		s5p_jpeg_htbl_dc(jpeg->regs, 3);
 	} else { /* S5P_JPEG_DECODE */
-		jpeg_rst_int_enable(jpeg->regs, true);
-		jpeg_data_num_int_enable(jpeg->regs, true);
-		jpeg_final_mcu_num_int_enable(jpeg->regs, true);
+		s5p_jpeg_rst_int_enable(jpeg->regs, true);
+		s5p_jpeg_data_num_int_enable(jpeg->regs, true);
+		s5p_jpeg_final_mcu_num_int_enable(jpeg->regs, true);
 		if (ctx->cap_q.fmt->fourcc == V4L2_PIX_FMT_YUYV)
-			jpeg_outform_raw(jpeg->regs, S5P_JPEG_RAW_OUT_422);
+			s5p_jpeg_outform_raw(jpeg->regs, S5P_JPEG_RAW_OUT_422);
 		else
-			jpeg_outform_raw(jpeg->regs, S5P_JPEG_RAW_OUT_420);
-		jpeg_jpgadr(jpeg->regs, src_addr);
-		jpeg_imgadr(jpeg->regs, dst_addr);
+			s5p_jpeg_outform_raw(jpeg->regs, S5P_JPEG_RAW_OUT_420);
+		s5p_jpeg_jpgadr(jpeg->regs, src_addr);
+		s5p_jpeg_imgadr(jpeg->regs, dst_addr);
 	}
 
-	jpeg_start(jpeg->regs);
+	s5p_jpeg_start(jpeg->regs);
+
+	spin_unlock_irqrestore(&ctx->jpeg->slock, flags);
+}
+
+static void exynos4_jpeg_set_img_addr(struct s5p_jpeg_ctx *ctx)
+{
+	struct s5p_jpeg *jpeg = ctx->jpeg;
+	struct s5p_jpeg_fmt *fmt;
+	struct vb2_buffer *vb;
+	struct s5p_jpeg_addr jpeg_addr;
+	u32 pix_size, padding_bytes = 0;
+
+	pix_size = ctx->cap_q.w * ctx->cap_q.h;
+
+	if (ctx->mode == S5P_JPEG_ENCODE) {
+		vb = v4l2_m2m_next_src_buf(ctx->fh.m2m_ctx);
+		fmt = ctx->out_q.fmt;
+		if (ctx->out_q.w % 2 && fmt->h_align > 0)
+			padding_bytes = ctx->out_q.h;
+	} else {
+		fmt = ctx->cap_q.fmt;
+		vb = v4l2_m2m_next_dst_buf(ctx->fh.m2m_ctx);
+	}
+
+	jpeg_addr.y = vb2_dma_contig_plane_dma_addr(vb, 0);
+
+	if (fmt->colplanes == 2) {
+		jpeg_addr.cb = jpeg_addr.y + pix_size - padding_bytes;
+	} else if (fmt->colplanes == 3) {
+		jpeg_addr.cb = jpeg_addr.y + pix_size;
+		if (fmt->fourcc == V4L2_PIX_FMT_YUV420)
+			jpeg_addr.cr = jpeg_addr.cb + pix_size / 4;
+		else
+			jpeg_addr.cr = jpeg_addr.cb + pix_size / 2;
+	}
+
+	exynos4_jpeg_set_frame_buf_address(jpeg->regs, &jpeg_addr);
+}
+
+static void exynos4_jpeg_set_jpeg_addr(struct s5p_jpeg_ctx *ctx)
+{
+	struct s5p_jpeg *jpeg = ctx->jpeg;
+	struct vb2_buffer *vb;
+	unsigned int jpeg_addr = 0;
+
+	if (ctx->mode == S5P_JPEG_ENCODE)
+		vb = v4l2_m2m_next_dst_buf(ctx->fh.m2m_ctx);
+	else
+		vb = v4l2_m2m_next_src_buf(ctx->fh.m2m_ctx);
+
+	jpeg_addr = vb2_dma_contig_plane_dma_addr(vb, 0);
+	exynos4_jpeg_set_stream_buf_address(jpeg->regs, jpeg_addr);
+}
+
+static void exynos4_jpeg_device_run(void *priv)
+{
+	struct s5p_jpeg_ctx *ctx = priv;
+	struct s5p_jpeg *jpeg = ctx->jpeg;
+	unsigned int bitstream_size;
+	unsigned long flags;
+
+	spin_lock_irqsave(&ctx->jpeg->slock, flags);
+
+	if (ctx->mode == S5P_JPEG_ENCODE) {
+		exynos4_jpeg_sw_reset(jpeg->regs);
+		exynos4_jpeg_set_interrupt(jpeg->regs);
+		exynos4_jpeg_set_huf_table_enable(jpeg->regs, 1);
+
+		exynos4_jpeg_set_huff_tbl(jpeg->regs);
+
+		/*
+		 * JPEG IP allows storing 4 quantization tables
+		 * We fill table 0 for luma and table 1 for chroma
+		 */
+		exynos4_jpeg_set_qtbl_lum(jpeg->regs, ctx->compr_quality);
+		exynos4_jpeg_set_qtbl_chr(jpeg->regs, ctx->compr_quality);
+
+		exynos4_jpeg_set_encode_tbl_select(jpeg->regs,
+							ctx->compr_quality);
+		exynos4_jpeg_set_stream_size(jpeg->regs, ctx->cap_q.w,
+							ctx->cap_q.h);
+
+		exynos4_jpeg_set_enc_out_fmt(jpeg->regs, ctx->subsampling);
+		exynos4_jpeg_set_img_fmt(jpeg->regs, ctx->out_q.fmt->fourcc);
+		exynos4_jpeg_set_img_addr(ctx);
+		exynos4_jpeg_set_jpeg_addr(ctx);
+		exynos4_jpeg_set_encode_hoff_cnt(jpeg->regs,
+							ctx->out_q.fmt->fourcc);
+	} else {
+		exynos4_jpeg_sw_reset(jpeg->regs);
+		exynos4_jpeg_set_interrupt(jpeg->regs);
+		exynos4_jpeg_set_img_addr(ctx);
+		exynos4_jpeg_set_jpeg_addr(ctx);
+		exynos4_jpeg_set_img_fmt(jpeg->regs, ctx->cap_q.fmt->fourcc);
+
+		bitstream_size = DIV_ROUND_UP(ctx->out_q.size, 32);
+
+		exynos4_jpeg_set_dec_bitstream_size(jpeg->regs, bitstream_size);
+	}
+
+	exynos4_jpeg_set_enc_dec_mode(jpeg->regs, ctx->mode);
+
+	spin_unlock_irqrestore(&ctx->jpeg->slock, flags);
 }
 
 static int s5p_jpeg_job_ready(void *priv)
@@ -1082,6 +1569,12 @@
 	.device_run	= s5p_jpeg_device_run,
 	.job_ready	= s5p_jpeg_job_ready,
 	.job_abort	= s5p_jpeg_job_abort,
+};
+
+static struct v4l2_m2m_ops exynos_jpeg_m2m_ops = {
+	.device_run	= exynos4_jpeg_device_run,
+	.job_ready	= s5p_jpeg_job_ready,
+	.job_abort	= s5p_jpeg_job_abort,
 };
 
 /*
@@ -1149,7 +1642,7 @@
 		ctx->hdr_parsed = s5p_jpeg_parse_hdr(&tmp,
 		     (unsigned long)vb2_plane_vaddr(vb, 0),
 		     min((unsigned long)ctx->out_q.size,
-			 vb2_get_plane_payload(vb, 0)));
+			 vb2_get_plane_payload(vb, 0)), ctx);
 		if (!ctx->hdr_parsed) {
 			vb2_buffer_done(vb, VB2_BUF_STATE_ERROR);
 			return;
@@ -1162,30 +1655,9 @@
 		q_data = &ctx->cap_q;
 		q_data->w = tmp.w;
 		q_data->h = tmp.h;
-
-		jpeg_bound_align_image(&q_data->w, S5P_JPEG_MIN_WIDTH,
-				       S5P_JPEG_MAX_WIDTH, q_data->fmt->h_align,
-				       &q_data->h, S5P_JPEG_MIN_HEIGHT,
-				       S5P_JPEG_MAX_HEIGHT, q_data->fmt->v_align
-				      );
-		q_data->size = q_data->w * q_data->h * q_data->fmt->depth >> 3;
 	}
-	if (ctx->m2m_ctx)
-		v4l2_m2m_buf_queue(ctx->m2m_ctx, vb);
-}
 
-static void s5p_jpeg_wait_prepare(struct vb2_queue *vq)
-{
-	struct s5p_jpeg_ctx *ctx = vb2_get_drv_priv(vq);
-
-	mutex_unlock(&ctx->jpeg->lock);
-}
-
-static void s5p_jpeg_wait_finish(struct vb2_queue *vq)
-{
-	struct s5p_jpeg_ctx *ctx = vb2_get_drv_priv(vq);
-
-	mutex_lock(&ctx->jpeg->lock);
+	v4l2_m2m_buf_queue(ctx->fh.m2m_ctx, vb);
 }
 
 static int s5p_jpeg_start_streaming(struct vb2_queue *q, unsigned int count)
@@ -1211,8 +1683,8 @@
 	.queue_setup		= s5p_jpeg_queue_setup,
 	.buf_prepare		= s5p_jpeg_buf_prepare,
 	.buf_queue		= s5p_jpeg_buf_queue,
-	.wait_prepare		= s5p_jpeg_wait_prepare,
-	.wait_finish		= s5p_jpeg_wait_finish,
+	.wait_prepare		= vb2_ops_wait_prepare,
+	.wait_finish		= vb2_ops_wait_finish,
 	.start_streaming	= s5p_jpeg_start_streaming,
 	.stop_streaming		= s5p_jpeg_stop_streaming,
 };
@@ -1230,6 +1702,7 @@
 	src_vq->ops = &s5p_jpeg_qops;
 	src_vq->mem_ops = &vb2_dma_contig_memops;
 	src_vq->timestamp_type = V4L2_BUF_FLAG_TIMESTAMP_COPY;
+	src_vq->lock = &ctx->jpeg->lock;
 
 	ret = vb2_queue_init(src_vq);
 	if (ret)
@@ -1242,6 +1715,7 @@
 	dst_vq->ops = &s5p_jpeg_qops;
 	dst_vq->mem_ops = &vb2_dma_contig_memops;
 	dst_vq->timestamp_type = V4L2_BUF_FLAG_TIMESTAMP_COPY;
+	dst_vq->lock = &ctx->jpeg->lock;
 
 	return vb2_queue_init(dst_vq);
 }
@@ -1267,26 +1741,27 @@
 
 	curr_ctx = v4l2_m2m_get_curr_priv(jpeg->m2m_dev);
 
-	src_buf = v4l2_m2m_src_buf_remove(curr_ctx->m2m_ctx);
-	dst_buf = v4l2_m2m_dst_buf_remove(curr_ctx->m2m_ctx);
+	src_buf = v4l2_m2m_src_buf_remove(curr_ctx->fh.m2m_ctx);
+	dst_buf = v4l2_m2m_dst_buf_remove(curr_ctx->fh.m2m_ctx);
 
 	if (curr_ctx->mode == S5P_JPEG_ENCODE)
-		enc_jpeg_too_large = jpeg_enc_stream_stat(jpeg->regs);
-	timer_elapsed = jpeg_timer_stat(jpeg->regs);
-	op_completed = jpeg_result_stat_ok(jpeg->regs);
+		enc_jpeg_too_large = s5p_jpeg_enc_stream_stat(jpeg->regs);
+	timer_elapsed = s5p_jpeg_timer_stat(jpeg->regs);
+	op_completed = s5p_jpeg_result_stat_ok(jpeg->regs);
 	if (curr_ctx->mode == S5P_JPEG_DECODE)
-		op_completed = op_completed && jpeg_stream_stat_ok(jpeg->regs);
+		op_completed = op_completed &&
+					s5p_jpeg_stream_stat_ok(jpeg->regs);
 
 	if (enc_jpeg_too_large) {
 		state = VB2_BUF_STATE_ERROR;
-		jpeg_clear_enc_stream_stat(jpeg->regs);
+		s5p_jpeg_clear_enc_stream_stat(jpeg->regs);
 	} else if (timer_elapsed) {
 		state = VB2_BUF_STATE_ERROR;
-		jpeg_clear_timer_stat(jpeg->regs);
+		s5p_jpeg_clear_timer_stat(jpeg->regs);
 	} else if (!op_completed) {
 		state = VB2_BUF_STATE_ERROR;
 	} else {
-		payload_size = jpeg_compressed_size(jpeg->regs);
+		payload_size = s5p_jpeg_compressed_size(jpeg->regs);
 	}
 
 	dst_buf->v4l2_buf.timecode = src_buf->v4l2_buf.timecode;
@@ -1296,16 +1771,79 @@
 	if (curr_ctx->mode == S5P_JPEG_ENCODE)
 		vb2_set_plane_payload(dst_buf, 0, payload_size);
 	v4l2_m2m_buf_done(dst_buf, state);
-	v4l2_m2m_job_finish(jpeg->m2m_dev, curr_ctx->m2m_ctx);
+	v4l2_m2m_job_finish(jpeg->m2m_dev, curr_ctx->fh.m2m_ctx);
 
-	curr_ctx->subsampling = jpeg_get_subsampling_mode(jpeg->regs);
+	curr_ctx->subsampling = s5p_jpeg_get_subsampling_mode(jpeg->regs);
 	spin_unlock(&jpeg->slock);
 
-	jpeg_clear_int(jpeg->regs);
+	s5p_jpeg_clear_int(jpeg->regs);
 
 	return IRQ_HANDLED;
 }
 
+static irqreturn_t exynos4_jpeg_irq(int irq, void *priv)
+{
+	unsigned int int_status;
+	struct vb2_buffer *src_vb, *dst_vb;
+	struct s5p_jpeg *jpeg = priv;
+	struct s5p_jpeg_ctx *curr_ctx;
+	unsigned long payload_size = 0;
+
+	spin_lock(&jpeg->slock);
+
+	curr_ctx = v4l2_m2m_get_curr_priv(jpeg->m2m_dev);
+
+	src_vb = v4l2_m2m_src_buf_remove(curr_ctx->fh.m2m_ctx);
+	dst_vb = v4l2_m2m_dst_buf_remove(curr_ctx->fh.m2m_ctx);
+
+	int_status = exynos4_jpeg_get_int_status(jpeg->regs);
+
+	if (int_status) {
+		switch (int_status & 0x1f) {
+		case 0x1:
+			jpeg->irq_ret = ERR_PROT;
+			break;
+		case 0x2:
+			jpeg->irq_ret = OK_ENC_OR_DEC;
+			break;
+		case 0x4:
+			jpeg->irq_ret = ERR_DEC_INVALID_FORMAT;
+			break;
+		case 0x8:
+			jpeg->irq_ret = ERR_MULTI_SCAN;
+			break;
+		case 0x10:
+			jpeg->irq_ret = ERR_FRAME;
+			break;
+		default:
+			jpeg->irq_ret = ERR_UNKNOWN;
+			break;
+		}
+	} else {
+		jpeg->irq_ret = ERR_UNKNOWN;
+	}
+
+	if (jpeg->irq_ret == OK_ENC_OR_DEC) {
+		if (curr_ctx->mode == S5P_JPEG_ENCODE) {
+			payload_size = exynos4_jpeg_get_stream_size(jpeg->regs);
+			vb2_set_plane_payload(dst_vb, 0, payload_size);
+		}
+		v4l2_m2m_buf_done(src_vb, VB2_BUF_STATE_DONE);
+		v4l2_m2m_buf_done(dst_vb, VB2_BUF_STATE_DONE);
+	} else {
+		v4l2_m2m_buf_done(src_vb, VB2_BUF_STATE_ERROR);
+		v4l2_m2m_buf_done(dst_vb, VB2_BUF_STATE_ERROR);
+	}
+
+	v4l2_m2m_job_finish(jpeg->m2m_dev, curr_ctx->fh.m2m_ctx);
+	curr_ctx->subsampling = exynos4_jpeg_get_frame_fmt(jpeg->regs);
+
+	spin_unlock(&jpeg->slock);
+	return IRQ_HANDLED;
+}
+
+static void *jpeg_get_drv_data(struct platform_device *pdev);
+
 /*
  * ============================================================================
  * Driver basic infrastructure
@@ -1316,13 +1854,19 @@
 {
 	struct s5p_jpeg *jpeg;
 	struct resource *res;
+	struct v4l2_m2m_ops *samsung_jpeg_m2m_ops;
 	int ret;
 
+	if (!pdev->dev.of_node)
+		return -ENODEV;
+
 	/* JPEG IP abstraction struct */
 	jpeg = devm_kzalloc(&pdev->dev, sizeof(struct s5p_jpeg), GFP_KERNEL);
 	if (!jpeg)
 		return -ENOMEM;
 
+	jpeg->variant = jpeg_get_drv_data(pdev);
+
 	mutex_init(&jpeg->lock);
 	spin_lock_init(&jpeg->slock);
 	jpeg->dev = &pdev->dev;
@@ -1341,8 +1885,8 @@
 		return ret;
 	}
 
-	ret = devm_request_irq(&pdev->dev, jpeg->irq, s5p_jpeg_irq, 0,
-			dev_name(&pdev->dev), jpeg);
+	ret = devm_request_irq(&pdev->dev, jpeg->irq, jpeg->variant->jpeg_irq,
+				0, dev_name(&pdev->dev), jpeg);
 	if (ret) {
 		dev_err(&pdev->dev, "cannot claim IRQ %d\n", jpeg->irq);
 		return ret;
@@ -1356,7 +1900,6 @@
 		return ret;
 	}
 	dev_dbg(&pdev->dev, "clock source %p\n", jpeg->clk);
-	clk_prepare_enable(jpeg->clk);
 
 	/* v4l2 device */
 	ret = v4l2_device_register(&pdev->dev, &jpeg->v4l2_dev);
@@ -1365,8 +1908,13 @@
 		goto clk_get_rollback;
 	}
 
+	if (jpeg->variant->version == SJPEG_S5P)
+		samsung_jpeg_m2m_ops = &s5p_jpeg_m2m_ops;
+	else
+		samsung_jpeg_m2m_ops = &exynos_jpeg_m2m_ops;
+
 	/* mem2mem device */
-	jpeg->m2m_dev = v4l2_m2m_init(&s5p_jpeg_m2m_ops);
+	jpeg->m2m_dev = v4l2_m2m_init(samsung_jpeg_m2m_ops);
 	if (IS_ERR(jpeg->m2m_dev)) {
 		v4l2_err(&jpeg->v4l2_dev, "Failed to init mem2mem device\n");
 		ret = PTR_ERR(jpeg->m2m_dev);
@@ -1387,8 +1935,8 @@
 		ret = -ENOMEM;
 		goto vb2_allocator_rollback;
 	}
-	strlcpy(jpeg->vfd_encoder->name, S5P_JPEG_M2M_NAME,
-		sizeof(jpeg->vfd_encoder->name));
+	snprintf(jpeg->vfd_encoder->name, sizeof(jpeg->vfd_encoder->name),
+				"%s-enc", S5P_JPEG_M2M_NAME);
 	jpeg->vfd_encoder->fops		= &s5p_jpeg_fops;
 	jpeg->vfd_encoder->ioctl_ops	= &s5p_jpeg_ioctl_ops;
 	jpeg->vfd_encoder->minor	= -1;
@@ -1415,8 +1963,8 @@
 		ret = -ENOMEM;
 		goto enc_vdev_register_rollback;
 	}
-	strlcpy(jpeg->vfd_decoder->name, S5P_JPEG_M2M_NAME,
-		sizeof(jpeg->vfd_decoder->name));
+	snprintf(jpeg->vfd_decoder->name, sizeof(jpeg->vfd_decoder->name),
+				"%s-dec", S5P_JPEG_M2M_NAME);
 	jpeg->vfd_decoder->fops		= &s5p_jpeg_fops;
 	jpeg->vfd_decoder->ioctl_ops	= &s5p_jpeg_ioctl_ops;
 	jpeg->vfd_decoder->minor	= -1;
@@ -1464,7 +2012,6 @@
 	v4l2_device_unregister(&jpeg->v4l2_dev);
 
 clk_get_rollback:
-	clk_disable_unprepare(jpeg->clk);
 	clk_put(jpeg->clk);
 
 	return ret;
@@ -1484,7 +2031,9 @@
 	v4l2_m2m_release(jpeg->m2m_dev);
 	v4l2_device_unregister(&jpeg->v4l2_dev);
 
-	clk_disable_unprepare(jpeg->clk);
+	if (!pm_runtime_status_suspended(&pdev->dev))
+		clk_disable_unprepare(jpeg->clk);
+
 	clk_put(jpeg->clk);
 
 	return 0;
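
Worth spelling out why the clk_disable_unprepare() in remove() became conditional: with the clock now gated from the runtime PM callbacks, a runtime-suspended device has already dropped its enable count to zero, and disabling it again would unbalance the prepare/enable count and draw a warning from the clock core. A condensed restatement, with illustrative comments added around the two lines from the hunk above:

/*
 * runtime_resume():  clk_prepare_enable()    -> enable count 1
 * runtime_suspend(): clk_disable_unprepare() -> enable count 0
 *
 * So on remove(), only drop the clock if the device is still
 * runtime-active; otherwise the count would go negative.
 */
if (!pm_runtime_status_suspended(&pdev->dev))
	clk_disable_unprepare(jpeg->clk);
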
@@ -1492,41 +2041,119 @@
 
 static int s5p_jpeg_runtime_suspend(struct device *dev)
 {
+	struct s5p_jpeg *jpeg = dev_get_drvdata(dev);
+
+	clk_disable_unprepare(jpeg->clk);
+
 	return 0;
 }
 
 static int s5p_jpeg_runtime_resume(struct device *dev)
 {
 	struct s5p_jpeg *jpeg = dev_get_drvdata(dev);
+	unsigned long flags;
+	int ret;
+
+	ret = clk_prepare_enable(jpeg->clk);
+	if (ret < 0)
+		return ret;
+
+	spin_lock_irqsave(&jpeg->slock, flags);
+
 	/*
 	 * JPEG IP allows storing two Huffman tables for each component
-	 * We fill table 0 for each component
+	 * We fill table 0 for each component and do this here only
+	 * for S5PC210 device as Exynos4x12 requires programming its
+	 * Huffman tables each time the encoding process is initialized.
 	 */
-	jpeg_set_hdctbl(jpeg->regs);
-	jpeg_set_hdctblg(jpeg->regs);
-	jpeg_set_hactbl(jpeg->regs);
-	jpeg_set_hactblg(jpeg->regs);
+	if (jpeg->variant->version == SJPEG_S5P) {
+		s5p_jpeg_set_hdctbl(jpeg->regs);
+		s5p_jpeg_set_hdctblg(jpeg->regs);
+		s5p_jpeg_set_hactbl(jpeg->regs);
+		s5p_jpeg_set_hactblg(jpeg->regs);
+	}
+
+	spin_unlock_irqrestore(&jpeg->slock, flags);
+
 	return 0;
 }
 
+static int s5p_jpeg_suspend(struct device *dev)
+{
+	if (pm_runtime_suspended(dev))
+		return 0;
+
+	return s5p_jpeg_runtime_suspend(dev);
+}
+
+static int s5p_jpeg_resume(struct device *dev)
+{
+	if (pm_runtime_suspended(dev))
+		return 0;
+
+	return s5p_jpeg_runtime_resume(dev);
+}
+
 static const struct dev_pm_ops s5p_jpeg_pm_ops = {
-	.runtime_suspend = s5p_jpeg_runtime_suspend,
-	.runtime_resume	 = s5p_jpeg_runtime_resume,
+	SET_SYSTEM_SLEEP_PM_OPS(s5p_jpeg_suspend, s5p_jpeg_resume)
+	SET_RUNTIME_PM_OPS(s5p_jpeg_runtime_suspend, s5p_jpeg_runtime_resume, NULL)
 };
 
+#ifdef CONFIG_OF
+static struct s5p_jpeg_variant s5p_jpeg_drvdata = {
+	.version	= SJPEG_S5P,
+	.jpeg_irq	= s5p_jpeg_irq,
+};
+
+static struct s5p_jpeg_variant exynos4_jpeg_drvdata = {
+	.version	= SJPEG_EXYNOS4,
+	.jpeg_irq	= exynos4_jpeg_irq,
+};
+
+static const struct of_device_id samsung_jpeg_match[] = {
+	{
+		.compatible = "samsung,s5pv210-jpeg",
+		.data = &s5p_jpeg_drvdata,
+	}, {
+		.compatible = "samsung,exynos4210-jpeg",
+		.data = &s5p_jpeg_drvdata,
+	}, {
+		.compatible = "samsung,exynos4212-jpeg",
+		.data = &exynos4_jpeg_drvdata,
+	},
+	{},
+};
+
+MODULE_DEVICE_TABLE(of, samsung_jpeg_match);
+
+static void *jpeg_get_drv_data(struct platform_device *pdev)
+{
+	struct s5p_jpeg_variant *driver_data = NULL;
+	const struct of_device_id *match;
+
+	match = of_match_node(of_match_ptr(samsung_jpeg_match),
+					 pdev->dev.of_node);
+	if (match)
+		driver_data = (struct s5p_jpeg_variant *)match->data;
+
+	return driver_data;
+}
+#endif
+
 static struct platform_driver s5p_jpeg_driver = {
 	.probe = s5p_jpeg_probe,
 	.remove = s5p_jpeg_remove,
 	.driver = {
-		.owner = THIS_MODULE,
-		.name = S5P_JPEG_M2M_NAME,
-		.pm = &s5p_jpeg_pm_ops,
+		.of_match_table	= of_match_ptr(samsung_jpeg_match),
+		.owner		= THIS_MODULE,
+		.name		= S5P_JPEG_M2M_NAME,
+		.pm		= &s5p_jpeg_pm_ops,
 	},
 };
 
 module_platform_driver(s5p_jpeg_driver);
 
 MODULE_AUTHOR("Andrzej Pietrasiewicz <andrzej.p@samsung.com>");
+MODULE_AUTHOR("Jacek Anaszewski <j.anaszewski@samsung.com>");
 MODULE_DESCRIPTION("Samsung JPEG codec driver");
 MODULE_LICENSE("GPL");
-
diff --git a/drivers/media/platform/s5p-jpeg/jpeg-core.h b/drivers/media/platform/s5p-jpeg/jpeg-core.h
index 8a4013e..f482dbf 100644
--- a/drivers/media/platform/s5p-jpeg/jpeg-core.h
+++ b/drivers/media/platform/s5p-jpeg/jpeg-core.h
@@ -13,6 +13,7 @@
 #ifndef JPEG_CORE_H_
 #define JPEG_CORE_H_
 
+#include <linux/interrupt.h>
 #include <media/v4l2-device.h>
 #include <media/v4l2-fh.h>
 #include <media/v4l2-ctrls.h>
@@ -43,8 +44,45 @@
 #define DHP				0xde
 
 /* Flags that indicate a format can be used for capture/output */
-#define MEM2MEM_CAPTURE			(1 << 0)
-#define MEM2MEM_OUTPUT			(1 << 1)
+#define SJPEG_FMT_FLAG_ENC_CAPTURE	(1 << 0)
+#define SJPEG_FMT_FLAG_ENC_OUTPUT	(1 << 1)
+#define SJPEG_FMT_FLAG_DEC_CAPTURE	(1 << 2)
+#define SJPEG_FMT_FLAG_DEC_OUTPUT	(1 << 3)
+#define SJPEG_FMT_FLAG_S5P		(1 << 4)
+#define SJPEG_FMT_FLAG_EXYNOS4		(1 << 5)
+#define SJPEG_FMT_RGB			(1 << 6)
+#define SJPEG_FMT_NON_RGB		(1 << 7)
+
+#define S5P_JPEG_ENCODE		0
+#define S5P_JPEG_DECODE		1
+
+#define FMT_TYPE_OUTPUT		0
+#define FMT_TYPE_CAPTURE	1
+
+#define SJPEG_SUBSAMPLING_444	0x11
+#define SJPEG_SUBSAMPLING_422	0x21
+#define SJPEG_SUBSAMPLING_420	0x22
+
+/* Version numbers */
+
+#define SJPEG_S5P	1
+#define SJPEG_EXYNOS4	2
+
+enum exynos4_jpeg_result {
+	OK_ENC_OR_DEC,
+	ERR_PROT,
+	ERR_DEC_INVALID_FORMAT,
+	ERR_MULTI_SCAN,
+	ERR_FRAME,
+	ERR_UNKNOWN,
+};
+
+enum  exynos4_jpeg_img_quality_level {
+	QUALITY_LEVEL_1 = 0,	/* high */
+	QUALITY_LEVEL_2,
+	QUALITY_LEVEL_3,
+	QUALITY_LEVEL_4,	/* low */
+};
 
 /**
  * struct s5p_jpeg - JPEG IP abstraction
@@ -71,9 +109,16 @@
 
 	void __iomem		*regs;
 	unsigned int		irq;
+	enum exynos4_jpeg_result irq_ret;
 	struct clk		*clk;
 	struct device		*dev;
 	void			*alloc_ctx;
+	struct s5p_jpeg_variant *variant;
+};
+
+struct s5p_jpeg_variant {
+	unsigned int	version;
+	irqreturn_t	(*jpeg_irq)(int irq, void *priv);
 };
 
 /**
@@ -84,16 +129,18 @@
  * @colplanes:	number of color planes (1 for packed formats)
  * @h_align:	horizontal alignment order (align to 2^h_align)
  * @v_align:	vertical alignment order (align to 2^v_align)
- * @types:	types of queue this format is applicable to
+ * @flags:	flags describing format applicability
  */
 struct s5p_jpeg_fmt {
 	char	*name;
 	u32	fourcc;
 	int	depth;
 	int	colplanes;
+	int	memplanes;
 	int	h_align;
 	int	v_align;
-	u32	types;
+	int	subsampling;
+	u32	flags;
 };
 
 /**
@@ -115,7 +162,6 @@
  * @jpeg:		JPEG IP device for this context
  * @mode:		compression (encode) operation or decompression (decode)
  * @compr_quality:	destination image quality in compression (encode) mode
- * @m2m_ctx:		mem2mem device context
  * @out_q:		source (output) queue information
  * @cap_fmt:		destination (capture) queue queue information
  * @hdr_parsed:		set if header has been parsed during decompression
@@ -127,7 +173,6 @@
 	unsigned short		compr_quality;
 	unsigned short		restart_interval;
 	unsigned short		subsampling;
-	struct v4l2_m2m_ctx	*m2m_ctx;
 	struct s5p_jpeg_q_data	out_q;
 	struct s5p_jpeg_q_data	cap_q;
 	struct v4l2_fh		fh;
@@ -147,4 +192,16 @@
 	unsigned long data;
 };
 
+/**
+ * struct s5p_jpeg_addr - JPEG converter physical address set for DMA
+ * @y:   luminance plane physical address
+ * @cb:  Cb plane physical address
+ * @cr:  Cr plane physical address
+ */
+struct s5p_jpeg_addr {
+	u32     y;
+	u32     cb;
+	u32     cr;
+};
+
 #endif /* JPEG_CORE_H */
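
The single flags word above replaces the old MEM2MEM_CAPTURE/MEM2MEM_OUTPUT pair so that one sjpeg_formats[] table (defined earlier in the patch) can record, per pixel format, which queue it may appear on in encode and decode mode, which IP generation supports it, and whether it is an RGB or non-RGB raw format. An illustrative entry showing how the bits are meant to be combined (field values invented for the example, not copied from the real table):

{
	.name		= "YUV 4:2:2 packed, YCbYCr",
	.fourcc		= V4L2_PIX_FMT_YUYV,
	.depth		= 16,
	.colplanes	= 1,
	.memplanes	= 1,
	.h_align	= 4,
	.v_align	= 3,
	.subsampling	= V4L2_JPEG_CHROMA_SUBSAMPLING_422,
	.flags		= SJPEG_FMT_FLAG_ENC_OUTPUT |
			  SJPEG_FMT_FLAG_DEC_CAPTURE |
			  SJPEG_FMT_FLAG_S5P |
			  SJPEG_FMT_NON_RGB,
},
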
diff --git a/drivers/media/platform/s5p-jpeg/jpeg-hw-exynos4.c b/drivers/media/platform/s5p-jpeg/jpeg-hw-exynos4.c
new file mode 100644
index 0000000..da8d6a1
--- /dev/null
+++ b/drivers/media/platform/s5p-jpeg/jpeg-hw-exynos4.c
@@ -0,0 +1,279 @@
+/* Copyright (c) 2013 Samsung Electronics Co., Ltd.
+ *		http://www.samsung.com/
+ *
+ * Author: Jacek Anaszewski <j.anaszewski@samsung.com>
+ *
+ * Register interface file for JPEG driver on Exynos4x12.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#include <linux/io.h>
+#include <linux/delay.h>
+
+#include "jpeg-core.h"
+#include "jpeg-hw-exynos4.h"
+#include "jpeg-regs.h"
+
+void exynos4_jpeg_sw_reset(void __iomem *base)
+{
+	unsigned int reg;
+
+	reg = readl(base + EXYNOS4_JPEG_CNTL_REG);
+	writel(reg & ~EXYNOS4_SOFT_RESET_HI, base + EXYNOS4_JPEG_CNTL_REG);
+
+	ndelay(100000);
+
+	writel(reg | EXYNOS4_SOFT_RESET_HI, base + EXYNOS4_JPEG_CNTL_REG);
+}
+
+void exynos4_jpeg_set_enc_dec_mode(void __iomem *base, unsigned int mode)
+{
+	unsigned int reg;
+
+	reg = readl(base + EXYNOS4_JPEG_CNTL_REG);
+	/* set exynos4_jpeg mod register */
+	if (mode == S5P_JPEG_DECODE) {
+		writel((reg & EXYNOS4_ENC_DEC_MODE_MASK) |
+					EXYNOS4_DEC_MODE,
+			base + EXYNOS4_JPEG_CNTL_REG);
+	} else {/* encode */
+		writel((reg & EXYNOS4_ENC_DEC_MODE_MASK) |
+					EXYNOS4_ENC_MODE,
+			base + EXYNOS4_JPEG_CNTL_REG);
+	}
+}
+
+void exynos4_jpeg_set_img_fmt(void __iomem *base, unsigned int img_fmt)
+{
+	unsigned int reg;
+
+	reg = readl(base + EXYNOS4_IMG_FMT_REG) &
+			EXYNOS4_ENC_IN_FMT_MASK; /* clear except enc format */
+
+	switch (img_fmt) {
+	case V4L2_PIX_FMT_GREY:
+		reg = reg | EXYNOS4_ENC_GRAY_IMG | EXYNOS4_GRAY_IMG_IP;
+		break;
+	case V4L2_PIX_FMT_RGB32:
+		reg = reg | EXYNOS4_ENC_RGB_IMG |
+				EXYNOS4_RGB_IP_RGB_32BIT_IMG;
+		break;
+	case V4L2_PIX_FMT_RGB565:
+		reg = reg | EXYNOS4_ENC_RGB_IMG |
+				EXYNOS4_RGB_IP_RGB_16BIT_IMG;
+		break;
+	case V4L2_PIX_FMT_NV24:
+		reg = reg | EXYNOS4_ENC_YUV_444_IMG |
+				EXYNOS4_YUV_444_IP_YUV_444_2P_IMG |
+				EXYNOS4_SWAP_CHROMA_CBCR;
+		break;
+	case V4L2_PIX_FMT_NV42:
+		reg = reg | EXYNOS4_ENC_YUV_444_IMG |
+				EXYNOS4_YUV_444_IP_YUV_444_2P_IMG |
+				EXYNOS4_SWAP_CHROMA_CRCB;
+		break;
+	case V4L2_PIX_FMT_YUYV:
+		reg = reg | EXYNOS4_DEC_YUV_422_IMG |
+				EXYNOS4_YUV_422_IP_YUV_422_1P_IMG |
+				EXYNOS4_SWAP_CHROMA_CBCR;
+		break;
+
+	case V4L2_PIX_FMT_YVYU:
+		reg = reg | EXYNOS4_DEC_YUV_422_IMG |
+				EXYNOS4_YUV_422_IP_YUV_422_1P_IMG |
+				EXYNOS4_SWAP_CHROMA_CRCB;
+		break;
+	case V4L2_PIX_FMT_NV16:
+		reg = reg | EXYNOS4_DEC_YUV_422_IMG |
+				EXYNOS4_YUV_422_IP_YUV_422_2P_IMG |
+				EXYNOS4_SWAP_CHROMA_CBCR;
+		break;
+	case V4L2_PIX_FMT_NV61:
+		reg = reg | EXYNOS4_DEC_YUV_422_IMG |
+				EXYNOS4_YUV_422_IP_YUV_422_2P_IMG |
+				EXYNOS4_SWAP_CHROMA_CRCB;
+		break;
+	case V4L2_PIX_FMT_NV12:
+		reg = reg | EXYNOS4_DEC_YUV_420_IMG |
+				EXYNOS4_YUV_420_IP_YUV_420_2P_IMG |
+				EXYNOS4_SWAP_CHROMA_CBCR;
+		break;
+	case V4L2_PIX_FMT_NV21:
+		reg = reg | EXYNOS4_DEC_YUV_420_IMG |
+				EXYNOS4_YUV_420_IP_YUV_420_2P_IMG |
+				EXYNOS4_SWAP_CHROMA_CRCB;
+		break;
+	case V4L2_PIX_FMT_YUV420:
+		reg = reg | EXYNOS4_DEC_YUV_420_IMG |
+				EXYNOS4_YUV_420_IP_YUV_420_3P_IMG |
+				EXYNOS4_SWAP_CHROMA_CBCR;
+		break;
+	default:
+		break;
+
+	}
+
+	writel(reg, base + EXYNOS4_IMG_FMT_REG);
+}
+
+void exynos4_jpeg_set_enc_out_fmt(void __iomem *base, unsigned int out_fmt)
+{
+	unsigned int reg;
+
+	reg = readl(base + EXYNOS4_IMG_FMT_REG) &
+			~EXYNOS4_ENC_FMT_MASK; /* clear enc format */
+
+	switch (out_fmt) {
+	case V4L2_JPEG_CHROMA_SUBSAMPLING_GRAY:
+		reg = reg | EXYNOS4_ENC_FMT_GRAY;
+		break;
+
+	case V4L2_JPEG_CHROMA_SUBSAMPLING_444:
+		reg = reg | EXYNOS4_ENC_FMT_YUV_444;
+		break;
+
+	case V4L2_JPEG_CHROMA_SUBSAMPLING_422:
+		reg = reg | EXYNOS4_ENC_FMT_YUV_422;
+		break;
+
+	case V4L2_JPEG_CHROMA_SUBSAMPLING_420:
+		reg = reg | EXYNOS4_ENC_FMT_YUV_420;
+		break;
+
+	default:
+		break;
+	}
+
+	writel(reg, base + EXYNOS4_IMG_FMT_REG);
+}
+
+void exynos4_jpeg_set_interrupt(void __iomem *base)
+{
+	unsigned int reg;
+
+	reg = readl(base + EXYNOS4_INT_EN_REG) & ~EXYNOS4_INT_EN_MASK;
+	writel(EXYNOS4_INT_EN_ALL, base + EXYNOS4_INT_EN_REG);
+}
+
+unsigned int exynos4_jpeg_get_int_status(void __iomem *base)
+{
+	unsigned int	int_status;
+
+	int_status = readl(base + EXYNOS4_INT_STATUS_REG);
+
+	return int_status;
+}
+
+unsigned int exynos4_jpeg_get_fifo_status(void __iomem *base)
+{
+	unsigned int fifo_status;
+
+	fifo_status = readl(base + EXYNOS4_FIFO_STATUS_REG);
+
+	return fifo_status;
+}
+
+void exynos4_jpeg_set_huf_table_enable(void __iomem *base, int value)
+{
+	unsigned int	reg;
+
+	reg = readl(base + EXYNOS4_JPEG_CNTL_REG) & ~EXYNOS4_HUF_TBL_EN;
+
+	if (value == 1)
+		writel(reg | EXYNOS4_HUF_TBL_EN,
+					base + EXYNOS4_JPEG_CNTL_REG);
+	else
+		writel(reg | ~EXYNOS4_HUF_TBL_EN,
+					base + EXYNOS4_JPEG_CNTL_REG);
+}
+
+void exynos4_jpeg_set_sys_int_enable(void __iomem *base, int value)
+{
+	unsigned int	reg;
+
+	reg = readl(base + EXYNOS4_JPEG_CNTL_REG) & ~(EXYNOS4_SYS_INT_EN);
+
+	if (value == 1)
+		writel(EXYNOS4_SYS_INT_EN, base + EXYNOS4_JPEG_CNTL_REG);
+	else
+		writel(~EXYNOS4_SYS_INT_EN, base + EXYNOS4_JPEG_CNTL_REG);
+}
+
+void exynos4_jpeg_set_stream_buf_address(void __iomem *base,
+					 unsigned int address)
+{
+	writel(address, base + EXYNOS4_OUT_MEM_BASE_REG);
+}
+
+void exynos4_jpeg_set_stream_size(void __iomem *base,
+		unsigned int x_value, unsigned int y_value)
+{
+	writel(0x0, base + EXYNOS4_JPEG_IMG_SIZE_REG); /* clear */
+	writel(EXYNOS4_X_SIZE(x_value) | EXYNOS4_Y_SIZE(y_value),
+			base + EXYNOS4_JPEG_IMG_SIZE_REG);
+}
+
+void exynos4_jpeg_set_frame_buf_address(void __iomem *base,
+				struct s5p_jpeg_addr *exynos4_jpeg_addr)
+{
+	writel(exynos4_jpeg_addr->y, base + EXYNOS4_IMG_BA_PLANE_1_REG);
+	writel(exynos4_jpeg_addr->cb, base + EXYNOS4_IMG_BA_PLANE_2_REG);
+	writel(exynos4_jpeg_addr->cr, base + EXYNOS4_IMG_BA_PLANE_3_REG);
+}
+
+void exynos4_jpeg_set_encode_tbl_select(void __iomem *base,
+		enum exynos4_jpeg_img_quality_level level)
+{
+	unsigned int	reg;
+
+	reg = EXYNOS4_Q_TBL_COMP1_0 | EXYNOS4_Q_TBL_COMP2_1 |
+		EXYNOS4_Q_TBL_COMP3_1 |
+		EXYNOS4_HUFF_TBL_COMP1_AC_0_DC_1 |
+		EXYNOS4_HUFF_TBL_COMP2_AC_0_DC_0 |
+		EXYNOS4_HUFF_TBL_COMP3_AC_1_DC_1;
+
+	writel(reg, base + EXYNOS4_TBL_SEL_REG);
+}
+
+void exynos4_jpeg_set_encode_hoff_cnt(void __iomem *base, unsigned int fmt)
+{
+	if (fmt == V4L2_PIX_FMT_GREY)
+		writel(0xd2, base + EXYNOS4_HUFF_CNT_REG);
+	else
+		writel(0x1a2, base + EXYNOS4_HUFF_CNT_REG);
+}
+
+unsigned int exynos4_jpeg_get_stream_size(void __iomem *base)
+{
+	unsigned int size;
+
+	size = readl(base + EXYNOS4_BITSTREAM_SIZE_REG);
+	return size;
+}
+
+void exynos4_jpeg_set_dec_bitstream_size(void __iomem *base, unsigned int size)
+{
+	writel(size, base + EXYNOS4_BITSTREAM_SIZE_REG);
+}
+
+void exynos4_jpeg_get_frame_size(void __iomem *base,
+			unsigned int *width, unsigned int *height)
+{
+	*width = (readl(base + EXYNOS4_DECODE_XY_SIZE_REG) &
+				EXYNOS4_DECODED_SIZE_MASK);
+	*height = (readl(base + EXYNOS4_DECODE_XY_SIZE_REG) >> 16) &
+				EXYNOS4_DECODED_SIZE_MASK;
+}
+
+unsigned int exynos4_jpeg_get_frame_fmt(void __iomem *base)
+{
+	return readl(base + EXYNOS4_DECODE_IMG_FMT_REG) &
+				EXYNOS4_JPEG_DECODED_IMG_FMT_MASK;
+}
+
+void exynos4_jpeg_set_timer_count(void __iomem *base, unsigned int size)
+{
+	writel(size, base + EXYNOS4_INT_TIMER_COUNT_REG);
+}
diff --git a/drivers/media/platform/s5p-jpeg/jpeg-hw-exynos4.h b/drivers/media/platform/s5p-jpeg/jpeg-hw-exynos4.h
new file mode 100644
index 0000000..c228d28
--- /dev/null
+++ b/drivers/media/platform/s5p-jpeg/jpeg-hw-exynos4.h
@@ -0,0 +1,42 @@
+/* Copyright (c) 2013 Samsung Electronics Co., Ltd.
+ *		http://www.samsung.com/
+ *
+ * Author: Jacek Anaszewski <j.anaszewski@samsung.com>
+ *
+ * Header file of the register interface for JPEG driver on Exynos4x12.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+*/
+
+#ifndef JPEG_HW_EXYNOS4_H_
+#define JPEG_HW_EXYNOS4_H_
+
+void exynos4_jpeg_sw_reset(void __iomem *base);
+void exynos4_jpeg_set_enc_dec_mode(void __iomem *base, unsigned int mode);
+void exynos4_jpeg_set_img_fmt(void __iomem *base, unsigned int img_fmt);
+void exynos4_jpeg_set_enc_out_fmt(void __iomem *base, unsigned int out_fmt);
+void exynos4_jpeg_set_enc_tbl(void __iomem *base);
+void exynos4_jpeg_set_interrupt(void __iomem *base);
+unsigned int exynos4_jpeg_get_int_status(void __iomem *base);
+void exynos4_jpeg_set_huf_table_enable(void __iomem *base, int value);
+void exynos4_jpeg_set_sys_int_enable(void __iomem *base, int value);
+void exynos4_jpeg_set_stream_buf_address(void __iomem *base,
+					 unsigned int address);
+void exynos4_jpeg_set_stream_size(void __iomem *base,
+		unsigned int x_value, unsigned int y_value);
+void exynos4_jpeg_set_frame_buf_address(void __iomem *base,
+				struct s5p_jpeg_addr *jpeg_addr);
+void exynos4_jpeg_set_encode_tbl_select(void __iomem *base,
+		enum exynos4_jpeg_img_quality_level level);
+void exynos4_jpeg_set_encode_hoff_cnt(void __iomem *base, unsigned int fmt);
+void exynos4_jpeg_set_dec_bitstream_size(void __iomem *base, unsigned int size);
+unsigned int exynos4_jpeg_get_stream_size(void __iomem *base);
+void exynos4_jpeg_get_frame_size(void __iomem *base,
+			unsigned int *width, unsigned int *height);
+unsigned int exynos4_jpeg_get_frame_fmt(void __iomem *base);
+unsigned int exynos4_jpeg_get_fifo_status(void __iomem *base);
+void exynos4_jpeg_set_timer_count(void __iomem *base, unsigned int size);
+
+#endif /* JPEG_HW_EXYNOS4_H_ */
diff --git a/drivers/media/platform/s5p-jpeg/jpeg-hw.h b/drivers/media/platform/s5p-jpeg/jpeg-hw-s5p.c
similarity index 70%
rename from drivers/media/platform/s5p-jpeg/jpeg-hw.h
rename to drivers/media/platform/s5p-jpeg/jpeg-hw-s5p.c
index b47e887..52407d7 100644
--- a/drivers/media/platform/s5p-jpeg/jpeg-hw.h
+++ b/drivers/media/platform/s5p-jpeg/jpeg-hw-s5p.c
@@ -9,27 +9,15 @@
  * it under the terms of the GNU General Public License version 2 as
  * published by the Free Software Foundation.
  */
-#ifndef JPEG_HW_H_
-#define JPEG_HW_H_
 
 #include <linux/io.h>
 #include <linux/videodev2.h>
 
-#include "jpeg-hw.h"
+#include "jpeg-core.h"
 #include "jpeg-regs.h"
+#include "jpeg-hw-s5p.h"
 
-#define S5P_JPEG_MIN_WIDTH		32
-#define S5P_JPEG_MIN_HEIGHT		32
-#define S5P_JPEG_MAX_WIDTH		8192
-#define S5P_JPEG_MAX_HEIGHT		8192
-#define S5P_JPEG_ENCODE			0
-#define S5P_JPEG_DECODE			1
-#define S5P_JPEG_RAW_IN_565		0
-#define S5P_JPEG_RAW_IN_422		1
-#define S5P_JPEG_RAW_OUT_422		0
-#define S5P_JPEG_RAW_OUT_420		1
-
-static inline void jpeg_reset(void __iomem *regs)
+void s5p_jpeg_reset(void __iomem *regs)
 {
 	unsigned long reg;
 
@@ -42,12 +30,12 @@
 	}
 }
 
-static inline void jpeg_poweron(void __iomem *regs)
+void s5p_jpeg_poweron(void __iomem *regs)
 {
 	writel(S5P_POWER_ON, regs + S5P_JPGCLKCON);
 }
 
-static inline void jpeg_input_raw_mode(void __iomem *regs, unsigned long mode)
+void s5p_jpeg_input_raw_mode(void __iomem *regs, unsigned long mode)
 {
 	unsigned long reg, m;
 
@@ -63,7 +51,7 @@
 	writel(reg, regs + S5P_JPGCMOD);
 }
 
-static inline void jpeg_input_raw_y16(void __iomem *regs, bool y16)
+void s5p_jpeg_input_raw_y16(void __iomem *regs, bool y16)
 {
 	unsigned long reg;
 
@@ -75,7 +63,7 @@
 	writel(reg, regs + S5P_JPGCMOD);
 }
 
-static inline void jpeg_proc_mode(void __iomem *regs, unsigned long mode)
+void s5p_jpeg_proc_mode(void __iomem *regs, unsigned long mode)
 {
 	unsigned long reg, m;
 
@@ -90,7 +78,7 @@
 	writel(reg, regs + S5P_JPGMOD);
 }
 
-static inline void jpeg_subsampling_mode(void __iomem *regs, unsigned int mode)
+void s5p_jpeg_subsampling_mode(void __iomem *regs, unsigned int mode)
 {
 	unsigned long reg, m;
 
@@ -105,12 +93,12 @@
 	writel(reg, regs + S5P_JPGMOD);
 }
 
-static inline unsigned int jpeg_get_subsampling_mode(void __iomem *regs)
+unsigned int s5p_jpeg_get_subsampling_mode(void __iomem *regs)
 {
 	return readl(regs + S5P_JPGMOD) & S5P_SUBSAMPLING_MODE_MASK;
 }
 
-static inline void jpeg_dri(void __iomem *regs, unsigned int dri)
+void s5p_jpeg_dri(void __iomem *regs, unsigned int dri)
 {
 	unsigned long reg;
 
@@ -125,7 +113,7 @@
 	writel(reg, regs + S5P_JPGDRI_L);
 }
 
-static inline void jpeg_qtbl(void __iomem *regs, unsigned int t, unsigned int n)
+void s5p_jpeg_qtbl(void __iomem *regs, unsigned int t, unsigned int n)
 {
 	unsigned long reg;
 
@@ -135,7 +123,7 @@
 	writel(reg, regs + S5P_JPG_QTBL);
 }
 
-static inline void jpeg_htbl_ac(void __iomem *regs, unsigned int t)
+void s5p_jpeg_htbl_ac(void __iomem *regs, unsigned int t)
 {
 	unsigned long reg;
 
@@ -146,7 +134,7 @@
 	writel(reg, regs + S5P_JPG_HTBL);
 }
 
-static inline void jpeg_htbl_dc(void __iomem *regs, unsigned int t)
+void s5p_jpeg_htbl_dc(void __iomem *regs, unsigned int t)
 {
 	unsigned long reg;
 
@@ -157,7 +145,7 @@
 	writel(reg, regs + S5P_JPG_HTBL);
 }
 
-static inline void jpeg_y(void __iomem *regs, unsigned int y)
+void s5p_jpeg_y(void __iomem *regs, unsigned int y)
 {
 	unsigned long reg;
 
@@ -172,7 +160,7 @@
 	writel(reg, regs + S5P_JPGY_L);
 }
 
-static inline void jpeg_x(void __iomem *regs, unsigned int x)
+void s5p_jpeg_x(void __iomem *regs, unsigned int x)
 {
 	unsigned long reg;
 
@@ -187,7 +175,7 @@
 	writel(reg, regs + S5P_JPGX_L);
 }
 
-static inline void jpeg_rst_int_enable(void __iomem *regs, bool enable)
+void s5p_jpeg_rst_int_enable(void __iomem *regs, bool enable)
 {
 	unsigned long reg;
 
@@ -198,7 +186,7 @@
 	writel(reg, regs + S5P_JPGINTSE);
 }
 
-static inline void jpeg_data_num_int_enable(void __iomem *regs, bool enable)
+void s5p_jpeg_data_num_int_enable(void __iomem *regs, bool enable)
 {
 	unsigned long reg;
 
@@ -209,7 +197,7 @@
 	writel(reg, regs + S5P_JPGINTSE);
 }
 
-static inline void jpeg_final_mcu_num_int_enable(void __iomem *regs, bool enbl)
+void s5p_jpeg_final_mcu_num_int_enable(void __iomem *regs, bool enbl)
 {
 	unsigned long reg;
 
@@ -220,7 +208,7 @@
 	writel(reg, regs + S5P_JPGINTSE);
 }
 
-static inline void jpeg_timer_enable(void __iomem *regs, unsigned long val)
+void s5p_jpeg_timer_enable(void __iomem *regs, unsigned long val)
 {
 	unsigned long reg;
 
@@ -231,7 +219,7 @@
 	writel(reg, regs + S5P_JPG_TIMER_SE);
 }
 
-static inline void jpeg_timer_disable(void __iomem *regs)
+void s5p_jpeg_timer_disable(void __iomem *regs)
 {
 	unsigned long reg;
 
@@ -240,13 +228,13 @@
 	writel(reg, regs + S5P_JPG_TIMER_SE);
 }
 
-static inline int jpeg_timer_stat(void __iomem *regs)
+int s5p_jpeg_timer_stat(void __iomem *regs)
 {
 	return (int)((readl(regs + S5P_JPG_TIMER_ST) & S5P_TIMER_INT_STAT_MASK)
 		     >> S5P_TIMER_INT_STAT_SHIFT);
 }
 
-static inline void jpeg_clear_timer_stat(void __iomem *regs)
+void s5p_jpeg_clear_timer_stat(void __iomem *regs)
 {
 	unsigned long reg;
 
@@ -255,7 +243,7 @@
 	writel(reg, regs + S5P_JPG_TIMER_SE);
 }
 
-static inline void jpeg_enc_stream_int(void __iomem *regs, unsigned long size)
+void s5p_jpeg_enc_stream_int(void __iomem *regs, unsigned long size)
 {
 	unsigned long reg;
 
@@ -266,13 +254,13 @@
 	writel(reg, regs + S5P_JPG_ENC_STREAM_INTSE);
 }
 
-static inline int jpeg_enc_stream_stat(void __iomem *regs)
+int s5p_jpeg_enc_stream_stat(void __iomem *regs)
 {
 	return (int)(readl(regs + S5P_JPG_ENC_STREAM_INTST) &
 		     S5P_ENC_STREAM_INT_STAT_MASK);
 }
 
-static inline void jpeg_clear_enc_stream_stat(void __iomem *regs)
+void s5p_jpeg_clear_enc_stream_stat(void __iomem *regs)
 {
 	unsigned long reg;
 
@@ -281,7 +269,7 @@
 	writel(reg, regs + S5P_JPG_ENC_STREAM_INTSE);
 }
 
-static inline void jpeg_outform_raw(void __iomem *regs, unsigned long format)
+void s5p_jpeg_outform_raw(void __iomem *regs, unsigned long format)
 {
 	unsigned long reg, f;
 
@@ -296,17 +284,17 @@
 	writel(reg, regs + S5P_JPG_OUTFORM);
 }
 
-static inline void jpeg_jpgadr(void __iomem *regs, unsigned long addr)
+void s5p_jpeg_jpgadr(void __iomem *regs, unsigned long addr)
 {
 	writel(addr, regs + S5P_JPG_JPGADR);
 }
 
-static inline void jpeg_imgadr(void __iomem *regs, unsigned long addr)
+void s5p_jpeg_imgadr(void __iomem *regs, unsigned long addr)
 {
 	writel(addr, regs + S5P_JPG_IMGADR);
 }
 
-static inline void jpeg_coef(void __iomem *regs, unsigned int i,
+void s5p_jpeg_coef(void __iomem *regs, unsigned int i,
 			     unsigned int j, unsigned int coef)
 {
 	unsigned long reg;
@@ -317,24 +305,24 @@
 	writel(reg, regs + S5P_JPG_COEF(i));
 }
 
-static inline void jpeg_start(void __iomem *regs)
+void s5p_jpeg_start(void __iomem *regs)
 {
 	writel(1, regs + S5P_JSTART);
 }
 
-static inline int jpeg_result_stat_ok(void __iomem *regs)
+int s5p_jpeg_result_stat_ok(void __iomem *regs)
 {
 	return (int)((readl(regs + S5P_JPGINTST) & S5P_RESULT_STAT_MASK)
 		     >> S5P_RESULT_STAT_SHIFT);
 }
 
-static inline int jpeg_stream_stat_ok(void __iomem *regs)
+int s5p_jpeg_stream_stat_ok(void __iomem *regs)
 {
 	return !(int)((readl(regs + S5P_JPGINTST) & S5P_STREAM_STAT_MASK)
 		      >> S5P_STREAM_STAT_SHIFT);
 }
 
-static inline void jpeg_clear_int(void __iomem *regs)
+void s5p_jpeg_clear_int(void __iomem *regs)
 {
 	unsigned long reg;
 
@@ -343,7 +331,7 @@
 	reg = readl(regs + S5P_JPGOPR);
 }
 
-static inline unsigned int jpeg_compressed_size(void __iomem *regs)
+unsigned int s5p_jpeg_compressed_size(void __iomem *regs)
 {
 	unsigned long jpeg_size = 0;
 
@@ -353,5 +341,3 @@
 
 	return (unsigned int)jpeg_size;
 }
-
-#endif /* JPEG_HW_H_ */
diff --git a/drivers/media/platform/s5p-jpeg/jpeg-hw-s5p.h b/drivers/media/platform/s5p-jpeg/jpeg-hw-s5p.h
new file mode 100644
index 0000000..c11ebe8
--- /dev/null
+++ b/drivers/media/platform/s5p-jpeg/jpeg-hw-s5p.h
@@ -0,0 +1,63 @@
+/* linux/drivers/media/platform/s5p-jpeg/jpeg-hw.h
+ *
+ * Copyright (c) 2011 Samsung Electronics Co., Ltd.
+ *		http://www.samsung.com
+ *
+ * Author: Andrzej Pietrasiewicz <andrzej.p@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#ifndef JPEG_HW_S5P_H_
+#define JPEG_HW_S5P_H_
+
+#include <linux/io.h>
+#include <linux/videodev2.h>
+
+#include "jpeg-regs.h"
+
+#define S5P_JPEG_MIN_WIDTH		32
+#define S5P_JPEG_MIN_HEIGHT		32
+#define S5P_JPEG_MAX_WIDTH		8192
+#define S5P_JPEG_MAX_HEIGHT		8192
+#define S5P_JPEG_RAW_IN_565		0
+#define S5P_JPEG_RAW_IN_422		1
+#define S5P_JPEG_RAW_OUT_422		0
+#define S5P_JPEG_RAW_OUT_420		1
+
+void s5p_jpeg_reset(void __iomem *regs);
+void s5p_jpeg_poweron(void __iomem *regs);
+void s5p_jpeg_input_raw_mode(void __iomem *regs, unsigned long mode);
+void s5p_jpeg_input_raw_y16(void __iomem *regs, bool y16);
+void s5p_jpeg_proc_mode(void __iomem *regs, unsigned long mode);
+void s5p_jpeg_subsampling_mode(void __iomem *regs, unsigned int mode);
+unsigned int s5p_jpeg_get_subsampling_mode(void __iomem *regs);
+void s5p_jpeg_dri(void __iomem *regs, unsigned int dri);
+void s5p_jpeg_qtbl(void __iomem *regs, unsigned int t, unsigned int n);
+void s5p_jpeg_htbl_ac(void __iomem *regs, unsigned int t);
+void s5p_jpeg_htbl_dc(void __iomem *regs, unsigned int t);
+void s5p_jpeg_y(void __iomem *regs, unsigned int y);
+void s5p_jpeg_x(void __iomem *regs, unsigned int x);
+void s5p_jpeg_rst_int_enable(void __iomem *regs, bool enable);
+void s5p_jpeg_data_num_int_enable(void __iomem *regs, bool enable);
+void s5p_jpeg_final_mcu_num_int_enable(void __iomem *regs, bool enbl);
+void s5p_jpeg_timer_enable(void __iomem *regs, unsigned long val);
+void s5p_jpeg_timer_disable(void __iomem *regs);
+int s5p_jpeg_timer_stat(void __iomem *regs);
+void s5p_jpeg_clear_timer_stat(void __iomem *regs);
+void s5p_jpeg_enc_stream_int(void __iomem *regs, unsigned long size);
+int s5p_jpeg_enc_stream_stat(void __iomem *regs);
+void s5p_jpeg_clear_enc_stream_stat(void __iomem *regs);
+void s5p_jpeg_outform_raw(void __iomem *regs, unsigned long format);
+void s5p_jpeg_jpgadr(void __iomem *regs, unsigned long addr);
+void s5p_jpeg_imgadr(void __iomem *regs, unsigned long addr);
+void s5p_jpeg_coef(void __iomem *regs, unsigned int i,
+			     unsigned int j, unsigned int coef);
+void s5p_jpeg_start(void __iomem *regs);
+int s5p_jpeg_result_stat_ok(void __iomem *regs);
+int s5p_jpeg_stream_stat_ok(void __iomem *regs);
+void s5p_jpeg_clear_int(void __iomem *regs);
+unsigned int s5p_jpeg_compressed_size(void __iomem *regs);
+
+#endif /* JPEG_HW_S5P_H_ */
diff --git a/drivers/media/platform/s5p-jpeg/jpeg-regs.h b/drivers/media/platform/s5p-jpeg/jpeg-regs.h
index 38e5081..33f2c73 100644
--- a/drivers/media/platform/s5p-jpeg/jpeg-regs.h
+++ b/drivers/media/platform/s5p-jpeg/jpeg-regs.h
@@ -2,10 +2,11 @@
  *
  * Register definition file for Samsung JPEG codec driver
  *
- * Copyright (c) 2011 Samsung Electronics Co., Ltd.
+ * Copyright (c) 2011-2013 Samsung Electronics Co., Ltd.
  *		http://www.samsung.com
  *
  * Author: Andrzej Pietrasiewicz <andrzej.p@samsung.com>
+ * Author: Jacek Anaszewski <j.anaszewski@samsung.com>
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 as
@@ -15,6 +16,8 @@
 #ifndef JPEG_REGS_H_
 #define JPEG_REGS_H_
 
+/* Register and bit definitions for S5PC210 */
+
 /* JPEG mode register */
 #define S5P_JPGMOD			0x00
 #define S5P_PROC_MODE_MASK		(0x1 << 3)
@@ -166,5 +169,209 @@
 /* JPEG AC Huffman table register */
 #define S5P_JPG_HACTBLG(n)		(0x8c0 + (n) * 0x400)
 
+
+/* Register and bit definitions for Exynos 4x12 */
+
+/* JPEG Codec Control Registers */
+#define EXYNOS4_JPEG_CNTL_REG		0x00
+#define EXYNOS4_INT_EN_REG		0x04
+#define EXYNOS4_INT_TIMER_COUNT_REG	0x08
+#define EXYNOS4_INT_STATUS_REG		0x0c
+#define EXYNOS4_OUT_MEM_BASE_REG		0x10
+#define EXYNOS4_JPEG_IMG_SIZE_REG	0x14
+#define EXYNOS4_IMG_BA_PLANE_1_REG	0x18
+#define EXYNOS4_IMG_SO_PLANE_1_REG	0x1c
+#define EXYNOS4_IMG_PO_PLANE_1_REG	0x20
+#define EXYNOS4_IMG_BA_PLANE_2_REG	0x24
+#define EXYNOS4_IMG_SO_PLANE_2_REG	0x28
+#define EXYNOS4_IMG_PO_PLANE_2_REG	0x2c
+#define EXYNOS4_IMG_BA_PLANE_3_REG	0x30
+#define EXYNOS4_IMG_SO_PLANE_3_REG	0x34
+#define EXYNOS4_IMG_PO_PLANE_3_REG	0x38
+
+#define EXYNOS4_TBL_SEL_REG		0x3c
+
+#define EXYNOS4_IMG_FMT_REG		0x40
+
+#define EXYNOS4_BITSTREAM_SIZE_REG	0x44
+#define EXYNOS4_PADDING_REG		0x48
+#define EXYNOS4_HUFF_CNT_REG		0x4c
+#define EXYNOS4_FIFO_STATUS_REG	0x50
+#define EXYNOS4_DECODE_XY_SIZE_REG	0x54
+#define EXYNOS4_DECODE_IMG_FMT_REG	0x58
+
+#define EXYNOS4_QUAN_TBL_ENTRY_REG	0x100
+#define EXYNOS4_HUFF_TBL_ENTRY_REG	0x200
+
+
+/****************************************************************/
+/* Bit definition part						*/
+/****************************************************************/
+
+/* JPEG CNTL Register bit */
+#define EXYNOS4_ENC_DEC_MODE_MASK	(0xfffffffc << 0)
+#define EXYNOS4_DEC_MODE			(1 << 0)
+#define EXYNOS4_ENC_MODE			(1 << 1)
+#define EXYNOS4_AUTO_RST_MARKER		(1 << 2)
+#define EXYNOS4_RST_INTERVAL_SHIFT	3
+#define EXYNOS4_RST_INTERVAL(x)		(((x) & 0xffff) \
+						<< EXYNOS4_RST_INTERVAL_SHIFT)
+#define EXYNOS4_HUF_TBL_EN		(1 << 19)
+#define EXYNOS4_HOR_SCALING_SHIFT	20
+#define EXYNOS4_HOR_SCALING_MASK		(3 << EXYNOS4_HOR_SCALING_SHIFT)
+#define EXYNOS4_HOR_SCALING(x)		(((x) & 0x3) \
+						<< EXYNOS4_HOR_SCALING_SHIFT)
+#define EXYNOS4_VER_SCALING_SHIFT	22
+#define EXYNOS4_VER_SCALING_MASK		(3 << EXYNOS4_VER_SCALING_SHIFT)
+#define EXYNOS4_VER_SCALING(x)		(((x) & 0x3) \
+						<< EXYNOS4_VER_SCALING_SHIFT)
+#define EXYNOS4_PADDING			(1 << 27)
+#define EXYNOS4_SYS_INT_EN		(1 << 28)
+#define EXYNOS4_SOFT_RESET_HI		(1 << 29)
+
+/* JPEG INT Register bit */
+#define EXYNOS4_INT_EN_MASK		(0x1f << 0)
+#define EXYNOS4_PROT_ERR_INT_EN		(1 << 0)
+#define EXYNOS4_IMG_COMPLETION_INT_EN	(1 << 1)
+#define EXYNOS4_DEC_INVALID_FORMAT_EN	(1 << 2)
+#define EXYNOS4_MULTI_SCAN_ERROR_EN	(1 << 3)
+#define EXYNOS4_FRAME_ERR_EN		(1 << 4)
+#define EXYNOS4_INT_EN_ALL		(0x1f << 0)
+
+#define EXYNOS4_MOD_REG_PROC_ENC		(0 << 3)
+#define EXYNOS4_MOD_REG_PROC_DEC		(1 << 3)
+
+#define EXYNOS4_MOD_REG_SUBSAMPLE_444	(0 << 0)
+#define EXYNOS4_MOD_REG_SUBSAMPLE_422	(1 << 0)
+#define EXYNOS4_MOD_REG_SUBSAMPLE_420	(2 << 0)
+#define EXYNOS4_MOD_REG_SUBSAMPLE_GRAY	(3 << 0)
+
+
+/* JPEG IMAGE SIZE Register bit */
+#define EXYNOS4_X_SIZE_SHIFT		0
+#define EXYNOS4_X_SIZE_MASK		(0xffff << EXYNOS4_X_SIZE_SHIFT)
+#define EXYNOS4_X_SIZE(x)		(((x) & 0xffff) << EXYNOS4_X_SIZE_SHIFT)
+#define EXYNOS4_Y_SIZE_SHIFT		16
+#define EXYNOS4_Y_SIZE_MASK		(0xffff << EXYNOS4_Y_SIZE_SHIFT)
+#define EXYNOS4_Y_SIZE(x)		(((x) & 0xffff) << EXYNOS4_Y_SIZE_SHIFT)
+
+/* JPEG IMAGE FORMAT Register bit */
+#define EXYNOS4_ENC_IN_FMT_MASK		0xffff0000
+#define EXYNOS4_ENC_GRAY_IMG		(0 << 0)
+#define EXYNOS4_ENC_RGB_IMG		(1 << 0)
+#define EXYNOS4_ENC_YUV_444_IMG		(2 << 0)
+#define EXYNOS4_ENC_YUV_422_IMG		(3 << 0)
+#define EXYNOS4_ENC_YUV_440_IMG		(4 << 0)
+
+#define EXYNOS4_DEC_GRAY_IMG		(0 << 0)
+#define EXYNOS4_DEC_RGB_IMG		(1 << 0)
+#define EXYNOS4_DEC_YUV_444_IMG		(2 << 0)
+#define EXYNOS4_DEC_YUV_422_IMG		(3 << 0)
+#define EXYNOS4_DEC_YUV_420_IMG		(4 << 0)
+
+#define EXYNOS4_GRAY_IMG_IP_SHIFT	3
+#define EXYNOS4_GRAY_IMG_IP_MASK		(7 << EXYNOS4_GRAY_IMG_IP_SHIFT)
+#define EXYNOS4_GRAY_IMG_IP		(4 << EXYNOS4_GRAY_IMG_IP_SHIFT)
+
+#define EXYNOS4_RGB_IP_SHIFT		6
+#define EXYNOS4_RGB_IP_MASK		(7 << EXYNOS4_RGB_IP_SHIFT)
+#define EXYNOS4_RGB_IP_RGB_16BIT_IMG	(4 << EXYNOS4_RGB_IP_SHIFT)
+#define EXYNOS4_RGB_IP_RGB_32BIT_IMG	(5 << EXYNOS4_RGB_IP_SHIFT)
+
+#define EXYNOS4_YUV_444_IP_SHIFT			9
+#define EXYNOS4_YUV_444_IP_MASK			(7 << EXYNOS4_YUV_444_IP_SHIFT)
+#define EXYNOS4_YUV_444_IP_YUV_444_2P_IMG	(4 << EXYNOS4_YUV_444_IP_SHIFT)
+#define EXYNOS4_YUV_444_IP_YUV_444_3P_IMG	(5 << EXYNOS4_YUV_444_IP_SHIFT)
+
+#define EXYNOS4_YUV_422_IP_SHIFT			12
+#define EXYNOS4_YUV_422_IP_MASK			(7 << EXYNOS4_YUV_422_IP_SHIFT)
+#define EXYNOS4_YUV_422_IP_YUV_422_1P_IMG	(4 << EXYNOS4_YUV_422_IP_SHIFT)
+#define EXYNOS4_YUV_422_IP_YUV_422_2P_IMG	(5 << EXYNOS4_YUV_422_IP_SHIFT)
+#define EXYNOS4_YUV_422_IP_YUV_422_3P_IMG	(6 << EXYNOS4_YUV_422_IP_SHIFT)
+
+#define EXYNOS4_YUV_420_IP_SHIFT			15
+#define EXYNOS4_YUV_420_IP_MASK			(7 << EXYNOS4_YUV_420_IP_SHIFT)
+#define EXYNOS4_YUV_420_IP_YUV_420_2P_IMG	(4 << EXYNOS4_YUV_420_IP_SHIFT)
+#define EXYNOS4_YUV_420_IP_YUV_420_3P_IMG	(5 << EXYNOS4_YUV_420_IP_SHIFT)
+
+#define EXYNOS4_ENC_FMT_SHIFT			24
+#define EXYNOS4_ENC_FMT_MASK			(3 << EXYNOS4_ENC_FMT_SHIFT)
+#define EXYNOS4_ENC_FMT_GRAY			(0 << EXYNOS4_ENC_FMT_SHIFT)
+#define EXYNOS4_ENC_FMT_YUV_444			(1 << EXYNOS4_ENC_FMT_SHIFT)
+#define EXYNOS4_ENC_FMT_YUV_422			(2 << EXYNOS4_ENC_FMT_SHIFT)
+#define EXYNOS4_ENC_FMT_YUV_420			(3 << EXYNOS4_ENC_FMT_SHIFT)
+
+#define EXYNOS4_JPEG_DECODED_IMG_FMT_MASK	0x03
+
+#define EXYNOS4_SWAP_CHROMA_CRCB			(1 << 26)
+#define EXYNOS4_SWAP_CHROMA_CBCR			(0 << 26)
+
+/* JPEG HUFF count Register bit */
+#define EXYNOS4_HUFF_COUNT_MASK			0xffff
+
+/* JPEG Decoded_img_x_y_size Register bit */
+#define EXYNOS4_DECODED_SIZE_MASK		0x0000ffff
+
+/* JPEG Decoded image format Register bit */
+#define EXYNOS4_DECODED_IMG_FMT_MASK		0x3
+
+/* JPEG TBL SEL Register bit */
+#define EXYNOS4_Q_TBL_COMP1_0		(0 << 0)
+#define EXYNOS4_Q_TBL_COMP1_1		(1 << 0)
+#define EXYNOS4_Q_TBL_COMP1_2		(2 << 0)
+#define EXYNOS4_Q_TBL_COMP1_3		(3 << 0)
+
+#define EXYNOS4_Q_TBL_COMP2_0		(0 << 2)
+#define EXYNOS4_Q_TBL_COMP2_1		(1 << 2)
+#define EXYNOS4_Q_TBL_COMP2_2		(2 << 2)
+#define EXYNOS4_Q_TBL_COMP2_3		(3 << 2)
+
+#define EXYNOS4_Q_TBL_COMP3_0		(0 << 4)
+#define EXYNOS4_Q_TBL_COMP3_1		(1 << 4)
+#define EXYNOS4_Q_TBL_COMP3_2		(2 << 4)
+#define EXYNOS4_Q_TBL_COMP3_3		(3 << 4)
+
+#define EXYNOS4_HUFF_TBL_COMP1_AC_0_DC_0	(0 << 6)
+#define EXYNOS4_HUFF_TBL_COMP1_AC_0_DC_1	(1 << 6)
+#define EXYNOS4_HUFF_TBL_COMP1_AC_1_DC_0	(2 << 6)
+#define EXYNOS4_HUFF_TBL_COMP1_AC_1_DC_1	(3 << 6)
+
+#define EXYNOS4_HUFF_TBL_COMP2_AC_0_DC_0	(0 << 8)
+#define EXYNOS4_HUFF_TBL_COMP2_AC_0_DC_1	(1 << 8)
+#define EXYNOS4_HUFF_TBL_COMP2_AC_1_DC_0	(2 << 8)
+#define EXYNOS4_HUFF_TBL_COMP2_AC_1_DC_1	(3 << 8)
+
+#define EXYNOS4_HUFF_TBL_COMP3_AC_0_DC_0	(0 << 10)
+#define EXYNOS4_HUFF_TBL_COMP3_AC_0_DC_1	(1 << 10)
+#define EXYNOS4_HUFF_TBL_COMP3_AC_1_DC_0	(2 << 10)
+#define EXYNOS4_HUFF_TBL_COMP3_AC_1_DC_1	(3 << 10)
+
+/* JPEG quantizer table register */
+#define EXYNOS4_QTBL_CONTENT(n)	(0x100 + (n) * 0x40)
+
+/* JPEG DC luminance (code length) Huffman table register */
+#define EXYNOS4_HUFF_TBL_HDCLL	0x200
+
+/* JPEG DC luminance (values) Huffman table register */
+#define EXYNOS4_HUFF_TBL_HDCLV	0x210
+
+/* JPEG DC chrominance (code length) Huffman table register */
+#define EXYNOS4_HUFF_TBL_HDCCL	0x220
+
+/* JPEG DC chrominance (values) Huffman table register */
+#define EXYNOS4_HUFF_TBL_HDCCV	0x230
+
+/* JPEG AC luminance (code length) Huffman table register */
+#define EXYNOS4_HUFF_TBL_HACLL	0x240
+
+/* JPEG AC luminance (values) Huffman table register */
+#define EXYNOS4_HUFF_TBL_HACLV	0x250
+
+/* JPEG AC chrominance (code length) Huffman table register */
+#define EXYNOS4_HUFF_TBL_HACCL	0x300
+
+/* JPEG AC chrominance (values) Huffman table register */
+#define EXYNOS4_HUFF_TBL_HACCV	0x310
+
 #endif /* JPEG_REGS_H_ */
 
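Note on the Exynos4x12 definitions above: they are plain OR-able bitfield macros. A minimal, hypothetical sketch of composing a control word from them (the regs base pointer and the writel() call are illustrative assumptions, not part of this patch):

	u32 cntl;

	/* encode mode, restart markers every 32 MCUs, Huffman tables enabled,
	 * no scaling, system interrupt enabled */
	cntl = EXYNOS4_ENC_MODE |
	       EXYNOS4_AUTO_RST_MARKER |
	       EXYNOS4_RST_INTERVAL(32) |
	       EXYNOS4_HUF_TBL_EN |
	       EXYNOS4_HOR_SCALING(0) |
	       EXYNOS4_VER_SCALING(0) |
	       EXYNOS4_SYS_INT_EN;

	writel(cntl, regs + EXYNOS4_JPEG_CNTL_REG);
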
diff --git a/drivers/media/platform/s5p-mfc/s5p_mfc.c b/drivers/media/platform/s5p-mfc/s5p_mfc.c
index e46067a..e2aac59 100644
--- a/drivers/media/platform/s5p-mfc/s5p_mfc.c
+++ b/drivers/media/platform/s5p-mfc/s5p_mfc.c
@@ -177,21 +177,6 @@
 		mutex_unlock(&dev->mfc_mutex);
 }
 
-static enum s5p_mfc_node_type s5p_mfc_get_node_type(struct file *file)
-{
-	struct video_device *vdev = video_devdata(file);
-
-	if (!vdev) {
-		mfc_err("failed to get video_device");
-		return MFCNODE_INVALID;
-	}
-	if (vdev->index == 0)
-		return MFCNODE_DECODER;
-	else if (vdev->index == 1)
-		return MFCNODE_ENCODER;
-	return MFCNODE_INVALID;
-}
-
 static void s5p_mfc_clear_int_flags(struct s5p_mfc_dev *dev)
 {
 	mfc_write(dev, 0, S5P_FIMV_RISC_HOST_INT);
@@ -705,6 +690,7 @@
 /* Open an MFC node */
 static int s5p_mfc_open(struct file *file)
 {
+	struct video_device *vdev = video_devdata(file);
 	struct s5p_mfc_dev *dev = video_drvdata(file);
 	struct s5p_mfc_ctx *ctx = NULL;
 	struct vb2_queue *q;
@@ -742,7 +728,7 @@
 	/* Mark context as idle */
 	clear_work_bit_irqsave(ctx);
 	dev->ctx[ctx->num] = ctx;
-	if (s5p_mfc_get_node_type(file) == MFCNODE_DECODER) {
+	if (vdev == dev->vfd_dec) {
 		ctx->type = MFCINST_DECODER;
 		ctx->c_ops = get_dec_codec_ops();
 		s5p_mfc_dec_init(ctx);
@@ -752,7 +738,7 @@
 			mfc_err("Failed to setup mfc controls\n");
 			goto err_ctrls_setup;
 		}
-	} else if (s5p_mfc_get_node_type(file) == MFCNODE_ENCODER) {
+	} else if (vdev == dev->vfd_enc) {
 		ctx->type = MFCINST_ENCODER;
 		ctx->c_ops = get_enc_codec_ops();
 		/* only for encoder */
@@ -797,10 +783,10 @@
 	q = &ctx->vq_dst;
 	q->type = V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE;
 	q->drv_priv = &ctx->fh;
-	if (s5p_mfc_get_node_type(file) == MFCNODE_DECODER) {
+	if (vdev == dev->vfd_dec) {
 		q->io_modes = VB2_MMAP;
 		q->ops = get_dec_queue_ops();
-	} else if (s5p_mfc_get_node_type(file) == MFCNODE_ENCODER) {
+	} else if (vdev == dev->vfd_enc) {
 		q->io_modes = VB2_MMAP | VB2_USERPTR;
 		q->ops = get_enc_queue_ops();
 	} else {
@@ -819,10 +805,10 @@
 	q->type = V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE;
 	q->io_modes = VB2_MMAP;
 	q->drv_priv = &ctx->fh;
-	if (s5p_mfc_get_node_type(file) == MFCNODE_DECODER) {
+	if (vdev == dev->vfd_dec) {
 		q->io_modes = VB2_MMAP;
 		q->ops = get_dec_queue_ops();
-	} else if (s5p_mfc_get_node_type(file) == MFCNODE_ENCODER) {
+	} else if (vdev == dev->vfd_enc) {
 		q->io_modes = VB2_MMAP | VB2_USERPTR;
 		q->ops = get_enc_queue_ops();
 	} else {
diff --git a/drivers/media/platform/s5p-mfc/s5p_mfc_common.h b/drivers/media/platform/s5p-mfc/s5p_mfc_common.h
index 6920b54..f723f1f 100644
--- a/drivers/media/platform/s5p-mfc/s5p_mfc_common.h
+++ b/drivers/media/platform/s5p-mfc/s5p_mfc_common.h
@@ -115,15 +115,6 @@
 };
 
 /**
- * enum s5p_mfc_node_type - The type of an MFC device node.
- */
-enum s5p_mfc_node_type {
-	MFCNODE_INVALID = -1,
-	MFCNODE_DECODER = 0,
-	MFCNODE_ENCODER = 1,
-};
-
-/**
  * enum s5p_mfc_inst_type - The type of an MFC instance.
  */
 enum s5p_mfc_inst_type {
@@ -422,6 +413,11 @@
 	enum v4l2_vp8_golden_frame_sel golden_frame_sel;
 	u8 hier_layer;
 	u8 hier_layer_qp[3];
+	u8 rc_min_qp;
+	u8 rc_max_qp;
+	u8 rc_frame_qp;
+	u8 rc_p_frame_qp;
+	u8 profile;
 };
 
 /**
diff --git a/drivers/media/platform/s5p-mfc/s5p_mfc_enc.c b/drivers/media/platform/s5p-mfc/s5p_mfc_enc.c
index 4ff3b6c..91b6e02 100644
--- a/drivers/media/platform/s5p-mfc/s5p_mfc_enc.c
+++ b/drivers/media/platform/s5p-mfc/s5p_mfc_enc.c
@@ -618,6 +618,46 @@
 		.default_value = V4L2_CID_MPEG_VIDEO_VPX_GOLDEN_FRAME_USE_PREV,
 		.menu_skip_mask = 0,
 	},
+	{
+		.id = V4L2_CID_MPEG_VIDEO_VPX_MAX_QP,
+		.type = V4L2_CTRL_TYPE_INTEGER,
+		.minimum = 0,
+		.maximum = 127,
+		.step = 1,
+		.default_value = 127,
+	},
+	{
+		.id = V4L2_CID_MPEG_VIDEO_VPX_MIN_QP,
+		.type = V4L2_CTRL_TYPE_INTEGER,
+		.minimum = 0,
+		.maximum = 11,
+		.step = 1,
+		.default_value = 0,
+	},
+	{
+		.id = V4L2_CID_MPEG_VIDEO_VPX_I_FRAME_QP,
+		.type = V4L2_CTRL_TYPE_INTEGER,
+		.minimum = 0,
+		.maximum = 127,
+		.step = 1,
+		.default_value = 10,
+	},
+	{
+		.id = V4L2_CID_MPEG_VIDEO_VPX_P_FRAME_QP,
+		.type = V4L2_CTRL_TYPE_INTEGER,
+		.minimum = 0,
+		.maximum = 127,
+		.step = 1,
+		.default_value = 10,
+	},
+	{
+		.id = V4L2_CID_MPEG_VIDEO_VPX_PROFILE,
+		.type = V4L2_CTRL_TYPE_INTEGER,
+		.minimum = 0,
+		.maximum = 3,
+		.step = 1,
+		.default_value = 0,
+	},
 };
 
 #define NUM_CTRLS ARRAY_SIZE(controls)
@@ -1557,6 +1597,21 @@
 	case V4L2_CID_MPEG_VIDEO_VPX_GOLDEN_FRAME_SEL:
 		p->codec.vp8.golden_frame_sel = ctrl->val;
 		break;
+	case V4L2_CID_MPEG_VIDEO_VPX_MIN_QP:
+		p->codec.vp8.rc_min_qp = ctrl->val;
+		break;
+	case V4L2_CID_MPEG_VIDEO_VPX_MAX_QP:
+		p->codec.vp8.rc_max_qp = ctrl->val;
+		break;
+	case V4L2_CID_MPEG_VIDEO_VPX_I_FRAME_QP:
+		p->codec.vp8.rc_frame_qp = ctrl->val;
+		break;
+	case V4L2_CID_MPEG_VIDEO_VPX_P_FRAME_QP:
+		p->codec.vp8.rc_p_frame_qp = ctrl->val;
+		break;
+	case V4L2_CID_MPEG_VIDEO_VPX_PROFILE:
+		p->codec.vp8.profile = ctrl->val;
+		break;
 	default:
 		v4l2_err(&dev->v4l2_dev, "Invalid control, id=%d, val=%d\n",
 							ctrl->id, ctrl->val);
@@ -1863,7 +1918,7 @@
 		if (ctx->src_bufs_cnt < ctx->pb_count) {
 			mfc_err("Need minimum %d OUTPUT buffers\n",
 					ctx->pb_count);
-			return -EINVAL;
+			return -ENOBUFS;
 		}
 	}
 
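The new VP8 QP and profile controls go through the regular V4L2 control framework, so user space sets them like any other codec control. A hedged user-space sketch (fd is assumed to be an open MFC encoder video node, with linux/videodev2.h and sys/ioctl.h included):

	struct v4l2_control ctrl = {
		.id    = V4L2_CID_MPEG_VIDEO_VPX_MIN_QP,
		.value = 4,	/* any value in the 0..11 range declared above */
	};

	if (ioctl(fd, VIDIOC_S_CTRL, &ctrl) < 0)
		perror("VIDIOC_S_CTRL");
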
diff --git a/drivers/media/platform/s5p-mfc/s5p_mfc_opr_v6.c b/drivers/media/platform/s5p-mfc/s5p_mfc_opr_v6.c
index 461358c..f6ff2db 100644
--- a/drivers/media/platform/s5p-mfc/s5p_mfc_opr_v6.c
+++ b/drivers/media/platform/s5p-mfc/s5p_mfc_opr_v6.c
@@ -1197,10 +1197,8 @@
 	reg |= ((p->num_b_frame & 0x3) << 16);
 	WRITEL(reg, S5P_FIMV_E_GOP_CONFIG_V6);
 
-	/* profile & level */
-	reg = 0;
-	/** profile */
-	reg |= (0x1 << 4);
+	/* profile - 0 ~ 3 */
+	reg = p_vp8->profile & 0x3;
 	WRITEL(reg, S5P_FIMV_E_PICTURE_PROFILE_V6);
 
 	/* rate control config. */
@@ -1218,6 +1216,26 @@
 		WRITEL(reg, S5P_FIMV_E_RC_FRAME_RATE_V6);
 	}
 
+	/* frame QP */
+	reg &= ~(0x7F);
+	reg |= p_vp8->rc_frame_qp & 0x7F;
+	WRITEL(reg, S5P_FIMV_E_RC_CONFIG_V6);
+
+	/* other QPs */
+	WRITEL(0x0, S5P_FIMV_E_FIXED_PICTURE_QP_V6);
+	if (!p->rc_frame && !p->rc_mb) {
+		reg = 0;
+		reg |= ((p_vp8->rc_p_frame_qp & 0x7F) << 8);
+		reg |= p_vp8->rc_frame_qp & 0x7F;
+		WRITEL(reg, S5P_FIMV_E_FIXED_PICTURE_QP_V6);
+	}
+
+	/* max QP */
+	reg = ((p_vp8->rc_max_qp & 0x7F) << 8);
+	/* min QP */
+	reg |= p_vp8->rc_min_qp & 0x7F;
+	WRITEL(reg, S5P_FIMV_E_RC_QP_BOUND_V6);
+
 	/* vbv buffer size */
 	if (p->frame_skip_mode ==
 			V4L2_MPEG_MFC51_VIDEO_FRAME_SKIP_MODE_BUF_LIMIT) {
diff --git a/drivers/media/platform/s5p-tv/mixer_drv.c b/drivers/media/platform/s5p-tv/mixer_drv.c
index 51805a5..bc08b5f 100644
--- a/drivers/media/platform/s5p-tv/mixer_drv.c
+++ b/drivers/media/platform/s5p-tv/mixer_drv.c
@@ -347,19 +347,41 @@
 {
 	struct mxr_device *mdev = to_mdev(dev);
 	struct mxr_resources *res = &mdev->res;
+	int ret;
 
 	mxr_dbg(mdev, "resume - start\n");
 	mutex_lock(&mdev->mutex);
 	/* turn clocks on */
-	clk_enable(res->mixer);
-	clk_enable(res->vp);
-	clk_enable(res->sclk_mixer);
+	ret = clk_prepare_enable(res->mixer);
+	if (ret < 0) {
+		dev_err(mdev->dev, "clk_prepare_enable(mixer) failed\n");
+		goto fail;
+	}
+	ret = clk_prepare_enable(res->vp);
+	if (ret < 0) {
+		dev_err(mdev->dev, "clk_prepare_enable(vp) failed\n");
+		goto fail_mixer;
+	}
+	ret = clk_prepare_enable(res->sclk_mixer);
+	if (ret < 0) {
+		dev_err(mdev->dev, "clk_prepare_enable(sclk_mixer) failed\n");
+		goto fail_vp;
+	}
 	/* apply default configuration */
 	mxr_reg_reset(mdev);
 	mxr_dbg(mdev, "resume - finished\n");
 
 	mutex_unlock(&mdev->mutex);
 	return 0;
+
+fail_vp:
+	clk_disable_unprepare(res->vp);
+fail_mixer:
+	clk_disable_unprepare(res->mixer);
+fail:
+	mutex_unlock(&mdev->mutex);
+	dev_err(mdev->dev, "resume failed\n");
+	return ret;
 }
 
 static int mxr_runtime_suspend(struct device *dev)
@@ -369,9 +391,9 @@
 	mxr_dbg(mdev, "suspend - start\n");
 	mutex_lock(&mdev->mutex);
 	/* turn clocks off */
-	clk_disable(res->sclk_mixer);
-	clk_disable(res->vp);
-	clk_disable(res->mixer);
+	clk_disable_unprepare(res->sclk_mixer);
+	clk_disable_unprepare(res->vp);
+	clk_disable_unprepare(res->mixer);
 	mutex_unlock(&mdev->mutex);
 	mxr_dbg(mdev, "suspend - finished\n");
 	return 0;
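With the common clock framework, clk_enable() is only valid on a clock that has already been prepared, and clk_prepare() may sleep, which is why these sleepable runtime-PM paths switch to the combined helpers. The general pattern, sketched with a hypothetical clk/ret pair:

	ret = clk_prepare_enable(clk);	/* prepare (may sleep) + enable */
	if (ret)
		return ret;

	/* ... use the clock ... */

	clk_disable_unprepare(clk);	/* disable + unprepare, in reverse order */
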
diff --git a/drivers/media/platform/s5p-tv/mixer_video.c b/drivers/media/platform/s5p-tv/mixer_video.c
index 81b97db..c5059ba 100644
--- a/drivers/media/platform/s5p-tv/mixer_video.c
+++ b/drivers/media/platform/s5p-tv/mixer_video.c
@@ -948,7 +948,7 @@
 
 	if (count == 0) {
 		mxr_dbg(mdev, "no output buffers queued\n");
-		return -EINVAL;
+		return -ENOBUFS;
 	}
 
 	/* block any changes in output configuration */
diff --git a/drivers/media/platform/s5p-tv/sdo_drv.c b/drivers/media/platform/s5p-tv/sdo_drv.c
index 0afa90f..5a7c379 100644
--- a/drivers/media/platform/s5p-tv/sdo_drv.c
+++ b/drivers/media/platform/s5p-tv/sdo_drv.c
@@ -55,6 +55,8 @@
 	struct clk *dacphy;
 	/** clock for control of VPLL */
 	struct clk *fout_vpll;
+	/** vpll rate before sdo stream was on */
+	unsigned long vpll_rate;
 	/** regulator for SDO IP power */
 	struct regulator *vdac;
 	/** regulator for SDO plug detection */
@@ -193,17 +195,33 @@
 
 static int sdo_streamon(struct sdo_device *sdev)
 {
+	int ret;
+
 	/* set proper clock for Timing Generator */
-	clk_set_rate(sdev->fout_vpll, 54000000);
+	sdev->vpll_rate = clk_get_rate(sdev->fout_vpll);
+	ret = clk_set_rate(sdev->fout_vpll, 54000000);
+	if (ret < 0) {
+		dev_err(sdev->dev, "Failed to set vpll rate\n");
+		return ret;
+	}
 	dev_info(sdev->dev, "fout_vpll.rate = %lu\n",
 	clk_get_rate(sdev->fout_vpll));
 	/* enable clock in SDO */
 	sdo_write_mask(sdev, SDO_CLKCON, ~0, SDO_TVOUT_CLOCK_ON);
-	clk_enable(sdev->dacphy);
+	ret = clk_prepare_enable(sdev->dacphy);
+	if (ret < 0) {
+		dev_err(sdev->dev, "clk_prepare_enable(dacphy) failed\n");
+		goto fail;
+	}
 	/* enable DAC */
 	sdo_write_mask(sdev, SDO_DAC, ~0, SDO_POWER_ON_DAC);
 	sdo_reg_debug(sdev);
 	return 0;
+
+fail:
+	sdo_write_mask(sdev, SDO_CLKCON, 0, SDO_TVOUT_CLOCK_ON);
+	clk_set_rate(sdev->fout_vpll, sdev->vpll_rate);
+	return ret;
 }
 
 static int sdo_streamoff(struct sdo_device *sdev)
@@ -211,7 +229,7 @@
 	int tries;
 
 	sdo_write_mask(sdev, SDO_DAC, 0, SDO_POWER_ON_DAC);
-	clk_disable(sdev->dacphy);
+	clk_disable_unprepare(sdev->dacphy);
 	sdo_write_mask(sdev, SDO_CLKCON, 0, SDO_TVOUT_CLOCK_ON);
 	for (tries = 100; tries; --tries) {
 		if (sdo_read(sdev, SDO_CLKCON) & SDO_TVOUT_CLOCK_READY)
@@ -220,6 +238,7 @@
 	}
 	if (tries == 0)
 		dev_err(sdev->dev, "failed to stop streaming\n");
+	clk_set_rate(sdev->fout_vpll, sdev->vpll_rate);
 	return tries ? 0 : -EIO;
 }
 
@@ -254,7 +273,7 @@
 	dev_info(dev, "suspend\n");
 	regulator_disable(sdev->vdet);
 	regulator_disable(sdev->vdac);
-	clk_disable(sdev->sclk_dac);
+	clk_disable_unprepare(sdev->sclk_dac);
 	return 0;
 }
 
@@ -266,7 +285,7 @@
 
 	dev_info(dev, "resume\n");
 
-	ret = clk_enable(sdev->sclk_dac);
+	ret = clk_prepare_enable(sdev->sclk_dac);
 	if (ret < 0)
 		return ret;
 
@@ -299,7 +318,7 @@
 vdac_r_dis:
 	regulator_disable(sdev->vdac);
 dac_clk_dis:
-	clk_disable(sdev->sclk_dac);
+	clk_disable_unprepare(sdev->sclk_dac);
 	return ret;
 }
 
@@ -405,7 +424,11 @@
 	}
 
 	/* enable gate for dac clock, because mixer uses it */
-	clk_enable(sdev->dac);
+	ret = clk_prepare_enable(sdev->dac);
+	if (ret < 0) {
+		dev_err(dev, "clk_prepare_enable(dac) failed\n");
+		goto fail_fout_vpll;
+	}
 
 	/* configure power management */
 	pm_runtime_enable(dev);
@@ -444,7 +467,7 @@
 	struct sdo_device *sdev = sd_to_sdev(sd);
 
 	pm_runtime_disable(&pdev->dev);
-	clk_disable(sdev->dac);
+	clk_disable_unprepare(sdev->dac);
 	clk_put(sdev->fout_vpll);
 	clk_put(sdev->dacphy);
 	clk_put(sdev->dac);
diff --git a/drivers/media/platform/sh_vou.c b/drivers/media/platform/sh_vou.c
index 4f30341..e5f1d4c 100644
--- a/drivers/media/platform/sh_vou.c
+++ b/drivers/media/platform/sh_vou.c
@@ -286,7 +286,7 @@
 	vb->size = vb->height * bytes_per_line;
 	if (vb->baddr && vb->bsize < vb->size) {
 		/* User buffer too small */
-		dev_warn(vq->dev, "User buffer too small: [%u] @ %lx\n",
+		dev_warn(vq->dev, "User buffer too small: [%zu] @ %lx\n",
 			 vb->bsize, vb->baddr);
 		return -EINVAL;
 	}
@@ -302,9 +302,10 @@
 	}
 
 	dev_dbg(vou_dev->v4l2_dev.dev,
-		"%s(): fmt #%d, %u bytes per line, phys 0x%x, type %d, state %d\n",
+		"%s(): fmt #%d, %u bytes per line, phys %pad, type %d, state %d\n",
 		__func__, vou_dev->pix_idx, bytes_per_line,
-		videobuf_to_dma_contig(vb), vb->memory, vb->state);
+		({ dma_addr_t addr = videobuf_to_dma_contig(vb); &addr; }),
+		vb->memory, vb->state);
 
 	return 0;
 }
@@ -442,7 +443,7 @@
 				      int pix_idx, int w_idx, int h_idx)
 {
 	struct sh_vou_fmt *fmt = vou_fmt + pix_idx;
-	unsigned int black_left, black_top, width_max, height_max,
+	unsigned int black_left, black_top, width_max,
 		frame_in_height, frame_out_height, frame_out_top;
 	struct v4l2_rect *rect = &vou_dev->rect;
 	struct v4l2_pix_format *pix = &vou_dev->pix;
@@ -450,10 +451,10 @@
 
 	if (vou_dev->std & V4L2_STD_525_60) {
 		width_max = 858;
-		height_max = 262;
+		/* height_max = 262; */
 	} else {
 		width_max = 864;
-		height_max = 312;
+		/* height_max = 312; */
 	}
 
 	frame_in_height = pix->height / 2;
@@ -1052,7 +1053,6 @@
 	static unsigned long j;
 	struct videobuf_buffer *vb;
 	static int cnt;
-	static int side;
 	u32 irq_status = sh_vou_reg_a_read(vou_dev, VOUIR), masked;
 	u32 vou_status = sh_vou_reg_a_read(vou_dev, VOUSTR);
 
@@ -1080,7 +1080,7 @@
 		irq_status, masked, vou_status, cnt);
 
 	cnt++;
-	side = vou_status & 0x10000;
+	/* side = vou_status & 0x10000; */
 
 	/* Clear only set interrupts */
 	sh_vou_reg_a_write(vou_dev, VOUIR, masked);
diff --git a/drivers/media/platform/soc_camera/atmel-isi.c b/drivers/media/platform/soc_camera/atmel-isi.c
index 1044856..4835173 100644
--- a/drivers/media/platform/soc_camera/atmel-isi.c
+++ b/drivers/media/platform/soc_camera/atmel-isi.c
@@ -34,13 +34,6 @@
 #define MIN_FRAME_RATE			15
 #define FRAME_INTERVAL_MILLI_SEC	(1000 / MIN_FRAME_RATE)
 
-/* ISI states */
-enum {
-	ISI_STATE_IDLE = 0,
-	ISI_STATE_READY,
-	ISI_STATE_WAIT_SOF,
-};
-
 /* Frame buffer descriptor */
 struct fbd {
 	/* Physical address of the frame buffer */
@@ -75,11 +68,6 @@
 	void __iomem			*regs;
 
 	int				sequence;
-	/* State of the ISI module in capturing mode */
-	int				state;
-
-	/* Wait queue for waiting for SOF */
-	wait_queue_head_t		vsync_wq;
 
 	struct vb2_alloc_ctx		*alloc_ctx;
 
@@ -124,16 +112,16 @@
 	case V4L2_MBUS_FMT_Y8_1X8:
 		cr = ISI_CFG2_GRAYSCALE;
 		break;
-	case V4L2_MBUS_FMT_UYVY8_2X8:
+	case V4L2_MBUS_FMT_VYUY8_2X8:
 		cr = ISI_CFG2_YCC_SWAP_MODE_3;
 		break;
-	case V4L2_MBUS_FMT_VYUY8_2X8:
+	case V4L2_MBUS_FMT_UYVY8_2X8:
 		cr = ISI_CFG2_YCC_SWAP_MODE_2;
 		break;
-	case V4L2_MBUS_FMT_YUYV8_2X8:
+	case V4L2_MBUS_FMT_YVYU8_2X8:
 		cr = ISI_CFG2_YCC_SWAP_MODE_1;
 		break;
-	case V4L2_MBUS_FMT_YVYU8_2X8:
+	case V4L2_MBUS_FMT_YUYV8_2X8:
 		cr = ISI_CFG2_YCC_SWAP_DEFAULT;
 		break;
 	/* RGB, TODO */
@@ -144,6 +132,8 @@
 	isi_writel(isi, ISI_CTRL, ISI_CTRL_DIS);
 
 	cfg2 = isi_readl(isi, ISI_CFG2);
+	/* Set YCC swap mode */
+	cfg2 &= ~ISI_CFG2_YCC_SWAP_MODE_MASK;
 	cfg2 |= cr;
 	/* Set width */
 	cfg2 &= ~(ISI_CFG2_IM_HSIZE_MASK);
@@ -207,12 +197,6 @@
 		isi_writel(isi, ISI_INTDIS, ISI_CTRL_DIS);
 		ret = IRQ_HANDLED;
 	} else {
-		if ((pending & ISI_SR_VSYNC) &&
-				(isi->state == ISI_STATE_IDLE)) {
-			isi->state = ISI_STATE_READY;
-			wake_up_interruptible(&isi->vsync_wq);
-			ret = IRQ_HANDLED;
-		}
 		if (likely(pending & ISI_SR_CXFR_DONE))
 			ret = atmel_isi_handle_streaming(isi);
 	}
@@ -259,16 +243,6 @@
 	struct soc_camera_host *ici = to_soc_camera_host(icd->parent);
 	struct atmel_isi *isi = ici->priv;
 	unsigned long size;
-	int ret;
-
-	/* Reset ISI */
-	ret = atmel_isi_wait_status(isi, WAIT_ISI_RESET);
-	if (ret < 0) {
-		dev_err(icd->parent, "Reset ISI timed out\n");
-		return ret;
-	}
-	/* Disable all interrupts */
-	isi_writel(isi, ISI_INTDIS, ~0UL);
 
 	size = icd->sizeimage;
 
@@ -374,6 +348,7 @@
 	isi_writel(isi, ISI_DMA_C_CTRL, ISI_DMA_CTRL_FETCH | ISI_DMA_CTRL_DONE);
 	isi_writel(isi, ISI_DMA_CHER, ISI_DMA_CHSR_C_CH);
 
+	cfg1 &= ~ISI_CFG1_FRATE_DIV_MASK;
 	/* Enable linked list */
 	cfg1 |= isi->pdata->frate | ISI_CFG1_DISCR;
 
@@ -407,43 +382,27 @@
 	struct soc_camera_device *icd = soc_camera_from_vb2q(vq);
 	struct soc_camera_host *ici = to_soc_camera_host(icd->parent);
 	struct atmel_isi *isi = ici->priv;
-
 	u32 sr = 0;
 	int ret;
 
-	spin_lock_irq(&isi->lock);
-	isi->state = ISI_STATE_IDLE;
-	/* Clear any pending SOF interrupt */
-	sr = isi_readl(isi, ISI_STATUS);
-	/* Enable VSYNC interrupt for SOF */
-	isi_writel(isi, ISI_INTEN, ISI_SR_VSYNC);
-	isi_writel(isi, ISI_CTRL, ISI_CTRL_EN);
-	spin_unlock_irq(&isi->lock);
-
-	dev_dbg(icd->parent, "Waiting for SOF\n");
-	ret = wait_event_interruptible(isi->vsync_wq,
-				       isi->state != ISI_STATE_IDLE);
-	if (ret)
-		goto err;
-
-	if (isi->state != ISI_STATE_READY) {
-		ret = -EIO;
-		goto err;
+	/* Reset ISI */
+	ret = atmel_isi_wait_status(isi, WAIT_ISI_RESET);
+	if (ret < 0) {
+		dev_err(icd->parent, "Reset ISI timed out\n");
+		return ret;
 	}
+	/* Disable all interrupts */
+	isi_writel(isi, ISI_INTDIS, ~0UL);
 
 	spin_lock_irq(&isi->lock);
-	isi->state = ISI_STATE_WAIT_SOF;
-	isi_writel(isi, ISI_INTDIS, ISI_SR_VSYNC);
+	/* Clear any pending interrupt */
+	sr = isi_readl(isi, ISI_STATUS);
+
 	if (count)
 		start_dma(isi, isi->active);
 	spin_unlock_irq(&isi->lock);
 
 	return 0;
-err:
-	isi->active = NULL;
-	isi->sequence = 0;
-	INIT_LIST_HEAD(&isi->video_buffer_list);
-	return ret;
 }
 
 /* abort streaming and wait for last buffer */
@@ -765,14 +724,16 @@
 	struct atmel_isi *isi = ici->priv;
 	int ret;
 
-	ret = clk_enable(isi->pclk);
+	ret = clk_prepare_enable(isi->pclk);
 	if (ret)
 		return ret;
 
-	ret = clk_enable(isi->mck);
-	if (ret) {
-		clk_disable(isi->pclk);
-		return ret;
+	if (!IS_ERR(isi->mck)) {
+		ret = clk_prepare_enable(isi->mck);
+		if (ret) {
+			clk_disable_unprepare(isi->pclk);
+			return ret;
+		}
 	}
 
 	return 0;
@@ -783,8 +744,9 @@
 {
 	struct atmel_isi *isi = ici->priv;
 
-	clk_disable(isi->mck);
-	clk_disable(isi->pclk);
+	if (!IS_ERR(isi->mck))
+		clk_disable_unprepare(isi->mck);
+	clk_disable_unprepare(isi->pclk);
 }
 
 static unsigned int isi_camera_poll(struct file *file, poll_table *pt)
@@ -906,7 +868,6 @@
 	struct atmel_isi *isi = container_of(soc_host,
 					struct atmel_isi, soc_host);
 
-	free_irq(isi->irq, isi);
 	soc_camera_host_unregister(soc_host);
 	vb2_dma_contig_cleanup_ctx(isi->alloc_ctx);
 	dma_free_coherent(&pdev->dev,
@@ -914,13 +875,6 @@
 			isi->p_fb_descriptors,
 			isi->fb_descriptors_phys);
 
-	iounmap(isi->regs);
-	clk_unprepare(isi->mck);
-	clk_put(isi->mck);
-	clk_unprepare(isi->pclk);
-	clk_put(isi->pclk);
-	kfree(isi);
-
 	return 0;
 }
 
@@ -928,7 +882,6 @@
 {
 	unsigned int irq;
 	struct atmel_isi *isi;
-	struct clk *pclk;
 	struct resource *regs;
 	int ret, i;
 	struct device *dev = &pdev->dev;
@@ -936,64 +889,50 @@
 	struct isi_platform_data *pdata;
 
 	pdata = dev->platform_data;
-	if (!pdata || !pdata->data_width_flags || !pdata->mck_hz) {
+	if (!pdata || !pdata->data_width_flags) {
 		dev_err(&pdev->dev,
 			"No config available for Atmel ISI\n");
 		return -EINVAL;
 	}
 
-	regs = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-	if (!regs)
-		return -ENXIO;
-
-	pclk = clk_get(&pdev->dev, "isi_clk");
-	if (IS_ERR(pclk))
-		return PTR_ERR(pclk);
-
-	ret = clk_prepare(pclk);
-	if (ret)
-		goto err_clk_prepare_pclk;
-
-	isi = kzalloc(sizeof(struct atmel_isi), GFP_KERNEL);
+	isi = devm_kzalloc(&pdev->dev, sizeof(struct atmel_isi), GFP_KERNEL);
 	if (!isi) {
-		ret = -ENOMEM;
 		dev_err(&pdev->dev, "Can't allocate interface!\n");
-		goto err_alloc_isi;
+		return -ENOMEM;
 	}
 
-	isi->pclk = pclk;
+	isi->pclk = devm_clk_get(&pdev->dev, "isi_clk");
+	if (IS_ERR(isi->pclk))
+		return PTR_ERR(isi->pclk);
+
 	isi->pdata = pdata;
 	isi->active = NULL;
 	spin_lock_init(&isi->lock);
-	init_waitqueue_head(&isi->vsync_wq);
 	INIT_LIST_HEAD(&isi->video_buffer_list);
 	INIT_LIST_HEAD(&isi->dma_desc_head);
 
-	/* Get ISI_MCK, provided by programmable clock or external clock */
-	isi->mck = clk_get(dev, "isi_mck");
-	if (IS_ERR(isi->mck)) {
-		dev_err(dev, "Failed to get isi_mck\n");
-		ret = PTR_ERR(isi->mck);
-		goto err_clk_get;
+	/* ISI_MCK is the sensor master clock. It should be handled by the
+	 * sensor driver directly, as the ISI has no use for that clock. Make
+	 * the clock optional here while platforms transition to the correct
+	 * model.
+	 */
+	isi->mck = devm_clk_get(dev, "isi_mck");
+	if (!IS_ERR(isi->mck)) {
+		/* Set ISI_MCK's frequency, it should be faster than pixel
+		 * clock.
+		 */
+		ret = clk_set_rate(isi->mck, pdata->mck_hz);
+		if (ret < 0)
+			return ret;
 	}
 
-	ret = clk_prepare(isi->mck);
-	if (ret)
-		goto err_clk_prepare_mck;
-
-	/* Set ISI_MCK's frequency, it should be faster than pixel clock */
-	ret = clk_set_rate(isi->mck, pdata->mck_hz);
-	if (ret < 0)
-		goto err_set_mck_rate;
-
 	isi->p_fb_descriptors = dma_alloc_coherent(&pdev->dev,
 				sizeof(struct fbd) * MAX_BUFFER_NUM,
 				&isi->fb_descriptors_phys,
 				GFP_KERNEL);
 	if (!isi->p_fb_descriptors) {
-		ret = -ENOMEM;
 		dev_err(&pdev->dev, "Can't allocate descriptors!\n");
-		goto err_alloc_descriptors;
+		return -ENOMEM;
 	}
 
 	for (i = 0; i < MAX_BUFFER_NUM; i++) {
@@ -1009,9 +948,10 @@
 		goto err_alloc_ctx;
 	}
 
-	isi->regs = ioremap(regs->start, resource_size(regs));
-	if (!isi->regs) {
-		ret = -ENOMEM;
+	regs = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	isi->regs = devm_ioremap_resource(&pdev->dev, regs);
+	if (IS_ERR(isi->regs)) {
+		ret = PTR_ERR(isi->regs);
 		goto err_ioremap;
 	}
 
@@ -1028,7 +968,7 @@
 		goto err_req_irq;
 	}
 
-	ret = request_irq(irq, isi_interrupt, 0, "isi", isi);
+	ret = devm_request_irq(&pdev->dev, irq, isi_interrupt, 0, "isi", isi);
 	if (ret) {
 		dev_err(&pdev->dev, "Unable to request irq %d\n", irq);
 		goto err_req_irq;
@@ -1050,9 +990,7 @@
 	return 0;
 
 err_register_soc_camera_host:
-	free_irq(isi->irq, isi);
 err_req_irq:
-	iounmap(isi->regs);
 err_ioremap:
 	vb2_dma_contig_cleanup_ctx(isi->alloc_ctx);
 err_alloc_ctx:
@@ -1060,17 +998,6 @@
 			sizeof(struct fbd) * MAX_BUFFER_NUM,
 			isi->p_fb_descriptors,
 			isi->fb_descriptors_phys);
-err_alloc_descriptors:
-err_set_mck_rate:
-	clk_unprepare(isi->mck);
-err_clk_prepare_mck:
-	clk_put(isi->mck);
-err_clk_get:
-	kfree(isi);
-err_alloc_isi:
-	clk_unprepare(pclk);
-err_clk_prepare_pclk:
-	clk_put(pclk);
 
 	return ret;
 }
diff --git a/drivers/media/platform/soc_camera/mx2_camera.c b/drivers/media/platform/soc_camera/mx2_camera.c
index 45a0276..d73abca 100644
--- a/drivers/media/platform/soc_camera/mx2_camera.c
+++ b/drivers/media/platform/soc_camera/mx2_camera.c
@@ -659,7 +659,7 @@
 	unsigned long flags;
 
 	if (count < 2)
-		return -EINVAL;
+		return -ENOBUFS;
 
 	spin_lock_irqsave(&pcdev->lock, flags);
 
diff --git a/drivers/media/platform/soc_camera/rcar_vin.c b/drivers/media/platform/soc_camera/rcar_vin.c
index 6866bb4..3b1c05a 100644
--- a/drivers/media/platform/soc_camera/rcar_vin.c
+++ b/drivers/media/platform/soc_camera/rcar_vin.c
@@ -106,7 +106,7 @@
 #define VIN_MAX_HEIGHT		2048
 
 enum chip_id {
-	RCAR_H2,
+	RCAR_GEN2,
 	RCAR_H1,
 	RCAR_M1,
 	RCAR_E1,
@@ -302,7 +302,7 @@
 		dmr = 0;
 		break;
 	case V4L2_PIX_FMT_RGB32:
-		if (priv->chip == RCAR_H2 || priv->chip == RCAR_H1 ||
+		if (priv->chip == RCAR_GEN2 || priv->chip == RCAR_H1 ||
 		    priv->chip == RCAR_E1) {
 			dmr = VNDMR_EXRGB;
 			break;
@@ -1384,7 +1384,8 @@
 };
 
 static struct platform_device_id rcar_vin_id_table[] = {
-	{ "r8a7790-vin",  RCAR_H2 },
+	{ "r8a7791-vin",  RCAR_GEN2 },
+	{ "r8a7790-vin",  RCAR_GEN2 },
 	{ "r8a7779-vin",  RCAR_H1 },
 	{ "r8a7778-vin",  RCAR_M1 },
 	{ "uPD35004-vin", RCAR_E1 },
diff --git a/drivers/media/platform/soc_camera/soc_scale_crop.c b/drivers/media/platform/soc_camera/soc_scale_crop.c
index cbd3a34..8e74fb7 100644
--- a/drivers/media/platform/soc_camera/soc_scale_crop.c
+++ b/drivers/media/platform/soc_camera/soc_scale_crop.c
@@ -141,8 +141,8 @@
 	 * Popular special case - some cameras can only handle fixed sizes like
 	 * QVGA, VGA,... Take care to avoid infinite loop.
 	 */
-	width = max(cam_rect->width, 2);
-	height = max(cam_rect->height, 2);
+	width = max_t(unsigned int, cam_rect->width, 2);
+	height = max_t(unsigned int, cam_rect->height, 2);
 
 	/*
 	 * Loop as long as sensor is not covering the requested rectangle and
diff --git a/drivers/media/platform/ti-vpe/Makefile b/drivers/media/platform/ti-vpe/Makefile
index cbf0a80..be680f8 100644
--- a/drivers/media/platform/ti-vpe/Makefile
+++ b/drivers/media/platform/ti-vpe/Makefile
@@ -1,5 +1,5 @@
 obj-$(CONFIG_VIDEO_TI_VPE) += ti-vpe.o
 
-ti-vpe-y := vpe.o vpdma.o
+ti-vpe-y := vpe.o sc.o csc.o vpdma.o
 
 ccflags-$(CONFIG_VIDEO_TI_VPE_DEBUG) += -DDEBUG
diff --git a/drivers/media/platform/ti-vpe/csc.c b/drivers/media/platform/ti-vpe/csc.c
new file mode 100644
index 0000000..acfea50
--- /dev/null
+++ b/drivers/media/platform/ti-vpe/csc.c
@@ -0,0 +1,196 @@
+/*
+ * Color space converter library
+ *
+ * Copyright (c) 2013 Texas Instruments Inc.
+ *
+ * David Griego, <dagriego@biglakesoftware.com>
+ * Dale Farnsworth, <dale@farnsworth.org>
+ * Archit Taneja, <archit@ti.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ */
+
+#include <linux/err.h>
+#include <linux/io.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <linux/videodev2.h>
+
+#include "csc.h"
+
+/*
+ * 12 coefficients in the order:
+ * a0, b0, c0, a1, b1, c1, a2, b2, c2, d0, d1, d2
+ * (we may need to pass non-default values from user space later on, and we
+ * might need to make the coefficient struct easier to populate)
+ */
+struct colorspace_coeffs {
+	u16	sd[12];
+	u16	hd[12];
+};
+
+/* VIDEO_RANGE: limited range, GRAPHICS_RANGE: full range */
+#define	CSC_COEFFS_VIDEO_RANGE_Y2R	0
+#define	CSC_COEFFS_GRAPHICS_RANGE_Y2R	1
+#define	CSC_COEFFS_VIDEO_RANGE_R2Y	2
+#define	CSC_COEFFS_GRAPHICS_RANGE_R2Y	3
+
+/* default colorspace coefficients */
+static struct colorspace_coeffs colorspace_coeffs[4] = {
+	[CSC_COEFFS_VIDEO_RANGE_Y2R] = {
+		{
+			/* SDTV */
+			0x0400, 0x0000, 0x057D, 0x0400, 0x1EA7, 0x1D35,
+			0x0400, 0x06EF, 0x1FFE, 0x0D40, 0x0210, 0x0C88,
+		},
+		{
+			/* HDTV */
+			0x0400, 0x0000, 0x0629, 0x0400, 0x1F45, 0x1E2B,
+			0x0400, 0x0742, 0x0000, 0x0CEC, 0x0148, 0x0C60,
+		},
+	},
+	[CSC_COEFFS_GRAPHICS_RANGE_Y2R] = {
+		{
+			/* SDTV */
+			0x04A8, 0x1FFE, 0x0662, 0x04A8, 0x1E6F, 0x1CBF,
+			0x04A8, 0x0812, 0x1FFF, 0x0C84, 0x0220, 0x0BAC,
+		},
+		{
+			/* HDTV */
+			0x04A8, 0x0000, 0x072C, 0x04A8, 0x1F26, 0x1DDE,
+			0x04A8, 0x0873, 0x0000, 0x0C20, 0x0134, 0x0B7C,
+		},
+	},
+	[CSC_COEFFS_VIDEO_RANGE_R2Y] = {
+		{
+			/* SDTV */
+			0x0132, 0x0259, 0x0075, 0x1F50, 0x1EA5, 0x020B,
+			0x020B, 0x1E4A, 0x1FAB, 0x0000, 0x0200, 0x0200,
+		},
+		{
+			/* HDTV */
+			0x00DA, 0x02DC, 0x004A, 0x1F88, 0x1E6C, 0x020C,
+			0x020C, 0x1E24, 0x1FD0, 0x0000, 0x0200, 0x0200,
+		},
+	},
+	[CSC_COEFFS_GRAPHICS_RANGE_R2Y] = {
+		{
+			/* SDTV */
+			0x0107, 0x0204, 0x0064, 0x1F68, 0x1ED6, 0x01C2,
+			0x01C2, 0x1E87, 0x1FB7, 0x0040, 0x0200, 0x0200,
+		},
+		{
+			/* HDTV */
+			0x04A8, 0x0000, 0x072C, 0x04A8, 0x1F26, 0x1DDE,
+			0x04A8, 0x0873, 0x0000, 0x0C20, 0x0134, 0x0B7C,
+		},
+	},
+};
+
+void csc_dump_regs(struct csc_data *csc)
+{
+	struct device *dev = &csc->pdev->dev;
+
+	u32 read_reg(struct csc_data *csc, int offset)
+	{
+		return ioread32(csc->base + offset);
+	}
+
+#define DUMPREG(r) dev_dbg(dev, "%-35s %08x\n", #r, read_reg(csc, CSC_##r))
+
+	DUMPREG(CSC00);
+	DUMPREG(CSC01);
+	DUMPREG(CSC02);
+	DUMPREG(CSC03);
+	DUMPREG(CSC04);
+	DUMPREG(CSC05);
+
+#undef DUMPREG
+}
+
+void csc_set_coeff_bypass(struct csc_data *csc, u32 *csc_reg5)
+{
+	*csc_reg5 |= CSC_BYPASS;
+}
+
+/*
+ * set the color space converter coefficient shadow register values
+ */
+void csc_set_coeff(struct csc_data *csc, u32 *csc_reg0,
+		enum v4l2_colorspace src_colorspace,
+		enum v4l2_colorspace dst_colorspace)
+{
+	u32 *csc_reg5 = csc_reg0 + 5;
+	u32 *shadow_csc = csc_reg0;
+	struct colorspace_coeffs *sd_hd_coeffs;
+	u16 *coeff, *end_coeff;
+	enum v4l2_colorspace yuv_colorspace;
+	int sel = 0;
+
+	/*
+	 * support only graphics data range (full range) for now, a control
+	 * ioctl would be nice here
+	 */
+	/* Y2R */
+	if (dst_colorspace == V4L2_COLORSPACE_SRGB &&
+			(src_colorspace == V4L2_COLORSPACE_SMPTE170M ||
+			src_colorspace == V4L2_COLORSPACE_REC709)) {
+		/* Y2R */
+		sel = 1;
+		yuv_colorspace = src_colorspace;
+	} else if ((dst_colorspace == V4L2_COLORSPACE_SMPTE170M ||
+			dst_colorspace == V4L2_COLORSPACE_REC709) &&
+			src_colorspace == V4L2_COLORSPACE_SRGB) {
+		/* R2Y */
+		sel = 3;
+		yuv_colorspace = dst_colorspace;
+	} else {
+		*csc_reg5 |= CSC_BYPASS;
+		return;
+	}
+
+	sd_hd_coeffs = &colorspace_coeffs[sel];
+
+	/* select between SD or HD coefficients */
+	if (yuv_colorspace == V4L2_COLORSPACE_SMPTE170M)
+		coeff = sd_hd_coeffs->sd;
+	else
+		coeff = sd_hd_coeffs->hd;
+
+	end_coeff = coeff + 12;
+
+	for (; coeff < end_coeff; coeff += 2)
+		*shadow_csc++ = (*(coeff + 1) << 16) | *coeff;
+}
+
+struct csc_data *csc_create(struct platform_device *pdev)
+{
+	struct csc_data *csc;
+
+	dev_dbg(&pdev->dev, "csc_create\n");
+
+	csc = devm_kzalloc(&pdev->dev, sizeof(*csc), GFP_KERNEL);
+	if (!csc) {
+		dev_err(&pdev->dev, "couldn't alloc csc_data\n");
+		return ERR_PTR(-ENOMEM);
+	}
+
+	csc->pdev = pdev;
+
+	csc->res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
+			"vpe_csc");
+	if (csc->res == NULL) {
+		dev_err(&pdev->dev, "missing platform resources data\n");
+		return ERR_PTR(-ENODEV);
+	}
+
+	csc->base = devm_ioremap_resource(&pdev->dev, csc->res);
+	if (IS_ERR(csc->base)) {
+		dev_err(&pdev->dev, "failed to ioremap\n");
+		return ERR_CAST(csc->base);
+	}
+
+	return csc;
+}
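A hypothetical caller sketch for the converter; the six-word csc_mmr array stands in for the shadow/MMR payload the VPE driver is expected to supply and is not defined in this patch:

	u32 csc_mmr[6] = { 0 };

	/* RGB source to SD YUV destination selects the graphics-range
	 * R2Y SDTV coefficient set */
	csc_set_coeff(csc, csc_mmr,
		      V4L2_COLORSPACE_SRGB,		/* src_colorspace */
		      V4L2_COLORSPACE_SMPTE170M);	/* dst_colorspace */

	/* csc_mmr[0..5] now hold the 12 coefficients packed two per word;
	 * for an unsupported pair only CSC_BYPASS is set in csc_mmr[5] */
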
diff --git a/drivers/media/platform/ti-vpe/csc.h b/drivers/media/platform/ti-vpe/csc.h
new file mode 100644
index 0000000..1ad2b6d
--- /dev/null
+++ b/drivers/media/platform/ti-vpe/csc.h
@@ -0,0 +1,68 @@
+/*
+ * Copyright (c) 2013 Texas Instruments Inc.
+ *
+ * David Griego, <dagriego@biglakesoftware.com>
+ * Dale Farnsworth, <dale@farnsworth.org>
+ * Archit Taneja, <archit@ti.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ */
+#ifndef TI_CSC_H
+#define TI_CSC_H
+
+/* VPE color space converter regs */
+#define CSC_CSC00		0x00
+#define CSC_A0_MASK		0x1fff
+#define CSC_A0_SHIFT		0
+#define CSC_B0_MASK		0x1fff
+#define CSC_B0_SHIFT		16
+
+#define CSC_CSC01		0x04
+#define CSC_C0_MASK		0x1fff
+#define CSC_C0_SHIFT		0
+#define CSC_A1_MASK		0x1fff
+#define CSC_A1_SHIFT		16
+
+#define CSC_CSC02		0x08
+#define CSC_B1_MASK		0x1fff
+#define CSC_B1_SHIFT		0
+#define CSC_C1_MASK		0x1fff
+#define CSC_C1_SHIFT		16
+
+#define CSC_CSC03		0x0c
+#define CSC_A2_MASK		0x1fff
+#define CSC_A2_SHIFT		0
+#define CSC_B2_MASK		0x1fff
+#define CSC_B2_SHIFT		16
+
+#define CSC_CSC04		0x10
+#define CSC_C2_MASK		0x1fff
+#define CSC_C2_SHIFT		0
+#define CSC_D0_MASK		0x0fff
+#define CSC_D0_SHIFT		16
+
+#define CSC_CSC05		0x14
+#define CSC_D1_MASK		0x0fff
+#define CSC_D1_SHIFT		0
+#define CSC_D2_MASK		0x0fff
+#define CSC_D2_SHIFT		16
+
+#define CSC_BYPASS		(1 << 28)
+
+struct csc_data {
+	void __iomem		*base;
+	struct resource		*res;
+
+	struct platform_device	*pdev;
+};
+
+void csc_dump_regs(struct csc_data *csc);
+void csc_set_coeff_bypass(struct csc_data *csc, u32 *csc_reg5);
+void csc_set_coeff(struct csc_data *csc, u32 *csc_reg0,
+		enum v4l2_colorspace src_colorspace,
+		enum v4l2_colorspace dst_colorspace);
+struct csc_data *csc_create(struct platform_device *pdev);
+
+#endif
diff --git a/drivers/media/platform/ti-vpe/sc.c b/drivers/media/platform/ti-vpe/sc.c
new file mode 100644
index 0000000..93f0af54
--- /dev/null
+++ b/drivers/media/platform/ti-vpe/sc.c
@@ -0,0 +1,311 @@
+/*
+ * Scaler library
+ *
+ * Copyright (c) 2013 Texas Instruments Inc.
+ *
+ * David Griego, <dagriego@biglakesoftware.com>
+ * Dale Farnsworth, <dale@farnsworth.org>
+ * Archit Taneja, <archit@ti.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ */
+
+#include <linux/err.h>
+#include <linux/io.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+
+#include "sc.h"
+#include "sc_coeff.h"
+
+void sc_dump_regs(struct sc_data *sc)
+{
+	struct device *dev = &sc->pdev->dev;
+
+	u32 read_reg(struct sc_data *sc, int offset)
+	{
+		return ioread32(sc->base + offset);
+	}
+
+#define DUMPREG(r) dev_dbg(dev, "%-35s %08x\n", #r, read_reg(sc, CFG_##r))
+
+	DUMPREG(SC0);
+	DUMPREG(SC1);
+	DUMPREG(SC2);
+	DUMPREG(SC3);
+	DUMPREG(SC4);
+	DUMPREG(SC5);
+	DUMPREG(SC6);
+	DUMPREG(SC8);
+	DUMPREG(SC9);
+	DUMPREG(SC10);
+	DUMPREG(SC11);
+	DUMPREG(SC12);
+	DUMPREG(SC13);
+	DUMPREG(SC17);
+	DUMPREG(SC18);
+	DUMPREG(SC19);
+	DUMPREG(SC20);
+	DUMPREG(SC21);
+	DUMPREG(SC22);
+	DUMPREG(SC23);
+	DUMPREG(SC24);
+	DUMPREG(SC25);
+
+#undef DUMPREG
+}
+
+/*
+ * set the horizontal scaler coefficients according to the ratio of output to
+ * input widths, after accounting for up to two levels of decimation
+ */
+void sc_set_hs_coeffs(struct sc_data *sc, void *addr, unsigned int src_w,
+		unsigned int dst_w)
+{
+	int sixteenths;
+	int idx;
+	int i, j;
+	u16 *coeff_h = addr;
+	const u16 *cp;
+
+	if (dst_w > src_w) {
+		idx = HS_UP_SCALE;
+	} else {
+		if ((dst_w << 1) < src_w)
+			dst_w <<= 1;	/* first level decimation */
+		if ((dst_w << 1) < src_w)
+			dst_w <<= 1;	/* second level decimation */
+
+		if (dst_w == src_w) {
+			idx = HS_LE_16_16_SCALE;
+		} else {
+			sixteenths = (dst_w << 4) / src_w;
+			if (sixteenths < 8)
+				sixteenths = 8;
+			idx = HS_LT_9_16_SCALE + sixteenths - 8;
+		}
+	}
+
+	if (idx == sc->hs_index)
+		return;
+
+	cp = scaler_hs_coeffs[idx];
+
+	for (i = 0; i < SC_NUM_PHASES * 2; i++) {
+		for (j = 0; j < SC_H_NUM_TAPS; j++)
+			*coeff_h++ = *cp++;
+		/*
+		 * for each phase, the scaler expects space for 8 coefficients
+		 * in its memory. For the horizontal scaler, we copy the first
+		 * 7 coefficients and skip the last slot to move to the next
+		 * row to hold coefficients for the next phase
+		 */
+		coeff_h += SC_NUM_TAPS_MEM_ALIGN - SC_H_NUM_TAPS;
+	}
+
+	sc->hs_index = idx;
+
+	sc->load_coeff_h = true;
+}
+
+/*
+ * set the vertical scaler coefficients according to the ratio of output to
+ * input heights
+ */
+void sc_set_vs_coeffs(struct sc_data *sc, void *addr, unsigned int src_h,
+		unsigned int dst_h)
+{
+	int sixteenths;
+	int idx;
+	int i, j;
+	u16 *coeff_v = addr;
+	const u16 *cp;
+
+	if (dst_h > src_h) {
+		idx = VS_UP_SCALE;
+	} else if (dst_h == src_h) {
+		idx = VS_1_TO_1_SCALE;
+	} else {
+		sixteenths = (dst_h << 4) / src_h;
+		if (sixteenths < 8)
+			sixteenths = 8;
+		idx = VS_LT_9_16_SCALE + sixteenths - 8;
+	}
+
+	if (idx == sc->vs_index)
+		return;
+
+	cp = scaler_vs_coeffs[idx];
+
+	for (i = 0; i < SC_NUM_PHASES * 2; i++) {
+		for (j = 0; j < SC_V_NUM_TAPS; j++)
+			*coeff_v++ = *cp++;
+		/*
+		 * for the vertical scaler, we copy the first 5 coefficients and
+		 * skip the last 3 slots to move to the next row to hold
+		 * coefficients for the next phase
+		 */
+		coeff_v += SC_NUM_TAPS_MEM_ALIGN - SC_V_NUM_TAPS;
+	}
+
+	sc->vs_index = idx;
+	sc->load_coeff_v = true;
+}
+
+void sc_config_scaler(struct sc_data *sc, u32 *sc_reg0, u32 *sc_reg8,
+		u32 *sc_reg17, unsigned int src_w, unsigned int src_h,
+		unsigned int dst_w, unsigned int dst_h)
+{
+	struct device *dev = &sc->pdev->dev;
+	u32 val;
+	int dcm_x, dcm_shift;
+	bool use_rav;
+	unsigned long lltmp;
+	u32 lin_acc_inc, lin_acc_inc_u;
+	u32 col_acc_offset;
+	u16 factor = 0;
+	int row_acc_init_rav = 0, row_acc_init_rav_b = 0;
+	u32 row_acc_inc = 0, row_acc_offset = 0, row_acc_offset_b = 0;
+	/*
+	 * locations of SC registers in payload memory with respect to the first
+	 * register in the mmr address data block
+	 */
+	u32 *sc_reg9 = sc_reg8 + 1;
+	u32 *sc_reg12 = sc_reg8 + 4;
+	u32 *sc_reg13 = sc_reg8 + 5;
+	u32 *sc_reg24 = sc_reg17 + 7;
+
+	val = sc_reg0[0];
+
+	/* clear all the features (they may get enabled elsewhere later) */
+	val &= ~(CFG_SELFGEN_FID | CFG_TRIM | CFG_ENABLE_SIN2_VER_INTP |
+		CFG_INTERLACE_I | CFG_DCM_4X | CFG_DCM_2X | CFG_AUTO_HS |
+		CFG_ENABLE_EV | CFG_USE_RAV | CFG_INVT_FID | CFG_SC_BYPASS |
+		CFG_INTERLACE_O | CFG_Y_PK_EN | CFG_HP_BYPASS | CFG_LINEAR);
+
+	if (src_w == dst_w && src_h == dst_h) {
+		val |= CFG_SC_BYPASS;
+		sc_reg0[0] = val;
+		return;
+	}
+
+	/* we only support linear scaling for now */
+	val |= CFG_LINEAR;
+
+	/* configure horizontal scaler */
+
+	/* enable 2X or 4X decimation */
+	dcm_x = src_w / dst_w;
+	if (dcm_x > 4) {
+		val |= CFG_DCM_4X;
+		dcm_shift = 2;
+	} else if (dcm_x > 2) {
+		val |= CFG_DCM_2X;
+		dcm_shift = 1;
+	} else {
+		dcm_shift = 0;
+	}
+
+	lltmp = dst_w - 1;
+	lin_acc_inc = div64_u64(((u64)(src_w >> dcm_shift) - 1) << 24, lltmp);
+	lin_acc_inc_u = 0;
+	col_acc_offset = 0;
+
+	dev_dbg(dev, "hs config: src_w = %d, dst_w = %d, decimation = %s, lin_acc_inc = %08x\n",
+		src_w, dst_w, dcm_shift == 2 ? "4x" :
+		(dcm_shift == 1 ? "2x" : "none"), lin_acc_inc);
+
+	/* configure vertical scaler */
+
+	/* use RAV for vertical scaler if vertical downscaling is > 4x */
+	if (dst_h < (src_h >> 2)) {
+		use_rav = true;
+		val |= CFG_USE_RAV;
+	} else {
+		use_rav = false;
+	}
+
+	if (use_rav) {
+		/* use RAV */
+		factor = (u16) ((dst_h << 10) / src_h);
+
+		row_acc_init_rav = factor + ((1 + factor) >> 1);
+		if (row_acc_init_rav >= 1024)
+			row_acc_init_rav -= 1024;
+
+		row_acc_init_rav_b = row_acc_init_rav +
+				(1 + (row_acc_init_rav >> 1)) -
+				(1024 >> 1);
+
+		if (row_acc_init_rav_b < 0) {
+			row_acc_init_rav_b += row_acc_init_rav;
+			row_acc_init_rav *= 2;
+		}
+
+		dev_dbg(dev, "vs config(RAV): src_h = %d, dst_h = %d, factor = %d, acc_init = %08x, acc_init_b = %08x\n",
+			src_h, dst_h, factor, row_acc_init_rav,
+			row_acc_init_rav_b);
+	} else {
+		/* use polyphase */
+		row_acc_inc = ((src_h - 1) << 16) / (dst_h - 1);
+		row_acc_offset = 0;
+		row_acc_offset_b = 0;
+
+		dev_dbg(dev, "vs config(POLY): src_h = %d, dst_h = %d, row_acc_inc = %08x\n",
+			src_h, dst_h, row_acc_inc);
+	}
+
+	sc_reg0[0] = val;
+	sc_reg0[1] = row_acc_inc;
+	sc_reg0[2] = row_acc_offset;
+	sc_reg0[3] = row_acc_offset_b;
+
+	sc_reg0[4] = ((lin_acc_inc_u & CFG_LIN_ACC_INC_U_MASK) <<
+			CFG_LIN_ACC_INC_U_SHIFT) | (dst_w << CFG_TAR_W_SHIFT) |
+			(dst_h << CFG_TAR_H_SHIFT);
+
+	sc_reg0[5] = (src_w << CFG_SRC_W_SHIFT) | (src_h << CFG_SRC_H_SHIFT);
+
+	sc_reg0[6] = (row_acc_init_rav_b << CFG_ROW_ACC_INIT_RAV_B_SHIFT) |
+		(row_acc_init_rav << CFG_ROW_ACC_INIT_RAV_SHIFT);
+
+	*sc_reg9 = lin_acc_inc;
+
+	*sc_reg12 = col_acc_offset << CFG_COL_ACC_OFFSET_SHIFT;
+
+	*sc_reg13 = factor;
+
+	*sc_reg24 = (src_w << CFG_ORG_W_SHIFT) | (src_h << CFG_ORG_H_SHIFT);
+}
+
+struct sc_data *sc_create(struct platform_device *pdev)
+{
+	struct sc_data *sc;
+
+	dev_dbg(&pdev->dev, "sc_create\n");
+
+	sc = devm_kzalloc(&pdev->dev, sizeof(*sc), GFP_KERNEL);
+	if (!sc) {
+		dev_err(&pdev->dev, "couldn't alloc sc_data\n");
+		return ERR_PTR(-ENOMEM);
+	}
+
+	sc->pdev = pdev;
+
+	sc->res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "sc");
+	if (!sc->res) {
+		dev_err(&pdev->dev, "missing platform resources data\n");
+		return ERR_PTR(-ENODEV);
+	}
+
+	sc->base = devm_ioremap_resource(&pdev->dev, sc->res);
+	if (IS_ERR(sc->base)) {
+		dev_err(&pdev->dev, "failed to ioremap\n");
+		return ERR_CAST(sc->base);
+	}
+
+	return sc;
+}
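A hypothetical usage sketch of the scaler library; the coefficient buffers and the mmr payload array are assumed to be allocated by the VPE driver, and the 0/8/17 offsets simply mirror the SC register numbering used by sc_config_scaler():

	/* coefficients are re-generated only when the ratio class changes */
	sc_set_hs_coeffs(sc, hs_coeff_buf, src_w, dst_w);
	sc_set_vs_coeffs(sc, vs_coeff_buf, src_h, dst_h);

	/* fill the SC0..SC24 shadow words for this geometry */
	sc_config_scaler(sc, &mmr[0], &mmr[8], &mmr[17],
			 src_w, src_h, dst_w, dst_h);
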
diff --git a/drivers/media/platform/ti-vpe/sc.h b/drivers/media/platform/ti-vpe/sc.h
new file mode 100644
index 0000000..60e411e
--- /dev/null
+++ b/drivers/media/platform/ti-vpe/sc.h
@@ -0,0 +1,208 @@
+/*
+ * Copyright (c) 2013 Texas Instruments Inc.
+ *
+ * David Griego, <dagriego@biglakesoftware.com>
+ * Dale Farnsworth, <dale@farnsworth.org>
+ * Archit Taneja, <archit@ti.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ */
+#ifndef TI_SC_H
+#define TI_SC_H
+
+/* Scaler regs */
+#define CFG_SC0				0x0
+#define CFG_INTERLACE_O			(1 << 0)
+#define CFG_LINEAR			(1 << 1)
+#define CFG_SC_BYPASS			(1 << 2)
+#define CFG_INVT_FID			(1 << 3)
+#define CFG_USE_RAV			(1 << 4)
+#define CFG_ENABLE_EV			(1 << 5)
+#define CFG_AUTO_HS			(1 << 6)
+#define CFG_DCM_2X			(1 << 7)
+#define CFG_DCM_4X			(1 << 8)
+#define CFG_HP_BYPASS			(1 << 9)
+#define CFG_INTERLACE_I			(1 << 10)
+#define CFG_ENABLE_SIN2_VER_INTP	(1 << 11)
+#define CFG_Y_PK_EN			(1 << 14)
+#define CFG_TRIM			(1 << 15)
+#define CFG_SELFGEN_FID			(1 << 16)
+
+#define CFG_SC1				0x4
+#define CFG_ROW_ACC_INC_MASK		0x07ffffff
+#define CFG_ROW_ACC_INC_SHIFT		0
+
+#define CFG_SC2				0x08
+#define CFG_ROW_ACC_OFFSET_MASK		0x0fffffff
+#define CFG_ROW_ACC_OFFSET_SHIFT	0
+
+#define CFG_SC3				0x0c
+#define CFG_ROW_ACC_OFFSET_B_MASK	0x0fffffff
+#define CFG_ROW_ACC_OFFSET_B_SHIFT	0
+
+#define CFG_SC4				0x10
+#define CFG_TAR_H_MASK			0x07ff
+#define CFG_TAR_H_SHIFT			0
+#define CFG_TAR_W_MASK			0x07ff
+#define CFG_TAR_W_SHIFT			12
+#define CFG_LIN_ACC_INC_U_MASK		0x07
+#define CFG_LIN_ACC_INC_U_SHIFT		24
+#define CFG_NLIN_ACC_INIT_U_MASK	0x07
+#define CFG_NLIN_ACC_INIT_U_SHIFT	28
+
+#define CFG_SC5				0x14
+#define CFG_SRC_H_MASK			0x07ff
+#define CFG_SRC_H_SHIFT			0
+#define CFG_SRC_W_MASK			0x07ff
+#define CFG_SRC_W_SHIFT			12
+#define CFG_NLIN_ACC_INC_U_MASK		0x07
+#define CFG_NLIN_ACC_INC_U_SHIFT	24
+
+#define CFG_SC6				0x18
+#define CFG_ROW_ACC_INIT_RAV_MASK	0x03ff
+#define CFG_ROW_ACC_INIT_RAV_SHIFT	0
+#define CFG_ROW_ACC_INIT_RAV_B_MASK	0x03ff
+#define CFG_ROW_ACC_INIT_RAV_B_SHIFT	10
+
+#define CFG_SC8				0x20
+#define CFG_NLIN_LEFT_MASK		0x07ff
+#define CFG_NLIN_LEFT_SHIFT		0
+#define CFG_NLIN_RIGHT_MASK		0x07ff
+#define CFG_NLIN_RIGHT_SHIFT		12
+
+#define CFG_SC9				0x24
+#define CFG_LIN_ACC_INC			CFG_SC9
+
+#define CFG_SC10			0x28
+#define CFG_NLIN_ACC_INIT		CFG_SC10
+
+#define CFG_SC11			0x2c
+#define CFG_NLIN_ACC_INC		CFG_SC11
+
+#define CFG_SC12			0x30
+#define CFG_COL_ACC_OFFSET_MASK		0x01ffffff
+#define CFG_COL_ACC_OFFSET_SHIFT	0
+
+#define CFG_SC13			0x34
+#define CFG_SC_FACTOR_RAV_MASK		0xff
+#define CFG_SC_FACTOR_RAV_SHIFT		0
+#define CFG_CHROMA_INTP_THR_MASK	0x03ff
+#define CFG_CHROMA_INTP_THR_SHIFT	12
+#define CFG_DELTA_CHROMA_THR_MASK	0x0f
+#define CFG_DELTA_CHROMA_THR_SHIFT	24
+
+#define CFG_SC17			0x44
+#define CFG_EV_THR_MASK			0x03ff
+#define CFG_EV_THR_SHIFT		12
+#define CFG_DELTA_LUMA_THR_MASK		0x0f
+#define CFG_DELTA_LUMA_THR_SHIFT	24
+#define CFG_DELTA_EV_THR_MASK		0x0f
+#define CFG_DELTA_EV_THR_SHIFT		28
+
+#define CFG_SC18			0x48
+#define CFG_HS_FACTOR_MASK		0x03ff
+#define CFG_HS_FACTOR_SHIFT		0
+#define CFG_CONF_DEFAULT_MASK		0x01ff
+#define CFG_CONF_DEFAULT_SHIFT		16
+
+#define CFG_SC19			0x4c
+#define CFG_HPF_COEFF0_MASK		0xff
+#define CFG_HPF_COEFF0_SHIFT		0
+#define CFG_HPF_COEFF1_MASK		0xff
+#define CFG_HPF_COEFF1_SHIFT		8
+#define CFG_HPF_COEFF2_MASK		0xff
+#define CFG_HPF_COEFF2_SHIFT		16
+#define CFG_HPF_COEFF3_MASK		0xff
+#define CFG_HPF_COEFF3_SHIFT		23
+
+#define CFG_SC20			0x50
+#define CFG_HPF_COEFF4_MASK		0xff
+#define CFG_HPF_COEFF4_SHIFT		0
+#define CFG_HPF_COEFF5_MASK		0xff
+#define CFG_HPF_COEFF5_SHIFT		8
+#define CFG_HPF_NORM_SHIFT_MASK		0x07
+#define CFG_HPF_NORM_SHIFT_SHIFT	16
+#define CFG_NL_LIMIT_MASK		0x1ff
+#define CFG_NL_LIMIT_SHIFT		20
+
+#define CFG_SC21			0x54
+#define CFG_NL_LO_THR_MASK		0x01ff
+#define CFG_NL_LO_THR_SHIFT		0
+#define CFG_NL_LO_SLOPE_MASK		0xff
+#define CFG_NL_LO_SLOPE_SHIFT		16
+
+#define CFG_SC22			0x58
+#define CFG_NL_HI_THR_MASK		0x01ff
+#define CFG_NL_HI_THR_SHIFT		0
+#define CFG_NL_HI_SLOPE_SH_MASK		0x07
+#define CFG_NL_HI_SLOPE_SH_SHIFT	16
+
+#define CFG_SC23			0x5c
+#define CFG_GRADIENT_THR_MASK		0x07ff
+#define CFG_GRADIENT_THR_SHIFT		0
+#define CFG_GRADIENT_THR_RANGE_MASK	0x0f
+#define CFG_GRADIENT_THR_RANGE_SHIFT	12
+#define CFG_MIN_GY_THR_MASK		0xff
+#define CFG_MIN_GY_THR_SHIFT		16
+#define CFG_MIN_GY_THR_RANGE_MASK	0x0f
+#define CFG_MIN_GY_THR_RANGE_SHIFT	28
+
+#define CFG_SC24			0x60
+#define CFG_ORG_H_MASK			0x07ff
+#define CFG_ORG_H_SHIFT			0
+#define CFG_ORG_W_MASK			0x07ff
+#define CFG_ORG_W_SHIFT			16
+
+#define CFG_SC25			0x64
+#define CFG_OFF_H_MASK			0x07ff
+#define CFG_OFF_H_SHIFT			0
+#define CFG_OFF_W_MASK			0x07ff
+#define CFG_OFF_W_SHIFT			16
+
+/* number of phases supported by the polyphase scalers */
+#define SC_NUM_PHASES			32
+
+/* number of taps used by horizontal polyphase scaler */
+#define SC_H_NUM_TAPS			7
+
+/* number of taps used by vertical polyphase scaler */
+#define SC_V_NUM_TAPS			5
+
+/* number of taps expected by the scaler in its coefficient memory */
+#define SC_NUM_TAPS_MEM_ALIGN		8
+
+/*
+ * coefficient memory size in bytes:
+ * num phases x num sets (luma and chroma) x num taps (aligned) x coeff size
+ */
+#define SC_COEF_SRAM_SIZE	(SC_NUM_PHASES * 2 * SC_NUM_TAPS_MEM_ALIGN * 2)
+
+struct sc_data {
+	void __iomem		*base;
+	struct resource		*res;
+
+	dma_addr_t		loaded_coeff_h; /* loaded h coeffs in SC */
+	dma_addr_t		loaded_coeff_v; /* loaded v coeffs in SC */
+
+	bool			load_coeff_h;	/* have new h SC coeffs */
+	bool			load_coeff_v;	/* have new v SC coeffs */
+
+	unsigned int		hs_index;	/* h SC coeffs selector */
+	unsigned int		vs_index;	/* v SC coeffs selector */
+
+	struct platform_device *pdev;
+};
+
+void sc_dump_regs(struct sc_data *sc);
+void sc_set_hs_coeffs(struct sc_data *sc, void *addr, unsigned int src_w,
+		unsigned int dst_w);
+void sc_set_vs_coeffs(struct sc_data *sc, void *addr, unsigned int src_h,
+		unsigned int dst_h);
+void sc_config_scaler(struct sc_data *sc, u32 *sc_reg0, u32 *sc_reg8,
+		u32 *sc_reg17, unsigned int src_w, unsigned int src_h,
+		unsigned int dst_w, unsigned int dst_h);
+struct sc_data *sc_create(struct platform_device *pdev);
+
+#endif
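For reference, the coefficient SRAM size defined above works out to:

	SC_COEF_SRAM_SIZE = SC_NUM_PHASES * 2 * SC_NUM_TAPS_MEM_ALIGN * 2
	                  = 32 phases * 2 sets (luma + chroma)
	                    * 8 tap slots * 2 bytes per u16 coefficient
	                  = 1024 bytes per coefficient block
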
diff --git a/drivers/media/platform/ti-vpe/sc_coeff.h b/drivers/media/platform/ti-vpe/sc_coeff.h
new file mode 100644
index 0000000..5bfa5c0
--- /dev/null
+++ b/drivers/media/platform/ti-vpe/sc_coeff.h
@@ -0,0 +1,1342 @@
+/*
+ * VPE SC coefs
+ *
+ * Copyright (c) 2013 Texas Instruments Inc.
+ *
+ * David Griego, <dagriego@biglakesoftware.com>
+ * Dale Farnsworth, <dale@farnsworth.org>
+ * Archit Taneja, <archit@ti.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ */
+
+#ifndef __TI_SC_COEFF_H
+#define __TI_SC_COEFF_H
+
+/* horizontal scaler coefficients */
+enum {
+	HS_UP_SCALE = 0,
+	HS_LT_9_16_SCALE,
+	HS_LT_10_16_SCALE,
+	HS_LT_11_16_SCALE,
+	HS_LT_12_16_SCALE,
+	HS_LT_13_16_SCALE,
+	HS_LT_14_16_SCALE,
+	HS_LT_15_16_SCALE,
+	HS_LE_16_16_SCALE,
+};
+
+static const u16 scaler_hs_coeffs[13][SC_NUM_PHASES * 2 * SC_H_NUM_TAPS] = {
+	[HS_UP_SCALE] = {
+		/* Luma */
+		0x001F, 0x1F90, 0x00D2, 0x06FE, 0x00D2, 0x1F90, 0x001F,
+		0x001C, 0x1F9E, 0x009F, 0x06FB, 0x0108, 0x1F82, 0x0022,
+		0x0019, 0x1FAC, 0x006F, 0x06F3, 0x0140, 0x1F74, 0x0025,
+		0x0016, 0x1FB9, 0x0041, 0x06E7, 0x017B, 0x1F66, 0x0028,
+		0x0013, 0x1FC6, 0x0017, 0x06D6, 0x01B7, 0x1F58, 0x002B,
+		0x0010, 0x1FD3, 0x1FEF, 0x06C0, 0x01F6, 0x1F4B, 0x002D,
+		0x000E, 0x1FDF, 0x1FCB, 0x06A5, 0x0235, 0x1F3F, 0x002F,
+		0x000B, 0x1FEA, 0x1FAA, 0x0686, 0x0277, 0x1F33, 0x0031,
+		0x0009, 0x1FF5, 0x1F8C, 0x0663, 0x02B8, 0x1F28, 0x0033,
+		0x0007, 0x1FFF, 0x1F72, 0x063A, 0x02FB, 0x1F1F, 0x0034,
+		0x0005, 0x0008, 0x1F5A, 0x060F, 0x033E, 0x1F17, 0x0035,
+		0x0003, 0x0010, 0x1F46, 0x05E0, 0x0382, 0x1F10, 0x0035,
+		0x0002, 0x0017, 0x1F34, 0x05AF, 0x03C5, 0x1F0B, 0x0034,
+		0x0001, 0x001E, 0x1F26, 0x0579, 0x0407, 0x1F08, 0x0033,
+		0x0000, 0x0023, 0x1F1A, 0x0541, 0x0449, 0x1F07, 0x0032,
+		0x1FFF, 0x0028, 0x1F12, 0x0506, 0x048A, 0x1F08, 0x002F,
+		0x002C, 0x1F0C, 0x04C8, 0x04C8, 0x1F0C, 0x002C, 0x0000,
+		0x002F, 0x1F08, 0x048A, 0x0506, 0x1F12, 0x0028, 0x1FFF,
+		0x0032, 0x1F07, 0x0449, 0x0541, 0x1F1A, 0x0023, 0x0000,
+		0x0033, 0x1F08, 0x0407, 0x0579, 0x1F26, 0x001E, 0x0001,
+		0x0034, 0x1F0B, 0x03C5, 0x05AF, 0x1F34, 0x0017, 0x0002,
+		0x0035, 0x1F10, 0x0382, 0x05E0, 0x1F46, 0x0010, 0x0003,
+		0x0035, 0x1F17, 0x033E, 0x060F, 0x1F5A, 0x0008, 0x0005,
+		0x0034, 0x1F1F, 0x02FB, 0x063A, 0x1F72, 0x1FFF, 0x0007,
+		0x0033, 0x1F28, 0x02B8, 0x0663, 0x1F8C, 0x1FF5, 0x0009,
+		0x0031, 0x1F33, 0x0277, 0x0686, 0x1FAA, 0x1FEA, 0x000B,
+		0x002F, 0x1F3F, 0x0235, 0x06A5, 0x1FCB, 0x1FDF, 0x000E,
+		0x002D, 0x1F4B, 0x01F6, 0x06C0, 0x1FEF, 0x1FD3, 0x0010,
+		0x002B, 0x1F58, 0x01B7, 0x06D6, 0x0017, 0x1FC6, 0x0013,
+		0x0028, 0x1F66, 0x017B, 0x06E7, 0x0041, 0x1FB9, 0x0016,
+		0x0025, 0x1F74, 0x0140, 0x06F3, 0x006F, 0x1FAC, 0x0019,
+		0x0022, 0x1F82, 0x0108, 0x06FB, 0x009F, 0x1F9E, 0x001C,
+		/* Chroma */
+		0x001F, 0x1F90, 0x00D2, 0x06FE, 0x00D2, 0x1F90, 0x001F,
+		0x001C, 0x1F9E, 0x009F, 0x06FB, 0x0108, 0x1F82, 0x0022,
+		0x0019, 0x1FAC, 0x006F, 0x06F3, 0x0140, 0x1F74, 0x0025,
+		0x0016, 0x1FB9, 0x0041, 0x06E7, 0x017B, 0x1F66, 0x0028,
+		0x0013, 0x1FC6, 0x0017, 0x06D6, 0x01B7, 0x1F58, 0x002B,
+		0x0010, 0x1FD3, 0x1FEF, 0x06C0, 0x01F6, 0x1F4B, 0x002D,
+		0x000E, 0x1FDF, 0x1FCB, 0x06A5, 0x0235, 0x1F3F, 0x002F,
+		0x000B, 0x1FEA, 0x1FAA, 0x0686, 0x0277, 0x1F33, 0x0031,
+		0x0009, 0x1FF5, 0x1F8C, 0x0663, 0x02B8, 0x1F28, 0x0033,
+		0x0007, 0x1FFF, 0x1F72, 0x063A, 0x02FB, 0x1F1F, 0x0034,
+		0x0005, 0x0008, 0x1F5A, 0x060F, 0x033E, 0x1F17, 0x0035,
+		0x0003, 0x0010, 0x1F46, 0x05E0, 0x0382, 0x1F10, 0x0035,
+		0x0002, 0x0017, 0x1F34, 0x05AF, 0x03C5, 0x1F0B, 0x0034,
+		0x0001, 0x001E, 0x1F26, 0x0579, 0x0407, 0x1F08, 0x0033,
+		0x0000, 0x0023, 0x1F1A, 0x0541, 0x0449, 0x1F07, 0x0032,
+		0x1FFF, 0x0028, 0x1F12, 0x0506, 0x048A, 0x1F08, 0x002F,
+		0x002C, 0x1F0C, 0x04C8, 0x04C8, 0x1F0C, 0x002C, 0x0000,
+		0x002F, 0x1F08, 0x048A, 0x0506, 0x1F12, 0x0028, 0x1FFF,
+		0x0032, 0x1F07, 0x0449, 0x0541, 0x1F1A, 0x0023, 0x0000,
+		0x0033, 0x1F08, 0x0407, 0x0579, 0x1F26, 0x001E, 0x0001,
+		0x0034, 0x1F0B, 0x03C5, 0x05AF, 0x1F34, 0x0017, 0x0002,
+		0x0035, 0x1F10, 0x0382, 0x05E0, 0x1F46, 0x0010, 0x0003,
+		0x0035, 0x1F17, 0x033E, 0x060F, 0x1F5A, 0x0008, 0x0005,
+		0x0034, 0x1F1F, 0x02FB, 0x063A, 0x1F72, 0x1FFF, 0x0007,
+		0x0033, 0x1F28, 0x02B8, 0x0663, 0x1F8C, 0x1FF5, 0x0009,
+		0x0031, 0x1F33, 0x0277, 0x0686, 0x1FAA, 0x1FEA, 0x000B,
+		0x002F, 0x1F3F, 0x0235, 0x06A5, 0x1FCB, 0x1FDF, 0x000E,
+		0x002D, 0x1F4B, 0x01F6, 0x06C0, 0x1FEF, 0x1FD3, 0x0010,
+		0x002B, 0x1F58, 0x01B7, 0x06D6, 0x0017, 0x1FC6, 0x0013,
+		0x0028, 0x1F66, 0x017B, 0x06E7, 0x0041, 0x1FB9, 0x0016,
+		0x0025, 0x1F74, 0x0140, 0x06F3, 0x006F, 0x1FAC, 0x0019,
+		0x0022, 0x1F82, 0x0108, 0x06FB, 0x009F, 0x1F9E, 0x001C,
+	},
+	[HS_LT_9_16_SCALE] = {
+		/* Luma */
+		0x1FA3, 0x005E, 0x024A, 0x036A, 0x024A, 0x005E, 0x1FA3,
+		0x1FA3, 0x0052, 0x023A, 0x036A, 0x0259, 0x006A, 0x1FA4,
+		0x1FA3, 0x0046, 0x022A, 0x036A, 0x0269, 0x0076, 0x1FA4,
+		0x1FA3, 0x003B, 0x021A, 0x0368, 0x0278, 0x0083, 0x1FA5,
+		0x1FA4, 0x0031, 0x020A, 0x0365, 0x0286, 0x0090, 0x1FA6,
+		0x1FA5, 0x0026, 0x01F9, 0x0362, 0x0294, 0x009E, 0x1FA8,
+		0x1FA6, 0x001C, 0x01E8, 0x035E, 0x02A3, 0x00AB, 0x1FAA,
+		0x1FA7, 0x0013, 0x01D7, 0x035A, 0x02B0, 0x00B9, 0x1FAC,
+		0x1FA9, 0x000A, 0x01C6, 0x0354, 0x02BD, 0x00C7, 0x1FAF,
+		0x1FAA, 0x0001, 0x01B6, 0x034E, 0x02C9, 0x00D6, 0x1FB2,
+		0x1FAC, 0x1FF9, 0x01A5, 0x0347, 0x02D5, 0x00E5, 0x1FB5,
+		0x1FAE, 0x1FF1, 0x0194, 0x0340, 0x02E1, 0x00F3, 0x1FB9,
+		0x1FB0, 0x1FEA, 0x0183, 0x0338, 0x02EC, 0x0102, 0x1FBD,
+		0x1FB2, 0x1FE3, 0x0172, 0x0330, 0x02F6, 0x0112, 0x1FC1,
+		0x1FB4, 0x1FDC, 0x0161, 0x0327, 0x0301, 0x0121, 0x1FC6,
+		0x1FB7, 0x1FD6, 0x0151, 0x031D, 0x030A, 0x0130, 0x1FCB,
+		0x1FD2, 0x0136, 0x02F8, 0x02F8, 0x0136, 0x1FD2, 0x0000,
+		0x1FCB, 0x0130, 0x030A, 0x031D, 0x0151, 0x1FD6, 0x1FB7,
+		0x1FC6, 0x0121, 0x0301, 0x0327, 0x0161, 0x1FDC, 0x1FB4,
+		0x1FC1, 0x0112, 0x02F6, 0x0330, 0x0172, 0x1FE3, 0x1FB2,
+		0x1FBD, 0x0102, 0x02EC, 0x0338, 0x0183, 0x1FEA, 0x1FB0,
+		0x1FB9, 0x00F3, 0x02E1, 0x0340, 0x0194, 0x1FF1, 0x1FAE,
+		0x1FB5, 0x00E5, 0x02D5, 0x0347, 0x01A5, 0x1FF9, 0x1FAC,
+		0x1FB2, 0x00D6, 0x02C9, 0x034E, 0x01B6, 0x0001, 0x1FAA,
+		0x1FAF, 0x00C7, 0x02BD, 0x0354, 0x01C6, 0x000A, 0x1FA9,
+		0x1FAC, 0x00B9, 0x02B0, 0x035A, 0x01D7, 0x0013, 0x1FA7,
+		0x1FAA, 0x00AB, 0x02A3, 0x035E, 0x01E8, 0x001C, 0x1FA6,
+		0x1FA8, 0x009E, 0x0294, 0x0362, 0x01F9, 0x0026, 0x1FA5,
+		0x1FA6, 0x0090, 0x0286, 0x0365, 0x020A, 0x0031, 0x1FA4,
+		0x1FA5, 0x0083, 0x0278, 0x0368, 0x021A, 0x003B, 0x1FA3,
+		0x1FA4, 0x0076, 0x0269, 0x036A, 0x022A, 0x0046, 0x1FA3,
+		0x1FA4, 0x006A, 0x0259, 0x036A, 0x023A, 0x0052, 0x1FA3,
+		/* Chroma */
+		0x1FA3, 0x005E, 0x024A, 0x036A, 0x024A, 0x005E, 0x1FA3,
+		0x1FA3, 0x0052, 0x023A, 0x036A, 0x0259, 0x006A, 0x1FA4,
+		0x1FA3, 0x0046, 0x022A, 0x036A, 0x0269, 0x0076, 0x1FA4,
+		0x1FA3, 0x003B, 0x021A, 0x0368, 0x0278, 0x0083, 0x1FA5,
+		0x1FA4, 0x0031, 0x020A, 0x0365, 0x0286, 0x0090, 0x1FA6,
+		0x1FA5, 0x0026, 0x01F9, 0x0362, 0x0294, 0x009E, 0x1FA8,
+		0x1FA6, 0x001C, 0x01E8, 0x035E, 0x02A3, 0x00AB, 0x1FAA,
+		0x1FA7, 0x0013, 0x01D7, 0x035A, 0x02B0, 0x00B9, 0x1FAC,
+		0x1FA9, 0x000A, 0x01C6, 0x0354, 0x02BD, 0x00C7, 0x1FAF,
+		0x1FAA, 0x0001, 0x01B6, 0x034E, 0x02C9, 0x00D6, 0x1FB2,
+		0x1FAC, 0x1FF9, 0x01A5, 0x0347, 0x02D5, 0x00E5, 0x1FB5,
+		0x1FAE, 0x1FF1, 0x0194, 0x0340, 0x02E1, 0x00F3, 0x1FB9,
+		0x1FB0, 0x1FEA, 0x0183, 0x0338, 0x02EC, 0x0102, 0x1FBD,
+		0x1FB2, 0x1FE3, 0x0172, 0x0330, 0x02F6, 0x0112, 0x1FC1,
+		0x1FB4, 0x1FDC, 0x0161, 0x0327, 0x0301, 0x0121, 0x1FC6,
+		0x1FB7, 0x1FD6, 0x0151, 0x031D, 0x030A, 0x0130, 0x1FCB,
+		0x1FD2, 0x0136, 0x02F8, 0x02F8, 0x0136, 0x1FD2, 0x0000,
+		0x1FCB, 0x0130, 0x030A, 0x031D, 0x0151, 0x1FD6, 0x1FB7,
+		0x1FC6, 0x0121, 0x0301, 0x0327, 0x0161, 0x1FDC, 0x1FB4,
+		0x1FC1, 0x0112, 0x02F6, 0x0330, 0x0172, 0x1FE3, 0x1FB2,
+		0x1FBD, 0x0102, 0x02EC, 0x0338, 0x0183, 0x1FEA, 0x1FB0,
+		0x1FB9, 0x00F3, 0x02E1, 0x0340, 0x0194, 0x1FF1, 0x1FAE,
+		0x1FB5, 0x00E5, 0x02D5, 0x0347, 0x01A5, 0x1FF9, 0x1FAC,
+		0x1FB2, 0x00D6, 0x02C9, 0x034E, 0x01B6, 0x0001, 0x1FAA,
+		0x1FAF, 0x00C7, 0x02BD, 0x0354, 0x01C6, 0x000A, 0x1FA9,
+		0x1FAC, 0x00B9, 0x02B0, 0x035A, 0x01D7, 0x0013, 0x1FA7,
+		0x1FAA, 0x00AB, 0x02A3, 0x035E, 0x01E8, 0x001C, 0x1FA6,
+		0x1FA8, 0x009E, 0x0294, 0x0362, 0x01F9, 0x0026, 0x1FA5,
+		0x1FA6, 0x0090, 0x0286, 0x0365, 0x020A, 0x0031, 0x1FA4,
+		0x1FA5, 0x0083, 0x0278, 0x0368, 0x021A, 0x003B, 0x1FA3,
+		0x1FA4, 0x0076, 0x0269, 0x036A, 0x022A, 0x0046, 0x1FA3,
+		0x1FA4, 0x006A, 0x0259, 0x036A, 0x023A, 0x0052, 0x1FA3,
+	},
+	[HS_LT_10_16_SCALE] = {
+		/* Luma */
+		0x1F8D, 0x000C, 0x026A, 0x03FA, 0x026A, 0x000C, 0x1F8D,
+		0x1F8F, 0x0000, 0x0255, 0x03FA, 0x027F, 0x0019, 0x1F8A,
+		0x1F92, 0x1FF5, 0x023F, 0x03F8, 0x0293, 0x0027, 0x1F88,
+		0x1F95, 0x1FEA, 0x022A, 0x03F6, 0x02A7, 0x0034, 0x1F86,
+		0x1F99, 0x1FDF, 0x0213, 0x03F2, 0x02BB, 0x0043, 0x1F85,
+		0x1F9C, 0x1FD5, 0x01FE, 0x03ED, 0x02CF, 0x0052, 0x1F83,
+		0x1FA0, 0x1FCC, 0x01E8, 0x03E7, 0x02E1, 0x0061, 0x1F83,
+		0x1FA4, 0x1FC3, 0x01D2, 0x03E0, 0x02F4, 0x0071, 0x1F82,
+		0x1FA7, 0x1FBB, 0x01BC, 0x03D9, 0x0306, 0x0081, 0x1F82,
+		0x1FAB, 0x1FB4, 0x01A6, 0x03D0, 0x0317, 0x0092, 0x1F82,
+		0x1FAF, 0x1FAD, 0x0190, 0x03C7, 0x0327, 0x00A3, 0x1F83,
+		0x1FB3, 0x1FA7, 0x017A, 0x03BC, 0x0337, 0x00B5, 0x1F84,
+		0x1FB8, 0x1FA1, 0x0165, 0x03B0, 0x0346, 0x00C7, 0x1F85,
+		0x1FBC, 0x1F9C, 0x0150, 0x03A4, 0x0354, 0x00D9, 0x1F87,
+		0x1FC0, 0x1F98, 0x013A, 0x0397, 0x0361, 0x00EC, 0x1F8A,
+		0x1FC4, 0x1F93, 0x0126, 0x0389, 0x036F, 0x00FE, 0x1F8D,
+		0x1F93, 0x010A, 0x0363, 0x0363, 0x010A, 0x1F93, 0x0000,
+		0x1F8D, 0x00FE, 0x036F, 0x0389, 0x0126, 0x1F93, 0x1FC4,
+		0x1F8A, 0x00EC, 0x0361, 0x0397, 0x013A, 0x1F98, 0x1FC0,
+		0x1F87, 0x00D9, 0x0354, 0x03A4, 0x0150, 0x1F9C, 0x1FBC,
+		0x1F85, 0x00C7, 0x0346, 0x03B0, 0x0165, 0x1FA1, 0x1FB8,
+		0x1F84, 0x00B5, 0x0337, 0x03BC, 0x017A, 0x1FA7, 0x1FB3,
+		0x1F83, 0x00A3, 0x0327, 0x03C7, 0x0190, 0x1FAD, 0x1FAF,
+		0x1F82, 0x0092, 0x0317, 0x03D0, 0x01A6, 0x1FB4, 0x1FAB,
+		0x1F82, 0x0081, 0x0306, 0x03D9, 0x01BC, 0x1FBB, 0x1FA7,
+		0x1F82, 0x0071, 0x02F4, 0x03E0, 0x01D2, 0x1FC3, 0x1FA4,
+		0x1F83, 0x0061, 0x02E1, 0x03E7, 0x01E8, 0x1FCC, 0x1FA0,
+		0x1F83, 0x0052, 0x02CF, 0x03ED, 0x01FE, 0x1FD5, 0x1F9C,
+		0x1F85, 0x0043, 0x02BB, 0x03F2, 0x0213, 0x1FDF, 0x1F99,
+		0x1F86, 0x0034, 0x02A7, 0x03F6, 0x022A, 0x1FEA, 0x1F95,
+		0x1F88, 0x0027, 0x0293, 0x03F8, 0x023F, 0x1FF5, 0x1F92,
+		0x1F8A, 0x0019, 0x027F, 0x03FA, 0x0255, 0x0000, 0x1F8F,
+		/* Chroma */
+		0x1F8D, 0x000C, 0x026A, 0x03FA, 0x026A, 0x000C, 0x1F8D,
+		0x1F8F, 0x0000, 0x0255, 0x03FA, 0x027F, 0x0019, 0x1F8A,
+		0x1F92, 0x1FF5, 0x023F, 0x03F8, 0x0293, 0x0027, 0x1F88,
+		0x1F95, 0x1FEA, 0x022A, 0x03F6, 0x02A7, 0x0034, 0x1F86,
+		0x1F99, 0x1FDF, 0x0213, 0x03F2, 0x02BB, 0x0043, 0x1F85,
+		0x1F9C, 0x1FD5, 0x01FE, 0x03ED, 0x02CF, 0x0052, 0x1F83,
+		0x1FA0, 0x1FCC, 0x01E8, 0x03E7, 0x02E1, 0x0061, 0x1F83,
+		0x1FA4, 0x1FC3, 0x01D2, 0x03E0, 0x02F4, 0x0071, 0x1F82,
+		0x1FA7, 0x1FBB, 0x01BC, 0x03D9, 0x0306, 0x0081, 0x1F82,
+		0x1FAB, 0x1FB4, 0x01A6, 0x03D0, 0x0317, 0x0092, 0x1F82,
+		0x1FAF, 0x1FAD, 0x0190, 0x03C7, 0x0327, 0x00A3, 0x1F83,
+		0x1FB3, 0x1FA7, 0x017A, 0x03BC, 0x0337, 0x00B5, 0x1F84,
+		0x1FB8, 0x1FA1, 0x0165, 0x03B0, 0x0346, 0x00C7, 0x1F85,
+		0x1FBC, 0x1F9C, 0x0150, 0x03A4, 0x0354, 0x00D9, 0x1F87,
+		0x1FC0, 0x1F98, 0x013A, 0x0397, 0x0361, 0x00EC, 0x1F8A,
+		0x1FC4, 0x1F93, 0x0126, 0x0389, 0x036F, 0x00FE, 0x1F8D,
+		0x1F93, 0x010A, 0x0363, 0x0363, 0x010A, 0x1F93, 0x0000,
+		0x1F8D, 0x00FE, 0x036F, 0x0389, 0x0126, 0x1F93, 0x1FC4,
+		0x1F8A, 0x00EC, 0x0361, 0x0397, 0x013A, 0x1F98, 0x1FC0,
+		0x1F87, 0x00D9, 0x0354, 0x03A4, 0x0150, 0x1F9C, 0x1FBC,
+		0x1F85, 0x00C7, 0x0346, 0x03B0, 0x0165, 0x1FA1, 0x1FB8,
+		0x1F84, 0x00B5, 0x0337, 0x03BC, 0x017A, 0x1FA7, 0x1FB3,
+		0x1F83, 0x00A3, 0x0327, 0x03C7, 0x0190, 0x1FAD, 0x1FAF,
+		0x1F82, 0x0092, 0x0317, 0x03D0, 0x01A6, 0x1FB4, 0x1FAB,
+		0x1F82, 0x0081, 0x0306, 0x03D9, 0x01BC, 0x1FBB, 0x1FA7,
+		0x1F82, 0x0071, 0x02F4, 0x03E0, 0x01D2, 0x1FC3, 0x1FA4,
+		0x1F83, 0x0061, 0x02E1, 0x03E7, 0x01E8, 0x1FCC, 0x1FA0,
+		0x1F83, 0x0052, 0x02CF, 0x03ED, 0x01FE, 0x1FD5, 0x1F9C,
+		0x1F85, 0x0043, 0x02BB, 0x03F2, 0x0213, 0x1FDF, 0x1F99,
+		0x1F86, 0x0034, 0x02A7, 0x03F6, 0x022A, 0x1FEA, 0x1F95,
+		0x1F88, 0x0027, 0x0293, 0x03F8, 0x023F, 0x1FF5, 0x1F92,
+		0x1F8A, 0x0019, 0x027F, 0x03FA, 0x0255, 0x0000, 0x1F8F,
+	},
+	[HS_LT_11_16_SCALE] = {
+		/* Luma */
+		0x1F95, 0x1FB5, 0x0272, 0x0488, 0x0272, 0x1FB5, 0x1F95,
+		0x1F9B, 0x1FAA, 0x0257, 0x0486, 0x028D, 0x1FC1, 0x1F90,
+		0x1FA0, 0x1FA0, 0x023C, 0x0485, 0x02A8, 0x1FCD, 0x1F8A,
+		0x1FA6, 0x1F96, 0x0221, 0x0481, 0x02C2, 0x1FDB, 0x1F85,
+		0x1FAC, 0x1F8E, 0x0205, 0x047C, 0x02DC, 0x1FE9, 0x1F80,
+		0x1FB1, 0x1F86, 0x01E9, 0x0476, 0x02F6, 0x1FF8, 0x1F7C,
+		0x1FB7, 0x1F7F, 0x01CE, 0x046E, 0x030F, 0x0008, 0x1F77,
+		0x1FBD, 0x1F79, 0x01B3, 0x0465, 0x0326, 0x0019, 0x1F73,
+		0x1FC3, 0x1F73, 0x0197, 0x045B, 0x033E, 0x002A, 0x1F70,
+		0x1FC8, 0x1F6F, 0x017D, 0x044E, 0x0355, 0x003C, 0x1F6D,
+		0x1FCE, 0x1F6B, 0x0162, 0x0441, 0x036B, 0x004F, 0x1F6A,
+		0x1FD3, 0x1F68, 0x0148, 0x0433, 0x0380, 0x0063, 0x1F67,
+		0x1FD8, 0x1F65, 0x012E, 0x0424, 0x0395, 0x0077, 0x1F65,
+		0x1FDE, 0x1F63, 0x0115, 0x0413, 0x03A8, 0x008B, 0x1F64,
+		0x1FE3, 0x1F62, 0x00FC, 0x0403, 0x03BA, 0x00A0, 0x1F62,
+		0x1FE7, 0x1F62, 0x00E4, 0x03EF, 0x03CC, 0x00B6, 0x1F62,
+		0x1F63, 0x00CA, 0x03D3, 0x03D3, 0x00CA, 0x1F63, 0x0000,
+		0x1F62, 0x00B6, 0x03CC, 0x03EF, 0x00E4, 0x1F62, 0x1FE7,
+		0x1F62, 0x00A0, 0x03BA, 0x0403, 0x00FC, 0x1F62, 0x1FE3,
+		0x1F64, 0x008B, 0x03A8, 0x0413, 0x0115, 0x1F63, 0x1FDE,
+		0x1F65, 0x0077, 0x0395, 0x0424, 0x012E, 0x1F65, 0x1FD8,
+		0x1F67, 0x0063, 0x0380, 0x0433, 0x0148, 0x1F68, 0x1FD3,
+		0x1F6A, 0x004F, 0x036B, 0x0441, 0x0162, 0x1F6B, 0x1FCE,
+		0x1F6D, 0x003C, 0x0355, 0x044E, 0x017D, 0x1F6F, 0x1FC8,
+		0x1F70, 0x002A, 0x033E, 0x045B, 0x0197, 0x1F73, 0x1FC3,
+		0x1F73, 0x0019, 0x0326, 0x0465, 0x01B3, 0x1F79, 0x1FBD,
+		0x1F77, 0x0008, 0x030F, 0x046E, 0x01CE, 0x1F7F, 0x1FB7,
+		0x1F7C, 0x1FF8, 0x02F6, 0x0476, 0x01E9, 0x1F86, 0x1FB1,
+		0x1F80, 0x1FE9, 0x02DC, 0x047C, 0x0205, 0x1F8E, 0x1FAC,
+		0x1F85, 0x1FDB, 0x02C2, 0x0481, 0x0221, 0x1F96, 0x1FA6,
+		0x1F8A, 0x1FCD, 0x02A8, 0x0485, 0x023C, 0x1FA0, 0x1FA0,
+		0x1F90, 0x1FC1, 0x028D, 0x0486, 0x0257, 0x1FAA, 0x1F9B,
+		/* Chroma */
+		0x1F95, 0x1FB5, 0x0272, 0x0488, 0x0272, 0x1FB5, 0x1F95,
+		0x1F9B, 0x1FAA, 0x0257, 0x0486, 0x028D, 0x1FC1, 0x1F90,
+		0x1FA0, 0x1FA0, 0x023C, 0x0485, 0x02A8, 0x1FCD, 0x1F8A,
+		0x1FA6, 0x1F96, 0x0221, 0x0481, 0x02C2, 0x1FDB, 0x1F85,
+		0x1FAC, 0x1F8E, 0x0205, 0x047C, 0x02DC, 0x1FE9, 0x1F80,
+		0x1FB1, 0x1F86, 0x01E9, 0x0476, 0x02F6, 0x1FF8, 0x1F7C,
+		0x1FB7, 0x1F7F, 0x01CE, 0x046E, 0x030F, 0x0008, 0x1F77,
+		0x1FBD, 0x1F79, 0x01B3, 0x0465, 0x0326, 0x0019, 0x1F73,
+		0x1FC3, 0x1F73, 0x0197, 0x045B, 0x033E, 0x002A, 0x1F70,
+		0x1FC8, 0x1F6F, 0x017D, 0x044E, 0x0355, 0x003C, 0x1F6D,
+		0x1FCE, 0x1F6B, 0x0162, 0x0441, 0x036B, 0x004F, 0x1F6A,
+		0x1FD3, 0x1F68, 0x0148, 0x0433, 0x0380, 0x0063, 0x1F67,
+		0x1FD8, 0x1F65, 0x012E, 0x0424, 0x0395, 0x0077, 0x1F65,
+		0x1FDE, 0x1F63, 0x0115, 0x0413, 0x03A8, 0x008B, 0x1F64,
+		0x1FE3, 0x1F62, 0x00FC, 0x0403, 0x03BA, 0x00A0, 0x1F62,
+		0x1FE7, 0x1F62, 0x00E4, 0x03EF, 0x03CC, 0x00B6, 0x1F62,
+		0x1F63, 0x00CA, 0x03D3, 0x03D3, 0x00CA, 0x1F63, 0x0000,
+		0x1F62, 0x00B6, 0x03CC, 0x03EF, 0x00E4, 0x1F62, 0x1FE7,
+		0x1F62, 0x00A0, 0x03BA, 0x0403, 0x00FC, 0x1F62, 0x1FE3,
+		0x1F64, 0x008B, 0x03A8, 0x0413, 0x0115, 0x1F63, 0x1FDE,
+		0x1F65, 0x0077, 0x0395, 0x0424, 0x012E, 0x1F65, 0x1FD8,
+		0x1F67, 0x0063, 0x0380, 0x0433, 0x0148, 0x1F68, 0x1FD3,
+		0x1F6A, 0x004F, 0x036B, 0x0441, 0x0162, 0x1F6B, 0x1FCE,
+		0x1F6D, 0x003C, 0x0355, 0x044E, 0x017D, 0x1F6F, 0x1FC8,
+		0x1F70, 0x002A, 0x033E, 0x045B, 0x0197, 0x1F73, 0x1FC3,
+		0x1F73, 0x0019, 0x0326, 0x0465, 0x01B3, 0x1F79, 0x1FBD,
+		0x1F77, 0x0008, 0x030F, 0x046E, 0x01CE, 0x1F7F, 0x1FB7,
+		0x1F7C, 0x1FF8, 0x02F6, 0x0476, 0x01E9, 0x1F86, 0x1FB1,
+		0x1F80, 0x1FE9, 0x02DC, 0x047C, 0x0205, 0x1F8E, 0x1FAC,
+		0x1F85, 0x1FDB, 0x02C2, 0x0481, 0x0221, 0x1F96, 0x1FA6,
+		0x1F8A, 0x1FCD, 0x02A8, 0x0485, 0x023C, 0x1FA0, 0x1FA0,
+		0x1F90, 0x1FC1, 0x028D, 0x0486, 0x0257, 0x1FAA, 0x1F9B,
+	},
+	[HS_LT_12_16_SCALE] = {
+		/* Luma */
+		0x1FBB, 0x1F65, 0x025E, 0x0504, 0x025E, 0x1F65, 0x1FBB,
+		0x1FC3, 0x1F5D, 0x023C, 0x0503, 0x027F, 0x1F6E, 0x1FB4,
+		0x1FCA, 0x1F56, 0x021B, 0x0501, 0x02A0, 0x1F78, 0x1FAC,
+		0x1FD1, 0x1F50, 0x01FA, 0x04FD, 0x02C0, 0x1F83, 0x1FA5,
+		0x1FD8, 0x1F4B, 0x01D9, 0x04F6, 0x02E1, 0x1F90, 0x1F9D,
+		0x1FDF, 0x1F47, 0x01B8, 0x04EF, 0x0301, 0x1F9D, 0x1F95,
+		0x1FE6, 0x1F43, 0x0198, 0x04E5, 0x0321, 0x1FAB, 0x1F8E,
+		0x1FEC, 0x1F41, 0x0178, 0x04DA, 0x0340, 0x1FBB, 0x1F86,
+		0x1FF2, 0x1F40, 0x0159, 0x04CC, 0x035E, 0x1FCC, 0x1F7F,
+		0x1FF8, 0x1F40, 0x013A, 0x04BE, 0x037B, 0x1FDD, 0x1F78,
+		0x1FFE, 0x1F40, 0x011B, 0x04AD, 0x0398, 0x1FF0, 0x1F72,
+		0x0003, 0x1F41, 0x00FD, 0x049C, 0x03B4, 0x0004, 0x1F6B,
+		0x0008, 0x1F43, 0x00E0, 0x0489, 0x03CE, 0x0019, 0x1F65,
+		0x000D, 0x1F46, 0x00C4, 0x0474, 0x03E8, 0x002E, 0x1F5F,
+		0x0011, 0x1F49, 0x00A9, 0x045E, 0x0400, 0x0045, 0x1F5A,
+		0x0015, 0x1F4D, 0x008E, 0x0447, 0x0418, 0x005C, 0x1F55,
+		0x1F4F, 0x0076, 0x043B, 0x043B, 0x0076, 0x1F4F, 0x0000,
+		0x1F55, 0x005C, 0x0418, 0x0447, 0x008E, 0x1F4D, 0x0015,
+		0x1F5A, 0x0045, 0x0400, 0x045E, 0x00A9, 0x1F49, 0x0011,
+		0x1F5F, 0x002E, 0x03E8, 0x0474, 0x00C4, 0x1F46, 0x000D,
+		0x1F65, 0x0019, 0x03CE, 0x0489, 0x00E0, 0x1F43, 0x0008,
+		0x1F6B, 0x0004, 0x03B4, 0x049C, 0x00FD, 0x1F41, 0x0003,
+		0x1F72, 0x1FF0, 0x0398, 0x04AD, 0x011B, 0x1F40, 0x1FFE,
+		0x1F78, 0x1FDD, 0x037B, 0x04BE, 0x013A, 0x1F40, 0x1FF8,
+		0x1F7F, 0x1FCC, 0x035E, 0x04CC, 0x0159, 0x1F40, 0x1FF2,
+		0x1F86, 0x1FBB, 0x0340, 0x04DA, 0x0178, 0x1F41, 0x1FEC,
+		0x1F8E, 0x1FAB, 0x0321, 0x04E5, 0x0198, 0x1F43, 0x1FE6,
+		0x1F95, 0x1F9D, 0x0301, 0x04EF, 0x01B8, 0x1F47, 0x1FDF,
+		0x1F9D, 0x1F90, 0x02E1, 0x04F6, 0x01D9, 0x1F4B, 0x1FD8,
+		0x1FA5, 0x1F83, 0x02C0, 0x04FD, 0x01FA, 0x1F50, 0x1FD1,
+		0x1FAC, 0x1F78, 0x02A0, 0x0501, 0x021B, 0x1F56, 0x1FCA,
+		0x1FB4, 0x1F6E, 0x027F, 0x0503, 0x023C, 0x1F5D, 0x1FC3,
+		/* Chroma */
+		0x1FBB, 0x1F65, 0x025E, 0x0504, 0x025E, 0x1F65, 0x1FBB,
+		0x1FC3, 0x1F5D, 0x023C, 0x0503, 0x027F, 0x1F6E, 0x1FB4,
+		0x1FCA, 0x1F56, 0x021B, 0x0501, 0x02A0, 0x1F78, 0x1FAC,
+		0x1FD1, 0x1F50, 0x01FA, 0x04FD, 0x02C0, 0x1F83, 0x1FA5,
+		0x1FD8, 0x1F4B, 0x01D9, 0x04F6, 0x02E1, 0x1F90, 0x1F9D,
+		0x1FDF, 0x1F47, 0x01B8, 0x04EF, 0x0301, 0x1F9D, 0x1F95,
+		0x1FE6, 0x1F43, 0x0198, 0x04E5, 0x0321, 0x1FAB, 0x1F8E,
+		0x1FEC, 0x1F41, 0x0178, 0x04DA, 0x0340, 0x1FBB, 0x1F86,
+		0x1FF2, 0x1F40, 0x0159, 0x04CC, 0x035E, 0x1FCC, 0x1F7F,
+		0x1FF8, 0x1F40, 0x013A, 0x04BE, 0x037B, 0x1FDD, 0x1F78,
+		0x1FFE, 0x1F40, 0x011B, 0x04AD, 0x0398, 0x1FF0, 0x1F72,
+		0x0003, 0x1F41, 0x00FD, 0x049C, 0x03B4, 0x0004, 0x1F6B,
+		0x0008, 0x1F43, 0x00E0, 0x0489, 0x03CE, 0x0019, 0x1F65,
+		0x000D, 0x1F46, 0x00C4, 0x0474, 0x03E8, 0x002E, 0x1F5F,
+		0x0011, 0x1F49, 0x00A9, 0x045E, 0x0400, 0x0045, 0x1F5A,
+		0x0015, 0x1F4D, 0x008E, 0x0447, 0x0418, 0x005C, 0x1F55,
+		0x1F4F, 0x0076, 0x043B, 0x043B, 0x0076, 0x1F4F, 0x0000,
+		0x1F55, 0x005C, 0x0418, 0x0447, 0x008E, 0x1F4D, 0x0015,
+		0x1F5A, 0x0045, 0x0400, 0x045E, 0x00A9, 0x1F49, 0x0011,
+		0x1F5F, 0x002E, 0x03E8, 0x0474, 0x00C4, 0x1F46, 0x000D,
+		0x1F65, 0x0019, 0x03CE, 0x0489, 0x00E0, 0x1F43, 0x0008,
+		0x1F6B, 0x0004, 0x03B4, 0x049C, 0x00FD, 0x1F41, 0x0003,
+		0x1F72, 0x1FF0, 0x0398, 0x04AD, 0x011B, 0x1F40, 0x1FFE,
+		0x1F78, 0x1FDD, 0x037B, 0x04BE, 0x013A, 0x1F40, 0x1FF8,
+		0x1F7F, 0x1FCC, 0x035E, 0x04CC, 0x0159, 0x1F40, 0x1FF2,
+		0x1F86, 0x1FBB, 0x0340, 0x04DA, 0x0178, 0x1F41, 0x1FEC,
+		0x1F8E, 0x1FAB, 0x0321, 0x04E5, 0x0198, 0x1F43, 0x1FE6,
+		0x1F95, 0x1F9D, 0x0301, 0x04EF, 0x01B8, 0x1F47, 0x1FDF,
+		0x1F9D, 0x1F90, 0x02E1, 0x04F6, 0x01D9, 0x1F4B, 0x1FD8,
+		0x1FA5, 0x1F83, 0x02C0, 0x04FD, 0x01FA, 0x1F50, 0x1FD1,
+		0x1FAC, 0x1F78, 0x02A0, 0x0501, 0x021B, 0x1F56, 0x1FCA,
+		0x1FB4, 0x1F6E, 0x027F, 0x0503, 0x023C, 0x1F5D, 0x1FC3,
+	},
+	[HS_LT_13_16_SCALE] = {
+		/* Luma */
+		0x1FF4, 0x1F29, 0x022D, 0x056C, 0x022D, 0x1F29, 0x1FF4,
+		0x1FFC, 0x1F26, 0x0206, 0x056A, 0x0254, 0x1F2E, 0x1FEC,
+		0x0003, 0x1F24, 0x01E0, 0x0567, 0x027A, 0x1F34, 0x1FE4,
+		0x000A, 0x1F23, 0x01BA, 0x0561, 0x02A2, 0x1F3B, 0x1FDB,
+		0x0011, 0x1F22, 0x0194, 0x055B, 0x02C9, 0x1F43, 0x1FD2,
+		0x0017, 0x1F23, 0x016F, 0x0551, 0x02F0, 0x1F4D, 0x1FC9,
+		0x001D, 0x1F25, 0x014B, 0x0545, 0x0316, 0x1F58, 0x1FC0,
+		0x0022, 0x1F28, 0x0127, 0x0538, 0x033C, 0x1F65, 0x1FB6,
+		0x0027, 0x1F2C, 0x0104, 0x0528, 0x0361, 0x1F73, 0x1FAD,
+		0x002B, 0x1F30, 0x00E2, 0x0518, 0x0386, 0x1F82, 0x1FA3,
+		0x002F, 0x1F36, 0x00C2, 0x0504, 0x03AA, 0x1F92, 0x1F99,
+		0x0032, 0x1F3C, 0x00A2, 0x04EF, 0x03CD, 0x1FA4, 0x1F90,
+		0x0035, 0x1F42, 0x0083, 0x04D9, 0x03EF, 0x1FB8, 0x1F86,
+		0x0038, 0x1F49, 0x0065, 0x04C0, 0x0410, 0x1FCD, 0x1F7D,
+		0x003A, 0x1F51, 0x0048, 0x04A6, 0x0431, 0x1FE3, 0x1F73,
+		0x003C, 0x1F59, 0x002D, 0x048A, 0x0450, 0x1FFA, 0x1F6A,
+		0x1F5D, 0x0014, 0x048F, 0x048F, 0x0014, 0x1F5D, 0x0000,
+		0x1F6A, 0x1FFA, 0x0450, 0x048A, 0x002D, 0x1F59, 0x003C,
+		0x1F73, 0x1FE3, 0x0431, 0x04A6, 0x0048, 0x1F51, 0x003A,
+		0x1F7D, 0x1FCD, 0x0410, 0x04C0, 0x0065, 0x1F49, 0x0038,
+		0x1F86, 0x1FB8, 0x03EF, 0x04D9, 0x0083, 0x1F42, 0x0035,
+		0x1F90, 0x1FA4, 0x03CD, 0x04EF, 0x00A2, 0x1F3C, 0x0032,
+		0x1F99, 0x1F92, 0x03AA, 0x0504, 0x00C2, 0x1F36, 0x002F,
+		0x1FA3, 0x1F82, 0x0386, 0x0518, 0x00E2, 0x1F30, 0x002B,
+		0x1FAD, 0x1F73, 0x0361, 0x0528, 0x0104, 0x1F2C, 0x0027,
+		0x1FB6, 0x1F65, 0x033C, 0x0538, 0x0127, 0x1F28, 0x0022,
+		0x1FC0, 0x1F58, 0x0316, 0x0545, 0x014B, 0x1F25, 0x001D,
+		0x1FC9, 0x1F4D, 0x02F0, 0x0551, 0x016F, 0x1F23, 0x0017,
+		0x1FD2, 0x1F43, 0x02C9, 0x055B, 0x0194, 0x1F22, 0x0011,
+		0x1FDB, 0x1F3B, 0x02A2, 0x0561, 0x01BA, 0x1F23, 0x000A,
+		0x1FE4, 0x1F34, 0x027A, 0x0567, 0x01E0, 0x1F24, 0x0003,
+		0x1FEC, 0x1F2E, 0x0254, 0x056A, 0x0206, 0x1F26, 0x1FFC,
+		/* Chroma */
+		0x1FF4, 0x1F29, 0x022D, 0x056C, 0x022D, 0x1F29, 0x1FF4,
+		0x1FFC, 0x1F26, 0x0206, 0x056A, 0x0254, 0x1F2E, 0x1FEC,
+		0x0003, 0x1F24, 0x01E0, 0x0567, 0x027A, 0x1F34, 0x1FE4,
+		0x000A, 0x1F23, 0x01BA, 0x0561, 0x02A2, 0x1F3B, 0x1FDB,
+		0x0011, 0x1F22, 0x0194, 0x055B, 0x02C9, 0x1F43, 0x1FD2,
+		0x0017, 0x1F23, 0x016F, 0x0551, 0x02F0, 0x1F4D, 0x1FC9,
+		0x001D, 0x1F25, 0x014B, 0x0545, 0x0316, 0x1F58, 0x1FC0,
+		0x0022, 0x1F28, 0x0127, 0x0538, 0x033C, 0x1F65, 0x1FB6,
+		0x0027, 0x1F2C, 0x0104, 0x0528, 0x0361, 0x1F73, 0x1FAD,
+		0x002B, 0x1F30, 0x00E2, 0x0518, 0x0386, 0x1F82, 0x1FA3,
+		0x002F, 0x1F36, 0x00C2, 0x0504, 0x03AA, 0x1F92, 0x1F99,
+		0x0032, 0x1F3C, 0x00A2, 0x04EF, 0x03CD, 0x1FA4, 0x1F90,
+		0x0035, 0x1F42, 0x0083, 0x04D9, 0x03EF, 0x1FB8, 0x1F86,
+		0x0038, 0x1F49, 0x0065, 0x04C0, 0x0410, 0x1FCD, 0x1F7D,
+		0x003A, 0x1F51, 0x0048, 0x04A6, 0x0431, 0x1FE3, 0x1F73,
+		0x003C, 0x1F59, 0x002D, 0x048A, 0x0450, 0x1FFA, 0x1F6A,
+		0x1F5D, 0x0014, 0x048F, 0x048F, 0x0014, 0x1F5D, 0x0000,
+		0x1F6A, 0x1FFA, 0x0450, 0x048A, 0x002D, 0x1F59, 0x003C,
+		0x1F73, 0x1FE3, 0x0431, 0x04A6, 0x0048, 0x1F51, 0x003A,
+		0x1F7D, 0x1FCD, 0x0410, 0x04C0, 0x0065, 0x1F49, 0x0038,
+		0x1F86, 0x1FB8, 0x03EF, 0x04D9, 0x0083, 0x1F42, 0x0035,
+		0x1F90, 0x1FA4, 0x03CD, 0x04EF, 0x00A2, 0x1F3C, 0x0032,
+		0x1F99, 0x1F92, 0x03AA, 0x0504, 0x00C2, 0x1F36, 0x002F,
+		0x1FA3, 0x1F82, 0x0386, 0x0518, 0x00E2, 0x1F30, 0x002B,
+		0x1FAD, 0x1F73, 0x0361, 0x0528, 0x0104, 0x1F2C, 0x0027,
+		0x1FB6, 0x1F65, 0x033C, 0x0538, 0x0127, 0x1F28, 0x0022,
+		0x1FC0, 0x1F58, 0x0316, 0x0545, 0x014B, 0x1F25, 0x001D,
+		0x1FC9, 0x1F4D, 0x02F0, 0x0551, 0x016F, 0x1F23, 0x0017,
+		0x1FD2, 0x1F43, 0x02C9, 0x055B, 0x0194, 0x1F22, 0x0011,
+		0x1FDB, 0x1F3B, 0x02A2, 0x0561, 0x01BA, 0x1F23, 0x000A,
+		0x1FE4, 0x1F34, 0x027A, 0x0567, 0x01E0, 0x1F24, 0x0003,
+		0x1FEC, 0x1F2E, 0x0254, 0x056A, 0x0206, 0x1F26, 0x1FFC,
+	},
+	[HS_LT_14_16_SCALE] = {
+		/* Luma */
+		0x002F, 0x1F0B, 0x01E7, 0x05BE, 0x01E7, 0x1F0B, 0x002F,
+		0x0035, 0x1F0D, 0x01BC, 0x05BD, 0x0213, 0x1F0A, 0x0028,
+		0x003A, 0x1F11, 0x0191, 0x05BA, 0x023F, 0x1F0A, 0x0021,
+		0x003F, 0x1F15, 0x0167, 0x05B3, 0x026C, 0x1F0C, 0x001A,
+		0x0043, 0x1F1B, 0x013E, 0x05AA, 0x0299, 0x1F0F, 0x0012,
+		0x0046, 0x1F21, 0x0116, 0x05A1, 0x02C6, 0x1F13, 0x0009,
+		0x0049, 0x1F28, 0x00EF, 0x0593, 0x02F4, 0x1F19, 0x0000,
+		0x004C, 0x1F30, 0x00C9, 0x0584, 0x0321, 0x1F20, 0x1FF6,
+		0x004E, 0x1F39, 0x00A4, 0x0572, 0x034D, 0x1F2A, 0x1FEC,
+		0x004F, 0x1F43, 0x0080, 0x055E, 0x037A, 0x1F34, 0x1FE2,
+		0x0050, 0x1F4D, 0x005E, 0x0548, 0x03A5, 0x1F41, 0x1FD7,
+		0x0050, 0x1F57, 0x003D, 0x0531, 0x03D1, 0x1F4F, 0x1FCB,
+		0x0050, 0x1F62, 0x001E, 0x0516, 0x03FB, 0x1F5F, 0x1FC0,
+		0x004F, 0x1F6D, 0x0000, 0x04FA, 0x0425, 0x1F71, 0x1FB4,
+		0x004E, 0x1F79, 0x1FE4, 0x04DC, 0x044D, 0x1F84, 0x1FA8,
+		0x004D, 0x1F84, 0x1FCA, 0x04BC, 0x0474, 0x1F99, 0x1F9C,
+		0x1F8C, 0x1FAE, 0x04C6, 0x04C6, 0x1FAE, 0x1F8C, 0x0000,
+		0x1F9C, 0x1F99, 0x0474, 0x04BC, 0x1FCA, 0x1F84, 0x004D,
+		0x1FA8, 0x1F84, 0x044D, 0x04DC, 0x1FE4, 0x1F79, 0x004E,
+		0x1FB4, 0x1F71, 0x0425, 0x04FA, 0x0000, 0x1F6D, 0x004F,
+		0x1FC0, 0x1F5F, 0x03FB, 0x0516, 0x001E, 0x1F62, 0x0050,
+		0x1FCB, 0x1F4F, 0x03D1, 0x0531, 0x003D, 0x1F57, 0x0050,
+		0x1FD7, 0x1F41, 0x03A5, 0x0548, 0x005E, 0x1F4D, 0x0050,
+		0x1FE2, 0x1F34, 0x037A, 0x055E, 0x0080, 0x1F43, 0x004F,
+		0x1FEC, 0x1F2A, 0x034D, 0x0572, 0x00A4, 0x1F39, 0x004E,
+		0x1FF6, 0x1F20, 0x0321, 0x0584, 0x00C9, 0x1F30, 0x004C,
+		0x0000, 0x1F19, 0x02F4, 0x0593, 0x00EF, 0x1F28, 0x0049,
+		0x0009, 0x1F13, 0x02C6, 0x05A1, 0x0116, 0x1F21, 0x0046,
+		0x0012, 0x1F0F, 0x0299, 0x05AA, 0x013E, 0x1F1B, 0x0043,
+		0x001A, 0x1F0C, 0x026C, 0x05B3, 0x0167, 0x1F15, 0x003F,
+		0x0021, 0x1F0A, 0x023F, 0x05BA, 0x0191, 0x1F11, 0x003A,
+		0x0028, 0x1F0A, 0x0213, 0x05BD, 0x01BC, 0x1F0D, 0x0035,
+		/* Chroma */
+		0x002F, 0x1F0B, 0x01E7, 0x05BE, 0x01E7, 0x1F0B, 0x002F,
+		0x0035, 0x1F0D, 0x01BC, 0x05BD, 0x0213, 0x1F0A, 0x0028,
+		0x003A, 0x1F11, 0x0191, 0x05BA, 0x023F, 0x1F0A, 0x0021,
+		0x003F, 0x1F15, 0x0167, 0x05B3, 0x026C, 0x1F0C, 0x001A,
+		0x0043, 0x1F1B, 0x013E, 0x05AA, 0x0299, 0x1F0F, 0x0012,
+		0x0046, 0x1F21, 0x0116, 0x05A1, 0x02C6, 0x1F13, 0x0009,
+		0x0049, 0x1F28, 0x00EF, 0x0593, 0x02F4, 0x1F19, 0x0000,
+		0x004C, 0x1F30, 0x00C9, 0x0584, 0x0321, 0x1F20, 0x1FF6,
+		0x004E, 0x1F39, 0x00A4, 0x0572, 0x034D, 0x1F2A, 0x1FEC,
+		0x004F, 0x1F43, 0x0080, 0x055E, 0x037A, 0x1F34, 0x1FE2,
+		0x0050, 0x1F4D, 0x005E, 0x0548, 0x03A5, 0x1F41, 0x1FD7,
+		0x0050, 0x1F57, 0x003D, 0x0531, 0x03D1, 0x1F4F, 0x1FCB,
+		0x0050, 0x1F62, 0x001E, 0x0516, 0x03FB, 0x1F5F, 0x1FC0,
+		0x004F, 0x1F6D, 0x0000, 0x04FA, 0x0425, 0x1F71, 0x1FB4,
+		0x004E, 0x1F79, 0x1FE4, 0x04DC, 0x044D, 0x1F84, 0x1FA8,
+		0x004D, 0x1F84, 0x1FCA, 0x04BC, 0x0474, 0x1F99, 0x1F9C,
+		0x1F8C, 0x1FAE, 0x04C6, 0x04C6, 0x1FAE, 0x1F8C, 0x0000,
+		0x1F9C, 0x1F99, 0x0474, 0x04BC, 0x1FCA, 0x1F84, 0x004D,
+		0x1FA8, 0x1F84, 0x044D, 0x04DC, 0x1FE4, 0x1F79, 0x004E,
+		0x1FB4, 0x1F71, 0x0425, 0x04FA, 0x0000, 0x1F6D, 0x004F,
+		0x1FC0, 0x1F5F, 0x03FB, 0x0516, 0x001E, 0x1F62, 0x0050,
+		0x1FCB, 0x1F4F, 0x03D1, 0x0531, 0x003D, 0x1F57, 0x0050,
+		0x1FD7, 0x1F41, 0x03A5, 0x0548, 0x005E, 0x1F4D, 0x0050,
+		0x1FE2, 0x1F34, 0x037A, 0x055E, 0x0080, 0x1F43, 0x004F,
+		0x1FEC, 0x1F2A, 0x034D, 0x0572, 0x00A4, 0x1F39, 0x004E,
+		0x1FF6, 0x1F20, 0x0321, 0x0584, 0x00C9, 0x1F30, 0x004C,
+		0x0000, 0x1F19, 0x02F4, 0x0593, 0x00EF, 0x1F28, 0x0049,
+		0x0009, 0x1F13, 0x02C6, 0x05A1, 0x0116, 0x1F21, 0x0046,
+		0x0012, 0x1F0F, 0x0299, 0x05AA, 0x013E, 0x1F1B, 0x0043,
+		0x001A, 0x1F0C, 0x026C, 0x05B3, 0x0167, 0x1F15, 0x003F,
+		0x0021, 0x1F0A, 0x023F, 0x05BA, 0x0191, 0x1F11, 0x003A,
+		0x0028, 0x1F0A, 0x0213, 0x05BD, 0x01BC, 0x1F0D, 0x0035,
+	},
+	[HS_LT_15_16_SCALE] = {
+		/* Luma */
+		0x005B, 0x1F0A, 0x0195, 0x060C, 0x0195, 0x1F0A, 0x005B,
+		0x005D, 0x1F13, 0x0166, 0x0609, 0x01C6, 0x1F03, 0x0058,
+		0x005F, 0x1F1C, 0x0138, 0x0605, 0x01F7, 0x1EFD, 0x0054,
+		0x0060, 0x1F26, 0x010B, 0x05FF, 0x0229, 0x1EF8, 0x004F,
+		0x0060, 0x1F31, 0x00DF, 0x05F5, 0x025C, 0x1EF5, 0x004A,
+		0x0060, 0x1F3D, 0x00B5, 0x05E8, 0x028F, 0x1EF3, 0x0044,
+		0x005F, 0x1F49, 0x008C, 0x05DA, 0x02C3, 0x1EF2, 0x003D,
+		0x005E, 0x1F56, 0x0065, 0x05C7, 0x02F6, 0x1EF4, 0x0036,
+		0x005C, 0x1F63, 0x003F, 0x05B3, 0x032B, 0x1EF7, 0x002D,
+		0x0059, 0x1F71, 0x001B, 0x059D, 0x035F, 0x1EFB, 0x0024,
+		0x0057, 0x1F7F, 0x1FF9, 0x0583, 0x0392, 0x1F02, 0x001A,
+		0x0053, 0x1F8D, 0x1FD9, 0x0567, 0x03C5, 0x1F0B, 0x0010,
+		0x0050, 0x1F9B, 0x1FBB, 0x0548, 0x03F8, 0x1F15, 0x0005,
+		0x004C, 0x1FA9, 0x1F9E, 0x0528, 0x042A, 0x1F22, 0x1FF9,
+		0x0048, 0x1FB7, 0x1F84, 0x0505, 0x045A, 0x1F31, 0x1FED,
+		0x0043, 0x1FC5, 0x1F6C, 0x04E0, 0x048A, 0x1F42, 0x1FE0,
+		0x1FD1, 0x1F50, 0x04DF, 0x04DF, 0x1F50, 0x1FD1, 0x0000,
+		0x1FE0, 0x1F42, 0x048A, 0x04E0, 0x1F6C, 0x1FC5, 0x0043,
+		0x1FED, 0x1F31, 0x045A, 0x0505, 0x1F84, 0x1FB7, 0x0048,
+		0x1FF9, 0x1F22, 0x042A, 0x0528, 0x1F9E, 0x1FA9, 0x004C,
+		0x0005, 0x1F15, 0x03F8, 0x0548, 0x1FBB, 0x1F9B, 0x0050,
+		0x0010, 0x1F0B, 0x03C5, 0x0567, 0x1FD9, 0x1F8D, 0x0053,
+		0x001A, 0x1F02, 0x0392, 0x0583, 0x1FF9, 0x1F7F, 0x0057,
+		0x0024, 0x1EFB, 0x035F, 0x059D, 0x001B, 0x1F71, 0x0059,
+		0x002D, 0x1EF7, 0x032B, 0x05B3, 0x003F, 0x1F63, 0x005C,
+		0x0036, 0x1EF4, 0x02F6, 0x05C7, 0x0065, 0x1F56, 0x005E,
+		0x003D, 0x1EF2, 0x02C3, 0x05DA, 0x008C, 0x1F49, 0x005F,
+		0x0044, 0x1EF3, 0x028F, 0x05E8, 0x00B5, 0x1F3D, 0x0060,
+		0x004A, 0x1EF5, 0x025C, 0x05F5, 0x00DF, 0x1F31, 0x0060,
+		0x004F, 0x1EF8, 0x0229, 0x05FF, 0x010B, 0x1F26, 0x0060,
+		0x0054, 0x1EFD, 0x01F7, 0x0605, 0x0138, 0x1F1C, 0x005F,
+		0x0058, 0x1F03, 0x01C6, 0x0609, 0x0166, 0x1F13, 0x005D,
+		/* Chroma */
+		0x005B, 0x1F0A, 0x0195, 0x060C, 0x0195, 0x1F0A, 0x005B,
+		0x005D, 0x1F13, 0x0166, 0x0609, 0x01C6, 0x1F03, 0x0058,
+		0x005F, 0x1F1C, 0x0138, 0x0605, 0x01F7, 0x1EFD, 0x0054,
+		0x0060, 0x1F26, 0x010B, 0x05FF, 0x0229, 0x1EF8, 0x004F,
+		0x0060, 0x1F31, 0x00DF, 0x05F5, 0x025C, 0x1EF5, 0x004A,
+		0x0060, 0x1F3D, 0x00B5, 0x05E8, 0x028F, 0x1EF3, 0x0044,
+		0x005F, 0x1F49, 0x008C, 0x05DA, 0x02C3, 0x1EF2, 0x003D,
+		0x005E, 0x1F56, 0x0065, 0x05C7, 0x02F6, 0x1EF4, 0x0036,
+		0x005C, 0x1F63, 0x003F, 0x05B3, 0x032B, 0x1EF7, 0x002D,
+		0x0059, 0x1F71, 0x001B, 0x059D, 0x035F, 0x1EFB, 0x0024,
+		0x0057, 0x1F7F, 0x1FF9, 0x0583, 0x0392, 0x1F02, 0x001A,
+		0x0053, 0x1F8D, 0x1FD9, 0x0567, 0x03C5, 0x1F0B, 0x0010,
+		0x0050, 0x1F9B, 0x1FBB, 0x0548, 0x03F8, 0x1F15, 0x0005,
+		0x004C, 0x1FA9, 0x1F9E, 0x0528, 0x042A, 0x1F22, 0x1FF9,
+		0x0048, 0x1FB7, 0x1F84, 0x0505, 0x045A, 0x1F31, 0x1FED,
+		0x0043, 0x1FC5, 0x1F6C, 0x04E0, 0x048A, 0x1F42, 0x1FE0,
+		0x1FD1, 0x1F50, 0x04DF, 0x04DF, 0x1F50, 0x1FD1, 0x0000,
+		0x1FE0, 0x1F42, 0x048A, 0x04E0, 0x1F6C, 0x1FC5, 0x0043,
+		0x1FED, 0x1F31, 0x045A, 0x0505, 0x1F84, 0x1FB7, 0x0048,
+		0x1FF9, 0x1F22, 0x042A, 0x0528, 0x1F9E, 0x1FA9, 0x004C,
+		0x0005, 0x1F15, 0x03F8, 0x0548, 0x1FBB, 0x1F9B, 0x0050,
+		0x0010, 0x1F0B, 0x03C5, 0x0567, 0x1FD9, 0x1F8D, 0x0053,
+		0x001A, 0x1F02, 0x0392, 0x0583, 0x1FF9, 0x1F7F, 0x0057,
+		0x0024, 0x1EFB, 0x035F, 0x059D, 0x001B, 0x1F71, 0x0059,
+		0x002D, 0x1EF7, 0x032B, 0x05B3, 0x003F, 0x1F63, 0x005C,
+		0x0036, 0x1EF4, 0x02F6, 0x05C7, 0x0065, 0x1F56, 0x005E,
+		0x003D, 0x1EF2, 0x02C3, 0x05DA, 0x008C, 0x1F49, 0x005F,
+		0x0044, 0x1EF3, 0x028F, 0x05E8, 0x00B5, 0x1F3D, 0x0060,
+		0x004A, 0x1EF5, 0x025C, 0x05F5, 0x00DF, 0x1F31, 0x0060,
+		0x004F, 0x1EF8, 0x0229, 0x05FF, 0x010B, 0x1F26, 0x0060,
+		0x0054, 0x1EFD, 0x01F7, 0x0605, 0x0138, 0x1F1C, 0x005F,
+		0x0058, 0x1F03, 0x01C6, 0x0609, 0x0166, 0x1F13, 0x005D,
+	},
+	[HS_LE_16_16_SCALE] = {
+		/* Luma */
+		0x006E, 0x1F24, 0x013E, 0x0660, 0x013E, 0x1F24, 0x006E,
+		0x006C, 0x1F33, 0x010B, 0x065D, 0x0172, 0x1F17, 0x0070,
+		0x0069, 0x1F41, 0x00DA, 0x0659, 0x01A8, 0x1F0B, 0x0070,
+		0x0066, 0x1F51, 0x00AA, 0x0650, 0x01DF, 0x1F00, 0x0070,
+		0x0062, 0x1F61, 0x007D, 0x0644, 0x0217, 0x1EF6, 0x006F,
+		0x005E, 0x1F71, 0x0051, 0x0636, 0x0250, 0x1EED, 0x006D,
+		0x0059, 0x1F81, 0x0028, 0x0624, 0x028A, 0x1EE5, 0x006B,
+		0x0054, 0x1F91, 0x0000, 0x060F, 0x02C5, 0x1EE0, 0x0067,
+		0x004E, 0x1FA2, 0x1FDB, 0x05F6, 0x0300, 0x1EDC, 0x0063,
+		0x0049, 0x1FB2, 0x1FB8, 0x05DB, 0x033B, 0x1EDA, 0x005D,
+		0x0043, 0x1FC3, 0x1F98, 0x05BC, 0x0376, 0x1ED9, 0x0057,
+		0x003D, 0x1FD3, 0x1F7A, 0x059B, 0x03B1, 0x1EDB, 0x004F,
+		0x0036, 0x1FE2, 0x1F5E, 0x0578, 0x03EC, 0x1EDF, 0x0047,
+		0x0030, 0x1FF1, 0x1F45, 0x0551, 0x0426, 0x1EE6, 0x003D,
+		0x002A, 0x0000, 0x1F2E, 0x0528, 0x045F, 0x1EEE, 0x0033,
+		0x0023, 0x000E, 0x1F19, 0x04FD, 0x0498, 0x1EFA, 0x0027,
+		0x001B, 0x1F04, 0x04E1, 0x04E1, 0x1F04, 0x001B, 0x0000,
+		0x0027, 0x1EFA, 0x0498, 0x04FD, 0x1F19, 0x000E, 0x0023,
+		0x0033, 0x1EEE, 0x045F, 0x0528, 0x1F2E, 0x0000, 0x002A,
+		0x003D, 0x1EE6, 0x0426, 0x0551, 0x1F45, 0x1FF1, 0x0030,
+		0x0047, 0x1EDF, 0x03EC, 0x0578, 0x1F5E, 0x1FE2, 0x0036,
+		0x004F, 0x1EDB, 0x03B1, 0x059B, 0x1F7A, 0x1FD3, 0x003D,
+		0x0057, 0x1ED9, 0x0376, 0x05BC, 0x1F98, 0x1FC3, 0x0043,
+		0x005D, 0x1EDA, 0x033B, 0x05DB, 0x1FB8, 0x1FB2, 0x0049,
+		0x0063, 0x1EDC, 0x0300, 0x05F6, 0x1FDB, 0x1FA2, 0x004E,
+		0x0067, 0x1EE0, 0x02C5, 0x060F, 0x0000, 0x1F91, 0x0054,
+		0x006B, 0x1EE5, 0x028A, 0x0624, 0x0028, 0x1F81, 0x0059,
+		0x006D, 0x1EED, 0x0250, 0x0636, 0x0051, 0x1F71, 0x005E,
+		0x006F, 0x1EF6, 0x0217, 0x0644, 0x007D, 0x1F61, 0x0062,
+		0x0070, 0x1F00, 0x01DF, 0x0650, 0x00AA, 0x1F51, 0x0066,
+		0x0070, 0x1F0B, 0x01A8, 0x0659, 0x00DA, 0x1F41, 0x0069,
+		0x0070, 0x1F17, 0x0172, 0x065D, 0x010B, 0x1F33, 0x006C,
+		/* Chroma */
+		0x006E, 0x1F24, 0x013E, 0x0660, 0x013E, 0x1F24, 0x006E,
+		0x006C, 0x1F33, 0x010B, 0x065D, 0x0172, 0x1F17, 0x0070,
+		0x0069, 0x1F41, 0x00DA, 0x0659, 0x01A8, 0x1F0B, 0x0070,
+		0x0066, 0x1F51, 0x00AA, 0x0650, 0x01DF, 0x1F00, 0x0070,
+		0x0062, 0x1F61, 0x007D, 0x0644, 0x0217, 0x1EF6, 0x006F,
+		0x005E, 0x1F71, 0x0051, 0x0636, 0x0250, 0x1EED, 0x006D,
+		0x0059, 0x1F81, 0x0028, 0x0624, 0x028A, 0x1EE5, 0x006B,
+		0x0054, 0x1F91, 0x0000, 0x060F, 0x02C5, 0x1EE0, 0x0067,
+		0x004E, 0x1FA2, 0x1FDB, 0x05F6, 0x0300, 0x1EDC, 0x0063,
+		0x0049, 0x1FB2, 0x1FB8, 0x05DB, 0x033B, 0x1EDA, 0x005D,
+		0x0043, 0x1FC3, 0x1F98, 0x05BC, 0x0376, 0x1ED9, 0x0057,
+		0x003D, 0x1FD3, 0x1F7A, 0x059B, 0x03B1, 0x1EDB, 0x004F,
+		0x0036, 0x1FE2, 0x1F5E, 0x0578, 0x03EC, 0x1EDF, 0x0047,
+		0x0030, 0x1FF1, 0x1F45, 0x0551, 0x0426, 0x1EE6, 0x003D,
+		0x002A, 0x0000, 0x1F2E, 0x0528, 0x045F, 0x1EEE, 0x0033,
+		0x0023, 0x000E, 0x1F19, 0x04FD, 0x0498, 0x1EFA, 0x0027,
+		0x001B, 0x1F04, 0x04E1, 0x04E1, 0x1F04, 0x001B, 0x0000,
+		0x0027, 0x1EFA, 0x0498, 0x04FD, 0x1F19, 0x000E, 0x0023,
+		0x0033, 0x1EEE, 0x045F, 0x0528, 0x1F2E, 0x0000, 0x002A,
+		0x003D, 0x1EE6, 0x0426, 0x0551, 0x1F45, 0x1FF1, 0x0030,
+		0x0047, 0x1EDF, 0x03EC, 0x0578, 0x1F5E, 0x1FE2, 0x0036,
+		0x004F, 0x1EDB, 0x03B1, 0x059B, 0x1F7A, 0x1FD3, 0x003D,
+		0x0057, 0x1ED9, 0x0376, 0x05BC, 0x1F98, 0x1FC3, 0x0043,
+		0x005D, 0x1EDA, 0x033B, 0x05DB, 0x1FB8, 0x1FB2, 0x0049,
+		0x0063, 0x1EDC, 0x0300, 0x05F6, 0x1FDB, 0x1FA2, 0x004E,
+		0x0067, 0x1EE0, 0x02C5, 0x060F, 0x0000, 0x1F91, 0x0054,
+		0x006B, 0x1EE5, 0x028A, 0x0624, 0x0028, 0x1F81, 0x0059,
+		0x006D, 0x1EED, 0x0250, 0x0636, 0x0051, 0x1F71, 0x005E,
+		0x006F, 0x1EF6, 0x0217, 0x0644, 0x007D, 0x1F61, 0x0062,
+		0x0070, 0x1F00, 0x01DF, 0x0650, 0x00AA, 0x1F51, 0x0066,
+		0x0070, 0x1F0B, 0x01A8, 0x0659, 0x00DA, 0x1F41, 0x0069,
+		0x0070, 0x1F17, 0x0172, 0x065D, 0x010B, 0x1F33, 0x006C,
+	},
+};
+
+/* vertical scaler coefficients */
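+/*
+ * Same layout as the horizontal sets above, but with SC_V_NUM_TAPS (five)
+ * taps per phase.
+ */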
+enum {
+	VS_UP_SCALE = 0,
+	VS_LT_9_16_SCALE,
+	VS_LT_10_16_SCALE,
+	VS_LT_11_16_SCALE,
+	VS_LT_12_16_SCALE,
+	VS_LT_13_16_SCALE,
+	VS_LT_14_16_SCALE,
+	VS_LT_15_16_SCALE,
+	VS_LT_16_16_SCALE,
+	VS_1_TO_1_SCALE,
+};
+
+static const u16 scaler_vs_coeffs[15][SC_NUM_PHASES * 2 * SC_V_NUM_TAPS] = {
+	[VS_UP_SCALE] = {
+		/* Luma */
+		0x1FD1, 0x00B1, 0x06FC, 0x00B1, 0x1FD1,
+		0x1FD8, 0x0085, 0x06F9, 0x00E1, 0x1FC9,
+		0x1FDF, 0x005B, 0x06F2, 0x0114, 0x1FC0,
+		0x1FE5, 0x0035, 0x06E5, 0x014A, 0x1FB7,
+		0x1FEB, 0x0012, 0x06D3, 0x0182, 0x1FAE,
+		0x1FF1, 0x1FF3, 0x06BA, 0x01BD, 0x1FA5,
+		0x1FF5, 0x1FD7, 0x069D, 0x01FB, 0x1F9C,
+		0x1FF9, 0x1FBE, 0x067C, 0x023A, 0x1F93,
+		0x1FFD, 0x1FA8, 0x0656, 0x027B, 0x1F8A,
+		0x0000, 0x1F95, 0x062B, 0x02BF, 0x1F81,
+		0x0002, 0x1F86, 0x05FC, 0x0303, 0x1F79,
+		0x0004, 0x1F79, 0x05CA, 0x0347, 0x1F72,
+		0x0005, 0x1F6F, 0x0594, 0x038D, 0x1F6B,
+		0x0006, 0x1F67, 0x055B, 0x03D2, 0x1F66,
+		0x0007, 0x1F62, 0x051E, 0x0417, 0x1F62,
+		0x0007, 0x1F5F, 0x04DF, 0x045C, 0x1F5F,
+		0x1F5E, 0x04A2, 0x04A2, 0x1F5E, 0x0000,
+		0x1F5F, 0x045C, 0x04DF, 0x1F5F, 0x0007,
+		0x1F62, 0x0417, 0x051E, 0x1F62, 0x0007,
+		0x1F66, 0x03D2, 0x055B, 0x1F67, 0x0006,
+		0x1F6B, 0x038D, 0x0594, 0x1F6F, 0x0005,
+		0x1F72, 0x0347, 0x05CA, 0x1F79, 0x0004,
+		0x1F79, 0x0303, 0x05FC, 0x1F86, 0x0002,
+		0x1F81, 0x02BF, 0x062B, 0x1F95, 0x0000,
+		0x1F8A, 0x027B, 0x0656, 0x1FA8, 0x1FFD,
+		0x1F93, 0x023A, 0x067C, 0x1FBE, 0x1FF9,
+		0x1F9C, 0x01FB, 0x069D, 0x1FD7, 0x1FF5,
+		0x1FA5, 0x01BD, 0x06BA, 0x1FF3, 0x1FF1,
+		0x1FAE, 0x0182, 0x06D3, 0x0012, 0x1FEB,
+		0x1FB7, 0x014A, 0x06E5, 0x0035, 0x1FE5,
+		0x1FC0, 0x0114, 0x06F2, 0x005B, 0x1FDF,
+		0x1FC9, 0x00E1, 0x06F9, 0x0085, 0x1FD8,
+		/* Chroma */
+		0x1FD1, 0x00B1, 0x06FC, 0x00B1, 0x1FD1,
+		0x1FD8, 0x0085, 0x06F9, 0x00E1, 0x1FC9,
+		0x1FDF, 0x005B, 0x06F2, 0x0114, 0x1FC0,
+		0x1FE5, 0x0035, 0x06E5, 0x014A, 0x1FB7,
+		0x1FEB, 0x0012, 0x06D3, 0x0182, 0x1FAE,
+		0x1FF1, 0x1FF3, 0x06BA, 0x01BD, 0x1FA5,
+		0x1FF5, 0x1FD7, 0x069D, 0x01FB, 0x1F9C,
+		0x1FF9, 0x1FBE, 0x067C, 0x023A, 0x1F93,
+		0x1FFD, 0x1FA8, 0x0656, 0x027B, 0x1F8A,
+		0x0000, 0x1F95, 0x062B, 0x02BF, 0x1F81,
+		0x0002, 0x1F86, 0x05FC, 0x0303, 0x1F79,
+		0x0004, 0x1F79, 0x05CA, 0x0347, 0x1F72,
+		0x0005, 0x1F6F, 0x0594, 0x038D, 0x1F6B,
+		0x0006, 0x1F67, 0x055B, 0x03D2, 0x1F66,
+		0x0007, 0x1F62, 0x051E, 0x0417, 0x1F62,
+		0x0007, 0x1F5F, 0x04DF, 0x045C, 0x1F5F,
+		0x1F5E, 0x04A2, 0x04A2, 0x1F5E, 0x0000,
+		0x1F5F, 0x045C, 0x04DF, 0x1F5F, 0x0007,
+		0x1F62, 0x0417, 0x051E, 0x1F62, 0x0007,
+		0x1F66, 0x03D2, 0x055B, 0x1F67, 0x0006,
+		0x1F6B, 0x038D, 0x0594, 0x1F6F, 0x0005,
+		0x1F72, 0x0347, 0x05CA, 0x1F79, 0x0004,
+		0x1F79, 0x0303, 0x05FC, 0x1F86, 0x0002,
+		0x1F81, 0x02BF, 0x062B, 0x1F95, 0x0000,
+		0x1F8A, 0x027B, 0x0656, 0x1FA8, 0x1FFD,
+		0x1F93, 0x023A, 0x067C, 0x1FBE, 0x1FF9,
+		0x1F9C, 0x01FB, 0x069D, 0x1FD7, 0x1FF5,
+		0x1FA5, 0x01BD, 0x06BA, 0x1FF3, 0x1FF1,
+		0x1FAE, 0x0182, 0x06D3, 0x0012, 0x1FEB,
+		0x1FB7, 0x014A, 0x06E5, 0x0035, 0x1FE5,
+		0x1FC0, 0x0114, 0x06F2, 0x005B, 0x1FDF,
+		0x1FC9, 0x00E1, 0x06F9, 0x0085, 0x1FD8,
+	},
+	[VS_LT_9_16_SCALE] = {
+		/* Luma */
+		0x001C, 0x01F6, 0x03DC, 0x01F6, 0x001C,
+		0x0018, 0x01DF, 0x03DB, 0x020C, 0x0022,
+		0x0013, 0x01C9, 0x03D9, 0x0223, 0x0028,
+		0x000F, 0x01B3, 0x03D6, 0x023A, 0x002E,
+		0x000C, 0x019D, 0x03D2, 0x0250, 0x0035,
+		0x0009, 0x0188, 0x03CC, 0x0266, 0x003D,
+		0x0006, 0x0173, 0x03C5, 0x027D, 0x0045,
+		0x0004, 0x015E, 0x03BD, 0x0293, 0x004E,
+		0x0002, 0x014A, 0x03B4, 0x02A8, 0x0058,
+		0x0000, 0x0136, 0x03AA, 0x02BE, 0x0062,
+		0x1FFF, 0x0123, 0x039E, 0x02D3, 0x006D,
+		0x1FFE, 0x0110, 0x0392, 0x02E8, 0x0078,
+		0x1FFD, 0x00FE, 0x0384, 0x02FC, 0x0085,
+		0x1FFD, 0x00ED, 0x0376, 0x030F, 0x0091,
+		0x1FFC, 0x00DC, 0x0367, 0x0322, 0x009F,
+		0x1FFC, 0x00CC, 0x0357, 0x0334, 0x00AD,
+		0x00BC, 0x0344, 0x0344, 0x00BC, 0x0000,
+		0x00AD, 0x0334, 0x0357, 0x00CC, 0x1FFC,
+		0x009F, 0x0322, 0x0367, 0x00DC, 0x1FFC,
+		0x0091, 0x030F, 0x0376, 0x00ED, 0x1FFD,
+		0x0085, 0x02FC, 0x0384, 0x00FE, 0x1FFD,
+		0x0078, 0x02E8, 0x0392, 0x0110, 0x1FFE,
+		0x006D, 0x02D3, 0x039E, 0x0123, 0x1FFF,
+		0x0062, 0x02BE, 0x03AA, 0x0136, 0x0000,
+		0x0058, 0x02A8, 0x03B4, 0x014A, 0x0002,
+		0x004E, 0x0293, 0x03BD, 0x015E, 0x0004,
+		0x0045, 0x027D, 0x03C5, 0x0173, 0x0006,
+		0x003D, 0x0266, 0x03CC, 0x0188, 0x0009,
+		0x0035, 0x0250, 0x03D2, 0x019D, 0x000C,
+		0x002E, 0x023A, 0x03D6, 0x01B3, 0x000F,
+		0x0028, 0x0223, 0x03D9, 0x01C9, 0x0013,
+		0x0022, 0x020C, 0x03DB, 0x01DF, 0x0018,
+		/* Chroma */
+		0x001C, 0x01F6, 0x03DC, 0x01F6, 0x001C,
+		0x0018, 0x01DF, 0x03DB, 0x020C, 0x0022,
+		0x0013, 0x01C9, 0x03D9, 0x0223, 0x0028,
+		0x000F, 0x01B3, 0x03D6, 0x023A, 0x002E,
+		0x000C, 0x019D, 0x03D2, 0x0250, 0x0035,
+		0x0009, 0x0188, 0x03CC, 0x0266, 0x003D,
+		0x0006, 0x0173, 0x03C5, 0x027D, 0x0045,
+		0x0004, 0x015E, 0x03BD, 0x0293, 0x004E,
+		0x0002, 0x014A, 0x03B4, 0x02A8, 0x0058,
+		0x0000, 0x0136, 0x03AA, 0x02BE, 0x0062,
+		0x1FFF, 0x0123, 0x039E, 0x02D3, 0x006D,
+		0x1FFE, 0x0110, 0x0392, 0x02E8, 0x0078,
+		0x1FFD, 0x00FE, 0x0384, 0x02FC, 0x0085,
+		0x1FFD, 0x00ED, 0x0376, 0x030F, 0x0091,
+		0x1FFC, 0x00DC, 0x0367, 0x0322, 0x009F,
+		0x1FFC, 0x00CC, 0x0357, 0x0334, 0x00AD,
+		0x00BC, 0x0344, 0x0344, 0x00BC, 0x0000,
+		0x00AD, 0x0334, 0x0357, 0x00CC, 0x1FFC,
+		0x009F, 0x0322, 0x0367, 0x00DC, 0x1FFC,
+		0x0091, 0x030F, 0x0376, 0x00ED, 0x1FFD,
+		0x0085, 0x02FC, 0x0384, 0x00FE, 0x1FFD,
+		0x0078, 0x02E8, 0x0392, 0x0110, 0x1FFE,
+		0x006D, 0x02D3, 0x039E, 0x0123, 0x1FFF,
+		0x0062, 0x02BE, 0x03AA, 0x0136, 0x0000,
+		0x0058, 0x02A8, 0x03B4, 0x014A, 0x0002,
+		0x004E, 0x0293, 0x03BD, 0x015E, 0x0004,
+		0x0045, 0x027D, 0x03C5, 0x0173, 0x0006,
+		0x003D, 0x0266, 0x03CC, 0x0188, 0x0009,
+		0x0035, 0x0250, 0x03D2, 0x019D, 0x000C,
+		0x002E, 0x023A, 0x03D6, 0x01B3, 0x000F,
+		0x0028, 0x0223, 0x03D9, 0x01C9, 0x0013,
+		0x0022, 0x020C, 0x03DB, 0x01DF, 0x0018,
+	},
+	[VS_LT_10_16_SCALE] = {
+		/* Luma */
+		0x0003, 0x01E9, 0x0428, 0x01E9, 0x0003,
+		0x0000, 0x01D0, 0x0426, 0x0203, 0x0007,
+		0x1FFD, 0x01B7, 0x0424, 0x021C, 0x000C,
+		0x1FFB, 0x019E, 0x0420, 0x0236, 0x0011,
+		0x1FF9, 0x0186, 0x041A, 0x0250, 0x0017,
+		0x1FF7, 0x016E, 0x0414, 0x026A, 0x001D,
+		0x1FF6, 0x0157, 0x040B, 0x0284, 0x0024,
+		0x1FF5, 0x0140, 0x0401, 0x029E, 0x002C,
+		0x1FF4, 0x012A, 0x03F6, 0x02B7, 0x0035,
+		0x1FF4, 0x0115, 0x03E9, 0x02D0, 0x003E,
+		0x1FF4, 0x0100, 0x03DB, 0x02E9, 0x0048,
+		0x1FF4, 0x00EC, 0x03CC, 0x0301, 0x0053,
+		0x1FF4, 0x00D9, 0x03BC, 0x0318, 0x005F,
+		0x1FF5, 0x00C7, 0x03AA, 0x032F, 0x006B,
+		0x1FF6, 0x00B5, 0x0398, 0x0345, 0x0078,
+		0x1FF6, 0x00A5, 0x0384, 0x035B, 0x0086,
+		0x0094, 0x036C, 0x036C, 0x0094, 0x0000,
+		0x0086, 0x035B, 0x0384, 0x00A5, 0x1FF6,
+		0x0078, 0x0345, 0x0398, 0x00B5, 0x1FF6,
+		0x006B, 0x032F, 0x03AA, 0x00C7, 0x1FF5,
+		0x005F, 0x0318, 0x03BC, 0x00D9, 0x1FF4,
+		0x0053, 0x0301, 0x03CC, 0x00EC, 0x1FF4,
+		0x0048, 0x02E9, 0x03DB, 0x0100, 0x1FF4,
+		0x003E, 0x02D0, 0x03E9, 0x0115, 0x1FF4,
+		0x0035, 0x02B7, 0x03F6, 0x012A, 0x1FF4,
+		0x002C, 0x029E, 0x0401, 0x0140, 0x1FF5,
+		0x0024, 0x0284, 0x040B, 0x0157, 0x1FF6,
+		0x001D, 0x026A, 0x0414, 0x016E, 0x1FF7,
+		0x0017, 0x0250, 0x041A, 0x0186, 0x1FF9,
+		0x0011, 0x0236, 0x0420, 0x019E, 0x1FFB,
+		0x000C, 0x021C, 0x0424, 0x01B7, 0x1FFD,
+		0x0007, 0x0203, 0x0426, 0x01D0, 0x0000,
+		/* Chroma */
+		0x0003, 0x01E9, 0x0428, 0x01E9, 0x0003,
+		0x0000, 0x01D0, 0x0426, 0x0203, 0x0007,
+		0x1FFD, 0x01B7, 0x0424, 0x021C, 0x000C,
+		0x1FFB, 0x019E, 0x0420, 0x0236, 0x0011,
+		0x1FF9, 0x0186, 0x041A, 0x0250, 0x0017,
+		0x1FF7, 0x016E, 0x0414, 0x026A, 0x001D,
+		0x1FF6, 0x0157, 0x040B, 0x0284, 0x0024,
+		0x1FF5, 0x0140, 0x0401, 0x029E, 0x002C,
+		0x1FF4, 0x012A, 0x03F6, 0x02B7, 0x0035,
+		0x1FF4, 0x0115, 0x03E9, 0x02D0, 0x003E,
+		0x1FF4, 0x0100, 0x03DB, 0x02E9, 0x0048,
+		0x1FF4, 0x00EC, 0x03CC, 0x0301, 0x0053,
+		0x1FF4, 0x00D9, 0x03BC, 0x0318, 0x005F,
+		0x1FF5, 0x00C7, 0x03AA, 0x032F, 0x006B,
+		0x1FF6, 0x00B5, 0x0398, 0x0345, 0x0078,
+		0x1FF6, 0x00A5, 0x0384, 0x035B, 0x0086,
+		0x0094, 0x036C, 0x036C, 0x0094, 0x0000,
+		0x0086, 0x035B, 0x0384, 0x00A5, 0x1FF6,
+		0x0078, 0x0345, 0x0398, 0x00B5, 0x1FF6,
+		0x006B, 0x032F, 0x03AA, 0x00C7, 0x1FF5,
+		0x005F, 0x0318, 0x03BC, 0x00D9, 0x1FF4,
+		0x0053, 0x0301, 0x03CC, 0x00EC, 0x1FF4,
+		0x0048, 0x02E9, 0x03DB, 0x0100, 0x1FF4,
+		0x003E, 0x02D0, 0x03E9, 0x0115, 0x1FF4,
+		0x0035, 0x02B7, 0x03F6, 0x012A, 0x1FF4,
+		0x002C, 0x029E, 0x0401, 0x0140, 0x1FF5,
+		0x0024, 0x0284, 0x040B, 0x0157, 0x1FF6,
+		0x001D, 0x026A, 0x0414, 0x016E, 0x1FF7,
+		0x0017, 0x0250, 0x041A, 0x0186, 0x1FF9,
+		0x0011, 0x0236, 0x0420, 0x019E, 0x1FFB,
+		0x000C, 0x021C, 0x0424, 0x01B7, 0x1FFD,
+		0x0007, 0x0203, 0x0426, 0x01D0, 0x0000,
+	},
+	[VS_LT_11_16_SCALE] = {
+		/* Luma */
+		0x1FEC, 0x01D6, 0x047C, 0x01D6, 0x1FEC,
+		0x1FEA, 0x01BA, 0x047B, 0x01F3, 0x1FEE,
+		0x1FE9, 0x019D, 0x0478, 0x0211, 0x1FF1,
+		0x1FE8, 0x0182, 0x0473, 0x022E, 0x1FF5,
+		0x1FE8, 0x0167, 0x046C, 0x024C, 0x1FF9,
+		0x1FE8, 0x014D, 0x0464, 0x026A, 0x1FFD,
+		0x1FE8, 0x0134, 0x0459, 0x0288, 0x0003,
+		0x1FE9, 0x011B, 0x044D, 0x02A6, 0x0009,
+		0x1FE9, 0x0104, 0x0440, 0x02C3, 0x0010,
+		0x1FEA, 0x00ED, 0x0430, 0x02E1, 0x0018,
+		0x1FEB, 0x00D7, 0x0420, 0x02FD, 0x0021,
+		0x1FED, 0x00C2, 0x040D, 0x0319, 0x002B,
+		0x1FEE, 0x00AE, 0x03F9, 0x0336, 0x0035,
+		0x1FF0, 0x009C, 0x03E3, 0x0350, 0x0041,
+		0x1FF1, 0x008A, 0x03CD, 0x036B, 0x004D,
+		0x1FF3, 0x0079, 0x03B5, 0x0384, 0x005B,
+		0x0069, 0x0397, 0x0397, 0x0069, 0x0000,
+		0x005B, 0x0384, 0x03B5, 0x0079, 0x1FF3,
+		0x004D, 0x036B, 0x03CD, 0x008A, 0x1FF1,
+		0x0041, 0x0350, 0x03E3, 0x009C, 0x1FF0,
+		0x0035, 0x0336, 0x03F9, 0x00AE, 0x1FEE,
+		0x002B, 0x0319, 0x040D, 0x00C2, 0x1FED,
+		0x0021, 0x02FD, 0x0420, 0x00D7, 0x1FEB,
+		0x0018, 0x02E1, 0x0430, 0x00ED, 0x1FEA,
+		0x0010, 0x02C3, 0x0440, 0x0104, 0x1FE9,
+		0x0009, 0x02A6, 0x044D, 0x011B, 0x1FE9,
+		0x0003, 0x0288, 0x0459, 0x0134, 0x1FE8,
+		0x1FFD, 0x026A, 0x0464, 0x014D, 0x1FE8,
+		0x1FF9, 0x024C, 0x046C, 0x0167, 0x1FE8,
+		0x1FF5, 0x022E, 0x0473, 0x0182, 0x1FE8,
+		0x1FF1, 0x0211, 0x0478, 0x019D, 0x1FE9,
+		0x1FEE, 0x01F3, 0x047B, 0x01BA, 0x1FEA,
+		/* Chroma */
+		0x1FEC, 0x01D6, 0x047C, 0x01D6, 0x1FEC,
+		0x1FEA, 0x01BA, 0x047B, 0x01F3, 0x1FEE,
+		0x1FE9, 0x019D, 0x0478, 0x0211, 0x1FF1,
+		0x1FE8, 0x0182, 0x0473, 0x022E, 0x1FF5,
+		0x1FE8, 0x0167, 0x046C, 0x024C, 0x1FF9,
+		0x1FE8, 0x014D, 0x0464, 0x026A, 0x1FFD,
+		0x1FE8, 0x0134, 0x0459, 0x0288, 0x0003,
+		0x1FE9, 0x011B, 0x044D, 0x02A6, 0x0009,
+		0x1FE9, 0x0104, 0x0440, 0x02C3, 0x0010,
+		0x1FEA, 0x00ED, 0x0430, 0x02E1, 0x0018,
+		0x1FEB, 0x00D7, 0x0420, 0x02FD, 0x0021,
+		0x1FED, 0x00C2, 0x040D, 0x0319, 0x002B,
+		0x1FEE, 0x00AE, 0x03F9, 0x0336, 0x0035,
+		0x1FF0, 0x009C, 0x03E3, 0x0350, 0x0041,
+		0x1FF1, 0x008A, 0x03CD, 0x036B, 0x004D,
+		0x1FF3, 0x0079, 0x03B5, 0x0384, 0x005B,
+		0x0069, 0x0397, 0x0397, 0x0069, 0x0000,
+		0x005B, 0x0384, 0x03B5, 0x0079, 0x1FF3,
+		0x004D, 0x036B, 0x03CD, 0x008A, 0x1FF1,
+		0x0041, 0x0350, 0x03E3, 0x009C, 0x1FF0,
+		0x0035, 0x0336, 0x03F9, 0x00AE, 0x1FEE,
+		0x002B, 0x0319, 0x040D, 0x00C2, 0x1FED,
+		0x0021, 0x02FD, 0x0420, 0x00D7, 0x1FEB,
+		0x0018, 0x02E1, 0x0430, 0x00ED, 0x1FEA,
+		0x0010, 0x02C3, 0x0440, 0x0104, 0x1FE9,
+		0x0009, 0x02A6, 0x044D, 0x011B, 0x1FE9,
+		0x0003, 0x0288, 0x0459, 0x0134, 0x1FE8,
+		0x1FFD, 0x026A, 0x0464, 0x014D, 0x1FE8,
+		0x1FF9, 0x024C, 0x046C, 0x0167, 0x1FE8,
+		0x1FF5, 0x022E, 0x0473, 0x0182, 0x1FE8,
+		0x1FF1, 0x0211, 0x0478, 0x019D, 0x1FE9,
+		0x1FEE, 0x01F3, 0x047B, 0x01BA, 0x1FEA,
+	},
+	[VS_LT_12_16_SCALE] = {
+		/* Luma */
+		0x1FD8, 0x01BC, 0x04D8, 0x01BC, 0x1FD8,
+		0x1FD8, 0x019C, 0x04D8, 0x01DC, 0x1FD8,
+		0x1FD8, 0x017D, 0x04D4, 0x01FE, 0x1FD9,
+		0x1FD9, 0x015E, 0x04CF, 0x0220, 0x1FDA,
+		0x1FDB, 0x0141, 0x04C7, 0x0241, 0x1FDC,
+		0x1FDC, 0x0125, 0x04BC, 0x0264, 0x1FDF,
+		0x1FDE, 0x0109, 0x04B0, 0x0286, 0x1FE3,
+		0x1FE0, 0x00EF, 0x04A1, 0x02A9, 0x1FE7,
+		0x1FE2, 0x00D6, 0x0491, 0x02CB, 0x1FEC,
+		0x1FE4, 0x00BE, 0x047E, 0x02EE, 0x1FF2,
+		0x1FE6, 0x00A7, 0x046A, 0x030F, 0x1FFA,
+		0x1FE9, 0x0092, 0x0453, 0x0330, 0x0002,
+		0x1FEB, 0x007E, 0x043B, 0x0351, 0x000B,
+		0x1FED, 0x006B, 0x0421, 0x0372, 0x0015,
+		0x1FEF, 0x005A, 0x0406, 0x0391, 0x0020,
+		0x1FF1, 0x0049, 0x03EA, 0x03AF, 0x002D,
+		0x003A, 0x03C6, 0x03C6, 0x003A, 0x0000,
+		0x002D, 0x03AF, 0x03EA, 0x0049, 0x1FF1,
+		0x0020, 0x0391, 0x0406, 0x005A, 0x1FEF,
+		0x0015, 0x0372, 0x0421, 0x006B, 0x1FED,
+		0x000B, 0x0351, 0x043B, 0x007E, 0x1FEB,
+		0x0002, 0x0330, 0x0453, 0x0092, 0x1FE9,
+		0x1FFA, 0x030F, 0x046A, 0x00A7, 0x1FE6,
+		0x1FF2, 0x02EE, 0x047E, 0x00BE, 0x1FE4,
+		0x1FEC, 0x02CB, 0x0491, 0x00D6, 0x1FE2,
+		0x1FE7, 0x02A9, 0x04A1, 0x00EF, 0x1FE0,
+		0x1FE3, 0x0286, 0x04B0, 0x0109, 0x1FDE,
+		0x1FDF, 0x0264, 0x04BC, 0x0125, 0x1FDC,
+		0x1FDC, 0x0241, 0x04C7, 0x0141, 0x1FDB,
+		0x1FDA, 0x0220, 0x04CF, 0x015E, 0x1FD9,
+		0x1FD9, 0x01FE, 0x04D4, 0x017D, 0x1FD8,
+		0x1FD8, 0x01DC, 0x04D8, 0x019C, 0x1FD8,
+		/* Chroma */
+		0x1FD8, 0x01BC, 0x04D8, 0x01BC, 0x1FD8,
+		0x1FD8, 0x019C, 0x04D8, 0x01DC, 0x1FD8,
+		0x1FD8, 0x017D, 0x04D4, 0x01FE, 0x1FD9,
+		0x1FD9, 0x015E, 0x04CF, 0x0220, 0x1FDA,
+		0x1FDB, 0x0141, 0x04C7, 0x0241, 0x1FDC,
+		0x1FDC, 0x0125, 0x04BC, 0x0264, 0x1FDF,
+		0x1FDE, 0x0109, 0x04B0, 0x0286, 0x1FE3,
+		0x1FE0, 0x00EF, 0x04A1, 0x02A9, 0x1FE7,
+		0x1FE2, 0x00D6, 0x0491, 0x02CB, 0x1FEC,
+		0x1FE4, 0x00BE, 0x047E, 0x02EE, 0x1FF2,
+		0x1FE6, 0x00A7, 0x046A, 0x030F, 0x1FFA,
+		0x1FE9, 0x0092, 0x0453, 0x0330, 0x0002,
+		0x1FEB, 0x007E, 0x043B, 0x0351, 0x000B,
+		0x1FED, 0x006B, 0x0421, 0x0372, 0x0015,
+		0x1FEF, 0x005A, 0x0406, 0x0391, 0x0020,
+		0x1FF1, 0x0049, 0x03EA, 0x03AF, 0x002D,
+		0x003A, 0x03C6, 0x03C6, 0x003A, 0x0000,
+		0x002D, 0x03AF, 0x03EA, 0x0049, 0x1FF1,
+		0x0020, 0x0391, 0x0406, 0x005A, 0x1FEF,
+		0x0015, 0x0372, 0x0421, 0x006B, 0x1FED,
+		0x000B, 0x0351, 0x043B, 0x007E, 0x1FEB,
+		0x0002, 0x0330, 0x0453, 0x0092, 0x1FE9,
+		0x1FFA, 0x030F, 0x046A, 0x00A7, 0x1FE6,
+		0x1FF2, 0x02EE, 0x047E, 0x00BE, 0x1FE4,
+		0x1FEC, 0x02CB, 0x0491, 0x00D6, 0x1FE2,
+		0x1FE7, 0x02A9, 0x04A1, 0x00EF, 0x1FE0,
+		0x1FE3, 0x0286, 0x04B0, 0x0109, 0x1FDE,
+		0x1FDF, 0x0264, 0x04BC, 0x0125, 0x1FDC,
+		0x1FDC, 0x0241, 0x04C7, 0x0141, 0x1FDB,
+		0x1FDA, 0x0220, 0x04CF, 0x015E, 0x1FD9,
+		0x1FD9, 0x01FE, 0x04D4, 0x017D, 0x1FD8,
+		0x1FD8, 0x01DC, 0x04D8, 0x019C, 0x1FD8,
+	},
+	[VS_LT_13_16_SCALE] = {
+		/* Luma */
+		0x1FC8, 0x0199, 0x053E, 0x0199, 0x1FC8,
+		0x1FCA, 0x0175, 0x053E, 0x01BD, 0x1FC6,
+		0x1FCD, 0x0153, 0x0539, 0x01E2, 0x1FC5,
+		0x1FCF, 0x0132, 0x0532, 0x0209, 0x1FC4,
+		0x1FD2, 0x0112, 0x0529, 0x022F, 0x1FC4,
+		0x1FD5, 0x00F4, 0x051C, 0x0256, 0x1FC5,
+		0x1FD8, 0x00D7, 0x050D, 0x027E, 0x1FC6,
+		0x1FDC, 0x00BB, 0x04FB, 0x02A6, 0x1FC8,
+		0x1FDF, 0x00A1, 0x04E7, 0x02CE, 0x1FCB,
+		0x1FE2, 0x0089, 0x04D1, 0x02F5, 0x1FCF,
+		0x1FE5, 0x0072, 0x04B8, 0x031D, 0x1FD4,
+		0x1FE8, 0x005D, 0x049E, 0x0344, 0x1FD9,
+		0x1FEB, 0x0049, 0x0480, 0x036B, 0x1FE1,
+		0x1FEE, 0x0037, 0x0462, 0x0390, 0x1FE9,
+		0x1FF0, 0x0026, 0x0442, 0x03B6, 0x1FF2,
+		0x1FF2, 0x0017, 0x0420, 0x03DA, 0x1FFD,
+		0x0009, 0x03F7, 0x03F7, 0x0009, 0x0000,
+		0x1FFD, 0x03DA, 0x0420, 0x0017, 0x1FF2,
+		0x1FF2, 0x03B6, 0x0442, 0x0026, 0x1FF0,
+		0x1FE9, 0x0390, 0x0462, 0x0037, 0x1FEE,
+		0x1FE1, 0x036B, 0x0480, 0x0049, 0x1FEB,
+		0x1FD9, 0x0344, 0x049E, 0x005D, 0x1FE8,
+		0x1FD4, 0x031D, 0x04B8, 0x0072, 0x1FE5,
+		0x1FCF, 0x02F5, 0x04D1, 0x0089, 0x1FE2,
+		0x1FCB, 0x02CE, 0x04E7, 0x00A1, 0x1FDF,
+		0x1FC8, 0x02A6, 0x04FB, 0x00BB, 0x1FDC,
+		0x1FC6, 0x027E, 0x050D, 0x00D7, 0x1FD8,
+		0x1FC5, 0x0256, 0x051C, 0x00F4, 0x1FD5,
+		0x1FC4, 0x022F, 0x0529, 0x0112, 0x1FD2,
+		0x1FC4, 0x0209, 0x0532, 0x0132, 0x1FCF,
+		0x1FC5, 0x01E2, 0x0539, 0x0153, 0x1FCD,
+		0x1FC6, 0x01BD, 0x053E, 0x0175, 0x1FCA,
+		/* Chroma */
+		0x1FC8, 0x0199, 0x053E, 0x0199, 0x1FC8,
+		0x1FCA, 0x0175, 0x053E, 0x01BD, 0x1FC6,
+		0x1FCD, 0x0153, 0x0539, 0x01E2, 0x1FC5,
+		0x1FCF, 0x0132, 0x0532, 0x0209, 0x1FC4,
+		0x1FD2, 0x0112, 0x0529, 0x022F, 0x1FC4,
+		0x1FD5, 0x00F4, 0x051C, 0x0256, 0x1FC5,
+		0x1FD8, 0x00D7, 0x050D, 0x027E, 0x1FC6,
+		0x1FDC, 0x00BB, 0x04FB, 0x02A6, 0x1FC8,
+		0x1FDF, 0x00A1, 0x04E7, 0x02CE, 0x1FCB,
+		0x1FE2, 0x0089, 0x04D1, 0x02F5, 0x1FCF,
+		0x1FE5, 0x0072, 0x04B8, 0x031D, 0x1FD4,
+		0x1FE8, 0x005D, 0x049E, 0x0344, 0x1FD9,
+		0x1FEB, 0x0049, 0x0480, 0x036B, 0x1FE1,
+		0x1FEE, 0x0037, 0x0462, 0x0390, 0x1FE9,
+		0x1FF0, 0x0026, 0x0442, 0x03B6, 0x1FF2,
+		0x1FF2, 0x0017, 0x0420, 0x03DA, 0x1FFD,
+		0x0009, 0x03F7, 0x03F7, 0x0009, 0x0000,
+		0x1FFD, 0x03DA, 0x0420, 0x0017, 0x1FF2,
+		0x1FF2, 0x03B6, 0x0442, 0x0026, 0x1FF0,
+		0x1FE9, 0x0390, 0x0462, 0x0037, 0x1FEE,
+		0x1FE1, 0x036B, 0x0480, 0x0049, 0x1FEB,
+		0x1FD9, 0x0344, 0x049E, 0x005D, 0x1FE8,
+		0x1FD4, 0x031D, 0x04B8, 0x0072, 0x1FE5,
+		0x1FCF, 0x02F5, 0x04D1, 0x0089, 0x1FE2,
+		0x1FCB, 0x02CE, 0x04E7, 0x00A1, 0x1FDF,
+		0x1FC8, 0x02A6, 0x04FB, 0x00BB, 0x1FDC,
+		0x1FC6, 0x027E, 0x050D, 0x00D7, 0x1FD8,
+		0x1FC5, 0x0256, 0x051C, 0x00F4, 0x1FD5,
+		0x1FC4, 0x022F, 0x0529, 0x0112, 0x1FD2,
+		0x1FC4, 0x0209, 0x0532, 0x0132, 0x1FCF,
+		0x1FC5, 0x01E2, 0x0539, 0x0153, 0x1FCD,
+		0x1FC6, 0x01BD, 0x053E, 0x0175, 0x1FCA,
+	},
+	[VS_LT_14_16_SCALE] = {
+		/* Luma */
+		0x1FBF, 0x016C, 0x05AA, 0x016C, 0x1FBF,
+		0x1FC3, 0x0146, 0x05A8, 0x0194, 0x1FBB,
+		0x1FC7, 0x0121, 0x05A3, 0x01BD, 0x1FB8,
+		0x1FCB, 0x00FD, 0x059B, 0x01E8, 0x1FB5,
+		0x1FD0, 0x00DC, 0x058F, 0x0213, 0x1FB2,
+		0x1FD4, 0x00BC, 0x0580, 0x0240, 0x1FB0,
+		0x1FD8, 0x009E, 0x056E, 0x026D, 0x1FAF,
+		0x1FDC, 0x0082, 0x055A, 0x029A, 0x1FAE,
+		0x1FE0, 0x0067, 0x0542, 0x02C9, 0x1FAE,
+		0x1FE4, 0x004F, 0x0528, 0x02F6, 0x1FAF,
+		0x1FE8, 0x0038, 0x050A, 0x0325, 0x1FB1,
+		0x1FEB, 0x0024, 0x04EB, 0x0352, 0x1FB4,
+		0x1FEE, 0x0011, 0x04C8, 0x0380, 0x1FB9,
+		0x1FF1, 0x0000, 0x04A4, 0x03AC, 0x1FBF,
+		0x1FF4, 0x1FF1, 0x047D, 0x03D8, 0x1FC6,
+		0x1FF6, 0x1FE4, 0x0455, 0x0403, 0x1FCE,
+		0x1FD8, 0x0428, 0x0428, 0x1FD8, 0x0000,
+		0x1FCE, 0x0403, 0x0455, 0x1FE4, 0x1FF6,
+		0x1FC6, 0x03D8, 0x047D, 0x1FF1, 0x1FF4,
+		0x1FBF, 0x03AC, 0x04A4, 0x0000, 0x1FF1,
+		0x1FB9, 0x0380, 0x04C8, 0x0011, 0x1FEE,
+		0x1FB4, 0x0352, 0x04EB, 0x0024, 0x1FEB,
+		0x1FB1, 0x0325, 0x050A, 0x0038, 0x1FE8,
+		0x1FAF, 0x02F6, 0x0528, 0x004F, 0x1FE4,
+		0x1FAE, 0x02C9, 0x0542, 0x0067, 0x1FE0,
+		0x1FAE, 0x029A, 0x055A, 0x0082, 0x1FDC,
+		0x1FAF, 0x026D, 0x056E, 0x009E, 0x1FD8,
+		0x1FB0, 0x0240, 0x0580, 0x00BC, 0x1FD4,
+		0x1FB2, 0x0213, 0x058F, 0x00DC, 0x1FD0,
+		0x1FB5, 0x01E8, 0x059B, 0x00FD, 0x1FCB,
+		0x1FB8, 0x01BD, 0x05A3, 0x0121, 0x1FC7,
+		0x1FBB, 0x0194, 0x05A8, 0x0146, 0x1FC3,
+		/* Chroma */
+		0x1FBF, 0x016C, 0x05AA, 0x016C, 0x1FBF,
+		0x1FC3, 0x0146, 0x05A8, 0x0194, 0x1FBB,
+		0x1FC7, 0x0121, 0x05A3, 0x01BD, 0x1FB8,
+		0x1FCB, 0x00FD, 0x059B, 0x01E8, 0x1FB5,
+		0x1FD0, 0x00DC, 0x058F, 0x0213, 0x1FB2,
+		0x1FD4, 0x00BC, 0x0580, 0x0240, 0x1FB0,
+		0x1FD8, 0x009E, 0x056E, 0x026D, 0x1FAF,
+		0x1FDC, 0x0082, 0x055A, 0x029A, 0x1FAE,
+		0x1FE0, 0x0067, 0x0542, 0x02C9, 0x1FAE,
+		0x1FE4, 0x004F, 0x0528, 0x02F6, 0x1FAF,
+		0x1FE8, 0x0038, 0x050A, 0x0325, 0x1FB1,
+		0x1FEB, 0x0024, 0x04EB, 0x0352, 0x1FB4,
+		0x1FEE, 0x0011, 0x04C8, 0x0380, 0x1FB9,
+		0x1FF1, 0x0000, 0x04A4, 0x03AC, 0x1FBF,
+		0x1FF4, 0x1FF1, 0x047D, 0x03D8, 0x1FC6,
+		0x1FF6, 0x1FE4, 0x0455, 0x0403, 0x1FCE,
+		0x1FD8, 0x0428, 0x0428, 0x1FD8, 0x0000,
+		0x1FCE, 0x0403, 0x0455, 0x1FE4, 0x1FF6,
+		0x1FC6, 0x03D8, 0x047D, 0x1FF1, 0x1FF4,
+		0x1FBF, 0x03AC, 0x04A4, 0x0000, 0x1FF1,
+		0x1FB9, 0x0380, 0x04C8, 0x0011, 0x1FEE,
+		0x1FB4, 0x0352, 0x04EB, 0x0024, 0x1FEB,
+		0x1FB1, 0x0325, 0x050A, 0x0038, 0x1FE8,
+		0x1FAF, 0x02F6, 0x0528, 0x004F, 0x1FE4,
+		0x1FAE, 0x02C9, 0x0542, 0x0067, 0x1FE0,
+		0x1FAE, 0x029A, 0x055A, 0x0082, 0x1FDC,
+		0x1FAF, 0x026D, 0x056E, 0x009E, 0x1FD8,
+		0x1FB0, 0x0240, 0x0580, 0x00BC, 0x1FD4,
+		0x1FB2, 0x0213, 0x058F, 0x00DC, 0x1FD0,
+		0x1FB5, 0x01E8, 0x059B, 0x00FD, 0x1FCB,
+		0x1FB8, 0x01BD, 0x05A3, 0x0121, 0x1FC7,
+		0x1FBB, 0x0194, 0x05A8, 0x0146, 0x1FC3,
+	},
+	[VS_LT_15_16_SCALE] = {
+		/* Luma */
+		0x1FBD, 0x0136, 0x061A, 0x0136, 0x1FBD,
+		0x1FC3, 0x010D, 0x0617, 0x0161, 0x1FB8,
+		0x1FC9, 0x00E6, 0x0611, 0x018E, 0x1FB2,
+		0x1FCE, 0x00C1, 0x0607, 0x01BD, 0x1FAD,
+		0x1FD4, 0x009E, 0x05F9, 0x01ED, 0x1FA8,
+		0x1FD9, 0x007D, 0x05E8, 0x021F, 0x1FA3,
+		0x1FDE, 0x005E, 0x05D3, 0x0252, 0x1F9F,
+		0x1FE2, 0x0042, 0x05BC, 0x0285, 0x1F9B,
+		0x1FE7, 0x0029, 0x059F, 0x02B9, 0x1F98,
+		0x1FEA, 0x0011, 0x0580, 0x02EF, 0x1F96,
+		0x1FEE, 0x1FFC, 0x055D, 0x0324, 0x1F95,
+		0x1FF1, 0x1FE9, 0x0538, 0x0359, 0x1F95,
+		0x1FF4, 0x1FD8, 0x0510, 0x038E, 0x1F96,
+		0x1FF7, 0x1FC9, 0x04E5, 0x03C2, 0x1F99,
+		0x1FF9, 0x1FBD, 0x04B8, 0x03F5, 0x1F9D,
+		0x1FFB, 0x1FB2, 0x0489, 0x0428, 0x1FA2,
+		0x1FAA, 0x0456, 0x0456, 0x1FAA, 0x0000,
+		0x1FA2, 0x0428, 0x0489, 0x1FB2, 0x1FFB,
+		0x1F9D, 0x03F5, 0x04B8, 0x1FBD, 0x1FF9,
+		0x1F99, 0x03C2, 0x04E5, 0x1FC9, 0x1FF7,
+		0x1F96, 0x038E, 0x0510, 0x1FD8, 0x1FF4,
+		0x1F95, 0x0359, 0x0538, 0x1FE9, 0x1FF1,
+		0x1F95, 0x0324, 0x055D, 0x1FFC, 0x1FEE,
+		0x1F96, 0x02EF, 0x0580, 0x0011, 0x1FEA,
+		0x1F98, 0x02B9, 0x059F, 0x0029, 0x1FE7,
+		0x1F9B, 0x0285, 0x05BC, 0x0042, 0x1FE2,
+		0x1F9F, 0x0252, 0x05D3, 0x005E, 0x1FDE,
+		0x1FA3, 0x021F, 0x05E8, 0x007D, 0x1FD9,
+		0x1FA8, 0x01ED, 0x05F9, 0x009E, 0x1FD4,
+		0x1FAD, 0x01BD, 0x0607, 0x00C1, 0x1FCE,
+		0x1FB2, 0x018E, 0x0611, 0x00E6, 0x1FC9,
+		0x1FB8, 0x0161, 0x0617, 0x010D, 0x1FC3,
+		/* Chroma */
+		0x1FBD, 0x0136, 0x061A, 0x0136, 0x1FBD,
+		0x1FC3, 0x010D, 0x0617, 0x0161, 0x1FB8,
+		0x1FC9, 0x00E6, 0x0611, 0x018E, 0x1FB2,
+		0x1FCE, 0x00C1, 0x0607, 0x01BD, 0x1FAD,
+		0x1FD4, 0x009E, 0x05F9, 0x01ED, 0x1FA8,
+		0x1FD9, 0x007D, 0x05E8, 0x021F, 0x1FA3,
+		0x1FDE, 0x005E, 0x05D3, 0x0252, 0x1F9F,
+		0x1FE2, 0x0042, 0x05BC, 0x0285, 0x1F9B,
+		0x1FE7, 0x0029, 0x059F, 0x02B9, 0x1F98,
+		0x1FEA, 0x0011, 0x0580, 0x02EF, 0x1F96,
+		0x1FEE, 0x1FFC, 0x055D, 0x0324, 0x1F95,
+		0x1FF1, 0x1FE9, 0x0538, 0x0359, 0x1F95,
+		0x1FF4, 0x1FD8, 0x0510, 0x038E, 0x1F96,
+		0x1FF7, 0x1FC9, 0x04E5, 0x03C2, 0x1F99,
+		0x1FF9, 0x1FBD, 0x04B8, 0x03F5, 0x1F9D,
+		0x1FFB, 0x1FB2, 0x0489, 0x0428, 0x1FA2,
+		0x1FAA, 0x0456, 0x0456, 0x1FAA, 0x0000,
+		0x1FA2, 0x0428, 0x0489, 0x1FB2, 0x1FFB,
+		0x1F9D, 0x03F5, 0x04B8, 0x1FBD, 0x1FF9,
+		0x1F99, 0x03C2, 0x04E5, 0x1FC9, 0x1FF7,
+		0x1F96, 0x038E, 0x0510, 0x1FD8, 0x1FF4,
+		0x1F95, 0x0359, 0x0538, 0x1FE9, 0x1FF1,
+		0x1F95, 0x0324, 0x055D, 0x1FFC, 0x1FEE,
+		0x1F96, 0x02EF, 0x0580, 0x0011, 0x1FEA,
+		0x1F98, 0x02B9, 0x059F, 0x0029, 0x1FE7,
+		0x1F9B, 0x0285, 0x05BC, 0x0042, 0x1FE2,
+		0x1F9F, 0x0252, 0x05D3, 0x005E, 0x1FDE,
+		0x1FA3, 0x021F, 0x05E8, 0x007D, 0x1FD9,
+		0x1FA8, 0x01ED, 0x05F9, 0x009E, 0x1FD4,
+		0x1FAD, 0x01BD, 0x0607, 0x00C1, 0x1FCE,
+		0x1FB2, 0x018E, 0x0611, 0x00E6, 0x1FC9,
+		0x1FB8, 0x0161, 0x0617, 0x010D, 0x1FC3,
+	},
+	[VS_LT_16_16_SCALE] = {
+		/* Luma */
+		0x1FC3, 0x00F8, 0x068A, 0x00F8, 0x1FC3,
+		0x1FCA, 0x00CC, 0x0689, 0x0125, 0x1FBC,
+		0x1FD1, 0x00A3, 0x0681, 0x0156, 0x1FB5,
+		0x1FD7, 0x007D, 0x0676, 0x0188, 0x1FAE,
+		0x1FDD, 0x005A, 0x0666, 0x01BD, 0x1FA6,
+		0x1FE3, 0x0039, 0x0652, 0x01F3, 0x1F9F,
+		0x1FE8, 0x001B, 0x0639, 0x022C, 0x1F98,
+		0x1FEC, 0x0000, 0x061D, 0x0265, 0x1F92,
+		0x1FF0, 0x1FE8, 0x05FC, 0x02A0, 0x1F8C,
+		0x1FF4, 0x1FD2, 0x05D7, 0x02DC, 0x1F87,
+		0x1FF7, 0x1FBF, 0x05AF, 0x0319, 0x1F82,
+		0x1FFA, 0x1FAF, 0x0583, 0x0356, 0x1F7E,
+		0x1FFC, 0x1FA1, 0x0554, 0x0393, 0x1F7C,
+		0x1FFE, 0x1F95, 0x0523, 0x03CF, 0x1F7B,
+		0x0000, 0x1F8C, 0x04EE, 0x040B, 0x1F7B,
+		0x0001, 0x1F85, 0x04B8, 0x0446, 0x1F7C,
+		0x1F80, 0x0480, 0x0480, 0x1F80, 0x0000,
+		0x1F7C, 0x0446, 0x04B8, 0x1F85, 0x0001,
+		0x1F7B, 0x040B, 0x04EE, 0x1F8C, 0x0000,
+		0x1F7B, 0x03CF, 0x0523, 0x1F95, 0x1FFE,
+		0x1F7C, 0x0393, 0x0554, 0x1FA1, 0x1FFC,
+		0x1F7E, 0x0356, 0x0583, 0x1FAF, 0x1FFA,
+		0x1F82, 0x0319, 0x05AF, 0x1FBF, 0x1FF7,
+		0x1F87, 0x02DC, 0x05D7, 0x1FD2, 0x1FF4,
+		0x1F8C, 0x02A0, 0x05FC, 0x1FE8, 0x1FF0,
+		0x1F92, 0x0265, 0x061D, 0x0000, 0x1FEC,
+		0x1F98, 0x022C, 0x0639, 0x001B, 0x1FE8,
+		0x1F9F, 0x01F3, 0x0652, 0x0039, 0x1FE3,
+		0x1FA6, 0x01BD, 0x0666, 0x005A, 0x1FDD,
+		0x1FAE, 0x0188, 0x0676, 0x007D, 0x1FD7,
+		0x1FB5, 0x0156, 0x0681, 0x00A3, 0x1FD1,
+		0x1FBC, 0x0125, 0x0689, 0x00CC, 0x1FCA,
+		/* Chroma */
+		0x1FC3, 0x00F8, 0x068A, 0x00F8, 0x1FC3,
+		0x1FCA, 0x00CC, 0x0689, 0x0125, 0x1FBC,
+		0x1FD1, 0x00A3, 0x0681, 0x0156, 0x1FB5,
+		0x1FD7, 0x007D, 0x0676, 0x0188, 0x1FAE,
+		0x1FDD, 0x005A, 0x0666, 0x01BD, 0x1FA6,
+		0x1FE3, 0x0039, 0x0652, 0x01F3, 0x1F9F,
+		0x1FE8, 0x001B, 0x0639, 0x022C, 0x1F98,
+		0x1FEC, 0x0000, 0x061D, 0x0265, 0x1F92,
+		0x1FF0, 0x1FE8, 0x05FC, 0x02A0, 0x1F8C,
+		0x1FF4, 0x1FD2, 0x05D7, 0x02DC, 0x1F87,
+		0x1FF7, 0x1FBF, 0x05AF, 0x0319, 0x1F82,
+		0x1FFA, 0x1FAF, 0x0583, 0x0356, 0x1F7E,
+		0x1FFC, 0x1FA1, 0x0554, 0x0393, 0x1F7C,
+		0x1FFE, 0x1F95, 0x0523, 0x03CF, 0x1F7B,
+		0x0000, 0x1F8C, 0x04EE, 0x040B, 0x1F7B,
+		0x0001, 0x1F85, 0x04B8, 0x0446, 0x1F7C,
+		0x1F80, 0x0480, 0x0480, 0x1F80, 0x0000,
+		0x1F7C, 0x0446, 0x04B8, 0x1F85, 0x0001,
+		0x1F7B, 0x040B, 0x04EE, 0x1F8C, 0x0000,
+		0x1F7B, 0x03CF, 0x0523, 0x1F95, 0x1FFE,
+		0x1F7C, 0x0393, 0x0554, 0x1FA1, 0x1FFC,
+		0x1F7E, 0x0356, 0x0583, 0x1FAF, 0x1FFA,
+		0x1F82, 0x0319, 0x05AF, 0x1FBF, 0x1FF7,
+		0x1F87, 0x02DC, 0x05D7, 0x1FD2, 0x1FF4,
+		0x1F8C, 0x02A0, 0x05FC, 0x1FE8, 0x1FF0,
+		0x1F92, 0x0265, 0x061D, 0x0000, 0x1FEC,
+		0x1F98, 0x022C, 0x0639, 0x001B, 0x1FE8,
+		0x1F9F, 0x01F3, 0x0652, 0x0039, 0x1FE3,
+		0x1FA6, 0x01BD, 0x0666, 0x005A, 0x1FDD,
+		0x1FAE, 0x0188, 0x0676, 0x007D, 0x1FD7,
+		0x1FB5, 0x0156, 0x0681, 0x00A3, 0x1FD1,
+		0x1FBC, 0x0125, 0x0689, 0x00CC, 0x1FCA,
+	},
+	[VS_1_TO_1_SCALE] = {
+		/* Luma */
+		0x0000, 0x0000, 0x0800, 0x0000, 0x0000,
+		0x1FD8, 0x0085, 0x06F9, 0x00E1, 0x1FC9,
+		0x1FDF, 0x005B, 0x06F2, 0x0114, 0x1FC0,
+		0x1FE5, 0x0035, 0x06E5, 0x014A, 0x1FB7,
+		0x1FEB, 0x0012, 0x06D3, 0x0182, 0x1FAE,
+		0x1FF1, 0x1FF3, 0x06BA, 0x01BD, 0x1FA5,
+		0x1FF5, 0x1FD7, 0x069D, 0x01FB, 0x1F9C,
+		0x1FF9, 0x1FBE, 0x067C, 0x023A, 0x1F93,
+		0x1FFD, 0x1FA8, 0x0656, 0x027B, 0x1F8A,
+		0x0000, 0x1F95, 0x062B, 0x02BF, 0x1F81,
+		0x0002, 0x1F86, 0x05FC, 0x0303, 0x1F79,
+		0x0004, 0x1F79, 0x05CA, 0x0347, 0x1F72,
+		0x0005, 0x1F6F, 0x0594, 0x038D, 0x1F6B,
+		0x0006, 0x1F67, 0x055B, 0x03D2, 0x1F66,
+		0x0007, 0x1F62, 0x051E, 0x0417, 0x1F62,
+		0x0007, 0x1F5F, 0x04DF, 0x045C, 0x1F5F,
+		0x1F5E, 0x04A2, 0x04A2, 0x1F5E, 0x0000,
+		0x1F5F, 0x045C, 0x04DF, 0x1F5F, 0x0007,
+		0x1F62, 0x0417, 0x051E, 0x1F62, 0x0007,
+		0x1F66, 0x03D2, 0x055B, 0x1F67, 0x0006,
+		0x1F6B, 0x038D, 0x0594, 0x1F6F, 0x0005,
+		0x1F72, 0x0347, 0x05CA, 0x1F79, 0x0004,
+		0x1F79, 0x0303, 0x05FC, 0x1F86, 0x0002,
+		0x1F81, 0x02BF, 0x062B, 0x1F95, 0x0000,
+		0x1F8A, 0x027B, 0x0656, 0x1FA8, 0x1FFD,
+		0x1F93, 0x023A, 0x067C, 0x1FBE, 0x1FF9,
+		0x1F9C, 0x01FB, 0x069D, 0x1FD7, 0x1FF5,
+		0x1FA5, 0x01BD, 0x06BA, 0x1FF3, 0x1FF1,
+		0x1FAE, 0x0182, 0x06D3, 0x0012, 0x1FEB,
+		0x1FB7, 0x014A, 0x06E5, 0x0035, 0x1FE5,
+		0x1FC0, 0x0114, 0x06F2, 0x005B, 0x1FDF,
+		0x1FC9, 0x00E1, 0x06F9, 0x0085, 0x1FD8,
+		/* Chroma */
+		0x0000, 0x0000, 0x0800, 0x0000, 0x0000,
+		0x1FD8, 0x0085, 0x06F9, 0x00E1, 0x1FC9,
+		0x1FDF, 0x005B, 0x06F2, 0x0114, 0x1FC0,
+		0x1FE5, 0x0035, 0x06E5, 0x014A, 0x1FB7,
+		0x1FEB, 0x0012, 0x06D3, 0x0182, 0x1FAE,
+		0x1FF1, 0x1FF3, 0x06BA, 0x01BD, 0x1FA5,
+		0x1FF5, 0x1FD7, 0x069D, 0x01FB, 0x1F9C,
+		0x1FF9, 0x1FBE, 0x067C, 0x023A, 0x1F93,
+		0x1FFD, 0x1FA8, 0x0656, 0x027B, 0x1F8A,
+		0x0000, 0x1F95, 0x062B, 0x02BF, 0x1F81,
+		0x0002, 0x1F86, 0x05FC, 0x0303, 0x1F79,
+		0x0004, 0x1F79, 0x05CA, 0x0347, 0x1F72,
+		0x0005, 0x1F6F, 0x0594, 0x038D, 0x1F6B,
+		0x0006, 0x1F67, 0x055B, 0x03D2, 0x1F66,
+		0x0007, 0x1F62, 0x051E, 0x0417, 0x1F62,
+		0x0007, 0x1F5F, 0x04DF, 0x045C, 0x1F5F,
+		0x1F5E, 0x04A2, 0x04A2, 0x1F5E, 0x0000,
+		0x1F5F, 0x045C, 0x04DF, 0x1F5F, 0x0007,
+		0x1F62, 0x0417, 0x051E, 0x1F62, 0x0007,
+		0x1F66, 0x03D2, 0x055B, 0x1F67, 0x0006,
+		0x1F6B, 0x038D, 0x0594, 0x1F6F, 0x0005,
+		0x1F72, 0x0347, 0x05CA, 0x1F79, 0x0004,
+		0x1F79, 0x0303, 0x05FC, 0x1F86, 0x0002,
+		0x1F81, 0x02BF, 0x062B, 0x1F95, 0x0000,
+		0x1F8A, 0x027B, 0x0656, 0x1FA8, 0x1FFD,
+		0x1F93, 0x023A, 0x067C, 0x1FBE, 0x1FF9,
+		0x1F9C, 0x01FB, 0x069D, 0x1FD7, 0x1FF5,
+		0x1FA5, 0x01BD, 0x06BA, 0x1FF3, 0x1FF1,
+		0x1FAE, 0x0182, 0x06D3, 0x0012, 0x1FEB,
+		0x1FB7, 0x014A, 0x06E5, 0x0035, 0x1FE5,
+		0x1FC0, 0x0114, 0x06F2, 0x005B, 0x1FDF,
+		0x1FC9, 0x00E1, 0x06F9, 0x0085, 0x1FD8,
+	},
+};
+#endif
diff --git a/drivers/media/platform/ti-vpe/vpdma.c b/drivers/media/platform/ti-vpe/vpdma.c
index fcbe48a..e8175e7 100644
--- a/drivers/media/platform/ti-vpe/vpdma.c
+++ b/drivers/media/platform/ti-vpe/vpdma.c
@@ -30,38 +30,47 @@
 
 const struct vpdma_data_format vpdma_yuv_fmts[] = {
 	[VPDMA_DATA_FMT_Y444] = {
+		.type		= VPDMA_DATA_FMT_TYPE_YUV,
 		.data_type	= DATA_TYPE_Y444,
 		.depth		= 8,
 	},
 	[VPDMA_DATA_FMT_Y422] = {
+		.type		= VPDMA_DATA_FMT_TYPE_YUV,
 		.data_type	= DATA_TYPE_Y422,
 		.depth		= 8,
 	},
 	[VPDMA_DATA_FMT_Y420] = {
+		.type		= VPDMA_DATA_FMT_TYPE_YUV,
 		.data_type	= DATA_TYPE_Y420,
 		.depth		= 8,
 	},
 	[VPDMA_DATA_FMT_C444] = {
+		.type		= VPDMA_DATA_FMT_TYPE_YUV,
 		.data_type	= DATA_TYPE_C444,
 		.depth		= 8,
 	},
 	[VPDMA_DATA_FMT_C422] = {
+		.type		= VPDMA_DATA_FMT_TYPE_YUV,
 		.data_type	= DATA_TYPE_C422,
 		.depth		= 8,
 	},
 	[VPDMA_DATA_FMT_C420] = {
+		.type		= VPDMA_DATA_FMT_TYPE_YUV,
 		.data_type	= DATA_TYPE_C420,
 		.depth		= 4,
 	},
 	[VPDMA_DATA_FMT_YC422] = {
+		.type		= VPDMA_DATA_FMT_TYPE_YUV,
 		.data_type	= DATA_TYPE_YC422,
 		.depth		= 16,
 	},
 	[VPDMA_DATA_FMT_YC444] = {
+		.type		= VPDMA_DATA_FMT_TYPE_YUV,
 		.data_type	= DATA_TYPE_YC444,
 		.depth		= 24,
 	},
 	[VPDMA_DATA_FMT_CY422] = {
+		.type		= VPDMA_DATA_FMT_TYPE_YUV,
 		.data_type	= DATA_TYPE_CY422,
 		.depth		= 16,
 	},
@@ -69,82 +78,102 @@
 
 const struct vpdma_data_format vpdma_rgb_fmts[] = {
 	[VPDMA_DATA_FMT_RGB565] = {
+		.type		= VPDMA_DATA_FMT_TYPE_RGB,
 		.data_type	= DATA_TYPE_RGB16_565,
 		.depth		= 16,
 	},
 	[VPDMA_DATA_FMT_ARGB16_1555] = {
+		.type		= VPDMA_DATA_FMT_TYPE_RGB,
 		.data_type	= DATA_TYPE_ARGB_1555,
 		.depth		= 16,
 	},
 	[VPDMA_DATA_FMT_ARGB16] = {
+		.type		= VPDMA_DATA_FMT_TYPE_RGB,
 		.data_type	= DATA_TYPE_ARGB_4444,
 		.depth		= 16,
 	},
 	[VPDMA_DATA_FMT_RGBA16_5551] = {
+		.type		= VPDMA_DATA_FMT_TYPE_RGB,
 		.data_type	= DATA_TYPE_RGBA_5551,
 		.depth		= 16,
 	},
 	[VPDMA_DATA_FMT_RGBA16] = {
+		.type		= VPDMA_DATA_FMT_TYPE_RGB,
 		.data_type	= DATA_TYPE_RGBA_4444,
 		.depth		= 16,
 	},
 	[VPDMA_DATA_FMT_ARGB24] = {
+		.type		= VPDMA_DATA_FMT_TYPE_RGB,
 		.data_type	= DATA_TYPE_ARGB24_6666,
 		.depth		= 24,
 	},
 	[VPDMA_DATA_FMT_RGB24] = {
+		.type		= VPDMA_DATA_FMT_TYPE_RGB,
 		.data_type	= DATA_TYPE_RGB24_888,
 		.depth		= 24,
 	},
 	[VPDMA_DATA_FMT_ARGB32] = {
+		.type		= VPDMA_DATA_FMT_TYPE_RGB,
 		.data_type	= DATA_TYPE_ARGB32_8888,
 		.depth		= 32,
 	},
 	[VPDMA_DATA_FMT_RGBA24] = {
+		.type		= VPDMA_DATA_FMT_TYPE_RGB,
 		.data_type	= DATA_TYPE_RGBA24_6666,
 		.depth		= 24,
 	},
 	[VPDMA_DATA_FMT_RGBA32] = {
+		.type		= VPDMA_DATA_FMT_TYPE_RGB,
 		.data_type	= DATA_TYPE_RGBA32_8888,
 		.depth		= 32,
 	},
 	[VPDMA_DATA_FMT_BGR565] = {
+		.type		= VPDMA_DATA_FMT_TYPE_RGB,
 		.data_type	= DATA_TYPE_BGR16_565,
 		.depth		= 16,
 	},
 	[VPDMA_DATA_FMT_ABGR16_1555] = {
+		.type		= VPDMA_DATA_FMT_TYPE_RGB,
 		.data_type	= DATA_TYPE_ABGR_1555,
 		.depth		= 16,
 	},
 	[VPDMA_DATA_FMT_ABGR16] = {
+		.type		= VPDMA_DATA_FMT_TYPE_RGB,
 		.data_type	= DATA_TYPE_ABGR_4444,
 		.depth		= 16,
 	},
 	[VPDMA_DATA_FMT_BGRA16_5551] = {
+		.type		= VPDMA_DATA_FMT_TYPE_RGB,
 		.data_type	= DATA_TYPE_BGRA_5551,
 		.depth		= 16,
 	},
 	[VPDMA_DATA_FMT_BGRA16] = {
+		.type		= VPDMA_DATA_FMT_TYPE_RGB,
 		.data_type	= DATA_TYPE_BGRA_4444,
 		.depth		= 16,
 	},
 	[VPDMA_DATA_FMT_ABGR24] = {
+		.type		= VPDMA_DATA_FMT_TYPE_RGB,
 		.data_type	= DATA_TYPE_ABGR24_6666,
 		.depth		= 24,
 	},
 	[VPDMA_DATA_FMT_BGR24] = {
+		.type		= VPDMA_DATA_FMT_TYPE_RGB,
 		.data_type	= DATA_TYPE_BGR24_888,
 		.depth		= 24,
 	},
 	[VPDMA_DATA_FMT_ABGR32] = {
+		.type		= VPDMA_DATA_FMT_TYPE_RGB,
 		.data_type	= DATA_TYPE_ABGR32_8888,
 		.depth		= 32,
 	},
 	[VPDMA_DATA_FMT_BGRA24] = {
+		.type		= VPDMA_DATA_FMT_TYPE_RGB,
 		.data_type	= DATA_TYPE_BGRA24_6666,
 		.depth		= 24,
 	},
 	[VPDMA_DATA_FMT_BGRA32] = {
+		.type		= VPDMA_DATA_FMT_TYPE_RGB,
 		.data_type	= DATA_TYPE_BGRA32_8888,
 		.depth		= 32,
 	},
@@ -152,6 +181,7 @@
 
 const struct vpdma_data_format vpdma_misc_fmts[] = {
 	[VPDMA_DATA_FMT_MV] = {
+		.type		= VPDMA_DATA_FMT_TYPE_MISC,
 		.data_type	= DATA_TYPE_MV,
 		.depth		= 4,
 	},
@@ -599,10 +629,11 @@
 
 	channel = next_chan = chan_info[chan].num;
 
-	if (fmt->data_type == DATA_TYPE_C420)
+	if (fmt->type == VPDMA_DATA_FMT_TYPE_YUV &&
+			fmt->data_type == DATA_TYPE_C420)
 		depth = 8;
 
-	stride = (depth * c_rect->width) >> 3;
+	stride = ALIGN((depth * c_rect->width) >> 3, VPDMA_STRIDE_ALIGN);
 	dma_addr += (c_rect->left * depth) >> 3;
 
 	dtd = list->next;
@@ -649,13 +680,14 @@
 
 	channel = next_chan = chan_info[chan].num;
 
-	if (fmt->data_type == DATA_TYPE_C420) {
+	if (fmt->type == VPDMA_DATA_FMT_TYPE_YUV &&
+			fmt->data_type == DATA_TYPE_C420) {
 		height >>= 1;
 		frame_height >>= 1;
 		depth = 8;
 	}
 
-	stride = (depth * c_rect->width) >> 3;
+	stride = ALIGN((depth * c_rect->width) >> 3, VPDMA_STRIDE_ALIGN);
 	dma_addr += (c_rect->left * depth) >> 3;
 
 	dtd = list->next;
diff --git a/drivers/media/platform/ti-vpe/vpdma.h b/drivers/media/platform/ti-vpe/vpdma.h
index eaa2a71..cf40f11 100644
--- a/drivers/media/platform/ti-vpe/vpdma.h
+++ b/drivers/media/platform/ti-vpe/vpdma.h
@@ -39,13 +39,23 @@
 	bool ready;
 };
 
+enum vpdma_data_format_type {
+	VPDMA_DATA_FMT_TYPE_YUV,
+	VPDMA_DATA_FMT_TYPE_RGB,
+	VPDMA_DATA_FMT_TYPE_MISC,
+};
+
 struct vpdma_data_format {
+	enum vpdma_data_format_type type;
 	int data_type;
 	u8 depth;
 };
 
 #define VPDMA_DESC_ALIGN		16	/* 16-byte descriptor alignment */
-
+#define VPDMA_STRIDE_ALIGN		16	/*
+						 * line stride of source and dest
+						 * buffers should be 16 byte aligned
+						 */
 #define VPDMA_DTD_DESC_SIZE		32	/* 8 words */
 #define VPDMA_CFD_CTD_DESC_SIZE		16	/* 4 words */
 
diff --git a/drivers/media/platform/ti-vpe/vpdma_priv.h b/drivers/media/platform/ti-vpe/vpdma_priv.h
index f0e9a80..c1a6ce1 100644
--- a/drivers/media/platform/ti-vpe/vpdma_priv.h
+++ b/drivers/media/platform/ti-vpe/vpdma_priv.h
@@ -78,7 +78,7 @@
 #define DATA_TYPE_C420				0x6
 #define DATA_TYPE_YC422				0x7
 #define DATA_TYPE_YC444				0x8
-#define DATA_TYPE_CY422				0x23
+#define DATA_TYPE_CY422				0x27
 
 #define DATA_TYPE_RGB16_565			0x0
 #define DATA_TYPE_ARGB_1555			0x1
diff --git a/drivers/media/platform/ti-vpe/vpe.c b/drivers/media/platform/ti-vpe/vpe.c
index 4e58069..1296c53 100644
--- a/drivers/media/platform/ti-vpe/vpe.c
+++ b/drivers/media/platform/ti-vpe/vpe.c
@@ -30,6 +30,7 @@
 #include <linux/sched.h>
 #include <linux/slab.h>
 #include <linux/videodev2.h>
+#include <linux/log2.h>
 
 #include <media/v4l2-common.h>
 #include <media/v4l2-ctrls.h>
@@ -42,6 +43,8 @@
 
 #include "vpdma.h"
 #include "vpe_regs.h"
+#include "sc.h"
+#include "csc.h"
 
 #define VPE_MODULE_NAME "vpe"
 
@@ -54,10 +57,6 @@
 /* required alignments */
 #define S_ALIGN		0	/* multiple of 1 */
 #define H_ALIGN		1	/* multiple of 2 */
-#define W_ALIGN		1	/* multiple of 2 */
-
-/* multiple of 128 bits, line stride, 16 bytes */
-#define L_ALIGN		4
 
 /* flags that indicate a format can be used for capture/output */
 #define VPE_FMT_TYPE_CAPTURE	(1 << 0)
@@ -268,6 +267,38 @@
 		.vpdma_fmt	= { &vpdma_yuv_fmts[VPDMA_DATA_FMT_CY422],
 				  },
 	},
+	{
+		.name		= "RGB888 packed",
+		.fourcc		= V4L2_PIX_FMT_RGB24,
+		.types		= VPE_FMT_TYPE_CAPTURE,
+		.coplanar	= 0,
+		.vpdma_fmt	= { &vpdma_rgb_fmts[VPDMA_DATA_FMT_RGB24],
+				  },
+	},
+	{
+		.name		= "ARGB32",
+		.fourcc		= V4L2_PIX_FMT_RGB32,
+		.types		= VPE_FMT_TYPE_CAPTURE,
+		.coplanar	= 0,
+		.vpdma_fmt	= { &vpdma_rgb_fmts[VPDMA_DATA_FMT_ARGB32],
+				  },
+	},
+	{
+		.name		= "BGR888 packed",
+		.fourcc		= V4L2_PIX_FMT_BGR24,
+		.types		= VPE_FMT_TYPE_CAPTURE,
+		.coplanar	= 0,
+		.vpdma_fmt	= { &vpdma_rgb_fmts[VPDMA_DATA_FMT_BGR24],
+				  },
+	},
+	{
+		.name		= "ABGR32",
+		.fourcc		= V4L2_PIX_FMT_BGR32,
+		.types		= VPE_FMT_TYPE_CAPTURE,
+		.coplanar	= 0,
+		.vpdma_fmt	= { &vpdma_rgb_fmts[VPDMA_DATA_FMT_ABGR32],
+				  },
+	},
 };
 
 /*
@@ -327,9 +358,12 @@
 
 	int			irq;
 	void __iomem		*base;
+	struct resource		*res;
 
 	struct vb2_alloc_ctx	*alloc_ctx;
 	struct vpdma_data	*vpdma;		/* vpdma data handle */
+	struct sc_data		*sc;		/* scaler data handle */
+	struct csc_data		*csc;		/* csc data handle */
 };
 
 /*
@@ -356,6 +390,8 @@
 	void			*mv_buf[2];		/* virtual addrs of motion vector bufs */
 	size_t			mv_buf_size;		/* current motion vector buffer size */
 	struct vpdma_buf	mmr_adb;		/* shadow reg addr/data block */
+	struct vpdma_buf	sc_coeff_h;		/* h coeff buffer */
+	struct vpdma_buf	sc_coeff_v;		/* v coeff buffer */
 	struct vpdma_desc_list	desc_list;		/* DMA descriptor list */
 
 	bool			deinterlacing;		/* using de-interlacer */
@@ -438,14 +474,23 @@
 	u32			us3_regs[8];
 	struct vpdma_adb_hdr	dei_hdr;
 	u32			dei_regs[8];
-	struct vpdma_adb_hdr	sc_hdr;
-	u32			sc_regs[1];
-	u32			sc_pad[3];
+	struct vpdma_adb_hdr	sc_hdr0;
+	u32			sc_regs0[7];
+	u32			sc_pad0[1];
+	struct vpdma_adb_hdr	sc_hdr8;
+	u32			sc_regs8[6];
+	u32			sc_pad8[2];
+	struct vpdma_adb_hdr	sc_hdr17;
+	u32			sc_regs17[9];
+	u32			sc_pad17[3];
 	struct vpdma_adb_hdr	csc_hdr;
 	u32			csc_regs[6];
 	u32			csc_pad[2];
 };
 
+#define GET_OFFSET_TOP(ctx, obj, reg)	\
+	((obj)->res->start - ctx->dev->res->start + reg)
+
 #define VPE_SET_MMR_ADB_HDR(ctx, hdr, regs, offset_a)	\
 	VPDMA_SET_MMR_ADB_HDR(ctx->mmr_adb, vpe_mmr_adb, hdr, regs, offset_a)
 /*
@@ -458,8 +503,14 @@
 	VPE_SET_MMR_ADB_HDR(ctx, us2_hdr, us2_regs, VPE_US2_R0);
 	VPE_SET_MMR_ADB_HDR(ctx, us3_hdr, us3_regs, VPE_US3_R0);
 	VPE_SET_MMR_ADB_HDR(ctx, dei_hdr, dei_regs, VPE_DEI_FRAME_SIZE);
-	VPE_SET_MMR_ADB_HDR(ctx, sc_hdr, sc_regs, VPE_SC_MP_SC0);
-	VPE_SET_MMR_ADB_HDR(ctx, csc_hdr, csc_regs, VPE_CSC_CSC00);
+	VPE_SET_MMR_ADB_HDR(ctx, sc_hdr0, sc_regs0,
+		GET_OFFSET_TOP(ctx, ctx->dev->sc, CFG_SC0));
+	VPE_SET_MMR_ADB_HDR(ctx, sc_hdr8, sc_regs8,
+		GET_OFFSET_TOP(ctx, ctx->dev->sc, CFG_SC8));
+	VPE_SET_MMR_ADB_HDR(ctx, sc_hdr17, sc_regs17,
+		GET_OFFSET_TOP(ctx, ctx->dev->sc, CFG_SC17));
+	VPE_SET_MMR_ADB_HDR(ctx, csc_hdr, csc_regs,
+		GET_OFFSET_TOP(ctx, ctx->dev->csc, CSC_CSC00));
 };
 
 /*
@@ -670,17 +721,20 @@
 static void set_dst_registers(struct vpe_ctx *ctx)
 {
 	struct vpe_mmr_adb *mmr_adb = ctx->mmr_adb.addr;
+	enum v4l2_colorspace clrspc = ctx->q_data[Q_DATA_DST].colorspace;
 	struct vpe_fmt *fmt = ctx->q_data[Q_DATA_DST].fmt;
 	u32 val = 0;
 
-	/* select RGB path when color space conversion is supported in future */
-	if (fmt->fourcc == V4L2_PIX_FMT_RGB24)
-		val |= VPE_RGB_OUT_SELECT | VPE_CSC_SRC_DEI_SCALER;
+	if (clrspc == V4L2_COLORSPACE_SRGB)
+		val |= VPE_RGB_OUT_SELECT;
 	else if (fmt->fourcc == V4L2_PIX_FMT_NV16)
 		val |= VPE_COLOR_SEPARATE_422;
 
-	/* The source of CHR_DS is always the scaler, whether it's used or not */
-	val |= VPE_DS_SRC_DEI_SCALER;
+	/*
+	 * the source of CHR_DS and CSC is always the scaler, irrespective of
+	 * whether it's used or not
+	 */
+	val |= VPE_DS_SRC_DEI_SCALER | VPE_CSC_SRC_DEI_SCALER;
 
 	if (fmt->fourcc != V4L2_PIX_FMT_NV12)
 		val |= VPE_DS_BYPASS;
@@ -742,28 +796,6 @@
 	ctx->load_mmrs = true;
 }
 
-static void set_csc_coeff_bypass(struct vpe_ctx *ctx)
-{
-	struct vpe_mmr_adb *mmr_adb = ctx->mmr_adb.addr;
-	u32 *shadow_csc_reg5 = &mmr_adb->csc_regs[5];
-
-	*shadow_csc_reg5 |= VPE_CSC_BYPASS;
-
-	ctx->load_mmrs = true;
-}
-
-static void set_sc_regs_bypass(struct vpe_ctx *ctx)
-{
-	struct vpe_mmr_adb *mmr_adb = ctx->mmr_adb.addr;
-	u32 *sc_reg0 = &mmr_adb->sc_regs[0];
-	u32 val = 0;
-
-	val |= VPE_SC_BYPASS;
-	*sc_reg0 = val;
-
-	ctx->load_mmrs = true;
-}
-
 /*
  * Set the shadow registers whose values are modified when either the
  * source or destination format is changed.
@@ -772,6 +804,11 @@
 {
 	struct vpe_q_data *s_q_data =  &ctx->q_data[Q_DATA_SRC];
 	struct vpe_q_data *d_q_data =  &ctx->q_data[Q_DATA_DST];
+	struct vpe_mmr_adb *mmr_adb = ctx->mmr_adb.addr;
+	unsigned int src_w = s_q_data->c_rect.width;
+	unsigned int src_h = s_q_data->c_rect.height;
+	unsigned int dst_w = d_q_data->c_rect.width;
+	unsigned int dst_h = d_q_data->c_rect.height;
 	size_t mv_buf_size;
 	int ret;
 
@@ -780,12 +817,23 @@
 
 	if ((s_q_data->flags & Q_DATA_INTERLACED) &&
 			!(d_q_data->flags & Q_DATA_INTERLACED)) {
+		int bytes_per_line;
 		const struct vpdma_data_format *mv =
 			&vpdma_misc_fmts[VPDMA_DATA_FMT_MV];
 
+		/*
+		 * we make sure that the source image has a 16 byte aligned
+		 * stride; we need to do the same for the motion vector buffer
+		 * by aligning its stride to the next 16 byte boundary. this
+		 * extra space will not be used by the de-interlacer, but will
+		 * ensure that vpdma operates correctly
+		 */
+		bytes_per_line = ALIGN((s_q_data->width * mv->depth) >> 3,
+					VPDMA_STRIDE_ALIGN);
+		mv_buf_size = bytes_per_line * s_q_data->height;
+
 		ctx->deinterlacing = 1;
-		mv_buf_size =
-			(s_q_data->width * s_q_data->height * mv->depth) >> 3;
+		src_h <<= 1;
 	} else {
 		ctx->deinterlacing = 0;
 		mv_buf_size = 0;
@@ -799,8 +847,16 @@
 
 	set_cfg_and_line_modes(ctx);
 	set_dei_regs(ctx);
-	set_csc_coeff_bypass(ctx);
-	set_sc_regs_bypass(ctx);
+
+	csc_set_coeff(ctx->dev->csc, &mmr_adb->csc_regs[0],
+		s_q_data->colorspace, d_q_data->colorspace);
+
+	sc_set_hs_coeffs(ctx->dev->sc, ctx->sc_coeff_h.addr, src_w, dst_w);
+	sc_set_vs_coeffs(ctx->dev->sc, ctx->sc_coeff_v.addr, src_h, dst_h);
+
+	sc_config_scaler(ctx->dev->sc, &mmr_adb->sc_regs0[0],
+		&mmr_adb->sc_regs8[0], &mmr_adb->sc_regs17[0],
+		src_w, src_h, dst_w, dst_h);
 
 	return 0;
 }
@@ -916,35 +972,10 @@
 	DUMPREG(DEI_FMD_STATUS_R0);
 	DUMPREG(DEI_FMD_STATUS_R1);
 	DUMPREG(DEI_FMD_STATUS_R2);
-	DUMPREG(SC_MP_SC0);
-	DUMPREG(SC_MP_SC1);
-	DUMPREG(SC_MP_SC2);
-	DUMPREG(SC_MP_SC3);
-	DUMPREG(SC_MP_SC4);
-	DUMPREG(SC_MP_SC5);
-	DUMPREG(SC_MP_SC6);
-	DUMPREG(SC_MP_SC8);
-	DUMPREG(SC_MP_SC9);
-	DUMPREG(SC_MP_SC10);
-	DUMPREG(SC_MP_SC11);
-	DUMPREG(SC_MP_SC12);
-	DUMPREG(SC_MP_SC13);
-	DUMPREG(SC_MP_SC17);
-	DUMPREG(SC_MP_SC18);
-	DUMPREG(SC_MP_SC19);
-	DUMPREG(SC_MP_SC20);
-	DUMPREG(SC_MP_SC21);
-	DUMPREG(SC_MP_SC22);
-	DUMPREG(SC_MP_SC23);
-	DUMPREG(SC_MP_SC24);
-	DUMPREG(SC_MP_SC25);
-	DUMPREG(CSC_CSC00);
-	DUMPREG(CSC_CSC01);
-	DUMPREG(CSC_CSC02);
-	DUMPREG(CSC_CSC03);
-	DUMPREG(CSC_CSC04);
-	DUMPREG(CSC_CSC05);
 #undef DUMPREG
+
+	sc_dump_regs(dev->sc);
+	csc_dump_regs(dev->csc);
 }
 
 static void add_out_dtd(struct vpe_ctx *ctx, int port)
@@ -1053,6 +1084,7 @@
 static void device_run(void *priv)
 {
 	struct vpe_ctx *ctx = priv;
+	struct sc_data *sc = ctx->dev->sc;
 	struct vpe_q_data *d_q_data = &ctx->q_data[Q_DATA_DST];
 
 	if (ctx->deinterlacing && ctx->src_vbs[2] == NULL) {
@@ -1075,13 +1107,37 @@
 		ctx->load_mmrs = false;
 	}
 
+	if (sc->loaded_coeff_h != ctx->sc_coeff_h.dma_addr ||
+			sc->load_coeff_h) {
+		vpdma_map_desc_buf(ctx->dev->vpdma, &ctx->sc_coeff_h);
+		vpdma_add_cfd_block(&ctx->desc_list, CFD_SC_CLIENT,
+			&ctx->sc_coeff_h, 0);
+
+		sc->loaded_coeff_h = ctx->sc_coeff_h.dma_addr;
+		sc->load_coeff_h = false;
+	}
+
+	if (sc->loaded_coeff_v != ctx->sc_coeff_v.dma_addr ||
+			sc->load_coeff_v) {
+		vpdma_map_desc_buf(ctx->dev->vpdma, &ctx->sc_coeff_v);
+		vpdma_add_cfd_block(&ctx->desc_list, CFD_SC_CLIENT,
+			&ctx->sc_coeff_v, SC_COEF_SRAM_SIZE >> 4);
+
+		sc->loaded_coeff_v = ctx->sc_coeff_v.dma_addr;
+		sc->load_coeff_v = false;
+	}
+
 	/* output data descriptors */
 	if (ctx->deinterlacing)
 		add_out_dtd(ctx, VPE_PORT_MV_OUT);
 
-	add_out_dtd(ctx, VPE_PORT_LUMA_OUT);
-	if (d_q_data->fmt->coplanar)
-		add_out_dtd(ctx, VPE_PORT_CHROMA_OUT);
+	if (d_q_data->colorspace == V4L2_COLORSPACE_SRGB) {
+		add_out_dtd(ctx, VPE_PORT_RGB_OUT);
+	} else {
+		add_out_dtd(ctx, VPE_PORT_LUMA_OUT);
+		if (d_q_data->fmt->coplanar)
+			add_out_dtd(ctx, VPE_PORT_CHROMA_OUT);
+	}
 
 	/* input data descriptors */
 	if (ctx->deinterlacing) {
@@ -1117,9 +1173,16 @@
 	}
 
 	/* sync on channel control descriptors for output ports */
-	vpdma_add_sync_on_channel_ctd(&ctx->desc_list, VPE_CHAN_LUMA_OUT);
-	if (d_q_data->fmt->coplanar)
-		vpdma_add_sync_on_channel_ctd(&ctx->desc_list, VPE_CHAN_CHROMA_OUT);
+	if (d_q_data->colorspace == V4L2_COLORSPACE_SRGB) {
+		vpdma_add_sync_on_channel_ctd(&ctx->desc_list,
+			VPE_CHAN_RGB_OUT);
+	} else {
+		vpdma_add_sync_on_channel_ctd(&ctx->desc_list,
+			VPE_CHAN_LUMA_OUT);
+		if (d_q_data->fmt->coplanar)
+			vpdma_add_sync_on_channel_ctd(&ctx->desc_list,
+				VPE_CHAN_CHROMA_OUT);
+	}
 
 	if (ctx->deinterlacing)
 		vpdma_add_sync_on_channel_ctd(&ctx->desc_list, VPE_CHAN_MV_OUT);
@@ -1198,6 +1261,8 @@
 
 	vpdma_unmap_desc_buf(dev->vpdma, &ctx->desc_list.buf);
 	vpdma_unmap_desc_buf(dev->vpdma, &ctx->mmr_adb);
+	vpdma_unmap_desc_buf(dev->vpdma, &ctx->sc_coeff_h);
+	vpdma_unmap_desc_buf(dev->vpdma, &ctx->sc_coeff_v);
 
 	vpdma_reset_desc_list(&ctx->desc_list);
 
@@ -1352,7 +1417,8 @@
 {
 	struct v4l2_pix_format_mplane *pix = &f->fmt.pix_mp;
 	struct v4l2_plane_pix_format *plane_fmt;
-	int i;
+	unsigned int w_align;
+	int i, depth, depth_bytes;
 
 	if (!fmt || !(fmt->types & type)) {
 		vpe_err(ctx->dev, "Fourcc format (0x%08x) invalid.\n",
@@ -1363,35 +1429,57 @@
 	if (pix->field != V4L2_FIELD_NONE && pix->field != V4L2_FIELD_ALTERNATE)
 		pix->field = V4L2_FIELD_NONE;
 
-	v4l_bound_align_image(&pix->width, MIN_W, MAX_W, W_ALIGN,
+	depth = fmt->vpdma_fmt[VPE_LUMA]->depth;
+
+	/*
+	 * the line stride should be 16 byte aligned for VPDMA to work; based
+	 * on the bytes per pixel, figure out how much the width should be
+	 * aligned to make sure the line stride is 16 byte aligned
+	 */
+	depth_bytes = depth >> 3;
+
+	if (depth_bytes == 3)
+		/*
+		 * if bpp is 3 (as in some RGB formats), the pixel width doesn't
+		 * really help in ensuring the line stride is 16 byte aligned
+		 */
+		w_align = 4;
+	else
+		/*
+		 * for the remaining bpp values (4, 2 and 1), the pixel width
+		 * alignment can ensure a line stride alignment of 16 bytes. For
+		 * example, if bpp is 2, the line stride is 16 byte aligned when
+		 * the width is aligned to 8 pixels
+		 */
+		w_align = order_base_2(VPDMA_DESC_ALIGN / depth_bytes);
+
+	v4l_bound_align_image(&pix->width, MIN_W, MAX_W, w_align,
 			      &pix->height, MIN_H, MAX_H, H_ALIGN,
 			      S_ALIGN);
 
 	pix->num_planes = fmt->coplanar ? 2 : 1;
 	pix->pixelformat = fmt->fourcc;
 
-	if (type == VPE_FMT_TYPE_CAPTURE) {
-		struct vpe_q_data *s_q_data;
-
-		/* get colorspace from the source queue */
-		s_q_data = get_q_data(ctx, V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE);
-
-		pix->colorspace = s_q_data->colorspace;
-	} else {
-		if (!pix->colorspace)
-			pix->colorspace = V4L2_COLORSPACE_SMPTE240M;
+	if (!pix->colorspace) {
+		if (fmt->fourcc == V4L2_PIX_FMT_RGB24 ||
+				fmt->fourcc == V4L2_PIX_FMT_BGR24 ||
+				fmt->fourcc == V4L2_PIX_FMT_RGB32 ||
+				fmt->fourcc == V4L2_PIX_FMT_BGR32) {
+			pix->colorspace = V4L2_COLORSPACE_SRGB;
+		} else {
+			if (pix->height > 1280)	/* HD */
+				pix->colorspace = V4L2_COLORSPACE_REC709;
+			else			/* SD */
+				pix->colorspace = V4L2_COLORSPACE_SMPTE170M;
+		}
 	}
 
 	for (i = 0; i < pix->num_planes; i++) {
-		int depth;
-
 		plane_fmt = &pix->plane_fmt[i];
 		depth = fmt->vpdma_fmt[i]->depth;
 
 		if (i == VPE_LUMA)
-			plane_fmt->bytesperline =
-					round_up((pix->width * depth) >> 3,
-						1 << L_ALIGN);
+			plane_fmt->bytesperline = (pix->width * depth) >> 3;
 		else
 			plane_fmt->bytesperline = pix->width;
 
@@ -1749,6 +1837,14 @@
 	if (ret != 0)
 		goto free_desc_list;
 
+	ret = vpdma_alloc_desc_buf(&ctx->sc_coeff_h, SC_COEF_SRAM_SIZE);
+	if (ret != 0)
+		goto free_mmr_adb;
+
+	ret = vpdma_alloc_desc_buf(&ctx->sc_coeff_v, SC_COEF_SRAM_SIZE);
+	if (ret != 0)
+		goto free_sc_h;
+
 	init_adb_hdrs(ctx);
 
 	v4l2_fh_init(&ctx->fh, video_devdata(file));
@@ -1770,7 +1866,7 @@
 	s_q_data->height = 1080;
 	s_q_data->sizeimage[VPE_LUMA] = (s_q_data->width * s_q_data->height *
 			s_q_data->fmt->vpdma_fmt[VPE_LUMA]->depth) >> 3;
-	s_q_data->colorspace = V4L2_COLORSPACE_SMPTE240M;
+	s_q_data->colorspace = V4L2_COLORSPACE_SMPTE170M;
 	s_q_data->field = V4L2_FIELD_NONE;
 	s_q_data->c_rect.left = 0;
 	s_q_data->c_rect.top = 0;
@@ -1817,6 +1913,10 @@
 exit_fh:
 	v4l2_ctrl_handler_free(hdl);
 	v4l2_fh_exit(&ctx->fh);
+	vpdma_free_desc_buf(&ctx->sc_coeff_v);
+free_sc_h:
+	vpdma_free_desc_buf(&ctx->sc_coeff_h);
+free_mmr_adb:
 	vpdma_free_desc_buf(&ctx->mmr_adb);
 free_desc_list:
 	vpdma_free_desc_list(&ctx->desc_list);
@@ -1938,12 +2038,11 @@
 {
 	struct vpe_dev *dev;
 	struct video_device *vfd;
-	struct resource *res;
 	int ret, irq, func;
 
 	dev = devm_kzalloc(&pdev->dev, sizeof(*dev), GFP_KERNEL);
-	if (IS_ERR(dev))
-		return PTR_ERR(dev);
+	if (!dev)
+		return -ENOMEM;
 
 	spin_lock_init(&dev->lock);
 
@@ -1954,16 +2053,17 @@
 	atomic_set(&dev->num_instances, 0);
 	mutex_init(&dev->dev_mutex);
 
-	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "vpe_top");
+	dev->res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
+			"vpe_top");
 	/*
 	 * HACK: we get resource info from device tree in the form of a list of
 	 * VPE sub blocks, the driver currently uses only the base of vpe_top
 	 * for register access, the driver should be changed later to access
 	 * registers based on the sub block base addresses
 	 */
-	dev->base = devm_ioremap(&pdev->dev, res->start, SZ_32K);
-	if (IS_ERR(dev->base)) {
-		ret = PTR_ERR(dev->base);
+	dev->base = devm_ioremap(&pdev->dev, dev->res->start, SZ_32K);
+	if (!dev->base) {
+		ret = -ENOMEM;
 		goto v4l2_dev_unreg;
 	}
 
@@ -2006,9 +2106,23 @@
 
 	vpe_top_vpdma_reset(dev);
 
-	dev->vpdma = vpdma_create(pdev);
-	if (IS_ERR(dev->vpdma))
+	dev->sc = sc_create(pdev);
+	if (IS_ERR(dev->sc)) {
+		ret = PTR_ERR(dev->sc);
 		goto runtime_put;
+	}
+
+	dev->csc = csc_create(pdev);
+	if (IS_ERR(dev->csc)) {
+		ret = PTR_ERR(dev->csc);
+		goto runtime_put;
+	}
+
+	dev->vpdma = vpdma_create(pdev);
+	if (IS_ERR(dev->vpdma)) {
+		ret = PTR_ERR(dev->vpdma);
+		goto runtime_put;
+	}
 
 	vfd = &dev->vfd;
 	*vfd = vpe_videodev;
@@ -2081,18 +2195,7 @@
 	},
 };
 
-static void __exit vpe_exit(void)
-{
-	platform_driver_unregister(&vpe_pdrv);
-}
-
-static int __init vpe_init(void)
-{
-	return platform_driver_register(&vpe_pdrv);
-}
-
-module_init(vpe_init);
-module_exit(vpe_exit);
+module_platform_driver(vpe_pdrv);
 
 MODULE_DESCRIPTION("TI VPE driver");
 MODULE_AUTHOR("Dale Farnsworth, <dale@farnsworth.org>");
diff --git a/drivers/media/platform/ti-vpe/vpe_regs.h b/drivers/media/platform/ti-vpe/vpe_regs.h
index ed214e8..74283d7 100644
--- a/drivers/media/platform/ti-vpe/vpe_regs.h
+++ b/drivers/media/platform/ti-vpe/vpe_regs.h
@@ -306,191 +306,4 @@
 #define VPE_FMD_FRAME_DIFF_MASK		0x000fffff
 #define VPE_FMD_FRAME_DIFF_SHIFT	0
 
-/* VPE scaler regs */
-#define VPE_SC_MP_SC0			0x0700
-#define VPE_INTERLACE_O			(1 << 0)
-#define VPE_LINEAR			(1 << 1)
-#define VPE_SC_BYPASS			(1 << 2)
-#define VPE_INVT_FID			(1 << 3)
-#define VPE_USE_RAV			(1 << 4)
-#define VPE_ENABLE_EV			(1 << 5)
-#define VPE_AUTO_HS			(1 << 6)
-#define VPE_DCM_2X			(1 << 7)
-#define VPE_DCM_4X			(1 << 8)
-#define VPE_HP_BYPASS			(1 << 9)
-#define VPE_INTERLACE_I			(1 << 10)
-#define VPE_ENABLE_SIN2_VER_INTP	(1 << 11)
-#define VPE_Y_PK_EN			(1 << 14)
-#define VPE_TRIM			(1 << 15)
-#define VPE_SELFGEN_FID			(1 << 16)
-
-#define VPE_SC_MP_SC1			0x0704
-#define VPE_ROW_ACC_INC_MASK		0x07ffffff
-#define VPE_ROW_ACC_INC_SHIFT		0
-
-#define VPE_SC_MP_SC2			0x0708
-#define VPE_ROW_ACC_OFFSET_MASK		0x0fffffff
-#define VPE_ROW_ACC_OFFSET_SHIFT	0
-
-#define VPE_SC_MP_SC3			0x070c
-#define VPE_ROW_ACC_OFFSET_B_MASK	0x0fffffff
-#define VPE_ROW_ACC_OFFSET_B_SHIFT	0
-
-#define VPE_SC_MP_SC4			0x0710
-#define VPE_TAR_H_MASK			0x07ff
-#define VPE_TAR_H_SHIFT			0
-#define VPE_TAR_W_MASK			0x07ff
-#define VPE_TAR_W_SHIFT			12
-#define VPE_LIN_ACC_INC_U_MASK		0x07
-#define VPE_LIN_ACC_INC_U_SHIFT		24
-#define VPE_NLIN_ACC_INIT_U_MASK	0x07
-#define VPE_NLIN_ACC_INIT_U_SHIFT	28
-
-#define VPE_SC_MP_SC5			0x0714
-#define VPE_SRC_H_MASK			0x07ff
-#define VPE_SRC_H_SHIFT			0
-#define VPE_SRC_W_MASK			0x07ff
-#define VPE_SRC_W_SHIFT			12
-#define VPE_NLIN_ACC_INC_U_MASK		0x07
-#define VPE_NLIN_ACC_INC_U_SHIFT	24
-
-#define VPE_SC_MP_SC6			0x0718
-#define VPE_ROW_ACC_INIT_RAV_MASK	0x03ff
-#define VPE_ROW_ACC_INIT_RAV_SHIFT	0
-#define VPE_ROW_ACC_INIT_RAV_B_MASK	0x03ff
-#define VPE_ROW_ACC_INIT_RAV_B_SHIFT	10
-
-#define VPE_SC_MP_SC8			0x0720
-#define VPE_NLIN_LEFT_MASK		0x07ff
-#define VPE_NLIN_LEFT_SHIFT		0
-#define VPE_NLIN_RIGHT_MASK		0x07ff
-#define VPE_NLIN_RIGHT_SHIFT		12
-
-#define VPE_SC_MP_SC9			0x0724
-#define VPE_LIN_ACC_INC			VPE_SC_MP_SC9
-
-#define VPE_SC_MP_SC10			0x0728
-#define VPE_NLIN_ACC_INIT		VPE_SC_MP_SC10
-
-#define VPE_SC_MP_SC11			0x072c
-#define VPE_NLIN_ACC_INC		VPE_SC_MP_SC11
-
-#define VPE_SC_MP_SC12			0x0730
-#define VPE_COL_ACC_OFFSET_MASK		0x01ffffff
-#define VPE_COL_ACC_OFFSET_SHIFT	0
-
-#define VPE_SC_MP_SC13			0x0734
-#define VPE_SC_FACTOR_RAV_MASK		0x03ff
-#define VPE_SC_FACTOR_RAV_SHIFT		0
-#define VPE_CHROMA_INTP_THR_MASK	0x03ff
-#define VPE_CHROMA_INTP_THR_SHIFT	12
-#define VPE_DELTA_CHROMA_THR_MASK	0x0f
-#define VPE_DELTA_CHROMA_THR_SHIFT	24
-
-#define VPE_SC_MP_SC17			0x0744
-#define VPE_EV_THR_MASK			0x03ff
-#define VPE_EV_THR_SHIFT		12
-#define VPE_DELTA_LUMA_THR_MASK		0x0f
-#define VPE_DELTA_LUMA_THR_SHIFT	24
-#define VPE_DELTA_EV_THR_MASK		0x0f
-#define VPE_DELTA_EV_THR_SHIFT		28
-
-#define VPE_SC_MP_SC18			0x0748
-#define VPE_HS_FACTOR_MASK		0x03ff
-#define VPE_HS_FACTOR_SHIFT		0
-#define VPE_CONF_DEFAULT_MASK		0x01ff
-#define VPE_CONF_DEFAULT_SHIFT		16
-
-#define VPE_SC_MP_SC19			0x074c
-#define VPE_HPF_COEFF0_MASK		0xff
-#define VPE_HPF_COEFF0_SHIFT		0
-#define VPE_HPF_COEFF1_MASK		0xff
-#define VPE_HPF_COEFF1_SHIFT		8
-#define VPE_HPF_COEFF2_MASK		0xff
-#define VPE_HPF_COEFF2_SHIFT		16
-#define VPE_HPF_COEFF3_MASK		0xff
-#define VPE_HPF_COEFF3_SHIFT		23
-
-#define VPE_SC_MP_SC20			0x0750
-#define VPE_HPF_COEFF4_MASK		0xff
-#define VPE_HPF_COEFF4_SHIFT		0
-#define VPE_HPF_COEFF5_MASK		0xff
-#define VPE_HPF_COEFF5_SHIFT		8
-#define VPE_HPF_NORM_SHIFT_MASK		0x07
-#define VPE_HPF_NORM_SHIFT_SHIFT	16
-#define VPE_NL_LIMIT_MASK		0x1ff
-#define VPE_NL_LIMIT_SHIFT		20
-
-#define VPE_SC_MP_SC21			0x0754
-#define VPE_NL_LO_THR_MASK		0x01ff
-#define VPE_NL_LO_THR_SHIFT		0
-#define VPE_NL_LO_SLOPE_MASK		0xff
-#define VPE_NL_LO_SLOPE_SHIFT		16
-
-#define VPE_SC_MP_SC22			0x0758
-#define VPE_NL_HI_THR_MASK		0x01ff
-#define VPE_NL_HI_THR_SHIFT		0
-#define VPE_NL_HI_SLOPE_SH_MASK		0x07
-#define VPE_NL_HI_SLOPE_SH_SHIFT	16
-
-#define VPE_SC_MP_SC23			0x075c
-#define VPE_GRADIENT_THR_MASK		0x07ff
-#define VPE_GRADIENT_THR_SHIFT		0
-#define VPE_GRADIENT_THR_RANGE_MASK	0x0f
-#define VPE_GRADIENT_THR_RANGE_SHIFT	12
-#define VPE_MIN_GY_THR_MASK		0xff
-#define VPE_MIN_GY_THR_SHIFT		16
-#define VPE_MIN_GY_THR_RANGE_MASK	0x0f
-#define VPE_MIN_GY_THR_RANGE_SHIFT	28
-
-#define VPE_SC_MP_SC24			0x0760
-#define VPE_ORG_H_MASK			0x07ff
-#define VPE_ORG_H_SHIFT			0
-#define VPE_ORG_W_MASK			0x07ff
-#define VPE_ORG_W_SHIFT			16
-
-#define VPE_SC_MP_SC25			0x0764
-#define VPE_OFF_H_MASK			0x07ff
-#define VPE_OFF_H_SHIFT			0
-#define VPE_OFF_W_MASK			0x07ff
-#define VPE_OFF_W_SHIFT			16
-
-/* VPE color space converter regs */
-#define VPE_CSC_CSC00			0x5700
-#define VPE_CSC_A0_MASK			0x1fff
-#define VPE_CSC_A0_SHIFT		0
-#define VPE_CSC_B0_MASK			0x1fff
-#define VPE_CSC_B0_SHIFT		16
-
-#define VPE_CSC_CSC01			0x5704
-#define VPE_CSC_C0_MASK			0x1fff
-#define VPE_CSC_C0_SHIFT		0
-#define VPE_CSC_A1_MASK			0x1fff
-#define VPE_CSC_A1_SHIFT		16
-
-#define VPE_CSC_CSC02			0x5708
-#define VPE_CSC_B1_MASK			0x1fff
-#define VPE_CSC_B1_SHIFT		0
-#define VPE_CSC_C1_MASK			0x1fff
-#define VPE_CSC_C1_SHIFT		16
-
-#define VPE_CSC_CSC03			0x570c
-#define VPE_CSC_A2_MASK			0x1fff
-#define VPE_CSC_A2_SHIFT		0
-#define VPE_CSC_B2_MASK			0x1fff
-#define VPE_CSC_B2_SHIFT		16
-
-#define VPE_CSC_CSC04			0x5710
-#define VPE_CSC_C2_MASK			0x1fff
-#define VPE_CSC_C2_SHIFT		0
-#define VPE_CSC_D0_MASK			0x0fff
-#define VPE_CSC_D0_SHIFT		16
-
-#define VPE_CSC_CSC05			0x5714
-#define VPE_CSC_D1_MASK			0x0fff
-#define VPE_CSC_D1_SHIFT		0
-#define VPE_CSC_D2_MASK			0x0fff
-#define VPE_CSC_D2_SHIFT		16
-#define VPE_CSC_BYPASS			(1 << 28)
-
 #endif
diff --git a/drivers/media/platform/vsp1/Makefile b/drivers/media/platform/vsp1/Makefile
index 4da2261..151cecd 100644
--- a/drivers/media/platform/vsp1/Makefile
+++ b/drivers/media/platform/vsp1/Makefile
@@ -1,5 +1,6 @@
 vsp1-y					:= vsp1_drv.o vsp1_entity.o vsp1_video.o
 vsp1-y					+= vsp1_rpf.o vsp1_rwpf.o vsp1_wpf.o
-vsp1-y					+= vsp1_lif.o vsp1_uds.o
+vsp1-y					+= vsp1_hsit.o vsp1_lif.o vsp1_lut.o
+vsp1-y					+= vsp1_sru.o vsp1_uds.o
 
 obj-$(CONFIG_VIDEO_RENESAS_VSP1)	+= vsp1.o
diff --git a/drivers/media/platform/vsp1/vsp1.h b/drivers/media/platform/vsp1/vsp1.h
index d6c6ecd..94d1b02 100644
--- a/drivers/media/platform/vsp1/vsp1.h
+++ b/drivers/media/platform/vsp1/vsp1.h
@@ -28,8 +28,11 @@
 struct device;
 
 struct vsp1_platform_data;
+struct vsp1_hsit;
 struct vsp1_lif;
+struct vsp1_lut;
 struct vsp1_rwpf;
+struct vsp1_sru;
 struct vsp1_uds;
 
 #define VPS1_MAX_RPF		5
@@ -47,8 +50,12 @@
 	struct mutex lock;
 	int ref_count;
 
+	struct vsp1_hsit *hsi;
+	struct vsp1_hsit *hst;
 	struct vsp1_lif *lif;
+	struct vsp1_lut *lut;
 	struct vsp1_rwpf *rpf[VPS1_MAX_RPF];
+	struct vsp1_sru *sru;
 	struct vsp1_uds *uds[VPS1_MAX_UDS];
 	struct vsp1_rwpf *wpf[VPS1_MAX_WPF];
 
diff --git a/drivers/media/platform/vsp1/vsp1_drv.c b/drivers/media/platform/vsp1/vsp1_drv.c
index d16bf0f..0df0a99 100644
--- a/drivers/media/platform/vsp1/vsp1_drv.c
+++ b/drivers/media/platform/vsp1/vsp1_drv.c
@@ -20,8 +20,11 @@
 #include <linux/videodev2.h>
 
 #include "vsp1.h"
+#include "vsp1_hsit.h"
 #include "vsp1_lif.h"
+#include "vsp1_lut.h"
 #include "vsp1_rwpf.h"
+#include "vsp1_sru.h"
 #include "vsp1_uds.h"
 
 /* -----------------------------------------------------------------------------
@@ -152,6 +155,22 @@
 	}
 
 	/* Instantiate all the entities. */
+	vsp1->hsi = vsp1_hsit_create(vsp1, true);
+	if (IS_ERR(vsp1->hsi)) {
+		ret = PTR_ERR(vsp1->hsi);
+		goto done;
+	}
+
+	list_add_tail(&vsp1->hsi->entity.list_dev, &vsp1->entities);
+
+	vsp1->hst = vsp1_hsit_create(vsp1, false);
+	if (IS_ERR(vsp1->hst)) {
+		ret = PTR_ERR(vsp1->hst);
+		goto done;
+	}
+
+	list_add_tail(&vsp1->hst->entity.list_dev, &vsp1->entities);
+
 	if (vsp1->pdata->features & VSP1_HAS_LIF) {
 		vsp1->lif = vsp1_lif_create(vsp1);
 		if (IS_ERR(vsp1->lif)) {
@@ -162,6 +181,16 @@
 		list_add_tail(&vsp1->lif->entity.list_dev, &vsp1->entities);
 	}
 
+	if (vsp1->pdata->features & VSP1_HAS_LUT) {
+		vsp1->lut = vsp1_lut_create(vsp1);
+		if (IS_ERR(vsp1->lut)) {
+			ret = PTR_ERR(vsp1->lut);
+			goto done;
+		}
+
+		list_add_tail(&vsp1->lut->entity.list_dev, &vsp1->entities);
+	}
+
 	for (i = 0; i < vsp1->pdata->rpf_count; ++i) {
 		struct vsp1_rwpf *rpf;
 
@@ -175,6 +204,16 @@
 		list_add_tail(&rpf->entity.list_dev, &vsp1->entities);
 	}
 
+	if (vsp1->pdata->features & VSP1_HAS_SRU) {
+		vsp1->sru = vsp1_sru_create(vsp1);
+		if (IS_ERR(vsp1->sru)) {
+			ret = PTR_ERR(vsp1->sru);
+			goto done;
+		}
+
+		list_add_tail(&vsp1->sru->entity.list_dev, &vsp1->entities);
+	}
+
 	for (i = 0; i < vsp1->pdata->uds_count; ++i) {
 		struct vsp1_uds *uds;
 
diff --git a/drivers/media/platform/vsp1/vsp1_entity.c b/drivers/media/platform/vsp1/vsp1_entity.c
index 9028f9d..0226e47 100644
--- a/drivers/media/platform/vsp1/vsp1_entity.c
+++ b/drivers/media/platform/vsp1/vsp1_entity.c
@@ -15,6 +15,7 @@
 #include <linux/gfp.h>
 
 #include <media/media-entity.h>
+#include <media/v4l2-ctrls.h>
 #include <media/v4l2-subdev.h>
 
 #include "vsp1.h"
@@ -122,12 +123,16 @@
 		unsigned int id;
 		unsigned int reg;
 	} routes[] = {
+		{ VI6_DPR_NODE_HSI, VI6_DPR_HSI_ROUTE },
+		{ VI6_DPR_NODE_HST, VI6_DPR_HST_ROUTE },
 		{ VI6_DPR_NODE_LIF, 0 },
+		{ VI6_DPR_NODE_LUT, VI6_DPR_LUT_ROUTE },
 		{ VI6_DPR_NODE_RPF(0), VI6_DPR_RPF_ROUTE(0) },
 		{ VI6_DPR_NODE_RPF(1), VI6_DPR_RPF_ROUTE(1) },
 		{ VI6_DPR_NODE_RPF(2), VI6_DPR_RPF_ROUTE(2) },
 		{ VI6_DPR_NODE_RPF(3), VI6_DPR_RPF_ROUTE(3) },
 		{ VI6_DPR_NODE_RPF(4), VI6_DPR_RPF_ROUTE(4) },
+		{ VI6_DPR_NODE_SRU, VI6_DPR_SRU_ROUTE },
 		{ VI6_DPR_NODE_UDS(0), VI6_DPR_UDS_ROUTE(0) },
 		{ VI6_DPR_NODE_UDS(1), VI6_DPR_UDS_ROUTE(1) },
 		{ VI6_DPR_NODE_UDS(2), VI6_DPR_UDS_ROUTE(2) },
@@ -177,5 +182,7 @@
 
 void vsp1_entity_destroy(struct vsp1_entity *entity)
 {
+	if (entity->subdev.ctrl_handler)
+		v4l2_ctrl_handler_free(entity->subdev.ctrl_handler);
 	media_entity_cleanup(&entity->subdev.entity);
 }
diff --git a/drivers/media/platform/vsp1/vsp1_entity.h b/drivers/media/platform/vsp1/vsp1_entity.h
index c4feab2c..e152798 100644
--- a/drivers/media/platform/vsp1/vsp1_entity.h
+++ b/drivers/media/platform/vsp1/vsp1_entity.h
@@ -20,8 +20,12 @@
 struct vsp1_device;
 
 enum vsp1_entity_type {
+	VSP1_ENTITY_HSI,
+	VSP1_ENTITY_HST,
 	VSP1_ENTITY_LIF,
+	VSP1_ENTITY_LUT,
 	VSP1_ENTITY_RPF,
+	VSP1_ENTITY_SRU,
 	VSP1_ENTITY_UDS,
 	VSP1_ENTITY_WPF,
 };
diff --git a/drivers/media/platform/vsp1/vsp1_hsit.c b/drivers/media/platform/vsp1/vsp1_hsit.c
new file mode 100644
index 0000000..2854853
--- /dev/null
+++ b/drivers/media/platform/vsp1/vsp1_hsit.c
@@ -0,0 +1,222 @@
+/*
+ * vsp1_hsit.c  --  R-Car VSP1 Hue Saturation value (Inverse) Transform
+ *
+ * Copyright (C) 2013 Renesas Corporation
+ *
+ * Contact: Laurent Pinchart (laurent.pinchart@ideasonboard.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include <linux/device.h>
+#include <linux/gfp.h>
+
+#include <media/v4l2-subdev.h>
+
+#include "vsp1.h"
+#include "vsp1_hsit.h"
+
+#define HSIT_MIN_SIZE				4U
+#define HSIT_MAX_SIZE				8190U
+
+/* -----------------------------------------------------------------------------
+ * Device Access
+ */
+
+static inline u32 vsp1_hsit_read(struct vsp1_hsit *hsit, u32 reg)
+{
+	return vsp1_read(hsit->entity.vsp1, reg);
+}
+
+static inline void vsp1_hsit_write(struct vsp1_hsit *hsit, u32 reg, u32 data)
+{
+	vsp1_write(hsit->entity.vsp1, reg, data);
+}
+
+/* -----------------------------------------------------------------------------
+ * V4L2 Subdevice Core Operations
+ */
+
+static int hsit_s_stream(struct v4l2_subdev *subdev, int enable)
+{
+	struct vsp1_hsit *hsit = to_hsit(subdev);
+
+	if (!enable)
+		return 0;
+
+	if (hsit->inverse)
+		vsp1_hsit_write(hsit, VI6_HSI_CTRL, VI6_HSI_CTRL_EN);
+	else
+		vsp1_hsit_write(hsit, VI6_HST_CTRL, VI6_HST_CTRL_EN);
+
+	return 0;
+}
+
+/* -----------------------------------------------------------------------------
+ * V4L2 Subdevice Pad Operations
+ */
+
+static int hsit_enum_mbus_code(struct v4l2_subdev *subdev,
+			       struct v4l2_subdev_fh *fh,
+			       struct v4l2_subdev_mbus_code_enum *code)
+{
+	struct vsp1_hsit *hsit = to_hsit(subdev);
+
+	if (code->index > 0)
+		return -EINVAL;
+
+	if ((code->pad == HSIT_PAD_SINK && !hsit->inverse) ||
+	    (code->pad == HSIT_PAD_SOURCE && hsit->inverse))
+		code->code = V4L2_MBUS_FMT_ARGB8888_1X32;
+	else
+		code->code = V4L2_MBUS_FMT_AHSV8888_1X32;
+
+	return 0;
+}
+
+static int hsit_enum_frame_size(struct v4l2_subdev *subdev,
+				struct v4l2_subdev_fh *fh,
+				struct v4l2_subdev_frame_size_enum *fse)
+{
+	struct v4l2_mbus_framefmt *format;
+
+	format = v4l2_subdev_get_try_format(fh, fse->pad);
+
+	if (fse->index || fse->code != format->code)
+		return -EINVAL;
+
+	if (fse->pad == HSIT_PAD_SINK) {
+		fse->min_width = HSIT_MIN_SIZE;
+		fse->max_width = HSIT_MAX_SIZE;
+		fse->min_height = HSIT_MIN_SIZE;
+		fse->max_height = HSIT_MAX_SIZE;
+	} else {
+		/* The size on the source pad is fixed and always identical to
+		 * the size on the sink pad.
+		 */
+		fse->min_width = format->width;
+		fse->max_width = format->width;
+		fse->min_height = format->height;
+		fse->max_height = format->height;
+	}
+
+	return 0;
+}
+
+static int hsit_get_format(struct v4l2_subdev *subdev,
+			   struct v4l2_subdev_fh *fh,
+			   struct v4l2_subdev_format *fmt)
+{
+	struct vsp1_hsit *hsit = to_hsit(subdev);
+
+	fmt->format = *vsp1_entity_get_pad_format(&hsit->entity, fh, fmt->pad,
+						  fmt->which);
+
+	return 0;
+}
+
+static int hsit_set_format(struct v4l2_subdev *subdev,
+			   struct v4l2_subdev_fh *fh,
+			   struct v4l2_subdev_format *fmt)
+{
+	struct vsp1_hsit *hsit = to_hsit(subdev);
+	struct v4l2_mbus_framefmt *format;
+
+	format = vsp1_entity_get_pad_format(&hsit->entity, fh, fmt->pad,
+					    fmt->which);
+
+	if (fmt->pad == HSIT_PAD_SOURCE) {
+		/* The HST and HSI output format code and resolution can't be
+		 * modified.
+		 */
+		fmt->format = *format;
+		return 0;
+	}
+
+	format->code = hsit->inverse ? V4L2_MBUS_FMT_AHSV8888_1X32
+		     : V4L2_MBUS_FMT_ARGB8888_1X32;
+	format->width = clamp_t(unsigned int, fmt->format.width,
+				HSIT_MIN_SIZE, HSIT_MAX_SIZE);
+	format->height = clamp_t(unsigned int, fmt->format.height,
+				 HSIT_MIN_SIZE, HSIT_MAX_SIZE);
+	format->field = V4L2_FIELD_NONE;
+	format->colorspace = V4L2_COLORSPACE_SRGB;
+
+	fmt->format = *format;
+
+	/* Propagate the format to the source pad. */
+	format = vsp1_entity_get_pad_format(&hsit->entity, fh, HSIT_PAD_SOURCE,
+					    fmt->which);
+	*format = fmt->format;
+	format->code = hsit->inverse ? V4L2_MBUS_FMT_ARGB8888_1X32
+		     : V4L2_MBUS_FMT_AHSV8888_1X32;
+
+	return 0;
+}
+
+/* -----------------------------------------------------------------------------
+ * V4L2 Subdevice Operations
+ */
+
+static struct v4l2_subdev_video_ops hsit_video_ops = {
+	.s_stream = hsit_s_stream,
+};
+
+static struct v4l2_subdev_pad_ops hsit_pad_ops = {
+	.enum_mbus_code = hsit_enum_mbus_code,
+	.enum_frame_size = hsit_enum_frame_size,
+	.get_fmt = hsit_get_format,
+	.set_fmt = hsit_set_format,
+};
+
+static struct v4l2_subdev_ops hsit_ops = {
+	.video	= &hsit_video_ops,
+	.pad    = &hsit_pad_ops,
+};
+
+/* -----------------------------------------------------------------------------
+ * Initialization and Cleanup
+ */
+
+struct vsp1_hsit *vsp1_hsit_create(struct vsp1_device *vsp1, bool inverse)
+{
+	struct v4l2_subdev *subdev;
+	struct vsp1_hsit *hsit;
+	int ret;
+
+	hsit = devm_kzalloc(vsp1->dev, sizeof(*hsit), GFP_KERNEL);
+	if (hsit == NULL)
+		return ERR_PTR(-ENOMEM);
+
+	hsit->inverse = inverse;
+
+	if (inverse) {
+		hsit->entity.type = VSP1_ENTITY_HSI;
+		hsit->entity.id = VI6_DPR_NODE_HSI;
+	} else {
+		hsit->entity.type = VSP1_ENTITY_HST;
+		hsit->entity.id = VI6_DPR_NODE_HST;
+	}
+
+	ret = vsp1_entity_init(vsp1, &hsit->entity, 2);
+	if (ret < 0)
+		return ERR_PTR(ret);
+
+	/* Initialize the V4L2 subdev. */
+	subdev = &hsit->entity.subdev;
+	v4l2_subdev_init(subdev, &hsit_ops);
+
+	subdev->entity.ops = &vsp1_media_ops;
+	subdev->internal_ops = &vsp1_subdev_internal_ops;
+	snprintf(subdev->name, sizeof(subdev->name), "%s %s",
+		 dev_name(vsp1->dev), inverse ? "hsi" : "hst");
+	v4l2_set_subdevdata(subdev, hsit);
+	subdev->flags |= V4L2_SUBDEV_FL_HAS_DEVNODE;
+
+	vsp1_entity_init_formats(subdev, NULL);
+
+	return hsit;
+}
diff --git a/drivers/media/platform/vsp1/vsp1_hsit.h b/drivers/media/platform/vsp1/vsp1_hsit.h
new file mode 100644
index 0000000..82f1c8426
--- /dev/null
+++ b/drivers/media/platform/vsp1/vsp1_hsit.h
@@ -0,0 +1,38 @@
+/*
+ * vsp1_hsit.h  --  R-Car VSP1 Hue Saturation value (Inverse) Transform
+ *
+ * Copyright (C) 2013 Renesas Corporation
+ *
+ * Contact: Laurent Pinchart (laurent.pinchart@ideasonboard.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+#ifndef __VSP1_HSIT_H__
+#define __VSP1_HSIT_H__
+
+#include <media/media-entity.h>
+#include <media/v4l2-subdev.h>
+
+#include "vsp1_entity.h"
+
+struct vsp1_device;
+
+#define HSIT_PAD_SINK				0
+#define HSIT_PAD_SOURCE				1
+
+struct vsp1_hsit {
+	struct vsp1_entity entity;
+	bool inverse;
+};
+
+static inline struct vsp1_hsit *to_hsit(struct v4l2_subdev *subdev)
+{
+	return container_of(subdev, struct vsp1_hsit, entity.subdev);
+}
+
+struct vsp1_hsit *vsp1_hsit_create(struct vsp1_device *vsp1, bool inverse);
+
+#endif /* __VSP1_HSIT_H__ */
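hsit_enum_mbus_code() above pairs each pad with either the RGB or the HSV media-bus code depending on whether the entity is the forward (HST) or inverse (HSI) transform. The following stand-alone sketch restates that pairing with local enum stand-ins for the media-bus codes, so the condition can be read in isolation; it is an illustration, not the driver's own types.

/*
 * Sketch of the pad/code pairing: the inverse transform (HSI) takes HSV
 * in and produces RGB, the forward transform (HST) does the opposite.
 */
#include <stdio.h>

enum mbus_code { MBUS_ARGB8888, MBUS_AHSV8888 };
enum pad { PAD_SINK, PAD_SOURCE };

static enum mbus_code hsit_pad_code(enum pad pad, int inverse)
{
	if ((pad == PAD_SINK && !inverse) || (pad == PAD_SOURCE && inverse))
		return MBUS_ARGB8888;	/* RGB side of the transform */
	return MBUS_AHSV8888;		/* HSV side of the transform */
}

int main(void)
{
	printf("HSI sink is %s\n",
	       hsit_pad_code(PAD_SINK, 1) == MBUS_AHSV8888 ? "HSV" : "RGB");
	return 0;
}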
diff --git a/drivers/media/platform/vsp1/vsp1_lut.c b/drivers/media/platform/vsp1/vsp1_lut.c
new file mode 100644
index 0000000..4e9dc7c
--- /dev/null
+++ b/drivers/media/platform/vsp1/vsp1_lut.c
@@ -0,0 +1,252 @@
+/*
+ * vsp1_lut.c  --  R-Car VSP1 Look-Up Table
+ *
+ * Copyright (C) 2013 Renesas Corporation
+ *
+ * Contact: Laurent Pinchart (laurent.pinchart@ideasonboard.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include <linux/device.h>
+#include <linux/gfp.h>
+#include <linux/vsp1.h>
+
+#include <media/v4l2-subdev.h>
+
+#include "vsp1.h"
+#include "vsp1_lut.h"
+
+#define LUT_MIN_SIZE				4U
+#define LUT_MAX_SIZE				8190U
+
+/* -----------------------------------------------------------------------------
+ * Device Access
+ */
+
+static inline u32 vsp1_lut_read(struct vsp1_lut *lut, u32 reg)
+{
+	return vsp1_read(lut->entity.vsp1, reg);
+}
+
+static inline void vsp1_lut_write(struct vsp1_lut *lut, u32 reg, u32 data)
+{
+	vsp1_write(lut->entity.vsp1, reg, data);
+}
+
+/* -----------------------------------------------------------------------------
+ * V4L2 Subdevice Core Operations
+ */
+
+static void lut_configure(struct vsp1_lut *lut, struct vsp1_lut_config *config)
+{
+	memcpy_toio(lut->entity.vsp1->mmio + VI6_LUT_TABLE, config->lut,
+		    sizeof(config->lut));
+}
+
+static long lut_ioctl(struct v4l2_subdev *subdev, unsigned int cmd, void *arg)
+{
+	struct vsp1_lut *lut = to_lut(subdev);
+
+	switch (cmd) {
+	case VIDIOC_VSP1_LUT_CONFIG:
+		lut_configure(lut, arg);
+		return 0;
+
+	default:
+		return -ENOIOCTLCMD;
+	}
+}
+
+/* -----------------------------------------------------------------------------
+ * V4L2 Subdevice Video Operations
+ */
+
+static int lut_s_stream(struct v4l2_subdev *subdev, int enable)
+{
+	struct vsp1_lut *lut = to_lut(subdev);
+
+	if (!enable)
+		return 0;
+
+	vsp1_lut_write(lut, VI6_LUT_CTRL, VI6_LUT_CTRL_EN);
+
+	return 0;
+}
+
+/* -----------------------------------------------------------------------------
+ * V4L2 Subdevice Pad Operations
+ */
+
+static int lut_enum_mbus_code(struct v4l2_subdev *subdev,
+			      struct v4l2_subdev_fh *fh,
+			      struct v4l2_subdev_mbus_code_enum *code)
+{
+	static const unsigned int codes[] = {
+		V4L2_MBUS_FMT_ARGB8888_1X32,
+		V4L2_MBUS_FMT_AHSV8888_1X32,
+		V4L2_MBUS_FMT_AYUV8_1X32,
+	};
+	struct v4l2_mbus_framefmt *format;
+
+	if (code->pad == LUT_PAD_SINK) {
+		if (code->index >= ARRAY_SIZE(codes))
+			return -EINVAL;
+
+		code->code = codes[code->index];
+	} else {
+		/* The LUT can't perform format conversion; the sink format is
+		 * always identical to the source format.
+		 */
+		if (code->index)
+			return -EINVAL;
+
+		format = v4l2_subdev_get_try_format(fh, LUT_PAD_SINK);
+		code->code = format->code;
+	}
+
+	return 0;
+}
+
+static int lut_enum_frame_size(struct v4l2_subdev *subdev,
+			       struct v4l2_subdev_fh *fh,
+			       struct v4l2_subdev_frame_size_enum *fse)
+{
+	struct v4l2_mbus_framefmt *format;
+
+	format = v4l2_subdev_get_try_format(fh, fse->pad);
+
+	if (fse->index || fse->code != format->code)
+		return -EINVAL;
+
+	if (fse->pad == LUT_PAD_SINK) {
+		fse->min_width = LUT_MIN_SIZE;
+		fse->max_width = LUT_MAX_SIZE;
+		fse->min_height = LUT_MIN_SIZE;
+		fse->max_height = LUT_MAX_SIZE;
+	} else {
+		/* The size on the source pad is fixed and always identical to
+		 * the size on the sink pad.
+		 */
+		fse->min_width = format->width;
+		fse->max_width = format->width;
+		fse->min_height = format->height;
+		fse->max_height = format->height;
+	}
+
+	return 0;
+}
+
+static int lut_get_format(struct v4l2_subdev *subdev, struct v4l2_subdev_fh *fh,
+			  struct v4l2_subdev_format *fmt)
+{
+	struct vsp1_lut *lut = to_lut(subdev);
+
+	fmt->format = *vsp1_entity_get_pad_format(&lut->entity, fh, fmt->pad,
+						  fmt->which);
+
+	return 0;
+}
+
+static int lut_set_format(struct v4l2_subdev *subdev, struct v4l2_subdev_fh *fh,
+			  struct v4l2_subdev_format *fmt)
+{
+	struct vsp1_lut *lut = to_lut(subdev);
+	struct v4l2_mbus_framefmt *format;
+
+	/* Default to YUV if the requested format is not supported. */
+	if (fmt->format.code != V4L2_MBUS_FMT_ARGB8888_1X32 &&
+	    fmt->format.code != V4L2_MBUS_FMT_AHSV8888_1X32 &&
+	    fmt->format.code != V4L2_MBUS_FMT_AYUV8_1X32)
+		fmt->format.code = V4L2_MBUS_FMT_AYUV8_1X32;
+
+	format = vsp1_entity_get_pad_format(&lut->entity, fh, fmt->pad,
+					    fmt->which);
+
+	if (fmt->pad == LUT_PAD_SOURCE) {
+		/* The LUT output format can't be modified. */
+		fmt->format = *format;
+		return 0;
+	}
+
+	format->width = clamp_t(unsigned int, fmt->format.width,
+				LUT_MIN_SIZE, LUT_MAX_SIZE);
+	format->height = clamp_t(unsigned int, fmt->format.height,
+				 LUT_MIN_SIZE, LUT_MAX_SIZE);
+	format->field = V4L2_FIELD_NONE;
+	format->colorspace = V4L2_COLORSPACE_SRGB;
+
+	fmt->format = *format;
+
+	/* Propagate the format to the source pad. */
+	format = vsp1_entity_get_pad_format(&lut->entity, fh, LUT_PAD_SOURCE,
+					    fmt->which);
+	*format = fmt->format;
+
+	return 0;
+}
+
+/* -----------------------------------------------------------------------------
+ * V4L2 Subdevice Operations
+ */
+
+static struct v4l2_subdev_core_ops lut_core_ops = {
+	.ioctl = lut_ioctl,
+};
+
+static struct v4l2_subdev_video_ops lut_video_ops = {
+	.s_stream = lut_s_stream,
+};
+
+static struct v4l2_subdev_pad_ops lut_pad_ops = {
+	.enum_mbus_code = lut_enum_mbus_code,
+	.enum_frame_size = lut_enum_frame_size,
+	.get_fmt = lut_get_format,
+	.set_fmt = lut_set_format,
+};
+
+static struct v4l2_subdev_ops lut_ops = {
+	.core	= &lut_core_ops,
+	.video	= &lut_video_ops,
+	.pad    = &lut_pad_ops,
+};
+
+/* -----------------------------------------------------------------------------
+ * Initialization and Cleanup
+ */
+
+struct vsp1_lut *vsp1_lut_create(struct vsp1_device *vsp1)
+{
+	struct v4l2_subdev *subdev;
+	struct vsp1_lut *lut;
+	int ret;
+
+	lut = devm_kzalloc(vsp1->dev, sizeof(*lut), GFP_KERNEL);
+	if (lut == NULL)
+		return ERR_PTR(-ENOMEM);
+
+	lut->entity.type = VSP1_ENTITY_LUT;
+	lut->entity.id = VI6_DPR_NODE_LUT;
+
+	ret = vsp1_entity_init(vsp1, &lut->entity, 2);
+	if (ret < 0)
+		return ERR_PTR(ret);
+
+	/* Initialize the V4L2 subdev. */
+	subdev = &lut->entity.subdev;
+	v4l2_subdev_init(subdev, &lut_ops);
+
+	subdev->entity.ops = &vsp1_media_ops;
+	subdev->internal_ops = &vsp1_subdev_internal_ops;
+	snprintf(subdev->name, sizeof(subdev->name), "%s lut",
+		 dev_name(vsp1->dev));
+	v4l2_set_subdevdata(subdev, lut);
+	subdev->flags |= V4L2_SUBDEV_FL_HAS_DEVNODE;
+
+	vsp1_entity_init_formats(subdev, NULL);
+
+	return lut;
+}
diff --git a/drivers/media/platform/vsp1/vsp1_lut.h b/drivers/media/platform/vsp1/vsp1_lut.h
new file mode 100644
index 0000000..f92ffb8
--- /dev/null
+++ b/drivers/media/platform/vsp1/vsp1_lut.h
@@ -0,0 +1,38 @@
+/*
+ * vsp1_lut.h  --  R-Car VSP1 Look-Up Table
+ *
+ * Copyright (C) 2013 Renesas Corporation
+ *
+ * Contact: Laurent Pinchart (laurent.pinchart@ideasonboard.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+#ifndef __VSP1_LUT_H__
+#define __VSP1_LUT_H__
+
+#include <media/media-entity.h>
+#include <media/v4l2-subdev.h>
+
+#include "vsp1_entity.h"
+
+struct vsp1_device;
+
+#define LUT_PAD_SINK				0
+#define LUT_PAD_SOURCE				1
+
+struct vsp1_lut {
+	struct vsp1_entity entity;
+	u32 lut[256];
+};
+
+static inline struct vsp1_lut *to_lut(struct v4l2_subdev *subdev)
+{
+	return container_of(subdev, struct vsp1_lut, entity.subdev);
+}
+
+struct vsp1_lut *vsp1_lut_create(struct vsp1_device *vsp1);
+
+#endif /* __VSP1_LUT_H__ */
diff --git a/drivers/media/platform/vsp1/vsp1_regs.h b/drivers/media/platform/vsp1/vsp1_regs.h
index 1d3304f..2865080 100644
--- a/drivers/media/platform/vsp1/vsp1_regs.h
+++ b/drivers/media/platform/vsp1/vsp1_regs.h
@@ -336,8 +336,21 @@
  */
 
 #define VI6_SRU_CTRL0			0x2200
+#define VI6_SRU_CTRL0_PARAM0_SHIFT	16
+#define VI6_SRU_CTRL0_PARAM1_SHIFT	8
+#define VI6_SRU_CTRL0_MODE_UPSCALE	(4 << 4)
+#define VI6_SRU_CTRL0_PARAM2		(1 << 3)
+#define VI6_SRU_CTRL0_PARAM3		(1 << 2)
+#define VI6_SRU_CTRL0_PARAM4		(1 << 1)
+#define VI6_SRU_CTRL0_EN		(1 << 0)
+
 #define VI6_SRU_CTRL1			0x2204
+#define VI6_SRU_CTRL1_PARAM5		0x7ff
+
 #define VI6_SRU_CTRL2			0x2208
+#define VI6_SRU_CTRL2_PARAM6_SHIFT	16
+#define VI6_SRU_CTRL2_PARAM7_SHIFT	8
+#define VI6_SRU_CTRL2_PARAM8_SHIFT	0
 
 /* -----------------------------------------------------------------------------
  * UDS Control Registers
@@ -412,6 +425,7 @@
  */
 
 #define VI6_LUT_CTRL			0x2800
+#define VI6_LUT_CTRL_EN			(1 << 0)
 
 /* -----------------------------------------------------------------------------
  * CLU Control Registers
@@ -424,12 +438,14 @@
  */
 
 #define VI6_HST_CTRL			0x2a00
+#define VI6_HST_CTRL_EN			(1 << 0)
 
 /* -----------------------------------------------------------------------------
  * HSI Control Registers
  */
 
 #define VI6_HSI_CTRL			0x2b00
+#define VI6_HSI_CTRL_EN			(1 << 0)
 
 /* -----------------------------------------------------------------------------
  * BRU Control Registers
diff --git a/drivers/media/platform/vsp1/vsp1_rpf.c b/drivers/media/platform/vsp1/vsp1_rpf.c
index 254871d..bce2be5 100644
--- a/drivers/media/platform/vsp1/vsp1_rpf.c
+++ b/drivers/media/platform/vsp1/vsp1_rpf.c
@@ -47,25 +47,36 @@
 	struct vsp1_rwpf *rpf = to_rwpf(subdev);
 	const struct vsp1_format_info *fmtinfo = rpf->video.fmtinfo;
 	const struct v4l2_pix_format_mplane *format = &rpf->video.format;
+	const struct v4l2_rect *crop = &rpf->crop;
 	u32 pstride;
 	u32 infmt;
 
 	if (!enable)
 		return 0;
 
-	/* Source size and stride. Cropping isn't supported yet. */
+	/* Source size, stride and crop offsets.
+	 *
+	 * The crop offsets correspond to the location of the crop rectangle top
+	 * left corner in the plane buffer. Only two offsets are needed, as
+	 * planes 2 and 3 always have identical strides.
+	 */
 	vsp1_rpf_write(rpf, VI6_RPF_SRC_BSIZE,
-		       (format->width << VI6_RPF_SRC_BSIZE_BHSIZE_SHIFT) |
-		       (format->height << VI6_RPF_SRC_BSIZE_BVSIZE_SHIFT));
+		       (crop->width << VI6_RPF_SRC_BSIZE_BHSIZE_SHIFT) |
+		       (crop->height << VI6_RPF_SRC_BSIZE_BVSIZE_SHIFT));
 	vsp1_rpf_write(rpf, VI6_RPF_SRC_ESIZE,
-		       (format->width << VI6_RPF_SRC_ESIZE_EHSIZE_SHIFT) |
-		       (format->height << VI6_RPF_SRC_ESIZE_EVSIZE_SHIFT));
+		       (crop->width << VI6_RPF_SRC_ESIZE_EHSIZE_SHIFT) |
+		       (crop->height << VI6_RPF_SRC_ESIZE_EVSIZE_SHIFT));
 
+	rpf->offsets[0] = crop->top * format->plane_fmt[0].bytesperline
+			+ crop->left * fmtinfo->bpp[0] / 8;
 	pstride = format->plane_fmt[0].bytesperline
 		<< VI6_RPF_SRCM_PSTRIDE_Y_SHIFT;
-	if (format->num_planes > 1)
+	if (format->num_planes > 1) {
+		rpf->offsets[1] = crop->top * format->plane_fmt[1].bytesperline
+				+ crop->left * fmtinfo->bpp[1] / 8;
 		pstride |= format->plane_fmt[1].bytesperline
 			<< VI6_RPF_SRCM_PSTRIDE_C_SHIFT;
+	}
 
 	vsp1_rpf_write(rpf, VI6_RPF_SRCM_PSTRIDE, pstride);
 
@@ -113,6 +124,8 @@
 	.enum_frame_size = vsp1_rwpf_enum_frame_size,
 	.get_fmt = vsp1_rwpf_get_format,
 	.set_fmt = vsp1_rwpf_set_format,
+	.get_selection = vsp1_rwpf_get_selection,
+	.set_selection = vsp1_rwpf_set_selection,
 };
 
 static struct v4l2_subdev_ops rpf_ops = {
@@ -129,11 +142,14 @@
 {
 	struct vsp1_rwpf *rpf = container_of(video, struct vsp1_rwpf, video);
 
-	vsp1_rpf_write(rpf, VI6_RPF_SRCM_ADDR_Y, buf->addr[0]);
+	vsp1_rpf_write(rpf, VI6_RPF_SRCM_ADDR_Y,
+		       buf->addr[0] + rpf->offsets[0]);
 	if (buf->buf.num_planes > 1)
-		vsp1_rpf_write(rpf, VI6_RPF_SRCM_ADDR_C0, buf->addr[1]);
+		vsp1_rpf_write(rpf, VI6_RPF_SRCM_ADDR_C0,
+			       buf->addr[1] + rpf->offsets[1]);
 	if (buf->buf.num_planes > 2)
-		vsp1_rpf_write(rpf, VI6_RPF_SRCM_ADDR_C1, buf->addr[2]);
+		vsp1_rpf_write(rpf, VI6_RPF_SRCM_ADDR_C1,
+			       buf->addr[2] + rpf->offsets[1]);
 }
 
 static const struct vsp1_video_operations rpf_vdev_ops = {
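The RPF stream setup above now adds a per-plane byte offset so that reading starts at the crop rectangle's top-left corner: rows times the plane stride plus columns times bytes per pixel. A small sketch of that arithmetic is below; the plane geometry in main() is made up for illustration.

/*
 * Sketch of the crop offset computation: byte offset of the crop
 * rectangle's top-left corner within a plane buffer.
 */
#include <stdio.h>

static unsigned int crop_offset(unsigned int top, unsigned int left,
				unsigned int bytesperline,
				unsigned int bpp_bits)
{
	return top * bytesperline + left * bpp_bits / 8;
}

int main(void)
{
	/* hypothetical 1920-wide ARGB32 plane, cropping at (64, 32) */
	printf("offset = %u bytes\n", crop_offset(32, 64, 1920 * 4, 32));
	return 0;
}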
diff --git a/drivers/media/platform/vsp1/vsp1_rwpf.c b/drivers/media/platform/vsp1/vsp1_rwpf.c
index 9752d55..782f770 100644
--- a/drivers/media/platform/vsp1/vsp1_rwpf.c
+++ b/drivers/media/platform/vsp1/vsp1_rwpf.c
@@ -71,6 +71,19 @@
 	return 0;
 }
 
+static struct v4l2_rect *
+vsp1_rwpf_get_crop(struct vsp1_rwpf *rwpf, struct v4l2_subdev_fh *fh, u32 which)
+{
+	switch (which) {
+	case V4L2_SUBDEV_FORMAT_TRY:
+		return v4l2_subdev_get_try_crop(fh, RWPF_PAD_SINK);
+	case V4L2_SUBDEV_FORMAT_ACTIVE:
+		return &rwpf->crop;
+	default:
+		return NULL;
+	}
+}
+
 int vsp1_rwpf_get_format(struct v4l2_subdev *subdev, struct v4l2_subdev_fh *fh,
 			 struct v4l2_subdev_format *fmt)
 {
@@ -87,6 +100,7 @@
 {
 	struct vsp1_rwpf *rwpf = to_rwpf(subdev);
 	struct v4l2_mbus_framefmt *format;
+	struct v4l2_rect *crop;
 
 	/* Default to YUV if the requested format is not supported. */
 	if (fmt->format.code != V4L2_MBUS_FMT_ARGB8888_1X32 &&
@@ -115,6 +129,13 @@
 
 	fmt->format = *format;
 
+	/* Update the sink crop rectangle. */
+	crop = vsp1_rwpf_get_crop(rwpf, fh, fmt->which);
+	crop->left = 0;
+	crop->top = 0;
+	crop->width = fmt->format.width;
+	crop->height = fmt->format.height;
+
 	/* Propagate the format to the source pad. */
 	format = vsp1_entity_get_pad_format(&rwpf->entity, fh, RWPF_PAD_SOURCE,
 					    fmt->which);
@@ -122,3 +143,78 @@
 
 	return 0;
 }
+
+int vsp1_rwpf_get_selection(struct v4l2_subdev *subdev,
+			    struct v4l2_subdev_fh *fh,
+			    struct v4l2_subdev_selection *sel)
+{
+	struct vsp1_rwpf *rwpf = to_rwpf(subdev);
+	struct v4l2_mbus_framefmt *format;
+
+	/* Cropping is implemented on the sink pad. */
+	if (sel->pad != RWPF_PAD_SINK)
+		return -EINVAL;
+
+	switch (sel->target) {
+	case V4L2_SEL_TGT_CROP:
+		sel->r = *vsp1_rwpf_get_crop(rwpf, fh, sel->which);
+		break;
+
+	case V4L2_SEL_TGT_CROP_BOUNDS:
+		format = vsp1_entity_get_pad_format(&rwpf->entity, fh,
+						    RWPF_PAD_SINK, sel->which);
+		sel->r.left = 0;
+		sel->r.top = 0;
+		sel->r.width = format->width;
+		sel->r.height = format->height;
+		break;
+
+	default:
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+int vsp1_rwpf_set_selection(struct v4l2_subdev *subdev,
+			    struct v4l2_subdev_fh *fh,
+			    struct v4l2_subdev_selection *sel)
+{
+	struct vsp1_rwpf *rwpf = to_rwpf(subdev);
+	struct v4l2_mbus_framefmt *format;
+	struct v4l2_rect *crop;
+
+	/* Cropping is implemented on the sink pad. */
+	if (sel->pad != RWPF_PAD_SINK)
+		return -EINVAL;
+
+	if (sel->target != V4L2_SEL_TGT_CROP)
+		return -EINVAL;
+
+	/* Make sure the crop rectangle is entirely contained in the image. The
+	 * WPF top and left offsets are limited to 255.
+	 */
+	format = vsp1_entity_get_pad_format(&rwpf->entity, fh, RWPF_PAD_SINK,
+					    sel->which);
+	sel->r.left = min_t(unsigned int, sel->r.left, format->width - 2);
+	sel->r.top = min_t(unsigned int, sel->r.top, format->height - 2);
+	if (rwpf->entity.type == VSP1_ENTITY_WPF) {
+		sel->r.left = min_t(unsigned int, sel->r.left, 255);
+		sel->r.top = min_t(unsigned int, sel->r.top, 255);
+	}
+	sel->r.width = min_t(unsigned int, sel->r.width,
+			     format->width - sel->r.left);
+	sel->r.height = min_t(unsigned int, sel->r.height,
+			      format->height - sel->r.top);
+
+	crop = vsp1_rwpf_get_crop(rwpf, fh, sel->which);
+	*crop = sel->r;
+
+	/* Propagate the format to the source pad. */
+	format = vsp1_entity_get_pad_format(&rwpf->entity, fh, RWPF_PAD_SOURCE,
+					    sel->which);
+	format->width = crop->width;
+	format->height = crop->height;
+
+	return 0;
+}
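vsp1_rwpf_set_selection() above clamps rather than rejects: left and top are capped at width - 2 and height - 2, the WPF additionally limits both offsets to 255, and width/height are then shrunk so the rectangle stays inside the frame. A standalone sketch of that clamping order, with a hypothetical 1920x1080 frame:

#include <stdio.h>

struct rect { unsigned int left, top, width, height; };

static unsigned int umin(unsigned int a, unsigned int b)
{
	return a < b ? a : b;
}

/* Clamp a crop request against a frame, optionally applying the 255-pixel
 * offset limit that the WPF imposes. */
static void clamp_crop(struct rect *r, unsigned int fw, unsigned int fh, int is_wpf)
{
	r->left = umin(r->left, fw - 2);
	r->top = umin(r->top, fh - 2);
	if (is_wpf) {
		r->left = umin(r->left, 255);
		r->top = umin(r->top, 255);
	}
	r->width = umin(r->width, fw - r->left);
	r->height = umin(r->height, fh - r->top);
}

int main(void)
{
	struct rect r = { 300, 100, 2000, 2000 };	/* oversized request */

	clamp_crop(&r, 1920, 1080, 1);
	printf("crop: %ux%u@(%u,%u)\n", r.width, r.height, r.left, r.top);
	return 0;
}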
diff --git a/drivers/media/platform/vsp1/vsp1_rwpf.h b/drivers/media/platform/vsp1/vsp1_rwpf.h
index c182d85..6cbdb54 100644
--- a/drivers/media/platform/vsp1/vsp1_rwpf.h
+++ b/drivers/media/platform/vsp1/vsp1_rwpf.h
@@ -29,6 +29,10 @@
 
 	unsigned int max_width;
 	unsigned int max_height;
+
+	struct v4l2_rect crop;
+
+	unsigned int offsets[2];
 };
 
 static inline struct vsp1_rwpf *to_rwpf(struct v4l2_subdev *subdev)
@@ -49,5 +53,11 @@
 			 struct v4l2_subdev_format *fmt);
 int vsp1_rwpf_set_format(struct v4l2_subdev *subdev, struct v4l2_subdev_fh *fh,
 			 struct v4l2_subdev_format *fmt);
+int vsp1_rwpf_get_selection(struct v4l2_subdev *subdev,
+			    struct v4l2_subdev_fh *fh,
+			    struct v4l2_subdev_selection *sel);
+int vsp1_rwpf_set_selection(struct v4l2_subdev *subdev,
+			    struct v4l2_subdev_fh *fh,
+			    struct v4l2_subdev_selection *sel);
 
 #endif /* __VSP1_RWPF_H__ */
diff --git a/drivers/media/platform/vsp1/vsp1_sru.c b/drivers/media/platform/vsp1/vsp1_sru.c
new file mode 100644
index 0000000..7ab1a0b
--- /dev/null
+++ b/drivers/media/platform/vsp1/vsp1_sru.c
@@ -0,0 +1,356 @@
+/*
+ * vsp1_sru.c  --  R-Car VSP1 Super Resolution Unit
+ *
+ * Copyright (C) 2013 Renesas Corporation
+ *
+ * Contact: Laurent Pinchart (laurent.pinchart@ideasonboard.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include <linux/device.h>
+#include <linux/gfp.h>
+
+#include <media/v4l2-subdev.h>
+
+#include "vsp1.h"
+#include "vsp1_sru.h"
+
+#define SRU_MIN_SIZE				4U
+#define SRU_MAX_SIZE				8190U
+
+/* -----------------------------------------------------------------------------
+ * Device Access
+ */
+
+static inline u32 vsp1_sru_read(struct vsp1_sru *sru, u32 reg)
+{
+	return vsp1_read(sru->entity.vsp1, reg);
+}
+
+static inline void vsp1_sru_write(struct vsp1_sru *sru, u32 reg, u32 data)
+{
+	vsp1_write(sru->entity.vsp1, reg, data);
+}
+
+/* -----------------------------------------------------------------------------
+ * Controls
+ */
+
+#define V4L2_CID_VSP1_SRU_INTENSITY		(V4L2_CID_USER_BASE + 1)
+
+static int sru_s_ctrl(struct v4l2_ctrl *ctrl)
+{
+	struct vsp1_sru *sru =
+		container_of(ctrl->handler, struct vsp1_sru, ctrls);
+
+	switch (ctrl->id) {
+	case V4L2_CID_VSP1_SRU_INTENSITY:
+		sru->intensity = ctrl->val;
+		break;
+	}
+
+	return 0;
+}
+
+static const struct v4l2_ctrl_ops sru_ctrl_ops = {
+	.s_ctrl = sru_s_ctrl,
+};
+
+static const struct v4l2_ctrl_config sru_intensity_control = {
+	.ops = &sru_ctrl_ops,
+	.id = V4L2_CID_VSP1_SRU_INTENSITY,
+	.name = "Intensity",
+	.type = V4L2_CTRL_TYPE_INTEGER,
+	.min = 1,
+	.max = 6,
+	.def = 1,
+	.step = 1,
+};
+
+/* -----------------------------------------------------------------------------
+ * V4L2 Subdevice Core Operations
+ */
+
+struct vsp1_sru_param {
+	u32 ctrl0;
+	u32 ctrl2;
+};
+
+#define VI6_SRU_CTRL0_PARAMS(p0, p1)			\
+	(((p0) << VI6_SRU_CTRL0_PARAM0_SHIFT) |		\
+	 ((p1) << VI6_SRU_CTRL0_PARAM1_SHIFT))
+
+#define VI6_SRU_CTRL2_PARAMS(p6, p7, p8)		\
+	(((p6) << VI6_SRU_CTRL2_PARAM6_SHIFT) |		\
+	 ((p7) << VI6_SRU_CTRL2_PARAM7_SHIFT) |		\
+	 ((p8) << VI6_SRU_CTRL2_PARAM8_SHIFT))
+
+static const struct vsp1_sru_param vsp1_sru_params[] = {
+	{
+		.ctrl0 = VI6_SRU_CTRL0_PARAMS(256, 4) | VI6_SRU_CTRL0_EN,
+		.ctrl2 = VI6_SRU_CTRL2_PARAMS(24, 40, 255),
+	}, {
+		.ctrl0 = VI6_SRU_CTRL0_PARAMS(256, 4) | VI6_SRU_CTRL0_EN,
+		.ctrl2 = VI6_SRU_CTRL2_PARAMS(8, 16, 255),
+	}, {
+		.ctrl0 = VI6_SRU_CTRL0_PARAMS(384, 5) | VI6_SRU_CTRL0_EN,
+		.ctrl2 = VI6_SRU_CTRL2_PARAMS(36, 60, 255),
+	}, {
+		.ctrl0 = VI6_SRU_CTRL0_PARAMS(384, 5) | VI6_SRU_CTRL0_EN,
+		.ctrl2 = VI6_SRU_CTRL2_PARAMS(12, 27, 255),
+	}, {
+		.ctrl0 = VI6_SRU_CTRL0_PARAMS(511, 6) | VI6_SRU_CTRL0_EN,
+		.ctrl2 = VI6_SRU_CTRL2_PARAMS(48, 80, 255),
+	}, {
+		.ctrl0 = VI6_SRU_CTRL0_PARAMS(511, 6) | VI6_SRU_CTRL0_EN,
+		.ctrl2 = VI6_SRU_CTRL2_PARAMS(16, 36, 255),
+	},
+};
+
+static int sru_s_stream(struct v4l2_subdev *subdev, int enable)
+{
+	struct vsp1_sru *sru = to_sru(subdev);
+	const struct vsp1_sru_param *param;
+	struct v4l2_mbus_framefmt *input;
+	struct v4l2_mbus_framefmt *output;
+	bool upscale;
+	u32 ctrl0;
+
+	if (!enable)
+		return 0;
+
+	input = &sru->entity.formats[SRU_PAD_SINK];
+	output = &sru->entity.formats[SRU_PAD_SOURCE];
+	upscale = input->width != output->width;
+	param = &vsp1_sru_params[sru->intensity - 1];
+
+	if (input->code == V4L2_MBUS_FMT_ARGB8888_1X32)
+		ctrl0 = VI6_SRU_CTRL0_PARAM2 | VI6_SRU_CTRL0_PARAM3
+		      | VI6_SRU_CTRL0_PARAM4;
+	else
+		ctrl0 = VI6_SRU_CTRL0_PARAM3;
+
+	vsp1_sru_write(sru, VI6_SRU_CTRL0, param->ctrl0 | ctrl0 |
+		       (upscale ? VI6_SRU_CTRL0_MODE_UPSCALE : 0));
+	vsp1_sru_write(sru, VI6_SRU_CTRL1, VI6_SRU_CTRL1_PARAM5);
+	vsp1_sru_write(sru, VI6_SRU_CTRL2, param->ctrl2);
+
+	return 0;
+}
+
+/* -----------------------------------------------------------------------------
+ * V4L2 Subdevice Pad Operations
+ */
+
+static int sru_enum_mbus_code(struct v4l2_subdev *subdev,
+			      struct v4l2_subdev_fh *fh,
+			      struct v4l2_subdev_mbus_code_enum *code)
+{
+	static const unsigned int codes[] = {
+		V4L2_MBUS_FMT_ARGB8888_1X32,
+		V4L2_MBUS_FMT_AYUV8_1X32,
+	};
+	struct v4l2_mbus_framefmt *format;
+
+	if (code->pad == SRU_PAD_SINK) {
+		if (code->index >= ARRAY_SIZE(codes))
+			return -EINVAL;
+
+		code->code = codes[code->index];
+	} else {
+		/* The SRU can't perform format conversion, the sink format is
+		 * always identical to the source format.
+		 */
+		if (code->index)
+			return -EINVAL;
+
+		format = v4l2_subdev_get_try_format(fh, SRU_PAD_SINK);
+		code->code = format->code;
+	}
+
+	return 0;
+}
+
+static int sru_enum_frame_size(struct v4l2_subdev *subdev,
+			       struct v4l2_subdev_fh *fh,
+			       struct v4l2_subdev_frame_size_enum *fse)
+{
+	struct v4l2_mbus_framefmt *format;
+
+	format = v4l2_subdev_get_try_format(fh, SRU_PAD_SINK);
+
+	if (fse->index || fse->code != format->code)
+		return -EINVAL;
+
+	if (fse->pad == SRU_PAD_SINK) {
+		fse->min_width = SRU_MIN_SIZE;
+		fse->max_width = SRU_MAX_SIZE;
+		fse->min_height = SRU_MIN_SIZE;
+		fse->max_height = SRU_MAX_SIZE;
+	} else {
+		fse->min_width = format->width;
+		fse->min_height = format->height;
+		if (format->width <= SRU_MAX_SIZE / 2 &&
+		    format->height <= SRU_MAX_SIZE / 2) {
+			fse->max_width = format->width * 2;
+			fse->max_height = format->height * 2;
+		} else {
+			fse->max_width = format->width;
+			fse->max_height = format->height;
+		}
+	}
+
+	return 0;
+}
+
+static int sru_get_format(struct v4l2_subdev *subdev, struct v4l2_subdev_fh *fh,
+			  struct v4l2_subdev_format *fmt)
+{
+	struct vsp1_sru *sru = to_sru(subdev);
+
+	fmt->format = *vsp1_entity_get_pad_format(&sru->entity, fh, fmt->pad,
+						  fmt->which);
+
+	return 0;
+}
+
+static void sru_try_format(struct vsp1_sru *sru, struct v4l2_subdev_fh *fh,
+			   unsigned int pad, struct v4l2_mbus_framefmt *fmt,
+			   enum v4l2_subdev_format_whence which)
+{
+	struct v4l2_mbus_framefmt *format;
+	unsigned int input_area;
+	unsigned int output_area;
+
+	switch (pad) {
+	case SRU_PAD_SINK:
+		/* Default to YUV if the requested format is not supported. */
+		if (fmt->code != V4L2_MBUS_FMT_ARGB8888_1X32 &&
+		    fmt->code != V4L2_MBUS_FMT_AYUV8_1X32)
+			fmt->code = V4L2_MBUS_FMT_AYUV8_1X32;
+
+		fmt->width = clamp(fmt->width, SRU_MIN_SIZE, SRU_MAX_SIZE);
+		fmt->height = clamp(fmt->height, SRU_MIN_SIZE, SRU_MAX_SIZE);
+		break;
+
+	case SRU_PAD_SOURCE:
+		/* The SRU can't perform format conversion. */
+		format = vsp1_entity_get_pad_format(&sru->entity, fh,
+						    SRU_PAD_SINK, which);
+		fmt->code = format->code;
+
+		/* We can upscale by two in both directions, but not
+		 * independently. Compare the input and output rectangle areas
+		 * (avoiding integer overflows on the output): if the requested
+		 * output area is larger than 1.5^2 times the input area,
+		 * upscale by two, otherwise don't scale.
+		 */
+		input_area = format->width * format->height;
+		output_area = min(fmt->width, SRU_MAX_SIZE)
+			    * min(fmt->height, SRU_MAX_SIZE);
+
+		if (fmt->width <= SRU_MAX_SIZE / 2 &&
+		    fmt->height <= SRU_MAX_SIZE / 2 &&
+		    output_area > input_area * 9 / 4) {
+			fmt->width = format->width * 2;
+			fmt->height = format->height * 2;
+		} else {
+			fmt->width = format->width;
+			fmt->height = format->height;
+		}
+		break;
+	}
+
+	fmt->field = V4L2_FIELD_NONE;
+	fmt->colorspace = V4L2_COLORSPACE_SRGB;
+}
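sru_try_format() only ever produces two source sizes: identical to the sink format, or doubled in both directions. The 9/4 factor is 1.5 squared, so the doubled mode is chosen only when the requested output area exceeds the input area by more than 1.5x per dimension on average, and the requested size itself is no more than half the 8190-pixel limit. A standalone sketch of the same comparison, with MAX_SIZE standing in for SRU_MAX_SIZE:

#include <stdio.h>

#define MAX_SIZE 8190U	/* stand-in for SRU_MAX_SIZE */

/* Return 1 if the requested output size should select the 2x upscale mode. */
static int wants_upscale(unsigned int in_w, unsigned int in_h,
			 unsigned int req_w, unsigned int req_h)
{
	unsigned int input_area = in_w * in_h;
	unsigned int output_area = (req_w < MAX_SIZE ? req_w : MAX_SIZE)
				 * (req_h < MAX_SIZE ? req_h : MAX_SIZE);

	return req_w <= MAX_SIZE / 2 && req_h <= MAX_SIZE / 2 &&
	       output_area > input_area * 9 / 4;
}

int main(void)
{
	/* 1280x720 input: asking for 1600x900 stays 1:1, 2000x1200 doubles. */
	printf("1600x900  -> %s\n", wants_upscale(1280, 720, 1600, 900) ? "2x" : "1x");
	printf("2000x1200 -> %s\n", wants_upscale(1280, 720, 2000, 1200) ? "2x" : "1x");
	return 0;
}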
+
+static int sru_set_format(struct v4l2_subdev *subdev, struct v4l2_subdev_fh *fh,
+			  struct v4l2_subdev_format *fmt)
+{
+	struct vsp1_sru *sru = to_sru(subdev);
+	struct v4l2_mbus_framefmt *format;
+
+	sru_try_format(sru, fh, fmt->pad, &fmt->format, fmt->which);
+
+	format = vsp1_entity_get_pad_format(&sru->entity, fh, fmt->pad,
+					    fmt->which);
+	*format = fmt->format;
+
+	if (fmt->pad == SRU_PAD_SINK) {
+		/* Propagate the format to the source pad. */
+		format = vsp1_entity_get_pad_format(&sru->entity, fh,
+						    SRU_PAD_SOURCE, fmt->which);
+		*format = fmt->format;
+
+		sru_try_format(sru, fh, SRU_PAD_SOURCE, format, fmt->which);
+	}
+
+	return 0;
+}
+
+/* -----------------------------------------------------------------------------
+ * V4L2 Subdevice Operations
+ */
+
+static struct v4l2_subdev_video_ops sru_video_ops = {
+	.s_stream = sru_s_stream,
+};
+
+static struct v4l2_subdev_pad_ops sru_pad_ops = {
+	.enum_mbus_code = sru_enum_mbus_code,
+	.enum_frame_size = sru_enum_frame_size,
+	.get_fmt = sru_get_format,
+	.set_fmt = sru_set_format,
+};
+
+static struct v4l2_subdev_ops sru_ops = {
+	.video	= &sru_video_ops,
+	.pad    = &sru_pad_ops,
+};
+
+/* -----------------------------------------------------------------------------
+ * Initialization and Cleanup
+ */
+
+struct vsp1_sru *vsp1_sru_create(struct vsp1_device *vsp1)
+{
+	struct v4l2_subdev *subdev;
+	struct vsp1_sru *sru;
+	int ret;
+
+	sru = devm_kzalloc(vsp1->dev, sizeof(*sru), GFP_KERNEL);
+	if (sru == NULL)
+		return ERR_PTR(-ENOMEM);
+
+	sru->entity.type = VSP1_ENTITY_SRU;
+	sru->entity.id = VI6_DPR_NODE_SRU;
+
+	ret = vsp1_entity_init(vsp1, &sru->entity, 2);
+	if (ret < 0)
+		return ERR_PTR(ret);
+
+	/* Initialize the V4L2 subdev. */
+	subdev = &sru->entity.subdev;
+	v4l2_subdev_init(subdev, &sru_ops);
+
+	subdev->entity.ops = &vsp1_media_ops;
+	subdev->internal_ops = &vsp1_subdev_internal_ops;
+	snprintf(subdev->name, sizeof(subdev->name), "%s sru",
+		 dev_name(vsp1->dev));
+	v4l2_set_subdevdata(subdev, sru);
+	subdev->flags |= V4L2_SUBDEV_FL_HAS_DEVNODE;
+
+	vsp1_entity_init_formats(subdev, NULL);
+
+	/* Initialize the control handler. */
+	v4l2_ctrl_handler_init(&sru->ctrls, 1);
+	v4l2_ctrl_new_custom(&sru->ctrls, &sru_intensity_control, NULL);
+	v4l2_ctrl_handler_setup(&sru->ctrls);
+	sru->entity.subdev.ctrl_handler = &sru->ctrls;
+
+	return sru;
+}
diff --git a/drivers/media/platform/vsp1/vsp1_sru.h b/drivers/media/platform/vsp1/vsp1_sru.h
new file mode 100644
index 0000000..381870b
--- /dev/null
+++ b/drivers/media/platform/vsp1/vsp1_sru.h
@@ -0,0 +1,41 @@
+/*
+ * vsp1_sru.h  --  R-Car VSP1 Super Resolution Unit
+ *
+ * Copyright (C) 2013 Renesas Corporation
+ *
+ * Contact: Laurent Pinchart (laurent.pinchart@ideasonboard.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+#ifndef __VSP1_SRU_H__
+#define __VSP1_SRU_H__
+
+#include <media/media-entity.h>
+#include <media/v4l2-ctrls.h>
+#include <media/v4l2-subdev.h>
+
+#include "vsp1_entity.h"
+
+struct vsp1_device;
+
+#define SRU_PAD_SINK				0
+#define SRU_PAD_SOURCE				1
+
+struct vsp1_sru {
+	struct vsp1_entity entity;
+
+	struct v4l2_ctrl_handler ctrls;
+	unsigned int intensity;
+};
+
+static inline struct vsp1_sru *to_sru(struct v4l2_subdev *subdev)
+{
+	return container_of(subdev, struct vsp1_sru, entity.subdev);
+}
+
+struct vsp1_sru *vsp1_sru_create(struct vsp1_device *vsp1);
+
+#endif /* __VSP1_SRU_H__ */
diff --git a/drivers/media/platform/vsp1/vsp1_video.c b/drivers/media/platform/vsp1/vsp1_video.c
index 4b0ac07..b4687a8 100644
--- a/drivers/media/platform/vsp1/vsp1_video.c
+++ b/drivers/media/platform/vsp1/vsp1_video.c
@@ -488,11 +488,17 @@
  * This function completes the current buffer by filling its sequence number,
  * time stamp and payload size, and hands it back to the videobuf core.
  *
+ * When operating in DU output mode (deep pipeline to the DU through the LIF),
+ * the VSP1 needs to constantly supply frames to the display. In that case, if
+ * no other buffer is queued, reuse the one that has just been processed instead
+ * of handing it back to the videobuf core.
+ *
  * Return the next queued buffer or NULL if the queue is empty.
  */
 static struct vsp1_video_buffer *
 vsp1_video_complete_buffer(struct vsp1_video *video)
 {
+	struct vsp1_pipeline *pipe = to_vsp1_pipeline(&video->video.entity);
 	struct vsp1_video_buffer *next = NULL;
 	struct vsp1_video_buffer *done;
 	unsigned long flags;
@@ -507,6 +513,13 @@
 
 	done = list_first_entry(&video->irqqueue,
 				struct vsp1_video_buffer, queue);
+
+	/* In DU output mode reuse the buffer if the list is singular. */
+	if (pipe->lif && list_is_singular(&video->irqqueue)) {
+		spin_unlock_irqrestore(&video->irqlock, flags);
+		return done;
+	}
+
 	list_del(&done->queue);
 
 	if (!list_empty(&video->irqqueue))
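The singular-list check above keeps a display pipeline fed: when the VSP1 drives the DU through the LIF and only one buffer is queued, the just-completed buffer is immediately reused instead of being returned to videobuf2. A toy model of that decision in plain C, only to make the two outcomes explicit:

#include <stdio.h>

/* Toy model: 1 = recycle the current buffer, 0 = complete it and advance. */
static int recycle_current(int du_output_mode, unsigned int queued_buffers)
{
	/* A display pipeline must always have a frame to scan out, so the
	 * last remaining buffer is never given back to userspace. */
	return du_output_mode && queued_buffers == 1;
}

int main(void)
{
	printf("DU mode, 1 buffer : %s\n", recycle_current(1, 1) ? "recycle" : "complete");
	printf("DU mode, 3 buffers: %s\n", recycle_current(1, 3) ? "recycle" : "complete");
	printf("mem2mem, 1 buffer : %s\n", recycle_current(0, 1) ? "recycle" : "complete");
	return 0;
}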
diff --git a/drivers/media/platform/vsp1/vsp1_wpf.c b/drivers/media/platform/vsp1/vsp1_wpf.c
index db4b85e..7baed81 100644
--- a/drivers/media/platform/vsp1/vsp1_wpf.c
+++ b/drivers/media/platform/vsp1/vsp1_wpf.c
@@ -48,8 +48,7 @@
 	struct vsp1_pipeline *pipe =
 		to_vsp1_pipeline(&wpf->entity.subdev.entity);
 	struct vsp1_device *vsp1 = wpf->entity.vsp1;
-	const struct v4l2_mbus_framefmt *format =
-		&wpf->entity.formats[RWPF_PAD_SOURCE];
+	const struct v4l2_rect *crop = &wpf->crop;
 	unsigned int i;
 	u32 srcrpf = 0;
 	u32 outfmt = 0;
@@ -68,7 +67,7 @@
 
 	vsp1_wpf_write(wpf, VI6_WPF_SRCRPF, srcrpf);
 
-	/* Destination stride. Cropping isn't supported yet. */
+	/* Destination stride. */
 	if (!pipe->lif) {
 		struct v4l2_pix_format_mplane *format = &wpf->video.format;
 
@@ -79,10 +78,12 @@
 				       format->plane_fmt[1].bytesperline);
 	}
 
-	vsp1_wpf_write(wpf, VI6_WPF_HSZCLIP,
-		       format->width << VI6_WPF_SZCLIP_SIZE_SHIFT);
-	vsp1_wpf_write(wpf, VI6_WPF_VSZCLIP,
-		       format->height << VI6_WPF_SZCLIP_SIZE_SHIFT);
+	vsp1_wpf_write(wpf, VI6_WPF_HSZCLIP, VI6_WPF_SZCLIP_EN |
+		       (crop->left << VI6_WPF_SZCLIP_OFST_SHIFT) |
+		       (crop->width << VI6_WPF_SZCLIP_SIZE_SHIFT));
+	vsp1_wpf_write(wpf, VI6_WPF_VSZCLIP, VI6_WPF_SZCLIP_EN |
+		       (crop->top << VI6_WPF_SZCLIP_OFST_SHIFT) |
+		       (crop->height << VI6_WPF_SZCLIP_SIZE_SHIFT));
 
 	/* Format */
 	if (!pipe->lif) {
@@ -130,6 +131,8 @@
 	.enum_frame_size = vsp1_rwpf_enum_frame_size,
 	.get_fmt = vsp1_rwpf_get_format,
 	.set_fmt = vsp1_rwpf_set_format,
+	.get_selection = vsp1_rwpf_get_selection,
+	.set_selection = vsp1_rwpf_set_selection,
 };
 
 static struct v4l2_subdev_ops wpf_ops = {
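The HSZCLIP/VSZCLIP writes in the hunk above now pack an enable bit, an offset and a size into each register, which is how WPF output cropping is expressed to the hardware. A standalone sketch of that style of field packing; the shift positions and enable bit below are placeholders, not the real VI6_WPF_SZCLIP layout (that lives in vsp1_regs.h):

#include <stdio.h>
#include <stdint.h>

/* Placeholder field layout -- NOT the real VI6_WPF_SZCLIP bit positions. */
#define SZCLIP_EN		(1U << 28)
#define SZCLIP_OFST_SHIFT	16
#define SZCLIP_SIZE_SHIFT	0

static uint32_t szclip(unsigned int offset, unsigned int size)
{
	return SZCLIP_EN | (offset << SZCLIP_OFST_SHIFT) | (size << SZCLIP_SIZE_SHIFT);
}

int main(void)
{
	/* Crop a 1920x1080 frame to 1280x720 at (100, 50). */
	printf("HSZCLIP = 0x%08x\n", szclip(100, 1280));
	printf("VSZCLIP = 0x%08x\n", szclip(50, 720));
	return 0;
}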
diff --git a/drivers/media/radio/Kconfig b/drivers/media/radio/Kconfig
index 6ecdc39..192f36f2 100644
--- a/drivers/media/radio/Kconfig
+++ b/drivers/media/radio/Kconfig
@@ -21,6 +21,12 @@
 
 source "drivers/media/radio/si470x/Kconfig"
 
+config RADIO_SI4713
+	tristate "Silicon Labs Si4713 FM Radio with RDS Transmitter support"
+	depends on VIDEO_V4L2
+
+source "drivers/media/radio/si4713/Kconfig"
+
 config RADIO_SI476X
 	tristate "Silicon Laboratories Si476x I2C FM Radio"
 	depends on I2C && VIDEO_V4L2
@@ -113,29 +119,6 @@
 	  To compile this driver as a module, choose M here: the
 	  module will be called radio-shark2.
 
-config I2C_SI4713
-	tristate "I2C driver for Silicon Labs Si4713 device"
-	depends on I2C && VIDEO_V4L2
-	---help---
-	  Say Y here if you want support to Si4713 I2C device.
-	  This device driver supports only i2c bus.
-
-	  To compile this driver as a module, choose M here: the
-	  module will be called si4713.
-
-config RADIO_SI4713
-	tristate "Silicon Labs Si4713 FM Radio Transmitter support"
-	depends on I2C && VIDEO_V4L2
-	select I2C_SI4713
-	---help---
-	  Say Y here if you want support to Si4713 FM Radio Transmitter.
-	  This device can transmit audio through FM. It can transmit
-	  RDS and RBDS signals as well. This module is the v4l2 radio
-	  interface for the i2c driver of this device.
-
-	  To compile this driver as a module, choose M here: the
-	  module will be called radio-si4713.
-
 config USB_KEENE
 	tristate "Keene FM Transmitter USB support"
 	depends on USB && VIDEO_V4L2
@@ -146,6 +129,20 @@
 	  To compile this driver as a module, choose M here: the
 	  module will be called radio-keene.
 
+config USB_RAREMONO
+	tristate "Thanko's Raremono AM/FM/SW radio support"
+	depends on USB && VIDEO_V4L2
+	---help---
+	  The 'Thanko's Raremono' device contains the Si4734 chip from Silicon Labs Inc.
+	  It is one of the very few, perhaps the only, consumer USB radio devices
+	  that can receive the AM/FM/SW bands.
+
+	  Say Y here if you want to connect this type of AM/FM/SW receiver
+	  to your computer's USB port.
+
+	  To compile this driver as a module, choose M here: the
+	  module will be called radio-raremono.
+
 config USB_MA901
 	tristate "Masterkit MA901 USB FM radio support"
 	depends on USB && VIDEO_V4L2
diff --git a/drivers/media/radio/Makefile b/drivers/media/radio/Makefile
index 3b64560..120e791 100644
--- a/drivers/media/radio/Makefile
+++ b/drivers/media/radio/Makefile
@@ -17,12 +17,11 @@
 obj-$(CONFIG_RADIO_ZOLTRIX) += radio-zoltrix.o
 obj-$(CONFIG_RADIO_GEMTEK) += radio-gemtek.o
 obj-$(CONFIG_RADIO_TRUST) += radio-trust.o
-obj-$(CONFIG_I2C_SI4713) += si4713-i2c.o
-obj-$(CONFIG_RADIO_SI4713) += radio-si4713.o
 obj-$(CONFIG_RADIO_SI476X) += radio-si476x.o
 obj-$(CONFIG_RADIO_MIROPCM20) += radio-miropcm20.o
 obj-$(CONFIG_USB_DSBR) += dsbr100.o
 obj-$(CONFIG_RADIO_SI470X) += si470x/
+obj-$(CONFIG_RADIO_SI4713) += si4713/
 obj-$(CONFIG_USB_MR800) += radio-mr800.o
 obj-$(CONFIG_USB_KEENE) += radio-keene.o
 obj-$(CONFIG_USB_MA901) += radio-ma901.o
@@ -33,6 +32,7 @@
 obj-$(CONFIG_RADIO_WL1273) += radio-wl1273.o
 obj-$(CONFIG_RADIO_WL128X) += wl128x/
 obj-$(CONFIG_RADIO_TEA575X) += tea575x.o
+obj-$(CONFIG_USB_RAREMONO) += radio-raremono.o
 
 shark2-objs := radio-shark2.o radio-tea5777.o
 
diff --git a/drivers/media/radio/radio-raremono.c b/drivers/media/radio/radio-raremono.c
new file mode 100644
index 0000000..7b3bdbb
--- /dev/null
+++ b/drivers/media/radio/radio-raremono.c
@@ -0,0 +1,387 @@
+/*
+ * Copyright 2013 Cisco Systems, Inc. and/or its affiliates. All rights reserved.
+ *
+ * This program is free software; you may redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/slab.h>
+#include <linux/input.h>
+#include <linux/usb.h>
+#include <linux/hid.h>
+#include <linux/mutex.h>
+#include <linux/videodev2.h>
+#include <asm/unaligned.h>
+#include <media/v4l2-device.h>
+#include <media/v4l2-ioctl.h>
+#include <media/v4l2-ctrls.h>
+#include <media/v4l2-event.h>
+
+/*
+ * 'Thanko's Raremono' is a Japanese si4734-based AM/FM/SW USB receiver:
+ *
+ * http://www.raremono.jp/product/484.html/
+ *
+ * The USB protocol has been reverse engineered using Wireshark, initially
+ * by Dinesh Ram <dinesh.ram@cern.ch> and finished by Hans Verkuil
+ * <hverkuil@xs4all.nl>.
+ *
+ * Sadly the firmware used in this product hides lots of goodies since the
+ * si4734 has more features than are supported by the firmware. Oh well...
+ */
+
+/* driver and module definitions */
+MODULE_AUTHOR("Hans Verkuil <hverkuil@xs4all.nl>");
+MODULE_DESCRIPTION("Thanko's Raremono AM/FM/SW Receiver USB driver");
+MODULE_LICENSE("GPL v2");
+
+/*
+ * The device announces itself as Cygnal Integrated Products, Inc.
+ *
+ * The vendor and product IDs (and in fact all other lsusb information as
+ * well) are identical to the si470x Silicon Labs USB FM Radio Reference
+ * Design board, even though this card has a si4734 device. Clearly the
+ * designer of this product never bothered to change the USB IDs.
+ */
+
+/* USB Device ID List */
+static struct usb_device_id usb_raremono_device_table[] = {
+	{USB_DEVICE_AND_INTERFACE_INFO(0x10c4, 0x818a, USB_CLASS_HID, 0, 0) },
+	{ }						/* Terminating entry */
+};
+
+MODULE_DEVICE_TABLE(usb, usb_raremono_device_table);
+
+#define BUFFER_LENGTH 64
+
+/* Timeout is set to a high value, could probably be reduced. Need more tests */
+#define USB_TIMEOUT 10000
+
+/* Frequency limits in kHz */
+#define FM_FREQ_RANGE_LOW	64000
+#define FM_FREQ_RANGE_HIGH	108000
+
+#define AM_FREQ_RANGE_LOW	520
+#define AM_FREQ_RANGE_HIGH	1710
+
+#define SW_FREQ_RANGE_LOW	2300
+#define SW_FREQ_RANGE_HIGH	26100
+
+enum { BAND_FM, BAND_AM, BAND_SW };
+
+static const struct v4l2_frequency_band bands[] = {
+	/* Band FM */
+	{
+		.type = V4L2_TUNER_RADIO,
+		.index = 0,
+		.capability = V4L2_TUNER_CAP_LOW | V4L2_TUNER_CAP_STEREO |
+			      V4L2_TUNER_CAP_FREQ_BANDS,
+		.rangelow   = FM_FREQ_RANGE_LOW * 16,
+		.rangehigh  = FM_FREQ_RANGE_HIGH * 16,
+		.modulation = V4L2_BAND_MODULATION_FM,
+	},
+	/* Band AM */
+	{
+		.type = V4L2_TUNER_RADIO,
+		.index = 1,
+		.capability = V4L2_TUNER_CAP_LOW | V4L2_TUNER_CAP_FREQ_BANDS,
+		.rangelow   = AM_FREQ_RANGE_LOW * 16,
+		.rangehigh  = AM_FREQ_RANGE_HIGH * 16,
+		.modulation = V4L2_BAND_MODULATION_AM,
+	},
+	/* Band SW */
+	{
+		.type = V4L2_TUNER_RADIO,
+		.index = 2,
+		.capability = V4L2_TUNER_CAP_LOW | V4L2_TUNER_CAP_FREQ_BANDS,
+		.rangelow   = SW_FREQ_RANGE_LOW * 16,
+		.rangehigh  = SW_FREQ_RANGE_HIGH * 16,
+		.modulation = V4L2_BAND_MODULATION_AM,
+	},
+};
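Every band entry above multiplies its kHz limits by 16 because the tuner advertises V4L2_TUNER_CAP_LOW, which switches VIDIOC_G/S_FREQUENCY to units of 62.5 Hz (1 kHz = 16 units). A trivial standalone conversion helper, just to make the units explicit:

#include <stdio.h>

/* With V4L2_TUNER_CAP_LOW set, frequencies are expressed in 62.5 Hz units. */
static unsigned int khz_to_v4l2(unsigned int khz) { return khz * 16; }
static unsigned int v4l2_to_khz(unsigned int units) { return units / 16; }

int main(void)
{
	printf("95160 kHz -> %u V4L2 units\n", khz_to_v4l2(95160));	/* 1522560 */
	printf("1522560 units -> %u kHz\n", v4l2_to_khz(1522560));
	return 0;
}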
+
+struct raremono_device {
+	struct usb_device *usbdev;
+	struct usb_interface *intf;
+	struct video_device vdev;
+	struct v4l2_device v4l2_dev;
+	struct mutex lock;
+
+	u8 *buffer;
+	u32 band;
+	unsigned curfreq;
+};
+
+static inline struct raremono_device *to_raremono_dev(struct v4l2_device *v4l2_dev)
+{
+	return container_of(v4l2_dev, struct raremono_device, v4l2_dev);
+}
+
+/* Set frequency. */
+static int raremono_cmd_main(struct raremono_device *radio, unsigned band, unsigned freq)
+{
+	unsigned band_offset;
+	int ret;
+
+	switch (band) {
+	case BAND_FM:
+		band_offset = 1;
+		freq /= 10;
+		break;
+	case BAND_AM:
+		band_offset = 0;
+		break;
+	default:
+		band_offset = 2;
+		break;
+	}
+	radio->buffer[0] = 0x04 + band_offset;
+	radio->buffer[1] = freq >> 8;
+	radio->buffer[2] = freq & 0xff;
+
+	ret = usb_control_msg(radio->usbdev, usb_sndctrlpipe(radio->usbdev, 0),
+			HID_REQ_SET_REPORT,
+			USB_TYPE_CLASS | USB_RECIP_INTERFACE | USB_DIR_OUT,
+			0x0300 + radio->buffer[0], 2,
+			radio->buffer, 3, USB_TIMEOUT);
+
+	if (ret < 0) {
+		dev_warn(radio->v4l2_dev.dev, "%s failed (%d)\n", __func__, ret);
+		return ret;
+	}
+	radio->curfreq = (band == BAND_FM) ? freq * 10 : freq;
+	return 0;
+}
+
+/* Handle unplugging the device.
+ * We call video_unregister_device in any case; the device-managed
+ * allocations are released once the interface is unbound.
+ */
+static void usb_raremono_disconnect(struct usb_interface *intf)
+{
+	struct raremono_device *radio = to_raremono_dev(usb_get_intfdata(intf));
+
+	dev_info(&intf->dev, "Thanko's Raremono disconnected\n");
+
+	mutex_lock(&radio->lock);
+	usb_set_intfdata(intf, NULL);
+	video_unregister_device(&radio->vdev);
+	v4l2_device_disconnect(&radio->v4l2_dev);
+	mutex_unlock(&radio->lock);
+	v4l2_device_put(&radio->v4l2_dev);
+}
+
+/*
+ * Linux Video interface
+ */
+static int vidioc_querycap(struct file *file, void *priv,
+					struct v4l2_capability *v)
+{
+	struct raremono_device *radio = video_drvdata(file);
+
+	strlcpy(v->driver, "radio-raremono", sizeof(v->driver));
+	strlcpy(v->card, "Thanko's Raremono", sizeof(v->card));
+	usb_make_path(radio->usbdev, v->bus_info, sizeof(v->bus_info));
+	v->device_caps = V4L2_CAP_TUNER | V4L2_CAP_RADIO;
+	v->capabilities = v->device_caps | V4L2_CAP_DEVICE_CAPS;
+	return 0;
+}
+
+static int vidioc_enum_freq_bands(struct file *file, void *priv,
+		struct v4l2_frequency_band *band)
+{
+	if (band->tuner != 0)
+		return -EINVAL;
+
+	if (band->index >= ARRAY_SIZE(bands))
+		return -EINVAL;
+
+	*band = bands[band->index];
+
+	return 0;
+}
+
+static int vidioc_g_tuner(struct file *file, void *priv,
+		struct v4l2_tuner *v)
+{
+	struct raremono_device *radio = video_drvdata(file);
+	int ret;
+
+	if (v->index > 0)
+		return -EINVAL;
+
+	strlcpy(v->name, "AM/FM/SW", sizeof(v->name));
+	v->capability = V4L2_TUNER_CAP_LOW | V4L2_TUNER_CAP_STEREO |
+		V4L2_TUNER_CAP_FREQ_BANDS;
+	v->rangelow = AM_FREQ_RANGE_LOW * 16;
+	v->rangehigh = FM_FREQ_RANGE_HIGH * 16;
+	v->rxsubchans = V4L2_TUNER_SUB_STEREO | V4L2_TUNER_SUB_MONO;
+	v->audmode = (radio->curfreq < FM_FREQ_RANGE_LOW) ?
+		V4L2_TUNER_MODE_MONO : V4L2_TUNER_MODE_STEREO;
+	memset(radio->buffer, 1, BUFFER_LENGTH);
+	ret = usb_control_msg(radio->usbdev, usb_rcvctrlpipe(radio->usbdev, 0),
+			1, 0xa1, 0x030d, 2, radio->buffer, BUFFER_LENGTH, USB_TIMEOUT);
+
+	if (ret < 0) {
+		dev_warn(radio->v4l2_dev.dev, "%s failed (%d)\n", __func__, ret);
+		return ret;
+	}
+	v->signal = ((radio->buffer[1] & 0xf) << 8 | radio->buffer[2]) << 4;
+	return 0;
+}
+
+static int vidioc_s_tuner(struct file *file, void *priv,
+					const struct v4l2_tuner *v)
+{
+	return v->index ? -EINVAL : 0;
+}
+
+static int vidioc_s_frequency(struct file *file, void *priv,
+				const struct v4l2_frequency *f)
+{
+	struct raremono_device *radio = video_drvdata(file);
+	u32 freq = f->frequency;
+	unsigned band;
+
+	if (f->tuner != 0 || f->type != V4L2_TUNER_RADIO)
+		return -EINVAL;
+
+	if (f->frequency >= (FM_FREQ_RANGE_LOW + SW_FREQ_RANGE_HIGH) * 8)
+		band = BAND_FM;
+	else if (f->frequency <= (AM_FREQ_RANGE_HIGH + SW_FREQ_RANGE_LOW) * 8)
+		band = BAND_AM;
+	else
+		band = BAND_SW;
+
+	freq = clamp_t(u32, f->frequency, bands[band].rangelow, bands[band].rangehigh);
+	return raremono_cmd_main(radio, band, freq / 16);
+}
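vidioc_s_frequency() above selects the band by comparing against (low + high) * 8, which is the midpoint of the gap between two adjacent bands expressed in 62.5 Hz units ((a + b) / 2 kHz equals (a + b) * 8 units), and then clamps the frequency into the chosen band. The same selection as a standalone sketch with the driver's kHz limits:

#include <stdio.h>

enum { BAND_FM, BAND_AM, BAND_SW };

/* kHz limits as in the driver. */
#define FM_LOW  64000U
#define AM_HIGH 1710U
#define SW_LOW  2300U
#define SW_HIGH 26100U

/* freq is in V4L2 units of 62.5 Hz (kHz * 16). */
static int pick_band(unsigned int freq)
{
	if (freq >= (FM_LOW + SW_HIGH) * 8)	/* above the SW/FM midpoint */
		return BAND_FM;
	if (freq <= (AM_HIGH + SW_LOW) * 8)	/* below the AM/SW midpoint */
		return BAND_AM;
	return BAND_SW;
}

int main(void)
{
	static const char *names[] = { "FM", "AM", "SW" };

	printf("1000 kHz  -> %s\n", names[pick_band(1000 * 16)]);
	printf("6000 kHz  -> %s\n", names[pick_band(6000 * 16)]);
	printf("95160 kHz -> %s\n", names[pick_band(95160 * 16)]);
	return 0;
}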
+
+static int vidioc_g_frequency(struct file *file, void *priv,
+				struct v4l2_frequency *f)
+{
+	struct raremono_device *radio = video_drvdata(file);
+
+	if (f->tuner != 0)
+		return -EINVAL;
+	f->type = V4L2_TUNER_RADIO;
+	f->frequency = radio->curfreq * 16;
+	return 0;
+}
+
+/* File system interface */
+static const struct v4l2_file_operations usb_raremono_fops = {
+	.owner		= THIS_MODULE,
+	.open           = v4l2_fh_open,
+	.release        = v4l2_fh_release,
+	.unlocked_ioctl	= video_ioctl2,
+};
+
+static const struct v4l2_ioctl_ops usb_raremono_ioctl_ops = {
+	.vidioc_querycap = vidioc_querycap,
+	.vidioc_g_tuner = vidioc_g_tuner,
+	.vidioc_s_tuner = vidioc_s_tuner,
+	.vidioc_g_frequency = vidioc_g_frequency,
+	.vidioc_s_frequency = vidioc_s_frequency,
+	.vidioc_enum_freq_bands = vidioc_enum_freq_bands,
+};
+
+/* check if the device is present and register with v4l and usb if it is */
+static int usb_raremono_probe(struct usb_interface *intf,
+				const struct usb_device_id *id)
+{
+	struct raremono_device *radio;
+	int retval = 0;
+
+	radio = devm_kzalloc(&intf->dev, sizeof(struct raremono_device), GFP_KERNEL);
+	if (radio)
+		radio->buffer = devm_kmalloc(&intf->dev, BUFFER_LENGTH, GFP_KERNEL);
+
+	if (!radio || !radio->buffer)
+		return -ENOMEM;
+
+	radio->usbdev = interface_to_usbdev(intf);
+	radio->intf = intf;
+
+	/*
+	 * This device uses the same USB IDs as the si470x SiLabs reference
+	 * design. So do an additional check: attempt to read the device ID
+	 * from the si470x: the lower 12 bits are 0x0242 for the si470x. The
+	 * Raremono always returns 0x0800 (the meaning of that is unknown, but
+	 * at least it works).
+	 *
+	 * We use this check to determine which device we are dealing with.
+	 */
+	msleep(20);
+	retval = usb_control_msg(radio->usbdev,
+		usb_rcvctrlpipe(radio->usbdev, 0),
+		HID_REQ_GET_REPORT,
+		USB_TYPE_CLASS | USB_RECIP_INTERFACE | USB_DIR_IN,
+		1, 2,
+		radio->buffer, 3, 500);
+	if (retval != 3 ||
+	    (get_unaligned_be16(&radio->buffer[1]) & 0xfff) == 0x0242) {
+		dev_info(&intf->dev, "this is not Thanko's Raremono.\n");
+		return -ENODEV;
+	}
+
+	dev_info(&intf->dev, "Thanko's Raremono connected: (%04X:%04X)\n",
+			id->idVendor, id->idProduct);
+
+	retval = v4l2_device_register(&intf->dev, &radio->v4l2_dev);
+	if (retval < 0) {
+		dev_err(&intf->dev, "couldn't register v4l2_device\n");
+		return retval;
+	}
+
+	mutex_init(&radio->lock);
+
+	strlcpy(radio->vdev.name, radio->v4l2_dev.name,
+		sizeof(radio->vdev.name));
+	radio->vdev.v4l2_dev = &radio->v4l2_dev;
+	radio->vdev.fops = &usb_raremono_fops;
+	radio->vdev.ioctl_ops = &usb_raremono_ioctl_ops;
+	radio->vdev.lock = &radio->lock;
+	radio->vdev.release = video_device_release_empty;
+
+	usb_set_intfdata(intf, &radio->v4l2_dev);
+
+	video_set_drvdata(&radio->vdev, radio);
+	set_bit(V4L2_FL_USE_FH_PRIO, &radio->vdev.flags);
+
+	raremono_cmd_main(radio, BAND_FM, 95160);
+
+	retval = video_register_device(&radio->vdev, VFL_TYPE_RADIO, -1);
+	if (retval == 0) {
+		dev_info(&intf->dev, "V4L2 device registered as %s\n",
+				video_device_node_name(&radio->vdev));
+		return 0;
+	}
+	dev_err(&intf->dev, "could not register video device\n");
+	v4l2_device_unregister(&radio->v4l2_dev);
+	return retval;
+}
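This probe function and the si470x probe further down disambiguate the shared 10c4:818a USB ID the same way: read a three-byte HID report and mask the big-endian 16-bit value at offset 1 down to 12 bits; 0x242 identifies a genuine si470x, anything else (the Raremono reads back 0x0800) is treated as the Raremono. A standalone sketch of that mask-and-compare on a raw report buffer:

#include <stdio.h>
#include <stdint.h>

/* Big-endian 16-bit read, as get_unaligned_be16() does in the kernel. */
static uint16_t be16(const uint8_t *p)
{
	return (uint16_t)(p[0] << 8 | p[1]);
}

static const char *identify(const uint8_t report[3])
{
	return (be16(&report[1]) & 0xfff) == 0x0242 ? "si470x" : "raremono";
}

int main(void)
{
	uint8_t si470x_report[3] = { 0x01, 0x12, 0x42 };	/* low 12 bits: 0x242 */
	uint8_t raremono_report[3] = { 0x01, 0x08, 0x00 };	/* reads back 0x0800 */

	printf("report 1: %s\n", identify(si470x_report));
	printf("report 2: %s\n", identify(raremono_report));
	return 0;
}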
+
+/* USB subsystem interface */
+static struct usb_driver usb_raremono_driver = {
+	.name			= "radio-raremono",
+	.probe			= usb_raremono_probe,
+	.disconnect		= usb_raremono_disconnect,
+	.id_table		= usb_raremono_device_table,
+};
+
+module_usb_driver(usb_raremono_driver);
diff --git a/drivers/media/radio/si470x/radio-si470x-usb.c b/drivers/media/radio/si470x/radio-si470x-usb.c
index d6d4d60..07ef405 100644
--- a/drivers/media/radio/si470x/radio-si470x-usb.c
+++ b/drivers/media/radio/si470x/radio-si470x-usb.c
@@ -137,6 +137,8 @@
 /* interrupt out endpoint 2 every 1 millisecond */
 #define UNUSED_REPORT		23
 
+#define MAX_REPORT_SIZE		64
+
 
 
 /**************************************************************************
@@ -208,7 +210,7 @@
  */
 static int si470x_get_report(struct si470x_device *radio, void *buf, int size)
 {
-	unsigned char *report = (unsigned char *) buf;
+	unsigned char *report = buf;
 	int retval;
 
 	retval = usb_control_msg(radio->usbdev,
@@ -231,7 +233,7 @@
  */
 static int si470x_set_report(struct si470x_device *radio, void *buf, int size)
 {
-	unsigned char *report = (unsigned char *) buf;
+	unsigned char *report = buf;
 	int retval;
 
 	retval = usb_control_msg(radio->usbdev,
@@ -254,15 +256,14 @@
  */
 int si470x_get_register(struct si470x_device *radio, int regnr)
 {
-	unsigned char buf[REGISTER_REPORT_SIZE];
 	int retval;
 
-	buf[0] = REGISTER_REPORT(regnr);
+	radio->usb_buf[0] = REGISTER_REPORT(regnr);
 
-	retval = si470x_get_report(radio, (void *) &buf, sizeof(buf));
+	retval = si470x_get_report(radio, radio->usb_buf, REGISTER_REPORT_SIZE);
 
 	if (retval >= 0)
-		radio->registers[regnr] = get_unaligned_be16(&buf[1]);
+		radio->registers[regnr] = get_unaligned_be16(&radio->usb_buf[1]);
 
 	return (retval < 0) ? -EINVAL : 0;
 }
@@ -273,13 +274,12 @@
  */
 int si470x_set_register(struct si470x_device *radio, int regnr)
 {
-	unsigned char buf[REGISTER_REPORT_SIZE];
 	int retval;
 
-	buf[0] = REGISTER_REPORT(regnr);
-	put_unaligned_be16(radio->registers[regnr], &buf[1]);
+	radio->usb_buf[0] = REGISTER_REPORT(regnr);
+	put_unaligned_be16(radio->registers[regnr], &radio->usb_buf[1]);
 
-	retval = si470x_set_report(radio, (void *) &buf, sizeof(buf));
+	retval = si470x_set_report(radio, radio->usb_buf, REGISTER_REPORT_SIZE);
 
 	return (retval < 0) ? -EINVAL : 0;
 }
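The si470x hunks above move every report off the stack into the kmalloc'ed usb_buf, presumably because buffers handed to usb_control_msg() have to be DMA-capable, which stack memory is not guaranteed to be. The report layout itself is unchanged: byte 0 holds the report ID and each 16-bit register sits big-endian at offset regnr * 2 + 1. A standalone sketch of that pack/unpack round trip, assuming 2-byte registers as the be16 accessors imply:

#include <stdio.h>
#include <stdint.h>

#define REG_SIZE 2	/* bytes per register, like RADIO_REGISTER_SIZE */
#define NUM_REGS 4	/* toy register file */

static void put_be16(uint8_t *p, uint16_t v) { p[0] = v >> 8; p[1] = v & 0xff; }
static uint16_t get_be16(const uint8_t *p) { return (uint16_t)(p[0] << 8 | p[1]); }

int main(void)
{
	uint16_t regs[NUM_REGS] = { 0x1242, 0xbeef, 0x0000, 0xa5a5 };
	uint8_t report[1 + NUM_REGS * REG_SIZE];
	unsigned int i;

	report[0] = 0x13;	/* report ID byte, value arbitrary here */
	for (i = 0; i < NUM_REGS; i++)
		put_be16(&report[i * REG_SIZE + 1], regs[i]);

	for (i = 0; i < NUM_REGS; i++)
		printf("reg %u: 0x%04x\n", i, get_be16(&report[i * REG_SIZE + 1]));
	return 0;
}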
@@ -295,18 +295,17 @@
  */
 static int si470x_get_all_registers(struct si470x_device *radio)
 {
-	unsigned char buf[ENTIRE_REPORT_SIZE];
 	int retval;
 	unsigned char regnr;
 
-	buf[0] = ENTIRE_REPORT;
+	radio->usb_buf[0] = ENTIRE_REPORT;
 
-	retval = si470x_get_report(radio, (void *) &buf, sizeof(buf));
+	retval = si470x_get_report(radio, radio->usb_buf, ENTIRE_REPORT_SIZE);
 
 	if (retval >= 0)
 		for (regnr = 0; regnr < RADIO_REGISTER_NUM; regnr++)
 			radio->registers[regnr] = get_unaligned_be16(
-				&buf[regnr * RADIO_REGISTER_SIZE + 1]);
+				&radio->usb_buf[regnr * RADIO_REGISTER_SIZE + 1]);
 
 	return (retval < 0) ? -EINVAL : 0;
 }
@@ -323,14 +322,13 @@
 static int si470x_set_led_state(struct si470x_device *radio,
 		unsigned char led_state)
 {
-	unsigned char buf[LED_REPORT_SIZE];
 	int retval;
 
-	buf[0] = LED_REPORT;
-	buf[1] = LED_COMMAND;
-	buf[2] = led_state;
+	radio->usb_buf[0] = LED_REPORT;
+	radio->usb_buf[1] = LED_COMMAND;
+	radio->usb_buf[2] = led_state;
 
-	retval = si470x_set_report(radio, (void *) &buf, sizeof(buf));
+	retval = si470x_set_report(radio, radio->usb_buf, LED_REPORT_SIZE);
 
 	return (retval < 0) ? -EINVAL : 0;
 }
@@ -346,19 +344,18 @@
  */
 static int si470x_get_scratch_page_versions(struct si470x_device *radio)
 {
-	unsigned char buf[SCRATCH_REPORT_SIZE];
 	int retval;
 
-	buf[0] = SCRATCH_REPORT;
+	radio->usb_buf[0] = SCRATCH_REPORT;
 
-	retval = si470x_get_report(radio, (void *) &buf, sizeof(buf));
+	retval = si470x_get_report(radio, radio->usb_buf, SCRATCH_REPORT_SIZE);
 
 	if (retval < 0)
 		dev_warn(&radio->intf->dev, "si470x_get_scratch: "
 			"si470x_get_report returned %d\n", retval);
 	else {
-		radio->software_version = buf[1];
-		radio->hardware_version = buf[2];
+		radio->software_version = radio->usb_buf[1];
+		radio->hardware_version = radio->usb_buf[2];
 	}
 
 	return (retval < 0) ? -EINVAL : 0;
@@ -509,6 +506,7 @@
 	v4l2_device_unregister(&radio->v4l2_dev);
 	kfree(radio->int_in_buffer);
 	kfree(radio->buffer);
+	kfree(radio->usb_buf);
 	kfree(radio);
 }
 
@@ -593,6 +591,11 @@
 		retval = -ENOMEM;
 		goto err_initial;
 	}
+	radio->usb_buf = kmalloc(MAX_REPORT_SIZE, GFP_KERNEL);
+	if (radio->usb_buf == NULL) {
+		retval = -ENOMEM;
+		goto err_radio;
+	}
 	radio->usbdev = interface_to_usbdev(intf);
 	radio->intf = intf;
 	radio->band = 1; /* Default to 76 - 108 MHz */
@@ -612,7 +615,7 @@
 	if (!radio->int_in_endpoint) {
 		dev_info(&intf->dev, "could not find interrupt in endpoint\n");
 		retval = -EIO;
-		goto err_radio;
+		goto err_usbbuf;
 	}
 
 	int_end_size = le16_to_cpu(radio->int_in_endpoint->wMaxPacketSize);
@@ -621,7 +624,7 @@
 	if (!radio->int_in_buffer) {
 		dev_info(&intf->dev, "could not allocate int_in_buffer");
 		retval = -ENOMEM;
-		goto err_radio;
+		goto err_usbbuf;
 	}
 
 	radio->int_in_urb = usb_alloc_urb(0, GFP_KERNEL);
@@ -632,6 +635,30 @@
 	}
 
 	radio->v4l2_dev.release = si470x_usb_release;
+
+	/*
+	 * The si470x SiLabs reference design uses the same USB IDs as the
+	 * 'Thanko's Raremono' si4734-based receiver. So check here which we
+	 * have: attempt to read the device ID from the si470x: the lower 12
+	 * bits should be 0x0242 for the si470x.
+	 *
+	 * We use this check to determine which device we are dealing with.
+	 */
+	if (id->idVendor == 0x10c4 && id->idProduct == 0x818a) {
+		retval = usb_control_msg(radio->usbdev,
+				usb_rcvctrlpipe(radio->usbdev, 0),
+				HID_REQ_GET_REPORT,
+				USB_TYPE_CLASS | USB_RECIP_INTERFACE | USB_DIR_IN,
+				1, 2,
+				radio->usb_buf, 3, 500);
+		if (retval != 3 ||
+		    (get_unaligned_be16(&radio->usb_buf[1]) & 0xfff) != 0x0242) {
+			dev_info(&intf->dev, "this is not a si470x device.\n");
+			retval = -ENODEV;
+			goto err_urb;
+		}
+	}
+
 	retval = v4l2_device_register(&intf->dev, &radio->v4l2_dev);
 	if (retval < 0) {
 		dev_err(&intf->dev, "couldn't register v4l2_device\n");
@@ -743,6 +770,8 @@
 	usb_free_urb(radio->int_in_urb);
 err_intbuffer:
 	kfree(radio->int_in_buffer);
+err_usbbuf:
+	kfree(radio->usb_buf);
 err_radio:
 	kfree(radio);
 err_initial:
diff --git a/drivers/media/radio/si470x/radio-si470x.h b/drivers/media/radio/si470x/radio-si470x.h
index 467e955..4b76604 100644
--- a/drivers/media/radio/si470x/radio-si470x.h
+++ b/drivers/media/radio/si470x/radio-si470x.h
@@ -167,6 +167,7 @@
 	/* reference to USB and video device */
 	struct usb_device *usbdev;
 	struct usb_interface *intf;
+	char *usb_buf;
 
 	/* Interrupt endpoint handling */
 	char *int_in_buffer;
diff --git a/drivers/media/radio/si4713/Kconfig b/drivers/media/radio/si4713/Kconfig
new file mode 100644
index 0000000..a7c3ba8
--- /dev/null
+++ b/drivers/media/radio/si4713/Kconfig
@@ -0,0 +1,40 @@
+config USB_SI4713
+	tristate "Silicon Labs Si4713 FM Radio Transmitter support with USB"
+	depends on USB && RADIO_SI4713
+	select SI4713
+	---help---
+	  This is a driver for USB devices with the Silicon Labs SI4713
+	  chip. Currently these devices are known to work:
+	  - 10c4:8244: Silicon Labs FM Transmitter USB device.
+
+	  Say Y here if you want to connect this type of radio to your
+	  computer's USB port.
+
+	  To compile this driver as a module, choose M here: the
+	  module will be called radio-usb-si4713.
+
+config PLATFORM_SI4713
+	tristate "Silicon Labs Si4713 FM Radio Transmitter support with I2C"
+	depends on I2C && RADIO_SI4713
+	select SI4713
+	---help---
+	  This is a driver for I2C devices with the Silicon Labs SI4713
+	  chip.
+
+	  Say Y here if you want to connect this type of radio to your
+	  computer's I2C port.
+
+	  To compile this driver as a module, choose M here: the
+	  module will be called radio-platform-si4713.
+
+config I2C_SI4713
+	tristate "Silicon Labs Si4713 FM Radio Transmitter support"
+	depends on I2C && RADIO_SI4713
+	---help---
+	  Say Y here if you want support for the Si4713 FM Radio Transmitter.
+	  This device can transmit audio through FM. It can transmit
+	  RDS and RBDS signals as well. This module is the v4l2 radio
+	  interface for the i2c driver of this device.
+
+	  To compile this driver as a module, choose M here: the
+	  module will be called si4713.
diff --git a/drivers/media/radio/si4713/Makefile b/drivers/media/radio/si4713/Makefile
new file mode 100644
index 0000000..ddaaf92
--- /dev/null
+++ b/drivers/media/radio/si4713/Makefile
@@ -0,0 +1,7 @@
+#
+# Makefile for radios with Silicon Labs Si4713 FM Radio Transmitters
+#
+
+obj-$(CONFIG_I2C_SI4713) += si4713.o
+obj-$(CONFIG_USB_SI4713) += radio-usb-si4713.o
+obj-$(CONFIG_PLATFORM_SI4713) += radio-platform-si4713.o
diff --git a/drivers/media/radio/radio-si4713.c b/drivers/media/radio/si4713/radio-platform-si4713.c
similarity index 100%
rename from drivers/media/radio/radio-si4713.c
rename to drivers/media/radio/si4713/radio-platform-si4713.c
diff --git a/drivers/media/radio/si4713/radio-usb-si4713.c b/drivers/media/radio/si4713/radio-usb-si4713.c
new file mode 100644
index 0000000..779855b
--- /dev/null
+++ b/drivers/media/radio/si4713/radio-usb-si4713.c
@@ -0,0 +1,540 @@
+/*
+ * Copyright 2013 Cisco Systems, Inc. and/or its affiliates.
+ * All rights reserved.
+ *
+ * This program is free software; you may redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+/* kernel includes */
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/usb.h>
+#include <linux/init.h>
+#include <linux/slab.h>
+#include <linux/input.h>
+#include <linux/mutex.h>
+#include <linux/i2c.h>
+/* V4l includes */
+#include <linux/videodev2.h>
+#include <media/v4l2-common.h>
+#include <media/v4l2-device.h>
+#include <media/v4l2-ioctl.h>
+#include <media/v4l2-event.h>
+#include <media/si4713.h>
+
+#include "si4713.h"
+
+/* driver and module definitions */
+MODULE_AUTHOR("Dinesh Ram <dinesh.ram@cern.ch>");
+MODULE_DESCRIPTION("Si4713 FM Transmitter USB driver");
+MODULE_LICENSE("GPL v2");
+
+/* The device announces itself as Cygnal Integrated Products, Inc. */
+#define USB_SI4713_VENDOR		0x10c4
+#define USB_SI4713_PRODUCT		0x8244
+
+#define BUFFER_LENGTH			64
+#define USB_TIMEOUT			1000
+#define USB_RESP_TIMEOUT		50000
+
+/* USB Device ID List */
+static struct usb_device_id usb_si4713_usb_device_table[] = {
+	{USB_DEVICE_AND_INTERFACE_INFO(USB_SI4713_VENDOR, USB_SI4713_PRODUCT,
+							USB_CLASS_HID, 0, 0) },
+	{ }						/* Terminating entry */
+};
+
+MODULE_DEVICE_TABLE(usb, usb_si4713_usb_device_table);
+
+struct si4713_usb_device {
+	struct usb_device	*usbdev;
+	struct usb_interface	*intf;
+	struct video_device	vdev;
+	struct v4l2_device	v4l2_dev;
+	struct v4l2_subdev	*v4l2_subdev;
+	struct mutex		lock;
+	struct i2c_adapter	i2c_adapter;
+
+	u8			*buffer;
+};
+
+static inline struct si4713_usb_device *to_si4713_dev(struct v4l2_device *v4l2_dev)
+{
+	return container_of(v4l2_dev, struct si4713_usb_device, v4l2_dev);
+}
+
+static int vidioc_querycap(struct file *file, void *priv,
+					struct v4l2_capability *v)
+{
+	struct si4713_usb_device *radio = video_drvdata(file);
+
+	strlcpy(v->driver, "radio-usb-si4713", sizeof(v->driver));
+	strlcpy(v->card, "Si4713 FM Transmitter", sizeof(v->card));
+	usb_make_path(radio->usbdev, v->bus_info, sizeof(v->bus_info));
+	v->device_caps = V4L2_CAP_MODULATOR | V4L2_CAP_RDS_OUTPUT;
+	v->capabilities = v->device_caps | V4L2_CAP_DEVICE_CAPS;
+
+	return 0;
+}
+
+static int vidioc_g_modulator(struct file *file, void *priv,
+				struct v4l2_modulator *vm)
+{
+	struct si4713_usb_device *radio = video_drvdata(file);
+
+	return v4l2_subdev_call(radio->v4l2_subdev, tuner, g_modulator, vm);
+}
+
+static int vidioc_s_modulator(struct file *file, void *priv,
+				const struct v4l2_modulator *vm)
+{
+	struct si4713_usb_device *radio = video_drvdata(file);
+
+	return v4l2_subdev_call(radio->v4l2_subdev, tuner, s_modulator, vm);
+}
+
+static int vidioc_s_frequency(struct file *file, void *priv,
+				const struct v4l2_frequency *vf)
+{
+	struct si4713_usb_device *radio = video_drvdata(file);
+
+	return v4l2_subdev_call(radio->v4l2_subdev, tuner, s_frequency, vf);
+}
+
+static int vidioc_g_frequency(struct file *file, void *priv,
+				struct v4l2_frequency *vf)
+{
+	struct si4713_usb_device *radio = video_drvdata(file);
+
+	return v4l2_subdev_call(radio->v4l2_subdev, tuner, g_frequency, vf);
+}
+
+static const struct v4l2_ioctl_ops usb_si4713_ioctl_ops = {
+	.vidioc_querycap	  = vidioc_querycap,
+	.vidioc_g_modulator	  = vidioc_g_modulator,
+	.vidioc_s_modulator	  = vidioc_s_modulator,
+	.vidioc_g_frequency	  = vidioc_g_frequency,
+	.vidioc_s_frequency	  = vidioc_s_frequency,
+	.vidioc_log_status	  = v4l2_ctrl_log_status,
+	.vidioc_subscribe_event   = v4l2_ctrl_subscribe_event,
+	.vidioc_unsubscribe_event = v4l2_event_unsubscribe,
+};
+
+/* File system interface */
+static const struct v4l2_file_operations usb_si4713_fops = {
+	.owner		= THIS_MODULE,
+	.open           = v4l2_fh_open,
+	.release        = v4l2_fh_release,
+	.poll           = v4l2_ctrl_poll,
+	.unlocked_ioctl	= video_ioctl2,
+};
+
+static void usb_si4713_video_device_release(struct v4l2_device *v4l2_dev)
+{
+	struct si4713_usb_device *radio = to_si4713_dev(v4l2_dev);
+	struct i2c_adapter *adapter = &radio->i2c_adapter;
+
+	i2c_del_adapter(adapter);
+	v4l2_device_unregister(&radio->v4l2_dev);
+	kfree(radio->buffer);
+	kfree(radio);
+}
+
+/*
+ * This command sequence emulates the behaviour of the Windows driver.
+ * The structure of these commands was determined by sniffing the
+ * USB traffic of the device during startup.
+ * Most likely these commands query parameters such as the bus mode,
+ * component revision, boot mode and the device serial number.
+ *
+ * The commands must be sent in this exact order during startup;
+ * the device fails to power up if they are not.
+ *
+ * The complete list of startup commands is given in the start_seq table below.
+ */
+static int si4713_send_startup_command(struct si4713_usb_device *radio)
+{
+	unsigned long until_jiffies = jiffies + usecs_to_jiffies(USB_RESP_TIMEOUT) + 1;
+	u8 *buffer = radio->buffer;
+	int retval;
+
+	/* send the command */
+	retval = usb_control_msg(radio->usbdev, usb_sndctrlpipe(radio->usbdev, 0),
+					0x09, 0x21, 0x033f, 0, radio->buffer,
+					BUFFER_LENGTH, USB_TIMEOUT);
+	if (retval < 0)
+		return retval;
+
+	for (;;) {
+		/* receive the response */
+		retval = usb_control_msg(radio->usbdev, usb_rcvctrlpipe(radio->usbdev, 0),
+				0x01, 0xa1, 0x033f, 0, radio->buffer,
+				BUFFER_LENGTH, USB_TIMEOUT);
+		if (retval < 0)
+			return retval;
+		if (!radio->buffer[1]) {
+			/* USB traffic sniffing showed that some commands require
+			 * additional checks. */
+			switch (buffer[1]) {
+			case 0x32:
+				if (radio->buffer[2] == 0)
+					return 0;
+				break;
+			case 0x14:
+			case 0x12:
+				if (radio->buffer[2] & SI4713_CTS)
+					return 0;
+				break;
+			case 0x06:
+				if ((radio->buffer[2] & SI4713_CTS) && radio->buffer[9] == 0x08)
+					return 0;
+				break;
+			default:
+				return 0;
+			}
+		}
+		if (time_is_before_jiffies(until_jiffies))
+			return -EIO;
+		msleep(3);
+	}
+
+	return retval;
+}
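si4713_send_startup_command() above, like si4713_i2c_read() further down, polls with a precomputed deadline: take jiffies + usecs_to_jiffies(USB_RESP_TIMEOUT) + 1 once (roughly 50 ms), retry the read, msleep(3) between attempts, and give up when the deadline passes. The same shape in userspace C, with clock_gettime() standing in for jiffies:

#include <stdio.h>
#include <time.h>
#include <unistd.h>

static long long now_ms(void)
{
	struct timespec ts;

	clock_gettime(CLOCK_MONOTONIC, &ts);
	return ts.tv_sec * 1000LL + ts.tv_nsec / 1000000;
}

/* Poll try_read() until it succeeds or timeout_ms elapses. */
static int poll_until(int (*try_read)(void), int timeout_ms)
{
	long long deadline = now_ms() + timeout_ms;

	for (;;) {
		if (try_read())
			return 0;		/* success */
		if (now_ms() > deadline)
			return -1;		/* timed out, like -EIO */
		usleep(3000);			/* like msleep(3) */
	}
}

static int always_fails(void) { return 0; }

int main(void)
{
	printf("result: %d\n", poll_until(always_fails, 50));
	return 0;
}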
+
+struct si4713_start_seq_table {
+	int len;
+	u8 payload[8];
+};
+
+/*
+ * Some of the startup commands that could be recognized are:
+ * (0x03): Get the serial number of the board (response: CB000-00-00)
+ * (0x06, 0x03, 0x03, 0x08, 0x01, 0x0f): Get the component revision
+ */
+static struct si4713_start_seq_table start_seq[] = {
+
+	{ 1, { 0x03 } },
+	{ 2, { 0x32, 0x7f } },
+	{ 6, { 0x06, 0x03, 0x03, 0x08, 0x01, 0x0f } },
+	{ 2, { 0x14, 0x02 } },
+	{ 2, { 0x09, 0x90 } },
+	{ 3, { 0x08, 0x90, 0xfa } },
+	{ 2, { 0x36, 0x01 } },
+	{ 2, { 0x05, 0x03 } },
+	{ 7, { 0x06, 0x00, 0x06, 0x0e, 0x01, 0x0f, 0x05 } },
+	{ 1, { 0x12 } },
+	/* Commands that are sent after pressing the 'Initialize'
+	 * button in the Windows application. */
+	{ 1, { 0x03 } },
+	{ 1, { 0x01 } },
+	{ 2, { 0x09, 0x90 } },
+	{ 3, { 0x08, 0x90, 0xfa } },
+	{ 1, { 0x34 } },
+	{ 2, { 0x35, 0x01 } },
+	{ 2, { 0x36, 0x01 } },
+	{ 2, { 0x30, 0x09 } },
+	{ 4, { 0x30, 0x06, 0x00, 0xe2 } },
+	{ 3, { 0x31, 0x01, 0x30 } },
+	{ 3, { 0x31, 0x04, 0x09 } },
+	{ 2, { 0x05, 0x02 } },
+	{ 6, { 0x06, 0x03, 0x03, 0x08, 0x01, 0x0f } },
+};
+
+static int si4713_start_seq(struct si4713_usb_device *radio)
+{
+	int retval = 0;
+	int i;
+
+	radio->buffer[0] = 0x3f;
+
+	for (i = 0; i < ARRAY_SIZE(start_seq); i++) {
+		int len = start_seq[i].len;
+		u8 *payload = start_seq[i].payload;
+
+		memcpy(radio->buffer + 1, payload, len);
+		memset(radio->buffer + len + 1, 0, BUFFER_LENGTH - 1 - len);
+		retval = si4713_send_startup_command(radio);
+	}
+
+	return retval;
+}
+
+static struct i2c_board_info si4713_board_info = {
+	I2C_BOARD_INFO("si4713", SI4713_I2C_ADDR_BUSEN_HIGH),
+};
+
+struct si4713_command_table {
+	int command_id;
+	u8 payload[8];
+};
+
+/*
+ * Structure of a command:
+ *	Byte 1: 0x3f (always)
+ *	Byte 2: 0x06 (send a command)
+ *	Byte 3: Unknown
+ *	Byte 4: Number of arguments + 1 (for the command byte)
+ *	Byte 5: Number of response bytes
+ */
+static struct si4713_command_table command_table[] = {
+
+	{ SI4713_CMD_POWER_UP,		{ 0x00, SI4713_PWUP_NARGS + 1, SI4713_PWUP_NRESP} },
+	{ SI4713_CMD_GET_REV,		{ 0x03, 0x01, SI4713_GETREV_NRESP } },
+	{ SI4713_CMD_POWER_DOWN,	{ 0x00, 0x01, SI4713_PWDN_NRESP} },
+	{ SI4713_CMD_SET_PROPERTY,	{ 0x00, SI4713_SET_PROP_NARGS + 1, SI4713_SET_PROP_NRESP } },
+	{ SI4713_CMD_GET_PROPERTY,	{ 0x00, SI4713_GET_PROP_NARGS + 1, SI4713_GET_PROP_NRESP } },
+	{ SI4713_CMD_TX_TUNE_FREQ,	{ 0x03, SI4713_TXFREQ_NARGS + 1, SI4713_TXFREQ_NRESP } },
+	{ SI4713_CMD_TX_TUNE_POWER,	{ 0x03, SI4713_TXPWR_NARGS + 1, SI4713_TXPWR_NRESP } },
+	{ SI4713_CMD_TX_TUNE_MEASURE,	{ 0x03, SI4713_TXMEA_NARGS + 1, SI4713_TXMEA_NRESP } },
+	{ SI4713_CMD_TX_TUNE_STATUS,	{ 0x00, SI4713_TXSTATUS_NARGS + 1, SI4713_TXSTATUS_NRESP } },
+	{ SI4713_CMD_TX_ASQ_STATUS,	{ 0x03, SI4713_ASQSTATUS_NARGS + 1, SI4713_ASQSTATUS_NRESP } },
+	{ SI4713_CMD_GET_INT_STATUS,	{ 0x03, 0x01, SI4713_GET_STATUS_NRESP } },
+	{ SI4713_CMD_TX_RDS_BUFF,	{ 0x03, SI4713_RDSBUFF_NARGS + 1, SI4713_RDSBUFF_NRESP } },
+	{ SI4713_CMD_TX_RDS_PS,		{ 0x00, SI4713_RDSPS_NARGS + 1, SI4713_RDSPS_NRESP } },
+};
+
+static int send_command(struct si4713_usb_device *radio, u8 *payload, char *data, int len)
+{
+	int retval;
+
+	radio->buffer[0] = 0x3f;
+	radio->buffer[1] = 0x06;
+
+	memcpy(radio->buffer + 2, payload, 3);
+	memcpy(radio->buffer + 5, data, len);
+	memset(radio->buffer + 5 + len, 0, BUFFER_LENGTH - 5 - len);
+
+	/* send the command */
+	retval = usb_control_msg(radio->usbdev, usb_sndctrlpipe(radio->usbdev, 0),
+					0x09, 0x21, 0x033f, 0, radio->buffer,
+					BUFFER_LENGTH, USB_TIMEOUT);
+
+	return retval < 0 ? retval : 0;
+}
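send_command() above always emits a fixed 64-byte report: byte 0 is 0x3f, byte 1 is 0x06 (send a command), bytes 2-4 come from the matching command_table entry (unknown byte, argument count + 1, expected response length), and the raw command bytes follow from offset 5, zero padded. A standalone sketch that frames such a report; the payload values are made up for illustration:

#include <stdio.h>
#include <stdint.h>
#include <string.h>

#define REPORT_LEN 64	/* same as BUFFER_LENGTH in the driver */

/* Frame a command into a 64-byte report; meta is the 3-byte table entry. */
static void frame_command(uint8_t report[REPORT_LEN], const uint8_t meta[3],
			  const uint8_t *cmd, size_t len)
{
	memset(report, 0, REPORT_LEN);
	report[0] = 0x3f;
	report[1] = 0x06;
	memcpy(&report[2], meta, 3);
	memcpy(&report[5], cmd, len);	/* caller ensures len <= REPORT_LEN - 5 */
}

int main(void)
{
	/* Hypothetical tune-style command with three argument bytes. */
	uint8_t meta[3] = { 0x03, 0x04, 0x01 };		/* unknown, nargs + 1, nresp */
	uint8_t cmd[4] = { 0x30, 0x00, 0x27, 0x7e };
	uint8_t report[REPORT_LEN];
	int i;

	frame_command(report, meta, cmd, sizeof(cmd));
	for (i = 0; i < 12; i++)
		printf("%02x ", report[i]);
	printf("...\n");
	return 0;
}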
+
+static int si4713_i2c_read(struct si4713_usb_device *radio, char *data, int len)
+{
+	unsigned long until_jiffies = jiffies + usecs_to_jiffies(USB_RESP_TIMEOUT) + 1;
+	int retval;
+
+	/* receive the response */
+	for (;;) {
+		retval = usb_control_msg(radio->usbdev,
+					usb_rcvctrlpipe(radio->usbdev, 0),
+					0x01, 0xa1, 0x033f, 0, radio->buffer,
+					BUFFER_LENGTH, USB_TIMEOUT);
+		if (retval < 0)
+			return retval;
+
+		/*
+		 * Check that we get a valid reply back (buffer[1] == 0) and
+		 * that CTS is set before returning, otherwise we wait and try
+		 * again. The i2c driver also does the CTS check, but the timeouts
+		 * used there are much too small for this USB driver, so we wait
+		 * for it here.
+		 */
+		if (radio->buffer[1] == 0 && (radio->buffer[2] & SI4713_CTS)) {
+			memcpy(data, radio->buffer + 2, len);
+			return 0;
+		}
+		if (time_is_before_jiffies(until_jiffies)) {
+			/* Zero the status value, ensuring CTS isn't set */
+			data[0] = 0;
+			return 0;
+		}
+		msleep(3);
+	}
+}
+
+static int si4713_i2c_write(struct si4713_usb_device *radio, char *data, int len)
+{
+	int retval = -EINVAL;
+	int i;
+
+	if (len > BUFFER_LENGTH - 5)
+		return -EINVAL;
+
+	for (i = 0; i < ARRAY_SIZE(command_table); i++) {
+		if (data[0] == command_table[i].command_id)
+			retval = send_command(radio, command_table[i].payload,
+						data, len);
+	}
+
+	return retval < 0 ? retval : 0;
+}
+
+static int si4713_transfer(struct i2c_adapter *i2c_adapter,
+				struct i2c_msg *msgs, int num)
+{
+	struct si4713_usb_device *radio = i2c_get_adapdata(i2c_adapter);
+	int retval = -EINVAL;
+	int i;
+
+	if (num <= 0)
+		return 0;
+
+	for (i = 0; i < num; i++) {
+		if (msgs[i].flags & I2C_M_RD)
+			retval = si4713_i2c_read(radio, msgs[i].buf, msgs[i].len);
+		else
+			retval = si4713_i2c_write(radio, msgs[i].buf, msgs[i].len);
+		if (retval)
+			break;
+	}
+
+	return retval ? retval : num;
+}
+
+static u32 si4713_functionality(struct i2c_adapter *adapter)
+{
+	return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL;
+}
+
+static struct i2c_algorithm si4713_algo = {
+	.master_xfer   = si4713_transfer,
+	.functionality = si4713_functionality,
+};
+
+/* This name value shows up in the sysfs filename associated
+ * with this I2C adapter. */
+static struct i2c_adapter si4713_i2c_adapter_template = {
+	.name   = "si4713-i2c",
+	.owner  = THIS_MODULE,
+	.algo   = &si4713_algo,
+};
+
+static int si4713_register_i2c_adapter(struct si4713_usb_device *radio)
+{
+	radio->i2c_adapter = si4713_i2c_adapter_template;
+	/* set up sysfs linkage to our parent device */
+	radio->i2c_adapter.dev.parent = &radio->usbdev->dev;
+	i2c_set_adapdata(&radio->i2c_adapter, radio);
+
+	return i2c_add_adapter(&radio->i2c_adapter);
+}
+
+/* check if the device is present and register with v4l and usb if it is */
+static int usb_si4713_probe(struct usb_interface *intf,
+				const struct usb_device_id *id)
+{
+	struct si4713_usb_device *radio;
+	struct i2c_adapter *adapter;
+	struct v4l2_subdev *sd;
+	int retval = -ENOMEM;
+
+	dev_info(&intf->dev, "Si4713 development board discovered: (%04X:%04X)\n",
+			id->idVendor, id->idProduct);
+
+	/* Initialize local device structure */
+	radio = kzalloc(sizeof(struct si4713_usb_device), GFP_KERNEL);
+	if (radio)
+		radio->buffer = kmalloc(BUFFER_LENGTH, GFP_KERNEL);
+
+	if (!radio || !radio->buffer) {
+		dev_err(&intf->dev, "kmalloc for si4713_usb_device failed\n");
+		kfree(radio);
+		return -ENOMEM;
+	}
+
+	mutex_init(&radio->lock);
+
+	radio->usbdev = interface_to_usbdev(intf);
+	radio->intf = intf;
+	usb_set_intfdata(intf, &radio->v4l2_dev);
+
+	retval = si4713_start_seq(radio);
+	if (retval < 0)
+		goto err_v4l2;
+
+	retval = v4l2_device_register(&intf->dev, &radio->v4l2_dev);
+	if (retval < 0) {
+		dev_err(&intf->dev, "couldn't register v4l2_device\n");
+		goto err_v4l2;
+	}
+
+	retval = si4713_register_i2c_adapter(radio);
+	if (retval < 0) {
+		dev_err(&intf->dev, "could not register i2c device\n");
+		goto err_i2cdev;
+	}
+
+	adapter = &radio->i2c_adapter;
+	sd = v4l2_i2c_new_subdev_board(&radio->v4l2_dev, adapter,
+					  &si4713_board_info, NULL);
+	radio->v4l2_subdev = sd;
+	if (!sd) {
+		dev_err(&intf->dev, "cannot get v4l2 subdevice\n");
+		retval = -ENODEV;
+		goto del_adapter;
+	}
+
+	radio->vdev.ctrl_handler = sd->ctrl_handler;
+	radio->v4l2_dev.release = usb_si4713_video_device_release;
+	strlcpy(radio->vdev.name, radio->v4l2_dev.name,
+		sizeof(radio->vdev.name));
+	radio->vdev.v4l2_dev = &radio->v4l2_dev;
+	radio->vdev.fops = &usb_si4713_fops;
+	radio->vdev.ioctl_ops = &usb_si4713_ioctl_ops;
+	radio->vdev.lock = &radio->lock;
+	radio->vdev.release = video_device_release_empty;
+	radio->vdev.vfl_dir = VFL_DIR_TX;
+
+	video_set_drvdata(&radio->vdev, radio);
+	set_bit(V4L2_FL_USE_FH_PRIO, &radio->vdev.flags);
+
+	retval = video_register_device(&radio->vdev, VFL_TYPE_RADIO, -1);
+	if (retval < 0) {
+		dev_err(&intf->dev, "could not register video device\n");
+		goto del_adapter;
+	}
+
+	dev_info(&intf->dev, "V4L2 device registered as %s\n",
+			video_device_node_name(&radio->vdev));
+
+	return 0;
+
+del_adapter:
+	i2c_del_adapter(adapter);
+err_i2cdev:
+	v4l2_device_unregister(&radio->v4l2_dev);
+err_v4l2:
+	kfree(radio->buffer);
+	kfree(radio);
+	return retval;
+}
+
+static void usb_si4713_disconnect(struct usb_interface *intf)
+{
+	struct si4713_usb_device *radio = to_si4713_dev(usb_get_intfdata(intf));
+
+	dev_info(&intf->dev, "Si4713 development board now disconnected\n");
+
+	mutex_lock(&radio->lock);
+	usb_set_intfdata(intf, NULL);
+	video_unregister_device(&radio->vdev);
+	v4l2_device_disconnect(&radio->v4l2_dev);
+	mutex_unlock(&radio->lock);
+	v4l2_device_put(&radio->v4l2_dev);
+}
+
+/* USB subsystem interface */
+static struct usb_driver usb_si4713_driver = {
+	.name			= "radio-usb-si4713",
+	.probe			= usb_si4713_probe,
+	.disconnect		= usb_si4713_disconnect,
+	.id_table		= usb_si4713_usb_device_table,
+};
+
+module_usb_driver(usb_si4713_driver);
diff --git a/drivers/media/radio/si4713-i2c.c b/drivers/media/radio/si4713/si4713.c
similarity index 86%
rename from drivers/media/radio/si4713-i2c.c
rename to drivers/media/radio/si4713/si4713.c
index 9ec48cc..07d5153 100644
--- a/drivers/media/radio/si4713-i2c.c
+++ b/drivers/media/radio/si4713/si4713.c
@@ -27,13 +27,12 @@
 #include <linux/i2c.h>
 #include <linux/slab.h>
 #include <linux/gpio.h>
-#include <linux/regulator/consumer.h>
 #include <linux/module.h>
 #include <media/v4l2-device.h>
 #include <media/v4l2-ioctl.h>
 #include <media/v4l2-common.h>
 
-#include "si4713-i2c.h"
+#include "si4713.h"
 
 /* module parameters */
 static int debug;
@@ -45,23 +44,18 @@
 MODULE_DESCRIPTION("I2C driver for Si4713 FM Radio Transmitter");
 MODULE_VERSION("0.0.1");
 
-static const char *si4713_supply_names[SI4713_NUM_SUPPLIES] = {
-	"vio",
-	"vdd",
-};
-
 #define DEFAULT_RDS_PI			0x00
 #define DEFAULT_RDS_PTY			0x00
 #define DEFAULT_RDS_DEVIATION		0x00C8
 #define DEFAULT_RDS_PS_REPEAT_COUNT	0x0003
 #define DEFAULT_LIMITER_RTIME		0x1392
 #define DEFAULT_LIMITER_DEV		0x102CA
-#define DEFAULT_PILOT_FREQUENCY 	0x4A38
+#define DEFAULT_PILOT_FREQUENCY		0x4A38
 #define DEFAULT_PILOT_DEVIATION		0x1A5E
 #define DEFAULT_ACOMP_ATIME		0x0000
 #define DEFAULT_ACOMP_RTIME		0xF4240L
 #define DEFAULT_ACOMP_GAIN		0x0F
-#define DEFAULT_ACOMP_THRESHOLD 	(-0x28)
+#define DEFAULT_ACOMP_THRESHOLD		(-0x28)
 #define DEFAULT_MUTE			0x01
 #define DEFAULT_POWER_LEVEL		88
 #define DEFAULT_FREQUENCY		8800
@@ -213,6 +207,7 @@
 				u8 response[], const int respn, const int usecs)
 {
 	struct i2c_client *client = v4l2_get_subdevdata(&sdev->sd);
+	unsigned long until_jiffies;
 	u8 data1[MAX_ARGS + 1];
 	int err;
 
@@ -228,30 +223,42 @@
 	if (err != argn + 1) {
 		v4l2_err(&sdev->sd, "Error while sending command 0x%02x\n",
 			command);
-		return (err > 0) ? -EIO : err;
+		return err < 0 ? err : -EIO;
 	}
 
+	until_jiffies = jiffies + usecs_to_jiffies(usecs) + 1;
+
 	/* Wait response from interrupt */
-	if (!wait_for_completion_timeout(&sdev->work,
+	if (client->irq) {
+		if (!wait_for_completion_timeout(&sdev->work,
 				usecs_to_jiffies(usecs) + 1))
-		v4l2_warn(&sdev->sd,
+			v4l2_warn(&sdev->sd,
 				"(%s) Device took too much time to answer.\n",
 				__func__);
-
-	/* Then get the response */
-	err = i2c_master_recv(client, response, respn);
-	if (err != respn) {
-		v4l2_err(&sdev->sd,
-			"Error while reading response for command 0x%02x\n",
-			command);
-		return (err > 0) ? -EIO : err;
 	}
 
-	DBG_BUFFER(&sdev->sd, "Response", response, respn);
-	if (check_command_failed(response[0]))
-		return -EBUSY;
+	do {
+		err = i2c_master_recv(client, response, respn);
+		if (err != respn) {
+			v4l2_err(&sdev->sd,
+				"Error %d while reading response for command 0x%02x\n",
+				err, command);
+			return err < 0 ? err : -EIO;
+		}
 
-	return 0;
+		DBG_BUFFER(&sdev->sd, "Response", response, respn);
+		if (!check_command_failed(response[0]))
+			return 0;
+
+		if (client->irq)
+			return -EBUSY;
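+		/* No interrupt line available: poll instead, sleeping 1-2 ms
+		 * between reads (or no more than 1 ms when the command timeout
+		 * itself is that short) */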
+		if (usecs <= 1000)
+			usleep_range(usecs, 1000);
+		else
+			usleep_range(1000, 2000);
+	} while (time_is_after_jiffies(until_jiffies));
+
+	return -EBUSY;
 }
 
 /*
@@ -265,9 +272,9 @@
 	int err;
 	u8 val[SI4713_GET_PROP_NRESP];
 	/*
-	 * 	.First byte = 0
-	 * 	.Second byte = property's MSB
-	 * 	.Third byte = property's LSB
+	 *	.First byte = 0
+	 *	.Second byte = property's MSB
+	 *	.Third byte = property's LSB
 	 */
 	const u8 args[SI4713_GET_PROP_NARGS] = {
 		0x00,
@@ -302,11 +309,11 @@
 	int rval;
 	u8 resp[SI4713_SET_PROP_NRESP];
 	/*
-	 * 	.First byte = 0
-	 * 	.Second byte = property's MSB
-	 * 	.Third byte = property's LSB
-	 * 	.Fourth byte = value's MSB
-	 * 	.Fifth byte = value's LSB
+	 *	.First byte = 0
+	 *	.Second byte = property's MSB
+	 *	.Third byte = property's LSB
+	 *	.Fourth byte = value's MSB
+	 *	.Fifth byte = value's LSB
 	 */
 	const u8 args[SI4713_SET_PROP_NARGS] = {
 		0x00,
@@ -344,31 +351,36 @@
  */
 static int si4713_powerup(struct si4713_device *sdev)
 {
+	struct i2c_client *client = v4l2_get_subdevdata(&sdev->sd);
 	int err;
 	u8 resp[SI4713_PWUP_NRESP];
 	/*
-	 * 	.First byte = Enabled interrupts and boot function
-	 * 	.Second byte = Input operation mode
+	 *	.First byte = Enabled interrupts and boot function
+	 *	.Second byte = Input operation mode
 	 */
-	const u8 args[SI4713_PWUP_NARGS] = {
-		SI4713_PWUP_CTSIEN | SI4713_PWUP_GPO2OEN | SI4713_PWUP_FUNC_TX,
+	u8 args[SI4713_PWUP_NARGS] = {
+		SI4713_PWUP_GPO2OEN | SI4713_PWUP_FUNC_TX,
 		SI4713_PWUP_OPMOD_ANALOG,
 	};
 
 	if (sdev->power_state)
 		return 0;
 
-	err = regulator_bulk_enable(ARRAY_SIZE(sdev->supplies),
-				    sdev->supplies);
-	if (err) {
-		v4l2_err(&sdev->sd, "Failed to enable supplies: %d\n", err);
-		return err;
+	if (sdev->supplies) {
+		err = regulator_bulk_enable(sdev->supplies, sdev->supply_data);
+		if (err) {
+			v4l2_err(&sdev->sd, "Failed to enable supplies: %d\n", err);
+			return err;
+		}
 	}
 	if (gpio_is_valid(sdev->gpio_reset)) {
 		udelay(50);
 		gpio_set_value(sdev->gpio_reset, 1);
 	}
 
+	if (client->irq)
+		args[0] |= SI4713_PWUP_CTSIEN;
+
 	err = si4713_send_command(sdev, SI4713_CMD_POWER_UP,
 					args, ARRAY_SIZE(args),
 					resp, ARRAY_SIZE(resp),
@@ -380,13 +392,15 @@
 		v4l2_dbg(1, debug, &sdev->sd, "Device in power up mode\n");
 		sdev->power_state = POWER_ON;
 
-		err = si4713_write_property(sdev, SI4713_GPO_IEN,
+		if (client->irq)
+			err = si4713_write_property(sdev, SI4713_GPO_IEN,
 						SI4713_STC_INT | SI4713_CTS);
-	} else {
-		if (gpio_is_valid(sdev->gpio_reset))
-			gpio_set_value(sdev->gpio_reset, 0);
-		err = regulator_bulk_disable(ARRAY_SIZE(sdev->supplies),
-					     sdev->supplies);
+		return err;
+	}
+	if (gpio_is_valid(sdev->gpio_reset))
+		gpio_set_value(sdev->gpio_reset, 0);
+	if (sdev->supplies) {
+		err = regulator_bulk_disable(sdev->supplies, sdev->supply_data);
 		if (err)
 			v4l2_err(&sdev->sd,
 				 "Failed to disable supplies: %d\n", err);
@@ -418,11 +432,13 @@
 		v4l2_dbg(1, debug, &sdev->sd, "Device in reset mode\n");
 		if (gpio_is_valid(sdev->gpio_reset))
 			gpio_set_value(sdev->gpio_reset, 0);
-		err = regulator_bulk_disable(ARRAY_SIZE(sdev->supplies),
-					     sdev->supplies);
-		if (err)
-			v4l2_err(&sdev->sd,
-				 "Failed to disable supplies: %d\n", err);
+		if (sdev->supplies) {
+			err = regulator_bulk_disable(sdev->supplies,
+						     sdev->supply_data);
+			if (err)
+				v4l2_err(&sdev->sd,
+					 "Failed to disable supplies: %d\n", err);
+		}
 		sdev->power_state = POWER_OFF;
 	}
 
@@ -451,7 +467,7 @@
 		v4l2_info(&sdev->sd, "chip found @ 0x%02x (%s)\n",
 				client->addr << 1, client->adapter->name);
 	} else {
-		v4l2_err(&sdev->sd, "Invalid product number\n");
+		v4l2_err(&sdev->sd, "Invalid product number 0x%X\n", resp[1]);
 		rval = -EINVAL;
 	}
 	return rval;
@@ -465,39 +481,45 @@
  */
 static int si4713_wait_stc(struct si4713_device *sdev, const int usecs)
 {
-	int err;
+	struct i2c_client *client = v4l2_get_subdevdata(&sdev->sd);
 	u8 resp[SI4713_GET_STATUS_NRESP];
+	unsigned long start_jiffies = jiffies;
+	int err;
 
-	/* Wait response from STC interrupt */
-	if (!wait_for_completion_timeout(&sdev->work,
-			usecs_to_jiffies(usecs) + 1))
+	if (client->irq &&
+	    !wait_for_completion_timeout(&sdev->work, usecs_to_jiffies(usecs) + 1))
 		v4l2_warn(&sdev->sd,
-			"%s: device took too much time to answer (%d usec).\n",
-				__func__, usecs);
+			"(%s) Device took too much time to answer.\n", __func__);
 
-	/* Clear status bits */
-	err = si4713_send_command(sdev, SI4713_CMD_GET_INT_STATUS,
-					NULL, 0,
-					resp, ARRAY_SIZE(resp),
-					DEFAULT_TIMEOUT);
+	for (;;) {
+		/* Clear status bits */
+		err = si4713_send_command(sdev, SI4713_CMD_GET_INT_STATUS,
+				NULL, 0,
+				resp, ARRAY_SIZE(resp),
+				DEFAULT_TIMEOUT);
+		/* The USB-attached device returns errors while it is still
+		 * waiting for the STC bit to be set, hence the polling */
+		if (err >= 0) {
+			v4l2_dbg(1, debug, &sdev->sd,
+				"%s: status bits: 0x%02x\n", __func__, resp[0]);
 
-	if (err < 0)
-		goto exit;
-
-	v4l2_dbg(1, debug, &sdev->sd,
-			"%s: status bits: 0x%02x\n", __func__, resp[0]);
-
-	if (!(resp[0] & SI4713_STC_INT))
-		err = -EIO;
-
-exit:
-	return err;
+			if (resp[0] & SI4713_STC_INT)
+				return 0;
+		}
+		if (jiffies_to_usecs(jiffies - start_jiffies) > usecs)
+			return err < 0 ? err : -EIO;
+		/* We sleep here for 3-4 ms in order to avoid flooding the device
+		 * with USB requests. The si4713 USB driver was developed
+		 * by reverse engineering the Windows USB driver. The Windows
+		 * driver also has a ~2.5 ms delay between responses. */
+		usleep_range(3000, 4000);
+	}
 }
 
 /*
  * si4713_tx_tune_freq - Sets the state of the RF carrier and sets the tuning
- * 			frequency between 76 and 108 MHz in 10 kHz units and
- * 			steps of 50 kHz.
+ *			frequency between 76 and 108 MHz in 10 kHz units and
+ *			steps of 50 kHz.
  * @sdev: si4713_device structure for the device we are communicating
  * @frequency: desired frequency (76 - 108 MHz, unit 10 KHz, step 50 kHz)
  */
@@ -506,9 +528,9 @@
 	int err;
 	u8 val[SI4713_TXFREQ_NRESP];
 	/*
-	 * 	.First byte = 0
-	 * 	.Second byte = frequency's MSB
-	 * 	.Third byte = frequency's LSB
+	 *	.First byte = 0
+	 *	.Second byte = frequency's MSB
+	 *	.Third byte = frequency's LSB
 	 */
 	const u8 args[SI4713_TXFREQ_NARGS] = {
 		0x00,
@@ -535,14 +557,14 @@
 }
 
 /*
- * si4713_tx_tune_power - Sets the RF voltage level between 88 and 115 dBuV in
- * 			1 dB units. A value of 0x00 indicates off. The command
- * 			also sets the antenna tuning capacitance. A value of 0
- * 			indicates autotuning, and a value of 1 - 191 indicates
- * 			a manual override, which results in a tuning
- * 			capacitance of 0.25 pF x @antcap.
+ * si4713_tx_tune_power - Sets the RF voltage level between 88 and 120 dBuV in
+ *			1 dB units. A value of 0x00 indicates off. The command
+ *			also sets the antenna tuning capacitance. A value of 0
+ *			indicates autotuning, and a value of 1 - 191 indicates
+ *			a manual override, which results in a tuning
+ *			capacitance of 0.25 pF x @antcap.
  * @sdev: si4713_device structure for the device we are communicating
- * @power: tuning power (88 - 115 dBuV, unit/step 1 dB)
+ * @power: tuning power (88 - 120 dBuV, unit/step 1 dB)
  * @antcap: value of antenna tuning capacitor (0 - 191)
  */
 static int si4713_tx_tune_power(struct si4713_device *sdev, u8 power,
@@ -551,21 +573,21 @@
 	int err;
 	u8 val[SI4713_TXPWR_NRESP];
 	/*
-	 * 	.First byte = 0
-	 * 	.Second byte = 0
-	 * 	.Third byte = power
-	 * 	.Fourth byte = antcap
+	 *	.First byte = 0
+	 *	.Second byte = 0
+	 *	.Third byte = power
+	 *	.Fourth byte = antcap
 	 */
-	const u8 args[SI4713_TXPWR_NARGS] = {
+	u8 args[SI4713_TXPWR_NARGS] = {
 		0x00,
 		0x00,
 		power,
 		antcap,
 	};
 
-	if (((power > 0) && (power < SI4713_MIN_POWER)) ||
-		power > SI4713_MAX_POWER || antcap > SI4713_MAX_ANTCAP)
-		return -EDOM;
+	/* Map power values 1-87 to MIN_POWER (88) */
+	if (power > 0 && power < SI4713_MIN_POWER)
+		args[2] = power = SI4713_MIN_POWER;
 
 	err = si4713_send_command(sdev, SI4713_CMD_TX_TUNE_POWER,
 				  args, ARRAY_SIZE(args), val,
@@ -583,12 +605,12 @@
 
 /*
  * si4713_tx_tune_measure - Enters receive mode and measures the received noise
- * 			level in units of dBuV on the selected frequency.
- * 			The Frequency must be between 76 and 108 MHz in 10 kHz
- * 			units and steps of 50 kHz. The command also sets the
- * 			antenna	tuning capacitance. A value of 0 means
- * 			autotuning, and a value of 1 to 191 indicates manual
- * 			override.
+ *			level in units of dBuV on the selected frequency.
+ *			The Frequency must be between 76 and 108 MHz in 10 kHz
+ *			units and steps of 50 kHz. The command also sets the
+ *			antenna tuning capacitance. A value of 0 means
+ *			autotuning, and a value of 1 to 191 indicates manual
+ *			override.
  * @sdev: si4713_device structure for the device we are communicating
  * @frequency: desired frequency (76 - 108 MHz, unit 10 KHz, step 50 kHz)
  * @antcap: value of antenna tuning capacitor (0 - 191)
@@ -599,10 +621,10 @@
 	int err;
 	u8 val[SI4713_TXMEA_NRESP];
 	/*
-	 * 	.First byte = 0
-	 * 	.Second byte = frequency's MSB
-	 * 	.Third byte = frequency's LSB
-	 * 	.Fourth byte = antcap
+	 *	.First byte = 0
+	 *	.Second byte = frequency's MSB
+	 *	.Third byte = frequency's LSB
+	 *	.Fourth byte = antcap
 	 */
 	const u8 args[SI4713_TXMEA_NARGS] = {
 		0x00,
@@ -632,11 +654,11 @@
 
 /*
  * si4713_tx_tune_status- Returns the status of the tx_tune_freq, tx_tune_mea or
- * 			tx_tune_power commands. This command return the current
- * 			frequency, output voltage in dBuV, the antenna tunning
- * 			capacitance value and the received noise level. The
- * 			command also clears the stcint interrupt bit when the
- * 			first bit of its arguments is high.
+ *			tx_tune_power commands. This command returns the current
+ *			frequency, output voltage in dBuV, the antenna tuning
+ *			capacitance value and the received noise level. The
+ *			command also clears the stcint interrupt bit when the
+ *			first bit of its arguments is high.
  * @sdev: si4713_device structure for the device we are communicating
  * @intack: 0x01 to clear the seek/tune complete interrupt status indicator.
  * @frequency: returned frequency
@@ -651,7 +673,7 @@
 	int err;
 	u8 val[SI4713_TXSTATUS_NRESP];
 	/*
-	 * 	.First byte = intack bit
+	 *	.First byte = intack bit
 	 */
 	const u8 args[SI4713_TXSTATUS_NARGS] = {
 		intack & SI4713_INTACK_MASK,
@@ -812,8 +834,9 @@
 	return rval;
 }
 
-static int si4713_set_rds_radio_text(struct si4713_device *sdev, char *rt)
+static int si4713_set_rds_radio_text(struct si4713_device *sdev, const char *rt)
 {
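+	/* Constant carriage-return block that is substituted for the text once
+	 * a NUL or CR terminator is found, so the (const) input string itself
+	 * is never modified */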
+	static const char cr[RDS_RADIOTEXT_BLK_SIZE] = { RDS_CARRIAGE_RETURN, 0 };
 	int rval = 0, i;
 	u16 t_index = 0;
 	u8 b_index = 0, cr_inserted = 0;
@@ -837,7 +860,7 @@
 			for (i = 0; i < RDS_RADIOTEXT_BLK_SIZE; i++) {
 				if (!rt[t_index + i] ||
 				    rt[t_index + i] == RDS_CARRIAGE_RETURN) {
-					rt[t_index + i] = RDS_CARRIAGE_RETURN;
+					rt = cr;
 					cr_inserted = 1;
 					break;
 				}
@@ -1024,7 +1047,6 @@
 	if (rval < 0)
 		return rval;
 
-
 	sdev->frequency = DEFAULT_FREQUENCY;
 	sdev->stereo = 1;
 	sdev->tune_rnl = DEFAULT_TUNE_RNL;
@@ -1345,7 +1367,7 @@
 	struct v4l2_ctrl_handler *hdl;
 	int rval, i;
 
-	sdev = kzalloc(sizeof *sdev, GFP_KERNEL);
+	sdev = kzalloc(sizeof(*sdev), GFP_KERNEL);
 	if (!sdev) {
 		dev_err(&client->dev, "Failed to alloc video device.\n");
 		rval = -ENOMEM;
@@ -1362,13 +1384,14 @@
 		}
 		sdev->gpio_reset = pdata->gpio_reset;
 		gpio_direction_output(sdev->gpio_reset, 0);
+		sdev->supplies = pdata->supplies;
 	}
 
-	for (i = 0; i < ARRAY_SIZE(sdev->supplies); i++)
-		sdev->supplies[i].supply = si4713_supply_names[i];
+	for (i = 0; i < sdev->supplies; i++)
+		sdev->supply_data[i].supply = pdata->supply_names[i];
 
-	rval = regulator_bulk_get(&client->dev, ARRAY_SIZE(sdev->supplies),
-				  sdev->supplies);
+	rval = regulator_bulk_get(&client->dev, sdev->supplies,
+				  sdev->supply_data);
 	if (rval) {
 		dev_err(&client->dev, "Cannot get regulators: %d\n", rval);
 		goto free_gpio;
@@ -1420,8 +1443,8 @@
 			V4L2_CID_AUDIO_COMPRESSION_GAIN, 0, MAX_ACOMP_GAIN, 1,
 			DEFAULT_ACOMP_GAIN);
 	sdev->compression_threshold = v4l2_ctrl_new_std(hdl, &si4713_ctrl_ops,
-			V4L2_CID_AUDIO_COMPRESSION_THRESHOLD, MIN_ACOMP_THRESHOLD,
-			MAX_ACOMP_THRESHOLD, 1,
+			V4L2_CID_AUDIO_COMPRESSION_THRESHOLD,
+			MIN_ACOMP_THRESHOLD, MAX_ACOMP_THRESHOLD, 1,
 			DEFAULT_ACOMP_THRESHOLD);
 	sdev->compression_attack_time = v4l2_ctrl_new_std(hdl, &si4713_ctrl_ops,
 			V4L2_CID_AUDIO_COMPRESSION_ATTACK_TIME, 0,
@@ -1443,9 +1466,11 @@
 			V4L2_CID_TUNE_PREEMPHASIS,
 			V4L2_PREEMPHASIS_75_uS, 0, V4L2_PREEMPHASIS_50_uS);
 	sdev->tune_pwr_level = v4l2_ctrl_new_std(hdl, &si4713_ctrl_ops,
-			V4L2_CID_TUNE_POWER_LEVEL, 0, 120, 1, DEFAULT_POWER_LEVEL);
+			V4L2_CID_TUNE_POWER_LEVEL, 0, SI4713_MAX_POWER,
+			1, DEFAULT_POWER_LEVEL);
 	sdev->tune_ant_cap = v4l2_ctrl_new_std(hdl, &si4713_ctrl_ops,
-			V4L2_CID_TUNE_ANTENNA_CAPACITOR, 0, 191, 1, 0);
+			V4L2_CID_TUNE_ANTENNA_CAPACITOR, 0, SI4713_MAX_ANTCAP,
+			1, 0);
 
 	if (hdl->error) {
 		rval = hdl->error;
@@ -1481,7 +1506,7 @@
 free_ctrls:
 	v4l2_ctrl_handler_free(hdl);
 put_reg:
-	regulator_bulk_free(ARRAY_SIZE(sdev->supplies), sdev->supplies);
+	regulator_bulk_free(sdev->supplies, sdev->supply_data);
 free_gpio:
 	if (gpio_is_valid(sdev->gpio_reset))
 		gpio_free(sdev->gpio_reset);
@@ -1505,7 +1530,7 @@
 
 	v4l2_device_unregister_subdev(sd);
 	v4l2_ctrl_handler_free(sd->ctrl_handler);
-	regulator_bulk_free(ARRAY_SIZE(sdev->supplies), sdev->supplies);
+	regulator_bulk_free(sdev->supplies, sdev->supply_data);
 	if (gpio_is_valid(sdev->gpio_reset))
 		gpio_free(sdev->gpio_reset);
 	kfree(sdev);
diff --git a/drivers/media/radio/si4713-i2c.h b/drivers/media/radio/si4713/si4713.h
similarity index 97%
rename from drivers/media/radio/si4713-i2c.h
rename to drivers/media/radio/si4713/si4713.h
index 25cdea2..4837cf6 100644
--- a/drivers/media/radio/si4713-i2c.h
+++ b/drivers/media/radio/si4713/si4713.h
@@ -15,6 +15,7 @@
 #ifndef SI4713_I2C_H
 #define SI4713_I2C_H
 
+#include <linux/regulator/consumer.h>
 #include <media/v4l2-subdev.h>
 #include <media/v4l2-ctrls.h>
 #include <media/si4713.h>
@@ -226,7 +227,8 @@
 		struct v4l2_ctrl *tune_ant_cap;
 	};
 	struct completion work;
-	struct regulator_bulk_data supplies[SI4713_NUM_SUPPLIES];
+	unsigned supplies;
+	struct regulator_bulk_data supply_data[SI4713_NUM_SUPPLIES];
 	int gpio_reset;
 	u32 power_state;
 	u32 rds_enabled;
diff --git a/drivers/media/radio/tea575x.c b/drivers/media/radio/tea575x.c
index cef0698..7c14060 100644
--- a/drivers/media/radio/tea575x.c
+++ b/drivers/media/radio/tea575x.c
@@ -20,12 +20,12 @@
  *
  */
 
-#include <asm/io.h>
 #include <linux/delay.h>
 #include <linux/module.h>
 #include <linux/init.h>
 #include <linux/slab.h>
 #include <linux/sched.h>
+#include <asm/io.h>
 #include <media/v4l2-device.h>
 #include <media/v4l2-dev.h>
 #include <media/v4l2-fh.h>
diff --git a/drivers/media/rc/imon.c b/drivers/media/rc/imon.c
index f329485..822b9f4 100644
--- a/drivers/media/rc/imon.c
+++ b/drivers/media/rc/imon.c
@@ -1909,10 +1909,8 @@
 	int ret, i;
 
 	idev = input_allocate_device();
-	if (!idev) {
-		dev_err(ictx->dev, "input dev allocation failed\n");
+	if (!idev)
 		goto out;
-	}
 
 	snprintf(ictx->name_idev, sizeof(ictx->name_idev),
 		 "iMON Panel, Knob and Mouse(%04x:%04x)",
@@ -1960,10 +1958,8 @@
 	int ret;
 
 	touch = input_allocate_device();
-	if (!touch) {
-		dev_err(ictx->dev, "touchscreen input dev allocation failed\n");
+	if (!touch)
 		goto touch_alloc_failed;
-	}
 
 	snprintf(ictx->name_touch, sizeof(ictx->name_touch),
 		 "iMON USB Touchscreen (%04x:%04x)",
diff --git a/drivers/media/rc/keymaps/Makefile b/drivers/media/rc/keymaps/Makefile
index b1cde8c..0b8c549 100644
--- a/drivers/media/rc/keymaps/Makefile
+++ b/drivers/media/rc/keymaps/Makefile
@@ -98,4 +98,5 @@
 			rc-videomate-s350.o \
 			rc-videomate-tv-pvr.o \
 			rc-winfast.o \
-			rc-winfast-usbii-deluxe.o
+			rc-winfast-usbii-deluxe.o \
+			rc-su3000.o
diff --git a/drivers/media/rc/keymaps/rc-su3000.c b/drivers/media/rc/keymaps/rc-su3000.c
new file mode 100644
index 0000000..8dbd3e9
--- /dev/null
+++ b/drivers/media/rc/keymaps/rc-su3000.c
@@ -0,0 +1,75 @@
+/* rc-su3000.h - Keytable for Geniatech HDStar Remote Controller
+ *
+ * Copyright (c) 2013 by Evgeny Plehov <Evgeny Plehov@ukr.net>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include <media/rc-map.h>
+#include <linux/module.h>
+
+static struct rc_map_table su3000[] = {
+	{ 0x25, KEY_POWER },	/* right-bottom Red */
+	{ 0x0a, KEY_MUTE },	/* -/-- */
+	{ 0x01, KEY_1 },
+	{ 0x02, KEY_2 },
+	{ 0x03, KEY_3 },
+	{ 0x04, KEY_4 },
+	{ 0x05, KEY_5 },
+	{ 0x06, KEY_6 },
+	{ 0x07, KEY_7 },
+	{ 0x08, KEY_8 },
+	{ 0x09, KEY_9 },
+	{ 0x00, KEY_0 },
+	{ 0x20, KEY_UP },	/* CH+ */
+	{ 0x21, KEY_DOWN },	/* CH- */
+	{ 0x12, KEY_VOLUMEUP },	/* Brightness Up */
+	{ 0x13, KEY_VOLUMEDOWN }, /* Brightness Down */
+	{ 0x1f, KEY_RECORD },
+	{ 0x17, KEY_PLAY },
+	{ 0x16, KEY_PAUSE },
+	{ 0x0b, KEY_STOP },
+	{ 0x27, KEY_FASTFORWARD }, /* >> */
+	{ 0x26, KEY_REWIND },	/* << */
+	{ 0x0d, KEY_OK },	/* Mute */
+	{ 0x11, KEY_LEFT },	/* VOL- */
+	{ 0x10, KEY_RIGHT },	/* VOL+ */
+	{ 0x29, KEY_BACK },	/* button under 9 */
+	{ 0x2c, KEY_MENU },	/* TTX */
+	{ 0x2b, KEY_EPG },	/* EPG */
+	{ 0x1e, KEY_RED },	/* OSD */
+	{ 0x0e, KEY_GREEN },	/* Window */
+	{ 0x2d, KEY_YELLOW },	/* button under << */
+	{ 0x0f, KEY_BLUE },	/* bottom yellow button */
+	{ 0x14, KEY_AUDIO },	/* Snapshot */
+	{ 0x38, KEY_TV },	/* TV/Radio */
+	{ 0x0c, KEY_ESC }	/* upper Red button */
+};
+
+static struct rc_map_list su3000_map = {
+	.map = {
+		.scan    = su3000,
+		.size    = ARRAY_SIZE(su3000),
+		.rc_type = RC_TYPE_RC5,
+		.name    = RC_MAP_SU3000,
+	}
+};
+
+static int __init init_rc_map_su3000(void)
+{
+	return rc_map_register(&su3000_map);
+}
+
+static void __exit exit_rc_map_su3000(void)
+{
+	rc_map_unregister(&su3000_map);
+}
+
+module_init(init_rc_map_su3000)
+module_exit(exit_rc_map_su3000)
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Evgeny Plehov <Evgeny Plehov@ukr.net>");
diff --git a/drivers/media/rc/mceusb.c b/drivers/media/rc/mceusb.c
index 3c76101..a25bb15 100644
--- a/drivers/media/rc/mceusb.c
+++ b/drivers/media/rc/mceusb.c
@@ -199,6 +199,7 @@
 #define VENDOR_TIVO		0x105a
 #define VENDOR_CONEXANT		0x0572
 #define VENDOR_TWISTEDMELON	0x2596
+#define VENDOR_HAUPPAUGE	0x2040
 
 enum mceusb_model_type {
 	MCE_GEN2 = 0,		/* Most boards */
@@ -210,6 +211,7 @@
 	MULTIFUNCTION,
 	TIVO_KIT,
 	MCE_GEN2_NO_TX,
+	HAUPPAUGE_CX_HYBRID_TV,
 };
 
 struct mceusb_model {
@@ -258,6 +260,11 @@
 		.no_tx = 1, /* tx isn't wired up at all */
 		.name = "Conexant Hybrid TV (cx231xx) MCE IR",
 	},
+	[HAUPPAUGE_CX_HYBRID_TV] = {
+		.rc_map = RC_MAP_HAUPPAUGE,
+		.no_tx = 1, /* eeprom says it has no tx */
+		.name = "Conexant Hybrid TV (cx231xx) MCE IR no TX",
+	},
 	[MULTIFUNCTION] = {
 		.mce_gen2 = 1,
 		.ir_intfnum = 2,
@@ -399,6 +406,9 @@
 	{ USB_DEVICE(VENDOR_TWISTEDMELON, 0x8016) },
 	/* Twisted Melon Inc. - Manta Transceiver */
 	{ USB_DEVICE(VENDOR_TWISTEDMELON, 0x8042) },
+	/* Hauppauge WinTV-HVR-930C-HD - based on cx231xx */
+	{ USB_DEVICE(VENDOR_HAUPPAUGE, 0xb130),
+	  .driver_info = HAUPPAUGE_CX_HYBRID_TV },
 	/* Terminating entry */
 	{ }
 };
diff --git a/drivers/media/rc/rc-main.c b/drivers/media/rc/rc-main.c
index 46da365..02e2f38 100644
--- a/drivers/media/rc/rc-main.c
+++ b/drivers/media/rc/rc-main.c
@@ -22,6 +22,10 @@
 #include <linux/module.h>
 #include "rc-core-priv.h"
 
+/* Bitmap to store allocated device numbers from 0 to IRRCV_NUM_DEVICES - 1 */
+#define IRRCV_NUM_DEVICES      256
+DECLARE_BITMAP(ir_core_dev_number, IRRCV_NUM_DEVICES);
+
 /* Sizes are in bytes, 256 bytes allows for 32 entries on x64 */
 #define IR_TAB_MIN_SIZE	256
 #define IR_TAB_MAX_SIZE	8192
@@ -1065,10 +1069,9 @@
 int rc_register_device(struct rc_dev *dev)
 {
 	static bool raw_init = false; /* raw decoders loaded? */
-	static atomic_t devno = ATOMIC_INIT(0);
 	struct rc_map *rc_map;
 	const char *path;
-	int rc;
+	int rc, devno;
 
 	if (!dev || !dev->map_name)
 		return -EINVAL;
@@ -1096,7 +1099,15 @@
 	 */
 	mutex_lock(&dev->lock);
 
-	dev->devno = (unsigned long)(atomic_inc_return(&devno) - 1);
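+	/* Find the lowest free device number and claim it atomically; retry if
+	 * a concurrent registration grabs the same bit first */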
+	do {
+		devno = find_first_zero_bit(ir_core_dev_number,
+					    IRRCV_NUM_DEVICES);
+		/* No free device slots */
+		if (devno >= IRRCV_NUM_DEVICES) {
+			mutex_unlock(&dev->lock);
+			return -ENOMEM;
+		}
+	} while (test_and_set_bit(devno, ir_core_dev_number));
+
+	dev->devno = devno;
 	dev_set_name(&dev->dev, "rc%ld", dev->devno);
 	dev_set_drvdata(&dev->dev, dev);
 	rc = device_add(&dev->dev);
@@ -1186,6 +1197,7 @@
 	device_del(&dev->dev);
 out_unlock:
 	mutex_unlock(&dev->lock);
+	clear_bit(dev->devno, ir_core_dev_number);
 	return rc;
 }
 EXPORT_SYMBOL_GPL(rc_register_device);
@@ -1197,6 +1209,8 @@
 
 	del_timer_sync(&dev->timer_keyup);
 
+	clear_bit(dev->devno, ir_core_dev_number);
+
 	if (dev->driver_type == RC_DRIVER_IR_RAW)
 		ir_raw_event_unregister(dev);
 
diff --git a/drivers/media/rc/st_rc.c b/drivers/media/rc/st_rc.c
index 65120c2..8f0cddb 100644
--- a/drivers/media/rc/st_rc.c
+++ b/drivers/media/rc/st_rc.c
@@ -13,6 +13,7 @@
 #include <linux/module.h>
 #include <linux/of.h>
 #include <linux/platform_device.h>
+#include <linux/reset.h>
 #include <media/rc-core.h>
 #include <linux/pinctrl/consumer.h>
 
@@ -28,6 +29,7 @@
 	int				sample_mult;
 	int				sample_div;
 	bool				rxuhfmode;
+	struct	reset_control		*rstc;
 };
 
 /* Registers */
@@ -161,6 +163,10 @@
 	unsigned int rx_max_symbol_per = MAX_SYMB_TIME;
 	unsigned int rx_sampling_freq_div;
 
+	/* Enable the IP */
+	if (dev->rstc)
+		reset_control_deassert(dev->rstc);
+
 	clk_prepare_enable(dev->sys_clock);
 	baseclock = clk_get_rate(dev->sys_clock);
 
@@ -271,6 +277,11 @@
 	else
 		rc_dev->rx_base = rc_dev->base;
 
+	rc_dev->rstc = reset_control_get(dev, NULL);
+	if (IS_ERR(rc_dev->rstc))
+		rc_dev->rstc = NULL;
+
 	rc_dev->dev = dev;
 	platform_set_drvdata(pdev, rc_dev);
 	st_rc_hardware_init(rc_dev);
@@ -338,6 +349,8 @@
 		writel(0x00, rc_dev->rx_base + IRB_RX_EN);
 		writel(0x00, rc_dev->rx_base + IRB_RX_INT_EN);
 		clk_disable_unprepare(rc_dev->sys_clock);
+		if (rc_dev->rstc)
+			reset_control_assert(rc_dev->rstc);
 	}
 
 	return 0;
diff --git a/drivers/media/tuners/Kconfig b/drivers/media/tuners/Kconfig
index 15665de..ba2e365 100644
--- a/drivers/media/tuners/Kconfig
+++ b/drivers/media/tuners/Kconfig
@@ -215,6 +215,13 @@
 	help
 	  FCI FC2580 silicon tuner driver.
 
+config MEDIA_TUNER_M88TS2022
+	tristate "Montage M88TS2022 silicon tuner"
+	depends on MEDIA_SUPPORT && I2C
+	default m if !MEDIA_SUBDRV_AUTOSELECT
+	help
+	  Montage M88TS2022 silicon tuner driver.
+
 config MEDIA_TUNER_TUA9001
 	tristate "Infineon TUA 9001 silicon tuner"
 	depends on MEDIA_SUPPORT && I2C
diff --git a/drivers/media/tuners/Makefile b/drivers/media/tuners/Makefile
index 308f108..efe82a9 100644
--- a/drivers/media/tuners/Makefile
+++ b/drivers/media/tuners/Makefile
@@ -31,6 +31,7 @@
 obj-$(CONFIG_MEDIA_TUNER_E4000) += e4000.o
 obj-$(CONFIG_MEDIA_TUNER_FC2580) += fc2580.o
 obj-$(CONFIG_MEDIA_TUNER_TUA9001) += tua9001.o
+obj-$(CONFIG_MEDIA_TUNER_M88TS2022) += m88ts2022.o
 obj-$(CONFIG_MEDIA_TUNER_FC0011) += fc0011.o
 obj-$(CONFIG_MEDIA_TUNER_FC0012) += fc0012.o
 obj-$(CONFIG_MEDIA_TUNER_FC0013) += fc0013.o
diff --git a/drivers/media/tuners/e4000.c b/drivers/media/tuners/e4000.c
index 72971a8..40c1da7 100644
--- a/drivers/media/tuners/e4000.c
+++ b/drivers/media/tuners/e4000.c
@@ -243,8 +243,10 @@
 			break;
 	}
 
-	if (i == ARRAY_SIZE(e4000_pll_lut))
+	if (i == ARRAY_SIZE(e4000_pll_lut)) {
+		ret = -EINVAL;
 		goto err;
+	}
 
 	/*
 	 * Note: Currently f_vco overflows when c->frequency is 1 073 741 824 Hz
@@ -271,8 +273,10 @@
 			break;
 	}
 
-	if (i == ARRAY_SIZE(e400_lna_filter_lut))
+	if (i == ARRAY_SIZE(e400_lna_filter_lut)) {
+		ret = -EINVAL;
 		goto err;
+	}
 
 	ret = e4000_wr_reg(priv, 0x10, e400_lna_filter_lut[i].val);
 	if (ret < 0)
@@ -284,8 +288,10 @@
 			break;
 	}
 
-	if (i == ARRAY_SIZE(e4000_if_filter_lut))
+	if (i == ARRAY_SIZE(e4000_if_filter_lut)) {
+		ret = -EINVAL;
 		goto err;
+	}
 
 	buf[0] = e4000_if_filter_lut[i].reg11_val;
 	buf[1] = e4000_if_filter_lut[i].reg12_val;
@@ -300,8 +306,10 @@
 			break;
 	}
 
-	if (i == ARRAY_SIZE(e4000_band_lut))
+	if (i == ARRAY_SIZE(e4000_band_lut)) {
+		ret = -EINVAL;
 		goto err;
+	}
 
 	ret = e4000_wr_reg(priv, 0x07, e4000_band_lut[i].reg07_val);
 	if (ret < 0)
diff --git a/drivers/media/tuners/m88ts2022.c b/drivers/media/tuners/m88ts2022.c
new file mode 100644
index 0000000..40c42de
--- /dev/null
+++ b/drivers/media/tuners/m88ts2022.c
@@ -0,0 +1,674 @@
+/*
+ * Montage M88TS2022 silicon tuner driver
+ *
+ * Copyright (C) 2013 Antti Palosaari <crope@iki.fi>
+ *
+ *    This program is free software; you can redistribute it and/or modify
+ *    it under the terms of the GNU General Public License as published by
+ *    the Free Software Foundation; either version 2 of the License, or
+ *    (at your option) any later version.
+ *
+ *    This program is distributed in the hope that it will be useful,
+ *    but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *    GNU General Public License for more details.
+ *
+ * Some calculations are taken from existing TS2020 driver.
+ */
+
+#include "m88ts2022_priv.h"
+
+/* write multiple registers */
+static int m88ts2022_wr_regs(struct m88ts2022_priv *priv,
+		u8 reg, const u8 *val, int len)
+{
+#define MAX_WR_LEN 3
+#define MAX_WR_XFER_LEN (MAX_WR_LEN + 1)
+	int ret;
+	u8 buf[MAX_WR_XFER_LEN];
+	struct i2c_msg msg[1] = {
+		{
+			.addr = priv->client->addr,
+			.flags = 0,
+			.len = 1 + len,
+			.buf = buf,
+		}
+	};
+
+	if (WARN_ON(len > MAX_WR_LEN))
+		return -EINVAL;
+
+	buf[0] = reg;
+	memcpy(&buf[1], val, len);
+
+	ret = i2c_transfer(priv->client->adapter, msg, 1);
+	if (ret == 1) {
+		ret = 0;
+	} else {
+		dev_warn(&priv->client->dev,
+				"%s: i2c wr failed=%d reg=%02x len=%d\n",
+				KBUILD_MODNAME, ret, reg, len);
+		ret = -EREMOTEIO;
+	}
+
+	return ret;
+}
+
+/* read multiple registers */
+static int m88ts2022_rd_regs(struct m88ts2022_priv *priv, u8 reg,
+		u8 *val, int len)
+{
+#define MAX_RD_LEN 1
+#define MAX_RD_XFER_LEN (MAX_RD_LEN)
+	int ret;
+	u8 buf[MAX_RD_XFER_LEN];
+	struct i2c_msg msg[2] = {
+		{
+			.addr = priv->client->addr,
+			.flags = 0,
+			.len = 1,
+			.buf = &reg,
+		}, {
+			.addr = priv->client->addr,
+			.flags = I2C_M_RD,
+			.len = len,
+			.buf = buf,
+		}
+	};
+
+	if (WARN_ON(len > MAX_RD_LEN))
+		return -EINVAL;
+
+	ret = i2c_transfer(priv->client->adapter, msg, 2);
+	if (ret == 2) {
+		memcpy(val, buf, len);
+		ret = 0;
+	} else {
+		dev_warn(&priv->client->dev,
+				"%s: i2c rd failed=%d reg=%02x len=%d\n",
+				KBUILD_MODNAME, ret, reg, len);
+		ret = -EREMOTEIO;
+	}
+
+	return ret;
+}
+
+/* write single register */
+static int m88ts2022_wr_reg(struct m88ts2022_priv *priv, u8 reg, u8 val)
+{
+	return m88ts2022_wr_regs(priv, reg, &val, 1);
+}
+
+/* read single register */
+static int m88ts2022_rd_reg(struct m88ts2022_priv *priv, u8 reg, u8 *val)
+{
+	return m88ts2022_rd_regs(priv, reg, val, 1);
+}
+
+/* write single register with mask */
+static int m88ts2022_wr_reg_mask(struct m88ts2022_priv *priv,
+		u8 reg, u8 val, u8 mask)
+{
+	int ret;
+	u8 u8tmp;
+
+	/* no need for read if whole reg is written */
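+	/* e.g. m88ts2022_wr_reg_mask(priv, 0x10, 0x80, 0x80) touches only bit 7
+	 * and writes back bits 6..0 exactly as they were read */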
+	if (mask != 0xff) {
+		ret = m88ts2022_rd_regs(priv, reg, &u8tmp, 1);
+		if (ret)
+			return ret;
+
+		val &= mask;
+		u8tmp &= ~mask;
+		val |= u8tmp;
+	}
+
+	return m88ts2022_wr_regs(priv, reg, &val, 1);
+}
+
+static int m88ts2022_cmd(struct dvb_frontend *fe,
+		int op, int sleep, u8 reg, u8 mask, u8 val, u8 *reg_val)
+{
+	struct m88ts2022_priv *priv = fe->tuner_priv;
+	int ret, i, j;
+	u8 u8tmp;
+	struct m88ts2022_reg_val reg_vals[] = {
+		{0x51, 0x1f - op},
+		{0x51, 0x1f},
+		{0x50, 0x00 + op},
+		{0x50, 0x00},
+	};
+
+	for (i = 0; i < 2; i++) {
+		dev_dbg(&priv->client->dev,
+				"%s: i=%d op=%02x reg=%02x mask=%02x val=%02x\n",
+				__func__, i, op, reg, mask, val);
+
+		for (j = 0; j < ARRAY_SIZE(reg_vals); j++) {
+			ret = m88ts2022_wr_reg(priv, reg_vals[j].reg,
+					reg_vals[j].val);
+			if (ret)
+				goto err;
+		}
+
+		usleep_range(sleep * 1000, sleep * 10000);
+
+		ret = m88ts2022_rd_reg(priv, reg, &u8tmp);
+		if (ret)
+			goto err;
+
+		if ((u8tmp & mask) != val)
+			break;
+	}
+
+	if (reg_val)
+		*reg_val = u8tmp;
+err:
+	return ret;
+}
+
+static int m88ts2022_set_params(struct dvb_frontend *fe)
+{
+	struct m88ts2022_priv *priv = fe->tuner_priv;
+	struct dtv_frontend_properties *c = &fe->dtv_property_cache;
+	int ret;
+	unsigned int frequency_khz, frequency_offset_khz, f_3db_hz;
+	unsigned int f_ref_khz, f_vco_khz, div_ref, div_out, pll_n, gdiv28;
+	u8 buf[3], u8tmp, cap_code, lpf_gm, lpf_mxdiv, div_max, div_min;
+	u16 u16tmp;
+	dev_dbg(&priv->client->dev,
+			"%s: frequency=%d symbol_rate=%d rolloff=%d\n",
+			__func__, c->frequency, c->symbol_rate, c->rolloff);
+	/*
+	 * Integer-N PLL synthesizer
+	 * kHz is used for all calculations to keep calculations within 32-bit
+	 */
+	f_ref_khz = DIV_ROUND_CLOSEST(priv->cfg.clock, 1000);
+	div_ref = DIV_ROUND_CLOSEST(f_ref_khz, 2000);
+
+	if (c->symbol_rate < 5000000)
+		frequency_offset_khz = 3000; /* 3 MHz */
+	else
+		frequency_offset_khz = 0;
+
+	frequency_khz = c->frequency + frequency_offset_khz;
+
+	if (frequency_khz < 1103000) {
+		div_out = 4;
+		u8tmp = 0x1b;
+	} else {
+		div_out = 2;
+		u8tmp = 0x0b;
+	}
+
+	buf[0] = u8tmp;
+	buf[1] = 0x40;
+	ret = m88ts2022_wr_regs(priv, 0x10, buf, 2);
+	if (ret)
+		goto err;
+
+	f_vco_khz = frequency_khz * div_out;
+	pll_n = f_vco_khz * div_ref / f_ref_khz;
+	pll_n += pll_n % 2;
+	priv->frequency_khz = pll_n * f_ref_khz / div_ref / div_out;
+
+	if (pll_n < 4095)
+		u16tmp = pll_n - 1024;
+	else if (pll_n < 6143)
+		u16tmp = pll_n + 1024;
+	else
+		u16tmp = pll_n + 3072;
+
+	buf[0] = (u16tmp >> 8) & 0x3f;
+	buf[1] = (u16tmp >> 0) & 0xff;
+	buf[2] = div_ref - 8;
+	ret = m88ts2022_wr_regs(priv, 0x01, buf, 3);
+	if (ret)
+		goto err;
+
+	dev_dbg(&priv->client->dev,
+			"%s: frequency=%u offset=%d f_vco_khz=%u pll_n=%u div_ref=%u div_out=%u\n",
+			__func__, priv->frequency_khz,
+			priv->frequency_khz - c->frequency, f_vco_khz, pll_n,
+			div_ref, div_out);
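+	/*
+	 * Worked example (illustrative, assuming cfg.clock = 16 MHz and a
+	 * 1200000 kHz request with symbol_rate >= 5 Msps, so no offset):
+	 *   f_ref_khz = 16000, div_ref = 8, div_out = 2
+	 *   f_vco_khz = 1200000 * 2 = 2400000
+	 *   pll_n = 2400000 * 8 / 16000 = 1200 (already even)
+	 *   tuned frequency = 1200 * 16000 / 8 / 2 = 1200000 kHz (no residual)
+	 *   pll_n < 4095, so the register value is 1200 - 1024 = 176
+	 */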
+
+	ret = m88ts2022_cmd(fe, 0x10, 5, 0x15, 0x40, 0x00, NULL);
+	if (ret)
+		goto err;
+
+	ret = m88ts2022_rd_reg(priv, 0x14, &u8tmp);
+	if (ret)
+		goto err;
+
+	u8tmp &= 0x7f;
+	if (u8tmp < 64) {
+		ret = m88ts2022_wr_reg_mask(priv, 0x10, 0x80, 0x80);
+		if (ret)
+			goto err;
+
+		ret = m88ts2022_wr_reg(priv, 0x11, 0x6f);
+		if (ret)
+			goto err;
+
+		ret = m88ts2022_cmd(fe, 0x10, 5, 0x15, 0x40, 0x00, NULL);
+		if (ret)
+			goto err;
+	}
+
+	ret = m88ts2022_rd_reg(priv, 0x14, &u8tmp);
+	if (ret)
+		goto err;
+
+	u8tmp &= 0x1f;
+	if (u8tmp > 19) {
+		ret = m88ts2022_wr_reg_mask(priv, 0x10, 0x00, 0x02);
+		if (ret)
+			goto err;
+	}
+
+	ret = m88ts2022_cmd(fe, 0x08, 5, 0x3c, 0xff, 0x00, NULL);
+	if (ret)
+		goto err;
+
+	ret = m88ts2022_wr_reg(priv, 0x25, 0x00);
+	if (ret)
+		goto err;
+
+	ret = m88ts2022_wr_reg(priv, 0x27, 0x70);
+	if (ret)
+		goto err;
+
+	ret = m88ts2022_wr_reg(priv, 0x41, 0x09);
+	if (ret)
+		goto err;
+
+	ret = m88ts2022_wr_reg(priv, 0x08, 0x0b);
+	if (ret)
+		goto err;
+
+	/* filters */
+	gdiv28 = DIV_ROUND_CLOSEST(f_ref_khz * 1694U, 1000000U);
+
+	ret = m88ts2022_wr_reg(priv, 0x04, gdiv28);
+	if (ret)
+		goto err;
+
+	ret = m88ts2022_cmd(fe, 0x04, 2, 0x26, 0xff, 0x00, &u8tmp);
+	if (ret)
+		goto err;
+
+	cap_code = u8tmp & 0x3f;
+
+	ret = m88ts2022_wr_reg(priv, 0x41, 0x0d);
+	if (ret)
+		goto err;
+
+	ret = m88ts2022_cmd(fe, 0x04, 2, 0x26, 0xff, 0x00, &u8tmp);
+	if (ret)
+		goto err;
+
+	u8tmp &= 0x3f;
+	cap_code = (cap_code + u8tmp) / 2;
+	gdiv28 = gdiv28 * 207 / (cap_code * 2 + 151);
+	div_max = gdiv28 * 135 / 100;
+	div_min = gdiv28 * 78 / 100;
+	div_max = clamp_val(div_max, 0U, 63U);
+
+	f_3db_hz = c->symbol_rate * 135UL / 200UL;
+	f_3db_hz += 2000000U + (frequency_offset_khz * 1000U);
+	f_3db_hz = clamp(f_3db_hz, 7000000U, 40000000U);
+
+#define LPF_COEFF 3200U
+	lpf_gm = DIV_ROUND_CLOSEST(f_3db_hz * gdiv28, LPF_COEFF * f_ref_khz);
+	lpf_gm = clamp_val(lpf_gm, 1U, 23U);
+
+	lpf_mxdiv = DIV_ROUND_CLOSEST(lpf_gm * LPF_COEFF * f_ref_khz, f_3db_hz);
+	if (lpf_mxdiv < div_min)
+		lpf_mxdiv = DIV_ROUND_CLOSEST(++lpf_gm * LPF_COEFF * f_ref_khz, f_3db_hz);
+	lpf_mxdiv = clamp_val(lpf_mxdiv, 0U, div_max);
+
+	ret = m88ts2022_wr_reg(priv, 0x04, lpf_mxdiv);
+	if (ret)
+		goto err;
+
+	ret = m88ts2022_wr_reg(priv, 0x06, lpf_gm);
+	if (ret)
+		goto err;
+
+	ret = m88ts2022_cmd(fe, 0x04, 2, 0x26, 0xff, 0x00, &u8tmp);
+	if (ret)
+		goto err;
+
+	cap_code = u8tmp & 0x3f;
+
+	ret = m88ts2022_wr_reg(priv, 0x41, 0x09);
+	if (ret)
+		goto err;
+
+	ret = m88ts2022_cmd(fe, 0x04, 2, 0x26, 0xff, 0x00, &u8tmp);
+	if (ret)
+		goto err;
+
+	u8tmp &= 0x3f;
+	cap_code = (cap_code + u8tmp) / 2;
+
+	u8tmp = cap_code | 0x80;
+	ret = m88ts2022_wr_reg(priv, 0x25, u8tmp);
+	if (ret)
+		goto err;
+
+	ret = m88ts2022_wr_reg(priv, 0x27, 0x30);
+	if (ret)
+		goto err;
+
+	ret = m88ts2022_wr_reg(priv, 0x08, 0x09);
+	if (ret)
+		goto err;
+
+	ret = m88ts2022_cmd(fe, 0x01, 20, 0x21, 0xff, 0x00, NULL);
+	if (ret)
+		goto err;
+err:
+	if (ret)
+		dev_dbg(&priv->client->dev, "%s: failed=%d\n", __func__, ret);
+
+	return ret;
+}
+
+static int m88ts2022_init(struct dvb_frontend *fe)
+{
+	struct m88ts2022_priv *priv = fe->tuner_priv;
+	int ret, i;
+	u8 u8tmp;
+	static const struct m88ts2022_reg_val reg_vals[] = {
+		{0x7d, 0x9d},
+		{0x7c, 0x9a},
+		{0x7a, 0x76},
+		{0x3b, 0x01},
+		{0x63, 0x88},
+		{0x61, 0x85},
+		{0x22, 0x30},
+		{0x30, 0x40},
+		{0x20, 0x23},
+		{0x24, 0x02},
+		{0x12, 0xa0},
+	};
+	dev_dbg(&priv->client->dev, "%s:\n", __func__);
+
+	ret = m88ts2022_wr_reg(priv, 0x00, 0x01);
+	if (ret)
+		goto err;
+
+	ret = m88ts2022_wr_reg(priv, 0x00, 0x03);
+	if (ret)
+		goto err;
+
+	switch (priv->cfg.clock_out) {
+	case M88TS2022_CLOCK_OUT_DISABLED:
+		u8tmp = 0x60;
+		break;
+	case M88TS2022_CLOCK_OUT_ENABLED:
+		u8tmp = 0x70;
+		ret = m88ts2022_wr_reg(priv, 0x05, priv->cfg.clock_out_div);
+		if (ret)
+			goto err;
+		break;
+	case M88TS2022_CLOCK_OUT_ENABLED_XTALOUT:
+		u8tmp = 0x6c;
+		break;
+	default:
+		ret = -EINVAL;
+		goto err;
+	}
+
+	ret = m88ts2022_wr_reg(priv, 0x42, u8tmp);
+	if (ret)
+		goto err;
+
+	if (priv->cfg.loop_through)
+		u8tmp = 0xec;
+	else
+		u8tmp = 0x6c;
+
+	ret = m88ts2022_wr_reg(priv, 0x62, u8tmp);
+	if (ret)
+		goto err;
+
+	for (i = 0; i < ARRAY_SIZE(reg_vals); i++) {
+		ret = m88ts2022_wr_reg(priv, reg_vals[i].reg, reg_vals[i].val);
+		if (ret)
+			goto err;
+	}
+err:
+	if (ret)
+		dev_dbg(&priv->client->dev, "%s: failed=%d\n", __func__, ret);
+	return ret;
+}
+
+static int m88ts2022_sleep(struct dvb_frontend *fe)
+{
+	struct m88ts2022_priv *priv = fe->tuner_priv;
+	int ret;
+	dev_dbg(&priv->client->dev, "%s:\n", __func__);
+
+	ret = m88ts2022_wr_reg(priv, 0x00, 0x00);
+	if (ret)
+		goto err;
+err:
+	if (ret)
+		dev_dbg(&priv->client->dev, "%s: failed=%d\n", __func__, ret);
+	return ret;
+}
+
+static int m88ts2022_get_frequency(struct dvb_frontend *fe, u32 *frequency)
+{
+	struct m88ts2022_priv *priv = fe->tuner_priv;
+	dev_dbg(&priv->client->dev, "%s:\n", __func__);
+
+	*frequency = priv->frequency_khz;
+	return 0;
+}
+
+static int m88ts2022_get_if_frequency(struct dvb_frontend *fe, u32 *frequency)
+{
+	struct m88ts2022_priv *priv = fe->tuner_priv;
+	dev_dbg(&priv->client->dev, "%s:\n", __func__);
+
+	*frequency = 0; /* Zero-IF */
+	return 0;
+}
+
+static int m88ts2022_get_rf_strength(struct dvb_frontend *fe, u16 *strength)
+{
+	struct m88ts2022_priv *priv = fe->tuner_priv;
+	int ret;
+	u8 u8tmp;
+	u16 gain, u16tmp;
+	unsigned int gain1, gain2, gain3;
+
+	ret = m88ts2022_rd_reg(priv, 0x3d, &u8tmp);
+	if (ret)
+		goto err;
+
+	gain1 = (u8tmp >> 0) & 0x1f;
+	gain1 = clamp(gain1, 0U, 15U);
+
+	ret = m88ts2022_rd_reg(priv, 0x21, &u8tmp);
+	if (ret)
+		goto err;
+
+	gain2 = (u8tmp >> 0) & 0x1f;
+	gain2 = clamp(gain2, 2U, 16U);
+
+	ret = m88ts2022_rd_reg(priv, 0x66, &u8tmp);
+	if (ret)
+		goto err;
+
+	gain3 = (u8tmp >> 3) & 0x07;
+	gain3 = clamp(gain3, 0U, 6U);
+
+	gain = gain1 * 265 + gain2 * 338 + gain3 * 285;
+
+	/* scale value to 0x0000-0xffff */
+	u16tmp = (0xffff - gain);
+	u16tmp = clamp_val(u16tmp, 59000U, 61500U);
+
+	*strength = (u16tmp - 59000) * 0xffff / (61500 - 59000);
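+	/* e.g. with illustrative readings gain1 = 8, gain2 = 8, gain3 = 2:
+	 * gain = 2120 + 2704 + 570 = 5394, u16tmp = 65535 - 5394 = 60141,
+	 * so *strength = 1141 * 65535 / 2500 = 29910 (~46% of full scale) */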
+err:
+	if (ret)
+		dev_dbg(&priv->client->dev, "%s: failed=%d\n", __func__, ret);
+	return ret;
+}
+
+static const struct dvb_tuner_ops m88ts2022_tuner_ops = {
+	.info = {
+		.name          = "Montage M88TS2022",
+		.frequency_min = 950000,
+		.frequency_max = 2150000,
+	},
+
+	.init = m88ts2022_init,
+	.sleep = m88ts2022_sleep,
+	.set_params = m88ts2022_set_params,
+
+	.get_frequency = m88ts2022_get_frequency,
+	.get_if_frequency = m88ts2022_get_if_frequency,
+	.get_rf_strength = m88ts2022_get_rf_strength,
+};
+
+static int m88ts2022_probe(struct i2c_client *client,
+		const struct i2c_device_id *id)
+{
+	struct m88ts2022_config *cfg = client->dev.platform_data;
+	struct dvb_frontend *fe = cfg->fe;
+	struct m88ts2022_priv *priv;
+	int ret;
+	u8 chip_id, u8tmp;
+
+	priv = kzalloc(sizeof(*priv), GFP_KERNEL);
+	if (!priv) {
+		ret = -ENOMEM;
+		dev_err(&client->dev, "%s: kzalloc() failed\n", KBUILD_MODNAME);
+		goto err;
+	}
+
+	memcpy(&priv->cfg, cfg, sizeof(struct m88ts2022_config));
+	priv->client = client;
+
+	/* check if the tuner is there */
+	ret = m88ts2022_rd_reg(priv, 0x00, &u8tmp);
+	if (ret)
+		goto err;
+
+	if ((u8tmp & 0x03) == 0x00) {
+		ret = m88ts2022_wr_reg(priv, 0x00, 0x01);
+		if (ret < 0)
+			goto err;
+
+		usleep_range(2000, 50000);
+	}
+
+	ret = m88ts2022_wr_reg(priv, 0x00, 0x03);
+	if (ret)
+		goto err;
+
+	usleep_range(2000, 50000);
+
+	ret = m88ts2022_rd_reg(priv, 0x00, &chip_id);
+	if (ret)
+		goto err;
+
+	dev_dbg(&priv->client->dev, "%s: chip_id=%02x\n", __func__, chip_id);
+
+	switch (chip_id) {
+	case 0xc3:
+	case 0x83:
+		break;
+	default:
+		ret = -ENODEV;
+		goto err;
+	}
+
+	switch (priv->cfg.clock_out) {
+	case M88TS2022_CLOCK_OUT_DISABLED:
+		u8tmp = 0x60;
+		break;
+	case M88TS2022_CLOCK_OUT_ENABLED:
+		u8tmp = 0x70;
+		ret = m88ts2022_wr_reg(priv, 0x05, priv->cfg.clock_out_div);
+		if (ret)
+			goto err;
+		break;
+	case M88TS2022_CLOCK_OUT_ENABLED_XTALOUT:
+		u8tmp = 0x6c;
+		break;
+	default:
+		ret = -EINVAL;
+		goto err;
+	}
+
+	ret = m88ts2022_wr_reg(priv, 0x42, u8tmp);
+	if (ret)
+		goto err;
+
+	if (priv->cfg.loop_through)
+		u8tmp = 0xec;
+	else
+		u8tmp = 0x6c;
+
+	ret = m88ts2022_wr_reg(priv, 0x62, u8tmp);
+	if (ret)
+		goto err;
+
+	/* sleep */
+	ret = m88ts2022_wr_reg(priv, 0x00, 0x00);
+	if (ret)
+		goto err;
+
+	dev_info(&priv->client->dev,
+			"%s: Montage M88TS2022 successfully identified\n",
+			KBUILD_MODNAME);
+
+	fe->tuner_priv = priv;
+	memcpy(&fe->ops.tuner_ops, &m88ts2022_tuner_ops,
+			sizeof(struct dvb_tuner_ops));
+
+	i2c_set_clientdata(client, priv);
+	return 0;
+err:
+	dev_dbg(&client->dev, "%s: failed=%d\n", __func__, ret);
+	kfree(priv);
+	return ret;
+}
+
+static int m88ts2022_remove(struct i2c_client *client)
+{
+	struct m88ts2022_priv *priv = i2c_get_clientdata(client);
+	struct dvb_frontend *fe = priv->cfg.fe;
+	dev_dbg(&client->dev, "%s:\n", __func__);
+
+	memset(&fe->ops.tuner_ops, 0, sizeof(struct dvb_tuner_ops));
+	fe->tuner_priv = NULL;
+	kfree(priv);
+
+	return 0;
+}
+
+static const struct i2c_device_id m88ts2022_id[] = {
+	{"m88ts2022", 0},
+	{}
+};
+MODULE_DEVICE_TABLE(i2c, m88ts2022_id);
+
+static struct i2c_driver m88ts2022_driver = {
+	.driver = {
+		.owner	= THIS_MODULE,
+		.name	= "m88ts2022",
+	},
+	.probe		= m88ts2022_probe,
+	.remove		= m88ts2022_remove,
+	.id_table	= m88ts2022_id,
+};
+
+module_i2c_driver(m88ts2022_driver);
+
+MODULE_DESCRIPTION("Montage M88TS2022 silicon tuner driver");
+MODULE_AUTHOR("Antti Palosaari <crope@iki.fi>");
+MODULE_LICENSE("GPL");
diff --git a/drivers/media/tuners/m88ts2022.h b/drivers/media/tuners/m88ts2022.h
new file mode 100644
index 0000000..659fa1b
--- /dev/null
+++ b/drivers/media/tuners/m88ts2022.h
@@ -0,0 +1,54 @@
+/*
+ * Montage M88TS2022 silicon tuner driver
+ *
+ * Copyright (C) 2013 Antti Palosaari <crope@iki.fi>
+ *
+ *    This program is free software; you can redistribute it and/or modify
+ *    it under the terms of the GNU General Public License as published by
+ *    the Free Software Foundation; either version 2 of the License, or
+ *    (at your option) any later version.
+ *
+ *    This program is distributed in the hope that it will be useful,
+ *    but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *    GNU General Public License for more details.
+ */
+
+#ifndef M88TS2022_H
+#define M88TS2022_H
+
+#include "dvb_frontend.h"
+
+struct m88ts2022_config {
+	/*
+	 * clock
+	 * 16000000 - 32000000
+	 */
+	u32 clock;
+
+	/*
+	 * RF loop-through
+	 */
+	u8 loop_through:1;
+
+	/*
+	 * clock output
+	 */
+#define M88TS2022_CLOCK_OUT_DISABLED        0
+#define M88TS2022_CLOCK_OUT_ENABLED         1
+#define M88TS2022_CLOCK_OUT_ENABLED_XTALOUT 2
+	u8 clock_out:2;
+
+	/*
+	 * clock output divider
+	 * 1 - 31
+	 */
+	u8 clock_out_div:5;
+
+	/*
+	 * pointer to DVB frontend
+	 */
+	struct dvb_frontend *fe;
+};
+
+#endif
diff --git a/drivers/media/tuners/m88ts2022_priv.h b/drivers/media/tuners/m88ts2022_priv.h
new file mode 100644
index 0000000..0363dd8
--- /dev/null
+++ b/drivers/media/tuners/m88ts2022_priv.h
@@ -0,0 +1,34 @@
+/*
+ * Montage M88TS2022 silicon tuner driver
+ *
+ * Copyright (C) 2013 Antti Palosaari <crope@iki.fi>
+ *
+ *    This program is free software; you can redistribute it and/or modify
+ *    it under the terms of the GNU General Public License as published by
+ *    the Free Software Foundation; either version 2 of the License, or
+ *    (at your option) any later version.
+ *
+ *    This program is distributed in the hope that it will be useful,
+ *    but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *    GNU General Public License for more details.
+ */
+
+#ifndef M88TS2022_PRIV_H
+#define M88TS2022_PRIV_H
+
+#include "m88ts2022.h"
+
+struct m88ts2022_priv {
+	struct m88ts2022_config cfg;
+	struct i2c_client *client;
+	struct dvb_frontend *fe;
+	u32 frequency_khz;
+};
+
+struct m88ts2022_reg_val {
+	u8 reg;
+	u8 val;
+};
+
+#endif
diff --git a/drivers/media/tuners/tuner-xc2028.c b/drivers/media/tuners/tuner-xc2028.c
index 4be5cf8..cca508d 100644
--- a/drivers/media/tuners/tuner-xc2028.c
+++ b/drivers/media/tuners/tuner-xc2028.c
@@ -134,15 +134,6 @@
 	_rc;								\
 })
 
-#define i2c_rcv(priv, buf, size) ({					\
-	int _rc;							\
-	_rc = tuner_i2c_xfer_recv(&priv->i2c_props, buf, size);		\
-	if (size != _rc)						\
-		tuner_err("i2c input error: rc = %d (should be %d)\n",	\
-			   _rc, (int)size); 				\
-	_rc;								\
-})
-
 #define i2c_send_recv(priv, obuf, osize, ibuf, isize) ({		\
 	int _rc;							\
 	_rc = tuner_i2c_xfer_send_recv(&priv->i2c_props, obuf, osize,	\
@@ -276,6 +267,7 @@
 	case XC2028_WAITING_FIRMWARE:
 		return -EAGAIN;
 	case XC2028_ACTIVE:
+		return 1;
 	case XC2028_SLEEP:
 		return 0;
 	case XC2028_NODEV:
@@ -718,6 +710,8 @@
 	return 0;
 }
 
+static int xc2028_sleep(struct dvb_frontend *fe);
+
 static int check_firmware(struct dvb_frontend *fe, unsigned int type,
 			  v4l2_std_id std, __u16 int_freq)
 {
@@ -890,7 +884,7 @@
 	return 0;
 
 fail:
-	priv->state = XC2028_SLEEP;
+	priv->state = XC2028_NO_FIRMWARE;
 
 	memset(&priv->cur_fw, 0, sizeof(priv->cur_fw));
 	if (retry_count < 8) {
@@ -900,6 +894,9 @@
 		goto retry;
 	}
 
+	/* Firmware didn't load. Put the device to sleep */
+	xc2028_sleep(fe);
+
 	if (rc == -ENOENT)
 		rc = -EINVAL;
 	return rc;
@@ -917,6 +914,12 @@
 	if (rc < 0)
 		return rc;
 
+	/* If the device is sleeping, no channel is tuned */
+	if (!rc) {
+		*strength = 0;
+		return 0;
+	}
+
 	mutex_lock(&priv->lock);
 
 	/* Sync Lock Indicator */
@@ -964,6 +967,12 @@
 	if (rc < 0)
 		return rc;
 
+	/* If the device is sleeping, no channel is tuned */
+	if (!rc) {
+		*afc = 0;
+		return 0;
+	}
+
 	mutex_lock(&priv->lock);
 
 	/* Sync Lock Indicator */
@@ -1281,6 +1290,10 @@
 	if (rc < 0)
 		return rc;
 
+	/* Device is already in sleep mode */
+	if (!rc)
+		return 0;
+
 	/* Avoid firmware reload on slow devices or if PM disabled */
 	if (no_poweroff || priv->ctrl.disable_power_mgmt)
 		return 0;
@@ -1298,7 +1311,8 @@
 	else
 		rc = send_seq(priv, {0x80, XREG_POWER_DOWN, 0x00, 0x00});
 
-	priv->state = XC2028_SLEEP;
+	if (rc >= 0)
+		priv->state = XC2028_SLEEP;
 
 	mutex_unlock(&priv->lock);
 
@@ -1366,7 +1380,7 @@
 
 	if (rc < 0)
 		return;
-	priv->state = XC2028_SLEEP;
+	priv->state = XC2028_ACTIVE;
 }
 
 static int xc2028_set_config(struct dvb_frontend *fe, void *priv_cfg)
diff --git a/drivers/media/usb/Kconfig b/drivers/media/usb/Kconfig
index cfe8056..39d824e 100644
--- a/drivers/media/usb/Kconfig
+++ b/drivers/media/usb/Kconfig
@@ -17,7 +17,6 @@
 source "drivers/media/usb/zr364xx/Kconfig"
 source "drivers/media/usb/stkwebcam/Kconfig"
 source "drivers/media/usb/s2255/Kconfig"
-source "drivers/media/usb/sn9c102/Kconfig"
 source "drivers/media/usb/usbtv/Kconfig"
 endif
 
diff --git a/drivers/media/usb/Makefile b/drivers/media/usb/Makefile
index 0935f47..7ac4b14 100644
--- a/drivers/media/usb/Makefile
+++ b/drivers/media/usb/Makefile
@@ -10,7 +10,6 @@
 obj-$(CONFIG_USB_GSPCA)         += gspca/
 obj-$(CONFIG_USB_PWC)           += pwc/
 obj-$(CONFIG_VIDEO_CPIA2) += cpia2/
-obj-$(CONFIG_USB_SN9C102)       += sn9c102/
 obj-$(CONFIG_VIDEO_AU0828) += au0828/
 obj-$(CONFIG_VIDEO_HDPVR)	+= hdpvr/
 obj-$(CONFIG_VIDEO_PVRUSB2) += pvrusb2/
diff --git a/drivers/media/usb/au0828/au0828-core.c b/drivers/media/usb/au0828/au0828-core.c
index bd9d19a..ab45a6f 100644
--- a/drivers/media/usb/au0828/au0828-core.c
+++ b/drivers/media/usb/au0828/au0828-core.c
@@ -173,9 +173,8 @@
 	const struct usb_device_id *id)
 {
 	int ifnum;
-#ifdef CONFIG_VIDEO_AU0828_V4L2
-	int retval;
-#endif
+	int retval = 0;
+
 	struct au0828_dev *dev;
 	struct usb_device *usbdev = interface_to_usbdev(interface);
 
@@ -257,7 +256,11 @@
 #endif
 
 	/* Digital TV */
-	au0828_dvb_register(dev);
+	retval = au0828_dvb_register(dev);
+	if (retval)
+		pr_err("%s() au0828_dvb_register failed\n",
+		       __func__);
 
 	/* Store the pointer to the au0828_dev so it can be accessed in
 	   au0828_usb_disconnect */
@@ -268,7 +271,7 @@
 
 	mutex_unlock(&dev->lock);
 
-	return 0;
+	return retval;
 }
 
 static struct usb_driver au0828_usb_driver = {
diff --git a/drivers/media/usb/au0828/au0828-dvb.c b/drivers/media/usb/au0828/au0828-dvb.c
index 9a6f156..4ae8b10 100644
--- a/drivers/media/usb/au0828/au0828-dvb.c
+++ b/drivers/media/usb/au0828/au0828-dvb.c
@@ -33,6 +33,10 @@
 #include "mxl5007t.h"
 #include "tda18271.h"
 
+static int preallocate_big_buffers;
+module_param_named(preallocate_big_buffers, preallocate_big_buffers, int, 0644);
+MODULE_PARM_DESC(preallocate_big_buffers, "Preallocate the larger transfer buffers at module load time");
+
 DVB_DEFINE_MOD_OPT_ADAPTER_NR(adapter_nr);
 
 #define _AU0828_BULKPIPE 0x83
@@ -153,9 +157,13 @@
 
 	dev->urb_streaming = 0;
 	for (i = 0; i < URB_COUNT; i++) {
-		usb_kill_urb(dev->urbs[i]);
-		kfree(dev->urbs[i]->transfer_buffer);
-		usb_free_urb(dev->urbs[i]);
+		if (dev->urbs[i]) {
+			usb_kill_urb(dev->urbs[i]);
+			if (!preallocate_big_buffers)
+				kfree(dev->urbs[i]->transfer_buffer);
+
+			usb_free_urb(dev->urbs[i]);
+		}
 	}
 
 	return 0;
@@ -181,10 +189,18 @@
 
 		purb = dev->urbs[i];
 
-		purb->transfer_buffer = kzalloc(URB_BUFSIZE, GFP_KERNEL);
+		if (preallocate_big_buffers)
+			purb->transfer_buffer = dev->dig_transfer_buffer[i];
+		else
+			purb->transfer_buffer = kzalloc(URB_BUFSIZE,
+					GFP_KERNEL);
+
 		if (!purb->transfer_buffer) {
 			usb_free_urb(purb);
 			dev->urbs[i] = NULL;
+			printk(KERN_ERR
+			       "%s: failed big buffer allocation, err = %d\n",
+			       __func__, ret);
 			goto err;
 		}
 
@@ -217,6 +233,27 @@
 	return ret;
 }
 
+static void au0828_start_transport(struct au0828_dev *dev)
+{
+	au0828_write(dev, 0x608, 0x90);
+	au0828_write(dev, 0x609, 0x72);
+	au0828_write(dev, 0x60a, 0x71);
+	au0828_write(dev, 0x60b, 0x01);
+}
+
+static void au0828_stop_transport(struct au0828_dev *dev, int full_stop)
+{
+	if (full_stop) {
+		au0828_write(dev, 0x608, 0x00);
+		au0828_write(dev, 0x609, 0x00);
+		au0828_write(dev, 0x60a, 0x00);
+	}
+	au0828_write(dev, 0x60b, 0x00);
+}
+
 static int au0828_dvb_start_feed(struct dvb_demux_feed *feed)
 {
 	struct dvb_demux *demux = feed->demux;
@@ -231,13 +268,17 @@
 
 	if (dvb) {
 		mutex_lock(&dvb->lock);
+		dvb->start_count++;
+		dprintk(1, "%s(), start_count: %d, stop_count: %d\n", __func__,
+			dvb->start_count, dvb->stop_count);
 		if (dvb->feeding++ == 0) {
 			/* Start transport */
-			au0828_write(dev, 0x608, 0x90);
-			au0828_write(dev, 0x609, 0x72);
-			au0828_write(dev, 0x60a, 0x71);
-			au0828_write(dev, 0x60b, 0x01);
+			au0828_start_transport(dev);
 			ret = start_urb_transfer(dev);
+			if (ret < 0) {
+				au0828_stop_transport(dev, 0);
+				dvb->feeding--;	/* We ran out of memory... */
+			}
 		}
 		mutex_unlock(&dvb->lock);
 	}
@@ -256,10 +297,16 @@
 
 	if (dvb) {
 		mutex_lock(&dvb->lock);
-		if (--dvb->feeding == 0) {
-			/* Stop transport */
-			ret = stop_urb_transfer(dev);
-			au0828_write(dev, 0x60b, 0x00);
+		dvb->stop_count++;
+		dprintk(1, "%s(), start_count: %d, stop_count: %d\n", __func__,
+			dvb->start_count, dvb->stop_count);
+		if (dvb->feeding > 0) {
+			dvb->feeding--;
+			if (dvb->feeding == 0) {
+				/* Stop transport */
+				ret = stop_urb_transfer(dev);
+				au0828_stop_transport(dev, 0);
+			}
 		}
 		mutex_unlock(&dvb->lock);
 	}
@@ -282,16 +329,10 @@
 
 	/* Stop transport */
 	stop_urb_transfer(dev);
-	au0828_write(dev, 0x608, 0x00);
-	au0828_write(dev, 0x609, 0x00);
-	au0828_write(dev, 0x60a, 0x00);
-	au0828_write(dev, 0x60b, 0x00);
+	au0828_stop_transport(dev, 1);
 
 	/* Start transport */
-	au0828_write(dev, 0x608, 0x90);
-	au0828_write(dev, 0x609, 0x72);
-	au0828_write(dev, 0x60a, 0x71);
-	au0828_write(dev, 0x60b, 0x01);
+	au0828_start_transport(dev);
 	start_urb_transfer(dev);
 
 	mutex_unlock(&dvb->lock);
@@ -304,6 +345,23 @@
 
 	dprintk(1, "%s()\n", __func__);
 
+	if (preallocate_big_buffers) {
+		int i;
+		for (i = 0; i < URB_COUNT; i++) {
+			dev->dig_transfer_buffer[i] = kzalloc(URB_BUFSIZE,
+					GFP_KERNEL);
+
+			if (!dev->dig_transfer_buffer[i]) {
+				result = -ENOMEM;
+
+				printk(KERN_ERR
+				       "%s: failed buffer allocation (errno = %d)\n",
+				       DRIVER_NAME, result);
+				goto fail_adapter;
+			}
+		}
+	}
+
 	INIT_WORK(&dev->restart_streaming, au0828_restart_dvb_streaming);
 
 	/* register adapter */
@@ -375,6 +433,9 @@
 
 	/* register network adapter */
 	dvb_net_init(&dvb->adapter, &dvb->net, &dvb->demux.dmx);
+
+	dvb->start_count = 0;
+	dvb->stop_count = 0;
 	return 0;
 
 fail_fe_conn:
@@ -391,6 +452,13 @@
 	dvb_frontend_detach(dvb->frontend);
 	dvb_unregister_adapter(&dvb->adapter);
 fail_adapter:
+
+	if (preallocate_big_buffers) {
+		int i;
+		for (i = 0; i < URB_COUNT; i++)
+			kfree(dev->dig_transfer_buffer[i]);
+	}
+
 	return result;
 }
 
@@ -411,6 +479,14 @@
 	dvb_unregister_frontend(dvb->frontend);
 	dvb_frontend_detach(dvb->frontend);
 	dvb_unregister_adapter(&dvb->adapter);
+
+	if (preallocate_big_buffers) {
+		int i;
+		for (i = 0; i < URB_COUNT; i++)
+			kfree(dev->dig_transfer_buffer[i]);
+	}
 }
 
 /* All the DVB attach calls go here, this function get's modified
diff --git a/drivers/media/usb/au0828/au0828.h b/drivers/media/usb/au0828/au0828.h
index ef1f57f..5439772 100644
--- a/drivers/media/usb/au0828/au0828.h
+++ b/drivers/media/usb/au0828/au0828.h
@@ -102,6 +102,8 @@
 	struct dmx_frontend fe_mem;
 	struct dvb_net net;
 	int feeding;
+	int start_count;
+	int stop_count;
 };
 
 enum au0828_stream_state {
@@ -260,6 +262,10 @@
 	/* USB / URB Related */
 	int		urb_streaming;
 	struct urb	*urbs[URB_COUNT];
+
+	/* Preallocated digital transfer buffers */
+	char *dig_transfer_buffer[URB_COUNT];
 };
 
 /* ----------------------------------------------------------- */
diff --git a/drivers/media/usb/cx231xx/Kconfig b/drivers/media/usb/cx231xx/Kconfig
index 86feeea..f14c5e8 100644
--- a/drivers/media/usb/cx231xx/Kconfig
+++ b/drivers/media/usb/cx231xx/Kconfig
@@ -45,6 +45,8 @@
 	select MEDIA_TUNER_XC5000 if MEDIA_SUBDRV_AUTOSELECT
 	select MEDIA_TUNER_TDA18271 if MEDIA_SUBDRV_AUTOSELECT
 	select DVB_MB86A20S if MEDIA_SUBDRV_AUTOSELECT
+	select DVB_LGDT3305 if MEDIA_SUBDRV_AUTOSELECT
+	select DVB_TDA18271C2DD if MEDIA_SUBDRV_AUTOSELECT
 
 	---help---
 	  This adds support for DVB cards based on the
diff --git a/drivers/media/usb/cx231xx/cx231xx-cards.c b/drivers/media/usb/cx231xx/cx231xx-cards.c
index 528cce9..2ee03e4 100644
--- a/drivers/media/usb/cx231xx/cx231xx-cards.c
+++ b/drivers/media/usb/cx231xx/cx231xx-cards.c
@@ -709,6 +709,8 @@
 
 /* table of devices that work with this driver */
 struct usb_device_id cx231xx_id_table[] = {
+	{USB_DEVICE(0x1D19, 0x6109),
+	.driver_info = CX231XX_BOARD_PV_XCAPTURE_USB},
 	{USB_DEVICE(0x0572, 0x5A3C),
 	 .driver_info = CX231XX_BOARD_UNKNOWN},
 	{USB_DEVICE(0x0572, 0x58A2),
diff --git a/drivers/media/usb/cx231xx/cx231xx-i2c.c b/drivers/media/usb/cx231xx/cx231xx-i2c.c
index 96a5a09..7c0f797 100644
--- a/drivers/media/usb/cx231xx/cx231xx-i2c.c
+++ b/drivers/media/usb/cx231xx/cx231xx-i2c.c
@@ -371,9 +371,9 @@
 	mutex_lock(&dev->i2c_lock);
 	for (i = 0; i < num; i++) {
 
-		addr = msgs[i].addr >> 1;
+		addr = msgs[i].addr;
 
-		dprintk2(2, "%s %s addr=%x len=%d:",
+		dprintk2(2, "%s %s addr=0x%x len=%d:",
 			 (msgs[i].flags & I2C_M_RD) ? "read" : "write",
 			 i == num - 1 ? "stop" : "nonstop", addr, msgs[i].len);
 		if (!msgs[i].len) {
@@ -390,32 +390,41 @@
 			rc = cx231xx_i2c_recv_bytes(i2c_adap, &msgs[i]);
 			if (i2c_debug >= 2) {
 				for (byte = 0; byte < msgs[i].len; byte++)
-					printk(" %02x", msgs[i].buf[byte]);
+					printk(KERN_CONT " %02x", msgs[i].buf[byte]);
 			}
 		} else if (i + 1 < num && (msgs[i + 1].flags & I2C_M_RD) &&
 			   msgs[i].addr == msgs[i + 1].addr
 			   && (msgs[i].len <= 2) && (bus->nr < 3)) {
+			/* write bytes */
+			if (i2c_debug >= 2) {
+				for (byte = 0; byte < msgs[i].len; byte++)
+					printk(KERN_CONT " %02x", msgs[i].buf[byte]);
+				printk(KERN_CONT "\n");
+			}
 			/* read bytes */
+			dprintk2(2, "plus %s %s addr=0x%x len=%d:",
+				(msgs[i+1].flags & I2C_M_RD) ? "read" : "write",
+				i+1 == num - 1 ? "stop" : "nonstop", addr, msgs[i+1].len);
 			rc = cx231xx_i2c_recv_bytes_with_saddr(i2c_adap,
 							       &msgs[i],
 							       &msgs[i + 1]);
 			if (i2c_debug >= 2) {
-				for (byte = 0; byte < msgs[i].len; byte++)
-					printk(" %02x", msgs[i].buf[byte]);
+				for (byte = 0; byte < msgs[i+1].len; byte++)
+					printk(KERN_CONT " %02x", msgs[i+1].buf[byte]);
 			}
 			i++;
 		} else {
 			/* write bytes */
 			if (i2c_debug >= 2) {
 				for (byte = 0; byte < msgs[i].len; byte++)
-					printk(" %02x", msgs[i].buf[byte]);
+					printk(KERN_CONT " %02x", msgs[i].buf[byte]);
 			}
 			rc = cx231xx_i2c_send_bytes(i2c_adap, &msgs[i]);
 		}
 		if (rc < 0)
 			goto err;
 		if (i2c_debug >= 2)
-			printk("\n");
+			printk(KERN_CONT "\n");
 	}
 	mutex_unlock(&dev->i2c_lock);
 	return num;
diff --git a/drivers/media/usb/dvb-usb-v2/af9035.c b/drivers/media/usb/dvb-usb-v2/af9035.c
index 8f9b2cea..8ede8ea 100644
--- a/drivers/media/usb/dvb-usb-v2/af9035.c
+++ b/drivers/media/usb/dvb-usb-v2/af9035.c
@@ -1539,6 +1539,8 @@
 		&af9035_props, "TerraTec Cinergy T Stick Dual RC (rev. 2)", NULL) },
 	{ DVB_USB_DEVICE(USB_VID_LEADTEK, 0x6a05,
 		&af9035_props, "Leadtek WinFast DTV Dongle Dual", NULL) },
+	{ DVB_USB_DEVICE(USB_VID_HAUPPAUGE, 0xf900,
+		&af9035_props, "Hauppauge WinTV-MiniStick 2", NULL) },
 	{ }
 };
 MODULE_DEVICE_TABLE(usb, af9035_id_table);
diff --git a/drivers/media/usb/dvb-usb-v2/anysee.c b/drivers/media/usb/dvb-usb-v2/anysee.c
index 90cfa35..eeab79b 100644
--- a/drivers/media/usb/dvb-usb-v2/anysee.c
+++ b/drivers/media/usb/dvb-usb-v2/anysee.c
@@ -442,6 +442,7 @@
  * IOD[0] ZL10353 1=enabled
  * IOE[0] tuner 0=enabled
  * tuner is behind ZL10353 I2C-gate
+ * tuner is behind TDA10023 I2C-gate
  *
  * E7 TC VID=1c73 PID=861f HW=18 FW=0.7 AMTCI=0.5 "anysee-E7TC(LP)"
  * PCB: 508TC (rev0.6)
@@ -956,7 +957,7 @@
 
 		if (fe && adap->fe[1]) {
 			/* attach tuner for 2nd FE */
-			fe = dvb_attach(dvb_pll_attach, adap->fe[0],
+			fe = dvb_attach(dvb_pll_attach, adap->fe[1],
 					(0xc0 >> 1), &d->i2c_adap,
 					DVB_PLL_SAMSUNG_DTOS403IH102A);
 		}
diff --git a/drivers/media/usb/dvb-usb-v2/az6007.c b/drivers/media/usb/dvb-usb-v2/az6007.c
index 44c64ef3..c1051c3 100644
--- a/drivers/media/usb/dvb-usb-v2/az6007.c
+++ b/drivers/media/usb/dvb-usb-v2/az6007.c
@@ -68,6 +68,19 @@
 	.microcode_name = "dvb-usb-terratec-h7-drxk.fw",
 };
 
+static struct drxk_config cablestar_hdci_drxk = {
+	.adr = 0x29,
+	.parallel_ts = true,
+	.dynamic_clk = true,
+	.single_master = true,
+	.enable_merr_cfg = true,
+	.no_i2c_bridge = false,
+	.chunk_size = 64,
+	.mpeg_out_clk_strength = 0x02,
+	.qam_demod_parameter_count = 2,
+	.microcode_name = "dvb-usb-technisat-cablestar-hdci-drxk.fw",
+};
+
 static int drxk_gate_ctrl(struct dvb_frontend *fe, int enable)
 {
 	struct az6007_device_state *st = fe_to_priv(fe);
@@ -630,6 +643,27 @@
 	return 0;
 }
 
+static int az6007_cablestar_hdci_frontend_attach(struct dvb_usb_adapter *adap)
+{
+	struct az6007_device_state *st = adap_to_priv(adap);
+	struct dvb_usb_device *d = adap_to_d(adap);
+
+	pr_debug("attaching demod drxk\n");
+
+	adap->fe[0] = dvb_attach(drxk_attach, &cablestar_hdci_drxk,
+				 &d->i2c_adap);
+	if (!adap->fe[0])
+		return -EINVAL;
+
+	adap->fe[0]->sec_priv = adap;
+	st->gate_ctrl = adap->fe[0]->ops.i2c_gate_ctrl;
+	adap->fe[0]->ops.i2c_gate_ctrl = drxk_gate_ctrl;
+
+	az6007_ci_init(adap);
+
+	return 0;
+}
+
 static int az6007_tuner_attach(struct dvb_usb_adapter *adap)
 {
 	struct dvb_usb_device *d = adap_to_d(adap);
@@ -868,6 +902,29 @@
 	}
 };
 
+static struct dvb_usb_device_properties az6007_cablestar_hdci_props = {
+	.driver_name         = KBUILD_MODNAME,
+	.owner               = THIS_MODULE,
+	.firmware            = AZ6007_FIRMWARE,
+
+	.adapter_nr          = adapter_nr,
+	.size_of_priv        = sizeof(struct az6007_device_state),
+	.i2c_algo            = &az6007_i2c_algo,
+	.tuner_attach        = az6007_tuner_attach,
+	.frontend_attach     = az6007_cablestar_hdci_frontend_attach,
+	.streaming_ctrl      = az6007_streaming_ctrl,
+/* ditch get_rc_config as it can't work (TS35 remote, I believe it's rc5) */
+	.get_rc_config       = NULL,
+	.read_mac_address    = az6007_read_mac_addr,
+	.download_firmware   = az6007_download_firmware,
+	.identify_state	     = az6007_identify_state,
+	.power_ctrl          = az6007_power_ctrl,
+	.num_adapters        = 1,
+	.adapter             = {
+		{ .stream = DVB_USB_STREAM_BULK(0x02, 10, 4096), }
+	}
+};
+
 static struct usb_device_id az6007_usb_table[] = {
 	{DVB_USB_DEVICE(USB_VID_AZUREWAVE, USB_PID_AZUREWAVE_6007,
 		&az6007_props, "Azurewave 6007", RC_MAP_EMPTY)},
@@ -875,6 +932,8 @@
 		&az6007_props, "Terratec H7", RC_MAP_NEC_TERRATEC_CINERGY_XS)},
 	{DVB_USB_DEVICE(USB_VID_TERRATEC, USB_PID_TERRATEC_H7_2,
 		&az6007_props, "Terratec H7", RC_MAP_NEC_TERRATEC_CINERGY_XS)},
+	{DVB_USB_DEVICE(USB_VID_TECHNISAT, USB_PID_TECHNISAT_USB2_CABLESTAR_HDCI,
+		&az6007_cablestar_hdci_props, "Technisat CableStar Combo HD CI", RC_MAP_EMPTY)},
 	{0},
 };
 
diff --git a/drivers/media/usb/dvb-usb-v2/ec168.c b/drivers/media/usb/dvb-usb-v2/ec168.c
index 5c68f39..0c2b377 100644
--- a/drivers/media/usb/dvb-usb-v2/ec168.c
+++ b/drivers/media/usb/dvb-usb-v2/ec168.c
@@ -170,7 +170,7 @@
 
 error:
 	mutex_unlock(&d->i2c_mutex);
-	return i;
+	return ret;
 }
 
 static u32 ec168_i2c_func(struct i2c_adapter *adapter)
diff --git a/drivers/media/usb/dvb-usb-v2/it913x.c b/drivers/media/usb/dvb-usb-v2/it913x.c
index 1cb6899..fe95a58 100644
--- a/drivers/media/usb/dvb-usb-v2/it913x.c
+++ b/drivers/media/usb/dvb-usb-v2/it913x.c
@@ -799,6 +799,9 @@
 	{ DVB_USB_DEVICE(USB_VID_KWORLD_2, USB_PID_CTVDIGDUAL_V2,
 		&it913x_properties, "Digital Dual TV Receiver CTVDIGDUAL_V2",
 			RC_MAP_IT913X_V1) },
+	{ DVB_USB_DEVICE(USB_VID_AVERMEDIA, USB_PID_AVERMEDIA_H335,
+		&it913x_properties, "Avermedia H335",
+			RC_MAP_IT913X_V2) },
 	{}		/* Terminating entry */
 };
 
diff --git a/drivers/media/usb/dvb-usb-v2/mxl111sf-demod.c b/drivers/media/usb/dvb-usb-v2/mxl111sf-demod.c
index d83df4b..0a98d04c 100644
--- a/drivers/media/usb/dvb-usb-v2/mxl111sf-demod.c
+++ b/drivers/media/usb/dvb-usb-v2/mxl111sf-demod.c
@@ -1,7 +1,7 @@
 /*
  *  mxl111sf-demod.c - driver for the MaxLinear MXL111SF DVB-T demodulator
  *
- *  Copyright (C) 2010 Michael Krufky <mkrufky@kernellabs.com>
+ *  Copyright (C) 2010-2014 Michael Krufky <mkrufky@linuxtv.org>
  *
  *  This program is free software; you can redistribute it and/or modify
  *  it under the terms of the GNU General Public License as published by
@@ -601,7 +601,7 @@
 EXPORT_SYMBOL_GPL(mxl111sf_demod_attach);
 
 MODULE_DESCRIPTION("MaxLinear MxL111SF DVB-T demodulator driver");
-MODULE_AUTHOR("Michael Krufky <mkrufky@kernellabs.com>");
+MODULE_AUTHOR("Michael Krufky <mkrufky@linuxtv.org>");
 MODULE_LICENSE("GPL");
 MODULE_VERSION("0.1");
 
diff --git a/drivers/media/usb/dvb-usb-v2/mxl111sf-demod.h b/drivers/media/usb/dvb-usb-v2/mxl111sf-demod.h
index 3f3f8bf..2d4530f 100644
--- a/drivers/media/usb/dvb-usb-v2/mxl111sf-demod.h
+++ b/drivers/media/usb/dvb-usb-v2/mxl111sf-demod.h
@@ -1,7 +1,7 @@
 /*
  *  mxl111sf-demod.h - driver for the MaxLinear MXL111SF DVB-T demodulator
  *
- *  Copyright (C) 2010 Michael Krufky <mkrufky@kernellabs.com>
+ *  Copyright (C) 2010-2014 Michael Krufky <mkrufky@linuxtv.org>
  *
  *  This program is free software; you can redistribute it and/or modify
  *  it under the terms of the GNU General Public License as published by
diff --git a/drivers/media/usb/dvb-usb-v2/mxl111sf-gpio.c b/drivers/media/usb/dvb-usb-v2/mxl111sf-gpio.c
index e4121cb..a619410 100644
--- a/drivers/media/usb/dvb-usb-v2/mxl111sf-gpio.c
+++ b/drivers/media/usb/dvb-usb-v2/mxl111sf-gpio.c
@@ -1,7 +1,7 @@
 /*
  *  mxl111sf-gpio.c - driver for the MaxLinear MXL111SF
  *
- *  Copyright (C) 2010 Michael Krufky <mkrufky@kernellabs.com>
+ *  Copyright (C) 2010-2014 Michael Krufky <mkrufky@linuxtv.org>
  *
  *  This program is free software; you can redistribute it and/or modify
  *  it under the terms of the GNU General Public License as published by
diff --git a/drivers/media/usb/dvb-usb-v2/mxl111sf-gpio.h b/drivers/media/usb/dvb-usb-v2/mxl111sf-gpio.h
index 0220f54..b85a577 100644
--- a/drivers/media/usb/dvb-usb-v2/mxl111sf-gpio.h
+++ b/drivers/media/usb/dvb-usb-v2/mxl111sf-gpio.h
@@ -1,7 +1,7 @@
 /*
  *  mxl111sf-gpio.h - driver for the MaxLinear MXL111SF
  *
- *  Copyright (C) 2010 Michael Krufky <mkrufky@kernellabs.com>
+ *  Copyright (C) 2010-2014 Michael Krufky <mkrufky@linuxtv.org>
  *
  *  This program is free software; you can redistribute it and/or modify
  *  it under the terms of the GNU General Public License as published by
diff --git a/drivers/media/usb/dvb-usb-v2/mxl111sf-i2c.c b/drivers/media/usb/dvb-usb-v2/mxl111sf-i2c.c
index 3443455..a101d06 100644
--- a/drivers/media/usb/dvb-usb-v2/mxl111sf-i2c.c
+++ b/drivers/media/usb/dvb-usb-v2/mxl111sf-i2c.c
@@ -1,7 +1,7 @@
 /*
  *  mxl111sf-i2c.c - driver for the MaxLinear MXL111SF
  *
- *  Copyright (C) 2010 Michael Krufky <mkrufky@kernellabs.com>
+ *  Copyright (C) 2010-2014 Michael Krufky <mkrufky@linuxtv.org>
  *
  *  This program is free software; you can redistribute it and/or modify
  *  it under the terms of the GNU General Public License as published by
diff --git a/drivers/media/usb/dvb-usb-v2/mxl111sf-i2c.h b/drivers/media/usb/dvb-usb-v2/mxl111sf-i2c.h
index a57a45f..4657621 100644
--- a/drivers/media/usb/dvb-usb-v2/mxl111sf-i2c.h
+++ b/drivers/media/usb/dvb-usb-v2/mxl111sf-i2c.h
@@ -1,7 +1,7 @@
 /*
  *  mxl111sf-i2c.h - driver for the MaxLinear MXL111SF
  *
- *  Copyright (C) 2010 Michael Krufky <mkrufky@kernellabs.com>
+ *  Copyright (C) 2010-2014 Michael Krufky <mkrufky@linuxtv.org>
  *
  *  This program is free software; you can redistribute it and/or modify
  *  it under the terms of the GNU General Public License as published by
diff --git a/drivers/media/usb/dvb-usb-v2/mxl111sf-phy.c b/drivers/media/usb/dvb-usb-v2/mxl111sf-phy.c
index b741b3a..f6b3480 100644
--- a/drivers/media/usb/dvb-usb-v2/mxl111sf-phy.c
+++ b/drivers/media/usb/dvb-usb-v2/mxl111sf-phy.c
@@ -1,7 +1,7 @@
 /*
  *  mxl111sf-phy.c - driver for the MaxLinear MXL111SF
  *
- *  Copyright (C) 2010 Michael Krufky <mkrufky@kernellabs.com>
+ *  Copyright (C) 2010-2014 Michael Krufky <mkrufky@linuxtv.org>
  *
  *  This program is free software; you can redistribute it and/or modify
  *  it under the terms of the GNU General Public License as published by
diff --git a/drivers/media/usb/dvb-usb-v2/mxl111sf-phy.h b/drivers/media/usb/dvb-usb-v2/mxl111sf-phy.h
index f075607..0643738 100644
--- a/drivers/media/usb/dvb-usb-v2/mxl111sf-phy.h
+++ b/drivers/media/usb/dvb-usb-v2/mxl111sf-phy.h
@@ -1,7 +1,7 @@
 /*
  *  mxl111sf-phy.h - driver for the MaxLinear MXL111SF
  *
- *  Copyright (C) 2010 Michael Krufky <mkrufky@kernellabs.com>
+ *  Copyright (C) 2010-2014 Michael Krufky <mkrufky@linuxtv.org>
  *
  *  This program is free software; you can redistribute it and/or modify
  *  it under the terms of the GNU General Public License as published by
diff --git a/drivers/media/usb/dvb-usb-v2/mxl111sf-reg.h b/drivers/media/usb/dvb-usb-v2/mxl111sf-reg.h
index 17831b0..89bf115 100644
--- a/drivers/media/usb/dvb-usb-v2/mxl111sf-reg.h
+++ b/drivers/media/usb/dvb-usb-v2/mxl111sf-reg.h
@@ -1,7 +1,7 @@
 /*
  *  mxl111sf-reg.h - driver for the MaxLinear MXL111SF
  *
- *  Copyright (C) 2010 Michael Krufky <mkrufky@kernellabs.com>
+ *  Copyright (C) 2010-2014 Michael Krufky <mkrufky@linuxtv.org>
  *
  *  This program is free software; you can redistribute it and/or modify
  *  it under the terms of the GNU General Public License as published by
diff --git a/drivers/media/usb/dvb-usb-v2/mxl111sf-tuner.c b/drivers/media/usb/dvb-usb-v2/mxl111sf-tuner.c
index 879c529..a8d2c70 100644
--- a/drivers/media/usb/dvb-usb-v2/mxl111sf-tuner.c
+++ b/drivers/media/usb/dvb-usb-v2/mxl111sf-tuner.c
@@ -1,7 +1,7 @@
 /*
  *  mxl111sf-tuner.c - driver for the MaxLinear MXL111SF CMOS tuner
  *
- *  Copyright (C) 2010 Michael Krufky <mkrufky@kernellabs.com>
+ *  Copyright (C) 2010-2014 Michael Krufky <mkrufky@linuxtv.org>
  *
  *  This program is free software; you can redistribute it and/or modify
  *  it under the terms of the GNU General Public License as published by
@@ -512,7 +512,7 @@
 EXPORT_SYMBOL_GPL(mxl111sf_tuner_attach);
 
 MODULE_DESCRIPTION("MaxLinear MxL111SF CMOS tuner driver");
-MODULE_AUTHOR("Michael Krufky <mkrufky@kernellabs.com>");
+MODULE_AUTHOR("Michael Krufky <mkrufky@linuxtv.org>");
 MODULE_LICENSE("GPL");
 MODULE_VERSION("0.1");
 
diff --git a/drivers/media/usb/dvb-usb-v2/mxl111sf-tuner.h b/drivers/media/usb/dvb-usb-v2/mxl111sf-tuner.h
index 90f583e..2046db2 100644
--- a/drivers/media/usb/dvb-usb-v2/mxl111sf-tuner.h
+++ b/drivers/media/usb/dvb-usb-v2/mxl111sf-tuner.h
@@ -1,7 +1,7 @@
 /*
  *  mxl111sf-tuner.h - driver for the MaxLinear MXL111SF CMOS tuner
  *
- *  Copyright (C) 2010 Michael Krufky <mkrufky@kernellabs.com>
+ *  Copyright (C) 2010-2014 Michael Krufky <mkrufky@linuxtv.org>
  *
  *  This program is free software; you can redistribute it and/or modify
  *  it under the terms of the GNU General Public License as published by
@@ -68,7 +68,7 @@
 #else
 static inline
 struct dvb_frontend *mxl111sf_tuner_attach(struct dvb_frontend *fe,
-					   struct mxl111sf_state *mxl_state
+					   struct mxl111sf_state *mxl_state,
 					   struct mxl111sf_tuner_config *cfg)
 {
 	printk(KERN_WARNING "%s: driver disabled by Kconfig\n", __func__);
diff --git a/drivers/media/usb/dvb-usb-v2/mxl111sf.c b/drivers/media/usb/dvb-usb-v2/mxl111sf.c
index 08240e4..c7304fa 100644
--- a/drivers/media/usb/dvb-usb-v2/mxl111sf.c
+++ b/drivers/media/usb/dvb-usb-v2/mxl111sf.c
@@ -1,5 +1,5 @@
 /*
- * Copyright (C) 2010 Michael Krufky (mkrufky@kernellabs.com)
+ * Copyright (C) 2010-2014 Michael Krufky (mkrufky@linuxtv.org)
  *
  *   This program is free software; you can redistribute it and/or modify it
  *   under the terms of the GNU General Public License as published by the Free
@@ -105,7 +105,7 @@
 		ret = -EINVAL;
 	}
 
-	pr_debug("R: (0x%02x, 0x%02x)\n", addr, *data);
+	pr_debug("R: (0x%02x, 0x%02x)\n", addr, buf[1]);
 fail:
 	return ret;
 }
@@ -1421,7 +1421,7 @@
 
 module_usb_driver(mxl111sf_usb_driver);
 
-MODULE_AUTHOR("Michael Krufky <mkrufky@kernellabs.com>");
+MODULE_AUTHOR("Michael Krufky <mkrufky@linuxtv.org>");
 MODULE_DESCRIPTION("Driver for MaxLinear MxL111SF");
 MODULE_VERSION("1.0");
 MODULE_LICENSE("GPL");
diff --git a/drivers/media/usb/dvb-usb-v2/mxl111sf.h b/drivers/media/usb/dvb-usb-v2/mxl111sf.h
index 9816de8..8516c01 100644
--- a/drivers/media/usb/dvb-usb-v2/mxl111sf.h
+++ b/drivers/media/usb/dvb-usb-v2/mxl111sf.h
@@ -1,5 +1,5 @@
 /*
- * Copyright (C) 2010 Michael Krufky (mkrufky@kernellabs.com)
+ * Copyright (C) 2010-2014 Michael Krufky (mkrufky@linuxtv.org)
  *
  *   This program is free software; you can redistribute it and/or modify it
  *   under the terms of the GNU General Public License as published by the Free
diff --git a/drivers/media/usb/dvb-usb-v2/rtl28xxu.c b/drivers/media/usb/dvb-usb-v2/rtl28xxu.c
index ecca036..fda5c64 100644
--- a/drivers/media/usb/dvb-usb-v2/rtl28xxu.c
+++ b/drivers/media/usb/dvb-usb-v2/rtl28xxu.c
@@ -1407,6 +1407,8 @@
 		&rtl2832u_props, "Dexatek DK DVB-T Dongle", NULL) },
 	{ DVB_USB_DEVICE(USB_VID_LEADTEK, 0x6680,
 		&rtl2832u_props, "DigitalNow Quad DVB-T Receiver", NULL) },
+	{ DVB_USB_DEVICE(USB_VID_LEADTEK, USB_PID_WINFAST_DTV_DONGLE_MINID,
+		&rtl2832u_props, "Leadtek Winfast DTV Dongle Mini D", NULL) },
 	{ DVB_USB_DEVICE(USB_VID_TERRATEC, 0x00d3,
 		&rtl2832u_props, "TerraTec Cinergy T Stick RC (Rev. 3)", NULL) },
 	{ DVB_USB_DEVICE(USB_VID_DEXATEK, 0x1102,
diff --git a/drivers/media/usb/dvb-usb/cxusb.c b/drivers/media/usb/dvb-usb/cxusb.c
index 20e345d..a1c641e 100644
--- a/drivers/media/usb/dvb-usb/cxusb.c
+++ b/drivers/media/usb/dvb-usb/cxusb.c
@@ -149,6 +149,7 @@
 			  int num)
 {
 	struct dvb_usb_device *d = i2c_get_adapdata(adap);
+	int ret;
 	int i;
 
 	if (mutex_lock_interruptible(&d->i2c_mutex) < 0)
@@ -173,7 +174,8 @@
 			if (1 + msg[i].len > sizeof(ibuf)) {
 				warn("i2c rd: len=%d is too big!\n",
 				     msg[i].len);
-				return -EOPNOTSUPP;
+				ret = -EOPNOTSUPP;
+				goto unlock;
 			}
 			obuf[0] = 0;
 			obuf[1] = msg[i].len;
@@ -193,12 +195,14 @@
 			if (3 + msg[i].len > sizeof(obuf)) {
 				warn("i2c wr: len=%d is too big!\n",
 				     msg[i].len);
-				return -EOPNOTSUPP;
+				ret = -EOPNOTSUPP;
+				goto unlock;
 			}
 			if (1 + msg[i + 1].len > sizeof(ibuf)) {
 				warn("i2c rd: len=%d is too big!\n",
 				     msg[i + 1].len);
-				return -EOPNOTSUPP;
+				ret = -EOPNOTSUPP;
+				goto unlock;
 			}
 			obuf[0] = msg[i].len;
 			obuf[1] = msg[i+1].len;
@@ -223,7 +227,8 @@
 			if (2 + msg[i].len > sizeof(obuf)) {
 				warn("i2c wr: len=%d is too big!\n",
 				     msg[i].len);
-				return -EOPNOTSUPP;
+				ret = -EOPNOTSUPP;
+				goto unlock;
 			}
 			obuf[0] = msg[i].addr;
 			obuf[1] = msg[i].len;
@@ -237,8 +242,14 @@
 		}
 	}
 
+	if (i == num)
+		ret = num;
+	else
+		ret = -EREMOTEIO;
+
+unlock:
 	mutex_unlock(&d->i2c_mutex);
-	return i == num ? num : -EREMOTEIO;
+	return ret;
 }
 
 static u32 cxusb_i2c_func(struct i2c_adapter *adapter)
diff --git a/drivers/media/usb/dvb-usb/dw2102.c b/drivers/media/usb/dvb-usb/dw2102.c
index c1a63b2..ae0f56a 100644
--- a/drivers/media/usb/dvb-usb/dw2102.c
+++ b/drivers/media/usb/dvb-usb/dw2102.c
@@ -2,7 +2,7 @@
  *	DVBWorld DVB-S 2101, 2102, DVB-S2 2104, DVB-C 3101,
  *	TeVii S600, S630, S650, S660, S480, S421, S632
  *	Prof 1100, 7500,
- *	Geniatech SU3000 Cards
+ *	Geniatech SU3000, T220 Cards
  * Copyright (C) 2008-2012 Igor M. Liplianin (liplianin@me.by)
  *
  *	This program is free software; you can redistribute it and/or modify it
@@ -29,6 +29,8 @@
 #include "stb6100.h"
 #include "stb6100_proc.h"
 #include "m88rs2000.h"
+#include "tda18271.h"
+#include "cxd2820r.h"
 
 /* Max transfer size done by I2C transfer functions */
 #define MAX_XFER_SIZE  64
@@ -110,11 +112,6 @@
 		"Please see linux/Documentation/dvb/ for more details " \
 		"on firmware-problems."
 
-struct rc_map_dvb_usb_table_table {
-	struct rc_map_table *rc_keys;
-	int rc_keys_size;
-};
-
 struct su3000_state {
 	u8 initialized;
 };
@@ -129,12 +126,6 @@
 MODULE_PARM_DESC(debug, "set debugging level (1=info 2=xfer 4=rc(or-able))."
 						DVB_USB_DEBUG_STATUS);
 
-/* keymaps */
-static int ir_keymap;
-module_param_named(keymap, ir_keymap, int, 0644);
-MODULE_PARM_DESC(keymap, "set keymap 0=default 1=dvbworld 2=tevii 3=tbs  ..."
-			" 256=none");
-
 /* demod probe */
 static int demod_probe = 1;
 module_param_named(demod, demod_probe, int, 0644);
@@ -301,6 +292,7 @@
 static int dw2102_earda_i2c_transfer(struct i2c_adapter *adap, struct i2c_msg msg[], int num)
 {
 	struct dvb_usb_device *d = i2c_get_adapdata(adap);
+	int ret;
 
 	if (!d)
 		return -ENODEV;
@@ -316,7 +308,8 @@
 		if (2 + msg[1].len > sizeof(ibuf)) {
 			warn("i2c rd: len=%d is too big!\n",
 			     msg[1].len);
-			return -EOPNOTSUPP;
+			ret = -EOPNOTSUPP;
+			goto unlock;
 		}
 
 		obuf[0] = msg[0].addr << 1;
@@ -340,7 +333,8 @@
 			if (2 + msg[0].len > sizeof(obuf)) {
 				warn("i2c wr: len=%d is too big!\n",
 				     msg[1].len);
-				return -EOPNOTSUPP;
+				ret = -EOPNOTSUPP;
+				goto unlock;
 			}
 
 			obuf[0] = msg[0].addr << 1;
@@ -357,7 +351,8 @@
 			if (2 + msg[0].len > sizeof(obuf)) {
 				warn("i2c wr: len=%d is too big!\n",
 				     msg[1].len);
-				return -EOPNOTSUPP;
+				ret = -EOPNOTSUPP;
+				goto unlock;
 			}
 
 			obuf[0] = msg[0].addr << 1;
@@ -386,15 +381,17 @@
 
 		break;
 	}
+	ret = num;
 
+unlock:
 	mutex_unlock(&d->i2c_mutex);
-	return num;
+	return ret;
 }
 
 static int dw2104_i2c_transfer(struct i2c_adapter *adap, struct i2c_msg msg[], int num)
 {
 	struct dvb_usb_device *d = i2c_get_adapdata(adap);
-	int len, i, j;
+	int len, i, j, ret;
 
 	if (!d)
 		return -ENODEV;
@@ -430,7 +427,8 @@
 				if (2 + msg[j].len > sizeof(ibuf)) {
 					warn("i2c rd: len=%d is too big!\n",
 					     msg[j].len);
-					return -EOPNOTSUPP;
+					ret = -EOPNOTSUPP;
+					goto unlock;
 				}
 
 				dw210x_op_rw(d->udev, 0xc3,
@@ -466,7 +464,8 @@
 				if (2 + msg[j].len > sizeof(obuf)) {
 					warn("i2c wr: len=%d is too big!\n",
 					     msg[j].len);
-					return -EOPNOTSUPP;
+					ret = -EOPNOTSUPP;
+					goto unlock;
 				}
 
 				obuf[0] = msg[j].addr << 1;
@@ -481,15 +480,18 @@
 		}
 
 	}
+	ret = num;
 
+unlock:
 	mutex_unlock(&d->i2c_mutex);
-	return num;
+	return ret;
 }
 
 static int dw3101_i2c_transfer(struct i2c_adapter *adap, struct i2c_msg msg[],
 								int num)
 {
 	struct dvb_usb_device *d = i2c_get_adapdata(adap);
+	int ret;
 	int i;
 
 	if (!d)
@@ -506,7 +508,8 @@
 		if (2 + msg[1].len > sizeof(ibuf)) {
 			warn("i2c rd: len=%d is too big!\n",
 			     msg[1].len);
-			return -EOPNOTSUPP;
+			ret = -EOPNOTSUPP;
+			goto unlock;
 		}
 		obuf[0] = msg[0].addr << 1;
 		obuf[1] = msg[0].len;
@@ -530,7 +533,8 @@
 			if (2 + msg[0].len > sizeof(obuf)) {
 				warn("i2c wr: len=%d is too big!\n",
 				     msg[0].len);
-				return -EOPNOTSUPP;
+				ret = -EOPNOTSUPP;
+				goto unlock;
 			}
 			obuf[0] = msg[0].addr << 1;
 			obuf[1] = msg[0].len;
@@ -556,9 +560,11 @@
 				msg[i].flags == 0 ? ">>>" : "<<<");
 		debug_dump(msg[i].buf, msg[i].len, deb_xfer);
 	}
+	ret = num;
 
+unlock:
 	mutex_unlock(&d->i2c_mutex);
-	return num;
+	return ret;
 }
 
 static int s6x0_i2c_transfer(struct i2c_adapter *adap, struct i2c_msg msg[],
@@ -566,7 +572,7 @@
 {
 	struct dvb_usb_device *d = i2c_get_adapdata(adap);
 	struct usb_device *udev;
-	int len, i, j;
+	int len, i, j, ret;
 
 	if (!d)
 		return -ENODEV;
@@ -618,7 +624,8 @@
 				if (msg[j].len > sizeof(ibuf)) {
 					warn("i2c rd: len=%d is too big!\n",
 					     msg[j].len);
-					return -EOPNOTSUPP;
+					ret = -EOPNOTSUPP;
+					goto unlock;
 				}
 
 				dw210x_op_rw(d->udev, 0x91, 0, 0,
@@ -652,7 +659,8 @@
 				if (2 + msg[j].len > sizeof(obuf)) {
 					warn("i2c wr: len=%d is too big!\n",
 					     msg[j].len);
-					return -EOPNOTSUPP;
+					ret = -EOPNOTSUPP;
+					goto unlock;
 				}
 
 				obuf[0] = msg[j + 1].len;
@@ -671,7 +679,8 @@
 				if (2 + msg[j].len > sizeof(obuf)) {
 					warn("i2c wr: len=%d is too big!\n",
 					     msg[j].len);
-					return -EOPNOTSUPP;
+					ret = -EOPNOTSUPP;
+					goto unlock;
 				}
 				obuf[0] = msg[j].len + 1;
 				obuf[1] = (msg[j].addr << 1);
@@ -685,9 +694,11 @@
 		}
 		}
 	}
+	ret = num;
 
+unlock:
 	mutex_unlock(&d->i2c_mutex);
-	return num;
+	return ret;
 }
 
 static int su3000_i2c_transfer(struct i2c_adapter *adap, struct i2c_msg msg[],
@@ -1095,6 +1106,16 @@
 	.set_lock_led = dw210x_led_ctrl,
 };
 
+static struct cxd2820r_config cxd2820r_config = {
+	.i2c_address = 0x6c, /* (0xd8 >> 1) */
+	.ts_mode = 0x38,
+};
+
+static struct tda18271_config tda18271_config = {
+	.output_opt = TDA18271_OUTPUT_LT_OFF,
+	.gate = TDA18271_GATE_DIGITAL,
+};
+
 static u8 m88rs2000_inittab[] = {
 	DEMOD_WRITE, 0x9a, 0x30,
 	DEMOD_WRITE, 0x00, 0x01,
@@ -1364,6 +1385,49 @@
 	return -EIO;
 }
 
+static int t220_frontend_attach(struct dvb_usb_adapter *d)
+{
+	u8 obuf[3] = { 0xe, 0x80, 0 };
+	u8 ibuf[] = { 0 };
+
+	if (dvb_usb_generic_rw(d->dev, obuf, 3, ibuf, 1, 0) < 0)
+		err("command 0x0e transfer failed.");
+
+	obuf[0] = 0xe;
+	obuf[1] = 0x83;
+	obuf[2] = 0;
+
+	if (dvb_usb_generic_rw(d->dev, obuf, 3, ibuf, 1, 0) < 0)
+		err("command 0x0e transfer failed.");
+
+	msleep(100);
+
+	obuf[0] = 0xe;
+	obuf[1] = 0x80;
+	obuf[2] = 1;
+
+	if (dvb_usb_generic_rw(d->dev, obuf, 3, ibuf, 1, 0) < 0)
+		err("command 0x0e transfer failed.");
+
+	obuf[0] = 0x51;
+
+	if (dvb_usb_generic_rw(d->dev, obuf, 1, ibuf, 1, 0) < 0)
+		err("command 0x51 transfer failed.");
+
+	d->fe_adap[0].fe = dvb_attach(cxd2820r_attach, &cxd2820r_config,
+					&d->dev->i2c_adap, NULL);
+	if (d->fe_adap[0].fe != NULL) {
+		if (dvb_attach(tda18271_attach, d->fe_adap[0].fe, 0x60,
+					&d->dev->i2c_adap, &tda18271_config)) {
+			info("Attached TDA18271HD/CXD2820R!\n");
+			return 0;
+		}
+	}
+
+	info("Failed to attach TDA18271HD/CXD2820R!\n");
+	return -EIO;
+}
+
 static int m88rs2000_frontend_attach(struct dvb_usb_adapter *d)
 {
 	u8 obuf[] = { 0x51 };
@@ -1404,174 +1468,8 @@
 	return 0;
 }
 
-static struct rc_map_table rc_map_dw210x_table[] = {
-	{ 0xf80a, KEY_POWER2 },		/*power*/
-	{ 0xf80c, KEY_MUTE },		/*mute*/
-	{ 0xf811, KEY_1 },
-	{ 0xf812, KEY_2 },
-	{ 0xf813, KEY_3 },
-	{ 0xf814, KEY_4 },
-	{ 0xf815, KEY_5 },
-	{ 0xf816, KEY_6 },
-	{ 0xf817, KEY_7 },
-	{ 0xf818, KEY_8 },
-	{ 0xf819, KEY_9 },
-	{ 0xf810, KEY_0 },
-	{ 0xf81c, KEY_CHANNELUP },	/*ch+*/
-	{ 0xf80f, KEY_CHANNELDOWN },	/*ch-*/
-	{ 0xf81a, KEY_VOLUMEUP },	/*vol+*/
-	{ 0xf80e, KEY_VOLUMEDOWN },	/*vol-*/
-	{ 0xf804, KEY_RECORD },		/*rec*/
-	{ 0xf809, KEY_FAVORITES },	/*fav*/
-	{ 0xf808, KEY_REWIND },		/*rewind*/
-	{ 0xf807, KEY_FASTFORWARD },	/*fast*/
-	{ 0xf80b, KEY_PAUSE },		/*pause*/
-	{ 0xf802, KEY_ESC },		/*cancel*/
-	{ 0xf803, KEY_TAB },		/*tab*/
-	{ 0xf800, KEY_UP },		/*up*/
-	{ 0xf81f, KEY_OK },		/*ok*/
-	{ 0xf801, KEY_DOWN },		/*down*/
-	{ 0xf805, KEY_CAMERA },		/*cap*/
-	{ 0xf806, KEY_STOP },		/*stop*/
-	{ 0xf840, KEY_ZOOM },		/*full*/
-	{ 0xf81e, KEY_TV },		/*tvmode*/
-	{ 0xf81b, KEY_LAST },		/*recall*/
-};
-
-static struct rc_map_table rc_map_tevii_table[] = {
-	{ 0xf80a, KEY_POWER },
-	{ 0xf80c, KEY_MUTE },
-	{ 0xf811, KEY_1 },
-	{ 0xf812, KEY_2 },
-	{ 0xf813, KEY_3 },
-	{ 0xf814, KEY_4 },
-	{ 0xf815, KEY_5 },
-	{ 0xf816, KEY_6 },
-	{ 0xf817, KEY_7 },
-	{ 0xf818, KEY_8 },
-	{ 0xf819, KEY_9 },
-	{ 0xf810, KEY_0 },
-	{ 0xf81c, KEY_MENU },
-	{ 0xf80f, KEY_VOLUMEDOWN },
-	{ 0xf81a, KEY_LAST },
-	{ 0xf80e, KEY_OPEN },
-	{ 0xf804, KEY_RECORD },
-	{ 0xf809, KEY_VOLUMEUP },
-	{ 0xf808, KEY_CHANNELUP },
-	{ 0xf807, KEY_PVR },
-	{ 0xf80b, KEY_TIME },
-	{ 0xf802, KEY_RIGHT },
-	{ 0xf803, KEY_LEFT },
-	{ 0xf800, KEY_UP },
-	{ 0xf81f, KEY_OK },
-	{ 0xf801, KEY_DOWN },
-	{ 0xf805, KEY_TUNER },
-	{ 0xf806, KEY_CHANNELDOWN },
-	{ 0xf840, KEY_PLAYPAUSE },
-	{ 0xf81e, KEY_REWIND },
-	{ 0xf81b, KEY_FAVORITES },
-	{ 0xf81d, KEY_BACK },
-	{ 0xf84d, KEY_FASTFORWARD },
-	{ 0xf844, KEY_EPG },
-	{ 0xf84c, KEY_INFO },
-	{ 0xf841, KEY_AB },
-	{ 0xf843, KEY_AUDIO },
-	{ 0xf845, KEY_SUBTITLE },
-	{ 0xf84a, KEY_LIST },
-	{ 0xf846, KEY_F1 },
-	{ 0xf847, KEY_F2 },
-	{ 0xf85e, KEY_F3 },
-	{ 0xf85c, KEY_F4 },
-	{ 0xf852, KEY_F5 },
-	{ 0xf85a, KEY_F6 },
-	{ 0xf856, KEY_MODE },
-	{ 0xf858, KEY_SWITCHVIDEOMODE },
-};
-
-static struct rc_map_table rc_map_tbs_table[] = {
-	{ 0xf884, KEY_POWER },
-	{ 0xf894, KEY_MUTE },
-	{ 0xf887, KEY_1 },
-	{ 0xf886, KEY_2 },
-	{ 0xf885, KEY_3 },
-	{ 0xf88b, KEY_4 },
-	{ 0xf88a, KEY_5 },
-	{ 0xf889, KEY_6 },
-	{ 0xf88f, KEY_7 },
-	{ 0xf88e, KEY_8 },
-	{ 0xf88d, KEY_9 },
-	{ 0xf892, KEY_0 },
-	{ 0xf896, KEY_CHANNELUP },
-	{ 0xf891, KEY_CHANNELDOWN },
-	{ 0xf893, KEY_VOLUMEUP },
-	{ 0xf88c, KEY_VOLUMEDOWN },
-	{ 0xf883, KEY_RECORD },
-	{ 0xf898, KEY_PAUSE  },
-	{ 0xf899, KEY_OK },
-	{ 0xf89a, KEY_SHUFFLE },
-	{ 0xf881, KEY_UP },
-	{ 0xf890, KEY_LEFT },
-	{ 0xf882, KEY_RIGHT },
-	{ 0xf888, KEY_DOWN },
-	{ 0xf895, KEY_FAVORITES },
-	{ 0xf897, KEY_SUBTITLE },
-	{ 0xf89d, KEY_ZOOM },
-	{ 0xf89f, KEY_EXIT },
-	{ 0xf89e, KEY_MENU },
-	{ 0xf89c, KEY_EPG },
-	{ 0xf880, KEY_PREVIOUS },
-	{ 0xf89b, KEY_MODE }
-};
-
-static struct rc_map_table rc_map_su3000_table[] = {
-	{ 0x25, KEY_POWER },	/* right-bottom Red */
-	{ 0x0a, KEY_MUTE },	/* -/-- */
-	{ 0x01, KEY_1 },
-	{ 0x02, KEY_2 },
-	{ 0x03, KEY_3 },
-	{ 0x04, KEY_4 },
-	{ 0x05, KEY_5 },
-	{ 0x06, KEY_6 },
-	{ 0x07, KEY_7 },
-	{ 0x08, KEY_8 },
-	{ 0x09, KEY_9 },
-	{ 0x00, KEY_0 },
-	{ 0x20, KEY_UP },	/* CH+ */
-	{ 0x21, KEY_DOWN },	/* CH+ */
-	{ 0x12, KEY_VOLUMEUP },	/* Brightness Up */
-	{ 0x13, KEY_VOLUMEDOWN },/* Brightness Down */
-	{ 0x1f, KEY_RECORD },
-	{ 0x17, KEY_PLAY },
-	{ 0x16, KEY_PAUSE },
-	{ 0x0b, KEY_STOP },
-	{ 0x27, KEY_FASTFORWARD },/* >> */
-	{ 0x26, KEY_REWIND },	/* << */
-	{ 0x0d, KEY_OK },	/* Mute */
-	{ 0x11, KEY_LEFT },	/* VOL- */
-	{ 0x10, KEY_RIGHT },	/* VOL+ */
-	{ 0x29, KEY_BACK },	/* button under 9 */
-	{ 0x2c, KEY_MENU },	/* TTX */
-	{ 0x2b, KEY_EPG },	/* EPG */
-	{ 0x1e, KEY_RED },	/* OSD */
-	{ 0x0e, KEY_GREEN },	/* Window */
-	{ 0x2d, KEY_YELLOW },	/* button under << */
-	{ 0x0f, KEY_BLUE },	/* bottom yellow button */
-	{ 0x14, KEY_AUDIO },	/* Snapshot */
-	{ 0x38, KEY_TV },	/* TV/Radio */
-	{ 0x0c, KEY_ESC }	/* upper Red button */
-};
-
-static struct rc_map_dvb_usb_table_table keys_tables[] = {
-	{ rc_map_dw210x_table, ARRAY_SIZE(rc_map_dw210x_table) },
-	{ rc_map_tevii_table, ARRAY_SIZE(rc_map_tevii_table) },
-	{ rc_map_tbs_table, ARRAY_SIZE(rc_map_tbs_table) },
-	{ rc_map_su3000_table, ARRAY_SIZE(rc_map_su3000_table) },
-};
-
-static int dw2102_rc_query(struct dvb_usb_device *d, u32 *event, int *state)
+static int dw2102_rc_query(struct dvb_usb_device *d)
 {
-	struct rc_map_table *keymap = d->props.rc.legacy.rc_map_table;
-	int keymap_size = d->props.rc.legacy.rc_map_size;
 	u8 key[2];
 	struct i2c_msg msg = {
 		.addr = DW2102_RC_QUERY,
@@ -1579,32 +1477,55 @@
 		.buf = key,
 		.len = 2
 	};
-	int i;
-	/* override keymap */
-	if ((ir_keymap > 0) && (ir_keymap <= ARRAY_SIZE(keys_tables))) {
-		keymap = keys_tables[ir_keymap - 1].rc_keys ;
-		keymap_size = keys_tables[ir_keymap - 1].rc_keys_size;
-	} else if (ir_keymap > ARRAY_SIZE(keys_tables))
-		return 0; /* none */
 
-	*state = REMOTE_NO_KEY_PRESSED;
 	if (d->props.i2c_algo->master_xfer(&d->i2c_adap, &msg, 1) == 1) {
-		for (i = 0; i < keymap_size ; i++) {
-			if (rc5_data(&keymap[i]) == msg.buf[0]) {
-				*state = REMOTE_KEY_PRESSED;
-				*event = keymap[i].keycode;
-				break;
-			}
-
-		}
-
-		if ((*state) == REMOTE_KEY_PRESSED)
-			deb_rc("%s: found rc key: %x, %x, event: %x\n",
-					__func__, key[0], key[1], (*event));
-		else if (key[0] != 0xff)
-			deb_rc("%s: unknown rc key: %x, %x\n",
+		if (msg.buf[0] != 0xff) {
+			deb_rc("%s: rc code: %x, %x\n",
 					__func__, key[0], key[1]);
+			rc_keydown(d->rc_dev, key[0], 1);
+		}
+	}
 
+	return 0;
+}
+
+static int prof_rc_query(struct dvb_usb_device *d)
+{
+	u8 key[2];
+	struct i2c_msg msg = {
+		.addr = DW2102_RC_QUERY,
+		.flags = I2C_M_RD,
+		.buf = key,
+		.len = 2
+	};
+
+	if (d->props.i2c_algo->master_xfer(&d->i2c_adap, &msg, 1) == 1) {
+		if (msg.buf[0] != 0xff) {
+			deb_rc("%s: rc code: %x, %x\n",
+					__func__, key[0], key[1]);
+			rc_keydown(d->rc_dev, key[0]^0xff, 1);
+		}
+	}
+
+	return 0;
+}
+
+static int su3000_rc_query(struct dvb_usb_device *d)
+{
+	u8 key[2];
+	struct i2c_msg msg = {
+		.addr = DW2102_RC_QUERY,
+		.flags = I2C_M_RD,
+		.buf = key,
+		.len = 2
+	};
+
+	if (d->props.i2c_algo->master_xfer(&d->i2c_adap, &msg, 1) == 1) {
+		if (msg.buf[0] != 0xff) {
+			deb_rc("%s: rc code: %x, %x\n",
+					__func__, key[0], key[1]);
+			rc_keydown(d->rc_dev, key[1] << 8 | key[0], 1);
+		}
 	}
 
 	return 0;
@@ -1630,6 +1551,7 @@
 	TEVII_S632,
 	TERRATEC_CINERGY_S2_R2,
 	GOTVIEW_SAT_HD,
+	GENIATECH_T220,
 };
 
 static struct usb_device_id dw2102_table[] = {
@@ -1652,6 +1574,7 @@
 	[TEVII_S632] = {USB_DEVICE(0x9022, USB_PID_TEVII_S632)},
 	[TERRATEC_CINERGY_S2_R2] = {USB_DEVICE(USB_VID_TERRATEC, 0x00b0)},
 	[GOTVIEW_SAT_HD] = {USB_DEVICE(0x1FE1, USB_PID_GOTVIEW_SAT_HD)},
+	[GENIATECH_T220] = {USB_DEVICE(0x1f4d, 0xD220)},
 	{ }
 };
 
@@ -1711,9 +1634,7 @@
 		/* init registers */
 		switch (dev->descriptor.idProduct) {
 		case USB_PID_TEVII_S650:
-			dw2104_properties.rc.legacy.rc_map_table = rc_map_tevii_table;
-			dw2104_properties.rc.legacy.rc_map_size =
-					ARRAY_SIZE(rc_map_tevii_table);
+			dw2104_properties.rc.core.rc_codes = RC_MAP_TEVII_NEC;
 		case USB_PID_DW2104:
 			reset = 1;
 			dw210x_op_rw(dev, 0xc4, 0x0000, 0, &reset, 1,
@@ -1777,10 +1698,11 @@
 
 	.i2c_algo = &dw2102_serit_i2c_algo,
 
-	.rc.legacy = {
-		.rc_map_table = rc_map_dw210x_table,
-		.rc_map_size = ARRAY_SIZE(rc_map_dw210x_table),
+	.rc.core = {
 		.rc_interval = 150,
+		.rc_codes = RC_MAP_DM1105_NEC,
+		.module_name = "dw2102",
+		.allowed_protos   = RC_BIT_NEC,
 		.rc_query = dw2102_rc_query,
 	},
 
@@ -1831,10 +1753,11 @@
 	.no_reconnect = 1,
 
 	.i2c_algo = &dw2104_i2c_algo,
-	.rc.legacy = {
-		.rc_map_table = rc_map_dw210x_table,
-		.rc_map_size = ARRAY_SIZE(rc_map_dw210x_table),
+	.rc.core = {
 		.rc_interval = 150,
+		.rc_codes = RC_MAP_DM1105_NEC,
+		.module_name = "dw2102",
+		.allowed_protos   = RC_BIT_NEC,
 		.rc_query = dw2102_rc_query,
 	},
 
@@ -1881,10 +1804,11 @@
 	.no_reconnect = 1,
 
 	.i2c_algo = &dw3101_i2c_algo,
-	.rc.legacy = {
-		.rc_map_table = rc_map_dw210x_table,
-		.rc_map_size = ARRAY_SIZE(rc_map_dw210x_table),
+	.rc.core = {
 		.rc_interval = 150,
+		.rc_codes = RC_MAP_DM1105_NEC,
+		.module_name = "dw2102",
+		.allowed_protos   = RC_BIT_NEC,
 		.rc_query = dw2102_rc_query,
 	},
 
@@ -1929,10 +1853,11 @@
 	.no_reconnect = 1,
 
 	.i2c_algo = &s6x0_i2c_algo,
-	.rc.legacy = {
-		.rc_map_table = rc_map_tevii_table,
-		.rc_map_size = ARRAY_SIZE(rc_map_tevii_table),
+	.rc.core = {
 		.rc_interval = 150,
+		.rc_codes = RC_MAP_TEVII_NEC,
+		.module_name = "dw2102",
+		.allowed_protos   = RC_BIT_NEC,
 		.rc_query = dw2102_rc_query,
 	},
 
@@ -2022,11 +1947,12 @@
 	.identify_state	= su3000_identify_state,
 	.i2c_algo = &su3000_i2c_algo,
 
-	.rc.legacy = {
-		.rc_map_table = rc_map_su3000_table,
-		.rc_map_size = ARRAY_SIZE(rc_map_su3000_table),
+	.rc.core = {
 		.rc_interval = 150,
-		.rc_query = dw2102_rc_query,
+		.rc_codes = RC_MAP_SU3000,
+		.module_name = "dw2102",
+		.allowed_protos   = RC_BIT_RC5,
+		.rc_query = su3000_rc_query,
 	},
 
 	.read_mac_address = su3000_read_mac_address,
@@ -2077,6 +2003,55 @@
 	}
 };
 
+static struct dvb_usb_device_properties t220_properties = {
+	.caps = DVB_USB_IS_AN_I2C_ADAPTER,
+	.usb_ctrl = DEVICE_SPECIFIC,
+	.size_of_priv = sizeof(struct su3000_state),
+	.power_ctrl = su3000_power_ctrl,
+	.num_adapters = 1,
+	.identify_state	= su3000_identify_state,
+	.i2c_algo = &su3000_i2c_algo,
+
+	.rc.core = {
+		.rc_interval = 150,
+		.rc_codes = RC_MAP_SU3000,
+		.module_name = "dw2102",
+		.allowed_protos   = RC_BIT_RC5,
+		.rc_query = su3000_rc_query,
+	},
+
+	.read_mac_address = su3000_read_mac_address,
+
+	.generic_bulk_ctrl_endpoint = 0x01,
+
+	.adapter = {
+		{
+		.num_frontends = 1,
+		.fe = { {
+			.streaming_ctrl   = su3000_streaming_ctrl,
+			.frontend_attach  = t220_frontend_attach,
+			.stream = {
+				.type = USB_BULK,
+				.count = 8,
+				.endpoint = 0x82,
+				.u = {
+					.bulk = {
+						.buffersize = 4096,
+					}
+				}
+			}
+		} },
+		}
+	},
+	.num_device_descs = 1,
+	.devices = {
+		{ "Geniatech T220 DVB-T/T2 USB2.0",
+			{ &dw2102_table[GENIATECH_T220], NULL },
+			{ NULL },
+		},
+	}
+};
+
 static int dw2102_probe(struct usb_interface *intf,
 		const struct usb_device_id *id)
 {
@@ -2088,8 +2063,8 @@
 	/* fill only different fields */
 	p1100->firmware = P1100_FIRMWARE;
 	p1100->devices[0] = d1100;
-	p1100->rc.legacy.rc_map_table = rc_map_tbs_table;
-	p1100->rc.legacy.rc_map_size = ARRAY_SIZE(rc_map_tbs_table);
+	p1100->rc.core.rc_query = prof_rc_query;
+	p1100->rc.core.rc_codes = RC_MAP_TBS_NEC;
 	p1100->adapter->fe[0].frontend_attach = stv0288_frontend_attach;
 
 	s660 = kmemdup(&s6x0_properties,
@@ -2114,8 +2089,8 @@
 	}
 	p7500->firmware = P7500_FIRMWARE;
 	p7500->devices[0] = d7500;
-	p7500->rc.legacy.rc_map_table = rc_map_tbs_table;
-	p7500->rc.legacy.rc_map_size = ARRAY_SIZE(rc_map_tbs_table);
+	p7500->rc.core.rc_query = prof_rc_query;
+	p7500->rc.core.rc_codes = RC_MAP_TBS_NEC;
 	p7500->adapter->fe[0].frontend_attach = prof_7500_frontend_attach;
 
 
@@ -2149,7 +2124,9 @@
 	    0 == dvb_usb_device_init(intf, s421,
 			THIS_MODULE, NULL, adapter_nr) ||
 	    0 == dvb_usb_device_init(intf, &su3000_properties,
-				     THIS_MODULE, NULL, adapter_nr))
+			 THIS_MODULE, NULL, adapter_nr) ||
+	    0 == dvb_usb_device_init(intf, &t220_properties,
+			 THIS_MODULE, NULL, adapter_nr))
 		return 0;
 
 	return -ENODEV;
@@ -2169,7 +2146,7 @@
 			" DVB-C 3101 USB2.0,"
 			" TeVii S600, S630, S650, S660, S480, S421, S632"
 			" Prof 1100, 7500 USB2.0,"
-			" Geniatech SU3000 devices");
+			" Geniatech SU3000, T220 devices");
 MODULE_VERSION("0.1");
 MODULE_LICENSE("GPL");
 MODULE_FIRMWARE(DW2101_FIRMWARE);
diff --git a/drivers/media/usb/em28xx/Kconfig b/drivers/media/usb/em28xx/Kconfig
index ca5ee6a..a1fccf3 100644
--- a/drivers/media/usb/em28xx/Kconfig
+++ b/drivers/media/usb/em28xx/Kconfig
@@ -1,8 +1,12 @@
 config VIDEO_EM28XX
-	tristate "Empia EM28xx USB video capture support"
+	tristate "Empia EM28xx USB devices support"
 	depends on VIDEO_DEV && I2C
 	select VIDEO_TUNER
 	select VIDEO_TVEEPROM
+
+config VIDEO_EM28XX_V4L2
+	tristate "Empia EM28xx analog TV, video capture and/or webcam support"
+	depends on VIDEO_EM28XX
 	select VIDEOBUF2_VMALLOC
 	select VIDEO_SAA711X if MEDIA_SUBDRV_AUTOSELECT
 	select VIDEO_TVP5150 if MEDIA_SUBDRV_AUTOSELECT
@@ -49,6 +53,8 @@
 	select DVB_MB86A20S if MEDIA_SUBDRV_AUTOSELECT
 	select MEDIA_TUNER_QT1010 if MEDIA_SUBDRV_AUTOSELECT
 	select MEDIA_TUNER_TDA18271 if MEDIA_SUBDRV_AUTOSELECT
+	select DVB_M88DS3103 if MEDIA_SUBDRV_AUTOSELECT
+	select MEDIA_TUNER_M88TS2022 if MEDIA_SUBDRV_AUTOSELECT
 	---help---
 	  This adds support for DVB cards based on the
 	  Empiatech em28xx chips.
diff --git a/drivers/media/usb/em28xx/Makefile b/drivers/media/usb/em28xx/Makefile
index ad6d485..3f850d5 100644
--- a/drivers/media/usb/em28xx/Makefile
+++ b/drivers/media/usb/em28xx/Makefile
@@ -1,10 +1,11 @@
-em28xx-y +=	em28xx-video.o em28xx-i2c.o em28xx-cards.o
-em28xx-y +=	em28xx-core.o  em28xx-vbi.o em28xx-camera.o
+em28xx-y +=	em28xx-core.o em28xx-i2c.o em28xx-cards.o em28xx-camera.o
 
+em28xx-v4l-objs := em28xx-video.o em28xx-vbi.o
 em28xx-alsa-objs := em28xx-audio.o
 em28xx-rc-objs := em28xx-input.o
 
 obj-$(CONFIG_VIDEO_EM28XX) += em28xx.o
+obj-$(CONFIG_VIDEO_EM28XX_V4L2) += em28xx-v4l.o
 obj-$(CONFIG_VIDEO_EM28XX_ALSA) += em28xx-alsa.o
 obj-$(CONFIG_VIDEO_EM28XX_DVB) += em28xx-dvb.o
 obj-$(CONFIG_VIDEO_EM28XX_RC) += em28xx-rc.o
diff --git a/drivers/media/usb/em28xx/em28xx-audio.c b/drivers/media/usb/em28xx/em28xx-audio.c
index 2fdb66e..05e9bd1 100644
--- a/drivers/media/usb/em28xx/em28xx-audio.c
+++ b/drivers/media/usb/em28xx/em28xx-audio.c
@@ -3,7 +3,7 @@
  *
  *  Copyright (C) 2006 Markus Rechberger <mrechberger@gmail.com>
  *
- *  Copyright (C) 2007-2011 Mauro Carvalho Chehab <mchehab@redhat.com>
+ *  Copyright (C) 2007-2014 Mauro Carvalho Chehab
  *	- Port to work with the in-kernel driver
  *	- Cleanups, fixes, alsa-controls, etc.
  *
@@ -50,6 +50,9 @@
 module_param(debug, int, 0644);
 MODULE_PARM_DESC(debug, "activates debug info");
 
+#define EM28XX_MAX_AUDIO_BUFS		5
+#define EM28XX_MIN_AUDIO_PACKETS	64
+
 #define dprintk(fmt, arg...) do {					\
 	    if (debug)							\
 		printk(KERN_INFO "em28xx-audio %s: " fmt,		\
@@ -63,17 +66,13 @@
 	int i;
 
 	dprintk("Stopping isoc\n");
-	for (i = 0; i < EM28XX_AUDIO_BUFS; i++) {
+	for (i = 0; i < dev->adev.num_urb; i++) {
+		struct urb *urb = dev->adev.urb[i];
+
 		if (!irqs_disabled())
-			usb_kill_urb(dev->adev.urb[i]);
+			usb_kill_urb(urb);
 		else
-			usb_unlink_urb(dev->adev.urb[i]);
-
-		usb_free_urb(dev->adev.urb[i]);
-		dev->adev.urb[i] = NULL;
-
-		kfree(dev->adev.transfer_buffer[i]);
-		dev->adev.transfer_buffer[i] = NULL;
+			usb_unlink_urb(urb);
 	}
 
 	return 0;
@@ -91,6 +90,12 @@
 	struct snd_pcm_substream *substream;
 	struct snd_pcm_runtime   *runtime;
 
+	if (dev->disconnected) {
+		dprintk("device disconnected while streaming. URB status=%d.\n", urb->status);
+		atomic_set(&dev->stream_started, 0);
+		return;
+	}
+
 	switch (urb->status) {
 	case 0:             /* success */
 	case -ETIMEDOUT:    /* NAK */
@@ -158,63 +163,27 @@
 	urb->status = 0;
 
 	status = usb_submit_urb(urb, GFP_ATOMIC);
-	if (status < 0) {
+	if (status < 0)
 		em28xx_errdev("resubmit of audio urb failed (error=%i)\n",
 			      status);
-	}
 	return;
 }
 
 static int em28xx_init_audio_isoc(struct em28xx *dev)
 {
 	int       i, errCode;
-	const int sb_size = EM28XX_NUM_AUDIO_PACKETS *
-			    EM28XX_AUDIO_MAX_PACKET_SIZE;
 
 	dprintk("Starting isoc transfers\n");
 
-	for (i = 0; i < EM28XX_AUDIO_BUFS; i++) {
-		struct urb *urb;
-		int j, k;
+	/* Start streaming */
+	for (i = 0; i < dev->adev.num_urb; i++) {
+		memset(dev->adev.transfer_buffer[i], 0x80,
+		       dev->adev.urb[i]->transfer_buffer_length);
 
-		dev->adev.transfer_buffer[i] = kmalloc(sb_size, GFP_ATOMIC);
-		if (!dev->adev.transfer_buffer[i])
-			return -ENOMEM;
-
-		memset(dev->adev.transfer_buffer[i], 0x80, sb_size);
-		urb = usb_alloc_urb(EM28XX_NUM_AUDIO_PACKETS, GFP_ATOMIC);
-		if (!urb) {
-			em28xx_errdev("usb_alloc_urb failed!\n");
-			for (j = 0; j < i; j++) {
-				usb_free_urb(dev->adev.urb[j]);
-				kfree(dev->adev.transfer_buffer[j]);
-			}
-			return -ENOMEM;
-		}
-
-		urb->dev = dev->udev;
-		urb->context = dev;
-		urb->pipe = usb_rcvisocpipe(dev->udev, EM28XX_EP_AUDIO);
-		urb->transfer_flags = URB_ISO_ASAP;
-		urb->transfer_buffer = dev->adev.transfer_buffer[i];
-		urb->interval = 1;
-		urb->complete = em28xx_audio_isocirq;
-		urb->number_of_packets = EM28XX_NUM_AUDIO_PACKETS;
-		urb->transfer_buffer_length = sb_size;
-
-		for (j = k = 0; j < EM28XX_NUM_AUDIO_PACKETS;
-			     j++, k += EM28XX_AUDIO_MAX_PACKET_SIZE) {
-			urb->iso_frame_desc[j].offset = k;
-			urb->iso_frame_desc[j].length =
-			    EM28XX_AUDIO_MAX_PACKET_SIZE;
-		}
-		dev->adev.urb[i] = urb;
-	}
-
-	for (i = 0; i < EM28XX_AUDIO_BUFS; i++) {
 		errCode = usb_submit_urb(dev->adev.urb[i], GFP_ATOMIC);
 		if (errCode) {
-			em28xx_errdev("submit of audio urb failed\n");
+			em28xx_errdev("submit of audio urb failed (error=%i)\n",
+				      errCode);
 			em28xx_deinit_isoc_audio(dev);
 			atomic_set(&dev->stream_started, 0);
 			return errCode;
@@ -255,15 +224,26 @@
 
 	.formats = SNDRV_PCM_FMTBIT_S16_LE,
 
-	.rates = SNDRV_PCM_RATE_CONTINUOUS | SNDRV_PCM_RATE_KNOT,
+	.rates = SNDRV_PCM_RATE_48000,
 
 	.rate_min = 48000,
 	.rate_max = 48000,
 	.channels_min = 2,
 	.channels_max = 2,
 	.buffer_bytes_max = 62720 * 8,	/* just about the value in usbaudio.c */
-	.period_bytes_min = 64,		/* 12544/2, */
-	.period_bytes_max = 12544,
+
+
+	/*
+	 * The period is 12288 bytes. Allow a 10% variation around its
+	 * value, in order to avoid overruns/underruns due to some clock
+	 * drift.
+	 *
+	 * FIXME: This period assumes 64 packets, and a 48000 PCM rate.
+	 * Calculate it dynamically.
+	 */
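+	/*
+	 * Worked example (assumed figures, for illustration only): at
+	 * 48 kHz, stereo, 16-bit samples the stream is 192000 bytes/s,
+	 * i.e. 192 bytes per 1 ms packet, so 64 packets give a period of
+	 * 64 * 192 = 12288 bytes; the 90%/110% bounds below are 11059
+	 * and 13516 bytes.
+	 */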
+	.period_bytes_min = 11059,
+	.period_bytes_max = 13516,
+
 	.periods_min = 2,
 	.periods_max = 98,		/* 12544, */
 };
@@ -274,28 +254,48 @@
 	struct snd_pcm_runtime *runtime = substream->runtime;
 	int ret = 0;
 
-	dprintk("opening device and trying to acquire exclusive lock\n");
-
 	if (!dev) {
 		em28xx_err("BUG: em28xx can't find device struct."
 				" Can't proceed with open\n");
 		return -ENODEV;
 	}
 
+	if (dev->disconnected)
+		return -ENODEV;
+
+	dprintk("opening device and trying to acquire exclusive lock\n");
+
 	runtime->hw = snd_em28xx_hw_capture;
-	if ((dev->alt == 0 || dev->audio_ifnum) && dev->adev.users == 0) {
-		if (dev->audio_ifnum)
+	if ((dev->alt == 0 || dev->is_audio_only) && dev->adev.users == 0) {
+		int nonblock = !!(substream->f_flags & O_NONBLOCK);
+
+		if (nonblock) {
+			if (!mutex_trylock(&dev->lock))
+				return -EAGAIN;
+		} else
+			mutex_lock(&dev->lock);
+		if (dev->is_audio_only)
+			/* vendor audio is on a separate interface */
 			dev->alt = 1;
 		else
+			/* vendor audio is on the same interface as video */
 			dev->alt = 7;
+			/*
+			 * FIXME: The intention seems to be to select the alt
+			 * setting with the largest wMaxPacketSize for the video
+			 * endpoint.
+			 * At least dev->alt should be used instead, but we
+			 * should probably not touch it at all if it is
+			 * already >0, because wMaxPacketSize of the audio
+			 * endpoints seems to be the same for all.
+			 */
 
 		dprintk("changing alternate number on interface %d to %d\n",
-			dev->audio_ifnum, dev->alt);
-		usb_set_interface(dev->udev, dev->audio_ifnum, dev->alt);
+			dev->ifnum, dev->alt);
+		usb_set_interface(dev->udev, dev->ifnum, dev->alt);
 
 		/* Sets volume, mute, etc */
 		dev->mute = 0;
-		mutex_lock(&dev->lock);
 		ret = em28xx_audio_analog_set(dev);
 		if (ret < 0)
 			goto err;
@@ -304,7 +304,12 @@
 		mutex_unlock(&dev->lock);
 	}
 
+	/* Dynamically adjust the period size */
 	snd_pcm_hw_constraint_integer(runtime, SNDRV_PCM_HW_PARAM_PERIODS);
+	snd_pcm_hw_constraint_minmax(runtime, SNDRV_PCM_HW_PARAM_PERIOD_BYTES,
+				     dev->adev.period * 95 / 100,
+				     dev->adev.period * 105 / 100);
+
 	dev->adev.capture_pcm_substream = substream;
 
 	return 0;
@@ -344,6 +349,10 @@
 					struct snd_pcm_hw_params *hw_params)
 {
 	int ret;
+	struct em28xx *dev = snd_pcm_substream_chip(substream);
+
+	if (dev->disconnected)
+		return -ENODEV;
 
 	dprintk("Setting capture parameters\n");
 
@@ -383,6 +392,9 @@
 {
 	struct em28xx *dev = snd_pcm_substream_chip(substream);
 
+	if (dev->disconnected)
+		return -ENODEV;
+
 	dev->adev.hwptr_done_capture = 0;
 	dev->adev.capture_transfer_done = 0;
 
@@ -408,6 +420,9 @@
 	struct em28xx *dev = snd_pcm_substream_chip(substream);
 	int retval = 0;
 
+	if (dev->disconnected)
+		return -ENODEV;
+
 	switch (cmd) {
 	case SNDRV_PCM_TRIGGER_PAUSE_RELEASE: /* fall through */
 	case SNDRV_PCM_TRIGGER_RESUME: /* fall through */
@@ -434,6 +449,9 @@
 	snd_pcm_uframes_t hwptr_done;
 
 	dev = snd_pcm_substream_chip(substream);
+	if (dev->disconnected)
+		return SNDRV_PCM_POS_XRUN;
+
 	spin_lock_irqsave(&dev->adev.slock, flags);
 	hwptr_done = dev->adev.hwptr_done_capture;
 	spin_unlock_irqrestore(&dev->adev.slock, flags);
@@ -455,6 +473,11 @@
 static int em28xx_vol_info(struct snd_kcontrol *kcontrol,
 				struct snd_ctl_elem_info *info)
 {
+	struct em28xx *dev = snd_kcontrol_chip(kcontrol);
+
+	if (dev->disconnected)
+		return -ENODEV;
+
 	info->type = SNDRV_CTL_ELEM_TYPE_INTEGER;
 	info->count = 2;
 	info->value.integer.min = 0;
@@ -467,11 +490,22 @@
 			       struct snd_ctl_elem_value *value)
 {
 	struct em28xx *dev = snd_kcontrol_chip(kcontrol);
+	struct snd_pcm_substream *substream = dev->adev.capture_pcm_substream;
 	u16 val = (0x1f - (value->value.integer.value[0] & 0x1f)) |
 		  (0x1f - (value->value.integer.value[1] & 0x1f)) << 8;
+	int nonblock = 0;
 	int rc;
 
-	mutex_lock(&dev->lock);
+	if (dev->disconnected)
+		return -ENODEV;
+
+	if (substream)
+		nonblock = !!(substream->f_flags & O_NONBLOCK);
+	if (nonblock) {
+		if (!mutex_trylock(&dev->lock))
+			return -EAGAIN;
+	} else
+		mutex_lock(&dev->lock);
 	rc = em28xx_read_ac97(dev, kcontrol->private_value);
 	if (rc < 0)
 		goto err;
@@ -496,9 +530,20 @@
 			       struct snd_ctl_elem_value *value)
 {
 	struct em28xx *dev = snd_kcontrol_chip(kcontrol);
+	struct snd_pcm_substream *substream = dev->adev.capture_pcm_substream;
+	int nonblock = 0;
 	int val;
 
-	mutex_lock(&dev->lock);
+	if (dev->disconnected)
+		return -ENODEV;
+
+	if (substream)
+		nonblock = !!(substream->f_flags & O_NONBLOCK);
+	if (nonblock) {
+		if (!mutex_trylock(&dev->lock))
+			return -EAGAIN;
+	} else
+		mutex_lock(&dev->lock);
 	val = em28xx_read_ac97(dev, kcontrol->private_value);
 	mutex_unlock(&dev->lock);
 	if (val < 0)
@@ -520,9 +565,20 @@
 {
 	struct em28xx *dev = snd_kcontrol_chip(kcontrol);
 	u16 val = value->value.integer.value[0];
+	struct snd_pcm_substream *substream = dev->adev.capture_pcm_substream;
+	int nonblock = 0;
 	int rc;
 
-	mutex_lock(&dev->lock);
+	if (dev->disconnected)
+		return -ENODEV;
+
+	if (substream)
+		nonblock = !!(substream->f_flags & O_NONBLOCK);
+	if (nonblock) {
+		if (!mutex_trylock(&dev->lock))
+			return -EAGAIN;
+	} else
+		mutex_lock(&dev->lock);
 	rc = em28xx_read_ac97(dev, kcontrol->private_value);
 	if (rc < 0)
 		goto err;
@@ -550,9 +606,20 @@
 			       struct snd_ctl_elem_value *value)
 {
 	struct em28xx *dev = snd_kcontrol_chip(kcontrol);
+	struct snd_pcm_substream *substream = dev->adev.capture_pcm_substream;
+	int nonblock = 0;
 	int val;
 
-	mutex_lock(&dev->lock);
+	if (dev->disconnected)
+		return -ENODEV;
+
+	if (substream)
+		nonblock = !!(substream->f_flags & O_NONBLOCK);
+	if (nonblock) {
+		if (!mutex_trylock(&dev->lock))
+			return -EAGAIN;
+	} else
+		mutex_lock(&dev->lock);
 	val = em28xx_read_ac97(dev, kcontrol->private_value);
 	mutex_unlock(&dev->lock);
 	if (val < 0)
@@ -634,25 +701,204 @@
 	.page      = snd_pcm_get_vmalloc_page,
 };
 
+static void em28xx_audio_free_urb(struct em28xx *dev)
+{
+	int i;
+
+	for (i = 0; i < dev->adev.num_urb; i++) {
+		struct urb *urb = dev->adev.urb[i];
+
+		if (!urb)
+			continue;
+
+		usb_free_coherent(dev->udev, urb->transfer_buffer_length,
+				  dev->adev.transfer_buffer[i],
+				  urb->transfer_dma);
+
+		usb_free_urb(urb);
+	}
+	kfree(dev->adev.urb);
+	kfree(dev->adev.transfer_buffer);
+	dev->adev.num_urb = 0;
+}
+
+/* high bandwidth multiplier, as encoded in highspeed endpoint descriptors */
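+/*
+ * For example (hypothetical descriptor value): on a high-speed device a
+ * wMaxPacketSize of 0x1400 decodes to a base size of 1024 bytes with two
+ * additional transactions per microframe (bits 12:11 = 2), so the function
+ * below returns 1024 * 3 = 3072 bytes.
+ */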
+static int em28xx_audio_ep_packet_size(struct usb_device *udev,
+					struct usb_endpoint_descriptor *e)
+{
+	int size = le16_to_cpu(e->wMaxPacketSize);
+
+	if (udev->speed == USB_SPEED_HIGH)
+		return (size & 0x7ff) * (1 + ((size >> 11) & 0x03));
+
+	return size & 0x7ff;
+}
+
+static int em28xx_audio_urb_init(struct em28xx *dev)
+{
+	struct usb_interface *intf;
+	struct usb_endpoint_descriptor *e, *ep = NULL;
+	int                 i, ep_size, interval, num_urb, npackets;
+	int		    urb_size, bytes_per_transfer;
+	u8 alt;
+
+	if (dev->ifnum)
+		alt = 1;
+	else
+		alt = 7;
+
+	intf = usb_ifnum_to_if(dev->udev, dev->ifnum);
+
+	if (intf->num_altsetting <= alt) {
+		em28xx_errdev("alt %d doesn't exist on interface %d\n",
+			      alt, dev->ifnum);
+		return -ENODEV;
+	}
+
+	for (i = 0; i < intf->altsetting[alt].desc.bNumEndpoints; i++) {
+		e = &intf->altsetting[alt].endpoint[i].desc;
+		if (!usb_endpoint_dir_in(e))
+			continue;
+		if (e->bEndpointAddress == EM28XX_EP_AUDIO) {
+			ep = e;
+			break;
+		}
+	}
+
+	if (!ep) {
+		em28xx_errdev("Couldn't find an audio endpoint");
+		return -ENODEV;
+	}
+
+	ep_size = em28xx_audio_ep_packet_size(dev->udev, ep);
+	interval = 1 << (ep->bInterval - 1);
+
+	em28xx_info("Endpoint 0x%02x %s on intf %d alt %d interval = %d, size %d\n",
+		     EM28XX_EP_AUDIO, usb_speed_string(dev->udev->speed),
+		     dev->ifnum, alt,
+		     interval,
+		     ep_size);
+
+	/* Calculate the number and size of URBs to better fit the audio samples */
+
+	/*
+	 * Estimate the number of bytes per DMA transfer.
+	 *
+	 * This is given by the bit rate (for now, only 48000 Hz) multiplied
+	 * by 2 channels and 2 bytes/sample divided by the number of microframe
+	 * intervals and by the microframe rate (125 us)
+	 */
+	bytes_per_transfer = DIV_ROUND_UP(48000 * 2 * 2, 125 * interval);
+
+	/*
+	 * Estimate the number of transfer URBs. Don't let it go past the
+	 * maximum number of URBs that is known to be supported by the device.
+	 */
+	num_urb = DIV_ROUND_UP(bytes_per_transfer, ep_size);
+	if (num_urb > EM28XX_MAX_AUDIO_BUFS)
+		num_urb = EM28XX_MAX_AUDIO_BUFS;
+
+	/*
+	 * Now that we know the number of bytes per transfer and the number of
+	 * URBs, estimate the typical size of an URB, in order to adjust the
+	 * minimal number of packets.
+	 */
+	urb_size = bytes_per_transfer / num_urb;
+
+	/*
+	 * Now, calculate the amount of audio packets to be filled on each
+	 * URB. In order to preserve the old behaviour, use a minimal
+	 * threshold for this value.
+	 */
+	npackets = EM28XX_MIN_AUDIO_PACKETS;
+	if (urb_size > ep_size * npackets)
+		npackets = DIV_ROUND_UP(urb_size, ep_size);
+
+	em28xx_info("Number of URBs: %d, with %d packets and size %d\n",
+		    num_urb, npackets, urb_size);
+
+	/* Estimate the bytes per period */
+	dev->adev.period = urb_size * npackets;
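+	/*
+	 * Worked example (assumed descriptor values, for illustration only):
+	 * a high-speed device with bInterval = 4 gives interval = 8
+	 * microframes (1 ms), so bytes_per_transfer = DIV_ROUND_UP(192000,
+	 * 1000) = 192; with ep_size around 196 bytes this yields num_urb = 1,
+	 * urb_size = 192 and npackets kept at the 64-packet minimum, i.e. a
+	 * period of 192 * 64 = 12288 bytes, matching snd_em28xx_hw_capture.
+	 */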
+
+	/* Allocate space to store the number of URBs to be used */
+
+	dev->adev.transfer_buffer = kcalloc(num_urb,
+					    sizeof(*dev->adev.transfer_buffer),
+					    GFP_ATOMIC);
+	if (!dev->adev.transfer_buffer) {
+		return -ENOMEM;
+	}
+
+	dev->adev.urb = kcalloc(num_urb, sizeof(*dev->adev.urb), GFP_ATOMIC);
+	if (!dev->adev.urb) {
+		kfree(dev->adev.transfer_buffer);
+		return -ENOMEM;
+	}
+
+	/* Alloc memory for each URB and for each transfer buffer */
+	dev->adev.num_urb = num_urb;
+	for (i = 0; i < num_urb; i++) {
+		struct urb *urb;
+		int j, k;
+		void *buf;
+
+		urb = usb_alloc_urb(npackets, GFP_ATOMIC);
+		if (!urb) {
+			em28xx_errdev("usb_alloc_urb failed!\n");
+			em28xx_audio_free_urb(dev);
+			return -ENOMEM;
+		}
+		dev->adev.urb[i] = urb;
+
+		buf = usb_alloc_coherent(dev->udev, npackets * ep_size, GFP_ATOMIC,
+					 &urb->transfer_dma);
+		if (!buf) {
+			em28xx_errdev("usb_alloc_coherent failed!\n");
+			em28xx_audio_free_urb(dev);
+			return -ENOMEM;
+		}
+		dev->adev.transfer_buffer[i] = buf;
+
+		urb->dev = dev->udev;
+		urb->context = dev;
+		urb->pipe = usb_rcvisocpipe(dev->udev, EM28XX_EP_AUDIO);
+		urb->transfer_flags = URB_ISO_ASAP | URB_NO_TRANSFER_DMA_MAP;
+		urb->transfer_buffer = buf;
+		urb->interval = interval;
+		urb->complete = em28xx_audio_isocirq;
+		urb->number_of_packets = npackets;
+		urb->transfer_buffer_length = ep_size * npackets;
+
+		for (j = k = 0; j < npackets; j++, k += ep_size) {
+			urb->iso_frame_desc[j].offset = k;
+			urb->iso_frame_desc[j].length = ep_size;
+		}
+	}
+
+	return 0;
+}
+
 static int em28xx_audio_init(struct em28xx *dev)
 {
 	struct em28xx_audio *adev = &dev->adev;
 	struct snd_pcm      *pcm;
 	struct snd_card     *card;
 	static int          devnr;
-	int                 err;
+	int		    err;
 
-	if (!dev->has_alsa_audio || dev->audio_ifnum < 0) {
+	if (!dev->has_alsa_audio) {
 		/* This device does not support the extension (in this case
 		   the device is expecting the snd-usb-audio module or
 		   doesn't have analog audio support at all) */
 		return 0;
 	}
 
-	printk(KERN_INFO "em28xx-audio.c: probing for em28xx Audio Vendor Class\n");
+	em28xx_info("Binding audio extension\n");
+
 	printk(KERN_INFO "em28xx-audio.c: Copyright (C) 2006 Markus "
 			 "Rechberger\n");
-	printk(KERN_INFO "em28xx-audio.c: Copyright (C) 2007-2011 Mauro Carvalho Chehab\n");
+	printk(KERN_INFO
+	       "em28xx-audio.c: Copyright (C) 2007-2014 Mauro Carvalho Chehab\n");
 
 	err = snd_card_create(index[devnr], "Em28xx Audio", THIS_MODULE, 0,
 			      &card);
@@ -660,11 +906,12 @@
 		return err;
 
 	spin_lock_init(&adev->slock);
+	adev->sndcard = card;
+	adev->udev = dev->udev;
+
 	err = snd_pcm_new(card, "Em28xx Audio", 0, 0, 1, &pcm);
-	if (err < 0) {
-		snd_card_free(card);
-		return err;
-	}
+	if (err < 0)
+		goto card_free;
 
 	snd_pcm_set_ops(pcm, SNDRV_PCM_STREAM_CAPTURE, &snd_em28xx_pcm_capture);
 	pcm->info_flags = 0;
@@ -694,15 +941,25 @@
 		em28xx_cvol_new(card, dev, "Surround", AC97_SURROUND_MASTER);
 	}
 
-	err = snd_card_register(card);
-	if (err < 0) {
-		snd_card_free(card);
-		return err;
-	}
-	adev->sndcard = card;
-	adev->udev = dev->udev;
+	err = em28xx_audio_urb_init(dev);
+	if (err)
+		goto card_free;
 
+	err = snd_card_register(card);
+	if (err < 0)
+		goto urb_free;
+
+	em28xx_info("Audio extension successfully initialized\n");
 	return 0;
+
+urb_free:
+	em28xx_audio_free_urb(dev);
+
+card_free:
+	snd_card_free(card);
+	adev->sndcard = NULL;
+
+	return err;
 }
 
 static int em28xx_audio_fini(struct em28xx *dev)
@@ -717,7 +974,14 @@
 		return 0;
 	}
 
+	em28xx_info("Closing audio extension");
+
 	if (dev->adev.sndcard) {
+		snd_card_disconnect(dev->adev.sndcard);
+		flush_work(&dev->wq_trigger);
+
+		em28xx_audio_free_urb(dev);
+
 		snd_card_free(dev->adev.sndcard);
 		dev->adev.sndcard = NULL;
 	}
@@ -745,7 +1009,8 @@
 MODULE_LICENSE("GPL");
 MODULE_AUTHOR("Markus Rechberger <mrechberger@gmail.com>");
 MODULE_AUTHOR("Mauro Carvalho Chehab <mchehab@redhat.com>");
-MODULE_DESCRIPTION("Em28xx Audio driver");
+MODULE_DESCRIPTION(DRIVER_DESC " - audio interface");
+MODULE_VERSION(EM28XX_VERSION);
 
 module_init(em28xx_alsa_register);
 module_exit(em28xx_alsa_unregister);
diff --git a/drivers/media/usb/em28xx/em28xx-camera.c b/drivers/media/usb/em28xx/em28xx-camera.c
index d666741..c29f5c4 100644
--- a/drivers/media/usb/em28xx/em28xx-camera.c
+++ b/drivers/media/usb/em28xx/em28xx-camera.c
@@ -454,3 +454,4 @@
 
 	return ret;
 }
+EXPORT_SYMBOL_GPL(em28xx_init_camera);
diff --git a/drivers/media/usb/em28xx/em28xx-cards.c b/drivers/media/usb/em28xx/em28xx-cards.c
index a519669..4d97a76 100644
--- a/drivers/media/usb/em28xx/em28xx-cards.c
+++ b/drivers/media/usb/em28xx/em28xx-cards.c
@@ -36,7 +36,6 @@
 #include <media/tvaudio.h>
 #include <media/i2c-addr.h>
 #include <media/tveeprom.h>
-#include <media/v4l2-clk.h>
 #include <media/v4l2-common.h>
 
 #include "em28xx.h"
@@ -67,7 +66,7 @@
 
 
 /* Bitmask marking allocated devices from 0 to EM28XX_MAXBOARDS - 1 */
-static unsigned long em28xx_devused;
+DECLARE_BITMAP(em28xx_devused, EM28XX_MAXBOARDS);
 
 struct em28xx_hash_table {
 	unsigned long hash;
@@ -356,6 +355,28 @@
 	{	-1,			-1,	-1,	-1},
 };
 
+/*
+ * 2013:0258 PCTV DVB-S2 Stick (461e)
+ * GPIO 0 = POWER_ON
+ * GPIO 1 = BOOST
+ * GPIO 2 = VUV_LNB (red LED)
+ * GPIO 3 = #EXT_12V
+ * GPIO 4 = INT_DEM
+ * GPIO 5 = INT_LNB
+ * GPIO 6 = #RESET_DEM
+ * GPIO 7 = P07_LED (green LED)
+ */
+static struct em28xx_reg_seq pctv_461e[] = {
+	{EM2874_R80_GPIO_P0_CTRL,      0x7f, 0xff,    0},
+	{0x0d,                 0xff, 0xff,    0},
+	{EM2874_R80_GPIO_P0_CTRL,      0x3f, 0xff,  100}, /* reset demod */
+	{EM2874_R80_GPIO_P0_CTRL,      0x7f, 0xff,  200}, /* reset demod */
+	{0x0d,                 0x42, 0xff,    0},
+	{EM2874_R80_GPIO_P0_CTRL,      0xeb, 0xff,    0},
+	{EM2874_R5F_TS_ENABLE, 0x84, 0x84,    0}, /* parallel? | null discard */
+	{                  -1,   -1,   -1,   -1},
+};
+
 #if 0
 static struct em28xx_reg_seq hauppauge_930c_gpio[] = {
 	{EM2874_R80_GPIO_P0_CTRL,	0x6f,	0xff,	10},
@@ -412,6 +433,70 @@
 	{	-1,			-1,	-1,	-1},
 };
 
+/* 1ae7:9003/9004 SpeedLink Vicious And Devine Laplace webcam
+ * reg 0x80/0x84:
+ * GPIO_0: capturing LED, 0=on, 1=off
+ * GPIO_2: AV mute button, 0=pressed, 1=unpressed
+ * GPIO 3: illumination button, 0=pressed, 1=unpressed
+ * GPIO_6: illumination/flash LED, 0=on, 1=off
+ * reg 0x81/0x85:
+ * GPIO_7: snapshot button, 0=pressed, 1=unpressed
+ */
+static struct em28xx_reg_seq speedlink_vad_laplace_reg_seq[] = {
+	{EM2820_R08_GPIO_CTRL,		0xf7,	0xff,	10},
+	{EM2874_R80_GPIO_P0_CTRL,	0xff,	0xb2,	10},
+	{	-1,			-1,	-1,	-1},
+};
+
+/*
+ *  Button definitions
+ */
+static struct em28xx_button std_snapshot_button[] = {
+	{
+		.role         = EM28XX_BUTTON_SNAPSHOT,
+		.reg_r        = EM28XX_R0C_USBSUSP,
+		.reg_clearing = EM28XX_R0C_USBSUSP,
+		.mask         = EM28XX_R0C_USBSUSP_SNAPSHOT,
+		.inverted     = 0,
+	},
+	{-1, 0, 0, 0, 0},
+};
+
+static struct em28xx_button speedlink_vad_laplace_buttons[] = {
+	{
+		.role     = EM28XX_BUTTON_SNAPSHOT,
+		.reg_r    = EM2874_R85_GPIO_P1_STATE,
+		.mask     = 0x80,
+		.inverted = 1,
+	},
+	{
+		.role     = EM28XX_BUTTON_ILLUMINATION,
+		.reg_r    = EM2874_R84_GPIO_P0_STATE,
+		.mask     = 0x08,
+		.inverted = 1,
+	},
+	{-1, 0, 0, 0, 0},
+};
+
+/*
+ *  LED definitions
+ */
+static struct em28xx_led speedlink_vad_laplace_leds[] = {
+	{
+		.role      = EM28XX_LED_ANALOG_CAPTURING,
+		.gpio_reg  = EM2874_R80_GPIO_P0_CTRL,
+		.gpio_mask = 0x01,
+		.inverted  = 1,
+	},
+	{
+		.role      = EM28XX_LED_ILLUMINATION,
+		.gpio_reg  = EM2874_R80_GPIO_P0_CTRL,
+		.gpio_mask = 0x40,
+		.inverted  = 1,
+	},
+	{-1, 0, 0, 0},
+};
+
 /*
  *  Board definitions
  */
@@ -1391,7 +1476,7 @@
 	},
 	[EM2820_BOARD_PROLINK_PLAYTV_USB2] = {
 		.name         = "SIIG AVTuner-PVR / Pixelview Prolink PlayTV USB 2.0",
-		.has_snapshot_button = 1,
+		.buttons = std_snapshot_button,
 		.tda9887_conf = TDA9887_PRESENT,
 		.tuner_type   = TUNER_YMEC_TVF_5533MF,
 		.decoder      = EM28XX_SAA711X,
@@ -1413,7 +1498,7 @@
 	},
 	[EM2860_BOARD_SAA711X_REFERENCE_DESIGN] = {
 		.name                = "EM2860/SAA711X Reference Design",
-		.has_snapshot_button = 1,
+		.buttons = std_snapshot_button,
 		.tuner_type          = TUNER_ABSENT,
 		.decoder             = EM28XX_SAA711X,
 		.input               = { {
@@ -2020,7 +2105,7 @@
 	},
 	/* 1b80:e1cc Delock 61959
 	 * Empia EM2874B + Micronas DRX 3913KA2 + NXP TDA18271HDC2
-         * mostly the same as MaxMedia UB-425-TC but different remote */
+	 * mostly the same as MaxMedia UB-425-TC but different remote */
 	[EM2874_BOARD_DELOCK_61959] = {
 		.name          = "Delock 61959",
 		.tuner_type    = TUNER_ABSENT,
@@ -2043,7 +2128,38 @@
 		.tuner_gpio	= default_tuner_gpio,
 		.def_i2c_bus	= 1,
 	},
+	/* 1ae7:9003/9004 SpeedLink Vicious And Devine Laplace webcam
+	 * Empia EM2765 + OmniVision OV2640 */
+	[EM2765_BOARD_SPEEDLINK_VAD_LAPLACE] = {
+		.name         = "SpeedLink Vicious And Devine Laplace webcam",
+		.xclk         = EM28XX_XCLK_FREQUENCY_24MHZ,
+		.i2c_speed    = EM28XX_I2C_CLK_WAIT_ENABLE |
+				EM28XX_I2C_FREQ_100_KHZ,
+		.def_i2c_bus  = 1,
+		.tuner_type   = TUNER_ABSENT,
+		.is_webcam    = 1,
+		.input        = { {
+			.type     = EM28XX_VMUX_COMPOSITE1,
+			.amux     = EM28XX_AMUX_VIDEO,
+			.gpio     = speedlink_vad_laplace_reg_seq,
+		} },
+		.buttons = speedlink_vad_laplace_buttons,
+		.leds = speedlink_vad_laplace_leds,
+	},
+	/* 2013:0258 PCTV DVB-S2 Stick (461e)
+	 * Empia EM28178, Montage M88DS3103, Montage M88TS2022, Allegro A8293 */
+	[EM28178_BOARD_PCTV_461E] = {
+		.def_i2c_bus   = 1,
+		.i2c_speed     = EM28XX_I2C_CLK_WAIT_ENABLE | EM28XX_I2C_FREQ_400_KHZ,
+		.name          = "PCTV DVB-S2 Stick (461e)",
+		.tuner_type    = TUNER_ABSENT,
+		.tuner_gpio    = pctv_461e,
+		.has_dvb       = 1,
+		.ir_codes      = RC_MAP_PINNACLE_PCTV_HD,
+	},
 };
+EXPORT_SYMBOL_GPL(em28xx_boards);
+
 const unsigned int em28xx_bcount = ARRAY_SIZE(em28xx_boards);
 
 /* table of devices that work with this driver */
@@ -2208,6 +2324,12 @@
 			.driver_info = EM2884_BOARD_PCTV_520E },
 	{ USB_DEVICE(0x1b80, 0xe1cc),
 			.driver_info = EM2874_BOARD_DELOCK_61959 },
+	{ USB_DEVICE(0x1ae7, 0x9003),
+			.driver_info = EM2765_BOARD_SPEEDLINK_VAD_LAPLACE },
+	{ USB_DEVICE(0x1ae7, 0x9004),
+			.driver_info = EM2765_BOARD_SPEEDLINK_VAD_LAPLACE },
+	{ USB_DEVICE(0x2013, 0x0258),
+			.driver_info = EM28178_BOARD_PCTV_461E },
 	{ },
 };
 MODULE_DEVICE_TABLE(usb, em28xx_id_table);
@@ -2239,24 +2361,6 @@
 };
 /* NOTE: introduce a separate hash table for devices with 16 bit eeproms */
 
-/* I2C possible address to saa7115, tvp5150, msp3400, tvaudio */
-static unsigned short saa711x_addrs[] = {
-	0x4a >> 1, 0x48 >> 1,   /* SAA7111, SAA7111A and SAA7113 */
-	0x42 >> 1, 0x40 >> 1,   /* SAA7114, SAA7115 and SAA7118 */
-	I2C_CLIENT_END };
-
-static unsigned short tvp5150_addrs[] = {
-	0xb8 >> 1,
-	0xba >> 1,
-	I2C_CLIENT_END
-};
-
-static unsigned short msp3400_addrs[] = {
-	0x80 >> 1,
-	0x88 >> 1,
-	I2C_CLIENT_END
-};
-
 int em28xx_tuner_callback(void *ptr, int component, int command, int arg)
 {
 	struct em28xx_i2c_bus *i2c_bus = ptr;
@@ -2408,113 +2512,6 @@
 	em28xx_set_mode(dev, EM28XX_SUSPEND);
 }
 
-static void em28xx_setup_xc3028(struct em28xx *dev, struct xc2028_ctrl *ctl)
-{
-	memset(ctl, 0, sizeof(*ctl));
-
-	ctl->fname   = XC2028_DEFAULT_FIRMWARE;
-	ctl->max_len = 64;
-	ctl->mts = em28xx_boards[dev->model].mts_firmware;
-
-	switch (dev->model) {
-	case EM2880_BOARD_EMPIRE_DUAL_TV:
-	case EM2880_BOARD_HAUPPAUGE_WINTV_HVR_900:
-	case EM2882_BOARD_TERRATEC_HYBRID_XS:
-		ctl->demod = XC3028_FE_ZARLINK456;
-		break;
-	case EM2880_BOARD_TERRATEC_HYBRID_XS:
-	case EM2880_BOARD_TERRATEC_HYBRID_XS_FR:
-	case EM2881_BOARD_PINNACLE_HYBRID_PRO:
-		ctl->demod = XC3028_FE_ZARLINK456;
-		break;
-	case EM2880_BOARD_HAUPPAUGE_WINTV_HVR_900_R2:
-	case EM2882_BOARD_PINNACLE_HYBRID_PRO_330E:
-		ctl->demod = XC3028_FE_DEFAULT;
-		break;
-	case EM2880_BOARD_AMD_ATI_TV_WONDER_HD_600:
-		ctl->demod = XC3028_FE_DEFAULT;
-		ctl->fname = XC3028L_DEFAULT_FIRMWARE;
-		break;
-	case EM2883_BOARD_HAUPPAUGE_WINTV_HVR_850:
-	case EM2883_BOARD_HAUPPAUGE_WINTV_HVR_950:
-	case EM2880_BOARD_PINNACLE_PCTV_HD_PRO:
-		/* FIXME: Better to specify the needed IF */
-		ctl->demod = XC3028_FE_DEFAULT;
-		break;
-	case EM2883_BOARD_KWORLD_HYBRID_330U:
-	case EM2882_BOARD_DIKOM_DK300:
-	case EM2882_BOARD_KWORLD_VS_DVBT:
-		ctl->demod = XC3028_FE_CHINA;
-		ctl->fname = XC2028_DEFAULT_FIRMWARE;
-		break;
-	case EM2882_BOARD_EVGA_INDTUBE:
-		ctl->demod = XC3028_FE_CHINA;
-		ctl->fname = XC3028L_DEFAULT_FIRMWARE;
-		break;
-	default:
-		ctl->demod = XC3028_FE_OREN538;
-	}
-}
-
-static void em28xx_tuner_setup(struct em28xx *dev)
-{
-	struct tuner_setup           tun_setup;
-	struct v4l2_frequency        f;
-
-	if (dev->tuner_type == TUNER_ABSENT)
-		return;
-
-	memset(&tun_setup, 0, sizeof(tun_setup));
-
-	tun_setup.mode_mask = T_ANALOG_TV | T_RADIO;
-	tun_setup.tuner_callback = em28xx_tuner_callback;
-
-	if (dev->board.radio.type) {
-		tun_setup.type = dev->board.radio.type;
-		tun_setup.addr = dev->board.radio_addr;
-
-		v4l2_device_call_all(&dev->v4l2_dev, 0, tuner, s_type_addr, &tun_setup);
-	}
-
-	if ((dev->tuner_type != TUNER_ABSENT) && (dev->tuner_type)) {
-		tun_setup.type   = dev->tuner_type;
-		tun_setup.addr   = dev->tuner_addr;
-
-		v4l2_device_call_all(&dev->v4l2_dev, 0, tuner, s_type_addr, &tun_setup);
-	}
-
-	if (dev->tda9887_conf) {
-		struct v4l2_priv_tun_config tda9887_cfg;
-
-		tda9887_cfg.tuner = TUNER_TDA9887;
-		tda9887_cfg.priv = &dev->tda9887_conf;
-
-		v4l2_device_call_all(&dev->v4l2_dev, 0, tuner, s_config, &tda9887_cfg);
-	}
-
-	if (dev->tuner_type == TUNER_XC2028) {
-		struct v4l2_priv_tun_config  xc2028_cfg;
-		struct xc2028_ctrl           ctl;
-
-		memset(&xc2028_cfg, 0, sizeof(xc2028_cfg));
-		memset(&ctl, 0, sizeof(ctl));
-
-		em28xx_setup_xc3028(dev, &ctl);
-
-		xc2028_cfg.tuner = TUNER_XC2028;
-		xc2028_cfg.priv  = &ctl;
-
-		v4l2_device_call_all(&dev->v4l2_dev, 0, tuner, s_config, &xc2028_cfg);
-	}
-
-	/* configure tuner */
-	f.tuner = 0;
-	f.type = V4L2_TUNER_ANALOG_TV;
-	f.frequency = 9076;     /* just a magic number */
-	dev->ctl_freq = f.frequency;
-	v4l2_device_call_all(&dev->v4l2_dev, 0, tuner, s_frequency, &f);
-}
-
 static int em28xx_hint_board(struct em28xx *dev)
 {
 	int i;
@@ -2768,57 +2765,56 @@
 	/* Allow override tuner type by a module parameter */
 	if (tuner >= 0)
 		dev->tuner_type = tuner;
-
-	/* request some modules */
-	if (dev->board.has_msp34xx)
-		v4l2_i2c_new_subdev(&dev->v4l2_dev, &dev->i2c_adap[dev->def_i2c_bus],
-			"msp3400", 0, msp3400_addrs);
-
-	if (dev->board.decoder == EM28XX_SAA711X)
-		v4l2_i2c_new_subdev(&dev->v4l2_dev, &dev->i2c_adap[dev->def_i2c_bus],
-			"saa7115_auto", 0, saa711x_addrs);
-
-	if (dev->board.decoder == EM28XX_TVP5150)
-		v4l2_i2c_new_subdev(&dev->v4l2_dev, &dev->i2c_adap[dev->def_i2c_bus],
-			"tvp5150", 0, tvp5150_addrs);
-
-	if (dev->board.adecoder == EM28XX_TVAUDIO)
-		v4l2_i2c_new_subdev(&dev->v4l2_dev, &dev->i2c_adap[dev->def_i2c_bus],
-			"tvaudio", dev->board.tvaudio_addr, NULL);
-
-	if (dev->board.tuner_type != TUNER_ABSENT) {
-		int has_demod = (dev->tda9887_conf & TDA9887_PRESENT);
-
-		if (dev->board.radio.type)
-			v4l2_i2c_new_subdev(&dev->v4l2_dev, &dev->i2c_adap[dev->def_i2c_bus],
-				"tuner", dev->board.radio_addr, NULL);
-
-		if (has_demod)
-			v4l2_i2c_new_subdev(&dev->v4l2_dev,
-				&dev->i2c_adap[dev->def_i2c_bus], "tuner",
-				0, v4l2_i2c_tuner_addrs(ADDRS_DEMOD));
-		if (dev->tuner_addr == 0) {
-			enum v4l2_i2c_tuner_type type =
-				has_demod ? ADDRS_TV_WITH_DEMOD : ADDRS_TV;
-			struct v4l2_subdev *sd;
-
-			sd = v4l2_i2c_new_subdev(&dev->v4l2_dev,
-				&dev->i2c_adap[dev->def_i2c_bus], "tuner",
-				0, v4l2_i2c_tuner_addrs(type));
-
-			if (sd)
-				dev->tuner_addr = v4l2_i2c_subdev_addr(sd);
-		} else {
-			v4l2_i2c_new_subdev(&dev->v4l2_dev, &dev->i2c_adap[dev->def_i2c_bus],
-				"tuner", dev->tuner_addr, NULL);
-		}
-	}
-
-	em28xx_tuner_setup(dev);
-
-	em28xx_init_camera(dev);
 }
 
+void em28xx_setup_xc3028(struct em28xx *dev, struct xc2028_ctrl *ctl)
+{
+	memset(ctl, 0, sizeof(*ctl));
+
+	ctl->fname   = XC2028_DEFAULT_FIRMWARE;
+	ctl->max_len = 64;
+	ctl->mts = em28xx_boards[dev->model].mts_firmware;
+
+	switch (dev->model) {
+	case EM2880_BOARD_EMPIRE_DUAL_TV:
+	case EM2880_BOARD_HAUPPAUGE_WINTV_HVR_900:
+	case EM2882_BOARD_TERRATEC_HYBRID_XS:
+		ctl->demod = XC3028_FE_ZARLINK456;
+		break;
+	case EM2880_BOARD_TERRATEC_HYBRID_XS:
+	case EM2880_BOARD_TERRATEC_HYBRID_XS_FR:
+	case EM2881_BOARD_PINNACLE_HYBRID_PRO:
+		ctl->demod = XC3028_FE_ZARLINK456;
+		break;
+	case EM2880_BOARD_HAUPPAUGE_WINTV_HVR_900_R2:
+	case EM2882_BOARD_PINNACLE_HYBRID_PRO_330E:
+		ctl->demod = XC3028_FE_DEFAULT;
+		break;
+	case EM2880_BOARD_AMD_ATI_TV_WONDER_HD_600:
+		ctl->demod = XC3028_FE_DEFAULT;
+		ctl->fname = XC3028L_DEFAULT_FIRMWARE;
+		break;
+	case EM2883_BOARD_HAUPPAUGE_WINTV_HVR_850:
+	case EM2883_BOARD_HAUPPAUGE_WINTV_HVR_950:
+	case EM2880_BOARD_PINNACLE_PCTV_HD_PRO:
+		/* FIXME: Better to specify the needed IF */
+		ctl->demod = XC3028_FE_DEFAULT;
+		break;
+	case EM2883_BOARD_KWORLD_HYBRID_330U:
+	case EM2882_BOARD_DIKOM_DK300:
+	case EM2882_BOARD_KWORLD_VS_DVBT:
+		ctl->demod = XC3028_FE_CHINA;
+		ctl->fname = XC2028_DEFAULT_FIRMWARE;
+		break;
+	case EM2882_BOARD_EVGA_INDTUBE:
+		ctl->demod = XC3028_FE_CHINA;
+		ctl->fname = XC3028L_DEFAULT_FIRMWARE;
+		break;
+	default:
+		ctl->demod = XC3028_FE_OREN538;
+	}
+}
+EXPORT_SYMBOL_GPL(em28xx_setup_xc3028);
 
 static void request_module_async(struct work_struct *work)
 {
@@ -2831,17 +2827,30 @@
 	 * can be initialised right now. Otherwise, the module init
 	 * code will do it.
 	 */
+
+	/*
+	 * Devices with an audio-only interface also have a V4L/DVB/RC
+	 * interface. Don't register extensions twice on those devices.
+	 */
+	if (dev->is_audio_only) {
+#if defined(CONFIG_MODULES) && defined(MODULE)
+		request_module("em28xx-alsa");
+#endif
+		return;
+	}
+
 	em28xx_init_extension(dev);
 
 #if defined(CONFIG_MODULES) && defined(MODULE)
+	if (dev->has_video)
+		request_module("em28xx-v4l");
 	if (dev->has_audio_class)
 		request_module("snd-usb-audio");
 	else if (dev->has_alsa_audio)
 		request_module("em28xx-alsa");
-
 	if (dev->board.has_dvb)
 		request_module("em28xx-dvb");
-	if (dev->board.has_snapshot_button ||
+	if (dev->board.buttons ||
 	    ((dev->board.ir_codes || dev->board.has_ir_i2c) && !disable_ir))
 		request_module("em28xx-rc");
 #endif /* CONFIG_MODULES */
@@ -2867,23 +2876,20 @@
 {
 	/*FIXME: I2C IR should be disconnected */
 
-	em28xx_release_analog_resources(dev);
+	mutex_lock(&dev->lock);
 
 	if (dev->def_i2c_bus)
 		em28xx_i2c_unregister(dev, 1);
 	em28xx_i2c_unregister(dev, 0);
-	if (dev->clk)
-		v4l2_clk_unregister_fixed(dev->clk);
-
-	v4l2_ctrl_handler_free(&dev->ctrl_handler);
-
-	v4l2_device_unregister(&dev->v4l2_dev);
 
 	usb_put_dev(dev->udev);
 
 	/* Mark device as unused */
-	clear_bit(dev->devno, &em28xx_devused);
+	clear_bit(dev->devno, em28xx_devused);
+
+	mutex_unlock(&dev->lock);
 };
+EXPORT_SYMBOL_GPL(em28xx_release_resources);
 
 /*
  * em28xx_init_dev()
@@ -2893,7 +2899,6 @@
 			   struct usb_interface *interface,
 			   int minor)
 {
-	struct v4l2_ctrl_handler *hdl = &dev->ctrl_handler;
 	int retval;
 	static const char *default_chip_name = "em28xx";
 	const char *chip_name = default_chip_name;
@@ -2968,6 +2973,11 @@
 			dev->wait_after_write = 0;
 			dev->eeprom_addrwidth_16bit = 1;
 			break;
+		case CHIP_ID_EM28178:
+			chip_name = "em28178";
+			dev->wait_after_write = 0;
+			dev->eeprom_addrwidth_16bit = 1;
+			break;
 		case CHIP_ID_EM2883:
 			chip_name = "em2882/3";
 			dev->wait_after_write = 0;
@@ -2983,6 +2993,16 @@
 		}
 	}
 
+	if (dev->chip_id == CHIP_ID_EM2870 ||
+	    dev->chip_id == CHIP_ID_EM2874 ||
+	    dev->chip_id == CHIP_ID_EM28174 ||
+	    dev->chip_id == CHIP_ID_EM28178) {
+		/* Digital only device - don't load any alsa module */
+		dev->audio_mode.has_audio = false;
+		dev->has_audio_class = false;
+		dev->has_alsa_audio = false;
+	}
+
 	if (chip_name != default_chip_name)
 		printk(KERN_INFO DRIVER_NAME
 		       ": chip ID is %s\n", chip_name);
@@ -3015,15 +3035,6 @@
 		}
 	}
 
-	retval = v4l2_device_register(&interface->dev, &dev->v4l2_dev);
-	if (retval < 0) {
-		em28xx_errdev("Call to v4l2_device_register() failed!\n");
-		return retval;
-	}
-
-	v4l2_ctrl_handler_init(hdl, 8);
-	dev->v4l2_dev.ctrl_handler = hdl;
-
 	rt_mutex_init(&dev->i2c_bus_lock);
 
 	/* register i2c bus 0 */
@@ -3034,7 +3045,7 @@
 	if (retval < 0) {
 		em28xx_errdev("%s: em28xx_i2c_register bus 0 - error [%d]!\n",
 			__func__, retval);
-		goto unregister_dev;
+		return retval;
 	}
 
 	/* register i2c bus 1 */
@@ -3048,88 +3059,17 @@
 		if (retval < 0) {
 			em28xx_errdev("%s: em28xx_i2c_register bus 1 - error [%d]!\n",
 				__func__, retval);
-			goto unregister_dev;
+
+			em28xx_i2c_unregister(dev, 0);
+
+			return retval;
 		}
 	}
 
-	/*
-	 * Default format, used for tvp5150 or saa711x output formats
-	 */
-	dev->vinmode = 0x10;
-	dev->vinctl  = EM28XX_VINCTRL_INTERLACED |
-		       EM28XX_VINCTRL_CCIR656_ENABLE;
-
 	/* Do board specific init and eeprom reading */
 	em28xx_card_setup(dev);
 
-	/* Configure audio */
-	retval = em28xx_audio_setup(dev);
-	if (retval < 0) {
-		em28xx_errdev("%s: Error while setting audio - error [%d]!\n",
-			__func__, retval);
-		goto fail;
-	}
-	if (dev->audio_mode.ac97 != EM28XX_NO_AC97) {
-		v4l2_ctrl_new_std(hdl, &em28xx_ctrl_ops,
-			V4L2_CID_AUDIO_MUTE, 0, 1, 1, 1);
-		v4l2_ctrl_new_std(hdl, &em28xx_ctrl_ops,
-			V4L2_CID_AUDIO_VOLUME, 0, 0x1f, 1, 0x1f);
-	} else {
-		/* install the em28xx notify callback */
-		v4l2_ctrl_notify(v4l2_ctrl_find(hdl, V4L2_CID_AUDIO_MUTE),
-				em28xx_ctrl_notify, dev);
-		v4l2_ctrl_notify(v4l2_ctrl_find(hdl, V4L2_CID_AUDIO_VOLUME),
-				em28xx_ctrl_notify, dev);
-	}
-
-	/* wake i2c devices */
-	em28xx_wake_i2c(dev);
-
-	/* init video dma queues */
-	INIT_LIST_HEAD(&dev->vidq.active);
-	INIT_LIST_HEAD(&dev->vbiq.active);
-
-	if (dev->board.has_msp34xx) {
-		/* Send a reset to other chips via gpio */
-		retval = em28xx_write_reg(dev, EM2820_R08_GPIO_CTRL, 0xf7);
-		if (retval < 0) {
-			em28xx_errdev("%s: em28xx_write_reg - "
-				      "msp34xx(1) failed! error [%d]\n",
-				      __func__, retval);
-			goto fail;
-		}
-		msleep(3);
-
-		retval = em28xx_write_reg(dev, EM2820_R08_GPIO_CTRL, 0xff);
-		if (retval < 0) {
-			em28xx_errdev("%s: em28xx_write_reg - "
-				      "msp34xx(2) failed! error [%d]\n",
-				      __func__, retval);
-			goto fail;
-		}
-		msleep(3);
-	}
-
-	retval = em28xx_register_analog_devices(dev);
-	if (retval < 0) {
-		goto fail;
-	}
-
-	/* Save some power by putting tuner to sleep */
-	v4l2_device_call_all(&dev->v4l2_dev, 0, core, s_power, 0);
-
 	return 0;
-
-fail:
-	if (dev->def_i2c_bus)
-		em28xx_i2c_unregister(dev, 1);
-	em28xx_i2c_unregister(dev, 0);
-	v4l2_ctrl_handler_free(&dev->ctrl_handler);
-
-unregister_dev:
-	v4l2_device_unregister(&dev->v4l2_dev);
-
-	return retval;
 }
 
 /* high bandwidth multiplier, as encoded in highspeed endpoint descriptors */
@@ -3154,7 +3094,7 @@
 
 	/* Check to see next free device and mark as used */
 	do {
-		nr = find_first_zero_bit(&em28xx_devused, EM28XX_MAXBOARDS);
+		nr = find_first_zero_bit(em28xx_devused, EM28XX_MAXBOARDS);
 		if (nr >= EM28XX_MAXBOARDS) {
 			/* No free device slots */
 			printk(DRIVER_NAME ": Supports only %i em28xx boards.\n",
@@ -3162,7 +3102,7 @@
 			retval = -ENOMEM;
 			goto err_no_slot;
 		}
-	} while (test_and_set_bit(nr, &em28xx_devused));
+	} while (test_and_set_bit(nr, em28xx_devused));
 
 	/* Don't register audio interfaces */
 	if (interface->altsetting[0].desc.bInterfaceClass == USB_CLASS_AUDIO) {
@@ -3332,7 +3272,9 @@
 	dev->alt   = -1;
 	dev->is_audio_only = has_audio && !(has_video || has_dvb);
 	dev->has_alsa_audio = has_audio;
-	dev->audio_ifnum = ifnum;
+	dev->audio_mode.has_audio = has_audio;
+	dev->has_video = has_video;
+	dev->ifnum = ifnum;
 
 	/* Checks if audio is provided by some interface */
 	for (i = 0; i < udev->config->desc.bNumInterfaces; i++) {
@@ -3369,15 +3311,11 @@
 	/* save our data pointer in this interface device */
 	usb_set_intfdata(interface, dev);
 
-	/* initialize videobuf2 stuff */
-	em28xx_vb2_setup(dev);
-
 	/* allocate device struct */
 	mutex_init(&dev->lock);
-	mutex_lock(&dev->lock);
 	retval = em28xx_init_dev(dev, udev, interface, nr);
 	if (retval) {
-		goto unlock_and_free;
+		goto err_free;
 	}
 
 	if (usb_xfer_mode < 0) {
@@ -3402,26 +3340,6 @@
 
 		em28xx_info("dvb set to %s mode.\n",
 			    dev->dvb_xfer_bulk ? "bulk" : "isoc");
-
-		/* pre-allocate DVB usb transfer buffers */
-		if (dev->dvb_xfer_bulk) {
-			retval = em28xx_alloc_urbs(dev, EM28XX_DIGITAL_MODE,
-					    dev->dvb_xfer_bulk,
-					    EM28XX_DVB_NUM_BUFS,
-					    512,
-					    EM28XX_DVB_BULK_PACKET_MULTIPLIER);
-		} else {
-			retval = em28xx_alloc_urbs(dev, EM28XX_DIGITAL_MODE,
-					    dev->dvb_xfer_bulk,
-					    EM28XX_DVB_NUM_BUFS,
-					    dev->dvb_max_pkt_size_isoc,
-					    EM28XX_DVB_NUM_ISOC_PACKETS);
-		}
-		if (retval) {
-			printk(DRIVER_NAME
-			       ": Failed to pre-allocate USB transfer buffers for DVB.\n");
-			goto unlock_and_free;
-		}
 	}
 
 	request_modules(dev);
@@ -3429,19 +3347,15 @@
 	/* Should be the last thing to do, to avoid newer udev's to
 	   open the device before fully initializing it
 	 */
-	mutex_unlock(&dev->lock);
 
 	return 0;
 
-unlock_and_free:
-	mutex_unlock(&dev->lock);
-
 err_free:
 	kfree(dev->alt_max_pkt_size_isoc);
 	kfree(dev);
 
 err:
-	clear_bit(nr, &em28xx_devused);
+	clear_bit(nr, em28xx_devused);
 
 err_no_slot:
 	usb_put_dev(udev);
@@ -3465,36 +3379,13 @@
 
 	dev->disconnected = 1;
 
-	if (dev->is_audio_only) {
-		mutex_lock(&dev->lock);
-		em28xx_close_extension(dev);
-		mutex_unlock(&dev->lock);
-		return;
-	}
-
-	em28xx_info("disconnecting %s\n", dev->vdev->name);
+	em28xx_info("Disconnecting %s\n", dev->name);
 
 	flush_request_modules(dev);
 
-	mutex_lock(&dev->lock);
-
-	v4l2_device_disconnect(&dev->v4l2_dev);
-
-	if (dev->users) {
-		em28xx_warn("device %s is open! Deregistration and memory deallocation are deferred on close.\n",
-			    video_device_node_name(dev->vdev));
-
-		em28xx_uninit_usb_xfer(dev, EM28XX_ANALOG_MODE);
-		em28xx_uninit_usb_xfer(dev, EM28XX_DIGITAL_MODE);
-	}
-
 	em28xx_close_extension(dev);
-	/* NOTE: must be called BEFORE the resources are released */
 
-	if (!dev->users)
-		em28xx_release_resources(dev);
-
-	mutex_unlock(&dev->lock);
+	em28xx_release_resources(dev);
 
 	if (!dev->users) {
 		kfree(dev->alt_max_pkt_size_isoc);
diff --git a/drivers/media/usb/em28xx/em28xx-core.c b/drivers/media/usb/em28xx/em28xx-core.c
index fc157af..898fb9b 100644
--- a/drivers/media/usb/em28xx/em28xx-core.c
+++ b/drivers/media/usb/em28xx/em28xx-core.c
@@ -23,6 +23,7 @@
  */
 
 #include <linux/init.h>
+#include <linux/jiffies.h>
 #include <linux/list.h>
 #include <linux/module.h>
 #include <linux/slab.h>
@@ -33,6 +34,16 @@
 
 #include "em28xx.h"
 
+#define DRIVER_AUTHOR "Ludovico Cavedon <cavedon@sssup.it>, " \
+		      "Markus Rechberger <mrechberger@gmail.com>, " \
+		      "Mauro Carvalho Chehab <mchehab@infradead.org>, " \
+		      "Sascha Sommer <saschasommer@freenet.de>"
+
+MODULE_AUTHOR(DRIVER_AUTHOR);
+MODULE_DESCRIPTION(DRIVER_DESC);
+MODULE_LICENSE("GPL");
+MODULE_VERSION(EM28XX_VERSION);
+
 /* #define ENABLE_DEBUG_ISOC_FRAMES */
 
 static unsigned int core_debug;
@@ -53,14 +64,6 @@
 		printk(KERN_INFO "%s %s :"fmt, \
 			 dev->name, __func__ , ##arg); } while (0)
 
-static int alt;
-module_param(alt, int, 0644);
-MODULE_PARM_DESC(alt, "alternate setting to use for video endpoint");
-
-static unsigned int disable_vbi;
-module_param(disable_vbi, int, 0644);
-MODULE_PARM_DESC(disable_vbi, "disable vbi support");
-
 /* FIXME */
 #define em28xx_isocdbg(fmt, arg...) do {\
 	if (core_debug) \
@@ -226,21 +229,42 @@
 EXPORT_SYMBOL_GPL(em28xx_write_reg_bits);
 
 /*
+ * em28xx_toggle_reg_bits()
+ * toggles/inverts the bits (specified by bitmask) of a register
+ */
+int em28xx_toggle_reg_bits(struct em28xx *dev, u16 reg, u8 bitmask)
+{
+	int oldval;
+	u8 newval;
+
+	oldval = em28xx_read_reg(dev, reg);
+	if (oldval < 0)
+		return oldval;
+
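+	/* Invert only the bits selected by bitmask; all other bits keep
+	 * their previous value (equivalent to oldval ^ bitmask) */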
+	newval = (~oldval & bitmask) | (oldval & ~bitmask);
+
+	return em28xx_write_reg(dev, reg, newval);
+}
+EXPORT_SYMBOL_GPL(em28xx_toggle_reg_bits);
+
+/*
  * em28xx_is_ac97_ready()
  * Checks if ac97 is ready
  */
 static int em28xx_is_ac97_ready(struct em28xx *dev)
 {
-	int ret, i;
+	unsigned long timeout = jiffies + msecs_to_jiffies(EM28XX_AC97_XFER_TIMEOUT);
+	int ret;
 
 	/* Wait up to EM28XX_AC97_XFER_TIMEOUT ms for AC97 command to complete */
-	for (i = 0; i < 10; i++, msleep(5)) {
+	while (time_is_after_jiffies(timeout)) {
 		ret = em28xx_read_reg(dev, EM28XX_R43_AC97BUSY);
 		if (ret < 0)
 			return ret;
 
 		if (!(ret & 0x01))
 			return 0;
+		msleep(5);
 	}
 
 	em28xx_warn("AC97 command still being executed: not handled properly!\n");
@@ -482,16 +506,8 @@
 	int vid1, vid2, feat, cfg;
 	u32 vid;
 
-	if (dev->chip_id == CHIP_ID_EM2870 || dev->chip_id == CHIP_ID_EM2874
-		|| dev->chip_id == CHIP_ID_EM28174) {
-		/* Digital only device - don't load any alsa module */
-		dev->audio_mode.has_audio = false;
-		dev->has_audio_class = false;
-		dev->has_alsa_audio = false;
+	if (!dev->audio_mode.has_audio)
 		return 0;
-	}
-
-	dev->audio_mode.has_audio = true;
 
 	/* See how this device is configured */
 	cfg = em28xx_read_reg(dev, EM28XX_R00_CHIPCFG);
@@ -504,17 +520,19 @@
 		dev->has_alsa_audio = false;
 		dev->audio_mode.has_audio = false;
 		return 0;
-	} else if ((cfg & EM28XX_CHIPCFG_AUDIOMASK) ==
-		   EM28XX_CHIPCFG_I2S_3_SAMPRATES) {
-		em28xx_info("I2S Audio (3 sample rates)\n");
-		dev->audio_mode.i2s_3rates = 1;
-	} else if ((cfg & EM28XX_CHIPCFG_AUDIOMASK) ==
-		   EM28XX_CHIPCFG_I2S_5_SAMPRATES) {
-		em28xx_info("I2S Audio (5 sample rates)\n");
-		dev->audio_mode.i2s_5rates = 1;
-	}
-
-	if ((cfg & EM28XX_CHIPCFG_AUDIOMASK) != EM28XX_CHIPCFG_AC97) {
+	} else if ((cfg & EM28XX_CHIPCFG_AUDIOMASK) != EM28XX_CHIPCFG_AC97) {
+		if (dev->chip_id < CHIP_ID_EM2860 &&
+	            (cfg & EM28XX_CHIPCFG_AUDIOMASK) ==
+		    EM2820_CHIPCFG_I2S_1_SAMPRATE)
+			dev->audio_mode.i2s_samplerates = 1;
+		else if (dev->chip_id >= CHIP_ID_EM2860 &&
+			 (cfg & EM28XX_CHIPCFG_AUDIOMASK) ==
+			 EM2860_CHIPCFG_I2S_5_SAMPRATES)
+			dev->audio_mode.i2s_samplerates = 5;
+		else
+			dev->audio_mode.i2s_samplerates = 3;
+		em28xx_info("I2S Audio (%d sample rate(s))\n",
+					       dev->audio_mode.i2s_samplerates);
 		/* Skip the code that does AC97 vendor detection */
 		dev->audio_mode.ac97 = EM28XX_NO_AC97;
 		goto init_audio;
@@ -582,23 +600,21 @@
 }
 EXPORT_SYMBOL_GPL(em28xx_audio_setup);
 
-int em28xx_colorlevels_set_default(struct em28xx *dev)
+const struct em28xx_led *em28xx_find_led(struct em28xx *dev,
+					 enum em28xx_led_role role)
 {
-	em28xx_write_reg(dev, EM28XX_R20_YGAIN, CONTRAST_DEFAULT);
-	em28xx_write_reg(dev, EM28XX_R21_YOFFSET, BRIGHTNESS_DEFAULT);
-	em28xx_write_reg(dev, EM28XX_R22_UVGAIN, SATURATION_DEFAULT);
-	em28xx_write_reg(dev, EM28XX_R23_UOFFSET, BLUE_BALANCE_DEFAULT);
-	em28xx_write_reg(dev, EM28XX_R24_VOFFSET, RED_BALANCE_DEFAULT);
-	em28xx_write_reg(dev, EM28XX_R25_SHARPNESS, SHARPNESS_DEFAULT);
-
-	em28xx_write_reg(dev, EM28XX_R14_GAMMA, 0x20);
-	em28xx_write_reg(dev, EM28XX_R15_RGAIN, 0x20);
-	em28xx_write_reg(dev, EM28XX_R16_GGAIN, 0x20);
-	em28xx_write_reg(dev, EM28XX_R17_BGAIN, 0x20);
-	em28xx_write_reg(dev, EM28XX_R18_ROFFSET, 0x00);
-	em28xx_write_reg(dev, EM28XX_R19_GOFFSET, 0x00);
-	return em28xx_write_reg(dev, EM28XX_R1A_BOFFSET, 0x00);
+	if (dev->board.leds) {
+		u8 k = 0;
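+		/* Scan the board's LED table up to the terminating -1 entry */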
+		while (dev->board.leds[k].role >= 0 &&
+			       dev->board.leds[k].role < EM28XX_NUM_LED_ROLES) {
+			if (dev->board.leds[k].role == role)
+				return &dev->board.leds[k];
+			k++;
+		}
+	}
+	return NULL;
 }
+EXPORT_SYMBOL_GPL(em28xx_find_led);
 
 int em28xx_capture_start(struct em28xx *dev, int start)
 {
@@ -606,273 +622,59 @@
 
 	if (dev->chip_id == CHIP_ID_EM2874 ||
 	    dev->chip_id == CHIP_ID_EM2884 ||
-	    dev->chip_id == CHIP_ID_EM28174) {
+	    dev->chip_id == CHIP_ID_EM28174 ||
+	    dev->chip_id == CHIP_ID_EM28178) {
 		/* The Transport Stream Enable Register moved in em2874 */
-		if (!start) {
-			rc = em28xx_write_reg_bits(dev, EM2874_R5F_TS_ENABLE,
-						   0x00,
-						   EM2874_TS1_CAPTURE_ENABLE);
-			return rc;
-		}
-
-		/* Enable Transport Stream */
 		rc = em28xx_write_reg_bits(dev, EM2874_R5F_TS_ENABLE,
-					   EM2874_TS1_CAPTURE_ENABLE,
+					   start ?
+					       EM2874_TS1_CAPTURE_ENABLE : 0x00,
 					   EM2874_TS1_CAPTURE_ENABLE);
-		return rc;
+	} else {
+		/* FIXME: which is the best order? */
+		/* video registers are sampled by VREF */
+		rc = em28xx_write_reg_bits(dev, EM28XX_R0C_USBSUSP,
+					   start ? 0x10 : 0x00, 0x10);
+		if (rc < 0)
+			return rc;
+
+		if (start) {
+			if (dev->board.is_webcam)
+				rc = em28xx_write_reg(dev, 0x13, 0x0c);
+
+			/* Enable video capture */
+			rc = em28xx_write_reg(dev, 0x48, 0x00);
+
+			if (dev->mode == EM28XX_ANALOG_MODE)
+				rc = em28xx_write_reg(dev,
+						    EM28XX_R12_VINENABLE, 0x67);
+			else
+				rc = em28xx_write_reg(dev,
+						    EM28XX_R12_VINENABLE, 0x37);
+
+			msleep(6);
+		} else {
+			/* disable video capture */
+			rc = em28xx_write_reg(dev, EM28XX_R12_VINENABLE, 0x27);
+		}
 	}
 
-
-	/* FIXME: which is the best order? */
-	/* video registers are sampled by VREF */
-	rc = em28xx_write_reg_bits(dev, EM28XX_R0C_USBSUSP,
-				   start ? 0x10 : 0x00, 0x10);
 	if (rc < 0)
 		return rc;
 
-	if (!start) {
-		/* disable video capture */
-		rc = em28xx_write_reg(dev, EM28XX_R12_VINENABLE, 0x27);
-		return rc;
+	/* Switch (explicitly controlled) analog capturing LED on/off */
+	if (dev->mode == EM28XX_ANALOG_MODE) {
+		const struct em28xx_led *led;
+		led = em28xx_find_led(dev, EM28XX_LED_ANALOG_CAPTURING);
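+		/* Set or clear the masked GPIO bits so the LED follows the
+		 * capture state, honouring active-low (inverted) LEDs */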
+		if (led)
+			em28xx_write_reg_bits(dev, led->gpio_reg,
+					      (!start ^ led->inverted) ?
+					      ~led->gpio_mask : led->gpio_mask,
+					      led->gpio_mask);
 	}
 
-	if (dev->board.is_webcam)
-		rc = em28xx_write_reg(dev, 0x13, 0x0c);
-
-	/* enable video capture */
-	rc = em28xx_write_reg(dev, 0x48, 0x00);
-
-	if (dev->mode == EM28XX_ANALOG_MODE)
-		rc = em28xx_write_reg(dev, EM28XX_R12_VINENABLE, 0x67);
-	else
-		rc = em28xx_write_reg(dev, EM28XX_R12_VINENABLE, 0x37);
-
-	msleep(6);
-
 	return rc;
 }
 
-int em28xx_vbi_supported(struct em28xx *dev)
-{
-	/* Modprobe option to manually disable */
-	if (disable_vbi == 1)
-		return 0;
-
-	if (dev->board.is_webcam)
-		return 0;
-
-	/* FIXME: check subdevices for VBI support */
-
-	if (dev->chip_id == CHIP_ID_EM2860 ||
-	    dev->chip_id == CHIP_ID_EM2883)
-		return 1;
-
-	/* Version of em28xx that does not support VBI */
-	return 0;
-}
-
-int em28xx_set_outfmt(struct em28xx *dev)
-{
-	int ret;
-	u8 fmt, vinctrl;
-
-	fmt = dev->format->reg;
-	if (!dev->is_em25xx)
-		fmt |= 0x20;
-	/*
-	 * NOTE: it's not clear if this is really needed !
-	 * The datasheets say bit 5 is a reserved bit and devices seem to work
-	 * fine without it. But the Windows driver sets it for em2710/50+em28xx
-	 * devices and we've always been setting it, too.
-	 *
-	 * em2765 (em25xx, em276x/7x/8x) devices do NOT work with this bit set,
-	 * it's likely used for an additional (compressed ?) format there.
-	 */
-	ret = em28xx_write_reg(dev, EM28XX_R27_OUTFMT, fmt);
-	if (ret < 0)
-		return ret;
-
-	ret = em28xx_write_reg(dev, EM28XX_R10_VINMODE, dev->vinmode);
-	if (ret < 0)
-		return ret;
-
-	vinctrl = dev->vinctl;
-	if (em28xx_vbi_supported(dev) == 1) {
-		vinctrl |= EM28XX_VINCTRL_VBI_RAW;
-		em28xx_write_reg(dev, EM28XX_R34_VBI_START_H, 0x00);
-		em28xx_write_reg(dev, EM28XX_R36_VBI_WIDTH, dev->vbi_width/4);
-		em28xx_write_reg(dev, EM28XX_R37_VBI_HEIGHT, dev->vbi_height);
-		if (dev->norm & V4L2_STD_525_60) {
-			/* NTSC */
-			em28xx_write_reg(dev, EM28XX_R35_VBI_START_V, 0x09);
-		} else if (dev->norm & V4L2_STD_625_50) {
-			/* PAL */
-			em28xx_write_reg(dev, EM28XX_R35_VBI_START_V, 0x07);
-		}
-	}
-
-	return em28xx_write_reg(dev, EM28XX_R11_VINCTRL, vinctrl);
-}
-
-static int em28xx_accumulator_set(struct em28xx *dev, u8 xmin, u8 xmax,
-				  u8 ymin, u8 ymax)
-{
-	em28xx_coredbg("em28xx Scale: (%d,%d)-(%d,%d)\n",
-			xmin, ymin, xmax, ymax);
-
-	em28xx_write_regs(dev, EM28XX_R28_XMIN, &xmin, 1);
-	em28xx_write_regs(dev, EM28XX_R29_XMAX, &xmax, 1);
-	em28xx_write_regs(dev, EM28XX_R2A_YMIN, &ymin, 1);
-	return em28xx_write_regs(dev, EM28XX_R2B_YMAX, &ymax, 1);
-}
-
-static void em28xx_capture_area_set(struct em28xx *dev, u8 hstart, u8 vstart,
-				   u16 width, u16 height)
-{
-	u8 cwidth = width >> 2;
-	u8 cheight = height >> 2;
-	u8 overflow = (height >> 9 & 0x02) | (width >> 10 & 0x01);
-	/* NOTE: size limit: 2047x1023 = 2MPix */
-
-	em28xx_coredbg("capture area set to (%d,%d): %dx%d\n",
-		       hstart, vstart,
-		       ((overflow & 2) << 9 | cwidth << 2),
-		       ((overflow & 1) << 10 | cheight << 2));
-
-	em28xx_write_regs(dev, EM28XX_R1C_HSTART, &hstart, 1);
-	em28xx_write_regs(dev, EM28XX_R1D_VSTART, &vstart, 1);
-	em28xx_write_regs(dev, EM28XX_R1E_CWIDTH, &cwidth, 1);
-	em28xx_write_regs(dev, EM28XX_R1F_CHEIGHT, &cheight, 1);
-	em28xx_write_regs(dev, EM28XX_R1B_OFLOW, &overflow, 1);
-
-	/* FIXME: function/meaning of these registers ? */
-	/* FIXME: align width+height to multiples of 4 ?! */
-	if (dev->is_em25xx) {
-		em28xx_write_reg(dev, 0x34, width >> 4);
-		em28xx_write_reg(dev, 0x35, height >> 4);
-	}
-}
-
-static int em28xx_scaler_set(struct em28xx *dev, u16 h, u16 v)
-{
-	u8 mode;
-	/* the em2800 scaler only supports scaling down to 50% */
-
-	if (dev->board.is_em2800) {
-		mode = (v ? 0x20 : 0x00) | (h ? 0x10 : 0x00);
-	} else {
-		u8 buf[2];
-
-		buf[0] = h;
-		buf[1] = h >> 8;
-		em28xx_write_regs(dev, EM28XX_R30_HSCALELOW, (char *)buf, 2);
-
-		buf[0] = v;
-		buf[1] = v >> 8;
-		em28xx_write_regs(dev, EM28XX_R32_VSCALELOW, (char *)buf, 2);
-		/* it seems that both H and V scalers must be active
-		   to work correctly */
-		mode = (h || v) ? 0x30 : 0x00;
-	}
-	return em28xx_write_reg_bits(dev, EM28XX_R26_COMPR, mode, 0x30);
-}
-
-/* FIXME: this only function read values from dev */
-int em28xx_resolution_set(struct em28xx *dev)
-{
-	int width, height;
-	width = norm_maxw(dev);
-	height = norm_maxh(dev);
-
-	/* Properly setup VBI */
-	dev->vbi_width = 720;
-	if (dev->norm & V4L2_STD_525_60)
-		dev->vbi_height = 12;
-	else
-		dev->vbi_height = 18;
-
-	em28xx_set_outfmt(dev);
-
-	em28xx_accumulator_set(dev, 1, (width - 4) >> 2, 1, (height - 4) >> 2);
-
-	/* If we don't set the start position to 2 in VBI mode, we end up
-	   with line 20/21 being YUYV encoded instead of being in 8-bit
-	   greyscale.  The core of the issue is that line 21 (and line 23 for
-	   PAL WSS) are inside of active video region, and as a result they
-	   get the pixelformatting associated with that area.  So by cropping
-	   it out, we end up with the same format as the rest of the VBI
-	   region */
-	if (em28xx_vbi_supported(dev) == 1)
-		em28xx_capture_area_set(dev, 0, 2, width, height);
-	else
-		em28xx_capture_area_set(dev, 0, 0, width, height);
-
-	return em28xx_scaler_set(dev, dev->hscale, dev->vscale);
-}
-
-/* Set USB alternate setting for analog video */
-int em28xx_set_alternate(struct em28xx *dev)
-{
-	int errCode;
-	int i;
-	unsigned int min_pkt_size = dev->width * 2 + 4;
-
-	/* NOTE: for isoc transfers, only alt settings > 0 are allowed
-		 bulk transfers seem to work only with alt=0 ! */
-	dev->alt = 0;
-	if ((alt > 0) && (alt < dev->num_alt)) {
-		em28xx_coredbg("alternate forced to %d\n", dev->alt);
-		dev->alt = alt;
-		goto set_alt;
-	}
-	if (dev->analog_xfer_bulk)
-		goto set_alt;
-
-	/* When image size is bigger than a certain value,
-	   the frame size should be increased, otherwise, only
-	   green screen will be received.
-	 */
-	if (dev->width * 2 * dev->height > 720 * 240 * 2)
-		min_pkt_size *= 2;
-
-	for (i = 0; i < dev->num_alt; i++) {
-		/* stop when the selected alt setting offers enough bandwidth */
-		if (dev->alt_max_pkt_size_isoc[i] >= min_pkt_size) {
-			dev->alt = i;
-			break;
-		/* otherwise make sure that we end up with the maximum bandwidth
-		   because the min_pkt_size equation might be wrong...
-		*/
-		} else if (dev->alt_max_pkt_size_isoc[i] >
-			   dev->alt_max_pkt_size_isoc[dev->alt])
-			dev->alt = i;
-	}
-
-set_alt:
-	/* NOTE: for bulk transfers, we need to call usb_set_interface()
-	 * even if the previous settings were the same. Otherwise streaming
-	 * fails with all urbs having status = -EOVERFLOW ! */
-	if (dev->analog_xfer_bulk) {
-		dev->max_pkt_size = 512; /* USB 2.0 spec */
-		dev->packet_multiplier = EM28XX_BULK_PACKET_MULTIPLIER;
-	} else { /* isoc */
-		em28xx_coredbg("minimum isoc packet size: %u (alt=%d)\n",
-			       min_pkt_size, dev->alt);
-		dev->max_pkt_size =
-				  dev->alt_max_pkt_size_isoc[dev->alt];
-		dev->packet_multiplier = EM28XX_NUM_ISOC_PACKETS;
-	}
-	em28xx_coredbg("setting alternate %d with wMaxPacketSize=%u\n",
-		       dev->alt, dev->max_pkt_size);
-	errCode = usb_set_interface(dev->udev, 0, dev->alt);
-	if (errCode < 0) {
-		em28xx_errdev("cannot change alternate number to %d (error=%i)\n",
-			      dev->alt, errCode);
-		return errCode;
-	}
-	return 0;
-}
-
 int em28xx_gpio_set(struct em28xx *dev, struct em28xx_reg_seq *gpio)
 {
 	int rc = 0;
@@ -1238,18 +1040,6 @@
 EXPORT_SYMBOL_GPL(em28xx_init_usb_xfer);
 
 /*
- * em28xx_wake_i2c()
- * configure i2c attached devices
- */
-void em28xx_wake_i2c(struct em28xx *dev)
-{
-	v4l2_device_call_all(&dev->v4l2_dev, 0, core,  reset, 0);
-	v4l2_device_call_all(&dev->v4l2_dev, 0, video, s_routing,
-			INPUT(dev->ctl_input)->vmux, 0, 0);
-	v4l2_device_call_all(&dev->v4l2_dev, 0, video, s_stream, 0);
-}
-
-/*
  * Device control list
  */
 
@@ -1272,7 +1062,7 @@
 		ops->init(dev);
 	}
 	mutex_unlock(&em28xx_devlist_mutex);
-	printk(KERN_INFO "Em28xx: Initialized (%s) extension\n", ops->name);
+	printk(KERN_INFO "em28xx: Registered (%s) extension\n", ops->name);
 	return 0;
 }
 EXPORT_SYMBOL(em28xx_register_extension);
diff --git a/drivers/media/usb/em28xx/em28xx-dvb.c b/drivers/media/usb/em28xx/em28xx-dvb.c
index 344042b..a0a669e 100644
--- a/drivers/media/usb/em28xx/em28xx-dvb.c
+++ b/drivers/media/usb/em28xx/em28xx-dvb.c
@@ -51,10 +51,14 @@
 #include "a8293.h"
 #include "qt1010.h"
 #include "mb86a20s.h"
+#include "m88ds3103.h"
+#include "m88ts2022.h"
 
-MODULE_DESCRIPTION("driver for em28xx based DVB cards");
 MODULE_AUTHOR("Mauro Carvalho Chehab <mchehab@infradead.org>");
 MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION(DRIVER_DESC " - digital TV interface");
+MODULE_VERSION(EM28XX_VERSION);
+
 
 static unsigned int debug;
 module_param(debug, int, 0644);
@@ -87,6 +91,7 @@
 	struct semaphore      pll_mutex;
 	bool			dont_attach_fe1;
 	int			lna_gpio;
+	struct i2c_client	*i2c_client_tuner;
 };
 
 
@@ -198,7 +203,7 @@
 		dvb_alt = dev->dvb_alt_isoc;
 	}
 
-	usb_set_interface(dev->udev, 0, dvb_alt);
+	usb_set_interface(dev->udev, dev->ifnum, dvb_alt);
 	rc = em28xx_set_mode(dev, EM28XX_DIGITAL_MODE);
 	if (rc < 0)
 		return rc;
@@ -271,7 +276,7 @@
 static int em28xx_dvb_bus_ctrl(struct dvb_frontend *fe, int acquire)
 {
 	struct em28xx_i2c_bus *i2c_bus = fe->dvb->priv;
-        struct em28xx *dev = i2c_bus->dev;
+	struct em28xx *dev = i2c_bus->dev;
 
 	if (acquire)
 		return em28xx_set_mode(dev, EM28XX_DIGITAL_MODE);
@@ -370,7 +375,6 @@
 	.no_i2c_bridge = 1,
 	.microcode_name = "dvb-usb-terratec-h5-drxk.fw",
 	.qam_demod_parameter_count = 2,
-	.load_firmware_sync = true,
 };
 
 static struct drxk_config hauppauge_930c_drxk = {
@@ -380,7 +384,6 @@
 	.microcode_name = "dvb-usb-hauppauge-hvr930c-drxk.fw",
 	.chunk_size = 56,
 	.qam_demod_parameter_count = 2,
-	.load_firmware_sync = true,
 };
 
 static struct drxk_config terratec_htc_stick_drxk = {
@@ -394,7 +397,6 @@
 	.antenna_dvbt = true,
 	/* The windows driver uses the same. This will disable LNA. */
 	.antenna_gpio = 0x6,
-	.load_firmware_sync = true,
 };
 
 static struct drxk_config maxmedia_ub425_tc_drxk = {
@@ -403,7 +405,6 @@
 	.no_i2c_bridge = 1,
 	.microcode_name = "dvb-demod-drxk-01.fw",
 	.chunk_size = 62,
-	.load_firmware_sync = true,
 	.qam_demod_parameter_count = 2,
 };
 
@@ -415,7 +416,6 @@
 	.chunk_size = 58,
 	.antenna_dvbt = true, /* disable LNA */
 	.antenna_gpio = (1 << 2), /* disable LNA */
-	.load_firmware_sync = true,
 };
 
 static int drxk_gate_ctrl(struct dvb_frontend *fe, int enable)
@@ -808,6 +808,14 @@
 	.small_i2c = TDA18271_03_BYTE_CHUNK_INIT,
 };
 
+static const struct m88ds3103_config pctv_461e_m88ds3103_config = {
+	.i2c_addr = 0x68,
+	.clock = 27000000,
+	.i2c_wr_max = 33,
+	.clock_out = 0,
+	.ts_mode = M88DS3103_TS_PARALLEL_16,
+	.agc = 0x99,
+};
 
 /* ------------------------------------------------------------------ */
 
@@ -815,11 +823,16 @@
 {
 	struct dvb_frontend *fe;
 	struct xc2028_config cfg;
+	struct xc2028_ctrl ctl;
 
 	memset(&cfg, 0, sizeof(cfg));
 	cfg.i2c_adap  = &dev->i2c_adap[dev->def_i2c_bus];
 	cfg.i2c_addr  = addr;
 
+	memset(&ctl, 0, sizeof(ctl));
+	em28xx_setup_xc3028(dev, &ctl);
+	cfg.ctrl  = &ctl;
+
 	if (!dev->dvb->fe[0]) {
 		em28xx_errdev("/2: dvb frontend not attached. "
 				"Can't attach xc3028\n");
@@ -979,12 +992,18 @@
 	int result = 0, mfe_shared = 0;
 	struct em28xx_dvb *dvb;
 
-	if (!dev->board.has_dvb) {
-		/* This device does not support the extension */
-		printk(KERN_INFO "em28xx_dvb: This device does not support the extension\n");
+	if (dev->is_audio_only) {
+		/* Shouldn't initialize the DVB extension for this interface */
 		return 0;
 	}
 
+	if (!dev->board.has_dvb) {
+		/* This device does not support the extension */
+		return 0;
+	}
+
+	em28xx_info("Binding DVB extension\n");
+
 	dvb = kzalloc(sizeof(struct em28xx_dvb), GFP_KERNEL);
 
 	if (dvb == NULL) {
@@ -994,6 +1013,27 @@
 	dev->dvb = dvb;
 	dvb->fe[0] = dvb->fe[1] = NULL;
 
+	/* pre-allocate DVB usb transfer buffers */
+	if (dev->dvb_xfer_bulk) {
+		result = em28xx_alloc_urbs(dev, EM28XX_DIGITAL_MODE,
+					   dev->dvb_xfer_bulk,
+					   EM28XX_DVB_NUM_BUFS,
+					   512,
+					   EM28XX_DVB_BULK_PACKET_MULTIPLIER);
+	} else {
+		result = em28xx_alloc_urbs(dev, EM28XX_DIGITAL_MODE,
+					   dev->dvb_xfer_bulk,
+					   EM28XX_DVB_NUM_BUFS,
+					   dev->dvb_max_pkt_size_isoc,
+					   EM28XX_DVB_NUM_ISOC_PACKETS);
+	}
+	if (result) {
+		em28xx_errdev("em28xx_dvb: failed to pre-allocate USB transfer buffers for DVB.\n");
+		kfree(dvb);
+		dev->dvb = NULL;
+		return result;
+	}
+
 	mutex_lock(&dev->lock);
 	em28xx_set_mode(dev, EM28XX_DIGITAL_MODE);
 	/* init frontend */
@@ -1330,6 +1370,48 @@
 			goto out_free;
 		}
 		break;
+	case EM28178_BOARD_PCTV_461E:
+		{
+			/* demod I2C adapter */
+			struct i2c_adapter *i2c_adapter;
+			struct i2c_board_info info;
+			struct m88ts2022_config m88ts2022_config = {
+				.clock = 27000000,
+			};
+			memset(&info, 0, sizeof(struct i2c_board_info));
+
+			/* attach demod */
+			dvb->fe[0] = dvb_attach(m88ds3103_attach,
+					&pctv_461e_m88ds3103_config,
+					&dev->i2c_adap[dev->def_i2c_bus],
+					&i2c_adapter);
+			if (dvb->fe[0] == NULL) {
+				result = -ENODEV;
+				goto out_free;
+			}
+
+			/* attach tuner */
+			m88ts2022_config.fe = dvb->fe[0];
+			strlcpy(info.type, "m88ts2022", I2C_NAME_SIZE);
+			info.addr = 0x60;
+			info.platform_data = &m88ts2022_config;
+			request_module("m88ts2022");
+			dvb->i2c_client_tuner = i2c_new_device(i2c_adapter, &info);
+
+			/* delegate signal strength measurement to tuner */
+			dvb->fe[0]->ops.read_signal_strength =
+					dvb->fe[0]->ops.tuner_ops.get_rf_strength;
+
+			/* attach SEC */
+			if (!dvb_attach(a8293_attach, dvb->fe[0],
+					&dev->i2c_adap[dev->def_i2c_bus],
+					&em28xx_a8293_config)) {
+				dvb_frontend_detach(dvb->fe[0]);
+				result = -ENODEV;
+				goto out_free;
+			}
+		}
+		break;
 	default:
 		em28xx_errdev("/2: The frontend of your DVB/ATSC card"
 				" isn't supported yet\n");
@@ -1354,7 +1436,7 @@
 	/* MFE lock */
 	dvb->adapter.mfe_shared = mfe_shared;
 
-	em28xx_info("Successfully loaded em28xx-dvb\n");
+	em28xx_info("DVB extension successfully initialized\n");
 ret:
 	em28xx_set_mode(dev, EM28XX_SUSPEND);
 	mutex_unlock(&dev->lock);
@@ -1375,14 +1457,23 @@
 
 static int em28xx_dvb_fini(struct em28xx *dev)
 {
+	if (dev->is_audio_only) {
+		/* Shouldn't initialize the DVB extension for this interface */
+		return 0;
+	}
+
 	if (!dev->board.has_dvb) {
 		/* This device does not support the extension */
 		return 0;
 	}
 
+	em28xx_info("Closing DVB extension");
+
 	if (dev->dvb) {
 		struct em28xx_dvb *dvb = dev->dvb;
 
+		em28xx_uninit_usb_xfer(dev, EM28XX_DIGITAL_MODE);
+
 		if (dev->disconnected) {
 			/* We cannot tell the device to sleep
 			 * once it has been unplugged. */
@@ -1392,6 +1483,7 @@
 				prevent_sleep(&dvb->fe[1]->ops);
 		}
 
+		i2c_release_client(dvb->i2c_client_tuner);
 		em28xx_unregister_dvb(dvb);
 		kfree(dvb);
 		dev->dvb = NULL;
diff --git a/drivers/media/usb/em28xx/em28xx-i2c.c b/drivers/media/usb/em28xx/em28xx-i2c.c
index c4ff973..7e17240 100644
--- a/drivers/media/usb/em28xx/em28xx-i2c.c
+++ b/drivers/media/usb/em28xx/em28xx-i2c.c
@@ -26,6 +26,7 @@
 #include <linux/kernel.h>
 #include <linux/usb.h>
 #include <linux/i2c.h>
+#include <linux/jiffies.h>
 
 #include "em28xx.h"
 #include "tuner-xc2028.h"
@@ -40,7 +41,7 @@
 
 static unsigned int i2c_debug;
 module_param(i2c_debug, int, 0644);
-MODULE_PARM_DESC(i2c_debug, "enable debug messages [i2c]");
+MODULE_PARM_DESC(i2c_debug, "i2c debug message level (1: normal debug, 2: show I2C transfers)");
 
 /*
  * em2800_i2c_send_bytes()
@@ -48,8 +49,8 @@
  */
 static int em2800_i2c_send_bytes(struct em28xx *dev, u8 addr, u8 *buf, u16 len)
 {
+	unsigned long timeout = jiffies + msecs_to_jiffies(EM28XX_I2C_XFER_TIMEOUT);
 	int ret;
-	int write_timeout;
 	u8 b2[6];
 
 	if (len < 1 || len > 4)
@@ -74,22 +75,26 @@
 		return (ret < 0) ? ret : -EIO;
 	}
 	/* wait for completion */
-	for (write_timeout = EM2800_I2C_XFER_TIMEOUT; write_timeout > 0;
-	     write_timeout -= 5) {
+	while (time_is_after_jiffies(timeout)) {
 		ret = dev->em28xx_read_reg(dev, 0x05);
-		if (ret == 0x80 + len - 1) {
+		if (ret == 0x80 + len - 1)
 			return len;
-		} else if (ret == 0x94 + len - 1) {
-			return -ENODEV;
-		} else if (ret < 0) {
+		if (ret == 0x94 + len - 1) {
+			if (i2c_debug == 1)
+				em28xx_warn("R05 returned 0x%02x: I2C timeout",
+					    ret);
+			return -ENXIO;
+		}
+		if (ret < 0) {
 			em28xx_warn("failed to get i2c transfer status from bridge register (error=%i)\n",
 				    ret);
 			return ret;
 		}
 		msleep(5);
 	}
-	em28xx_warn("write to i2c device at 0x%x timed out\n", addr);
-	return -EIO;
+	if (i2c_debug)
+		em28xx_warn("write to i2c device at 0x%x timed out\n", addr);
+	return -ETIMEDOUT;
 }
 
 /*
@@ -98,9 +103,9 @@
  */
 static int em2800_i2c_recv_bytes(struct em28xx *dev, u8 addr, u8 *buf, u16 len)
 {
+	unsigned long timeout = jiffies + msecs_to_jiffies(EM28XX_I2C_XFER_TIMEOUT);
 	u8 buf2[4];
 	int ret;
-	int read_timeout;
 	int i;
 
 	if (len < 1 || len > 4)
@@ -117,22 +122,28 @@
 	}
 
 	/* wait for completion */
-	for (read_timeout = EM2800_I2C_XFER_TIMEOUT; read_timeout > 0;
-	     read_timeout -= 5) {
+	while (time_is_after_jiffies(timeout)) {
 		ret = dev->em28xx_read_reg(dev, 0x05);
-		if (ret == 0x84 + len - 1) {
+		if (ret == 0x84 + len - 1)
 			break;
-		} else if (ret == 0x94 + len - 1) {
-			return -ENODEV;
-		} else if (ret < 0) {
+		if (ret == 0x94 + len - 1) {
+			if (i2c_debug == 1)
+				em28xx_warn("R05 returned 0x%02x: I2C timeout",
+					    ret);
+			return -ENXIO;
+		}
+		if (ret < 0) {
 			em28xx_warn("failed to get i2c transfer status from bridge register (error=%i)\n",
 				    ret);
 			return ret;
 		}
 		msleep(5);
 	}
-	if (ret != 0x84 + len - 1)
-		em28xx_warn("read from i2c device at 0x%x timed out\n", addr);
+	if (ret != 0x84 + len - 1) {
+		if (i2c_debug)
+			em28xx_warn("read from i2c device at 0x%x timed out\n",
+				    addr);
+	}
 
 	/* get the received message */
 	ret = dev->em28xx_read_reg_req_len(dev, 0x00, 4-len, buf2, len);
@@ -168,7 +179,8 @@
 static int em28xx_i2c_send_bytes(struct em28xx *dev, u16 addr, u8 *buf,
 				 u16 len, int stop)
 {
-	int write_timeout, ret;
+	unsigned long timeout = jiffies + msecs_to_jiffies(EM28XX_I2C_XFER_TIMEOUT);
+	int ret;
 
 	if (len < 1 || len > 64)
 		return -EOPNOTSUPP;
@@ -191,16 +203,19 @@
 		}
 	}
 
-	/* Check success of the i2c operation */
-	for (write_timeout = EM2800_I2C_XFER_TIMEOUT; write_timeout > 0;
-	     write_timeout -= 5) {
+	/* wait for completion */
+	while (time_is_after_jiffies(timeout)) {
 		ret = dev->em28xx_read_reg(dev, 0x05);
-		if (ret == 0) { /* success */
+		if (ret == 0) /* success */
 			return len;
-		} else if (ret == 0x10) {
-			return -ENODEV;
-		} else if (ret < 0) {
-			em28xx_warn("failed to read i2c transfer status from bridge (error=%i)\n",
+		if (ret == 0x10) {
+			if (i2c_debug == 1)
+				em28xx_warn("I2C transfer timeout on writing to addr 0x%02x",
+					    addr);
+			return -ENXIO;
+		}
+		if (ret < 0) {
+			em28xx_warn("failed to get i2c transfer status from bridge register (error=%i)\n",
 				    ret);
 			return ret;
 		}
@@ -211,8 +226,10 @@
 		 * (even with high payload) ...
 		 */
 	}
-	em28xx_warn("write to i2c device at 0x%x timed out\n", addr);
-	return -EIO;
+	if (i2c_debug)
+		em28xx_warn("write to i2c device at 0x%x timed out (status=%i)\n",
+			    addr, ret);
+	return -ETIMEDOUT;
 }
 
 /*
@@ -242,26 +259,28 @@
 	 * bytes if we are on bus B AND there was no write attempt to the
 	 * specified slave address before AND no device is present at the
 	 * requested slave address.
-	 * Anyway, the next check will fail with -ENODEV in this case, so avoid
+	 * Anyway, the next check will fail with -ENXIO in this case, so avoid
 	 * spamming the system log on device probing and do nothing here.
 	 */
 
 	/* Check success of the i2c operation */
 	ret = dev->em28xx_read_reg(dev, 0x05);
+	if (ret == 0) /* success */
+		return len;
 	if (ret < 0) {
-		em28xx_warn("failed to read i2c transfer status from bridge (error=%i)\n",
+		em28xx_warn("failed to get i2c transfer status from bridge register (error=%i)\n",
 			    ret);
 		return ret;
 	}
-	if (ret > 0) {
-		if (ret == 0x10) {
-			return -ENODEV;
-		} else {
-			em28xx_warn("unknown i2c error (status=%i)\n", ret);
-			return -EIO;
-		}
+	if (ret == 0x10) {
+		if (i2c_debug == 1)
+			em28xx_warn("I2C transfer timeout on writing to addr 0x%02x",
+				    addr);
+		return -ENXIO;
 	}
-	return len;
+
+	em28xx_warn("unknown i2c error (status=%i)\n", ret);
+	return -ETIMEDOUT;
 }
 
 /*
@@ -316,8 +335,12 @@
 	 */
 	if (!ret)
 		return len;
-	else if (ret > 0)
-		return -ENODEV;
+	else if (ret > 0) {
+		if (i2c_debug == 1)
+			em28xx_warn("Bus B R08 returned 0x%02x: I2C timeout",
+				    ret);
+		return -ENXIO;
+	}
 
 	return ret;
 	/*
@@ -355,7 +378,7 @@
 	 * bytes if we are on bus B AND there was no write attempt to the
 	 * specified slave address before AND no device is present at the
 	 * requested slave address.
-	 * Anyway, the next check will fail with -ENODEV in this case, so avoid
+	 * Anyway, the next check will fail with -ENXIO in this case, so avoid
 	 * spamming the system log on device probing and do nothing here.
 	 */
 
@@ -367,8 +390,12 @@
 	 */
 	if (!ret)
 		return len;
-	else if (ret > 0)
-		return -ENODEV;
+	else if (ret > 0) {
+		if (i2c_debug == 1)
+			em28xx_warn("Bus B R08 returned 0x%02x: I2C timeout",
+				    ret);
+		return -ENXIO;
+	}
 
 	return ret;
 	/*
@@ -409,10 +436,6 @@
 		rc = em2800_i2c_check_for_device(dev, addr);
 	else if (i2c_bus->algo_type == EM28XX_I2C_ALGO_EM25XX_BUS_B)
 		rc = em25xx_bus_B_check_for_device(dev, addr);
-	if (rc == -ENODEV) {
-		if (i2c_debug)
-			printk(" no device\n");
-	}
 	return rc;
 }
 
@@ -421,7 +444,7 @@
 {
 	struct em28xx *dev = i2c_bus->dev;
 	u16 addr = msg.addr << 1;
-	int byte, rc = -EOPNOTSUPP;
+	int rc = -EOPNOTSUPP;
 
 	if (i2c_bus->algo_type == EM28XX_I2C_ALGO_EM28XX)
 		rc = em28xx_i2c_recv_bytes(dev, addr, msg.buf, msg.len);
@@ -429,10 +452,6 @@
 		rc = em2800_i2c_recv_bytes(dev, addr, msg.buf, msg.len);
 	else if (i2c_bus->algo_type == EM28XX_I2C_ALGO_EM25XX_BUS_B)
 		rc = em25xx_bus_B_recv_bytes(dev, addr, msg.buf, msg.len);
-	if (i2c_debug) {
-		for (byte = 0; byte < msg.len; byte++)
-			printk(" %02x", msg.buf[byte]);
-	}
 	return rc;
 }
 
@@ -441,12 +460,8 @@
 {
 	struct em28xx *dev = i2c_bus->dev;
 	u16 addr = msg.addr << 1;
-	int byte, rc = -EOPNOTSUPP;
+	int rc = -EOPNOTSUPP;
 
-	if (i2c_debug) {
-		for (byte = 0; byte < msg.len; byte++)
-			printk(" %02x", msg.buf[byte]);
-	}
 	if (i2c_bus->algo_type == EM28XX_I2C_ALGO_EM28XX)
 		rc = em28xx_i2c_send_bytes(dev, addr, msg.buf, msg.len, stop);
 	else if (i2c_bus->algo_type == EM28XX_I2C_ALGO_EM2800)
@@ -491,33 +506,53 @@
 	}
 	for (i = 0; i < num; i++) {
 		addr = msgs[i].addr << 1;
-		if (i2c_debug)
+		if (i2c_debug > 1)
 			printk(KERN_DEBUG "%s at %s: %s %s addr=%02x len=%d:",
 			       dev->name, __func__ ,
 			       (msgs[i].flags & I2C_M_RD) ? "read" : "write",
 			       i == num - 1 ? "stop" : "nonstop",
 			       addr, msgs[i].len);
-		if (!msgs[i].len) { /* no len: check only for device presence */
+		if (!msgs[i].len) {
+			/*
+			 * no len: check only for device presence
+			 * This code is only called during device probe.
+			 */
 			rc = i2c_check_for_device(i2c_bus, addr);
-			if (rc == -ENODEV) {
+			if (rc < 0) {
+				if (rc == -ENXIO) {
+					if (i2c_debug > 1)
+						printk(KERN_CONT " no device\n");
+					rc = -ENODEV;
+				} else {
+					if (i2c_debug > 1)
+						printk(KERN_CONT " ERROR: %i\n", rc);
+				}
 				rt_mutex_unlock(&dev->i2c_bus_lock);
 				return rc;
 			}
 		} else if (msgs[i].flags & I2C_M_RD) {
 			/* read bytes */
 			rc = i2c_recv_bytes(i2c_bus, msgs[i]);
+
+			if (i2c_debug > 1 && rc >= 0)
+				printk(KERN_CONT " %*ph",
+				       msgs[i].len, msgs[i].buf);
 		} else {
+			if (i2c_debug > 1)
+				printk(KERN_CONT " %*ph",
+				       msgs[i].len, msgs[i].buf);
+
 			/* write bytes */
 			rc = i2c_send_bytes(i2c_bus, msgs[i], i == num - 1);
 		}
 		if (rc < 0) {
-			if (i2c_debug)
-				printk(" ERROR: %i\n", rc);
+			if (i2c_debug > 1)
+				printk(KERN_CONT " ERROR: %i\n", rc);
 			rt_mutex_unlock(&dev->i2c_bus_lock);
 			return rc;
 		}
-		if (i2c_debug)
-			printk("\n");
+		if (i2c_debug > 1)
+			printk(KERN_CONT "\n");
 	}
 
 	rt_mutex_unlock(&dev->i2c_bus_lock);
@@ -600,7 +635,7 @@
 	 * calculation and returned device dataset. Simplifies the code a lot,
 	 * but we might have to deal with multiple sizes in the future !
 	 */
-	int i, err;
+	int err;
 	struct em28xx_eeprom *dev_config;
 	u8 buf, *data;
 
@@ -631,20 +666,14 @@
 		goto error;
 	}
 
-	/* Display eeprom content */
-	for (i = 0; i < len; i++) {
-		if (0 == (i % 16)) {
-			if (dev->eeprom_addrwidth_16bit)
-				em28xx_info("i2c eeprom %04x:", i);
-			else
-				em28xx_info("i2c eeprom %02x:", i);
-		}
-		printk(" %02x", data[i]);
-		if (15 == (i % 16))
-			printk("\n");
+	if (i2c_debug) {
+		/* Display eeprom content */
+		print_hex_dump(KERN_INFO, "eeprom ", DUMP_PREFIX_OFFSET,
+			       16, 1, data, len, true);
+
+		if (dev->eeprom_addrwidth_16bit)
+			em28xx_info("eeprom %06x: ... (skipped)\n", 256);
 	}
-	if (dev->eeprom_addrwidth_16bit)
-		em28xx_info("i2c eeprom %04x: ... (skipped)\n", i);
 
 	if (dev->eeprom_addrwidth_16bit &&
 	    data[0] == 0x26 && data[3] == 0x00) {
@@ -736,10 +765,16 @@
 		em28xx_info("\tAC97 audio (5 sample rates)\n");
 		break;
 	case 2:
-		em28xx_info("\tI2S audio, sample rate=32k\n");
+		if (dev->chip_id < CHIP_ID_EM2860)
+			em28xx_info("\tI2S audio, sample rate=32k\n");
+		else
+			em28xx_info("\tI2S audio, 3 sample rates\n");
 		break;
 	case 3:
-		em28xx_info("\tI2S audio, 3 sample rates\n");
+		if (dev->chip_id < CHIP_ID_EM2860)
+			em28xx_info("\tI2S audio, 3 sample rates\n");
+		else
+			em28xx_info("\tI2S audio, 5 sample rates\n");
 		break;
 	}
 
diff --git a/drivers/media/usb/em28xx/em28xx-input.c b/drivers/media/usb/em28xx/em28xx-input.c
index ea181e4..18f65d8 100644
--- a/drivers/media/usb/em28xx/em28xx-input.c
+++ b/drivers/media/usb/em28xx/em28xx-input.c
@@ -30,8 +30,9 @@
 
 #include "em28xx.h"
 
-#define EM28XX_SNAPSHOT_KEY KEY_CAMERA
-#define EM28XX_SBUTTON_QUERY_INTERVAL 500
+#define EM28XX_SNAPSHOT_KEY				KEY_CAMERA
+#define EM28XX_BUTTONS_DEBOUNCED_QUERY_INTERVAL		500 /* [ms] */
+#define EM28XX_BUTTONS_VOLATILE_QUERY_INTERVAL		100 /* [ms] */
 
 static unsigned int ir_debug;
 module_param(ir_debug, int, 0644);
@@ -442,6 +443,7 @@
 	case CHIP_ID_EM2884:
 	case CHIP_ID_EM2874:
 	case CHIP_ID_EM28174:
+	case CHIP_ID_EM28178:
 		return em2874_ir_change_protocol(rc_dev, rc_type);
 	default:
 		printk("Unrecognized em28xx chip id 0x%02x: IR not supported\n",
@@ -470,54 +472,98 @@
 }
 
 /**********************************************************
- Handle Webcam snapshot button
+ Handle buttons
  **********************************************************/
 
-static void em28xx_query_sbutton(struct work_struct *work)
+static void em28xx_query_buttons(struct work_struct *work)
 {
-	/* Poll the register and see if the button is depressed */
 	struct em28xx *dev =
-		container_of(work, struct em28xx, sbutton_query_work.work);
-	int ret;
+		container_of(work, struct em28xx, buttons_query_work.work);
+	u8 i, j;
+	int regval;
+	bool is_pressed, was_pressed;
+	const struct em28xx_led *led;
 
-	ret = em28xx_read_reg(dev, EM28XX_R0C_USBSUSP);
-
-	if (ret & EM28XX_R0C_USBSUSP_SNAPSHOT) {
-		u8 cleared;
-		/* Button is depressed, clear the register */
-		cleared = ((u8) ret) & ~EM28XX_R0C_USBSUSP_SNAPSHOT;
-		em28xx_write_regs(dev, EM28XX_R0C_USBSUSP, &cleared, 1);
-
-		/* Not emulate the keypress */
-		input_report_key(dev->sbutton_input_dev, EM28XX_SNAPSHOT_KEY,
-				 1);
-		/* Now unpress the key */
-		input_report_key(dev->sbutton_input_dev, EM28XX_SNAPSHOT_KEY,
-				 0);
+	/* Poll and evaluate all addresses */
+	for (i = 0; i < dev->num_button_polling_addresses; i++) {
+		/* Read value from register */
+		regval = em28xx_read_reg(dev, dev->button_polling_addresses[i]);
+		if (regval < 0)
+			continue;
+		/* Check states of the buttons and act */
+		j = 0;
+		while (dev->board.buttons[j].role >= 0 &&
+			 dev->board.buttons[j].role < EM28XX_NUM_BUTTON_ROLES) {
+			struct em28xx_button *button = &dev->board.buttons[j];
+			/* Check if button uses the current address */
+			if (button->reg_r != dev->button_polling_addresses[i]) {
+				j++;
+				continue;
+			}
+			/* Determine the current and previous button state */
+			is_pressed = regval & button->mask;
+			was_pressed = dev->button_polling_last_values[i]
+				       & button->mask;
+			if (button->inverted) {
+				is_pressed = !is_pressed;
+				was_pressed = !was_pressed;
+			}
+			/* Clear button state (if needed) */
+			if (is_pressed && button->reg_clearing)
+				em28xx_write_reg(dev, button->reg_clearing,
+						 (~regval & button->mask)
+						    | (regval & ~button->mask));
+			/* Handle button state */
+			if (!is_pressed || was_pressed) {
+				j++;
+				continue;
+			}
+			switch (button->role) {
+			case EM28XX_BUTTON_SNAPSHOT:
+				/* Emulate the keypress */
+				input_report_key(dev->sbutton_input_dev,
+						 EM28XX_SNAPSHOT_KEY, 1);
+				/* Unpress the key */
+				input_report_key(dev->sbutton_input_dev,
+						 EM28XX_SNAPSHOT_KEY, 0);
+				break;
+			case EM28XX_BUTTON_ILLUMINATION:
+				led = em28xx_find_led(dev,
+						      EM28XX_LED_ILLUMINATION);
+				/* Switch illumination LED on/off */
+				if (led)
+					em28xx_toggle_reg_bits(dev,
+							       led->gpio_reg,
+							       led->gpio_mask);
+				break;
+			default:
+				WARN_ONCE(1, "BUG: unhandled button role.");
+			}
+			/* Next button */
+			j++;
+		}
+		/* Save current value for comparison during the next polling */
+		dev->button_polling_last_values[i] = regval;
 	}
-
 	/* Schedule next poll */
-	schedule_delayed_work(&dev->sbutton_query_work,
-			      msecs_to_jiffies(EM28XX_SBUTTON_QUERY_INTERVAL));
+	schedule_delayed_work(&dev->buttons_query_work,
+			      msecs_to_jiffies(dev->button_polling_interval));
 }
 
-static void em28xx_register_snapshot_button(struct em28xx *dev)
+static int em28xx_register_snapshot_button(struct em28xx *dev)
 {
 	struct input_dev *input_dev;
 	int err;
 
 	em28xx_info("Registering snapshot button...\n");
 	input_dev = input_allocate_device();
-	if (!input_dev) {
-		em28xx_errdev("input_allocate_device failed\n");
-		return;
-	}
+	if (!input_dev)
+		return -ENOMEM;
 
 	usb_make_path(dev->udev, dev->snapshot_button_path,
 		      sizeof(dev->snapshot_button_path));
 	strlcat(dev->snapshot_button_path, "/sbutton",
 		sizeof(dev->snapshot_button_path));
-	INIT_DELAYED_WORK(&dev->sbutton_query_work, em28xx_query_sbutton);
 
 	input_dev->name = "em28xx snapshot button";
 	input_dev->phys = dev->snapshot_button_path;
@@ -535,25 +581,86 @@
 	if (err) {
 		em28xx_errdev("input_register_device failed\n");
 		input_free_device(input_dev);
-		return;
+		return err;
 	}
 
 	dev->sbutton_input_dev = input_dev;
-	schedule_delayed_work(&dev->sbutton_query_work,
-			      msecs_to_jiffies(EM28XX_SBUTTON_QUERY_INTERVAL));
-	return;
-
+	return 0;
 }
 
-static void em28xx_deregister_snapshot_button(struct em28xx *dev)
+static void em28xx_init_buttons(struct em28xx *dev)
 {
+	u8  i = 0, j = 0;
+	bool addr_new = 0;
+
+	dev->button_polling_interval = EM28XX_BUTTONS_DEBOUNCED_QUERY_INTERVAL;
+	while (dev->board.buttons[i].role >= 0 &&
+			 dev->board.buttons[i].role < EM28XX_NUM_BUTTON_ROLES) {
+		struct em28xx_button *button = &dev->board.buttons[i];
+		/* Check if polling address is already on the list */
+		addr_new = 1;
+		for (j = 0; j < dev->num_button_polling_addresses; j++) {
+			if (button->reg_r == dev->button_polling_addresses[j]) {
+				addr_new = 0;
+				break;
+			}
+		}
+		/* Check if max. number of polling addresses is exceeded */
+		if (addr_new && dev->num_button_polling_addresses
+					   >= EM28XX_NUM_BUTTON_ADDRESSES_MAX) {
+			WARN_ONCE(1, "BUG: maximum number of button polling addresses exceeded.");
+			goto next_button;
+		}
+		/* Button role specific checks and actions */
+		if (button->role == EM28XX_BUTTON_SNAPSHOT) {
+			/* Register input device */
+			if (em28xx_register_snapshot_button(dev) < 0)
+				goto next_button;
+		} else if (button->role == EM28XX_BUTTON_ILLUMINATION) {
+			/* Check sanity */
+			if (!em28xx_find_led(dev, EM28XX_LED_ILLUMINATION)) {
+				em28xx_errdev("BUG: illumination button defined, but no illumination LED.\n");
+				goto next_button;
+			}
+		}
+		/* Add read address to list of polling addresses */
+		if (addr_new) {
+			unsigned int index = dev->num_button_polling_addresses;
+			dev->button_polling_addresses[index] = button->reg_r;
+			dev->num_button_polling_addresses++;
+		}
+		/* Reduce polling interval if necessary */
+		if (!button->reg_clearing)
+			dev->button_polling_interval =
+					 EM28XX_BUTTONS_VOLATILE_QUERY_INTERVAL;
+next_button:
+		/* Next button */
+		i++;
+	}
+
+	/* Start polling */
+	if (dev->num_button_polling_addresses) {
+		memset(dev->button_polling_last_values, 0,
+					       EM28XX_NUM_BUTTON_ADDRESSES_MAX);
+		INIT_DELAYED_WORK(&dev->buttons_query_work,
+							  em28xx_query_buttons);
+		schedule_delayed_work(&dev->buttons_query_work,
+			       msecs_to_jiffies(dev->button_polling_interval));
+	}
+}
+
+static void em28xx_shutdown_buttons(struct em28xx *dev)
+{
+	/* Cancel polling */
+	cancel_delayed_work_sync(&dev->buttons_query_work);
+	/* Clear polling addresses list */
+	dev->num_button_polling_addresses = 0;
+	/* Deregister input devices */
 	if (dev->sbutton_input_dev != NULL) {
 		em28xx_info("Deregistering snapshot button\n");
-		cancel_delayed_work_sync(&dev->sbutton_query_work);
 		input_unregister_device(dev->sbutton_input_dev);
 		dev->sbutton_input_dev = NULL;
 	}
-	return;
 }
 
 static int em28xx_ir_init(struct em28xx *dev)
@@ -564,8 +671,13 @@
 	u64 rc_type;
 	u16 i2c_rc_dev_addr = 0;
 
-	if (dev->board.has_snapshot_button)
-		em28xx_register_snapshot_button(dev);
+	if (dev->is_audio_only) {
+		/* Shouldn't initialize IR for this interface */
+		return 0;
+	}
+
+	if (dev->board.buttons)
+		em28xx_init_buttons(dev);
 
 	if (dev->board.has_ir_i2c) {
 		i2c_rc_dev_addr = em28xx_probe_i2c_ir(dev);
@@ -583,6 +695,8 @@
 		return 0;
 	}
 
+	em28xx_info("Registering input extension\n");
+
 	ir = kzalloc(sizeof(*ir), GFP_KERNEL);
 	rc = rc_allocate_device();
 	if (!ir || !rc)
@@ -633,6 +747,7 @@
 		case CHIP_ID_EM2884:
 		case CHIP_ID_EM2874:
 		case CHIP_ID_EM28174:
+		case CHIP_ID_EM28178:
 			ir->get_key = em2874_polling_getkey;
 			rc->allowed_protos = RC_BIT_RC5 | RC_BIT_NEC |
 					     RC_BIT_RC6_0;
@@ -675,6 +790,8 @@
 	if (err)
 		goto error;
 
+	em28xx_info("Input extension successfully initalized\n");
+
 	return 0;
 
 error:
@@ -688,7 +805,14 @@
 {
 	struct em28xx_IR *ir = dev->ir;
 
-	em28xx_deregister_snapshot_button(dev);
+	if (dev->is_audio_only) {
+		/* The input extension was never initialized for this interface */
+		return 0;
+	}
+
+	em28xx_info("Closing input extension");
+
+	em28xx_shutdown_buttons(dev);
 
 	/* skip detach on non attached boards */
 	if (!ir)
@@ -722,7 +846,8 @@
 
 MODULE_LICENSE("GPL");
 MODULE_AUTHOR("Mauro Carvalho Chehab <mchehab@redhat.com>");
-MODULE_DESCRIPTION("Em28xx Input driver");
+MODULE_DESCRIPTION(DRIVER_DESC " - input interface");
+MODULE_VERSION(EM28XX_VERSION);
 
 module_init(em28xx_rc_register);
 module_exit(em28xx_rc_unregister);
diff --git a/drivers/media/usb/em28xx/em28xx-reg.h b/drivers/media/usb/em28xx/em28xx-reg.h
index 0e04778..311fb34 100644
--- a/drivers/media/usb/em28xx/em28xx-reg.h
+++ b/drivers/media/usb/em28xx/em28xx-reg.h
@@ -25,10 +25,12 @@
 #define EM28XX_R00_CHIPCFG	0x00
 
 /* em28xx Chip Configuration 0x00 */
-#define EM28XX_CHIPCFG_VENDOR_AUDIO		0x80
-#define EM28XX_CHIPCFG_I2S_VOLUME_CAPABLE	0x40
-#define EM28XX_CHIPCFG_I2S_5_SAMPRATES		0x30
-#define EM28XX_CHIPCFG_I2S_3_SAMPRATES		0x20
+#define EM2860_CHIPCFG_VENDOR_AUDIO		0x80
+#define EM2860_CHIPCFG_I2S_VOLUME_CAPABLE	0x40
+#define EM2820_CHIPCFG_I2S_3_SAMPRATES		0x30
+#define EM2860_CHIPCFG_I2S_5_SAMPRATES		0x30
+#define EM2820_CHIPCFG_I2S_1_SAMPRATE		0x20
+#define EM2860_CHIPCFG_I2S_3_SAMPRATES		0x20
 #define EM28XX_CHIPCFG_AC97			0x10
 #define EM28XX_CHIPCFG_AUDIOMASK		0x30
 
@@ -245,6 +247,7 @@
 	CHIP_ID_EM2874 = 65,
 	CHIP_ID_EM2884 = 68,
 	CHIP_ID_EM28174 = 113,
+	CHIP_ID_EM28178 = 114,
 };
 
 /*
diff --git a/drivers/media/usb/em28xx/em28xx-v4l.h b/drivers/media/usb/em28xx/em28xx-v4l.h
new file mode 100644
index 0000000..bce4386
--- /dev/null
+++ b/drivers/media/usb/em28xx/em28xx-v4l.h
@@ -0,0 +1,20 @@
+/*
+   em28xx-v4l.h - driver for Empia EM2800/EM2820/2840 USB
+		    video capture devices
+
+   Copyright (C) 2013-2014 Mauro Carvalho Chehab <m.chehab@samsung.com>
+
+   This program is free software; you can redistribute it and/or modify
+   it under the terms of the GNU General Public License as published by
+   the Free Software Foundation version 2 of the License.
+
+   This program is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+   GNU General Public License for more details.
+ */
+
+
+int em28xx_start_analog_streaming(struct vb2_queue *vq, unsigned int count);
+int em28xx_stop_vbi_streaming(struct vb2_queue *vq);
+extern struct vb2_ops em28xx_vbi_qops;
diff --git a/drivers/media/usb/em28xx/em28xx-vbi.c b/drivers/media/usb/em28xx/em28xx-vbi.c
index 39f39c5..db3d655 100644
--- a/drivers/media/usb/em28xx/em28xx-vbi.c
+++ b/drivers/media/usb/em28xx/em28xx-vbi.c
@@ -27,6 +27,7 @@
 #include <linux/init.h>
 
 #include "em28xx.h"
+#include "em28xx-v4l.h"
 
 static unsigned int vbibufs = 5;
 module_param(vbibufs, int, 0644);
diff --git a/drivers/media/usb/em28xx/em28xx-video.c b/drivers/media/usb/em28xx/em28xx-video.c
index dd19c9f..c3c9289 100644
--- a/drivers/media/usb/em28xx/em28xx-video.c
+++ b/drivers/media/usb/em28xx/em28xx-video.c
@@ -38,9 +38,11 @@
 #include <linux/slab.h>
 
 #include "em28xx.h"
+#include "em28xx-v4l.h"
 #include <media/v4l2-common.h>
 #include <media/v4l2-ioctl.h>
 #include <media/v4l2-event.h>
+#include <media/v4l2-clk.h>
 #include <media/msp3400.h>
 #include <media/tuner.h>
 
@@ -49,19 +51,23 @@
 		      "Mauro Carvalho Chehab <mchehab@infradead.org>, " \
 		      "Sascha Sommer <saschasommer@freenet.de>"
 
-#define DRIVER_DESC         "Empia em28xx based USB video device driver"
+static unsigned int isoc_debug;
+module_param(isoc_debug, int, 0644);
+MODULE_PARM_DESC(isoc_debug, "enable debug messages [isoc transfers]");
 
-#define EM28XX_VERSION "0.2.0"
+static unsigned int disable_vbi;
+module_param(disable_vbi, int, 0644);
+MODULE_PARM_DESC(disable_vbi, "disable vbi support");
+
+static int alt;
+module_param(alt, int, 0644);
+MODULE_PARM_DESC(alt, "alternate setting to use for video endpoint");
 
 #define em28xx_videodbg(fmt, arg...) do {\
 	if (video_debug) \
 		printk(KERN_INFO "%s %s :"fmt, \
 			 dev->name, __func__ , ##arg); } while (0)
 
-static unsigned int isoc_debug;
-module_param(isoc_debug, int, 0644);
-MODULE_PARM_DESC(isoc_debug, "enable debug messages [isoc transfers]");
-
 #define em28xx_isocdbg(fmt, arg...) \
 do {\
 	if (isoc_debug) { \
@@ -71,7 +77,7 @@
   } while (0)
 
 MODULE_AUTHOR(DRIVER_AUTHOR);
-MODULE_DESCRIPTION(DRIVER_DESC);
+MODULE_DESCRIPTION(DRIVER_DESC " - v4l2 interface");
 MODULE_LICENSE("GPL");
 MODULE_VERSION(EM28XX_VERSION);
 
@@ -135,6 +141,257 @@
 	},
 };
 
+static int em28xx_vbi_supported(struct em28xx *dev)
+{
+	/* Modprobe option to manually disable */
+	if (disable_vbi == 1)
+		return 0;
+
+	if (dev->board.is_webcam)
+		return 0;
+
+	/* FIXME: check subdevices for VBI support */
+
+	if (dev->chip_id == CHIP_ID_EM2860 ||
+	    dev->chip_id == CHIP_ID_EM2883)
+		return 1;
+
+	/* Version of em28xx that does not support VBI */
+	return 0;
+}
+
+/*
+ * em28xx_wake_i2c()
+ * configure i2c attached devices
+ */
+static void em28xx_wake_i2c(struct em28xx *dev)
+{
+	v4l2_device_call_all(&dev->v4l2_dev, 0, core,  reset, 0);
+	v4l2_device_call_all(&dev->v4l2_dev, 0, video, s_routing,
+			INPUT(dev->ctl_input)->vmux, 0, 0);
+	v4l2_device_call_all(&dev->v4l2_dev, 0, video, s_stream, 0);
+}
+
+static int em28xx_colorlevels_set_default(struct em28xx *dev)
+{
+	em28xx_write_reg(dev, EM28XX_R20_YGAIN, CONTRAST_DEFAULT);
+	em28xx_write_reg(dev, EM28XX_R21_YOFFSET, BRIGHTNESS_DEFAULT);
+	em28xx_write_reg(dev, EM28XX_R22_UVGAIN, SATURATION_DEFAULT);
+	em28xx_write_reg(dev, EM28XX_R23_UOFFSET, BLUE_BALANCE_DEFAULT);
+	em28xx_write_reg(dev, EM28XX_R24_VOFFSET, RED_BALANCE_DEFAULT);
+	em28xx_write_reg(dev, EM28XX_R25_SHARPNESS, SHARPNESS_DEFAULT);
+
+	em28xx_write_reg(dev, EM28XX_R14_GAMMA, 0x20);
+	em28xx_write_reg(dev, EM28XX_R15_RGAIN, 0x20);
+	em28xx_write_reg(dev, EM28XX_R16_GGAIN, 0x20);
+	em28xx_write_reg(dev, EM28XX_R17_BGAIN, 0x20);
+	em28xx_write_reg(dev, EM28XX_R18_ROFFSET, 0x00);
+	em28xx_write_reg(dev, EM28XX_R19_GOFFSET, 0x00);
+	return em28xx_write_reg(dev, EM28XX_R1A_BOFFSET, 0x00);
+}
+
+static int em28xx_set_outfmt(struct em28xx *dev)
+{
+	int ret;
+	u8 fmt, vinctrl;
+
+	fmt = dev->format->reg;
+	if (!dev->is_em25xx)
+		fmt |= 0x20;
+	/*
+	 * NOTE: it's not clear if this is really needed !
+	 * The datasheets say bit 5 is a reserved bit and devices seem to work
+	 * fine without it. But the Windows driver sets it for em2710/50+em28xx
+	 * devices and we've always been setting it, too.
+	 *
+	 * em2765 (em25xx, em276x/7x/8x) devices do NOT work with this bit set,
+	 * it's likely used for an additional (compressed ?) format there.
+	 */
+	ret = em28xx_write_reg(dev, EM28XX_R27_OUTFMT, fmt);
+	if (ret < 0)
+		return ret;
+
+	ret = em28xx_write_reg(dev, EM28XX_R10_VINMODE, dev->vinmode);
+	if (ret < 0)
+		return ret;
+
+	vinctrl = dev->vinctl;
+	if (em28xx_vbi_supported(dev) == 1) {
+		vinctrl |= EM28XX_VINCTRL_VBI_RAW;
+		em28xx_write_reg(dev, EM28XX_R34_VBI_START_H, 0x00);
+		em28xx_write_reg(dev, EM28XX_R36_VBI_WIDTH, dev->vbi_width/4);
+		em28xx_write_reg(dev, EM28XX_R37_VBI_HEIGHT, dev->vbi_height);
+		if (dev->norm & V4L2_STD_525_60) {
+			/* NTSC */
+			em28xx_write_reg(dev, EM28XX_R35_VBI_START_V, 0x09);
+		} else if (dev->norm & V4L2_STD_625_50) {
+			/* PAL */
+			em28xx_write_reg(dev, EM28XX_R35_VBI_START_V, 0x07);
+		}
+	}
+
+	return em28xx_write_reg(dev, EM28XX_R11_VINCTRL, vinctrl);
+}
+
+static int em28xx_accumulator_set(struct em28xx *dev, u8 xmin, u8 xmax,
+				  u8 ymin, u8 ymax)
+{
+	em28xx_videodbg("em28xx Scale: (%d,%d)-(%d,%d)\n",
+			xmin, ymin, xmax, ymax);
+
+	em28xx_write_regs(dev, EM28XX_R28_XMIN, &xmin, 1);
+	em28xx_write_regs(dev, EM28XX_R29_XMAX, &xmax, 1);
+	em28xx_write_regs(dev, EM28XX_R2A_YMIN, &ymin, 1);
+	return em28xx_write_regs(dev, EM28XX_R2B_YMAX, &ymax, 1);
+}
+
+static void em28xx_capture_area_set(struct em28xx *dev, u8 hstart, u8 vstart,
+				   u16 width, u16 height)
+{
+	u8 cwidth = width >> 2;
+	u8 cheight = height >> 2;
+	u8 overflow = (height >> 9 & 0x02) | (width >> 10 & 0x01);
+	/* NOTE: size limit: 2047x1023 = 2MPix */
+
+	em28xx_videodbg("capture area set to (%d,%d): %dx%d\n",
+		       hstart, vstart,
+		       ((overflow & 2) << 9 | cwidth << 2),
+		       ((overflow & 1) << 10 | cheight << 2));
+
+	em28xx_write_regs(dev, EM28XX_R1C_HSTART, &hstart, 1);
+	em28xx_write_regs(dev, EM28XX_R1D_VSTART, &vstart, 1);
+	em28xx_write_regs(dev, EM28XX_R1E_CWIDTH, &cwidth, 1);
+	em28xx_write_regs(dev, EM28XX_R1F_CHEIGHT, &cheight, 1);
+	em28xx_write_regs(dev, EM28XX_R1B_OFLOW, &overflow, 1);
+
+	/* FIXME: function/meaning of these registers ? */
+	/* FIXME: align width+height to multiples of 4 ?! */
+	if (dev->is_em25xx) {
+		em28xx_write_reg(dev, 0x34, width >> 4);
+		em28xx_write_reg(dev, 0x35, height >> 4);
+	}
+}
+
+static int em28xx_scaler_set(struct em28xx *dev, u16 h, u16 v)
+{
+	u8 mode;
+	/* the em2800 scaler only supports scaling down to 50% */
+
+	if (dev->board.is_em2800) {
+		mode = (v ? 0x20 : 0x00) | (h ? 0x10 : 0x00);
+	} else {
+		u8 buf[2];
+
+		buf[0] = h;
+		buf[1] = h >> 8;
+		em28xx_write_regs(dev, EM28XX_R30_HSCALELOW, (char *)buf, 2);
+
+		buf[0] = v;
+		buf[1] = v >> 8;
+		em28xx_write_regs(dev, EM28XX_R32_VSCALELOW, (char *)buf, 2);
+		/* it seems that both H and V scalers must be active
+		   to work correctly */
+		mode = (h || v) ? 0x30 : 0x00;
+	}
+	return em28xx_write_reg_bits(dev, EM28XX_R26_COMPR, mode, 0x30);
+}
+
+/* FIXME: this function only reads values from dev */
+static int em28xx_resolution_set(struct em28xx *dev)
+{
+	int width, height;
+	width = norm_maxw(dev);
+	height = norm_maxh(dev);
+
+	/* Properly setup VBI */
+	dev->vbi_width = 720;
+	if (dev->norm & V4L2_STD_525_60)
+		dev->vbi_height = 12;
+	else
+		dev->vbi_height = 18;
+
+	em28xx_set_outfmt(dev);
+
+	em28xx_accumulator_set(dev, 1, (width - 4) >> 2, 1, (height - 4) >> 2);
+
+	/* If we don't set the start position to 2 in VBI mode, we end up
+	   with lines 20/21 being YUYV encoded instead of being in 8-bit
+	   greyscale.  The core of the issue is that lines 21 (and 23 for
+	   PAL WSS) are inside the active video region, and as a result they
+	   get the pixel formatting associated with that area.  So by cropping
+	   them out, we end up with the same format as the rest of the VBI
+	   region */
+	if (em28xx_vbi_supported(dev) == 1)
+		em28xx_capture_area_set(dev, 0, 2, width, height);
+	else
+		em28xx_capture_area_set(dev, 0, 0, width, height);
+
+	return em28xx_scaler_set(dev, dev->hscale, dev->vscale);
+}
+
+/* Set USB alternate setting for analog video */
+static int em28xx_set_alternate(struct em28xx *dev)
+{
+	int errCode;
+	int i;
+	unsigned int min_pkt_size = dev->width * 2 + 4;
+
+	/* NOTE: for isoc transfers, only alt settings > 0 are allowed;
+		 bulk transfers seem to work only with alt=0! */
+	dev->alt = 0;
+	if ((alt > 0) && (alt < dev->num_alt)) {
+		em28xx_videodbg("alternate forced to %d\n", dev->alt);
+		dev->alt = alt;
+		goto set_alt;
+	}
+	if (dev->analog_xfer_bulk)
+		goto set_alt;
+
+	/* When the image size is bigger than a certain value,
+	   the frame size should be increased; otherwise, only
+	   a green screen will be received.
+	 */
+	if (dev->width * 2 * dev->height > 720 * 240 * 2)
+		min_pkt_size *= 2;
+
+	for (i = 0; i < dev->num_alt; i++) {
+		/* stop when the selected alt setting offers enough bandwidth */
+		if (dev->alt_max_pkt_size_isoc[i] >= min_pkt_size) {
+			dev->alt = i;
+			break;
+		/* otherwise make sure that we end up with the maximum bandwidth
+		   because the min_pkt_size equation might be wrong...
+		*/
+		} else if (dev->alt_max_pkt_size_isoc[i] >
+			   dev->alt_max_pkt_size_isoc[dev->alt])
+			dev->alt = i;
+	}
+
+set_alt:
+	/* NOTE: for bulk transfers, we need to call usb_set_interface()
+	 * even if the previous settings were the same. Otherwise streaming
+	 * fails with all urbs having status = -EOVERFLOW ! */
+	if (dev->analog_xfer_bulk) {
+		dev->max_pkt_size = 512; /* USB 2.0 spec */
+		dev->packet_multiplier = EM28XX_BULK_PACKET_MULTIPLIER;
+	} else { /* isoc */
+		em28xx_videodbg("minimum isoc packet size: %u (alt=%d)\n",
+			       min_pkt_size, dev->alt);
+		dev->max_pkt_size =
+				  dev->alt_max_pkt_size_isoc[dev->alt];
+		dev->packet_multiplier = EM28XX_NUM_ISOC_PACKETS;
+	}
+	em28xx_videodbg("setting alternate %d with wMaxPacketSize=%u\n",
+		       dev->alt, dev->max_pkt_size);
+	errCode = usb_set_interface(dev->udev, dev->ifnum, dev->alt);
+	if (errCode < 0) {
+		em28xx_errdev("cannot change alternate number to %d (error=%i)\n",
+			      dev->alt, errCode);
+		return errCode;
+	}
+	return 0;
+}
+
 /* ------------------------------------------------------------------
 	DMA and thread functions
    ------------------------------------------------------------------*/
@@ -763,7 +1020,7 @@
 	.wait_finish    = vb2_ops_wait_finish,
 };
 
-int em28xx_vb2_setup(struct em28xx *dev)
+static int em28xx_vb2_setup(struct em28xx *dev)
 {
 	int rc;
 	struct vb2_queue *q;
@@ -831,7 +1088,7 @@
 	em28xx_audio_analog_set(dev);
 }
 
-void em28xx_ctrl_notify(struct v4l2_ctrl *ctrl, void *priv)
+static void em28xx_ctrl_notify(struct v4l2_ctrl *ctrl, void *priv)
 {
 	struct em28xx *dev = priv;
 
@@ -890,7 +1147,7 @@
 	return (ret < 0) ? ret : 0;
 }
 
-const struct v4l2_ctrl_ops em28xx_ctrl_ops = {
+static const struct v4l2_ctrl_ops em28xx_ctrl_ops = {
 	.s_ctrl = em28xx_s_ctrl,
 };
 
@@ -1368,7 +1625,7 @@
 		reg->val = ret;
 	} else {
 		__le16 val = 0;
-		ret = em28xx_read_reg_req_len(dev, USB_REQ_GET_STATUS,
+		ret = dev->em28xx_read_reg_req_len(dev, USB_REQ_GET_STATUS,
 						   reg->reg, (char *)&val, 2);
 		if (ret < 0)
 			return ret;
@@ -1570,6 +1827,10 @@
 	case VFL_TYPE_VBI:
 		fh_type = V4L2_BUF_TYPE_VBI_CAPTURE;
 		break;
+	case VFL_TYPE_RADIO:
+		break;
+	default:
+		return -EINVAL;
 	}
 
 	em28xx_videodbg("open dev=%s type=%s users=%d\n",
@@ -1590,15 +1851,17 @@
 	fh->type = fh_type;
 	filp->private_data = fh;
 
-	if (fh->type == V4L2_BUF_TYPE_VIDEO_CAPTURE && dev->users == 0) {
+	if (dev->users == 0) {
 		em28xx_set_mode(dev, EM28XX_ANALOG_MODE);
-		em28xx_resolution_set(dev);
 
-		/* Needed, since GPIO might have disabled power of
-		   some i2c device
+		if (vdev->vfl_type != VFL_TYPE_RADIO)
+			em28xx_resolution_set(dev);
+
+		/*
+		 * Needed, since GPIO might have disabled power
+		 * of some i2c devices
 		 */
 		em28xx_wake_i2c(dev);
-
 	}
 
 	if (vdev->vfl_type == VFL_TYPE_RADIO) {
@@ -1615,40 +1878,59 @@
 }
 
 /*
- * em28xx_realease_resources()
+ * em28xx_v4l2_fini()
  * unregisters the v4l2,i2c and usb devices
 * called when the device gets disconnected or at module unload
 */
-void em28xx_release_analog_resources(struct em28xx *dev)
+static int em28xx_v4l2_fini(struct em28xx *dev)
 {
+	if (dev->is_audio_only) {
+		/* The v4l2 extension isn't initialized for this interface */
+		return 0;
+	}
 
-	/*FIXME: I2C IR should be disconnected */
+	if (!dev->has_video) {
+		/* This device does not support the v4l2 extension */
+		return 0;
+	}
+
+	em28xx_info("Closing video extension");
+
+	mutex_lock(&dev->lock);
+
+	v4l2_device_disconnect(&dev->v4l2_dev);
+
+	em28xx_uninit_usb_xfer(dev, EM28XX_ANALOG_MODE);
 
 	if (dev->radio_dev) {
-		if (video_is_registered(dev->radio_dev))
-			video_unregister_device(dev->radio_dev);
-		else
-			video_device_release(dev->radio_dev);
-		dev->radio_dev = NULL;
+		em28xx_info("V4L2 device %s deregistered\n",
+			    video_device_node_name(dev->radio_dev));
+		video_unregister_device(dev->radio_dev);
 	}
 	if (dev->vbi_dev) {
 		em28xx_info("V4L2 device %s deregistered\n",
 			    video_device_node_name(dev->vbi_dev));
-		if (video_is_registered(dev->vbi_dev))
-			video_unregister_device(dev->vbi_dev);
-		else
-			video_device_release(dev->vbi_dev);
-		dev->vbi_dev = NULL;
+		video_unregister_device(dev->vbi_dev);
 	}
 	if (dev->vdev) {
 		em28xx_info("V4L2 device %s deregistered\n",
 			    video_device_node_name(dev->vdev));
-		if (video_is_registered(dev->vdev))
-			video_unregister_device(dev->vdev);
-		else
-			video_device_release(dev->vdev);
-		dev->vdev = NULL;
+		video_unregister_device(dev->vdev);
 	}
+
+	if (dev->clk) {
+		v4l2_clk_unregister_fixed(dev->clk);
+		dev->clk = NULL;
+	}
+
+	v4l2_ctrl_handler_free(&dev->ctrl_handler);
+	v4l2_device_unregister(&dev->v4l2_dev);
+
+	if (dev->users)
+		em28xx_warn("Device is open ! Memory deallocation is deferred on last close.\n");
+	mutex_unlock(&dev->lock);
+
+	return 0;
 }
 
 /*
@@ -1668,14 +1950,10 @@
 	mutex_lock(&dev->lock);
 
 	if (dev->users == 1) {
-		/* the device is already disconnect,
-		   free the remaining resources */
+		/* free the remaining resources if device is disconnected */
 		if (dev->disconnected) {
-			em28xx_release_resources(dev);
 			kfree(dev->alt_max_pkt_size_isoc);
-			mutex_unlock(&dev->lock);
-			kfree(dev);
-			return 0;
+			goto exit;
 		}
 
 		/* Save some power by putting tuner to sleep */
@@ -1694,11 +1972,29 @@
 		}
 	}
 
+exit:
 	dev->users--;
 	mutex_unlock(&dev->lock);
 	return 0;
 }
 
+/*
+ * em28xx_videodevice_release()
+ * called when the last user of the video device exits and frees the memory
+ */
+static void em28xx_videodevice_release(struct video_device *vdev)
+{
+	struct em28xx *dev = video_get_drvdata(vdev);
+
+	video_device_release(vdev);
+	if (vdev == dev->vdev)
+		dev->vdev = NULL;
+	else if (vdev == dev->vbi_dev)
+		dev->vbi_dev = NULL;
+	else if (vdev == dev->radio_dev)
+		dev->radio_dev = NULL;
+}
+
 static const struct v4l2_file_operations em28xx_v4l_fops = {
 	.owner         = THIS_MODULE,
 	.open          = em28xx_v4l2_open,
@@ -1753,11 +2049,10 @@
 };
 
 static const struct video_device em28xx_video_template = {
-	.fops                       = &em28xx_v4l_fops,
-	.release                    = video_device_release_empty,
-	.ioctl_ops 		    = &video_ioctl_ops,
-
-	.tvnorms                    = V4L2_STD_ALL,
+	.fops		= &em28xx_v4l_fops,
+	.ioctl_ops	= &video_ioctl_ops,
+	.release	= em28xx_videodevice_release,
+	.tvnorms	= V4L2_STD_ALL,
 };
 
 static const struct v4l2_file_operations radio_fops = {
@@ -1783,15 +2078,31 @@
 };
 
 static struct video_device em28xx_radio_template = {
-	.name                 = "em28xx-radio",
-	.fops                 = &radio_fops,
-	.ioctl_ops 	      = &radio_ioctl_ops,
+	.fops		= &radio_fops,
+	.ioctl_ops	= &radio_ioctl_ops,
+	.release	= em28xx_videodevice_release,
+};
+
+/* Possible I2C addresses for saa7115, tvp5150, msp3400 and tvaudio */
+static unsigned short saa711x_addrs[] = {
+	0x4a >> 1, 0x48 >> 1,   /* SAA7111, SAA7111A and SAA7113 */
+	0x42 >> 1, 0x40 >> 1,   /* SAA7114, SAA7115 and SAA7118 */
+	I2C_CLIENT_END };
+
+static unsigned short tvp5150_addrs[] = {
+	0xb8 >> 1,
+	0xba >> 1,
+	I2C_CLIENT_END
+};
+
+static unsigned short msp3400_addrs[] = {
+	0x80 >> 1,
+	0x88 >> 1,
+	I2C_CLIENT_END
 };
 
 /******************************** usb interface ******************************/
 
-
-
 static struct video_device *em28xx_vdev_init(struct em28xx *dev,
 					const struct video_device *template,
 					const char *type_name)
@@ -1817,14 +2128,198 @@
 	return vfd;
 }
 
-int em28xx_register_analog_devices(struct em28xx *dev)
+static void em28xx_tuner_setup(struct em28xx *dev)
+{
+	struct tuner_setup           tun_setup;
+	struct v4l2_frequency        f;
+
+	if (dev->tuner_type == TUNER_ABSENT)
+		return;
+
+	memset(&tun_setup, 0, sizeof(tun_setup));
+
+	tun_setup.mode_mask = T_ANALOG_TV | T_RADIO;
+	tun_setup.tuner_callback = em28xx_tuner_callback;
+
+	if (dev->board.radio.type) {
+		tun_setup.type = dev->board.radio.type;
+		tun_setup.addr = dev->board.radio_addr;
+
+		v4l2_device_call_all(&dev->v4l2_dev, 0, tuner, s_type_addr, &tun_setup);
+	}
+
+	if ((dev->tuner_type != TUNER_ABSENT) && (dev->tuner_type)) {
+		tun_setup.type   = dev->tuner_type;
+		tun_setup.addr   = dev->tuner_addr;
+
+		v4l2_device_call_all(&dev->v4l2_dev, 0, tuner, s_type_addr, &tun_setup);
+	}
+
+	if (dev->tda9887_conf) {
+		struct v4l2_priv_tun_config tda9887_cfg;
+
+		tda9887_cfg.tuner = TUNER_TDA9887;
+		tda9887_cfg.priv = &dev->tda9887_conf;
+
+		v4l2_device_call_all(&dev->v4l2_dev, 0, tuner, s_config, &tda9887_cfg);
+	}
+
+	if (dev->tuner_type == TUNER_XC2028) {
+		struct v4l2_priv_tun_config  xc2028_cfg;
+		struct xc2028_ctrl           ctl;
+
+		memset(&xc2028_cfg, 0, sizeof(xc2028_cfg));
+		memset(&ctl, 0, sizeof(ctl));
+
+		em28xx_setup_xc3028(dev, &ctl);
+
+		xc2028_cfg.tuner = TUNER_XC2028;
+		xc2028_cfg.priv  = &ctl;
+
+		v4l2_device_call_all(&dev->v4l2_dev, 0, tuner, s_config, &xc2028_cfg);
+	}
+
+	/* configure tuner */
+	f.tuner = 0;
+	f.type = V4L2_TUNER_ANALOG_TV;
+	f.frequency = 9076;     /* just a magic number */
+	dev->ctl_freq = f.frequency;
+	v4l2_device_call_all(&dev->v4l2_dev, 0, tuner, s_frequency, &f);
+}
+
+static int em28xx_v4l2_init(struct em28xx *dev)
 {
 	u8 val;
 	int ret;
 	unsigned int maxw;
+	struct v4l2_ctrl_handler *hdl = &dev->ctrl_handler;
 
-	printk(KERN_INFO "%s: v4l2 driver version %s\n",
-		dev->name, EM28XX_VERSION);
+	if (dev->is_audio_only) {
+		/* Shouldn't initialize v4l2 for this interface */
+		return 0;
+	}
+
+	if (!dev->has_video) {
+		/* This device does not support the v4l2 extension */
+		return 0;
+	}
+
+	em28xx_info("Registering V4L2 extension\n");
+
+	mutex_lock(&dev->lock);
+
+	ret = v4l2_device_register(&dev->udev->dev, &dev->v4l2_dev);
+	if (ret < 0) {
+		em28xx_errdev("Call to v4l2_device_register() failed!\n");
+		goto err;
+	}
+
+	v4l2_ctrl_handler_init(hdl, 8);
+	dev->v4l2_dev.ctrl_handler = hdl;
+
+	/*
+	 * Default format, used for tvp5150 or saa711x output formats
+	 */
+	dev->vinmode = 0x10;
+	dev->vinctl  = EM28XX_VINCTRL_INTERLACED |
+		       EM28XX_VINCTRL_CCIR656_ENABLE;
+
+	/* request some modules */
+
+	if (dev->board.has_msp34xx)
+		v4l2_i2c_new_subdev(&dev->v4l2_dev, &dev->i2c_adap[dev->def_i2c_bus],
+			"msp3400", 0, msp3400_addrs);
+
+	if (dev->board.decoder == EM28XX_SAA711X)
+		v4l2_i2c_new_subdev(&dev->v4l2_dev, &dev->i2c_adap[dev->def_i2c_bus],
+			"saa7115_auto", 0, saa711x_addrs);
+
+	if (dev->board.decoder == EM28XX_TVP5150)
+		v4l2_i2c_new_subdev(&dev->v4l2_dev, &dev->i2c_adap[dev->def_i2c_bus],
+			"tvp5150", 0, tvp5150_addrs);
+
+	if (dev->board.adecoder == EM28XX_TVAUDIO)
+		v4l2_i2c_new_subdev(&dev->v4l2_dev, &dev->i2c_adap[dev->def_i2c_bus],
+			"tvaudio", dev->board.tvaudio_addr, NULL);
+
+	/* Initialize tuner and camera */
+
+	if (dev->board.tuner_type != TUNER_ABSENT) {
+		int has_demod = (dev->tda9887_conf & TDA9887_PRESENT);
+
+		if (dev->board.radio.type)
+			v4l2_i2c_new_subdev(&dev->v4l2_dev, &dev->i2c_adap[dev->def_i2c_bus],
+				"tuner", dev->board.radio_addr, NULL);
+
+		if (has_demod)
+			v4l2_i2c_new_subdev(&dev->v4l2_dev,
+				&dev->i2c_adap[dev->def_i2c_bus], "tuner",
+				0, v4l2_i2c_tuner_addrs(ADDRS_DEMOD));
+		if (dev->tuner_addr == 0) {
+			enum v4l2_i2c_tuner_type type =
+				has_demod ? ADDRS_TV_WITH_DEMOD : ADDRS_TV;
+			struct v4l2_subdev *sd;
+
+			sd = v4l2_i2c_new_subdev(&dev->v4l2_dev,
+				&dev->i2c_adap[dev->def_i2c_bus], "tuner",
+				0, v4l2_i2c_tuner_addrs(type));
+
+			if (sd)
+				dev->tuner_addr = v4l2_i2c_subdev_addr(sd);
+		} else {
+			v4l2_i2c_new_subdev(&dev->v4l2_dev, &dev->i2c_adap[dev->def_i2c_bus],
+				"tuner", dev->tuner_addr, NULL);
+		}
+	}
+
+	em28xx_tuner_setup(dev);
+	em28xx_init_camera(dev);
+
+	/* Configure audio */
+	ret = em28xx_audio_setup(dev);
+	if (ret < 0) {
+		em28xx_errdev("%s: Error while setting audio - error [%d]!\n",
+			__func__, ret);
+		goto unregister_dev;
+	}
+	if (dev->audio_mode.ac97 != EM28XX_NO_AC97) {
+		v4l2_ctrl_new_std(hdl, &em28xx_ctrl_ops,
+			V4L2_CID_AUDIO_MUTE, 0, 1, 1, 1);
+		v4l2_ctrl_new_std(hdl, &em28xx_ctrl_ops,
+			V4L2_CID_AUDIO_VOLUME, 0, 0x1f, 1, 0x1f);
+	} else {
+		/* install the em28xx notify callback */
+		v4l2_ctrl_notify(v4l2_ctrl_find(hdl, V4L2_CID_AUDIO_MUTE),
+				em28xx_ctrl_notify, dev);
+		v4l2_ctrl_notify(v4l2_ctrl_find(hdl, V4L2_CID_AUDIO_VOLUME),
+				em28xx_ctrl_notify, dev);
+	}
+
+	/* wake i2c devices */
+	em28xx_wake_i2c(dev);
+
+	/* init video dma queues */
+	INIT_LIST_HEAD(&dev->vidq.active);
+	INIT_LIST_HEAD(&dev->vbiq.active);
+
+	if (dev->board.has_msp34xx) {
+		/* Send a reset to other chips via gpio */
+		ret = em28xx_write_reg(dev, EM2820_R08_GPIO_CTRL, 0xf7);
+		if (ret < 0) {
+			em28xx_errdev("%s: em28xx_write_reg - msp34xx(1) failed! error [%d]\n",
+				      __func__, ret);
+			goto unregister_dev;
+		}
+		msleep(3);
+
+		ret = em28xx_write_reg(dev, EM2820_R08_GPIO_CTRL, 0xff);
+		if (ret < 0) {
+			em28xx_errdev("%s: em28xx_write_reg - msp34xx(2) failed! error [%d]\n",
+				      __func__, ret);
+			goto unregister_dev;
+		}
+		msleep(3);
+	}
 
 	/* set default norm */
 	dev->norm = V4L2_STD_PAL;
@@ -1888,14 +2383,16 @@
 	/* Reset image controls */
 	em28xx_colorlevels_set_default(dev);
 	v4l2_ctrl_handler_setup(&dev->ctrl_handler);
-	if (dev->ctrl_handler.error)
-		return dev->ctrl_handler.error;
+	ret = dev->ctrl_handler.error;
+	if (ret)
+		goto unregister_dev;
 
 	/* allocate and fill video video_device struct */
 	dev->vdev = em28xx_vdev_init(dev, &em28xx_video_template, "video");
 	if (!dev->vdev) {
 		em28xx_errdev("cannot allocate video_device.\n");
-		return -ENODEV;
+		ret = -ENODEV;
+		goto unregister_dev;
 	}
 	dev->vdev->queue = &dev->vb_vidq;
 	dev->vdev->queue->lock = &dev->vb_queue_lock;
@@ -1925,7 +2422,7 @@
 	if (ret) {
 		em28xx_errdev("unable to register video device (error=%i).\n",
 			      ret);
-		return ret;
+		goto unregister_dev;
 	}
 
 	/* Allocate and fill vbi video_device struct */
@@ -1954,7 +2451,7 @@
 					    vbi_nr[dev->devno]);
 		if (ret < 0) {
 			em28xx_errdev("unable to register vbi device\n");
-			return ret;
+			goto unregister_dev;
 		}
 	}
 
@@ -1963,13 +2460,14 @@
 						  "radio");
 		if (!dev->radio_dev) {
 			em28xx_errdev("cannot allocate video_device.\n");
-			return -ENODEV;
+			ret = -ENODEV;
+			goto unregister_dev;
 		}
 		ret = video_register_device(dev->radio_dev, VFL_TYPE_RADIO,
 					    radio_nr[dev->devno]);
 		if (ret < 0) {
 			em28xx_errdev("can't register radio device\n");
-			return ret;
+			goto unregister_dev;
 		}
 		em28xx_info("Registered radio device as %s\n",
 			    video_device_node_name(dev->radio_dev));
@@ -1982,5 +2480,41 @@
 		em28xx_info("V4L2 VBI device registered as %s\n",
 			    video_device_node_name(dev->vbi_dev));
 
+	/* Save some power by putting tuner to sleep */
+	v4l2_device_call_all(&dev->v4l2_dev, 0, core, s_power, 0);
+
+	/* initialize videobuf2 stuff */
+	em28xx_vb2_setup(dev);
+
+	em28xx_info("V4L2 extension successfully initialized\n");
+
+	mutex_unlock(&dev->lock);
 	return 0;
+
+unregister_dev:
+	v4l2_ctrl_handler_free(&dev->ctrl_handler);
+	v4l2_device_unregister(&dev->v4l2_dev);
+err:
+	mutex_unlock(&dev->lock);
+	return ret;
 }
+
+static struct em28xx_ops v4l2_ops = {
+	.id   = EM28XX_V4L2,
+	.name = "Em28xx v4l2 Extension",
+	.init = em28xx_v4l2_init,
+	.fini = em28xx_v4l2_fini,
+};
+
+static int __init em28xx_video_register(void)
+{
+	return em28xx_register_extension(&v4l2_ops);
+}
+
+static void __exit em28xx_video_unregister(void)
+{
+	em28xx_unregister_extension(&v4l2_ops);
+}
+
+module_init(em28xx_video_register);
+module_exit(em28xx_video_unregister);
diff --git a/drivers/media/usb/em28xx/em28xx.h b/drivers/media/usb/em28xx/em28xx.h
index f8726ad..32d8a4b 100644
--- a/drivers/media/usb/em28xx/em28xx.h
+++ b/drivers/media/usb/em28xx/em28xx.h
@@ -26,6 +26,9 @@
 #ifndef _EM28XX_H
 #define _EM28XX_H
 
+#define EM28XX_VERSION "0.2.1"
+#define DRIVER_DESC    "Empia em28xx device driver"
+
 #include <linux/workqueue.h>
 #include <linux/i2c.h>
 #include <linux/mutex.h>
@@ -132,6 +135,8 @@
 #define EM2884_BOARD_C3TECH_DIGITAL_DUO		  88
 #define EM2874_BOARD_DELOCK_61959		  89
 #define EM2874_BOARD_KWORLD_UB435Q_V2		  90
+#define EM2765_BOARD_SPEEDLINK_VAD_LAPLACE	  91
+#define EM28178_BOARD_PCTV_461E                   92
 
 /* Limits minimum and default number of buffers */
 #define EM28XX_MIN_BUF 4
@@ -178,8 +183,27 @@
 
 #define EM28XX_INTERLACED_DEFAULT 1
 
-/* time in msecs to wait for i2c writes to finish */
-#define EM2800_I2C_XFER_TIMEOUT		20
+/*
+ * Time in msecs to wait for i2c xfers to finish.
+ * 35 ms is the maximum time an SMBus device may stretch the clock.
+ * As the transfer itself will take some additional time, set the
+ * timeout slightly above that (36 ms).
+ *
+ * Ok, I2C doesn't specify any limit. So, eventually, we may need
+ * to increase this timeout.
+ *
+ * FIXME: this assumes that an I2C message is not longer than 1ms.
+ * This is actually dependent on the I2C bus speed, although most
+ * devices use a 100kHz clock. So, this assumption is true most of
+ * the time.
+ */
+#define EM28XX_I2C_XFER_TIMEOUT		36
+
+/* time in msecs to wait for AC97 xfers to finish */
+#define EM28XX_AC97_XFER_TIMEOUT	100
+
+/* max. number of button state polling addresses */
+#define EM28XX_NUM_BUTTON_ADDRESSES_MAX		5
 
 enum em28xx_mode {
 	EM28XX_SUSPEND,
@@ -287,8 +311,7 @@
 
 	unsigned int has_audio:1;
 
-	unsigned int i2s_3rates:1;
-	unsigned int i2s_5rates:1;
+	u8 i2s_samplerates;
 };
 
 /* em28xx has two audio inputs: tuner and line in.
@@ -374,6 +397,33 @@
 	EM28XX_TVAUDIO,
 };
 
+enum em28xx_led_role {
+	EM28XX_LED_ANALOG_CAPTURING = 0,
+	EM28XX_LED_ILLUMINATION,
+	EM28XX_NUM_LED_ROLES, /* must be the last */
+};
+
+struct em28xx_led {
+	enum em28xx_led_role role;
+	u8 gpio_reg;
+	u8 gpio_mask;
+	bool inverted;
+};
+
+enum em28xx_button_role {
+	EM28XX_BUTTON_SNAPSHOT = 0,
+	EM28XX_BUTTON_ILLUMINATION,
+	EM28XX_NUM_BUTTON_ROLES, /* must be the last */
+};
+
+struct em28xx_button {
+	enum em28xx_button_role role;
+	u8 reg_r;
+	u8 reg_clearing;
+	u8 mask;
+	bool inverted;
+};
+
 struct em28xx_board {
 	char *name;
 	int vchannels;
@@ -395,7 +445,6 @@
 	unsigned int mts_firmware:1;
 	unsigned int max_range_640_480:1;
 	unsigned int has_dvb:1;
-	unsigned int has_snapshot_button:1;
 	unsigned int is_webcam:1;
 	unsigned int valid:1;
 	unsigned int has_ir_i2c:1;
@@ -410,6 +459,12 @@
 	struct em28xx_input       input[MAX_EM28XX_INPUT];
 	struct em28xx_input	  radio;
 	char			  *ir_codes;
+
+	/* LEDs that need to be controlled explicitly */
+	struct em28xx_led	  *leds;
+
+	/* Buttons */
+	struct em28xx_button	  *buttons;
 };
 
 struct em28xx_eeprom {
@@ -426,15 +481,13 @@
 	u8 string_idx_table;
 };
 
-#define EM28XX_AUDIO_BUFS 5
-#define EM28XX_NUM_AUDIO_PACKETS 64
-#define EM28XX_AUDIO_MAX_PACKET_SIZE 196 /* static value */
 #define EM28XX_CAPTURE_STREAM_EN 1
 
 /* em28xx extensions */
 #define EM28XX_AUDIO   0x10
 #define EM28XX_DVB     0x20
 #define EM28XX_RC      0x30
+#define EM28XX_V4L2    0x40
 
 /* em28xx resource types (used for res_get/res_lock etc */
 #define EM28XX_RESOURCE_VIDEO 0x01
@@ -442,8 +495,9 @@
 
 struct em28xx_audio {
 	char name[50];
-	char *transfer_buffer[EM28XX_AUDIO_BUFS];
-	struct urb *urb[EM28XX_AUDIO_BUFS];
+	unsigned num_urb;
+	char **transfer_buffer;
+	struct urb **urb;
 	struct usb_device *udev;
 	unsigned int capture_transfer_done;
 	struct snd_pcm_substream   *capture_pcm_substream;
@@ -451,6 +505,8 @@
 	unsigned int hwptr_done_capture;
 	struct snd_card            *sndcard;
 
+	size_t period;
+
 	int users;
 	spinlock_t slock;
 };
@@ -485,11 +541,13 @@
 	int model;		/* index in the device_data struct */
 	int devno;		/* marks the number of this device */
 	enum em28xx_chip_id chip_id;
+
 	unsigned int is_em25xx:1;	/* em25xx/em276x/7x/8x family bridge */
-
 	unsigned char disconnected:1;	/* device has been disconnected */
-
-	int audio_ifnum;
+	unsigned int has_video:1;
+	unsigned int has_audio_class:1;
+	unsigned int has_alsa_audio:1;
+	unsigned int is_audio_only:1;
 
 	struct v4l2_device v4l2_dev;
 	struct v4l2_ctrl_handler ctrl_handler;
@@ -507,10 +565,6 @@
 	/* Vinmode/Vinctl used at the driver */
 	int vinmode, vinctl;
 
-	unsigned int has_audio_class:1;
-	unsigned int has_alsa_audio:1;
-	unsigned int is_audio_only:1;
-
 	/* Controls audio streaming */
 	struct work_struct wq_trigger;	/* Trigger to start/stop audio for alsa module */
 	atomic_t       stream_started;	/* stream should be running if true */
@@ -608,6 +662,7 @@
 
 	/* usb transfer */
 	struct usb_device *udev;	/* the usb device */
+	u8 ifnum;		/* number of the assigned usb interface */
 	u8 analog_ep_isoc;	/* address of isoc endpoint for analog */
 	u8 analog_ep_bulk;	/* address of bulk endpoint for analog */
 	u8 dvb_ep_isoc;		/* address of isoc endpoint for DVB */
@@ -639,10 +694,15 @@
 
 	enum em28xx_mode mode;
 
-	/* Snapshot button */
+	/* Button state polling */
+	struct delayed_work buttons_query_work;
+	u8 button_polling_addresses[EM28XX_NUM_BUTTON_ADDRESSES_MAX];
+	u8 button_polling_last_values[EM28XX_NUM_BUTTON_ADDRESSES_MAX];
+	u8 num_button_polling_addresses;
+	u16 button_polling_interval; /* [ms] */
+	/* Snapshot button input device */
 	char snapshot_button_path[30];	/* path of the input dev */
 	struct input_dev *sbutton_input_dev;
-	struct delayed_work sbutton_query_work;
 
 	struct em28xx_dvb *dvb;
 };
@@ -672,6 +732,7 @@
 int em28xx_write_reg(struct em28xx *dev, u16 reg, u8 val);
 int em28xx_write_reg_bits(struct em28xx *dev, u16 reg, u8 val,
 				 u8 bitmask);
+int em28xx_toggle_reg_bits(struct em28xx *dev, u16 reg, u8 bitmask);
 
 int em28xx_read_ac97(struct em28xx *dev, u8 reg);
 int em28xx_write_ac97(struct em28xx *dev, u8 reg, u16 val);
@@ -679,12 +740,9 @@
 int em28xx_audio_analog_set(struct em28xx *dev);
 int em28xx_audio_setup(struct em28xx *dev);
 
-int em28xx_colorlevels_set_default(struct em28xx *dev);
+const struct em28xx_led *em28xx_find_led(struct em28xx *dev,
+					 enum em28xx_led_role role);
 int em28xx_capture_start(struct em28xx *dev, int start);
-int em28xx_vbi_supported(struct em28xx *dev);
-int em28xx_set_outfmt(struct em28xx *dev);
-int em28xx_resolution_set(struct em28xx *dev);
-int em28xx_set_alternate(struct em28xx *dev);
 int em28xx_alloc_urbs(struct em28xx *dev, enum em28xx_mode mode, int xfer_bulk,
 		      int num_bufs, int max_pkt_size, int packet_multiplier);
 int em28xx_init_usb_xfer(struct em28xx *dev, enum em28xx_mode mode,
@@ -696,30 +754,18 @@
 void em28xx_stop_urbs(struct em28xx *dev);
 int em28xx_set_mode(struct em28xx *dev, enum em28xx_mode set_mode);
 int em28xx_gpio_set(struct em28xx *dev, struct em28xx_reg_seq *gpio);
-void em28xx_wake_i2c(struct em28xx *dev);
 int em28xx_register_extension(struct em28xx_ops *dev);
 void em28xx_unregister_extension(struct em28xx_ops *dev);
 void em28xx_init_extension(struct em28xx *dev);
 void em28xx_close_extension(struct em28xx *dev);
 
-/* Provided by em28xx-video.c */
-int em28xx_vb2_setup(struct em28xx *dev);
-int em28xx_register_analog_devices(struct em28xx *dev);
-void em28xx_release_analog_resources(struct em28xx *dev);
-void em28xx_ctrl_notify(struct v4l2_ctrl *ctrl, void *priv);
-int em28xx_start_analog_streaming(struct vb2_queue *vq, unsigned int count);
-int em28xx_stop_vbi_streaming(struct vb2_queue *vq);
-extern const struct v4l2_ctrl_ops em28xx_ctrl_ops;
-
 /* Provided by em28xx-cards.c */
 extern struct em28xx_board em28xx_boards[];
 extern struct usb_device_id em28xx_id_table[];
 int em28xx_tuner_callback(void *ptr, int component, int command, int arg);
+void em28xx_setup_xc3028(struct em28xx *dev, struct xc2028_ctrl *ctl);
 void em28xx_release_resources(struct em28xx *dev);
 
-/* Provided by em28xx-vbi.c */
-extern struct vb2_ops em28xx_vbi_qops;
-
 /* Provided by em28xx-camera.c */
 int em28xx_detect_sensor(struct em28xx *dev);
 int em28xx_init_camera(struct em28xx *dev);
diff --git a/drivers/media/usb/hdpvr/hdpvr-core.c b/drivers/media/usb/hdpvr/hdpvr-core.c
index 2f0c89c..c563896 100644
--- a/drivers/media/usb/hdpvr/hdpvr-core.c
+++ b/drivers/media/usb/hdpvr/hdpvr-core.c
@@ -198,7 +198,6 @@
 	hex_dump_to_buffer(response, 8, 16, 1, print_buf, 5*buf_size+1, 0);
 	v4l2_dbg(MSG_INFO, hdpvr_debug, &dev->v4l2_dev, " response: %s\n",
 		 print_buf);
-	kfree(print_buf);
 #endif
 
 	msleep(100);
@@ -214,6 +213,9 @@
 	retval = ret != 8;
 unlock:
 	mutex_unlock(&dev->usbc_mutex);
+#ifdef HDPVR_DEBUG
+	kfree(print_buf);
+#endif
 	return retval;
 }
 
diff --git a/drivers/media/usb/pwc/pwc-if.c b/drivers/media/usb/pwc/pwc-if.c
index 78c9bc8..abf365a 100644
--- a/drivers/media/usb/pwc/pwc-if.c
+++ b/drivers/media/usb/pwc/pwc-if.c
@@ -1078,7 +1078,6 @@
 	/* register webcam snapshot button input device */
 	pdev->button_dev = input_allocate_device();
 	if (!pdev->button_dev) {
-		PWC_ERROR("Err, insufficient memory for webcam snapshot button device.");
 		rc = -ENOMEM;
 		goto err_video_unreg;
 	}
diff --git a/drivers/media/usb/sn9c102/Kconfig b/drivers/media/usb/sn9c102/Kconfig
deleted file mode 100644
index 6ebaf29..0000000
--- a/drivers/media/usb/sn9c102/Kconfig
+++ /dev/null
@@ -1,14 +0,0 @@
-config USB_SN9C102
-	tristate "USB SN9C1xx PC Camera Controller support (DEPRECATED)"
-	depends on VIDEO_V4L2
-	---help---
-	  This driver is DEPRECATED please use the gspca sonixb and
-	  sonixj modules instead.
-
-	  Say Y here if you want support for cameras based on SONiX SN9C101,
-	  SN9C102, SN9C103, SN9C105 and SN9C120 PC Camera Controllers.
-
-	  See <file:Documentation/video4linux/sn9c102.txt> for more info.
-
-	  To compile this driver as a module, choose M here: the
-	  module will be called sn9c102.
diff --git a/drivers/media/v4l2-core/Kconfig b/drivers/media/v4l2-core/Kconfig
index 8c05565..2189bfb 100644
--- a/drivers/media/v4l2-core/Kconfig
+++ b/drivers/media/v4l2-core/Kconfig
@@ -83,14 +83,3 @@
 	#depends on HAS_DMA
 	select VIDEOBUF2_CORE
 	select VIDEOBUF2_MEMOPS
-
-config VIDEO_V4L2_INT_DEVICE
-	tristate "V4L2 int device (DEPRECATED)"
-	depends on VIDEO_V4L2
-	---help---
-	  An early framework for a hardware-independent interface for
-	  image sensors and bridges etc. Currently used by omap24xxcam and
-	  tcm825x drivers that should be converted to V4L2 subdev.
-
-	  Do not use for new developments.
-
diff --git a/drivers/media/v4l2-core/Makefile b/drivers/media/v4l2-core/Makefile
index 1a85eee..c6ae7ba 100644
--- a/drivers/media/v4l2-core/Makefile
+++ b/drivers/media/v4l2-core/Makefile
@@ -15,7 +15,6 @@
 endif
 
 obj-$(CONFIG_VIDEO_V4L2) += videodev.o
-obj-$(CONFIG_VIDEO_V4L2_INT_DEVICE) += v4l2-int-device.o
 obj-$(CONFIG_VIDEO_V4L2) += v4l2-common.o
 obj-$(CONFIG_VIDEO_V4L2) += v4l2-dv-timings.o
 
diff --git a/drivers/media/v4l2-core/v4l2-ctrls.c b/drivers/media/v4l2-core/v4l2-ctrls.c
index fb46790..6ff002b 100644
--- a/drivers/media/v4l2-core/v4l2-ctrls.c
+++ b/drivers/media/v4l2-core/v4l2-ctrls.c
@@ -745,6 +745,11 @@
 	case V4L2_CID_MPEG_VIDEO_VPX_FILTER_SHARPNESS:		return "VPX Deblocking Effect Control";
 	case V4L2_CID_MPEG_VIDEO_VPX_GOLDEN_FRAME_REF_PERIOD:	return "VPX Golden Frame Refresh Period";
 	case V4L2_CID_MPEG_VIDEO_VPX_GOLDEN_FRAME_SEL:		return "VPX Golden Frame Indicator";
+	case V4L2_CID_MPEG_VIDEO_VPX_MIN_QP:			return "VPX Minimum QP Value";
+	case V4L2_CID_MPEG_VIDEO_VPX_MAX_QP:			return "VPX Maximum QP Value";
+	case V4L2_CID_MPEG_VIDEO_VPX_I_FRAME_QP:		return "VPX I-Frame QP Value";
+	case V4L2_CID_MPEG_VIDEO_VPX_P_FRAME_QP:		return "VPX P-Frame QP Value";
+	case V4L2_CID_MPEG_VIDEO_VPX_PROFILE:			return "VPX Profile";
 
 	/* CAMERA controls */
 	/* Keep the order of the 'case's the same as in videodev2.h! */
diff --git a/drivers/media/v4l2-core/v4l2-dev.c b/drivers/media/v4l2-core/v4l2-dev.c
index b5aaaac..0a30dbf 100644
--- a/drivers/media/v4l2-core/v4l2-dev.c
+++ b/drivers/media/v4l2-core/v4l2-dev.c
@@ -872,8 +872,8 @@
 
 	/* Should not happen since we thought this minor was free */
 	WARN_ON(video_device[vdev->minor] != NULL);
-	video_device[vdev->minor] = vdev;
 	vdev->index = get_index(vdev);
+	video_device[vdev->minor] = vdev;
 	mutex_unlock(&videodev_lock);
 
 	if (vdev->ioctl_ops)
diff --git a/drivers/media/v4l2-core/v4l2-dv-timings.c b/drivers/media/v4l2-core/v4l2-dv-timings.c
index ee52b9f4..f7902fe 100644
--- a/drivers/media/v4l2-core/v4l2-dv-timings.c
+++ b/drivers/media/v4l2-core/v4l2-dv-timings.c
@@ -515,6 +515,7 @@
 		aspect.denominator = 9;
 	}
 	image_width = ((image_height * aspect.numerator) / aspect.denominator);
+	image_width = (image_width + GTF_CELL_GRAN/2) & ~(GTF_CELL_GRAN - 1);
 
 	/* Horizontal */
 	if (default_gtf)
diff --git a/drivers/media/v4l2-core/v4l2-ioctl.c b/drivers/media/v4l2-core/v4l2-ioctl.c
index 68e6b5e..707aef7 100644
--- a/drivers/media/v4l2-core/v4l2-ioctl.c
+++ b/drivers/media/v4l2-core/v4l2-ioctl.c
@@ -28,6 +28,9 @@
 #include <media/v4l2-device.h>
 #include <media/videobuf2-core.h>
 
+#define CREATE_TRACE_POINTS
+#include <trace/events/v4l2.h>
+
 /* Zero out the end of the struct pointed to by p.  Everything after, but
  * not including, the specified field is cleared. */
 #define CLEAR_AFTER_FIELD(p, field) \
@@ -2338,6 +2341,12 @@
 	err = func(file, cmd, parg);
 	if (err == -ENOIOCTLCMD)
 		err = -ENOTTY;
+	if (err == 0) {
+		if (cmd == VIDIOC_DQBUF)
+			trace_v4l2_dqbuf(video_devdata(file)->minor, parg);
+		else if (cmd == VIDIOC_QBUF)
+			trace_v4l2_qbuf(video_devdata(file)->minor, parg);
+	}
 
 	if (has_array_args) {
 		*kernel_ptr = user_ptr;
diff --git a/drivers/media/v4l2-core/v4l2-mem2mem.c b/drivers/media/v4l2-core/v4l2-mem2mem.c
index 73035ee..178ce96 100644
--- a/drivers/media/v4l2-core/v4l2-mem2mem.c
+++ b/drivers/media/v4l2-core/v4l2-mem2mem.c
@@ -558,6 +558,8 @@
 
 	if (m2m_ctx->m2m_dev->m2m_ops->unlock)
 		m2m_ctx->m2m_dev->m2m_ops->unlock(m2m_ctx->priv);
+	else if (m2m_ctx->q_lock)
+		mutex_unlock(m2m_ctx->q_lock);
 
 	if (list_empty(&src_q->done_list))
 		poll_wait(file, &src_q->done_wq, wait);
@@ -566,6 +568,8 @@
 
 	if (m2m_ctx->m2m_dev->m2m_ops->lock)
 		m2m_ctx->m2m_dev->m2m_ops->lock(m2m_ctx->priv);
+	else if (m2m_ctx->q_lock)
+		mutex_lock(m2m_ctx->q_lock);
 
 	spin_lock_irqsave(&src_q->done_lock, flags);
 	if (!list_empty(&src_q->done_list))
@@ -693,6 +697,13 @@
 
 	if (ret)
 		goto err;
+	/*
+	 * If both queues use the same mutex, assign it as the common
+	 * buffer queue lock to the m2m context. This lock is used in the
+	 * v4l2_m2m_ioctl_* helpers.
+	 */
+	if (out_q_ctx->q.lock == cap_q_ctx->q.lock)
+		m2m_ctx->q_lock = out_q_ctx->q.lock;
 
 	return m2m_ctx;
 err:
@@ -740,3 +751,118 @@
 }
 EXPORT_SYMBOL_GPL(v4l2_m2m_buf_queue);
 
+/* Videobuf2 ioctl helpers */
+
+int v4l2_m2m_ioctl_reqbufs(struct file *file, void *priv,
+				struct v4l2_requestbuffers *rb)
+{
+	struct v4l2_fh *fh = file->private_data;
+
+	return v4l2_m2m_reqbufs(file, fh->m2m_ctx, rb);
+}
+EXPORT_SYMBOL_GPL(v4l2_m2m_ioctl_reqbufs);
+
+int v4l2_m2m_ioctl_create_bufs(struct file *file, void *priv,
+				struct v4l2_create_buffers *create)
+{
+	struct v4l2_fh *fh = file->private_data;
+
+	return v4l2_m2m_create_bufs(file, fh->m2m_ctx, create);
+}
+EXPORT_SYMBOL_GPL(v4l2_m2m_ioctl_create_bufs);
+
+int v4l2_m2m_ioctl_querybuf(struct file *file, void *priv,
+				struct v4l2_buffer *buf)
+{
+	struct v4l2_fh *fh = file->private_data;
+
+	return v4l2_m2m_querybuf(file, fh->m2m_ctx, buf);
+}
+EXPORT_SYMBOL_GPL(v4l2_m2m_ioctl_querybuf);
+
+int v4l2_m2m_ioctl_qbuf(struct file *file, void *priv,
+				struct v4l2_buffer *buf)
+{
+	struct v4l2_fh *fh = file->private_data;
+
+	return v4l2_m2m_qbuf(file, fh->m2m_ctx, buf);
+}
+EXPORT_SYMBOL_GPL(v4l2_m2m_ioctl_qbuf);
+
+int v4l2_m2m_ioctl_dqbuf(struct file *file, void *priv,
+				struct v4l2_buffer *buf)
+{
+	struct v4l2_fh *fh = file->private_data;
+
+	return v4l2_m2m_dqbuf(file, fh->m2m_ctx, buf);
+}
+EXPORT_SYMBOL_GPL(v4l2_m2m_ioctl_dqbuf);
+
+int v4l2_m2m_ioctl_expbuf(struct file *file, void *priv,
+				struct v4l2_exportbuffer *eb)
+{
+	struct v4l2_fh *fh = file->private_data;
+
+	return v4l2_m2m_expbuf(file, fh->m2m_ctx, eb);
+}
+EXPORT_SYMBOL_GPL(v4l2_m2m_ioctl_expbuf);
+
+int v4l2_m2m_ioctl_streamon(struct file *file, void *priv,
+				enum v4l2_buf_type type)
+{
+	struct v4l2_fh *fh = file->private_data;
+
+	return v4l2_m2m_streamon(file, fh->m2m_ctx, type);
+}
+EXPORT_SYMBOL_GPL(v4l2_m2m_ioctl_streamon);
+
+int v4l2_m2m_ioctl_streamoff(struct file *file, void *priv,
+				enum v4l2_buf_type type)
+{
+	struct v4l2_fh *fh = file->private_data;
+
+	return v4l2_m2m_streamoff(file, fh->m2m_ctx, type);
+}
+EXPORT_SYMBOL_GPL(v4l2_m2m_ioctl_streamoff);
+
+/*
+ * v4l2_file_operations helpers. It is assumed here that the same lock
+ * is used for the output and the capture buffer queues.
+ */
+
+int v4l2_m2m_fop_mmap(struct file *file, struct vm_area_struct *vma)
+{
+	struct v4l2_fh *fh = file->private_data;
+	struct v4l2_m2m_ctx *m2m_ctx = fh->m2m_ctx;
+	int ret;
+
+	if (m2m_ctx->q_lock && mutex_lock_interruptible(m2m_ctx->q_lock))
+		return -ERESTARTSYS;
+
+	ret = v4l2_m2m_mmap(file, m2m_ctx, vma);
+
+	if (m2m_ctx->q_lock)
+		mutex_unlock(m2m_ctx->q_lock);
+
+	return ret;
+}
+EXPORT_SYMBOL_GPL(v4l2_m2m_fop_mmap);
+
+unsigned int v4l2_m2m_fop_poll(struct file *file, poll_table *wait)
+{
+	struct v4l2_fh *fh = file->private_data;
+	struct v4l2_m2m_ctx *m2m_ctx = fh->m2m_ctx;
+	unsigned int ret;
+
+	if (m2m_ctx->q_lock)
+		mutex_lock(m2m_ctx->q_lock);
+
+	ret = v4l2_m2m_poll(file, m2m_ctx, wait);
+
+	if (m2m_ctx->q_lock)
+		mutex_unlock(m2m_ctx->q_lock);
+
+	return ret;
+}
+EXPORT_SYMBOL_GPL(v4l2_m2m_fop_poll);
+
diff --git a/drivers/media/v4l2-core/v4l2-of.c b/drivers/media/v4l2-core/v4l2-of.c
index a6478dc..42e3e8a 100644
--- a/drivers/media/v4l2-core/v4l2-of.c
+++ b/drivers/media/v4l2-core/v4l2-of.c
@@ -121,9 +121,11 @@
  * the bus as serial CSI-2 and clock-noncontinuous isn't set, we set the
  * V4L2_MBUS_CSI2_CONTINUOUS_CLOCK flag.
  * The caller should hold a reference to @node.
+ *
+ * Return: 0.
  */
-void v4l2_of_parse_endpoint(const struct device_node *node,
-			    struct v4l2_of_endpoint *endpoint)
+int v4l2_of_parse_endpoint(const struct device_node *node,
+			   struct v4l2_of_endpoint *endpoint)
 {
 	struct device_node *port_node = of_get_parent(node);
 
@@ -146,6 +148,8 @@
 		v4l2_of_parse_parallel_bus(node, endpoint);
 
 	of_node_put(port_node);
+
+	return 0;
 }
 EXPORT_SYMBOL(v4l2_of_parse_endpoint);
 
@@ -262,6 +266,6 @@
 	np = of_parse_phandle(node, "remote-endpoint", 0);
 	if (!np)
 		return NULL;
-	return of_get_parent(np);
+	return of_get_next_parent(np);
 }
 EXPORT_SYMBOL(v4l2_of_get_remote_port);
diff --git a/drivers/media/v4l2-core/videobuf-dma-contig.c b/drivers/media/v4l2-core/videobuf-dma-contig.c
index 65411adc..7e6b209 100644
--- a/drivers/media/v4l2-core/videobuf-dma-contig.c
+++ b/drivers/media/v4l2-core/videobuf-dma-contig.c
@@ -66,14 +66,11 @@
 static void videobuf_vm_open(struct vm_area_struct *vma)
 {
 	struct videobuf_mapping *map = vma->vm_private_data;
-	struct videobuf_queue *q = map->q;
 
-	dev_dbg(q->dev, "vm_open %p [count=%u,vma=%08lx-%08lx]\n",
+	dev_dbg(map->q->dev, "vm_open %p [count=%u,vma=%08lx-%08lx]\n",
 		map, map->count, vma->vm_start, vma->vm_end);
 
-	videobuf_queue_lock(q);
 	map->count++;
-	videobuf_queue_unlock(q);
 }
 
 static void videobuf_vm_close(struct vm_area_struct *vma)
@@ -85,11 +82,12 @@
 	dev_dbg(q->dev, "vm_close %p [count=%u,vma=%08lx-%08lx]\n",
 		map, map->count, vma->vm_start, vma->vm_end);
 
-	videobuf_queue_lock(q);
-	if (!--map->count) {
+	map->count--;
+	if (0 == map->count) {
 		struct videobuf_dma_contig_memory *mem;
 
 		dev_dbg(q->dev, "munmap %p q=%p\n", map, q);
+		videobuf_queue_lock(q);
 
 		/* We need first to cancel streams, before unmapping */
 		if (q->streaming)
@@ -128,8 +126,8 @@
 
 		kfree(map);
 
+		videobuf_queue_unlock(q);
 	}
-	videobuf_queue_unlock(q);
 }
 
 static const struct vm_operations_struct videobuf_vm_ops = {
diff --git a/drivers/media/v4l2-core/videobuf-dma-sg.c b/drivers/media/v4l2-core/videobuf-dma-sg.c
index 9db674c..828e7c1 100644
--- a/drivers/media/v4l2-core/videobuf-dma-sg.c
+++ b/drivers/media/v4l2-core/videobuf-dma-sg.c
@@ -338,14 +338,11 @@
 static void videobuf_vm_open(struct vm_area_struct *vma)
 {
 	struct videobuf_mapping *map = vma->vm_private_data;
-	struct videobuf_queue *q = map->q;
 
 	dprintk(2, "vm_open %p [count=%d,vma=%08lx-%08lx]\n", map,
 		map->count, vma->vm_start, vma->vm_end);
 
-	videobuf_queue_lock(q);
 	map->count++;
-	videobuf_queue_unlock(q);
 }
 
 static void videobuf_vm_close(struct vm_area_struct *vma)
@@ -358,9 +355,10 @@
 	dprintk(2, "vm_close %p [count=%d,vma=%08lx-%08lx]\n", map,
 		map->count, vma->vm_start, vma->vm_end);
 
-	videobuf_queue_lock(q);
-	if (!--map->count) {
+	map->count--;
+	if (0 == map->count) {
 		dprintk(1, "munmap %p q=%p\n", map, q);
+		videobuf_queue_lock(q);
 		for (i = 0; i < VIDEO_MAX_FRAME; i++) {
 			if (NULL == q->bufs[i])
 				continue;
@@ -376,9 +374,9 @@
 			q->bufs[i]->baddr = 0;
 			q->ops->buf_release(q, q->bufs[i]);
 		}
+		videobuf_queue_unlock(q);
 		kfree(map);
 	}
-	videobuf_queue_unlock(q);
 	return;
 }
 
diff --git a/drivers/media/v4l2-core/videobuf-vmalloc.c b/drivers/media/v4l2-core/videobuf-vmalloc.c
index 1365c65..2ff7fcc 100644
--- a/drivers/media/v4l2-core/videobuf-vmalloc.c
+++ b/drivers/media/v4l2-core/videobuf-vmalloc.c
@@ -54,14 +54,11 @@
 static void videobuf_vm_open(struct vm_area_struct *vma)
 {
 	struct videobuf_mapping *map = vma->vm_private_data;
-	struct videobuf_queue *q = map->q;
 
 	dprintk(2, "vm_open %p [count=%u,vma=%08lx-%08lx]\n", map,
 		map->count, vma->vm_start, vma->vm_end);
 
-	videobuf_queue_lock(q);
 	map->count++;
-	videobuf_queue_unlock(q);
 }
 
 static void videobuf_vm_close(struct vm_area_struct *vma)
@@ -73,11 +70,12 @@
 	dprintk(2, "vm_close %p [count=%u,vma=%08lx-%08lx]\n", map,
 		map->count, vma->vm_start, vma->vm_end);
 
-	videobuf_queue_lock(q);
-	if (!--map->count) {
+	map->count--;
+	if (0 == map->count) {
 		struct videobuf_vmalloc_memory *mem;
 
 		dprintk(1, "munmap %p q=%p\n", map, q);
+		videobuf_queue_lock(q);
 
 		/* We need first to cancel streams, before unmapping */
 		if (q->streaming)
@@ -116,8 +114,8 @@
 
 		kfree(map);
 
+		videobuf_queue_unlock(q);
 	}
-	videobuf_queue_unlock(q);
 
 	return;
 }
diff --git a/drivers/media/v4l2-core/videobuf2-core.c b/drivers/media/v4l2-core/videobuf2-core.c
index 0edc165..a127925 100644
--- a/drivers/media/v4l2-core/videobuf2-core.c
+++ b/drivers/media/v4l2-core/videobuf2-core.c
@@ -298,10 +298,28 @@
  * related information, if no buffers are left return the queue to an
  * uninitialized state. Might be called even if the queue has already been freed.
  */
-static void __vb2_queue_free(struct vb2_queue *q, unsigned int buffers)
+static int __vb2_queue_free(struct vb2_queue *q, unsigned int buffers)
 {
 	unsigned int buffer;
 
+	/*
+	 * Sanity check: when preparing a buffer the queue lock is released for
+	 * a short while (see __buf_prepare for the details), which would allow
+	 * a race with a reqbufs which can call this function. Removing the
+	 * buffers from underneath __buf_prepare is obviously a bad idea, so we
+	 * check if any of the buffers is in the state PREPARING, and if so we
+	 * just return -EAGAIN.
+	 */
+	for (buffer = q->num_buffers - buffers; buffer < q->num_buffers;
+	     ++buffer) {
+		if (q->bufs[buffer] == NULL)
+			continue;
+		if (q->bufs[buffer]->state == VB2_BUF_STATE_PREPARING) {
+			dprintk(1, "reqbufs: preparing buffers, cannot free\n");
+			return -EAGAIN;
+		}
+	}
+
 	/* Call driver-provided cleanup function for each buffer, if provided */
 	if (q->ops->buf_cleanup) {
 		for (buffer = q->num_buffers - buffers; buffer < q->num_buffers;
@@ -326,6 +344,7 @@
 	if (!q->num_buffers)
 		q->memory = 0;
 	INIT_LIST_HEAD(&q->queued_list);
+	return 0;
 }
 
 /**
@@ -481,6 +500,7 @@
 	case VB2_BUF_STATE_PREPARED:
 		b->flags |= V4L2_BUF_FLAG_PREPARED;
 		break;
+	case VB2_BUF_STATE_PREPARING:
 	case VB2_BUF_STATE_DEQUEUED:
 		/* nothing */
 		break;
@@ -657,7 +677,9 @@
 			return -EBUSY;
 		}
 
-		__vb2_queue_free(q, q->num_buffers);
+		ret = __vb2_queue_free(q, q->num_buffers);
+		if (ret)
+			return ret;
 
 		/*
 		 * In case of REQBUFS(0) return immediately without calling
@@ -1116,7 +1138,7 @@
 	int ret;
 	int write = !V4L2_TYPE_IS_OUTPUT(q->type);
 
-	/* Verify and copy relevant information provided by the userspace */
+	/* Copy relevant information provided by the userspace */
 	__fill_vb2_buffer(vb, b, planes);
 
 	for (plane = 0; plane < vb->num_planes; ++plane) {
@@ -1135,6 +1157,8 @@
 
 		if (planes[plane].length < planes[plane].data_offset +
 		    q->plane_sizes[plane]) {
+			dprintk(1, "qbuf: invalid dmabuf length for plane %d\n",
+				plane);
 			ret = -EINVAL;
 			goto err;
 		}
@@ -1226,6 +1250,7 @@
 static int __buf_prepare(struct vb2_buffer *vb, const struct v4l2_buffer *b)
 {
 	struct vb2_queue *q = vb->vb2_queue;
+	struct rw_semaphore *mmap_sem;
 	int ret;
 
 	ret = __verify_length(vb, b);
@@ -1235,12 +1260,32 @@
 		return ret;
 	}
 
+	vb->state = VB2_BUF_STATE_PREPARING;
 	switch (q->memory) {
 	case V4L2_MEMORY_MMAP:
 		ret = __qbuf_mmap(vb, b);
 		break;
 	case V4L2_MEMORY_USERPTR:
+		/*
+		 * In case of user pointer buffers vb2 allocators need to get
+		 * direct access to userspace pages. This requires getting
+		 * the mmap semaphore for read access in the current process
+		 * structure. The same semaphore is taken before calling mmap
+		 * operation, while both qbuf/prepare_buf and mmap are called
+		 * by the driver or v4l2 core with the driver's lock held.
+		 * To avoid an AB-BA deadlock (mmap_sem then driver's lock in
+		 * mmap and driver's lock then mmap_sem in qbuf/prepare_buf),
+		 * the videobuf2 core releases the driver's lock, takes
+		 * mmap_sem and then takes the driver's lock again.
+		 */
+		mmap_sem = &current->mm->mmap_sem;
+		call_qop(q, wait_prepare, q);
+		down_read(mmap_sem);
+		call_qop(q, wait_finish, q);
+
 		ret = __qbuf_userptr(vb, b);
+
+		up_read(mmap_sem);
 		break;
 	case V4L2_MEMORY_DMABUF:
 		ret = __qbuf_dmabuf(vb, b);
@@ -1254,105 +1299,36 @@
 		ret = call_qop(q, buf_prepare, vb);
 	if (ret)
 		dprintk(1, "qbuf: buffer preparation failed: %d\n", ret);
-	else
-		vb->state = VB2_BUF_STATE_PREPARED;
+	vb->state = ret ? VB2_BUF_STATE_DEQUEUED : VB2_BUF_STATE_PREPARED;
 
 	return ret;
 }
 
 static int vb2_queue_or_prepare_buf(struct vb2_queue *q, struct v4l2_buffer *b,
-				    const char *opname,
-				    int (*handler)(struct vb2_queue *,
-						   struct v4l2_buffer *,
-						   struct vb2_buffer *))
+				    const char *opname)
 {
-	struct rw_semaphore *mmap_sem = NULL;
-	struct vb2_buffer *vb;
-	int ret;
-
-	/*
-	 * In case of user pointer buffers vb2 allocators need to get direct
-	 * access to userspace pages. This requires getting the mmap semaphore
-	 * for read access in the current process structure. The same semaphore
-	 * is taken before calling mmap operation, while both qbuf/prepare_buf
-	 * and mmap are called by the driver or v4l2 core with the driver's lock
-	 * held. To avoid an AB-BA deadlock (mmap_sem then driver's lock in mmap
-	 * and driver's lock then mmap_sem in qbuf/prepare_buf) the videobuf2
-	 * core releases the driver's lock, takes mmap_sem and then takes the
-	 * driver's lock again.
-	 *
-	 * To avoid racing with other vb2 calls, which might be called after
-	 * releasing the driver's lock, this operation is performed at the
-	 * beginning of qbuf/prepare_buf processing. This way the queue status
-	 * is consistent after getting the driver's lock back.
-	 */
-	if (q->memory == V4L2_MEMORY_USERPTR) {
-		mmap_sem = &current->mm->mmap_sem;
-		call_qop(q, wait_prepare, q);
-		down_read(mmap_sem);
-		call_qop(q, wait_finish, q);
-	}
-
-	if (q->fileio) {
-		dprintk(1, "%s(): file io in progress\n", opname);
-		ret = -EBUSY;
-		goto unlock;
-	}
-
 	if (b->type != q->type) {
 		dprintk(1, "%s(): invalid buffer type\n", opname);
-		ret = -EINVAL;
-		goto unlock;
+		return -EINVAL;
 	}
 
 	if (b->index >= q->num_buffers) {
 		dprintk(1, "%s(): buffer index out of range\n", opname);
-		ret = -EINVAL;
-		goto unlock;
+		return -EINVAL;
 	}
 
-	vb = q->bufs[b->index];
-	if (NULL == vb) {
+	if (q->bufs[b->index] == NULL) {
 		/* Should never happen */
 		dprintk(1, "%s(): buffer is NULL\n", opname);
-		ret = -EINVAL;
-		goto unlock;
+		return -EINVAL;
 	}
 
 	if (b->memory != q->memory) {
 		dprintk(1, "%s(): invalid memory type\n", opname);
-		ret = -EINVAL;
-		goto unlock;
-	}
-
-	ret = __verify_planes_array(vb, b);
-	if (ret)
-		goto unlock;
-
-	ret = handler(q, b, vb);
-	if (ret)
-		goto unlock;
-
-	/* Fill buffer information for the userspace */
-	__fill_v4l2_buffer(vb, b);
-
-	dprintk(1, "%s() of buffer %d succeeded\n", opname, vb->v4l2_buf.index);
-unlock:
-	if (mmap_sem)
-		up_read(mmap_sem);
-	return ret;
-}
-
-static int __vb2_prepare_buf(struct vb2_queue *q, struct v4l2_buffer *b,
-			     struct vb2_buffer *vb)
-{
-	if (vb->state != VB2_BUF_STATE_DEQUEUED) {
-		dprintk(1, "%s(): invalid buffer state %d\n", __func__,
-			vb->state);
 		return -EINVAL;
 	}
 
-	return __buf_prepare(vb, b);
+	return __verify_planes_array(q->bufs[b->index], b);
 }
 
 /**
@@ -1372,22 +1348,95 @@
  */
 int vb2_prepare_buf(struct vb2_queue *q, struct v4l2_buffer *b)
 {
-	return vb2_queue_or_prepare_buf(q, b, "prepare_buf", __vb2_prepare_buf);
+	struct vb2_buffer *vb;
+	int ret;
+
+	if (q->fileio) {
+		dprintk(1, "%s(): file io in progress\n", __func__);
+		return -EBUSY;
+	}
+
+	ret = vb2_queue_or_prepare_buf(q, b, "prepare_buf");
+	if (ret)
+		return ret;
+
+	vb = q->bufs[b->index];
+	if (vb->state != VB2_BUF_STATE_DEQUEUED) {
+		dprintk(1, "%s(): invalid buffer state %d\n", __func__,
+			vb->state);
+		return -EINVAL;
+	}
+
+	ret = __buf_prepare(vb, b);
+	if (!ret) {
+		/* Fill buffer information for the userspace */
+		__fill_v4l2_buffer(vb, b);
+
+		dprintk(1, "%s() of buffer %d succeeded\n", __func__, vb->v4l2_buf.index);
+	}
+	return ret;
 }
 EXPORT_SYMBOL_GPL(vb2_prepare_buf);
 
-static int __vb2_qbuf(struct vb2_queue *q, struct v4l2_buffer *b,
-		      struct vb2_buffer *vb)
+/**
+ * vb2_start_streaming() - Attempt to start streaming.
+ * @q:		videobuf2 queue
+ *
+ * If there are not enough buffers, then retry_start_streaming is set to
+ * 1 and 0 is returned. The next time a buffer is queued and
+ * retry_start_streaming is 1, this function will be called again to
+ * retry starting the DMA engine.
+ */
+static int vb2_start_streaming(struct vb2_queue *q)
 {
 	int ret;
 
+	/* Tell the driver to start streaming */
+	ret = call_qop(q, start_streaming, q, atomic_read(&q->queued_count));
+
+	/*
+	 * If there are not enough buffers queued to start streaming, then
+	 * the start_streaming operation will return -ENOBUFS and you have to
+	 * retry when the next buffer is queued.
+	 */
+	if (ret == -ENOBUFS) {
+		dprintk(1, "qbuf: not enough buffers, retry when more buffers are queued.\n");
+		q->retry_start_streaming = 1;
+		return 0;
+	}
+	if (ret)
+		dprintk(1, "qbuf: driver refused to start streaming\n");
+	else
+		q->retry_start_streaming = 0;
+	return ret;
+}
+
+static int vb2_internal_qbuf(struct vb2_queue *q, struct v4l2_buffer *b)
+{
+	int ret = vb2_queue_or_prepare_buf(q, b, "qbuf");
+	struct vb2_buffer *vb;
+
+	if (ret)
+		return ret;
+
+	vb = q->bufs[b->index];
+	if (vb->state != VB2_BUF_STATE_DEQUEUED) {
+		dprintk(1, "%s(): invalid buffer state %d\n", __func__,
+			vb->state);
+		return -EINVAL;
+	}
+
 	switch (vb->state) {
 	case VB2_BUF_STATE_DEQUEUED:
 		ret = __buf_prepare(vb, b);
 		if (ret)
 			return ret;
+		break;
 	case VB2_BUF_STATE_PREPARED:
 		break;
+	case VB2_BUF_STATE_PREPARING:
+		dprintk(1, "qbuf: buffer still being prepared\n");
+		return -EINVAL;
 	default:
 		dprintk(1, "qbuf: buffer already in use\n");
 		return -EINVAL;
@@ -1407,6 +1456,16 @@
 	if (q->streaming)
 		__enqueue_in_driver(vb);
 
+	/* Fill buffer information for the userspace */
+	__fill_v4l2_buffer(vb, b);
+
+	if (q->retry_start_streaming) {
+		ret = vb2_start_streaming(q);
+		if (ret)
+			return ret;
+	}
+
+	dprintk(1, "%s() of buffer %d succeeded\n", __func__, vb->v4l2_buf.index);
 	return 0;
 }
 
@@ -1429,7 +1488,12 @@
  */
 int vb2_qbuf(struct vb2_queue *q, struct v4l2_buffer *b)
 {
-	return vb2_queue_or_prepare_buf(q, b, "qbuf", __vb2_qbuf);
+	if (q->fileio) {
+		dprintk(1, "%s(): file io in progress\n", __func__);
+		return -EBUSY;
+	}
+
+	return vb2_internal_qbuf(q, b);
 }
 EXPORT_SYMBOL_GPL(vb2_qbuf);
 
@@ -1550,7 +1614,8 @@
 		return -EINVAL;
 	}
 
-	wait_event(q->done_wq, !atomic_read(&q->queued_count));
+	if (!q->retry_start_streaming)
+		wait_event(q->done_wq, !atomic_read(&q->queued_count));
 	return 0;
 }
 EXPORT_SYMBOL_GPL(vb2_wait_for_all_buffers);
@@ -1579,37 +1644,11 @@
 		}
 }
 
-/**
- * vb2_dqbuf() - Dequeue a buffer to the userspace
- * @q:		videobuf2 queue
- * @b:		buffer structure passed from userspace to vidioc_dqbuf handler
- *		in driver
- * @nonblocking: if true, this call will not sleep waiting for a buffer if no
- *		 buffers ready for dequeuing are present. Normally the driver
- *		 would be passing (file->f_flags & O_NONBLOCK) here
- *
- * Should be called from vidioc_dqbuf ioctl handler of a driver.
- * This function:
- * 1) verifies the passed buffer,
- * 2) calls buf_finish callback in the driver (if provided), in which
- *    driver can perform any additional operations that may be required before
- *    returning the buffer to userspace, such as cache sync,
- * 3) the buffer struct members are filled with relevant information for
- *    the userspace.
- *
- * The return values from this function are intended to be directly returned
- * from vidioc_dqbuf handler in driver.
- */
-int vb2_dqbuf(struct vb2_queue *q, struct v4l2_buffer *b, bool nonblocking)
+static int vb2_internal_dqbuf(struct vb2_queue *q, struct v4l2_buffer *b, bool nonblocking)
 {
 	struct vb2_buffer *vb = NULL;
 	int ret;
 
-	if (q->fileio) {
-		dprintk(1, "dqbuf: file io in progress\n");
-		return -EBUSY;
-	}
-
 	if (b->type != q->type) {
 		dprintk(1, "dqbuf: invalid buffer type\n");
 		return -EINVAL;
@@ -1648,6 +1687,36 @@
 
 	return 0;
 }
+
+/**
+ * vb2_dqbuf() - Dequeue a buffer to the userspace
+ * @q:		videobuf2 queue
+ * @b:		buffer structure passed from userspace to vidioc_dqbuf handler
+ *		in driver
+ * @nonblocking: if true, this call will not sleep waiting for a buffer if no
+ *		 buffers ready for dequeuing are present. Normally the driver
+ *		 would be passing (file->f_flags & O_NONBLOCK) here
+ *
+ * Should be called from vidioc_dqbuf ioctl handler of a driver.
+ * This function:
+ * 1) verifies the passed buffer,
+ * 2) calls buf_finish callback in the driver (if provided), in which
+ *    driver can perform any additional operations that may be required before
+ *    returning the buffer to userspace, such as cache sync,
+ * 3) the buffer struct members are filled with relevant information for
+ *    the userspace.
+ *
+ * The return values from this function are intended to be directly returned
+ * from vidioc_dqbuf handler in driver.
+ */
+int vb2_dqbuf(struct vb2_queue *q, struct v4l2_buffer *b, bool nonblocking)
+{
+	if (q->fileio) {
+		dprintk(1, "dqbuf: file io in progress\n");
+		return -EBUSY;
+	}
+	return vb2_internal_dqbuf(q, b, nonblocking);
+}
 EXPORT_SYMBOL_GPL(vb2_dqbuf);
 
 /**
@@ -1660,6 +1729,11 @@
 {
 	unsigned int i;
 
+	if (q->retry_start_streaming) {
+		q->retry_start_streaming = 0;
+		q->streaming = 0;
+	}
+
 	/*
 	 * Tell driver to stop all transactions and release all queued
 	 * buffers.
@@ -1687,6 +1761,46 @@
 		__vb2_dqbuf(q->bufs[i]);
 }
 
+static int vb2_internal_streamon(struct vb2_queue *q, enum v4l2_buf_type type)
+{
+	struct vb2_buffer *vb;
+	int ret;
+
+	if (type != q->type) {
+		dprintk(1, "streamon: invalid stream type\n");
+		return -EINVAL;
+	}
+
+	if (q->streaming) {
+		dprintk(3, "streamon successful: already streaming\n");
+		return 0;
+	}
+
+	if (!q->num_buffers) {
+		dprintk(1, "streamon: no buffers have been allocated\n");
+		return -EINVAL;
+	}
+
+	/*
+	 * If any buffers were queued before streamon,
+	 * we can now pass them to driver for processing.
+	 */
+	list_for_each_entry(vb, &q->queued_list, queued_entry)
+		__enqueue_in_driver(vb);
+
+	/* Tell driver to start streaming. */
+	ret = vb2_start_streaming(q);
+	if (ret) {
+		__vb2_queue_cancel(q);
+		return ret;
+	}
+
+	q->streaming = 1;
+
+	dprintk(3, "Streamon successful\n");
+	return 0;
+}
+
 /**
  * vb2_streamon - start streaming
  * @q:		videobuf2 queue
@@ -1702,48 +1816,35 @@
  */
 int vb2_streamon(struct vb2_queue *q, enum v4l2_buf_type type)
 {
-	struct vb2_buffer *vb;
-	int ret;
-
 	if (q->fileio) {
 		dprintk(1, "streamon: file io in progress\n");
 		return -EBUSY;
 	}
-
-	if (type != q->type) {
-		dprintk(1, "streamon: invalid stream type\n");
-		return -EINVAL;
-	}
-
-	if (q->streaming) {
-		dprintk(1, "streamon: already streaming\n");
-		return -EBUSY;
-	}
-
-	/*
-	 * If any buffers were queued before streamon,
-	 * we can now pass them to driver for processing.
-	 */
-	list_for_each_entry(vb, &q->queued_list, queued_entry)
-		__enqueue_in_driver(vb);
-
-	/*
-	 * Let driver notice that streaming state has been enabled.
-	 */
-	ret = call_qop(q, start_streaming, q, atomic_read(&q->queued_count));
-	if (ret) {
-		dprintk(1, "streamon: driver refused to start streaming\n");
-		__vb2_queue_cancel(q);
-		return ret;
-	}
-
-	q->streaming = 1;
-
-	dprintk(3, "Streamon successful\n");
-	return 0;
+	return vb2_internal_streamon(q, type);
 }
 EXPORT_SYMBOL_GPL(vb2_streamon);
 
+static int vb2_internal_streamoff(struct vb2_queue *q, enum v4l2_buf_type type)
+{
+	if (type != q->type) {
+		dprintk(1, "streamoff: invalid stream type\n");
+		return -EINVAL;
+	}
+
+	if (!q->streaming) {
+		dprintk(3, "streamoff successful: not streaming\n");
+		return 0;
+	}
+
+	/*
+	 * Cancel will pause streaming and remove all buffers from the driver
+	 * and videobuf, effectively returning control over them to userspace.
+	 */
+	__vb2_queue_cancel(q);
+
+	dprintk(3, "Streamoff successful\n");
+	return 0;
+}
 
 /**
  * vb2_streamoff - stop streaming
@@ -1766,25 +1867,7 @@
 		dprintk(1, "streamoff: file io in progress\n");
 		return -EBUSY;
 	}
-
-	if (type != q->type) {
-		dprintk(1, "streamoff: invalid stream type\n");
-		return -EINVAL;
-	}
-
-	if (!q->streaming) {
-		dprintk(1, "streamoff: not streaming\n");
-		return -EINVAL;
-	}
-
-	/*
-	 * Cancel will pause streaming and remove all buffers from the driver
-	 * and videobuf, effectively returning control over them to userspace.
-	 */
-	__vb2_queue_cancel(q);
-
-	dprintk(3, "Streamoff successful\n");
-	return 0;
+	return vb2_internal_streamoff(q, type);
 }
 EXPORT_SYMBOL_GPL(vb2_streamoff);
 
@@ -2277,15 +2360,16 @@
 				goto err_reqbufs;
 			fileio->bufs[i].queued = 1;
 		}
-
-		/*
-		 * Start streaming.
-		 */
-		ret = vb2_streamon(q, q->type);
-		if (ret)
-			goto err_reqbufs;
+		fileio->index = q->num_buffers;
 	}
 
+	/*
+	 * Start streaming.
+	 */
+	ret = vb2_streamon(q, q->type);
+	if (ret)
+		goto err_reqbufs;
+
 	q->fileio = fileio;
 
 	return ret;
@@ -2308,13 +2392,8 @@
 	struct vb2_fileio_data *fileio = q->fileio;
 
 	if (fileio) {
-		/*
-		 * Hack fileio context to enable direct calls to vb2 ioctl
-		 * interface.
-		 */
+		vb2_internal_streamoff(q, q->type);
 		q->fileio = NULL;
-
-		vb2_streamoff(q, q->type);
 		fileio->req.count = 0;
 		vb2_reqbufs(q, &fileio->req);
 		kfree(fileio);
@@ -2358,39 +2437,34 @@
 	fileio = q->fileio;
 
 	/*
-	 * Hack fileio context to enable direct calls to vb2 ioctl interface.
-	 * The pointer will be restored before returning from this function.
-	 */
-	q->fileio = NULL;
-
-	index = fileio->index;
-	buf = &fileio->bufs[index];
-
-	/*
 	 * Check if we need to dequeue the buffer.
 	 */
-	if (buf->queued) {
-		struct vb2_buffer *vb;
-
+	index = fileio->index;
+	if (index >= q->num_buffers) {
 		/*
 		 * Call vb2_dqbuf to get buffer back.
 		 */
 		memset(&fileio->b, 0, sizeof(fileio->b));
 		fileio->b.type = q->type;
 		fileio->b.memory = q->memory;
-		fileio->b.index = index;
-		ret = vb2_dqbuf(q, &fileio->b, nonblock);
+		ret = vb2_internal_dqbuf(q, &fileio->b, nonblock);
 		dprintk(5, "file io: vb2_dqbuf result: %d\n", ret);
 		if (ret)
-			goto end;
+			return ret;
 		fileio->dq_count += 1;
 
+		index = fileio->b.index;
+		buf = &fileio->bufs[index];
+
 		/*
 		 * Get number of bytes filled by the driver
 		 */
-		vb = q->bufs[index];
-		buf->size = vb2_get_plane_payload(vb, 0);
+		buf->pos = 0;
 		buf->queued = 0;
+		buf->size = read ? vb2_get_plane_payload(q->bufs[index], 0)
+				 : vb2_plane_size(q->bufs[index], 0);
+	} else {
+		buf = &fileio->bufs[index];
 	}
 
 	/*
@@ -2412,8 +2486,7 @@
 		ret = copy_from_user(buf->vaddr + buf->pos, data, count);
 	if (ret) {
 		dprintk(3, "file io: error copying data\n");
-		ret = -EFAULT;
-		goto end;
+		return -EFAULT;
 	}
 
 	/*
@@ -2433,10 +2506,6 @@
 		if (read && (fileio->flags & VB2_FILEIO_READ_ONCE) &&
 		    fileio->dq_count == 1) {
 			dprintk(3, "file io: read limit reached\n");
-			/*
-			 * Restore fileio pointer and release the context.
-			 */
-			q->fileio = fileio;
 			return __vb2_cleanup_fileio(q);
 		}
 
@@ -2448,32 +2517,20 @@
 		fileio->b.memory = q->memory;
 		fileio->b.index = index;
 		fileio->b.bytesused = buf->pos;
-		ret = vb2_qbuf(q, &fileio->b);
+		ret = vb2_internal_qbuf(q, &fileio->b);
 		dprintk(5, "file io: vb2_dbuf result: %d\n", ret);
 		if (ret)
-			goto end;
+			return ret;
 
 		/*
 		 * Buffer has been queued, update the status
 		 */
 		buf->pos = 0;
 		buf->queued = 1;
-		buf->size = q->bufs[0]->v4l2_planes[0].length;
+		buf->size = vb2_plane_size(q->bufs[index], 0);
 		fileio->q_count += 1;
-
-		/*
-		 * Switch to the next buffer
-		 */
-		fileio->index = (index + 1) % q->num_buffers;
-
-		/*
-		 * Start streaming if required.
-		 */
-		if (!read && !q->streaming) {
-			ret = vb2_streamon(q, q->type);
-			if (ret)
-				goto end;
-		}
+		if (fileio->index < q->num_buffers)
+			fileio->index++;
 	}
 
 	/*
@@ -2481,11 +2538,6 @@
 	 */
 	if (ret == 0)
 		ret = count;
-end:
-	/*
-	 * Restore the fileio context and block vb2 ioctl interface.
-	 */
-	q->fileio = fileio;
 	return ret;
 }
 
@@ -2649,16 +2701,29 @@
 }
 EXPORT_SYMBOL_GPL(vb2_fop_mmap);
 
-int vb2_fop_release(struct file *file)
+int _vb2_fop_release(struct file *file, struct mutex *lock)
 {
 	struct video_device *vdev = video_devdata(file);
 
 	if (file->private_data == vdev->queue->owner) {
+		if (lock)
+			mutex_lock(lock);
 		vb2_queue_release(vdev->queue);
 		vdev->queue->owner = NULL;
+		if (lock)
+			mutex_unlock(lock);
 	}
 	return v4l2_fh_release(file);
 }
+EXPORT_SYMBOL_GPL(_vb2_fop_release);
+
+int vb2_fop_release(struct file *file)
+{
+	struct video_device *vdev = video_devdata(file);
+	struct mutex *lock = vdev->queue->lock ? vdev->queue->lock : vdev->lock;
+
+	return _vb2_fop_release(file, lock);
+}
 EXPORT_SYMBOL_GPL(vb2_fop_release);
 
 ssize_t vb2_fop_write(struct file *file, const char __user *buf,
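The new start_streaming retry path gives drivers a clean way to require a minimum number of queued buffers before the hardware is kicked off: the driver's start_streaming callback returns -ENOBUFS until enough buffers are queued, and vb2 retries on each subsequent qbuf. A hypothetical callback illustrating the contract (the my_* names and the minimum count are assumptions, not from any in-tree driver):

	static int my_start_streaming(struct vb2_queue *q, unsigned int count)
	{
		struct my_dev *dev = vb2_get_drv_priv(q);

		/* not enough buffers yet: vb2 will call us again on the next qbuf */
		if (count < MY_MIN_BUFFERS_NEEDED)
			return -ENOBUFS;

		return my_start_dma(dev);	/* really start the DMA engine */
	}
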
diff --git a/drivers/media/v4l2-core/videobuf2-dma-sg.c b/drivers/media/v4l2-core/videobuf2-dma-sg.c
index 0d3a8ff..c779f21 100644
--- a/drivers/media/v4l2-core/videobuf2-dma-sg.c
+++ b/drivers/media/v4l2-core/videobuf2-dma-sg.c
@@ -40,6 +40,7 @@
 	unsigned int			num_pages;
 	atomic_t			refcount;
 	struct vb2_vmarea_handler	handler;
+	struct vm_area_struct		*vma;
 };
 
 static void vb2_dma_sg_put(void *buf_priv);
@@ -155,12 +156,18 @@
 	}
 }
 
+static inline int vma_is_io(struct vm_area_struct *vma)
+{
+	return !!(vma->vm_flags & (VM_IO | VM_PFNMAP));
+}
+
 static void *vb2_dma_sg_get_userptr(void *alloc_ctx, unsigned long vaddr,
 				    unsigned long size, int write)
 {
 	struct vb2_dma_sg_buf *buf;
 	unsigned long first, last;
 	int num_pages_from_user;
+	struct vm_area_struct *vma;
 
 	buf = kzalloc(sizeof *buf, GFP_KERNEL);
 	if (!buf)
@@ -180,7 +187,38 @@
 	if (!buf->pages)
 		goto userptr_fail_alloc_pages;
 
-	num_pages_from_user = get_user_pages(current, current->mm,
+	vma = find_vma(current->mm, vaddr);
+	if (!vma) {
+		dprintk(1, "no vma for address %lu\n", vaddr);
+		goto userptr_fail_find_vma;
+	}
+
+	if (vma->vm_end < vaddr + size) {
+		dprintk(1, "vma at %lu is too small for %lu bytes\n",
+			vaddr, size);
+		goto userptr_fail_find_vma;
+	}
+
+	buf->vma = vb2_get_vma(vma);
+	if (!buf->vma) {
+		dprintk(1, "failed to copy vma\n");
+		goto userptr_fail_find_vma;
+	}
+
+	if (vma_is_io(buf->vma)) {
+		for (num_pages_from_user = 0;
+		     num_pages_from_user < buf->num_pages;
+		     ++num_pages_from_user, vaddr += PAGE_SIZE) {
+			unsigned long pfn;
+
+			if (follow_pfn(buf->vma, vaddr, &pfn)) {
+				dprintk(1, "no page for address %lu\n", vaddr);
+				break;
+			}
+			buf->pages[num_pages_from_user] = pfn_to_page(pfn);
+		}
+	} else
+		num_pages_from_user = get_user_pages(current, current->mm,
 					     vaddr & PAGE_MASK,
 					     buf->num_pages,
 					     write,
@@ -200,9 +238,12 @@
 userptr_fail_alloc_table_from_pages:
 userptr_fail_get_user_pages:
 	dprintk(1, "get_user_pages requested/got: %d/%d]\n",
-	       num_pages_from_user, buf->num_pages);
-	while (--num_pages_from_user >= 0)
-		put_page(buf->pages[num_pages_from_user]);
+		buf->num_pages, num_pages_from_user);
+	if (!vma_is_io(buf->vma))
+		while (--num_pages_from_user >= 0)
+			put_page(buf->pages[num_pages_from_user]);
+	vb2_put_vma(buf->vma);
+userptr_fail_find_vma:
 	kfree(buf->pages);
 userptr_fail_alloc_pages:
 	kfree(buf);
@@ -226,9 +267,11 @@
 	while (--i >= 0) {
 		if (buf->write)
 			set_page_dirty_lock(buf->pages[i]);
-		put_page(buf->pages[i]);
+		if (!vma_is_io(buf->vma))
+			put_page(buf->pages[i]);
 	}
 	kfree(buf->pages);
+	vb2_put_vma(buf->vma);
 	kfree(buf);
 }
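The vma_is_io() path matters for USERPTR buffers whose backing memory is another device mapping (VM_IO/VM_PFNMAP), e.g. a pointer obtained by mmap()ing a frame buffer: get_user_pages() cannot pin such pages, so the pfns are resolved with follow_pfn() instead. From user space this is just ordinary USERPTR queueing; a sketch (fb_ptr/fb_size are illustrative):

	struct v4l2_buffer buf;

	memset(&buf, 0, sizeof(buf));
	buf.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
	buf.memory = V4L2_MEMORY_USERPTR;
	buf.index = 0;
	buf.m.userptr = (unsigned long)fb_ptr;	/* e.g. points into a mmap()ed frame buffer */
	buf.length = fb_size;
	ioctl(fd, VIDIOC_QBUF, &buf);
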
 
diff --git a/drivers/message/fusion/mptsas.c b/drivers/message/fusion/mptsas.c
index dd239bd..00d339c 100644
--- a/drivers/message/fusion/mptsas.c
+++ b/drivers/message/fusion/mptsas.c
@@ -2235,10 +2235,10 @@
 	}
 
 	/* do we need to support multiple segments? */
-	if (bio_segments(req->bio) > 1 || bio_segments(rsp->bio) > 1) {
-		printk(MYIOC_s_ERR_FMT "%s: multiple segments req %u %u, rsp %u %u\n",
-		    ioc->name, __func__, bio_segments(req->bio), blk_rq_bytes(req),
-		    bio_segments(rsp->bio), blk_rq_bytes(rsp));
+	if (bio_multiple_segments(req->bio) ||
+	    bio_multiple_segments(rsp->bio)) {
+		printk(MYIOC_s_ERR_FMT "%s: multiple segments req %u, rsp %u\n",
+		    ioc->name, __func__, blk_rq_bytes(req), blk_rq_bytes(rsp));
 		return -EINVAL;
 	}
 
diff --git a/drivers/message/i2o/i2o_config.c b/drivers/message/i2o/i2o_config.c
index a60c188..04bd3b6 100644
--- a/drivers/message/i2o/i2o_config.c
+++ b/drivers/message/i2o/i2o_config.c
@@ -754,19 +754,19 @@
 				 unsigned long arg)
 {
 	int ret;
-	mutex_lock(&i2o_cfg_mutex);
 	switch (cmd) {
 	case I2OGETIOPS:
 		ret = i2o_cfg_ioctl(file, cmd, arg);
 		break;
 	case I2OPASSTHRU32:
+		mutex_lock(&i2o_cfg_mutex);
 		ret = i2o_cfg_passthru32(file, cmd, arg);
+		mutex_unlock(&i2o_cfg_mutex);
 		break;
 	default:
 		ret = -ENOIOCTLCMD;
 		break;
 	}
-	mutex_unlock(&i2o_cfg_mutex);
 	return ret;
 }
 
diff --git a/drivers/mfd/max14577.c b/drivers/mfd/max14577.c
index ac514fb..71aa14a 100644
--- a/drivers/mfd/max14577.c
+++ b/drivers/mfd/max14577.c
@@ -173,6 +173,7 @@
 };
 MODULE_DEVICE_TABLE(i2c, max14577_i2c_id);
 
+#ifdef CONFIG_PM_SLEEP
 static int max14577_suspend(struct device *dev)
 {
 	struct i2c_client *i2c = container_of(dev, struct i2c_client, dev);
@@ -208,6 +209,7 @@
 
 	return 0;
 }
+#endif /* CONFIG_PM_SLEEP */
 
 static struct of_device_id max14577_dt_match[] = {
 	{ .compatible = "maxim,max14577", },
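The CONFIG_PM_SLEEP guards avoid "defined but not used" warnings: SET_SYSTEM_SLEEP_PM_OPS() (used by SIMPLE_DEV_PM_OPS()) only references the callbacks when CONFIG_PM_SLEEP is enabled, so unguarded suspend/resume handlers become dead code otherwise. The usual pattern (foo_* names are illustrative):

	#ifdef CONFIG_PM_SLEEP
	static int foo_suspend(struct device *dev)
	{
		/* ... */
		return 0;
	}

	static int foo_resume(struct device *dev)
	{
		/* ... */
		return 0;
	}
	#endif /* CONFIG_PM_SLEEP */

	static SIMPLE_DEV_PM_OPS(foo_pm_ops, foo_suspend, foo_resume);
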
diff --git a/drivers/mfd/max8997.c b/drivers/mfd/max8997.c
index be88a3b..5adede0 100644
--- a/drivers/mfd/max8997.c
+++ b/drivers/mfd/max8997.c
@@ -164,15 +164,15 @@
 	return pd;
 }
 
-static inline int max8997_i2c_get_driver_data(struct i2c_client *i2c,
+static inline unsigned long max8997_i2c_get_driver_data(struct i2c_client *i2c,
 						const struct i2c_device_id *id)
 {
 	if (IS_ENABLED(CONFIG_OF) && i2c->dev.of_node) {
 		const struct of_device_id *match;
 		match = of_match_node(max8997_pmic_dt_match, i2c->dev.of_node);
-		return (int)match->data;
+		return (unsigned long)match->data;
 	}
-	return (int)id->driver_data;
+	return id->driver_data;
 }
 
 static int max8997_i2c_probe(struct i2c_client *i2c,
diff --git a/drivers/mfd/max8998.c b/drivers/mfd/max8998.c
index 612ca40..5d5e186 100644
--- a/drivers/mfd/max8998.c
+++ b/drivers/mfd/max8998.c
@@ -169,16 +169,16 @@
 	return pd;
 }
 
-static inline int max8998_i2c_get_driver_data(struct i2c_client *i2c,
+static inline unsigned long max8998_i2c_get_driver_data(struct i2c_client *i2c,
 						const struct i2c_device_id *id)
 {
 	if (IS_ENABLED(CONFIG_OF) && i2c->dev.of_node) {
 		const struct of_device_id *match;
 		match = of_match_node(max8998_dt_match, i2c->dev.of_node);
-		return (int)(long)match->data;
+		return (unsigned long)match->data;
 	}
 
-	return (int)id->driver_data;
+	return id->driver_data;
 }
 
 static int max8998_i2c_probe(struct i2c_client *i2c,
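These cast fixes matter on 64-bit builds: of_device_id.data is a void * and i2c_device_id.driver_data is a kernel_ulong_t, so round-tripping either through int truncates the value and triggers pointer-to-int cast warnings. The safe pattern, with illustrative table entries and type constants:

	static const struct of_device_id foo_dt_match[] = {
		{ .compatible = "vendor,foo-a", .data = (void *)TYPE_FOO_A },
		{ .compatible = "vendor,foo-b", .data = (void *)TYPE_FOO_B },
		{ },
	};

	match = of_match_node(foo_dt_match, i2c->dev.of_node);
	type = (unsigned long)match->data;	/* never (int): truncates on 64-bit */
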
diff --git a/drivers/mfd/sec-core.c b/drivers/mfd/sec-core.c
index a139798..714e213 100644
--- a/drivers/mfd/sec-core.c
+++ b/drivers/mfd/sec-core.c
@@ -315,6 +315,7 @@
 	return 0;
 }
 
+#ifdef CONFIG_PM_SLEEP
 static int sec_pmic_suspend(struct device *dev)
 {
 	struct i2c_client *i2c = container_of(dev, struct i2c_client, dev);
@@ -349,6 +350,7 @@
 
 	return 0;
 }
+#endif /* CONFIG_PM_SLEEP */
 
 static SIMPLE_DEV_PM_OPS(sec_pmic_pm_ops, sec_pmic_suspend, sec_pmic_resume);
 
diff --git a/drivers/mfd/tps65217.c b/drivers/mfd/tps65217.c
index 966cf65..3cc4c70 100644
--- a/drivers/mfd/tps65217.c
+++ b/drivers/mfd/tps65217.c
@@ -158,7 +158,7 @@
 {
 	struct tps65217 *tps;
 	unsigned int version;
-	unsigned int chip_id = ids->driver_data;
+	unsigned long chip_id = ids->driver_data;
 	const struct of_device_id *match;
 	bool status_off = false;
 	int ret;
@@ -170,7 +170,7 @@
 				"Failed to find matching dt id\n");
 			return -EINVAL;
 		}
-		chip_id = (unsigned int)(unsigned long)match->data;
+		chip_id = (unsigned long)match->data;
 		status_off = of_property_read_bool(client->dev.of_node,
 					"ti,pmic-shutdown-controller");
 	}
diff --git a/drivers/mfd/wm8994-core.c b/drivers/mfd/wm8994-core.c
index ba04f1b..e6fab94 100644
--- a/drivers/mfd/wm8994-core.c
+++ b/drivers/mfd/wm8994-core.c
@@ -636,7 +636,7 @@
 	if (i2c->dev.of_node) {
 		of_id = of_match_device(wm8994_of_match, &i2c->dev);
 		if (of_id)
-			wm8994->type = (int)of_id->data;
+			wm8994->type = (enum wm8994_type)of_id->data;
 	} else {
 		wm8994->type = id->driver_data;
 	}
diff --git a/drivers/misc/eeprom/eeprom.c b/drivers/misc/eeprom/eeprom.c
index c169e07..f0fa4e8 100644
--- a/drivers/misc/eeprom/eeprom.c
+++ b/drivers/misc/eeprom/eeprom.c
@@ -3,7 +3,7 @@
  *                           Philip Edelbrock <phil@netroedge.com>
  * Copyright (C) 2003 Greg Kroah-Hartman <greg@kroah.com>
  * Copyright (C) 2003 IBM Corp.
- * Copyright (C) 2004 Jean Delvare <khali@linux-fr.org>
+ * Copyright (C) 2004 Jean Delvare <jdelvare@suse.de>
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License as published by
diff --git a/drivers/misc/genwqe/card_dev.c b/drivers/misc/genwqe/card_dev.c
index 8f8a6b3..2c2c9cc 100644
--- a/drivers/misc/genwqe/card_dev.c
+++ b/drivers/misc/genwqe/card_dev.c
@@ -787,6 +787,7 @@
 	if (rc != 0) {
 		dev_err(&pci_dev->dev,
 			"[%s] genwqe_user_vmap rc=%d\n", __func__, rc);
+		kfree(dma_map);
 		return rc;
 	}
 
diff --git a/drivers/misc/mei/client.c b/drivers/misc/mei/client.c
index 1ee2b94..9b809cf 100644
--- a/drivers/misc/mei/client.c
+++ b/drivers/misc/mei/client.c
@@ -908,7 +908,6 @@
 	list_for_each_entry_safe(cl, next, &dev->file_list, link) {
 		cl->state = MEI_FILE_DISCONNECTED;
 		cl->mei_flow_ctrl_creds = 0;
-		cl->read_cb = NULL;
 		cl->timer_count = 0;
 	}
 }
@@ -942,8 +941,16 @@
 void mei_cl_all_write_clear(struct mei_device *dev)
 {
 	struct mei_cl_cb *cb, *next;
+	struct list_head *list;
 
-	list_for_each_entry_safe(cb, next, &dev->write_list.list, list) {
+	list = &dev->write_list.list;
+	list_for_each_entry_safe(cb, next, list, list) {
+		list_del(&cb->list);
+		mei_io_cb_free(cb);
+	}
+
+	list = &dev->write_waiting_list.list;
+	list_for_each_entry_safe(cb, next, list, list) {
 		list_del(&cb->list);
 		mei_io_cb_free(cb);
 	}
diff --git a/drivers/misc/mic/host/mic_virtio.c b/drivers/misc/mic/host/mic_virtio.c
index 752ff87..7e1ef0e 100644
--- a/drivers/misc/mic/host/mic_virtio.c
+++ b/drivers/misc/mic/host/mic_virtio.c
@@ -156,7 +156,8 @@
 static int _mic_virtio_copy(struct mic_vdev *mvdev,
 	struct mic_copy_desc *copy)
 {
-	int ret = 0, iovcnt = copy->iovcnt;
+	int ret = 0;
+	u32 iovcnt = copy->iovcnt;
 	struct iovec iov;
 	struct iovec __user *u_iov = copy->iov;
 	void __user *ubuf = NULL;
diff --git a/drivers/misc/sgi-gru/grukdump.c b/drivers/misc/sgi-gru/grukdump.c
index 9b2062d..2bef3f7 100644
--- a/drivers/misc/sgi-gru/grukdump.c
+++ b/drivers/misc/sgi-gru/grukdump.c
@@ -139,8 +139,11 @@
 
 	ubuf += sizeof(hdr);
 	ubufcch = ubuf;
-	if (gru_user_copy_handle(&ubuf, cch))
-		goto fail;
+	if (gru_user_copy_handle(&ubuf, cch)) {
+		if (cch_locked)
+			unlock_cch_handle(cch);
+		return -EFAULT;
+	}
 	if (cch_locked)
 		ubufcch->delresp = 0;
 	bytes = sizeof(hdr) + GRU_CACHE_LINE_BYTES;
@@ -179,10 +182,6 @@
 		ret = -EFAULT;
 
 	return ret ? ret : bytes;
-
-fail:
-	unlock_cch_handle(cch);
-	return -EFAULT;
 }
 
 int gru_dump_chiplet_request(unsigned long arg)
diff --git a/drivers/mmc/card/queue.c b/drivers/mmc/card/queue.c
index 357bbc5..3e049c1 100644
--- a/drivers/mmc/card/queue.c
+++ b/drivers/mmc/card/queue.c
@@ -197,7 +197,7 @@
 	struct mmc_queue_req *mqrq_prev = &mq->mqrq[1];
 
 	if (mmc_dev(host)->dma_mask && *mmc_dev(host)->dma_mask)
-		limit = dma_max_pfn(mmc_dev(host)) << PAGE_SHIFT;
+		limit = (u64)dma_max_pfn(mmc_dev(host)) << PAGE_SHIFT;
 
 	mq->card = card;
 	mq->queue = blk_init_queue(mmc_request_fn, lock);
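The (u64) cast is needed because dma_max_pfn() returns an unsigned long: on a 32-bit kernel with more than 4 GiB of DMA-addressable memory (e.g. ARM LPAE) the left shift overflows before the result reaches the 64-bit limit. Roughly, assuming PAGE_SHIFT == 12 and an illustrative pfn:

	unsigned long max_pfn = 0x1fffff;	/* ~8 GiB worth of pages */
	u64 good = (u64)max_pfn << PAGE_SHIFT;	/* 0x1fffff000 */
	u64 bad = max_pfn << PAGE_SHIFT;	/* shift done in 32 bits first,
						 * truncated to 0xfffff000 */
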
diff --git a/drivers/mtd/nand/mpc5121_nfc.c b/drivers/mtd/nand/mpc5121_nfc.c
index 61e2abc..31ee7cf 100644
--- a/drivers/mtd/nand/mpc5121_nfc.c
+++ b/drivers/mtd/nand/mpc5121_nfc.c
@@ -729,7 +729,7 @@
 	of_node_put(rootnode);
 
 	/* Enable NFC clock */
-	clk = devm_clk_get(dev, "nfc_clk");
+	clk = devm_clk_get(dev, "ipg");
 	if (IS_ERR(clk)) {
 		dev_err(dev, "Unable to acquire NFC clock!\n");
 		retval = PTR_ERR(clk);
diff --git a/drivers/mtd/ubi/attach.c b/drivers/mtd/ubi/attach.c
index 33bb1f2..6f27d9a 100644
--- a/drivers/mtd/ubi/attach.c
+++ b/drivers/mtd/ubi/attach.c
@@ -1453,8 +1453,10 @@
 		struct ubi_attach_info *scan_ai;
 
 		scan_ai = alloc_ai("ubi_ckh_aeb_slab_cache");
-		if (!scan_ai)
+		if (!scan_ai) {
+			err = -ENOMEM;
 			goto out_wl;
+		}
 
 		err = scan_all(ubi, scan_ai, 0);
 		if (err) {
diff --git a/drivers/mtd/ubi/build.c b/drivers/mtd/ubi/build.c
index e05dc62..57deae9 100644
--- a/drivers/mtd/ubi/build.c
+++ b/drivers/mtd/ubi/build.c
@@ -1245,8 +1245,10 @@
 	ubi_wl_entry_slab = kmem_cache_create("ubi_wl_entry_slab",
 					      sizeof(struct ubi_wl_entry),
 					      0, 0, NULL);
-	if (!ubi_wl_entry_slab)
+	if (!ubi_wl_entry_slab) {
+		err = -ENOMEM;
 		goto out_dev_unreg;
+	}
 
 	err = ubi_debugfs_init();
 	if (err)
diff --git a/drivers/mtd/ubi/io.c b/drivers/mtd/ubi/io.c
index bf79def..d361349 100644
--- a/drivers/mtd/ubi/io.c
+++ b/drivers/mtd/ubi/io.c
@@ -495,10 +495,12 @@
  */
 static int nor_erase_prepare(struct ubi_device *ubi, int pnum)
 {
-	int err, err1;
+	int err;
 	size_t written;
 	loff_t addr;
 	uint32_t data = 0;
+	struct ubi_ec_hdr ec_hdr;
+
 	/*
 	 * Note, we cannot generally define VID header buffers on stack,
 	 * because of the way we deal with these buffers (see the header
@@ -509,50 +511,38 @@
 	struct ubi_vid_hdr vid_hdr;
 
 	/*
+	 * If VID or EC is valid, we have to corrupt them before erasing.
 	 * It is important to first invalidate the EC header, and then the VID
 	 * header. Otherwise a power cut may lead to valid EC header and
 	 * invalid VID header, in which case UBI will treat this PEB as
 	 * corrupted and will try to preserve it, and print scary warnings.
 	 */
 	addr = (loff_t)pnum * ubi->peb_size;
-	err = mtd_write(ubi->mtd, addr, 4, &written, (void *)&data);
-	if (!err) {
+	err = ubi_io_read_ec_hdr(ubi, pnum, &ec_hdr, 0);
+	if (err != UBI_IO_BAD_HDR_EBADMSG && err != UBI_IO_BAD_HDR &&
+	    err != UBI_IO_FF) {
+		err = mtd_write(ubi->mtd, addr, 4, &written, (void *)&data);
+		if (err)
+			goto error;
+	}
+
+	err = ubi_io_read_vid_hdr(ubi, pnum, &vid_hdr, 0);
+	if (err != UBI_IO_BAD_HDR_EBADMSG && err != UBI_IO_BAD_HDR &&
+	    err != UBI_IO_FF) {
 		addr += ubi->vid_hdr_aloffset;
 		err = mtd_write(ubi->mtd, addr, 4, &written, (void *)&data);
-		if (!err)
-			return 0;
+		if (err)
+			goto error;
 	}
+	return 0;
 
+error:
 	/*
-	 * We failed to write to the media. This was observed with Spansion
-	 * S29GL512N NOR flash. Most probably the previously eraseblock erasure
-	 * was interrupted at a very inappropriate moment, so it became
-	 * unwritable. In this case we probably anyway have garbage in this
-	 * PEB.
+	 * The PEB contains a valid VID or EC header, but we cannot invalidate
+	 * it. Supposedly the flash media or the driver is screwed up, so
+	 * return an error.
 	 */
-	err1 = ubi_io_read_vid_hdr(ubi, pnum, &vid_hdr, 0);
-	if (err1 == UBI_IO_BAD_HDR_EBADMSG || err1 == UBI_IO_BAD_HDR ||
-	    err1 == UBI_IO_FF) {
-		struct ubi_ec_hdr ec_hdr;
-
-		err1 = ubi_io_read_ec_hdr(ubi, pnum, &ec_hdr, 0);
-		if (err1 == UBI_IO_BAD_HDR_EBADMSG || err1 == UBI_IO_BAD_HDR ||
-		    err1 == UBI_IO_FF)
-			/*
-			 * Both VID and EC headers are corrupted, so we can
-			 * safely erase this PEB and not afraid that it will be
-			 * treated as a valid PEB in case of an unclean reboot.
-			 */
-			return 0;
-	}
-
-	/*
-	 * The PEB contains a valid VID header, but we cannot invalidate it.
-	 * Supposedly the flash media or the driver is screwed up, so return an
-	 * error.
-	 */
-	ubi_err("cannot invalidate PEB %d, write returned %d read returned %d",
-		pnum, err, err1);
+	ubi_err("cannot invalidate PEB %d, write returned %d", pnum, err);
 	ubi_dump_flash(ubi, pnum, 0, ubi->peb_size);
 	return -EIO;
 }
diff --git a/drivers/net/Kconfig b/drivers/net/Kconfig
index f342278..494b888 100644
--- a/drivers/net/Kconfig
+++ b/drivers/net/Kconfig
@@ -139,7 +139,7 @@
 	  This adds a specialized tap character device driver that is based
 	  on the MAC-VLAN network interface, called macvtap. A macvtap device
 	  can be added in the same way as a macvlan device, using 'type
-	  macvlan', and then be accessed through the tap user space interface.
+	  macvtap', and then be accessed through the tap user space interface.
 
 	  To compile this driver as a module, choose M here: the module
 	  will be called macvtap.
diff --git a/drivers/net/bonding/bond_3ad.c b/drivers/net/bonding/bond_3ad.c
index cce1f1b..6d20fbd 100644
--- a/drivers/net/bonding/bond_3ad.c
+++ b/drivers/net/bonding/bond_3ad.c
@@ -1796,8 +1796,6 @@
 	BOND_AD_INFO(bond).agg_select_timer = timeout;
 }
 
-static u16 aggregator_identifier;
-
 /**
  * bond_3ad_initialize - initialize a bond's 802.3ad parameters and structures
  * @bond: bonding struct to work on
@@ -1811,7 +1809,7 @@
 	if (!MAC_ADDRESS_EQUAL(&(BOND_AD_INFO(bond).system.sys_mac_addr),
 				bond->dev->dev_addr)) {
 
-		aggregator_identifier = 0;
+		BOND_AD_INFO(bond).aggregator_identifier = 0;
 
 		BOND_AD_INFO(bond).system.sys_priority = 0xFFFF;
 		BOND_AD_INFO(bond).system.sys_mac_addr = *((struct mac_addr *)bond->dev->dev_addr);
@@ -1880,7 +1878,7 @@
 		ad_initialize_agg(aggregator);
 
 		aggregator->aggregator_mac_address = *((struct mac_addr *)bond->dev->dev_addr);
-		aggregator->aggregator_identifier = (++aggregator_identifier);
+		aggregator->aggregator_identifier = ++BOND_AD_INFO(bond).aggregator_identifier;
 		aggregator->slave = slave;
 		aggregator->is_active = 0;
 		aggregator->num_of_ports = 0;
diff --git a/drivers/net/bonding/bond_3ad.h b/drivers/net/bonding/bond_3ad.h
index 13dc9d3..f4dd959 100644
--- a/drivers/net/bonding/bond_3ad.h
+++ b/drivers/net/bonding/bond_3ad.h
@@ -253,6 +253,7 @@
 struct ad_bond_info {
 	struct ad_system system;	    /* 802.3ad system structure */
 	u32 agg_select_timer;	    // Timer to select aggregator after all adapter's hand shakes
+	u16 aggregator_identifier;
 };
 
 struct ad_slave_info {
diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
index a7db819..1c6104d 100644
--- a/drivers/net/bonding/bond_main.c
+++ b/drivers/net/bonding/bond_main.c
@@ -1270,9 +1270,13 @@
 
 	if (slave_ops->ndo_set_mac_address == NULL) {
 		if (!bond_has_slaves(bond)) {
-			pr_warning("%s: Warning: The first slave device specified does not support setting the MAC address. Setting fail_over_mac to active.",
-				   bond_dev->name);
-			bond->params.fail_over_mac = BOND_FOM_ACTIVE;
+			pr_warn("%s: Warning: The first slave device specified does not support setting the MAC address.\n",
+				bond_dev->name);
+			if (bond->params.mode == BOND_MODE_ACTIVEBACKUP) {
+				bond->params.fail_over_mac = BOND_FOM_ACTIVE;
+				pr_warn("%s: Setting fail_over_mac to active for active-backup mode.\n",
+					bond_dev->name);
+			}
 		} else if (bond->params.fail_over_mac != BOND_FOM_ACTIVE) {
 			pr_err("%s: Error: The slave device specified does not support setting the MAC address, but fail_over_mac is not set to active.\n",
 			       bond_dev->name);
@@ -1315,7 +1319,8 @@
 	 */
 	memcpy(new_slave->perm_hwaddr, slave_dev->dev_addr, ETH_ALEN);
 
-	if (!bond->params.fail_over_mac) {
+	if (!bond->params.fail_over_mac ||
+	    bond->params.mode != BOND_MODE_ACTIVEBACKUP) {
 		/*
 		 * Set slave to master's mac address.  The application already
 		 * set the master's mac address to that of the first slave
@@ -1505,7 +1510,6 @@
 	slave_dev->npinfo = bond->dev->npinfo;
 	if (slave_dev->npinfo) {
 		if (slave_enable_netpoll(new_slave)) {
-			read_unlock(&bond->lock);
 			pr_info("Error, %s: master_dev is using netpoll, "
 				 "but new slave device does not support netpoll.\n",
 				 bond_dev->name);
@@ -1539,9 +1543,11 @@
 	bond_set_carrier(bond);
 
 	if (USES_PRIMARY(bond->params.mode)) {
+		block_netpoll_tx();
 		write_lock_bh(&bond->curr_slave_lock);
 		bond_select_active_slave(bond);
 		write_unlock_bh(&bond->curr_slave_lock);
+		unblock_netpoll_tx();
 	}
 
 	pr_info("%s: enslaving %s as a%s interface with a%s link.\n",
@@ -1567,10 +1573,12 @@
 	if (bond->primary_slave == new_slave)
 		bond->primary_slave = NULL;
 	if (bond->curr_active_slave == new_slave) {
+		block_netpoll_tx();
 		write_lock_bh(&bond->curr_slave_lock);
 		bond_change_active_slave(bond, NULL);
 		bond_select_active_slave(bond);
 		write_unlock_bh(&bond->curr_slave_lock);
+		unblock_netpoll_tx();
 	}
 	slave_disable_netpoll(new_slave);
 
@@ -1579,7 +1587,8 @@
 	dev_close(slave_dev);
 
 err_restore_mac:
-	if (!bond->params.fail_over_mac) {
+	if (!bond->params.fail_over_mac ||
+	    bond->params.mode != BOND_MODE_ACTIVEBACKUP) {
 		/* XXX TODO - fom follow mode needs to change master's
 		 * MAC if this slave's MAC is in use by the bond, or at
 		 * least print a warning.
@@ -1672,7 +1681,8 @@
 
 	bond->current_arp_slave = NULL;
 
-	if (!all && !bond->params.fail_over_mac) {
+	if (!all && (!bond->params.fail_over_mac ||
+		     bond->params.mode != BOND_MODE_ACTIVEBACKUP)) {
 		if (ether_addr_equal_64bits(bond_dev->dev_addr, slave->perm_hwaddr) &&
 		    bond_has_slaves(bond))
 			pr_warn("%s: Warning: the permanent HWaddr of %s - %pM - is still in use by %s. Set the HWaddr of %s to a different address to avoid conflicts.\n",
@@ -1769,7 +1779,8 @@
 	/* close slave before restoring its mac address */
 	dev_close(slave_dev);
 
-	if (bond->params.fail_over_mac != BOND_FOM_ACTIVE) {
+	if (bond->params.fail_over_mac != BOND_FOM_ACTIVE ||
+	    bond->params.mode != BOND_MODE_ACTIVEBACKUP) {
 		/* restore original ("permanent") mac address */
 		memcpy(addr.sa_data, slave->perm_hwaddr, ETH_ALEN);
 		addr.sa_family = slave_dev->type;
@@ -2346,7 +2357,7 @@
 					    arp_work.work);
 	struct slave *slave, *oldcurrent;
 	struct list_head *iter;
-	int do_failover = 0;
+	int do_failover = 0, slave_state_changed = 0;
 
 	if (!bond_has_slaves(bond))
 		goto re_arm;
@@ -2370,7 +2381,7 @@
 			    bond_time_in_interval(bond, slave->dev->last_rx, 1)) {
 
 				slave->link  = BOND_LINK_UP;
-				bond_set_active_slave(slave);
+				slave_state_changed = 1;
 
 				/* primary_slave has no meaning in round-robin
 				 * mode. the window of a slave being up and
@@ -2399,7 +2410,7 @@
 			    !bond_time_in_interval(bond, slave->dev->last_rx, 2)) {
 
 				slave->link  = BOND_LINK_DOWN;
-				bond_set_backup_slave(slave);
+				slave_state_changed = 1;
 
 				if (slave->link_failure_count < UINT_MAX)
 					slave->link_failure_count++;
@@ -2426,19 +2437,24 @@
 
 	rcu_read_unlock();
 
-	if (do_failover) {
-		/* the bond_select_active_slave must hold RTNL
-		 * and curr_slave_lock for write.
-		 */
+	if (do_failover || slave_state_changed) {
 		if (!rtnl_trylock())
 			goto re_arm;
-		block_netpoll_tx();
-		write_lock_bh(&bond->curr_slave_lock);
 
-		bond_select_active_slave(bond);
+		if (slave_state_changed) {
+			bond_slave_state_change(bond);
+		} else if (do_failover) {
+			/* the bond_select_active_slave must hold RTNL
+			 * and curr_slave_lock for write.
+			 */
+			block_netpoll_tx();
+			write_lock_bh(&bond->curr_slave_lock);
 
-		write_unlock_bh(&bond->curr_slave_lock);
-		unblock_netpoll_tx();
+			bond_select_active_slave(bond);
+
+			write_unlock_bh(&bond->curr_slave_lock);
+			unblock_netpoll_tx();
+		}
 		rtnl_unlock();
 	}
 
@@ -2599,45 +2615,51 @@
 
 /*
  * Send ARP probes for active-backup mode ARP monitor.
- *
- * Called with rcu_read_lock hold.
  */
-static void bond_ab_arp_probe(struct bonding *bond)
+static bool bond_ab_arp_probe(struct bonding *bond)
 {
 	struct slave *slave, *before = NULL, *new_slave = NULL,
-		     *curr_arp_slave = rcu_dereference(bond->current_arp_slave);
+		     *curr_arp_slave, *curr_active_slave;
 	struct list_head *iter;
 	bool found = false;
 
-	read_lock(&bond->curr_slave_lock);
+	rcu_read_lock();
+	curr_arp_slave = rcu_dereference(bond->current_arp_slave);
+	curr_active_slave = rcu_dereference(bond->curr_active_slave);
 
-	if (curr_arp_slave && bond->curr_active_slave)
+	if (curr_arp_slave && curr_active_slave)
 		pr_info("PROBE: c_arp %s && cas %s BAD\n",
 			curr_arp_slave->dev->name,
-			bond->curr_active_slave->dev->name);
+			curr_active_slave->dev->name);
 
-	if (bond->curr_active_slave) {
-		bond_arp_send_all(bond, bond->curr_active_slave);
-		read_unlock(&bond->curr_slave_lock);
-		return;
+	if (curr_active_slave) {
+		bond_arp_send_all(bond, curr_active_slave);
+		rcu_read_unlock();
+		return true;
 	}
-
-	read_unlock(&bond->curr_slave_lock);
+	rcu_read_unlock();
 
 	/* if we don't have a curr_active_slave, search for the next available
 	 * backup slave from the current_arp_slave and make it the candidate
 	 * for becoming the curr_active_slave
 	 */
 
+	if (!rtnl_trylock())
+		return false;
+	/* curr_arp_slave might have gone away */
+	curr_arp_slave = ACCESS_ONCE(bond->current_arp_slave);
+
 	if (!curr_arp_slave) {
-		curr_arp_slave = bond_first_slave_rcu(bond);
-		if (!curr_arp_slave)
-			return;
+		curr_arp_slave = bond_first_slave(bond);
+		if (!curr_arp_slave) {
+			rtnl_unlock();
+			return true;
+		}
 	}
 
 	bond_set_slave_inactive_flags(curr_arp_slave);
 
-	bond_for_each_slave_rcu(bond, slave, iter) {
+	bond_for_each_slave(bond, slave, iter) {
 		if (!found && !before && IS_UP(slave->dev))
 			before = slave;
 
@@ -2667,21 +2689,26 @@
 	if (!new_slave && before)
 		new_slave = before;
 
-	if (!new_slave)
-		return;
+	if (!new_slave) {
+		rtnl_unlock();
+		return true;
+	}
 
 	new_slave->link = BOND_LINK_BACK;
 	bond_set_slave_active_flags(new_slave);
 	bond_arp_send_all(bond, new_slave);
 	new_slave->jiffies = jiffies;
 	rcu_assign_pointer(bond->current_arp_slave, new_slave);
+	rtnl_unlock();
+
+	return true;
 }
 
 static void bond_activebackup_arp_mon(struct work_struct *work)
 {
 	struct bonding *bond = container_of(work, struct bonding,
 					    arp_work.work);
-	bool should_notify_peers = false;
+	bool should_notify_peers = false, should_commit = false;
 	int delta_in_ticks;
 
 	delta_in_ticks = msecs_to_jiffies(bond->params.arp_interval);
@@ -2690,12 +2717,11 @@
 		goto re_arm;
 
 	rcu_read_lock();
-
 	should_notify_peers = bond_should_notify_peers(bond);
+	should_commit = bond_ab_arp_inspect(bond);
+	rcu_read_unlock();
 
-	if (bond_ab_arp_inspect(bond)) {
-		rcu_read_unlock();
-
+	if (should_commit) {
 		/* Race avoidance with bond_close flush of workqueue */
 		if (!rtnl_trylock()) {
 			delta_in_ticks = 1;
@@ -2704,13 +2730,14 @@
 		}
 
 		bond_ab_arp_commit(bond);
-
 		rtnl_unlock();
-		rcu_read_lock();
 	}
 
-	bond_ab_arp_probe(bond);
-	rcu_read_unlock();
+	if (!bond_ab_arp_probe(bond)) {
+		/* rtnl locking failed, re-arm */
+		delta_in_ticks = 1;
+		should_notify_peers = false;
+	}
 
 re_arm:
 	if (bond->params.arp_interval)
@@ -2841,9 +2868,12 @@
 		pr_info("%s: Primary slave changed to %s, reselecting active slave.\n",
 			bond->dev->name, bond->primary_slave ? slave_dev->name :
 							       "none");
+
+		block_netpoll_tx();
 		write_lock_bh(&bond->curr_slave_lock);
 		bond_select_active_slave(bond);
 		write_unlock_bh(&bond->curr_slave_lock);
+		unblock_netpoll_tx();
 		break;
 	case NETDEV_FEAT_CHANGE:
 		bond_compute_features(bond);
@@ -3415,7 +3445,8 @@
 	/* If fail_over_mac is enabled, do nothing and return success.
 	 * Returning an error causes ifenslave to fail.
 	 */
-	if (bond->params.fail_over_mac)
+	if (bond->params.fail_over_mac &&
+	    bond->params.mode == BOND_MODE_ACTIVEBACKUP)
 		return 0;
 
 	if (!is_valid_ether_addr(sa->sa_data))
@@ -3676,7 +3707,7 @@
 
 
 static u16 bond_select_queue(struct net_device *dev, struct sk_buff *skb,
-			     void *accel_priv)
+			     void *accel_priv, select_queue_fallback_t fallback)
 {
 	/*
 	 * This helper function exists to help dev_pick_tx get the correct
diff --git a/drivers/net/bonding/bond_options.c b/drivers/net/bonding/bond_options.c
index 11cb943..c378784 100644
--- a/drivers/net/bonding/bond_options.c
+++ b/drivers/net/bonding/bond_options.c
@@ -14,7 +14,7 @@
 #include <linux/errno.h>
 #include <linux/if.h>
 #include <linux/netdevice.h>
-#include <linux/rwlock.h>
+#include <linux/spinlock.h>
 #include <linux/rcupdate.h>
 #include <linux/ctype.h>
 #include <linux/inet.h>
diff --git a/drivers/net/bonding/bonding.h b/drivers/net/bonding/bonding.h
index 1a9062f..86ccfb9 100644
--- a/drivers/net/bonding/bonding.h
+++ b/drivers/net/bonding/bonding.h
@@ -303,6 +303,19 @@
 	}
 }
 
+static inline void bond_slave_state_change(struct bonding *bond)
+{
+	struct list_head *iter;
+	struct slave *tmp;
+
+	bond_for_each_slave(bond, tmp, iter) {
+		if (tmp->link == BOND_LINK_UP)
+			bond_set_active_slave(tmp);
+		else if (tmp->link == BOND_LINK_DOWN)
+			bond_set_backup_slave(tmp);
+	}
+}
+
 static inline int bond_slave_state(struct slave *slave)
 {
 	return slave->backup;
diff --git a/drivers/net/can/Kconfig b/drivers/net/can/Kconfig
index d447b88..9e7d95d 100644
--- a/drivers/net/can/Kconfig
+++ b/drivers/net/can/Kconfig
@@ -104,7 +104,7 @@
 
 config CAN_FLEXCAN
 	tristate "Support for Freescale FLEXCAN based chips"
-	depends on (ARM && CPU_LITTLE_ENDIAN) || PPC
+	depends on ARM || PPC
 	---help---
 	  Say Y here if you want to support for Freescale FlexCAN.
 
diff --git a/drivers/net/can/dev.c b/drivers/net/can/dev.c
index 13a9098..fc59bc6 100644
--- a/drivers/net/can/dev.c
+++ b/drivers/net/can/dev.c
@@ -323,19 +323,10 @@
 	}
 
 	if (!priv->echo_skb[idx]) {
-		struct sock *srcsk = skb->sk;
 
-		if (atomic_read(&skb->users) != 1) {
-			struct sk_buff *old_skb = skb;
-
-			skb = skb_clone(old_skb, GFP_ATOMIC);
-			kfree_skb(old_skb);
-			if (!skb)
-				return;
-		} else
-			skb_orphan(skb);
-
-		skb->sk = srcsk;
+		skb = can_create_echo_skb(skb);
+		if (!skb)
+			return;
 
 		/* make settings for echo to reduce code in irq context */
 		skb->protocol = htons(ETH_P_CAN);
diff --git a/drivers/net/can/flexcan.c b/drivers/net/can/flexcan.c
index aaed97b..320bef2 100644
--- a/drivers/net/can/flexcan.c
+++ b/drivers/net/can/flexcan.c
@@ -235,9 +235,12 @@
 };
 
 /*
- * Abstract off the read/write for arm versus ppc.
+ * Abstract off the read/write for arm versus ppc. This
+ * assumes that PPC uses big-endian registers and everything
+ * else uses little-endian registers, independent of CPU
+ * endianness.
  */
-#if defined(__BIG_ENDIAN)
+#if defined(CONFIG_PPC)
 static inline u32 flexcan_read(void __iomem *addr)
 {
 	return in_be32(addr);
diff --git a/drivers/net/can/janz-ican3.c b/drivers/net/can/janz-ican3.c
index e24e669..71594e5 100644
--- a/drivers/net/can/janz-ican3.c
+++ b/drivers/net/can/janz-ican3.c
@@ -18,6 +18,7 @@
 #include <linux/netdevice.h>
 #include <linux/can.h>
 #include <linux/can/dev.h>
+#include <linux/can/skb.h>
 #include <linux/can/error.h>
 
 #include <linux/mfd/janz.h>
@@ -1133,20 +1134,9 @@
  */
 static void ican3_put_echo_skb(struct ican3_dev *mod, struct sk_buff *skb)
 {
-	struct sock *srcsk = skb->sk;
-
-	if (atomic_read(&skb->users) != 1) {
-		struct sk_buff *old_skb = skb;
-
-		skb = skb_clone(old_skb, GFP_ATOMIC);
-		kfree_skb(old_skb);
-		if (!skb)
-			return;
-	} else {
-		skb_orphan(skb);
-	}
-
-	skb->sk = srcsk;
+	skb = can_create_echo_skb(skb);
+	if (!skb)
+		return;
 
 	/* save this skb for tx interrupt echo handling */
 	skb_queue_tail(&mod->echoq, skb);
@@ -1322,7 +1312,7 @@
 
 	/* process all communication messages */
 	while (true) {
-		struct ican3_msg msg;
+		struct ican3_msg uninitialized_var(msg);
 		ret = ican3_recv_msg(mod, &msg);
 		if (ret)
 			break;
diff --git a/drivers/net/can/mscan/mpc5xxx_can.c b/drivers/net/can/mscan/mpc5xxx_can.c
index 035e235..4472529 100644
--- a/drivers/net/can/mscan/mpc5xxx_can.c
+++ b/drivers/net/can/mscan/mpc5xxx_can.c
@@ -108,135 +108,170 @@
 #endif /* CONFIG_PPC_MPC52xx */
 
 #ifdef CONFIG_PPC_MPC512x
-struct mpc512x_clockctl {
-	u32 spmr;		/* System PLL Mode Reg */
-	u32 sccr[2];		/* System Clk Ctrl Reg 1 & 2 */
-	u32 scfr1;		/* System Clk Freq Reg 1 */
-	u32 scfr2;		/* System Clk Freq Reg 2 */
-	u32 reserved;
-	u32 bcr;		/* Bread Crumb Reg */
-	u32 pccr[12];		/* PSC Clk Ctrl Reg 0-11 */
-	u32 spccr;		/* SPDIF Clk Ctrl Reg */
-	u32 cccr;		/* CFM Clk Ctrl Reg */
-	u32 dccr;		/* DIU Clk Cnfg Reg */
-	u32 mccr[4];		/* MSCAN Clk Ctrl Reg 1-3 */
-};
-
-static struct of_device_id mpc512x_clock_ids[] = {
-	{ .compatible = "fsl,mpc5121-clock", },
-	{}
-};
-
 static u32 mpc512x_can_get_clock(struct platform_device *ofdev,
-				 const char *clock_name, int *mscan_clksrc)
+				 const char *clock_source, int *mscan_clksrc)
 {
-	struct mpc512x_clockctl __iomem *clockctl;
-	struct device_node *np_clock;
-	struct clk *sys_clk, *ref_clk;
-	int plen, clockidx, clocksrc = -1;
-	u32 sys_freq, val, clockdiv = 1, freq = 0;
-	const u32 *pval;
+	struct device_node *np;
+	u32 clockdiv;
+	enum {
+		CLK_FROM_AUTO,
+		CLK_FROM_IPS,
+		CLK_FROM_SYS,
+		CLK_FROM_REF,
+	} clk_from;
+	struct clk *clk_in, *clk_can;
+	unsigned long freq_calc;
+	struct mscan_priv *priv;
+	struct clk *clk_ipg;
 
-	np_clock = of_find_matching_node(NULL, mpc512x_clock_ids);
-	if (!np_clock) {
-		dev_err(&ofdev->dev, "couldn't find clock node\n");
-		return 0;
+	/* the caller passed in the clock source spec that was read from
+	 * the device tree, get the optional clock divider as well
+	 */
+	np = ofdev->dev.of_node;
+	clockdiv = 1;
+	of_property_read_u32(np, "fsl,mscan-clock-divider", &clockdiv);
+	dev_dbg(&ofdev->dev, "device tree specs: clk src[%s] div[%d]\n",
+		clock_source ? clock_source : "<NULL>", clockdiv);
+
+	/* when clock-source is 'ip', the CANCTL1[CLKSRC] bit needs to
+	 * get set, and the 'ips' clock is the input to the MSCAN
+	 * component
+	 *
+	 * for clock-source values of 'ref' or 'sys' the CANCTL1[CLKSRC]
+	 * bit needs to get cleared, an optional clock-divider may have
+	 * been specified (the default value is 1), the appropriate
+	 * MSCAN related MCLK is the input to the MSCAN component
+	 *
+	 * in the absence of a clock-source spec, first an optimal clock
+	 * gets determined based on the 'sys' clock, if that fails the
+	 * 'ref' clock is used
+	 */
+	clk_from = CLK_FROM_AUTO;
+	if (clock_source) {
+		/* interpret the device tree's spec for the clock source */
+		if (!strcmp(clock_source, "ip"))
+			clk_from = CLK_FROM_IPS;
+		else if (!strcmp(clock_source, "sys"))
+			clk_from = CLK_FROM_SYS;
+		else if (!strcmp(clock_source, "ref"))
+			clk_from = CLK_FROM_REF;
+		else
+			goto err_invalid;
+		dev_dbg(&ofdev->dev, "got a clk source spec[%d]\n", clk_from);
 	}
-	clockctl = of_iomap(np_clock, 0);
-	if (!clockctl) {
-		dev_err(&ofdev->dev, "couldn't map clock registers\n");
-		goto exit_put;
+	if (clk_from == CLK_FROM_AUTO) {
+		/* no spec so far, try the 'sys' clock; round to the
+		 * next MHz and see if we can get a multiple of 16MHz
+		 */
+		dev_dbg(&ofdev->dev, "no clk source spec, trying SYS\n");
+		clk_in = devm_clk_get(&ofdev->dev, "sys");
+		if (IS_ERR(clk_in))
+			goto err_notavail;
+		freq_calc = clk_get_rate(clk_in);
+		freq_calc += 499999;
+		freq_calc /= 1000000;
+		freq_calc *= 1000000;
+		if ((freq_calc % 16000000) == 0) {
+			clk_from = CLK_FROM_SYS;
+			clockdiv = freq_calc / 16000000;
+			dev_dbg(&ofdev->dev,
+				"clk fit, sys[%lu] div[%d] freq[%lu]\n",
+				freq_calc, clockdiv, freq_calc / clockdiv);
+		}
+	}
+	if (clk_from == CLK_FROM_AUTO) {
+		/* no spec so far, use the 'ref' clock */
+		dev_dbg(&ofdev->dev, "no clk source spec, trying REF\n");
+		clk_in = devm_clk_get(&ofdev->dev, "ref");
+		if (IS_ERR(clk_in))
+			goto err_notavail;
+		clk_from = CLK_FROM_REF;
+		freq_calc = clk_get_rate(clk_in);
+		dev_dbg(&ofdev->dev,
+			"clk fit, ref[%lu] (no div) freq[%lu]\n",
+			freq_calc, freq_calc);
 	}
 
-	/* Determine the MSCAN device index from the peripheral's
-	 * physical address. Register address offsets against the
-	 * IMMR base are:  0x1300, 0x1380, 0x2300, 0x2380
+	/* select IPS or MCLK as the MSCAN input (returned to the caller),
+	 * set up the MCLK mux source and rate if applicable, apply the
+	 * divider that was optionally specified or derived above, and
+	 * determine the actual resulting clock rate to return to the caller
+	 */
-	pval = of_get_property(ofdev->dev.of_node, "reg", &plen);
-	BUG_ON(!pval || plen < sizeof(*pval));
-	clockidx = (*pval & 0x80) ? 1 : 0;
-	if (*pval & 0x2000)
-		clockidx += 2;
-
-	/*
-	 * Clock source and divider selection: 3 different clock sources
-	 * can be selected: "ip", "ref" or "sys". For the latter two, a
-	 * clock divider can be defined as well. If the clock source is
-	 * not specified by the device tree, we first try to find an
-	 * optimal CAN source clock based on the system clock. If that
-	 * is not posslible, the reference clock will be used.
-	 */
-	if (clock_name && !strcmp(clock_name, "ip")) {
+	switch (clk_from) {
+	case CLK_FROM_IPS:
+		clk_can = devm_clk_get(&ofdev->dev, "ips");
+		if (IS_ERR(clk_can))
+			goto err_notavail;
+		priv = netdev_priv(dev_get_drvdata(&ofdev->dev));
+		priv->clk_can = clk_can;
+		freq_calc = clk_get_rate(clk_can);
 		*mscan_clksrc = MSCAN_CLKSRC_IPS;
-		freq = mpc5xxx_get_bus_frequency(ofdev->dev.of_node);
-	} else {
+		dev_dbg(&ofdev->dev, "clk from IPS, clksrc[%d] freq[%lu]\n",
+			*mscan_clksrc, freq_calc);
+		break;
+	case CLK_FROM_SYS:
+	case CLK_FROM_REF:
+		clk_can = devm_clk_get(&ofdev->dev, "mclk");
+		if (IS_ERR(clk_can))
+			goto err_notavail;
+		priv = netdev_priv(dev_get_drvdata(&ofdev->dev));
+		priv->clk_can = clk_can;
+		if (clk_from == CLK_FROM_SYS)
+			clk_in = devm_clk_get(&ofdev->dev, "sys");
+		if (clk_from == CLK_FROM_REF)
+			clk_in = devm_clk_get(&ofdev->dev, "ref");
+		if (IS_ERR(clk_in))
+			goto err_notavail;
+		clk_set_parent(clk_can, clk_in);
+		freq_calc = clk_get_rate(clk_in);
+		freq_calc /= clockdiv;
+		clk_set_rate(clk_can, freq_calc);
+		freq_calc = clk_get_rate(clk_can);
 		*mscan_clksrc = MSCAN_CLKSRC_BUS;
-
-		pval = of_get_property(ofdev->dev.of_node,
-				       "fsl,mscan-clock-divider", &plen);
-		if (pval && plen == sizeof(*pval))
-			clockdiv = *pval;
-		if (!clockdiv)
-			clockdiv = 1;
-
-		if (!clock_name || !strcmp(clock_name, "sys")) {
-			sys_clk = devm_clk_get(&ofdev->dev, "sys_clk");
-			if (IS_ERR(sys_clk)) {
-				dev_err(&ofdev->dev, "couldn't get sys_clk\n");
-				goto exit_unmap;
-			}
-			/* Get and round up/down sys clock rate */
-			sys_freq = 1000000 *
-				((clk_get_rate(sys_clk) + 499999) / 1000000);
-
-			if (!clock_name) {
-				/* A multiple of 16 MHz would be optimal */
-				if ((sys_freq % 16000000) == 0) {
-					clocksrc = 0;
-					clockdiv = sys_freq / 16000000;
-					freq = sys_freq / clockdiv;
-				}
-			} else {
-				clocksrc = 0;
-				freq = sys_freq / clockdiv;
-			}
-		}
-
-		if (clocksrc < 0) {
-			ref_clk = devm_clk_get(&ofdev->dev, "ref_clk");
-			if (IS_ERR(ref_clk)) {
-				dev_err(&ofdev->dev, "couldn't get ref_clk\n");
-				goto exit_unmap;
-			}
-			clocksrc = 1;
-			freq = clk_get_rate(ref_clk) / clockdiv;
-		}
+		dev_dbg(&ofdev->dev, "clk from MCLK, clksrc[%d] freq[%lu]\n",
+			*mscan_clksrc, freq_calc);
+		break;
+	default:
+		goto err_invalid;
 	}
 
-	/* Disable clock */
-	out_be32(&clockctl->mccr[clockidx], 0x0);
-	if (clocksrc >= 0) {
-		/* Set source and divider */
-		val = (clocksrc << 14) | ((clockdiv - 1) << 17);
-		out_be32(&clockctl->mccr[clockidx], val);
-		/* Enable clock */
-		out_be32(&clockctl->mccr[clockidx], val | 0x10000);
-	}
+	/* the above clk_can item is used for the bitrate, access to
+	 * the peripheral's register set needs the clk_ipg item
+	 */
+	clk_ipg = devm_clk_get(&ofdev->dev, "ipg");
+	if (IS_ERR(clk_ipg))
+		goto err_notavail_ipg;
+	if (clk_prepare_enable(clk_ipg))
+		goto err_notavail_ipg;
+	priv = netdev_priv(dev_get_drvdata(&ofdev->dev));
+	priv->clk_ipg = clk_ipg;
 
-	/* Enable MSCAN clock domain */
-	val = in_be32(&clockctl->sccr[1]);
-	if (!(val & (1 << 25)))
-		out_be32(&clockctl->sccr[1], val | (1 << 25));
+	/* return the determined clock source rate */
+	return freq_calc;
 
-	dev_dbg(&ofdev->dev, "using '%s' with frequency divider %d\n",
-		*mscan_clksrc == MSCAN_CLKSRC_IPS ? "ips_clk" :
-		clocksrc == 1 ? "ref_clk" : "sys_clk", clockdiv);
+err_invalid:
+	dev_err(&ofdev->dev, "invalid clock source specification\n");
+	/* clock source rate could not get determined */
+	return 0;
 
-exit_unmap:
-	iounmap(clockctl);
-exit_put:
-	of_node_put(np_clock);
-	return freq;
+err_notavail:
+	dev_err(&ofdev->dev, "cannot acquire or setup bitrate clock source\n");
+	/* clock source rate could not get determined */
+	return 0;
+
+err_notavail_ipg:
+	dev_err(&ofdev->dev, "cannot acquire or setup register clock\n");
+	/* clock source rate could not get determined */
+	return 0;
+}
+
+static void mpc512x_can_put_clock(struct platform_device *ofdev)
+{
+	struct mscan_priv *priv;
+
+	priv = netdev_priv(dev_get_drvdata(&ofdev->dev));
+	if (priv->clk_ipg)
+		clk_disable_unprepare(priv->clk_ipg);
 }
 #else /* !CONFIG_PPC_MPC512x */
 static u32 mpc512x_can_get_clock(struct platform_device *ofdev,
@@ -244,6 +279,7 @@
 {
 	return 0;
 }
+#define mpc512x_can_put_clock NULL
 #endif /* CONFIG_PPC_MPC512x */
 
 static const struct of_device_id mpc5xxx_can_table[];
@@ -385,11 +421,13 @@
 static const struct mpc5xxx_can_data mpc5200_can_data = {
 	.type = MSCAN_TYPE_MPC5200,
 	.get_clock = mpc52xx_can_get_clock,
+	/* .put_clock not applicable */
 };
 
 static const struct mpc5xxx_can_data mpc5121_can_data = {
 	.type = MSCAN_TYPE_MPC5121,
 	.get_clock = mpc512x_can_get_clock,
+	.put_clock = mpc512x_can_put_clock,
 };
 
 static const struct of_device_id mpc5xxx_can_table[] = {
diff --git a/drivers/net/can/usb/kvaser_usb.c b/drivers/net/can/usb/kvaser_usb.c
index 6c859bb..e77d110 100644
--- a/drivers/net/can/usb/kvaser_usb.c
+++ b/drivers/net/can/usb/kvaser_usb.c
@@ -473,6 +473,8 @@
 		return err;
 
 	dev->nchannels = msg.u.cardinfo.nchannels;
+	if (dev->nchannels > MAX_NET_DEVICES)
+		return -EINVAL;
 
 	return 0;
 }
diff --git a/drivers/net/can/vcan.c b/drivers/net/can/vcan.c
index 0a2a5ee..4e94057 100644
--- a/drivers/net/can/vcan.c
+++ b/drivers/net/can/vcan.c
@@ -46,6 +46,7 @@
 #include <linux/if_ether.h>
 #include <linux/can.h>
 #include <linux/can/dev.h>
+#include <linux/can/skb.h>
 #include <linux/slab.h>
 #include <net/rtnetlink.h>
 
@@ -109,25 +110,23 @@
 			stats->rx_packets++;
 			stats->rx_bytes += cfd->len;
 		}
-		kfree_skb(skb);
+		consume_skb(skb);
 		return NETDEV_TX_OK;
 	}
 
 	/* perform standard echo handling for CAN network interfaces */
 
 	if (loop) {
-		struct sock *srcsk = skb->sk;
 
-		skb = skb_share_check(skb, GFP_ATOMIC);
+		skb = can_create_echo_skb(skb);
 		if (!skb)
 			return NETDEV_TX_OK;
 
 		/* receive with packet counting */
-		skb->sk = srcsk;
 		vcan_rx(skb, dev);
 	} else {
 		/* no looped packets => no counting */
-		kfree_skb(skb);
+		consume_skb(skb);
 	}
 	return NETDEV_TX_OK;
 }
diff --git a/drivers/net/ethernet/3com/3c59x.c b/drivers/net/ethernet/3com/3c59x.c
index 0f4241c..238ccea 100644
--- a/drivers/net/ethernet/3com/3c59x.c
+++ b/drivers/net/ethernet/3com/3c59x.c
@@ -3294,7 +3294,6 @@
 
 static void __exit vortex_eisa_cleanup(void)
 {
-	struct vortex_private *vp;
 	void __iomem *ioaddr;
 
 #ifdef CONFIG_EISA
@@ -3303,7 +3302,6 @@
 #endif
 
 	if (compaq_net_device) {
-		vp = netdev_priv(compaq_net_device);
 		ioaddr = ioport_map(compaq_net_device->base_addr,
 		                    VORTEX_TOTAL_SIZE);
 
diff --git a/drivers/net/ethernet/8390/apne.c b/drivers/net/ethernet/8390/apne.c
index 811fa5d..30104b6 100644
--- a/drivers/net/ethernet/8390/apne.c
+++ b/drivers/net/ethernet/8390/apne.c
@@ -212,7 +212,6 @@
     int neX000, ctron;
 #endif
     static unsigned version_printed;
-    struct ei_device *ei_local = netdev_priv(dev);
 
     if ((apne_msg_enable & NETIF_MSG_DRV) && (version_printed++ == 0))
 		netdev_info(dev, version);
diff --git a/drivers/net/ethernet/allwinner/sun4i-emac.c b/drivers/net/ethernet/allwinner/sun4i-emac.c
index 0cc2143..511f6ee 100644
--- a/drivers/net/ethernet/allwinner/sun4i-emac.c
+++ b/drivers/net/ethernet/allwinner/sun4i-emac.c
@@ -929,6 +929,9 @@
 }
 
 static const struct of_device_id emac_of_match[] = {
+	{.compatible = "allwinner,sun4i-a10-emac",},
+
+	/* Deprecated */
 	{.compatible = "allwinner,sun4i-emac",},
 	{},
 };
diff --git a/drivers/net/ethernet/atheros/alx/main.c b/drivers/net/ethernet/atheros/alx/main.c
index e92ffd6..2e45f6e 100644
--- a/drivers/net/ethernet/atheros/alx/main.c
+++ b/drivers/net/ethernet/atheros/alx/main.c
@@ -1292,6 +1292,7 @@
 	alx = netdev_priv(netdev);
 	spin_lock_init(&alx->hw.mdio_lock);
 	spin_lock_init(&alx->irq_lock);
+	spin_lock_init(&alx->stats_lock);
 	alx->dev = netdev;
 	alx->hw.pdev = pdev;
 	alx->msg_enable = NETIF_MSG_LINK | NETIF_MSG_HW | NETIF_MSG_IFUP |
diff --git a/drivers/net/ethernet/broadcom/bnx2.c b/drivers/net/ethernet/broadcom/bnx2.c
index 9d2deda..cda25ac 100644
--- a/drivers/net/ethernet/broadcom/bnx2.c
+++ b/drivers/net/ethernet/broadcom/bnx2.c
@@ -85,7 +85,7 @@
 
 static int disable_msi = 0;
 
-module_param(disable_msi, int, 0);
+module_param(disable_msi, int, S_IRUGO);
 MODULE_PARM_DESC(disable_msi, "Disable Message Signaled Interrupt (MSI)");
 
 typedef enum {
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
index 9d7419e..66c0df7 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
@@ -1873,7 +1873,7 @@
 }
 
 u16 bnx2x_select_queue(struct net_device *dev, struct sk_buff *skb,
-		       void *accel_priv)
+		       void *accel_priv, select_queue_fallback_t fallback)
 {
 	struct bnx2x *bp = netdev_priv(dev);
 
@@ -1895,7 +1895,7 @@
 	}
 
 	/* select a non-FCoE queue */
-	return __netdev_pick_tx(dev, skb) % BNX2X_NUM_ETH_QUEUES(bp);
+	return fallback(dev, skb) % BNX2X_NUM_ETH_QUEUES(bp);
 }
 
 void bnx2x_set_num_queues(struct bnx2x *bp)
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h
index 17d1689..a89a40f 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h
@@ -496,7 +496,7 @@
 
 /* select_queue callback */
 u16 bnx2x_select_queue(struct net_device *dev, struct sk_buff *skb,
-		       void *accel_priv);
+		       void *accel_priv, select_queue_fallback_t fallback);
 
 static inline void bnx2x_update_rx_prod(struct bnx2x *bp,
 					struct bnx2x_fastpath *fp,
@@ -936,7 +936,7 @@
 	else /* CHIP_IS_E1X */
 		start_params->network_cos_mode = FW_WRR;
 
-	start_params->gre_tunnel_mode = IPGRE_TUNNEL;
+	start_params->gre_tunnel_mode = L2GRE_TUNNEL;
 	start_params->gre_tunnel_rss = GRE_INNER_HEADERS_RSS;
 
 	return bnx2x_func_state_change(bp, &func_params);
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c
index 92a467f..38fc794 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c
@@ -358,49 +358,47 @@
 
 	cfg_idx = bnx2x_get_link_cfg_idx(bp);
 	old_multi_phy_config = bp->link_params.multi_phy_config;
-	switch (cmd->port) {
-	case PORT_TP:
-		if (bp->port.supported[cfg_idx] & SUPPORTED_TP)
-			break; /* no port change */
-
-		if (!(bp->port.supported[0] & SUPPORTED_TP ||
-		      bp->port.supported[1] & SUPPORTED_TP)) {
+	if (cmd->port != bnx2x_get_port_type(bp)) {
+		switch (cmd->port) {
+		case PORT_TP:
+			if (!(bp->port.supported[0] & SUPPORTED_TP ||
+			      bp->port.supported[1] & SUPPORTED_TP)) {
+				DP(BNX2X_MSG_ETHTOOL,
+				   "Unsupported port type\n");
+				return -EINVAL;
+			}
+			bp->link_params.multi_phy_config &=
+				~PORT_HW_CFG_PHY_SELECTION_MASK;
+			if (bp->link_params.multi_phy_config &
+			    PORT_HW_CFG_PHY_SWAPPED_ENABLED)
+				bp->link_params.multi_phy_config |=
+				PORT_HW_CFG_PHY_SELECTION_SECOND_PHY;
+			else
+				bp->link_params.multi_phy_config |=
+				PORT_HW_CFG_PHY_SELECTION_FIRST_PHY;
+			break;
+		case PORT_FIBRE:
+		case PORT_DA:
+			if (!(bp->port.supported[0] & SUPPORTED_FIBRE ||
+			      bp->port.supported[1] & SUPPORTED_FIBRE)) {
+				DP(BNX2X_MSG_ETHTOOL,
+				   "Unsupported port type\n");
+				return -EINVAL;
+			}
+			bp->link_params.multi_phy_config &=
+				~PORT_HW_CFG_PHY_SELECTION_MASK;
+			if (bp->link_params.multi_phy_config &
+			    PORT_HW_CFG_PHY_SWAPPED_ENABLED)
+				bp->link_params.multi_phy_config |=
+				PORT_HW_CFG_PHY_SELECTION_FIRST_PHY;
+			else
+				bp->link_params.multi_phy_config |=
+				PORT_HW_CFG_PHY_SELECTION_SECOND_PHY;
+			break;
+		default:
 			DP(BNX2X_MSG_ETHTOOL, "Unsupported port type\n");
 			return -EINVAL;
 		}
-		bp->link_params.multi_phy_config &=
-			~PORT_HW_CFG_PHY_SELECTION_MASK;
-		if (bp->link_params.multi_phy_config &
-		    PORT_HW_CFG_PHY_SWAPPED_ENABLED)
-			bp->link_params.multi_phy_config |=
-			PORT_HW_CFG_PHY_SELECTION_SECOND_PHY;
-		else
-			bp->link_params.multi_phy_config |=
-			PORT_HW_CFG_PHY_SELECTION_FIRST_PHY;
-		break;
-	case PORT_FIBRE:
-	case PORT_DA:
-		if (bp->port.supported[cfg_idx] & SUPPORTED_FIBRE)
-			break; /* no port change */
-
-		if (!(bp->port.supported[0] & SUPPORTED_FIBRE ||
-		      bp->port.supported[1] & SUPPORTED_FIBRE)) {
-			DP(BNX2X_MSG_ETHTOOL, "Unsupported port type\n");
-			return -EINVAL;
-		}
-		bp->link_params.multi_phy_config &=
-			~PORT_HW_CFG_PHY_SELECTION_MASK;
-		if (bp->link_params.multi_phy_config &
-		    PORT_HW_CFG_PHY_SWAPPED_ENABLED)
-			bp->link_params.multi_phy_config |=
-			PORT_HW_CFG_PHY_SELECTION_FIRST_PHY;
-		else
-			bp->link_params.multi_phy_config |=
-			PORT_HW_CFG_PHY_SELECTION_SECOND_PHY;
-		break;
-	default:
-		DP(BNX2X_MSG_ETHTOOL, "Unsupported port type\n");
-		return -EINVAL;
 	}
 	/* Save new config in case command complete successfully */
 	new_multi_phy_config = bp->link_params.multi_phy_config;
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
index e118a3e..7d43822 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
@@ -95,29 +95,29 @@
 MODULE_FIRMWARE(FW_FILE_NAME_E2);
 
 int bnx2x_num_queues;
-module_param_named(num_queues, bnx2x_num_queues, int, 0);
+module_param_named(num_queues, bnx2x_num_queues, int, S_IRUGO);
 MODULE_PARM_DESC(num_queues,
 		 " Set number of queues (default is as a number of CPUs)");
 
 static int disable_tpa;
-module_param(disable_tpa, int, 0);
+module_param(disable_tpa, int, S_IRUGO);
 MODULE_PARM_DESC(disable_tpa, " Disable the TPA (LRO) feature");
 
 static int int_mode;
-module_param(int_mode, int, 0);
+module_param(int_mode, int, S_IRUGO);
 MODULE_PARM_DESC(int_mode, " Force interrupt mode other than MSI-X "
 				"(1 INT#x; 2 MSI)");
 
 static int dropless_fc;
-module_param(dropless_fc, int, 0);
+module_param(dropless_fc, int, S_IRUGO);
 MODULE_PARM_DESC(dropless_fc, " Pause on exhausted host ring");
 
 static int mrrs = -1;
-module_param(mrrs, int, 0);
+module_param(mrrs, int, S_IRUGO);
 MODULE_PARM_DESC(mrrs, " Force Max Read Req Size (0..3) (for debug)");
 
 static int debug;
-module_param(debug, int, 0);
+module_param(debug, int, S_IRUGO);
 MODULE_PARM_DESC(debug, " Default debug msglevel");
 
 struct workqueue_struct *bnx2x_wq;
@@ -13102,9 +13102,9 @@
 
 		if (atomic_read(&pdev->enable_cnt) == 1)
 			pci_release_regions(pdev);
-	}
 
-	pci_disable_device(pdev);
+		pci_disable_device(pdev);
+	}
 }
 
 static void bnx2x_remove_one(struct pci_dev *pdev)
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c
index aec5ef2..e42f48d 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c
@@ -1446,12 +1446,12 @@
 	if (vf->cfg_flags & VF_CFG_INT_SIMD)
 		val |= IGU_VF_CONF_SINGLE_ISR_EN;
 	val &= ~IGU_VF_CONF_PARENT_MASK;
-	val |= BP_FUNC(bp) << IGU_VF_CONF_PARENT_SHIFT;	/* parent PF */
+	val |= (BP_ABS_FUNC(bp) >> 1) << IGU_VF_CONF_PARENT_SHIFT;
 	REG_WR(bp, IGU_REG_VF_CONFIGURATION, val);
 
 	DP(BNX2X_MSG_IOV,
-	   "value in IGU_REG_VF_CONFIGURATION of vf %d after write %x\n",
-	   vf->abs_vfid, REG_RD(bp, IGU_REG_VF_CONFIGURATION));
+	   "value in IGU_REG_VF_CONFIGURATION of vf %d after write is 0x%08x\n",
+	   vf->abs_vfid, val);
 
 	bnx2x_pretend_func(bp, BP_ABS_FUNC(bp));
 
diff --git a/drivers/net/ethernet/broadcom/tg3.c b/drivers/net/ethernet/broadcom/tg3.c
index e2ca03e..3167ed6 100644
--- a/drivers/net/ethernet/broadcom/tg3.c
+++ b/drivers/net/ethernet/broadcom/tg3.c
@@ -2609,13 +2609,14 @@
 
 	tg3_writephy(tp, MII_CTRL1000, phy9_orig);
 
-	if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32)) {
-		reg32 &= ~0x3000;
-		tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);
-	} else if (!err)
-		err = -EBUSY;
+	err = tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32);
+	if (err)
+		return err;
 
-	return err;
+	reg32 &= ~0x3000;
+	tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);
+
+	return 0;
 }
 
 static void tg3_carrier_off(struct tg3 *tp)
@@ -14113,12 +14114,12 @@
 
 	tg3_netif_stop(tp);
 
+	tg3_set_mtu(dev, tp, new_mtu);
+
 	tg3_full_lock(tp, 1);
 
 	tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
 
-	tg3_set_mtu(dev, tp, new_mtu);
-
 	/* Reset PHY, otherwise the read DMA engine will be in a mode that
 	 * breaks all requests to 256 bytes.
 	 */
diff --git a/drivers/net/ethernet/dec/tulip/tulip_core.c b/drivers/net/ethernet/dec/tulip/tulip_core.c
index add05f1..1642de7 100644
--- a/drivers/net/ethernet/dec/tulip/tulip_core.c
+++ b/drivers/net/ethernet/dec/tulip/tulip_core.c
@@ -1939,6 +1939,7 @@
 	pci_iounmap(pdev, tp->base_addr);
 	free_netdev (dev);
 	pci_release_regions (pdev);
+	pci_disable_device(pdev);
 
 	/* pci_power_off (pdev, -1); */
 }
diff --git a/drivers/net/ethernet/ethoc.c b/drivers/net/ethernet/ethoc.c
index 4de8cfd..55e0fa0 100644
--- a/drivers/net/ethernet/ethoc.c
+++ b/drivers/net/ethernet/ethoc.c
@@ -13,6 +13,7 @@
 
 #include <linux/dma-mapping.h>
 #include <linux/etherdevice.h>
+#include <linux/clk.h>
 #include <linux/crc32.h>
 #include <linux/interrupt.h>
 #include <linux/io.h>
@@ -51,6 +52,7 @@
 #define	ETH_HASH0	0x48
 #define	ETH_HASH1	0x4c
 #define	ETH_TXCTRL	0x50
+#define	ETH_END		0x54
 
 /* mode register */
 #define	MODER_RXEN	(1 <<  0) /* receive enable */
@@ -179,6 +181,7 @@
  * @membase:	pointer to buffer memory region
  * @dma_alloc:	dma allocated buffer size
  * @io_region_size:	I/O memory region size
+ * @num_bd:	number of buffer descriptors
  * @num_tx:	number of send buffers
  * @cur_tx:	last send buffer written
  * @dty_tx:	last buffer actually sent
@@ -199,6 +202,7 @@
 	int dma_alloc;
 	resource_size_t io_region_size;
 
+	unsigned int num_bd;
 	unsigned int num_tx;
 	unsigned int cur_tx;
 	unsigned int dty_tx;
@@ -216,6 +220,7 @@
 
 	struct phy_device *phy;
 	struct mii_bus *mdio;
+	struct clk *clk;
 	s8 phy_id;
 };
 
@@ -688,6 +693,11 @@
 	}
 
 	priv->phy = phy;
+	phy->advertising &= ~(ADVERTISED_1000baseT_Full |
+			      ADVERTISED_1000baseT_Half);
+	phy->supported &= ~(SUPPORTED_1000baseT_Full |
+			    SUPPORTED_1000baseT_Half);
+
 	return 0;
 }
 
@@ -890,6 +900,102 @@
 	return NETDEV_TX_OK;
 }
 
+static int ethoc_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
+{
+	struct ethoc *priv = netdev_priv(dev);
+	struct phy_device *phydev = priv->phy;
+
+	if (!phydev)
+		return -EOPNOTSUPP;
+
+	return phy_ethtool_gset(phydev, cmd);
+}
+
+static int ethoc_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
+{
+	struct ethoc *priv = netdev_priv(dev);
+	struct phy_device *phydev = priv->phy;
+
+	if (!phydev)
+		return -EOPNOTSUPP;
+
+	return phy_ethtool_sset(phydev, cmd);
+}
+
+static int ethoc_get_regs_len(struct net_device *netdev)
+{
+	return ETH_END;
+}
+
+static void ethoc_get_regs(struct net_device *dev, struct ethtool_regs *regs,
+			   void *p)
+{
+	struct ethoc *priv = netdev_priv(dev);
+	u32 *regs_buff = p;
+	unsigned i;
+
+	regs->version = 0;
+	for (i = 0; i < ETH_END / sizeof(u32); ++i)
+		regs_buff[i] = ethoc_read(priv, i * sizeof(u32));
+}
+
+static void ethoc_get_ringparam(struct net_device *dev,
+				struct ethtool_ringparam *ring)
+{
+	struct ethoc *priv = netdev_priv(dev);
+
+	ring->rx_max_pending = priv->num_bd - 1;
+	ring->rx_mini_max_pending = 0;
+	ring->rx_jumbo_max_pending = 0;
+	ring->tx_max_pending = priv->num_bd - 1;
+
+	ring->rx_pending = priv->num_rx;
+	ring->rx_mini_pending = 0;
+	ring->rx_jumbo_pending = 0;
+	ring->tx_pending = priv->num_tx;
+}
+
+static int ethoc_set_ringparam(struct net_device *dev,
+			       struct ethtool_ringparam *ring)
+{
+	struct ethoc *priv = netdev_priv(dev);
+
+	if (ring->tx_pending < 1 || ring->rx_pending < 1 ||
+	    ring->tx_pending + ring->rx_pending > priv->num_bd)
+		return -EINVAL;
+	if (ring->rx_mini_pending || ring->rx_jumbo_pending)
+		return -EINVAL;
+
+	if (netif_running(dev)) {
+		netif_tx_disable(dev);
+		ethoc_disable_rx_and_tx(priv);
+		ethoc_disable_irq(priv, INT_MASK_TX | INT_MASK_RX);
+		synchronize_irq(dev->irq);
+	}
+
+	priv->num_tx = rounddown_pow_of_two(ring->tx_pending);
+	priv->num_rx = ring->rx_pending;
+	ethoc_init_ring(priv, dev->mem_start);
+
+	if (netif_running(dev)) {
+		ethoc_enable_irq(priv, INT_MASK_TX | INT_MASK_RX);
+		ethoc_enable_rx_and_tx(priv);
+		netif_wake_queue(dev);
+	}
+	return 0;
+}
+
+const struct ethtool_ops ethoc_ethtool_ops = {
+	.get_settings = ethoc_get_settings,
+	.set_settings = ethoc_set_settings,
+	.get_regs_len = ethoc_get_regs_len,
+	.get_regs = ethoc_get_regs,
+	.get_link = ethtool_op_get_link,
+	.get_ringparam = ethoc_get_ringparam,
+	.set_ringparam = ethoc_set_ringparam,
+	.get_ts_info = ethtool_op_get_ts_info,
+};
+
 static const struct net_device_ops ethoc_netdev_ops = {
 	.ndo_open = ethoc_open,
 	.ndo_stop = ethoc_stop,
@@ -917,6 +1023,8 @@
 	int num_bd;
 	int ret = 0;
 	bool random_mac = false;
+	struct ethoc_platform_data *pdata = dev_get_platdata(&pdev->dev);
+	u32 eth_clkfreq = pdata ? pdata->eth_clkfreq : 0;
 
 	/* allocate networking device */
 	netdev = alloc_etherdev(sizeof(struct ethoc));
@@ -1016,6 +1124,7 @@
 		ret = -ENODEV;
 		goto error;
 	}
+	priv->num_bd = num_bd;
 	/* num_tx must be a power of two */
 	priv->num_tx = rounddown_pow_of_two(num_bd >> 1);
 	priv->num_rx = num_bd - priv->num_tx;
@@ -1030,8 +1139,7 @@
 	}
 
 	/* Allow the platform setup code to pass in a MAC address. */
-	if (dev_get_platdata(&pdev->dev)) {
-		struct ethoc_platform_data *pdata = dev_get_platdata(&pdev->dev);
+	if (pdata) {
 		memcpy(netdev->dev_addr, pdata->hwaddr, IFHWADDRLEN);
 		priv->phy_id = pdata->phy_id;
 	} else {
@@ -1069,6 +1177,27 @@
 	if (random_mac)
 		netdev->addr_assign_type = NET_ADDR_RANDOM;
 
+	/* Allow the platform setup code to adjust MII management bus clock. */
+	if (!eth_clkfreq) {
+		struct clk *clk = devm_clk_get(&pdev->dev, NULL);
+
+		if (!IS_ERR(clk)) {
+			priv->clk = clk;
+			clk_prepare_enable(clk);
+			eth_clkfreq = clk_get_rate(clk);
+		}
+	}
+	if (eth_clkfreq) {
+		u32 clkdiv = MIIMODER_CLKDIV(eth_clkfreq / 2500000 + 1);
+
+		if (!clkdiv)
+			clkdiv = 2;
+		dev_dbg(&pdev->dev, "setting MII clkdiv to %u\n", clkdiv);
+		ethoc_write(priv, MIIMODER,
+			    (ethoc_read(priv, MIIMODER) & MIIMODER_NOPRE) |
+			    clkdiv);
+	}
+
 	/* register MII bus */
 	priv->mdio = mdiobus_alloc();
 	if (!priv->mdio) {
@@ -1111,6 +1240,7 @@
 	netdev->netdev_ops = &ethoc_netdev_ops;
 	netdev->watchdog_timeo = ETHOC_TIMEOUT;
 	netdev->features |= 0;
+	netdev->ethtool_ops = &ethoc_ethtool_ops;
 
 	/* setup NAPI */
 	netif_napi_add(netdev, &priv->napi, ethoc_poll, 64);
@@ -1133,6 +1263,8 @@
 	kfree(priv->mdio->irq);
 	mdiobus_free(priv->mdio);
 free:
+	if (priv->clk)
+		clk_disable_unprepare(priv->clk);
 	free_netdev(netdev);
 out:
 	return ret;
@@ -1157,6 +1289,8 @@
 			kfree(priv->mdio->irq);
 			mdiobus_free(priv->mdio);
 		}
+		if (priv->clk)
+			clk_disable_unprepare(priv->clk);
 		unregister_netdev(netdev);
 		free_netdev(netdev);
 	}
diff --git a/drivers/net/ethernet/freescale/fec_main.c b/drivers/net/ethernet/freescale/fec_main.c
index d4782b4..903362a 100644
--- a/drivers/net/ethernet/freescale/fec_main.c
+++ b/drivers/net/ethernet/freescale/fec_main.c
@@ -1778,8 +1778,6 @@
 	struct fec_enet_private *fep = netdev_priv(ndev);
 	int ret;
 
-	napi_enable(&fep->napi);
-
 	/* I should reset the ring buffers here, but I don't yet know
 	 * a simple way to do that.
 	 */
@@ -1794,6 +1792,8 @@
 		fec_enet_free_buffers(ndev);
 		return ret;
 	}
+
+	napi_enable(&fep->napi);
 	phy_start(fep->phy_dev);
 	netif_start_queue(ndev);
 	fep->opened = 1;
diff --git a/drivers/net/ethernet/intel/e100.c b/drivers/net/ethernet/intel/e100.c
index cbaba44..bf7a01e 100644
--- a/drivers/net/ethernet/intel/e100.c
+++ b/drivers/net/ethernet/intel/e100.c
@@ -3034,7 +3034,7 @@
 		*enable_wake = false;
 	}
 
-	pci_disable_device(pdev);
+	pci_clear_master(pdev);
 }
 
 static int __e100_power_off(struct pci_dev *pdev, bool wake)
diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c
index a4b9408..b901371 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_main.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_main.c
@@ -4440,9 +4440,10 @@
 	/* Check if APP Table has changed */
 	if (memcmp(&new_cfg->app,
 		   &old_cfg->app,
-		   sizeof(new_cfg->app)))
+		   sizeof(new_cfg->app))) {
 		need_reconfig = true;
 		dev_info(&pf->pdev->dev, "APP Table change detected.\n");
+	}
 
 	return need_reconfig;
 }
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
index 6d4ada7..18076c4 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
@@ -6881,7 +6881,7 @@
 }
 
 static u16 ixgbe_select_queue(struct net_device *dev, struct sk_buff *skb,
-			      void *accel_priv)
+			      void *accel_priv, select_queue_fallback_t fallback)
 {
 	struct ixgbe_fwd_adapter *fwd_adapter = accel_priv;
 #ifdef IXGBE_FCOE
@@ -6907,7 +6907,7 @@
 		if (adapter->flags & IXGBE_FLAG_FCOE_ENABLED)
 			break;
 	default:
-		return __netdev_pick_tx(dev, skb);
+		return fallback(dev, skb);
 	}
 
 	f = &adapter->ring_feature[RING_F_FCOE];
@@ -6920,7 +6920,7 @@
 
 	return txq + f->offset;
 #else
-	return __netdev_pick_tx(dev, skb);
+	return fallback(dev, skb);
 #endif
 }
 
diff --git a/drivers/net/ethernet/lantiq_etop.c b/drivers/net/ethernet/lantiq_etop.c
index 8f9266c..fd4b6ae 100644
--- a/drivers/net/ethernet/lantiq_etop.c
+++ b/drivers/net/ethernet/lantiq_etop.c
@@ -619,7 +619,7 @@
 
 static u16
 ltq_etop_select_queue(struct net_device *dev, struct sk_buff *skb,
-		      void *accel_priv)
+		      void *accel_priv, select_queue_fallback_t fallback)
 {
 	/* we are currently only using the first queue */
 	return 0;
diff --git a/drivers/net/ethernet/marvell/Kconfig b/drivers/net/ethernet/marvell/Kconfig
index 6300fd2..68e6a66 100644
--- a/drivers/net/ethernet/marvell/Kconfig
+++ b/drivers/net/ethernet/marvell/Kconfig
@@ -43,12 +43,12 @@
 	  This driver is used by the MV643XX_ETH and MVNETA drivers.
 
 config MVNETA
-	tristate "Marvell Armada 370/XP network interface support"
-	depends on MACH_ARMADA_370_XP
+	tristate "Marvell Armada 370/38x/XP network interface support"
+	depends on PLAT_ORION
 	select MVMDIO
 	---help---
 	  This driver supports the network interface units in the
-	  Marvell ARMADA XP and ARMADA 370 SoC family.
+	  Marvell ARMADA XP, ARMADA 370 and ARMADA 38x SoC family.
 
 	  Note that this driver is distinct from the mv643xx_eth
 	  driver, which should be used for the older Marvell SoCs
diff --git a/drivers/net/ethernet/marvell/sky2.c b/drivers/net/ethernet/marvell/sky2.c
index 6509935..55a37ae 100644
--- a/drivers/net/ethernet/marvell/sky2.c
+++ b/drivers/net/ethernet/marvell/sky2.c
@@ -5020,6 +5020,8 @@
 		}
  	}
 
+	netif_napi_add(dev, &hw->napi, sky2_poll, NAPI_WEIGHT);
+
 	err = register_netdev(dev);
 	if (err) {
 		dev_err(&pdev->dev, "cannot register net device\n");
@@ -5028,8 +5030,6 @@
 
 	netif_carrier_off(dev);
 
-	netif_napi_add(dev, &hw->napi, sky2_poll, NAPI_WEIGHT);
-
 	sky2_show_addr(dev);
 
 	if (hw->ports > 1) {
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_tx.c b/drivers/net/ethernet/mellanox/mlx4/en_tx.c
index 8e8a7eb..1345703 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_tx.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_tx.c
@@ -629,7 +629,7 @@
 }
 
 u16 mlx4_en_select_queue(struct net_device *dev, struct sk_buff *skb,
-			 void *accel_priv)
+			 void *accel_priv, select_queue_fallback_t fallback)
 {
 	struct mlx4_en_priv *priv = netdev_priv(dev);
 	u16 rings_p_up = priv->num_tx_rings_p_up;
@@ -641,7 +641,7 @@
 	if (vlan_tx_tag_present(skb))
 		up = vlan_tx_tag_get(skb) >> VLAN_PRIO_SHIFT;
 
-	return __netdev_pick_tx(dev, skb) % rings_p_up + up * rings_p_up;
+	return fallback(dev, skb) % rings_p_up + up * rings_p_up;
 }
 
 static void mlx4_bf_copy(void __iomem *dst, unsigned long *src, unsigned bytecnt)
diff --git a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
index 3af04c3..9ca223b 100644
--- a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
+++ b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
@@ -723,7 +723,7 @@
 
 void mlx4_en_tx_irq(struct mlx4_cq *mcq);
 u16 mlx4_en_select_queue(struct net_device *dev, struct sk_buff *skb,
-			 void *accel_priv);
+			 void *accel_priv, select_queue_fallback_t fallback);
 netdev_tx_t mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev);
 
 int mlx4_en_create_tx_ring(struct mlx4_en_priv *priv,
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/Kconfig b/drivers/net/ethernet/mellanox/mlx5/core/Kconfig
index 157fe8d..8ff57e8 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/Kconfig
+++ b/drivers/net/ethernet/mellanox/mlx5/core/Kconfig
@@ -4,5 +4,5 @@
 
 config MLX5_CORE
 	tristate
-	depends on PCI && X86
+	depends on PCI
 	default n
diff --git a/drivers/net/ethernet/neterion/vxge/vxge-main.c b/drivers/net/ethernet/neterion/vxge/vxge-main.c
index 1ded50ca..e46e869 100644
--- a/drivers/net/ethernet/neterion/vxge/vxge-main.c
+++ b/drivers/net/ethernet/neterion/vxge/vxge-main.c
@@ -726,9 +726,6 @@
 	int vpath_idx = 0;
 	enum vxge_hw_status status = VXGE_HW_OK;
 	struct vxge_vpath *vpath = NULL;
-	struct __vxge_hw_device *hldev;
-
-	hldev = pci_get_drvdata(vdev->pdev);
 
 	mac_address = (u8 *)&mac_addr;
 	memcpy(mac_address, mac_header, ETH_ALEN);
@@ -2443,9 +2440,6 @@
 
 static void vxge_rem_isr(struct vxgedev *vdev)
 {
-	struct __vxge_hw_device *hldev;
-	hldev = pci_get_drvdata(vdev->pdev);
-
 #ifdef CONFIG_PCI_MSI
 	if (vdev->config.intr_type == MSI_X) {
 		vxge_rem_msix_isr(vdev);
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c
index 30874cd..54ebf30 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c
@@ -683,12 +683,17 @@
 		adapter->ahw->linkup = 0;
 		netif_carrier_off(netdev);
 	} else if (!adapter->ahw->linkup && linkup) {
-		/* Do not advertise Link up if the port is in loopback mode */
-		if (qlcnic_83xx_check(adapter) && adapter->ahw->lb_mode)
+		adapter->ahw->linkup = 1;
+
+		/* Do not advertise Link up to the stack if device
+		 * is in loopback mode
+		 */
+		if (qlcnic_83xx_check(adapter) && adapter->ahw->lb_mode) {
+			netdev_info(netdev, "NIC Link is up for loopback test\n");
 			return;
+		}
 
 		netdev_info(netdev, "NIC Link is up\n");
-		adapter->ahw->linkup = 1;
 		netif_carrier_on(netdev);
 	}
 }
@@ -1150,13 +1155,13 @@
 	u16 lro_length, length, data_offset, t_vid, vid = 0xffff;
 	u32 seq_number;
 
-	if (unlikely(ring > adapter->max_rds_rings))
+	if (unlikely(ring >= adapter->max_rds_rings))
 		return NULL;
 
 	rds_ring = &recv_ctx->rds_rings[ring];
 
 	index = qlcnic_get_lro_sts_refhandle(sts_data0);
-	if (unlikely(index > rds_ring->num_desc))
+	if (unlikely(index >= rds_ring->num_desc))
 		return NULL;
 
 	buffer = &rds_ring->rx_buf_arr[index];
@@ -1662,13 +1667,13 @@
 	u16 vid = 0xffff;
 	int err;
 
-	if (unlikely(ring > adapter->max_rds_rings))
+	if (unlikely(ring >= adapter->max_rds_rings))
 		return NULL;
 
 	rds_ring = &recv_ctx->rds_rings[ring];
 
 	index = qlcnic_83xx_hndl(sts_data[0]);
-	if (unlikely(index > rds_ring->num_desc))
+	if (unlikely(index >= rds_ring->num_desc))
 		return NULL;
 
 	buffer = &rds_ring->rx_buf_arr[index];
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c
index 1f79d47..ba78c74 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c
@@ -1837,6 +1837,7 @@
 	qlcnic_linkevent_request(adapter, 1);
 
 	adapter->ahw->reset_context = 0;
+	netif_tx_start_all_queues(netdev);
 	return 0;
 }
 
@@ -2704,14 +2705,8 @@
 
 	err = __qlcnic_up(adapter, netdev);
 	if (err)
-		goto err_out;
+		qlcnic_detach(adapter);
 
-	netif_tx_start_all_queues(netdev);
-
-	return 0;
-
-err_out:
-	qlcnic_detach(adapter);
 	return err;
 }
 
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_common.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_common.c
index 17a1ca2..0638c18 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_common.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_common.c
@@ -448,8 +448,7 @@
 	return 0;
 }
 
-static int qlcnic_sriov_get_vf_acl(struct qlcnic_adapter *adapter,
-				   struct qlcnic_info *info)
+static int qlcnic_sriov_get_vf_acl(struct qlcnic_adapter *adapter)
 {
 	struct qlcnic_sriov *sriov = adapter->ahw->sriov;
 	struct qlcnic_cmd_args cmd;
@@ -495,10 +494,6 @@
 	if (err)
 		return -EIO;
 
-	err = qlcnic_sriov_get_vf_acl(adapter, &nic_info);
-	if (err)
-		return err;
-
 	if (qlcnic_83xx_get_port_info(adapter))
 		return -EIO;
 
@@ -555,6 +550,10 @@
 	if (err)
 		goto err_out_send_channel_term;
 
+	err = qlcnic_sriov_get_vf_acl(adapter);
+	if (err)
+		goto err_out_send_channel_term;
+
 	err = qlcnic_setup_netdev(adapter, adapter->netdev, pci_using_dac);
 	if (err)
 		goto err_out_send_channel_term;
diff --git a/drivers/net/ethernet/sfc/tx.c b/drivers/net/ethernet/sfc/tx.c
index c49d1fb..75d11fa 100644
--- a/drivers/net/ethernet/sfc/tx.c
+++ b/drivers/net/ethernet/sfc/tx.c
@@ -429,7 +429,9 @@
 	}
 
 	/* Transfer ownership of the skb to the final buffer */
+#ifdef EFX_USE_PIO
 finish_packet:
+#endif
 	buffer->skb = skb;
 	buffer->flags = EFX_TX_BUF_SKB | dma_flags;
 
diff --git a/drivers/net/ethernet/stmicro/stmmac/Kconfig b/drivers/net/ethernet/stmicro/stmmac/Kconfig
index e2f202e..f2d7c70 100644
--- a/drivers/net/ethernet/stmicro/stmmac/Kconfig
+++ b/drivers/net/ethernet/stmicro/stmmac/Kconfig
@@ -37,6 +37,17 @@
 	  stmmac device driver. This driver is used for A20/A31
 	  GMAC 	  ethernet controller.
 
+config DWMAC_STI
+	bool "STi GMAC support"
+	depends on STMMAC_PLATFORM && ARCH_STI
+	default y
+	---help---
+	  Support for the Ethernet controller on STi SoCs.
+
+	  This selects the STi SoC glue layer support for the stmmac
+	  device driver. This driver is used for the GMAC Ethernet
+	  controller on STi series SoCs.
+
 config STMMAC_PCI
 	bool "STMMAC PCI bus support"
 	depends on STMMAC_ETH && PCI
diff --git a/drivers/net/ethernet/stmicro/stmmac/Makefile b/drivers/net/ethernet/stmicro/stmmac/Makefile
index ecadece..dcef287 100644
--- a/drivers/net/ethernet/stmicro/stmmac/Makefile
+++ b/drivers/net/ethernet/stmicro/stmmac/Makefile
@@ -2,6 +2,7 @@
 stmmac-$(CONFIG_STMMAC_PLATFORM) += stmmac_platform.o
 stmmac-$(CONFIG_STMMAC_PCI) += stmmac_pci.o
 stmmac-$(CONFIG_DWMAC_SUNXI) += dwmac-sunxi.o
+stmmac-$(CONFIG_DWMAC_STI) += dwmac-sti.o
 stmmac-objs:= stmmac_main.o stmmac_ethtool.o stmmac_mdio.o ring_mode.o	\
 	      chain_mode.o dwmac_lib.o dwmac1000_core.o  dwmac1000_dma.o \
 	      dwmac100_core.o dwmac100_dma.o enh_desc.o  norm_desc.o \
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-sti.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-sti.c
new file mode 100644
index 0000000..552bbc1
--- /dev/null
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-sti.c
@@ -0,0 +1,330 @@
+/**
+ * dwmac-sti.c - STMicroelectronics DWMAC Specific Glue layer
+ *
+ * Copyright (C) 2003-2014 STMicroelectronics (R&D) Limited
+ * Author: Srinivas Kandagatla <srinivas.kandagatla@st.com>
+ *
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/platform_device.h>
+#include <linux/stmmac.h>
+#include <linux/phy.h>
+#include <linux/mfd/syscon.h>
+#include <linux/regmap.h>
+#include <linux/clk.h>
+#include <linux/of.h>
+#include <linux/of_net.h>
+
+/**
+ *			STi GMAC glue logic.
+ *			--------------------
+ *
+ *		 _
+ *		|  \
+ *	--------|0  \ ETH_SEL_INTERNAL_NOTEXT_PHYCLK
+ * phyclk	|    |___________________________________________
+ *		|    |	|			(phyclk-in)
+ *	--------|1  /	|
+ * int-clk	|_ /	|
+ *			|	 _
+ *			|	|  \
+ *			|_______|1  \ ETH_SEL_TX_RETIME_CLK
+ *				|    |___________________________
+ *				|    |		(tx-retime-clk)
+ *			 _______|0  /
+ *			|	|_ /
+ *		 _	|
+ *		|  \	|
+ *	--------|0  \	|
+ * clk_125	|    |__|
+ *		|    |	ETH_SEL_TXCLK_NOT_CLK125
+ *	--------|1  /
+ * txclk	|_ /
+ *
+ *
+ * ETH_SEL_INTERNAL_NOTEXT_PHYCLK is valid only for RMII, where either the
+ * PHY or the MAC can generate the 50MHz clock.
+ * This bit is configured by the "st,ext-phyclk" property.
+ *
+ * ETH_SEL_TXCLK_NOT_CLK125 is only valid for gigabit modes, where the 125MHz
+ * clock comes either from the clk-125 pin or the txclk pin. This configuration
+ * is determined entirely by the board wiring. This bit is configured by the
+ * "st,tx-retime-src" property.
+ *
+ * TXCLK configuration is different for different phy interface modes
+ * and changes according to link speed in modes like RGMII.
+ *
+ * The table below summarizes the clock requirements and clock sources for
+ * the supported PHY interface modes and link speeds.
+ * ________________________________________________
+ *|  PHY_MODE	| 1000 Mbit Link | 100 Mbit Link   |
+ * ------------------------------------------------
+ *|	MII	|	n/a	 |	25Mhz	   |
+ *|		|		 |	txclk	   |
+ * ------------------------------------------------
+ *|	GMII	|     125Mhz	 |	25Mhz	   |
+ *|		|  clk-125/txclk |	txclk	   |
+ * ------------------------------------------------
+ *|	RGMII	|     125Mhz	 |	25Mhz	   |
+ *|		|  clk-125/txclk |	clkgen     |
+ * ------------------------------------------------
+ *|	RMII	|	n/a	 |	25Mhz	   |
+ *|		|		 |clkgen/phyclk-in |
+ * ------------------------------------------------
+ *
+ * TX lines are always retimed with a clock, which can vary depending
+ * on the board configuration. The table below lists these bits in the
+ * Ethernet configuration register for each retime clock source.
+ *
+ *---------------------------------------------------------------
+ * src	 | tx_rt_clk	| int_not_ext_phyclk	| txclk_n_clk125|
+ *---------------------------------------------------------------
+ * txclk |	0	|	n/a		|	1	|
+ *---------------------------------------------------------------
+ * ck_125|	0	|	n/a		|	0	|
+ *---------------------------------------------------------------
+ * phyclk|	1	|	0		|	n/a	|
+ *---------------------------------------------------------------
+ * clkgen|	1	|	1		|	n/a	|
+ *---------------------------------------------------------------
+ */
+
+ /* Register definition */
+
+ /* 3 bits [8:6]
+  *  [6:6]      ETH_SEL_TXCLK_NOT_CLK125
+  *  [7:7]      ETH_SEL_INTERNAL_NOTEXT_PHYCLK
+  *  [8:8]      ETH_SEL_TX_RETIME_CLK
+  *
+  */
+
+#define TX_RETIME_SRC_MASK		GENMASK(8, 6)
+#define ETH_SEL_TX_RETIME_CLK		BIT(8)
+#define ETH_SEL_INTERNAL_NOTEXT_PHYCLK	BIT(7)
+#define ETH_SEL_TXCLK_NOT_CLK125	BIT(6)
+
+#define ENMII_MASK			GENMASK(5, 5)
+#define ENMII				BIT(5)
+
+/**
+ * 3 bits [4:2]
+ *	000-GMII/MII
+ *	001-RGMII
+ *	010-SGMII
+ *	100-RMII
+ */
+#define MII_PHY_SEL_MASK		GENMASK(4, 2)
+#define ETH_PHY_SEL_RMII		BIT(4)
+#define ETH_PHY_SEL_SGMII		BIT(3)
+#define ETH_PHY_SEL_RGMII		BIT(2)
+#define ETH_PHY_SEL_GMII		0x0
+#define ETH_PHY_SEL_MII			0x0
+
+#define IS_PHY_IF_MODE_RGMII(iface)	(iface == PHY_INTERFACE_MODE_RGMII || \
+			iface == PHY_INTERFACE_MODE_RGMII_ID || \
+			iface == PHY_INTERFACE_MODE_RGMII_RXID || \
+			iface == PHY_INTERFACE_MODE_RGMII_TXID)
+
+#define IS_PHY_IF_MODE_GBIT(iface)	(IS_PHY_IF_MODE_RGMII(iface) || \
+			iface == PHY_INTERFACE_MODE_GMII)
+
+struct sti_dwmac {
+	int interface;
+	bool ext_phyclk;
+	bool is_tx_retime_src_clk_125;
+	struct clk *clk;
+	int reg;
+	struct device *dev;
+	struct regmap *regmap;
+};
+
+static u32 phy_intf_sels[] = {
+	[PHY_INTERFACE_MODE_MII] = ETH_PHY_SEL_MII,
+	[PHY_INTERFACE_MODE_GMII] = ETH_PHY_SEL_GMII,
+	[PHY_INTERFACE_MODE_RGMII] = ETH_PHY_SEL_RGMII,
+	[PHY_INTERFACE_MODE_RGMII_ID] = ETH_PHY_SEL_RGMII,
+	[PHY_INTERFACE_MODE_SGMII] = ETH_PHY_SEL_SGMII,
+	[PHY_INTERFACE_MODE_RMII] = ETH_PHY_SEL_RMII,
+};
+
+enum {
+	TX_RETIME_SRC_NA = 0,
+	TX_RETIME_SRC_TXCLK = 1,
+	TX_RETIME_SRC_CLK_125,
+	TX_RETIME_SRC_PHYCLK,
+	TX_RETIME_SRC_CLKGEN,
+};
+
+static const char *const tx_retime_srcs[] = {
+	[TX_RETIME_SRC_NA] = "",
+	[TX_RETIME_SRC_TXCLK] = "txclk",
+	[TX_RETIME_SRC_CLK_125] = "clk_125",
+	[TX_RETIME_SRC_PHYCLK] = "phyclk",
+	[TX_RETIME_SRC_CLKGEN] = "clkgen",
+};
+
+static u32 tx_retime_val[] = {
+	[TX_RETIME_SRC_TXCLK] = ETH_SEL_TXCLK_NOT_CLK125,
+	[TX_RETIME_SRC_CLK_125] = 0x0,
+	[TX_RETIME_SRC_PHYCLK] = ETH_SEL_TX_RETIME_CLK,
+	[TX_RETIME_SRC_CLKGEN] = ETH_SEL_TX_RETIME_CLK |
+	    ETH_SEL_INTERNAL_NOTEXT_PHYCLK,
+};
+
+static void setup_retime_src(struct sti_dwmac *dwmac, u32 spd)
+{
+	u32 src = 0, freq = 0;
+
+	if (spd == SPEED_100) {
+		if (dwmac->interface == PHY_INTERFACE_MODE_MII ||
+		    dwmac->interface == PHY_INTERFACE_MODE_GMII) {
+			src = TX_RETIME_SRC_TXCLK;
+		} else if (dwmac->interface == PHY_INTERFACE_MODE_RMII) {
+			if (dwmac->ext_phyclk) {
+				src = TX_RETIME_SRC_PHYCLK;
+			} else {
+				src = TX_RETIME_SRC_CLKGEN;
+				freq = 50000000;
+			}
+
+		} else if (IS_PHY_IF_MODE_RGMII(dwmac->interface)) {
+			src = TX_RETIME_SRC_CLKGEN;
+			freq = 25000000;
+		}
+
+		if (src == TX_RETIME_SRC_CLKGEN && dwmac->clk)
+			clk_set_rate(dwmac->clk, freq);
+
+	} else if (spd == SPEED_1000) {
+		if (dwmac->is_tx_retime_src_clk_125)
+			src = TX_RETIME_SRC_CLK_125;
+		else
+			src = TX_RETIME_SRC_TXCLK;
+	}
+
+	regmap_update_bits(dwmac->regmap, dwmac->reg,
+			   TX_RETIME_SRC_MASK, tx_retime_val[src]);
+}
+
+static void sti_dwmac_exit(struct platform_device *pdev, void *priv)
+{
+	struct sti_dwmac *dwmac = priv;
+
+	if (dwmac->clk)
+		clk_disable_unprepare(dwmac->clk);
+}
+
+static void sti_fix_mac_speed(void *priv, unsigned int spd)
+{
+	struct sti_dwmac *dwmac = priv;
+
+	setup_retime_src(dwmac, spd);
+
+	return;
+}
+
+static int sti_dwmac_parse_data(struct sti_dwmac *dwmac,
+				struct platform_device *pdev)
+{
+	struct resource *res;
+	struct device *dev = &pdev->dev;
+	struct device_node *np = dev->of_node;
+	struct regmap *regmap;
+	int err;
+
+	if (!np)
+		return -EINVAL;
+
+	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "sti-ethconf");
+	if (!res)
+		return -ENODATA;
+
+	regmap = syscon_regmap_lookup_by_phandle(np, "st,syscon");
+	if (IS_ERR(regmap))
+		return PTR_ERR(regmap);
+
+	dwmac->dev = dev;
+	dwmac->interface = of_get_phy_mode(np);
+	dwmac->regmap = regmap;
+	dwmac->reg = res->start;
+	dwmac->ext_phyclk = of_property_read_bool(np, "st,ext-phyclk");
+	dwmac->is_tx_retime_src_clk_125 = false;
+
+	if (IS_PHY_IF_MODE_GBIT(dwmac->interface)) {
+		const char *rs;
+
+		err = of_property_read_string(np, "st,tx-retime-src", &rs);
+		if (err < 0) {
+			dev_err(dev, "st,tx-retime-src not specified\n");
+			return err;
+		}
+
+		if (!strcasecmp(rs, "clk_125"))
+			dwmac->is_tx_retime_src_clk_125 = true;
+	}
+
+	dwmac->clk = devm_clk_get(dev, "sti-ethclk");
+
+	if (IS_ERR(dwmac->clk))
+		dwmac->clk = NULL;
+
+	return 0;
+}
+
+static int sti_dwmac_init(struct platform_device *pdev, void *priv)
+{
+	struct sti_dwmac *dwmac = priv;
+	struct regmap *regmap = dwmac->regmap;
+	int iface = dwmac->interface;
+	u32 reg = dwmac->reg;
+	u32 val, spd;
+
+	if (dwmac->clk)
+		clk_prepare_enable(dwmac->clk);
+
+	regmap_update_bits(regmap, reg, MII_PHY_SEL_MASK, phy_intf_sels[iface]);
+
+	val = (iface == PHY_INTERFACE_MODE_REVMII) ? 0 : ENMII;
+	regmap_update_bits(regmap, reg, ENMII_MASK, val);
+
+	if (IS_PHY_IF_MODE_GBIT(iface))
+		spd = SPEED_1000;
+	else
+		spd = SPEED_100;
+
+	setup_retime_src(dwmac, spd);
+
+	return 0;
+}
+
+static void *sti_dwmac_setup(struct platform_device *pdev)
+{
+	struct sti_dwmac *dwmac;
+	int ret;
+
+	dwmac = devm_kzalloc(&pdev->dev, sizeof(*dwmac), GFP_KERNEL);
+	if (!dwmac)
+		return ERR_PTR(-ENOMEM);
+
+	ret = sti_dwmac_parse_data(dwmac, pdev);
+	if (ret) {
+		dev_err(&pdev->dev, "Unable to parse OF data\n");
+		return ERR_PTR(ret);
+	}
+
+	return dwmac;
+}
+
+const struct stmmac_of_data sti_gmac_data = {
+	.fix_mac_speed = sti_fix_mac_speed,
+	.setup = sti_dwmac_setup,
+	.init = sti_dwmac_init,
+	.exit = sti_dwmac_exit,
+};
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac.h b/drivers/net/ethernet/stmicro/stmmac/stmmac.h
index d9af26e..f9e60d7 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac.h
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac.h
@@ -133,6 +133,9 @@
 #ifdef CONFIG_DWMAC_SUNXI
 extern const struct stmmac_of_data sun7i_gmac_data;
 #endif
+#ifdef CONFIG_DWMAC_STI
+extern const struct stmmac_of_data sti_gmac_data;
+#endif
 extern struct platform_driver stmmac_pltfr_driver;
 static inline int stmmac_register_platform(void)
 {
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
index d93aa87..a2e7d2c 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
@@ -1524,9 +1524,9 @@
 					     priv->dev->dev_addr, 0);
 		if (!is_valid_ether_addr(priv->dev->dev_addr))
 			eth_hw_addr_random(priv->dev);
+		pr_info("%s: device MAC address %pM\n", priv->dev->name,
+			priv->dev->dev_addr);
 	}
-	pr_warn("%s: device MAC address %pM\n", priv->dev->name,
-		priv->dev->dev_addr);
 }
 
 /**
@@ -1635,7 +1635,7 @@
 	stmmac_mmc_setup(priv);
 
 	ret = stmmac_init_ptp(priv);
-	if (ret)
+	if (ret && ret != -EOPNOTSUPP)
 		pr_warn("%s: failed PTP initialisation\n", __func__);
 
 #ifdef CONFIG_STMMAC_DEBUG_FS
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
index 5884a7d..c61bc72b 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
@@ -33,6 +33,11 @@
 #ifdef CONFIG_DWMAC_SUNXI
 	{ .compatible = "allwinner,sun7i-a20-gmac", .data = &sun7i_gmac_data},
 #endif
+#ifdef CONFIG_DWMAC_STI
+	{ .compatible = "st,stih415-dwmac", .data = &sti_gmac_data},
+	{ .compatible = "st,stih416-dwmac", .data = &sti_gmac_data},
+	{ .compatible = "st,stih127-dwmac", .data = &sti_gmac_data},
+#endif
 	/* SoC specific glue layers should come before generic bindings */
 	{ .compatible = "st,spear600-gmac"},
 	{ .compatible = "snps,dwmac-3.610"},
diff --git a/drivers/net/ethernet/ti/cpsw.c b/drivers/net/ethernet/ti/cpsw.c
index bde63e3..651087b 100644
--- a/drivers/net/ethernet/ti/cpsw.c
+++ b/drivers/net/ethernet/ti/cpsw.c
@@ -554,7 +554,7 @@
 		 * common for both the interface as the interface shares
 		 * the same hardware resource.
 		 */
-		for (i = 0; i <= priv->data.slaves; i++)
+		for (i = 0; i < priv->data.slaves; i++)
 			if (priv->slaves[i].ndev->flags & IFF_PROMISC)
 				flag = true;
 
@@ -578,7 +578,7 @@
 			unsigned long timeout = jiffies + HZ;
 
 			/* Disable Learn for all ports */
-			for (i = 0; i <= priv->data.slaves; i++) {
+			for (i = 0; i < priv->data.slaves; i++) {
 				cpsw_ale_control_set(ale, i,
 						     ALE_PORT_NOLEARN, 1);
 				cpsw_ale_control_set(ale, i,
@@ -606,7 +606,7 @@
 			cpsw_ale_control_set(ale, 0, ALE_P0_UNI_FLOOD, 0);
 
 			/* Enable Learn for all ports */
-			for (i = 0; i <= priv->data.slaves; i++) {
+			for (i = 0; i < priv->data.slaves; i++) {
 				cpsw_ale_control_set(ale, i,
 						     ALE_PORT_NOLEARN, 0);
 				cpsw_ale_control_set(ale, i,
@@ -1878,14 +1878,29 @@
 		mdio_node = of_find_node_by_phandle(be32_to_cpup(parp));
 		phyid = be32_to_cpup(parp+1);
 		mdio = of_find_device_by_node(mdio_node);
-		snprintf(slave_data->phy_id, sizeof(slave_data->phy_id),
-			 PHY_ID_FMT, mdio->name, phyid);
+
+		if (strncmp(mdio->name, "gpio", 4) == 0) {
+			/* GPIO bitbang MDIO driver attached */
+			struct mii_bus *bus = dev_get_drvdata(&mdio->dev);
+
+			snprintf(slave_data->phy_id, sizeof(slave_data->phy_id),
+				 PHY_ID_FMT, bus->id, phyid);
+		} else {
+			/* davinci MDIO driver attached */
+			snprintf(slave_data->phy_id, sizeof(slave_data->phy_id),
+				 PHY_ID_FMT, mdio->name, phyid);
+		}
 
 		mac_addr = of_get_mac_address(slave_node);
 		if (mac_addr)
 			memcpy(slave_data->mac_addr, mac_addr, ETH_ALEN);
 
 		slave_data->phy_if = of_get_phy_mode(slave_node);
+		if (slave_data->phy_if < 0) {
+			pr_err("Missing or malformed slave[%d] phy-mode property\n",
+			       i);
+			return slave_data->phy_if;
+		}
 
 		if (data->dual_emac) {
 			if (of_property_read_u32(slave_node, "dual_emac_res_vlan",
diff --git a/drivers/net/ethernet/tile/tilegx.c b/drivers/net/ethernet/tile/tilegx.c
index 023237a..17503da 100644
--- a/drivers/net/ethernet/tile/tilegx.c
+++ b/drivers/net/ethernet/tile/tilegx.c
@@ -2071,7 +2071,7 @@
 
 /* Return subqueue id on this core (one per core). */
 static u16 tile_net_select_queue(struct net_device *dev, struct sk_buff *skb,
-				 void *accel_priv)
+				 void *accel_priv, select_queue_fallback_t fallback)
 {
 	return smp_processor_id();
 }
diff --git a/drivers/net/ethernet/xilinx/xilinx_axienet_main.c b/drivers/net/ethernet/xilinx/xilinx_axienet_main.c
index 1ec65fe..4bfdf8c 100644
--- a/drivers/net/ethernet/xilinx/xilinx_axienet_main.c
+++ b/drivers/net/ethernet/xilinx/xilinx_axienet_main.c
@@ -26,6 +26,7 @@
 #include <linux/netdevice.h>
 #include <linux/of_mdio.h>
 #include <linux/of_platform.h>
+#include <linux/of_irq.h>
 #include <linux/of_address.h>
 #include <linux/skbuff.h>
 #include <linux/spinlock.h>
@@ -600,7 +601,8 @@
 		size += status & XAXIDMA_BD_STS_ACTUAL_LEN_MASK;
 		packets++;
 
-		lp->tx_bd_ci = ++lp->tx_bd_ci % TX_BD_NUM;
+		++lp->tx_bd_ci;
+		lp->tx_bd_ci %= TX_BD_NUM;
 		cur_p = &lp->tx_bd_v[lp->tx_bd_ci];
 		status = cur_p->status;
 	}
@@ -686,7 +688,8 @@
 				     skb_headlen(skb), DMA_TO_DEVICE);
 
 	for (ii = 0; ii < num_frag; ii++) {
-		lp->tx_bd_tail = ++lp->tx_bd_tail % TX_BD_NUM;
+		++lp->tx_bd_tail;
+		lp->tx_bd_tail %= TX_BD_NUM;
 		cur_p = &lp->tx_bd_v[lp->tx_bd_tail];
 		frag = &skb_shinfo(skb)->frags[ii];
 		cur_p->phys = dma_map_single(ndev->dev.parent,
@@ -702,7 +705,8 @@
 	tail_p = lp->tx_bd_p + sizeof(*lp->tx_bd_v) * lp->tx_bd_tail;
 	/* Start the transfer */
 	axienet_dma_out32(lp, XAXIDMA_TX_TDESC_OFFSET, tail_p);
-	lp->tx_bd_tail = ++lp->tx_bd_tail % TX_BD_NUM;
+	++lp->tx_bd_tail;
+	lp->tx_bd_tail %= TX_BD_NUM;
 
 	return NETDEV_TX_OK;
 }
@@ -774,7 +778,8 @@
 		cur_p->status = 0;
 		cur_p->sw_id_offset = (u32) new_skb;
 
-		lp->rx_bd_ci = ++lp->rx_bd_ci % RX_BD_NUM;
+		++lp->rx_bd_ci;
+		lp->rx_bd_ci %= RX_BD_NUM;
 		cur_p = &lp->rx_bd_v[lp->rx_bd_ci];
 	}
 
diff --git a/drivers/net/hyperv/hyperv_net.h b/drivers/net/hyperv/hyperv_net.h
index a26eecb..7b594ce 100644
--- a/drivers/net/hyperv/hyperv_net.h
+++ b/drivers/net/hyperv/hyperv_net.h
@@ -462,7 +462,7 @@
 
 #define NETVSC_MTU 65536
 
-#define NETVSC_RECEIVE_BUFFER_SIZE		(1024*1024*2)	/* 2MB */
+#define NETVSC_RECEIVE_BUFFER_SIZE		(1024*1024*16)	/* 16MB */
 
 #define NETVSC_RECEIVE_BUFFER_ID		0xcafe
 
diff --git a/drivers/net/hyperv/netvsc.c b/drivers/net/hyperv/netvsc.c
index 93b485b..03a2c6e 100644
--- a/drivers/net/hyperv/netvsc.c
+++ b/drivers/net/hyperv/netvsc.c
@@ -136,8 +136,7 @@
 
 	if (net_device->recv_buf) {
 		/* Free up the receive buffer */
-		free_pages((unsigned long)net_device->recv_buf,
-			get_order(net_device->recv_buf_size));
+		vfree(net_device->recv_buf);
 		net_device->recv_buf = NULL;
 	}
 
@@ -163,9 +162,7 @@
 		return -ENODEV;
 	ndev = net_device->ndev;
 
-	net_device->recv_buf =
-		(void *)__get_free_pages(GFP_KERNEL|__GFP_ZERO,
-				get_order(net_device->recv_buf_size));
+	net_device->recv_buf = vzalloc(net_device->recv_buf_size);
 	if (!net_device->recv_buf) {
 		netdev_err(ndev, "unable to allocate receive "
 			"buffer of size %d\n", net_device->recv_buf_size);
diff --git a/drivers/net/hyperv/netvsc_drv.c b/drivers/net/hyperv/netvsc_drv.c
index 7756118..7141a19 100644
--- a/drivers/net/hyperv/netvsc_drv.c
+++ b/drivers/net/hyperv/netvsc_drv.c
@@ -88,8 +88,12 @@
 {
 	struct net_device_context *net_device_ctx = netdev_priv(net);
 	struct hv_device *device_obj = net_device_ctx->device_ctx;
+	struct netvsc_device *nvdev;
+	struct rndis_device *rdev;
 	int ret = 0;
 
+	netif_carrier_off(net);
+
 	/* Open up the device */
 	ret = rndis_filter_open(device_obj);
 	if (ret != 0) {
@@ -99,6 +103,11 @@
 
 	netif_start_queue(net);
 
+	nvdev = hv_get_drvdata(device_obj);
+	rdev = nvdev->extension;
+	if (!rdev->link_state)
+		netif_carrier_on(net);
+
 	return ret;
 }
 
@@ -229,23 +238,24 @@
 	struct net_device *net;
 	struct net_device_context *ndev_ctx;
 	struct netvsc_device *net_device;
+	struct rndis_device *rdev;
 
 	net_device = hv_get_drvdata(device_obj);
+	rdev = net_device->extension;
+
+	rdev->link_state = status != 1;
+
 	net = net_device->ndev;
 
-	if (!net) {
-		netdev_err(net, "got link status but net device "
-				"not initialized yet\n");
+	if (!net || net->reg_state != NETREG_REGISTERED)
 		return;
-	}
 
+	ndev_ctx = netdev_priv(net);
 	if (status == 1) {
-		netif_carrier_on(net);
-		ndev_ctx = netdev_priv(net);
 		schedule_delayed_work(&ndev_ctx->dwork, 0);
 		schedule_delayed_work(&ndev_ctx->dwork, msecs_to_jiffies(20));
 	} else {
-		netif_carrier_off(net);
+		schedule_delayed_work(&ndev_ctx->dwork, 0);
 	}
 }
 
@@ -388,17 +398,35 @@
  * current context when receiving RNDIS_STATUS_MEDIA_CONNECT event. So, add
  * another netif_notify_peers() into a delayed work, otherwise GARP packet
  * will not be sent after quick migration, and cause network disconnection.
+ * Also, we update the carrier status here.
  */
-static void netvsc_send_garp(struct work_struct *w)
+static void netvsc_link_change(struct work_struct *w)
 {
 	struct net_device_context *ndev_ctx;
 	struct net_device *net;
 	struct netvsc_device *net_device;
+	struct rndis_device *rdev;
+	bool notify;
+
+	rtnl_lock();
 
 	ndev_ctx = container_of(w, struct net_device_context, dwork.work);
 	net_device = hv_get_drvdata(ndev_ctx->device_ctx);
+	rdev = net_device->extension;
 	net = net_device->ndev;
-	netdev_notify_peers(net);
+
+	if (rdev->link_state) {
+		netif_carrier_off(net);
+		notify = false;
+	} else {
+		netif_carrier_on(net);
+		notify = true;
+	}
+
+	rtnl_unlock();
+
+	if (notify)
+		netdev_notify_peers(net);
 }
 
 
@@ -414,13 +442,10 @@
 	if (!net)
 		return -ENOMEM;
 
-	/* Set initial state */
-	netif_carrier_off(net);
-
 	net_device_ctx = netdev_priv(net);
 	net_device_ctx->device_ctx = dev;
 	hv_set_drvdata(dev, net);
-	INIT_DELAYED_WORK(&net_device_ctx->dwork, netvsc_send_garp);
+	INIT_DELAYED_WORK(&net_device_ctx->dwork, netvsc_link_change);
 	INIT_WORK(&net_device_ctx->work, do_set_multicast);
 
 	net->netdev_ops = &device_ops;
@@ -443,8 +468,6 @@
 	}
 	memcpy(net->dev_addr, device_info.mac_adr, ETH_ALEN);
 
-	netif_carrier_on(net);
-
 	ret = register_netdev(net);
 	if (ret != 0) {
 		pr_err("Unable to register netdev.\n");
diff --git a/drivers/net/irda/Kconfig b/drivers/net/irda/Kconfig
index 2dc82f1..3da44d5 100644
--- a/drivers/net/irda/Kconfig
+++ b/drivers/net/irda/Kconfig
@@ -210,13 +210,6 @@
 	  To compile it as a module, choose M here: the module will be called
 	  kingsun-sir.
 
-config EP7211_DONGLE
-	tristate "Cirrus Logic clps711x I/R support"
-	depends on IRTTY_SIR && ARCH_CLPS711X && IRDA
-	help
-	  Say Y here if you want to build support for the Cirrus logic
-	  EP7211 chipset's infrared module.
-
 config KSDAZZLE_DONGLE
 	tristate "KingSun Dazzle IrDA-USB dongle"
 	depends on IRDA && USB
diff --git a/drivers/net/irda/Makefile b/drivers/net/irda/Makefile
index dfc6453..be8ab5b 100644
--- a/drivers/net/irda/Makefile
+++ b/drivers/net/irda/Makefile
@@ -35,7 +35,6 @@
 obj-$(CONFIG_ACT200L_DONGLE)	+= act200l-sir.o
 obj-$(CONFIG_MA600_DONGLE)	+= ma600-sir.o
 obj-$(CONFIG_TOIM3232_DONGLE)	+= toim3232-sir.o
-obj-$(CONFIG_EP7211_DONGLE)	+= ep7211-sir.o
 obj-$(CONFIG_KINGSUN_DONGLE)	+= kingsun-sir.o
 obj-$(CONFIG_KSDAZZLE_DONGLE)	+= ksdazzle-sir.o
 obj-$(CONFIG_KS959_DONGLE)    	+= ks959-sir.o
diff --git a/drivers/net/irda/ep7211-sir.c b/drivers/net/irda/ep7211-sir.c
deleted file mode 100644
index 5fe1f4d..0000000
--- a/drivers/net/irda/ep7211-sir.c
+++ /dev/null
@@ -1,70 +0,0 @@
-/*
- * IR port driver for the Cirrus Logic CLPS711X processors
- *
- * Copyright 2001, Blue Mug Inc.  All rights reserved.
- * Copyright 2007, Samuel Ortiz <samuel@sortiz.org>
- */
-
-#include <linux/module.h>
-#include <linux/platform_device.h>
-
-#include <mach/hardware.h>
-
-#include "sir-dev.h"
-
-static int clps711x_dongle_open(struct sir_dev *dev)
-{
-	unsigned int syscon;
-
-	/* Turn on the SIR encoder. */
-	syscon = clps_readl(SYSCON1);
-	syscon |= SYSCON1_SIREN;
-	clps_writel(syscon, SYSCON1);
-
-	return 0;
-}
-
-static int clps711x_dongle_close(struct sir_dev *dev)
-{
-	unsigned int syscon;
-
-	/* Turn off the SIR encoder. */
-	syscon = clps_readl(SYSCON1);
-	syscon &= ~SYSCON1_SIREN;
-	clps_writel(syscon, SYSCON1);
-
-	return 0;
-}
-
-static struct dongle_driver clps711x_dongle = {
-	.owner		= THIS_MODULE,
-	.driver_name	= "EP7211 IR driver",
-	.type		= IRDA_EP7211_DONGLE,
-	.open		= clps711x_dongle_open,
-	.close		= clps711x_dongle_close,
-};
-
-static int clps711x_sir_probe(struct platform_device *pdev)
-{
-	return irda_register_dongle(&clps711x_dongle);
-}
-
-static int clps711x_sir_remove(struct platform_device *pdev)
-{
-	return irda_unregister_dongle(&clps711x_dongle);
-}
-
-static struct platform_driver clps711x_sir_driver = {
-	.driver	= {
-		.name	= "sir-clps711x",
-		.owner	= THIS_MODULE,
-	},
-	.probe	= clps711x_sir_probe,
-	.remove	= clps711x_sir_remove,
-};
-module_platform_driver(clps711x_sir_driver);
-
-MODULE_AUTHOR("Samuel Ortiz <samuel@sortiz.org>");
-MODULE_DESCRIPTION("EP7211 IR dongle driver");
-MODULE_LICENSE("GPL");
-MODULE_ALIAS("irda-dongle-13"); /* IRDA_EP7211_DONGLE */
diff --git a/drivers/net/irda/irtty-sir.c b/drivers/net/irda/irtty-sir.c
index 177441a..24b6ddd 100644
--- a/drivers/net/irda/irtty-sir.c
+++ b/drivers/net/irda/irtty-sir.c
@@ -522,7 +522,6 @@
 	sirdev_put_instance(priv->dev);
 
 	/* Stop tty */
-	irtty_stop_receiver(tty, TRUE);
 	clear_bit(TTY_DO_WRITE_WAKEUP, &tty->flags);
 	if (tty->ops->stop)
 		tty->ops->stop(tty);
diff --git a/drivers/net/macvlan.c b/drivers/net/macvlan.c
index 8433de4..a5d2189 100644
--- a/drivers/net/macvlan.c
+++ b/drivers/net/macvlan.c
@@ -879,14 +879,15 @@
 	dev->priv_flags |= IFF_MACVLAN;
 	err = netdev_upper_dev_link(lowerdev, dev);
 	if (err)
-		goto destroy_port;
-
+		goto unregister_netdev;
 
 	list_add_tail_rcu(&vlan->list, &port->vlans);
 	netif_stacked_transfer_operstate(lowerdev, dev);
 
 	return 0;
 
+unregister_netdev:
+	unregister_netdevice(dev);
 destroy_port:
 	port->count -= 1;
 	if (!port->count)
diff --git a/drivers/net/phy/dp83640.c b/drivers/net/phy/dp83640.c
index 547725f..98e7cbf 100644
--- a/drivers/net/phy/dp83640.c
+++ b/drivers/net/phy/dp83640.c
@@ -437,7 +437,10 @@
 		if (on) {
 			gpio_num = gpio_tab[EXTTS0_GPIO + index];
 			evnt |= (gpio_num & EVNT_GPIO_MASK) << EVNT_GPIO_SHIFT;
-			evnt |= EVNT_RISE;
+			if (rq->extts.flags & PTP_FALLING_EDGE)
+				evnt |= EVNT_FALL;
+			else
+				evnt |= EVNT_RISE;
 		}
 		ext_write(0, phydev, PAGE5, PTP_EVNT, evnt);
 		return 0;
@@ -1003,11 +1006,6 @@
 	} else
 		list_add_tail(&dp83640->list, &clock->phylist);
 
-	if (clock->chosen && !list_empty(&clock->phylist))
-		recalibrate(clock);
-	else
-		enable_broadcast(dp83640->phydev, clock->page, 1);
-
 	dp83640_clock_put(clock);
 	return 0;
 
@@ -1058,6 +1056,21 @@
 	kfree(dp83640);
 }
 
+static int dp83640_config_init(struct phy_device *phydev)
+{
+	struct dp83640_private *dp83640 = phydev->priv;
+	struct dp83640_clock *clock = dp83640->clock;
+
+	if (clock->chosen && !list_empty(&clock->phylist))
+		recalibrate(clock);
+	else
+		enable_broadcast(phydev, clock->page, 1);
+
+	enable_status_frames(phydev, true);
+	ext_write(0, phydev, PAGE4, PTP_CTL, PTP_ENABLE);
+	return 0;
+}
+
 static int dp83640_ack_interrupt(struct phy_device *phydev)
 {
 	int err = phy_read(phydev, MII_DP83640_MISR);
@@ -1195,11 +1208,6 @@
 
 	mutex_lock(&dp83640->clock->extreg_lock);
 
-	if (dp83640->hwts_tx_en || dp83640->hwts_rx_en) {
-		enable_status_frames(phydev, true);
-		ext_write(0, phydev, PAGE4, PTP_CTL, PTP_ENABLE);
-	}
-
 	ext_write(0, phydev, PAGE5, PTP_TXCFG0, txcfg0);
 	ext_write(0, phydev, PAGE5, PTP_RXCFG0, rxcfg0);
 
@@ -1281,6 +1289,7 @@
 		}
 		/* fall through */
 	case HWTSTAMP_TX_ON:
+		skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
 		skb_queue_tail(&dp83640->tx_queue, skb);
 		schedule_work(&dp83640->ts_work);
 		break;
@@ -1330,6 +1339,7 @@
 	.flags		= PHY_HAS_INTERRUPT,
 	.probe		= dp83640_probe,
 	.remove		= dp83640_remove,
+	.config_init	= dp83640_config_init,
 	.config_aneg	= genphy_config_aneg,
 	.read_status	= genphy_read_status,
 	.ack_interrupt  = dp83640_ack_interrupt,
diff --git a/drivers/net/phy/mdio-sun4i.c b/drivers/net/phy/mdio-sun4i.c
index bb88bc7..9367acc 100644
--- a/drivers/net/phy/mdio-sun4i.c
+++ b/drivers/net/phy/mdio-sun4i.c
@@ -170,6 +170,9 @@
 }
 
 static const struct of_device_id sun4i_mdio_dt_ids[] = {
+	{ .compatible = "allwinner,sun4i-a10-mdio" },
+
+	/* Deprecated */
 	{ .compatible = "allwinner,sun4i-mdio" },
 	{ }
 };
diff --git a/drivers/net/phy/mdio_bus.c b/drivers/net/phy/mdio_bus.c
index 930694d..71e4900 100644
--- a/drivers/net/phy/mdio_bus.c
+++ b/drivers/net/phy/mdio_bus.c
@@ -150,6 +150,7 @@
 	err = device_register(&bus->dev);
 	if (err) {
 		pr_err("mii_bus %s failed to register\n", bus->id);
+		put_device(&bus->dev);
 		return -EINVAL;
 	}
 
diff --git a/drivers/net/phy/phy_device.c b/drivers/net/phy/phy_device.c
index 4b03e63..82514e7 100644
--- a/drivers/net/phy/phy_device.c
+++ b/drivers/net/phy/phy_device.c
@@ -719,7 +719,7 @@
 static int genphy_config_advert(struct phy_device *phydev)
 {
 	u32 advertise;
-	int oldadv, adv;
+	int oldadv, adv, bmsr;
 	int err, changed = 0;
 
 	/* Only allow advertising what this PHY supports */
@@ -744,26 +744,36 @@
 		changed = 1;
 	}
 
+	bmsr = phy_read(phydev, MII_BMSR);
+	if (bmsr < 0)
+		return bmsr;
+
+	/* Per 802.3-2008, Section 22.2.4.2.16 Extended status, all
+	 * 1000Mbits/sec capable PHYs shall have the BMSR_ESTATEN bit set to a
+	 * logical 1.
+	 */
+	if (!(bmsr & BMSR_ESTATEN))
+		return changed;
+
 	/* Configure gigabit if it's supported */
+	adv = phy_read(phydev, MII_CTRL1000);
+	if (adv < 0)
+		return adv;
+
+	oldadv = adv;
+	adv &= ~(ADVERTISE_1000FULL | ADVERTISE_1000HALF);
+
 	if (phydev->supported & (SUPPORTED_1000baseT_Half |
 				 SUPPORTED_1000baseT_Full)) {
-		adv = phy_read(phydev, MII_CTRL1000);
-		if (adv < 0)
-			return adv;
-
-		oldadv = adv;
-		adv &= ~(ADVERTISE_1000FULL | ADVERTISE_1000HALF);
 		adv |= ethtool_adv_to_mii_ctrl1000_t(advertise);
-
-		if (adv != oldadv) {
-			err = phy_write(phydev, MII_CTRL1000, adv);
-
-			if (err < 0)
-				return err;
+		if (adv != oldadv)
 			changed = 1;
-		}
 	}
 
+	err = phy_write(phydev, MII_CTRL1000, adv);
+	if (err < 0)
+		return err;
+
 	return changed;
 }
 
diff --git a/drivers/net/team/team.c b/drivers/net/team/team.c
index 2840742..c8624a8 100644
--- a/drivers/net/team/team.c
+++ b/drivers/net/team/team.c
@@ -1648,7 +1648,7 @@
 }
 
 static u16 team_select_queue(struct net_device *dev, struct sk_buff *skb,
-			     void *accel_priv)
+			     void *accel_priv, select_queue_fallback_t fallback)
 {
 	/*
 	 * This helper function exists to help dev_pick_tx get the correct
diff --git a/drivers/net/tun.c b/drivers/net/tun.c
index bcf01af..8fe9cb7 100644
--- a/drivers/net/tun.c
+++ b/drivers/net/tun.c
@@ -69,6 +69,7 @@
 #include <net/netns/generic.h>
 #include <net/rtnetlink.h>
 #include <net/sock.h>
+#include <linux/seq_file.h>
 
 #include <asm/uaccess.h>
 
@@ -365,7 +366,7 @@
  * hope the rxq no. may help here.
  */
 static u16 tun_select_queue(struct net_device *dev, struct sk_buff *skb,
-			    void *accel_priv)
+			    void *accel_priv, select_queue_fallback_t fallback)
 {
 	struct tun_struct *tun = netdev_priv(dev);
 	struct tun_flow_entry *e;
@@ -2228,6 +2229,27 @@
 	return 0;
 }
 
+#ifdef CONFIG_PROC_FS
+static int tun_chr_show_fdinfo(struct seq_file *m, struct file *f)
+{
+	struct tun_struct *tun;
+	struct ifreq ifr;
+
+	memset(&ifr, 0, sizeof(ifr));
+
+	rtnl_lock();
+	tun = tun_get(f);
+	if (tun)
+		tun_get_iff(current->nsproxy->net_ns, tun, &ifr);
+	rtnl_unlock();
+
+	if (tun)
+		tun_put(tun);
+
+	return seq_printf(m, "iff:\t%s\n", ifr.ifr_name);
+}
+#endif
+
 static const struct file_operations tun_fops = {
 	.owner	= THIS_MODULE,
 	.llseek = no_llseek,
@@ -2242,7 +2264,10 @@
 #endif
 	.open	= tun_chr_open,
 	.release = tun_chr_close,
-	.fasync = tun_chr_fasync
+	.fasync = tun_chr_fasync,
+#ifdef CONFIG_PROC_FS
+	.show_fdinfo = tun_chr_show_fdinfo,
+#endif
 };
 
 static struct miscdevice tun_miscdev = {
diff --git a/drivers/net/usb/Kconfig b/drivers/net/usb/Kconfig
index 6b638a0..7e7269f 100644
--- a/drivers/net/usb/Kconfig
+++ b/drivers/net/usb/Kconfig
@@ -292,6 +292,21 @@
 	  This option adds support for CoreChip-sz SR9700 based USB 1.1
 	  10/100 Ethernet adapters.
 
+config USB_NET_SR9800
+	tristate "CoreChip-sz SR9800 based USB 2.0 10/100 ethernet devices"
+	depends on USB_USBNET
+	select CRC32
+	---help---
+	  Say Y if you want to use one of the following 100Mbps USB Ethernet
+	  devices based on the CoreChip-sz SR9800 chip.
+
+	  This driver makes the adapter appear as a normal Ethernet interface,
+	  typically on eth0, if it is the only ethernet device, or perhaps on
+	  eth1, if you have a PCI or ISA ethernet card installed.
+
+	  To compile this driver as a module, choose M here: the
+	  module will be called sr9800.
+
 config USB_NET_SMSC75XX
 	tristate "SMSC LAN75XX based USB 2.0 gigabit ethernet devices"
 	depends on USB_USBNET
diff --git a/drivers/net/usb/Makefile b/drivers/net/usb/Makefile
index b17b5e8..433f0a0 100644
--- a/drivers/net/usb/Makefile
+++ b/drivers/net/usb/Makefile
@@ -15,6 +15,7 @@
 obj-$(CONFIG_USB_NET_CDC_EEM)	+= cdc_eem.o
 obj-$(CONFIG_USB_NET_DM9601)	+= dm9601.o
 obj-$(CONFIG_USB_NET_SR9700)	+= sr9700.o
+obj-$(CONFIG_USB_NET_SR9800)	+= sr9800.o
 obj-$(CONFIG_USB_NET_SMSC75XX)	+= smsc75xx.o
 obj-$(CONFIG_USB_NET_SMSC95XX)	+= smsc95xx.o
 obj-$(CONFIG_USB_NET_GL620A)	+= gl620a.o
diff --git a/drivers/net/usb/asix_devices.c b/drivers/net/usb/asix_devices.c
index 9765a7d..5d19409 100644
--- a/drivers/net/usb/asix_devices.c
+++ b/drivers/net/usb/asix_devices.c
@@ -917,7 +917,8 @@
 	.status = asix_status,
 	.link_reset = ax88178_link_reset,
 	.reset = ax88178_reset,
-	.flags = FLAG_ETHER | FLAG_FRAMING_AX | FLAG_LINK_INTR,
+	.flags = FLAG_ETHER | FLAG_FRAMING_AX | FLAG_LINK_INTR |
+		 FLAG_MULTI_PACKET,
 	.rx_fixup = asix_rx_fixup_common,
 	.tx_fixup = asix_tx_fixup,
 };
diff --git a/drivers/net/usb/ax88179_178a.c b/drivers/net/usb/ax88179_178a.c
index d6f64da..955df81 100644
--- a/drivers/net/usb/ax88179_178a.c
+++ b/drivers/net/usb/ax88179_178a.c
@@ -1118,6 +1118,10 @@
 	u16 hdr_off;
 	u32 *pkt_hdr;
 
+	/* This check is no longer done by usbnet */
+	if (skb->len < dev->net->hard_header_len)
+		return 0;
+
 	skb_trim(skb, skb->len - 4);
 	memcpy(&rx_hdr, skb_tail_pointer(skb), 4);
 	le32_to_cpus(&rx_hdr);
diff --git a/drivers/net/usb/gl620a.c b/drivers/net/usb/gl620a.c
index e4a8a93..1cc24e6 100644
--- a/drivers/net/usb/gl620a.c
+++ b/drivers/net/usb/gl620a.c
@@ -84,6 +84,10 @@
 	u32			size;
 	u32			count;
 
+	/* This check is no longer done by usbnet */
+	if (skb->len < dev->net->hard_header_len)
+		return 0;
+
 	header = (struct gl_header *) skb->data;
 
 	// get the packet count of the received skb
diff --git a/drivers/net/usb/hso.c b/drivers/net/usb/hso.c
index 1a48234..660bd5e 100644
--- a/drivers/net/usb/hso.c
+++ b/drivers/net/usb/hso.c
@@ -1201,16 +1201,18 @@
 	struct hso_serial *serial = urb->context;
 	int status = urb->status;
 
+	D4("\n--- Got serial_read_bulk callback %02x ---", status);
+
 	/* sanity check */
 	if (!serial) {
 		D1("serial == NULL");
 		return;
-	} else if (status) {
+	}
+	if (status) {
 		handle_usb_error(status, __func__, serial->parent);
 		return;
 	}
 
-	D4("\n--- Got serial_read_bulk callback %02x ---", status);
 	D1("Actual length = %d\n", urb->actual_length);
 	DUMP1(urb->transfer_buffer, urb->actual_length);
 
@@ -1218,25 +1220,13 @@
 	if (serial->port.count == 0)
 		return;
 
-	if (status == 0) {
-		if (serial->parent->port_spec & HSO_INFO_CRC_BUG)
-			fix_crc_bug(urb, serial->in_endp->wMaxPacketSize);
-		/* Valid data, handle RX data */
-		spin_lock(&serial->serial_lock);
-		serial->rx_urb_filled[hso_urb_to_index(serial, urb)] = 1;
-		put_rxbuf_data_and_resubmit_bulk_urb(serial);
-		spin_unlock(&serial->serial_lock);
-	} else if (status == -ENOENT || status == -ECONNRESET) {
-		/* Unlinked - check for throttled port. */
-		D2("Port %d, successfully unlinked urb", serial->minor);
-		spin_lock(&serial->serial_lock);
-		serial->rx_urb_filled[hso_urb_to_index(serial, urb)] = 0;
-		hso_resubmit_rx_bulk_urb(serial, urb);
-		spin_unlock(&serial->serial_lock);
-	} else {
-		D2("Port %d, status = %d for read urb", serial->minor, status);
-		return;
-	}
+	if (serial->parent->port_spec & HSO_INFO_CRC_BUG)
+		fix_crc_bug(urb, serial->in_endp->wMaxPacketSize);
+	/* Valid data, handle RX data */
+	spin_lock(&serial->serial_lock);
+	serial->rx_urb_filled[hso_urb_to_index(serial, urb)] = 1;
+	put_rxbuf_data_and_resubmit_bulk_urb(serial);
+	spin_unlock(&serial->serial_lock);
 }
 
 /*
diff --git a/drivers/net/usb/mcs7830.c b/drivers/net/usb/mcs7830.c
index a305a7b..82d844a 100644
--- a/drivers/net/usb/mcs7830.c
+++ b/drivers/net/usb/mcs7830.c
@@ -526,8 +526,9 @@
 {
 	u8 status;
 
-	if (skb->len == 0) {
-		dev_err(&dev->udev->dev, "unexpected empty rx frame\n");
+	/* This check is no longer done by usbnet */
+	if (skb->len < dev->net->hard_header_len) {
+		dev_err(&dev->udev->dev, "unexpected tiny rx frame\n");
 		return 0;
 	}
 
diff --git a/drivers/net/usb/net1080.c b/drivers/net/usb/net1080.c
index 0a85d92..4cbdb13 100644
--- a/drivers/net/usb/net1080.c
+++ b/drivers/net/usb/net1080.c
@@ -364,6 +364,10 @@
 	struct nc_trailer	*trailer;
 	u16			hdr_len, packet_len;
 
+	/* This check is no longer done by usbnet */
+	if (skb->len < dev->net->hard_header_len)
+		return 0;
+
 	if (!(skb->len & 0x01)) {
 		netdev_dbg(dev->net, "rx framesize %d range %d..%d mtu %d\n",
 			   skb->len, dev->net->hard_header_len, dev->hard_mtu,
diff --git a/drivers/net/usb/qmi_wwan.c b/drivers/net/usb/qmi_wwan.c
index 23bdd5b..313cb6c 100644
--- a/drivers/net/usb/qmi_wwan.c
+++ b/drivers/net/usb/qmi_wwan.c
@@ -80,10 +80,10 @@
 {
 	__be16 proto;
 
-	/* usbnet rx_complete guarantees that skb->len is at least
-	 * hard_header_len, so we can inspect the dest address without
-	 * checking skb->len
-	 */
+	/* This check is no longer done by usbnet */
+	if (skb->len < dev->net->hard_header_len)
+		return 0;
+
 	switch (skb->data[0] & 0xf0) {
 	case 0x40:
 		proto = htons(ETH_P_IP);
@@ -712,6 +712,7 @@
 	{QMI_FIXED_INTF(0x19d2, 0x1255, 3)},
 	{QMI_FIXED_INTF(0x19d2, 0x1255, 4)},
 	{QMI_FIXED_INTF(0x19d2, 0x1256, 4)},
+	{QMI_FIXED_INTF(0x19d2, 0x1270, 5)},	/* ZTE MF667 */
 	{QMI_FIXED_INTF(0x19d2, 0x1401, 2)},
 	{QMI_FIXED_INTF(0x19d2, 0x1402, 2)},	/* ZTE MF60 */
 	{QMI_FIXED_INTF(0x19d2, 0x1424, 2)},
@@ -723,6 +724,7 @@
 	{QMI_FIXED_INTF(0x1199, 0x68a2, 8)},	/* Sierra Wireless MC7710 in QMI mode */
 	{QMI_FIXED_INTF(0x1199, 0x68a2, 19)},	/* Sierra Wireless MC7710 in QMI mode */
 	{QMI_FIXED_INTF(0x1199, 0x901c, 8)},    /* Sierra Wireless EM7700 */
+	{QMI_FIXED_INTF(0x1199, 0x9051, 8)},	/* Netgear AirCard 340U */
 	{QMI_FIXED_INTF(0x1bbb, 0x011e, 4)},	/* Telekom Speedstick LTE II (Alcatel One Touch L100V LTE) */
 	{QMI_FIXED_INTF(0x2357, 0x0201, 4)},	/* TP-LINK HSUPA Modem MA180 */
 	{QMI_FIXED_INTF(0x2357, 0x9000, 4)},	/* TP-LINK MA260 */
@@ -730,6 +732,7 @@
 	{QMI_FIXED_INTF(0x1bc7, 0x1201, 2)},	/* Telit LE920 */
 	{QMI_FIXED_INTF(0x0b3c, 0xc005, 6)},    /* Olivetti Olicard 200 */
 	{QMI_FIXED_INTF(0x1e2d, 0x0060, 4)},	/* Cinterion PLxx */
+	{QMI_FIXED_INTF(0x1e2d, 0x0053, 4)},	/* Cinterion PHxx,PXxx */
 
 	/* 4. Gobi 1000 devices */
 	{QMI_GOBI1K_DEVICE(0x05c6, 0x9212)},	/* Acer Gobi Modem Device */
diff --git a/drivers/net/usb/r8152.c b/drivers/net/usb/r8152.c
index e8fac73..d89dbe3 100644
--- a/drivers/net/usb/r8152.c
+++ b/drivers/net/usb/r8152.c
@@ -2273,15 +2273,6 @@
 	struct r8152 *tp = netdev_priv(netdev);
 	int res = 0;
 
-	res = usb_submit_urb(tp->intr_urb, GFP_KERNEL);
-	if (res) {
-		if (res == -ENODEV)
-			netif_device_detach(tp->netdev);
-		netif_warn(tp, ifup, netdev, "intr_urb submit failed: %d\n",
-			   res);
-		return res;
-	}
-
 	rtl8152_set_speed(tp, AUTONEG_ENABLE,
 			  tp->mii.supports_gmii ? SPEED_1000 : SPEED_100,
 			  DUPLEX_FULL);
@@ -2289,6 +2280,14 @@
 	netif_carrier_off(netdev);
 	netif_start_queue(netdev);
 	set_bit(WORK_ENABLE, &tp->flags);
+	res = usb_submit_urb(tp->intr_urb, GFP_KERNEL);
+	if (res) {
+		if (res == -ENODEV)
+			netif_device_detach(tp->netdev);
+		netif_warn(tp, ifup, netdev, "intr_urb submit failed: %d\n",
+			   res);
+	}
+
 
 	return res;
 }
@@ -2298,8 +2297,8 @@
 	struct r8152 *tp = netdev_priv(netdev);
 	int res = 0;
 
-	usb_kill_urb(tp->intr_urb);
 	clear_bit(WORK_ENABLE, &tp->flags);
+	usb_kill_urb(tp->intr_urb);
 	cancel_delayed_work_sync(&tp->schedule);
 	netif_stop_queue(netdev);
 	tasklet_disable(&tp->tl);
diff --git a/drivers/net/usb/rndis_host.c b/drivers/net/usb/rndis_host.c
index a48bc0f..524a47a281 100644
--- a/drivers/net/usb/rndis_host.c
+++ b/drivers/net/usb/rndis_host.c
@@ -492,6 +492,10 @@
  */
 int rndis_rx_fixup(struct usbnet *dev, struct sk_buff *skb)
 {
+	/* This check is no longer done by usbnet */
+	if (skb->len < dev->net->hard_header_len)
+		return 0;
+
 	/* peripheral may have batched packets to us... */
 	while (likely(skb->len)) {
 		struct rndis_data_hdr	*hdr = (void *)skb->data;
diff --git a/drivers/net/usb/smsc75xx.c b/drivers/net/usb/smsc75xx.c
index f17b9e0..d9e7892 100644
--- a/drivers/net/usb/smsc75xx.c
+++ b/drivers/net/usb/smsc75xx.c
@@ -2106,6 +2106,10 @@
 
 static int smsc75xx_rx_fixup(struct usbnet *dev, struct sk_buff *skb)
 {
+	/* This check is no longer done by usbnet */
+	if (skb->len < dev->net->hard_header_len)
+		return 0;
+
 	while (skb->len > 0) {
 		u32 rx_cmd_a, rx_cmd_b, align_count, size;
 		struct sk_buff *ax_skb;
diff --git a/drivers/net/usb/smsc95xx.c b/drivers/net/usb/smsc95xx.c
index 8dd54a0..424db65e 100644
--- a/drivers/net/usb/smsc95xx.c
+++ b/drivers/net/usb/smsc95xx.c
@@ -1723,6 +1723,10 @@
 
 static int smsc95xx_rx_fixup(struct usbnet *dev, struct sk_buff *skb)
 {
+	/* This check is no longer done by usbnet */
+	if (skb->len < dev->net->hard_header_len)
+		return 0;
+
 	while (skb->len > 0) {
 		u32 header, align_count;
 		struct sk_buff *ax_skb;
diff --git a/drivers/net/usb/sr9800.c b/drivers/net/usb/sr9800.c
new file mode 100644
index 0000000..b94a0fb
--- /dev/null
+++ b/drivers/net/usb/sr9800.c
@@ -0,0 +1,874 @@
+/* CoreChip-sz SR9800 one chip USB 2.0 Ethernet Devices
+ *
+ * Author : Liu Junliang <liujunliang_ljl@163.com>
+ *
+ * Based on asix_common.c, asix_devices.c
+ *
+ * This file is licensed under the terms of the GNU General Public License
+ * version 2.  This program is licensed "as is" without any warranty of any
+ * kind, whether express or implied.
+ */
+
+#include <linux/module.h>
+#include <linux/kmod.h>
+#include <linux/init.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/ethtool.h>
+#include <linux/workqueue.h>
+#include <linux/mii.h>
+#include <linux/usb.h>
+#include <linux/crc32.h>
+#include <linux/usb/usbnet.h>
+#include <linux/slab.h>
+#include <linux/if_vlan.h>
+
+#include "sr9800.h"
+
+static int sr_read_cmd(struct usbnet *dev, u8 cmd, u16 value, u16 index,
+			    u16 size, void *data)
+{
+	int err;
+
+	err = usbnet_read_cmd(dev, cmd, SR_REQ_RD_REG, value, index,
+			      data, size);
+	if ((err != size) && (err >= 0))
+		err = -EINVAL;
+
+	return err;
+}
+
+static int sr_write_cmd(struct usbnet *dev, u8 cmd, u16 value, u16 index,
+			     u16 size, void *data)
+{
+	int err;
+
+	err = usbnet_write_cmd(dev, cmd, SR_REQ_WR_REG, value, index,
+			      data, size);
+	if ((err != size) && (err >= 0))
+		err = -EINVAL;
+
+	return err;
+}
+
+static void
+sr_write_cmd_async(struct usbnet *dev, u8 cmd, u16 value, u16 index,
+		   u16 size, void *data)
+{
+	usbnet_write_cmd_async(dev, cmd, SR_REQ_WR_REG, value, index, data,
+			       size);
+}
+
+static int sr_rx_fixup(struct usbnet *dev, struct sk_buff *skb)
+{
+	int offset = 0;
+
+	/* This check is no longer done by usbnet */
+	if (skb->len < dev->net->hard_header_len)
+		return 0;
+
+	while (offset + sizeof(u32) < skb->len) {
+		struct sk_buff *sr_skb;
+		u16 size;
+		u32 header = get_unaligned_le32(skb->data + offset);
+
+		offset += sizeof(u32);
+		/* get the packet length */
+		size = (u16) (header & 0x7ff);
+		if (size != ((~header >> 16) & 0x07ff)) {
+			netdev_err(dev->net, "%s : Bad Header Length\n",
+				   __func__);
+			return 0;
+		}
+
+		if ((size > dev->net->mtu + ETH_HLEN + VLAN_HLEN) ||
+		    (size + offset > skb->len)) {
+			netdev_err(dev->net, "%s : Bad RX Length %d\n",
+				   __func__, size);
+			return 0;
+		}
+		sr_skb = netdev_alloc_skb_ip_align(dev->net, size);
+		if (!sr_skb)
+			return 0;
+
+		skb_put(sr_skb, size);
+		memcpy(sr_skb->data, skb->data + offset, size);
+		usbnet_skb_return(dev, sr_skb);
+
+		offset += (size + 1) & 0xfffe;
+	}
+
+	if (skb->len != offset) {
+		netdev_err(dev->net, "%s : Bad SKB Length %d\n", __func__,
+			   skb->len);
+		return 0;
+	}
+
+	return 1;
+}
+
+static struct sk_buff *sr_tx_fixup(struct usbnet *dev, struct sk_buff *skb,
+					gfp_t flags)
+{
+	int headroom = skb_headroom(skb);
+	int tailroom = skb_tailroom(skb);
+	u32 padbytes = 0xffff0000;
+	u32 packet_len;
+	int padlen;
+
+	padlen = ((skb->len + 4) % (dev->maxpacket - 1)) ? 0 : 4;
+
+	if ((!skb_cloned(skb)) && ((headroom + tailroom) >= (4 + padlen))) {
+		if ((headroom < 4) || (tailroom < padlen)) {
+			skb->data = memmove(skb->head + 4, skb->data,
+					    skb->len);
+			skb_set_tail_pointer(skb, skb->len);
+		}
+	} else {
+		struct sk_buff *skb2;
+		skb2 = skb_copy_expand(skb, 4, padlen, flags);
+		dev_kfree_skb_any(skb);
+		skb = skb2;
+		if (!skb)
+			return NULL;
+	}
+
+	skb_push(skb, 4);
+	packet_len = (((skb->len - 4) ^ 0x0000ffff) << 16) + (skb->len - 4);
+	cpu_to_le32s(&packet_len);
+	skb_copy_to_linear_data(skb, &packet_len, sizeof(packet_len));
+
+	if (padlen) {
+		cpu_to_le32s(&padbytes);
+		memcpy(skb_tail_pointer(skb), &padbytes, sizeof(padbytes));
+		skb_put(skb, sizeof(padbytes));
+	}
+
+	return skb;
+}
+
+static void sr_status(struct usbnet *dev, struct urb *urb)
+{
+	struct sr9800_int_data *event;
+	int link;
+
+	if (urb->actual_length < 8)
+		return;
+
+	event = urb->transfer_buffer;
+	link = event->link & 0x01;
+	if (netif_carrier_ok(dev->net) != link) {
+		usbnet_link_change(dev, link, 1);
+		netdev_dbg(dev->net, "Link Status is: %d\n", link);
+	}
+
+	return;
+}
+
+static inline int sr_set_sw_mii(struct usbnet *dev)
+{
+	int ret;
+
+	ret = sr_write_cmd(dev, SR_CMD_SET_SW_MII, 0x0000, 0, 0, NULL);
+	if (ret < 0)
+		netdev_err(dev->net, "Failed to enable software MII access\n");
+	return ret;
+}
+
+static inline int sr_set_hw_mii(struct usbnet *dev)
+{
+	int ret;
+
+	ret = sr_write_cmd(dev, SR_CMD_SET_HW_MII, 0x0000, 0, 0, NULL);
+	if (ret < 0)
+		netdev_err(dev->net, "Failed to enable hardware MII access\n");
+	return ret;
+}
+
+static inline int sr_get_phy_addr(struct usbnet *dev)
+{
+	u8 buf[2];
+	int ret;
+
+	ret = sr_read_cmd(dev, SR_CMD_READ_PHY_ID, 0, 0, 2, buf);
+	if (ret < 0) {
+		netdev_err(dev->net, "%s : Error reading PHYID register:%02x\n",
+			   __func__, ret);
+		goto out;
+	}
+	netdev_dbg(dev->net, "%s : returning 0x%04x\n", __func__,
+		   *((__le16 *)buf));
+
+	ret = buf[1];
+
+out:
+	return ret;
+}
+
+static int sr_sw_reset(struct usbnet *dev, u8 flags)
+{
+	int ret;
+
+	ret = sr_write_cmd(dev, SR_CMD_SW_RESET, flags, 0, 0, NULL);
+	if (ret < 0)
+		netdev_err(dev->net, "Failed to send software reset:%02x\n",
+			   ret);
+
+	return ret;
+}
+
+static u16 sr_read_rx_ctl(struct usbnet *dev)
+{
+	__le16 v;
+	int ret;
+
+	ret = sr_read_cmd(dev, SR_CMD_READ_RX_CTL, 0, 0, 2, &v);
+	if (ret < 0) {
+		netdev_err(dev->net, "Error reading RX_CTL register:%02x\n",
+			   ret);
+		goto out;
+	}
+
+	ret = le16_to_cpu(v);
+out:
+	return ret;
+}
+
+static int sr_write_rx_ctl(struct usbnet *dev, u16 mode)
+{
+	int ret;
+
+	netdev_dbg(dev->net, "%s : mode = 0x%04x\n", __func__, mode);
+	ret = sr_write_cmd(dev, SR_CMD_WRITE_RX_CTL, mode, 0, 0, NULL);
+	if (ret < 0)
+		netdev_err(dev->net,
+			   "Failed to write RX_CTL mode to 0x%04x:%02x\n",
+			   mode, ret);
+
+	return ret;
+}
+
+static u16 sr_read_medium_status(struct usbnet *dev)
+{
+	__le16 v;
+	int ret;
+
+	ret = sr_read_cmd(dev, SR_CMD_READ_MEDIUM_STATUS, 0, 0, 2, &v);
+	if (ret < 0) {
+		netdev_err(dev->net,
+			   "Error reading Medium Status register:%02x\n", ret);
+		return ret;	/* TODO: callers not checking for error ret */
+	}
+
+	return le16_to_cpu(v);
+}
+
+static int sr_write_medium_mode(struct usbnet *dev, u16 mode)
+{
+	int ret;
+
+	netdev_dbg(dev->net, "%s : mode = 0x%04x\n", __func__, mode);
+	ret = sr_write_cmd(dev, SR_CMD_WRITE_MEDIUM_MODE, mode, 0, 0, NULL);
+	if (ret < 0)
+		netdev_err(dev->net,
+			   "Failed to write Medium Mode mode to 0x%04x:%02x\n",
+			   mode, ret);
+	return ret;
+}
+
+static int sr_write_gpio(struct usbnet *dev, u16 value, int sleep)
+{
+	int ret;
+
+	netdev_dbg(dev->net, "%s : value = 0x%04x\n", __func__, value);
+	ret = sr_write_cmd(dev, SR_CMD_WRITE_GPIOS, value, 0, 0, NULL);
+	if (ret < 0)
+		netdev_err(dev->net, "Failed to write GPIO value 0x%04x:%02x\n",
+			   value, ret);
+	if (sleep)
+		msleep(sleep);
+
+	return ret;
+}
+
+/* SR9800 has a 16-bit RX_CTL value */
+static void sr_set_multicast(struct net_device *net)
+{
+	struct usbnet *dev = netdev_priv(net);
+	struct sr_data *data = (struct sr_data *)&dev->data;
+	u16 rx_ctl = SR_DEFAULT_RX_CTL;
+
+	if (net->flags & IFF_PROMISC) {
+		rx_ctl |= SR_RX_CTL_PRO;
+	} else if (net->flags & IFF_ALLMULTI ||
+		   netdev_mc_count(net) > SR_MAX_MCAST) {
+		rx_ctl |= SR_RX_CTL_AMALL;
+	} else if (netdev_mc_empty(net)) {
+		/* just broadcast and directed */
+	} else {
+		/* We use the 20 byte dev->data
+		 * for our 8 byte filter buffer
+		 * to avoid allocating memory that
+		 * is tricky to free later
+		 */
+		struct netdev_hw_addr *ha;
+		u32 crc_bits;
+
+		memset(data->multi_filter, 0, SR_MCAST_FILTER_SIZE);
+
+		/* Build the multicast hash filter. */
+		netdev_for_each_mc_addr(ha, net) {
+			crc_bits = ether_crc(ETH_ALEN, ha->addr) >> 26;
+			data->multi_filter[crc_bits >> 3] |=
+			    1 << (crc_bits & 7);
+		}
+
+		sr_write_cmd_async(dev, SR_CMD_WRITE_MULTI_FILTER, 0, 0,
+				   SR_MCAST_FILTER_SIZE, data->multi_filter);
+
+		rx_ctl |= SR_RX_CTL_AM;
+	}
+
+	sr_write_cmd_async(dev, SR_CMD_WRITE_RX_CTL, rx_ctl, 0, 0, NULL);
+}
+
+static int sr_mdio_read(struct net_device *net, int phy_id, int loc)
+{
+	struct usbnet *dev = netdev_priv(net);
+	__le16 res;
+
+	mutex_lock(&dev->phy_mutex);
+	sr_set_sw_mii(dev);
+	sr_read_cmd(dev, SR_CMD_READ_MII_REG, phy_id, (__u16)loc, 2, &res);
+	sr_set_hw_mii(dev);
+	mutex_unlock(&dev->phy_mutex);
+
+	netdev_dbg(dev->net,
+		   "%s : phy_id=0x%02x, loc=0x%02x, returns=0x%04x\n", __func__,
+		   phy_id, loc, le16_to_cpu(res));
+
+	return le16_to_cpu(res);
+}
+
+static void
+sr_mdio_write(struct net_device *net, int phy_id, int loc, int val)
+{
+	struct usbnet *dev = netdev_priv(net);
+	__le16 res = cpu_to_le16(val);
+
+	netdev_dbg(dev->net,
+		   "%s : phy_id=0x%02x, loc=0x%02x, val=0x%04x\n", __func__,
+		   phy_id, loc, val);
+	mutex_lock(&dev->phy_mutex);
+	sr_set_sw_mii(dev);
+	sr_write_cmd(dev, SR_CMD_WRITE_MII_REG, phy_id, (__u16)loc, 2, &res);
+	sr_set_hw_mii(dev);
+	mutex_unlock(&dev->phy_mutex);
+}
+
+/* Get the PHY Identifier from the PHYSID1 & PHYSID2 MII registers */
+static u32 sr_get_phyid(struct usbnet *dev)
+{
+	int phy_reg;
+	u32 phy_id;
+	int i;
+
+	/* Poll for the rare case the FW or phy isn't ready yet.  */
+	for (i = 0; i < 100; i++) {
+		phy_reg = sr_mdio_read(dev->net, dev->mii.phy_id, MII_PHYSID1);
+		if (phy_reg != 0 && phy_reg != 0xFFFF)
+			break;
+		mdelay(1);
+	}
+
+	if (phy_reg <= 0 || phy_reg == 0xFFFF)
+		return 0;
+
+	phy_id = (phy_reg & 0xffff) << 16;
+
+	phy_reg = sr_mdio_read(dev->net, dev->mii.phy_id, MII_PHYSID2);
+	if (phy_reg < 0)
+		return 0;
+
+	phy_id |= (phy_reg & 0xffff);
+
+	return phy_id;
+}
+
+static void
+sr_get_wol(struct net_device *net, struct ethtool_wolinfo *wolinfo)
+{
+	struct usbnet *dev = netdev_priv(net);
+	u8 opt;
+
+	if (sr_read_cmd(dev, SR_CMD_READ_MONITOR_MODE, 0, 0, 1, &opt) < 0) {
+		wolinfo->supported = 0;
+		wolinfo->wolopts = 0;
+		return;
+	}
+	wolinfo->supported = WAKE_PHY | WAKE_MAGIC;
+	wolinfo->wolopts = 0;
+	if (opt & SR_MONITOR_LINK)
+		wolinfo->wolopts |= WAKE_PHY;
+	if (opt & SR_MONITOR_MAGIC)
+		wolinfo->wolopts |= WAKE_MAGIC;
+}
+
+static int
+sr_set_wol(struct net_device *net, struct ethtool_wolinfo *wolinfo)
+{
+	struct usbnet *dev = netdev_priv(net);
+	u8 opt = 0;
+
+	if (wolinfo->wolopts & WAKE_PHY)
+		opt |= SR_MONITOR_LINK;
+	if (wolinfo->wolopts & WAKE_MAGIC)
+		opt |= SR_MONITOR_MAGIC;
+
+	if (sr_write_cmd(dev, SR_CMD_WRITE_MONITOR_MODE,
+			 opt, 0, 0, NULL) < 0)
+		return -EINVAL;
+
+	return 0;
+}
+
+static int sr_get_eeprom_len(struct net_device *net)
+{
+	struct usbnet *dev = netdev_priv(net);
+	struct sr_data *data = (struct sr_data *)&dev->data;
+
+	return data->eeprom_len;
+}
+
+static int sr_get_eeprom(struct net_device *net,
+			      struct ethtool_eeprom *eeprom, u8 *data)
+{
+	struct usbnet *dev = netdev_priv(net);
+	__le16 *ebuf = (__le16 *)data;
+	int ret;
+	int i;
+
+	/* Crude hack to ensure that we don't overwrite memory
+	 * if an odd length is supplied
+	 */
+	if (eeprom->len % 2)
+		return -EINVAL;
+
+	eeprom->magic = SR_EEPROM_MAGIC;
+
+	/* sr9800 returns 2 bytes from eeprom on read */
+	for (i = 0; i < eeprom->len / 2; i++) {
+		ret = sr_read_cmd(dev, SR_CMD_READ_EEPROM, eeprom->offset + i,
+				  0, 2, &ebuf[i]);
+		if (ret < 0)
+			return -EINVAL;
+	}
+	return 0;
+}
+
+static void sr_get_drvinfo(struct net_device *net,
+				 struct ethtool_drvinfo *info)
+{
+	struct usbnet *dev = netdev_priv(net);
+	struct sr_data *data = (struct sr_data *)&dev->data;
+
+	/* Inherit standard device info */
+	usbnet_get_drvinfo(net, info);
+	strncpy(info->driver, DRIVER_NAME, sizeof(info->driver));
+	strncpy(info->version, DRIVER_VERSION, sizeof(info->version));
+	info->eedump_len = data->eeprom_len;
+}
+
+static u32 sr_get_link(struct net_device *net)
+{
+	struct usbnet *dev = netdev_priv(net);
+
+	return mii_link_ok(&dev->mii);
+}
+
+static int sr_ioctl(struct net_device *net, struct ifreq *rq, int cmd)
+{
+	struct usbnet *dev = netdev_priv(net);
+
+	return generic_mii_ioctl(&dev->mii, if_mii(rq), cmd, NULL);
+}
+
+static int sr_set_mac_address(struct net_device *net, void *p)
+{
+	struct usbnet *dev = netdev_priv(net);
+	struct sr_data *data = (struct sr_data *)&dev->data;
+	struct sockaddr *addr = p;
+
+	if (netif_running(net))
+		return -EBUSY;
+	if (!is_valid_ether_addr(addr->sa_data))
+		return -EADDRNOTAVAIL;
+
+	memcpy(net->dev_addr, addr->sa_data, ETH_ALEN);
+
+	/* We use the 20 byte dev->data
+	 * for our 6 byte mac buffer
+	 * to avoid allocating memory that
+	 * is tricky to free later
+	 */
+	memcpy(data->mac_addr, addr->sa_data, ETH_ALEN);
+	sr_write_cmd_async(dev, SR_CMD_WRITE_NODE_ID, 0, 0, ETH_ALEN,
+			   data->mac_addr);
+
+	return 0;
+}
+
+static const struct ethtool_ops sr9800_ethtool_ops = {
+	.get_drvinfo	= sr_get_drvinfo,
+	.get_link	= sr_get_link,
+	.get_msglevel	= usbnet_get_msglevel,
+	.set_msglevel	= usbnet_set_msglevel,
+	.get_wol	= sr_get_wol,
+	.set_wol	= sr_set_wol,
+	.get_eeprom_len	= sr_get_eeprom_len,
+	.get_eeprom	= sr_get_eeprom,
+	.get_settings	= usbnet_get_settings,
+	.set_settings	= usbnet_set_settings,
+	.nway_reset	= usbnet_nway_reset,
+};
+
+static int sr9800_link_reset(struct usbnet *dev)
+{
+	struct ethtool_cmd ecmd = { .cmd = ETHTOOL_GSET };
+	u16 mode;
+
+	mii_check_media(&dev->mii, 1, 1);
+	mii_ethtool_gset(&dev->mii, &ecmd);
+	mode = SR9800_MEDIUM_DEFAULT;
+
+	if (ethtool_cmd_speed(&ecmd) != SPEED_100)
+		mode &= ~SR_MEDIUM_PS;
+
+	if (ecmd.duplex != DUPLEX_FULL)
+		mode &= ~SR_MEDIUM_FD;
+
+	netdev_dbg(dev->net, "%s : speed: %u duplex: %d mode: 0x%04x\n",
+		   __func__, ethtool_cmd_speed(&ecmd), ecmd.duplex, mode);
+
+	sr_write_medium_mode(dev, mode);
+
+	return 0;
+}
+
+
+static int sr9800_set_default_mode(struct usbnet *dev)
+{
+	u16 rx_ctl;
+	int ret;
+
+	sr_mdio_write(dev->net, dev->mii.phy_id, MII_BMCR, BMCR_RESET);
+	sr_mdio_write(dev->net, dev->mii.phy_id, MII_ADVERTISE,
+		      ADVERTISE_ALL | ADVERTISE_CSMA);
+	mii_nway_restart(&dev->mii);
+
+	ret = sr_write_medium_mode(dev, SR9800_MEDIUM_DEFAULT);
+	if (ret < 0)
+		goto out;
+
+	ret = sr_write_cmd(dev, SR_CMD_WRITE_IPG012,
+				SR9800_IPG0_DEFAULT | SR9800_IPG1_DEFAULT,
+				SR9800_IPG2_DEFAULT, 0, NULL);
+	if (ret < 0) {
+		netdev_dbg(dev->net, "Write IPG,IPG1,IPG2 failed: %d\n", ret);
+		goto out;
+	}
+
+	/* Set RX_CTL to default values with 2k buffer, and enable cactus */
+	ret = sr_write_rx_ctl(dev, SR_DEFAULT_RX_CTL);
+	if (ret < 0)
+		goto out;
+
+	rx_ctl = sr_read_rx_ctl(dev);
+	netdev_dbg(dev->net, "RX_CTL is 0x%04x after all initializations\n",
+		   rx_ctl);
+
+	rx_ctl = sr_read_medium_status(dev);
+	netdev_dbg(dev->net, "Medium Status:0x%04x after all initializations\n",
+		   rx_ctl);
+
+	return 0;
+out:
+	return ret;
+}
+
+static int sr9800_reset(struct usbnet *dev)
+{
+	struct sr_data *data = (struct sr_data *)&dev->data;
+	int ret, embd_phy;
+	u16 rx_ctl;
+
+	ret = sr_write_gpio(dev,
+			SR_GPIO_RSE | SR_GPIO_GPO_2 | SR_GPIO_GPO2EN, 5);
+	if (ret < 0)
+		goto out;
+
+	embd_phy = ((sr_get_phy_addr(dev) & 0x1f) == 0x10 ? 1 : 0);
+
+	ret = sr_write_cmd(dev, SR_CMD_SW_PHY_SELECT, embd_phy, 0, 0, NULL);
+	if (ret < 0) {
+		netdev_dbg(dev->net, "Select PHY #1 failed: %d\n", ret);
+		goto out;
+	}
+
+	ret = sr_sw_reset(dev, SR_SWRESET_IPPD | SR_SWRESET_PRL);
+	if (ret < 0)
+		goto out;
+
+	msleep(150);
+
+	ret = sr_sw_reset(dev, SR_SWRESET_CLEAR);
+	if (ret < 0)
+		goto out;
+
+	msleep(150);
+
+	if (embd_phy) {
+		ret = sr_sw_reset(dev, SR_SWRESET_IPRL);
+		if (ret < 0)
+			goto out;
+	} else {
+		ret = sr_sw_reset(dev, SR_SWRESET_PRTE);
+		if (ret < 0)
+			goto out;
+	}
+
+	msleep(150);
+	rx_ctl = sr_read_rx_ctl(dev);
+	netdev_dbg(dev->net, "RX_CTL is 0x%04x after software reset\n", rx_ctl);
+	ret = sr_write_rx_ctl(dev, 0x0000);
+	if (ret < 0)
+		goto out;
+
+	rx_ctl = sr_read_rx_ctl(dev);
+	netdev_dbg(dev->net, "RX_CTL is 0x%04x setting to 0x0000\n", rx_ctl);
+
+	ret = sr_sw_reset(dev, SR_SWRESET_PRL);
+	if (ret < 0)
+		goto out;
+
+	msleep(150);
+
+	ret = sr_sw_reset(dev, SR_SWRESET_IPRL | SR_SWRESET_PRL);
+	if (ret < 0)
+		goto out;
+
+	msleep(150);
+
+	ret = sr9800_set_default_mode(dev);
+	if (ret < 0)
+		goto out;
+
+	/* Rewrite MAC address */
+	memcpy(data->mac_addr, dev->net->dev_addr, ETH_ALEN);
+	ret = sr_write_cmd(dev, SR_CMD_WRITE_NODE_ID, 0, 0, ETH_ALEN,
+							data->mac_addr);
+	if (ret < 0)
+		goto out;
+
+	return 0;
+
+out:
+	return ret;
+}
+
+static const struct net_device_ops sr9800_netdev_ops = {
+	.ndo_open		= usbnet_open,
+	.ndo_stop		= usbnet_stop,
+	.ndo_start_xmit		= usbnet_start_xmit,
+	.ndo_tx_timeout		= usbnet_tx_timeout,
+	.ndo_change_mtu		= usbnet_change_mtu,
+	.ndo_set_mac_address	= sr_set_mac_address,
+	.ndo_validate_addr	= eth_validate_addr,
+	.ndo_do_ioctl		= sr_ioctl,
+	.ndo_set_rx_mode        = sr_set_multicast,
+};
+
+static int sr9800_phy_powerup(struct usbnet *dev)
+{
+	int ret;
+
+	/* set the embedded Ethernet PHY in power-down state */
+	ret = sr_sw_reset(dev, SR_SWRESET_IPPD | SR_SWRESET_IPRL);
+	if (ret < 0) {
+		netdev_err(dev->net, "Failed to power down PHY : %d\n", ret);
+		return ret;
+	}
+	msleep(20);
+
+	/* set the embedded Ethernet PHY in power-up state */
+	ret = sr_sw_reset(dev, SR_SWRESET_IPRL);
+	if (ret < 0) {
+		netdev_err(dev->net, "Failed to reset PHY: %d\n", ret);
+		return ret;
+	}
+	msleep(600);
+
+	/* set the embedded Ethernet PHY in reset state */
+	ret = sr_sw_reset(dev, SR_SWRESET_CLEAR);
+	if (ret < 0) {
+		netdev_err(dev->net, "Failed to power up PHY: %d\n", ret);
+		return ret;
+	}
+	msleep(20);
+
+	/* set the embedded Ethernet PHY in power-up state */
+	ret = sr_sw_reset(dev, SR_SWRESET_IPRL);
+	if (ret < 0) {
+		netdev_err(dev->net, "Failed to reset PHY: %d\n", ret);
+		return ret;
+	}
+
+	return 0;
+}
+
+static int sr9800_bind(struct usbnet *dev, struct usb_interface *intf)
+{
+	struct sr_data *data = (struct sr_data *)&dev->data;
+	u16 led01_mux, led23_mux;
+	int ret, embd_phy;
+	u32 phyid;
+	u16 rx_ctl;
+
+	data->eeprom_len = SR9800_EEPROM_LEN;
+
+	usbnet_get_endpoints(dev, intf);
+
+	/* LED Setting Rule :
+	 * AABB:CCDD
+	 * AA : MFA0(LED0)
+	 * BB : MFA1(LED1)
+	 * CC : MFA2(LED2), Reserved for SR9800
+	 * DD : MFA3(LED3), Reserved for SR9800
+	 */
+	led01_mux = (SR_LED_MUX_LINK_ACTIVE << 8) | SR_LED_MUX_LINK;
+	led23_mux = (SR_LED_MUX_LINK_ACTIVE << 8) | SR_LED_MUX_TX_ACTIVE;
+	ret = sr_write_cmd(dev, SR_CMD_LED_MUX, led01_mux, led23_mux, 0, NULL);
+	if (ret < 0) {
+		netdev_err(dev->net, "set LINK LED failed : %d\n", ret);
+		goto out;
+	}
+
+	/* Get the MAC address */
+	ret = sr_read_cmd(dev, SR_CMD_READ_NODE_ID, 0, 0, ETH_ALEN,
+			  dev->net->dev_addr);
+	if (ret < 0) {
+		netdev_dbg(dev->net, "Failed to read MAC address: %d\n", ret);
+		return ret;
+	}
+	netdev_dbg(dev->net, "mac addr : %pM\n", dev->net->dev_addr);
+
+	/* Initialize MII structure */
+	dev->mii.dev = dev->net;
+	dev->mii.mdio_read = sr_mdio_read;
+	dev->mii.mdio_write = sr_mdio_write;
+	dev->mii.phy_id_mask = 0x1f;
+	dev->mii.reg_num_mask = 0x1f;
+	dev->mii.phy_id = sr_get_phy_addr(dev);
+
+	dev->net->netdev_ops = &sr9800_netdev_ops;
+	dev->net->ethtool_ops = &sr9800_ethtool_ops;
+
+	embd_phy = ((dev->mii.phy_id & 0x1f) == 0x10 ? 1 : 0);
+	/* Reset the PHY to normal operation mode */
+	ret = sr_write_cmd(dev, SR_CMD_SW_PHY_SELECT, embd_phy, 0, 0, NULL);
+	if (ret < 0) {
+		netdev_dbg(dev->net, "Select PHY #1 failed: %d\n", ret);
+		return ret;
+	}
+
+	/* Init PHY routine */
+	ret = sr9800_phy_powerup(dev);
+	if (ret < 0)
+		goto out;
+
+	rx_ctl = sr_read_rx_ctl(dev);
+	netdev_dbg(dev->net, "RX_CTL is 0x%04x after software reset\n", rx_ctl);
+	ret = sr_write_rx_ctl(dev, 0x0000);
+	if (ret < 0)
+		goto out;
+
+	rx_ctl = sr_read_rx_ctl(dev);
+	netdev_dbg(dev->net, "RX_CTL is 0x%04x setting to 0x0000\n", rx_ctl);
+
+	/* Read PHYID register *AFTER* the PHY was reset properly */
+	phyid = sr_get_phyid(dev);
+	netdev_dbg(dev->net, "PHYID=0x%08x\n", phyid);
+
+	/* medium mode setting */
+	ret = sr9800_set_default_mode(dev);
+	if (ret < 0)
+		goto out;
+
+	if (dev->udev->speed == USB_SPEED_HIGH) {
+		ret = sr_write_cmd(dev, SR_CMD_BULKIN_SIZE,
+			SR9800_BULKIN_SIZE[SR9800_MAX_BULKIN_4K].byte_cnt,
+			SR9800_BULKIN_SIZE[SR9800_MAX_BULKIN_4K].threshold,
+			0, NULL);
+		if (ret < 0) {
+			netdev_err(dev->net, "Reset RX_CTL failed: %d\n", ret);
+			goto out;
+		}
+		dev->rx_urb_size =
+			SR9800_BULKIN_SIZE[SR9800_MAX_BULKIN_4K].size;
+	} else {
+		ret = sr_write_cmd(dev, SR_CMD_BULKIN_SIZE,
+			SR9800_BULKIN_SIZE[SR9800_MAX_BULKIN_2K].byte_cnt,
+			SR9800_BULKIN_SIZE[SR9800_MAX_BULKIN_2K].threshold,
+			0, NULL);
+		if (ret < 0) {
+			netdev_err(dev->net, "Reset RX_CTL failed: %d\n", ret);
+			goto out;
+		}
+		dev->rx_urb_size =
+			SR9800_BULKIN_SIZE[SR9800_MAX_BULKIN_2K].size;
+	}
+	netdev_dbg(dev->net, "%s : setting rx_urb_size with : %zu\n", __func__,
+		   dev->rx_urb_size);
+	return 0;
+
+out:
+	return ret;
+}
+
+static const struct driver_info sr9800_driver_info = {
+	.description	= "CoreChip SR9800 USB 2.0 Ethernet",
+	.bind		= sr9800_bind,
+	.status		= sr_status,
+	.link_reset	= sr9800_link_reset,
+	.reset		= sr9800_reset,
+	.flags		= DRIVER_FLAG,
+	.rx_fixup	= sr_rx_fixup,
+	.tx_fixup	= sr_tx_fixup,
+};
+
+static const struct usb_device_id	products[] = {
+	{
+		USB_DEVICE(0x0fe6, 0x9800),	/* SR9800 Device  */
+		.driver_info = (unsigned long) &sr9800_driver_info,
+	},
+	{},		/* END */
+};
+
+MODULE_DEVICE_TABLE(usb, products);
+
+static struct usb_driver sr_driver = {
+	.name		= DRIVER_NAME,
+	.id_table	= products,
+	.probe		= usbnet_probe,
+	.suspend	= usbnet_suspend,
+	.resume		= usbnet_resume,
+	.disconnect	= usbnet_disconnect,
+	.supports_autosuspend = 1,
+};
+
+module_usb_driver(sr_driver);
+
+MODULE_AUTHOR("Liu Junliang <liujunliang_ljl@163.com>");
+MODULE_VERSION(DRIVER_VERSION);
+MODULE_DESCRIPTION("SR9800 USB 2.0 USB2NET Dev : http://www.corechip-sz.com");
+MODULE_LICENSE("GPL");
diff --git a/drivers/net/usb/sr9800.h b/drivers/net/usb/sr9800.h
new file mode 100644
index 0000000..18f6702
--- /dev/null
+++ b/drivers/net/usb/sr9800.h
@@ -0,0 +1,202 @@
+/* CoreChip-sz SR9800 one chip USB 2.0 Ethernet Devices
+ *
+ * Author : Liu Junliang <liujunliang_ljl@163.com>
+ *
+ * This file is licensed under the terms of the GNU General Public License
+ * version 2.  This program is licensed "as is" without any warranty of any
+ * kind, whether express or implied.
+ */
+
+#ifndef	_SR9800_H
+#define	_SR9800_H
+
+/* SR9800 spec. command table on Linux Platform */
+
+/* command : Software Station Management Control Reg */
+#define SR_CMD_SET_SW_MII		0x06
+/* command : PHY Read Reg */
+#define SR_CMD_READ_MII_REG		0x07
+/* command : PHY Write Reg */
+#define SR_CMD_WRITE_MII_REG		0x08
+/* command : Hardware Station Management Control Reg */
+#define SR_CMD_SET_HW_MII		0x0a
+/* command : SROM Read Reg */
+#define SR_CMD_READ_EEPROM		0x0b
+/* command : SROM Write Reg */
+#define SR_CMD_WRITE_EEPROM		0x0c
+/* command : SROM Write Enable Reg */
+#define SR_CMD_WRITE_ENABLE		0x0d
+/* command : SROM Write Disable Reg */
+#define SR_CMD_WRITE_DISABLE		0x0e
+/* command : RX Control Read Reg */
+#define SR_CMD_READ_RX_CTL		0x0f
+#define		SR_RX_CTL_PRO			(1 << 0)
+#define		SR_RX_CTL_AMALL			(1 << 1)
+#define		SR_RX_CTL_SEP			(1 << 2)
+#define		SR_RX_CTL_AB			(1 << 3)
+#define		SR_RX_CTL_AM			(1 << 4)
+#define		SR_RX_CTL_AP			(1 << 5)
+#define		SR_RX_CTL_ARP			(1 << 6)
+#define		SR_RX_CTL_SO			(1 << 7)
+#define		SR_RX_CTL_RH1M			(1 << 8)
+#define		SR_RX_CTL_RH2M			(1 << 9)
+#define		SR_RX_CTL_RH3M			(1 << 10)
+/* command : RX Control Write Reg */
+#define SR_CMD_WRITE_RX_CTL		0x10
+/* command : IPG0/IPG1/IPG2 Control Read Reg */
+#define SR_CMD_READ_IPG012		0x11
+/* command : IPG0/IPG1/IPG2 Control Write Reg */
+#define SR_CMD_WRITE_IPG012		0x12
+/* command : Node ID Read Reg */
+#define SR_CMD_READ_NODE_ID		0x13
+/* command : Node ID Write Reg */
+#define SR_CMD_WRITE_NODE_ID		0x14
+/* command : Multicast Filter Array Read Reg */
+#define	SR_CMD_READ_MULTI_FILTER	0x15
+/* command : Multicast Filter Array Write Reg */
+#define SR_CMD_WRITE_MULTI_FILTER	0x16
+/* command : Eth/HomePNA PHY Address Reg */
+#define SR_CMD_READ_PHY_ID		0x19
+/* command : Medium Status Read Reg */
+#define SR_CMD_READ_MEDIUM_STATUS	0x1a
+#define		SR_MONITOR_LINK			(1 << 1)
+#define		SR_MONITOR_MAGIC		(1 << 2)
+#define		SR_MONITOR_HSFS			(1 << 4)
+/* command : Medium Status Write Reg */
+#define SR_CMD_WRITE_MEDIUM_MODE	0x1b
+#define		SR_MEDIUM_GM			(1 << 0)
+#define		SR_MEDIUM_FD			(1 << 1)
+#define		SR_MEDIUM_AC			(1 << 2)
+#define		SR_MEDIUM_ENCK			(1 << 3)
+#define		SR_MEDIUM_RFC			(1 << 4)
+#define		SR_MEDIUM_TFC			(1 << 5)
+#define		SR_MEDIUM_JFE			(1 << 6)
+#define		SR_MEDIUM_PF			(1 << 7)
+#define		SR_MEDIUM_RE			(1 << 8)
+#define		SR_MEDIUM_PS			(1 << 9)
+#define		SR_MEDIUM_RSV			(1 << 10)
+#define		SR_MEDIUM_SBP			(1 << 11)
+#define		SR_MEDIUM_SM			(1 << 12)
+/* command : Monitor Mode Status Read Reg */
+#define SR_CMD_READ_MONITOR_MODE	0x1c
+/* command : Monitor Mode Status Write Reg */
+#define SR_CMD_WRITE_MONITOR_MODE	0x1d
+/* command : GPIO Status Read Reg */
+#define SR_CMD_READ_GPIOS		0x1e
+#define		SR_GPIO_GPO0EN		(1 << 0) /* GPIO0 Output enable */
+#define		SR_GPIO_GPO_0		(1 << 1) /* GPIO0 Output value */
+#define		SR_GPIO_GPO1EN		(1 << 2) /* GPIO1 Output enable */
+#define		SR_GPIO_GPO_1		(1 << 3) /* GPIO1 Output value */
+#define		SR_GPIO_GPO2EN		(1 << 4) /* GPIO2 Output enable */
+#define		SR_GPIO_GPO_2		(1 << 5) /* GPIO2 Output value */
+#define		SR_GPIO_RESERVED	(1 << 6) /* Reserved */
+#define		SR_GPIO_RSE		(1 << 7) /* Reload serial EEPROM */
+/* command : GPIO Status Write Reg */
+#define SR_CMD_WRITE_GPIOS		0x1f
+/* command : Eth PHY Power and Reset Control Reg */
+#define SR_CMD_SW_RESET			0x20
+#define		SR_SWRESET_CLEAR		0x00
+#define		SR_SWRESET_RR			(1 << 0)
+#define		SR_SWRESET_RT			(1 << 1)
+#define		SR_SWRESET_PRTE			(1 << 2)
+#define		SR_SWRESET_PRL			(1 << 3)
+#define		SR_SWRESET_BZ			(1 << 4)
+#define		SR_SWRESET_IPRL			(1 << 5)
+#define		SR_SWRESET_IPPD			(1 << 6)
+/* command : Software Interface Selection Status Read Reg */
+#define SR_CMD_SW_PHY_STATUS		0x21
+/* command : Software Interface Selection Status Write Reg */
+#define SR_CMD_SW_PHY_SELECT		0x22
+/* command : BULK in Buffer Size Reg */
+#define	SR_CMD_BULKIN_SIZE		0x2A
+/* command : LED_MUX Control Reg */
+#define	SR_CMD_LED_MUX			0x70
+#define		SR_LED_MUX_TX_ACTIVE		(1 << 0)
+#define		SR_LED_MUX_RX_ACTIVE		(1 << 1)
+#define		SR_LED_MUX_COLLISION		(1 << 2)
+#define		SR_LED_MUX_DUP_COL		(1 << 3)
+#define		SR_LED_MUX_DUP			(1 << 4)
+#define		SR_LED_MUX_SPEED		(1 << 5)
+#define		SR_LED_MUX_LINK_ACTIVE		(1 << 6)
+#define		SR_LED_MUX_LINK			(1 << 7)
+
+/* Register Access Flags */
+#define SR_REQ_RD_REG   (USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE)
+#define SR_REQ_WR_REG   (USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_DEVICE)
+
+/* Multicast Filter Array size & Max Number */
+#define	SR_MCAST_FILTER_SIZE		8
+#define	SR_MAX_MCAST			64
+
+/* IPG0/1/2 Default Value */
+#define	SR9800_IPG0_DEFAULT		0x15
+#define	SR9800_IPG1_DEFAULT		0x0c
+#define	SR9800_IPG2_DEFAULT		0x12
+
+/* Medium Status Default Mode */
+#define SR9800_MEDIUM_DEFAULT	\
+	(SR_MEDIUM_FD | SR_MEDIUM_RFC | \
+	 SR_MEDIUM_TFC | SR_MEDIUM_PS | \
+	 SR_MEDIUM_AC | SR_MEDIUM_RE)
+
+/* RX Control Default Setting */
+#define SR_DEFAULT_RX_CTL	\
+	(SR_RX_CTL_SO | SR_RX_CTL_AB | SR_RX_CTL_RH1M)
+
+/* EEPROM Magic Number & EEPROM Size */
+#define SR_EEPROM_MAGIC			0xdeadbeef
+#define SR9800_EEPROM_LEN		0xff
+
+/* SR9800 Driver Version and Driver Name */
+#define DRIVER_VERSION			"11-Nov-2013"
+#define DRIVER_NAME			"CoreChips"
+#define	DRIVER_FLAG		\
+	(FLAG_ETHER | FLAG_FRAMING_AX | FLAG_LINK_INTR |  FLAG_MULTI_PACKET)
+
+/* SR9800 BULKIN Buffer Size */
+#define SR9800_MAX_BULKIN_2K		0
+#define SR9800_MAX_BULKIN_4K		1
+#define SR9800_MAX_BULKIN_6K		2
+#define SR9800_MAX_BULKIN_8K		3
+#define SR9800_MAX_BULKIN_16K		4
+#define SR9800_MAX_BULKIN_20K		5
+#define SR9800_MAX_BULKIN_24K		6
+#define SR9800_MAX_BULKIN_32K		7
+
+struct { unsigned short size, byte_cnt, threshold; } SR9800_BULKIN_SIZE[] = {
+	/* 2k */
+	{2048, 0x8000, 0x8001},
+	/* 4k */
+	{4096, 0x8100, 0x8147},
+	/* 6k */
+	{6144, 0x8200, 0x81EB},
+	/* 8k */
+	{8192, 0x8300, 0x83D7},
+	/* 16k */
+	{16384, 0x8400, 0x851E},
+	/* 20k */
+	{20480, 0x8500, 0x8666},
+	/* 24k */
+	{24576, 0x8600, 0x87AE},
+	/* 32k */
+	{32768, 0x8700, 0x8A3D},
+};
+
+/* This structure cannot exceed sizeof(unsigned long [5]) AKA 20 bytes */
+struct sr_data {
+	u8 multi_filter[SR_MCAST_FILTER_SIZE];
+	u8 mac_addr[ETH_ALEN];
+	u8 phymode;
+	u8 ledmode;
+	u8 eeprom_len;
+};
+
+struct sr9800_int_data {
+	__le16 res1;
+	u8 link;
+	__le16 res2;
+	u8 status;
+	__le16 res3;
+} __packed;
+
+#endif	/* _SR9800_H */
diff --git a/drivers/net/usb/usbnet.c b/drivers/net/usb/usbnet.c
index 4671da7..dd10d58 100644
--- a/drivers/net/usb/usbnet.c
+++ b/drivers/net/usb/usbnet.c
@@ -542,17 +542,19 @@
 	}
 	// else network stack removes extra byte if we forced a short packet
 
-	if (skb->len) {
-		/* all data was already cloned from skb inside the driver */
-		if (dev->driver_info->flags & FLAG_MULTI_PACKET)
-			dev_kfree_skb_any(skb);
-		else
-			usbnet_skb_return(dev, skb);
+	/* all data was already cloned from skb inside the driver */
+	if (dev->driver_info->flags & FLAG_MULTI_PACKET)
+		goto done;
+
+	if (skb->len < ETH_HLEN) {
+		dev->net->stats.rx_errors++;
+		dev->net->stats.rx_length_errors++;
+		netif_dbg(dev, rx_err, dev->net, "rx length %d\n", skb->len);
+	} else {
+		usbnet_skb_return(dev, skb);
 		return;
 	}
 
-	netif_dbg(dev, rx_err, dev->net, "drop\n");
-	dev->net->stats.rx_errors++;
 done:
 	skb_queue_tail(&dev->done, skb);
 }
@@ -574,13 +576,6 @@
 	switch (urb_status) {
 	/* success */
 	case 0:
-		if (skb->len < dev->net->hard_header_len) {
-			state = rx_cleanup;
-			dev->net->stats.rx_errors++;
-			dev->net->stats.rx_length_errors++;
-			netif_dbg(dev, rx_err, dev->net,
-				  "rx length %d\n", skb->len);
-		}
 		break;
 
 	/* stalls need manual reset. this is rare ... except that
diff --git a/drivers/net/vxlan.c b/drivers/net/vxlan.c
index 026a313..b0f705c 100644
--- a/drivers/net/vxlan.c
+++ b/drivers/net/vxlan.c
@@ -469,7 +469,6 @@
 /* Look up Ethernet address in forwarding table */
 static struct vxlan_fdb *__vxlan_find_mac(struct vxlan_dev *vxlan,
 					const u8 *mac)
-
 {
 	struct hlist_head *head = vxlan_fdb_head(vxlan, mac);
 	struct vxlan_fdb *f;
@@ -596,10 +595,8 @@
 			NAPI_GRO_CB(p)->same_flow = 0;
 			continue;
 		}
-		goto found;
 	}
 
-found:
 	type = eh->h_proto;
 
 	rcu_read_lock();
diff --git a/drivers/net/wan/dlci.c b/drivers/net/wan/dlci.c
index 0d1c759..19f7cb2 100644
--- a/drivers/net/wan/dlci.c
+++ b/drivers/net/wan/dlci.c
@@ -71,12 +71,9 @@
 		       const void *saddr, unsigned len)
 {
 	struct frhdr		hdr;
-	struct dlci_local	*dlp;
 	unsigned int		hlen;
 	char			*dest;
 
-	dlp = netdev_priv(dev);
-
 	hdr.control = FRAD_I_UI;
 	switch (type)
 	{
@@ -107,11 +104,9 @@
 
 static void dlci_receive(struct sk_buff *skb, struct net_device *dev)
 {
-	struct dlci_local *dlp;
 	struct frhdr		*hdr;
 	int					process, header;
 
-	dlp = netdev_priv(dev);
 	if (!pskb_may_pull(skb, sizeof(*hdr))) {
 		netdev_notice(dev, "invalid data no header\n");
 		dev->stats.rx_errors++;
diff --git a/drivers/net/wireless/ath/ar5523/ar5523.c b/drivers/net/wireless/ath/ar5523/ar5523.c
index 8aa20df..507d9a9 100644
--- a/drivers/net/wireless/ath/ar5523/ar5523.c
+++ b/drivers/net/wireless/ath/ar5523/ar5523.c
@@ -1764,7 +1764,7 @@
 	AR5523_DEVICE_UG(0x07d1, 0x3a07),	/* D-Link / WUA-2340 rev A1 */
 	AR5523_DEVICE_UG(0x1690, 0x0712),	/* Gigaset / AR5523 */
 	AR5523_DEVICE_UG(0x1690, 0x0710),	/* Gigaset / SMCWUSBTG */
-	AR5523_DEVICE_UG(0x129b, 0x160c),	/* Gigaset / USB stick 108
+	AR5523_DEVICE_UG(0x129b, 0x160b),	/* Gigaset / USB stick 108
 						   (CyberTAN Technology) */
 	AR5523_DEVICE_UG(0x16ab, 0x7801),	/* Globalsun / AR5523_1 */
 	AR5523_DEVICE_UX(0x16ab, 0x7811),	/* Globalsun / AR5523_2 */
diff --git a/drivers/net/wireless/ath/ath5k/phy.c b/drivers/net/wireless/ath/ath5k/phy.c
index d6bc7cb..1a2973b 100644
--- a/drivers/net/wireless/ath/ath5k/phy.c
+++ b/drivers/net/wireless/ath/ath5k/phy.c
@@ -110,7 +110,7 @@
 		ath5k_hw_reg_write(ah, 0x00010000, AR5K_PHY(0x20));
 
 	if (ah->ah_version == AR5K_AR5210) {
-		srev = ath5k_hw_reg_read(ah, AR5K_PHY(256) >> 28) & 0xf;
+		srev = (ath5k_hw_reg_read(ah, AR5K_PHY(256)) >> 28) & 0xf;
 		ret = (u16)ath5k_hw_bitswap(srev, 4) + 1;
 	} else {
 		srev = (ath5k_hw_reg_read(ah, AR5K_PHY(0x100)) >> 24) & 0xff;
diff --git a/drivers/net/wireless/ath/ath9k/ar9003_eeprom.c b/drivers/net/wireless/ath/ath9k/ar9003_eeprom.c
index 25243cb..b8daff7 100644
--- a/drivers/net/wireless/ath/ath9k/ar9003_eeprom.c
+++ b/drivers/net/wireless/ath/ath9k/ar9003_eeprom.c
@@ -5065,6 +5065,10 @@
 			break;
 		}
 	}
+
+	if (is2GHz && !twiceMaxEdgePower)
+		twiceMaxEdgePower = 60;
+
 	return twiceMaxEdgePower;
 }
 
diff --git a/drivers/net/wireless/ath/ath9k/htc.h b/drivers/net/wireless/ath/ath9k/htc.h
index 58da346..99a2031 100644
--- a/drivers/net/wireless/ath/ath9k/htc.h
+++ b/drivers/net/wireless/ath/ath9k/htc.h
@@ -262,6 +262,8 @@
 struct ath9k_htc_sta {
 	u8 index;
 	enum tid_aggr_state tid_state[ATH9K_HTC_MAX_TID];
+	struct work_struct rc_update_work;
+	struct ath9k_htc_priv *htc_priv;
 };
 
 #define ATH9K_HTC_RXBUF 256
diff --git a/drivers/net/wireless/ath/ath9k/htc_drv_init.c b/drivers/net/wireless/ath/ath9k/htc_drv_init.c
index f4e1de2..c57d6b8 100644
--- a/drivers/net/wireless/ath/ath9k/htc_drv_init.c
+++ b/drivers/net/wireless/ath/ath9k/htc_drv_init.c
@@ -34,6 +34,10 @@
 module_param_named(btcoex_enable, ath9k_htc_btcoex_enable, int, 0444);
 MODULE_PARM_DESC(btcoex_enable, "Enable wifi-BT coexistence");
 
+static int ath9k_ps_enable;
+module_param_named(ps_enable, ath9k_ps_enable, int, 0444);
+MODULE_PARM_DESC(ps_enable, "Enable WLAN PowerSave");
+
 #define CHAN2G(_freq, _idx)  { \
 	.center_freq = (_freq), \
 	.hw_value = (_idx), \
@@ -725,12 +729,14 @@
 		IEEE80211_HW_SPECTRUM_MGMT |
 		IEEE80211_HW_HAS_RATE_CONTROL |
 		IEEE80211_HW_RX_INCLUDES_FCS |
-		IEEE80211_HW_SUPPORTS_PS |
 		IEEE80211_HW_PS_NULLFUNC_STACK |
 		IEEE80211_HW_REPORTS_TX_ACK_STATUS |
 		IEEE80211_HW_MFP_CAPABLE |
 		IEEE80211_HW_HOST_BROADCAST_PS_BUFFERING;
 
+	if (ath9k_ps_enable)
+		hw->flags |= IEEE80211_HW_SUPPORTS_PS;
+
 	hw->wiphy->interface_modes =
 		BIT(NL80211_IFTYPE_STATION) |
 		BIT(NL80211_IFTYPE_ADHOC) |
diff --git a/drivers/net/wireless/ath/ath9k/htc_drv_main.c b/drivers/net/wireless/ath/ath9k/htc_drv_main.c
index 608d739..c9254a6 100644
--- a/drivers/net/wireless/ath/ath9k/htc_drv_main.c
+++ b/drivers/net/wireless/ath/ath9k/htc_drv_main.c
@@ -1270,18 +1270,50 @@
 	mutex_unlock(&priv->mutex);
 }
 
+static void ath9k_htc_sta_rc_update_work(struct work_struct *work)
+{
+	struct ath9k_htc_sta *ista =
+	    container_of(work, struct ath9k_htc_sta, rc_update_work);
+	struct ieee80211_sta *sta =
+	    container_of((void *)ista, struct ieee80211_sta, drv_priv);
+	struct ath9k_htc_priv *priv = ista->htc_priv;
+	struct ath_common *common = ath9k_hw_common(priv->ah);
+	struct ath9k_htc_target_rate trate;
+
+	mutex_lock(&priv->mutex);
+	ath9k_htc_ps_wakeup(priv);
+
+	memset(&trate, 0, sizeof(struct ath9k_htc_target_rate));
+	ath9k_htc_setup_rate(priv, sta, &trate);
+	if (!ath9k_htc_send_rate_cmd(priv, &trate))
+		ath_dbg(common, CONFIG,
+			"Supported rates for sta: %pM updated, rate caps: 0x%X\n",
+			sta->addr, be32_to_cpu(trate.capflags));
+	else
+		ath_dbg(common, CONFIG,
+			"Unable to update supported rates for sta: %pM\n",
+			sta->addr);
+
+	ath9k_htc_ps_restore(priv);
+	mutex_unlock(&priv->mutex);
+}
+
 static int ath9k_htc_sta_add(struct ieee80211_hw *hw,
 			     struct ieee80211_vif *vif,
 			     struct ieee80211_sta *sta)
 {
 	struct ath9k_htc_priv *priv = hw->priv;
+	struct ath9k_htc_sta *ista = (struct ath9k_htc_sta *) sta->drv_priv;
 	int ret;
 
 	mutex_lock(&priv->mutex);
 	ath9k_htc_ps_wakeup(priv);
 	ret = ath9k_htc_add_station(priv, vif, sta);
-	if (!ret)
+	if (!ret) {
+		INIT_WORK(&ista->rc_update_work, ath9k_htc_sta_rc_update_work);
+		ista->htc_priv = priv;
 		ath9k_htc_init_rate(priv, sta);
+	}
 	ath9k_htc_ps_restore(priv);
 	mutex_unlock(&priv->mutex);
 
@@ -1293,12 +1325,13 @@
 				struct ieee80211_sta *sta)
 {
 	struct ath9k_htc_priv *priv = hw->priv;
-	struct ath9k_htc_sta *ista;
+	struct ath9k_htc_sta *ista = (struct ath9k_htc_sta *) sta->drv_priv;
 	int ret;
 
+	cancel_work_sync(&ista->rc_update_work);
+
 	mutex_lock(&priv->mutex);
 	ath9k_htc_ps_wakeup(priv);
-	ista = (struct ath9k_htc_sta *) sta->drv_priv;
 	htc_sta_drain(priv->htc, ista->index);
 	ret = ath9k_htc_remove_station(priv, vif, sta);
 	ath9k_htc_ps_restore(priv);
@@ -1311,28 +1344,12 @@
 				    struct ieee80211_vif *vif,
 				    struct ieee80211_sta *sta, u32 changed)
 {
-	struct ath9k_htc_priv *priv = hw->priv;
-	struct ath_common *common = ath9k_hw_common(priv->ah);
-	struct ath9k_htc_target_rate trate;
+	struct ath9k_htc_sta *ista = (struct ath9k_htc_sta *) sta->drv_priv;
 
-	mutex_lock(&priv->mutex);
-	ath9k_htc_ps_wakeup(priv);
+	if (!(changed & IEEE80211_RC_SUPP_RATES_CHANGED))
+		return;
 
-	if (changed & IEEE80211_RC_SUPP_RATES_CHANGED) {
-		memset(&trate, 0, sizeof(struct ath9k_htc_target_rate));
-		ath9k_htc_setup_rate(priv, sta, &trate);
-		if (!ath9k_htc_send_rate_cmd(priv, &trate))
-			ath_dbg(common, CONFIG,
-				"Supported rates for sta: %pM updated, rate caps: 0x%X\n",
-				sta->addr, be32_to_cpu(trate.capflags));
-		else
-			ath_dbg(common, CONFIG,
-				"Unable to update supported rates for sta: %pM\n",
-				sta->addr);
-	}
-
-	ath9k_htc_ps_restore(priv);
-	mutex_unlock(&priv->mutex);
+	schedule_work(&ista->rc_update_work);
 }
 
 static int ath9k_htc_conf_tx(struct ieee80211_hw *hw,
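
The two hunks above defer the rate-control update to a work item so it no longer runs inside the mac80211 callback, and recover the station from the embedded work_struct with container_of(). A minimal userspace sketch of that container_of step, with struct names invented for illustration rather than taken from the driver:

#include <stddef.h>
#include <stdio.h>

/* the kernel macro, reduced to its core */
#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct work { int pending; };

struct station {
	unsigned char addr[6];
	struct work rc_update_work;	/* embedded, like in ath9k_htc_sta */
};

static void rc_update(struct work *w)
{
	/* walk back from the embedded work item to the enclosing station */
	struct station *sta = container_of(w, struct station, rc_update_work);

	printf("updating rates for station at %p\n", (void *)sta);
}

int main(void)
{
	struct station sta = { .addr = { 0x02, 0, 0, 0, 0, 1 } };

	rc_update(&sta.rc_update_work);	/* what schedule_work() ends up invoking */
	return 0;
}

The second container_of() in the patch repeats the same trick one level further out, from the driver-private area back to the ieee80211_sta that embeds it.
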
diff --git a/drivers/net/wireless/ath/ath9k/hw.c b/drivers/net/wireless/ath/ath9k/hw.c
index fbf43c0..11eab9f 100644
--- a/drivers/net/wireless/ath/ath9k/hw.c
+++ b/drivers/net/wireless/ath/ath9k/hw.c
@@ -1316,7 +1316,7 @@
 	if (AR_SREV_9300_20_OR_LATER(ah))
 		udelay(50);
 	else if (AR_SREV_9100(ah))
-		udelay(10000);
+		mdelay(10);
 	else
 		udelay(100);
 
@@ -2051,9 +2051,8 @@
 
 	REG_SET_BIT(ah, AR_RTC_FORCE_WAKE,
 		    AR_RTC_FORCE_WAKE_EN);
-
 	if (AR_SREV_9100(ah))
-		udelay(10000);
+		mdelay(10);
 	else
 		udelay(50);
 
diff --git a/drivers/net/wireless/ath/ath9k/init.c b/drivers/net/wireless/ath/ath9k/init.c
index c36de30..1fc2e5a 100644
--- a/drivers/net/wireless/ath/ath9k/init.c
+++ b/drivers/net/wireless/ath/ath9k/init.c
@@ -57,6 +57,10 @@
 module_param_named(bt_ant_diversity, ath9k_bt_ant_diversity, int, 0444);
 MODULE_PARM_DESC(bt_ant_diversity, "Enable WLAN/BT RX antenna diversity");
 
+static int ath9k_ps_enable;
+module_param_named(ps_enable, ath9k_ps_enable, int, 0444);
+MODULE_PARM_DESC(ps_enable, "Enable WLAN PowerSave");
+
 bool is_ath9k_unloaded;
 /* We use the hw_value as an index into our private channel structure */
 
@@ -903,13 +907,15 @@
 	hw->flags = IEEE80211_HW_RX_INCLUDES_FCS |
 		IEEE80211_HW_HOST_BROADCAST_PS_BUFFERING |
 		IEEE80211_HW_SIGNAL_DBM |
-		IEEE80211_HW_SUPPORTS_PS |
 		IEEE80211_HW_PS_NULLFUNC_STACK |
 		IEEE80211_HW_SPECTRUM_MGMT |
 		IEEE80211_HW_REPORTS_TX_ACK_STATUS |
 		IEEE80211_HW_SUPPORTS_RC_TABLE |
 		IEEE80211_HW_SUPPORTS_HT_CCK_RATES;
 
+	if (ath9k_ps_enable)
+		hw->flags |= IEEE80211_HW_SUPPORTS_PS;
+
 	if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_HT) {
 		hw->flags |= IEEE80211_HW_AMPDU_AGGREGATION;
 
diff --git a/drivers/net/wireless/hostap/hostap_proc.c b/drivers/net/wireless/hostap/hostap_proc.c
index aa7ad3a7..4e5c0f8 100644
--- a/drivers/net/wireless/hostap/hostap_proc.c
+++ b/drivers/net/wireless/hostap/hostap_proc.c
@@ -496,7 +496,7 @@
 
 void hostap_remove_proc(local_info_t *local)
 {
-	remove_proc_subtree(local->ddev->name, hostap_proc);
+	proc_remove(local->proc);
 }
 
 
diff --git a/drivers/net/wireless/iwlwifi/dvm/mac80211.c b/drivers/net/wireless/iwlwifi/dvm/mac80211.c
index c24d1d3..73086c1 100644
--- a/drivers/net/wireless/iwlwifi/dvm/mac80211.c
+++ b/drivers/net/wireless/iwlwifi/dvm/mac80211.c
@@ -696,6 +696,24 @@
 	return ret;
 }
 
+static inline bool iwl_enable_rx_ampdu(const struct iwl_cfg *cfg)
+{
+	if (iwlwifi_mod_params.disable_11n & IWL_DISABLE_HT_RXAGG)
+		return false;
+	return true;
+}
+
+static inline bool iwl_enable_tx_ampdu(const struct iwl_cfg *cfg)
+{
+	if (iwlwifi_mod_params.disable_11n & IWL_DISABLE_HT_TXAGG)
+		return false;
+	if (iwlwifi_mod_params.disable_11n & IWL_ENABLE_HT_TXAGG)
+		return true;
+
+	/* disabled by default */
+	return false;
+}
+
 static int iwlagn_mac_ampdu_action(struct ieee80211_hw *hw,
 				   struct ieee80211_vif *vif,
 				   enum ieee80211_ampdu_mlme_action action,
@@ -717,7 +735,7 @@
 
 	switch (action) {
 	case IEEE80211_AMPDU_RX_START:
-		if (iwlwifi_mod_params.disable_11n & IWL_DISABLE_HT_RXAGG)
+		if (!iwl_enable_rx_ampdu(priv->cfg))
 			break;
 		IWL_DEBUG_HT(priv, "start Rx\n");
 		ret = iwl_sta_rx_agg_start(priv, sta, tid, *ssn);
@@ -729,7 +747,7 @@
 	case IEEE80211_AMPDU_TX_START:
 		if (!priv->trans->ops->txq_enable)
 			break;
-		if (iwlwifi_mod_params.disable_11n & IWL_DISABLE_HT_TXAGG)
+		if (!iwl_enable_tx_ampdu(priv->cfg))
 			break;
 		IWL_DEBUG_HT(priv, "start Tx\n");
 		ret = iwlagn_tx_agg_start(priv, vif, sta, tid, ssn);
diff --git a/drivers/net/wireless/iwlwifi/iwl-drv.c b/drivers/net/wireless/iwlwifi/iwl-drv.c
index c372816..7510355 100644
--- a/drivers/net/wireless/iwlwifi/iwl-drv.c
+++ b/drivers/net/wireless/iwlwifi/iwl-drv.c
@@ -1286,7 +1286,7 @@
 MODULE_PARM_DESC(swcrypto, "using crypto in software (default 0 [hardware])");
 module_param_named(11n_disable, iwlwifi_mod_params.disable_11n, uint, S_IRUGO);
 MODULE_PARM_DESC(11n_disable,
-	"disable 11n functionality, bitmap: 1: full, 2: agg TX, 4: agg RX");
+	"disable 11n functionality, bitmap: 1: full, 2: disable agg TX, 4: disable agg RX, 8 enable agg TX");
 module_param_named(amsdu_size_8K, iwlwifi_mod_params.amsdu_size_8K,
 		   int, S_IRUGO);
 MODULE_PARM_DESC(amsdu_size_8K, "enable 8K amsdu size (default 0)");
diff --git a/drivers/net/wireless/iwlwifi/iwl-modparams.h b/drivers/net/wireless/iwlwifi/iwl-modparams.h
index 0a84ade..b29075c3d 100644
--- a/drivers/net/wireless/iwlwifi/iwl-modparams.h
+++ b/drivers/net/wireless/iwlwifi/iwl-modparams.h
@@ -79,9 +79,12 @@
 	IWL_POWER_NUM
 };
 
-#define IWL_DISABLE_HT_ALL	BIT(0)
-#define IWL_DISABLE_HT_TXAGG	BIT(1)
-#define IWL_DISABLE_HT_RXAGG	BIT(2)
+enum iwl_disable_11n {
+	IWL_DISABLE_HT_ALL	 = BIT(0),
+	IWL_DISABLE_HT_TXAGG	 = BIT(1),
+	IWL_DISABLE_HT_RXAGG	 = BIT(2),
+	IWL_ENABLE_HT_TXAGG	 = BIT(3),
+};
 
 /**
  * struct iwl_mod_params
@@ -90,7 +93,7 @@
  *
  * @sw_crypto: using hardware encryption, default = 0
  * @disable_11n: disable 11n capabilities, default = 0,
- *	use IWL_DISABLE_HT_* constants
+ *	use IWL_[DIS,EN]ABLE_HT_* constants
  * @amsdu_size_8K: enable 8K amsdu size, default = 0
  * @restart_fw: restart firmware, default = 1
  * @wd_disable: enable stuck queue check, default = 0
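
Together with the iwl_enable_rx_ampdu()/iwl_enable_tx_ampdu() helpers added to the dvm and mvm mac80211 glue in this series, the new IWL_ENABLE_HT_TXAGG bit gives the 11n_disable bitmap a fixed precedence: an explicit disable bit wins, then the explicit enable bit, then the per-driver default (TX aggregation off by default for dvm, on by default for mvm). A standalone sketch of that precedence, reusing the same bit values:

#include <stdbool.h>
#include <stdio.h>

#define BIT(n)			(1u << (n))
#define IWL_DISABLE_HT_ALL	BIT(0)
#define IWL_DISABLE_HT_TXAGG	BIT(1)
#define IWL_DISABLE_HT_RXAGG	BIT(2)
#define IWL_ENABLE_HT_TXAGG	BIT(3)

/* mirrors iwl_enable_tx_ampdu(): disable wins, then enable, then the
 * driver default passed in by the caller */
static bool tx_ampdu_enabled(unsigned int disable_11n, bool dflt)
{
	if (disable_11n & IWL_DISABLE_HT_TXAGG)
		return false;
	if (disable_11n & IWL_ENABLE_HT_TXAGG)
		return true;
	return dflt;
}

int main(void)
{
	printf("11n_disable=0:  dvm=%d mvm=%d\n",
	       tx_ampdu_enabled(0, false), tx_ampdu_enabled(0, true));
	printf("11n_disable=8:  dvm=%d mvm=%d\n",
	       tx_ampdu_enabled(IWL_ENABLE_HT_TXAGG, false),
	       tx_ampdu_enabled(IWL_ENABLE_HT_TXAGG, true));
	printf("11n_disable=10: dvm=%d mvm=%d\n",
	       tx_ampdu_enabled(IWL_DISABLE_HT_TXAGG | IWL_ENABLE_HT_TXAGG, false),
	       tx_ampdu_enabled(IWL_DISABLE_HT_TXAGG | IWL_ENABLE_HT_TXAGG, true));
	return 0;
}
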
diff --git a/drivers/net/wireless/iwlwifi/iwl-nvm-parse.c b/drivers/net/wireless/iwlwifi/iwl-nvm-parse.c
index f06f4cb..725e954d 100644
--- a/drivers/net/wireless/iwlwifi/iwl-nvm-parse.c
+++ b/drivers/net/wireless/iwlwifi/iwl-nvm-parse.c
@@ -182,6 +182,11 @@
 
 	for (ch_idx = 0; ch_idx < IWL_NUM_CHANNELS; ch_idx++) {
 		ch_flags = __le16_to_cpup(nvm_ch_flags + ch_idx);
+
+		if (ch_idx >= NUM_2GHZ_CHANNELS &&
+		    !data->sku_cap_band_52GHz_enable)
+			ch_flags &= ~NVM_CHANNEL_VALID;
+
 		if (!(ch_flags & NVM_CHANNEL_VALID)) {
 			IWL_DEBUG_EEPROM(dev,
 					 "Ch. %d Flags %x [%sGHz] - No traffic\n",
diff --git a/drivers/net/wireless/iwlwifi/mvm/fw-api-scan.h b/drivers/net/wireless/iwlwifi/mvm/fw-api-scan.h
index 73cbba7..9426905 100644
--- a/drivers/net/wireless/iwlwifi/mvm/fw-api-scan.h
+++ b/drivers/net/wireless/iwlwifi/mvm/fw-api-scan.h
@@ -504,6 +504,7 @@
  * @match_notify:	clients waiting for match found notification
  * @pass_match:		clients waiting for the results
  * @active_clients:	active clients bitmap - enum scan_framework_client
+ * @any_beacon_notify:	clients waiting for match notification without match
  */
 struct iwl_scan_offload_profile_cfg {
 	struct iwl_scan_offload_profile profiles[IWL_SCAN_MAX_PROFILES];
@@ -512,7 +513,8 @@
 	u8 match_notify;
 	u8 pass_match;
 	u8 active_clients;
-	u8 reserved[3];
+	u8 any_beacon_notify;
+	u8 reserved[2];
 } __packed;
 
 /**
diff --git a/drivers/net/wireless/iwlwifi/mvm/mac80211.c b/drivers/net/wireless/iwlwifi/mvm/mac80211.c
index c49b507..c35b866 100644
--- a/drivers/net/wireless/iwlwifi/mvm/mac80211.c
+++ b/drivers/net/wireless/iwlwifi/mvm/mac80211.c
@@ -246,7 +246,7 @@
 	else
 		hw->wiphy->flags &= ~WIPHY_FLAG_PS_ON_BY_DEFAULT;
 
-	if (mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_SCHED_SCAN) {
+	if (0 && mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_SCHED_SCAN) {
 		hw->wiphy->flags |= WIPHY_FLAG_SUPPORTS_SCHED_SCAN;
 		hw->wiphy->max_sched_scan_ssids = PROBE_OPTION_MAX;
 		hw->wiphy->max_match_sets = IWL_SCAN_MAX_PROFILES;
@@ -328,6 +328,24 @@
 	ieee80211_free_txskb(hw, skb);
 }
 
+static inline bool iwl_enable_rx_ampdu(const struct iwl_cfg *cfg)
+{
+	if (iwlwifi_mod_params.disable_11n & IWL_DISABLE_HT_RXAGG)
+		return false;
+	return true;
+}
+
+static inline bool iwl_enable_tx_ampdu(const struct iwl_cfg *cfg)
+{
+	if (iwlwifi_mod_params.disable_11n & IWL_DISABLE_HT_TXAGG)
+		return false;
+	if (iwlwifi_mod_params.disable_11n & IWL_ENABLE_HT_TXAGG)
+		return true;
+
+	/* enabled by default */
+	return true;
+}
+
 static int iwl_mvm_mac_ampdu_action(struct ieee80211_hw *hw,
 				    struct ieee80211_vif *vif,
 				    enum ieee80211_ampdu_mlme_action action,
@@ -347,7 +365,7 @@
 
 	switch (action) {
 	case IEEE80211_AMPDU_RX_START:
-		if (iwlwifi_mod_params.disable_11n & IWL_DISABLE_HT_RXAGG) {
+		if (!iwl_enable_rx_ampdu(mvm->cfg)) {
 			ret = -EINVAL;
 			break;
 		}
@@ -357,7 +375,7 @@
 		ret = iwl_mvm_sta_rx_agg(mvm, sta, tid, 0, false);
 		break;
 	case IEEE80211_AMPDU_TX_START:
-		if (iwlwifi_mod_params.disable_11n & IWL_DISABLE_HT_TXAGG) {
+		if (!iwl_enable_tx_ampdu(mvm->cfg)) {
 			ret = -EINVAL;
 			break;
 		}
diff --git a/drivers/net/wireless/iwlwifi/mvm/scan.c b/drivers/net/wireless/iwlwifi/mvm/scan.c
index 0e00079..742afc4 100644
--- a/drivers/net/wireless/iwlwifi/mvm/scan.c
+++ b/drivers/net/wireless/iwlwifi/mvm/scan.c
@@ -344,7 +344,8 @@
 
 	iwl_mvm_scan_fill_ssids(cmd, req, basic_ssid ? 1 : 0);
 
-	cmd->tx_cmd.tx_flags = cpu_to_le32(TX_CMD_FLG_SEQ_CTL);
+	cmd->tx_cmd.tx_flags = cpu_to_le32(TX_CMD_FLG_SEQ_CTL |
+					   TX_CMD_FLG_BT_DIS);
 	cmd->tx_cmd.sta_id = mvm->aux_sta.sta_id;
 	cmd->tx_cmd.life_time = cpu_to_le32(TX_CMD_LIFE_TIME_INFINITE);
 	cmd->tx_cmd.rate_n_flags =
@@ -807,6 +808,8 @@
 	profile_cfg->active_clients = SCAN_CLIENT_SCHED_SCAN;
 	profile_cfg->pass_match = SCAN_CLIENT_SCHED_SCAN;
 	profile_cfg->match_notify = SCAN_CLIENT_SCHED_SCAN;
+	if (!req->n_match_sets || !req->match_sets[0].ssid.ssid_len)
+		profile_cfg->any_beacon_notify = SCAN_CLIENT_SCHED_SCAN;
 
 	for (i = 0; i < req->n_match_sets; i++) {
 		profile = &profile_cfg->profiles[i];
diff --git a/drivers/net/wireless/iwlwifi/mvm/sta.c b/drivers/net/wireless/iwlwifi/mvm/sta.c
index ec18121..3397f59 100644
--- a/drivers/net/wireless/iwlwifi/mvm/sta.c
+++ b/drivers/net/wireless/iwlwifi/mvm/sta.c
@@ -652,7 +652,7 @@
 {
 	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
 	static const u8 _baddr[] = {0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF};
-	static const u8 *baddr = _baddr;
+	const u8 *baddr = _baddr;
 
 	lockdep_assert_held(&mvm->mutex);
 
diff --git a/drivers/net/wireless/iwlwifi/mvm/tx.c b/drivers/net/wireless/iwlwifi/mvm/tx.c
index 90378c2..4df12fa 100644
--- a/drivers/net/wireless/iwlwifi/mvm/tx.c
+++ b/drivers/net/wireless/iwlwifi/mvm/tx.c
@@ -659,8 +659,14 @@
 	rcu_read_lock();
 
 	sta = rcu_dereference(mvm->fw_id_to_mac_id[sta_id]);
+	/*
+	 * sta can't be NULL otherwise it'd mean that the sta has been freed in
+	 * the firmware while we still have packets for it in the Tx queues.
+	 */
+	if (WARN_ON_ONCE(!sta))
+		goto out;
 
-	if (!IS_ERR_OR_NULL(sta)) {
+	if (!IS_ERR(sta)) {
 		mvmsta = iwl_mvm_sta_from_mac80211(sta);
 
 		if (tid != IWL_TID_NON_QOS) {
@@ -675,7 +681,6 @@
 			spin_unlock_bh(&mvmsta->lock);
 		}
 	} else {
-		sta = NULL;
 		mvmsta = NULL;
 	}
 
@@ -683,42 +688,38 @@
 	 * If the txq is not an AMPDU queue, there is no chance we freed
 	 * several skbs. Check that out...
 	 */
-	if (txq_id < mvm->first_agg_queue && !WARN_ON(skb_freed > 1) &&
-	    atomic_sub_and_test(skb_freed, &mvm->pending_frames[sta_id])) {
-		if (mvmsta) {
-			/*
-			 * If there are no pending frames for this STA, notify
-			 * mac80211 that this station can go to sleep in its
-			 * STA table.
-			 */
-			if (mvmsta->vif->type == NL80211_IFTYPE_AP)
-				ieee80211_sta_block_awake(mvm->hw, sta, false);
-			/*
-			 * We might very well have taken mvmsta pointer while
-			 * the station was being removed. The remove flow might
-			 * have seen a pending_frame (because we didn't take
-			 * the lock) even if now the queues are drained. So make
-			 * really sure now that this the station is not being
-			 * removed. If it is, run the drain worker to remove it.
-			 */
-			spin_lock_bh(&mvmsta->lock);
-			sta = rcu_dereference(mvm->fw_id_to_mac_id[sta_id]);
-			if (!sta || PTR_ERR(sta) == -EBUSY) {
-				/*
-				 * Station disappeared in the meantime:
-				 * so we are draining.
-				 */
-				set_bit(sta_id, mvm->sta_drained);
-				schedule_work(&mvm->sta_drained_wk);
-			}
-			spin_unlock_bh(&mvmsta->lock);
-		} else if (!mvmsta && PTR_ERR(sta) == -EBUSY) {
-			/* Tx response without STA, so we are draining */
-			set_bit(sta_id, mvm->sta_drained);
-			schedule_work(&mvm->sta_drained_wk);
-		}
+	if (txq_id >= mvm->first_agg_queue)
+		goto out;
+
+	/* We can't free more than one frame at once on a shared queue */
+	WARN_ON(skb_freed > 1);
+
+	/* If we still have frames from this STA, nothing to do here */
+	if (!atomic_sub_and_test(skb_freed, &mvm->pending_frames[sta_id]))
+		goto out;
+
+	if (mvmsta && mvmsta->vif->type == NL80211_IFTYPE_AP) {
+		/*
+		 * If there are no pending frames for this STA, notify
+		 * mac80211 that this station can go to sleep in its
+		 * STA table.
+		 * If mvmsta is not NULL, sta is valid.
+		 */
+		ieee80211_sta_block_awake(mvm->hw, sta, false);
 	}
 
+	if (PTR_ERR(sta) == -EBUSY || PTR_ERR(sta) == -ENOENT) {
+		/*
+		 * We are draining and this was the last packet - pre_rcu_remove
+		 * has been called already. We might be after the
+		 * synchronize_net already.
+		 * Don't rely on iwl_mvm_rm_sta to see the empty Tx queues.
+		 */
+		set_bit(sta_id, mvm->sta_drained);
+		schedule_work(&mvm->sta_drained_wk);
+	}
+
+out:
 	rcu_read_unlock();
 }
 
diff --git a/drivers/net/wireless/iwlwifi/mvm/utils.c b/drivers/net/wireless/iwlwifi/mvm/utils.c
index a4a5e25..86989df 100644
--- a/drivers/net/wireless/iwlwifi/mvm/utils.c
+++ b/drivers/net/wireless/iwlwifi/mvm/utils.c
@@ -411,6 +411,8 @@
 			mvm->status, table.valid);
 	}
 
+	IWL_ERR(mvm, "Loaded firmware version: %s\n", mvm->fw->fw_version);
+
 	trace_iwlwifi_dev_ucode_error(trans->dev, table.error_id, table.tsf_low,
 				      table.data1, table.data2, table.data3,
 				      table.blink1, table.blink2, table.ilink1,
diff --git a/drivers/net/wireless/iwlwifi/pcie/drv.c b/drivers/net/wireless/iwlwifi/pcie/drv.c
index 3040924..f47bcbe 100644
--- a/drivers/net/wireless/iwlwifi/pcie/drv.c
+++ b/drivers/net/wireless/iwlwifi/pcie/drv.c
@@ -359,20 +359,25 @@
 /* 7265 Series */
 	{IWL_PCI_DEVICE(0x095A, 0x5010, iwl7265_2ac_cfg)},
 	{IWL_PCI_DEVICE(0x095A, 0x5110, iwl7265_2ac_cfg)},
+	{IWL_PCI_DEVICE(0x095A, 0x5112, iwl7265_2ac_cfg)},
+	{IWL_PCI_DEVICE(0x095A, 0x5100, iwl7265_2ac_cfg)},
+	{IWL_PCI_DEVICE(0x095A, 0x510A, iwl7265_2ac_cfg)},
 	{IWL_PCI_DEVICE(0x095B, 0x5310, iwl7265_2ac_cfg)},
 	{IWL_PCI_DEVICE(0x095B, 0x5302, iwl7265_2ac_cfg)},
 	{IWL_PCI_DEVICE(0x095B, 0x5210, iwl7265_2ac_cfg)},
 	{IWL_PCI_DEVICE(0x095A, 0x5012, iwl7265_2ac_cfg)},
-	{IWL_PCI_DEVICE(0x095A, 0x500A, iwl7265_2ac_cfg)},
 	{IWL_PCI_DEVICE(0x095A, 0x5410, iwl7265_2ac_cfg)},
 	{IWL_PCI_DEVICE(0x095A, 0x5400, iwl7265_2ac_cfg)},
 	{IWL_PCI_DEVICE(0x095A, 0x1010, iwl7265_2ac_cfg)},
 	{IWL_PCI_DEVICE(0x095A, 0x5000, iwl7265_2n_cfg)},
+	{IWL_PCI_DEVICE(0x095A, 0x500A, iwl7265_2n_cfg)},
 	{IWL_PCI_DEVICE(0x095B, 0x5200, iwl7265_2n_cfg)},
 	{IWL_PCI_DEVICE(0x095A, 0x5002, iwl7265_n_cfg)},
 	{IWL_PCI_DEVICE(0x095B, 0x5202, iwl7265_n_cfg)},
 	{IWL_PCI_DEVICE(0x095A, 0x9010, iwl7265_2ac_cfg)},
+	{IWL_PCI_DEVICE(0x095A, 0x9012, iwl7265_2ac_cfg)},
 	{IWL_PCI_DEVICE(0x095A, 0x9110, iwl7265_2ac_cfg)},
+	{IWL_PCI_DEVICE(0x095A, 0x9112, iwl7265_2ac_cfg)},
 	{IWL_PCI_DEVICE(0x095A, 0x9210, iwl7265_2ac_cfg)},
 	{IWL_PCI_DEVICE(0x095A, 0x9510, iwl7265_2ac_cfg)},
 	{IWL_PCI_DEVICE(0x095A, 0x9310, iwl7265_2ac_cfg)},
diff --git a/drivers/net/wireless/mwifiex/main.c b/drivers/net/wireless/mwifiex/main.c
index 4d79761..9d3d275 100644
--- a/drivers/net/wireless/mwifiex/main.c
+++ b/drivers/net/wireless/mwifiex/main.c
@@ -748,7 +748,7 @@
 
 static u16
 mwifiex_netdev_select_wmm_queue(struct net_device *dev, struct sk_buff *skb,
-				void *accel_priv)
+				void *accel_priv, select_queue_fallback_t fallback)
 {
 	skb->priority = cfg80211_classify8021d(skb, NULL);
 	return mwifiex_1d_to_wmm_queue[skb->priority];
diff --git a/drivers/net/wireless/rt2x00/rt2500pci.c b/drivers/net/wireless/rt2x00/rt2500pci.c
index abc5f56..2f1cd92 100644
--- a/drivers/net/wireless/rt2x00/rt2500pci.c
+++ b/drivers/net/wireless/rt2x00/rt2500pci.c
@@ -1877,6 +1877,11 @@
 						   EEPROM_MAC_ADDR_0));
 
 	/*
+	 * Disable powersaving as default.
+	 */
+	rt2x00dev->hw->wiphy->flags &= ~WIPHY_FLAG_PS_ON_BY_DEFAULT;
+
+	/*
 	 * Initialize hw_mode information.
 	 */
 	spec->supported_bands = SUPPORT_BAND_2GHZ;
diff --git a/drivers/net/wireless/rt2x00/rt2500usb.c b/drivers/net/wireless/rt2x00/rt2500usb.c
index 9f16824..d849d59 100644
--- a/drivers/net/wireless/rt2x00/rt2500usb.c
+++ b/drivers/net/wireless/rt2x00/rt2500usb.c
@@ -1706,6 +1706,11 @@
 	    IEEE80211_HW_SUPPORTS_PS |
 	    IEEE80211_HW_PS_NULLFUNC_STACK;
 
+	/*
+	 * Disable powersaving as default.
+	 */
+	rt2x00dev->hw->wiphy->flags &= ~WIPHY_FLAG_PS_ON_BY_DEFAULT;
+
 	SET_IEEE80211_DEV(rt2x00dev->hw, rt2x00dev->dev);
 	SET_IEEE80211_PERM_ADDR(rt2x00dev->hw,
 				rt2x00_eeprom_addr(rt2x00dev,
diff --git a/drivers/net/wireless/rt2x00/rt2800lib.c b/drivers/net/wireless/rt2x00/rt2800lib.c
index b8f5b06..7f8b5d1 100644
--- a/drivers/net/wireless/rt2x00/rt2800lib.c
+++ b/drivers/net/wireless/rt2x00/rt2800lib.c
@@ -7458,10 +7458,9 @@
 	u32 reg;
 
 	/*
-	 * Disable powersaving as default on PCI devices.
+	 * Disable powersaving as default.
 	 */
-	if (rt2x00_is_pci(rt2x00dev) || rt2x00_is_soc(rt2x00dev))
-		rt2x00dev->hw->wiphy->flags &= ~WIPHY_FLAG_PS_ON_BY_DEFAULT;
+	rt2x00dev->hw->wiphy->flags &= ~WIPHY_FLAG_PS_ON_BY_DEFAULT;
 
 	/*
 	 * Initialize all hw fields.
diff --git a/drivers/net/wireless/rtl818x/rtl8180/dev.c b/drivers/net/wireless/rtl818x/rtl8180/dev.c
index 8ec17aa..3867d14 100644
--- a/drivers/net/wireless/rtl818x/rtl8180/dev.c
+++ b/drivers/net/wireless/rtl818x/rtl8180/dev.c
@@ -107,6 +107,7 @@
 	struct rtl8180_priv *priv = dev->priv;
 	unsigned int count = 32;
 	u8 signal, agc, sq;
+	dma_addr_t mapping;
 
 	while (count--) {
 		struct rtl8180_rx_desc *entry = &priv->rx_ring[priv->rx_idx];
@@ -128,6 +129,17 @@
 			if (unlikely(!new_skb))
 				goto done;
 
+			mapping = pci_map_single(priv->pdev,
+					       skb_tail_pointer(new_skb),
+					       MAX_RX_SIZE, PCI_DMA_FROMDEVICE);
+
+			if (pci_dma_mapping_error(priv->pdev, mapping)) {
+				kfree_skb(new_skb);
+				dev_err(&priv->pdev->dev, "RX DMA map error\n");
+
+				goto done;
+			}
+
 			pci_unmap_single(priv->pdev,
 					 *((dma_addr_t *)skb->cb),
 					 MAX_RX_SIZE, PCI_DMA_FROMDEVICE);
@@ -158,9 +170,7 @@
 
 			skb = new_skb;
 			priv->rx_buf[priv->rx_idx] = skb;
-			*((dma_addr_t *) skb->cb) =
-				pci_map_single(priv->pdev, skb_tail_pointer(skb),
-					       MAX_RX_SIZE, PCI_DMA_FROMDEVICE);
+			*((dma_addr_t *) skb->cb) = mapping;
 		}
 
 	done:
@@ -266,6 +276,13 @@
 	mapping = pci_map_single(priv->pdev, skb->data,
 				 skb->len, PCI_DMA_TODEVICE);
 
+	if (pci_dma_mapping_error(priv->pdev, mapping)) {
+		kfree_skb(skb);
+		dev_err(&priv->pdev->dev, "TX DMA mapping error\n");
+		return;
+
+	}
+
 	tx_flags = RTL818X_TX_DESC_FLAG_OWN | RTL818X_TX_DESC_FLAG_FS |
 		   RTL818X_TX_DESC_FLAG_LS |
 		   (ieee80211_get_tx_rate(dev, info)->hw_value << 24) |
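
Both rtl8180 hunks follow the same ordering rule: map the replacement buffer first, check pci_dma_mapping_error() before the old mapping is torn down or the descriptor is written, and drop the skb on failure instead of handing the hardware an invalid address. A self-contained sketch of that ordering; fake_map() is only a stand-in for pci_map_single(), not the PCI API:

#include <stdio.h>
#include <stdlib.h>

typedef unsigned long dma_addr_t;
#define MAP_FAILED_ADDR ((dma_addr_t)0)	/* stands in for pci_dma_mapping_error() */

/* a real streaming mapping can fail, e.g. when IOMMU space is exhausted */
static dma_addr_t fake_map(void *buf, int fail)
{
	return fail ? MAP_FAILED_ADDR : (dma_addr_t)buf;
}

static int rx_refill(int fail)
{
	void *new_buf = malloc(64);	/* dev_alloc_skb() in the driver */
	dma_addr_t mapping;

	if (!new_buf)
		return -1;

	/* map before touching the old buffer, so a failure leaves the
	 * ring entry (and its existing mapping) untouched */
	mapping = fake_map(new_buf, fail);
	if (mapping == MAP_FAILED_ADDR) {
		free(new_buf);		/* kfree_skb() in the driver */
		return -1;		/* keep the old buffer for now */
	}

	/* ... unmap the old buffer, install new_buf/mapping in the ring ... */
	free(new_buf);
	return 0;
}

int main(void)
{
	printf("refill ok: %d, refill with mapping failure: %d\n",
	       rx_refill(0), rx_refill(1));
	return 0;
}
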
diff --git a/drivers/net/wireless/rtl818x/rtl8187/rtl8187.h b/drivers/net/wireless/rtl818x/rtl8187/rtl8187.h
index 56aee06..a6ad79f 100644
--- a/drivers/net/wireless/rtl818x/rtl8187/rtl8187.h
+++ b/drivers/net/wireless/rtl818x/rtl8187/rtl8187.h
@@ -15,6 +15,8 @@
 #ifndef RTL8187_H
 #define RTL8187_H
 
+#include <linux/cache.h>
+
 #include "rtl818x.h"
 #include "leds.h"
 
@@ -139,7 +141,10 @@
 	u8 aifsn[4];
 	u8 rfkill_mask;
 	struct {
-		__le64 buf;
+		union {
+			__le64 buf;
+			u8 dummy1[L1_CACHE_BYTES];
+		} ____cacheline_aligned;
 		struct sk_buff_head queue;
 	} b_tx_status; /* This queue is used by both -b and non-b devices */
 	struct mutex io_mutex;
@@ -147,7 +152,8 @@
 		u8 bits8;
 		__le16 bits16;
 		__le32 bits32;
-	} *io_dmabuf;
+		u8 dummy2[L1_CACHE_BYTES];
+	} *io_dmabuf ____cacheline_aligned;
 	bool rfkill_off;
 	u16 seqno;
 };
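
The dummy1/dummy2 members pad the DMA targets out to a full cache line, so a USB completion writing into buf or io_dmabuf never shares a cache line with the adjacent queue head, mutex or flags. A userspace sketch of the layout trick; L1_CACHE_BYTES is hard-coded to 64 here as an assumption, whereas the kernel value is per-architecture:

#include <stdio.h>
#include <stddef.h>

#define L1_CACHE_BYTES 64	/* assumed; <linux/cache.h> provides the real value */

struct b_tx_status {
	union {
		unsigned long long buf;		/* the actual DMA target */
		unsigned char pad[L1_CACHE_BYTES];
	} u __attribute__((aligned(L1_CACHE_BYTES)));
	int queue_len;				/* stand-in for the sk_buff_head */
};

int main(void)
{
	/* the union occupies a whole cache line, so queue_len starts on the next one */
	printf("union size %zu, queue_len offset %zu\n",
	       sizeof(((struct b_tx_status *)0)->u),
	       offsetof(struct b_tx_status, queue_len));
	return 0;
}
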
diff --git a/drivers/net/wireless/rtlwifi/ps.c b/drivers/net/wireless/rtlwifi/ps.c
index deedae3..d1c0191 100644
--- a/drivers/net/wireless/rtlwifi/ps.c
+++ b/drivers/net/wireless/rtlwifi/ps.c
@@ -48,7 +48,7 @@
 
 	/*<2> Enable Adapter */
 	if (rtlpriv->cfg->ops->hw_init(hw))
-		return 1;
+		return false;
 	RT_CLEAR_PS_LEVEL(ppsc, RT_RF_OFF_LEVL_HALT_NIC);
 
 	/*<3> Enable Interrupt */
diff --git a/drivers/net/wireless/rtlwifi/rtl8192ce/hw.c b/drivers/net/wireless/rtlwifi/rtl8192ce/hw.c
index a82b30a..2eb0b38 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192ce/hw.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192ce/hw.c
@@ -937,14 +937,26 @@
 	bool is92c;
 	int err;
 	u8 tmp_u1b;
+	unsigned long flags;
 
 	rtlpci->being_init_adapter = true;
+
+	/* Since this function can take a very long time (up to 350 ms)
+	 * and can be called with irqs disabled, reenable the irqs
+	 * to let the other devices continue being serviced.
+	 *
+	 * It is safe to do so since our own interrupts will only be enabled
+	 * in a subsequent step.
+	 */
+	local_save_flags(flags);
+	local_irq_enable();
+
 	rtlpriv->intf_ops->disable_aspm(hw);
 	rtstatus = _rtl92ce_init_mac(hw);
 	if (!rtstatus) {
 		RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, "Init MAC failed\n");
 		err = 1;
-		return err;
+		goto exit;
 	}
 
 	err = rtl92c_download_fw(hw);
@@ -952,7 +964,7 @@
 		RT_TRACE(rtlpriv, COMP_ERR, DBG_WARNING,
 			 "Failed to download FW. Init HW without FW now..\n");
 		err = 1;
-		return err;
+		goto exit;
 	}
 
 	rtlhal->last_hmeboxnum = 0;
@@ -1032,6 +1044,8 @@
 		RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE, "under 1.5V\n");
 	}
 	rtl92c_dm_init(hw);
+exit:
+	local_irq_restore(flags);
 	rtlpci->being_init_adapter = false;
 	return err;
 }
diff --git a/drivers/net/xen-netback/common.h b/drivers/net/xen-netback/common.h
index 4c76bcb..ae413a2 100644
--- a/drivers/net/xen-netback/common.h
+++ b/drivers/net/xen-netback/common.h
@@ -143,11 +143,7 @@
 	char rx_irq_name[IFNAMSIZ+4]; /* DEVNAME-rx */
 	struct xen_netif_rx_back_ring rx;
 	struct sk_buff_head rx_queue;
-	bool rx_queue_stopped;
-	/* Set when the RX interrupt is triggered by the frontend.
-	 * The worker thread may need to wake the queue.
-	 */
-	bool rx_event;
+	RING_IDX rx_last_skb_slots;
 
 	/* This array is allocated separately as it is large */
 	struct gnttab_copy *grant_copy_op;
diff --git a/drivers/net/xen-netback/interface.c b/drivers/net/xen-netback/interface.c
index b9de31e..7669d49 100644
--- a/drivers/net/xen-netback/interface.c
+++ b/drivers/net/xen-netback/interface.c
@@ -100,7 +100,6 @@
 {
 	struct xenvif *vif = dev_id;
 
-	vif->rx_event = true;
 	xenvif_kick_thread(vif);
 
 	return IRQ_HANDLED;
diff --git a/drivers/net/xen-netback/netback.c b/drivers/net/xen-netback/netback.c
index 6b62c3e..e5284bc 100644
--- a/drivers/net/xen-netback/netback.c
+++ b/drivers/net/xen-netback/netback.c
@@ -476,7 +476,6 @@
 	unsigned long offset;
 	struct skb_cb_overlay *sco;
 	bool need_to_notify = false;
-	bool ring_full = false;
 
 	struct netrx_pending_operations npo = {
 		.copy  = vif->grant_copy_op,
@@ -486,7 +485,7 @@
 	skb_queue_head_init(&rxq);
 
 	while ((skb = skb_dequeue(&vif->rx_queue)) != NULL) {
-		int max_slots_needed;
+		RING_IDX max_slots_needed;
 		int i;
 
 		/* We need a cheap worst case estimate for the number of
@@ -509,9 +508,10 @@
 		if (!xenvif_rx_ring_slots_available(vif, max_slots_needed)) {
 			skb_queue_head(&vif->rx_queue, skb);
 			need_to_notify = true;
-			ring_full = true;
+			vif->rx_last_skb_slots = max_slots_needed;
 			break;
-		}
+		} else
+			vif->rx_last_skb_slots = 0;
 
 		sco = (struct skb_cb_overlay *)skb->cb;
 		sco->meta_slots_used = xenvif_gop_skb(skb, &npo);
@@ -522,8 +522,6 @@
 
 	BUG_ON(npo.meta_prod > ARRAY_SIZE(vif->meta));
 
-	vif->rx_queue_stopped = !npo.copy_prod && ring_full;
-
 	if (!npo.copy_prod)
 		goto done;
 
@@ -1473,8 +1471,8 @@
 
 static inline int rx_work_todo(struct xenvif *vif)
 {
-	return (!skb_queue_empty(&vif->rx_queue) && !vif->rx_queue_stopped) ||
-		vif->rx_event;
+	return !skb_queue_empty(&vif->rx_queue) &&
+	       xenvif_rx_ring_slots_available(vif, vif->rx_last_skb_slots);
 }
 
 static inline int tx_work_todo(struct xenvif *vif)
@@ -1560,8 +1558,6 @@
 		if (!skb_queue_empty(&vif->rx_queue))
 			xenvif_rx_action(vif);
 
-		vif->rx_event = false;
-
 		if (skb_queue_empty(&vif->rx_queue) &&
 		    netif_queue_stopped(vif->dev))
 			xenvif_start_queue(vif);
diff --git a/drivers/net/xen-netfront.c b/drivers/net/xen-netfront.c
index e955c56..f9daa9e 100644
--- a/drivers/net/xen-netfront.c
+++ b/drivers/net/xen-netfront.c
@@ -117,6 +117,7 @@
 	} tx_skbs[NET_TX_RING_SIZE];
 	grant_ref_t gref_tx_head;
 	grant_ref_t grant_tx_ref[NET_TX_RING_SIZE];
+	struct page *grant_tx_page[NET_TX_RING_SIZE];
 	unsigned tx_skb_freelist;
 
 	spinlock_t   rx_lock ____cacheline_aligned_in_smp;
@@ -396,6 +397,7 @@
 			gnttab_release_grant_reference(
 				&np->gref_tx_head, np->grant_tx_ref[id]);
 			np->grant_tx_ref[id] = GRANT_INVALID_REF;
+			np->grant_tx_page[id] = NULL;
 			add_id_to_freelist(&np->tx_skb_freelist, np->tx_skbs, id);
 			dev_kfree_skb_irq(skb);
 		}
@@ -452,6 +454,7 @@
 		gnttab_grant_foreign_access_ref(ref, np->xbdev->otherend_id,
 						mfn, GNTMAP_readonly);
 
+		np->grant_tx_page[id] = virt_to_page(data);
 		tx->gref = np->grant_tx_ref[id] = ref;
 		tx->offset = offset;
 		tx->size = len;
@@ -497,6 +500,7 @@
 							np->xbdev->otherend_id,
 							mfn, GNTMAP_readonly);
 
+			np->grant_tx_page[id] = page;
 			tx->gref = np->grant_tx_ref[id] = ref;
 			tx->offset = offset;
 			tx->size = bytes;
@@ -596,6 +600,7 @@
 	mfn = virt_to_mfn(data);
 	gnttab_grant_foreign_access_ref(
 		ref, np->xbdev->otherend_id, mfn, GNTMAP_readonly);
+	np->grant_tx_page[id] = virt_to_page(data);
 	tx->gref = np->grant_tx_ref[id] = ref;
 	tx->offset = offset;
 	tx->size = len;
@@ -1085,10 +1090,11 @@
 			continue;
 
 		skb = np->tx_skbs[i].skb;
-		gnttab_end_foreign_access_ref(np->grant_tx_ref[i],
-					      GNTMAP_readonly);
-		gnttab_release_grant_reference(&np->gref_tx_head,
-					       np->grant_tx_ref[i]);
+		get_page(np->grant_tx_page[i]);
+		gnttab_end_foreign_access(np->grant_tx_ref[i],
+					  GNTMAP_readonly,
+					  (unsigned long)page_address(np->grant_tx_page[i]));
+		np->grant_tx_page[i] = NULL;
 		np->grant_tx_ref[i] = GRANT_INVALID_REF;
 		add_id_to_freelist(&np->tx_skb_freelist, np->tx_skbs, i);
 		dev_kfree_skb_irq(skb);
@@ -1097,78 +1103,35 @@
 
 static void xennet_release_rx_bufs(struct netfront_info *np)
 {
-	struct mmu_update      *mmu = np->rx_mmu;
-	struct multicall_entry *mcl = np->rx_mcl;
-	struct sk_buff_head free_list;
-	struct sk_buff *skb;
-	unsigned long mfn;
-	int xfer = 0, noxfer = 0, unused = 0;
 	int id, ref;
 
-	dev_warn(&np->netdev->dev, "%s: fix me for copying receiver.\n",
-			 __func__);
-	return;
-
-	skb_queue_head_init(&free_list);
-
 	spin_lock_bh(&np->rx_lock);
 
 	for (id = 0; id < NET_RX_RING_SIZE; id++) {
-		ref = np->grant_rx_ref[id];
-		if (ref == GRANT_INVALID_REF) {
-			unused++;
-			continue;
-		}
+		struct sk_buff *skb;
+		struct page *page;
 
 		skb = np->rx_skbs[id];
-		mfn = gnttab_end_foreign_transfer_ref(ref);
-		gnttab_release_grant_reference(&np->gref_rx_head, ref);
+		if (!skb)
+			continue;
+
+		ref = np->grant_rx_ref[id];
+		if (ref == GRANT_INVALID_REF)
+			continue;
+
+		page = skb_frag_page(&skb_shinfo(skb)->frags[0]);
+
+		/* gnttab_end_foreign_access() needs a page ref until
+		 * foreign access is ended (which may be deferred).
+		 */
+		get_page(page);
+		gnttab_end_foreign_access(ref, 0,
+					  (unsigned long)page_address(page));
 		np->grant_rx_ref[id] = GRANT_INVALID_REF;
 
-		if (0 == mfn) {
-			skb_shinfo(skb)->nr_frags = 0;
-			dev_kfree_skb(skb);
-			noxfer++;
-			continue;
-		}
-
-		if (!xen_feature(XENFEAT_auto_translated_physmap)) {
-			/* Remap the page. */
-			const struct page *page =
-				skb_frag_page(&skb_shinfo(skb)->frags[0]);
-			unsigned long pfn = page_to_pfn(page);
-			void *vaddr = page_address(page);
-
-			MULTI_update_va_mapping(mcl, (unsigned long)vaddr,
-						mfn_pte(mfn, PAGE_KERNEL),
-						0);
-			mcl++;
-			mmu->ptr = ((u64)mfn << PAGE_SHIFT)
-				| MMU_MACHPHYS_UPDATE;
-			mmu->val = pfn;
-			mmu++;
-
-			set_phys_to_machine(pfn, mfn);
-		}
-		__skb_queue_tail(&free_list, skb);
-		xfer++;
+		kfree_skb(skb);
 	}
 
-	dev_info(&np->netdev->dev, "%s: %d xfer, %d noxfer, %d unused\n",
-		 __func__, xfer, noxfer, unused);
-
-	if (xfer) {
-		if (!xen_feature(XENFEAT_auto_translated_physmap)) {
-			/* Do all the remapping work and M2P updates. */
-			MULTI_mmu_update(mcl, np->rx_mmu, mmu - np->rx_mmu,
-					 NULL, DOMID_SELF);
-			mcl++;
-			HYPERVISOR_multicall(np->rx_mcl, mcl - np->rx_mcl);
-		}
-	}
-
-	__skb_queue_purge(&free_list);
-
 	spin_unlock_bh(&np->rx_lock);
 }
 
@@ -1339,6 +1302,7 @@
 	for (i = 0; i < NET_RX_RING_SIZE; i++) {
 		np->rx_skbs[i] = NULL;
 		np->grant_rx_ref[i] = GRANT_INVALID_REF;
+		np->grant_tx_page[i] = NULL;
 	}
 
 	/* A grant for every tx ring slot */
@@ -1868,7 +1832,6 @@
 	case XenbusStateReconfiguring:
 	case XenbusStateReconfigured:
 	case XenbusStateUnknown:
-	case XenbusStateClosed:
 		break;
 
 	case XenbusStateInitWait:
@@ -1883,6 +1846,10 @@
 		netdev_notify_peers(netdev);
 		break;
 
+	case XenbusStateClosed:
+		if (dev->state == XenbusStateClosed)
+			break;
+		/* Missed the backend's CLOSING state -- fallthrough */
 	case XenbusStateClosing:
 		xenbus_frontend_closed(dev);
 		break;
diff --git a/drivers/of/address.c b/drivers/of/address.c
index d3dd41c..1a54f1f 100644
--- a/drivers/of/address.c
+++ b/drivers/of/address.c
@@ -99,11 +99,12 @@
 static int of_bus_pci_match(struct device_node *np)
 {
 	/*
+ 	 * "pciex" is PCI Express
 	 * "vci" is for the /chaos bridge on 1st-gen PCI powermacs
 	 * "ht" is hypertransport
 	 */
-	return !strcmp(np->type, "pci") || !strcmp(np->type, "vci") ||
-		!strcmp(np->type, "ht");
+	return !strcmp(np->type, "pci") || !strcmp(np->type, "pciex") ||
+		!strcmp(np->type, "vci") || !strcmp(np->type, "ht");
 }
 
 static void of_bus_pci_count_cells(struct device_node *np,
diff --git a/drivers/of/base.c b/drivers/of/base.c
index ff85450..10b5110 100644
--- a/drivers/of/base.c
+++ b/drivers/of/base.c
@@ -730,46 +730,64 @@
 }
 EXPORT_SYMBOL(of_find_node_with_property);
 
+static const struct of_device_id *
+of_match_compatible(const struct of_device_id *matches,
+			const struct device_node *node)
+{
+	const char *cp;
+	int cplen, l;
+	const struct of_device_id *m;
+
+	cp = __of_get_property(node, "compatible", &cplen);
+	while (cp && (cplen > 0)) {
+		m = matches;
+		while (m->name[0] || m->type[0] || m->compatible[0]) {
+			/* Only match for the entries without type and name */
+			if (m->name[0] || m->type[0] ||
+				of_compat_cmp(m->compatible, cp,
+					 strlen(m->compatible)))
+				m++;
+			else
+				return m;
+		}
+
+		/* Get node's next compatible string */
+		l = strlen(cp) + 1;
+		cp += l;
+		cplen -= l;
+	}
+
+	return NULL;
+}
+
 static
 const struct of_device_id *__of_match_node(const struct of_device_id *matches,
 					   const struct device_node *node)
 {
-	const char *cp;
-	int cplen, l;
+	const struct of_device_id *m;
 
 	if (!matches)
 		return NULL;
 
-	cp = __of_get_property(node, "compatible", &cplen);
-	do {
-		const struct of_device_id *m = matches;
+	m = of_match_compatible(matches, node);
+	if (m)
+		return m;
 
-		/* Check against matches with current compatible string */
-		while (m->name[0] || m->type[0] || m->compatible[0]) {
-			int match = 1;
-			if (m->name[0])
-				match &= node->name
-					&& !strcmp(m->name, node->name);
-			if (m->type[0])
-				match &= node->type
-					&& !strcmp(m->type, node->type);
-			if (m->compatible[0])
-				match &= cp
-					&& !of_compat_cmp(m->compatible, cp,
-							strlen(m->compatible));
-			if (match)
-				return m;
-			m++;
-		}
-
-		/* Get node's next compatible string */ 
-		if (cp) {
-			l = strlen(cp) + 1;
-			cp += l;
-			cplen -= l;
-		}
-	} while (cp && (cplen > 0));
-
+	while (matches->name[0] || matches->type[0] || matches->compatible[0]) {
+		int match = 1;
+		if (matches->name[0])
+			match &= node->name
+				&& !strcmp(matches->name, node->name);
+		if (matches->type[0])
+			match &= node->type
+				&& !strcmp(matches->type, node->type);
+		if (matches->compatible[0])
+			match &= __of_device_is_compatible(node,
+							   matches->compatible);
+		if (match)
+			return matches;
+		matches++;
+	}
 	return NULL;
 }
 
@@ -778,10 +796,12 @@
  *	@matches:	array of of device match structures to search in
  *	@node:		the of device structure to match against
  *
- *	Low level utility function used by device matching. Matching order
- *	is to compare each of the node's compatibles with all given matches
- *	first. This implies node's compatible is sorted from specific to
- *	generic while matches can be in any order.
+ *	Low level utility function used by device matching. We have two ways
+ *	of matching:
+ *	- Try to find the best compatible match by comparing each compatible
+ *	  string of device node with all the given matches respectively.
+ *	- If the above method failed, then try to match the compatible by using
+ *	  __of_device_is_compatible() besides the match in type and name.
  */
 const struct of_device_id *of_match_node(const struct of_device_id *matches,
 					 const struct device_node *node)
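
The restructured __of_match_node() now matches in two passes: the first walks the node's compatible strings in order and compares each against the compatible-only table entries, so the node's specific-to-generic ordering decides the winner; only if that finds nothing does it fall back to the old per-entry name/type/compatible test. A standalone sketch of the first pass, with table entries and compatible strings invented for illustration:

#include <stdio.h>
#include <string.h>

struct of_device_id {
	const char *name;
	const char *type;
	const char *compatible;
};

/* outer loop over the node's compatibles (most specific first), inner loop
 * over the table, considering only entries without name/type */
static const struct of_device_id *
match_compatible(const struct of_device_id *matches, int nmatches,
		 const char *const *compat, int ncompat)
{
	int i, j;

	for (i = 0; i < ncompat; i++)
		for (j = 0; j < nmatches; j++)
			if (!matches[j].name && !matches[j].type &&
			    matches[j].compatible &&
			    !strcmp(matches[j].compatible, compat[i]))
				return &matches[j];
	return NULL;
}

int main(void)
{
	const struct of_device_id table[] = {
		{ .compatible = "vendor,generic-uart" },
		{ .compatible = "vendor,soc-uart" },
	};
	const char *node_compat[] = { "vendor,soc-uart", "vendor,generic-uart" };
	const struct of_device_id *m = match_compatible(table, 2, node_compat, 2);

	/* prints the soc-specific entry even though the generic one is listed first */
	printf("matched: %s\n", m ? m->compatible : "(none)");
	return 0;
}
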
diff --git a/drivers/of/of_mdio.c b/drivers/of/of_mdio.c
index 875b7b6..5b3c24f 100644
--- a/drivers/of/of_mdio.c
+++ b/drivers/of/of_mdio.c
@@ -24,7 +24,11 @@
 
 static void of_set_phy_supported(struct phy_device *phydev, u32 max_speed)
 {
-	phydev->supported |= PHY_DEFAULT_FEATURES;
+	/* The default values for phydev->supported are provided by the PHY
+	 * driver "features" member, we want to reset to sane defaults fist
+	 * before supporting higher speeds.
+	 */
+	phydev->supported &= PHY_DEFAULT_FEATURES;
 
 	switch (max_speed) {
 	default:
@@ -44,7 +48,7 @@
 {
 	struct phy_device *phy;
 	bool is_c45;
-	int rc, prev_irq;
+	int rc;
 	u32 max_speed = 0;
 
 	is_c45 = of_device_is_compatible(child,
@@ -54,12 +58,14 @@
 	if (!phy || IS_ERR(phy))
 		return 1;
 
-	if (mdio->irq) {
-		prev_irq = mdio->irq[addr];
-		mdio->irq[addr] =
-			irq_of_parse_and_map(child, 0);
-		if (!mdio->irq[addr])
-			mdio->irq[addr] = prev_irq;
+	rc = irq_of_parse_and_map(child, 0);
+	if (rc > 0) {
+		phy->irq = rc;
+		if (mdio->irq)
+			mdio->irq[addr] = rc;
+	} else {
+		if (mdio->irq)
+			phy->irq = mdio->irq[addr];
 	}
 
 	/* Associate the OF node with the device structure so it
diff --git a/drivers/pci/hotplug/acpiphp_glue.c b/drivers/pci/hotplug/acpiphp_glue.c
index cd929ae..7c7a388 100644
--- a/drivers/pci/hotplug/acpiphp_glue.c
+++ b/drivers/pci/hotplug/acpiphp_glue.c
@@ -210,10 +210,29 @@
 	}
 }
 
+static void dock_event(acpi_handle handle, u32 type, void *data)
+{
+	struct acpiphp_context *context;
+
+	mutex_lock(&acpiphp_context_lock);
+	context = acpiphp_get_context(handle);
+	if (!context || WARN_ON(context->handle != handle)
+	    || context->func.parent->is_going_away) {
+		mutex_unlock(&acpiphp_context_lock);
+		return;
+	}
+	get_bridge(context->func.parent);
+	acpiphp_put_context(context);
+	mutex_unlock(&acpiphp_context_lock);
+
+	hotplug_event(handle, type, data);
+
+	put_bridge(context->func.parent);
+}
 
 static const struct acpi_dock_ops acpiphp_dock_ops = {
 	.fixup = post_dock_fixups,
-	.handler = hotplug_event,
+	.handler = dock_event,
 };
 
 /* Check whether the PCI device is managed by native PCIe hotplug driver */
@@ -441,7 +460,9 @@
 	list_del(&bridge->list);
 	mutex_unlock(&bridge_mutex);
 
+	mutex_lock(&acpiphp_context_lock);
 	bridge->is_going_away = true;
+	mutex_unlock(&acpiphp_context_lock);
 }
 
 /**
@@ -709,6 +730,17 @@
 	return (unsigned int)sta;
 }
 
+static inline bool device_status_valid(unsigned int sta)
+{
+	/*
+	 * ACPI spec says that _STA may return bit 0 clear with bit 3 set
+	 * if the device is valid but does not require a device driver to be
+	 * loaded (Section 6.3.7 of ACPI 5.0A).
+	 */
+	unsigned int mask = ACPI_STA_DEVICE_ENABLED | ACPI_STA_DEVICE_FUNCTIONING;
+	return (sta & mask) == mask;
+}
+
 /**
  * trim_stale_devices - remove PCI devices that are not responding.
  * @dev: PCI device to start walking the hierarchy from.
@@ -724,7 +756,7 @@
 		unsigned long long sta;
 
 		status = acpi_evaluate_integer(handle, "_STA", NULL, &sta);
-		alive = (ACPI_SUCCESS(status) && sta == ACPI_STA_ALL)
+		alive = (ACPI_SUCCESS(status) && device_status_valid(sta))
 			|| acpiphp_no_hotplug(handle);
 	}
 	if (!alive) {
@@ -742,7 +774,7 @@
 
 		/* The device is a bridge. so check the bus below it. */
 		pm_runtime_get_sync(&dev->dev);
-		list_for_each_entry_safe(child, tmp, &bus->devices, bus_list)
+		list_for_each_entry_safe_reverse(child, tmp, &bus->devices, bus_list)
 			trim_stale_devices(child);
 
 		pm_runtime_put(&dev->dev);
@@ -771,10 +803,10 @@
 		mutex_lock(&slot->crit_sect);
 		if (slot_no_hotplug(slot)) {
 			; /* do nothing */
-		} else if (get_slot_status(slot) == ACPI_STA_ALL) {
+		} else if (device_status_valid(get_slot_status(slot))) {
 			/* remove stale devices if any */
-			list_for_each_entry_safe(dev, tmp, &bus->devices,
-						 bus_list)
+			list_for_each_entry_safe_reverse(dev, tmp,
+							 &bus->devices, bus_list)
 				if (PCI_SLOT(dev->devfn) == slot->device)
 					trim_stale_devices(dev);
 
@@ -805,7 +837,7 @@
 	int i;
 	unsigned long type_mask = IORESOURCE_IO | IORESOURCE_MEM;
 
-	list_for_each_entry_safe(dev, tmp, &bus->devices, bus_list) {
+	list_for_each_entry_safe_reverse(dev, tmp, &bus->devices, bus_list) {
 		for (i=0; i<PCI_BRIDGE_RESOURCES; i++) {
 			struct resource *res = &dev->resource[i];
 			if ((res->flags & type_mask) && !res->start &&
@@ -829,7 +861,11 @@
 
 	bridge = acpiphp_handle_to_bridge(handle);
 	if (bridge) {
+		pci_lock_rescan_remove();
+
 		acpiphp_check_bridge(bridge);
+
+		pci_unlock_rescan_remove();
 		put_bridge(bridge);
 	}
 }
@@ -852,6 +888,7 @@
 
 	mutex_unlock(&acpiphp_context_lock);
 
+	pci_lock_rescan_remove();
 	acpi_get_name(handle, ACPI_FULL_PATHNAME, &buffer);
 
 	switch (type) {
@@ -905,6 +942,7 @@
 		break;
 	}
 
+	pci_unlock_rescan_remove();
 	if (bridge)
 		put_bridge(bridge);
 }
@@ -915,11 +953,9 @@
 	acpi_handle handle = context->handle;
 
 	acpi_scan_lock_acquire();
-	pci_lock_rescan_remove();
 
 	hotplug_event(handle, type, context);
 
-	pci_unlock_rescan_remove();
 	acpi_scan_lock_release();
 	acpi_evaluate_hotplug_ost(handle, type, ACPI_OST_SC_SUCCESS, NULL);
 	put_bridge(context->func.parent);
@@ -937,6 +973,7 @@
 {
 	struct acpiphp_context *context;
 	u32 ost_code = ACPI_OST_SC_SUCCESS;
+	acpi_status status;
 
 	switch (type) {
 	case ACPI_NOTIFY_BUS_CHECK:
@@ -972,13 +1009,20 @@
 
 	mutex_lock(&acpiphp_context_lock);
 	context = acpiphp_get_context(handle);
-	if (context && !WARN_ON(context->handle != handle)) {
-		get_bridge(context->func.parent);
-		acpiphp_put_context(context);
-		acpi_hotplug_execute(hotplug_event_work, context, type);
+	if (!context || WARN_ON(context->handle != handle)
+	    || context->func.parent->is_going_away)
+		goto err_out;
+
+	get_bridge(context->func.parent);
+	acpiphp_put_context(context);
+	status = acpi_hotplug_execute(hotplug_event_work, context, type);
+	if (ACPI_SUCCESS(status)) {
 		mutex_unlock(&acpiphp_context_lock);
 		return;
 	}
+	put_bridge(context->func.parent);
+
+ err_out:
 	mutex_unlock(&acpiphp_context_lock);
 	ost_code = ACPI_OST_SC_NON_SPECIFIC_FAILURE;
 
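
device_status_valid() above replaces the strict sta == ACPI_STA_ALL comparison with a mask test, so a slot whose _STA sets the enabled and functioning bits but clears the others is still treated as populated. A small sketch of the check; the bit values are restated here as an assumption (they follow the ACPICA definitions) to keep the example self-contained:

#include <stdbool.h>
#include <stdio.h>

#define ACPI_STA_DEVICE_PRESENT		0x01	/* assumed ACPICA values */
#define ACPI_STA_DEVICE_ENABLED		0x02
#define ACPI_STA_DEVICE_UI		0x04
#define ACPI_STA_DEVICE_FUNCTIONING	0x08
#define ACPI_STA_ALL			0x0f

static bool device_status_valid(unsigned int sta)
{
	unsigned int mask = ACPI_STA_DEVICE_ENABLED | ACPI_STA_DEVICE_FUNCTIONING;

	return (sta & mask) == mask;
}

int main(void)
{
	/* 0x0f passes both the old and the new test; 0x0a (enabled +
	 * functioning, bit 0 clear) only passes the new one */
	printf("sta=0x0f: %d, sta=0x0a: %d, sta=0x00: %d\n",
	       device_status_valid(0x0f), device_status_valid(0x0a),
	       device_status_valid(0x00));
	return 0;
}
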
diff --git a/drivers/pci/probe.c b/drivers/pci/probe.c
index 04796c0..6e34498 100644
--- a/drivers/pci/probe.c
+++ b/drivers/pci/probe.c
@@ -1208,18 +1208,6 @@
 	pci_free_cap_save_buffers(dev);
 }
 
-static void pci_free_resources(struct pci_dev *dev)
-{
-	int i;
-
-	pci_cleanup_rom(dev);
-	for (i = 0; i < PCI_NUM_RESOURCES; i++) {
-		struct resource *res = dev->resource + i;
-		if (res->parent)
-			release_resource(res);
-	}
-}
-
 /**
  * pci_release_dev - free a pci device structure when all users of it are finished.
  * @dev: device that's been disconnected
@@ -1229,14 +1217,9 @@
  */
 static void pci_release_dev(struct device *dev)
 {
-	struct pci_dev *pci_dev = to_pci_dev(dev);
+	struct pci_dev *pci_dev;
 
-	down_write(&pci_bus_sem);
-	list_del(&pci_dev->bus_list);
-	up_write(&pci_bus_sem);
-
-	pci_free_resources(pci_dev);
-
+	pci_dev = to_pci_dev(dev);
 	pci_release_capabilities(pci_dev);
 	pci_release_of_node(pci_dev);
 	pcibios_release_device(pci_dev);
diff --git a/drivers/pci/remove.c b/drivers/pci/remove.c
index 4ff36bf..8bd76c9 100644
--- a/drivers/pci/remove.c
+++ b/drivers/pci/remove.c
@@ -3,6 +3,18 @@
 #include <linux/pci-aspm.h>
 #include "pci.h"
 
+static void pci_free_resources(struct pci_dev *dev)
+{
+	int i;
+
+	pci_cleanup_rom(dev);
+	for (i = 0; i < PCI_NUM_RESOURCES; i++) {
+		struct resource *res = dev->resource + i;
+		if (res->parent)
+			release_resource(res);
+	}
+}
+
 static void pci_stop_dev(struct pci_dev *dev)
 {
 	pci_pme_active(dev, false);
@@ -25,6 +37,11 @@
 
 	device_del(&dev->dev);
 
+	down_write(&pci_bus_sem);
+	list_del(&dev->bus_list);
+	up_write(&pci_bus_sem);
+
+	pci_free_resources(dev);
 	put_device(&dev->dev);
 }
 
diff --git a/drivers/phy/phy-core.c b/drivers/phy/phy-core.c
index 645c867..5f5b0f4 100644
--- a/drivers/phy/phy-core.c
+++ b/drivers/phy/phy-core.c
@@ -162,6 +162,9 @@
 {
 	int ret;
 
+	if (!phy)
+		return 0;
+
 	ret = phy_pm_runtime_get_sync(phy);
 	if (ret < 0 && ret != -ENOTSUPP)
 		return ret;
@@ -187,6 +190,9 @@
 {
 	int ret;
 
+	if (!phy)
+		return 0;
+
 	ret = phy_pm_runtime_get_sync(phy);
 	if (ret < 0 && ret != -ENOTSUPP)
 		return ret;
@@ -212,6 +218,9 @@
 {
 	int ret;
 
+	if (!phy)
+		return 0;
+
 	ret = phy_pm_runtime_get_sync(phy);
 	if (ret < 0 && ret != -ENOTSUPP)
 		return ret;
@@ -240,6 +249,9 @@
 {
 	int ret;
 
+	if (!phy)
+		return 0;
+
 	mutex_lock(&phy->mutex);
 	if (phy->power_count == 1 && phy->ops->power_off) {
 		ret =  phy->ops->power_off(phy);
@@ -308,7 +320,7 @@
  */
 void phy_put(struct phy *phy)
 {
-	if (IS_ERR(phy))
+	if (!phy || IS_ERR(phy))
 		return;
 
 	module_put(phy->ops->owner);
@@ -328,6 +340,9 @@
 {
 	int r;
 
+	if (!phy)
+		return;
+
 	r = devres_destroy(dev, devm_phy_release, devm_phy_match, phy);
 	dev_WARN_ONCE(dev, r, "couldn't find PHY resource\n");
 }
@@ -411,6 +426,27 @@
 EXPORT_SYMBOL_GPL(phy_get);
 
 /**
+ * phy_optional_get() - lookup and obtain a reference to an optional phy.
+ * @dev: device that requests this phy
+ * @string: the phy name as given in the dt data or the name of the controller
+ * port for non-dt case
+ *
+ * Returns the phy driver, after getting a refcount to it; or
+ * NULL if there is no such phy.  The caller is responsible for
+ * calling phy_put() to release that count.
+ */
+struct phy *phy_optional_get(struct device *dev, const char *string)
+{
+	struct phy *phy = phy_get(dev, string);
+
+	if (PTR_ERR(phy) == -ENODEV)
+		phy = NULL;
+
+	return phy;
+}
+EXPORT_SYMBOL_GPL(phy_optional_get);
+
+/**
  * devm_phy_get() - lookup and obtain a reference to a phy.
  * @dev: device that requests this phy
  * @string: the phy name as given in the dt data or phy device name
@@ -441,6 +477,30 @@
 EXPORT_SYMBOL_GPL(devm_phy_get);
 
 /**
+ * devm_phy_optional_get() - lookup and obtain a reference to an optional phy.
+ * @dev: device that requests this phy
+ * @string: the phy name as given in the dt data or phy device name
+ * for non-dt case
+ *
+ * Gets the phy using phy_get(), and associates a device with it using
+ * devres. On driver detach, release function is invoked on the devres
+ * data, then, devres data is freed. This differs to devm_phy_get() in
+ * that if the phy does not exist, it is not considered an error and
+ * -ENODEV will not be returned. Instead the NULL phy is returned,
+ * which can be passed to all other phy consumer calls.
+ */
+struct phy *devm_phy_optional_get(struct device *dev, const char *string)
+{
+	struct phy *phy = devm_phy_get(dev, string);
+
+	if (PTR_ERR(phy) == -ENODEV)
+		phy = NULL;
+
+	return phy;
+}
+EXPORT_SYMBOL_GPL(devm_phy_optional_get);
+
+/**
  * phy_create() - create a new phy
  * @dev: device that is creating the new phy
  * @ops: function pointers for performing phy operations
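
phy_optional_get()/devm_phy_optional_get(), combined with the NULL checks added throughout the consumer entry points above (phy_put(), devm_phy_put() and the init/power paths), let a consumer treat a missing PHY as a silent no-op instead of special-casing -ENODEV. A userspace sketch of that contract; the *_sim() functions are stand-ins for the generic PHY API, not part of it:

#include <stdio.h>
#include <stddef.h>

struct phy { const char *name; };

/* a missing optional PHY is reported as NULL rather than as an error pointer */
static struct phy *phy_optional_get_sim(struct phy *present_on_this_board)
{
	return present_on_this_board;	/* NULL when the board has no PHY */
}

/* every consumer call accepts NULL and succeeds without doing anything */
static int phy_init_sim(struct phy *phy)
{
	if (!phy)
		return 0;
	printf("initialising %s\n", phy->name);
	return 0;
}

int main(void)
{
	struct phy usb = { "usb-phy" };

	phy_init_sim(phy_optional_get_sim(&usb));	/* PHY present */
	phy_init_sim(phy_optional_get_sim(NULL));	/* optional PHY absent */
	return 0;
}
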
diff --git a/drivers/pinctrl/core.c b/drivers/pinctrl/core.c
index 5ee61a4..c0fe609 100644
--- a/drivers/pinctrl/core.c
+++ b/drivers/pinctrl/core.c
@@ -851,7 +851,9 @@
 	kref_init(&p->users);
 
 	/* Add the pinctrl handle to the global list */
+	mutex_lock(&pinctrl_list_mutex);
 	list_add_tail(&p->node, &pinctrl_list);
+	mutex_unlock(&pinctrl_list_mutex);
 
 	return p;
 }
@@ -1642,8 +1644,10 @@
 			    device_root, pctldev, &pinctrl_groups_ops);
 	debugfs_create_file("gpio-ranges", S_IFREG | S_IRUGO,
 			    device_root, pctldev, &pinctrl_gpioranges_ops);
-	pinmux_init_device_debugfs(device_root, pctldev);
-	pinconf_init_device_debugfs(device_root, pctldev);
+	if (pctldev->desc->pmxops)
+		pinmux_init_device_debugfs(device_root, pctldev);
+	if (pctldev->desc->confops)
+		pinconf_init_device_debugfs(device_root, pctldev);
 }
 
 static void pinctrl_remove_device_debugfs(struct pinctrl_dev *pctldev)
diff --git a/drivers/pinctrl/pinctrl-at91.c b/drivers/pinctrl/pinctrl-at91.c
index 38c6f8b..d990e33 100644
--- a/drivers/pinctrl/pinctrl-at91.c
+++ b/drivers/pinctrl/pinctrl-at91.c
@@ -1286,22 +1286,22 @@
 
 	switch (type) {
 	case IRQ_TYPE_EDGE_RISING:
-		irq_set_handler(d->irq, handle_simple_irq);
+		__irq_set_handler_locked(d->irq, handle_simple_irq);
 		writel_relaxed(mask, pio + PIO_ESR);
 		writel_relaxed(mask, pio + PIO_REHLSR);
 		break;
 	case IRQ_TYPE_EDGE_FALLING:
-		irq_set_handler(d->irq, handle_simple_irq);
+		__irq_set_handler_locked(d->irq, handle_simple_irq);
 		writel_relaxed(mask, pio + PIO_ESR);
 		writel_relaxed(mask, pio + PIO_FELLSR);
 		break;
 	case IRQ_TYPE_LEVEL_LOW:
-		irq_set_handler(d->irq, handle_level_irq);
+		__irq_set_handler_locked(d->irq, handle_level_irq);
 		writel_relaxed(mask, pio + PIO_LSR);
 		writel_relaxed(mask, pio + PIO_FELLSR);
 		break;
 	case IRQ_TYPE_LEVEL_HIGH:
-		irq_set_handler(d->irq, handle_level_irq);
+		__irq_set_handler_locked(d->irq, handle_level_irq);
 		writel_relaxed(mask, pio + PIO_LSR);
 		writel_relaxed(mask, pio + PIO_REHLSR);
 		break;
@@ -1310,7 +1310,7 @@
 		 * disable additional interrupt modes:
 		 * fall back to default behavior
 		 */
-		irq_set_handler(d->irq, handle_simple_irq);
+		__irq_set_handler_locked(d->irq, handle_simple_irq);
 		writel_relaxed(mask, pio + PIO_AIMDR);
 		return 0;
 	case IRQ_TYPE_NONE:
diff --git a/drivers/pinctrl/pinctrl-imx1-core.c b/drivers/pinctrl/pinctrl-imx1-core.c
index 17aecde..815384b 100644
--- a/drivers/pinctrl/pinctrl-imx1-core.c
+++ b/drivers/pinctrl/pinctrl-imx1-core.c
@@ -45,7 +45,7 @@
 #define MX1_DDIR 0x00
 #define MX1_OCR 0x04
 #define MX1_ICONFA 0x0c
-#define MX1_ICONFB 0x10
+#define MX1_ICONFB 0x14
 #define MX1_GIUS 0x20
 #define MX1_GPR 0x38
 #define MX1_PUEN 0x40
@@ -97,13 +97,13 @@
 	u32 old_val;
 	u32 new_val;
 
-	dev_dbg(ipctl->dev, "write: register 0x%p offset %d value 0x%x\n",
-			reg, offset, value);
-
 	/* Use the next register if the pin's port pin number is >=16 */
 	if (pin_id % 32 >= 16)
 		reg += 0x04;
 
+	dev_dbg(ipctl->dev, "write: register 0x%p offset %d value 0x%x\n",
+			reg, offset, value);
+
 	/* Get current state of pins */
 	old_val = readl(reg);
 	old_val &= mask;
@@ -139,7 +139,7 @@
 		u32 reg_offset)
 {
 	void __iomem *reg = imx1_mem(ipctl, pin_id) + reg_offset;
-	int offset = pin_id % 16;
+	int offset = (pin_id % 16) * 2;
 
 	/* Use the next register if the pin's port pin number is >=16 */
 	if (pin_id % 32 >= 16)
diff --git a/drivers/pinctrl/pinctrl-tegra.c b/drivers/pinctrl/pinctrl-tegra.c
index a2e93a2..e767355 100644
--- a/drivers/pinctrl/pinctrl-tegra.c
+++ b/drivers/pinctrl/pinctrl-tegra.c
@@ -645,7 +645,7 @@
 				 GFP_KERNEL);
 	if (!pmx->regs) {
 		dev_err(&pdev->dev, "Can't alloc regs pointer\n");
-		return -ENODEV;
+		return -ENOMEM;
 	}
 
 	for (i = 0; i < pmx->nbanks; i++) {
diff --git a/drivers/pinctrl/sirf/pinctrl-prima2.c b/drivers/pinctrl/sirf/pinctrl-prima2.c
index 37b4265..dde0285 100644
--- a/drivers/pinctrl/sirf/pinctrl-prima2.c
+++ b/drivers/pinctrl/sirf/pinctrl-prima2.c
@@ -413,7 +413,7 @@
 	.funcval = 0,
 };
 
-static const unsigned ac97_pins[] = { 33, 34, 35, 36 };
+static const unsigned ac97_pins[] = { 43, 44, 45, 46 };
 
 static const struct sirfsoc_muxmask spi1_muxmask[] = {
 	{
diff --git a/drivers/pinctrl/vt8500/pinctrl-wmt.c b/drivers/pinctrl/vt8500/pinctrl-wmt.c
index b28d1af..9802b67 100644
--- a/drivers/pinctrl/vt8500/pinctrl-wmt.c
+++ b/drivers/pinctrl/vt8500/pinctrl-wmt.c
@@ -276,7 +276,20 @@
 	if (!configs)
 		return -ENOMEM;
 
-	configs[0] = pull;
+	switch (pull) {
+	case 0:
+		configs[0] = PIN_CONFIG_BIAS_DISABLE;
+		break;
+	case 1:
+		configs[0] = PIN_CONFIG_BIAS_PULL_DOWN;
+		break;
+	case 2:
+		configs[0] = PIN_CONFIG_BIAS_PULL_UP;
+		break;
+	default:
+		configs[0] = PIN_CONFIG_BIAS_DISABLE;
+		dev_err(data->dev, "invalid pull state %d - disabling\n", pull);
+	}
 
 	map->type = PIN_MAP_TYPE_CONFIGS_PIN;
 	map->data.configs.group_or_pin = data->groups[group];
diff --git a/drivers/platform/chrome/Kconfig b/drivers/platform/chrome/Kconfig
index b13303e..440ed77 100644
--- a/drivers/platform/chrome/Kconfig
+++ b/drivers/platform/chrome/Kconfig
@@ -25,4 +25,18 @@
 	  If you have a supported Chromebook, choose Y or M here.
 	  The module will be called chromeos_laptop.
 
+config CHROMEOS_PSTORE
+	tristate "Chrome OS pstore support"
+	---help---
+	  This module instantiates the persistent storage on x86 ChromeOS
+	  devices. It can be used to store away console logs and crash
+	  information across reboots.
+
+	  The range of memory used is 0xf00000-0x1000000, traditionally
+	  the memory used to back VGA controller memory.
+
+	  If you have a supported Chromebook, choose Y or M here.
+	  The module will be called chromeos_pstore.
+
+
 endif # CHROMEOS_PLATFORMS
diff --git a/drivers/platform/chrome/Makefile b/drivers/platform/chrome/Makefile
index 015e919..2b860ca 100644
--- a/drivers/platform/chrome/Makefile
+++ b/drivers/platform/chrome/Makefile
@@ -1,2 +1,3 @@
 
 obj-$(CONFIG_CHROMEOS_LAPTOP)	+= chromeos_laptop.o
+obj-$(CONFIG_CHROMEOS_PSTORE)	+= chromeos_pstore.o
diff --git a/drivers/platform/chrome/chromeos_laptop.c b/drivers/platform/chrome/chromeos_laptop.c
index 3e5b4497..7f3aad0 100644
--- a/drivers/platform/chrome/chromeos_laptop.c
+++ b/drivers/platform/chrome/chromeos_laptop.c
@@ -27,6 +27,7 @@
 #include <linux/input.h>
 #include <linux/interrupt.h>
 #include <linux/module.h>
+#include <linux/platform_device.h>
 
 #define ATMEL_TP_I2C_ADDR	0x4b
 #define ATMEL_TP_I2C_BL_ADDR	0x25
@@ -40,7 +41,7 @@
 static struct i2c_client *tp;
 static struct i2c_client *ts;
 
-const char *i2c_adapter_names[] = {
+static const char *i2c_adapter_names[] = {
 	"SMBus I801 adapter",
 	"i915 gmbus vga",
 	"i915 gmbus panel",
@@ -53,20 +54,33 @@
 	I2C_ADAPTER_PANEL,
 };
 
-static struct i2c_board_info __initdata cyapa_device = {
+struct i2c_peripheral {
+	int (*add)(enum i2c_adapter_type type);
+	enum i2c_adapter_type type;
+};
+
+#define MAX_I2C_PERIPHERALS 3
+
+struct chromeos_laptop {
+	struct i2c_peripheral i2c_peripherals[MAX_I2C_PERIPHERALS];
+};
+
+static struct chromeos_laptop *cros_laptop;
+
+static struct i2c_board_info cyapa_device = {
 	I2C_BOARD_INFO("cyapa", CYAPA_TP_I2C_ADDR),
 	.flags		= I2C_CLIENT_WAKE,
 };
 
-static struct i2c_board_info __initdata isl_als_device = {
+static struct i2c_board_info isl_als_device = {
 	I2C_BOARD_INFO("isl29018", ISL_ALS_I2C_ADDR),
 };
 
-static struct i2c_board_info __initdata tsl2583_als_device = {
+static struct i2c_board_info tsl2583_als_device = {
 	I2C_BOARD_INFO("tsl2583", TAOS_ALS_I2C_ADDR),
 };
 
-static struct i2c_board_info __initdata tsl2563_als_device = {
+static struct i2c_board_info tsl2563_als_device = {
 	I2C_BOARD_INFO("tsl2563", TAOS_ALS_I2C_ADDR),
 };
 
@@ -89,7 +103,7 @@
 	.config_length		= 0,
 };
 
-static struct i2c_board_info __initdata atmel_224s_tp_device = {
+static struct i2c_board_info atmel_224s_tp_device = {
 	I2C_BOARD_INFO("atmel_mxt_tp", ATMEL_TP_I2C_ADDR),
 	.platform_data = &atmel_224s_tp_platform_data,
 	.flags		= I2C_CLIENT_WAKE,
@@ -110,13 +124,13 @@
 	.config_length		= 0,
 };
 
-static struct i2c_board_info __initdata atmel_1664s_device = {
+static struct i2c_board_info atmel_1664s_device = {
 	I2C_BOARD_INFO("atmel_mxt_ts", ATMEL_TS_I2C_ADDR),
 	.platform_data = &atmel_1664s_platform_data,
 	.flags		= I2C_CLIENT_WAKE,
 };
 
-static struct i2c_client __init *__add_probed_i2c_device(
+static struct i2c_client *__add_probed_i2c_device(
 		const char *name,
 		int bus,
 		struct i2c_board_info *info,
@@ -169,7 +183,7 @@
 	return client;
 }
 
-static int __init __find_i2c_adap(struct device *dev, void *data)
+static int __find_i2c_adap(struct device *dev, void *data)
 {
 	const char *name = data;
 	static const char *prefix = "i2c-";
@@ -180,7 +194,7 @@
 	return (strncmp(adapter->name, name, strlen(name)) == 0);
 }
 
-static int __init find_i2c_adapter_num(enum i2c_adapter_type type)
+static int find_i2c_adapter_num(enum i2c_adapter_type type)
 {
 	struct device *dev = NULL;
 	struct i2c_adapter *adapter;
@@ -189,8 +203,9 @@
 	dev = bus_find_device(&i2c_bus_type, NULL, (void *)name,
 			      __find_i2c_adap);
 	if (!dev) {
-		pr_err("%s: i2c adapter %s not found on system.\n", __func__,
-		       name);
+		/* Adapters may appear later. Deferred probing will retry */
+		pr_notice("%s: i2c adapter %s not found on system.\n", __func__,
+			  name);
 		return -ENODEV;
 	}
 	adapter = to_i2c_adapter(dev);
@@ -205,7 +220,7 @@
  * Returns NULL if no devices found.
  * See Documentation/i2c/instantiating-devices for more information.
  */
-static __init struct i2c_client *add_probed_i2c_device(
+static struct i2c_client *add_probed_i2c_device(
 		const char *name,
 		enum i2c_adapter_type type,
 		struct i2c_board_info *info,
@@ -222,7 +237,7 @@
  * info->addr.
  * Returns NULL if no device found.
  */
-static __init struct i2c_client *add_i2c_device(const char *name,
+static struct i2c_client *add_i2c_device(const char *name,
 						enum i2c_adapter_type type,
 						struct i2c_board_info *info)
 {
@@ -233,161 +248,259 @@
 				       addr_list);
 }
 
-
-static struct i2c_client __init *add_smbus_device(const char *name,
-						  struct i2c_board_info *info)
+static int setup_cyapa_tp(enum i2c_adapter_type type)
 {
-	return add_i2c_device(name, I2C_ADAPTER_SMBUS, info);
+	if (tp)
+		return 0;
+
+	/* add cyapa touchpad */
+	tp = add_i2c_device("trackpad", type, &cyapa_device);
+	return (!tp) ? -EAGAIN : 0;
 }
 
-static int __init setup_cyapa_smbus_tp(const struct dmi_system_id *id)
-{
-	/* add cyapa touchpad on smbus */
-	tp = add_smbus_device("trackpad", &cyapa_device);
-	return 0;
-}
-
-static int __init setup_atmel_224s_tp(const struct dmi_system_id *id)
+static int setup_atmel_224s_tp(enum i2c_adapter_type type)
 {
 	const unsigned short addr_list[] = { ATMEL_TP_I2C_BL_ADDR,
 					     ATMEL_TP_I2C_ADDR,
 					     I2C_CLIENT_END };
+	if (tp)
+		return 0;
 
-	/* add atmel mxt touchpad on VGA DDC GMBus */
-	tp = add_probed_i2c_device("trackpad", I2C_ADAPTER_VGADDC,
+	/* add atmel mxt touchpad */
+	tp = add_probed_i2c_device("trackpad", type,
 				   &atmel_224s_tp_device, addr_list);
-	return 0;
+	return (!tp) ? -EAGAIN : 0;
 }
 
-static int __init setup_atmel_1664s_ts(const struct dmi_system_id *id)
+static int setup_atmel_1664s_ts(enum i2c_adapter_type type)
 {
 	const unsigned short addr_list[] = { ATMEL_TS_I2C_BL_ADDR,
 					     ATMEL_TS_I2C_ADDR,
 					     I2C_CLIENT_END };
+	if (ts)
+		return 0;
 
-	/* add atmel mxt touch device on PANEL GMBus */
-	ts = add_probed_i2c_device("touchscreen", I2C_ADAPTER_PANEL,
+	/* add atmel mxt touch device */
+	ts = add_probed_i2c_device("touchscreen", type,
 				   &atmel_1664s_device, addr_list);
-	return 0;
+	return (!ts) ? -EAGAIN : 0;
 }
 
-
-static int __init setup_isl29018_als(const struct dmi_system_id *id)
+static int setup_isl29018_als(enum i2c_adapter_type type)
 {
+	if (als)
+		return 0;
+
 	/* add isl29018 light sensor */
-	als = add_smbus_device("lightsensor", &isl_als_device);
-	return 0;
+	als = add_i2c_device("lightsensor", type, &isl_als_device);
+	return (!als) ? -EAGAIN : 0;
 }
 
-static int __init setup_isl29023_als(const struct dmi_system_id *id)
+static int setup_tsl2583_als(enum i2c_adapter_type type)
 {
-	/* add isl29023 light sensor on Panel GMBus */
-	als = add_i2c_device("lightsensor", I2C_ADAPTER_PANEL,
-			     &isl_als_device);
-	return 0;
+	if (als)
+		return 0;
+
+	/* add tsl2583 light sensor */
+	als = add_i2c_device(NULL, type, &tsl2583_als_device);
+	return (!als) ? -EAGAIN : 0;
 }
 
-static int __init setup_tsl2583_als(const struct dmi_system_id *id)
+static int setup_tsl2563_als(enum i2c_adapter_type type)
 {
-	/* add tsl2583 light sensor on smbus */
-	als = add_smbus_device(NULL, &tsl2583_als_device);
-	return 0;
+	if (als)
+		return 0;
+
+	/* add tsl2563 light sensor */
+	als = add_i2c_device(NULL, type, &tsl2563_als_device);
+	return (!als) ? -EAGAIN : 0;
 }
 
-static int __init setup_tsl2563_als(const struct dmi_system_id *id)
+static int __init chromeos_laptop_dmi_matched(const struct dmi_system_id *id)
 {
-	/* add tsl2563 light sensor on smbus */
-	als = add_smbus_device(NULL, &tsl2563_als_device);
-	return 0;
+	cros_laptop = (void *)id->driver_data;
+	pr_debug("DMI Matched %s.\n", id->ident);
+
+	/* Indicate to dmi_scan that processing is done. */
+	return 1;
 }
 
-static struct dmi_system_id __initdata chromeos_laptop_dmi_table[] = {
+static int chromeos_laptop_probe(struct platform_device *pdev)
+{
+	int i;
+	int ret = 0;
+
+	for (i = 0; i < MAX_I2C_PERIPHERALS; i++) {
+		struct i2c_peripheral *i2c_dev;
+
+		i2c_dev = &cros_laptop->i2c_peripherals[i];
+
+		/* No more peripherals. */
+		if (i2c_dev->add == NULL)
+			break;
+
+		/* Add the device. Set -EPROBE_DEFER on any failure */
+		if (i2c_dev->add(i2c_dev->type))
+			ret = -EPROBE_DEFER;
+	}
+
+	return ret;
+}
+
+static struct chromeos_laptop samsung_series_5_550 = {
+	.i2c_peripherals = {
+		/* Touchpad. */
+		{ .add = setup_cyapa_tp, I2C_ADAPTER_SMBUS },
+		/* Light Sensor. */
+		{ .add = setup_isl29018_als, I2C_ADAPTER_SMBUS },
+	},
+};
+
+static struct chromeos_laptop samsung_series_5 = {
+	.i2c_peripherals = {
+		/* Light Sensor. */
+		{ .add = setup_tsl2583_als, I2C_ADAPTER_SMBUS },
+	},
+};
+
+static struct chromeos_laptop chromebook_pixel = {
+	.i2c_peripherals = {
+		/* Touch Screen. */
+		{ .add = setup_atmel_1664s_ts, I2C_ADAPTER_PANEL },
+		/* Touchpad. */
+		{ .add = setup_atmel_224s_tp, I2C_ADAPTER_VGADDC },
+		/* Light Sensor. */
+		{ .add = setup_isl29018_als, I2C_ADAPTER_PANEL },
+	},
+};
+
+static struct chromeos_laptop acer_c7_chromebook = {
+	.i2c_peripherals = {
+		/* Touchpad. */
+		{ .add = setup_cyapa_tp, I2C_ADAPTER_SMBUS },
+	},
+};
+
+static struct chromeos_laptop acer_ac700 = {
+	.i2c_peripherals = {
+		/* Light Sensor. */
+		{ .add = setup_tsl2563_als, I2C_ADAPTER_SMBUS },
+	},
+};
+
+static struct chromeos_laptop hp_pavilion_14_chromebook = {
+	.i2c_peripherals = {
+		/* Touchpad. */
+		{ .add = setup_cyapa_tp, I2C_ADAPTER_SMBUS },
+	},
+};
+
+static struct chromeos_laptop cr48 = {
+	.i2c_peripherals = {
+		/* Light Sensor. */
+		{ .add = setup_tsl2563_als, I2C_ADAPTER_SMBUS },
+	},
+};
+
+#define _CBDD(board_) \
+	.callback = chromeos_laptop_dmi_matched, \
+	.driver_data = (void *)&board_
+
+static struct dmi_system_id chromeos_laptop_dmi_table[] __initdata = {
 	{
-		.ident = "Samsung Series 5 550 - Touchpad",
+		.ident = "Samsung Series 5 550",
 		.matches = {
 			DMI_MATCH(DMI_SYS_VENDOR, "SAMSUNG"),
 			DMI_MATCH(DMI_PRODUCT_NAME, "Lumpy"),
 		},
-		.callback = setup_cyapa_smbus_tp,
+		_CBDD(samsung_series_5_550),
 	},
 	{
-		.ident = "Chromebook Pixel - Touchscreen",
-		.matches = {
-			DMI_MATCH(DMI_SYS_VENDOR, "GOOGLE"),
-			DMI_MATCH(DMI_PRODUCT_NAME, "Link"),
-		},
-		.callback = setup_atmel_1664s_ts,
-	},
-	{
-		.ident = "Chromebook Pixel - Touchpad",
-		.matches = {
-			DMI_MATCH(DMI_SYS_VENDOR, "GOOGLE"),
-			DMI_MATCH(DMI_PRODUCT_NAME, "Link"),
-		},
-		.callback = setup_atmel_224s_tp,
-	},
-	{
-		.ident = "Samsung Series 5 550 - Light Sensor",
-		.matches = {
-			DMI_MATCH(DMI_SYS_VENDOR, "SAMSUNG"),
-			DMI_MATCH(DMI_PRODUCT_NAME, "Lumpy"),
-		},
-		.callback = setup_isl29018_als,
-	},
-	{
-		.ident = "Chromebook Pixel - Light Sensor",
-		.matches = {
-			DMI_MATCH(DMI_SYS_VENDOR, "GOOGLE"),
-			DMI_MATCH(DMI_PRODUCT_NAME, "Link"),
-		},
-		.callback = setup_isl29023_als,
-	},
-	{
-		.ident = "Acer C7 Chromebook - Touchpad",
-		.matches = {
-			DMI_MATCH(DMI_PRODUCT_NAME, "Parrot"),
-		},
-		.callback = setup_cyapa_smbus_tp,
-	},
-	{
-		.ident = "HP Pavilion 14 Chromebook - Touchpad",
-		.matches = {
-			DMI_MATCH(DMI_PRODUCT_NAME, "Butterfly"),
-		},
-		.callback = setup_cyapa_smbus_tp,
-	},
-	{
-		.ident = "Samsung Series 5 - Light Sensor",
+		.ident = "Samsung Series 5",
 		.matches = {
 			DMI_MATCH(DMI_PRODUCT_NAME, "Alex"),
 		},
-		.callback = setup_tsl2583_als,
+		_CBDD(samsung_series_5),
 	},
 	{
-		.ident = "Cr-48 - Light Sensor",
+		.ident = "Chromebook Pixel",
 		.matches = {
-			DMI_MATCH(DMI_PRODUCT_NAME, "Mario"),
+			DMI_MATCH(DMI_SYS_VENDOR, "GOOGLE"),
+			DMI_MATCH(DMI_PRODUCT_NAME, "Link"),
 		},
-		.callback = setup_tsl2563_als,
+		_CBDD(chromebook_pixel),
 	},
 	{
-		.ident = "Acer AC700 - Light Sensor",
+		.ident = "Acer C7 Chromebook",
+		.matches = {
+			DMI_MATCH(DMI_PRODUCT_NAME, "Parrot"),
+		},
+		_CBDD(acer_c7_chromebook),
+	},
+	{
+		.ident = "Acer AC700",
 		.matches = {
 			DMI_MATCH(DMI_PRODUCT_NAME, "ZGB"),
 		},
-		.callback = setup_tsl2563_als,
+		_CBDD(acer_ac700),
+	},
+	{
+		.ident = "HP Pavilion 14 Chromebook",
+		.matches = {
+			DMI_MATCH(DMI_PRODUCT_NAME, "Butterfly"),
+		},
+		_CBDD(hp_pavilion_14_chromebook),
+	},
+	{
+		.ident = "Cr-48",
+		.matches = {
+			DMI_MATCH(DMI_PRODUCT_NAME, "Mario"),
+		},
+		_CBDD(cr48),
 	},
 	{ }
 };
 MODULE_DEVICE_TABLE(dmi, chromeos_laptop_dmi_table);
 
+static struct platform_device *cros_platform_device;
+
+static struct platform_driver cros_platform_driver = {
+	.driver = {
+		.name = "chromeos_laptop",
+		.owner = THIS_MODULE,
+	},
+	.probe = chromeos_laptop_probe,
+};
+
 static int __init chromeos_laptop_init(void)
 {
+	int ret;
 	if (!dmi_check_system(chromeos_laptop_dmi_table)) {
 		pr_debug("%s unsupported system.\n", __func__);
 		return -ENODEV;
 	}
+
+	ret = platform_driver_register(&cros_platform_driver);
+	if (ret)
+		return ret;
+
+	cros_platform_device = platform_device_alloc("chromeos_laptop", -1);
+	if (!cros_platform_device) {
+		ret = -ENOMEM;
+		goto fail_platform_device1;
+	}
+
+	ret = platform_device_add(cros_platform_device);
+	if (ret)
+		goto fail_platform_device2;
+
 	return 0;
+
+fail_platform_device2:
+	platform_device_put(cros_platform_device);
+fail_platform_device1:
+	platform_driver_unregister(&cros_platform_driver);
+	return ret;
 }
 
 static void __exit chromeos_laptop_exit(void)
@@ -398,6 +511,9 @@
 		i2c_unregister_device(tp);
 	if (ts)
 		i2c_unregister_device(ts);
+
+	platform_device_unregister(cros_platform_device);
+	platform_driver_unregister(&cros_platform_driver);
 }
 
 module_init(chromeos_laptop_init);
diff --git a/drivers/platform/chrome/chromeos_pstore.c b/drivers/platform/chrome/chromeos_pstore.c
new file mode 100644
index 0000000..e0e0e65
--- /dev/null
+++ b/drivers/platform/chrome/chromeos_pstore.c
@@ -0,0 +1,101 @@
+/*
+ *  chromeos_pstore.c - Driver to instantiate Chromebook ramoops device
+ *
+ *  Copyright (C) 2013 Google, Inc.
+ *
+ *  This program is free software; you can redistribute it and/or modify
+ *  it under the terms of the GNU General Public License as published by
+ *  the Free Software Foundation, version 2 of the License.
+ */
+
+#include <linux/dmi.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/pstore_ram.h>
+
+static struct dmi_system_id chromeos_pstore_dmi_table[] __initdata = {
+	{
+		/*
+		 * Today all Chromebooks/boxes ship with GOOGLE as vendor and
+		 * coreboot as bios vendor. No other systems with this
+		 * combination are known to date.
+		 */
+		.matches = {
+			DMI_MATCH(DMI_SYS_VENDOR, "GOOGLE"),
+			DMI_MATCH(DMI_BIOS_VENDOR, "coreboot"),
+		},
+	},
+	{
+		/*
+		 * The first Samsung Chromebox and Chromebook Series 5 550 use
+		 * coreboot but with Samsung as the system vendor.
+		 */
+		.matches = {
+			DMI_MATCH(DMI_SYS_VENDOR, "SAMSUNG"),
+			DMI_MATCH(DMI_BIOS_VENDOR, "coreboot"),
+		},
+	},
+	{
+		/* x86-alex, the first Samsung Chromebook. */
+		.matches = {
+			DMI_MATCH(DMI_SYS_VENDOR, "SAMSUNG ELECTRONICS CO., LTD."),
+			DMI_MATCH(DMI_PRODUCT_NAME, "Alex"),
+		},
+	},
+	{
+		/* x86-mario, the Cr-48 pilot device from Google. */
+		.matches = {
+			DMI_MATCH(DMI_SYS_VENDOR, "IEC"),
+			DMI_MATCH(DMI_PRODUCT_NAME, "Mario"),
+		},
+	},
+	{
+		/* x86-zgb, the first Acer Chromebook. */
+		.matches = {
+			DMI_MATCH(DMI_SYS_VENDOR, "ACER"),
+			DMI_MATCH(DMI_PRODUCT_NAME, "ZGB"),
+		},
+	},
+	{ }
+};
+MODULE_DEVICE_TABLE(dmi, chromeos_pstore_dmi_table);
+
+/*
+ * On x86 chromebooks/boxes, the firmware will keep the legacy VGA memory
+ * range untouched across reboots, so we use that to store our pstore
+ * contents for panic logs, etc.
+ */
+static struct ramoops_platform_data chromeos_ramoops_data = {
+	.mem_size	= 0x100000,
+	.mem_address	= 0xf00000,
+	.record_size	= 0x20000,
+	.console_size	= 0x20000,
+	.ftrace_size	= 0x20000,
+	.dump_oops	= 1,
+};
+
+static struct platform_device chromeos_ramoops = {
+	.name = "ramoops",
+	.dev = {
+		.platform_data = &chromeos_ramoops_data,
+	},
+};
+
+static int __init chromeos_pstore_init(void)
+{
+	if (dmi_check_system(chromeos_pstore_dmi_table))
+		return platform_device_register(&chromeos_ramoops);
+
+	return -ENODEV;
+}
+
+static void __exit chromeos_pstore_exit(void)
+{
+	platform_device_unregister(&chromeos_ramoops);
+}
+
+module_init(chromeos_pstore_init);
+module_exit(chromeos_pstore_exit);
+
+MODULE_DESCRIPTION("Chrome OS pstore module");
+MODULE_LICENSE("GPL");
diff --git a/drivers/platform/x86/Kconfig b/drivers/platform/x86/Kconfig
index d9dcd37..5ae65c1 100644
--- a/drivers/platform/x86/Kconfig
+++ b/drivers/platform/x86/Kconfig
@@ -197,6 +197,17 @@
 	  To compile this driver as a module, choose M here: the module will
 	  be called hp_accel.
 
+config HP_WIRELESS
+	tristate "HP WIRELESS"
+	depends on ACPI
+	depends on INPUT
+	help
+	  This driver provides support for the HP wireless button for Windows 8.
+	  On such systems the driver should load automatically (via ACPI alias).
+
+	  To compile this driver as a module, choose M here: the module will
+	  be called hp-wireless.
+
 config HP_WMI
 	tristate "HP WMI extras"
 	depends on ACPI_WMI
@@ -808,4 +819,12 @@
 	  a paravirtualized device provided by QEMU; it lets a virtual machine
 	  (guest) communicate panic events to the host.
 
+config INTEL_BAYTRAIL_MBI
+	tristate
+	depends on PCI
+	---help---
+	  Needed on Baytrail platforms for access to the IOSF Sideband Mailbox
+	  Interface. This is a requirement for systems that need to configure
+	  the PUNIT for power management features such as RAPL.
+
 endif # X86_PLATFORM_DEVICES
diff --git a/drivers/platform/x86/Makefile b/drivers/platform/x86/Makefile
index f0e6aa4..9b87cfc 100644
--- a/drivers/platform/x86/Makefile
+++ b/drivers/platform/x86/Makefile
@@ -16,6 +16,7 @@
 obj-$(CONFIG_ACER_WMI)		+= acer-wmi.o
 obj-$(CONFIG_ACERHDF)		+= acerhdf.o
 obj-$(CONFIG_HP_ACCEL)		+= hp_accel.o
+obj-$(CONFIG_HP_WIRELESS)	+= hp-wireless.o
 obj-$(CONFIG_HP_WMI)		+= hp-wmi.o
 obj-$(CONFIG_AMILO_RFKILL)	+= amilo-rfkill.o
 obj-$(CONFIG_TC1100_WMI)	+= tc1100-wmi.o
@@ -54,3 +55,4 @@
 obj-$(CONFIG_INTEL_SMARTCONNECT)	+= intel-smartconnect.o
 
 obj-$(CONFIG_PVPANIC)           += pvpanic.o
+obj-$(CONFIG_INTEL_BAYTRAIL_MBI)	+= intel_baytrail.o
diff --git a/drivers/platform/x86/asus-wmi.c b/drivers/platform/x86/asus-wmi.c
index 109f638..c5e082f 100644
--- a/drivers/platform/x86/asus-wmi.c
+++ b/drivers/platform/x86/asus-wmi.c
@@ -183,7 +183,6 @@
 
 	struct input_dev *inputdev;
 	struct backlight_device *backlight_device;
-	struct device *hwmon_device;
 	struct platform_device *platform_device;
 
 	struct led_classdev wlan_led;
@@ -1072,20 +1071,12 @@
 	return sprintf(buf, "%d\n", value);
 }
 
-static SENSOR_DEVICE_ATTR(pwm1, S_IRUGO, asus_hwmon_pwm1, NULL, 0);
-static SENSOR_DEVICE_ATTR(temp1_input, S_IRUGO, asus_hwmon_temp1, NULL, 0);
-
-static ssize_t
-show_name(struct device *dev, struct device_attribute *attr, char *buf)
-{
-	return sprintf(buf, "asus\n");
-}
-static SENSOR_DEVICE_ATTR(name, S_IRUGO, show_name, NULL, 0);
+static DEVICE_ATTR(pwm1, S_IRUGO, asus_hwmon_pwm1, NULL);
+static DEVICE_ATTR(temp1_input, S_IRUGO, asus_hwmon_temp1, NULL);
 
 static struct attribute *hwmon_attributes[] = {
-	&sensor_dev_attr_pwm1.dev_attr.attr,
-	&sensor_dev_attr_temp1_input.dev_attr.attr,
-	&sensor_dev_attr_name.dev_attr.attr,
+	&dev_attr_pwm1.attr,
+	&dev_attr_temp1_input.attr,
 	NULL
 };
 
@@ -1099,9 +1090,9 @@
 	int dev_id = -1;
 	u32 value = ASUS_WMI_UNSUPPORTED_METHOD;
 
-	if (attr == &sensor_dev_attr_pwm1.dev_attr.attr)
+	if (attr == &dev_attr_pwm1.attr)
 		dev_id = ASUS_WMI_DEVID_FAN_CTRL;
-	else if (attr == &sensor_dev_attr_temp1_input.dev_attr.attr)
+	else if (attr == &dev_attr_temp1_input.attr)
 		dev_id = ASUS_WMI_DEVID_THERMAL_CTRL;
 
 	if (dev_id != -1) {
@@ -1136,35 +1127,20 @@
 	.is_visible = asus_hwmon_sysfs_is_visible,
 	.attrs = hwmon_attributes
 };
-
-static void asus_wmi_hwmon_exit(struct asus_wmi *asus)
-{
-	struct device *hwmon;
-
-	hwmon = asus->hwmon_device;
-	if (!hwmon)
-		return;
-	sysfs_remove_group(&hwmon->kobj, &hwmon_attribute_group);
-	hwmon_device_unregister(hwmon);
-	asus->hwmon_device = NULL;
-}
+__ATTRIBUTE_GROUPS(hwmon_attribute);
 
 static int asus_wmi_hwmon_init(struct asus_wmi *asus)
 {
 	struct device *hwmon;
-	int result;
 
-	hwmon = hwmon_device_register(&asus->platform_device->dev);
+	hwmon = hwmon_device_register_with_groups(&asus->platform_device->dev,
+						  "asus", asus,
+						  hwmon_attribute_groups);
 	if (IS_ERR(hwmon)) {
 		pr_err("Could not register asus hwmon device\n");
 		return PTR_ERR(hwmon);
 	}
-	dev_set_drvdata(hwmon, asus);
-	asus->hwmon_device = hwmon;
-	result = sysfs_create_group(&hwmon->kobj, &hwmon_attribute_group);
-	if (result)
-		asus_wmi_hwmon_exit(asus);
-	return result;
+	return 0;
 }
 
 /*
@@ -1835,7 +1811,6 @@
 fail_rfkill:
 	asus_wmi_led_exit(asus);
 fail_leds:
-	asus_wmi_hwmon_exit(asus);
 fail_hwmon:
 	asus_wmi_input_exit(asus);
 fail_input:
@@ -1853,7 +1828,6 @@
 	wmi_remove_notify_handler(asus->driver->event_guid);
 	asus_wmi_backlight_exit(asus);
 	asus_wmi_input_exit(asus);
-	asus_wmi_hwmon_exit(asus);
 	asus_wmi_led_exit(asus);
 	asus_wmi_rfkill_exit(asus);
 	asus_wmi_debugfs_exit(asus);
diff --git a/drivers/platform/x86/compal-laptop.c b/drivers/platform/x86/compal-laptop.c
index eaa78ed..7297df2 100644
--- a/drivers/platform/x86/compal-laptop.c
+++ b/drivers/platform/x86/compal-laptop.c
@@ -173,8 +173,7 @@
 /* ======= */
 struct compal_data{
 	/* Fan control */
-	struct device *hwmon_dev;
-	int pwm_enable; /* 0:full on, 1:set by pwm1, 2:control by moterboard */
+	int pwm_enable; /* 0:full on, 1:set by pwm1, 2:controlled by motherboard */
 	unsigned char curr_pwm;
 
 	/* Power supply */
@@ -402,15 +401,6 @@
 SIMPLE_MASKED_STORE_SHOW(wake_up_key,	WAKE_UP_ADDR, WAKE_UP_KEY)
 SIMPLE_MASKED_STORE_SHOW(wake_up_mouse,	WAKE_UP_ADDR, WAKE_UP_MOUSE)
 
-
-/* General hwmon interface */
-static ssize_t hwmon_name_show(struct device *dev,
-		struct device_attribute *attr, char *buf)
-{
-	return sprintf(buf, "%s\n", DRIVER_NAME);
-}
-
-
 /* Fan control interface */
 static ssize_t pwm_enable_show(struct device *dev,
 		struct device_attribute *attr, char *buf)
@@ -665,56 +655,56 @@
 static DEVICE_ATTR(wake_up_mouse,
 		0644, wake_up_mouse_show,	wake_up_mouse_store);
 
-static SENSOR_DEVICE_ATTR(name,        S_IRUGO, hwmon_name_show,   NULL, 1);
-static SENSOR_DEVICE_ATTR(fan1_input,  S_IRUGO, fan_show,          NULL, 1);
-static SENSOR_DEVICE_ATTR(temp1_input, S_IRUGO, temp_cpu,          NULL, 1);
-static SENSOR_DEVICE_ATTR(temp2_input, S_IRUGO, temp_cpu_local,    NULL, 1);
-static SENSOR_DEVICE_ATTR(temp3_input, S_IRUGO, temp_cpu_DTS,      NULL, 1);
-static SENSOR_DEVICE_ATTR(temp4_input, S_IRUGO, temp_northbridge,  NULL, 1);
-static SENSOR_DEVICE_ATTR(temp5_input, S_IRUGO, temp_vga,          NULL, 1);
-static SENSOR_DEVICE_ATTR(temp6_input, S_IRUGO, temp_SKIN,         NULL, 1);
-static SENSOR_DEVICE_ATTR(temp1_label, S_IRUGO, label_cpu,         NULL, 1);
-static SENSOR_DEVICE_ATTR(temp2_label, S_IRUGO, label_cpu_local,   NULL, 1);
-static SENSOR_DEVICE_ATTR(temp3_label, S_IRUGO, label_cpu_DTS,     NULL, 1);
-static SENSOR_DEVICE_ATTR(temp4_label, S_IRUGO, label_northbridge, NULL, 1);
-static SENSOR_DEVICE_ATTR(temp5_label, S_IRUGO, label_vga,         NULL, 1);
-static SENSOR_DEVICE_ATTR(temp6_label, S_IRUGO, label_SKIN,        NULL, 1);
-static SENSOR_DEVICE_ATTR(pwm1, S_IRUGO | S_IWUSR, pwm_show, pwm_store, 1);
-static SENSOR_DEVICE_ATTR(pwm1_enable,
-		S_IRUGO | S_IWUSR, pwm_enable_show, pwm_enable_store, 0);
+static DEVICE_ATTR(fan1_input,  S_IRUGO, fan_show,          NULL);
+static DEVICE_ATTR(temp1_input, S_IRUGO, temp_cpu,          NULL);
+static DEVICE_ATTR(temp2_input, S_IRUGO, temp_cpu_local,    NULL);
+static DEVICE_ATTR(temp3_input, S_IRUGO, temp_cpu_DTS,      NULL);
+static DEVICE_ATTR(temp4_input, S_IRUGO, temp_northbridge,  NULL);
+static DEVICE_ATTR(temp5_input, S_IRUGO, temp_vga,          NULL);
+static DEVICE_ATTR(temp6_input, S_IRUGO, temp_SKIN,         NULL);
+static DEVICE_ATTR(temp1_label, S_IRUGO, label_cpu,         NULL);
+static DEVICE_ATTR(temp2_label, S_IRUGO, label_cpu_local,   NULL);
+static DEVICE_ATTR(temp3_label, S_IRUGO, label_cpu_DTS,     NULL);
+static DEVICE_ATTR(temp4_label, S_IRUGO, label_northbridge, NULL);
+static DEVICE_ATTR(temp5_label, S_IRUGO, label_vga,         NULL);
+static DEVICE_ATTR(temp6_label, S_IRUGO, label_SKIN,        NULL);
+static DEVICE_ATTR(pwm1, S_IRUGO | S_IWUSR, pwm_show, pwm_store);
+static DEVICE_ATTR(pwm1_enable,
+		   S_IRUGO | S_IWUSR, pwm_enable_show, pwm_enable_store);
 
-static struct attribute *compal_attributes[] = {
+static struct attribute *compal_platform_attrs[] = {
 	&dev_attr_wake_up_pme.attr,
 	&dev_attr_wake_up_modem.attr,
 	&dev_attr_wake_up_lan.attr,
 	&dev_attr_wake_up_wlan.attr,
 	&dev_attr_wake_up_key.attr,
 	&dev_attr_wake_up_mouse.attr,
-	/* Maybe put the sensor-stuff in a separate hwmon-driver? That way,
-	 * the hwmon sysfs won't be cluttered with the above files. */
-	&sensor_dev_attr_name.dev_attr.attr,
-	&sensor_dev_attr_pwm1_enable.dev_attr.attr,
-	&sensor_dev_attr_pwm1.dev_attr.attr,
-	&sensor_dev_attr_fan1_input.dev_attr.attr,
-	&sensor_dev_attr_temp1_input.dev_attr.attr,
-	&sensor_dev_attr_temp2_input.dev_attr.attr,
-	&sensor_dev_attr_temp3_input.dev_attr.attr,
-	&sensor_dev_attr_temp4_input.dev_attr.attr,
-	&sensor_dev_attr_temp5_input.dev_attr.attr,
-	&sensor_dev_attr_temp6_input.dev_attr.attr,
-	&sensor_dev_attr_temp1_label.dev_attr.attr,
-	&sensor_dev_attr_temp2_label.dev_attr.attr,
-	&sensor_dev_attr_temp3_label.dev_attr.attr,
-	&sensor_dev_attr_temp4_label.dev_attr.attr,
-	&sensor_dev_attr_temp5_label.dev_attr.attr,
-	&sensor_dev_attr_temp6_label.dev_attr.attr,
 	NULL
 };
-
-static struct attribute_group compal_attribute_group = {
-	.attrs = compal_attributes
+static struct attribute_group compal_platform_attr_group = {
+	.attrs = compal_platform_attrs
 };
 
+static struct attribute *compal_hwmon_attrs[] = {
+	&dev_attr_pwm1_enable.attr,
+	&dev_attr_pwm1.attr,
+	&dev_attr_fan1_input.attr,
+	&dev_attr_temp1_input.attr,
+	&dev_attr_temp2_input.attr,
+	&dev_attr_temp3_input.attr,
+	&dev_attr_temp4_input.attr,
+	&dev_attr_temp5_input.attr,
+	&dev_attr_temp6_input.attr,
+	&dev_attr_temp1_label.attr,
+	&dev_attr_temp2_label.attr,
+	&dev_attr_temp3_label.attr,
+	&dev_attr_temp4_label.attr,
+	&dev_attr_temp5_label.attr,
+	&dev_attr_temp6_label.attr,
+	NULL
+};
+ATTRIBUTE_GROUPS(compal_hwmon);
+
 static int compal_probe(struct platform_device *);
 static int compal_remove(struct platform_device *);
 static struct platform_driver compal_driver = {
@@ -1021,30 +1011,28 @@
 {
 	int err;
 	struct compal_data *data;
+	struct device *hwmon_dev;
 
 	if (!extra_features)
 		return 0;
 
 	/* Fan control */
-	data = kzalloc(sizeof(struct compal_data), GFP_KERNEL);
+	data = devm_kzalloc(&pdev->dev, sizeof(struct compal_data), GFP_KERNEL);
 	if (!data)
 		return -ENOMEM;
 
 	initialize_fan_control_data(data);
 
-	err = sysfs_create_group(&pdev->dev.kobj, &compal_attribute_group);
-	if (err) {
-		kfree(data);
+	err = sysfs_create_group(&pdev->dev.kobj, &compal_platform_attr_group);
+	if (err)
 		return err;
-	}
 
-	data->hwmon_dev = hwmon_device_register(&pdev->dev);
-	if (IS_ERR(data->hwmon_dev)) {
-		err = PTR_ERR(data->hwmon_dev);
-		sysfs_remove_group(&pdev->dev.kobj,
-				&compal_attribute_group);
-		kfree(data);
-		return err;
+	hwmon_dev = hwmon_device_register_with_groups(&pdev->dev,
+						      DRIVER_NAME, data,
+						      compal_hwmon_groups);
+	if (IS_ERR(hwmon_dev)) {
+		err = PTR_ERR(hwmon_dev);
+		goto remove;
 	}
 
 	/* Power supply */
@@ -1054,6 +1042,10 @@
 	platform_set_drvdata(pdev, data);
 
 	return 0;
+
+remove:
+	sysfs_remove_group(&pdev->dev.kobj, &compal_platform_attr_group);
+	return err;
 }
 
 static void __exit compal_cleanup(void)
@@ -1080,12 +1072,9 @@
 	pwm_disable_control();
 
 	data = platform_get_drvdata(pdev);
-	hwmon_device_unregister(data->hwmon_dev);
 	power_supply_unregister(&data->psy);
 
-	kfree(data);
-
-	sysfs_remove_group(&pdev->dev.kobj, &compal_attribute_group);
+	sysfs_remove_group(&pdev->dev.kobj, &compal_platform_attr_group);
 
 	return 0;
 }
diff --git a/drivers/platform/x86/dell-laptop.c b/drivers/platform/x86/dell-laptop.c
index c608b1d..fed4111 100644
--- a/drivers/platform/x86/dell-laptop.c
+++ b/drivers/platform/x86/dell-laptop.c
@@ -559,19 +559,45 @@
 }
 static DECLARE_DELAYED_WORK(dell_rfkill_work, dell_update_rfkill);
 
+static bool dell_laptop_i8042_filter(unsigned char data, unsigned char str,
+			      struct serio *port)
+{
+	static bool extended;
+
+	if (str & 0x20)
+		return false;
+
+	if (unlikely(data == 0xe0)) {
+		extended = true;
+		return false;
+	} else if (unlikely(extended)) {
+		switch (data) {
+		case 0x8:
+			schedule_delayed_work(&dell_rfkill_work,
+					      round_jiffies_relative(HZ / 4));
+			break;
+		}
+		extended = false;
+	}
+
+	return false;
+}
 
 static int __init dell_setup_rfkill(void)
 {
-	int status;
-	int ret;
+	int status, ret, whitelisted;
 	const char *product;
 
 	/*
-	 * rfkill causes trouble on various non Latitudes, according to Dell
-	 * actually testing the rfkill functionality is only done on Latitudes.
+	 * rfkill support causes trouble on various models, mostly Inspirons.
+	 * So we whitelist certain series, and don't support rfkill on others.
 	 */
+	whitelisted = 0;
 	product = dmi_get_system_info(DMI_PRODUCT_NAME);
-	if (!force_rfkill && (!product || strncmp(product, "Latitude", 8)))
+	if (product && (strncmp(product, "Latitude", 8) == 0 ||
+			 strncmp(product, "Precision", 9) == 0))
+		whitelisted = 1;
+	if (!force_rfkill && !whitelisted)
 		return 0;
 
 	get_buffer();
@@ -633,7 +659,16 @@
 			goto err_wwan;
 	}
 
+	ret = i8042_install_filter(dell_laptop_i8042_filter);
+	if (ret) {
+		pr_warn("Unable to install key filter\n");
+		goto err_filter;
+	}
+
 	return 0;
+err_filter:
+	if (wwan_rfkill)
+		rfkill_unregister(wwan_rfkill);
 err_wwan:
 	rfkill_destroy(wwan_rfkill);
 	if (bluetooth_rfkill)
@@ -684,7 +719,7 @@
 
 out:
 	release_buffer();
-	return 0;
+	return ret;
 }
 
 static int dell_get_intensity(struct backlight_device *bd)
@@ -755,30 +790,6 @@
 	led_classdev_unregister(&touchpad_led);
 }
 
-static bool dell_laptop_i8042_filter(unsigned char data, unsigned char str,
-			      struct serio *port)
-{
-	static bool extended;
-
-	if (str & 0x20)
-		return false;
-
-	if (unlikely(data == 0xe0)) {
-		extended = true;
-		return false;
-	} else if (unlikely(extended)) {
-		switch (data) {
-		case 0x8:
-			schedule_delayed_work(&dell_rfkill_work,
-					      round_jiffies_relative(HZ / 4));
-			break;
-		}
-		extended = false;
-	}
-
-	return false;
-}
-
 static int __init dell_init(void)
 {
 	int max_intensity = 0;
@@ -828,12 +839,6 @@
 		goto fail_rfkill;
 	}
 
-	ret = i8042_install_filter(dell_laptop_i8042_filter);
-	if (ret) {
-		pr_warn("Unable to install key filter\n");
-		goto fail_filter;
-	}
-
 	if (quirks && quirks->touchpad_led)
 		touchpad_led_init(&platform_device->dev);
 
@@ -885,7 +890,6 @@
 fail_backlight:
 	i8042_remove_filter(dell_laptop_i8042_filter);
 	cancel_delayed_work_sync(&dell_rfkill_work);
-fail_filter:
 	dell_cleanup_rfkill();
 fail_rfkill:
 	free_page((unsigned long)bufferpage);
diff --git a/drivers/platform/x86/eeepc-laptop.c b/drivers/platform/x86/eeepc-laptop.c
index ed69ec5..399e8c5 100644
--- a/drivers/platform/x86/eeepc-laptop.c
+++ b/drivers/platform/x86/eeepc-laptop.c
@@ -165,7 +165,6 @@
 
 	struct platform_device *platform_device;
 	struct acpi_device *device;		/* the device we are in */
-	struct device *hwmon_device;
 	struct backlight_device *backlight_device;
 
 	struct input_dev *inputdev;
@@ -1068,7 +1067,7 @@
 	{								\
 		return store_sys_hwmon(_get, buf, count);		\
 	}								\
-	static SENSOR_DEVICE_ATTR(_name, _mode, show_##_name, store_##_name, 0);
+	static DEVICE_ATTR(_name, _mode, show_##_name, store_##_name);
 
 EEEPC_CREATE_SENSOR_ATTR(fan1_input, S_IRUGO, eeepc_get_fan_rpm, NULL);
 EEEPC_CREATE_SENSOR_ATTR(pwm1, S_IRUGO | S_IWUSR,
@@ -1076,55 +1075,26 @@
 EEEPC_CREATE_SENSOR_ATTR(pwm1_enable, S_IRUGO | S_IWUSR,
 			 eeepc_get_fan_ctrl, eeepc_set_fan_ctrl);
 
-static ssize_t
-show_name(struct device *dev, struct device_attribute *attr, char *buf)
-{
-	return sprintf(buf, "eeepc\n");
-}
-static SENSOR_DEVICE_ATTR(name, S_IRUGO, show_name, NULL, 0);
-
-static struct attribute *hwmon_attributes[] = {
-	&sensor_dev_attr_pwm1.dev_attr.attr,
-	&sensor_dev_attr_fan1_input.dev_attr.attr,
-	&sensor_dev_attr_pwm1_enable.dev_attr.attr,
-	&sensor_dev_attr_name.dev_attr.attr,
+static struct attribute *hwmon_attrs[] = {
+	&dev_attr_pwm1.attr,
+	&dev_attr_fan1_input.attr,
+	&dev_attr_pwm1_enable.attr,
 	NULL
 };
-
-static struct attribute_group hwmon_attribute_group = {
-	.attrs = hwmon_attributes
-};
-
-static void eeepc_hwmon_exit(struct eeepc_laptop *eeepc)
-{
-	struct device *hwmon;
-
-	hwmon = eeepc->hwmon_device;
-	if (!hwmon)
-		return;
-	sysfs_remove_group(&hwmon->kobj,
-			   &hwmon_attribute_group);
-	hwmon_device_unregister(hwmon);
-	eeepc->hwmon_device = NULL;
-}
+ATTRIBUTE_GROUPS(hwmon);
 
 static int eeepc_hwmon_init(struct eeepc_laptop *eeepc)
 {
+	struct device *dev = &eeepc->platform_device->dev;
 	struct device *hwmon;
-	int result;
 
-	hwmon = hwmon_device_register(&eeepc->platform_device->dev);
+	hwmon = devm_hwmon_device_register_with_groups(dev, "eeepc", NULL,
+						       hwmon_groups);
 	if (IS_ERR(hwmon)) {
 		pr_err("Could not register eeepc hwmon device\n");
-		eeepc->hwmon_device = NULL;
 		return PTR_ERR(hwmon);
 	}
-	eeepc->hwmon_device = hwmon;
-	result = sysfs_create_group(&hwmon->kobj,
-				    &hwmon_attribute_group);
-	if (result)
-		eeepc_hwmon_exit(eeepc);
-	return result;
+	return 0;
 }
 
 /*
@@ -1480,7 +1450,6 @@
 fail_rfkill:
 	eeepc_led_exit(eeepc);
 fail_led:
-	eeepc_hwmon_exit(eeepc);
 fail_hwmon:
 	eeepc_input_exit(eeepc);
 fail_input:
@@ -1500,7 +1469,6 @@
 	eeepc_backlight_exit(eeepc);
 	eeepc_rfkill_exit(eeepc);
 	eeepc_input_exit(eeepc);
-	eeepc_hwmon_exit(eeepc);
 	eeepc_led_exit(eeepc);
 	eeepc_platform_exit(eeepc);
 
diff --git a/drivers/platform/x86/fujitsu-laptop.c b/drivers/platform/x86/fujitsu-laptop.c
index 9d30d69..be02bcc 100644
--- a/drivers/platform/x86/fujitsu-laptop.c
+++ b/drivers/platform/x86/fujitsu-laptop.c
@@ -633,7 +633,6 @@
 
 static int acpi_fujitsu_add(struct acpi_device *device)
 {
-	int result = 0;
 	int state = 0;
 	struct input_dev *input;
 	int error;
@@ -669,8 +668,8 @@
 	if (error)
 		goto err_free_input_dev;
 
-	result = acpi_bus_update_power(fujitsu->acpi_handle, &state);
-	if (result) {
+	error = acpi_bus_update_power(fujitsu->acpi_handle, &state);
+	if (error) {
 		pr_err("Error reading power state\n");
 		goto err_unregister_input_dev;
 	}
@@ -700,7 +699,7 @@
 		fujitsu->max_brightness = FUJITSU_LCD_N_LEVELS;
 	get_lcd_level();
 
-	return result;
+	return 0;
 
 err_unregister_input_dev:
 	input_unregister_device(input);
@@ -708,7 +707,7 @@
 err_free_input_dev:
 	input_free_device(input);
 err_stop:
-	return result;
+	return error;
 }
 
 static int acpi_fujitsu_remove(struct acpi_device *device)
@@ -831,8 +830,8 @@
 	if (error)
 		goto err_free_input_dev;
 
-	result = acpi_bus_update_power(fujitsu_hotkey->acpi_handle, &state);
-	if (result) {
+	error = acpi_bus_update_power(fujitsu_hotkey->acpi_handle, &state);
+	if (error) {
 		pr_err("Error reading power state\n");
 		goto err_unregister_input_dev;
 	}
@@ -907,7 +906,7 @@
 err_free_fifo:
 	kfifo_free(&fujitsu_hotkey->fifo);
 err_stop:
-	return result;
+	return error;
 }
 
 static int acpi_fujitsu_hotkey_remove(struct acpi_device *device)
diff --git a/drivers/platform/x86/hp-wireless.c b/drivers/platform/x86/hp-wireless.c
new file mode 100644
index 0000000..415348f
--- /dev/null
+++ b/drivers/platform/x86/hp-wireless.c
@@ -0,0 +1,132 @@
+/*
+ *  hp-wireless button for Windows 8
+ *
+ *  Copyright (C) 2014 Alex Hung <alex.hung@canonical.com>
+ *
+ *  This program is free software; you can redistribute it and/or modify
+ *  it under the terms of the GNU General Public License as published by
+ *  the Free Software Foundation; either version 2 of the License, or
+ *  (at your option) any later version.
+ *
+ *  This program is distributed in the hope that it will be useful,
+ *  but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *  GNU General Public License for more details.
+ *
+ *  You should have received a copy of the GNU General Public License along
+ *  with this program; if not, write to the Free Software Foundation, Inc.,
+ *  51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/input.h>
+#include <linux/platform_device.h>
+#include <linux/acpi.h>
+#include <acpi/acpi_bus.h>
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Alex Hung");
+MODULE_ALIAS("acpi*:HPQ6001:*");
+
+static struct input_dev *hpwl_input_dev;
+
+static const struct acpi_device_id hpwl_ids[] = {
+	{"HPQ6001", 0},
+	{"", 0},
+};
+
+static int hp_wireless_input_setup(void)
+{
+	int err;
+
+	hpwl_input_dev = input_allocate_device();
+	if (!hpwl_input_dev)
+		return -ENOMEM;
+
+	hpwl_input_dev->name = "HP Wireless hotkeys";
+	hpwl_input_dev->phys = "hpq6001/input0";
+	hpwl_input_dev->id.bustype = BUS_HOST;
+	hpwl_input_dev->evbit[0] = BIT(EV_KEY);
+	set_bit(KEY_RFKILL, hpwl_input_dev->keybit);
+
+	err = input_register_device(hpwl_input_dev);
+	if (err)
+		goto err_free_dev;
+
+	return 0;
+
+err_free_dev:
+	input_free_device(hpwl_input_dev);
+	return err;
+}
+
+static void hp_wireless_input_destroy(void)
+{
+	input_unregister_device(hpwl_input_dev);
+}
+
+static void hpwl_notify(struct acpi_device *acpi_dev, u32 event)
+{
+	if (event != 0x80) {
+		pr_info("Received unknown event (0x%x)\n", event);
+		return;
+	}
+
+	input_report_key(hpwl_input_dev, KEY_RFKILL, 1);
+	input_sync(hpwl_input_dev);
+	input_report_key(hpwl_input_dev, KEY_RFKILL, 0);
+	input_sync(hpwl_input_dev);
+}
+
+static int hpwl_add(struct acpi_device *device)
+{
+	int err;
+
+	err = hp_wireless_input_setup();
+	return err;
+}
+
+static int hpwl_remove(struct acpi_device *device)
+{
+	hp_wireless_input_destroy();
+	return 0;
+}
+
+static struct acpi_driver hpwl_driver = {
+	.name	= "hp-wireless",
+	.owner	= THIS_MODULE,
+	.ids	= hpwl_ids,
+	.ops	= {
+		.add	= hpwl_add,
+		.remove	= hpwl_remove,
+		.notify	= hpwl_notify,
+	},
+};
+
+static int __init hpwl_init(void)
+{
+	int err;
+
+	pr_info("Initializing HPQ6001 module\n");
+	err = acpi_bus_register_driver(&hpwl_driver);
+	if (err) {
+		pr_err("Unable to register HP wireless control driver.\n");
+		goto error_acpi_register;
+	}
+
+	return 0;
+
+error_acpi_register:
+	return err;
+}
+
+static void __exit hpwl_exit(void)
+{
+	pr_info("Exiting HPQ6001 module\n");
+	acpi_bus_unregister_driver(&hpwl_driver);
+}
+
+module_init(hpwl_init);
+module_exit(hpwl_exit);
diff --git a/drivers/platform/x86/hp_accel.c b/drivers/platform/x86/hp_accel.c
index aff4d06..3dc9344 100644
--- a/drivers/platform/x86/hp_accel.c
+++ b/drivers/platform/x86/hp_accel.c
@@ -77,6 +77,7 @@
 static struct acpi_device_id lis3lv02d_device_ids[] = {
 	{"HPQ0004", 0}, /* HP Mobile Data Protection System PNP */
 	{"HPQ6000", 0}, /* HP Mobile Data Protection System PNP */
+	{"HPQ6007", 0}, /* HP Mobile Data Protection System PNP */
 	{"", 0},
 };
 MODULE_DEVICE_TABLE(acpi, lis3lv02d_device_ids);
@@ -88,7 +89,7 @@
  *
  * Returns 0 on success.
  */
-int lis3lv02d_acpi_init(struct lis3lv02d *lis3)
+static int lis3lv02d_acpi_init(struct lis3lv02d *lis3)
 {
 	struct acpi_device *dev = lis3->bus_priv;
 	if (acpi_evaluate_object(dev->handle, METHOD_NAME__INI,
@@ -106,7 +107,7 @@
  *
  * Returns 0 on success.
  */
-int lis3lv02d_acpi_read(struct lis3lv02d *lis3, int reg, u8 *ret)
+static int lis3lv02d_acpi_read(struct lis3lv02d *lis3, int reg, u8 *ret)
 {
 	struct acpi_device *dev = lis3->bus_priv;
 	union acpi_object arg0 = { ACPI_TYPE_INTEGER };
@@ -129,7 +130,7 @@
  *
  * Returns 0 on success.
  */
-int lis3lv02d_acpi_write(struct lis3lv02d *lis3, int reg, u8 val)
+static int lis3lv02d_acpi_write(struct lis3lv02d *lis3, int reg, u8 val)
 {
 	struct acpi_device *dev = lis3->bus_priv;
 	unsigned long long ret; /* Not used when writting */
diff --git a/drivers/platform/x86/intel_baytrail.c b/drivers/platform/x86/intel_baytrail.c
new file mode 100644
index 0000000..f96626b
--- /dev/null
+++ b/drivers/platform/x86/intel_baytrail.c
@@ -0,0 +1,224 @@
+/*
+ * Baytrail IOSF-SB MailBox Interface Driver
+ * Copyright (c) 2013, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ *
+ * The IOSF-SB is a fabric bus available on Atom-based SoCs that uses a
+ * mailbox interface (MBI) to communicate with multiple devices. This
+ * driver implements BayTrail-specific access to this interface.
+ */
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/spinlock.h>
+#include <linux/pci.h>
+
+#include "intel_baytrail.h"
+
+static DEFINE_SPINLOCK(iosf_mbi_lock);
+
+static inline u32 iosf_mbi_form_mcr(u8 op, u8 port, u8 offset)
+{
+	return (op << 24) | (port << 16) | (offset << 8) | BT_MBI_ENABLE;
+}
+
+static struct pci_dev *mbi_pdev;	/* one mbi device */
+
+/* Hold lock before calling */
+static int iosf_mbi_pci_read_mdr(u32 mcrx, u32 mcr, u32 *mdr)
+{
+	int result;
+
+	if (!mbi_pdev)
+		return -ENODEV;
+
+	if (mcrx) {
+		result = pci_write_config_dword(mbi_pdev,
+						BT_MBI_MCRX_OFFSET, mcrx);
+		if (result < 0)
+			goto iosf_mbi_read_err;
+	}
+
+	result = pci_write_config_dword(mbi_pdev,
+					BT_MBI_MCR_OFFSET, mcr);
+	if (result < 0)
+		goto iosf_mbi_read_err;
+
+	result = pci_read_config_dword(mbi_pdev,
+				       BT_MBI_MDR_OFFSET, mdr);
+	if (result < 0)
+		goto iosf_mbi_read_err;
+
+	return 0;
+
+iosf_mbi_read_err:
+	dev_err(&mbi_pdev->dev, "error: PCI config operation returned %d\n",
+		result);
+	return result;
+}
+
+/* Hold lock before calling */
+static int iosf_mbi_pci_write_mdr(u32 mcrx, u32 mcr, u32 mdr)
+{
+	int result;
+
+	if (!mbi_pdev)
+		return -ENODEV;
+
+	result = pci_write_config_dword(mbi_pdev,
+					BT_MBI_MDR_OFFSET, mdr);
+	if (result < 0)
+		goto iosf_mbi_write_err;
+
+	if (mcrx) {
+		result = pci_write_config_dword(mbi_pdev,
+			 BT_MBI_MCRX_OFFSET, mcrx);
+		if (result < 0)
+			goto iosf_mbi_write_err;
+	}
+
+	result = pci_write_config_dword(mbi_pdev,
+					BT_MBI_MCR_OFFSET, mcr);
+	if (result < 0)
+		goto iosf_mbi_write_err;
+
+	return 0;
+
+iosf_mbi_write_err:
+	dev_err(&mbi_pdev->dev, "error: PCI config operation returned %d\n",
+		result);
+	return result;
+}
+
+int bt_mbi_read(u8 port, u8 opcode, u32 offset, u32 *mdr)
+{
+	u32 mcr, mcrx;
+	unsigned long flags;
+	int ret;
+
+	/* Access to the GFX unit is handled by GPU code */
+	BUG_ON(port == BT_MBI_UNIT_GFX);
+
+	mcr = iosf_mbi_form_mcr(opcode, port, offset & BT_MBI_MASK_LO);
+	mcrx = offset & BT_MBI_MASK_HI;
+
+	spin_lock_irqsave(&iosf_mbi_lock, flags);
+	ret = iosf_mbi_pci_read_mdr(mcrx, mcr, mdr);
+	spin_unlock_irqrestore(&iosf_mbi_lock, flags);
+
+	return ret;
+}
+EXPORT_SYMBOL(bt_mbi_read);
+
+int bt_mbi_write(u8 port, u8 opcode, u32 offset, u32 mdr)
+{
+	u32 mcr, mcrx;
+	unsigned long flags;
+	int ret;
+
+	/* Access to the GFX unit is handled by GPU code */
+	BUG_ON(port == BT_MBI_UNIT_GFX);
+
+	mcr = iosf_mbi_form_mcr(opcode, port, offset & BT_MBI_MASK_LO);
+	mcrx = offset & BT_MBI_MASK_HI;
+
+	spin_lock_irqsave(&iosf_mbi_lock, flags);
+	ret = iosf_mbi_pci_write_mdr(mcrx, mcr, mdr);
+	spin_unlock_irqrestore(&iosf_mbi_lock, flags);
+
+	return ret;
+}
+EXPORT_SYMBOL(bt_mbi_write);
+
+int bt_mbi_modify(u8 port, u8 opcode, u32 offset, u32 mdr, u32 mask)
+{
+	u32 mcr, mcrx;
+	u32 value;
+	unsigned long flags;
+	int ret;
+
+	/* Access to the GFX unit is handled by GPU code */
+	BUG_ON(port == BT_MBI_UNIT_GFX);
+
+	mcr = iosf_mbi_form_mcr(opcode, port, offset & BT_MBI_MASK_LO);
+	mcrx = offset & BT_MBI_MASK_HI;
+
+	spin_lock_irqsave(&iosf_mbi_lock, flags);
+
+	/* Read current mdr value */
+	ret = iosf_mbi_pci_read_mdr(mcrx, mcr & BT_MBI_RD_MASK, &value);
+	if (ret < 0) {
+		spin_unlock_irqrestore(&iosf_mbi_lock, flags);
+		return ret;
+	}
+
+	/* Apply mask */
+	value &= ~mask;
+	mdr &= mask;
+	value |= mdr;
+
+	/* Write back */
+	ret = iosf_mbi_pci_write_mdr(mcrx, mcr | BT_MBI_WR_MASK, value);
+
+	spin_unlock_irqrestore(&iosf_mbi_lock, flags);
+
+	return ret;
+}
+EXPORT_SYMBOL(bt_mbi_modify);
+
+static int iosf_mbi_probe(struct pci_dev *pdev,
+			  const struct pci_device_id *unused)
+{
+	int ret;
+
+	ret = pci_enable_device(pdev);
+	if (ret < 0) {
+		dev_err(&pdev->dev, "error: could not enable device\n");
+		return ret;
+	}
+
+	mbi_pdev = pci_dev_get(pdev);
+	return 0;
+}
+
+static DEFINE_PCI_DEVICE_TABLE(iosf_mbi_pci_ids) = {
+	{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x0F00) },
+	{ 0, },
+};
+MODULE_DEVICE_TABLE(pci, iosf_mbi_pci_ids);
+
+static struct pci_driver iosf_mbi_pci_driver = {
+	.name		= "iosf_mbi_pci",
+	.probe		= iosf_mbi_probe,
+	.id_table	= iosf_mbi_pci_ids,
+};
+
+static int __init bt_mbi_init(void)
+{
+	return pci_register_driver(&iosf_mbi_pci_driver);
+}
+
+static void __exit bt_mbi_exit(void)
+{
+	pci_unregister_driver(&iosf_mbi_pci_driver);
+	if (mbi_pdev) {
+		pci_dev_put(mbi_pdev);
+		mbi_pdev = NULL;
+	}
+}
+
+module_init(bt_mbi_init);
+module_exit(bt_mbi_exit);
+
+MODULE_AUTHOR("David E. Box <david.e.box@linux.intel.com>");
+MODULE_DESCRIPTION("BayTrail Mailbox Interface accessor");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/platform/x86/intel_baytrail.h b/drivers/platform/x86/intel_baytrail.h
new file mode 100644
index 0000000..8bcc311
--- /dev/null
+++ b/drivers/platform/x86/intel_baytrail.h
@@ -0,0 +1,90 @@
+/*
+ * intel_baytrail.h: MailBox access support for Intel BayTrail platforms
+ */
+
+#ifndef INTEL_BAYTRAIL_MBI_SYMS_H
+#define INTEL_BAYTRAIL_MBI_SYMS_H
+
+#define BT_MBI_MCR_OFFSET	0xD0
+#define BT_MBI_MDR_OFFSET	0xD4
+#define BT_MBI_MCRX_OFFSET	0xD8
+
+#define BT_MBI_RD_MASK		0xFEFFFFFF
+#define BT_MBI_WR_MASK		0x01000000
+
+#define BT_MBI_MASK_HI		0xFFFFFF00
+#define BT_MBI_MASK_LO		0x000000FF
+#define BT_MBI_ENABLE		0xF0
+
+/* BT-SB unit access methods */
+#define BT_MBI_UNIT_AUNIT	0x00
+#define BT_MBI_UNIT_SMC		0x01
+#define BT_MBI_UNIT_CPU		0x02
+#define BT_MBI_UNIT_BUNIT	0x03
+#define BT_MBI_UNIT_PMC		0x04
+#define BT_MBI_UNIT_GFX		0x06
+#define BT_MBI_UNIT_SMI		0x0C
+#define BT_MBI_UNIT_USB		0x43
+#define BT_MBI_UNIT_SATA	0xA3
+#define BT_MBI_UNIT_PCIE	0xA6
+
+/* Read/write opcodes */
+#define BT_MBI_AUNIT_READ	0x10
+#define BT_MBI_AUNIT_WRITE	0x11
+#define BT_MBI_SMC_READ		0x10
+#define BT_MBI_SMC_WRITE	0x11
+#define BT_MBI_CPU_READ		0x10
+#define BT_MBI_CPU_WRITE	0x11
+#define BT_MBI_BUNIT_READ	0x10
+#define BT_MBI_BUNIT_WRITE	0x11
+#define BT_MBI_PMC_READ		0x06
+#define BT_MBI_PMC_WRITE	0x07
+#define BT_MBI_GFX_READ		0x00
+#define BT_MBI_GFX_WRITE	0x01
+#define BT_MBI_SMIO_READ	0x06
+#define BT_MBI_SMIO_WRITE	0x07
+#define BT_MBI_USB_READ		0x06
+#define BT_MBI_USB_WRITE	0x07
+#define BT_MBI_SATA_READ	0x00
+#define BT_MBI_SATA_WRITE	0x01
+#define BT_MBI_PCIE_READ	0x00
+#define BT_MBI_PCIE_WRITE	0x01
+
+/**
+ * bt_mbi_read() - MailBox Interface read command
+ * @port:	port indicating subunit being accessed
+ * @opcode:	port specific read or write opcode
+ * @offset:	register address offset
+ * @mdr:	register data to be read
+ *
+ * Locking is handled by spinlock - cannot sleep.
+ * Return: Nonzero on error
+ */
+int bt_mbi_read(u8 port, u8 opcode, u32 offset, u32 *mdr);
+
+/**
+ * bt_mbi_write() - MailBox unmasked write command
+ * @port:	port indicating subunit being accessed
+ * @opcode:	port specific read or write opcode
+ * @offset:	register address offset
+ * @mdr:	register data to be written
+ *
+ * Locking is handled by spinlock - cannot sleep.
+ * Return: Nonzero on error
+ */
+int bt_mbi_write(u8 port, u8 opcode, u32 offset, u32 mdr);
+
+/**
+ * bt_mbi_modify() - MailBox masked write command
+ * @port:	port indicating subunit being accessed
+ * @opcode:	port specific read or write opcode
+ * @offset:	register address offset
+ * @mdr:	register data being modified
+ * @mask:	mask indicating bits in mdr to be modified
+ *
+ * Locking is handled by spinlock - cannot sleep.
+ * Return: Nonzero on error
+ */
+int bt_mbi_modify(u8 port, u8 opcode, u32 offset, u32 mdr, u32 mask);
+
+#endif /* INTEL_BAYTRAIL_MBI_SYMS_H */
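For orientation, here is a minimal sketch of how an in-kernel consumer might use the
accessors declared above. This is illustrative only and not part of the patch: the
function name and the register offset 0x60 are invented, while the unit and opcode
macros come from intel_baytrail.h. bt_mbi_modify() performs the same read-modify-write
in a single call, holding the driver's spinlock across both accesses.

#include <linux/types.h>
#include "intel_baytrail.h"

/* Illustrative only: 0x60 is not a real PUNIT register offset. */
#define EXAMPLE_PUNIT_REG	0x60

static int example_punit_set_bit0(void)
{
	u32 val;
	int ret;

	/* Read the current register value over the IOSF-SB mailbox. */
	ret = bt_mbi_read(BT_MBI_UNIT_PMC, BT_MBI_PMC_READ,
			  EXAMPLE_PUNIT_REG, &val);
	if (ret)
		return ret;

	/* Write the value back with bit 0 set. */
	return bt_mbi_write(BT_MBI_UNIT_PMC, BT_MBI_PMC_WRITE,
			    EXAMPLE_PUNIT_REG, val | 0x1);
}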
diff --git a/drivers/platform/x86/intel_scu_ipc.c b/drivers/platform/x86/intel_scu_ipc.c
index 60ea476..76ca094 100644
--- a/drivers/platform/x86/intel_scu_ipc.c
+++ b/drivers/platform/x86/intel_scu_ipc.c
@@ -62,12 +62,10 @@
 #define IPC_RWBUF_SIZE    20		/* IPC Read buffer Size */
 #define IPC_IOC	          0x100		/* IPC command register IOC bit */
 
-enum {
-	SCU_IPC_LINCROFT,
-	SCU_IPC_PENWELL,
-	SCU_IPC_CLOVERVIEW,
-	SCU_IPC_TANGIER,
-};
+#define PCI_DEVICE_ID_LINCROFT		0x082a
+#define PCI_DEVICE_ID_PENWELL		0x080e
+#define PCI_DEVICE_ID_CLOVERVIEW	0x08ea
+#define PCI_DEVICE_ID_TANGIER		0x11a0
 
 /* intel scu ipc driver data*/
 struct intel_scu_ipc_pdata_t {
@@ -78,35 +76,29 @@
 	u8 irq_mode;
 };
 
-static struct intel_scu_ipc_pdata_t intel_scu_ipc_pdata[] = {
-	[SCU_IPC_LINCROFT] = {
-		.ipc_base = 0xff11c000,
-		.i2c_base = 0xff12b000,
-		.ipc_len = 0x100,
-		.i2c_len = 0x10,
-		.irq_mode = 0,
-	},
-	[SCU_IPC_PENWELL] = {
-		.ipc_base = 0xff11c000,
-		.i2c_base = 0xff12b000,
-		.ipc_len = 0x100,
-		.i2c_len = 0x10,
-		.irq_mode = 1,
-	},
-	[SCU_IPC_CLOVERVIEW] = {
-		.ipc_base = 0xff11c000,
-		.i2c_base = 0xff12b000,
-		.ipc_len = 0x100,
-		.i2c_len = 0x10,
-		.irq_mode = 1,
-	},
-	[SCU_IPC_TANGIER] = {
-		.ipc_base = 0xff009000,
-		.i2c_base  = 0xff00d000,
-		.ipc_len  = 0x100,
-		.i2c_len = 0x10,
-		.irq_mode = 0,
-	},
+static struct intel_scu_ipc_pdata_t intel_scu_ipc_lincroft_pdata = {
+	.ipc_base = 0xff11c000,
+	.i2c_base = 0xff12b000,
+	.ipc_len = 0x100,
+	.i2c_len = 0x10,
+	.irq_mode = 0,
+};
+
+/* Penwell and Cloverview */
+static struct intel_scu_ipc_pdata_t intel_scu_ipc_penwell_pdata = {
+	.ipc_base = 0xff11c000,
+	.i2c_base = 0xff12b000,
+	.ipc_len = 0x100,
+	.i2c_len = 0x10,
+	.irq_mode = 1,
+};
+
+static struct intel_scu_ipc_pdata_t intel_scu_ipc_tangier_pdata = {
+	.ipc_base = 0xff009000,
+	.i2c_base  = 0xff00d000,
+	.ipc_len  = 0x100,
+	.i2c_len = 0x10,
+	.irq_mode = 0,
 };
 
 static int ipc_probe(struct pci_dev *dev, const struct pci_device_id *id);
@@ -583,15 +575,14 @@
  */
 static int ipc_probe(struct pci_dev *dev, const struct pci_device_id *id)
 {
-	int err, pid;
+	int err;
 	struct intel_scu_ipc_pdata_t *pdata;
 	resource_size_t pci_resource;
 
 	if (ipcdev.pdev)		/* We support only one SCU */
 		return -EBUSY;
 
-	pid = id->driver_data;
-	pdata = &intel_scu_ipc_pdata[pid];
+	pdata = (struct intel_scu_ipc_pdata_t *)id->driver_data;
 
 	ipcdev.pdev = pci_dev_get(dev);
 	ipcdev.irq_mode = pdata->irq_mode;
@@ -650,11 +641,21 @@
 }
 
 static DEFINE_PCI_DEVICE_TABLE(pci_ids) = {
-	{PCI_VDEVICE(INTEL, 0x082a), SCU_IPC_LINCROFT},
-	{PCI_VDEVICE(INTEL, 0x080e), SCU_IPC_PENWELL},
-	{PCI_VDEVICE(INTEL, 0x08ea), SCU_IPC_CLOVERVIEW},
-	{PCI_VDEVICE(INTEL, 0x11a0), SCU_IPC_TANGIER},
-	{ 0,}
+	{
+		PCI_VDEVICE(INTEL, PCI_DEVICE_ID_LINCROFT),
+		(kernel_ulong_t)&intel_scu_ipc_lincroft_pdata,
+	}, {
+		PCI_VDEVICE(INTEL, PCI_DEVICE_ID_PENWELL),
+		(kernel_ulong_t)&intel_scu_ipc_penwell_pdata,
+	}, {
+		PCI_VDEVICE(INTEL, PCI_DEVICE_ID_CLOVERVIEW),
+		(kernel_ulong_t)&intel_scu_ipc_penwell_pdata,
+	}, {
+		PCI_VDEVICE(INTEL, PCI_DEVICE_ID_TANGIER),
+		(kernel_ulong_t)&intel_scu_ipc_tangier_pdata,
+	}, {
+		0,
+	}
 };
 MODULE_DEVICE_TABLE(pci, pci_ids);
 
diff --git a/drivers/platform/x86/mxm-wmi.c b/drivers/platform/x86/mxm-wmi.c
index 3c59c0a..f4bad83 100644
--- a/drivers/platform/x86/mxm-wmi.c
+++ b/drivers/platform/x86/mxm-wmi.c
@@ -20,6 +20,7 @@
 #include <linux/kernel.h>
 #include <linux/module.h>
 #include <linux/init.h>
+#include <linux/mxm-wmi.h>
 #include <linux/acpi.h>
 
 MODULE_AUTHOR("Dave Airlie");
diff --git a/drivers/platform/x86/sony-laptop.c b/drivers/platform/x86/sony-laptop.c
index 563e4f5..8f8551a 100644
--- a/drivers/platform/x86/sony-laptop.c
+++ b/drivers/platform/x86/sony-laptop.c
@@ -789,7 +789,7 @@
 		void *buffer, size_t buflen)
 {
 	int ret = 0;
-	size_t len = len;
+	size_t len;
 	union acpi_object *object = __call_snc_method(handle, name, value);
 
 	if (!object)
diff --git a/drivers/platform/x86/toshiba_acpi.c b/drivers/platform/x86/toshiba_acpi.c
index 7ad1ed0..90dd764 100644
--- a/drivers/platform/x86/toshiba_acpi.c
+++ b/drivers/platform/x86/toshiba_acpi.c
@@ -149,6 +149,7 @@
 MODULE_DEVICE_TABLE(acpi, toshiba_device_ids);
 
 static const struct key_entry toshiba_acpi_keymap[] = {
+	{ KE_KEY, 0x9e, { KEY_RFKILL } },
 	{ KE_KEY, 0x101, { KEY_MUTE } },
 	{ KE_KEY, 0x102, { KEY_ZOOMOUT } },
 	{ KE_KEY, 0x103, { KEY_ZOOMIN } },
diff --git a/drivers/power/ds2782_battery.c b/drivers/power/ds2782_battery.c
index 5631748..041f9b6 100644
--- a/drivers/power/ds2782_battery.c
+++ b/drivers/power/ds2782_battery.c
@@ -192,7 +192,7 @@
 
 	/*
 	 * Voltage is measured in units of 1.22mV. The voltage is stored as
-	 * a 10-bit number plus sign, in the upper bits of a 16-bit register
+	 * a 12-bit number plus sign, in the upper bits of a 16-bit register
 	 */
 	err = ds278x_read_reg16(info, DS278x_REG_VOLT_MSB, &raw);
 	if (err)
diff --git a/drivers/power/isp1704_charger.c b/drivers/power/isp1704_charger.c
index 80edb7d..0b4cf9d 100644
--- a/drivers/power/isp1704_charger.c
+++ b/drivers/power/isp1704_charger.c
@@ -444,8 +444,6 @@
 		ret = PTR_ERR(isp->phy);
 		goto fail0;
 	}
-	if (!isp->phy)
-		goto fail0;
 
 	isp->dev = &pdev->dev;
 	platform_set_drvdata(pdev, isp);
diff --git a/drivers/power/max17040_battery.c b/drivers/power/max17040_battery.c
index c7ff6d6..0fbac86 100644
--- a/drivers/power/max17040_battery.c
+++ b/drivers/power/max17040_battery.c
@@ -148,7 +148,7 @@
 {
 	struct max17040_chip *chip = i2c_get_clientdata(client);
 
-	if (chip->pdata->battery_online)
+	if (chip->pdata && chip->pdata->battery_online)
 		chip->online = chip->pdata->battery_online();
 	else
 		chip->online = 1;
@@ -158,7 +158,8 @@
 {
 	struct max17040_chip *chip = i2c_get_clientdata(client);
 
-	if (!chip->pdata->charger_online || !chip->pdata->charger_enable) {
+	if (!chip->pdata || !chip->pdata->charger_online
+			|| !chip->pdata->charger_enable) {
 		chip->status = POWER_SUPPLY_STATUS_UNKNOWN;
 		return;
 	}
diff --git a/drivers/regulator/ab3100.c b/drivers/regulator/ab3100.c
index 77b46d0..e10febe 100644
--- a/drivers/regulator/ab3100.c
+++ b/drivers/regulator/ab3100.c
@@ -498,7 +498,7 @@
 				     struct ab3100_platform_data *plfdata,
 				     struct regulator_init_data *init_data,
 				     struct device_node *np,
-				     int id)
+				     unsigned long id)
 {
 	struct regulator_desc *desc;
 	struct ab3100_regulator *reg;
@@ -646,7 +646,7 @@
 		err = ab3100_regulator_register(
 			pdev, NULL, ab3100_regulator_matches[i].init_data,
 			ab3100_regulator_matches[i].of_node,
-			(int) ab3100_regulator_matches[i].driver_data);
+			(unsigned long)ab3100_regulator_matches[i].driver_data);
 		if (err) {
 			ab3100_regulators_remove(pdev);
 			return err;
diff --git a/drivers/regulator/core.c b/drivers/regulator/core.c
index b38a6b6..16a309e 100644
--- a/drivers/regulator/core.c
+++ b/drivers/regulator/core.c
@@ -1272,6 +1272,8 @@
 				if (r->dev.parent &&
 					node == r->dev.of_node)
 					return r;
+			*ret = -EPROBE_DEFER;
+			return NULL;
 		} else {
 			/*
 			 * If we couldn't even get the node then it's
@@ -1312,7 +1314,7 @@
 	struct regulator_dev *rdev;
 	struct regulator *regulator = ERR_PTR(-EPROBE_DEFER);
 	const char *devname = NULL;
-	int ret = -EPROBE_DEFER;
+	int ret;
 
 	if (id == NULL) {
 		pr_err("get() with no identifier\n");
@@ -1322,6 +1324,11 @@
 	if (dev)
 		devname = dev_name(dev);
 
+	if (have_full_constraints())
+		ret = -ENODEV;
+	else
+		ret = -EPROBE_DEFER;
+
 	mutex_lock(&regulator_list_mutex);
 
 	rdev = regulator_dev_lookup(dev, id, &ret);
diff --git a/drivers/regulator/da9055-regulator.c b/drivers/regulator/da9055-regulator.c
index 7f34020..b14ebda 100644
--- a/drivers/regulator/da9055-regulator.c
+++ b/drivers/regulator/da9055-regulator.c
@@ -576,7 +576,9 @@
 	/* Only LDO 5 and 6 has got the over current interrupt */
 	if (pdev->id == DA9055_ID_LDO5 || pdev->id ==  DA9055_ID_LDO6) {
 		irq = platform_get_irq_byname(pdev, "REGULATOR");
-		irq = regmap_irq_get_virq(da9055->irq_data, irq);
+		if (irq < 0)
+			return irq;
+
 		ret = devm_request_threaded_irq(&pdev->dev, irq, NULL,
 						da9055_ldo5_6_oc_irq,
 						IRQF_TRIGGER_HIGH |
diff --git a/drivers/regulator/max14577.c b/drivers/regulator/max14577.c
index b1078ba3..186df87 100644
--- a/drivers/regulator/max14577.c
+++ b/drivers/regulator/max14577.c
@@ -168,10 +168,11 @@
 			MAX14577_REG_MAX);
 	if (ret < 0) {
 		dev_err(&pdev->dev, "Error parsing regulator init data: %d\n", ret);
-		return ret;
 	}
 
-	return 0;
+	of_node_put(np);
+
+	return ret;
 }
 
 static inline struct regulator_init_data *match_init_data(int index)
diff --git a/drivers/regulator/s2mps11.c b/drivers/regulator/s2mps11.c
index d9e5579..cd0b9e35 100644
--- a/drivers/regulator/s2mps11.c
+++ b/drivers/regulator/s2mps11.c
@@ -441,6 +441,7 @@
 	for (i = 0; i < S2MPS11_REGULATOR_MAX; i++) {
 		if (!reg_np) {
 			config.init_data = pdata->regulators[i].initdata;
+			config.of_node = pdata->regulators[i].reg_node;
 		} else {
 			config.init_data = rdata[i].init_data;
 			config.of_node = rdata[i].of_node;
diff --git a/drivers/s390/block/dasd_diag.c b/drivers/s390/block/dasd_diag.c
index 92bd22c..9cbc567 100644
--- a/drivers/s390/block/dasd_diag.c
+++ b/drivers/s390/block/dasd_diag.c
@@ -504,7 +504,7 @@
 	struct dasd_diag_req *dreq;
 	struct dasd_diag_bio *dbio;
 	struct req_iterator iter;
-	struct bio_vec *bv;
+	struct bio_vec bv;
 	char *dst;
 	unsigned int count, datasize;
 	sector_t recid, first_rec, last_rec;
@@ -525,10 +525,10 @@
 	/* Check struct bio and count the number of blocks for the request. */
 	count = 0;
 	rq_for_each_segment(bv, req, iter) {
-		if (bv->bv_len & (blksize - 1))
+		if (bv.bv_len & (blksize - 1))
 			/* Fba can only do full blocks. */
 			return ERR_PTR(-EINVAL);
-		count += bv->bv_len >> (block->s2b_shift + 9);
+		count += bv.bv_len >> (block->s2b_shift + 9);
 	}
 	/* Paranoia. */
 	if (count != last_rec - first_rec + 1)
@@ -545,8 +545,8 @@
 	dbio = dreq->bio;
 	recid = first_rec;
 	rq_for_each_segment(bv, req, iter) {
-		dst = page_address(bv->bv_page) + bv->bv_offset;
-		for (off = 0; off < bv->bv_len; off += blksize) {
+		dst = page_address(bv.bv_page) + bv.bv_offset;
+		for (off = 0; off < bv.bv_len; off += blksize) {
 			memset(dbio, 0, sizeof (struct dasd_diag_bio));
 			dbio->type = rw_cmd;
 			dbio->block_number = recid + 1;
diff --git a/drivers/s390/block/dasd_eckd.c b/drivers/s390/block/dasd_eckd.c
index 95e4578..2e8e075 100644
--- a/drivers/s390/block/dasd_eckd.c
+++ b/drivers/s390/block/dasd_eckd.c
@@ -2551,7 +2551,7 @@
 	struct dasd_ccw_req *cqr;
 	struct ccw1 *ccw;
 	struct req_iterator iter;
-	struct bio_vec *bv;
+	struct bio_vec bv;
 	char *dst;
 	unsigned int off;
 	int count, cidaw, cplength, datasize;
@@ -2573,13 +2573,13 @@
 	count = 0;
 	cidaw = 0;
 	rq_for_each_segment(bv, req, iter) {
-		if (bv->bv_len & (blksize - 1))
+		if (bv.bv_len & (blksize - 1))
 			/* Eckd can only do full blocks. */
 			return ERR_PTR(-EINVAL);
-		count += bv->bv_len >> (block->s2b_shift + 9);
+		count += bv.bv_len >> (block->s2b_shift + 9);
 #if defined(CONFIG_64BIT)
-		if (idal_is_needed (page_address(bv->bv_page), bv->bv_len))
-			cidaw += bv->bv_len >> (block->s2b_shift + 9);
+		if (idal_is_needed (page_address(bv.bv_page), bv.bv_len))
+			cidaw += bv.bv_len >> (block->s2b_shift + 9);
 #endif
 	}
 	/* Paranoia. */
@@ -2650,16 +2650,16 @@
 			      last_rec - recid + 1, cmd, basedev, blksize);
 	}
 	rq_for_each_segment(bv, req, iter) {
-		dst = page_address(bv->bv_page) + bv->bv_offset;
+		dst = page_address(bv.bv_page) + bv.bv_offset;
 		if (dasd_page_cache) {
 			char *copy = kmem_cache_alloc(dasd_page_cache,
 						      GFP_DMA | __GFP_NOWARN);
 			if (copy && rq_data_dir(req) == WRITE)
-				memcpy(copy + bv->bv_offset, dst, bv->bv_len);
+				memcpy(copy + bv.bv_offset, dst, bv.bv_len);
 			if (copy)
-				dst = copy + bv->bv_offset;
+				dst = copy + bv.bv_offset;
 		}
-		for (off = 0; off < bv->bv_len; off += blksize) {
+		for (off = 0; off < bv.bv_len; off += blksize) {
 			sector_t trkid = recid;
 			unsigned int recoffs = sector_div(trkid, blk_per_trk);
 			rcmd = cmd;
@@ -2735,7 +2735,7 @@
 	struct dasd_ccw_req *cqr;
 	struct ccw1 *ccw;
 	struct req_iterator iter;
-	struct bio_vec *bv;
+	struct bio_vec bv;
 	char *dst, *idaw_dst;
 	unsigned int cidaw, cplength, datasize;
 	unsigned int tlf;
@@ -2813,8 +2813,8 @@
 	idaw_dst = NULL;
 	idaw_len = 0;
 	rq_for_each_segment(bv, req, iter) {
-		dst = page_address(bv->bv_page) + bv->bv_offset;
-		seg_len = bv->bv_len;
+		dst = page_address(bv.bv_page) + bv.bv_offset;
+		seg_len = bv.bv_len;
 		while (seg_len) {
 			if (new_track) {
 				trkid = recid;
@@ -3039,7 +3039,7 @@
 {
 	struct dasd_ccw_req *cqr;
 	struct req_iterator iter;
-	struct bio_vec *bv;
+	struct bio_vec bv;
 	char *dst;
 	unsigned int trkcount, ctidaw;
 	unsigned char cmd;
@@ -3125,8 +3125,8 @@
 		new_track = 1;
 		recid = first_rec;
 		rq_for_each_segment(bv, req, iter) {
-			dst = page_address(bv->bv_page) + bv->bv_offset;
-			seg_len = bv->bv_len;
+			dst = page_address(bv.bv_page) + bv.bv_offset;
+			seg_len = bv.bv_len;
 			while (seg_len) {
 				if (new_track) {
 					trkid = recid;
@@ -3158,9 +3158,9 @@
 		}
 	} else {
 		rq_for_each_segment(bv, req, iter) {
-			dst = page_address(bv->bv_page) + bv->bv_offset;
+			dst = page_address(bv.bv_page) + bv.bv_offset;
 			last_tidaw = itcw_add_tidaw(itcw, 0x00,
-						    dst, bv->bv_len);
+						    dst, bv.bv_len);
 			if (IS_ERR(last_tidaw)) {
 				ret = -EINVAL;
 				goto out_error;
@@ -3278,7 +3278,7 @@
 	struct dasd_ccw_req *cqr;
 	struct ccw1 *ccw;
 	struct req_iterator iter;
-	struct bio_vec *bv;
+	struct bio_vec bv;
 	char *dst;
 	unsigned char cmd;
 	unsigned int trkcount;
@@ -3378,8 +3378,8 @@
 			idaws = idal_create_words(idaws, rawpadpage, PAGE_SIZE);
 	}
 	rq_for_each_segment(bv, req, iter) {
-		dst = page_address(bv->bv_page) + bv->bv_offset;
-		seg_len = bv->bv_len;
+		dst = page_address(bv.bv_page) + bv.bv_offset;
+		seg_len = bv.bv_len;
 		if (cmd == DASD_ECKD_CCW_READ_TRACK)
 			memset(dst, 0, seg_len);
 		if (!len_to_track_end) {
@@ -3424,7 +3424,7 @@
 	struct dasd_eckd_private *private;
 	struct ccw1 *ccw;
 	struct req_iterator iter;
-	struct bio_vec *bv;
+	struct bio_vec bv;
 	char *dst, *cda;
 	unsigned int blksize, blk_per_trk, off;
 	sector_t recid;
@@ -3442,8 +3442,8 @@
 	if (private->uses_cdl == 0 || recid > 2*blk_per_trk)
 		ccw++;
 	rq_for_each_segment(bv, req, iter) {
-		dst = page_address(bv->bv_page) + bv->bv_offset;
-		for (off = 0; off < bv->bv_len; off += blksize) {
+		dst = page_address(bv.bv_page) + bv.bv_offset;
+		for (off = 0; off < bv.bv_len; off += blksize) {
 			/* Skip locate record. */
 			if (private->uses_cdl && recid <= 2*blk_per_trk)
 				ccw++;
@@ -3454,7 +3454,7 @@
 					cda = (char *)((addr_t) ccw->cda);
 				if (dst != cda) {
 					if (rq_data_dir(req) == READ)
-						memcpy(dst, cda, bv->bv_len);
+						memcpy(dst, cda, bv.bv_len);
 					kmem_cache_free(dasd_page_cache,
 					    (void *)((addr_t)cda & PAGE_MASK));
 				}
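
The dasd_eckd conversions above all follow one pattern: rq_for_each_segment() now hands out a struct bio_vec by value together with a struct req_iterator, so each segment is read as bv.bv_page / bv.bv_offset / bv.bv_len rather than through a pointer into the bio's vector. A minimal sketch of the new calling convention (function name and block-size handling are illustrative, not taken from this patch):

#include <linux/blkdev.h>
#include <linux/bio.h>

/* Count how many blksize-sized blocks a request covers. */
static unsigned int count_request_blocks(struct request *req,
					 unsigned int blksize)
{
	struct req_iterator iter;
	struct bio_vec bv;	/* by value, not a pointer */
	unsigned int blocks = 0;

	rq_for_each_segment(bv, req, iter)
		blocks += bv.bv_len / blksize;

	return blocks;
}
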
diff --git a/drivers/s390/block/dasd_fba.c b/drivers/s390/block/dasd_fba.c
index 9cbc8c3..2c8e68b 100644
--- a/drivers/s390/block/dasd_fba.c
+++ b/drivers/s390/block/dasd_fba.c
@@ -260,7 +260,7 @@
 	struct dasd_ccw_req *cqr;
 	struct ccw1 *ccw;
 	struct req_iterator iter;
-	struct bio_vec *bv;
+	struct bio_vec bv;
 	char *dst;
 	int count, cidaw, cplength, datasize;
 	sector_t recid, first_rec, last_rec;
@@ -283,13 +283,13 @@
 	count = 0;
 	cidaw = 0;
 	rq_for_each_segment(bv, req, iter) {
-		if (bv->bv_len & (blksize - 1))
+		if (bv.bv_len & (blksize - 1))
 			/* Fba can only do full blocks. */
 			return ERR_PTR(-EINVAL);
-		count += bv->bv_len >> (block->s2b_shift + 9);
+		count += bv.bv_len >> (block->s2b_shift + 9);
 #if defined(CONFIG_64BIT)
-		if (idal_is_needed (page_address(bv->bv_page), bv->bv_len))
-			cidaw += bv->bv_len / blksize;
+		if (idal_is_needed (page_address(bv.bv_page), bv.bv_len))
+			cidaw += bv.bv_len / blksize;
 #endif
 	}
 	/* Paranoia. */
@@ -326,16 +326,16 @@
 	}
 	recid = first_rec;
 	rq_for_each_segment(bv, req, iter) {
-		dst = page_address(bv->bv_page) + bv->bv_offset;
+		dst = page_address(bv.bv_page) + bv.bv_offset;
 		if (dasd_page_cache) {
 			char *copy = kmem_cache_alloc(dasd_page_cache,
 						      GFP_DMA | __GFP_NOWARN);
 			if (copy && rq_data_dir(req) == WRITE)
-				memcpy(copy + bv->bv_offset, dst, bv->bv_len);
+				memcpy(copy + bv.bv_offset, dst, bv.bv_len);
 			if (copy)
-				dst = copy + bv->bv_offset;
+				dst = copy + bv.bv_offset;
 		}
-		for (off = 0; off < bv->bv_len; off += blksize) {
+		for (off = 0; off < bv.bv_len; off += blksize) {
 			/* Locate record for stupid devices. */
 			if (private->rdc_data.mode.bits.data_chain == 0) {
 				ccw[-1].flags |= CCW_FLAG_CC;
@@ -384,7 +384,7 @@
 	struct dasd_fba_private *private;
 	struct ccw1 *ccw;
 	struct req_iterator iter;
-	struct bio_vec *bv;
+	struct bio_vec bv;
 	char *dst, *cda;
 	unsigned int blksize, off;
 	int status;
@@ -399,8 +399,8 @@
 	if (private->rdc_data.mode.bits.data_chain != 0)
 		ccw++;
 	rq_for_each_segment(bv, req, iter) {
-		dst = page_address(bv->bv_page) + bv->bv_offset;
-		for (off = 0; off < bv->bv_len; off += blksize) {
+		dst = page_address(bv.bv_page) + bv.bv_offset;
+		for (off = 0; off < bv.bv_len; off += blksize) {
 			/* Skip locate record. */
 			if (private->rdc_data.mode.bits.data_chain == 0)
 				ccw++;
@@ -411,7 +411,7 @@
 					cda = (char *)((addr_t) ccw->cda);
 				if (dst != cda) {
 					if (rq_data_dir(req) == READ)
-						memcpy(dst, cda, bv->bv_len);
+						memcpy(dst, cda, bv.bv_len);
 					kmem_cache_free(dasd_page_cache,
 					    (void *)((addr_t)cda & PAGE_MASK));
 				}
diff --git a/drivers/s390/block/dcssblk.c b/drivers/s390/block/dcssblk.c
index 6eca019..ebf41e2 100644
--- a/drivers/s390/block/dcssblk.c
+++ b/drivers/s390/block/dcssblk.c
@@ -808,18 +808,19 @@
 dcssblk_make_request(struct request_queue *q, struct bio *bio)
 {
 	struct dcssblk_dev_info *dev_info;
-	struct bio_vec *bvec;
+	struct bio_vec bvec;
+	struct bvec_iter iter;
 	unsigned long index;
 	unsigned long page_addr;
 	unsigned long source_addr;
 	unsigned long bytes_done;
-	int i;
 
 	bytes_done = 0;
 	dev_info = bio->bi_bdev->bd_disk->private_data;
 	if (dev_info == NULL)
 		goto fail;
-	if ((bio->bi_sector & 7) != 0 || (bio->bi_size & 4095) != 0)
+	if ((bio->bi_iter.bi_sector & 7) != 0 ||
+	    (bio->bi_iter.bi_size & 4095) != 0)
 		/* Request is not page-aligned. */
 		goto fail;
 	if (bio_end_sector(bio) > get_capacity(bio->bi_bdev->bd_disk)) {
@@ -842,22 +843,22 @@
 		}
 	}
 
-	index = (bio->bi_sector >> 3);
-	bio_for_each_segment(bvec, bio, i) {
+	index = (bio->bi_iter.bi_sector >> 3);
+	bio_for_each_segment(bvec, bio, iter) {
 		page_addr = (unsigned long)
-			page_address(bvec->bv_page) + bvec->bv_offset;
+			page_address(bvec.bv_page) + bvec.bv_offset;
 		source_addr = dev_info->start + (index<<12) + bytes_done;
-		if (unlikely((page_addr & 4095) != 0) || (bvec->bv_len & 4095) != 0)
+		if (unlikely((page_addr & 4095) != 0) || (bvec.bv_len & 4095) != 0)
 			// More paranoia.
 			goto fail;
 		if (bio_data_dir(bio) == READ) {
 			memcpy((void*)page_addr, (void*)source_addr,
-				bvec->bv_len);
+				bvec.bv_len);
 		} else {
 			memcpy((void*)source_addr, (void*)page_addr,
-				bvec->bv_len);
+				bvec.bv_len);
 		}
-		bytes_done += bvec->bv_len;
+		bytes_done += bvec.bv_len;
 	}
 	bio_endio(bio, 0);
 	return;
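
dcssblk (and xpram further down) use the bio-level form of the same interface: bio_for_each_segment() now takes a struct bvec_iter that carries the position and yields each struct bio_vec by value; the old integer index is gone. A sketch of the idiom, assuming every page is a lowmem page (true for these drivers) and using an illustrative function name:

#include <linux/bio.h>
#include <linux/mm.h>
#include <linux/string.h>

/* Copy the data described by a bio into a flat buffer. */
static void copy_bio_to_buf(struct bio *bio, void *buf)
{
	struct bvec_iter iter;	/* replaces the old int index */
	struct bio_vec bvec;
	size_t done = 0;

	bio_for_each_segment(bvec, bio, iter) {
		memcpy(buf + done,
		       page_address(bvec.bv_page) + bvec.bv_offset,
		       bvec.bv_len);
		done += bvec.bv_len;
	}
}
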
diff --git a/drivers/s390/block/scm_blk.c b/drivers/s390/block/scm_blk.c
index d0ab501..76bed17 100644
--- a/drivers/s390/block/scm_blk.c
+++ b/drivers/s390/block/scm_blk.c
@@ -130,7 +130,7 @@
 	struct aidaw *aidaw = scmrq->aidaw;
 	struct msb *msb = &scmrq->aob->msb[0];
 	struct req_iterator iter;
-	struct bio_vec *bv;
+	struct bio_vec bv;
 
 	msb->bs = MSB_BS_4K;
 	scmrq->aob->request.msb_count = 1;
@@ -142,9 +142,9 @@
 	msb->data_addr = (u64) aidaw;
 
 	rq_for_each_segment(bv, scmrq->request, iter) {
-		WARN_ON(bv->bv_offset);
-		msb->blk_count += bv->bv_len >> 12;
-		aidaw->data_addr = (u64) page_address(bv->bv_page);
+		WARN_ON(bv.bv_offset);
+		msb->blk_count += bv.bv_len >> 12;
+		aidaw->data_addr = (u64) page_address(bv.bv_page);
 		aidaw++;
 	}
 }
diff --git a/drivers/s390/block/scm_blk_cluster.c b/drivers/s390/block/scm_blk_cluster.c
index 27f930c..9aae909 100644
--- a/drivers/s390/block/scm_blk_cluster.c
+++ b/drivers/s390/block/scm_blk_cluster.c
@@ -122,7 +122,7 @@
 	struct aidaw *aidaw = scmrq->aidaw;
 	struct msb *msb = &scmrq->aob->msb[0];
 	struct req_iterator iter;
-	struct bio_vec *bv;
+	struct bio_vec bv;
 	int i = 0;
 	u64 addr;
 
@@ -163,7 +163,7 @@
 			i++;
 		}
 		rq_for_each_segment(bv, req, iter) {
-			aidaw->data_addr = (u64) page_address(bv->bv_page);
+			aidaw->data_addr = (u64) page_address(bv.bv_page);
 			aidaw++;
 			i++;
 		}
diff --git a/drivers/s390/block/xpram.c b/drivers/s390/block/xpram.c
index 58141f0..6969d39 100644
--- a/drivers/s390/block/xpram.c
+++ b/drivers/s390/block/xpram.c
@@ -184,25 +184,26 @@
 static void xpram_make_request(struct request_queue *q, struct bio *bio)
 {
 	xpram_device_t *xdev = bio->bi_bdev->bd_disk->private_data;
-	struct bio_vec *bvec;
+	struct bio_vec bvec;
+	struct bvec_iter iter;
 	unsigned int index;
 	unsigned long page_addr;
 	unsigned long bytes;
-	int i;
 
-	if ((bio->bi_sector & 7) != 0 || (bio->bi_size & 4095) != 0)
+	if ((bio->bi_iter.bi_sector & 7) != 0 ||
+	    (bio->bi_iter.bi_size & 4095) != 0)
 		/* Request is not page-aligned. */
 		goto fail;
-	if ((bio->bi_size >> 12) > xdev->size)
+	if ((bio->bi_iter.bi_size >> 12) > xdev->size)
 		/* Request size is no page-aligned. */
 		goto fail;
-	if ((bio->bi_sector >> 3) > 0xffffffffU - xdev->offset)
+	if ((bio->bi_iter.bi_sector >> 3) > 0xffffffffU - xdev->offset)
 		goto fail;
-	index = (bio->bi_sector >> 3) + xdev->offset;
-	bio_for_each_segment(bvec, bio, i) {
+	index = (bio->bi_iter.bi_sector >> 3) + xdev->offset;
+	bio_for_each_segment(bvec, bio, iter) {
 		page_addr = (unsigned long)
-			kmap(bvec->bv_page) + bvec->bv_offset;
-		bytes = bvec->bv_len;
+			kmap(bvec.bv_page) + bvec.bv_offset;
+		bytes = bvec.bv_len;
 		if ((page_addr & 4095) != 0 || (bytes & 4095) != 0)
 			/* More paranoia. */
 			goto fail;
diff --git a/drivers/s390/cio/cio.c b/drivers/s390/cio/cio.c
index 88e35d8..8ee88c4 100644
--- a/drivers/s390/cio/cio.c
+++ b/drivers/s390/cio/cio.c
@@ -342,8 +342,9 @@
  */
 int cio_commit_config(struct subchannel *sch)
 {
-	struct schib schib;
 	int ccode, retry, ret = 0;
+	struct schib schib;
+	struct irb irb;
 
 	if (stsch_err(sch->schid, &schib) || !css_sch_is_valid(&schib))
 		return -ENODEV;
@@ -367,7 +368,10 @@
 			ret = -EAGAIN;
 			break;
 		case 1: /* status pending */
-			return -EBUSY;
+			ret = -EBUSY;
+			if (tsch(sch->schid, &irb))
+				return ret;
+			break;
 		case 2: /* busy */
 			udelay(100); /* allow for recovery */
 			ret = -EBUSY;
@@ -403,7 +407,6 @@
  */
 int cio_enable_subchannel(struct subchannel *sch, u32 intparm)
 {
-	int retry;
 	int ret;
 
 	CIO_TRACE_EVENT(2, "ensch");
@@ -418,20 +421,14 @@
 	sch->config.isc = sch->isc;
 	sch->config.intparm = intparm;
 
-	for (retry = 0; retry < 3; retry++) {
+	ret = cio_commit_config(sch);
+	if (ret == -EIO) {
+		/*
+		 * Got a program check in msch. Try without
+		 * the concurrent sense bit the next time.
+		 */
+		sch->config.csense = 0;
 		ret = cio_commit_config(sch);
-		if (ret == -EIO) {
-			/*
-			 * Got a program check in msch. Try without
-			 * the concurrent sense bit the next time.
-			 */
-			sch->config.csense = 0;
-		} else if (ret == -EBUSY) {
-			struct irb irb;
-			if (tsch(sch->schid, &irb) != 0)
-				break;
-		} else
-			break;
 	}
 	CIO_HEX_EVENT(2, &ret, sizeof(ret));
 	return ret;
@@ -444,7 +441,6 @@
  */
 int cio_disable_subchannel(struct subchannel *sch)
 {
-	int retry;
 	int ret;
 
 	CIO_TRACE_EVENT(2, "dissch");
@@ -456,16 +452,8 @@
 		return -ENODEV;
 
 	sch->config.ena = 0;
+	ret = cio_commit_config(sch);
 
-	for (retry = 0; retry < 3; retry++) {
-		ret = cio_commit_config(sch);
-		if (ret == -EBUSY) {
-			struct irb irb;
-			if (tsch(sch->schid, &irb) != 0)
-				break;
-		} else
-			break;
-	}
 	CIO_HEX_EVENT(2, &ret, sizeof(ret));
 	return ret;
 }
diff --git a/drivers/s390/cio/qdio.h b/drivers/s390/cio/qdio.h
index 8acaae1..a563e4c 100644
--- a/drivers/s390/cio/qdio.h
+++ b/drivers/s390/cio/qdio.h
@@ -359,14 +359,12 @@
 #define need_siga_sync_out_after_pci(q)	\
 	(unlikely(q->irq_ptr->siga_flag.sync_out_after_pci))
 
-#define for_each_input_queue(irq_ptr, q, i)	\
-	for (i = 0, q = irq_ptr->input_qs[0];	\
-		i < irq_ptr->nr_input_qs;	\
-		q = irq_ptr->input_qs[++i])
-#define for_each_output_queue(irq_ptr, q, i)	\
-	for (i = 0, q = irq_ptr->output_qs[0];	\
-		i < irq_ptr->nr_output_qs;	\
-		q = irq_ptr->output_qs[++i])
+#define for_each_input_queue(irq_ptr, q, i)		\
+	for (i = 0; i < irq_ptr->nr_input_qs &&		\
+		({ q = irq_ptr->input_qs[i]; 1; }); i++)
+#define for_each_output_queue(irq_ptr, q, i)		\
+	for (i = 0; i < irq_ptr->nr_output_qs &&	\
+		({ q = irq_ptr->output_qs[i]; 1; }); i++)
 
 #define prev_buf(bufnr)	\
 	((bufnr + QDIO_MAX_BUFFERS_MASK) & QDIO_MAX_BUFFERS_MASK)
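
The rewritten iterators check the bound before touching the array: the old form loaded irq_ptr->input_qs[0] in the for-initializer, so an irq_ptr with nr_input_qs == 0 still read slot 0. Usage is unchanged for callers; a hedged sketch, written as if inside qdio_main.c where these types are in scope (the per-queue helper is hypothetical):

static void handle_all_input_queues(struct qdio_irq *irq_ptr)
{
	struct qdio_q *q;
	int i;

	/*
	 * Safe even when nr_input_qs == 0: q is only assigned after
	 * the bound check inside the rewritten macro.
	 */
	for_each_input_queue(irq_ptr, q, i)
		handle_input_queue(q);	/* hypothetical per-queue helper */
}
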
diff --git a/drivers/s390/cio/qdio_main.c b/drivers/s390/cio/qdio_main.c
index c883a08..77466c4 100644
--- a/drivers/s390/cio/qdio_main.c
+++ b/drivers/s390/cio/qdio_main.c
@@ -996,7 +996,7 @@
 		}
 	}
 
-	if (!pci_out_supported(q))
+	if (!(irq_ptr->qib.ac & QIB_AC_OUTBOUND_PCI_SUPPORTED))
 		return;
 
 	for_each_output_queue(irq_ptr, q, i) {
diff --git a/drivers/s390/kvm/virtio_ccw.c b/drivers/s390/kvm/virtio_ccw.c
index d629717..0fc5848 100644
--- a/drivers/s390/kvm/virtio_ccw.c
+++ b/drivers/s390/kvm/virtio_ccw.c
@@ -642,8 +642,15 @@
 	     (SCSW_STCTL_ALERT_STATUS | SCSW_STCTL_STATUS_PEND))) {
 		/* OK */
 	}
-	if (irb_is_error(irb))
-		vcdev->err = -EIO; /* XXX - use real error */
+	if (irb_is_error(irb)) {
+		/* Command reject? */
+		if ((scsw_dstat(&irb->scsw) & DEV_STAT_UNIT_CHECK) &&
+		    (irb->ecw[0] & SNS0_CMD_REJECT))
+			vcdev->err = -EOPNOTSUPP;
+		else
+			/* Map everything else to -EIO. */
+			vcdev->err = -EIO;
+	}
 	if (vcdev->curr_io & activity) {
 		switch (activity) {
 		case VIRTIO_CCW_DOING_READ_FEAT:
diff --git a/drivers/s390/net/qeth_core.h b/drivers/s390/net/qeth_core.h
index ac0bdde..a0de045 100644
--- a/drivers/s390/net/qeth_core.h
+++ b/drivers/s390/net/qeth_core.h
@@ -738,6 +738,8 @@
 	int (*freeze)(struct ccwgroup_device *);
 	int (*thaw) (struct ccwgroup_device *);
 	int (*restore)(struct ccwgroup_device *);
+	int (*control_event_handler)(struct qeth_card *card,
+					struct qeth_ipa_cmd *cmd);
 };
 
 struct qeth_vlan_vid {
@@ -948,13 +950,10 @@
 int qeth_send_control_data(struct qeth_card *, int, struct qeth_cmd_buffer *,
 	int (*reply_cb)(struct qeth_card *, struct qeth_reply*, unsigned long),
 	void *reply_param);
-void qeth_bridge_state_change(struct qeth_card *card, struct qeth_ipa_cmd *cmd);
-void qeth_bridgeport_query_support(struct qeth_card *card);
 int qeth_bridgeport_query_ports(struct qeth_card *card,
 	enum qeth_sbp_roles *role, enum qeth_sbp_states *state);
 int qeth_bridgeport_setrole(struct qeth_card *card, enum qeth_sbp_roles role);
 int qeth_bridgeport_an_set(struct qeth_card *card, int enable);
-void qeth_bridge_host_event(struct qeth_card *card, struct qeth_ipa_cmd *cmd);
 int qeth_get_priority_queue(struct qeth_card *, struct sk_buff *, int, int);
 int qeth_get_elements_no(struct qeth_card *, struct sk_buff *, int);
 int qeth_get_elements_for_frags(struct sk_buff *);
diff --git a/drivers/s390/net/qeth_core_main.c b/drivers/s390/net/qeth_core_main.c
index c05dacb..c3a83df 100644
--- a/drivers/s390/net/qeth_core_main.c
+++ b/drivers/s390/net/qeth_core_main.c
@@ -69,6 +69,7 @@
 static int qeth_init_qdio_out_buf(struct qeth_qdio_out_q *, int);
 
 struct workqueue_struct *qeth_wq;
+EXPORT_SYMBOL_GPL(qeth_wq);
 
 static void qeth_close_dev_handler(struct work_struct *work)
 {
@@ -616,15 +617,12 @@
 				qeth_schedule_recovery(card);
 				return NULL;
 			case IPA_CMD_SETBRIDGEPORT:
-				if (cmd->data.sbp.hdr.command_code ==
-					IPA_SBP_BRIDGE_PORT_STATE_CHANGE) {
-					qeth_bridge_state_change(card, cmd);
-					return NULL;
-				} else
-					return cmd;
 			case IPA_CMD_ADDRESS_CHANGE_NOTIF:
-				qeth_bridge_host_event(card, cmd);
-				return NULL;
+				if (card->discipline->control_event_handler
+								(card, cmd))
+					return cmd;
+				else
+					return NULL;
 			case IPA_CMD_MODCCID:
 				return cmd;
 			case IPA_CMD_REGISTER_LOCAL_ADDR:
@@ -4973,10 +4971,6 @@
 		qeth_query_setadapterparms(card);
 	if (qeth_adp_supported(card, IPA_SETADP_SET_DIAG_ASSIST))
 		qeth_query_setdiagass(card);
-	qeth_bridgeport_query_support(card);
-	if (card->options.sbp.supported_funcs)
-		dev_info(&card->gdev->dev,
-		"The device represents a HiperSockets Bridge Capable Port\n");
 	return 0;
 out:
 	dev_warn(&card->gdev->dev, "The qeth device driver failed to recover "
diff --git a/drivers/s390/net/qeth_l2_main.c b/drivers/s390/net/qeth_l2_main.c
index 914d2c1..0710550 100644
--- a/drivers/s390/net/qeth_l2_main.c
+++ b/drivers/s390/net/qeth_l2_main.c
@@ -33,6 +33,11 @@
 					    unsigned long));
 static void qeth_l2_set_multicast_list(struct net_device *);
 static int qeth_l2_recover(void *);
+static void qeth_bridgeport_query_support(struct qeth_card *card);
+static void qeth_bridge_state_change(struct qeth_card *card,
+					struct qeth_ipa_cmd *cmd);
+static void qeth_bridge_host_event(struct qeth_card *card,
+					struct qeth_ipa_cmd *cmd);
 
 static int qeth_l2_do_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
 {
@@ -989,6 +994,10 @@
 		rc = -ENODEV;
 		goto out_remove;
 	}
+	qeth_bridgeport_query_support(card);
+	if (card->options.sbp.supported_funcs)
+		dev_info(&card->gdev->dev,
+		"The device represents a HiperSockets Bridge Capable Port\n");
 	qeth_trace_features(card);
 
 	if (!card->dev && qeth_l2_setup_netdev(card)) {
@@ -1233,6 +1242,26 @@
 	return rc;
 }
 
+/* Returns zero if the command is successfully "consumed" */
+static int qeth_l2_control_event(struct qeth_card *card,
+					struct qeth_ipa_cmd *cmd)
+{
+	switch (cmd->hdr.command) {
+	case IPA_CMD_SETBRIDGEPORT:
+		if (cmd->data.sbp.hdr.command_code ==
+				IPA_SBP_BRIDGE_PORT_STATE_CHANGE) {
+			qeth_bridge_state_change(card, cmd);
+			return 0;
+		} else
+			return 1;
+	case IPA_CMD_ADDRESS_CHANGE_NOTIF:
+		qeth_bridge_host_event(card, cmd);
+		return 0;
+	default:
+		return 1;
+	}
+}
+
 struct qeth_discipline qeth_l2_discipline = {
 	.start_poll = qeth_qdio_start_poll,
 	.input_handler = (qdio_handler_t *) qeth_qdio_input_handler,
@@ -1246,6 +1275,7 @@
 	.freeze = qeth_l2_pm_suspend,
 	.thaw = qeth_l2_pm_resume,
 	.restore = qeth_l2_pm_resume,
+	.control_event_handler = qeth_l2_control_event,
 };
 EXPORT_SYMBOL_GPL(qeth_l2_discipline);
 
@@ -1463,7 +1493,8 @@
 	kfree(data);
 }
 
-void qeth_bridge_state_change(struct qeth_card *card, struct qeth_ipa_cmd *cmd)
+static void qeth_bridge_state_change(struct qeth_card *card,
+					struct qeth_ipa_cmd *cmd)
 {
 	struct qeth_sbp_state_change *qports =
 		 &cmd->data.sbp.data.state_change;
@@ -1488,7 +1519,6 @@
 			sizeof(struct qeth_sbp_state_change) + extrasize);
 	queue_work(qeth_wq, &data->worker);
 }
-EXPORT_SYMBOL(qeth_bridge_state_change);
 
 struct qeth_bridge_host_data {
 	struct work_struct worker;
@@ -1528,7 +1558,8 @@
 	kfree(data);
 }
 
-void qeth_bridge_host_event(struct qeth_card *card, struct qeth_ipa_cmd *cmd)
+static void qeth_bridge_host_event(struct qeth_card *card,
+					struct qeth_ipa_cmd *cmd)
 {
 	struct qeth_ipacmd_addr_change *hostevs =
 		 &cmd->data.addrchange;
@@ -1560,7 +1591,6 @@
 			sizeof(struct qeth_ipacmd_addr_change) + extrasize);
 	queue_work(qeth_wq, &data->worker);
 }
-EXPORT_SYMBOL(qeth_bridge_host_event);
 
 /* SETBRIDGEPORT support; sending commands */
 
@@ -1683,7 +1713,7 @@
  * Sets bitmask of supported setbridgeport subfunctions in the qeth_card
  * structure: card->options.sbp.supported_funcs.
  */
-void qeth_bridgeport_query_support(struct qeth_card *card)
+static void qeth_bridgeport_query_support(struct qeth_card *card)
 {
 	struct qeth_cmd_buffer *iob;
 	struct qeth_ipa_cmd *cmd;
@@ -1709,7 +1739,6 @@
 	}
 	card->options.sbp.supported_funcs = cbctl.data.supported;
 }
-EXPORT_SYMBOL_GPL(qeth_bridgeport_query_support);
 
 static int qeth_bridgeport_query_ports_cb(struct qeth_card *card,
 	struct qeth_reply *reply, unsigned long data)
diff --git a/drivers/s390/net/qeth_l3_main.c b/drivers/s390/net/qeth_l3_main.c
index c1b0b27..0f43042 100644
--- a/drivers/s390/net/qeth_l3_main.c
+++ b/drivers/s390/net/qeth_l3_main.c
@@ -3593,6 +3593,13 @@
 	return rc;
 }
 
+/* Returns zero if the command is successfully "consumed" */
+static int qeth_l3_control_event(struct qeth_card *card,
+					struct qeth_ipa_cmd *cmd)
+{
+	return 1;
+}
+
 struct qeth_discipline qeth_l3_discipline = {
 	.start_poll = qeth_qdio_start_poll,
 	.input_handler = (qdio_handler_t *) qeth_qdio_input_handler,
@@ -3606,6 +3613,7 @@
 	.freeze = qeth_l3_pm_suspend,
 	.thaw = qeth_l3_pm_resume,
 	.restore = qeth_l3_pm_resume,
+	.control_event_handler = qeth_l3_control_event,
 };
 EXPORT_SYMBOL_GPL(qeth_l3_discipline);
 
diff --git a/drivers/sbus/char/bbc_i2c.c b/drivers/sbus/char/bbc_i2c.c
index c1441ed..c7763e4 100644
--- a/drivers/sbus/char/bbc_i2c.c
+++ b/drivers/sbus/char/bbc_i2c.c
@@ -11,7 +11,6 @@
 #include <linux/sched.h>
 #include <linux/wait.h>
 #include <linux/delay.h>
-#include <linux/init.h>
 #include <linux/interrupt.h>
 #include <linux/of.h>
 #include <linux/of_device.h>
diff --git a/drivers/sbus/char/display7seg.c b/drivers/sbus/char/display7seg.c
index fc1339c..7c71e7b 100644
--- a/drivers/sbus/char/display7seg.c
+++ b/drivers/sbus/char/display7seg.c
@@ -9,7 +9,6 @@
 #include <linux/fs.h>
 #include <linux/errno.h>
 #include <linux/major.h>
-#include <linux/init.h>
 #include <linux/miscdevice.h>
 #include <linux/ioport.h>		/* request_region */
 #include <linux/slab.h>
diff --git a/drivers/sbus/char/envctrl.c b/drivers/sbus/char/envctrl.c
index ddbe5a9..af15a2f 100644
--- a/drivers/sbus/char/envctrl.c
+++ b/drivers/sbus/char/envctrl.c
@@ -19,7 +19,6 @@
  */
 
 #include <linux/module.h>
-#include <linux/init.h>
 #include <linux/kthread.h>
 #include <linux/delay.h>
 #include <linux/ioport.h>
diff --git a/drivers/sbus/char/flash.c b/drivers/sbus/char/flash.c
index d9f268f..25c738e 100644
--- a/drivers/sbus/char/flash.c
+++ b/drivers/sbus/char/flash.c
@@ -9,7 +9,6 @@
 #include <linux/miscdevice.h>
 #include <linux/fcntl.h>
 #include <linux/poll.h>
-#include <linux/init.h>
 #include <linux/mutex.h>
 #include <linux/spinlock.h>
 #include <linux/mm.h>
diff --git a/drivers/sbus/char/uctrl.c b/drivers/sbus/char/uctrl.c
index b0aae05..b7acafc 100644
--- a/drivers/sbus/char/uctrl.c
+++ b/drivers/sbus/char/uctrl.c
@@ -11,7 +11,6 @@
 #include <linux/slab.h>
 #include <linux/mutex.h>
 #include <linux/ioport.h>
-#include <linux/init.h>
 #include <linux/miscdevice.h>
 #include <linux/mm.h>
 #include <linux/of.h>
diff --git a/drivers/scsi/libsas/sas_expander.c b/drivers/scsi/libsas/sas_expander.c
index 446b851..0cac7d8 100644
--- a/drivers/scsi/libsas/sas_expander.c
+++ b/drivers/scsi/libsas/sas_expander.c
@@ -2163,10 +2163,10 @@
 	}
 
 	/* do we need to support multiple segments? */
-	if (bio_segments(req->bio) > 1 || bio_segments(rsp->bio) > 1) {
-		printk("%s: multiple segments req %u %u, rsp %u %u\n",
-		       __func__, bio_segments(req->bio), blk_rq_bytes(req),
-		       bio_segments(rsp->bio), blk_rq_bytes(rsp));
+	if (bio_multiple_segments(req->bio) ||
+	    bio_multiple_segments(rsp->bio)) {
+		printk("%s: multiple segments req %u, rsp %u\n",
+		       __func__, blk_rq_bytes(req), blk_rq_bytes(rsp));
 		return -EINVAL;
 	}
 
diff --git a/drivers/scsi/mpt2sas/mpt2sas_transport.c b/drivers/scsi/mpt2sas/mpt2sas_transport.c
index 9d26637..410f4a3 100644
--- a/drivers/scsi/mpt2sas/mpt2sas_transport.c
+++ b/drivers/scsi/mpt2sas/mpt2sas_transport.c
@@ -1901,7 +1901,7 @@
 	struct MPT2SAS_ADAPTER *ioc = shost_priv(shost);
 	Mpi2SmpPassthroughRequest_t *mpi_request;
 	Mpi2SmpPassthroughReply_t *mpi_reply;
-	int rc, i;
+	int rc;
 	u16 smid;
 	u32 ioc_state;
 	unsigned long timeleft;
@@ -1916,7 +1916,8 @@
 	void *pci_addr_out = NULL;
 	u16 wait_state_count;
 	struct request *rsp = req->next_rq;
-	struct bio_vec *bvec = NULL;
+	struct bio_vec bvec;
+	struct bvec_iter iter;
 
 	if (!rsp) {
 		printk(MPT2SAS_ERR_FMT "%s: the smp response space is "
@@ -1942,7 +1943,7 @@
 	ioc->transport_cmds.status = MPT2_CMD_PENDING;
 
 	/* Check if the request is split across multiple segments */
-	if (bio_segments(req->bio) > 1) {
+	if (bio_multiple_segments(req->bio)) {
 		u32 offset = 0;
 
 		/* Allocate memory and copy the request */
@@ -1955,11 +1956,11 @@
 			goto out;
 		}
 
-		bio_for_each_segment(bvec, req->bio, i) {
+		bio_for_each_segment(bvec, req->bio, iter) {
 			memcpy(pci_addr_out + offset,
-			    page_address(bvec->bv_page) + bvec->bv_offset,
-			    bvec->bv_len);
-			offset += bvec->bv_len;
+			    page_address(bvec.bv_page) + bvec.bv_offset,
+			    bvec.bv_len);
+			offset += bvec.bv_len;
 		}
 	} else {
 		dma_addr_out = pci_map_single(ioc->pdev, bio_data(req->bio),
@@ -1974,7 +1975,7 @@
 
 	/* Check if the response needs to be populated across
 	 * multiple segments */
-	if (bio_segments(rsp->bio) > 1) {
+	if (bio_multiple_segments(rsp->bio)) {
 		pci_addr_in = pci_alloc_consistent(ioc->pdev, blk_rq_bytes(rsp),
 		    &pci_dma_in);
 		if (!pci_addr_in) {
@@ -2041,7 +2042,7 @@
 	sgl_flags = (MPI2_SGE_FLAGS_SIMPLE_ELEMENT |
 	    MPI2_SGE_FLAGS_END_OF_BUFFER | MPI2_SGE_FLAGS_HOST_TO_IOC);
 	sgl_flags = sgl_flags << MPI2_SGE_FLAGS_SHIFT;
-	if (bio_segments(req->bio) > 1) {
+	if (bio_multiple_segments(req->bio)) {
 		ioc->base_add_sg_single(psge, sgl_flags |
 		    (blk_rq_bytes(req) - 4), pci_dma_out);
 	} else {
@@ -2057,7 +2058,7 @@
 	    MPI2_SGE_FLAGS_LAST_ELEMENT | MPI2_SGE_FLAGS_END_OF_BUFFER |
 	    MPI2_SGE_FLAGS_END_OF_LIST);
 	sgl_flags = sgl_flags << MPI2_SGE_FLAGS_SHIFT;
-	if (bio_segments(rsp->bio) > 1) {
+	if (bio_multiple_segments(rsp->bio)) {
 		ioc->base_add_sg_single(psge, sgl_flags |
 		    (blk_rq_bytes(rsp) + 4), pci_dma_in);
 	} else {
@@ -2102,23 +2103,23 @@
 		    le16_to_cpu(mpi_reply->ResponseDataLength);
 		/* check if the resp needs to be copied from the allocated
 		 * pci mem */
-		if (bio_segments(rsp->bio) > 1) {
+		if (bio_multiple_segments(rsp->bio)) {
 			u32 offset = 0;
 			u32 bytes_to_copy =
 			    le16_to_cpu(mpi_reply->ResponseDataLength);
-			bio_for_each_segment(bvec, rsp->bio, i) {
-				if (bytes_to_copy <= bvec->bv_len) {
-					memcpy(page_address(bvec->bv_page) +
-					    bvec->bv_offset, pci_addr_in +
+			bio_for_each_segment(bvec, rsp->bio, iter) {
+				if (bytes_to_copy <= bvec.bv_len) {
+					memcpy(page_address(bvec.bv_page) +
+					    bvec.bv_offset, pci_addr_in +
 					    offset, bytes_to_copy);
 					break;
 				} else {
-					memcpy(page_address(bvec->bv_page) +
-					    bvec->bv_offset, pci_addr_in +
-					    offset, bvec->bv_len);
-					bytes_to_copy -= bvec->bv_len;
+					memcpy(page_address(bvec.bv_page) +
+					    bvec.bv_offset, pci_addr_in +
+					    offset, bvec.bv_len);
+					bytes_to_copy -= bvec.bv_len;
 				}
-				offset += bvec->bv_len;
+				offset += bvec.bv_len;
 			}
 		}
 	} else {
diff --git a/drivers/scsi/mpt3sas/mpt3sas_transport.c b/drivers/scsi/mpt3sas/mpt3sas_transport.c
index e771a88..65170cb 100644
--- a/drivers/scsi/mpt3sas/mpt3sas_transport.c
+++ b/drivers/scsi/mpt3sas/mpt3sas_transport.c
@@ -1884,7 +1884,7 @@
 	struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
 	Mpi2SmpPassthroughRequest_t *mpi_request;
 	Mpi2SmpPassthroughReply_t *mpi_reply;
-	int rc, i;
+	int rc;
 	u16 smid;
 	u32 ioc_state;
 	unsigned long timeleft;
@@ -1898,7 +1898,8 @@
 	void *pci_addr_out = NULL;
 	u16 wait_state_count;
 	struct request *rsp = req->next_rq;
-	struct bio_vec *bvec = NULL;
+	struct bio_vec bvec;
+	struct bvec_iter iter;
 
 	if (!rsp) {
 		pr_err(MPT3SAS_FMT "%s: the smp response space is missing\n",
@@ -1925,7 +1926,7 @@
 	ioc->transport_cmds.status = MPT3_CMD_PENDING;
 
 	/* Check if the request is split across multiple segments */
-	if (req->bio->bi_vcnt > 1) {
+	if (bio_multiple_segments(req->bio)) {
 		u32 offset = 0;
 
 		/* Allocate memory and copy the request */
@@ -1938,11 +1939,11 @@
 			goto out;
 		}
 
-		bio_for_each_segment(bvec, req->bio, i) {
+		bio_for_each_segment(bvec, req->bio, iter) {
 			memcpy(pci_addr_out + offset,
-			    page_address(bvec->bv_page) + bvec->bv_offset,
-			    bvec->bv_len);
-			offset += bvec->bv_len;
+			    page_address(bvec.bv_page) + bvec.bv_offset,
+			    bvec.bv_len);
+			offset += bvec.bv_len;
 		}
 	} else {
 		dma_addr_out = pci_map_single(ioc->pdev, bio_data(req->bio),
@@ -1957,7 +1958,7 @@
 
 	/* Check if the response needs to be populated across
 	 * multiple segments */
-	if (rsp->bio->bi_vcnt > 1) {
+	if (bio_multiple_segments(rsp->bio)) {
 		pci_addr_in = pci_alloc_consistent(ioc->pdev, blk_rq_bytes(rsp),
 		    &pci_dma_in);
 		if (!pci_addr_in) {
@@ -2018,7 +2019,7 @@
 	mpi_request->RequestDataLength = cpu_to_le16(blk_rq_bytes(req) - 4);
 	psge = &mpi_request->SGL;
 
-	if (req->bio->bi_vcnt > 1)
+	if (bio_multiple_segments(req->bio))
 		ioc->build_sg(ioc, psge, pci_dma_out, (blk_rq_bytes(req) - 4),
 		    pci_dma_in, (blk_rq_bytes(rsp) + 4));
 	else
@@ -2063,23 +2064,23 @@
 
 		/* check if the resp needs to be copied from the allocated
 		 * pci mem */
-		if (rsp->bio->bi_vcnt > 1) {
+		if (bio_multiple_segments(rsp->bio)) {
 			u32 offset = 0;
 			u32 bytes_to_copy =
 			    le16_to_cpu(mpi_reply->ResponseDataLength);
-			bio_for_each_segment(bvec, rsp->bio, i) {
-				if (bytes_to_copy <= bvec->bv_len) {
-					memcpy(page_address(bvec->bv_page) +
-					    bvec->bv_offset, pci_addr_in +
+			bio_for_each_segment(bvec, rsp->bio, iter) {
+				if (bytes_to_copy <= bvec.bv_len) {
+					memcpy(page_address(bvec.bv_page) +
+					    bvec.bv_offset, pci_addr_in +
 					    offset, bytes_to_copy);
 					break;
 				} else {
-					memcpy(page_address(bvec->bv_page) +
-					    bvec->bv_offset, pci_addr_in +
-					    offset, bvec->bv_len);
-					bytes_to_copy -= bvec->bv_len;
+					memcpy(page_address(bvec.bv_page) +
+					    bvec.bv_offset, pci_addr_in +
+					    offset, bvec.bv_len);
+					bytes_to_copy -= bvec.bv_len;
 				}
-				offset += bvec->bv_len;
+				offset += bvec.bv_len;
 			}
 		}
 	} else {
diff --git a/drivers/scsi/osd/osd_initiator.c b/drivers/scsi/osd/osd_initiator.c
index aa66361..bac04c2 100644
--- a/drivers/scsi/osd/osd_initiator.c
+++ b/drivers/scsi/osd/osd_initiator.c
@@ -731,7 +731,7 @@
 
 	bio->bi_rw &= ~REQ_WRITE;
 	or->in.bio = bio;
-	or->in.total_bytes = bio->bi_size;
+	or->in.total_bytes = bio->bi_iter.bi_size;
 	return 0;
 }
 
diff --git a/drivers/scsi/qla2xxx/qla_attr.c b/drivers/scsi/qla2xxx/qla_attr.c
index 570c7fc..4a0d7c9 100644
--- a/drivers/scsi/qla2xxx/qla_attr.c
+++ b/drivers/scsi/qla2xxx/qla_attr.c
@@ -1990,6 +1990,8 @@
 
 	vha->flags.delete_progress = 1;
 
+	qlt_remove_target(ha, vha);
+
 	fc_remove_host(vha->host);
 
 	scsi_remove_host(vha->host);
diff --git a/drivers/scsi/qla2xxx/qla_def.h b/drivers/scsi/qla2xxx/qla_def.h
index 41d6491..e1fe95e 100644
--- a/drivers/scsi/qla2xxx/qla_def.h
+++ b/drivers/scsi/qla2xxx/qla_def.h
@@ -2750,6 +2750,13 @@
 	uint32_t len;
 };
 
+struct scsi_qlt_host {
+	void *target_lport_ptr;
+	struct mutex tgt_mutex;
+	struct mutex tgt_host_action_mutex;
+	struct qla_tgt *qla_tgt;
+};
+
 struct qlt_hw_data {
 	/* Protected by hw lock */
 	uint32_t enable_class_2:1;
@@ -2765,15 +2772,11 @@
 	uint32_t __iomem *atio_q_in;
 	uint32_t __iomem *atio_q_out;
 
-	void *target_lport_ptr;
 	struct qla_tgt_func_tmpl *tgt_ops;
-	struct qla_tgt *qla_tgt;
 	struct qla_tgt_cmd *cmds[DEFAULT_OUTSTANDING_COMMANDS];
 	uint16_t current_handle;
 
 	struct qla_tgt_vp_map *tgt_vp_map;
-	struct mutex tgt_mutex;
-	struct mutex tgt_host_action_mutex;
 
 	int saved_set;
 	uint16_t saved_exchange_count;
@@ -3435,6 +3438,7 @@
 #define VP_ERR_FAB_LOGOUT	4
 #define VP_ERR_ADAP_NORESOURCES	5
 	struct qla_hw_data *hw;
+	struct scsi_qlt_host vha_tgt;
 	struct req_que *req;
 	int		fw_heartbeat_counter;
 	int		seconds_since_last_heartbeat;
diff --git a/drivers/scsi/qla2xxx/qla_target.c b/drivers/scsi/qla2xxx/qla_target.c
index 38a1257..2eb97d7 100644
--- a/drivers/scsi/qla2xxx/qla_target.c
+++ b/drivers/scsi/qla2xxx/qla_target.c
@@ -590,7 +590,7 @@
 
 	/* Check to avoid double sessions */
 	spin_lock_irqsave(&ha->hardware_lock, flags);
-	list_for_each_entry(sess, &ha->tgt.qla_tgt->sess_list,
+	list_for_each_entry(sess, &vha->vha_tgt.qla_tgt->sess_list,
 				sess_list_entry) {
 		if (!memcmp(sess->port_name, fcport->port_name, WWN_SIZE)) {
 			ql_dbg(ql_dbg_tgt_mgt, vha, 0xf005,
@@ -627,7 +627,7 @@
 
 		return NULL;
 	}
-	sess->tgt = ha->tgt.qla_tgt;
+	sess->tgt = vha->vha_tgt.qla_tgt;
 	sess->vha = vha;
 	sess->s_id = fcport->d_id;
 	sess->loop_id = fcport->loop_id;
@@ -635,7 +635,7 @@
 
 	ql_dbg(ql_dbg_tgt_mgt, vha, 0xf006,
 	    "Adding sess %p to tgt %p via ->check_initiator_node_acl()\n",
-	    sess, ha->tgt.qla_tgt);
+	    sess, vha->vha_tgt.qla_tgt);
 
 	be_sid[0] = sess->s_id.b.domain;
 	be_sid[1] = sess->s_id.b.area;
@@ -662,8 +662,8 @@
 	memcpy(sess->port_name, fcport->port_name, sizeof(sess->port_name));
 
 	spin_lock_irqsave(&ha->hardware_lock, flags);
-	list_add_tail(&sess->sess_list_entry, &ha->tgt.qla_tgt->sess_list);
-	ha->tgt.qla_tgt->sess_count++;
+	list_add_tail(&sess->sess_list_entry, &vha->vha_tgt.qla_tgt->sess_list);
+	vha->vha_tgt.qla_tgt->sess_count++;
 	spin_unlock_irqrestore(&ha->hardware_lock, flags);
 
 	ql_dbg(ql_dbg_tgt_mgt, vha, 0xf04b,
@@ -682,7 +682,7 @@
 void qlt_fc_port_added(struct scsi_qla_host *vha, fc_port_t *fcport)
 {
 	struct qla_hw_data *ha = vha->hw;
-	struct qla_tgt *tgt = ha->tgt.qla_tgt;
+	struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;
 	struct qla_tgt_sess *sess;
 	unsigned long flags;
 
@@ -692,6 +692,9 @@
 	if (!tgt || (fcport->port_type != FCT_INITIATOR))
 		return;
 
+	if (qla_ini_mode_enabled(vha))
+		return;
+
 	spin_lock_irqsave(&ha->hardware_lock, flags);
 	if (tgt->tgt_stop) {
 		spin_unlock_irqrestore(&ha->hardware_lock, flags);
@@ -701,9 +704,9 @@
 	if (!sess) {
 		spin_unlock_irqrestore(&ha->hardware_lock, flags);
 
-		mutex_lock(&ha->tgt.tgt_mutex);
+		mutex_lock(&vha->vha_tgt.tgt_mutex);
 		sess = qlt_create_sess(vha, fcport, false);
-		mutex_unlock(&ha->tgt.tgt_mutex);
+		mutex_unlock(&vha->vha_tgt.tgt_mutex);
 
 		spin_lock_irqsave(&ha->hardware_lock, flags);
 	} else {
@@ -739,7 +742,7 @@
 void qlt_fc_port_deleted(struct scsi_qla_host *vha, fc_port_t *fcport)
 {
 	struct qla_hw_data *ha = vha->hw;
-	struct qla_tgt *tgt = ha->tgt.qla_tgt;
+	struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;
 	struct qla_tgt_sess *sess;
 	unsigned long flags;
 
@@ -806,12 +809,12 @@
 	 * Mutex needed to sync with qla_tgt_fc_port_[added,deleted].
 	 * Lock is needed, because we still can get an incoming packet.
 	 */
-	mutex_lock(&ha->tgt.tgt_mutex);
+	mutex_lock(&vha->vha_tgt.tgt_mutex);
 	spin_lock_irqsave(&ha->hardware_lock, flags);
 	tgt->tgt_stop = 1;
 	qlt_clear_tgt_db(tgt, true);
 	spin_unlock_irqrestore(&ha->hardware_lock, flags);
-	mutex_unlock(&ha->tgt.tgt_mutex);
+	mutex_unlock(&vha->vha_tgt.tgt_mutex);
 
 	flush_delayed_work(&tgt->sess_del_work);
 
@@ -845,20 +848,21 @@
 void qlt_stop_phase2(struct qla_tgt *tgt)
 {
 	struct qla_hw_data *ha = tgt->ha;
+	scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);
 	unsigned long flags;
 
 	if (tgt->tgt_stopped) {
-		ql_dbg(ql_dbg_tgt_mgt, tgt->vha, 0xf04f,
+		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf04f,
 		    "Already in tgt->tgt_stopped state\n");
 		dump_stack();
 		return;
 	}
 
-	ql_dbg(ql_dbg_tgt_mgt, tgt->vha, 0xf00b,
+	ql_dbg(ql_dbg_tgt_mgt, vha, 0xf00b,
 	    "Waiting for %d IRQ commands to complete (tgt %p)",
 	    tgt->irq_cmd_count, tgt);
 
-	mutex_lock(&ha->tgt.tgt_mutex);
+	mutex_lock(&vha->vha_tgt.tgt_mutex);
 	spin_lock_irqsave(&ha->hardware_lock, flags);
 	while (tgt->irq_cmd_count != 0) {
 		spin_unlock_irqrestore(&ha->hardware_lock, flags);
@@ -868,9 +872,9 @@
 	tgt->tgt_stop = 0;
 	tgt->tgt_stopped = 1;
 	spin_unlock_irqrestore(&ha->hardware_lock, flags);
-	mutex_unlock(&ha->tgt.tgt_mutex);
+	mutex_unlock(&vha->vha_tgt.tgt_mutex);
 
-	ql_dbg(ql_dbg_tgt_mgt, tgt->vha, 0xf00c, "Stop of tgt %p finished",
+	ql_dbg(ql_dbg_tgt_mgt, vha, 0xf00c, "Stop of tgt %p finished",
 	    tgt);
 }
 EXPORT_SYMBOL(qlt_stop_phase2);
@@ -878,14 +882,14 @@
 /* Called from qlt_remove_target() -> qla2x00_remove_one() */
 static void qlt_release(struct qla_tgt *tgt)
 {
-	struct qla_hw_data *ha = tgt->ha;
+	scsi_qla_host_t *vha = tgt->vha;
 
-	if ((ha->tgt.qla_tgt != NULL) && !tgt->tgt_stopped)
+	if ((vha->vha_tgt.qla_tgt != NULL) && !tgt->tgt_stopped)
 		qlt_stop_phase2(tgt);
 
-	ha->tgt.qla_tgt = NULL;
+	vha->vha_tgt.qla_tgt = NULL;
 
-	ql_dbg(ql_dbg_tgt_mgt, tgt->vha, 0xf00d,
+	ql_dbg(ql_dbg_tgt_mgt, vha, 0xf00d,
 	    "Release of tgt %p finished\n", tgt);
 
 	kfree(tgt);
@@ -949,8 +953,8 @@
 		return;
 	}
 
-	if (ha->tgt.qla_tgt != NULL)
-		ha->tgt.qla_tgt->notify_ack_expected++;
+	if (vha->vha_tgt.qla_tgt != NULL)
+		vha->vha_tgt.qla_tgt->notify_ack_expected++;
 
 	pkt->entry_type = NOTIFY_ACK_TYPE;
 	pkt->entry_count = 1;
@@ -1054,7 +1058,7 @@
 		/* Other bytes are zero */
 	}
 
-	ha->tgt.qla_tgt->abts_resp_expected++;
+	vha->vha_tgt.qla_tgt->abts_resp_expected++;
 
 	qla2x00_start_iocbs(vha, vha->req);
 }
@@ -1206,7 +1210,7 @@
 		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf012,
 		    "qla_target(%d): task abort for non-existant session\n",
 		    vha->vp_idx);
-		rc = qlt_sched_sess_work(ha->tgt.qla_tgt,
+		rc = qlt_sched_sess_work(vha->vha_tgt.qla_tgt,
 		    QLA_TGT_SESS_WORK_ABORT, abts, sizeof(*abts));
 		if (rc != 0) {
 			qlt_24xx_send_abts_resp(vha, abts, FCP_TMF_REJECTED,
@@ -2157,8 +2161,7 @@
 	struct qla_tgt_cmd *cmd, void *ctio)
 {
 	struct qla_tgt_srr_ctio *sc;
-	struct qla_hw_data *ha = vha->hw;
-	struct qla_tgt *tgt = ha->tgt.qla_tgt;
+	struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;
 	struct qla_tgt_srr_imm *imm;
 
 	tgt->ctio_srr_id++;
@@ -2474,7 +2477,7 @@
 	struct qla_tgt_cmd *cmd = container_of(work, struct qla_tgt_cmd, work);
 	scsi_qla_host_t *vha = cmd->vha;
 	struct qla_hw_data *ha = vha->hw;
-	struct qla_tgt *tgt = ha->tgt.qla_tgt;
+	struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;
 	struct qla_tgt_sess *sess = NULL;
 	struct atio_from_isp *atio = &cmd->atio;
 	unsigned char *cdb;
@@ -2507,10 +2510,10 @@
 			goto out_term;
 		}
 
-		mutex_lock(&ha->tgt.tgt_mutex);
+		mutex_lock(&vha->vha_tgt.tgt_mutex);
 		sess = qlt_make_local_sess(vha, s_id);
 		/* sess has an extra creation ref. */
-		mutex_unlock(&ha->tgt.tgt_mutex);
+		mutex_unlock(&vha->vha_tgt.tgt_mutex);
 
 		if (!sess)
 			goto out_term;
@@ -2576,8 +2579,7 @@
 static int qlt_handle_cmd_for_atio(struct scsi_qla_host *vha,
 	struct atio_from_isp *atio)
 {
-	struct qla_hw_data *ha = vha->hw;
-	struct qla_tgt *tgt = ha->tgt.qla_tgt;
+	struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;
 	struct qla_tgt_cmd *cmd;
 
 	if (unlikely(tgt->tgt_stop)) {
@@ -2593,11 +2595,9 @@
 		return -ENOMEM;
 	}
 
-	INIT_LIST_HEAD(&cmd->cmd_list);
-
 	memcpy(&cmd->atio, atio, sizeof(*atio));
 	cmd->state = QLA_TGT_STATE_NEW;
-	cmd->tgt = ha->tgt.qla_tgt;
+	cmd->tgt = vha->vha_tgt.qla_tgt;
 	cmd->vha = vha;
 
 	INIT_WORK(&cmd->work, qlt_do_work);
@@ -2723,7 +2723,7 @@
 	uint32_t lun, unpacked_lun;
 	int lun_size, fn;
 
-	tgt = ha->tgt.qla_tgt;
+	tgt = vha->vha_tgt.qla_tgt;
 
 	lun = a->u.isp24.fcp_cmnd.lun;
 	lun_size = sizeof(a->u.isp24.fcp_cmnd.lun);
@@ -2797,7 +2797,7 @@
 		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf025,
 		    "qla_target(%d): task abort for unexisting "
 		    "session\n", vha->vp_idx);
-		return qlt_sched_sess_work(ha->tgt.qla_tgt,
+		return qlt_sched_sess_work(vha->vha_tgt.qla_tgt,
 		    QLA_TGT_SESS_WORK_ABORT, iocb, sizeof(*iocb));
 	}
 
@@ -2810,7 +2810,6 @@
 static int qlt_24xx_handle_els(struct scsi_qla_host *vha,
 	struct imm_ntfy_from_isp *iocb)
 {
-	struct qla_hw_data *ha = vha->hw;
 	int res = 0;
 
 	ql_dbg(ql_dbg_tgt_mgt, vha, 0xf026,
@@ -2828,7 +2827,7 @@
 	case ELS_PDISC:
 	case ELS_ADISC:
 	{
-		struct qla_tgt *tgt = ha->tgt.qla_tgt;
+		struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;
 		if (tgt->link_reinit_iocb_pending) {
 			qlt_send_notify_ack(vha, &tgt->link_reinit_iocb,
 			    0, 0, 0, 0, 0, 0);
@@ -3202,8 +3201,7 @@
 	struct imm_ntfy_from_isp *iocb)
 {
 	struct qla_tgt_srr_imm *imm;
-	struct qla_hw_data *ha = vha->hw;
-	struct qla_tgt *tgt = ha->tgt.qla_tgt;
+	struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;
 	struct qla_tgt_srr_ctio *sctio;
 
 	tgt->imm_srr_id++;
@@ -3313,7 +3311,7 @@
 
 	case IMM_NTFY_LIP_LINK_REINIT:
 	{
-		struct qla_tgt *tgt = ha->tgt.qla_tgt;
+		struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;
 		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf033,
 		    "qla_target(%d): LINK REINIT (loop %#x, "
 		    "subcode %x)\n", vha->vp_idx,
@@ -3489,7 +3487,7 @@
 	struct atio_from_isp *atio)
 {
 	struct qla_hw_data *ha = vha->hw;
-	struct qla_tgt *tgt = ha->tgt.qla_tgt;
+	struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;
 	int rc;
 
 	if (unlikely(tgt == NULL)) {
@@ -3591,7 +3589,7 @@
 static void qlt_response_pkt(struct scsi_qla_host *vha, response_t *pkt)
 {
 	struct qla_hw_data *ha = vha->hw;
-	struct qla_tgt *tgt = ha->tgt.qla_tgt;
+	struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;
 
 	if (unlikely(tgt == NULL)) {
 		ql_dbg(ql_dbg_tgt, vha, 0xe05d,
@@ -3794,7 +3792,7 @@
 	uint16_t *mailbox)
 {
 	struct qla_hw_data *ha = vha->hw;
-	struct qla_tgt *tgt = ha->tgt.qla_tgt;
+	struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;
 	int login_code;
 
 	ql_dbg(ql_dbg_tgt, vha, 0xe039,
@@ -3924,14 +3922,14 @@
 static struct qla_tgt_sess *qlt_make_local_sess(struct scsi_qla_host *vha,
 	uint8_t *s_id)
 {
-	struct qla_hw_data *ha = vha->hw;
 	struct qla_tgt_sess *sess = NULL;
 	fc_port_t *fcport = NULL;
 	int rc, global_resets;
 	uint16_t loop_id = 0;
 
 retry:
-	global_resets = atomic_read(&ha->tgt.qla_tgt->tgt_global_resets_count);
+	global_resets =
+	    atomic_read(&vha->vha_tgt.qla_tgt->tgt_global_resets_count);
 
 	rc = qla24xx_get_loop_id(vha, s_id, &loop_id);
 	if (rc != 0) {
@@ -3958,12 +3956,13 @@
 		return NULL;
 
 	if (global_resets !=
-	    atomic_read(&ha->tgt.qla_tgt->tgt_global_resets_count)) {
+	    atomic_read(&vha->vha_tgt.qla_tgt->tgt_global_resets_count)) {
 		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf043,
 		    "qla_target(%d): global reset during session discovery "
 		    "(counter was %d, new %d), retrying", vha->vp_idx,
 		    global_resets,
-		    atomic_read(&ha->tgt.qla_tgt->tgt_global_resets_count));
+		    atomic_read(&vha->vha_tgt.
+			qla_tgt->tgt_global_resets_count));
 		goto retry;
 	}
 
@@ -3998,10 +3997,10 @@
 	if (!sess) {
 		spin_unlock_irqrestore(&ha->hardware_lock, flags);
 
-		mutex_lock(&ha->tgt.tgt_mutex);
+		mutex_lock(&vha->vha_tgt.tgt_mutex);
 		sess = qlt_make_local_sess(vha, s_id);
 		/* sess has got an extra creation ref */
-		mutex_unlock(&ha->tgt.tgt_mutex);
+		mutex_unlock(&vha->vha_tgt.tgt_mutex);
 
 		spin_lock_irqsave(&ha->hardware_lock, flags);
 		if (!sess)
@@ -4052,10 +4051,10 @@
 	if (!sess) {
 		spin_unlock_irqrestore(&ha->hardware_lock, flags);
 
-		mutex_lock(&ha->tgt.tgt_mutex);
+		mutex_lock(&vha->vha_tgt.tgt_mutex);
 		sess = qlt_make_local_sess(vha, s_id);
 		/* sess has got an extra creation ref */
-		mutex_unlock(&ha->tgt.tgt_mutex);
+		mutex_unlock(&vha->vha_tgt.tgt_mutex);
 
 		spin_lock_irqsave(&ha->hardware_lock, flags);
 		if (!sess)
@@ -4141,9 +4140,9 @@
 	}
 
 	ql_dbg(ql_dbg_tgt, base_vha, 0xe03b,
-	    "Registering target for host %ld(%p)", base_vha->host_no, ha);
+	    "Registering target for host %ld(%p).\n", base_vha->host_no, ha);
 
-	BUG_ON((ha->tgt.qla_tgt != NULL) || (ha->tgt.tgt_ops != NULL));
+	BUG_ON(base_vha->vha_tgt.qla_tgt != NULL);
 
 	tgt = kzalloc(sizeof(struct qla_tgt), GFP_KERNEL);
 	if (!tgt) {
@@ -4171,7 +4170,7 @@
 	INIT_WORK(&tgt->srr_work, qlt_handle_srr_work);
 	atomic_set(&tgt->tgt_global_resets_count, 0);
 
-	ha->tgt.qla_tgt = tgt;
+	base_vha->vha_tgt.qla_tgt = tgt;
 
 	ql_dbg(ql_dbg_tgt, base_vha, 0xe067,
 		"qla_target(%d): using 64 Bit PCI addressing",
@@ -4192,16 +4191,16 @@
 /* Must be called under tgt_host_action_mutex */
 int qlt_remove_target(struct qla_hw_data *ha, struct scsi_qla_host *vha)
 {
-	if (!ha->tgt.qla_tgt)
+	if (!vha->vha_tgt.qla_tgt)
 		return 0;
 
 	mutex_lock(&qla_tgt_mutex);
-	list_del(&ha->tgt.qla_tgt->tgt_list_entry);
+	list_del(&vha->vha_tgt.qla_tgt->tgt_list_entry);
 	mutex_unlock(&qla_tgt_mutex);
 
 	ql_dbg(ql_dbg_tgt, vha, 0xe03c, "Unregistering target for host %ld(%p)",
 	    vha->host_no, ha);
-	qlt_release(ha->tgt.qla_tgt);
+	qlt_release(vha->vha_tgt.qla_tgt);
 
 	return 0;
 }
@@ -4235,8 +4234,9 @@
  * @callback:  lport initialization callback for tcm_qla2xxx code
  * @target_lport_ptr: pointer for tcm_qla2xxx specific lport data
  */
-int qlt_lport_register(struct qla_tgt_func_tmpl *qla_tgt_ops, u64 wwpn,
-	int (*callback)(struct scsi_qla_host *), void *target_lport_ptr)
+int qlt_lport_register(void *target_lport_ptr, u64 phys_wwpn,
+		       u64 npiv_wwpn, u64 npiv_wwnn,
+		       int (*callback)(struct scsi_qla_host *, void *, u64, u64))
 {
 	struct qla_tgt *tgt;
 	struct scsi_qla_host *vha;
@@ -4255,14 +4255,11 @@
 		if (!host)
 			continue;
 
-		if (ha->tgt.tgt_ops != NULL)
-			continue;
-
 		if (!(host->hostt->supported_mode & MODE_TARGET))
 			continue;
 
 		spin_lock_irqsave(&ha->hardware_lock, flags);
-		if (host->active_mode & MODE_TARGET) {
+		if ((!npiv_wwpn || !npiv_wwnn) && host->active_mode & MODE_TARGET) {
 			pr_debug("MODE_TARGET already active on qla2xxx(%d)\n",
 			    host->host_no);
 			spin_unlock_irqrestore(&ha->hardware_lock, flags);
@@ -4276,24 +4273,18 @@
 			    " qla2xxx scsi_host\n");
 			continue;
 		}
-		qlt_lport_dump(vha, wwpn, b);
+		qlt_lport_dump(vha, phys_wwpn, b);
 
 		if (memcmp(vha->port_name, b, WWN_SIZE)) {
 			scsi_host_put(host);
 			continue;
 		}
-		/*
-		 * Setup passed parameters ahead of invoking callback
-		 */
-		ha->tgt.tgt_ops = qla_tgt_ops;
-		ha->tgt.target_lport_ptr = target_lport_ptr;
-		rc = (*callback)(vha);
-		if (rc != 0) {
-			ha->tgt.tgt_ops = NULL;
-			ha->tgt.target_lport_ptr = NULL;
-			scsi_host_put(host);
-		}
 		mutex_unlock(&qla_tgt_mutex);
+
+		rc = (*callback)(vha, target_lport_ptr, npiv_wwpn, npiv_wwnn);
+		if (rc != 0)
+			scsi_host_put(host);
+
 		return rc;
 	}
 	mutex_unlock(&qla_tgt_mutex);
@@ -4314,7 +4305,7 @@
 	/*
 	 * Clear the target_lport_ptr qla_target_template pointer in qla_hw_data
 	 */
-	ha->tgt.target_lport_ptr = NULL;
+	vha->vha_tgt.target_lport_ptr = NULL;
 	ha->tgt.tgt_ops = NULL;
 	/*
 	 * Release the Scsi_Host reference for the underlying qla2xxx host
@@ -4376,8 +4367,9 @@
 qlt_enable_vha(struct scsi_qla_host *vha)
 {
 	struct qla_hw_data *ha = vha->hw;
-	struct qla_tgt *tgt = ha->tgt.qla_tgt;
+	struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;
 	unsigned long flags;
+	scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev);
 
 	if (!tgt) {
 		ql_dbg(ql_dbg_tgt, vha, 0xe069,
@@ -4392,9 +4384,14 @@
 	qlt_set_mode(vha);
 	spin_unlock_irqrestore(&ha->hardware_lock, flags);
 
-	set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
-	qla2xxx_wake_dpc(vha);
-	qla2x00_wait_for_hba_online(vha);
+	if (vha->vp_idx) {
+		qla24xx_disable_vp(vha);
+		qla24xx_enable_vp(vha);
+	} else {
+		set_bit(ISP_ABORT_NEEDED, &base_vha->dpc_flags);
+		qla2xxx_wake_dpc(base_vha);
+		qla2x00_wait_for_hba_online(base_vha);
+	}
 }
 EXPORT_SYMBOL(qlt_enable_vha);
 
@@ -4407,7 +4404,7 @@
 qlt_disable_vha(struct scsi_qla_host *vha)
 {
 	struct qla_hw_data *ha = vha->hw;
-	struct qla_tgt *tgt = ha->tgt.qla_tgt;
+	struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;
 	unsigned long flags;
 
 	if (!tgt) {
@@ -4438,8 +4435,10 @@
 	if (!qla_tgt_mode_enabled(vha))
 		return;
 
-	mutex_init(&ha->tgt.tgt_mutex);
-	mutex_init(&ha->tgt.tgt_host_action_mutex);
+	vha->vha_tgt.qla_tgt = NULL;
+
+	mutex_init(&vha->vha_tgt.tgt_mutex);
+	mutex_init(&vha->vha_tgt.tgt_host_action_mutex);
 
 	qlt_clear_mode(vha);
 
@@ -4450,6 +4449,8 @@
 	 * assigning the value appropriately.
 	 */
 	ha->tgt.atio_q_length = ATIO_ENTRY_CNT_24XX;
+
+	qlt_add_target(ha, vha);
 }
 
 void
@@ -4768,8 +4769,8 @@
 		ISP_ATIO_Q_OUT(base_vha) = &ha->iobase->isp24.atio_q_out;
 	}
 
-	mutex_init(&ha->tgt.tgt_mutex);
-	mutex_init(&ha->tgt.tgt_host_action_mutex);
+	mutex_init(&base_vha->vha_tgt.tgt_mutex);
+	mutex_init(&base_vha->vha_tgt.tgt_host_action_mutex);
 	qlt_clear_mode(base_vha);
 }
 
diff --git a/drivers/scsi/qla2xxx/qla_target.h b/drivers/scsi/qla2xxx/qla_target.h
index b33e411..66e755c 100644
--- a/drivers/scsi/qla2xxx/qla_target.h
+++ b/drivers/scsi/qla2xxx/qla_target.h
@@ -855,7 +855,6 @@
 	uint16_t loop_id;	/* to save extra sess dereferences */
 	struct qla_tgt *tgt;	/* to save extra sess dereferences */
 	struct scsi_qla_host *vha;
-	struct list_head cmd_list;
 
 	struct atio_from_isp atio;
 };
@@ -932,8 +931,8 @@
  */
 extern int qlt_add_target(struct qla_hw_data *, struct scsi_qla_host *);
 extern int qlt_remove_target(struct qla_hw_data *, struct scsi_qla_host *);
-extern int qlt_lport_register(struct qla_tgt_func_tmpl *, u64,
-			int (*callback)(struct scsi_qla_host *), void *);
+extern int qlt_lport_register(void *, u64, u64, u64,
+			int (*callback)(struct scsi_qla_host *, void *, u64, u64));
 extern void qlt_lport_deregister(struct scsi_qla_host *);
 extern void qlt_unreg_sess(struct qla_tgt_sess *);
 extern void qlt_fc_port_added(struct scsi_qla_host *, fc_port_t *);
diff --git a/drivers/scsi/qla2xxx/tcm_qla2xxx.c b/drivers/scsi/qla2xxx/tcm_qla2xxx.c
index 7eb19be..75a141b 100644
--- a/drivers/scsi/qla2xxx/tcm_qla2xxx.c
+++ b/drivers/scsi/qla2xxx/tcm_qla2xxx.c
@@ -53,16 +53,6 @@
 struct workqueue_struct *tcm_qla2xxx_free_wq;
 struct workqueue_struct *tcm_qla2xxx_cmd_wq;
 
-static int tcm_qla2xxx_check_true(struct se_portal_group *se_tpg)
-{
-	return 1;
-}
-
-static int tcm_qla2xxx_check_false(struct se_portal_group *se_tpg)
-{
-	return 0;
-}
-
 /*
  * Parse WWN.
  * If strict, we require lower-case hex and colon separators to be sure
@@ -174,7 +164,7 @@
 	*wwnn = 0;
 
 	/* count may include a LF at end of string */
-	if (name[cnt-1] == '\n')
+	if (name[cnt-1] == '\n' || name[cnt-1] == 0)
 		cnt--;
 
 	/* validate we have enough characters for WWPN */
@@ -777,6 +767,9 @@
 
 static void tcm_qla2xxx_put_sess(struct qla_tgt_sess *sess)
 {
+	if (!sess)
+		return;
+
 	assert_spin_locked(&sess->vha->hw->hardware_lock);
 	kref_put(&sess->se_sess->sess_kref, tcm_qla2xxx_release_session);
 }
@@ -957,7 +950,6 @@
 	struct tcm_qla2xxx_lport *lport = container_of(se_wwn,
 			struct tcm_qla2xxx_lport, lport_wwn);
 	struct scsi_qla_host *vha = lport->qla_vha;
-	struct qla_hw_data *ha = vha->hw;
 	struct tcm_qla2xxx_tpg *tpg = container_of(se_tpg,
 			struct tcm_qla2xxx_tpg, se_tpg);
 	unsigned long op;
@@ -977,12 +969,12 @@
 		atomic_set(&tpg->lport_tpg_enabled, 1);
 		qlt_enable_vha(vha);
 	} else {
-		if (!ha->tgt.qla_tgt) {
-			pr_err("truct qla_hw_data *ha->tgt.qla_tgt is NULL\n");
+		if (!vha->vha_tgt.qla_tgt) {
+			pr_err("struct qla_hw_data *vha->vha_tgt.qla_tgt is NULL\n");
 			return -ENODEV;
 		}
 		atomic_set(&tpg->lport_tpg_enabled, 0);
-		qlt_stop_phase1(ha->tgt.qla_tgt);
+		qlt_stop_phase1(vha->vha_tgt.qla_tgt);
 	}
 
 	return count;
@@ -1011,7 +1003,7 @@
 	if (kstrtoul(name + 5, 10, &tpgt) || tpgt > USHRT_MAX)
 		return ERR_PTR(-EINVAL);
 
-	if (!lport->qla_npiv_vp && (tpgt != 1)) {
+	if ((tpgt != 1)) {
 		pr_err("In non NPIV mode, a single TPG=1 is used for HW port mappings\n");
 		return ERR_PTR(-ENOSYS);
 	}
@@ -1038,11 +1030,8 @@
 		kfree(tpg);
 		return NULL;
 	}
-	/*
-	 * Setup local TPG=1 pointer for non NPIV mode.
-	 */
-	if (lport->qla_npiv_vp == NULL)
-		lport->tpg_1 = tpg;
+
+	lport->tpg_1 = tpg;
 
 	return &tpg->se_tpg;
 }
@@ -1053,19 +1042,17 @@
 			struct tcm_qla2xxx_tpg, se_tpg);
 	struct tcm_qla2xxx_lport *lport = tpg->lport;
 	struct scsi_qla_host *vha = lport->qla_vha;
-	struct qla_hw_data *ha = vha->hw;
 	/*
 	 * Call into qla2x_target.c LLD logic to shutdown the active
 	 * FC Nexuses and disable target mode operation for this qla_hw_data
 	 */
-	if (ha->tgt.qla_tgt && !ha->tgt.qla_tgt->tgt_stop)
-		qlt_stop_phase1(ha->tgt.qla_tgt);
+	if (vha->vha_tgt.qla_tgt && !vha->vha_tgt.qla_tgt->tgt_stop)
+		qlt_stop_phase1(vha->vha_tgt.qla_tgt);
 
 	core_tpg_deregister(se_tpg);
 	/*
 	 * Clear local TPG=1 pointer for non NPIV mode.
 	 */
-	if (lport->qla_npiv_vp == NULL)
 		lport->tpg_1 = NULL;
 
 	kfree(tpg);
@@ -1095,12 +1082,22 @@
 	tpg->lport = lport;
 	tpg->lport_tpgt = tpgt;
 
+	/*
+	 * By default allow READ-ONLY TPG demo-mode access w/ cached dynamic
+	 * NodeACLs
+	 */
+	tpg->tpg_attrib.generate_node_acls = 1;
+	tpg->tpg_attrib.demo_mode_write_protect = 1;
+	tpg->tpg_attrib.cache_dynamic_acls = 1;
+	tpg->tpg_attrib.demo_mode_login_only = 1;
+
 	ret = core_tpg_register(&tcm_qla2xxx_npiv_fabric_configfs->tf_ops, wwn,
 				&tpg->se_tpg, tpg, TRANSPORT_TPG_TYPE_NORMAL);
 	if (ret < 0) {
 		kfree(tpg);
 		return NULL;
 	}
+	lport->tpg_1 = tpg;
 	return &tpg->se_tpg;
 }
 
@@ -1111,13 +1108,12 @@
 	scsi_qla_host_t *vha,
 	const uint8_t *s_id)
 {
-	struct qla_hw_data *ha = vha->hw;
 	struct tcm_qla2xxx_lport *lport;
 	struct se_node_acl *se_nacl;
 	struct tcm_qla2xxx_nacl *nacl;
 	u32 key;
 
-	lport = ha->tgt.target_lport_ptr;
+	lport = vha->vha_tgt.target_lport_ptr;
 	if (!lport) {
 		pr_err("Unable to locate struct tcm_qla2xxx_lport\n");
 		dump_stack();
@@ -1221,13 +1217,12 @@
 	scsi_qla_host_t *vha,
 	const uint16_t loop_id)
 {
-	struct qla_hw_data *ha = vha->hw;
 	struct tcm_qla2xxx_lport *lport;
 	struct se_node_acl *se_nacl;
 	struct tcm_qla2xxx_nacl *nacl;
 	struct tcm_qla2xxx_fc_loopid *fc_loopid;
 
-	lport = ha->tgt.target_lport_ptr;
+	lport = vha->vha_tgt.target_lport_ptr;
 	if (!lport) {
 		pr_err("Unable to locate struct tcm_qla2xxx_lport\n");
 		dump_stack();
@@ -1341,6 +1336,7 @@
 {
 	struct qla_tgt *tgt = sess->tgt;
 	struct qla_hw_data *ha = tgt->ha;
+	scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);
 	struct se_session *se_sess;
 	struct se_node_acl *se_nacl;
 	struct tcm_qla2xxx_lport *lport;
@@ -1357,7 +1353,7 @@
 	se_nacl = se_sess->se_node_acl;
 	nacl = container_of(se_nacl, struct tcm_qla2xxx_nacl, se_node_acl);
 
-	lport = ha->tgt.target_lport_ptr;
+	lport = vha->vha_tgt.target_lport_ptr;
 	if (!lport) {
 		pr_err("Unable to locate struct tcm_qla2xxx_lport\n");
 		dump_stack();
@@ -1391,7 +1387,7 @@
 	unsigned char port_name[36];
 	unsigned long flags;
 
-	lport = ha->tgt.target_lport_ptr;
+	lport = vha->vha_tgt.target_lport_ptr;
 	if (!lport) {
 		pr_err("Unable to locate struct tcm_qla2xxx_lport\n");
 		dump_stack();
@@ -1455,7 +1451,8 @@
 {
 	struct qla_tgt *tgt = sess->tgt;
 	struct qla_hw_data *ha = tgt->ha;
-	struct tcm_qla2xxx_lport *lport = ha->tgt.target_lport_ptr;
+	scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);
+	struct tcm_qla2xxx_lport *lport = vha->vha_tgt.target_lport_ptr;
 	struct se_node_acl *se_nacl = sess->se_sess->se_node_acl;
 	struct tcm_qla2xxx_nacl *nacl = container_of(se_nacl,
 			struct tcm_qla2xxx_nacl, se_node_acl);
@@ -1562,15 +1559,18 @@
 	return 0;
 }
 
-static int tcm_qla2xxx_lport_register_cb(struct scsi_qla_host *vha)
+static int tcm_qla2xxx_lport_register_cb(struct scsi_qla_host *vha,
+					 void *target_lport_ptr,
+					 u64 npiv_wwpn, u64 npiv_wwnn)
 {
 	struct qla_hw_data *ha = vha->hw;
-	struct tcm_qla2xxx_lport *lport;
+	struct tcm_qla2xxx_lport *lport =
+			(struct tcm_qla2xxx_lport *)target_lport_ptr;
 	/*
-	 * Setup local pointer to vha, NPIV VP pointer (if present) and
-	 * vha->tcm_lport pointer
+	 * Setup tgt_ops, local pointer to vha and target_lport_ptr
 	 */
-	lport = (struct tcm_qla2xxx_lport *)ha->tgt.target_lport_ptr;
+	ha->tgt.tgt_ops = &tcm_qla2xxx_template;
+	vha->vha_tgt.target_lport_ptr = target_lport_ptr;
 	lport->qla_vha = vha;
 
 	return 0;
@@ -1602,8 +1602,8 @@
 	if (ret != 0)
 		goto out;
 
-	ret = qlt_lport_register(&tcm_qla2xxx_template, wwpn,
-				tcm_qla2xxx_lport_register_cb, lport);
+	ret = qlt_lport_register(lport, wwpn, 0, 0,
+				 tcm_qla2xxx_lport_register_cb);
 	if (ret != 0)
 		goto out_lport;
 
@@ -1621,7 +1621,6 @@
 	struct tcm_qla2xxx_lport *lport = container_of(wwn,
 			struct tcm_qla2xxx_lport, lport_wwn);
 	struct scsi_qla_host *vha = lport->qla_vha;
-	struct qla_hw_data *ha = vha->hw;
 	struct se_node_acl *node;
 	u32 key = 0;
 
@@ -1630,8 +1629,8 @@
 	 * shutdown of struct qla_tgt after the call to
 	 * qlt_stop_phase1() from tcm_qla2xxx_drop_tpg() above..
 	 */
-	if (ha->tgt.qla_tgt && !ha->tgt.qla_tgt->tgt_stopped)
-		qlt_stop_phase2(ha->tgt.qla_tgt);
+	if (vha->vha_tgt.qla_tgt && !vha->vha_tgt.qla_tgt->tgt_stopped)
+		qlt_stop_phase2(vha->vha_tgt.qla_tgt);
 
 	qlt_lport_deregister(vha);
 
@@ -1642,17 +1641,70 @@
 	kfree(lport);
 }
 
+static int tcm_qla2xxx_lport_register_npiv_cb(struct scsi_qla_host *base_vha,
+					      void *target_lport_ptr,
+					      u64 npiv_wwpn, u64 npiv_wwnn)
+{
+	struct fc_vport *vport;
+	struct Scsi_Host *sh = base_vha->host;
+	struct scsi_qla_host *npiv_vha;
+	struct tcm_qla2xxx_lport *lport =
+			(struct tcm_qla2xxx_lport *)target_lport_ptr;
+	struct fc_vport_identifiers vport_id;
+
+	if (!qla_tgt_mode_enabled(base_vha)) {
+		pr_err("qla2xxx base_vha not enabled for target mode\n");
+		return -EPERM;
+	}
+
+	memset(&vport_id, 0, sizeof(vport_id));
+	vport_id.port_name = npiv_wwpn;
+	vport_id.node_name = npiv_wwnn;
+	vport_id.roles = FC_PORT_ROLE_FCP_INITIATOR;
+	vport_id.vport_type = FC_PORTTYPE_NPIV;
+	vport_id.disable = false;
+
+	vport = fc_vport_create(sh, 0, &vport_id);
+	if (!vport) {
+		pr_err("fc_vport_create failed for qla2xxx_npiv\n");
+		return -ENODEV;
+	}
+	/*
+	 * Setup local pointer to NPIV vhba + target_lport_ptr
+	 */
+	npiv_vha = (struct scsi_qla_host *)vport->dd_data;
+	npiv_vha->vha_tgt.target_lport_ptr = target_lport_ptr;
+	lport->qla_vha = npiv_vha;
+
+	scsi_host_get(npiv_vha->host);
+	return 0;
+}
+
+
 static struct se_wwn *tcm_qla2xxx_npiv_make_lport(
 	struct target_fabric_configfs *tf,
 	struct config_group *group,
 	const char *name)
 {
 	struct tcm_qla2xxx_lport *lport;
-	u64 npiv_wwpn, npiv_wwnn;
+	u64 phys_wwpn, npiv_wwpn, npiv_wwnn;
+	char *p, tmp[128];
 	int ret;
 
-	if (tcm_qla2xxx_npiv_parse_wwn(name, strlen(name)+1,
-				&npiv_wwpn, &npiv_wwnn) < 0)
+	snprintf(tmp, 128, "%s", name);
+
+	p = strchr(tmp, '@');
+	if (!p) {
+		pr_err("Unable to locate NPIV '@' seperator\n");
+		return ERR_PTR(-EINVAL);
+	}
+	*p++ = '\0';
+
+	if (tcm_qla2xxx_parse_wwn(tmp, &phys_wwpn, 1) < 0)
+		return ERR_PTR(-EINVAL);
+
+	if (tcm_qla2xxx_npiv_parse_wwn(p, strlen(p)+1,
+				       &npiv_wwpn, &npiv_wwnn) < 0)
 		return ERR_PTR(-EINVAL);
 
 	lport = kzalloc(sizeof(struct tcm_qla2xxx_lport), GFP_KERNEL);
@@ -1666,12 +1718,19 @@
 			TCM_QLA2XXX_NAMELEN, npiv_wwpn, npiv_wwnn);
 	sprintf(lport->lport_naa_name, "naa.%016llx", (unsigned long long) npiv_wwpn);
 
-/* FIXME: tcm_qla2xxx_npiv_make_lport */
-	ret = -ENOSYS;
+	ret = tcm_qla2xxx_init_lport(lport);
 	if (ret != 0)
 		goto out;
 
+	ret = qlt_lport_register(lport, phys_wwpn, npiv_wwpn, npiv_wwnn,
+				 tcm_qla2xxx_lport_register_npiv_cb);
+	if (ret != 0)
+		goto out_lport;
+
 	return &lport->lport_wwn;
+out_lport:
+	vfree(lport->lport_loopid_map);
+	btree_destroy32(&lport->lport_fcport_map);
 out:
 	kfree(lport);
 	return ERR_PTR(ret);
@@ -1681,14 +1740,16 @@
 {
 	struct tcm_qla2xxx_lport *lport = container_of(wwn,
 			struct tcm_qla2xxx_lport, lport_wwn);
-	struct scsi_qla_host *vha = lport->qla_vha;
-	struct Scsi_Host *sh = vha->host;
-	/*
-	 * Notify libfc that we want to release the lport->npiv_vport
-	 */
-	fc_vport_terminate(lport->npiv_vport);
+	struct scsi_qla_host *npiv_vha = lport->qla_vha;
+	struct qla_hw_data *ha = npiv_vha->hw;
+	scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev);
 
-	scsi_host_put(sh);
+	scsi_host_put(npiv_vha->host);
+	/*
+	 * Notify libfc that we want to release the vha->fc_vport
+	 */
+	fc_vport_terminate(npiv_vha->fc_vport);
+	scsi_host_put(base_vha->host);
 	kfree(lport);
 }
 
@@ -1769,14 +1830,16 @@
 	.tpg_get_pr_transport_id	= tcm_qla2xxx_get_pr_transport_id,
 	.tpg_get_pr_transport_id_len	= tcm_qla2xxx_get_pr_transport_id_len,
 	.tpg_parse_pr_out_transport_id	= tcm_qla2xxx_parse_pr_out_transport_id,
-	.tpg_check_demo_mode		= tcm_qla2xxx_check_false,
-	.tpg_check_demo_mode_cache	= tcm_qla2xxx_check_true,
-	.tpg_check_demo_mode_write_protect = tcm_qla2xxx_check_true,
-	.tpg_check_prod_mode_write_protect = tcm_qla2xxx_check_false,
+	.tpg_check_demo_mode		= tcm_qla2xxx_check_demo_mode,
+	.tpg_check_demo_mode_cache	= tcm_qla2xxx_check_demo_mode_cache,
+	.tpg_check_demo_mode_write_protect = tcm_qla2xxx_check_demo_mode,
+	.tpg_check_prod_mode_write_protect =
+	    tcm_qla2xxx_check_prod_write_protect,
 	.tpg_check_demo_mode_login_only	= tcm_qla2xxx_check_demo_mode_login_only,
 	.tpg_alloc_fabric_acl		= tcm_qla2xxx_alloc_fabric_acl,
 	.tpg_release_fabric_acl		= tcm_qla2xxx_release_fabric_acl,
 	.tpg_get_inst_index		= tcm_qla2xxx_tpg_get_inst_index,
+	.check_stop_free                = tcm_qla2xxx_check_stop_free,
 	.release_cmd			= tcm_qla2xxx_release_cmd,
 	.put_session			= tcm_qla2xxx_put_session,
 	.shutdown_session		= tcm_qla2xxx_shutdown_session,
@@ -1871,7 +1934,8 @@
 	 * Setup default attribute lists for various npiv_fabric->tf_cit_tmpl
 	 */
 	npiv_fabric->tf_cit_tmpl.tfc_wwn_cit.ct_attrs = tcm_qla2xxx_wwn_attrs;
-	npiv_fabric->tf_cit_tmpl.tfc_tpg_base_cit.ct_attrs = NULL;
+	npiv_fabric->tf_cit_tmpl.tfc_tpg_base_cit.ct_attrs =
+	    tcm_qla2xxx_tpg_attrs;
 	npiv_fabric->tf_cit_tmpl.tfc_tpg_attrib_cit.ct_attrs = NULL;
 	npiv_fabric->tf_cit_tmpl.tfc_tpg_param_cit.ct_attrs = NULL;
 	npiv_fabric->tf_cit_tmpl.tfc_tpg_np_base_cit.ct_attrs = NULL;
diff --git a/drivers/scsi/qla2xxx/tcm_qla2xxx.h b/drivers/scsi/qla2xxx/tcm_qla2xxx.h
index 771f7b8..275d8b9 100644
--- a/drivers/scsi/qla2xxx/tcm_qla2xxx.h
+++ b/drivers/scsi/qla2xxx/tcm_qla2xxx.h
@@ -70,12 +70,8 @@
 	struct tcm_qla2xxx_fc_loopid *lport_loopid_map;
 	/* Pointer to struct scsi_qla_host from qla2xxx LLD */
 	struct scsi_qla_host *qla_vha;
-	/* Pointer to struct scsi_qla_host for NPIV VP from qla2xxx LLD */
-	struct scsi_qla_host *qla_npiv_vp;
 	/* Pointer to struct qla_tgt pointer */
 	struct qla_tgt lport_qla_tgt;
-	/* Pointer to struct fc_vport for NPIV vport from libfc */
-	struct fc_vport *npiv_vport;
 	/* Pointer to TPG=1 for non NPIV mode */
 	struct tcm_qla2xxx_tpg *tpg_1;
 	/* Returned by tcm_qla2xxx_make_lport() */
diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
index 7bd7f0d..62ec84b 100644
--- a/drivers/scsi/scsi_lib.c
+++ b/drivers/scsi/scsi_lib.c
@@ -1684,7 +1684,7 @@
 
 	host_dev = scsi_get_device(shost);
 	if (host_dev && host_dev->dma_mask)
-		bounce_limit = dma_max_pfn(host_dev) << PAGE_SHIFT;
+		bounce_limit = (u64)dma_max_pfn(host_dev) << PAGE_SHIFT;
 
 	return bounce_limit;
 }
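
The added (u64) cast matters because dma_max_pfn() yields an unsigned long PFN; on a 32-bit build the left shift by PAGE_SHIFT is then performed in 32 bits and the high bits are lost before the result reaches the u64 bounce limit. A minimal stand-alone sketch (hypothetical PAGE_SHIFT and PFN values, not the kernel helpers) of the truncation:

    /* Widen to 64 bits before shifting, not after. */
    #include <stdio.h>
    #include <stdint.h>

    #define PAGE_SHIFT 12

    int main(void)
    {
        uint32_t max_pfn = 0x100000;  /* a PFN limit at the 4 GiB boundary */

        /* shift done in 32 bits, then widened: high bits already lost */
        uint64_t truncated = (uint64_t)(max_pfn << PAGE_SHIFT);
        /* widened first, then shifted: full value survives */
        uint64_t widened = (uint64_t)max_pfn << PAGE_SHIFT;

        printf("truncated: %#llx\n", (unsigned long long)truncated);
        printf("widened:   %#llx\n", (unsigned long long)widened);
        return 0;
    }
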
diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c
index 9846c6a..470954a 100644
--- a/drivers/scsi/sd.c
+++ b/drivers/scsi/sd.c
@@ -801,7 +801,7 @@
 	if (sdkp->device->no_write_same)
 		return BLKPREP_KILL;
 
-	BUG_ON(bio_offset(bio) || bio_iovec(bio)->bv_len != sdp->sector_size);
+	BUG_ON(bio_offset(bio) || bio_iovec(bio).bv_len != sdp->sector_size);
 
 	sector >>= ilog2(sdp->sector_size) - 9;
 	nr_sectors >>= ilog2(sdp->sector_size) - 9;
diff --git a/drivers/scsi/sd_dif.c b/drivers/scsi/sd_dif.c
index 6174ca4..a7a691d 100644
--- a/drivers/scsi/sd_dif.c
+++ b/drivers/scsi/sd_dif.c
@@ -365,7 +365,6 @@
 	struct bio *bio;
 	struct scsi_disk *sdkp;
 	struct sd_dif_tuple *sdt;
-	unsigned int i, j;
 	u32 phys, virt;
 
 	sdkp = rq->bio->bi_bdev->bd_disk->private_data;
@@ -376,19 +375,21 @@
 	phys = hw_sector & 0xffffffff;
 
 	__rq_for_each_bio(bio, rq) {
-		struct bio_vec *iv;
+		struct bio_vec iv;
+		struct bvec_iter iter;
+		unsigned int j;
 
 		/* Already remapped? */
 		if (bio_flagged(bio, BIO_MAPPED_INTEGRITY))
 			break;
 
-		virt = bio->bi_integrity->bip_sector & 0xffffffff;
+		virt = bio->bi_integrity->bip_iter.bi_sector & 0xffffffff;
 
-		bip_for_each_vec(iv, bio->bi_integrity, i) {
-			sdt = kmap_atomic(iv->bv_page)
-				+ iv->bv_offset;
+		bip_for_each_vec(iv, bio->bi_integrity, iter) {
+			sdt = kmap_atomic(iv.bv_page)
+				+ iv.bv_offset;
 
-			for (j = 0 ; j < iv->bv_len ; j += tuple_sz, sdt++) {
+			for (j = 0; j < iv.bv_len; j += tuple_sz, sdt++) {
 
 				if (be32_to_cpu(sdt->ref_tag) == virt)
 					sdt->ref_tag = cpu_to_be32(phys);
@@ -414,7 +415,7 @@
 	struct scsi_disk *sdkp;
 	struct bio *bio;
 	struct sd_dif_tuple *sdt;
-	unsigned int i, j, sectors, sector_sz;
+	unsigned int j, sectors, sector_sz;
 	u32 phys, virt;
 
 	sdkp = scsi_disk(scmd->request->rq_disk);
@@ -430,15 +431,16 @@
 		phys >>= 3;
 
 	__rq_for_each_bio(bio, scmd->request) {
-		struct bio_vec *iv;
+		struct bio_vec iv;
+		struct bvec_iter iter;
 
-		virt = bio->bi_integrity->bip_sector & 0xffffffff;
+		virt = bio->bi_integrity->bip_iter.bi_sector & 0xffffffff;
 
-		bip_for_each_vec(iv, bio->bi_integrity, i) {
-			sdt = kmap_atomic(iv->bv_page)
-				+ iv->bv_offset;
+		bip_for_each_vec(iv, bio->bi_integrity, iter) {
+			sdt = kmap_atomic(iv.bv_page)
+				+ iv.bv_offset;
 
-			for (j = 0 ; j < iv->bv_len ; j += tuple_sz, sdt++) {
+			for (j = 0; j < iv.bv_len; j += tuple_sz, sdt++) {
 
 				if (sectors == 0) {
 					kunmap_atomic(sdt);
diff --git a/drivers/spi/Kconfig b/drivers/spi/Kconfig
index ba9310b..581ee2a 100644
--- a/drivers/spi/Kconfig
+++ b/drivers/spi/Kconfig
@@ -376,10 +376,10 @@
 	def_tristate SPI_PXA2XX && PCI
 
 config SPI_RSPI
-	tristate "Renesas RSPI controller"
+	tristate "Renesas RSPI/QSPI controller"
 	depends on (SUPERH && SH_DMAE_BASE) || ARCH_SHMOBILE
 	help
-	  SPI driver for Renesas RSPI blocks.
+	  SPI driver for Renesas RSPI and QSPI blocks.
 
 config SPI_S3C24XX
 	tristate "Samsung S3C24XX series SPI"
diff --git a/drivers/spi/spi-mpc512x-psc.c b/drivers/spi/spi-mpc512x-psc.c
index 46d2313..5032141 100644
--- a/drivers/spi/spi-mpc512x-psc.c
+++ b/drivers/spi/spi-mpc512x-psc.c
@@ -40,6 +40,7 @@
 	unsigned int irq;
 	u8 bits_per_word;
 	struct clk *clk_mclk;
+	struct clk *clk_ipg;
 	u32 mclk_rate;
 
 	struct completion txisrdone;
@@ -475,8 +476,6 @@
 	struct spi_master *master;
 	int ret;
 	void *tempp;
-	int psc_num;
-	char clk_name[16];
 	struct clk *clk;
 
 	master = spi_alloc_master(dev, sizeof *mps);
@@ -519,9 +518,7 @@
 		goto free_master;
 	init_completion(&mps->txisrdone);
 
-	psc_num = master->bus_num;
-	snprintf(clk_name, sizeof(clk_name), "psc%d_mclk", psc_num);
-	clk = devm_clk_get(dev, clk_name);
+	clk = devm_clk_get(dev, "mclk");
 	if (IS_ERR(clk)) {
 		ret = PTR_ERR(clk);
 		goto free_master;
@@ -532,17 +529,29 @@
 	mps->clk_mclk = clk;
 	mps->mclk_rate = clk_get_rate(clk);
 
+	clk = devm_clk_get(dev, "ipg");
+	if (IS_ERR(clk)) {
+		ret = PTR_ERR(clk);
+		goto free_mclk_clock;
+	}
+	ret = clk_prepare_enable(clk);
+	if (ret)
+		goto free_mclk_clock;
+	mps->clk_ipg = clk;
+
 	ret = mpc512x_psc_spi_port_config(master, mps);
 	if (ret < 0)
-		goto free_clock;
+		goto free_ipg_clock;
 
 	ret = devm_spi_register_master(dev, master);
 	if (ret < 0)
-		goto free_clock;
+		goto free_ipg_clock;
 
 	return ret;
 
-free_clock:
+free_ipg_clock:
+	clk_disable_unprepare(mps->clk_ipg);
+free_mclk_clock:
 	clk_disable_unprepare(mps->clk_mclk);
 free_master:
 	spi_master_put(master);
@@ -556,6 +565,7 @@
 	struct mpc512x_psc_spi *mps = spi_master_get_devdata(master);
 
 	clk_disable_unprepare(mps->clk_mclk);
+	clk_disable_unprepare(mps->clk_ipg);
 
 	return 0;
 }
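
The reworked error path above follows the usual goto-unwind idiom: each newly acquired clock gets its own cleanup label, and a failure jumps to the label that releases everything obtained so far, in reverse order. A small stand-alone sketch of the pattern, with hypothetical acquire()/release() helpers standing in for clk_prepare_enable()/clk_disable_unprepare():

    #include <stdio.h>
    #include <stdlib.h>

    /* hypothetical stand-ins for the clock enable/disable calls */
    static int acquire(const char *name, void **res)
    {
        *res = malloc(16);
        if (!*res)
            return -1;
        printf("acquired %s\n", name);
        return 0;
    }

    static void release(const char *name, void *res)
    {
        printf("released %s\n", name);
        free(res);
    }

    static int probe(void)
    {
        void *mclk, *ipg;
        int ret;

        ret = acquire("mclk", &mclk);
        if (ret)
            goto out;

        ret = acquire("ipg", &ipg);
        if (ret)
            goto free_mclk;

        ret = -1;                 /* pretend a later setup step failed */
        if (ret)
            goto free_ipg;

        return 0;

    free_ipg:
        release("ipg", ipg);      /* tear down in reverse order */
    free_mclk:
        release("mclk", mclk);
    out:
        return ret;
    }

    int main(void)
    {
        return probe() ? 1 : 0;
    }
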
diff --git a/drivers/spi/spi-nuc900.c b/drivers/spi/spi-nuc900.c
index 5040630..bae97ff 100644
--- a/drivers/spi/spi-nuc900.c
+++ b/drivers/spi/spi-nuc900.c
@@ -361,6 +361,8 @@
 	init_completion(&hw->done);
 
 	master->mode_bits          = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH;
+	if (hw->pdata->lsb)
+		master->mode_bits |= SPI_LSB_FIRST;
 	master->num_chipselect     = hw->pdata->num_cs;
 	master->bus_num            = hw->pdata->bus_num;
 	hw->bitbang.master         = hw->master;
diff --git a/drivers/spi/spi.c b/drivers/spi/spi.c
index 23756b0..d0b28bb 100644
--- a/drivers/spi/spi.c
+++ b/drivers/spi/spi.c
@@ -755,9 +755,7 @@
 	ret = master->transfer_one_message(master, master->cur_msg);
 	if (ret) {
 		dev_err(&master->dev,
-			"failed to transfer one message from queue: %d\n", ret);
-		master->cur_msg->status = ret;
-		spi_finalize_current_message(master);
+			"failed to transfer one message from queue\n");
 		return;
 	}
 }
diff --git a/drivers/ssb/Kconfig b/drivers/ssb/Kconfig
index 2cd9b0e..75b3603 100644
--- a/drivers/ssb/Kconfig
+++ b/drivers/ssb/Kconfig
@@ -168,6 +168,7 @@
 config SSB_DRIVER_GPIO
 	bool "SSB GPIO driver"
 	depends on SSB && GPIOLIB
+	select IRQ_DOMAIN if SSB_EMBEDDED
 	help
 	  Driver to provide access to the GPIO pins on the bus.
 
diff --git a/drivers/ssb/driver_gpio.c b/drivers/ssb/driver_gpio.c
index dc109de..ba350d2 100644
--- a/drivers/ssb/driver_gpio.c
+++ b/drivers/ssb/driver_gpio.c
@@ -9,16 +9,40 @@
  */
 
 #include <linux/gpio.h>
+#include <linux/irq.h>
+#include <linux/interrupt.h>
+#include <linux/irqdomain.h>
 #include <linux/export.h>
 #include <linux/ssb/ssb.h>
 
 #include "ssb_private.h"
 
+
+/**************************************************
+ * Shared
+ **************************************************/
+
 static struct ssb_bus *ssb_gpio_get_bus(struct gpio_chip *chip)
 {
 	return container_of(chip, struct ssb_bus, gpio);
 }
 
+#if IS_ENABLED(CONFIG_SSB_EMBEDDED)
+static int ssb_gpio_to_irq(struct gpio_chip *chip, unsigned gpio)
+{
+	struct ssb_bus *bus = ssb_gpio_get_bus(chip);
+
+	if (bus->bustype == SSB_BUSTYPE_SSB)
+		return irq_find_mapping(bus->irq_domain, gpio);
+	else
+		return -EINVAL;
+}
+#endif
+
+/**************************************************
+ * ChipCommon
+ **************************************************/
+
 static int ssb_gpio_chipco_get_value(struct gpio_chip *chip, unsigned gpio)
 {
 	struct ssb_bus *bus = ssb_gpio_get_bus(chip);
@@ -74,19 +98,129 @@
 	ssb_chipco_gpio_pullup(&bus->chipco, 1 << gpio, 0);
 }
 
-static int ssb_gpio_chipco_to_irq(struct gpio_chip *chip, unsigned gpio)
+#if IS_ENABLED(CONFIG_SSB_EMBEDDED)
+static void ssb_gpio_irq_chipco_mask(struct irq_data *d)
 {
-	struct ssb_bus *bus = ssb_gpio_get_bus(chip);
+	struct ssb_bus *bus = irq_data_get_irq_chip_data(d);
+	int gpio = irqd_to_hwirq(d);
 
-	if (bus->bustype == SSB_BUSTYPE_SSB)
-		return ssb_mips_irq(bus->chipco.dev) + 2;
-	else
-		return -EINVAL;
+	ssb_chipco_gpio_intmask(&bus->chipco, BIT(gpio), 0);
 }
 
+static void ssb_gpio_irq_chipco_unmask(struct irq_data *d)
+{
+	struct ssb_bus *bus = irq_data_get_irq_chip_data(d);
+	int gpio = irqd_to_hwirq(d);
+	u32 val = ssb_chipco_gpio_in(&bus->chipco, BIT(gpio));
+
+	ssb_chipco_gpio_polarity(&bus->chipco, BIT(gpio), val);
+	ssb_chipco_gpio_intmask(&bus->chipco, BIT(gpio), BIT(gpio));
+}
+
+static struct irq_chip ssb_gpio_irq_chipco_chip = {
+	.name		= "SSB-GPIO-CC",
+	.irq_mask	= ssb_gpio_irq_chipco_mask,
+	.irq_unmask	= ssb_gpio_irq_chipco_unmask,
+};
+
+static irqreturn_t ssb_gpio_irq_chipco_handler(int irq, void *dev_id)
+{
+	struct ssb_bus *bus = dev_id;
+	struct ssb_chipcommon *chipco = &bus->chipco;
+	u32 val = chipco_read32(chipco, SSB_CHIPCO_GPIOIN);
+	u32 mask = chipco_read32(chipco, SSB_CHIPCO_GPIOIRQ);
+	u32 pol = chipco_read32(chipco, SSB_CHIPCO_GPIOPOL);
+	unsigned long irqs = (val ^ pol) & mask;
+	int gpio;
+
+	if (!irqs)
+		return IRQ_NONE;
+
+	for_each_set_bit(gpio, &irqs, bus->gpio.ngpio)
+		generic_handle_irq(ssb_gpio_to_irq(&bus->gpio, gpio));
+	ssb_chipco_gpio_polarity(chipco, irqs, val & irqs);
+
+	return IRQ_HANDLED;
+}
+
+static int ssb_gpio_irq_chipco_domain_init(struct ssb_bus *bus)
+{
+	struct ssb_chipcommon *chipco = &bus->chipco;
+	struct gpio_chip *chip = &bus->gpio;
+	int gpio, hwirq, err;
+
+	if (bus->bustype != SSB_BUSTYPE_SSB)
+		return 0;
+
+	bus->irq_domain = irq_domain_add_linear(NULL, chip->ngpio,
+						&irq_domain_simple_ops, chipco);
+	if (!bus->irq_domain) {
+		err = -ENODEV;
+		goto err_irq_domain;
+	}
+	for (gpio = 0; gpio < chip->ngpio; gpio++) {
+		int irq = irq_create_mapping(bus->irq_domain, gpio);
+
+		irq_set_chip_data(irq, bus);
+		irq_set_chip_and_handler(irq, &ssb_gpio_irq_chipco_chip,
+					 handle_simple_irq);
+	}
+
+	hwirq = ssb_mips_irq(bus->chipco.dev) + 2;
+	err = request_irq(hwirq, ssb_gpio_irq_chipco_handler, IRQF_SHARED,
+			  "gpio", bus);
+	if (err)
+		goto err_req_irq;
+
+	ssb_chipco_gpio_intmask(&bus->chipco, ~0, 0);
+	chipco_set32(chipco, SSB_CHIPCO_IRQMASK, SSB_CHIPCO_IRQ_GPIO);
+
+	return 0;
+
+err_req_irq:
+	for (gpio = 0; gpio < chip->ngpio; gpio++) {
+		int irq = irq_find_mapping(bus->irq_domain, gpio);
+
+		irq_dispose_mapping(irq);
+	}
+	irq_domain_remove(bus->irq_domain);
+err_irq_domain:
+	return err;
+}
+
+static void ssb_gpio_irq_chipco_domain_exit(struct ssb_bus *bus)
+{
+	struct ssb_chipcommon *chipco = &bus->chipco;
+	struct gpio_chip *chip = &bus->gpio;
+	int gpio;
+
+	if (bus->bustype != SSB_BUSTYPE_SSB)
+		return;
+
+	chipco_mask32(chipco, SSB_CHIPCO_IRQMASK, ~SSB_CHIPCO_IRQ_GPIO);
+	free_irq(ssb_mips_irq(bus->chipco.dev) + 2, chipco);
+	for (gpio = 0; gpio < chip->ngpio; gpio++) {
+		int irq = irq_find_mapping(bus->irq_domain, gpio);
+
+		irq_dispose_mapping(irq);
+	}
+	irq_domain_remove(bus->irq_domain);
+}
+#else
+static int ssb_gpio_irq_chipco_domain_init(struct ssb_bus *bus)
+{
+	return 0;
+}
+
+static void ssb_gpio_irq_chipco_domain_exit(struct ssb_bus *bus)
+{
+}
+#endif
+
 static int ssb_gpio_chipco_init(struct ssb_bus *bus)
 {
 	struct gpio_chip *chip = &bus->gpio;
+	int err;
 
 	chip->label		= "ssb_chipco_gpio";
 	chip->owner		= THIS_MODULE;
@@ -96,7 +230,9 @@
 	chip->set		= ssb_gpio_chipco_set_value;
 	chip->direction_input	= ssb_gpio_chipco_direction_input;
 	chip->direction_output	= ssb_gpio_chipco_direction_output;
-	chip->to_irq		= ssb_gpio_chipco_to_irq;
+#if IS_ENABLED(CONFIG_SSB_EMBEDDED)
+	chip->to_irq		= ssb_gpio_to_irq;
+#endif
 	chip->ngpio		= 16;
 	/* There is just one SoC in one device and its GPIO addresses should be
 	 * deterministic to address them more easily. The other buses could get
@@ -106,9 +242,23 @@
 	else
 		chip->base		= -1;
 
-	return gpiochip_add(chip);
+	err = ssb_gpio_irq_chipco_domain_init(bus);
+	if (err)
+		return err;
+
+	err = gpiochip_add(chip);
+	if (err) {
+		ssb_gpio_irq_chipco_domain_exit(bus);
+		return err;
+	}
+
+	return 0;
 }
 
+/**************************************************
+ * EXTIF
+ **************************************************/
+
 #ifdef CONFIG_SSB_DRIVER_EXTIF
 
 static int ssb_gpio_extif_get_value(struct gpio_chip *chip, unsigned gpio)
@@ -145,19 +295,127 @@
 	return 0;
 }
 
-static int ssb_gpio_extif_to_irq(struct gpio_chip *chip, unsigned gpio)
+#if IS_ENABLED(CONFIG_SSB_EMBEDDED)
+static void ssb_gpio_irq_extif_mask(struct irq_data *d)
 {
-	struct ssb_bus *bus = ssb_gpio_get_bus(chip);
+	struct ssb_bus *bus = irq_data_get_irq_chip_data(d);
+	int gpio = irqd_to_hwirq(d);
 
-	if (bus->bustype == SSB_BUSTYPE_SSB)
-		return ssb_mips_irq(bus->extif.dev) + 2;
-	else
-		return -EINVAL;
+	ssb_extif_gpio_intmask(&bus->extif, BIT(gpio), 0);
 }
 
+static void ssb_gpio_irq_extif_unmask(struct irq_data *d)
+{
+	struct ssb_bus *bus = irq_data_get_irq_chip_data(d);
+	int gpio = irqd_to_hwirq(d);
+	u32 val = ssb_extif_gpio_in(&bus->extif, BIT(gpio));
+
+	ssb_extif_gpio_polarity(&bus->extif, BIT(gpio), val);
+	ssb_extif_gpio_intmask(&bus->extif, BIT(gpio), BIT(gpio));
+}
+
+static struct irq_chip ssb_gpio_irq_extif_chip = {
+	.name		= "SSB-GPIO-EXTIF",
+	.irq_mask	= ssb_gpio_irq_extif_mask,
+	.irq_unmask	= ssb_gpio_irq_extif_unmask,
+};
+
+static irqreturn_t ssb_gpio_irq_extif_handler(int irq, void *dev_id)
+{
+	struct ssb_bus *bus = dev_id;
+	struct ssb_extif *extif = &bus->extif;
+	u32 val = ssb_read32(extif->dev, SSB_EXTIF_GPIO_IN);
+	u32 mask = ssb_read32(extif->dev, SSB_EXTIF_GPIO_INTMASK);
+	u32 pol = ssb_read32(extif->dev, SSB_EXTIF_GPIO_INTPOL);
+	unsigned long irqs = (val ^ pol) & mask;
+	int gpio;
+
+	if (!irqs)
+		return IRQ_NONE;
+
+	for_each_set_bit(gpio, &irqs, bus->gpio.ngpio)
+		generic_handle_irq(ssb_gpio_to_irq(&bus->gpio, gpio));
+	ssb_extif_gpio_polarity(extif, irqs, val & irqs);
+
+	return IRQ_HANDLED;
+}
+
+static int ssb_gpio_irq_extif_domain_init(struct ssb_bus *bus)
+{
+	struct ssb_extif *extif = &bus->extif;
+	struct gpio_chip *chip = &bus->gpio;
+	int gpio, hwirq, err;
+
+	if (bus->bustype != SSB_BUSTYPE_SSB)
+		return 0;
+
+	bus->irq_domain = irq_domain_add_linear(NULL, chip->ngpio,
+						&irq_domain_simple_ops, extif);
+	if (!bus->irq_domain) {
+		err = -ENODEV;
+		goto err_irq_domain;
+	}
+	for (gpio = 0; gpio < chip->ngpio; gpio++) {
+		int irq = irq_create_mapping(bus->irq_domain, gpio);
+
+		irq_set_chip_data(irq, bus);
+		irq_set_chip_and_handler(irq, &ssb_gpio_irq_extif_chip,
+					 handle_simple_irq);
+	}
+
+	hwirq = ssb_mips_irq(bus->extif.dev) + 2;
+	err = request_irq(hwirq, ssb_gpio_irq_extif_handler, IRQF_SHARED,
+			  "gpio", bus);
+	if (err)
+		goto err_req_irq;
+
+	ssb_extif_gpio_intmask(&bus->extif, ~0, 0);
+
+	return 0;
+
+err_req_irq:
+	for (gpio = 0; gpio < chip->ngpio; gpio++) {
+		int irq = irq_find_mapping(bus->irq_domain, gpio);
+
+		irq_dispose_mapping(irq);
+	}
+	irq_domain_remove(bus->irq_domain);
+err_irq_domain:
+	return err;
+}
+
+static void ssb_gpio_irq_extif_domain_exit(struct ssb_bus *bus)
+{
+	struct ssb_extif *extif = &bus->extif;
+	struct gpio_chip *chip = &bus->gpio;
+	int gpio;
+
+	if (bus->bustype != SSB_BUSTYPE_SSB)
+		return;
+
+	free_irq(ssb_mips_irq(bus->extif.dev) + 2, extif);
+	for (gpio = 0; gpio < chip->ngpio; gpio++) {
+		int irq = irq_find_mapping(bus->irq_domain, gpio);
+
+		irq_dispose_mapping(irq);
+	}
+	irq_domain_remove(bus->irq_domain);
+}
+#else
+static int ssb_gpio_irq_extif_domain_init(struct ssb_bus *bus)
+{
+	return 0;
+}
+
+static void ssb_gpio_irq_extif_domain_exit(struct ssb_bus *bus)
+{
+}
+#endif
+
 static int ssb_gpio_extif_init(struct ssb_bus *bus)
 {
 	struct gpio_chip *chip = &bus->gpio;
+	int err;
 
 	chip->label		= "ssb_extif_gpio";
 	chip->owner		= THIS_MODULE;
@@ -165,7 +423,9 @@
 	chip->set		= ssb_gpio_extif_set_value;
 	chip->direction_input	= ssb_gpio_extif_direction_input;
 	chip->direction_output	= ssb_gpio_extif_direction_output;
-	chip->to_irq		= ssb_gpio_extif_to_irq;
+#if IS_ENABLED(CONFIG_SSB_EMBEDDED)
+	chip->to_irq		= ssb_gpio_to_irq;
+#endif
 	chip->ngpio		= 5;
 	/* There is just one SoC in one device and its GPIO addresses should be
 	 * deterministic to address them more easily. The other buses could get
@@ -175,7 +435,17 @@
 	else
 		chip->base		= -1;
 
-	return gpiochip_add(chip);
+	err = ssb_gpio_irq_extif_domain_init(bus);
+	if (err)
+		return err;
+
+	err = gpiochip_add(chip);
+	if (err) {
+		ssb_gpio_irq_extif_domain_exit(bus);
+		return err;
+	}
+
+	return 0;
 }
 
 #else
@@ -185,6 +455,10 @@
 }
 #endif
 
+/**************************************************
+ * Init
+ **************************************************/
+
 int ssb_gpio_init(struct ssb_bus *bus)
 {
 	if (ssb_chipco_available(&bus->chipco))
diff --git a/drivers/ssb/main.c b/drivers/ssb/main.c
index 32a811d..2fead38 100644
--- a/drivers/ssb/main.c
+++ b/drivers/ssb/main.c
@@ -593,6 +593,13 @@
 		ssb_pcicore_init(&bus->pcicore);
 		if (bus->bustype == SSB_BUSTYPE_SSB)
 			ssb_watchdog_register(bus);
+
+		err = ssb_gpio_init(bus);
+		if (err == -ENOTSUPP)
+			ssb_dbg("GPIO driver not activated\n");
+		else if (err)
+			ssb_dbg("Error registering GPIO driver: %i\n", err);
+
 		ssb_bus_may_powerdown(bus);
 
 		err = ssb_devices_register(bus);
@@ -830,11 +837,6 @@
 	ssb_chipcommon_init(&bus->chipco);
 	ssb_extif_init(&bus->extif);
 	ssb_mipscore_init(&bus->mipscore);
-	err = ssb_gpio_init(bus);
-	if (err == -ENOTSUPP)
-		ssb_dbg("GPIO driver not activated\n");
-	else if (err)
-		ssb_dbg("Error registering GPIO driver: %i\n", err);
 	err = ssb_fetch_invariants(bus, get_invariants);
 	if (err) {
 		ssb_bus_may_powerdown(bus);
diff --git a/drivers/staging/Kconfig b/drivers/staging/Kconfig
index 4bb6b11..99375f0 100644
--- a/drivers/staging/Kconfig
+++ b/drivers/staging/Kconfig
@@ -52,6 +52,8 @@
 
 source "drivers/staging/rtl8188eu/Kconfig"
 
+source "drivers/staging/rtl8821ae/Kconfig"
+
 source "drivers/staging/rts5139/Kconfig"
 
 source "drivers/staging/rts5208/Kconfig"
@@ -76,10 +78,6 @@
 
 source "drivers/staging/iio/Kconfig"
 
-source "drivers/staging/zsmalloc/Kconfig"
-
-source "drivers/staging/zram/Kconfig"
-
 source "drivers/staging/wlags49_h2/Kconfig"
 
 source "drivers/staging/wlags49_h25/Kconfig"
diff --git a/drivers/staging/Makefile b/drivers/staging/Makefile
index 9f07e5e..ddc3c4a 100644
--- a/drivers/staging/Makefile
+++ b/drivers/staging/Makefile
@@ -18,6 +18,7 @@
 obj-$(CONFIG_RTL8192E)		+= rtl8192e/
 obj-$(CONFIG_R8712U)		+= rtl8712/
 obj-$(CONFIG_R8188EU)		+= rtl8188eu/
+obj-$(CONFIG_R8821AE)		+= rtl8821ae/
 obj-$(CONFIG_RTS5139)		+= rts5139/
 obj-$(CONFIG_RTS5208)		+= rts5208/
 obj-$(CONFIG_TRANZPORT)		+= frontier/
@@ -32,8 +33,6 @@
 obj-$(CONFIG_VME_BUS)		+= vme/
 obj-$(CONFIG_DX_SEP)            += sep/
 obj-$(CONFIG_IIO)		+= iio/
-obj-$(CONFIG_ZRAM)		+= zram/
-obj-$(CONFIG_ZSMALLOC)		+= zsmalloc/
 obj-$(CONFIG_WLAGS49_H2)	+= wlags49_h2/
 obj-$(CONFIG_WLAGS49_H25)	+= wlags49_h25/
 obj-$(CONFIG_FB_SM7XX)		+= sm7xxfb/
diff --git a/drivers/staging/android/ashmem.c b/drivers/staging/android/ashmem.c
index 23948f1..713a972 100644
--- a/drivers/staging/android/ashmem.c
+++ b/drivers/staging/android/ashmem.c
@@ -295,21 +295,29 @@
 
 	/* If size is not set, or set to 0, always return EOF. */
 	if (asma->size == 0)
-		goto out;
+		goto out_unlock;
 
 	if (!asma->file) {
 		ret = -EBADF;
-		goto out;
+		goto out_unlock;
 	}
 
+	mutex_unlock(&ashmem_mutex);
+
+	/*
+	 * asma and asma->file are used outside the lock here.  We assume
+	 * once asma->file is set it will never be changed, and will not
+	 * be destroyed until all references to the file are dropped and
+	 * ashmem_release is called.
+	 */
 	ret = asma->file->f_op->read(asma->file, buf, len, pos);
-	if (ret < 0)
-		goto out;
+	if (ret >= 0) {
+		/** Update backing file pos, since f_ops->read() doesn't */
+		asma->file->f_pos = *pos;
+	}
+	return ret;
 
-	/** Update backing file pos, since f_ops->read() doesn't */
-	asma->file->f_pos = *pos;
-
-out:
+out_unlock:
 	mutex_unlock(&ashmem_mutex);
 	return ret;
 }
@@ -498,6 +506,7 @@
 
 static int set_name(struct ashmem_area *asma, void __user *name)
 {
+	int len;
 	int ret = 0;
 	char local_name[ASHMEM_NAME_LEN];
 
@@ -510,21 +519,19 @@
 	 * variable that does not need protection and later copy the local
 	 * variable to the structure member with lock held.
 	 */
-	if (copy_from_user(local_name, name, ASHMEM_NAME_LEN))
-		return -EFAULT;
-
+	len = strncpy_from_user(local_name, name, ASHMEM_NAME_LEN);
+	if (len < 0)
+		return len;
+	if (len == ASHMEM_NAME_LEN)
+		local_name[ASHMEM_NAME_LEN - 1] = '\0';
 	mutex_lock(&ashmem_mutex);
 	/* cannot change an existing mapping's name */
-	if (unlikely(asma->file)) {
+	if (unlikely(asma->file))
 		ret = -EINVAL;
-		goto out;
-	}
-	memcpy(asma->name + ASHMEM_NAME_PREFIX_LEN,
-		local_name, ASHMEM_NAME_LEN);
-	asma->name[ASHMEM_FULL_NAME_LEN-1] = '\0';
-out:
-	mutex_unlock(&ashmem_mutex);
+	else
+		strcpy(asma->name + ASHMEM_NAME_PREFIX_LEN, local_name);
 
+	mutex_unlock(&ashmem_mutex);
 	return ret;
 }
 
diff --git a/drivers/staging/android/ion/compat_ion.c b/drivers/staging/android/ion/compat_ion.c
index af6cd37..ee3a738 100644
--- a/drivers/staging/android/ion/compat_ion.c
+++ b/drivers/staging/android/ion/compat_ion.c
@@ -35,9 +35,14 @@
 	compat_ulong_t arg;
 };
 
+struct compat_ion_handle_data {
+	compat_int_t handle;
+};
+
 #define COMPAT_ION_IOC_ALLOC	_IOWR(ION_IOC_MAGIC, 0, \
 				      struct compat_ion_allocation_data)
-#define COMPAT_ION_IOC_FREE	_IOWR(ION_IOC_MAGIC, 1, struct ion_handle_data)
+#define COMPAT_ION_IOC_FREE	_IOWR(ION_IOC_MAGIC, 1, \
+				      struct compat_ion_handle_data)
 #define COMPAT_ION_IOC_CUSTOM	_IOWR(ION_IOC_MAGIC, 6, \
 				      struct compat_ion_custom_data)
 
@@ -64,6 +69,19 @@
 	return err;
 }
 
+static int compat_get_ion_handle_data(
+			struct compat_ion_handle_data __user *data32,
+			struct ion_handle_data __user *data)
+{
+	compat_int_t i;
+	int err;
+
+	err = get_user(i, &data32->handle);
+	err |= put_user(i, &data->handle);
+
+	return err;
+}
+
 static int compat_put_ion_allocation_data(
 			struct compat_ion_allocation_data __user *data32,
 			struct ion_allocation_data __user *data)
@@ -132,8 +150,8 @@
 	}
 	case COMPAT_ION_IOC_FREE:
 	{
-		struct compat_ion_allocation_data __user *data32;
-		struct ion_allocation_data __user *data;
+		struct compat_ion_handle_data __user *data32;
+		struct ion_handle_data __user *data;
 		int err;
 
 		data32 = compat_ptr(arg);
@@ -141,7 +159,7 @@
 		if (data == NULL)
 			return -EFAULT;
 
-		err = compat_get_ion_allocation_data(data32, data);
+		err = compat_get_ion_handle_data(data32, data);
 		if (err)
 			return err;
 
diff --git a/drivers/staging/android/ion/ion_dummy_driver.c b/drivers/staging/android/ion/ion_dummy_driver.c
index 55b2002..01cdc8a 100644
--- a/drivers/staging/android/ion/ion_dummy_driver.c
+++ b/drivers/staging/android/ion/ion_dummy_driver.c
@@ -17,9 +17,11 @@
 #include <linux/err.h>
 #include <linux/platform_device.h>
 #include <linux/slab.h>
+#include <linux/init.h>
 #include <linux/bootmem.h>
 #include <linux/memblock.h>
 #include <linux/sizes.h>
+#include <linux/io.h>
 #include "ion.h"
 #include "ion_priv.h"
 
@@ -57,7 +59,7 @@
 };
 
 struct ion_platform_data dummy_ion_pdata = {
-	.nr = 4,
+	.nr = ARRAY_SIZE(dummy_heaps),
 	.heaps = dummy_heaps,
 };
 
@@ -69,7 +71,7 @@
 	heaps = kzalloc(sizeof(struct ion_heap *) * dummy_ion_pdata.nr,
 			GFP_KERNEL);
 	if (!heaps)
-		return PTR_ERR(heaps);
+		return -ENOMEM;
 
 
 	/* Allocate a dummy carveout heap */
@@ -128,6 +130,7 @@
 	}
 	return err;
 }
+device_initcall(ion_dummy_init);
 
 static void __exit ion_dummy_exit(void)
 {
@@ -152,7 +155,4 @@
 
 	return;
 }
-
-module_init(ion_dummy_init);
-module_exit(ion_dummy_exit);
-
+__exitcall(ion_dummy_exit);
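
The switch to -ENOMEM above matters because PTR_ERR() applied to a plain NULL from a failed allocation evaluates to 0, so the error path would have reported success. A small stand-alone sketch (simplified PTR_ERR stand-in, not the kernel's <linux/err.h>) of the difference:

    #include <stdio.h>
    #include <stdlib.h>
    #include <errno.h>

    /* simplified analogue of the kernel macro */
    #define PTR_ERR(ptr) ((long)(ptr))

    static long init_buggy(size_t n)
    {
        void *heaps = calloc(1, n);
        if (!heaps)
            return PTR_ERR(heaps);  /* bug: NULL becomes 0, i.e. "success" */
        free(heaps);
        return 0;
    }

    static long init_fixed(size_t n)
    {
        void *heaps = calloc(1, n);
        if (!heaps)
            return -ENOMEM;         /* the failure is reported as a failure */
        free(heaps);
        return 0;
    }

    int main(void)
    {
        printf("PTR_ERR(NULL) == %ld\n", PTR_ERR((void *)NULL));
        printf("buggy=%ld fixed=%ld (both succeed here)\n",
               init_buggy(16), init_fixed(16));
        return 0;
    }
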
diff --git a/drivers/staging/android/ion/ion_heap.c b/drivers/staging/android/ion/ion_heap.c
index 296c74f..37e64d5 100644
--- a/drivers/staging/android/ion/ion_heap.c
+++ b/drivers/staging/android/ion/ion_heap.c
@@ -243,12 +243,12 @@
 	init_waitqueue_head(&heap->waitqueue);
 	heap->task = kthread_run(ion_heap_deferred_free, heap,
 				 "%s", heap->name);
-	sched_setscheduler(heap->task, SCHED_IDLE, &param);
 	if (IS_ERR(heap->task)) {
 		pr_err("%s: creating thread for deferred free failed\n",
 		       __func__);
 		return PTR_RET(heap->task);
 	}
+	sched_setscheduler(heap->task, SCHED_IDLE, &param);
 	return 0;
 }
 
diff --git a/drivers/staging/android/ion/ion_priv.h b/drivers/staging/android/ion/ion_priv.h
index d986739..fc2e4fc 100644
--- a/drivers/staging/android/ion/ion_priv.h
+++ b/drivers/staging/android/ion/ion_priv.h
@@ -17,6 +17,7 @@
 #ifndef _ION_PRIV_H
 #define _ION_PRIV_H
 
+#include <linux/device.h>
 #include <linux/dma-direction.h>
 #include <linux/kref.h>
 #include <linux/mm_types.h>
diff --git a/drivers/staging/android/ion/ion_system_heap.c b/drivers/staging/android/ion/ion_system_heap.c
index 7f07291..9849f39 100644
--- a/drivers/staging/android/ion/ion_system_heap.c
+++ b/drivers/staging/android/ion/ion_system_heap.c
@@ -124,6 +124,7 @@
 
 		info->page = page;
 		info->order = orders[i];
+		INIT_LIST_HEAD(&info->list);
 		return info;
 	}
 	kfree(info);
@@ -145,12 +146,15 @@
 	struct list_head pages;
 	struct page_info *info, *tmp_info;
 	int i = 0;
-	long size_remaining = PAGE_ALIGN(size);
+	unsigned long size_remaining = PAGE_ALIGN(size);
 	unsigned int max_order = orders[0];
 
 	if (align > PAGE_SIZE)
 		return -EINVAL;
 
+	if (size / PAGE_SIZE > totalram_pages / 2)
+		return -ENOMEM;
+
 	INIT_LIST_HEAD(&pages);
 	while (size_remaining > 0) {
 		info = alloc_largest_available(sys_heap, buffer, size_remaining,
diff --git a/drivers/staging/android/sw_sync.h b/drivers/staging/android/sw_sync.h
index 585040b..5aaf71d 100644
--- a/drivers/staging/android/sw_sync.h
+++ b/drivers/staging/android/sw_sync.h
@@ -35,10 +35,27 @@
 	u32			value;
 };
 
+#if IS_ENABLED(CONFIG_SW_SYNC)
 struct sw_sync_timeline *sw_sync_timeline_create(const char *name);
 void sw_sync_timeline_inc(struct sw_sync_timeline *obj, u32 inc);
 
 struct sync_pt *sw_sync_pt_create(struct sw_sync_timeline *obj, u32 value);
+#else
+static inline struct sw_sync_timeline *sw_sync_timeline_create(const char *name)
+{
+	return NULL;
+}
+
+static inline void sw_sync_timeline_inc(struct sw_sync_timeline *obj, u32 inc)
+{
+}
+
+static inline struct sync_pt *sw_sync_pt_create(struct sw_sync_timeline *obj,
+		u32 value)
+{
+	return NULL;
+}
+#endif /* IS_ENABLED(CONFIG_SW_SYNC) */
 
 #endif /* __KERNEL __ */
 
diff --git a/drivers/staging/android/sync.c b/drivers/staging/android/sync.c
index 38e5d3b..3d05f662 100644
--- a/drivers/staging/android/sync.c
+++ b/drivers/staging/android/sync.c
@@ -79,27 +79,27 @@
 		container_of(kref, struct sync_timeline, kref);
 	unsigned long flags;
 
-	if (obj->ops->release_obj)
-		obj->ops->release_obj(obj);
-
 	spin_lock_irqsave(&sync_timeline_list_lock, flags);
 	list_del(&obj->sync_timeline_list);
 	spin_unlock_irqrestore(&sync_timeline_list_lock, flags);
 
+	if (obj->ops->release_obj)
+		obj->ops->release_obj(obj);
+
 	kfree(obj);
 }
 
 void sync_timeline_destroy(struct sync_timeline *obj)
 {
 	obj->destroyed = true;
+	smp_wmb();
 
 	/*
-	 * If this is not the last reference, signal any children
-	 * that their parent is going away.
+	 * signal any children that their parent is going away.
 	 */
+	sync_timeline_signal(obj);
 
-	if (!kref_put(&obj->kref, sync_timeline_free))
-		sync_timeline_signal(obj);
+	kref_put(&obj->kref, sync_timeline_free);
 }
 EXPORT_SYMBOL(sync_timeline_destroy);
 
diff --git a/drivers/staging/bcm/Bcmnet.c b/drivers/staging/bcm/Bcmnet.c
index 8dfdd27..95a2358 100644
--- a/drivers/staging/bcm/Bcmnet.c
+++ b/drivers/staging/bcm/Bcmnet.c
@@ -40,7 +40,7 @@
 }
 
 static u16 bcm_select_queue(struct net_device *dev, struct sk_buff *skb,
-			    void *accel_priv)
+			    void *accel_priv, select_queue_fallback_t fallback)
 {
 	return ClassifyPacket(netdev_priv(dev), skb);
 }
diff --git a/drivers/staging/comedi/drivers.c b/drivers/staging/comedi/drivers.c
index 2460803..5b15033 100644
--- a/drivers/staging/comedi/drivers.c
+++ b/drivers/staging/comedi/drivers.c
@@ -616,8 +616,6 @@
 	ret = driver->auto_attach(dev, context);
 	if (ret >= 0)
 		ret = comedi_device_postconfig(dev);
-	if (ret < 0)
-		comedi_device_detach(dev);
 	mutex_unlock(&dev->mutex);
 
 	if (ret < 0) {
diff --git a/drivers/staging/comedi/drivers/adv_pci1710.c b/drivers/staging/comedi/drivers/adv_pci1710.c
index 593676c..d9ad2c0 100644
--- a/drivers/staging/comedi/drivers/adv_pci1710.c
+++ b/drivers/staging/comedi/drivers/adv_pci1710.c
@@ -494,6 +494,7 @@
 				 struct comedi_insn *insn, unsigned int *data)
 {
 	struct pci1710_private *devpriv = dev->private;
+	unsigned int val;
 	int n, chan, range, ofs;
 
 	chan = CR_CHAN(insn->chanspec);
@@ -509,11 +510,14 @@
 		outw(devpriv->da_ranges, dev->iobase + PCI171x_DAREF);
 		ofs = PCI171x_DA1;
 	}
+	val = devpriv->ao_data[chan];
 
-	for (n = 0; n < insn->n; n++)
-		outw(data[n], dev->iobase + ofs);
+	for (n = 0; n < insn->n; n++) {
+		val = data[n];
+		outw(val, dev->iobase + ofs);
+	}
 
-	devpriv->ao_data[chan] = data[n];
+	devpriv->ao_data[chan] = val;
 
 	return n;
 
@@ -679,6 +683,7 @@
 				 struct comedi_insn *insn, unsigned int *data)
 {
 	struct pci1710_private *devpriv = dev->private;
+	unsigned int val;
 	int n, rangereg, chan;
 
 	chan = CR_CHAN(insn->chanspec);
@@ -688,13 +693,15 @@
 		outb(rangereg, dev->iobase + PCI1720_RANGE);
 		devpriv->da_ranges = rangereg;
 	}
+	val = devpriv->ao_data[chan];
 
 	for (n = 0; n < insn->n; n++) {
-		outw(data[n], dev->iobase + PCI1720_DA0 + (chan << 1));
+		val = data[n];
+		outw(val, dev->iobase + PCI1720_DA0 + (chan << 1));
 		outb(0, dev->iobase + PCI1720_SYNCOUT);	/*  update outputs */
 	}
 
-	devpriv->ao_data[chan] = data[n];
+	devpriv->ao_data[chan] = val;
 
 	return n;
 }
diff --git a/drivers/staging/comedi/drivers/usbduxsigma.c b/drivers/staging/comedi/drivers/usbduxsigma.c
index 3beeb12..88c60b6 100644
--- a/drivers/staging/comedi/drivers/usbduxsigma.c
+++ b/drivers/staging/comedi/drivers/usbduxsigma.c
@@ -48,6 +48,7 @@
 #include <linux/usb.h>
 #include <linux/fcntl.h>
 #include <linux/compiler.h>
+#include <asm/unaligned.h>
 
 #include "comedi_fc.h"
 #include "../comedidev.h"
@@ -792,7 +793,8 @@
 		}
 
 		/* 32 bits big endian from the A/D converter */
-		val = be32_to_cpu(*((uint32_t *)((devpriv->insn_buf) + 1)));
+		val = be32_to_cpu(get_unaligned((uint32_t
+						 *)(devpriv->insn_buf + 1)));
 		val &= 0x00ffffff;	/* strip status byte */
 		val ^= 0x00800000;	/* convert to unsigned */
 
@@ -1357,7 +1359,7 @@
 		return ret;
 
 	/* 32 bits big endian from the A/D converter */
-	val = be32_to_cpu(*((uint32_t *)((devpriv->insn_buf)+1)));
+	val = be32_to_cpu(get_unaligned((uint32_t *)(devpriv->insn_buf + 1)));
 	val &= 0x00ffffff;	/* strip status byte */
 	val ^= 0x00800000;	/* convert to unsigned */
 
diff --git a/drivers/staging/dgrp/dgrp_net_ops.c b/drivers/staging/dgrp/dgrp_net_ops.c
index 1f61b89..33ac7fb 100644
--- a/drivers/staging/dgrp/dgrp_net_ops.c
+++ b/drivers/staging/dgrp/dgrp_net_ops.c
@@ -2232,177 +2232,6 @@
 	return rtn;
 }
 
-/*
- * Common Packet Handling code
- */
-
-static void handle_data_in_packet(struct nd_struct *nd, struct ch_struct *ch,
-				  long dlen, long plen, int n1, u8 *dbuf)
-{
-	char *error;
-	long n;
-	long remain;
-	u8 *buf;
-	u8 *b;
-
-	remain = nd->nd_remain;
-	nd->nd_tx_work = 1;
-
-	/*
-	 *  Otherwise data should appear only when we are
-	 *  in the CS_READY state.
-	 */
-
-	if (ch->ch_state < CS_READY) {
-		error = "Data received before RWIN established";
-		nd->nd_remain = 0;
-		nd->nd_state = NS_SEND_ERROR;
-		nd->nd_error = error;
-	}
-
-	/*
-	 *  Assure that the data received is within the
-	 *  allowable window.
-	 */
-
-	n = (ch->ch_s_rwin - ch->ch_s_rin) & 0xffff;
-
-	if (dlen > n) {
-		error = "Receive data overrun";
-		nd->nd_remain = 0;
-		nd->nd_state = NS_SEND_ERROR;
-		nd->nd_error = error;
-	}
-
-	/*
-	 *  If we received 3 or less characters,
-	 *  assume it is a human typing, and set RTIME
-	 *  to 10 milliseconds.
-	 *
-	 *  If we receive 10 or more characters,
-	 *  assume its not a human typing, and set RTIME
-	 *  to 100 milliseconds.
-	 */
-
-	if (ch->ch_edelay != DGRP_RTIME) {
-		if (ch->ch_rtime != ch->ch_edelay) {
-			ch->ch_rtime = ch->ch_edelay;
-			ch->ch_flag |= CH_PARAM;
-		}
-	} else if (dlen <= 3) {
-		if (ch->ch_rtime != 10) {
-			ch->ch_rtime = 10;
-			ch->ch_flag |= CH_PARAM;
-		}
-	} else {
-		if (ch->ch_rtime != DGRP_RTIME) {
-			ch->ch_rtime = DGRP_RTIME;
-			ch->ch_flag |= CH_PARAM;
-		}
-	}
-
-	/*
-	 *  If a portion of the packet is outside the
-	 *  buffer, shorten the effective length of the
-	 *  data packet to be the amount of data received.
-	 */
-
-	if (remain < plen)
-		dlen -= plen - remain;
-
-	/*
-	 *  Detect if receive flush is now complete.
-	 */
-
-	if ((ch->ch_flag & CH_RX_FLUSH) != 0 &&
-			((ch->ch_flush_seq - nd->nd_seq_out) & SEQ_MASK) >=
-			((nd->nd_seq_in    - nd->nd_seq_out) & SEQ_MASK)) {
-		ch->ch_flag &= ~CH_RX_FLUSH;
-	}
-
-	/*
-	 *  If we are ready to receive, move the data into
-	 *  the receive buffer.
-	 */
-
-	ch->ch_s_rin = (ch->ch_s_rin + dlen) & 0xffff;
-
-	if (ch->ch_state == CS_READY &&
-			(ch->ch_tun.un_open_count != 0) &&
-			(ch->ch_tun.un_flag & UN_CLOSING) == 0 &&
-			(ch->ch_cflag & CF_CREAD) != 0 &&
-			(ch->ch_flag & (CH_BAUD0 | CH_RX_FLUSH)) == 0 &&
-			(ch->ch_send & RR_RX_FLUSH) == 0) {
-
-		if (ch->ch_rin + dlen >= RBUF_MAX) {
-			n = RBUF_MAX - ch->ch_rin;
-
-			memcpy(ch->ch_rbuf + ch->ch_rin, dbuf, n);
-
-			ch->ch_rin = 0;
-			dbuf += n;
-			dlen -= n;
-		}
-
-		memcpy(ch->ch_rbuf + ch->ch_rin, dbuf, dlen);
-
-		ch->ch_rin += dlen;
-
-
-		/*
-		 *  If we are not in fastcook mode, or
-		 *  if there is a fastcook thread
-		 *  waiting for data, send the data to
-		 *  the line discipline.
-		 */
-
-		if ((ch->ch_flag & CH_FAST_READ) == 0 ||
-				ch->ch_inwait != 0) {
-			dgrp_input(ch);
-		}
-
-		/*
-		 *  If there is a read thread waiting
-		 *  in select, and we are in fastcook
-		 *  mode, wake him up.
-		 */
-
-		if (waitqueue_active(&ch->ch_tun.un_tty->read_wait) &&
-				(ch->ch_flag & CH_FAST_READ) != 0)
-			wake_up_interruptible(&ch->ch_tun.un_tty->read_wait);
-
-		/*
-		 * Wake any thread waiting in the
-		 * fastcook loop.
-		 */
-
-		if ((ch->ch_flag & CH_INPUT) != 0) {
-			ch->ch_flag &= ~CH_INPUT;
-			wake_up_interruptible(&ch->ch_flag_wait);
-		}
-	}
-
-	/*
-	 *  Fabricate and insert a data packet header to
-	 *  preced the remaining data when it comes in.
-	 */
-
-	if (remain < plen) {
-		dlen = plen - remain;
-		b = buf;
-
-		b[0] = 0x90 + n1;
-		put_unaligned_be16(dlen, b + 1);
-
-		remain = 3;
-		if (remain > 0 && b != buf)
-			memcpy(buf, b, remain);
-
-		nd->nd_remain = remain;
-		return;
-	}
-}
-
 /**
  * dgrp_receive() -- decode data packets received from the remote PortServer.
  * @nd: pointer to a node structure
@@ -2477,8 +2306,7 @@
 			plen = dlen + 1;
 
 			dbuf = b + 1;
-			handle_data_in_packet(nd, ch, dlen, plen, n1, dbuf);
-			break;
+			goto data;
 
 		/*
 		 *  Process 2-byte header data packet.
@@ -2492,8 +2320,7 @@
 			plen = dlen + 2;
 
 			dbuf = b + 2;
-			handle_data_in_packet(nd, ch, dlen, plen, n1, dbuf);
-			break;
+			goto data;
 
 		/*
 		 *  Process 3-byte header data packet.
@@ -2508,6 +2335,159 @@
 
 			dbuf = b + 3;
 
+		/*
+		 *  Common packet handling code.
+		 */
+
+data:
+			nd->nd_tx_work = 1;
+
+			/*
+			 *  Otherwise data should appear only when we are
+			 *  in the CS_READY state.
+			 */
+
+			if (ch->ch_state < CS_READY) {
+				error = "Data received before RWIN established";
+				goto prot_error;
+			}
+
+			/*
+			 *  Assure that the data received is within the
+			 *  allowable window.
+			 */
+
+			n = (ch->ch_s_rwin - ch->ch_s_rin) & 0xffff;
+
+			if (dlen > n) {
+				error = "Receive data overrun";
+				goto prot_error;
+			}
+
+			/*
+			 *  If we received 3 or less characters,
+			 *  assume it is a human typing, and set RTIME
+			 *  to 10 milliseconds.
+			 *
+			 *  If we receive 10 or more characters,
+			 *  assume it's not a human typing, and set RTIME
+			 *  to 100 milliseconds.
+			 */
+
+			if (ch->ch_edelay != DGRP_RTIME) {
+				if (ch->ch_rtime != ch->ch_edelay) {
+					ch->ch_rtime = ch->ch_edelay;
+					ch->ch_flag |= CH_PARAM;
+				}
+			} else if (dlen <= 3) {
+				if (ch->ch_rtime != 10) {
+					ch->ch_rtime = 10;
+					ch->ch_flag |= CH_PARAM;
+				}
+			} else {
+				if (ch->ch_rtime != DGRP_RTIME) {
+					ch->ch_rtime = DGRP_RTIME;
+					ch->ch_flag |= CH_PARAM;
+				}
+			}
+
+			/*
+			 *  If a portion of the packet is outside the
+			 *  buffer, shorten the effective length of the
+			 *  data packet to be the amount of data received.
+			 */
+
+			if (remain < plen)
+				dlen -= plen - remain;
+
+			/*
+			 *  Detect if receive flush is now complete.
+			 */
+
+			if ((ch->ch_flag & CH_RX_FLUSH) != 0 &&
+			    ((ch->ch_flush_seq - nd->nd_seq_out) & SEQ_MASK) >=
+			    ((nd->nd_seq_in    - nd->nd_seq_out) & SEQ_MASK)) {
+				ch->ch_flag &= ~CH_RX_FLUSH;
+			}
+
+			/*
+			 *  If we are ready to receive, move the data into
+			 *  the receive buffer.
+			 */
+
+			ch->ch_s_rin = (ch->ch_s_rin + dlen) & 0xffff;
+
+			if (ch->ch_state == CS_READY &&
+			    (ch->ch_tun.un_open_count != 0) &&
+			    (ch->ch_tun.un_flag & UN_CLOSING) == 0 &&
+			    (ch->ch_cflag & CF_CREAD) != 0 &&
+			    (ch->ch_flag & (CH_BAUD0 | CH_RX_FLUSH)) == 0 &&
+			    (ch->ch_send & RR_RX_FLUSH) == 0) {
+
+				if (ch->ch_rin + dlen >= RBUF_MAX) {
+					n = RBUF_MAX - ch->ch_rin;
+
+					memcpy(ch->ch_rbuf + ch->ch_rin, dbuf, n);
+
+					ch->ch_rin = 0;
+					dbuf += n;
+					dlen -= n;
+				}
+
+				memcpy(ch->ch_rbuf + ch->ch_rin, dbuf, dlen);
+
+				ch->ch_rin += dlen;
+
+
+				/*
+				 *  If we are not in fastcook mode, or
+				 *  if there is a fastcook thread
+				 *  waiting for data, send the data to
+				 *  the line discipline.
+				 */
+
+				if ((ch->ch_flag & CH_FAST_READ) == 0 ||
+				    ch->ch_inwait != 0) {
+					dgrp_input(ch);
+				}
+
+				/*
+				 *  If there is a read thread waiting
+				 *  in select, and we are in fastcook
+				 *  mode, wake him up.
+				 */
+
+				if (waitqueue_active(&ch->ch_tun.un_tty->read_wait) &&
+				    (ch->ch_flag & CH_FAST_READ) != 0)
+					wake_up_interruptible(&ch->ch_tun.un_tty->read_wait);
+
+				/*
+				 * Wake any thread waiting in the
+				 * fastcook loop.
+				 */
+
+				if ((ch->ch_flag & CH_INPUT) != 0) {
+					ch->ch_flag &= ~CH_INPUT;
+
+					wake_up_interruptible(&ch->ch_flag_wait);
+				}
+			}
+
+			/*
+			 *  Fabricate and insert a data packet header to
+			 *  precede the remaining data when it comes in.
+			 */
+
+			if (remain < plen) {
+				dlen = plen - remain;
+				b = buf;
+
+				b[0] = 0x90 + n1;
+				put_unaligned_be16(dlen, b + 1);
+
+				remain = 3;
+				goto done;
+			}
 			break;
 
 		/*
diff --git a/drivers/staging/gdm72xx/gdm_usb.c b/drivers/staging/gdm72xx/gdm_usb.c
index f8788bf..cdeffe7 100644
--- a/drivers/staging/gdm72xx/gdm_usb.c
+++ b/drivers/staging/gdm72xx/gdm_usb.c
@@ -635,11 +635,14 @@
 #endif /* CONFIG_WIMAX_GDM72XX_USB_PM */
 
 	ret = register_wimax_device(phy_dev, &intf->dev);
+	if (ret)
+		release_usb(udev);
 
 out:
 	if (ret) {
 		kfree(phy_dev);
 		kfree(udev);
+		usb_put_dev(usbdev);
 	} else {
 		usb_set_intfdata(intf, phy_dev);
 	}
diff --git a/drivers/staging/iio/Documentation/iio_utils.h b/drivers/staging/iio/Documentation/iio_utils.h
index 35154d6..c9fedb7 100644
--- a/drivers/staging/iio/Documentation/iio_utils.h
+++ b/drivers/staging/iio/Documentation/iio_utils.h
@@ -77,7 +77,6 @@
 	uint64_t mask;
 	unsigned be;
 	unsigned is_signed;
-	unsigned enabled;
 	unsigned location;
 };
 
@@ -335,6 +334,7 @@
 	while (ent = readdir(dp), ent != NULL) {
 		if (strcmp(ent->d_name + strlen(ent->d_name) - strlen("_en"),
 			   "_en") == 0) {
+			int current_enabled = 0;
 			current = &(*ci_array)[count++];
 			ret = asprintf(&filename,
 				       "%s/%s", scan_el_dir, ent->d_name);
@@ -350,10 +350,10 @@
 				ret = -errno;
 				goto error_cleanup_array;
 			}
-			fscanf(sysfsfp, "%u", &current->enabled);
+			fscanf(sysfsfp, "%u", &current_enabled);
 			fclose(sysfsfp);
 
-			if (!current->enabled) {
+			if (!current_enabled) {
 				free(filename);
 				count--;
 				continue;
diff --git a/drivers/staging/iio/adc/ad799x_core.c b/drivers/staging/iio/adc/ad799x_core.c
index 5ea3641..5708ffc 100644
--- a/drivers/staging/iio/adc/ad799x_core.c
+++ b/drivers/staging/iio/adc/ad799x_core.c
@@ -393,7 +393,7 @@
 	}, {
 		.type = IIO_EV_TYPE_THRESH,
 		.dir = IIO_EV_DIR_FALLING,
-		.mask_separate = BIT(IIO_EV_INFO_VALUE),
+		.mask_separate = BIT(IIO_EV_INFO_VALUE) |
 			BIT(IIO_EV_INFO_ENABLE),
 	}, {
 		.type = IIO_EV_TYPE_THRESH,
@@ -409,7 +409,13 @@
 	.info_mask_separate = BIT(IIO_CHAN_INFO_RAW), \
 	.info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE), \
 	.scan_index = (_index), \
-	.scan_type = IIO_ST('u', _realbits, 16, 12 - (_realbits)), \
+	.scan_type = { \
+		.sign = 'u', \
+		.realbits = (_realbits), \
+		.storagebits = 16, \
+		.shift = 12 - (_realbits), \
+		.endianness = IIO_BE, \
+	}, \
 	.event_spec = _ev_spec, \
 	.num_event_specs = _num_ev_spec, \
 }
@@ -588,7 +594,8 @@
 	return 0;
 
 error_free_irq:
-	free_irq(client->irq, indio_dev);
+	if (client->irq > 0)
+		free_irq(client->irq, indio_dev);
 error_cleanup_ring:
 	ad799x_ring_cleanup(indio_dev);
 error_disable_reg:
diff --git a/drivers/staging/iio/adc/mxs-lradc.c b/drivers/staging/iio/adc/mxs-lradc.c
index df71669..7fc66a6 100644
--- a/drivers/staging/iio/adc/mxs-lradc.c
+++ b/drivers/staging/iio/adc/mxs-lradc.c
@@ -1035,8 +1035,6 @@
 SHOW_SCALE_AVAILABLE_ATTR(5);
 SHOW_SCALE_AVAILABLE_ATTR(6);
 SHOW_SCALE_AVAILABLE_ATTR(7);
-SHOW_SCALE_AVAILABLE_ATTR(8);
-SHOW_SCALE_AVAILABLE_ATTR(9);
 SHOW_SCALE_AVAILABLE_ATTR(10);
 SHOW_SCALE_AVAILABLE_ATTR(11);
 SHOW_SCALE_AVAILABLE_ATTR(12);
@@ -1053,8 +1051,6 @@
 	&iio_dev_attr_in_voltage5_scale_available.dev_attr.attr,
 	&iio_dev_attr_in_voltage6_scale_available.dev_attr.attr,
 	&iio_dev_attr_in_voltage7_scale_available.dev_attr.attr,
-	&iio_dev_attr_in_voltage8_scale_available.dev_attr.attr,
-	&iio_dev_attr_in_voltage9_scale_available.dev_attr.attr,
 	&iio_dev_attr_in_voltage10_scale_available.dev_attr.attr,
 	&iio_dev_attr_in_voltage11_scale_available.dev_attr.attr,
 	&iio_dev_attr_in_voltage12_scale_available.dev_attr.attr,
@@ -1613,7 +1609,7 @@
 			 * of the array.
 			 */
 			scale_uv = ((u64)lradc->vref_mv[i] * 100000000) >>
-				   (iio->channels[i].scan_type.realbits - s);
+				   (LRADC_RESOLUTION - s);
 			lradc->scale_avail[i][s].nano =
 					do_div(scale_uv, 100000000) * 10;
 			lradc->scale_avail[i][s].integer = scale_uv;
diff --git a/drivers/staging/iio/impedance-analyzer/ad5933.c b/drivers/staging/iio/impedance-analyzer/ad5933.c
index 0a4298b..2b96665 100644
--- a/drivers/staging/iio/impedance-analyzer/ad5933.c
+++ b/drivers/staging/iio/impedance-analyzer/ad5933.c
@@ -629,7 +629,7 @@
 	struct iio_buffer *buffer;
 
 	buffer = iio_kfifo_allocate(indio_dev);
-	if (buffer)
+	if (!buffer)
 		return -ENOMEM;
 
 	iio_device_attach_buffer(indio_dev, buffer);
diff --git a/drivers/staging/imx-drm/imx-drm-core.c b/drivers/staging/imx-drm/imx-drm-core.c
index 09ef5fb8..236ed66 100644
--- a/drivers/staging/imx-drm/imx-drm-core.c
+++ b/drivers/staging/imx-drm/imx-drm-core.c
@@ -88,9 +88,9 @@
 
 	imx_drm_device_put();
 
-	drm_vblank_cleanup(imxdrm->drm);
-	drm_kms_helper_poll_fini(imxdrm->drm);
-	drm_mode_config_cleanup(imxdrm->drm);
+	drm_vblank_cleanup(drm);
+	drm_kms_helper_poll_fini(drm);
+	drm_mode_config_cleanup(drm);
 
 	return 0;
 }
@@ -142,19 +142,19 @@
 
 int imx_drm_crtc_vblank_get(struct imx_drm_crtc *imx_drm_crtc)
 {
-	return drm_vblank_get(imx_drm_crtc->imxdrm->drm, imx_drm_crtc->pipe);
+	return drm_vblank_get(imx_drm_crtc->crtc->dev, imx_drm_crtc->pipe);
 }
 EXPORT_SYMBOL_GPL(imx_drm_crtc_vblank_get);
 
 void imx_drm_crtc_vblank_put(struct imx_drm_crtc *imx_drm_crtc)
 {
-	drm_vblank_put(imx_drm_crtc->imxdrm->drm, imx_drm_crtc->pipe);
+	drm_vblank_put(imx_drm_crtc->crtc->dev, imx_drm_crtc->pipe);
 }
 EXPORT_SYMBOL_GPL(imx_drm_crtc_vblank_put);
 
 void imx_drm_handle_vblank(struct imx_drm_crtc *imx_drm_crtc)
 {
-	drm_handle_vblank(imx_drm_crtc->imxdrm->drm, imx_drm_crtc->pipe);
+	drm_handle_vblank(imx_drm_crtc->crtc->dev, imx_drm_crtc->pipe);
 }
 EXPORT_SYMBOL_GPL(imx_drm_handle_vblank);
 
@@ -370,29 +370,6 @@
 }
 
 /*
- * register a crtc to the drm core
- */
-static int imx_drm_crtc_register(struct imx_drm_crtc *imx_drm_crtc)
-{
-	struct imx_drm_device *imxdrm = __imx_drm_device();
-	int ret;
-
-	ret = drm_mode_crtc_set_gamma_size(imx_drm_crtc->crtc, 256);
-	if (ret)
-		return ret;
-
-	drm_crtc_helper_add(imx_drm_crtc->crtc,
-			imx_drm_crtc->imx_drm_helper_funcs.crtc_helper_funcs);
-
-	drm_crtc_init(imxdrm->drm, imx_drm_crtc->crtc,
-			imx_drm_crtc->imx_drm_helper_funcs.crtc_funcs);
-
-	drm_mode_group_reinit(imxdrm->drm);
-
-	return 0;
-}
-
-/*
  * Called by the CRTC driver when all CRTCs are registered. This
  * puts all the pieces together and initializes the driver.
  * Once this is called no more CRTCs can be registered since
@@ -424,15 +401,15 @@
 
 	mutex_lock(&imxdrm->mutex);
 
-	drm_kms_helper_poll_init(imxdrm->drm);
+	drm_kms_helper_poll_init(drm);
 
 	/* setup the grouping for the legacy output */
-	ret = drm_mode_group_init_legacy_group(imxdrm->drm,
-			&imxdrm->drm->primary->mode_group);
+	ret = drm_mode_group_init_legacy_group(drm,
+			&drm->primary->mode_group);
 	if (ret)
 		goto err_kms;
 
-	ret = drm_vblank_init(imxdrm->drm, MAX_CRTC);
+	ret = drm_vblank_init(drm, MAX_CRTC);
 	if (ret)
 		goto err_kms;
 
@@ -441,7 +418,7 @@
 	 * by drm timer once a current process gives up ownership of
 	 * vblank event.(after drm_vblank_put function is called)
 	 */
-	imxdrm->drm->vblank_disable_allowed = true;
+	drm->vblank_disable_allowed = true;
 
 	if (!imx_drm_device_get()) {
 		ret = -EINVAL;
@@ -536,10 +513,18 @@
 
 	*new_crtc = imx_drm_crtc;
 
-	ret = imx_drm_crtc_register(imx_drm_crtc);
+	ret = drm_mode_crtc_set_gamma_size(imx_drm_crtc->crtc, 256);
 	if (ret)
 		goto err_register;
 
+	drm_crtc_helper_add(crtc,
+			imx_drm_crtc->imx_drm_helper_funcs.crtc_helper_funcs);
+
+	drm_crtc_init(imxdrm->drm, crtc,
+			imx_drm_crtc->imx_drm_helper_funcs.crtc_funcs);
+
+	drm_mode_group_reinit(imxdrm->drm);
+
 	imx_drm_update_possible_crtcs();
 
 	mutex_unlock(&imxdrm->mutex);
diff --git a/drivers/staging/imx-drm/imx-hdmi.c b/drivers/staging/imx-drm/imx-hdmi.c
index f3a1f5e..62ce0e8 100644
--- a/drivers/staging/imx-drm/imx-hdmi.c
+++ b/drivers/staging/imx-drm/imx-hdmi.c
@@ -16,6 +16,7 @@
 #include <linux/delay.h>
 #include <linux/err.h>
 #include <linux/clk.h>
+#include <linux/hdmi.h>
 #include <linux/regmap.h>
 #include <linux/mfd/syscon.h>
 #include <linux/mfd/syscon/imx6q-iomuxc-gpr.h>
@@ -52,11 +53,6 @@
 	YCbCr422_12B = 0x12,
 };
 
-enum hdmi_colorimetry {
-	ITU601,
-	ITU709,
-};
-
 enum imx_hdmi_devtype {
 	IMX6Q_HDMI,
 	IMX6DL_HDMI,
@@ -489,12 +485,12 @@
 
 	if (is_color_space_conversion(hdmi)) {
 		if (hdmi->hdmi_data.enc_out_format == RGB) {
-			if (hdmi->hdmi_data.colorimetry == ITU601)
+			if (hdmi->hdmi_data.colorimetry == HDMI_COLORIMETRY_ITU_601)
 				csc_coeff = &csc_coeff_rgb_out_eitu601;
 			else
 				csc_coeff = &csc_coeff_rgb_out_eitu709;
 		} else if (hdmi->hdmi_data.enc_in_format == RGB) {
-			if (hdmi->hdmi_data.colorimetry == ITU601)
+			if (hdmi->hdmi_data.colorimetry == HDMI_COLORIMETRY_ITU_601)
 				csc_coeff = &csc_coeff_rgb_in_eitu601;
 			else
 				csc_coeff = &csc_coeff_rgb_in_eitu709;
@@ -1140,16 +1136,16 @@
 	/* Set up colorimetry */
 	if (hdmi->hdmi_data.enc_out_format == XVYCC444) {
 		colorimetry = HDMI_FC_AVICONF1_COLORIMETRY_EXTENDED_INFO;
-		if (hdmi->hdmi_data.colorimetry == ITU601)
+		if (hdmi->hdmi_data.colorimetry == HDMI_COLORIMETRY_ITU_601)
 			ext_colorimetry =
 				HDMI_FC_AVICONF2_EXT_COLORIMETRY_XVYCC601;
-		else /* hdmi->hdmi_data.colorimetry == ITU709 */
+		else /*hdmi->hdmi_data.colorimetry == HDMI_COLORIMETRY_ITU_709*/
 			ext_colorimetry =
 				HDMI_FC_AVICONF2_EXT_COLORIMETRY_XVYCC709;
 	} else if (hdmi->hdmi_data.enc_out_format != RGB) {
-		if (hdmi->hdmi_data.colorimetry == ITU601)
+		if (hdmi->hdmi_data.colorimetry == HDMI_COLORIMETRY_ITU_601)
 			colorimetry = HDMI_FC_AVICONF1_COLORIMETRY_SMPTE;
-		else /* hdmi->hdmi_data.colorimetry == ITU709 */
+		else /*hdmi->hdmi_data.colorimetry == HDMI_COLORIMETRY_ITU_709*/
 			colorimetry = HDMI_FC_AVICONF1_COLORIMETRY_ITUR;
 		ext_colorimetry = HDMI_FC_AVICONF2_EXT_COLORIMETRY_XVYCC601;
 	} else { /* Carries no data */
@@ -1379,9 +1375,9 @@
 		(hdmi->vic == 21) || (hdmi->vic == 22) ||
 		(hdmi->vic == 2) || (hdmi->vic == 3) ||
 		(hdmi->vic == 17) || (hdmi->vic == 18))
-		hdmi->hdmi_data.colorimetry = ITU601;
+		hdmi->hdmi_data.colorimetry = HDMI_COLORIMETRY_ITU_601;
 	else
-		hdmi->hdmi_data.colorimetry = ITU709;
+		hdmi->hdmi_data.colorimetry = HDMI_COLORIMETRY_ITU_709;
 
 	if ((hdmi->vic == 10) || (hdmi->vic == 11) ||
 		(hdmi->vic == 12) || (hdmi->vic == 13) ||
diff --git a/drivers/staging/lustre/TODO b/drivers/staging/lustre/TODO
index 22742d6..0a2b6cb 100644
--- a/drivers/staging/lustre/TODO
+++ b/drivers/staging/lustre/TODO
@@ -9,5 +9,6 @@
 * Other minor misc cleanups...
 
 Please send any patches to Greg Kroah-Hartman <greg@kroah.com>, Andreas Dilger
-<andreas.dilger@intel.com> and Peng Tao <tao.peng@emc.com>. CCing
-hpdd-discuss <hpdd-discuss@lists.01.org> would be great too.
+<andreas.dilger@intel.com>, Oleg Drokin <oleg.drokin@intel.com> and
+Peng Tao <tao.peng@emc.com>. CCing hpdd-discuss <hpdd-discuss@lists.01.org>
+would be great too.
diff --git a/drivers/staging/lustre/include/linux/libcfs/libcfs_kernelcomm.h b/drivers/staging/lustre/include/linux/libcfs/libcfs_kernelcomm.h
index 596a15f..037ae8a 100644
--- a/drivers/staging/lustre/include/linux/libcfs/libcfs_kernelcomm.h
+++ b/drivers/staging/lustre/include/linux/libcfs/libcfs_kernelcomm.h
@@ -61,6 +61,8 @@
 	__u16 kuc_msglen;     /* Including header */
 } __attribute__((aligned(sizeof(__u64))));
 
+#define KUC_CHANGELOG_MSG_MAXSIZE (sizeof(struct kuc_hdr)+CR_MAXSIZE)
+
 #define KUC_MAGIC  0x191C /*Lustre9etLinC */
 #define KUC_FL_BLOCK 0x01   /* Wait for send */
 
diff --git a/drivers/staging/lustre/include/linux/libcfs/libcfs_private.h b/drivers/staging/lustre/include/linux/libcfs/libcfs_private.h
index d0d942c..dddccca1 100644
--- a/drivers/staging/lustre/include/linux/libcfs/libcfs_private.h
+++ b/drivers/staging/lustre/include/linux/libcfs/libcfs_private.h
@@ -120,7 +120,7 @@
 do {									    \
 	LASSERT(!in_interrupt() ||					    \
 		((size) <= LIBCFS_VMALLOC_SIZE &&			    \
-		 ((mask) & GFP_ATOMIC)) != 0);			    \
+		 ((mask) & __GFP_WAIT) == 0));				    \
 } while (0)
 
 #define LIBCFS_ALLOC_POST(ptr, size)					    \
diff --git a/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd_cb.c b/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd_cb.c
index 9364863..6f58ead 100644
--- a/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd_cb.c
+++ b/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd_cb.c
@@ -529,7 +529,7 @@
 {
 	struct page *page;
 
-	if (is_vmalloc_addr(vaddr)) {
+	if (is_vmalloc_addr((void *)vaddr)) {
 		page = vmalloc_to_page ((void *)vaddr);
 		LASSERT (page != NULL);
 		return page;
diff --git a/drivers/staging/lustre/lnet/klnds/socklnd/socklnd_cb.c b/drivers/staging/lustre/lnet/klnds/socklnd/socklnd_cb.c
index 68a4f52..b7b53b5 100644
--- a/drivers/staging/lustre/lnet/klnds/socklnd/socklnd_cb.c
+++ b/drivers/staging/lustre/lnet/klnds/socklnd/socklnd_cb.c
@@ -924,7 +924,7 @@
 int
 ksocknal_send(lnet_ni_t *ni, void *private, lnet_msg_t *lntmsg)
 {
-	int	       mpflag = 0;
+	int	       mpflag = 1;
 	int	       type = lntmsg->msg_type;
 	lnet_process_id_t target = lntmsg->msg_target;
 	unsigned int      payload_niov = lntmsg->msg_niov;
@@ -993,8 +993,9 @@
 
 	/* The first fragment will be set later in pro_pack */
 	rc = ksocknal_launch_packet(ni, tx, target);
-	if (lntmsg->msg_vmflush)
+	if (!mpflag)
 		cfs_memory_pressure_restore(mpflag);
+
 	if (rc == 0)
 		return (0);
 
diff --git a/drivers/staging/lustre/lustre/include/lustre/lustre_user.h b/drivers/staging/lustre/lustre/include/lustre/lustre_user.h
index 6b6c0240..7893d83 100644
--- a/drivers/staging/lustre/lustre/include/lustre/lustre_user.h
+++ b/drivers/staging/lustre/lustre/include/lustre/lustre_user.h
@@ -760,7 +760,8 @@
 	*flags |= (error << CLF_HSM_ERR_L);
 }
 
-#define CR_MAXSIZE cfs_size_round(2*NAME_MAX + 1 + sizeof(struct changelog_rec))
+#define CR_MAXSIZE cfs_size_round(2*NAME_MAX + 1 + \
+				  sizeof(struct changelog_ext_rec))
 
 struct changelog_rec {
 	__u16		 cr_namelen;
diff --git a/drivers/staging/lustre/lustre/llite/dir.c b/drivers/staging/lustre/lustre/llite/dir.c
index 22d0acc9..52b7731 100644
--- a/drivers/staging/lustre/lustre/llite/dir.c
+++ b/drivers/staging/lustre/lustre/llite/dir.c
@@ -1086,7 +1086,7 @@
 		break;
 	case Q_GETQUOTA:
 		if (((type == USRQUOTA &&
-		      uid_eq(current_euid(), make_kuid(&init_user_ns, id))) ||
+		      !uid_eq(current_euid(), make_kuid(&init_user_ns, id))) ||
 		     (type == GRPQUOTA &&
 		      !in_egroup_p(make_kgid(&init_user_ns, id)))) &&
 		    (!cfs_capable(CFS_CAP_SYS_ADMIN) ||
diff --git a/drivers/staging/lustre/lustre/llite/lloop.c b/drivers/staging/lustre/lustre/llite/lloop.c
index 5338e8d..0718905 100644
--- a/drivers/staging/lustre/lustre/llite/lloop.c
+++ b/drivers/staging/lustre/lustre/llite/lloop.c
@@ -194,10 +194,10 @@
 	struct cl_object     *obj = ll_i2info(inode)->lli_clob;
 	pgoff_t	       offset;
 	int		   ret;
-	int		   i;
 	int		   rw;
 	obd_count	     page_count = 0;
-	struct bio_vec       *bvec;
+	struct bio_vec       bvec;
+	struct bvec_iter   iter;
 	struct bio	   *bio;
 	ssize_t	       bytes;
 
@@ -220,15 +220,15 @@
 	for (bio = head; bio != NULL; bio = bio->bi_next) {
 		LASSERT(rw == bio->bi_rw);
 
-		offset = (pgoff_t)(bio->bi_sector << 9) + lo->lo_offset;
-		bio_for_each_segment(bvec, bio, i) {
-			BUG_ON(bvec->bv_offset != 0);
-			BUG_ON(bvec->bv_len != PAGE_CACHE_SIZE);
+		offset = (pgoff_t)(bio->bi_iter.bi_sector << 9) + lo->lo_offset;
+		bio_for_each_segment(bvec, bio, iter) {
+			BUG_ON(bvec.bv_offset != 0);
+			BUG_ON(bvec.bv_len != PAGE_CACHE_SIZE);
 
-			pages[page_count] = bvec->bv_page;
+			pages[page_count] = bvec.bv_page;
 			offsets[page_count] = offset;
 			page_count++;
-			offset += bvec->bv_len;
+			offset += bvec.bv_len;
 		}
 		LASSERT(page_count <= LLOOP_MAX_SEGMENTS);
 	}
@@ -313,7 +313,8 @@
 	bio = &lo->lo_bio;
 	while (*bio && (*bio)->bi_rw == rw) {
 		CDEBUG(D_INFO, "bio sector %llu size %u count %u vcnt%u \n",
-		       (unsigned long long)(*bio)->bi_sector, (*bio)->bi_size,
+		       (unsigned long long)(*bio)->bi_iter.bi_sector,
+		       (*bio)->bi_iter.bi_size,
 		       page_count, (*bio)->bi_vcnt);
 		if (page_count + (*bio)->bi_vcnt > LLOOP_MAX_SEGMENTS)
 			break;
@@ -347,7 +348,8 @@
 		goto err;
 
 	CDEBUG(D_INFO, "submit bio sector %llu size %u\n",
-	       (unsigned long long)old_bio->bi_sector, old_bio->bi_size);
+	       (unsigned long long)old_bio->bi_iter.bi_sector,
+	       old_bio->bi_iter.bi_size);
 
 	spin_lock_irq(&lo->lo_lock);
 	inactive = (lo->lo_state != LLOOP_BOUND);
@@ -367,7 +369,7 @@
 	loop_add_bio(lo, old_bio);
 	return;
 err:
-	cfs_bio_io_error(old_bio, old_bio->bi_size);
+	cfs_bio_io_error(old_bio, old_bio->bi_iter.bi_size);
 }
 
 
@@ -378,7 +380,7 @@
 	while (bio) {
 		struct bio *tmp = bio->bi_next;
 		bio->bi_next = NULL;
-		cfs_bio_endio(bio, bio->bi_size, ret);
+		cfs_bio_endio(bio, bio->bi_iter.bi_size, ret);
 		bio = tmp;
 	}
 }
diff --git a/drivers/staging/lustre/lustre/mdc/mdc_request.c b/drivers/staging/lustre/lustre/mdc/mdc_request.c
index d1ad91c3..8301392 100644
--- a/drivers/staging/lustre/lustre/mdc/mdc_request.c
+++ b/drivers/staging/lustre/lustre/mdc/mdc_request.c
@@ -1430,7 +1430,7 @@
 {
 	struct kuc_hdr *lh = (struct kuc_hdr *)buf;
 
-	LASSERT(len <= CR_MAXSIZE);
+	LASSERT(len <= KUC_CHANGELOG_MSG_MAXSIZE);
 
 	lh->kuc_magic = KUC_MAGIC;
 	lh->kuc_transport = KUC_TRANSPORT_CHANGELOG;
@@ -1503,7 +1503,7 @@
 	CDEBUG(D_CHANGELOG, "changelog to fp=%p start "LPU64"\n",
 	       cs->cs_fp, cs->cs_startrec);
 
-	OBD_ALLOC(cs->cs_buf, CR_MAXSIZE);
+	OBD_ALLOC(cs->cs_buf, KUC_CHANGELOG_MSG_MAXSIZE);
 	if (cs->cs_buf == NULL)
 		GOTO(out, rc = -ENOMEM);
 
@@ -1540,7 +1540,7 @@
 	if (ctxt)
 		llog_ctxt_put(ctxt);
 	if (cs->cs_buf)
-		OBD_FREE(cs->cs_buf, CR_MAXSIZE);
+		OBD_FREE(cs->cs_buf, KUC_CHANGELOG_MSG_MAXSIZE);
 	OBD_FREE_PTR(cs);
 	return rc;
 }
diff --git a/drivers/staging/media/Kconfig b/drivers/staging/media/Kconfig
index 46f1e61..22b0c9d 100644
--- a/drivers/staging/media/Kconfig
+++ b/drivers/staging/media/Kconfig
@@ -21,6 +21,8 @@
 # Please keep them in alphabetic order
 source "drivers/staging/media/as102/Kconfig"
 
+source "drivers/staging/media/bcm2048/Kconfig"
+
 source "drivers/staging/media/cxd2099/Kconfig"
 
 source "drivers/staging/media/davinci_vpfe/Kconfig"
@@ -31,8 +33,14 @@
 
 source "drivers/staging/media/msi3101/Kconfig"
 
+source "drivers/staging/media/omap24xx/Kconfig"
+
+source "drivers/staging/media/sn9c102/Kconfig"
+
 source "drivers/staging/media/solo6x10/Kconfig"
 
+source "drivers/staging/media/omap4iss/Kconfig"
+
 # Keep LIRC at the end, as it has sub-menus
 source "drivers/staging/media/lirc/Kconfig"
 
diff --git a/drivers/staging/media/Makefile b/drivers/staging/media/Makefile
index eb7f30b..bedc62a 100644
--- a/drivers/staging/media/Makefile
+++ b/drivers/staging/media/Makefile
@@ -1,4 +1,5 @@
 obj-$(CONFIG_DVB_AS102)		+= as102/
+obj-$(CONFIG_I2C_BCM2048)	+= bcm2048/
 obj-$(CONFIG_DVB_CXD2099)	+= cxd2099/
 obj-$(CONFIG_LIRC_STAGING)	+= lirc/
 obj-$(CONFIG_SOLO6X10)		+= solo6x10/
@@ -6,3 +7,7 @@
 obj-$(CONFIG_VIDEO_GO7007)	+= go7007/
 obj-$(CONFIG_USB_MSI3101)	+= msi3101/
 obj-$(CONFIG_VIDEO_DM365_VPFE)	+= davinci_vpfe/
+obj-$(CONFIG_VIDEO_OMAP4)	+= omap4iss/
+obj-$(CONFIG_USB_SN9C102)       += sn9c102/
+obj-$(CONFIG_VIDEO_OMAP2)       += omap24xx/
+obj-$(CONFIG_VIDEO_TCM825X)     += omap24xx/
diff --git a/drivers/staging/media/as102/as102_drv.c b/drivers/staging/media/as102/as102_drv.c
index 8b7bb95..09d64cd 100644
--- a/drivers/staging/media/as102/as102_drv.c
+++ b/drivers/staging/media/as102/as102_drv.c
@@ -111,8 +111,6 @@
 	struct as10x_bus_adapter_t *bus_adap = &dev->bus_adap;
 	int ret = -EFAULT;
 
-	ENTER();
-
 	if (mutex_lock_interruptible(&dev->bus_adap.lock)) {
 		dprintk(debug, "mutex_lock_interruptible(lock) failed !\n");
 		return -EBUSY;
@@ -133,15 +131,14 @@
 		filter.pid = pid;
 
 		ret = as10x_cmd_add_PID_filter(bus_adap, &filter);
-		dprintk(debug, "ADD_PID_FILTER([%02d -> %02d], 0x%04x) ret = %d\n",
+		dprintk(debug,
+			"ADD_PID_FILTER([%02d -> %02d], 0x%04x) ret = %d\n",
 			index, filter.idx, filter.pid, ret);
 		break;
 	}
 	}
 
 	mutex_unlock(&dev->bus_adap.lock);
-
-	LEAVE();
 	return ret;
 }
 
@@ -151,8 +148,6 @@
 	struct dvb_demux *demux = dvbdmxfeed->demux;
 	struct as102_dev_t *as102_dev = demux->priv;
 
-	ENTER();
-
 	if (mutex_lock_interruptible(&as102_dev->sem))
 		return -ERESTARTSYS;
 
@@ -164,7 +159,6 @@
 		ret = as102_start_stream(as102_dev);
 
 	mutex_unlock(&as102_dev->sem);
-	LEAVE();
 	return ret;
 }
 
@@ -173,8 +167,6 @@
 	struct dvb_demux *demux = dvbdmxfeed->demux;
 	struct as102_dev_t *as102_dev = demux->priv;
 
-	ENTER();
-
 	if (mutex_lock_interruptible(&as102_dev->sem))
 		return -ERESTARTSYS;
 
@@ -186,7 +178,6 @@
 				 dvbdmxfeed->pid, 0);
 
 	mutex_unlock(&as102_dev->sem);
-	LEAVE();
 	return 0;
 }
 
diff --git a/drivers/staging/media/as102/as102_drv.h b/drivers/staging/media/as102/as102_drv.h
index b0e5a23..a06837d 100644
--- a/drivers/staging/media/as102/as102_drv.h
+++ b/drivers/staging/media/as102/as102_drv.h
@@ -38,14 +38,6 @@
 		printk(args);	\
 	} } while (0)
 
-#ifdef TRACE
-#define ENTER()	pr_debug(">> enter %s\n", __func__)
-#define LEAVE()	pr_debug("<< leave %s\n", __func__)
-#else
-#define ENTER()
-#define LEAVE()
-#endif
-
 #define AS102_DEVICE_MAJOR	192
 
 #define AS102_USB_BUF_SIZE	512
diff --git a/drivers/staging/media/as102/as102_fe.c b/drivers/staging/media/as102/as102_fe.c
index 9ce8c9d..b686b76 100644
--- a/drivers/staging/media/as102/as102_fe.c
+++ b/drivers/staging/media/as102/as102_fe.c
@@ -34,8 +34,6 @@
 	struct as102_dev_t *dev;
 	struct as10x_tune_args tune_args = { 0 };
 
-	ENTER();
-
 	dev = (struct as102_dev_t *) fe->tuner_priv;
 	if (dev == NULL)
 		return -ENODEV;
@@ -52,7 +50,6 @@
 
 	mutex_unlock(&dev->bus_adap.lock);
 
-	LEAVE();
 	return (ret < 0) ? -EINVAL : 0;
 }
 
@@ -63,8 +60,6 @@
 	struct as102_dev_t *dev;
 	struct as10x_tps tps = { 0 };
 
-	ENTER();
-
 	dev = (struct as102_dev_t *) fe->tuner_priv;
 	if (dev == NULL)
 		return -EINVAL;
@@ -80,13 +75,11 @@
 
 	mutex_unlock(&dev->bus_adap.lock);
 
-	LEAVE();
 	return (ret < 0) ? -EINVAL : 0;
 }
 
 static int as102_fe_get_tune_settings(struct dvb_frontend *fe,
 			struct dvb_frontend_tune_settings *settings) {
-	ENTER();
 
 #if 0
 	dprintk(debug, "step_size    = %d\n", settings->step_size);
@@ -97,7 +90,6 @@
 
 	settings->min_delay_ms = 1000;
 
-	LEAVE();
 	return 0;
 }
 
@@ -108,8 +100,6 @@
 	struct as102_dev_t *dev;
 	struct as10x_tune_status tstate = { 0 };
 
-	ENTER();
-
 	dev = (struct as102_dev_t *) fe->tuner_priv;
 	if (dev == NULL)
 		return -ENODEV;
@@ -151,8 +141,8 @@
 		if (as10x_cmd_get_demod_stats(&dev->bus_adap,
 			(struct as10x_demod_stats *) &dev->demod_stats) < 0) {
 			memset(&dev->demod_stats, 0, sizeof(dev->demod_stats));
-			dprintk(debug, "as10x_cmd_get_demod_stats failed "
-				"(probably not tuned)\n");
+			dprintk(debug,
+				"as10x_cmd_get_demod_stats failed (probably not tuned)\n");
 		} else {
 			dprintk(debug,
 				"demod status: fc: 0x%08x, bad fc: 0x%08x, "
@@ -168,7 +158,6 @@
 
 out:
 	mutex_unlock(&dev->bus_adap.lock);
-	LEAVE();
 	return ret;
 }
 
@@ -183,15 +172,12 @@
 {
 	struct as102_dev_t *dev;
 
-	ENTER();
-
 	dev = (struct as102_dev_t *) fe->tuner_priv;
 	if (dev == NULL)
 		return -ENODEV;
 
 	*snr = dev->demod_stats.mer;
 
-	LEAVE();
 	return 0;
 }
 
@@ -199,15 +185,12 @@
 {
 	struct as102_dev_t *dev;
 
-	ENTER();
-
 	dev = (struct as102_dev_t *) fe->tuner_priv;
 	if (dev == NULL)
 		return -ENODEV;
 
 	*ber = dev->ber;
 
-	LEAVE();
 	return 0;
 }
 
@@ -216,15 +199,12 @@
 {
 	struct as102_dev_t *dev;
 
-	ENTER();
-
 	dev = (struct as102_dev_t *) fe->tuner_priv;
 	if (dev == NULL)
 		return -ENODEV;
 
 	*strength = (((0xffff * 400) * dev->signal_strength + 41000) * 2);
 
-	LEAVE();
 	return 0;
 }
 
@@ -232,8 +212,6 @@
 {
 	struct as102_dev_t *dev;
 
-	ENTER();
-
 	dev = (struct as102_dev_t *) fe->tuner_priv;
 	if (dev == NULL)
 		return -ENODEV;
@@ -243,7 +221,6 @@
 	else
 		*ucblocks = 0;
 
-	LEAVE();
 	return 0;
 }
 
@@ -252,8 +229,6 @@
 	struct as102_dev_t *dev;
 	int ret;
 
-	ENTER();
-
 	dev = (struct as102_dev_t *) fe->tuner_priv;
 	if (dev == NULL)
 		return -ENODEV;
@@ -263,7 +238,8 @@
 
 	if (acquire) {
 		if (elna_enable)
-			as10x_cmd_set_context(&dev->bus_adap, CONTEXT_LNA, dev->elna_cfg);
+			as10x_cmd_set_context(&dev->bus_adap,
+					      CONTEXT_LNA, dev->elna_cfg);
 
 		ret = as10x_cmd_turn_on(&dev->bus_adap);
 	} else {
@@ -272,7 +248,6 @@
 
 	mutex_unlock(&dev->bus_adap.lock);
 
-	LEAVE();
 	return ret;
 }
 
@@ -581,8 +556,8 @@
 			   as102_fe_get_code_rate(params->code_rate_LP);
 		}
 
-		dprintk(debug, "\thierarchy: 0x%02x  "
-				"selected: %s  code_rate_%s: 0x%02x\n",
+		dprintk(debug,
+			"\thierarchy: 0x%02x  selected: %s  code_rate_%s: 0x%02x\n",
 			tune_args->hierarchy,
 			tune_args->hier_select == HIER_HIGH_PRIORITY ?
 			"HP" : "LP",
diff --git a/drivers/staging/media/as102/as102_fw.c b/drivers/staging/media/as102/as102_fw.c
index b9670ee..f33f752 100644
--- a/drivers/staging/media/as102/as102_fw.c
+++ b/drivers/staging/media/as102/as102_fw.c
@@ -26,10 +26,10 @@
 #include "as102_drv.h"
 #include "as102_fw.h"
 
-char as102_st_fw1[] = "as102_data1_st.hex";
-char as102_st_fw2[] = "as102_data2_st.hex";
-char as102_dt_fw1[] = "as102_data1_dt.hex";
-char as102_dt_fw2[] = "as102_data2_dt.hex";
+static const char as102_st_fw1[] = "as102_data1_st.hex";
+static const char as102_st_fw2[] = "as102_data2_st.hex";
+static const char as102_dt_fw1[] = "as102_data1_dt.hex";
+static const char as102_dt_fw2[] = "as102_data2_dt.hex";
 
 static unsigned char atohx(unsigned char *dst, char *src)
 {
@@ -109,8 +109,6 @@
 	int total_read_bytes = 0, errno = 0;
 	unsigned char addr_has_changed = 0;
 
-	ENTER();
-
 	for (total_read_bytes = 0; total_read_bytes < firmware->size; ) {
 		int read_bytes = 0, data_len = 0;
 
@@ -158,7 +156,6 @@
 		}
 	}
 error:
-	LEAVE();
 	return (errno == 0) ? total_read_bytes : errno;
 }
 
@@ -167,11 +164,9 @@
 	int errno = -EFAULT;
 	const struct firmware *firmware = NULL;
 	unsigned char *cmd_buf = NULL;
-	char *fw1, *fw2;
+	const char *fw1, *fw2;
 	struct usb_device *dev = bus_adap->usb_dev;
 
-	ENTER();
-
 	/* select fw file to upload */
 	if (dual_tuner) {
 		fw1 = as102_dt_fw1;
@@ -233,6 +228,5 @@
 	kfree(cmd_buf);
 	release_firmware(firmware);
 
-	LEAVE();
 	return errno;
 }
diff --git a/drivers/staging/media/as102/as102_usb_drv.c b/drivers/staging/media/as102/as102_usb_drv.c
index 9f275f0..e4a6945 100644
--- a/drivers/staging/media/as102/as102_usb_drv.c
+++ b/drivers/staging/media/as102/as102_usb_drv.c
@@ -92,7 +92,6 @@
 			      unsigned char *recv_buf, int recv_buf_len)
 {
 	int ret = 0;
-	ENTER();
 
 	if (send_buf != NULL) {
 		ret = usb_control_msg(bus_adap->usb_dev,
@@ -140,7 +139,6 @@
 #endif
 	}
 
-	LEAVE();
 	return ret;
 }
 
@@ -191,7 +189,7 @@
 	return ret ? ret : actual_len;
 }
 
-struct as102_priv_ops_t as102_priv_ops = {
+static struct as102_priv_ops_t as102_priv_ops = {
 	.upload_fw_pkt	= as102_send_ep1,
 	.xfer_cmd	= as102_usb_xfer_cmd,
 	.as102_read_ep2	= as102_read_ep2,
@@ -240,8 +238,6 @@
 {
 	int i;
 
-	ENTER();
-
 	for (i = 0; i < MAX_STREAM_URB; i++)
 		usb_free_urb(dev->stream_urb[i]);
 
@@ -249,15 +245,12 @@
 			MAX_STREAM_URB * AS102_USB_BUF_SIZE,
 			dev->stream,
 			dev->dma_addr);
-	LEAVE();
 }
 
 static int as102_alloc_usb_stream_buffer(struct as102_dev_t *dev)
 {
 	int i, ret = 0;
 
-	ENTER();
-
 	dev->stream = usb_alloc_coherent(dev->bus_adap.usb_dev,
 				       MAX_STREAM_URB * AS102_USB_BUF_SIZE,
 				       GFP_KERNEL,
@@ -287,7 +280,6 @@
 
 		dev->stream_urb[i] = urb;
 	}
-	LEAVE();
 	return ret;
 }
 
@@ -318,23 +310,17 @@
 {
 	struct as102_dev_t *as102_dev;
 
-	ENTER();
-
 	as102_dev = container_of(kref, struct as102_dev_t, kref);
 	if (as102_dev != NULL) {
 		usb_put_dev(as102_dev->bus_adap.usb_dev);
 		kfree(as102_dev);
 	}
-
-	LEAVE();
 }
 
 static void as102_usb_disconnect(struct usb_interface *intf)
 {
 	struct as102_dev_t *as102_dev;
 
-	ENTER();
-
 	/* extract as102_dev_t from usb_device private data */
 	as102_dev = usb_get_intfdata(intf);
 
@@ -353,8 +339,6 @@
 	kref_put(&as102_dev->kref, as102_usb_release);
 
 	pr_info("%s: device has been disconnected\n", DRIVER_NAME);
-
-	LEAVE();
 }
 
 static int as102_usb_probe(struct usb_interface *intf,
@@ -364,8 +348,6 @@
 	struct as102_dev_t *as102_dev;
 	int i;
 
-	ENTER();
-
 	/* This should never actually happen */
 	if (ARRAY_SIZE(as102_usb_id_table) !=
 	    (sizeof(as102_device_names) / sizeof(const char *))) {
@@ -419,15 +401,21 @@
 	/* request buffer allocation for streaming */
 	ret = as102_alloc_usb_stream_buffer(as102_dev);
 	if (ret != 0)
-		goto failed;
+		goto failed_stream;
 
 	/* register dvb layer */
 	ret = as102_dvb_register(as102_dev);
+	if (ret != 0)
+		goto failed_dvb;
 
-	LEAVE();
 	return ret;
 
+failed_dvb:
+	as102_free_usb_stream_buffer(as102_dev);
+failed_stream:
+	usb_deregister_dev(intf, &as102_usb_class_driver);
 failed:
+	usb_put_dev(as102_dev->bus_adap.usb_dev);
 	usb_set_intfdata(intf, NULL);
 	kfree(as102_dev);
 	return ret;
@@ -439,8 +427,6 @@
 	struct usb_interface *intf = NULL;
 	struct as102_dev_t *dev = NULL;
 
-	ENTER();
-
 	/* read minor from inode */
 	minor = iminor(inode);
 
@@ -467,7 +453,6 @@
 	kref_get(&dev->kref);
 
 exit:
-	LEAVE();
 	return ret;
 }
 
@@ -476,15 +461,12 @@
 	int ret = 0;
 	struct as102_dev_t *dev = NULL;
 
-	ENTER();
-
 	dev = file->private_data;
 	if (dev != NULL) {
 		/* decrement the count on our device */
 		kref_put(&dev->kref, as102_usb_release);
 	}
 
-	LEAVE();
 	return ret;
 }
 
diff --git a/drivers/staging/media/as102/as10x_cmd.c b/drivers/staging/media/as102/as10x_cmd.c
index a73df10..9e49f15 100644
--- a/drivers/staging/media/as102/as10x_cmd.c
+++ b/drivers/staging/media/as102/as10x_cmd.c
@@ -34,8 +34,6 @@
 	int error = AS10X_CMD_ERROR;
 	struct as10x_cmd_t *pcmd, *prsp;
 
-	ENTER();
-
 	pcmd = adap->cmd;
 	prsp = adap->rsp;
 
@@ -63,7 +61,6 @@
 	error = as10x_rsp_parse(prsp, CONTROL_PROC_TURNON_RSP);
 
 out:
-	LEAVE();
 	return error;
 }
 
@@ -78,8 +75,6 @@
 	int error = AS10X_CMD_ERROR;
 	struct as10x_cmd_t *pcmd, *prsp;
 
-	ENTER();
-
 	pcmd = adap->cmd;
 	prsp = adap->rsp;
 
@@ -106,7 +101,6 @@
 	error = as10x_rsp_parse(prsp, CONTROL_PROC_TURNOFF_RSP);
 
 out:
-	LEAVE();
 	return error;
 }
 
@@ -123,8 +117,6 @@
 	int error = AS10X_CMD_ERROR;
 	struct as10x_cmd_t *preq, *prsp;
 
-	ENTER();
-
 	preq = adap->cmd;
 	prsp = adap->rsp;
 
@@ -164,7 +156,6 @@
 	error = as10x_rsp_parse(prsp, CONTROL_PROC_SETTUNE_RSP);
 
 out:
-	LEAVE();
 	return error;
 }
 
@@ -181,8 +172,6 @@
 	int error = AS10X_CMD_ERROR;
 	struct as10x_cmd_t  *preq, *prsp;
 
-	ENTER();
-
 	preq = adap->cmd;
 	prsp = adap->rsp;
 
@@ -220,7 +209,6 @@
 	pstatus->BER = le16_to_cpu(prsp->body.get_tune_status.rsp.sts.BER);
 
 out:
-	LEAVE();
 	return error;
 }
 
@@ -236,8 +224,6 @@
 	int error = AS10X_CMD_ERROR;
 	struct as10x_cmd_t *pcmd, *prsp;
 
-	ENTER();
-
 	pcmd = adap->cmd;
 	prsp = adap->rsp;
 
@@ -281,7 +267,6 @@
 	ptps->cell_ID = le16_to_cpu(prsp->body.get_tps.rsp.tps.cell_ID);
 
 out:
-	LEAVE();
 	return error;
 }
 
@@ -298,8 +283,6 @@
 	int error = AS10X_CMD_ERROR;
 	struct as10x_cmd_t *pcmd, *prsp;
 
-	ENTER();
-
 	pcmd = adap->cmd;
 	prsp = adap->rsp;
 
@@ -343,7 +326,6 @@
 		prsp->body.get_demod_stats.rsp.stats.has_started;
 
 out:
-	LEAVE();
 	return error;
 }
 
@@ -361,8 +343,6 @@
 	int error = AS10X_CMD_ERROR;
 	struct as10x_cmd_t *pcmd, *prsp;
 
-	ENTER();
-
 	pcmd = adap->cmd;
 	prsp = adap->rsp;
 
@@ -397,7 +377,6 @@
 	*is_ready = prsp->body.get_impulse_rsp.rsp.is_ready;
 
 out:
-	LEAVE();
 	return error;
 }
 
diff --git a/drivers/staging/media/as102/as10x_cmd_cfg.c b/drivers/staging/media/as102/as10x_cmd_cfg.c
index 4a2bbd7..b1e300d 100644
--- a/drivers/staging/media/as102/as10x_cmd_cfg.c
+++ b/drivers/staging/media/as102/as10x_cmd_cfg.c
@@ -40,8 +40,6 @@
 	int  error;
 	struct as10x_cmd_t *pcmd, *prsp;
 
-	ENTER();
-
 	pcmd = adap->cmd;
 	prsp = adap->rsp;
 
@@ -81,7 +79,6 @@
 	}
 
 out:
-	LEAVE();
 	return error;
 }
 
@@ -99,8 +96,6 @@
 	int error;
 	struct as10x_cmd_t *pcmd, *prsp;
 
-	ENTER();
-
 	pcmd = adap->cmd;
 	prsp = adap->rsp;
 
@@ -136,7 +131,6 @@
 	error = as10x_context_rsp_parse(prsp, CONTROL_PROC_CONTEXT_RSP);
 
 out:
-	LEAVE();
 	return error;
 }
 
@@ -156,8 +150,6 @@
 	int error;
 	struct as10x_cmd_t *pcmd, *prsp;
 
-	ENTER();
-
 	pcmd = adap->cmd;
 	prsp = adap->rsp;
 
@@ -188,7 +180,6 @@
 	error = as10x_rsp_parse(prsp, CONTROL_PROC_ELNA_CHANGE_MODE_RSP);
 
 out:
-	LEAVE();
 	return error;
 }
 
diff --git a/drivers/staging/media/as102/as10x_cmd_stream.c b/drivers/staging/media/as102/as10x_cmd_stream.c
index 6d000f6..1088ca1 100644
--- a/drivers/staging/media/as102/as10x_cmd_stream.c
+++ b/drivers/staging/media/as102/as10x_cmd_stream.c
@@ -34,8 +34,6 @@
 	int error;
 	struct as10x_cmd_t *pcmd, *prsp;
 
-	ENTER();
-
 	pcmd = adap->cmd;
 	prsp = adap->rsp;
 
@@ -77,7 +75,6 @@
 	}
 
 out:
-	LEAVE();
 	return error;
 }
 
@@ -94,8 +91,6 @@
 	int error;
 	struct as10x_cmd_t *pcmd, *prsp;
 
-	ENTER();
-
 	pcmd = adap->cmd;
 	prsp = adap->rsp;
 
@@ -126,7 +121,6 @@
 	error = as10x_rsp_parse(prsp, CONTROL_PROC_REMOVEFILTER_RSP);
 
 out:
-	LEAVE();
 	return error;
 }
 
@@ -141,8 +135,6 @@
 	int error;
 	struct as10x_cmd_t *pcmd, *prsp;
 
-	ENTER();
-
 	pcmd = adap->cmd;
 	prsp = adap->rsp;
 
@@ -172,7 +164,6 @@
 	error = as10x_rsp_parse(prsp, CONTROL_PROC_START_STREAMING_RSP);
 
 out:
-	LEAVE();
 	return error;
 }
 
@@ -187,8 +178,6 @@
 	int8_t error;
 	struct as10x_cmd_t *pcmd, *prsp;
 
-	ENTER();
-
 	pcmd = adap->cmd;
 	prsp = adap->rsp;
 
@@ -218,6 +207,5 @@
 	error = as10x_rsp_parse(prsp, CONTROL_PROC_STOP_STREAMING_RSP);
 
 out:
-	LEAVE();
 	return error;
 }
diff --git a/drivers/staging/media/bcm2048/Kconfig b/drivers/staging/media/bcm2048/Kconfig
new file mode 100644
index 0000000..a9fc6e1
--- /dev/null
+++ b/drivers/staging/media/bcm2048/Kconfig
@@ -0,0 +1,13 @@
+#
+# Multimedia Video device configuration
+#
+
+config I2C_BCM2048
+	tristate "Broadcom BCM2048 FM Radio Receiver support"
+	depends on I2C && VIDEO_V4L2 && RADIO_ADAPTERS
+	---help---
+	  Say Y here if you want support for the BCM2048 FM Radio Receiver.
+	  This device driver supports only the i2c bus.
+
+	  To compile this driver as a module, choose M here: the
+	  module will be called radio-bcm2048.
diff --git a/drivers/staging/media/bcm2048/Makefile b/drivers/staging/media/bcm2048/Makefile
new file mode 100644
index 0000000..b4f5663
--- /dev/null
+++ b/drivers/staging/media/bcm2048/Makefile
@@ -0,0 +1 @@
+obj-$(CONFIG_I2C_BCM2048) += radio-bcm2048.o
diff --git a/drivers/staging/media/bcm2048/TODO b/drivers/staging/media/bcm2048/TODO
new file mode 100644
index 0000000..051f85d
--- /dev/null
+++ b/drivers/staging/media/bcm2048/TODO
@@ -0,0 +1,24 @@
+TODO:
+
+From the initial code review:
+
+The main thing you need to do is to implement all the controls using the
+control framework (see Documentation/video4linux/v4l2-controls.txt).
+Most drivers are by now converted to the control framework, so you will
+find many examples of how to do this in drivers/media/radio.
+
+The sysfs stuff should be replaced by controls as well. A lot of the RDS
+support is now available as controls (although there may well be some
+missing features, those are easy enough to add). Since the RDS data is
+actually read() from the device, I am not sure whether the RDS
+properties/controls should be there at all.
+
+Correct the coding style, as this driver also violates several style
+rules and does evil tricks, like returning from a function inside a
+macro.
+
+Finally this driver should probably be split up into two parts: one
+v4l2_subdev-based core driver and one platform driver. See e.g.
+radio-si4713/si4713-i2c.c as a good example. But I would wait on that
+until the rest of the driver is cleaned up. Then I will have a better
+idea of whether this is necessary or not.
diff --git a/drivers/staging/media/bcm2048/radio-bcm2048.c b/drivers/staging/media/bcm2048/radio-bcm2048.c
new file mode 100644
index 0000000..b2cd3a8
--- /dev/null
+++ b/drivers/staging/media/bcm2048/radio-bcm2048.c
@@ -0,0 +1,2744 @@
+/*
+ * drivers/staging/media/radio-bcm2048.c
+ *
+ * Driver for I2C Broadcom BCM2048 FM Radio Receiver:
+ *
+ * Copyright (C) Nokia Corporation
+ * Contact: Eero Nurkkala <ext-eero.nurkkala@nokia.com>
+ *
+ * Copyright (C) Nils Faerber <nils.faerber@kernelconcepts.de>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
+ * 02110-1301 USA
+ */
+
+/*
+ * History:
+ *		Eero Nurkkala <ext-eero.nurkkala@nokia.com>
+ *		Version 0.0.1
+ *		- Initial implementation
+ * 2010-02-21	Nils Faerber <nils.faerber@kernelconcepts.de>
+ *		Version 0.0.2
+ *		- Add support for interrupt driven rds data reading
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/version.h>
+#include <linux/interrupt.h>
+#include <linux/sysfs.h>
+#include <linux/completion.h>
+#include <linux/delay.h>
+#include <linux/i2c.h>
+#include <linux/videodev2.h>
+#include <linux/mutex.h>
+#include <linux/slab.h>
+#include <media/v4l2-common.h>
+#include <media/v4l2-ioctl.h>
+#include "radio-bcm2048.h"
+
+/* driver definitions */
+#define BCM2048_DRIVER_AUTHOR	"Eero Nurkkala <ext-eero.nurkkala@nokia.com>"
+#define BCM2048_DRIVER_NAME	BCM2048_NAME
+#define BCM2048_DRIVER_VERSION	KERNEL_VERSION(0, 0, 1)
+#define BCM2048_DRIVER_CARD	"Broadcom bcm2048 FM Radio Receiver"
+#define BCM2048_DRIVER_DESC	"I2C driver for BCM2048 FM Radio Receiver"
+
+/* I2C Control Registers */
+#define BCM2048_I2C_FM_RDS_SYSTEM	0x00
+#define BCM2048_I2C_FM_CTRL		0x01
+#define BCM2048_I2C_RDS_CTRL0		0x02
+#define BCM2048_I2C_RDS_CTRL1		0x03
+#define BCM2048_I2C_FM_AUDIO_PAUSE	0x04
+#define BCM2048_I2C_FM_AUDIO_CTRL0	0x05
+#define BCM2048_I2C_FM_AUDIO_CTRL1	0x06
+#define BCM2048_I2C_FM_SEARCH_CTRL0	0x07
+#define BCM2048_I2C_FM_SEARCH_CTRL1	0x08
+#define BCM2048_I2C_FM_SEARCH_TUNE_MODE	0x09
+#define BCM2048_I2C_FM_FREQ0		0x0a
+#define BCM2048_I2C_FM_FREQ1		0x0b
+#define BCM2048_I2C_FM_AF_FREQ0		0x0c
+#define BCM2048_I2C_FM_AF_FREQ1		0x0d
+#define BCM2048_I2C_FM_CARRIER		0x0e
+#define BCM2048_I2C_FM_RSSI		0x0f
+#define BCM2048_I2C_FM_RDS_MASK0	0x10
+#define BCM2048_I2C_FM_RDS_MASK1	0x11
+#define BCM2048_I2C_FM_RDS_FLAG0	0x12
+#define BCM2048_I2C_FM_RDS_FLAG1	0x13
+#define BCM2048_I2C_RDS_WLINE		0x14
+#define BCM2048_I2C_RDS_BLKB_MATCH0	0x16
+#define BCM2048_I2C_RDS_BLKB_MATCH1	0x17
+#define BCM2048_I2C_RDS_BLKB_MASK0	0x18
+#define BCM2048_I2C_RDS_BLKB_MASK1	0x19
+#define BCM2048_I2C_RDS_PI_MATCH0	0x1a
+#define BCM2048_I2C_RDS_PI_MATCH1	0x1b
+#define BCM2048_I2C_RDS_PI_MASK0	0x1c
+#define BCM2048_I2C_RDS_PI_MASK1	0x1d
+#define BCM2048_I2C_SPARE1		0x20
+#define BCM2048_I2C_SPARE2		0x21
+#define BCM2048_I2C_FM_RDS_REV		0x28
+#define BCM2048_I2C_SLAVE_CONFIGURATION	0x29
+#define BCM2048_I2C_RDS_DATA		0x80
+#define BCM2048_I2C_FM_BEST_TUNE_MODE	0x90
+
+/* BCM2048_I2C_FM_RDS_SYSTEM */
+#define BCM2048_FM_ON			0x01
+#define BCM2048_RDS_ON			0x02
+
+/* BCM2048_I2C_FM_CTRL */
+#define BCM2048_BAND_SELECT			0x01
+#define BCM2048_STEREO_MONO_AUTO_SELECT		0x02
+#define BCM2048_STEREO_MONO_MANUAL_SELECT	0x04
+#define BCM2048_STEREO_MONO_BLEND_SWITCH	0x08
+#define BCM2048_HI_LO_INJECTION			0x10
+
+/* BCM2048_I2C_RDS_CTRL0 */
+#define BCM2048_RBDS_RDS_SELECT		0x01
+#define BCM2048_FLUSH_FIFO		0x02
+
+/* BCM2048_I2C_FM_AUDIO_PAUSE */
+#define BCM2048_AUDIO_PAUSE_RSSI_TRESH	0x0f
+#define BCM2048_AUDIO_PAUSE_DURATION	0xf0
+
+/* BCM2048_I2C_FM_AUDIO_CTRL0 */
+#define BCM2048_RF_MUTE			0x01
+#define BCM2048_MANUAL_MUTE		0x02
+#define BCM2048_DAC_OUTPUT_LEFT		0x04
+#define BCM2048_DAC_OUTPUT_RIGHT	0x08
+#define BCM2048_AUDIO_ROUTE_DAC		0x10
+#define BCM2048_AUDIO_ROUTE_I2S		0x20
+#define BCM2048_DE_EMPHASIS_SELECT	0x40
+#define BCM2048_AUDIO_BANDWIDTH_SELECT	0x80
+
+/* BCM2048_I2C_FM_SEARCH_CTRL0 */
+#define BCM2048_SEARCH_RSSI_THRESHOLD	0x7f
+#define BCM2048_SEARCH_DIRECTION	0x80
+
+/* BCM2048_I2C_FM_SEARCH_TUNE_MODE */
+#define BCM2048_FM_AUTO_SEARCH		0x03
+
+/* BCM2048_I2C_FM_RSSI */
+#define BCM2048_RSSI_VALUE		0xff
+
+/* BCM2048_I2C_FM_RDS_MASK0 */
+/* BCM2048_I2C_FM_RDS_MASK1 */
+#define BCM2048_FM_FLAG_SEARCH_TUNE_FINISHED	0x01
+#define BCM2048_FM_FLAG_SEARCH_TUNE_FAIL	0x02
+#define BCM2048_FM_FLAG_RSSI_LOW		0x04
+#define BCM2048_FM_FLAG_CARRIER_ERROR_HIGH	0x08
+#define BCM2048_FM_FLAG_AUDIO_PAUSE_INDICATION	0x10
+#define BCM2048_FLAG_STEREO_DETECTED		0x20
+#define BCM2048_FLAG_STEREO_ACTIVE		0x40
+
+/* BCM2048_I2C_RDS_DATA */
+#define BCM2048_SLAVE_ADDRESS			0x3f
+#define BCM2048_SLAVE_ENABLE			0x80
+
+/* BCM2048_I2C_FM_BEST_TUNE_MODE */
+#define BCM2048_BEST_TUNE_MODE			0x80
+
+#define BCM2048_FM_FLAG_SEARCH_TUNE_FINISHED	0x01
+#define BCM2048_FM_FLAG_SEARCH_TUNE_FAIL	0x02
+#define BCM2048_FM_FLAG_RSSI_LOW		0x04
+#define BCM2048_FM_FLAG_CARRIER_ERROR_HIGH	0x08
+#define BCM2048_FM_FLAG_AUDIO_PAUSE_INDICATION	0x10
+#define BCM2048_FLAG_STEREO_DETECTED		0x20
+#define BCM2048_FLAG_STEREO_ACTIVE		0x40
+
+#define BCM2048_RDS_FLAG_FIFO_WLINE		0x02
+#define BCM2048_RDS_FLAG_B_BLOCK_MATCH		0x08
+#define BCM2048_RDS_FLAG_SYNC_LOST		0x10
+#define BCM2048_RDS_FLAG_PI_MATCH		0x20
+
+#define BCM2048_RDS_MARK_END_BYTE0		0x7C
+#define BCM2048_RDS_MARK_END_BYTEN		0xFF
+
+#define BCM2048_FM_FLAGS_ALL	(FM_FLAG_SEARCH_TUNE_FINISHED | \
+				 FM_FLAG_SEARCH_TUNE_FAIL | \
+				 FM_FLAG_RSSI_LOW | \
+				 FM_FLAG_CARRIER_ERROR_HIGH | \
+				 FM_FLAG_AUDIO_PAUSE_INDICATION | \
+				 FLAG_STEREO_DETECTED | FLAG_STEREO_ACTIVE)
+
+#define BCM2048_RDS_FLAGS_ALL	(RDS_FLAG_FIFO_WLINE | \
+				 RDS_FLAG_B_BLOCK_MATCH | \
+				 RDS_FLAG_SYNC_LOST | RDS_FLAG_PI_MATCH)
+
+#define BCM2048_DEFAULT_TIMEOUT		1500
+#define BCM2048_AUTO_SEARCH_TIMEOUT	3000
+
+
+#define BCM2048_FREQDEV_UNIT		10000
+#define BCM2048_FREQV4L2_MULTI		625
+#define dev_to_v4l2(f)	((f * BCM2048_FREQDEV_UNIT) / BCM2048_FREQV4L2_MULTI)
+#define v4l2_to_dev(f)	((f * BCM2048_FREQV4L2_MULTI) / BCM2048_FREQDEV_UNIT)
+
+#define msb(x)                  ((u8)((u16) x >> 8))
+#define lsb(x)                  ((u8)((u16) x &  0x00FF))
+#define compose_u16(msb, lsb)	(((u16)msb << 8) | lsb)
+
+#define BCM2048_DEFAULT_POWERING_DELAY	20
+#define BCM2048_DEFAULT_REGION		0x02
+#define BCM2048_DEFAULT_MUTE		0x01
+#define BCM2048_DEFAULT_RSSI_THRESHOLD	0x64
+#define BCM2048_DEFAULT_RDS_WLINE	0x7E
+
+#define BCM2048_FM_SEARCH_INACTIVE	0x00
+#define BCM2048_FM_PRE_SET_MODE		0x01
+#define BCM2048_FM_AUTO_SEARCH_MODE	0x02
+#define BCM2048_FM_AF_JUMP_MODE		0x03
+
+#define BCM2048_FREQUENCY_BASE		64000
+
+#define BCM2048_POWER_ON		0x01
+#define BCM2048_POWER_OFF		0x00
+
+#define BCM2048_ITEM_ENABLED		0x01
+#define BCM2048_SEARCH_DIRECTION_UP	0x01
+
+#define BCM2048_DE_EMPHASIS_75us	75
+#define BCM2048_DE_EMPHASIS_50us	50
+
+#define BCM2048_SCAN_FAIL		0x00
+#define BCM2048_SCAN_OK			0x01
+
+#define BCM2048_FREQ_ERROR_FLOOR	-20
+#define BCM2048_FREQ_ERROR_ROOF		20
+
+/* -60 dB is reported as full signal strength */
+#define BCM2048_RSSI_LEVEL_BASE		-60
+#define BCM2048_RSSI_LEVEL_ROOF		-100
+#define BCM2048_RSSI_LEVEL_ROOF_NEG	100
+#define BCM2048_SIGNAL_MULTIPLIER	(0xFFFF / \
+					 (BCM2048_RSSI_LEVEL_ROOF_NEG + \
+					  BCM2048_RSSI_LEVEL_BASE))
+
+#define BCM2048_RDS_FIFO_DUPLE_SIZE	0x03
+#define BCM2048_RDS_CRC_MASK		0x0F
+#define BCM2048_RDS_CRC_NONE		0x00
+#define BCM2048_RDS_CRC_MAX_2BITS	0x04
+#define BCM2048_RDS_CRC_LEAST_2BITS	0x08
+#define BCM2048_RDS_CRC_UNRECOVARABLE	0x0C
+
+#define BCM2048_RDS_BLOCK_MASK		0xF0
+#define BCM2048_RDS_BLOCK_A		0x00
+#define BCM2048_RDS_BLOCK_B		0x10
+#define BCM2048_RDS_BLOCK_C		0x20
+#define BCM2048_RDS_BLOCK_D		0x30
+#define BCM2048_RDS_BLOCK_C_SCORED	0x40
+#define BCM2048_RDS_BLOCK_E		0x60
+
+#define BCM2048_RDS_RT			0x20
+#define BCM2048_RDS_PS			0x00
+
+#define BCM2048_RDS_GROUP_AB_MASK	0x08
+#define BCM2048_RDS_GROUP_A		0x00
+#define BCM2048_RDS_GROUP_B		0x08
+
+#define BCM2048_RDS_RT_AB_MASK		0x10
+#define BCM2048_RDS_RT_A		0x00
+#define BCM2048_RDS_RT_B		0x10
+#define BCM2048_RDS_RT_INDEX		0x0F
+
+#define BCM2048_RDS_PS_INDEX		0x03
+
+struct rds_info {
+	u16 rds_pi;
+#define BCM2048_MAX_RDS_RT (64 + 1)
+	u8 rds_rt[BCM2048_MAX_RDS_RT];
+	u8 rds_rt_group_b;
+	u8 rds_rt_ab;
+#define BCM2048_MAX_RDS_PS (8 + 1)
+	u8 rds_ps[BCM2048_MAX_RDS_PS];
+	u8 rds_ps_group;
+	u8 rds_ps_group_cnt;
+#define BCM2048_MAX_RDS_RADIO_TEXT 255
+	u8 radio_text[BCM2048_MAX_RDS_RADIO_TEXT + 3];
+	u8 text_len;
+};
+
+struct region_info {
+	u32 bottom_frequency;
+	u32 top_frequency;
+	u8 deemphasis;
+	u8 channel_spacing;
+	u8 region;
+};
+
+struct bcm2048_device {
+	struct i2c_client *client;
+	struct video_device *videodev;
+	struct work_struct work;
+	struct completion compl;
+	struct mutex mutex;
+	struct bcm2048_platform_data *platform_data;
+	struct rds_info rds_info;
+	struct region_info region_info;
+	u16 frequency;
+	u8 cache_fm_rds_system;
+	u8 cache_fm_ctrl;
+	u8 cache_fm_audio_ctrl0;
+	u8 cache_fm_search_ctrl0;
+	u8 power_state;
+	u8 rds_state;
+	u8 fifo_size;
+	u8 scan_state;
+	u8 mute_state;
+
+	/* for rds data device read */
+	wait_queue_head_t read_queue;
+	unsigned int users;
+	unsigned char rds_data_available;
+	unsigned int rd_index;
+};
+
+static int radio_nr = -1;	/* radio device minor (-1 ==> auto assign) */
+module_param(radio_nr, int, 0);
+MODULE_PARM_DESC(radio_nr,
+		 "Minor number for radio device (-1 ==> auto assign)");
+
+static struct region_info region_configs[] = {
+	/* USA */
+	{
+		.channel_spacing	= 20,
+		.bottom_frequency	= 87500,
+		.top_frequency		= 108000,
+		.deemphasis		= 75,
+		.region			= 0,
+	},
+	/* Australia */
+	{
+		.channel_spacing	= 20,
+		.bottom_frequency	= 87500,
+		.top_frequency		= 108000,
+		.deemphasis		= 50,
+		.region			= 1,
+	},
+	/* Europe */
+	{
+		.channel_spacing	= 10,
+		.bottom_frequency	= 87500,
+		.top_frequency		= 108000,
+		.deemphasis		= 50,
+		.region			= 2,
+	},
+	/* Japan */
+	{
+		.channel_spacing	= 10,
+		.bottom_frequency	= 76000,
+		.top_frequency		= 90000,
+		.deemphasis		= 50,
+		.region			= 3,
+	},
+	/* Japan wide band */
+	{
+		.channel_spacing	= 10,
+		.bottom_frequency	= 76000,
+		.top_frequency		= 108000,
+		.deemphasis		= 50,
+		.region			= 4,
+	},
+};
+
+/*
+ *	I2C Interface read / write
+ */
+static int bcm2048_send_command(struct bcm2048_device *bdev, unsigned int reg,
+					unsigned int value)
+{
+	struct i2c_client *client = bdev->client;
+	u8 data[2];
+
+	if (!bdev->power_state) {
+		dev_err(&bdev->client->dev, "bcm2048: chip not powered!\n");
+		return -EIO;
+	}
+
+	data[0] = reg & 0xff;
+	data[1] = value & 0xff;
+
+	if (i2c_master_send(client, data, 2) == 2) {
+		return 0;
+	} else {
+		dev_err(&bdev->client->dev, "BCM I2C error!\n");
+		dev_err(&bdev->client->dev, "Is Bluetooth up and running?\n");
+		return -EIO;
+	}
+}
+
+static int bcm2048_recv_command(struct bcm2048_device *bdev, unsigned int reg,
+			u8 *value)
+{
+	struct i2c_client *client = bdev->client;
+
+	if (!bdev->power_state) {
+		dev_err(&bdev->client->dev, "bcm2048: chip not powered!\n");
+		return -EIO;
+	}
+
+	value[0] = i2c_smbus_read_byte_data(client, reg & 0xff);
+
+	return 0;
+}
+
+static int bcm2048_recv_duples(struct bcm2048_device *bdev, unsigned int reg,
+			u8 *value, u8 duples)
+{
+	struct i2c_client *client = bdev->client;
+	struct i2c_adapter *adap = client->adapter;
+	struct i2c_msg msg[2];
+	u8 buf;
+
+	if (!bdev->power_state) {
+		dev_err(&bdev->client->dev, "bcm2048: chip not powered!\n");
+		return -EIO;
+	}
+
+	buf = reg & 0xff;
+
+	msg[0].addr = client->addr;
+	msg[0].flags = client->flags & I2C_M_TEN;
+	msg[0].len = 1;
+	msg[0].buf = &buf;
+
+	msg[1].addr = client->addr;
+	msg[1].flags = client->flags & I2C_M_TEN;
+	msg[1].flags |= I2C_M_RD;
+	msg[1].len = duples;
+	msg[1].buf = value;
+
+	return i2c_transfer(adap, msg, 2);
+}
+
+/*
+ *	BCM2048 - I2C register programming helpers
+ */
+static int bcm2048_set_power_state(struct bcm2048_device *bdev, u8 power)
+{
+	int err = 0;
+
+	mutex_lock(&bdev->mutex);
+
+	if (power) {
+		bdev->power_state = BCM2048_POWER_ON;
+		bdev->cache_fm_rds_system |= BCM2048_FM_ON;
+	} else {
+		bdev->cache_fm_rds_system &= ~BCM2048_FM_ON;
+	}
+
+	/*
+	 * Warning! FM cannot be turned off because then
+	 * the I2C communications get ruined!
+	 * Comment off the "if (power)" when the chip works!
+	 */
+	if (power)
+		err = bcm2048_send_command(bdev, BCM2048_I2C_FM_RDS_SYSTEM,
+					bdev->cache_fm_rds_system);
+	msleep(BCM2048_DEFAULT_POWERING_DELAY);
+
+	if (!power)
+		bdev->power_state = BCM2048_POWER_OFF;
+
+	mutex_unlock(&bdev->mutex);
+	return err;
+}
+
+static int bcm2048_get_power_state(struct bcm2048_device *bdev)
+{
+	int err;
+	u8 value;
+
+	mutex_lock(&bdev->mutex);
+
+	err = bcm2048_recv_command(bdev, BCM2048_I2C_FM_RDS_SYSTEM, &value);
+
+	mutex_unlock(&bdev->mutex);
+
+	if (!err && (value & BCM2048_FM_ON))
+		return BCM2048_POWER_ON;
+
+	return err;
+}
+
+static int bcm2048_set_rds_no_lock(struct bcm2048_device *bdev, u8 rds_on)
+{
+	int err;
+	u8 flags;
+
+	bdev->cache_fm_rds_system &= ~BCM2048_RDS_ON;
+
+	if (rds_on) {
+		bdev->cache_fm_rds_system |= BCM2048_RDS_ON;
+		bdev->rds_state = BCM2048_RDS_ON;
+		flags =	BCM2048_RDS_FLAG_FIFO_WLINE;
+		err = bcm2048_send_command(bdev, BCM2048_I2C_FM_RDS_MASK1,
+						flags);
+	} else {
+		flags =	0;
+		bdev->rds_state = 0;
+		err = bcm2048_send_command(bdev, BCM2048_I2C_FM_RDS_MASK1,
+						flags);
+		memset(&bdev->rds_info, 0, sizeof(bdev->rds_info));
+	}
+
+	err = bcm2048_send_command(bdev, BCM2048_I2C_FM_RDS_SYSTEM,
+					bdev->cache_fm_rds_system);
+
+	return err;
+}
+
+static int bcm2048_get_rds_no_lock(struct bcm2048_device *bdev)
+{
+	int err;
+	u8 value;
+
+	err = bcm2048_recv_command(bdev, BCM2048_I2C_FM_RDS_SYSTEM, &value);
+
+	if (!err && (value & BCM2048_RDS_ON))
+		return BCM2048_ITEM_ENABLED;
+
+	return err;
+}
+
+static int bcm2048_set_rds(struct bcm2048_device *bdev, u8 rds_on)
+{
+	int err;
+
+	mutex_lock(&bdev->mutex);
+
+	err = bcm2048_set_rds_no_lock(bdev, rds_on);
+
+	mutex_unlock(&bdev->mutex);
+	return err;
+}
+
+static int bcm2048_get_rds(struct bcm2048_device *bdev)
+{
+	int err;
+
+	mutex_lock(&bdev->mutex);
+
+	err = bcm2048_get_rds_no_lock(bdev);
+
+	mutex_unlock(&bdev->mutex);
+	return err;
+}
+
+static int bcm2048_get_rds_pi(struct bcm2048_device *bdev)
+{
+	return bdev->rds_info.rds_pi;
+}
+
+static int bcm2048_set_fm_automatic_stereo_mono(struct bcm2048_device *bdev,
+						u8 enabled)
+{
+	int err;
+
+	mutex_lock(&bdev->mutex);
+
+	bdev->cache_fm_ctrl &= ~BCM2048_STEREO_MONO_AUTO_SELECT;
+
+	if (enabled)
+		bdev->cache_fm_ctrl |= BCM2048_STEREO_MONO_AUTO_SELECT;
+
+	err = bcm2048_send_command(bdev, BCM2048_I2C_FM_CTRL,
+					bdev->cache_fm_ctrl);
+
+	mutex_unlock(&bdev->mutex);
+	return err;
+}
+
+static int bcm2048_set_fm_hi_lo_injection(struct bcm2048_device *bdev,
+						u8 hi_lo)
+{
+	int err;
+
+	mutex_lock(&bdev->mutex);
+
+	bdev->cache_fm_ctrl &= ~BCM2048_HI_LO_INJECTION;
+
+	if (hi_lo)
+		bdev->cache_fm_ctrl |= BCM2048_HI_LO_INJECTION;
+
+	err = bcm2048_send_command(bdev, BCM2048_I2C_FM_CTRL,
+					bdev->cache_fm_ctrl);
+
+	mutex_unlock(&bdev->mutex);
+	return err;
+}
+
+static int bcm2048_get_fm_hi_lo_injection(struct bcm2048_device *bdev)
+{
+	int err;
+	u8 value;
+
+	mutex_lock(&bdev->mutex);
+
+	err = bcm2048_recv_command(bdev, BCM2048_I2C_FM_CTRL, &value);
+
+	mutex_unlock(&bdev->mutex);
+
+	if (!err && (value & BCM2048_HI_LO_INJECTION))
+		return BCM2048_ITEM_ENABLED;
+
+	return err;
+}
+
+static int bcm2048_set_fm_frequency(struct bcm2048_device *bdev, u32 frequency)
+{
+	int err;
+
+	if (frequency < bdev->region_info.bottom_frequency ||
+		frequency > bdev->region_info.top_frequency)
+		return -EDOM;
+
+	frequency -= BCM2048_FREQUENCY_BASE;
+
+	mutex_lock(&bdev->mutex);
+
+	err = bcm2048_send_command(bdev, BCM2048_I2C_FM_FREQ0, lsb(frequency));
+	err |= bcm2048_send_command(bdev, BCM2048_I2C_FM_FREQ1,
+					msb(frequency));
+
+	if (!err)
+		bdev->frequency = frequency;
+
+	mutex_unlock(&bdev->mutex);
+	return err;
+}
+
+static int bcm2048_get_fm_frequency(struct bcm2048_device *bdev)
+{
+	int err;
+	u8 lsb, msb;
+
+	mutex_lock(&bdev->mutex);
+
+	err = bcm2048_recv_command(bdev, BCM2048_I2C_FM_FREQ0, &lsb);
+	err |= bcm2048_recv_command(bdev, BCM2048_I2C_FM_FREQ1, &msb);
+
+	mutex_unlock(&bdev->mutex);
+
+	if (err)
+		return err;
+
+	err = compose_u16(msb, lsb);
+	err += BCM2048_FREQUENCY_BASE;
+
+	return err;
+}
+
+static int bcm2048_set_fm_af_frequency(struct bcm2048_device *bdev,
+						u32 frequency)
+{
+	int err;
+
+	if (frequency < bdev->region_info.bottom_frequency ||
+		frequency > bdev->region_info.top_frequency)
+		return -EDOM;
+
+	frequency -= BCM2048_FREQUENCY_BASE;
+
+	mutex_lock(&bdev->mutex);
+
+	err = bcm2048_send_command(bdev, BCM2048_I2C_FM_AF_FREQ0,
+					lsb(frequency));
+	err |= bcm2048_send_command(bdev, BCM2048_I2C_FM_AF_FREQ1,
+					msb(frequency));
+	if (!err)
+		bdev->frequency = frequency;
+
+	mutex_unlock(&bdev->mutex);
+	return err;
+}
+
+static int bcm2048_get_fm_af_frequency(struct bcm2048_device *bdev)
+{
+	int err;
+	u8 lsb, msb;
+
+	mutex_lock(&bdev->mutex);
+
+	err = bcm2048_recv_command(bdev, BCM2048_I2C_FM_AF_FREQ0, &lsb);
+	err |= bcm2048_recv_command(bdev, BCM2048_I2C_FM_AF_FREQ1, &msb);
+
+	mutex_unlock(&bdev->mutex);
+
+	if (err)
+		return err;
+
+	err = compose_u16(msb, lsb);
+	err += BCM2048_FREQUENCY_BASE;
+
+	return err;
+}
+
+static int bcm2048_set_fm_deemphasis(struct bcm2048_device *bdev, int d)
+{
+	int err;
+	u8 deemphasis;
+
+	if (d == BCM2048_DE_EMPHASIS_75us)
+		deemphasis = BCM2048_DE_EMPHASIS_SELECT;
+	else
+		deemphasis = 0;
+
+	mutex_lock(&bdev->mutex);
+
+	bdev->cache_fm_audio_ctrl0 &= ~BCM2048_DE_EMPHASIS_SELECT;
+	bdev->cache_fm_audio_ctrl0 |= deemphasis;
+
+	err = bcm2048_send_command(bdev, BCM2048_I2C_FM_AUDIO_CTRL0,
+		bdev->cache_fm_audio_ctrl0);
+
+	if (!err)
+		bdev->region_info.deemphasis = d;
+
+	mutex_unlock(&bdev->mutex);
+
+	return err;
+}
+
+static int bcm2048_get_fm_deemphasis(struct bcm2048_device *bdev)
+{
+	int err;
+	u8 value;
+
+	mutex_lock(&bdev->mutex);
+
+	err = bcm2048_recv_command(bdev, BCM2048_I2C_FM_AUDIO_CTRL0, &value);
+
+	mutex_unlock(&bdev->mutex);
+
+	if (!err) {
+		if (value & BCM2048_DE_EMPHASIS_SELECT)
+			return BCM2048_DE_EMPHASIS_75us;
+		else
+			return BCM2048_DE_EMPHASIS_50us;
+	}
+
+	return err;
+}
+
+static int bcm2048_set_region(struct bcm2048_device *bdev, u8 region)
+{
+	int err;
+	u32 new_frequency = 0;
+
+	if (region > ARRAY_SIZE(region_configs))
+		return -EINVAL;
+
+	mutex_lock(&bdev->mutex);
+	bdev->region_info = region_configs[region];
+	mutex_unlock(&bdev->mutex);
+
+	if (bdev->frequency < region_configs[region].bottom_frequency ||
+		bdev->frequency > region_configs[region].top_frequency)
+		new_frequency = region_configs[region].bottom_frequency;
+
+	if (new_frequency > 0) {
+		err = bcm2048_set_fm_frequency(bdev, new_frequency);
+
+		if (err)
+			goto done;
+	}
+
+	err = bcm2048_set_fm_deemphasis(bdev,
+			region_configs[region].deemphasis);
+
+done:
+	return err;
+}
+
+static int bcm2048_get_region(struct bcm2048_device *bdev)
+{
+	int err;
+
+	mutex_lock(&bdev->mutex);
+	err = bdev->region_info.region;
+	mutex_unlock(&bdev->mutex);
+
+	return err;
+}
+
+static int bcm2048_set_mute(struct bcm2048_device *bdev, u16 mute)
+{
+	int err;
+
+	mutex_lock(&bdev->mutex);
+
+	bdev->cache_fm_audio_ctrl0 &= ~(BCM2048_RF_MUTE | BCM2048_MANUAL_MUTE);
+
+	if (mute)
+		bdev->cache_fm_audio_ctrl0 |= (BCM2048_RF_MUTE |
+						BCM2048_MANUAL_MUTE);
+
+	err = bcm2048_send_command(bdev, BCM2048_I2C_FM_AUDIO_CTRL0,
+					bdev->cache_fm_audio_ctrl0);
+
+	if (!err)
+		bdev->mute_state = mute;
+
+	mutex_unlock(&bdev->mutex);
+	return err;
+}
+
+static int bcm2048_get_mute(struct bcm2048_device *bdev)
+{
+	int err;
+	u8 value;
+
+	mutex_lock(&bdev->mutex);
+
+	if (bdev->power_state) {
+		err = bcm2048_recv_command(bdev, BCM2048_I2C_FM_AUDIO_CTRL0,
+						&value);
+		if (!err)
+			err = value & (BCM2048_RF_MUTE | BCM2048_MANUAL_MUTE);
+	} else {
+		err = bdev->mute_state;
+	}
+
+	mutex_unlock(&bdev->mutex);
+	return err;
+}
+
+static int bcm2048_set_audio_route(struct bcm2048_device *bdev, u8 route)
+{
+	int err;
+
+	mutex_lock(&bdev->mutex);
+
+	route &= (BCM2048_AUDIO_ROUTE_DAC | BCM2048_AUDIO_ROUTE_I2S);
+	bdev->cache_fm_audio_ctrl0 &= ~(BCM2048_AUDIO_ROUTE_DAC |
+		BCM2048_AUDIO_ROUTE_I2S);
+	bdev->cache_fm_audio_ctrl0 |= route;
+
+	err = bcm2048_send_command(bdev, BCM2048_I2C_FM_AUDIO_CTRL0,
+					bdev->cache_fm_audio_ctrl0);
+
+	mutex_unlock(&bdev->mutex);
+	return err;
+}
+
+static int bcm2048_get_audio_route(struct bcm2048_device *bdev)
+{
+	int err;
+	u8 value;
+
+	mutex_lock(&bdev->mutex);
+
+	err = bcm2048_recv_command(bdev, BCM2048_I2C_FM_AUDIO_CTRL0, &value);
+
+	mutex_unlock(&bdev->mutex);
+
+	if (!err)
+		return value & (BCM2048_AUDIO_ROUTE_DAC |
+			BCM2048_AUDIO_ROUTE_I2S);
+
+	return err;
+}
+
+static int bcm2048_set_dac_output(struct bcm2048_device *bdev, u8 channels)
+{
+	int err;
+
+	mutex_lock(&bdev->mutex);
+
+	bdev->cache_fm_audio_ctrl0 &= ~(BCM2048_DAC_OUTPUT_LEFT |
+					BCM2048_DAC_OUTPUT_RIGHT);
+	bdev->cache_fm_audio_ctrl0 |= channels;
+
+	err = bcm2048_send_command(bdev, BCM2048_I2C_FM_AUDIO_CTRL0,
+					bdev->cache_fm_audio_ctrl0);
+
+	mutex_unlock(&bdev->mutex);
+	return err;
+}
+
+static int bcm2048_get_dac_output(struct bcm2048_device *bdev)
+{
+	int err;
+	u8 value;
+
+	mutex_lock(&bdev->mutex);
+
+	err = bcm2048_recv_command(bdev, BCM2048_I2C_FM_AUDIO_CTRL0, &value);
+
+	mutex_unlock(&bdev->mutex);
+
+	if (!err)
+		return value & (BCM2048_DAC_OUTPUT_LEFT |
+			BCM2048_DAC_OUTPUT_RIGHT);
+
+	return err;
+}
+
+static int bcm2048_set_fm_search_rssi_threshold(struct bcm2048_device *bdev,
+							u8 threshold)
+{
+	int err;
+
+	mutex_lock(&bdev->mutex);
+
+	threshold &= BCM2048_SEARCH_RSSI_THRESHOLD;
+	bdev->cache_fm_search_ctrl0 &= ~BCM2048_SEARCH_RSSI_THRESHOLD;
+	bdev->cache_fm_search_ctrl0 |= threshold;
+
+	err = bcm2048_send_command(bdev, BCM2048_I2C_FM_SEARCH_CTRL0,
+					bdev->cache_fm_search_ctrl0);
+
+	mutex_unlock(&bdev->mutex);
+	return err;
+}
+
+static int bcm2048_get_fm_search_rssi_threshold(struct bcm2048_device *bdev)
+{
+	int err;
+	u8 value;
+
+	mutex_lock(&bdev->mutex);
+
+	err = bcm2048_recv_command(bdev, BCM2048_I2C_FM_SEARCH_CTRL0, &value);
+
+	mutex_unlock(&bdev->mutex);
+
+	if (!err)
+		return value & BCM2048_SEARCH_RSSI_THRESHOLD;
+
+	return err;
+}
+
+static int bcm2048_set_fm_search_mode_direction(struct bcm2048_device *bdev,
+						u8 direction)
+{
+	int err;
+
+	mutex_lock(&bdev->mutex);
+
+	bdev->cache_fm_search_ctrl0 &= ~BCM2048_SEARCH_DIRECTION;
+
+	if (direction)
+		bdev->cache_fm_search_ctrl0 |= BCM2048_SEARCH_DIRECTION;
+
+	err = bcm2048_send_command(bdev, BCM2048_I2C_FM_SEARCH_CTRL0,
+					bdev->cache_fm_search_ctrl0);
+
+	mutex_unlock(&bdev->mutex);
+	return err;
+}
+
+static int bcm2048_get_fm_search_mode_direction(struct bcm2048_device *bdev)
+{
+	int err;
+	u8 value;
+
+	mutex_lock(&bdev->mutex);
+
+	err = bcm2048_recv_command(bdev, BCM2048_I2C_FM_SEARCH_CTRL0, &value);
+
+	mutex_unlock(&bdev->mutex);
+
+	if (!err && (value & BCM2048_SEARCH_DIRECTION))
+		return BCM2048_SEARCH_DIRECTION_UP;
+
+	return err;
+}
+
+static int bcm2048_set_fm_search_tune_mode(struct bcm2048_device *bdev,
+						u8 mode)
+{
+	int err, timeout, restart_rds = 0;
+	u8 value, flags;
+
+	value = mode & BCM2048_FM_AUTO_SEARCH;
+
+	flags =	BCM2048_FM_FLAG_SEARCH_TUNE_FINISHED |
+		BCM2048_FM_FLAG_SEARCH_TUNE_FAIL;
+
+	mutex_lock(&bdev->mutex);
+
+	/*
+	 * If RDS is enabled and the frequency is changed, RDS quits working.
+	 * Thus, always restart RDS if it is enabled. Moreover, RDS must
+	 * not be enabled while changing the frequency, because the workqueue
+	 * handler can race for this mutex if an RDS IRQ occurs while we are
+	 * waiting for the frequency-changed IRQ.
+	 */
+	if (bcm2048_get_rds_no_lock(bdev)) {
+		err = bcm2048_set_rds_no_lock(bdev, 0);
+		if (err)
+			goto unlock;
+		restart_rds = 1;
+	}
+
+	err = bcm2048_send_command(bdev, BCM2048_I2C_FM_RDS_MASK0, flags);
+
+	if (err)
+		goto unlock;
+
+	bcm2048_send_command(bdev, BCM2048_I2C_FM_SEARCH_TUNE_MODE, value);
+
+	if (mode != BCM2048_FM_AUTO_SEARCH_MODE)
+		timeout = BCM2048_DEFAULT_TIMEOUT;
+	else
+		timeout = BCM2048_AUTO_SEARCH_TIMEOUT;
+
+	if (!wait_for_completion_timeout(&bdev->compl,
+			msecs_to_jiffies(timeout)))
+			dev_err(&bdev->client->dev, "IRQ timeout.\n");
+
+	if (value)
+		if (!bdev->scan_state)
+			err = -EIO;
+
+unlock:
+	if (restart_rds)
+		err |= bcm2048_set_rds_no_lock(bdev, 1);
+
+	mutex_unlock(&bdev->mutex);
+
+	return err;
+}
+
+static int bcm2048_get_fm_search_tune_mode(struct bcm2048_device *bdev)
+{
+	int err;
+	u8 value;
+
+	mutex_lock(&bdev->mutex);
+
+	err = bcm2048_recv_command(bdev, BCM2048_I2C_FM_SEARCH_TUNE_MODE,
+					&value);
+
+	mutex_unlock(&bdev->mutex);
+
+	if (!err)
+		return value & BCM2048_FM_AUTO_SEARCH;
+
+	return err;
+}
+
+static int bcm2048_set_rds_b_block_mask(struct bcm2048_device *bdev, u16 mask)
+{
+	int err;
+
+	mutex_lock(&bdev->mutex);
+
+	err = bcm2048_send_command(bdev,
+			BCM2048_I2C_RDS_BLKB_MASK0, lsb(mask));
+	err |= bcm2048_send_command(bdev,
+			BCM2048_I2C_RDS_BLKB_MASK1, msb(mask));
+
+	mutex_unlock(&bdev->mutex);
+	return err;
+}
+
+static int bcm2048_get_rds_b_block_mask(struct bcm2048_device *bdev)
+{
+	int err;
+	u8 lsb, msb;
+
+	mutex_lock(&bdev->mutex);
+
+	err = bcm2048_recv_command(bdev,
+			BCM2048_I2C_RDS_BLKB_MASK0, &lsb);
+	err |= bcm2048_recv_command(bdev,
+			BCM2048_I2C_RDS_BLKB_MASK1, &msb);
+
+	mutex_unlock(&bdev->mutex);
+
+	if (!err)
+		return compose_u16(msb, lsb);
+
+	return err;
+}
+
+static int bcm2048_set_rds_b_block_match(struct bcm2048_device *bdev,
+						u16 match)
+{
+	int err;
+
+	mutex_lock(&bdev->mutex);
+
+	err = bcm2048_send_command(bdev,
+			BCM2048_I2C_RDS_BLKB_MATCH0, lsb(match));
+	err |= bcm2048_send_command(bdev,
+			BCM2048_I2C_RDS_BLKB_MATCH1, msb(match));
+
+	mutex_unlock(&bdev->mutex);
+	return err;
+}
+
+static int bcm2048_get_rds_b_block_match(struct bcm2048_device *bdev)
+{
+	int err;
+	u8 lsb, msb;
+
+	mutex_lock(&bdev->mutex);
+
+	err = bcm2048_recv_command(bdev,
+			BCM2048_I2C_RDS_BLKB_MATCH0, &lsb);
+	err |= bcm2048_recv_command(bdev,
+			BCM2048_I2C_RDS_BLKB_MATCH1, &msb);
+
+	mutex_unlock(&bdev->mutex);
+
+	if (!err)
+		return compose_u16(msb, lsb);
+
+	return err;
+}
+
+static int bcm2048_set_rds_pi_mask(struct bcm2048_device *bdev, u16 mask)
+{
+	int err;
+
+	mutex_lock(&bdev->mutex);
+
+	err = bcm2048_send_command(bdev,
+			BCM2048_I2C_RDS_PI_MASK0, lsb(mask));
+	err |= bcm2048_send_command(bdev,
+			BCM2048_I2C_RDS_PI_MASK1, msb(mask));
+
+	mutex_unlock(&bdev->mutex);
+	return err;
+}
+
+static int bcm2048_get_rds_pi_mask(struct bcm2048_device *bdev)
+{
+	int err;
+	u8 lsb, msb;
+
+	mutex_lock(&bdev->mutex);
+
+	err = bcm2048_recv_command(bdev,
+			BCM2048_I2C_RDS_PI_MASK0, &lsb);
+	err |= bcm2048_recv_command(bdev,
+			BCM2048_I2C_RDS_PI_MASK1, &msb);
+
+	mutex_unlock(&bdev->mutex);
+
+	if (!err)
+		return compose_u16(msb, lsb);
+
+	return err;
+}
+
+static int bcm2048_set_rds_pi_match(struct bcm2048_device *bdev, u16 match)
+{
+	int err;
+
+	mutex_lock(&bdev->mutex);
+
+	err = bcm2048_send_command(bdev,
+			BCM2048_I2C_RDS_PI_MATCH0, lsb(match));
+	err |= bcm2048_send_command(bdev,
+			BCM2048_I2C_RDS_PI_MATCH1, msb(match));
+
+	mutex_unlock(&bdev->mutex);
+	return err;
+}
+
+static int bcm2048_get_rds_pi_match(struct bcm2048_device *bdev)
+{
+	int err;
+	u8 lsb, msb;
+
+	mutex_lock(&bdev->mutex);
+
+	err = bcm2048_recv_command(bdev,
+			BCM2048_I2C_RDS_PI_MATCH0, &lsb);
+	err |= bcm2048_recv_command(bdev,
+			BCM2048_I2C_RDS_PI_MATCH1, &msb);
+
+	mutex_unlock(&bdev->mutex);
+
+	if (!err)
+		return compose_u16(msb, lsb);
+
+	return err;
+}
+
+static int bcm2048_set_fm_rds_mask(struct bcm2048_device *bdev, u16 mask)
+{
+	int err;
+
+	mutex_lock(&bdev->mutex);
+
+	err = bcm2048_send_command(bdev,
+			BCM2048_I2C_FM_RDS_MASK0, lsb(mask));
+	err |= bcm2048_send_command(bdev,
+			BCM2048_I2C_FM_RDS_MASK1, msb(mask));
+
+	mutex_unlock(&bdev->mutex);
+	return err;
+}
+
+static int bcm2048_get_fm_rds_mask(struct bcm2048_device *bdev)
+{
+	int err;
+	u8 value0, value1;
+
+	mutex_lock(&bdev->mutex);
+
+	err = bcm2048_recv_command(bdev, BCM2048_I2C_FM_RDS_MASK0, &value0);
+	err |= bcm2048_recv_command(bdev, BCM2048_I2C_FM_RDS_MASK1, &value1);
+
+	mutex_unlock(&bdev->mutex);
+
+	if (!err)
+		return compose_u16(value1, value0);
+
+	return err;
+}
+
+static int bcm2048_get_fm_rds_flags(struct bcm2048_device *bdev)
+{
+	int err;
+	u8 value0, value1;
+
+	mutex_lock(&bdev->mutex);
+
+	err = bcm2048_recv_command(bdev, BCM2048_I2C_FM_RDS_FLAG0, &value0);
+	err |= bcm2048_recv_command(bdev, BCM2048_I2C_FM_RDS_FLAG1, &value1);
+
+	mutex_unlock(&bdev->mutex);
+
+	if (!err)
+		return compose_u16(value1, value0);
+
+	return err;
+}
+
+static int bcm2048_get_region_bottom_frequency(struct bcm2048_device *bdev)
+{
+	return bdev->region_info.bottom_frequency;
+}
+
+static int bcm2048_get_region_top_frequency(struct bcm2048_device *bdev)
+{
+	return bdev->region_info.top_frequency;
+}
+
+static int bcm2048_set_fm_best_tune_mode(struct bcm2048_device *bdev, u8 mode)
+{
+	int err;
+	u8 value;
+
+	mutex_lock(&bdev->mutex);
+
+	/* Perform read as the manual indicates */
+	err = bcm2048_recv_command(bdev, BCM2048_I2C_FM_BEST_TUNE_MODE,
+					&value);
+	value &= ~BCM2048_BEST_TUNE_MODE;
+
+	if (mode)
+		value |= BCM2048_BEST_TUNE_MODE;
+	err |= bcm2048_send_command(bdev, BCM2048_I2C_FM_BEST_TUNE_MODE,
+					value);
+
+	mutex_unlock(&bdev->mutex);
+	return err;
+}
+
+static int bcm2048_get_fm_best_tune_mode(struct bcm2048_device *bdev)
+{
+	int err;
+	u8 value;
+
+	mutex_lock(&bdev->mutex);
+
+	err = bcm2048_recv_command(bdev, BCM2048_I2C_FM_BEST_TUNE_MODE,
+					&value);
+
+	mutex_unlock(&bdev->mutex);
+
+	if (!err && (value & BCM2048_BEST_TUNE_MODE))
+		return BCM2048_ITEM_ENABLED;
+
+	return err;
+}
+
+static int bcm2048_get_fm_carrier_error(struct bcm2048_device *bdev)
+{
+	int err = 0;
+	s8 value;
+
+	mutex_lock(&bdev->mutex);
+	err = bcm2048_recv_command(bdev, BCM2048_I2C_FM_CARRIER, &value);
+	mutex_unlock(&bdev->mutex);
+
+	if (!err)
+		return value;
+
+	return err;
+}
+
+static int bcm2048_get_fm_rssi(struct bcm2048_device *bdev)
+{
+	int err;
+	s8 value;
+
+	mutex_lock(&bdev->mutex);
+	err = bcm2048_recv_command(bdev, BCM2048_I2C_FM_RSSI, &value);
+	mutex_unlock(&bdev->mutex);
+
+	if (!err)
+		return value;
+
+	return err;
+}
+
+static int bcm2048_set_rds_wline(struct bcm2048_device *bdev, u8 wline)
+{
+	int err;
+
+	mutex_lock(&bdev->mutex);
+
+	err = bcm2048_send_command(bdev, BCM2048_I2C_RDS_WLINE, wline);
+
+	if (!err)
+		bdev->fifo_size = wline;
+
+	mutex_unlock(&bdev->mutex);
+	return err;
+}
+
+static int bcm2048_get_rds_wline(struct bcm2048_device *bdev)
+{
+	int err;
+	u8 value;
+
+	mutex_lock(&bdev->mutex);
+
+	err = bcm2048_recv_command(bdev, BCM2048_I2C_RDS_WLINE, &value);
+
+	mutex_unlock(&bdev->mutex);
+
+	if (!err) {
+		bdev->fifo_size = value;
+		return value;
+	}
+
+	return err;
+}
+
+static int bcm2048_checkrev(struct bcm2048_device *bdev)
+{
+	int err;
+	u8 version;
+
+	mutex_lock(&bdev->mutex);
+
+	err = bcm2048_recv_command(bdev, BCM2048_I2C_FM_RDS_REV, &version);
+
+	mutex_unlock(&bdev->mutex);
+
+	if (!err) {
+		dev_info(&bdev->client->dev, "BCM2048 Version 0x%x\n",
+			version);
+		return version;
+	}
+
+	return err;
+}
+
+static int bcm2048_get_rds_rt(struct bcm2048_device *bdev, char *data)
+{
+	int err = 0, i, j = 0, ce = 0, cr = 0;
+	char data_buffer[BCM2048_MAX_RDS_RT+1];
+
+	mutex_lock(&bdev->mutex);
+
+	if (!bdev->rds_info.text_len) {
+		err = -EINVAL;
+		goto unlock;
+	}
+
+	for (i = 0; i < BCM2048_MAX_RDS_RT; i++) {
+		if (bdev->rds_info.rds_rt[i]) {
+			ce = i;
+			/* Skip the carriage return */
+			if (bdev->rds_info.rds_rt[i] != 0x0d) {
+				data_buffer[j++] = bdev->rds_info.rds_rt[i];
+			} else {
+				cr = i;
+				break;
+			}
+		}
+	}
+
+	if (j <= BCM2048_MAX_RDS_RT)
+		data_buffer[j] = 0;
+
+	for (i = 0; i < BCM2048_MAX_RDS_RT; i++) {
+		if (!bdev->rds_info.rds_rt[i]) {
+			if (cr && (i < cr)) {
+				err = -EBUSY;
+				goto unlock;
+			}
+			if (i < ce) {
+				if (cr && (i >= cr))
+					break;
+				err = -EBUSY;
+				goto unlock;
+			}
+		}
+	}
+
+	memcpy(data, data_buffer, sizeof(data_buffer));
+
+unlock:
+	mutex_unlock(&bdev->mutex);
+	return err;
+}
+
+static int bcm2048_get_rds_ps(struct bcm2048_device *bdev, char *data)
+{
+	int err = 0, i, j = 0;
+	char data_buffer[BCM2048_MAX_RDS_PS+1];
+
+	mutex_lock(&bdev->mutex);
+
+	if (!bdev->rds_info.text_len) {
+		err = -EINVAL;
+		goto unlock;
+	}
+
+	for (i = 0; i < BCM2048_MAX_RDS_PS; i++) {
+		if (bdev->rds_info.rds_ps[i]) {
+			data_buffer[j++] = bdev->rds_info.rds_ps[i];
+		} else {
+			if (i < (BCM2048_MAX_RDS_PS - 1)) {
+				err = -EBUSY;
+				goto unlock;
+			}
+		}
+	}
+
+	if (j <= BCM2048_MAX_RDS_PS)
+		data_buffer[j] = 0;
+
+	memcpy(data, data_buffer, sizeof(data_buffer));
+
+unlock:
+	mutex_unlock(&bdev->mutex);
+	return err;
+}
+
+static void bcm2048_parse_rds_pi(struct bcm2048_device *bdev)
+{
+	int i, cnt = 0;
+	u16 pi;
+
+	for (i = 0; i < bdev->fifo_size; i += BCM2048_RDS_FIFO_DUPLE_SIZE) {
+
+		/* Block A match, only data without crc errors taken */
+		if (bdev->rds_info.radio_text[i] == BCM2048_RDS_BLOCK_A) {
+
+			pi = ((bdev->rds_info.radio_text[i+1] << 8) +
+				bdev->rds_info.radio_text[i+2]);
+
+			if (!bdev->rds_info.rds_pi) {
+				bdev->rds_info.rds_pi = pi;
+				return;
+			}
+			if (pi != bdev->rds_info.rds_pi) {
+				cnt++;
+				if (cnt > 3) {
+					bdev->rds_info.rds_pi = pi;
+					cnt = 0;
+				}
+			} else {
+				cnt = 0;
+			}
+		}
+	}
+}
+
+static int bcm2048_rds_block_crc(struct bcm2048_device *bdev, int i)
+{
+	return bdev->rds_info.radio_text[i] & BCM2048_RDS_CRC_MASK;
+}
+
+static void bcm2048_parse_rds_rt_block(struct bcm2048_device *bdev, int i,
+					int index, int crc)
+{
+	/* Good data will overwrite poor data */
+	if (crc) {
+		if (!bdev->rds_info.rds_rt[index])
+			bdev->rds_info.rds_rt[index] =
+				bdev->rds_info.radio_text[i+1];
+		if (!bdev->rds_info.rds_rt[index+1])
+			bdev->rds_info.rds_rt[index+1] =
+				bdev->rds_info.radio_text[i+2];
+	} else {
+		bdev->rds_info.rds_rt[index] = bdev->rds_info.radio_text[i+1];
+		bdev->rds_info.rds_rt[index+1] =
+			bdev->rds_info.radio_text[i+2];
+	}
+}
+
+static int bcm2048_parse_rt_match_b(struct bcm2048_device *bdev, int i)
+{
+	int crc, rt_id, rt_group_b, rt_ab, index = 0;
+
+	crc = bcm2048_rds_block_crc(bdev, i);
+
+	if (crc == BCM2048_RDS_CRC_UNRECOVARABLE)
+		return -EIO;
+
+	if ((bdev->rds_info.radio_text[i] & BCM2048_RDS_BLOCK_MASK) ==
+		BCM2048_RDS_BLOCK_B) {
+
+		rt_id = (bdev->rds_info.radio_text[i+1] &
+			BCM2048_RDS_BLOCK_MASK);
+		rt_group_b = bdev->rds_info.radio_text[i+1] &
+			BCM2048_RDS_GROUP_AB_MASK;
+		rt_ab = bdev->rds_info.radio_text[i+2] &
+				BCM2048_RDS_RT_AB_MASK;
+
+		if (rt_group_b != bdev->rds_info.rds_rt_group_b) {
+			memset(bdev->rds_info.rds_rt, 0,
+				sizeof(bdev->rds_info.rds_rt));
+			bdev->rds_info.rds_rt_group_b = rt_group_b;
+		}
+
+		if (rt_id == BCM2048_RDS_RT) {
+			/* A to B or (vice versa), means: clear screen */
+			if (rt_ab != bdev->rds_info.rds_rt_ab) {
+				memset(bdev->rds_info.rds_rt, 0,
+					sizeof(bdev->rds_info.rds_rt));
+				bdev->rds_info.rds_rt_ab = rt_ab;
+			}
+
+			index = bdev->rds_info.radio_text[i+2] &
+					BCM2048_RDS_RT_INDEX;
+
+			if (bdev->rds_info.rds_rt_group_b)
+				index <<= 1;
+			else
+				index <<= 2;
+
+			return index;
+		}
+	}
+
+	return -EIO;
+}
+
+static int bcm2048_parse_rt_match_c(struct bcm2048_device *bdev, int i,
+					int index)
+{
+	int crc;
+
+	crc = bcm2048_rds_block_crc(bdev, i);
+
+	if (crc == BCM2048_RDS_CRC_UNRECOVARABLE)
+		return 0;
+
+	BUG_ON((index+2) >= BCM2048_MAX_RDS_RT);
+
+	if ((bdev->rds_info.radio_text[i] & BCM2048_RDS_BLOCK_MASK) ==
+		BCM2048_RDS_BLOCK_C) {
+		if (bdev->rds_info.rds_rt_group_b)
+			return 1;
+		bcm2048_parse_rds_rt_block(bdev, i, index, crc);
+		return 1;
+	}
+
+	return 0;
+}
+
+static void bcm2048_parse_rt_match_d(struct bcm2048_device *bdev, int i,
+					int index)
+{
+	int crc;
+
+	crc = bcm2048_rds_block_crc(bdev, i);
+
+	if (crc == BCM2048_RDS_CRC_UNRECOVARABLE)
+		return;
+
+	BUG_ON((index+4) >= BCM2048_MAX_RDS_RT);
+
+	if ((bdev->rds_info.radio_text[i] & BCM2048_RDS_BLOCK_MASK) ==
+		BCM2048_RDS_BLOCK_D)
+		bcm2048_parse_rds_rt_block(bdev, i, index+2, crc);
+}
+
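+/*
+ * Scan the RDS FIFO as a small state machine: once a block A carrying the
+ * current PI is seen, the following duples are expected to be blocks B, C
+ * and D of the same group and are parsed in that order.
+ */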
+static int bcm2048_parse_rds_rt(struct bcm2048_device *bdev)
+{
+	int i, index = 0, crc, match_b = 0, match_c = 0, match_d = 0;
+
+	for (i = 0; i < bdev->fifo_size; i += BCM2048_RDS_FIFO_DUPLE_SIZE) {
+
+		if (match_b) {
+			match_b = 0;
+			index = bcm2048_parse_rt_match_b(bdev, i);
+			if (index >= 0 && index <= (BCM2048_MAX_RDS_RT - 5))
+				match_c = 1;
+			continue;
+		} else if (match_c) {
+			match_c = 0;
+			if (bcm2048_parse_rt_match_c(bdev, i, index))
+				match_d = 1;
+			continue;
+		} else if (match_d) {
+			match_d = 0;
+			bcm2048_parse_rt_match_d(bdev, i, index);
+			continue;
+		}
+
+		/* Skip blocks altogether when the A block is corrupted */
+		if ((bdev->rds_info.radio_text[i] & BCM2048_RDS_BLOCK_MASK)
+			== BCM2048_RDS_BLOCK_A) {
+			crc = bcm2048_rds_block_crc(bdev, i);
+			if (crc == BCM2048_RDS_CRC_UNRECOVARABLE)
+				continue;
+			/* Synchronize to a good RDS PI */
+			if (((bdev->rds_info.radio_text[i+1] << 8) +
+				bdev->rds_info.radio_text[i+2]) ==
+				bdev->rds_info.rds_pi)
+					match_b = 1;
+		}
+	}
+
+	return 0;
+}
+
+static void bcm2048_parse_rds_ps_block(struct bcm2048_device *bdev, int i,
+					int index, int crc)
+{
+	/* Good data will overwrite poor data */
+	if (crc) {
+		if (!bdev->rds_info.rds_ps[index])
+			bdev->rds_info.rds_ps[index] =
+				bdev->rds_info.radio_text[i+1];
+		if (!bdev->rds_info.rds_ps[index+1])
+			bdev->rds_info.rds_ps[index+1] =
+				bdev->rds_info.radio_text[i+2];
+	} else {
+		bdev->rds_info.rds_ps[index] = bdev->rds_info.radio_text[i+1];
+		bdev->rds_info.rds_ps[index+1] =
+			bdev->rds_info.radio_text[i+2];
+	}
+}
+
+static int bcm2048_parse_ps_match_c(struct bcm2048_device *bdev, int i,
+					int index)
+{
+	int crc;
+
+	crc = bcm2048_rds_block_crc(bdev, i);
+
+	if (crc == BCM2048_RDS_CRC_UNRECOVARABLE)
+		return 0;
+
+	if ((bdev->rds_info.radio_text[i] & BCM2048_RDS_BLOCK_MASK) ==
+		BCM2048_RDS_BLOCK_C)
+		return 1;
+
+	return 0;
+}
+
+static void bcm2048_parse_ps_match_d(struct bcm2048_device *bdev, int i,
+					int index)
+{
+	int crc;
+
+	crc = bcm2048_rds_block_crc(bdev, i);
+
+	if (crc == BCM2048_RDS_CRC_UNRECOVARABLE)
+		return;
+
+	if ((bdev->rds_info.radio_text[i] & BCM2048_RDS_BLOCK_MASK) ==
+		BCM2048_RDS_BLOCK_D)
+		bcm2048_parse_rds_ps_block(bdev, i, index, crc);
+}
+
+static int bcm2048_parse_ps_match_b(struct bcm2048_device *bdev, int i)
+{
+	int crc, index, ps_id, ps_group;
+
+	crc = bcm2048_rds_block_crc(bdev, i);
+
+	if (crc == BCM2048_RDS_CRC_UNRECOVARABLE)
+		return -EIO;
+
+	/* Block B Radio PS match */
+	if ((bdev->rds_info.radio_text[i] & BCM2048_RDS_BLOCK_MASK) ==
+		BCM2048_RDS_BLOCK_B) {
+		ps_id = bdev->rds_info.radio_text[i+1] &
+			BCM2048_RDS_BLOCK_MASK;
+		ps_group = bdev->rds_info.radio_text[i+1] &
+			BCM2048_RDS_GROUP_AB_MASK;
+
+		/*
+		 * Poor RSSI leads to RDS data corruption, so use three
+		 * identical sequential values to justify a major change.
+		 */
+		if (ps_group != bdev->rds_info.rds_ps_group) {
+			if (crc == BCM2048_RDS_CRC_NONE) {
+				bdev->rds_info.rds_ps_group_cnt++;
+				if (bdev->rds_info.rds_ps_group_cnt > 2) {
+					bdev->rds_info.rds_ps_group = ps_group;
+					bdev->rds_info.rds_ps_group_cnt	= 0;
+					dev_err(&bdev->client->dev,
+						"RDS PS Group change!\n");
+				} else {
+					return -EIO;
+				}
+			} else {
+				bdev->rds_info.rds_ps_group_cnt = 0;
+			}
+		}
+
+		if (ps_id == BCM2048_RDS_PS) {
+			index = bdev->rds_info.radio_text[i+2] &
+				BCM2048_RDS_PS_INDEX;
+			index <<= 1;
+			return index;
+		}
+	}
+
+	return -EIO;
+}
+
+static void bcm2048_parse_rds_ps(struct bcm2048_device *bdev)
+{
+	int i, index = 0, crc, match_b = 0, match_c = 0, match_d = 0;
+
+	for (i = 0; i < bdev->fifo_size; i += BCM2048_RDS_FIFO_DUPLE_SIZE) {
+
+		if (match_b) {
+			match_b = 0;
+			index = bcm2048_parse_ps_match_b(bdev, i);
+			if (index >= 0 && index < (BCM2048_MAX_RDS_PS - 1))
+				match_c = 1;
+			continue;
+		} else if (match_c) {
+			match_c = 0;
+			if (bcm2048_parse_ps_match_c(bdev, i, index))
+				match_d = 1;
+			continue;
+		} else if (match_d) {
+			match_d = 0;
+			bcm2048_parse_ps_match_d(bdev, i, index);
+			continue;
+		}
+
+		/* Skip blocks altogether when the A block is corrupted */
+		if ((bdev->rds_info.radio_text[i] & BCM2048_RDS_BLOCK_MASK)
+			== BCM2048_RDS_BLOCK_A) {
+			crc = bcm2048_rds_block_crc(bdev, i);
+			if (crc == BCM2048_RDS_CRC_UNRECOVARABLE)
+				continue;
+			/* Synchronize to a good RDS PI */
+			if (((bdev->rds_info.radio_text[i+1] << 8) +
+				bdev->rds_info.radio_text[i+2]) ==
+				bdev->rds_info.rds_pi)
+					match_b = 1;
+		}
+	}
+}
+
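+/*
+ * Read the RDS FIFO contents over I2C, run the PI, RT and PS parsers on
+ * the received duples, then wake up any readers blocked in read().
+ */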
+static void bcm2048_rds_fifo_receive(struct bcm2048_device *bdev)
+{
+	int err;
+
+	mutex_lock(&bdev->mutex);
+
+	err = bcm2048_recv_duples(bdev, BCM2048_I2C_RDS_DATA,
+				bdev->rds_info.radio_text, bdev->fifo_size);
+	if (err != 2) {
+		dev_err(&bdev->client->dev, "RDS Read problem\n");
+		mutex_unlock(&bdev->mutex);
+		return;
+	}
+
+	bdev->rds_info.text_len = bdev->fifo_size;
+
+	bcm2048_parse_rds_pi(bdev);
+	bcm2048_parse_rds_rt(bdev);
+	bcm2048_parse_rds_ps(bdev);
+
+	mutex_unlock(&bdev->mutex);
+
+	wake_up_interruptible(&bdev->read_queue);
+}
+
+static int bcm2048_get_rds_data(struct bcm2048_device *bdev, char *data)
+{
+	int err = 0, i, p = 0;
+	char *data_buffer;
+
+	mutex_lock(&bdev->mutex);
+
+	if (!bdev->rds_info.text_len) {
+		err = -EINVAL;
+		goto unlock;
+	}
+
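+	/* Dump the raw RDS FIFO contents as space-separated hex bytes */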
+	data_buffer = kzalloc(BCM2048_MAX_RDS_RADIO_TEXT*5, GFP_KERNEL);
+	if (!data_buffer) {
+		err = -ENOMEM;
+		goto unlock;
+	}
+
+	for (i = 0; i < bdev->rds_info.text_len; i++) {
+		p += sprintf(data_buffer+p, "%x ",
+			bdev->rds_info.radio_text[i]);
+	}
+
+	memcpy(data, data_buffer, p);
+	kfree(data_buffer);
+
+unlock:
+	mutex_unlock(&bdev->mutex);
+	return err;
+}
+
+/*
+ *	BCM2048 default initialization sequence
+ */
+static int bcm2048_init(struct bcm2048_device *bdev)
+{
+	int err;
+
+	err = bcm2048_set_power_state(bdev, BCM2048_POWER_ON);
+	if (err < 0)
+		goto exit;
+
+	err = bcm2048_set_audio_route(bdev, BCM2048_AUDIO_ROUTE_DAC);
+	if (err < 0)
+		goto exit;
+
+	err = bcm2048_set_dac_output(bdev, BCM2048_DAC_OUTPUT_LEFT |
+		BCM2048_DAC_OUTPUT_RIGHT);
+
+exit:
+	return err;
+}
+
+/*
+ *	BCM2048 default deinitialization sequence
+ */
+static int bcm2048_deinit(struct bcm2048_device *bdev)
+{
+	int err;
+
+	err = bcm2048_set_audio_route(bdev, 0);
+	if (err < 0)
+		goto exit;
+
+	err = bcm2048_set_dac_output(bdev, 0);
+	if (err < 0)
+		goto exit;
+
+	err = bcm2048_set_power_state(bdev, BCM2048_POWER_OFF);
+	if (err < 0)
+		goto exit;
+
+exit:
+	return err;
+}
+
+/*
+ *	BCM2048 probe sequence
+ */
+static int bcm2048_probe(struct bcm2048_device *bdev)
+{
+	int err;
+
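+	/* Power up to verify the chip and set defaults, then power down */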
+	err = bcm2048_set_power_state(bdev, BCM2048_POWER_ON);
+	if (err < 0)
+		goto unlock;
+
+	err = bcm2048_checkrev(bdev);
+	if (err < 0)
+		goto unlock;
+
+	err = bcm2048_set_mute(bdev, BCM2048_DEFAULT_MUTE);
+	if (err < 0)
+		goto unlock;
+
+	err = bcm2048_set_region(bdev, BCM2048_DEFAULT_REGION);
+	if (err < 0)
+		goto unlock;
+
+	err = bcm2048_set_fm_search_rssi_threshold(bdev,
+					BCM2048_DEFAULT_RSSI_THRESHOLD);
+	if (err < 0)
+		goto unlock;
+
+	err = bcm2048_set_fm_automatic_stereo_mono(bdev, BCM2048_ITEM_ENABLED);
+	if (err < 0)
+		goto unlock;
+
+	err = bcm2048_get_rds_wline(bdev);
+	if (err < BCM2048_DEFAULT_RDS_WLINE)
+		err = bcm2048_set_rds_wline(bdev, BCM2048_DEFAULT_RDS_WLINE);
+	if (err < 0)
+		goto unlock;
+
+	err = bcm2048_set_power_state(bdev, BCM2048_POWER_OFF);
+
+	init_waitqueue_head(&bdev->read_queue);
+	bdev->rds_data_available = 0;
+	bdev->rd_index = 0;
+	bdev->users = 0;
+
+unlock:
+	return err;
+}
+
+/*
+ *	BCM2048 workqueue handler
+ */
+static void bcm2048_work(struct work_struct *work)
+{
+	struct bcm2048_device *bdev;
+	u8 flag_lsb, flag_msb, flags;
+
+	bdev = container_of(work, struct bcm2048_device, work);
+	bcm2048_recv_command(bdev, BCM2048_I2C_FM_RDS_FLAG0, &flag_lsb);
+	bcm2048_recv_command(bdev, BCM2048_I2C_FM_RDS_FLAG1, &flag_msb);
+
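+	/* FLAG0 reports search/tune status, FLAG1 the RDS FIFO level */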
+	if (flag_lsb & (BCM2048_FM_FLAG_SEARCH_TUNE_FINISHED |
+			BCM2048_FM_FLAG_SEARCH_TUNE_FAIL)) {
+
+		if (flag_lsb & BCM2048_FM_FLAG_SEARCH_TUNE_FAIL)
+			bdev->scan_state = BCM2048_SCAN_FAIL;
+		else
+			bdev->scan_state = BCM2048_SCAN_OK;
+
+		complete(&bdev->compl);
+	}
+
+	if (flag_msb & BCM2048_RDS_FLAG_FIFO_WLINE) {
+		bcm2048_rds_fifo_receive(bdev);
+		if (bdev->rds_state) {
+			flags =	BCM2048_RDS_FLAG_FIFO_WLINE;
+			bcm2048_send_command(bdev, BCM2048_I2C_FM_RDS_MASK1,
+						flags);
+		}
+		bdev->rds_data_available = 1;
+		bdev->rd_index = 0; /* new data, new start */
+	}
+}
+
+/*
+ *	BCM2048 interrupt handler
+ */
+static irqreturn_t bcm2048_handler(int irq, void *dev)
+{
+	struct bcm2048_device *bdev = dev;
+
+	dev_dbg(&bdev->client->dev, "IRQ called, queuing work\n");
+	if (bdev->power_state)
+		schedule_work(&bdev->work);
+
+	return IRQ_HANDLED;
+}
+
+/*
+ *	BCM2048 sysfs interface definitions
+ */
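+
+/*
+ * The property_* macros below expand to sysfs show/store handlers that
+ * wrap the corresponding bcm2048_get_*()/bcm2048_set_*() accessors; the
+ * optional check expression rejects out-of-range input with -EDOM.
+ */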
+#define property_write(prop, type, mask, check)				\
+static ssize_t bcm2048_##prop##_write(struct device *dev,		\
+					struct device_attribute *attr,	\
+					const char *buf,		\
+					size_t count)			\
+{									\
+	struct bcm2048_device *bdev = dev_get_drvdata(dev);		\
+	type value;							\
+	int err;							\
+									\
+	if (!bdev)							\
+		return -ENODEV;						\
+									\
+	sscanf(buf, mask, &value);					\
+									\
+	if (check)							\
+		return -EDOM;						\
+									\
+	err = bcm2048_set_##prop(bdev, value);				\
+									\
+	return err < 0 ? err : count;					\
+}
+
+#define property_read(prop, size, mask)					\
+static ssize_t bcm2048_##prop##_read(struct device *dev,		\
+					struct device_attribute *attr,	\
+					char *buf)			\
+{									\
+	struct bcm2048_device *bdev = dev_get_drvdata(dev);		\
+	int value;							\
+									\
+	if (!bdev)							\
+		return -ENODEV;						\
+									\
+	value = bcm2048_get_##prop(bdev);				\
+									\
+	if (value >= 0)							\
+		value = sprintf(buf, mask "\n", value);			\
+									\
+	return value;							\
+}
+
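+/* Like property_read(), but for accessors that may return negative values */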
+#define property_signed_read(prop, size, mask)				\
+static ssize_t bcm2048_##prop##_read(struct device *dev,		\
+					struct device_attribute *attr,	\
+					char *buf)			\
+{									\
+	struct bcm2048_device *bdev = dev_get_drvdata(dev);		\
+	size value;							\
+									\
+	if (!bdev)							\
+		return -ENODEV;						\
+									\
+	value = bcm2048_get_##prop(bdev);				\
+									\
+	value = sprintf(buf, mask "\n", value);				\
+									\
+	return value;							\
+}
+
+#define DEFINE_SYSFS_PROPERTY(prop, signal, size, mask, check)		\
+property_write(prop, signal size, mask, check)				\
+property_read(prop, size, mask)
+
+#define property_str_read(prop, size)					\
+static ssize_t bcm2048_##prop##_read(struct device *dev,		\
+					struct device_attribute *attr,	\
+					char *buf)			\
+{									\
+	struct bcm2048_device *bdev = dev_get_drvdata(dev);		\
+	int count;							\
+	u8 *out;							\
+									\
+	if (!bdev)							\
+		return -ENODEV;						\
+									\
+	out = kzalloc(size + 1, GFP_KERNEL);				\
+	if (!out)							\
+		return -ENOMEM;						\
+									\
+	bcm2048_get_##prop(bdev, out);					\
+	count = sprintf(buf, "%s\n", out);				\
+									\
+	kfree(out);							\
+									\
+	return count;							\
+}
+
+DEFINE_SYSFS_PROPERTY(power_state, unsigned, int, "%u", 0)
+DEFINE_SYSFS_PROPERTY(mute, unsigned, int, "%u", 0)
+DEFINE_SYSFS_PROPERTY(audio_route, unsigned, int, "%u", 0)
+DEFINE_SYSFS_PROPERTY(dac_output, unsigned, int, "%u", 0)
+
+DEFINE_SYSFS_PROPERTY(fm_hi_lo_injection, unsigned, int, "%u", 0)
+DEFINE_SYSFS_PROPERTY(fm_frequency, unsigned, int, "%u", 0)
+DEFINE_SYSFS_PROPERTY(fm_af_frequency, unsigned, int, "%u", 0)
+DEFINE_SYSFS_PROPERTY(fm_deemphasis, unsigned, int, "%u", 0)
+DEFINE_SYSFS_PROPERTY(fm_rds_mask, unsigned, int, "%u", 0)
+DEFINE_SYSFS_PROPERTY(fm_best_tune_mode, unsigned, int, "%u", 0)
+DEFINE_SYSFS_PROPERTY(fm_search_rssi_threshold, unsigned, int, "%u", 0)
+DEFINE_SYSFS_PROPERTY(fm_search_mode_direction, unsigned, int, "%u", 0)
+DEFINE_SYSFS_PROPERTY(fm_search_tune_mode, unsigned, int, "%u", value > 3)
+
+DEFINE_SYSFS_PROPERTY(rds, unsigned, int, "%u", 0)
+DEFINE_SYSFS_PROPERTY(rds_b_block_mask, unsigned, int, "%u", 0)
+DEFINE_SYSFS_PROPERTY(rds_b_block_match, unsigned, int, "%u", 0)
+DEFINE_SYSFS_PROPERTY(rds_pi_mask, unsigned, int, "%u", 0)
+DEFINE_SYSFS_PROPERTY(rds_pi_match, unsigned, int, "%u", 0)
+DEFINE_SYSFS_PROPERTY(rds_wline, unsigned, int, "%u", 0)
+property_read(rds_pi, unsigned int, "%x")
+property_str_read(rds_rt, (BCM2048_MAX_RDS_RT + 1))
+property_str_read(rds_ps, (BCM2048_MAX_RDS_PS + 1))
+
+property_read(fm_rds_flags, unsigned int, "%u")
+property_str_read(rds_data, BCM2048_MAX_RDS_RADIO_TEXT*5)
+
+property_read(region_bottom_frequency, unsigned int, "%u")
+property_read(region_top_frequency, unsigned int, "%u")
+property_signed_read(fm_carrier_error, int, "%d")
+property_signed_read(fm_rssi, int, "%d")
+DEFINE_SYSFS_PROPERTY(region, unsigned, int, "%u", 0)
+
+static struct device_attribute attrs[] = {
+	__ATTR(power_state, S_IRUGO | S_IWUSR, bcm2048_power_state_read,
+		bcm2048_power_state_write),
+	__ATTR(mute, S_IRUGO | S_IWUSR, bcm2048_mute_read,
+		bcm2048_mute_write),
+	__ATTR(audio_route, S_IRUGO | S_IWUSR, bcm2048_audio_route_read,
+		bcm2048_audio_route_write),
+	__ATTR(dac_output, S_IRUGO | S_IWUSR, bcm2048_dac_output_read,
+		bcm2048_dac_output_write),
+	__ATTR(fm_hi_lo_injection, S_IRUGO | S_IWUSR,
+		bcm2048_fm_hi_lo_injection_read,
+		bcm2048_fm_hi_lo_injection_write),
+	__ATTR(fm_frequency, S_IRUGO | S_IWUSR, bcm2048_fm_frequency_read,
+		bcm2048_fm_frequency_write),
+	__ATTR(fm_af_frequency, S_IRUGO | S_IWUSR,
+		bcm2048_fm_af_frequency_read,
+		bcm2048_fm_af_frequency_write),
+	__ATTR(fm_deemphasis, S_IRUGO | S_IWUSR, bcm2048_fm_deemphasis_read,
+		bcm2048_fm_deemphasis_write),
+	__ATTR(fm_rds_mask, S_IRUGO | S_IWUSR, bcm2048_fm_rds_mask_read,
+		bcm2048_fm_rds_mask_write),
+	__ATTR(fm_best_tune_mode, S_IRUGO | S_IWUSR,
+		bcm2048_fm_best_tune_mode_read,
+		bcm2048_fm_best_tune_mode_write),
+	__ATTR(fm_search_rssi_threshold, S_IRUGO | S_IWUSR,
+		bcm2048_fm_search_rssi_threshold_read,
+		bcm2048_fm_search_rssi_threshold_write),
+	__ATTR(fm_search_mode_direction, S_IRUGO | S_IWUSR,
+		bcm2048_fm_search_mode_direction_read,
+		bcm2048_fm_search_mode_direction_write),
+	__ATTR(fm_search_tune_mode, S_IRUGO | S_IWUSR,
+		bcm2048_fm_search_tune_mode_read,
+		bcm2048_fm_search_tune_mode_write),
+	__ATTR(rds, S_IRUGO | S_IWUSR, bcm2048_rds_read,
+		bcm2048_rds_write),
+	__ATTR(rds_b_block_mask, S_IRUGO | S_IWUSR,
+		bcm2048_rds_b_block_mask_read,
+		bcm2048_rds_b_block_mask_write),
+	__ATTR(rds_b_block_match, S_IRUGO | S_IWUSR,
+		bcm2048_rds_b_block_match_read,
+		bcm2048_rds_b_block_match_write),
+	__ATTR(rds_pi_mask, S_IRUGO | S_IWUSR, bcm2048_rds_pi_mask_read,
+		bcm2048_rds_pi_mask_write),
+	__ATTR(rds_pi_match, S_IRUGO | S_IWUSR, bcm2048_rds_pi_match_read,
+		bcm2048_rds_pi_match_write),
+	__ATTR(rds_wline, S_IRUGO | S_IWUSR, bcm2048_rds_wline_read,
+		bcm2048_rds_wline_write),
+	__ATTR(rds_pi, S_IRUGO, bcm2048_rds_pi_read, NULL),
+	__ATTR(rds_rt, S_IRUGO, bcm2048_rds_rt_read, NULL),
+	__ATTR(rds_ps, S_IRUGO, bcm2048_rds_ps_read, NULL),
+	__ATTR(fm_rds_flags, S_IRUGO, bcm2048_fm_rds_flags_read, NULL),
+	__ATTR(region_bottom_frequency, S_IRUGO,
+		bcm2048_region_bottom_frequency_read, NULL),
+	__ATTR(region_top_frequency, S_IRUGO,
+		bcm2048_region_top_frequency_read, NULL),
+	__ATTR(fm_carrier_error, S_IRUGO,
+		bcm2048_fm_carrier_error_read, NULL),
+	__ATTR(fm_rssi, S_IRUGO,
+		bcm2048_fm_rssi_read, NULL),
+	__ATTR(region, S_IRUGO | S_IWUSR, bcm2048_region_read,
+		bcm2048_region_write),
+	__ATTR(rds_data, S_IRUGO, bcm2048_rds_data_read, NULL),
+};
+
+static int bcm2048_sysfs_unregister_properties(struct bcm2048_device *bdev,
+						int size)
+{
+	int i;
+
+	for (i = 0; i < size; i++)
+		device_remove_file(&bdev->client->dev, &attrs[i]);
+
+	return 0;
+}
+
+static int bcm2048_sysfs_register_properties(struct bcm2048_device *bdev)
+{
+	int err = 0;
+	int i;
+
+	for (i = 0; i < ARRAY_SIZE(attrs); i++) {
+		if (device_create_file(&bdev->client->dev, &attrs[i]) != 0) {
+			dev_err(&bdev->client->dev,
+					"could not register sysfs entry\n");
+			err = -EBUSY;
+			bcm2048_sysfs_unregister_properties(bdev, i);
+			break;
+		}
+	}
+
+	return err;
+}
+
+
+static int bcm2048_fops_open(struct file *file)
+{
+	struct bcm2048_device *bdev = video_drvdata(file);
+
+	bdev->users++;
+	bdev->rd_index = 0;
+	bdev->rds_data_available = 0;
+
+	return 0;
+}
+
+static int bcm2048_fops_release(struct file *file)
+{
+	struct bcm2048_device *bdev = video_drvdata(file);
+
+	bdev->users--;
+
+	return 0;
+}
+
+static unsigned int bcm2048_fops_poll(struct file *file,
+		struct poll_table_struct *pts)
+{
+	struct bcm2048_device *bdev = video_drvdata(file);
+	int retval = 0;
+
+	poll_wait(file, &bdev->read_queue, pts);
+
+	if (bdev->rds_data_available)
+		retval = POLLIN | POLLRDNORM;
+
+	return retval;
+}
+
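+/*
+ * RDS data is returned as 3-byte records: data LSB, data MSB, then the
+ * block identifier with bit 7 set when the block CRC was unrecoverable
+ * (the usual V4L2 RDS read layout).
+ */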
+static ssize_t bcm2048_fops_read(struct file *file, char __user *buf,
+	size_t count, loff_t *ppos)
+{
+	struct bcm2048_device *bdev = video_drvdata(file);
+	int i;
+	int retval = 0;
+
+	/* we return at least 3 bytes, one block */
+	count = (count / 3) * 3; /* only multiples of 3 */
+	if (count < 3)
+		return -ENOBUFS;
+
+	while (!bdev->rds_data_available) {
+		if (file->f_flags & O_NONBLOCK) {
+			retval = -EWOULDBLOCK;
+			goto done;
+		}
+		/* interruptible_sleep_on(&bdev->read_queue); */
+		if (wait_event_interruptible(bdev->read_queue,
+			bdev->rds_data_available) < 0) {
+			retval = -EINTR;
+			goto done;
+		}
+	}
+
+	mutex_lock(&bdev->mutex);
+	/* copy data to userspace */
+	i = bdev->fifo_size - bdev->rd_index;
+	if (count > i)
+		count = (i / 3) * 3;
+
+	i = 0;
+	while (i < count) {
+		unsigned char tmpbuf[3];
+
+		tmpbuf[0] = bdev->rds_info.radio_text[bdev->rd_index+i+2];
+		tmpbuf[1] = bdev->rds_info.radio_text[bdev->rd_index+i+1];
+		tmpbuf[2] = ((bdev->rds_info.radio_text[bdev->rd_index+i]
+				& 0xf0) >> 4);
+		if ((bdev->rds_info.radio_text[bdev->rd_index+i] &
+			BCM2048_RDS_CRC_MASK) == BCM2048_RDS_CRC_UNRECOVARABLE)
+			tmpbuf[2] |= 0x80;
+		if (copy_to_user(buf+i, tmpbuf, 3)) {
+			retval = -EFAULT;
+			break;
+		}
+		i += 3;
+	}
+
+	bdev->rd_index += i;
+	if (bdev->rd_index >= bdev->fifo_size)
+		bdev->rds_data_available = 0;
+
+	mutex_unlock(&bdev->mutex);
+	if (retval == 0)
+		retval = i;
+
+done:
+	return retval;
+}
+
+/*
+ *	bcm2048_fops - file operations interface
+ */
+static const struct v4l2_file_operations bcm2048_fops = {
+	.owner		= THIS_MODULE,
+	.ioctl		= video_ioctl2,
+	/* for RDS read support */
+	.open		= bcm2048_fops_open,
+	.release	= bcm2048_fops_release,
+	.read		= bcm2048_fops_read,
+	.poll		= bcm2048_fops_poll
+};
+
+/*
+ *	Video4Linux Interface
+ */
+static struct v4l2_queryctrl bcm2048_v4l2_queryctrl[] = {
+	{
+		.id		= V4L2_CID_AUDIO_VOLUME,
+		.flags		= V4L2_CTRL_FLAG_DISABLED,
+	},
+	{
+		.id		= V4L2_CID_AUDIO_BALANCE,
+		.flags		= V4L2_CTRL_FLAG_DISABLED,
+	},
+	{
+		.id		= V4L2_CID_AUDIO_BASS,
+		.flags		= V4L2_CTRL_FLAG_DISABLED,
+	},
+	{
+		.id		= V4L2_CID_AUDIO_TREBLE,
+		.flags		= V4L2_CTRL_FLAG_DISABLED,
+	},
+	{
+		.id		= V4L2_CID_AUDIO_MUTE,
+		.type		= V4L2_CTRL_TYPE_BOOLEAN,
+		.name		= "Mute",
+		.minimum	= 0,
+		.maximum	= 1,
+		.step		= 1,
+		.default_value	= 1,
+	},
+	{
+		.id		= V4L2_CID_AUDIO_LOUDNESS,
+		.flags		= V4L2_CTRL_FLAG_DISABLED,
+	},
+};
+
+static int bcm2048_vidioc_querycap(struct file *file, void *priv,
+		struct v4l2_capability *capability)
+{
+	struct bcm2048_device *bdev = video_get_drvdata(video_devdata(file));
+
+	strlcpy(capability->driver, BCM2048_DRIVER_NAME,
+		sizeof(capability->driver));
+	strlcpy(capability->card, BCM2048_DRIVER_CARD,
+		sizeof(capability->card));
+	snprintf(capability->bus_info, 32, "I2C: 0x%X", bdev->client->addr);
+	capability->version = BCM2048_DRIVER_VERSION;
+	capability->capabilities = V4L2_CAP_TUNER | V4L2_CAP_RADIO |
+					V4L2_CAP_HW_FREQ_SEEK;
+
+	return 0;
+}
+
+static int bcm2048_vidioc_g_input(struct file *filp, void *priv,
+		unsigned int *i)
+{
+	*i = 0;
+
+	return 0;
+}
+
+static int bcm2048_vidioc_s_input(struct file *filp, void *priv,
+					unsigned int i)
+{
+	if (i)
+		return -EINVAL;
+
+	return 0;
+}
+
+static int bcm2048_vidioc_queryctrl(struct file *file, void *priv,
+		struct v4l2_queryctrl *qc)
+{
+	int i;
+
+	for (i = 0; i < ARRAY_SIZE(bcm2048_v4l2_queryctrl); i++) {
+		if (qc->id && qc->id == bcm2048_v4l2_queryctrl[i].id) {
+			*qc = bcm2048_v4l2_queryctrl[i];
+			return 0;
+		}
+	}
+
+	return -EINVAL;
+}
+
+static int bcm2048_vidioc_g_ctrl(struct file *file, void *priv,
+		struct v4l2_control *ctrl)
+{
+	struct bcm2048_device *bdev = video_get_drvdata(video_devdata(file));
+	int err = 0;
+
+	if (!bdev)
+		return -ENODEV;
+
+	switch (ctrl->id) {
+	case V4L2_CID_AUDIO_MUTE:
+		err = bcm2048_get_mute(bdev);
+		if (err >= 0)
+			ctrl->value = err;
+		break;
+	}
+
+	return err;
+}
+
+static int bcm2048_vidioc_s_ctrl(struct file *file, void *priv,
+		struct v4l2_control *ctrl)
+{
+	struct bcm2048_device *bdev = video_get_drvdata(video_devdata(file));
+	int err = 0;
+
+	if (!bdev)
+		return -ENODEV;
+
+	switch (ctrl->id) {
+	case V4L2_CID_AUDIO_MUTE:
+		if (ctrl->value) {
+			if (bdev->power_state) {
+				err = bcm2048_set_mute(bdev, ctrl->value);
+				err |= bcm2048_deinit(bdev);
+			}
+		} else {
+			if (!bdev->power_state) {
+				err = bcm2048_init(bdev);
+				err |= bcm2048_set_mute(bdev, ctrl->value);
+			}
+		}
+		break;
+	}
+
+	return err;
+}
+
+static int bcm2048_vidioc_g_audio(struct file *file, void *priv,
+		struct v4l2_audio *audio)
+{
+	if (audio->index > 1)
+		return -EINVAL;
+
+	strncpy(audio->name, "Radio", 32);
+	audio->capability = V4L2_AUDCAP_STEREO;
+
+	return 0;
+}
+
+static int bcm2048_vidioc_s_audio(struct file *file, void *priv,
+		const struct v4l2_audio *audio)
+{
+	if (audio->index != 0)
+		return -EINVAL;
+
+	return 0;
+}
+
+static int bcm2048_vidioc_g_tuner(struct file *file, void *priv,
+		struct v4l2_tuner *tuner)
+{
+	struct bcm2048_device *bdev = video_get_drvdata(video_devdata(file));
+	s8 f_error;
+	s8 rssi;
+
+	if (!bdev)
+		return -ENODEV;
+
+	if (tuner->index > 0)
+		return -EINVAL;
+
+	strncpy(tuner->name, "FM Receiver", 32);
+	tuner->type = V4L2_TUNER_RADIO;
+	tuner->rangelow =
+		dev_to_v4l2(bcm2048_get_region_bottom_frequency(bdev));
+	tuner->rangehigh =
+		dev_to_v4l2(bcm2048_get_region_top_frequency(bdev));
+	tuner->rxsubchans = V4L2_TUNER_SUB_STEREO;
+	tuner->capability = V4L2_TUNER_CAP_STEREO | V4L2_TUNER_CAP_LOW;
+	tuner->audmode = V4L2_TUNER_MODE_STEREO;
+	tuner->afc = 0;
+	if (bdev->power_state) {
+		/*
+		 * Report frequencies with high carrier errors to have zero
+		 * signal level
+		 */
+		f_error = bcm2048_get_fm_carrier_error(bdev);
+		if (f_error < BCM2048_FREQ_ERROR_FLOOR ||
+		    f_error > BCM2048_FREQ_ERROR_ROOF) {
+			tuner->signal = 0;
+		} else {
+			/*
+			 * RSSI level -60 dB is defined to report full
+			 * signal strength
+			 */
+			rssi = bcm2048_get_fm_rssi(bdev);
+			if (rssi >= BCM2048_RSSI_LEVEL_BASE) {
+				tuner->signal = 0xFFFF;
+			} else if (rssi > BCM2048_RSSI_LEVEL_ROOF) {
+				tuner->signal = (rssi +
+						 BCM2048_RSSI_LEVEL_ROOF_NEG)
+						 * BCM2048_SIGNAL_MULTIPLIER;
+			} else {
+				tuner->signal = 0;
+			}
+		}
+	} else {
+		tuner->signal = 0;
+	}
+
+	return 0;
+}
+
+static int bcm2048_vidioc_s_tuner(struct file *file, void *priv,
+		const struct v4l2_tuner *tuner)
+{
+	struct bcm2048_device *bdev = video_get_drvdata(video_devdata(file));
+
+	if (!bdev)
+		return -ENODEV;
+
+	if (tuner->index > 0)
+		return -EINVAL;
+
+	return 0;
+}
+
+static int bcm2048_vidioc_g_frequency(struct file *file, void *priv,
+		struct v4l2_frequency *freq)
+{
+	struct bcm2048_device *bdev = video_get_drvdata(video_devdata(file));
+	int err = 0;
+	int f;
+
+	if (!bdev->power_state)
+		return -ENODEV;
+
+	freq->type = V4L2_TUNER_RADIO;
+	f = bcm2048_get_fm_frequency(bdev);
+
+	if (f < 0)
+		err = f;
+	else
+		freq->frequency = dev_to_v4l2(f);
+
+	return err;
+}
+
+static int bcm2048_vidioc_s_frequency(struct file *file, void *priv,
+		const struct v4l2_frequency *freq)
+{
+	struct bcm2048_device *bdev = video_get_drvdata(video_devdata(file));
+	int err;
+
+	if (freq->type != V4L2_TUNER_RADIO)
+		return -EINVAL;
+
+	if (!bdev->power_state)
+		return -ENODEV;
+
+	err = bcm2048_set_fm_frequency(bdev, v4l2_to_dev(freq->frequency));
+	err |= bcm2048_set_fm_search_tune_mode(bdev, BCM2048_FM_PRE_SET_MODE);
+
+	return err;
+}
+
+static int bcm2048_vidioc_s_hw_freq_seek(struct file *file, void *priv,
+					const struct v4l2_hw_freq_seek *seek)
+{
+	struct bcm2048_device *bdev = video_get_drvdata(video_devdata(file));
+	int err;
+
+	if (!bdev->power_state)
+		return -ENODEV;
+
+	if ((seek->tuner != 0) || (seek->type != V4L2_TUNER_RADIO))
+		return -EINVAL;
+
+	err = bcm2048_set_fm_search_mode_direction(bdev, seek->seek_upward);
+	err |= bcm2048_set_fm_search_tune_mode(bdev,
+			BCM2048_FM_AUTO_SEARCH_MODE);
+
+	return err;
+}
+
+static struct v4l2_ioctl_ops bcm2048_ioctl_ops = {
+	.vidioc_querycap	= bcm2048_vidioc_querycap,
+	.vidioc_g_input		= bcm2048_vidioc_g_input,
+	.vidioc_s_input		= bcm2048_vidioc_s_input,
+	.vidioc_queryctrl	= bcm2048_vidioc_queryctrl,
+	.vidioc_g_ctrl		= bcm2048_vidioc_g_ctrl,
+	.vidioc_s_ctrl		= bcm2048_vidioc_s_ctrl,
+	.vidioc_g_audio		= bcm2048_vidioc_g_audio,
+	.vidioc_s_audio		= bcm2048_vidioc_s_audio,
+	.vidioc_g_tuner		= bcm2048_vidioc_g_tuner,
+	.vidioc_s_tuner		= bcm2048_vidioc_s_tuner,
+	.vidioc_g_frequency	= bcm2048_vidioc_g_frequency,
+	.vidioc_s_frequency	= bcm2048_vidioc_s_frequency,
+	.vidioc_s_hw_freq_seek  = bcm2048_vidioc_s_hw_freq_seek,
+};
+
+/*
+ * bcm2048_viddev_template - video device interface
+ */
+static struct video_device bcm2048_viddev_template = {
+	.fops			= &bcm2048_fops,
+	.name			= BCM2048_DRIVER_NAME,
+	.release		= video_device_release,
+	.ioctl_ops		= &bcm2048_ioctl_ops,
+};
+
+/*
+ *	I2C driver interface
+ */
+static int bcm2048_i2c_driver_probe(struct i2c_client *client,
+					const struct i2c_device_id *id)
+{
+	struct bcm2048_device *bdev;
+	int err, skip_release = 0;
+
+	bdev = kzalloc(sizeof(*bdev), GFP_KERNEL);
+	if (!bdev) {
+		dev_dbg(&client->dev, "Failed to alloc driver data.\n");
+		err = -ENOMEM;
+		goto exit;
+	}
+
+	bdev->videodev = video_device_alloc();
+	if (!bdev->videodev) {
+		dev_dbg(&client->dev, "Failed to alloc video device.\n");
+		err = -ENOMEM;
+		goto free_bdev;
+	}
+
+	bdev->client = client;
+	i2c_set_clientdata(client, bdev);
+	mutex_init(&bdev->mutex);
+	init_completion(&bdev->compl);
+	INIT_WORK(&bdev->work, bcm2048_work);
+
+	if (client->irq) {
+		err = request_irq(client->irq,
+			bcm2048_handler, IRQF_TRIGGER_FALLING | IRQF_DISABLED,
+			client->name, bdev);
+		if (err < 0) {
+			dev_err(&client->dev, "Could not request IRQ\n");
+			goto free_vdev;
+		}
+		dev_dbg(&client->dev, "IRQ requested.\n");
+	} else {
+		dev_dbg(&client->dev, "IRQ not configured. Using timeouts.\n");
+	}
+
+	*bdev->videodev = bcm2048_viddev_template;
+	video_set_drvdata(bdev->videodev, bdev);
+	if (video_register_device(bdev->videodev, VFL_TYPE_RADIO, radio_nr)) {
+		dev_dbg(&client->dev, "Could not register video device.\n");
+		err = -EIO;
+		goto free_irq;
+	}
+
+	err = bcm2048_sysfs_register_properties(bdev);
+	if (err < 0) {
+		dev_dbg(&client->dev, "Could not register sysfs interface.\n");
+		goto free_registration;
+	}
+
+	err = bcm2048_probe(bdev);
+	if (err < 0) {
+		dev_dbg(&client->dev, "Failed to probe device information.\n");
+		goto free_sysfs;
+	}
+
+	return 0;
+
+free_sysfs:
+	bcm2048_sysfs_unregister_properties(bdev, ARRAY_SIZE(attrs));
+free_registration:
+	video_unregister_device(bdev->videodev);
+	/* video_unregister_device frees bdev->videodev */
+	bdev->videodev = NULL;
+	skip_release = 1;
+free_irq:
+	if (client->irq)
+		free_irq(client->irq, bdev);
+free_vdev:
+	if (!skip_release)
+		video_device_release(bdev->videodev);
+	i2c_set_clientdata(client, NULL);
+free_bdev:
+	kfree(bdev);
+exit:
+	return err;
+}
+
+static int __exit bcm2048_i2c_driver_remove(struct i2c_client *client)
+{
+	struct bcm2048_device *bdev = i2c_get_clientdata(client);
+	struct video_device *vd;
+
+	if (!client->adapter)
+		return -ENODEV;
+
+	if (bdev) {
+		vd = bdev->videodev;
+
+		bcm2048_sysfs_unregister_properties(bdev, ARRAY_SIZE(attrs));
+
+		if (vd)
+			video_unregister_device(vd);
+
+		if (bdev->power_state)
+			bcm2048_set_power_state(bdev, BCM2048_POWER_OFF);
+
+		if (client->irq > 0)
+			free_irq(client->irq, bdev);
+
+		cancel_work_sync(&bdev->work);
+
+		kfree(bdev);
+	}
+
+	i2c_set_clientdata(client, NULL);
+
+	return 0;
+}
+
+/*
+ *	bcm2048_i2c_driver - i2c driver interface
+ */
+static const struct i2c_device_id bcm2048_id[] = {
+	{ "bcm2048" , 0 },
+	{ },
+};
+MODULE_DEVICE_TABLE(i2c, bcm2048_id);
+
+static struct i2c_driver bcm2048_i2c_driver = {
+	.driver		= {
+		.name	= BCM2048_DRIVER_NAME,
+	},
+	.probe		= bcm2048_i2c_driver_probe,
+	.remove		= __exit_p(bcm2048_i2c_driver_remove),
+	.id_table	= bcm2048_id,
+};
+
+/*
+ *	Module Interface
+ */
+static int __init bcm2048_module_init(void)
+{
+	pr_info(BCM2048_DRIVER_DESC "\n");
+
+	return i2c_add_driver(&bcm2048_i2c_driver);
+}
+module_init(bcm2048_module_init);
+
+static void __exit bcm2048_module_exit(void)
+{
+	i2c_del_driver(&bcm2048_i2c_driver);
+}
+module_exit(bcm2048_module_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR(BCM2048_DRIVER_AUTHOR);
+MODULE_DESCRIPTION(BCM2048_DRIVER_DESC);
+MODULE_VERSION("0.0.2");
diff --git a/drivers/staging/media/bcm2048/radio-bcm2048.h b/drivers/staging/media/bcm2048/radio-bcm2048.h
new file mode 100644
index 0000000..4c90a32
--- /dev/null
+++ b/drivers/staging/media/bcm2048/radio-bcm2048.h
@@ -0,0 +1,30 @@
+/*
+ * drivers/staging/media/bcm2048/radio-bcm2048.h
+ *
+ * Property and command definitions for bcm2048 radio receiver chip.
+ *
+ * Copyright (C) Nokia Corporation
+ * Contact: Eero Nurkkala <ext-eero.nurkkala@nokia.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
+ * 02110-1301 USA
+ */
+
+#ifndef BCM2048_H
+#define BCM2048_H
+
+#define BCM2048_NAME		"bcm2048"
+#define BCM2048_I2C_ADDR	0x22
+
+#endif	/* ifndef BCM2048_H */
diff --git a/drivers/staging/media/davinci_vpfe/dm365_isif.c b/drivers/staging/media/davinci_vpfe/dm365_isif.c
index ff48fce..b942bf7 100644
--- a/drivers/staging/media/davinci_vpfe/dm365_isif.c
+++ b/drivers/staging/media/davinci_vpfe/dm365_isif.c
@@ -19,6 +19,7 @@
  *      Prabhakar Lad <prabhakar.lad@ti.com>
  */
 
+#include <linux/delay.h>
 #include "dm365_isif.h"
 #include "vpfe_mc_capture.h"
 
@@ -918,7 +919,7 @@
 		   (0 << ISIF_VDFC_EN_SHIFT), DFCCTL);
 
 	isif_write(isif->isif_cfg.base_addr, 0x6, DFCMEMCTL);
-	for (i = 0 ; i < vdfc->num_vdefects; i++) {
+	for (i = 0; i < vdfc->num_vdefects; i++) {
 		count = DFC_WRITE_WAIT_COUNT;
 		while (count &&
 			(isif_read(isif->isif_cfg.base_addr, DFCMEMCTL) & 0x2))
diff --git a/drivers/staging/media/davinci_vpfe/vpfe_video.c b/drivers/staging/media/davinci_vpfe/vpfe_video.c
index 24d98a6..1f3b0f9 100644
--- a/drivers/staging/media/davinci_vpfe/vpfe_video.c
+++ b/drivers/staging/media/davinci_vpfe/vpfe_video.c
@@ -346,7 +346,7 @@
 	}
 	mutex_unlock(&mdev->graph_mutex);
 
-	return (ret == 0) ? ret : -ETIMEDOUT ;
+	return ret ? -ETIMEDOUT : 0;
 }
 
 /*
@@ -1201,6 +1201,8 @@
 	unsigned long addr;
 	int ret;
 
+	if (count == 0)
+		return -ENOBUFS;
 	ret = mutex_lock_interruptible(&video->lock);
 	if (ret)
 		goto streamoff;
diff --git a/drivers/staging/media/go7007/go7007-loader.c b/drivers/staging/media/go7007/go7007-loader.c
index 10bb41c..eecb1f2 100644
--- a/drivers/staging/media/go7007/go7007-loader.c
+++ b/drivers/staging/media/go7007/go7007-loader.c
@@ -59,7 +59,7 @@
 
 	if (usbdev->descriptor.bNumConfigurations != 1) {
 		dev_err(&interface->dev, "can't handle multiple config\n");
-		return -ENODEV;
+		goto failed2;
 	}
 
 	vendor = le16_to_cpu(usbdev->descriptor.idVendor);
@@ -108,6 +108,7 @@
 	return 0;
 
 failed2:
+	usb_put_dev(usbdev);
 	dev_err(&interface->dev, "probe failed\n");
 	return -ENODEV;
 }
@@ -115,6 +116,7 @@
 static void go7007_loader_disconnect(struct usb_interface *interface)
 {
 	dev_info(&interface->dev, "disconnect\n");
+	usb_put_dev(interface_to_usbdev(interface));
 	usb_set_intfdata(interface, NULL);
 }
 
diff --git a/drivers/staging/media/lirc/lirc_parallel.c b/drivers/staging/media/lirc/lirc_parallel.c
index 41d110f..0b58989 100644
--- a/drivers/staging/media/lirc/lirc_parallel.c
+++ b/drivers/staging/media/lirc/lirc_parallel.c
@@ -220,7 +220,7 @@
 	wptr = nwptr;
 }
 
-static void irq_handler(void *blah)
+static void lirc_lirc_irq_handler(void *blah)
 {
 	struct timeval tv;
 	static struct timeval lasttv;
@@ -659,7 +659,7 @@
 		goto exit_device_put;
 	}
 	ppdevice = parport_register_device(pport, LIRC_DRIVER_NAME,
-					   pf, kf, irq_handler, 0, NULL);
+					   pf, kf, lirc_lirc_irq_handler, 0, NULL);
 	parport_put_port(pport);
 	if (ppdevice == NULL) {
 		pr_notice("parport_register_device() failed\n");
diff --git a/drivers/staging/media/lirc/lirc_serial.c b/drivers/staging/media/lirc/lirc_serial.c
index abe0d5c..10c685d 100644
--- a/drivers/staging/media/lirc/lirc_serial.c
+++ b/drivers/staging/media/lirc/lirc_serial.c
@@ -650,7 +650,7 @@
 	rbwrite(l);
 }
 
-static irqreturn_t irq_handler(int i, void *blah)
+static irqreturn_t lirc_irq_handler(int i, void *blah)
 {
 	struct timeval tv;
 	int counter, dcd;
@@ -852,7 +852,7 @@
 		return result;
 #endif
 
-	result = request_irq(irq, irq_handler,
+	result = request_irq(irq, lirc_irq_handler,
 			     (share_irq ? IRQF_SHARED : 0),
 			     LIRC_DRIVER_NAME, (void *)&hardware);
 	if (result < 0) {
diff --git a/drivers/staging/media/omap24xx/Kconfig b/drivers/staging/media/omap24xx/Kconfig
new file mode 100644
index 0000000..82e569a
--- /dev/null
+++ b/drivers/staging/media/omap24xx/Kconfig
@@ -0,0 +1,35 @@
+config VIDEO_V4L2_INT_DEVICE
+	tristate
+
+config VIDEO_OMAP2
+	tristate "OMAP2 Camera Capture Interface driver (DEPRECATED)"
+	depends on VIDEO_DEV && ARCH_OMAP2
+	select VIDEOBUF_DMA_SG
+	select VIDEO_V4L2_INT_DEVICE
+	---help---
+	  This is a v4l2 driver for the TI OMAP2 camera capture interface
+
+	  It uses the deprecated int-device API. Since this driver is no
+	  longer actively maintained and nobody is interested in converting
+	  it to the subdev API, this driver will be removed soon.
+
+	  If you do want to keep this driver in the kernel, and are willing
+	  to convert it to the subdev API, then please contact the linux-media
+	  mailing list.
+
+config VIDEO_TCM825X
+	tristate "TCM825x camera sensor support (DEPRECATED)"
+	depends on I2C && VIDEO_V4L2
+	depends on MEDIA_CAMERA_SUPPORT
+	select VIDEO_V4L2_INT_DEVICE
+	---help---
+	  This is a driver for the Toshiba TCM825x VGA camera sensor.
+	  It is used, for example, in the Nokia N800.
+
+	  It uses the deprecated int-device API. Since this driver is no
+	  longer actively maintained and nobody is interested in converting
+	  it to the subdev API, this driver will be removed soon.
+
+	  If you do want to keep this driver in the kernel, and are willing
+	  to convert it to the subdev API, then please contact the linux-media
+	  mailing list.
diff --git a/drivers/staging/media/omap24xx/Makefile b/drivers/staging/media/omap24xx/Makefile
new file mode 100644
index 0000000..c2e7175
--- /dev/null
+++ b/drivers/staging/media/omap24xx/Makefile
@@ -0,0 +1,5 @@
+omap2cam-objs	:=	omap24xxcam.o omap24xxcam-dma.o
+
+obj-$(CONFIG_VIDEO_OMAP2)   += omap2cam.o
+obj-$(CONFIG_VIDEO_TCM825X) += tcm825x.o
+obj-$(CONFIG_VIDEO_V4L2_INT_DEVICE) += v4l2-int-device.o
diff --git a/drivers/media/platform/omap24xxcam-dma.c b/drivers/staging/media/omap24xx/omap24xxcam-dma.c
similarity index 100%
rename from drivers/media/platform/omap24xxcam-dma.c
rename to drivers/staging/media/omap24xx/omap24xxcam-dma.c
diff --git a/drivers/media/platform/omap24xxcam.c b/drivers/staging/media/omap24xx/omap24xxcam.c
similarity index 100%
rename from drivers/media/platform/omap24xxcam.c
rename to drivers/staging/media/omap24xx/omap24xxcam.c
diff --git a/drivers/media/platform/omap24xxcam.h b/drivers/staging/media/omap24xx/omap24xxcam.h
similarity index 99%
rename from drivers/media/platform/omap24xxcam.h
rename to drivers/staging/media/omap24xx/omap24xxcam.h
index 7f6f791..233bb40 100644
--- a/drivers/media/platform/omap24xxcam.h
+++ b/drivers/staging/media/omap24xx/omap24xxcam.h
@@ -28,8 +28,8 @@
 #define OMAP24XXCAM_H
 
 #include <media/videobuf-dma-sg.h>
-#include <media/v4l2-int-device.h>
 #include <media/v4l2-device.h>
+#include "v4l2-int-device.h"
 
 /*
  *
diff --git a/drivers/media/i2c/tcm825x.c b/drivers/staging/media/omap24xx/tcm825x.c
similarity index 99%
rename from drivers/media/i2c/tcm825x.c
rename to drivers/staging/media/omap24xx/tcm825x.c
index 9252529..b1ae8e9 100644
--- a/drivers/media/i2c/tcm825x.c
+++ b/drivers/staging/media/omap24xx/tcm825x.c
@@ -28,7 +28,7 @@
 
 #include <linux/i2c.h>
 #include <linux/module.h>
-#include <media/v4l2-int-device.h>
+#include "v4l2-int-device.h"
 
 #include "tcm825x.h"
 
diff --git a/drivers/media/i2c/tcm825x.h b/drivers/staging/media/omap24xx/tcm825x.h
similarity index 99%
rename from drivers/media/i2c/tcm825x.h
rename to drivers/staging/media/omap24xx/tcm825x.h
index 8ebab95..e2d1bcd 100644
--- a/drivers/media/i2c/tcm825x.h
+++ b/drivers/staging/media/omap24xx/tcm825x.h
@@ -17,7 +17,7 @@
 
 #include <linux/videodev2.h>
 
-#include <media/v4l2-int-device.h>
+#include "v4l2-int-device.h"
 
 #define TCM825X_NAME "tcm825x"
 
diff --git a/drivers/media/v4l2-core/v4l2-int-device.c b/drivers/staging/media/omap24xx/v4l2-int-device.c
similarity index 98%
rename from drivers/media/v4l2-core/v4l2-int-device.c
rename to drivers/staging/media/omap24xx/v4l2-int-device.c
index f447349..427a890 100644
--- a/drivers/media/v4l2-core/v4l2-int-device.c
+++ b/drivers/staging/media/omap24xx/v4l2-int-device.c
@@ -28,7 +28,7 @@
 #include <linux/string.h>
 #include <linux/module.h>
 
-#include <media/v4l2-int-device.h>
+#include "v4l2-int-device.h"
 
 static DEFINE_MUTEX(mutex);
 static LIST_HEAD(int_list);
diff --git a/include/media/v4l2-int-device.h b/drivers/staging/media/omap24xx/v4l2-int-device.h
similarity index 100%
rename from include/media/v4l2-int-device.h
rename to drivers/staging/media/omap24xx/v4l2-int-device.h
diff --git a/drivers/staging/media/omap4iss/Kconfig b/drivers/staging/media/omap4iss/Kconfig
new file mode 100644
index 0000000..b9fe753
--- /dev/null
+++ b/drivers/staging/media/omap4iss/Kconfig
@@ -0,0 +1,12 @@
+config VIDEO_OMAP4
+	bool "OMAP 4 Camera support"
+	depends on VIDEO_V4L2 && VIDEO_V4L2_SUBDEV_API && I2C && ARCH_OMAP4
+	select VIDEOBUF2_DMA_CONTIG
+	---help---
+	  Driver for an OMAP 4 ISS controller.
+
+config VIDEO_OMAP4_DEBUG
+	bool "OMAP 4 Camera debug messages"
+	depends on VIDEO_OMAP4
+	---help---
+	  Enable debug messages on OMAP 4 ISS controller driver.
diff --git a/drivers/staging/media/omap4iss/Makefile b/drivers/staging/media/omap4iss/Makefile
new file mode 100644
index 0000000..a716ce9
--- /dev/null
+++ b/drivers/staging/media/omap4iss/Makefile
@@ -0,0 +1,6 @@
+# Makefile for OMAP4 ISS driver
+
+omap4-iss-objs += \
+	iss.o iss_csi2.o iss_csiphy.o iss_ipipeif.o iss_ipipe.o iss_resizer.o iss_video.o
+
+obj-$(CONFIG_VIDEO_OMAP4) += omap4-iss.o
diff --git a/drivers/staging/media/omap4iss/TODO b/drivers/staging/media/omap4iss/TODO
new file mode 100644
index 0000000..fcde888
--- /dev/null
+++ b/drivers/staging/media/omap4iss/TODO
@@ -0,0 +1,4 @@
+* Make the driver compile as a module
+* Fix FIFO/buffer overflows and underflows
+* Replace dummy resizer code with a real implementation
+* Fix checkpatch errors and warnings
diff --git a/drivers/staging/media/omap4iss/iss.c b/drivers/staging/media/omap4iss/iss.c
new file mode 100644
index 0000000..61fbfcd
--- /dev/null
+++ b/drivers/staging/media/omap4iss/iss.c
@@ -0,0 +1,1563 @@
+/*
+ * TI OMAP4 ISS V4L2 Driver
+ *
+ * Copyright (C) 2012, Texas Instruments
+ *
+ * Author: Sergio Aguirre <sergio.a.aguirre@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/device.h>
+#include <linux/dma-mapping.h>
+#include <linux/i2c.h>
+#include <linux/interrupt.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <linux/sched.h>
+#include <linux/vmalloc.h>
+
+#include <media/v4l2-common.h>
+#include <media/v4l2-device.h>
+#include <media/v4l2-ctrls.h>
+
+#include "iss.h"
+#include "iss_regs.h"
+
+#define ISS_PRINT_REGISTER(iss, name)\
+	dev_dbg(iss->dev, "###ISS " #name "=0x%08x\n", \
+		iss_reg_read(iss, OMAP4_ISS_MEM_TOP, ISS_##name))
+
+static void iss_print_status(struct iss_device *iss)
+{
+	dev_dbg(iss->dev, "-------------ISS HL Register dump-------------\n");
+
+	ISS_PRINT_REGISTER(iss, HL_REVISION);
+	ISS_PRINT_REGISTER(iss, HL_SYSCONFIG);
+	ISS_PRINT_REGISTER(iss, HL_IRQSTATUS(5));
+	ISS_PRINT_REGISTER(iss, HL_IRQENABLE_SET(5));
+	ISS_PRINT_REGISTER(iss, HL_IRQENABLE_CLR(5));
+	ISS_PRINT_REGISTER(iss, CTRL);
+	ISS_PRINT_REGISTER(iss, CLKCTRL);
+	ISS_PRINT_REGISTER(iss, CLKSTAT);
+
+	dev_dbg(iss->dev, "-----------------------------------------------\n");
+}
+
+/*
+ * omap4iss_flush - Post pending L3 bus writes by doing a register readback
+ * @iss: OMAP4 ISS device
+ *
+ * In order to force posting of pending writes, we need to write and
+ * readback the same register, in this case the revision register.
+ *
+ * See this link for reference:
+ *   http://www.mail-archive.com/linux-omap@vger.kernel.org/msg08149.html
+ */
+void omap4iss_flush(struct iss_device *iss)
+{
+	iss_reg_write(iss, OMAP4_ISS_MEM_TOP, ISS_HL_REVISION, 0);
+	iss_reg_read(iss, OMAP4_ISS_MEM_TOP, ISS_HL_REVISION);
+}
+
+/*
+ * iss_isp_enable_interrupts - Enable ISS ISP interrupts.
+ * @iss: OMAP4 ISS device
+ */
+static void omap4iss_isp_enable_interrupts(struct iss_device *iss)
+{
+	static const u32 isp_irq = ISP5_IRQ_OCP_ERR |
+				   ISP5_IRQ_RSZ_FIFO_IN_BLK_ERR |
+				   ISP5_IRQ_RSZ_FIFO_OVF |
+				   ISP5_IRQ_RSZ_INT_DMA |
+				   ISP5_IRQ_ISIF_INT(0);
+
+	/* Enable ISP interrupts */
+	iss_reg_write(iss, OMAP4_ISS_MEM_ISP_SYS1, ISP5_IRQSTATUS(0), isp_irq);
+	iss_reg_write(iss, OMAP4_ISS_MEM_ISP_SYS1, ISP5_IRQENABLE_SET(0),
+		      isp_irq);
+}
+
+/*
+ * iss_isp_disable_interrupts - Disable ISS interrupts.
+ * @iss: OMAP4 ISS device
+ */
+static void omap4iss_isp_disable_interrupts(struct iss_device *iss)
+{
+	iss_reg_write(iss, OMAP4_ISS_MEM_ISP_SYS1, ISP5_IRQENABLE_CLR(0), ~0);
+}
+
+/*
+ * iss_enable_interrupts - Enable ISS interrupts.
+ * @iss: OMAP4 ISS device
+ */
+static void iss_enable_interrupts(struct iss_device *iss)
+{
+	static const u32 hl_irq = ISS_HL_IRQ_CSIA | ISS_HL_IRQ_CSIB
+				| ISS_HL_IRQ_ISP(0);
+
+	/* Enable HL interrupts */
+	iss_reg_write(iss, OMAP4_ISS_MEM_TOP, ISS_HL_IRQSTATUS(5), hl_irq);
+	iss_reg_write(iss, OMAP4_ISS_MEM_TOP, ISS_HL_IRQENABLE_SET(5), hl_irq);
+
+	if (iss->regs[OMAP4_ISS_MEM_ISP_SYS1])
+		omap4iss_isp_enable_interrupts(iss);
+}
+
+/*
+ * iss_disable_interrupts - Disable ISS interrupts.
+ * @iss: OMAP4 ISS device
+ */
+static void iss_disable_interrupts(struct iss_device *iss)
+{
+	if (iss->regs[OMAP4_ISS_MEM_ISP_SYS1])
+		omap4iss_isp_disable_interrupts(iss);
+
+	iss_reg_write(iss, OMAP4_ISS_MEM_TOP, ISS_HL_IRQENABLE_CLR(5), ~0);
+}
+
+int omap4iss_get_external_info(struct iss_pipeline *pipe,
+			       struct media_link *link)
+{
+	struct iss_device *iss =
+		container_of(pipe, struct iss_video, pipe)->iss;
+	struct v4l2_subdev_format fmt;
+	struct v4l2_ctrl *ctrl;
+	int ret;
+
+	if (!pipe->external)
+		return 0;
+
+	if (pipe->external_rate)
+		return 0;
+
+	memset(&fmt, 0, sizeof(fmt));
+
+	fmt.pad = link->source->index;
+	fmt.which = V4L2_SUBDEV_FORMAT_ACTIVE;
+	ret = v4l2_subdev_call(media_entity_to_v4l2_subdev(link->sink->entity),
+			       pad, get_fmt, NULL, &fmt);
+	if (ret < 0)
+		return -EPIPE;
+
+	pipe->external_bpp = omap4iss_video_format_info(fmt.format.code)->bpp;
+
+	ctrl = v4l2_ctrl_find(pipe->external->ctrl_handler,
+			      V4L2_CID_PIXEL_RATE);
+	if (ctrl == NULL) {
+		dev_warn(iss->dev, "no pixel rate control in subdev %s\n",
+			 pipe->external->name);
+		return -EPIPE;
+	}
+
+	pipe->external_rate = v4l2_ctrl_g_ctrl_int64(ctrl);
+
+	return 0;
+}
+
+/*
+ * Configure the bridge. Valid inputs are
+ *
+ * IPIPEIF_INPUT_CSI2A: CSI2a receiver
+ * IPIPEIF_INPUT_CSI2B: CSI2b receiver
+ *
+ * The bridge and lane shifter are configured according to the selected input
+ * and the ISP platform data.
+ */
+void omap4iss_configure_bridge(struct iss_device *iss,
+			       enum ipipeif_input_entity input)
+{
+	u32 issctrl_val;
+	u32 isp5ctrl_val;
+
+	issctrl_val = iss_reg_read(iss, OMAP4_ISS_MEM_TOP, ISS_CTRL);
+	issctrl_val &= ~ISS_CTRL_INPUT_SEL_MASK;
+	issctrl_val &= ~ISS_CTRL_CLK_DIV_MASK;
+
+	isp5ctrl_val = iss_reg_read(iss, OMAP4_ISS_MEM_ISP_SYS1, ISP5_CTRL);
+
+	switch (input) {
+	case IPIPEIF_INPUT_CSI2A:
+		issctrl_val |= ISS_CTRL_INPUT_SEL_CSI2A;
+		break;
+
+	case IPIPEIF_INPUT_CSI2B:
+		issctrl_val |= ISS_CTRL_INPUT_SEL_CSI2B;
+		break;
+
+	default:
+		return;
+	}
+
+	issctrl_val |= ISS_CTRL_SYNC_DETECT_VS_RAISING;
+
+	isp5ctrl_val |= ISP5_CTRL_VD_PULSE_EXT | ISP5_CTRL_PSYNC_CLK_SEL |
+			ISP5_CTRL_SYNC_ENABLE;
+
+	iss_reg_write(iss, OMAP4_ISS_MEM_TOP, ISS_CTRL, issctrl_val);
+	iss_reg_write(iss, OMAP4_ISS_MEM_ISP_SYS1, ISP5_CTRL, isp5ctrl_val);
+}
+
+#if defined(DEBUG) && defined(ISS_ISR_DEBUG)
+static void iss_isr_dbg(struct iss_device *iss, u32 irqstatus)
+{
+	static const char * const name[] = {
+		"ISP_0",
+		"ISP_1",
+		"ISP_2",
+		"ISP_3",
+		"CSIA",
+		"CSIB",
+		"CCP2_0",
+		"CCP2_1",
+		"CCP2_2",
+		"CCP2_3",
+		"CBUFF",
+		"BTE",
+		"SIMCOP_0",
+		"SIMCOP_1",
+		"SIMCOP_2",
+		"SIMCOP_3",
+		"CCP2_8",
+		"HS_VS",
+		"18",
+		"19",
+		"20",
+		"21",
+		"22",
+		"23",
+		"24",
+		"25",
+		"26",
+		"27",
+		"28",
+		"29",
+		"30",
+		"31",
+	};
+	unsigned int i;
+
+	dev_dbg(iss->dev, "ISS IRQ: ");
+
+	for (i = 0; i < ARRAY_SIZE(name); i++) {
+		if ((1 << i) & irqstatus)
+			pr_cont("%s ", name[i]);
+	}
+	pr_cont("\n");
+}
+
+static void iss_isp_isr_dbg(struct iss_device *iss, u32 irqstatus)
+{
+	static const char * const name[] = {
+		"ISIF_0",
+		"ISIF_1",
+		"ISIF_2",
+		"ISIF_3",
+		"IPIPEREQ",
+		"IPIPELAST_PIX",
+		"IPIPEDMA",
+		"IPIPEBSC",
+		"IPIPEHST",
+		"IPIPEIF",
+		"AEW",
+		"AF",
+		"H3A",
+		"RSZ_REG",
+		"RSZ_LAST_PIX",
+		"RSZ_DMA",
+		"RSZ_CYC_RZA",
+		"RSZ_CYC_RZB",
+		"RSZ_FIFO_OVF",
+		"RSZ_FIFO_IN_BLK_ERR",
+		"20",
+		"21",
+		"RSZ_EOF0",
+		"RSZ_EOF1",
+		"H3A_EOF",
+		"IPIPE_EOF",
+		"26",
+		"IPIPE_DPC_INI",
+		"IPIPE_DPC_RNEW0",
+		"IPIPE_DPC_RNEW1",
+		"30",
+		"OCP_ERR",
+	};
+	unsigned int i;
+
+	dev_dbg(iss->dev, "ISP IRQ: ");
+
+	for (i = 0; i < ARRAY_SIZE(name); i++) {
+		if ((1 << i) & irqstatus)
+			pr_cont("%s ", name[i]);
+	}
+	pr_cont("\n");
+}
+#endif
+
+/*
+ * iss_isr - Interrupt Service Routine for ISS module.
+ * @irq: Not used currently.
+ * @_iss: Pointer to the OMAP4 ISS device
+ *
+ * Handles the corresponding callback if plugged in.
+ *
+ * Returns IRQ_HANDLED when IRQ was correctly handled, or IRQ_NONE when the
+ * IRQ wasn't handled.
+ */
+static irqreturn_t iss_isr(int irq, void *_iss)
+{
+	static const u32 ipipeif_events = ISP5_IRQ_IPIPEIF_IRQ |
+					  ISP5_IRQ_ISIF_INT(0);
+	static const u32 resizer_events = ISP5_IRQ_RSZ_FIFO_IN_BLK_ERR |
+					  ISP5_IRQ_RSZ_FIFO_OVF |
+					  ISP5_IRQ_RSZ_INT_DMA;
+	struct iss_device *iss = _iss;
+	u32 irqstatus;
+
+	irqstatus = iss_reg_read(iss, OMAP4_ISS_MEM_TOP, ISS_HL_IRQSTATUS(5));
+	iss_reg_write(iss, OMAP4_ISS_MEM_TOP, ISS_HL_IRQSTATUS(5), irqstatus);
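+	/* Writing the status back acknowledges the handled events */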
+
+	if (irqstatus & ISS_HL_IRQ_CSIA)
+		omap4iss_csi2_isr(&iss->csi2a);
+
+	if (irqstatus & ISS_HL_IRQ_CSIB)
+		omap4iss_csi2_isr(&iss->csi2b);
+
+	if (irqstatus & ISS_HL_IRQ_ISP(0)) {
+		u32 isp_irqstatus = iss_reg_read(iss, OMAP4_ISS_MEM_ISP_SYS1,
+						 ISP5_IRQSTATUS(0));
+		iss_reg_write(iss, OMAP4_ISS_MEM_ISP_SYS1, ISP5_IRQSTATUS(0),
+			      isp_irqstatus);
+
+		if (isp_irqstatus & ISP5_IRQ_OCP_ERR)
+			dev_dbg(iss->dev, "ISP5 OCP Error!\n");
+
+		if (isp_irqstatus & ipipeif_events) {
+			omap4iss_ipipeif_isr(&iss->ipipeif,
+					     isp_irqstatus & ipipeif_events);
+		}
+
+		if (isp_irqstatus & resizer_events)
+			omap4iss_resizer_isr(&iss->resizer,
+					     isp_irqstatus & resizer_events);
+
+#if defined(DEBUG) && defined(ISS_ISR_DEBUG)
+		iss_isp_isr_dbg(iss, isp_irqstatus);
+#endif
+	}
+
+	omap4iss_flush(iss);
+
+#if defined(DEBUG) && defined(ISS_ISR_DEBUG)
+	iss_isr_dbg(iss, irqstatus);
+#endif
+
+	return IRQ_HANDLED;
+}
+
+/* -----------------------------------------------------------------------------
+ * Pipeline power management
+ *
+ * Entities must be powered up when part of a pipeline that contains at least
+ * one open video device node.
+ *
+ * To achieve this use the entity use_count field to track the number of users.
+ * For entities corresponding to video device nodes the use_count field stores
+ * the number of users of the node. For entities corresponding to subdevs the
+ * use_count field stores the total number of users of all video device nodes
+ * in the pipeline.
+ *
+ * The omap4iss_pipeline_pm_use() function must be called in the open() and
+ * close() handlers of video device nodes. It increments or decrements the use
+ * count of all subdev entities in the pipeline.
+ *
+ * To react to link management on powered pipelines, the link setup notification
+ * callback updates the use count of all entities in the source and sink sides
+ * of the link.
+ */
+
+/*
+ * iss_pipeline_pm_use_count - Count the number of users of a pipeline
+ * @entity: The entity
+ *
+ * Return the total number of users of all video device nodes in the pipeline.
+ */
+static int iss_pipeline_pm_use_count(struct media_entity *entity)
+{
+	struct media_entity_graph graph;
+	int use = 0;
+
+	media_entity_graph_walk_start(&graph, entity);
+
+	while ((entity = media_entity_graph_walk_next(&graph))) {
+		if (media_entity_type(entity) == MEDIA_ENT_T_DEVNODE)
+			use += entity->use_count;
+	}
+
+	return use;
+}
+
+/*
+ * iss_pipeline_pm_power_one - Apply power change to an entity
+ * @entity: The entity
+ * @change: Use count change
+ *
+ * Change the entity use count by @change. If the entity is a subdev update its
+ * power state by calling the core::s_power operation when the use count goes
+ * from 0 to != 0 or from != 0 to 0.
+ *
+ * Return 0 on success or a negative error code on failure.
+ */
+static int iss_pipeline_pm_power_one(struct media_entity *entity, int change)
+{
+	struct v4l2_subdev *subdev;
+
+	subdev = media_entity_type(entity) == MEDIA_ENT_T_V4L2_SUBDEV
+	       ? media_entity_to_v4l2_subdev(entity) : NULL;
+
+	if (entity->use_count == 0 && change > 0 && subdev != NULL) {
+		int ret;
+
+		ret = v4l2_subdev_call(subdev, core, s_power, 1);
+		if (ret < 0 && ret != -ENOIOCTLCMD)
+			return ret;
+	}
+
+	entity->use_count += change;
+	WARN_ON(entity->use_count < 0);
+
+	if (entity->use_count == 0 && change < 0 && subdev != NULL)
+		v4l2_subdev_call(subdev, core, s_power, 0);
+
+	return 0;
+}
+
+/*
+ * iss_pipeline_pm_power - Apply power change to all entities in a pipeline
+ * @entity: The entity
+ * @change: Use count change
+ *
+ * Walk the pipeline to update the use count and the power state of all non-node
+ * entities.
+ *
+ * Return 0 on success or a negative error code on failure.
+ */
+static int iss_pipeline_pm_power(struct media_entity *entity, int change)
+{
+	struct media_entity_graph graph;
+	struct media_entity *first = entity;
+	int ret = 0;
+
+	if (!change)
+		return 0;
+
+	media_entity_graph_walk_start(&graph, entity);
+
+	while (!ret && (entity = media_entity_graph_walk_next(&graph)))
+		if (media_entity_type(entity) != MEDIA_ENT_T_DEVNODE)
+			ret = iss_pipeline_pm_power_one(entity, change);
+
+	if (!ret)
+		return 0;
+
+	media_entity_graph_walk_start(&graph, first);
+
+	while ((first = media_entity_graph_walk_next(&graph))
+	       && first != entity)
+		if (media_entity_type(first) != MEDIA_ENT_T_DEVNODE)
+			iss_pipeline_pm_power_one(first, -change);
+
+	return ret;
+}
+
+/*
+ * omap4iss_pipeline_pm_use - Update the use count of an entity
+ * @entity: The entity
+ * @use: Use (1) or stop using (0) the entity
+ *
+ * Update the use count of all entities in the pipeline and power entities on or
+ * off accordingly.
+ *
+ * Return 0 on success or a negative error code on failure. Powering entities
+ * off is assumed to never fail. No failure can occur when the use parameter is
+ * set to 0.
+ */
+int omap4iss_pipeline_pm_use(struct media_entity *entity, int use)
+{
+	int change = use ? 1 : -1;
+	int ret;
+
+	mutex_lock(&entity->parent->graph_mutex);
+
+	/* Apply use count to node. */
+	entity->use_count += change;
+	WARN_ON(entity->use_count < 0);
+
+	/* Apply power change to connected non-nodes. */
+	ret = iss_pipeline_pm_power(entity, change);
+	if (ret < 0)
+		entity->use_count -= change;
+
+	mutex_unlock(&entity->parent->graph_mutex);
+
+	return ret;
+}
+
+/*
+ * iss_pipeline_link_notify - Link management notification callback
+ * @link: The link
+ * @flags: New link flags that will be applied
+ *
+ * React to link management on powered pipelines by updating the use count of
+ * all entities in the source and sink sides of the link. Entities are powered
+ * on or off accordingly.
+ *
+ * Return 0 on success or a negative error code on failure. Powering entities
+ * off is assumed to never fail. This function will not fail for disconnection
+ * events.
+ */
+static int iss_pipeline_link_notify(struct media_link *link, u32 flags,
+				    unsigned int notification)
+{
+	struct media_entity *source = link->source->entity;
+	struct media_entity *sink = link->sink->entity;
+	int source_use = iss_pipeline_pm_use_count(source);
+	int sink_use = iss_pipeline_pm_use_count(sink);
+	int ret;
+
+	if (notification == MEDIA_DEV_NOTIFY_POST_LINK_CH &&
+	    !(link->flags & MEDIA_LNK_FL_ENABLED)) {
+		/* Powering off entities is assumed to never fail. */
+		iss_pipeline_pm_power(source, -sink_use);
+		iss_pipeline_pm_power(sink, -source_use);
+		return 0;
+	}
+
+	if (notification == MEDIA_DEV_NOTIFY_POST_LINK_CH &&
+		(flags & MEDIA_LNK_FL_ENABLED)) {
+		ret = iss_pipeline_pm_power(source, sink_use);
+		if (ret < 0)
+			return ret;
+
+		ret = iss_pipeline_pm_power(sink, source_use);
+		if (ret < 0)
+			iss_pipeline_pm_power(source, -sink_use);
+
+		return ret;
+	}
+
+	return 0;
+}
+
+/* -----------------------------------------------------------------------------
+ * Pipeline stream management
+ */
+
+/*
+ * iss_pipeline_enable - Enable streaming on a pipeline
+ * @pipe: ISS pipeline
+ * @mode: Stream mode (single shot or continuous)
+ *
+ * Walk the entities chain starting at the pipeline output video node and start
+ * all modules in the chain in the given mode.
+ *
+ * Return 0 if successful, or the return value of the failed video::s_stream
+ * operation otherwise.
+ */
+static int iss_pipeline_enable(struct iss_pipeline *pipe,
+			       enum iss_pipeline_stream_state mode)
+{
+	struct iss_device *iss = pipe->output->iss;
+	struct media_entity *entity;
+	struct media_pad *pad;
+	struct v4l2_subdev *subdev;
+	unsigned long flags;
+	int ret;
+
+	/* If one of the entities in the pipeline has crashed it will not work
+	 * properly. Refuse to start streaming in that case. This check must be
+	 * performed before the loop below to avoid starting entities if the
+	 * pipeline won't start anyway (those entities would then likely fail to
+	 * stop, making the problem worse).
+	 */
+	if (pipe->entities & iss->crashed)
+		return -EIO;
+
+	spin_lock_irqsave(&pipe->lock, flags);
+	pipe->state &= ~(ISS_PIPELINE_IDLE_INPUT | ISS_PIPELINE_IDLE_OUTPUT);
+	spin_unlock_irqrestore(&pipe->lock, flags);
+
+	pipe->do_propagation = false;
+
+	entity = &pipe->output->video.entity;
+	while (1) {
+		pad = &entity->pads[0];
+		if (!(pad->flags & MEDIA_PAD_FL_SINK))
+			break;
+
+		pad = media_entity_remote_pad(pad);
+		if (pad == NULL ||
+		    media_entity_type(pad->entity) != MEDIA_ENT_T_V4L2_SUBDEV)
+			break;
+
+		entity = pad->entity;
+		subdev = media_entity_to_v4l2_subdev(entity);
+
+		ret = v4l2_subdev_call(subdev, video, s_stream, mode);
+		if (ret < 0 && ret != -ENOIOCTLCMD)
+			return ret;
+	}
+	iss_print_status(pipe->output->iss);
+	return 0;
+}
+
+/*
+ * iss_pipeline_disable - Disable streaming on a pipeline
+ * @pipe: ISS pipeline
+ *
+ * Walk the entities chain starting at the pipeline output video node and stop
+ * all modules in the chain. Wait synchronously for the modules to be stopped if
+ * necessary.
+ */
+static int iss_pipeline_disable(struct iss_pipeline *pipe)
+{
+	struct iss_device *iss = pipe->output->iss;
+	struct media_entity *entity;
+	struct media_pad *pad;
+	struct v4l2_subdev *subdev;
+	int failure = 0;
+	int ret;
+
+	entity = &pipe->output->video.entity;
+	while (1) {
+		pad = &entity->pads[0];
+		if (!(pad->flags & MEDIA_PAD_FL_SINK))
+			break;
+
+		pad = media_entity_remote_pad(pad);
+		if (pad == NULL ||
+		    media_entity_type(pad->entity) != MEDIA_ENT_T_V4L2_SUBDEV)
+			break;
+
+		entity = pad->entity;
+		subdev = media_entity_to_v4l2_subdev(entity);
+
+		ret = v4l2_subdev_call(subdev, video, s_stream, 0);
+		if (ret < 0) {
+			dev_dbg(iss->dev, "%s: module stop timeout.\n",
+				subdev->name);
+			/* If the entity failed to stop, assume it has
+			 * crashed. Mark it as such; the ISS will be reset when
+			 * applications release it.
+			 */
+			iss->crashed |= 1U << subdev->entity.id;
+			failure = -ETIMEDOUT;
+		}
+	}
+
+	return failure;
+}
+
+/*
+ * omap4iss_pipeline_set_stream - Enable/disable streaming on a pipeline
+ * @pipe: ISS pipeline
+ * @state: Stream state (stopped, single shot or continuous)
+ *
+ * Set the pipeline to the given stream state. Pipelines can be started in
+ * single-shot or continuous mode.
+ *
+ * Return 0 if successful, or the return value of the failed video::s_stream
+ * operation otherwise. The pipeline state is not updated when the operation
+ * fails, except when stopping the pipeline.
+ */
+int omap4iss_pipeline_set_stream(struct iss_pipeline *pipe,
+				 enum iss_pipeline_stream_state state)
+{
+	int ret;
+
+	if (state == ISS_PIPELINE_STREAM_STOPPED)
+		ret = iss_pipeline_disable(pipe);
+	else
+		ret = iss_pipeline_enable(pipe, state);
+
+	if (ret == 0 || state == ISS_PIPELINE_STREAM_STOPPED)
+		pipe->stream_state = state;
+
+	return ret;
+}
+
+/*
+ * omap4iss_pipeline_cancel_stream - Cancel stream on a pipeline
+ * @pipe: ISS pipeline
+ *
+ * Cancelling a stream marks all buffers on all video nodes in the pipeline as
+ * erroneous and makes sure no new buffer can be queued. This function is called
+ * when a fatal error that prevents any further operation on the pipeline
+ * occurs.
+ */
+void omap4iss_pipeline_cancel_stream(struct iss_pipeline *pipe)
+{
+	if (pipe->input)
+		omap4iss_video_cancel_stream(pipe->input);
+	if (pipe->output)
+		omap4iss_video_cancel_stream(pipe->output);
+}
+
+/*
+ * iss_pipeline_is_last - Verify if entity has an enabled link to the output
+ *			  video node
+ * @me: ISS module's media entity
+ *
+ * Returns 1 if the entity has an enabled link to the output video node or 0
+ * otherwise. This holds only as long as the pipeline has no more than one
+ * output node.
+ */
+static int iss_pipeline_is_last(struct media_entity *me)
+{
+	struct iss_pipeline *pipe;
+	struct media_pad *pad;
+
+	if (!me->pipe)
+		return 0;
+	pipe = to_iss_pipeline(me);
+	if (pipe->stream_state == ISS_PIPELINE_STREAM_STOPPED)
+		return 0;
+	pad = media_entity_remote_pad(&pipe->output->pad);
+	return pad->entity == me;
+}
+
+static int iss_reset(struct iss_device *iss)
+{
+	unsigned long timeout = 0;
+
+	iss_reg_set(iss, OMAP4_ISS_MEM_TOP, ISS_HL_SYSCONFIG,
+		    ISS_HL_SYSCONFIG_SOFTRESET);
+
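+	/* Poll until the SOFTRESET bit self-clears; give up after ~1 ms. */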
+	while (iss_reg_read(iss, OMAP4_ISS_MEM_TOP, ISS_HL_SYSCONFIG) &
+	       ISS_HL_SYSCONFIG_SOFTRESET) {
+		if (timeout++ > 100) {
+			dev_alert(iss->dev, "cannot reset ISS\n");
+			return -ETIMEDOUT;
+		}
+		usleep_range(10, 10);
+	}
+
+	iss->crashed = 0;
+	return 0;
+}
+
+static int iss_isp_reset(struct iss_device *iss)
+{
+	unsigned long timeout = 0;
+
+	/* First, ensure that the ISP is IDLE (no transactions happening) */
+	iss_reg_update(iss, OMAP4_ISS_MEM_ISP_SYS1, ISP5_SYSCONFIG,
+		       ISP5_SYSCONFIG_STANDBYMODE_MASK,
+		       ISP5_SYSCONFIG_STANDBYMODE_SMART);
+
+	iss_reg_set(iss, OMAP4_ISS_MEM_ISP_SYS1, ISP5_CTRL, ISP5_CTRL_MSTANDBY);
+
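+	/* Wait for the ISP to acknowledge standby, up to ~1.5 seconds. */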
+	for (;;) {
+		if (iss_reg_read(iss, OMAP4_ISS_MEM_ISP_SYS1, ISP5_CTRL) &
+		    ISP5_CTRL_MSTANDBY_WAIT)
+			break;
+		if (timeout++ > 1000) {
+			dev_alert(iss->dev, "cannot set ISP5 to standby\n");
+			return -ETIMEDOUT;
+		}
+		usleep_range(1000, 1500);
+	}
+
+	/* Now finally, do the reset */
+	iss_reg_set(iss, OMAP4_ISS_MEM_ISP_SYS1, ISP5_SYSCONFIG,
+		    ISP5_SYSCONFIG_SOFTRESET);
+
+	timeout = 0;
+	while (iss_reg_read(iss, OMAP4_ISS_MEM_ISP_SYS1, ISP5_SYSCONFIG) &
+	       ISP5_SYSCONFIG_SOFTRESET) {
+		if (timeout++ > 1000) {
+			dev_alert(iss->dev, "cannot reset ISP5\n");
+			return -ETIMEDOUT;
+		}
+		usleep_range(1000, 1500);
+	}
+
+	return 0;
+}
+
+/*
+ * omap4iss_module_sync_idle - Helper to sync module with its idle state
+ * @me: ISS submodule's media entity
+ * @wait: ISS submodule's wait queue for streamoff/interrupt synchronization
+ * @stopping: flag indicating that the module wants to stop
+ *
+ * This function checks whether the ISS submodule needs to wait for the next
+ * interrupt. If so, it makes the caller sleep until that interrupt arrives.
+ */
+int omap4iss_module_sync_idle(struct media_entity *me, wait_queue_head_t *wait,
+			      atomic_t *stopping)
+{
+	struct iss_pipeline *pipe = to_iss_pipeline(me);
+	struct iss_video *video = pipe->output;
+	unsigned long flags;
+
+	if (pipe->stream_state == ISS_PIPELINE_STREAM_STOPPED ||
+	    (pipe->stream_state == ISS_PIPELINE_STREAM_SINGLESHOT &&
+	     !iss_pipeline_ready(pipe)))
+		return 0;
+
+	/*
+	 * atomic_set() doesn't include a memory barrier on ARM for the SMP
+	 * case, so call smp_wmb() explicitly here to avoid race conditions.
+	 */
+	atomic_set(stopping, 1);
+	smp_wmb();
+
+	/*
+	 * If module is the last one, it's writing to memory. In this case,
+	 * it's necessary to check if the module is already paused due to
+	 * DMA queue underrun or if it has to wait for next interrupt to be
+	 * idle.
+	 * If it isn't the last one, the function won't sleep but *stopping
+	 * will still be set so that the next submodule's interrupt handler
+	 * knows this module wants to be idle.
+	 */
+	if (!iss_pipeline_is_last(me))
+		return 0;
+
+	spin_lock_irqsave(&video->qlock, flags);
+	if (video->dmaqueue_flags & ISS_VIDEO_DMAQUEUE_UNDERRUN) {
+		spin_unlock_irqrestore(&video->qlock, flags);
+		atomic_set(stopping, 0);
+		smp_wmb();
+		return 0;
+	}
+	spin_unlock_irqrestore(&video->qlock, flags);
+	if (!wait_event_timeout(*wait, !atomic_read(stopping),
+				msecs_to_jiffies(1000))) {
+		atomic_set(stopping, 0);
+		smp_wmb();
+		return -ETIMEDOUT;
+	}
+
+	return 0;
+}
+
+/*
+ * omap4iss_module_sync_is_stopping - Check whether the module was stopping
+ * @wait: ISS submodule's wait queue for streamoff/interrupt synchronization
+ * @stopping: flag indicating that the module wants to stop
+ *
+ * This function checks whether the ISS submodule was stopping. If so, it
+ * notifies the caller by setting *stopping to 0 and waking up the wait queue.
+ * Returns 1 if it was stopping or 0 otherwise.
+ */
+int omap4iss_module_sync_is_stopping(wait_queue_head_t *wait,
+				     atomic_t *stopping)
+{
+	if (atomic_cmpxchg(stopping, 1, 0)) {
+		wake_up(wait);
+		return 1;
+	}
+
+	return 0;
+}
+
+/* --------------------------------------------------------------------------
+ * Clock management
+ */
+
+#define ISS_CLKCTRL_MASK	(ISS_CLKCTRL_CSI2_A |\
+				 ISS_CLKCTRL_CSI2_B |\
+				 ISS_CLKCTRL_ISP)
+
+static int __iss_subclk_update(struct iss_device *iss)
+{
+	u32 clk = 0;
+	int ret = 0, timeout = 1000;
+
+	if (iss->subclk_resources & OMAP4_ISS_SUBCLK_CSI2_A)
+		clk |= ISS_CLKCTRL_CSI2_A;
+
+	if (iss->subclk_resources & OMAP4_ISS_SUBCLK_CSI2_B)
+		clk |= ISS_CLKCTRL_CSI2_B;
+
+	if (iss->subclk_resources & OMAP4_ISS_SUBCLK_ISP)
+		clk |= ISS_CLKCTRL_ISP;
+
+	iss_reg_update(iss, OMAP4_ISS_MEM_TOP, ISS_CLKCTRL,
+		       ISS_CLKCTRL_MASK, clk);
+
+	/* Wait for HW assertion */
+	while (--timeout > 0) {
+		udelay(1);
+		if ((iss_reg_read(iss, OMAP4_ISS_MEM_TOP, ISS_CLKSTAT) &
+		    ISS_CLKCTRL_MASK) == clk)
+			break;
+	}
+
+	if (!timeout)
+		ret = -EBUSY;
+
+	return ret;
+}
+
+int omap4iss_subclk_enable(struct iss_device *iss,
+			    enum iss_subclk_resource res)
+{
+	iss->subclk_resources |= res;
+
+	return __iss_subclk_update(iss);
+}
+
+int omap4iss_subclk_disable(struct iss_device *iss,
+			     enum iss_subclk_resource res)
+{
+	iss->subclk_resources &= ~res;
+
+	return __iss_subclk_update(iss);
+}
+
+#define ISS_ISP5_CLKCTRL_MASK	(ISP5_CTRL_BL_CLK_ENABLE |\
+				 ISP5_CTRL_ISIF_CLK_ENABLE |\
+				 ISP5_CTRL_H3A_CLK_ENABLE |\
+				 ISP5_CTRL_RSZ_CLK_ENABLE |\
+				 ISP5_CTRL_IPIPE_CLK_ENABLE |\
+				 ISP5_CTRL_IPIPEIF_CLK_ENABLE)
+
+static void __iss_isp_subclk_update(struct iss_device *iss)
+{
+	u32 clk = 0;
+
+	if (iss->isp_subclk_resources & OMAP4_ISS_ISP_SUBCLK_ISIF)
+		clk |= ISP5_CTRL_ISIF_CLK_ENABLE;
+
+	if (iss->isp_subclk_resources & OMAP4_ISS_ISP_SUBCLK_H3A)
+		clk |= ISP5_CTRL_H3A_CLK_ENABLE;
+
+	if (iss->isp_subclk_resources & OMAP4_ISS_ISP_SUBCLK_RSZ)
+		clk |= ISP5_CTRL_RSZ_CLK_ENABLE;
+
+	if (iss->isp_subclk_resources & OMAP4_ISS_ISP_SUBCLK_IPIPE)
+		clk |= ISP5_CTRL_IPIPE_CLK_ENABLE;
+
+	if (iss->isp_subclk_resources & OMAP4_ISS_ISP_SUBCLK_IPIPEIF)
+		clk |= ISP5_CTRL_IPIPEIF_CLK_ENABLE;
+
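+	/* Keep the BL clock on whenever any other ISP sub-clock is enabled. */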
+	if (clk)
+		clk |= ISP5_CTRL_BL_CLK_ENABLE;
+
+	iss_reg_update(iss, OMAP4_ISS_MEM_ISP_SYS1, ISP5_CTRL,
+		       ISS_ISP5_CLKCTRL_MASK, clk);
+}
+
+void omap4iss_isp_subclk_enable(struct iss_device *iss,
+				enum iss_isp_subclk_resource res)
+{
+	iss->isp_subclk_resources |= res;
+
+	__iss_isp_subclk_update(iss);
+}
+
+void omap4iss_isp_subclk_disable(struct iss_device *iss,
+				 enum iss_isp_subclk_resource res)
+{
+	iss->isp_subclk_resources &= ~res;
+
+	__iss_isp_subclk_update(iss);
+}
+
+/*
+ * iss_enable_clocks - Enable ISS clocks
+ * @iss: OMAP4 ISS device
+ *
+ * Return 0 if successful, or the clk_enable() return value if any of them fails.
+ */
+static int iss_enable_clocks(struct iss_device *iss)
+{
+	int ret;
+
+	ret = clk_enable(iss->iss_fck);
+	if (ret) {
+		dev_err(iss->dev, "clk_enable iss_fck failed\n");
+		return ret;
+	}
+
+	ret = clk_enable(iss->iss_ctrlclk);
+	if (ret) {
+		dev_err(iss->dev, "clk_enable iss_ctrlclk failed\n");
+		clk_disable(iss->iss_fck);
+		return ret;
+	}
+
+	return 0;
+}
+
+/*
+ * iss_disable_clocks - Disable ISS clocks
+ * @iss: OMAP4 ISS device
+ */
+static void iss_disable_clocks(struct iss_device *iss)
+{
+	clk_disable(iss->iss_ctrlclk);
+	clk_disable(iss->iss_fck);
+}
+
+static void iss_put_clocks(struct iss_device *iss)
+{
+	if (iss->iss_fck) {
+		clk_put(iss->iss_fck);
+		iss->iss_fck = NULL;
+	}
+
+	if (iss->iss_ctrlclk) {
+		clk_put(iss->iss_ctrlclk);
+		iss->iss_ctrlclk = NULL;
+	}
+}
+
+static int iss_get_clocks(struct iss_device *iss)
+{
+	iss->iss_fck = clk_get(iss->dev, "iss_fck");
+	if (IS_ERR(iss->iss_fck)) {
+		int ret = PTR_ERR(iss->iss_fck);
+
+		dev_err(iss->dev, "Unable to get iss_fck clock info\n");
+		/* Clear the ERR_PTR so iss_put_clocks() won't clk_put() it. */
+		iss->iss_fck = NULL;
+		return ret;
+	}
+
+	iss->iss_ctrlclk = clk_get(iss->dev, "iss_ctrlclk");
+	if (IS_ERR(iss->iss_ctrlclk)) {
+		int ret = PTR_ERR(iss->iss_ctrlclk);
+
+		dev_err(iss->dev, "Unable to get iss_ctrlclk clock info\n");
+		/* Clear the ERR_PTR so iss_put_clocks() won't clk_put() it. */
+		iss->iss_ctrlclk = NULL;
+		iss_put_clocks(iss);
+		return ret;
+	}
+
+	return 0;
+}
+
+/*
+ * omap4iss_get - Acquire the ISS resource.
+ *
+ * Initializes the clocks for the first acquire.
+ *
+ * Increment the reference count on the ISS. If the first reference is taken,
+ * enable clocks and power-up all submodules.
+ *
+ * Return a pointer to the ISS device structure, or NULL if an error occurred.
+ */
+struct iss_device *omap4iss_get(struct iss_device *iss)
+{
+	struct iss_device *__iss = iss;
+
+	if (iss == NULL)
+		return NULL;
+
+	mutex_lock(&iss->iss_mutex);
+	if (iss->ref_count > 0)
+		goto out;
+
+	if (iss_enable_clocks(iss) < 0) {
+		__iss = NULL;
+		goto out;
+	}
+
+	iss_enable_interrupts(iss);
+
+out:
+	if (__iss != NULL)
+		iss->ref_count++;
+	mutex_unlock(&iss->iss_mutex);
+
+	return __iss;
+}
+
+/*
+ * omap4iss_put - Release the ISS
+ *
+ * Decrement the reference count on the ISS. If the last reference is released,
+ * power-down all submodules, disable clocks and free temporary buffers.
+ */
+void omap4iss_put(struct iss_device *iss)
+{
+	if (iss == NULL)
+		return;
+
+	mutex_lock(&iss->iss_mutex);
+	BUG_ON(iss->ref_count == 0);
+	if (--iss->ref_count == 0) {
+		iss_disable_interrupts(iss);
+		/* Reset the ISS if an entity has failed to stop. This is the
+		 * only way to recover from such conditions, although it would
+		 * be worth investigating whether resetting the ISP only can't
+		 * fix the problem in some cases.
+		 */
+		if (iss->crashed)
+			iss_reset(iss);
+		iss_disable_clocks(iss);
+	}
+	mutex_unlock(&iss->iss_mutex);
+}
+
+static int iss_map_mem_resource(struct platform_device *pdev,
+				struct iss_device *iss,
+				enum iss_mem_resources res)
+{
+	struct resource *mem;
+
+	/* request the mem region for the camera registers */
+
+	mem = platform_get_resource(pdev, IORESOURCE_MEM, res);
+	if (!mem) {
+		dev_err(iss->dev, "no mem resource?\n");
+		return -ENODEV;
+	}
+
+	if (!request_mem_region(mem->start, resource_size(mem), pdev->name)) {
+		dev_err(iss->dev,
+			"cannot reserve camera register I/O region\n");
+		return -ENODEV;
+	}
+	iss->res[res] = mem;
+
+	/* map the region */
+	iss->regs[res] = ioremap_nocache(mem->start, resource_size(mem));
+	if (!iss->regs[res]) {
+		dev_err(iss->dev, "cannot map camera register I/O region\n");
+		return -ENODEV;
+	}
+
+	return 0;
+}
+
+static void iss_unregister_entities(struct iss_device *iss)
+{
+	omap4iss_resizer_unregister_entities(&iss->resizer);
+	omap4iss_ipipe_unregister_entities(&iss->ipipe);
+	omap4iss_ipipeif_unregister_entities(&iss->ipipeif);
+	omap4iss_csi2_unregister_entities(&iss->csi2a);
+	omap4iss_csi2_unregister_entities(&iss->csi2b);
+
+	v4l2_device_unregister(&iss->v4l2_dev);
+	media_device_unregister(&iss->media_dev);
+}
+
+/*
+ * iss_register_subdev_group - Register a group of subdevices
+ * @iss: OMAP4 ISS device
+ * @board_info: I2C subdevs board information array
+ *
+ * Register all I2C subdevices in the board_info array. The array must be
+ * terminated by a NULL entry, and the first entry must be the sensor.
+ *
+ * Return a pointer to the sensor media entity if it has been successfully
+ * registered, or NULL otherwise.
+ */
+static struct v4l2_subdev *
+iss_register_subdev_group(struct iss_device *iss,
+			  struct iss_subdev_i2c_board_info *board_info)
+{
+	struct v4l2_subdev *sensor = NULL;
+	unsigned int first;
+
+	if (board_info->board_info == NULL)
+		return NULL;
+
+	for (first = 1; board_info->board_info; ++board_info, first = 0) {
+		struct v4l2_subdev *subdev;
+		struct i2c_adapter *adapter;
+
+		adapter = i2c_get_adapter(board_info->i2c_adapter_id);
+		if (adapter == NULL) {
+			dev_err(iss->dev,
+				"%s: Unable to get I2C adapter %d for device %s\n",
+				__func__, board_info->i2c_adapter_id,
+				board_info->board_info->type);
+			continue;
+		}
+
+		subdev = v4l2_i2c_new_subdev_board(&iss->v4l2_dev, adapter,
+				board_info->board_info, NULL);
+		if (subdev == NULL) {
+			dev_err(iss->dev, "%s: Unable to register subdev %s\n",
+				__func__, board_info->board_info->type);
+			continue;
+		}
+
+		if (first)
+			sensor = subdev;
+	}
+
+	return sensor;
+}
+
+static int iss_register_entities(struct iss_device *iss)
+{
+	struct iss_platform_data *pdata = iss->pdata;
+	struct iss_v4l2_subdevs_group *subdevs;
+	int ret;
+
+	iss->media_dev.dev = iss->dev;
+	strlcpy(iss->media_dev.model, "TI OMAP4 ISS",
+		sizeof(iss->media_dev.model));
+	iss->media_dev.hw_revision = iss->revision;
+	iss->media_dev.link_notify = iss_pipeline_link_notify;
+	ret = media_device_register(&iss->media_dev);
+	if (ret < 0) {
+		dev_err(iss->dev, "%s: Media device registration failed (%d)\n",
+			__func__, ret);
+		return ret;
+	}
+
+	iss->v4l2_dev.mdev = &iss->media_dev;
+	ret = v4l2_device_register(iss->dev, &iss->v4l2_dev);
+	if (ret < 0) {
+		dev_err(iss->dev, "%s: V4L2 device registration failed (%d)\n",
+			__func__, ret);
+		goto done;
+	}
+
+	/* Register internal entities */
+	ret = omap4iss_csi2_register_entities(&iss->csi2a, &iss->v4l2_dev);
+	if (ret < 0)
+		goto done;
+
+	ret = omap4iss_csi2_register_entities(&iss->csi2b, &iss->v4l2_dev);
+	if (ret < 0)
+		goto done;
+
+	ret = omap4iss_ipipeif_register_entities(&iss->ipipeif, &iss->v4l2_dev);
+	if (ret < 0)
+		goto done;
+
+	ret = omap4iss_ipipe_register_entities(&iss->ipipe, &iss->v4l2_dev);
+	if (ret < 0)
+		goto done;
+
+	ret = omap4iss_resizer_register_entities(&iss->resizer, &iss->v4l2_dev);
+	if (ret < 0)
+		goto done;
+
+	/* Register external entities */
+	for (subdevs = pdata->subdevs; subdevs && subdevs->subdevs; ++subdevs) {
+		struct v4l2_subdev *sensor;
+		struct media_entity *input;
+		unsigned int flags;
+		unsigned int pad;
+
+		sensor = iss_register_subdev_group(iss, subdevs->subdevs);
+		if (sensor == NULL)
+			continue;
+
+		sensor->host_priv = subdevs;
+
+		/* Connect the sensor to the correct interface module.
+		 * CSI2a receiver through CSIPHY1, or
+		 * CSI2b receiver through CSIPHY2
+		 */
+		switch (subdevs->interface) {
+		case ISS_INTERFACE_CSI2A_PHY1:
+			input = &iss->csi2a.subdev.entity;
+			pad = CSI2_PAD_SINK;
+			flags = MEDIA_LNK_FL_IMMUTABLE
+			      | MEDIA_LNK_FL_ENABLED;
+			break;
+
+		case ISS_INTERFACE_CSI2B_PHY2:
+			input = &iss->csi2b.subdev.entity;
+			pad = CSI2_PAD_SINK;
+			flags = MEDIA_LNK_FL_IMMUTABLE
+			      | MEDIA_LNK_FL_ENABLED;
+			break;
+
+		default:
+			dev_err(iss->dev, "%s: invalid interface type %u\n",
+				__func__, subdevs->interface);
+			ret = -EINVAL;
+			goto done;
+		}
+
+		ret = media_entity_create_link(&sensor->entity, 0, input, pad,
+					       flags);
+		if (ret < 0)
+			goto done;
+	}
+
+	ret = v4l2_device_register_subdev_nodes(&iss->v4l2_dev);
+
+done:
+	if (ret < 0)
+		iss_unregister_entities(iss);
+
+	return ret;
+}
+
+static void iss_cleanup_modules(struct iss_device *iss)
+{
+	omap4iss_csi2_cleanup(iss);
+	omap4iss_ipipeif_cleanup(iss);
+	omap4iss_ipipe_cleanup(iss);
+	omap4iss_resizer_cleanup(iss);
+}
+
+static int iss_initialize_modules(struct iss_device *iss)
+{
+	int ret;
+
+	ret = omap4iss_csiphy_init(iss);
+	if (ret < 0) {
+		dev_err(iss->dev, "CSI PHY initialization failed\n");
+		goto error_csiphy;
+	}
+
+	ret = omap4iss_csi2_init(iss);
+	if (ret < 0) {
+		dev_err(iss->dev, "CSI2 initialization failed\n");
+		goto error_csi2;
+	}
+
+	ret = omap4iss_ipipeif_init(iss);
+	if (ret < 0) {
+		dev_err(iss->dev, "ISP IPIPEIF initialization failed\n");
+		goto error_ipipeif;
+	}
+
+	ret = omap4iss_ipipe_init(iss);
+	if (ret < 0) {
+		dev_err(iss->dev, "ISP IPIPE initialization failed\n");
+		goto error_ipipe;
+	}
+
+	ret = omap4iss_resizer_init(iss);
+	if (ret < 0) {
+		dev_err(iss->dev, "ISP RESIZER initialization failed\n");
+		goto error_resizer;
+	}
+
+	/* Connect the submodules. */
+	ret = media_entity_create_link(
+			&iss->csi2a.subdev.entity, CSI2_PAD_SOURCE,
+			&iss->ipipeif.subdev.entity, IPIPEIF_PAD_SINK, 0);
+	if (ret < 0)
+		goto error_link;
+
+	ret = media_entity_create_link(
+			&iss->csi2b.subdev.entity, CSI2_PAD_SOURCE,
+			&iss->ipipeif.subdev.entity, IPIPEIF_PAD_SINK, 0);
+	if (ret < 0)
+		goto error_link;
+
+	ret = media_entity_create_link(
+			&iss->ipipeif.subdev.entity, IPIPEIF_PAD_SOURCE_VP,
+			&iss->resizer.subdev.entity, RESIZER_PAD_SINK, 0);
+	if (ret < 0)
+		goto error_link;
+
+	ret = media_entity_create_link(
+			&iss->ipipeif.subdev.entity, IPIPEIF_PAD_SOURCE_VP,
+			&iss->ipipe.subdev.entity, IPIPE_PAD_SINK, 0);
+	if (ret < 0)
+		goto error_link;
+
+	ret = media_entity_create_link(
+			&iss->ipipe.subdev.entity, IPIPE_PAD_SOURCE_VP,
+			&iss->resizer.subdev.entity, RESIZER_PAD_SINK, 0);
+	if (ret < 0)
+		goto error_link;
+
+	return 0;
+
+error_link:
+	omap4iss_resizer_cleanup(iss);
+error_resizer:
+	omap4iss_ipipe_cleanup(iss);
+error_ipipe:
+	omap4iss_ipipeif_cleanup(iss);
+error_ipipeif:
+	omap4iss_csi2_cleanup(iss);
+error_csi2:
+error_csiphy:
+	return ret;
+}
+
+static int iss_probe(struct platform_device *pdev)
+{
+	struct iss_platform_data *pdata = pdev->dev.platform_data;
+	struct iss_device *iss;
+	unsigned int i;
+	int ret;
+
+	if (pdata == NULL)
+		return -EINVAL;
+
+	iss = kzalloc(sizeof(*iss), GFP_KERNEL);
+	if (!iss) {
+		dev_err(&pdev->dev, "Could not allocate memory\n");
+		return -ENOMEM;
+	}
+
+	mutex_init(&iss->iss_mutex);
+
+	iss->dev = &pdev->dev;
+	iss->pdata = pdata;
+
+	iss->raw_dmamask = DMA_BIT_MASK(32);
+	iss->dev->dma_mask = &iss->raw_dmamask;
+	iss->dev->coherent_dma_mask = DMA_BIT_MASK(32);
+
+	platform_set_drvdata(pdev, iss);
+
+	/* Map the top-level register space and get the clocks */
+	ret = iss_map_mem_resource(pdev, iss, OMAP4_ISS_MEM_TOP);
+	if (ret < 0)
+		goto error;
+
+	ret = iss_get_clocks(iss);
+	if (ret < 0)
+		goto error;
+
+	if (omap4iss_get(iss) == NULL) {
+		ret = -EINVAL;
+		goto error;
+	}
+
+	ret = iss_reset(iss);
+	if (ret < 0)
+		goto error_iss;
+
+	iss->revision = iss_reg_read(iss, OMAP4_ISS_MEM_TOP, ISS_HL_REVISION);
+	dev_info(iss->dev, "Revision %08x found\n", iss->revision);
+
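+	/* Map the remaining register spaces; TOP was mapped above. */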
+	for (i = 1; i < OMAP4_ISS_MEM_LAST; i++) {
+		ret = iss_map_mem_resource(pdev, iss, i);
+		if (ret)
+			goto error_iss;
+	}
+
+	/* Configure BTE BW_LIMITER field to max recommended value (1 GB) */
+	iss_reg_update(iss, OMAP4_ISS_MEM_BTE, BTE_CTRL,
+		       BTE_CTRL_BW_LIMITER_MASK,
+		       18 << BTE_CTRL_BW_LIMITER_SHIFT);
+
+	/* Perform ISP reset */
+	ret = omap4iss_subclk_enable(iss, OMAP4_ISS_SUBCLK_ISP);
+	if (ret < 0)
+		goto error_iss;
+
+	ret = iss_isp_reset(iss);
+	if (ret < 0)
+		goto error_iss;
+
+	dev_info(iss->dev, "ISP Revision %08x found\n",
+		 iss_reg_read(iss, OMAP4_ISS_MEM_ISP_SYS1, ISP5_REVISION));
+
+	/* Interrupt */
+	iss->irq_num = platform_get_irq(pdev, 0);
+	if (iss->irq_num <= 0) {
+		dev_err(iss->dev, "No IRQ resource\n");
+		ret = -ENODEV;
+		goto error_iss;
+	}
+
+	if (request_irq(iss->irq_num, iss_isr, IRQF_SHARED, "OMAP4 ISS", iss)) {
+		dev_err(iss->dev, "Unable to request IRQ\n");
+		ret = -EINVAL;
+		goto error_iss;
+	}
+
+	/* Entities */
+	ret = iss_initialize_modules(iss);
+	if (ret < 0)
+		goto error_irq;
+
+	ret = iss_register_entities(iss);
+	if (ret < 0)
+		goto error_modules;
+
+	omap4iss_put(iss);
+
+	return 0;
+
+error_modules:
+	iss_cleanup_modules(iss);
+error_irq:
+	free_irq(iss->irq_num, iss);
+error_iss:
+	omap4iss_put(iss);
+error:
+	iss_put_clocks(iss);
+
+	for (i = 0; i < OMAP4_ISS_MEM_LAST; i++) {
+		if (iss->regs[i]) {
+			iounmap(iss->regs[i]);
+			iss->regs[i] = NULL;
+		}
+
+		if (iss->res[i]) {
+			release_mem_region(iss->res[i]->start,
+					   resource_size(iss->res[i]));
+			iss->res[i] = NULL;
+		}
+	}
+	platform_set_drvdata(pdev, NULL);
+
+	mutex_destroy(&iss->iss_mutex);
+	kfree(iss);
+
+	return ret;
+}
+
+static int iss_remove(struct platform_device *pdev)
+{
+	struct iss_device *iss = platform_get_drvdata(pdev);
+	unsigned int i;
+
+	iss_unregister_entities(iss);
+	iss_cleanup_modules(iss);
+
+	free_irq(iss->irq_num, iss);
+	iss_put_clocks(iss);
+
+	for (i = 0; i < OMAP4_ISS_MEM_LAST; i++) {
+		if (iss->regs[i]) {
+			iounmap(iss->regs[i]);
+			iss->regs[i] = NULL;
+		}
+
+		if (iss->res[i]) {
+			release_mem_region(iss->res[i]->start,
+					   resource_size(iss->res[i]));
+			iss->res[i] = NULL;
+		}
+	}
+
+	kfree(iss);
+
+	return 0;
+}
+
+static struct platform_device_id omap4iss_id_table[] = {
+	{ "omap4iss", 0 },
+	{ },
+};
+MODULE_DEVICE_TABLE(platform, omap4iss_id_table);
+
+static struct platform_driver iss_driver = {
+	.probe		= iss_probe,
+	.remove		= iss_remove,
+	.id_table	= omap4iss_id_table,
+	.driver = {
+		.owner	= THIS_MODULE,
+		.name	= "omap4iss",
+	},
+};
+
+module_platform_driver(iss_driver);
+
+MODULE_DESCRIPTION("TI OMAP4 ISS driver");
+MODULE_AUTHOR("Sergio Aguirre <sergio.a.aguirre@gmail.com>");
+MODULE_LICENSE("GPL");
+MODULE_VERSION(ISS_VIDEO_DRIVER_VERSION);
diff --git a/drivers/staging/media/omap4iss/iss.h b/drivers/staging/media/omap4iss/iss.h
new file mode 100644
index 0000000..346db92
--- /dev/null
+++ b/drivers/staging/media/omap4iss/iss.h
@@ -0,0 +1,236 @@
+/*
+ * TI OMAP4 ISS V4L2 Driver
+ *
+ * Copyright (C) 2012 Texas Instruments.
+ *
+ * Author: Sergio Aguirre <sergio.a.aguirre@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#ifndef _OMAP4_ISS_H_
+#define _OMAP4_ISS_H_
+
+#include <media/v4l2-device.h>
+#include <linux/device.h>
+#include <linux/io.h>
+#include <linux/platform_device.h>
+#include <linux/wait.h>
+
+#include <media/omap4iss.h>
+
+#include "iss_regs.h"
+#include "iss_csiphy.h"
+#include "iss_csi2.h"
+#include "iss_ipipeif.h"
+#include "iss_ipipe.h"
+#include "iss_resizer.h"
+
+#define to_iss_device(ptr_module)				\
+	container_of(ptr_module, struct iss_device, ptr_module)
+#define to_device(ptr_module)						\
+	(to_iss_device(ptr_module)->dev)
+
+enum iss_mem_resources {
+	OMAP4_ISS_MEM_TOP,
+	OMAP4_ISS_MEM_CSI2_A_REGS1,
+	OMAP4_ISS_MEM_CAMERARX_CORE1,
+	OMAP4_ISS_MEM_CSI2_B_REGS1,
+	OMAP4_ISS_MEM_CAMERARX_CORE2,
+	OMAP4_ISS_MEM_BTE,
+	OMAP4_ISS_MEM_ISP_SYS1,
+	OMAP4_ISS_MEM_ISP_RESIZER,
+	OMAP4_ISS_MEM_ISP_IPIPE,
+	OMAP4_ISS_MEM_ISP_ISIF,
+	OMAP4_ISS_MEM_ISP_IPIPEIF,
+	OMAP4_ISS_MEM_LAST,
+};
+
+enum iss_subclk_resource {
+	OMAP4_ISS_SUBCLK_SIMCOP		= (1 << 0),
+	OMAP4_ISS_SUBCLK_ISP		= (1 << 1),
+	OMAP4_ISS_SUBCLK_CSI2_A		= (1 << 2),
+	OMAP4_ISS_SUBCLK_CSI2_B		= (1 << 3),
+	OMAP4_ISS_SUBCLK_CCP2		= (1 << 4),
+};
+
+enum iss_isp_subclk_resource {
+	OMAP4_ISS_ISP_SUBCLK_BL		= (1 << 0),
+	OMAP4_ISS_ISP_SUBCLK_ISIF	= (1 << 1),
+	OMAP4_ISS_ISP_SUBCLK_H3A	= (1 << 2),
+	OMAP4_ISS_ISP_SUBCLK_RSZ	= (1 << 3),
+	OMAP4_ISS_ISP_SUBCLK_IPIPE	= (1 << 4),
+	OMAP4_ISS_ISP_SUBCLK_IPIPEIF	= (1 << 5),
+};
+
+/*
+ * struct iss_reg - Structure for ISS register values.
+ * @mmio_range: Memory resource in which the register is located.
+ * @reg: 32-bit Register address.
+ * @val: 32-bit Register value.
+ */
+struct iss_reg {
+	enum iss_mem_resources mmio_range;
+	u32 reg;
+	u32 val;
+};
+
+/*
+ * struct iss_device - ISS device structure.
+ * @crashed: Bitmask of crashed entities (indexed by entity ID)
+ */
+struct iss_device {
+	struct v4l2_device v4l2_dev;
+	struct media_device media_dev;
+	struct device *dev;
+	u32 revision;
+
+	/* platform HW resources */
+	struct iss_platform_data *pdata;
+	unsigned int irq_num;
+
+	struct resource *res[OMAP4_ISS_MEM_LAST];
+	void __iomem *regs[OMAP4_ISS_MEM_LAST];
+
+	u64 raw_dmamask;
+
+	struct mutex iss_mutex;	/* For handling ref_count field */
+	u32 crashed;
+	int has_context;
+	int ref_count;
+
+	struct clk *iss_fck;
+	struct clk *iss_ctrlclk;
+
+	/* ISS modules */
+	struct iss_csi2_device csi2a;
+	struct iss_csi2_device csi2b;
+	struct iss_csiphy csiphy1;
+	struct iss_csiphy csiphy2;
+	struct iss_ipipeif_device ipipeif;
+	struct iss_ipipe_device ipipe;
+	struct iss_resizer_device resizer;
+
+	unsigned int subclk_resources;
+	unsigned int isp_subclk_resources;
+};
+
+#define v4l2_dev_to_iss_device(dev) \
+	container_of(dev, struct iss_device, v4l2_dev)
+
+int omap4iss_get_external_info(struct iss_pipeline *pipe,
+			       struct media_link *link);
+
+int omap4iss_module_sync_idle(struct media_entity *me, wait_queue_head_t *wait,
+			      atomic_t *stopping);
+
+int omap4iss_module_sync_is_stopping(wait_queue_head_t *wait,
+				     atomic_t *stopping);
+
+int omap4iss_pipeline_set_stream(struct iss_pipeline *pipe,
+				 enum iss_pipeline_stream_state state);
+void omap4iss_pipeline_cancel_stream(struct iss_pipeline *pipe);
+
+void omap4iss_configure_bridge(struct iss_device *iss,
+			       enum ipipeif_input_entity input);
+
+struct iss_device *omap4iss_get(struct iss_device *iss);
+void omap4iss_put(struct iss_device *iss);
+int omap4iss_subclk_enable(struct iss_device *iss,
+			   enum iss_subclk_resource res);
+int omap4iss_subclk_disable(struct iss_device *iss,
+			    enum iss_subclk_resource res);
+void omap4iss_isp_subclk_enable(struct iss_device *iss,
+				enum iss_isp_subclk_resource res);
+void omap4iss_isp_subclk_disable(struct iss_device *iss,
+				 enum iss_isp_subclk_resource res);
+
+int omap4iss_pipeline_pm_use(struct media_entity *entity, int use);
+
+int omap4iss_register_entities(struct platform_device *pdev,
+			       struct v4l2_device *v4l2_dev);
+void omap4iss_unregister_entities(struct platform_device *pdev);
+
+/*
+ * iss_reg_read - Read the value of an OMAP4 ISS register
+ * @iss: the ISS device
+ * @res: memory resource in which the register is located
+ * @offset: register offset in the memory resource
+ *
+ * Return the register value.
+ */
+static inline
+u32 iss_reg_read(struct iss_device *iss, enum iss_mem_resources res,
+		 u32 offset)
+{
+	return readl(iss->regs[res] + offset);
+}
+
+/*
+ * iss_reg_write - Write a value to an OMAP4 ISS register
+ * @iss: the ISS device
+ * @res: memory resource in which the register is located
+ * @offset: register offset in the memory resource
+ * @value: value to be written
+ */
+static inline
+void iss_reg_write(struct iss_device *iss, enum iss_mem_resources res,
+		   u32 offset, u32 value)
+{
+	writel(value, iss->regs[res] + offset);
+}
+
+/*
+ * iss_reg_clr - Clear bits in an OMAP4 ISS register
+ * @iss: the ISS device
+ * @res: memory resource in which the register is located
+ * @offset: register offset in the memory resource
+ * @clr: bit mask to be cleared
+ */
+static inline
+void iss_reg_clr(struct iss_device *iss, enum iss_mem_resources res,
+		 u32 offset, u32 clr)
+{
+	u32 v = iss_reg_read(iss, res, offset);
+
+	iss_reg_write(iss, res, offset, v & ~clr);
+}
+
+/*
+ * iss_reg_set - Set bits in an OMAP4 ISS register
+ * @iss: the ISS device
+ * @res: memory resource in which the register is located
+ * @offset: register offset in the memory resource
+ * @set: bit mask to be set
+ */
+static inline
+void iss_reg_set(struct iss_device *iss, enum iss_mem_resources res,
+		 u32 offset, u32 set)
+{
+	u32 v = iss_reg_read(iss, res, offset);
+
+	iss_reg_write(iss, res, offset, v | set);
+}
+
+/*
+ * iss_reg_update - Clear and set bits in an OMAP4 ISS register
+ * @iss: the ISS device
+ * @res: memory resource in which the register is located
+ * @offset: register offset in the memory resource
+ * @clr: bit mask to be cleared
+ * @set: bit mask to be set
+ *
+ * Clear the clr mask first and then set the set mask.
+ */
+static inline
+void iss_reg_update(struct iss_device *iss, enum iss_mem_resources res,
+		    u32 offset, u32 clr, u32 set)
+{
+	u32 v = iss_reg_read(iss, res, offset);
+
+	iss_reg_write(iss, res, offset, (v & ~clr) | set);
+}
+
+#endif /* _OMAP4_ISS_H_ */
diff --git a/drivers/staging/media/omap4iss/iss_csi2.c b/drivers/staging/media/omap4iss/iss_csi2.c
new file mode 100644
index 0000000..61fc350
--- /dev/null
+++ b/drivers/staging/media/omap4iss/iss_csi2.c
@@ -0,0 +1,1343 @@
+/*
+ * TI OMAP4 ISS V4L2 Driver - CSI2 module
+ *
+ * Copyright (C) 2012 Texas Instruments, Inc.
+ *
+ * Author: Sergio Aguirre <sergio.a.aguirre@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include <linux/delay.h>
+#include <media/v4l2-common.h>
+#include <linux/v4l2-mediabus.h>
+#include <linux/mm.h>
+
+#include "iss.h"
+#include "iss_regs.h"
+#include "iss_csi2.h"
+
+/*
+ * csi2_if_enable - Enable/disable the CSI2 receiver interface.
+ * @enable: enable flag
+ *
+ */
+static void csi2_if_enable(struct iss_csi2_device *csi2, u8 enable)
+{
+	struct iss_csi2_ctrl_cfg *currctrl = &csi2->ctrl;
+
+	iss_reg_update(csi2->iss, csi2->regs1, CSI2_CTRL, CSI2_CTRL_IF_EN,
+		       enable ? CSI2_CTRL_IF_EN : 0);
+
+	currctrl->if_enable = enable;
+}
+
+/*
+ * csi2_recv_config - CSI2 receiver module configuration.
+ * @currctrl: iss_csi2_ctrl_cfg structure
+ *
+ */
+static void csi2_recv_config(struct iss_csi2_device *csi2,
+			     struct iss_csi2_ctrl_cfg *currctrl)
+{
+	u32 reg = 0;
+
+	if (currctrl->frame_mode)
+		reg |= CSI2_CTRL_FRAME;
+	else
+		reg &= ~CSI2_CTRL_FRAME;
+
+	if (currctrl->vp_clk_enable)
+		reg |= CSI2_CTRL_VP_CLK_EN;
+	else
+		reg &= ~CSI2_CTRL_VP_CLK_EN;
+
+	if (currctrl->vp_only_enable)
+		reg |= CSI2_CTRL_VP_ONLY_EN;
+	else
+		reg &= ~CSI2_CTRL_VP_ONLY_EN;
+
+	reg &= ~CSI2_CTRL_VP_OUT_CTRL_MASK;
+	reg |= currctrl->vp_out_ctrl << CSI2_CTRL_VP_OUT_CTRL_SHIFT;
+
+	if (currctrl->ecc_enable)
+		reg |= CSI2_CTRL_ECC_EN;
+	else
+		reg &= ~CSI2_CTRL_ECC_EN;
+
+	/*
+	 * Set MFlag assertion boundaries to:
+	 * Low: 4/8 of FIFO size
+	 * High: 6/8 of FIFO size
+	 */
+	reg &= ~(CSI2_CTRL_MFLAG_LEVH_MASK | CSI2_CTRL_MFLAG_LEVL_MASK);
+	reg |= (2 << CSI2_CTRL_MFLAG_LEVH_SHIFT) |
+	       (4 << CSI2_CTRL_MFLAG_LEVL_SHIFT);
+
+	/* Generation of 16x64-bit bursts (Recommended) */
+	reg |= CSI2_CTRL_BURST_SIZE_EXPAND;
+
+	/* Do Non-Posted writes (Recommended) */
+	reg |= CSI2_CTRL_NON_POSTED_WRITE;
+
+	/*
+	 * Enforce Little endian for all formats, including:
+	 * YUV4:2:2 8-bit and YUV4:2:0 Legacy
+	 */
+	reg |= CSI2_CTRL_ENDIANNESS;
+
+	iss_reg_write(csi2->iss, csi2->regs1, CSI2_CTRL, reg);
+}
+
+static const unsigned int csi2_input_fmts[] = {
+	V4L2_MBUS_FMT_SGRBG10_1X10,
+	V4L2_MBUS_FMT_SGRBG10_DPCM8_1X8,
+	V4L2_MBUS_FMT_SRGGB10_1X10,
+	V4L2_MBUS_FMT_SRGGB10_DPCM8_1X8,
+	V4L2_MBUS_FMT_SBGGR10_1X10,
+	V4L2_MBUS_FMT_SBGGR10_DPCM8_1X8,
+	V4L2_MBUS_FMT_SGBRG10_1X10,
+	V4L2_MBUS_FMT_SGBRG10_DPCM8_1X8,
+	V4L2_MBUS_FMT_SBGGR8_1X8,
+	V4L2_MBUS_FMT_SGBRG8_1X8,
+	V4L2_MBUS_FMT_SGRBG8_1X8,
+	V4L2_MBUS_FMT_SRGGB8_1X8,
+	V4L2_MBUS_FMT_UYVY8_1X16,
+	V4L2_MBUS_FMT_YUYV8_1X16,
+};
+
+/* Setting the format on the CSI2 requires a mapping function that takes
+ * the following inputs:
+ * - 4 different formats (at this time)
+ * - 2 destinations (mem, vp+mem) (vp only handled separately)
+ * - 2 decompression options (on, off)
+ * Output should be CSI2 frame format code
+ * Array indices as follows: [format][dest][decompr]
+ * Not all combinations are valid. 0 means invalid.
+ */
+static const u16 __csi2_fmt_map[][2][2] = {
+	/* RAW10 formats */
+	{
+		/* Output to memory */
+		{
+			/* No DPCM decompression */
+			CSI2_PIX_FMT_RAW10_EXP16,
+			/* DPCM decompression */
+			0,
+		},
+		/* Output to both */
+		{
+			/* No DPCM decompression */
+			CSI2_PIX_FMT_RAW10_EXP16_VP,
+			/* DPCM decompression */
+			0,
+		},
+	},
+	/* RAW10 DPCM8 formats */
+	{
+		/* Output to memory */
+		{
+			/* No DPCM decompression */
+			CSI2_USERDEF_8BIT_DATA1,
+			/* DPCM decompression */
+			CSI2_USERDEF_8BIT_DATA1_DPCM10,
+		},
+		/* Output to both */
+		{
+			/* No DPCM decompression */
+			CSI2_PIX_FMT_RAW8_VP,
+			/* DPCM decompression */
+			CSI2_USERDEF_8BIT_DATA1_DPCM10_VP,
+		},
+	},
+	/* RAW8 formats */
+	{
+		/* Output to memory */
+		{
+			/* No DPCM decompression */
+			CSI2_PIX_FMT_RAW8,
+			/* DPCM decompression */
+			0,
+		},
+		/* Output to both */
+		{
+			/* No DPCM decompression */
+			CSI2_PIX_FMT_RAW8_VP,
+			/* DPCM decompression */
+			0,
+		},
+	},
+	/* YUV422 formats */
+	{
+		/* Output to memory */
+		{
+			/* No DPCM decompression */
+			CSI2_PIX_FMT_YUV422_8BIT,
+			/* DPCM decompression */
+			0,
+		},
+		/* Output to both */
+		{
+			/* No DPCM decompression */
+			CSI2_PIX_FMT_YUV422_8BIT_VP16,
+			/* DPCM decompression */
+			0,
+		},
+	},
+};
+
+/*
+ * csi2_ctx_map_format - Map CSI2 sink media bus format to CSI2 format ID
+ * @csi2: ISS CSI2 device
+ *
+ * Returns CSI2 physical format id
+ */
+static u16 csi2_ctx_map_format(struct iss_csi2_device *csi2)
+{
+	const struct v4l2_mbus_framefmt *fmt = &csi2->formats[CSI2_PAD_SINK];
+	int fmtidx, destidx;
+
+	switch (fmt->code) {
+	case V4L2_MBUS_FMT_SGRBG10_1X10:
+	case V4L2_MBUS_FMT_SRGGB10_1X10:
+	case V4L2_MBUS_FMT_SBGGR10_1X10:
+	case V4L2_MBUS_FMT_SGBRG10_1X10:
+		fmtidx = 0;
+		break;
+	case V4L2_MBUS_FMT_SGRBG10_DPCM8_1X8:
+	case V4L2_MBUS_FMT_SRGGB10_DPCM8_1X8:
+	case V4L2_MBUS_FMT_SBGGR10_DPCM8_1X8:
+	case V4L2_MBUS_FMT_SGBRG10_DPCM8_1X8:
+		fmtidx = 1;
+		break;
+	case V4L2_MBUS_FMT_SBGGR8_1X8:
+	case V4L2_MBUS_FMT_SGBRG8_1X8:
+	case V4L2_MBUS_FMT_SGRBG8_1X8:
+	case V4L2_MBUS_FMT_SRGGB8_1X8:
+		fmtidx = 2;
+		break;
+	case V4L2_MBUS_FMT_UYVY8_1X16:
+	case V4L2_MBUS_FMT_YUYV8_1X16:
+		fmtidx = 3;
+		break;
+	default:
+		WARN(1, "CSI2: pixel format %08x unsupported!\n",
+		     fmt->code);
+		return 0;
+	}
+
+	if (!(csi2->output & CSI2_OUTPUT_IPIPEIF) &&
+	    !(csi2->output & CSI2_OUTPUT_MEMORY)) {
+		/* Having neither output enabled is not a valid combination */
+		return CSI2_PIX_FMT_OTHERS;
+	}
+
+	/* If we need to skip frames at the beginning of the stream disable the
+	 * video port to avoid sending the skipped frames to the IPIPEIF.
+	 */
+	destidx = csi2->frame_skip ? 0 : !!(csi2->output & CSI2_OUTPUT_IPIPEIF);
+
+	return __csi2_fmt_map[fmtidx][destidx][csi2->dpcm_decompress];
+}
+
+/*
+ * csi2_set_outaddr - Set memory address to save output image
+ * @csi2: Pointer to ISS CSI2a device.
+ * @addr: 32-bit memory address aligned on 32 byte boundary.
+ *
+ * Sets the memory address where the output will be saved.
+ */
+static void csi2_set_outaddr(struct iss_csi2_device *csi2, u32 addr)
+{
+	struct iss_csi2_ctx_cfg *ctx = &csi2->contexts[0];
+
+	ctx->ping_addr = addr;
+	ctx->pong_addr = addr;
+	iss_reg_write(csi2->iss, csi2->regs1, CSI2_CTX_PING_ADDR(ctx->ctxnum),
+		      ctx->ping_addr);
+	iss_reg_write(csi2->iss, csi2->regs1, CSI2_CTX_PONG_ADDR(ctx->ctxnum),
+		      ctx->pong_addr);
+}
+
+/*
+ * is_usr_def_mapping - Checks whether USER_DEF_MAPPING should
+ *			be enabled by CSI2.
+ * @format_id: mapped format id
+ *
+ */
+static inline int is_usr_def_mapping(u32 format_id)
+{
+	return (format_id & 0xf0) == 0x40 ? 1 : 0;
+}
+
+/*
+ * csi2_ctx_enable - Enable specified CSI2 context
+ * @ctxnum: Context number, valid values are 0 to 7.
+ * @enable: enable
+ *
+ */
+static void csi2_ctx_enable(struct iss_csi2_device *csi2, u8 ctxnum, u8 enable)
+{
+	struct iss_csi2_ctx_cfg *ctx = &csi2->contexts[ctxnum];
+	u32 reg;
+
+	reg = iss_reg_read(csi2->iss, csi2->regs1, CSI2_CTX_CTRL1(ctxnum));
+
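+	/*
+	 * Before enabling the context, program how many frames to skip:
+	 * honour an explicit frame skip count, otherwise skip one frame
+	 * when writing to memory.
+	 */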
+	if (enable) {
+		unsigned int skip = 0;
+
+		if (csi2->frame_skip)
+			skip = csi2->frame_skip;
+		else if (csi2->output & CSI2_OUTPUT_MEMORY)
+			skip = 1;
+
+		reg &= ~CSI2_CTX_CTRL1_COUNT_MASK;
+		reg |= CSI2_CTX_CTRL1_COUNT_UNLOCK
+		    |  (skip << CSI2_CTX_CTRL1_COUNT_SHIFT)
+		    |  CSI2_CTX_CTRL1_CTX_EN;
+	} else {
+		reg &= ~CSI2_CTX_CTRL1_CTX_EN;
+	}
+
+	iss_reg_write(csi2->iss, csi2->regs1, CSI2_CTX_CTRL1(ctxnum), reg);
+	ctx->enabled = enable;
+}
+
+/*
+ * csi2_ctx_config - CSI2 context configuration.
+ * @ctx: context configuration
+ *
+ */
+static void csi2_ctx_config(struct iss_csi2_device *csi2,
+			    struct iss_csi2_ctx_cfg *ctx)
+{
+	u32 reg = 0;
+
+	/* Set up CSI2_CTx_CTRL1 */
+	if (ctx->eof_enabled)
+		reg |= CSI2_CTX_CTRL1_EOF_EN;
+
+	if (ctx->eol_enabled)
+		reg |= CSI2_CTX_CTRL1_EOL_EN;
+
+	if (ctx->checksum_enabled)
+		reg |= CSI2_CTX_CTRL1_CS_EN;
+
+	iss_reg_write(csi2->iss, csi2->regs1, CSI2_CTX_CTRL1(ctx->ctxnum), reg);
+
+	/* Set up CSI2_CTx_CTRL2 */
+	reg = ctx->virtual_id << CSI2_CTX_CTRL2_VIRTUAL_ID_SHIFT;
+	reg |= ctx->format_id << CSI2_CTX_CTRL2_FORMAT_SHIFT;
+
+	if (ctx->dpcm_decompress && ctx->dpcm_predictor)
+		reg |= CSI2_CTX_CTRL2_DPCM_PRED;
+
+	if (is_usr_def_mapping(ctx->format_id))
+		reg |= 2 << CSI2_CTX_CTRL2_USER_DEF_MAP_SHIFT;
+
+	iss_reg_write(csi2->iss, csi2->regs1, CSI2_CTX_CTRL2(ctx->ctxnum), reg);
+
+	/* Set up CSI2_CTx_CTRL3 */
+	iss_reg_write(csi2->iss, csi2->regs1, CSI2_CTX_CTRL3(ctx->ctxnum),
+		      ctx->alpha << CSI2_CTX_CTRL3_ALPHA_SHIFT);
+
+	/* Set up CSI2_CTx_DAT_OFST */
+	iss_reg_update(csi2->iss, csi2->regs1, CSI2_CTX_DAT_OFST(ctx->ctxnum),
+		       CSI2_CTX_DAT_OFST_MASK, ctx->data_offset);
+
+	iss_reg_write(csi2->iss, csi2->regs1, CSI2_CTX_PING_ADDR(ctx->ctxnum),
+		      ctx->ping_addr);
+	iss_reg_write(csi2->iss, csi2->regs1, CSI2_CTX_PONG_ADDR(ctx->ctxnum),
+		      ctx->pong_addr);
+}
+
+/*
+ * csi2_timing_config - CSI2 timing configuration.
+ * @timing: csi2_timing_cfg structure
+ */
+static void csi2_timing_config(struct iss_csi2_device *csi2,
+			       struct iss_csi2_timing_cfg *timing)
+{
+	u32 reg;
+
+	reg = iss_reg_read(csi2->iss, csi2->regs1, CSI2_TIMING);
+
+	if (timing->force_rx_mode)
+		reg |= CSI2_TIMING_FORCE_RX_MODE_IO1;
+	else
+		reg &= ~CSI2_TIMING_FORCE_RX_MODE_IO1;
+
+	if (timing->stop_state_16x)
+		reg |= CSI2_TIMING_STOP_STATE_X16_IO1;
+	else
+		reg &= ~CSI2_TIMING_STOP_STATE_X16_IO1;
+
+	if (timing->stop_state_4x)
+		reg |= CSI2_TIMING_STOP_STATE_X4_IO1;
+	else
+		reg &= ~CSI2_TIMING_STOP_STATE_X4_IO1;
+
+	reg &= ~CSI2_TIMING_STOP_STATE_COUNTER_IO1_MASK;
+	reg |= timing->stop_state_counter <<
+	       CSI2_TIMING_STOP_STATE_COUNTER_IO1_SHIFT;
+
+	iss_reg_write(csi2->iss, csi2->regs1, CSI2_TIMING, reg);
+}
+
+/*
+ * csi2_irq_ctx_set - Enables CSI2 Context IRQs.
+ * @enable: Enable/disable CSI2 Context interrupts
+ */
+static void csi2_irq_ctx_set(struct iss_csi2_device *csi2, int enable)
+{
+	u32 reg = CSI2_CTX_IRQ_FE;
+	int i;
+
+	if (csi2->use_fs_irq)
+		reg |= CSI2_CTX_IRQ_FS;
+
+	for (i = 0; i < 8; i++) {
+		iss_reg_write(csi2->iss, csi2->regs1, CSI2_CTX_IRQSTATUS(i),
+			      reg);
+		if (enable)
+			iss_reg_set(csi2->iss, csi2->regs1,
+				    CSI2_CTX_IRQENABLE(i), reg);
+		else
+			iss_reg_clr(csi2->iss, csi2->regs1,
+				    CSI2_CTX_IRQENABLE(i), reg);
+	}
+}
+
+/*
+ * csi2_irq_complexio1_set - Enables CSI2 ComplexIO IRQs.
+ * @enable: Enable/disable CSI2 ComplexIO #1 interrupts
+ */
+static void csi2_irq_complexio1_set(struct iss_csi2_device *csi2, int enable)
+{
+	u32 reg = CSI2_COMPLEXIO_IRQ_STATEALLULPMEXIT |
+		CSI2_COMPLEXIO_IRQ_STATEALLULPMENTER |
+		CSI2_COMPLEXIO_IRQ_STATEULPM5 |
+		CSI2_COMPLEXIO_IRQ_ERRCONTROL5 |
+		CSI2_COMPLEXIO_IRQ_ERRESC5 |
+		CSI2_COMPLEXIO_IRQ_ERRSOTSYNCHS5 |
+		CSI2_COMPLEXIO_IRQ_ERRSOTHS5 |
+		CSI2_COMPLEXIO_IRQ_STATEULPM4 |
+		CSI2_COMPLEXIO_IRQ_ERRCONTROL4 |
+		CSI2_COMPLEXIO_IRQ_ERRESC4 |
+		CSI2_COMPLEXIO_IRQ_ERRSOTSYNCHS4 |
+		CSI2_COMPLEXIO_IRQ_ERRSOTHS4 |
+		CSI2_COMPLEXIO_IRQ_STATEULPM3 |
+		CSI2_COMPLEXIO_IRQ_ERRCONTROL3 |
+		CSI2_COMPLEXIO_IRQ_ERRESC3 |
+		CSI2_COMPLEXIO_IRQ_ERRSOTSYNCHS3 |
+		CSI2_COMPLEXIO_IRQ_ERRSOTHS3 |
+		CSI2_COMPLEXIO_IRQ_STATEULPM2 |
+		CSI2_COMPLEXIO_IRQ_ERRCONTROL2 |
+		CSI2_COMPLEXIO_IRQ_ERRESC2 |
+		CSI2_COMPLEXIO_IRQ_ERRSOTSYNCHS2 |
+		CSI2_COMPLEXIO_IRQ_ERRSOTHS2 |
+		CSI2_COMPLEXIO_IRQ_STATEULPM1 |
+		CSI2_COMPLEXIO_IRQ_ERRCONTROL1 |
+		CSI2_COMPLEXIO_IRQ_ERRESC1 |
+		CSI2_COMPLEXIO_IRQ_ERRSOTSYNCHS1 |
+		CSI2_COMPLEXIO_IRQ_ERRSOTHS1;
+	iss_reg_write(csi2->iss, csi2->regs1, CSI2_COMPLEXIO_IRQSTATUS, reg);
+	if (enable)
+		iss_reg_set(csi2->iss, csi2->regs1, CSI2_COMPLEXIO_IRQENABLE,
+			    reg);
+	else
+		iss_reg_write(csi2->iss, csi2->regs1, CSI2_COMPLEXIO_IRQENABLE,
+			      0);
+}
+
+/*
+ * csi2_irq_status_set - Enables CSI2 Status IRQs.
+ * @enable: Enable/disable CSI2 Status interrupts
+ */
+static void csi2_irq_status_set(struct iss_csi2_device *csi2, int enable)
+{
+	u32 reg = CSI2_IRQ_OCP_ERR |
+		CSI2_IRQ_SHORT_PACKET |
+		CSI2_IRQ_ECC_CORRECTION |
+		CSI2_IRQ_ECC_NO_CORRECTION |
+		CSI2_IRQ_COMPLEXIO_ERR |
+		CSI2_IRQ_FIFO_OVF |
+		CSI2_IRQ_CONTEXT0;
+	iss_reg_write(csi2->iss, csi2->regs1, CSI2_IRQSTATUS, reg);
+	if (enable)
+		iss_reg_set(csi2->iss, csi2->regs1, CSI2_IRQENABLE, reg);
+	else
+		iss_reg_write(csi2->iss, csi2->regs1, CSI2_IRQENABLE, 0);
+}
+
+/*
+ * omap4iss_csi2_reset - Resets the CSI2 module.
+ *
+ * Must be called with the phy lock held.
+ *
+ * Returns 0 if successful, -ENODEV if the module isn't available, or -EBUSY
+ * if the PHY is in use or the reset did not complete in time.
+ */
+int omap4iss_csi2_reset(struct iss_csi2_device *csi2)
+{
+	u8 soft_reset_retries = 0;
+	u32 reg;
+	int i;
+
+	if (!csi2->available)
+		return -ENODEV;
+
+	if (csi2->phy->phy_in_use)
+		return -EBUSY;
+
+	iss_reg_set(csi2->iss, csi2->regs1, CSI2_SYSCONFIG,
+		    CSI2_SYSCONFIG_SOFT_RESET);
+
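+	/* Poll RESET_DONE, sleeping ~100us between tries, up to 5 attempts. */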
+	do {
+		reg = iss_reg_read(csi2->iss, csi2->regs1, CSI2_SYSSTATUS)
+		    & CSI2_SYSSTATUS_RESET_DONE;
+		if (reg == CSI2_SYSSTATUS_RESET_DONE)
+			break;
+		soft_reset_retries++;
+		if (soft_reset_retries < 5)
+			usleep_range(100, 100);
+	} while (soft_reset_retries < 5);
+
+	if (soft_reset_retries == 5) {
+		dev_err(csi2->iss->dev,
+			"CSI2: Soft reset try count exceeded!\n");
+		return -EBUSY;
+	}
+
+	iss_reg_set(csi2->iss, csi2->regs1, CSI2_COMPLEXIO_CFG,
+		    CSI2_COMPLEXIO_CFG_RESET_CTRL);
+
+	i = 100;
+	do {
+		reg = iss_reg_read(csi2->iss, csi2->phy->phy_regs, REGISTER1)
+		    & REGISTER1_RESET_DONE_CTRLCLK;
+		if (reg == REGISTER1_RESET_DONE_CTRLCLK)
+			break;
+		usleep_range(100, 100);
+	} while (--i > 0);
+
+	if (i == 0) {
+		dev_err(csi2->iss->dev,
+			"CSI2: Reset for CSI2_96M_FCLK domain Failed!\n");
+		return -EBUSY;
+	}
+
+	iss_reg_update(csi2->iss, csi2->regs1, CSI2_SYSCONFIG,
+		       CSI2_SYSCONFIG_MSTANDBY_MODE_MASK |
+		       CSI2_SYSCONFIG_AUTO_IDLE,
+		       CSI2_SYSCONFIG_MSTANDBY_MODE_NO);
+
+	return 0;
+}
+
+static int csi2_configure(struct iss_csi2_device *csi2)
+{
+	const struct iss_v4l2_subdevs_group *pdata;
+	struct iss_csi2_timing_cfg *timing = &csi2->timing[0];
+	struct v4l2_subdev *sensor;
+	struct media_pad *pad;
+
+	/*
+	 * CSI2 fields that can be updated while the context has
+	 * been enabled or the interface has been enabled are not
+	 * updated dynamically at the moment, so we do not allow
+	 * reconfiguration if either has been enabled.
+	 */
+	if (csi2->contexts[0].enabled || csi2->ctrl.if_enable)
+		return -EBUSY;
+
+	pad = media_entity_remote_pad(&csi2->pads[CSI2_PAD_SINK]);
+	sensor = media_entity_to_v4l2_subdev(pad->entity);
+	pdata = sensor->host_priv;
+
+	csi2->frame_skip = 0;
+	v4l2_subdev_call(sensor, sensor, g_skip_frames, &csi2->frame_skip);
+
+	csi2->ctrl.vp_out_ctrl = pdata->bus.csi2.vpclk_div;
+	csi2->ctrl.frame_mode = ISS_CSI2_FRAME_IMMEDIATE;
+	csi2->ctrl.ecc_enable = pdata->bus.csi2.crc;
+
+	timing->force_rx_mode = 1;
+	timing->stop_state_16x = 1;
+	timing->stop_state_4x = 1;
+	timing->stop_state_counter = 0x1ff;
+
+	/*
+	 * The CSI2 receiver can't do any format conversion except DPCM
+	 * decompression, so every set_format call configures both pads
+	 * and enables DPCM decompression as a special case:
+	 */
+	if (csi2->formats[CSI2_PAD_SINK].code !=
+	    csi2->formats[CSI2_PAD_SOURCE].code)
+		csi2->dpcm_decompress = true;
+	else
+		csi2->dpcm_decompress = false;
+
+	csi2->contexts[0].format_id = csi2_ctx_map_format(csi2);
+
+	if (csi2->video_out.bpl_padding == 0)
+		csi2->contexts[0].data_offset = 0;
+	else
+		csi2->contexts[0].data_offset = csi2->video_out.bpl_value;
+
+	/*
+	 * Enable end of frame and end of line signals generation for
+	 * context 0. These signals are generated from CSI2 receiver to
+	 * qualify the last pixel of a frame and the last pixel of a line.
+	 * Without enabling the signals CSI2 receiver writes data to memory
+	 * beyond buffer size and/or data line offset is not handled correctly.
+	 */
+	csi2->contexts[0].eof_enabled = 1;
+	csi2->contexts[0].eol_enabled = 1;
+
+	csi2_irq_complexio1_set(csi2, 1);
+	csi2_irq_ctx_set(csi2, 1);
+	csi2_irq_status_set(csi2, 1);
+
+	/* Set configuration (timings, format and links) */
+	csi2_timing_config(csi2, timing);
+	csi2_recv_config(csi2, &csi2->ctrl);
+	csi2_ctx_config(csi2, &csi2->contexts[0]);
+
+	return 0;
+}
+
+/*
+ * csi2_print_status - Prints CSI2 debug information.
+ */
+#define CSI2_PRINT_REGISTER(iss, regs, name)\
+	dev_dbg(iss->dev, "###CSI2 " #name "=0x%08x\n", \
+		iss_reg_read(iss, regs, CSI2_##name))
+
+static void csi2_print_status(struct iss_csi2_device *csi2)
+{
+	struct iss_device *iss = csi2->iss;
+
+	if (!csi2->available)
+		return;
+
+	dev_dbg(iss->dev, "-------------CSI2 Register dump-------------\n");
+
+	CSI2_PRINT_REGISTER(iss, csi2->regs1, SYSCONFIG);
+	CSI2_PRINT_REGISTER(iss, csi2->regs1, SYSSTATUS);
+	CSI2_PRINT_REGISTER(iss, csi2->regs1, IRQENABLE);
+	CSI2_PRINT_REGISTER(iss, csi2->regs1, IRQSTATUS);
+	CSI2_PRINT_REGISTER(iss, csi2->regs1, CTRL);
+	CSI2_PRINT_REGISTER(iss, csi2->regs1, DBG_H);
+	CSI2_PRINT_REGISTER(iss, csi2->regs1, COMPLEXIO_CFG);
+	CSI2_PRINT_REGISTER(iss, csi2->regs1, COMPLEXIO_IRQSTATUS);
+	CSI2_PRINT_REGISTER(iss, csi2->regs1, SHORT_PACKET);
+	CSI2_PRINT_REGISTER(iss, csi2->regs1, COMPLEXIO_IRQENABLE);
+	CSI2_PRINT_REGISTER(iss, csi2->regs1, DBG_P);
+	CSI2_PRINT_REGISTER(iss, csi2->regs1, TIMING);
+	CSI2_PRINT_REGISTER(iss, csi2->regs1, CTX_CTRL1(0));
+	CSI2_PRINT_REGISTER(iss, csi2->regs1, CTX_CTRL2(0));
+	CSI2_PRINT_REGISTER(iss, csi2->regs1, CTX_DAT_OFST(0));
+	CSI2_PRINT_REGISTER(iss, csi2->regs1, CTX_PING_ADDR(0));
+	CSI2_PRINT_REGISTER(iss, csi2->regs1, CTX_PONG_ADDR(0));
+	CSI2_PRINT_REGISTER(iss, csi2->regs1, CTX_IRQENABLE(0));
+	CSI2_PRINT_REGISTER(iss, csi2->regs1, CTX_IRQSTATUS(0));
+	CSI2_PRINT_REGISTER(iss, csi2->regs1, CTX_CTRL3(0));
+
+	dev_dbg(iss->dev, "--------------------------------------------\n");
+}
+
+/* -----------------------------------------------------------------------------
+ * Interrupt handling
+ */
+
+/*
+ * csi2_isr_buffer - Does buffer handling at end-of-frame
+ * when writing to memory.
+ */
+static void csi2_isr_buffer(struct iss_csi2_device *csi2)
+{
+	struct iss_buffer *buffer;
+
+	csi2_ctx_enable(csi2, 0, 0);
+
+	buffer = omap4iss_video_buffer_next(&csi2->video_out);
+
+	/*
+	 * Let video queue operation restart engine if there is an underrun
+	 * condition.
+	 */
+	if (buffer == NULL)
+		return;
+
+	csi2_set_outaddr(csi2, buffer->iss_addr);
+	csi2_ctx_enable(csi2, 0, 1);
+}
+
+static void csi2_isr_ctx(struct iss_csi2_device *csi2,
+			 struct iss_csi2_ctx_cfg *ctx)
+{
+	unsigned int n = ctx->ctxnum;
+	u32 status;
+
+	status = iss_reg_read(csi2->iss, csi2->regs1, CSI2_CTX_IRQSTATUS(n));
+	iss_reg_write(csi2->iss, csi2->regs1, CSI2_CTX_IRQSTATUS(n), status);
+
+	/* Propagate frame number */
+	if (status & CSI2_CTX_IRQ_FS) {
+		struct iss_pipeline *pipe =
+				     to_iss_pipeline(&csi2->subdev.entity);
+		if (pipe->do_propagation)
+			atomic_inc(&pipe->frame_number);
+	}
+
+	if (!(status & CSI2_CTX_IRQ_FE))
+		return;
+
+	/* Skip interrupts until we reach the frame skip count. The CSI2 will be
+	 * automatically disabled, as the frame skip count has been programmed
+	 * in the CSI2_CTx_CTRL1::COUNT field, so reenable it.
+	 *
+	 * It would have been nice to rely on the FRAME_NUMBER interrupt instead
+	 * but it turned out that the interrupt is only generated when the CSI2
+	 * writes to memory (the CSI2_CTx_CTRL1::COUNT field is decreased
+	 * correctly and reaches 0 when data is forwarded to the video port only
+	 * but no interrupt arrives). Maybe a CSI2 hardware bug.
+	 */
+	if (csi2->frame_skip) {
+		csi2->frame_skip--;
+		if (csi2->frame_skip == 0) {
+			ctx->format_id = csi2_ctx_map_format(csi2);
+			csi2_ctx_config(csi2, ctx);
+			csi2_ctx_enable(csi2, n, 1);
+		}
+		return;
+	}
+
+	if (csi2->output & CSI2_OUTPUT_MEMORY)
+		csi2_isr_buffer(csi2);
+}
+
+/*
+ * omap4iss_csi2_isr - CSI2 interrupt handling.
+ */
+void omap4iss_csi2_isr(struct iss_csi2_device *csi2)
+{
+	struct iss_pipeline *pipe = to_iss_pipeline(&csi2->subdev.entity);
+	u32 csi2_irqstatus, cpxio1_irqstatus;
+	struct iss_device *iss = csi2->iss;
+
+	if (!csi2->available)
+		return;
+
+	csi2_irqstatus = iss_reg_read(csi2->iss, csi2->regs1, CSI2_IRQSTATUS);
+	iss_reg_write(csi2->iss, csi2->regs1, CSI2_IRQSTATUS, csi2_irqstatus);
+
+	/* Failure Cases */
+	if (csi2_irqstatus & CSI2_IRQ_COMPLEXIO_ERR) {
+		cpxio1_irqstatus = iss_reg_read(csi2->iss, csi2->regs1,
+						CSI2_COMPLEXIO_IRQSTATUS);
+		iss_reg_write(csi2->iss, csi2->regs1, CSI2_COMPLEXIO_IRQSTATUS,
+			      cpxio1_irqstatus);
+		dev_dbg(iss->dev, "CSI2: ComplexIO Error IRQ %x\n",
+			cpxio1_irqstatus);
+		pipe->error = true;
+	}
+
+	if (csi2_irqstatus & (CSI2_IRQ_OCP_ERR |
+			      CSI2_IRQ_SHORT_PACKET |
+			      CSI2_IRQ_ECC_NO_CORRECTION |
+			      CSI2_IRQ_COMPLEXIO_ERR |
+			      CSI2_IRQ_FIFO_OVF)) {
+		dev_dbg(iss->dev,
+			"CSI2 Err: OCP:%d SHORT:%d ECC:%d CPXIO:%d OVF:%d\n",
+			csi2_irqstatus & CSI2_IRQ_OCP_ERR ? 1 : 0,
+			csi2_irqstatus & CSI2_IRQ_SHORT_PACKET ? 1 : 0,
+			csi2_irqstatus & CSI2_IRQ_ECC_NO_CORRECTION ? 1 : 0,
+			csi2_irqstatus & CSI2_IRQ_COMPLEXIO_ERR ? 1 : 0,
+			csi2_irqstatus & CSI2_IRQ_FIFO_OVF ? 1 : 0);
+		pipe->error = true;
+	}
+
+	if (omap4iss_module_sync_is_stopping(&csi2->wait, &csi2->stopping))
+		return;
+
+	/* Successful cases */
+	if (csi2_irqstatus & CSI2_IRQ_CONTEXT0)
+		csi2_isr_ctx(csi2, &csi2->contexts[0]);
+
+	if (csi2_irqstatus & CSI2_IRQ_ECC_CORRECTION)
+		dev_dbg(iss->dev, "CSI2: ECC correction done\n");
+}
+
+/* -----------------------------------------------------------------------------
+ * ISS video operations
+ */
+
+/*
+ * csi2_queue - Queues the first buffer when using memory output
+ * @video: The video node
+ * @buffer: buffer to queue
+ */
+static int csi2_queue(struct iss_video *video, struct iss_buffer *buffer)
+{
+	struct iss_csi2_device *csi2 = container_of(video,
+				struct iss_csi2_device, video_out);
+
+	csi2_set_outaddr(csi2, buffer->iss_addr);
+
+	/*
+	 * If streaming was enabled before there was a buffer queued
+	 * or underrun happened in the ISR, the hardware was not enabled
+	 * and DMA queue flag ISS_VIDEO_DMAQUEUE_UNDERRUN is still set.
+	 * Enable it now.
+	 */
+	if (csi2->video_out.dmaqueue_flags & ISS_VIDEO_DMAQUEUE_UNDERRUN) {
+		/* Enable / disable context 0 and IRQs */
+		csi2_if_enable(csi2, 1);
+		csi2_ctx_enable(csi2, 0, 1);
+		iss_video_dmaqueue_flags_clr(&csi2->video_out);
+	}
+
+	return 0;
+}
+
+static const struct iss_video_operations csi2_issvideo_ops = {
+	.queue = csi2_queue,
+};
+
+/* -----------------------------------------------------------------------------
+ * V4L2 subdev operations
+ */
+
+static struct v4l2_mbus_framefmt *
+__csi2_get_format(struct iss_csi2_device *csi2, struct v4l2_subdev_fh *fh,
+		  unsigned int pad, enum v4l2_subdev_format_whence which)
+{
+	if (which == V4L2_SUBDEV_FORMAT_TRY)
+		return v4l2_subdev_get_try_format(fh, pad);
+	else
+		return &csi2->formats[pad];
+}
+
+static void
+csi2_try_format(struct iss_csi2_device *csi2, struct v4l2_subdev_fh *fh,
+		unsigned int pad, struct v4l2_mbus_framefmt *fmt,
+		enum v4l2_subdev_format_whence which)
+{
+	enum v4l2_mbus_pixelcode pixelcode;
+	struct v4l2_mbus_framefmt *format;
+	const struct iss_format_info *info;
+	unsigned int i;
+
+	switch (pad) {
+	case CSI2_PAD_SINK:
+		/* Clamp the width and height to valid range (1-8191). */
+		for (i = 0; i < ARRAY_SIZE(csi2_input_fmts); i++) {
+			if (fmt->code == csi2_input_fmts[i])
+				break;
+		}
+
+		/* If not found, use SGRBG10 as default */
+		if (i >= ARRAY_SIZE(csi2_input_fmts))
+			fmt->code = V4L2_MBUS_FMT_SGRBG10_1X10;
+
+		fmt->width = clamp_t(u32, fmt->width, 1, 8191);
+		fmt->height = clamp_t(u32, fmt->height, 1, 8191);
+		break;
+
+	case CSI2_PAD_SOURCE:
+		/* Source format same as sink format, except for DPCM
+		 * compression.
+		 */
+		pixelcode = fmt->code;
+		format = __csi2_get_format(csi2, fh, CSI2_PAD_SINK, which);
+		memcpy(fmt, format, sizeof(*fmt));
+
+		/*
+		 * Only allow DPCM decompression, and check that the
+		 * pattern is preserved.
+		 */
+		info = omap4iss_video_format_info(fmt->code);
+		if (info->uncompressed == pixelcode)
+			fmt->code = pixelcode;
+		break;
+	}
+
+	/* RGB, non-interlaced */
+	fmt->colorspace = V4L2_COLORSPACE_SRGB;
+	fmt->field = V4L2_FIELD_NONE;
+}
+
+/*
+ * csi2_enum_mbus_code - Handle pixel format enumeration
+ * @sd     : pointer to v4l2 subdev structure
+ * @fh     : V4L2 subdev file handle
+ * @code   : pointer to v4l2_subdev_mbus_code_enum structure
+ * return -EINVAL or zero on success
+ */
+static int csi2_enum_mbus_code(struct v4l2_subdev *sd,
+			       struct v4l2_subdev_fh *fh,
+			       struct v4l2_subdev_mbus_code_enum *code)
+{
+	struct iss_csi2_device *csi2 = v4l2_get_subdevdata(sd);
+	struct v4l2_mbus_framefmt *format;
+	const struct iss_format_info *info;
+
+	if (code->pad == CSI2_PAD_SINK) {
+		if (code->index >= ARRAY_SIZE(csi2_input_fmts))
+			return -EINVAL;
+
+		code->code = csi2_input_fmts[code->index];
+	} else {
+		format = __csi2_get_format(csi2, fh, CSI2_PAD_SINK,
+					   V4L2_SUBDEV_FORMAT_TRY);
+		switch (code->index) {
+		case 0:
+			/* Passthrough sink pad code */
+			code->code = format->code;
+			break;
+		case 1:
+			/* Uncompressed code */
+			info = omap4iss_video_format_info(format->code);
+			if (info->uncompressed == format->code)
+				return -EINVAL;
+
+			code->code = info->uncompressed;
+			break;
+		default:
+			return -EINVAL;
+		}
+	}
+
+	return 0;
+}
+
+static int csi2_enum_frame_size(struct v4l2_subdev *sd,
+				struct v4l2_subdev_fh *fh,
+				struct v4l2_subdev_frame_size_enum *fse)
+{
+	struct iss_csi2_device *csi2 = v4l2_get_subdevdata(sd);
+	struct v4l2_mbus_framefmt format;
+
+	if (fse->index != 0)
+		return -EINVAL;
+
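+	/*
+	 * Probe the supported frame size range by letting csi2_try_format()
+	 * clamp a tiny (1x1) and a huge (UINT_MAX x UINT_MAX) request for
+	 * the given media bus code.
+	 */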
+	format.code = fse->code;
+	format.width = 1;
+	format.height = 1;
+	csi2_try_format(csi2, fh, fse->pad, &format, V4L2_SUBDEV_FORMAT_TRY);
+	fse->min_width = format.width;
+	fse->min_height = format.height;
+
+	if (format.code != fse->code)
+		return -EINVAL;
+
+	format.code = fse->code;
+	format.width = -1;
+	format.height = -1;
+	csi2_try_format(csi2, fh, fse->pad, &format, V4L2_SUBDEV_FORMAT_TRY);
+	fse->max_width = format.width;
+	fse->max_height = format.height;
+
+	return 0;
+}
+
+/*
+ * csi2_get_format - Handle get format by pads subdev method
+ * @sd : pointer to v4l2 subdev structure
+ * @fh : V4L2 subdev file handle
+ * @fmt: pointer to v4l2 subdev format structure
+ * return -EINVAL or zero on success
+ */
+static int csi2_get_format(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh,
+			   struct v4l2_subdev_format *fmt)
+{
+	struct iss_csi2_device *csi2 = v4l2_get_subdevdata(sd);
+	struct v4l2_mbus_framefmt *format;
+
+	format = __csi2_get_format(csi2, fh, fmt->pad, fmt->which);
+	if (format == NULL)
+		return -EINVAL;
+
+	fmt->format = *format;
+	return 0;
+}
+
+/*
+ * csi2_set_format - Handle set format by pads subdev method
+ * @sd : pointer to v4l2 subdev structure
+ * @fh : V4L2 subdev file handle
+ * @fmt: pointer to v4l2 subdev format structure
+ * return -EINVAL or zero on success
+ */
+static int csi2_set_format(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh,
+			   struct v4l2_subdev_format *fmt)
+{
+	struct iss_csi2_device *csi2 = v4l2_get_subdevdata(sd);
+	struct v4l2_mbus_framefmt *format;
+
+	format = __csi2_get_format(csi2, fh, fmt->pad, fmt->which);
+	if (format == NULL)
+		return -EINVAL;
+
+	csi2_try_format(csi2, fh, fmt->pad, &fmt->format, fmt->which);
+	*format = fmt->format;
+
+	/* Propagate the format from sink to source */
+	if (fmt->pad == CSI2_PAD_SINK) {
+		format = __csi2_get_format(csi2, fh, CSI2_PAD_SOURCE,
+					   fmt->which);
+		*format = fmt->format;
+		csi2_try_format(csi2, fh, CSI2_PAD_SOURCE, format, fmt->which);
+	}
+
+	return 0;
+}
+
+static int csi2_link_validate(struct v4l2_subdev *sd, struct media_link *link,
+			      struct v4l2_subdev_format *source_fmt,
+			      struct v4l2_subdev_format *sink_fmt)
+{
+	struct iss_csi2_device *csi2 = v4l2_get_subdevdata(sd);
+	struct iss_pipeline *pipe = to_iss_pipeline(&csi2->subdev.entity);
+	int rval;
+
+	pipe->external = media_entity_to_v4l2_subdev(link->source->entity);
+	rval = omap4iss_get_external_info(pipe, link);
+	if (rval < 0)
+		return rval;
+
+	return v4l2_subdev_link_validate_default(sd, link, source_fmt,
+						 sink_fmt);
+}
+
+/*
+ * csi2_init_formats - Initialize formats on all pads
+ * @sd: ISS CSI2 V4L2 subdevice
+ * @fh: V4L2 subdev file handle
+ *
+ * Initialize all pad formats with default values. If fh is not NULL, try
+ * formats are initialized on the file handle. Otherwise active formats are
+ * initialized on the device.
+ */
+static int csi2_init_formats(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh)
+{
+	struct v4l2_subdev_format format;
+
+	memset(&format, 0, sizeof(format));
+	format.pad = CSI2_PAD_SINK;
+	format.which = fh ? V4L2_SUBDEV_FORMAT_TRY : V4L2_SUBDEV_FORMAT_ACTIVE;
+	format.format.code = V4L2_MBUS_FMT_SGRBG10_1X10;
+	format.format.width = 4096;
+	format.format.height = 4096;
+	csi2_set_format(sd, fh, &format);
+
+	return 0;
+}
+
+/*
+ * csi2_set_stream - Enable/Disable streaming on the CSI2 module
+ * @sd: ISS CSI2 V4L2 subdevice
+ * @enable: ISS pipeline stream state
+ *
+ * Return 0 on success or a negative error code otherwise.
+ */
+static int csi2_set_stream(struct v4l2_subdev *sd, int enable)
+{
+	struct iss_csi2_device *csi2 = v4l2_get_subdevdata(sd);
+	struct iss_device *iss = csi2->iss;
+	struct iss_pipeline *pipe = to_iss_pipeline(&csi2->subdev.entity);
+	struct iss_video *video_out = &csi2->video_out;
+	int ret = 0;
+
+	if (csi2->state == ISS_PIPELINE_STREAM_STOPPED) {
+		if (enable == ISS_PIPELINE_STREAM_STOPPED)
+			return 0;
+
+		omap4iss_subclk_enable(iss, csi2->subclk);
+	}
+
+	switch (enable) {
+	case ISS_PIPELINE_STREAM_CONTINUOUS: {
+		ret = omap4iss_csiphy_config(iss, sd);
+		if (ret < 0)
+			return ret;
+
+		if (omap4iss_csiphy_acquire(csi2->phy) < 0)
+			return -ENODEV;
+		csi2->use_fs_irq = pipe->do_propagation;
+		csi2_configure(csi2);
+		csi2_print_status(csi2);
+
+		/*
+		 * When outputting to memory with no buffer available, let the
+		 * buffer queue handler start the hardware. A DMA queue flag
+		 * ISS_VIDEO_DMAQUEUE_QUEUED will be set as soon as there is
+		 * a buffer available.
+		 */
+		if (csi2->output & CSI2_OUTPUT_MEMORY &&
+		    !(video_out->dmaqueue_flags & ISS_VIDEO_DMAQUEUE_QUEUED))
+			break;
+		/* Enable context 0 and IRQs */
+		atomic_set(&csi2->stopping, 0);
+		csi2_ctx_enable(csi2, 0, 1);
+		csi2_if_enable(csi2, 1);
+		iss_video_dmaqueue_flags_clr(video_out);
+		break;
+	}
+	case ISS_PIPELINE_STREAM_STOPPED:
+		if (csi2->state == ISS_PIPELINE_STREAM_STOPPED)
+			return 0;
+		if (omap4iss_module_sync_idle(&sd->entity, &csi2->wait,
+					      &csi2->stopping))
+			ret = -ETIMEDOUT;
+		csi2_ctx_enable(csi2, 0, 0);
+		csi2_if_enable(csi2, 0);
+		csi2_irq_ctx_set(csi2, 0);
+		omap4iss_csiphy_release(csi2->phy);
+		omap4iss_subclk_disable(iss, csi2->subclk);
+		iss_video_dmaqueue_flags_clr(video_out);
+		break;
+	}
+
+	csi2->state = enable;
+	return ret;
+}
+
+/* subdev video operations */
+static const struct v4l2_subdev_video_ops csi2_video_ops = {
+	.s_stream = csi2_set_stream,
+};
+
+/* subdev pad operations */
+static const struct v4l2_subdev_pad_ops csi2_pad_ops = {
+	.enum_mbus_code = csi2_enum_mbus_code,
+	.enum_frame_size = csi2_enum_frame_size,
+	.get_fmt = csi2_get_format,
+	.set_fmt = csi2_set_format,
+	.link_validate = csi2_link_validate,
+};
+
+/* subdev operations */
+static const struct v4l2_subdev_ops csi2_ops = {
+	.video = &csi2_video_ops,
+	.pad = &csi2_pad_ops,
+};
+
+/* subdev internal operations */
+static const struct v4l2_subdev_internal_ops csi2_internal_ops = {
+	.open = csi2_init_formats,
+};
+
+/* -----------------------------------------------------------------------------
+ * Media entity operations
+ */
+
+/*
+ * csi2_link_setup - Setup CSI2 connections.
+ * @entity : Pointer to media entity structure
+ * @local  : Pointer to local pad array
+ * @remote : Pointer to remote pad array
+ * @flags  : Link flags
+ * return -EINVAL or zero on success
+ */
+static int csi2_link_setup(struct media_entity *entity,
+			   const struct media_pad *local,
+			   const struct media_pad *remote, u32 flags)
+{
+	struct v4l2_subdev *sd = media_entity_to_v4l2_subdev(entity);
+	struct iss_csi2_device *csi2 = v4l2_get_subdevdata(sd);
+	struct iss_csi2_ctrl_cfg *ctrl = &csi2->ctrl;
+
+	/*
+	 * The ISS core doesn't support pipelines with multiple video outputs,
+	 * so return -EBUSY for now. Revisit this once such support is added.
+	 */
+
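+	/*
+	 * The pad index occupies the low bits and the media entity type the
+	 * higher bits, so OR-ing them yields a distinct value for every
+	 * pad/remote-entity-type pair the switch below has to handle.
+	 */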
+	switch (local->index | media_entity_type(remote->entity)) {
+	case CSI2_PAD_SOURCE | MEDIA_ENT_T_DEVNODE:
+		if (flags & MEDIA_LNK_FL_ENABLED) {
+			if (csi2->output & ~CSI2_OUTPUT_MEMORY)
+				return -EBUSY;
+			csi2->output |= CSI2_OUTPUT_MEMORY;
+		} else {
+			csi2->output &= ~CSI2_OUTPUT_MEMORY;
+		}
+		break;
+
+	case CSI2_PAD_SOURCE | MEDIA_ENT_T_V4L2_SUBDEV:
+		if (flags & MEDIA_LNK_FL_ENABLED) {
+			if (csi2->output & ~CSI2_OUTPUT_IPIPEIF)
+				return -EBUSY;
+			csi2->output |= CSI2_OUTPUT_IPIPEIF;
+		} else {
+			csi2->output &= ~CSI2_OUTPUT_IPIPEIF;
+		}
+		break;
+
+	default:
+		/* Link from camera to CSI2 is fixed... */
+		return -EINVAL;
+	}
+
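+	/*
+	 * Use the video port exclusively when nothing is routed to memory,
+	 * and clock the video port whenever the IPIPEIF output is in use.
+	 */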
+	ctrl->vp_only_enable = csi2->output & CSI2_OUTPUT_MEMORY ? false : true;
+	ctrl->vp_clk_enable = !!(csi2->output & CSI2_OUTPUT_IPIPEIF);
+
+	return 0;
+}
+
+/* media operations */
+static const struct media_entity_operations csi2_media_ops = {
+	.link_setup = csi2_link_setup,
+	.link_validate = v4l2_subdev_link_validate,
+};
+
+void omap4iss_csi2_unregister_entities(struct iss_csi2_device *csi2)
+{
+	v4l2_device_unregister_subdev(&csi2->subdev);
+	omap4iss_video_unregister(&csi2->video_out);
+}
+
+int omap4iss_csi2_register_entities(struct iss_csi2_device *csi2,
+				    struct v4l2_device *vdev)
+{
+	int ret;
+
+	/* Register the subdev and video nodes. */
+	ret = v4l2_device_register_subdev(vdev, &csi2->subdev);
+	if (ret < 0)
+		goto error;
+
+	ret = omap4iss_video_register(&csi2->video_out, vdev);
+	if (ret < 0)
+		goto error;
+
+	return 0;
+
+error:
+	omap4iss_csi2_unregister_entities(csi2);
+	return ret;
+}
+
+/* -----------------------------------------------------------------------------
+ * ISS CSI2 initialisation and cleanup
+ */
+
+/*
+ * csi2_init_entities - Initialize subdev and media entity.
+ * @csi2: Pointer to csi2 structure.
+ * return -ENOMEM or zero on success
+ */
+static int csi2_init_entities(struct iss_csi2_device *csi2, const char *subname)
+{
+	struct v4l2_subdev *sd = &csi2->subdev;
+	struct media_pad *pads = csi2->pads;
+	struct media_entity *me = &sd->entity;
+	int ret;
+	char name[V4L2_SUBDEV_NAME_SIZE];
+
+	v4l2_subdev_init(sd, &csi2_ops);
+	sd->internal_ops = &csi2_internal_ops;
+	sprintf(name, "CSI2%s", subname);
+	snprintf(sd->name, sizeof(sd->name), "OMAP4 ISS %s", name);
+
+	sd->grp_id = 1 << 16;	/* group ID for iss subdevs */
+	v4l2_set_subdevdata(sd, csi2);
+	sd->flags |= V4L2_SUBDEV_FL_HAS_DEVNODE;
+
+	pads[CSI2_PAD_SOURCE].flags = MEDIA_PAD_FL_SOURCE;
+	pads[CSI2_PAD_SINK].flags = MEDIA_PAD_FL_SINK;
+
+	me->ops = &csi2_media_ops;
+	ret = media_entity_init(me, CSI2_PADS_NUM, pads, 0);
+	if (ret < 0)
+		return ret;
+
+	csi2_init_formats(sd, NULL);
+
+	/* Video device node */
+	csi2->video_out.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
+	csi2->video_out.ops = &csi2_issvideo_ops;
+	csi2->video_out.bpl_alignment = 32;
+	csi2->video_out.bpl_zero_padding = 1;
+	csi2->video_out.bpl_max = 0x1ffe0;
+	csi2->video_out.iss = csi2->iss;
+	csi2->video_out.capture_mem = PAGE_ALIGN(4096 * 4096) * 3;
+
+	ret = omap4iss_video_init(&csi2->video_out, name);
+	if (ret < 0)
+		goto error_video;
+
+	/* Connect the CSI2 subdev to the video node. */
+	ret = media_entity_create_link(&csi2->subdev.entity, CSI2_PAD_SOURCE,
+				       &csi2->video_out.video.entity, 0, 0);
+	if (ret < 0)
+		goto error_link;
+
+	return 0;
+
+error_link:
+	omap4iss_video_cleanup(&csi2->video_out);
+error_video:
+	media_entity_cleanup(&csi2->subdev.entity);
+	return ret;
+}
+
+/*
+ * omap4iss_csi2_init - Routine for module driver init
+ */
+int omap4iss_csi2_init(struct iss_device *iss)
+{
+	struct iss_csi2_device *csi2a = &iss->csi2a;
+	struct iss_csi2_device *csi2b = &iss->csi2b;
+	int ret;
+
+	csi2a->iss = iss;
+	csi2a->available = 1;
+	csi2a->regs1 = OMAP4_ISS_MEM_CSI2_A_REGS1;
+	csi2a->phy = &iss->csiphy1;
+	csi2a->subclk = OMAP4_ISS_SUBCLK_CSI2_A;
+	csi2a->state = ISS_PIPELINE_STREAM_STOPPED;
+	init_waitqueue_head(&csi2a->wait);
+
+	ret = csi2_init_entities(csi2a, "a");
+	if (ret < 0)
+		return ret;
+
+	csi2b->iss = iss;
+	csi2b->available = 1;
+	csi2b->regs1 = OMAP4_ISS_MEM_CSI2_B_REGS1;
+	csi2b->phy = &iss->csiphy2;
+	csi2b->subclk = OMAP4_ISS_SUBCLK_CSI2_B;
+	csi2b->state = ISS_PIPELINE_STREAM_STOPPED;
+	init_waitqueue_head(&csi2b->wait);
+
+	ret = csi2_init_entities(csi2b, "b");
+	if (ret < 0)
+		return ret;
+
+	return 0;
+}
+
+/*
+ * omap4iss_csi2_cleanup - Routine for module driver cleanup
+ */
+void omap4iss_csi2_cleanup(struct iss_device *iss)
+{
+	struct iss_csi2_device *csi2a = &iss->csi2a;
+	struct iss_csi2_device *csi2b = &iss->csi2b;
+
+	omap4iss_video_cleanup(&csi2a->video_out);
+	media_entity_cleanup(&csi2a->subdev.entity);
+
+	omap4iss_video_cleanup(&csi2b->video_out);
+	media_entity_cleanup(&csi2b->subdev.entity);
+}
diff --git a/drivers/staging/media/omap4iss/iss_csi2.h b/drivers/staging/media/omap4iss/iss_csi2.h
new file mode 100644
index 0000000..971aa7b
--- /dev/null
+++ b/drivers/staging/media/omap4iss/iss_csi2.h
@@ -0,0 +1,158 @@
+/*
+ * TI OMAP4 ISS V4L2 Driver - CSI2 module
+ *
+ * Copyright (C) 2012 Texas Instruments, Inc.
+ *
+ * Author: Sergio Aguirre <sergio.a.aguirre@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#ifndef OMAP4_ISS_CSI2_H
+#define OMAP4_ISS_CSI2_H
+
+#include <linux/types.h>
+#include <linux/videodev2.h>
+
+#include "iss_video.h"
+
+struct iss_csiphy;
+
+/* This is not an exhaustive list */
+enum iss_csi2_pix_formats {
+	CSI2_PIX_FMT_OTHERS = 0,
+	CSI2_PIX_FMT_YUV422_8BIT = 0x1e,
+	CSI2_PIX_FMT_YUV422_8BIT_VP = 0x9e,
+	CSI2_PIX_FMT_YUV422_8BIT_VP16 = 0xde,
+	CSI2_PIX_FMT_RAW10_EXP16 = 0xab,
+	CSI2_PIX_FMT_RAW10_EXP16_VP = 0x12f,
+	CSI2_PIX_FMT_RAW8 = 0x2a,
+	CSI2_PIX_FMT_RAW8_DPCM10_EXP16 = 0x2aa,
+	CSI2_PIX_FMT_RAW8_DPCM10_VP = 0x32a,
+	CSI2_PIX_FMT_RAW8_VP = 0x12a,
+	CSI2_USERDEF_8BIT_DATA1_DPCM10_VP = 0x340,
+	CSI2_USERDEF_8BIT_DATA1_DPCM10 = 0x2c0,
+	CSI2_USERDEF_8BIT_DATA1 = 0x40,
+};
+
+enum iss_csi2_irqevents {
+	OCP_ERR_IRQ = 0x4000,
+	SHORT_PACKET_IRQ = 0x2000,
+	ECC_CORRECTION_IRQ = 0x1000,
+	ECC_NO_CORRECTION_IRQ = 0x800,
+	COMPLEXIO2_ERR_IRQ = 0x400,
+	COMPLEXIO1_ERR_IRQ = 0x200,
+	FIFO_OVF_IRQ = 0x100,
+	CONTEXT7 = 0x80,
+	CONTEXT6 = 0x40,
+	CONTEXT5 = 0x20,
+	CONTEXT4 = 0x10,
+	CONTEXT3 = 0x8,
+	CONTEXT2 = 0x4,
+	CONTEXT1 = 0x2,
+	CONTEXT0 = 0x1,
+};
+
+enum iss_csi2_ctx_irqevents {
+	CTX_ECC_CORRECTION = 0x100,
+	CTX_LINE_NUMBER = 0x80,
+	CTX_FRAME_NUMBER = 0x40,
+	CTX_CS = 0x20,
+	CTX_LE = 0x8,
+	CTX_LS = 0x4,
+	CTX_FE = 0x2,
+	CTX_FS = 0x1,
+};
+
+enum iss_csi2_frame_mode {
+	ISS_CSI2_FRAME_IMMEDIATE,
+	ISS_CSI2_FRAME_AFTERFEC,
+};
+
+#define ISS_CSI2_MAX_CTX_NUM	7
+
+struct iss_csi2_ctx_cfg {
+	u8 ctxnum;		/* context number 0 - 7 */
+	u8 dpcm_decompress;
+
+	/* Fields in CSI2_CTx_CTRL2 - locked by CSI2_CTx_CTRL1.CTX_EN */
+	u8 virtual_id;
+	u16 format_id;		/* as in CSI2_CTx_CTRL2[9:0] */
+	u8 dpcm_predictor;	/* 1: simple, 0: advanced */
+
+	/* Fields in CSI2_CTx_CTRL1/3 - Shadowed */
+	u16 alpha;
+	u16 data_offset;
+	u32 ping_addr;
+	u32 pong_addr;
+	u8 eof_enabled;
+	u8 eol_enabled;
+	u8 checksum_enabled;
+	u8 enabled;
+};
+
+struct iss_csi2_timing_cfg {
+	u8 ionum;			/* IO1 or IO2 as in CSI2_TIMING */
+	unsigned force_rx_mode:1;
+	unsigned stop_state_16x:1;
+	unsigned stop_state_4x:1;
+	u16 stop_state_counter;
+};
+
+struct iss_csi2_ctrl_cfg {
+	bool vp_clk_enable;
+	bool vp_only_enable;
+	u8 vp_out_ctrl;
+	enum iss_csi2_frame_mode frame_mode;
+	bool ecc_enable;
+	bool if_enable;
+};
+
+#define CSI2_PAD_SINK		0
+#define CSI2_PAD_SOURCE		1
+#define CSI2_PADS_NUM		2
+
+#define CSI2_OUTPUT_IPIPEIF	(1 << 0)
+#define CSI2_OUTPUT_MEMORY	(1 << 1)
+
+struct iss_csi2_device {
+	struct v4l2_subdev subdev;
+	struct media_pad pads[CSI2_PADS_NUM];
+	struct v4l2_mbus_framefmt formats[CSI2_PADS_NUM];
+
+	struct iss_video video_out;
+	struct iss_device *iss;
+
+	u8 available;		/* Is the IP present on the silicon? */
+
+	/* memory resources, as defined in enum iss_mem_resources */
+	unsigned int regs1;
+	unsigned int regs2;
+	/* ISP subclock, as defined in enum iss_isp_subclk_resource */
+	unsigned int subclk;
+
+	u32 output; /* output to IPIPEIF, memory or both? */
+	bool dpcm_decompress;
+	unsigned int frame_skip;
+	bool use_fs_irq;
+
+	struct iss_csiphy *phy;
+	struct iss_csi2_ctx_cfg contexts[ISS_CSI2_MAX_CTX_NUM + 1];
+	struct iss_csi2_timing_cfg timing[2];
+	struct iss_csi2_ctrl_cfg ctrl;
+	enum iss_pipeline_stream_state state;
+	wait_queue_head_t wait;
+	atomic_t stopping;
+};
+
+void omap4iss_csi2_isr(struct iss_csi2_device *csi2);
+int omap4iss_csi2_reset(struct iss_csi2_device *csi2);
+int omap4iss_csi2_init(struct iss_device *iss);
+void omap4iss_csi2_cleanup(struct iss_device *iss);
+void omap4iss_csi2_unregister_entities(struct iss_csi2_device *csi2);
+int omap4iss_csi2_register_entities(struct iss_csi2_device *csi2,
+				    struct v4l2_device *vdev);
+#endif	/* OMAP4_ISS_CSI2_H */
diff --git a/drivers/staging/media/omap4iss/iss_csiphy.c b/drivers/staging/media/omap4iss/iss_csiphy.c
new file mode 100644
index 0000000..7c3d55d
--- /dev/null
+++ b/drivers/staging/media/omap4iss/iss_csiphy.c
@@ -0,0 +1,279 @@
+/*
+ * TI OMAP4 ISS V4L2 Driver - CSI PHY module
+ *
+ * Copyright (C) 2012 Texas Instruments, Inc.
+ *
+ * Author: Sergio Aguirre <sergio.a.aguirre@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include <linux/delay.h>
+#include <linux/device.h>
+
+#include "../../../../arch/arm/mach-omap2/control.h"
+
+#include "iss.h"
+#include "iss_regs.h"
+#include "iss_csiphy.h"
+
+/*
+ * csiphy_lanes_config - Configuration of CSIPHY lanes.
+ *
+ * Updates HW configuration.
+ * Called with phy->mutex taken.
+ */
+static void csiphy_lanes_config(struct iss_csiphy *phy)
+{
+	unsigned int i;
+	u32 reg;
+
+	reg = iss_reg_read(phy->iss, phy->cfg_regs, CSI2_COMPLEXIO_CFG);
+
+	for (i = 0; i < phy->max_data_lanes; i++) {
+		reg &= ~(CSI2_COMPLEXIO_CFG_DATA_POL(i + 1) |
+			 CSI2_COMPLEXIO_CFG_DATA_POSITION_MASK(i + 1));
+		reg |= (phy->lanes.data[i].pol ?
+			CSI2_COMPLEXIO_CFG_DATA_POL(i + 1) : 0);
+		reg |= (phy->lanes.data[i].pos <<
+			CSI2_COMPLEXIO_CFG_DATA_POSITION_SHIFT(i + 1));
+	}
+
+	reg &= ~(CSI2_COMPLEXIO_CFG_CLOCK_POL |
+		 CSI2_COMPLEXIO_CFG_CLOCK_POSITION_MASK);
+	reg |= phy->lanes.clk.pol ? CSI2_COMPLEXIO_CFG_CLOCK_POL : 0;
+	reg |= phy->lanes.clk.pos << CSI2_COMPLEXIO_CFG_CLOCK_POSITION_SHIFT;
+
+	iss_reg_write(phy->iss, phy->cfg_regs, CSI2_COMPLEXIO_CFG, reg);
+}
+
+/*
+ * csiphy_set_power
+ * @power: Power state to be set.
+ *
+ * Returns 0 if successful, or -EBUSY if the retry count is exceeded.
+ */
+static int csiphy_set_power(struct iss_csiphy *phy, u32 power)
+{
+	u32 reg;
+	u8 retry_count;
+
+	iss_reg_update(phy->iss, phy->cfg_regs, CSI2_COMPLEXIO_CFG,
+		       CSI2_COMPLEXIO_CFG_PWD_CMD_MASK,
+		       power | CSI2_COMPLEXIO_CFG_PWR_AUTO);
+
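+	/*
+	 * The PWD status field sits two bits below the PWD command field in
+	 * CSI2_COMPLEXIO_CFG, so the expected status value is the requested
+	 * power command shifted right by two.
+	 */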
+	retry_count = 0;
+	do {
+		udelay(1);
+		reg = iss_reg_read(phy->iss, phy->cfg_regs, CSI2_COMPLEXIO_CFG)
+		    & CSI2_COMPLEXIO_CFG_PWD_STATUS_MASK;
+
+		if (reg != power >> 2)
+			retry_count++;
+
+	} while ((reg != power >> 2) && (retry_count < 250));
+
+	if (retry_count == 250) {
+		dev_err(phy->iss->dev, "CSI2 CIO set power failed!\n");
+		return -EBUSY;
+	}
+
+	return 0;
+}
+
+/*
+ * csiphy_dphy_config - Configure CSI2 D-PHY parameters.
+ *
+ * Called with phy->mutex taken.
+ */
+static void csiphy_dphy_config(struct iss_csiphy *phy)
+{
+	u32 reg;
+
+	/* Set up REGISTER0 */
+	reg = phy->dphy.ths_term << REGISTER0_THS_TERM_SHIFT;
+	reg |= phy->dphy.ths_settle << REGISTER0_THS_SETTLE_SHIFT;
+
+	iss_reg_write(phy->iss, phy->phy_regs, REGISTER0, reg);
+
+	/* Set up REGISTER1 */
+	reg = phy->dphy.tclk_term << REGISTER1_TCLK_TERM_SHIFT;
+	reg |= phy->dphy.tclk_miss << REGISTER1_CTRLCLK_DIV_FACTOR_SHIFT;
+	reg |= phy->dphy.tclk_settle << REGISTER1_TCLK_SETTLE_SHIFT;
+	reg |= 0xb8 << REGISTER1_DPHY_HS_SYNC_PATTERN_SHIFT;
+
+	iss_reg_write(phy->iss, phy->phy_regs, REGISTER1, reg);
+}
+
+/*
+ * TCLK values are OK at their reset values
+ */
+#define TCLK_TERM	0
+#define TCLK_MISS	1
+#define TCLK_SETTLE	14
+
+int omap4iss_csiphy_config(struct iss_device *iss,
+			   struct v4l2_subdev *csi2_subdev)
+{
+	struct iss_csi2_device *csi2 = v4l2_get_subdevdata(csi2_subdev);
+	struct iss_pipeline *pipe = to_iss_pipeline(&csi2_subdev->entity);
+	struct iss_v4l2_subdevs_group *subdevs = pipe->external->host_priv;
+	struct iss_csiphy_dphy_cfg csi2phy;
+	int csi2_ddrclk_khz;
+	struct iss_csiphy_lanes_cfg *lanes;
+	unsigned int used_lanes = 0;
+	u32 cam_rx_ctrl;
+	unsigned int i;
+
+	lanes = &subdevs->bus.csi2.lanecfg;
+
+	/*
+	 * SCM.CONTROL_CAMERA_RX
+	 * - bit [31] : CSIPHY2 lane 2 enable (4460+ only)
+	 * - bit [30:29] : CSIPHY2 per-lane enable (1 to 0)
+	 * - bit [28:24] : CSIPHY1 per-lane enable (4 to 0)
+	 * - bit [21] : CSIPHY2 CTRLCLK enable
+	 * - bit [20:19] : CSIPHY2 config: 00 d-phy, 01/10 ccp2
+	 * - bit [18] : CSIPHY1 CTRLCLK enable
+	 * - bit [17:16] : CSIPHY1 config: 00 d-phy, 01/10 ccp2
+	 */
+	cam_rx_ctrl = omap4_ctrl_pad_readl(
+			OMAP4_CTRL_MODULE_PAD_CORE_CONTROL_CAMERA_RX);
+
+
+	if (subdevs->interface == ISS_INTERFACE_CSI2A_PHY1) {
+		cam_rx_ctrl &= ~(OMAP4_CAMERARX_CSI21_LANEENABLE_MASK |
+				OMAP4_CAMERARX_CSI21_CAMMODE_MASK);
+		/* NOTE: Leave the CSIPHY1 config at 0x0: D-PHY mode */
+		/* Enable all lanes for now */
+		cam_rx_ctrl |=
+			0x1f << OMAP4_CAMERARX_CSI21_LANEENABLE_SHIFT;
+		/* Enable CTRLCLK */
+		cam_rx_ctrl |= OMAP4_CAMERARX_CSI21_CTRLCLKEN_MASK;
+	}
+
+	if (subdevs->interface == ISS_INTERFACE_CSI2B_PHY2) {
+		cam_rx_ctrl &= ~(OMAP4_CAMERARX_CSI22_LANEENABLE_MASK |
+				OMAP4_CAMERARX_CSI22_CAMMODE_MASK);
+		/* NOTE: Leave the CSIPHY2 config at 0x0: D-PHY mode */
+		/* Enable all lanes for now */
+		cam_rx_ctrl |=
+			0x3 << OMAP4_CAMERARX_CSI22_LANEENABLE_SHIFT;
+		/* Enable CTRLCLK */
+		cam_rx_ctrl |= OMAP4_CAMERARX_CSI22_CTRLCLKEN_MASK;
+	}
+
+	omap4_ctrl_pad_writel(cam_rx_ctrl,
+		 OMAP4_CTRL_MODULE_PAD_CORE_CONTROL_CAMERA_RX);
+
+	/* Reset used lane count */
+	csi2->phy->used_data_lanes = 0;
+
+	/* Clock and data lanes verification */
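+	/*
+	 * A position of 0 marks an unused data lane; the used_lanes bitmask
+	 * catches two lanes mapped to the same physical position.
+	 */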
+	for (i = 0; i < csi2->phy->max_data_lanes; i++) {
+		if (lanes->data[i].pos == 0)
+			continue;
+
+		if (lanes->data[i].pol > 1 ||
+		    lanes->data[i].pos > (csi2->phy->max_data_lanes + 1))
+			return -EINVAL;
+
+		if (used_lanes & (1 << lanes->data[i].pos))
+			return -EINVAL;
+
+		used_lanes |= 1 << lanes->data[i].pos;
+		csi2->phy->used_data_lanes++;
+	}
+
+	if (lanes->clk.pol > 1 ||
+	    lanes->clk.pos > (csi2->phy->max_data_lanes + 1))
+		return -EINVAL;
+
+	if (lanes->clk.pos == 0 || used_lanes & (1 << lanes->clk.pos))
+		return -EINVAL;
+
+	csi2_ddrclk_khz = pipe->external_rate / 1000
+		/ (2 * csi2->phy->used_data_lanes)
+		* pipe->external_bpp;
+
+	/*
+	 * THS_TERM: Programmed value = ceil(12.5 ns/DDRClk period) - 1.
+	 * THS_SETTLE: Programmed value = ceil(90 ns/DDRClk period) + 3.
+	 */
+	csi2phy.ths_term = DIV_ROUND_UP(25 * csi2_ddrclk_khz, 2000000) - 1;
+	csi2phy.ths_settle = DIV_ROUND_UP(90 * csi2_ddrclk_khz, 1000000) + 3;
+	csi2phy.tclk_term = TCLK_TERM;
+	csi2phy.tclk_miss = TCLK_MISS;
+	csi2phy.tclk_settle = TCLK_SETTLE;
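+	/*
+	 * Worked example (hypothetical figures): with a 400 MHz DDR clock
+	 * (csi2_ddrclk_khz = 400000) this gives ths_term =
+	 * DIV_ROUND_UP(25 * 400000, 2000000) - 1 = 4 and ths_settle =
+	 * DIV_ROUND_UP(90 * 400000, 1000000) + 3 = 39.
+	 */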
+
+	mutex_lock(&csi2->phy->mutex);
+	csi2->phy->dphy = csi2phy;
+	csi2->phy->lanes = *lanes;
+	mutex_unlock(&csi2->phy->mutex);
+
+	return 0;
+}
+
+int omap4iss_csiphy_acquire(struct iss_csiphy *phy)
+{
+	int rval;
+
+	mutex_lock(&phy->mutex);
+
+	rval = omap4iss_csi2_reset(phy->csi2);
+	if (rval)
+		goto done;
+
+	csiphy_dphy_config(phy);
+	csiphy_lanes_config(phy);
+
+	rval = csiphy_set_power(phy, CSI2_COMPLEXIO_CFG_PWD_CMD_ON);
+	if (rval)
+		goto done;
+
+	phy->phy_in_use = 1;
+
+done:
+	mutex_unlock(&phy->mutex);
+	return rval;
+}
+
+void omap4iss_csiphy_release(struct iss_csiphy *phy)
+{
+	mutex_lock(&phy->mutex);
+	if (phy->phy_in_use) {
+		csiphy_set_power(phy, CSI2_COMPLEXIO_CFG_PWD_CMD_OFF);
+		phy->phy_in_use = 0;
+	}
+	mutex_unlock(&phy->mutex);
+}
+
+/*
+ * omap4iss_csiphy_init - Initialize the CSI PHY frontends
+ */
+int omap4iss_csiphy_init(struct iss_device *iss)
+{
+	struct iss_csiphy *phy1 = &iss->csiphy1;
+	struct iss_csiphy *phy2 = &iss->csiphy2;
+
+	phy1->iss = iss;
+	phy1->csi2 = &iss->csi2a;
+	phy1->max_data_lanes = ISS_CSIPHY1_NUM_DATA_LANES;
+	phy1->used_data_lanes = 0;
+	phy1->cfg_regs = OMAP4_ISS_MEM_CSI2_A_REGS1;
+	phy1->phy_regs = OMAP4_ISS_MEM_CAMERARX_CORE1;
+	mutex_init(&phy1->mutex);
+
+	phy2->iss = iss;
+	phy2->csi2 = &iss->csi2b;
+	phy2->max_data_lanes = ISS_CSIPHY2_NUM_DATA_LANES;
+	phy2->used_data_lanes = 0;
+	phy2->cfg_regs = OMAP4_ISS_MEM_CSI2_B_REGS1;
+	phy2->phy_regs = OMAP4_ISS_MEM_CAMERARX_CORE2;
+	mutex_init(&phy2->mutex);
+
+	return 0;
+}
diff --git a/drivers/staging/media/omap4iss/iss_csiphy.h b/drivers/staging/media/omap4iss/iss_csiphy.h
new file mode 100644
index 0000000..e9ca439
--- /dev/null
+++ b/drivers/staging/media/omap4iss/iss_csiphy.h
@@ -0,0 +1,51 @@
+/*
+ * TI OMAP4 ISS V4L2 Driver - CSI PHY module
+ *
+ * Copyright (C) 2012 Texas Instruments, Inc.
+ *
+ * Author: Sergio Aguirre <sergio.a.aguirre@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#ifndef OMAP4_ISS_CSI_PHY_H
+#define OMAP4_ISS_CSI_PHY_H
+
+#include <media/omap4iss.h>
+
+struct iss_csi2_device;
+
+struct iss_csiphy_dphy_cfg {
+	u8 ths_term;
+	u8 ths_settle;
+	u8 tclk_term;
+	unsigned tclk_miss:1;
+	u8 tclk_settle;
+};
+
+struct iss_csiphy {
+	struct iss_device *iss;
+	struct mutex mutex;	/* serialize csiphy configuration */
+	u8 phy_in_use;
+	struct iss_csi2_device *csi2;
+
+	/* memory resources, as defined in enum iss_mem_resources */
+	unsigned int cfg_regs;
+	unsigned int phy_regs;
+
+	u8 max_data_lanes;	/* number of CSI2 Data Lanes supported */
+	u8 used_data_lanes;	/* number of CSI2 Data Lanes used */
+	struct iss_csiphy_lanes_cfg lanes;
+	struct iss_csiphy_dphy_cfg dphy;
+};
+
+int omap4iss_csiphy_config(struct iss_device *iss,
+			   struct v4l2_subdev *csi2_subdev);
+int omap4iss_csiphy_acquire(struct iss_csiphy *phy);
+void omap4iss_csiphy_release(struct iss_csiphy *phy);
+int omap4iss_csiphy_init(struct iss_device *iss);
+
+#endif	/* OMAP4_ISS_CSI_PHY_H */
diff --git a/drivers/staging/media/omap4iss/iss_ipipe.c b/drivers/staging/media/omap4iss/iss_ipipe.c
new file mode 100644
index 0000000..6eaafc5
--- /dev/null
+++ b/drivers/staging/media/omap4iss/iss_ipipe.c
@@ -0,0 +1,570 @@
+/*
+ * TI OMAP4 ISS V4L2 Driver - ISP IPIPE module
+ *
+ * Copyright (C) 2012 Texas Instruments, Inc.
+ *
+ * Author: Sergio Aguirre <sergio.a.aguirre@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include <linux/module.h>
+#include <linux/uaccess.h>
+#include <linux/delay.h>
+#include <linux/device.h>
+#include <linux/dma-mapping.h>
+#include <linux/mm.h>
+#include <linux/sched.h>
+
+#include "iss.h"
+#include "iss_regs.h"
+#include "iss_ipipe.h"
+
+static struct v4l2_mbus_framefmt *
+__ipipe_get_format(struct iss_ipipe_device *ipipe, struct v4l2_subdev_fh *fh,
+		  unsigned int pad, enum v4l2_subdev_format_whence which);
+
+static const unsigned int ipipe_fmts[] = {
+	V4L2_MBUS_FMT_SGRBG10_1X10,
+	V4L2_MBUS_FMT_SRGGB10_1X10,
+	V4L2_MBUS_FMT_SBGGR10_1X10,
+	V4L2_MBUS_FMT_SGBRG10_1X10,
+};
+
+/*
+ * ipipe_print_status - Print current IPIPE Module register values.
+ * @ipipe: Pointer to ISS ISP IPIPE device.
+ *
+ * Also prints other debug information stored in the IPIPE module.
+ */
+#define IPIPE_PRINT_REGISTER(iss, name)\
+	dev_dbg(iss->dev, "###IPIPE " #name "=0x%08x\n", \
+		iss_reg_read(iss, OMAP4_ISS_MEM_ISP_IPIPE, IPIPE_##name))
+
+static void ipipe_print_status(struct iss_ipipe_device *ipipe)
+{
+	struct iss_device *iss = to_iss_device(ipipe);
+
+	dev_dbg(iss->dev, "-------------IPIPE Register dump-------------\n");
+
+	IPIPE_PRINT_REGISTER(iss, SRC_EN);
+	IPIPE_PRINT_REGISTER(iss, SRC_MODE);
+	IPIPE_PRINT_REGISTER(iss, SRC_FMT);
+	IPIPE_PRINT_REGISTER(iss, SRC_COL);
+	IPIPE_PRINT_REGISTER(iss, SRC_VPS);
+	IPIPE_PRINT_REGISTER(iss, SRC_VSZ);
+	IPIPE_PRINT_REGISTER(iss, SRC_HPS);
+	IPIPE_PRINT_REGISTER(iss, SRC_HSZ);
+	IPIPE_PRINT_REGISTER(iss, GCK_MMR);
+	IPIPE_PRINT_REGISTER(iss, YUV_PHS);
+
+	dev_dbg(iss->dev, "-----------------------------------------------\n");
+}
+
+/*
+ * ipipe_enable - Enable/Disable IPIPE.
+ * @enable: enable flag
+ *
+ */
+static void ipipe_enable(struct iss_ipipe_device *ipipe, u8 enable)
+{
+	struct iss_device *iss = to_iss_device(ipipe);
+
+	iss_reg_update(iss, OMAP4_ISS_MEM_ISP_IPIPE, IPIPE_SRC_EN,
+		       IPIPE_SRC_EN_EN, enable ? IPIPE_SRC_EN_EN : 0);
+}
+
+/* -----------------------------------------------------------------------------
+ * Format- and pipeline-related configuration helpers
+ */
+
+static void ipipe_configure(struct iss_ipipe_device *ipipe)
+{
+	struct iss_device *iss = to_iss_device(ipipe);
+	struct v4l2_mbus_framefmt *format;
+
+	/* IPIPE_PAD_SINK */
+	format = &ipipe->formats[IPIPE_PAD_SINK];
+
+	/* NOTE: Currently just supporting pipeline IN: RGB, OUT: YUV422 */
+	iss_reg_write(iss, OMAP4_ISS_MEM_ISP_IPIPE, IPIPE_SRC_FMT,
+		      IPIPE_SRC_FMT_RAW2YUV);
+
+	/* Enable YUV444 -> YUV422 conversion */
+	iss_reg_write(iss, OMAP4_ISS_MEM_ISP_IPIPE, IPIPE_YUV_PHS,
+		      IPIPE_YUV_PHS_LPF);
+
+	iss_reg_write(iss, OMAP4_ISS_MEM_ISP_IPIPE, IPIPE_SRC_VPS, 0);
+	iss_reg_write(iss, OMAP4_ISS_MEM_ISP_IPIPE, IPIPE_SRC_HPS, 0);
+	iss_reg_write(iss, OMAP4_ISS_MEM_ISP_IPIPE, IPIPE_SRC_VSZ,
+		      (format->height - 2) & IPIPE_SRC_VSZ_MASK);
+	iss_reg_write(iss, OMAP4_ISS_MEM_ISP_IPIPE, IPIPE_SRC_HSZ,
+		      (format->width - 1) & IPIPE_SRC_HSZ_MASK);
+
+	/* Ignore the ipipeif_wrt signal and operate on-the-fly. */
+	iss_reg_clr(iss, OMAP4_ISS_MEM_ISP_IPIPE, IPIPE_SRC_MODE,
+		    IPIPE_SRC_MODE_WRT | IPIPE_SRC_MODE_OST);
+
+	/* HACK: Values tuned for Ducati SW (OV) */
+	iss_reg_write(iss, OMAP4_ISS_MEM_ISP_IPIPE, IPIPE_SRC_COL,
+		      IPIPE_SRC_COL_EE_B | IPIPE_SRC_COL_EO_GB |
+		      IPIPE_SRC_COL_OE_GR | IPIPE_SRC_COL_OO_R);
+
+	/* IPIPE_PAD_SOURCE_VP */
+	format = &ipipe->formats[IPIPE_PAD_SOURCE_VP];
+	/* Do nothing? */
+}
+
+/* -----------------------------------------------------------------------------
+ * V4L2 subdev operations
+ */
+
+/*
+ * ipipe_set_stream - Enable/Disable streaming on the IPIPE module
+ * @sd: ISP IPIPE V4L2 subdevice
+ * @enable: Enable/disable stream
+ */
+static int ipipe_set_stream(struct v4l2_subdev *sd, int enable)
+{
+	struct iss_ipipe_device *ipipe = v4l2_get_subdevdata(sd);
+	struct iss_device *iss = to_iss_device(ipipe);
+	int ret = 0;
+
+	if (ipipe->state == ISS_PIPELINE_STREAM_STOPPED) {
+		if (enable == ISS_PIPELINE_STREAM_STOPPED)
+			return 0;
+
+		omap4iss_isp_subclk_enable(iss, OMAP4_ISS_ISP_SUBCLK_IPIPE);
+
+		/* Enable clk_arm_g0 */
+		iss_reg_write(iss, OMAP4_ISS_MEM_ISP_IPIPE, IPIPE_GCK_MMR,
+			      IPIPE_GCK_MMR_REG);
+
+		/* Enable clk_pix_g[3:0] */
+		iss_reg_write(iss, OMAP4_ISS_MEM_ISP_IPIPE, IPIPE_GCK_PIX,
+			      IPIPE_GCK_PIX_G3 | IPIPE_GCK_PIX_G2 |
+			      IPIPE_GCK_PIX_G1 | IPIPE_GCK_PIX_G0);
+	}
+
+	switch (enable) {
+	case ISS_PIPELINE_STREAM_CONTINUOUS:
+
+		ipipe_configure(ipipe);
+		ipipe_print_status(ipipe);
+
+		atomic_set(&ipipe->stopping, 0);
+		ipipe_enable(ipipe, 1);
+		break;
+
+	case ISS_PIPELINE_STREAM_STOPPED:
+		if (ipipe->state == ISS_PIPELINE_STREAM_STOPPED)
+			return 0;
+		if (omap4iss_module_sync_idle(&sd->entity, &ipipe->wait,
+					      &ipipe->stopping))
+			ret = -ETIMEDOUT;
+
+		ipipe_enable(ipipe, 0);
+		omap4iss_isp_subclk_disable(iss, OMAP4_ISS_ISP_SUBCLK_IPIPE);
+		break;
+	}
+
+	ipipe->state = enable;
+	return ret;
+}
+
+static struct v4l2_mbus_framefmt *
+__ipipe_get_format(struct iss_ipipe_device *ipipe, struct v4l2_subdev_fh *fh,
+		  unsigned int pad, enum v4l2_subdev_format_whence which)
+{
+	if (which == V4L2_SUBDEV_FORMAT_TRY)
+		return v4l2_subdev_get_try_format(fh, pad);
+	else
+		return &ipipe->formats[pad];
+}
+
+/*
+ * ipipe_try_format - Try video format on a pad
+ * @ipipe: ISS IPIPE device
+ * @fh : V4L2 subdev file handle
+ * @pad: Pad number
+ * @fmt: Format
+ */
+static void
+ipipe_try_format(struct iss_ipipe_device *ipipe, struct v4l2_subdev_fh *fh,
+		unsigned int pad, struct v4l2_mbus_framefmt *fmt,
+		enum v4l2_subdev_format_whence which)
+{
+	struct v4l2_mbus_framefmt *format;
+	unsigned int width = fmt->width;
+	unsigned int height = fmt->height;
+	unsigned int i;
+
+	switch (pad) {
+	case IPIPE_PAD_SINK:
+		for (i = 0; i < ARRAY_SIZE(ipipe_fmts); i++) {
+			if (fmt->code == ipipe_fmts[i])
+				break;
+		}
+
+		/* If not found, use SGRBG10 as default */
+		if (i >= ARRAY_SIZE(ipipe_fmts))
+			fmt->code = V4L2_MBUS_FMT_SGRBG10_1X10;
+
+		/* Clamp the input size. */
+		fmt->width = clamp_t(u32, width, 1, 8192);
+		fmt->height = clamp_t(u32, height, 1, 8192);
+		fmt->colorspace = V4L2_COLORSPACE_SRGB;
+		break;
+
+	case IPIPE_PAD_SOURCE_VP:
+		format = __ipipe_get_format(ipipe, fh, IPIPE_PAD_SINK, which);
+		memcpy(fmt, format, sizeof(*fmt));
+
+		fmt->code = V4L2_MBUS_FMT_UYVY8_1X16;
+		fmt->width = clamp_t(u32, width, 32, fmt->width);
+		fmt->height = clamp_t(u32, height, 32, fmt->height);
+		fmt->colorspace = V4L2_COLORSPACE_JPEG;
+		break;
+	}
+
+	fmt->field = V4L2_FIELD_NONE;
+}
+
+/*
+ * ipipe_enum_mbus_code - Handle pixel format enumeration
+ * @sd     : pointer to v4l2 subdev structure
+ * @fh : V4L2 subdev file handle
+ * @code   : pointer to v4l2_subdev_mbus_code_enum structure
+ * return -EINVAL or zero on success
+ */
+static int ipipe_enum_mbus_code(struct v4l2_subdev *sd,
+			       struct v4l2_subdev_fh *fh,
+			       struct v4l2_subdev_mbus_code_enum *code)
+{
+	switch (code->pad) {
+	case IPIPE_PAD_SINK:
+		if (code->index >= ARRAY_SIZE(ipipe_fmts))
+			return -EINVAL;
+
+		code->code = ipipe_fmts[code->index];
+		break;
+
+	case IPIPE_PAD_SOURCE_VP:
+		/* FIXME: Forced format conversion inside IPIPE? */
+		if (code->index != 0)
+			return -EINVAL;
+
+		code->code = V4L2_MBUS_FMT_UYVY8_1X16;
+		break;
+
+	default:
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+static int ipipe_enum_frame_size(struct v4l2_subdev *sd,
+				struct v4l2_subdev_fh *fh,
+				struct v4l2_subdev_frame_size_enum *fse)
+{
+	struct iss_ipipe_device *ipipe = v4l2_get_subdevdata(sd);
+	struct v4l2_mbus_framefmt format;
+
+	if (fse->index != 0)
+		return -EINVAL;
+
+	format.code = fse->code;
+	format.width = 1;
+	format.height = 1;
+	ipipe_try_format(ipipe, fh, fse->pad, &format, V4L2_SUBDEV_FORMAT_TRY);
+	fse->min_width = format.width;
+	fse->min_height = format.height;
+
+	if (format.code != fse->code)
+		return -EINVAL;
+
+	format.code = fse->code;
+	format.width = -1;
+	format.height = -1;
+	ipipe_try_format(ipipe, fh, fse->pad, &format, V4L2_SUBDEV_FORMAT_TRY);
+	fse->max_width = format.width;
+	fse->max_height = format.height;
+
+	return 0;
+}
+
+/*
+ * ipipe_get_format - Retrieve the video format on a pad
+ * @sd : ISP IPIPE V4L2 subdevice
+ * @fh : V4L2 subdev file handle
+ * @fmt: Format
+ *
+ * Return 0 on success or -EINVAL if the pad is invalid or doesn't correspond
+ * to the format type.
+ */
+static int ipipe_get_format(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh,
+			   struct v4l2_subdev_format *fmt)
+{
+	struct iss_ipipe_device *ipipe = v4l2_get_subdevdata(sd);
+	struct v4l2_mbus_framefmt *format;
+
+	format = __ipipe_get_format(ipipe, fh, fmt->pad, fmt->which);
+	if (format == NULL)
+		return -EINVAL;
+
+	fmt->format = *format;
+	return 0;
+}
+
+/*
+ * ipipe_set_format - Set the video format on a pad
+ * @sd : ISP IPIPE V4L2 subdevice
+ * @fh : V4L2 subdev file handle
+ * @fmt: Format
+ *
+ * Return 0 on success or -EINVAL if the pad is invalid or doesn't correspond
+ * to the format type.
+ */
+static int ipipe_set_format(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh,
+			   struct v4l2_subdev_format *fmt)
+{
+	struct iss_ipipe_device *ipipe = v4l2_get_subdevdata(sd);
+	struct v4l2_mbus_framefmt *format;
+
+	format = __ipipe_get_format(ipipe, fh, fmt->pad, fmt->which);
+	if (format == NULL)
+		return -EINVAL;
+
+	ipipe_try_format(ipipe, fh, fmt->pad, &fmt->format, fmt->which);
+	*format = fmt->format;
+
+	/* Propagate the format from sink to source */
+	if (fmt->pad == IPIPE_PAD_SINK) {
+		format = __ipipe_get_format(ipipe, fh, IPIPE_PAD_SOURCE_VP,
+					   fmt->which);
+		*format = fmt->format;
+		ipipe_try_format(ipipe, fh, IPIPE_PAD_SOURCE_VP, format,
+				fmt->which);
+	}
+
+	return 0;
+}
+
+static int ipipe_link_validate(struct v4l2_subdev *sd, struct media_link *link,
+				 struct v4l2_subdev_format *source_fmt,
+				 struct v4l2_subdev_format *sink_fmt)
+{
+	/* Check if the two ends match */
+	if (source_fmt->format.width != sink_fmt->format.width ||
+	    source_fmt->format.height != sink_fmt->format.height)
+		return -EPIPE;
+
+	if (source_fmt->format.code != sink_fmt->format.code)
+		return -EPIPE;
+
+	return 0;
+}
+
+/*
+ * ipipe_init_formats - Initialize formats on all pads
+ * @sd: ISP IPIPE V4L2 subdevice
+ * @fh: V4L2 subdev file handle
+ *
+ * Initialize all pad formats with default values. If fh is not NULL, try
+ * formats are initialized on the file handle. Otherwise active formats are
+ * initialized on the device.
+ */
+static int ipipe_init_formats(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh)
+{
+	struct v4l2_subdev_format format;
+
+	memset(&format, 0, sizeof(format));
+	format.pad = IPIPE_PAD_SINK;
+	format.which = fh ? V4L2_SUBDEV_FORMAT_TRY : V4L2_SUBDEV_FORMAT_ACTIVE;
+	format.format.code = V4L2_MBUS_FMT_SGRBG10_1X10;
+	format.format.width = 4096;
+	format.format.height = 4096;
+	ipipe_set_format(sd, fh, &format);
+
+	return 0;
+}
+
+/* V4L2 subdev video operations */
+static const struct v4l2_subdev_video_ops ipipe_v4l2_video_ops = {
+	.s_stream = ipipe_set_stream,
+};
+
+/* V4L2 subdev pad operations */
+static const struct v4l2_subdev_pad_ops ipipe_v4l2_pad_ops = {
+	.enum_mbus_code = ipipe_enum_mbus_code,
+	.enum_frame_size = ipipe_enum_frame_size,
+	.get_fmt = ipipe_get_format,
+	.set_fmt = ipipe_set_format,
+	.link_validate = ipipe_link_validate,
+};
+
+/* V4L2 subdev operations */
+static const struct v4l2_subdev_ops ipipe_v4l2_ops = {
+	.video = &ipipe_v4l2_video_ops,
+	.pad = &ipipe_v4l2_pad_ops,
+};
+
+/* V4L2 subdev internal operations */
+static const struct v4l2_subdev_internal_ops ipipe_v4l2_internal_ops = {
+	.open = ipipe_init_formats,
+};
+
+/* -----------------------------------------------------------------------------
+ * Media entity operations
+ */
+
+/*
+ * ipipe_link_setup - Setup IPIPE connections
+ * @entity: IPIPE media entity
+ * @local: Pad at the local end of the link
+ * @remote: Pad at the remote end of the link
+ * @flags: Link flags
+ *
+ * return -EINVAL or zero on success
+ */
+static int ipipe_link_setup(struct media_entity *entity,
+			   const struct media_pad *local,
+			   const struct media_pad *remote, u32 flags)
+{
+	struct v4l2_subdev *sd = media_entity_to_v4l2_subdev(entity);
+	struct iss_ipipe_device *ipipe = v4l2_get_subdevdata(sd);
+	struct iss_device *iss = to_iss_device(ipipe);
+
+	switch (local->index | media_entity_type(remote->entity)) {
+	case IPIPE_PAD_SINK | MEDIA_ENT_T_V4L2_SUBDEV:
+		/* Read from IPIPEIF. */
+		if (!(flags & MEDIA_LNK_FL_ENABLED)) {
+			ipipe->input = IPIPE_INPUT_NONE;
+			break;
+		}
+
+		if (ipipe->input != IPIPE_INPUT_NONE)
+			return -EBUSY;
+
+		if (remote->entity == &iss->ipipeif.subdev.entity)
+			ipipe->input = IPIPE_INPUT_IPIPEIF;
+
+		break;
+
+	case IPIPE_PAD_SOURCE_VP | MEDIA_ENT_T_V4L2_SUBDEV:
+		/* Send to RESIZER */
+		if (flags & MEDIA_LNK_FL_ENABLED) {
+			if (ipipe->output & ~IPIPE_OUTPUT_VP)
+				return -EBUSY;
+			ipipe->output |= IPIPE_OUTPUT_VP;
+		} else {
+			ipipe->output &= ~IPIPE_OUTPUT_VP;
+		}
+		break;
+
+	default:
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+/* media operations */
+static const struct media_entity_operations ipipe_media_ops = {
+	.link_setup = ipipe_link_setup,
+	.link_validate = v4l2_subdev_link_validate,
+};
+
+/*
+ * ipipe_init_entities - Initialize V4L2 subdev and media entity
+ * @ipipe: ISS ISP IPIPE module
+ *
+ * Return 0 on success and a negative error code on failure.
+ */
+static int ipipe_init_entities(struct iss_ipipe_device *ipipe)
+{
+	struct v4l2_subdev *sd = &ipipe->subdev;
+	struct media_pad *pads = ipipe->pads;
+	struct media_entity *me = &sd->entity;
+	int ret;
+
+	ipipe->input = IPIPE_INPUT_NONE;
+
+	v4l2_subdev_init(sd, &ipipe_v4l2_ops);
+	sd->internal_ops = &ipipe_v4l2_internal_ops;
+	strlcpy(sd->name, "OMAP4 ISS ISP IPIPE", sizeof(sd->name));
+	sd->grp_id = 1 << 16;	/* group ID for iss subdevs */
+	v4l2_set_subdevdata(sd, ipipe);
+	sd->flags |= V4L2_SUBDEV_FL_HAS_DEVNODE;
+
+	pads[IPIPE_PAD_SINK].flags = MEDIA_PAD_FL_SINK;
+	pads[IPIPE_PAD_SOURCE_VP].flags = MEDIA_PAD_FL_SOURCE;
+
+	me->ops = &ipipe_media_ops;
+	ret = media_entity_init(me, IPIPE_PADS_NUM, pads, 0);
+	if (ret < 0)
+		return ret;
+
+	ipipe_init_formats(sd, NULL);
+
+	return 0;
+}
+
+void omap4iss_ipipe_unregister_entities(struct iss_ipipe_device *ipipe)
+{
+	media_entity_cleanup(&ipipe->subdev.entity);
+
+	v4l2_device_unregister_subdev(&ipipe->subdev);
+}
+
+int omap4iss_ipipe_register_entities(struct iss_ipipe_device *ipipe,
+	struct v4l2_device *vdev)
+{
+	int ret;
+
+	/* Register the subdev and video node. */
+	ret = v4l2_device_register_subdev(vdev, &ipipe->subdev);
+	if (ret < 0)
+		goto error;
+
+	return 0;
+
+error:
+	omap4iss_ipipe_unregister_entities(ipipe);
+	return ret;
+}
+
+/* -----------------------------------------------------------------------------
+ * ISP IPIPE initialisation and cleanup
+ */
+
+/*
+ * omap4iss_ipipe_init - IPIPE module initialization.
+ * @iss: Device pointer specific to the OMAP4 ISS.
+ *
+ * TODO: Get the initialisation values from platform data.
+ *
+ * Return 0 on success or a negative error code otherwise.
+ */
+int omap4iss_ipipe_init(struct iss_device *iss)
+{
+	struct iss_ipipe_device *ipipe = &iss->ipipe;
+
+	ipipe->state = ISS_PIPELINE_STREAM_STOPPED;
+	init_waitqueue_head(&ipipe->wait);
+
+	return ipipe_init_entities(ipipe);
+}
+
+/*
+ * omap4iss_ipipe_cleanup - IPIPE module cleanup.
+ * @iss: Device pointer specific to the OMAP4 ISS.
+ */
+void omap4iss_ipipe_cleanup(struct iss_device *iss)
+{
+	/* FIXME: are you sure there's nothing to do? */
+}
diff --git a/drivers/staging/media/omap4iss/iss_ipipe.h b/drivers/staging/media/omap4iss/iss_ipipe.h
new file mode 100644
index 0000000..c22d904
--- /dev/null
+++ b/drivers/staging/media/omap4iss/iss_ipipe.h
@@ -0,0 +1,67 @@
+/*
+ * TI OMAP4 ISS V4L2 Driver - ISP IPIPE module
+ *
+ * Copyright (C) 2012 Texas Instruments, Inc.
+ *
+ * Author: Sergio Aguirre <sergio.a.aguirre@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#ifndef OMAP4_ISS_IPIPE_H
+#define OMAP4_ISS_IPIPE_H
+
+#include "iss_video.h"
+
+enum ipipe_input_entity {
+	IPIPE_INPUT_NONE,
+	IPIPE_INPUT_IPIPEIF,
+};
+
+#define IPIPE_OUTPUT_VP		(1 << 0)
+
+/* Sink and source IPIPE pads */
+#define IPIPE_PAD_SINK				0
+#define IPIPE_PAD_SOURCE_VP			1
+#define IPIPE_PADS_NUM				2
+
+/*
+ * struct iss_ipipe_device - Structure for the IPIPE module to store its own
+ *			    information
+ * @subdev: V4L2 subdevice
+ * @pads: Sink and source media entity pads
+ * @formats: Active video formats
+ * @input: Active input
+ * @output: Active outputs
+ * @error: A hardware error occurred during capture
+ * @state: Streaming state
+ * @wait: Wait queue used to stop the module
+ * @stopping: Stopping state
+ */
+struct iss_ipipe_device {
+	struct v4l2_subdev subdev;
+	struct media_pad pads[IPIPE_PADS_NUM];
+	struct v4l2_mbus_framefmt formats[IPIPE_PADS_NUM];
+
+	enum ipipe_input_entity input;
+	unsigned int output;
+	unsigned int error;
+
+	enum iss_pipeline_stream_state state;
+	wait_queue_head_t wait;
+	atomic_t stopping;
+};
+
+struct iss_device;
+
+int omap4iss_ipipe_register_entities(struct iss_ipipe_device *ipipe,
+	struct v4l2_device *vdev);
+void omap4iss_ipipe_unregister_entities(struct iss_ipipe_device *ipipe);
+
+int omap4iss_ipipe_init(struct iss_device *iss);
+void omap4iss_ipipe_cleanup(struct iss_device *iss);
+
+#endif	/* OMAP4_ISS_IPIPE_H */
diff --git a/drivers/staging/media/omap4iss/iss_ipipeif.c b/drivers/staging/media/omap4iss/iss_ipipeif.c
new file mode 100644
index 0000000..7bc1457
--- /dev/null
+++ b/drivers/staging/media/omap4iss/iss_ipipeif.c
@@ -0,0 +1,849 @@
+/*
+ * TI OMAP4 ISS V4L2 Driver - ISP IPIPEIF module
+ *
+ * Copyright (C) 2012 Texas Instruments, Inc.
+ *
+ * Author: Sergio Aguirre <sergio.a.aguirre@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include <linux/module.h>
+#include <linux/uaccess.h>
+#include <linux/delay.h>
+#include <linux/device.h>
+#include <linux/dma-mapping.h>
+#include <linux/mm.h>
+#include <linux/sched.h>
+
+#include "iss.h"
+#include "iss_regs.h"
+#include "iss_ipipeif.h"
+
+static const unsigned int ipipeif_fmts[] = {
+	V4L2_MBUS_FMT_SGRBG10_1X10,
+	V4L2_MBUS_FMT_SRGGB10_1X10,
+	V4L2_MBUS_FMT_SBGGR10_1X10,
+	V4L2_MBUS_FMT_SGBRG10_1X10,
+	V4L2_MBUS_FMT_UYVY8_1X16,
+	V4L2_MBUS_FMT_YUYV8_1X16,
+};
+
+/*
+ * ipipeif_print_status - Print current IPIPEIF Module register values.
+ * @ipipeif: Pointer to ISS ISP IPIPEIF device.
+ *
+ * Also prints other debug information stored in the IPIPEIF module.
+ */
+#define IPIPEIF_PRINT_REGISTER(iss, name)\
+	dev_dbg(iss->dev, "###IPIPEIF " #name "=0x%08x\n", \
+		iss_reg_read(iss, OMAP4_ISS_MEM_ISP_IPIPEIF, IPIPEIF_##name))
+
+#define ISIF_PRINT_REGISTER(iss, name)\
+	dev_dbg(iss->dev, "###ISIF " #name "=0x%08x\n", \
+		iss_reg_read(iss, OMAP4_ISS_MEM_ISP_ISIF, ISIF_##name))
+
+#define ISP5_PRINT_REGISTER(iss, name)\
+	dev_dbg(iss->dev, "###ISP5 " #name "=0x%08x\n", \
+		iss_reg_read(iss, OMAP4_ISS_MEM_ISP_SYS1, ISP5_##name))
+
+static void ipipeif_print_status(struct iss_ipipeif_device *ipipeif)
+{
+	struct iss_device *iss = to_iss_device(ipipeif);
+
+	dev_dbg(iss->dev, "-------------IPIPEIF Register dump-------------\n");
+
+	IPIPEIF_PRINT_REGISTER(iss, CFG1);
+	IPIPEIF_PRINT_REGISTER(iss, CFG2);
+
+	ISIF_PRINT_REGISTER(iss, SYNCEN);
+	ISIF_PRINT_REGISTER(iss, CADU);
+	ISIF_PRINT_REGISTER(iss, CADL);
+	ISIF_PRINT_REGISTER(iss, MODESET);
+	ISIF_PRINT_REGISTER(iss, CCOLP);
+	ISIF_PRINT_REGISTER(iss, SPH);
+	ISIF_PRINT_REGISTER(iss, LNH);
+	ISIF_PRINT_REGISTER(iss, LNV);
+	ISIF_PRINT_REGISTER(iss, VDINT(0));
+	ISIF_PRINT_REGISTER(iss, HSIZE);
+
+	ISP5_PRINT_REGISTER(iss, SYSCONFIG);
+	ISP5_PRINT_REGISTER(iss, CTRL);
+	ISP5_PRINT_REGISTER(iss, IRQSTATUS(0));
+	ISP5_PRINT_REGISTER(iss, IRQENABLE_SET(0));
+	ISP5_PRINT_REGISTER(iss, IRQENABLE_CLR(0));
+
+	dev_dbg(iss->dev, "-----------------------------------------------\n");
+}
+
+static void ipipeif_write_enable(struct iss_ipipeif_device *ipipeif, u8 enable)
+{
+	struct iss_device *iss = to_iss_device(ipipeif);
+
+	iss_reg_update(iss, OMAP4_ISS_MEM_ISP_ISIF, ISIF_SYNCEN,
+		       ISIF_SYNCEN_DWEN, enable ? ISIF_SYNCEN_DWEN : 0);
+}
+
+/*
+ * ipipeif_enable - Enable/Disable IPIPEIF.
+ * @enable: enable flag
+ *
+ */
+static void ipipeif_enable(struct iss_ipipeif_device *ipipeif, u8 enable)
+{
+	struct iss_device *iss = to_iss_device(ipipeif);
+
+	iss_reg_update(iss, OMAP4_ISS_MEM_ISP_ISIF, ISIF_SYNCEN,
+		       ISIF_SYNCEN_SYEN, enable ? ISIF_SYNCEN_SYEN : 0);
+}
+
+/* -----------------------------------------------------------------------------
+ * Format- and pipeline-related configuration helpers
+ */
+
+/*
+ * ipipeif_set_outaddr - Set memory address to save output image
+ * @ipipeif: Pointer to ISP IPIPEIF device.
+ * @addr: 32-bit memory address aligned on 32 byte boundary.
+ *
+ * Sets the memory address where the output will be saved.
+ */
+static void ipipeif_set_outaddr(struct iss_ipipeif_device *ipipeif, u32 addr)
+{
+	struct iss_device *iss = to_iss_device(ipipeif);
+
+	/* Save the address split into Base Address H & L */
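+	/* Bits [4:0] are dropped since the address is 32-byte aligned. */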
+	iss_reg_write(iss, OMAP4_ISS_MEM_ISP_ISIF, ISIF_CADU,
+		      (addr >> (16 + 5)) & ISIF_CADU_MASK);
+	iss_reg_write(iss, OMAP4_ISS_MEM_ISP_ISIF, ISIF_CADL,
+		      (addr >> 5) & ISIF_CADL_MASK);
+}
+
+static void ipipeif_configure(struct iss_ipipeif_device *ipipeif)
+{
+	struct iss_device *iss = to_iss_device(ipipeif);
+	const struct iss_format_info *info;
+	struct v4l2_mbus_framefmt *format;
+	u32 isif_ccolp = 0;
+
+	omap4iss_configure_bridge(iss, ipipeif->input);
+
+	/* IPIPEIF_PAD_SINK */
+	format = &ipipeif->formats[IPIPEIF_PAD_SINK];
+
+	/* IPIPEIF with YUV422 input from ISIF */
+	iss_reg_clr(iss, OMAP4_ISS_MEM_ISP_IPIPEIF, IPIPEIF_CFG1,
+		    IPIPEIF_CFG1_INPSRC1_MASK | IPIPEIF_CFG1_INPSRC2_MASK);
+
+	/* Select ISIF/IPIPEIF input format */
+	switch (format->code) {
+	case V4L2_MBUS_FMT_UYVY8_1X16:
+	case V4L2_MBUS_FMT_YUYV8_1X16:
+		iss_reg_update(iss, OMAP4_ISS_MEM_ISP_ISIF, ISIF_MODESET,
+			       ISIF_MODESET_CCDMD | ISIF_MODESET_INPMOD_MASK |
+			       ISIF_MODESET_CCDW_MASK,
+			       ISIF_MODESET_INPMOD_YCBCR16);
+
+		iss_reg_update(iss, OMAP4_ISS_MEM_ISP_IPIPEIF, IPIPEIF_CFG2,
+			       IPIPEIF_CFG2_YUV8, IPIPEIF_CFG2_YUV16);
+
+		break;
+	case V4L2_MBUS_FMT_SGRBG10_1X10:
+		isif_ccolp = ISIF_CCOLP_CP0_F0_GR |
+			ISIF_CCOLP_CP1_F0_R |
+			ISIF_CCOLP_CP2_F0_B |
+			ISIF_CCOLP_CP3_F0_GB;
+		goto cont_raw;
+	case V4L2_MBUS_FMT_SRGGB10_1X10:
+		isif_ccolp = ISIF_CCOLP_CP0_F0_R |
+			ISIF_CCOLP_CP1_F0_GR |
+			ISIF_CCOLP_CP2_F0_GB |
+			ISIF_CCOLP_CP3_F0_B;
+		goto cont_raw;
+	case V4L2_MBUS_FMT_SBGGR10_1X10:
+		isif_ccolp = ISIF_CCOLP_CP0_F0_B |
+			ISIF_CCOLP_CP1_F0_GB |
+			ISIF_CCOLP_CP2_F0_GR |
+			ISIF_CCOLP_CP3_F0_R;
+		goto cont_raw;
+	case V4L2_MBUS_FMT_SGBRG10_1X10:
+		isif_ccolp = ISIF_CCOLP_CP0_F0_GB |
+			ISIF_CCOLP_CP1_F0_B |
+			ISIF_CCOLP_CP2_F0_R |
+			ISIF_CCOLP_CP3_F0_GR;
+cont_raw:
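+		/* Configuration common to all raw Bayer variants. */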
+		iss_reg_clr(iss, OMAP4_ISS_MEM_ISP_IPIPEIF, IPIPEIF_CFG2,
+			    IPIPEIF_CFG2_YUV16);
+
+		iss_reg_update(iss, OMAP4_ISS_MEM_ISP_ISIF, ISIF_MODESET,
+			       ISIF_MODESET_CCDMD | ISIF_MODESET_INPMOD_MASK |
+			       ISIF_MODESET_CCDW_MASK, ISIF_MODESET_INPMOD_RAW |
+			       ISIF_MODESET_CCDW_2BIT);
+
+		info = omap4iss_video_format_info(format->code);
+		iss_reg_update(iss, OMAP4_ISS_MEM_ISP_ISIF, ISIF_CGAMMAWD,
+			       ISIF_CGAMMAWD_GWDI_MASK,
+			       ISIF_CGAMMAWD_GWDI(info->bpp));
+
+		/* Set RAW Bayer pattern */
+		iss_reg_write(iss, OMAP4_ISS_MEM_ISP_ISIF, ISIF_CCOLP,
+			      isif_ccolp);
+		break;
+	}
+
+	iss_reg_write(iss, OMAP4_ISS_MEM_ISP_ISIF, ISIF_SPH, 0 & ISIF_SPH_MASK);
+	iss_reg_write(iss, OMAP4_ISS_MEM_ISP_ISIF, ISIF_LNH,
+		      (format->width - 1) & ISIF_LNH_MASK);
+	iss_reg_write(iss, OMAP4_ISS_MEM_ISP_ISIF, ISIF_LNV,
+		      (format->height - 1) & ISIF_LNV_MASK);
+
+	/* Generate ISIF0 on the last line of the image */
+	iss_reg_write(iss, OMAP4_ISS_MEM_ISP_ISIF, ISIF_VDINT(0),
+		      format->height - 1);
+
+	/* IPIPEIF_PAD_SOURCE_ISIF_SF */
+	format = &ipipeif->formats[IPIPEIF_PAD_SOURCE_ISIF_SF];
+
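+	/* The line offset is programmed in 32-byte units, hence the shift. */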
+	iss_reg_write(iss, OMAP4_ISS_MEM_ISP_ISIF, ISIF_HSIZE,
+		      (ipipeif->video_out.bpl_value >> 5) &
+		      ISIF_HSIZE_HSIZE_MASK);
+
+	/* IPIPEIF_PAD_SOURCE_VP */
+	/* Do nothing? */
+}
+
+/* -----------------------------------------------------------------------------
+ * Interrupt handling
+ */
+
+static void ipipeif_isr_buffer(struct iss_ipipeif_device *ipipeif)
+{
+	struct iss_buffer *buffer;
+
+	/* The ISIF generates VD0 interrupts even when writes are disabled,
+	 * so we have to deal with them anyway. Disabling the ISIF when no
+	 * buffer is available is thus not enough; we need to handle the
+	 * situation explicitly.
+	 */
+	if (list_empty(&ipipeif->video_out.dmaqueue))
+		return;
+
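+	/* Pause memory writes while the output address is reprogrammed. */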
+	ipipeif_write_enable(ipipeif, 0);
+
+	buffer = omap4iss_video_buffer_next(&ipipeif->video_out);
+	if (buffer == NULL)
+		return;
+
+	ipipeif_set_outaddr(ipipeif, buffer->iss_addr);
+
+	ipipeif_write_enable(ipipeif, 1);
+}
+
+/*
+ * ipipeif_isif0_isr - Handle ISIF0 event
+ * @ipipeif: Pointer to ISP IPIPEIF device.
+ *
+ * Updates the pipeline frame number and handles the output buffer, if any.
+ */
+static void ipipeif_isif0_isr(struct iss_ipipeif_device *ipipeif)
+{
+	struct iss_pipeline *pipe =
+			     to_iss_pipeline(&ipipeif->subdev.entity);
+	if (pipe->do_propagation)
+		atomic_inc(&pipe->frame_number);
+
+	if (ipipeif->output & IPIPEIF_OUTPUT_MEMORY)
+		ipipeif_isr_buffer(ipipeif);
+}
+
+/*
+ * omap4iss_ipipeif_isr - Configure ipipeif during interframe time.
+ * @ipipeif: Pointer to ISP IPIPEIF device.
+ * @events: IPIPEIF events
+ */
+void omap4iss_ipipeif_isr(struct iss_ipipeif_device *ipipeif, u32 events)
+{
+	if (omap4iss_module_sync_is_stopping(&ipipeif->wait,
+					     &ipipeif->stopping))
+		return;
+
+	if (events & ISP5_IRQ_ISIF_INT(0))
+		ipipeif_isif0_isr(ipipeif);
+}
+
+/* -----------------------------------------------------------------------------
+ * ISP video operations
+ */
+
+static int ipipeif_video_queue(struct iss_video *video,
+			       struct iss_buffer *buffer)
+{
+	struct iss_ipipeif_device *ipipeif = container_of(video,
+				struct iss_ipipeif_device, video_out);
+
+	if (!(ipipeif->output & IPIPEIF_OUTPUT_MEMORY))
+		return -ENODEV;
+
+	ipipeif_set_outaddr(ipipeif, buffer->iss_addr);
+
+	/*
+	 * If streaming was enabled before there was a buffer queued
+	 * or underrun happened in the ISR, the hardware was not enabled
+	 * and DMA queue flag ISS_VIDEO_DMAQUEUE_UNDERRUN is still set.
+	 * Enable it now.
+	 */
+	if (video->dmaqueue_flags & ISS_VIDEO_DMAQUEUE_UNDERRUN) {
+		if (ipipeif->output & IPIPEIF_OUTPUT_MEMORY)
+			ipipeif_write_enable(ipipeif, 1);
+		ipipeif_enable(ipipeif, 1);
+		iss_video_dmaqueue_flags_clr(video);
+	}
+
+	return 0;
+}
+
+static const struct iss_video_operations ipipeif_video_ops = {
+	.queue = ipipeif_video_queue,
+};
+
+/* -----------------------------------------------------------------------------
+ * V4L2 subdev operations
+ */
+
+#define IPIPEIF_DRV_SUBCLK_MASK	(OMAP4_ISS_ISP_SUBCLK_IPIPEIF |\
+				 OMAP4_ISS_ISP_SUBCLK_ISIF)
+/*
+ * ipipeif_set_stream - Enable/Disable streaming on the IPIPEIF module
+ * @sd: ISP IPIPEIF V4L2 subdevice
+ * @enable: Enable/disable stream
+ */
+static int ipipeif_set_stream(struct v4l2_subdev *sd, int enable)
+{
+	struct iss_ipipeif_device *ipipeif = v4l2_get_subdevdata(sd);
+	struct iss_device *iss = to_iss_device(ipipeif);
+	struct iss_video *video_out = &ipipeif->video_out;
+	int ret = 0;
+
+	if (ipipeif->state == ISS_PIPELINE_STREAM_STOPPED) {
+		if (enable == ISS_PIPELINE_STREAM_STOPPED)
+			return 0;
+
+		omap4iss_isp_subclk_enable(iss, IPIPEIF_DRV_SUBCLK_MASK);
+	}
+
+	switch (enable) {
+	case ISS_PIPELINE_STREAM_CONTINUOUS:
+
+		ipipeif_configure(ipipeif);
+		ipipeif_print_status(ipipeif);
+
+		/*
+		 * When outputting to memory with no buffer available, let the
+		 * buffer queue handler start the hardware. A DMA queue flag
+		 * ISS_VIDEO_DMAQUEUE_QUEUED will be set as soon as there is
+		 * a buffer available.
+		 */
+		if (ipipeif->output & IPIPEIF_OUTPUT_MEMORY &&
+		    !(video_out->dmaqueue_flags & ISS_VIDEO_DMAQUEUE_QUEUED))
+			break;
+
+		atomic_set(&ipipeif->stopping, 0);
+		if (ipipeif->output & IPIPEIF_OUTPUT_MEMORY)
+			ipipeif_write_enable(ipipeif, 1);
+		ipipeif_enable(ipipeif, 1);
+		iss_video_dmaqueue_flags_clr(video_out);
+		break;
+
+	case ISS_PIPELINE_STREAM_STOPPED:
+		if (ipipeif->state == ISS_PIPELINE_STREAM_STOPPED)
+			return 0;
+		if (omap4iss_module_sync_idle(&sd->entity, &ipipeif->wait,
+					      &ipipeif->stopping))
+			ret = -ETIMEDOUT;
+
+		if (ipipeif->output & IPIPEIF_OUTPUT_MEMORY)
+			ipipeif_write_enable(ipipeif, 0);
+		ipipeif_enable(ipipeif, 0);
+		omap4iss_isp_subclk_disable(iss, IPIPEIF_DRV_SUBCLK_MASK);
+		iss_video_dmaqueue_flags_clr(video_out);
+		break;
+	}
+
+	ipipeif->state = enable;
+	return ret;
+}
+
+static struct v4l2_mbus_framefmt *
+__ipipeif_get_format(struct iss_ipipeif_device *ipipeif,
+		     struct v4l2_subdev_fh *fh, unsigned int pad,
+		     enum v4l2_subdev_format_whence which)
+{
+	if (which == V4L2_SUBDEV_FORMAT_TRY)
+		return v4l2_subdev_get_try_format(fh, pad);
+	else
+		return &ipipeif->formats[pad];
+}
+
+/*
+ * ipipeif_try_format - Try video format on a pad
+ * @ipipeif: ISS IPIPEIF device
+ * @fh : V4L2 subdev file handle
+ * @pad: Pad number
+ * @fmt: Format
+ */
+static void
+ipipeif_try_format(struct iss_ipipeif_device *ipipeif,
+		   struct v4l2_subdev_fh *fh, unsigned int pad,
+		   struct v4l2_mbus_framefmt *fmt,
+		   enum v4l2_subdev_format_whence which)
+{
+	struct v4l2_mbus_framefmt *format;
+	unsigned int width = fmt->width;
+	unsigned int height = fmt->height;
+	unsigned int i;
+
+	switch (pad) {
+	case IPIPEIF_PAD_SINK:
+		/* TODO: If the IPIPEIF output formatter pad is connected
+		 * directly to the resizer, only YUV formats can be used.
+		 */
+		for (i = 0; i < ARRAY_SIZE(ipipeif_fmts); i++) {
+			if (fmt->code == ipipeif_fmts[i])
+				break;
+		}
+
+		/* If not found, use SGRBG10 as default */
+		if (i >= ARRAY_SIZE(ipipeif_fmts))
+			fmt->code = V4L2_MBUS_FMT_SGRBG10_1X10;
+
+		/* Clamp the input size. */
+		fmt->width = clamp_t(u32, width, 1, 8192);
+		fmt->height = clamp_t(u32, height, 1, 8192);
+		break;
+
+	case IPIPEIF_PAD_SOURCE_ISIF_SF:
+		format = __ipipeif_get_format(ipipeif, fh, IPIPEIF_PAD_SINK,
+					      which);
+		memcpy(fmt, format, sizeof(*fmt));
+
+		/* The data formatter truncates the number of horizontal output
+		 * pixels to a multiple of 16. To avoid clipping data, allow
+		 * callers to request an output size bigger than the input size
+		 * up to the nearest multiple of 16.
+		 */
+		fmt->width = clamp_t(u32, width, 32, (fmt->width + 15) & ~15);
+		fmt->width &= ~15;
+		fmt->height = clamp_t(u32, height, 32, fmt->height);
+		break;
+
+	case IPIPEIF_PAD_SOURCE_VP:
+		format = __ipipeif_get_format(ipipeif, fh, IPIPEIF_PAD_SINK,
+					      which);
+		memcpy(fmt, format, sizeof(*fmt));
+
+		fmt->width = clamp_t(u32, width, 32, fmt->width);
+		fmt->height = clamp_t(u32, height, 32, fmt->height);
+		break;
+	}
+
+	/* Data is written to memory unpacked: each 10-bit or 12-bit pixel is
+	 * stored in two bytes.
+	 */
+	fmt->colorspace = V4L2_COLORSPACE_SRGB;
+	fmt->field = V4L2_FIELD_NONE;
+}
+
+/*
+ * ipipeif_enum_mbus_code - Handle pixel format enumeration
+ * @sd     : pointer to v4l2 subdev structure
+ * @fh : V4L2 subdev file handle
+ * @code   : pointer to v4l2_subdev_mbus_code_enum structure
+ * return -EINVAL or zero on success
+ */
+static int ipipeif_enum_mbus_code(struct v4l2_subdev *sd,
+			       struct v4l2_subdev_fh *fh,
+			       struct v4l2_subdev_mbus_code_enum *code)
+{
+	struct iss_ipipeif_device *ipipeif = v4l2_get_subdevdata(sd);
+	struct v4l2_mbus_framefmt *format;
+
+	switch (code->pad) {
+	case IPIPEIF_PAD_SINK:
+		if (code->index >= ARRAY_SIZE(ipipeif_fmts))
+			return -EINVAL;
+
+		code->code = ipipeif_fmts[code->index];
+		break;
+
+	case IPIPEIF_PAD_SOURCE_ISIF_SF:
+	case IPIPEIF_PAD_SOURCE_VP:
+		/* No format conversion inside IPIPEIF */
+		if (code->index != 0)
+			return -EINVAL;
+
+		format = __ipipeif_get_format(ipipeif, fh, IPIPEIF_PAD_SINK,
+					      V4L2_SUBDEV_FORMAT_TRY);
+
+		code->code = format->code;
+		break;
+
+	default:
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+static int ipipeif_enum_frame_size(struct v4l2_subdev *sd,
+				struct v4l2_subdev_fh *fh,
+				struct v4l2_subdev_frame_size_enum *fse)
+{
+	struct iss_ipipeif_device *ipipeif = v4l2_get_subdevdata(sd);
+	struct v4l2_mbus_framefmt format;
+
+	if (fse->index != 0)
+		return -EINVAL;
+
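+	/*
+	 * Probe the minimum supported size by trying a 1x1 format and the
+	 * maximum by trying the largest possible size; ipipeif_try_format()
+	 * clamps both to the hardware limits.
+	 */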
+	format.code = fse->code;
+	format.width = 1;
+	format.height = 1;
+	ipipeif_try_format(ipipeif, fh, fse->pad, &format,
+			   V4L2_SUBDEV_FORMAT_TRY);
+	fse->min_width = format.width;
+	fse->min_height = format.height;
+
+	if (format.code != fse->code)
+		return -EINVAL;
+
+	format.code = fse->code;
+	format.width = -1;
+	format.height = -1;
+	ipipeif_try_format(ipipeif, fh, fse->pad, &format,
+			   V4L2_SUBDEV_FORMAT_TRY);
+	fse->max_width = format.width;
+	fse->max_height = format.height;
+
+	return 0;
+}
+
+/*
+ * ipipeif_get_format - Retrieve the video format on a pad
+ * @sd : ISP IPIPEIF V4L2 subdevice
+ * @fh : V4L2 subdev file handle
+ * @fmt: Format
+ *
+ * Return 0 on success or -EINVAL if the pad is invalid or doesn't correspond
+ * to the format type.
+ */
+static int ipipeif_get_format(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh,
+			   struct v4l2_subdev_format *fmt)
+{
+	struct iss_ipipeif_device *ipipeif = v4l2_get_subdevdata(sd);
+	struct v4l2_mbus_framefmt *format;
+
+	format = __ipipeif_get_format(ipipeif, fh, fmt->pad, fmt->which);
+	if (format == NULL)
+		return -EINVAL;
+
+	fmt->format = *format;
+	return 0;
+}
+
+/*
+ * ipipeif_set_format - Set the video format on a pad
+ * @sd : ISP IPIPEIF V4L2 subdevice
+ * @fh : V4L2 subdev file handle
+ * @fmt: Format
+ *
+ * Return 0 on success or -EINVAL if the pad is invalid or doesn't correspond
+ * to the format type.
+ */
+static int ipipeif_set_format(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh,
+			   struct v4l2_subdev_format *fmt)
+{
+	struct iss_ipipeif_device *ipipeif = v4l2_get_subdevdata(sd);
+	struct v4l2_mbus_framefmt *format;
+
+	format = __ipipeif_get_format(ipipeif, fh, fmt->pad, fmt->which);
+	if (format == NULL)
+		return -EINVAL;
+
+	ipipeif_try_format(ipipeif, fh, fmt->pad, &fmt->format, fmt->which);
+	*format = fmt->format;
+
+	/* Propagate the format from sink to source */
+	if (fmt->pad == IPIPEIF_PAD_SINK) {
+		format = __ipipeif_get_format(ipipeif, fh,
+					      IPIPEIF_PAD_SOURCE_ISIF_SF,
+					      fmt->which);
+		*format = fmt->format;
+		ipipeif_try_format(ipipeif, fh, IPIPEIF_PAD_SOURCE_ISIF_SF,
+				   format, fmt->which);
+
+		format = __ipipeif_get_format(ipipeif, fh,
+					      IPIPEIF_PAD_SOURCE_VP,
+					      fmt->which);
+		*format = fmt->format;
+		ipipeif_try_format(ipipeif, fh, IPIPEIF_PAD_SOURCE_VP, format,
+				fmt->which);
+	}
+
+	return 0;
+}
+
+static int ipipeif_link_validate(struct v4l2_subdev *sd,
+				 struct media_link *link,
+				 struct v4l2_subdev_format *source_fmt,
+				 struct v4l2_subdev_format *sink_fmt)
+{
+	/* Check if the two ends match */
+	if (source_fmt->format.width != sink_fmt->format.width ||
+	    source_fmt->format.height != sink_fmt->format.height)
+		return -EPIPE;
+
+	if (source_fmt->format.code != sink_fmt->format.code)
+		return -EPIPE;
+
+	return 0;
+}
+
+/*
+ * ipipeif_init_formats - Initialize formats on all pads
+ * @sd: ISP IPIPEIF V4L2 subdevice
+ * @fh: V4L2 subdev file handle
+ *
+ * Initialize all pad formats with default values. If fh is not NULL, try
+ * formats are initialized on the file handle. Otherwise active formats are
+ * initialized on the device.
+ */
+static int ipipeif_init_formats(struct v4l2_subdev *sd,
+				struct v4l2_subdev_fh *fh)
+{
+	struct v4l2_subdev_format format;
+
+	memset(&format, 0, sizeof(format));
+	format.pad = IPIPEIF_PAD_SINK;
+	format.which = fh ? V4L2_SUBDEV_FORMAT_TRY : V4L2_SUBDEV_FORMAT_ACTIVE;
+	format.format.code = V4L2_MBUS_FMT_SGRBG10_1X10;
+	format.format.width = 4096;
+	format.format.height = 4096;
+	ipipeif_set_format(sd, fh, &format);
+
+	return 0;
+}
+
+/* V4L2 subdev video operations */
+static const struct v4l2_subdev_video_ops ipipeif_v4l2_video_ops = {
+	.s_stream = ipipeif_set_stream,
+};
+
+/* V4L2 subdev pad operations */
+static const struct v4l2_subdev_pad_ops ipipeif_v4l2_pad_ops = {
+	.enum_mbus_code = ipipeif_enum_mbus_code,
+	.enum_frame_size = ipipeif_enum_frame_size,
+	.get_fmt = ipipeif_get_format,
+	.set_fmt = ipipeif_set_format,
+	.link_validate = ipipeif_link_validate,
+};
+
+/* V4L2 subdev operations */
+static const struct v4l2_subdev_ops ipipeif_v4l2_ops = {
+	.video = &ipipeif_v4l2_video_ops,
+	.pad = &ipipeif_v4l2_pad_ops,
+};
+
+/* V4L2 subdev internal operations */
+static const struct v4l2_subdev_internal_ops ipipeif_v4l2_internal_ops = {
+	.open = ipipeif_init_formats,
+};
+
+/* -----------------------------------------------------------------------------
+ * Media entity operations
+ */
+
+/*
+ * ipipeif_link_setup - Setup IPIPEIF connections
+ * @entity: IPIPEIF media entity
+ * @local: Pad at the local end of the link
+ * @remote: Pad at the remote end of the link
+ * @flags: Link flags
+ *
+ * return -EINVAL or zero on success
+ */
+static int ipipeif_link_setup(struct media_entity *entity,
+			   const struct media_pad *local,
+			   const struct media_pad *remote, u32 flags)
+{
+	struct v4l2_subdev *sd = media_entity_to_v4l2_subdev(entity);
+	struct iss_ipipeif_device *ipipeif = v4l2_get_subdevdata(sd);
+	struct iss_device *iss = to_iss_device(ipipeif);
+
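+	/*
+	 * Identify the link by combining the local pad index with the type of
+	 * the remote entity (subdev or device node).
+	 */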
+	switch (local->index | media_entity_type(remote->entity)) {
+	case IPIPEIF_PAD_SINK | MEDIA_ENT_T_V4L2_SUBDEV:
+		/* Read from the sensor CSI2a or CSI2b. */
+		if (!(flags & MEDIA_LNK_FL_ENABLED)) {
+			ipipeif->input = IPIPEIF_INPUT_NONE;
+			break;
+		}
+
+		if (ipipeif->input != IPIPEIF_INPUT_NONE)
+			return -EBUSY;
+
+		if (remote->entity == &iss->csi2a.subdev.entity)
+			ipipeif->input = IPIPEIF_INPUT_CSI2A;
+		else if (remote->entity == &iss->csi2b.subdev.entity)
+			ipipeif->input = IPIPEIF_INPUT_CSI2B;
+
+		break;
+
+	case IPIPEIF_PAD_SOURCE_ISIF_SF | MEDIA_ENT_T_DEVNODE:
+		/* Write to memory */
+		if (flags & MEDIA_LNK_FL_ENABLED) {
+			if (ipipeif->output & ~IPIPEIF_OUTPUT_MEMORY)
+				return -EBUSY;
+			ipipeif->output |= IPIPEIF_OUTPUT_MEMORY;
+		} else {
+			ipipeif->output &= ~IPIPEIF_OUTPUT_MEMORY;
+		}
+		break;
+
+	case IPIPEIF_PAD_SOURCE_VP | MEDIA_ENT_T_V4L2_SUBDEV:
+		/* Send to IPIPE/RESIZER */
+		if (flags & MEDIA_LNK_FL_ENABLED) {
+			if (ipipeif->output & ~IPIPEIF_OUTPUT_VP)
+				return -EBUSY;
+			ipipeif->output |= IPIPEIF_OUTPUT_VP;
+		} else {
+			ipipeif->output &= ~IPIPEIF_OUTPUT_VP;
+		}
+		break;
+
+	default:
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+/* media operations */
+static const struct media_entity_operations ipipeif_media_ops = {
+	.link_setup = ipipeif_link_setup,
+	.link_validate = v4l2_subdev_link_validate,
+};
+
+/*
+ * ipipeif_init_entities - Initialize V4L2 subdev and media entity
+ * @ipipeif: ISS ISP IPIPEIF module
+ *
+ * Return 0 on success and a negative error code on failure.
+ */
+static int ipipeif_init_entities(struct iss_ipipeif_device *ipipeif)
+{
+	struct v4l2_subdev *sd = &ipipeif->subdev;
+	struct media_pad *pads = ipipeif->pads;
+	struct media_entity *me = &sd->entity;
+	int ret;
+
+	ipipeif->input = IPIPEIF_INPUT_NONE;
+
+	v4l2_subdev_init(sd, &ipipeif_v4l2_ops);
+	sd->internal_ops = &ipipeif_v4l2_internal_ops;
+	strlcpy(sd->name, "OMAP4 ISS ISP IPIPEIF", sizeof(sd->name));
+	sd->grp_id = 1 << 16;	/* group ID for iss subdevs */
+	v4l2_set_subdevdata(sd, ipipeif);
+	sd->flags |= V4L2_SUBDEV_FL_HAS_DEVNODE;
+
+	pads[IPIPEIF_PAD_SINK].flags = MEDIA_PAD_FL_SINK;
+	pads[IPIPEIF_PAD_SOURCE_ISIF_SF].flags = MEDIA_PAD_FL_SOURCE;
+	pads[IPIPEIF_PAD_SOURCE_VP].flags = MEDIA_PAD_FL_SOURCE;
+
+	me->ops = &ipipeif_media_ops;
+	ret = media_entity_init(me, IPIPEIF_PADS_NUM, pads, 0);
+	if (ret < 0)
+		return ret;
+
+	ipipeif_init_formats(sd, NULL);
+
+	ipipeif->video_out.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
+	ipipeif->video_out.ops = &ipipeif_video_ops;
+	ipipeif->video_out.iss = to_iss_device(ipipeif);
+	ipipeif->video_out.capture_mem = PAGE_ALIGN(4096 * 4096) * 3;
+	ipipeif->video_out.bpl_alignment = 32;
+	ipipeif->video_out.bpl_zero_padding = 1;
+	ipipeif->video_out.bpl_max = 0x1ffe0;
+
+	ret = omap4iss_video_init(&ipipeif->video_out, "ISP IPIPEIF");
+	if (ret < 0)
+		return ret;
+
+	/* Connect the IPIPEIF subdev to the video node. */
+	ret = media_entity_create_link(&ipipeif->subdev.entity,
+				       IPIPEIF_PAD_SOURCE_ISIF_SF,
+				       &ipipeif->video_out.video.entity, 0, 0);
+	if (ret < 0)
+		return ret;
+
+	return 0;
+}
+
+void omap4iss_ipipeif_unregister_entities(struct iss_ipipeif_device *ipipeif)
+{
+	media_entity_cleanup(&ipipeif->subdev.entity);
+
+	v4l2_device_unregister_subdev(&ipipeif->subdev);
+	omap4iss_video_unregister(&ipipeif->video_out);
+}
+
+int omap4iss_ipipeif_register_entities(struct iss_ipipeif_device *ipipeif,
+	struct v4l2_device *vdev)
+{
+	int ret;
+
+	/* Register the subdev and video node. */
+	ret = v4l2_device_register_subdev(vdev, &ipipeif->subdev);
+	if (ret < 0)
+		goto error;
+
+	ret = omap4iss_video_register(&ipipeif->video_out, vdev);
+	if (ret < 0)
+		goto error;
+
+	return 0;
+
+error:
+	omap4iss_ipipeif_unregister_entities(ipipeif);
+	return ret;
+}
+
+/* -----------------------------------------------------------------------------
+ * ISP IPIPEIF initialisation and cleanup
+ */
+
+/*
+ * omap4iss_ipipeif_init - IPIPEIF module initialization.
+ * @iss: Device pointer specific to the OMAP4 ISS.
+ *
+ * TODO: Get the initialisation values from platform data.
+ *
+ * Return 0 on success or a negative error code otherwise.
+ */
+int omap4iss_ipipeif_init(struct iss_device *iss)
+{
+	struct iss_ipipeif_device *ipipeif = &iss->ipipeif;
+
+	ipipeif->state = ISS_PIPELINE_STREAM_STOPPED;
+	init_waitqueue_head(&ipipeif->wait);
+
+	return ipipeif_init_entities(ipipeif);
+}
+
+/*
+ * omap4iss_ipipeif_cleanup - IPIPEIF module cleanup.
+ * @iss: Device pointer specific to the OMAP4 ISS.
+ */
+void omap4iss_ipipeif_cleanup(struct iss_device *iss)
+{
+	/* FIXME: are you sure there's nothing to do? */
+}
diff --git a/drivers/staging/media/omap4iss/iss_ipipeif.h b/drivers/staging/media/omap4iss/iss_ipipeif.h
new file mode 100644
index 0000000..cbdccb9
--- /dev/null
+++ b/drivers/staging/media/omap4iss/iss_ipipeif.h
@@ -0,0 +1,92 @@
+/*
+ * TI OMAP4 ISS V4L2 Driver - ISP IPIPEIF module
+ *
+ * Copyright (C) 2012 Texas Instruments, Inc.
+ *
+ * Author: Sergio Aguirre <sergio.a.aguirre@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#ifndef OMAP4_ISS_IPIPEIF_H
+#define OMAP4_ISS_IPIPEIF_H
+
+#include "iss_video.h"
+
+enum ipipeif_input_entity {
+	IPIPEIF_INPUT_NONE,
+	IPIPEIF_INPUT_CSI2A,
+	IPIPEIF_INPUT_CSI2B
+};
+
+#define IPIPEIF_OUTPUT_MEMORY		(1 << 0)
+#define IPIPEIF_OUTPUT_VP		(1 << 1)
+
+/* Sink and source IPIPEIF pads */
+#define IPIPEIF_PAD_SINK			0
+#define IPIPEIF_PAD_SOURCE_ISIF_SF		1
+#define IPIPEIF_PAD_SOURCE_VP			2
+#define IPIPEIF_PADS_NUM			3
+
+/*
+ * struct iss_ipipeif_device - IPIPEIF module device structure
+ * @subdev: V4L2 subdevice
+ * @pads: Sink and source media entity pads
+ * @formats: Active video formats
+ * @input: Active input
+ * @output: Active outputs
+ * @video_out: Output video node
+ * @error: A hardware error occurred during capture
+ * @state: Streaming state
+ * @wait: Wait queue used to stop the module
+ * @stopping: Stopping state
+ */
+struct iss_ipipeif_device {
+	struct v4l2_subdev subdev;
+	struct media_pad pads[IPIPEIF_PADS_NUM];
+	struct v4l2_mbus_framefmt formats[IPIPEIF_PADS_NUM];
+
+	enum ipipeif_input_entity input;
+	unsigned int output;
+	struct iss_video video_out;
+	unsigned int error;
+
+	enum iss_pipeline_stream_state state;
+	wait_queue_head_t wait;
+	atomic_t stopping;
+};
+
+struct iss_device;
+
+int omap4iss_ipipeif_init(struct iss_device *iss);
+void omap4iss_ipipeif_cleanup(struct iss_device *iss);
+int omap4iss_ipipeif_register_entities(struct iss_ipipeif_device *ipipeif,
+	struct v4l2_device *vdev);
+void omap4iss_ipipeif_unregister_entities(struct iss_ipipeif_device *ipipeif);
+
+int omap4iss_ipipeif_busy(struct iss_ipipeif_device *ipipeif);
+void omap4iss_ipipeif_isr(struct iss_ipipeif_device *ipipeif, u32 events);
+void omap4iss_ipipeif_restore_context(struct iss_device *iss);
+void omap4iss_ipipeif_max_rate(struct iss_ipipeif_device *ipipeif,
+	unsigned int *max_rate);
+
+#endif	/* OMAP4_ISS_IPIPEIF_H */
diff --git a/drivers/staging/media/omap4iss/iss_regs.h b/drivers/staging/media/omap4iss/iss_regs.h
new file mode 100644
index 0000000..efd0291
--- /dev/null
+++ b/drivers/staging/media/omap4iss/iss_regs.h
@@ -0,0 +1,901 @@
+/*
+ * TI OMAP4 ISS V4L2 Driver - Register defines
+ *
+ * Copyright (C) 2012 Texas Instruments.
+ *
+ * Author: Sergio Aguirre <sergio.a.aguirre@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#ifndef _OMAP4_ISS_REGS_H_
+#define _OMAP4_ISS_REGS_H_
+
+/* ISS */
+#define ISS_HL_REVISION					0x0
+
+#define ISS_HL_SYSCONFIG				0x10
+#define ISS_HL_SYSCONFIG_IDLEMODE_SHIFT			2
+#define ISS_HL_SYSCONFIG_IDLEMODE_FORCEIDLE		0x0
+#define ISS_HL_SYSCONFIG_IDLEMODE_NOIDLE		0x1
+#define ISS_HL_SYSCONFIG_IDLEMODE_SMARTIDLE		0x2
+#define ISS_HL_SYSCONFIG_SOFTRESET			(1 << 0)
+
+#define ISS_HL_IRQSTATUS_RAW(i)				(0x20 + (0x10 * (i)))
+#define ISS_HL_IRQSTATUS(i)				(0x24 + (0x10 * (i)))
+#define ISS_HL_IRQENABLE_SET(i)				(0x28 + (0x10 * (i)))
+#define ISS_HL_IRQENABLE_CLR(i)				(0x2c + (0x10 * (i)))
+
+#define ISS_HL_IRQ_HS_VS				(1 << 17)
+#define ISS_HL_IRQ_SIMCOP(i)				(1 << (12 + (i)))
+#define ISS_HL_IRQ_BTE					(1 << 11)
+#define ISS_HL_IRQ_CBUFF				(1 << 10)
+#define ISS_HL_IRQ_CCP2(i)				(1 << ((i) > 3 ? 16 : 14 + (i)))
+#define ISS_HL_IRQ_CSIB					(1 << 5)
+#define ISS_HL_IRQ_CSIA					(1 << 4)
+#define ISS_HL_IRQ_ISP(i)				(1 << (i))
+
+#define ISS_CTRL					0x80
+#define ISS_CTRL_CLK_DIV_MASK				(3 << 4)
+#define ISS_CTRL_INPUT_SEL_MASK				(3 << 2)
+#define ISS_CTRL_INPUT_SEL_CSI2A			(0 << 2)
+#define ISS_CTRL_INPUT_SEL_CSI2B			(1 << 2)
+#define ISS_CTRL_SYNC_DETECT_VS_RAISING			(3 << 0)
+
+#define ISS_CLKCTRL					0x84
+#define ISS_CLKCTRL_VPORT2_CLK				(1 << 30)
+#define ISS_CLKCTRL_VPORT1_CLK				(1 << 29)
+#define ISS_CLKCTRL_VPORT0_CLK				(1 << 28)
+#define ISS_CLKCTRL_CCP2				(1 << 4)
+#define ISS_CLKCTRL_CSI2_B				(1 << 3)
+#define ISS_CLKCTRL_CSI2_A				(1 << 2)
+#define ISS_CLKCTRL_ISP					(1 << 1)
+#define ISS_CLKCTRL_SIMCOP				(1 << 0)
+
+#define ISS_CLKSTAT					0x88
+#define ISS_CLKSTAT_VPORT2_CLK				(1 << 30)
+#define ISS_CLKSTAT_VPORT1_CLK				(1 << 29)
+#define ISS_CLKSTAT_VPORT0_CLK				(1 << 28)
+#define ISS_CLKSTAT_CCP2				(1 << 4)
+#define ISS_CLKSTAT_CSI2_B				(1 << 3)
+#define ISS_CLKSTAT_CSI2_A				(1 << 2)
+#define ISS_CLKSTAT_ISP					(1 << 1)
+#define ISS_CLKSTAT_SIMCOP				(1 << 0)
+
+#define ISS_PM_STATUS					0x8c
+#define ISS_PM_STATUS_CBUFF_PM_MASK			(3 << 12)
+#define ISS_PM_STATUS_BTE_PM_MASK			(3 << 10)
+#define ISS_PM_STATUS_SIMCOP_PM_MASK			(3 << 8)
+#define ISS_PM_STATUS_ISP_PM_MASK			(3 << 6)
+#define ISS_PM_STATUS_CCP2_PM_MASK			(3 << 4)
+#define ISS_PM_STATUS_CSI2_B_PM_MASK			(3 << 2)
+#define ISS_PM_STATUS_CSI2_A_PM_MASK			(3 << 0)
+
+#define REGISTER0					0x0
+#define REGISTER0_HSCLOCKCONFIG				(1 << 24)
+#define REGISTER0_THS_TERM_MASK				(0xff << 8)
+#define REGISTER0_THS_TERM_SHIFT			8
+#define REGISTER0_THS_SETTLE_MASK			(0xff << 0)
+#define REGISTER0_THS_SETTLE_SHIFT			0
+
+#define REGISTER1					0x4
+#define REGISTER1_RESET_DONE_CTRLCLK			(1 << 29)
+#define REGISTER1_CLOCK_MISS_DETECTOR_STATUS		(1 << 25)
+#define REGISTER1_TCLK_TERM_MASK			(0x3f << 18)
+#define REGISTER1_TCLK_TERM_SHIFT			18
+#define REGISTER1_DPHY_HS_SYNC_PATTERN_SHIFT		10
+#define REGISTER1_CTRLCLK_DIV_FACTOR_MASK		(0x3 << 8)
+#define REGISTER1_CTRLCLK_DIV_FACTOR_SHIFT		8
+#define REGISTER1_TCLK_SETTLE_MASK			(0xff << 0)
+#define REGISTER1_TCLK_SETTLE_SHIFT			0
+
+#define REGISTER2					0x8
+
+#define CSI2_SYSCONFIG					0x10
+#define CSI2_SYSCONFIG_MSTANDBY_MODE_MASK		(3 << 12)
+#define CSI2_SYSCONFIG_MSTANDBY_MODE_FORCE		(0 << 12)
+#define CSI2_SYSCONFIG_MSTANDBY_MODE_NO			(1 << 12)
+#define CSI2_SYSCONFIG_MSTANDBY_MODE_SMART		(2 << 12)
+#define CSI2_SYSCONFIG_SOFT_RESET			(1 << 1)
+#define CSI2_SYSCONFIG_AUTO_IDLE			(1 << 0)
+
+#define CSI2_SYSSTATUS					0x14
+#define CSI2_SYSSTATUS_RESET_DONE			(1 << 0)
+
+#define CSI2_IRQSTATUS					0x18
+#define CSI2_IRQENABLE					0x1c
+
+/* Shared bits across CSI2_IRQENABLE and IRQSTATUS */
+
+#define CSI2_IRQ_OCP_ERR				(1 << 14)
+#define CSI2_IRQ_SHORT_PACKET				(1 << 13)
+#define CSI2_IRQ_ECC_CORRECTION				(1 << 12)
+#define CSI2_IRQ_ECC_NO_CORRECTION			(1 << 11)
+#define CSI2_IRQ_COMPLEXIO_ERR				(1 << 9)
+#define CSI2_IRQ_FIFO_OVF				(1 << 8)
+#define CSI2_IRQ_CONTEXT0				(1 << 0)
+
+#define CSI2_CTRL					0x40
+#define CSI2_CTRL_MFLAG_LEVH_MASK			(7 << 20)
+#define CSI2_CTRL_MFLAG_LEVH_SHIFT			20
+#define CSI2_CTRL_MFLAG_LEVL_MASK			(7 << 17)
+#define CSI2_CTRL_MFLAG_LEVL_SHIFT			17
+#define CSI2_CTRL_BURST_SIZE_EXPAND			(1 << 16)
+#define CSI2_CTRL_VP_CLK_EN				(1 << 15)
+#define CSI2_CTRL_NON_POSTED_WRITE			(1 << 13)
+#define CSI2_CTRL_VP_ONLY_EN				(1 << 11)
+#define CSI2_CTRL_VP_OUT_CTRL_MASK			(3 << 8)
+#define CSI2_CTRL_VP_OUT_CTRL_SHIFT			8
+#define CSI2_CTRL_DBG_EN				(1 << 7)
+#define CSI2_CTRL_BURST_SIZE_MASK			(3 << 5)
+#define CSI2_CTRL_ENDIANNESS				(1 << 4)
+#define CSI2_CTRL_FRAME					(1 << 3)
+#define CSI2_CTRL_ECC_EN				(1 << 2)
+#define CSI2_CTRL_IF_EN					(1 << 0)
+
+#define CSI2_DBG_H					0x44
+
+#define CSI2_COMPLEXIO_CFG				0x50
+#define CSI2_COMPLEXIO_CFG_RESET_CTRL			(1 << 30)
+#define CSI2_COMPLEXIO_CFG_RESET_DONE			(1 << 29)
+#define CSI2_COMPLEXIO_CFG_PWD_CMD_MASK			(3 << 27)
+#define CSI2_COMPLEXIO_CFG_PWD_CMD_OFF			(0 << 27)
+#define CSI2_COMPLEXIO_CFG_PWD_CMD_ON			(1 << 27)
+#define CSI2_COMPLEXIO_CFG_PWD_CMD_ULP			(2 << 27)
+#define CSI2_COMPLEXIO_CFG_PWD_STATUS_MASK		(3 << 25)
+#define CSI2_COMPLEXIO_CFG_PWD_STATUS_OFF		(0 << 25)
+#define CSI2_COMPLEXIO_CFG_PWD_STATUS_ON		(1 << 25)
+#define CSI2_COMPLEXIO_CFG_PWD_STATUS_ULP		(2 << 25)
+#define CSI2_COMPLEXIO_CFG_PWR_AUTO			(1 << 24)
+#define CSI2_COMPLEXIO_CFG_DATA_POL(i)			(1 << (((i) * 4) + 3))
+#define CSI2_COMPLEXIO_CFG_DATA_POSITION_MASK(i)	(7 << ((i) * 4))
+#define CSI2_COMPLEXIO_CFG_DATA_POSITION_SHIFT(i)	((i) * 4)
+#define CSI2_COMPLEXIO_CFG_CLOCK_POL			(1 << 3)
+#define CSI2_COMPLEXIO_CFG_CLOCK_POSITION_MASK		(7 << 0)
+#define CSI2_COMPLEXIO_CFG_CLOCK_POSITION_SHIFT		0
+
+#define CSI2_COMPLEXIO_IRQSTATUS			0x54
+
+#define CSI2_SHORT_PACKET				0x5c
+
+#define CSI2_COMPLEXIO_IRQENABLE			0x60
+
+/* Shared bits across CSI2_COMPLEXIO_IRQENABLE and IRQSTATUS */
+#define CSI2_COMPLEXIO_IRQ_STATEALLULPMEXIT		(1 << 26)
+#define CSI2_COMPLEXIO_IRQ_STATEALLULPMENTER		(1 << 25)
+#define CSI2_COMPLEXIO_IRQ_STATEULPM5			(1 << 24)
+#define CSI2_COMPLEXIO_IRQ_STATEULPM4			(1 << 23)
+#define CSI2_COMPLEXIO_IRQ_STATEULPM3			(1 << 22)
+#define CSI2_COMPLEXIO_IRQ_STATEULPM2			(1 << 21)
+#define CSI2_COMPLEXIO_IRQ_STATEULPM1			(1 << 20)
+#define CSI2_COMPLEXIO_IRQ_ERRCONTROL5			(1 << 19)
+#define CSI2_COMPLEXIO_IRQ_ERRCONTROL4			(1 << 18)
+#define CSI2_COMPLEXIO_IRQ_ERRCONTROL3			(1 << 17)
+#define CSI2_COMPLEXIO_IRQ_ERRCONTROL2			(1 << 16)
+#define CSI2_COMPLEXIO_IRQ_ERRCONTROL1			(1 << 15)
+#define CSI2_COMPLEXIO_IRQ_ERRESC5			(1 << 14)
+#define CSI2_COMPLEXIO_IRQ_ERRESC4			(1 << 13)
+#define CSI2_COMPLEXIO_IRQ_ERRESC3			(1 << 12)
+#define CSI2_COMPLEXIO_IRQ_ERRESC2			(1 << 11)
+#define CSI2_COMPLEXIO_IRQ_ERRESC1			(1 << 10)
+#define CSI2_COMPLEXIO_IRQ_ERRSOTSYNCHS5		(1 << 9)
+#define CSI2_COMPLEXIO_IRQ_ERRSOTSYNCHS4		(1 << 8)
+#define CSI2_COMPLEXIO_IRQ_ERRSOTSYNCHS3		(1 << 7)
+#define CSI2_COMPLEXIO_IRQ_ERRSOTSYNCHS2		(1 << 6)
+#define CSI2_COMPLEXIO_IRQ_ERRSOTSYNCHS1		(1 << 5)
+#define CSI2_COMPLEXIO_IRQ_ERRSOTHS5			(1 << 4)
+#define CSI2_COMPLEXIO_IRQ_ERRSOTHS4			(1 << 3)
+#define CSI2_COMPLEXIO_IRQ_ERRSOTHS3			(1 << 2)
+#define CSI2_COMPLEXIO_IRQ_ERRSOTHS2			(1 << 1)
+#define CSI2_COMPLEXIO_IRQ_ERRSOTHS1			(1 << 0)
+
+#define CSI2_DBG_P					0x68
+
+#define CSI2_TIMING					0x6c
+#define CSI2_TIMING_FORCE_RX_MODE_IO1			(1 << 15)
+#define CSI2_TIMING_STOP_STATE_X16_IO1			(1 << 14)
+#define CSI2_TIMING_STOP_STATE_X4_IO1			(1 << 13)
+#define CSI2_TIMING_STOP_STATE_COUNTER_IO1_MASK		(0x1fff << 0)
+#define CSI2_TIMING_STOP_STATE_COUNTER_IO1_SHIFT	0
+
+#define CSI2_CTX_CTRL1(i)				(0x70 + (0x20 * i))
+#define CSI2_CTX_CTRL1_GENERIC				(1 << 30)
+#define CSI2_CTX_CTRL1_TRANSCODE			(0xf << 24)
+#define CSI2_CTX_CTRL1_FEC_NUMBER_MASK			(0xff << 16)
+#define CSI2_CTX_CTRL1_COUNT_MASK			(0xff << 8)
+#define CSI2_CTX_CTRL1_COUNT_SHIFT			8
+#define CSI2_CTX_CTRL1_EOF_EN				(1 << 7)
+#define CSI2_CTX_CTRL1_EOL_EN				(1 << 6)
+#define CSI2_CTX_CTRL1_CS_EN				(1 << 5)
+#define CSI2_CTX_CTRL1_COUNT_UNLOCK			(1 << 4)
+#define CSI2_CTX_CTRL1_PING_PONG			(1 << 3)
+#define CSI2_CTX_CTRL1_CTX_EN				(1 << 0)
+
+#define CSI2_CTX_CTRL2(i)				(0x74 + (0x20 * i))
+#define CSI2_CTX_CTRL2_USER_DEF_MAP_SHIFT		13
+#define CSI2_CTX_CTRL2_USER_DEF_MAP_MASK		\
+		(0x3 << CSI2_CTX_CTRL2_USER_DEF_MAP_SHIFT)
+#define CSI2_CTX_CTRL2_VIRTUAL_ID_MASK			(3 << 11)
+#define CSI2_CTX_CTRL2_VIRTUAL_ID_SHIFT			11
+#define CSI2_CTX_CTRL2_DPCM_PRED			(1 << 10)
+#define CSI2_CTX_CTRL2_FORMAT_MASK			(0x3ff << 0)
+#define CSI2_CTX_CTRL2_FORMAT_SHIFT			0
+
+#define CSI2_CTX_DAT_OFST(i)				(0x78 + (0x20 * i))
+#define CSI2_CTX_DAT_OFST_MASK				(0xfff << 5)
+
+#define CSI2_CTX_PING_ADDR(i)				(0x7c + (0x20 * i))
+#define CSI2_CTX_PING_ADDR_MASK				0xffffffe0
+
+#define CSI2_CTX_PONG_ADDR(i)				(0x80 + (0x20 * i))
+#define CSI2_CTX_PONG_ADDR_MASK				CSI2_CTX_PING_ADDR_MASK
+
+#define CSI2_CTX_IRQENABLE(i)				(0x84 + (0x20 * i))
+#define CSI2_CTX_IRQSTATUS(i)				(0x88 + (0x20 * i))
+
+#define CSI2_CTX_CTRL3(i)				(0x8c + (0x20 * i))
+#define CSI2_CTX_CTRL3_ALPHA_SHIFT			5
+#define CSI2_CTX_CTRL3_ALPHA_MASK			\
+		(0x3fff << CSI2_CTX_CTRL3_ALPHA_SHIFT)
+
+/* Shared bits across CSI2_CTX_IRQENABLE and IRQSTATUS */
+#define CSI2_CTX_IRQ_ECC_CORRECTION			(1 << 8)
+#define CSI2_CTX_IRQ_LINE_NUMBER			(1 << 7)
+#define CSI2_CTX_IRQ_FRAME_NUMBER			(1 << 6)
+#define CSI2_CTX_IRQ_CS					(1 << 5)
+#define CSI2_CTX_IRQ_LE					(1 << 3)
+#define CSI2_CTX_IRQ_LS					(1 << 2)
+#define CSI2_CTX_IRQ_FE					(1 << 1)
+#define CSI2_CTX_IRQ_FS					(1 << 0)
+
+/* ISS BTE */
+#define BTE_CTRL					(0x0030)
+#define BTE_CTRL_BW_LIMITER_MASK			(0x3ff << 22)
+#define BTE_CTRL_BW_LIMITER_SHIFT			22
+
+/* ISS ISP_SYS1 */
+#define ISP5_REVISION					(0x0000)
+#define ISP5_SYSCONFIG					(0x0010)
+#define ISP5_SYSCONFIG_STANDBYMODE_MASK			(3 << 4)
+#define ISP5_SYSCONFIG_STANDBYMODE_FORCE		(0 << 4)
+#define ISP5_SYSCONFIG_STANDBYMODE_NO			(1 << 4)
+#define ISP5_SYSCONFIG_STANDBYMODE_SMART		(2 << 4)
+#define ISP5_SYSCONFIG_SOFTRESET			(1 << 1)
+
+#define ISP5_IRQSTATUS(i)				(0x0028 + (0x10 * (i)))
+#define ISP5_IRQENABLE_SET(i)				(0x002c + (0x10 * (i)))
+#define ISP5_IRQENABLE_CLR(i)				(0x0030 + (0x10 * (i)))
+
+/* Bits shared for ISP5_IRQ* registers */
+#define ISP5_IRQ_OCP_ERR				(1 << 31)
+#define ISP5_IRQ_IPIPE_INT_DPC_RNEW1			(1 << 29)
+#define ISP5_IRQ_IPIPE_INT_DPC_RNEW0			(1 << 28)
+#define ISP5_IRQ_IPIPE_INT_DPC_INIT			(1 << 27)
+#define ISP5_IRQ_IPIPE_INT_EOF				(1 << 25)
+#define ISP5_IRQ_H3A_INT_EOF				(1 << 24)
+#define ISP5_IRQ_RSZ_INT_EOF1				(1 << 23)
+#define ISP5_IRQ_RSZ_INT_EOF0				(1 << 22)
+#define ISP5_IRQ_RSZ_FIFO_IN_BLK_ERR			(1 << 19)
+#define ISP5_IRQ_RSZ_FIFO_OVF				(1 << 18)
+#define ISP5_IRQ_RSZ_INT_CYC_RSZB			(1 << 17)
+#define ISP5_IRQ_RSZ_INT_CYC_RSZA			(1 << 16)
+#define ISP5_IRQ_RSZ_INT_DMA				(1 << 15)
+#define ISP5_IRQ_RSZ_INT_LAST_PIX			(1 << 14)
+#define ISP5_IRQ_RSZ_INT_REG				(1 << 13)
+#define ISP5_IRQ_H3A_INT				(1 << 12)
+#define ISP5_IRQ_AF_INT					(1 << 11)
+#define ISP5_IRQ_AEW_INT				(1 << 10)
+#define ISP5_IRQ_IPIPEIF_IRQ				(1 << 9)
+#define ISP5_IRQ_IPIPE_INT_HST				(1 << 8)
+#define ISP5_IRQ_IPIPE_INT_BSC				(1 << 7)
+#define ISP5_IRQ_IPIPE_INT_DMA				(1 << 6)
+#define ISP5_IRQ_IPIPE_INT_LAST_PIX			(1 << 5)
+#define ISP5_IRQ_IPIPE_INT_REG				(1 << 4)
+#define ISP5_IRQ_ISIF_INT(i)				(1 << (i))
+
+#define ISP5_CTRL					(0x006c)
+#define ISP5_CTRL_MSTANDBY				(1 << 24)
+#define ISP5_CTRL_VD_PULSE_EXT				(1 << 23)
+#define ISP5_CTRL_MSTANDBY_WAIT				(1 << 20)
+#define ISP5_CTRL_BL_CLK_ENABLE				(1 << 15)
+#define ISP5_CTRL_ISIF_CLK_ENABLE			(1 << 14)
+#define ISP5_CTRL_H3A_CLK_ENABLE			(1 << 13)
+#define ISP5_CTRL_RSZ_CLK_ENABLE			(1 << 12)
+#define ISP5_CTRL_IPIPE_CLK_ENABLE			(1 << 11)
+#define ISP5_CTRL_IPIPEIF_CLK_ENABLE			(1 << 10)
+#define ISP5_CTRL_SYNC_ENABLE				(1 << 9)
+#define ISP5_CTRL_PSYNC_CLK_SEL				(1 << 8)
+
+/* ISS ISP ISIF register offsets */
+#define ISIF_SYNCEN					(0x0000)
+#define ISIF_SYNCEN_DWEN				(1 << 1)
+#define ISIF_SYNCEN_SYEN				(1 << 0)
+
+#define ISIF_MODESET					(0x0004)
+#define ISIF_MODESET_INPMOD_MASK			(3 << 12)
+#define ISIF_MODESET_INPMOD_RAW				(0 << 12)
+#define ISIF_MODESET_INPMOD_YCBCR16			(1 << 12)
+#define ISIF_MODESET_INPMOD_YCBCR8			(2 << 12)
+#define ISIF_MODESET_CCDW_MASK				(7 << 8)
+#define ISIF_MODESET_CCDW_2BIT				(2 << 8)
+#define ISIF_MODESET_CCDMD				(1 << 7)
+#define ISIF_MODESET_SWEN				(1 << 5)
+#define ISIF_MODESET_HDPOL				(1 << 3)
+#define ISIF_MODESET_VDPOL				(1 << 2)
+
+#define ISIF_SPH					(0x0018)
+#define ISIF_SPH_MASK					(0x7fff)
+
+#define ISIF_LNH					(0x001c)
+#define ISIF_LNH_MASK					(0x7fff)
+
+#define ISIF_LNV					(0x0028)
+#define ISIF_LNV_MASK					(0x7fff)
+
+#define ISIF_HSIZE					(0x0034)
+#define ISIF_HSIZE_ADCR					(1 << 12)
+#define ISIF_HSIZE_HSIZE_MASK				(0xfff)
+
+#define ISIF_CADU					(0x003c)
+#define ISIF_CADU_MASK					(0x7ff)
+
+#define ISIF_CADL					(0x0040)
+#define ISIF_CADL_MASK					(0xffff)
+
+#define ISIF_CCOLP					(0x004c)
+#define ISIF_CCOLP_CP0_F0_R				(0 << 6)
+#define ISIF_CCOLP_CP0_F0_GR				(1 << 6)
+#define ISIF_CCOLP_CP0_F0_B				(3 << 6)
+#define ISIF_CCOLP_CP0_F0_GB				(2 << 6)
+#define ISIF_CCOLP_CP1_F0_R				(0 << 4)
+#define ISIF_CCOLP_CP1_F0_GR				(1 << 4)
+#define ISIF_CCOLP_CP1_F0_B				(3 << 4)
+#define ISIF_CCOLP_CP1_F0_GB				(2 << 4)
+#define ISIF_CCOLP_CP2_F0_R				(0 << 2)
+#define ISIF_CCOLP_CP2_F0_GR				(1 << 2)
+#define ISIF_CCOLP_CP2_F0_B				(3 << 2)
+#define ISIF_CCOLP_CP2_F0_GB				(2 << 2)
+#define ISIF_CCOLP_CP3_F0_R				(0 << 0)
+#define ISIF_CCOLP_CP3_F0_GR				(1 << 0)
+#define ISIF_CCOLP_CP3_F0_B				(3 << 0)
+#define ISIF_CCOLP_CP3_F0_GB				(2 << 0)
+
+#define ISIF_VDINT(i)					(0x0070 + (i) * 4)
+#define ISIF_VDINT_MASK					(0x7fff)
+
+#define ISIF_CGAMMAWD					(0x0080)
+#define ISIF_CGAMMAWD_GWDI_MASK				(0xf << 1)
+#define ISIF_CGAMMAWD_GWDI(bpp)				((16 - (bpp)) << 1)
+
+#define ISIF_CCDCFG					(0x0088)
+#define ISIF_CCDCFG_Y8POS				(1 << 11)
+
+/* ISS ISP IPIPEIF register offsets */
+#define IPIPEIF_ENABLE					(0x0000)
+
+#define IPIPEIF_CFG1					(0x0004)
+#define IPIPEIF_CFG1_INPSRC1_MASK			(3 << 14)
+#define IPIPEIF_CFG1_INPSRC1_VPORT_RAW			(0 << 14)
+#define IPIPEIF_CFG1_INPSRC1_SDRAM_RAW			(1 << 14)
+#define IPIPEIF_CFG1_INPSRC1_ISIF_DARKFM		(2 << 14)
+#define IPIPEIF_CFG1_INPSRC1_SDRAM_YUV			(3 << 14)
+#define IPIPEIF_CFG1_INPSRC2_MASK			(3 << 2)
+#define IPIPEIF_CFG1_INPSRC2_ISIF			(0 << 2)
+#define IPIPEIF_CFG1_INPSRC2_SDRAM_RAW			(1 << 2)
+#define IPIPEIF_CFG1_INPSRC2_ISIF_DARKFM		(2 << 2)
+#define IPIPEIF_CFG1_INPSRC2_SDRAM_YUV			(3 << 2)
+
+#define IPIPEIF_CFG2					(0x0030)
+#define IPIPEIF_CFG2_YUV8P				(1 << 7)
+#define IPIPEIF_CFG2_YUV8				(1 << 6)
+#define IPIPEIF_CFG2_YUV16				(1 << 3)
+#define IPIPEIF_CFG2_VDPOL				(1 << 2)
+#define IPIPEIF_CFG2_HDPOL				(1 << 1)
+#define IPIPEIF_CFG2_INTSW				(1 << 0)
+
+#define IPIPEIF_CLKDIV					(0x0040)
+
+/* ISS ISP IPIPE register offsets */
+#define IPIPE_SRC_EN					(0x0000)
+#define IPIPE_SRC_EN_EN					(1 << 0)
+
+#define IPIPE_SRC_MODE					(0x0004)
+#define IPIPE_SRC_MODE_WRT				(1 << 1)
+#define IPIPE_SRC_MODE_OST				(1 << 0)
+
+#define IPIPE_SRC_FMT					(0x0008)
+#define IPIPE_SRC_FMT_RAW2YUV				(0 << 0)
+#define IPIPE_SRC_FMT_RAW2RAW				(1 << 0)
+#define IPIPE_SRC_FMT_RAW2STATS				(2 << 0)
+#define IPIPE_SRC_FMT_YUV2YUV				(3 << 0)
+
+#define IPIPE_SRC_COL					(0x000c)
+#define IPIPE_SRC_COL_OO_R				(0 << 6)
+#define IPIPE_SRC_COL_OO_GR				(1 << 6)
+#define IPIPE_SRC_COL_OO_B				(3 << 6)
+#define IPIPE_SRC_COL_OO_GB				(2 << 6)
+#define IPIPE_SRC_COL_OE_R				(0 << 4)
+#define IPIPE_SRC_COL_OE_GR				(1 << 4)
+#define IPIPE_SRC_COL_OE_B				(3 << 4)
+#define IPIPE_SRC_COL_OE_GB				(2 << 4)
+#define IPIPE_SRC_COL_EO_R				(0 << 2)
+#define IPIPE_SRC_COL_EO_GR				(1 << 2)
+#define IPIPE_SRC_COL_EO_B				(3 << 2)
+#define IPIPE_SRC_COL_EO_GB				(2 << 2)
+#define IPIPE_SRC_COL_EE_R				(0 << 0)
+#define IPIPE_SRC_COL_EE_GR				(1 << 0)
+#define IPIPE_SRC_COL_EE_B				(3 << 0)
+#define IPIPE_SRC_COL_EE_GB				(2 << 0)
+
+#define IPIPE_SRC_VPS					(0x0010)
+#define IPIPE_SRC_VPS_MASK				(0xffff)
+
+#define IPIPE_SRC_VSZ					(0x0014)
+#define IPIPE_SRC_VSZ_MASK				(0x1fff)
+
+#define IPIPE_SRC_HPS					(0x0018)
+#define IPIPE_SRC_HPS_MASK				(0xffff)
+
+#define IPIPE_SRC_HSZ					(0x001c)
+#define IPIPE_SRC_HSZ_MASK				(0x1ffe)
+
+#define IPIPE_SEL_SBU					(0x0020)
+
+#define IPIPE_SRC_STA					(0x0024)
+
+#define IPIPE_GCK_MMR					(0x0028)
+#define IPIPE_GCK_MMR_REG				(1 << 0)
+
+#define IPIPE_GCK_PIX					(0x002c)
+#define IPIPE_GCK_PIX_G3				(1 << 3)
+#define IPIPE_GCK_PIX_G2				(1 << 2)
+#define IPIPE_GCK_PIX_G1				(1 << 1)
+#define IPIPE_GCK_PIX_G0				(1 << 0)
+
+#define IPIPE_DPC_LUT_EN				(0x0034)
+#define IPIPE_DPC_LUT_SEL				(0x0038)
+#define IPIPE_DPC_LUT_ADR				(0x003c)
+#define IPIPE_DPC_LUT_SIZ				(0x0040)
+
+#define IPIPE_DPC_OTF_EN				(0x0044)
+#define IPIPE_DPC_OTF_TYP				(0x0048)
+#define IPIPE_DPC_OTF_2_D_THR_R				(0x004c)
+#define IPIPE_DPC_OTF_2_D_THR_GR			(0x0050)
+#define IPIPE_DPC_OTF_2_D_THR_GB			(0x0054)
+#define IPIPE_DPC_OTF_2_D_THR_B				(0x0058)
+#define IPIPE_DPC_OTF_2_C_THR_R				(0x005c)
+#define IPIPE_DPC_OTF_2_C_THR_GR			(0x0060)
+#define IPIPE_DPC_OTF_2_C_THR_GB			(0x0064)
+#define IPIPE_DPC_OTF_2_C_THR_B				(0x0068)
+#define IPIPE_DPC_OTF_3_SHF				(0x006c)
+#define IPIPE_DPC_OTF_3_D_THR				(0x0070)
+#define IPIPE_DPC_OTF_3_D_SPL				(0x0074)
+#define IPIPE_DPC_OTF_3_D_MIN				(0x0078)
+#define IPIPE_DPC_OTF_3_D_MAX				(0x007c)
+#define IPIPE_DPC_OTF_3_C_THR				(0x0080)
+#define IPIPE_DPC_OTF_3_C_SLP				(0x0084)
+#define IPIPE_DPC_OTF_3_C_MIN				(0x0088)
+#define IPIPE_DPC_OTF_3_C_MAX				(0x008c)
+
+#define IPIPE_LSC_VOFT					(0x0090)
+#define IPIPE_LSC_VA2					(0x0094)
+#define IPIPE_LSC_VA1					(0x0098)
+#define IPIPE_LSC_VS					(0x009c)
+#define IPIPE_LSC_HOFT					(0x00a0)
+#define IPIPE_LSC_HA2					(0x00a4)
+#define IPIPE_LSC_HA1					(0x00a8)
+#define IPIPE_LSC_HS					(0x00ac)
+#define IPIPE_LSC_GAN_R					(0x00b0)
+#define IPIPE_LSC_GAN_GR				(0x00b4)
+#define IPIPE_LSC_GAN_GB				(0x00b8)
+#define IPIPE_LSC_GAN_B					(0x00bc)
+#define IPIPE_LSC_OFT_R					(0x00c0)
+#define IPIPE_LSC_OFT_GR				(0x00c4)
+#define IPIPE_LSC_OFT_GB				(0x00c8)
+#define IPIPE_LSC_OFT_B					(0x00cc)
+#define IPIPE_LSC_SHF					(0x00d0)
+#define IPIPE_LSC_MAX					(0x00d4)
+
+#define IPIPE_D2F_1ST_EN				(0x00d8)
+#define IPIPE_D2F_1ST_TYP				(0x00dc)
+#define IPIPE_D2F_1ST_THR_00				(0x00e0)
+#define IPIPE_D2F_1ST_THR_01				(0x00e4)
+#define IPIPE_D2F_1ST_THR_02				(0x00e8)
+#define IPIPE_D2F_1ST_THR_03				(0x00ec)
+#define IPIPE_D2F_1ST_THR_04				(0x00f0)
+#define IPIPE_D2F_1ST_THR_05				(0x00f4)
+#define IPIPE_D2F_1ST_THR_06				(0x00f8)
+#define IPIPE_D2F_1ST_THR_07				(0x00fc)
+#define IPIPE_D2F_1ST_STR_00				(0x0100)
+#define IPIPE_D2F_1ST_STR_01				(0x0104)
+#define IPIPE_D2F_1ST_STR_02				(0x0108)
+#define IPIPE_D2F_1ST_STR_03				(0x010c)
+#define IPIPE_D2F_1ST_STR_04				(0x0110)
+#define IPIPE_D2F_1ST_STR_05				(0x0114)
+#define IPIPE_D2F_1ST_STR_06				(0x0118)
+#define IPIPE_D2F_1ST_STR_07				(0x011c)
+#define IPIPE_D2F_1ST_SPR_00				(0x0120)
+#define IPIPE_D2F_1ST_SPR_01				(0x0124)
+#define IPIPE_D2F_1ST_SPR_02				(0x0128)
+#define IPIPE_D2F_1ST_SPR_03				(0x012c)
+#define IPIPE_D2F_1ST_SPR_04				(0x0130)
+#define IPIPE_D2F_1ST_SPR_05				(0x0134)
+#define IPIPE_D2F_1ST_SPR_06				(0x0138)
+#define IPIPE_D2F_1ST_SPR_07				(0x013c)
+#define IPIPE_D2F_1ST_EDG_MIN				(0x0140)
+#define IPIPE_D2F_1ST_EDG_MAX				(0x0144)
+#define IPIPE_D2F_2ND_EN				(0x0148)
+#define IPIPE_D2F_2ND_TYP				(0x014c)
+#define IPIPE_D2F_2ND_THR00				(0x0150)
+#define IPIPE_D2F_2ND_THR01				(0x0154)
+#define IPIPE_D2F_2ND_THR02				(0x0158)
+#define IPIPE_D2F_2ND_THR03				(0x015c)
+#define IPIPE_D2F_2ND_THR04				(0x0160)
+#define IPIPE_D2F_2ND_THR05				(0x0164)
+#define IPIPE_D2F_2ND_THR06				(0x0168)
+#define IPIPE_D2F_2ND_THR07				(0x016c)
+#define IPIPE_D2F_2ND_STR_00				(0x0170)
+#define IPIPE_D2F_2ND_STR_01				(0x0174)
+#define IPIPE_D2F_2ND_STR_02				(0x0178)
+#define IPIPE_D2F_2ND_STR_03				(0x017c)
+#define IPIPE_D2F_2ND_STR_04				(0x0180)
+#define IPIPE_D2F_2ND_STR_05				(0x0184)
+#define IPIPE_D2F_2ND_STR_06				(0x0188)
+#define IPIPE_D2F_2ND_STR_07				(0x018c)
+#define IPIPE_D2F_2ND_SPR_00				(0x0190)
+#define IPIPE_D2F_2ND_SPR_01				(0x0194)
+#define IPIPE_D2F_2ND_SPR_02				(0x0198)
+#define IPIPE_D2F_2ND_SPR_03				(0x019c)
+#define IPIPE_D2F_2ND_SPR_04				(0x01a0)
+#define IPIPE_D2F_2ND_SPR_05				(0x01a4)
+#define IPIPE_D2F_2ND_SPR_06				(0x01a8)
+#define IPIPE_D2F_2ND_SPR_07				(0x01ac)
+#define IPIPE_D2F_2ND_EDG_MIN				(0x01b0)
+#define IPIPE_D2F_2ND_EDG_MAX				(0x01b4)
+
+#define IPIPE_GIC_EN					(0x01b8)
+#define IPIPE_GIC_TYP					(0x01bc)
+#define IPIPE_GIC_GAN					(0x01c0)
+#define IPIPE_GIC_NFGAIN				(0x01c4)
+#define IPIPE_GIC_THR					(0x01c8)
+#define IPIPE_GIC_SLP					(0x01cc)
+
+#define IPIPE_WB2_OFT_R					(0x01d0)
+#define IPIPE_WB2_OFT_GR				(0x01d4)
+#define IPIPE_WB2_OFT_GB				(0x01d8)
+#define IPIPE_WB2_OFT_B					(0x01dc)
+
+#define IPIPE_WB2_WGN_R					(0x01e0)
+#define IPIPE_WB2_WGN_GR				(0x01e4)
+#define IPIPE_WB2_WGN_GB				(0x01e8)
+#define IPIPE_WB2_WGN_B					(0x01ec)
+
+#define IPIPE_CFA_MODE					(0x01f0)
+#define IPIPE_CFA_2DIR_HPF_THR				(0x01f4)
+#define IPIPE_CFA_2DIR_HPF_SLP				(0x01f8)
+#define IPIPE_CFA_2DIR_MIX_THR				(0x01fc)
+#define IPIPE_CFA_2DIR_MIX_SLP				(0x0200)
+#define IPIPE_CFA_2DIR_DIR_TRH				(0x0204)
+#define IPIPE_CFA_2DIR_DIR_SLP				(0x0208)
+#define IPIPE_CFA_2DIR_NDWT				(0x020c)
+#define IPIPE_CFA_MONO_HUE_FRA				(0x0210)
+#define IPIPE_CFA_MONO_EDG_THR				(0x0214)
+#define IPIPE_CFA_MONO_THR_MIN				(0x0218)
+
+#define IPIPE_CFA_MONO_THR_SLP				(0x021c)
+#define IPIPE_CFA_MONO_SLP_MIN				(0x0220)
+#define IPIPE_CFA_MONO_SLP_SLP				(0x0224)
+#define IPIPE_CFA_MONO_LPWT				(0x0228)
+
+#define IPIPE_RGB1_MUL_RR				(0x022c)
+#define IPIPE_RGB1_MUL_GR				(0x0230)
+#define IPIPE_RGB1_MUL_BR				(0x0234)
+#define IPIPE_RGB1_MUL_RG				(0x0238)
+#define IPIPE_RGB1_MUL_GG				(0x023c)
+#define IPIPE_RGB1_MUL_BG				(0x0240)
+#define IPIPE_RGB1_MUL_RB				(0x0244)
+#define IPIPE_RGB1_MUL_GB				(0x0248)
+#define IPIPE_RGB1_MUL_BB				(0x024c)
+#define IPIPE_RGB1_OFT_OR				(0x0250)
+#define IPIPE_RGB1_OFT_OG				(0x0254)
+#define IPIPE_RGB1_OFT_OB				(0x0258)
+#define IPIPE_GMM_CFG					(0x025c)
+#define IPIPE_RGB2_MUL_RR				(0x0260)
+#define IPIPE_RGB2_MUL_GR				(0x0264)
+#define IPIPE_RGB2_MUL_BR				(0x0268)
+#define IPIPE_RGB2_MUL_RG				(0x026c)
+#define IPIPE_RGB2_MUL_GG				(0x0270)
+#define IPIPE_RGB2_MUL_BG				(0x0274)
+#define IPIPE_RGB2_MUL_RB				(0x0278)
+#define IPIPE_RGB2_MUL_GB				(0x027c)
+#define IPIPE_RGB2_MUL_BB				(0x0280)
+#define IPIPE_RGB2_OFT_OR				(0x0284)
+#define IPIPE_RGB2_OFT_OG				(0x0288)
+#define IPIPE_RGB2_OFT_OB				(0x028c)
+
+#define IPIPE_YUV_ADJ					(0x0294)
+#define IPIPE_YUV_MUL_RY				(0x0298)
+#define IPIPE_YUV_MUL_GY				(0x029c)
+#define IPIPE_YUV_MUL_BY				(0x02a0)
+#define IPIPE_YUV_MUL_RCB				(0x02a4)
+#define IPIPE_YUV_MUL_GCB				(0x02a8)
+#define IPIPE_YUV_MUL_BCB				(0x02ac)
+#define IPIPE_YUV_MUL_RCR				(0x02b0)
+#define IPIPE_YUV_MUL_GCR				(0x02b4)
+#define IPIPE_YUV_MUL_BCR				(0x02b8)
+#define IPIPE_YUV_OFT_Y					(0x02bc)
+#define IPIPE_YUV_OFT_CB				(0x02c0)
+#define IPIPE_YUV_OFT_CR				(0x02c4)
+
+#define IPIPE_YUV_PHS					(0x02c8)
+#define IPIPE_YUV_PHS_LPF				(1 << 1)
+#define IPIPE_YUV_PHS_POS				(1 << 0)
+
+#define IPIPE_YEE_EN					(0x02d4)
+#define IPIPE_YEE_TYP					(0x02d8)
+#define IPIPE_YEE_SHF					(0x02dc)
+#define IPIPE_YEE_MUL_00				(0x02e0)
+#define IPIPE_YEE_MUL_01				(0x02e4)
+#define IPIPE_YEE_MUL_02				(0x02e8)
+#define IPIPE_YEE_MUL_10				(0x02ec)
+#define IPIPE_YEE_MUL_11				(0x02f0)
+#define IPIPE_YEE_MUL_12				(0x02f4)
+#define IPIPE_YEE_MUL_20				(0x02f8)
+#define IPIPE_YEE_MUL_21				(0x02fc)
+#define IPIPE_YEE_MUL_22				(0x0300)
+#define IPIPE_YEE_THR					(0x0304)
+#define IPIPE_YEE_E_GAN					(0x0308)
+#define IPIPE_YEE_E_THR_1				(0x030c)
+#define IPIPE_YEE_E_THR_2				(0x0310)
+#define IPIPE_YEE_G_GAN					(0x0314)
+#define IPIPE_YEE_G_OFT					(0x0318)
+
+#define IPIPE_CAR_EN					(0x031c)
+#define IPIPE_CAR_TYP					(0x0320)
+#define IPIPE_CAR_SW					(0x0324)
+#define IPIPE_CAR_HPF_TYP				(0x0328)
+#define IPIPE_CAR_HPF_SHF				(0x032c)
+#define IPIPE_CAR_HPF_THR				(0x0330)
+#define IPIPE_CAR_GN1_GAN				(0x0334)
+#define IPIPE_CAR_GN1_SHF				(0x0338)
+#define IPIPE_CAR_GN1_MIN				(0x033c)
+#define IPIPE_CAR_GN2_GAN				(0x0340)
+#define IPIPE_CAR_GN2_SHF				(0x0344)
+#define IPIPE_CAR_GN2_MIN				(0x0348)
+#define IPIPE_CGS_EN					(0x034c)
+#define IPIPE_CGS_GN1_L_THR				(0x0350)
+#define IPIPE_CGS_GN1_L_GAIN				(0x0354)
+#define IPIPE_CGS_GN1_L_SHF				(0x0358)
+#define IPIPE_CGS_GN1_L_MIN				(0x035c)
+#define IPIPE_CGS_GN1_H_THR				(0x0360)
+#define IPIPE_CGS_GN1_H_GAIN				(0x0364)
+#define IPIPE_CGS_GN1_H_SHF				(0x0368)
+#define IPIPE_CGS_GN1_H_MIN				(0x036c)
+#define IPIPE_CGS_GN2_L_THR				(0x0370)
+#define IPIPE_CGS_GN2_L_GAIN				(0x0374)
+#define IPIPE_CGS_GN2_L_SHF				(0x0378)
+#define IPIPE_CGS_GN2_L_MIN				(0x037c)
+
+#define IPIPE_BOX_EN					(0x0380)
+#define IPIPE_BOX_MODE					(0x0384)
+#define IPIPE_BOX_TYP					(0x0388)
+#define IPIPE_BOX_SHF					(0x038c)
+#define IPIPE_BOX_SDR_SAD_H				(0x0390)
+#define IPIPE_BOX_SDR_SAD_L				(0x0394)
+
+#define IPIPE_HST_EN					(0x039c)
+#define IPIPE_HST_MODE					(0x03a0)
+#define IPIPE_HST_SEL					(0x03a4)
+#define IPIPE_HST_PARA					(0x03a8)
+#define IPIPE_HST_0_VPS					(0x03ac)
+#define IPIPE_HST_0_VSZ					(0x03b0)
+#define IPIPE_HST_0_HPS					(0x03b4)
+#define IPIPE_HST_0_HSZ					(0x03b8)
+#define IPIPE_HST_1_VPS					(0x03bc)
+#define IPIPE_HST_1_VSZ					(0x03c0)
+#define IPIPE_HST_1_HPS					(0x03c4)
+#define IPIPE_HST_1_HSZ					(0x03c8)
+#define IPIPE_HST_2_VPS					(0x03cc)
+#define IPIPE_HST_2_VSZ					(0x03d0)
+#define IPIPE_HST_2_HPS					(0x03d4)
+#define IPIPE_HST_2_HSZ					(0x03d8)
+#define IPIPE_HST_3_VPS					(0x03dc)
+#define IPIPE_HST_3_VSZ					(0x03e0)
+#define IPIPE_HST_3_HPS					(0x03e4)
+#define IPIPE_HST_3_HSZ					(0x03e8)
+#define IPIPE_HST_TBL					(0x03ec)
+#define IPIPE_HST_MUL_R					(0x03f0)
+#define IPIPE_HST_MUL_GR				(0x03f4)
+#define IPIPE_HST_MUL_GB				(0x03f8)
+#define IPIPE_HST_MUL_B					(0x03fc)
+
+#define IPIPE_BSC_EN					(0x0400)
+#define IPIPE_BSC_MODE					(0x0404)
+#define IPIPE_BSC_TYP					(0x0408)
+#define IPIPE_BSC_ROW_VCT				(0x040c)
+#define IPIPE_BSC_ROW_SHF				(0x0410)
+#define IPIPE_BSC_ROW_VPO				(0x0414)
+#define IPIPE_BSC_ROW_VNU				(0x0418)
+#define IPIPE_BSC_ROW_VSKIP				(0x041c)
+#define IPIPE_BSC_ROW_HPO				(0x0420)
+#define IPIPE_BSC_ROW_HNU				(0x0424)
+#define IPIPE_BSC_ROW_HSKIP				(0x0428)
+#define IPIPE_BSC_COL_VCT				(0x042c)
+#define IPIPE_BSC_COL_SHF				(0x0430)
+#define IPIPE_BSC_COL_VPO				(0x0434)
+#define IPIPE_BSC_COL_VNU				(0x0438)
+#define IPIPE_BSC_COL_VSKIP				(0x043c)
+#define IPIPE_BSC_COL_HPO				(0x0440)
+#define IPIPE_BSC_COL_HNU				(0x0444)
+#define IPIPE_BSC_COL_HSKIP				(0x0448)
+
+#define IPIPE_BSC_EN					(0x0400)
+
+/* ISS ISP Resizer register offsets */
+#define RSZ_REVISION					(0x0000)
+#define RSZ_SYSCONFIG					(0x0004)
+#define RSZ_SYSCONFIG_RSZB_CLK_EN			(1 << 9)
+#define RSZ_SYSCONFIG_RSZA_CLK_EN			(1 << 8)
+
+#define RSZ_IN_FIFO_CTRL				(0x000c)
+#define RSZ_IN_FIFO_CTRL_THRLD_LOW_MASK			(0x1ff << 16)
+#define RSZ_IN_FIFO_CTRL_THRLD_LOW_SHIFT		16
+#define RSZ_IN_FIFO_CTRL_THRLD_HIGH_MASK		(0x1ff << 0)
+#define RSZ_IN_FIFO_CTRL_THRLD_HIGH_SHIFT		0
+
+#define RSZ_FRACDIV					(0x0008)
+#define RSZ_FRACDIV_MASK				(0xffff)
+
+#define RSZ_SRC_EN					(0x0020)
+#define RSZ_SRC_EN_SRC_EN				(1 << 0)
+
+#define RSZ_SRC_MODE					(0x0024)
+#define RSZ_SRC_MODE_OST				(1 << 0)
+#define RSZ_SRC_MODE_WRT				(1 << 1)
+
+#define RSZ_SRC_FMT0					(0x0028)
+#define RSZ_SRC_FMT0_BYPASS				(1 << 1)
+#define RSZ_SRC_FMT0_SEL				(1 << 0)
+
+#define RSZ_SRC_FMT1					(0x002c)
+#define RSZ_SRC_FMT1_IN420				(1 << 1)
+
+#define RSZ_SRC_VPS					(0x0030)
+#define RSZ_SRC_VSZ					(0x0034)
+#define RSZ_SRC_HPS					(0x0038)
+#define RSZ_SRC_HSZ					(0x003c)
+#define RSZ_DMA_RZA					(0x0040)
+#define RSZ_DMA_RZB					(0x0044)
+#define RSZ_DMA_STA					(0x0048)
+#define RSZ_GCK_MMR					(0x004c)
+#define RSZ_GCK_MMR_MMR					(1 << 0)
+
+#define RSZ_GCK_SDR					(0x0054)
+#define RSZ_GCK_SDR_CORE				(1 << 0)
+
+#define RSZ_IRQ_RZA					(0x0058)
+#define RSZ_IRQ_RZA_MASK				(0x1fff)
+
+#define RSZ_IRQ_RZB					(0x005c)
+#define RSZ_IRQ_RZB_MASK				(0x1fff)
+
+#define RSZ_YUV_Y_MIN					(0x0060)
+#define RSZ_YUV_Y_MAX					(0x0064)
+#define RSZ_YUV_C_MIN					(0x0068)
+#define RSZ_YUV_C_MAX					(0x006c)
+
+#define RSZ_SEQ						(0x0074)
+#define RSZ_SEQ_HRVB					(1 << 2)
+#define RSZ_SEQ_HRVA					(1 << 0)
+
+#define RZA_EN						(0x0078)
+#define RZA_MODE					(0x007c)
+#define RZA_MODE_ONE_SHOT				(1 << 0)
+
+#define RZA_420						(0x0080)
+#define RZA_I_VPS					(0x0084)
+#define RZA_I_HPS					(0x0088)
+#define RZA_O_VSZ					(0x008c)
+#define RZA_O_HSZ					(0x0090)
+#define RZA_V_PHS_Y					(0x0094)
+#define RZA_V_PHS_C					(0x0098)
+#define RZA_V_DIF					(0x009c)
+#define RZA_V_TYP					(0x00a0)
+#define RZA_V_LPF					(0x00a4)
+#define RZA_H_PHS					(0x00a8)
+#define RZA_H_DIF					(0x00b0)
+#define RZA_H_TYP					(0x00b4)
+#define RZA_H_LPF					(0x00b8)
+#define RZA_DWN_EN					(0x00bc)
+#define RZA_SDR_Y_BAD_H					(0x00d0)
+#define RZA_SDR_Y_BAD_L					(0x00d4)
+#define RZA_SDR_Y_SAD_H					(0x00d8)
+#define RZA_SDR_Y_SAD_L					(0x00dc)
+#define RZA_SDR_Y_OFT					(0x00e0)
+#define RZA_SDR_Y_PTR_S					(0x00e4)
+#define RZA_SDR_Y_PTR_E					(0x00e8)
+#define RZA_SDR_C_BAD_H					(0x00ec)
+#define RZA_SDR_C_BAD_L					(0x00f0)
+#define RZA_SDR_C_SAD_H					(0x00f4)
+#define RZA_SDR_C_SAD_L					(0x00f8)
+#define RZA_SDR_C_OFT					(0x00fc)
+#define RZA_SDR_C_PTR_S					(0x0100)
+#define RZA_SDR_C_PTR_E					(0x0104)
+
+#define RZB_EN						(0x0108)
+#define RZB_MODE					(0x010c)
+#define RZB_420						(0x0110)
+#define RZB_I_VPS					(0x0114)
+#define RZB_I_HPS					(0x0118)
+#define RZB_O_VSZ					(0x011c)
+#define RZB_O_HSZ					(0x0120)
+
+#define RZB_V_DIF					(0x012c)
+#define RZB_V_TYP					(0x0130)
+#define RZB_V_LPF					(0x0134)
+
+#define RZB_H_DIF					(0x0140)
+#define RZB_H_TYP					(0x0144)
+#define RZB_H_LPF					(0x0148)
+
+#define RZB_SDR_Y_BAD_H					(0x0160)
+#define RZB_SDR_Y_BAD_L					(0x0164)
+#define RZB_SDR_Y_SAD_H					(0x0168)
+#define RZB_SDR_Y_SAD_L					(0x016c)
+#define RZB_SDR_Y_OFT					(0x0170)
+#define RZB_SDR_Y_PTR_S					(0x0174)
+#define RZB_SDR_Y_PTR_E					(0x0178)
+#define RZB_SDR_C_BAD_H					(0x017c)
+#define RZB_SDR_C_BAD_L					(0x0180)
+#define RZB_SDR_C_SAD_H					(0x0184)
+#define RZB_SDR_C_SAD_L					(0x0188)
+
+#define RZB_SDR_C_PTR_S					(0x0190)
+#define RZB_SDR_C_PTR_E					(0x0194)
+
+/* Shared Bitmasks between RZA & RZB */
+#define RSZ_EN_EN					(1 << 0)
+
+#define RSZ_420_CEN					(1 << 1)
+#define RSZ_420_YEN					(1 << 0)
+
+#define RSZ_I_VPS_MASK					(0x1fff)
+
+#define RSZ_I_HPS_MASK					(0x1fff)
+
+#define RSZ_O_VSZ_MASK					(0x1fff)
+
+#define RSZ_O_HSZ_MASK					(0x1ffe)
+
+#define RSZ_V_PHS_Y_MASK				(0x3fff)
+
+#define RSZ_V_PHS_C_MASK				(0x3fff)
+
+#define RSZ_V_DIF_MASK					(0x3fff)
+
+#define RSZ_V_TYP_C					(1 << 1)
+#define RSZ_V_TYP_Y					(1 << 0)
+
+#define RSZ_V_LPF_C_MASK				(0x3f << 6)
+#define RSZ_V_LPF_C_SHIFT				6
+#define RSZ_V_LPF_Y_MASK				(0x3f << 0)
+#define RSZ_V_LPF_Y_SHIFT				0
+
+#define RSZ_H_PHS_MASK					(0x3fff)
+
+#define RSZ_H_DIF_MASK					(0x3fff)
+
+#define RSZ_H_TYP_C					(1 << 1)
+#define RSZ_H_TYP_Y					(1 << 0)
+
+#define RSZ_H_LPF_C_MASK				(0x3f << 6)
+#define RSZ_H_LPF_C_SHIFT				6
+#define RSZ_H_LPF_Y_MASK				(0x3f << 0)
+#define RSZ_H_LPF_Y_SHIFT				0
+
+#define RSZ_DWN_EN_DWN_EN				(1 << 0)
+
+#endif /* _OMAP4_ISS_REGS_H_ */
diff --git a/drivers/staging/media/omap4iss/iss_resizer.c b/drivers/staging/media/omap4iss/iss_resizer.c
new file mode 100644
index 0000000..ae831b8
--- /dev/null
+++ b/drivers/staging/media/omap4iss/iss_resizer.c
@@ -0,0 +1,893 @@
+/*
+ * TI OMAP4 ISS V4L2 Driver - ISP RESIZER module
+ *
+ * Copyright (C) 2012 Texas Instruments, Inc.
+ *
+ * Author: Sergio Aguirre <sergio.a.aguirre@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include <linux/module.h>
+#include <linux/uaccess.h>
+#include <linux/delay.h>
+#include <linux/device.h>
+#include <linux/dma-mapping.h>
+#include <linux/mm.h>
+#include <linux/sched.h>
+
+#include "iss.h"
+#include "iss_regs.h"
+#include "iss_resizer.h"
+
+static const unsigned int resizer_fmts[] = {
+	V4L2_MBUS_FMT_UYVY8_1X16,
+	V4L2_MBUS_FMT_YUYV8_1X16,
+};
+
+/*
+ * resizer_print_status - Print current RESIZER Module register values.
+ * @resizer: Pointer to ISS ISP RESIZER device.
+ *
+ * Also prints other debug information stored in the RESIZER module.
+ */
+#define RSZ_PRINT_REGISTER(iss, name)\
+	dev_dbg(iss->dev, "###RSZ " #name "=0x%08x\n", \
+		iss_reg_read(iss, OMAP4_ISS_MEM_ISP_RESIZER, RSZ_##name))
+
+#define RZA_PRINT_REGISTER(iss, name)\
+	dev_dbg(iss->dev, "###RZA " #name "=0x%08x\n", \
+		iss_reg_read(iss, OMAP4_ISS_MEM_ISP_RESIZER, RZA_##name))
+
+static void resizer_print_status(struct iss_resizer_device *resizer)
+{
+	struct iss_device *iss = to_iss_device(resizer);
+
+	dev_dbg(iss->dev, "-------------RESIZER Register dump-------------\n");
+
+	RSZ_PRINT_REGISTER(iss, SYSCONFIG);
+	RSZ_PRINT_REGISTER(iss, IN_FIFO_CTRL);
+	RSZ_PRINT_REGISTER(iss, FRACDIV);
+	RSZ_PRINT_REGISTER(iss, SRC_EN);
+	RSZ_PRINT_REGISTER(iss, SRC_MODE);
+	RSZ_PRINT_REGISTER(iss, SRC_FMT0);
+	RSZ_PRINT_REGISTER(iss, SRC_FMT1);
+	RSZ_PRINT_REGISTER(iss, SRC_VPS);
+	RSZ_PRINT_REGISTER(iss, SRC_VSZ);
+	RSZ_PRINT_REGISTER(iss, SRC_HPS);
+	RSZ_PRINT_REGISTER(iss, SRC_HSZ);
+	RSZ_PRINT_REGISTER(iss, DMA_RZA);
+	RSZ_PRINT_REGISTER(iss, DMA_RZB);
+	RSZ_PRINT_REGISTER(iss, DMA_STA);
+	RSZ_PRINT_REGISTER(iss, GCK_MMR);
+	RSZ_PRINT_REGISTER(iss, GCK_SDR);
+	RSZ_PRINT_REGISTER(iss, IRQ_RZA);
+	RSZ_PRINT_REGISTER(iss, IRQ_RZB);
+	RSZ_PRINT_REGISTER(iss, YUV_Y_MIN);
+	RSZ_PRINT_REGISTER(iss, YUV_Y_MAX);
+	RSZ_PRINT_REGISTER(iss, YUV_C_MIN);
+	RSZ_PRINT_REGISTER(iss, YUV_C_MAX);
+	RSZ_PRINT_REGISTER(iss, SEQ);
+
+	RZA_PRINT_REGISTER(iss, EN);
+	RZA_PRINT_REGISTER(iss, MODE);
+	RZA_PRINT_REGISTER(iss, 420);
+	RZA_PRINT_REGISTER(iss, I_VPS);
+	RZA_PRINT_REGISTER(iss, I_HPS);
+	RZA_PRINT_REGISTER(iss, O_VSZ);
+	RZA_PRINT_REGISTER(iss, O_HSZ);
+	RZA_PRINT_REGISTER(iss, V_PHS_Y);
+	RZA_PRINT_REGISTER(iss, V_PHS_C);
+	RZA_PRINT_REGISTER(iss, V_DIF);
+	RZA_PRINT_REGISTER(iss, V_TYP);
+	RZA_PRINT_REGISTER(iss, V_LPF);
+	RZA_PRINT_REGISTER(iss, H_PHS);
+	RZA_PRINT_REGISTER(iss, H_DIF);
+	RZA_PRINT_REGISTER(iss, H_TYP);
+	RZA_PRINT_REGISTER(iss, H_LPF);
+	RZA_PRINT_REGISTER(iss, DWN_EN);
+	RZA_PRINT_REGISTER(iss, SDR_Y_BAD_H);
+	RZA_PRINT_REGISTER(iss, SDR_Y_BAD_L);
+	RZA_PRINT_REGISTER(iss, SDR_Y_SAD_H);
+	RZA_PRINT_REGISTER(iss, SDR_Y_SAD_L);
+	RZA_PRINT_REGISTER(iss, SDR_Y_OFT);
+	RZA_PRINT_REGISTER(iss, SDR_Y_PTR_S);
+	RZA_PRINT_REGISTER(iss, SDR_Y_PTR_E);
+	RZA_PRINT_REGISTER(iss, SDR_C_BAD_H);
+	RZA_PRINT_REGISTER(iss, SDR_C_BAD_L);
+	RZA_PRINT_REGISTER(iss, SDR_C_SAD_H);
+	RZA_PRINT_REGISTER(iss, SDR_C_SAD_L);
+	RZA_PRINT_REGISTER(iss, SDR_C_OFT);
+	RZA_PRINT_REGISTER(iss, SDR_C_PTR_S);
+	RZA_PRINT_REGISTER(iss, SDR_C_PTR_E);
+
+	dev_dbg(iss->dev, "-----------------------------------------------\n");
+}
+
+/*
+ * resizer_enable - Enable/Disable RESIZER.
+ * @resizer: Pointer to ISS ISP RESIZER device.
+ * @enable: enable flag
+ */
+static void resizer_enable(struct iss_resizer_device *resizer, u8 enable)
+{
+	struct iss_device *iss = to_iss_device(resizer);
+
+	iss_reg_update(iss, OMAP4_ISS_MEM_ISP_RESIZER, RSZ_SRC_EN,
+		       RSZ_SRC_EN_SRC_EN, enable ? RSZ_SRC_EN_SRC_EN : 0);
+
+	/* TODO: Enable RSZB */
+	iss_reg_update(iss, OMAP4_ISS_MEM_ISP_RESIZER, RZA_EN, RSZ_EN_EN,
+		       enable ? RSZ_EN_EN : 0);
+}
+
+/* -----------------------------------------------------------------------------
+ * Format- and pipeline-related configuration helpers
+ */
+
+/*
+ * resizer_set_outaddr - Set memory address to save output image
+ * @resizer: Pointer to ISP RESIZER device.
+ * @addr: 32-bit memory address aligned on 32 byte boundary.
+ *
+ * Sets the memory address where the output will be saved.
+ */
+static void resizer_set_outaddr(struct iss_resizer_device *resizer, u32 addr)
+{
+	struct iss_device *iss = to_iss_device(resizer);
+	struct v4l2_mbus_framefmt *informat, *outformat;
+
+	informat = &resizer->formats[RESIZER_PAD_SINK];
+	outformat = &resizer->formats[RESIZER_PAD_SOURCE_MEM];
+
+	/* Save the address, split into the Base Address H & L registers */
+	iss_reg_write(iss, OMAP4_ISS_MEM_ISP_RESIZER, RZA_SDR_Y_BAD_H,
+		      (addr >> 16) & 0xffff);
+	iss_reg_write(iss, OMAP4_ISS_MEM_ISP_RESIZER, RZA_SDR_Y_BAD_L,
+		      addr & 0xffff);
+
+	/* SAD = BAD */
+	iss_reg_write(iss, OMAP4_ISS_MEM_ISP_RESIZER, RZA_SDR_Y_SAD_H,
+		      (addr >> 16) & 0xffff);
+	iss_reg_write(iss, OMAP4_ISS_MEM_ISP_RESIZER, RZA_SDR_Y_SAD_L,
+		      addr & 0xffff);
+
+	/* Program the UV buffer address; hardcoded to be contiguous with Y. */
+	if ((informat->code == V4L2_MBUS_FMT_UYVY8_1X16) &&
+	    (outformat->code == V4L2_MBUS_FMT_YUYV8_1_5X8)) {
+		u32 c_addr = addr + (resizer->video_out.bpl_value *
+				     (outformat->height - 1));
+
+		/* Ensure Y_BAD_L[6:0] = C_BAD_L[6:0]*/
+		if ((c_addr ^ addr) & 0x7f) {
+			c_addr &= ~0x7f;
+			c_addr += 0x80;
+			c_addr |= addr & 0x7f;
+		}
+
+		/* Save the address, split into the Base Address H & L registers */
+		iss_reg_write(iss, OMAP4_ISS_MEM_ISP_RESIZER, RZA_SDR_C_BAD_H,
+			      (c_addr >> 16) & 0xffff);
+		iss_reg_write(iss, OMAP4_ISS_MEM_ISP_RESIZER, RZA_SDR_C_BAD_L,
+			      c_addr & 0xffff);
+
+		/* SAD = BAD */
+		iss_reg_write(iss, OMAP4_ISS_MEM_ISP_RESIZER, RZA_SDR_C_SAD_H,
+			      (c_addr >> 16) & 0xffff);
+		iss_reg_write(iss, OMAP4_ISS_MEM_ISP_RESIZER, RZA_SDR_C_SAD_L,
+			      c_addr & 0xffff);
+	}
+}
+
+static void resizer_configure(struct iss_resizer_device *resizer)
+{
+	struct iss_device *iss = to_iss_device(resizer);
+	struct v4l2_mbus_framefmt *informat, *outformat;
+
+	informat = &resizer->formats[RESIZER_PAD_SINK];
+	outformat = &resizer->formats[RESIZER_PAD_SOURCE_MEM];
+
+	/* Disable pass-through mode. Despite its name, the BYPASS bit controls
+	 * pass-through mode, not bypass mode.
+	 */
+	iss_reg_clr(iss, OMAP4_ISS_MEM_ISP_RESIZER, RSZ_SRC_FMT0,
+		    RSZ_SRC_FMT0_BYPASS);
+
+	/* Select RSZ input */
+	iss_reg_update(iss, OMAP4_ISS_MEM_ISP_RESIZER, RSZ_SRC_FMT0,
+		       RSZ_SRC_FMT0_SEL,
+		       resizer->input == RESIZER_INPUT_IPIPEIF ?
+		       RSZ_SRC_FMT0_SEL : 0);
+
+	/* RSZ ignores WEN signal from IPIPE/IPIPEIF */
+	iss_reg_clr(iss, OMAP4_ISS_MEM_ISP_RESIZER, RSZ_SRC_MODE,
+		    RSZ_SRC_MODE_WRT);
+
+	/* Set Resizer in free-running mode */
+	iss_reg_clr(iss, OMAP4_ISS_MEM_ISP_RESIZER, RSZ_SRC_MODE,
+		    RSZ_SRC_MODE_OST);
+
+	/* Init Resizer A */
+	iss_reg_clr(iss, OMAP4_ISS_MEM_ISP_RESIZER, RZA_MODE,
+		    RZA_MODE_ONE_SHOT);
+
+	/* Configure the input and output frame sizes */
+	iss_reg_write(iss, OMAP4_ISS_MEM_ISP_RESIZER, RSZ_SRC_VPS, 0);
+	iss_reg_write(iss, OMAP4_ISS_MEM_ISP_RESIZER, RSZ_SRC_HPS, 0);
+	iss_reg_write(iss, OMAP4_ISS_MEM_ISP_RESIZER, RSZ_SRC_VSZ,
+		      informat->height - 2);
+	iss_reg_write(iss, OMAP4_ISS_MEM_ISP_RESIZER, RSZ_SRC_HSZ,
+		      informat->width - 1);
+
+	iss_reg_write(iss, OMAP4_ISS_MEM_ISP_RESIZER, RZA_I_VPS, 0);
+	iss_reg_write(iss, OMAP4_ISS_MEM_ISP_RESIZER, RZA_I_HPS, 0);
+
+	iss_reg_write(iss, OMAP4_ISS_MEM_ISP_RESIZER, RZA_O_VSZ,
+		      outformat->height - 2);
+	iss_reg_write(iss, OMAP4_ISS_MEM_ISP_RESIZER, RZA_O_HSZ,
+		      outformat->width - 1);
+
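+	/* A value of 0x100 (256/256) appears to keep resizer A at a 1:1 ratio. */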
+	iss_reg_write(iss, OMAP4_ISS_MEM_ISP_RESIZER, RZA_V_DIF, 0x100);
+	iss_reg_write(iss, OMAP4_ISS_MEM_ISP_RESIZER, RZA_H_DIF, 0x100);
+
+	/* Buffer output settings */
+	iss_reg_write(iss, OMAP4_ISS_MEM_ISP_RESIZER, RZA_SDR_Y_PTR_S, 0);
+	iss_reg_write(iss, OMAP4_ISS_MEM_ISP_RESIZER, RZA_SDR_Y_PTR_E,
+		      outformat->height - 1);
+
+	iss_reg_write(iss, OMAP4_ISS_MEM_ISP_RESIZER, RZA_SDR_Y_OFT,
+		      resizer->video_out.bpl_value);
+
+	/* UYVY -> NV12 conversion */
+	if ((informat->code == V4L2_MBUS_FMT_UYVY8_1X16) &&
+	    (outformat->code == V4L2_MBUS_FMT_YUYV8_1_5X8)) {
+		iss_reg_write(iss, OMAP4_ISS_MEM_ISP_RESIZER, RZA_420,
+			      RSZ_420_CEN | RSZ_420_YEN);
+
+		/* UV Buffer output settings */
+		iss_reg_write(iss, OMAP4_ISS_MEM_ISP_RESIZER, RZA_SDR_C_PTR_S,
+			      0);
+		iss_reg_write(iss, OMAP4_ISS_MEM_ISP_RESIZER, RZA_SDR_C_PTR_E,
+			      outformat->height - 1);
+
+		iss_reg_write(iss, OMAP4_ISS_MEM_ISP_RESIZER, RZA_SDR_C_OFT,
+			      resizer->video_out.bpl_value);
+	} else {
+		iss_reg_write(iss, OMAP4_ISS_MEM_ISP_RESIZER, RZA_420, 0);
+	}
+}
+
+/* -----------------------------------------------------------------------------
+ * Interrupt handling
+ */
+
+static void resizer_isr_buffer(struct iss_resizer_device *resizer)
+{
+	struct iss_buffer *buffer;
+
+	/* The whole resizer needs to be stopped. Disabling RZA only produces
+	 * input FIFO overflows, most probably when the next frame is received.
+	 */
+	resizer_enable(resizer, 0);
+
+	buffer = omap4iss_video_buffer_next(&resizer->video_out);
+	if (buffer == NULL)
+		return;
+
+	resizer_set_outaddr(resizer, buffer->iss_addr);
+
+	resizer_enable(resizer, 1);
+}
+
+/*
+ * resizer_int_dma_isr - Handle the RSZ_INT_DMA event
+ * @resizer: Pointer to ISP RESIZER device.
+ *
+ * Updates the pipeline frame number and completes the current buffer,
+ * restarting the resizer on the next queued buffer if one is available.
+ */
+static void resizer_int_dma_isr(struct iss_resizer_device *resizer)
+{
+	struct iss_pipeline *pipe =
+			     to_iss_pipeline(&resizer->subdev.entity);
+	if (pipe->do_propagation)
+		atomic_inc(&pipe->frame_number);
+
+	resizer_isr_buffer(resizer);
+}
+
+/*
+ * omap4iss_resizer_isr - Handle RESIZER interrupt events.
+ * @resizer: Pointer to ISP RESIZER device.
+ * @events: RESIZER events
+ */
+void omap4iss_resizer_isr(struct iss_resizer_device *resizer, u32 events)
+{
+	struct iss_device *iss = to_iss_device(resizer);
+	struct iss_pipeline *pipe =
+			     to_iss_pipeline(&resizer->subdev.entity);
+
+	if (events & (ISP5_IRQ_RSZ_FIFO_IN_BLK_ERR |
+		      ISP5_IRQ_RSZ_FIFO_OVF)) {
+		dev_dbg(iss->dev, "RSZ Err: FIFO_IN_BLK:%d, FIFO_OVF:%d\n",
+			events & ISP5_IRQ_RSZ_FIFO_IN_BLK_ERR ? 1 : 0,
+			events & ISP5_IRQ_RSZ_FIFO_OVF ? 1 : 0);
+		omap4iss_pipeline_cancel_stream(pipe);
+	}
+
+	if (omap4iss_module_sync_is_stopping(&resizer->wait,
+					     &resizer->stopping))
+		return;
+
+	if (events & ISP5_IRQ_RSZ_INT_DMA)
+		resizer_int_dma_isr(resizer);
+}
+
+/* -----------------------------------------------------------------------------
+ * ISS video operations
+ */
+
+static int resizer_video_queue(struct iss_video *video,
+			       struct iss_buffer *buffer)
+{
+	struct iss_resizer_device *resizer = container_of(video,
+				struct iss_resizer_device, video_out);
+
+	if (!(resizer->output & RESIZER_OUTPUT_MEMORY))
+		return -ENODEV;
+
+	resizer_set_outaddr(resizer, buffer->iss_addr);
+
+	/*
+	 * If streaming was enabled before there was a buffer queued
+	 * or an underrun happened in the ISR, the hardware was not enabled
+	 * and the DMA queue flag ISS_VIDEO_DMAQUEUE_UNDERRUN is still set.
+	 * Enable it now.
+	 */
+	if (video->dmaqueue_flags & ISS_VIDEO_DMAQUEUE_UNDERRUN) {
+		resizer_enable(resizer, 1);
+		iss_video_dmaqueue_flags_clr(video);
+	}
+
+	return 0;
+}
+
+static const struct iss_video_operations resizer_video_ops = {
+	.queue = resizer_video_queue,
+};
+
+/* -----------------------------------------------------------------------------
+ * V4L2 subdev operations
+ */
+
+/*
+ * resizer_set_stream - Enable/Disable streaming on the RESIZER module
+ * @sd: ISP RESIZER V4L2 subdevice
+ * @enable: Enable/disable stream
+ */
+static int resizer_set_stream(struct v4l2_subdev *sd, int enable)
+{
+	struct iss_resizer_device *resizer = v4l2_get_subdevdata(sd);
+	struct iss_device *iss = to_iss_device(resizer);
+	struct iss_video *video_out = &resizer->video_out;
+	int ret = 0;
+
+	if (resizer->state == ISS_PIPELINE_STREAM_STOPPED) {
+		if (enable == ISS_PIPELINE_STREAM_STOPPED)
+			return 0;
+
+		omap4iss_isp_subclk_enable(iss, OMAP4_ISS_ISP_SUBCLK_RSZ);
+
+		iss_reg_set(iss, OMAP4_ISS_MEM_ISP_RESIZER, RSZ_GCK_MMR,
+			    RSZ_GCK_MMR_MMR);
+		iss_reg_set(iss, OMAP4_ISS_MEM_ISP_RESIZER, RSZ_GCK_SDR,
+			    RSZ_GCK_SDR_CORE);
+
+		/* FIXME: Enable RSZB also */
+		iss_reg_set(iss, OMAP4_ISS_MEM_ISP_RESIZER, RSZ_SYSCONFIG,
+			    RSZ_SYSCONFIG_RSZA_CLK_EN);
+	}
+
+	switch (enable) {
+	case ISS_PIPELINE_STREAM_CONTINUOUS:
+
+		resizer_configure(resizer);
+		resizer_print_status(resizer);
+
+		/*
+		 * When outputting to memory with no buffer available, let the
+		 * buffer queue handler start the hardware. A DMA queue flag
+		 * ISS_VIDEO_DMAQUEUE_QUEUED will be set as soon as there is
+		 * a buffer available.
+		 */
+		if (resizer->output & RESIZER_OUTPUT_MEMORY &&
+		    !(video_out->dmaqueue_flags & ISS_VIDEO_DMAQUEUE_QUEUED))
+			break;
+
+		atomic_set(&resizer->stopping, 0);
+		resizer_enable(resizer, 1);
+		iss_video_dmaqueue_flags_clr(video_out);
+		break;
+
+	case ISS_PIPELINE_STREAM_STOPPED:
+		if (resizer->state == ISS_PIPELINE_STREAM_STOPPED)
+			return 0;
+		if (omap4iss_module_sync_idle(&sd->entity, &resizer->wait,
+					      &resizer->stopping))
+			ret = -ETIMEDOUT;
+
+		resizer_enable(resizer, 0);
+		iss_reg_clr(iss, OMAP4_ISS_MEM_ISP_RESIZER, RSZ_SYSCONFIG,
+			    RSZ_SYSCONFIG_RSZA_CLK_EN);
+		iss_reg_clr(iss, OMAP4_ISS_MEM_ISP_RESIZER, RSZ_GCK_SDR,
+			    RSZ_GCK_SDR_CORE);
+		iss_reg_clr(iss, OMAP4_ISS_MEM_ISP_RESIZER, RSZ_GCK_MMR,
+			    RSZ_GCK_MMR_MMR);
+		omap4iss_isp_subclk_disable(iss, OMAP4_ISS_ISP_SUBCLK_RSZ);
+		iss_video_dmaqueue_flags_clr(video_out);
+		break;
+	}
+
+	resizer->state = enable;
+	return ret;
+}
+
+static struct v4l2_mbus_framefmt *
+__resizer_get_format(struct iss_resizer_device *resizer,
+		     struct v4l2_subdev_fh *fh, unsigned int pad,
+		     enum v4l2_subdev_format_whence which)
+{
+	if (which == V4L2_SUBDEV_FORMAT_TRY)
+		return v4l2_subdev_get_try_format(fh, pad);
+	else
+		return &resizer->formats[pad];
+}
+
+/*
+ * resizer_try_format - Try video format on a pad
+ * @resizer: ISS RESIZER device
+ * @fh : V4L2 subdev file handle
+ * @pad: Pad number
+ * @fmt: Format
+ */
+static void
+resizer_try_format(struct iss_resizer_device *resizer,
+		   struct v4l2_subdev_fh *fh, unsigned int pad,
+		   struct v4l2_mbus_framefmt *fmt,
+		   enum v4l2_subdev_format_whence which)
+{
+	enum v4l2_mbus_pixelcode pixelcode;
+	struct v4l2_mbus_framefmt *format;
+	unsigned int width = fmt->width;
+	unsigned int height = fmt->height;
+	unsigned int i;
+
+	switch (pad) {
+	case RESIZER_PAD_SINK:
+		for (i = 0; i < ARRAY_SIZE(resizer_fmts); i++) {
+			if (fmt->code == resizer_fmts[i])
+				break;
+		}
+
+		/* If not found, use UYVY as default */
+		if (i >= ARRAY_SIZE(resizer_fmts))
+			fmt->code = V4L2_MBUS_FMT_UYVY8_1X16;
+
+		/* Clamp the input size. */
+		fmt->width = clamp_t(u32, width, 1, 8192);
+		fmt->height = clamp_t(u32, height, 1, 8192);
+		break;
+
+	case RESIZER_PAD_SOURCE_MEM:
+		pixelcode = fmt->code;
+		format = __resizer_get_format(resizer, fh, RESIZER_PAD_SINK,
+					      which);
+		memcpy(fmt, format, sizeof(*fmt));
+
+		if ((pixelcode == V4L2_MBUS_FMT_YUYV8_1_5X8) &&
+		    (fmt->code == V4L2_MBUS_FMT_UYVY8_1X16))
+			fmt->code = pixelcode;
+
+		/* The data formatter truncates the number of horizontal output
+		 * pixels to a multiple of 16. To avoid clipping data, allow
+		 * callers to request an output size bigger than the input size
+		 * up to the nearest multiple of 16.
+		 */
+		fmt->width = clamp_t(u32, width, 32, (fmt->width + 15) & ~15);
+		fmt->width &= ~15;
+		fmt->height = clamp_t(u32, height, 32, fmt->height);
+		break;
+
+	}
+
+	fmt->colorspace = V4L2_COLORSPACE_JPEG;
+	fmt->field = V4L2_FIELD_NONE;
+}
+
+/*
+ * resizer_enum_mbus_code - Handle pixel format enumeration
+ * @sd     : pointer to v4l2 subdev structure
+ * @fh : V4L2 subdev file handle
+ * @code   : pointer to v4l2_subdev_mbus_code_enum structure
+ * return -EINVAL or zero on success
+ */
+static int resizer_enum_mbus_code(struct v4l2_subdev *sd,
+			       struct v4l2_subdev_fh *fh,
+			       struct v4l2_subdev_mbus_code_enum *code)
+{
+	struct iss_resizer_device *resizer = v4l2_get_subdevdata(sd);
+	struct v4l2_mbus_framefmt *format;
+
+	switch (code->pad) {
+	case RESIZER_PAD_SINK:
+		if (code->index >= ARRAY_SIZE(resizer_fmts))
+			return -EINVAL;
+
+		code->code = resizer_fmts[code->index];
+		break;
+
+	case RESIZER_PAD_SOURCE_MEM:
+		format = __resizer_get_format(resizer, fh, RESIZER_PAD_SINK,
+					      V4L2_SUBDEV_FORMAT_TRY);
+
+		if (code->index == 0) {
+			code->code = format->code;
+			break;
+		}
+
+		switch (format->code) {
+		case V4L2_MBUS_FMT_UYVY8_1X16:
+			if (code->index == 1)
+				code->code = V4L2_MBUS_FMT_YUYV8_1_5X8;
+			else
+				return -EINVAL;
+			break;
+		default:
+			if (code->index != 0)
+				return -EINVAL;
+		}
+
+		break;
+
+	default:
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+static int resizer_enum_frame_size(struct v4l2_subdev *sd,
+				struct v4l2_subdev_fh *fh,
+				struct v4l2_subdev_frame_size_enum *fse)
+{
+	struct iss_resizer_device *resizer = v4l2_get_subdevdata(sd);
+	struct v4l2_mbus_framefmt format;
+
+	if (fse->index != 0)
+		return -EINVAL;
+
+	format.code = fse->code;
+	format.width = 1;
+	format.height = 1;
+	resizer_try_format(resizer, fh, fse->pad, &format,
+			   V4L2_SUBDEV_FORMAT_TRY);
+	fse->min_width = format.width;
+	fse->min_height = format.height;
+
+	if (format.code != fse->code)
+		return -EINVAL;
+
+	format.code = fse->code;
+	format.width = -1;
+	format.height = -1;
+	resizer_try_format(resizer, fh, fse->pad, &format,
+			   V4L2_SUBDEV_FORMAT_TRY);
+	fse->max_width = format.width;
+	fse->max_height = format.height;
+
+	return 0;
+}
+
+/*
+ * resizer_get_format - Retrieve the video format on a pad
+ * @sd : ISP RESIZER V4L2 subdevice
+ * @fh : V4L2 subdev file handle
+ * @fmt: Format
+ *
+ * Return 0 on success or -EINVAL if the pad is invalid or doesn't correspond
+ * to the format type.
+ */
+static int resizer_get_format(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh,
+			   struct v4l2_subdev_format *fmt)
+{
+	struct iss_resizer_device *resizer = v4l2_get_subdevdata(sd);
+	struct v4l2_mbus_framefmt *format;
+
+	format = __resizer_get_format(resizer, fh, fmt->pad, fmt->which);
+	if (format == NULL)
+		return -EINVAL;
+
+	fmt->format = *format;
+	return 0;
+}
+
+/*
+ * resizer_set_format - Set the video format on a pad
+ * @sd : ISP RESIZER V4L2 subdevice
+ * @fh : V4L2 subdev file handle
+ * @fmt: Format
+ *
+ * Return 0 on success or -EINVAL if the pad is invalid or doesn't correspond
+ * to the format type.
+ */
+static int resizer_set_format(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh,
+			      struct v4l2_subdev_format *fmt)
+{
+	struct iss_resizer_device *resizer = v4l2_get_subdevdata(sd);
+	struct v4l2_mbus_framefmt *format;
+
+	format = __resizer_get_format(resizer, fh, fmt->pad, fmt->which);
+	if (format == NULL)
+		return -EINVAL;
+
+	resizer_try_format(resizer, fh, fmt->pad, &fmt->format, fmt->which);
+	*format = fmt->format;
+
+	/* Propagate the format from sink to source */
+	if (fmt->pad == RESIZER_PAD_SINK) {
+		format = __resizer_get_format(resizer, fh,
+					      RESIZER_PAD_SOURCE_MEM,
+					      fmt->which);
+		*format = fmt->format;
+		resizer_try_format(resizer, fh, RESIZER_PAD_SOURCE_MEM, format,
+				fmt->which);
+	}
+
+	return 0;
+}
+
+static int resizer_link_validate(struct v4l2_subdev *sd,
+				 struct media_link *link,
+				 struct v4l2_subdev_format *source_fmt,
+				 struct v4l2_subdev_format *sink_fmt)
+{
+	/* Check if the two ends match */
+	if (source_fmt->format.width != sink_fmt->format.width ||
+	    source_fmt->format.height != sink_fmt->format.height)
+		return -EPIPE;
+
+	if (source_fmt->format.code != sink_fmt->format.code)
+		return -EPIPE;
+
+	return 0;
+}
+
+/*
+ * resizer_init_formats - Initialize formats on all pads
+ * @sd: ISP RESIZER V4L2 subdevice
+ * @fh: V4L2 subdev file handle
+ *
+ * Initialize all pad formats with default values. If fh is not NULL, try
+ * formats are initialized on the file handle. Otherwise active formats are
+ * initialized on the device.
+ */
+static int resizer_init_formats(struct v4l2_subdev *sd,
+				struct v4l2_subdev_fh *fh)
+{
+	struct v4l2_subdev_format format;
+
+	memset(&format, 0, sizeof(format));
+	format.pad = RESIZER_PAD_SINK;
+	format.which = fh ? V4L2_SUBDEV_FORMAT_TRY : V4L2_SUBDEV_FORMAT_ACTIVE;
+	format.format.code = V4L2_MBUS_FMT_UYVY8_1X16;
+	format.format.width = 4096;
+	format.format.height = 4096;
+	resizer_set_format(sd, fh, &format);
+
+	return 0;
+}
+
+/* V4L2 subdev video operations */
+static const struct v4l2_subdev_video_ops resizer_v4l2_video_ops = {
+	.s_stream = resizer_set_stream,
+};
+
+/* V4L2 subdev pad operations */
+static const struct v4l2_subdev_pad_ops resizer_v4l2_pad_ops = {
+	.enum_mbus_code = resizer_enum_mbus_code,
+	.enum_frame_size = resizer_enum_frame_size,
+	.get_fmt = resizer_get_format,
+	.set_fmt = resizer_set_format,
+	.link_validate = resizer_link_validate,
+};
+
+/* V4L2 subdev operations */
+static const struct v4l2_subdev_ops resizer_v4l2_ops = {
+	.video = &resizer_v4l2_video_ops,
+	.pad = &resizer_v4l2_pad_ops,
+};
+
+/* V4L2 subdev internal operations */
+static const struct v4l2_subdev_internal_ops resizer_v4l2_internal_ops = {
+	.open = resizer_init_formats,
+};
+
+/* -----------------------------------------------------------------------------
+ * Media entity operations
+ */
+
+/*
+ * resizer_link_setup - Setup RESIZER connections
+ * @entity: RESIZER media entity
+ * @local: Pad at the local end of the link
+ * @remote: Pad at the remote end of the link
+ * @flags: Link flags
+ *
+ * return -EINVAL or zero on success
+ */
+static int resizer_link_setup(struct media_entity *entity,
+			   const struct media_pad *local,
+			   const struct media_pad *remote, u32 flags)
+{
+	struct v4l2_subdev *sd = media_entity_to_v4l2_subdev(entity);
+	struct iss_resizer_device *resizer = v4l2_get_subdevdata(sd);
+	struct iss_device *iss = to_iss_device(resizer);
+
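+	/*
+	 * Combine the local pad index with the remote entity type so that each
+	 * possible link maps to a single switch case below.
+	 */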
+	switch (local->index | media_entity_type(remote->entity)) {
+	case RESIZER_PAD_SINK | MEDIA_ENT_T_V4L2_SUBDEV:
+		/* Read from IPIPE or IPIPEIF. */
+		if (!(flags & MEDIA_LNK_FL_ENABLED)) {
+			resizer->input = RESIZER_INPUT_NONE;
+			break;
+		}
+
+		if (resizer->input != RESIZER_INPUT_NONE)
+			return -EBUSY;
+
+		if (remote->entity == &iss->ipipeif.subdev.entity)
+			resizer->input = RESIZER_INPUT_IPIPEIF;
+		else if (remote->entity == &iss->ipipe.subdev.entity)
+			resizer->input = RESIZER_INPUT_IPIPE;
+
+		break;
+
+	case RESIZER_PAD_SOURCE_MEM | MEDIA_ENT_T_DEVNODE:
+		/* Write to memory */
+		if (flags & MEDIA_LNK_FL_ENABLED) {
+			if (resizer->output & ~RESIZER_OUTPUT_MEMORY)
+				return -EBUSY;
+			resizer->output |= RESIZER_OUTPUT_MEMORY;
+		} else {
+			resizer->output &= ~RESIZER_OUTPUT_MEMORY;
+		}
+		break;
+
+	default:
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+/* media operations */
+static const struct media_entity_operations resizer_media_ops = {
+	.link_setup = resizer_link_setup,
+	.link_validate = v4l2_subdev_link_validate,
+};
+
+/*
+ * resizer_init_entities - Initialize V4L2 subdev and media entity
+ * @resizer: ISS ISP RESIZER module
+ *
+ * Return 0 on success and a negative error code on failure.
+ */
+static int resizer_init_entities(struct iss_resizer_device *resizer)
+{
+	struct v4l2_subdev *sd = &resizer->subdev;
+	struct media_pad *pads = resizer->pads;
+	struct media_entity *me = &sd->entity;
+	int ret;
+
+	resizer->input = RESIZER_INPUT_NONE;
+
+	v4l2_subdev_init(sd, &resizer_v4l2_ops);
+	sd->internal_ops = &resizer_v4l2_internal_ops;
+	strlcpy(sd->name, "OMAP4 ISS ISP resizer", sizeof(sd->name));
+	sd->grp_id = 1 << 16;	/* group ID for iss subdevs */
+	v4l2_set_subdevdata(sd, resizer);
+	sd->flags |= V4L2_SUBDEV_FL_HAS_DEVNODE;
+
+	pads[RESIZER_PAD_SINK].flags = MEDIA_PAD_FL_SINK;
+	pads[RESIZER_PAD_SOURCE_MEM].flags = MEDIA_PAD_FL_SOURCE;
+
+	me->ops = &resizer_media_ops;
+	ret = media_entity_init(me, RESIZER_PADS_NUM, pads, 0);
+	if (ret < 0)
+		return ret;
+
+	resizer_init_formats(sd, NULL);
+
+	resizer->video_out.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
+	resizer->video_out.ops = &resizer_video_ops;
+	resizer->video_out.iss = to_iss_device(resizer);
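+	/* Budget capture memory for up to three maximum-size (4096x4096) frames. */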
+	resizer->video_out.capture_mem = PAGE_ALIGN(4096 * 4096) * 3;
+	resizer->video_out.bpl_alignment = 32;
+	resizer->video_out.bpl_zero_padding = 1;
+	resizer->video_out.bpl_max = 0x1ffe0;
+
+	ret = omap4iss_video_init(&resizer->video_out, "ISP resizer a");
+	if (ret < 0)
+		return ret;
+
+	/* Connect the RESIZER subdev to the video node. */
+	ret = media_entity_create_link(&resizer->subdev.entity,
+				       RESIZER_PAD_SOURCE_MEM,
+				       &resizer->video_out.video.entity, 0, 0);
+	if (ret < 0)
+		return ret;
+
+	return 0;
+}
+
+void omap4iss_resizer_unregister_entities(struct iss_resizer_device *resizer)
+{
+	media_entity_cleanup(&resizer->subdev.entity);
+
+	v4l2_device_unregister_subdev(&resizer->subdev);
+	omap4iss_video_unregister(&resizer->video_out);
+}
+
+int omap4iss_resizer_register_entities(struct iss_resizer_device *resizer,
+	struct v4l2_device *vdev)
+{
+	int ret;
+
+	/* Register the subdev and video node. */
+	ret = v4l2_device_register_subdev(vdev, &resizer->subdev);
+	if (ret < 0)
+		goto error;
+
+	ret = omap4iss_video_register(&resizer->video_out, vdev);
+	if (ret < 0)
+		goto error;
+
+	return 0;
+
+error:
+	omap4iss_resizer_unregister_entities(resizer);
+	return ret;
+}
+
+/* -----------------------------------------------------------------------------
+ * ISP RESIZER initialisation and cleanup
+ */
+
+/*
+ * omap4iss_resizer_init - RESIZER module initialization.
+ * @iss: Device pointer specific to the OMAP4 ISS.
+ *
+ * TODO: Get the initialisation values from platform data.
+ *
+ * Return 0 on success or a negative error code otherwise.
+ */
+int omap4iss_resizer_init(struct iss_device *iss)
+{
+	struct iss_resizer_device *resizer = &iss->resizer;
+
+	resizer->state = ISS_PIPELINE_STREAM_STOPPED;
+	init_waitqueue_head(&resizer->wait);
+
+	return resizer_init_entities(resizer);
+}
+
+/*
+ * omap4iss_resizer_cleanup - RESIZER module cleanup.
+ * @iss: Device pointer specific to the OMAP4 ISS.
+ */
+void omap4iss_resizer_cleanup(struct iss_device *iss)
+{
+	/* FIXME: are you sure there's nothing to do? */
+}
diff --git a/drivers/staging/media/omap4iss/iss_resizer.h b/drivers/staging/media/omap4iss/iss_resizer.h
new file mode 100644
index 0000000..3727498
--- /dev/null
+++ b/drivers/staging/media/omap4iss/iss_resizer.h
@@ -0,0 +1,75 @@
+/*
+ * TI OMAP4 ISS V4L2 Driver - ISP RESIZER module
+ *
+ * Copyright (C) 2012 Texas Instruments, Inc.
+ *
+ * Author: Sergio Aguirre <sergio.a.aguirre@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#ifndef OMAP4_ISS_RESIZER_H
+#define OMAP4_ISS_RESIZER_H
+
+#include "iss_video.h"
+
+enum resizer_input_entity {
+	RESIZER_INPUT_NONE,
+	RESIZER_INPUT_IPIPE,
+	RESIZER_INPUT_IPIPEIF
+};
+
+#define RESIZER_OUTPUT_MEMORY		(1 << 0)
+
+/* Sink and source RESIZER pads */
+#define RESIZER_PAD_SINK			0
+#define RESIZER_PAD_SOURCE_MEM			1
+#define RESIZER_PADS_NUM			2
+
+/*
+ * struct iss_resizer_device - Structure for the RESIZER module to store its own
+ *			    information
+ * @subdev: V4L2 subdevice
+ * @pads: Sink and source media entity pads
+ * @formats: Active video formats
+ * @input: Active input
+ * @output: Active outputs
+ * @video_out: Output video node
+ * @error: A hardware error occurred during capture
+ * @state: Streaming state
+ * @wait: Wait queue used to stop the module
+ * @stopping: Stopping state
+ */
+struct iss_resizer_device {
+	struct v4l2_subdev subdev;
+	struct media_pad pads[RESIZER_PADS_NUM];
+	struct v4l2_mbus_framefmt formats[RESIZER_PADS_NUM];
+
+	enum resizer_input_entity input;
+	unsigned int output;
+	struct iss_video video_out;
+	unsigned int error;
+
+	enum iss_pipeline_stream_state state;
+	wait_queue_head_t wait;
+	atomic_t stopping;
+};
+
+struct iss_device;
+
+int omap4iss_resizer_init(struct iss_device *iss);
+void omap4iss_resizer_cleanup(struct iss_device *iss);
+int omap4iss_resizer_register_entities(struct iss_resizer_device *resizer,
+	struct v4l2_device *vdev);
+void omap4iss_resizer_unregister_entities(struct iss_resizer_device *resizer);
+
+int omap4iss_resizer_busy(struct iss_resizer_device *resizer);
+void omap4iss_resizer_isr(struct iss_resizer_device *resizer, u32 events);
+void omap4iss_resizer_restore_context(struct iss_device *iss);
+void omap4iss_resizer_max_rate(struct iss_resizer_device *resizer,
+	unsigned int *max_rate);
+
+#endif	/* OMAP4_ISS_RESIZER_H */
diff --git a/drivers/staging/media/omap4iss/iss_video.c b/drivers/staging/media/omap4iss/iss_video.c
new file mode 100644
index 0000000..8c7f350
--- /dev/null
+++ b/drivers/staging/media/omap4iss/iss_video.c
@@ -0,0 +1,1226 @@
+/*
+ * TI OMAP4 ISS V4L2 Driver - Generic video node
+ *
+ * Copyright (C) 2012 Texas Instruments, Inc.
+ *
+ * Author: Sergio Aguirre <sergio.a.aguirre@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include <asm/cacheflush.h>
+#include <linux/clk.h>
+#include <linux/mm.h>
+#include <linux/pagemap.h>
+#include <linux/sched.h>
+#include <linux/slab.h>
+#include <linux/vmalloc.h>
+#include <linux/module.h>
+#include <media/v4l2-dev.h>
+#include <media/v4l2-ioctl.h>
+
+#include "iss_video.h"
+#include "iss.h"
+
+
+/* -----------------------------------------------------------------------------
+ * Helper functions
+ */
+
+static struct iss_format_info formats[] = {
+	{ V4L2_MBUS_FMT_Y8_1X8, V4L2_MBUS_FMT_Y8_1X8,
+	  V4L2_MBUS_FMT_Y8_1X8, V4L2_MBUS_FMT_Y8_1X8,
+	  V4L2_PIX_FMT_GREY, 8, "Greyscale 8 bpp", },
+	{ V4L2_MBUS_FMT_Y10_1X10, V4L2_MBUS_FMT_Y10_1X10,
+	  V4L2_MBUS_FMT_Y10_1X10, V4L2_MBUS_FMT_Y8_1X8,
+	  V4L2_PIX_FMT_Y10, 10, "Greyscale 10 bpp", },
+	{ V4L2_MBUS_FMT_Y12_1X12, V4L2_MBUS_FMT_Y10_1X10,
+	  V4L2_MBUS_FMT_Y12_1X12, V4L2_MBUS_FMT_Y8_1X8,
+	  V4L2_PIX_FMT_Y12, 12, "Greyscale 12 bpp", },
+	{ V4L2_MBUS_FMT_SBGGR8_1X8, V4L2_MBUS_FMT_SBGGR8_1X8,
+	  V4L2_MBUS_FMT_SBGGR8_1X8, V4L2_MBUS_FMT_SBGGR8_1X8,
+	  V4L2_PIX_FMT_SBGGR8, 8, "BGGR Bayer 8 bpp", },
+	{ V4L2_MBUS_FMT_SGBRG8_1X8, V4L2_MBUS_FMT_SGBRG8_1X8,
+	  V4L2_MBUS_FMT_SGBRG8_1X8, V4L2_MBUS_FMT_SGBRG8_1X8,
+	  V4L2_PIX_FMT_SGBRG8, 8, "GBRG Bayer 8 bpp", },
+	{ V4L2_MBUS_FMT_SGRBG8_1X8, V4L2_MBUS_FMT_SGRBG8_1X8,
+	  V4L2_MBUS_FMT_SGRBG8_1X8, V4L2_MBUS_FMT_SGRBG8_1X8,
+	  V4L2_PIX_FMT_SGRBG8, 8, "GRBG Bayer 8 bpp", },
+	{ V4L2_MBUS_FMT_SRGGB8_1X8, V4L2_MBUS_FMT_SRGGB8_1X8,
+	  V4L2_MBUS_FMT_SRGGB8_1X8, V4L2_MBUS_FMT_SRGGB8_1X8,
+	  V4L2_PIX_FMT_SRGGB8, 8, "RGGB Bayer 8 bpp", },
+	{ V4L2_MBUS_FMT_SGRBG10_DPCM8_1X8, V4L2_MBUS_FMT_SGRBG10_DPCM8_1X8,
+	  V4L2_MBUS_FMT_SGRBG10_1X10, 0,
+	  V4L2_PIX_FMT_SGRBG10DPCM8, 8, "GRBG Bayer 10 bpp DPCM8",  },
+	{ V4L2_MBUS_FMT_SBGGR10_1X10, V4L2_MBUS_FMT_SBGGR10_1X10,
+	  V4L2_MBUS_FMT_SBGGR10_1X10, V4L2_MBUS_FMT_SBGGR8_1X8,
+	  V4L2_PIX_FMT_SBGGR10, 10, "BGGR Bayer 10 bpp", },
+	{ V4L2_MBUS_FMT_SGBRG10_1X10, V4L2_MBUS_FMT_SGBRG10_1X10,
+	  V4L2_MBUS_FMT_SGBRG10_1X10, V4L2_MBUS_FMT_SGBRG8_1X8,
+	  V4L2_PIX_FMT_SGBRG10, 10, "GBRG Bayer 10 bpp", },
+	{ V4L2_MBUS_FMT_SGRBG10_1X10, V4L2_MBUS_FMT_SGRBG10_1X10,
+	  V4L2_MBUS_FMT_SGRBG10_1X10, V4L2_MBUS_FMT_SGRBG8_1X8,
+	  V4L2_PIX_FMT_SGRBG10, 10, "GRBG Bayer 10 bpp", },
+	{ V4L2_MBUS_FMT_SRGGB10_1X10, V4L2_MBUS_FMT_SRGGB10_1X10,
+	  V4L2_MBUS_FMT_SRGGB10_1X10, V4L2_MBUS_FMT_SRGGB8_1X8,
+	  V4L2_PIX_FMT_SRGGB10, 10, "RGGB Bayer 10 bpp", },
+	{ V4L2_MBUS_FMT_SBGGR12_1X12, V4L2_MBUS_FMT_SBGGR10_1X10,
+	  V4L2_MBUS_FMT_SBGGR12_1X12, V4L2_MBUS_FMT_SBGGR8_1X8,
+	  V4L2_PIX_FMT_SBGGR12, 12, "BGGR Bayer 12 bpp", },
+	{ V4L2_MBUS_FMT_SGBRG12_1X12, V4L2_MBUS_FMT_SGBRG10_1X10,
+	  V4L2_MBUS_FMT_SGBRG12_1X12, V4L2_MBUS_FMT_SGBRG8_1X8,
+	  V4L2_PIX_FMT_SGBRG12, 12, "GBRG Bayer 12 bpp", },
+	{ V4L2_MBUS_FMT_SGRBG12_1X12, V4L2_MBUS_FMT_SGRBG10_1X10,
+	  V4L2_MBUS_FMT_SGRBG12_1X12, V4L2_MBUS_FMT_SGRBG8_1X8,
+	  V4L2_PIX_FMT_SGRBG12, 12, "GRBG Bayer 12 bpp", },
+	{ V4L2_MBUS_FMT_SRGGB12_1X12, V4L2_MBUS_FMT_SRGGB10_1X10,
+	  V4L2_MBUS_FMT_SRGGB12_1X12, V4L2_MBUS_FMT_SRGGB8_1X8,
+	  V4L2_PIX_FMT_SRGGB12, 12, "RGGB Bayer 12 bpp", },
+	{ V4L2_MBUS_FMT_UYVY8_1X16, V4L2_MBUS_FMT_UYVY8_1X16,
+	  V4L2_MBUS_FMT_UYVY8_1X16, 0,
+	  V4L2_PIX_FMT_UYVY, 16, "YUV 4:2:2 (UYVY)", },
+	{ V4L2_MBUS_FMT_YUYV8_1X16, V4L2_MBUS_FMT_YUYV8_1X16,
+	  V4L2_MBUS_FMT_YUYV8_1X16, 0,
+	  V4L2_PIX_FMT_YUYV, 16, "YUV 4:2:2 (YUYV)", },
+	{ V4L2_MBUS_FMT_YUYV8_1_5X8, V4L2_MBUS_FMT_YUYV8_1_5X8,
+	  V4L2_MBUS_FMT_YUYV8_1_5X8, 0,
+	  V4L2_PIX_FMT_NV12, 8, "YUV 4:2:0 (NV12)", },
+};
+
+const struct iss_format_info *
+omap4iss_video_format_info(enum v4l2_mbus_pixelcode code)
+{
+	unsigned int i;
+
+	for (i = 0; i < ARRAY_SIZE(formats); ++i) {
+		if (formats[i].code == code)
+			return &formats[i];
+	}
+
+	return NULL;
+}
+
+/*
+ * iss_video_mbus_to_pix - Convert v4l2_mbus_framefmt to v4l2_pix_format
+ * @video: ISS video instance
+ * @mbus: v4l2_mbus_framefmt format (input)
+ * @pix: v4l2_pix_format format (output)
+ *
+ * Fill the output pix structure with information from the input mbus format.
+ * The bytesperline and sizeimage fields are computed from the requested bytes
+ * per line value in the pix format and information from the video instance.
+ *
+ * Return the number of padding bytes at end of line.
+ */
+static unsigned int iss_video_mbus_to_pix(const struct iss_video *video,
+					  const struct v4l2_mbus_framefmt *mbus,
+					  struct v4l2_pix_format *pix)
+{
+	unsigned int bpl = pix->bytesperline;
+	unsigned int min_bpl;
+	unsigned int i;
+
+	memset(pix, 0, sizeof(*pix));
+	pix->width = mbus->width;
+	pix->height = mbus->height;
+
+	/* Skip the last format in the loop so that it will be selected if no
+	 * match is found.
+	 */
+	for (i = 0; i < ARRAY_SIZE(formats) - 1; ++i) {
+		if (formats[i].code == mbus->code)
+			break;
+	}
+
+	min_bpl = pix->width * ALIGN(formats[i].bpp, 8) / 8;
+
+	/* Clamp the requested bytes per line value. If the maximum bytes per
+	 * line value is zero, the module doesn't support user configurable line
+	 * sizes. Override the requested value with the minimum in that case.
+	 */
+	if (video->bpl_max)
+		bpl = clamp(bpl, min_bpl, video->bpl_max);
+	else
+		bpl = min_bpl;
+
+	if (!video->bpl_zero_padding || bpl != min_bpl)
+		bpl = ALIGN(bpl, video->bpl_alignment);
+
+	pix->pixelformat = formats[i].pixelformat;
+	pix->bytesperline = bpl;
+	pix->sizeimage = pix->bytesperline * pix->height;
+	pix->colorspace = mbus->colorspace;
+	pix->field = mbus->field;
+
+	/* FIXME: Special case for NV12! We should make this nicer... */
+	if (pix->pixelformat == V4L2_PIX_FMT_NV12)
+		pix->sizeimage += (pix->bytesperline * pix->height) / 2;
+
+	return bpl - min_bpl;
+}
+
+static void iss_video_pix_to_mbus(const struct v4l2_pix_format *pix,
+				  struct v4l2_mbus_framefmt *mbus)
+{
+	unsigned int i;
+
+	memset(mbus, 0, sizeof(*mbus));
+	mbus->width = pix->width;
+	mbus->height = pix->height;
+
+	for (i = 0; i < ARRAY_SIZE(formats); ++i) {
+		if (formats[i].pixelformat == pix->pixelformat)
+			break;
+	}
+
+	if (WARN_ON(i == ARRAY_SIZE(formats)))
+		return;
+
+	mbus->code = formats[i].code;
+	mbus->colorspace = pix->colorspace;
+	mbus->field = pix->field;
+}
+
+static struct v4l2_subdev *
+iss_video_remote_subdev(struct iss_video *video, u32 *pad)
+{
+	struct media_pad *remote;
+
+	remote = media_entity_remote_pad(&video->pad);
+
+	if (remote == NULL ||
+	    media_entity_type(remote->entity) != MEDIA_ENT_T_V4L2_SUBDEV)
+		return NULL;
+
+	if (pad)
+		*pad = remote->index;
+
+	return media_entity_to_v4l2_subdev(remote->entity);
+}
+
+/* Return a pointer to the ISS video instance at the far end of the pipeline. */
+static struct iss_video *
+iss_video_far_end(struct iss_video *video)
+{
+	struct media_entity_graph graph;
+	struct media_entity *entity = &video->video.entity;
+	struct media_device *mdev = entity->parent;
+	struct iss_video *far_end = NULL;
+
+	mutex_lock(&mdev->graph_mutex);
+	media_entity_graph_walk_start(&graph, entity);
+
+	while ((entity = media_entity_graph_walk_next(&graph))) {
+		if (entity == &video->video.entity)
+			continue;
+
+		if (media_entity_type(entity) != MEDIA_ENT_T_DEVNODE)
+			continue;
+
+		far_end = to_iss_video(media_entity_to_video_device(entity));
+		if (far_end->type != video->type)
+			break;
+
+		far_end = NULL;
+	}
+
+	mutex_unlock(&mdev->graph_mutex);
+	return far_end;
+}
+
+static int
+__iss_video_get_format(struct iss_video *video,
+		       struct v4l2_mbus_framefmt *format)
+{
+	struct v4l2_subdev_format fmt;
+	struct v4l2_subdev *subdev;
+	u32 pad;
+	int ret;
+
+	subdev = iss_video_remote_subdev(video, &pad);
+	if (subdev == NULL)
+		return -EINVAL;
+
+	memset(&fmt, 0, sizeof(fmt));
+	fmt.pad = pad;
+	fmt.which = V4L2_SUBDEV_FORMAT_ACTIVE;
+
+	mutex_lock(&video->mutex);
+	ret = v4l2_subdev_call(subdev, pad, get_fmt, NULL, &fmt);
+	mutex_unlock(&video->mutex);
+
+	if (ret)
+		return ret;
+
+	*format = fmt.format;
+	return 0;
+}
+
+static int
+iss_video_check_format(struct iss_video *video, struct iss_video_fh *vfh)
+{
+	struct v4l2_mbus_framefmt format;
+	struct v4l2_pix_format pixfmt;
+	int ret;
+
+	ret = __iss_video_get_format(video, &format);
+	if (ret < 0)
+		return ret;
+
+	pixfmt.bytesperline = 0;
+	ret = iss_video_mbus_to_pix(video, &format, &pixfmt);
+
+	if (vfh->format.fmt.pix.pixelformat != pixfmt.pixelformat ||
+	    vfh->format.fmt.pix.height != pixfmt.height ||
+	    vfh->format.fmt.pix.width != pixfmt.width ||
+	    vfh->format.fmt.pix.bytesperline != pixfmt.bytesperline ||
+	    vfh->format.fmt.pix.sizeimage != pixfmt.sizeimage)
+		return -EINVAL;
+
+	return ret;
+}
+
+/* -----------------------------------------------------------------------------
+ * Video queue operations
+ */
+
+static int iss_video_queue_setup(struct vb2_queue *vq,
+				 const struct v4l2_format *fmt,
+				 unsigned int *count, unsigned int *num_planes,
+				 unsigned int sizes[], void *alloc_ctxs[])
+{
+	struct iss_video_fh *vfh = vb2_get_drv_priv(vq);
+	struct iss_video *video = vfh->video;
+
+	/* Revisit multi-planar support for NV12 */
+	*num_planes = 1;
+
+	sizes[0] = vfh->format.fmt.pix.sizeimage;
+	if (sizes[0] == 0)
+		return -EINVAL;
+
+	alloc_ctxs[0] = video->alloc_ctx;
+
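+	/* Limit the buffer count so the total allocation fits in capture_mem. */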
+	*count = min(*count, video->capture_mem / PAGE_ALIGN(sizes[0]));
+
+	return 0;
+}
+
+static void iss_video_buf_cleanup(struct vb2_buffer *vb)
+{
+	struct iss_buffer *buffer = container_of(vb, struct iss_buffer, vb);
+
+	if (buffer->iss_addr)
+		buffer->iss_addr = 0;
+}
+
+static int iss_video_buf_prepare(struct vb2_buffer *vb)
+{
+	struct iss_video_fh *vfh = vb2_get_drv_priv(vb->vb2_queue);
+	struct iss_buffer *buffer = container_of(vb, struct iss_buffer, vb);
+	struct iss_video *video = vfh->video;
+	unsigned long size = vfh->format.fmt.pix.sizeimage;
+	dma_addr_t addr;
+
+	if (vb2_plane_size(vb, 0) < size)
+		return -ENOBUFS;
+
+	/* Refuse to prepare the buffer if the video node has registered an
+	 * error. We don't need to take any lock here as the operation is
+	 * inherently racy. The authoritative check will be performed in the
+	 * queue handler, which can't return an error; this check is just a
+	 * best-effort attempt to notify userspace as early as possible.
+	 */
+	if (unlikely(video->error))
+		return -EIO;
+
+	addr = vb2_dma_contig_plane_dma_addr(vb, 0);
+	if (!IS_ALIGNED(addr, 32)) {
+		dev_dbg(video->iss->dev,
+			"Buffer address must be aligned to 32 bytes boundary.\n");
+		return -EINVAL;
+	}
+
+	vb2_set_plane_payload(vb, 0, size);
+	buffer->iss_addr = addr;
+	return 0;
+}
+
+static void iss_video_buf_queue(struct vb2_buffer *vb)
+{
+	struct iss_video_fh *vfh = vb2_get_drv_priv(vb->vb2_queue);
+	struct iss_video *video = vfh->video;
+	struct iss_buffer *buffer = container_of(vb, struct iss_buffer, vb);
+	struct iss_pipeline *pipe = to_iss_pipeline(&video->video.entity);
+	unsigned long flags;
+	bool empty;
+
+	spin_lock_irqsave(&video->qlock, flags);
+
+	if (unlikely(video->error)) {
+		vb2_buffer_done(vb, VB2_BUF_STATE_ERROR);
+		spin_unlock_irqrestore(&video->qlock, flags);
+		return;
+	}
+
+	empty = list_empty(&video->dmaqueue);
+	list_add_tail(&buffer->list, &video->dmaqueue);
+
+	spin_unlock_irqrestore(&video->qlock, flags);
+
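+	/*
+	 * The first buffer queued on an empty DMA queue is programmed
+	 * immediately and may start the pipeline if it is now fully ready.
+	 */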
+	if (empty) {
+		enum iss_pipeline_state state;
+		unsigned int start;
+
+		if (video->type == V4L2_BUF_TYPE_VIDEO_CAPTURE)
+			state = ISS_PIPELINE_QUEUE_OUTPUT;
+		else
+			state = ISS_PIPELINE_QUEUE_INPUT;
+
+		spin_lock_irqsave(&pipe->lock, flags);
+		pipe->state |= state;
+		video->ops->queue(video, buffer);
+		video->dmaqueue_flags |= ISS_VIDEO_DMAQUEUE_QUEUED;
+
+		start = iss_pipeline_ready(pipe);
+		if (start)
+			pipe->state |= ISS_PIPELINE_STREAM;
+		spin_unlock_irqrestore(&pipe->lock, flags);
+
+		if (start)
+			omap4iss_pipeline_set_stream(pipe,
+						ISS_PIPELINE_STREAM_SINGLESHOT);
+	}
+}
+
+static struct vb2_ops iss_video_vb2ops = {
+	.queue_setup	= iss_video_queue_setup,
+	.buf_prepare	= iss_video_buf_prepare,
+	.buf_queue	= iss_video_buf_queue,
+	.buf_cleanup	= iss_video_buf_cleanup,
+};
+
+/*
+ * omap4iss_video_buffer_next - Complete the current buffer and return the next
+ * @video: ISS video object
+ *
+ * Remove the current video buffer from the DMA queue and fill its timestamp,
+ * field count and state fields before waking up its completion handler.
+ *
+ * For capture video nodes, the buffer state is set to VB2_BUF_STATE_DONE if no
+ * error has been flagged in the pipeline, or to VB2_BUF_STATE_ERROR otherwise.
+ *
+ * The DMA queue is expected to contain at least one buffer.
+ *
+ * Return a pointer to the next buffer in the DMA queue, or NULL if the queue is
+ * empty.
+ */
+struct iss_buffer *omap4iss_video_buffer_next(struct iss_video *video)
+{
+	struct iss_pipeline *pipe = to_iss_pipeline(&video->video.entity);
+	enum iss_pipeline_state state;
+	struct iss_buffer *buf;
+	unsigned long flags;
+	struct timespec ts;
+
+	spin_lock_irqsave(&video->qlock, flags);
+	if (WARN_ON(list_empty(&video->dmaqueue))) {
+		spin_unlock_irqrestore(&video->qlock, flags);
+		return NULL;
+	}
+
+	buf = list_first_entry(&video->dmaqueue, struct iss_buffer,
+			       list);
+	list_del(&buf->list);
+	spin_unlock_irqrestore(&video->qlock, flags);
+
+	ktime_get_ts(&ts);
+	buf->vb.v4l2_buf.timestamp.tv_sec = ts.tv_sec;
+	buf->vb.v4l2_buf.timestamp.tv_usec = ts.tv_nsec / NSEC_PER_USEC;
+
+	/* Do frame number propagation only if this is the output video node.
+	 * Frame number either comes from the CSI receivers or it gets
+	 * incremented here if H3A is not active.
+	 * Note: There is no guarantee that the output buffer will finish
+	 * first, so the input number might lag behind by 1 in some cases.
+	 */
+	if (video == pipe->output && !pipe->do_propagation)
+		buf->vb.v4l2_buf.sequence =
+			atomic_inc_return(&pipe->frame_number);
+	else
+		buf->vb.v4l2_buf.sequence = atomic_read(&pipe->frame_number);
+
+	vb2_buffer_done(&buf->vb, pipe->error ?
+			VB2_BUF_STATE_ERROR : VB2_BUF_STATE_DONE);
+	pipe->error = false;
+
+	spin_lock_irqsave(&video->qlock, flags);
+	if (list_empty(&video->dmaqueue)) {
+		spin_unlock_irqrestore(&video->qlock, flags);
+		if (video->type == V4L2_BUF_TYPE_VIDEO_CAPTURE)
+			state = ISS_PIPELINE_QUEUE_OUTPUT
+			      | ISS_PIPELINE_STREAM;
+		else
+			state = ISS_PIPELINE_QUEUE_INPUT
+			      | ISS_PIPELINE_STREAM;
+
+		spin_lock_irqsave(&pipe->lock, flags);
+		pipe->state &= ~state;
+		if (video->pipe.stream_state == ISS_PIPELINE_STREAM_CONTINUOUS)
+			video->dmaqueue_flags |= ISS_VIDEO_DMAQUEUE_UNDERRUN;
+		spin_unlock_irqrestore(&pipe->lock, flags);
+		return NULL;
+	}
+
+	if (video->type == V4L2_BUF_TYPE_VIDEO_CAPTURE && pipe->input != NULL) {
+		spin_lock(&pipe->lock);
+		pipe->state &= ~ISS_PIPELINE_STREAM;
+		spin_unlock(&pipe->lock);
+	}
+
+	buf = list_first_entry(&video->dmaqueue, struct iss_buffer,
+			       list);
+	spin_unlock_irqrestore(&video->qlock, flags);
+	buf->vb.state = VB2_BUF_STATE_ACTIVE;
+	return buf;
+}
+
+/*
+ * omap4iss_video_cancel_stream - Cancel stream on a video node
+ * @video: ISS video object
+ *
+ * Cancelling a stream marks all buffers on the video node as erroneous and
+ * makes sure no new buffer can be queued.
+ */
+void omap4iss_video_cancel_stream(struct iss_video *video)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&video->qlock, flags);
+
+	while (!list_empty(&video->dmaqueue)) {
+		struct iss_buffer *buf;
+
+		buf = list_first_entry(&video->dmaqueue, struct iss_buffer,
+				       list);
+		list_del(&buf->list);
+		vb2_buffer_done(&buf->vb, VB2_BUF_STATE_ERROR);
+	}
+
+	video->error = true;
+
+	spin_unlock_irqrestore(&video->qlock, flags);
+}
+
+/* -----------------------------------------------------------------------------
+ * V4L2 ioctls
+ */
+
+static int
+iss_video_querycap(struct file *file, void *fh, struct v4l2_capability *cap)
+{
+	struct iss_video *video = video_drvdata(file);
+
+	strlcpy(cap->driver, ISS_VIDEO_DRIVER_NAME, sizeof(cap->driver));
+	strlcpy(cap->card, video->video.name, sizeof(cap->card));
+	strlcpy(cap->bus_info, "media", sizeof(cap->bus_info));
+
+	if (video->type == V4L2_BUF_TYPE_VIDEO_CAPTURE)
+		cap->device_caps = V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_STREAMING;
+	else
+		cap->device_caps = V4L2_CAP_VIDEO_OUTPUT | V4L2_CAP_STREAMING;
+
+	cap->capabilities = V4L2_CAP_DEVICE_CAPS | V4L2_CAP_STREAMING
+			  | V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_VIDEO_OUTPUT;
+
+	return 0;
+}
+
+static int
+iss_video_enum_format(struct file *file, void *fh, struct v4l2_fmtdesc *f)
+{
+	struct iss_video *video = video_drvdata(file);
+	struct v4l2_mbus_framefmt format;
+	unsigned int index = f->index;
+	unsigned int i;
+	int ret;
+
+	if (f->type != video->type)
+		return -EINVAL;
+
+	ret = __iss_video_get_format(video, &format);
+	if (ret < 0)
+		return ret;
+
+	for (i = 0; i < ARRAY_SIZE(formats); ++i) {
+		const struct iss_format_info *info = &formats[i];
+
+		if (format.code != info->code)
+			continue;
+
+		if (index == 0) {
+			f->pixelformat = info->pixelformat;
+			strlcpy(f->description, info->description,
+				sizeof(f->description));
+			return 0;
+		}
+
+		index--;
+	}
+
+	return -EINVAL;
+}
+
+static int
+iss_video_get_format(struct file *file, void *fh, struct v4l2_format *format)
+{
+	struct iss_video_fh *vfh = to_iss_video_fh(fh);
+	struct iss_video *video = video_drvdata(file);
+
+	if (format->type != video->type)
+		return -EINVAL;
+
+	mutex_lock(&video->mutex);
+	*format = vfh->format;
+	mutex_unlock(&video->mutex);
+
+	return 0;
+}
+
+static int
+iss_video_set_format(struct file *file, void *fh, struct v4l2_format *format)
+{
+	struct iss_video_fh *vfh = to_iss_video_fh(fh);
+	struct iss_video *video = video_drvdata(file);
+	struct v4l2_mbus_framefmt fmt;
+
+	if (format->type != video->type)
+		return -EINVAL;
+
+	mutex_lock(&video->mutex);
+
+	/* Fill the bytesperline and sizeimage fields by converting to media bus
+	 * format and back to pixel format.
+	 */
+	iss_video_pix_to_mbus(&format->fmt.pix, &fmt);
+	iss_video_mbus_to_pix(video, &fmt, &format->fmt.pix);
+
+	vfh->format = *format;
+
+	mutex_unlock(&video->mutex);
+	return 0;
+}
+
+static int
+iss_video_try_format(struct file *file, void *fh, struct v4l2_format *format)
+{
+	struct iss_video *video = video_drvdata(file);
+	struct v4l2_subdev_format fmt;
+	struct v4l2_subdev *subdev;
+	u32 pad;
+	int ret;
+
+	if (format->type != video->type)
+		return -EINVAL;
+
+	subdev = iss_video_remote_subdev(video, &pad);
+	if (subdev == NULL)
+		return -EINVAL;
+
+	iss_video_pix_to_mbus(&format->fmt.pix, &fmt.format);
+
+	fmt.pad = pad;
+	fmt.which = V4L2_SUBDEV_FORMAT_ACTIVE;
+	ret = v4l2_subdev_call(subdev, pad, get_fmt, NULL, &fmt);
+	if (ret)
+		return ret;
+
+	iss_video_mbus_to_pix(video, &fmt.format, &format->fmt.pix);
+	return 0;
+}
+
+static int
+iss_video_cropcap(struct file *file, void *fh, struct v4l2_cropcap *cropcap)
+{
+	struct iss_video *video = video_drvdata(file);
+	struct v4l2_subdev *subdev;
+	int ret;
+
+	subdev = iss_video_remote_subdev(video, NULL);
+	if (subdev == NULL)
+		return -EINVAL;
+
+	mutex_lock(&video->mutex);
+	ret = v4l2_subdev_call(subdev, video, cropcap, cropcap);
+	mutex_unlock(&video->mutex);
+
+	return ret == -ENOIOCTLCMD ? -ENOTTY : ret;
+}
+
+static int
+iss_video_get_crop(struct file *file, void *fh, struct v4l2_crop *crop)
+{
+	struct iss_video *video = video_drvdata(file);
+	struct v4l2_subdev_format format;
+	struct v4l2_subdev *subdev;
+	u32 pad;
+	int ret;
+
+	subdev = iss_video_remote_subdev(video, &pad);
+	if (subdev == NULL)
+		return -EINVAL;
+
+	/* Try the get crop operation first and fall back to get format if not
+	 * implemented.
+	 */
+	ret = v4l2_subdev_call(subdev, video, g_crop, crop);
+	if (ret != -ENOIOCTLCMD)
+		return ret;
+
+	format.pad = pad;
+	format.which = V4L2_SUBDEV_FORMAT_ACTIVE;
+	ret = v4l2_subdev_call(subdev, pad, get_fmt, NULL, &format);
+	if (ret < 0)
+		return ret == -ENOIOCTLCMD ? -ENOTTY : ret;
+
+	crop->c.left = 0;
+	crop->c.top = 0;
+	crop->c.width = format.format.width;
+	crop->c.height = format.format.height;
+
+	return 0;
+}
+
+static int
+iss_video_set_crop(struct file *file, void *fh, const struct v4l2_crop *crop)
+{
+	struct iss_video *video = video_drvdata(file);
+	struct v4l2_subdev *subdev;
+	int ret;
+
+	subdev = iss_video_remote_subdev(video, NULL);
+	if (subdev == NULL)
+		return -EINVAL;
+
+	mutex_lock(&video->mutex);
+	ret = v4l2_subdev_call(subdev, video, s_crop, crop);
+	mutex_unlock(&video->mutex);
+
+	return ret == -ENOIOCTLCMD ? -ENOTTY : ret;
+}
+
+static int
+iss_video_get_param(struct file *file, void *fh, struct v4l2_streamparm *a)
+{
+	struct iss_video_fh *vfh = to_iss_video_fh(fh);
+	struct iss_video *video = video_drvdata(file);
+
+	if (video->type != V4L2_BUF_TYPE_VIDEO_OUTPUT ||
+	    video->type != a->type)
+		return -EINVAL;
+
+	memset(a, 0, sizeof(*a));
+	a->type = V4L2_BUF_TYPE_VIDEO_OUTPUT;
+	a->parm.output.capability = V4L2_CAP_TIMEPERFRAME;
+	a->parm.output.timeperframe = vfh->timeperframe;
+
+	return 0;
+}
+
+static int
+iss_video_set_param(struct file *file, void *fh, struct v4l2_streamparm *a)
+{
+	struct iss_video_fh *vfh = to_iss_video_fh(fh);
+	struct iss_video *video = video_drvdata(file);
+
+	if (video->type != V4L2_BUF_TYPE_VIDEO_OUTPUT ||
+	    video->type != a->type)
+		return -EINVAL;
+
+	if (a->parm.output.timeperframe.denominator == 0)
+		a->parm.output.timeperframe.denominator = 1;
+
+	vfh->timeperframe = a->parm.output.timeperframe;
+
+	return 0;
+}
+
+static int
+iss_video_reqbufs(struct file *file, void *fh, struct v4l2_requestbuffers *rb)
+{
+	struct iss_video_fh *vfh = to_iss_video_fh(fh);
+
+	return vb2_reqbufs(&vfh->queue, rb);
+}
+
+static int
+iss_video_querybuf(struct file *file, void *fh, struct v4l2_buffer *b)
+{
+	struct iss_video_fh *vfh = to_iss_video_fh(fh);
+
+	return vb2_querybuf(&vfh->queue, b);
+}
+
+static int
+iss_video_qbuf(struct file *file, void *fh, struct v4l2_buffer *b)
+{
+	struct iss_video_fh *vfh = to_iss_video_fh(fh);
+
+	return vb2_qbuf(&vfh->queue, b);
+}
+
+static int
+iss_video_dqbuf(struct file *file, void *fh, struct v4l2_buffer *b)
+{
+	struct iss_video_fh *vfh = to_iss_video_fh(fh);
+
+	return vb2_dqbuf(&vfh->queue, b, file->f_flags & O_NONBLOCK);
+}
+
+/*
+ * Stream management
+ *
+ * Every ISS pipeline has a single input and a single output. The input can be
+ * either a sensor or a video node. The output is always a video node.
+ *
+ * As every pipeline has an output video node, the ISS video object at the
+ * pipeline output stores the pipeline state. It tracks the streaming state of
+ * both the input and output, as well as the availability of buffers.
+ *
+ * In sensor-to-memory mode, frames are always available at the pipeline input.
+ * Starting the sensor usually requires I2C transfers and must be done in
+ * interruptible context. The pipeline is started and stopped synchronously
+ * to the stream on/off commands. All modules in the pipeline will get their
+ * subdev set stream handler called. The module at the end of the pipeline must
+ * delay starting the hardware until buffers are available at its output.
+ *
+ * In memory-to-memory mode, starting/stopping the stream requires
+ * synchronization between the input and output. ISS modules can't be stopped
+ * in the middle of a frame, and at least some of the modules seem to become
+ * busy as soon as they're started, even if they don't receive a frame start
+ * event. For that reason frames need to be processed in single-shot mode. The
+ * driver needs to wait until a frame is completely processed and written to
+ * memory before restarting the pipeline for the next frame. Pipelined
+ * processing might be possible but requires more testing.
+ *
+ * Stream start must be delayed until buffers are available at both the input
+ * and output. The pipeline must be started in the videobuf queue callback with
+ * the buffer queue spinlock held. The modules' subdev set stream operation
+ * must not sleep.
+ */
+static int
+iss_video_streamon(struct file *file, void *fh, enum v4l2_buf_type type)
+{
+	struct iss_video_fh *vfh = to_iss_video_fh(fh);
+	struct iss_video *video = video_drvdata(file);
+	struct media_entity_graph graph;
+	struct media_entity *entity;
+	enum iss_pipeline_state state;
+	struct iss_pipeline *pipe;
+	struct iss_video *far_end;
+	unsigned long flags;
+	int ret;
+
+	if (type != video->type)
+		return -EINVAL;
+
+	mutex_lock(&video->stream_lock);
+
+	/* Start streaming on the pipeline. No link touching an entity in the
+	 * pipeline can be activated or deactivated once streaming is started.
+	 */
+	pipe = video->video.entity.pipe
+	     ? to_iss_pipeline(&video->video.entity) : &video->pipe;
+	pipe->external = NULL;
+	pipe->external_rate = 0;
+	pipe->external_bpp = 0;
+	pipe->entities = 0;
+
+	if (video->iss->pdata->set_constraints)
+		video->iss->pdata->set_constraints(video->iss, true);
+
+	ret = media_entity_pipeline_start(&video->video.entity, &pipe->pipe);
+	if (ret < 0)
+		goto err_media_entity_pipeline_start;
+
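+	/* Record which entities are part of the pipeline. */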
+	entity = &video->video.entity;
+	media_entity_graph_walk_start(&graph, entity);
+	while ((entity = media_entity_graph_walk_next(&graph)))
+		pipe->entities |= 1 << entity->id;
+
+	/* Verify that the currently configured format matches the output of
+	 * the connected subdev.
+	 */
+	ret = iss_video_check_format(video, vfh);
+	if (ret < 0)
+		goto err_iss_video_check_format;
+
+	video->bpl_padding = ret;
+	video->bpl_value = vfh->format.fmt.pix.bytesperline;
+
+	/* Find the ISS video node connected at the far end of the pipeline and
+	 * update the pipeline.
+	 */
+	far_end = iss_video_far_end(video);
+
+	if (video->type == V4L2_BUF_TYPE_VIDEO_CAPTURE) {
+		state = ISS_PIPELINE_STREAM_OUTPUT | ISS_PIPELINE_IDLE_OUTPUT;
+		pipe->input = far_end;
+		pipe->output = video;
+	} else {
+		if (far_end == NULL) {
+			ret = -EPIPE;
+			goto err_iss_video_check_format;
+		}
+
+		state = ISS_PIPELINE_STREAM_INPUT | ISS_PIPELINE_IDLE_INPUT;
+		pipe->input = video;
+		pipe->output = far_end;
+	}
+
+	spin_lock_irqsave(&pipe->lock, flags);
+	pipe->state &= ~ISS_PIPELINE_STREAM;
+	pipe->state |= state;
+	spin_unlock_irqrestore(&pipe->lock, flags);
+
+	/* Set the maximum time per frame as the value requested by userspace.
+	 * This is a soft limit that can be overridden if the hardware doesn't
+	 * support the requested limit.
+	 */
+	if (video->type == V4L2_BUF_TYPE_VIDEO_OUTPUT)
+		pipe->max_timeperframe = vfh->timeperframe;
+
+	video->queue = &vfh->queue;
+	INIT_LIST_HEAD(&video->dmaqueue);
+	spin_lock_init(&video->qlock);
+	video->error = false;
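+	/* Start at -1 so the first completed frame gets sequence number 0. */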
+	atomic_set(&pipe->frame_number, -1);
+
+	ret = vb2_streamon(&vfh->queue, type);
+	if (ret < 0)
+		goto err_iss_video_check_format;
+
+	/* In sensor-to-memory mode, the stream can be started synchronously
+	 * to the stream on command. In memory-to-memory mode, it will be
+	 * started when buffers are queued on both the input and output.
+	 */
+	if (pipe->input == NULL) {
+		unsigned long flags;
+		ret = omap4iss_pipeline_set_stream(pipe,
+					      ISS_PIPELINE_STREAM_CONTINUOUS);
+		if (ret < 0)
+			goto err_omap4iss_set_stream;
+		spin_lock_irqsave(&video->qlock, flags);
+		if (list_empty(&video->dmaqueue))
+			video->dmaqueue_flags |= ISS_VIDEO_DMAQUEUE_UNDERRUN;
+		spin_unlock_irqrestore(&video->qlock, flags);
+	}
+
+	mutex_unlock(&video->stream_lock);
+	return 0;
+
+err_omap4iss_set_stream:
+	vb2_streamoff(&vfh->queue, type);
+err_iss_video_check_format:
+	media_entity_pipeline_stop(&video->video.entity);
+err_media_entity_pipeline_start:
+	if (video->iss->pdata->set_constraints)
+		video->iss->pdata->set_constraints(video->iss, false);
+	video->queue = NULL;
+
+	mutex_unlock(&video->stream_lock);
+	return ret;
+}
+
+static int
+iss_video_streamoff(struct file *file, void *fh, enum v4l2_buf_type type)
+{
+	struct iss_video_fh *vfh = to_iss_video_fh(fh);
+	struct iss_video *video = video_drvdata(file);
+	struct iss_pipeline *pipe = to_iss_pipeline(&video->video.entity);
+	enum iss_pipeline_state state;
+	unsigned long flags;
+
+	if (type != video->type)
+		return -EINVAL;
+
+	mutex_lock(&video->stream_lock);
+
+	if (!vb2_is_streaming(&vfh->queue))
+		goto done;
+
+	/* Update the pipeline state. */
+	if (video->type == V4L2_BUF_TYPE_VIDEO_CAPTURE)
+		state = ISS_PIPELINE_STREAM_OUTPUT
+		      | ISS_PIPELINE_QUEUE_OUTPUT;
+	else
+		state = ISS_PIPELINE_STREAM_INPUT
+		      | ISS_PIPELINE_QUEUE_INPUT;
+
+	spin_lock_irqsave(&pipe->lock, flags);
+	pipe->state &= ~state;
+	spin_unlock_irqrestore(&pipe->lock, flags);
+
+	/* Stop the stream. */
+	omap4iss_pipeline_set_stream(pipe, ISS_PIPELINE_STREAM_STOPPED);
+	vb2_streamoff(&vfh->queue, type);
+	video->queue = NULL;
+
+	if (video->iss->pdata->set_constraints)
+		video->iss->pdata->set_constraints(video->iss, false);
+	media_entity_pipeline_stop(&video->video.entity);
+
+done:
+	mutex_unlock(&video->stream_lock);
+	return 0;
+}
+
+static int
+iss_video_enum_input(struct file *file, void *fh, struct v4l2_input *input)
+{
+	if (input->index > 0)
+		return -EINVAL;
+
+	strlcpy(input->name, "camera", sizeof(input->name));
+	input->type = V4L2_INPUT_TYPE_CAMERA;
+
+	return 0;
+}
+
+static int
+iss_video_g_input(struct file *file, void *fh, unsigned int *input)
+{
+	*input = 0;
+
+	return 0;
+}
+
+static int
+iss_video_s_input(struct file *file, void *fh, unsigned int input)
+{
+	return input == 0 ? 0 : -EINVAL;
+}
+
+static const struct v4l2_ioctl_ops iss_video_ioctl_ops = {
+	.vidioc_querycap		= iss_video_querycap,
+	.vidioc_enum_fmt_vid_cap        = iss_video_enum_format,
+	.vidioc_g_fmt_vid_cap		= iss_video_get_format,
+	.vidioc_s_fmt_vid_cap		= iss_video_set_format,
+	.vidioc_try_fmt_vid_cap		= iss_video_try_format,
+	.vidioc_g_fmt_vid_out		= iss_video_get_format,
+	.vidioc_s_fmt_vid_out		= iss_video_set_format,
+	.vidioc_try_fmt_vid_out		= iss_video_try_format,
+	.vidioc_cropcap			= iss_video_cropcap,
+	.vidioc_g_crop			= iss_video_get_crop,
+	.vidioc_s_crop			= iss_video_set_crop,
+	.vidioc_g_parm			= iss_video_get_param,
+	.vidioc_s_parm			= iss_video_set_param,
+	.vidioc_reqbufs			= iss_video_reqbufs,
+	.vidioc_querybuf		= iss_video_querybuf,
+	.vidioc_qbuf			= iss_video_qbuf,
+	.vidioc_dqbuf			= iss_video_dqbuf,
+	.vidioc_streamon		= iss_video_streamon,
+	.vidioc_streamoff		= iss_video_streamoff,
+	.vidioc_enum_input		= iss_video_enum_input,
+	.vidioc_g_input			= iss_video_g_input,
+	.vidioc_s_input			= iss_video_s_input,
+};
+
+/* -----------------------------------------------------------------------------
+ * V4L2 file operations
+ */
+
+static int iss_video_open(struct file *file)
+{
+	struct iss_video *video = video_drvdata(file);
+	struct iss_video_fh *handle;
+	struct vb2_queue *q;
+	int ret = 0;
+
+	handle = kzalloc(sizeof(*handle), GFP_KERNEL);
+	if (handle == NULL)
+		return -ENOMEM;
+
+	v4l2_fh_init(&handle->vfh, &video->video);
+	v4l2_fh_add(&handle->vfh);
+
+	/* If this is the first user, initialise the pipeline. */
+	if (omap4iss_get(video->iss) == NULL) {
+		ret = -EBUSY;
+		goto done;
+	}
+
+	ret = omap4iss_pipeline_pm_use(&video->video.entity, 1);
+	if (ret < 0) {
+		omap4iss_put(video->iss);
+		goto done;
+	}
+
+	video->alloc_ctx = vb2_dma_contig_init_ctx(video->iss->dev);
+	if (IS_ERR(video->alloc_ctx)) {
+		ret = PTR_ERR(video->alloc_ctx);
+		omap4iss_put(video->iss);
+		goto done;
+	}
+
+	q = &handle->queue;
+
+	q->type = video->type;
+	q->io_modes = VB2_MMAP;
+	q->drv_priv = handle;
+	q->ops = &iss_video_vb2ops;
+	q->mem_ops = &vb2_dma_contig_memops;
+	q->buf_struct_size = sizeof(struct iss_buffer);
+	q->timestamp_type = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC;
+
+	ret = vb2_queue_init(q);
+	if (ret) {
+		omap4iss_put(video->iss);
+		goto done;
+	}
+
+	memset(&handle->format, 0, sizeof(handle->format));
+	handle->format.type = video->type;
+	handle->timeperframe.denominator = 1;
+
+	handle->video = video;
+	file->private_data = &handle->vfh;
+
+done:
+	if (ret < 0) {
+		v4l2_fh_del(&handle->vfh);
+		kfree(handle);
+	}
+
+	return ret;
+}
+
+static int iss_video_release(struct file *file)
+{
+	struct iss_video *video = video_drvdata(file);
+	struct v4l2_fh *vfh = file->private_data;
+	struct iss_video_fh *handle = to_iss_video_fh(vfh);
+
+	/* Disable streaming and free the buffer queue resources. */
+	iss_video_streamoff(file, vfh, video->type);
+
+	omap4iss_pipeline_pm_use(&video->video.entity, 0);
+
+	/* Release the videobuf2 queue */
+	vb2_queue_release(&handle->queue);
+
+	/* Release the file handle. */
+	v4l2_fh_del(vfh);
+	kfree(handle);
+	file->private_data = NULL;
+
+	omap4iss_put(video->iss);
+
+	return 0;
+}
+
+static unsigned int iss_video_poll(struct file *file, poll_table *wait)
+{
+	struct iss_video_fh *vfh = to_iss_video_fh(file->private_data);
+
+	return vb2_poll(&vfh->queue, file, wait);
+}
+
+static int iss_video_mmap(struct file *file, struct vm_area_struct *vma)
+{
+	struct iss_video_fh *vfh = to_iss_video_fh(file->private_data);
+
+	return vb2_mmap(&vfh->queue, vma);
+}
+
+static struct v4l2_file_operations iss_video_fops = {
+	.owner = THIS_MODULE,
+	.unlocked_ioctl = video_ioctl2,
+	.open = iss_video_open,
+	.release = iss_video_release,
+	.poll = iss_video_poll,
+	.mmap = iss_video_mmap,
+};
+
+/* -----------------------------------------------------------------------------
+ * ISS video core
+ */
+
+static const struct iss_video_operations iss_video_dummy_ops = {
+};
+
+int omap4iss_video_init(struct iss_video *video, const char *name)
+{
+	const char *direction;
+	int ret;
+
+	switch (video->type) {
+	case V4L2_BUF_TYPE_VIDEO_CAPTURE:
+		direction = "output";
+		video->pad.flags = MEDIA_PAD_FL_SINK;
+		break;
+	case V4L2_BUF_TYPE_VIDEO_OUTPUT:
+		direction = "input";
+		video->pad.flags = MEDIA_PAD_FL_SOURCE;
+		break;
+
+	default:
+		return -EINVAL;
+	}
+
+	ret = media_entity_init(&video->video.entity, 1, &video->pad, 0);
+	if (ret < 0)
+		return ret;
+
+	mutex_init(&video->mutex);
+	atomic_set(&video->active, 0);
+
+	spin_lock_init(&video->pipe.lock);
+	mutex_init(&video->stream_lock);
+
+	/* Initialize the video device. */
+	if (video->ops == NULL)
+		video->ops = &iss_video_dummy_ops;
+
+	video->video.fops = &iss_video_fops;
+	snprintf(video->video.name, sizeof(video->video.name),
+		 "OMAP4 ISS %s %s", name, direction);
+	video->video.vfl_type = VFL_TYPE_GRABBER;
+	video->video.release = video_device_release_empty;
+	video->video.ioctl_ops = &iss_video_ioctl_ops;
+	video->pipe.stream_state = ISS_PIPELINE_STREAM_STOPPED;
+
+	video_set_drvdata(&video->video, video);
+
+	return 0;
+}
+
+void omap4iss_video_cleanup(struct iss_video *video)
+{
+	media_entity_cleanup(&video->video.entity);
+	mutex_destroy(&video->stream_lock);
+	mutex_destroy(&video->mutex);
+}
+
+int omap4iss_video_register(struct iss_video *video, struct v4l2_device *vdev)
+{
+	int ret;
+
+	video->video.v4l2_dev = vdev;
+
+	ret = video_register_device(&video->video, VFL_TYPE_GRABBER, -1);
+	if (ret < 0)
+		dev_err(video->iss->dev,
+			"%s: could not register video device (%d)\n",
+			__func__, ret);
+
+	return ret;
+}
+
+void omap4iss_video_unregister(struct iss_video *video)
+{
+	video_unregister_device(&video->video);
+}
diff --git a/drivers/staging/media/omap4iss/iss_video.h b/drivers/staging/media/omap4iss/iss_video.h
new file mode 100644
index 0000000..878e4a3
--- /dev/null
+++ b/drivers/staging/media/omap4iss/iss_video.h
@@ -0,0 +1,204 @@
+/*
+ * TI OMAP4 ISS V4L2 Driver - Generic video node
+ *
+ * Copyright (C) 2012 Texas Instruments, Inc.
+ *
+ * Author: Sergio Aguirre <sergio.a.aguirre@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#ifndef OMAP4_ISS_VIDEO_H
+#define OMAP4_ISS_VIDEO_H
+
+#include <linux/v4l2-mediabus.h>
+#include <media/media-entity.h>
+#include <media/v4l2-dev.h>
+#include <media/v4l2-fh.h>
+#include <media/videobuf2-core.h>
+#include <media/videobuf2-dma-contig.h>
+
+#define ISS_VIDEO_DRIVER_NAME		"issvideo"
+#define ISS_VIDEO_DRIVER_VERSION	"0.0.2"
+
+struct iss_device;
+struct iss_video;
+struct v4l2_mbus_framefmt;
+struct v4l2_pix_format;
+
+/*
+ * struct iss_format_info - ISS media bus format information
+ * @code: V4L2 media bus format code
+ * @truncated: V4L2 media bus format code for the same format truncated to 10
+ *	bits. Identical to @code if the format is 10 bits wide or less.
+ * @uncompressed: V4L2 media bus format code for the corresponding uncompressed
+ *	format. Identical to @code if the format is not DPCM compressed.
+ * @flavor: V4L2 media bus format code for the same pixel layout but
+ *	shifted to be 8 bits per pixel. =0 if format is not shiftable.
+ * @pixelformat: V4L2 pixel format FCC identifier
+ * @bpp: Bits per pixel
+ * @description: Human-readable format description
+ */
+struct iss_format_info {
+	enum v4l2_mbus_pixelcode code;
+	enum v4l2_mbus_pixelcode truncated;
+	enum v4l2_mbus_pixelcode uncompressed;
+	enum v4l2_mbus_pixelcode flavor;
+	u32 pixelformat;
+	unsigned int bpp;
+	const char *description;
+};
+
+enum iss_pipeline_stream_state {
+	ISS_PIPELINE_STREAM_STOPPED = 0,
+	ISS_PIPELINE_STREAM_CONTINUOUS = 1,
+	ISS_PIPELINE_STREAM_SINGLESHOT = 2,
+};
+
+enum iss_pipeline_state {
+	/* The stream has been started on the input video node. */
+	ISS_PIPELINE_STREAM_INPUT = 1,
+	/* The stream has been started on the output video node. */
+	ISS_PIPELINE_STREAM_OUTPUT = (1 << 1),
+	/* At least one buffer is queued on the input video node. */
+	ISS_PIPELINE_QUEUE_INPUT = (1 << 2),
+	/* At least one buffer is queued on the output video node. */
+	ISS_PIPELINE_QUEUE_OUTPUT = (1 << 3),
+	/* The input entity is idle, ready to be started. */
+	ISS_PIPELINE_IDLE_INPUT = (1 << 4),
+	/* The output entity is idle, ready to be started. */
+	ISS_PIPELINE_IDLE_OUTPUT = (1 << 5),
+	/* The pipeline is currently streaming. */
+	ISS_PIPELINE_STREAM = (1 << 6),
+};
+
+/*
+ * struct iss_pipeline - An OMAP4 ISS hardware pipeline
+ * @entities: Bitmask of entities in the pipeline (indexed by entity ID)
+ * @error: A hardware error occurred during capture
+ */
+struct iss_pipeline {
+	struct media_pipeline pipe;
+	spinlock_t lock;		/* Pipeline state and queue flags */
+	unsigned int state;
+	enum iss_pipeline_stream_state stream_state;
+	struct iss_video *input;
+	struct iss_video *output;
+	unsigned int entities;
+	atomic_t frame_number;
+	bool do_propagation; /* of frame number */
+	bool error;
+	struct v4l2_fract max_timeperframe;
+	struct v4l2_subdev *external;
+	unsigned int external_rate;
+	int external_bpp;
+};
+
+#define to_iss_pipeline(__e) \
+	container_of((__e)->pipe, struct iss_pipeline, pipe)
+
+static inline int iss_pipeline_ready(struct iss_pipeline *pipe)
+{
+	return pipe->state == (ISS_PIPELINE_STREAM_INPUT |
+			       ISS_PIPELINE_STREAM_OUTPUT |
+			       ISS_PIPELINE_QUEUE_INPUT |
+			       ISS_PIPELINE_QUEUE_OUTPUT |
+			       ISS_PIPELINE_IDLE_INPUT |
+			       ISS_PIPELINE_IDLE_OUTPUT);
+}
+
+/*
+ * struct iss_buffer - ISS buffer
+ * @vb: videobuf2 buffer
+ * @iss_addr: Physical address of the buffer.
+ */
+struct iss_buffer {
+	/* common v4l buffer stuff -- must be first */
+	struct vb2_buffer	vb;
+	struct list_head	list;
+	dma_addr_t iss_addr;
+};
+
+#define to_iss_buffer(buf)	container_of(buf, struct iss_buffer, vb)
+
+enum iss_video_dmaqueue_flags {
+	/* Set if DMA queue becomes empty when ISS_PIPELINE_STREAM_CONTINUOUS */
+	ISS_VIDEO_DMAQUEUE_UNDERRUN = (1 << 0),
+	/* Set when queuing buffer to an empty DMA queue */
+	ISS_VIDEO_DMAQUEUE_QUEUED = (1 << 1),
+};
+
+#define iss_video_dmaqueue_flags_clr(video)	\
+			({ (video)->dmaqueue_flags = 0; })
+
+/*
+ * struct iss_video_operations - ISS video operations
+ * @queue:	Resume streaming when a buffer is queued. Called on VIDIOC_QBUF
+ *		if there was no buffer previously queued.
+ */
+struct iss_video_operations {
+	int (*queue)(struct iss_video *video, struct iss_buffer *buffer);
+};
+
+struct iss_video {
+	struct video_device video;
+	enum v4l2_buf_type type;
+	struct media_pad pad;
+
+	struct mutex mutex;		/* format and crop settings */
+	atomic_t active;
+
+	struct iss_device *iss;
+
+	unsigned int capture_mem;
+	unsigned int bpl_alignment;	/* alignment value */
+	unsigned int bpl_zero_padding;	/* whether the alignment is optional */
+	unsigned int bpl_max;		/* maximum bytes per line value */
+	unsigned int bpl_value;		/* bytes per line value */
+	unsigned int bpl_padding;	/* padding at end of line */
+
+	/* Pipeline state */
+	struct iss_pipeline pipe;
+	struct mutex stream_lock;	/* pipeline and stream states */
+	bool error;
+
+	/* Video buffers queue */
+	struct vb2_queue *queue;
+	spinlock_t qlock;		/* protects dmaqueue and error */
+	struct list_head dmaqueue;
+	enum iss_video_dmaqueue_flags dmaqueue_flags;
+	struct vb2_alloc_ctx *alloc_ctx;
+
+	const struct iss_video_operations *ops;
+};
+
+#define to_iss_video(vdev)	container_of(vdev, struct iss_video, video)
+
+struct iss_video_fh {
+	struct v4l2_fh vfh;
+	struct iss_video *video;
+	struct vb2_queue queue;
+	struct v4l2_format format;
+	struct v4l2_fract timeperframe;
+};
+
+#define to_iss_video_fh(fh)	container_of(fh, struct iss_video_fh, vfh)
+#define iss_video_queue_to_iss_video_fh(q) \
+				container_of(q, struct iss_video_fh, queue)
+
+int omap4iss_video_init(struct iss_video *video, const char *name);
+void omap4iss_video_cleanup(struct iss_video *video);
+int omap4iss_video_register(struct iss_video *video,
+			    struct v4l2_device *vdev);
+void omap4iss_video_unregister(struct iss_video *video);
+struct iss_buffer *omap4iss_video_buffer_next(struct iss_video *video);
+void omap4iss_video_cancel_stream(struct iss_video *video);
+struct media_pad *omap4iss_video_remote_pad(struct iss_video *video);
+
+const struct iss_format_info *
+omap4iss_video_format_info(enum v4l2_mbus_pixelcode code);
+
+#endif /* OMAP4_ISS_VIDEO_H */
diff --git a/drivers/staging/media/sn9c102/Kconfig b/drivers/staging/media/sn9c102/Kconfig
new file mode 100644
index 0000000..c9aba59
--- /dev/null
+++ b/drivers/staging/media/sn9c102/Kconfig
@@ -0,0 +1,17 @@
+config USB_SN9C102
+	tristate "USB SN9C1xx PC Camera Controller support (DEPRECATED)"
+	depends on VIDEO_V4L2 && MEDIA_USB_SUPPORT
+	---help---
+	  This driver is DEPRECATED, please use the gspca sonixb and
+	  sonixj modules instead.
+
+	  Say Y here if you want support for cameras based on SONiX SN9C101,
+	  SN9C102, SN9C103, SN9C105 and SN9C120 PC Camera Controllers.
+
+	  See <file:drivers/staging/media/sn9c102/sn9c102.txt> for more info.
+
+	  If you have webcams that are only supported by this driver and not by
+	  the gspca driver, then contact the linux-media mailing list.
+
+	  To compile this driver as a module, choose M here: the
+	  module will be called sn9c102.
diff --git a/drivers/media/usb/sn9c102/Makefile b/drivers/staging/media/sn9c102/Makefile
similarity index 100%
rename from drivers/media/usb/sn9c102/Makefile
rename to drivers/staging/media/sn9c102/Makefile
diff --git a/drivers/media/usb/sn9c102/sn9c102.h b/drivers/staging/media/sn9c102/sn9c102.h
similarity index 100%
rename from drivers/media/usb/sn9c102/sn9c102.h
rename to drivers/staging/media/sn9c102/sn9c102.h
diff --git a/Documentation/video4linux/sn9c102.txt b/drivers/staging/media/sn9c102/sn9c102.txt
similarity index 100%
rename from Documentation/video4linux/sn9c102.txt
rename to drivers/staging/media/sn9c102/sn9c102.txt
diff --git a/drivers/media/usb/sn9c102/sn9c102_config.h b/drivers/staging/media/sn9c102/sn9c102_config.h
similarity index 100%
rename from drivers/media/usb/sn9c102/sn9c102_config.h
rename to drivers/staging/media/sn9c102/sn9c102_config.h
diff --git a/drivers/media/usb/sn9c102/sn9c102_core.c b/drivers/staging/media/sn9c102/sn9c102_core.c
similarity index 100%
rename from drivers/media/usb/sn9c102/sn9c102_core.c
rename to drivers/staging/media/sn9c102/sn9c102_core.c
diff --git a/drivers/media/usb/sn9c102/sn9c102_devtable.h b/drivers/staging/media/sn9c102/sn9c102_devtable.h
similarity index 100%
rename from drivers/media/usb/sn9c102/sn9c102_devtable.h
rename to drivers/staging/media/sn9c102/sn9c102_devtable.h
diff --git a/drivers/media/usb/sn9c102/sn9c102_hv7131d.c b/drivers/staging/media/sn9c102/sn9c102_hv7131d.c
similarity index 100%
rename from drivers/media/usb/sn9c102/sn9c102_hv7131d.c
rename to drivers/staging/media/sn9c102/sn9c102_hv7131d.c
diff --git a/drivers/media/usb/sn9c102/sn9c102_hv7131r.c b/drivers/staging/media/sn9c102/sn9c102_hv7131r.c
similarity index 100%
rename from drivers/media/usb/sn9c102/sn9c102_hv7131r.c
rename to drivers/staging/media/sn9c102/sn9c102_hv7131r.c
diff --git a/drivers/media/usb/sn9c102/sn9c102_mi0343.c b/drivers/staging/media/sn9c102/sn9c102_mi0343.c
similarity index 100%
rename from drivers/media/usb/sn9c102/sn9c102_mi0343.c
rename to drivers/staging/media/sn9c102/sn9c102_mi0343.c
diff --git a/drivers/media/usb/sn9c102/sn9c102_mi0360.c b/drivers/staging/media/sn9c102/sn9c102_mi0360.c
similarity index 100%
rename from drivers/media/usb/sn9c102/sn9c102_mi0360.c
rename to drivers/staging/media/sn9c102/sn9c102_mi0360.c
diff --git a/drivers/media/usb/sn9c102/sn9c102_mt9v111.c b/drivers/staging/media/sn9c102/sn9c102_mt9v111.c
similarity index 100%
rename from drivers/media/usb/sn9c102/sn9c102_mt9v111.c
rename to drivers/staging/media/sn9c102/sn9c102_mt9v111.c
diff --git a/drivers/media/usb/sn9c102/sn9c102_ov7630.c b/drivers/staging/media/sn9c102/sn9c102_ov7630.c
similarity index 100%
rename from drivers/media/usb/sn9c102/sn9c102_ov7630.c
rename to drivers/staging/media/sn9c102/sn9c102_ov7630.c
diff --git a/drivers/media/usb/sn9c102/sn9c102_ov7660.c b/drivers/staging/media/sn9c102/sn9c102_ov7660.c
similarity index 100%
rename from drivers/media/usb/sn9c102/sn9c102_ov7660.c
rename to drivers/staging/media/sn9c102/sn9c102_ov7660.c
diff --git a/drivers/media/usb/sn9c102/sn9c102_pas106b.c b/drivers/staging/media/sn9c102/sn9c102_pas106b.c
similarity index 100%
rename from drivers/media/usb/sn9c102/sn9c102_pas106b.c
rename to drivers/staging/media/sn9c102/sn9c102_pas106b.c
diff --git a/drivers/media/usb/sn9c102/sn9c102_pas202bcb.c b/drivers/staging/media/sn9c102/sn9c102_pas202bcb.c
similarity index 100%
rename from drivers/media/usb/sn9c102/sn9c102_pas202bcb.c
rename to drivers/staging/media/sn9c102/sn9c102_pas202bcb.c
diff --git a/drivers/media/usb/sn9c102/sn9c102_sensor.h b/drivers/staging/media/sn9c102/sn9c102_sensor.h
similarity index 100%
rename from drivers/media/usb/sn9c102/sn9c102_sensor.h
rename to drivers/staging/media/sn9c102/sn9c102_sensor.h
diff --git a/drivers/media/usb/sn9c102/sn9c102_tas5110c1b.c b/drivers/staging/media/sn9c102/sn9c102_tas5110c1b.c
similarity index 100%
rename from drivers/media/usb/sn9c102/sn9c102_tas5110c1b.c
rename to drivers/staging/media/sn9c102/sn9c102_tas5110c1b.c
diff --git a/drivers/media/usb/sn9c102/sn9c102_tas5110d.c b/drivers/staging/media/sn9c102/sn9c102_tas5110d.c
similarity index 100%
rename from drivers/media/usb/sn9c102/sn9c102_tas5110d.c
rename to drivers/staging/media/sn9c102/sn9c102_tas5110d.c
diff --git a/drivers/media/usb/sn9c102/sn9c102_tas5130d1b.c b/drivers/staging/media/sn9c102/sn9c102_tas5130d1b.c
similarity index 100%
rename from drivers/media/usb/sn9c102/sn9c102_tas5130d1b.c
rename to drivers/staging/media/sn9c102/sn9c102_tas5130d1b.c
diff --git a/drivers/staging/media/solo6x10/solo6x10-v4l2-enc.c b/drivers/staging/media/solo6x10/solo6x10-v4l2-enc.c
index d582c5b..ce9e5aa 100644
--- a/drivers/staging/media/solo6x10/solo6x10-v4l2-enc.c
+++ b/drivers/staging/media/solo6x10/solo6x10-v4l2-enc.c
@@ -964,7 +964,7 @@
 {
 	struct solo_enc_dev *solo_enc = video_drvdata(file);
 
-	return solo_set_video_type(solo_enc->solo_dev, std & V4L2_STD_PAL);
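+	/* V4L2_STD_625_50 covers all 625-line/50 Hz standards (PAL and SECAM variants) */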
+	return solo_set_video_type(solo_enc->solo_dev, std & V4L2_STD_625_50);
 }
 
 static int solo_enum_framesizes(struct file *file, void *priv,
diff --git a/drivers/staging/media/solo6x10/solo6x10-v4l2.c b/drivers/staging/media/solo6x10/solo6x10-v4l2.c
index 7b26de3..47e72da 100644
--- a/drivers/staging/media/solo6x10/solo6x10-v4l2.c
+++ b/drivers/staging/media/solo6x10/solo6x10-v4l2.c
@@ -527,7 +527,7 @@
 	return 0;
 }
 
-int solo_set_video_type(struct solo_dev *solo_dev, bool type)
+int solo_set_video_type(struct solo_dev *solo_dev, bool is_50hz)
 {
 	int i;
 
@@ -537,7 +537,8 @@
 	for (i = 0; i < solo_dev->nr_chans; i++)
 		if (vb2_is_busy(&solo_dev->v4l2_enc[i]->vidq))
 			return -EBUSY;
-	solo_dev->video_type = type;
+	solo_dev->video_type = is_50hz ? SOLO_VO_FMT_TYPE_PAL :
+					 SOLO_VO_FMT_TYPE_NTSC;
 	/* Reconfigure for the new standard */
 	solo_disp_init(solo_dev);
 	solo_enc_init(solo_dev);
@@ -551,7 +552,7 @@
 {
 	struct solo_dev *solo_dev = video_drvdata(file);
 
-	return solo_set_video_type(solo_dev, std & V4L2_STD_PAL);
+	return solo_set_video_type(solo_dev, std & V4L2_STD_625_50);
 }
 
 static int solo_s_ctrl(struct v4l2_ctrl *ctrl)
diff --git a/drivers/staging/media/solo6x10/solo6x10.h b/drivers/staging/media/solo6x10/solo6x10.h
index f1bbb8c..8964f8b 100644
--- a/drivers/staging/media/solo6x10/solo6x10.h
+++ b/drivers/staging/media/solo6x10/solo6x10.h
@@ -398,7 +398,7 @@
 		      int desc_cnt);
 
 /* Global s_std ioctl */
-int solo_set_video_type(struct solo_dev *solo_dev, bool type);
+int solo_set_video_type(struct solo_dev *solo_dev, bool is_50hz);
 void solo_update_mode(struct solo_enc_dev *solo_enc);
 
 /* Set the threshold for motion detection */
diff --git a/drivers/staging/netlogic/xlr_net.c b/drivers/staging/netlogic/xlr_net.c
index eedffed..31b269a 100644
--- a/drivers/staging/netlogic/xlr_net.c
+++ b/drivers/staging/netlogic/xlr_net.c
@@ -307,7 +307,7 @@
 }
 
 static u16 xlr_net_select_queue(struct net_device *ndev, struct sk_buff *skb,
-				void *accel_priv)
+				void *accel_priv, select_queue_fallback_t fallback)
 {
 	return (u16)smp_processor_id();
 }
@@ -892,6 +892,11 @@
 	priv->mii_bus->write = xlr_mii_write;
 	priv->mii_bus->parent = &pdev->dev;
 	priv->mii_bus->irq = kmalloc(sizeof(int)*PHY_MAX_ADDR, GFP_KERNEL);
+	if (priv->mii_bus->irq == NULL) {
+		pr_err("irq alloc failed\n");
+		mdiobus_free(priv->mii_bus);
+		return -ENOMEM;
+	}
 	priv->mii_bus->irq[priv->phy_addr] = priv->ndev->irq;
 
 	/* Scan only the enabled address */
diff --git a/drivers/staging/octeon-usb/octeon-hcd.c b/drivers/staging/octeon-usb/octeon-hcd.c
index 47e0a91..5a001d9 100644
--- a/drivers/staging/octeon-usb/octeon-hcd.c
+++ b/drivers/staging/octeon-usb/octeon-hcd.c
@@ -275,13 +275,6 @@
  */
 #define MAX_TRANSFER_PACKETS	((1<<10)-1)
 
-enum {
-	USB_CLOCK_TYPE_REF_12,
-	USB_CLOCK_TYPE_REF_24,
-	USB_CLOCK_TYPE_REF_48,
-	USB_CLOCK_TYPE_CRYSTAL_12,
-};
-
 /**
  * Logical transactions may take numerous low level
  * transactions, especially when splits are concerned. This
@@ -471,19 +464,6 @@
 /* Returns the IO address to push/pop stuff data from the FIFOs */
 #define USB_FIFO_ADDRESS(channel, usb_index) (CVMX_USBCX_GOTGCTL(usb_index) + ((channel)+1)*0x1000)
 
-static int octeon_usb_get_clock_type(void)
-{
-	switch (cvmx_sysinfo_get()->board_type) {
-	case CVMX_BOARD_TYPE_BBGW_REF:
-	case CVMX_BOARD_TYPE_LANAI2_A:
-	case CVMX_BOARD_TYPE_LANAI2_U:
-	case CVMX_BOARD_TYPE_LANAI2_G:
-	case CVMX_BOARD_TYPE_UBNT_E100:
-		return USB_CLOCK_TYPE_CRYSTAL_12;
-	}
-	return USB_CLOCK_TYPE_REF_48;
-}
-
 /**
  * Read a USB 32bit CSR. It performs the necessary address swizzle
  * for 32bit CSRs and logs the value in a readable format if
@@ -582,37 +562,6 @@
 		return 0; /* Data0 */
 }
 
-
-/**
- * Return the number of USB ports supported by this Octeon
- * chip. If the chip doesn't support USB, or is not supported
- * by this API, a zero will be returned. Most Octeon chips
- * support one usb port, but some support two ports.
- * cvmx_usb_initialize() must be called on independent
- * struct cvmx_usb_state.
- *
- * Returns: Number of port, zero if usb isn't supported
- */
-static int cvmx_usb_get_num_ports(void)
-{
-	int arch_ports = 0;
-
-	if (OCTEON_IS_MODEL(OCTEON_CN56XX))
-		arch_ports = 1;
-	else if (OCTEON_IS_MODEL(OCTEON_CN52XX))
-		arch_ports = 2;
-	else if (OCTEON_IS_MODEL(OCTEON_CN50XX))
-		arch_ports = 1;
-	else if (OCTEON_IS_MODEL(OCTEON_CN31XX))
-		arch_ports = 1;
-	else if (OCTEON_IS_MODEL(OCTEON_CN30XX))
-		arch_ports = 1;
-	else
-		arch_ports = 0;
-
-	return arch_ports;
-}
-
 /**
  * Initialize a USB port for use. This must be called before any
  * other access to the Octeon USB port is made. The port starts
@@ -628,41 +577,16 @@
  * Returns: 0 or a negative error code.
  */
 static int cvmx_usb_initialize(struct cvmx_usb_state *usb,
-			       int usb_port_number)
+			       int usb_port_number,
+			       enum cvmx_usb_initialize_flags flags)
 {
 	union cvmx_usbnx_clk_ctl usbn_clk_ctl;
 	union cvmx_usbnx_usbp_ctl_status usbn_usbp_ctl_status;
-	enum cvmx_usb_initialize_flags flags = 0;
 	int i;
 
 	/* At first allow 0-1 for the usb port number */
 	if ((usb_port_number < 0) || (usb_port_number > 1))
 		return -EINVAL;
-	/* For all chips except 52XX there is only one port */
-	if (!OCTEON_IS_MODEL(OCTEON_CN52XX) && (usb_port_number > 0))
-		return -EINVAL;
-	/* Try to determine clock type automatically */
-	if (octeon_usb_get_clock_type() == USB_CLOCK_TYPE_CRYSTAL_12) {
-		/* Only 12 MHZ crystals are supported */
-		flags |= CVMX_USB_INITIALIZE_FLAGS_CLOCK_XO_XI;
-	} else {
-		flags |= CVMX_USB_INITIALIZE_FLAGS_CLOCK_XO_GND;
-
-		switch (octeon_usb_get_clock_type()) {
-		case USB_CLOCK_TYPE_REF_12:
-			flags |= CVMX_USB_INITIALIZE_FLAGS_CLOCK_12MHZ;
-			break;
-		case USB_CLOCK_TYPE_REF_24:
-			flags |= CVMX_USB_INITIALIZE_FLAGS_CLOCK_24MHZ;
-			break;
-		case USB_CLOCK_TYPE_REF_48:
-			flags |= CVMX_USB_INITIALIZE_FLAGS_CLOCK_48MHZ;
-			break;
-		default:
-			return -EINVAL;
-			break;
-		}
-	}
 
 	memset(usb, 0, sizeof(*usb));
 	usb->init_flags = flags;
@@ -3431,7 +3355,6 @@
 	return 0;
 }
 
-
 static const struct hc_driver octeon_hc_driver = {
 	.description		= "Octeon USB",
 	.product_desc		= "Octeon Host Controller",
@@ -3448,15 +3371,74 @@
 	.hub_control		= octeon_usb_hub_control,
 };
 
-
-static int octeon_usb_driver_probe(struct device *dev)
+static int octeon_usb_probe(struct platform_device *pdev)
 {
 	int status;
-	int usb_num = to_platform_device(dev)->id;
-	int irq = platform_get_irq(to_platform_device(dev), 0);
+	int initialize_flags;
+	int usb_num;
+	struct resource *res_mem;
+	struct device_node *usbn_node;
+	int irq = platform_get_irq(pdev, 0);
+	struct device *dev = &pdev->dev;
 	struct octeon_hcd *priv;
 	struct usb_hcd *hcd;
 	unsigned long flags;
+	u32 clock_rate = 48000000;
+	bool is_crystal_clock = false;
+	const char *clock_type;
+	int i;
+
+	if (dev->of_node == NULL) {
+		dev_err(dev, "Error: empty of_node\n");
+		return -ENXIO;
+	}
+	usbn_node = dev->of_node->parent;
+
+	i = of_property_read_u32(usbn_node,
+				 "refclk-frequency", &clock_rate);
+	if (i) {
+		dev_err(dev, "No USBN \"refclk-frequency\"\n");
+		return -ENXIO;
+	}
+	switch (clock_rate) {
+	case 12000000:
+		initialize_flags = CVMX_USB_INITIALIZE_FLAGS_CLOCK_12MHZ;
+		break;
+	case 24000000:
+		initialize_flags = CVMX_USB_INITIALIZE_FLAGS_CLOCK_24MHZ;
+		break;
+	case 48000000:
+		initialize_flags = CVMX_USB_INITIALIZE_FLAGS_CLOCK_48MHZ;
+		break;
+	default:
+		dev_err(dev, "Illebal USBN \"refclk-frequency\" %u\n", clock_rate);
+		return -ENXIO;
+
+	}
+
+	i = of_property_read_string(usbn_node,
+				    "refclk-type", &clock_type);
+
+	if (!i && strcmp("crystal", clock_type) == 0)
+		is_crystal_clock = true;
+
+	if (is_crystal_clock)
+		initialize_flags |= CVMX_USB_INITIALIZE_FLAGS_CLOCK_XO_XI;
+	else
+		initialize_flags |= CVMX_USB_INITIALIZE_FLAGS_CLOCK_XO_GND;
+
+	res_mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	if (res_mem == NULL) {
+		dev_err(dev, "found no memory resource\n");
+		return -ENXIO;
+	}
+	usb_num = (res_mem->start >> 44) & 1;
+
+	if (irq < 0) {
+		/* Defective device tree, but we know how to fix it. */
+		irq_hw_number_t hwirq = usb_num ? (1 << 6) + 17 : 56;
+		irq = irq_create_mapping(NULL, hwirq);
+	}
 
 	/*
 	 * Set the DMA mask to 64bits so we get buffers already translated for
@@ -3465,90 +3447,6 @@
 	dev->coherent_dma_mask = ~0;
 	dev->dma_mask = &dev->coherent_dma_mask;
 
-	hcd = usb_create_hcd(&octeon_hc_driver, dev, dev_name(dev));
-	if (!hcd) {
-		dev_dbg(dev, "Failed to allocate memory for HCD\n");
-		return -1;
-	}
-	hcd->uses_new_polling = 1;
-	priv = (struct octeon_hcd *)hcd->hcd_priv;
-
-	spin_lock_init(&priv->lock);
-
-	tasklet_init(&priv->dequeue_tasklet, octeon_usb_urb_dequeue_work, (unsigned long)priv);
-	INIT_LIST_HEAD(&priv->dequeue_list);
-
-	status = cvmx_usb_initialize(&priv->usb, usb_num);
-	if (status) {
-		dev_dbg(dev, "USB initialization failed with %d\n", status);
-		kfree(hcd);
-		return -1;
-	}
-
-	/* This delay is needed for CN3010, but I don't know why... */
-	mdelay(10);
-
-	spin_lock_irqsave(&priv->lock, flags);
-	cvmx_usb_poll(&priv->usb);
-	spin_unlock_irqrestore(&priv->lock, flags);
-
-	status = usb_add_hcd(hcd, irq, IRQF_SHARED);
-	if (status) {
-		dev_dbg(dev, "USB add HCD failed with %d\n", status);
-		kfree(hcd);
-		return -1;
-	}
-	device_wakeup_enable(hcd->self.controller);
-
-	dev_dbg(dev, "Registered HCD for port %d on irq %d\n", usb_num, irq);
-
-	return 0;
-}
-
-static int octeon_usb_driver_remove(struct device *dev)
-{
-	int status;
-	struct usb_hcd *hcd = dev_get_drvdata(dev);
-	struct octeon_hcd *priv = hcd_to_octeon(hcd);
-	unsigned long flags;
-
-	usb_remove_hcd(hcd);
-	tasklet_kill(&priv->dequeue_tasklet);
-	spin_lock_irqsave(&priv->lock, flags);
-	status = cvmx_usb_shutdown(&priv->usb);
-	spin_unlock_irqrestore(&priv->lock, flags);
-	if (status)
-		dev_dbg(dev, "USB shutdown failed with %d\n", status);
-
-	kfree(hcd);
-
-	return 0;
-}
-
-static struct device_driver octeon_usb_driver = {
-	.name	= "OcteonUSB",
-	.bus	= &platform_bus_type,
-	.probe	= octeon_usb_driver_probe,
-	.remove	= octeon_usb_driver_remove,
-};
-
-
-#define MAX_USB_PORTS   10
-static struct platform_device *pdev_glob[MAX_USB_PORTS];
-static int octeon_usb_registered;
-static int __init octeon_usb_module_init(void)
-{
-	int num_devices = cvmx_usb_get_num_ports();
-	int device;
-
-	if (usb_disabled() || num_devices == 0)
-		return -ENODEV;
-
-	if (driver_register(&octeon_usb_driver))
-		return -ENOMEM;
-
-	octeon_usb_registered = 1;
-
 	/*
 	 * Only cn52XX and cn56XX have DWC_OTG USB hardware and the
 	 * IOB priority registers.  Under heavy network load USB
@@ -3569,41 +3467,102 @@
 		cvmx_write_csr(CVMX_IOB_N2C_L2C_PRI_CNT, pri_cnt.u64);
 	}
 
-	for (device = 0; device < num_devices; device++) {
-		struct resource irq_resource;
-		struct platform_device *pdev;
-		memset(&irq_resource, 0, sizeof(irq_resource));
-		irq_resource.start = (device == 0) ? OCTEON_IRQ_USB0 : OCTEON_IRQ_USB1;
-		irq_resource.end = irq_resource.start;
-		irq_resource.flags = IORESOURCE_IRQ;
-		pdev = platform_device_register_simple((char *)octeon_usb_driver.  name, device, &irq_resource, 1);
-		if (IS_ERR(pdev)) {
-			driver_unregister(&octeon_usb_driver);
-			octeon_usb_registered = 0;
-			return PTR_ERR(pdev);
-		}
-		if (device < MAX_USB_PORTS)
-			pdev_glob[device] = pdev;
-
+	hcd = usb_create_hcd(&octeon_hc_driver, dev, dev_name(dev));
+	if (!hcd) {
+		dev_dbg(dev, "Failed to allocate memory for HCD\n");
+		return -1;
 	}
+	hcd->uses_new_polling = 1;
+	priv = (struct octeon_hcd *)hcd->hcd_priv;
+
+	spin_lock_init(&priv->lock);
+
+	tasklet_init(&priv->dequeue_tasklet, octeon_usb_urb_dequeue_work, (unsigned long)priv);
+	INIT_LIST_HEAD(&priv->dequeue_list);
+
+	status = cvmx_usb_initialize(&priv->usb, usb_num, initialize_flags);
+	if (status) {
+		dev_dbg(dev, "USB initialization failed with %d\n", status);
+		kfree(hcd);
+		return -1;
+	}
+
+	/* This delay is needed for CN3010, but I don't know why... */
+	mdelay(10);
+
+	spin_lock_irqsave(&priv->lock, flags);
+	cvmx_usb_poll(&priv->usb);
+	spin_unlock_irqrestore(&priv->lock, flags);
+
+	status = usb_add_hcd(hcd, irq, 0);
+	if (status) {
+		dev_dbg(dev, "USB add HCD failed with %d\n", status);
+		kfree(hcd);
+		return -1;
+	}
+	device_wakeup_enable(hcd->self.controller);
+
+	dev_info(dev, "Registered HCD for port %d on irq %d\n", usb_num, irq);
+
 	return 0;
 }
 
-static void __exit octeon_usb_module_cleanup(void)
+static int octeon_usb_remove(struct platform_device *pdev)
 {
-	int i;
+	int status;
+	struct device *dev = &pdev->dev;
+	struct usb_hcd *hcd = dev_get_drvdata(dev);
+	struct octeon_hcd *priv = hcd_to_octeon(hcd);
+	unsigned long flags;
 
-	for (i = 0; i < MAX_USB_PORTS; i++)
-		if (pdev_glob[i]) {
-			platform_device_unregister(pdev_glob[i]);
-			pdev_glob[i] = NULL;
-		}
-	if (octeon_usb_registered)
-		driver_unregister(&octeon_usb_driver);
+	usb_remove_hcd(hcd);
+	tasklet_kill(&priv->dequeue_tasklet);
+	spin_lock_irqsave(&priv->lock, flags);
+	status = cvmx_usb_shutdown(&priv->usb);
+	spin_unlock_irqrestore(&priv->lock, flags);
+	if (status)
+		dev_dbg(dev, "USB shutdown failed with %d\n", status);
+
+	kfree(hcd);
+
+	return 0;
 }
 
+static struct of_device_id octeon_usb_match[] = {
+	{
+		.compatible = "cavium,octeon-5750-usbc",
+	},
+	{},
+};
+
+static struct platform_driver octeon_usb_driver = {
+	.driver = {
+		.name       = "OcteonUSB",
+		.owner		= THIS_MODULE,
+		.of_match_table = octeon_usb_match,
+	},
+	.probe      = octeon_usb_probe,
+	.remove     = octeon_usb_remove,
+};
+
+static int __init octeon_usb_driver_init(void)
+{
+	if (usb_disabled())
+		return 0;
+
+	return platform_driver_register(&octeon_usb_driver);
+}
+module_init(octeon_usb_driver_init);
+
+static void __exit octeon_usb_driver_exit(void)
+{
+	if (usb_disabled())
+		return;
+
+	platform_driver_unregister(&octeon_usb_driver);
+}
+module_exit(octeon_usb_driver_exit);
+
 MODULE_LICENSE("GPL");
-MODULE_AUTHOR("Cavium Networks <support@caviumnetworks.com>");
-MODULE_DESCRIPTION("Cavium Networks Octeon USB Host driver.");
-module_init(octeon_usb_module_init);
-module_exit(octeon_usb_module_cleanup);
+MODULE_AUTHOR("Cavium, Inc. <support@cavium.com>");
+MODULE_DESCRIPTION("Cavium Inc. OCTEON USB Host driver.");
diff --git a/drivers/staging/ozwpan/ozproto.c b/drivers/staging/ozwpan/ozproto.c
index cb06036..5d965cf 100644
--- a/drivers/staging/ozwpan/ozproto.c
+++ b/drivers/staging/ozwpan/ozproto.c
@@ -668,8 +668,8 @@
 	if (binding) {
 		binding->ptype.type = __constant_htons(OZ_ETHERTYPE);
 		binding->ptype.func = oz_pkt_recv;
-		memcpy(binding->name, net_dev, OZ_MAX_BINDING_LEN);
 		if (net_dev && *net_dev) {
+			memcpy(binding->name, net_dev, OZ_MAX_BINDING_LEN);
 			oz_dbg(ON, "Adding binding: %s\n", net_dev);
 			binding->ptype.dev =
 				dev_get_by_name(&init_net, net_dev);
@@ -680,6 +680,7 @@
 			}
 		} else {
 			oz_dbg(ON, "Binding to all netcards\n");
+			memset(binding->name, 0, OZ_MAX_BINDING_LEN);
 			binding->ptype.dev = NULL;
 		}
 		if (binding) {
diff --git a/drivers/staging/rtl8188eu/core/rtw_wlan_util.c b/drivers/staging/rtl8188eu/core/rtw_wlan_util.c
index 153ec61..96df62f 100644
--- a/drivers/staging/rtl8188eu/core/rtw_wlan_util.c
+++ b/drivers/staging/rtl8188eu/core/rtw_wlan_util.c
@@ -912,12 +912,12 @@
 	unsigned char *pbuf;
 	u32 wpa_ielen = 0;
 	u8 *pbssid = GetAddr3Ptr(pframe);
-	u32 hidden_ssid = 0;
 	struct HT_info_element *pht_info = NULL;
 	struct rtw_ieee80211_ht_cap *pht_cap = NULL;
 	u32 bcn_channel;
 	unsigned short	ht_cap_info;
 	unsigned char	ht_info_infos_0;
+	int ssid_len;
 
 	if (is_client_associated_to_ap(Adapter) == false)
 		return true;
@@ -999,21 +999,15 @@
 	}
 
 	/* checking SSID */
+	ssid_len = 0;
 	p = rtw_get_ie(bssid->IEs + _FIXED_IE_LENGTH_, _SSID_IE_, &len, bssid->IELength - _FIXED_IE_LENGTH_);
-	if (p == NULL) {
-		DBG_88E("%s marc: cannot find SSID for survey event\n", __func__);
-		hidden_ssid = true;
-	} else {
-		hidden_ssid = false;
+	if (p) {
+		ssid_len = *(p + 1);
+		if (ssid_len > NDIS_802_11_LENGTH_SSID)
+			ssid_len = 0;
 	}
-
-	if ((NULL != p) && (false == hidden_ssid && (*(p + 1)))) {
-		memcpy(bssid->Ssid.Ssid, (p + 2), *(p + 1));
-		bssid->Ssid.SsidLength = *(p + 1);
-	} else {
-		bssid->Ssid.SsidLength = 0;
-		bssid->Ssid.Ssid[0] = '\0';
-	}
+	memcpy(bssid->Ssid.Ssid, (p + 2), ssid_len);
+	bssid->Ssid.SsidLength = ssid_len;
 
 	RT_TRACE(_module_rtl871x_mlme_c_, _drv_info_, ("%s bssid.Ssid.Ssid:%s bssid.Ssid.SsidLength:%d "
 				"cur_network->network.Ssid.Ssid:%s len:%d\n", __func__, bssid->Ssid.Ssid,
diff --git a/drivers/staging/rtl8188eu/os_dep/ioctl_linux.c b/drivers/staging/rtl8188eu/os_dep/ioctl_linux.c
index dec9925..4ad80ae 100644
--- a/drivers/staging/rtl8188eu/os_dep/ioctl_linux.c
+++ b/drivers/staging/rtl8188eu/os_dep/ioctl_linux.c
@@ -2500,7 +2500,7 @@
 		 ("rtw_mp_ioctl_hdl: subcode [%d], len[%d], buffer_len[%d]\r\n",
 		  poidparam->subcode, poidparam->len, len));
 
-	if (poidparam->subcode >= MAX_MP_IOCTL_SUBCODE) {
+	if (poidparam->subcode >= ARRAY_SIZE(mp_ioctl_hdl)) {
 		RT_TRACE(_module_rtl871x_ioctl_os_c, _drv_err_, ("no matching drvext subcodes\r\n"));
 		ret = -EINVAL;
 		goto _rtw_mp_ioctl_hdl_exit;
@@ -3164,9 +3164,7 @@
 	u8 *p2pie;
 	uint p2pielen = 0, attr_contentlen = 0;
 	u8 attr_content[100] = {0x00};
-
-	u8 go_devadd_str[17 + 10] = {0x00};
-	/*  +10 is for the str "go_devadd =", we have to clear it at wrqu->data.pointer */
+	u8 go_devadd_str[17 + 12] = {};
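+	/* "\n\ndev_add =" (11 chars) + "xx:xx:xx:xx:xx:xx" (17 chars) + NUL = 29 bytes */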
 
 	/*	Commented by Albert 20121209 */
 	/*	The input data is the GO's interface address which the application wants to know its device address. */
@@ -3223,12 +3221,12 @@
 	spin_unlock_bh(&pmlmepriv->scanned_queue.lock);
 
 	if (!blnMatch)
-		sprintf(go_devadd_str, "\n\ndev_add = NULL");
+		snprintf(go_devadd_str, sizeof(go_devadd_str), "\n\ndev_add = NULL");
 	else
-		sprintf(go_devadd_str, "\n\ndev_add =%.2X:%.2X:%.2X:%.2X:%.2X:%.2X",
+		snprintf(go_devadd_str, sizeof(go_devadd_str), "\n\ndev_add =%.2X:%.2X:%.2X:%.2X:%.2X:%.2X",
 			attr_content[0], attr_content[1], attr_content[2], attr_content[3], attr_content[4], attr_content[5]);
 
-	if (copy_to_user(wrqu->data.pointer, go_devadd_str, 10 + 17))
+	if (copy_to_user(wrqu->data.pointer, go_devadd_str, sizeof(go_devadd_str)))
 		return -EFAULT;
 	return ret;
 }
diff --git a/drivers/staging/rtl8188eu/os_dep/os_intfs.c b/drivers/staging/rtl8188eu/os_dep/os_intfs.c
index 68f98fa..7c9ee58 100644
--- a/drivers/staging/rtl8188eu/os_dep/os_intfs.c
+++ b/drivers/staging/rtl8188eu/os_dep/os_intfs.c
@@ -653,7 +653,7 @@
 }
 
 static u16 rtw_select_queue(struct net_device *dev, struct sk_buff *skb,
-			    void *accel_priv)
+			    void *accel_priv, select_queue_fallback_t fallback)
 {
 	struct adapter	*padapter = rtw_netdev_priv(dev);
 	struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
diff --git a/drivers/staging/rtl8188eu/os_dep/usb_intf.c b/drivers/staging/rtl8188eu/os_dep/usb_intf.c
index 0a341d6..a70dcef 100644
--- a/drivers/staging/rtl8188eu/os_dep/usb_intf.c
+++ b/drivers/staging/rtl8188eu/os_dep/usb_intf.c
@@ -53,7 +53,7 @@
 	{USB_DEVICE(USB_VENDER_ID_REALTEK, 0x0179)}, /* 8188ETV */
 	/*=== Customer ID ===*/
 	/****** 8188EUS ********/
-	{USB_DEVICE(0x8179, 0x07B8)}, /* Abocom - Abocom */
+	{USB_DEVICE(0x07b8, 0x8179)}, /* Abocom - Abocom */
 	{USB_DEVICE(0x2001, 0x330F)}, /* DLink DWA-125 REV D1 */
 	{}	/* Terminating entry */
 };
diff --git a/drivers/staging/rtl8821ae/Kconfig b/drivers/staging/rtl8821ae/Kconfig
new file mode 100644
index 0000000..abccc9d
--- /dev/null
+++ b/drivers/staging/rtl8821ae/Kconfig
@@ -0,0 +1,11 @@
+config R8821AE
+	tristate "RealTek RTL8821AE Wireless LAN NIC driver"
+	depends on PCI && WLAN && MAC80211
+	depends on m
+	select WIRELESS_EXT
+	select WEXT_PRIV
+	select EEPROM_93CX6
+	select CRYPTO
+	default n
+	---help---
+	  If built as a module, it will be called rtl8821ae.ko.
diff --git a/drivers/staging/rtl8821ae/Makefile b/drivers/staging/rtl8821ae/Makefile
new file mode 100644
index 0000000..8a23bd7
--- /dev/null
+++ b/drivers/staging/rtl8821ae/Makefile
@@ -0,0 +1,35 @@
+PCI_MAIN_OBJS	:= base.o	\
+		rc.o	\
+		debug.o	\
+		regd.o	\
+		efuse.o	\
+		cam.o	\
+		ps.o	\
+		core.o	\
+		stats.o	\
+		pci.o	\
+
+BT_COEXIST_OBJS:=	btcoexist/halbtc8192e2ant.o\
+			btcoexist/halbtc8723b1ant.o\
+			btcoexist/halbtc8723b2ant.o\
+			btcoexist/halbtcoutsrc.o\
+			btcoexist/rtl_btc.o	\
+
+PCI_8821AE_HAL_OBJS:=	\
+	rtl8821ae/hw.o		\
+	rtl8821ae/table.o		\
+	rtl8821ae/sw.o		\
+	rtl8821ae/trx.o		\
+	rtl8821ae/led.o		\
+	rtl8821ae/fw.o		\
+	rtl8821ae/phy.o		\
+	rtl8821ae/rf.o		\
+	rtl8821ae/dm.o		\
+	rtl8821ae/pwrseq.o	\
+	rtl8821ae/pwrseqcmd.o	\
+	rtl8821ae/hal_btc.o	\
+	rtl8821ae/hal_bt_coexist.o	\
+
+rtl8821ae-objs += $(BT_COEXIST_OBJS) $(PCI_MAIN_OBJS) $(PCI_8821AE_HAL_OBJS)
+
+obj-$(CONFIG_R8821AE) += rtl8821ae.o
diff --git a/drivers/staging/rtl8821ae/TODO b/drivers/staging/rtl8821ae/TODO
new file mode 100644
index 0000000..3ee7529
--- /dev/null
+++ b/drivers/staging/rtl8821ae/TODO
@@ -0,0 +1,10 @@
+Realtek 8821AE PCI wifi driver TODO:
+  - remove built-in btcoexist module when the "real" one gets upstream
+  - remove built-in rtlwifi code by porting driver to use the "real" one
+    in the drivers/net/ directory.
+  - fix up coding style issues
+
+Please send any patches for this driver to:
+	Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+and the <devel@driverdev.osuosl.org> mailing list.
+
diff --git a/drivers/staging/rtl8821ae/base.c b/drivers/staging/rtl8821ae/base.c
new file mode 100644
index 0000000..18c936f
--- /dev/null
+++ b/drivers/staging/rtl8821ae/base.c
@@ -0,0 +1,1873 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2009-2010  Realtek Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ * The full GNU General Public License is included in this distribution in the
+ * file called LICENSE.
+ *
+ * Contact Information:
+ * wlanfae <wlanfae@realtek.com>
+ * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
+ * Hsinchu 300, Taiwan.
+ *
+ * Larry Finger <Larry.Finger@lwfinger.net>
+ *
+ *****************************************************************************/
+
+#include <linux/ip.h>
+#include <linux/module.h>
+#include "wifi.h"
+#include "rc.h"
+#include "base.h"
+#include "efuse.h"
+#include "cam.h"
+#include "ps.h"
+#include "regd.h"
+#include "pci.h"
+
+/*
+ *NOTICE!!!: This file will be very big, we should
+ *keep it clear under the following rules:
+ *
+ *This file includes the following parts, so if you add new
+ *functions into this file, please check which part they
+ *should be included in, or check if you should add a new part
+ *for this file:
+ *
+ *1) mac80211 init functions
+ *2) tx information functions
+ *3) functions called by core.c
+ *4) wq & timer callback functions
+ *5) frame process functions
+ *6) IOT functions
+ *7) sysfs functions
+ *8) vif functions
+ *9) ...
+ */
+
+/*********************************************************
+ *
+ * mac80211 init functions
+ *
+ *********************************************************/
+static struct ieee80211_channel rtl_channeltable_2g[] = {
+	{.center_freq = 2412,.hw_value = 1,},
+	{.center_freq = 2417,.hw_value = 2,},
+	{.center_freq = 2422,.hw_value = 3,},
+	{.center_freq = 2427,.hw_value = 4,},
+	{.center_freq = 2432,.hw_value = 5,},
+	{.center_freq = 2437,.hw_value = 6,},
+	{.center_freq = 2442,.hw_value = 7,},
+	{.center_freq = 2447,.hw_value = 8,},
+	{.center_freq = 2452,.hw_value = 9,},
+	{.center_freq = 2457,.hw_value = 10,},
+	{.center_freq = 2462,.hw_value = 11,},
+	{.center_freq = 2467,.hw_value = 12,},
+	{.center_freq = 2472,.hw_value = 13,},
+	{.center_freq = 2484,.hw_value = 14,},
+};
+
+static struct ieee80211_channel rtl_channeltable_5g[] = {
+	{.center_freq = 5180,.hw_value = 36,},
+	{.center_freq = 5200,.hw_value = 40,},
+	{.center_freq = 5220,.hw_value = 44,},
+	{.center_freq = 5240,.hw_value = 48,},
+	{.center_freq = 5260,.hw_value = 52,},
+	{.center_freq = 5280,.hw_value = 56,},
+	{.center_freq = 5300,.hw_value = 60,},
+	{.center_freq = 5320,.hw_value = 64,},
+	{.center_freq = 5500,.hw_value = 100,},
+	{.center_freq = 5520,.hw_value = 104,},
+	{.center_freq = 5540,.hw_value = 108,},
+	{.center_freq = 5560,.hw_value = 112,},
+	{.center_freq = 5580,.hw_value = 116,},
+	{.center_freq = 5600,.hw_value = 120,},
+	{.center_freq = 5620,.hw_value = 124,},
+	{.center_freq = 5640,.hw_value = 128,},
+	{.center_freq = 5660,.hw_value = 132,},
+	{.center_freq = 5680,.hw_value = 136,},
+	{.center_freq = 5700,.hw_value = 140,},
+	{.center_freq = 5745,.hw_value = 149,},
+	{.center_freq = 5765,.hw_value = 153,},
+	{.center_freq = 5785,.hw_value = 157,},
+	{.center_freq = 5805,.hw_value = 161,},
+	{.center_freq = 5825,.hw_value = 165,},
+};
+
+static struct ieee80211_rate rtl_ratetable_2g[] = {
+	{.bitrate = 10,.hw_value = 0x00,},
+	{.bitrate = 20,.hw_value = 0x01,},
+	{.bitrate = 55,.hw_value = 0x02,},
+	{.bitrate = 110,.hw_value = 0x03,},
+	{.bitrate = 60,.hw_value = 0x04,},
+	{.bitrate = 90,.hw_value = 0x05,},
+	{.bitrate = 120,.hw_value = 0x06,},
+	{.bitrate = 180,.hw_value = 0x07,},
+	{.bitrate = 240,.hw_value = 0x08,},
+	{.bitrate = 360,.hw_value = 0x09,},
+	{.bitrate = 480,.hw_value = 0x0a,},
+	{.bitrate = 540,.hw_value = 0x0b,},
+};
+
+static struct ieee80211_rate rtl_ratetable_5g[] = {
+	{.bitrate = 60,.hw_value = 0x04,},
+	{.bitrate = 90,.hw_value = 0x05,},
+	{.bitrate = 120,.hw_value = 0x06,},
+	{.bitrate = 180,.hw_value = 0x07,},
+	{.bitrate = 240,.hw_value = 0x08,},
+	{.bitrate = 360,.hw_value = 0x09,},
+	{.bitrate = 480,.hw_value = 0x0a,},
+	{.bitrate = 540,.hw_value = 0x0b,},
+};
+
+static const struct ieee80211_supported_band rtl_band_2ghz = {
+	.band = IEEE80211_BAND_2GHZ,
+
+	.channels = rtl_channeltable_2g,
+	.n_channels = ARRAY_SIZE(rtl_channeltable_2g),
+
+	.bitrates = rtl_ratetable_2g,
+	.n_bitrates = ARRAY_SIZE(rtl_ratetable_2g),
+
+	.ht_cap = {0},
+};
+
+static struct ieee80211_supported_band rtl_band_5ghz = {
+	.band = IEEE80211_BAND_5GHZ,
+
+	.channels = rtl_channeltable_5g,
+	.n_channels = ARRAY_SIZE(rtl_channeltable_5g),
+
+	.bitrates = rtl_ratetable_5g,
+	.n_bitrates = ARRAY_SIZE(rtl_ratetable_5g),
+
+	.ht_cap = {0},
+};
+
+static const u8 tid_to_ac[] = {
+	2, /* IEEE80211_AC_BE */
+	3, /* IEEE80211_AC_BK */
+	3, /* IEEE80211_AC_BK */
+	2, /* IEEE80211_AC_BE */
+	1, /* IEEE80211_AC_VI */
+	1, /* IEEE80211_AC_VI */
+	0, /* IEEE80211_AC_VO */
+	0, /* IEEE80211_AC_VO */
+};
+
+u8 rtl_tid_to_ac(struct ieee80211_hw *hw, u8 tid)
+{
+	return tid_to_ac[tid];
+}
+
+static void _rtl_init_hw_ht_capab(struct ieee80211_hw *hw,
+				  struct ieee80211_sta_ht_cap *ht_cap)
+{
+	struct rtl_priv *rtlpriv = rtl_priv(hw);
+	struct rtl_phy *rtlphy = &(rtlpriv->phy);
+
+	ht_cap->ht_supported = true;
+	ht_cap->cap = IEEE80211_HT_CAP_SUP_WIDTH_20_40 |
+	    IEEE80211_HT_CAP_SGI_40 |
+	    IEEE80211_HT_CAP_SGI_20 |
+	    IEEE80211_HT_CAP_DSSSCCK40 | IEEE80211_HT_CAP_MAX_AMSDU;
+
+	if (rtlpriv->rtlhal.disable_amsdu_8k)
+		ht_cap->cap &= ~IEEE80211_HT_CAP_MAX_AMSDU;
+
+	/*
+	 *Maximum length of AMPDU that the STA can receive.
+	 *Length = 2 ^ (13 + max_ampdu_length_exp) - 1 (octets)
+	 */
+	ht_cap->ampdu_factor = IEEE80211_HT_MAX_AMPDU_64K;
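+	/* e.g. IEEE80211_HT_MAX_AMPDU_64K: 2 ^ (13 + 3) - 1 = 65535 octets */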
+
+	/* Minimum MPDU start spacing */
+	ht_cap->ampdu_density = IEEE80211_HT_MPDU_DENSITY_16;
+
+	ht_cap->mcs.tx_params = IEEE80211_HT_MCS_TX_DEFINED;
+
+	/*
+	 *hw->wiphy->bands[IEEE80211_BAND_2GHZ]
+	 *base on ant_num
+	 *rx_mask: RX mask
+	 *if rx_ant =1 rx_mask[0]=0xff;==>MCS0-MCS7
+	 *if rx_ant =2 rx_mask[1]=0xff;==>MCS8-MCS15
+	 *if rx_ant >=3 rx_mask[2]=0xff;
+	 *if BW_40 rx_mask[4]=0x01;
+	 *highest supported RX rate
+	 */
+	if (rtlpriv->dm.supp_phymode_switch) {
+		RT_TRACE(COMP_INIT, DBG_EMERG, ("Support phy mode switch\n"));
+
+		ht_cap->mcs.rx_mask[0] = 0xFF;
+		ht_cap->mcs.rx_mask[1] = 0xFF;
+		ht_cap->mcs.rx_mask[4] = 0x01;
+
+		ht_cap->mcs.rx_highest = MAX_BIT_RATE_40MHZ_MCS15;
+	} else {
+		if (get_rf_type(rtlphy) == RF_1T2R ||
+				get_rf_type(rtlphy) == RF_2T2R) {
+
+			RT_TRACE(COMP_INIT, DBG_DMESG, ("1T2R or 2T2R\n"));
+
+			ht_cap->mcs.rx_mask[0] = 0xFF;
+			ht_cap->mcs.rx_mask[1] = 0xFF;
+			ht_cap->mcs.rx_mask[4] = 0x01;
+
+			ht_cap->mcs.rx_highest = MAX_BIT_RATE_40MHZ_MCS15;
+		} else if (get_rf_type(rtlphy) == RF_1T1R) {
+
+			RT_TRACE(COMP_INIT, DBG_DMESG, ("1T1R\n"));
+
+			ht_cap->mcs.rx_mask[0] = 0xFF;
+			ht_cap->mcs.rx_mask[1] = 0x00;
+			ht_cap->mcs.rx_mask[4] = 0x01;
+
+			ht_cap->mcs.rx_highest = MAX_BIT_RATE_40MHZ_MCS7;
+		}
+	}
+}
+
+static void _rtl_init_mac80211(struct ieee80211_hw *hw)
+{
+	struct rtl_priv *rtlpriv = rtl_priv(hw);
+	struct rtl_hal *rtlhal = rtl_hal(rtlpriv);
+	struct rtl_mac *rtlmac = rtl_mac(rtl_priv(hw));
+	struct rtl_efuse *rtlefuse = rtl_efuse(rtl_priv(hw));
+	struct ieee80211_supported_band *sband;
+
+
+	if (rtlhal->macphymode == SINGLEMAC_SINGLEPHY &&
+	    rtlhal->bandset == BAND_ON_BOTH) {
+		/* 1: 2.4 G bands */
+		/* <1> use  mac->bands as mem for hw->wiphy->bands */
+		sband = &(rtlmac->bands[IEEE80211_BAND_2GHZ]);
+
+		/* <2> set hw->wiphy->bands[IEEE80211_BAND_2GHZ]
+		 * to default value(1T1R) */
+		memcpy(&(rtlmac->bands[IEEE80211_BAND_2GHZ]), &rtl_band_2ghz,
+				sizeof(struct ieee80211_supported_band));
+
+		/* <3> init ht cap base on ant_num */
+		_rtl_init_hw_ht_capab(hw, &sband->ht_cap);
+
+		/* <4> set mac->sband to wiphy->sband */
+		hw->wiphy->bands[IEEE80211_BAND_2GHZ] = sband;
+
+		/* 2: 5 G bands */
+		/* <1> use  mac->bands as mem for hw->wiphy->bands */
+		sband = &(rtlmac->bands[IEEE80211_BAND_5GHZ]);
+
+		/* <2> set hw->wiphy->bands[IEEE80211_BAND_5GHZ]
+		 * to default value(1T1R) */
+		memcpy(&(rtlmac->bands[IEEE80211_BAND_5GHZ]), &rtl_band_5ghz,
+				sizeof(struct ieee80211_supported_band));
+
+		/* <3> init ht cap base on ant_num */
+		_rtl_init_hw_ht_capab(hw, &sband->ht_cap);
+
+		/* <4> set mac->sband to wiphy->sband */
+		hw->wiphy->bands[IEEE80211_BAND_5GHZ] = sband;
+	} else {
+		if (rtlhal->current_bandtype == BAND_ON_2_4G) {
+			/* <1> use  mac->bands as mem for hw->wiphy->bands */
+			sband = &(rtlmac->bands[IEEE80211_BAND_2GHZ]);
+
+			/* <2> set hw->wiphy->bands[IEEE80211_BAND_2GHZ]
+			 * to default value(1T1R) */
+			memcpy(&(rtlmac->bands[IEEE80211_BAND_2GHZ]),
+			       &rtl_band_2ghz,
+			       sizeof(struct ieee80211_supported_band));
+
+			/* <3> init ht cap base on ant_num */
+			_rtl_init_hw_ht_capab(hw, &sband->ht_cap);
+
+			/* <4> set mac->sband to wiphy->sband */
+			hw->wiphy->bands[IEEE80211_BAND_2GHZ] = sband;
+		} else if (rtlhal->current_bandtype == BAND_ON_5G) {
+			/* <1> use  mac->bands as mem for hw->wiphy->bands */
+			sband = &(rtlmac->bands[IEEE80211_BAND_5GHZ]);
+
+			/* <2> set hw->wiphy->bands[IEEE80211_BAND_5GHZ]
+			 * to default value(1T1R) */
+			memcpy(&(rtlmac->bands[IEEE80211_BAND_5GHZ]),
+			       &rtl_band_5ghz,
+			       sizeof(struct ieee80211_supported_band));
+
+			/* <3> init ht cap base on ant_num */
+			_rtl_init_hw_ht_capab(hw, &sband->ht_cap);
+
+			/* <4> set mac->sband to wiphy->sband */
+			hw->wiphy->bands[IEEE80211_BAND_5GHZ] = sband;
+		} else {
+			RT_TRACE(COMP_INIT, DBG_EMERG, ("Err BAND %d\n",
+				 rtlhal->current_bandtype));
+		}
+	}
+	/* <5> set hw caps */
+	hw->flags = IEEE80211_HW_SIGNAL_DBM |
+	    IEEE80211_HW_RX_INCLUDES_FCS |
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(3,4,0))
+	    IEEE80211_HW_BEACON_FILTER |
+#endif
+	    IEEE80211_HW_AMPDU_AGGREGATION |
+	    IEEE80211_HW_REPORTS_TX_ACK_STATUS |
+	    IEEE80211_HW_CONNECTION_MONITOR |
+	    /* IEEE80211_HW_SUPPORTS_CQM_RSSI | */
+	    IEEE80211_HW_MFP_CAPABLE | 0;
+
+	/* swlps or hwlps has been set in diff chip in init_sw_vars */
+	if (rtlpriv->psc.b_swctrl_lps)
+		hw->flags |= IEEE80211_HW_SUPPORTS_PS |
+	    		IEEE80211_HW_PS_NULLFUNC_STACK |
+	    		/* IEEE80211_HW_SUPPORTS_DYNAMIC_PS | */
+			0;
+/*<delete in kernel start>*/
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,37))
+	hw->wiphy->interface_modes =
+	    BIT(NL80211_IFTYPE_AP) |
+	    BIT(NL80211_IFTYPE_STATION) |
+	    BIT(NL80211_IFTYPE_ADHOC) |
+	    BIT(NL80211_IFTYPE_MESH_POINT) |
+	    BIT(NL80211_IFTYPE_P2P_CLIENT) |
+	    BIT(NL80211_IFTYPE_P2P_GO);
+#else
+/*<delete in kernel end>*/
+	hw->wiphy->interface_modes =
+	    BIT(NL80211_IFTYPE_AP) |
+	    BIT(NL80211_IFTYPE_STATION) |
+	    BIT(NL80211_IFTYPE_ADHOC) |
+	    BIT(NL80211_IFTYPE_MESH_POINT) ;
+/*<delete in kernel start>*/
+#endif
+/*<delete in kernel end>*/
+#if (LINUX_VERSION_CODE > KERNEL_VERSION(2,6,39))
+	hw->wiphy->flags |= WIPHY_FLAG_IBSS_RSN;
+#endif
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,3,0))
+	hw->wiphy->flags |= WIPHY_FLAG_HAS_REMAIN_ON_CHANNEL;
+#endif
+
+	hw->wiphy->rts_threshold = 2347;
+
+	hw->queues = AC_MAX;
+	hw->extra_tx_headroom = RTL_TX_HEADER_SIZE;
+
+	/* TODO: Correct this value for our hw */
+	/* TODO: define these hard code value */
+	hw->max_listen_interval = 10;
+	hw->max_rate_tries = 4;
+	/* hw->max_rates = 1; */
+	hw->sta_data_size = sizeof(struct rtl_sta_info);
+#ifdef VIF_TODO
+	hw->vif_data_size = sizeof(struct rtl_vif_info);
+#endif
+
+	/* <6> mac address */
+	if (is_valid_ether_addr(rtlefuse->dev_addr)) {
+		SET_IEEE80211_PERM_ADDR(hw, rtlefuse->dev_addr);
+	} else {
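+		/* no valid MAC in efuse: fall back to a Realtek OUI (00:e0:4c) with a random last octet */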
+		u8 rtlmac[] = { 0x00, 0xe0, 0x4c, 0x81, 0x92, 0x00 };
+		get_random_bytes((rtlmac + (ETH_ALEN - 1)), 1);
+		SET_IEEE80211_PERM_ADDR(hw, rtlmac);
+	}
+
+}
+
+static void _rtl_init_deferred_work(struct ieee80211_hw *hw)
+{
+	struct rtl_priv *rtlpriv = rtl_priv(hw);
+
+	/* <1> timer */
+	init_timer(&rtlpriv->works.watchdog_timer);
+	setup_timer(&rtlpriv->works.watchdog_timer,
+		    rtl_watch_dog_timer_callback, (unsigned long)hw);
+	init_timer(&rtlpriv->works.dualmac_easyconcurrent_retrytimer);
+	setup_timer(&rtlpriv->works.dualmac_easyconcurrent_retrytimer,
+		    rtl_easy_concurrent_retrytimer_callback, (unsigned long)hw);
+	/* <2> work queue */
+	rtlpriv->works.hw = hw;
+/*<delete in kernel start>*/
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,37))
+/*<delete in kernel end>*/
+	rtlpriv->works.rtl_wq = alloc_workqueue(rtlpriv->cfg->name, 0, 0);
+/*<delete in kernel start>*/
+#else
+	rtlpriv->works.rtl_wq = create_workqueue(rtlpriv->cfg->name);
+#endif
+/*<delete in kernel end>*/
+	INIT_DELAYED_WORK(&rtlpriv->works.watchdog_wq,
+			  (void *)rtl_watchdog_wq_callback);
+	INIT_DELAYED_WORK(&rtlpriv->works.ips_nic_off_wq,
+			  (void *)rtl_ips_nic_off_wq_callback);
+	INIT_DELAYED_WORK(&rtlpriv->works.ps_work,
+			  (void *)rtl_swlps_wq_callback);
+	INIT_DELAYED_WORK(&rtlpriv->works.ps_rfon_wq,
+			  (void *)rtl_swlps_rfon_wq_callback);
+	INIT_DELAYED_WORK(&rtlpriv->works.fwevt_wq,
+			  (void *)rtl_fwevt_wq_callback);
+
+}
+
+void rtl_deinit_deferred_work(struct ieee80211_hw *hw)
+{
+	struct rtl_priv *rtlpriv = rtl_priv(hw);
+
+	del_timer_sync(&rtlpriv->works.watchdog_timer);
+
+	cancel_delayed_work(&rtlpriv->works.watchdog_wq);
+	cancel_delayed_work(&rtlpriv->works.ips_nic_off_wq);
+	cancel_delayed_work(&rtlpriv->works.ps_work);
+	cancel_delayed_work(&rtlpriv->works.ps_rfon_wq);
+	cancel_delayed_work(&rtlpriv->works.fwevt_wq);
+}
+
+void rtl_init_rfkill(struct ieee80211_hw *hw)
+{
+	struct rtl_priv *rtlpriv = rtl_priv(hw);
+
+	bool radio_state;
+	bool blocked;
+	u8 valid = 0;
+
+	/*set init state to on */
+	rtlpriv->rfkill.rfkill_state = 1;
+	wiphy_rfkill_set_hw_state(hw->wiphy, 0);
+
+	radio_state = rtlpriv->cfg->ops->radio_onoff_checking(hw, &valid);
+
+	if (valid) {
+		printk(KERN_INFO "rtlwifi: wireless switch is %s\n",
+		       rtlpriv->rfkill.rfkill_state ? "on" : "off");
+
+		rtlpriv->rfkill.rfkill_state = radio_state;
+
+		blocked = (rtlpriv->rfkill.rfkill_state == 1) ? 0 : 1;
+		wiphy_rfkill_set_hw_state(hw->wiphy, blocked);
+	}
+
+	wiphy_rfkill_start_polling(hw->wiphy);
+}
+
+void rtl_deinit_rfkill(struct ieee80211_hw *hw)
+{
+	wiphy_rfkill_stop_polling(hw->wiphy);
+}
+
+#ifdef VIF_TODO
+static void rtl_init_vif(struct ieee80211_hw *hw)
+{
+	struct rtl_priv *rtlpriv = rtl_priv(hw);
+
+	INIT_LIST_HEAD(&rtlpriv->vif_priv.vif_list);
+
+	rtlpriv->vif_priv.vifs = 0;
+}
+#endif
+
+int rtl_init_core(struct ieee80211_hw *hw)
+{
+	struct rtl_priv *rtlpriv = rtl_priv(hw);
+	struct rtl_mac *rtlmac = rtl_mac(rtl_priv(hw));
+
+	/* <1> init mac80211 */
+	_rtl_init_mac80211(hw);
+	rtlmac->hw = hw;
+	rtlmac->link_state = MAC80211_NOLINK;
+
+	/* <2> rate control register */
+	hw->rate_control_algorithm = "rtl_rc";
+
+	/*
+	 * <3> init CRDA must come after init
+	 * mac80211 hw  in _rtl_init_mac80211.
+	 */
+	if (rtl_regd_init(hw, rtl_reg_notifier)) {
+		RT_TRACE(COMP_ERR, DBG_EMERG, ("REGD init failed\n"));
+		return 1;
+	}
+
+	/* <4> locks */
+	mutex_init(&rtlpriv->locks.conf_mutex);
+	spin_lock_init(&rtlpriv->locks.ips_lock);
+	spin_lock_init(&rtlpriv->locks.irq_th_lock);
+	spin_lock_init(&rtlpriv->locks.h2c_lock);
+	spin_lock_init(&rtlpriv->locks.rf_ps_lock);
+	spin_lock_init(&rtlpriv->locks.rf_lock);
+	spin_lock_init(&rtlpriv->locks.lps_lock);
+	spin_lock_init(&rtlpriv->locks.waitq_lock);
+	spin_lock_init(&rtlpriv->locks.entry_list_lock);
+	spin_lock_init(&rtlpriv->locks.cck_and_rw_pagea_lock);
+	spin_lock_init(&rtlpriv->locks.check_sendpkt_lock);
+	spin_lock_init(&rtlpriv->locks.fw_ps_lock);
+	spin_lock_init(&rtlpriv->locks.iqk_lock);
+	/* <5> init list */
+	INIT_LIST_HEAD(&rtlpriv->entry_list);
+
+	/* <6> init deferred work */
+	_rtl_init_deferred_work(hw);
+
+	/* <7> */
+#ifdef VIF_TODO
+	rtl_init_vif(hw);
+#endif
+
+	return 0;
+}
+
+void rtl_deinit_core(struct ieee80211_hw *hw)
+{
+}
+
+void rtl_init_rx_config(struct ieee80211_hw *hw)
+{
+	struct rtl_priv *rtlpriv = rtl_priv(hw);
+	struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
+
+	rtlpriv->cfg->ops->get_hw_reg(hw, HW_VAR_RCR, (u8 *) (&mac->rx_conf));
+}
+
+/*********************************************************
+ *
+ * tx information functions
+ *
+ *********************************************************/
+static void _rtl_qurey_shortpreamble_mode(struct ieee80211_hw *hw,
+					  struct rtl_tcb_desc *tcb_desc,
+					  struct ieee80211_tx_info *info)
+{
+	struct rtl_priv *rtlpriv = rtl_priv(hw);
+	u8 rate_flag = info->control.rates[0].flags;
+
+	tcb_desc->use_shortpreamble = false;
+
+	/* 1M can only use Long Preamble. 11B spec */
+	if (tcb_desc->hw_rate == rtlpriv->cfg->maps[RTL_RC_CCK_RATE1M])
+		return;
+	else if (rate_flag & IEEE80211_TX_RC_USE_SHORT_PREAMBLE)
+		tcb_desc->use_shortpreamble = true;
+
+	return;
+}
+
+static void _rtl_query_shortgi(struct ieee80211_hw *hw,
+			       struct ieee80211_sta *sta,
+			       struct rtl_tcb_desc *tcb_desc,
+			       struct ieee80211_tx_info *info)
+{
+	struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
+	u8 rate_flag = info->control.rates[0].flags;
+	u8 sgi_40 = 0, sgi_20 = 0, bw_40 = 0;
+	tcb_desc->use_shortgi = false;
+
+	if (sta == NULL)
+		return;
+
+	sgi_40 = sta->ht_cap.cap & IEEE80211_HT_CAP_SGI_40;
+	sgi_20 = sta->ht_cap.cap & IEEE80211_HT_CAP_SGI_20;
+
+	if (!(sta->ht_cap.ht_supported))
+		return;
+
+	if (!sgi_40 && !sgi_20)
+		return;
+
+	if (mac->opmode == NL80211_IFTYPE_STATION)
+		bw_40 = mac->bw_40;
+	else if (mac->opmode == NL80211_IFTYPE_AP ||
+		 mac->opmode == NL80211_IFTYPE_ADHOC ||
+		 mac->opmode == NL80211_IFTYPE_MESH_POINT)
+		bw_40 = sta->ht_cap.cap & IEEE80211_HT_CAP_SUP_WIDTH_20_40;
+
+	if ((bw_40 == true) && sgi_40)
+		tcb_desc->use_shortgi = true;
+	else if ((bw_40 == false) && sgi_20)
+		tcb_desc->use_shortgi = true;
+
+	if (!(rate_flag & IEEE80211_TX_RC_SHORT_GI))
+		tcb_desc->use_shortgi = false;
+}
+
+static void _rtl_query_protection_mode(struct ieee80211_hw *hw,
+				       struct rtl_tcb_desc *tcb_desc,
+				       struct ieee80211_tx_info *info)
+{
+	struct rtl_priv *rtlpriv = rtl_priv(hw);
+	u8 rate_flag = info->control.rates[0].flags;
+
+	/* Common Settings */
+	tcb_desc->b_rts_stbc = false;
+	tcb_desc->b_cts_enable = false;
+	tcb_desc->rts_sc = 0;
+	tcb_desc->b_rts_bw = false;
+	tcb_desc->b_rts_use_shortpreamble = false;
+	tcb_desc->b_rts_use_shortgi = false;
+
+	if (rate_flag & IEEE80211_TX_RC_USE_CTS_PROTECT) {
+		/* Use CTS-to-SELF in protection mode. */
+		tcb_desc->b_rts_enable = true;
+		tcb_desc->b_cts_enable = true;
+		tcb_desc->rts_rate = rtlpriv->cfg->maps[RTL_RC_OFDM_RATE24M];
+	} else if (rate_flag & IEEE80211_TX_RC_USE_RTS_CTS) {
+		/* Use RTS-CTS in protection mode. */
+		tcb_desc->b_rts_enable = true;
+		tcb_desc->rts_rate = rtlpriv->cfg->maps[RTL_RC_OFDM_RATE24M];
+	}
+}
+
+static void _rtl_txrate_selectmode(struct ieee80211_hw *hw,
+				   struct ieee80211_sta *sta,
+				   struct rtl_tcb_desc *tcb_desc)
+{
+	struct rtl_priv *rtlpriv = rtl_priv(hw);
+	struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
+	struct rtl_sta_info *sta_entry = NULL;
+	u8 ratr_index = 7;
+
+	if (sta) {
+		sta_entry = (struct rtl_sta_info *) sta->drv_priv;
+		ratr_index = sta_entry->ratr_index;
+	}
+	if (!tcb_desc->disable_ratefallback || !tcb_desc->use_driver_rate) {
+		if (mac->opmode == NL80211_IFTYPE_STATION) {
+			tcb_desc->ratr_index = 0;
+		} else if (mac->opmode == NL80211_IFTYPE_ADHOC ||
+				mac->opmode == NL80211_IFTYPE_MESH_POINT) {
+			if (tcb_desc->b_multicast || tcb_desc->b_broadcast) {
+				tcb_desc->hw_rate =
+				    rtlpriv->cfg->maps[RTL_RC_CCK_RATE2M];
+				tcb_desc->use_driver_rate = 1;
+				tcb_desc->ratr_index = RATR_INX_WIRELESS_MC;
+			} else {
+				tcb_desc->ratr_index = ratr_index;
+			}
+		} else if (mac->opmode == NL80211_IFTYPE_AP) {
+			tcb_desc->ratr_index = ratr_index;
+		}
+	}
+
+	if (rtlpriv->dm.b_useramask) {
+		tcb_desc->ratr_index = ratr_index;
+		/* TODO we will differentiate adhoc and station in the future */
+		if (mac->opmode == NL80211_IFTYPE_STATION ||
+			mac->opmode == NL80211_IFTYPE_MESH_POINT) {
+			tcb_desc->mac_id = 0;
+
+			if (mac->mode == WIRELESS_MODE_N_24G) {
+				tcb_desc->ratr_index = RATR_INX_WIRELESS_NGB;
+			} else if (mac->mode == WIRELESS_MODE_N_5G) {
+				tcb_desc->ratr_index = RATR_INX_WIRELESS_NG;
+			} else if (mac->mode & WIRELESS_MODE_G) {
+				tcb_desc->ratr_index = RATR_INX_WIRELESS_GB;
+			} else if (mac->mode & WIRELESS_MODE_B) {
+				tcb_desc->ratr_index = RATR_INX_WIRELESS_B;
+			} else if (mac->mode & WIRELESS_MODE_A) {
+				tcb_desc->ratr_index = RATR_INX_WIRELESS_G;
+			}
+		} else if (mac->opmode == NL80211_IFTYPE_AP ||
+			mac->opmode == NL80211_IFTYPE_ADHOC) {
+			if (NULL != sta) {
+				if (sta->aid > 0) {
+					tcb_desc->mac_id = sta->aid + 1;
+				} else {
+					tcb_desc->mac_id = 1;
+				}
+			} else {
+				tcb_desc->mac_id = 0;
+			}
+		}
+	}
+}
+
+static void _rtl_query_bandwidth_mode(struct ieee80211_hw *hw,
+				      struct ieee80211_sta *sta,
+				      struct rtl_tcb_desc *tcb_desc)
+{
+	struct rtl_priv *rtlpriv = rtl_priv(hw);
+	struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
+
+	tcb_desc->b_packet_bw = false;
+	if (!sta)
+		return;
+	if (mac->opmode == NL80211_IFTYPE_AP ||
+		mac->opmode == NL80211_IFTYPE_ADHOC ||
+		mac->opmode == NL80211_IFTYPE_MESH_POINT) {
+		if (!(sta->ht_cap.ht_supported) ||
+			!(sta->ht_cap.cap & IEEE80211_HT_CAP_SUP_WIDTH_20_40))
+			return;
+	} else if (mac->opmode == NL80211_IFTYPE_STATION) {
+		if (!mac->bw_40 || !(sta->ht_cap.ht_supported))
+			return;
+	}
+	if (tcb_desc->b_multicast || tcb_desc->b_broadcast)
+		return;
+
+	/* legacy rates shall use 20MHz */
+	if (tcb_desc->hw_rate <= rtlpriv->cfg->maps[RTL_RC_OFDM_RATE54M])
+		return;
+
+	tcb_desc->b_packet_bw = true;
+}
+
+static u8 _rtl_get_highest_n_rate(struct ieee80211_hw *hw,
+				  struct ieee80211_sta *sta)
+{
+	struct rtl_priv *rtlpriv = rtl_priv(hw);
+	struct rtl_phy *rtlphy = &(rtlpriv->phy);
+	u8 hw_rate;
+
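+	/* A non-zero second rx_mask byte means the peer advertises
+	 * MCS 8-15, i.e. two spatial streams. */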
+	if ((get_rf_type(rtlphy) == RF_2T2R) &&
+	    (sta->ht_cap.mcs.rx_mask[1] != 0))
+		hw_rate = rtlpriv->cfg->maps[RTL_RC_HT_RATEMCS15];
+	else
+		hw_rate = rtlpriv->cfg->maps[RTL_RC_HT_RATEMCS7];
+
+	return hw_rate;
+}
+
+void rtl_get_tcb_desc(struct ieee80211_hw *hw,
+		      struct ieee80211_tx_info *info,
+		      struct ieee80211_sta *sta,
+		      struct sk_buff *skb, struct rtl_tcb_desc *tcb_desc)
+{
+	struct rtl_priv *rtlpriv = rtl_priv(hw);
+	struct rtl_mac *rtlmac = rtl_mac(rtl_priv(hw));
+	struct ieee80211_hdr *hdr = rtl_get_hdr(skb);
+	struct ieee80211_rate *txrate;
+	u16 fc = rtl_get_fc(skb);
+
+	txrate = ieee80211_get_tx_rate(hw, info);
+	if (txrate != NULL)
+		tcb_desc->hw_rate = txrate->hw_value;
+
+	if (ieee80211_is_data(fc)) {
+		/*
+		 * The data rate index is set to 0 in rtl_rc.c when the skb
+		 * carries special data or a management frame that needs a
+		 * low data rate.
+		 */
+
+		/*
+		 * So tcb_desc->hw_rate is only used for special data and
+		 * management frames.
+		 */
+		if (info->control.rates[0].idx == 0 ||
+				ieee80211_is_nullfunc(fc)) {
+			tcb_desc->use_driver_rate = true;
+			tcb_desc->ratr_index = RATR_INX_WIRELESS_MC;
+
+			tcb_desc->disable_ratefallback = 1;
+		} else {
+			/*
+			 * The hardware never uses hw_rate when
+			 * tcb_desc->use_driver_rate = false, so we do not
+			 * set the highest N rate here; in that case the N
+			 * rates are controlled entirely by the firmware.
+			 */
+			if (sta && (sta->ht_cap.ht_supported)) {
+				tcb_desc->hw_rate = _rtl_get_highest_n_rate(hw, sta);
+			} else {
+				if(rtlmac->mode == WIRELESS_MODE_B) {
+					tcb_desc->hw_rate =
+					    rtlpriv->cfg->maps[RTL_RC_CCK_RATE11M];
+				} else {
+					tcb_desc->hw_rate =
+					    rtlpriv->cfg->maps[RTL_RC_OFDM_RATE54M];
+				}
+			}
+		}
+
+		if (is_multicast_ether_addr(ieee80211_get_DA(hdr)))
+			tcb_desc->b_multicast = 1;
+		else if (is_broadcast_ether_addr(ieee80211_get_DA(hdr)))
+			tcb_desc->b_broadcast = 1;
+
+		_rtl_txrate_selectmode(hw, sta, tcb_desc);
+		_rtl_query_bandwidth_mode(hw, sta, tcb_desc);
+		_rtl_qurey_shortpreamble_mode(hw, tcb_desc, info);
+		_rtl_query_shortgi(hw, sta, tcb_desc, info);
+		_rtl_query_protection_mode(hw, tcb_desc, info);
+	} else {
+		tcb_desc->use_driver_rate = true;
+		tcb_desc->ratr_index = RATR_INX_WIRELESS_MC;
+		tcb_desc->disable_ratefallback = 1;
+		tcb_desc->mac_id = 0;
+		tcb_desc->b_packet_bw = false;
+	}
+}
+//EXPORT_SYMBOL(rtl_get_tcb_desc);
+
+bool rtl_tx_mgmt_proc(struct ieee80211_hw *hw, struct sk_buff *skb)
+{
+	struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
+	struct rtl_priv *rtlpriv = rtl_priv(hw);
+	u16 fc = rtl_get_fc(skb);
+
+	if (rtlpriv->dm.supp_phymode_switch &&
+		mac->link_state < MAC80211_LINKED &&
+		(ieee80211_is_auth(fc) || ieee80211_is_probe_req(fc))) {
+		if (rtlpriv->cfg->ops->check_switch_to_dmdp)
+			rtlpriv->cfg->ops->check_switch_to_dmdp(hw);
+	}
+	if (ieee80211_is_auth(fc)) {
+		RT_TRACE(COMP_SEND, DBG_DMESG, ("MAC80211_LINKING\n"));
+		rtl_ips_nic_on(hw);
+
+		mac->link_state = MAC80211_LINKING;
+		/* Dual mac */
+		rtlpriv->phy.b_need_iqk = true;
+
+	}
+
+	return true;
+}
+
+struct sk_buff *rtl_make_del_ba(struct ieee80211_hw *hw, u8 *sa,
+				u8 *bssid, u16 tid);
+bool rtl_action_proc(struct ieee80211_hw *hw, struct sk_buff *skb, u8 is_tx)
+{
+	struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
+	struct ieee80211_hdr *hdr = rtl_get_hdr(skb);
+	struct rtl_priv *rtlpriv = rtl_priv(hw);
+	u16 fc = rtl_get_fc(skb);
+	u8 *act = (u8 *) (((u8 *) skb->data + MAC80211_3ADDR_LEN));
+	u8 category;
+
+	if (!ieee80211_is_action(fc))
+		return true;
+
+	category = *act;
+	act++;
+	switch (category) {
+	case ACT_CAT_BA:
+		switch (*act) {
+		case ACT_ADDBAREQ:
+			if (mac->act_scanning)
+				return false;
+
+			RT_TRACE((COMP_SEND | COMP_RECV), DBG_DMESG,
+				 ("%s ACT_ADDBAREQ From :%pM\n",
+				  is_tx ? "Tx" : "Rx", hdr->addr2));
+			RT_PRINT_DATA(rtlpriv, COMP_INIT, DBG_DMESG, ("req\n"),
+				      skb->data, skb->len);
+			if (!is_tx) {
+				struct ieee80211_sta *sta = NULL;
+				struct rtl_sta_info *sta_entry = NULL;
+				struct ieee80211_mgmt *mgmt = (void *)skb->data;
+				u16 capab = 0, tid = 0;
+				struct rtl_tid_data *tid_data;
+				struct sk_buff *skb_delba = NULL;
+				struct ieee80211_rx_status rx_status = { 0 };
+
+				rcu_read_lock();
+				sta = rtl_find_sta(hw, hdr->addr3);
+				if (sta == NULL) {
+					RT_TRACE((COMP_SEND | COMP_RECV),
+						 DBG_EMERG, ("sta is NULL\n"));
+					rcu_read_unlock();
+					return true;
+				}
+
+				sta_entry = (struct rtl_sta_info *)sta->drv_priv;
+				if (!sta_entry) {
+					rcu_read_unlock();
+					return true;
+				}
+				capab = le16_to_cpu(mgmt->u.action.u.addba_req.capab);
+				tid = (capab & IEEE80211_ADDBA_PARAM_TID_MASK) >> 2;
+				tid_data = &sta_entry->tids[tid];
+				if (tid_data->agg.rx_agg_state ==
+				    RTL_RX_AGG_START) {
+					skb_delba = rtl_make_del_ba(hw,
+								    hdr->addr2,
+								    hdr->addr3,
+								    tid);
+					if (skb_delba) {
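+						/* Fill in a minimal fake rx_status for the
+						 * synthesized DELBA before injecting it
+						 * with ieee80211_rx_irqsafe(). */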
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,10,0))
+						rx_status.freq = hw->conf.chandef.chan->center_freq;
+						rx_status.band = hw->conf.chandef.chan->band;
+#else
+						rx_status.freq = hw->conf.channel->center_freq;
+						rx_status.band = hw->conf.channel->band;
+#endif
+						rx_status.flag |= RX_FLAG_DECRYPTED;
+						rx_status.flag |= RX_FLAG_MACTIME_MPDU;
+						rx_status.rate_idx = 0;
+						rx_status.signal = 50 + 10;
+						memcpy(IEEE80211_SKB_RXCB(skb_delba), &rx_status,
+								sizeof(rx_status));
+						RT_PRINT_DATA(rtlpriv, COMP_INIT, DBG_DMESG,
+								("fake del\n"), skb_delba->data,
+								skb_delba->len);
+						ieee80211_rx_irqsafe(hw, skb_delba);
+					}
+				}
+				rcu_read_unlock();
+			}
+			break;
+		case ACT_ADDBARSP:
+			RT_TRACE((COMP_SEND | COMP_RECV), DBG_DMESG,
+				 ("%s ACT_ADDBARSP From :%pM\n",
+				  is_tx ? "Tx" : "Rx", hdr->addr2));
+			break;
+		case ACT_DELBA:
+			RT_TRACE((COMP_SEND | COMP_RECV), DBG_DMESG,
+				 ("ACT_ADDBADEL From :%pM\n", hdr->addr2));
+			break;
+		}
+		break;
+	default:
+		break;
+	}
+
+	return true;
+}
+
+/*should call before software enc*/
+u8 rtl_is_special_data(struct ieee80211_hw *hw, struct sk_buff *skb, u8 is_tx)
+{
+	struct rtl_priv *rtlpriv = rtl_priv(hw);
+	struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw));
+	u16 fc = rtl_get_fc(skb);
+	u16 ether_type;
+	u8 mac_hdr_len = ieee80211_get_hdrlen_from_skb(skb);
+	const struct iphdr *ip;
+
+	if (!ieee80211_is_data(fc))
+		goto end;
+
+
+	ip = (struct iphdr *)((u8 *) skb->data + mac_hdr_len +
+			      SNAP_SIZE + PROTOC_TYPE_SIZE);
+	ether_type = *(u16 *) ((u8 *) skb->data + mac_hdr_len + SNAP_SIZE);
+	ether_type = ntohs(ether_type);
+
+	if (ETH_P_IP == ether_type) {
+		if (IPPROTO_UDP == ip->protocol) {
+			struct udphdr *udp = (struct udphdr *)((u8 *) ip +
+							       (ip->ihl << 2));
+			if (((((u8 *) udp)[1] == 68) &&
+			     (((u8 *) udp)[3] == 67)) ||
+			    ((((u8 *) udp)[1] == 67) &&
+			     (((u8 *) udp)[3] == 68))) {
+				/*
+				 * 68 : UDP BOOTP client
+				 * 67 : UDP BOOTP server
+				 */
+				RT_TRACE((COMP_SEND | COMP_RECV),
+					 DBG_DMESG, ("dhcp %s !!\n",
+						     (is_tx) ? "Tx" : "Rx"));
+
+				if (is_tx) {
+					rtlpriv->ra.is_special_data = true;
+					rtl_lps_leave(hw);
+					ppsc->last_delaylps_stamp_jiffies =
+									jiffies;
+				}
+
+				return true;
+			}
+		}
+	} else if (ETH_P_ARP == ether_type) {
+		if (is_tx) {
+			rtlpriv->ra.is_special_data = true;
+			rtl_lps_leave(hw);
+			ppsc->last_delaylps_stamp_jiffies = jiffies;
+		}
+
+		return true;
+	} else if (ETH_P_PAE == ether_type) {
+		RT_TRACE((COMP_SEND | COMP_RECV), DBG_DMESG,
+			 ("802.1X %s EAPOL pkt!!\n", (is_tx) ? "Tx" : "Rx"));
+
+		if (is_tx) {
+			rtlpriv->ra.is_special_data = true;
+			rtl_lps_leave(hw);
+			ppsc->last_delaylps_stamp_jiffies = jiffies;
+		}
+
+		return true;
+	} else if (0x86DD == ether_type) {
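+		/* 0x86DD is the IPv6 EtherType (ETH_P_IPV6) */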
+		return true;
+	}
+
+end:
+	rtlpriv->ra.is_special_data = false;
+	return false;
+}
+
+/*********************************************************
+ *
+ * functions called by core.c
+ *
+ *********************************************************/
+int rtl_tx_agg_start(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+		     struct ieee80211_sta *sta, u16 tid, u16 *ssn)
+{
+	struct rtl_priv *rtlpriv = rtl_priv(hw);
+	struct rtl_tid_data *tid_data;
+	struct rtl_sta_info *sta_entry = NULL;
+
+	if (sta == NULL)
+		return -EINVAL;
+
+	if (unlikely(tid >= MAX_TID_COUNT))
+		return -EINVAL;
+
+	sta_entry = (struct rtl_sta_info *)sta->drv_priv;
+	if (!sta_entry)
+		return -ENXIO;
+	tid_data = &sta_entry->tids[tid];
+
+	RT_TRACE(COMP_SEND, DBG_DMESG,
+		 ("on ra = %pM tid = %d seq:%d\n", sta->addr, tid,
+		  tid_data->seq_number));
+
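+	/* Report the starting sequence number back to mac80211 and mark
+	 * this TID's aggregation as started. */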
+	*ssn = tid_data->seq_number;
+	tid_data->agg.agg_state = RTL_AGG_START;
+
+	ieee80211_start_tx_ba_cb_irqsafe(vif, sta->addr, tid);
+	return 0;
+}
+
+int rtl_tx_agg_stop(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+		    struct ieee80211_sta *sta, u16 tid)
+{
+	struct rtl_priv *rtlpriv = rtl_priv(hw);
+	struct rtl_tid_data *tid_data;
+	struct rtl_sta_info *sta_entry = NULL;
+
+	if (sta == NULL)
+		return -EINVAL;
+
+	if (!sta->addr) {
+		RT_TRACE(COMP_ERR, DBG_EMERG, ("ra = NULL\n"));
+		return -EINVAL;
+	}
+
+	RT_TRACE(COMP_SEND, DBG_DMESG,
+		 ("on ra = %pM tid = %d\n", sta->addr, tid));
+
+	if (unlikely(tid >= MAX_TID_COUNT))
+		return -EINVAL;
+
+	sta_entry = (struct rtl_sta_info *)sta->drv_priv;
+	tid_data = &sta_entry->tids[tid];
+	sta_entry->tids[tid].agg.agg_state = RTL_AGG_STOP;
+
+	ieee80211_stop_tx_ba_cb_irqsafe(vif, sta->addr, tid);
+	return 0;
+}
+
+int rtl_rx_agg_start(struct ieee80211_hw *hw,
+		     struct ieee80211_sta *sta, u16 tid)
+{
+	struct rtl_priv *rtlpriv = rtl_priv(hw);
+	struct rtl_tid_data *tid_data;
+	struct rtl_sta_info *sta_entry = NULL;
+
+	if (sta == NULL)
+		return -EINVAL;
+
+	if (unlikely(tid >= MAX_TID_COUNT))
+		return -EINVAL;
+
+	sta_entry = (struct rtl_sta_info *)sta->drv_priv;
+	if (!sta_entry)
+		return -ENXIO;
+	tid_data = &sta_entry->tids[tid];
+
+	RT_TRACE(COMP_RECV, DBG_DMESG,
+		 ("on ra = %pM tid = %d seq:%d\n", sta->addr, tid,
+		 tid_data->seq_number));
+
+	tid_data->agg.rx_agg_state = RTL_RX_AGG_START;
+	return 0;
+}
+
+int rtl_rx_agg_stop(struct ieee80211_hw *hw,
+		struct ieee80211_sta *sta, u16 tid)
+{
+	struct rtl_priv *rtlpriv = rtl_priv(hw);
+	struct rtl_tid_data *tid_data;
+	struct rtl_sta_info *sta_entry = NULL;
+
+	if (sta == NULL)
+		return -EINVAL;
+
+	if (!sta->addr) {
+		RT_TRACE(COMP_ERR, DBG_EMERG, ("ra = NULL\n"));
+		return -EINVAL;
+	}
+
+	RT_TRACE(COMP_SEND, DBG_DMESG,
+		 ("on ra = %pM tid = %d\n", sta->addr, tid));
+
+	if (unlikely(tid >= MAX_TID_COUNT))
+		return -EINVAL;
+
+	sta_entry = (struct rtl_sta_info *)sta->drv_priv;
+	tid_data = &sta_entry->tids[tid];
+	sta_entry->tids[tid].agg.rx_agg_state = RTL_RX_AGG_STOP;
+
+	return 0;
+}
+
+int rtl_tx_agg_oper(struct ieee80211_hw *hw,
+		struct ieee80211_sta *sta, u16 tid)
+{
+	struct rtl_priv *rtlpriv = rtl_priv(hw);
+	struct rtl_tid_data *tid_data;
+	struct rtl_sta_info *sta_entry = NULL;
+
+	if (sta == NULL)
+		return -EINVAL;
+
+	if (!sta->addr) {
+		RT_TRACE(COMP_ERR, DBG_EMERG, ("ra = NULL\n"));
+		return -EINVAL;
+	}
+
+	RT_TRACE(COMP_SEND, DBG_DMESG,
+		 ("on ra = %pM tid = %d\n", sta->addr, tid));
+
+	if (unlikely(tid >= MAX_TID_COUNT))
+		return -EINVAL;
+
+	sta_entry = (struct rtl_sta_info *)sta->drv_priv;
+	tid_data = &sta_entry->tids[tid];
+	sta_entry->tids[tid].agg.agg_state = RTL_AGG_OPERATIONAL;
+
+	return 0;
+}
+
+/*********************************************************
+ *
+ * wq & timer callback functions
+ *
+ *********************************************************/
+/* this function is used for roaming */
+void rtl_beacon_statistic(struct ieee80211_hw *hw, struct sk_buff *skb)
+{
+	struct rtl_priv *rtlpriv = rtl_priv(hw);
+	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
+
+	if (rtlpriv->mac80211.opmode != NL80211_IFTYPE_STATION)
+		return;
+
+	if (rtlpriv->mac80211.link_state < MAC80211_LINKED)
+		return;
+
+	/* check if this really is a beacon */
+	if (!ieee80211_is_beacon(hdr->frame_control) &&
+		!ieee80211_is_probe_resp(hdr->frame_control))
+		return;
+
+	/* min. beacon length + FCS_LEN */
+	if (skb->len <= 40 + FCS_LEN)
+		return;
+
+	/* and only beacons from the associated BSSID, please */
+	if (!ether_addr_equal(hdr->addr3, rtlpriv->mac80211.bssid))
+		return;
+
+	rtlpriv->link_info.bcn_rx_inperiod++;
+}
+
+void rtl_watchdog_wq_callback(void *data)
+{
+	struct rtl_works *rtlworks = container_of_dwork_rtl(data,
+							    struct rtl_works,
+							    watchdog_wq);
+	struct ieee80211_hw *hw = rtlworks->hw;
+	struct rtl_priv *rtlpriv = rtl_priv(hw);
+	struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
+	struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
+	bool b_busytraffic = false;
+	bool b_tx_busy_traffic = false;
+	bool b_rx_busy_traffic = false;
+	bool b_higher_busytraffic = false;
+	bool b_higher_busyrxtraffic = false;
+	u8 idx, tid;
+	u32 rx_cnt_inp4eriod = 0;
+	u32 tx_cnt_inp4eriod = 0;
+	u32 aver_rx_cnt_inperiod = 0;
+	u32 aver_tx_cnt_inperiod = 0;
+	u32 aver_tidtx_inperiod[MAX_TID_COUNT] = {0};
+	u32 tidtx_inp4eriod[MAX_TID_COUNT] = {0};
+	bool benter_ps = false;
+
+	if (is_hal_stop(rtlhal))
+		return;
+
+	/* <1> Determine if action frame is allowed */
+	if (mac->link_state > MAC80211_NOLINK) {
+		if (mac->cnt_after_linked < 20)
+			mac->cnt_after_linked++;
+	} else {
+		mac->cnt_after_linked = 0;
+	}
+
+	/* <2> Check whether traffic is busy; if it is,
+	 * we do not change channel */
+	if (mac->link_state >= MAC80211_LINKED) {
+
+		/* (1) get aver_rx_cnt_inperiod & aver_tx_cnt_inperiod */
+		for (idx = 0; idx <= 2; idx++) {
+			rtlpriv->link_info.num_rx_in4period[idx] =
+			    rtlpriv->link_info.num_rx_in4period[idx + 1];
+			rtlpriv->link_info.num_tx_in4period[idx] =
+			    rtlpriv->link_info.num_tx_in4period[idx + 1];
+		}
+		rtlpriv->link_info.num_rx_in4period[3] =
+		    rtlpriv->link_info.num_rx_inperiod;
+		rtlpriv->link_info.num_tx_in4period[3] =
+		    rtlpriv->link_info.num_tx_inperiod;
+		for (idx = 0; idx <= 3; idx++) {
+			rx_cnt_inp4eriod +=
+			    rtlpriv->link_info.num_rx_in4period[idx];
+			tx_cnt_inp4eriod +=
+			    rtlpriv->link_info.num_tx_in4period[idx];
+		}
+		aver_rx_cnt_inperiod = rx_cnt_inp4eriod / 4;
+		aver_tx_cnt_inperiod = tx_cnt_inp4eriod / 4;
+
+		/* (2) check traffic busy */
+		if (aver_rx_cnt_inperiod > 100 || aver_tx_cnt_inperiod > 100) {
+			b_busytraffic = true;
+			if (aver_rx_cnt_inperiod > aver_tx_cnt_inperiod)
+				b_rx_busy_traffic = true;
+			else
+				b_tx_busy_traffic = true;
+		}
+
+		/* Higher Tx/Rx data. */
+		if (aver_rx_cnt_inperiod > 4000 ||
+			aver_tx_cnt_inperiod > 4000) {
+			b_higher_busytraffic = true;
+
+			/* Extremely high Rx data. */
+			if (aver_rx_cnt_inperiod > 5000)
+				b_higher_busyrxtraffic = true;
+		}
+
+		/* check every tid's tx traffic */
+		for (tid = 0; tid <= 7; tid++) {
+			for (idx = 0; idx <= 2; idx++)
+				rtlpriv->link_info.tidtx_in4period[tid][idx] =
+					rtlpriv->link_info.tidtx_in4period[tid]
+					[idx + 1];
+			rtlpriv->link_info.tidtx_in4period[tid][3] =
+				rtlpriv->link_info.tidtx_inperiod[tid];
+
+			for (idx = 0; idx <= 3; idx++)
+				tidtx_inp4eriod[tid] +=
+				   rtlpriv->link_info.tidtx_in4period[tid][idx];
+			aver_tidtx_inperiod[tid] = tidtx_inp4eriod[tid] / 4;
+			if (aver_tidtx_inperiod[tid] > 5000)
+				rtlpriv->link_info.higher_busytxtraffic[tid] =
+									true;
+			else
+				rtlpriv->link_info.higher_busytxtraffic[tid] =
+									false;
+		}
+
+		if (((rtlpriv->link_info.num_rx_inperiod +
+		      rtlpriv->link_info.num_tx_inperiod) > 8) ||
+		    (rtlpriv->link_info.num_rx_inperiod > 2))
+			benter_ps = false;
+		else
+			benter_ps = true;
+
+		/* LeisurePS only work in infra mode. */
+		if (benter_ps)
+			rtl_lps_enter(hw);
+		else
+			rtl_lps_leave(hw);
+	}
+
+	rtlpriv->link_info.num_rx_inperiod = 0;
+	rtlpriv->link_info.num_tx_inperiod = 0;
+	for (tid = 0; tid <= 7; tid++)
+		rtlpriv->link_info.tidtx_inperiod[tid] = 0;
+
+	rtlpriv->link_info.b_busytraffic = b_busytraffic;
+	rtlpriv->link_info.b_rx_busy_traffic = b_rx_busy_traffic;
+	rtlpriv->link_info.b_tx_busy_traffic = b_tx_busy_traffic;
+	rtlpriv->link_info.b_higher_busytraffic = b_higher_busytraffic;
+	rtlpriv->link_info.b_higher_busyrxtraffic = b_higher_busyrxtraffic;
+
+	/* <3> DM */
+	rtlpriv->cfg->ops->dm_watchdog(hw);
+
+	/* <4> roaming */
+	if (mac->link_state == MAC80211_LINKED &&
+			mac->opmode == NL80211_IFTYPE_STATION) {
+		if ((rtlpriv->link_info.bcn_rx_inperiod +
+			rtlpriv->link_info.num_rx_inperiod) == 0) {
+			rtlpriv->link_info.roam_times++;
+			RT_TRACE(COMP_ERR, DBG_DMESG, ("AP off for %d s\n",
+				(rtlpriv->link_info.roam_times * 2)));
+
+			/* if we cannot receive a beacon for 10s,
+			 * we should reconnect to this AP */
+			if (rtlpriv->link_info.roam_times >= 5) {
+				RT_TRACE(COMP_ERR, DBG_EMERG,
+					 ("AP off, try to reconnect now\n"));
+				rtlpriv->link_info.roam_times = 0;
+				ieee80211_connection_loss(rtlpriv->mac80211.vif);
+			}
+		} else {
+			rtlpriv->link_info.roam_times = 0;
+		}
+	}
+	rtlpriv->link_info.bcn_rx_inperiod = 0;
+}
+
+void rtl_watch_dog_timer_callback(unsigned long data)
+{
+	struct ieee80211_hw *hw = (struct ieee80211_hw *)data;
+	struct rtl_priv *rtlpriv = rtl_priv(hw);
+
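+	/* Kick the watchdog work immediately and re-arm this timer so the
+	 * watchdog runs every RTL_WATCH_DOG_TIME milliseconds. */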
+	queue_delayed_work(rtlpriv->works.rtl_wq,
+			   &rtlpriv->works.watchdog_wq, 0);
+
+	mod_timer(&rtlpriv->works.watchdog_timer,
+		  jiffies + MSECS(RTL_WATCH_DOG_TIME));
+}
+
+void rtl_fwevt_wq_callback(void *data)
+{
+	struct rtl_works *rtlworks =
+		container_of_dwork_rtl(data, struct rtl_works, fwevt_wq);
+	struct ieee80211_hw *hw = rtlworks->hw;
+	struct rtl_priv *rtlpriv = rtl_priv(hw);
+
+	rtlpriv->cfg->ops->c2h_command_handle(hw);
+}
+
+void rtl_easy_concurrent_retrytimer_callback(unsigned long data)
+{
+	struct ieee80211_hw *hw = (struct ieee80211_hw *)data;
+	struct rtl_priv *rtlpriv = rtl_priv(hw);
+	struct rtl_priv *buddy_priv = rtlpriv->buddy_priv;
+
+	if (buddy_priv == NULL)
+		return;
+
+	rtlpriv->cfg->ops->dualmac_easy_concurrent(hw);
+}
+
+/*********************************************************
+ *
+ * frame process functions
+ *
+ *********************************************************/
+u8 *rtl_find_ie(u8 *data, unsigned int len, u8 ie)
+{
+	struct ieee80211_mgmt *mgmt = (void *)data;
+	u8 *pos, *end;
+
+	pos = (u8 *)mgmt->u.beacon.variable;
+	end = data + len;
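+	/* Information elements are TLVs: pos[0] is the element ID,
+	 * pos[1] the payload length, followed by pos[1] octets of data. */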
+	while (pos < end) {
+		if (pos + 2 + pos[1] > end)
+			return NULL;
+
+		if (pos[0] == ie)
+			return pos;
+
+		pos += 2 + pos[1];
+	}
+	return NULL;
+}
+
+/* when we use 2 rx ants we send IEEE80211_SMPS_OFF */
+/* when we use 1 rx ant we send IEEE80211_SMPS_STATIC */
+struct sk_buff *rtl_make_smps_action(struct ieee80211_hw *hw,
+				     enum ieee80211_smps_mode smps,
+				     u8 *da, u8 *bssid)
+{
+	struct rtl_efuse *rtlefuse = rtl_efuse(rtl_priv(hw));
+	struct sk_buff *skb;
+	struct ieee80211_mgmt_compat *action_frame;
+
+	/* 27 = header + category + action + smps mode */
+	skb = dev_alloc_skb(27 + hw->extra_tx_headroom);
+	if (!skb)
+		return NULL;
+
+	skb_reserve(skb, hw->extra_tx_headroom);
+	action_frame = (void *)skb_put(skb, 27);
+	memset(action_frame, 0, 27);
+	memcpy(action_frame->da, da, ETH_ALEN);
+	memcpy(action_frame->sa, rtlefuse->dev_addr, ETH_ALEN);
+	memcpy(action_frame->bssid, bssid, ETH_ALEN);
+	action_frame->frame_control = cpu_to_le16(IEEE80211_FTYPE_MGMT |
+						  IEEE80211_STYPE_ACTION);
+	action_frame->u.action.category = WLAN_CATEGORY_HT;
+	action_frame->u.action.u.ht_smps.action = WLAN_HT_ACTION_SMPS;
+	switch (smps) {
+	case IEEE80211_SMPS_AUTOMATIC:/* 0 */
+	case IEEE80211_SMPS_NUM_MODES:/* 4 */
+		WARN_ON(1);
+	case IEEE80211_SMPS_OFF:/* 1 */ /*MIMO_PS_NOLIMIT*/
+		action_frame->u.action.u.ht_smps.smps_control =
+				WLAN_HT_SMPS_CONTROL_DISABLED;/* 0 */
+		break;
+	case IEEE80211_SMPS_STATIC:/* 2 */ /*MIMO_PS_STATIC*/
+		action_frame->u.action.u.ht_smps.smps_control =
+				WLAN_HT_SMPS_CONTROL_STATIC;/* 1 */
+		break;
+	case IEEE80211_SMPS_DYNAMIC:/* 3 */ /*MIMO_PS_DYNAMIC*/
+		action_frame->u.action.u.ht_smps.smps_control =
+				WLAN_HT_SMPS_CONTROL_DYNAMIC;/* 3 */
+		break;
+	}
+
+	return skb;
+}
+
+int rtl_send_smps_action(struct ieee80211_hw *hw,
+			 struct ieee80211_sta *sta,
+			 enum ieee80211_smps_mode smps)
+{
+	struct rtl_priv *rtlpriv = rtl_priv(hw);
+	struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
+	struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw));
+	struct sk_buff *skb = NULL;
+	struct rtl_tcb_desc tcb_desc;
+	u8 bssid[ETH_ALEN] = {0};
+
+	memset(&tcb_desc, 0, sizeof(struct rtl_tcb_desc));
+
+	if (rtlpriv->mac80211.act_scanning)
+		goto err_free;
+
+	if (!sta)
+		goto err_free;
+
+	if (unlikely(is_hal_stop(rtlhal) || ppsc->rfpwr_state != ERFON))
+		goto err_free;
+
+	if (!test_bit(RTL_STATUS_INTERFACE_START, &rtlpriv->status))
+		goto err_free;
+
+	if (rtlpriv->mac80211.opmode == NL80211_IFTYPE_AP)
+		memcpy(bssid, rtlpriv->efuse.dev_addr, ETH_ALEN);
+	else
+		memcpy(bssid, rtlpriv->mac80211.bssid, ETH_ALEN);
+
+	skb = rtl_make_smps_action(hw, smps, sta->addr, bssid);
+	/* this is a type = mgmt, stype = action frame */
+	if (skb) {
+		struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
+		struct rtl_sta_info *sta_entry =
+			(struct rtl_sta_info *) sta->drv_priv;
+		sta_entry->mimo_ps = smps;
+		/* rtlpriv->cfg->ops->update_rate_tbl(hw, sta, 0); */
+
+		info->control.rates[0].idx = 0;
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,10,0))
+		info->band = hw->conf.chandef.chan->band;
+#else
+		info->band = hw->conf.channel->band;
+#endif
+/*<delete in kernel start>*/
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(3,7,0))
+		info->control.sta = sta;
+		rtlpriv->intf_ops->adapter_tx(hw, skb, &tcb_desc);
+#else
+/*<delete in kernel end>*/
+		rtlpriv->intf_ops->adapter_tx(hw, sta, skb, &tcb_desc);
+/*<delete in kernel start>*/
+#endif
+/*<delete in kernel end>*/
+	}
+	return 1;
+
+err_free:
+	return 0;
+}
+//EXPORT_SYMBOL(rtl_send_smps_action);
+
+/* mac80211 has issues when it receives a DELBA, so here we build a
+ * fake DELBA when we receive an ADDBA request while RX aggregation is
+ * already open; this lets mac80211 release the related BA resources.
+ * This fake DELBA is used for the tx side. */
+struct sk_buff *rtl_make_del_ba(struct ieee80211_hw *hw,
+				u8 *sa, u8 *bssid, u16 tid)
+{
+	struct rtl_efuse *rtlefuse = rtl_efuse(rtl_priv(hw));
+	struct sk_buff *skb;
+	struct ieee80211_mgmt *action_frame;
+	u16 params;
+
+	/* 34 = management header + DELBA action body */
+	skb = dev_alloc_skb(34 + hw->extra_tx_headroom);
+	if (!skb)
+		return NULL;
+
+	skb_reserve(skb, hw->extra_tx_headroom);
+	action_frame = (void *)skb_put(skb, 34);
+	memset(action_frame, 0, 34);
+	memcpy(action_frame->sa, sa, ETH_ALEN);
+	memcpy(action_frame->da, rtlefuse->dev_addr, ETH_ALEN);
+	memcpy(action_frame->bssid, bssid, ETH_ALEN);
+	action_frame->frame_control = cpu_to_le16(IEEE80211_FTYPE_MGMT |
+						  IEEE80211_STYPE_ACTION);
+	action_frame->u.action.category = WLAN_CATEGORY_BACK;
+	action_frame->u.action.u.delba.action_code = WLAN_ACTION_DELBA;
+	params = (u16)(1 << 11); 	/* bit 11 initiator */
+	params |= (u16)(tid << 12); 		/* bit 15:12 TID number */
+
+	action_frame->u.action.u.delba.params = cpu_to_le16(params);
+	action_frame->u.action.u.delba.reason_code =
+		cpu_to_le16(WLAN_REASON_QSTA_TIMEOUT);
+
+	return skb;
+}
+
+/*********************************************************
+ *
+ * IOT functions
+ *
+ *********************************************************/
+static bool rtl_chk_vendor_ouisub(struct ieee80211_hw *hw,
+				  struct octet_string vendor_ie)
+{
+	struct rtl_priv *rtlpriv = rtl_priv(hw);
+	bool matched = false;
+	static u8 athcap_1[] = { 0x00, 0x03, 0x7F };
+	static u8 athcap_2[] = { 0x00, 0x13, 0x74 };
+	static u8 broadcap_1[] = { 0x00, 0x10, 0x18 };
+	static u8 broadcap_2[] = { 0x00, 0x0a, 0xf7 };
+	static u8 broadcap_3[] = { 0x00, 0x05, 0xb5 };
+	static u8 racap[] = { 0x00, 0x0c, 0x43 };
+	static u8 ciscocap[] = { 0x00, 0x40, 0x96 };
+	static u8 marvcap[] = { 0x00, 0x50, 0x43 };
+
+	if (memcmp(vendor_ie.octet, athcap_1, 3) == 0 ||
+		memcmp(vendor_ie.octet, athcap_2, 3) == 0) {
+		rtlpriv->mac80211.vendor = PEER_ATH;
+		matched = true;
+	} else if (memcmp(vendor_ie.octet, broadcap_1, 3) == 0 ||
+		memcmp(vendor_ie.octet, broadcap_2, 3) == 0 ||
+		memcmp(vendor_ie.octet, broadcap_3, 3) == 0) {
+		rtlpriv->mac80211.vendor = PEER_BROAD;
+		matched = true;
+	} else if (memcmp(vendor_ie.octet, racap, 3) == 0) {
+		rtlpriv->mac80211.vendor = PEER_RAL;
+		matched = true;
+	} else if (memcmp(vendor_ie.octet, ciscocap, 3) == 0) {
+		rtlpriv->mac80211.vendor = PEER_CISCO;
+		matched = true;
+	} else if (memcmp(vendor_ie.octet, marvcap, 3) == 0) {
+		rtlpriv->mac80211.vendor = PEER_MARV;
+		matched = true;
+	}
+
+	return matched;
+}
+
+bool rtl_find_221_ie(struct ieee80211_hw *hw, u8 *data,
+		unsigned int len)
+{
+	struct ieee80211_mgmt *mgmt = (void *)data;
+	struct octet_string vendor_ie;
+	u8 *pos, *end;
+
+	pos = (u8 *)mgmt->u.beacon.variable;
+	end = data + len;
+	while (pos < end) {
+		if (pos[0] == 221) {
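+			/* element ID 221 is the vendor-specific IE; its first
+			 * three octets are the vendor OUI checked below */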
+			vendor_ie.length = pos[1];
+			vendor_ie.octet = &pos[2];
+			if (rtl_chk_vendor_ouisub(hw, vendor_ie))
+				return true;
+		}
+
+		if (pos + 2 + pos[1] > end)
+			return false;
+
+		pos += 2 + pos[1];
+	}
+	return false;
+}
+
+void rtl_recognize_peer(struct ieee80211_hw *hw, u8 *data, unsigned int len)
+{
+	struct rtl_priv *rtlpriv = rtl_priv(hw);
+	struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
+	struct ieee80211_hdr *hdr = (void *)data;
+	u32 vendor = PEER_UNKNOWN;
+
+	static u8 ap3_1[3] = { 0x00, 0x14, 0xbf };
+	static u8 ap3_2[3] = { 0x00, 0x1a, 0x70 };
+	static u8 ap3_3[3] = { 0x00, 0x1d, 0x7e };
+	static u8 ap4_1[3] = { 0x00, 0x90, 0xcc };
+	static u8 ap4_2[3] = { 0x00, 0x0e, 0x2e };
+	static u8 ap4_3[3] = { 0x00, 0x18, 0x02 };
+	static u8 ap4_4[3] = { 0x00, 0x17, 0x3f };
+	static u8 ap4_5[3] = { 0x00, 0x1c, 0xdf };
+	static u8 ap5_1[3] = { 0x00, 0x1c, 0xf0 };
+	static u8 ap5_2[3] = { 0x00, 0x21, 0x91 };
+	static u8 ap5_3[3] = { 0x00, 0x24, 0x01 };
+	static u8 ap5_4[3] = { 0x00, 0x15, 0xe9 };
+	static u8 ap5_5[3] = { 0x00, 0x17, 0x9A };
+	static u8 ap5_6[3] = { 0x00, 0x18, 0xE7 };
+	static u8 ap6_1[3] = { 0x00, 0x17, 0x94 };
+	static u8 ap7_1[3] = { 0x00, 0x14, 0xa4 };
+
+	if (mac->opmode != NL80211_IFTYPE_STATION)
+		return;
+
+	if (mac->link_state == MAC80211_NOLINK) {
+		mac->vendor = PEER_UNKNOWN;
+		return;
+	}
+
+	if (mac->cnt_after_linked > 2)
+		return;
+
+	/* check if this really is a beacon */
+	if (!ieee80211_is_beacon(hdr->frame_control))
+		return;
+
+	/* min. beacon length + FCS_LEN */
+	if (len <= 40 + FCS_LEN)
+		return;
+
+	/* and only beacons from the associated BSSID, please */
+	if (!ether_addr_equal(hdr->addr3, rtlpriv->mac80211.bssid))
+		return;
+
+	if (rtl_find_221_ie(hw, data, len)) {
+		vendor = mac->vendor;
+	}
+
+	if ((memcmp(mac->bssid, ap5_1, 3) == 0) ||
+		(memcmp(mac->bssid, ap5_2, 3) == 0) ||
+		(memcmp(mac->bssid, ap5_3, 3) == 0) ||
+		(memcmp(mac->bssid, ap5_4, 3) == 0) ||
+		(memcmp(mac->bssid, ap5_5, 3) == 0) ||
+		(memcmp(mac->bssid, ap5_6, 3) == 0) ||
+		vendor == PEER_ATH) {
+		vendor = PEER_ATH;
+		RT_TRACE(COMP_MAC80211, DBG_LOUD, ("=>ath find\n"));
+	} else if ((memcmp(mac->bssid, ap4_4, 3) == 0) ||
+		(memcmp(mac->bssid, ap4_5, 3) == 0) ||
+		(memcmp(mac->bssid, ap4_1, 3) == 0) ||
+		(memcmp(mac->bssid, ap4_2, 3) == 0) ||
+		(memcmp(mac->bssid, ap4_3, 3) == 0) ||
+		vendor == PEER_RAL) {
+		RT_TRACE(COMP_MAC80211, DBG_LOUD, ("=>ral find\n"));
+		vendor = PEER_RAL;
+	} else if (memcmp(mac->bssid, ap6_1, 3) == 0 ||
+		vendor == PEER_CISCO) {
+		vendor = PEER_CISCO;
+		RT_TRACE(COMP_MAC80211, DBG_LOUD, ("=>cisco find\n"));
+	} else if ((memcmp(mac->bssid, ap3_1, 3) == 0) ||
+		(memcmp(mac->bssid, ap3_2, 3) == 0) ||
+		(memcmp(mac->bssid, ap3_3, 3) == 0) ||
+		vendor == PEER_BROAD) {
+		RT_TRACE(COMP_MAC80211, DBG_LOUD, ("=>broad find\n"));
+		vendor = PEER_BROAD;
+	} else if (memcmp(mac->bssid, ap7_1, 3) == 0 ||
+		vendor == PEER_MARV) {
+		vendor = PEER_MARV;
+		RT_TRACE(COMP_MAC80211, DBG_LOUD, ("=>marv find\n"));
+	}
+
+	mac->vendor = vendor;
+}
+
+/*********************************************************
+ *
+ * sysfs functions
+ *
+ *********************************************************/
+static ssize_t rtl_show_debug_level(struct device *d,
+				    struct device_attribute *attr, char *buf)
+{
+	struct ieee80211_hw *hw = dev_get_drvdata(d);
+	struct rtl_priv *rtlpriv = rtl_priv(hw);
+
+	return sprintf(buf, "0x%08X\n", rtlpriv->dbg.global_debuglevel);
+}
+
+static ssize_t rtl_store_debug_level(struct device *d,
+				     struct device_attribute *attr,
+				     const char *buf, size_t count)
+{
+	struct ieee80211_hw *hw = dev_get_drvdata(d);
+	struct rtl_priv *rtlpriv = rtl_priv(hw);
+	unsigned long val;
+	int ret;
+
+	ret = strict_strtoul(buf, 0, &val);
+	if (ret) {
+		printk(KERN_DEBUG "%s is not in hex or decimal form.\n", buf);
+	} else {
+		rtlpriv->dbg.global_debuglevel = val;
+		printk(KERN_DEBUG "debuglevel:%x\n",
+			rtlpriv->dbg.global_debuglevel);
+	}
+
+	return strnlen(buf, count);
+}
+
+static DEVICE_ATTR(debug_level, S_IWUSR | S_IRUGO,
+		   rtl_show_debug_level, rtl_store_debug_level);
+
+static struct attribute *rtl_sysfs_entries[] = {
+
+	&dev_attr_debug_level.attr,
+
+	NULL
+};
+
+/*
+ * "name" is the folder name which will be
+ * created in the device directory, e.g.:
+ * sys/devices/pci0000:00/0000:00:1c.4/
+ * 0000:06:00.0/rtlsysfs
+ */
+struct attribute_group rtl_attribute_group = {
+	.name = "rtlsysfs",
+	.attrs = rtl_sysfs_entries,
+};
+
+#ifdef VIF_TODO
+/*********************************************************
+ *
+ * vif functions
+ *
+ *********************************************************/
+static inline struct ieee80211_vif *
+rtl_get_vif(struct rtl_vif_info *vif_priv)
+{
+	return container_of((void *)vif_priv, struct ieee80211_vif, drv_priv);
+}
+
+/* Protected by ar->mutex or RCU */
+struct ieee80211_vif *rtl_get_main_vif(struct ieee80211_hw *hw)
+{
+	struct rtl_priv *rtlpriv = rtl_priv(hw);
+	struct rtl_vif_info *cvif;
+
+	list_for_each_entry_rcu(cvif, &rtlpriv->vif_priv.vif_list, list) {
+		if (cvif->active)
+			return rtl_get_vif(cvif);
+	}
+
+	return NULL;
+}
+
+static inline bool is_main_vif(struct ieee80211_hw *hw,
+			       struct ieee80211_vif *vif)
+{
+	bool ret;
+
+	rcu_read_lock();
+	ret = (rtl_get_main_vif(hw) == vif);
+	rcu_read_unlock();
+	return ret;
+}
+
+bool rtl_set_vif_info(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
+{
+	struct rtl_vif_info *vif_info = (void *) vif->drv_priv;
+	struct rtl_priv *rtlpriv = rtl_priv(hw);
+	int vif_id = -1;
+
+	if (rtlpriv->vif_priv.vifs >= MAX_VIRTUAL_MAC) {
+		RT_TRACE(COMP_ERR, DBG_WARNING,
+			 ("vif number cannot be bigger than %d, now vifs is:%d\n",
+			  MAX_VIRTUAL_MAC, rtlpriv->vif_priv.vifs));
+		return false;
+	}
+
+	rcu_read_lock();
+	vif_id = bitmap_find_free_region(&rtlpriv->vif_priv.vif_bitmap,
+					 MAX_VIRTUAL_MAC, 0);
+	RT_TRACE(COMP_MAC80211, DBG_DMESG,
+		 ("%s vid_id:%d\n", __func__, vif_id));
+
+	if (vif_id < 0) {
+		rcu_read_unlock();
+		return false;
+	}
+
+	BUG_ON(rtlpriv->vif_priv.vif[vif_id].id != vif_id);
+	vif_info->active = true;
+	vif_info->id = vif_id;
+	vif_info->enable_beacon = false;
+	rtlpriv->vif_priv.vifs++;
+	if (rtlpriv->vif_priv.vifs > 1) {
+		rtlpriv->psc.b_inactiveps = false;
+		rtlpriv->psc.b_swctrl_lps = false;
+		rtlpriv->psc.b_fwctrl_lps = false;
+	}
+
+	list_add_tail_rcu(&vif_info->list, &rtlpriv->vif_priv.vif_list);
+	rcu_assign_pointer(rtlpriv->vif_priv.vif[vif_id].vif, vif);
+
+	RT_TRACE(COMP_MAC80211, DBG_DMESG, ("vifaddress:%p %p %p\n",
+		 rtlpriv->vif_priv.vif[vif_id].vif, vif, rtl_get_main_vif(hw)));
+
+	rcu_read_unlock();
+
+	return true;
+}
+#endif
+
+
+#if 0
+MODULE_AUTHOR("lizhaoming	<chaoming_li@realsil.com.cn>");
+MODULE_AUTHOR("Realtek WlanFAE	<wlanfae@realtek.com>");
+MODULE_AUTHOR("Larry Finger	<Larry.FInger@lwfinger.net>");
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("Realtek 802.11n PCI wireless core");
+#endif
+struct rtl_global_var global_var = {};
+
+int rtl_core_module_init(void)
+{
+	if (rtl_rate_control_register())
+		printk(KERN_DEBUG "rtl: Unable to register rtl_rc, "
+		       "using default RC!!\n");
+
+	/* add proc for debug */
+	rtl_proc_add_topdir();
+
+	/* init some global vars */
+	INIT_LIST_HEAD(&global_var.glb_priv_list);
+	spin_lock_init(&global_var.glb_list_lock);
+
+	return 0;
+}
+
+void rtl_core_module_exit(void)
+{
+	/* RC */
+	rtl_rate_control_unregister();
+
+	/* remove proc for debug */
+	rtl_proc_remove_topdir();
+}
+
+#if 0
+module_init(rtl_core_module_init);
+module_exit(rtl_core_module_exit);
+#endif
diff --git a/drivers/staging/rtl8821ae/base.h b/drivers/staging/rtl8821ae/base.h
new file mode 100644
index 0000000..629d14f
--- /dev/null
+++ b/drivers/staging/rtl8821ae/base.h
@@ -0,0 +1,159 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2009-2010  Realtek Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ * The full GNU General Public License is included in this distribution in the
+ * file called LICENSE.
+ *
+ * Contact Information:
+ * wlanfae <wlanfae@realtek.com>
+ * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
+ * Hsinchu 300, Taiwan.
+ *
+ * Larry Finger <Larry.Finger@lwfinger.net>
+ *
+ *****************************************************************************/
+
+#ifndef __RTL_BASE_H__
+#define __RTL_BASE_H__
+
+#include "compat.h"
+
+enum ap_peer {
+	PEER_UNKNOWN = 0,
+	PEER_RTL = 1,
+	PEER_RTL_92SE = 2,
+	PEER_BROAD = 3,
+	PEER_RAL = 4,
+	PEER_ATH = 5,
+	PEER_CISCO = 6,
+	PEER_MARV = 7,
+	PEER_AIRGO = 9,
+	PEER_MAX = 10,
+};
+
+#define RTL_DUMMY_OFFSET	0
+#define RTL_DUMMY_UNIT		8
+#define RTL_TX_DUMMY_SIZE	(RTL_DUMMY_OFFSET * RTL_DUMMY_UNIT)
+#define RTL_TX_DESC_SIZE	32
+#define RTL_TX_HEADER_SIZE	(RTL_TX_DESC_SIZE + RTL_TX_DUMMY_SIZE)
+
+#define HT_AMSDU_SIZE_4K 	3839
+#define HT_AMSDU_SIZE_8K 	7935
+
+#define MAX_BIT_RATE_40MHZ_MCS15 	300	/* Mbps */
+#define MAX_BIT_RATE_40MHZ_MCS7 	150	/* Mbps */
+
+#define RTL_RATE_COUNT_LEGACY		12
+#define RTL_CHANNEL_COUNT		14
+
+#define FRAME_OFFSET_FRAME_CONTROL	0
+#define FRAME_OFFSET_DURATION		2
+#define FRAME_OFFSET_ADDRESS1		4
+#define FRAME_OFFSET_ADDRESS2		10
+#define FRAME_OFFSET_ADDRESS3		16
+#define FRAME_OFFSET_SEQUENCE		22
+#define FRAME_OFFSET_ADDRESS4		24
+
+#define SET_80211_HDR_FRAME_CONTROL(_hdr, _val)		\
+	WRITEEF2BYTE(_hdr, _val)
+#define SET_80211_HDR_TYPE_AND_SUBTYPE(_hdr, _val)	\
+	WRITEEF1BYTE(_hdr, _val)
+#define SET_80211_HDR_PWR_MGNT(_hdr, _val)		\
+	SET_BITS_TO_LE_2BYTE(_hdr, 12, 1, _val)
+#define SET_80211_HDR_TO_DS(_hdr, _val)			\
+	SET_BITS_TO_LE_2BYTE(_hdr, 8, 1, _val)
+
+#define SET_80211_PS_POLL_AID(_hdr, _val)		\
+	WRITEEF2BYTE(((u8*)(_hdr))+2, _val)
+#define SET_80211_PS_POLL_BSSID(_hdr, _val)		\
+	CP_MACADDR(((u8*)(_hdr))+4, (u8*)(_val))
+#define SET_80211_PS_POLL_TA(_hdr, _val)		\
+	CP_MACADDR(((u8*)(_hdr))+10, (u8*)(_val))
+
+#define SET_80211_HDR_DURATION(_hdr, _val)	\
+	WRITEEF2BYTE((u8*)(_hdr)+FRAME_OFFSET_DURATION, _val)
+#define SET_80211_HDR_ADDRESS1(_hdr, _val)	\
+	CP_MACADDR((u8*)(_hdr)+FRAME_OFFSET_ADDRESS1, (u8*)(_val))
+#define SET_80211_HDR_ADDRESS2(_hdr, _val) 	\
+	CP_MACADDR((u8*)(_hdr)+FRAME_OFFSET_ADDRESS2, (u8*)(_val))
+#define SET_80211_HDR_ADDRESS3(_hdr, _val) 	\
+	CP_MACADDR((u8*)(_hdr)+FRAME_OFFSET_ADDRESS3, (u8*)(_val))
+#define SET_80211_HDR_FRAGMENT_SEQUENCE(_hdr, _val)  \
+	WRITEEF2BYTE((u8*)(_hdr)+FRAME_OFFSET_SEQUENCE, _val)
+
+#define SET_BEACON_PROBE_RSP_TIME_STAMP_LOW(__phdr, __val) 	\
+	WRITEEF4BYTE(((u8*)(__phdr)) + 24, __val)
+#define SET_BEACON_PROBE_RSP_TIME_STAMP_HIGH(__phdr, __val) \
+	WRITEEF4BYTE(((u8*)(__phdr)) + 28, __val)
+#define SET_BEACON_PROBE_RSP_BEACON_INTERVAL(__phdr, __val) \
+	WRITEEF2BYTE(((u8*)(__phdr)) + 32, __val)
+#define GET_BEACON_PROBE_RSP_CAPABILITY_INFO(__phdr) 		\
+	READEF2BYTE(((u8*)(__phdr)) + 34)
+#define SET_BEACON_PROBE_RSP_CAPABILITY_INFO(__phdr, __val) \
+	WRITEEF2BYTE(((u8*)(__phdr)) + 34, __val)
+#define MASK_BEACON_PROBE_RSP_CAPABILITY_INFO(__phdr, __val) \
+	SET_BEACON_PROBE_RSP_CAPABILITY_INFO(__phdr, \
+	(GET_BEACON_PROBE_RSP_CAPABILITY_INFO(__phdr) & (~(__val))))
+
+int rtl_init_core(struct ieee80211_hw *hw);
+void rtl_deinit_core(struct ieee80211_hw *hw);
+void rtl_init_rx_config(struct ieee80211_hw *hw);
+void rtl_init_rfkill(struct ieee80211_hw *hw);
+void rtl_deinit_rfkill(struct ieee80211_hw *hw);
+
+void rtl_watch_dog_timer_callback(unsigned long data);
+void rtl_deinit_deferred_work(struct ieee80211_hw *hw);
+
+bool rtl_action_proc(struct ieee80211_hw *hw, struct sk_buff *skb, u8 is_tx);
+bool rtl_tx_mgmt_proc(struct ieee80211_hw *hw, struct sk_buff *skb);
+u8 rtl_is_special_data(struct ieee80211_hw *hw, struct sk_buff *skb, u8 is_tx);
+
+void rtl_beacon_statistic(struct ieee80211_hw *hw, struct sk_buff *skb);
+void rtl_watch_dog_timer_callback(unsigned long data);
+int rtl_tx_agg_start(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+	struct ieee80211_sta *sta, u16 tid, u16 *ssn);
+int rtl_tx_agg_stop(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+	struct ieee80211_sta *sta, u16 tid);
+int rtl_tx_agg_oper(struct ieee80211_hw *hw,
+		    struct ieee80211_sta *sta, u16 tid);
+int rtl_rx_agg_start(struct ieee80211_hw *hw,
+		     struct ieee80211_sta *sta, u16 tid);
+int rtl_rx_agg_stop(struct ieee80211_hw *hw,
+		    struct ieee80211_sta *sta, u16 tid);
+void rtl_watchdog_wq_callback(void *data);
+void rtl_fwevt_wq_callback(void *data);
+
+void rtl_get_tcb_desc(struct ieee80211_hw *hw,
+		      struct ieee80211_tx_info *info,
+		      struct ieee80211_sta *sta,
+		      struct sk_buff *skb, struct rtl_tcb_desc *tcb_desc);
+
+int rtl_send_smps_action(struct ieee80211_hw *hw,
+		struct ieee80211_sta *sta,
+		enum ieee80211_smps_mode smps);
+u8 *rtl_find_ie(u8 *data, unsigned int len, u8 ie);
+void rtl_recognize_peer(struct ieee80211_hw *hw, u8 *data, unsigned int len);
+u8 rtl_tid_to_ac(struct ieee80211_hw *hw, u8 tid);
+extern struct attribute_group rtl_attribute_group;
+void rtl_easy_concurrent_retrytimer_callback(unsigned long data);
+extern struct rtl_global_var global_var;
+
+#ifdef VIF_TODO
+struct ieee80211_vif *rtl_get_main_vif(struct ieee80211_hw *hw);
+bool rtl_set_vif_info(struct ieee80211_hw *hw, struct ieee80211_vif *vif);
+#endif
+#endif
diff --git a/drivers/staging/rtl8821ae/btcoexist/HalBtc8812a1Ant.c b/drivers/staging/rtl8821ae/btcoexist/HalBtc8812a1Ant.c
new file mode 100644
index 0000000..b30f17a
--- /dev/null
+++ b/drivers/staging/rtl8821ae/btcoexist/HalBtc8812a1Ant.c
@@ -0,0 +1,3976 @@
+//============================================================
+// Description:
+//
+// This file is for the 8812a 1-antenna BT coexistence mechanism
+//
+// History
+// 2012/11/15 Cosa first check in.
+//
+//============================================================
+
+//============================================================
+// include files
+//============================================================
+#include "halbt_precomp.h"
+#if 1
+//============================================================
+// Global variables, these are static variables
+//============================================================
+static COEX_DM_8812A_1ANT		GLCoexDm8812a1Ant;
+static PCOEX_DM_8812A_1ANT 	coex_dm=&GLCoexDm8812a1Ant;
+static COEX_STA_8812A_1ANT		GLCoexSta8812a1Ant;
+static PCOEX_STA_8812A_1ANT	coex_sta=&GLCoexSta8812a1Ant;
+
+const char *const GLBtInfoSrc8812a1Ant[]={
+	"BT Info[wifi fw]",
+	"BT Info[bt rsp]",
+	"BT Info[bt auto report]",
+};
+
+//============================================================
+// local function proto type if needed
+//============================================================
+//============================================================
+// local function start with halbtc8812a1ant_
+//============================================================
+#if 0
+void
+halbtc8812a1ant_Reg0x550Bit3(
+	 	PBTC_COEXIST		btcoexist,
+	 	BOOLEAN			bSet
+	)
+{
+	u1Byte	u1tmp=0;
+	
+	u1tmp = btcoexist->btc_read_1byte(btcoexist, 0x550);
+	if(bSet)
+	{
+		u1tmp |= BIT3;
+	}
+	else
+	{
+		u1tmp &= ~BIT3;
+	}
+	btcoexist->btc_write_1byte(btcoexist, 0x550, u1tmp);
+	BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], set 0x550[3]=%d\n", (bSet? 1:0)));
+}
+#endif
+u1Byte
+halbtc8812a1ant_BtRssiState(
+	u1Byte			level_num,
+	u1Byte			rssi_thresh,
+	u1Byte			rssi_thresh1
+	)
+{
+	s4Byte			bt_rssi=0;
+	u1Byte			bt_rssi_state;
+
+	bt_rssi = coex_sta->bt_rssi;
+
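+	// This state machine applies BTC_RSSI_COEX_THRESH_TOL_8812A_1ANT as
+	// hysteresis: a state only switches up once the RSSI exceeds the
+	// threshold plus the tolerance, and only switches back down once it
+	// drops below the bare threshold.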
+	if(level_num == 2)
+	{			
+		if( (coex_sta->pre_bt_rssi_state == BTC_RSSI_STATE_LOW) ||
+			(coex_sta->pre_bt_rssi_state == BTC_RSSI_STATE_STAY_LOW))
+		{
+			if(bt_rssi >= (rssi_thresh+BTC_RSSI_COEX_THRESH_TOL_8812A_1ANT))
+			{
+				bt_rssi_state = BTC_RSSI_STATE_HIGH;
+				BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_RSSI_STATE, ("[BTCoex], BT Rssi state switch to High\n"));
+			}
+			else
+			{
+				bt_rssi_state = BTC_RSSI_STATE_STAY_LOW;
+				BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_RSSI_STATE, ("[BTCoex], BT Rssi state stay at Low\n"));
+			}
+		}
+		else
+		{
+			if(bt_rssi < rssi_thresh)
+			{
+				bt_rssi_state = BTC_RSSI_STATE_LOW;
+				BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_RSSI_STATE, ("[BTCoex], BT Rssi state switch to Low\n"));
+			}
+			else
+			{
+				bt_rssi_state = BTC_RSSI_STATE_STAY_HIGH;
+				BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_RSSI_STATE, ("[BTCoex], BT Rssi state stay at High\n"));
+			}
+		}
+	}
+	else if(level_num == 3)
+	{
+		if(rssi_thresh > rssi_thresh1)
+		{
+			BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_RSSI_STATE, ("[BTCoex], BT Rssi thresh error!!\n"));
+			return coex_sta->pre_bt_rssi_state;
+		}
+		
+		if( (coex_sta->pre_bt_rssi_state == BTC_RSSI_STATE_LOW) ||
+			(coex_sta->pre_bt_rssi_state == BTC_RSSI_STATE_STAY_LOW))
+		{
+			if(bt_rssi >= (rssi_thresh+BTC_RSSI_COEX_THRESH_TOL_8812A_1ANT))
+			{
+				bt_rssi_state = BTC_RSSI_STATE_MEDIUM;
+				BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_RSSI_STATE, ("[BTCoex], BT Rssi state switch to Medium\n"));
+			}
+			else
+			{
+				bt_rssi_state = BTC_RSSI_STATE_STAY_LOW;
+				BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_RSSI_STATE, ("[BTCoex], BT Rssi state stay at Low\n"));
+			}
+		}
+		else if( (coex_sta->pre_bt_rssi_state == BTC_RSSI_STATE_MEDIUM) ||
+			(coex_sta->pre_bt_rssi_state == BTC_RSSI_STATE_STAY_MEDIUM))
+		{
+			if(bt_rssi >= (rssi_thresh1+BTC_RSSI_COEX_THRESH_TOL_8812A_1ANT))
+			{
+				bt_rssi_state = BTC_RSSI_STATE_HIGH;
+				BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_RSSI_STATE, ("[BTCoex], BT Rssi state switch to High\n"));
+			}
+			else if(bt_rssi < rssi_thresh)
+			{
+				bt_rssi_state = BTC_RSSI_STATE_LOW;
+				BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_RSSI_STATE, ("[BTCoex], BT Rssi state switch to Low\n"));
+			}
+			else
+			{
+				bt_rssi_state = BTC_RSSI_STATE_STAY_MEDIUM;
+				BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_RSSI_STATE, ("[BTCoex], BT Rssi state stay at Medium\n"));
+			}
+		}
+		else
+		{
+			if(bt_rssi < rssi_thresh1)
+			{
+				bt_rssi_state = BTC_RSSI_STATE_MEDIUM;
+				BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_RSSI_STATE, ("[BTCoex], BT Rssi state switch to Medium\n"));
+			}
+			else
+			{
+				bt_rssi_state = BTC_RSSI_STATE_STAY_HIGH;
+				BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_RSSI_STATE, ("[BTCoex], BT Rssi state stay at High\n"));
+			}
+		}
+	}
+		
+	coex_sta->pre_bt_rssi_state = bt_rssi_state;
+
+	return bt_rssi_state;
+}
+
+u1Byte
+halbtc8812a1ant_WifiRssiState(
+	 	PBTC_COEXIST		btcoexist,
+	 	u1Byte			index,
+	 	u1Byte			level_num,
+	 	u1Byte			rssi_thresh,
+	 	u1Byte			rssi_thresh1
+	)
+{
+	s4Byte			wifi_rssi=0;
+	u1Byte			wifi_rssi_state;
+
+	btcoexist->btc_get(btcoexist, BTC_GET_S4_WIFI_RSSI, &wifi_rssi);
+	
+	if(level_num == 2)
+	{
+		if( (coex_sta->pre_wifi_rssi_state[index] == BTC_RSSI_STATE_LOW) ||
+			(coex_sta->pre_wifi_rssi_state[index] == BTC_RSSI_STATE_STAY_LOW))
+		{
+			if(wifi_rssi >= (rssi_thresh+BTC_RSSI_COEX_THRESH_TOL_8812A_1ANT))
+			{
+				wifi_rssi_state = BTC_RSSI_STATE_HIGH;
+				BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_WIFI_RSSI_STATE, ("[BTCoex], wifi RSSI state switch to High\n"));
+			}
+			else
+			{
+				wifi_rssi_state = BTC_RSSI_STATE_STAY_LOW;
+				BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_WIFI_RSSI_STATE, ("[BTCoex], wifi RSSI state stay at Low\n"));
+			}
+		}
+		else
+		{
+			if(wifi_rssi < rssi_thresh)
+			{
+				wifi_rssi_state = BTC_RSSI_STATE_LOW;
+				BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_WIFI_RSSI_STATE, ("[BTCoex], wifi RSSI state switch to Low\n"));
+			}
+			else
+			{
+				wifi_rssi_state = BTC_RSSI_STATE_STAY_HIGH;
+				BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_WIFI_RSSI_STATE, ("[BTCoex], wifi RSSI state stay at High\n"));
+			}
+		}
+	}
+	else if(level_num == 3)
+	{
+		if(rssi_thresh > rssi_thresh1)
+		{
+			BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_WIFI_RSSI_STATE, ("[BTCoex], wifi RSSI thresh error!!\n"));
+			return coex_sta->pre_wifi_rssi_state[index];
+		}
+		
+		if( (coex_sta->pre_wifi_rssi_state[index] == BTC_RSSI_STATE_LOW) ||
+			(coex_sta->pre_wifi_rssi_state[index] == BTC_RSSI_STATE_STAY_LOW))
+		{
+			if(wifi_rssi >= (rssi_thresh+BTC_RSSI_COEX_THRESH_TOL_8812A_1ANT))
+			{
+				wifi_rssi_state = BTC_RSSI_STATE_MEDIUM;
+				BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_WIFI_RSSI_STATE, ("[BTCoex], wifi RSSI state switch to Medium\n"));
+			}
+			else
+			{
+				wifi_rssi_state = BTC_RSSI_STATE_STAY_LOW;
+				BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_WIFI_RSSI_STATE, ("[BTCoex], wifi RSSI state stay at Low\n"));
+			}
+		}
+		else if( (coex_sta->pre_wifi_rssi_state[index] == BTC_RSSI_STATE_MEDIUM) ||
+			(coex_sta->pre_wifi_rssi_state[index] == BTC_RSSI_STATE_STAY_MEDIUM))
+		{
+			if(wifi_rssi >= (rssi_thresh1+BTC_RSSI_COEX_THRESH_TOL_8812A_1ANT))
+			{
+				wifi_rssi_state = BTC_RSSI_STATE_HIGH;
+				BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_WIFI_RSSI_STATE, ("[BTCoex], wifi RSSI state switch to High\n"));
+			}
+			else if(wifi_rssi < rssi_thresh)
+			{
+				wifi_rssi_state = BTC_RSSI_STATE_LOW;
+				BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_WIFI_RSSI_STATE, ("[BTCoex], wifi RSSI state switch to Low\n"));
+			}
+			else
+			{
+				wifi_rssi_state = BTC_RSSI_STATE_STAY_MEDIUM;
+				BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_WIFI_RSSI_STATE, ("[BTCoex], wifi RSSI state stay at Medium\n"));
+			}
+		}
+		else
+		{
+			if(wifi_rssi < rssi_thresh1)
+			{
+				wifi_rssi_state = BTC_RSSI_STATE_MEDIUM;
+				BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_WIFI_RSSI_STATE, ("[BTCoex], wifi RSSI state switch to Medium\n"));
+			}
+			else
+			{
+				wifi_rssi_state = BTC_RSSI_STATE_STAY_HIGH;
+				BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_WIFI_RSSI_STATE, ("[BTCoex], wifi RSSI state stay at High\n"));
+			}
+		}
+	}
+		
+	coex_sta->pre_wifi_rssi_state[index] = wifi_rssi_state;
+
+	return wifi_rssi_state;
+}
+
+void
+halbtc8812a1ant_MonitorBtEnableDisable(
+	  	PBTC_COEXIST		btcoexist
+	)
+{
+	static BOOLEAN	pre_bt_disabled=false;
+	static u4Byte		bt_disable_cnt=0;
+	BOOLEAN			bt_active=true, bt_disable_by68=false, bt_disabled=false;
+	u4Byte			u4_tmp=0;
+
+	// This function checks whether BT is disabled
+
+	if(	coex_sta->high_priority_tx == 0 &&
+		coex_sta->high_priority_rx == 0 &&
+		coex_sta->low_priority_tx == 0 &&
+		coex_sta->low_priority_rx == 0)
+	{
+		bt_active = false;
+	}
+	if(	coex_sta->high_priority_tx == 0xffff &&
+		coex_sta->high_priority_rx == 0xffff &&
+		coex_sta->low_priority_tx == 0xffff &&
+		coex_sta->low_priority_rx == 0xffff)
+	{
+		bt_active = false;
+	}
+	if(bt_active)
+	{
+		bt_disable_cnt = 0;
+		bt_disabled = false;
+		btcoexist->btc_set(btcoexist, BTC_SET_BL_BT_DISABLE, &bt_disabled);
+		BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_MONITOR, ("[BTCoex], BT is enabled !!\n"));
+	}
+	else
+	{
+		bt_disable_cnt++;
+		BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_MONITOR, ("[BTCoex], bt all counters=0, %d times!!\n", 
+				bt_disable_cnt));
+		if(bt_disable_cnt >= 2 ||bt_disable_by68)
+		{
+			bt_disabled = true;
+			btcoexist->btc_set(btcoexist, BTC_SET_BL_BT_DISABLE, &bt_disabled);
+			BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_MONITOR, ("[BTCoex], BT is disabled !!\n"));
+		}
+	}
+	if(pre_bt_disabled != bt_disabled)
+	{
+		BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_MONITOR, ("[BTCoex], BT is from %s to %s!!\n", 
+			(pre_bt_disabled ? "disabled":"enabled"), 
+			(bt_disabled ? "disabled":"enabled")));
+		pre_bt_disabled = bt_disabled;
+		if(!bt_disabled)
+		{
+		}
+		else
+		{
+		}
+	}
+}
+
+void
+halbtc8812a1ant_MonitorBtCtr(
+	 	PBTC_COEXIST		btcoexist
+	)
+{
+	u4Byte 			reg_hp_tx_rx, reg_lp_tx_rx, u4_tmp;
+	u4Byte			reg_hp_tx=0, reg_hp_rx=0, reg_lp_tx=0, reg_lp_rx=0;
+	u1Byte			u1_tmp;
+	
+	reg_hp_tx_rx = 0x770;
+	reg_lp_tx_rx = 0x774;
+
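+	// Each counter register packs the Tx count in its low 16 bits and
+	// the Rx count in its high 16 bits.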
+	u4_tmp = btcoexist->btc_read_4byte(btcoexist, reg_hp_tx_rx);
+	reg_hp_tx = u4_tmp & bMaskLWord;
+	reg_hp_rx = (u4_tmp & bMaskHWord)>>16;
+
+	u4_tmp = btcoexist->btc_read_4byte(btcoexist, reg_lp_tx_rx);
+	reg_lp_tx = u4_tmp & bMaskLWord;
+	reg_lp_rx = (u4_tmp & bMaskHWord)>>16;
+		
+	coex_sta->high_priority_tx = reg_hp_tx;
+	coex_sta->high_priority_rx = reg_hp_rx;
+	coex_sta->low_priority_tx = reg_lp_tx;
+	coex_sta->low_priority_rx = reg_lp_rx;
+
+	BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_MONITOR, ("[BTCoex], High Priority Tx/Rx (reg 0x%x)=%x(%d)/%x(%d)\n", 
+		reg_hp_tx_rx, reg_hp_tx, reg_hp_tx, reg_hp_rx, reg_hp_rx));
+	BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_MONITOR, ("[BTCoex], Low Priority Tx/Rx (reg 0x%x)=%x(%d)/%x(%d)\n", 
+		reg_lp_tx_rx, reg_lp_tx, reg_lp_tx, reg_lp_rx, reg_lp_rx));
+
+	// reset counter
+	btcoexist->btc_write_1byte(btcoexist, 0x76e, 0xc);
+}
+
+void
+halbtc8812a1ant_QueryBtInfo(
+	 	PBTC_COEXIST		btcoexist
+	)
+{	
+	u1Byte	dataLen=3;
+	u1Byte	buf[5] = {0};
+	static	u4Byte	btInfoCnt=0;
+
+	if(!btInfoCnt ||
+		(coex_sta->bt_info_c2h_cnt[BT_INFO_SRC_8812A_1ANT_BT_RSP]-btInfoCnt)>2)
+	{
+		buf[0] = dataLen;
+		buf[1] = 0x1;	// polling enable, 1=enable, 0=disable
+		buf[2] = 0x2;	// polling time in seconds
+		buf[3] = 0x1;	// auto report enable, 1=enable, 0=disable
+			
+		btcoexist->btc_set(btcoexist, BTC_SET_ACT_CTRL_BT_INFO, (PVOID)&buf[0]);
+	}
+	btInfoCnt = coex_sta->bt_info_c2h_cnt[BT_INFO_SRC_8812A_1ANT_BT_RSP];
+}
+u1Byte
+halbtc8812a1ant_ActionAlgorithm(
+	 	PBTC_COEXIST		btcoexist
+	)
+{
+	PBTC_STACK_INFO		stack_info=&btcoexist->stack_info;
+	BOOLEAN				bt_hs_on=false;
+	u1Byte				algorithm=BT_8812A_1ANT_COEX_ALGO_UNDEFINED;
+	u1Byte				num_of_diff_profile=0;
+
+	btcoexist->btc_get(btcoexist, BTC_GET_BL_HS_OPERATION, &bt_hs_on);
+	
+	if(!stack_info->bt_link_exist)
+	{
+		BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], No profile exists!!!\n"));
+		return algorithm;
+	}
+
+	if(stack_info->sco_exist)
+		num_of_diff_profile++;
+	if(stack_info->hid_exist)
+		num_of_diff_profile++;
+	if(stack_info->pan_exist)
+		num_of_diff_profile++;
+	if(stack_info->a2dp_exist)
+		num_of_diff_profile++;
+	
+	if(num_of_diff_profile == 1)
+	{
+		if(stack_info->sco_exist)
+		{
+			BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], SCO only\n"));
+			algorithm = BT_8812A_1ANT_COEX_ALGO_SCO;
+		}
+		else
+		{
+			if(stack_info->hid_exist)
+			{
+				BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], HID only\n"));
+				algorithm = BT_8812A_1ANT_COEX_ALGO_HID;
+			}
+			else if(stack_info->a2dp_exist)
+			{
+				BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], A2DP only\n"));
+				algorithm = BT_8812A_1ANT_COEX_ALGO_A2DP;
+			}
+			else if(stack_info->pan_exist)
+			{
+				if(bt_hs_on)
+				{
+					BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], PAN(HS) only\n"));
+					algorithm = BT_8812A_1ANT_COEX_ALGO_PANHS;
+				}
+				else
+				{
+					BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], PAN(EDR) only\n"));
+					algorithm = BT_8812A_1ANT_COEX_ALGO_PANEDR;
+				}
+			}
+		}
+	}
+	else if(num_of_diff_profile == 2)
+	{
+		if(stack_info->sco_exist)
+		{
+			if(stack_info->hid_exist)
+			{
+				BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], SCO + HID\n"));
+				algorithm = BT_8812A_1ANT_COEX_ALGO_HID;
+			}
+			else if(stack_info->a2dp_exist)
+			{
+				BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], SCO + A2DP ==> SCO\n"));
+				algorithm = BT_8812A_1ANT_COEX_ALGO_SCO;
+			}
+			else if(stack_info->pan_exist)
+			{
+				if(bt_hs_on)
+				{
+					BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], SCO + PAN(HS)\n"));
+					algorithm = BT_8812A_1ANT_COEX_ALGO_SCO;
+				}
+				else
+				{
+					BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], SCO + PAN(EDR)\n"));
+					algorithm = BT_8812A_1ANT_COEX_ALGO_PANEDR_HID;
+				}
+			}
+		}
+		else
+		{
+			if( stack_info->hid_exist &&
+				stack_info->a2dp_exist )
+			{
+				BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], HID + A2DP\n"));
+				algorithm = BT_8812A_1ANT_COEX_ALGO_HID_A2DP;
+			}
+			else if( stack_info->hid_exist &&
+				stack_info->pan_exist )
+			{
+				if(bt_hs_on)
+				{
+					BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], HID + PAN(HS)\n"));
+					algorithm = BT_8812A_1ANT_COEX_ALGO_HID_A2DP;
+				}
+				else
+				{
+					BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], HID + PAN(EDR)\n"));
+					algorithm = BT_8812A_1ANT_COEX_ALGO_PANEDR_HID;
+				}
+			}
+			else if( stack_info->pan_exist &&
+				stack_info->a2dp_exist )
+			{
+				if(bt_hs_on)
+				{
+					BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], A2DP + PAN(HS)\n"));
+					algorithm = BT_8812A_1ANT_COEX_ALGO_A2DP_PANHS;
+				}
+				else
+				{
+					BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], A2DP + PAN(EDR)\n"));
+					algorithm = BT_8812A_1ANT_COEX_ALGO_PANEDR_A2DP;
+				}
+			}
+		}
+	}
+	else if(num_of_diff_profile == 3)
+	{
+		if(stack_info->sco_exist)
+		{
+			if( stack_info->hid_exist &&
+				stack_info->a2dp_exist )
+			{
+				BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], SCO + HID + A2DP ==> HID\n"));
+				algorithm = BT_8812A_1ANT_COEX_ALGO_HID;
+			}
+			else if( stack_info->hid_exist &&
+				stack_info->pan_exist )
+			{
+				if(bt_hs_on)
+				{
+					BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], SCO + HID + PAN(HS)\n"));
+					algorithm = BT_8812A_1ANT_COEX_ALGO_HID_A2DP;
+				}
+				else
+				{
+					BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], SCO + HID + PAN(EDR)\n"));
+					algorithm = BT_8812A_1ANT_COEX_ALGO_PANEDR_HID;
+				}
+			}
+			else if( stack_info->pan_exist &&
+				stack_info->a2dp_exist )
+			{
+				if(bt_hs_on)
+				{
+					BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], SCO + A2DP + PAN(HS)\n"));
+					algorithm = BT_8812A_1ANT_COEX_ALGO_SCO;
+				}
+				else
+				{
+					BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], SCO + A2DP + PAN(EDR) ==> HID\n"));
+					algorithm = BT_8812A_1ANT_COEX_ALGO_PANEDR_HID;
+				}
+			}
+		}
+		else
+		{
+			if( stack_info->hid_exist &&
+				stack_info->pan_exist &&
+				stack_info->a2dp_exist )
+			{
+				if(bt_hs_on)
+				{
+					BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], HID + A2DP + PAN(HS)\n"));
+					algorithm = BT_8812A_1ANT_COEX_ALGO_HID_A2DP;
+				}
+				else
+				{
+					BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], HID + A2DP + PAN(EDR)\n"));
+					algorithm = BT_8812A_1ANT_COEX_ALGO_HID_A2DP_PANEDR;
+				}
+			}
+		}
+	}
+	else if(num_of_diff_profile >= 3)
+	{
+		if(stack_info->sco_exist)
+		{
+			if( stack_info->hid_exist &&
+				stack_info->pan_exist &&
+				stack_info->a2dp_exist )
+			{
+				if(bt_hs_on)
+				{
+					BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], Error!!! SCO + HID + A2DP + PAN(HS)\n"));
+
+				}
+				else
+				{
+					BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], SCO + HID + A2DP + PAN(EDR)==>PAN(EDR)+HID\n"));
+					algorithm = BT_8812A_1ANT_COEX_ALGO_PANEDR_HID;
+				}
+			}
+		}
+	}
+
+	return algorithm;
+}
+
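+// Decide whether BT Tx power should be lowered: only when WiFi is connected;
+// for a BT hotspot (HS) link this additionally requires the HS RSSI to exceed 37.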
+BOOLEAN
+halbtc8812a1ant_NeedToDecBtPwr(
+	 	PBTC_COEXIST		btcoexist
+	)
+{
+	BOOLEAN		ret=false;
+	BOOLEAN		bt_hs_on=false, wifi_connected=false;
+	s4Byte		bt_hs_rssi=0;
+
+	if(!btcoexist->btc_get(btcoexist, BTC_GET_BL_HS_OPERATION, &bt_hs_on))
+		return false;
+	if(!btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_CONNECTED, &wifi_connected))
+		return false;
+	if(!btcoexist->btc_get(btcoexist, BTC_GET_S4_HS_RSSI, &bt_hs_rssi))
+		return false;
+
+	if(wifi_connected)
+	{
+		if(bt_hs_on)
+		{
+			if(bt_hs_rssi > 37)
+			{
+				BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW, ("[BTCoex], Need to decrease bt power for HS mode!!\n"));
+				ret = true;
+			}
+		}
+		else
+		{
+			BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW, ("[BTCoex], Need to decrease bt power for Wifi is connected!!\n"));
+			ret = true;
+		}
+	}
+	
+	return ret;
+}
+
+void
+halbtc8812a1ant_SetFwDacSwingLevel(
+	 	PBTC_COEXIST		btcoexist,
+	 	u1Byte			dac_swing_lvl
+	)
+{
+	u1Byte			h2c_parameter[1] ={0};
+
+	// There are several DAC swing levels:
+	// 0x18/ 0x10/ 0xc/ 0x8/ 0x4/ 0x6
+	h2c_parameter[0] = dac_swing_lvl;
+
+	BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_EXEC, ("[BTCoex], Set Dac Swing Level=0x%x\n", dac_swing_lvl));
+	BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_EXEC, ("[BTCoex], FW write 0x64=0x%x\n", h2c_parameter[0]));
+
+	btcoexist->btc_fill_h2c(btcoexist, 0x64, 1, h2c_parameter);
+}
+
+void
+halbtc8812a1ant_SetFwDecBtPwr(
+	 	PBTC_COEXIST		btcoexist,
+	 	BOOLEAN			dec_bt_pwr
+	)
+{
+	u1Byte	dataLen=3;
+	u1Byte	buf[5] = {0};
+
+	BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_EXEC, ("[BTCoex], decrease Bt Power : %s\n", 
+			(dec_bt_pwr? "Yes!!":"No!!")));
+
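+	// Message layout used with BTC_SET_ACT_CTRL_BT_COEX throughout this file:
+	// buf[0] = payload length, buf[1] = op-code, buf[2] = op-code length,
+	// buf[3] = op-code content.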
+	buf[0] = dataLen;
+	buf[1] = 0x3;		// OP_Code
+	buf[2] = 0x1;		// OP_Code_Length
+	if(dec_bt_pwr)
+		buf[3] = 0x1;	// OP_Code_Content
+	else
+		buf[3] = 0x0;
+		
+	btcoexist->btc_set(btcoexist, BTC_SET_ACT_CTRL_BT_COEX, (PVOID)&buf[0]);	
+}
+
+void
+halbtc8812a1ant_DecBtPwr(
+	 	PBTC_COEXIST		btcoexist,
+	 	BOOLEAN			force_exec,
+	 	BOOLEAN			dec_bt_pwr
+	)
+{
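+	// Note: the early return below leaves the rest of this routine unreachable,
+	// so the FW "decrease BT power" command is effectively disabled here.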
+	return;
+	BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW, ("[BTCoex], %s Dec BT power = %s\n",  
+		(force_exec? "force to":""), ((dec_bt_pwr)? "ON":"OFF")));
+	coex_dm->cur_dec_bt_pwr = dec_bt_pwr;
+
+	if(!force_exec)
+	{
+		BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_DETAIL, ("[BTCoex], pre_dec_bt_pwr=%d, cur_dec_bt_pwr=%d\n", 
+			coex_dm->pre_dec_bt_pwr, coex_dm->cur_dec_bt_pwr));
+
+		if(coex_dm->pre_dec_bt_pwr == coex_dm->cur_dec_bt_pwr) 
+			return;
+	}
+	halbtc8812a1ant_SetFwDecBtPwr(btcoexist, coex_dm->cur_dec_bt_pwr);
+
+	coex_dm->pre_dec_bt_pwr = coex_dm->cur_dec_bt_pwr;
+}
+
+void
+halbtc8812a1ant_SetFwBtLnaConstrain(
+	 	PBTC_COEXIST		btcoexist,
+	 	BOOLEAN			bt_lna_cons_on
+	)
+{
+	u1Byte	dataLen=3;
+	u1Byte	buf[5] = {0};
+
+	BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_EXEC, ("[BTCoex], set BT LNA Constrain: %s\n", 
+		(bt_lna_cons_on? "ON!!":"OFF!!")));
+
+	buf[0] = dataLen;
+	buf[1] = 0x2;		// OP_Code
+	buf[2] = 0x1;		// OP_Code_Length
+	if(bt_lna_cons_on)
+		buf[3] = 0x1;	// OP_Code_Content
+	else
+		buf[3] = 0x0;
+		
+	btcoexist->btc_set(btcoexist, BTC_SET_ACT_CTRL_BT_COEX, (PVOID)&buf[0]);	
+}
+
+void
+halbtc8812a1ant_SetBtLnaConstrain(
+	 	PBTC_COEXIST		btcoexist,
+	 	BOOLEAN			force_exec,
+	 	BOOLEAN			bt_lna_cons_on
+	)
+{
+	BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW, ("[BTCoex], %s BT Constrain = %s\n",  
+		(force_exec? "force":""), ((bt_lna_cons_on)? "ON":"OFF")));
+	coex_dm->bCurBtLnaConstrain = bt_lna_cons_on;
+
+	if(!force_exec)
+	{
+		BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_DETAIL, ("[BTCoex], bPreBtLnaConstrain=%d, bCurBtLnaConstrain=%d\n", 
+			coex_dm->bPreBtLnaConstrain, coex_dm->bCurBtLnaConstrain));
+
+		if(coex_dm->bPreBtLnaConstrain == coex_dm->bCurBtLnaConstrain) 
+			return;
+	}
+	halbtc8812a1ant_SetFwBtLnaConstrain(btcoexist, coex_dm->bCurBtLnaConstrain);
+
+	coex_dm->bPreBtLnaConstrain = coex_dm->bCurBtLnaConstrain;
+}
+
+void
+halbtc8812a1ant_SetFwBtPsdMode(
+	 	PBTC_COEXIST		btcoexist,
+	 	u1Byte			bt_psd_mode
+	)
+{
+	u1Byte	dataLen=3;
+	u1Byte	buf[5] = {0};
+
+	BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_EXEC, ("[BTCoex], set BT PSD mode=0x%x\n", 
+		bt_psd_mode));
+
+	buf[0] = dataLen;
+	buf[1] = 0x4;			// OP_Code
+	buf[2] = 0x1;			// OP_Code_Length
+	buf[3] = bt_psd_mode;	// OP_Code_Content
+		
+	btcoexist->btc_set(btcoexist, BTC_SET_ACT_CTRL_BT_COEX, (PVOID)&buf[0]);	
+}
+
+
+void
+halbtc8812a1ant_SetBtPsdMode(
+	 	PBTC_COEXIST		btcoexist,
+	 	BOOLEAN			force_exec,
+	 	u1Byte			bt_psd_mode
+	)
+{
+	BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW, ("[BTCoex], %s BT PSD mode = 0x%x\n",  
+		(force_exec? "force":""), bt_psd_mode));
+	coex_dm->bCurBtPsdMode = bt_psd_mode;
+
+	if(!force_exec)
+	{
+		BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_DETAIL, ("[BTCoex], bPreBtPsdMode=0x%x, bCurBtPsdMode=0x%x\n", 
+			coex_dm->bPreBtPsdMode, coex_dm->bCurBtPsdMode));
+
+		if(coex_dm->bPreBtPsdMode == coex_dm->bCurBtPsdMode) 
+			return;
+	}
+	halbtc8812a1ant_SetFwBtPsdMode(btcoexist, coex_dm->bCurBtPsdMode);
+
+	coex_dm->bPreBtPsdMode = coex_dm->bCurBtPsdMode;
+}
+
+
+void
+halbtc8812a1ant_SetBtAutoReport(
+	 	PBTC_COEXIST		btcoexist,
+	 	BOOLEAN			enable_auto_report
+	)
+{
+#if 0
+	u1Byte			h2c_parameter[1] ={0};
+	
+	h2c_parameter[0] = 0;
+
+	if(enable_auto_report)
+	{
+		h2c_parameter[0] |= BIT0;
+	}
+
+	BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_EXEC, ("[BTCoex], BT FW auto report : %s, FW write 0x68=0x%x\n", 
+		(enable_auto_report? "Enabled!!":"Disabled!!"), h2c_parameter[0]));
+
+	btcoexist->btc_fill_h2c(btcoexist, 0x68, 1, h2c_parameter);	
+#else
+
+#endif
+}
+
+void
+halbtc8812a1ant_BtAutoReport(
+	 	PBTC_COEXIST		btcoexist,
+	 	BOOLEAN			force_exec,
+	 	BOOLEAN			enable_auto_report
+	)
+{
+	BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW, ("[BTCoex], %s BT Auto report = %s\n",  
+		(force_exec? "force to":""), ((enable_auto_report)? "Enabled":"Disabled")));
+	coex_dm->cur_bt_auto_report = enable_auto_report;
+
+	if(!force_exec)
+	{
+		BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_DETAIL, ("[BTCoex], pre_bt_auto_report=%d, cur_bt_auto_report=%d\n", 
+			coex_dm->pre_bt_auto_report, coex_dm->cur_bt_auto_report));
+
+		if(coex_dm->pre_bt_auto_report == coex_dm->cur_bt_auto_report) 
+			return;
+	}
+	halbtc8812a1ant_SetBtAutoReport(btcoexist, coex_dm->cur_bt_auto_report);
+
+	coex_dm->pre_bt_auto_report = coex_dm->cur_bt_auto_report;
+}
+
+void
+halbtc8812a1ant_FwDacSwingLvl(
+	 	PBTC_COEXIST		btcoexist,
+	 	BOOLEAN			force_exec,
+	 	u1Byte			fw_dac_swing_lvl
+	)
+{
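+	// Note: the early return below leaves the rest of this routine unreachable,
+	// so the FW DAC swing level is never actually updated by this function.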
+	return;
+	BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW, ("[BTCoex], %s set FW Dac Swing level = %d\n",  
+		(force_exec? "force to":""), fw_dac_swing_lvl));
+	coex_dm->cur_fw_dac_swing_lvl = fw_dac_swing_lvl;
+
+	if(!force_exec)
+	{
+		BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_DETAIL, ("[BTCoex], pre_fw_dac_swing_lvl=%d, cur_fw_dac_swing_lvl=%d\n", 
+			coex_dm->pre_fw_dac_swing_lvl, coex_dm->cur_fw_dac_swing_lvl));
+
+		if(coex_dm->pre_fw_dac_swing_lvl == coex_dm->cur_fw_dac_swing_lvl) 
+			return;
+	}
+
+	halbtc8812a1ant_SetFwDacSwingLevel(btcoexist, coex_dm->cur_fw_dac_swing_lvl);
+
+	coex_dm->pre_fw_dac_swing_lvl = coex_dm->cur_fw_dac_swing_lvl;
+}
+
+void
+halbtc8812a1ant_SetSwRfRxLpfCorner(
+	 	PBTC_COEXIST		btcoexist,
+	 	BOOLEAN			rx_rf_shrink_on
+	)
+{
+	if(rx_rf_shrink_on)
+	{
+		//Shrink RF Rx LPF corner
+		BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_EXEC, ("[BTCoex], Shrink RF Rx LPF corner!!\n"));
+		btcoexist->btc_set_rf_reg(btcoexist, BTC_RF_A, 0x1e, 0xfffff, 0xf0ff7);
+	}
+	else
+	{
+		//Resume RF Rx LPF corner
+		// After initialization, coex_dm->bt_rf0x1e_backup holds the corner value to restore.
+		if(btcoexist->bInitilized)
+		{
+			BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_EXEC, ("[BTCoex], Resume RF Rx LPF corner!!\n"));
+			btcoexist->btc_set_rf_reg(btcoexist, BTC_RF_A, 0x1e, 0xfffff, coex_dm->bt_rf0x1e_backup);
+		}
+	}
+}
+
+void
+halbtc8812a1ant_RfShrink(
+	 	PBTC_COEXIST		btcoexist,
+	 	BOOLEAN			force_exec,
+	 	BOOLEAN			rx_rf_shrink_on
+	)
+{
+	BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW, ("[BTCoex], %s turn Rx RF Shrink = %s\n",  
+		(force_exec? "force to":""), ((rx_rf_shrink_on)? "ON":"OFF")));
+	coex_dm->cur_rf_rx_lpf_shrink = rx_rf_shrink_on;
+
+	if(!force_exec)
+	{
+		BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_DETAIL, ("[BTCoex], pre_rf_rx_lpf_shrink=%d, cur_rf_rx_lpf_shrink=%d\n", 
+			coex_dm->pre_rf_rx_lpf_shrink, coex_dm->cur_rf_rx_lpf_shrink));
+
+		if(coex_dm->pre_rf_rx_lpf_shrink == coex_dm->cur_rf_rx_lpf_shrink) 
+			return;
+	}
+	halbtc8812a1ant_SetSwRfRxLpfCorner(btcoexist, coex_dm->cur_rf_rx_lpf_shrink);
+
+	coex_dm->pre_rf_rx_lpf_shrink = coex_dm->cur_rf_rx_lpf_shrink;
+}
+
+void
+halbtc8812a1ant_SetSwPenaltyTxRateAdaptive(
+	 	PBTC_COEXIST		btcoexist,
+	 	BOOLEAN			low_penalty_ra
+	)
+{
+	u1Byte	u1_tmp;
+
+	u1_tmp = btcoexist->btc_read_1byte(btcoexist, 0x4fd);
+	u1_tmp |= BIT0;
+	if(low_penalty_ra)
+	{
+		BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_EXEC, ("[BTCoex], Tx rate adaptive, set low penalty!!\n"));
+		u1_tmp &= ~BIT2;
+	}
+	else
+	{
+		BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_EXEC, ("[BTCoex], Tx rate adaptive, set normal!!\n"));
+		u1_tmp |= BIT2;
+	}
+
+	btcoexist->btc_write_1byte(btcoexist, 0x4fd, u1_tmp);
+}
+
+void
+halbtc8812a1ant_LowPenaltyRa(
+	 	PBTC_COEXIST		btcoexist,
+	 	BOOLEAN			force_exec,
+	 	BOOLEAN			low_penalty_ra
+	)
+{
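+	// Note: the early return below leaves the rest of this routine unreachable,
+	// so the low-penalty rate-adaptive setting is effectively disabled here.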
+	return;
+	BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW, ("[BTCoex], %s turn LowPenaltyRA = %s\n",  
+		(force_exec? "force to":""), ((low_penalty_ra)? "ON":"OFF")));
+	coex_dm->cur_low_penalty_ra = low_penalty_ra;
+
+	if(!force_exec)
+	{
+		BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_DETAIL, ("[BTCoex], pre_low_penalty_ra=%d, cur_low_penalty_ra=%d\n", 
+			coex_dm->pre_low_penalty_ra, coex_dm->cur_low_penalty_ra));
+
+		if(coex_dm->pre_low_penalty_ra == coex_dm->cur_low_penalty_ra) 
+			return;
+	}
+	halbtc8812a1ant_SetSwPenaltyTxRateAdaptive(btcoexist, coex_dm->cur_low_penalty_ra);
+
+	coex_dm->pre_low_penalty_ra = coex_dm->cur_low_penalty_ra;
+}
+
+void
+halbtc8812a1ant_SetDacSwingReg(
+	 	PBTC_COEXIST		btcoexist,
+	 	u4Byte			level
+	)
+{
+	u1Byte	val=(u1Byte)level;
+
+	BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_EXEC, ("[BTCoex], Write SwDacSwing = 0x%x\n", level));
+	btcoexist->btc_write_1byte_bitmask(btcoexist, 0xc5b, 0x3e, val);
+}
+
+void
+halbtc8812a1ant_SetSwFullTimeDacSwing(
+	 	PBTC_COEXIST		btcoexist,
+	 	BOOLEAN			sw_dac_swing_on,
+	 	u4Byte			sw_dac_swing_lvl
+	)
+{
+	if(sw_dac_swing_on)
+	{
+		halbtc8812a1ant_SetDacSwingReg(btcoexist, sw_dac_swing_lvl);
+	}
+	else
+	{
+		halbtc8812a1ant_SetDacSwingReg(btcoexist, 0x18);
+	}
+}
+
+
+void
+halbtc8812a1ant_DacSwing(
+	 	PBTC_COEXIST		btcoexist,
+	 	BOOLEAN			force_exec,
+	 	BOOLEAN			dac_swing_on,
+	 	u4Byte			dac_swing_lvl
+	)
+{
+	BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW, ("[BTCoex], %s turn DacSwing=%s, dac_swing_lvl=0x%x\n",  
+		(force_exec? "force to":""), ((dac_swing_on)? "ON":"OFF"), dac_swing_lvl));
+	coex_dm->cur_dac_swing_on = dac_swing_on;
+	coex_dm->cur_dac_swing_lvl = dac_swing_lvl;
+
+	if(!force_exec)
+	{
+		BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_DETAIL, ("[BTCoex], pre_dac_swing_on=%d, pre_dac_swing_lvl=0x%x, cur_dac_swing_on=%d, cur_dac_swing_lvl=0x%x\n", 
+			coex_dm->pre_dac_swing_on, coex_dm->pre_dac_swing_lvl,
+			coex_dm->cur_dac_swing_on, coex_dm->cur_dac_swing_lvl));
+
+		if( (coex_dm->pre_dac_swing_on == coex_dm->cur_dac_swing_on) &&
+			(coex_dm->pre_dac_swing_lvl == coex_dm->cur_dac_swing_lvl) )
+			return;
+	}
+	delay_ms(30);
+	halbtc8812a1ant_SetSwFullTimeDacSwing(btcoexist, dac_swing_on, dac_swing_lvl);
+
+	coex_dm->pre_dac_swing_on = coex_dm->cur_dac_swing_on;
+	coex_dm->pre_dac_swing_lvl = coex_dm->cur_dac_swing_lvl;
+}
+
+void
+halbtc8812a1ant_SetAdcBackOff(
+	 	PBTC_COEXIST		btcoexist,
+	 	BOOLEAN			adc_back_off
+	)
+{
+	if(adc_back_off)
+	{
+		BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_EXEC, ("[BTCoex], BB BackOff Level On!\n"));
+		btcoexist->btc_write_1byte_bitmask(btcoexist, 0x8db, 0x60, 0x3);
+	}
+	else
+	{
+		BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_EXEC, ("[BTCoex], BB BackOff Level Off!\n"));
+		btcoexist->btc_write_1byte_bitmask(btcoexist, 0x8db, 0x60, 0x1);
+	}
+}
+
+void
+halbtc8812a1ant_AdcBackOff(
+	 	PBTC_COEXIST		btcoexist,
+	 	BOOLEAN			force_exec,
+	 	BOOLEAN			adc_back_off
+	)
+{
+	BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW, ("[BTCoex], %s turn AdcBackOff = %s\n",  
+		(force_exec? "force to":""), ((adc_back_off)? "ON":"OFF")));
+	coex_dm->cur_adc_back_off = adc_back_off;
+
+	if(!force_exec)
+	{
+		BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_DETAIL, ("[BTCoex], pre_adc_back_off=%d, cur_adc_back_off=%d\n", 
+			coex_dm->pre_adc_back_off, coex_dm->cur_adc_back_off));
+
+		if(coex_dm->pre_adc_back_off == coex_dm->cur_adc_back_off) 
+			return;
+	}
+	halbtc8812a1ant_SetAdcBackOff(btcoexist, coex_dm->cur_adc_back_off);
+
+	coex_dm->pre_adc_back_off = coex_dm->cur_adc_back_off;
+}
+
+void
+halbtc8812a1ant_SetAgcTable(
+	 	PBTC_COEXIST		btcoexist,
+	 	BOOLEAN			agc_table_en
+	)
+{
+	u1Byte		rssi_adjust_val=0;
+	
+	btcoexist->btc_set_rf_reg(btcoexist, BTC_RF_A, 0xef, 0xfffff, 0x02000);
+	if(agc_table_en)
+	{
+		BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_EXEC, ("[BTCoex], Agc Table On!\n"));
+		btcoexist->btc_set_rf_reg(btcoexist, BTC_RF_A, 0x3b, 0xfffff, 0x3fa58);
+		btcoexist->btc_set_rf_reg(btcoexist, BTC_RF_A, 0x3b, 0xfffff, 0x37a58);
+		btcoexist->btc_set_rf_reg(btcoexist, BTC_RF_A, 0x3b, 0xfffff, 0x2fa58);
+		rssi_adjust_val = 8;
+	}
+	else
+	{
+		BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_EXEC, ("[BTCoex], Agc Table Off!\n"));
+		btcoexist->btc_set_rf_reg(btcoexist, BTC_RF_A, 0x3b, 0xfffff, 0x39258);
+		btcoexist->btc_set_rf_reg(btcoexist, BTC_RF_A, 0x3b, 0xfffff, 0x31258);
+		btcoexist->btc_set_rf_reg(btcoexist, BTC_RF_A, 0x3b, 0xfffff, 0x29258);
+	}
+	btcoexist->btc_set_rf_reg(btcoexist, BTC_RF_A, 0xef, 0xfffff, 0x0);
+
+	// set rssi_adjust_val for wifi module.
+	btcoexist->btc_set(btcoexist, BTC_SET_U1_RSSI_ADJ_VAL_FOR_AGC_TABLE_ON, &rssi_adjust_val);
+}
+
+
+void
+halbtc8812a1ant_AgcTable(
+	 	PBTC_COEXIST		btcoexist,
+	 	BOOLEAN			force_exec,
+	 	BOOLEAN			agc_table_en
+	)
+{
+	BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW, ("[BTCoex], %s %s Agc Table\n",  
+		(force_exec? "force to":""), ((agc_table_en)? "Enable":"Disable")));
+	coex_dm->cur_agc_table_en = agc_table_en;
+
+	if(!force_exec)
+	{
+		BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_DETAIL, ("[BTCoex], pre_agc_table_en=%d, cur_agc_table_en=%d\n", 
+			coex_dm->pre_agc_table_en, coex_dm->cur_agc_table_en));
+
+		if(coex_dm->pre_agc_table_en == coex_dm->cur_agc_table_en) 
+			return;
+	}
+	halbtc8812a1ant_SetAgcTable(btcoexist, agc_table_en);
+
+	coex_dm->pre_agc_table_en = coex_dm->cur_agc_table_en;
+}
+
+void
+halbtc8812a1ant_SetCoexTable(
+	 	PBTC_COEXIST	btcoexist,
+	 	u4Byte		val0x6c0,
+	 	u4Byte		val0x6c4,
+	 	u4Byte		val0x6c8,
+	 	u1Byte		val0x6cc
+	)
+{
+	BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_EXEC, ("[BTCoex], set coex table, set 0x6c0=0x%x\n", val0x6c0));
+	btcoexist->btc_write_4byte(btcoexist, 0x6c0, val0x6c0);
+
+	BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_EXEC, ("[BTCoex], set coex table, set 0x6c4=0x%x\n", val0x6c4));
+	btcoexist->btc_write_4byte(btcoexist, 0x6c4, val0x6c4);
+
+	BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_EXEC, ("[BTCoex], set coex table, set 0x6c8=0x%x\n", val0x6c8));
+	btcoexist->btc_write_4byte(btcoexist, 0x6c8, val0x6c8);
+
+	BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_EXEC, ("[BTCoex], set coex table, set 0x6cc=0x%x\n", val0x6cc));
+	btcoexist->btc_write_1byte(btcoexist, 0x6cc, val0x6cc);
+}
+
+void
+halbtc8812a1ant_CoexTable(
+	 	PBTC_COEXIST		btcoexist,
+	 	BOOLEAN			force_exec,
+	 	u4Byte			val0x6c0,
+	 	u4Byte			val0x6c4,
+	 	u4Byte			val0x6c8,
+	 	u1Byte			val0x6cc
+	)
+{
+	BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW, ("[BTCoex], %s write Coex Table 0x6c0=0x%x, 0x6c4=0x%x, 0x6c8=0x%x, 0x6cc=0x%x\n", 
+		(force_exec? "force to":""), val0x6c0, val0x6c4, val0x6c8, val0x6cc));
+	coex_dm->cur_val0x6c0 = val0x6c0;
+	coex_dm->cur_val0x6c4 = val0x6c4;
+	coex_dm->cur_val0x6c8 = val0x6c8;
+	coex_dm->cur_val0x6cc = val0x6cc;
+
+	if(!force_exec)
+	{
+		BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_DETAIL, ("[BTCoex], pre_val0x6c0=0x%x, pre_val0x6c4=0x%x, pre_val0x6c8=0x%x, pre_val0x6cc=0x%x !!\n", 
+			coex_dm->pre_val0x6c0, coex_dm->pre_val0x6c4, coex_dm->pre_val0x6c8, coex_dm->pre_val0x6cc));
+		BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_DETAIL, ("[BTCoex], cur_val0x6c0=0x%x, cur_val0x6c4=0x%x, cur_val0x6c8=0x%x, cur_val0x6cc=0x%x !!\n", 
+			coex_dm->cur_val0x6c0, coex_dm->cur_val0x6c4, coex_dm->cur_val0x6c8, coex_dm->cur_val0x6cc));
+	
+		if( (coex_dm->pre_val0x6c0 == coex_dm->cur_val0x6c0) &&
+			(coex_dm->pre_val0x6c4 == coex_dm->cur_val0x6c4) &&
+			(coex_dm->pre_val0x6c8 == coex_dm->cur_val0x6c8) &&
+			(coex_dm->pre_val0x6cc == coex_dm->cur_val0x6cc) )
+			return;
+	}
+	halbtc8812a1ant_SetCoexTable(btcoexist, val0x6c0, val0x6c4, val0x6c8, val0x6cc);
+
+	coex_dm->pre_val0x6c0 = coex_dm->cur_val0x6c0;
+	coex_dm->pre_val0x6c4 = coex_dm->cur_val0x6c4;
+	coex_dm->pre_val0x6c8 = coex_dm->cur_val0x6c8;
+	coex_dm->pre_val0x6cc = coex_dm->cur_val0x6cc;
+}
+
+void
+halbtc8812a1ant_SetFwIgnoreWlanAct(
+	 	PBTC_COEXIST		btcoexist,
+	 	BOOLEAN			enable
+	)
+{
+	u1Byte	dataLen=3;
+	u1Byte	buf[5] = {0};
+
+	BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_EXEC, ("[BTCoex], %s BT Ignore Wlan_Act\n",
+		(enable? "Enable":"Disable")));
+
+	buf[0] = dataLen;
+	buf[1] = 0x1;			// OP_Code
+	buf[2] = 0x1;			// OP_Code_Length
+	if(enable)
+		buf[3] = 0x1; 		// OP_Code_Content
+	else
+		buf[3] = 0x0;
+		
+	btcoexist->btc_set(btcoexist, BTC_SET_ACT_CTRL_BT_COEX, (PVOID)&buf[0]);	
+}
+
+void
+halbtc8812a1ant_IgnoreWlanAct(
+	 	PBTC_COEXIST		btcoexist,
+	 	BOOLEAN			force_exec,
+	 	BOOLEAN			enable
+	)
+{
+	BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW, ("[BTCoex], %s turn Ignore WlanAct %s\n", 
+		(force_exec? "force to":""), (enable? "ON":"OFF")));
+	coex_dm->cur_ignore_wlan_act = enable;
+
+	if(!force_exec)
+	{
+		BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_DETAIL, ("[BTCoex], pre_ignore_wlan_act = %d, cur_ignore_wlan_act = %d!!\n", 
+			coex_dm->pre_ignore_wlan_act, coex_dm->cur_ignore_wlan_act));
+
+		if(coex_dm->pre_ignore_wlan_act == coex_dm->cur_ignore_wlan_act)
+			return;
+	}
+	halbtc8812a1ant_SetFwIgnoreWlanAct(btcoexist, enable);
+
+	coex_dm->pre_ignore_wlan_act = coex_dm->cur_ignore_wlan_act;
+}
+
+void
+halbtc8812a1ant_SetFwPstdma(
+	 	PBTC_COEXIST		btcoexist,
+	 	u1Byte			byte1,
+	 	u1Byte			byte2,
+	 	u1Byte			byte3,
+	 	u1Byte			byte4,
+	 	u1Byte			byte5
+	)
+{
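+	// Build the 5-byte PS-TDMA command: the bytes are sent verbatim to FW
+	// command 0x60 below and also cached in coex_dm->ps_tdma_para[].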
+	u1Byte			h2c_parameter[5] ={0};
+
+	h2c_parameter[0] = byte1;	
+	h2c_parameter[1] = byte2;	
+	h2c_parameter[2] = byte3;
+	h2c_parameter[3] = byte4;
+	h2c_parameter[4] = byte5;
+
+	coex_dm->ps_tdma_para[0] = byte1;
+	coex_dm->ps_tdma_para[1] = byte2;
+	coex_dm->ps_tdma_para[2] = byte3;
+	coex_dm->ps_tdma_para[3] = byte4;
+	coex_dm->ps_tdma_para[4] = byte5;
+	
+	BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_EXEC, ("[BTCoex], FW write 0x60(5bytes)=0x%x%08x\n", 
+		h2c_parameter[0], 
+		h2c_parameter[1]<<24|h2c_parameter[2]<<16|h2c_parameter[3]<<8|h2c_parameter[4]));
+
+	btcoexist->btc_fill_h2c(btcoexist, 0x60, 5, h2c_parameter);
+}
+
+void
+halbtc8812a1ant_SetLpsRpwm(
+	 	PBTC_COEXIST		btcoexist,
+	 	u1Byte			lps_val,
+	 	u1Byte			rpwm_val
+	)
+{
+	u1Byte	lps=lps_val;
+	u1Byte	rpwm=rpwm_val;
+	
+	btcoexist->btc_set(btcoexist, BTC_SET_U1_1ANT_LPS, &lps);
+	btcoexist->btc_set(btcoexist, BTC_SET_U1_1ANT_RPWM, &rpwm);
+	
+	btcoexist->btc_set(btcoexist, BTC_SET_ACT_INC_FORCE_EXEC_PWR_CMD_CNT, NULL);
+}
+
+void
+halbtc8812a1ant_LpsRpwm(
+	 	PBTC_COEXIST		btcoexist,
+	 	BOOLEAN			force_exec,
+	 	u1Byte			lps_val,
+	 	u1Byte			rpwm_val
+	)
+{
+	BOOLEAN	bForceExecPwrCmd=false;
+	
+	BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW, ("[BTCoex], %s set lps/rpwm=0x%x/0x%x \n", 
+		(force_exec? "force to":""), lps_val, rpwm_val));
+	coex_dm->cur_lps = lps_val;
+	coex_dm->cur_rpwm = rpwm_val;
+
+	if(!force_exec)
+	{
+		BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_DETAIL, ("[BTCoex], pre_lps/cur_lps=0x%x/0x%x, pre_rpwm/cur_rpwm=0x%x/0x%x!!\n", 
+			coex_dm->pre_lps, coex_dm->cur_lps, coex_dm->pre_rpwm, coex_dm->cur_rpwm));
+
+		if( (coex_dm->pre_lps == coex_dm->cur_lps) &&
+			(coex_dm->pre_rpwm == coex_dm->cur_rpwm) )
+		{
+			return;
+		}
+	}
+	halbtc8812a1ant_SetLpsRpwm(btcoexist, lps_val, rpwm_val);
+
+	coex_dm->pre_lps = coex_dm->cur_lps;
+	coex_dm->pre_rpwm = coex_dm->cur_rpwm;
+}
+
+void
+halbtc8812a1ant_SwMechanism1(
+	 	PBTC_COEXIST	btcoexist,	
+	 	BOOLEAN		shrink_rx_lpf,
+	 	BOOLEAN 	low_penalty_ra,
+	 	BOOLEAN		limited_dig, 
+	 	BOOLEAN		bt_lna_constrain
+	) 
+{
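+	// All software coex sub-mechanisms below are commented out, so this routine
+	// is currently a no-op kept as a hook for its callers.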
+	//halbtc8812a1ant_RfShrink(btcoexist, NORMAL_EXEC, shrink_rx_lpf);
+	//halbtc8812a1ant_LowPenaltyRa(btcoexist, NORMAL_EXEC, low_penalty_ra);
+
+	//no limited DIG
+	//halbtc8812a1ant_SetBtLnaConstrain(btcoexist, NORMAL_EXEC, bt_lna_constrain);
+}
+
+void
+halbtc8812a1ant_SwMechanism2(
+	 	PBTC_COEXIST	btcoexist,	
+	 	BOOLEAN		agc_table_shift,
+	 	BOOLEAN 	adc_back_off,
+	 	BOOLEAN		sw_dac_swing,
+	 	u4Byte		dac_swing_lvl
+	)
+{
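+	// Likewise a no-op: the AGC table, ADC back-off and DAC swing tweaks below
+	// are all commented out.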
+	//halbtc8812a1ant_AgcTable(btcoexist, NORMAL_EXEC, agc_table_shift);
+	//halbtc8812a1ant_AdcBackOff(btcoexist, NORMAL_EXEC, adc_back_off);
+	//halbtc8812a1ant_DacSwing(btcoexist, NORMAL_EXEC, sw_dac_swing, dac_swing_lvl);
+}
+
+void
+halbtc8812a1ant_PsTdma(
+	 	PBTC_COEXIST		btcoexist,
+	 	BOOLEAN			force_exec,
+	 	BOOLEAN			turn_on,
+	 	u1Byte			type
+	)
+{
+	BOOLEAN			bTurnOnByCnt=false;
+	u1Byte			psTdmaTypeByCnt=0, rssi_adjust_val=0;
+
+	BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW, ("[BTCoex], %s turn %s PS TDMA, type=%d\n", 
+		(force_exec? "force to":""), (turn_on? "ON":"OFF"), type));
+	coex_dm->cur_ps_tdma_on = turn_on;
+	coex_dm->cur_ps_tdma = type;
+
+	if(!force_exec)
+	{
+		BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_DETAIL, ("[BTCoex], pre_ps_tdma_on = %d, cur_ps_tdma_on = %d!!\n", 
+			coex_dm->pre_ps_tdma_on, coex_dm->cur_ps_tdma_on));
+		BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_DETAIL, ("[BTCoex], pre_ps_tdma = %d, cur_ps_tdma = %d!!\n", 
+			coex_dm->pre_ps_tdma, coex_dm->cur_ps_tdma));
+
+		if( (coex_dm->pre_ps_tdma_on == coex_dm->cur_ps_tdma_on) &&
+			(coex_dm->pre_ps_tdma == coex_dm->cur_ps_tdma) )
+			return;
+	}
+	if(turn_on)
+	{
+		switch(type)
+		{
+			default:
+				halbtc8812a1ant_SetFwPstdma(btcoexist, 0xd3, 0x1a, 0x1a, 0x0, 0x58);
+				break;
+			case 1:
+				halbtc8812a1ant_SetFwPstdma(btcoexist, 0xd3, 0x1a, 0x1a, 0x0, 0x48);
+				rssi_adjust_val = 11;
+				break;
+			case 2:
+				halbtc8812a1ant_SetFwPstdma(btcoexist, 0xd3, 0x12, 0x12, 0x0, 0x48);
+				rssi_adjust_val = 14;
+				break;
+			case 3:
+				halbtc8812a1ant_SetFwPstdma(btcoexist, 0x93, 0x25, 0x3, 0x10, 0x40);
+				break;
+			case 4:
+				halbtc8812a1ant_SetFwPstdma(btcoexist, 0x93, 0x15, 0x3, 0x14, 0x0);
+				rssi_adjust_val = 17;
+				break;
+			case 5:
+				halbtc8812a1ant_SetFwPstdma(btcoexist, 0x61, 0x15, 0x3, 0x31, 0x0);
+				break;
+			case 6:
+				halbtc8812a1ant_SetFwPstdma(btcoexist, 0x13, 0xa, 0x3, 0x0, 0x0);
+				break;
+			case 7:
+				halbtc8812a1ant_SetFwPstdma(btcoexist, 0x13, 0xc, 0x5, 0x0, 0x0);
+				break;
+			case 8:	
+				halbtc8812a1ant_SetFwPstdma(btcoexist, 0x93, 0x25, 0x3, 0x10, 0x0);
+				break;
+			case 9:	
+				halbtc8812a1ant_SetFwPstdma(btcoexist, 0xd3, 0xa, 0xa, 0x0, 0x48);
+				rssi_adjust_val = 18;
+				break;
+			case 10:	
+				halbtc8812a1ant_SetFwPstdma(btcoexist, 0x13, 0xa, 0xa, 0x0, 0x40);
+				break;
+			case 11:	
+				halbtc8812a1ant_SetFwPstdma(btcoexist, 0xd3, 0x5, 0x5, 0x0, 0x48);
+				rssi_adjust_val = 20;
+				break;
+			case 12:
+				halbtc8812a1ant_SetFwPstdma(btcoexist, 0xeb, 0xa, 0x3, 0x31, 0x18);
+				break;
+
+			case 15:
+				halbtc8812a1ant_SetFwPstdma(btcoexist, 0x13, 0xa, 0x3, 0x8, 0x0);
+				break;
+			case 16:
+				halbtc8812a1ant_SetFwPstdma(btcoexist, 0x93, 0x15, 0x3, 0x10, 0x0);
+				rssi_adjust_val = 18;
+				break;
+
+			case 18:
+				halbtc8812a1ant_SetFwPstdma(btcoexist, 0x93, 0x25, 0x3, 0x10, 0x0);
+				rssi_adjust_val = 14;
+				break;			
+				
+			case 20:
+				halbtc8812a1ant_SetFwPstdma(btcoexist, 0x13, 0x25, 0x25, 0x0, 0x0);
+				break;
+			case 21:
+				halbtc8812a1ant_SetFwPstdma(btcoexist, 0x93, 0x20, 0x3, 0x10, 0x40);
+				break;
+			case 22:
+				halbtc8812a1ant_SetFwPstdma(btcoexist, 0x13, 0x8, 0x8, 0x0, 0x40);
+				break;
+			case 23:
+				halbtc8812a1ant_SetFwPstdma(btcoexist, 0xe3, 0x25, 0x3, 0x31, 0x18);
+				rssi_adjust_val = 22;
+				break;
+			case 24:
+				halbtc8812a1ant_SetFwPstdma(btcoexist, 0xe3, 0x15, 0x3, 0x31, 0x18);
+				rssi_adjust_val = 22;
+				break;
+			case 25:
+				halbtc8812a1ant_SetFwPstdma(btcoexist, 0xe3, 0xa, 0x3, 0x31, 0x18);
+				rssi_adjust_val = 22;
+				break;
+			case 26:
+				halbtc8812a1ant_SetFwPstdma(btcoexist, 0xe3, 0xa, 0x3, 0x31, 0x18);
+				rssi_adjust_val = 22;
+				break;
+			case 27:
+				halbtc8812a1ant_SetFwPstdma(btcoexist, 0xe3, 0x25, 0x3, 0x31, 0x98);
+				rssi_adjust_val = 22;
+				break;
+			case 28:
+				halbtc8812a1ant_SetFwPstdma(btcoexist, 0x69, 0x25, 0x3, 0x31, 0x0);
+				break;
+			case 29:
+				halbtc8812a1ant_SetFwPstdma(btcoexist, 0xab, 0x1a, 0x1a, 0x1, 0x8);
+				break;
+			case 30:
+				halbtc8812a1ant_SetFwPstdma(btcoexist, 0x93, 0x15, 0x3, 0x14, 0x0);
+				break;
+			case 31:
+				halbtc8812a1ant_SetFwPstdma(btcoexist, 0xd3, 0x1a, 0x1a, 0, 0x58);
+				break;
+			case 32:
+				halbtc8812a1ant_SetFwPstdma(btcoexist, 0xab, 0xa, 0x3, 0x31, 0x88);
+				break;
+			case 33:
+				halbtc8812a1ant_SetFwPstdma(btcoexist, 0xa3, 0x25, 0x3, 0x30, 0x88);
+				break;
+			case 34:
+				halbtc8812a1ant_SetFwPstdma(btcoexist, 0xd3, 0x1a, 0x1a, 0x0, 0x8);
+				break;
+			case 35:
+				halbtc8812a1ant_SetFwPstdma(btcoexist, 0xe3, 0x1a, 0x1a, 0x0, 0x8);
+				break;
+			case 36:
+				halbtc8812a1ant_SetFwPstdma(btcoexist, 0xd3, 0x12, 0x3, 0x14, 0x58);
+				break;
+		}
+	}
+	else
+	{
+		// disable PS tdma
+		switch(type)
+		{
+			case 8:
+				halbtc8812a1ant_SetFwPstdma(btcoexist, 0x8, 0x0, 0x0, 0x0, 0x0);
+				btcoexist->btc_write_1byte(btcoexist, 0x92c, 0x4);
+				break;
+			case 0:
+			default:
+				halbtc8812a1ant_SetFwPstdma(btcoexist, 0x0, 0x0, 0x0, 0x0, 0x0);
+				delay_ms(5);
+				btcoexist->btc_write_1byte(btcoexist, 0x92c, 0x20);
+				break;
+			case 9:
+				halbtc8812a1ant_SetFwPstdma(btcoexist, 0x0, 0x0, 0x0, 0x0, 0x0);
+				btcoexist->btc_write_1byte(btcoexist, 0x92c, 0x4);
+				break;
+			case 10:
+				halbtc8812a1ant_SetFwPstdma(btcoexist, 0x0, 0x0, 0x0, 0x8, 0x0);
+				delay_ms(5);
+				btcoexist->btc_write_1byte(btcoexist, 0x92c, 0x20);
+				break;
+		}
+	}
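+	// Note: rssi_adjust_val is reset to 0 here, so the per-case values assigned
+	// in the "turn on" switch above are not the ones reported via btc_set().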
+	rssi_adjust_val = 0;
+	btcoexist->btc_set(btcoexist, BTC_SET_U1_RSSI_ADJ_VAL_FOR_1ANT_COEX_TYPE, &rssi_adjust_val);
+
+	// update pre state
+	coex_dm->pre_ps_tdma_on = coex_dm->cur_ps_tdma_on;
+	coex_dm->pre_ps_tdma = coex_dm->cur_ps_tdma;
+}
+
+void
+halbtc8812a1ant_CoexAllOff(
+	 	PBTC_COEXIST		btcoexist
+	)
+{
+	// fw all off
+	halbtc8812a1ant_FwDacSwingLvl(btcoexist, NORMAL_EXEC, 6);
+	halbtc8812a1ant_DecBtPwr(btcoexist, NORMAL_EXEC, false);
+
+	// sw all off
+	halbtc8812a1ant_SwMechanism1(btcoexist,false,false,false,false);
+	halbtc8812a1ant_SwMechanism2(btcoexist,false,false,false,0x18);
+
+
+	// hw all off
+	halbtc8812a1ant_CoexTable(btcoexist, NORMAL_EXEC, 0x55555555, 0x55555555, 0xffff, 0x3);
+}
+
+void
+halbtc8812a1ant_WifiParaAdjust(
+	 	PBTC_COEXIST		btcoexist,
+	 	BOOLEAN			enable
+	)
+{
+	if(enable)
+	{
+		halbtc8812a1ant_LowPenaltyRa(btcoexist, NORMAL_EXEC, true);
+	}
+	else
+	{
+		halbtc8812a1ant_LowPenaltyRa(btcoexist, NORMAL_EXEC, false);
+	}
+}
+
+BOOLEAN
+halbtc8812a1ant_IsCommonAction(
+	 	PBTC_COEXIST		btcoexist
+	)
+{
+	BOOLEAN			common=false, wifi_connected=false, wifi_busy=false;
+
+	btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_CONNECTED, &wifi_connected);
+	btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_BUSY, &wifi_busy);
+
+	//halbtc8812a1ant_CoexTable(btcoexist, NORMAL_EXEC, 0x55555555, 0x55555555, 0xffff, 0x3);
+
+	if(!wifi_connected && 
+		BT_8812A_1ANT_BT_STATUS_NON_CONNECTED_IDLE == coex_dm->bt_status)
+	{
+		BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], Wifi non connected-idle + BT non connected-idle!!\n"));
+		halbtc8812a1ant_FwDacSwingLvl(btcoexist, NORMAL_EXEC, 6);
+		halbtc8812a1ant_DecBtPwr(btcoexist, NORMAL_EXEC, false);
+		
+		halbtc8812a1ant_SwMechanism1(btcoexist,false,false,false,false);
+		halbtc8812a1ant_SwMechanism2(btcoexist,false,false,false,0x18);
+
+		common = true;
+	}
+	else if(wifi_connected && 
+		(BT_8812A_1ANT_BT_STATUS_NON_CONNECTED_IDLE == coex_dm->bt_status) )
+	{
+		BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], Wifi connected + BT non connected-idle!!\n"));
+		halbtc8812a1ant_FwDacSwingLvl(btcoexist, NORMAL_EXEC, 6);
+		halbtc8812a1ant_DecBtPwr(btcoexist, NORMAL_EXEC, true);
+
+		halbtc8812a1ant_SwMechanism1(btcoexist,false,false,false,false);
+		halbtc8812a1ant_SwMechanism2(btcoexist,false,false,false,0x18);
+
+		common = true;
+	}
+	else if(!wifi_connected && 
+		(BT_8812A_1ANT_BT_STATUS_CONNECTED_IDLE == coex_dm->bt_status) )
+	{
+		BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], Wifi non connected-idle + BT connected-idle!!\n"));
+		halbtc8812a1ant_FwDacSwingLvl(btcoexist, NORMAL_EXEC, 6);
+		halbtc8812a1ant_DecBtPwr(btcoexist, NORMAL_EXEC, false);
+
+		halbtc8812a1ant_SwMechanism1(btcoexist,false,false,false,false);
+		halbtc8812a1ant_SwMechanism2(btcoexist,false,false,false,0x18);
+
+		common = true;
+	}
+	else if(wifi_connected && 
+		(BT_8812A_1ANT_BT_STATUS_CONNECTED_IDLE == coex_dm->bt_status) )
+	{
+		BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], Wifi connected + BT connected-idle!!\n"));
+		halbtc8812a1ant_FwDacSwingLvl(btcoexist, NORMAL_EXEC, 6);
+		halbtc8812a1ant_DecBtPwr(btcoexist, NORMAL_EXEC, false);
+
+		halbtc8812a1ant_SwMechanism1(btcoexist,true,true,true,true);
+		halbtc8812a1ant_SwMechanism2(btcoexist,false,false,false,0x18);
+
+		common = true;
+	}
+	else if(!wifi_connected && 
+		(BT_8812A_1ANT_BT_STATUS_CONNECTED_IDLE != coex_dm->bt_status) )
+	{
+		BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], Wifi non connected-idle + BT Busy!!\n"));
+		halbtc8812a1ant_FwDacSwingLvl(btcoexist, NORMAL_EXEC, 6);
+		halbtc8812a1ant_DecBtPwr(btcoexist, NORMAL_EXEC, false);
+
+		halbtc8812a1ant_SwMechanism1(btcoexist,false,false,false,false);
+		halbtc8812a1ant_SwMechanism2(btcoexist,false,false,false,0x18);
+		
+		common = true;
+	}
+	else
+	{
+		halbtc8812a1ant_SwMechanism1(btcoexist,true,true,true,true);
+		
+		common = false;
+	}
+	
+	return common;
+}
+
+
+void
+halbtc8812a1ant_TdmaDurationAdjustForAcl(
+	 	PBTC_COEXIST		btcoexist
+	)
+{
+	static s4Byte		up,dn,m,n,wait_count;
+	s4Byte			result;   //0: no change, +1: increase WiFi duration, -1: decrease WiFi duration
+	u1Byte			retry_count=0, bt_info_ext;
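+	// Adaptive tuning state: "up" counts consecutive 2-second periods with no BT
+	// retries (reaching n widens the WiFi slot), "dn" counts periods with a few
+	// (1-3) retries (2 of them narrows it), and "m" stretches the recheck window n = 3*m.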
+
+	BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW, ("[BTCoex], halbtc8812a1ant_TdmaDurationAdjustForAcl()\n"));
+	if(coex_dm->reset_tdma_adjust)
+	{
+		coex_dm->reset_tdma_adjust = false;
+		BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_DETAIL, ("[BTCoex], first run TdmaDurationAdjust()!!\n"));
+
+		halbtc8812a1ant_PsTdma(btcoexist, NORMAL_EXEC, true, 2);
+		coex_dm->ps_tdma_du_adj_type = 2;
+		//============
+		up = 0;
+		dn = 0;
+		m = 1;
+		n = 3;
+		result = 0;
+		wait_count = 0;
+	}
+	else
+	{
+		// acquire the BT TRx retry count from BT_Info byte2
+		retry_count = coex_sta->bt_retry_cnt;
+		bt_info_ext = coex_sta->bt_info_ext;
+		BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_DETAIL, ("[BTCoex], retry_count = %d\n", retry_count));
+		BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_DETAIL, ("[BTCoex], up=%d, dn=%d, m=%d, n=%d, wait_count=%d\n", 
+			up, dn, m, n, wait_count));
+		result = 0;
+		wait_count++; 
+		  
+		if(retry_count == 0)  // no retry in the last 2-second duration
+		{
+			up++;
+			dn--;
+
+			if (dn <= 0)
+				dn = 0;				 
+
+			if(up >= n)	// retry count has been 0 for n consecutive 2-second periods, so widen the WiFi duration
+			{
+				wait_count = 0; 
+				n = 3;
+				up = 0;
+				dn = 0;
+				result = 1; 
+				BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_DETAIL, ("[BTCoex], Increase wifi duration!!\n"));
+			}
+		}
+		else if (retry_count <= 3)	// <= 3 retries in the last 2-second duration
+		{
+			up--; 
+			dn++;
+
+			if (up <= 0)
+				up = 0;
+
+			if (dn == 2)	// retry count has been < 3 for 2 consecutive 2-second periods, so narrow the WiFi duration
+			{
+				if (wait_count <= 2)
+					m++; // avoid bouncing back and forth between two levels
+				else
+					m = 1;
+
+				if ( m >= 20) // cap m at 20, i.e. recheck whether to adjust the WiFi duration at most every 120 seconds
+					m = 20;
+
+				n = 3*m;
+				up = 0;
+				dn = 0;
+				wait_count = 0;
+				result = -1; 
+				BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_DETAIL, ("[BTCoex], Decrease wifi duration for retryCounter<3!!\n"));
+			}
+		}
+		else  // retry count > 3: even a single 2-second period with more than 3 retries narrows the WiFi duration
+		{
+			if (wait_count == 1)
+				m++; // avoid bouncing back and forth between two levels
+			else
+				m = 1;
+
+			if ( m >= 20) // cap m at 20, i.e. recheck whether to adjust the WiFi duration at most every 120 seconds
+				m = 20;
+
+			n = 3*m;
+			up = 0;
+			dn = 0;
+			wait_count = 0; 
+			result = -1;
+			BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_DETAIL, ("[BTCoex], Decrease wifi duration for retryCounter>3!!\n"));
+		}
+
+		if(result == -1)
+		{
+			if( (BT_INFO_8812A_1ANT_A2DP_BASIC_RATE(bt_info_ext)) &&
+				((coex_dm->cur_ps_tdma == 1) ||(coex_dm->cur_ps_tdma == 2)) )
+			{
+				halbtc8812a1ant_PsTdma(btcoexist, NORMAL_EXEC, true, 9);
+				coex_dm->ps_tdma_du_adj_type = 9;
+			}
+			else if(coex_dm->cur_ps_tdma == 1)
+			{
+				halbtc8812a1ant_PsTdma(btcoexist, NORMAL_EXEC, true, 2);
+				coex_dm->ps_tdma_du_adj_type = 2;
+			}
+			else if(coex_dm->cur_ps_tdma == 2)
+			{
+				halbtc8812a1ant_PsTdma(btcoexist, NORMAL_EXEC, true, 9);
+				coex_dm->ps_tdma_du_adj_type = 9;
+			}
+			else if(coex_dm->cur_ps_tdma == 9)
+			{
+				halbtc8812a1ant_PsTdma(btcoexist, NORMAL_EXEC, true, 11);
+				coex_dm->ps_tdma_du_adj_type = 11;
+			}
+		}
+		else if(result == 1)
+		{
+			if( (BT_INFO_8812A_1ANT_A2DP_BASIC_RATE(bt_info_ext)) &&
+				((coex_dm->cur_ps_tdma == 1) ||(coex_dm->cur_ps_tdma == 2)) )
+			{
+				halbtc8812a1ant_PsTdma(btcoexist, NORMAL_EXEC, true, 9);
+				coex_dm->ps_tdma_du_adj_type = 9;
+			}
+			else if(coex_dm->cur_ps_tdma == 11)
+			{
+				halbtc8812a1ant_PsTdma(btcoexist, NORMAL_EXEC, true, 9);
+				coex_dm->ps_tdma_du_adj_type = 9;
+			}
+			else if(coex_dm->cur_ps_tdma == 9)
+			{
+				halbtc8812a1ant_PsTdma(btcoexist, NORMAL_EXEC, true, 2);
+				coex_dm->ps_tdma_du_adj_type = 2;
+			}
+			else if(coex_dm->cur_ps_tdma == 2)
+			{
+				halbtc8812a1ant_PsTdma(btcoexist, NORMAL_EXEC, true, 1);
+				coex_dm->ps_tdma_du_adj_type = 1;
+			}
+		}
+
+		if( coex_dm->cur_ps_tdma != 1 &&
+			coex_dm->cur_ps_tdma != 2 &&
+			coex_dm->cur_ps_tdma != 9 &&
+			coex_dm->cur_ps_tdma != 11 )
+		{
+			// recover to previous adjust type
+			halbtc8812a1ant_PsTdma(btcoexist, NORMAL_EXEC, true, coex_dm->ps_tdma_du_adj_type);
+		}
+	}
+}
+
+u1Byte
+halbtc8812a1ant_PsTdmaTypeByWifiRssi(
+	 	s4Byte	wifi_rssi,
+	 	s4Byte	pre_wifi_rssi,
+	 	u1Byte	wifi_rssi_thresh
+	)
+{
+	u1Byte	ps_tdma_type=0;
+	
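+	// Simple hysteresis: while RSSI is rising, switch to type 26 only above
+	// thresh+5; while falling, drop back to type 25 only below the bare threshold.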
+	if(wifi_rssi > pre_wifi_rssi)
+	{
+		if(wifi_rssi > (wifi_rssi_thresh+5))
+		{
+			ps_tdma_type = 26;
+		}
+		else
+		{
+			ps_tdma_type = 25;
+		}
+	}
+	else
+	{
+		if(wifi_rssi > wifi_rssi_thresh)
+		{
+			ps_tdma_type = 26;
+		}
+		else
+		{
+			ps_tdma_type = 25;
+		}
+	}
+
+	return ps_tdma_type;
+}
+
+void
+halbtc8812a1ant_PsTdmaCheckForPowerSaveState(
+	 	PBTC_COEXIST		btcoexist,
+	 	BOOLEAN			new_ps_state
+	)
+{
+	u1Byte	lps_mode=0x0;
+
+	btcoexist->btc_get(btcoexist, BTC_GET_U1_LPS_MODE, &lps_mode);
+	
+	if(lps_mode)	// already under LPS state
+	{
+		if(new_ps_state)		
+		{
+			// keep state under LPS, do nothing.
+		}
+		else
+		{
+			// will leave LPS state, turn off psTdma first
+			halbtc8812a1ant_PsTdma(btcoexist, NORMAL_EXEC, false, 0);
+		}
+	}
+	else						// NO PS state
+	{
+		if(new_ps_state)
+		{
+			// will enter LPS state, turn off psTdma first
+			halbtc8812a1ant_PsTdma(btcoexist, NORMAL_EXEC, false, 0);
+		}
+		else
+		{
+			// keep state under NO PS state, do nothing.
+		}
+	}
+}
+
+// SCO only or SCO+PAN(HS)
+void
+halbtc8812a1ant_ActionSco(
+	 	PBTC_COEXIST		btcoexist
+	)
+{
+	u1Byte	wifi_rssi_state;
+	u4Byte	wifi_bw;
+
+	wifi_rssi_state = halbtc8812a1ant_WifiRssiState(btcoexist, 0, 2, 25, 0);
+
+	halbtc8812a1ant_FwDacSwingLvl(btcoexist, NORMAL_EXEC, 4);
+
+	if(halbtc8812a1ant_NeedToDecBtPwr(btcoexist))
+		halbtc8812a1ant_DecBtPwr(btcoexist, NORMAL_EXEC, true);
+	else	
+		halbtc8812a1ant_DecBtPwr(btcoexist, NORMAL_EXEC, false);
+	
+	btcoexist->btc_get(btcoexist, BTC_GET_U4_WIFI_BW, &wifi_bw);
+
+	if(BTC_WIFI_BW_HT40 == wifi_bw)
+	{
+		// sw mechanism
+		if( (wifi_rssi_state == BTC_RSSI_STATE_HIGH) ||
+			(wifi_rssi_state == BTC_RSSI_STATE_STAY_HIGH) )
+		{
+			  halbtc8812a1ant_SwMechanism2(btcoexist,true,true,false,0x18);
+		}
+		else
+		{
+			  halbtc8812a1ant_SwMechanism2(btcoexist,false,true,false,0x18);
+		}
+	}
+	else
+	{
+		// sw mechanism
+		if( (wifi_rssi_state == BTC_RSSI_STATE_HIGH) ||
+			(wifi_rssi_state == BTC_RSSI_STATE_STAY_HIGH) )
+		{
+			halbtc8812a1ant_SwMechanism2(btcoexist,true,true,false,0x18);
+		}
+		else
+		{
+			halbtc8812a1ant_SwMechanism2(btcoexist,false,false,false,0x18);
+		}		
+	}
+}
+
+
+void
+halbtc8812a1ant_ActionHid(
+	 	PBTC_COEXIST		btcoexist
+	)
+{
+	u1Byte	wifi_rssi_state, bt_rssi_state;	
+	u4Byte	wifi_bw;
+
+	wifi_rssi_state = halbtc8812a1ant_WifiRssiState(btcoexist, 0, 2, 25, 0);
+	bt_rssi_state = halbtc8812a1ant_BtRssiState(2, 50, 0);
+
+	halbtc8812a1ant_FwDacSwingLvl(btcoexist, NORMAL_EXEC, 6);
+
+	if(halbtc8812a1ant_NeedToDecBtPwr(btcoexist))
+		halbtc8812a1ant_DecBtPwr(btcoexist, NORMAL_EXEC, true);
+	else	
+		halbtc8812a1ant_DecBtPwr(btcoexist, NORMAL_EXEC, false);
+
+	btcoexist->btc_get(btcoexist, BTC_GET_U4_WIFI_BW, &wifi_bw);
+
+	if(BTC_WIFI_BW_HT40 == wifi_bw)
+	{
+		// sw mechanism
+		if( (wifi_rssi_state == BTC_RSSI_STATE_HIGH) ||
+			(wifi_rssi_state == BTC_RSSI_STATE_STAY_HIGH) )
+		{
+			halbtc8812a1ant_SwMechanism2(btcoexist,true,false,false,0x18);
+		}
+		else
+		{
+			halbtc8812a1ant_SwMechanism2(btcoexist,false,false,false,0x18);
+		}
+	}
+	else
+	{
+		// sw mechanism
+		if( (wifi_rssi_state == BTC_RSSI_STATE_HIGH) ||
+			(wifi_rssi_state == BTC_RSSI_STATE_STAY_HIGH) )
+		{
+			halbtc8812a1ant_SwMechanism2(btcoexist,true,true,false,0x18);
+		}
+		else
+		{
+			halbtc8812a1ant_SwMechanism2(btcoexist,false,false,false,0x18);
+		}		
+	}
+}
+
+//A2DP only / PAN(EDR) only/ A2DP+PAN(HS)
+void
+halbtc8812a1ant_ActionA2dp(
+	 	PBTC_COEXIST		btcoexist
+	)
+{
+	u1Byte		wifi_rssi_state, bt_rssi_state;
+	u4Byte		wifi_bw;
+
+	wifi_rssi_state = halbtc8812a1ant_WifiRssiState(btcoexist, 0, 2, 25, 0);
+	bt_rssi_state = halbtc8812a1ant_BtRssiState(2, 50, 0);
+
+	halbtc8812a1ant_FwDacSwingLvl(btcoexist, NORMAL_EXEC, 6);
+
+	if(halbtc8812a1ant_NeedToDecBtPwr(btcoexist))
+		halbtc8812a1ant_DecBtPwr(btcoexist, NORMAL_EXEC, true);
+	else	
+		halbtc8812a1ant_DecBtPwr(btcoexist, NORMAL_EXEC, false);
+
+	btcoexist->btc_get(btcoexist, BTC_GET_U4_WIFI_BW, &wifi_bw);
+
+	if(BTC_WIFI_BW_HT40 == wifi_bw)
+	{
+		// sw mechanism
+		if( (wifi_rssi_state == BTC_RSSI_STATE_HIGH) ||
+			(wifi_rssi_state == BTC_RSSI_STATE_STAY_HIGH) )
+		{
+			halbtc8812a1ant_SwMechanism2(btcoexist,true,true,false,0x18);
+		}
+		else
+		{
+			halbtc8812a1ant_SwMechanism2(btcoexist,false,true,false,0x18);
+		}
+	}
+	else
+	{
+		// sw mechanism
+		if( (wifi_rssi_state == BTC_RSSI_STATE_HIGH) ||
+			(wifi_rssi_state == BTC_RSSI_STATE_STAY_HIGH) )
+		{
+			halbtc8812a1ant_SwMechanism2(btcoexist,true,true,false,0x18);
+		}
+		else
+		{
+			halbtc8812a1ant_SwMechanism2(btcoexist,false,false,false,0x18);
+		}		
+	}
+}
+
+void
+halbtc8812a1ant_ActionA2dpPanHs(
+	 	PBTC_COEXIST		btcoexist
+	)
+{
+	u1Byte		wifi_rssi_state, bt_rssi_state, bt_info_ext;
+	u4Byte		wifi_bw;
+
+	bt_info_ext = coex_sta->bt_info_ext;
+	wifi_rssi_state = halbtc8812a1ant_WifiRssiState(btcoexist, 0, 2, 25, 0);
+	bt_rssi_state = halbtc8812a1ant_BtRssiState(2, 50, 0);
+
+	halbtc8812a1ant_FwDacSwingLvl(btcoexist, NORMAL_EXEC, 6);
+
+	if(halbtc8812a1ant_NeedToDecBtPwr(btcoexist))
+		halbtc8812a1ant_DecBtPwr(btcoexist, NORMAL_EXEC, true);
+	else	
+		halbtc8812a1ant_DecBtPwr(btcoexist, NORMAL_EXEC, false);
+
+	btcoexist->btc_get(btcoexist, BTC_GET_U4_WIFI_BW, &wifi_bw);
+
+	if(BTC_WIFI_BW_HT40 == wifi_bw)
+	{
+		// sw mechanism
+		if( (wifi_rssi_state == BTC_RSSI_STATE_HIGH) ||
+			(wifi_rssi_state == BTC_RSSI_STATE_STAY_HIGH) )
+		{
+			halbtc8812a1ant_SwMechanism2(btcoexist,true,true,false,0x18);
+		}
+		else
+		{
+			halbtc8812a1ant_SwMechanism2(btcoexist,false,true,false,0x18);
+		}
+	}
+	else
+	{
+		// sw mechanism
+		if( (wifi_rssi_state == BTC_RSSI_STATE_HIGH) ||
+			(wifi_rssi_state == BTC_RSSI_STATE_STAY_HIGH) )
+		{
+			halbtc8812a1ant_SwMechanism2(btcoexist,true,true,false,0x18);
+		}
+		else
+		{
+			halbtc8812a1ant_SwMechanism2(btcoexist,false,false,false,0x18);
+		}		
+	}
+}
+
+void
+halbtc8812a1ant_ActionPanEdr(
+	 	PBTC_COEXIST		btcoexist
+	)
+{
+	u1Byte		wifi_rssi_state, bt_rssi_state;
+	u4Byte		wifi_bw;
+
+	wifi_rssi_state = halbtc8812a1ant_WifiRssiState(btcoexist, 0, 2, 25, 0);
+	bt_rssi_state = halbtc8812a1ant_BtRssiState(2, 50, 0);
+
+	halbtc8812a1ant_FwDacSwingLvl(btcoexist, NORMAL_EXEC, 6);
+
+	if(halbtc8812a1ant_NeedToDecBtPwr(btcoexist))
+		halbtc8812a1ant_DecBtPwr(btcoexist, NORMAL_EXEC, true);
+	else	
+		halbtc8812a1ant_DecBtPwr(btcoexist, NORMAL_EXEC, false);
+
+	btcoexist->btc_get(btcoexist, BTC_GET_U4_WIFI_BW, &wifi_bw);
+
+	if(BTC_WIFI_BW_HT40 == wifi_bw)
+	{
+		// sw mechanism
+		if( (wifi_rssi_state == BTC_RSSI_STATE_HIGH) ||
+			(wifi_rssi_state == BTC_RSSI_STATE_STAY_HIGH) )
+		{
+			halbtc8812a1ant_SwMechanism2(btcoexist,true,true,false,0x18);
+		}
+		else
+		{
+			halbtc8812a1ant_SwMechanism2(btcoexist,false,true,false,0x18);
+		}
+	}
+	else
+	{
+		// sw mechanism
+		if( (wifi_rssi_state == BTC_RSSI_STATE_HIGH) ||
+			(wifi_rssi_state == BTC_RSSI_STATE_STAY_HIGH) )
+		{
+			halbtc8812a1ant_SwMechanism2(btcoexist,true,true,false,0x18);
+		}
+		else
+		{
+			halbtc8812a1ant_SwMechanism2(btcoexist,false,false,false,0x18);
+		}
+	}
+}
+
+
+//PAN(HS) only
+void
+halbtc8812a1ant_ActionPanHs(
+	 	PBTC_COEXIST		btcoexist
+	)
+{
+	u1Byte		wifi_rssi_state, bt_rssi_state;
+	u4Byte		wifi_bw;
+
+	wifi_rssi_state = halbtc8812a1ant_WifiRssiState(btcoexist, 0, 2, 25, 0);
+	bt_rssi_state = halbtc8812a1ant_BtRssiState(2, 50, 0);
+
+	halbtc8812a1ant_FwDacSwingLvl(btcoexist, NORMAL_EXEC, 6);
+
+	btcoexist->btc_get(btcoexist, BTC_GET_U4_WIFI_BW, &wifi_bw);
+
+	if(BTC_WIFI_BW_HT40 == wifi_bw)
+	{
+		// fw mechanism
+		if( (wifi_rssi_state == BTC_RSSI_STATE_HIGH) ||
+			(wifi_rssi_state == BTC_RSSI_STATE_STAY_HIGH) )
+		{
+			halbtc8812a1ant_DecBtPwr(btcoexist, NORMAL_EXEC, true);
+		}
+		else
+		{
+			halbtc8812a1ant_DecBtPwr(btcoexist, NORMAL_EXEC, false);
+		}
+
+		// sw mechanism
+		if( (wifi_rssi_state == BTC_RSSI_STATE_HIGH) ||
+			(wifi_rssi_state == BTC_RSSI_STATE_STAY_HIGH) )
+		{
+			halbtc8812a1ant_SwMechanism2(btcoexist,true,true,false,0x18);
+		}
+		else
+		{
+			halbtc8812a1ant_SwMechanism2(btcoexist,false,true,false,0x18);
+		}
+	}
+	else
+	{
+		// fw mechanism
+		if( (wifi_rssi_state == BTC_RSSI_STATE_HIGH) ||
+			(wifi_rssi_state == BTC_RSSI_STATE_STAY_HIGH) )
+		{
+			halbtc8812a1ant_DecBtPwr(btcoexist, NORMAL_EXEC, true);
+		}
+		else
+		{
+			halbtc8812a1ant_DecBtPwr(btcoexist, NORMAL_EXEC, false);
+		}
+
+		// sw mechanism
+		if( (wifi_rssi_state == BTC_RSSI_STATE_HIGH) ||
+			(wifi_rssi_state == BTC_RSSI_STATE_STAY_HIGH) )
+		{
+			halbtc8812a1ant_SwMechanism2(btcoexist,true,true,false,0x18);
+		}
+		else
+		{
+			halbtc8812a1ant_SwMechanism2(btcoexist,false,false,false,0x18);
+		}
+	}
+}
+
+//PAN(EDR)+A2DP
+void
+halbtc8812a1ant_ActionPanEdrA2dp(
+	 	PBTC_COEXIST		btcoexist
+	)
+{
+	u1Byte		wifi_rssi_state, bt_rssi_state, bt_info_ext;
+	u4Byte		wifi_bw;
+
+	bt_info_ext = coex_sta->bt_info_ext;
+	wifi_rssi_state = halbtc8812a1ant_WifiRssiState(btcoexist, 0, 2, 25, 0);
+	bt_rssi_state = halbtc8812a1ant_BtRssiState(2, 50, 0);
+
+	halbtc8812a1ant_FwDacSwingLvl(btcoexist, NORMAL_EXEC, 6);
+
+	if(halbtc8812a1ant_NeedToDecBtPwr(btcoexist))
+		halbtc8812a1ant_DecBtPwr(btcoexist, NORMAL_EXEC, true);
+	else	
+		halbtc8812a1ant_DecBtPwr(btcoexist, NORMAL_EXEC, false);
+
+	btcoexist->btc_get(btcoexist, BTC_GET_U4_WIFI_BW, &wifi_bw);
+
+	if(BTC_WIFI_BW_HT40 == wifi_bw)
+	{
+		// sw mechanism
+		if( (wifi_rssi_state == BTC_RSSI_STATE_HIGH) ||
+			(wifi_rssi_state == BTC_RSSI_STATE_STAY_HIGH) )
+		{
+			halbtc8812a1ant_SwMechanism2(btcoexist,true,true,false,0x18);
+		}
+		else
+		{
+			halbtc8812a1ant_SwMechanism2(btcoexist,false,true,false,0x18);
+		}
+	}
+	else
+	{
+		// sw mechanism
+		if( (wifi_rssi_state == BTC_RSSI_STATE_HIGH) ||
+			(wifi_rssi_state == BTC_RSSI_STATE_STAY_HIGH) )
+		{
+			halbtc8812a1ant_SwMechanism2(btcoexist,true,true,false,0x18);
+		}
+		else
+		{
+			halbtc8812a1ant_SwMechanism2(btcoexist,false,false,false,0x18);
+		}
+	}
+}
+
+void
+halbtc8812a1ant_ActionPanEdrHid(
+	 	PBTC_COEXIST		btcoexist
+	)
+{
+	u1Byte		wifi_rssi_state, bt_rssi_state;
+	u4Byte		wifi_bw;
+
+	wifi_rssi_state = halbtc8812a1ant_WifiRssiState(btcoexist, 0, 2, 25, 0);
+	bt_rssi_state = halbtc8812a1ant_BtRssiState(2, 50, 0);
+
+	halbtc8812a1ant_FwDacSwingLvl(btcoexist, NORMAL_EXEC, 6);
+
+	if(halbtc8812a1ant_NeedToDecBtPwr(btcoexist))
+		halbtc8812a1ant_DecBtPwr(btcoexist, NORMAL_EXEC, true);
+	else	
+		halbtc8812a1ant_DecBtPwr(btcoexist, NORMAL_EXEC, false);
+
+	btcoexist->btc_get(btcoexist, BTC_GET_U4_WIFI_BW, &wifi_bw);
+
+	if(BTC_WIFI_BW_HT40 == wifi_bw)
+	{
+		// sw mechanism
+		if( (wifi_rssi_state == BTC_RSSI_STATE_HIGH) ||
+			(wifi_rssi_state == BTC_RSSI_STATE_STAY_HIGH) )
+		{
+			halbtc8812a1ant_SwMechanism2(btcoexist,true,true,false,0x18);
+		}
+		else
+		{
+			halbtc8812a1ant_SwMechanism2(btcoexist,false,true,false,0x18);
+		}
+	}
+	else
+	{		
+		// sw mechanism
+		if( (wifi_rssi_state == BTC_RSSI_STATE_HIGH) ||
+			(wifi_rssi_state == BTC_RSSI_STATE_STAY_HIGH) )
+		{
+			halbtc8812a1ant_SwMechanism2(btcoexist,true,true,false,0x18);
+		}
+		else
+		{
+			halbtc8812a1ant_SwMechanism2(btcoexist,false,false,false,0x18);
+		}
+	}
+}
+
+// HID+A2DP+PAN(EDR)
+void
+halbtc8812a1ant_ActionHidA2dpPanEdr(
+	 	PBTC_COEXIST		btcoexist
+	)
+{
+	u1Byte		wifi_rssi_state, bt_rssi_state, bt_info_ext;
+	u4Byte		wifi_bw;
+
+	bt_info_ext = coex_sta->bt_info_ext;
+	wifi_rssi_state = halbtc8812a1ant_WifiRssiState(btcoexist, 0, 2, 25, 0);
+	bt_rssi_state = halbtc8812a1ant_BtRssiState(2, 50, 0);
+
+	halbtc8812a1ant_FwDacSwingLvl(btcoexist, NORMAL_EXEC, 6);
+
+	if(halbtc8812a1ant_NeedToDecBtPwr(btcoexist))
+		halbtc8812a1ant_DecBtPwr(btcoexist, NORMAL_EXEC, true);
+	else	
+		halbtc8812a1ant_DecBtPwr(btcoexist, NORMAL_EXEC, false);
+
+	btcoexist->btc_get(btcoexist, BTC_GET_U4_WIFI_BW, &wifi_bw);
+
+	if(BTC_WIFI_BW_HT40 == wifi_bw)
+	{	
+		// sw mechanism
+		if( (wifi_rssi_state == BTC_RSSI_STATE_HIGH) ||
+			(wifi_rssi_state == BTC_RSSI_STATE_STAY_HIGH) )
+		{
+			halbtc8812a1ant_SwMechanism2(btcoexist,true,true,false,0x18);
+		}
+		else
+		{
+			halbtc8812a1ant_SwMechanism2(btcoexist,false,true,false,0x18);
+		}
+	}
+	else
+	{
+		// sw mechanism
+		if( (wifi_rssi_state == BTC_RSSI_STATE_HIGH) ||
+			(wifi_rssi_state == BTC_RSSI_STATE_STAY_HIGH) )
+		{
+			halbtc8812a1ant_SwMechanism2(btcoexist,true,true,false,0x18);
+		}
+		else
+		{
+			halbtc8812a1ant_SwMechanism2(btcoexist,false,false,false,0x18);
+		}
+	}
+}
+
+void
+halbtc8812a1ant_ActionHidA2dp(
+	 	PBTC_COEXIST		btcoexist
+	)
+{
+	u1Byte		wifi_rssi_state, bt_rssi_state, bt_info_ext;
+	u4Byte		wifi_bw;
+
+	bt_info_ext = coex_sta->bt_info_ext;
+	wifi_rssi_state = halbtc8812a1ant_WifiRssiState(btcoexist, 0, 2, 25, 0);
+	bt_rssi_state = halbtc8812a1ant_BtRssiState(2, 50, 0);
+
+	if(halbtc8812a1ant_NeedToDecBtPwr(btcoexist))
+		halbtc8812a1ant_DecBtPwr(btcoexist, NORMAL_EXEC, true);
+	else	
+		halbtc8812a1ant_DecBtPwr(btcoexist, NORMAL_EXEC, false);
+
+	btcoexist->btc_get(btcoexist, BTC_GET_U4_WIFI_BW, &wifi_bw);
+
+	if(BTC_WIFI_BW_HT40 == wifi_bw)
+	{		
+		// sw mechanism
+		if( (wifi_rssi_state == BTC_RSSI_STATE_HIGH) ||
+			(wifi_rssi_state == BTC_RSSI_STATE_STAY_HIGH) )
+		{
+			halbtc8812a1ant_SwMechanism2(btcoexist,true,true,false,0x18);
+		}
+		else
+		{
+			halbtc8812a1ant_SwMechanism2(btcoexist,false,true,false,0x18);
+		}
+	}
+	else
+	{
+		// sw mechanism
+		if( (wifi_rssi_state == BTC_RSSI_STATE_HIGH) ||
+			(wifi_rssi_state == BTC_RSSI_STATE_STAY_HIGH) )
+		{
+			halbtc8812a1ant_SwMechanism2(btcoexist,true,true,false,0x18);
+		}
+		else
+		{
+			halbtc8812a1ant_SwMechanism2(btcoexist,false,false,false,0x18);
+		}
+	}
+}
+
+void
+halbtc8812a1ant_ActionHs(
+	 	PBTC_COEXIST		btcoexist,
+	 	BOOLEAN			hs_connecting
+	)
+{
+	BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], Action for HS, hs_connecting=%d!!!\n", hs_connecting));
+	halbtc8812a1ant_PsTdma(btcoexist, NORMAL_EXEC, false, 8);
+
+	if(hs_connecting)
+	{
+		halbtc8812a1ant_CoexTable(btcoexist, FORCE_EXEC, 0xaaaaaaaa, 0xaaaaaaaa, 0xffff, 0x3);
+	}
+	else
+	{
+		if((coex_sta->high_priority_tx+coex_sta->high_priority_rx+
+			coex_sta->low_priority_tx+coex_sta->low_priority_rx)<=1200)
+			halbtc8812a1ant_CoexTable(btcoexist, FORCE_EXEC, 0xaaaaaaaa, 0xaaaaaaaa, 0xffff, 0x3);
+		else
+			halbtc8812a1ant_CoexTable(btcoexist, FORCE_EXEC, 0xffffffff, 0xffffffff, 0xffff, 0x3);	
+	}
+}
+
+
+void
+halbtc8812a1ant_ActionWifiNotConnected(
+	 	PBTC_COEXIST		btcoexist
+	)
+{
+	BOOLEAN		hs_connecting=false;
+
+	btcoexist->btc_get(btcoexist, BTC_GET_BL_HS_CONNECTING, &hs_connecting);
+	
+	halbtc8812a1ant_PsTdmaCheckForPowerSaveState(btcoexist, false);
+	btcoexist->btc_set(btcoexist, BTC_SET_ACT_LEAVE_LPS, NULL);
+
+	if(hs_connecting)
+	{
+		BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], HS is connecting!!!\n"));
+		halbtc8812a1ant_ActionHs(btcoexist, hs_connecting);
+	}
+	else
+	{
+		halbtc8812a1ant_PsTdma(btcoexist, NORMAL_EXEC, false, 8);
+		halbtc8812a1ant_CoexTable(btcoexist, NORMAL_EXEC, 0x55555555, 0x55555555, 0xffff, 0x3);
+	}
+}
+
+void
+halbtc8812a1ant_ActionWifiNotConnectedAssoAuthScan(
+	 	PBTC_COEXIST		btcoexist
+	)
+{
+	PBTC_STACK_INFO	stack_info=&btcoexist->stack_info;
+	BOOLEAN			hs_connecting=false;
+
+	btcoexist->btc_get(btcoexist, BTC_GET_BL_HS_CONNECTING, &hs_connecting);
+	halbtc8812a1ant_PsTdmaCheckForPowerSaveState(btcoexist, false);
+	btcoexist->btc_set(btcoexist, BTC_SET_ACT_LEAVE_LPS, NULL);
+
+	if(hs_connecting)
+	{
+		halbtc8812a1ant_ActionHs(btcoexist, hs_connecting);
+	}
+	else if(btcoexist->bt_info.bt_disabled)
+	{
+		halbtc8812a1ant_PsTdma(btcoexist, NORMAL_EXEC, false, 9);
+		halbtc8812a1ant_CoexTable(btcoexist, NORMAL_EXEC, 0x55555555, 0x55555555, 0xffff, 0x3);
+	}
+	else if(BT_8812A_1ANT_BT_STATUS_INQ_PAGE == coex_dm->bt_status)
+	{
+		halbtc8812a1ant_PsTdma(btcoexist, NORMAL_EXEC, true, 30);
+		halbtc8812a1ant_CoexTable(btcoexist, NORMAL_EXEC, 0x55555555, 0x55555555, 0xffff, 0x3);
+	}
+	else if( (BT_8812A_1ANT_BT_STATUS_NON_CONNECTED_IDLE == coex_dm->bt_status) ||
+		(BT_8812A_1ANT_BT_STATUS_CONNECTED_IDLE == coex_dm->bt_status) )
+	{
+		halbtc8812a1ant_PsTdma(btcoexist, NORMAL_EXEC, true, 28);
+		halbtc8812a1ant_CoexTable(btcoexist, NORMAL_EXEC, 0x55555555, 0x55555555, 0xffff, 0x3);
+	}
+	else if(BT_8812A_1ANT_BT_STATUS_ACL_BUSY == coex_dm->bt_status)
+	{
+		if(stack_info->hid_exist)
+			halbtc8812a1ant_PsTdma(btcoexist, NORMAL_EXEC, true, 35);
+		else
+			halbtc8812a1ant_PsTdma(btcoexist, NORMAL_EXEC, true, 29);
+		halbtc8812a1ant_CoexTable(btcoexist, NORMAL_EXEC, 0x55555555, 0x5afa5afa, 0xffff, 0x3);
+	}
+	else if( (BT_8812A_1ANT_BT_STATUS_SCO_BUSY == coex_dm->bt_status) ||
+		(BT_8812A_1ANT_BT_STATUS_ACL_SCO_BUSY == coex_dm->bt_status) )
+	{
+		halbtc8812a1ant_PsTdma(btcoexist, NORMAL_EXEC, false, 8);
+		halbtc8812a1ant_CoexTable(btcoexist, NORMAL_EXEC, 0x5aea5aea, 0x5aea5aea, 0xffff, 0x3);
+	}
+	else 
+	{
+		//error condition, should not reach here, record error number for debugging.
+		coex_dm->error_condition = 1;
+	}
+}
+
+void
+halbtc8812a1ant_ActionWifiConnectedScan(
+	 	PBTC_COEXIST		btcoexist
+	)
+{
+	PBTC_STACK_INFO	stack_info=&btcoexist->stack_info;
+	
+	BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], ActionConnectedScan()===>\n"));
+
+	if(btcoexist->bt_info.bt_disabled)
+	{
+		halbtc8812a1ant_PsTdmaCheckForPowerSaveState(btcoexist, false);
+		btcoexist->btc_set(btcoexist, BTC_SET_ACT_LEAVE_LPS, NULL);
+		halbtc8812a1ant_PsTdma(btcoexist, NORMAL_EXEC, false, 9);
+		halbtc8812a1ant_CoexTable(btcoexist, NORMAL_EXEC, 0x55555555, 0x55555555, 0xffff, 0x3);
+	}
+	else
+	{
+		halbtc8812a1ant_PsTdmaCheckForPowerSaveState(btcoexist, true);
+		halbtc8812a1ant_LpsRpwm(btcoexist, NORMAL_EXEC, 0x0, 0x4);
+		// power save must be executed before psTdma.
+		btcoexist->btc_set(btcoexist, BTC_SET_ACT_ENTER_LPS, NULL);
+
+		// psTdma
+		if(BT_8812A_1ANT_BT_STATUS_INQ_PAGE == coex_dm->bt_status)
+		{
+			BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], ActionConnectedScan(), bt is under inquiry/page scan\n"));
+			if(stack_info->sco_exist)
+			{
+				halbtc8812a1ant_PsTdma(btcoexist, NORMAL_EXEC, true, 32);
+				halbtc8812a1ant_CoexTable(btcoexist, NORMAL_EXEC, 0x55555555, 0x5afa5afa, 0xffff, 0x3);
+			}
+			else
+			{
+				halbtc8812a1ant_PsTdma(btcoexist, NORMAL_EXEC, true, 30);
+				halbtc8812a1ant_CoexTable(btcoexist, NORMAL_EXEC, 0x55555555, 0x55555555, 0xffff, 0x3);
+			}
+		}
+		else if( (BT_8812A_1ANT_BT_STATUS_NON_CONNECTED_IDLE == coex_dm->bt_status) ||
+			(BT_8812A_1ANT_BT_STATUS_CONNECTED_IDLE == coex_dm->bt_status) )
+		{
+			halbtc8812a1ant_PsTdma(btcoexist, NORMAL_EXEC, true, 5);
+			halbtc8812a1ant_CoexTable(btcoexist, NORMAL_EXEC, 0x55555555, 0x55555555, 0xffff, 0x3);
+		}
+		else if(BT_8812A_1ANT_BT_STATUS_ACL_BUSY == coex_dm->bt_status)
+		{
+			if(stack_info->hid_exist)
+			{
+				halbtc8812a1ant_PsTdma(btcoexist, NORMAL_EXEC, true, 34);
+				halbtc8812a1ant_CoexTable(btcoexist, NORMAL_EXEC, 0x55555555, 0x5afa5afa, 0xffff, 0x3);
+			}
+			else
+			{
+				halbtc8812a1ant_PsTdma(btcoexist, NORMAL_EXEC, true, 4);
+				halbtc8812a1ant_CoexTable(btcoexist, NORMAL_EXEC, 0x55555555, 0x55555555, 0xffff, 0x3);
+			}
+		}
+		else if( (BT_8812A_1ANT_BT_STATUS_SCO_BUSY == coex_dm->bt_status) ||
+			(BT_8812A_1ANT_BT_STATUS_ACL_SCO_BUSY == coex_dm->bt_status) )
+		{
+			halbtc8812a1ant_PsTdma(btcoexist, NORMAL_EXEC, true, 33);
+			halbtc8812a1ant_CoexTable(btcoexist, NORMAL_EXEC, 0x55555555, 0x5afa5afa, 0xffff, 0x3);
+		}
+		else 
+		{
+			//error condition, should not reach here
+			coex_dm->error_condition = 2;
+		}
+	}
+	BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], ActionConnectedScan()<===\n"));
+}
+
+void
+halbtc8812a1ant_ActionWifiConnectedSpecialPacket(
+	 	PBTC_COEXIST		btcoexist
+	)
+{
+	PBTC_STACK_INFO	stack_info=&btcoexist->stack_info;
+
+	halbtc8812a1ant_PsTdmaCheckForPowerSaveState(btcoexist, false);
+	btcoexist->btc_set(btcoexist, BTC_SET_ACT_LEAVE_LPS, NULL);
+
+	if(btcoexist->bt_info.bt_disabled)
+	{	
+		halbtc8812a1ant_PsTdma(btcoexist, NORMAL_EXEC, false, 9);
+		halbtc8812a1ant_CoexTable(btcoexist, NORMAL_EXEC, 0x55555555, 0x55555555, 0xffff, 0x3);
+	}
+	else
+	{
+		if(BT_8812A_1ANT_BT_STATUS_INQ_PAGE == coex_dm->bt_status)
+		{
+			if(stack_info->sco_exist)
+			{
+				halbtc8812a1ant_PsTdma(btcoexist, NORMAL_EXEC, true, 32);
+				halbtc8812a1ant_CoexTable(btcoexist, NORMAL_EXEC, 0x55555555, 0x5afa5afa, 0xffff, 0x3);
+			}
+			else
+			{
+				halbtc8812a1ant_PsTdma(btcoexist, NORMAL_EXEC, true, 30);
+				halbtc8812a1ant_CoexTable(btcoexist, NORMAL_EXEC, 0x55555555, 0x55555555, 0xffff, 0x3);
+			}
+		}
+		else if( (BT_8812A_1ANT_BT_STATUS_NON_CONNECTED_IDLE == coex_dm->bt_status) ||
+			(BT_8812A_1ANT_BT_STATUS_CONNECTED_IDLE == coex_dm->bt_status) )
+		{
+			halbtc8812a1ant_PsTdma(btcoexist, NORMAL_EXEC, true, 28);
+			halbtc8812a1ant_CoexTable(btcoexist, NORMAL_EXEC, 0x55555555, 0x55555555, 0xffff, 0x3);
+		}
+		else if(BT_8812A_1ANT_BT_STATUS_ACL_BUSY == coex_dm->bt_status)
+		{
+			if(stack_info->hid_exist)
+				halbtc8812a1ant_PsTdma(btcoexist, NORMAL_EXEC, true, 35);
+			else
+				halbtc8812a1ant_PsTdma(btcoexist, NORMAL_EXEC, true, 29);
+			halbtc8812a1ant_CoexTable(btcoexist, NORMAL_EXEC, 0x55555555, 0x5afa5afa, 0xffff, 0x3);
+		}
+		else if( (BT_8812A_1ANT_BT_STATUS_SCO_BUSY == coex_dm->bt_status) ||
+			(BT_8812A_1ANT_BT_STATUS_ACL_SCO_BUSY == coex_dm->bt_status) )
+		{
+			halbtc8812a1ant_PsTdma(btcoexist, NORMAL_EXEC, false, 8);
+			halbtc8812a1ant_CoexTable(btcoexist, NORMAL_EXEC, 0x5aea5aea, 0x5aea5aea, 0xffff, 0x3);
+		}
+		else 
+		{
+			//error condition, should not reach here
+			coex_dm->error_condition = 3;
+		}
+	}
+}
+
+void
+halbtc8812a1ant_ActionWifiConnected(
+	 	PBTC_COEXIST		btcoexist
+	)
+{
+	PBTC_STACK_INFO	stack_info=&btcoexist->stack_info;
+	BOOLEAN 	wifi_connected=false, wifi_busy=false, bt_hs_on=false;
+	BOOLEAN		scan=false, link=false, roam=false;
+	BOOLEAN		hs_connecting=false, under4way=false;
+	u4Byte		wifi_bw;
+
+	BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], CoexForWifiConnect()===>\n"));
+
+	btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_CONNECTED, &wifi_connected);
+	if(!wifi_connected)
+	{
+		BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], CoexForWifiConnect(), return for wifi not connected<===\n"));
+		return;
+	}
+
+	btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_4_WAY_PROGRESS, &under4way);
+	if(under4way)
+	{
+		halbtc8812a1ant_ActionWifiConnectedSpecialPacket(btcoexist);
+		BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], CoexForWifiConnect(), return for wifi is under 4way<===\n"));
+		return;
+	}
+	
+	btcoexist->btc_get(btcoexist, BTC_GET_BL_HS_CONNECTING, &hs_connecting);
+	btcoexist->btc_get(btcoexist, BTC_GET_BL_HS_OPERATION, &bt_hs_on);
+	btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_SCAN, &scan);
+	btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_LINK, &link);
+	btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_ROAM, &roam);
+	if(scan || link || roam)
+	{
+		halbtc8812a1ant_ActionWifiConnectedScan(btcoexist);
+		BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], CoexForWifiConnect(), return for wifi is under scan<===\n"));
+		return;
+	}
+	
+	btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_BUSY, &wifi_busy);	
+	if(!wifi_busy)
+	{
+		BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], wifi associated-idle!!!\n"));
+		if(btcoexist->bt_info.bt_disabled)
+		{
+			halbtc8812a1ant_PsTdmaCheckForPowerSaveState(btcoexist, true);
+			halbtc8812a1ant_LpsRpwm(btcoexist, NORMAL_EXEC, 0x0, 0x4);
+			btcoexist->btc_set(btcoexist, BTC_SET_ACT_ENTER_LPS, NULL);
+			halbtc8812a1ant_PsTdma(btcoexist, NORMAL_EXEC, false, 9);
+			halbtc8812a1ant_CoexTable(btcoexist, NORMAL_EXEC, 0x55555555, 0x55555555, 0xffff, 0x3);
+		}
+		else
+		{
+			if(BT_8812A_1ANT_BT_STATUS_INQ_PAGE == coex_dm->bt_status)
+			{
+				BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], bt is under inquiry/page scan!!!\n"));
+				if(stack_info->sco_exist)
+				{
+					halbtc8812a1ant_PsTdma(btcoexist, NORMAL_EXEC, true, 32);
+					halbtc8812a1ant_CoexTable(btcoexist, NORMAL_EXEC, 0x55555555, 0x5afa5afa, 0xffff, 0x3);
+				}
+				else
+				{
+					halbtc8812a1ant_PsTdmaCheckForPowerSaveState(btcoexist, true);
+					halbtc8812a1ant_LpsRpwm(btcoexist, NORMAL_EXEC, 0x0, 0x4);
+					// power save must be executed before psTdma.
+					btcoexist->btc_set(btcoexist, BTC_SET_ACT_ENTER_LPS, NULL);
+					halbtc8812a1ant_PsTdma(btcoexist, NORMAL_EXEC, true, 30);
+					halbtc8812a1ant_CoexTable(btcoexist, NORMAL_EXEC, 0x55555555, 0x55555555, 0xffff, 0x3);
+				}
+			}
+			else if(BT_8812A_1ANT_BT_STATUS_NON_CONNECTED_IDLE == coex_dm->bt_status)
+			{
+				halbtc8812a1ant_PsTdmaCheckForPowerSaveState(btcoexist, true);
+				halbtc8812a1ant_LpsRpwm(btcoexist, NORMAL_EXEC, 0x26, 0x0);
+				// power save must be executed before psTdma.
+				btcoexist->btc_set(btcoexist, BTC_SET_ACT_ENTER_LPS, NULL);
+				halbtc8812a1ant_PsTdma(btcoexist, NORMAL_EXEC, false, 9);
+				halbtc8812a1ant_CoexTable(btcoexist, NORMAL_EXEC, 0x55555555, 0x55555555, 0xffff, 0x3);
+			}
+			else if(BT_8812A_1ANT_BT_STATUS_CONNECTED_IDLE == coex_dm->bt_status)
+			{
+				halbtc8812a1ant_PsTdmaCheckForPowerSaveState(btcoexist, true);
+				halbtc8812a1ant_LpsRpwm(btcoexist, NORMAL_EXEC, 0x26, 0x0);
+				// power save must be executed before psTdma.
+				btcoexist->btc_set(btcoexist, BTC_SET_ACT_ENTER_LPS, NULL);
+				halbtc8812a1ant_PsTdma(btcoexist, NORMAL_EXEC, false, 0);
+				halbtc8812a1ant_CoexTable(btcoexist, NORMAL_EXEC, 0x55555555, 0x55555555, 0xffff, 0x3);
+			}
+			else if(BT_8812A_1ANT_BT_STATUS_ACL_BUSY == coex_dm->bt_status)
+			{
+				if(stack_info->hid_exist && stack_info->numOfLink==1)
+				{
+					// hid only
+					halbtc8812a1ant_PsTdmaCheckForPowerSaveState(btcoexist, false);
+					// power save must be executed before psTdma.
+					btcoexist->btc_set(btcoexist, BTC_SET_ACT_LEAVE_LPS, NULL);
+					
+					halbtc8812a1ant_PsTdma(btcoexist, NORMAL_EXEC, false, 8);
+					halbtc8812a1ant_CoexTable(btcoexist, NORMAL_EXEC, 0x5fff5fff, 0x5fff5fff, 0xffff, 0x3);
+					coex_dm->reset_tdma_adjust = true;
+				}
+				else
+				{				
+					halbtc8812a1ant_PsTdmaCheckForPowerSaveState(btcoexist, true);
+					halbtc8812a1ant_LpsRpwm(btcoexist, NORMAL_EXEC, 0x0, 0x4);
+					// power save must be executed before psTdma.
+					btcoexist->btc_set(btcoexist, BTC_SET_ACT_ENTER_LPS, NULL);
+
+					if(stack_info->hid_exist)
+					{
+						if(stack_info->a2dp_exist)
+						{
+							// hid+a2dp
+							halbtc8812a1ant_PsTdma(btcoexist, NORMAL_EXEC, true, 2);
+							halbtc8812a1ant_CoexTable(btcoexist, NORMAL_EXEC, 0x55555555, 0x5afa5afa, 0xffff, 0x3);
+						}
+						else if(stack_info->pan_exist)
+						{
+							if(bt_hs_on)
+							{
+								// hid+hs
+								halbtc8812a1ant_PsTdma(btcoexist, NORMAL_EXEC, true, 2);
+							}
+							else
+							{
+								// hid+pan
+								halbtc8812a1ant_PsTdma(btcoexist, NORMAL_EXEC, true, 2);
+							}
+							halbtc8812a1ant_CoexTable(btcoexist, NORMAL_EXEC, 0x55555555, 0x5afa5afa, 0xffff, 0x3);
+						}
+						else
+						{
+							coex_dm->error_condition = 4;
+						}
+						coex_dm->reset_tdma_adjust = true;
+					}
+					else if(stack_info->a2dp_exist)
+					{
+						if(stack_info->pan_exist)
+						{
+							if(bt_hs_on)
+							{
+								// a2dp+hs
+								halbtc8812a1ant_PsTdma(btcoexist, NORMAL_EXEC, true, 2);
+							}
+							else
+							{
+								// a2dp+pan
+								halbtc8812a1ant_PsTdma(btcoexist, NORMAL_EXEC, true, 36);
+							}
+							halbtc8812a1ant_CoexTable(btcoexist, NORMAL_EXEC, 0x55555555, 0x5afa5afa, 0xffff, 0x3);
+							coex_dm->reset_tdma_adjust = true;
+						}
+						else
+						{
+							// a2dp only
+							halbtc8812a1ant_TdmaDurationAdjustForAcl(btcoexist);
+							halbtc8812a1ant_CoexTable(btcoexist, NORMAL_EXEC, 0x55555555, 0x5afa5afa, 0xffff, 0x3);
+						}
+					}
+					else if(stack_info->pan_exist)
+					{
+						// pan only
+						if(bt_hs_on)
+						{
+							coex_dm->error_condition = 5;
+						}
+						else
+						{
+							halbtc8812a1ant_PsTdma(btcoexist, NORMAL_EXEC, true, 2);
+							halbtc8812a1ant_CoexTable(btcoexist, NORMAL_EXEC, 0x55555555, 0x5afa5afa, 0xffff, 0x3);
+						}
+						coex_dm->reset_tdma_adjust = true;
+					}
+					else
+					{
+						// temp state, do nothing!!!
+						//DbgPrint("error 6, coex_dm->bt_status=%d\n", coex_dm->bt_status);
+						//DbgPrint("error 6, stack_info->numOfLink=%d, stack_info->hid_exist=%d, stack_info->a2dp_exist=%d, stack_info->pan_exist=%d, stack_info->sco_exist=%d\n", 
+							//stack_info->numOfLink, stack_info->hid_exist, stack_info->a2dp_exist, stack_info->pan_exist, stack_info->sco_exist);
+						//coex_dm->error_condition = 6;
+					}
+				}
+			}
+			else if( (BT_8812A_1ANT_BT_STATUS_SCO_BUSY == coex_dm->bt_status) ||
+				(BT_8812A_1ANT_BT_STATUS_ACL_SCO_BUSY == coex_dm->bt_status) )
+			{
+				halbtc8812a1ant_PsTdmaCheckForPowerSaveState(btcoexist, false);
+				// power save must be executed before psTdma.
+				btcoexist->btc_set(btcoexist, BTC_SET_ACT_LEAVE_LPS, NULL);
+
+				halbtc8812a1ant_PsTdma(btcoexist, NORMAL_EXEC, false, 8);
+				halbtc8812a1ant_CoexTable(btcoexist, NORMAL_EXEC, 0x5aea5aea, 0x5aea5aea, 0xffff, 0x3);
+			}
+			else 
+			{
+				//error condition, should not reach here
+				coex_dm->error_condition = 7;
+			}
+		}
+	}
+	else
+	{
+		BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], wifi busy!!!\n"));
+		if(btcoexist->bt_info.bt_disabled)
+		{
+			halbtc8812a1ant_PsTdmaCheckForPowerSaveState(btcoexist, false);
+			btcoexist->btc_set(btcoexist, BTC_SET_ACT_LEAVE_LPS, NULL);
+			halbtc8812a1ant_PsTdma(btcoexist, NORMAL_EXEC, false, 9);
+			halbtc8812a1ant_CoexTable(btcoexist, NORMAL_EXEC, 0x55555555, 0x55555555, 0xffff, 0x3);
+		}
+		else
+		{
+			if(bt_hs_on)
+			{
+				BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], HS is under progress!!!\n"));
+				//DbgPrint("coex_dm->bt_status = 0x%x\n", coex_dm->bt_status);
+				halbtc8812a1ant_ActionHs(btcoexist, hs_connecting);
+			}
+			else if(BT_8812A_1ANT_BT_STATUS_INQ_PAGE == coex_dm->bt_status)
+			{
+				if(stack_info->sco_exist)
+				{
+					halbtc8812a1ant_PsTdma(btcoexist, NORMAL_EXEC, true, 32);
+					halbtc8812a1ant_CoexTable(btcoexist, NORMAL_EXEC, 0x55555555, 0x5afa5afa, 0xffff, 0x3);
+				}
+				else
+				{
+					halbtc8812a1ant_PsTdmaCheckForPowerSaveState(btcoexist, true);
+					halbtc8812a1ant_LpsRpwm(btcoexist, NORMAL_EXEC, 0x0, 0x4);
+					// power save must be executed before psTdma.
+					btcoexist->btc_set(btcoexist, BTC_SET_ACT_ENTER_LPS, NULL);
+					halbtc8812a1ant_PsTdma(btcoexist, NORMAL_EXEC, true, 30);
+					halbtc8812a1ant_CoexTable(btcoexist, NORMAL_EXEC, 0x55555555, 0x55555555, 0xffff, 0x3);
+				}
+			}
+			else if(BT_8812A_1ANT_BT_STATUS_NON_CONNECTED_IDLE == coex_dm->bt_status)
+			{
+				halbtc8812a1ant_PsTdmaCheckForPowerSaveState(btcoexist, false);
+				// power save must be executed before psTdma.
+				btcoexist->btc_set(btcoexist, BTC_SET_ACT_LEAVE_LPS, NULL);
+				halbtc8812a1ant_PsTdma(btcoexist, NORMAL_EXEC, true, 5);
+				halbtc8812a1ant_CoexTable(btcoexist, NORMAL_EXEC, 0x5a5a5a5a, 0x5a5a5a5a, 0xffff, 0x3);
+			}
+			else if(BT_8812A_1ANT_BT_STATUS_CONNECTED_IDLE == coex_dm->bt_status)
+			{
+				halbtc8812a1ant_PsTdmaCheckForPowerSaveState(btcoexist, false);
+				// power save must be executed before psTdma.
+				btcoexist->btc_set(btcoexist, BTC_SET_ACT_LEAVE_LPS, NULL);
+				if(bt_hs_on)
+				{
+					BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], HS is under progress!!!\n"));
+					halbtc8812a1ant_ActionHs(btcoexist, hs_connecting);
+				}
+				else
+				{
+					halbtc8812a1ant_PsTdma(btcoexist, NORMAL_EXEC, true, 5);
+					halbtc8812a1ant_CoexTable(btcoexist, NORMAL_EXEC, 0x5a5a5a5a, 0x5a5a5a5a, 0xffff, 0x3);
+				}
+			}
+			else if(BT_8812A_1ANT_BT_STATUS_ACL_BUSY == coex_dm->bt_status)
+			{
+				if(stack_info->hid_exist && stack_info->numOfLink==1)
+				{
+					// hid only
+					halbtc8812a1ant_PsTdmaCheckForPowerSaveState(btcoexist, false);
+					// power save must be executed before psTdma.
+					btcoexist->btc_set(btcoexist, BTC_SET_ACT_LEAVE_LPS, NULL);
+					
+					halbtc8812a1ant_PsTdma(btcoexist, NORMAL_EXEC, false, 8);
+					halbtc8812a1ant_CoexTable(btcoexist, NORMAL_EXEC, 0x5fff5fff, 0x5fff5fff, 0xffff, 0x3);
+					coex_dm->reset_tdma_adjust = true;
+				}
+				else
+				{
+					halbtc8812a1ant_PsTdmaCheckForPowerSaveState(btcoexist, true);
+					halbtc8812a1ant_LpsRpwm(btcoexist, NORMAL_EXEC, 0x0, 0x4);
+					// power save must be executed before psTdma.
+					btcoexist->btc_set(btcoexist, BTC_SET_ACT_ENTER_LPS, NULL);
+
+					if(stack_info->hid_exist)
+					{
+						if(stack_info->a2dp_exist)
+						{
+							// hid+a2dp
+							halbtc8812a1ant_PsTdma(btcoexist, NORMAL_EXEC, true, 2);
+							halbtc8812a1ant_CoexTable(btcoexist, NORMAL_EXEC, 0x55555555, 0x5afa5afa, 0xffff, 0x3);
+						}
+						else if(stack_info->pan_exist)
+						{
+							if(bt_hs_on)
+							{
+								// hid+hs
+								halbtc8812a1ant_PsTdma(btcoexist, NORMAL_EXEC, true, 2);
+							}
+							else
+							{
+								// hid+pan
+								halbtc8812a1ant_PsTdma(btcoexist, NORMAL_EXEC, true, 2);
+							}
+							halbtc8812a1ant_CoexTable(btcoexist, NORMAL_EXEC, 0x55555555, 0x5afa5afa, 0xffff, 0x3);
+						}
+						else
+						{
+							coex_dm->error_condition = 8;
+						}
+						coex_dm->reset_tdma_adjust = true;
+					}
+					else if(stack_info->a2dp_exist)
+					{
+						if(stack_info->pan_exist)
+						{
+							if(bt_hs_on)
+							{
+								// a2dp+hs
+								halbtc8812a1ant_PsTdma(btcoexist, NORMAL_EXEC, true, 2);
+							}
+							else
+							{
+								// a2dp+pan
+								halbtc8812a1ant_PsTdma(btcoexist, NORMAL_EXEC, true, 36);
+							}
+							halbtc8812a1ant_CoexTable(btcoexist, NORMAL_EXEC, 0x55555555, 0x5afa5afa, 0xffff, 0x3);
+							coex_dm->reset_tdma_adjust = true;
+						}
+						else
+						{
+							// a2dp only
+							halbtc8812a1ant_TdmaDurationAdjustForAcl(btcoexist);
+							halbtc8812a1ant_CoexTable(btcoexist, NORMAL_EXEC, 0x55555555, 0x5afa5afa, 0xffff, 0x3);
+						}
+					}
+					else if(stack_info->pan_exist)
+					{
+						// pan only
+						if(bt_hs_on)
+						{
+							coex_dm->error_condition = 9;
+						}
+						else
+						{
+							halbtc8812a1ant_PsTdma(btcoexist, NORMAL_EXEC, true, 2);
+							halbtc8812a1ant_CoexTable(btcoexist, NORMAL_EXEC, 0x55555555, 0x5afa5afa, 0xffff, 0x3);
+						}
+						coex_dm->reset_tdma_adjust = true;
+					}
+					else
+					{
+						//DbgPrint("error 10, stack_info->numOfLink=%d, stack_info->hid_exist=%d, stack_info->a2dp_exist=%d, stack_info->pan_exist=%d, stack_info->sco_exist=%d\n", 
+							//stack_info->numOfLink, stack_info->hid_exist, stack_info->a2dp_exist, stack_info->pan_exist, stack_info->sco_exist);
+						coex_dm->error_condition = 10;
+					}
+				}
+			}
+			else if( (BT_8812A_1ANT_BT_STATUS_SCO_BUSY == coex_dm->bt_status) ||
+				(BT_8812A_1ANT_BT_STATUS_ACL_SCO_BUSY == coex_dm->bt_status) )
+			{
+				halbtc8812a1ant_PsTdmaCheckForPowerSaveState(btcoexist, false);
+				// power save must be executed before psTdma.
+				btcoexist->btc_set(btcoexist, BTC_SET_ACT_LEAVE_LPS, NULL);
+
+				halbtc8812a1ant_PsTdma(btcoexist, NORMAL_EXEC, false, 8);
+				halbtc8812a1ant_CoexTable(btcoexist, NORMAL_EXEC, 0x5aea5aea, 0x5aea5aea, 0xffff, 0x3);
+			}
+			else 
+			{
+				//DbgPrint("error 11, coex_dm->bt_status=%d\n", coex_dm->bt_status);
+				//DbgPrint("error 11, stack_info->numOfLink=%d, stack_info->hid_exist=%d, stack_info->a2dp_exist=%d, stack_info->pan_exist=%d, stack_info->sco_exist=%d\n", 
+					//stack_info->numOfLink, stack_info->hid_exist, stack_info->a2dp_exist, stack_info->pan_exist, stack_info->sco_exist);
+				//error condition, should not reach here
+				coex_dm->error_condition = 11;
+			}
+		}
+	}
+	BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], CoexForWifiConnect()<===\n"));
+}
+
+void
+halbtc8812a1ant_RunSwCoexistMechanism(
+	 	PBTC_COEXIST		btcoexist
+	)
+{
+	PBTC_STACK_INFO		stack_info=&btcoexist->stack_info;
+	BOOLEAN				wifi_under5g=false, wifi_busy=false, wifi_connected=false;
+	u1Byte				bt_info_original=0, bt_retry_cnt=0;
+	u1Byte				algorithm=0;
+
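+	// note: the unconditional return below disables this SW coexist mechanism;
+	// everything that follows in this function is currently unreachable.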
+	return;
+	if(stack_info->bProfileNotified)
+	{
+		algorithm = halbtc8812a1ant_ActionAlgorithm(btcoexist);
+		coex_dm->cur_algorithm = algorithm;		
+		BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], Algorithm = %d \n", coex_dm->cur_algorithm));
+
+		if(halbtc8812a1ant_IsCommonAction(btcoexist))
+		{
+			BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], Action common.\n"));
+		}
+		else
+		{
+			switch(coex_dm->cur_algorithm)
+			{
+				case BT_8812A_1ANT_COEX_ALGO_SCO:
+					BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], Action algorithm = SCO.\n"));
+					halbtc8812a1ant_ActionSco(btcoexist);
+					break;
+				case BT_8812A_1ANT_COEX_ALGO_HID:
+					BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], Action algorithm = HID.\n"));
+					halbtc8812a1ant_ActionHid(btcoexist);
+					break;
+				case BT_8812A_1ANT_COEX_ALGO_A2DP:
+					BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], Action algorithm = A2DP.\n"));
+					halbtc8812a1ant_ActionA2dp(btcoexist);
+					break;
+				case BT_8812A_1ANT_COEX_ALGO_A2DP_PANHS:
+					BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], Action algorithm = A2DP+PAN(HS).\n"));
+					halbtc8812a1ant_ActionA2dpPanHs(btcoexist);
+					break;
+				case BT_8812A_1ANT_COEX_ALGO_PANEDR:
+					BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], Action algorithm = PAN(EDR).\n"));
+					halbtc8812a1ant_ActionPanEdr(btcoexist);
+					break;
+				case BT_8812A_1ANT_COEX_ALGO_PANHS:
+					BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], Action algorithm = HS mode.\n"));
+					halbtc8812a1ant_ActionPanHs(btcoexist);
+					break;
+				case BT_8812A_1ANT_COEX_ALGO_PANEDR_A2DP:
+					BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], Action algorithm = PAN+A2DP.\n"));
+					halbtc8812a1ant_ActionPanEdrA2dp(btcoexist);
+					break;
+				case BT_8812A_1ANT_COEX_ALGO_PANEDR_HID:
+					BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], Action algorithm = PAN(EDR)+HID.\n"));
+					halbtc8812a1ant_ActionPanEdrHid(btcoexist);
+					break;
+				case BT_8812A_1ANT_COEX_ALGO_HID_A2DP_PANEDR:
+					BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], Action algorithm = HID+A2DP+PAN.\n"));
+					halbtc8812a1ant_ActionHidA2dpPanEdr(btcoexist);
+					break;
+				case BT_8812A_1ANT_COEX_ALGO_HID_A2DP:
+					BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], Action algorithm = HID+A2DP.\n"));
+					halbtc8812a1ant_ActionHidA2dp(btcoexist);
+					break;
+				default:
+					BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], Action algorithm = coexist All Off!!\n"));
+					halbtc8812a1ant_CoexAllOff(btcoexist);
+					break;
+			}
+			coex_dm->pre_algorithm = coex_dm->cur_algorithm;
+		}
+	}
+}
+
+void
+halbtc8812a1ant_RunCoexistMechanism(
+	 	PBTC_COEXIST		btcoexist
+	)
+{
+	PBTC_STACK_INFO		stack_info=&btcoexist->stack_info;
+	BOOLEAN				wifi_under5g=false, wifi_busy=false, wifi_connected=false;
+
+	BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], RunCoexistMechanism()===>\n"));
+
+	btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_UNDER_5G, &wifi_under5g);
+
+	if(wifi_under5g)
+	{
+		BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], RunCoexistMechanism(), return for 5G <===\n"));
+		return;
+	}
+
+	if(btcoexist->manual_control)
+	{
+		BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], RunCoexistMechanism(), return for Manual CTRL <===\n"));
+		return;
+	}
+
+	if(btcoexist->stop_coex_dm)
+	{
+		BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], RunCoexistMechanism(), return for Stop Coex DM <===\n"));
+		return;
+	}
+
+	halbtc8812a1ant_RunSwCoexistMechanism(btcoexist);
+
+	btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_CONNECTED, &wifi_connected);
+	if(btcoexist->bt_info.bt_disabled)
+	{
+		BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], bt is disabled!!!\n"));
+		btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_BUSY, &wifi_busy);
+		if(wifi_busy)
+		{			
+			halbtc8812a1ant_PsTdmaCheckForPowerSaveState(btcoexist, false);
+			btcoexist->btc_set(btcoexist, BTC_SET_ACT_LEAVE_LPS, NULL);
+			halbtc8812a1ant_PsTdma(btcoexist, NORMAL_EXEC, false, 9);
+		}
+		else
+		{
+			halbtc8812a1ant_PsTdmaCheckForPowerSaveState(btcoexist, true);
+			halbtc8812a1ant_LpsRpwm(btcoexist, NORMAL_EXEC, 0x0, 0x4);
+			// power save must be executed before psTdma.
+			btcoexist->btc_set(btcoexist, BTC_SET_ACT_ENTER_LPS, NULL);
+			halbtc8812a1ant_PsTdma(btcoexist, NORMAL_EXEC, false, 9);
+		}
+		halbtc8812a1ant_CoexTable(btcoexist, NORMAL_EXEC, 0x55555555, 0x55555555, 0xffff, 0x3);
+	}
+	else if(coex_sta->under_ips)
+	{
+		BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], wifi is under IPS !!!\n"));
+		halbtc8812a1ant_PsTdma(btcoexist, NORMAL_EXEC, false, 0);
+		halbtc8812a1ant_CoexTable(btcoexist, NORMAL_EXEC, 0x55555555, 0x55555555, 0xffff, 0x3);
+		halbtc8812a1ant_WifiParaAdjust(btcoexist, false);
+	}
+	else if(!wifi_connected)
+	{
+		BOOLEAN	scan=false, link=false, roam=false;
+		
+		BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], wifi is non connected-idle !!!\n"));
+
+		btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_SCAN, &scan);
+		btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_LINK, &link);
+		btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_ROAM, &roam);
+
+		if(scan || link || roam)
+			halbtc8812a1ant_ActionWifiNotConnectedAssoAuthScan(btcoexist);
+		else
+			halbtc8812a1ant_ActionWifiNotConnected(btcoexist);
+	}
+	else	// wifi LPS/Busy
+	{
+		BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], wifi is NOT under IPS!!!\n"));
+		halbtc8812a1ant_WifiParaAdjust(btcoexist, true);
+		halbtc8812a1ant_ActionWifiConnected(btcoexist);
+	}
+	BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], RunCoexistMechanism()<===\n"));
+}
+
+void
+halbtc8812a1ant_InitCoexDm(
+	 	PBTC_COEXIST		btcoexist
+	)
+{	
+	BOOLEAN		wifi_connected=false;
+	// force to reset coex mechanism
+
+	btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_CONNECTED, &wifi_connected);
+	if(!wifi_connected) // wifi not connected
+	{
+		halbtc8812a1ant_ActionWifiNotConnected(btcoexist);
+	}
+	else	// wifi is connected
+	{
+		halbtc8812a1ant_ActionWifiConnected(btcoexist);
+	}
+
+	halbtc8812a1ant_FwDacSwingLvl(btcoexist, FORCE_EXEC, 6);
+	halbtc8812a1ant_DecBtPwr(btcoexist, FORCE_EXEC, false);
+
+	// sw all off
+	halbtc8812a1ant_SwMechanism1(btcoexist,false,false,false,false);
+	halbtc8812a1ant_SwMechanism2(btcoexist,false,false,false,0x18);
+
+	halbtc8812a1ant_CoexTable(btcoexist, FORCE_EXEC, 0x55555555, 0x55555555, 0xffff, 0x3);
+}
+
+//============================================================
+// work around function start with wa_halbtc8812a1ant_
+//============================================================
+//============================================================
+// extern function start with EXhalbtc8812a1ant_
+//============================================================
+void
+EXhalbtc8812a1ant_InitHwConfig(
+	 	PBTC_COEXIST		btcoexist
+	)
+{
+	u4Byte	u4_tmp=0;
+	u2Byte	u2Tmp=0;
+	u1Byte	u1_tmp=0;
+
+	BTC_PRINT(BTC_MSG_INTERFACE, INTF_INIT, ("[BTCoex], 1Ant Init HW Config!!\n"));
+
+	// backup rf 0x1e value
+	coex_dm->bt_rf0x1e_backup = 
+		btcoexist->btc_get_rf_reg(btcoexist, BTC_RF_A, 0x1e, 0xfffff);
+	
+	// antenna switch control to BT
+	btcoexist->btc_write_4byte(btcoexist, 0x900, 0x00000400);
+	btcoexist->btc_write_1byte(btcoexist, 0x76d, 0x1);
+	btcoexist->btc_write_1byte(btcoexist, 0xcb3, 0x77);
+	btcoexist->btc_write_1byte(btcoexist, 0xcb7, 0x40);
+
+	// 0x790[5:0]=0x5
+	u1_tmp = btcoexist->btc_read_1byte(btcoexist, 0x790);
+	u1_tmp &= 0xc0;
+	u1_tmp |= 0x5;
+	btcoexist->btc_write_1byte(btcoexist, 0x790, u1_tmp);
+
+	// PTA parameter
+	btcoexist->btc_write_1byte(btcoexist, 0x6cc, 0x0);
+	btcoexist->btc_write_4byte(btcoexist, 0x6c8, 0xffff);
+	btcoexist->btc_write_4byte(btcoexist, 0x6c4, 0x55555555);
+	btcoexist->btc_write_4byte(btcoexist, 0x6c0, 0x55555555);
+
+	// coex parameters
+	btcoexist->btc_write_1byte(btcoexist, 0x778, 0x1);
+
+	// enable counter statistics
+	btcoexist->btc_write_1byte(btcoexist, 0x76e, 0x4);
+
+	// enable PTA
+	btcoexist->btc_write_1byte(btcoexist, 0x40, 0x20);
+
+	// bt clock related
+	u1_tmp = btcoexist->btc_read_1byte(btcoexist, 0x4);
+	u1_tmp |= BIT7;
+	btcoexist->btc_write_1byte(btcoexist, 0x4, u1_tmp);
+
+	// bt clock related
+	u1_tmp = btcoexist->btc_read_1byte(btcoexist, 0x7);
+	u1_tmp |= BIT1;
+	btcoexist->btc_write_1byte(btcoexist, 0x7, u1_tmp);
+}
+
+void
+EXhalbtc8812a1ant_InitCoexDm(
+	 	PBTC_COEXIST		btcoexist
+	)
+{
+	BTC_PRINT(BTC_MSG_INTERFACE, INTF_INIT, ("[BTCoex], Coex Mechanism Init!!\n"));
+
+	btcoexist->stop_coex_dm = false;
+
+	halbtc8812a1ant_InitCoexDm(btcoexist);
+}
+
+void
+EXhalbtc8812a1ant_DisplayCoexInfo(
+	 	PBTC_COEXIST		btcoexist
+	)
+{
+	PBTC_BOARD_INFO		board_info=&btcoexist->boardInfo;
+	PBTC_STACK_INFO		stack_info=&btcoexist->stack_info;
+	pu1Byte				cli_buf=btcoexist->cli_buf;
+	u1Byte				u1_tmp[4], i, bt_info_ext, psTdmaCase=0;
+	u4Byte				u4_tmp[4];
+	BOOLEAN				roam=false, scan=false, link=false, wifi_under5g=false;
+	BOOLEAN				bt_hs_on=false, wifi_busy=false;
+	s4Byte				wifi_rssi=0, bt_hs_rssi=0;
+	u4Byte				wifi_bw, wifiTrafficDir;
+	u1Byte				wifiDot11Chnl, wifiHsChnl;
+
+	CL_SPRINTF(cli_buf, BT_TMP_BUF_SIZE, "\r\n ============[BT Coexist info]============");
+	CL_PRINTF(cli_buf);
+
+	if(btcoexist->manual_control)
+	{
+		CL_SPRINTF(cli_buf, BT_TMP_BUF_SIZE, "\r\n ============[Under Manual Control]============");
+		CL_PRINTF(cli_buf);
+		CL_SPRINTF(cli_buf, BT_TMP_BUF_SIZE, "\r\n ==========================================");
+		CL_PRINTF(cli_buf);
+	}
+	if(btcoexist->stop_coex_dm)
+	{
+		CL_SPRINTF(cli_buf, BT_TMP_BUF_SIZE, "\r\n ============[Coex is STOPPED]============");
+		CL_PRINTF(cli_buf);
+		CL_SPRINTF(cli_buf, BT_TMP_BUF_SIZE, "\r\n ==========================================");
+		CL_PRINTF(cli_buf);
+	}
+
+	if(!board_info->bBtExist)
+	{
+		CL_SPRINTF(cli_buf, BT_TMP_BUF_SIZE, "\r\n BT not exists !!!");
+		CL_PRINTF(cli_buf);
+		return;
+	}
+
+	CL_SPRINTF(cli_buf, BT_TMP_BUF_SIZE, "\r\n %-35s = %d/ %d ", "Ant PG number/ Ant mechanism:", \
+		board_info->pgAntNum, board_info->btdmAntNum);
+	CL_PRINTF(cli_buf);	
+	
+	CL_SPRINTF(cli_buf, BT_TMP_BUF_SIZE, "\r\n %-35s = %s / %d", "BT stack/ hci ext ver", \
+		((stack_info->bProfileNotified)? "Yes":"No"), stack_info->hciVersion);
+	CL_PRINTF(cli_buf);
+	btcoexist->btc_disp_dbg_msg(btcoexist, BTC_DBG_DISP_BT_FW_VER);
+
+	btcoexist->btc_get(btcoexist, BTC_GET_BL_HS_OPERATION, &bt_hs_on);
+	btcoexist->btc_get(btcoexist, BTC_GET_U1_WIFI_DOT11_CHNL, &wifiDot11Chnl);
+	btcoexist->btc_get(btcoexist, BTC_GET_U1_WIFI_HS_CHNL, &wifiHsChnl);
+	CL_SPRINTF(cli_buf, BT_TMP_BUF_SIZE, "\r\n %-35s = %d / %d(%d)", "Dot11 channel / HsChnl(HsMode)", \
+		wifiDot11Chnl, wifiHsChnl, bt_hs_on);
+	CL_PRINTF(cli_buf);
+
+	CL_SPRINTF(cli_buf, BT_TMP_BUF_SIZE, "\r\n %-35s = %02x %02x %02x ", "H2C Wifi inform bt chnl Info", \
+		coex_dm->wifi_chnl_info[0], coex_dm->wifi_chnl_info[1],
+		coex_dm->wifi_chnl_info[2]);
+	CL_PRINTF(cli_buf);
+
+	btcoexist->btc_get(btcoexist, BTC_GET_S4_WIFI_RSSI, &wifi_rssi);
+	btcoexist->btc_get(btcoexist, BTC_GET_S4_HS_RSSI, &bt_hs_rssi);
+	CL_SPRINTF(cli_buf, BT_TMP_BUF_SIZE, "\r\n %-35s = %d/ %d", "Wifi rssi/ HS rssi", \
+		wifi_rssi, bt_hs_rssi);
+	CL_PRINTF(cli_buf);
+
+	btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_SCAN, &scan);
+	btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_LINK, &link);
+	btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_ROAM, &roam);
+	CL_SPRINTF(cli_buf, BT_TMP_BUF_SIZE, "\r\n %-35s = %d/ %d/ %d ", "Wifi link/ roam/ scan", \
+		link, roam, scan);
+	CL_PRINTF(cli_buf);
+
+	btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_UNDER_5G, &wifi_under5g);
+	btcoexist->btc_get(btcoexist, BTC_GET_U4_WIFI_BW, &wifi_bw);
+	btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_BUSY, &wifi_busy);
+	btcoexist->btc_get(btcoexist, BTC_GET_U4_WIFI_TRAFFIC_DIRECTION, &wifiTrafficDir);
+	CL_SPRINTF(cli_buf, BT_TMP_BUF_SIZE, "\r\n %-35s = %s / %s/ %s ", "Wifi status", \
+		(wifi_under5g? "5G":"2.4G"),
+		((BTC_WIFI_BW_LEGACY==wifi_bw)? "Legacy": (((BTC_WIFI_BW_HT40==wifi_bw)? "HT40":"HT20"))),
+		((!wifi_busy)? "idle": ((BTC_WIFI_TRAFFIC_TX==wifiTrafficDir)? "uplink":"downlink")));
+	CL_PRINTF(cli_buf);
+	CL_SPRINTF(cli_buf, BT_TMP_BUF_SIZE, "\r\n %-35s = [%s/ %d/ %d] ", "BT [status/ rssi/ retryCnt]", \
+		((coex_sta->c2h_bt_inquiry_page)?("inquiry/page scan"):((BT_8812A_1ANT_BT_STATUS_NON_CONNECTED_IDLE == coex_dm->bt_status)? "non-connected idle":
+		(  (BT_8812A_1ANT_BT_STATUS_CONNECTED_IDLE == coex_dm->bt_status)? "connected-idle":"busy"))),
+		coex_sta->bt_rssi, coex_sta->bt_retry_cnt);
+	CL_PRINTF(cli_buf);
+	
+	if(stack_info->bProfileNotified)
+	{			
+		CL_SPRINTF(cli_buf, BT_TMP_BUF_SIZE, "\r\n %-35s = %d / %d / %d / %d", "SCO/HID/PAN/A2DP", \
+			stack_info->sco_exist, stack_info->hid_exist, stack_info->pan_exist, stack_info->a2dp_exist);
+		CL_PRINTF(cli_buf);	
+
+		btcoexist->btc_disp_dbg_msg(btcoexist, BTC_DBG_DISP_BT_LINK_INFO);
+	}
+
+	bt_info_ext = coex_sta->bt_info_ext;
+	CL_SPRINTF(cli_buf, BT_TMP_BUF_SIZE, "\r\n %-35s = %s", "BT Info A2DP rate", \
+		(bt_info_ext&BIT0)? "Basic rate":"EDR rate");
+	CL_PRINTF(cli_buf);	
+
+	for(i=0; i<BT_INFO_SRC_8812A_1ANT_MAX; i++)
+	{
+		if(coex_sta->bt_info_c2h_cnt[i])
+		{				
+			CL_SPRINTF(cli_buf, BT_TMP_BUF_SIZE, "\r\n %-35s = %02x %02x %02x %02x %02x %02x %02x(%d)", GLBtInfoSrc8812a1Ant[i], \
+				coex_sta->bt_info_c2h[i][0], coex_sta->bt_info_c2h[i][1],
+				coex_sta->bt_info_c2h[i][2], coex_sta->bt_info_c2h[i][3],
+				coex_sta->bt_info_c2h[i][4], coex_sta->bt_info_c2h[i][5],
+				coex_sta->bt_info_c2h[i][6], coex_sta->bt_info_c2h_cnt[i]);
+			CL_PRINTF(cli_buf);
+		}
+	}
+	CL_SPRINTF(cli_buf, BT_TMP_BUF_SIZE, "\r\n %-35s = %s/%s, (0x%x/0x%x)", "PS state, IPS/LPS, (lps/rpwm)", \
+		((coex_sta->under_ips? "IPS ON":"IPS OFF")),
+		((coex_sta->under_lps? "LPS ON":"LPS OFF")), 
+		btcoexist->bt_info.lps1Ant, 
+		btcoexist->bt_info.rpwm1Ant);
+	CL_PRINTF(cli_buf);
+	btcoexist->btc_disp_dbg_msg(btcoexist, BTC_DBG_DISP_FW_PWR_MODE_CMD);
+
+	if(!btcoexist->manual_control)
+	{
+		// Sw mechanism	
+		CL_SPRINTF(cli_buf, BT_TMP_BUF_SIZE, "\r\n %-35s", "============[Sw mechanism]============");
+		CL_PRINTF(cli_buf);
+	
+		CL_SPRINTF(cli_buf, BT_TMP_BUF_SIZE, "\r\n %-35s = %d/ %d/ %d/ %d ", "SM1[ShRf/ LpRA/ LimDig/ btLna]", \
+			coex_dm->cur_rf_rx_lpf_shrink, coex_dm->cur_low_penalty_ra, coex_dm->limited_dig, coex_dm->bCurBtLnaConstrain);
+		CL_PRINTF(cli_buf);
+		CL_SPRINTF(cli_buf, BT_TMP_BUF_SIZE, "\r\n %-35s = %d/ %d/ %d(0x%x) ", "SM2[AgcT/ AdcB/ SwDacSwing(lvl)]", \
+			coex_dm->cur_agc_table_en, coex_dm->cur_adc_back_off, coex_dm->cur_dac_swing_on, coex_dm->cur_dac_swing_lvl);
+		CL_PRINTF(cli_buf);
+	
+		// Fw mechanism		
+		CL_SPRINTF(cli_buf, BT_TMP_BUF_SIZE, "\r\n %-35s", "============[Fw mechanism]============");
+		CL_PRINTF(cli_buf);	
+
+		psTdmaCase = coex_dm->cur_ps_tdma;
+		CL_SPRINTF(cli_buf, BT_TMP_BUF_SIZE, "\r\n %-35s = %02x %02x %02x %02x %02x case-%d", "PS TDMA", \
+			coex_dm->ps_tdma_para[0], coex_dm->ps_tdma_para[1],
+			coex_dm->ps_tdma_para[2], coex_dm->ps_tdma_para[3],
+			coex_dm->ps_tdma_para[4], psTdmaCase);
+		CL_PRINTF(cli_buf);
+
+		CL_SPRINTF(cli_buf, BT_TMP_BUF_SIZE, "\r\n %-35s = 0x%x ", "Latest error condition(should be 0)", \
+			coex_dm->error_condition);
+		CL_PRINTF(cli_buf);
+		
+		CL_SPRINTF(cli_buf, BT_TMP_BUF_SIZE, "\r\n %-35s = %d/ %d ", "DecBtPwr/ IgnWlanAct", \
+			coex_dm->cur_dec_bt_pwr, coex_dm->cur_ignore_wlan_act);
+		CL_PRINTF(cli_buf);
+	}
+
+	// Hw setting		
+	CL_SPRINTF(cli_buf, BT_TMP_BUF_SIZE, "\r\n %-35s", "============[Hw setting]============");
+	CL_PRINTF(cli_buf);	
+
+	CL_SPRINTF(cli_buf, BT_TMP_BUF_SIZE, "\r\n %-35s = 0x%x", "RF-A, 0x1e initVal", \
+		coex_dm->bt_rf0x1e_backup);
+	CL_PRINTF(cli_buf);
+
+	u1_tmp[0] = btcoexist->btc_read_1byte(btcoexist, 0x778);
+	CL_SPRINTF(cli_buf, BT_TMP_BUF_SIZE, "\r\n %-35s = 0x%x", "0x778", \
+		u1_tmp[0]);
+	CL_PRINTF(cli_buf);
+	
+	u1_tmp[0] = btcoexist->btc_read_1byte(btcoexist, 0x92c);
+	u4_tmp[0] = btcoexist->btc_read_4byte(btcoexist, 0x930);
+	CL_SPRINTF(cli_buf, BT_TMP_BUF_SIZE, "\r\n %-35s = 0x%x/ 0x%x", "0x92c/ 0x930", \
+		(u1_tmp[0]), u4_tmp[0]);
+	CL_PRINTF(cli_buf);
+
+	u1_tmp[0] = btcoexist->btc_read_1byte(btcoexist, 0x40);
+	u1_tmp[1] = btcoexist->btc_read_1byte(btcoexist, 0x4f);
+	CL_SPRINTF(cli_buf, BT_TMP_BUF_SIZE, "\r\n %-35s = 0x%x/ 0x%x", "0x40/ 0x4f", \
+		u1_tmp[0], u1_tmp[1]);
+	CL_PRINTF(cli_buf);
+
+	u4_tmp[0] = btcoexist->btc_read_4byte(btcoexist, 0x550);
+	u1_tmp[0] = btcoexist->btc_read_1byte(btcoexist, 0x522);
+	CL_SPRINTF(cli_buf, BT_TMP_BUF_SIZE, "\r\n %-35s = 0x%x/ 0x%x", "0x550(bcn ctrl)/0x522", \
+		u4_tmp[0], u1_tmp[0]);
+	CL_PRINTF(cli_buf);
+
+	u4_tmp[0] = btcoexist->btc_read_4byte(btcoexist, 0xc50);
+	CL_SPRINTF(cli_buf, BT_TMP_BUF_SIZE, "\r\n %-35s = 0x%x", "0xc50(dig)", \
+		u4_tmp[0]);
+	CL_PRINTF(cli_buf);
+
+#if 0
+	u4_tmp[0] = btcoexist->btc_read_4byte(btcoexist, 0xf48);
+	u4_tmp[1] = btcoexist->btc_read_4byte(btcoexist, 0xf4c);
+	CL_SPRINTF(cli_buf, BT_TMP_BUF_SIZE, "\r\n %-35s = 0x%x/ 0x%x", "0xf48/ 0xf4c (FA cnt)", \
+		u4_tmp[0], u4_tmp[1]);
+	CL_PRINTF(cli_buf);
+#endif
+
+	u4_tmp[0] = btcoexist->btc_read_4byte(btcoexist, 0x6c0);
+	u4_tmp[1] = btcoexist->btc_read_4byte(btcoexist, 0x6c4);
+	u4_tmp[2] = btcoexist->btc_read_4byte(btcoexist, 0x6c8);
+	u1_tmp[0] = btcoexist->btc_read_1byte(btcoexist, 0x6cc);
+	CL_SPRINTF(cli_buf, BT_TMP_BUF_SIZE, "\r\n %-35s = 0x%x/ 0x%x/ 0x%x/ 0x%x", "0x6c0/0x6c4/0x6c8/0x6cc(coexTable)", \
+		u4_tmp[0], u4_tmp[1], u4_tmp[2], u1_tmp[0]);
+	CL_PRINTF(cli_buf);
+
+	CL_SPRINTF(cli_buf, BT_TMP_BUF_SIZE, "\r\n %-35s = %d/ %d", "0x770(hp rx[31:16]/tx[15:0])", \
+		coex_sta->high_priority_rx, coex_sta->high_priority_tx);
+	CL_PRINTF(cli_buf);
+	CL_SPRINTF(cli_buf, BT_TMP_BUF_SIZE, "\r\n %-35s = %d/ %d", "0x774(lp rx[31:16]/tx[15:0])", \
+		coex_sta->low_priority_rx, coex_sta->low_priority_tx);
+	CL_PRINTF(cli_buf);
+	
+	btcoexist->btc_disp_dbg_msg(btcoexist, BTC_DBG_DISP_COEX_STATISTICS);
+}
+
+
+void
+EXhalbtc8812a1ant_IpsNotify(
+	 	PBTC_COEXIST		btcoexist,
+	 	u1Byte			type
+	)
+{
+	u4Byte	u4_tmp=0;
+
+	if(btcoexist->manual_control ||	btcoexist->stop_coex_dm)
+		return;
+
+	if(BTC_IPS_ENTER == type)
+	{
+		BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY, ("[BTCoex], IPS ENTER notify\n"));
+		coex_sta->under_ips = true;
+
+		// 0x4c[23]=1
+		u4_tmp = btcoexist->btc_read_4byte(btcoexist, 0x4c);
+		u4_tmp |= BIT23;
+		btcoexist->btc_write_4byte(btcoexist, 0x4c, u4_tmp);
+		
+		halbtc8812a1ant_CoexAllOff(btcoexist);
+	}
+	else if(BTC_IPS_LEAVE == type)
+	{
+		BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY, ("[BTCoex], IPS LEAVE notify\n"));
+		coex_sta->under_ips = false;
+		//halbtc8812a1ant_InitCoexDm(btcoexist);
+	}
+}
+
+void
+EXhalbtc8812a1ant_LpsNotify(
+	 	PBTC_COEXIST		btcoexist,
+	 	u1Byte			type
+	)
+{
+	if(btcoexist->manual_control || btcoexist->stop_coex_dm)
+		return;
+
+	if(BTC_LPS_ENABLE == type)
+	{
+		BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY, ("[BTCoex], LPS ENABLE notify\n"));
+		coex_sta->under_lps = true;
+	}
+	else if(BTC_LPS_DISABLE == type)
+	{
+		BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY, ("[BTCoex], LPS DISABLE notify\n"));
+		coex_sta->under_lps = false;
+	}
+}
+
+void
+EXhalbtc8812a1ant_ScanNotify(
+	 	PBTC_COEXIST		btcoexist,
+	 	u1Byte			type
+	)
+{
+	PBTC_STACK_INFO 	stack_info=&btcoexist->stack_info;
+	BOOLEAN 		wifi_connected=false;	
+
+	if(btcoexist->manual_control ||btcoexist->stop_coex_dm)
+		return;
+
+	btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_CONNECTED, &wifi_connected);
+	if(BTC_SCAN_START == type)
+	{	
+		BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY, ("[BTCoex], SCAN START notify\n"));
+		if(!wifi_connected)	// non-connected scan
+		{
+			//set 0x550[3]=1 before PsTdma
+			//halbtc8812a1ant_Reg0x550Bit3(btcoexist, true);
+			halbtc8812a1ant_ActionWifiNotConnectedAssoAuthScan(btcoexist);
+		}
+		else	// wifi is connected
+		{
+			halbtc8812a1ant_ActionWifiConnectedScan(btcoexist);
+		}
+	}
+	else if(BTC_SCAN_FINISH == type)
+	{
+		BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY, ("[BTCoex], SCAN FINISH notify\n"));
+		if(!wifi_connected)	// non-connected scan
+		{
+			//halbtc8812a1ant_Reg0x550Bit3(btcoexist, false);
+			halbtc8812a1ant_ActionWifiNotConnected(btcoexist);
+		}
+		else
+		{
+			halbtc8812a1ant_ActionWifiConnected(btcoexist);
+		}
+	}
+}
+
+void
+EXhalbtc8812a1ant_ConnectNotify(
+	 	PBTC_COEXIST		btcoexist,
+	 	u1Byte			type
+	)
+{
+	PBTC_STACK_INFO 	stack_info=&btcoexist->stack_info;
+	BOOLEAN			wifi_connected=false;	
+
+	if(btcoexist->manual_control ||btcoexist->stop_coex_dm)
+		return;
+
+	if(BTC_ASSOCIATE_START == type)
+	{
+		BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY, ("[BTCoex], CONNECT START notify\n"));
+		if(btcoexist->bt_info.bt_disabled)
+		{			
+			halbtc8812a1ant_PsTdmaCheckForPowerSaveState(btcoexist, false);
+			btcoexist->btc_set(btcoexist, BTC_SET_ACT_LEAVE_LPS, NULL);
+			halbtc8812a1ant_PsTdma(btcoexist, NORMAL_EXEC, false, 9);
+		}
+		else
+		{
+			halbtc8812a1ant_ActionWifiNotConnectedAssoAuthScan(btcoexist);
+		}
+	}
+	else if(BTC_ASSOCIATE_FINISH == type)
+	{
+		BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY, ("[BTCoex], CONNECT FINISH notify\n"));
+		
+		btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_CONNECTED, &wifi_connected);
+		if(!wifi_connected) // wifi still not connected
+		{
+			//halbtc8812a1ant_Reg0x550Bit3(btcoexist, false);
+			halbtc8812a1ant_ActionWifiNotConnected(btcoexist);
+		}
+		else
+		{
+			halbtc8812a1ant_ActionWifiConnected(btcoexist);
+		}
+	}
+}
+
+void
+EXhalbtc8812a1ant_MediaStatusNotify(
+	 	PBTC_COEXIST			btcoexist,
+	 	u1Byte				type
+	)
+{
+	u1Byte			dataLen=5;
+	u1Byte			buf[6] = {0};
+	u1Byte			h2c_parameter[3] ={0};
+	BOOLEAN			wifi_under5g=false;
+	u4Byte			wifi_bw;
+	u1Byte			wifi_central_chnl;
+
+	if(btcoexist->manual_control ||btcoexist->stop_coex_dm)
+		return;
+
+	btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_UNDER_5G, &wifi_under5g);
+
+	// only for 2.4G do we need to inform BT of the channel mask
+	if(!wifi_under5g)
+	{
+		if(BTC_MEDIA_CONNECT == type)
+		{
+			h2c_parameter[0] = 0x1;
+		}
+		btcoexist->btc_get(btcoexist, BTC_GET_U4_WIFI_BW, &wifi_bw);
+		btcoexist->btc_get(btcoexist, BTC_GET_U1_WIFI_CENTRAL_CHNL, &wifi_central_chnl);
+		h2c_parameter[1] = wifi_central_chnl;
+		if(BTC_WIFI_BW_HT40 == wifi_bw)
+			h2c_parameter[2] = 0x30;
+		else
+			h2c_parameter[2] = 0x20;
+	}
+		
+	coex_dm->wifi_chnl_info[0] = h2c_parameter[0];
+	coex_dm->wifi_chnl_info[1] = h2c_parameter[1];
+	coex_dm->wifi_chnl_info[2] = h2c_parameter[2];
+	
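+	// pack the channel info as a BT coex control element:
+	// [payload length, opcode 0x5, opcode length 0x3, the 3 channel bytes]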
+	buf[0] = dataLen;
+	buf[1] = 0x5;				// OP_Code
+	buf[2] = 0x3;				// OP_Code_Length
+	buf[3] = h2c_parameter[0]; 	// OP_Code_Content
+	buf[4] = h2c_parameter[1];
+	buf[5] = h2c_parameter[2];
+		
+	btcoexist->btc_set(btcoexist, BTC_SET_ACT_CTRL_BT_COEX, (PVOID)&buf[0]);		
+}
+
+void
+EXhalbtc8812a1ant_SpecialPacketNotify(
+	 	PBTC_COEXIST			btcoexist,
+	 	u1Byte				type
+	)
+{
+	BOOLEAN 	bSecurityLink=false;
+
+	if(btcoexist->manual_control ||btcoexist->stop_coex_dm)
+		return;
+
+	//if(type == BTC_PACKET_DHCP)
+	{
+		BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY, ("[BTCoex], special Packet(%d) notify\n", type));
+		if(btcoexist->bt_info.bt_disabled)
+		{
+			halbtc8812a1ant_PsTdmaCheckForPowerSaveState(btcoexist, false);
+			btcoexist->btc_set(btcoexist, BTC_SET_ACT_LEAVE_LPS, NULL);
+			halbtc8812a1ant_PsTdma(btcoexist, NORMAL_EXEC, false, 9);	
+		}
+		else
+		{
+			halbtc8812a1ant_ActionWifiConnectedSpecialPacket(btcoexist);
+		}
+	}
+}
+
+void
+EXhalbtc8812a1ant_BtInfoNotify(
+	 	PBTC_COEXIST		btcoexist,
+	 	pu1Byte			tmp_buf,
+	 	u1Byte			length
+	)
+{
+	u1Byte			bt_info=0;
+	u1Byte			i, rsp_source=0;
+	static u4Byte		set_bt_lna_cnt=0, set_bt_psd_mode=0;
+	BOOLEAN			bt_busy=false, limited_dig=false;
+	BOOLEAN			wifi_connected=false;
+	BOOLEAN			bRejApAggPkt=false;
+
+	BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], BtInfoNotify()===>\n"));
+
+
+	rsp_source = tmp_buf[0]&0xf;
+	if(rsp_source >= BT_INFO_SRC_8812A_1ANT_MAX)
+		rsp_source = BT_INFO_SRC_8812A_1ANT_WIFI_FW;
+	coex_sta->bt_info_c2h_cnt[rsp_source]++;
+
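+	// stash the raw C2H payload; byte 1 is the BT status bit field
+	// (BT_INFO_8812A_1ANT_B_*) decoded further below.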
+	BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY, ("[BTCoex], Bt info[%d], length=%d, hex data=[", rsp_source, length));
+	for(i=0; i<length; i++)
+	{
+		coex_sta->bt_info_c2h[rsp_source][i] = tmp_buf[i];
+		if(i == 1)
+			bt_info = tmp_buf[i];
+		if(i == length-1)
+		{
+			BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY, ("0x%2x]\n", tmp_buf[i]));
+		}
+		else
+		{
+			BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY, ("0x%2x, ", tmp_buf[i]));
+		}
+	}
+
+	if(btcoexist->manual_control)
+	{
+		BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], BtInfoNotify(), return for Manual CTRL<===\n"));
+		return;
+	}
+	if(btcoexist->stop_coex_dm)
+	{
+		BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], BtInfoNotify(), return for Coex STOPPED!!<===\n"));
+		return;
+	}
+
+	if(BT_INFO_SRC_8812A_1ANT_WIFI_FW != rsp_source)
+	{
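+		// for reports that really come from BT, bytes 2/3/4 carry the retry
+		// count, the RSSI (raw*2+10) and the extension flags.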
+		coex_sta->bt_retry_cnt =
+			coex_sta->bt_info_c2h[rsp_source][2];
+
+		coex_sta->bt_rssi =
+			coex_sta->bt_info_c2h[rsp_source][3]*2+10;
+
+		coex_sta->bt_info_ext = 
+			coex_sta->bt_info_c2h[rsp_source][4];
+
+		// Here we need to resend some wifi info to BT
+		// because bt has been reset and lost the info.
+		if( (coex_sta->bt_info_ext & BIT1) )
+		{			
+			btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_CONNECTED, &wifi_connected);
+			if(wifi_connected)
+			{
+				EXhalbtc8812a1ant_MediaStatusNotify(btcoexist, BTC_MEDIA_CONNECT);
+			}
+			else
+			{
+				EXhalbtc8812a1ant_MediaStatusNotify(btcoexist, BTC_MEDIA_DISCONNECT);
+			}
+
+			set_bt_psd_mode = 0;
+		}
+
+		// the test-chip bt patch doesn't support this, temporarily removed;
+		// needs to be added back for the mp-chip. 12/20/2012
+#if 0		
+		if(set_bt_psd_mode <= 3)
+		{
+
+			halbtc8812a1ant_SetBtPsdMode(btcoexist, FORCE_EXEC, 0xd);
+			set_bt_psd_mode++;
+		}
+		
+		if(coex_dm->bCurBtLnaConstrain)
+		{
+			if( (coex_sta->bt_info_ext & BIT2) )
+			{
+			}
+			else
+			{
+				if(set_bt_lna_cnt <= 3)
+				{
+					halbtc8812a1ant_SetBtLnaConstrain(btcoexist, FORCE_EXEC, true);
+					set_bt_lna_cnt++;
+				}
+			}
+		}
+		else
+		{
+			set_bt_lna_cnt = 0;
+		}
+#endif
+		// the test-chip bt patch only reports the status for BT_RSP,
+		// so for now we consider the following only under BT_RSP
+		if(BT_INFO_SRC_8812A_1ANT_BT_RSP == rsp_source)
+		{
+			if( (coex_sta->bt_info_ext & BIT3) )
+			{
+			#if 0	// temporarily disabled because the bt patch reports the wrong value.
+				halbtc8812a1ant_IgnoreWlanAct(btcoexist, FORCE_EXEC, false);
+			#endif
+			}
+			else
+			{
+				// BT already does NOT ignore Wlan active, do nothing here.
+			}
+
+			if( (coex_sta->bt_info_ext & BIT4) )
+			{
+				// BT auto report already enabled, do nothing
+			}
+			else
+			{
+				halbtc8812a1ant_BtAutoReport(btcoexist, FORCE_EXEC, true);
+			}
+		}
+	}
+		
+	// check BIT2 first ==> check if bt is under inquiry or page scan
+	if(bt_info & BT_INFO_8812A_1ANT_B_INQ_PAGE)
+	{
+		coex_sta->c2h_bt_inquiry_page = true;
+		coex_dm->bt_status = BT_8812A_1ANT_BT_STATUS_INQ_PAGE;
+	}
+	else
+	{
+		coex_sta->c2h_bt_inquiry_page = false;
+		if(!(bt_info&BT_INFO_8812A_1ANT_B_CONNECTION))
+		{
+			coex_dm->bt_status = BT_8812A_1ANT_BT_STATUS_NON_CONNECTED_IDLE;
+			BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], BtInfoNotify(), bt non-connected idle!!!\n"));
+		}
+		else if(bt_info == BT_INFO_8812A_1ANT_B_CONNECTION)	// connection exists but not busy
+		{
+			coex_dm->bt_status = BT_8812A_1ANT_BT_STATUS_CONNECTED_IDLE;
+			BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], BtInfoNotify(), bt connected-idle!!!\n"));
+		}		
+		else if((bt_info&BT_INFO_8812A_1ANT_B_SCO_ESCO) ||
+			(bt_info&BT_INFO_8812A_1ANT_B_SCO_BUSY))
+		{
+			coex_dm->bt_status = BT_8812A_1ANT_BT_STATUS_SCO_BUSY;
+			bRejApAggPkt = true;
+			BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], BtInfoNotify(), bt sco busy!!!\n"));
+		}
+		else if(bt_info&BT_INFO_8812A_1ANT_B_ACL_BUSY)
+		{
+			if(BT_8812A_1ANT_BT_STATUS_ACL_BUSY != coex_dm->bt_status)
+				coex_dm->reset_tdma_adjust = true;
+			coex_dm->bt_status = BT_8812A_1ANT_BT_STATUS_ACL_BUSY;
+			BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], BtInfoNotify(), bt acl busy!!!\n"));
+		}
+#if 0
+		else if(bt_info&BT_INFO_8812A_1ANT_B_SCO_ESCO)
+		{
+			coex_dm->bt_status = BT_8812A_1ANT_BT_STATUS_ACL_SCO_BUSY;
+			BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], BtInfoNotify(), bt acl/sco busy!!!\n"));
+		}
+#endif
+		else
+		{
+			//DbgPrint("error, undefined bt_info=0x%x\n", bt_info);
+			coex_dm->bt_status = BT_8812A_1ANT_BT_STATUS_MAX;
+			BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], BtInfoNotify(), bt non-defined state!!!\n"));
+		}
+
+		// send delete BA to disable aggregation
+		//btcoexist->btc_set(btcoexist, BTC_SET_BL_TO_REJ_AP_AGG_PKT, &bRejApAggPkt);
+	}
+
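+	// fold the decoded BT status into the busy/limited-DIG hints consumed by
+	// the wifi side.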
+	if( (BT_8812A_1ANT_BT_STATUS_ACL_BUSY == coex_dm->bt_status) ||
+		(BT_8812A_1ANT_BT_STATUS_SCO_BUSY == coex_dm->bt_status) ||
+		(BT_8812A_1ANT_BT_STATUS_ACL_SCO_BUSY == coex_dm->bt_status) )
+	{
+		bt_busy = true;
+	}
+	else
+	{
+		bt_busy = false;
+	}
+	btcoexist->btc_set(btcoexist, BTC_SET_BL_BT_TRAFFIC_BUSY, &bt_busy);
+
+	if(bt_busy)
+	{
+		limited_dig = true;
+	}
+	else
+	{
+		limited_dig = false;
+	}
+	coex_dm->limited_dig = limited_dig;
+	btcoexist->btc_set(btcoexist, BTC_SET_BL_BT_LIMITED_DIG, &limited_dig);
+
+	halbtc8812a1ant_RunCoexistMechanism(btcoexist);
+	BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], BtInfoNotify()<===\n"));
+}
+
+void
+EXhalbtc8812a1ant_StackOperationNotify(
+	 	PBTC_COEXIST			btcoexist,
+	 	u1Byte				type
+	)
+{
+	if(BTC_STACK_OP_INQ_PAGE_PAIR_START == type)
+	{
+		BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY, ("[BTCoex], StackOP Inquiry/page/pair start notify\n"));
+	}
+	else if(BTC_STACK_OP_INQ_PAGE_PAIR_FINISH == type)
+	{
+		BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY, ("[BTCoex], StackOP Inquiry/page/pair finish notify\n"));
+	}
+}
+
+void
+EXhalbtc8812a1ant_HaltNotify(
+	 	PBTC_COEXIST			btcoexist
+	)
+{
+	BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY, ("[BTCoex], Halt notify\n"));
+
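+	// on halt: make BT ignore WLAN activity, stop PS TDMA and back out the
+	// wifi coex parameter adjustments.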
+	halbtc8812a1ant_IgnoreWlanAct(btcoexist, FORCE_EXEC, true);
+	halbtc8812a1ant_PsTdma(btcoexist, FORCE_EXEC, false, 0);
+	btcoexist->btc_write_1byte(btcoexist, 0x4f, 0xf);
+	halbtc8812a1ant_WifiParaAdjust(btcoexist, false);
+}
+
+void
+EXhalbtc8812a1ant_PnpNotify(
+	 	PBTC_COEXIST			btcoexist,
+	 	u1Byte				pnpState
+	)
+{
+	BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY, ("[BTCoex], Pnp notify\n"));
+
+	if(BTC_WIFI_PNP_SLEEP == pnpState)
+	{
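+		// entering PNP sleep: freeze the coex state machine and leave BT/WLAN
+		// arbitration in a safe state before suspend.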
+		btcoexist->stop_coex_dm = true;
+		halbtc8812a1ant_IgnoreWlanAct(btcoexist, FORCE_EXEC, true);
+		halbtc8812a1ant_PsTdmaCheckForPowerSaveState(btcoexist, false);
+		btcoexist->btc_set(btcoexist, BTC_SET_ACT_LEAVE_LPS, NULL);
+		halbtc8812a1ant_PsTdma(btcoexist, NORMAL_EXEC, false, 9);	
+	}
+	else if(BTC_WIFI_PNP_WAKE_UP == pnpState)
+	{
+		
+	}
+}
+
+void
+EXhalbtc8812a1ant_Periodical(
+	 	PBTC_COEXIST			btcoexist
+	)
+{
+	BOOLEAN			wifi_under5g=false;
+
+	BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], Periodical()===>\n"));
+	BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY, ("[BTCoex], 1Ant Periodical!!\n"));
+
+	btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_UNDER_5G, &wifi_under5g);
+
+	if(wifi_under5g)
+	{
+		BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], Periodical(), return for 5G<===\n"));
+		halbtc8812a1ant_CoexAllOff(btcoexist);
+		return;
+	}
+
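+	// periodic housekeeping: poll BT info, sample the traffic counters and
+	// watch for BT being enabled or disabled.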
+	halbtc8812a1ant_QueryBtInfo(btcoexist);
+	halbtc8812a1ant_MonitorBtCtr(btcoexist);
+	halbtc8812a1ant_MonitorBtEnableDisable(btcoexist);
+	BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], Periodical()<===\n"));
+}
+
+void
+EXhalbtc8812a1ant_DbgControl(
+	 	PBTC_COEXIST			btcoexist,
+	 	u1Byte				opCode,
+	 	u1Byte				opLen,
+	 	pu1Byte				pData
+	)
+{
+	switch(opCode)
+	{
+		case BTC_DBG_SET_COEX_NORMAL:
+			btcoexist->manual_control = false;
+			halbtc8812a1ant_InitCoexDm(btcoexist);
+			break;
+		case BTC_DBG_SET_COEX_WIFI_ONLY:
+			btcoexist->manual_control = true;
+			halbtc8812a1ant_PsTdmaCheckForPowerSaveState(btcoexist, false);
+			btcoexist->btc_set(btcoexist, BTC_SET_ACT_LEAVE_LPS, NULL);
+			halbtc8812a1ant_PsTdma(btcoexist, NORMAL_EXEC, false, 9);	
+			break;
+		case BTC_DBG_SET_COEX_BT_ONLY:
+			// todo
+			break;
+		default:
+			break;
+	}
+}
+#endif
+
diff --git a/drivers/staging/rtl8821ae/btcoexist/HalBtc8812a1Ant.h b/drivers/staging/rtl8821ae/btcoexist/HalBtc8812a1Ant.h
new file mode 100644
index 0000000..37bdab5
--- /dev/null
+++ b/drivers/staging/rtl8821ae/btcoexist/HalBtc8812a1Ant.h
@@ -0,0 +1,205 @@
+//===========================================
+// The following is for 8812A_1ANT BT Co-exist definition
+//===========================================
+#define	BT_INFO_8812A_1ANT_B_FTP						BIT7
+#define	BT_INFO_8812A_1ANT_B_A2DP					BIT6
+#define	BT_INFO_8812A_1ANT_B_HID						BIT5
+#define	BT_INFO_8812A_1ANT_B_SCO_BUSY				BIT4
+#define	BT_INFO_8812A_1ANT_B_ACL_BUSY				BIT3
+#define	BT_INFO_8812A_1ANT_B_INQ_PAGE				BIT2
+#define	BT_INFO_8812A_1ANT_B_SCO_ESCO				BIT1
+#define	BT_INFO_8812A_1ANT_B_CONNECTION				BIT0
+
+#define	BT_INFO_8812A_1ANT_A2DP_BASIC_RATE(_BT_INFO_EXT_)	\
+		(((_BT_INFO_EXT_&BIT0))? true:false)
+
+#define	BTC_RSSI_COEX_THRESH_TOL_8812A_1ANT		2
+
+#define IN
+#define OUT
+
+typedef enum _BT_INFO_SRC_8812A_1ANT{
+	BT_INFO_SRC_8812A_1ANT_WIFI_FW			= 0x0,
+	BT_INFO_SRC_8812A_1ANT_BT_RSP				= 0x1,
+	BT_INFO_SRC_8812A_1ANT_BT_ACTIVE_SEND		= 0x2,
+	BT_INFO_SRC_8812A_1ANT_MAX
+}BT_INFO_SRC_8812A_1ANT,*PBT_INFO_SRC_8812A_1ANT;
+
+typedef enum _BT_8812A_1ANT_BT_STATUS{
+	BT_8812A_1ANT_BT_STATUS_NON_CONNECTED_IDLE	= 0x0,
+	BT_8812A_1ANT_BT_STATUS_CONNECTED_IDLE		= 0x1,
+	BT_8812A_1ANT_BT_STATUS_INQ_PAGE				= 0x2,
+	BT_8812A_1ANT_BT_STATUS_ACL_BUSY				= 0x3,
+	BT_8812A_1ANT_BT_STATUS_SCO_BUSY				= 0x4,
+	BT_8812A_1ANT_BT_STATUS_ACL_SCO_BUSY			= 0x5,
+	BT_8812A_1ANT_BT_STATUS_MAX
+}BT_8812A_1ANT_BT_STATUS,*PBT_8812A_1ANT_BT_STATUS;
+
+typedef enum _BT_8812A_1ANT_COEX_ALGO{
+	BT_8812A_1ANT_COEX_ALGO_UNDEFINED			= 0x0,
+	BT_8812A_1ANT_COEX_ALGO_SCO				= 0x1,
+	BT_8812A_1ANT_COEX_ALGO_HID				= 0x2,
+	BT_8812A_1ANT_COEX_ALGO_A2DP				= 0x3,
+	BT_8812A_1ANT_COEX_ALGO_A2DP_PANHS		= 0x4,
+	BT_8812A_1ANT_COEX_ALGO_PANEDR			= 0x5,
+	BT_8812A_1ANT_COEX_ALGO_PANHS			= 0x6,
+	BT_8812A_1ANT_COEX_ALGO_PANEDR_A2DP		= 0x7,
+	BT_8812A_1ANT_COEX_ALGO_PANEDR_HID		= 0x8,
+	BT_8812A_1ANT_COEX_ALGO_HID_A2DP_PANEDR	= 0x9,
+	BT_8812A_1ANT_COEX_ALGO_HID_A2DP			= 0xa,
+	BT_8812A_1ANT_COEX_ALGO_MAX				= 0xb,
+}BT_8812A_1ANT_COEX_ALGO,*PBT_8812A_1ANT_COEX_ALGO;
+
+typedef struct _COEX_DM_8812A_1ANT{
+	// fw mechanism
+	bool		pre_dec_bt_pwr;
+	bool		cur_dec_bt_pwr;
+	bool		bPreBtLnaConstrain;
+	bool		bCurBtLnaConstrain;
+	u8		bPreBtPsdMode;
+	u8		bCurBtPsdMode;
+	u8		pre_fw_dac_swing_lvl;
+	u8		cur_fw_dac_swing_lvl;
+	bool		cur_ignore_wlan_act;
+	bool		pre_ignore_wlan_act;
+	u8		pre_ps_tdma;
+	u8		cur_ps_tdma;
+	u8		ps_tdma_para[5];
+	u8		ps_tdma_du_adj_type;
+	bool		reset_tdma_adjust;
+	bool		pre_ps_tdma_on;
+	bool		cur_ps_tdma_on;
+	bool		pre_bt_auto_report;
+	bool		cur_bt_auto_report;
+	u8		pre_lps;
+	u8		cur_lps;
+	u8		pre_rpwm;
+	u8		cur_rpwm;
+
+	// sw mechanism
+	bool		pre_rf_rx_lpf_shrink;
+	bool		cur_rf_rx_lpf_shrink;
+	u32		bt_rf0x1e_backup;
+	bool 	pre_low_penalty_ra;
+	bool		cur_low_penalty_ra;
+	bool		pre_dac_swing_on;
+	u32		pre_dac_swing_lvl;
+	bool		cur_dac_swing_on;
+	u32		cur_dac_swing_lvl;
+	bool		pre_adc_back_off;
+	bool		cur_adc_back_off;
+	bool 	pre_agc_table_en;
+	bool		cur_agc_table_en;
+	u32		pre_val0x6c0;
+	u32		cur_val0x6c0;
+	u32		pre_val0x6c4;
+	u32		cur_val0x6c4;
+	u32		pre_val0x6c8;
+	u32		cur_val0x6c8;
+	u8		pre_val0x6cc;
+	u8		cur_val0x6cc;
+	bool		limited_dig;
+
+	// algorithm related
+	u8		pre_algorithm;
+	u8		cur_algorithm;
+	u8		bt_status;
+	u8		wifi_chnl_info[3];
+
+	u8		error_condition;
+} COEX_DM_8812A_1ANT, *PCOEX_DM_8812A_1ANT;
+
+typedef struct _COEX_STA_8812A_1ANT{
+	bool					under_lps;
+	bool					under_ips;
+	u32					high_priority_tx;
+	u32					high_priority_rx;
+	u32					low_priority_tx;
+	u32					low_priority_rx;
+	u8					bt_rssi;
+	u8					pre_bt_rssi_state;
+	u8					pre_wifi_rssi_state[4];
+	bool					c2h_bt_info_req_sent;
+	u8					bt_info_c2h[BT_INFO_SRC_8812A_1ANT_MAX][10];
+	u32					bt_info_c2h_cnt[BT_INFO_SRC_8812A_1ANT_MAX];
+	bool					c2h_bt_inquiry_page;
+	u8					bt_retry_cnt;
+	u8					bt_info_ext;
+}COEX_STA_8812A_1ANT, *PCOEX_STA_8812A_1ANT;
+
+//===========================================
+// The following is interface which will notify coex module.
+//===========================================
+void
+EXhalbtc8812a1ant_InitHwConfig(
+	 	PBTC_COEXIST		btcoexist
+	);
+void
+EXhalbtc8812a1ant_InitCoexDm(
+	 	PBTC_COEXIST		btcoexist
+	);
+void
+EXhalbtc8812a1ant_IpsNotify(
+	 	PBTC_COEXIST		btcoexist,
+	 	u8			type
+	);
+void
+EXhalbtc8812a1ant_LpsNotify(
+	 	PBTC_COEXIST		btcoexist,
+	 	u8			type
+	);
+void
+EXhalbtc8812a1ant_ScanNotify(
+	 	PBTC_COEXIST		btcoexist,
+	 	u8			type
+	);
+void
+EXhalbtc8812a1ant_ConnectNotify(
+	 	PBTC_COEXIST		btcoexist,
+	 	u8			type
+	);
+void
+EXhalbtc8812a1ant_MediaStatusNotify(
+	 	PBTC_COEXIST			btcoexist,
+	 	u8				type
+	);
+void
+EXhalbtc8812a1ant_SpecialPacketNotify(
+	 	PBTC_COEXIST			btcoexist,
+	 	u8				type
+	);
+void
+EXhalbtc8812a1ant_BtInfoNotify(
+	 	PBTC_COEXIST		btcoexist,
+	 	u8			*tmp_buf,
+	 	u8			length
+	);
+void
+EXhalbtc8812a1ant_StackOperationNotify(
+	 	PBTC_COEXIST			btcoexist,
+	 	u8				type
+	);
+void
+EXhalbtc8812a1ant_HaltNotify(
+	 	PBTC_COEXIST			btcoexist
+	);
+void
+EXhalbtc8812a1ant_PnpNotify(
+	 	PBTC_COEXIST			btcoexist,
+	 	u8				pnpState
+	);
+void
+EXhalbtc8812a1ant_Periodical(
+	 	PBTC_COEXIST			btcoexist
+	);
+void
+EXhalbtc8812a1ant_DisplayCoexInfo(
+	 	PBTC_COEXIST		btcoexist
+	);
+void
+EXhalbtc8812a1ant_DbgControl(
+	 	PBTC_COEXIST			btcoexist,
+	 	u8				opCode,
+	 	u8				opLen,
+	 	u8 				*pData
+	);
diff --git a/drivers/staging/rtl8821ae/btcoexist/habtc8723a1ant.c b/drivers/staging/rtl8821ae/btcoexist/habtc8723a1ant.c
new file mode 100644
index 0000000..e619923
--- /dev/null
+++ b/drivers/staging/rtl8821ae/btcoexist/habtc8723a1ant.c
@@ -0,0 +1,1614 @@
+//============================================================
+// Description:
+//
+// This file is for RTL8723A Co-exist mechanism
+//
+// History
+// 2012/08/22 Cosa first check in.
+// 2012/11/14 Cosa Revise for 8723A 1Ant out sourcing.
+//
+//============================================================
+
+//============================================================
+// include files
+//============================================================
+#include "Mp_Precomp.h"
+#if(BT_30_SUPPORT == 1)
+//============================================================
+// Global variables, these are static variables
+//============================================================
+static COEX_DM_8723A_1ANT	GLCoexDm8723a1Ant;
+static PCOEX_DM_8723A_1ANT 	pCoexDm=&GLCoexDm8723a1Ant;
+static COEX_STA_8723A_1ANT	GLCoexSta8723a1Ant;
+static PCOEX_STA_8723A_1ANT	pCoexSta=&GLCoexSta8723a1Ant;
+
+const char *const GLBtInfoSrc8723a1Ant[]={
+	"BT Info[wifi fw]",
+	"BT Info[bt rsp]",
+	"BT Info[bt auto report]",
+};
+
+//============================================================
+// local function proto type if needed
+//============================================================
+//============================================================
+// local function start with halbtc8723a1ant_
+//============================================================
+VOID
+halbtc8723a1ant_Reg0x550Bit3(
+	IN	PBTC_COEXIST		pBtCoexist,
+	IN	BOOLEAN			bSet
+	)
+{
+	u1Byte	u1tmp=0;
+	
+	u1tmp = pBtCoexist->btc_read_1byte(pBtCoexist, 0x550);
+	if(bSet)
+	{
+		u1tmp |= BIT3;
+	}
+	else
+	{
+		u1tmp &= ~BIT3;
+	}
+	pBtCoexist->btc_write_1byte(pBtCoexist, 0x550, u1tmp);
+	BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], set 0x550[3]=%d\n", (bSet? 1:0)));
+}
+
+VOID
+halbtc8723a1ant_NotifyFwScan(
+	IN	PBTC_COEXIST		pBtCoexist,
+	IN	u1Byte			scanType
+	)
+{
+	u1Byte			H2C_Parameter[1] ={0};
+	
+	if(BTC_SCAN_START == scanType)
+		H2C_Parameter[0] = 0x1;
+
+	BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_EXEC, ("[BTCoex], Notify FW for wifi scan, write 0x3b=0x%x\n", 
+		H2C_Parameter[0]));
+
+	pBtCoexist->btc_fill_h2c(pBtCoexist, 0x3b, 1, H2C_Parameter);
+}
+
+VOID
+halbtc8723a1ant_QueryBtInfo(
+	IN	PBTC_COEXIST		pBtCoexist
+	)
+{
+	u1Byte			H2C_Parameter[1] ={0};
+
+	pCoexSta->bC2hBtInfoReqSent = true;
+
+	H2C_Parameter[0] |= BIT0;	// trigger
+
+	BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_EXEC, ("[BTCoex], Query Bt Info, FW write 0x38=0x%x\n", 
+		H2C_Parameter[0]));
+
+	pBtCoexist->btc_fill_h2c(pBtCoexist, 0x38, 1, H2C_Parameter);
+}
+
+VOID
+halbtc8723a1ant_SetSwRfRxLpfCorner(
+	IN	PBTC_COEXIST		pBtCoexist,
+	IN	BOOLEAN			bRxRfShrinkOn
+	)
+{
+	if(bRxRfShrinkOn)
+	{
+		//Shrink RF Rx LPF corner
+		BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_EXEC, ("[BTCoex], Shrink RF Rx LPF corner!!\n"));
+		pBtCoexist->btc_set_rf_reg(pBtCoexist, BTC_RF_A, 0x1e, 0xfffff, 0xf0ff7);
+	}
+	else
+	{
+		//Resume RF Rx LPF corner
+		// After initialization, we can use pCoexDm->btRf0x1eBackup
+		if(pBtCoexist->initilized)
+		{
+			BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_EXEC, ("[BTCoex], Resume RF Rx LPF corner!!\n"));
+			pBtCoexist->btc_set_rf_reg(pBtCoexist, BTC_RF_A, 0x1e, 0xfffff, pCoexDm->btRf0x1eBackup);
+		}
+	}
+}
+
+VOID
+halbtc8723a1ant_RfShrink(
+	IN	PBTC_COEXIST		pBtCoexist,
+	IN	BOOLEAN			bForceExec,
+	IN	BOOLEAN			bRxRfShrinkOn
+	)
+{
+	BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW, ("[BTCoex], %s turn Rx RF Shrink = %s\n",  
+		(bForceExec? "force to":""), ((bRxRfShrinkOn)? "ON":"OFF")));
+	pCoexDm->bCurRfRxLpfShrink = bRxRfShrinkOn;
+
+	if(!bForceExec)
+	{
+		BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_DETAIL, ("[BTCoex], bPreRfRxLpfShrink=%d, bCurRfRxLpfShrink=%d\n", 
+			pCoexDm->bPreRfRxLpfShrink, pCoexDm->bCurRfRxLpfShrink));
+
+		if(pCoexDm->bPreRfRxLpfShrink == pCoexDm->bCurRfRxLpfShrink) 
+			return;
+	}
+	halbtc8723a1ant_SetSwRfRxLpfCorner(pBtCoexist, pCoexDm->bCurRfRxLpfShrink);
+
+	pCoexDm->bPreRfRxLpfShrink = pCoexDm->bCurRfRxLpfShrink;
+}
+
+VOID
+halbtc8723a1ant_SetSwPenaltyTxRateAdaptive(
+	IN	PBTC_COEXIST		pBtCoexist,
+	IN	BOOLEAN			bLowPenaltyRa
+	)
+{
+	u1Byte	tmpU1;
+
+	tmpU1 = pBtCoexist->btc_read_1byte(pBtCoexist, 0x4fd);
+	tmpU1 |= BIT0;
+	if(bLowPenaltyRa)
+	{
+		BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_EXEC, ("[BTCoex], Tx rate adaptive, set low penalty!!\n"));
+		tmpU1 &= ~BIT2;
+	}
+	else
+	{
+		BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_EXEC, ("[BTCoex], Tx rate adaptive, set normal!!\n"));
+		tmpU1 |= BIT2;
+	}
+
+	pBtCoexist->btc_write_1byte(pBtCoexist, 0x4fd, tmpU1);
+}
+
+VOID
+halbtc8723a1ant_LowPenaltyRa(
+	IN	PBTC_COEXIST		pBtCoexist,
+	IN	BOOLEAN			bForceExec,
+	IN	BOOLEAN			bLowPenaltyRa
+	)
+{
+	return;	// early return: LowPenaltyRa is effectively disabled here, so the code below is currently unreachable
+	BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW, ("[BTCoex], %s turn LowPenaltyRA = %s\n",  
+		(bForceExec? "force to":""), ((bLowPenaltyRa)? "ON":"OFF")));
+	pCoexDm->bCurLowPenaltyRa = bLowPenaltyRa;
+
+	if(!bForceExec)
+	{
+		BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_DETAIL, ("[BTCoex], bPreLowPenaltyRa=%d, bCurLowPenaltyRa=%d\n", 
+			pCoexDm->bPreLowPenaltyRa, pCoexDm->bCurLowPenaltyRa));
+
+		if(pCoexDm->bPreLowPenaltyRa == pCoexDm->bCurLowPenaltyRa) 
+			return;
+	}
+	halbtc8723a1ant_SetSwPenaltyTxRateAdaptive(pBtCoexist, pCoexDm->bCurLowPenaltyRa);
+
+	pCoexDm->bPreLowPenaltyRa = pCoexDm->bCurLowPenaltyRa;
+}
+
+VOID
+halbtc8723a1ant_SetCoexTable(
+	IN	PBTC_COEXIST	pBtCoexist,
+	IN	u4Byte		val0x6c0,
+	IN	u4Byte		val0x6c8,
+	IN	u1Byte		val0x6cc
+	)
+{
+	BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_EXEC, ("[BTCoex], set coex table, set 0x6c0=0x%x\n", val0x6c0));
+	pBtCoexist->btc_write_4byte(pBtCoexist, 0x6c0, val0x6c0);
+
+	BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_EXEC, ("[BTCoex], set coex table, set 0x6c8=0x%x\n", val0x6c8));
+	pBtCoexist->btc_write_4byte(pBtCoexist, 0x6c8, val0x6c8);
+
+	BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_EXEC, ("[BTCoex], set coex table, set 0x6cc=0x%x\n", val0x6cc));
+	pBtCoexist->btc_write_1byte(pBtCoexist, 0x6cc, val0x6cc);
+}
+
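+// Cache the three coex table values and only rewrite registers 0x6c0/0x6c8/0x6cc when one of them changes (or on FORCE_EXEC).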
+VOID
+halbtc8723a1ant_CoexTable(
+	IN	PBTC_COEXIST		pBtCoexist,
+	IN	BOOLEAN			bForceExec,
+	IN	u4Byte			val0x6c0,
+	IN	u4Byte			val0x6c8,
+	IN	u1Byte			val0x6cc
+	)
+{
+	BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW, ("[BTCoex], %s write Coex Table 0x6c0=0x%x, 0x6c8=0x%x, 0x6cc=0x%x\n", 
+		(bForceExec? "force to":""), val0x6c0, val0x6c8, val0x6cc));
+	pCoexDm->curVal0x6c0 = val0x6c0;
+	pCoexDm->curVal0x6c8 = val0x6c8;
+	pCoexDm->curVal0x6cc = val0x6cc;
+
+	if(!bForceExec)
+	{
+		BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_DETAIL, ("[BTCoex], preVal0x6c0=0x%x, preVal0x6c8=0x%x, preVal0x6cc=0x%x !!\n", 
+			pCoexDm->preVal0x6c0, pCoexDm->preVal0x6c8, pCoexDm->preVal0x6cc));
+		BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_DETAIL, ("[BTCoex], curVal0x6c0=0x%x, curVal0x6c8=0x%x, curVal0x6cc=0x%x !!\n", 
+			pCoexDm->curVal0x6c0, pCoexDm->curVal0x6c8, pCoexDm->curVal0x6cc));
+	
+		if( (pCoexDm->preVal0x6c0 == pCoexDm->curVal0x6c0) &&
+			(pCoexDm->preVal0x6c8 == pCoexDm->curVal0x6c8) &&
+			(pCoexDm->preVal0x6cc == pCoexDm->curVal0x6cc) )
+			return;
+	}
+	halbtc8723a1ant_SetCoexTable(pBtCoexist, val0x6c0, val0x6c8, val0x6cc);
+
+	pCoexDm->preVal0x6c0 = pCoexDm->curVal0x6c0;
+	pCoexDm->preVal0x6c8 = pCoexDm->curVal0x6c8;
+	pCoexDm->preVal0x6cc = pCoexDm->curVal0x6cc;
+}
+
+VOID
+halbtc8723a1ant_SetFwIgnoreWlanAct(
+	IN	PBTC_COEXIST		pBtCoexist,
+	IN	BOOLEAN			bEnable
+	)
+{
+	u1Byte			H2C_Parameter[1] ={0};
+		
+	if(bEnable)
+	{
+		H2C_Parameter[0] |= BIT0;		// function enable
+	}
+	
+	BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_EXEC, ("[BTCoex], set FW for BT Ignore Wlan_Act, FW write 0x25=0x%x\n", 
+		H2C_Parameter[0]));
+
+	pBtCoexist->btc_fill_h2c(pBtCoexist, 0x25, 1, H2C_Parameter);	
+}
+
+VOID
+halbtc8723a1ant_IgnoreWlanAct(
+	IN	PBTC_COEXIST		pBtCoexist,
+	IN	BOOLEAN			bForceExec,
+	IN	BOOLEAN			bEnable
+	)
+{
+	BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW, ("[BTCoex], %s turn Ignore WlanAct %s\n", 
+		(bForceExec? "force to":""), (bEnable? "ON":"OFF")));
+	pCoexDm->bCurIgnoreWlanAct = bEnable;
+
+	if(!bForceExec)
+	{
+		BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_DETAIL, ("[BTCoex], bPreIgnoreWlanAct = %d, bCurIgnoreWlanAct = %d!!\n", 
+			pCoexDm->bPreIgnoreWlanAct, pCoexDm->bCurIgnoreWlanAct));
+
+		if(pCoexDm->bPreIgnoreWlanAct == pCoexDm->bCurIgnoreWlanAct)
+			return;
+	}
+	halbtc8723a1ant_SetFwIgnoreWlanAct(pBtCoexist, bEnable);
+
+	pCoexDm->bPreIgnoreWlanAct = pCoexDm->bCurIgnoreWlanAct;
+}
+
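+// Build the 5-byte PS-TDMA H2C command (0x3a); in AP mode byte1/byte5 are patched so the firmware uses the 1Ant AP-mode timing.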
+VOID
+halbtc8723a1ant_SetFwPstdma(
+	IN	PBTC_COEXIST		pBtCoexist,
+	IN	u1Byte			type,
+	IN	u1Byte			byte1,
+	IN	u1Byte			byte2,
+	IN	u1Byte			byte3,
+	IN	u1Byte			byte4,
+	IN	u1Byte			byte5
+	)
+{
+	u1Byte			H2C_Parameter[5] ={0};
+	u1Byte			realByte1=byte1, realByte5=byte5;
+	BOOLEAN			bApEnable=FALSE;
+
+	pBtCoexist->btc_get(pBtCoexist, BTC_GET_BL_WIFI_AP_MODE_ENABLE, &bApEnable);
+
+	// byte1[1:0] != 0 means PS-TDMA is enabled
+	// (for 2Ant BT coexist, byte1 != 0 likewise means PS-TDMA is enabled)
+	if(byte1)
+	{
+		if(bApEnable)
+		{
+			if(type != 5 && type != 12)
+			{
+				BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY, ("[BTCoex], FW for 1Ant AP mode\n"));
+				realByte1 &= ~BIT4;
+				realByte1 |= BIT5;
+
+				realByte5 |= BIT5;
+				realByte5 &= ~BIT6;
+			}
+		}
+	}
+	H2C_Parameter[0] = realByte1;	
+	H2C_Parameter[1] = byte2;	
+	H2C_Parameter[2] = byte3;
+	H2C_Parameter[3] = byte4;
+	H2C_Parameter[4] = realByte5;
+
+	pCoexDm->psTdmaPara[0] = realByte1;
+	pCoexDm->psTdmaPara[1] = byte2;
+	pCoexDm->psTdmaPara[2] = byte3;
+	pCoexDm->psTdmaPara[3] = byte4;
+	pCoexDm->psTdmaPara[4] = realByte5;
+	
+	BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_EXEC, ("[BTCoex], FW write 0x3a(5bytes)=0x%x%08x\n", 
+		H2C_Parameter[0], 
+		H2C_Parameter[1]<<24|H2C_Parameter[2]<<16|H2C_Parameter[3]<<8|H2C_Parameter[4]));
+
+	pBtCoexist->btc_fill_h2c(pBtCoexist, 0x3a, 5, H2C_Parameter);
+}
+
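+// Select one of the predefined PS-TDMA parameter sets by 'type' and send it to firmware; duplicate requests are skipped unless bForceExec.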
+VOID
+halbtc8723a1ant_PsTdma(
+	IN	PBTC_COEXIST		pBtCoexist,
+	IN	BOOLEAN			bForceExec,
+	IN	BOOLEAN			bTurnOn,
+	IN	u1Byte			type
+	)
+{
+	BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW, ("[BTCoex], %s turn %s PS TDMA, type=%d\n", 
+		(bForceExec? "force to":""), (bTurnOn? "ON":"OFF"), type));		
+	pCoexDm->bCurPsTdmaOn = bTurnOn;
+	pCoexDm->curPsTdma = type;
+
+	if(!bForceExec)
+	{
+		BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_DETAIL, ("[BTCoex], bPrePsTdmaOn = %d, bCurPsTdmaOn = %d!!\n", 
+			pCoexDm->bPrePsTdmaOn, pCoexDm->bCurPsTdmaOn));
+		BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_DETAIL, ("[BTCoex], prePsTdma = %d, curPsTdma = %d!!\n", 
+			pCoexDm->prePsTdma, pCoexDm->curPsTdma));
+
+		if( (pCoexDm->bPrePsTdmaOn == pCoexDm->bCurPsTdmaOn) &&
+			(pCoexDm->prePsTdma == pCoexDm->curPsTdma) )
+			return;
+	}	
+	if(pCoexDm->bCurPsTdmaOn)
+	{
+		switch(pCoexDm->curPsTdma)
+		{
+			case 1:
+			default:
+				halbtc8723a1ant_SetFwPstdma(pBtCoexist, type, 0x13, 0x1a, 0x1a, 0x0, 0x40);
+				break;
+			case 2:
+				halbtc8723a1ant_SetFwPstdma(pBtCoexist, type, 0x13, 0x12, 0x12, 0x0, 0x40);
+				break;
+			case 3:
+				halbtc8723a1ant_SetFwPstdma(pBtCoexist, type, 0x93, 0x3f, 0x3, 0x10, 0x40);
+				break;
+			case 4:
+				halbtc8723a1ant_SetFwPstdma(pBtCoexist, type, 0x93, 0x15, 0x3, 0x10, 0x0);
+				break;
+			case 5:
+				halbtc8723a1ant_SetFwPstdma(pBtCoexist, type, 0xa9, 0x15, 0x3, 0x35, 0xc0);
+				break;
+			
+			case 8: 
+				halbtc8723a1ant_SetFwPstdma(pBtCoexist, type, 0x93, 0x25, 0x3, 0x10, 0x0);
+				break;
+			case 9: 
+				halbtc8723a1ant_SetFwPstdma(pBtCoexist, type, 0x13, 0xa, 0xa, 0x0, 0x40);
+				break;
+			case 10:	
+				halbtc8723a1ant_SetFwPstdma(pBtCoexist, type, 0x13, 0xa, 0xa, 0x0, 0x40);
+				break;
+			case 11:	
+				halbtc8723a1ant_SetFwPstdma(pBtCoexist, type, 0x13, 0x5, 0x5, 0x0, 0x40);
+				break;
+			case 12:
+				halbtc8723a1ant_SetFwPstdma(pBtCoexist, type, 0xa9, 0xa, 0x3, 0x15, 0xc0);
+				break;
+	
+			case 18:
+				halbtc8723a1ant_SetFwPstdma(pBtCoexist, type, 0x93, 0x25, 0x3, 0x10, 0x0);
+				break;			
+
+			case 20:
+				halbtc8723a1ant_SetFwPstdma(pBtCoexist, type, 0x13, 0x2a, 0x2a, 0x0, 0x0);
+				break;
+			case 21:
+				halbtc8723a1ant_SetFwPstdma(pBtCoexist, type, 0x93, 0x20, 0x3, 0x10, 0x40);
+				break;
+			case 22:
+				halbtc8723a1ant_SetFwPstdma(pBtCoexist, type, 0x13, 0x1a, 0x1a, 0x2, 0x40);
+				break;
+			case 23:
+				halbtc8723a1ant_SetFwPstdma(pBtCoexist, type, 0x13, 0x12, 0x12, 0x2, 0x40);
+				break;
+			case 24:
+				halbtc8723a1ant_SetFwPstdma(pBtCoexist, type, 0x13, 0xa, 0xa, 0x2, 0x40);
+				break;
+			case 25:
+				halbtc8723a1ant_SetFwPstdma(pBtCoexist, type, 0x13, 0x5, 0x5, 0x2, 0x40);
+				break;
+			case 26:
+				halbtc8723a1ant_SetFwPstdma(pBtCoexist, type, 0x93, 0x25, 0x3, 0x10, 0x0);
+				break;
+			case 27:
+				halbtc8723a1ant_SetFwPstdma(pBtCoexist, type, 0x13, 0x5, 0x5, 0x2, 0x40);
+				break;
+			case 28:
+				halbtc8723a1ant_SetFwPstdma(pBtCoexist, type, 0x3, 0x2f, 0x2f, 0x0, 0x0);
+				break;
+
+		}
+	}
+	else
+	{
+		// disable PS tdma
+		switch(pCoexDm->curPsTdma)
+		{
+			case 8:
+				halbtc8723a1ant_SetFwPstdma(pBtCoexist, type, 0x8, 0x0, 0x0, 0x0, 0x0);		
+				break;
+			case 0:
+			default:
+				halbtc8723a1ant_SetFwPstdma(pBtCoexist, type, 0x0, 0x0, 0x0, 0x0, 0x0);
+				pBtCoexist->btc_write_2byte(pBtCoexist, 0x860, 0x210);
+				break;
+			case 9:
+				halbtc8723a1ant_SetFwPstdma(pBtCoexist, type, 0x0, 0x0, 0x0, 0x0, 0x0);
+				pBtCoexist->btc_write_2byte(pBtCoexist, 0x860, 0x110);
+				break;
+
+		}
+	}
+
+	// update pre state
+	pCoexDm->bPrePsTdmaOn = pCoexDm->bCurPsTdmaOn;
+	pCoexDm->prePsTdma = pCoexDm->curPsTdma;
+}
+
+
+VOID
+halbtc8723a1ant_CoexAllOff(
+	IN	PBTC_COEXIST		pBtCoexist
+	)
+{
+	// fw all off
+	halbtc8723a1ant_IgnoreWlanAct(pBtCoexist, NORMAL_EXEC, FALSE);
+	halbtc8723a1ant_PsTdma(pBtCoexist, NORMAL_EXEC, FALSE, 0);
+
+	// sw all off
+	halbtc8723a1ant_LowPenaltyRa(pBtCoexist, NORMAL_EXEC, FALSE);
+	halbtc8723a1ant_RfShrink(pBtCoexist, NORMAL_EXEC, FALSE);
+
+	// hw all off
+	halbtc8723a1ant_CoexTable(pBtCoexist, NORMAL_EXEC, 0x55555555, 0xffff, 0x3);
+}
+
+VOID
+halbtc8723a1ant_InitCoexDm(
+	IN	PBTC_COEXIST		pBtCoexist
+	)
+{
+	// force to reset coex mechanism
+	halbtc8723a1ant_IgnoreWlanAct(pBtCoexist, FORCE_EXEC, FALSE);
+}
+
+VOID
+halbtc8723a1ant_BtEnableAction(
+	IN 	PBTC_COEXIST		pBtCoexist
+	)
+{
+	halbtc8723a1ant_IgnoreWlanAct(pBtCoexist, FORCE_EXEC, FALSE);
+}
+
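+// Sample the BT high/low priority tx/rx counters from registers 0x770/0x774, then reset the counters.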
+VOID
+halbtc8723a1ant_MonitorBtCtr(
+	IN	PBTC_COEXIST		pBtCoexist
+	)
+{
+	u4Byte 			regHPTxRx, regLPTxRx, u4Tmp;
+	u4Byte			regHPTx=0, regHPRx=0, regLPTx=0, regLPRx=0;
+	u1Byte			u1Tmp;
+	
+	regHPTxRx = 0x770;
+	regLPTxRx = 0x774;
+
+	u4Tmp = pBtCoexist->btc_read_4byte(pBtCoexist, regHPTxRx);
+	regHPTx = u4Tmp & MASKLWORD;
+	regHPRx = (u4Tmp & MASKHWORD)>>16;
+
+	u4Tmp = pBtCoexist->btc_read_4byte(pBtCoexist, regLPTxRx);
+	regLPTx = u4Tmp & MASKLWORD;
+	regLPRx = (u4Tmp & MASKHWORD)>>16;
+		
+	pCoexSta->highPriorityTx = regHPTx;
+	pCoexSta->highPriorityRx = regHPRx;
+	pCoexSta->lowPriorityTx = regLPTx;
+	pCoexSta->lowPriorityRx = regLPRx;
+
+	BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_MONITOR, ("[BTCoex], High Priority Tx/Rx (reg 0x%x)=0x%x(%d)/0x%x(%d)\n", 
+		regHPTxRx, regHPTx, regHPTx, regHPRx, regHPRx));
+	BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_MONITOR, ("[BTCoex], Low Priority Tx/Rx (reg 0x%x)=0x%x(%d)/0x%x(%d)\n", 
+		regLPTxRx, regLPTx, regLPTx, regLPRx, regLPRx));
+
+	// reset counter
+	pBtCoexist->btc_write_1byte(pBtCoexist, 0x76e, 0xc);
+}
+
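+// Heuristic BT on/off detection: counters all zero (or all 0xffff) for two consecutive polls means BT is treated as disabled.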
+VOID
+halbtc8723a1ant_MonitorBtEnableDisable(
+	IN 	PBTC_COEXIST		pBtCoexist
+	)
+{
+	static BOOLEAN	bPreBtDisabled=FALSE;
+	static u4Byte	btDisableCnt=0;
+	BOOLEAN			bBtActive=true, bBtDisabled=FALSE;
+
+	// This function checks whether BT is disabled
+	
+	if(	pCoexSta->highPriorityTx == 0 &&
+		pCoexSta->highPriorityRx == 0 &&
+		pCoexSta->lowPriorityTx == 0 &&
+		pCoexSta->lowPriorityRx == 0)
+	{
+		bBtActive = FALSE;
+	}
+	if(	pCoexSta->highPriorityTx == 0xffff &&
+		pCoexSta->highPriorityRx == 0xffff &&
+		pCoexSta->lowPriorityTx == 0xffff &&
+		pCoexSta->lowPriorityRx == 0xffff)
+	{
+		bBtActive = FALSE;
+	}
+	if(bBtActive)
+	{
+		btDisableCnt = 0;
+		bBtDisabled = FALSE;
+		pBtCoexist->btc_set(pBtCoexist, BTC_SET_BL_BT_DISABLE, &bBtDisabled);
+		BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_MONITOR, ("[BTCoex], BT is enabled !!\n"));
+	}
+	else
+	{
+		btDisableCnt++;
+		BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_MONITOR, ("[BTCoex], bt all counters=0, %d times!!\n", 
+				btDisableCnt));
+		if(btDisableCnt >= 2)
+		{
+			bBtDisabled = true;
+			pBtCoexist->btc_set(pBtCoexist, BTC_SET_BL_BT_DISABLE, &bBtDisabled);
+			BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_MONITOR, ("[BTCoex], BT is disabled !!\n"));
+		}
+	}
+	if(bPreBtDisabled != bBtDisabled)
+	{
+		BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_MONITOR, ("[BTCoex], BT is from %s to %s!!\n", 
+			(bPreBtDisabled ? "disabled":"enabled"), 
+			(bBtDisabled ? "disabled":"enabled")));
+		bPreBtDisabled = bBtDisabled;
+		if(!bBtDisabled)
+		{
+			halbtc8723a1ant_BtEnableAction(pBtCoexist);
+		}
+		else
+		{
+			pBtCoexist->btc_set(pBtCoexist, BTC_SET_ACT_NORMAL_LPS, NULL);
+		}
+	}
+}
+
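+// Adaptive PS-TDMA duty tuning: track BT retry counts over 2-second windows and widen or narrow the WiFi duration accordingly.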
+VOID
+halbtc8723a1ant_TdmaDurationAdjust(
+	IN	PBTC_COEXIST		pBtCoexist
+	)
+{
+	static s4Byte		up,dn,m,n,WaitCount;
+	s4Byte			result;   //0: no change, +1: increase WiFi duration, -1: decrease WiFi duration
+	u1Byte			retryCount=0;
+	u1Byte			btState;
+	BOOLEAN			bScan=FALSE, bLink=FALSE, bRoam=FALSE;
+	u4Byte			wifiBw;
+	
+	pBtCoexist->btc_get(pBtCoexist, BTC_GET_U4_WIFI_BW, &wifiBw);
+	btState = pCoexDm->btStatus;
+
+	BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], TdmaDurationAdjust()\n"));
+	if(pCoexDm->psTdmaGlobalCnt != pCoexDm->psTdmaMonitorCnt)
+	{
+		pCoexDm->psTdmaMonitorCnt = 0;
+		pCoexDm->psTdmaGlobalCnt = 0;
+	}
+	if(pCoexDm->psTdmaMonitorCnt == 0)
+	{
+		BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], first run BT A2DP + WiFi busy state!!\n"));
+		if(btState == BT_STATE_8723A_1ANT_ACL_ONLY_BUSY)
+		{
+			halbtc8723a1ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 1);
+			pCoexDm->psTdmaDuAdjType = 1;
+		}
+		else
+		{
+			halbtc8723a1ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 22);
+			pCoexDm->psTdmaDuAdjType = 22;
+		}
+		//============
+		up = 0;
+		dn = 0;
+		m = 1;
+		n = 3;
+		result = 0;
+		WaitCount = 0;
+	}
+	else
+	{
+		// acquire the BT TRx retry count from BT_Info byte2
+		retryCount = pCoexSta->btRetryCnt;
+		BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], retryCount = %d\n", retryCount));
+		result = 0;
+		WaitCount++; 
+		  
+		if(retryCount == 0)  // no retry in the last 2-second duration
+		{
+			up++;
+			dn--;
+
+			if (dn <= 0)
+				dn = 0; 			 
+
+			if(up >= n)	// if retry count is 0 for n consecutive 2-second periods, widen the WiFi duration
+			{
+				WaitCount = 0; 
+				n = 3;
+				up = 0;
+				dn = 0;
+				result = 1; 
+				BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], Increase wifi duration!!\n"));
+			}
+		}
+		else if (retryCount <= 3)	// <=3 retry in the last 2-second duration
+		{
+			up--; 
+			dn++;
+
+			if (up <= 0)
+				up = 0;
+
+			if (dn == 2)	// if retry count < 3 for 2 consecutive 2-second periods, narrow the WiFi duration
+			{
+				if (WaitCount <= 2)
+					m++; // avoid bouncing back and forth between two levels
+				else
+					m = 1;
+
+				if (m >= 20) // cap m at 20, i.e. recheck whether to adjust the WiFi duration at most every 120 seconds
+					m = 20;
+
+				n = 3*m;
+				up = 0;
+				dn = 0;
+				WaitCount = 0;	
+				result = -1; 
+				BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], Decrease wifi duration for retryCounter<3!!\n"));
+			}
+		}
+		else	// a single 2-second period with retry count > 3 is enough to narrow the WiFi duration
+		{
+			if (WaitCount == 1)
+				m++; // avoid bouncing back and forth between two levels
+			else
+				m = 1;
+
+			if (m >= 20) // cap m at 20, i.e. recheck whether to adjust the WiFi duration at most every 120 seconds
+				m = 20;
+
+			n = 3*m;
+			up = 0;
+			dn = 0;
+			WaitCount = 0; 
+			result = -1;
+			BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], Decrease wifi duration for retryCounter>3!!\n"));
+		}
+		
+		{
+			BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], BT TxRx counter H+L <= 1200\n"));
+			if(btState != BT_STATE_8723A_1ANT_ACL_ONLY_BUSY)
+			{
+				BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], NOT ACL only busy!\n"));
+				if(BTC_WIFI_BW_HT40 != wifiBw)
+				{
+					BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], 20MHz\n"));
+					if(result == -1)
+					{
+						if(pCoexDm->curPsTdma == 22)
+						{
+							halbtc8723a1ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 23);
+							pCoexDm->psTdmaDuAdjType = 23;
+						}
+						else if(pCoexDm->curPsTdma == 23)
+						{
+							halbtc8723a1ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 24);
+							pCoexDm->psTdmaDuAdjType = 24;
+						}
+						else if(pCoexDm->curPsTdma == 24)
+						{
+							halbtc8723a1ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 25);
+							pCoexDm->psTdmaDuAdjType = 25;
+						}
+					} 
+					else if (result == 1)
+					{
+						if(pCoexDm->curPsTdma == 25)
+						{
+							halbtc8723a1ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 24);
+							pCoexDm->psTdmaDuAdjType = 24;
+						}
+						else if(pCoexDm->curPsTdma == 24)
+						{
+							halbtc8723a1ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 23);
+							pCoexDm->psTdmaDuAdjType = 23;
+						}
+						else if(pCoexDm->curPsTdma == 23)
+						{
+							halbtc8723a1ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 22);
+							pCoexDm->psTdmaDuAdjType = 22;
+						}
+					}
+					// error handling: if not in one of the expected states,
+					// set psTdma again.
+					if( (pCoexDm->psTdmaDuAdjType != 22) &&
+						(pCoexDm->psTdmaDuAdjType != 23) &&
+						(pCoexDm->psTdmaDuAdjType != 24) &&
+						(pCoexDm->psTdmaDuAdjType != 25) )
+					{
+						BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], duration case out of handle!!\n"));
+						halbtc8723a1ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 23);
+						pCoexDm->psTdmaDuAdjType = 23;
+					}
+				}
+				else
+				{
+					BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], 40MHz\n"));
+					if(result == -1)
+					{
+						if(pCoexDm->curPsTdma == 23)
+						{
+							halbtc8723a1ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 24);
+							pCoexDm->psTdmaDuAdjType = 24;
+						}
+						else if(pCoexDm->curPsTdma == 24)
+						{
+							halbtc8723a1ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 25);
+							pCoexDm->psTdmaDuAdjType = 25;
+						}
+						else if(pCoexDm->curPsTdma == 25)
+						{
+							halbtc8723a1ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 27);
+							pCoexDm->psTdmaDuAdjType = 27;
+						}
+					} 
+					else if (result == 1)
+					{
+						if(pCoexDm->curPsTdma == 27)
+						{
+							halbtc8723a1ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 25);
+							pCoexDm->psTdmaDuAdjType = 25;
+						}
+						else if(pCoexDm->curPsTdma == 25)
+						{
+							halbtc8723a1ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 24);
+							pCoexDm->psTdmaDuAdjType = 24;
+						}
+						else if(pCoexDm->curPsTdma == 24)
+						{
+							halbtc8723a1ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 23);
+							pCoexDm->psTdmaDuAdjType = 23;
+						}
+					}
+					// error handling: if not in one of the expected states,
+					// set psTdma again.
+					if( (pCoexDm->psTdmaDuAdjType != 23) &&
+						(pCoexDm->psTdmaDuAdjType != 24) &&
+						(pCoexDm->psTdmaDuAdjType != 25) &&
+						(pCoexDm->psTdmaDuAdjType != 27) )
+					{
+						BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], duration case out of handle!!\n"));
+						halbtc8723a1ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 24);
+						pCoexDm->psTdmaDuAdjType = 24;
+					}
+				}
+			}
+			else
+			{
+				BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], ACL only busy\n"));
+				if (result == -1)
+				{
+					if(pCoexDm->curPsTdma == 1)
+					{
+						halbtc8723a1ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 2);
+						pCoexDm->psTdmaDuAdjType = 2;
+					}
+					else if(pCoexDm->curPsTdma == 2)
+					{
+						halbtc8723a1ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 9);
+						pCoexDm->psTdmaDuAdjType = 9;
+					}
+					else if(pCoexDm->curPsTdma == 9)
+					{
+						halbtc8723a1ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 11);
+						pCoexDm->psTdmaDuAdjType = 11;
+					}
+				}
+				else if (result == 1)
+				{
+					if(pCoexDm->curPsTdma == 11)
+					{
+						halbtc8723a1ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 9);
+						pCoexDm->psTdmaDuAdjType = 9;
+					}
+					else if(pCoexDm->curPsTdma == 9)
+					{
+						halbtc8723a1ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 2);
+						pCoexDm->psTdmaDuAdjType = 2;
+					}
+					else if(pCoexDm->curPsTdma == 2)
+					{
+						halbtc8723a1ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 1);
+						pCoexDm->psTdmaDuAdjType = 1;
+					}
+				}
+
+				// error handling: if not in one of the expected states,
+				// set psTdma again.
+				if( (pCoexDm->psTdmaDuAdjType != 1) &&
+					(pCoexDm->psTdmaDuAdjType != 2) &&
+					(pCoexDm->psTdmaDuAdjType != 9) &&
+					(pCoexDm->psTdmaDuAdjType != 11) )
+				{
+					BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], duration case out of handle!!\n"));
+					halbtc8723a1ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 2);
+					pCoexDm->psTdmaDuAdjType = 2;
+				}
+			}
+		}
+	}
+
+	// if the current PsTdma does not match the recorded one (e.g. changed during scan or DHCP),
+	// adjust it back to the previously recorded setting.
+	if(pCoexDm->curPsTdma != pCoexDm->psTdmaDuAdjType)
+	{
+		BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], PsTdma type dismatch!!!, curPsTdma=%d, recordPsTdma=%d\n", 
+			pCoexDm->curPsTdma, pCoexDm->psTdmaDuAdjType));
+
+		pBtCoexist->btc_get(pBtCoexist, BTC_GET_BL_WIFI_SCAN, &bScan);
+		pBtCoexist->btc_get(pBtCoexist, BTC_GET_BL_WIFI_LINK, &bLink);
+		pBtCoexist->btc_get(pBtCoexist, BTC_GET_BL_WIFI_ROAM, &bRoam);
+
+		if( !bScan && !bLink &&	!bRoam)
+		{
+			halbtc8723a1ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, pCoexDm->psTdmaDuAdjType);
+		}
+		else
+		{
+			BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], roaming/link/scan is under progress, will adjust next time!!!\n"));
+		}
+	}
+	pCoexDm->psTdmaMonitorCnt++;
+}
+
+
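+// Choose the PS-TDMA policy for the wifi-connected case based on the current BT state and whether wifi is busy.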
+VOID
+halbtc8723a1ant_CoexForWifiConnect(
+	IN	PBTC_COEXIST		pBtCoexist
+	)
+{
+	BOOLEAN		bWifiConnected=FALSE, bWifiBusy=FALSE;
+	u1Byte		btState, btInfoOriginal=0;
+
+	pBtCoexist->btc_get(pBtCoexist, BTC_GET_BL_WIFI_CONNECTED, &bWifiConnected);
+
+	btState = pCoexDm->btStatus;
+	btInfoOriginal = pCoexSta->btInfoC2h[BT_INFO_SRC_8723A_1ANT_BT_RSP][0];
+
+	if(bWifiConnected)
+	{
+		BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], wifi connected!!\n"));
+		pBtCoexist->btc_get(pBtCoexist, BTC_GET_BL_WIFI_BUSY, &bWifiBusy);
+		
+		if( !bWifiBusy &&
+			((BT_STATE_8723A_1ANT_NO_CONNECTION == btState) ||
+			(BT_STATE_8723A_1ANT_CONNECT_IDLE == btState)) )
+		{
+			BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], [Wifi is idle] or [Bt is non connected idle or Bt is connected idle]!!\n"));
+
+			if(BT_STATE_8723A_1ANT_NO_CONNECTION == btState)
+				halbtc8723a1ant_PsTdma(pBtCoexist, NORMAL_EXEC, FALSE, 9);
+			else if(BT_STATE_8723A_1ANT_CONNECT_IDLE == btState)
+				halbtc8723a1ant_PsTdma(pBtCoexist, NORMAL_EXEC, FALSE, 0);
+
+			pBtCoexist->btc_setBbReg(pBtCoexist, 0x880, 0xff000000, 0xc0);
+		}
+		else
+		{
+			if( (BT_STATE_8723A_1ANT_SCO_ONLY_BUSY == btState) ||
+				(BT_STATE_8723A_1ANT_ACL_SCO_BUSY == btState) ||
+				(BT_STATE_8723A_1ANT_HID_BUSY == btState) ||
+				(BT_STATE_8723A_1ANT_HID_SCO_BUSY == btState) )
+			{
+				pBtCoexist->btc_setBbReg(pBtCoexist, 0x880, 0xff000000, 0x60);
+			}
+			else
+			{
+				pBtCoexist->btc_setBbReg(pBtCoexist, 0x880, 0xff000000, 0xc0);
+			}
+			switch(btState)
+			{
+				case BT_STATE_8723A_1ANT_NO_CONNECTION:
+					halbtc8723a1ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 5);
+					break;
+				case BT_STATE_8723A_1ANT_CONNECT_IDLE:
+					halbtc8723a1ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 12);
+					break;
+				case BT_STATE_8723A_1ANT_INQ_OR_PAG:
+					halbtc8723a1ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 10);
+					break;
+				case BT_STATE_8723A_1ANT_SCO_ONLY_BUSY:
+				case BT_STATE_8723A_1ANT_ACL_SCO_BUSY:
+				case BT_STATE_8723A_1ANT_HID_BUSY:
+				case BT_STATE_8723A_1ANT_HID_SCO_BUSY:
+					halbtc8723a1ant_TdmaDurationAdjust(pBtCoexist);
+					break;
+				case BT_STATE_8723A_1ANT_ACL_ONLY_BUSY:
+					// check the combined A2DP+FTP case first so that it is actually reachable
+					if( (btInfoOriginal&BT_INFO_8723A_1ANT_B_A2DP) &&
+						(btInfoOriginal&BT_INFO_8723A_1ANT_B_FTP) )
+					{
+						halbtc8723a1ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 6);
+					}
+					else if(btInfoOriginal&BT_INFO_8723A_1ANT_B_A2DP)
+					{
+						halbtc8723a1ant_TdmaDurationAdjust(pBtCoexist);
+					}
+					else if(btInfoOriginal&BT_INFO_8723A_1ANT_B_FTP)
+					{
+						halbtc8723a1ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 1);
+					}
+					else
+					{
+						halbtc8723a1ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 1);
+					}
+					break;
+				default:
+					BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], error!!!, undefined case in halbtc8723a1ant_CoexForWifiConnect()!!\n"));
+					break;
+			}
+		}
+	}
+	else
+	{
+		BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], wifi is disconnected!!\n"));
+	}
+
+	pCoexDm->psTdmaGlobalCnt++;
+}
+
+//============================================================
+// workaround functions start with wa_halbtc8723a1ant_
+//============================================================
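+// Workaround: if the BT-response C2H count stops increasing for two polls and 0x1af is nonzero, clear 0x1af to recover the C2H path.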
+VOID
+wa_halbtc8723a1ant_MonitorC2h(
+	IN	PBTC_COEXIST			pBtCoexist
+	)
+{
+	u1Byte	tmp1b=0x0;
+	u4Byte	curC2hTotalCnt=0x0;
+	static u4Byte	preC2hTotalCnt=0x0, sameCntPollingTime=0x0;
+
+	curC2hTotalCnt+=pCoexSta->btInfoC2hCnt[BT_INFO_SRC_8723A_1ANT_BT_RSP];
+
+	if(curC2hTotalCnt == preC2hTotalCnt)
+	{
+		sameCntPollingTime++;
+	}
+	else
+	{
+		preC2hTotalCnt = curC2hTotalCnt;
+		sameCntPollingTime = 0;
+	}
+
+	if(sameCntPollingTime >= 2)
+	{
+		tmp1b = pBtCoexist->btc_read_1byte(pBtCoexist, 0x1af);
+		if(tmp1b != 0x0)
+		{
+			pCoexSta->c2hHangDetectCnt++;
+			pBtCoexist->btc_write_1byte(pBtCoexist, 0x1af, 0x0);
+		}
+	}
+}
+
+//============================================================
+// extern functions start with EXhalbtc8723a1ant_
+//============================================================
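+// One-time HW setup: back up RF 0x1e, enable BT counter statistics, and program the 1Ant coex table and antenna/PTA control registers.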
+VOID
+EXhalbtc8723a1ant_InitHwConfig(
+	IN	PBTC_COEXIST		pBtCoexist
+	)
+{
+	BTC_PRINT(BTC_MSG_INTERFACE, INTF_INIT, ("[BTCoex], 1Ant Init HW Config!!\n"));
+
+	// backup rf 0x1e value
+	pCoexDm->btRf0x1eBackup = 
+		pBtCoexist->btc_get_rf_reg(pBtCoexist, BTC_RF_A, 0x1e, 0xfffff);
+
+	pBtCoexist->btc_write_1byte(pBtCoexist, 0x40, 0x20);
+
+	// enable counter statistics
+	pBtCoexist->btc_write_1byte(pBtCoexist, 0x76e, 0x4);
+	
+	// coex table
+	pBtCoexist->btc_write_1byte(pBtCoexist, 0x6cc, 0x0);			// 1-Ant coex
+	pBtCoexist->btc_write_4byte(pBtCoexist, 0x6c8, 0xffff);		// wifi break table
+	pBtCoexist->btc_write_4byte(pBtCoexist, 0x6c4, 0x55555555);	//coex table
+
+	// antenna switch control parameter
+	pBtCoexist->btc_write_4byte(pBtCoexist, 0x858, 0xaaaaaaaa);
+	
+	pBtCoexist->btc_write_2byte(pBtCoexist, 0x860, 0x210);	//set antenna at wifi side if ANTSW is software control
+	pBtCoexist->btc_write_4byte(pBtCoexist, 0x870, 0x300);	//SPDT(connected with TRSW) control by hardware PTA
+	pBtCoexist->btc_write_4byte(pBtCoexist, 0x874, 0x22804000);	//ANTSW keep by GNT_BT
+
+	// coexistence parameters
+	pBtCoexist->btc_write_1byte(pBtCoexist, 0x778, 0x1);	// enable RTK mode PTA
+}
+
+VOID
+EXhalbtc8723a1ant_InitCoexDm(
+	IN	PBTC_COEXIST		pBtCoexist
+	)
+{
+	BTC_PRINT(BTC_MSG_INTERFACE, INTF_INIT, ("[BTCoex], Coex Mechanism Init!!\n"));
+	
+	halbtc8723a1ant_InitCoexDm(pBtCoexist);
+}
+
+VOID
+EXhalbtc8723a1ant_DisplayCoexInfo(
+	IN	PBTC_COEXIST		pBtCoexist
+	)
+{
+	struct btc_board_info *		pBoardInfo=&pBtCoexist->board_info;
+	PBTC_STACK_INFO		pStackInfo=&pBtCoexist->stack_info;
+	pu1Byte				cliBuf=pBtCoexist->cli_buf;
+	u1Byte				u1Tmp[4], i, btInfoExt, psTdmaCase=0;
+	u4Byte				u4Tmp[4];
+	BOOLEAN				bRoam=FALSE, bScan=FALSE, bLink=FALSE, bWifiUnder5G=FALSE;
+	BOOLEAN				bBtHsOn=FALSE, bWifiBusy=FALSE;
+	s4Byte				wifiRssi=0, btHsRssi=0;
+	u4Byte				wifiBw, wifiTrafficDir;
+	u1Byte				wifiDot11Chnl, wifiHsChnl;
+
+	CL_SPRINTF(cliBuf, BT_TMP_BUF_SIZE, "\r\n ============[BT Coexist info]============");
+	CL_PRINTF(cliBuf);
+
+	if(!pBoardInfo->bt_exist)
+	{
+		CL_SPRINTF(cliBuf, BT_TMP_BUF_SIZE, "\r\n BT not exists !!!");
+		CL_PRINTF(cliBuf);
+		return;
+	}
+
+	CL_SPRINTF(cliBuf, BT_TMP_BUF_SIZE, "\r\n %-35s = %d/ %d ", "Ant PG number/ Ant mechanism:", \
+		pBoardInfo->pg_ant_num, pBoardInfo->btdm_ant_num);
+	CL_PRINTF(cliBuf);	
+	
+	if(pBtCoexist->manual_control)
+	{
+		CL_SPRINTF(cliBuf, BT_TMP_BUF_SIZE, "\r\n %-35s", "[Action Manual control]!!");
+		CL_PRINTF(cliBuf);
+	}
+	
+	CL_SPRINTF(cliBuf, BT_TMP_BUF_SIZE, "\r\n %-35s = %s / %d", "BT stack/ hci ext ver", \
+		((pStackInfo->bProfileNotified)? "Yes":"No"), pStackInfo->hciVersion);
+	CL_PRINTF(cliBuf);
+
+	pBtCoexist->btc_get(pBtCoexist, BTC_GET_BL_HS_OPERATION, &bBtHsOn);
+	pBtCoexist->btc_get(pBtCoexist, BTC_GET_U1_WIFI_DOT11_CHNL, &wifiDot11Chnl);
+	pBtCoexist->btc_get(pBtCoexist, BTC_GET_U1_WIFI_HS_CHNL, &wifiHsChnl);
+	CL_SPRINTF(cliBuf, BT_TMP_BUF_SIZE, "\r\n %-35s = %d / %d(%d)", "Dot11 channel / HsChnl(HsMode)", \
+		wifiDot11Chnl, wifiHsChnl, bBtHsOn);
+	CL_PRINTF(cliBuf);
+
+	CL_SPRINTF(cliBuf, BT_TMP_BUF_SIZE, "\r\n %-35s = %02x %02x %02x ", "H2C Wifi inform bt chnl Info", \
+		pCoexDm->wifiChnlInfo[0], pCoexDm->wifiChnlInfo[1],
+		pCoexDm->wifiChnlInfo[2]);
+	CL_PRINTF(cliBuf);
+
+	pBtCoexist->btc_get(pBtCoexist, BTC_GET_S4_WIFI_RSSI, &wifiRssi);
+	pBtCoexist->btc_get(pBtCoexist, BTC_GET_S4_HS_RSSI, &btHsRssi);
+	CL_SPRINTF(cliBuf, BT_TMP_BUF_SIZE, "\r\n %-35s = %d/ %d", "Wifi rssi/ HS rssi", \
+		wifiRssi, btHsRssi);
+	CL_PRINTF(cliBuf);
+
+	pBtCoexist->btc_get(pBtCoexist, BTC_GET_BL_WIFI_SCAN, &bScan);
+	pBtCoexist->btc_get(pBtCoexist, BTC_GET_BL_WIFI_LINK, &bLink);
+	pBtCoexist->btc_get(pBtCoexist, BTC_GET_BL_WIFI_ROAM, &bRoam);
+	CL_SPRINTF(cliBuf, BT_TMP_BUF_SIZE, "\r\n %-35s = %d/ %d/ %d ", "Wifi bLink/ bRoam/ bScan", \
+		bLink, bRoam, bScan);
+	CL_PRINTF(cliBuf);
+
+	pBtCoexist->btc_get(pBtCoexist, BTC_GET_BL_WIFI_UNDER_5G, &bWifiUnder5G);
+	pBtCoexist->btc_get(pBtCoexist, BTC_GET_U4_WIFI_BW, &wifiBw);
+	pBtCoexist->btc_get(pBtCoexist, BTC_GET_BL_WIFI_BUSY, &bWifiBusy);
+	pBtCoexist->btc_get(pBtCoexist, BTC_GET_U4_WIFI_TRAFFIC_DIRECTION, &wifiTrafficDir);
+	CL_SPRINTF(cliBuf, BT_TMP_BUF_SIZE, "\r\n %-35s = %s / %s/ %s ", "Wifi status", \
+		(bWifiUnder5G? "5G":"2.4G"),
+		((BTC_WIFI_BW_LEGACY==wifiBw)? "Legacy": (((BTC_WIFI_BW_HT40==wifiBw)? "HT40":"HT20"))),
+		((!bWifiBusy)? "idle": ((BTC_WIFI_TRAFFIC_TX==wifiTrafficDir)? "uplink":"downlink")));
+	CL_PRINTF(cliBuf);
+
+	CL_SPRINTF(cliBuf, BT_TMP_BUF_SIZE, "\r\n %-35s = [%s/ %d/ %d] ", "BT [status/ rssi/ retryCnt]", \
+		((pCoexSta->bC2hBtInquiryPage)?("inquiry/page scan"):((BT_8723A_1ANT_BT_STATUS_IDLE == pCoexDm->btStatus)? "idle":(  (BT_8723A_1ANT_BT_STATUS_CONNECTED_IDLE == pCoexDm->btStatus)? "connected-idle":"busy"))),
+		pCoexSta->btRssi, pCoexSta->btRetryCnt);
+	CL_PRINTF(cliBuf);
+	
+	if(pStackInfo->bProfileNotified)
+	{			
+		CL_SPRINTF(cliBuf, BT_TMP_BUF_SIZE, "\r\n %-35s = %d / %d / %d / %d", "SCO/HID/PAN/A2DP", \
+			pStackInfo->bScoExist, pStackInfo->bHidExist, pStackInfo->bPanExist, pStackInfo->bA2dpExist);
+		CL_PRINTF(cliBuf);	
+
+		pBtCoexist->btc_disp_dbg_msg(pBtCoexist, BTC_DBG_DISP_BT_LINK_INFO);
+	}
+
+	btInfoExt = pCoexSta->btInfoExt;
+	CL_SPRINTF(cliBuf, BT_TMP_BUF_SIZE, "\r\n %-35s = %s", "BT Info A2DP rate", \
+		(btInfoExt&BIT0)? "Basic rate":"EDR rate");
+	CL_PRINTF(cliBuf);	
+
+	for(i=0; i<BT_INFO_SRC_8723A_1ANT_MAX; i++)
+	{
+		if(pCoexSta->btInfoC2hCnt[i])
+		{				
+			CL_SPRINTF(cliBuf, BT_TMP_BUF_SIZE, "\r\n %-35s = %02x %02x %02x %02x %02x %02x %02x(%d)", GLBtInfoSrc8723a1Ant[i], \
+				pCoexSta->btInfoC2h[i][0], pCoexSta->btInfoC2h[i][1],
+				pCoexSta->btInfoC2h[i][2], pCoexSta->btInfoC2h[i][3],
+				pCoexSta->btInfoC2h[i][4], pCoexSta->btInfoC2h[i][5],
+				pCoexSta->btInfoC2h[i][6], pCoexSta->btInfoC2hCnt[i]);
+			CL_PRINTF(cliBuf);
+		}
+	}
+
+	CL_SPRINTF(cliBuf, BT_TMP_BUF_SIZE, "\r\n %-35s = %d", "write 0x1af=0x0 num", \
+		pCoexSta->c2hHangDetectCnt);
+	CL_PRINTF(cliBuf);
+	
+	// Sw mechanism	
+	CL_SPRINTF(cliBuf, BT_TMP_BUF_SIZE, "\r\n %-35s", "============[Sw mechanism]============");
+	CL_PRINTF(cliBuf);
+	CL_SPRINTF(cliBuf, BT_TMP_BUF_SIZE, "\r\n %-35s = %d/ %d/ %d", "SM1[ShRf/ LpRA/ LimDig]", \
+		pCoexDm->bCurRfRxLpfShrink, pCoexDm->bCurLowPenaltyRa, pCoexDm->limited_dig);
+	CL_PRINTF(cliBuf);
+
+	// Fw mechanism		
+	CL_SPRINTF(cliBuf, BT_TMP_BUF_SIZE, "\r\n %-35s", "============[Fw mechanism]============");
+	CL_PRINTF(cliBuf);	
+	
+	if(!pBtCoexist->manual_control)
+	{
+		psTdmaCase = pCoexDm->curPsTdma;
+		CL_SPRINTF(cliBuf, BT_TMP_BUF_SIZE, "\r\n %-35s = %02x %02x %02x %02x %02x case-%d", "PS TDMA", \
+			pCoexDm->psTdmaPara[0], pCoexDm->psTdmaPara[1],
+			pCoexDm->psTdmaPara[2], pCoexDm->psTdmaPara[3],
+			pCoexDm->psTdmaPara[4], psTdmaCase);
+		CL_PRINTF(cliBuf);
+	
+		CL_SPRINTF(cliBuf, BT_TMP_BUF_SIZE, "\r\n %-35s = %d ", "IgnWlanAct", \
+			pCoexDm->bCurIgnoreWlanAct);
+		CL_PRINTF(cliBuf);
+	}
+
+	// Hw setting		
+	CL_SPRINTF(cliBuf, BT_TMP_BUF_SIZE, "\r\n %-35s", "============[Hw setting]============");
+	CL_PRINTF(cliBuf);	
+
+	CL_SPRINTF(cliBuf, BT_TMP_BUF_SIZE, "\r\n %-35s = 0x%x", "RF-A, 0x1e initVal", \
+		pCoexDm->btRf0x1eBackup);
+	CL_PRINTF(cliBuf);
+
+	u1Tmp[0] = pBtCoexist->btc_read_1byte(pBtCoexist, 0x778);
+	u1Tmp[1] = pBtCoexist->btc_read_1byte(pBtCoexist, 0x783);
+	u1Tmp[2] = pBtCoexist->btc_read_1byte(pBtCoexist, 0x796);
+	CL_SPRINTF(cliBuf, BT_TMP_BUF_SIZE, "\r\n %-35s = 0x%x/ 0x%x/ 0x%x", "0x778/ 0x783/ 0x796", \
+		u1Tmp[0], u1Tmp[1], u1Tmp[2]);
+	CL_PRINTF(cliBuf);
+
+	u4Tmp[0] = pBtCoexist->btc_read_4byte(pBtCoexist, 0x880);
+	CL_SPRINTF(cliBuf, BT_TMP_BUF_SIZE, "\r\n %-35s = 0x%x", "0x880", \
+		u4Tmp[0]);
+	CL_PRINTF(cliBuf);
+
+	u1Tmp[0] = pBtCoexist->btc_read_1byte(pBtCoexist, 0x40);
+	CL_SPRINTF(cliBuf, BT_TMP_BUF_SIZE, "\r\n %-35s = 0x%x", "0x40", \
+		u1Tmp[0]);
+	CL_PRINTF(cliBuf);
+
+	u4Tmp[0] = pBtCoexist->btc_read_4byte(pBtCoexist, 0x550);
+	u1Tmp[0] = pBtCoexist->btc_read_1byte(pBtCoexist, 0x522);
+	CL_SPRINTF(cliBuf, BT_TMP_BUF_SIZE, "\r\n %-35s = 0x%x/ 0x%x", "0x550(bcn ctrl)/0x522", \
+		u4Tmp[0], u1Tmp[0]);
+	CL_PRINTF(cliBuf);
+
+	u4Tmp[0] = pBtCoexist->btc_read_4byte(pBtCoexist, 0x484);
+	CL_SPRINTF(cliBuf, BT_TMP_BUF_SIZE, "\r\n %-35s = 0x%x", "0x484(rate adaptive)", \
+		u4Tmp[0]);
+	CL_PRINTF(cliBuf);
+
+	u4Tmp[0] = pBtCoexist->btc_read_4byte(pBtCoexist, 0xc50);
+	CL_SPRINTF(cliBuf, BT_TMP_BUF_SIZE, "\r\n %-35s = 0x%x", "0xc50(dig)", \
+		u4Tmp[0]);
+	CL_PRINTF(cliBuf);
+
+	u4Tmp[0] = pBtCoexist->btc_read_4byte(pBtCoexist, 0xda0);
+	u4Tmp[1] = pBtCoexist->btc_read_4byte(pBtCoexist, 0xda4);
+	u4Tmp[2] = pBtCoexist->btc_read_4byte(pBtCoexist, 0xda8);
+	u4Tmp[3] = pBtCoexist->btc_read_4byte(pBtCoexist, 0xdac);
+	CL_SPRINTF(cliBuf, BT_TMP_BUF_SIZE, "\r\n %-35s = 0x%x/ 0x%x/ 0x%x/ 0x%x", "0xda0/0xda4/0xda8/0xdac(FA cnt)", \
+		u4Tmp[0], u4Tmp[1], u4Tmp[2], u4Tmp[3]);
+	CL_PRINTF(cliBuf);
+
+	u4Tmp[0] = pBtCoexist->btc_read_4byte(pBtCoexist, 0x6c0);
+	u4Tmp[1] = pBtCoexist->btc_read_4byte(pBtCoexist, 0x6c4);
+	u4Tmp[2] = pBtCoexist->btc_read_4byte(pBtCoexist, 0x6c8);
+	u1Tmp[0] = pBtCoexist->btc_read_1byte(pBtCoexist, 0x6cc);
+	CL_SPRINTF(cliBuf, BT_TMP_BUF_SIZE, "\r\n %-35s = 0x%x/ 0x%x/ 0x%x/ 0x%x", "0x6c0/0x6c4/0x6c8/0x6cc(coexTable)", \
+		u4Tmp[0], u4Tmp[1], u4Tmp[2], u1Tmp[0]);
+	CL_PRINTF(cliBuf);
+
+	CL_SPRINTF(cliBuf, BT_TMP_BUF_SIZE, "\r\n %-35s = %d/ %d", "0x770 (hp rx[31:16]/tx[15:0])", \
+		pCoexSta->highPriorityRx, pCoexSta->highPriorityTx);
+	CL_PRINTF(cliBuf);
+	CL_SPRINTF(cliBuf, BT_TMP_BUF_SIZE, "\r\n %-35s = %d/ %d", "0x774(lp rx[31:16]/tx[15:0])", \
+		pCoexSta->lowPriorityRx, pCoexSta->lowPriorityTx);
+	CL_PRINTF(cliBuf);
+
+	// check whether the Tx mgnt queue is hung: 0x41b should read 0xf; e.g. 0xd indicates a hang
+	u1Tmp[0] = pBtCoexist->btc_read_1byte(pBtCoexist, 0x41b);
+	CL_SPRINTF(cliBuf, BT_TMP_BUF_SIZE, "\r\n %-35s = 0x%x", "0x41b (mgntQ hang chk == 0xf)", \
+		u1Tmp[0]);
+	CL_PRINTF(cliBuf);	
+
+	pBtCoexist->btc_disp_dbg_msg(pBtCoexist, BTC_DBG_DISP_COEX_STATISTICS);
+}
+
+
+VOID
+EXhalbtc8723a1ant_IpsNotify(
+	IN	PBTC_COEXIST		pBtCoexist,
+	IN	u1Byte			type
+	)
+{
+	if(BTC_IPS_ENTER == type)
+	{
+		BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY, ("[BTCoex], IPS ENTER notify\n"));
+		halbtc8723a1ant_CoexAllOff(pBtCoexist);
+	}
+	else if(BTC_IPS_LEAVE == type)
+	{
+		BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY, ("[BTCoex], IPS LEAVE notify\n"));
+		//halbtc8723a1ant_InitCoexDm(pBtCoexist);
+	}
+}
+
+VOID
+EXhalbtc8723a1ant_LpsNotify(
+	IN	PBTC_COEXIST		pBtCoexist,
+	IN	u1Byte			type
+	)
+{
+	if(BTC_LPS_ENABLE == type)
+	{
+		BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY, ("[BTCoex], LPS ENABLE notify\n"));
+	}
+	else if(BTC_LPS_DISABLE == type)
+	{
+		BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY, ("[BTCoex], LPS DISABLE notify\n"));
+		halbtc8723a1ant_PsTdma(pBtCoexist, NORMAL_EXEC, FALSE, 8);
+	}
+}
+
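+// On scan start/finish: notify firmware, then pick a PS-TDMA setting depending on whether BT is disabled and wifi is already connected.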
+VOID
+EXhalbtc8723a1ant_ScanNotify(
+	IN	PBTC_COEXIST		pBtCoexist,
+	IN	u1Byte			type
+	)
+{
+	BOOLEAN		bWifiConnected=FALSE;
+	
+	halbtc8723a1ant_NotifyFwScan(pBtCoexist, type);
+
+	if(pBtCoexist->btInfo.bBtDisabled)
+	{
+		halbtc8723a1ant_PsTdma(pBtCoexist, NORMAL_EXEC, FALSE, 9); 
+	}
+	else
+	{
+		pBtCoexist->btc_get(pBtCoexist, BTC_GET_BL_WIFI_CONNECTED, &bWifiConnected);
+		if(BTC_SCAN_START == type)
+		{
+			BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY, ("[BTCoex], SCAN START notify\n"));
+			if(!bWifiConnected)	// non-connected scan
+			{
+				//set 0x550[3]=1 before PsTdma
+				halbtc8723a1ant_Reg0x550Bit3(pBtCoexist, true);
+			}
+
+			halbtc8723a1ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 4);
+		}
+		else if(BTC_SCAN_FINISH == type)
+		{
+			BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY, ("[BTCoex], SCAN FINISH notify\n"));
+			if(!bWifiConnected)	// non-connected scan
+			{
+				halbtc8723a1ant_PsTdma(pBtCoexist, NORMAL_EXEC, FALSE, 0); 
+			}
+			else
+			{
+				halbtc8723a1ant_CoexForWifiConnect(pBtCoexist);
+			}
+		}
+	}
+}
+
+VOID
+EXhalbtc8723a1ant_ConnectNotify(
+	IN	PBTC_COEXIST		pBtCoexist,
+	IN	u1Byte			type
+	)
+{
+	BOOLEAN		bWifiConnected=FALSE;
+		
+	if(pBtCoexist->btInfo.bBtDisabled)
+	{
+		halbtc8723a1ant_PsTdma(pBtCoexist, NORMAL_EXEC, FALSE, 9); 
+	}
+	else
+	{
+		if(BTC_ASSOCIATE_START == type)
+		{
+			BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY, ("[BTCoex], CONNECT START notify\n"));
+			//set 0x550[3]=1 before PsTdma
+			halbtc8723a1ant_Reg0x550Bit3(pBtCoexist, true);
+			halbtc8723a1ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 8);	// extend wifi slot	
+		}
+		else if(BTC_ASSOCIATE_FINISH == type)
+		{
+			BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY, ("[BTCoex], CONNECT FINISH notify\n"));
+			pBtCoexist->btc_get(pBtCoexist, BTC_GET_BL_WIFI_CONNECTED, &bWifiConnected);
+			if(!bWifiConnected)	// non-connected scan
+			{
+				halbtc8723a1ant_PsTdma(pBtCoexist, NORMAL_EXEC, FALSE, 0);
+			}
+			else
+			{
+				halbtc8723a1ant_CoexForWifiConnect(pBtCoexist);
+			}
+		}
+	}
+}
+
+VOID
+EXhalbtc8723a1ant_MediaStatusNotify(
+	IN	PBTC_COEXIST			pBtCoexist,
+	IN	u1Byte				type
+	)
+{
+	if(BTC_MEDIA_CONNECT == type)
+	{
+		BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY, ("[BTCoex], MEDIA connect notify\n"));
+	}
+	else
+	{
+		BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY, ("[BTCoex], MEDIA disconnect notify\n"));
+	}
+}
+
+VOID
+EXhalbtc8723a1ant_SpecialPacketNotify(
+	IN	PBTC_COEXIST			pBtCoexist,
+	IN	u1Byte				type
+	)
+{
+	if(type == BTC_PACKET_DHCP)
+	{
+		BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY, ("[BTCoex], DHCP Packet notify\n"));
+		if(pBtCoexist->btInfo.bBtDisabled)
+		{
+			halbtc8723a1ant_PsTdma(pBtCoexist, NORMAL_EXEC, FALSE, 9);	
+		}
+		else
+		{
+			halbtc8723a1ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 18);
+		}		
+	}
+}
+
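+// Parse the C2H BT-info payload: byte1 is the retry count, byte2 encodes the BT RSSI, byte3 carries extra info; byte0 is decoded into btStatus below.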
+VOID
+EXhalbtc8723a1ant_BtInfoNotify(
+	IN	PBTC_COEXIST		pBtCoexist,
+	IN	pu1Byte			tmpBuf,
+	IN	u1Byte			length
+	)
+{
+	u1Byte			btInfo=0;
+	u1Byte			i, rspSource=0;
+	BOOLEAN			bBtHsOn=FALSE, bBtBusy=FALSE, bForceLps=FALSE;
+
+	pCoexSta->bC2hBtInfoReqSent = FALSE;
+	
+	rspSource = BT_INFO_SRC_8723A_1ANT_BT_RSP;
+	pCoexSta->btInfoC2hCnt[rspSource]++;
+
+	BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY, ("[BTCoex], Bt info[%d], length=%d, hex data=[", rspSource, length));
+	for(i=0; i<length; i++)
+	{
+		pCoexSta->btInfoC2h[rspSource][i] = tmpBuf[i];
+		if(i == 0)
+			btInfo = tmpBuf[i];
+		if(i == length-1)
+		{
+			BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY, ("0x%02x]\n", tmpBuf[i]));
+		}
+		else
+		{
+			BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY, ("0x%02x, ", tmpBuf[i]));
+		}
+	}
+
+	if(BT_INFO_SRC_8723A_1ANT_WIFI_FW != rspSource)
+	{
+		pCoexSta->btRetryCnt =
+			pCoexSta->btInfoC2h[rspSource][1];
+
+		pCoexSta->btRssi =
+			pCoexSta->btInfoC2h[rspSource][2]*2+10;
+
+		pCoexSta->btInfoExt = 
+			pCoexSta->btInfoC2h[rspSource][3];
+	}
+		
+	pBtCoexist->btc_get(pBtCoexist, BTC_GET_BL_HS_OPERATION, &bBtHsOn);
+	// check BIT2 first ==> check if bt is under inquiry or page scan
+	if(btInfo & BT_INFO_8723A_1ANT_B_INQ_PAGE)
+	{
+		pCoexSta->bC2hBtInquiryPage = true;
+	}
+	else
+	{
+		pCoexSta->bC2hBtInquiryPage = FALSE;
+	}
+	btInfo &= ~BIT2;
+	if(!(btInfo & BIT0))
+	{
+		pCoexDm->btStatus = BT_STATE_8723A_1ANT_NO_CONNECTION;
+		bForceLps = FALSE;
+	}
+	else
+	{
+		bForceLps = true;
+		if(btInfo == 0x1)
+		{
+			pCoexDm->btStatus = BT_STATE_8723A_1ANT_CONNECT_IDLE;
+		}
+		else if(btInfo == 0x9)
+		{
+			pCoexDm->btStatus = BT_STATE_8723A_1ANT_ACL_ONLY_BUSY;
+			bBtBusy = true;
+		}
+		else if(btInfo == 0x13)
+		{
+			pCoexDm->btStatus = BT_STATE_8723A_1ANT_SCO_ONLY_BUSY;
+			bBtBusy = true;
+		}
+		else if(btInfo == 0x1b)
+		{
+			pCoexDm->btStatus = BT_STATE_8723A_1ANT_ACL_SCO_BUSY;
+			bBtBusy = true;
+		}
+		else if(btInfo == 0x29)
+		{
+			pCoexDm->btStatus = BT_STATE_8723A_1ANT_HID_BUSY;
+			bBtBusy = true;
+		}
+		else if(btInfo == 0x3b)
+		{
+			pCoexDm->btStatus = BT_STATE_8723A_1ANT_HID_SCO_BUSY;
+			bBtBusy = true;
+		}
+	}
+	pBtCoexist->btc_set(pBtCoexist, BTC_SET_BL_BT_TRAFFIC_BUSY, &bBtBusy);
+	pBtCoexist->btc_set(pBtCoexist, BTC_SET_BL_BT_LIMITED_DIG, &bBtBusy);
+	if(bForceLps)
+		pBtCoexist->btc_set(pBtCoexist, BTC_SET_ACT_ENTER_LPS, NULL);
+	else
+		pBtCoexist->btc_set(pBtCoexist, BTC_SET_ACT_NORMAL_LPS, NULL);
+
+	if( (BT_STATE_8723A_1ANT_NO_CONNECTION == pCoexDm->btStatus) ||
+		(BT_STATE_8723A_1ANT_CONNECT_IDLE == pCoexDm->btStatus) )
+	{
+		if(pCoexSta->bC2hBtInquiryPage)
+			pCoexDm->btStatus = BT_STATE_8723A_1ANT_INQ_OR_PAG;
+	}
+}
+
+VOID
+EXhalbtc8723a1ant_StackOperationNotify(
+	IN	PBTC_COEXIST			pBtCoexist,
+	IN	u1Byte				type
+	)
+{
+	if(BTC_STACK_OP_INQ_PAGE_PAIR_START == type)
+	{
+		BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY, ("[BTCoex], StackOP Inquiry/page/pair start notify\n"));
+	}
+	else if(BTC_STACK_OP_INQ_PAGE_PAIR_FINISH == type)
+	{
+		BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY, ("[BTCoex], StackOP Inquiry/page/pair finish notify\n"));
+	}
+}
+
+VOID
+EXhalbtc8723a1ant_HaltNotify(
+	IN	PBTC_COEXIST			pBtCoexist
+	)
+{
+	halbtc8723a1ant_PsTdma(pBtCoexist, FORCE_EXEC, FALSE, 0);
+	
+	halbtc8723a1ant_LowPenaltyRa(pBtCoexist, FORCE_EXEC, FALSE);
+	halbtc8723a1ant_RfShrink(pBtCoexist, FORCE_EXEC, FALSE);
+
+	halbtc8723a1ant_IgnoreWlanAct(pBtCoexist, FORCE_EXEC, true);
+	EXhalbtc8723a1ant_MediaStatusNotify(pBtCoexist, BTC_MEDIA_DISCONNECT);
+}
+
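+// Periodic maintenance: poll BT info and counters, run the C2H hang workaround, then refresh the coex policy unless wifi is scanning or linking.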
+VOID
+EXhalbtc8723a1ant_Periodical(
+	IN	PBTC_COEXIST			pBtCoexist
+	)
+{
+	BOOLEAN		bScan=FALSE, bLink=FALSE, bRoam=FALSE, bWifiConnected=FALSE;
+	
+	BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY, ("[BTCoex], 1Ant Periodical!!\n"));
+	
+	pBtCoexist->btc_get(pBtCoexist, BTC_GET_BL_WIFI_SCAN, &bScan);
+	pBtCoexist->btc_get(pBtCoexist, BTC_GET_BL_WIFI_LINK, &bLink);
+	pBtCoexist->btc_get(pBtCoexist, BTC_GET_BL_WIFI_ROAM, &bRoam);
+	pBtCoexist->btc_get(pBtCoexist, BTC_GET_BL_WIFI_CONNECTED, &bWifiConnected);
+
+	// workaround for C2H hang
+	wa_halbtc8723a1ant_MonitorC2h(pBtCoexist);	
+
+	halbtc8723a1ant_QueryBtInfo(pBtCoexist);
+	halbtc8723a1ant_MonitorBtCtr(pBtCoexist);
+	halbtc8723a1ant_MonitorBtEnableDisable(pBtCoexist);
+
+	
+	if(bScan)
+		return;
+	if(bLink)
+		return;
+
+	if(bWifiConnected)
+	{
+		if(pBtCoexist->btInfo.bBtDisabled)
+		{
+			halbtc8723a1ant_PsTdma(pBtCoexist, NORMAL_EXEC, FALSE, 9);
+			
+			halbtc8723a1ant_LowPenaltyRa(pBtCoexist, NORMAL_EXEC, FALSE);
+			halbtc8723a1ant_RfShrink(pBtCoexist, NORMAL_EXEC, FALSE);
+		}
+		else
+		{
+			halbtc8723a1ant_LowPenaltyRa(pBtCoexist, NORMAL_EXEC, true);
+			halbtc8723a1ant_RfShrink(pBtCoexist, NORMAL_EXEC, FALSE);
+			halbtc8723a1ant_CoexForWifiConnect(pBtCoexist);
+		}
+	}
+	else
+	{
+		halbtc8723a1ant_PsTdma(pBtCoexist, NORMAL_EXEC, FALSE, 0);
+
+		halbtc8723a1ant_LowPenaltyRa(pBtCoexist, NORMAL_EXEC, FALSE);
+		halbtc8723a1ant_RfShrink(pBtCoexist, NORMAL_EXEC, FALSE);
+	}
+}
+
+
+#endif
+
diff --git a/drivers/staging/rtl8821ae/btcoexist/habtc8723a1ant.h b/drivers/staging/rtl8821ae/btcoexist/habtc8723a1ant.h
new file mode 100644
index 0000000..60992f5
--- /dev/null
+++ b/drivers/staging/rtl8821ae/btcoexist/habtc8723a1ant.h
@@ -0,0 +1,176 @@
+//===========================================
+// The following are the 8723A 1Ant BT co-exist definitions
+//===========================================
+#define	BT_INFO_8723A_1ANT_B_FTP						BIT7
+#define	BT_INFO_8723A_1ANT_B_A2DP					BIT6
+#define	BT_INFO_8723A_1ANT_B_HID						BIT5
+#define	BT_INFO_8723A_1ANT_B_SCO_BUSY				BIT4
+#define	BT_INFO_8723A_1ANT_B_ACL_BUSY				BIT3
+#define	BT_INFO_8723A_1ANT_B_INQ_PAGE				BIT2
+#define	BT_INFO_8723A_1ANT_B_SCO_ESCO				BIT1
+#define	BT_INFO_8723A_1ANT_B_CONNECTION				BIT0
+
+typedef enum _BT_STATE_8723A_1ANT{
+	BT_STATE_8723A_1ANT_DISABLED				= 0,
+	BT_STATE_8723A_1ANT_NO_CONNECTION		= 1,
+	BT_STATE_8723A_1ANT_CONNECT_IDLE		= 2,
+	BT_STATE_8723A_1ANT_INQ_OR_PAG			= 3,
+	BT_STATE_8723A_1ANT_ACL_ONLY_BUSY		= 4,
+	BT_STATE_8723A_1ANT_SCO_ONLY_BUSY		= 5,
+	BT_STATE_8723A_1ANT_ACL_SCO_BUSY			= 6,
+	BT_STATE_8723A_1ANT_HID_BUSY				= 7,
+	BT_STATE_8723A_1ANT_HID_SCO_BUSY			= 8,
+	BT_STATE_8723A_1ANT_MAX
+}BT_STATE_8723A_1ANT, *PBT_STATE_8723A_1ANT;
+
+#define		BTC_RSSI_COEX_THRESH_TOL_8723A_1ANT		2
+
+typedef enum _BT_INFO_SRC_8723A_1ANT{
+	BT_INFO_SRC_8723A_1ANT_WIFI_FW			= 0x0,
+	BT_INFO_SRC_8723A_1ANT_BT_RSP				= 0x1,
+	BT_INFO_SRC_8723A_1ANT_BT_ACTIVE_SEND		= 0x2,
+	BT_INFO_SRC_8723A_1ANT_MAX
+}BT_INFO_SRC_8723A_1ANT,*PBT_INFO_SRC_8723A_1ANT;
+
+typedef enum _BT_8723A_1ANT_BT_STATUS{
+	BT_8723A_1ANT_BT_STATUS_IDLE				= 0x0,
+	BT_8723A_1ANT_BT_STATUS_CONNECTED_IDLE	= 0x1,
+	BT_8723A_1ANT_BT_STATUS_NON_IDLE			= 0x2,
+	BT_8723A_1ANT_BT_STATUS_MAX
+}BT_8723A_1ANT_BT_STATUS,*PBT_8723A_1ANT_BT_STATUS;
+
+typedef enum _BT_8723A_1ANT_COEX_ALGO{
+	BT_8723A_1ANT_COEX_ALGO_UNDEFINED			= 0x0,
+	BT_8723A_1ANT_COEX_ALGO_SCO				= 0x1,
+	BT_8723A_1ANT_COEX_ALGO_HID				= 0x2,
+	BT_8723A_1ANT_COEX_ALGO_A2DP				= 0x3,
+	BT_8723A_1ANT_COEX_ALGO_PANEDR			= 0x4,
+	BT_8723A_1ANT_COEX_ALGO_PANHS			= 0x5,
+	BT_8723A_1ANT_COEX_ALGO_PANEDR_A2DP		= 0x6,
+	BT_8723A_1ANT_COEX_ALGO_PANEDR_HID		= 0x7,
+	BT_8723A_1ANT_COEX_ALGO_HID_A2DP_PANEDR	= 0x8,
+	BT_8723A_1ANT_COEX_ALGO_HID_A2DP			= 0x9,
+	BT_8723A_1ANT_COEX_ALGO_MAX
+}BT_8723A_1ANT_COEX_ALGO,*PBT_8723A_1ANT_COEX_ALGO;
+
+typedef struct _COEX_DM_8723A_1ANT{
+	// fw mechanism
+	BOOLEAN		bCurIgnoreWlanAct;
+	BOOLEAN		bPreIgnoreWlanAct;
+	u1Byte		prePsTdma;
+	u1Byte		curPsTdma;
+	u1Byte		psTdmaPara[5];
+	u1Byte		psTdmaDuAdjType;
+	u4Byte		psTdmaMonitorCnt;
+	u4Byte		psTdmaGlobalCnt;
+	BOOLEAN		bResetTdmaAdjust;
+	BOOLEAN		bPrePsTdmaOn;
+	BOOLEAN		bCurPsTdmaOn;
+
+	// sw mechanism
+	BOOLEAN		bPreRfRxLpfShrink;
+	BOOLEAN		bCurRfRxLpfShrink;
+	u4Byte		btRf0x1eBackup;
+	BOOLEAN 	bPreLowPenaltyRa;
+	BOOLEAN		bCurLowPenaltyRa;
+	u4Byte		preVal0x6c0;
+	u4Byte		curVal0x6c0;
+	u4Byte		preVal0x6c8;
+	u4Byte		curVal0x6c8;
+	u1Byte		preVal0x6cc;
+	u1Byte		curVal0x6cc;
+	BOOLEAN		limited_dig;
+
+	// algorithm related
+	u1Byte		preAlgorithm;
+	u1Byte		curAlgorithm;
+	u1Byte		btStatus;
+	u1Byte		wifiChnlInfo[3];
+} COEX_DM_8723A_1ANT, *PCOEX_DM_8723A_1ANT;
+
+typedef struct _COEX_STA_8723A_1ANT{
+	u4Byte					highPriorityTx;
+	u4Byte					highPriorityRx;
+	u4Byte					lowPriorityTx;
+	u4Byte					lowPriorityRx;
+	u1Byte					btRssi;
+	u1Byte					preBtRssiState;
+	u1Byte					preBtRssiState1;
+	u1Byte					preWifiRssiState[4];
+	BOOLEAN					bC2hBtInfoReqSent;
+	u1Byte					btInfoC2h[BT_INFO_SRC_8723A_1ANT_MAX][10];
+	u4Byte					btInfoC2hCnt[BT_INFO_SRC_8723A_1ANT_MAX];
+	BOOLEAN					bC2hBtInquiryPage;
+	u1Byte					btRetryCnt;
+	u1Byte					btInfoExt;
+	//BOOLEAN					bHoldForStackOperation;
+	//u1Byte					bHoldPeriodCnt;
+	// this is for c2h hang work-around
+	u4Byte					c2hHangDetectCnt;
+}COEX_STA_8723A_1ANT, *PCOEX_STA_8723A_1ANT;
+
+//===========================================
+// The following are the interfaces which notify the coex module.
+//===========================================
+VOID
+EXhalbtc8723a1ant_InitHwConfig(
+	IN	PBTC_COEXIST		pBtCoexist
+	);
+VOID
+EXhalbtc8723a1ant_InitCoexDm(
+	IN	PBTC_COEXIST		pBtCoexist
+	);
+VOID
+EXhalbtc8723a1ant_IpsNotify(
+	IN	PBTC_COEXIST		pBtCoexist,
+	IN	u1Byte			type
+	);
+VOID
+EXhalbtc8723a1ant_LpsNotify(
+	IN	PBTC_COEXIST		pBtCoexist,
+	IN	u1Byte			type
+	);
+VOID
+EXhalbtc8723a1ant_ScanNotify(
+	IN	PBTC_COEXIST		pBtCoexist,
+	IN	u1Byte			type
+	);
+VOID
+EXhalbtc8723a1ant_ConnectNotify(
+	IN	PBTC_COEXIST		pBtCoexist,
+	IN	u1Byte			type
+	);
+VOID
+EXhalbtc8723a1ant_MediaStatusNotify(
+	IN	PBTC_COEXIST			pBtCoexist,
+	IN	u1Byte				type
+	);
+VOID
+EXhalbtc8723a1ant_SpecialPacketNotify(
+	IN	PBTC_COEXIST			pBtCoexist,
+	IN	u1Byte				type
+	);
+VOID
+EXhalbtc8723a1ant_BtInfoNotify(
+	IN	PBTC_COEXIST		pBtCoexist,
+	IN	pu1Byte			tmpBuf,
+	IN	u1Byte			length
+	);
+VOID
+EXhalbtc8723a1ant_StackOperationNotify(
+	IN	PBTC_COEXIST			pBtCoexist,
+	IN	u1Byte				type
+	);
+VOID
+EXhalbtc8723a1ant_HaltNotify(
+	IN	PBTC_COEXIST			pBtCoexist
+	);
+VOID
+EXhalbtc8723a1ant_Periodical(
+	IN	PBTC_COEXIST			pBtCoexist
+	);
+VOID
+EXhalbtc8723a1ant_DisplayCoexInfo(
+	IN	PBTC_COEXIST		pBtCoexist
+	);
+
diff --git a/drivers/staging/rtl8821ae/btcoexist/halbt_precomp.h b/drivers/staging/rtl8821ae/btcoexist/halbt_precomp.h
new file mode 100644
index 0000000..d538ba3
--- /dev/null
+++ b/drivers/staging/rtl8821ae/btcoexist/halbt_precomp.h
@@ -0,0 +1,99 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2007 - 2011 Realtek Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ *
+ ******************************************************************************/
+#ifndef	__HALBT_PRECOMP_H__
+#define __HALBT_PRECOMP_H__
+/*************************************************************
+ * include files
+ *************************************************************/
+#include "../wifi.h"
+#include "../efuse.h"
+#include "../base.h"
+#include "../regd.h"
+#include "../cam.h"
+#include "../ps.h"
+#include "../pci.h"
+#include "../rtl8821ae/reg.h"
+#include "../rtl8821ae/def.h"
+#include "../rtl8821ae/phy.h"
+#include "../rtl8821ae/dm.h"
+#include "../rtl8821ae/fw.h"
+#include "../rtl8821ae/led.h"
+#include "../rtl8821ae/hw.h"
+#include "../rtl8821ae/pwrseqcmd.h"
+#include "../rtl8821ae/pwrseq.h"
+
+#include "halbtcoutsrc.h"
+
+
+#include "halbtc8192e2ant.h"
+#include "halbtc8723b1ant.h"
+#include "halbtc8723b2ant.h"
+
+
+
+#define GetDefaultAdapter(padapter)	padapter
+
+
+#define BIT0	0x00000001
+#define BIT1	0x00000002
+#define BIT2	0x00000004
+#define BIT3	0x00000008
+#define BIT4	0x00000010
+#define BIT5	0x00000020
+#define BIT6	0x00000040
+#define BIT7	0x00000080
+#define BIT8	0x00000100
+#define BIT9	0x00000200
+#define BIT10	0x00000400
+#define BIT11	0x00000800
+#define BIT12	0x00001000
+#define BIT13	0x00002000
+#define BIT14	0x00004000
+#define BIT15	0x00008000
+#define BIT16	0x00010000
+#define BIT17	0x00020000
+#define BIT18	0x00040000
+#define BIT19	0x00080000
+#define BIT20	0x00100000
+#define BIT21	0x00200000
+#define BIT22	0x00400000
+#define BIT23	0x00800000
+#define BIT24	0x01000000
+#define BIT25	0x02000000
+#define BIT26	0x04000000
+#define BIT27	0x08000000
+#define BIT28	0x10000000
+#define BIT29	0x20000000
+#define BIT30	0x40000000
+#define BIT31	0x80000000
+
+#define	MASKBYTE0                	0xff
+#define	MASKBYTE1                	0xff00
+#define	MASKBYTE2                	0xff0000
+#define	MASKBYTE3                	0xff000000
+#define	MASKHWORD                	0xffff0000
+#define	MASKLWORD                	0x0000ffff
+#define	MASKDWORD			0xffffffff
+#define	MASK12BITS			0xfff
+#define	MASKH4BITS			0xf0000000
+#define MASKOFDM_D			0xffc00000
+#define	MASKCCK				0x3f3f3f3f
+
+#endif	/* __HALBT_PRECOMP_H__ */
diff --git a/drivers/staging/rtl8821ae/btcoexist/halbtc8192e1ant.c b/drivers/staging/rtl8821ae/btcoexist/halbtc8192e1ant.c
new file mode 100644
index 0000000..973d0ea
--- /dev/null
+++ b/drivers/staging/rtl8821ae/btcoexist/halbtc8192e1ant.c
@@ -0,0 +1,3891 @@
+//============================================================
+// Description:
+//
+// This file is for 8192e1ant Co-exist mechanism
+//
+// History
+// 2012/11/15 Cosa first check in.
+//
+//============================================================
+
+//============================================================
+// include files
+//============================================================
+#include "Mp_Precomp.h"
+#if(BT_30_SUPPORT == 1)
+//============================================================
+// Global variables, these are static variables
+//============================================================
+static COEX_DM_8192E_1ANT		GLCoexDm8192e1Ant;
+static PCOEX_DM_8192E_1ANT 	pCoexDm=&GLCoexDm8192e1Ant;
+static COEX_STA_8192E_1ANT		GLCoexSta8192e1Ant;
+static PCOEX_STA_8192E_1ANT	pCoexSta=&GLCoexSta8192e1Ant;
+
+const char *const GLBtInfoSrc8192e1Ant[]={
+	"BT Info[wifi fw]",
+	"BT Info[bt rsp]",
+	"BT Info[bt auto report]",
+};
+
+u4Byte	GLCoexVerDate8192e1Ant=20130729;
+u4Byte	GLCoexVer8192e1Ant=0x10;
+
+//============================================================
+// local function prototypes if needed
+//============================================================
+//============================================================
+// local functions start with halbtc8192e1ant_
+//============================================================
+u1Byte
+halbtc8192e1ant_BtRssiState(
+	u1Byte			levelNum,
+	u1Byte			rssiThresh,
+	u1Byte			rssiThresh1
+	)
+{
+	s4Byte			btRssi=0;
+	u1Byte			btRssiState=pCoexSta->preBtRssiState;
+
+	btRssi = pCoexSta->btRssi;
+
+	if(levelNum == 2)
+	{			
+		if( (pCoexSta->preBtRssiState == BTC_RSSI_STATE_LOW) ||
+			(pCoexSta->preBtRssiState == BTC_RSSI_STATE_STAY_LOW))
+		{
+			if(btRssi >= (rssiThresh+BTC_RSSI_COEX_THRESH_TOL_8192E_1ANT))
+			{
+				btRssiState = BTC_RSSI_STATE_HIGH;
+				BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_RSSI_STATE, ("[BTCoex], BT Rssi state switch to High\n"));
+			}
+			else
+			{
+				btRssiState = BTC_RSSI_STATE_STAY_LOW;
+				BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_RSSI_STATE, ("[BTCoex], BT Rssi state stay at Low\n"));
+			}
+		}
+		else
+		{
+			if(btRssi < rssiThresh)
+			{
+				btRssiState = BTC_RSSI_STATE_LOW;
+				BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_RSSI_STATE, ("[BTCoex], BT Rssi state switch to Low\n"));
+			}
+			else
+			{
+				btRssiState = BTC_RSSI_STATE_STAY_HIGH;
+				BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_RSSI_STATE, ("[BTCoex], BT Rssi state stay at High\n"));
+			}
+		}
+	}
+	else if(levelNum == 3)
+	{
+		if(rssiThresh > rssiThresh1)
+		{
+			BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_RSSI_STATE, ("[BTCoex], BT Rssi thresh error!!\n"));
+			return pCoexSta->preBtRssiState;
+		}
+		
+		if( (pCoexSta->preBtRssiState == BTC_RSSI_STATE_LOW) ||
+			(pCoexSta->preBtRssiState == BTC_RSSI_STATE_STAY_LOW))
+		{
+			if(btRssi >= (rssiThresh+BTC_RSSI_COEX_THRESH_TOL_8192E_1ANT))
+			{
+				btRssiState = BTC_RSSI_STATE_MEDIUM;
+				BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_RSSI_STATE, ("[BTCoex], BT Rssi state switch to Medium\n"));
+			}
+			else
+			{
+				btRssiState = BTC_RSSI_STATE_STAY_LOW;
+				BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_RSSI_STATE, ("[BTCoex], BT Rssi state stay at Low\n"));
+			}
+		}
+		else if( (pCoexSta->preBtRssiState == BTC_RSSI_STATE_MEDIUM) ||
+			(pCoexSta->preBtRssiState == BTC_RSSI_STATE_STAY_MEDIUM))
+		{
+			if(btRssi >= (rssiThresh1+BTC_RSSI_COEX_THRESH_TOL_8192E_1ANT))
+			{
+				btRssiState = BTC_RSSI_STATE_HIGH;
+				BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_RSSI_STATE, ("[BTCoex], BT Rssi state switch to High\n"));
+			}
+			else if(btRssi < rssiThresh)
+			{
+				btRssiState = BTC_RSSI_STATE_LOW;
+				BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_RSSI_STATE, ("[BTCoex], BT Rssi state switch to Low\n"));
+			}
+			else
+			{
+				btRssiState = BTC_RSSI_STATE_STAY_MEDIUM;
+				BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_RSSI_STATE, ("[BTCoex], BT Rssi state stay at Medium\n"));
+			}
+		}
+		else
+		{
+			if(btRssi < rssiThresh1)
+			{
+				btRssiState = BTC_RSSI_STATE_MEDIUM;
+				BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_RSSI_STATE, ("[BTCoex], BT Rssi state switch to Medium\n"));
+			}
+			else
+			{
+				btRssiState = BTC_RSSI_STATE_STAY_HIGH;
+				BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_RSSI_STATE, ("[BTCoex], BT Rssi state stay at High\n"));
+			}
+		}
+	}
+		
+	pCoexSta->preBtRssiState = btRssiState;
+
+	return btRssiState;
+}
+
+u1Byte
+halbtc8192e1ant_WifiRssiState(
+	IN	PBTC_COEXIST		pBtCoexist,
+	IN	u1Byte			index,
+	IN	u1Byte			levelNum,
+	IN	u1Byte			rssiThresh,
+	IN	u1Byte			rssiThresh1
+	)
+{
+	s4Byte			wifiRssi=0;
+	u1Byte			wifiRssiState=pCoexSta->preWifiRssiState[index];
+
+	pBtCoexist->btc_get(pBtCoexist, BTC_GET_S4_WIFI_RSSI, &wifiRssi);
+	
+	if(levelNum == 2)
+	{
+		if( (pCoexSta->preWifiRssiState[index] == BTC_RSSI_STATE_LOW) ||
+			(pCoexSta->preWifiRssiState[index] == BTC_RSSI_STATE_STAY_LOW))
+		{
+			if(wifiRssi >= (rssiThresh+BTC_RSSI_COEX_THRESH_TOL_8192E_1ANT))
+			{
+				wifiRssiState = BTC_RSSI_STATE_HIGH;
+				BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_WIFI_RSSI_STATE, ("[BTCoex], wifi RSSI state switch to High\n"));
+			}
+			else
+			{
+				wifiRssiState = BTC_RSSI_STATE_STAY_LOW;
+				BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_WIFI_RSSI_STATE, ("[BTCoex], wifi RSSI state stay at Low\n"));
+			}
+		}
+		else
+		{
+			if(wifiRssi < rssiThresh)
+			{
+				wifiRssiState = BTC_RSSI_STATE_LOW;
+				BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_WIFI_RSSI_STATE, ("[BTCoex], wifi RSSI state switch to Low\n"));
+			}
+			else
+			{
+				wifiRssiState = BTC_RSSI_STATE_STAY_HIGH;
+				BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_WIFI_RSSI_STATE, ("[BTCoex], wifi RSSI state stay at High\n"));
+			}
+		}
+	}
+	else if(levelNum == 3)
+	{
+		if(rssiThresh > rssiThresh1)
+		{
+			BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_WIFI_RSSI_STATE, ("[BTCoex], wifi RSSI thresh error!!\n"));
+			return pCoexSta->preWifiRssiState[index];
+		}
+		
+		if( (pCoexSta->preWifiRssiState[index] == BTC_RSSI_STATE_LOW) ||
+			(pCoexSta->preWifiRssiState[index] == BTC_RSSI_STATE_STAY_LOW))
+		{
+			if(wifiRssi >= (rssiThresh+BTC_RSSI_COEX_THRESH_TOL_8192E_1ANT))
+			{
+				wifiRssiState = BTC_RSSI_STATE_MEDIUM;
+				BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_WIFI_RSSI_STATE, ("[BTCoex], wifi RSSI state switch to Medium\n"));
+			}
+			else
+			{
+				wifiRssiState = BTC_RSSI_STATE_STAY_LOW;
+				BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_WIFI_RSSI_STATE, ("[BTCoex], wifi RSSI state stay at Low\n"));
+			}
+		}
+		else if( (pCoexSta->preWifiRssiState[index] == BTC_RSSI_STATE_MEDIUM) ||
+			(pCoexSta->preWifiRssiState[index] == BTC_RSSI_STATE_STAY_MEDIUM))
+		{
+			if(wifiRssi >= (rssiThresh1+BTC_RSSI_COEX_THRESH_TOL_8192E_1ANT))
+			{
+				wifiRssiState = BTC_RSSI_STATE_HIGH;
+				BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_WIFI_RSSI_STATE, ("[BTCoex], wifi RSSI state switch to High\n"));
+			}
+			else if(wifiRssi < rssiThresh)
+			{
+				wifiRssiState = BTC_RSSI_STATE_LOW;
+				BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_WIFI_RSSI_STATE, ("[BTCoex], wifi RSSI state switch to Low\n"));
+			}
+			else
+			{
+				wifiRssiState = BTC_RSSI_STATE_STAY_MEDIUM;
+				BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_WIFI_RSSI_STATE, ("[BTCoex], wifi RSSI state stay at Medium\n"));
+			}
+		}
+		else
+		{
+			if(wifiRssi < rssiThresh1)
+			{
+				wifiRssiState = BTC_RSSI_STATE_MEDIUM;
+				BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_WIFI_RSSI_STATE, ("[BTCoex], wifi RSSI state switch to Medium\n"));
+			}
+			else
+			{
+				wifiRssiState = BTC_RSSI_STATE_STAY_HIGH;
+				BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_WIFI_RSSI_STATE, ("[BTCoex], wifi RSSI state stay at High\n"));
+			}
+		}
+	}
+		
+	pCoexSta->preWifiRssiState[index] = wifiRssiState;
+
+	return wifiRssiState;
+}
+
+VOID
+halbtc8192e1ant_Updatera_mask(
+	IN	PBTC_COEXIST		pBtCoexist,
+	IN	BOOLEAN				bForceExec,
+	IN	u1Byte				type,
+	IN	u4Byte				rateMask
+	)
+{
+	if(BTC_RATE_DISABLE == type)
+	{
+		pCoexDm->curra_mask |= rateMask;		// disable rate
+	}
+	else if(BTC_RATE_ENABLE == type)
+	{
+		pCoexDm->curra_mask &= ~rateMask;	// enable rate
+	}
+	
+	if( bForceExec || (pCoexDm->prera_mask != pCoexDm->curra_mask))
+	{
+		pBtCoexist->btc_set(pBtCoexist, BTC_SET_ACT_UPDATE_ra_mask, &pCoexDm->curra_mask);
+	}
+	pCoexDm->prera_mask = pCoexDm->curra_mask;
+}
+
+VOID
+halbtc8192e1ant_MonitorBtCtr(
+	IN	PBTC_COEXIST		pBtCoexist
+	)
+{
+	u4Byte 			regHPTxRx, regLPTxRx, u4Tmp;
+	u4Byte			regHPTx=0, regHPRx=0, regLPTx=0, regLPRx=0;
+	u1Byte			u1Tmp;
+	
+	regHPTxRx = 0x770;
+	regLPTxRx = 0x774;
+
+	u4Tmp = pBtCoexist->btc_read_4byte(pBtCoexist, regHPTxRx);
+	regHPTx = u4Tmp & MASKLWORD;
+	regHPRx = (u4Tmp & MASKHWORD)>>16;
+
+	u4Tmp = pBtCoexist->btc_read_4byte(pBtCoexist, regLPTxRx);
+	regLPTx = u4Tmp & MASKLWORD;
+	regLPRx = (u4Tmp & MASKHWORD)>>16;
+		
+	pCoexSta->highPriorityTx = regHPTx;
+	pCoexSta->highPriorityRx = regHPRx;
+	pCoexSta->lowPriorityTx = regLPTx;
+	pCoexSta->lowPriorityRx = regLPRx;
+
+	BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_MONITOR, ("[BTCoex], High Priority Tx/Rx (reg 0x%x)=0x%x(%d)/0x%x(%d)\n", 
+		regHPTxRx, regHPTx, regHPTx, regHPRx, regHPRx));
+	BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_MONITOR, ("[BTCoex], Low Priority Tx/Rx (reg 0x%x)=0x%x(%d)/0x%x(%d)\n", 
+		regLPTxRx, regLPTx, regLPTx, regLPRx, regLPRx));
+
+	// reset counter
+	pBtCoexist->btc_write_1byte(pBtCoexist, 0x76e, 0xc);
+}
+
+VOID
+halbtc8192e1ant_QueryBtInfo(
+	IN	PBTC_COEXIST		pBtCoexist
+	)
+{
+	u1Byte			H2C_Parameter[1] ={0};
+
+	pCoexSta->bC2hBtInfoReqSent = true;
+
+	H2C_Parameter[0] |= BIT0;	// trigger
+
+	BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_EXEC, ("[BTCoex], Query Bt Info, FW write 0x61=0x%x\n", 
+		H2C_Parameter[0]));
+
+	pBtCoexist->btc_fill_h2c(pBtCoexist, 0x61, 1, H2C_Parameter);
+}
+
+BOOLEAN
+halbtc8192e1ant_IsWifiStatusChanged(
+	IN	PBTC_COEXIST		pBtCoexist
+	)
+{
+	static BOOLEAN	bPreWifiBusy=FALSE, bPreUnder4way=FALSE, bPreBtHsOn=FALSE;
+	BOOLEAN	bWifiBusy=FALSE, bUnder4way=FALSE, bBtHsOn=FALSE;
+	BOOLEAN	bWifiConnected=FALSE;
+
+	pBtCoexist->btc_get(pBtCoexist, BTC_GET_BL_WIFI_CONNECTED, &bWifiConnected);
+	pBtCoexist->btc_get(pBtCoexist, BTC_GET_BL_WIFI_BUSY, &bWifiBusy);
+	pBtCoexist->btc_get(pBtCoexist, BTC_GET_BL_HS_OPERATION, &bBtHsOn);
+	pBtCoexist->btc_get(pBtCoexist, BTC_GET_BL_WIFI_4_WAY_PROGRESS, &bUnder4way);
+
+	if(bWifiConnected)
+	{
+		if(bWifiBusy != bPreWifiBusy)
+		{
+			bPreWifiBusy = bWifiBusy;
+			return true;
+		}
+		if(bUnder4way != bPreUnder4way)
+		{
+			bPreUnder4way = bUnder4way;
+			return true;
+		}
+		if(bBtHsOn != bPreBtHsOn)
+		{
+			bPreBtHsOn = bBtHsOn;
+			return true;
+		}
+	}
+
+	return FALSE;
+}
+
+VOID
+halbtc8192e1ant_UpdateBtLinkInfo(
+	IN	PBTC_COEXIST		pBtCoexist
+	)
+{
+	PBTC_BT_LINK_INFO	pBtLinkInfo=&pBtCoexist->bt_link_info;
+
+	pBtLinkInfo->bBtLinkExist = pCoexSta->bBtLinkExist;
+	pBtLinkInfo->bScoExist = pCoexSta->bScoExist;
+	pBtLinkInfo->bA2dpExist = pCoexSta->bA2dpExist;
+	pBtLinkInfo->bPanExist = pCoexSta->bPanExist;
+	pBtLinkInfo->bHidExist = pCoexSta->bHidExist;
+
+	// check if Sco only
+	if( pBtLinkInfo->bScoExist &&
+		!pBtLinkInfo->bA2dpExist &&
+		!pBtLinkInfo->bPanExist &&
+		!pBtLinkInfo->bHidExist )
+		pBtLinkInfo->bScoOnly = true;
+	else
+		pBtLinkInfo->bScoOnly = FALSE;
+
+	// check if A2dp only
+	if( !pBtLinkInfo->bScoExist &&
+		pBtLinkInfo->bA2dpExist &&
+		!pBtLinkInfo->bPanExist &&
+		!pBtLinkInfo->bHidExist )
+		pBtLinkInfo->bA2dpOnly = true;
+	else
+		pBtLinkInfo->bA2dpOnly = FALSE;
+
+	// check if Pan only
+	if( !pBtLinkInfo->bScoExist &&
+		!pBtLinkInfo->bA2dpExist &&
+		pBtLinkInfo->bPanExist &&
+		!pBtLinkInfo->bHidExist )
+		pBtLinkInfo->bPanOnly = true;
+	else
+		pBtLinkInfo->bPanOnly = FALSE;
+	
+	// check if Hid only
+	if( !pBtLinkInfo->bScoExist &&
+		!pBtLinkInfo->bA2dpExist &&
+		!pBtLinkInfo->bPanExist &&
+		pBtLinkInfo->bHidExist )
+		pBtLinkInfo->bHidOnly = true;
+	else
+		pBtLinkInfo->bHidOnly = FALSE;
+}
+
+u1Byte
+halbtc8192e1ant_ActionAlgorithm(
+	IN	PBTC_COEXIST		pBtCoexist
+	)
+{
+	PBTC_BT_LINK_INFO	pBtLinkInfo=&pBtCoexist->bt_link_info;
+	BOOLEAN				bBtHsOn=FALSE;
+	u1Byte				algorithm=BT_8192E_1ANT_COEX_ALGO_UNDEFINED;
+	u1Byte				numOfDiffProfile=0;
+
+	pBtCoexist->btc_get(pBtCoexist, BTC_GET_BL_HS_OPERATION, &bBtHsOn);
+		
+	if(!pBtLinkInfo->bBtLinkExist)
+	{
+		BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], No BT link exists!!!\n"));
+		return algorithm;
+	}
+
+	if(pBtLinkInfo->bScoExist)
+		numOfDiffProfile++;
+	if(pBtLinkInfo->bHidExist)
+		numOfDiffProfile++;
+	if(pBtLinkInfo->bPanExist)
+		numOfDiffProfile++;
+	if(pBtLinkInfo->bA2dpExist)
+		numOfDiffProfile++;
+	
+	if(numOfDiffProfile == 1)
+	{
+		if(pBtLinkInfo->bScoExist)
+		{
+			BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], SCO only\n"));
+			algorithm = BT_8192E_1ANT_COEX_ALGO_SCO;
+		}
+		else
+		{
+			if(pBtLinkInfo->bHidExist)
+			{
+				BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], HID only\n"));
+				algorithm = BT_8192E_1ANT_COEX_ALGO_HID;
+			}
+			else if(pBtLinkInfo->bA2dpExist)
+			{
+				BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], A2DP only\n"));
+				algorithm = BT_8192E_1ANT_COEX_ALGO_A2DP;
+			}
+			else if(pBtLinkInfo->bPanExist)
+			{
+				if(bBtHsOn)
+				{
+					BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], PAN(HS) only\n"));
+					algorithm = BT_8192E_1ANT_COEX_ALGO_PANHS;
+				}
+				else
+				{
+					BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], PAN(EDR) only\n"));
+					algorithm = BT_8192E_1ANT_COEX_ALGO_PANEDR;
+				}
+			}
+		}
+	}
+	else if(numOfDiffProfile == 2)
+	{
+		if(pBtLinkInfo->bScoExist)
+		{
+			if(pBtLinkInfo->bHidExist)
+			{
+				BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], SCO + HID\n"));
+				algorithm = BT_8192E_1ANT_COEX_ALGO_HID;
+			}
+			else if(pBtLinkInfo->bA2dpExist)
+			{
+				BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], SCO + A2DP ==> SCO\n"));
+				algorithm = BT_8192E_1ANT_COEX_ALGO_SCO;
+			}
+			else if(pBtLinkInfo->bPanExist)
+			{
+				if(bBtHsOn)
+				{
+					BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], SCO + PAN(HS)\n"));
+					algorithm = BT_8192E_1ANT_COEX_ALGO_SCO;
+				}
+				else
+				{
+					BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], SCO + PAN(EDR)\n"));
+					algorithm = BT_8192E_1ANT_COEX_ALGO_PANEDR_HID;
+				}
+			}
+		}
+		else
+		{
+			if( pBtLinkInfo->bHidExist &&
+				pBtLinkInfo->bA2dpExist )
+			{
+				BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], HID + A2DP\n"));
+				algorithm = BT_8192E_1ANT_COEX_ALGO_HID_A2DP;
+			}
+			else if( pBtLinkInfo->bHidExist &&
+				pBtLinkInfo->bPanExist )
+			{
+				if(bBtHsOn)
+				{
+					BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], HID + PAN(HS)\n"));
+					algorithm = BT_8192E_1ANT_COEX_ALGO_HID_A2DP;
+				}
+				else
+				{
+					BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], HID + PAN(EDR)\n"));
+					algorithm = BT_8192E_1ANT_COEX_ALGO_PANEDR_HID;
+				}
+			}
+			else if( pBtLinkInfo->bPanExist &&
+				pBtLinkInfo->bA2dpExist )
+			{
+				if(bBtHsOn)
+				{
+					BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], A2DP + PAN(HS)\n"));
+					algorithm = BT_8192E_1ANT_COEX_ALGO_A2DP_PANHS;
+				}
+				else
+				{
+					BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], A2DP + PAN(EDR)\n"));
+					algorithm = BT_8192E_1ANT_COEX_ALGO_PANEDR_A2DP;
+				}
+			}
+		}
+	}
+	else if(numOfDiffProfile == 3)
+	{
+		if(pBtLinkInfo->bScoExist)
+		{
+			if( pBtLinkInfo->bHidExist &&
+				pBtLinkInfo->bA2dpExist )
+			{
+				BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], SCO + HID + A2DP ==> HID\n"));
+				algorithm = BT_8192E_1ANT_COEX_ALGO_HID;
+			}
+			else if( pBtLinkInfo->bHidExist &&
+				pBtLinkInfo->bPanExist )
+			{
+				if(bBtHsOn)
+				{
+					BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], SCO + HID + PAN(HS)\n"));
+					algorithm = BT_8192E_1ANT_COEX_ALGO_HID_A2DP;
+				}
+				else
+				{
+					BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], SCO + HID + PAN(EDR)\n"));
+					algorithm = BT_8192E_1ANT_COEX_ALGO_PANEDR_HID;
+				}
+			}
+			else if( pBtLinkInfo->bPanExist &&
+				pBtLinkInfo->bA2dpExist )
+			{
+				if(bBtHsOn)
+				{
+					BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], SCO + A2DP + PAN(HS)\n"));
+					algorithm = BT_8192E_1ANT_COEX_ALGO_SCO;
+				}
+				else
+				{
+					BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], SCO + A2DP + PAN(EDR) ==> HID\n"));
+					algorithm = BT_8192E_1ANT_COEX_ALGO_PANEDR_HID;
+				}
+			}
+		}
+		else
+		{
+			if( pBtLinkInfo->bHidExist &&
+				pBtLinkInfo->bPanExist &&
+				pBtLinkInfo->bA2dpExist )
+			{
+				if(bBtHsOn)
+				{
+					BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], HID + A2DP + PAN(HS)\n"));
+					algorithm = BT_8192E_1ANT_COEX_ALGO_HID_A2DP;
+				}
+				else
+				{
+					BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], HID + A2DP + PAN(EDR)\n"));
+					algorithm = BT_8192E_1ANT_COEX_ALGO_HID_A2DP_PANEDR;
+				}
+			}
+		}
+	}
+	else if(numOfDiffProfile >= 3)
+	{
+		if(pBtLinkInfo->bScoExist)
+		{
+			if( pBtLinkInfo->bHidExist &&
+				pBtLinkInfo->bPanExist &&
+				pBtLinkInfo->bA2dpExist )
+			{
+				if(bBtHsOn)
+				{
+					BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], Error!!! SCO + HID + A2DP + PAN(HS)\n"));
+
+				}
+				else
+				{
+					BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], SCO + HID + A2DP + PAN(EDR)==>PAN(EDR)+HID\n"));
+					algorithm = BT_8192E_1ANT_COEX_ALGO_PANEDR_HID;
+				}
+			}
+		}
+	}
+
+	return algorithm;
+}
+
+VOID
+halbtc8192e1ant_SetFwDacSwingLevel(
+	IN	PBTC_COEXIST		pBtCoexist,
+	IN	u1Byte			dacSwingLvl
+	)
+{
+	u1Byte			H2C_Parameter[1] ={0};
+
+	// There are several types of dacswing
+	// 0x18/ 0x10/ 0xc/ 0x8/ 0x4/ 0x6
+	H2C_Parameter[0] = dacSwingLvl;
+
+	BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_EXEC, ("[BTCoex], Set Dac Swing Level=0x%x\n", dacSwingLvl));
+	BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_EXEC, ("[BTCoex], FW write 0x64=0x%x\n", H2C_Parameter[0]));
+
+	pBtCoexist->btc_fill_h2c(pBtCoexist, 0x64, 1, H2C_Parameter);
+}
+
+VOID
+halbtc8192e1ant_SetFwDecBtPwr(
+	IN	PBTC_COEXIST		pBtCoexist,
+	IN	u1Byte				decBtPwrLvl
+	)
+{
+	u1Byte			H2C_Parameter[1] ={0};
+	
+	H2C_Parameter[0] = decBtPwrLvl;
+
+	BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_EXEC, ("[BTCoex], decrease Bt Power level = %d, FW write 0x62=0x%x\n", 
+		decBtPwrLvl, H2C_Parameter[0]));
+
+	pBtCoexist->btc_fill_h2c(pBtCoexist, 0x62, 1, H2C_Parameter);
+}
+
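+// halbtc8192e1ant_DecBtPwr() and the other bForceExec wrappers in this file
+// share one pattern: cache the requested value in pCoexDm->cur*, skip the
+// write when it matches the cached pre* value (unless bForceExec), call the
+// inner SetFw*/Set* helper that actually programs the firmware or registers,
+// then record cur* into pre*.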
+VOID
+halbtc8192e1ant_DecBtPwr(
+	IN	PBTC_COEXIST		pBtCoexist,
+	IN	BOOLEAN				bForceExec,
+	IN	u1Byte				decBtPwrLvl
+	)
+{
+	BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW, ("[BTCoex], %s Dec BT power level = %d\n",  
+		(bForceExec? "force to":""), decBtPwrLvl));
+	pCoexDm->curBtDecPwrLvl = decBtPwrLvl;
+
+	if(!bForceExec)
+	{
+		BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_DETAIL, ("[BTCoex], BtDecPwrLvl=%d, curBtDecPwrLvl=%d\n", 
+			pCoexDm->preBtDecPwrLvl, pCoexDm->curBtDecPwrLvl));
+
+		if(pCoexDm->preBtDecPwrLvl == pCoexDm->curBtDecPwrLvl) 
+			return;
+	}
+	halbtc8192e1ant_SetFwDecBtPwr(pBtCoexist, pCoexDm->curBtDecPwrLvl);
+
+	pCoexDm->preBtDecPwrLvl = pCoexDm->curBtDecPwrLvl;
+}
+
+VOID
+halbtc8192e1ant_SetFwBtLnaConstrain(
+	IN	PBTC_COEXIST		pBtCoexist,
+	IN	BOOLEAN			bBtLnaConsOn
+	)
+{
+	u1Byte			H2C_Parameter[2] ={0};
+	
+	H2C_Parameter[0] = 0x3;	// opCode, 0x3=BT_SET_LNA_CONSTRAIN
+
+	if(bBtLnaConsOn)
+	{
+		H2C_Parameter[1] |= BIT0;
+	}
+
+	BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_EXEC, ("[BTCoex], set BT LNA Constrain: %s, FW write 0x69=0x%x\n", 
+		(bBtLnaConsOn? "ON!!":"OFF!!"), 
+		H2C_Parameter[0]<<8|H2C_Parameter[1]));
+
+	pBtCoexist->btc_fill_h2c(pBtCoexist, 0x69, 2, H2C_Parameter);	
+}
+
+VOID
+halbtc8192e1ant_SetBtLnaConstrain(
+	IN	PBTC_COEXIST		pBtCoexist,
+	IN	BOOLEAN			bForceExec,
+	IN	BOOLEAN			bBtLnaConsOn
+	)
+{
+	BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW, ("[BTCoex], %s BT Constrain = %s\n",  
+		(bForceExec? "force":""), ((bBtLnaConsOn)? "ON":"OFF")));
+	pCoexDm->bCurBtLnaConstrain = bBtLnaConsOn;
+
+	if(!bForceExec)
+	{
+		BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_DETAIL, ("[BTCoex], bPreBtLnaConstrain=%d, bCurBtLnaConstrain=%d\n", 
+			pCoexDm->bPreBtLnaConstrain, pCoexDm->bCurBtLnaConstrain));
+
+		if(pCoexDm->bPreBtLnaConstrain == pCoexDm->bCurBtLnaConstrain) 
+			return;
+	}
+	halbtc8192e1ant_SetFwBtLnaConstrain(pBtCoexist, pCoexDm->bCurBtLnaConstrain);
+
+	pCoexDm->bPreBtLnaConstrain = pCoexDm->bCurBtLnaConstrain;
+}
+
+VOID
+halbtc8192e1ant_SetFwBtPsdMode(
+	IN	PBTC_COEXIST		pBtCoexist,
+	IN	u1Byte			btPsdMode
+	)
+{
+	u1Byte			H2C_Parameter[2] ={0};
+	
+	H2C_Parameter[0] = 0x2;	// opCode, 0x2=BT_SET_PSD_MODE
+
+	H2C_Parameter[1] = btPsdMode;
+
+	BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_EXEC, ("[BTCoex], set BT PSD mode=0x%x, FW write 0x69=0x%x\n", 
+		H2C_Parameter[1], 
+		H2C_Parameter[0]<<8|H2C_Parameter[1]));
+
+	pBtCoexist->btc_fill_h2c(pBtCoexist, 0x69, 2, H2C_Parameter);	
+}
+
+
+VOID
+halbtc8192e1ant_SetBtPsdMode(
+	IN	PBTC_COEXIST		pBtCoexist,
+	IN	BOOLEAN			bForceExec,
+	IN	u1Byte			btPsdMode
+	)
+{
+	BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW, ("[BTCoex], %s BT PSD mode = 0x%x\n",  
+		(bForceExec? "force":""), btPsdMode));
+	pCoexDm->bCurBtPsdMode = btPsdMode;
+
+	if(!bForceExec)
+	{
+		BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_DETAIL, ("[BTCoex], bPreBtPsdMode=0x%x, bCurBtPsdMode=0x%x\n", 
+			pCoexDm->bPreBtPsdMode, pCoexDm->bCurBtPsdMode));
+
+		if(pCoexDm->bPreBtPsdMode == pCoexDm->bCurBtPsdMode) 
+			return;
+	}
+	halbtc8192e1ant_SetFwBtPsdMode(pBtCoexist, pCoexDm->bCurBtPsdMode);
+
+	pCoexDm->bPreBtPsdMode = pCoexDm->bCurBtPsdMode;
+}
+
+
+VOID
+halbtc8192e1ant_SetBtAutoReport(
+	IN	PBTC_COEXIST		pBtCoexist,
+	IN	BOOLEAN			bEnableAutoReport
+	)
+{
+	u1Byte			H2C_Parameter[1] ={0};
+	
+	H2C_Parameter[0] = 0;
+
+	if(bEnableAutoReport)
+	{
+		H2C_Parameter[0] |= BIT0;
+	}
+
+	BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_EXEC, ("[BTCoex], BT FW auto report : %s, FW write 0x68=0x%x\n", 
+		(bEnableAutoReport? "Enabled!!":"Disabled!!"), H2C_Parameter[0]));
+
+	pBtCoexist->btc_fill_h2c(pBtCoexist, 0x68, 1, H2C_Parameter);	
+}
+
+VOID
+halbtc8192e1ant_BtAutoReport(
+	IN	PBTC_COEXIST		pBtCoexist,
+	IN	BOOLEAN			bForceExec,
+	IN	BOOLEAN			bEnableAutoReport
+	)
+{
+	BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW, ("[BTCoex], %s BT Auto report = %s\n",  
+		(bForceExec? "force to":""), ((bEnableAutoReport)? "Enabled":"Disabled")));
+	pCoexDm->bCurBtAutoReport = bEnableAutoReport;
+
+	if(!bForceExec)
+	{
+		BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_DETAIL, ("[BTCoex], bPreBtAutoReport=%d, bCurBtAutoReport=%d\n", 
+			pCoexDm->bPreBtAutoReport, pCoexDm->bCurBtAutoReport));
+
+		if(pCoexDm->bPreBtAutoReport == pCoexDm->bCurBtAutoReport) 
+			return;
+	}
+	halbtc8192e1ant_SetBtAutoReport(pBtCoexist, pCoexDm->bCurBtAutoReport);
+
+	pCoexDm->bPreBtAutoReport = pCoexDm->bCurBtAutoReport;
+}
+
+VOID
+halbtc8192e1ant_FwDacSwingLvl(
+	IN	PBTC_COEXIST		pBtCoexist,
+	IN	BOOLEAN			bForceExec,
+	IN	u1Byte			fwDacSwingLvl
+	)
+{
+	BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW, ("[BTCoex], %s set FW Dac Swing level = %d\n",  
+		(bForceExec? "force to":""), fwDacSwingLvl));
+	pCoexDm->curFwDacSwingLvl = fwDacSwingLvl;
+
+	if(!bForceExec)
+	{
+		BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_DETAIL, ("[BTCoex], preFwDacSwingLvl=%d, curFwDacSwingLvl=%d\n", 
+			pCoexDm->preFwDacSwingLvl, pCoexDm->curFwDacSwingLvl));
+
+		if(pCoexDm->preFwDacSwingLvl == pCoexDm->curFwDacSwingLvl) 
+			return;
+	}
+
+	halbtc8192e1ant_SetFwDacSwingLevel(pBtCoexist, pCoexDm->curFwDacSwingLvl);
+
+	pCoexDm->preFwDacSwingLvl = pCoexDm->curFwDacSwingLvl;
+}
+
+VOID
+halbtc8192e1ant_SetSwRfRxLpfCorner(
+	IN	PBTC_COEXIST		pBtCoexist,
+	IN	BOOLEAN			bRxRfShrinkOn
+	)
+{
+	if(bRxRfShrinkOn)
+	{
+		//Shrink RF Rx LPF corner
+		BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_EXEC, ("[BTCoex], Shrink RF Rx LPF corner!!\n"));
+		pBtCoexist->btc_set_rf_reg(pBtCoexist, BTC_RF_A, 0x1e, 0xfffff, 0xf0ff7);
+	}
+	else
+	{
+		//Resume RF Rx LPF corner
+		// After initialization, we can use pCoexDm->btRf0x1eBackup
+		if(pBtCoexist->initilized)
+		{
+			BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_EXEC, ("[BTCoex], Resume RF Rx LPF corner!!\n"));
+			pBtCoexist->btc_set_rf_reg(pBtCoexist, BTC_RF_A, 0x1e, 0xfffff, pCoexDm->btRf0x1eBackup);
+		}
+	}
+}
+
+VOID
+halbtc8192e1ant_RfShrink(
+	IN	PBTC_COEXIST		pBtCoexist,
+	IN	BOOLEAN			bForceExec,
+	IN	BOOLEAN			bRxRfShrinkOn
+	)
+{
+	BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW, ("[BTCoex], %s turn Rx RF Shrink = %s\n",  
+		(bForceExec? "force to":""), ((bRxRfShrinkOn)? "ON":"OFF")));
+	pCoexDm->bCurRfRxLpfShrink = bRxRfShrinkOn;
+
+	if(!bForceExec)
+	{
+		BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_DETAIL, ("[BTCoex], bPreRfRxLpfShrink=%d, bCurRfRxLpfShrink=%d\n", 
+			pCoexDm->bPreRfRxLpfShrink, pCoexDm->bCurRfRxLpfShrink));
+
+		if(pCoexDm->bPreRfRxLpfShrink == pCoexDm->bCurRfRxLpfShrink) 
+			return;
+	}
+	halbtc8192e1ant_SetSwRfRxLpfCorner(pBtCoexist, pCoexDm->bCurRfRxLpfShrink);
+
+	pCoexDm->bPreRfRxLpfShrink = pCoexDm->bCurRfRxLpfShrink;
+}
+
+VOID
+halbtc8192e1ant_SetSwPenaltyTxRateAdaptive(
+	IN	PBTC_COEXIST		pBtCoexist,
+	IN	BOOLEAN			bLowPenaltyRa
+	)
+{
+	u1Byte	tmpU1;
+
+	tmpU1 = pBtCoexist->btc_read_1byte(pBtCoexist, 0x4fd);
+	tmpU1 |= BIT0;
+	if(bLowPenaltyRa)
+	{
+		BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_EXEC, ("[BTCoex], Tx rate adaptive, set low penalty!!\n"));
+		tmpU1 &= ~BIT2;
+	}
+	else
+	{
+		BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_EXEC, ("[BTCoex], Tx rate adaptive, set normal!!\n"));
+		tmpU1 |= BIT2;
+	}
+
+	pBtCoexist->btc_write_1byte(pBtCoexist, 0x4fd, tmpU1);
+}
+
+VOID
+halbtc8192e1ant_LowPenaltyRa(
+	IN	PBTC_COEXIST		pBtCoexist,
+	IN	BOOLEAN			bForceExec,
+	IN	BOOLEAN			bLowPenaltyRa
+	)
+{
+	// NOTE: this early return disables the low-penalty RA mechanism, so the
+	// code below is never reached.
+	return;
+	BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW, ("[BTCoex], %s turn LowPenaltyRA = %s\n",  
+		(bForceExec? "force to":""), ((bLowPenaltyRa)? "ON":"OFF")));
+	pCoexDm->bCurLowPenaltyRa = bLowPenaltyRa;
+
+	if(!bForceExec)
+	{
+		BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_DETAIL, ("[BTCoex], bPreLowPenaltyRa=%d, bCurLowPenaltyRa=%d\n", 
+			pCoexDm->bPreLowPenaltyRa, pCoexDm->bCurLowPenaltyRa));
+
+		if(pCoexDm->bPreLowPenaltyRa == pCoexDm->bCurLowPenaltyRa) 
+			return;
+	}
+	halbtc8192e1ant_SetSwPenaltyTxRateAdaptive(pBtCoexist, pCoexDm->bCurLowPenaltyRa);
+
+	pCoexDm->bPreLowPenaltyRa = pCoexDm->bCurLowPenaltyRa;
+}
+
+VOID
+halbtc8192e1ant_SetDacSwingReg(
+	IN	PBTC_COEXIST		pBtCoexist,
+	IN	u4Byte			level
+	)
+{
+	u1Byte	val=(u1Byte)level;
+
+	BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_EXEC, ("[BTCoex], Write SwDacSwing = 0x%x\n", level));
+	pBtCoexist->btc_write_1byte_bitmask(pBtCoexist, 0x883, 0x3e, val);
+}
+
+VOID
+halbtc8192e1ant_SetSwFullTimeDacSwing(
+	IN	PBTC_COEXIST		pBtCoexist,
+	IN	BOOLEAN			bSwDacSwingOn,
+	IN	u4Byte			swDacSwingLvl
+	)
+{
+	if(bSwDacSwingOn)
+	{
+		halbtc8192e1ant_SetDacSwingReg(pBtCoexist, swDacSwingLvl);
+	}
+	else
+	{
+		halbtc8192e1ant_SetDacSwingReg(pBtCoexist, 0x18);
+	}
+}
+
+
+VOID
+halbtc8192e1ant_DacSwing(
+	IN	PBTC_COEXIST		pBtCoexist,
+	IN	BOOLEAN			bForceExec,
+	IN	BOOLEAN			bDacSwingOn,
+	IN	u4Byte			dacSwingLvl
+	)
+{
+	BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW, ("[BTCoex], %s turn DacSwing=%s, dacSwingLvl=0x%x\n",  
+		(bForceExec? "force to":""), ((bDacSwingOn)? "ON":"OFF"), dacSwingLvl));
+	pCoexDm->bCurDacSwingOn = bDacSwingOn;
+	pCoexDm->curDacSwingLvl = dacSwingLvl;
+
+	if(!bForceExec)
+	{
+		BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_DETAIL, ("[BTCoex], bPreDacSwingOn=%d, preDacSwingLvl=0x%x, bCurDacSwingOn=%d, curDacSwingLvl=0x%x\n", 
+			pCoexDm->bPreDacSwingOn, pCoexDm->preDacSwingLvl,
+			pCoexDm->bCurDacSwingOn, pCoexDm->curDacSwingLvl));
+
+		if( (pCoexDm->bPreDacSwingOn == pCoexDm->bCurDacSwingOn) &&
+			(pCoexDm->preDacSwingLvl == pCoexDm->curDacSwingLvl) )
+			return;
+	}
+	mdelay(30);
+	halbtc8192e1ant_SetSwFullTimeDacSwing(pBtCoexist, bDacSwingOn, dacSwingLvl);
+
+	pCoexDm->bPreDacSwingOn = pCoexDm->bCurDacSwingOn;
+	pCoexDm->preDacSwingLvl = pCoexDm->curDacSwingLvl;
+}
+
+VOID
+halbtc8192e1ant_SetAdcBackOff(
+	IN	PBTC_COEXIST		pBtCoexist,
+	IN	BOOLEAN			bAdcBackOff
+	)
+{
+	if(bAdcBackOff)
+	{
+		BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_EXEC, ("[BTCoex], BB BackOff Level On!\n"));
+		pBtCoexist->btc_write_1byte_bitmask(pBtCoexist, 0x8db, 0x60, 0x3);
+	}
+	else
+	{
+		BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_EXEC, ("[BTCoex], BB BackOff Level Off!\n"));
+		pBtCoexist->btc_write_1byte_bitmask(pBtCoexist, 0x8db, 0x60, 0x1);
+	}
+}
+
+VOID
+halbtc8192e1ant_AdcBackOff(
+	IN	PBTC_COEXIST		pBtCoexist,
+	IN	BOOLEAN			bForceExec,
+	IN	BOOLEAN			bAdcBackOff
+	)
+{
+	BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW, ("[BTCoex], %s turn AdcBackOff = %s\n",  
+		(bForceExec? "force to":""), ((bAdcBackOff)? "ON":"OFF")));
+	pCoexDm->bCurAdcBackOff = bAdcBackOff;
+
+	if(!bForceExec)
+	{
+		BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_DETAIL, ("[BTCoex], bPreAdcBackOff=%d, bCurAdcBackOff=%d\n", 
+			pCoexDm->bPreAdcBackOff, pCoexDm->bCurAdcBackOff));
+
+		if(pCoexDm->bPreAdcBackOff == pCoexDm->bCurAdcBackOff) 
+			return;
+	}
+	halbtc8192e1ant_SetAdcBackOff(pBtCoexist, pCoexDm->bCurAdcBackOff);
+
+	pCoexDm->bPreAdcBackOff = pCoexDm->bCurAdcBackOff;
+}
+
+VOID
+halbtc8192e1ant_SetAgcTable(
+	IN	PBTC_COEXIST		pBtCoexist,
+	IN	BOOLEAN			bAgcTableEn
+	)
+{
+	u1Byte		rssiAdjustVal=0;
+	
+	pBtCoexist->btc_set_rf_reg(pBtCoexist, BTC_RF_A, 0xef, 0xfffff, 0x02000);
+	if(bAgcTableEn)
+	{
+		BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_EXEC, ("[BTCoex], Agc Table On!\n"));
+		pBtCoexist->btc_set_rf_reg(pBtCoexist, BTC_RF_A, 0x3b, 0xfffff, 0x3fa58);
+		pBtCoexist->btc_set_rf_reg(pBtCoexist, BTC_RF_A, 0x3b, 0xfffff, 0x37a58);
+		pBtCoexist->btc_set_rf_reg(pBtCoexist, BTC_RF_A, 0x3b, 0xfffff, 0x2fa58);
+		rssiAdjustVal = 8;
+	}
+	else
+	{
+		BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_EXEC, ("[BTCoex], Agc Table Off!\n"));
+		pBtCoexist->btc_set_rf_reg(pBtCoexist, BTC_RF_A, 0x3b, 0xfffff, 0x39258);
+		pBtCoexist->btc_set_rf_reg(pBtCoexist, BTC_RF_A, 0x3b, 0xfffff, 0x31258);
+		pBtCoexist->btc_set_rf_reg(pBtCoexist, BTC_RF_A, 0x3b, 0xfffff, 0x29258);
+	}
+	pBtCoexist->btc_set_rf_reg(pBtCoexist, BTC_RF_A, 0xef, 0xfffff, 0x0);
+
+	// set rssiAdjustVal for wifi module.
+	pBtCoexist->btc_set(pBtCoexist, BTC_SET_U1_RSSI_ADJ_VAL_FOR_AGC_TABLE_ON, &rssiAdjustVal);
+}
+
+
+VOID
+halbtc8192e1ant_AgcTable(
+	IN	PBTC_COEXIST		pBtCoexist,
+	IN	BOOLEAN			bForceExec,
+	IN	BOOLEAN			bAgcTableEn
+	)
+{
+	BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW, ("[BTCoex], %s %s Agc Table\n",  
+		(bForceExec? "force to":""), ((bAgcTableEn)? "Enable":"Disable")));
+	pCoexDm->bCurAgcTableEn = bAgcTableEn;
+
+	if(!bForceExec)
+	{
+		BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_DETAIL, ("[BTCoex], bPreAgcTableEn=%d, bCurAgcTableEn=%d\n", 
+			pCoexDm->bPreAgcTableEn, pCoexDm->bCurAgcTableEn));
+
+		if(pCoexDm->bPreAgcTableEn == pCoexDm->bCurAgcTableEn) 
+			return;
+	}
+	halbtc8192e1ant_SetAgcTable(pBtCoexist, bAgcTableEn);
+
+	pCoexDm->bPreAgcTableEn = pCoexDm->bCurAgcTableEn;
+}
+
+VOID
+halbtc8192e1ant_SetCoexTable(
+	IN	PBTC_COEXIST	pBtCoexist,
+	IN	u4Byte		val0x6c0,
+	IN	u4Byte		val0x6c4,
+	IN	u4Byte		val0x6c8,
+	IN	u1Byte		val0x6cc
+	)
+{
+	BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_EXEC, ("[BTCoex], set coex table, set 0x6c0=0x%x\n", val0x6c0));
+	pBtCoexist->btc_write_4byte(pBtCoexist, 0x6c0, val0x6c0);
+
+	BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_EXEC, ("[BTCoex], set coex table, set 0x6c4=0x%x\n", val0x6c4));
+	pBtCoexist->btc_write_4byte(pBtCoexist, 0x6c4, val0x6c4);
+
+	BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_EXEC, ("[BTCoex], set coex table, set 0x6c8=0x%x\n", val0x6c8));
+	pBtCoexist->btc_write_4byte(pBtCoexist, 0x6c8, val0x6c8);
+
+	BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_EXEC, ("[BTCoex], set coex table, set 0x6cc=0x%x\n", val0x6cc));
+	pBtCoexist->btc_write_1byte(pBtCoexist, 0x6cc, val0x6cc);
+}
+
+VOID
+halbtc8192e1ant_CoexTable(
+	IN	PBTC_COEXIST		pBtCoexist,
+	IN	BOOLEAN			bForceExec,
+	IN	u4Byte			val0x6c0,
+	IN	u4Byte			val0x6c4,
+	IN	u4Byte			val0x6c8,
+	IN	u1Byte			val0x6cc
+	)
+{
+	BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW, ("[BTCoex], %s write Coex Table 0x6c0=0x%x, 0x6c4=0x%x, 0x6c8=0x%x, 0x6cc=0x%x\n", 
+		(bForceExec? "force to":""), val0x6c0, val0x6c4, val0x6c8, val0x6cc));
+	pCoexDm->curVal0x6c0 = val0x6c0;
+	pCoexDm->curVal0x6c4 = val0x6c4;
+	pCoexDm->curVal0x6c8 = val0x6c8;
+	pCoexDm->curVal0x6cc = val0x6cc;
+
+	if(!bForceExec)
+	{
+		BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_DETAIL, ("[BTCoex], preVal0x6c0=0x%x, preVal0x6c4=0x%x, preVal0x6c8=0x%x, preVal0x6cc=0x%x !!\n", 
+			pCoexDm->preVal0x6c0, pCoexDm->preVal0x6c4, pCoexDm->preVal0x6c8, pCoexDm->preVal0x6cc));
+		BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_DETAIL, ("[BTCoex], curVal0x6c0=0x%x, curVal0x6c4=0x%x, curVal0x6c8=0x%x, curVal0x6cc=0x%x !!\n", 
+			pCoexDm->curVal0x6c0, pCoexDm->curVal0x6c4, pCoexDm->curVal0x6c8, pCoexDm->curVal0x6cc));
+	
+		if( (pCoexDm->preVal0x6c0 == pCoexDm->curVal0x6c0) &&
+			(pCoexDm->preVal0x6c4 == pCoexDm->curVal0x6c4) &&
+			(pCoexDm->preVal0x6c8 == pCoexDm->curVal0x6c8) &&
+			(pCoexDm->preVal0x6cc == pCoexDm->curVal0x6cc) )
+			return;
+	}
+	halbtc8192e1ant_SetCoexTable(pBtCoexist, val0x6c0, val0x6c4, val0x6c8, val0x6cc);
+
+	pCoexDm->preVal0x6c0 = pCoexDm->curVal0x6c0;
+	pCoexDm->preVal0x6c4 = pCoexDm->curVal0x6c4;
+	pCoexDm->preVal0x6c8 = pCoexDm->curVal0x6c8;
+	pCoexDm->preVal0x6cc = pCoexDm->curVal0x6cc;
+}
+
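+// Convenience wrapper: each "type" selects one preset combination of the
+// coexistence table registers 0x6c0/0x6c4/0x6c8/0x6cc programmed by
+// halbtc8192e1ant_CoexTable() above.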
+VOID
+halbtc8192e1ant_CoexTableWithType(
+	IN	PBTC_COEXIST		pBtCoexist,
+	IN	BOOLEAN				bForceExec,
+	IN	u1Byte				type
+	)
+{
+	switch(type)
+	{
+		case 0:
+			halbtc8192e1ant_CoexTable(pBtCoexist, bForceExec, 0x55555555, 0x55555555, 0xffffff, 0x3);
+			break;
+		case 1:
+			halbtc8192e1ant_CoexTable(pBtCoexist, bForceExec, 0x55555555, 0x5a5a5a5a, 0xffffff, 0x3);
+			break;
+		case 2:
+			halbtc8192e1ant_CoexTable(pBtCoexist, bForceExec, 0x5a5a5a5a, 0x5a5a5a5a, 0xffffff, 0x3);
+			break;
+		case 3:
+			halbtc8192e1ant_CoexTable(pBtCoexist, bForceExec, 0xaaaaaaaa, 0xaaaaaaaa, 0xffffff, 0x3);
+			break;
+		case 4:
+			halbtc8192e1ant_CoexTable(pBtCoexist, bForceExec, 0xffffffff, 0xffffffff, 0xffffff, 0x3);
+			break;
+		case 5:
+			halbtc8192e1ant_CoexTable(pBtCoexist, bForceExec, 0x5fff5fff, 0x5fff5fff, 0xffffff, 0x3);
+			break;
+		case 6:
+			halbtc8192e1ant_CoexTable(pBtCoexist, bForceExec, 0x55ff55ff, 0x5a5a5a5a, 0xffffff, 0x3);
+			break;
+		case 7:
+			halbtc8192e1ant_CoexTable(pBtCoexist, bForceExec, 0xddffddff, 0xddffddff, 0xffffff, 0x3);
+			break;
+		case 8:
+			halbtc8192e1ant_CoexTable(pBtCoexist, bForceExec, 0x55ff55ff, 0x5afa5afa, 0xffffff, 0x3);
+			break;		
+		case 9:
+			halbtc8192e1ant_CoexTable(pBtCoexist, bForceExec, 0x5f5f5f5f, 0x5f5f5f5f, 0xffffff, 0x3);
+			break;
+		default:
+			break;
+	}
+}
+
+VOID
+halbtc8192e1ant_SetFwIgnoreWlanAct(
+	IN	PBTC_COEXIST		pBtCoexist,
+	IN	BOOLEAN			bEnable
+	)
+{
+	u1Byte			H2C_Parameter[1] ={0};
+		
+	if(bEnable)
+	{
+		H2C_Parameter[0] |= BIT0;		// function enable
+	}
+	
+	BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_EXEC, ("[BTCoex], set FW for BT Ignore Wlan_Act, FW write 0x63=0x%x\n", 
+		H2C_Parameter[0]));
+
+	pBtCoexist->btc_fill_h2c(pBtCoexist, 0x63, 1, H2C_Parameter);
+}
+
+VOID
+halbtc8192e1ant_IgnoreWlanAct(
+	IN	PBTC_COEXIST		pBtCoexist,
+	IN	BOOLEAN			bForceExec,
+	IN	BOOLEAN			bEnable
+	)
+{
+	BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW, ("[BTCoex], %s turn Ignore WlanAct %s\n", 
+		(bForceExec? "force to":""), (bEnable? "ON":"OFF")));
+	pCoexDm->bCurIgnoreWlanAct = bEnable;
+
+	if(!bForceExec)
+	{
+		BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_DETAIL, ("[BTCoex], bPreIgnoreWlanAct = %d, bCurIgnoreWlanAct = %d!!\n", 
+			pCoexDm->bPreIgnoreWlanAct, pCoexDm->bCurIgnoreWlanAct));
+
+		if(pCoexDm->bPreIgnoreWlanAct == pCoexDm->bCurIgnoreWlanAct)
+			return;
+	}
+	halbtc8192e1ant_SetFwIgnoreWlanAct(pBtCoexist, bEnable);
+
+	pCoexDm->bPreIgnoreWlanAct = pCoexDm->bCurIgnoreWlanAct;
+}
+
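+// Send the 5-byte PS-TDMA parameter set to the firmware via H2C command 0x60
+// and keep a copy in pCoexDm->psTdmaPara[] for later reference.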
+VOID
+halbtc8192e1ant_SetFwPstdma(
+	IN	PBTC_COEXIST		pBtCoexist,
+	IN	u1Byte			byte1,
+	IN	u1Byte			byte2,
+	IN	u1Byte			byte3,
+	IN	u1Byte			byte4,
+	IN	u1Byte			byte5
+	)
+{
+	u1Byte			H2C_Parameter[5] ={0};
+
+	H2C_Parameter[0] = byte1;	
+	H2C_Parameter[1] = byte2;	
+	H2C_Parameter[2] = byte3;
+	H2C_Parameter[3] = byte4;
+	H2C_Parameter[4] = byte5;
+
+	pCoexDm->psTdmaPara[0] = byte1;
+	pCoexDm->psTdmaPara[1] = byte2;
+	pCoexDm->psTdmaPara[2] = byte3;
+	pCoexDm->psTdmaPara[3] = byte4;
+	pCoexDm->psTdmaPara[4] = byte5;
+	
+	BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_EXEC, ("[BTCoex], FW write 0x60(5bytes)=0x%x%08x\n", 
+		H2C_Parameter[0], 
+		H2C_Parameter[1]<<24|H2C_Parameter[2]<<16|H2C_Parameter[3]<<8|H2C_Parameter[4]));
+
+	pBtCoexist->btc_fill_h2c(pBtCoexist, 0x60, 5, H2C_Parameter);
+}
+
+VOID
+halbtc8192e1ant_SetLpsRpwm(
+	IN	PBTC_COEXIST		pBtCoexist,
+	IN	u1Byte			lpsVal,
+	IN	u1Byte			rpwmVal
+	)
+{
+	u1Byte	lps=lpsVal;
+	u1Byte	rpwm=rpwmVal;
+	
+	pBtCoexist->btc_set(pBtCoexist, BTC_SET_U1_1ANT_LPS, &lps);
+	pBtCoexist->btc_set(pBtCoexist, BTC_SET_U1_1ANT_RPWM, &rpwm);
+}
+
+VOID
+halbtc8192e1ant_LpsRpwm(
+	IN	PBTC_COEXIST		pBtCoexist,
+	IN	BOOLEAN			bForceExec,
+	IN	u1Byte			lpsVal,
+	IN	u1Byte			rpwmVal
+	)
+{
+	BOOLEAN	bForceExecPwrCmd=FALSE;
+	
+	BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW, ("[BTCoex], %s set lps/rpwm=0x%x/0x%x \n", 
+		(bForceExec? "force to":""), lpsVal, rpwmVal));
+	pCoexDm->curLps = lpsVal;
+	pCoexDm->curRpwm = rpwmVal;
+
+	if(!bForceExec)
+	{
+		BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_DETAIL, ("[BTCoex], preLps/curLps=0x%x/0x%x, preRpwm/curRpwm=0x%x/0x%x!!\n", 
+			pCoexDm->preLps, pCoexDm->curLps, pCoexDm->preRpwm, pCoexDm->curRpwm));
+
+		if( (pCoexDm->preLps == pCoexDm->curLps) &&
+			(pCoexDm->preRpwm == pCoexDm->curRpwm) )
+		{
+			return;
+		}
+	}
+	halbtc8192e1ant_SetLpsRpwm(pBtCoexist, lpsVal, rpwmVal);
+
+	pCoexDm->preLps = pCoexDm->curLps;
+	pCoexDm->preRpwm = pCoexDm->curRpwm;
+}
+
+VOID
+halbtc8192e1ant_SwMechanism1(
+	IN	PBTC_COEXIST	pBtCoexist,	
+	IN	BOOLEAN		bShrinkRxLPF,
+	IN	BOOLEAN 	bLowPenaltyRA,
+	IN	BOOLEAN		limited_dig, 
+	IN	BOOLEAN		bBTLNAConstrain
+	) 
+{
+	//halbtc8192e1ant_RfShrink(pBtCoexist, NORMAL_EXEC, bShrinkRxLPF);
+	//halbtc8192e1ant_LowPenaltyRa(pBtCoexist, NORMAL_EXEC, bLowPenaltyRA);
+
+	//no limited DIG
+	//halbtc8192e1ant_SetBtLnaConstrain(pBtCoexist, NORMAL_EXEC, bBTLNAConstrain);
+}
+
+VOID
+halbtc8192e1ant_SwMechanism2(
+	IN	PBTC_COEXIST	pBtCoexist,	
+	IN	BOOLEAN		bAGCTableShift,
+	IN	BOOLEAN 	bADCBackOff,
+	IN	BOOLEAN		bSWDACSwing,
+	IN	u4Byte		dacSwingLvl
+	)
+{
+	//halbtc8192e1ant_AgcTable(pBtCoexist, NORMAL_EXEC, bAGCTableShift);
+	//halbtc8192e1ant_AdcBackOff(pBtCoexist, NORMAL_EXEC, bADCBackOff);
+	//halbtc8192e1ant_DacSwing(pBtCoexist, NORMAL_EXEC, bSWDACSwing, dacSwingLvl);
+}
+
+VOID
+halbtc8192e1ant_PsTdma(
+	IN	PBTC_COEXIST		pBtCoexist,
+	IN	BOOLEAN			bForceExec,
+	IN	BOOLEAN			bTurnOn,
+	IN	u1Byte			type
+	)
+{
+	BOOLEAN			bTurnOnByCnt=FALSE;
+	u1Byte			psTdmaTypeByCnt=0, rssiAdjustVal=0;
+
+	BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW, ("[BTCoex], %s turn %s PS TDMA, type=%d\n", 
+		(bForceExec? "force to":""), (bTurnOn? "ON":"OFF"), type));
+	pCoexDm->bCurPsTdmaOn = bTurnOn;
+	pCoexDm->curPsTdma = type;
+
+	if(!bForceExec)
+	{
+		BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_DETAIL, ("[BTCoex], bPrePsTdmaOn = %d, bCurPsTdmaOn = %d!!\n", 
+			pCoexDm->bPrePsTdmaOn, pCoexDm->bCurPsTdmaOn));
+		BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_DETAIL, ("[BTCoex], prePsTdma = %d, curPsTdma = %d!!\n", 
+			pCoexDm->prePsTdma, pCoexDm->curPsTdma));
+
+		if( (pCoexDm->bPrePsTdmaOn == pCoexDm->bCurPsTdmaOn) &&
+			(pCoexDm->prePsTdma == pCoexDm->curPsTdma) )
+			return;
+	}
+	if(bTurnOn)
+	{
+		switch(type)
+		{
+			default:
+				halbtc8192e1ant_SetFwPstdma(pBtCoexist, 0x53, 0x2c, 0x03, 0x10, 0x50);
+				break;
+			case 1:
+				halbtc8192e1ant_SetFwPstdma(pBtCoexist, 0x53, 0x2c, 0x03, 0x10, 0x50);
+				rssiAdjustVal = 11;
+				break;
+			case 2:
+				halbtc8192e1ant_SetFwPstdma(pBtCoexist, 0x53, 0x25, 0x03, 0x10, 0x50);
+				rssiAdjustVal = 14;
+				break;
+			case 3:
+				halbtc8192e1ant_SetFwPstdma(pBtCoexist, 0x93, 0x25, 0x3, 0x10, 0x40);
+				break;
+			case 4:
+				halbtc8192e1ant_SetFwPstdma(pBtCoexist, 0x93, 0x15, 0x3, 0x14, 0x0);
+				rssiAdjustVal = 17;
+				break;
+			case 5:
+				halbtc8192e1ant_SetFwPstdma(pBtCoexist, 0x61, 0x15, 0x3, 0x31, 0x0);
+				break;
+			case 6:
+				halbtc8192e1ant_SetFwPstdma(pBtCoexist, 0x13, 0xa, 0x3, 0x0, 0x0);
+				break;
+			case 7:
+				halbtc8192e1ant_SetFwPstdma(pBtCoexist, 0x13, 0xc, 0x5, 0x0, 0x0);
+				break;
+			case 8:	
+				halbtc8192e1ant_SetFwPstdma(pBtCoexist, 0x93, 0x25, 0x3, 0x10, 0x0);
+				break;
+			case 9:	
+				halbtc8192e1ant_SetFwPstdma(pBtCoexist, 0x53, 0x1e, 0x03, 0x10, 0x50);
+				rssiAdjustVal = 18;
+				break;
+			case 10:	
+				halbtc8192e1ant_SetFwPstdma(pBtCoexist, 0x13, 0xa, 0xa, 0x0, 0x40);
+				break;
+			case 11:	
+				halbtc8192e1ant_SetFwPstdma(pBtCoexist, 0x53, 0x12, 0x03, 0x10, 0x50);
+				rssiAdjustVal = 20;
+				break;
+			case 12:
+				halbtc8192e1ant_SetFwPstdma(pBtCoexist, 0xeb, 0xa, 0x3, 0x31, 0x18);
+				break;
+
+			case 15:
+				halbtc8192e1ant_SetFwPstdma(pBtCoexist, 0x13, 0xa, 0x3, 0x8, 0x0);
+				break;
+			case 16:
+				halbtc8192e1ant_SetFwPstdma(pBtCoexist, 0x93, 0x15, 0x3, 0x10, 0x0);
+				rssiAdjustVal = 18;
+				break;
+
+			case 18:
+				halbtc8192e1ant_SetFwPstdma(pBtCoexist, 0x93, 0x25, 0x3, 0x10, 0x0);
+				rssiAdjustVal = 14;
+				break;			
+				
+			case 20:
+				halbtc8192e1ant_SetFwPstdma(pBtCoexist, 0x13, 0x25, 0x25, 0x0, 0x0);
+				break;
+			case 21:
+				halbtc8192e1ant_SetFwPstdma(pBtCoexist, 0x93, 0x20, 0x3, 0x10, 0x40);
+				break;
+			case 22:
+				halbtc8192e1ant_SetFwPstdma(pBtCoexist, 0x13, 0x8, 0x8, 0x0, 0x40);
+				break;
+			case 23:
+				halbtc8192e1ant_SetFwPstdma(pBtCoexist, 0xe3, 0x25, 0x3, 0x31, 0x18);
+				rssiAdjustVal = 22;
+				break;
+			case 24:
+				halbtc8192e1ant_SetFwPstdma(pBtCoexist, 0xe3, 0x15, 0x3, 0x31, 0x18);
+				rssiAdjustVal = 22;
+				break;
+			case 25:
+				halbtc8192e1ant_SetFwPstdma(pBtCoexist, 0xe3, 0xa, 0x3, 0x31, 0x18);
+				rssiAdjustVal = 22;
+				break;
+			case 26:
+				halbtc8192e1ant_SetFwPstdma(pBtCoexist, 0xe3, 0xa, 0x3, 0x31, 0x18);
+				rssiAdjustVal = 22;
+				break;
+			case 27:
+				halbtc8192e1ant_SetFwPstdma(pBtCoexist, 0xe3, 0x25, 0x3, 0x31, 0x98);
+				rssiAdjustVal = 22;
+				break;
+			case 28:
+				halbtc8192e1ant_SetFwPstdma(pBtCoexist, 0x69, 0x25, 0x3, 0x31, 0x0);
+				break;
+			case 29:
+				halbtc8192e1ant_SetFwPstdma(pBtCoexist, 0xab, 0x1a, 0x1a, 0x1, 0x10);
+				break;
+			case 30:
+				halbtc8192e1ant_SetFwPstdma(pBtCoexist, 0x93, 0x15, 0x3, 0x14, 0x0);
+				break;
+			case 31:
+				halbtc8192e1ant_SetFwPstdma(pBtCoexist, 0xd3, 0x1a, 0x1a, 0, 0x58);
+				break;
+			case 32:
+				halbtc8192e1ant_SetFwPstdma(pBtCoexist, 0xab, 0xa, 0x3, 0x31, 0x90);
+				break;
+			case 33:
+				halbtc8192e1ant_SetFwPstdma(pBtCoexist, 0xa3, 0x25, 0x3, 0x30, 0x90);
+				break;
+			case 34:
+				halbtc8192e1ant_SetFwPstdma(pBtCoexist, 0xd3, 0x1a, 0x1a, 0x0, 0x10);
+				break;
+			case 35:
+				halbtc8192e1ant_SetFwPstdma(pBtCoexist, 0xe3, 0x1a, 0x1a, 0x0, 0x10);
+				break;
+			case 36:
+				halbtc8192e1ant_SetFwPstdma(pBtCoexist, 0xd3, 0x12, 0x3, 0x14, 0x50);
+				break;
+			case 37:
+				halbtc8192e1ant_SetFwPstdma(pBtCoexist, 0x53, 0x25, 0x3, 0x10, 0x50);
+				break;
+			case 38:
+				halbtc8192e1ant_SetFwPstdma(pBtCoexist, 0xe3, 0x12, 0x12, 0xe1, 0x90);
+				break;
+		}
+	}
+	else
+	{
+		// disable PS tdma
+		switch(type)
+		{
+			case 8:		//0x778 = 1, ant2PTA
+				halbtc8192e1ant_SetFwPstdma(pBtCoexist, 0x8, 0x0, 0x0, 0x0, 0x0);
+				pBtCoexist->btc_write_1byte(pBtCoexist, 0x92c, 0x4);
+				break;
+			case 0:		//0x778 = 1, ant2BT
+			default:
+				halbtc8192e1ant_SetFwPstdma(pBtCoexist, 0x0, 0x0, 0x0, 0x0, 0x0);
+				mdelay(5);
+				pBtCoexist->btc_write_1byte(pBtCoexist, 0x92c, 0x20);
+				break;
+			case 9:		//0x778 = 1, ant2WIFI
+				halbtc8192e1ant_SetFwPstdma(pBtCoexist, 0x0, 0x0, 0x0, 0x0, 0x0);
+				pBtCoexist->btc_write_1byte(pBtCoexist, 0x92c, 0x4);
+				break;
+			case 10:	//0x778 = 3, ant2BT
+				halbtc8192e1ant_SetFwPstdma(pBtCoexist, 0x0, 0x0, 0x0, 0x8, 0x0);
+				mdelay(5);
+				pBtCoexist->btc_write_1byte(pBtCoexist, 0x92c, 0x20);
+				break;
+		}
+	}
+	rssiAdjustVal =0;
+	pBtCoexist->btc_set(pBtCoexist, BTC_SET_U1_RSSI_ADJ_VAL_FOR_1ANT_COEX_TYPE, &rssiAdjustVal);
+
+	// update pre state
+	pCoexDm->bPrePsTdmaOn = pCoexDm->bCurPsTdmaOn;
+	pCoexDm->prePsTdma = pCoexDm->curPsTdma;
+}
+
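+// ssType 1: fall back to a single spatial stream - disable the 2SS rates in
+// the RA mask, reprogram the OFDM/CCK path registers and request static MIMO
+// power save.  ssType 2: restore the two-spatial-stream settings with dynamic
+// MIMO power save.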
+VOID
+halbtc8192e1ant_SetSwitchSsType(
+	IN	PBTC_COEXIST		pBtCoexist,
+	IN	u1Byte				ssType
+	)
+{
+	u1Byte	mimoPs=BTC_MIMO_PS_DYNAMIC;
+
+	BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], REAL set SS Type = %d\n", ssType));
+
+	if(ssType == 1)
+	{
+		halbtc8192e1ant_Updatera_mask(pBtCoexist, FORCE_EXEC, BTC_RATE_DISABLE, 0xfff00000);	// disable 2ss
+		halbtc8192e1ant_PsTdma(pBtCoexist, FORCE_EXEC, FALSE, 0);
+		// switch ofdm path
+		pBtCoexist->btc_write_1byte(pBtCoexist, 0xc04, 0x11);
+		pBtCoexist->btc_write_1byte(pBtCoexist, 0xd04, 0x1);
+		pBtCoexist->btc_write_4byte(pBtCoexist, 0x90c, 0x81111111);
+		// switch cck path
+		pBtCoexist->btc_write_1byte_bitmask(pBtCoexist, 0xe77, 0x4, 0x1);
+		pBtCoexist->btc_write_1byte(pBtCoexist, 0xa07, 0x81);
+		mimoPs=BTC_MIMO_PS_STATIC;
+	}
+	else if(ssType == 2)
+	{
+		halbtc8192e1ant_Updatera_mask(pBtCoexist, FORCE_EXEC, BTC_RATE_ENABLE, 0xfff00000);	// enable 2ss
+		halbtc8192e1ant_PsTdma(pBtCoexist, FORCE_EXEC, FALSE, 8);
+		pBtCoexist->btc_write_1byte(pBtCoexist, 0xc04, 0x33);
+		pBtCoexist->btc_write_1byte(pBtCoexist, 0xd04, 0x3);
+		pBtCoexist->btc_write_4byte(pBtCoexist, 0x90c, 0x81121313);
+		pBtCoexist->btc_write_1byte_bitmask(pBtCoexist, 0xe77, 0x4, 0x0);
+		pBtCoexist->btc_write_1byte(pBtCoexist, 0xa07, 0x41);
+		mimoPs=BTC_MIMO_PS_DYNAMIC;
+	}
+	
+	pBtCoexist->btc_set(pBtCoexist, BTC_SET_ACT_SEND_MIMO_PS, &mimoPs);	// set rx 1ss or 2ss
+}
+
+VOID
+halbtc8192e1ant_SwitchSsType(
+	IN	PBTC_COEXIST		pBtCoexist,
+	IN	BOOLEAN				bForceExec,
+	IN	u1Byte				newSsType
+	)
+{
+	BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], %s Switch SS Type = %d\n",  
+		(bForceExec? "force to":""), newSsType));
+	pCoexDm->curSsType = newSsType;
+
+	if(!bForceExec)
+	{
+		if(pCoexDm->preSsType == pCoexDm->curSsType) 
+			return;
+	}
+	halbtc8192e1ant_SetSwitchSsType(pBtCoexist, pCoexDm->curSsType);
+
+	pCoexDm->preSsType = pCoexDm->curSsType;
+}
+
+VOID
+halbtc8192e1ant_CoexAllOff(
+	IN	PBTC_COEXIST		pBtCoexist
+	)
+{
+	halbtc8192e1ant_DecBtPwr(pBtCoexist, NORMAL_EXEC, 0);
+
+	// sw all off
+	halbtc8192e1ant_SwMechanism1(pBtCoexist,FALSE,FALSE,FALSE,FALSE);
+	halbtc8192e1ant_SwMechanism2(pBtCoexist,FALSE,FALSE,FALSE,0x18);
+
+
+	// hw all off
+	halbtc8192e1ant_CoexTableWithType(pBtCoexist, NORMAL_EXEC, 0);
+}
+
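+// Handle the "one or both sides idle" combinations with the all-off software
+// mechanisms.  Returns TRUE when such a common case was handled here, and
+// FALSE when WiFi is connected and BT is busy, leaving that case to the
+// profile-specific actions.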
+BOOLEAN
+halbtc8192e1ant_IsCommonAction(
+	IN	PBTC_COEXIST		pBtCoexist
+	)
+{
+	BOOLEAN			bCommon=FALSE, bWifiConnected=FALSE, bWifiBusy=FALSE;
+
+	pBtCoexist->btc_get(pBtCoexist, BTC_GET_BL_WIFI_CONNECTED, &bWifiConnected);
+	pBtCoexist->btc_get(pBtCoexist, BTC_GET_BL_WIFI_BUSY, &bWifiBusy);
+
+	if(!bWifiConnected && 
+		BT_8192E_1ANT_BT_STATUS_NON_CONNECTED_IDLE == pCoexDm->btStatus)
+	{
+		BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], Wifi non connected-idle + BT non connected-idle!!\n"));		
+ 		halbtc8192e1ant_SwMechanism1(pBtCoexist,FALSE,FALSE,FALSE,FALSE);
+		halbtc8192e1ant_SwMechanism2(pBtCoexist,FALSE,FALSE,FALSE,0x18);
+
+		bCommon = true;
+	}
+	else if(bWifiConnected && 
+		(BT_8192E_1ANT_BT_STATUS_NON_CONNECTED_IDLE == pCoexDm->btStatus) )
+	{
+		BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], Wifi connected + BT non connected-idle!!\n"));
+      	halbtc8192e1ant_SwMechanism1(pBtCoexist,FALSE,FALSE,FALSE,FALSE);
+		halbtc8192e1ant_SwMechanism2(pBtCoexist,FALSE,FALSE,FALSE,0x18);
+
+		bCommon = true;
+	}
+	else if(!bWifiConnected && 
+		(BT_8192E_1ANT_BT_STATUS_CONNECTED_IDLE == pCoexDm->btStatus) )
+	{
+		BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], Wifi non connected-idle + BT connected-idle!!\n"));
+		halbtc8192e1ant_SwMechanism1(pBtCoexist,FALSE,FALSE,FALSE,FALSE);
+		halbtc8192e1ant_SwMechanism2(pBtCoexist,FALSE,FALSE,FALSE,0x18);
+
+		bCommon = true;
+	}
+	else if(bWifiConnected && 
+		(BT_8192E_1ANT_BT_STATUS_CONNECTED_IDLE == pCoexDm->btStatus) )
+	{
+		BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], Wifi connected + BT connected-idle!!\n"));
+		halbtc8192e1ant_SwMechanism1(pBtCoexist,true,true,true,true);
+		halbtc8192e1ant_SwMechanism2(pBtCoexist,FALSE,FALSE,FALSE,0x18);
+
+		bCommon = true;
+	}
+	else if(!bWifiConnected && 
+		(BT_8192E_1ANT_BT_STATUS_CONNECTED_IDLE != pCoexDm->btStatus) )
+	{
+		BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], Wifi non connected-idle + BT Busy!!\n"));
+		halbtc8192e1ant_SwMechanism1(pBtCoexist,FALSE,FALSE,FALSE,FALSE);
+		halbtc8192e1ant_SwMechanism2(pBtCoexist,FALSE,FALSE,FALSE,0x18);
+		
+		bCommon = true;
+	}
+	else
+	{
+		halbtc8192e1ant_SwMechanism1(pBtCoexist,true,true,true,true);
+		
+		bCommon = FALSE;
+	}
+	
+	return bCommon;
+}
+
+
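+// Adaptive PS-TDMA tuning for BT ACL traffic.  Each call inspects the BT
+// retry counter from BT_Info: periods with no retries push "up" until the
+// WiFi slot is widened, periods with retries push "dn"/"m" until it is
+// narrowed, and the wait interval grows (m caps at 20, roughly 120 seconds)
+// so the algorithm does not oscillate between neighbouring TDMA types
+// (1/2/9/11).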
+VOID
+halbtc8192e1ant_TdmaDurationAdjustForAcl(
+	IN	PBTC_COEXIST		pBtCoexist,
+	IN	u1Byte				wifiStatus
+	)
+{
+	static s4Byte		up,dn,m,n,WaitCount;
+	s4Byte			result;   //0: no change, +1: increase WiFi duration, -1: decrease WiFi duration
+	u1Byte			retryCount=0, btInfoExt;
+
+	BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW, ("[BTCoex], TdmaDurationAdjustForAcl()\n"));
+
+	if( (BT_8192E_1ANT_WIFI_STATUS_NON_CONNECTED_ASSO_AUTH_SCAN == wifiStatus) ||
+		(BT_8192E_1ANT_WIFI_STATUS_CONNECTED_SCAN == wifiStatus) ||
+		(BT_8192E_1ANT_WIFI_STATUS_CONNECTED_SPECIAL_PKT == wifiStatus) )
+	{
+		if( pCoexDm->curPsTdma != 1 &&
+			pCoexDm->curPsTdma != 2 &&
+			pCoexDm->curPsTdma != 3 &&
+			pCoexDm->curPsTdma != 9 )
+		{
+			halbtc8192e1ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 9);
+			pCoexDm->psTdmaDuAdjType = 9;
+
+			up = 0;
+			dn = 0;
+			m = 1;
+			n= 3;
+			result = 0;
+			WaitCount = 0;
+		}		
+		return;
+	}
+
+	if(!pCoexDm->bAutoTdmaAdjust)
+	{
+		pCoexDm->bAutoTdmaAdjust = true;
+		BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_DETAIL, ("[BTCoex], first run TdmaDurationAdjust()!!\n"));
+
+		halbtc8192e1ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 2);
+		pCoexDm->psTdmaDuAdjType = 2;
+		//============
+		up = 0;
+		dn = 0;
+		m = 1;
+		n= 3;
+		result = 0;
+		WaitCount = 0;
+	}
+	else
+	{
+		// acquire the BT TRx retry count from BT_Info byte2
+		retryCount = pCoexSta->btRetryCnt;
+		btInfoExt = pCoexSta->btInfoExt;
+		BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_DETAIL, ("[BTCoex], retryCount = %d\n", retryCount));
+		BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_DETAIL, ("[BTCoex], up=%d, dn=%d, m=%d, n=%d, WaitCount=%d\n", 
+			up, dn, m, n, WaitCount));
+		result = 0;
+		WaitCount++; 
+		  
+		if(retryCount == 0)  // no retry in the last 2-second duration
+		{
+			up++;
+			dn--;
+
+			if (dn <= 0)
+				dn = 0;				 
+
+			if(up >= n)	// retry count has been 0 for n consecutive 2-second periods, so widen the WiFi duration
+			{
+				WaitCount = 0; 
+				n = 3;
+				up = 0;
+				dn = 0;
+				result = 1; 
+				BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_DETAIL, ("[BTCoex], Increase wifi duration!!\n"));
+			}
+		}
+		else if (retryCount <= 3)	// <=3 retry in the last 2-second duration
+		{
+			up--; 
+			dn++;
+
+			if (up <= 0)
+				up = 0;
+
+			if (dn == 2)	// retry count has been < 3 for 2 consecutive 2-second periods, so narrow the WiFi duration
+			{
+				if (WaitCount <= 2)
+					m++; // avoid bouncing back and forth between two levels
+				else
+					m = 1;
+
+				if ( m >= 20) // cap m at 20, i.e. recheck the WiFi duration at most every 120 seconds
+					m = 20;
+
+				n = 3*m;
+				up = 0;
+				dn = 0;
+				WaitCount = 0;
+				result = -1; 
+				BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_DETAIL, ("[BTCoex], Decrease wifi duration for retryCounter<3!!\n"));
+			}
+		}
+		else  // retry count > 3: a single period with retry count > 3 is enough to narrow the WiFi duration
+		{
+			if (WaitCount == 1)
+				m++; // avoid bouncing back and forth between two levels
+			else
+				m = 1;
+
+			if ( m >= 20) // cap m at 20, i.e. recheck the WiFi duration at most every 120 seconds
+				m = 20;
+
+			n = 3*m;
+			up = 0;
+			dn = 0;
+			WaitCount = 0; 
+			result = -1;
+			BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_DETAIL, ("[BTCoex], Decrease wifi duration for retryCounter>3!!\n"));
+		}
+
+		if(result == -1)
+		{
+			if( (BT_INFO_8192E_1ANT_A2DP_BASIC_RATE(btInfoExt)) &&
+				((pCoexDm->curPsTdma == 1) ||(pCoexDm->curPsTdma == 2)) )
+			{
+				halbtc8192e1ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 9);
+				pCoexDm->psTdmaDuAdjType = 9;
+			}
+			else if(pCoexDm->curPsTdma == 1)
+			{
+				halbtc8192e1ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 2);
+				pCoexDm->psTdmaDuAdjType = 2;
+			}
+			else if(pCoexDm->curPsTdma == 2)
+			{
+				halbtc8192e1ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 9);
+				pCoexDm->psTdmaDuAdjType = 9;
+			}
+			else if(pCoexDm->curPsTdma == 9)
+			{
+				halbtc8192e1ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 11);
+				pCoexDm->psTdmaDuAdjType = 11;
+			}
+		}
+		else if(result == 1)
+		{
+			if( (BT_INFO_8192E_1ANT_A2DP_BASIC_RATE(btInfoExt)) &&
+				((pCoexDm->curPsTdma == 1) ||(pCoexDm->curPsTdma == 2)) )
+			{
+				halbtc8192e1ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 9);
+				pCoexDm->psTdmaDuAdjType = 9;
+			}
+			else if(pCoexDm->curPsTdma == 11)
+			{
+				halbtc8192e1ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 9);
+				pCoexDm->psTdmaDuAdjType = 9;
+			}
+			else if(pCoexDm->curPsTdma == 9)
+			{
+				halbtc8192e1ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 2);
+				pCoexDm->psTdmaDuAdjType = 2;
+			}
+			else if(pCoexDm->curPsTdma == 2)
+			{
+				halbtc8192e1ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 1);
+				pCoexDm->psTdmaDuAdjType = 1;
+			}
+		}
+
+		if( pCoexDm->curPsTdma != 1 &&
+			pCoexDm->curPsTdma != 2 &&
+			pCoexDm->curPsTdma != 9 &&
+			pCoexDm->curPsTdma != 11 )
+		{
+			// recover to previous adjust type
+			halbtc8192e1ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, pCoexDm->psTdmaDuAdjType);
+		}
+	}
+}
+
+u1Byte
+halbtc8192e1ant_PsTdmaTypeByWifiRssi(
+	IN	s4Byte	wifiRssi,
+	IN	s4Byte	preWifiRssi,
+	IN	u1Byte	wifiRssiThresh
+	)
+{
+	u1Byte	psTdmaType=0;
+	
+	if(wifiRssi > preWifiRssi)
+	{
+		if(wifiRssi > (wifiRssiThresh+5))
+		{
+			psTdmaType = 26;
+		}
+		else
+		{
+			psTdmaType = 25;
+		}
+	}
+	else
+	{
+		if(wifiRssi > wifiRssiThresh)
+		{
+			psTdmaType = 26;
+		}
+		else
+		{
+			psTdmaType = 25;
+		}
+	}
+
+	return psTdmaType;
+}
+
+VOID
+halbtc8192e1ant_PsTdmaCheckForPowerSaveState(
+	IN	PBTC_COEXIST		pBtCoexist,
+	IN	BOOLEAN			bNewPsState
+	)
+{
+	u1Byte	lpsMode=0x0;
+
+	pBtCoexist->btc_get(pBtCoexist, BTC_GET_U1_LPS_MODE, &lpsMode);
+	
+	if(lpsMode)	// already under LPS state
+	{
+		if(bNewPsState)		
+		{
+			// keep state under LPS, do nothing.
+		}
+		else
+		{
+			// will leave LPS state, turn off psTdma first
+			halbtc8192e1ant_PsTdma(pBtCoexist, NORMAL_EXEC, FALSE, 0);
+		}
+	}
+	else						// NO PS state
+	{
+		if(bNewPsState)
+		{
+			// will enter LPS state, turn off psTdma first
+			halbtc8192e1ant_PsTdma(pBtCoexist, NORMAL_EXEC, FALSE, 0);
+		}
+		else
+		{
+			// keep state under NO PS state, do nothing.
+		}
+	}
+}
+
+VOID
+halbtc8192e1ant_PowerSaveState(
+	IN	PBTC_COEXIST		pBtCoexist,
+	IN	u1Byte				psType,
+	IN	u1Byte				lpsVal,
+	IN	u1Byte				rpwmVal
+	)
+{
+	BOOLEAN		bLowPwrDisable=FALSE;
+
+	switch(psType)
+	{
+		case BTC_PS_WIFI_NATIVE:
+			// recover to original 32k low power setting
+			bLowPwrDisable = FALSE;
+			pBtCoexist->btc_set(pBtCoexist, BTC_SET_ACT_DISABLE_LOW_POWER, &bLowPwrDisable);
+			pBtCoexist->btc_set(pBtCoexist, BTC_SET_ACT_NORMAL_LPS, NULL);
+			break;
+		case BTC_PS_LPS_ON:
+			halbtc8192e1ant_PsTdmaCheckForPowerSaveState(pBtCoexist, true);
+			halbtc8192e1ant_LpsRpwm(pBtCoexist, NORMAL_EXEC, lpsVal, rpwmVal);
+			// when coex force to enter LPS, do not enter 32k low power.
+			bLowPwrDisable = true;
+			pBtCoexist->btc_set(pBtCoexist, BTC_SET_ACT_DISABLE_LOW_POWER, &bLowPwrDisable);
+			// power save must executed before psTdma.
+			pBtCoexist->btc_set(pBtCoexist, BTC_SET_ACT_ENTER_LPS, NULL);
+			break;
+		case BTC_PS_LPS_OFF:
+			halbtc8192e1ant_PsTdmaCheckForPowerSaveState(pBtCoexist, FALSE);
+			pBtCoexist->btc_set(pBtCoexist, BTC_SET_ACT_LEAVE_LPS, NULL);
+			break;
+		default:
+			break;
+	}
+}
+
+
+VOID
+halbtc8192e1ant_ActionWifiOnly(
+	IN	PBTC_COEXIST		pBtCoexist
+	)
+{
+	halbtc8192e1ant_CoexTableWithType(pBtCoexist, NORMAL_EXEC, 0);
+	halbtc8192e1ant_PsTdma(pBtCoexist, NORMAL_EXEC, FALSE, 9);	
+}
+
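+// BT is treated as disabled when all four priority counters read back as
+// all-zero (or all-0xffff) for two consecutive polls; in that case the coex
+// settings are dropped back to WiFi-only via halbtc8192e1ant_ActionWifiOnly().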
+VOID
+halbtc8192e1ant_MonitorBtEnableDisable(
+	IN 	PBTC_COEXIST		pBtCoexist
+	)
+{
+	static BOOLEAN	bPreBtDisabled=FALSE;
+	static u4Byte		btDisableCnt=0;
+	BOOLEAN			bBtActive=true, bBtDisabled=FALSE;
+
+	// This function checks whether BT is disabled
+
+	if(	pCoexSta->highPriorityTx == 0 &&
+		pCoexSta->highPriorityRx == 0 &&
+		pCoexSta->lowPriorityTx == 0 &&
+		pCoexSta->lowPriorityRx == 0)
+	{
+		bBtActive = FALSE;
+	}
+	if(	pCoexSta->highPriorityTx == 0xffff &&
+		pCoexSta->highPriorityRx == 0xffff &&
+		pCoexSta->lowPriorityTx == 0xffff &&
+		pCoexSta->lowPriorityRx == 0xffff)
+	{
+		bBtActive = FALSE;
+	}
+	if(bBtActive)
+	{
+		btDisableCnt = 0;
+		bBtDisabled = FALSE;
+		pBtCoexist->btc_set(pBtCoexist, BTC_SET_BL_BT_DISABLE, &bBtDisabled);
+		BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_MONITOR, ("[BTCoex], BT is enabled !!\n"));
+	}
+	else
+	{
+		btDisableCnt++;
+		BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_MONITOR, ("[BTCoex], bt all counters=0, %d times!!\n", 
+				btDisableCnt));
+		if(btDisableCnt >= 2)
+		{
+			bBtDisabled = true;
+			pBtCoexist->btc_set(pBtCoexist, BTC_SET_BL_BT_DISABLE, &bBtDisabled);
+			BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_MONITOR, ("[BTCoex], BT is disabled !!\n"));
+			halbtc8192e1ant_ActionWifiOnly(pBtCoexist);
+		}
+	}
+	if(bPreBtDisabled != bBtDisabled)
+	{
+		BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_MONITOR, ("[BTCoex], BT is from %s to %s!!\n", 
+			(bPreBtDisabled ? "disabled":"enabled"), 
+			(bBtDisabled ? "disabled":"enabled")));
+		bPreBtDisabled = bBtDisabled;
+		if(!bBtDisabled)
+		{
+		}
+		else
+		{
+			pBtCoexist->btc_set(pBtCoexist, BTC_SET_ACT_LEAVE_LPS, NULL);
+			pBtCoexist->btc_set(pBtCoexist, BTC_SET_ACT_NORMAL_LPS, NULL);
+		}
+	}
+}
+
+//=============================================
+//
+//	Software Coex Mechanism start
+//
+//=============================================
+
+// SCO only or SCO+PAN(HS)
+VOID
+halbtc8192e1ant_ActionSco(
+	IN	PBTC_COEXIST		pBtCoexist
+	)
+{
+	u1Byte	wifiRssiState;
+	u4Byte	wifiBw;
+
+	wifiRssiState = halbtc8192e1ant_WifiRssiState(pBtCoexist, 0, 2, 25, 0);
+	
+	pBtCoexist->btc_get(pBtCoexist, BTC_GET_U4_WIFI_BW, &wifiBw);
+
+	if(BTC_WIFI_BW_HT40 == wifiBw)
+	{
+		// sw mechanism
+		if( (wifiRssiState == BTC_RSSI_STATE_HIGH) ||
+			(wifiRssiState == BTC_RSSI_STATE_STAY_HIGH) )
+		{
+			  halbtc8192e1ant_SwMechanism2(pBtCoexist,true,true,FALSE,0x18);
+		}
+		else
+		{
+			  halbtc8192e1ant_SwMechanism2(pBtCoexist,FALSE,true,FALSE,0x18);
+		}
+	}
+	else
+	{
+		// sw mechanism
+		if( (wifiRssiState == BTC_RSSI_STATE_HIGH) ||
+			(wifiRssiState == BTC_RSSI_STATE_STAY_HIGH) )
+		{
+			halbtc8192e1ant_SwMechanism2(pBtCoexist,true,true,FALSE,0x18);
+		}
+		else
+		{
+			halbtc8192e1ant_SwMechanism2(pBtCoexist,FALSE,FALSE,FALSE,0x18);
+		}		
+	}
+}
+
+
+VOID
+halbtc8192e1ant_ActionHid(
+	IN	PBTC_COEXIST		pBtCoexist
+	)
+{
+	u1Byte	wifiRssiState;	
+	u4Byte	wifiBw;
+
+	wifiRssiState = halbtc8192e1ant_WifiRssiState(pBtCoexist, 0, 2, 25, 0);
+
+	pBtCoexist->btc_get(pBtCoexist, BTC_GET_U4_WIFI_BW, &wifiBw);
+
+	if(BTC_WIFI_BW_HT40 == wifiBw)
+	{
+		// sw mechanism
+		if( (wifiRssiState == BTC_RSSI_STATE_HIGH) ||
+			(wifiRssiState == BTC_RSSI_STATE_STAY_HIGH) )
+		{
+			halbtc8192e1ant_SwMechanism2(pBtCoexist,true,FALSE,FALSE,0x18);
+		}
+		else
+		{
+			halbtc8192e1ant_SwMechanism2(pBtCoexist,FALSE,FALSE,FALSE,0x18);
+		}
+	}
+	else
+	{
+		// sw mechanism
+		if( (wifiRssiState == BTC_RSSI_STATE_HIGH) ||
+			(wifiRssiState == BTC_RSSI_STATE_STAY_HIGH) )
+		{
+			halbtc8192e1ant_SwMechanism2(pBtCoexist,true,true,FALSE,0x18);
+		}
+		else
+		{
+			halbtc8192e1ant_SwMechanism2(pBtCoexist,FALSE,FALSE,FALSE,0x18);
+		}		
+	}
+}
+
+// A2DP only / PAN(EDR) only / A2DP+PAN(HS)
+VOID
+halbtc8192e1ant_ActionA2dp(
+	IN	PBTC_COEXIST		pBtCoexist
+	)
+{
+	u1Byte		wifiRssiState;
+	u4Byte		wifiBw;
+
+	wifiRssiState = halbtc8192e1ant_WifiRssiState(pBtCoexist, 0, 2, 25, 0);
+
+	pBtCoexist->btc_get(pBtCoexist, BTC_GET_U4_WIFI_BW, &wifiBw);
+
+	if(BTC_WIFI_BW_HT40 == wifiBw)
+	{
+		// sw mechanism
+		if( (wifiRssiState == BTC_RSSI_STATE_HIGH) ||
+			(wifiRssiState == BTC_RSSI_STATE_STAY_HIGH) )
+		{
+			halbtc8192e1ant_SwMechanism2(pBtCoexist,true,true,FALSE,0x18);
+		}
+		else
+		{
+			halbtc8192e1ant_SwMechanism2(pBtCoexist,FALSE,true,FALSE,0x18);
+		}
+	}
+	else
+	{
+		// sw mechanism
+		if( (wifiRssiState == BTC_RSSI_STATE_HIGH) ||
+			(wifiRssiState == BTC_RSSI_STATE_STAY_HIGH) )
+		{
+			halbtc8192e1ant_SwMechanism2(pBtCoexist,true,true,FALSE,0x18);
+		}
+		else
+		{
+			halbtc8192e1ant_SwMechanism2(pBtCoexist,FALSE,FALSE,FALSE,0x18);
+		}		
+	}
+}
+
+VOID
+halbtc8192e1ant_ActionA2dpPanHs(
+	IN	PBTC_COEXIST		pBtCoexist
+	)
+{
+	u1Byte		wifiRssiState, btInfoExt;
+	u4Byte		wifiBw;
+
+	btInfoExt = pCoexSta->btInfoExt;
+	wifiRssiState = halbtc8192e1ant_WifiRssiState(pBtCoexist, 0, 2, 25, 0);
+
+	pBtCoexist->btc_get(pBtCoexist, BTC_GET_U4_WIFI_BW, &wifiBw);
+
+	if(BTC_WIFI_BW_HT40 == wifiBw)
+	{
+		// sw mechanism
+		if( (wifiRssiState == BTC_RSSI_STATE_HIGH) ||
+			(wifiRssiState == BTC_RSSI_STATE_STAY_HIGH) )
+		{
+			halbtc8192e1ant_SwMechanism2(pBtCoexist,true,true,FALSE,0x18);
+		}
+		else
+		{
+			halbtc8192e1ant_SwMechanism2(pBtCoexist,FALSE,true,FALSE,0x18);
+		}
+	}
+	else
+	{
+		// sw mechanism
+		if( (wifiRssiState == BTC_RSSI_STATE_HIGH) ||
+			(wifiRssiState == BTC_RSSI_STATE_STAY_HIGH) )
+		{
+			halbtc8192e1ant_SwMechanism2(pBtCoexist,true,true,FALSE,0x18);
+		}
+		else
+		{
+			halbtc8192e1ant_SwMechanism2(pBtCoexist,FALSE,FALSE,FALSE,0x18);
+		}		
+	}
+}
+
+VOID
+halbtc8192e1ant_ActionPanEdr(
+	IN	PBTC_COEXIST		pBtCoexist
+	)
+{
+	u1Byte		wifiRssiState;
+	u4Byte		wifiBw;
+
+	wifiRssiState = halbtc8192e1ant_WifiRssiState(pBtCoexist, 0, 2, 25, 0);
+
+	pBtCoexist->btc_get(pBtCoexist, BTC_GET_U4_WIFI_BW, &wifiBw);
+
+	if(BTC_WIFI_BW_HT40 == wifiBw)
+	{
+		// sw mechanism
+		if( (wifiRssiState == BTC_RSSI_STATE_HIGH) ||
+			(wifiRssiState == BTC_RSSI_STATE_STAY_HIGH) )
+		{
+			halbtc8192e1ant_SwMechanism2(pBtCoexist,true,true,FALSE,0x18);
+		}
+		else
+		{
+			halbtc8192e1ant_SwMechanism2(pBtCoexist,FALSE,true,FALSE,0x18);
+		}
+	}
+	else
+	{
+		// sw mechanism
+		if( (wifiRssiState == BTC_RSSI_STATE_HIGH) ||
+			(wifiRssiState == BTC_RSSI_STATE_STAY_HIGH) )
+		{
+			halbtc8192e1ant_SwMechanism2(pBtCoexist,true,true,FALSE,0x18);
+		}
+		else
+		{
+			halbtc8192e1ant_SwMechanism2(pBtCoexist,FALSE,FALSE,FALSE,0x18);
+		}
+	}
+}
+
+
+//PAN(HS) only
+VOID
+halbtc8192e1ant_ActionPanHs(
+	IN	PBTC_COEXIST		pBtCoexist
+	)
+{
+	u1Byte		wifiRssiState;
+	u4Byte		wifiBw;
+
+	wifiRssiState = halbtc8192e1ant_WifiRssiState(pBtCoexist, 0, 2, 25, 0);
+
+	pBtCoexist->btc_get(pBtCoexist, BTC_GET_U4_WIFI_BW, &wifiBw);
+
+	if(BTC_WIFI_BW_HT40 == wifiBw)
+	{
+		// sw mechanism
+		if( (wifiRssiState == BTC_RSSI_STATE_HIGH) ||
+			(wifiRssiState == BTC_RSSI_STATE_STAY_HIGH) )
+		{
+			halbtc8192e1ant_SwMechanism2(pBtCoexist,true,true,FALSE,0x18);
+		}
+		else
+		{
+			halbtc8192e1ant_SwMechanism2(pBtCoexist,FALSE,true,FALSE,0x18);
+		}
+	}
+	else
+	{
+		// sw mechanism
+		if( (wifiRssiState == BTC_RSSI_STATE_HIGH) ||
+			(wifiRssiState == BTC_RSSI_STATE_STAY_HIGH) )
+		{
+			halbtc8192e1ant_SwMechanism2(pBtCoexist,true,true,FALSE,0x18);
+		}
+		else
+		{
+			halbtc8192e1ant_SwMechanism2(pBtCoexist,FALSE,FALSE,FALSE,0x18);
+		}
+	}
+}
+
+//PAN(EDR)+A2DP
+VOID
+halbtc8192e1ant_ActionPanEdrA2dp(
+	IN	PBTC_COEXIST		pBtCoexist
+	)
+{
+	u1Byte		wifiRssiState, btInfoExt;
+	u4Byte		wifiBw;
+
+	btInfoExt = pCoexSta->btInfoExt;
+	wifiRssiState = halbtc8192e1ant_WifiRssiState(pBtCoexist, 0, 2, 25, 0);
+
+	pBtCoexist->btc_get(pBtCoexist, BTC_GET_U4_WIFI_BW, &wifiBw);
+
+	if(BTC_WIFI_BW_HT40 == wifiBw)
+	{
+		// sw mechanism
+		if( (wifiRssiState == BTC_RSSI_STATE_HIGH) ||
+			(wifiRssiState == BTC_RSSI_STATE_STAY_HIGH) )
+		{
+			halbtc8192e1ant_SwMechanism2(pBtCoexist,true,true,FALSE,0x18);
+		}
+		else
+		{
+			halbtc8192e1ant_SwMechanism2(pBtCoexist,FALSE,true,FALSE,0x18);
+		}
+	}
+	else
+	{
+		// sw mechanism
+		if( (wifiRssiState == BTC_RSSI_STATE_HIGH) ||
+			(wifiRssiState == BTC_RSSI_STATE_STAY_HIGH) )
+		{
+			halbtc8192e1ant_SwMechanism2(pBtCoexist,true,true,FALSE,0x18);
+		}
+		else
+		{
+			halbtc8192e1ant_SwMechanism2(pBtCoexist,FALSE,FALSE,FALSE,0x18);
+		}
+	}
+}
+
+VOID
+halbtc8192e1ant_ActionPanEdrHid(
+	IN	PBTC_COEXIST		pBtCoexist
+	)
+{
+	u1Byte		wifiRssiState;
+	u4Byte		wifiBw;
+
+	wifiRssiState = halbtc8192e1ant_WifiRssiState(pBtCoexist, 0, 2, 25, 0);
+
+	pBtCoexist->btc_get(pBtCoexist, BTC_GET_U4_WIFI_BW, &wifiBw);
+
+	if(BTC_WIFI_BW_HT40 == wifiBw)
+	{
+		// sw mechanism
+		if( (wifiRssiState == BTC_RSSI_STATE_HIGH) ||
+			(wifiRssiState == BTC_RSSI_STATE_STAY_HIGH) )
+		{
+			halbtc8192e1ant_SwMechanism2(pBtCoexist,true,true,FALSE,0x18);
+		}
+		else
+		{
+			halbtc8192e1ant_SwMechanism2(pBtCoexist,FALSE,true,FALSE,0x18);
+		}
+	}
+	else
+	{		
+		// sw mechanism
+		if( (wifiRssiState == BTC_RSSI_STATE_HIGH) ||
+			(wifiRssiState == BTC_RSSI_STATE_STAY_HIGH) )
+		{
+			halbtc8192e1ant_SwMechanism2(pBtCoexist,true,true,FALSE,0x18);
+		}
+		else
+		{
+			halbtc8192e1ant_SwMechanism2(pBtCoexist,FALSE,FALSE,FALSE,0x18);
+		}
+	}
+}
+
+// HID+A2DP+PAN(EDR)
+VOID
+halbtc8192e1ant_ActionHidA2dpPanEdr(
+	IN	PBTC_COEXIST		pBtCoexist
+	)
+{
+	u1Byte		wifiRssiState, btInfoExt;
+	u4Byte		wifiBw;
+
+	btInfoExt = pCoexSta->btInfoExt;
+	wifiRssiState = halbtc8192e1ant_WifiRssiState(pBtCoexist, 0, 2, 25, 0);
+
+	pBtCoexist->btc_get(pBtCoexist, BTC_GET_U4_WIFI_BW, &wifiBw);
+
+	if(BTC_WIFI_BW_HT40 == wifiBw)
+	{	
+		// sw mechanism
+		if( (wifiRssiState == BTC_RSSI_STATE_HIGH) ||
+			(wifiRssiState == BTC_RSSI_STATE_STAY_HIGH) )
+		{
+			halbtc8192e1ant_SwMechanism2(pBtCoexist,true,true,FALSE,0x18);
+		}
+		else
+		{
+			halbtc8192e1ant_SwMechanism2(pBtCoexist,FALSE,true,FALSE,0x18);
+		}
+	}
+	else
+	{
+		// sw mechanism
+		if( (wifiRssiState == BTC_RSSI_STATE_HIGH) ||
+			(wifiRssiState == BTC_RSSI_STATE_STAY_HIGH) )
+		{
+			halbtc8192e1ant_SwMechanism2(pBtCoexist,true,true,FALSE,0x18);
+		}
+		else
+		{
+			halbtc8192e1ant_SwMechanism2(pBtCoexist,FALSE,FALSE,FALSE,0x18);
+		}
+	}
+}
+
+VOID
+halbtc8192e1ant_ActionHidA2dp(
+	IN	PBTC_COEXIST		pBtCoexist
+	)
+{
+	u1Byte		wifiRssiState, btInfoExt;
+	u4Byte		wifiBw;
+
+	btInfoExt = pCoexSta->btInfoExt;
+	wifiRssiState = halbtc8192e1ant_WifiRssiState(pBtCoexist, 0, 2, 25, 0);
+
+	pBtCoexist->btc_get(pBtCoexist, BTC_GET_U4_WIFI_BW, &wifiBw);
+
+	if(BTC_WIFI_BW_HT40 == wifiBw)
+	{		
+		// sw mechanism
+		if( (wifiRssiState == BTC_RSSI_STATE_HIGH) ||
+			(wifiRssiState == BTC_RSSI_STATE_STAY_HIGH) )
+		{
+			halbtc8192e1ant_SwMechanism2(pBtCoexist,true,true,FALSE,0x18);
+		}
+		else
+		{
+			halbtc8192e1ant_SwMechanism2(pBtCoexist,FALSE,true,FALSE,0x18);
+		}
+	}
+	else
+	{
+		// sw mechanism
+		if( (wifiRssiState == BTC_RSSI_STATE_HIGH) ||
+			(wifiRssiState == BTC_RSSI_STATE_STAY_HIGH) )
+		{
+			halbtc8192e1ant_SwMechanism2(pBtCoexist,true,true,FALSE,0x18);
+		}
+		else
+		{
+			halbtc8192e1ant_SwMechanism2(pBtCoexist,FALSE,FALSE,FALSE,0x18);
+		}
+	}
+}
+
+//=============================================
+//
+//	Non-Software Coex Mechanism start
+//
+//=============================================
+VOID
+halbtc8192e1ant_ActionBtInquiry(
+	IN	PBTC_COEXIST		pBtCoexist
+	)
+{
+	PBTC_BT_LINK_INFO	pBtLinkInfo=&pBtCoexist->bt_link_info;
+	BOOLEAN				bWifiConnected=FALSE, bBtHsOn=FALSE;
+	
+	// Note:
+	// Do not do DacSwing here, use original setting.
+	
+	pBtCoexist->btc_get(pBtCoexist, BTC_GET_BL_WIFI_CONNECTED, &bWifiConnected);
+
+	pBtCoexist->btc_get(pBtCoexist, BTC_GET_BL_HS_OPERATION, &bBtHsOn);
+	if(bBtHsOn)
+		return;
+
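+	// WiFi not connected: PS-TDMA off (case 8); SCO or HID-only link: PS-TDMA case 32;
+	// otherwise enter LPS and run PS-TDMA case 30.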
+	if(!bWifiConnected)
+	{
+		halbtc8192e1ant_PowerSaveState(pBtCoexist, BTC_PS_WIFI_NATIVE, 0x0, 0x0);
+		
+		halbtc8192e1ant_DecBtPwr(pBtCoexist, NORMAL_EXEC, 0);
+
+		halbtc8192e1ant_PsTdma(pBtCoexist, NORMAL_EXEC, FALSE, 8);
+		halbtc8192e1ant_CoexTableWithType(pBtCoexist, NORMAL_EXEC, 0);
+	}
+	else if( (pBtLinkInfo->bScoExist) ||
+			(pBtLinkInfo->bHidOnly) )
+	{
+		// SCO/HID-only busy
+		halbtc8192e1ant_PowerSaveState(pBtCoexist, BTC_PS_WIFI_NATIVE, 0x0, 0x0);
+		
+		halbtc8192e1ant_DecBtPwr(pBtCoexist, NORMAL_EXEC, 0);
+
+		halbtc8192e1ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 32);
+		halbtc8192e1ant_CoexTableWithType(pBtCoexist, NORMAL_EXEC, 1);
+	}
+	else
+	{
+		halbtc8192e1ant_PowerSaveState(pBtCoexist, BTC_PS_LPS_ON, 0x50, 0x0);
+
+		halbtc8192e1ant_DecBtPwr(pBtCoexist, NORMAL_EXEC, 0);
+
+		halbtc8192e1ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 30);
+		halbtc8192e1ant_CoexTableWithType(pBtCoexist, NORMAL_EXEC, 0);
+	}
+}
+
+VOID
+halbtc8192e1ant_ActionBtScoHidOnlyBusy(
+	IN	PBTC_COEXIST		pBtCoexist,
+	IN	u1Byte				wifiStatus
+	)
+{
+	PBTC_BT_LINK_INFO pBtLinkInfo=&pBtCoexist->bt_link_info;
+	u1Byte		btRssiState=BTC_RSSI_STATE_HIGH;
+
+	if(BT_8192E_1ANT_WIFI_STATUS_NON_CONNECTED_ASSO_AUTH_SCAN == wifiStatus)
+	{		
+		halbtc8192e1ant_DecBtPwr(pBtCoexist, NORMAL_EXEC, 0);
+		halbtc8192e1ant_DacSwing(pBtCoexist, NORMAL_EXEC, FALSE, 0);
+	
+		halbtc8192e1ant_PsTdma(pBtCoexist, NORMAL_EXEC, FALSE, 8);
+		halbtc8192e1ant_CoexTableWithType(pBtCoexist, NORMAL_EXEC, 0);
+	}
+	else
+	{
+		if(pBtLinkInfo->bHidOnly)
+		{
+			halbtc8192e1ant_DecBtPwr(pBtCoexist, NORMAL_EXEC, 0);
+			halbtc8192e1ant_DacSwing(pBtCoexist, NORMAL_EXEC, FALSE, 0);
+		
+			halbtc8192e1ant_PsTdma(pBtCoexist, NORMAL_EXEC, FALSE, 8);
+			halbtc8192e1ant_CoexTableWithType(pBtCoexist, NORMAL_EXEC, 2);
+		}
+		else
+		{
+			// decrease BT power by a different amount for each BT RSSI level
+			btRssiState = halbtc8192e1ant_BtRssiState(3, 34, 42);
+			if( (btRssiState == BTC_RSSI_STATE_LOW) ||
+				(btRssiState == BTC_RSSI_STATE_STAY_LOW) )
+			{
+				halbtc8192e1ant_DecBtPwr(pBtCoexist, NORMAL_EXEC, 0);
+			}
+			else if( (btRssiState == BTC_RSSI_STATE_MEDIUM) ||
+					(btRssiState == BTC_RSSI_STATE_STAY_MEDIUM) )
+			{
+				halbtc8192e1ant_DecBtPwr(pBtCoexist, NORMAL_EXEC, 2);
+			}
+			else if( (btRssiState == BTC_RSSI_STATE_HIGH) ||
+					(btRssiState == BTC_RSSI_STATE_STAY_HIGH) )
+			{
+				halbtc8192e1ant_DecBtPwr(pBtCoexist, NORMAL_EXEC, 6);
+			}
+
+			// sw dacSwing
+			halbtc8192e1ant_DacSwing(pBtCoexist, NORMAL_EXEC, true, 0xc);
+
+			halbtc8192e1ant_PsTdma(pBtCoexist, NORMAL_EXEC, FALSE, 0);
+			halbtc8192e1ant_CoexTableWithType(pBtCoexist, NORMAL_EXEC, 7);			
+		}
+	}
+}
+
+VOID
+halbtc8192e1ant_ActionHs(
+	IN	PBTC_COEXIST		pBtCoexist
+	)
+{
+	PBTC_BT_LINK_INFO pBtLinkInfo=&pBtCoexist->bt_link_info;
+
+	BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], Action for HS!!!\n"));
+
+	halbtc8192e1ant_PowerSaveState(pBtCoexist, BTC_PS_WIFI_NATIVE, 0x0, 0x0);
+
+	if(BT_8192E_1ANT_BT_STATUS_NON_CONNECTED_IDLE == pCoexDm->btStatus)
+	{
+		// error, should not be here
+		pCoexDm->errorCondition = 1;
+	}
+	else if(BT_8192E_1ANT_BT_STATUS_CONNECTED_IDLE == pCoexDm->btStatus)
+	{
+		halbtc8192e1ant_DecBtPwr(pBtCoexist, NORMAL_EXEC, 0);
+		halbtc8192e1ant_DacSwing(pBtCoexist, NORMAL_EXEC, true, 6);
+
+		halbtc8192e1ant_PsTdma(pBtCoexist, NORMAL_EXEC, FALSE, 10);
+		halbtc8192e1ant_CoexTableWithType(pBtCoexist, NORMAL_EXEC, 0);
+	}
+	else if(BT_8192E_1ANT_BT_STATUS_ACL_BUSY == pCoexDm->btStatus && 
+			!pBtCoexist->bt_link_info.bHidOnly)
+	{
+		if(pCoexDm->curSsType == 1)
+		{
+			halbtc8192e1ant_DecBtPwr(pBtCoexist, NORMAL_EXEC, 0);
+			halbtc8192e1ant_DacSwing(pBtCoexist, NORMAL_EXEC, true, 6);
+			halbtc8192e1ant_PsTdma(pBtCoexist, NORMAL_EXEC, FALSE, 10);
+			//halbtc8192e1ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 38);
+			halbtc8192e1ant_CoexTableWithType(pBtCoexist, NORMAL_EXEC, 0);
+		}
+	}
+	else
+	{
+		halbtc8192e1ant_ActionBtScoHidOnlyBusy(pBtCoexist,
+			BT_8192E_1ANT_WIFI_STATUS_CONNECTED_BUSY);
+	}
+}
+
+VOID
+halbtc8192e1ant_ActionWifiConnectedBtAclBusy(
+	IN	PBTC_COEXIST		pBtCoexist,
+	IN	u1Byte				wifiStatus
+	)
+{
+	PBTC_BT_LINK_INFO pBtLinkInfo=&pBtCoexist->bt_link_info;
+
+	if(pBtLinkInfo->bHidOnly)
+	{
+		halbtc8192e1ant_ActionBtScoHidOnlyBusy(pBtCoexist, wifiStatus);
+		pCoexDm->bAutoTdmaAdjust = FALSE;
+		return;
+	}
+	
+	halbtc8192e1ant_DecBtPwr(pBtCoexist, NORMAL_EXEC, 0);
+	halbtc8192e1ant_DacSwing(pBtCoexist, NORMAL_EXEC, FALSE, 0);
+
+	if( (pBtLinkInfo->bA2dpOnly) ||
+		(pBtLinkInfo->bHidExist&&pBtLinkInfo->bA2dpExist) )
+	{
+		halbtc8192e1ant_TdmaDurationAdjustForAcl(pBtCoexist, wifiStatus);
+	}
+	else if( (pBtLinkInfo->bPanOnly) ||
+			(pBtLinkInfo->bHidExist&&pBtLinkInfo->bPanExist) )
+	{
+		halbtc8192e1ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 3);
+		pCoexDm->bAutoTdmaAdjust = FALSE;
+	}
+	else
+	{
+		if( (BT_8192E_1ANT_WIFI_STATUS_NON_CONNECTED_ASSO_AUTH_SCAN == wifiStatus) ||
+			(BT_8192E_1ANT_WIFI_STATUS_CONNECTED_SCAN == wifiStatus) ||
+			(BT_8192E_1ANT_WIFI_STATUS_CONNECTED_SPECIAL_PKT == wifiStatus) )
+			halbtc8192e1ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 9);
+		else
+			halbtc8192e1ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 11);
+		pCoexDm->bAutoTdmaAdjust = FALSE;
+	}
+		
+	halbtc8192e1ant_CoexTableWithType(pBtCoexist, NORMAL_EXEC, 1);
+}
+
+
+VOID
+halbtc8192e1ant_ActionWifiNotConnected(
+	IN	PBTC_COEXIST		pBtCoexist
+	)
+{
+	// power save state
+	halbtc8192e1ant_PowerSaveState(pBtCoexist, BTC_PS_WIFI_NATIVE, 0x0, 0x0);
+
+	halbtc8192e1ant_DecBtPwr(pBtCoexist, NORMAL_EXEC, 0);
+	halbtc8192e1ant_DacSwing(pBtCoexist, NORMAL_EXEC, FALSE, 0);
+
+	halbtc8192e1ant_PsTdma(pBtCoexist, NORMAL_EXEC, FALSE, 8);
+	halbtc8192e1ant_CoexTableWithType(pBtCoexist, NORMAL_EXEC, 0);	
+}
+
+VOID
+halbtc8192e1ant_ActionWifiNotConnectedAssoAuthScan(
+	IN	PBTC_COEXIST		pBtCoexist
+	)
+{
+	halbtc8192e1ant_PowerSaveState(pBtCoexist, BTC_PS_WIFI_NATIVE, 0x0, 0x0);
+
+	halbtc8192e1ant_DecBtPwr(pBtCoexist, NORMAL_EXEC, 0);
+	halbtc8192e1ant_DacSwing(pBtCoexist, NORMAL_EXEC, FALSE, 0);
+
+	halbtc8192e1ant_PsTdma(pBtCoexist, NORMAL_EXEC, FALSE, 8);
+	halbtc8192e1ant_CoexTableWithType(pBtCoexist, NORMAL_EXEC, 0);
+}
+
+VOID
+halbtc8192e1ant_ActionWifiConnectedScan(
+	IN	PBTC_COEXIST		pBtCoexist
+	)
+{
+	// power save state
+	if(BT_8192E_1ANT_BT_STATUS_ACL_BUSY == pCoexDm->btStatus && !pBtCoexist->bt_link_info.bHidOnly)
+		halbtc8192e1ant_PowerSaveState(pBtCoexist, BTC_PS_LPS_ON, 0x50, 0x0);
+	else
+		halbtc8192e1ant_PowerSaveState(pBtCoexist, BTC_PS_WIFI_NATIVE, 0x0, 0x0);
+
+	if(BT_8192E_1ANT_BT_STATUS_ACL_BUSY == pCoexDm->btStatus)
+	{
+		halbtc8192e1ant_ActionWifiConnectedBtAclBusy(pBtCoexist,
+			BT_8192E_1ANT_WIFI_STATUS_CONNECTED_SCAN);
+	}
+	else if( (BT_8192E_1ANT_BT_STATUS_SCO_BUSY == pCoexDm->btStatus) ||
+			(BT_8192E_1ANT_BT_STATUS_ACL_SCO_BUSY == pCoexDm->btStatus) )
+	{
+		halbtc8192e1ant_ActionBtScoHidOnlyBusy(pBtCoexist,
+			BT_8192E_1ANT_WIFI_STATUS_CONNECTED_SCAN);
+	}
+	else
+	{		
+		halbtc8192e1ant_DecBtPwr(pBtCoexist, NORMAL_EXEC, 0);
+		halbtc8192e1ant_DacSwing(pBtCoexist, NORMAL_EXEC, FALSE, 0);
+		halbtc8192e1ant_PsTdma(pBtCoexist, NORMAL_EXEC, FALSE, 8);
+		halbtc8192e1ant_CoexTableWithType(pBtCoexist, NORMAL_EXEC, 2);
+	}
+}
+
+
+VOID
+halbtc8192e1ant_ActionWifiConnectedSpecialPacket(
+	IN	PBTC_COEXIST		pBtCoexist
+	)
+{
+	// power save state
+	if(BT_8192E_1ANT_BT_STATUS_ACL_BUSY == pCoexDm->btStatus && !pBtCoexist->bt_link_info.bHidOnly)
+		halbtc8192e1ant_PowerSaveState(pBtCoexist, BTC_PS_LPS_ON, 0x50, 0x0);
+	else
+		halbtc8192e1ant_PowerSaveState(pBtCoexist, BTC_PS_WIFI_NATIVE, 0x0, 0x0);
+
+	if(BT_8192E_1ANT_BT_STATUS_ACL_BUSY == pCoexDm->btStatus)
+	{
+		halbtc8192e1ant_ActionWifiConnectedBtAclBusy(pBtCoexist,
+			BT_8192E_1ANT_WIFI_STATUS_CONNECTED_SPECIAL_PKT);
+	}
+	else
+	{		
+		halbtc8192e1ant_DecBtPwr(pBtCoexist, NORMAL_EXEC, 0);
+		halbtc8192e1ant_DacSwing(pBtCoexist, NORMAL_EXEC, FALSE, 0);
+		halbtc8192e1ant_PsTdma(pBtCoexist, NORMAL_EXEC, FALSE, 8);
+		halbtc8192e1ant_CoexTableWithType(pBtCoexist, NORMAL_EXEC, 2);
+	}
+}
+
+VOID
+halbtc8192e1ant_ActionWifiConnected(
+	IN	PBTC_COEXIST		pBtCoexist
+	)
+{
+	BOOLEAN 	bWifiConnected=FALSE, bWifiBusy=FALSE;
+	BOOLEAN 	bScan=FALSE, bLink=FALSE, bRoam=FALSE;
+	BOOLEAN 	bUnder4way=FALSE;
+	u4Byte		wifiBw;
+
+	BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], CoexForWifiConnect()===>\n"));
+
+	pBtCoexist->btc_get(pBtCoexist, BTC_GET_BL_WIFI_CONNECTED, &bWifiConnected);
+	if(!bWifiConnected)
+	{
+		BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], CoexForWifiConnect(), return for wifi not connected<===\n"));
+		return;
+	}
+
+	pBtCoexist->btc_get(pBtCoexist, BTC_GET_BL_WIFI_4_WAY_PROGRESS, &bUnder4way);
+	if(bUnder4way)
+	{
+		halbtc8192e1ant_ActionWifiConnectedSpecialPacket(pBtCoexist);
+		BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], CoexForWifiConnect(), return for wifi is under 4way<===\n"));
+		return;
+	}
+
+	pBtCoexist->btc_get(pBtCoexist, BTC_GET_BL_WIFI_SCAN, &bScan);
+	pBtCoexist->btc_get(pBtCoexist, BTC_GET_BL_WIFI_LINK, &bLink);
+	pBtCoexist->btc_get(pBtCoexist, BTC_GET_BL_WIFI_ROAM, &bRoam);
+	if(bScan || bLink || bRoam)
+	{
+		halbtc8192e1ant_ActionWifiConnectedScan(pBtCoexist);
+		BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], CoexForWifiConnect(), return for wifi is under scan<===\n"));
+		return;
+	}
+
+	// power save state
+	if(BT_8192E_1ANT_BT_STATUS_ACL_BUSY == pCoexDm->btStatus && !pBtCoexist->bt_link_info.bHidOnly)
+		halbtc8192e1ant_PowerSaveState(pBtCoexist, BTC_PS_LPS_ON, 0x50, 0x0);
+	else
+		halbtc8192e1ant_PowerSaveState(pBtCoexist, BTC_PS_WIFI_NATIVE, 0x0, 0x0);
+
+	pBtCoexist->btc_get(pBtCoexist, BTC_GET_BL_WIFI_BUSY, &bWifiBusy);	
+	if(!bWifiBusy)
+	{
+		if(BT_8192E_1ANT_BT_STATUS_ACL_BUSY == pCoexDm->btStatus)
+		{
+			halbtc8192e1ant_ActionWifiConnectedBtAclBusy(pBtCoexist, 
+				BT_8192E_1ANT_WIFI_STATUS_CONNECTED_IDLE);
+		}
+		else if( (BT_8192E_1ANT_BT_STATUS_SCO_BUSY == pCoexDm->btStatus) ||
+			(BT_8192E_1ANT_BT_STATUS_ACL_SCO_BUSY == pCoexDm->btStatus) )
+		{
+			halbtc8192e1ant_ActionBtScoHidOnlyBusy(pBtCoexist,
+				BT_8192E_1ANT_WIFI_STATUS_CONNECTED_IDLE);
+		}
+		else
+		{				
+			halbtc8192e1ant_DecBtPwr(pBtCoexist, NORMAL_EXEC, 0);
+			halbtc8192e1ant_DacSwing(pBtCoexist, NORMAL_EXEC, FALSE, 0);
+			halbtc8192e1ant_PsTdma(pBtCoexist, NORMAL_EXEC, FALSE, 8);
+			halbtc8192e1ant_CoexTableWithType(pBtCoexist, NORMAL_EXEC, 2);
+		}
+	}
+	else
+	{
+		if(BT_8192E_1ANT_BT_STATUS_NON_CONNECTED_IDLE == pCoexDm->btStatus)
+		{				
+			halbtc8192e1ant_DecBtPwr(pBtCoexist, NORMAL_EXEC, 0);
+			halbtc8192e1ant_DacSwing(pBtCoexist, NORMAL_EXEC, FALSE, 0);
+			halbtc8192e1ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 5);
+			halbtc8192e1ant_CoexTableWithType(pBtCoexist, NORMAL_EXEC, 2);
+		}
+		else if(BT_8192E_1ANT_BT_STATUS_CONNECTED_IDLE == pCoexDm->btStatus)
+		{				
+			halbtc8192e1ant_DecBtPwr(pBtCoexist, NORMAL_EXEC, 0);
+			halbtc8192e1ant_DacSwing(pBtCoexist, NORMAL_EXEC, FALSE, 0);			
+			halbtc8192e1ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 5);
+			halbtc8192e1ant_CoexTableWithType(pBtCoexist, NORMAL_EXEC, 2);
+		}
+		else if(BT_8192E_1ANT_BT_STATUS_ACL_BUSY == pCoexDm->btStatus)
+		{
+			halbtc8192e1ant_ActionWifiConnectedBtAclBusy(pBtCoexist,
+				BT_8192E_1ANT_WIFI_STATUS_CONNECTED_BUSY);
+		}
+		else if( (BT_8192E_1ANT_BT_STATUS_SCO_BUSY == pCoexDm->btStatus) ||
+			(BT_8192E_1ANT_BT_STATUS_ACL_SCO_BUSY == pCoexDm->btStatus) )
+		{
+			halbtc8192e1ant_ActionBtScoHidOnlyBusy(pBtCoexist,
+				BT_8192E_1ANT_WIFI_STATUS_CONNECTED_BUSY);
+		}
+		else 
+		{				
+			halbtc8192e1ant_DecBtPwr(pBtCoexist, NORMAL_EXEC, 0);
+			halbtc8192e1ant_DacSwing(pBtCoexist, NORMAL_EXEC, FALSE, 0);
+			halbtc8192e1ant_PsTdma(pBtCoexist, NORMAL_EXEC, FALSE, 8);
+			halbtc8192e1ant_CoexTableWithType(pBtCoexist, NORMAL_EXEC, 2);
+		}
+	}
+}
+
+VOID
+halbtc8192e1ant_RunSwCoexistMechanism(
+	IN	PBTC_COEXIST		pBtCoexist
+	)
+{
+	BOOLEAN				bWifiUnder5G=FALSE, bWifiBusy=FALSE, bWifiConnected=FALSE;
+	u1Byte				btInfoOriginal=0, btRetryCnt=0;
+	u1Byte				algorithm=0;
+
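+	// NOTE: the SW coex dispatch below is currently disabled by this early
+	// return and is kept only for reference.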
+	return;
+
+	algorithm = halbtc8192e1ant_ActionAlgorithm(pBtCoexist);
+	pCoexDm->curAlgorithm = algorithm;
+
+	if(halbtc8192e1ant_IsCommonAction(pBtCoexist))
+	{
+	}
+	else
+	{
+		switch(pCoexDm->curAlgorithm)
+		{
+			case BT_8192E_1ANT_COEX_ALGO_SCO:
+				BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], Action algorithm = SCO.\n"));
+				halbtc8192e1ant_ActionSco(pBtCoexist);
+				break;
+			case BT_8192E_1ANT_COEX_ALGO_HID:
+				BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], Action algorithm = HID.\n"));
+				halbtc8192e1ant_ActionHid(pBtCoexist);
+				break;
+			case BT_8192E_1ANT_COEX_ALGO_A2DP:
+				BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], Action algorithm = A2DP.\n"));
+				halbtc8192e1ant_ActionA2dp(pBtCoexist);
+				break;
+			case BT_8192E_1ANT_COEX_ALGO_A2DP_PANHS:
+				BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], Action algorithm = A2DP+PAN(HS).\n"));
+				halbtc8192e1ant_ActionA2dpPanHs(pBtCoexist);
+				break;
+			case BT_8192E_1ANT_COEX_ALGO_PANEDR:
+				BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], Action algorithm = PAN(EDR).\n"));
+				halbtc8192e1ant_ActionPanEdr(pBtCoexist);
+				break;
+			case BT_8192E_1ANT_COEX_ALGO_PANHS:
+				BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], Action algorithm = HS mode.\n"));
+				halbtc8192e1ant_ActionPanHs(pBtCoexist);
+				break;
+			case BT_8192E_1ANT_COEX_ALGO_PANEDR_A2DP:
+				BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], Action algorithm = PAN+A2DP.\n"));
+				halbtc8192e1ant_ActionPanEdrA2dp(pBtCoexist);
+				break;
+			case BT_8192E_1ANT_COEX_ALGO_PANEDR_HID:
+				BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], Action algorithm = PAN(EDR)+HID.\n"));
+				halbtc8192e1ant_ActionPanEdrHid(pBtCoexist);
+				break;
+			case BT_8192E_1ANT_COEX_ALGO_HID_A2DP_PANEDR:
+				BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], Action algorithm = HID+A2DP+PAN.\n"));
+				halbtc8192e1ant_ActionHidA2dpPanEdr(pBtCoexist);
+				break;
+			case BT_8192E_1ANT_COEX_ALGO_HID_A2DP:
+				BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], Action algorithm = HID+A2DP.\n"));
+				halbtc8192e1ant_ActionHidA2dp(pBtCoexist);
+				break;
+			default:
+				BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], Action algorithm = coexist All Off!!\n"));
+				halbtc8192e1ant_CoexAllOff(pBtCoexist);
+				break;
+		}
+		pCoexDm->preAlgorithm = pCoexDm->curAlgorithm;
+	}
+}
+
+VOID
+halbtc8192e1ant_RunCoexistMechanism(
+	IN	PBTC_COEXIST		pBtCoexist
+	)
+{
+	PBTC_BT_LINK_INFO	pBtLinkInfo=&pBtCoexist->bt_link_info;
+	BOOLEAN	bWifiConnected=FALSE, bBtHsOn=FALSE;
+
+	BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], RunCoexistMechanism()===>\n"));
+
+	if(pBtCoexist->manual_control)
+	{
+		BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], RunCoexistMechanism(), return for Manual CTRL <===\n"));
+		return;
+	}
+
+	if(pBtCoexist->bStopCoexDm)
+	{
+		BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], RunCoexistMechanism(), return for Stop Coex DM <===\n"));
+		return;
+	}
+
+	if(pCoexSta->bUnderIps)
+	{
+		BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], wifi is under IPS !!!\n"));
+		return;
+	}
+	
+	halbtc8192e1ant_RunSwCoexistMechanism(pBtCoexist);
+
+	pBtCoexist->btc_get(pBtCoexist, BTC_GET_BL_HS_OPERATION, &bBtHsOn);
+	if(pCoexSta->bC2hBtInquiryPage)
+	{
+		halbtc8192e1ant_ActionBtInquiry(pBtCoexist);
+		return;
+	}
+
+	// 1ss or 2ss
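+	// SCO, or HS operation without a HID-only link, limits WiFi to 1 spatial
+	// stream; otherwise 2SS is allowed.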
+	if(pBtLinkInfo->bScoExist)
+	{
+		halbtc8192e1ant_SwitchSsType(pBtCoexist, NORMAL_EXEC, 1);
+	}
+	else if(bBtHsOn)
+	{
+		if(pBtLinkInfo->bHidOnly)
+			halbtc8192e1ant_SwitchSsType(pBtCoexist, NORMAL_EXEC, 2);
+		else
+			halbtc8192e1ant_SwitchSsType(pBtCoexist, NORMAL_EXEC, 1);
+	}
+	else
+		halbtc8192e1ant_SwitchSsType(pBtCoexist, NORMAL_EXEC, 2);
+	
+	if(bBtHsOn)
+	{
+		halbtc8192e1ant_ActionHs(pBtCoexist);
+		return;
+	}
+
+	pBtCoexist->btc_get(pBtCoexist, BTC_GET_BL_WIFI_CONNECTED, &bWifiConnected);
+	if(!bWifiConnected)
+	{
+		BOOLEAN	bScan=FALSE, bLink=FALSE, bRoam=FALSE;
+		
+		BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], wifi is non connected-idle !!!\n"));
+
+		pBtCoexist->btc_get(pBtCoexist, BTC_GET_BL_WIFI_SCAN, &bScan);
+		pBtCoexist->btc_get(pBtCoexist, BTC_GET_BL_WIFI_LINK, &bLink);
+		pBtCoexist->btc_get(pBtCoexist, BTC_GET_BL_WIFI_ROAM, &bRoam);
+
+		if(bScan || bLink || bRoam)
+			halbtc8192e1ant_ActionWifiNotConnectedAssoAuthScan(pBtCoexist);
+		else
+			halbtc8192e1ant_ActionWifiNotConnected(pBtCoexist);
+	}
+	else
+	{
+		halbtc8192e1ant_ActionWifiConnected(pBtCoexist);
+	}
+}
+
+VOID
+halbtc8192e1ant_InitCoexDm(
+	IN	PBTC_COEXIST		pBtCoexist
+	)
+{	
+	// force to reset coex mechanism
+	halbtc8192e1ant_FwDacSwingLvl(pBtCoexist, FORCE_EXEC, 6);
+	halbtc8192e1ant_DecBtPwr(pBtCoexist, FORCE_EXEC, 0);
+
+	// sw all off
+	halbtc8192e1ant_SwMechanism1(pBtCoexist,FALSE,FALSE,FALSE,FALSE);
+	halbtc8192e1ant_SwMechanism2(pBtCoexist,FALSE,FALSE,FALSE,0x18);
+
+	halbtc8192e1ant_SwitchSsType(pBtCoexist, FORCE_EXEC, 2);
+
+	halbtc8192e1ant_PsTdma(pBtCoexist, FORCE_EXEC, FALSE, 8);
+	halbtc8192e1ant_CoexTableWithType(pBtCoexist, FORCE_EXEC, 0);
+}
+
+//============================================================
+// workaround functions start with wa_halbtc8192e1ant_
+//============================================================
+//============================================================
+// extern functions start with EXhalbtc8192e1ant_
+//============================================================
+VOID
+EXhalbtc8192e1ant_InitHwConfig(
+	IN	PBTC_COEXIST		pBtCoexist
+	)
+{
+	u4Byte	u4Tmp=0;
+	u2Byte	u2Tmp=0;
+	u1Byte	u1Tmp=0;
+
+	BTC_PRINT(BTC_MSG_INTERFACE, INTF_INIT, ("[BTCoex], 1Ant Init HW Config!!\n"));
+
+	// backup rf 0x1e value
+	pCoexDm->btRf0x1eBackup = 
+		pBtCoexist->btc_get_rf_reg(pBtCoexist, BTC_RF_A, 0x1e, 0xfffff);
+
+	// antenna sw ctrl to bt
+	pBtCoexist->btc_write_1byte(pBtCoexist, 0x4f, 0x6);
+	pBtCoexist->btc_write_1byte(pBtCoexist, 0x944, 0x24);
+	pBtCoexist->btc_write_4byte(pBtCoexist, 0x930, 0x700700);
+	pBtCoexist->btc_write_1byte(pBtCoexist, 0x92c, 0x20);
+	if(pBtCoexist->chipInterface == BTC_INTF_USB)
+		pBtCoexist->btc_write_4byte(pBtCoexist, 0x64, 0x30430004);
+	else
+		pBtCoexist->btc_write_4byte(pBtCoexist, 0x64, 0x30030004);
+
+	halbtc8192e1ant_CoexTableWithType(pBtCoexist, FORCE_EXEC, 0);
+
+	// antenna switch control parameter
+	pBtCoexist->btc_write_4byte(pBtCoexist, 0x858, 0x55555555);
+
+	// coex parameters
+	pBtCoexist->btc_write_1byte(pBtCoexist, 0x778, 0x1);
+	// 0x790[5:0]=0x5
+	u1Tmp = pBtCoexist->btc_read_1byte(pBtCoexist, 0x790);
+	u1Tmp &= 0xc0;
+	u1Tmp |= 0x5;
+	pBtCoexist->btc_write_1byte(pBtCoexist, 0x790, u1Tmp);
+
+	// enable counter statistics
+	pBtCoexist->btc_write_1byte(pBtCoexist, 0x76e, 0x4);
+
+	// enable PTA
+	pBtCoexist->btc_write_1byte(pBtCoexist, 0x40, 0x20);
+	// enable mailbox interface
+	u2Tmp = pBtCoexist->btc_read_2byte(pBtCoexist, 0x40);
+	u2Tmp |= BIT9;
+	pBtCoexist->btc_write_2byte(pBtCoexist, 0x40, u2Tmp);
+
+	// enable PTA I2C mailbox 
+	u1Tmp = pBtCoexist->btc_read_1byte(pBtCoexist, 0x101);
+	u1Tmp |= BIT4;
+	pBtCoexist->btc_write_1byte(pBtCoexist, 0x101, u1Tmp);
+
+	// enable bt clock when wifi is disabled.
+	u1Tmp = pBtCoexist->btc_read_1byte(pBtCoexist, 0x93);
+	u1Tmp |= BIT0;
+	pBtCoexist->btc_write_1byte(pBtCoexist, 0x93, u1Tmp);
+	// enable BT clock during suspend.
+	u1Tmp = pBtCoexist->btc_read_1byte(pBtCoexist, 0x7);
+	u1Tmp |= BIT0;
+	pBtCoexist->btc_write_1byte(pBtCoexist, 0x7, u1Tmp);
+}
+
+VOID
+EXhalbtc8192e1ant_InitCoexDm(
+	IN	PBTC_COEXIST		pBtCoexist
+	)
+{
+	BTC_PRINT(BTC_MSG_INTERFACE, INTF_INIT, ("[BTCoex], Coex Mechanism Init!!\n"));
+
+	pBtCoexist->bStopCoexDm = FALSE;
+	
+	halbtc8192e1ant_InitCoexDm(pBtCoexist);
+}
+
+VOID
+EXhalbtc8192e1ant_DisplayCoexInfo(
+	IN	PBTC_COEXIST		pBtCoexist
+	)
+{
+	struct btc_board_info *		pBoardInfo=&pBtCoexist->board_info;
+	PBTC_STACK_INFO		pStackInfo=&pBtCoexist->stack_info;
+	PBTC_BT_LINK_INFO	pBtLinkInfo=&pBtCoexist->bt_link_info;
+	pu1Byte				cliBuf=pBtCoexist->cli_buf;
+	u1Byte				u1Tmp[4], i, btInfoExt, psTdmaCase=0;
+	u4Byte				u4Tmp[4];
+	BOOLEAN				bRoam=FALSE, bScan=FALSE, bLink=FALSE, bWifiUnder5G=FALSE;
+	BOOLEAN				bBtHsOn=FALSE, bWifiBusy=FALSE;
+	s4Byte				wifiRssi=0, btHsRssi=0;
+	u4Byte				wifiBw, wifiTrafficDir;
+	u1Byte				wifiDot11Chnl, wifiHsChnl;
+	u4Byte				fwVer=0, btPatchVer=0;
+
+	CL_SPRINTF(cliBuf, BT_TMP_BUF_SIZE, "\r\n ============[BT Coexist info]============");
+	CL_PRINTF(cliBuf);
+
+	if(pBtCoexist->manual_control)
+	{
+		CL_SPRINTF(cliBuf, BT_TMP_BUF_SIZE, "\r\n ============[Under Manual Control]============");
+		CL_PRINTF(cliBuf);
+		CL_SPRINTF(cliBuf, BT_TMP_BUF_SIZE, "\r\n ==========================================");
+		CL_PRINTF(cliBuf);
+	}	
+	if(pBtCoexist->bStopCoexDm)
+	{
+		CL_SPRINTF(cliBuf, BT_TMP_BUF_SIZE, "\r\n ============[Coex is STOPPED]============");
+		CL_PRINTF(cliBuf);
+		CL_SPRINTF(cliBuf, BT_TMP_BUF_SIZE, "\r\n ==========================================");
+		CL_PRINTF(cliBuf);
+	}
+
+	if(!pBoardInfo->bt_exist)
+	{
+		CL_SPRINTF(cliBuf, BT_TMP_BUF_SIZE, "\r\n BT does not exist !!!");
+		CL_PRINTF(cliBuf);
+		return;
+	}
+
+	CL_SPRINTF(cliBuf, BT_TMP_BUF_SIZE, "\r\n %-35s = %d/ %d ", "Ant PG number/ Ant mechanism:", \
+		pBoardInfo->pg_ant_num, pBoardInfo->btdm_ant_num);
+	CL_PRINTF(cliBuf);	
+	
+	CL_SPRINTF(cliBuf, BT_TMP_BUF_SIZE, "\r\n %-35s = %s / %d", "BT stack/ hci ext ver", \
+		((pStackInfo->bProfileNotified)? "Yes":"No"), pStackInfo->hciVersion);
+	CL_PRINTF(cliBuf);
+
+	pBtCoexist->btc_get(pBtCoexist, BTC_GET_U4_BT_PATCH_VER, &btPatchVer);
+	pBtCoexist->btc_get(pBtCoexist, BTC_GET_U4_WIFI_FW_VER, &fwVer);
+	CL_SPRINTF(cliBuf, BT_TMP_BUF_SIZE, "\r\n %-35s = %d_%d/ 0x%x/ 0x%x(%d)", "CoexVer/ FwVer/ PatchVer", \
+		GLCoexVerDate8192e1Ant, GLCoexVer8192e1Ant, fwVer, btPatchVer, btPatchVer);
+	CL_PRINTF(cliBuf);
+
+	pBtCoexist->btc_get(pBtCoexist, BTC_GET_BL_HS_OPERATION, &bBtHsOn);
+	pBtCoexist->btc_get(pBtCoexist, BTC_GET_U1_WIFI_DOT11_CHNL, &wifiDot11Chnl);
+	pBtCoexist->btc_get(pBtCoexist, BTC_GET_U1_WIFI_HS_CHNL, &wifiHsChnl);
+	CL_SPRINTF(cliBuf, BT_TMP_BUF_SIZE, "\r\n %-35s = %d / %d(%d)", "Dot11 channel / HsChnl(HsMode)", \
+		wifiDot11Chnl, wifiHsChnl, bBtHsOn);
+	CL_PRINTF(cliBuf);
+
+	CL_SPRINTF(cliBuf, BT_TMP_BUF_SIZE, "\r\n %-35s = %02x %02x %02x ", "H2C Wifi inform bt chnl Info", \
+		pCoexDm->wifiChnlInfo[0], pCoexDm->wifiChnlInfo[1],
+		pCoexDm->wifiChnlInfo[2]);
+	CL_PRINTF(cliBuf);
+
+	pBtCoexist->btc_get(pBtCoexist, BTC_GET_S4_WIFI_RSSI, &wifiRssi);
+	pBtCoexist->btc_get(pBtCoexist, BTC_GET_S4_HS_RSSI, &btHsRssi);
+	CL_SPRINTF(cliBuf, BT_TMP_BUF_SIZE, "\r\n %-35s = %d/ %d", "Wifi rssi/ HS rssi", \
+		wifiRssi, btHsRssi);
+	CL_PRINTF(cliBuf);
+
+	pBtCoexist->btc_get(pBtCoexist, BTC_GET_BL_WIFI_SCAN, &bScan);
+	pBtCoexist->btc_get(pBtCoexist, BTC_GET_BL_WIFI_LINK, &bLink);
+	pBtCoexist->btc_get(pBtCoexist, BTC_GET_BL_WIFI_ROAM, &bRoam);
+	CL_SPRINTF(cliBuf, BT_TMP_BUF_SIZE, "\r\n %-35s = %d/ %d/ %d ", "Wifi bLink/ bRoam/ bScan", \
+		bLink, bRoam, bScan);
+	CL_PRINTF(cliBuf);
+
+	pBtCoexist->btc_get(pBtCoexist, BTC_GET_BL_WIFI_UNDER_5G, &bWifiUnder5G);
+	pBtCoexist->btc_get(pBtCoexist, BTC_GET_U4_WIFI_BW, &wifiBw);
+	pBtCoexist->btc_get(pBtCoexist, BTC_GET_BL_WIFI_BUSY, &bWifiBusy);
+	pBtCoexist->btc_get(pBtCoexist, BTC_GET_U4_WIFI_TRAFFIC_DIRECTION, &wifiTrafficDir);
+	CL_SPRINTF(cliBuf, BT_TMP_BUF_SIZE, "\r\n %-35s = %s / %s/ %s ", "Wifi status", \
+		(bWifiUnder5G? "5G":"2.4G"),
+		((BTC_WIFI_BW_LEGACY==wifiBw)? "Legacy": (((BTC_WIFI_BW_HT40==wifiBw)? "HT40":"HT20"))),
+		((!bWifiBusy)? "idle": ((BTC_WIFI_TRAFFIC_TX==wifiTrafficDir)? "uplink":"downlink")));
+	CL_PRINTF(cliBuf);
+	CL_SPRINTF(cliBuf, BT_TMP_BUF_SIZE, "\r\n %-35s = [%s/ %d/ %d] ", "BT [status/ rssi/ retryCnt]", \
+		((pBtCoexist->btInfo.bBtDisabled)? ("disabled"):	((pCoexSta->bC2hBtInquiryPage)?("inquiry/page scan"):((BT_8192E_1ANT_BT_STATUS_NON_CONNECTED_IDLE == pCoexDm->btStatus)? "non-connected idle":
+		(  (BT_8192E_1ANT_BT_STATUS_CONNECTED_IDLE == pCoexDm->btStatus)? "connected-idle":"busy")))),
+		pCoexSta->btRssi, pCoexSta->btRetryCnt);
+	CL_PRINTF(cliBuf);
+	
+	CL_SPRINTF(cliBuf, BT_TMP_BUF_SIZE, "\r\n %-35s = %d / %d / %d / %d", "SCO/HID/PAN/A2DP", \
+		pBtLinkInfo->bScoExist, pBtLinkInfo->bHidExist, pBtLinkInfo->bPanExist, pBtLinkInfo->bA2dpExist);
+	CL_PRINTF(cliBuf);
+	pBtCoexist->btc_disp_dbg_msg(pBtCoexist, BTC_DBG_DISP_BT_LINK_INFO);	
+
+	btInfoExt = pCoexSta->btInfoExt;
+	CL_SPRINTF(cliBuf, BT_TMP_BUF_SIZE, "\r\n %-35s = %s", "BT Info A2DP rate", \
+		(btInfoExt&BIT0)? "Basic rate":"EDR rate");
+	CL_PRINTF(cliBuf);	
+
+	for(i=0; i<BT_INFO_SRC_8192E_1ANT_MAX; i++)
+	{
+		if(pCoexSta->btInfoC2hCnt[i])
+		{				
+			CL_SPRINTF(cliBuf, BT_TMP_BUF_SIZE, "\r\n %-35s = %02x %02x %02x %02x %02x %02x %02x(%d)", GLBtInfoSrc8192e1Ant[i], \
+				pCoexSta->btInfoC2h[i][0], pCoexSta->btInfoC2h[i][1],
+				pCoexSta->btInfoC2h[i][2], pCoexSta->btInfoC2h[i][3],
+				pCoexSta->btInfoC2h[i][4], pCoexSta->btInfoC2h[i][5],
+				pCoexSta->btInfoC2h[i][6], pCoexSta->btInfoC2hCnt[i]);
+			CL_PRINTF(cliBuf);
+		}
+	}
+	CL_SPRINTF(cliBuf, BT_TMP_BUF_SIZE, "\r\n %-35s = %s/%s, (0x%x/0x%x)", "PS state, IPS/LPS, (lps/rpwm)", \
+		((pCoexSta->bUnderIps? "IPS ON":"IPS OFF")),
+		((pCoexSta->bUnderLps? "LPS ON":"LPS OFF")), 
+		pBtCoexist->btInfo.lps1Ant, 
+		pBtCoexist->btInfo.rpwm_1ant);
+	CL_PRINTF(cliBuf);
+	pBtCoexist->btc_disp_dbg_msg(pBtCoexist, BTC_DBG_DISP_FW_PWR_MODE_CMD);
+
+	CL_SPRINTF(cliBuf, BT_TMP_BUF_SIZE, "\r\n %-35s = 0x%x ", "SS Type", \
+		pCoexDm->curSsType);
+	CL_PRINTF(cliBuf);
+
+	if(!pBtCoexist->manual_control)
+	{
+		// Sw mechanism	
+		CL_SPRINTF(cliBuf, BT_TMP_BUF_SIZE, "\r\n %-35s", "============[Sw mechanism]============");
+		CL_PRINTF(cliBuf);
+	
+		CL_SPRINTF(cliBuf, BT_TMP_BUF_SIZE, "\r\n %-35s = %d/ %d/ %d/ %d ", "SM1[ShRf/ LpRA/ LimDig/ btLna]", \
+			pCoexDm->bCurRfRxLpfShrink, pCoexDm->bCurLowPenaltyRa, pCoexDm->limited_dig, pCoexDm->bCurBtLnaConstrain);
+		CL_PRINTF(cliBuf);
+		CL_SPRINTF(cliBuf, BT_TMP_BUF_SIZE, "\r\n %-35s = %d/ %d/ %d(0x%x) ", "SM2[AgcT/ AdcB/ SwDacSwing(lvl)]", \
+			pCoexDm->bCurAgcTableEn, pCoexDm->bCurAdcBackOff, pCoexDm->bCurDacSwingOn, pCoexDm->curDacSwingLvl);
+		CL_PRINTF(cliBuf);
+
+		CL_SPRINTF(cliBuf, BT_TMP_BUF_SIZE, "\r\n %-35s = %s/ %s/ %d ", "DelBA/ BtCtrlAgg/ AggSize", \
+			(pBtCoexist->btInfo.reject_agg_pkt? "Yes":"No"), (pBtCoexist->btInfo.b_bt_ctrl_agg_buf_size? "Yes":"No"),
+				pBtCoexist->btInfo.aggBufSize);
+		CL_PRINTF(cliBuf);
+		CL_SPRINTF(cliBuf, BT_TMP_BUF_SIZE, "\r\n %-35s = 0x%x ", "Rate Mask", \
+				pBtCoexist->btInfo.ra_mask);
+		CL_PRINTF(cliBuf);
+	
+		// Fw mechanism		
+		CL_SPRINTF(cliBuf, BT_TMP_BUF_SIZE, "\r\n %-35s", "============[Fw mechanism]============");
+		CL_PRINTF(cliBuf);	
+
+		psTdmaCase = pCoexDm->curPsTdma;
+		CL_SPRINTF(cliBuf, BT_TMP_BUF_SIZE, "\r\n %-35s = %02x %02x %02x %02x %02x case-%d (auto:%d)", "PS TDMA", \
+			pCoexDm->psTdmaPara[0], pCoexDm->psTdmaPara[1],
+			pCoexDm->psTdmaPara[2], pCoexDm->psTdmaPara[3],
+			pCoexDm->psTdmaPara[4], psTdmaCase, pCoexDm->bAutoTdmaAdjust);
+		CL_PRINTF(cliBuf);
+
+		CL_SPRINTF(cliBuf, BT_TMP_BUF_SIZE, "\r\n %-35s = 0x%x ", "Latest error condition(should be 0)", \
+			pCoexDm->errorCondition);
+		CL_PRINTF(cliBuf);
+		
+		CL_SPRINTF(cliBuf, BT_TMP_BUF_SIZE, "\r\n %-35s = %d/ %d ", "DecBtPwrLvl/ IgnWlanAct", \
+			pCoexDm->curBtDecPwrLvl, pCoexDm->bCurIgnoreWlanAct);
+		CL_PRINTF(cliBuf);
+	}
+
+	// Hw setting		
+	CL_SPRINTF(cliBuf, BT_TMP_BUF_SIZE, "\r\n %-35s", "============[Hw setting]============");
+	CL_PRINTF(cliBuf);	
+
+	CL_SPRINTF(cliBuf, BT_TMP_BUF_SIZE, "\r\n %-35s = 0x%x", "RF-A, 0x1e initVal", \
+		pCoexDm->btRf0x1eBackup);
+	CL_PRINTF(cliBuf);
+
+	u4Tmp[0] = pBtCoexist->btc_read_4byte(pBtCoexist, 0xc04);
+	u4Tmp[1] = pBtCoexist->btc_read_4byte(pBtCoexist, 0xd04);
+	u4Tmp[2] = pBtCoexist->btc_read_4byte(pBtCoexist, 0x90c);
+	CL_SPRINTF(cliBuf, BT_TMP_BUF_SIZE, "\r\n %-35s = 0x%x/ 0x%x/ 0x%x", "0xc04/ 0xd04/ 0x90c", \
+		u4Tmp[0], u4Tmp[1], u4Tmp[2]);
+	CL_PRINTF(cliBuf);
+
+	u1Tmp[0] = pBtCoexist->btc_read_1byte(pBtCoexist, 0x778);
+	CL_SPRINTF(cliBuf, BT_TMP_BUF_SIZE, "\r\n %-35s = 0x%x", "0x778", \
+		u1Tmp[0]);
+	CL_PRINTF(cliBuf);
+	
+	u1Tmp[0] = pBtCoexist->btc_read_1byte(pBtCoexist, 0x92c);
+	u4Tmp[0] = pBtCoexist->btc_read_4byte(pBtCoexist, 0x930);
+	CL_SPRINTF(cliBuf, BT_TMP_BUF_SIZE, "\r\n %-35s = 0x%x/ 0x%x", "0x92c/ 0x930", \
+		(u1Tmp[0]), u4Tmp[0]);
+	CL_PRINTF(cliBuf);
+
+	u1Tmp[0] = pBtCoexist->btc_read_1byte(pBtCoexist, 0x40);
+	u1Tmp[1] = pBtCoexist->btc_read_1byte(pBtCoexist, 0x4f);
+	CL_SPRINTF(cliBuf, BT_TMP_BUF_SIZE, "\r\n %-35s = 0x%x/ 0x%x", "0x40/ 0x4f", \
+		u1Tmp[0], u1Tmp[1]);
+	CL_PRINTF(cliBuf);
+
+	u4Tmp[0] = pBtCoexist->btc_read_4byte(pBtCoexist, 0x550);
+	u1Tmp[0] = pBtCoexist->btc_read_1byte(pBtCoexist, 0x522);
+	CL_SPRINTF(cliBuf, BT_TMP_BUF_SIZE, "\r\n %-35s = 0x%x/ 0x%x", "0x550(bcn ctrl)/0x522", \
+		u4Tmp[0], u1Tmp[0]);
+	CL_PRINTF(cliBuf);
+
+	u4Tmp[0] = pBtCoexist->btc_read_4byte(pBtCoexist, 0xc50);
+	CL_SPRINTF(cliBuf, BT_TMP_BUF_SIZE, "\r\n %-35s = 0x%x", "0xc50(dig)", \
+		u4Tmp[0]);
+	CL_PRINTF(cliBuf);
+
+	u4Tmp[0] = pBtCoexist->btc_read_4byte(pBtCoexist, 0x6c0);
+	u4Tmp[1] = pBtCoexist->btc_read_4byte(pBtCoexist, 0x6c4);
+	u4Tmp[2] = pBtCoexist->btc_read_4byte(pBtCoexist, 0x6c8);
+	u1Tmp[0] = pBtCoexist->btc_read_1byte(pBtCoexist, 0x6cc);
+	CL_SPRINTF(cliBuf, BT_TMP_BUF_SIZE, "\r\n %-35s = 0x%x/ 0x%x/ 0x%x/ 0x%x", "0x6c0/0x6c4/0x6c8/0x6cc(coexTable)", \
+		u4Tmp[0], u4Tmp[1], u4Tmp[2], u1Tmp[0]);
+	CL_PRINTF(cliBuf);
+
+	CL_SPRINTF(cliBuf, BT_TMP_BUF_SIZE, "\r\n %-35s = %d/ %d", "0x770(hp rx[31:16]/tx[15:0])", \
+		pCoexSta->highPriorityRx, pCoexSta->highPriorityTx);
+	CL_PRINTF(cliBuf);
+	CL_SPRINTF(cliBuf, BT_TMP_BUF_SIZE, "\r\n %-35s = %d/ %d", "0x774(lp rx[31:16]/tx[15:0])", \
+		pCoexSta->lowPriorityRx, pCoexSta->lowPriorityTx);
+	CL_PRINTF(cliBuf);
+#if(BT_AUTO_REPORT_ONLY_8192E_1ANT == 1)
+	halbtc8192e1ant_MonitorBtCtr(pBtCoexist);
+#endif
+	
+	pBtCoexist->btc_disp_dbg_msg(pBtCoexist, BTC_DBG_DISP_COEX_STATISTICS);
+}
+
+
+VOID
+EXhalbtc8192e1ant_IpsNotify(
+	IN	PBTC_COEXIST		pBtCoexist,
+	IN	u1Byte			type
+	)
+{
+	u4Byte	u4Tmp=0;
+
+	if(pBtCoexist->manual_control ||	pBtCoexist->bStopCoexDm)
+		return;
+
+	if(BTC_IPS_ENTER == type)
+	{
+		BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY, ("[BTCoex], IPS ENTER notify\n"));
+		pCoexSta->bUnderIps = true;
+		halbtc8192e1ant_CoexAllOff(pBtCoexist);
+	}
+	else if(BTC_IPS_LEAVE == type)
+	{
+		BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY, ("[BTCoex], IPS LEAVE notify\n"));
+		pCoexSta->bUnderIps = FALSE;
+	}
+}
+
+VOID
+EXhalbtc8192e1ant_LpsNotify(
+	IN	PBTC_COEXIST		pBtCoexist,
+	IN	u1Byte			type
+	)
+{
+	if(pBtCoexist->manual_control || pBtCoexist->bStopCoexDm)
+		return;
+
+	if(BTC_LPS_ENABLE == type)
+	{
+		BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY, ("[BTCoex], LPS ENABLE notify\n"));
+		pCoexSta->bUnderLps = true;
+	}
+	else if(BTC_LPS_DISABLE == type)
+	{
+		BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY, ("[BTCoex], LPS DISABLE notify\n"));
+		pCoexSta->bUnderLps = FALSE;
+	}
+}
+
+VOID
+EXhalbtc8192e1ant_ScanNotify(
+	IN	PBTC_COEXIST		pBtCoexist,
+	IN	u1Byte			type
+	)
+{
+	BOOLEAN 		bWifiConnected=FALSE, bBtHsOn=FALSE;	
+
+	if(pBtCoexist->manual_control ||
+		pBtCoexist->bStopCoexDm ||
+		pBtCoexist->btInfo.bBtDisabled )
+		return;
+
+	pBtCoexist->btc_get(pBtCoexist, BTC_GET_BL_HS_OPERATION, &bBtHsOn);
+	if(pCoexSta->bC2hBtInquiryPage)
+	{
+		halbtc8192e1ant_ActionBtInquiry(pBtCoexist);
+		return;
+	}
+	else if(bBtHsOn)
+	{
+		halbtc8192e1ant_ActionHs(pBtCoexist);
+		return;
+	}
+
+	pBtCoexist->btc_get(pBtCoexist, BTC_GET_BL_WIFI_CONNECTED, &bWifiConnected);
+	if(BTC_SCAN_START == type)
+	{	
+		BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY, ("[BTCoex], SCAN START notify\n"));
+		if(!bWifiConnected)	// non-connected scan
+		{
+			halbtc8192e1ant_ActionWifiNotConnectedAssoAuthScan(pBtCoexist);
+		}
+		else	// wifi is connected
+		{
+			halbtc8192e1ant_ActionWifiConnectedScan(pBtCoexist);
+		}
+	}
+	else if(BTC_SCAN_FINISH == type)
+	{
+		BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY, ("[BTCoex], SCAN FINISH notify\n"));
+		if(!bWifiConnected)	// non-connected scan
+		{
+			halbtc8192e1ant_ActionWifiNotConnected(pBtCoexist);
+		}
+		else
+		{
+			halbtc8192e1ant_ActionWifiConnected(pBtCoexist);
+		}
+	}
+}
+
+VOID
+EXhalbtc8192e1ant_ConnectNotify(
+	IN	PBTC_COEXIST		pBtCoexist,
+	IN	u1Byte			type
+	)
+{
+	BOOLEAN	bWifiConnected=FALSE, bBtHsOn=FALSE;
+
+	if(pBtCoexist->manual_control ||
+		pBtCoexist->bStopCoexDm ||
+		pBtCoexist->btInfo.bBtDisabled )
+		return;
+
+	pBtCoexist->btc_get(pBtCoexist, BTC_GET_BL_HS_OPERATION, &bBtHsOn);
+	if(pCoexSta->bC2hBtInquiryPage)
+	{
+		halbtc8192e1ant_ActionBtInquiry(pBtCoexist);
+		return;
+	}
+	else if(bBtHsOn)
+	{
+		halbtc8192e1ant_ActionHs(pBtCoexist);
+		return;
+	}
+
+	if(BTC_ASSOCIATE_START == type)
+	{
+		BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY, ("[BTCoex], CONNECT START notify\n"));
+		halbtc8192e1ant_ActionWifiNotConnectedAssoAuthScan(pBtCoexist);
+	}
+	else if(BTC_ASSOCIATE_FINISH == type)
+	{
+		BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY, ("[BTCoex], CONNECT FINISH notify\n"));
+		
+		pBtCoexist->btc_get(pBtCoexist, BTC_GET_BL_WIFI_CONNECTED, &bWifiConnected);
+		if(!bWifiConnected) // association did not result in a connection
+		{
+			halbtc8192e1ant_ActionWifiNotConnected(pBtCoexist);
+		}
+		else
+		{
+			halbtc8192e1ant_ActionWifiConnected(pBtCoexist);
+		}
+	}
+}
+
+VOID
+EXhalbtc8192e1ant_MediaStatusNotify(
+	IN	PBTC_COEXIST			pBtCoexist,
+	IN	u1Byte				type
+	)
+{
+	u1Byte			H2C_Parameter[3] ={0};
+	u4Byte			wifiBw;
+	u1Byte			wifiCentralChnl;
+
+	if(pBtCoexist->manual_control ||
+		pBtCoexist->bStopCoexDm ||
+		pBtCoexist->btInfo.bBtDisabled )
+		return;
+
+	if(BTC_MEDIA_CONNECT == type)
+	{
+		BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY, ("[BTCoex], MEDIA connect notify\n"));
+	}
+	else
+	{
+		BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY, ("[BTCoex], MEDIA disconnect notify\n"));
+	}
+
+	// only for 2.4G do we need to inform BT of the channel mask
+	pBtCoexist->btc_get(pBtCoexist, BTC_GET_U1_WIFI_CENTRAL_CHNL, &wifiCentralChnl);
+	if( (BTC_MEDIA_CONNECT == type) &&
+		(wifiCentralChnl <= 14) )
+	{
+		H2C_Parameter[0] = 0x1;
+		H2C_Parameter[1] = wifiCentralChnl;
+		pBtCoexist->btc_get(pBtCoexist, BTC_GET_U4_WIFI_BW, &wifiBw);
+		if(BTC_WIFI_BW_HT40 == wifiBw)
+			H2C_Parameter[2] = 0x30;
+		else
+			H2C_Parameter[2] = 0x20;
+	}
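+	// H2C 0x66 payload as filled above: byte0 = valid flag, byte1 = WiFi central
+	// channel, byte2 = bandwidth mask (0x30 for HT40, 0x20 otherwise).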
+		
+	pCoexDm->wifiChnlInfo[0] = H2C_Parameter[0];
+	pCoexDm->wifiChnlInfo[1] = H2C_Parameter[1];
+	pCoexDm->wifiChnlInfo[2] = H2C_Parameter[2];
+	
+	BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_EXEC, ("[BTCoex], FW write 0x66=0x%x\n", 
+		H2C_Parameter[0]<<16|H2C_Parameter[1]<<8|H2C_Parameter[2]));
+
+	pBtCoexist->btc_fill_h2c(pBtCoexist, 0x66, 3, H2C_Parameter);
+}
+
+VOID
+EXhalbtc8192e1ant_SpecialPacketNotify(
+	IN	PBTC_COEXIST			pBtCoexist,
+	IN	u1Byte				type
+	)
+{
+	BOOLEAN	bBtHsOn=FALSE;
+
+	if(pBtCoexist->manual_control ||
+		pBtCoexist->bStopCoexDm ||
+		pBtCoexist->btInfo.bBtDisabled )
+		return;
+
+	pBtCoexist->btc_get(pBtCoexist, BTC_GET_BL_HS_OPERATION, &bBtHsOn);
+	if(pCoexSta->bC2hBtInquiryPage)
+	{
+		halbtc8192e1ant_ActionBtInquiry(pBtCoexist);
+		return;
+	}
+	else if(bBtHsOn)
+	{
+		halbtc8192e1ant_ActionHs(pBtCoexist);
+		return;
+	}
+
+	if( BTC_PACKET_DHCP == type ||
+		BTC_PACKET_EAPOL == type )
+	{
+		BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY, ("[BTCoex], special Packet(%d) notify\n", type));
+		halbtc8192e1ant_ActionWifiConnectedSpecialPacket(pBtCoexist);
+	}
+}
+
+VOID
+EXhalbtc8192e1ant_BtInfoNotify(
+	IN	PBTC_COEXIST		pBtCoexist,
+	IN	pu1Byte			tmpBuf,
+	IN	u1Byte			length
+	)
+{
+	PBTC_BT_LINK_INFO	pBtLinkInfo=&pBtCoexist->bt_link_info;
+	u1Byte				btInfo=0;
+	u1Byte				i, rspSource=0;
+	static u4Byte		setBtPsdMode=0;
+	BOOLEAN				bBtBusy=FALSE, limited_dig=FALSE;
+	BOOLEAN				bWifiConnected=FALSE;
+	BOOLEAN				b_bt_ctrl_agg_buf_size=FALSE;
+
+	pCoexSta->bC2hBtInfoReqSent = FALSE;
+
+	rspSource = tmpBuf[0]&0xf;
+	if(rspSource >= BT_INFO_SRC_8192E_1ANT_MAX)
+		rspSource = BT_INFO_SRC_8192E_1ANT_WIFI_FW;
+	pCoexSta->btInfoC2hCnt[rspSource]++;
+
+	BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY, ("[BTCoex], Bt info[%d], length=%d, hex data=[", rspSource, length));
+	for(i=0; i<length; i++)
+	{
+		pCoexSta->btInfoC2h[rspSource][i] = tmpBuf[i];
+		if(i == 1)
+			btInfo = tmpBuf[i];
+		if(i == length-1)
+		{
+			BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY, ("0x%02x]\n", tmpBuf[i]));
+		}
+		else
+		{
+			BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY, ("0x%02x, ", tmpBuf[i]));
+		}
+	}
+
+	if(pBtCoexist->btInfo.bBtDisabled)
+	{
+		BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], BtInfoNotify(), return for BT is disabled <===\n"));
+		return;
+	}	
+
+	if(pBtCoexist->manual_control)
+	{
+		BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], BtInfoNotify(), return for Manual CTRL<===\n"));
+		return;
+	}
+	if(pBtCoexist->bStopCoexDm)
+	{
+		BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], BtInfoNotify(), return for Coex STOPPED!!<===\n"));
+		return;
+	}
+
+	if(BT_INFO_SRC_8192E_1ANT_WIFI_FW != rspSource)
+	{
+		pCoexSta->btRetryCnt =	// [3:0]
+			pCoexSta->btInfoC2h[rspSource][2]&0xf;
+
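+		// raw C2H RSSI byte is scaled by the vendor formula (x*2 + 10);
+		// the exact unit follows the BT firmware convention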
+		pCoexSta->btRssi =
+			pCoexSta->btInfoC2h[rspSource][3]*2+10;
+
+		pCoexSta->btInfoExt = 
+			pCoexSta->btInfoC2h[rspSource][4];
+
+		// Here we need to resend some wifi info to BT
+		// because BT has been reset and has lost that info.
+		if( (pCoexSta->btInfoExt & BIT1) )
+		{			
+			BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], BT ext info bit1 check, send wifi BW&Chnl to BT!!\n"));
+			pBtCoexist->btc_get(pBtCoexist, BTC_GET_BL_WIFI_CONNECTED, &bWifiConnected);
+			if(bWifiConnected)
+			{
+				EXhalbtc8192e1ant_MediaStatusNotify(pBtCoexist, BTC_MEDIA_CONNECT);
+			}
+			else
+			{
+				EXhalbtc8192e1ant_MediaStatusNotify(pBtCoexist, BTC_MEDIA_DISCONNECT);
+			}
+
+			setBtPsdMode = 0;
+		}
+
+		// the test-chip BT patch only responds with status for BT_RSP,
+		// so for now we only consider the following under BT_RSP
+		if(BT_INFO_SRC_8192E_1ANT_BT_RSP == rspSource)
+		{
+			if( (pCoexSta->btInfoExt & BIT3) )
+			{
+				BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], BT ext info bit3 check, set BT NOT to ignore Wlan active!!\n"));
+				halbtc8192e1ant_IgnoreWlanAct(pBtCoexist, FORCE_EXEC, FALSE);
+			}
+			else
+			{
+				// BT already does NOT ignore WLAN activity, do nothing here.
+			}
+#if(BT_AUTO_REPORT_ONLY_8192E_1ANT == 0)
+			if( (pCoexSta->btInfoExt & BIT4) )
+			{
+				// BT auto report already enabled, do nothing
+			}
+			else
+			{
+				BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], BT ext info bit4 check, set BT to enable Auto Report!!\n"));
+				halbtc8192e1ant_BtAutoReport(pBtCoexist, FORCE_EXEC, true);
+			}
+#endif
+		}
+	}
+		
+	// check BIT2 first ==> check if bt is under inquiry or page scan
+	if(btInfo & BT_INFO_8192E_1ANT_B_INQ_PAGE)
+		pCoexSta->bC2hBtInquiryPage = true;
+	else
+		pCoexSta->bC2hBtInquiryPage = FALSE;
+
+	// set link exist status
+	if(!(btInfo&BT_INFO_8192E_1ANT_B_CONNECTION))
+	{
+		pCoexSta->bBtLinkExist = FALSE;
+		pCoexSta->bPanExist = FALSE;
+		pCoexSta->bA2dpExist = FALSE;
+		pCoexSta->bHidExist = FALSE;
+		pCoexSta->bScoExist = FALSE;
+	}
+	else // connection exists 
+	{
+		pCoexSta->bBtLinkExist = true;
+		if(btInfo & BT_INFO_8192E_1ANT_B_FTP)
+			pCoexSta->bPanExist = true;
+		else
+			pCoexSta->bPanExist = FALSE;
+		if(btInfo & BT_INFO_8192E_1ANT_B_A2DP)
+			pCoexSta->bA2dpExist = true;
+		else
+			pCoexSta->bA2dpExist = FALSE;
+		if(btInfo & BT_INFO_8192E_1ANT_B_HID)
+			pCoexSta->bHidExist = true;
+		else
+			pCoexSta->bHidExist = FALSE;
+		if(btInfo & BT_INFO_8192E_1ANT_B_SCO_ESCO)
+			pCoexSta->bScoExist = true;
+		else
+			pCoexSta->bScoExist = FALSE;
+	}
+
+	halbtc8192e1ant_UpdateBtLinkInfo(pBtCoexist);
+
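+	// classify pCoexDm->btStatus from the connection/busy bits so that
+	// halbtc8192e1ant_RunCoexistMechanism() can pick the matching action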
+	if(!(btInfo&BT_INFO_8192E_1ANT_B_CONNECTION))
+	{
+		pCoexDm->btStatus = BT_8192E_1ANT_BT_STATUS_NON_CONNECTED_IDLE;
+		BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], BtInfoNotify(), bt non-connected idle!!!\n"));
+	}
+	else if(btInfo == BT_INFO_8192E_1ANT_B_CONNECTION)	// connection exists but not busy
+	{
+		pCoexDm->btStatus = BT_8192E_1ANT_BT_STATUS_CONNECTED_IDLE;
+		BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], BtInfoNotify(), bt connected-idle!!!\n"));
+	}		
+	else if((btInfo&BT_INFO_8192E_1ANT_B_SCO_ESCO) ||
+		(btInfo&BT_INFO_8192E_1ANT_B_SCO_BUSY))
+	{
+		pCoexDm->btStatus = BT_8192E_1ANT_BT_STATUS_SCO_BUSY;
+		BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], BtInfoNotify(), bt sco busy!!!\n"));
+	}
+	else if( (btInfo&BT_INFO_8192E_1ANT_B_ACL_BUSY) ||
+			(btInfo&BT_INFO_8192E_1ANT_B_A2DP) ||
+			(btInfo&BT_INFO_8192E_1ANT_B_FTP) )
+	{
+		if(BT_8192E_1ANT_BT_STATUS_ACL_BUSY != pCoexDm->btStatus)
+			pCoexDm->bAutoTdmaAdjust = FALSE;
+		pCoexDm->btStatus = BT_8192E_1ANT_BT_STATUS_ACL_BUSY;
+		BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], BtInfoNotify(), bt acl busy!!!\n"));
+	}
+	else
+	{
+		pCoexDm->btStatus = BT_8192E_1ANT_BT_STATUS_MAX;
+		BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], BtInfoNotify(), bt non-defined state!!!\n"));
+	}
+
+	// ra mask check
+	if(pBtLinkInfo->bScoExist || pBtLinkInfo->bHidExist)
+	{
+		halbtc8192e1ant_Updatera_mask(pBtCoexist, NORMAL_EXEC, BTC_RATE_DISABLE, 0x00000003);	// disable tx cck 1M/2M
+	}
+	else
+	{
+		halbtc8192e1ant_Updatera_mask(pBtCoexist, NORMAL_EXEC, BTC_RATE_ENABLE, 0x00000003);	// enable tx cck 1M/2M
+	}
+	
+	if( (BT_8192E_1ANT_BT_STATUS_ACL_BUSY == pCoexDm->btStatus) ||
+		(BT_8192E_1ANT_BT_STATUS_SCO_BUSY == pCoexDm->btStatus) ||
+		(BT_8192E_1ANT_BT_STATUS_ACL_SCO_BUSY == pCoexDm->btStatus) )
+	{
+		bBtBusy = true;
+		limited_dig = true;
+		if(pBtLinkInfo->bHidExist)
+			b_bt_ctrl_agg_buf_size = true;
+	}
+	else
+	{
+		bBtBusy = FALSE;
+		limited_dig = FALSE;
+	}
+	pBtCoexist->btc_set(pBtCoexist, BTC_SET_BL_BT_TRAFFIC_BUSY, &bBtBusy);
+
+	//============================================
+	//	Aggregation related setting
+	//============================================
+	// if sco, reject AddBA
+	//pBtCoexist->btc_set(pBtCoexist, BTC_SET_BL_TO_REJ_AP_AGG_PKT, &bRejApAggPkt);
+
+	// decide BT control aggregation buf size or not
+	pBtCoexist->btc_set(pBtCoexist, BTC_SET_BL_BT_CTRL_AGG_SIZE, &b_bt_ctrl_agg_buf_size);
+	// real update aggregation setting
+	pBtCoexist->btc_set(pBtCoexist, BTC_SET_ACT_AGGREGATE_CTRL, NULL);
+	//============================================
+
+	pCoexDm->limited_dig = limited_dig;
+	pBtCoexist->btc_set(pBtCoexist, BTC_SET_BL_BT_LIMITED_DIG, &limited_dig);
+
+	halbtc8192e1ant_RunCoexistMechanism(pBtCoexist);
+}
+
+VOID
+EXhalbtc8192e1ant_StackOperationNotify(
+	IN	PBTC_COEXIST			pBtCoexist,
+	IN	u1Byte				type
+	)
+{
+	if(BTC_STACK_OP_INQ_PAGE_PAIR_START == type)
+	{
+		BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY, ("[BTCoex], StackOP Inquiry/page/pair start notify\n"));
+	}
+	else if(BTC_STACK_OP_INQ_PAGE_PAIR_FINISH == type)
+	{
+		BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY, ("[BTCoex], StackOP Inquiry/page/pair finish notify\n"));
+	}
+}
+
+VOID
+EXhalbtc8192e1ant_HaltNotify(
+	IN	PBTC_COEXIST			pBtCoexist
+	)
+{
+	BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY, ("[BTCoex], Halt notify\n"));
+
+	pBtCoexist->bStopCoexDm = true;
+	halbtc8192e1ant_IgnoreWlanAct(pBtCoexist, FORCE_EXEC, true);
+
+	halbtc8192e1ant_PowerSaveState(pBtCoexist, BTC_PS_WIFI_NATIVE, 0x0, 0x0);
+	
+	halbtc8192e1ant_PsTdma(pBtCoexist, FORCE_EXEC, FALSE, 0);
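+	// 0x4f=0xf is assumed to hand antenna control back to the BT side
+	// (the init path writes 0x4f=0x6 for WiFi-side control).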
+	pBtCoexist->btc_write_1byte(pBtCoexist, 0x4f, 0xf);
+
+	EXhalbtc8192e1ant_MediaStatusNotify(pBtCoexist, BTC_MEDIA_DISCONNECT);
+}
+
+VOID
+EXhalbtc8192e1ant_PnpNotify(
+	IN	PBTC_COEXIST			pBtCoexist,
+	IN	u1Byte				pnpState
+	)
+{
+	BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY, ("[BTCoex], Pnp notify\n"));
+
+	if(BTC_WIFI_PNP_SLEEP == pnpState)
+	{
+		pBtCoexist->bStopCoexDm = true;
+		halbtc8192e1ant_IgnoreWlanAct(pBtCoexist, FORCE_EXEC, true);
+		halbtc8192e1ant_PowerSaveState(pBtCoexist, BTC_PS_WIFI_NATIVE, 0x0, 0x0);
+		halbtc8192e1ant_PsTdma(pBtCoexist, NORMAL_EXEC, FALSE, 9);	
+	}
+	else if(BTC_WIFI_PNP_WAKE_UP == pnpState)
+	{
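+		// nothing to do on wake-up here; coex re-init is assumed to come
+		// from the normal init/connect notifications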
+		
+	}
+}
+
+VOID
+EXhalbtc8192e1ant_Periodical(
+	IN	PBTC_COEXIST			pBtCoexist
+	)
+{
+	static u1Byte		disVerInfoCnt=0;
+	u4Byte				fwVer=0, btPatchVer=0;
+	struct btc_board_info *		pBoardInfo=&pBtCoexist->board_info;
+	PBTC_STACK_INFO		pStackInfo=&pBtCoexist->stack_info;
+
+	BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], ==========================Periodical===========================\n"));
+
+	if(disVerInfoCnt <= 5)
+	{
+		disVerInfoCnt += 1;
+		BTC_PRINT(BTC_MSG_INTERFACE, INTF_INIT, ("[BTCoex], ****************************************************************\n"));
+		BTC_PRINT(BTC_MSG_INTERFACE, INTF_INIT, ("[BTCoex], Ant PG Num/ Ant Mech/ Ant Pos = %d/ %d/ %d\n", \
+			pBoardInfo->pg_ant_num, pBoardInfo->btdm_ant_num, pBoardInfo->btdm_ant_pos));
+		BTC_PRINT(BTC_MSG_INTERFACE, INTF_INIT, ("[BTCoex], BT stack/ hci ext ver = %s / %d\n", \
+			((pStackInfo->bProfileNotified)? "Yes":"No"), pStackInfo->hciVersion));
+		pBtCoexist->btc_get(pBtCoexist, BTC_GET_U4_BT_PATCH_VER, &btPatchVer);
+		pBtCoexist->btc_get(pBtCoexist, BTC_GET_U4_WIFI_FW_VER, &fwVer);
+		BTC_PRINT(BTC_MSG_INTERFACE, INTF_INIT, ("[BTCoex], CoexVer/ FwVer/ PatchVer = %d_%x/ 0x%x/ 0x%x(%d)\n", \
+			GLCoexVerDate8192e1Ant, GLCoexVer8192e1Ant, fwVer, btPatchVer, btPatchVer));
+		BTC_PRINT(BTC_MSG_INTERFACE, INTF_INIT, ("[BTCoex], ****************************************************************\n"));
+	}
+#if(BT_AUTO_REPORT_ONLY_8192E_1ANT == 0)
+	halbtc8192e1ant_QueryBtInfo(pBtCoexist);
+	halbtc8192e1ant_MonitorBtCtr(pBtCoexist);
+	halbtc8192e1ant_MonitorBtEnableDisable(pBtCoexist);
+#else
+	if( halbtc8192e1ant_IsWifiStatusChanged(pBtCoexist) ||
+		pCoexDm->bAutoTdmaAdjust)
+	{
+		halbtc8192e1ant_RunCoexistMechanism(pBtCoexist);
+	}
+#endif
+}
+
+VOID
+EXhalbtc8192e1ant_DbgControl(
+	IN	PBTC_COEXIST			pBtCoexist,
+	IN	u1Byte				opCode,
+	IN	u1Byte				opLen,
+	IN	pu1Byte				pData
+	)
+{
+	switch(opCode)
+	{
+		case BTC_DBG_SET_COEX_NORMAL:
+			pBtCoexist->manual_control = FALSE;
+			halbtc8192e1ant_InitCoexDm(pBtCoexist);
+			break;
+		case BTC_DBG_SET_COEX_WIFI_ONLY:
+			pBtCoexist->manual_control = true;
+			halbtc8192e1ant_PowerSaveState(pBtCoexist, BTC_PS_WIFI_NATIVE, 0x0, 0x0);
+			halbtc8192e1ant_PsTdma(pBtCoexist, NORMAL_EXEC, FALSE, 9);	
+			break;
+		case BTC_DBG_SET_COEX_BT_ONLY:
+			// todo
+			break;
+		default:
+			break;
+	}
+}
+#endif
+
diff --git a/drivers/staging/rtl8821ae/btcoexist/halbtc8192e1ant.h b/drivers/staging/rtl8821ae/btcoexist/halbtc8192e1ant.h
new file mode 100644
index 0000000..a759b75
--- /dev/null
+++ b/drivers/staging/rtl8821ae/btcoexist/halbtc8192e1ant.h
@@ -0,0 +1,226 @@
+//===========================================
+// The following is for 8192E_1ANT BT Co-exist definition
+//===========================================
+#define	BT_AUTO_REPORT_ONLY_8192E_1ANT				0
+
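+// bit layout of the BT info byte reported by BT firmware via C2H
+// (parsed in EXhalbtc8192e1ant_BtInfoNotify)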
+#define	BT_INFO_8192E_1ANT_B_FTP						BIT7
+#define	BT_INFO_8192E_1ANT_B_A2DP					BIT6
+#define	BT_INFO_8192E_1ANT_B_HID						BIT5
+#define	BT_INFO_8192E_1ANT_B_SCO_BUSY				BIT4
+#define	BT_INFO_8192E_1ANT_B_ACL_BUSY				BIT3
+#define	BT_INFO_8192E_1ANT_B_INQ_PAGE				BIT2
+#define	BT_INFO_8192E_1ANT_B_SCO_ESCO				BIT1
+#define	BT_INFO_8192E_1ANT_B_CONNECTION				BIT0
+
+#define	BT_INFO_8192E_1ANT_A2DP_BASIC_RATE(_BT_INFO_EXT_)	\
+		(((_BT_INFO_EXT_&BIT0))? true:FALSE)
+
+#define	BTC_RSSI_COEX_THRESH_TOL_8192E_1ANT		2
+
+typedef enum _BT_INFO_SRC_8192E_1ANT{
+	BT_INFO_SRC_8192E_1ANT_WIFI_FW			= 0x0,
+	BT_INFO_SRC_8192E_1ANT_BT_RSP				= 0x1,
+	BT_INFO_SRC_8192E_1ANT_BT_ACTIVE_SEND		= 0x2,
+	BT_INFO_SRC_8192E_1ANT_MAX
+}BT_INFO_SRC_8192E_1ANT,*PBT_INFO_SRC_8192E_1ANT;
+
+typedef enum _BT_8192E_1ANT_BT_STATUS{
+	BT_8192E_1ANT_BT_STATUS_NON_CONNECTED_IDLE	= 0x0,
+	BT_8192E_1ANT_BT_STATUS_CONNECTED_IDLE		= 0x1,
+	BT_8192E_1ANT_BT_STATUS_INQ_PAGE				= 0x2,
+	BT_8192E_1ANT_BT_STATUS_ACL_BUSY				= 0x3,
+	BT_8192E_1ANT_BT_STATUS_SCO_BUSY				= 0x4,
+	BT_8192E_1ANT_BT_STATUS_ACL_SCO_BUSY			= 0x5,
+	BT_8192E_1ANT_BT_STATUS_MAX
+}BT_8192E_1ANT_BT_STATUS,*PBT_8192E_1ANT_BT_STATUS;
+
+typedef enum _BT_8192E_1ANT_WIFI_STATUS{
+	BT_8192E_1ANT_WIFI_STATUS_NON_CONNECTED_IDLE				= 0x0,
+	BT_8192E_1ANT_WIFI_STATUS_NON_CONNECTED_ASSO_AUTH_SCAN		= 0x1,
+	BT_8192E_1ANT_WIFI_STATUS_CONNECTED_SCAN					= 0x2,
+	BT_8192E_1ANT_WIFI_STATUS_CONNECTED_SPECIAL_PKT				= 0x3,
+	BT_8192E_1ANT_WIFI_STATUS_CONNECTED_IDLE					= 0x4,
+	BT_8192E_1ANT_WIFI_STATUS_CONNECTED_BUSY					= 0x5,
+	BT_8192E_1ANT_WIFI_STATUS_MAX
+}BT_8192E_1ANT_WIFI_STATUS,*PBT_8192E_1ANT_WIFI_STATUS;
+
+typedef enum _BT_8192E_1ANT_COEX_ALGO{
+	BT_8192E_1ANT_COEX_ALGO_UNDEFINED			= 0x0,
+	BT_8192E_1ANT_COEX_ALGO_SCO				= 0x1,
+	BT_8192E_1ANT_COEX_ALGO_HID				= 0x2,
+	BT_8192E_1ANT_COEX_ALGO_A2DP				= 0x3,
+	BT_8192E_1ANT_COEX_ALGO_A2DP_PANHS		= 0x4,
+	BT_8192E_1ANT_COEX_ALGO_PANEDR			= 0x5,
+	BT_8192E_1ANT_COEX_ALGO_PANHS			= 0x6,
+	BT_8192E_1ANT_COEX_ALGO_PANEDR_A2DP		= 0x7,
+	BT_8192E_1ANT_COEX_ALGO_PANEDR_HID		= 0x8,
+	BT_8192E_1ANT_COEX_ALGO_HID_A2DP_PANEDR	= 0x9,
+	BT_8192E_1ANT_COEX_ALGO_HID_A2DP			= 0xa,
+	BT_8192E_1ANT_COEX_ALGO_MAX				= 0xb,
+}BT_8192E_1ANT_COEX_ALGO,*PBT_8192E_1ANT_COEX_ALGO;
+
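+// Coex dynamic-mechanism state: previous/current values are kept for each
+// FW/SW setting so that NORMAL_EXEC updates can be skipped when nothing changed.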
+typedef struct _COEX_DM_8192E_1ANT{
+	// fw mechanism
+	u1Byte		preBtDecPwrLvl;
+	u1Byte		curBtDecPwrLvl;
+	BOOLEAN		bPreBtLnaConstrain;
+	BOOLEAN		bCurBtLnaConstrain;
+	u1Byte		bPreBtPsdMode;
+	u1Byte		bCurBtPsdMode;
+	u1Byte		preFwDacSwingLvl;
+	u1Byte		curFwDacSwingLvl;
+	BOOLEAN		bCurIgnoreWlanAct;
+	BOOLEAN		bPreIgnoreWlanAct;
+	u1Byte		prePsTdma;
+	u1Byte		curPsTdma;
+	u1Byte		psTdmaPara[5];
+	u1Byte		psTdmaDuAdjType;
+	BOOLEAN		bAutoTdmaAdjust;
+	BOOLEAN		bPrePsTdmaOn;
+	BOOLEAN		bCurPsTdmaOn;
+	BOOLEAN		bPreBtAutoReport;
+	BOOLEAN		bCurBtAutoReport;
+	u1Byte		preLps;
+	u1Byte		curLps;
+	u1Byte		preRpwm;
+	u1Byte		curRpwm;
+
+	// sw mechanism
+	BOOLEAN		bPreRfRxLpfShrink;
+	BOOLEAN		bCurRfRxLpfShrink;
+	u4Byte		btRf0x1eBackup;
+	BOOLEAN 	bPreLowPenaltyRa;
+	BOOLEAN		bCurLowPenaltyRa;
+	BOOLEAN		bPreDacSwingOn;
+	u4Byte		preDacSwingLvl;
+	BOOLEAN		bCurDacSwingOn;
+	u4Byte		curDacSwingLvl;
+	BOOLEAN		bPreAdcBackOff;
+	BOOLEAN		bCurAdcBackOff;
+	BOOLEAN 	bPreAgcTableEn;
+	BOOLEAN		bCurAgcTableEn;
+	u4Byte		preVal0x6c0;
+	u4Byte		curVal0x6c0;
+	u4Byte		preVal0x6c4;
+	u4Byte		curVal0x6c4;
+	u4Byte		preVal0x6c8;
+	u4Byte		curVal0x6c8;
+	u1Byte		preVal0x6cc;
+	u1Byte		curVal0x6cc;
+	BOOLEAN		limited_dig;
+
+	// algorithm related
+	u1Byte		preAlgorithm;
+	u1Byte		curAlgorithm;
+	u1Byte		btStatus;
+	u1Byte		wifiChnlInfo[3];
+
+	u1Byte		preSsType;
+	u1Byte		curSsType;
+
+	u4Byte		prera_mask;
+	u4Byte		curra_mask;
+
+	u1Byte		errorCondition;
+} COEX_DM_8192E_1ANT, *PCOEX_DM_8192E_1ANT;
+
+typedef struct _COEX_STA_8192E_1ANT{
+	BOOLEAN					bBtLinkExist;
+	BOOLEAN					bScoExist;
+	BOOLEAN					bA2dpExist;
+	BOOLEAN					bHidExist;
+	BOOLEAN					bPanExist;
+
+	BOOLEAN					bUnderLps;
+	BOOLEAN					bUnderIps;
+	u4Byte					highPriorityTx;
+	u4Byte					highPriorityRx;
+	u4Byte					lowPriorityTx;
+	u4Byte					lowPriorityRx;
+	u1Byte					btRssi;
+	u1Byte					preBtRssiState;
+	u1Byte					preWifiRssiState[4];
+	BOOLEAN					bC2hBtInfoReqSent;
+	u1Byte					btInfoC2h[BT_INFO_SRC_8192E_1ANT_MAX][10];
+	u4Byte					btInfoC2hCnt[BT_INFO_SRC_8192E_1ANT_MAX];
+	BOOLEAN					bC2hBtInquiryPage;
+	u1Byte					btRetryCnt;
+	u1Byte					btInfoExt;
+}COEX_STA_8192E_1ANT, *PCOEX_STA_8192E_1ANT;
+
+//===========================================
+// The following is interface which will notify coex module.
+//===========================================
+VOID
+EXhalbtc8192e1ant_InitHwConfig(
+	IN	PBTC_COEXIST		pBtCoexist
+	);
+VOID
+EXhalbtc8192e1ant_InitCoexDm(
+	IN	PBTC_COEXIST		pBtCoexist
+	);
+VOID
+EXhalbtc8192e1ant_IpsNotify(
+	IN	PBTC_COEXIST		pBtCoexist,
+	IN	u1Byte			type
+	);
+VOID
+EXhalbtc8192e1ant_LpsNotify(
+	IN	PBTC_COEXIST		pBtCoexist,
+	IN	u1Byte			type
+	);
+VOID
+EXhalbtc8192e1ant_ScanNotify(
+	IN	PBTC_COEXIST		pBtCoexist,
+	IN	u1Byte			type
+	);
+VOID
+EXhalbtc8192e1ant_ConnectNotify(
+	IN	PBTC_COEXIST		pBtCoexist,
+	IN	u1Byte			type
+	);
+VOID
+EXhalbtc8192e1ant_MediaStatusNotify(
+	IN	PBTC_COEXIST			pBtCoexist,
+	IN	u1Byte				type
+	);
+VOID
+EXhalbtc8192e1ant_SpecialPacketNotify(
+	IN	PBTC_COEXIST			pBtCoexist,
+	IN	u1Byte				type
+	);
+VOID
+EXhalbtc8192e1ant_BtInfoNotify(
+	IN	PBTC_COEXIST		pBtCoexist,
+	IN	pu1Byte			tmpBuf,
+	IN	u1Byte			length
+	);
+VOID
+EXhalbtc8192e1ant_StackOperationNotify(
+	IN	PBTC_COEXIST			pBtCoexist,
+	IN	u1Byte				type
+	);
+VOID
+EXhalbtc8192e1ant_HaltNotify(
+	IN	PBTC_COEXIST			pBtCoexist
+	);
+VOID
+EXhalbtc8192e1ant_PnpNotify(
+	IN	PBTC_COEXIST			pBtCoexist,
+	IN	u1Byte				pnpState
+	);
+VOID
+EXhalbtc8192e1ant_Periodical(
+	IN	PBTC_COEXIST			pBtCoexist
+	);
+VOID
+EXhalbtc8192e1ant_DisplayCoexInfo(
+	IN	PBTC_COEXIST		pBtCoexist
+	);
+VOID
+EXhalbtc8192e1ant_DbgControl(
+	IN	PBTC_COEXIST			pBtCoexist,
+	IN	u1Byte				opCode,
+	IN	u1Byte				opLen,
+	IN	pu1Byte 			pData
+	);
diff --git a/drivers/staging/rtl8821ae/btcoexist/halbtc8192e2ant.c b/drivers/staging/rtl8821ae/btcoexist/halbtc8192e2ant.c
new file mode 100644
index 0000000..44ec785
--- /dev/null
+++ b/drivers/staging/rtl8821ae/btcoexist/halbtc8192e2ant.c
@@ -0,0 +1,4242 @@
+/**************************************************************
+ * Description:
+ *
+ * This file is for RTL8192E Co-exist mechanism
+ *
+ * History
+ * 2012/11/15 Cosa first check in.
+ *
+ **************************************************************/
+
+/**************************************************************
+ *   include files
+ **************************************************************/
+#include "halbt_precomp.h"
+#if 1
+/**************************************************************
+ *   Global variables, these are static variables
+ **************************************************************/
+static struct coex_dm_8192e_2ant glcoex_dm_8192e_2ant;
+static struct coex_dm_8192e_2ant *coex_dm = &glcoex_dm_8192e_2ant;
+static struct coex_sta_8192e_2ant glcoex_sta_8192e_2ant;
+static struct coex_sta_8192e_2ant *coex_sta = &glcoex_sta_8192e_2ant;
+
+const char *const GLBtInfoSrc8192e2Ant[] = {
+	"BT Info[wifi fw]",
+	"BT Info[bt rsp]",
+	"BT Info[bt auto report]",
+};
+
+u32 glcoex_ver_date_8192e_2ant = 20130902;
+u32 glcoex_ver_8192e_2ant = 0x34;
+
+/**************************************************************
+ *   local function proto type if needed
+ **************************************************************/
+/**************************************************************
+ *   local function start with halbtc8192e2ant_
+ **************************************************************/
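+/* Map the current BT RSSI to a LOW/MEDIUM/HIGH state with hysteresis:
+ * a lower state is only left once the RSSI exceeds the threshold plus
+ * BTC_RSSI_COEX_THRESH_TOL_8192E_2ANT, so the state does not toggle
+ * around the threshold.  level_num selects a 2- or 3-level mapping. */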
+u8 halbtc8192e2ant_btrssi_state(u8 level_num, u8 rssi_thresh, u8 rssi_thresh1)
+{
+	int btrssi = 0;
+	u8 btrssi_state = coex_sta->pre_bt_rssi_state;
+
+	btrssi = coex_sta->bt_rssi;
+
+	if (level_num == 2) {
+		if ((coex_sta->pre_bt_rssi_state == BTC_RSSI_STATE_LOW) ||
+		    (coex_sta->pre_bt_rssi_state == BTC_RSSI_STATE_STAY_LOW)) {
+			BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_RSSI_STATE,
+				  "BT Rssi pre state=LOW\n");
+			if (btrssi >= (rssi_thresh +
+				       BTC_RSSI_COEX_THRESH_TOL_8192E_2ANT)) {
+				btrssi_state = BTC_RSSI_STATE_HIGH;
+				BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_RSSI_STATE,
+					  "BT Rssi state switch to High\n");
+			} else {
+				btrssi_state = BTC_RSSI_STATE_STAY_LOW;
+				BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_RSSI_STATE,
+					  "BT Rssi state stay at Low\n");
+			}
+		} else {
+			BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_RSSI_STATE,
+				  "BT Rssi pre state=HIGH\n");
+			if (btrssi < rssi_thresh) {
+				btrssi_state = BTC_RSSI_STATE_LOW;
+				BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_RSSI_STATE,
+					  "BT Rssi state switch to Low\n");
+			} else {
+				btrssi_state = BTC_RSSI_STATE_STAY_HIGH;
+				BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_RSSI_STATE,
+					  "BT Rssi state stay at High\n");
+			}
+		}
+	} else if (level_num == 3) {
+		if (rssi_thresh > rssi_thresh1) {
+			BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_RSSI_STATE,
+				  "BT Rssi thresh error!!\n");
+			return coex_sta->pre_bt_rssi_state;
+		}
+
+		if ((coex_sta->pre_bt_rssi_state == BTC_RSSI_STATE_LOW) ||
+		    (coex_sta->pre_bt_rssi_state == BTC_RSSI_STATE_STAY_LOW)) {
+			BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_RSSI_STATE,
+				  "BT Rssi pre state=LOW\n");
+			if (btrssi >= (rssi_thresh +
+				      BTC_RSSI_COEX_THRESH_TOL_8192E_2ANT)) {
+				btrssi_state = BTC_RSSI_STATE_MEDIUM;
+				BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_RSSI_STATE,
+					  "BT Rssi state switch to Medium\n");
+			} else {
+				btrssi_state = BTC_RSSI_STATE_STAY_LOW;
+				BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_RSSI_STATE,
+					  "BT Rssi state stay at Low\n");
+			}
+		} else if ((coex_sta->pre_bt_rssi_state ==
+			    BTC_RSSI_STATE_MEDIUM) ||
+			   (coex_sta->pre_bt_rssi_state ==
+			    BTC_RSSI_STATE_STAY_MEDIUM)) {
+			BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_RSSI_STATE,
+				  "[BTCoex], BT Rssi pre state=MEDIUM\n");
+			if (btrssi >= (rssi_thresh1 +
+				       BTC_RSSI_COEX_THRESH_TOL_8192E_2ANT)) {
+				btrssi_state = BTC_RSSI_STATE_HIGH;
+				BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_RSSI_STATE,
+					  "BT Rssi state switch to High\n");
+			} else if (btrssi < rssi_thresh) {
+				btrssi_state = BTC_RSSI_STATE_LOW;
+				BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_RSSI_STATE,
+					  "BT Rssi state switch to Low\n");
+			} else {
+				btrssi_state = BTC_RSSI_STATE_STAY_MEDIUM;
+				BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_RSSI_STATE,
+					  "BT Rssi state stay at Medium\n");
+			}
+		} else {
+			BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_RSSI_STATE,
+				  "BT Rssi pre state=HIGH\n");
+			if (btrssi < rssi_thresh1) {
+				btrssi_state = BTC_RSSI_STATE_MEDIUM;
+				BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_RSSI_STATE,
+					  "BT Rssi state switch to Medium\n");
+			} else {
+				btrssi_state = BTC_RSSI_STATE_STAY_HIGH;
+				BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_RSSI_STATE,
+					  "BT Rssi state stay at High\n");
+			}
+		}
+	}
+
+	coex_sta->pre_bt_rssi_state = btrssi_state;
+
+	return btrssi_state;
+}
+
+u8 halbtc8192e2ant_wifirssi_state(struct btc_coexist *btcoexist, u8 index,
+				  u8 level_num, u8 rssi_thresh, u8 rssi_thresh1)
+{
+	int wifirssi = 0;
+	u8 wifirssi_state = coex_sta->pre_wifi_rssi_state[index];
+
+	btcoexist->btc_get(btcoexist, BTC_GET_S4_WIFI_RSSI, &wifirssi);
+
+	if (level_num == 2) {
+		if ((coex_sta->pre_wifi_rssi_state[index] ==
+		     BTC_RSSI_STATE_LOW) ||
+		    (coex_sta->pre_wifi_rssi_state[index] ==
+		     BTC_RSSI_STATE_STAY_LOW)) {
+			if (wifirssi >= (rssi_thresh +
+					 BTC_RSSI_COEX_THRESH_TOL_8192E_2ANT)) {
+				wifirssi_state = BTC_RSSI_STATE_HIGH;
+				BTC_PRINT(BTC_MSG_ALGORITHM,
+					  ALGO_WIFI_RSSI_STATE,
+					  "wifi RSSI state switch to High\n");
+			} else {
+				wifirssi_state = BTC_RSSI_STATE_STAY_LOW;
+				BTC_PRINT(BTC_MSG_ALGORITHM,
+					  ALGO_WIFI_RSSI_STATE,
+					  "wifi RSSI state stay at Low\n");
+			}
+		} else {
+			if (wifirssi < rssi_thresh) {
+				wifirssi_state = BTC_RSSI_STATE_LOW;
+				BTC_PRINT(BTC_MSG_ALGORITHM,
+					  ALGO_WIFI_RSSI_STATE,
+					  "wifi RSSI state switch to Low\n");
+			} else {
+				wifirssi_state = BTC_RSSI_STATE_STAY_HIGH;
+				BTC_PRINT(BTC_MSG_ALGORITHM,
+					  ALGO_WIFI_RSSI_STATE,
+					  "wifi RSSI state stay at High\n");
+			}
+		}
+	} else if (level_num == 3) {
+		if (rssi_thresh > rssi_thresh1) {
+			BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_WIFI_RSSI_STATE,
+				  "wifi RSSI thresh error!!\n");
+			return coex_sta->pre_wifi_rssi_state[index];
+		}
+
+		if ((coex_sta->pre_wifi_rssi_state[index] ==
+		     BTC_RSSI_STATE_LOW) ||
+		    (coex_sta->pre_wifi_rssi_state[index] ==
+		     BTC_RSSI_STATE_STAY_LOW)) {
+			if (wifirssi >= (rssi_thresh +
+					 BTC_RSSI_COEX_THRESH_TOL_8192E_2ANT)) {
+				wifirssi_state = BTC_RSSI_STATE_MEDIUM;
+				BTC_PRINT(BTC_MSG_ALGORITHM,
+					  ALGO_WIFI_RSSI_STATE,
+					  "wifi RSSI state switch to Medium\n");
+			} else {
+				wifirssi_state = BTC_RSSI_STATE_STAY_LOW;
+				BTC_PRINT(BTC_MSG_ALGORITHM,
+					  ALGO_WIFI_RSSI_STATE,
+					  "wifi RSSI state stay at Low\n");
+			}
+		} else if ((coex_sta->pre_wifi_rssi_state[index] ==
+			    BTC_RSSI_STATE_MEDIUM) ||
+			   (coex_sta->pre_wifi_rssi_state[index] ==
+			    BTC_RSSI_STATE_STAY_MEDIUM)) {
+			if (wifirssi >= (rssi_thresh1 +
+					 BTC_RSSI_COEX_THRESH_TOL_8192E_2ANT)) {
+				wifirssi_state = BTC_RSSI_STATE_HIGH;
+				BTC_PRINT(BTC_MSG_ALGORITHM,
+					  ALGO_WIFI_RSSI_STATE,
+					  "wifi RSSI state switch to High\n");
+			} else if (wifirssi < rssi_thresh) {
+				wifirssi_state = BTC_RSSI_STATE_LOW;
+				BTC_PRINT(BTC_MSG_ALGORITHM,
+					  ALGO_WIFI_RSSI_STATE,
+					  "wifi RSSI state switch to Low\n");
+			} else {
+				wifirssi_state = BTC_RSSI_STATE_STAY_MEDIUM;
+				BTC_PRINT(BTC_MSG_ALGORITHM,
+					  ALGO_WIFI_RSSI_STATE,
+					  "wifi RSSI state stay at Medium\n");
+			}
+		} else {
+			if (wifirssi < rssi_thresh1) {
+				wifirssi_state = BTC_RSSI_STATE_MEDIUM;
+				BTC_PRINT(BTC_MSG_ALGORITHM,
+					  ALGO_WIFI_RSSI_STATE,
+					  "wifi RSSI state switch to Medium\n");
+			} else {
+				wifirssi_state = BTC_RSSI_STATE_STAY_HIGH;
+				BTC_PRINT(BTC_MSG_ALGORITHM,
+					  ALGO_WIFI_RSSI_STATE,
+					  "wifi RSSI state stay at High\n");
+			}
+		}
+	}
+
+	coex_sta->pre_wifi_rssi_state[index] = wifirssi_state;
+
+	return wifirssi_state;
+}
+
+void halbtc8192e2ant_monitor_bt_enable_disable(struct btc_coexist *btcoexist)
+{
+	static bool pre_bt_disabled;
+	static u32 bt_disable_cnt;
+	bool bt_active = true, bt_disabled = false;
+
+	/* This function checks whether BT is disabled */
+
+	if (coex_sta->high_priority_tx == 0 &&
+	    coex_sta->high_priority_rx == 0 &&
+	    coex_sta->low_priority_tx == 0 &&
+	    coex_sta->low_priority_rx == 0)
+		bt_active = false;
+
+	if (coex_sta->high_priority_tx == 0xffff &&
+	    coex_sta->high_priority_rx == 0xffff &&
+	    coex_sta->low_priority_tx == 0xffff &&
+	    coex_sta->low_priority_rx == 0xffff)
+		bt_active = false;
+
+	if (bt_active) {
+		bt_disable_cnt = 0;
+		bt_disabled = false;
+		btcoexist->btc_set(btcoexist, BTC_SET_BL_BT_DISABLE,
+				   &bt_disabled);
+		BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_MONITOR,
+			  "[BTCoex], BT is enabled !!\n");
+	} else {
+		bt_disable_cnt++;
+		BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_MONITOR,
+			  "[BTCoex], bt all counters=0, %d times!!\n",
+			  bt_disable_cnt);
+		if (bt_disable_cnt >= 2) {
+			bt_disabled = true;
+			btcoexist->btc_set(btcoexist, BTC_SET_BL_BT_DISABLE,
+					   &bt_disabled);
+			BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_MONITOR,
+				  "[BTCoex], BT is disabled !!\n");
+		}
+	}
+	if (pre_bt_disabled != bt_disabled) {
+		BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_MONITOR,
+			  "[BTCoex], BT is from %s to %s!!\n",
+			  (pre_bt_disabled ? "disabled":"enabled"),
+			  (bt_disabled ? "disabled":"enabled"));
+		pre_bt_disabled = bt_disabled;
+	}
+}
+
+u32 halbtc8192e2ant_decidera_mask(struct btc_coexist *btcoexist,
+				  u8 sstype, u32 ra_masktype)
+{
+	u32 disra_mask = 0x0;
+
+	switch (ra_masktype) {
+	case 0: /* normal mode */
+		if (sstype == 2)
+			disra_mask = 0x0;	/* enable 2ss */
+		else
+			disra_mask = 0xfff00000;/* disable 2ss */
+		break;
+	case 1: /* disable cck 1/2 */
+		if (sstype == 2)
+			disra_mask = 0x00000003;/* enable 2ss */
+		else
+			disra_mask = 0xfff00003;/* disable 2ss */
+		break;
+	case 2: /* disable cck 1/2/5.5, ofdm 6/9/12/18/24, mcs 0/1/2/3/4 */
+		if (sstype == 2)
+			disra_mask = 0x0001f1f7;/* enable 2ss */
+		else
+			disra_mask = 0xfff1f1f7;/* disable 2ss */
+		break;
+	default:
+		break;
+	}
+
+	return disra_mask;
+}
+
+void halbtc8192e2ant_Updatera_mask(struct btc_coexist *btcoexist,
+				   bool force_exec, u32 dis_ratemask)
+{
+	coex_dm->curra_mask = dis_ratemask;
+
+	if (force_exec || (coex_dm->prera_mask != coex_dm->curra_mask))
+		btcoexist->btc_set(btcoexist, BTC_SET_ACT_UPDATE_ra_mask,
+				   &coex_dm->curra_mask);
+	coex_dm->prera_mask = coex_dm->curra_mask;
+}
+
+void halbtc8192e2ant_autorate_fallback_retry(struct btc_coexist *btcoexist,
+					     bool force_exec, u8 type)
+{
+	bool wifi_under_bmode = false;
+
+	coex_dm->cur_arfrtype = type;
+
+	if (force_exec || (coex_dm->pre_arfrtype != coex_dm->cur_arfrtype)) {
+		switch (coex_dm->cur_arfrtype) {
+		case 0:	/* normal mode */
+			btcoexist->btc_write_4byte(btcoexist, 0x430,
+						   coex_dm->backup_arfr_cnt1);
+			btcoexist->btc_write_4byte(btcoexist, 0x434,
+						   coex_dm->backup_arfr_cnt2);
+			break;
+		case 1:
+			btcoexist->btc_get(btcoexist,
+					   BTC_GET_BL_WIFI_UNDER_B_MODE,
+					   &wifi_under_bmode);
+			if (wifi_under_bmode) {
+				btcoexist->btc_write_4byte(btcoexist, 0x430,
+							   0x0);
+				btcoexist->btc_write_4byte(btcoexist, 0x434,
+							   0x01010101);
+			} else {
+				btcoexist->btc_write_4byte(btcoexist, 0x430,
+							   0x0);
+				btcoexist->btc_write_4byte(btcoexist, 0x434,
+							   0x04030201);
+			}
+			break;
+		default:
+			break;
+		}
+	}
+
+	coex_dm->pre_arfrtype = coex_dm->cur_arfrtype;
+}
+
+void halbtc8192e2ant_retrylimit(struct btc_coexist *btcoexist,
+				bool force_exec, u8 type)
+{
+	coex_dm->cur_retrylimit_type = type;
+
+	if (force_exec || (coex_dm->pre_retrylimit_type !=
+			   coex_dm->cur_retrylimit_type)) {
+		switch (coex_dm->cur_retrylimit_type) {
+			case 0:	/* normal mode */
+				btcoexist->btc_write_2byte(btcoexist, 0x42a,
+						    coex_dm->backup_retrylimit);
+				break;
+			case 1:	/* retry limit=8 */
+				btcoexist->btc_write_2byte(btcoexist, 0x42a,
+							   0x0808);
+				break;
+			default:
+				break;
+		}
+	}
+
+	coex_dm->pre_retrylimit_type = coex_dm->cur_retrylimit_type;
+}
+
+void halbtc8192e2ant_ampdu_maxtime(struct btc_coexist *btcoexist,
+				   bool force_exec, u8 type)
+{
+	coex_dm->cur_ampdutime_type = type;
+
+	if (force_exec || (coex_dm->pre_ampdutime_type !=
+			   coex_dm->cur_ampdutime_type)) {
+		switch (coex_dm->cur_ampdutime_type) {
+		case 0:	/* normal mode */
+			btcoexist->btc_write_1byte(btcoexist, 0x456,
+						coex_dm->backup_ampdu_maxtime);
+			break;
+		case 1:	/* AMPDU time = 0x38 * 32us */
+			btcoexist->btc_write_1byte(btcoexist, 0x456, 0x38);
+			break;
+		default:
+			break;
+		}
+	}
+
+	coex_dm->pre_ampdutime_type = coex_dm->cur_ampdutime_type;
+}
+
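+/* Apply all Tx-side restrictions in one call: rate mask (via
+ * halbtc8192e2ant_decidera_mask), auto-rate fallback (0x430/0x434),
+ * retry limit (0x42a) and max AMPDU time (0x456). */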
+void halbtc8192e2ant_limited_tx(struct btc_coexist *btcoexist,
+				bool force_exec, u8 ra_masktype, u8 arfr_type,
+				u8 retrylimit_type, u8 ampdutime_type)
+{
+	u32 disra_mask = 0x0;
+
+	coex_dm->curra_masktype = ra_masktype;
+	disra_mask = halbtc8192e2ant_decidera_mask(btcoexist,
+						   coex_dm->cur_sstype,
+						   ra_masktype);
+	halbtc8192e2ant_Updatera_mask(btcoexist, force_exec, disra_mask);
+
+	halbtc8192e2ant_autorate_fallback_retry(btcoexist, force_exec,
+						arfr_type);
+	halbtc8192e2ant_retrylimit(btcoexist, force_exec, retrylimit_type);
+	halbtc8192e2ant_ampdu_maxtime(btcoexist, force_exec, ampdutime_type);
+}
+
+void halbtc8192e2ant_limited_rx(struct btc_coexist *btcoexist,
+				bool force_exec, bool rej_ap_agg_pkt,
+				bool b_bt_ctrl_agg_buf_size,
+				u8 agg_buf_size)
+{
+	bool reject_rx_agg = rej_ap_agg_pkt;
+	bool bt_ctrl_rx_agg_size = b_bt_ctrl_agg_buf_size;
+	u8 rx_agg_size = agg_buf_size;
+
+	/*********************************************
+	 *	Rx Aggregation related setting
+	 *********************************************/
+	btcoexist->btc_set(btcoexist, BTC_SET_BL_TO_REJ_AP_AGG_PKT,
+			   &reject_rx_agg);
+	/* decide BT control aggregation buf size or not */
+	btcoexist->btc_set(btcoexist, BTC_SET_BL_BT_CTRL_AGG_SIZE,
+			   &bt_ctrl_rx_agg_size);
+	/* aggregation buf size; only takes effect
+	 * when BT controls the Rx aggregation size. */
+	btcoexist->btc_set(btcoexist, BTC_SET_U1_AGG_BUF_SIZE, &rx_agg_size);
+	/* real update aggregation setting */
+	btcoexist->btc_set(btcoexist, BTC_SET_ACT_AGGREGATE_CTRL, NULL);
+}
+
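+/* Read the BT high/low priority Tx/Rx counters from registers 0x770 and
+ * 0x774 into coex_sta, then reset the hardware counters via register
+ * 0x76e for the next polling period. */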
+void halbtc8192e2ant_monitor_bt_ctr(struct btc_coexist *btcoexist)
+{
+	u32 reg_hp_txrx, reg_lp_txrx, u32tmp;
+	u32 reg_hp_tx = 0, reg_hp_rx = 0, reg_lp_tx = 0, reg_lp_rx = 0;
+
+	reg_hp_txrx = 0x770;
+	reg_lp_txrx = 0x774;
+
+	u32tmp = btcoexist->btc_read_4byte(btcoexist, reg_hp_txrx);
+	reg_hp_tx = u32tmp & MASKLWORD;
+	reg_hp_rx = (u32tmp & MASKHWORD)>>16;
+
+	u32tmp = btcoexist->btc_read_4byte(btcoexist, reg_lp_txrx);
+	reg_lp_tx = u32tmp & MASKLWORD;
+	reg_lp_rx = (u32tmp & MASKHWORD)>>16;
+
+	coex_sta->high_priority_tx = reg_hp_tx;
+	coex_sta->high_priority_rx = reg_hp_rx;
+	coex_sta->low_priority_tx = reg_lp_tx;
+	coex_sta->low_priority_rx = reg_lp_rx;
+
+	BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_MONITOR,
+		  "[BTCoex] High Priority Tx/Rx (reg 0x%x)=0x%x(%d)/0x%x(%d)\n",
+		  reg_hp_txrx, reg_hp_tx, reg_hp_tx, reg_hp_rx, reg_hp_rx);
+	BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_MONITOR,
+		  "[BTCoex] Low Priority Tx/Rx (reg 0x%x)=0x%x(%d)/0x%x(%d)\n",
+		  reg_lp_txrx, reg_lp_tx, reg_lp_tx, reg_lp_rx, reg_lp_rx);
+
+	/* reset counter */
+	btcoexist->btc_write_1byte(btcoexist, 0x76e, 0xc);
+}
+
+void halbtc8192e2ant_querybt_info(struct btc_coexist *btcoexist)
+{
+	u8 h2c_parameter[1] = {0};
+
+	coex_sta->c2h_bt_info_req_sent = true;
+
+	h2c_parameter[0] |= BIT0;	/* trigger */
+
+	BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_EXEC,
+		  "[BTCoex], Query Bt Info, FW write 0x61=0x%x\n",
+		  h2c_parameter[0]);
+
+	btcoexist->btc_fill_h2c(btcoexist, 0x61, 1, h2c_parameter);
+}
+
+bool halbtc8192e2ant_iswifi_status_changed(struct btc_coexist *btcoexist)
+{
+	static bool pre_wifi_busy;
+	static bool pre_under_4way, pre_bt_hson;
+	bool wifi_busy = false, under_4way = false, bt_hson = false;
+	bool wifi_connected = false;
+
+	btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_CONNECTED,
+			   &wifi_connected);
+	btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_BUSY, &wifi_busy);
+	btcoexist->btc_get(btcoexist, BTC_GET_BL_HS_OPERATION, &bt_hson);
+	btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_4_WAY_PROGRESS,
+			   &under_4way);
+
+	if (wifi_connected) {
+		if (wifi_busy != pre_wifi_busy) {
+			pre_wifi_busy = wifi_busy;
+			return true;
+		}
+		if (under_4way != pre_under_4way) {
+			pre_under_4way = under_4way;
+			return true;
+		}
+		if (bt_hson != pre_bt_hson) {
+			pre_bt_hson = bt_hson;
+			return true;
+		}
+	}
+
+	return false;
+}
+
+void halbtc8192e2ant_update_btlink_info(struct btc_coexist *btcoexist)
+{
+	struct btc_bt_link_info *bt_link_info = &btcoexist->bt_link_info;
+	bool bt_hson = false;
+
+	btcoexist->btc_get(btcoexist, BTC_GET_BL_HS_OPERATION, &bt_hson);
+
+	bt_link_info->bt_link_exist = coex_sta->bt_link_exist;
+	bt_link_info->sco_exist = coex_sta->sco_exist;
+	bt_link_info->a2dp_exist = coex_sta->a2dp_exist;
+	bt_link_info->pan_exist = coex_sta->pan_exist;
+	bt_link_info->hid_exist = coex_sta->hid_exist;
+
+	/* work around for HS mode. */
+	if (bt_hson) {
+		bt_link_info->pan_exist = true;
+		bt_link_info->bt_link_exist = true;
+	}
+
+	/* check if Sco only */
+	if (bt_link_info->sco_exist &&
+	    !bt_link_info->a2dp_exist &&
+	    !bt_link_info->pan_exist &&
+	    !bt_link_info->hid_exist)
+		bt_link_info->sco_only = true;
+	else
+		bt_link_info->sco_only = false;
+
+	/* check if A2dp only */
+	if (!bt_link_info->sco_exist &&
+	    bt_link_info->a2dp_exist &&
+	    !bt_link_info->pan_exist &&
+	    !bt_link_info->hid_exist)
+		bt_link_info->a2dp_only = true;
+	else
+		bt_link_info->a2dp_only = false;
+
+	/* check if Pan only */
+	if (!bt_link_info->sco_exist &&
+	    !bt_link_info->a2dp_exist &&
+	    bt_link_info->pan_exist &&
+	    !bt_link_info->hid_exist)
+		bt_link_info->pan_only = true;
+	else
+		bt_link_info->pan_only = false;
+
+	/* check if Hid only */
+	if (!bt_link_info->sco_exist &&
+	    !bt_link_info->a2dp_exist &&
+	    !bt_link_info->pan_exist &&
+	    bt_link_info->hid_exist)
+		bt_link_info->hid_only = true;
+	else
+		bt_link_info->hid_only = false;
+}
+
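+/* Count the BT profiles (SCO/HID/PAN/A2DP) reported in bt_link_info and
+ * map the combination onto one of the BT_8192E_2ANT_COEX_ALGO_*
+ * coexistence algorithms. */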
+u8 halbtc8192e2ant_action_algorithm(struct btc_coexist *btcoexist)
+{
+	struct btc_bt_link_info *bt_link_info = &btcoexist->bt_link_info;
+	struct btc_stack_info *stack_info = &btcoexist->stack_info;
+	bool bt_hson = false;
+	u8 algorithm = BT_8192E_2ANT_COEX_ALGO_UNDEFINED;
+	u8 numOfDiffProfile = 0;
+
+	btcoexist->btc_get(btcoexist, BTC_GET_BL_HS_OPERATION, &bt_hson);
+
+	if (!bt_link_info->bt_link_exist) {
+		BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
+			  "No BT link exists!!!\n");
+		return algorithm;
+	}
+
+	if (bt_link_info->sco_exist)
+		numOfDiffProfile++;
+	if (bt_link_info->hid_exist)
+		numOfDiffProfile++;
+	if (bt_link_info->pan_exist)
+		numOfDiffProfile++;
+	if (bt_link_info->a2dp_exist)
+		numOfDiffProfile++;
+
+	if (numOfDiffProfile == 1) {
+		if (bt_link_info->sco_exist) {
+			BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
+				  "SCO only\n");
+			algorithm = BT_8192E_2ANT_COEX_ALGO_SCO;
+		} else {
+			if (bt_link_info->hid_exist) {
+				BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
+					  "HID only\n");
+				algorithm = BT_8192E_2ANT_COEX_ALGO_HID;
+			} else if (bt_link_info->a2dp_exist) {
+				BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
+					  "A2DP only\n");
+				algorithm = BT_8192E_2ANT_COEX_ALGO_A2DP;
+			} else if (bt_link_info->pan_exist) {
+				if (bt_hson) {
+					BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
+						  "PAN(HS) only\n");
+					algorithm =
+						BT_8192E_2ANT_COEX_ALGO_PANHS;
+				} else {
+					BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
+						  "PAN(EDR) only\n");
+					algorithm =
+						BT_8192E_2ANT_COEX_ALGO_PANEDR;
+				}
+			}
+		}
+	} else if (numOfDiffProfile == 2) {
+		if (bt_link_info->sco_exist) {
+			if (bt_link_info->hid_exist) {
+				BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
+					  "SCO + HID\n");
+				algorithm = BT_8192E_2ANT_COEX_ALGO_SCO;
+			} else if (bt_link_info->a2dp_exist) {
+				BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
+					  "SCO + A2DP ==> SCO\n");
+				algorithm = BT_8192E_2ANT_COEX_ALGO_PANEDR_HID;
+			} else if (bt_link_info->pan_exist) {
+				if (bt_hson) {
+					BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
+						  "SCO + PAN(HS)\n");
+					algorithm = BT_8192E_2ANT_COEX_ALGO_SCO;
+				} else {
+					BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
+						  "SCO + PAN(EDR)\n");
+					algorithm =
+						BT_8192E_2ANT_COEX_ALGO_SCO_PAN;
+				}
+			}
+		} else {
+			if (bt_link_info->hid_exist &&
+			    bt_link_info->a2dp_exist) {
+				if (stack_info->num_of_hid >= 2) {
+					BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
+						  "HID*2 + A2DP\n");
+					algorithm =
+					BT_8192E_2ANT_COEX_ALGO_HID_A2DP_PANEDR;
+				} else {
+					BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
+						  "HID + A2DP\n");
+					algorithm =
+					    BT_8192E_2ANT_COEX_ALGO_HID_A2DP;
+				}
+			} else if (bt_link_info->hid_exist &&
+				   bt_link_info->pan_exist) {
+				if (bt_hson) {
+					BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
+						  "HID + PAN(HS)\n");
+					algorithm = BT_8192E_2ANT_COEX_ALGO_HID;
+				} else {
+					BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
+						  "HID + PAN(EDR)\n");
+					algorithm =
+					    BT_8192E_2ANT_COEX_ALGO_PANEDR_HID;
+				}
+			} else if (bt_link_info->pan_exist &&
+				   bt_link_info->a2dp_exist) {
+				if (bt_hson) {
+					BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
+						  "A2DP + PAN(HS)\n");
+					algorithm =
+					    BT_8192E_2ANT_COEX_ALGO_A2DP_PANHS;
+				} else {
+					BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
+						  "A2DP + PAN(EDR)\n");
+					algorithm =
+					    BT_8192E_2ANT_COEX_ALGO_PANEDR_A2DP;
+				}
+			}
+		}
+	} else if (numOfDiffProfile == 3) {
+		if (bt_link_info->sco_exist) {
+			if (bt_link_info->hid_exist &&
+			    bt_link_info->a2dp_exist) {
+				BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
+					  "SCO + HID + A2DP ==> HID\n");
+				algorithm = BT_8192E_2ANT_COEX_ALGO_PANEDR_HID;
+			} else if (bt_link_info->hid_exist &&
+				   bt_link_info->pan_exist) {
+				if (bt_hson) {
+					BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
+						  "SCO + HID + PAN(HS)\n");
+					algorithm = BT_8192E_2ANT_COEX_ALGO_SCO;
+				} else {
+					BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
+						  "SCO + HID + PAN(EDR)\n");
+					algorithm =
+						BT_8192E_2ANT_COEX_ALGO_SCO_PAN;
+				}
+			} else if (bt_link_info->pan_exist &&
+				   bt_link_info->a2dp_exist) {
+				if (bt_hson) {
+					BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
+						  "SCO + A2DP + PAN(HS)\n");
+					algorithm = BT_8192E_2ANT_COEX_ALGO_SCO;
+				} else {
+					BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
+						  "SCO + A2DP + PAN(EDR)\n");
+					algorithm =
+					    BT_8192E_2ANT_COEX_ALGO_PANEDR_HID;
+				}
+			}
+		} else {
+			if (bt_link_info->hid_exist &&
+			    bt_link_info->pan_exist &&
+			    bt_link_info->a2dp_exist) {
+				if (bt_hson) {
+					BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
+						  "HID + A2DP + PAN(HS)\n");
+					algorithm =
+					    BT_8192E_2ANT_COEX_ALGO_HID_A2DP;
+				} else {
+					BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
+						  "HID + A2DP + PAN(EDR)\n");
+					algorithm =
+					BT_8192E_2ANT_COEX_ALGO_HID_A2DP_PANEDR;
+				}
+			}
+		}
+	} else if (numOfDiffProfile >= 3) {
+		if (bt_link_info->sco_exist) {
+			if (bt_link_info->hid_exist &&
+			    bt_link_info->pan_exist &&
+			    bt_link_info->a2dp_exist) {
+				if (bt_hson) {
+					BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
+						  "Error: SCO+HID+A2DP+PAN(HS)\n");
+
+				} else {
+					BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
+						  "SCO+HID+A2DP+PAN(EDR)\n");
+					algorithm =
+					    BT_8192E_2ANT_COEX_ALGO_PANEDR_HID;
+				}
+			}
+		}
+	}
+
+	return algorithm;
+}
+
+void halbtc8192e2ant_setfw_dac_swinglevel(struct btc_coexist *btcoexist,
+					  u8 dac_swinglvl)
+{
+	u8 h2c_parameter[1] = {0};
+
+	/* There are several type of dacswing
+	 * 0x18/ 0x10/ 0xc/ 0x8/ 0x4/ 0x6 */
+	h2c_parameter[0] = dac_swinglvl;
+
+	BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_EXEC,
+		  "[BTCoex], Set Dac Swing Level=0x%x\n", dac_swinglvl);
+	BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_EXEC,
+		  "[BTCoex], FW write 0x64=0x%x\n", h2c_parameter[0]);
+
+	btcoexist->btc_fill_h2c(btcoexist, 0x64, 1, h2c_parameter);
+}
+
+void halbtc8192e2ant_set_fwdec_btpwr(struct btc_coexist *btcoexist,
+				     u8 dec_btpwr_lvl)
+{
+	u8 h2c_parameter[1] = {0};
+
+	h2c_parameter[0] = dec_btpwr_lvl;
+
+	BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_EXEC,
+		  "[BTCoex] decrease Bt Power level = %d, FW write 0x62=0x%x\n",
+		  dec_btpwr_lvl, h2c_parameter[0]);
+
+	btcoexist->btc_fill_h2c(btcoexist, 0x62, 1, h2c_parameter);
+}
+
+void halbtc8192e2ant_dec_btpwr(struct btc_coexist *btcoexist,
+			       bool force_exec, u8 dec_btpwr_lvl)
+{
+	BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW,
+		  "[BTCoex], %s Dec BT power level = %d\n",
+		  (force_exec? "force to":""), dec_btpwr_lvl);
+	coex_dm->cur_dec_bt_pwr = dec_btpwr_lvl;
+
+	if (!force_exec) {
+		BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_DETAIL,
+			  "[BTCoex], preBtDecPwrLvl=%d, curBtDecPwrLvl=%d\n",
+			  coex_dm->pre_dec_bt_pwr, coex_dm->cur_dec_bt_pwr);
+	}
+	halbtc8192e2ant_set_fwdec_btpwr(btcoexist, coex_dm->cur_dec_bt_pwr);
+
+	coex_dm->pre_dec_bt_pwr = coex_dm->cur_dec_bt_pwr;
+}
+
+void halbtc8192e2ant_set_bt_autoreport(struct btc_coexist *btcoexist,
+				       bool enable_autoreport)
+{
+	u8 h2c_parameter[1] = {0};
+
+	h2c_parameter[0] = 0;
+
+	if (enable_autoreport)
+		h2c_parameter[0] |= BIT0;
+
+	BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_EXEC,
+		  "[BTCoex], BT FW auto report : %s, FW write 0x68=0x%x\n",
+		  (enable_autoreport? "Enabled!!":"Disabled!!"),
+		  h2c_parameter[0]);
+
+	btcoexist->btc_fill_h2c(btcoexist, 0x68, 1, h2c_parameter);
+}
+
+void halbtc8192e2ant_bt_autoreport(struct btc_coexist *btcoexist,
+				   bool force_exec, bool enable_autoreport)
+{
+	BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW,
+		  "[BTCoex], %s BT Auto report = %s\n",
+		  (force_exec? "force to":""),
+		  ((enable_autoreport)? "Enabled":"Disabled"));
+	coex_dm->cur_bt_auto_report = enable_autoreport;
+
+	if (!force_exec) {
+		BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_DETAIL,
+			  "[BTCoex] bPreBtAutoReport=%d, bCurBtAutoReport=%d\n",
+			  coex_dm->pre_bt_auto_report,
+			  coex_dm->cur_bt_auto_report);
+
+		if (coex_dm->pre_bt_auto_report == coex_dm->cur_bt_auto_report)
+			return;
+	}
+	halbtc8192e2ant_set_bt_autoreport(btcoexist,
+					  coex_dm->cur_bt_auto_report);
+
+	coex_dm->pre_bt_auto_report = coex_dm->cur_bt_auto_report;
+}
+
+void halbtc8192e2ant_fw_dac_swinglvl(struct btc_coexist *btcoexist,
+				     bool force_exec, u8 fw_dac_swinglvl)
+{
+	BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW,
+		  "[BTCoex], %s set FW Dac Swing level = %d\n",
+		  (force_exec? "force to":""), fw_dac_swinglvl);
+	coex_dm->cur_fw_dac_swing_lvl = fw_dac_swinglvl;
+
+	if (!force_exec) {
+		BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_DETAIL,
+			  "[BTCoex] preFwDacSwingLvl=%d, curFwDacSwingLvl=%d\n",
+			  coex_dm->pre_fw_dac_swing_lvl,
+			  coex_dm->cur_fw_dac_swing_lvl);
+
+		if (coex_dm->pre_fw_dac_swing_lvl ==
+		    coex_dm->cur_fw_dac_swing_lvl)
+			return;
+	}
+
+	halbtc8192e2ant_setfw_dac_swinglevel(btcoexist,
+					     coex_dm->cur_fw_dac_swing_lvl);
+
+	coex_dm->pre_fw_dac_swing_lvl = coex_dm->cur_fw_dac_swing_lvl;
+}
+
+void halbtc8192e2ant_set_sw_rf_rx_lpf_corner(struct btc_coexist *btcoexist,
+					     bool rx_rf_shrink_on)
+{
+	if (rx_rf_shrink_on) {
+		/* Shrink RF Rx LPF corner */
+		BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_EXEC,
+			  "[BTCoex], Shrink RF Rx LPF corner!!\n");
+		btcoexist->btc_set_rf_reg(btcoexist, BTC_RF_A, 0x1e,
+					  0xfffff, 0xffffc);
+	} else {
+		/* Resume RF Rx LPF corner.
+		 * After initialization, coex_dm->bt_rf0x1e_backup holds
+		 * the value to restore. */
+		if (btcoexist->initilized) {
+			BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_EXEC,
+				  "[BTCoex], Resume RF Rx LPF corner!!\n");
+			btcoexist->btc_set_rf_reg(btcoexist, BTC_RF_A, 0x1e,
+						  0xfffff,
+						  coex_dm->bt_rf0x1e_backup);
+		}
+	}
+}
+
+void halbtc8192e2ant_rf_shrink(struct btc_coexist *btcoexist,
+			       bool force_exec, bool rx_rf_shrink_on)
+{
+	BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW,
+		  "[BTCoex], %s turn Rx RF Shrink = %s\n",
+		  (force_exec? "force to":""), ((rx_rf_shrink_on)? "ON":"OFF"));
+	coex_dm->cur_rf_rx_lpf_shrink = rx_rf_shrink_on;
+
+	if (!force_exec) {
+		BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_DETAIL,
+			  "[BTCoex]bPreRfRxLpfShrink=%d,bCurRfRxLpfShrink=%d\n",
+			  coex_dm->pre_rf_rx_lpf_shrink,
+			  coex_dm->cur_rf_rx_lpf_shrink);
+
+		if (coex_dm->pre_rf_rx_lpf_shrink ==
+		    coex_dm->cur_rf_rx_lpf_shrink)
+			return;
+	}
+	halbtc8192e2ant_set_sw_rf_rx_lpf_corner(btcoexist,
+						coex_dm->cur_rf_rx_lpf_shrink);
+
+	coex_dm->pre_rf_rx_lpf_shrink = coex_dm->cur_rf_rx_lpf_shrink;
+}
+
+void halbtc8192e2ant_set_sw_penalty_tx_rateadaptive(
+						struct btc_coexist *btcoexist,
+						bool low_penalty_ra)
+{
+	u8 h2c_parameter[6] = {0};
+
+	h2c_parameter[0] = 0x6;	/* opCode, 0x6= Retry_Penalty */
+
+	if (low_penalty_ra) {
+		h2c_parameter[1] |= BIT0;
+		/* normal rate except MCS7/6/5, OFDM54/48/36 */
+		h2c_parameter[2] = 0x00;
+		h2c_parameter[3] = 0xf7;  /* MCS7 or OFDM54 */
+		h2c_parameter[4] = 0xf8;  /* MCS6 or OFDM48 */
+		h2c_parameter[5] = 0xf9;  /* MCS5 or OFDM36 */
+	}
+
+	BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_EXEC,
+		  "[BTCoex], set WiFi Low-Penalty Retry: %s",
+		  (low_penalty_ra? "ON!!":"OFF!!"));
+
+	btcoexist->btc_fill_h2c(btcoexist, 0x69, 6, h2c_parameter);
+}
+
+void halbtc8192e2ant_low_penalty_ra(struct btc_coexist *btcoexist,
+				    bool force_exec, bool low_penalty_ra)
+{
+	BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW,
+		  "[BTCoex], %s turn LowPenaltyRA = %s\n",
+		  (force_exec? "force to":""), ((low_penalty_ra)? "ON":"OFF"));
+	coex_dm->cur_low_penalty_ra = low_penalty_ra;
+
+	if (!force_exec) {
+		BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_DETAIL,
+			  "[BTCoex] bPreLowPenaltyRa=%d, bCurLowPenaltyRa=%d\n",
+			  coex_dm->pre_low_penalty_ra,
+			  coex_dm->cur_low_penalty_ra);
+
+		if (coex_dm->pre_low_penalty_ra ==
+		    coex_dm->cur_low_penalty_ra)
+			return;
+	}
+	halbtc8192e2ant_set_sw_penalty_tx_rateadaptive(btcoexist,
+						coex_dm->cur_low_penalty_ra);
+
+	coex_dm->pre_low_penalty_ra = coex_dm->cur_low_penalty_ra;
+}
+
+void halbtc8192e2ant_set_dac_swingreg(struct btc_coexist *btcoexist,
+				      u32 level)
+{
+	u8 val = (u8)level;
+
+	BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_EXEC,
+		  "[BTCoex], Write SwDacSwing = 0x%x\n", level);
+	btcoexist->btc_write_1byte_bitmask(btcoexist, 0x883, 0x3e, val);
+}
+
+void halbtc8192e2ant_setsw_fulltime_dacswing(struct btc_coexist *btcoexist,
+					     bool sw_dac_swingon,
+					     u32 sw_dac_swinglvl)
+{
+	if (sw_dac_swingon)
+		halbtc8192e2ant_set_dac_swingreg(btcoexist, sw_dac_swinglvl);
+	else
+		halbtc8192e2ant_set_dac_swingreg(btcoexist, 0x18);
+}
+
+
+void halbtc8192e2ant_DacSwing(struct btc_coexist *btcoexist,
+			      bool force_exec, bool dac_swingon,
+			      u32 dac_swinglvl)
+{
+	BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW,
+		  "[BTCoex], %s turn DacSwing=%s, dac_swinglvl=0x%x\n",
+		  (force_exec? "force to":""),
+		  ((dac_swingon)? "ON":"OFF"), dac_swinglvl);
+	coex_dm->cur_dac_swing_on = dac_swingon;
+	coex_dm->cur_dac_swing_lvl = dac_swinglvl;
+
+	if (!force_exec) {
+		BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_DETAIL,
+			  "[BTCoex], bPreDacSwingOn=%d, preDacSwingLvl=0x%x, ",
+			  coex_dm->pre_dac_swing_on,
+			  coex_dm->pre_dac_swing_lvl);
+		BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_DETAIL,
+			  "bCurDacSwingOn=%d, curDacSwingLvl=0x%x\n",
+			  coex_dm->cur_dac_swing_on,
+			  coex_dm->cur_dac_swing_lvl);
+
+		if ((coex_dm->pre_dac_swing_on == coex_dm->cur_dac_swing_on) &&
+		    (coex_dm->pre_dac_swing_lvl == coex_dm->cur_dac_swing_lvl))
+			return;
+	}
+	mdelay(30);
+	halbtc8192e2ant_setsw_fulltime_dacswing(btcoexist, dac_swingon,
+						dac_swinglvl);
+
+	coex_dm->pre_dac_swing_on = coex_dm->cur_dac_swing_on;
+	coex_dm->pre_dac_swing_lvl = coex_dm->cur_dac_swing_lvl;
+}
+
+void halbtc8192e2ant_set_adc_backoff(struct btc_coexist *btcoexist,
+				     bool adc_backoff)
+{
+	if (adc_backoff) {
+		BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_EXEC,
+			  "[BTCoex], BB BackOff Level On!\n");
+		btcoexist->btc_write_1byte_bitmask(btcoexist, 0xc05, 0x30, 0x3);
+	} else {
+		BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_EXEC,
+			  "[BTCoex], BB BackOff Level Off!\n");
+		btcoexist->btc_write_1byte_bitmask(btcoexist, 0xc05, 0x30, 0x1);
+	}
+}
+
+void halbtc8192e2ant_adc_backoff(struct btc_coexist *btcoexist,
+				 bool force_exec, bool adc_backoff)
+{
+	BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW,
+		  "[BTCoex], %s turn AdcBackOff = %s\n",
+		  (force_exec? "force to":""), ((adc_backoff)? "ON":"OFF"));
+	coex_dm->cur_adc_back_off = adc_backoff;
+
+	if (!force_exec) {
+		BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_DETAIL,
+			  "[BTCoex], bPreAdcBackOff=%d, bCurAdcBackOff=%d\n",
+			  coex_dm->pre_adc_back_off, coex_dm->cur_adc_back_off);
+
+		if (coex_dm->pre_adc_back_off == coex_dm->cur_adc_back_off)
+			return;
+	}
+	halbtc8192e2ant_set_adc_backoff(btcoexist, coex_dm->cur_adc_back_off);
+
+	coex_dm->pre_adc_back_off = coex_dm->cur_adc_back_off;
+}
+
+void halbtc8192e2ant_set_agc_table(struct btc_coexist *btcoexist,
+				   bool agc_table_en)
+{
+
+	/* BB AGC Gain Table */
+	if (agc_table_en) {
+		BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_EXEC,
+			  "[BTCoex], BB Agc Table On!\n");
+		btcoexist->btc_write_4byte(btcoexist, 0xc78, 0x0a1A0001);
+		btcoexist->btc_write_4byte(btcoexist, 0xc78, 0x091B0001);
+		btcoexist->btc_write_4byte(btcoexist, 0xc78, 0x081C0001);
+		btcoexist->btc_write_4byte(btcoexist, 0xc78, 0x071D0001);
+		btcoexist->btc_write_4byte(btcoexist, 0xc78, 0x061E0001);
+		btcoexist->btc_write_4byte(btcoexist, 0xc78, 0x051F0001);
+	} else {
+		BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_EXEC,
+			  "[BTCoex], BB Agc Table Off!\n");
+		btcoexist->btc_write_4byte(btcoexist, 0xc78, 0xaa1A0001);
+		btcoexist->btc_write_4byte(btcoexist, 0xc78, 0xa91B0001);
+		btcoexist->btc_write_4byte(btcoexist, 0xc78, 0xa81C0001);
+		btcoexist->btc_write_4byte(btcoexist, 0xc78, 0xa71D0001);
+		btcoexist->btc_write_4byte(btcoexist, 0xc78, 0xa61E0001);
+		btcoexist->btc_write_4byte(btcoexist, 0xc78, 0xa51F0001);
+	}
+}
+
+void halbtc8192e2ant_AgcTable(struct btc_coexist *btcoexist,
+			      bool force_exec, bool agc_table_en)
+{
+	BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW,
+		  "[BTCoex], %s %s Agc Table\n",
+		  (force_exec? "force to":""),
+		  ((agc_table_en)? "Enable":"Disable"));
+	coex_dm->cur_agc_table_en = agc_table_en;
+
+	if (!force_exec) {
+		BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_DETAIL,
+			  "[BTCoex], bPreAgcTableEn=%d, bCurAgcTableEn=%d\n",
+			  coex_dm->pre_agc_table_en, coex_dm->cur_agc_table_en);
+
+		if (coex_dm->pre_agc_table_en == coex_dm->cur_agc_table_en)
+			return;
+	}
+	halbtc8192e2ant_set_agc_table(btcoexist, agc_table_en);
+
+	coex_dm->pre_agc_table_en = coex_dm->cur_agc_table_en;
+}
+
+void halbtc8192e2ant_set_coex_table(struct btc_coexist *btcoexist,
+				    u32 val0x6c0, u32 val0x6c4,
+				    u32 val0x6c8, u8 val0x6cc)
+{
+	BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_EXEC,
+		  "[BTCoex], set coex table, set 0x6c0=0x%x\n", val0x6c0);
+	btcoexist->btc_write_4byte(btcoexist, 0x6c0, val0x6c0);
+
+	BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_EXEC,
+		  "[BTCoex], set coex table, set 0x6c4=0x%x\n", val0x6c4);
+	btcoexist->btc_write_4byte(btcoexist, 0x6c4, val0x6c4);
+
+	BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_EXEC,
+		  "[BTCoex], set coex table, set 0x6c8=0x%x\n", val0x6c8);
+	btcoexist->btc_write_4byte(btcoexist, 0x6c8, val0x6c8);
+
+	BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_EXEC,
+		  "[BTCoex], set coex table, set 0x6cc=0x%x\n", val0x6cc);
+	btcoexist->btc_write_1byte(btcoexist, 0x6cc, val0x6cc);
+}
+
+void halbtc8192e2ant_coex_table(struct btc_coexist *btcoexist, bool force_exec,
+				u32 val0x6c0, u32 val0x6c4,
+				u32 val0x6c8, u8 val0x6cc)
+{
+	BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW,
+		  "[BTCoex], %s write Coex Table 0x6c0=0x%x, ",
+		  (force_exec? "force to":""), val0x6c0);
+	BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW,
+		  "0x6c4=0x%x, 0x6c8=0x%x, 0x6cc=0x%x\n",
+		  val0x6c4, val0x6c8, val0x6cc);
+	coex_dm->cur_val0x6c0 = val0x6c0;
+	coex_dm->cur_val0x6c4 = val0x6c4;
+	coex_dm->cur_val0x6c8 = val0x6c8;
+	coex_dm->cur_val0x6cc = val0x6cc;
+
+	if (!force_exec) {
+		BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_DETAIL,
+			  "[BTCoex], preVal0x6c0=0x%x, preVal0x6c4=0x%x, ",
+			  coex_dm->pre_val0x6c0, coex_dm->pre_val0x6c4);
+		BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_DETAIL,
+			  "preVal0x6c8=0x%x, preVal0x6cc=0x%x !!\n",
+			  coex_dm->pre_val0x6c8, coex_dm->pre_val0x6cc);
+		BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_DETAIL,
+			  "[BTCoex], curVal0x6c0=0x%x, curVal0x6c4=0x%x, \n",
+			  coex_dm->cur_val0x6c0, coex_dm->cur_val0x6c4);
+		BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_DETAIL,
+			  "curVal0x6c8=0x%x, curVal0x6cc=0x%x !!\n",
+			  coex_dm->cur_val0x6c8, coex_dm->cur_val0x6cc);
+
+		if ((coex_dm->pre_val0x6c0 == coex_dm->cur_val0x6c0) &&
+		    (coex_dm->pre_val0x6c4 == coex_dm->cur_val0x6c4) &&
+		    (coex_dm->pre_val0x6c8 == coex_dm->cur_val0x6c8) &&
+		    (coex_dm->pre_val0x6cc == coex_dm->cur_val0x6cc))
+			return;
+	}
+	halbtc8192e2ant_set_coex_table(btcoexist, val0x6c0, val0x6c4,
+				       val0x6c8, val0x6cc);
+
+	coex_dm->pre_val0x6c0 = coex_dm->cur_val0x6c0;
+	coex_dm->pre_val0x6c4 = coex_dm->cur_val0x6c4;
+	coex_dm->pre_val0x6c8 = coex_dm->cur_val0x6c8;
+	coex_dm->pre_val0x6cc = coex_dm->cur_val0x6cc;
+}
+
+void halbtc8192e2ant_coex_table_with_type(struct btc_coexist *btcoexist,
+					  bool force_exec, u8 type)
+{
+	switch (type) {
+	case 0:
+		halbtc8192e2ant_coex_table(btcoexist, force_exec, 0x55555555,
+					   0x5a5a5a5a, 0xffffff, 0x3);
+		break;
+	case 1:
+		halbtc8192e2ant_coex_table(btcoexist, force_exec, 0x5a5a5a5a,
+					   0x5a5a5a5a, 0xffffff, 0x3);
+		break;
+	case 2:
+		halbtc8192e2ant_coex_table(btcoexist, force_exec, 0x55555555,
+					   0x5ffb5ffb, 0xffffff, 0x3);
+		break;
+	case 3:
+		halbtc8192e2ant_coex_table(btcoexist, force_exec, 0xdfffdfff,
+					   0x5fdb5fdb, 0xffffff, 0x3);
+		break;
+	case 4:
+		halbtc8192e2ant_coex_table(btcoexist, force_exec, 0xdfffdfff,
+					   0x5ffb5ffb, 0xffffff, 0x3);
+		break;
+	default:
+		break;
+	}
+}
+
+void halbtc8192e2ant_set_fw_ignore_wlanact(struct btc_coexist *btcoexist,
+					   bool enable)
+{
+	u8 h2c_parameter[1] = {0};
+
+	if (enable)
+		h2c_parameter[0] |= BIT0; /* function enable */
+
+	BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_EXEC,
+		  "[BTCoex]set FW for BT Ignore Wlan_Act, FW write 0x63=0x%x\n",
+		  h2c_parameter[0]);
+
+	btcoexist->btc_fill_h2c(btcoexist, 0x63, 1, h2c_parameter);
+}
+
+void halbtc8192e2ant_IgnoreWlanAct(struct btc_coexist *btcoexist,
+				   bool force_exec, bool enable)
+{
+	BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW,
+		  "[BTCoex], %s turn Ignore WlanAct %s\n",
+		  (force_exec? "force to":""), (enable? "ON":"OFF"));
+	coex_dm->cur_ignore_wlan_act = enable;
+
+	if (!force_exec) {
+		BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_DETAIL,
+			  "[BTCoex], bPreIgnoreWlanAct = %d ",
+			  coex_dm->pre_ignore_wlan_act);
+		BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_DETAIL,
+			  "bCurIgnoreWlanAct = %d!!\n",
+			  coex_dm->cur_ignore_wlan_act);
+
+		if (coex_dm->pre_ignore_wlan_act ==
+		    coex_dm->cur_ignore_wlan_act)
+			return;
+	}
+	halbtc8192e2ant_set_fw_ignore_wlanact(btcoexist, enable);
+
+	coex_dm->pre_ignore_wlan_act = coex_dm->cur_ignore_wlan_act;
+}
+
+void halbtc8192e2ant_SetFwPstdma(struct btc_coexist *btcoexist, u8 byte1,
+				 u8 byte2, u8 byte3, u8 byte4, u8 byte5)
+{
+	u8 h2c_parameter[5] = {0};
+
+	h2c_parameter[0] = byte1;
+	h2c_parameter[1] = byte2;
+	h2c_parameter[2] = byte3;
+	h2c_parameter[3] = byte4;
+	h2c_parameter[4] = byte5;
+
+	coex_dm->ps_tdma_para[0] = byte1;
+	coex_dm->ps_tdma_para[1] = byte2;
+	coex_dm->ps_tdma_para[2] = byte3;
+	coex_dm->ps_tdma_para[3] = byte4;
+	coex_dm->ps_tdma_para[4] = byte5;
+
+	BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_EXEC,
+		  "[BTCoex], FW write 0x60(5bytes)=0x%x%08x\n",
+		  h2c_parameter[0],
+		  h2c_parameter[1] << 24 | h2c_parameter[2] << 16 |
+		  h2c_parameter[3] << 8 | h2c_parameter[4]);
+
+	btcoexist->btc_fill_h2c(btcoexist, 0x60, 5, h2c_parameter);
+}
+
+void halbtc8192e2ant_sw_mechanism1(struct btc_coexist *btcoexist,
+				   bool shrink_rx_lpf, bool low_penalty_ra,
+				   bool limited_dig, bool btlan_constrain)
+{
+	halbtc8192e2ant_rf_shrink(btcoexist, NORMAL_EXEC, shrink_rx_lpf);
+}
+
+void halbtc8192e2ant_sw_mechanism2(struct btc_coexist *btcoexist,
+				   bool agc_table_shift, bool adc_backoff,
+				   bool sw_dac_swing, u32 dac_swinglvl)
+{
+	halbtc8192e2ant_AgcTable(btcoexist, NORMAL_EXEC, agc_table_shift);
+	halbtc8192e2ant_DacSwing(btcoexist, NORMAL_EXEC, sw_dac_swing,
+				 dac_swinglvl);
+}
+
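+/* Program one of the pre-defined PS-TDMA schedules into the firmware
+ * through the 5-byte 0x60 H2C command (halbtc8192e2ant_SetFwPstdma).
+ * With turn_on == false the TDMA is disabled and register 0x92c is
+ * rewritten. */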
+void halbtc8192e2ant_ps_tdma(struct btc_coexist *btcoexist,
+			     bool force_exec, bool turn_on, u8 type)
+{
+
+	BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW,
+		  "[BTCoex], %s turn %s PS TDMA, type=%d\n",
+		  (force_exec? "force to":""), (turn_on? "ON":"OFF"), type);
+	coex_dm->cur_ps_tdma_on = turn_on;
+	coex_dm->cur_ps_tdma = type;
+
+	if (!force_exec) {
+		BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_DETAIL,
+			  "[BTCoex], bPrePsTdmaOn = %d, bCurPsTdmaOn = %d!!\n",
+			  coex_dm->pre_ps_tdma_on, coex_dm->cur_ps_tdma_on);
+		BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_DETAIL,
+			  "[BTCoex], prePsTdma = %d, curPsTdma = %d!!\n",
+			  coex_dm->pre_ps_tdma, coex_dm->cur_ps_tdma);
+
+		if ((coex_dm->pre_ps_tdma_on == coex_dm->cur_ps_tdma_on) &&
+		    (coex_dm->pre_ps_tdma == coex_dm->cur_ps_tdma))
+			return;
+	}
+	if (turn_on) {
+		switch (type) {
+		case 1:
+		default:
+			halbtc8192e2ant_SetFwPstdma(btcoexist, 0xe3, 0x1a,
+						    0x1a, 0xe1, 0x90);
+			break;
+		case 2:
+			halbtc8192e2ant_SetFwPstdma(btcoexist, 0xe3, 0x12,
+						    0x12, 0xe1, 0x90);
+			break;
+		case 3:
+			halbtc8192e2ant_SetFwPstdma(btcoexist, 0xe3, 0x1c,
+						    0x3, 0xf1, 0x90);
+			break;
+		case 4:
+			halbtc8192e2ant_SetFwPstdma(btcoexist, 0xe3, 0x10,
+						    0x3, 0xf1, 0x90);
+			break;
+		case 5:
+			halbtc8192e2ant_SetFwPstdma(btcoexist, 0xe3, 0x1a,
+						    0x1a, 0x60, 0x90);
+			break;
+		case 6:
+			halbtc8192e2ant_SetFwPstdma(btcoexist, 0xe3, 0x12,
+						     0x12, 0x60, 0x90);
+			break;
+		case 7:
+			halbtc8192e2ant_SetFwPstdma(btcoexist, 0xe3, 0x1c,
+						    0x3, 0x70, 0x90);
+			break;
+		case 8:
+			halbtc8192e2ant_SetFwPstdma(btcoexist, 0xa3, 0x10,
+						    0x3, 0x70, 0x90);
+			break;
+		case 9:
+			halbtc8192e2ant_SetFwPstdma(btcoexist, 0xe3, 0x1a,
+						    0x1a, 0xe1, 0x10);
+			break;
+		case 10:
+			halbtc8192e2ant_SetFwPstdma(btcoexist, 0xe3, 0x12,
+						    0x12, 0xe1, 0x10);
+			break;
+		case 11:
+			halbtc8192e2ant_SetFwPstdma(btcoexist, 0xe3, 0x1c,
+						    0x3, 0xf1, 0x10);
+			break;
+		case 12:
+			halbtc8192e2ant_SetFwPstdma(btcoexist, 0xe3, 0x10,
+						    0x3, 0xf1, 0x10);
+			break;
+		case 13:
+			halbtc8192e2ant_SetFwPstdma(btcoexist, 0xe3, 0x1a,
+						    0x1a, 0xe0, 0x10);
+			break;
+		case 14:
+			halbtc8192e2ant_SetFwPstdma(btcoexist, 0xe3, 0x12,
+						    0x12, 0xe0, 0x10);
+			break;
+		case 15:
+			halbtc8192e2ant_SetFwPstdma(btcoexist, 0xe3, 0x1c,
+						    0x3, 0xf0, 0x10);
+			break;
+		case 16:
+			halbtc8192e2ant_SetFwPstdma(btcoexist, 0xe3, 0x12,
+						    0x3, 0xf0, 0x10);
+			break;
+		case 17:
+			halbtc8192e2ant_SetFwPstdma(btcoexist, 0x61, 0x20,
+						    0x03, 0x10, 0x10);
+			break;
+		case 18:
+			halbtc8192e2ant_SetFwPstdma(btcoexist, 0xe3, 0x5,
+						    0x5, 0xe1, 0x90);
+			break;
+		case 19:
+			halbtc8192e2ant_SetFwPstdma(btcoexist, 0xe3, 0x25,
+						    0x25, 0xe1, 0x90);
+			break;
+		case 20:
+			halbtc8192e2ant_SetFwPstdma(btcoexist, 0xe3, 0x25,
+						    0x25, 0x60, 0x90);
+			break;
+		case 21:
+			halbtc8192e2ant_SetFwPstdma(btcoexist, 0xe3, 0x15,
+						    0x03, 0x70, 0x90);
+			break;
+		case 71:
+			halbtc8192e2ant_SetFwPstdma(btcoexist, 0xe3, 0x1a,
+						    0x1a, 0xe1, 0x90);
+			break;
+		}
+	} else {
+		/* disable PS tdma */
+		switch (type) {
+		default:
+		case 0:
+			halbtc8192e2ant_SetFwPstdma(btcoexist, 0x8, 0x0, 0x0,
+						    0x0, 0x0);
+			btcoexist->btc_write_1byte(btcoexist, 0x92c, 0x4);
+			break;
+		case 1:
+			halbtc8192e2ant_SetFwPstdma(btcoexist, 0x0, 0x0, 0x0,
+						    0x8, 0x0);
+			mdelay(5);
+			btcoexist->btc_write_1byte(btcoexist, 0x92c, 0x20);
+			break;
+		}
+	}
+
+	/* update pre state */
+	coex_dm->pre_ps_tdma_on = coex_dm->cur_ps_tdma_on;
+	coex_dm->pre_ps_tdma = coex_dm->cur_ps_tdma;
+}
+
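+/* Switch WiFi between 1SS and 2SS operation: refresh the rate mask,
+ * reprogram the OFDM/CCK path registers and update the MIMO power-save
+ * setting (BTC_SET_ACT_SEND_MIMO_PS). */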
+void halbtc8192e2ant_set_switch_sstype(struct btc_coexist *btcoexist, u8 sstype)
+{
+	u8 mimops = BTC_MIMO_PS_DYNAMIC;
+	u32 disra_mask = 0x0;
+
+	BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
+		  "[BTCoex], REAL set SS Type = %d\n", sstype);
+
+	disra_mask = halbtc8192e2ant_decidera_mask(btcoexist, sstype,
+						   coex_dm->curra_masktype);
+	halbtc8192e2ant_Updatera_mask(btcoexist, FORCE_EXEC, disra_mask);
+
+	if (sstype == 1) {
+		halbtc8192e2ant_ps_tdma(btcoexist, FORCE_EXEC, false, 1);
+		/* switch ofdm path */
+		btcoexist->btc_write_1byte(btcoexist, 0xc04, 0x11);
+		btcoexist->btc_write_1byte(btcoexist, 0xd04, 0x1);
+		btcoexist->btc_write_4byte(btcoexist, 0x90c, 0x81111111);
+		/* switch cck path */
+		btcoexist->btc_write_1byte_bitmask(btcoexist, 0xe77, 0x4, 0x1);
+		btcoexist->btc_write_1byte(btcoexist, 0xa07, 0x81);
+		mimops = BTC_MIMO_PS_STATIC;
+	} else if (sstype == 2) {
+		halbtc8192e2ant_ps_tdma(btcoexist, FORCE_EXEC, false, 0);
+		btcoexist->btc_write_1byte(btcoexist, 0xc04, 0x33);
+		btcoexist->btc_write_1byte(btcoexist, 0xd04, 0x3);
+		btcoexist->btc_write_4byte(btcoexist, 0x90c, 0x81121313);
+		btcoexist->btc_write_1byte_bitmask(btcoexist, 0xe77, 0x4, 0x0);
+		btcoexist->btc_write_1byte(btcoexist, 0xa07, 0x41);
+		mimops = BTC_MIMO_PS_DYNAMIC;
+	}
+	/* set rx 1ss or 2ss */
+	btcoexist->btc_set(btcoexist, BTC_SET_ACT_SEND_MIMO_PS, &mimops);
+}
+
+void halbtc8192e2ant_switch_sstype(struct btc_coexist *btcoexist,
+				   bool force_exec, u8 new_sstype)
+{
+	BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
+		  "[BTCoex], %s Switch SS Type = %d\n",
+		  (force_exec? "force to":""), new_sstype);
+	coex_dm->cur_sstype = new_sstype;
+
+	if (!force_exec) {
+		if (coex_dm->pre_sstype == coex_dm->cur_sstype)
+			return;
+	}
+	halbtc8192e2ant_set_switch_sstype(btcoexist, coex_dm->cur_sstype);
+
+	coex_dm->pre_sstype = coex_dm->cur_sstype;
+}
+
+void halbtc8192e2ant_coex_alloff(struct btc_coexist *btcoexist)
+{
+	/* fw all off */
+	halbtc8192e2ant_ps_tdma(btcoexist, NORMAL_EXEC, false, 1);
+	halbtc8192e2ant_fw_dac_swinglvl(btcoexist, NORMAL_EXEC, 6);
+	halbtc8192e2ant_dec_btpwr(btcoexist, NORMAL_EXEC, 0);
+
+	/* sw all off */
+	halbtc8192e2ant_sw_mechanism1(btcoexist, false, false, false, false);
+	halbtc8192e2ant_sw_mechanism2(btcoexist, false, false, false, 0x18);
+
+	/* hw all off */
+	halbtc8192e2ant_coex_table_with_type(btcoexist, NORMAL_EXEC, 0);
+}
+
+void halbtc8192e2ant_init_coex_dm(struct btc_coexist *btcoexist)
+{
+	/* force to reset coex mechanism */
+
+	halbtc8192e2ant_ps_tdma(btcoexist, FORCE_EXEC, false, 1);
+	halbtc8192e2ant_fw_dac_swinglvl(btcoexist, FORCE_EXEC, 6);
+	halbtc8192e2ant_dec_btpwr(btcoexist, FORCE_EXEC, 0);
+
+	halbtc8192e2ant_coex_table_with_type(btcoexist, FORCE_EXEC, 0);
+	halbtc8192e2ant_switch_sstype(btcoexist, FORCE_EXEC, 2);
+
+	halbtc8192e2ant_sw_mechanism1(btcoexist, false, false, false, false);
+	halbtc8192e2ant_sw_mechanism2(btcoexist, false, false, false, 0x18);
+}
+
+void halbtc8192e2ant_action_bt_inquiry(struct btc_coexist *btcoexist)
+{
+	bool low_pwr_disable = true;
+
+	btcoexist->btc_set(btcoexist, BTC_SET_ACT_DISABLE_LOW_POWER,
+			   &low_pwr_disable);
+
+	halbtc8192e2ant_switch_sstype(btcoexist, NORMAL_EXEC, 1);
+
+	halbtc8192e2ant_coex_table_with_type(btcoexist, NORMAL_EXEC, 2);
+	halbtc8192e2ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 3);
+	halbtc8192e2ant_fw_dac_swinglvl(btcoexist, NORMAL_EXEC, 6);
+	halbtc8192e2ant_dec_btpwr(btcoexist, NORMAL_EXEC, 0);
+
+	halbtc8192e2ant_sw_mechanism1(btcoexist, false, false, false, false);
+	halbtc8192e2ant_sw_mechanism2(btcoexist, false, false, false, 0x18);
+}
+
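+/* Handle the common states that need no profile-specific algorithm
+ * (WiFi not connected, BT idle, or WiFi connected but idle).  Returns
+ * true if such a state was handled here, false if the caller should run
+ * the profile-specific coexistence actions. */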
+bool halbtc8192e2ant_is_common_action(struct btc_coexist *btcoexist)
+{
+	struct btc_bt_link_info *bt_link_info = &btcoexist->bt_link_info;
+	bool common = false, wifi_connected = false, wifi_busy = false;
+	bool bt_hson = false, low_pwr_disable = false;
+
+	btcoexist->btc_get(btcoexist, BTC_GET_BL_HS_OPERATION, &bt_hson);
+	btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_CONNECTED,
+			   &wifi_connected);
+	btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_BUSY, &wifi_busy);
+
+	if (bt_link_info->sco_exist || bt_link_info->hid_exist)
+		halbtc8192e2ant_limited_tx(btcoexist, NORMAL_EXEC, 1, 0, 0, 0);
+	else
+		halbtc8192e2ant_limited_tx(btcoexist, NORMAL_EXEC, 0, 0, 0, 0);
+
+	if (!wifi_connected) {
+		low_pwr_disable = false;
+		btcoexist->btc_set(btcoexist, BTC_SET_ACT_DISABLE_LOW_POWER,
+				   &low_pwr_disable);
+
+		BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
+			  "[BTCoex], Wifi non-connected idle!!\n");
+
+		if ((BT_8192E_2ANT_BT_STATUS_NON_CONNECTED_IDLE ==
+		     coex_dm->bt_status) ||
+		    (BT_8192E_2ANT_BT_STATUS_CONNECTED_IDLE ==
+		     coex_dm->bt_status)) {
+			halbtc8192e2ant_switch_sstype(btcoexist, NORMAL_EXEC,
+						      2);
+			halbtc8192e2ant_coex_table_with_type(btcoexist,
+							     NORMAL_EXEC, 1);
+			halbtc8192e2ant_ps_tdma(btcoexist, NORMAL_EXEC, false,
+						0);
+		} else {
+			halbtc8192e2ant_switch_sstype(btcoexist, NORMAL_EXEC,
+						      1);
+			halbtc8192e2ant_coex_table_with_type(btcoexist,
+							     NORMAL_EXEC, 0);
+			halbtc8192e2ant_ps_tdma(btcoexist, NORMAL_EXEC, false,
+						1);
+		}
+
+		halbtc8192e2ant_fw_dac_swinglvl(btcoexist, NORMAL_EXEC, 6);
+		halbtc8192e2ant_dec_btpwr(btcoexist, NORMAL_EXEC, 0);
+
+		halbtc8192e2ant_sw_mechanism1(btcoexist, false, false, false,
+					      false);
+		halbtc8192e2ant_sw_mechanism2(btcoexist, false, false, false,
+					      0x18);
+
+		common = true;
+	} else {
+		if (BT_8192E_2ANT_BT_STATUS_NON_CONNECTED_IDLE ==
+		    coex_dm->bt_status) {
+			low_pwr_disable = false;
+			btcoexist->btc_set(btcoexist,
+					   BTC_SET_ACT_DISABLE_LOW_POWER,
+					   &low_pwr_disable);
+
+			BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
+				  "Wifi connected + BT non connected-idle!!\n");
+
+			halbtc8192e2ant_switch_sstype(btcoexist, NORMAL_EXEC,
+						      2);
+			halbtc8192e2ant_coex_table_with_type(btcoexist,
+							     NORMAL_EXEC, 1);
+			halbtc8192e2ant_ps_tdma(btcoexist, NORMAL_EXEC, false,
+						0);
+			halbtc8192e2ant_fw_dac_swinglvl(btcoexist, NORMAL_EXEC,
+							6);
+			halbtc8192e2ant_dec_btpwr(btcoexist, NORMAL_EXEC, 0);
+
+			halbtc8192e2ant_sw_mechanism1(btcoexist, false, false,
+						      false, false);
+			halbtc8192e2ant_sw_mechanism2(btcoexist, false, false,
+						      false, 0x18);
+
+			common = true;
+		} else if (BT_8192E_2ANT_BT_STATUS_CONNECTED_IDLE ==
+			   coex_dm->bt_status) {
+			low_pwr_disable = true;
+			btcoexist->btc_set(btcoexist,
+					   BTC_SET_ACT_DISABLE_LOW_POWER,
+					   &low_pwr_disable);
+
+			if (bt_hson)
+				return false;
+			BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
+				  "Wifi connected + BT connected-idle!!\n");
+
+			halbtc8192e2ant_switch_sstype(btcoexist, NORMAL_EXEC,
+						      2);
+			halbtc8192e2ant_coex_table_with_type(btcoexist,
+							     NORMAL_EXEC, 1);
+			halbtc8192e2ant_ps_tdma(btcoexist, NORMAL_EXEC, false,
+						0);
+			halbtc8192e2ant_fw_dac_swinglvl(btcoexist, NORMAL_EXEC,
+							6);
+			halbtc8192e2ant_dec_btpwr(btcoexist, NORMAL_EXEC, 0);
+
+			halbtc8192e2ant_sw_mechanism1(btcoexist, true, false,
+						      false, false);
+			halbtc8192e2ant_sw_mechanism2(btcoexist, false, false,
+						      false, 0x18);
+
+			common = true;
+		} else {
+			low_pwr_disable = true;
+			btcoexist->btc_set(btcoexist,
+					   BTC_SET_ACT_DISABLE_LOW_POWER,
+					   &low_pwr_disable);
+
+			if (wifi_busy) {
+				BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
+					  "Wifi Connected-Busy + BT Busy!!\n");
+				common = false;
+			} else {
+				BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
+					  "Wifi Connected-Idle + BT Busy!!\n");
+
+				halbtc8192e2ant_switch_sstype(btcoexist,
+							      NORMAL_EXEC, 1);
+				halbtc8192e2ant_coex_table_with_type(btcoexist,
+								    NORMAL_EXEC,
+								    2);
+				halbtc8192e2ant_ps_tdma(btcoexist, NORMAL_EXEC,
+							true, 21);
+				halbtc8192e2ant_fw_dac_swinglvl(btcoexist,
+								NORMAL_EXEC, 6);
+				halbtc8192e2ant_dec_btpwr(btcoexist,
+							  NORMAL_EXEC, 0);
+				halbtc8192e2ant_sw_mechanism1(btcoexist, false,
+							      false, false,
+							      false);
+				halbtc8192e2ant_sw_mechanism2(btcoexist, false,
+							      false, false,
+							      0x18);
+				common = true;
+			}
+		}
+	}
+	return common;
+}
+
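+/* Adaptively tune the PS-TDMA WiFi duration.  On the first run a
+ * schedule is picked from sco_hid/tx_pause/max_interval; afterwards the
+ * BT retry count from BT_Info decides whether the WiFi slot should be
+ * lengthened (result = 1) or shortened (result = -1). */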
+void halbtc8192e2ant_tdma_duration_adjust(struct btc_coexist *btcoexist,
+					  bool sco_hid, bool tx_pause,
+					  u8 max_interval)
+{
+	static int up, dn, m, n, wait_cnt;
+	/* 0: no change, +1: increase WiFi duration,
+	 * -1: decrease WiFi duration */
+	int result;
+	u8 retry_cnt = 0;
+
+	BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW,
+		  "[BTCoex], TdmaDurationAdjust()\n");
+
+	if (!coex_dm->auto_tdma_adjust) {
+		coex_dm->auto_tdma_adjust = true;
+		BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_DETAIL,
+			  "[BTCoex], first run TdmaDurationAdjust()!!\n");
+		if (sco_hid) {
+			if (tx_pause) {
+				if (max_interval == 1) {
+					halbtc8192e2ant_ps_tdma(btcoexist,
+								NORMAL_EXEC,
+								true, 13);
+					coex_dm->ps_tdma_du_adj_type = 13;
+				} else if (max_interval == 2) {
+					halbtc8192e2ant_ps_tdma(btcoexist,
+								NORMAL_EXEC,
+								true, 14);
+					coex_dm->ps_tdma_du_adj_type = 14;
+				} else if (max_interval == 3) {
+					halbtc8192e2ant_ps_tdma(btcoexist,
+								NORMAL_EXEC,
+								true, 15);
+					coex_dm->ps_tdma_du_adj_type = 15;
+				} else {
+					halbtc8192e2ant_ps_tdma(btcoexist,
+								NORMAL_EXEC,
+								true, 15);
+					coex_dm->ps_tdma_du_adj_type = 15;
+				}
+			} else {
+				if (max_interval == 1) {
+					halbtc8192e2ant_ps_tdma(btcoexist,
+								NORMAL_EXEC,
+								true, 9);
+					coex_dm->ps_tdma_du_adj_type = 9;
+				} else if (max_interval == 2) {
+					halbtc8192e2ant_ps_tdma(btcoexist,
+								NORMAL_EXEC,
+								true, 10);
+					coex_dm->ps_tdma_du_adj_type = 10;
+				} else if (max_interval == 3) {
+					halbtc8192e2ant_ps_tdma(btcoexist,
+								NORMAL_EXEC,
+								true, 11);
+					coex_dm->ps_tdma_du_adj_type = 11;
+				} else {
+					halbtc8192e2ant_ps_tdma(btcoexist,
+								NORMAL_EXEC,
+								true, 11);
+					coex_dm->ps_tdma_du_adj_type = 11;
+				}
+			}
+		} else {
+			if (tx_pause) {
+				if (max_interval == 1) {
+					halbtc8192e2ant_ps_tdma(btcoexist,
+								NORMAL_EXEC,
+								true, 5);
+					coex_dm->ps_tdma_du_adj_type = 5;
+				} else if (max_interval == 2) {
+					halbtc8192e2ant_ps_tdma(btcoexist,
+								NORMAL_EXEC,
+								true, 6);
+					coex_dm->ps_tdma_du_adj_type = 6;
+				} else if (max_interval == 3) {
+					halbtc8192e2ant_ps_tdma(btcoexist,
+								NORMAL_EXEC,
+								true, 7);
+					coex_dm->ps_tdma_du_adj_type = 7;
+				} else {
+					halbtc8192e2ant_ps_tdma(btcoexist,
+								NORMAL_EXEC,
+								true, 7);
+					coex_dm->ps_tdma_du_adj_type = 7;
+				}
+			} else {
+				if (max_interval == 1) {
+					halbtc8192e2ant_ps_tdma(btcoexist,
+								NORMAL_EXEC,
+								true, 1);
+					coex_dm->ps_tdma_du_adj_type = 1;
+				} else if (max_interval == 2) {
+					halbtc8192e2ant_ps_tdma(btcoexist,
+								NORMAL_EXEC,
+								true, 2);
+					coex_dm->ps_tdma_du_adj_type = 2;
+				} else if (max_interval == 3) {
+					halbtc8192e2ant_ps_tdma(btcoexist,
+								NORMAL_EXEC,
+								true, 3);
+					coex_dm->ps_tdma_du_adj_type = 3;
+				} else {
+					halbtc8192e2ant_ps_tdma(btcoexist,
+								NORMAL_EXEC,
+								true, 3);
+					coex_dm->ps_tdma_du_adj_type = 3;
+				}
+			}
+		}
+
+		up = 0;
+		dn = 0;
+		m = 1;
+		n = 3;
+		result = 0;
+		wait_cnt = 0;
+	} else {
+		/* acquire the BT TRx retry count from BT_Info byte2 */
+		retry_cnt = coex_sta->bt_retry_cnt;
+		BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_DETAIL,
+			  "[BTCoex], retry_cnt = %d\n", retry_cnt);
+		BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_DETAIL,
+			  "[BTCoex], up=%d, dn=%d, m=%d, n=%d, wait_cnt=%d\n",
+			  up, dn, m, n, wait_cnt);
+		result = 0;
+		wait_cnt++;
+		/* no retry in the last 2-second duration */
+		if (retry_cnt == 0) {
+			up++;
+			dn--;
+
+			if (dn <= 0)
+				dn = 0;
+
+			if (up >= n) {
+				wait_cnt = 0;
+				n = 3;
+				up = 0;
+				dn = 0;
+				result = 1;
+				BTC_PRINT(BTC_MSG_ALGORITHM,
+					  ALGO_TRACE_FW_DETAIL,
+					  "[BTCoex]Increase wifi duration!!\n");
+			}
+		} else if (retry_cnt <= 3) {
+			up--;
+			dn++;
+
+			if (up <= 0)
+				up = 0;
+
+			if (dn == 2) {
+				if (wait_cnt <= 2)
+					m++;
+				else
+					m = 1;
+
+				if (m >= 20)
+					m = 20;
+
+				n = 3 * m;
+				up = 0;
+				dn = 0;
+				wait_cnt = 0;
+				result = -1;
+				BTC_PRINT(BTC_MSG_ALGORITHM,
+					  ALGO_TRACE_FW_DETAIL,
+					  "Reduce wifi duration for retry<3\n");
+			}
+		} else {
+			if (wait_cnt == 1)
+				m++;
+			else
+				m = 1;
+
+			if (m >= 20)
+				m = 20;
+
+			n = 3 * m;
+			up = 0;
+			dn = 0;
+			wait_cnt = 0;
+			result = -1;
+			BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_DETAIL,
+				  "Decrease wifi duration for retryCounter>3!!\n");
+		}
+
+		BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_DETAIL,
+			  "[BTCoex], max Interval = %d\n", max_interval);
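+		/* Remap the current PS TDMA case according to max_interval
+		 * and the result above (+1 lengthens, -1 shortens the WiFi
+		 * slot). */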
+		if (max_interval == 1) {
+			if (tx_pause) {
+				BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_DETAIL,
+					  "[BTCoex], TxPause = 1\n");
+
+				if (coex_dm->cur_ps_tdma == 71) {
+					halbtc8192e2ant_ps_tdma(btcoexist,
+								NORMAL_EXEC,
+								true, 5);
+					coex_dm->ps_tdma_du_adj_type = 5;
+				} else if (coex_dm->cur_ps_tdma == 1) {
+					halbtc8192e2ant_ps_tdma(btcoexist,
+								NORMAL_EXEC,
+								true, 5);
+					coex_dm->ps_tdma_du_adj_type = 5;
+				} else if (coex_dm->cur_ps_tdma == 2) {
+					halbtc8192e2ant_ps_tdma(btcoexist,
+								NORMAL_EXEC,
+								true, 6);
+					coex_dm->ps_tdma_du_adj_type = 6;
+				} else if (coex_dm->cur_ps_tdma == 3) {
+					halbtc8192e2ant_ps_tdma(btcoexist,
+								NORMAL_EXEC,
+								true, 7);
+					coex_dm->ps_tdma_du_adj_type = 7;
+				} else if (coex_dm->cur_ps_tdma == 4) {
+					halbtc8192e2ant_ps_tdma(btcoexist,
+								NORMAL_EXEC,
+								true, 8);
+					coex_dm->ps_tdma_du_adj_type = 8;
+				}
+				if (coex_dm->cur_ps_tdma == 9) {
+					halbtc8192e2ant_ps_tdma(btcoexist,
+								NORMAL_EXEC,
+								true, 13);
+					coex_dm->ps_tdma_du_adj_type = 13;
+				} else if (coex_dm->cur_ps_tdma == 10) {
+					halbtc8192e2ant_ps_tdma(btcoexist,
+								NORMAL_EXEC,
+								true, 14);
+					coex_dm->ps_tdma_du_adj_type = 14;
+				} else if (coex_dm->cur_ps_tdma == 11) {
+					halbtc8192e2ant_ps_tdma(btcoexist,
+								NORMAL_EXEC,
+								true, 15);
+					coex_dm->ps_tdma_du_adj_type = 15;
+				} else if (coex_dm->cur_ps_tdma == 12) {
+					halbtc8192e2ant_ps_tdma(btcoexist,
+								NORMAL_EXEC,
+								true, 16);
+					coex_dm->ps_tdma_du_adj_type = 16;
+				}
+
+				if (result == -1) {
+					if (coex_dm->cur_ps_tdma == 5) {
+						halbtc8192e2ant_ps_tdma(
+								    btcoexist,
+								    NORMAL_EXEC,
+								    true, 6);
+						coex_dm->ps_tdma_du_adj_type =
+									     6;
+					} else if (coex_dm->cur_ps_tdma == 6) {
+						halbtc8192e2ant_ps_tdma(
+								    btcoexist,
+								    NORMAL_EXEC,
+								    true, 7);
+						coex_dm->ps_tdma_du_adj_type =
+									     7;
+					} else if (coex_dm->cur_ps_tdma == 7) {
+						halbtc8192e2ant_ps_tdma(
+								    btcoexist,
+								    NORMAL_EXEC,
+								    true, 8);
+						coex_dm->ps_tdma_du_adj_type =
+									     8;
+					} else if (coex_dm->cur_ps_tdma == 13) {
+						halbtc8192e2ant_ps_tdma(
+								    btcoexist,
+								    NORMAL_EXEC,
+								    true, 14);
+						coex_dm->ps_tdma_du_adj_type =
+									     14;
+					} else if (coex_dm->cur_ps_tdma == 14) {
+						halbtc8192e2ant_ps_tdma(
+								    btcoexist,
+								    NORMAL_EXEC,
+								    true, 15);
+						coex_dm->ps_tdma_du_adj_type =
+									     15;
+					} else if (coex_dm->cur_ps_tdma == 15) {
+						halbtc8192e2ant_ps_tdma(
+								    btcoexist,
+								    NORMAL_EXEC,
+								    true, 16);
+						coex_dm->ps_tdma_du_adj_type =
+									     16;
+					}
+				} else if (result == 1) {
+					if (coex_dm->cur_ps_tdma == 8) {
+						halbtc8192e2ant_ps_tdma(
+								    btcoexist,
+								    NORMAL_EXEC,
+								    true, 7);
+						coex_dm->ps_tdma_du_adj_type =
+									     7;
+					} else if (coex_dm->cur_ps_tdma == 7) {
+						halbtc8192e2ant_ps_tdma(
+								    btcoexist,
+								    NORMAL_EXEC,
+								    true, 6);
+						coex_dm->ps_tdma_du_adj_type =
+									     6;
+					} else if (coex_dm->cur_ps_tdma == 6) {
+						halbtc8192e2ant_ps_tdma(
+								    btcoexist,
+								    NORMAL_EXEC,
+								    true, 5);
+						coex_dm->ps_tdma_du_adj_type =
+									     5;
+					} else if (coex_dm->cur_ps_tdma == 16) {
+						halbtc8192e2ant_ps_tdma(
+								    btcoexist,
+								    NORMAL_EXEC,
+								    true, 15);
+						coex_dm->ps_tdma_du_adj_type =
+									     15;
+					} else if (coex_dm->cur_ps_tdma == 15) {
+						halbtc8192e2ant_ps_tdma(
+								    btcoexist,
+								    NORMAL_EXEC,
+								    true, 14);
+						coex_dm->ps_tdma_du_adj_type =
+									     14;
+					} else if (coex_dm->cur_ps_tdma == 14) {
+						halbtc8192e2ant_ps_tdma(
+								    btcoexist,
+								    NORMAL_EXEC,
+								    true, 13);
+						coex_dm->ps_tdma_du_adj_type =
+									     13;
+					}
+				}
+			} else {
+				BTC_PRINT(BTC_MSG_ALGORITHM,
+					  ALGO_TRACE_FW_DETAIL,
+					  "[BTCoex], TxPause = 0\n");
+				if (coex_dm->cur_ps_tdma == 5) {
+					halbtc8192e2ant_ps_tdma(btcoexist,
+								NORMAL_EXEC,
+								true, 71);
+					coex_dm->ps_tdma_du_adj_type = 71;
+				} else if (coex_dm->cur_ps_tdma == 6) {
+					halbtc8192e2ant_ps_tdma(btcoexist,
+								NORMAL_EXEC,
+								true, 2);
+					coex_dm->ps_tdma_du_adj_type = 2;
+				} else if (coex_dm->cur_ps_tdma == 7) {
+					halbtc8192e2ant_ps_tdma(btcoexist,
+								NORMAL_EXEC,
+								true, 3);
+					coex_dm->ps_tdma_du_adj_type = 3;
+				} else if (coex_dm->cur_ps_tdma == 8) {
+					halbtc8192e2ant_ps_tdma(btcoexist,
+								NORMAL_EXEC,
+								true, 4);
+					coex_dm->ps_tdma_du_adj_type = 4;
+				}
+				if (coex_dm->cur_ps_tdma == 13) {
+					halbtc8192e2ant_ps_tdma(btcoexist,
+								NORMAL_EXEC,
+								true, 9);
+					coex_dm->ps_tdma_du_adj_type = 9;
+				} else if (coex_dm->cur_ps_tdma == 14) {
+					halbtc8192e2ant_ps_tdma(btcoexist,
+								NORMAL_EXEC,
+								true, 10);
+					coex_dm->ps_tdma_du_adj_type = 10;
+				} else if (coex_dm->cur_ps_tdma == 15) {
+					halbtc8192e2ant_ps_tdma(btcoexist,
+								NORMAL_EXEC,
+								true, 11);
+					coex_dm->ps_tdma_du_adj_type = 11;
+				} else if (coex_dm->cur_ps_tdma == 16) {
+					halbtc8192e2ant_ps_tdma(btcoexist,
+								NORMAL_EXEC,
+								true, 12);
+					coex_dm->ps_tdma_du_adj_type = 12;
+				}
+
+				if (result == -1) {
+					if (coex_dm->cur_ps_tdma == 71) {
+						halbtc8192e2ant_ps_tdma(
+								    btcoexist,
+								    NORMAL_EXEC,
+								    true, 1);
+						coex_dm->ps_tdma_du_adj_type =
+									     1;
+					} else if (coex_dm->cur_ps_tdma == 1) {
+						halbtc8192e2ant_ps_tdma(
+								    btcoexist,
+								    NORMAL_EXEC,
+								    true, 2);
+						coex_dm->ps_tdma_du_adj_type =
+									     2;
+					} else if (coex_dm->cur_ps_tdma == 2) {
+						halbtc8192e2ant_ps_tdma(
+								    btcoexist,
+								    NORMAL_EXEC,
+								    true, 3);
+						coex_dm->ps_tdma_du_adj_type =
+									     3;
+					} else if (coex_dm->cur_ps_tdma == 3) {
+						halbtc8192e2ant_ps_tdma(
+								    btcoexist,
+								    NORMAL_EXEC,
+								    true, 4);
+						coex_dm->ps_tdma_du_adj_type =
+									     4;
+					} else if (coex_dm->cur_ps_tdma == 9) {
+						halbtc8192e2ant_ps_tdma(
+								    btcoexist,
+								    NORMAL_EXEC,
+								    true, 10);
+						coex_dm->ps_tdma_du_adj_type =
+									     10;
+					} else if (coex_dm->cur_ps_tdma == 10) {
+						halbtc8192e2ant_ps_tdma(
+								    btcoexist,
+								    NORMAL_EXEC,
+								    true, 11);
+						coex_dm->ps_tdma_du_adj_type =
+									     11;
+					} else if (coex_dm->cur_ps_tdma == 11) {
+						halbtc8192e2ant_ps_tdma(
+								    btcoexist,
+								    NORMAL_EXEC,
+								    true, 12);
+						coex_dm->ps_tdma_du_adj_type =
+									     12;
+					}
+				} else if (result == 1) {
+					if (coex_dm->cur_ps_tdma == 4) {
+						halbtc8192e2ant_ps_tdma(
+								    btcoexist,
+								    NORMAL_EXEC,
+								    true, 3);
+						coex_dm->ps_tdma_du_adj_type =
+									     3;
+					} else if (coex_dm->cur_ps_tdma == 3) {
+						halbtc8192e2ant_ps_tdma(
+								    btcoexist,
+								    NORMAL_EXEC,
+								    true, 2);
+						coex_dm->ps_tdma_du_adj_type =
+									     2;
+					} else if (coex_dm->cur_ps_tdma == 2) {
+						halbtc8192e2ant_ps_tdma(
+								    btcoexist,
+								    NORMAL_EXEC,
+								    true, 1);
+						coex_dm->ps_tdma_du_adj_type =
+									     1;
+					} else if (coex_dm->cur_ps_tdma == 1) {
+						halbtc8192e2ant_ps_tdma(
+								    btcoexist,
+								    NORMAL_EXEC,
+								    true, 71);
+						coex_dm->ps_tdma_du_adj_type =
+									     71;
+					} else if (coex_dm->cur_ps_tdma == 12) {
+						halbtc8192e2ant_ps_tdma(
+								    btcoexist,
+								    NORMAL_EXEC,
+								    true, 11);
+						coex_dm->ps_tdma_du_adj_type =
+									     11;
+					} else if (coex_dm->cur_ps_tdma == 11) {
+						halbtc8192e2ant_ps_tdma(
+								    btcoexist,
+								    NORMAL_EXEC,
+								    true, 10);
+						coex_dm->ps_tdma_du_adj_type =
+									     10;
+					} else if (coex_dm->cur_ps_tdma == 10) {
+						halbtc8192e2ant_ps_tdma(
+								    btcoexist,
+								    NORMAL_EXEC,
+								    true, 9);
+						coex_dm->ps_tdma_du_adj_type =
+									     9;
+					}
+				}
+			}
+		} else if (max_interval == 2) {
+			if (tx_pause) {
+				BTC_PRINT(BTC_MSG_ALGORITHM,
+					  ALGO_TRACE_FW_DETAIL,
+					  "[BTCoex], TxPause = 1\n");
+				if (coex_dm->cur_ps_tdma == 1) {
+					halbtc8192e2ant_ps_tdma(btcoexist,
+								NORMAL_EXEC,
+								true, 6);
+					coex_dm->ps_tdma_du_adj_type = 6;
+				} else if (coex_dm->cur_ps_tdma == 2) {
+					halbtc8192e2ant_ps_tdma(btcoexist,
+								NORMAL_EXEC,
+								true, 6);
+					coex_dm->ps_tdma_du_adj_type = 6;
+				} else if (coex_dm->cur_ps_tdma == 3) {
+					halbtc8192e2ant_ps_tdma(btcoexist,
+								NORMAL_EXEC,
+								true, 7);
+					coex_dm->ps_tdma_du_adj_type = 7;
+				} else if (coex_dm->cur_ps_tdma == 4) {
+					halbtc8192e2ant_ps_tdma(btcoexist,
+								NORMAL_EXEC,
+								true, 8);
+					coex_dm->ps_tdma_du_adj_type = 8;
+				}
+				if (coex_dm->cur_ps_tdma == 9) {
+					halbtc8192e2ant_ps_tdma(btcoexist,
+								NORMAL_EXEC,
+								true, 14);
+					coex_dm->ps_tdma_du_adj_type = 14;
+				} else if (coex_dm->cur_ps_tdma == 10) {
+					halbtc8192e2ant_ps_tdma(btcoexist,
+								NORMAL_EXEC,
+								true, 14);
+					coex_dm->ps_tdma_du_adj_type = 14;
+				} else if (coex_dm->cur_ps_tdma == 11) {
+					halbtc8192e2ant_ps_tdma(btcoexist,
+								NORMAL_EXEC,
+								true, 15);
+					coex_dm->ps_tdma_du_adj_type = 15;
+				} else if (coex_dm->cur_ps_tdma == 12) {
+					halbtc8192e2ant_ps_tdma(btcoexist,
+								NORMAL_EXEC,
+								true, 16);
+					coex_dm->ps_tdma_du_adj_type = 16;
+				}
+				if (result == -1) {
+					if (coex_dm->cur_ps_tdma == 5) {
+						halbtc8192e2ant_ps_tdma(
+								    btcoexist,
+								    NORMAL_EXEC,
+								    true, 6);
+						coex_dm->ps_tdma_du_adj_type =
+									     6;
+					} else if (coex_dm->cur_ps_tdma == 6) {
+						halbtc8192e2ant_ps_tdma(
+								    btcoexist,
+								    NORMAL_EXEC,
+								    true, 7);
+						coex_dm->ps_tdma_du_adj_type =
+									     7;
+					} else if (coex_dm->cur_ps_tdma == 7) {
+						halbtc8192e2ant_ps_tdma(
+								    btcoexist,
+								    NORMAL_EXEC,
+								    true, 8);
+						coex_dm->ps_tdma_du_adj_type =
+									     8;
+					} else if (coex_dm->cur_ps_tdma == 13) {
+						halbtc8192e2ant_ps_tdma(
+								    btcoexist,
+								    NORMAL_EXEC,
+								    true, 14);
+						coex_dm->ps_tdma_du_adj_type =
+									     14;
+					} else if (coex_dm->cur_ps_tdma == 14) {
+						halbtc8192e2ant_ps_tdma(
+								    btcoexist,
+								    NORMAL_EXEC,
+								    true, 15);
+						coex_dm->ps_tdma_du_adj_type =
+									     15;
+					} else if (coex_dm->cur_ps_tdma == 15) {
+						halbtc8192e2ant_ps_tdma(
+								    btcoexist,
+								    NORMAL_EXEC,
+								    true, 16);
+						coex_dm->ps_tdma_du_adj_type =
+									     16;
+					}
+				} else if (result == 1) {
+					if (coex_dm->cur_ps_tdma == 8) {
+						halbtc8192e2ant_ps_tdma(
+								    btcoexist,
+								    NORMAL_EXEC,
+								    true, 7);
+						coex_dm->ps_tdma_du_adj_type =
+									     7;
+					} else if (coex_dm->cur_ps_tdma == 7) {
+						halbtc8192e2ant_ps_tdma(
+								    btcoexist,
+								    NORMAL_EXEC,
+								    true, 6);
+						coex_dm->ps_tdma_du_adj_type =
+									     6;
+					} else if (coex_dm->cur_ps_tdma == 6) {
+						halbtc8192e2ant_ps_tdma(
+								    btcoexist,
+								    NORMAL_EXEC,
+								    true, 6);
+						coex_dm->ps_tdma_du_adj_type =
+									     6;
+					} else if (coex_dm->cur_ps_tdma == 16) {
+						halbtc8192e2ant_ps_tdma(
+								    btcoexist,
+								    NORMAL_EXEC,
+								    true, 15);
+						coex_dm->ps_tdma_du_adj_type =
+									     15;
+					} else if (coex_dm->cur_ps_tdma == 15) {
+						halbtc8192e2ant_ps_tdma(
+								    btcoexist,
+								    NORMAL_EXEC,
+								    true, 14);
+						coex_dm->ps_tdma_du_adj_type =
+									     14;
+					} else if (coex_dm->cur_ps_tdma == 14) {
+						halbtc8192e2ant_ps_tdma(
+								    btcoexist,
+								    NORMAL_EXEC,
+								    true, 14);
+						coex_dm->ps_tdma_du_adj_type =
+									     14;
+					}
+				}
+			} else {
+				BTC_PRINT(BTC_MSG_ALGORITHM,
+					  ALGO_TRACE_FW_DETAIL,
+					  "[BTCoex], TxPause = 0\n");
+				if (coex_dm->cur_ps_tdma == 5) {
+					halbtc8192e2ant_ps_tdma(btcoexist,
+								NORMAL_EXEC,
+								true, 2);
+					coex_dm->ps_tdma_du_adj_type = 2;
+				} else if (coex_dm->cur_ps_tdma == 6) {
+					halbtc8192e2ant_ps_tdma(btcoexist,
+								NORMAL_EXEC,
+								true, 2);
+					coex_dm->ps_tdma_du_adj_type = 2;
+				} else if (coex_dm->cur_ps_tdma == 7) {
+					halbtc8192e2ant_ps_tdma(btcoexist,
+								NORMAL_EXEC,
+								true, 3);
+					coex_dm->ps_tdma_du_adj_type = 3;
+				} else if (coex_dm->cur_ps_tdma == 8) {
+					halbtc8192e2ant_ps_tdma(btcoexist,
+								NORMAL_EXEC,
+								true, 4);
+					coex_dm->ps_tdma_du_adj_type = 4;
+				}
+				if (coex_dm->cur_ps_tdma == 13) {
+					halbtc8192e2ant_ps_tdma(btcoexist,
+								NORMAL_EXEC,
+								true, 10);
+					coex_dm->ps_tdma_du_adj_type = 10;
+				} else if (coex_dm->cur_ps_tdma == 14) {
+					halbtc8192e2ant_ps_tdma(btcoexist,
+								NORMAL_EXEC,
+								true, 10);
+					coex_dm->ps_tdma_du_adj_type = 10;
+				} else if (coex_dm->cur_ps_tdma == 15) {
+					halbtc8192e2ant_ps_tdma(btcoexist,
+								NORMAL_EXEC,
+								true, 11);
+					coex_dm->ps_tdma_du_adj_type = 11;
+				} else if (coex_dm->cur_ps_tdma == 16) {
+					halbtc8192e2ant_ps_tdma(btcoexist,
+								NORMAL_EXEC,
+								true, 12);
+					coex_dm->ps_tdma_du_adj_type = 12;
+				}
+				if (result == -1) {
+					if (coex_dm->cur_ps_tdma == 1) {
+						halbtc8192e2ant_ps_tdma(
+								    btcoexist,
+								    NORMAL_EXEC,
+								    true, 2);
+						coex_dm->ps_tdma_du_adj_type =
+									     2;
+					} else if (coex_dm->cur_ps_tdma == 2) {
+						halbtc8192e2ant_ps_tdma(
+								    btcoexist,
+								    NORMAL_EXEC,
+								    true, 3);
+						coex_dm->ps_tdma_du_adj_type =
+									     3;
+					} else if (coex_dm->cur_ps_tdma == 3) {
+						halbtc8192e2ant_ps_tdma(
+								    btcoexist,
+								    NORMAL_EXEC,
+								    true, 4);
+						coex_dm->ps_tdma_du_adj_type =
+									     4;
+					} else if (coex_dm->cur_ps_tdma == 9) {
+						halbtc8192e2ant_ps_tdma(
+								    btcoexist,
+								    NORMAL_EXEC,
+								    true, 10);
+						coex_dm->ps_tdma_du_adj_type =
+									     10;
+					} else if (coex_dm->cur_ps_tdma == 10) {
+						halbtc8192e2ant_ps_tdma(
+								    btcoexist,
+								    NORMAL_EXEC,
+								    true, 11);
+						coex_dm->ps_tdma_du_adj_type =
+									     11;
+					} else if (coex_dm->cur_ps_tdma == 11) {
+						halbtc8192e2ant_ps_tdma(
+								    btcoexist,
+								    NORMAL_EXEC,
+								    true, 12);
+						coex_dm->ps_tdma_du_adj_type =
+									     12;
+					}
+				} else if (result == 1) {
+					if (coex_dm->cur_ps_tdma == 4) {
+						halbtc8192e2ant_ps_tdma(
+								    btcoexist,
+								    NORMAL_EXEC,
+								    true, 3);
+						coex_dm->ps_tdma_du_adj_type =
+									     3;
+					} else if (coex_dm->cur_ps_tdma == 3) {
+						halbtc8192e2ant_ps_tdma(
+								    btcoexist,
+								    NORMAL_EXEC,
+								    true, 2);
+						coex_dm->ps_tdma_du_adj_type =
+									     2;
+					} else if (coex_dm->cur_ps_tdma == 2) {
+						halbtc8192e2ant_ps_tdma(
+								    btcoexist,
+								    NORMAL_EXEC,
+								    true, 2);
+						coex_dm->ps_tdma_du_adj_type =
+									     2;
+					} else if (coex_dm->cur_ps_tdma == 12) {
+						halbtc8192e2ant_ps_tdma(
+								    btcoexist,
+								    NORMAL_EXEC,
+								    true, 11);
+						coex_dm->ps_tdma_du_adj_type =
+									     11;
+					} else if (coex_dm->cur_ps_tdma == 11) {
+						halbtc8192e2ant_ps_tdma(
+								    btcoexist,
+								    NORMAL_EXEC,
+								    true, 10);
+						coex_dm->ps_tdma_du_adj_type =
+									     10;
+					} else if (coex_dm->cur_ps_tdma == 10) {
+						halbtc8192e2ant_ps_tdma(
+								    btcoexist,
+								    NORMAL_EXEC,
+								    true, 10);
+						coex_dm->ps_tdma_du_adj_type =
+									     10;
+					}
+				}
+			}
+		} else if (max_interval == 3) {
+			if (tx_pause) {
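+/* Dispatch to the per-profile coexistence action selected by the current
+ * BT algorithm, unless manual control, IPS, or BT inquiry/page scan takes
+ * precedence. */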
+				BTC_PRINT(BTC_MSG_ALGORITHM,
+					  ALGO_TRACE_FW_DETAIL,
+					  "[BTCoex], TxPause = 1\n");
+				if (coex_dm->cur_ps_tdma == 1) {
+					halbtc8192e2ant_ps_tdma(btcoexist,
+								NORMAL_EXEC,
+								true, 7);
+					coex_dm->ps_tdma_du_adj_type = 7;
+				} else if (coex_dm->cur_ps_tdma == 2) {
+					halbtc8192e2ant_ps_tdma(btcoexist,
+								NORMAL_EXEC,
+								true, 7);
+					coex_dm->ps_tdma_du_adj_type = 7;
+				} else if (coex_dm->cur_ps_tdma == 3) {
+					halbtc8192e2ant_ps_tdma(btcoexist,
+								NORMAL_EXEC,
+								true, 7);
+					coex_dm->ps_tdma_du_adj_type = 7;
+				} else if (coex_dm->cur_ps_tdma == 4) {
+					halbtc8192e2ant_ps_tdma(btcoexist,
+								NORMAL_EXEC,
+								true, 8);
+					coex_dm->ps_tdma_du_adj_type = 8;
+				}
+				if (coex_dm->cur_ps_tdma == 9) {
+					halbtc8192e2ant_ps_tdma(btcoexist,
+								NORMAL_EXEC,
+								true, 15);
+					coex_dm->ps_tdma_du_adj_type = 15;
+				} else if (coex_dm->cur_ps_tdma == 10) {
+					halbtc8192e2ant_ps_tdma(btcoexist,
+								NORMAL_EXEC,
+								true, 15);
+					coex_dm->ps_tdma_du_adj_type = 15;
+				} else if (coex_dm->cur_ps_tdma == 11) {
+					halbtc8192e2ant_ps_tdma(btcoexist,
+								NORMAL_EXEC,
+								true, 15);
+					coex_dm->ps_tdma_du_adj_type = 15;
+				} else if (coex_dm->cur_ps_tdma == 12) {
+					halbtc8192e2ant_ps_tdma(btcoexist,
+								NORMAL_EXEC,
+								true, 16);
+					coex_dm->ps_tdma_du_adj_type = 16;
+				}
+				if (result == -1) {
+					if (coex_dm->cur_ps_tdma == 5) {
+						halbtc8192e2ant_ps_tdma(
+								    btcoexist,
+								    NORMAL_EXEC,
+								    true, 7);
+						coex_dm->ps_tdma_du_adj_type =
+									     7;
+					} else if (coex_dm->cur_ps_tdma == 6) {
+						halbtc8192e2ant_ps_tdma(
+								    btcoexist,
+								    NORMAL_EXEC,
+								    true, 7);
+						coex_dm->ps_tdma_du_adj_type =
+									     7;
+					} else if (coex_dm->cur_ps_tdma == 7) {
+						halbtc8192e2ant_ps_tdma(
+								    btcoexist,
+								    NORMAL_EXEC,
+								    true, 8);
+						coex_dm->ps_tdma_du_adj_type =
+									     8;
+					} else if (coex_dm->cur_ps_tdma == 13) {
+						halbtc8192e2ant_ps_tdma(
+								    btcoexist,
+								    NORMAL_EXEC,
+								    true, 15);
+						coex_dm->ps_tdma_du_adj_type =
+									     15;
+					} else if (coex_dm->cur_ps_tdma == 14) {
+						halbtc8192e2ant_ps_tdma(
+								    btcoexist,
+								    NORMAL_EXEC,
+								    true, 15);
+						coex_dm->ps_tdma_du_adj_type =
+									     15;
+					} else if (coex_dm->cur_ps_tdma == 15) {
+						halbtc8192e2ant_ps_tdma(
+								    btcoexist,
+								    NORMAL_EXEC,
+								    true, 16);
+						coex_dm->ps_tdma_du_adj_type =
+									     16;
+					}
+				} else if (result == 1) {
+					if (coex_dm->cur_ps_tdma == 8) {
+						halbtc8192e2ant_ps_tdma(
+								    btcoexist,
+								    NORMAL_EXEC,
+								    true, 7);
+						coex_dm->ps_tdma_du_adj_type =
+									     7;
+					} else if (coex_dm->cur_ps_tdma == 7) {
+						halbtc8192e2ant_ps_tdma(
+								    btcoexist,
+								    NORMAL_EXEC,
+								    true, 7);
+						coex_dm->ps_tdma_du_adj_type =
+									     7;
+					} else if (coex_dm->cur_ps_tdma == 6) {
+						halbtc8192e2ant_ps_tdma(
+								    btcoexist,
+								    NORMAL_EXEC,
+								    true, 7);
+						coex_dm->ps_tdma_du_adj_type =
+									     7;
+					} else if (coex_dm->cur_ps_tdma == 16) {
+						halbtc8192e2ant_ps_tdma(
+								    btcoexist,
+								    NORMAL_EXEC,
+								    true, 15);
+						coex_dm->ps_tdma_du_adj_type =
+									     15;
+					} else if (coex_dm->cur_ps_tdma == 15) {
+						halbtc8192e2ant_ps_tdma(
+								    btcoexist,
+								    NORMAL_EXEC,
+								    true, 15);
+						coex_dm->ps_tdma_du_adj_type =
+									     15;
+					} else if (coex_dm->cur_ps_tdma == 14) {
+						halbtc8192e2ant_ps_tdma(
+								    btcoexist,
+								    NORMAL_EXEC,
+								    true, 15);
+						coex_dm->ps_tdma_du_adj_type =
+									     15;
+					}
+				}
+			} else {
+				BTC_PRINT(BTC_MSG_ALGORITHM,
+					  ALGO_TRACE_FW_DETAIL,
+					  "[BTCoex], TxPause = 0\n");
+				if (coex_dm->cur_ps_tdma == 5) {
+					halbtc8192e2ant_ps_tdma(btcoexist,
+								NORMAL_EXEC,
+								true, 3);
+					coex_dm->ps_tdma_du_adj_type = 3;
+				} else if (coex_dm->cur_ps_tdma == 6) {
+					halbtc8192e2ant_ps_tdma(btcoexist,
+								NORMAL_EXEC,
+								true, 3);
+					coex_dm->ps_tdma_du_adj_type = 3;
+				} else if (coex_dm->cur_ps_tdma == 7) {
+					halbtc8192e2ant_ps_tdma(btcoexist,
+								NORMAL_EXEC,
+								true, 3);
+					coex_dm->ps_tdma_du_adj_type = 3;
+				} else if (coex_dm->cur_ps_tdma == 8) {
+					halbtc8192e2ant_ps_tdma(btcoexist,
+								NORMAL_EXEC,
+								true, 4);
+					coex_dm->ps_tdma_du_adj_type = 4;
+				}
+				if (coex_dm->cur_ps_tdma == 13) {
+					halbtc8192e2ant_ps_tdma(btcoexist,
+								NORMAL_EXEC,
+								true, 11);
+					coex_dm->ps_tdma_du_adj_type = 11;
+				} else if (coex_dm->cur_ps_tdma == 14) {
+					halbtc8192e2ant_ps_tdma(btcoexist,
+								NORMAL_EXEC,
+								true, 11);
+					coex_dm->ps_tdma_du_adj_type = 11;
+				} else if (coex_dm->cur_ps_tdma == 15) {
+					halbtc8192e2ant_ps_tdma(btcoexist,
+								NORMAL_EXEC,
+								true, 11);
+					coex_dm->ps_tdma_du_adj_type = 11;
+				} else if (coex_dm->cur_ps_tdma == 16) {
+					halbtc8192e2ant_ps_tdma(btcoexist,
+								NORMAL_EXEC,
+								true, 12);
+					coex_dm->ps_tdma_du_adj_type = 12;
+				}
+				if (result == -1) {
+					if (coex_dm->cur_ps_tdma == 1) {
+						halbtc8192e2ant_ps_tdma(
+								    btcoexist,
+								    NORMAL_EXEC,
+								    true, 3);
+						coex_dm->ps_tdma_du_adj_type =
+									     3;
+					} else if (coex_dm->cur_ps_tdma == 2) {
+						halbtc8192e2ant_ps_tdma(
+								    btcoexist,
+								    NORMAL_EXEC,
+								    true, 3);
+						coex_dm->ps_tdma_du_adj_type =
+									     3;
+					} else if (coex_dm->cur_ps_tdma == 3) {
+						halbtc8192e2ant_ps_tdma(
+								    btcoexist,
+								    NORMAL_EXEC,
+								    true, 4);
+						coex_dm->ps_tdma_du_adj_type =
+									     4;
+					} else if (coex_dm->cur_ps_tdma == 9) {
+						halbtc8192e2ant_ps_tdma(
+								    btcoexist,
+								    NORMAL_EXEC,
+								    true, 11);
+						coex_dm->ps_tdma_du_adj_type =
+									     11;
+					} else if (coex_dm->cur_ps_tdma == 10) {
+						halbtc8192e2ant_ps_tdma(
+								    btcoexist,
+								    NORMAL_EXEC,
+								    true, 11);
+						coex_dm->ps_tdma_du_adj_type =
+									     11;
+					} else if (coex_dm->cur_ps_tdma == 11) {
+						halbtc8192e2ant_ps_tdma(
+								    btcoexist,
+								    NORMAL_EXEC,
+								    true, 12);
+						coex_dm->ps_tdma_du_adj_type =
+									     12;
+					}
+				} else if (result == 1) {
+					if (coex_dm->cur_ps_tdma == 4) {
+						halbtc8192e2ant_ps_tdma(
+								    btcoexist,
+								    NORMAL_EXEC,
+								    true, 3);
+						coex_dm->ps_tdma_du_adj_type =
+									     3;
+					} else if (coex_dm->cur_ps_tdma == 3) {
+						halbtc8192e2ant_ps_tdma(
+								    btcoexist,
+								    NORMAL_EXEC,
+								    true, 3);
+						coex_dm->ps_tdma_du_adj_type =
+									     3;
+					} else if (coex_dm->cur_ps_tdma == 2) {
+						halbtc8192e2ant_ps_tdma(
+								    btcoexist,
+								    NORMAL_EXEC,
+								    true, 3);
+						coex_dm->ps_tdma_du_adj_type =
+									     3;
+					} else if (coex_dm->cur_ps_tdma == 12) {
+						halbtc8192e2ant_ps_tdma(
+								    btcoexist,
+								    NORMAL_EXEC,
+								    true, 11);
+						coex_dm->ps_tdma_du_adj_type =
+									     11;
+					} else if (coex_dm->cur_ps_tdma == 11) {
+						halbtc8192e2ant_ps_tdma(
+								    btcoexist,
+								    NORMAL_EXEC,
+								    true, 11);
+						coex_dm->ps_tdma_du_adj_type =
+									     11;
+					} else if (coex_dm->cur_ps_tdma == 10) {
+						halbtc8192e2ant_ps_tdma(
+								    btcoexist,
+								    NORMAL_EXEC,
+								    true, 11);
+						coex_dm->ps_tdma_du_adj_type =
+									     11;
+					}
+				}
+			}
+		}
+	}
+
+	/* If the current PsTdma does not match the recorded one
+	 * (e.g. when scan or dhcp is in progress), adjust it back
+	 * to the previously recorded setting. */
+	if (coex_dm->cur_ps_tdma != coex_dm->ps_tdma_du_adj_type) {
+		bool scan = false, link = false, roam = false;
+		BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_DETAIL,
+			  "[BTCoex], PsTdma type mismatch!!!, ");
+		BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_DETAIL,
+			  "curPsTdma=%d, recordPsTdma=%d\n",
+			  coex_dm->cur_ps_tdma, coex_dm->ps_tdma_du_adj_type);
+
+		btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_SCAN, &scan);
+		btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_LINK, &link);
+		btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_ROAM, &roam);
+
+		if (!scan && !link && !roam)
+			halbtc8192e2ant_ps_tdma(btcoexist, NORMAL_EXEC,
+						true,
+						coex_dm->ps_tdma_du_adj_type);
+		else
+			BTC_PRINT(BTC_MSG_ALGORITHM,
+				  ALGO_TRACE_FW_DETAIL,
+				  "[BTCoex], roaming/link/scan is under progress, will adjust next time!!!\n");
+	}
+}
+
+/* SCO only or SCO+PAN(HS) */
+void halbtc8192e2ant_action_sco(struct btc_coexist *btcoexist)
+{
+	u8 wifirssi_state, btrssi_state = BTC_RSSI_STATE_STAY_LOW;
+	u32 wifi_bw;
+
+	wifirssi_state = halbtc8192e2ant_wifirssi_state(btcoexist, 0, 2, 15, 0);
+
+	halbtc8192e2ant_switch_sstype(btcoexist, NORMAL_EXEC, 1);
+	halbtc8192e2ant_limited_rx(btcoexist, NORMAL_EXEC, false, false, 0x8);
+
+	halbtc8192e2ant_fw_dac_swinglvl(btcoexist, NORMAL_EXEC, 6);
+
+	halbtc8192e2ant_coex_table_with_type(btcoexist, NORMAL_EXEC, 4);
+
+	btrssi_state = halbtc8192e2ant_btrssi_state(3, 34, 42);
+
+	if ((btrssi_state == BTC_RSSI_STATE_LOW) ||
+	    (btrssi_state == BTC_RSSI_STATE_STAY_LOW)) {
+		halbtc8192e2ant_dec_btpwr(btcoexist, NORMAL_EXEC, 0);
+		halbtc8192e2ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 13);
+	} else if ((btrssi_state == BTC_RSSI_STATE_MEDIUM) ||
+		   (btrssi_state == BTC_RSSI_STATE_STAY_MEDIUM)) {
+		halbtc8192e2ant_dec_btpwr(btcoexist, NORMAL_EXEC, 2);
+		halbtc8192e2ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 9);
+	} else if ((btrssi_state == BTC_RSSI_STATE_HIGH) ||
+		   (btrssi_state == BTC_RSSI_STATE_STAY_HIGH)) {
+		halbtc8192e2ant_dec_btpwr(btcoexist, NORMAL_EXEC, 4);
+		halbtc8192e2ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 9);
+	}
+
+	btcoexist->btc_get(btcoexist, BTC_GET_U4_WIFI_BW, &wifi_bw);
+
+	/* sw mechanism */
+	if (BTC_WIFI_BW_HT40 == wifi_bw) {
+		if ((wifirssi_state == BTC_RSSI_STATE_HIGH) ||
+		    (wifirssi_state == BTC_RSSI_STATE_STAY_HIGH)) {
+			halbtc8192e2ant_sw_mechanism1(btcoexist, true, true,
+						      false, false);
+			halbtc8192e2ant_sw_mechanism2(btcoexist, true, false,
+						      false, 0x6);
+		} else {
+			halbtc8192e2ant_sw_mechanism1(btcoexist, true, true,
+						      false, false);
+			halbtc8192e2ant_sw_mechanism2(btcoexist, false, false,
+						      false, 0x6);
+		}
+	} else {
+		if ((wifirssi_state == BTC_RSSI_STATE_HIGH) ||
+		    (wifirssi_state == BTC_RSSI_STATE_STAY_HIGH)) {
+			halbtc8192e2ant_sw_mechanism1(btcoexist, false, true,
+						      false, false);
+			halbtc8192e2ant_sw_mechanism2(btcoexist, true, false,
+						      false, 0x6);
+		} else {
+			halbtc8192e2ant_sw_mechanism1(btcoexist, false, true,
+						      false, false);
+			halbtc8192e2ant_sw_mechanism2(btcoexist, false, false,
+						      false, 0x6);
+		}
+	}
+}
+
+void halbtc8192e2ant_action_sco_pan(struct btc_coexist *btcoexist)
+{
+	u8 wifirssi_state, btrssi_state = BTC_RSSI_STATE_STAY_LOW;
+	u32 wifi_bw;
+
+	wifirssi_state = halbtc8192e2ant_wifirssi_state(btcoexist, 0, 2, 15, 0);
+
+	halbtc8192e2ant_switch_sstype(btcoexist, NORMAL_EXEC, 1);
+	halbtc8192e2ant_limited_rx(btcoexist, NORMAL_EXEC, false, false, 0x8);
+
+	halbtc8192e2ant_fw_dac_swinglvl(btcoexist, NORMAL_EXEC, 6);
+
+	halbtc8192e2ant_coex_table_with_type(btcoexist, NORMAL_EXEC, 4);
+
+	btrssi_state = halbtc8192e2ant_btrssi_state(3, 34, 42);
+
+	if ((btrssi_state == BTC_RSSI_STATE_LOW) ||
+	    (btrssi_state == BTC_RSSI_STATE_STAY_LOW)) {
+		halbtc8192e2ant_dec_btpwr(btcoexist, NORMAL_EXEC, 0);
+		halbtc8192e2ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 14);
+	} else if ((btrssi_state == BTC_RSSI_STATE_MEDIUM) ||
+		   (btrssi_state == BTC_RSSI_STATE_STAY_MEDIUM)) {
+		halbtc8192e2ant_dec_btpwr(btcoexist, NORMAL_EXEC, 2);
+		halbtc8192e2ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 10);
+	} else if ((btrssi_state == BTC_RSSI_STATE_HIGH) ||
+		   (btrssi_state == BTC_RSSI_STATE_STAY_HIGH)) {
+		halbtc8192e2ant_dec_btpwr(btcoexist, NORMAL_EXEC, 4);
+		halbtc8192e2ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 10);
+	}
+
+	btcoexist->btc_get(btcoexist, BTC_GET_U4_WIFI_BW, &wifi_bw);
+
+	/* sw mechanism */
+	if (BTC_WIFI_BW_HT40 == wifi_bw) {
+		if ((wifirssi_state == BTC_RSSI_STATE_HIGH) ||
+		    (wifirssi_state == BTC_RSSI_STATE_STAY_HIGH)) {
+			halbtc8192e2ant_sw_mechanism1(btcoexist, true, true,
+						      false, false);
+			halbtc8192e2ant_sw_mechanism2(btcoexist, true, false,
+						      false, 0x6);
+		} else {
+			halbtc8192e2ant_sw_mechanism1(btcoexist, true, true,
+						      false, false);
+			halbtc8192e2ant_sw_mechanism2(btcoexist, false, false,
+						      false, 0x6);
+		}
+	} else {
+		if ((wifirssi_state == BTC_RSSI_STATE_HIGH) ||
+		    (wifirssi_state == BTC_RSSI_STATE_STAY_HIGH)) {
+			halbtc8192e2ant_sw_mechanism1(btcoexist, false, true,
+						      false, false);
+			halbtc8192e2ant_sw_mechanism2(btcoexist, true, false,
+						      false, 0x6);
+		} else {
+			halbtc8192e2ant_sw_mechanism1(btcoexist, false, true,
+						      false, false);
+			halbtc8192e2ant_sw_mechanism2(btcoexist, false, false,
+						      false, 0x6);
+		}
+	}
+}
+
+void halbtc8192e2ant_action_hid(struct btc_coexist *btcoexist)
+{
+	u8 wifirssi_state, btrssi_state = BTC_RSSI_STATE_HIGH;
+	u32 wifi_bw;
+
+	wifirssi_state = halbtc8192e2ant_wifirssi_state(btcoexist, 0, 2, 15, 0);
+	btrssi_state = halbtc8192e2ant_btrssi_state(3, 34, 42);
+
+	halbtc8192e2ant_switch_sstype(btcoexist, NORMAL_EXEC, 1);
+	halbtc8192e2ant_limited_rx(btcoexist, NORMAL_EXEC, false, false, 0x8);
+
+	halbtc8192e2ant_fw_dac_swinglvl(btcoexist, NORMAL_EXEC, 6);
+
+	btcoexist->btc_get(btcoexist, BTC_GET_U4_WIFI_BW, &wifi_bw);
+
+	halbtc8192e2ant_coex_table_with_type(btcoexist, NORMAL_EXEC, 3);
+
+	if ((btrssi_state == BTC_RSSI_STATE_LOW) ||
+	    (btrssi_state == BTC_RSSI_STATE_STAY_LOW)) {
+		halbtc8192e2ant_dec_btpwr(btcoexist, NORMAL_EXEC, 0);
+		halbtc8192e2ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 13);
+	} else if ((btrssi_state == BTC_RSSI_STATE_MEDIUM) ||
+		   (btrssi_state == BTC_RSSI_STATE_STAY_MEDIUM)) {
+		halbtc8192e2ant_dec_btpwr(btcoexist, NORMAL_EXEC, 2);
+		halbtc8192e2ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 9);
+	} else if ((btrssi_state == BTC_RSSI_STATE_HIGH) ||
+		   (btrssi_state == BTC_RSSI_STATE_STAY_HIGH)) {
+		halbtc8192e2ant_dec_btpwr(btcoexist, NORMAL_EXEC, 4);
+		halbtc8192e2ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 9);
+	}
+
+	/* sw mechanism */
+	if (BTC_WIFI_BW_HT40 == wifi_bw) {
+		if ((wifirssi_state == BTC_RSSI_STATE_HIGH) ||
+		    (wifirssi_state == BTC_RSSI_STATE_STAY_HIGH)) {
+			halbtc8192e2ant_sw_mechanism1(btcoexist, true, true,
+						      false, false);
+			halbtc8192e2ant_sw_mechanism2(btcoexist, true, false,
+						      false, 0x18);
+		} else {
+			halbtc8192e2ant_sw_mechanism1(btcoexist, true, true,
+						      false, false);
+			halbtc8192e2ant_sw_mechanism2(btcoexist, false, false,
+						      false, 0x18);
+		}
+	} else {
+		if ((wifirssi_state == BTC_RSSI_STATE_HIGH) ||
+		    (wifirssi_state == BTC_RSSI_STATE_STAY_HIGH)) {
+			halbtc8192e2ant_sw_mechanism1(btcoexist, false, true,
+						      false, false);
+			halbtc8192e2ant_sw_mechanism2(btcoexist, true, false,
+						      false, 0x18);
+		} else {
+			halbtc8192e2ant_sw_mechanism1(btcoexist, false, true,
+						      false, false);
+			halbtc8192e2ant_sw_mechanism2(btcoexist, false, false,
+						      false, 0x18);
+		}
+	}
+}
+
+/* A2DP only / PAN(EDR) only/ A2DP+PAN(HS) */
+void halbtc8192e2ant_action_a2dp(struct btc_coexist *btcoexist)
+{
+	u8 wifirssi_state, btrssi_state = BTC_RSSI_STATE_HIGH;
+	u32 wifi_bw;
+	bool long_dist = false;
+
+	wifirssi_state = halbtc8192e2ant_wifirssi_state(btcoexist, 0, 2, 15, 0);
+	btrssi_state = halbtc8192e2ant_btrssi_state(3, 34, 42);
+
+	if ((btrssi_state == BTC_RSSI_STATE_LOW ||
+	     btrssi_state == BTC_RSSI_STATE_STAY_LOW) &&
+	    (wifirssi_state == BTC_RSSI_STATE_LOW ||
+	     wifirssi_state == BTC_RSSI_STATE_STAY_LOW)) {
+		BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
+			  "[BTCoex], A2dp, wifi/bt rssi both LOW!!\n");
+		long_dist = true;
+	}
+	if (long_dist) {
+		halbtc8192e2ant_switch_sstype(btcoexist, NORMAL_EXEC, 2);
+		halbtc8192e2ant_limited_rx(btcoexist, NORMAL_EXEC, false, true,
+					   0x4);
+	} else {
+		halbtc8192e2ant_switch_sstype(btcoexist, NORMAL_EXEC, 1);
+		halbtc8192e2ant_limited_rx(btcoexist, NORMAL_EXEC, false, false,
+					   0x8);
+	}
+
+	halbtc8192e2ant_fw_dac_swinglvl(btcoexist, NORMAL_EXEC, 6);
+
+	if (long_dist)
+		halbtc8192e2ant_coex_table_with_type(btcoexist, NORMAL_EXEC, 0);
+	else
+		halbtc8192e2ant_coex_table_with_type(btcoexist, NORMAL_EXEC, 2);
+
+	if (long_dist) {
+		halbtc8192e2ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 17);
+		coex_dm->auto_tdma_adjust = false;
+		halbtc8192e2ant_dec_btpwr(btcoexist, NORMAL_EXEC, 0);
+	} else {
+		if ((btrssi_state == BTC_RSSI_STATE_LOW) ||
+		    (btrssi_state == BTC_RSSI_STATE_STAY_LOW)) {
+			halbtc8192e2ant_tdma_duration_adjust(btcoexist, false,
+							     true, 1);
+			halbtc8192e2ant_dec_btpwr(btcoexist, NORMAL_EXEC, 0);
+		} else if ((btrssi_state == BTC_RSSI_STATE_MEDIUM) ||
+			   (btrssi_state == BTC_RSSI_STATE_STAY_MEDIUM)) {
+			halbtc8192e2ant_tdma_duration_adjust(btcoexist, false,
+							     false, 1);
+			halbtc8192e2ant_dec_btpwr(btcoexist, NORMAL_EXEC, 2);
+		} else if ((btrssi_state == BTC_RSSI_STATE_HIGH) ||
+			   (btrssi_state == BTC_RSSI_STATE_STAY_HIGH)) {
+			halbtc8192e2ant_tdma_duration_adjust(btcoexist, false,
+							     false, 1);
+			halbtc8192e2ant_dec_btpwr(btcoexist, NORMAL_EXEC, 4);
+		}
+	}
+
+	/* sw mechanism */
+	btcoexist->btc_get(btcoexist, BTC_GET_U4_WIFI_BW, &wifi_bw);
+	if (BTC_WIFI_BW_HT40 == wifi_bw) {
+		if ((wifirssi_state == BTC_RSSI_STATE_HIGH) ||
+		    (wifirssi_state == BTC_RSSI_STATE_STAY_HIGH)) {
+			halbtc8192e2ant_sw_mechanism1(btcoexist, true, false,
+						      false, false);
+			halbtc8192e2ant_sw_mechanism2(btcoexist, true, false,
+						      false, 0x18);
+		} else {
+			halbtc8192e2ant_sw_mechanism1(btcoexist, true, false,
+						      false, false);
+			halbtc8192e2ant_sw_mechanism2(btcoexist, false, false,
+						      false, 0x18);
+		}
+	} else {
+		if ((wifirssi_state == BTC_RSSI_STATE_HIGH) ||
+		    (wifirssi_state == BTC_RSSI_STATE_STAY_HIGH)) {
+			halbtc8192e2ant_sw_mechanism1(btcoexist, false, false,
+						      false, false);
+			halbtc8192e2ant_sw_mechanism2(btcoexist, true, false,
+						      false, 0x18);
+		} else {
+			halbtc8192e2ant_sw_mechanism1(btcoexist, false, false,
+						      false, false);
+			halbtc8192e2ant_sw_mechanism2(btcoexist, false, false,
+						      false, 0x18);
+		}
+	}
+}
+
+void halbtc8192e2ant_action_a2dp_pan_hs(struct btc_coexist *btcoexist)
+{
+	u8 wifirssi_state, btrssi_state = BTC_RSSI_STATE_HIGH;
+	u32 wifi_bw;
+
+	wifirssi_state = halbtc8192e2ant_wifirssi_state(btcoexist, 0, 2, 15, 0);
+	btrssi_state = halbtc8192e2ant_btrssi_state(3, 34, 42);
+
+	halbtc8192e2ant_switch_sstype(btcoexist, NORMAL_EXEC, 1);
+	halbtc8192e2ant_limited_rx(btcoexist, NORMAL_EXEC, false, false, 0x8);
+
+	halbtc8192e2ant_fw_dac_swinglvl(btcoexist, NORMAL_EXEC, 6);
+	halbtc8192e2ant_coex_table_with_type(btcoexist, NORMAL_EXEC, 2);
+
+	if ((btrssi_state == BTC_RSSI_STATE_LOW) ||
+	    (btrssi_state == BTC_RSSI_STATE_STAY_LOW)) {
+		halbtc8192e2ant_tdma_duration_adjust(btcoexist, false, true, 2);
+		halbtc8192e2ant_dec_btpwr(btcoexist, NORMAL_EXEC, 0);
+	} else if ((btrssi_state == BTC_RSSI_STATE_MEDIUM) ||
+		   (btrssi_state == BTC_RSSI_STATE_STAY_MEDIUM)) {
+		halbtc8192e2ant_tdma_duration_adjust(btcoexist, false, false,
+						     2);
+		halbtc8192e2ant_dec_btpwr(btcoexist, NORMAL_EXEC, 2);
+	} else if ((btrssi_state == BTC_RSSI_STATE_HIGH) ||
+		   (btrssi_state == BTC_RSSI_STATE_STAY_HIGH)) {
+		halbtc8192e2ant_tdma_duration_adjust(btcoexist, false, false,
+						     2);
+		halbtc8192e2ant_dec_btpwr(btcoexist, NORMAL_EXEC, 4);
+	}
+
+	/* sw mechanism */
+	btcoexist->btc_get(btcoexist, BTC_GET_U4_WIFI_BW, &wifi_bw);
+	if (BTC_WIFI_BW_HT40 == wifi_bw) {
+		if ((wifirssi_state == BTC_RSSI_STATE_HIGH) ||
+		    (wifirssi_state == BTC_RSSI_STATE_STAY_HIGH)) {
+			halbtc8192e2ant_sw_mechanism1(btcoexist, true, false,
+						      false, false);
+			halbtc8192e2ant_sw_mechanism2(btcoexist, true, false,
+						      true, 0x6);
+		} else {
+			halbtc8192e2ant_sw_mechanism1(btcoexist, true, false,
+						      false, false);
+			halbtc8192e2ant_sw_mechanism2(btcoexist, false, false,
+						      true, 0x6);
+		}
+	} else {
+		if ((wifirssi_state == BTC_RSSI_STATE_HIGH) ||
+		    (wifirssi_state == BTC_RSSI_STATE_STAY_HIGH)) {
+			halbtc8192e2ant_sw_mechanism1(btcoexist, false, false,
+						      false, false);
+			halbtc8192e2ant_sw_mechanism2(btcoexist, true, false,
+						      true, 0x6);
+		} else {
+			halbtc8192e2ant_sw_mechanism1(btcoexist, false, false,
+						      false, false);
+			halbtc8192e2ant_sw_mechanism2(btcoexist, false, false,
+						      true, 0x6);
+		}
+	}
+}
+
+void halbtc8192e2ant_action_pan_edr(struct btc_coexist *btcoexist)
+{
+	u8 wifirssi_state, btrssi_state = BTC_RSSI_STATE_HIGH;
+	u32 wifi_bw;
+
+	wifirssi_state = halbtc8192e2ant_wifirssi_state(btcoexist, 0, 2, 15, 0);
+	btrssi_state = halbtc8192e2ant_btrssi_state(3, 34, 42);
+
+	halbtc8192e2ant_switch_sstype(btcoexist, NORMAL_EXEC, 1);
+	halbtc8192e2ant_limited_rx(btcoexist, NORMAL_EXEC, false, false, 0x8);
+
+	halbtc8192e2ant_fw_dac_swinglvl(btcoexist, NORMAL_EXEC, 6);
+
+	halbtc8192e2ant_coex_table_with_type(btcoexist, NORMAL_EXEC, 2);
+
+	if ((btrssi_state == BTC_RSSI_STATE_LOW) ||
+	    (btrssi_state == BTC_RSSI_STATE_STAY_LOW)) {
+		halbtc8192e2ant_dec_btpwr(btcoexist, NORMAL_EXEC, 0);
+		halbtc8192e2ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 5);
+	} else if ((btrssi_state == BTC_RSSI_STATE_MEDIUM) ||
+		   (btrssi_state == BTC_RSSI_STATE_STAY_MEDIUM)) {
+		halbtc8192e2ant_dec_btpwr(btcoexist, NORMAL_EXEC, 2);
+		halbtc8192e2ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 1);
+	} else if ((btrssi_state == BTC_RSSI_STATE_HIGH) ||
+		   (btrssi_state == BTC_RSSI_STATE_STAY_HIGH)) {
+		halbtc8192e2ant_dec_btpwr(btcoexist, NORMAL_EXEC, 4);
+		halbtc8192e2ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 1);
+	}
+
+	/* sw mechanism */
+	btcoexist->btc_get(btcoexist, BTC_GET_U4_WIFI_BW, &wifi_bw);
+	if (BTC_WIFI_BW_HT40 == wifi_bw) {
+		if ((wifirssi_state == BTC_RSSI_STATE_HIGH) ||
+		    (wifirssi_state == BTC_RSSI_STATE_STAY_HIGH)) {
+			halbtc8192e2ant_sw_mechanism1(btcoexist, true, false,
+						      false, false);
+			halbtc8192e2ant_sw_mechanism2(btcoexist, true, false,
+						      false, 0x18);
+		} else {
+			halbtc8192e2ant_sw_mechanism1(btcoexist, true, false,
+						      false, false);
+			halbtc8192e2ant_sw_mechanism2(btcoexist, false, false,
+						      false, 0x18);
+		}
+	} else {
+		if ((wifirssi_state == BTC_RSSI_STATE_HIGH) ||
+		    (wifirssi_state == BTC_RSSI_STATE_STAY_HIGH)) {
+			halbtc8192e2ant_sw_mechanism1(btcoexist, false, false,
+						      false, false);
+			halbtc8192e2ant_sw_mechanism2(btcoexist, true, false,
+						      false, 0x18);
+		} else {
+			halbtc8192e2ant_sw_mechanism1(btcoexist, false, false,
+						      false, false);
+			halbtc8192e2ant_sw_mechanism2(btcoexist, false, false,
+						      false, 0x18);
+		}
+	}
+}
+
+/* PAN(HS) only */
+void halbtc8192e2ant_action_pan_hs(struct btc_coexist *btcoexist)
+{
+	u8 wifirssi_state, btrssi_state = BTC_RSSI_STATE_HIGH;
+	u32 wifi_bw;
+
+	wifirssi_state = halbtc8192e2ant_wifirssi_state(btcoexist, 0, 2, 15, 0);
+	btrssi_state = halbtc8192e2ant_btrssi_state(3, 34, 42);
+
+	halbtc8192e2ant_switch_sstype(btcoexist, NORMAL_EXEC, 1);
+	halbtc8192e2ant_limited_rx(btcoexist, NORMAL_EXEC, false, false, 0x8);
+
+	halbtc8192e2ant_fw_dac_swinglvl(btcoexist, NORMAL_EXEC, 6);
+
+	halbtc8192e2ant_coex_table_with_type(btcoexist, NORMAL_EXEC, 2);
+
+	if ((btrssi_state == BTC_RSSI_STATE_LOW) ||
+	    (btrssi_state == BTC_RSSI_STATE_STAY_LOW)) {
+		halbtc8192e2ant_dec_btpwr(btcoexist, NORMAL_EXEC, 0);
+	} else if ((btrssi_state == BTC_RSSI_STATE_MEDIUM) ||
+		   (btrssi_state == BTC_RSSI_STATE_STAY_MEDIUM)) {
+		halbtc8192e2ant_dec_btpwr(btcoexist, NORMAL_EXEC, 2);
+	} else if ((btrssi_state == BTC_RSSI_STATE_HIGH) ||
+		   (btrssi_state == BTC_RSSI_STATE_STAY_HIGH)) {
+		halbtc8192e2ant_dec_btpwr(btcoexist, NORMAL_EXEC, 4);
+	}
+	halbtc8192e2ant_ps_tdma(btcoexist, NORMAL_EXEC, false, 1);
+
+	btcoexist->btc_get(btcoexist, BTC_GET_U4_WIFI_BW, &wifi_bw);
+	if (BTC_WIFI_BW_HT40 == wifi_bw) {
+		if ((wifirssi_state == BTC_RSSI_STATE_HIGH) ||
+		    (wifirssi_state == BTC_RSSI_STATE_STAY_HIGH)) {
+			halbtc8192e2ant_sw_mechanism1(btcoexist, true, false,
+						      false, false);
+			halbtc8192e2ant_sw_mechanism2(btcoexist, true, false,
+						      false, 0x18);
+		} else {
+			halbtc8192e2ant_sw_mechanism1(btcoexist, true, false,
+						      false, false);
+			halbtc8192e2ant_sw_mechanism2(btcoexist, false, false,
+						      false, 0x18);
+		}
+	} else {
+		if ((wifirssi_state == BTC_RSSI_STATE_HIGH) ||
+		    (wifirssi_state == BTC_RSSI_STATE_STAY_HIGH)) {
+			halbtc8192e2ant_sw_mechanism1(btcoexist, false, false,
+						      false, false);
+			halbtc8192e2ant_sw_mechanism2(btcoexist, true, false,
+						      false, 0x18);
+		} else {
+			halbtc8192e2ant_sw_mechanism1(btcoexist, false, false,
+						      false, false);
+			halbtc8192e2ant_sw_mechanism2(btcoexist, false, false,
+						      false, 0x18);
+		}
+	}
+}
+
+/* PAN(EDR)+A2DP */
+void halbtc8192e2ant_action_pan_edr_a2dp(struct btc_coexist *btcoexist)
+{
+	u8 wifirssi_state, btrssi_state = BTC_RSSI_STATE_HIGH;
+	u32 wifi_bw;
+
+	wifirssi_state = halbtc8192e2ant_wifirssi_state(btcoexist, 0, 2, 15, 0);
+	btrssi_state = halbtc8192e2ant_btrssi_state(3, 34, 42);
+
+	halbtc8192e2ant_switch_sstype(btcoexist, NORMAL_EXEC, 1);
+	halbtc8192e2ant_limited_rx(btcoexist, NORMAL_EXEC, false, false, 0x8);
+
+	halbtc8192e2ant_fw_dac_swinglvl(btcoexist, NORMAL_EXEC, 6);
+
+	halbtc8192e2ant_coex_table_with_type(btcoexist, NORMAL_EXEC, 2);
+
+	btcoexist->btc_get(btcoexist, BTC_GET_U4_WIFI_BW, &wifi_bw);
+
+	if ((btrssi_state == BTC_RSSI_STATE_LOW) ||
+	    (btrssi_state == BTC_RSSI_STATE_STAY_LOW)) {
+		halbtc8192e2ant_dec_btpwr(btcoexist, NORMAL_EXEC, 0);
+		halbtc8192e2ant_tdma_duration_adjust(btcoexist, false, true, 3);
+	} else if ((btrssi_state == BTC_RSSI_STATE_MEDIUM) ||
+		   (btrssi_state == BTC_RSSI_STATE_STAY_MEDIUM)) {
+		halbtc8192e2ant_dec_btpwr(btcoexist, NORMAL_EXEC, 2);
+		halbtc8192e2ant_tdma_duration_adjust(btcoexist, false, false,
+						     3);
+	} else if ((btrssi_state == BTC_RSSI_STATE_HIGH) ||
+		   (btrssi_state == BTC_RSSI_STATE_STAY_HIGH)) {
+		halbtc8192e2ant_dec_btpwr(btcoexist, NORMAL_EXEC, 4);
+		halbtc8192e2ant_tdma_duration_adjust(btcoexist, false, false,
+						     3);
+	}
+
+	/* sw mechanism	*/
+	if (BTC_WIFI_BW_HT40 == wifi_bw) {
+		if ((wifirssi_state == BTC_RSSI_STATE_HIGH) ||
+		    (wifirssi_state == BTC_RSSI_STATE_STAY_HIGH)) {
+			halbtc8192e2ant_sw_mechanism1(btcoexist, true, false,
+						      false, false);
+			halbtc8192e2ant_sw_mechanism2(btcoexist, true, false,
+						      false, 0x18);
+		} else {
+			halbtc8192e2ant_sw_mechanism1(btcoexist, true, false,
+						      false, false);
+			halbtc8192e2ant_sw_mechanism2(btcoexist, false, false,
+						      false, 0x18);
+		}
+	} else {
+		if ((wifirssi_state == BTC_RSSI_STATE_HIGH) ||
+		    (wifirssi_state == BTC_RSSI_STATE_STAY_HIGH)) {
+			halbtc8192e2ant_sw_mechanism1(btcoexist, false, false,
+						      false, false);
+			halbtc8192e2ant_sw_mechanism2(btcoexist, true, false,
+						      false, 0x18);
+		} else {
+			halbtc8192e2ant_sw_mechanism1(btcoexist, false, false,
+						      false, false);
+			halbtc8192e2ant_sw_mechanism2(btcoexist, false, false,
+						      false, 0x18);
+		}
+	}
+}
+
+void halbtc8192e2ant_action_pan_edr_hid(struct btc_coexist *btcoexist)
+{
+	u8 wifirssi_state, btrssi_state = BTC_RSSI_STATE_HIGH;
+	u32 wifi_bw;
+
+	wifirssi_state = halbtc8192e2ant_wifirssi_state(btcoexist, 0, 2, 15, 0);
+	btrssi_state = halbtc8192e2ant_btrssi_state(3, 34, 42);
+
+	btcoexist->btc_get(btcoexist, BTC_GET_U4_WIFI_BW, &wifi_bw);
+
+	halbtc8192e2ant_switch_sstype(btcoexist, NORMAL_EXEC, 1);
+	halbtc8192e2ant_limited_rx(btcoexist, NORMAL_EXEC, false, false, 0x8);
+
+	halbtc8192e2ant_fw_dac_swinglvl(btcoexist, NORMAL_EXEC, 6);
+
+	halbtc8192e2ant_coex_table_with_type(btcoexist, NORMAL_EXEC, 3);
+
+	if ((btrssi_state == BTC_RSSI_STATE_LOW) ||
+	    (btrssi_state == BTC_RSSI_STATE_STAY_LOW)) {
+		halbtc8192e2ant_dec_btpwr(btcoexist, NORMAL_EXEC, 0);
+		halbtc8192e2ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 14);
+	} else if ((btrssi_state == BTC_RSSI_STATE_MEDIUM) ||
+		   (btrssi_state == BTC_RSSI_STATE_STAY_MEDIUM)) {
+		halbtc8192e2ant_dec_btpwr(btcoexist, NORMAL_EXEC, 2);
+		halbtc8192e2ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 10);
+	} else if ((btrssi_state == BTC_RSSI_STATE_HIGH) ||
+		   (btrssi_state == BTC_RSSI_STATE_STAY_HIGH)) {
+		halbtc8192e2ant_dec_btpwr(btcoexist, NORMAL_EXEC, 4);
+		halbtc8192e2ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 10);
+	}
+
+	/* sw mechanism */
+	if (BTC_WIFI_BW_HT40 == wifi_bw) {
+		if ((wifirssi_state == BTC_RSSI_STATE_HIGH) ||
+		    (wifirssi_state == BTC_RSSI_STATE_STAY_HIGH)) {
+			halbtc8192e2ant_sw_mechanism1(btcoexist, true, true,
+						      false, false);
+			halbtc8192e2ant_sw_mechanism2(btcoexist, true, false,
+						      false, 0x18);
+		} else {
+			halbtc8192e2ant_sw_mechanism1(btcoexist, true, true,
+						      false, false);
+			halbtc8192e2ant_sw_mechanism2(btcoexist, false, false,
+						      false, 0x18);
+		}
+	} else {
+		if ((wifirssi_state == BTC_RSSI_STATE_HIGH) ||
+		    (wifirssi_state == BTC_RSSI_STATE_STAY_HIGH)) {
+			halbtc8192e2ant_sw_mechanism1(btcoexist, false, true,
+						      false, false);
+			halbtc8192e2ant_sw_mechanism2(btcoexist, true, false,
+						      false, 0x18);
+		} else {
+			halbtc8192e2ant_sw_mechanism1(btcoexist, false, true,
+						      false, false);
+			halbtc8192e2ant_sw_mechanism2(btcoexist, false, false,
+						      false, 0x18);
+		}
+	}
+}
+
+/* HID+A2DP+PAN(EDR) */
+void halbtc8192e2ant_action_hid_a2dp_pan_edr(struct btc_coexist *btcoexist)
+{
+	u8 wifirssi_state, btrssi_state = BTC_RSSI_STATE_HIGH;
+	u32 wifi_bw;
+
+	wifirssi_state = halbtc8192e2ant_wifirssi_state(btcoexist, 0, 2, 15, 0);
+	btrssi_state = halbtc8192e2ant_btrssi_state(3, 34, 42);
+
+	halbtc8192e2ant_switch_sstype(btcoexist, NORMAL_EXEC, 1);
+	halbtc8192e2ant_limited_rx(btcoexist, NORMAL_EXEC, false, false, 0x8);
+
+	halbtc8192e2ant_fw_dac_swinglvl(btcoexist, NORMAL_EXEC, 6);
+
+	btcoexist->btc_get(btcoexist, BTC_GET_U4_WIFI_BW, &wifi_bw);
+
+	halbtc8192e2ant_coex_table_with_type(btcoexist, NORMAL_EXEC, 3);
+
+	if ((btrssi_state == BTC_RSSI_STATE_LOW) ||
+	   (btrssi_state == BTC_RSSI_STATE_STAY_LOW)) {
+		halbtc8192e2ant_dec_btpwr(btcoexist, NORMAL_EXEC, 0);
+		halbtc8192e2ant_tdma_duration_adjust(btcoexist, true, true, 3);
+	} else if ((btrssi_state == BTC_RSSI_STATE_MEDIUM) ||
+		   (btrssi_state == BTC_RSSI_STATE_STAY_MEDIUM)) {
+		halbtc8192e2ant_dec_btpwr(btcoexist, NORMAL_EXEC, 2);
+		halbtc8192e2ant_tdma_duration_adjust(btcoexist, true, false, 3);
+	} else if ((btrssi_state == BTC_RSSI_STATE_HIGH) ||
+		   (btrssi_state == BTC_RSSI_STATE_STAY_HIGH)) {
+		halbtc8192e2ant_dec_btpwr(btcoexist, NORMAL_EXEC, 4);
+		halbtc8192e2ant_tdma_duration_adjust(btcoexist, true, false, 3);
+	}
+
+	/* sw mechanism */
+	if (BTC_WIFI_BW_HT40 == wifi_bw) {
+		if ((wifirssi_state == BTC_RSSI_STATE_HIGH) ||
+		    (wifirssi_state == BTC_RSSI_STATE_STAY_HIGH)) {
+			halbtc8192e2ant_sw_mechanism1(btcoexist, true, true,
+						      false, false);
+			halbtc8192e2ant_sw_mechanism2(btcoexist, true, false,
+						      false, 0x18);
+		} else {
+			halbtc8192e2ant_sw_mechanism1(btcoexist, true, true,
+						      false, false);
+			halbtc8192e2ant_sw_mechanism2(btcoexist, false, false,
+						      false, 0x18);
+		}
+	} else {
+		if ((wifirssi_state == BTC_RSSI_STATE_HIGH) ||
+		    (wifirssi_state == BTC_RSSI_STATE_STAY_HIGH)) {
+			halbtc8192e2ant_sw_mechanism1(btcoexist, false, true,
+						      false, false);
+			halbtc8192e2ant_sw_mechanism2(btcoexist, true, false,
+						      false, 0x18);
+		} else {
+			halbtc8192e2ant_sw_mechanism1(btcoexist, false, true,
+						      false, false);
+			halbtc8192e2ant_sw_mechanism2(btcoexist, false, false,
+						      false, 0x18);
+		}
+	}
+}
+
+void halbtc8192e2ant_action_hid_a2dp(struct btc_coexist *btcoexist)
+{
+	u8 wifirssi_state, btrssi_state = BTC_RSSI_STATE_HIGH;
+	u32 wifi_bw;
+
+	wifirssi_state = halbtc8192e2ant_wifirssi_state(btcoexist, 0, 2, 15, 0);
+	btrssi_state = halbtc8192e2ant_btrssi_state(3, 34, 42);
+
+	halbtc8192e2ant_switch_sstype(btcoexist, NORMAL_EXEC, 1);
+	halbtc8192e2ant_limited_rx(btcoexist, NORMAL_EXEC, false, false, 0x8);
+
+	btcoexist->btc_get(btcoexist, BTC_GET_U4_WIFI_BW, &wifi_bw);
+
+	halbtc8192e2ant_coex_table_with_type(btcoexist, NORMAL_EXEC, 3);
+
+	if ((btrssi_state == BTC_RSSI_STATE_LOW) ||
+	    (btrssi_state == BTC_RSSI_STATE_STAY_LOW)) {
+		halbtc8192e2ant_dec_btpwr(btcoexist, NORMAL_EXEC, 0);
+		halbtc8192e2ant_tdma_duration_adjust(btcoexist, true, true, 2);
+	} else if ((btrssi_state == BTC_RSSI_STATE_MEDIUM) ||
+		   (btrssi_state == BTC_RSSI_STATE_STAY_MEDIUM)) {
+		halbtc8192e2ant_dec_btpwr(btcoexist, NORMAL_EXEC, 2);
+		halbtc8192e2ant_tdma_duration_adjust(btcoexist, true, false, 2);
+	} else if ((btrssi_state == BTC_RSSI_STATE_HIGH) ||
+		   (btrssi_state == BTC_RSSI_STATE_STAY_HIGH)) {
+		halbtc8192e2ant_dec_btpwr(btcoexist, NORMAL_EXEC, 4);
+		halbtc8192e2ant_tdma_duration_adjust(btcoexist, true, false, 2);
+	}
+
+	/* sw mechanism */
+	if (BTC_WIFI_BW_HT40 == wifi_bw) {
+		if ((wifirssi_state == BTC_RSSI_STATE_HIGH) ||
+		    (wifirssi_state == BTC_RSSI_STATE_STAY_HIGH)) {
+			halbtc8192e2ant_sw_mechanism1(btcoexist, true, true,
+						      false, false);
+			halbtc8192e2ant_sw_mechanism2(btcoexist, true, false,
+						      false, 0x18);
+		} else {
+			halbtc8192e2ant_sw_mechanism1(btcoexist, true, true,
+						      false, false);
+			halbtc8192e2ant_sw_mechanism2(btcoexist, false, false,
+						      false, 0x18);
+		}
+	} else {
+		if ((wifirssi_state == BTC_RSSI_STATE_HIGH) ||
+		    (wifirssi_state == BTC_RSSI_STATE_STAY_HIGH)) {
+			halbtc8192e2ant_sw_mechanism1(btcoexist, false, true,
+						      false, false);
+			halbtc8192e2ant_sw_mechanism2(btcoexist, true, false,
+						      false, 0x18);
+		} else {
+			halbtc8192e2ant_sw_mechanism1(btcoexist, false, true,
+						      false, false);
+			halbtc8192e2ant_sw_mechanism2(btcoexist, false, false,
+						      false, 0x18);
+		}
+	}
+}
+
+void halbtc8192e2ant_run_coexist_mechanism(struct btc_coexist *btcoexist)
+{
+	u8 algorithm = 0;
+
+	BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
+		  "[BTCoex], RunCoexistMechanism()===>\n");
+
+	if (btcoexist->manual_control) {
+		BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
+			  "[BTCoex], return for Manual CTRL <===\n");
+		return;
+	}
+
+	if (coex_sta->under_ips) {
+		BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
+			  "[BTCoex], wifi is under IPS !!!\n");
+		return;
+	}
+
+	algorithm = halbtc8192e2ant_action_algorithm(btcoexist);
+	if (coex_sta->c2h_bt_inquiry_page &&
+	    (BT_8192E_2ANT_COEX_ALGO_PANHS != algorithm)) {
+		BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
+			  "[BTCoex], BT is under inquiry/page scan !!\n");
+		halbtc8192e2ant_action_bt_inquiry(btcoexist);
+		return;
+	}
+
+	coex_dm->cur_algorithm = algorithm;
+	BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
+		  "[BTCoex], Algorithm = %d \n", coex_dm->cur_algorithm);
+
+	if (halbtc8192e2ant_is_common_action(btcoexist)) {
+		BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
+			  "[BTCoex], Action 2-Ant common.\n");
+		coex_dm->auto_tdma_adjust = false;
+	} else {
+		if (coex_dm->cur_algorithm != coex_dm->pre_algorithm) {
+			BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
+				  "[BTCoex] preAlgorithm=%d, curAlgorithm=%d\n",
+				  coex_dm->pre_algorithm,
+				  coex_dm->cur_algorithm);
+			coex_dm->auto_tdma_adjust = false;
+		}
+		switch (coex_dm->cur_algorithm) {
+		case BT_8192E_2ANT_COEX_ALGO_SCO:
+			BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
+				  "Action 2-Ant, algorithm = SCO.\n");
+			halbtc8192e2ant_action_sco(btcoexist);
+			break;
+		case BT_8192E_2ANT_COEX_ALGO_SCO_PAN:
+			BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
+				  "Action 2-Ant, algorithm = SCO+PAN(EDR).\n");
+			halbtc8192e2ant_action_sco_pan(btcoexist);
+			break;
+		case BT_8192E_2ANT_COEX_ALGO_HID:
+			BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
+				  "Action 2-Ant, algorithm = HID.\n");
+			halbtc8192e2ant_action_hid(btcoexist);
+			break;
+		case BT_8192E_2ANT_COEX_ALGO_A2DP:
+			BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
+				  "Action 2-Ant, algorithm = A2DP.\n");
+			halbtc8192e2ant_action_a2dp(btcoexist);
+			break;
+		case BT_8192E_2ANT_COEX_ALGO_A2DP_PANHS:
+			BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
+				  "Action 2-Ant, algorithm = A2DP+PAN(HS).\n");
+			halbtc8192e2ant_action_a2dp_pan_hs(btcoexist);
+			break;
+		case BT_8192E_2ANT_COEX_ALGO_PANEDR:
+			BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
+				  "Action 2-Ant, algorithm = PAN(EDR).\n");
+			halbtc8192e2ant_action_pan_edr(btcoexist);
+			break;
+		case BT_8192E_2ANT_COEX_ALGO_PANHS:
+			BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
+				  "Action 2-Ant, algorithm = HS mode.\n");
+			halbtc8192e2ant_action_pan_hs(btcoexist);
+			break;
+		case BT_8192E_2ANT_COEX_ALGO_PANEDR_A2DP:
+			BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
+				  "Action 2-Ant, algorithm = PAN+A2DP.\n");
+			halbtc8192e2ant_action_pan_edr_a2dp(btcoexist);
+			break;
+		case BT_8192E_2ANT_COEX_ALGO_PANEDR_HID:
+			BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
+				  "Action 2-Ant, algorithm = PAN(EDR)+HID.\n");
+			halbtc8192e2ant_action_pan_edr_hid(btcoexist);
+			break;
+		case BT_8192E_2ANT_COEX_ALGO_HID_A2DP_PANEDR:
+			BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
+				  "Action 2-Ant, algorithm = HID+A2DP+PAN.\n");
+			halbtc8192e2ant_action_hid_a2dp_pan_edr(btcoexist);
+			break;
+		case BT_8192E_2ANT_COEX_ALGO_HID_A2DP:
+			BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
+				  "Action 2-Ant, algorithm = HID+A2DP.\n");
+			halbtc8192e2ant_action_hid_a2dp(btcoexist);
+			break;
+		default:
+			BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
+				  "Action 2-Ant, algorithm = unknown!!\n");
+			/* halbtc8192e2ant_coex_alloff(btcoexist); */
+			break;
+		}
+		coex_dm->pre_algorithm = coex_dm->cur_algorithm;
+	}
+}
+
+void halbtc8192e2ant_init_hwconfig(struct btc_coexist *btcoexist, bool backup)
+{
+	u16 u16tmp = 0;
+	u8 u8tmp = 0;
+
+	BTC_PRINT(BTC_MSG_INTERFACE, INTF_INIT,
+		  "[BTCoex], 2Ant Init HW Config!!\n");
+
+	if (backup) {
+		/* backup rf 0x1e value */
+		coex_dm->bt_rf0x1e_backup =
+			btcoexist->btc_get_rf_reg(btcoexist, BTC_RF_A,
+						  0x1e, 0xfffff);
+
+		coex_dm->backup_arfr_cnt1 = btcoexist->btc_read_4byte(btcoexist,
+								      0x430);
+		coex_dm->backup_arfr_cnt2 = btcoexist->btc_read_4byte(btcoexist,
+								     0x434);
+		coex_dm->backup_retrylimit = btcoexist->btc_read_2byte(
+								    btcoexist,
+								    0x42a);
+		coex_dm->backup_ampdu_maxtime = btcoexist->btc_read_1byte(
+								    btcoexist,
+								    0x456);
+	}
+
+	/* antenna sw ctrl to bt */
+	btcoexist->btc_write_1byte(btcoexist, 0x4f, 0x6);
+	btcoexist->btc_write_1byte(btcoexist, 0x944, 0x24);
+	btcoexist->btc_write_4byte(btcoexist, 0x930, 0x700700);
+	btcoexist->btc_write_1byte(btcoexist, 0x92c, 0x20);
+	if (btcoexist->chip_interface == BTC_INTF_USB)
+		btcoexist->btc_write_4byte(btcoexist, 0x64, 0x30430004);
+	else
+		btcoexist->btc_write_4byte(btcoexist, 0x64, 0x30030004);
+
+	halbtc8192e2ant_coex_table_with_type(btcoexist, FORCE_EXEC, 0);
+
+	/* antenna switch control parameter */
+	btcoexist->btc_write_4byte(btcoexist, 0x858, 0x55555555);
+
+	/* coex parameters */
+	btcoexist->btc_write_1byte(btcoexist, 0x778, 0x3);
+	/* 0x790[5:0]=0x5 */
+	u8tmp = btcoexist->btc_read_1byte(btcoexist, 0x790);
+	u8tmp &= 0xc0;
+	u8tmp |= 0x5;
+	btcoexist->btc_write_1byte(btcoexist, 0x790, u8tmp);
+
+	/* enable counter statistics */
+	btcoexist->btc_write_1byte(btcoexist, 0x76e, 0x4);
+
+	/* enable PTA */
+	btcoexist->btc_write_1byte(btcoexist, 0x40, 0x20);
+	/* enable mailbox interface */
+	u16tmp = btcoexist->btc_read_2byte(btcoexist, 0x40);
+	u16tmp |= BIT9;
+	btcoexist->btc_write_2byte(btcoexist, 0x40, u16tmp);
+
+	/* enable PTA I2C mailbox  */
+	u8tmp = btcoexist->btc_read_1byte(btcoexist, 0x101);
+	u8tmp |= BIT4;
+	btcoexist->btc_write_1byte(btcoexist, 0x101, u8tmp);
+
+	/* enable bt clock when wifi is disabled. */
+	u8tmp = btcoexist->btc_read_1byte(btcoexist, 0x93);
+	u8tmp |= BIT0;
+	btcoexist->btc_write_1byte(btcoexist, 0x93, u8tmp);
+	/* enable bt clock when suspend. */
+	u8tmp = btcoexist->btc_read_1byte(btcoexist, 0x7);
+	u8tmp |= BIT0;
+	btcoexist->btc_write_1byte(btcoexist, 0x7, u8tmp);
+}
+
+/*************************************************************
+ *   work around function start with wa_halbtc8192e2ant_
+ *************************************************************/
+
+/************************************************************
+ *   extern function start with EXhalbtc8192e2ant_
+ ************************************************************/
+
+void ex_halbtc8192e2ant_init_hwconfig(struct btc_coexist *btcoexist)
+{
+	halbtc8192e2ant_init_hwconfig(btcoexist, true);
+}
+
+void ex_halbtc8192e2ant_init_coex_dm(struct btc_coexist *btcoexist)
+{
+	BTC_PRINT(BTC_MSG_INTERFACE, INTF_INIT,
+		  "[BTCoex], Coex Mechanism Init!!\n");
+	halbtc8192e2ant_init_coex_dm(btcoexist);
+}
+
+void ex_halbtc8192e2ant_display_coex_info(struct btc_coexist *btcoexist)
+{
+	struct btc_board_info *board_info = &btcoexist->board_info;
+	struct btc_stack_info *stack_info = &btcoexist->stack_info;
+	u8 *cli_buf = btcoexist->cli_buf;
+	u8 u8tmp[4], i, bt_info_ext, ps_tdma_case = 0;
+	u16 u16tmp[4];
+	u32 u32tmp[4];
+	bool roam = false, scan = false, link = false, wifi_under_5g = false;
+	bool bt_hson = false, wifi_busy = false;
+	int wifirssi = 0, bt_hs_rssi = 0;
+	u32 wifi_bw, wifi_traffic_dir;
+	u8 wifi_dot11_chnl, wifi_hs_chnl;
+	u32 fw_ver = 0, bt_patch_ver = 0;
+
+	CL_SPRINTF(cli_buf, BT_TMP_BUF_SIZE,
+		   "\r\n ============[BT Coexist info]============");
+	CL_PRINTF(cli_buf);
+
+	if (btcoexist->manual_control) {
+		CL_SPRINTF(cli_buf, BT_TMP_BUF_SIZE,
+			   "\r\n ===========[Under Manual Control]===========");
+		CL_PRINTF(cli_buf);
+		CL_SPRINTF(cli_buf, BT_TMP_BUF_SIZE,
+			   "\r\n ==========================================");
+		CL_PRINTF(cli_buf);
+	}
+
+	if (!board_info->bt_exist) {
+		CL_SPRINTF(cli_buf, BT_TMP_BUF_SIZE, "\r\n BT does not exist!!!");
+		CL_PRINTF(cli_buf);
+		return;
+	}
+
+	CL_SPRINTF(cli_buf, BT_TMP_BUF_SIZE,
+		   "\r\n %-35s = %d/ %d ", "Ant PG number/ Ant mechanism:",
+		   board_info->pg_ant_num, board_info->btdm_ant_num);
+	CL_PRINTF(cli_buf);
+
+	CL_SPRINTF(cli_buf, BT_TMP_BUF_SIZE, "\r\n %-35s = %s / %d",
+		   "BT stack/ hci ext ver",
+		   ((stack_info->profile_notified) ? "Yes" : "No"),
+		   stack_info->hci_version);
+	CL_PRINTF(cli_buf);
+
+	btcoexist->btc_get(btcoexist, BTC_GET_U4_BT_PATCH_VER, &bt_patch_ver);
+	btcoexist->btc_get(btcoexist, BTC_GET_U4_WIFI_FW_VER, &fw_ver);
+	CL_SPRINTF(cli_buf, BT_TMP_BUF_SIZE,
+		   "\r\n %-35s = %d_%d/ 0x%x/ 0x%x(%d)",
+		   "CoexVer/ FwVer/ PatchVer",
+		   glcoex_ver_date_8192e_2ant, glcoex_ver_8192e_2ant,
+		   fw_ver, bt_patch_ver, bt_patch_ver);
+	CL_PRINTF(cli_buf);
+
+	btcoexist->btc_get(btcoexist, BTC_GET_BL_HS_OPERATION, &bt_hson);
+	btcoexist->btc_get(btcoexist, BTC_GET_U1_WIFI_DOT11_CHNL,
+			   &wifi_dot11_chnl);
+	btcoexist->btc_get(btcoexist, BTC_GET_U1_WIFI_HS_CHNL, &wifi_hs_chnl);
+	CL_SPRINTF(cli_buf, BT_TMP_BUF_SIZE, "\r\n %-35s = %d / %d(%d)",
+		   "Dot11 channel / HsMode(HsChnl)",
+		   wifi_dot11_chnl, bt_hson, wifi_hs_chnl);
+	CL_PRINTF(cli_buf);
+
+	CL_SPRINTF(cli_buf, BT_TMP_BUF_SIZE, "\r\n %-35s = %02x %02x %02x ",
+		   "H2C Wifi inform bt chnl Info", coex_dm->wifi_chnl_info[0],
+		   coex_dm->wifi_chnl_info[1], coex_dm->wifi_chnl_info[2]);
+	CL_PRINTF(cli_buf);
+
+	btcoexist->btc_get(btcoexist, BTC_GET_S4_WIFI_RSSI, &wifirssi);
+	btcoexist->btc_get(btcoexist, BTC_GET_S4_HS_RSSI, &bt_hs_rssi);
+	CL_SPRINTF(cli_buf, BT_TMP_BUF_SIZE, "\r\n %-35s = %d/ %d",
+		   "Wifi rssi/ HS rssi", wifirssi, bt_hs_rssi);
+	CL_PRINTF(cli_buf);
+
+	btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_SCAN, &scan);
+	btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_LINK, &link);
+	btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_ROAM, &roam);
+	CL_SPRINTF(cli_buf, BT_TMP_BUF_SIZE, "\r\n %-35s = %d/ %d/ %d ",
+		   "Wifi link/ roam/ scan", link, roam, scan);
+	CL_PRINTF(cli_buf);
+
+	btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_UNDER_5G, &wifi_under_5g);
+	btcoexist->btc_get(btcoexist, BTC_GET_U4_WIFI_BW, &wifi_bw);
+	btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_BUSY, &wifi_busy);
+	btcoexist->btc_get(btcoexist, BTC_GET_U4_WIFI_TRAFFIC_DIRECTION,
+			   &wifi_traffic_dir);
+	CL_SPRINTF(cli_buf, BT_TMP_BUF_SIZE, "\r\n %-35s = %s / %s/ %s ",
+		   "Wifi status", (wifi_under_5g ? "5G" : "2.4G"),
+		   ((BTC_WIFI_BW_LEGACY == wifi_bw) ? "Legacy" :
+		    (((BTC_WIFI_BW_HT40 == wifi_bw) ? "HT40" : "HT20"))),
+		   ((!wifi_busy) ? "idle" :
+		    ((BTC_WIFI_TRAFFIC_TX == wifi_traffic_dir) ?
+		     "uplink" : "downlink")));
+	CL_PRINTF(cli_buf);
+
+	CL_SPRINTF(cli_buf, BT_TMP_BUF_SIZE, "\r\n %-35s = [%s/ %d/ %d] ",
+		   "BT [status/ rssi/ retryCnt]",
+		   ((btcoexist->bt_info.bt_disabled) ? ("disabled") :
+		    ((coex_sta->c2h_bt_inquiry_page) ?
+		     ("inquiry/page scan") :
+		      ((BT_8192E_2ANT_BT_STATUS_NON_CONNECTED_IDLE ==
+			coex_dm->bt_status) ? "non-connected idle" :
+			 ((BT_8192E_2ANT_BT_STATUS_CONNECTED_IDLE ==
+			   coex_dm->bt_status) ? "connected-idle" : "busy")))),
+		   coex_sta->bt_rssi, coex_sta->bt_retry_cnt);
+	CL_PRINTF(cli_buf);
+
+	CL_SPRINTF(cli_buf, BT_TMP_BUF_SIZE, "\r\n %-35s = %d / %d / %d / %d",
+		   "SCO/HID/PAN/A2DP", stack_info->sco_exist,
+		   stack_info->hid_exist, stack_info->pan_exist,
+		   stack_info->a2dp_exist);
+	CL_PRINTF(cli_buf);
+	btcoexist->btc_disp_dbg_msg(btcoexist, BTC_DBG_DISP_BT_LINK_INFO);
+
+	bt_info_ext = coex_sta->bt_info_ext;
+	CL_SPRINTF(cli_buf, BT_TMP_BUF_SIZE, "\r\n %-35s = %s",
+		   "BT Info A2DP rate",
+		   (bt_info_ext&BIT0) ? "Basic rate" : "EDR rate");
+	CL_PRINTF(cli_buf);
+
+	for (i = 0; i < BT_INFO_SRC_8192E_2ANT_MAX; i++) {
+		if (coex_sta->bt_info_c2h_cnt[i]) {
+			CL_SPRINTF(cli_buf, BT_TMP_BUF_SIZE,
+				   "\r\n %-35s = %02x %02x %02x %02x ",
+				   GLBtInfoSrc8192e2Ant[i],
+				   coex_sta->bt_info_c2h[i][0],
+				   coex_sta->bt_info_c2h[i][1],
+				   coex_sta->bt_info_c2h[i][2],
+				   coex_sta->bt_info_c2h[i][3]);
+			CL_PRINTF(cli_buf);
+			CL_SPRINTF(cli_buf, BT_TMP_BUF_SIZE,
+				   "%02x %02x %02x(%d)",
+				   coex_sta->bt_info_c2h[i][4],
+				   coex_sta->bt_info_c2h[i][5],
+				   coex_sta->bt_info_c2h[i][6],
+				   coex_sta->bt_info_c2h_cnt[i]);
+			CL_PRINTF(cli_buf);
+		}
+	}
+
+	CL_SPRINTF(cli_buf, BT_TMP_BUF_SIZE, "\r\n %-35s = %s/%s",
+		   "PS state, IPS/LPS",
+		   ((coex_sta->under_ips ? "IPS ON" : "IPS OFF")),
+		   ((coex_sta->under_lps ? "LPS ON" : "LPS OFF")));
+	CL_PRINTF(cli_buf);
+	btcoexist->btc_disp_dbg_msg(btcoexist, BTC_DBG_DISP_FW_PWR_MODE_CMD);
+
+	CL_SPRINTF(cli_buf, BT_TMP_BUF_SIZE, "\r\n %-35s = 0x%x ", "SS Type",
+		   coex_dm->cur_sstype);
+	CL_PRINTF(cli_buf);
+
+	/* Sw mechanism */
+	CL_SPRINTF(cli_buf, BT_TMP_BUF_SIZE, "\r\n %-35s",
+		   "============[Sw mechanism]============");
+	CL_PRINTF(cli_buf);
+	CL_SPRINTF(cli_buf, BT_TMP_BUF_SIZE, "\r\n %-35s = %d/ %d/ %d ",
+		   "SM1[ShRf/ LpRA/ LimDig]", coex_dm->cur_rf_rx_lpf_shrink,
+		   coex_dm->cur_low_penalty_ra, coex_dm->limited_dig);
+	CL_PRINTF(cli_buf);
+	CL_SPRINTF(cli_buf, BT_TMP_BUF_SIZE, "\r\n %-35s = %d/ %d/ %d(0x%x) ",
+		   "SM2[AgcT/ AdcB/ SwDacSwing(lvl)]",
+		   coex_dm->cur_agc_table_en, coex_dm->cur_adc_back_off,
+		   coex_dm->cur_dac_swing_on, coex_dm->cur_dac_swing_lvl);
+	CL_PRINTF(cli_buf);
+
+	CL_SPRINTF(cli_buf, BT_TMP_BUF_SIZE, "\r\n %-35s = 0x%x ", "Rate Mask",
+		   btcoexist->bt_info.ra_mask);
+	CL_PRINTF(cli_buf);
+
+	/* Fw mechanism */
+	CL_SPRINTF(cli_buf, BT_TMP_BUF_SIZE, "\r\n %-35s",
+		   "============[Fw mechanism]============");
+	CL_PRINTF(cli_buf);
+
+	ps_tdma_case = coex_dm->cur_ps_tdma;
+	CL_SPRINTF(cli_buf, BT_TMP_BUF_SIZE,
+		   "\r\n %-35s = %02x %02x %02x %02x %02x case-%d (auto:%d)",
+		   "PS TDMA", coex_dm->ps_tdma_para[0],
+		   coex_dm->ps_tdma_para[1], coex_dm->ps_tdma_para[2],
+		   coex_dm->ps_tdma_para[3], coex_dm->ps_tdma_para[4],
+		   ps_tdma_case, coex_dm->auto_tdma_adjust);
+	CL_PRINTF(cli_buf);
+
+	CL_SPRINTF(cli_buf, BT_TMP_BUF_SIZE, "\r\n %-35s = %d/ %d ",
+		   "DecBtPwr/ IgnWlanAct",
+		   coex_dm->cur_dec_bt_pwr, coex_dm->cur_ignore_wlan_act);
+	CL_PRINTF(cli_buf);
+
+	/* Hw setting */
+	CL_SPRINTF(cli_buf, BT_TMP_BUF_SIZE, "\r\n %-35s",
+		   "============[Hw setting]============");
+	CL_PRINTF(cli_buf);
+
+	CL_SPRINTF(cli_buf, BT_TMP_BUF_SIZE, "\r\n %-35s = 0x%x",
+		   "RF-A, 0x1e initVal", coex_dm->bt_rf0x1e_backup);
+	CL_PRINTF(cli_buf);
+
+	CL_SPRINTF(cli_buf, BT_TMP_BUF_SIZE, "\r\n %-35s = 0x%x/0x%x/0x%x/0x%x",
+		   "backup ARFR1/ARFR2/RL/AMaxTime", coex_dm->backup_arfr_cnt1,
+		   coex_dm->backup_arfr_cnt2, coex_dm->backup_retrylimit,
+		   coex_dm->backup_ampdu_maxtime);
+	CL_PRINTF(cli_buf);
+
+	u32tmp[0] = btcoexist->btc_read_4byte(btcoexist, 0x430);
+	u32tmp[1] = btcoexist->btc_read_4byte(btcoexist, 0x434);
+	u16tmp[0] = btcoexist->btc_read_2byte(btcoexist, 0x42a);
+	u8tmp[0] = btcoexist->btc_read_1byte(btcoexist, 0x456);
+	CL_SPRINTF(cli_buf, BT_TMP_BUF_SIZE, "\r\n %-35s = 0x%x/0x%x/0x%x/0x%x",
+		   "0x430/0x434/0x42a/0x456",
+		   u32tmp[0], u32tmp[1], u16tmp[0], u8tmp[0]);
+	CL_PRINTF(cli_buf);
+
+	u32tmp[0] = btcoexist->btc_read_4byte(btcoexist, 0xc04);
+	u32tmp[1] = btcoexist->btc_read_4byte(btcoexist, 0xd04);
+	u32tmp[2] = btcoexist->btc_read_4byte(btcoexist, 0x90c);
+	CL_SPRINTF(cli_buf, BT_TMP_BUF_SIZE, "\r\n %-35s = 0x%x/ 0x%x/ 0x%x",
+		   "0xc04/ 0xd04/ 0x90c", u32tmp[0], u32tmp[1], u32tmp[2]);
+	CL_PRINTF(cli_buf);
+
+	u8tmp[0] = btcoexist->btc_read_1byte(btcoexist, 0x778);
+	CL_SPRINTF(cli_buf, BT_TMP_BUF_SIZE, "\r\n %-35s = 0x%x", "0x778",
+		   u8tmp[0]);
+	CL_PRINTF(cli_buf);
+
+	u8tmp[0] = btcoexist->btc_read_1byte(btcoexist, 0x92c);
+	u32tmp[0] = btcoexist->btc_read_4byte(btcoexist, 0x930);
+	CL_SPRINTF(cli_buf, BT_TMP_BUF_SIZE, "\r\n %-35s = 0x%x/ 0x%x",
+		   "0x92c/ 0x930", (u8tmp[0]), u32tmp[0]);
+	CL_PRINTF(cli_buf);
+
+	u8tmp[0] = btcoexist->btc_read_1byte(btcoexist, 0x40);
+	u8tmp[1] = btcoexist->btc_read_1byte(btcoexist, 0x4f);
+	CL_SPRINTF(cli_buf, BT_TMP_BUF_SIZE, "\r\n %-35s = 0x%x/ 0x%x",
+		   "0x40/ 0x4f", u8tmp[0], u8tmp[1]);
+	CL_PRINTF(cli_buf);
+
+	u32tmp[0] = btcoexist->btc_read_4byte(btcoexist, 0x550);
+	u8tmp[0] = btcoexist->btc_read_1byte(btcoexist, 0x522);
+	CL_SPRINTF(cli_buf, BT_TMP_BUF_SIZE, "\r\n %-35s = 0x%x/ 0x%x",
+		   "0x550(bcn ctrl)/0x522", u32tmp[0], u8tmp[0]);
+	CL_PRINTF(cli_buf);
+
+	u32tmp[0] = btcoexist->btc_read_4byte(btcoexist, 0xc50);
+	CL_SPRINTF(cli_buf, BT_TMP_BUF_SIZE, "\r\n %-35s = 0x%x", "0xc50(dig)",
+		   u32tmp[0]);
+	CL_PRINTF(cli_buf);
+
+	u32tmp[0] = btcoexist->btc_read_4byte(btcoexist, 0x6c0);
+	u32tmp[1] = btcoexist->btc_read_4byte(btcoexist, 0x6c4);
+	u32tmp[2] = btcoexist->btc_read_4byte(btcoexist, 0x6c8);
+	u8tmp[0] = btcoexist->btc_read_1byte(btcoexist, 0x6cc);
+	CL_SPRINTF(cli_buf, BT_TMP_BUF_SIZE,
+		   "\r\n %-35s = 0x%x/ 0x%x/ 0x%x/ 0x%x",
+		   "0x6c0/0x6c4/0x6c8/0x6cc(coexTable)",
+		   u32tmp[0], u32tmp[1], u32tmp[2], u8tmp[0]);
+	CL_PRINTF(cli_buf);
+
+	CL_SPRINTF(cli_buf, BT_TMP_BUF_SIZE, "\r\n %-35s = %d/ %d",
+		   "0x770(hp rx[31:16]/tx[15:0])",
+		   coex_sta->high_priority_rx, coex_sta->high_priority_tx);
+	CL_PRINTF(cli_buf);
+	CL_SPRINTF(cli_buf, BT_TMP_BUF_SIZE, "\r\n %-35s = %d/ %d",
+		   "0x774(lp rx[31:16]/tx[15:0])",
+		   coex_sta->low_priority_rx, coex_sta->low_priority_tx);
+	CL_PRINTF(cli_buf);
+#if (BT_AUTO_REPORT_ONLY_8192E_2ANT == 1)
+	halbtc8192e2ant_monitor_bt_ctr(btcoexist);
+#endif
+	btcoexist->btc_disp_dbg_msg(btcoexist, BTC_DBG_DISP_COEX_STATISTICS);
+}
+
+
+void ex_halbtc8192e2ant_ips_notify(struct btc_coexist *btcoexist, u8 type)
+{
+	if (BTC_IPS_ENTER == type) {
+		BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY,
+			  "[BTCoex], IPS ENTER notify\n");
+		coex_sta->under_ips = true;
+		halbtc8192e2ant_coex_alloff(btcoexist);
+	} else if (BTC_IPS_LEAVE == type) {
+		BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY,
+			  "[BTCoex], IPS LEAVE notify\n");
+		coex_sta->under_ips = false;
+	}
+}
+
+void ex_halbtc8192e2ant_lps_notify(struct btc_coexist *btcoexist, u8 type)
+{
+	if (BTC_LPS_ENABLE == type) {
+		BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY,
+			  "[BTCoex], LPS ENABLE notify\n");
+		coex_sta->under_lps = true;
+	} else if (BTC_LPS_DISABLE == type) {
+		BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY,
+			  "[BTCoex], LPS DISABLE notify\n");
+		coex_sta->under_lps = false;
+	}
+}
+
+void ex_halbtc8192e2ant_scan_notify(struct btc_coexist *btcoexist, u8 type)
+{
+	if (BTC_SCAN_START == type)
+		BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY,
+			  "[BTCoex], SCAN START notify\n");
+	else if (BTC_SCAN_FINISH == type)
+		BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY,
+			  "[BTCoex], SCAN FINISH notify\n");
+}
+
+void ex_halbtc8192e2ant_connect_notify(struct btc_coexist *btcoexist, u8 type)
+{
+	if (BTC_ASSOCIATE_START == type)
+		BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY,
+			  "[BTCoex], CONNECT START notify\n");
+	else if (BTC_ASSOCIATE_FINISH == type)
+		BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY,
+			  "[BTCoex], CONNECT FINISH notify\n");
+}
+
+void ex_halbtc8192e2ant_media_status_notify(struct btc_coexist *btcoexist,
+					    u8 type)
+{
+	u8 h2c_parameter[3] = {0};
+	u32 wifi_bw;
+	u8 wifi_center_chnl;
+
+	if (btcoexist->manual_control ||
+	    btcoexist->stop_coex_dm ||
+	    btcoexist->bt_info.bt_disabled)
+		return;
+
+	if (BTC_MEDIA_CONNECT == type)
+		BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY,
+			  "[BTCoex], MEDIA connect notify\n");
+	else
+		BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY,
+			  "[BTCoex], MEDIA disconnect notify\n");
+
+	/* only 2.4G we need to inform bt the chnl mask */
+	btcoexist->btc_get(btcoexist, BTC_GET_U1_WIFI_CENTRAL_CHNL,
+			   &wifi_center_chnl);
+	if ((BTC_MEDIA_CONNECT == type) &&
+	    (wifi_center_chnl <= 14)) {
+		h2c_parameter[0] = 0x1;
+		h2c_parameter[1] = wifi_center_chnl;
+		btcoexist->btc_get(btcoexist, BTC_GET_U4_WIFI_BW, &wifi_bw);
+		if (BTC_WIFI_BW_HT40 == wifi_bw)
+			h2c_parameter[2] = 0x30;
+		else
+			h2c_parameter[2] = 0x20;
+	}
+
+	coex_dm->wifi_chnl_info[0] = h2c_parameter[0];
+	coex_dm->wifi_chnl_info[1] = h2c_parameter[1];
+	coex_dm->wifi_chnl_info[2] = h2c_parameter[2];
+
+	BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_EXEC,
+		  "[BTCoex], FW write 0x66=0x%x\n",
+		  h2c_parameter[0] << 16 | h2c_parameter[1] << 8 |
+		  h2c_parameter[2]);
+
+	btcoexist->btc_fill_h2c(btcoexist, 0x66, 3, h2c_parameter);
+}
+
+void ex_halbtc8192e2ant_special_packet_notify(struct btc_coexist *btcoexist,
+					      u8 type)
+{
+	if (type == BTC_PACKET_DHCP)
+		BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY,
+			  "[BTCoex], DHCP Packet notify\n");
+}
+
+void ex_halbtc8192e2ant_bt_info_notify(struct btc_coexist *btcoexist,
+				       u8 *tmp_buf, u8 length)
+{
+	u8 bt_info = 0;
+	u8 i, rsp_source = 0;
+	bool bt_busy = false, limited_dig = false;
+	bool wifi_connected = false;
+
+	coex_sta->c2h_bt_info_req_sent = false;
+
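+	/* low nibble of byte 0 identifies the info source (bt_info_src enum) */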
+	rsp_source = tmp_buf[0] & 0xf;
+	if (rsp_source >= BT_INFO_SRC_8192E_2ANT_MAX)
+		rsp_source = BT_INFO_SRC_8192E_2ANT_WIFI_FW;
+	coex_sta->bt_info_c2h_cnt[rsp_source]++;
+
+	BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY,
+		  "[BTCoex], Bt info[%d], length=%d, hex data=[",
+		  rsp_source, length);
+	for (i = 0; i < length; i++) {
+		coex_sta->bt_info_c2h[rsp_source][i] = tmp_buf[i];
+		if (i == 1)
+			bt_info = tmp_buf[i];
+		if (i == length-1)
+			BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY,
+				  "0x%02x]\n", tmp_buf[i]);
+		else
+			BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY,
+				  "0x%02x, ", tmp_buf[i]);
+	}
+
+	if (BT_INFO_SRC_8192E_2ANT_WIFI_FW != rsp_source) {
+		coex_sta->bt_retry_cnt =	/* [3:0] */
+			coex_sta->bt_info_c2h[rsp_source][2] & 0xf;
+
+		coex_sta->bt_rssi =
+			coex_sta->bt_info_c2h[rsp_source][3] * 2 + 10;
+
+		coex_sta->bt_info_ext =
+			coex_sta->bt_info_c2h[rsp_source][4];
+
+		/* Here we need to resend some wifi info to BT
+		 * because bt is reset and loses the info. */
+		if ((coex_sta->bt_info_ext & BIT1)) {
+			BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
+				  "bit1, send wifi BW&Chnl to BT!!\n");
+			btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_CONNECTED,
+					   &wifi_connected);
+			if (wifi_connected)
+				ex_halbtc8192e2ant_media_status_notify(
+							btcoexist,
+							BTC_MEDIA_CONNECT);
+			else
+				ex_halbtc8192e2ant_media_status_notify(
+							btcoexist,
+							BTC_MEDIA_DISCONNECT);
+		}
+
+		if ((coex_sta->bt_info_ext & BIT3)) {
+			if (!btcoexist->manual_control &&
+			    !btcoexist->stop_coex_dm) {
+				BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
+					  "bit3, BT NOT ignore Wlan active!\n");
+				halbtc8192e2ant_IgnoreWlanAct(btcoexist,
+							      FORCE_EXEC,
+							      false);
+			}
+		} else {
+			/* BT already does NOT ignore Wlan active,
+			 * do nothing here. */
+		}
+
+#if (BT_AUTO_REPORT_ONLY_8192E_2ANT == 0)
+		if ((coex_sta->bt_info_ext & BIT4)) {
+			/* BT auto report already enabled, do nothing */
+		} else {
+			halbtc8192e2ant_bt_autoreport(btcoexist, FORCE_EXEC,
+						      true);
+		}
+#endif
+	}
+
+	/* check BIT2 first ==> check if bt is under inquiry or page scan */
+	if (bt_info & BT_INFO_8192E_2ANT_B_INQ_PAGE)
+		coex_sta->c2h_bt_inquiry_page = true;
+	else
+		coex_sta->c2h_bt_inquiry_page = false;
+
+	/* set link exist status */
+	if (!(bt_info & BT_INFO_8192E_2ANT_B_CONNECTION)) {
+		coex_sta->bt_link_exist = false;
+		coex_sta->pan_exist = false;
+		coex_sta->a2dp_exist = false;
+		coex_sta->hid_exist = false;
+		coex_sta->sco_exist = false;
+	} else { /* connection exists */
+		coex_sta->bt_link_exist = true;
+		if (bt_info & BT_INFO_8192E_2ANT_B_FTP)
+			coex_sta->pan_exist = true;
+		else
+			coex_sta->pan_exist = false;
+		if (bt_info & BT_INFO_8192E_2ANT_B_A2DP)
+			coex_sta->a2dp_exist = true;
+		else
+			coex_sta->a2dp_exist = false;
+		if (bt_info & BT_INFO_8192E_2ANT_B_HID)
+			coex_sta->hid_exist = true;
+		else
+			coex_sta->hid_exist = false;
+		if (bt_info & BT_INFO_8192E_2ANT_B_SCO_ESCO)
+			coex_sta->sco_exist = true;
+		else
+			coex_sta->sco_exist = false;
+	}
+
+	halbtc8192e2ant_update_btlink_info(btcoexist);
+
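+	/* map the raw bt_info bits to a coarse BT status for the coex state machine */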
+	if (!(bt_info & BT_INFO_8192E_2ANT_B_CONNECTION)) {
+		coex_dm->bt_status = BT_8192E_2ANT_BT_STATUS_NON_CONNECTED_IDLE;
+		BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
+			  "[BTCoex], BT Non-Connected idle!!!\n");
+	} else if (bt_info == BT_INFO_8192E_2ANT_B_CONNECTION) {
+		coex_dm->bt_status = BT_8192E_2ANT_BT_STATUS_CONNECTED_IDLE;
+		BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
+			  "[BTCoex], bt_infoNotify(), BT Connected-idle!!!\n");
+	} else if ((bt_info & BT_INFO_8192E_2ANT_B_SCO_ESCO) ||
+		   (bt_info & BT_INFO_8192E_2ANT_B_SCO_BUSY)) {
+		coex_dm->bt_status = BT_8192E_2ANT_BT_STATUS_SCO_BUSY;
+		BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
+			  "[BTCoex], bt_infoNotify(), BT SCO busy!!!\n");
+	} else if (bt_info & BT_INFO_8192E_2ANT_B_ACL_BUSY) {
+		coex_dm->bt_status = BT_8192E_2ANT_BT_STATUS_ACL_BUSY;
+		BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
+			  "[BTCoex], bt_infoNotify(), BT ACL busy!!!\n");
+	} else {
+		coex_dm->bt_status = BT_8192E_2ANT_BT_STATUS_MAX;
+		BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
+			  "[BTCoex]bt_infoNotify(), BT Non-Defined state!!!\n");
+	}
+
+	if ((BT_8192E_2ANT_BT_STATUS_ACL_BUSY == coex_dm->bt_status) ||
+	    (BT_8192E_2ANT_BT_STATUS_SCO_BUSY == coex_dm->bt_status) ||
+	    (BT_8192E_2ANT_BT_STATUS_ACL_SCO_BUSY == coex_dm->bt_status)) {
+		bt_busy = true;
+		limited_dig = true;
+	} else {
+		bt_busy = false;
+		limited_dig = false;
+	}
+
+	btcoexist->btc_set(btcoexist, BTC_SET_BL_BT_TRAFFIC_BUSY, &bt_busy);
+
+	coex_dm->limited_dig = limited_dig;
+	btcoexist->btc_set(btcoexist, BTC_SET_BL_BT_LIMITED_DIG, &limited_dig);
+
+	halbtc8192e2ant_run_coexist_mechanism(btcoexist);
+}
+
+void ex_halbtc8192e2ant_stack_operation_notify(struct btc_coexist *btcoexist,
+					       u8 type)
+{
+	if (BTC_STACK_OP_INQ_PAGE_PAIR_START == type)
+		BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY,
+			  "[BTCoex] StackOP Inquiry/page/pair start notify\n");
+	else if (BTC_STACK_OP_INQ_PAGE_PAIR_FINISH == type)
+		BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY,
+			  "[BTCoex] StackOP Inquiry/page/pair finish notify\n");
+}
+
+void ex_halbtc8192e2ant_halt_notify(struct btc_coexist *btcoexist)
+{
+	BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY, "[BTCoex], Halt notify\n");
+
+	halbtc8192e2ant_IgnoreWlanAct(btcoexist, FORCE_EXEC, true);
+	ex_halbtc8192e2ant_media_status_notify(btcoexist, BTC_MEDIA_DISCONNECT);
+}
+
+void ex_halbtc8192e2ant_periodical(struct btc_coexist *btcoexist)
+{
+	static u8 dis_ver_info_cnt;
+	u32 fw_ver = 0, bt_patch_ver = 0;
+	struct btc_board_info *board_info = &btcoexist->board_info;
+	struct btc_stack_info *stack_info = &btcoexist->stack_info;
+
+	BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
+		  "=======================Periodical=======================\n");
+	if (dis_ver_info_cnt <= 5) {
+		dis_ver_info_cnt += 1;
+		BTC_PRINT(BTC_MSG_INTERFACE, INTF_INIT,
+			  "************************************************\n");
+		BTC_PRINT(BTC_MSG_INTERFACE, INTF_INIT,
+			  "Ant PG Num/ Ant Mech/ Ant Pos = %d/ %d/ %d\n",
+			  board_info->pg_ant_num, board_info->btdm_ant_num,
+			  board_info->btdm_ant_pos);
+		BTC_PRINT(BTC_MSG_INTERFACE, INTF_INIT,
+			  "BT stack/ hci ext ver = %s / %d\n",
+			  ((stack_info->profile_notified) ? "Yes" : "No"),
+			  stack_info->hci_version);
+		btcoexist->btc_get(btcoexist, BTC_GET_U4_BT_PATCH_VER,
+				   &bt_patch_ver);
+		btcoexist->btc_get(btcoexist, BTC_GET_U4_WIFI_FW_VER, &fw_ver);
+		BTC_PRINT(BTC_MSG_INTERFACE, INTF_INIT,
+			  "CoexVer/ FwVer/ PatchVer = %d_%x/ 0x%x/ 0x%x(%d)\n",
+			  glcoex_ver_date_8192e_2ant, glcoex_ver_8192e_2ant,
+			  fw_ver, bt_patch_ver, bt_patch_ver);
+		BTC_PRINT(BTC_MSG_INTERFACE, INTF_INIT,
+			  "************************************************\n");
+	}
+
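+	/* without BT auto report, actively poll BT info and traffic counters */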
+#if (BT_AUTO_REPORT_ONLY_8192E_2ANT == 0)
+	halbtc8192e2ant_querybt_info(btcoexist);
+	halbtc8192e2ant_monitor_bt_ctr(btcoexist);
+	halbtc8192e2ant_monitor_bt_enable_disable(btcoexist);
+#else
+	if (halbtc8192e2ant_iswifi_status_changed(btcoexist) ||
+	    coex_dm->auto_tdma_adjust)
+		halbtc8192e2ant_run_coexist_mechanism(btcoexist);
+#endif
+}
+
+
+#endif
+
diff --git a/drivers/staging/rtl8821ae/btcoexist/halbtc8192e2ant.h b/drivers/staging/rtl8821ae/btcoexist/halbtc8192e2ant.h
new file mode 100644
index 0000000..6d109edb
--- /dev/null
+++ b/drivers/staging/rtl8821ae/btcoexist/halbtc8192e2ant.h
@@ -0,0 +1,162 @@
+/*****************************************************************
+ *   The following is for 8192E 2Ant BT Co-exist definition
+ *****************************************************************/
+#define	BT_AUTO_REPORT_ONLY_8192E_2ANT			0
+
+#define	BT_INFO_8192E_2ANT_B_FTP			BIT7
+#define	BT_INFO_8192E_2ANT_B_A2DP			BIT6
+#define	BT_INFO_8192E_2ANT_B_HID			BIT5
+#define	BT_INFO_8192E_2ANT_B_SCO_BUSY			BIT4
+#define	BT_INFO_8192E_2ANT_B_ACL_BUSY			BIT3
+#define	BT_INFO_8192E_2ANT_B_INQ_PAGE			BIT2
+#define	BT_INFO_8192E_2ANT_B_SCO_ESCO			BIT1
+#define	BT_INFO_8192E_2ANT_B_CONNECTION			BIT0
+
+#define BTC_RSSI_COEX_THRESH_TOL_8192E_2ANT		2
+
+enum bt_info_src_8192e_2ant {
+	BT_INFO_SRC_8192E_2ANT_WIFI_FW			= 0x0,
+	BT_INFO_SRC_8192E_2ANT_BT_RSP			= 0x1,
+	BT_INFO_SRC_8192E_2ANT_BT_ACTIVE_SEND		= 0x2,
+	BT_INFO_SRC_8192E_2ANT_MAX
+};
+
+enum bt_8192e_2ant_bt_status {
+	BT_8192E_2ANT_BT_STATUS_NON_CONNECTED_IDLE	= 0x0,
+	BT_8192E_2ANT_BT_STATUS_CONNECTED_IDLE		= 0x1,
+	BT_8192E_2ANT_BT_STATUS_INQ_PAGE		= 0x2,
+	BT_8192E_2ANT_BT_STATUS_ACL_BUSY		= 0x3,
+	BT_8192E_2ANT_BT_STATUS_SCO_BUSY		= 0x4,
+	BT_8192E_2ANT_BT_STATUS_ACL_SCO_BUSY		= 0x5,
+	BT_8192E_2ANT_BT_STATUS_MAX
+};
+
+enum bt_8192e_2ant_coex_algo {
+	BT_8192E_2ANT_COEX_ALGO_UNDEFINED		= 0x0,
+	BT_8192E_2ANT_COEX_ALGO_SCO			= 0x1,
+	BT_8192E_2ANT_COEX_ALGO_SCO_PAN			= 0x2,
+	BT_8192E_2ANT_COEX_ALGO_HID			= 0x3,
+	BT_8192E_2ANT_COEX_ALGO_A2DP			= 0x4,
+	BT_8192E_2ANT_COEX_ALGO_A2DP_PANHS		= 0x5,
+	BT_8192E_2ANT_COEX_ALGO_PANEDR			= 0x6,
+	BT_8192E_2ANT_COEX_ALGO_PANHS			= 0x7,
+	BT_8192E_2ANT_COEX_ALGO_PANEDR_A2DP		= 0x8,
+	BT_8192E_2ANT_COEX_ALGO_PANEDR_HID		= 0x9,
+	BT_8192E_2ANT_COEX_ALGO_HID_A2DP_PANEDR		= 0xa,
+	BT_8192E_2ANT_COEX_ALGO_HID_A2DP		= 0xb,
+	BT_8192E_2ANT_COEX_ALGO_MAX			= 0xc
+};
+
+struct coex_dm_8192e_2ant {
+	/* fw mechanism */
+	u8 pre_dec_bt_pwr;
+	u8 cur_dec_bt_pwr;
+	u8 pre_fw_dac_swing_lvl;
+	u8 cur_fw_dac_swing_lvl;
+	bool cur_ignore_wlan_act;
+	bool pre_ignore_wlan_act;
+	u8 pre_ps_tdma;
+	u8 cur_ps_tdma;
+	u8 ps_tdma_para[5];
+	u8 ps_tdma_du_adj_type;
+	bool reset_tdma_adjust;
+	bool auto_tdma_adjust;
+	bool pre_ps_tdma_on;
+	bool cur_ps_tdma_on;
+	bool pre_bt_auto_report;
+	bool cur_bt_auto_report;
+
+	/* sw mechanism */
+	bool pre_rf_rx_lpf_shrink;
+	bool cur_rf_rx_lpf_shrink;
+	u32 bt_rf0x1e_backup;
+	bool pre_low_penalty_ra;
+	bool cur_low_penalty_ra;
+	bool pre_dac_swing_on;
+	u32 pre_dac_swing_lvl;
+	bool cur_dac_swing_on;
+	u32 cur_dac_swing_lvl;
+	bool pre_adc_back_off;
+	bool cur_adc_back_off;
+	bool pre_agc_table_en;
+	bool cur_agc_table_en;
+	u32 pre_val0x6c0;
+	u32 cur_val0x6c0;
+	u32 pre_val0x6c4;
+	u32 cur_val0x6c4;
+	u32 pre_val0x6c8;
+	u32 cur_val0x6c8;
+	u8 pre_val0x6cc;
+	u8 cur_val0x6cc;
+	bool limited_dig;
+
+	u32 backup_arfr_cnt1;	/* Auto Rate Fallback Retry cnt */
+	u32 backup_arfr_cnt2;	/* Auto Rate Fallback Retry cnt */
+	u16 backup_retrylimit;
+	u8 backup_ampdu_maxtime;
+
+	/* algorithm related */
+	u8 pre_algorithm;
+	u8 cur_algorithm;
+	u8 bt_status;
+	u8 wifi_chnl_info[3];
+
+	u8 pre_sstype;
+	u8 cur_sstype;
+
+	u32 prera_mask;
+	u32 curra_mask;
+	u8 curra_masktype;
+	u8 pre_arfrtype;
+	u8 cur_arfrtype;
+	u8 pre_retrylimit_type;
+	u8 cur_retrylimit_type;
+	u8 pre_ampdutime_type;
+	u8 cur_ampdutime_type;
+};
+
+struct coex_sta_8192e_2ant {
+	bool bt_link_exist;
+	bool sco_exist;
+	bool a2dp_exist;
+	bool hid_exist;
+	bool pan_exist;
+
+	bool under_lps;
+	bool under_ips;
+	u32 high_priority_tx;
+	u32 high_priority_rx;
+	u32 low_priority_tx;
+	u32 low_priority_rx;
+	u8 bt_rssi;
+	u8 pre_bt_rssi_state;
+	u8 pre_wifi_rssi_state[4];
+	bool c2h_bt_info_req_sent;
+	u8 bt_info_c2h[BT_INFO_SRC_8192E_2ANT_MAX][10];
+	u32 bt_info_c2h_cnt[BT_INFO_SRC_8192E_2ANT_MAX];
+	bool c2h_bt_inquiry_page;
+	u8 bt_retry_cnt;
+	u8 bt_info_ext;
+};
+
+/****************************************************************
+ *    The following is interface which will notify coex module.
+ ****************************************************************/
+void ex_halbtc8192e2ant_init_hwconfig(struct btc_coexist *btcoexist);
+void ex_halbtc8192e2ant_init_coex_dm(struct btc_coexist *btcoexist);
+void ex_halbtc8192e2ant_ips_notify(struct btc_coexist *btcoexist, u8 type);
+void ex_halbtc8192e2ant_lps_notify(struct btc_coexist *btcoexist, u8 type);
+void ex_halbtc8192e2ant_scan_notify(struct btc_coexist *btcoexist, u8 type);
+void ex_halbtc8192e2ant_connect_notify(struct btc_coexist *btcoexist, u8 type);
+void ex_halbtc8192e2ant_media_status_notify(struct btc_coexist *btcoexist,
+					    u8 type);
+void ex_halbtc8192e2ant_special_packet_notify(struct btc_coexist *btcoexist,
+					      u8 type);
+void ex_halbtc8192e2ant_bt_info_notify(struct btc_coexist *btcoexist,
+				       u8 *tmp_buf, u8 length);
+void ex_halbtc8192e2ant_stack_operation_notify(struct btc_coexist *btcoexist,
+					       u8 type);
+void ex_halbtc8192e2ant_halt_notify(struct btc_coexist *btcoexist);
+void ex_halbtc8192e2ant_periodical(struct btc_coexist *btcoexist);
+void ex_halbtc8192e2ant_display_coex_info(struct btc_coexist *btcoexist);
+
diff --git a/drivers/staging/rtl8821ae/btcoexist/halbtc8723a2ant.c b/drivers/staging/rtl8821ae/btcoexist/halbtc8723a2ant.c
new file mode 100644
index 0000000..180d6f1
--- /dev/null
+++ b/drivers/staging/rtl8821ae/btcoexist/halbtc8723a2ant.c
@@ -0,0 +1,3780 @@
+//============================================================
+// Description:
+//
+// This file is for RTL8723A Co-exist mechanism
+//
+// History
+// 2012/08/22 Cosa first check in.
+// 2012/11/14 Cosa Revise for 8723A 2Ant out sourcing.
+//
+//============================================================
+
+//============================================================
+// include files
+//============================================================
+#include "Mp_Precomp.h"
+#if(BT_30_SUPPORT == 1)
+//============================================================
+// Global variables, these are static variables
+//============================================================
+static COEX_DM_8723A_2ANT	GLCoexDm8723a2Ant;
+static PCOEX_DM_8723A_2ANT 	pCoexDm=&GLCoexDm8723a2Ant;
+static COEX_STA_8723A_2ANT	GLCoexSta8723a2Ant;
+static PCOEX_STA_8723A_2ANT	pCoexSta=&GLCoexSta8723a2Ant;
+
+const char *const GLBtInfoSrc8723a2Ant[]={
+	"BT Info[wifi fw]",
+	"BT Info[bt rsp]",
+	"BT Info[bt auto report]",
+};
+
+//============================================================
+// local function proto type if needed
+//============================================================
+//============================================================
+// local function start with halbtc8723a2ant_
+//============================================================
+BOOLEAN
+halbtc8723a2ant_IsWifiIdle(
+	IN	PBTC_COEXIST		pBtCoexist
+	)
+{
+	BOOLEAN		bWifiConnected=FALSE, bScan=FALSE, bLink=FALSE, bRoam=FALSE;
+
+	pBtCoexist->btc_get(pBtCoexist, BTC_GET_BL_WIFI_CONNECTED, &bWifiConnected);
+	pBtCoexist->btc_get(pBtCoexist, BTC_GET_BL_WIFI_SCAN, &bScan);
+	pBtCoexist->btc_get(pBtCoexist, BTC_GET_BL_WIFI_LINK, &bLink);
+	pBtCoexist->btc_get(pBtCoexist, BTC_GET_BL_WIFI_ROAM, &bRoam);
+
+	if(bWifiConnected)
+		return FALSE;
+	if(bScan)
+		return FALSE;
+	if(bLink)
+		return FALSE;
+	if(bRoam)
+		return FALSE;
+
+	return true;
+}
+
+u1Byte
+halbtc8723a2ant_BtRssiState(
+	u1Byte			levelNum,
+	u1Byte			rssiThresh,
+	u1Byte			rssiThresh1
+	)
+{
+	s4Byte			btRssi=0;
+	u1Byte			btRssiState=pCoexSta->preBtRssiState;
+
+	btRssi = pCoexSta->btRssi;
+
+	if(levelNum == 2)
+	{			
+		if( (pCoexSta->preBtRssiState == BTC_RSSI_STATE_LOW) ||
+			(pCoexSta->preBtRssiState == BTC_RSSI_STATE_STAY_LOW))
+		{
+			if(btRssi >= (rssiThresh+BTC_RSSI_COEX_THRESH_TOL_8723A_2ANT))
+			{
+				btRssiState = BTC_RSSI_STATE_HIGH;
+				BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_RSSI_STATE, ("[BTCoex], BT Rssi state switch to High\n"));
+			}
+			else
+			{
+				btRssiState = BTC_RSSI_STATE_STAY_LOW;
+				BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_RSSI_STATE, ("[BTCoex], BT Rssi state stay at Low\n"));
+			}
+		}
+		else
+		{
+			if(btRssi < rssiThresh)
+			{
+				btRssiState = BTC_RSSI_STATE_LOW;
+				BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_RSSI_STATE, ("[BTCoex], BT Rssi state switch to Low\n"));
+			}
+			else
+			{
+				btRssiState = BTC_RSSI_STATE_STAY_HIGH;
+				BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_RSSI_STATE, ("[BTCoex], BT Rssi state stay at High\n"));
+			}
+		}
+	}
+	else if(levelNum == 3)
+	{
+		if(rssiThresh > rssiThresh1)
+		{
+			BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_RSSI_STATE, ("[BTCoex], BT Rssi thresh error!!\n"));
+			return pCoexSta->preBtRssiState;
+		}
+		
+		if( (pCoexSta->preBtRssiState == BTC_RSSI_STATE_LOW) ||
+			(pCoexSta->preBtRssiState == BTC_RSSI_STATE_STAY_LOW))
+		{
+			if(btRssi >= (rssiThresh+BTC_RSSI_COEX_THRESH_TOL_8723A_2ANT))
+			{
+				btRssiState = BTC_RSSI_STATE_MEDIUM;
+				BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_RSSI_STATE, ("[BTCoex], BT Rssi state switch to Medium\n"));
+			}
+			else
+			{
+				btRssiState = BTC_RSSI_STATE_STAY_LOW;
+				BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_RSSI_STATE, ("[BTCoex], BT Rssi state stay at Low\n"));
+			}
+		}
+		else if( (pCoexSta->preBtRssiState == BTC_RSSI_STATE_MEDIUM) ||
+			(pCoexSta->preBtRssiState == BTC_RSSI_STATE_STAY_MEDIUM))
+		{
+			if(btRssi >= (rssiThresh1+BTC_RSSI_COEX_THRESH_TOL_8723A_2ANT))
+			{
+				btRssiState = BTC_RSSI_STATE_HIGH;
+				BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_RSSI_STATE, ("[BTCoex], BT Rssi state switch to High\n"));
+			}
+			else if(btRssi < rssiThresh)
+			{
+				btRssiState = BTC_RSSI_STATE_LOW;
+				BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_RSSI_STATE, ("[BTCoex], BT Rssi state switch to Low\n"));
+			}
+			else
+			{
+				btRssiState = BTC_RSSI_STATE_STAY_MEDIUM;
+				BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_RSSI_STATE, ("[BTCoex], BT Rssi state stay at Medium\n"));
+			}
+		}
+		else
+		{
+			if(btRssi < rssiThresh1)
+			{
+				btRssiState = BTC_RSSI_STATE_MEDIUM;
+				BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_RSSI_STATE, ("[BTCoex], BT Rssi state switch to Medium\n"));
+			}
+			else
+			{
+				btRssiState = BTC_RSSI_STATE_STAY_HIGH;
+				BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_RSSI_STATE, ("[BTCoex], BT Rssi state stay at High\n"));
+			}
+		}
+	}
+		
+	pCoexSta->preBtRssiState = btRssiState;
+
+	return btRssiState;
+}
+
+u1Byte
+halbtc8723a2ant_WifiRssiState(
+	IN	PBTC_COEXIST		pBtCoexist,
+	IN	u1Byte			index,
+	IN	u1Byte			levelNum,
+	IN	u1Byte			rssiThresh,
+	IN	u1Byte			rssiThresh1
+	)
+{
+	s4Byte			wifiRssi=0;
+	u1Byte			wifiRssiState=pCoexSta->preWifiRssiState[index];
+
+	pBtCoexist->btc_get(pBtCoexist, BTC_GET_S4_WIFI_RSSI, &wifiRssi);
+	
+	if(levelNum == 2)
+	{
+		if( (pCoexSta->preWifiRssiState[index] == BTC_RSSI_STATE_LOW) ||
+			(pCoexSta->preWifiRssiState[index] == BTC_RSSI_STATE_STAY_LOW))
+		{
+			if(wifiRssi >= (rssiThresh+BTC_RSSI_COEX_THRESH_TOL_8723A_2ANT))
+			{
+				wifiRssiState = BTC_RSSI_STATE_HIGH;
+				BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_WIFI_RSSI_STATE, ("[BTCoex], wifi RSSI state switch to High\n"));
+			}
+			else
+			{
+				wifiRssiState = BTC_RSSI_STATE_STAY_LOW;
+				BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_WIFI_RSSI_STATE, ("[BTCoex], wifi RSSI state stay at Low\n"));
+			}
+		}
+		else
+		{
+			if(wifiRssi < rssiThresh)
+			{
+				wifiRssiState = BTC_RSSI_STATE_LOW;
+				BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_WIFI_RSSI_STATE, ("[BTCoex], wifi RSSI state switch to Low\n"));
+			}
+			else
+			{
+				wifiRssiState = BTC_RSSI_STATE_STAY_HIGH;
+				BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_WIFI_RSSI_STATE, ("[BTCoex], wifi RSSI state stay at High\n"));
+			}
+		}
+	}
+	else if(levelNum == 3)
+	{
+		if(rssiThresh > rssiThresh1)
+		{
+			BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_WIFI_RSSI_STATE, ("[BTCoex], wifi RSSI thresh error!!\n"));
+			return pCoexSta->preWifiRssiState[index];
+		}
+		
+		if( (pCoexSta->preWifiRssiState[index] == BTC_RSSI_STATE_LOW) ||
+			(pCoexSta->preWifiRssiState[index] == BTC_RSSI_STATE_STAY_LOW))
+		{
+			if(wifiRssi >= (rssiThresh+BTC_RSSI_COEX_THRESH_TOL_8723A_2ANT))
+			{
+				wifiRssiState = BTC_RSSI_STATE_MEDIUM;
+				BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_WIFI_RSSI_STATE, ("[BTCoex], wifi RSSI state switch to Medium\n"));
+			}
+			else
+			{
+				wifiRssiState = BTC_RSSI_STATE_STAY_LOW;
+				BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_WIFI_RSSI_STATE, ("[BTCoex], wifi RSSI state stay at Low\n"));
+			}
+		}
+		else if( (pCoexSta->preWifiRssiState[index] == BTC_RSSI_STATE_MEDIUM) ||
+			(pCoexSta->preWifiRssiState[index] == BTC_RSSI_STATE_STAY_MEDIUM))
+		{
+			if(wifiRssi >= (rssiThresh1+BTC_RSSI_COEX_THRESH_TOL_8723A_2ANT))
+			{
+				wifiRssiState = BTC_RSSI_STATE_HIGH;
+				BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_WIFI_RSSI_STATE, ("[BTCoex], wifi RSSI state switch to High\n"));
+			}
+			else if(wifiRssi < rssiThresh)
+			{
+				wifiRssiState = BTC_RSSI_STATE_LOW;
+				BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_WIFI_RSSI_STATE, ("[BTCoex], wifi RSSI state switch to Low\n"));
+			}
+			else
+			{
+				wifiRssiState = BTC_RSSI_STATE_STAY_MEDIUM;
+				BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_WIFI_RSSI_STATE, ("[BTCoex], wifi RSSI state stay at Medium\n"));
+			}
+		}
+		else
+		{
+			if(wifiRssi < rssiThresh1)
+			{
+				wifiRssiState = BTC_RSSI_STATE_MEDIUM;
+				BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_WIFI_RSSI_STATE, ("[BTCoex], wifi RSSI state switch to Medium\n"));
+			}
+			else
+			{
+				wifiRssiState = BTC_RSSI_STATE_STAY_HIGH;
+				BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_WIFI_RSSI_STATE, ("[BTCoex], wifi RSSI state stay at High\n"));
+			}
+		}
+	}
+		
+	pCoexSta->preWifiRssiState[index] = wifiRssiState;
+
+	return wifiRssiState;
+}
+
+VOID
+halbtc8723a2ant_IndicateWifiChnlBwInfo(
+	IN	PBTC_COEXIST			pBtCoexist,
+	IN	u1Byte				type
+	)
+{
+	u1Byte			H2C_Parameter[3] ={0};
+	u4Byte			wifiBw;
+	u1Byte			wifiCentralChnl;
+	
+	// only 2.4G we need to inform bt the chnl mask
+	pBtCoexist->btc_get(pBtCoexist, BTC_GET_U1_WIFI_CENTRAL_CHNL, &wifiCentralChnl);
+	if( (BTC_MEDIA_CONNECT == type) &&
+		(wifiCentralChnl <= 14) )
+	{
+		H2C_Parameter[0] = 0x1;
+		H2C_Parameter[1] = wifiCentralChnl;
+		pBtCoexist->btc_get(pBtCoexist, BTC_GET_U4_WIFI_BW, &wifiBw);
+		if(BTC_WIFI_BW_HT40 == wifiBw)
+			H2C_Parameter[2] = 0x30;
+		else
+			H2C_Parameter[2] = 0x20;
+	}
+		
+	pCoexDm->wifiChnlInfo[0] = H2C_Parameter[0];
+	pCoexDm->wifiChnlInfo[1] = H2C_Parameter[1];
+	pCoexDm->wifiChnlInfo[2] = H2C_Parameter[2];
+	
+	BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_EXEC, ("[BTCoex], FW write 0x19=0x%x\n", 
+		H2C_Parameter[0]<<16|H2C_Parameter[1]<<8|H2C_Parameter[2]));
+
+	pBtCoexist->btc_fill_h2c(pBtCoexist, 0x19, 3, H2C_Parameter);
+}
+
+VOID
+halbtc8723a2ant_QueryBtInfo(
+	IN	PBTC_COEXIST		pBtCoexist
+	)
+{
+	u1Byte			H2C_Parameter[1] ={0};
+
+	pCoexSta->bC2hBtInfoReqSent = true;
+
+	H2C_Parameter[0] |= BIT0;	// trigger
+
+	BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_EXEC, ("[BTCoex], Query Bt Info, FW write 0x38=0x%x\n", 
+		H2C_Parameter[0]));
+
+	pBtCoexist->btc_fill_h2c(pBtCoexist, 0x38, 1, H2C_Parameter);
+}
+u1Byte
+halbtc8723a2ant_ActionAlgorithm(
+	IN	PBTC_COEXIST		pBtCoexist
+	)
+{
+	PBTC_STACK_INFO		pStackInfo=&pBtCoexist->stack_info;
+	BOOLEAN				bBtHsOn=FALSE, bBtBusy=FALSE, limited_dig=FALSE;
+	u1Byte				algorithm=BT_8723A_2ANT_COEX_ALGO_UNDEFINED;
+	u1Byte				numOfDiffProfile=0;
+
+	pBtCoexist->btc_get(pBtCoexist, BTC_GET_BL_HS_OPERATION, &bBtHsOn);
+
+	//======================
+	// here we get BT status first
+	//======================
+	pCoexDm->btStatus = BT_8723A_2ANT_BT_STATUS_IDLE;
+	
+	if((pStackInfo->bScoExist) ||(bBtHsOn) ||(pStackInfo->bHidExist))
+	{
+		BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], SCO or HID or HS exists, set BT non-idle !!!\n"));
+		pCoexDm->btStatus = BT_8723A_2ANT_BT_STATUS_NON_IDLE;
+	}
+	else
+	{
+		// A2dp profile
+		if( (pBtCoexist->stack_info.numOfLink == 1) &&
+			(pStackInfo->bA2dpExist) )
+		{		
+			if( (pCoexSta->lowPriorityTx+	pCoexSta->lowPriorityRx) < 100)
+			{
+				BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], A2DP, low priority tx+rx < 100, set BT connected-idle!!!\n"));
+				pCoexDm->btStatus = BT_8723A_2ANT_BT_STATUS_CONNECTED_IDLE;
+			}
+			else
+			{
+				BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], A2DP, low priority tx+rx >= 100, set BT non-idle!!!\n"));
+				pCoexDm->btStatus = BT_8723A_2ANT_BT_STATUS_NON_IDLE;
+			}
+		}
+		// Pan profile
+		if( (pBtCoexist->stack_info.numOfLink == 1) &&
+			(pStackInfo->bPanExist) )
+		{		
+			if((pCoexSta->lowPriorityTx+	pCoexSta->lowPriorityRx) < 600)
+			{
+				BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], PAN, low priority tx+rx < 600, set BT connected-idle!!!\n"));
+				pCoexDm->btStatus = BT_8723A_2ANT_BT_STATUS_CONNECTED_IDLE;
+			}
+			else
+			{
+				if(pCoexSta->lowPriorityTx)
+				{
+					if((pCoexSta->lowPriorityRx /pCoexSta->lowPriorityTx)>9 )
+					{
+						BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], PAN, low priority rx/tx > 9, set BT connected-idle!!!\n"));
+						pCoexDm->btStatus = BT_8723A_2ANT_BT_STATUS_CONNECTED_IDLE;
+					}
+				}
+			}
+			if(BT_8723A_2ANT_BT_STATUS_CONNECTED_IDLE != pCoexDm->btStatus)
+			{
+				BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], PAN, set BT non-idle!!!\n"));
+				pCoexDm->btStatus = BT_8723A_2ANT_BT_STATUS_NON_IDLE;
+			}
+		}
+		// Pan+A2dp profile
+		if( (pBtCoexist->stack_info.numOfLink == 2) &&
+			(pStackInfo->bA2dpExist) &&
+			(pStackInfo->bPanExist) )
+		{
+			if((pCoexSta->lowPriorityTx+	pCoexSta->lowPriorityRx) < 600)
+			{
+				BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], PAN+A2DP, low priority tx+rx < 600, set BT connected-idle!!!\n"));
+				pCoexDm->btStatus = BT_8723A_2ANT_BT_STATUS_CONNECTED_IDLE;
+			}
+			else
+			{
+				if(pCoexSta->lowPriorityTx)
+				{
+					if((pCoexSta->lowPriorityRx /pCoexSta->lowPriorityTx)>9 )
+					{
+						BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], PAN+A2DP, low priority rx/tx > 9, set BT connected-idle!!!\n"));
+						pCoexDm->btStatus = BT_8723A_2ANT_BT_STATUS_CONNECTED_IDLE;
+					}
+				}
+			}
+			if(BT_8723A_2ANT_BT_STATUS_CONNECTED_IDLE != pCoexDm->btStatus)
+			{
+				BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], PAN+A2DP, set BT non-idle!!!\n"));
+				pCoexDm->btStatus = BT_8723A_2ANT_BT_STATUS_NON_IDLE;
+			}
+		}
+	}
+	if(BT_8723A_2ANT_BT_STATUS_IDLE != pCoexDm->btStatus)
+	{
+		bBtBusy = true;
+		limited_dig = true;
+	}
+	else
+	{
+		bBtBusy = FALSE;
+		limited_dig = FALSE;
+	}
+	pBtCoexist->btc_set(pBtCoexist, BTC_SET_BL_BT_TRAFFIC_BUSY, &bBtBusy);
+	pBtCoexist->btc_set(pBtCoexist, BTC_SET_BL_BT_LIMITED_DIG, &limited_dig);
+	//======================
+
+	if(!pStackInfo->bBtLinkExist)
+	{
+		BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], No profile exists!!!\n"));
+		return algorithm;
+	}
+
+	if(pStackInfo->bScoExist)
+		numOfDiffProfile++;
+	if(pStackInfo->bHidExist)
+		numOfDiffProfile++;
+	if(pStackInfo->bPanExist)
+		numOfDiffProfile++;
+	if(pStackInfo->bA2dpExist)
+		numOfDiffProfile++;
+	
+	if(numOfDiffProfile == 1)
+	{
+		if(pStackInfo->bScoExist)
+		{
+			BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], SCO only\n"));
+			algorithm = BT_8723A_2ANT_COEX_ALGO_SCO;
+		}
+		else
+		{
+			if(pStackInfo->bHidExist)
+			{
+				BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], HID only\n"));
+				algorithm = BT_8723A_2ANT_COEX_ALGO_HID;
+			}
+			else if(pStackInfo->bA2dpExist)
+			{
+				BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], A2DP only\n"));
+				algorithm = BT_8723A_2ANT_COEX_ALGO_A2DP;
+			}
+			else if(pStackInfo->bPanExist)
+			{
+				if(bBtHsOn)
+				{
+					BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], PAN(HS) only\n"));
+					algorithm = BT_8723A_2ANT_COEX_ALGO_PANHS;
+				}
+				else
+				{
+					BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], PAN(EDR) only\n"));
+					algorithm = BT_8723A_2ANT_COEX_ALGO_PANEDR;
+				}
+			}
+		}
+	}
+	else if(numOfDiffProfile == 2)
+	{
+		if(pStackInfo->bScoExist)
+		{
+			if(pStackInfo->bHidExist)
+			{
+				BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], SCO + HID\n"));
+				algorithm = BT_8723A_2ANT_COEX_ALGO_HID;
+			}
+			else if(pStackInfo->bA2dpExist)
+			{
+				BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], SCO + A2DP ==> SCO\n"));
+				algorithm = BT_8723A_2ANT_COEX_ALGO_SCO;
+			}
+			else if(pStackInfo->bPanExist)
+			{
+				if(bBtHsOn)
+				{
+					BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], SCO + PAN(HS)\n"));
+					algorithm = BT_8723A_2ANT_COEX_ALGO_SCO;
+				}
+				else
+				{
+					BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], SCO + PAN(EDR)\n"));
+					algorithm = BT_8723A_2ANT_COEX_ALGO_PANEDR_HID;
+				}
+			}
+		}
+		else
+		{
+			if( pStackInfo->bHidExist &&
+				pStackInfo->bA2dpExist )
+			{
+				BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], HID + A2DP\n"));
+				algorithm = BT_8723A_2ANT_COEX_ALGO_HID_A2DP;
+			}
+			else if( pStackInfo->bHidExist &&
+				pStackInfo->bPanExist )
+			{
+				if(bBtHsOn)
+				{
+					BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], HID + PAN(HS)\n"));
+					algorithm = BT_8723A_2ANT_COEX_ALGO_HID_A2DP;
+				}
+				else
+				{
+					BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], HID + PAN(EDR)\n"));
+					algorithm = BT_8723A_2ANT_COEX_ALGO_PANEDR_HID;
+				}
+			}
+			else if( pStackInfo->bPanExist &&
+				pStackInfo->bA2dpExist )
+			{
+				if(bBtHsOn)
+				{
+					BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], A2DP + PAN(HS)\n"));
+					algorithm = BT_8723A_2ANT_COEX_ALGO_A2DP;
+				}
+				else
+				{
+					BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], A2DP + PAN(EDR)\n"));
+					algorithm = BT_8723A_2ANT_COEX_ALGO_PANEDR_A2DP;
+				}
+			}
+		}
+	}
+	else if(numOfDiffProfile == 3)
+	{
+		if(pStackInfo->bScoExist)
+		{
+			if( pStackInfo->bHidExist &&
+				pStackInfo->bA2dpExist )
+			{
+				BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], SCO + HID + A2DP ==> HID\n"));
+				algorithm = BT_8723A_2ANT_COEX_ALGO_HID;
+			}
+			else if( pStackInfo->bHidExist &&
+				pStackInfo->bPanExist )
+			{
+				if(bBtHsOn)
+				{
+					BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], SCO + HID + PAN(HS)\n"));
+					algorithm = BT_8723A_2ANT_COEX_ALGO_HID_A2DP;
+				}
+				else
+				{
+					BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], SCO + HID + PAN(EDR)\n"));
+					algorithm = BT_8723A_2ANT_COEX_ALGO_PANEDR_HID;
+				}
+			}
+			else if( pStackInfo->bPanExist &&
+				pStackInfo->bA2dpExist )
+			{
+				if(bBtHsOn)
+				{
+					BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], SCO + A2DP + PAN(HS)\n"));
+					algorithm = BT_8723A_2ANT_COEX_ALGO_SCO;
+				}
+				else
+				{
+					BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], SCO + A2DP + PAN(EDR) ==> HID\n"));
+					algorithm = BT_8723A_2ANT_COEX_ALGO_PANEDR_HID;
+				}
+			}
+		}
+		else
+		{
+			if( pStackInfo->bHidExist &&
+				pStackInfo->bPanExist &&
+				pStackInfo->bA2dpExist )
+			{
+				if(bBtHsOn)
+				{
+					BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], HID + A2DP + PAN(HS)\n"));
+					algorithm = BT_8723A_2ANT_COEX_ALGO_HID_A2DP;
+				}
+				else
+				{
+					BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], HID + A2DP + PAN(EDR)\n"));
+					algorithm = BT_8723A_2ANT_COEX_ALGO_HID_A2DP_PANEDR;
+				}
+			}
+		}
+	}
+	else if(numOfDiffProfile >= 3)
+	{
+		if(pStackInfo->bScoExist)
+		{
+			if( pStackInfo->bHidExist &&
+				pStackInfo->bPanExist &&
+				pStackInfo->bA2dpExist )
+			{
+				if(bBtHsOn)
+				{
+					BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], Error!!! SCO + HID + A2DP + PAN(HS)\n"));
+
+				}
+				else
+				{
+					BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], SCO + HID + A2DP + PAN(EDR)==>PAN(EDR)+HID\n"));
+					algorithm = BT_8723A_2ANT_COEX_ALGO_PANEDR_HID;
+				}
+			}
+		}
+	}
+
+	return algorithm;
+}
+
+BOOLEAN
+halbtc8723a2ant_NeedToDecBtPwr(
+	IN	PBTC_COEXIST		pBtCoexist
+	)
+{
+	BOOLEAN		bRet=FALSE;
+	BOOLEAN		bBtHsOn=FALSE, bWifiConnected=FALSE;
+	s4Byte		btHsRssi=0;
+
+	if(!pBtCoexist->btc_get(pBtCoexist, BTC_GET_BL_HS_OPERATION, &bBtHsOn))
+		return FALSE;
+	if(!pBtCoexist->btc_get(pBtCoexist, BTC_GET_BL_WIFI_CONNECTED, &bWifiConnected))
+		return FALSE;
+	if(!pBtCoexist->btc_get(pBtCoexist, BTC_GET_S4_HS_RSSI, &btHsRssi))
+		return FALSE;
+
+	if(bWifiConnected)
+	{
+		if(bBtHsOn)
+		{
+			if(btHsRssi > 37)
+			{
+				BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW, ("[BTCoex], Need to decrease bt power for HS mode!!\n"));
+				bRet = true;
+			}
+		}
+		else
+		{
+			BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW, ("[BTCoex], Need to decrease bt power for Wifi is connected!!\n"));
+			bRet = true;
+		}
+	}
+	
+	return bRet;
+}
+
+VOID
+halbtc8723a2ant_SetFwDacSwingLevel(
+	IN	PBTC_COEXIST		pBtCoexist,
+	IN	u1Byte			dacSwingLvl
+	)
+{
+	u1Byte			H2C_Parameter[1] ={0};
+
+	// There are several type of dacswing
+	// 0x18/ 0x10/ 0xc/ 0x8/ 0x4/ 0x6
+	H2C_Parameter[0] = dacSwingLvl;
+
+	BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_EXEC, ("[BTCoex], Set Dac Swing Level=0x%x\n", dacSwingLvl));
+	BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_EXEC, ("[BTCoex], FW write 0x29=0x%x\n", H2C_Parameter[0]));
+
+	pBtCoexist->btc_fill_h2c(pBtCoexist, 0x29, 1, H2C_Parameter);
+}
+
+VOID
+halbtc8723a2ant_SetFwDecBtPwr(
+	IN	PBTC_COEXIST		pBtCoexist,
+	IN	BOOLEAN			bDecBtPwr
+	)
+{
+	u1Byte			H2C_Parameter[1] ={0};
+	
+	H2C_Parameter[0] = 0;
+
+	if(bDecBtPwr)
+	{
+		H2C_Parameter[0] |= BIT1;
+	}
+
+	BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_EXEC, ("[BTCoex], decrease Bt Power : %s, FW write 0x21=0x%x\n", 
+		(bDecBtPwr? "Yes!!":"No!!"), H2C_Parameter[0]));
+
+	pBtCoexist->btc_fill_h2c(pBtCoexist, 0x21, 1, H2C_Parameter);	
+}
+
+VOID
+halbtc8723a2ant_DecBtPwr(
+	IN	PBTC_COEXIST		pBtCoexist,
+	IN	BOOLEAN			bForceExec,
+	IN	BOOLEAN			bDecBtPwr
+	)
+{
+	BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW, ("[BTCoex], %s Dec BT power = %s\n",  
+		(bForceExec? "force to":""), ((bDecBtPwr)? "ON":"OFF")));
+	pCoexDm->bCurDecBtPwr = bDecBtPwr;
+
+	if(!bForceExec)
+	{
+		BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_DETAIL, ("[BTCoex], bPreDecBtPwr=%d, bCurDecBtPwr=%d\n", 
+			pCoexDm->bPreDecBtPwr, pCoexDm->bCurDecBtPwr));
+
+		if(pCoexDm->bPreDecBtPwr == pCoexDm->bCurDecBtPwr) 
+			return;
+	}
+	halbtc8723a2ant_SetFwDecBtPwr(pBtCoexist, pCoexDm->bCurDecBtPwr);
+
+	pCoexDm->bPreDecBtPwr = pCoexDm->bCurDecBtPwr;
+}
+
+VOID
+halbtc8723a2ant_FwDacSwingLvl(
+	IN	PBTC_COEXIST		pBtCoexist,
+	IN	BOOLEAN			bForceExec,
+	IN	u1Byte			fwDacSwingLvl
+	)
+{
+	BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW, ("[BTCoex], %s set FW Dac Swing level = %d\n",  
+		(bForceExec? "force to":""), fwDacSwingLvl));
+	pCoexDm->curFwDacSwingLvl = fwDacSwingLvl;
+
+	if(!bForceExec)
+	{
+		BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_DETAIL, ("[BTCoex], preFwDacSwingLvl=%d, curFwDacSwingLvl=%d\n", 
+			pCoexDm->preFwDacSwingLvl, pCoexDm->curFwDacSwingLvl));
+
+		if(pCoexDm->preFwDacSwingLvl == pCoexDm->curFwDacSwingLvl) 
+			return;
+	}
+
+	halbtc8723a2ant_SetFwDacSwingLevel(pBtCoexist, pCoexDm->curFwDacSwingLvl);
+
+	pCoexDm->preFwDacSwingLvl = pCoexDm->curFwDacSwingLvl;
+}
+
+VOID
+halbtc8723a2ant_SetSwRfRxLpfCorner(
+	IN	PBTC_COEXIST		pBtCoexist,
+	IN	BOOLEAN			bRxRfShrinkOn
+	)
+{
+	if(bRxRfShrinkOn)
+	{
+		//Shrink RF Rx LPF corner
+		BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_EXEC, ("[BTCoex], Shrink RF Rx LPF corner!!\n"));
+		pBtCoexist->btc_set_rf_reg(pBtCoexist, BTC_RF_A, 0x1e, 0xfffff, 0xf0ff7);
+	}
+	else
+	{
+		//Resume RF Rx LPF corner
+		// After initialized, we can use pCoexDm->btRf0x1eBackup
+		if(pBtCoexist->initilized)
+		{
+			BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_EXEC, ("[BTCoex], Resume RF Rx LPF corner!!\n"));
+			pBtCoexist->btc_set_rf_reg(pBtCoexist, BTC_RF_A, 0x1e, 0xfffff, pCoexDm->btRf0x1eBackup);
+		}
+	}
+}
+
+VOID
+halbtc8723a2ant_RfShrink(
+	IN	PBTC_COEXIST		pBtCoexist,
+	IN	BOOLEAN			bForceExec,
+	IN	BOOLEAN			bRxRfShrinkOn
+	)
+{
+	BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW, ("[BTCoex], %s turn Rx RF Shrink = %s\n",  
+		(bForceExec? "force to":""), ((bRxRfShrinkOn)? "ON":"OFF")));
+	pCoexDm->bCurRfRxLpfShrink = bRxRfShrinkOn;
+
+	if(!bForceExec)
+	{
+		BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_DETAIL, ("[BTCoex], bPreRfRxLpfShrink=%d, bCurRfRxLpfShrink=%d\n", 
+			pCoexDm->bPreRfRxLpfShrink, pCoexDm->bCurRfRxLpfShrink));
+
+		if(pCoexDm->bPreRfRxLpfShrink == pCoexDm->bCurRfRxLpfShrink) 
+			return;
+	}
+	halbtc8723a2ant_SetSwRfRxLpfCorner(pBtCoexist, pCoexDm->bCurRfRxLpfShrink);
+
+	pCoexDm->bPreRfRxLpfShrink = pCoexDm->bCurRfRxLpfShrink;
+}
+
+VOID
+halbtc8723a2ant_SetSwPenaltyTxRateAdaptive(
+	IN	PBTC_COEXIST		pBtCoexist,
+	IN	BOOLEAN			bLowPenaltyRa
+	)
+{
+	u1Byte	tmpU1;
+
+	tmpU1 = pBtCoexist->btc_read_1byte(pBtCoexist, 0x4fd);
+	tmpU1 |= BIT0;
+	if(bLowPenaltyRa)
+	{
+		BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_EXEC, ("[BTCoex], Tx rate adaptive, set low penalty!!\n"));
+		tmpU1 &= ~BIT2;
+	}
+	else
+	{
+		BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_EXEC, ("[BTCoex], Tx rate adaptive, set normal!!\n"));
+		tmpU1 |= BIT2;
+	}
+
+	pBtCoexist->btc_write_1byte(pBtCoexist, 0x4fd, tmpU1);
+}
+
+VOID
+halbtc8723a2ant_LowPenaltyRa(
+	IN	PBTC_COEXIST		pBtCoexist,
+	IN	BOOLEAN			bForceExec,
+	IN	BOOLEAN			bLowPenaltyRa
+	)
+{
+	return;	// low-penalty RA is currently disabled; the code below is not reached
+	BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW, ("[BTCoex], %s turn LowPenaltyRA = %s\n",  
+		(bForceExec? "force to":""), ((bLowPenaltyRa)? "ON":"OFF")));
+	pCoexDm->bCurLowPenaltyRa = bLowPenaltyRa;
+
+	if(!bForceExec)
+	{
+		BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_DETAIL, ("[BTCoex], bPreLowPenaltyRa=%d, bCurLowPenaltyRa=%d\n", 
+			pCoexDm->bPreLowPenaltyRa, pCoexDm->bCurLowPenaltyRa));
+
+		if(pCoexDm->bPreLowPenaltyRa == pCoexDm->bCurLowPenaltyRa) 
+			return;
+	}
+	halbtc8723a2ant_SetSwPenaltyTxRateAdaptive(pBtCoexist, pCoexDm->bCurLowPenaltyRa);
+
+	pCoexDm->bPreLowPenaltyRa = pCoexDm->bCurLowPenaltyRa;
+}
+
+VOID
+halbtc8723a2ant_SetSwFullTimeDacSwing(
+	IN	PBTC_COEXIST		pBtCoexist,
+	IN	BOOLEAN			bSwDacSwingOn,
+	IN	u4Byte			swDacSwingLvl
+	)
+{
+	if(bSwDacSwingOn)
+	{
+		pBtCoexist->btc_setBbReg(pBtCoexist, 0x880, 0xff000000, swDacSwingLvl);
+	}
+	else
+	{
+		pBtCoexist->btc_setBbReg(pBtCoexist, 0x880, 0xff000000, 0xc0);
+	}
+}
+
+
+VOID
+halbtc8723a2ant_DacSwing(
+	IN	PBTC_COEXIST		pBtCoexist,
+	IN	BOOLEAN			bForceExec,
+	IN	BOOLEAN			bDacSwingOn,
+	IN	u4Byte			dacSwingLvl
+	)
+{
+	BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW, ("[BTCoex], %s turn DacSwing=%s, dacSwingLvl=0x%x\n",  
+		(bForceExec? "force to":""), ((bDacSwingOn)? "ON":"OFF"), dacSwingLvl));
+	pCoexDm->bCurDacSwingOn = bDacSwingOn;
+	pCoexDm->curDacSwingLvl = dacSwingLvl;
+
+	if(!bForceExec)
+	{
+		BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_DETAIL, ("[BTCoex], bPreDacSwingOn=%d, preDacSwingLvl=0x%x, bCurDacSwingOn=%d, curDacSwingLvl=0x%x\n", 
+			pCoexDm->bPreDacSwingOn, pCoexDm->preDacSwingLvl,
+			pCoexDm->bCurDacSwingOn, pCoexDm->curDacSwingLvl));
+
+		if( (pCoexDm->bPreDacSwingOn == pCoexDm->bCurDacSwingOn) &&
+			(pCoexDm->preDacSwingLvl == pCoexDm->curDacSwingLvl) )
+			return;
+	}
+	mdelay(30);
+	halbtc8723a2ant_SetSwFullTimeDacSwing(pBtCoexist, bDacSwingOn, dacSwingLvl);
+
+	pCoexDm->bPreDacSwingOn = pCoexDm->bCurDacSwingOn;
+	pCoexDm->preDacSwingLvl = pCoexDm->curDacSwingLvl;
+}
+
+VOID
+halbtc8723a2ant_SetAdcBackOff(
+	IN	PBTC_COEXIST		pBtCoexist,
+	IN	BOOLEAN			bAdcBackOff
+	)
+{
+	if(bAdcBackOff)
+	{
+		BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_EXEC, ("[BTCoex], BB BackOff Level On!\n"));
+		pBtCoexist->btc_write_4byte(pBtCoexist, 0xc04,0x3a07611);
+	}
+	else
+	{
+		BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_EXEC, ("[BTCoex], BB BackOff Level Off!\n"));		
+		pBtCoexist->btc_write_4byte(pBtCoexist, 0xc04,0x3a05611);
+	}
+}
+
+VOID
+halbtc8723a2ant_AdcBackOff(
+	IN	PBTC_COEXIST		pBtCoexist,
+	IN	BOOLEAN			bForceExec,
+	IN	BOOLEAN			bAdcBackOff
+	)
+{
+	BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW, ("[BTCoex], %s turn AdcBackOff = %s\n",  
+		(bForceExec? "force to":""), ((bAdcBackOff)? "ON":"OFF")));
+	pCoexDm->bCurAdcBackOff = bAdcBackOff;
+
+	if(!bForceExec)
+	{
+		BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_DETAIL, ("[BTCoex], bPreAdcBackOff=%d, bCurAdcBackOff=%d\n", 
+			pCoexDm->bPreAdcBackOff, pCoexDm->bCurAdcBackOff));
+
+		if(pCoexDm->bPreAdcBackOff == pCoexDm->bCurAdcBackOff) 
+			return;
+	}
+	halbtc8723a2ant_SetAdcBackOff(pBtCoexist, pCoexDm->bCurAdcBackOff);
+
+	pCoexDm->bPreAdcBackOff = pCoexDm->bCurAdcBackOff;
+}
+
+VOID
+halbtc8723a2ant_SetAgcTable(
+	IN	PBTC_COEXIST		pBtCoexist,
+	IN	BOOLEAN			bAgcTableEn
+	)
+{
+	u1Byte		rssiAdjustVal=0;
+
+	if(bAgcTableEn)
+	{
+		BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_EXEC, ("[BTCoex], Agc Table On!\n"));
+		pBtCoexist->btc_write_4byte(pBtCoexist, 0xc78,0x4e1c0001);
+		pBtCoexist->btc_write_4byte(pBtCoexist, 0xc78,0x4d1d0001);
+		pBtCoexist->btc_write_4byte(pBtCoexist, 0xc78,0x4c1e0001);
+		pBtCoexist->btc_write_4byte(pBtCoexist, 0xc78,0x4b1f0001);
+		pBtCoexist->btc_write_4byte(pBtCoexist, 0xc78,0x4a200001);
+		
+		pBtCoexist->btc_set_rf_reg(pBtCoexist, BTC_RF_A, 0x12, 0xfffff, 0xdc000);
+		pBtCoexist->btc_set_rf_reg(pBtCoexist, BTC_RF_A, 0x12, 0xfffff, 0x90000);
+		pBtCoexist->btc_set_rf_reg(pBtCoexist, BTC_RF_A, 0x12, 0xfffff, 0x51000);
+		pBtCoexist->btc_set_rf_reg(pBtCoexist, BTC_RF_A, 0x12, 0xfffff, 0x12000);
+		pBtCoexist->btc_set_rf_reg(pBtCoexist, BTC_RF_A, 0x1a, 0xfffff, 0x00355);
+		
+		rssiAdjustVal = 6;
+	}
+	else
+	{
+		BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_EXEC, ("[BTCoex], Agc Table Off!\n"));
+		pBtCoexist->btc_write_4byte(pBtCoexist, 0xc78,0x641c0001);
+		pBtCoexist->btc_write_4byte(pBtCoexist, 0xc78,0x631d0001);
+		pBtCoexist->btc_write_4byte(pBtCoexist, 0xc78,0x621e0001);
+		pBtCoexist->btc_write_4byte(pBtCoexist, 0xc78,0x611f0001);
+		pBtCoexist->btc_write_4byte(pBtCoexist, 0xc78,0x60200001);
+
+		pBtCoexist->btc_set_rf_reg(pBtCoexist, BTC_RF_A, 0x12, 0xfffff, 0x32000);
+		pBtCoexist->btc_set_rf_reg(pBtCoexist, BTC_RF_A, 0x12, 0xfffff, 0x71000);
+		pBtCoexist->btc_set_rf_reg(pBtCoexist, BTC_RF_A, 0x12, 0xfffff, 0xb0000);
+		pBtCoexist->btc_set_rf_reg(pBtCoexist, BTC_RF_A, 0x12, 0xfffff, 0xfc000);
+		pBtCoexist->btc_set_rf_reg(pBtCoexist, BTC_RF_A, 0x1a, 0xfffff, 0x30355);
+	}
+
+	// set rssiAdjustVal for wifi module.
+	pBtCoexist->btc_set(pBtCoexist, BTC_SET_U1_RSSI_ADJ_VAL_FOR_AGC_TABLE_ON, &rssiAdjustVal);
+}
+
+
+VOID
+halbtc8723a2ant_AgcTable(
+	IN	PBTC_COEXIST		pBtCoexist,
+	IN	BOOLEAN			bForceExec,
+	IN	BOOLEAN			bAgcTableEn
+	)
+{
+	BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW, ("[BTCoex], %s %s Agc Table\n",  
+		(bForceExec? "force to":""), ((bAgcTableEn)? "Enable":"Disable")));
+	pCoexDm->bCurAgcTableEn = bAgcTableEn;
+
+	if(!bForceExec)
+	{
+		BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_DETAIL, ("[BTCoex], bPreAgcTableEn=%d, bCurAgcTableEn=%d\n", 
+			pCoexDm->bPreAgcTableEn, pCoexDm->bCurAgcTableEn));
+
+		if(pCoexDm->bPreAgcTableEn == pCoexDm->bCurAgcTableEn) 
+			return;
+	}
+	halbtc8723a2ant_SetAgcTable(pBtCoexist, bAgcTableEn);
+
+	pCoexDm->bPreAgcTableEn = pCoexDm->bCurAgcTableEn;
+}
+
+VOID
+halbtc8723a2ant_SetCoexTable(
+	IN	PBTC_COEXIST	pBtCoexist,
+	IN	u4Byte		val0x6c0,
+	IN	u4Byte		val0x6c8,
+	IN	u1Byte		val0x6cc
+	)
+{
+	BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_EXEC, ("[BTCoex], set coex table, set 0x6c0=0x%x\n", val0x6c0));
+	pBtCoexist->btc_write_4byte(pBtCoexist, 0x6c0, val0x6c0);
+
+	BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_EXEC, ("[BTCoex], set coex table, set 0x6c8=0x%x\n", val0x6c8));
+	pBtCoexist->btc_write_4byte(pBtCoexist, 0x6c8, val0x6c8);
+
+	BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_EXEC, ("[BTCoex], set coex table, set 0x6cc=0x%x\n", val0x6cc));
+	pBtCoexist->btc_write_1byte(pBtCoexist, 0x6cc, val0x6cc);
+}
+
+VOID
+halbtc8723a2ant_CoexTable(
+	IN	PBTC_COEXIST		pBtCoexist,
+	IN	BOOLEAN			bForceExec,
+	IN	u4Byte			val0x6c0,
+	IN	u4Byte			val0x6c8,
+	IN	u1Byte			val0x6cc
+	)
+{
+	BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW, ("[BTCoex], %s write Coex Table 0x6c0=0x%x, 0x6c8=0x%x, 0x6cc=0x%x\n", 
+		(bForceExec? "force to":""), val0x6c0, val0x6c8, val0x6cc));
+	pCoexDm->curVal0x6c0 = val0x6c0;
+	pCoexDm->curVal0x6c8 = val0x6c8;
+	pCoexDm->curVal0x6cc = val0x6cc;
+
+	if(!bForceExec)
+	{
+		BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_DETAIL, ("[BTCoex], preVal0x6c0=0x%x, preVal0x6c8=0x%x, preVal0x6cc=0x%x !!\n", 
+			pCoexDm->preVal0x6c0, pCoexDm->preVal0x6c8, pCoexDm->preVal0x6cc));
+		BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_DETAIL, ("[BTCoex], curVal0x6c0=0x%x, curVal0x6c8=0x%x, curVal0x6cc=0x%x !!\n", 
+			pCoexDm->curVal0x6c0, pCoexDm->curVal0x6c8, pCoexDm->curVal0x6cc));
+	
+		if( (pCoexDm->preVal0x6c0 == pCoexDm->curVal0x6c0) &&
+			(pCoexDm->preVal0x6c8 == pCoexDm->curVal0x6c8) &&
+			(pCoexDm->preVal0x6cc == pCoexDm->curVal0x6cc) )
+			return;
+	}
+	halbtc8723a2ant_SetCoexTable(pBtCoexist, val0x6c0, val0x6c8, val0x6cc);
+
+	pCoexDm->preVal0x6c0 = pCoexDm->curVal0x6c0;
+	pCoexDm->preVal0x6c8 = pCoexDm->curVal0x6c8;
+	pCoexDm->preVal0x6cc = pCoexDm->curVal0x6cc;
+}
+
+VOID
+halbtc8723a2ant_SetFwIgnoreWlanAct(
+	IN	PBTC_COEXIST		pBtCoexist,
+	IN	BOOLEAN			bEnable
+	)
+{
+	u1Byte			H2C_Parameter[1] ={0};
+		
+	if(bEnable)
+	{
+		H2C_Parameter[0] |= BIT0;		// function enable
+	}
+	
+	BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_EXEC, ("[BTCoex], set FW for BT Ignore Wlan_Act, FW write 0x25=0x%x\n", 
+		H2C_Parameter[0]));
+
+	pBtCoexist->btc_fill_h2c(pBtCoexist, 0x25, 1, H2C_Parameter);	
+}
+
+VOID
+halbtc8723a2ant_IgnoreWlanAct(
+	IN	PBTC_COEXIST		pBtCoexist,
+	IN	BOOLEAN			bForceExec,
+	IN	BOOLEAN			bEnable
+	)
+{
+	BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW, ("[BTCoex], %s turn Ignore WlanAct %s\n", 
+		(bForceExec? "force to":""), (bEnable? "ON":"OFF")));
+	pCoexDm->bCurIgnoreWlanAct = bEnable;
+
+	if(!bForceExec)
+	{
+		BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_DETAIL, ("[BTCoex], bPreIgnoreWlanAct = %d, bCurIgnoreWlanAct = %d!!\n", 
+			pCoexDm->bPreIgnoreWlanAct, pCoexDm->bCurIgnoreWlanAct));
+
+		if(pCoexDm->bPreIgnoreWlanAct == pCoexDm->bCurIgnoreWlanAct)
+			return;
+	}
+	halbtc8723a2ant_SetFwIgnoreWlanAct(pBtCoexist, bEnable);
+
+	pCoexDm->bPreIgnoreWlanAct = pCoexDm->bCurIgnoreWlanAct;
+}
+
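+// Send the 5 PS-TDMA parameter bytes to firmware via H2C command 0x3a and
+// keep a copy in pCoexDm->psTdmaPara.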
+VOID
+halbtc8723a2ant_SetFwPstdma(
+	IN	PBTC_COEXIST		pBtCoexist,
+	IN	u1Byte			byte1,
+	IN	u1Byte			byte2,
+	IN	u1Byte			byte3,
+	IN	u1Byte			byte4,
+	IN	u1Byte			byte5
+	)
+{
+	u1Byte			H2C_Parameter[5] ={0};
+
+	H2C_Parameter[0] = byte1;	
+	H2C_Parameter[1] = byte2;	
+	H2C_Parameter[2] = byte3;
+	H2C_Parameter[3] = byte4;
+	H2C_Parameter[4] = byte5;
+
+	pCoexDm->psTdmaPara[0] = byte1;
+	pCoexDm->psTdmaPara[1] = byte2;
+	pCoexDm->psTdmaPara[2] = byte3;
+	pCoexDm->psTdmaPara[3] = byte4;
+	pCoexDm->psTdmaPara[4] = byte5;
+	
+	BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_EXEC, ("[BTCoex], FW write 0x3a(5bytes)=0x%x%08x\n", 
+		H2C_Parameter[0], 
+		H2C_Parameter[1]<<24|H2C_Parameter[2]<<16|H2C_Parameter[3]<<8|H2C_Parameter[4]));
+
+	pBtCoexist->btc_fill_h2c(pBtCoexist, 0x3a, 5, H2C_Parameter);
+}
+
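+// PS-TDMA wrapper: if the combined BT high/low priority Tx/Rx counters exceed
+// 3000, the requested type is overridden with type 8; otherwise the caller's
+// bTurnOn/type is cached and only written to firmware on a state change
+// (unless bForceExec).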
+VOID
+halbtc8723a2ant_PsTdma(
+	IN	PBTC_COEXIST		pBtCoexist,
+	IN	BOOLEAN			bForceExec,
+	IN	BOOLEAN			bTurnOn,
+	IN	u1Byte			type
+	)
+{
+	u4Byte	btTxRxCnt=0;
+
+	btTxRxCnt = pCoexSta->highPriorityTx+pCoexSta->highPriorityRx+
+				pCoexSta->lowPriorityTx+pCoexSta->lowPriorityRx;
+
+	if(btTxRxCnt > 3000)
+	{		
+		pCoexDm->bCurPsTdmaOn = true;
+		pCoexDm->curPsTdma = 8;
+		BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW, ("[BTCoex], turn ON PS TDMA, type=%d for BT tx/rx counters=%d(>3000)\n", 
+			pCoexDm->curPsTdma, btTxRxCnt));
+	}
+	else
+	{
+		BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW, ("[BTCoex], %s turn %s PS TDMA, type=%d\n", 
+			(bForceExec? "force to":""), (bTurnOn? "ON":"OFF"), type));		
+		pCoexDm->bCurPsTdmaOn = bTurnOn;
+		pCoexDm->curPsTdma = type;
+	}
+
+	if(!bForceExec)
+	{
+		BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_DETAIL, ("[BTCoex], bPrePsTdmaOn = %d, bCurPsTdmaOn = %d!!\n", 
+			pCoexDm->bPrePsTdmaOn, pCoexDm->bCurPsTdmaOn));
+		BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_DETAIL, ("[BTCoex], prePsTdma = %d, curPsTdma = %d!!\n", 
+			pCoexDm->prePsTdma, pCoexDm->curPsTdma));
+
+		if( (pCoexDm->bPrePsTdmaOn == pCoexDm->bCurPsTdmaOn) &&
+			(pCoexDm->prePsTdma == pCoexDm->curPsTdma) )
+			return;
+	}	
+	if(pCoexDm->bCurPsTdmaOn)
+	{
+		switch(pCoexDm->curPsTdma)
+		{
+			case 1:
+			default:
+				halbtc8723a2ant_SetFwPstdma(pBtCoexist, 0xe3, 0x1a, 0x1a, 0xe1, 0x98);
+				break;
+			case 2:
+				halbtc8723a2ant_SetFwPstdma(pBtCoexist, 0xe3, 0x12, 0x12, 0xe1, 0x98);
+				break;
+			case 3:
+				halbtc8723a2ant_SetFwPstdma(pBtCoexist, 0xe3, 0xa, 0xa, 0xe1, 0x98);
+				break;
+			case 4:
+				halbtc8723a2ant_SetFwPstdma(pBtCoexist, 0xa3, 0x5, 0x5, 0xe1, 0x80);
+				break;
+			case 5:
+				halbtc8723a2ant_SetFwPstdma(pBtCoexist, 0xe3, 0x1a, 0x1a, 0x60, 0x98);
+				break;
+			case 6:
+				halbtc8723a2ant_SetFwPstdma(pBtCoexist, 0xe3, 0x12, 0x12, 0x60, 0x98);
+				break;
+			case 7:
+				halbtc8723a2ant_SetFwPstdma(pBtCoexist, 0xe3, 0xa, 0xa, 0x60, 0x98);
+				break;
+			case 8: 
+				halbtc8723a2ant_SetFwPstdma(pBtCoexist, 0xa3, 0x5, 0x5, 0x60, 0x80);
+				break;
+			case 9: 
+				halbtc8723a2ant_SetFwPstdma(pBtCoexist, 0xe3, 0x1a, 0x1a, 0xe1, 0x98);
+				break;
+			case 10:	
+				halbtc8723a2ant_SetFwPstdma(pBtCoexist, 0xe3, 0x12, 0x12, 0xe1, 0x98);
+				break;
+			case 11:	
+				halbtc8723a2ant_SetFwPstdma(pBtCoexist, 0xe3, 0xa, 0xa, 0xe1, 0x98);
+				break;
+			case 12:
+				halbtc8723a2ant_SetFwPstdma(pBtCoexist, 0xe3, 0x5, 0x5, 0xe1, 0x98);
+				break;
+			case 13:
+				halbtc8723a2ant_SetFwPstdma(pBtCoexist, 0xe3, 0x1a, 0x1a, 0x60, 0x98);
+				break;
+			case 14:
+				halbtc8723a2ant_SetFwPstdma(pBtCoexist, 0xe3, 0x12, 0x12, 0x60, 0x98);
+				break;
+			case 15:
+				halbtc8723a2ant_SetFwPstdma(pBtCoexist, 0xe3, 0xa, 0xa, 0x60, 0x98);
+				break;
+			case 16:
+				halbtc8723a2ant_SetFwPstdma(pBtCoexist, 0xe3, 0x5, 0x5, 0x60, 0x98);
+				break;
+			case 17:
+				halbtc8723a2ant_SetFwPstdma(pBtCoexist, 0xa3, 0x2f, 0x2f, 0x60, 0x80);
+				break;
+			case 18:
+				halbtc8723a2ant_SetFwPstdma(pBtCoexist, 0xe3, 0x5, 0x5, 0xe1, 0x98);
+				break;			
+			case 19:
+				halbtc8723a2ant_SetFwPstdma(pBtCoexist, 0xe3, 0x25, 0x25, 0xe1, 0x98);
+				break;
+			case 20:
+				halbtc8723a2ant_SetFwPstdma(pBtCoexist, 0xe3, 0x25, 0x25, 0x60, 0x98);
+				break;
+		}
+	}
+	else
+	{
+		// disable PS tdma
+		switch(pCoexDm->curPsTdma)
+		{
+			case 0:
+				halbtc8723a2ant_SetFwPstdma(pBtCoexist, 0x0, 0x0, 0x0, 0x8, 0x0);
+				break;
+			case 1:
+				halbtc8723a2ant_SetFwPstdma(pBtCoexist, 0x0, 0x0, 0x0, 0x0, 0x0);
+				break;
+			default:
+				halbtc8723a2ant_SetFwPstdma(pBtCoexist, 0x0, 0x0, 0x0, 0x8, 0x0);
+				break;
+		}
+	}
+
+	// update pre state
+	pCoexDm->bPrePsTdmaOn = pCoexDm->bCurPsTdmaOn;
+	pCoexDm->prePsTdma = pCoexDm->curPsTdma;
+}
+
+
+VOID
+halbtc8723a2ant_CoexAllOff(
+	IN	PBTC_COEXIST		pBtCoexist
+	)
+{
+	// fw all off
+	halbtc8723a2ant_IgnoreWlanAct(pBtCoexist, NORMAL_EXEC, FALSE);
+	halbtc8723a2ant_PsTdma(pBtCoexist, NORMAL_EXEC, FALSE, 0);
+	halbtc8723a2ant_FwDacSwingLvl(pBtCoexist, NORMAL_EXEC, 0x20);
+	halbtc8723a2ant_DecBtPwr(pBtCoexist, NORMAL_EXEC, FALSE);
+
+	// sw all off
+	halbtc8723a2ant_AgcTable(pBtCoexist, NORMAL_EXEC, FALSE);
+	halbtc8723a2ant_AdcBackOff(pBtCoexist, NORMAL_EXEC, FALSE);
+	halbtc8723a2ant_LowPenaltyRa(pBtCoexist, NORMAL_EXEC, FALSE);
+	halbtc8723a2ant_RfShrink(pBtCoexist, NORMAL_EXEC, FALSE);
+	halbtc8723a2ant_DacSwing(pBtCoexist, NORMAL_EXEC, FALSE, 0xc0);
+
+	// hw all off
+	halbtc8723a2ant_CoexTable(pBtCoexist, NORMAL_EXEC, 0x55555555, 0xffff, 0x3);
+}
+
+VOID
+halbtc8723a2ant_InitCoexDm(
+	IN	PBTC_COEXIST		pBtCoexist
+	)
+{
+	// force to reset coex mechanism
+	halbtc8723a2ant_CoexTable(pBtCoexist, FORCE_EXEC, 0x55555555, 0xffff, 0x3);
+	halbtc8723a2ant_PsTdma(pBtCoexist, FORCE_EXEC, FALSE, 0);
+	halbtc8723a2ant_FwDacSwingLvl(pBtCoexist, FORCE_EXEC, 0x20);
+	halbtc8723a2ant_DecBtPwr(pBtCoexist, FORCE_EXEC, FALSE);
+	halbtc8723a2ant_IgnoreWlanAct(pBtCoexist, FORCE_EXEC, FALSE);
+
+	halbtc8723a2ant_AgcTable(pBtCoexist, FORCE_EXEC, FALSE);
+	halbtc8723a2ant_AdcBackOff(pBtCoexist, FORCE_EXEC, FALSE);
+	halbtc8723a2ant_LowPenaltyRa(pBtCoexist, FORCE_EXEC, FALSE);
+	halbtc8723a2ant_RfShrink(pBtCoexist, FORCE_EXEC, FALSE);
+	halbtc8723a2ant_DacSwing(pBtCoexist, FORCE_EXEC, FALSE, 0xc0);
+}
+
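+// BT inquiry/page in progress: disable wifi low power, program the coex table
+// and enable PS-TDMA type 8.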
+VOID
+halbtc8723a2ant_BtInquiryPage(
+	IN	PBTC_COEXIST		pBtCoexist
+	)
+{
+	BOOLEAN	bLowPwrDisable=true;
+	
+	pBtCoexist->btc_set(pBtCoexist, BTC_SET_ACT_DISABLE_LOW_POWER, &bLowPwrDisable);
+
+	halbtc8723a2ant_CoexTable(pBtCoexist, NORMAL_EXEC, 0x55555555, 0xffff, 0x3);
+	halbtc8723a2ant_IgnoreWlanAct(pBtCoexist, NORMAL_EXEC, FALSE);
+	halbtc8723a2ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 8);
+}
+
+VOID
+halbtc8723a2ant_BtEnableAction(
+	IN 	PBTC_COEXIST		pBtCoexist
+	)
+{
+	BOOLEAN		bWifiConnected=FALSE;
+	
+	// Here we need to resend some wifi info to BT
+	// because bt is reset and has lost the info.
+	pBtCoexist->btc_get(pBtCoexist, BTC_GET_BL_WIFI_CONNECTED, &bWifiConnected);
+	if(bWifiConnected)
+	{
+		halbtc8723a2ant_IndicateWifiChnlBwInfo(pBtCoexist, BTC_MEDIA_CONNECT);
+	}
+	else
+	{
+		halbtc8723a2ant_IndicateWifiChnlBwInfo(pBtCoexist, BTC_MEDIA_DISCONNECT);
+	}
+
+	halbtc8723a2ant_IgnoreWlanAct(pBtCoexist, FORCE_EXEC, FALSE);
+}
+
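+// Read the BT high/low priority Tx/Rx counters from registers 0x770/0x774,
+// record them in pCoexSta and then reset the hardware counters via 0x76e.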
+VOID
+halbtc8723a2ant_MonitorBtCtr(
+	IN	PBTC_COEXIST		pBtCoexist
+	)
+{
+	u4Byte 			regHPTxRx, regLPTxRx, u4Tmp;
+	u4Byte			regHPTx=0, regHPRx=0, regLPTx=0, regLPRx=0;
+	u1Byte			u1Tmp;
+	
+	regHPTxRx = 0x770;
+	regLPTxRx = 0x774;
+
+	u4Tmp = pBtCoexist->btc_read_4byte(pBtCoexist, regHPTxRx);
+	regHPTx = u4Tmp & MASKLWORD;
+	regHPRx = (u4Tmp & MASKHWORD)>>16;
+
+	u4Tmp = pBtCoexist->btc_read_4byte(pBtCoexist, regLPTxRx);
+	regLPTx = u4Tmp & MASKLWORD;
+	regLPRx = (u4Tmp & MASKHWORD)>>16;
+		
+	pCoexSta->highPriorityTx = regHPTx;
+	pCoexSta->highPriorityRx = regHPRx;
+	pCoexSta->lowPriorityTx = regLPTx;
+	pCoexSta->lowPriorityRx = regLPRx;
+
+	BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_MONITOR, ("[BTCoex], High Priority Tx/Rx (reg 0x%x)=0x%x(%d)/0x%x(%d)\n", 
+		regHPTxRx, regHPTx, regHPTx, regHPRx, regHPRx));
+	BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_MONITOR, ("[BTCoex], Low Priority Tx/Rx (reg 0x%x)=0x%x(%d)/0x%x(%d)\n", 
+		regLPTxRx, regLPTx, regLPTx, regLPRx, regLPRx));
+
+	// reset counter
+	pBtCoexist->btc_write_1byte(pBtCoexist, 0x76e, 0xc);
+}
+
+VOID
+halbtc8723a2ant_MonitorBtEnableDisable(
+	IN 	PBTC_COEXIST		pBtCoexist
+	)
+{
+	static BOOLEAN	bPreBtDisabled=FALSE;
+	static u4Byte	btDisableCnt=0;
+	BOOLEAN			bBtActive=true, bBtDisabled=FALSE;
+
+	// This function checks whether bt is disabled
+
+	if(	pCoexSta->highPriorityTx == 0 &&
+		pCoexSta->highPriorityRx == 0 &&
+		pCoexSta->lowPriorityTx == 0 &&
+		pCoexSta->lowPriorityRx == 0)
+	{
+		bBtActive = FALSE;
+	}
+	if(	pCoexSta->highPriorityTx == 0xffff &&
+		pCoexSta->highPriorityRx == 0xffff &&
+		pCoexSta->lowPriorityTx == 0xffff &&
+		pCoexSta->lowPriorityRx == 0xffff)
+	{
+		bBtActive = FALSE;
+	}
+	if(bBtActive)
+	{
+		btDisableCnt = 0;
+		bBtDisabled = FALSE;
+		pBtCoexist->btc_set(pBtCoexist, BTC_SET_BL_BT_DISABLE, &bBtDisabled);
+		BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_MONITOR, ("[BTCoex], BT is enabled !!\n"));
+	}
+	else
+	{
+		btDisableCnt++;
+		BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_MONITOR, ("[BTCoex], bt all counters=0, %d times!!\n", 
+				btDisableCnt));
+		if(btDisableCnt >= 2)
+		{
+			bBtDisabled = true;
+			pBtCoexist->btc_set(pBtCoexist, BTC_SET_BL_BT_DISABLE, &bBtDisabled);
+			BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_MONITOR, ("[BTCoex], BT is disabled !!\n"));
+		}
+	}
+	if(bPreBtDisabled != bBtDisabled)
+	{
+		BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_MONITOR, ("[BTCoex], BT is from %s to %s!!\n", 
+			(bPreBtDisabled ? "disabled":"enabled"), 
+			(bBtDisabled ? "disabled":"enabled")));
+		bPreBtDisabled = bBtDisabled;
+		if(!bBtDisabled)
+		{
+			halbtc8723a2ant_BtEnableAction(pBtCoexist);
+		}
+	}
+}
+
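+// Handle the "common" wifi/BT status combinations (at least one side idle).
+// Returns true when one of those generic settings was applied and false when
+// both wifi and BT are non-idle.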
+BOOLEAN
+halbtc8723a2ant_IsCommonAction(
+	IN	PBTC_COEXIST		pBtCoexist
+	)
+{
+	PBTC_STACK_INFO		pStackInfo=&pBtCoexist->stack_info;
+	BOOLEAN			bCommon=FALSE, bWifiConnected=FALSE;
+	BOOLEAN			bLowPwrDisable=FALSE;
+
+	if(!pStackInfo->bBtLinkExist)
+	{
+		bLowPwrDisable = FALSE;
+		pBtCoexist->btc_set(pBtCoexist, BTC_SET_ACT_DISABLE_LOW_POWER, &bLowPwrDisable);
+	}
+	else
+	{
+		bLowPwrDisable = true;
+		pBtCoexist->btc_set(pBtCoexist, BTC_SET_ACT_DISABLE_LOW_POWER, &bLowPwrDisable);
+	}
+
+	pBtCoexist->btc_get(pBtCoexist, BTC_GET_BL_WIFI_CONNECTED, &bWifiConnected);
+
+	if(halbtc8723a2ant_IsWifiIdle(pBtCoexist) && 
+		BT_8723A_2ANT_BT_STATUS_IDLE == pCoexDm->btStatus)
+	{
+		BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], Wifi idle + Bt idle!!\n"));
+			
+		halbtc8723a2ant_LowPenaltyRa(pBtCoexist, NORMAL_EXEC, FALSE);
+		halbtc8723a2ant_RfShrink(pBtCoexist, NORMAL_EXEC, FALSE);
+		halbtc8723a2ant_CoexTable(pBtCoexist, NORMAL_EXEC, 0x55555555, 0xffff, 0x3);
+
+		halbtc8723a2ant_PsTdma(pBtCoexist, NORMAL_EXEC, FALSE, 0);
+		halbtc8723a2ant_FwDacSwingLvl(pBtCoexist, NORMAL_EXEC, 0x20);
+		halbtc8723a2ant_DecBtPwr(pBtCoexist, NORMAL_EXEC, FALSE);
+
+		halbtc8723a2ant_AgcTable(pBtCoexist, NORMAL_EXEC, FALSE);
+		halbtc8723a2ant_AdcBackOff(pBtCoexist, NORMAL_EXEC, FALSE);
+		halbtc8723a2ant_DacSwing(pBtCoexist, NORMAL_EXEC, FALSE, 0xc0);
+
+		bCommon = true;
+	}
+	else if(!halbtc8723a2ant_IsWifiIdle(pBtCoexist) && 
+			(BT_8723A_2ANT_BT_STATUS_IDLE == pCoexDm->btStatus) )
+	{
+		BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], Wifi non-idle + BT idle!!\n"));
+
+		halbtc8723a2ant_LowPenaltyRa(pBtCoexist, NORMAL_EXEC, true);
+		halbtc8723a2ant_RfShrink(pBtCoexist, NORMAL_EXEC, FALSE);
+		halbtc8723a2ant_CoexTable(pBtCoexist, NORMAL_EXEC, 0x55555555, 0xffff, 0x3);
+
+		halbtc8723a2ant_IgnoreWlanAct(pBtCoexist, NORMAL_EXEC, FALSE);
+		halbtc8723a2ant_PsTdma(pBtCoexist, NORMAL_EXEC, FALSE, 0);
+		halbtc8723a2ant_FwDacSwingLvl(pBtCoexist, NORMAL_EXEC, 0x20);
+		halbtc8723a2ant_DecBtPwr(pBtCoexist, NORMAL_EXEC, true);
+
+		halbtc8723a2ant_AgcTable(pBtCoexist, NORMAL_EXEC, FALSE);
+		halbtc8723a2ant_AdcBackOff(pBtCoexist, NORMAL_EXEC, FALSE);
+		halbtc8723a2ant_DacSwing(pBtCoexist, NORMAL_EXEC, FALSE, 0xc0);
+
+		bCommon = true;
+	}
+	else if(halbtc8723a2ant_IsWifiIdle(pBtCoexist) && 
+		(BT_8723A_2ANT_BT_STATUS_CONNECTED_IDLE == pCoexDm->btStatus) )
+	{
+		BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], Wifi idle + Bt connected idle!!\n"));
+		
+		halbtc8723a2ant_LowPenaltyRa(pBtCoexist, NORMAL_EXEC, true);
+		halbtc8723a2ant_RfShrink(pBtCoexist, NORMAL_EXEC, true);
+		halbtc8723a2ant_CoexTable(pBtCoexist, NORMAL_EXEC, 0x55555555, 0xffff, 0x3);
+
+		halbtc8723a2ant_IgnoreWlanAct(pBtCoexist, NORMAL_EXEC, FALSE);
+		halbtc8723a2ant_PsTdma(pBtCoexist, NORMAL_EXEC, FALSE, 0);
+		halbtc8723a2ant_FwDacSwingLvl(pBtCoexist, NORMAL_EXEC, 0x20);
+		halbtc8723a2ant_DecBtPwr(pBtCoexist, NORMAL_EXEC, FALSE);
+
+		halbtc8723a2ant_AgcTable(pBtCoexist, NORMAL_EXEC, FALSE);
+		halbtc8723a2ant_AdcBackOff(pBtCoexist, NORMAL_EXEC, FALSE);
+		halbtc8723a2ant_DacSwing(pBtCoexist, NORMAL_EXEC, FALSE, 0xc0);
+
+		bCommon = true;
+	}
+	else if(!halbtc8723a2ant_IsWifiIdle(pBtCoexist) && 
+		(BT_8723A_2ANT_BT_STATUS_CONNECTED_IDLE == pCoexDm->btStatus) )
+	{
+		BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], Wifi non-idle + Bt connected idle!!\n"));
+
+		halbtc8723a2ant_LowPenaltyRa(pBtCoexist, NORMAL_EXEC, true);
+		halbtc8723a2ant_RfShrink(pBtCoexist, NORMAL_EXEC, true);
+		halbtc8723a2ant_CoexTable(pBtCoexist, NORMAL_EXEC, 0x55555555, 0xffff, 0x3);
+
+		halbtc8723a2ant_IgnoreWlanAct(pBtCoexist, NORMAL_EXEC, FALSE);
+		halbtc8723a2ant_PsTdma(pBtCoexist, NORMAL_EXEC, FALSE, 0);
+		halbtc8723a2ant_FwDacSwingLvl(pBtCoexist, NORMAL_EXEC, 0x20);
+		halbtc8723a2ant_DecBtPwr(pBtCoexist, NORMAL_EXEC, FALSE);
+
+		halbtc8723a2ant_AgcTable(pBtCoexist, NORMAL_EXEC, FALSE);
+		halbtc8723a2ant_AdcBackOff(pBtCoexist, NORMAL_EXEC, FALSE);
+		halbtc8723a2ant_DacSwing(pBtCoexist, NORMAL_EXEC, FALSE, 0xc0);
+
+		bCommon = true;
+	}
+	else if(halbtc8723a2ant_IsWifiIdle(pBtCoexist) && 
+			(BT_8723A_2ANT_BT_STATUS_NON_IDLE == pCoexDm->btStatus) )
+	{
+		BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], Wifi idle + BT non-idle!!\n"));
+		
+		halbtc8723a2ant_LowPenaltyRa(pBtCoexist, NORMAL_EXEC, true);
+		halbtc8723a2ant_RfShrink(pBtCoexist, NORMAL_EXEC, true);
+		halbtc8723a2ant_CoexTable(pBtCoexist, NORMAL_EXEC, 0x55555555, 0xffff, 0x3);
+
+		halbtc8723a2ant_IgnoreWlanAct(pBtCoexist, NORMAL_EXEC, FALSE);
+		halbtc8723a2ant_PsTdma(pBtCoexist, NORMAL_EXEC, FALSE, 0);
+		halbtc8723a2ant_FwDacSwingLvl(pBtCoexist, NORMAL_EXEC, 0x20);
+		halbtc8723a2ant_DecBtPwr(pBtCoexist, NORMAL_EXEC, FALSE);
+
+		halbtc8723a2ant_AgcTable(pBtCoexist, NORMAL_EXEC, FALSE);
+		halbtc8723a2ant_AdcBackOff(pBtCoexist, NORMAL_EXEC, FALSE);
+		halbtc8723a2ant_DacSwing(pBtCoexist, NORMAL_EXEC, FALSE, 0xc0);
+		
+		bCommon = true;
+	}
+	else
+	{
+		BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], Wifi non-idle + BT non-idle!!\n"));
+		halbtc8723a2ant_LowPenaltyRa(pBtCoexist, NORMAL_EXEC, true);
+		halbtc8723a2ant_RfShrink(pBtCoexist, NORMAL_EXEC, true);
+		halbtc8723a2ant_IgnoreWlanAct(pBtCoexist, NORMAL_EXEC, FALSE);
+		halbtc8723a2ant_FwDacSwingLvl(pBtCoexist, NORMAL_EXEC, 0x20);
+		
+		bCommon = FALSE;
+	}
+	
+	return bCommon;
+}
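+
+// Adaptive PS-TDMA duration tuning.  The static up/dn/m/n/WaitCount counters
+// track the BT retry count from BT_Info: long runs with no retries widen the
+// WiFi duration (result = +1), repeated retries narrow it (result = -1), and
+// the result is then mapped onto the next PS-TDMA type for the chosen
+// maxInterval.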
+VOID
+halbtc8723a2ant_TdmaDurationAdjust(
+	IN	PBTC_COEXIST		pBtCoexist,
+	IN	BOOLEAN			bScoHid,
+	IN	BOOLEAN			bTxPause,
+	IN	u1Byte			maxInterval
+	)
+{
+	static s4Byte		up,dn,m,n,WaitCount;
+	s4Byte			result;   //0: no change, +1: increase WiFi duration, -1: decrease WiFi duration
+	u1Byte			retryCount=0;
+
+	BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW, ("[BTCoex], TdmaDurationAdjust()\n"));
+
+	if(pCoexDm->bResetTdmaAdjust)
+	{
+		pCoexDm->bResetTdmaAdjust = FALSE;
+		BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_DETAIL, ("[BTCoex], first run TdmaDurationAdjust()!!\n"));
+		{
+			if(bScoHid)
+			{
+				if(bTxPause)
+				{
+					if(maxInterval == 1)
+					{
+						halbtc8723a2ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 13);
+						pCoexDm->psTdmaDuAdjType = 13;	
+					}
+					else if(maxInterval == 2)
+					{
+						halbtc8723a2ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 14);
+						pCoexDm->psTdmaDuAdjType = 14;	
+					}
+					else if(maxInterval == 3)
+					{
+						halbtc8723a2ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 15);
+						pCoexDm->psTdmaDuAdjType = 15;	
+					}
+					else
+					{
+						halbtc8723a2ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 15);
+						pCoexDm->psTdmaDuAdjType = 15;
+					}
+				}
+				else
+				{
+					if(maxInterval == 1)
+					{
+						halbtc8723a2ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 9);
+						pCoexDm->psTdmaDuAdjType = 9;	
+					}
+					else if(maxInterval == 2)
+					{
+						halbtc8723a2ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 10);
+						pCoexDm->psTdmaDuAdjType = 10;	
+					}
+					else if(maxInterval == 3)
+					{
+						halbtc8723a2ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 11);
+						pCoexDm->psTdmaDuAdjType = 11;
+					}
+					else
+					{
+						halbtc8723a2ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 11);
+						pCoexDm->psTdmaDuAdjType = 11;
+					}
+				}
+			}
+			else
+			{
+				if(bTxPause)
+				{
+					if(maxInterval == 1)
+					{
+						halbtc8723a2ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 5);
+						pCoexDm->psTdmaDuAdjType = 5;	
+					}
+					else if(maxInterval == 2)
+					{
+						halbtc8723a2ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 6);
+						pCoexDm->psTdmaDuAdjType = 6;	
+					}
+					else if(maxInterval == 3)
+					{
+						halbtc8723a2ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 7);
+						pCoexDm->psTdmaDuAdjType = 7;
+					}
+					else
+					{
+						halbtc8723a2ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 7);
+						pCoexDm->psTdmaDuAdjType = 7;
+					}
+				}
+				else
+				{
+					if(maxInterval == 1)
+					{
+						halbtc8723a2ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 1);
+						pCoexDm->psTdmaDuAdjType = 1;	
+					}
+					else if(maxInterval == 2)
+					{
+						halbtc8723a2ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 2);
+						pCoexDm->psTdmaDuAdjType = 2;	
+					}
+					else if(maxInterval == 3)
+					{
+						halbtc8723a2ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 3);
+						pCoexDm->psTdmaDuAdjType = 3;
+					}
+					else
+					{
+						halbtc8723a2ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 3);
+						pCoexDm->psTdmaDuAdjType = 3;
+					}
+				}
+			}
+		}
+		//============
+		up = 0;
+		dn = 0;
+		m = 1;
+		n= 3;
+		result = 0;
+		WaitCount = 0;
+	}
+	else
+	{
+		// acquire the BT TRx retry count from BT_Info byte2
+		retryCount = pCoexSta->btRetryCnt;
+		BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_DETAIL, ("[BTCoex], retryCount = %d\n", retryCount));
+		BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_DETAIL, ("[BTCoex], up=%d, dn=%d, m=%d, n=%d, WaitCount=%d\n", 
+			up, dn, m, n, WaitCount));
+		result = 0;
+		WaitCount++; 
+		  
+		if(retryCount == 0)  // no retry in the last 2-second duration
+		{
+			up++;
+			dn--;
+
+			if (dn <= 0)
+				dn = 0;				 
+
+			if(up >= n)	// retry count has been 0 for n consecutive 2-second intervals, so widen the WiFi duration
+			{
+				WaitCount = 0; 
+				n = 3;
+				up = 0;
+				dn = 0;
+				result = 1; 
+				BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_DETAIL, ("[BTCoex], Increase wifi duration!!\n"));
+			}
+		}
+		else if (retryCount <= 3)	// <=3 retry in the last 2-second duration
+		{
+			up--; 
+			dn++;
+
+			if (up <= 0)
+				up = 0;
+
+			if (dn == 2)	// retry count has been < 3 for 2 consecutive 2-second intervals, so narrow the WiFi duration
+			{
+				if (WaitCount <= 2)
+					m++; // avoid bouncing back and forth between two levels
+				else
+					m = 1;
+
+				if ( m >= 20) // cap m at 20, i.e. recheck at most every 120 seconds whether to adjust the WiFi duration
+					m = 20;
+
+				n = 3*m;
+				up = 0;
+				dn = 0;
+				WaitCount = 0;
+				result = -1; 
+				BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_DETAIL, ("[BTCoex], Decrease wifi duration for retryCounter<3!!\n"));
+			}
+		}
+		else  // retry count > 3: a single interval with retry count > 3 is enough to narrow the WiFi duration
+		{
+			if (WaitCount == 1)
+				m++; // avoid bouncing back and forth between two levels
+			else
+				m = 1;
+
+			if ( m >= 20) // cap m at 20, i.e. recheck at most every 120 seconds whether to adjust the WiFi duration
+				m = 20;
+
+			n = 3*m;
+			up = 0;
+			dn = 0;
+			WaitCount = 0; 
+			result = -1;
+			BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_DETAIL, ("[BTCoex], Decrease wifi duration for retryCounter>3!!\n"));
+		}
+
+		BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_DETAIL, ("[BTCoex], max Interval = %d\n", maxInterval));
+		if(maxInterval == 1)
+		{
+			if(bTxPause)
+			{
+				BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_DETAIL, ("[BTCoex], TxPause = 1\n"));
+
+				if(pCoexDm->curPsTdma == 1)
+				{
+					halbtc8723a2ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 5);
+					pCoexDm->psTdmaDuAdjType = 5;
+				}
+				else if(pCoexDm->curPsTdma == 2)
+				{
+					halbtc8723a2ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 6);
+					pCoexDm->psTdmaDuAdjType = 6;
+				}
+				else if(pCoexDm->curPsTdma == 3)
+				{
+					halbtc8723a2ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 7);
+					pCoexDm->psTdmaDuAdjType = 7;
+				}
+				else if(pCoexDm->curPsTdma == 4)
+				{
+					halbtc8723a2ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 8);
+					pCoexDm->psTdmaDuAdjType = 8;
+				}
+				if(pCoexDm->curPsTdma == 9)
+				{
+					halbtc8723a2ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 13);
+					pCoexDm->psTdmaDuAdjType = 13;
+				}
+				else if(pCoexDm->curPsTdma == 10)
+				{
+					halbtc8723a2ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 14);
+					pCoexDm->psTdmaDuAdjType = 14;
+				}
+				else if(pCoexDm->curPsTdma == 11)
+				{
+					halbtc8723a2ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 15);
+					pCoexDm->psTdmaDuAdjType = 15;
+				}
+				else if(pCoexDm->curPsTdma == 12)
+				{
+					halbtc8723a2ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 16);
+					pCoexDm->psTdmaDuAdjType = 16;
+				}
+				
+				if(result == -1)
+				{					
+					if(pCoexDm->curPsTdma == 5)
+					{
+						halbtc8723a2ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 6);
+						pCoexDm->psTdmaDuAdjType = 6;
+					}
+					else if(pCoexDm->curPsTdma == 6)
+					{
+						halbtc8723a2ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 7);
+						pCoexDm->psTdmaDuAdjType = 7;
+					}
+					else if(pCoexDm->curPsTdma == 7)
+					{
+						halbtc8723a2ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 8);
+						pCoexDm->psTdmaDuAdjType = 8;
+					}
+					else if(pCoexDm->curPsTdma == 13)
+					{
+						halbtc8723a2ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 14);
+						pCoexDm->psTdmaDuAdjType = 14;
+					}
+					else if(pCoexDm->curPsTdma == 14)
+					{
+						halbtc8723a2ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 15);
+						pCoexDm->psTdmaDuAdjType = 15;
+					}
+					else if(pCoexDm->curPsTdma == 15)
+					{
+						halbtc8723a2ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 16);
+						pCoexDm->psTdmaDuAdjType = 16;
+					}
+				} 
+				else if (result == 1)
+				{
+					if(pCoexDm->curPsTdma == 8)
+					{
+						halbtc8723a2ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 7);
+						pCoexDm->psTdmaDuAdjType = 7;
+					}
+					else if(pCoexDm->curPsTdma == 7)
+					{
+						halbtc8723a2ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 6);
+						pCoexDm->psTdmaDuAdjType = 6;
+					}
+					else if(pCoexDm->curPsTdma == 6)
+					{
+						halbtc8723a2ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 5);
+						pCoexDm->psTdmaDuAdjType = 5;
+					}
+					else if(pCoexDm->curPsTdma == 16)
+					{
+						halbtc8723a2ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 15);
+						pCoexDm->psTdmaDuAdjType = 15;
+					}
+					else if(pCoexDm->curPsTdma == 15)
+					{
+						halbtc8723a2ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 14);
+						pCoexDm->psTdmaDuAdjType = 14;
+					}
+					else if(pCoexDm->curPsTdma == 14)
+					{
+						halbtc8723a2ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 13);
+						pCoexDm->psTdmaDuAdjType = 13;
+					}
+				}
+			}
+			else
+			{
+				BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_DETAIL, ("[BTCoex], TxPause = 0\n"));
+				if(pCoexDm->curPsTdma == 5)
+				{
+					halbtc8723a2ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 1);
+					pCoexDm->psTdmaDuAdjType = 1;
+				}
+				else if(pCoexDm->curPsTdma == 6)
+				{
+					halbtc8723a2ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 2);
+					pCoexDm->psTdmaDuAdjType = 2;
+				}
+				else if(pCoexDm->curPsTdma == 7)
+				{
+					halbtc8723a2ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 3);
+					pCoexDm->psTdmaDuAdjType = 3;
+				}
+				else if(pCoexDm->curPsTdma == 8)
+				{
+					halbtc8723a2ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 4);
+					pCoexDm->psTdmaDuAdjType = 4;
+				}
+				if(pCoexDm->curPsTdma == 13)
+				{
+					halbtc8723a2ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 9);
+					pCoexDm->psTdmaDuAdjType = 9;
+				}
+				else if(pCoexDm->curPsTdma == 14)
+				{
+					halbtc8723a2ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 10);
+					pCoexDm->psTdmaDuAdjType = 10;
+				}
+				else if(pCoexDm->curPsTdma == 15)
+				{
+					halbtc8723a2ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 11);
+					pCoexDm->psTdmaDuAdjType = 11;
+				}
+				else if(pCoexDm->curPsTdma == 16)
+				{
+					halbtc8723a2ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 12);
+					pCoexDm->psTdmaDuAdjType = 12;
+				}
+				
+				if(result == -1)
+				{
+					if(pCoexDm->curPsTdma == 1)
+					{
+						halbtc8723a2ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 2);
+						pCoexDm->psTdmaDuAdjType = 2;
+					}
+					else if(pCoexDm->curPsTdma == 2)
+					{
+						halbtc8723a2ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 3);
+						pCoexDm->psTdmaDuAdjType = 3;
+					}
+					else if(pCoexDm->curPsTdma == 3)
+					{
+						halbtc8723a2ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 4);
+						pCoexDm->psTdmaDuAdjType = 4;
+					}
+					else if(pCoexDm->curPsTdma == 9)
+					{
+						halbtc8723a2ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 10);
+						pCoexDm->psTdmaDuAdjType = 10;
+					}
+					else if(pCoexDm->curPsTdma == 10)
+					{
+						halbtc8723a2ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 11);
+						pCoexDm->psTdmaDuAdjType = 11;
+					}
+					else if(pCoexDm->curPsTdma == 11)
+					{
+						halbtc8723a2ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 12);
+						pCoexDm->psTdmaDuAdjType = 12;
+					}
+				} 
+				else if (result == 1)
+				{
+					if(pCoexDm->curPsTdma == 4)
+					{
+						halbtc8723a2ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 3);
+						pCoexDm->psTdmaDuAdjType = 3;
+					}
+					else if(pCoexDm->curPsTdma == 3)
+					{
+						halbtc8723a2ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 2);
+						pCoexDm->psTdmaDuAdjType = 2;
+					}
+					else if(pCoexDm->curPsTdma == 2)
+					{
+						halbtc8723a2ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 1);
+						pCoexDm->psTdmaDuAdjType = 1;
+					}
+					else if(pCoexDm->curPsTdma == 12)
+					{
+						halbtc8723a2ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 11);
+						pCoexDm->psTdmaDuAdjType = 11;
+					}
+					else if(pCoexDm->curPsTdma == 11)
+					{
+						halbtc8723a2ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 10);
+						pCoexDm->psTdmaDuAdjType = 10;
+					}
+					else if(pCoexDm->curPsTdma == 10)
+					{
+						halbtc8723a2ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 9);
+						pCoexDm->psTdmaDuAdjType = 9;
+					}
+				}
+			}
+		}
+		else if(maxInterval == 2)
+		{
+			if(bTxPause)
+			{
+				BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_DETAIL, ("[BTCoex], TxPause = 1\n"));
+				if(pCoexDm->curPsTdma == 1)
+				{
+					halbtc8723a2ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 6);
+					pCoexDm->psTdmaDuAdjType = 6;
+				}
+				else if(pCoexDm->curPsTdma == 2)
+				{
+					halbtc8723a2ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 6);
+					pCoexDm->psTdmaDuAdjType = 6;
+				}
+				else if(pCoexDm->curPsTdma == 3)
+				{
+					halbtc8723a2ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 7);
+					pCoexDm->psTdmaDuAdjType = 7;
+				}
+				else if(pCoexDm->curPsTdma == 4)
+				{
+					halbtc8723a2ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 8);
+					pCoexDm->psTdmaDuAdjType = 8;
+				}
+				if(pCoexDm->curPsTdma == 9)
+				{
+					halbtc8723a2ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 14);
+					pCoexDm->psTdmaDuAdjType = 14;
+				}
+				else if(pCoexDm->curPsTdma == 10)
+				{
+					halbtc8723a2ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 14);
+					pCoexDm->psTdmaDuAdjType = 14;
+				}
+				else if(pCoexDm->curPsTdma == 11)
+				{
+					halbtc8723a2ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 15);
+					pCoexDm->psTdmaDuAdjType = 15;
+				}
+				else if(pCoexDm->curPsTdma == 12)
+				{
+					halbtc8723a2ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 16);
+					pCoexDm->psTdmaDuAdjType = 16;
+				}
+				if(result == -1)
+				{
+					if(pCoexDm->curPsTdma == 5) 
+					{
+						halbtc8723a2ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 6);
+						pCoexDm->psTdmaDuAdjType = 6;
+					}
+					else if(pCoexDm->curPsTdma == 6)
+					{
+						halbtc8723a2ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 7);
+						pCoexDm->psTdmaDuAdjType = 7;
+					}
+					else if(pCoexDm->curPsTdma == 7)
+					{
+						halbtc8723a2ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 8);
+						pCoexDm->psTdmaDuAdjType = 8;
+					}
+					else if(pCoexDm->curPsTdma == 13)
+					{
+						halbtc8723a2ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 14);
+						pCoexDm->psTdmaDuAdjType = 14;
+					}
+					else if(pCoexDm->curPsTdma == 14)
+					{
+						halbtc8723a2ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 15);
+						pCoexDm->psTdmaDuAdjType = 15;
+					}
+					else if(pCoexDm->curPsTdma == 15)
+					{
+						halbtc8723a2ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 16);
+						pCoexDm->psTdmaDuAdjType = 16;
+					}
+				} 
+				else if (result == 1)
+				{
+					if(pCoexDm->curPsTdma == 8)
+					{
+						halbtc8723a2ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 7);
+						pCoexDm->psTdmaDuAdjType = 7;
+					}
+					else if(pCoexDm->curPsTdma == 7)
+					{
+						halbtc8723a2ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 6);
+						pCoexDm->psTdmaDuAdjType = 6;
+					}
+					else if(pCoexDm->curPsTdma == 6)
+					{
+						halbtc8723a2ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 6);
+						pCoexDm->psTdmaDuAdjType = 6;
+					}					
+					else if(pCoexDm->curPsTdma == 16)
+					{
+						halbtc8723a2ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 15);
+						pCoexDm->psTdmaDuAdjType = 15;
+					}
+					else if(pCoexDm->curPsTdma == 15)
+					{
+						halbtc8723a2ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 14);
+						pCoexDm->psTdmaDuAdjType = 14;
+					}
+					else if(pCoexDm->curPsTdma == 14)
+					{
+						halbtc8723a2ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 14);
+						pCoexDm->psTdmaDuAdjType = 14;
+					}
+				}
+			}
+			else
+			{
+				BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_DETAIL, ("[BTCoex], TxPause = 0\n"));
+				if(pCoexDm->curPsTdma == 5)
+				{
+					halbtc8723a2ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 2);
+					pCoexDm->psTdmaDuAdjType = 2;
+				}
+				else if(pCoexDm->curPsTdma == 6)
+				{
+					halbtc8723a2ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 2);
+					pCoexDm->psTdmaDuAdjType = 2;
+				}
+				else if(pCoexDm->curPsTdma == 7)
+				{
+					halbtc8723a2ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 3);
+					pCoexDm->psTdmaDuAdjType = 3;
+				}
+				else if(pCoexDm->curPsTdma == 8)
+				{
+					halbtc8723a2ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 4);
+					pCoexDm->psTdmaDuAdjType = 4;
+				}
+				if(pCoexDm->curPsTdma == 13)
+				{
+					halbtc8723a2ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 10);
+					pCoexDm->psTdmaDuAdjType = 10;
+				}
+				else if(pCoexDm->curPsTdma == 14)
+				{
+					halbtc8723a2ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 10);
+					pCoexDm->psTdmaDuAdjType = 10;
+				}
+				else if(pCoexDm->curPsTdma == 15)
+				{
+					halbtc8723a2ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 11);
+					pCoexDm->psTdmaDuAdjType = 11;
+				}
+				else if(pCoexDm->curPsTdma == 16)
+				{
+					halbtc8723a2ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 12);
+					pCoexDm->psTdmaDuAdjType = 12;
+				}
+				if(result == -1)
+				{
+					if(pCoexDm->curPsTdma == 1)
+					{
+						halbtc8723a2ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 2);
+						pCoexDm->psTdmaDuAdjType = 2;
+					}
+					else if(pCoexDm->curPsTdma == 2)
+					{
+						halbtc8723a2ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 3);
+						pCoexDm->psTdmaDuAdjType = 3;
+					}
+					else if(pCoexDm->curPsTdma == 3)
+					{
+						halbtc8723a2ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 4);
+						pCoexDm->psTdmaDuAdjType = 4;
+					}
+					else if(pCoexDm->curPsTdma == 9)
+					{
+						halbtc8723a2ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 10);
+						pCoexDm->psTdmaDuAdjType = 10;
+					}
+					else if(pCoexDm->curPsTdma == 10)
+					{
+						halbtc8723a2ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 11);
+						pCoexDm->psTdmaDuAdjType = 11;
+					}
+					else if(pCoexDm->curPsTdma == 11)
+					{
+						halbtc8723a2ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 12);
+						pCoexDm->psTdmaDuAdjType = 12;
+					}
+				} 
+				else if (result == 1)
+				{
+					if(pCoexDm->curPsTdma == 4)
+					{
+						halbtc8723a2ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 3);
+						pCoexDm->psTdmaDuAdjType = 3;
+					}
+					else if(pCoexDm->curPsTdma == 3)
+					{
+						halbtc8723a2ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 2);
+						pCoexDm->psTdmaDuAdjType = 2;
+					}
+					else if(pCoexDm->curPsTdma == 2)
+					{
+						halbtc8723a2ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 2);
+						pCoexDm->psTdmaDuAdjType = 2;
+					}
+					else if(pCoexDm->curPsTdma == 12)
+					{
+						halbtc8723a2ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 11);
+						pCoexDm->psTdmaDuAdjType = 11;
+					}
+					else if(pCoexDm->curPsTdma == 11)
+					{
+						halbtc8723a2ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 10);
+						pCoexDm->psTdmaDuAdjType = 10;
+					}
+					else if(pCoexDm->curPsTdma == 10)
+					{
+						halbtc8723a2ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 10);
+						pCoexDm->psTdmaDuAdjType = 10;
+					}
+				}
+			}
+		}
+		else if(maxInterval == 3)
+		{
+			if(bTxPause)
+			{
+				BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_DETAIL, ("[BTCoex], TxPause = 1\n"));
+				if(pCoexDm->curPsTdma == 1)
+				{
+					halbtc8723a2ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 7);
+					pCoexDm->psTdmaDuAdjType = 7;
+				}
+				else if(pCoexDm->curPsTdma == 2)
+				{
+					halbtc8723a2ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 7);
+					pCoexDm->psTdmaDuAdjType = 7;
+				}
+				else if(pCoexDm->curPsTdma == 3)
+				{
+					halbtc8723a2ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 7);
+					pCoexDm->psTdmaDuAdjType = 7;
+				}
+				else if(pCoexDm->curPsTdma == 4)
+				{
+					halbtc8723a2ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 8);
+					pCoexDm->psTdmaDuAdjType = 8;
+				}
+				if(pCoexDm->curPsTdma == 9)
+				{
+					halbtc8723a2ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 15);
+					pCoexDm->psTdmaDuAdjType = 15;
+				}
+				else if(pCoexDm->curPsTdma == 10)
+				{
+					halbtc8723a2ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 15);
+					pCoexDm->psTdmaDuAdjType = 15;
+				}
+				else if(pCoexDm->curPsTdma == 11)
+				{
+					halbtc8723a2ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 15);
+					pCoexDm->psTdmaDuAdjType = 15;
+				}
+				else if(pCoexDm->curPsTdma == 12)
+				{
+					halbtc8723a2ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 16);
+					pCoexDm->psTdmaDuAdjType = 16;
+				}
+				if(result == -1)
+				{
+					if(pCoexDm->curPsTdma == 5) 
+					{
+						halbtc8723a2ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 7);
+						pCoexDm->psTdmaDuAdjType = 7;
+					}
+					else if(pCoexDm->curPsTdma == 6)
+					{
+						halbtc8723a2ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 7);
+						pCoexDm->psTdmaDuAdjType = 7;
+					}
+					else if(pCoexDm->curPsTdma == 7)
+					{
+						halbtc8723a2ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 8);
+						pCoexDm->psTdmaDuAdjType = 8;
+					}
+					else if(pCoexDm->curPsTdma == 13)
+					{
+						halbtc8723a2ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 15);
+						pCoexDm->psTdmaDuAdjType = 15;
+					}
+					else if(pCoexDm->curPsTdma == 14)
+					{
+						halbtc8723a2ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 15);
+						pCoexDm->psTdmaDuAdjType = 15;
+					}
+					else if(pCoexDm->curPsTdma == 15)
+					{
+						halbtc8723a2ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 16);
+						pCoexDm->psTdmaDuAdjType = 16;
+					}
+				} 
+				else if (result == 1)
+				{
+					if(pCoexDm->curPsTdma == 8)
+					{
+						halbtc8723a2ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 7);
+						pCoexDm->psTdmaDuAdjType = 7;
+					}
+					else if(pCoexDm->curPsTdma == 7)
+					{
+						halbtc8723a2ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 7);
+						pCoexDm->psTdmaDuAdjType = 7;
+					}
+					else if(pCoexDm->curPsTdma == 6)
+					{
+						halbtc8723a2ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 7);
+						pCoexDm->psTdmaDuAdjType = 7;
+					}					
+					else if(pCoexDm->curPsTdma == 16)
+					{
+						halbtc8723a2ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 15);
+						pCoexDm->psTdmaDuAdjType = 15;
+					}
+					else if(pCoexDm->curPsTdma == 15)
+					{
+						halbtc8723a2ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 15);
+						pCoexDm->psTdmaDuAdjType = 15;
+					}
+					else if(pCoexDm->curPsTdma == 14)
+					{
+						halbtc8723a2ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 15);
+						pCoexDm->psTdmaDuAdjType = 15;
+					}
+				}
+			}
+			else
+			{
+				BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_DETAIL, ("[BTCoex], TxPause = 0\n"));
+				if(pCoexDm->curPsTdma == 5)
+				{
+					halbtc8723a2ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 3);
+					pCoexDm->psTdmaDuAdjType = 3;
+				}
+				else if(pCoexDm->curPsTdma == 6)
+				{
+					halbtc8723a2ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 3);
+					pCoexDm->psTdmaDuAdjType = 3;
+				}
+				else if(pCoexDm->curPsTdma == 7)
+				{
+					halbtc8723a2ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 3);
+					pCoexDm->psTdmaDuAdjType = 3;
+				}
+				else if(pCoexDm->curPsTdma == 8)
+				{
+					halbtc8723a2ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 4);
+					pCoexDm->psTdmaDuAdjType = 4;
+				}
+				if(pCoexDm->curPsTdma == 13)
+				{
+					halbtc8723a2ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 11);
+					pCoexDm->psTdmaDuAdjType = 11;
+				}
+				else if(pCoexDm->curPsTdma == 14)
+				{
+					halbtc8723a2ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 11);
+					pCoexDm->psTdmaDuAdjType = 11;
+				}
+				else if(pCoexDm->curPsTdma == 15)
+				{
+					halbtc8723a2ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 11);
+					pCoexDm->psTdmaDuAdjType = 11;
+				}
+				else if(pCoexDm->curPsTdma == 16)
+				{
+					halbtc8723a2ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 12);
+					pCoexDm->psTdmaDuAdjType = 12;
+				}
+				if(result == -1)
+				{
+					if(pCoexDm->curPsTdma == 1)
+					{
+						halbtc8723a2ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 3);
+						pCoexDm->psTdmaDuAdjType = 3;
+					}
+					else if(pCoexDm->curPsTdma == 2)
+					{
+						halbtc8723a2ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 3);
+						pCoexDm->psTdmaDuAdjType = 3;
+					}
+					else if(pCoexDm->curPsTdma == 3)
+					{
+						halbtc8723a2ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 4);
+						pCoexDm->psTdmaDuAdjType = 4;
+					}
+					else if(pCoexDm->curPsTdma == 9)
+					{
+						halbtc8723a2ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 11);
+						pCoexDm->psTdmaDuAdjType = 11;
+					}
+					else if(pCoexDm->curPsTdma == 10)
+					{
+						halbtc8723a2ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 11);
+						pCoexDm->psTdmaDuAdjType = 11;
+					}
+					else if(pCoexDm->curPsTdma == 11)
+					{
+						halbtc8723a2ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 12);
+						pCoexDm->psTdmaDuAdjType = 12;
+					}
+				} 
+				else if (result == 1)
+				{
+					if(pCoexDm->curPsTdma == 4)
+					{
+						halbtc8723a2ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 3);
+						pCoexDm->psTdmaDuAdjType = 3;
+					}
+					else if(pCoexDm->curPsTdma == 3)
+					{
+						halbtc8723a2ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 3);
+						pCoexDm->psTdmaDuAdjType = 3;
+					}
+					else if(pCoexDm->curPsTdma == 2)
+					{
+						halbtc8723a2ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 3);
+						pCoexDm->psTdmaDuAdjType = 3;
+					}
+					else if(pCoexDm->curPsTdma == 12)
+					{
+						halbtc8723a2ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 11);
+						pCoexDm->psTdmaDuAdjType = 11;
+					}
+					else if(pCoexDm->curPsTdma == 11)
+					{
+						halbtc8723a2ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 11);
+						pCoexDm->psTdmaDuAdjType = 11;
+					}
+					else if(pCoexDm->curPsTdma == 10)
+					{
+						halbtc8723a2ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 11);
+						pCoexDm->psTdmaDuAdjType = 11;
+					}
+				}
+			}
+		}
+	}
+
+	// if the current PsTdma does not match the recorded one (e.g. during scan, dhcp...),
+	// then we have to adjust it back to the previously recorded one.
+	if(pCoexDm->curPsTdma != pCoexDm->psTdmaDuAdjType)
+	{
+		BOOLEAN	bScan=FALSE, bLink=FALSE, bRoam=FALSE;
+		BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_DETAIL, ("[BTCoex], PsTdma type mismatch!!!, curPsTdma=%d, recordPsTdma=%d\n", 
+			pCoexDm->curPsTdma, pCoexDm->psTdmaDuAdjType));
+
+		pBtCoexist->btc_get(pBtCoexist, BTC_GET_BL_WIFI_SCAN, &bScan);
+		pBtCoexist->btc_get(pBtCoexist, BTC_GET_BL_WIFI_LINK, &bLink);
+		pBtCoexist->btc_get(pBtCoexist, BTC_GET_BL_WIFI_ROAM, &bRoam);
+		
+		if( !bScan && !bLink && !bRoam)
+		{
+			halbtc8723a2ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, pCoexDm->psTdmaDuAdjType);
+		}
+		else
+		{
+			BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_DETAIL, ("[BTCoex], roaming/link/scan is under progress, will adjust next time!!!\n"));
+		}
+	}
+}
+
+// SCO only or SCO+PAN(HS)
+VOID
+halbtc8723a2ant_ActionSco(
+	IN	PBTC_COEXIST		pBtCoexist
+	)
+{
+	u1Byte	wifiRssiState, wifiRssiState1;
+	u4Byte	wifiBw;
+
+	if(halbtc8723a2ant_NeedToDecBtPwr(pBtCoexist))
+		halbtc8723a2ant_DecBtPwr(pBtCoexist, NORMAL_EXEC, true);
+	else	
+		halbtc8723a2ant_DecBtPwr(pBtCoexist, NORMAL_EXEC, FALSE);
+	halbtc8723a2ant_CoexTable(pBtCoexist, NORMAL_EXEC, 0x55555555, 0xffff, 0x3);
+	
+	pBtCoexist->btc_get(pBtCoexist, BTC_GET_U4_WIFI_BW, &wifiBw);
+	if(BTC_WIFI_BW_HT40 == wifiBw)
+	{
+		wifiRssiState = halbtc8723a2ant_WifiRssiState(pBtCoexist, 0, 2, 37, 0);
+		// fw mechanism
+		if( (wifiRssiState == BTC_RSSI_STATE_HIGH) ||
+			(wifiRssiState == BTC_RSSI_STATE_STAY_HIGH) )
+		{
+			halbtc8723a2ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 11);
+		}
+		else
+		{
+			halbtc8723a2ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 15);
+		}
+
+		// sw mechanism		
+		halbtc8723a2ant_AgcTable(pBtCoexist, NORMAL_EXEC, FALSE);
+		halbtc8723a2ant_AdcBackOff(pBtCoexist, NORMAL_EXEC, true);
+		halbtc8723a2ant_DacSwing(pBtCoexist, NORMAL_EXEC, FALSE, 0xc0);
+	}
+	else
+	{
+		wifiRssiState = halbtc8723a2ant_WifiRssiState(pBtCoexist, 0, 2, 27, 0);
+		wifiRssiState1 = halbtc8723a2ant_WifiRssiState(pBtCoexist, 1, 2, 47, 0);
+		
+		// fw mechanism
+		if( (wifiRssiState == BTC_RSSI_STATE_HIGH) ||
+			(wifiRssiState == BTC_RSSI_STATE_STAY_HIGH) )
+		{
+			halbtc8723a2ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 11);
+		}
+		else
+		{
+			halbtc8723a2ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 15);
+		}
+		
+		// sw mechanism
+		if( (wifiRssiState1 == BTC_RSSI_STATE_HIGH) ||
+			(wifiRssiState1 == BTC_RSSI_STATE_STAY_HIGH) )
+		{
+			halbtc8723a2ant_AgcTable(pBtCoexist, NORMAL_EXEC, true);
+			halbtc8723a2ant_AdcBackOff(pBtCoexist, NORMAL_EXEC, true);
+			halbtc8723a2ant_DacSwing(pBtCoexist, NORMAL_EXEC, FALSE, 0xc0);
+		}
+		else
+		{
+			halbtc8723a2ant_AgcTable(pBtCoexist, NORMAL_EXEC, FALSE);
+			halbtc8723a2ant_AdcBackOff(pBtCoexist, NORMAL_EXEC, FALSE);
+			halbtc8723a2ant_DacSwing(pBtCoexist, NORMAL_EXEC, FALSE, 0xc0);
+		}		
+	}
+}
+
+
+VOID
+halbtc8723a2ant_ActionHid(
+	IN	PBTC_COEXIST		pBtCoexist
+	)
+{
+	u1Byte	wifiRssiState, wifiRssiState1;
+	u4Byte	wifiBw;
+
+	if(halbtc8723a2ant_NeedToDecBtPwr(pBtCoexist))
+		halbtc8723a2ant_DecBtPwr(pBtCoexist, NORMAL_EXEC, true);
+	else	
+		halbtc8723a2ant_DecBtPwr(pBtCoexist, NORMAL_EXEC, FALSE);
+	halbtc8723a2ant_CoexTable(pBtCoexist, NORMAL_EXEC, 0x55555555, 0xffff, 0x3);
+
+	pBtCoexist->btc_get(pBtCoexist, BTC_GET_U4_WIFI_BW, &wifiBw);
+	if(BTC_WIFI_BW_HT40 == wifiBw)
+	{
+		wifiRssiState = halbtc8723a2ant_WifiRssiState(pBtCoexist, 0, 2, 37, 0);
+		// fw mechanism
+		if( (wifiRssiState == BTC_RSSI_STATE_HIGH) ||
+			(wifiRssiState == BTC_RSSI_STATE_STAY_HIGH) )
+		{
+			halbtc8723a2ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 9);
+		}
+		else
+		{
+			halbtc8723a2ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 13);
+		}
+
+		// sw mechanism
+		halbtc8723a2ant_AgcTable(pBtCoexist, NORMAL_EXEC, FALSE);
+		halbtc8723a2ant_AdcBackOff(pBtCoexist, NORMAL_EXEC, FALSE);
+		halbtc8723a2ant_DacSwing(pBtCoexist, NORMAL_EXEC, FALSE, 0xc0);
+	}
+	else
+	{
+		wifiRssiState = halbtc8723a2ant_WifiRssiState(pBtCoexist, 0, 2, 27, 0);
+		wifiRssiState1 = halbtc8723a2ant_WifiRssiState(pBtCoexist, 1, 2, 47, 0);
+
+		// fw mechanism
+		if( (wifiRssiState == BTC_RSSI_STATE_HIGH) ||
+			(wifiRssiState == BTC_RSSI_STATE_STAY_HIGH) )
+		{
+			halbtc8723a2ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 9);
+		}
+		else
+		{
+			halbtc8723a2ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 13);
+		}
+
+		// sw mechanism
+		if( (wifiRssiState1 == BTC_RSSI_STATE_HIGH) ||
+			(wifiRssiState1 == BTC_RSSI_STATE_STAY_HIGH) )
+		{
+			halbtc8723a2ant_AgcTable(pBtCoexist, NORMAL_EXEC, true);
+			halbtc8723a2ant_AdcBackOff(pBtCoexist, NORMAL_EXEC, true);
+			halbtc8723a2ant_DacSwing(pBtCoexist, NORMAL_EXEC, FALSE, 0xc0);
+		}
+		else
+		{
+			halbtc8723a2ant_AgcTable(pBtCoexist, NORMAL_EXEC, FALSE);
+			halbtc8723a2ant_AdcBackOff(pBtCoexist, NORMAL_EXEC, FALSE);
+			halbtc8723a2ant_DacSwing(pBtCoexist, NORMAL_EXEC, FALSE, 0xc0);
+		}		
+	}
+}
+
+//A2DP only / PAN(EDR) only/ A2DP+PAN(HS)
+VOID
+halbtc8723a2ant_ActionA2dp(
+	IN	PBTC_COEXIST		pBtCoexist
+	)
+{
+	u1Byte		wifiRssiState, wifiRssiState1, btInfoExt;
+	u4Byte		wifiBw;
+
+	btInfoExt = pCoexSta->btInfoExt;
+
+	if(halbtc8723a2ant_NeedToDecBtPwr(pBtCoexist))
+		halbtc8723a2ant_DecBtPwr(pBtCoexist, NORMAL_EXEC, true);
+	else	
+		halbtc8723a2ant_DecBtPwr(pBtCoexist, NORMAL_EXEC, FALSE);
+	halbtc8723a2ant_CoexTable(pBtCoexist, NORMAL_EXEC, 0x55555555, 0xffff, 0x3);
+	
+	pBtCoexist->btc_get(pBtCoexist, BTC_GET_U4_WIFI_BW, &wifiBw);
+	if(BTC_WIFI_BW_HT40 == wifiBw)
+	{
+		wifiRssiState = halbtc8723a2ant_WifiRssiState(pBtCoexist, 0, 2, 37, 0);
+
+		// fw mechanism
+		if( (wifiRssiState == BTC_RSSI_STATE_HIGH) ||
+			(wifiRssiState == BTC_RSSI_STATE_STAY_HIGH) )
+		{
+			if(btInfoExt&BIT0)	//a2dp rate, 1:basic /0:edr
+			{
+				halbtc8723a2ant_TdmaDurationAdjust(pBtCoexist, FALSE, FALSE, 3);
+			}
+			else
+			{
+				halbtc8723a2ant_TdmaDurationAdjust(pBtCoexist, FALSE, FALSE, 1);
+			}
+		}
+		else
+		{
+			if(btInfoExt&BIT0)	//a2dp rate, 1:basic /0:edr
+			{
+				halbtc8723a2ant_TdmaDurationAdjust(pBtCoexist, FALSE, true, 3);
+			}
+			else
+			{
+				halbtc8723a2ant_TdmaDurationAdjust(pBtCoexist, FALSE, true, 1);
+			}
+		}
+
+		// sw mechanism		
+		halbtc8723a2ant_AgcTable(pBtCoexist, NORMAL_EXEC, FALSE);
+		halbtc8723a2ant_AdcBackOff(pBtCoexist, NORMAL_EXEC, true);
+		halbtc8723a2ant_DacSwing(pBtCoexist, NORMAL_EXEC, FALSE, 0xc0);
+	}
+	else
+	{
+		wifiRssiState = halbtc8723a2ant_WifiRssiState(pBtCoexist, 0, 2, 27, 0);
+		wifiRssiState1 = halbtc8723a2ant_WifiRssiState(pBtCoexist, 1, 2, 47, 0);
+		
+		// fw mechanism
+		if( (wifiRssiState == BTC_RSSI_STATE_HIGH) ||
+			(wifiRssiState == BTC_RSSI_STATE_STAY_HIGH) )
+		{
+			if(btInfoExt&BIT0)	//a2dp rate, 1:basic /0:edr
+			{
+				halbtc8723a2ant_TdmaDurationAdjust(pBtCoexist, FALSE, FALSE, 3);
+			}
+			else
+			{
+				halbtc8723a2ant_TdmaDurationAdjust(pBtCoexist, FALSE, FALSE, 1);
+			}
+		}
+		else
+		{
+			if(btInfoExt&BIT0)	//a2dp rate, 1:basic /0:edr
+			{
+				halbtc8723a2ant_TdmaDurationAdjust(pBtCoexist, FALSE, true, 3);
+			}
+			else
+			{
+				halbtc8723a2ant_TdmaDurationAdjust(pBtCoexist, FALSE, true, 1);
+			}
+		}
+		
+		// sw mechanism
+		if( (wifiRssiState1 == BTC_RSSI_STATE_HIGH) ||
+			(wifiRssiState1 == BTC_RSSI_STATE_STAY_HIGH) )
+		{
+			halbtc8723a2ant_AgcTable(pBtCoexist, NORMAL_EXEC, true);
+			halbtc8723a2ant_AdcBackOff(pBtCoexist, NORMAL_EXEC, true);
+			halbtc8723a2ant_DacSwing(pBtCoexist, NORMAL_EXEC, FALSE, 0xc0);
+		}
+		else
+		{
+			halbtc8723a2ant_AgcTable(pBtCoexist, NORMAL_EXEC, FALSE);
+			halbtc8723a2ant_AdcBackOff(pBtCoexist, NORMAL_EXEC, FALSE);
+			halbtc8723a2ant_DacSwing(pBtCoexist, NORMAL_EXEC, FALSE, 0xc0);
+		}		
+	}
+}
+
+VOID
+halbtc8723a2ant_ActionPanEdr(
+	IN	PBTC_COEXIST		pBtCoexist
+	)
+{
+	u1Byte		wifiRssiState, wifiRssiState1, btInfoExt;
+	u4Byte		wifiBw;
+	
+	btInfoExt = pCoexSta->btInfoExt;
+
+	if(halbtc8723a2ant_NeedToDecBtPwr(pBtCoexist))
+		halbtc8723a2ant_DecBtPwr(pBtCoexist, NORMAL_EXEC, true);
+	else	
+		halbtc8723a2ant_DecBtPwr(pBtCoexist, NORMAL_EXEC, FALSE);
+	halbtc8723a2ant_CoexTable(pBtCoexist, NORMAL_EXEC, 0x55555555, 0xffff, 0x3);
+
+	pBtCoexist->btc_get(pBtCoexist, BTC_GET_U4_WIFI_BW, &wifiBw);
+	if(BTC_WIFI_BW_HT40 == wifiBw)
+	{
+		wifiRssiState = halbtc8723a2ant_WifiRssiState(pBtCoexist, 0, 2, 37, 0);
+			
+		// fw mechanism
+		if( (wifiRssiState == BTC_RSSI_STATE_HIGH) ||
+			(wifiRssiState == BTC_RSSI_STATE_STAY_HIGH) )
+		{
+			halbtc8723a2ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 2);
+		}
+		else
+		{
+			halbtc8723a2ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 6);
+		}
+
+		// sw mechanism
+		halbtc8723a2ant_AgcTable(pBtCoexist, NORMAL_EXEC, FALSE);
+		halbtc8723a2ant_AdcBackOff(pBtCoexist, NORMAL_EXEC, true);
+		halbtc8723a2ant_DacSwing(pBtCoexist, NORMAL_EXEC, FALSE, 0xc0);
+	}
+	else
+	{
+		wifiRssiState = halbtc8723a2ant_WifiRssiState(pBtCoexist, 0, 2, 27, 0);
+		wifiRssiState1 = halbtc8723a2ant_WifiRssiState(pBtCoexist, 1, 2, 47, 0);
+		
+		// fw mechanism
+		if( (wifiRssiState == BTC_RSSI_STATE_HIGH) ||
+			(wifiRssiState == BTC_RSSI_STATE_STAY_HIGH) )
+		{
+			halbtc8723a2ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 2);
+		}
+		else
+		{
+			halbtc8723a2ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 6);
+		}
+
+		// sw mechanism
+		if( (wifiRssiState1 == BTC_RSSI_STATE_HIGH) ||
+			(wifiRssiState1 == BTC_RSSI_STATE_STAY_HIGH) )
+		{
+			halbtc8723a2ant_AgcTable(pBtCoexist, NORMAL_EXEC, true);
+			halbtc8723a2ant_AdcBackOff(pBtCoexist, NORMAL_EXEC, true);
+			halbtc8723a2ant_DacSwing(pBtCoexist, NORMAL_EXEC, FALSE, 0xc0);
+		}
+		else
+		{
+			halbtc8723a2ant_AgcTable(pBtCoexist, NORMAL_EXEC, FALSE);
+			halbtc8723a2ant_AdcBackOff(pBtCoexist, NORMAL_EXEC, FALSE);
+			halbtc8723a2ant_DacSwing(pBtCoexist, NORMAL_EXEC, FALSE, 0xc0);
+		}
+	}
+}
+
+
+//PAN(HS) only
+VOID
+halbtc8723a2ant_ActionPanHs(
+	IN	PBTC_COEXIST		pBtCoexist
+	)
+{
+	u1Byte		wifiRssiState;
+	u4Byte		wifiBw;
+
+	halbtc8723a2ant_CoexTable(pBtCoexist, NORMAL_EXEC, 0x55555555, 0xffff, 0x3);
+
+	pBtCoexist->btc_get(pBtCoexist, BTC_GET_U4_WIFI_BW, &wifiBw);
+	if(BTC_WIFI_BW_HT40 == wifiBw)
+	{
+		wifiRssiState = halbtc8723a2ant_WifiRssiState(pBtCoexist, 0, 2, 37, 0);
+
+		// fw mechanism
+		if( (wifiRssiState == BTC_RSSI_STATE_HIGH) ||
+			(wifiRssiState == BTC_RSSI_STATE_STAY_HIGH) )
+		{
+			halbtc8723a2ant_DecBtPwr(pBtCoexist, NORMAL_EXEC, true);
+		}
+		else
+		{
+			halbtc8723a2ant_DecBtPwr(pBtCoexist, NORMAL_EXEC, FALSE);
+		}
+		halbtc8723a2ant_PsTdma(pBtCoexist, NORMAL_EXEC, FALSE, 0);
+
+		// sw mechanism		
+		halbtc8723a2ant_AgcTable(pBtCoexist, NORMAL_EXEC, FALSE);
+		halbtc8723a2ant_AdcBackOff(pBtCoexist, NORMAL_EXEC, true);
+		halbtc8723a2ant_DacSwing(pBtCoexist, NORMAL_EXEC, FALSE, 0xc0);
+	}
+	else
+	{
+		wifiRssiState = halbtc8723a2ant_WifiRssiState(pBtCoexist, 0, 2, 37, 0);
+
+		// fw mechanism
+		if( (wifiRssiState == BTC_RSSI_STATE_HIGH) ||
+			(wifiRssiState == BTC_RSSI_STATE_STAY_HIGH) )
+		{
+			halbtc8723a2ant_DecBtPwr(pBtCoexist, NORMAL_EXEC, true);
+			halbtc8723a2ant_PsTdma(pBtCoexist, NORMAL_EXEC, FALSE, 0);
+		}
+		else
+		{
+			halbtc8723a2ant_DecBtPwr(pBtCoexist, NORMAL_EXEC, FALSE);
+			halbtc8723a2ant_PsTdma(pBtCoexist, NORMAL_EXEC, FALSE, 0);
+		}
+
+		// sw mechanism
+		if( (wifiRssiState == BTC_RSSI_STATE_HIGH) ||
+			(wifiRssiState == BTC_RSSI_STATE_STAY_HIGH) )
+		{
+			halbtc8723a2ant_AgcTable(pBtCoexist, NORMAL_EXEC, true);
+			halbtc8723a2ant_AdcBackOff(pBtCoexist, NORMAL_EXEC, true);
+			halbtc8723a2ant_DacSwing(pBtCoexist, NORMAL_EXEC, FALSE, 0xc0);
+		}
+		else
+		{
+			halbtc8723a2ant_AgcTable(pBtCoexist, NORMAL_EXEC, FALSE);
+			halbtc8723a2ant_AdcBackOff(pBtCoexist, NORMAL_EXEC, FALSE);
+			halbtc8723a2ant_DacSwing(pBtCoexist, NORMAL_EXEC, FALSE, 0xc0);
+		}
+	}
+}
+
+//PAN(EDR)+A2DP
+VOID
+halbtc8723a2ant_ActionPanEdrA2dp(
+	IN	PBTC_COEXIST		pBtCoexist
+	)
+{
+	u1Byte		wifiRssiState, wifiRssiState1, btInfoExt;
+	u4Byte		wifiBw;
+
+	btInfoExt = pCoexSta->btInfoExt;
+
+	if(halbtc8723a2ant_NeedToDecBtPwr(pBtCoexist))
+		halbtc8723a2ant_DecBtPwr(pBtCoexist, NORMAL_EXEC, true);
+	else	
+		halbtc8723a2ant_DecBtPwr(pBtCoexist, NORMAL_EXEC, FALSE);
+	halbtc8723a2ant_CoexTable(pBtCoexist, NORMAL_EXEC, 0x55555555, 0xffff, 0x3);
+
+	pBtCoexist->btc_get(pBtCoexist, BTC_GET_U4_WIFI_BW, &wifiBw);
+	if(BTC_WIFI_BW_HT40 == wifiBw)
+	{
+		wifiRssiState = halbtc8723a2ant_WifiRssiState(pBtCoexist, 0, 2, 37, 0);
+
+		// fw mechanism
+		if( (wifiRssiState == BTC_RSSI_STATE_HIGH) ||
+			(wifiRssiState == BTC_RSSI_STATE_STAY_HIGH) )
+		{
+			if(btInfoExt&BIT0)	//a2dp basic rate
+			{
+				halbtc8723a2ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 4);
+			}
+			else				//a2dp edr rate
+			{
+				halbtc8723a2ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 2);
+			}
+		}
+		else
+		{
+			if(btInfoExt&BIT0)	//a2dp basic rate
+			{
+				halbtc8723a2ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 8);
+			}
+			else				//a2dp edr rate
+			{
+				halbtc8723a2ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 6);
+			}
+		}
+
+		// sw mechanism
+		halbtc8723a2ant_AgcTable(pBtCoexist, NORMAL_EXEC, FALSE);
+		halbtc8723a2ant_AdcBackOff(pBtCoexist, NORMAL_EXEC, true);
+		halbtc8723a2ant_DacSwing(pBtCoexist, NORMAL_EXEC, FALSE, 0xc0);
+	}
+	else
+	{
+		wifiRssiState = halbtc8723a2ant_WifiRssiState(pBtCoexist, 0, 2, 27, 0);
+		wifiRssiState1 = halbtc8723a2ant_WifiRssiState(pBtCoexist, 1, 2, 47, 0);
+		
+		// fw mechanism
+		if( (wifiRssiState == BTC_RSSI_STATE_HIGH) ||
+			(wifiRssiState == BTC_RSSI_STATE_STAY_HIGH) )
+		{
+			if(btInfoExt&BIT0)	//a2dp basic rate
+			{
+				halbtc8723a2ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 4);
+			}
+			else				//a2dp edr rate
+			{
+				halbtc8723a2ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 2);
+			}
+		}
+		else
+		{
+			if(btInfoExt&BIT0)	//a2dp basic rate
+			{
+				halbtc8723a2ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 8);
+			}
+			else				//a2dp edr rate
+			{
+				halbtc8723a2ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 6);
+			}
+		}
+
+		// sw mechanism
+		if( (wifiRssiState1 == BTC_RSSI_STATE_HIGH) ||
+			(wifiRssiState1 == BTC_RSSI_STATE_STAY_HIGH) )
+		{
+			halbtc8723a2ant_AgcTable(pBtCoexist, NORMAL_EXEC, true);
+			halbtc8723a2ant_AdcBackOff(pBtCoexist, NORMAL_EXEC, true);
+			halbtc8723a2ant_DacSwing(pBtCoexist, NORMAL_EXEC, FALSE, 0xc0);
+		}
+		else
+		{
+			halbtc8723a2ant_AgcTable(pBtCoexist, NORMAL_EXEC, FALSE);
+			halbtc8723a2ant_AdcBackOff(pBtCoexist, NORMAL_EXEC, FALSE);
+			halbtc8723a2ant_DacSwing(pBtCoexist, NORMAL_EXEC, FALSE, 0xc0);
+		}
+	}
+}
+
+VOID
+halbtc8723a2ant_ActionPanEdrHid(
+	IN	PBTC_COEXIST		pBtCoexist
+	)
+{
+	u1Byte		wifiRssiState, wifiRssiState1;
+	u4Byte		wifiBw;
+
+	if(halbtc8723a2ant_NeedToDecBtPwr(pBtCoexist))
+		halbtc8723a2ant_DecBtPwr(pBtCoexist, NORMAL_EXEC, true);
+	else	
+		halbtc8723a2ant_DecBtPwr(pBtCoexist, NORMAL_EXEC, FALSE);
+	halbtc8723a2ant_CoexTable(pBtCoexist, NORMAL_EXEC, 0x55555555, 0xffff, 0x3);
+
+	pBtCoexist->btc_get(pBtCoexist, BTC_GET_U4_WIFI_BW, &wifiBw);
+	if(BTC_WIFI_BW_HT40 == wifiBw)
+	{
+		wifiRssiState = halbtc8723a2ant_WifiRssiState(pBtCoexist, 0, 2, 37, 0);
+
+		// fw mechanism
+		if( (wifiRssiState == BTC_RSSI_STATE_HIGH) ||
+			(wifiRssiState == BTC_RSSI_STATE_STAY_HIGH) )
+		{
+			halbtc8723a2ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 10); 
+		}
+		else
+		{
+			halbtc8723a2ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 14); 
+		}
+
+		// sw mechanism
+		halbtc8723a2ant_AgcTable(pBtCoexist, NORMAL_EXEC, FALSE);
+		halbtc8723a2ant_AdcBackOff(pBtCoexist, NORMAL_EXEC, true);
+		halbtc8723a2ant_DacSwing(pBtCoexist, NORMAL_EXEC, FALSE, 0xc0);
+	}
+	else
+	{
+		wifiRssiState = halbtc8723a2ant_WifiRssiState(pBtCoexist, 0, 2, 27, 0);
+		wifiRssiState1 = halbtc8723a2ant_WifiRssiState(pBtCoexist, 1, 2, 47, 0);
+		
+		// fw mechanism
+		if( (wifiRssiState == BTC_RSSI_STATE_HIGH) ||
+			(wifiRssiState == BTC_RSSI_STATE_STAY_HIGH) )
+		{
+			halbtc8723a2ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 10);
+		}
+		else
+		{
+			halbtc8723a2ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 14);
+		}
+		
+		// sw mechanism
+		if( (wifiRssiState == BTC_RSSI_STATE_HIGH) ||
+			(wifiRssiState == BTC_RSSI_STATE_STAY_HIGH) )
+		{
+			halbtc8723a2ant_AgcTable(pBtCoexist, NORMAL_EXEC, true);
+			halbtc8723a2ant_AdcBackOff(pBtCoexist, NORMAL_EXEC, true);
+			halbtc8723a2ant_DacSwing(pBtCoexist, NORMAL_EXEC, FALSE, 0xc0);
+		}
+		else
+		{
+			halbtc8723a2ant_AgcTable(pBtCoexist, NORMAL_EXEC, FALSE);
+			halbtc8723a2ant_AdcBackOff(pBtCoexist, NORMAL_EXEC, FALSE);
+			halbtc8723a2ant_DacSwing(pBtCoexist, NORMAL_EXEC, FALSE, 0xc0);
+		}
+	}
+}
+
+// HID+A2DP+PAN(EDR)
+VOID
+halbtc8723a2ant_ActionHidA2dpPanEdr(
+	IN	PBTC_COEXIST		pBtCoexist
+	)
+{
+	u1Byte		wifiRssiState, wifiRssiState1, btInfoExt;
+	u4Byte		wifiBw;
+
+	btInfoExt = pCoexSta->btInfoExt;
+
+	if(halbtc8723a2ant_NeedToDecBtPwr(pBtCoexist))
+		halbtc8723a2ant_DecBtPwr(pBtCoexist, NORMAL_EXEC, true);
+	else	
+		halbtc8723a2ant_DecBtPwr(pBtCoexist, NORMAL_EXEC, FALSE);
+	halbtc8723a2ant_CoexTable(pBtCoexist, NORMAL_EXEC, 0x55555555, 0xffff, 0x3);
+
+	pBtCoexist->btc_get(pBtCoexist, BTC_GET_U4_WIFI_BW, &wifiBw);
+	if(BTC_WIFI_BW_HT40 == wifiBw)
+	{
+		wifiRssiState = halbtc8723a2ant_WifiRssiState(pBtCoexist, 0, 2, 37, 0);
+			
+		// fw mechanism
+		if( (wifiRssiState == BTC_RSSI_STATE_HIGH) ||
+			(wifiRssiState == BTC_RSSI_STATE_STAY_HIGH) )
+		{
+			if(btInfoExt&BIT0)	//a2dp basic rate
+			{
+				halbtc8723a2ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 12);
+			}
+			else				//a2dp edr rate
+			{
+				halbtc8723a2ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 10);
+			}
+		}
+		else
+		{
+			if(btInfoExt&BIT0)	//a2dp basic rate
+			{
+				halbtc8723a2ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 16);
+			}
+			else				//a2dp edr rate
+			{
+				halbtc8723a2ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 14);
+			}
+		}
+		
+		// sw mechanism
+		halbtc8723a2ant_AgcTable(pBtCoexist, NORMAL_EXEC, FALSE);
+		halbtc8723a2ant_AdcBackOff(pBtCoexist, NORMAL_EXEC, true);
+		halbtc8723a2ant_DacSwing(pBtCoexist, NORMAL_EXEC, FALSE, 0xc0);
+	}
+	else
+	{
+		wifiRssiState = halbtc8723a2ant_WifiRssiState(pBtCoexist, 0, 2, 27, 0);
+		wifiRssiState1 = halbtc8723a2ant_WifiRssiState(pBtCoexist, 1, 2, 47, 0);
+		
+		// fw mechanism
+		if( (wifiRssiState == BTC_RSSI_STATE_HIGH) ||
+			(wifiRssiState == BTC_RSSI_STATE_STAY_HIGH) )
+		{
+			if(btInfoExt&BIT0)	//a2dp basic rate
+			{
+				halbtc8723a2ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 12);
+			}
+			else				//a2dp edr rate
+			{
+				halbtc8723a2ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 10);
+			}
+		}
+		else
+		{
+			if(btInfoExt&BIT0)	//a2dp basic rate
+			{
+				halbtc8723a2ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 16);
+			}
+			else				//a2dp edr rate
+			{
+				halbtc8723a2ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 14);
+			}
+		}
+
+		// sw mechanism
+		if( (wifiRssiState1 == BTC_RSSI_STATE_HIGH) ||
+			(wifiRssiState1 == BTC_RSSI_STATE_STAY_HIGH) )
+		{
+			halbtc8723a2ant_AgcTable(pBtCoexist, NORMAL_EXEC, true);
+			halbtc8723a2ant_AdcBackOff(pBtCoexist, NORMAL_EXEC, true);
+			halbtc8723a2ant_DacSwing(pBtCoexist, NORMAL_EXEC, FALSE, 0xc0);
+		}
+		else
+		{
+			halbtc8723a2ant_AgcTable(pBtCoexist, NORMAL_EXEC, FALSE);
+			halbtc8723a2ant_AdcBackOff(pBtCoexist, NORMAL_EXEC, FALSE);
+			halbtc8723a2ant_DacSwing(pBtCoexist, NORMAL_EXEC, FALSE, 0xc0);
+		}
+	}
+}
+
+VOID
+halbtc8723a2ant_ActionHidA2dp(
+	IN	PBTC_COEXIST		pBtCoexist
+	)
+{
+	u1Byte		wifiRssiState, wifiRssiState1, btInfoExt;
+	u4Byte		wifiBw;
+
+	btInfoExt = pCoexSta->btInfoExt;
+
+	if(halbtc8723a2ant_NeedToDecBtPwr(pBtCoexist))
+		halbtc8723a2ant_DecBtPwr(pBtCoexist, NORMAL_EXEC, true);
+	else	
+		halbtc8723a2ant_DecBtPwr(pBtCoexist, NORMAL_EXEC, FALSE);
+	halbtc8723a2ant_CoexTable(pBtCoexist, NORMAL_EXEC, 0x55555555, 0xffff, 0x3);
+
+	pBtCoexist->btc_get(pBtCoexist, BTC_GET_U4_WIFI_BW, &wifiBw);
+	if(BTC_WIFI_BW_HT40 == wifiBw)
+	{
+		wifiRssiState = halbtc8723a2ant_WifiRssiState(pBtCoexist, 0, 2, 37, 0);
+		
+		// fw mechanism
+		if( (wifiRssiState == BTC_RSSI_STATE_HIGH) ||
+			(wifiRssiState == BTC_RSSI_STATE_STAY_HIGH) )
+		{
+			if(btInfoExt&BIT0)	//a2dp basic rate
+			{
+				halbtc8723a2ant_TdmaDurationAdjust(pBtCoexist, true, FALSE, 3);
+			}
+			else				//a2dp edr rate
+			{
+				halbtc8723a2ant_TdmaDurationAdjust(pBtCoexist, true, FALSE, 1);
+			}
+		}
+		else
+		{
+			if(btInfoExt&BIT0)	//a2dp basic rate
+			{
+				halbtc8723a2ant_TdmaDurationAdjust(pBtCoexist, true, true, 3);
+			}
+			else				//a2dp edr rate
+			{
+				halbtc8723a2ant_TdmaDurationAdjust(pBtCoexist, true, true, 1);
+			}
+		}
+		
+		// sw mechanism
+		halbtc8723a2ant_AgcTable(pBtCoexist, NORMAL_EXEC, FALSE);
+		halbtc8723a2ant_AdcBackOff(pBtCoexist, NORMAL_EXEC, true);
+		halbtc8723a2ant_DacSwing(pBtCoexist, NORMAL_EXEC, FALSE, 0xc0);
+	}
+	else
+	{
+		wifiRssiState = halbtc8723a2ant_WifiRssiState(pBtCoexist, 0, 2, 27, 0);
+		wifiRssiState1 = halbtc8723a2ant_WifiRssiState(pBtCoexist, 1, 2, 47, 0);
+		
+		// fw mechanism
+		if( (wifiRssiState == BTC_RSSI_STATE_HIGH) ||
+			(wifiRssiState == BTC_RSSI_STATE_STAY_HIGH) )
+		{
+			if(btInfoExt&BIT0)	//a2dp basic rate
+			{
+				halbtc8723a2ant_TdmaDurationAdjust(pBtCoexist, true, FALSE, 3);
+			}
+			else				//a2dp edr rate
+			{
+				halbtc8723a2ant_TdmaDurationAdjust(pBtCoexist, true, FALSE, 1);
+			}
+		}
+		else
+		{
+			if(btInfoExt&BIT0)	//a2dp basic rate
+			{
+				halbtc8723a2ant_TdmaDurationAdjust(pBtCoexist, true, true, 3);
+			}
+			else				//a2dp edr rate
+			{
+				halbtc8723a2ant_TdmaDurationAdjust(pBtCoexist, true, true, 1);
+			}
+		}
+
+		// sw mechanism
+		if( (wifiRssiState1 == BTC_RSSI_STATE_HIGH) ||
+			(wifiRssiState1 == BTC_RSSI_STATE_STAY_HIGH) )
+		{
+			halbtc8723a2ant_AgcTable(pBtCoexist, NORMAL_EXEC, true);
+			halbtc8723a2ant_AdcBackOff(pBtCoexist, NORMAL_EXEC, true);
+			halbtc8723a2ant_DacSwing(pBtCoexist, NORMAL_EXEC, FALSE, 0xc0);
+		}
+		else
+		{
+			halbtc8723a2ant_AgcTable(pBtCoexist, NORMAL_EXEC, FALSE);
+			halbtc8723a2ant_AdcBackOff(pBtCoexist, NORMAL_EXEC, FALSE);
+			halbtc8723a2ant_DacSwing(pBtCoexist, NORMAL_EXEC, FALSE, 0xc0);
+		}
+	}
+}
+
+VOID
+halbtc8723a2ant_RunCoexistMechanism(
+	IN	PBTC_COEXIST		pBtCoexist
+	)
+{
+	PBTC_STACK_INFO		pStackInfo=&pBtCoexist->stack_info;
+	u1Byte				btInfoOriginal=0, btRetryCnt=0;
+	u1Byte				algorithm=0;
+
+	if(pBtCoexist->manual_control)
+	{
+		BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], Manual control!!!\n"));
+		return;
+	}
+
+	if(pStackInfo->bProfileNotified)
+	{
+		if(pCoexSta->bHoldForStackOperation)
+		{
+			// if bt inquiry/page/pair, do not execute.
+			return;
+		}
+		
+		algorithm = halbtc8723a2ant_ActionAlgorithm(pBtCoexist);
+		if(pCoexSta->bHoldPeriodCnt && (BT_8723A_2ANT_COEX_ALGO_PANHS!=algorithm))
+		{
+			BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex],Hold BT inquiry/page scan setting (cnt = %d)!!\n", 
+				pCoexSta->bHoldPeriodCnt));
+			if(pCoexSta->bHoldPeriodCnt >= 6)
+			{
+				pCoexSta->bHoldPeriodCnt = 0;
+				// next time the coexist parameters should be reset again.
+			}
+			else
+				pCoexSta->bHoldPeriodCnt++;
+			return;
+		}
+
+		pCoexDm->curAlgorithm = algorithm;
+		BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], Algorithm = %d \n", pCoexDm->curAlgorithm));
+		if(halbtc8723a2ant_IsCommonAction(pBtCoexist))
+		{
+			BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], Action 2-Ant common.\n"));
+			pCoexDm->bResetTdmaAdjust = true;
+		}
+		else
+		{
+			if(pCoexDm->curAlgorithm != pCoexDm->preAlgorithm)
+			{
+				BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], preAlgorithm=%d, curAlgorithm=%d\n", 
+					pCoexDm->preAlgorithm, pCoexDm->curAlgorithm));
+				pCoexDm->bResetTdmaAdjust = true;
+			}
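+			// dispatch to the action handler for the detected profile combination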
+			switch(pCoexDm->curAlgorithm)
+			{
+				case BT_8723A_2ANT_COEX_ALGO_SCO:
+					BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], Action 2-Ant, algorithm = SCO.\n"));
+					halbtc8723a2ant_ActionSco(pBtCoexist);
+					break;
+				case BT_8723A_2ANT_COEX_ALGO_HID:
+					BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], Action 2-Ant, algorithm = HID.\n"));
+					halbtc8723a2ant_ActionHid(pBtCoexist);
+					break;
+				case BT_8723A_2ANT_COEX_ALGO_A2DP:
+					BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], Action 2-Ant, algorithm = A2DP.\n"));
+					halbtc8723a2ant_ActionA2dp(pBtCoexist);
+					break;
+				case BT_8723A_2ANT_COEX_ALGO_PANEDR:
+					BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], Action 2-Ant, algorithm = PAN(EDR).\n"));
+					halbtc8723a2ant_ActionPanEdr(pBtCoexist);
+					break;
+				case BT_8723A_2ANT_COEX_ALGO_PANHS:
+					BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], Action 2-Ant, algorithm = HS mode.\n"));
+					halbtc8723a2ant_ActionPanHs(pBtCoexist);
+					break;
+				case BT_8723A_2ANT_COEX_ALGO_PANEDR_A2DP:
+					BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], Action 2-Ant, algorithm = PAN+A2DP.\n"));
+					halbtc8723a2ant_ActionPanEdrA2dp(pBtCoexist);
+					break;
+				case BT_8723A_2ANT_COEX_ALGO_PANEDR_HID:
+					BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], Action 2-Ant, algorithm = PAN(EDR)+HID.\n"));
+					halbtc8723a2ant_ActionPanEdrHid(pBtCoexist);
+					break;
+				case BT_8723A_2ANT_COEX_ALGO_HID_A2DP_PANEDR:
+					BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], Action 2-Ant, algorithm = HID+A2DP+PAN.\n"));
+					halbtc8723a2ant_ActionHidA2dpPanEdr(pBtCoexist);
+					break;
+				case BT_8723A_2ANT_COEX_ALGO_HID_A2DP:
+					BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], Action 2-Ant, algorithm = HID+A2DP.\n"));
+					halbtc8723a2ant_ActionHidA2dp(pBtCoexist);
+					break;
+				default:
+					BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], Action 2-Ant, algorithm = coexist All Off!!\n"));
+					halbtc8723a2ant_CoexAllOff(pBtCoexist);
+					break;
+			}
+			pCoexDm->preAlgorithm = pCoexDm->curAlgorithm;
+		}
+	}
+}
+
+//============================================================
+// workaround functions start with wa_halbtc8723a2ant_
+//============================================================
+VOID
+wa_halbtc8723a2ant_MonitorC2h(
+	IN	PBTC_COEXIST			pBtCoexist
+	)
+{
+	u1Byte	tmp1b=0x0;
+	u4Byte	curC2hTotalCnt=0x0;
+	static u4Byte	preC2hTotalCnt=0x0, sameCntPollingTime=0x0;
+
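+	// C2H hang workaround: if the BT RSP C2H count has not increased for
+	// two or more consecutive polls, check 0x1af and clear it when non-zero.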
+	curC2hTotalCnt+=pCoexSta->btInfoC2hCnt[BT_INFO_SRC_8723A_2ANT_BT_RSP];
+
+	if(curC2hTotalCnt == preC2hTotalCnt)
+	{
+		sameCntPollingTime++;
+	}
+	else
+	{
+		preC2hTotalCnt = curC2hTotalCnt;
+		sameCntPollingTime = 0;
+	}
+
+	if(sameCntPollingTime >= 2)
+	{
+		tmp1b = pBtCoexist->btc_read_1byte(pBtCoexist, 0x1af);
+		if(tmp1b != 0x0)
+		{
+			pCoexSta->c2hHangDetectCnt++;
+			pBtCoexist->btc_write_1byte(pBtCoexist, 0x1af, 0x0);
+		}
+	}
+}
+
+//============================================================
+// extern functions start with EXhalbtc8723a2ant_
+//============================================================
+VOID
+EXhalbtc8723a2ant_InitHwConfig(
+	IN	PBTC_COEXIST		pBtCoexist
+	)
+{
+	u4Byte	u4Tmp=0;
+	u1Byte	u1Tmp=0;
+
+	BTC_PRINT(BTC_MSG_INTERFACE, INTF_INIT, ("[BTCoex], 2Ant Init HW Config!!\n"));
+
+	// backup rf 0x1e value
+	pCoexDm->btRf0x1eBackup = 
+		pBtCoexist->btc_get_rf_reg(pBtCoexist, BTC_RF_A, 0x1e, 0xfffff);
+
+	// Enable counter statistics
+	pBtCoexist->btc_write_1byte(pBtCoexist, 0x76e, 0x4);
+	pBtCoexist->btc_write_1byte(pBtCoexist, 0x778, 0x3);
+	pBtCoexist->btc_write_1byte(pBtCoexist, 0x40, 0x20);
+}
+
+VOID
+EXhalbtc8723a2ant_InitCoexDm(
+	IN	PBTC_COEXIST		pBtCoexist
+	)
+{
+	BTC_PRINT(BTC_MSG_INTERFACE, INTF_INIT, ("[BTCoex], Coex Mechanism Init!!\n"));
+	
+	halbtc8723a2ant_InitCoexDm(pBtCoexist);
+}
+
+VOID
+EXhalbtc8723a2ant_DisplayCoexInfo(
+	IN	PBTC_COEXIST		pBtCoexist
+	)
+{
+	struct btc_board_info *		pBoardInfo=&pBtCoexist->board_info;
+	PBTC_STACK_INFO		pStackInfo=&pBtCoexist->stack_info;
+	pu1Byte				cliBuf=pBtCoexist->cli_buf;
+	u1Byte				u1Tmp[4], i, btInfoExt, psTdmaCase=0;
+	u4Byte				u4Tmp[4];
+	BOOLEAN				bRoam=FALSE, bScan=FALSE, bLink=FALSE, bWifiUnder5G=FALSE;
+	BOOLEAN				bBtHsOn=FALSE, bWifiBusy=FALSE;
+	s4Byte				wifiRssi=0, btHsRssi=0;
+	u4Byte				wifiBw, wifiTrafficDir;
+	u1Byte				wifiDot11Chnl, wifiHsChnl;
+
+	CL_SPRINTF(cliBuf, BT_TMP_BUF_SIZE, "\r\n ============[BT Coexist info]============");
+	CL_PRINTF(cliBuf);
+
+	if(!pBoardInfo->bt_exist)
+	{
+		CL_SPRINTF(cliBuf, BT_TMP_BUF_SIZE, "\r\n BT not exists !!!");
+		CL_PRINTF(cliBuf);
+		return;
+	}
+
+	CL_SPRINTF(cliBuf, BT_TMP_BUF_SIZE, "\r\n %-35s = %d/ %d ", "Ant PG number/ Ant mechanism:", \
+		pBoardInfo->pg_ant_num, pBoardInfo->btdm_ant_num);
+	CL_PRINTF(cliBuf);	
+	
+	if(pBtCoexist->manual_control)
+	{
+		CL_SPRINTF(cliBuf, BT_TMP_BUF_SIZE, "\r\n %-35s", "[Action Manual control]!!");
+		CL_PRINTF(cliBuf);
+	}
+	
+	CL_SPRINTF(cliBuf, BT_TMP_BUF_SIZE, "\r\n %-35s = %s / %d", "BT stack/ hci ext ver", \
+		((pStackInfo->bProfileNotified)? "Yes":"No"), pStackInfo->hciVersion);
+	CL_PRINTF(cliBuf);
+
+	pBtCoexist->btc_get(pBtCoexist, BTC_GET_BL_HS_OPERATION, &bBtHsOn);
+	pBtCoexist->btc_get(pBtCoexist, BTC_GET_U1_WIFI_DOT11_CHNL, &wifiDot11Chnl);
+	pBtCoexist->btc_get(pBtCoexist, BTC_GET_U1_WIFI_HS_CHNL, &wifiHsChnl);
+	CL_SPRINTF(cliBuf, BT_TMP_BUF_SIZE, "\r\n %-35s = %d / %d(%d)", "Dot11 channel / HsChnl(HsMode)", \
+		wifiDot11Chnl, wifiHsChnl, bBtHsOn);
+	CL_PRINTF(cliBuf);
+
+	CL_SPRINTF(cliBuf, BT_TMP_BUF_SIZE, "\r\n %-35s = %02x %02x %02x ", "H2C Wifi inform bt chnl Info", \
+		pCoexDm->wifiChnlInfo[0], pCoexDm->wifiChnlInfo[1],
+		pCoexDm->wifiChnlInfo[2]);
+	CL_PRINTF(cliBuf);
+
+	pBtCoexist->btc_get(pBtCoexist, BTC_GET_S4_WIFI_RSSI, &wifiRssi);
+	pBtCoexist->btc_get(pBtCoexist, BTC_GET_S4_HS_RSSI, &btHsRssi);
+	CL_SPRINTF(cliBuf, BT_TMP_BUF_SIZE, "\r\n %-35s = %d/ %d", "Wifi rssi/ HS rssi", \
+		wifiRssi, btHsRssi);
+	CL_PRINTF(cliBuf);
+
+	pBtCoexist->btc_get(pBtCoexist, BTC_GET_BL_WIFI_SCAN, &bScan);
+	pBtCoexist->btc_get(pBtCoexist, BTC_GET_BL_WIFI_LINK, &bLink);
+	pBtCoexist->btc_get(pBtCoexist, BTC_GET_BL_WIFI_ROAM, &bRoam);
+	CL_SPRINTF(cliBuf, BT_TMP_BUF_SIZE, "\r\n %-35s = %d/ %d/ %d ", "Wifi bLink/ bRoam/ bScan", \
+		bLink, bRoam, bScan);
+	CL_PRINTF(cliBuf);
+
+	pBtCoexist->btc_get(pBtCoexist, BTC_GET_BL_WIFI_UNDER_5G, &bWifiUnder5G);
+	pBtCoexist->btc_get(pBtCoexist, BTC_GET_U4_WIFI_BW, &wifiBw);
+	pBtCoexist->btc_get(pBtCoexist, BTC_GET_BL_WIFI_BUSY, &bWifiBusy);
+	pBtCoexist->btc_get(pBtCoexist, BTC_GET_U4_WIFI_TRAFFIC_DIRECTION, &wifiTrafficDir);
+	CL_SPRINTF(cliBuf, BT_TMP_BUF_SIZE, "\r\n %-35s = %s / %s/ %s ", "Wifi status", \
+		(bWifiUnder5G? "5G":"2.4G"),
+		((BTC_WIFI_BW_LEGACY==wifiBw)? "Legacy": (((BTC_WIFI_BW_HT40==wifiBw)? "HT40":"HT20"))),
+		((!bWifiBusy)? "idle": ((BTC_WIFI_TRAFFIC_TX==wifiTrafficDir)? "uplink":"downlink")));
+	CL_PRINTF(cliBuf);
+
+	CL_SPRINTF(cliBuf, BT_TMP_BUF_SIZE, "\r\n %-35s = [%s/ %d/ %d] ", "BT [status/ rssi/ retryCnt]", \
+		((pCoexSta->bC2hBtInquiryPage)?("inquiry/page scan"):((BT_8723A_2ANT_BT_STATUS_IDLE == pCoexDm->btStatus)? "idle":(  (BT_8723A_2ANT_BT_STATUS_CONNECTED_IDLE == pCoexDm->btStatus)? "connected-idle":"busy"))),
+		pCoexSta->btRssi, pCoexSta->btRetryCnt);
+	CL_PRINTF(cliBuf);
+	
+	if(pStackInfo->bProfileNotified)
+	{			
+		CL_SPRINTF(cliBuf, BT_TMP_BUF_SIZE, "\r\n %-35s = %d / %d / %d / %d", "SCO/HID/PAN/A2DP", \
+			pStackInfo->bScoExist, pStackInfo->bHidExist, pStackInfo->bPanExist, pStackInfo->bA2dpExist);
+		CL_PRINTF(cliBuf);	
+
+		pBtCoexist->btc_disp_dbg_msg(pBtCoexist, BTC_DBG_DISP_BT_LINK_INFO);
+	}
+
+	btInfoExt = pCoexSta->btInfoExt;
+	CL_SPRINTF(cliBuf, BT_TMP_BUF_SIZE, "\r\n %-35s = %s", "BT Info A2DP rate", \
+		(btInfoExt&BIT0)? "Basic rate":"EDR rate");
+	CL_PRINTF(cliBuf);	
+
+	for(i=0; i<BT_INFO_SRC_8723A_2ANT_MAX; i++)
+	{
+		if(pCoexSta->btInfoC2hCnt[i])
+		{				
+			CL_SPRINTF(cliBuf, BT_TMP_BUF_SIZE, "\r\n %-35s = %02x %02x %02x %02x %02x %02x %02x(%d)", GLBtInfoSrc8723a2Ant[i], \
+				pCoexSta->btInfoC2h[i][0], pCoexSta->btInfoC2h[i][1],
+				pCoexSta->btInfoC2h[i][2], pCoexSta->btInfoC2h[i][3],
+				pCoexSta->btInfoC2h[i][4], pCoexSta->btInfoC2h[i][5],
+				pCoexSta->btInfoC2h[i][6], pCoexSta->btInfoC2hCnt[i]);
+			CL_PRINTF(cliBuf);
+		}
+	}
+
+	CL_SPRINTF(cliBuf, BT_TMP_BUF_SIZE, "\r\n %-35s = %d", "write 0x1af=0x0 num", \
+		pCoexSta->c2hHangDetectCnt);
+	CL_PRINTF(cliBuf);
+	
+	// Sw mechanism	
+	CL_SPRINTF(cliBuf, BT_TMP_BUF_SIZE, "\r\n %-35s", "============[Sw mechanism]============");
+	CL_PRINTF(cliBuf);
+	CL_SPRINTF(cliBuf, BT_TMP_BUF_SIZE, "\r\n %-35s = %d/ %d/ %d", "SM1[ShRf/ LpRA/ LimDig]", \
+		pCoexDm->bCurRfRxLpfShrink, pCoexDm->bCurLowPenaltyRa, pCoexDm->limited_dig);
+	CL_PRINTF(cliBuf);
+	CL_SPRINTF(cliBuf, BT_TMP_BUF_SIZE, "\r\n %-35s = %d/ %d/ %d(0x%x) ", "SM2[AgcT/ AdcB/ SwDacSwing(lvl)]", \
+		pCoexDm->bCurAgcTableEn, pCoexDm->bCurAdcBackOff, pCoexDm->bCurDacSwingOn, pCoexDm->curDacSwingLvl);
+	CL_PRINTF(cliBuf);
+
+	// Fw mechanism		
+	CL_SPRINTF(cliBuf, BT_TMP_BUF_SIZE, "\r\n %-35s", "============[Fw mechanism]============");
+	CL_PRINTF(cliBuf);	
+	
+	if(!pBtCoexist->manual_control)
+	{
+		psTdmaCase = pCoexDm->curPsTdma;
+		CL_SPRINTF(cliBuf, BT_TMP_BUF_SIZE, "\r\n %-35s = %02x %02x %02x %02x %02x case-%d", "PS TDMA", \
+			pCoexDm->psTdmaPara[0], pCoexDm->psTdmaPara[1],
+			pCoexDm->psTdmaPara[2], pCoexDm->psTdmaPara[3],
+			pCoexDm->psTdmaPara[4], psTdmaCase);
+		CL_PRINTF(cliBuf);
+	
+		CL_SPRINTF(cliBuf, BT_TMP_BUF_SIZE, "\r\n %-35s = %d/ %d ", "DecBtPwr/ IgnWlanAct", \
+			pCoexDm->bCurDecBtPwr, pCoexDm->bCurIgnoreWlanAct);
+		CL_PRINTF(cliBuf);
+	}
+
+	// Hw setting		
+	CL_SPRINTF(cliBuf, BT_TMP_BUF_SIZE, "\r\n %-35s", "============[Hw setting]============");
+	CL_PRINTF(cliBuf);	
+
+	CL_SPRINTF(cliBuf, BT_TMP_BUF_SIZE, "\r\n %-35s = 0x%x", "RF-A, 0x1e initVal", \
+		pCoexDm->btRf0x1eBackup);
+	CL_PRINTF(cliBuf);
+
+	u1Tmp[0] = pBtCoexist->btc_read_1byte(pBtCoexist, 0x778);
+	u1Tmp[1] = pBtCoexist->btc_read_1byte(pBtCoexist, 0x783);
+	u1Tmp[2] = pBtCoexist->btc_read_1byte(pBtCoexist, 0x796);
+	CL_SPRINTF(cliBuf, BT_TMP_BUF_SIZE, "\r\n %-35s = 0x%x/ 0x%x/ 0x%x", "0x778/ 0x783/ 0x796", \
+		u1Tmp[0], u1Tmp[1], u1Tmp[2]);
+	CL_PRINTF(cliBuf);
+
+	u4Tmp[0] = pBtCoexist->btc_read_4byte(pBtCoexist, 0x880);
+	CL_SPRINTF(cliBuf, BT_TMP_BUF_SIZE, "\r\n %-35s = 0x%x", "0x880", \
+		u4Tmp[0]);
+	CL_PRINTF(cliBuf);
+
+	u1Tmp[0] = pBtCoexist->btc_read_1byte(pBtCoexist, 0x40);
+	CL_SPRINTF(cliBuf, BT_TMP_BUF_SIZE, "\r\n %-35s = 0x%x", "0x40", \
+		u1Tmp[0]);
+	CL_PRINTF(cliBuf);
+
+	u4Tmp[0] = pBtCoexist->btc_read_4byte(pBtCoexist, 0x550);
+	u1Tmp[0] = pBtCoexist->btc_read_1byte(pBtCoexist, 0x522);
+	CL_SPRINTF(cliBuf, BT_TMP_BUF_SIZE, "\r\n %-35s = 0x%x/ 0x%x", "0x550(bcn ctrl)/0x522", \
+		u4Tmp[0], u1Tmp[0]);
+	CL_PRINTF(cliBuf);
+
+	u4Tmp[0] = pBtCoexist->btc_read_4byte(pBtCoexist, 0x484);
+	CL_SPRINTF(cliBuf, BT_TMP_BUF_SIZE, "\r\n %-35s = 0x%x", "0x484(rate adaptive)", \
+		u4Tmp[0]);
+	CL_PRINTF(cliBuf);
+
+	u4Tmp[0] = pBtCoexist->btc_read_4byte(pBtCoexist, 0xc50);
+	CL_SPRINTF(cliBuf, BT_TMP_BUF_SIZE, "\r\n %-35s = 0x%x", "0xc50(dig)", \
+		u4Tmp[0]);
+	CL_PRINTF(cliBuf);
+
+	u4Tmp[0] = pBtCoexist->btc_read_4byte(pBtCoexist, 0xda0);
+	u4Tmp[1] = pBtCoexist->btc_read_4byte(pBtCoexist, 0xda4);
+	u4Tmp[2] = pBtCoexist->btc_read_4byte(pBtCoexist, 0xda8);
+	u4Tmp[3] = pBtCoexist->btc_read_4byte(pBtCoexist, 0xdac);
+	CL_SPRINTF(cliBuf, BT_TMP_BUF_SIZE, "\r\n %-35s = 0x%x/ 0x%x/ 0x%x/ 0x%x", "0xda0/0xda4/0xda8/0xdac(FA cnt)", \
+		u4Tmp[0], u4Tmp[1], u4Tmp[2], u4Tmp[3]);
+	CL_PRINTF(cliBuf);
+
+	u4Tmp[0] = pBtCoexist->btc_read_4byte(pBtCoexist, 0x6c0);
+	u4Tmp[1] = pBtCoexist->btc_read_4byte(pBtCoexist, 0x6c4);
+	u4Tmp[2] = pBtCoexist->btc_read_4byte(pBtCoexist, 0x6c8);
+	u1Tmp[0] = pBtCoexist->btc_read_1byte(pBtCoexist, 0x6cc);
+	CL_SPRINTF(cliBuf, BT_TMP_BUF_SIZE, "\r\n %-35s = 0x%x/ 0x%x/ 0x%x/ 0x%x", "0x6c0/0x6c4/0x6c8/0x6cc(coexTable)", \
+		u4Tmp[0], u4Tmp[1], u4Tmp[2], u1Tmp[0]);
+	CL_PRINTF(cliBuf);
+
+	CL_SPRINTF(cliBuf, BT_TMP_BUF_SIZE, "\r\n %-35s = %d/ %d", "0x770 (hp rx[31:16]/tx[15:0])", \
+		pCoexSta->highPriorityRx, pCoexSta->highPriorityTx);
+	CL_PRINTF(cliBuf);
+	CL_SPRINTF(cliBuf, BT_TMP_BUF_SIZE, "\r\n %-35s = %d/ %d", "0x774(lp rx[31:16]/tx[15:0])", \
+		pCoexSta->lowPriorityRx, pCoexSta->lowPriorityTx);
+	CL_PRINTF(cliBuf);
+
+	// Check whether the Tx mgnt queue hangs: 0x41b should be 0xf (e.g. 0xd ==> hang)
+	u1Tmp[0] = pBtCoexist->btc_read_1byte(pBtCoexist, 0x41b);
+	CL_SPRINTF(cliBuf, BT_TMP_BUF_SIZE, "\r\n %-35s = 0x%x", "0x41b (mgntQ hang chk == 0xf)", \
+		u1Tmp[0]);
+	CL_PRINTF(cliBuf);	
+
+	pBtCoexist->btc_disp_dbg_msg(pBtCoexist, BTC_DBG_DISP_COEX_STATISTICS);
+}
+
+
+VOID
+EXhalbtc8723a2ant_IpsNotify(
+	IN	PBTC_COEXIST		pBtCoexist,
+	IN	u1Byte			type
+	)
+{
+	if(BTC_IPS_ENTER == type)
+	{
+		BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY, ("[BTCoex], IPS ENTER notify\n"));
+		halbtc8723a2ant_CoexAllOff(pBtCoexist);
+	}
+	else if(BTC_IPS_LEAVE == type)
+	{
+		BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY, ("[BTCoex], IPS LEAVE notify\n"));
+		//halbtc8723a2ant_InitCoexDm(pBtCoexist);
+	}
+}
+
+VOID
+EXhalbtc8723a2ant_LpsNotify(
+	IN	PBTC_COEXIST		pBtCoexist,
+	IN	u1Byte			type
+	)
+{
+	if(BTC_LPS_ENABLE == type)
+	{
+		BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY, ("[BTCoex], LPS ENABLE notify\n"));
+	}
+	else if(BTC_LPS_DISABLE == type)
+	{
+		BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY, ("[BTCoex], LPS DISABLE notify\n"));
+	}
+}
+
+VOID
+EXhalbtc8723a2ant_ScanNotify(
+	IN	PBTC_COEXIST		pBtCoexist,
+	IN	u1Byte			type
+	)
+{
+	if(BTC_SCAN_START == type)
+	{
+		BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY, ("[BTCoex], SCAN START notify\n"));
+	}
+	else if(BTC_SCAN_FINISH == type)
+	{
+		BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY, ("[BTCoex], SCAN FINISH notify\n"));
+	}
+}
+
+VOID
+EXhalbtc8723a2ant_ConnectNotify(
+	IN	PBTC_COEXIST		pBtCoexist,
+	IN	u1Byte			type
+	)
+{
+	if(BTC_ASSOCIATE_START == type)
+	{
+		BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY, ("[BTCoex], CONNECT START notify\n"));
+	}
+	else if(BTC_ASSOCIATE_FINISH == type)
+	{
+		BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY, ("[BTCoex], CONNECT FINISH notify\n"));
+	}
+}
+
+VOID
+EXhalbtc8723a2ant_MediaStatusNotify(
+	IN	PBTC_COEXIST			pBtCoexist,
+	IN	u1Byte				type
+	)
+{
+	if(BTC_MEDIA_CONNECT == type)
+	{
+		BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY, ("[BTCoex], MEDIA connect notify\n"));
+	}
+	else
+	{
+		BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY, ("[BTCoex], MEDIA disconnect notify\n"));
+	}
+
+	halbtc8723a2ant_IndicateWifiChnlBwInfo(pBtCoexist, type);
+}
+
+VOID
+EXhalbtc8723a2ant_SpecialPacketNotify(
+	IN	PBTC_COEXIST			pBtCoexist,
+	IN	u1Byte				type
+	)
+{
+	if(type == BTC_PACKET_DHCP)
+	{
+		BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY, ("[BTCoex], DHCP Packet notify\n"));
+	}
+}
+
+VOID
+EXhalbtc8723a2ant_BtInfoNotify(
+	IN	PBTC_COEXIST		pBtCoexist,
+	IN	pu1Byte			tmpBuf,
+	IN	u1Byte			length
+	)
+{
+	u1Byte			btInfo=0;
+	u1Byte			i, rspSource=0;
+	BOOLEAN			bBtBusy=FALSE, limited_dig=FALSE;
+	BOOLEAN			bWifiConnected=FALSE, bBtHsOn=FALSE;
+
+	pCoexSta->bC2hBtInfoReqSent = FALSE;
+	
+	rspSource = BT_INFO_SRC_8723A_2ANT_BT_RSP;
+	pCoexSta->btInfoC2hCnt[rspSource]++;
+
+	BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY, ("[BTCoex], Bt info[%d], length=%d, hex data=[", rspSource, length));
+	for(i=0; i<length; i++)
+	{
+		pCoexSta->btInfoC2h[rspSource][i] = tmpBuf[i];
+		if(i == 0)
+			btInfo = tmpBuf[i];
+		if(i == length-1)
+		{
+			BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY, ("0x%02x]\n", tmpBuf[i]));
+		}
+		else
+		{
+			BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY, ("0x%02x, ", tmpBuf[i]));
+		}
+	}
+
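+	// parse the BT-reported fields: byte1 = retry count, byte2 = rssi
+	// (scaled below), byte3 = extended info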
+	if(BT_INFO_SRC_8723A_2ANT_WIFI_FW != rspSource)
+	{
+		pCoexSta->btRetryCnt =
+			pCoexSta->btInfoC2h[rspSource][1];
+
+		pCoexSta->btRssi =
+			pCoexSta->btInfoC2h[rspSource][2]*2+10;
+
+		pCoexSta->btInfoExt = 
+			pCoexSta->btInfoC2h[rspSource][3];
+	}
+		
+	pBtCoexist->btc_get(pBtCoexist, BTC_GET_BL_HS_OPERATION, &bBtHsOn);
+	// check BIT2 first ==> check if bt is under inquiry or page scan
+	if(btInfo & BT_INFO_8723A_2ANT_B_INQ_PAGE)
+	{
+		pCoexSta->bC2hBtInquiryPage = true;
+	}
+	else
+	{
+		pCoexSta->bC2hBtInquiryPage = FALSE;
+	}
+}
+
+VOID
+EXhalbtc8723a2ant_StackOperationNotify(
+	IN	PBTC_COEXIST			pBtCoexist,
+	IN	u1Byte				type
+	)
+{
+	if(BTC_STACK_OP_INQ_PAGE_PAIR_START == type)
+	{
+		BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY, ("[BTCoex], StackOP Inquiry/page/pair start notify\n"));
+		pCoexSta->bHoldForStackOperation = true;
+		pCoexSta->bHoldPeriodCnt = 1;
+		halbtc8723a2ant_BtInquiryPage(pBtCoexist);
+	}
+	else if(BTC_STACK_OP_INQ_PAGE_PAIR_FINISH == type)
+	{
+		BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY, ("[BTCoex], StackOP Inquiry/page/pair finish notify\n"));
+		pCoexSta->bHoldForStackOperation = FALSE;
+	}
+}
+
+VOID
+EXhalbtc8723a2ant_HaltNotify(
+	IN	PBTC_COEXIST			pBtCoexist
+	)
+{
+	BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY, ("[BTCoex], Halt notify\n"));
+
+	halbtc8723a2ant_IgnoreWlanAct(pBtCoexist, FORCE_EXEC, true);
+	EXhalbtc8723a2ant_MediaStatusNotify(pBtCoexist, BTC_MEDIA_DISCONNECT);
+}
+
+VOID
+EXhalbtc8723a2ant_Periodical(
+	IN	PBTC_COEXIST			pBtCoexist
+	)
+{
+	BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY, ("[BTCoex], 2Ant Periodical!!\n"));
+
+	// work around for c2h hang
+	wa_halbtc8723a2ant_MonitorC2h(pBtCoexist);
+	
+	halbtc8723a2ant_QueryBtInfo(pBtCoexist);
+	halbtc8723a2ant_MonitorBtCtr(pBtCoexist);
+	halbtc8723a2ant_MonitorBtEnableDisable(pBtCoexist);
+
+	halbtc8723a2ant_RunCoexistMechanism(pBtCoexist);
+}
+
+
+#endif
+
diff --git a/drivers/staging/rtl8821ae/btcoexist/halbtc8723a2ant.h b/drivers/staging/rtl8821ae/btcoexist/halbtc8723a2ant.h
new file mode 100644
index 0000000..c07d373
--- /dev/null
+++ b/drivers/staging/rtl8821ae/btcoexist/halbtc8723a2ant.h
@@ -0,0 +1,179 @@
+//===========================================
+// The following are the 8723A 2Ant BT Co-exist definitions
+//===========================================
+#define	BT_INFO_8723A_2ANT_B_FTP						BIT7
+#define	BT_INFO_8723A_2ANT_B_A2DP					BIT6
+#define	BT_INFO_8723A_2ANT_B_HID						BIT5
+#define	BT_INFO_8723A_2ANT_B_SCO_BUSY				BIT4
+#define	BT_INFO_8723A_2ANT_B_ACL_BUSY				BIT3
+#define	BT_INFO_8723A_2ANT_B_INQ_PAGE				BIT2
+#define	BT_INFO_8723A_2ANT_B_SCO_ESCO				BIT1
+#define	BT_INFO_8723A_2ANT_B_CONNECTION				BIT0
+
+#define		BTC_RSSI_COEX_THRESH_TOL_8723A_2ANT		2
+
+typedef enum _BT_INFO_SRC_8723A_2ANT{
+	BT_INFO_SRC_8723A_2ANT_WIFI_FW			= 0x0,
+	BT_INFO_SRC_8723A_2ANT_BT_RSP				= 0x1,
+	BT_INFO_SRC_8723A_2ANT_BT_ACTIVE_SEND		= 0x2,
+	BT_INFO_SRC_8723A_2ANT_MAX
+}BT_INFO_SRC_8723A_2ANT,*PBT_INFO_SRC_8723A_2ANT;
+
+typedef enum _BT_8723A_2ANT_BT_STATUS{
+	BT_8723A_2ANT_BT_STATUS_IDLE				= 0x0,
+	BT_8723A_2ANT_BT_STATUS_CONNECTED_IDLE	= 0x1,
+	BT_8723A_2ANT_BT_STATUS_NON_IDLE			= 0x2,
+	BT_8723A_2ANT_BT_STATUS_MAX
+}BT_8723A_2ANT_BT_STATUS,*PBT_8723A_2ANT_BT_STATUS;
+
+typedef enum _BT_8723A_2ANT_COEX_ALGO{
+	BT_8723A_2ANT_COEX_ALGO_UNDEFINED			= 0x0,
+	BT_8723A_2ANT_COEX_ALGO_SCO				= 0x1,
+	BT_8723A_2ANT_COEX_ALGO_HID				= 0x2,
+	BT_8723A_2ANT_COEX_ALGO_A2DP				= 0x3,
+	BT_8723A_2ANT_COEX_ALGO_PANEDR			= 0x4,
+	BT_8723A_2ANT_COEX_ALGO_PANHS			= 0x5,
+	BT_8723A_2ANT_COEX_ALGO_PANEDR_A2DP		= 0x6,
+	BT_8723A_2ANT_COEX_ALGO_PANEDR_HID		= 0x7,
+	BT_8723A_2ANT_COEX_ALGO_HID_A2DP_PANEDR	= 0x8,
+	BT_8723A_2ANT_COEX_ALGO_HID_A2DP			= 0x9,
+	BT_8723A_2ANT_COEX_ALGO_MAX
+}BT_8723A_2ANT_COEX_ALGO,*PBT_8723A_2ANT_COEX_ALGO;
+
+typedef struct _COEX_DM_8723A_2ANT{
+	// fw mechanism
+	BOOLEAN		bPreDecBtPwr;
+	BOOLEAN		bCurDecBtPwr;
+	//BOOLEAN		bPreBtLnaConstrain;
+	//BOOLEAN		bCurBtLnaConstrain;
+	//u1Byte		bPreBtPsdMode;
+	//u1Byte		bCurBtPsdMode;
+	u1Byte		preFwDacSwingLvl;
+	u1Byte		curFwDacSwingLvl;
+	BOOLEAN		bCurIgnoreWlanAct;
+	BOOLEAN		bPreIgnoreWlanAct;
+	u1Byte		prePsTdma;
+	u1Byte		curPsTdma;
+	u1Byte		psTdmaPara[5];
+	u1Byte		psTdmaDuAdjType;
+	BOOLEAN		bResetTdmaAdjust;
+	BOOLEAN		bPrePsTdmaOn;
+	BOOLEAN		bCurPsTdmaOn;
+	//BOOLEAN		bPreBtAutoReport;
+	//BOOLEAN		bCurBtAutoReport;
+
+	// sw mechanism
+	BOOLEAN		bPreRfRxLpfShrink;
+	BOOLEAN		bCurRfRxLpfShrink;
+	u4Byte		btRf0x1eBackup;
+	BOOLEAN 	bPreLowPenaltyRa;
+	BOOLEAN		bCurLowPenaltyRa;
+	BOOLEAN		bPreDacSwingOn;
+	u4Byte		preDacSwingLvl;
+	BOOLEAN		bCurDacSwingOn;
+	u4Byte		curDacSwingLvl;
+	BOOLEAN		bPreAdcBackOff;
+	BOOLEAN		bCurAdcBackOff;
+	BOOLEAN 	bPreAgcTableEn;
+	BOOLEAN		bCurAgcTableEn;
+	u4Byte		preVal0x6c0;
+	u4Byte		curVal0x6c0;
+	u4Byte		preVal0x6c8;
+	u4Byte		curVal0x6c8;
+	u1Byte		preVal0x6cc;
+	u1Byte		curVal0x6cc;
+	BOOLEAN		limited_dig;
+
+	// algorithm related
+	u1Byte		preAlgorithm;
+	u1Byte		curAlgorithm;
+	u1Byte		btStatus;
+	u1Byte		wifiChnlInfo[3];
+} COEX_DM_8723A_2ANT, *PCOEX_DM_8723A_2ANT;
+
+typedef struct _COEX_STA_8723A_2ANT{
+	u4Byte					highPriorityTx;
+	u4Byte					highPriorityRx;
+	u4Byte					lowPriorityTx;
+	u4Byte					lowPriorityRx;
+	u1Byte					btRssi;
+	u1Byte					preBtRssiState;
+	u1Byte					preBtRssiState1;
+	u1Byte					preWifiRssiState[4];
+	BOOLEAN					bC2hBtInfoReqSent;
+	u1Byte					btInfoC2h[BT_INFO_SRC_8723A_2ANT_MAX][10];
+	u4Byte					btInfoC2hCnt[BT_INFO_SRC_8723A_2ANT_MAX];
+	BOOLEAN					bC2hBtInquiryPage;
+	u1Byte					btRetryCnt;
+	u1Byte					btInfoExt;
+	BOOLEAN					bHoldForStackOperation;
+	u1Byte					bHoldPeriodCnt;
+	// this is for c2h hang work-around
+	u4Byte					c2hHangDetectCnt;
+}COEX_STA_8723A_2ANT, *PCOEX_STA_8723A_2ANT;
+
+//===========================================
+// The following are the interfaces which notify the coex module.
+//===========================================
+VOID
+EXhalbtc8723a2ant_InitHwConfig(
+	IN	PBTC_COEXIST		pBtCoexist
+	);
+VOID
+EXhalbtc8723a2ant_InitCoexDm(
+	IN	PBTC_COEXIST		pBtCoexist
+	);
+VOID
+EXhalbtc8723a2ant_IpsNotify(
+	IN	PBTC_COEXIST		pBtCoexist,
+	IN	u1Byte			type
+	);
+VOID
+EXhalbtc8723a2ant_LpsNotify(
+	IN	PBTC_COEXIST		pBtCoexist,
+	IN	u1Byte			type
+	);
+VOID
+EXhalbtc8723a2ant_ScanNotify(
+	IN	PBTC_COEXIST		pBtCoexist,
+	IN	u1Byte			type
+	);
+VOID
+EXhalbtc8723a2ant_ConnectNotify(
+	IN	PBTC_COEXIST		pBtCoexist,
+	IN	u1Byte			type
+	);
+VOID
+EXhalbtc8723a2ant_MediaStatusNotify(
+	IN	PBTC_COEXIST			pBtCoexist,
+	IN	u1Byte				type
+	);
+VOID
+EXhalbtc8723a2ant_SpecialPacketNotify(
+	IN	PBTC_COEXIST			pBtCoexist,
+	IN	u1Byte				type
+	);
+VOID
+EXhalbtc8723a2ant_HaltNotify(
+	IN	PBTC_COEXIST			pBtCoexist
+	);
+VOID
+EXhalbtc8723a2ant_Periodical(
+	IN	PBTC_COEXIST			pBtCoexist
+	);
+VOID
+EXhalbtc8723a2ant_BtInfoNotify(
+	IN	PBTC_COEXIST		pBtCoexist,
+	IN	pu1Byte			tmpBuf,
+	IN	u1Byte			length
+	);
+VOID
+EXhalbtc8723a2ant_StackOperationNotify(
+	IN	PBTC_COEXIST			pBtCoexist,
+	IN	u1Byte				type
+	);
+VOID
+EXhalbtc8723a2ant_DisplayCoexInfo(
+	IN	PBTC_COEXIST		pBtCoexist
+	);
+
diff --git a/drivers/staging/rtl8821ae/btcoexist/halbtc8723b1ant.c b/drivers/staging/rtl8821ae/btcoexist/halbtc8723b1ant.c
new file mode 100644
index 0000000..3414ba7
--- /dev/null
+++ b/drivers/staging/rtl8821ae/btcoexist/halbtc8723b1ant.c
@@ -0,0 +1,4104 @@
+/***************************************************************
+ * Description:
+ *
+ * This file is for RTL8723B Co-exist mechanism
+ *
+ * History
+ * 2012/11/15 Cosa first check in.
+ *
+ ***************************************************************/
+
+
+/***************************************************************
+ * include files
+ ***************************************************************/
+#include "halbt_precomp.h"
+#if 1
+/***************************************************************
+ * Global variables, these are static variables
+ ***************************************************************/
+static struct coex_dm_8723b_1ant glcoex_dm_8723b_1ant;
+static struct coex_dm_8723b_1ant *coex_dm = &glcoex_dm_8723b_1ant;
+static struct coex_sta_8723b_1ant glcoex_sta_8723b_1ant;
+static struct coex_sta_8723b_1ant *coex_sta = &glcoex_sta_8723b_1ant;
+
+const char *const GLBtInfoSrc8723b1Ant[]={
+	"BT Info[wifi fw]",
+	"BT Info[bt rsp]",
+	"BT Info[bt auto report]",
+};
+
+u32 glcoex_ver_date_8723b_1ant = 20130906;
+u32 glcoex_ver_8723b_1ant = 0x45;
+
+/***************************************************************
+ * local function prototypes, if needed
+ ***************************************************************/
+/***************************************************************
+ * local function start with halbtc8723b1ant_
+ ***************************************************************/
+u8 halbtc8723b1ant_bt_rssi_state(u8 level_num, u8 rssi_thresh, u8 rssi_thresh1)
+{
+	s32 bt_rssi=0;
+	u8 bt_rssi_state = coex_sta->pre_bt_rssi_state;
+
+	bt_rssi = coex_sta->bt_rssi;
+
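+	/* State machine with hysteresis: moving to a higher RSSI state requires
+	 * the threshold plus BTC_RSSI_COEX_THRESH_TOL_8723B_1ANT, while falling
+	 * back only requires dropping below the bare threshold. */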
+	if (level_num == 2){
+		if ((coex_sta->pre_bt_rssi_state == BTC_RSSI_STATE_LOW) ||
+		    (coex_sta->pre_bt_rssi_state == BTC_RSSI_STATE_STAY_LOW)) {
+			if (bt_rssi >= rssi_thresh +
+					BTC_RSSI_COEX_THRESH_TOL_8723B_1ANT) {
+				bt_rssi_state = BTC_RSSI_STATE_HIGH;
+				BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_RSSI_STATE,
+					  "[BTCoex], BT Rssi state "
+					  "switch to High\n");
+			} else {
+				bt_rssi_state = BTC_RSSI_STATE_STAY_LOW;
+				BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_RSSI_STATE,
+					  "[BTCoex], BT Rssi state "
+					  "stay at Low\n");
+			}
+		} else {
+			if (bt_rssi < rssi_thresh) {
+				bt_rssi_state = BTC_RSSI_STATE_LOW;
+				BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_RSSI_STATE,
+					  "[BTCoex], BT Rssi state "
+					  "switch to Low\n");
+			} else {
+				bt_rssi_state = BTC_RSSI_STATE_STAY_HIGH;
+				BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_RSSI_STATE,
+					  "[BTCoex], BT Rssi state "
+					  "stay at High\n");
+			}
+		}
+	} else if (level_num == 3) {
+		if (rssi_thresh > rssi_thresh1) {
+			BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_RSSI_STATE,
+				  "[BTCoex], BT Rssi thresh error!!\n");
+			return coex_sta->pre_bt_rssi_state;
+		}
+
+		if ((coex_sta->pre_bt_rssi_state == BTC_RSSI_STATE_LOW) ||
+		    (coex_sta->pre_bt_rssi_state == BTC_RSSI_STATE_STAY_LOW)) {
+			if (bt_rssi >= rssi_thresh +
+					BTC_RSSI_COEX_THRESH_TOL_8723B_1ANT) {
+				bt_rssi_state = BTC_RSSI_STATE_MEDIUM;
+				BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_RSSI_STATE,
+					  "[BTCoex], BT Rssi state "
+					  "switch to Medium\n");
+			} else {
+				bt_rssi_state = BTC_RSSI_STATE_STAY_LOW;
+				BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_RSSI_STATE,
+					  "[BTCoex], BT Rssi state "
+					  "stay at Low\n");
+			}
+		} else if ((coex_sta->pre_bt_rssi_state ==
+					BTC_RSSI_STATE_MEDIUM) ||
+			  (coex_sta->pre_bt_rssi_state ==
+			  		BTC_RSSI_STATE_STAY_MEDIUM)) {
+			if (bt_rssi >= rssi_thresh1 +
+					BTC_RSSI_COEX_THRESH_TOL_8723B_1ANT) {
+				bt_rssi_state = BTC_RSSI_STATE_HIGH;
+				BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_RSSI_STATE,
+					  "[BTCoex], BT Rssi state "
+					  "switch to High\n");
+			} else if (bt_rssi < rssi_thresh) {
+				bt_rssi_state = BTC_RSSI_STATE_LOW;
+				BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_RSSI_STATE,
+					  "[BTCoex], BT Rssi state "
+					  "switch to Low\n");
+			} else {
+				bt_rssi_state = BTC_RSSI_STATE_STAY_MEDIUM;
+				BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_RSSI_STATE,
+					  "[BTCoex], BT Rssi state "
+					  "stay at Medium\n");
+			}
+		} else {
+			if (bt_rssi < rssi_thresh1) {
+				bt_rssi_state = BTC_RSSI_STATE_MEDIUM;
+				BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_RSSI_STATE,
+					  "[BTCoex], BT Rssi state "
+					  "switch to Medium\n");
+			} else {
+				bt_rssi_state = BTC_RSSI_STATE_STAY_HIGH;
+				BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_RSSI_STATE,
+					  "[BTCoex], BT Rssi state "
+					  "stay at High\n");
+			}
+		}
+	}
+
+	coex_sta->pre_bt_rssi_state = bt_rssi_state;
+
+	return bt_rssi_state;
+}
+
+u8 halbtc8723b1ant_wifi_rssi_state(struct btc_coexist *btcoexist,
+				   u8 index, u8 level_num,
+				   u8 rssi_thresh, u8 rssi_thresh1)
+{
+	s32 wifi_rssi=0;
+	u8 wifi_rssi_state = coex_sta->pre_wifi_rssi_state[index];
+
+	btcoexist->btc_get(btcoexist,
+		BTC_GET_S4_WIFI_RSSI, &wifi_rssi);
+
+	if (level_num == 2) {
+		if ((coex_sta->pre_wifi_rssi_state[index] ==
+					BTC_RSSI_STATE_LOW) ||
+		    (coex_sta->pre_wifi_rssi_state[index] ==
+		    			BTC_RSSI_STATE_STAY_LOW)) {
+			if (wifi_rssi >= rssi_thresh +
+					BTC_RSSI_COEX_THRESH_TOL_8723B_1ANT) {
+				wifi_rssi_state = BTC_RSSI_STATE_HIGH;
+				BTC_PRINT(BTC_MSG_ALGORITHM,
+					  ALGO_WIFI_RSSI_STATE,
+					  "[BTCoex], wifi RSSI state "
+					  "switch to High\n");
+			} else {
+				wifi_rssi_state = BTC_RSSI_STATE_STAY_LOW;
+				BTC_PRINT(BTC_MSG_ALGORITHM,
+					  ALGO_WIFI_RSSI_STATE,
+					  "[BTCoex], wifi RSSI state "
+					  "stay at Low\n");
+			}
+		} else {
+			if (wifi_rssi < rssi_thresh) {
+				wifi_rssi_state = BTC_RSSI_STATE_LOW;
+				BTC_PRINT(BTC_MSG_ALGORITHM,
+					  ALGO_WIFI_RSSI_STATE,
+					  "[BTCoex], wifi RSSI state "
+					  "switch to Low\n");
+			} else {
+				wifi_rssi_state = BTC_RSSI_STATE_STAY_HIGH;
+				BTC_PRINT(BTC_MSG_ALGORITHM,
+					  ALGO_WIFI_RSSI_STATE,
+					  "[BTCoex], wifi RSSI state "
+					  "stay at High\n");
+			}
+		}
+	} else if (level_num == 3) {
+		if (rssi_thresh > rssi_thresh1) {
+			BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_WIFI_RSSI_STATE,
+				  "[BTCoex], wifi RSSI thresh error!!\n");
+			return coex_sta->pre_wifi_rssi_state[index];
+		}
+
+		if ((coex_sta->pre_wifi_rssi_state[index] ==
+						BTC_RSSI_STATE_LOW) ||
+		    (coex_sta->pre_wifi_rssi_state[index] ==
+		    				BTC_RSSI_STATE_STAY_LOW)) {
+			if (wifi_rssi >= rssi_thresh +
+					 BTC_RSSI_COEX_THRESH_TOL_8723B_1ANT) {
+				wifi_rssi_state = BTC_RSSI_STATE_MEDIUM;
+				BTC_PRINT(BTC_MSG_ALGORITHM,
+					  ALGO_WIFI_RSSI_STATE,
+					  "[BTCoex], wifi RSSI state "
+					  "switch to Medium\n");
+			} else {
+				wifi_rssi_state = BTC_RSSI_STATE_STAY_LOW;
+				BTC_PRINT(BTC_MSG_ALGORITHM,
+					  ALGO_WIFI_RSSI_STATE,
+					  "[BTCoex], wifi RSSI state "
+					  "stay at Low\n");
+			}
+		} else if ((coex_sta->pre_wifi_rssi_state[index] ==
+						BTC_RSSI_STATE_MEDIUM) ||
+			   (coex_sta->pre_wifi_rssi_state[index] ==
+						BTC_RSSI_STATE_STAY_MEDIUM)) {
+			if (wifi_rssi >= rssi_thresh1 +
+					 BTC_RSSI_COEX_THRESH_TOL_8723B_1ANT) {
+				wifi_rssi_state = BTC_RSSI_STATE_HIGH;
+				BTC_PRINT(BTC_MSG_ALGORITHM,
+					  ALGO_WIFI_RSSI_STATE,
+					  "[BTCoex], wifi RSSI state "
+					  "switch to High\n");
+			} else if (wifi_rssi < rssi_thresh) {
+				wifi_rssi_state = BTC_RSSI_STATE_LOW;
+				BTC_PRINT(BTC_MSG_ALGORITHM,
+					  ALGO_WIFI_RSSI_STATE,
+					  "[BTCoex], wifi RSSI state "
+					  "switch to Low\n");
+			} else {
+				wifi_rssi_state = BTC_RSSI_STATE_STAY_MEDIUM;
+				BTC_PRINT(BTC_MSG_ALGORITHM,
+					  ALGO_WIFI_RSSI_STATE,
+					  "[BTCoex], wifi RSSI state "
+					  "stay at Medium\n");
+			}
+		} else {
+			if (wifi_rssi < rssi_thresh1) {
+				wifi_rssi_state = BTC_RSSI_STATE_MEDIUM;
+				BTC_PRINT(BTC_MSG_ALGORITHM,
+					  ALGO_WIFI_RSSI_STATE,
+					  "[BTCoex], wifi RSSI state "
+					  "switch to Medium\n");
+			} else {
+				wifi_rssi_state = BTC_RSSI_STATE_STAY_HIGH;
+				BTC_PRINT(BTC_MSG_ALGORITHM,
+					  ALGO_WIFI_RSSI_STATE,
+					  "[BTCoex], wifi RSSI state "
+					  "stay at High\n");
+			}
+		}
+	}
+
+	coex_sta->pre_wifi_rssi_state[index] = wifi_rssi_state;
+
+	return wifi_rssi_state;
+}
+
+void halbtc8723b1ant_updatera_mask(struct btc_coexist *btcoexist,
+				   bool force_exec, u32 dis_rate_mask)
+{
+	coex_dm->curra_mask = dis_rate_mask;
+
+	if (force_exec || (coex_dm->prera_mask != coex_dm->curra_mask))
+		btcoexist->btc_set(btcoexist, BTC_SET_ACT_UPDATE_ra_mask,
+				   &coex_dm->curra_mask);
+
+	coex_dm->prera_mask = coex_dm->curra_mask;
+}
+
+void halbtc8723b1ant_auto_rate_fallback_retry(struct btc_coexist *btcoexist,
+					      bool force_exec, u8 type)
+{
+	bool wifi_under_bmode = false;
+
+	coex_dm->cur_arfr_type = type;
+
+	if (force_exec || (coex_dm->pre_arfr_type != coex_dm->cur_arfr_type)) {
+		switch (coex_dm->cur_arfr_type) {
+		case 0:	/* normal mode */
+			btcoexist->btc_write_4byte(btcoexist, 0x430,
+						   coex_dm->backup_arfr_cnt1);
+			btcoexist->btc_write_4byte(btcoexist, 0x434,
+						   coex_dm->backup_arfr_cnt2);
+			break;
+		case 1:
+			btcoexist->btc_get(btcoexist,
+					   BTC_GET_BL_WIFI_UNDER_B_MODE,
+					   &wifi_under_bmode);
+			if (wifi_under_bmode) {
+				btcoexist->btc_write_4byte(btcoexist,
+							   0x430, 0x0);
+				btcoexist->btc_write_4byte(btcoexist,
+							   0x434, 0x01010101);
+			} else {
+				btcoexist->btc_write_4byte(btcoexist,
+							   0x430, 0x0);
+				btcoexist->btc_write_4byte(btcoexist,
+							   0x434, 0x04030201);
+			}
+			break;
+		default:
+			break;
+		}
+	}
+
+	coex_dm->pre_arfr_type = coex_dm->cur_arfr_type;
+}
+
+void halbtc8723b1ant_retry_limit(struct btc_coexist *btcoexist,
+				 bool force_exec, u8 type)
+{
+	coex_dm->cur_retry_limit_type = type;
+
+	if (force_exec || (coex_dm->pre_retry_limit_type !=
+			   coex_dm->cur_retry_limit_type)) {
+
+		switch (coex_dm->cur_retry_limit_type) {
+		case 0:	/* normal mode */
+			btcoexist->btc_write_2byte(btcoexist, 0x42a,
+						   coex_dm->backup_retry_limit);
+			break;
+		case 1:	/* retry limit=8 */
+			btcoexist->btc_write_2byte(btcoexist, 0x42a, 0x0808);
+			break;
+		default:
+			break;
+		}
+	}
+
+	coex_dm->pre_retry_limit_type = coex_dm->cur_retry_limit_type;
+}
+
+void halbtc8723b1ant_ampdu_maxtime(struct btc_coexist *btcoexist,
+				   bool force_exec, u8 type)
+{
+	coex_dm->cur_ampdu_time_type = type;
+
+	if (force_exec || (coex_dm->pre_ampdu_time_type !=
+		coex_dm->cur_ampdu_time_type)) {
+		switch (coex_dm->cur_ampdu_time_type) {
+			case 0:	/* normal mode */
+				btcoexist->btc_write_1byte(btcoexist, 0x456,
+						coex_dm->backup_ampdu_max_time);
+				break;
+			case 1:	/* AMPDU time = 0x38 * 32us */
+				btcoexist->btc_write_1byte(btcoexist,
+							   0x456, 0x38);
+				break;
+			default:
+				break;
+		}
+	}
+
+	coex_dm->pre_ampdu_time_type = coex_dm->cur_ampdu_time_type;
+}
+
+void halbtc8723b1ant_limited_tx(struct btc_coexist *btcoexist,
+				bool force_exec, u8 ra_maskType, u8 arfr_type,
+				u8 retry_limit_type, u8 ampdu_time_type)
+{
+	switch (ra_maskType) {
+	case 0:	/* normal mode */
+		halbtc8723b1ant_updatera_mask(btcoexist, force_exec, 0x0);
+		break;
+	case 1:	/* disable cck 1/2 */
+		halbtc8723b1ant_updatera_mask(btcoexist, force_exec,
+					      0x00000003);
+		break;
+	/* disable cck 1/2/5.5, ofdm 6/9/12/18/24, mcs 0/1/2/3/4*/
+	case 2:
+		halbtc8723b1ant_updatera_mask(btcoexist, force_exec,
+					      0x0001f1f7);
+		break;
+	default:
+		break;
+	}
+
+	halbtc8723b1ant_auto_rate_fallback_retry(btcoexist, force_exec,
+						 arfr_type);
+	halbtc8723b1ant_retry_limit(btcoexist, force_exec, retry_limit_type);
+	halbtc8723b1ant_ampdu_maxtime(btcoexist, force_exec, ampdu_time_type);
+}
+
+void halbtc8723b1ant_limited_rx(struct btc_coexist *btcoexist,
+				bool force_exec, bool rej_ap_agg_pkt,
+				bool b_bt_ctrl_agg_buf_size, u8 agg_buf_size)
+{
+	bool reject_rx_agg = rej_ap_agg_pkt;
+	bool bt_ctrl_rx_agg_size = b_bt_ctrl_agg_buf_size;
+	u8 rxAggSize = agg_buf_size;
+
+	/**********************************************
+	 *	Rx Aggregation related setting
+	 **********************************************/
+	btcoexist->btc_set(btcoexist, BTC_SET_BL_TO_REJ_AP_AGG_PKT,
+			   &reject_rx_agg);
+	/* decide BT control aggregation buf size or not  */
+	btcoexist->btc_set(btcoexist, BTC_SET_BL_BT_CTRL_AGG_SIZE,
+			   &bt_ctrl_rx_agg_size);
+	/* aggregation buf size, only works
+	 * when BT controls the Rx aggregation size. */
+	btcoexist->btc_set(btcoexist, BTC_SET_U1_AGG_BUF_SIZE, &rxAggSize);
+	/* real update aggregation setting  */
+	btcoexist->btc_set(btcoexist, BTC_SET_ACT_AGGREGATE_CTRL, NULL);
+}
+
+void halbtc8723b1ant_monitor_bt_ctr(struct btc_coexist *btcoexist)
+{
+	u32 reg_hp_txrx, reg_lp_txrx, u32tmp;
+	u32 reg_hp_tx = 0, reg_hp_rx = 0;
+	u32 reg_lp_tx = 0, reg_lp_rx = 0;
+
+	reg_hp_txrx = 0x770;
+	reg_lp_txrx = 0x774;
+
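+	/* each counter register packs tx in the low word and rx in the high word */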
+	u32tmp = btcoexist->btc_read_4byte(btcoexist, reg_hp_txrx);
+	reg_hp_tx = u32tmp & MASKLWORD;
+	reg_hp_rx = (u32tmp & MASKHWORD) >> 16;
+
+	u32tmp = btcoexist->btc_read_4byte(btcoexist, reg_lp_txrx);
+	reg_lp_tx = u32tmp & MASKLWORD;
+	reg_lp_rx = (u32tmp & MASKHWORD) >> 16;
+
+	coex_sta->high_priority_tx = reg_hp_tx;
+	coex_sta->high_priority_rx = reg_hp_rx;
+	coex_sta->low_priority_tx = reg_lp_tx;
+	coex_sta->low_priority_rx = reg_lp_rx;
+
+	/* reset counter */
+	btcoexist->btc_write_1byte(btcoexist, 0x76e, 0xc);
+}
+
+void halbtc8723b1ant_query_bt_info(struct btc_coexist *btcoexist)
+{
+	u8 h2c_parameter[1] = {0};
+
+	coex_sta->c2h_bt_info_req_sent = true;
+
+	h2c_parameter[0] |= BIT0;	/* trigger*/
+
+	BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_EXEC,
+		  "[BTCoex], Query Bt Info, FW write 0x61=0x%x\n",
+		  h2c_parameter[0]);
+
+	btcoexist->btc_fill_h2c(btcoexist, 0x61, 1, h2c_parameter);
+}
+
+bool halbtc8723b1ant_is_wifi_status_changed(struct btc_coexist *btcoexist)
+{
+	static bool pre_wifi_busy = false;
+	static bool pre_under_4way = false, pre_bt_hs_on = false;
+	bool wifi_busy = false, under_4way = false, bt_hs_on = false;
+	bool wifi_connected = false;
+
+	btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_CONNECTED,
+			   &wifi_connected);
+	btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_BUSY, &wifi_busy);
+ 	btcoexist->btc_get(btcoexist, BTC_GET_BL_HS_OPERATION, &bt_hs_on);
+	btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_4_WAY_PROGRESS,
+			   &under_4way);
+
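+	/* only report a change while connected; compare against the values
+	 * cached from the previous poll */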
+	if (wifi_connected) {
+		if (wifi_busy != pre_wifi_busy) {
+			pre_wifi_busy = wifi_busy;
+			return true;
+		}
+		if (under_4way != pre_under_4way) {
+			pre_under_4way = under_4way;
+			return true;
+		}
+		if (bt_hs_on != pre_bt_hs_on) {
+			pre_bt_hs_on = bt_hs_on;
+			return true;
+		}
+	}
+
+	return false;
+}
+
+void halbtc8723b1ant_update_bt_link_info(struct btc_coexist *btcoexist)
+{
+	struct btc_bt_link_info *bt_link_info = &btcoexist->bt_link_info;
+	bool bt_hs_on = false;
+
+	btcoexist->btc_get(btcoexist, BTC_GET_BL_HS_OPERATION, &bt_hs_on);
+
+	bt_link_info->bt_link_exist = coex_sta->bt_link_exist;
+	bt_link_info->sco_exist = coex_sta->sco_exist;
+	bt_link_info->a2dp_exist = coex_sta->a2dp_exist;
+	bt_link_info->pan_exist = coex_sta->pan_exist;
+	bt_link_info->hid_exist = coex_sta->hid_exist;
+
+	/* work around for HS mode. */
+	if (bt_hs_on) {
+		bt_link_info->pan_exist = true;
+		bt_link_info->bt_link_exist = true;
+	}
+
+	/* check if Sco only */
+	if (bt_link_info->sco_exist && !bt_link_info->a2dp_exist &&
+	    !bt_link_info->pan_exist && !bt_link_info->hid_exist)
+		bt_link_info->sco_only = true;
+	else
+		bt_link_info->sco_only = false;
+
+	/* check if A2dp only */
+	if (!bt_link_info->sco_exist && bt_link_info->a2dp_exist &&
+	    !bt_link_info->pan_exist && !bt_link_info->hid_exist)
+		bt_link_info->a2dp_only = true;
+	else
+		bt_link_info->a2dp_only = false;
+
+	/* check if Pan only */
+	if (!bt_link_info->sco_exist && !bt_link_info->a2dp_exist &&
+	    bt_link_info->pan_exist && !bt_link_info->hid_exist)
+		bt_link_info->pan_only = true;
+	else
+		bt_link_info->pan_only = false;
+
+	/* check if Hid only */
+	if (!bt_link_info->sco_exist && !bt_link_info->a2dp_exist &&
+	    !bt_link_info->pan_exist && bt_link_info->hid_exist )
+		bt_link_info->hid_only = true;
+	else
+		bt_link_info->hid_only = false;
+}
+
+u8 halbtc8723b1ant_action_algorithm(struct btc_coexist *btcoexist)
+{
+	struct btc_bt_link_info *bt_link_info = &btcoexist->bt_link_info;
+	bool bt_hs_on = false;
+	u8 algorithm = BT_8723B_1ANT_COEX_ALGO_UNDEFINED;
+	u8 numOfDiffProfile = 0;
+
+	btcoexist->btc_get(btcoexist, BTC_GET_BL_HS_OPERATION, &bt_hs_on);
+
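+	/* map the set of active BT profiles (SCO/HID/A2DP/PAN) to a coex algorithm */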
+	if (!bt_link_info->bt_link_exist) {
+		BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
+			  "[BTCoex], No BT link exists!!!\n");
+		return algorithm;
+	}
+
+	if (bt_link_info->sco_exist)
+		numOfDiffProfile++;
+	if (bt_link_info->hid_exist)
+		numOfDiffProfile++;
+	if (bt_link_info->pan_exist)
+		numOfDiffProfile++;
+	if (bt_link_info->a2dp_exist)
+		numOfDiffProfile++;
+
+	if (numOfDiffProfile == 1) {
+		if (bt_link_info->sco_exist) {
+			BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
+				  "[BTCoex], BT Profile = SCO only\n");
+			algorithm = BT_8723B_1ANT_COEX_ALGO_SCO;
+		} else {
+			if (bt_link_info->hid_exist) {
+				BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
+					  "[BTCoex], BT Profile = HID only\n");
+				algorithm = BT_8723B_1ANT_COEX_ALGO_HID;
+			} else if (bt_link_info->a2dp_exist) {
+				BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
+					  "[BTCoex], BT Profile = A2DP only\n");
+				algorithm = BT_8723B_1ANT_COEX_ALGO_A2DP;
+			} else if (bt_link_info->pan_exist) {
+				if (bt_hs_on) {
+					BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
+						  "[BTCoex], BT Profile = "
+						  "PAN(HS) only\n");
+					algorithm =
+						BT_8723B_1ANT_COEX_ALGO_PANHS;
+				} else {
+					BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
+						  "[BTCoex], BT Profile = "
+						  "PAN(EDR) only\n");
+					algorithm =
+						BT_8723B_1ANT_COEX_ALGO_PANEDR;
+				}
+			}
+		}
+	} else if (numOfDiffProfile == 2) {
+		if (bt_link_info->sco_exist) {
+			if (bt_link_info->hid_exist) {
+				BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
+					  "[BTCoex], BT Profile = SCO + HID\n");
+				algorithm = BT_8723B_1ANT_COEX_ALGO_HID;
+			} else if (bt_link_info->a2dp_exist) {
+				BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
+					  "[BTCoex], BT Profile = "
+					  "SCO + A2DP ==> SCO\n");
+				algorithm = BT_8723B_1ANT_COEX_ALGO_SCO;
+			} else if (bt_link_info->pan_exist) {
+				if (bt_hs_on) {
+					BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
+						  "[BTCoex], BT Profile "
+						  "= SCO + PAN(HS)\n");
+					algorithm = BT_8723B_1ANT_COEX_ALGO_SCO;
+				} else {
+					BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
+						  "[BTCoex], BT Profile "
+						  "= SCO + PAN(EDR)\n");
+					algorithm =
+					    BT_8723B_1ANT_COEX_ALGO_PANEDR_HID;
+				}
+			}
+		} else {
+			if (bt_link_info->hid_exist &&
+			    bt_link_info->a2dp_exist) {
+				BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
+					  "[BTCoex], BT Profile = "
+					  "HID + A2DP\n");
+				algorithm = BT_8723B_1ANT_COEX_ALGO_HID_A2DP;
+			} else if (bt_link_info->hid_exist &&
+				   bt_link_info->pan_exist) {
+				if (bt_hs_on) {
+					BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
+						  "[BTCoex], BT Profile = "
+						  "HID + PAN(HS)\n");
+					algorithm =
+					    BT_8723B_1ANT_COEX_ALGO_HID_A2DP;
+				} else {
+					BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
+						  "[BTCoex], BT Profile = "
+						  "HID + PAN(EDR)\n");
+					algorithm =
+					    BT_8723B_1ANT_COEX_ALGO_PANEDR_HID;
+				}
+			} else if (bt_link_info->pan_exist &&
+				   bt_link_info->a2dp_exist) {
+				if (bt_hs_on) {
+					BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
+						  "[BTCoex], BT Profile = "
+						  "A2DP + PAN(HS)\n");
+					algorithm =
+					    BT_8723B_1ANT_COEX_ALGO_A2DP_PANHS;
+				} else {
+					BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
+						  "[BTCoex], BT Profile = "
+						  "A2DP + PAN(EDR)\n");
+					algorithm =
+					    BT_8723B_1ANT_COEX_ALGO_PANEDR_A2DP;
+				}
+			}
+		}
+	} else if (numOfDiffProfile == 3) {
+		if (bt_link_info->sco_exist) {
+			if (bt_link_info->hid_exist &&
+			    bt_link_info->a2dp_exist) {
+				BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
+					  "[BTCoex], BT Profile = "
+					  "SCO + HID + A2DP ==> HID\n");
+				algorithm = BT_8723B_1ANT_COEX_ALGO_HID;
+			} else if (bt_link_info->hid_exist &&
+				   bt_link_info->pan_exist) {
+				if (bt_hs_on) {
+					BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
+						  "[BTCoex], BT Profile = "
+						  "SCO + HID + PAN(HS)\n");
+					algorithm =
+					    BT_8723B_1ANT_COEX_ALGO_HID_A2DP;
+				} else {
+					BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
+						  "[BTCoex], BT Profile = "
+						  "SCO + HID + PAN(EDR)\n");
+					algorithm =
+					    BT_8723B_1ANT_COEX_ALGO_PANEDR_HID;
+				}
+			} else if (bt_link_info->pan_exist &&
+				   bt_link_info->a2dp_exist) {
+				if (bt_hs_on) {
+					BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
+						  "[BTCoex], BT Profile = "
+						  "SCO + A2DP + PAN(HS)\n");
+					algorithm = BT_8723B_1ANT_COEX_ALGO_SCO;
+				} else {
+					BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
+						  "[BTCoex], BT Profile = SCO + "
+						  "A2DP + PAN(EDR) ==> HID\n");
+					algorithm =
+					    BT_8723B_1ANT_COEX_ALGO_PANEDR_HID;
+				}
+			}
+		} else {
+			if (bt_link_info->hid_exist &&
+			    bt_link_info->pan_exist &&
+			    bt_link_info->a2dp_exist) {
+				if (bt_hs_on) {
+					BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
+						  "[BTCoex], BT Profile = "
+						  "HID + A2DP + PAN(HS)\n");
+					algorithm =
+					    BT_8723B_1ANT_COEX_ALGO_HID_A2DP;
+				} else {
+					BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
+						  "[BTCoex], BT Profile = "
+						  "HID + A2DP + PAN(EDR)\n");
+					algorithm =
+					    BT_8723B_1ANT_COEX_ALGO_HID_A2DP_PANEDR;
+				}
+			}
+		}
+	} else if (numOfDiffProfile >= 3) {
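+		/* only reachable with all four profiles active, since
+		 * the numOfDiffProfile == 3 branch above handles three */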
+		if (bt_link_info->sco_exist) {
+			if (bt_link_info->hid_exist &&
+			    bt_link_info->pan_exist &&
+			    bt_link_info->a2dp_exist) {
+				if (bt_hs_on) {
+					BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
+						  "[BTCoex], Error!!! "
+						  "BT Profile = SCO + "
+						  "HID + A2DP + PAN(HS)\n");
+				} else {
+					BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
+						  "[BTCoex], BT Profile = "
+						  "SCO + HID + A2DP + PAN(EDR)"
+						  "==>PAN(EDR)+HID\n");
+					algorithm =
+					    BT_8723B_1ANT_COEX_ALGO_PANEDR_HID;
+				}
+			}
+		}
+	}
+
+	return algorithm;
+}
+
+bool halbtc8723b1ant_need_to_dec_bt_pwr(struct btc_coexist *btcoexist)
+{
+	bool ret = false;
+	bool bt_hs_on = false, wifi_connected = false;
+	s32 bt_hs_rssi = 0;
+	u8 bt_rssi_state;
+
+	if (!btcoexist->btc_get(btcoexist, BTC_GET_BL_HS_OPERATION, &bt_hs_on))
+		return false;
+	if (!btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_CONNECTED,
+				&wifi_connected))
+		return false;
+	if (!btcoexist->btc_get(btcoexist, BTC_GET_S4_HS_RSSI, &bt_hs_rssi))
+		return false;
+
+	bt_rssi_state = halbtc8723b1ant_bt_rssi_state(2, 35, 0);
+
+	if (wifi_connected) {
+		if (bt_hs_on) {
+			if (bt_hs_rssi > 37)
+				ret = true;
+		} else {
+			if ((bt_rssi_state == BTC_RSSI_STATE_HIGH) ||
+			    (bt_rssi_state == BTC_RSSI_STATE_STAY_HIGH))
+				ret = true;
+		}
+	}
+
+	return ret;
+}
+
+void halbtc8723b1ant_set_fw_dac_swing_level(struct btc_coexist *btcoexist,
+					    u8 dac_swing_lvl)
+{
+	u8 h2c_parameter[1] = {0};
+
+	/* There are several types of dacswing:
+	 * 0x18/ 0x10/ 0xc/ 0x8/ 0x4/ 0x6 */
+	h2c_parameter[0] = dac_swing_lvl;
+
+	BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_EXEC,
+		  "[BTCoex], Set Dac Swing Level=0x%x\n", dac_swing_lvl);
+	BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_EXEC,
+		  "[BTCoex], FW write 0x64=0x%x\n", h2c_parameter[0]);
+
+	btcoexist->btc_fill_h2c(btcoexist, 0x64, 1, h2c_parameter);
+}
+
+void halbtc8723b1ant_set_fw_dec_bt_pwr(struct btc_coexist *btcoexist,
+				       bool dec_bt_pwr)
+{
+	u8 h2c_parameter[1] = {0};
+
+	h2c_parameter[0] = 0;
+
+	if (dec_bt_pwr)
+		h2c_parameter[0] |= BIT1;
+
+	BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_EXEC,
+		  "[BTCoex], decrease Bt Power : %s, FW write 0x62=0x%x\n",
+		  (dec_bt_pwr ? "Yes!!" : "No!!"), h2c_parameter[0]);
+
+	btcoexist->btc_fill_h2c(btcoexist, 0x62, 1, h2c_parameter);
+}
+
+void halbtc8723b1ant_dec_bt_pwr(struct btc_coexist *btcoexist,
+				bool force_exec, bool dec_bt_pwr)
+{
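+	/* note: the early return below short-circuits this routine, so the
+	 * trace output and the firmware update that follow never run */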
+	return;
+	BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW,
+		  "[BTCoex], %s Dec BT power = %s\n",
+		  (force_exec ? "force to" : ""), (dec_bt_pwr ? "ON" : "OFF"));
+	coex_dm->cur_dec_bt_pwr = dec_bt_pwr;
+
+	if (!force_exec) {
+		BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_DETAIL,
+			  "[BTCoex], bPreDecBtPwr=%d, bCurDecBtPwr=%d\n",
+			  coex_dm->pre_dec_bt_pwr, coex_dm->cur_dec_bt_pwr);
+
+		if (coex_dm->pre_dec_bt_pwr == coex_dm->cur_dec_bt_pwr)
+			return;
+	}
+	halbtc8723b1ant_set_fw_dec_bt_pwr(btcoexist, coex_dm->cur_dec_bt_pwr);
+
+	coex_dm->pre_dec_bt_pwr = coex_dm->cur_dec_bt_pwr;
+}
+
+void halbtc8723b1ant_set_bt_auto_report(struct btc_coexist *btcoexist,
+					bool enable_auto_report)
+{
+	u8 h2c_parameter[1] = {0};
+
+	h2c_parameter[0] = 0;
+
+	if (enable_auto_report)
+		h2c_parameter[0] |= BIT0;
+
+	BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_EXEC,
+		  "[BTCoex], BT FW auto report : %s, FW write 0x68=0x%x\n",
+		  (enable_auto_report ? "Enabled!!" : "Disabled!!"),
+		  h2c_parameter[0]);
+
+	btcoexist->btc_fill_h2c(btcoexist, 0x68, 1, h2c_parameter);
+}
+
+void halbtc8723b1ant_bt_auto_report(struct btc_coexist *btcoexist,
+				    bool force_exec, bool enable_auto_report)
+{
+	BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW,
+		  "[BTCoex], %s BT Auto report = %s\n",
+		  (force_exec ? "force to" : ""),
+		  (enable_auto_report ? "Enabled" : "Disabled"));
+	coex_dm->cur_bt_auto_report = enable_auto_report;
+
+	if (!force_exec) {
+		BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_DETAIL,
+			  "[BTCoex], bPreBtAutoReport=%d, "
+			  "bCurBtAutoReport=%d\n",
+			  coex_dm->pre_bt_auto_report,
+			  coex_dm->cur_bt_auto_report);
+
+		if (coex_dm->pre_bt_auto_report == coex_dm->cur_bt_auto_report)
+			return;
+	}
+	halbtc8723b1ant_set_bt_auto_report(btcoexist,
+					   coex_dm->cur_bt_auto_report);
+
+	coex_dm->pre_bt_auto_report = coex_dm->cur_bt_auto_report;
+}
+
+void halbtc8723b1ant_fw_dac_swing_lvl(struct btc_coexist *btcoexist,
+				      bool force_exec, u8 fw_dac_swing_lvl)
+{
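+	/* note: short-circuited by the early return below; the DAC swing
+	 * level update that follows is never reached */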
+	return;
+	BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW,
+		  "[BTCoex], %s set FW Dac Swing level = %d\n",
+		  (force_exec ? "force to" : ""), fw_dac_swing_lvl);
+	coex_dm->cur_fw_dac_swing_lvl = fw_dac_swing_lvl;
+
+	if (!force_exec) {
+		BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_DETAIL,
+			  "[BTCoex], preFwDacSwingLvl=%d, "
+			  "curFwDacSwingLvl=%d\n",
+			  coex_dm->pre_fw_dac_swing_lvl,
+			  coex_dm->cur_fw_dac_swing_lvl);
+
+		if (coex_dm->pre_fw_dac_swing_lvl ==
+		    coex_dm->cur_fw_dac_swing_lvl)
+			return;
+	}
+
+	halbtc8723b1ant_set_fw_dac_swing_level(btcoexist,
+					       coex_dm->cur_fw_dac_swing_lvl);
+
+	coex_dm->pre_fw_dac_swing_lvl = coex_dm->cur_fw_dac_swing_lvl;
+}
+
+void halbtc8723b1ant_set_sw_rf_rx_lpf_corner(struct btc_coexist *btcoexist,
+					     bool rx_rf_shrink_on)
+{
+	if (rx_rf_shrink_on) {
+		/*Shrink RF Rx LPF corner */
+		BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_EXEC,
+			  "[BTCoex], Shrink RF Rx LPF corner!!\n");
+		btcoexist->btc_set_rf_reg(btcoexist, BTC_RF_A, 0x1e,
+					  0xfffff, 0xffff7);
+	} else {
+		/*Resume RF Rx LPF corner
+		 * After initialized, we can use coex_dm->btRf0x1eBackup */
+		if (btcoexist->initilized) {
+			BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_EXEC,
+				  "[BTCoex], Resume RF Rx LPF corner!!\n");
+			btcoexist->btc_set_rf_reg(btcoexist, BTC_RF_A,
+						  0x1e, 0xfffff,
+						  coex_dm->bt_rf0x1e_backup);
+		}
+	}
+}
+
+void halbtc8723b1ant_rf_shrink(struct btc_coexist *btcoexist,
+			       bool force_exec, bool rx_rf_shrink_on)
+{
+	BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW,
+		  "[BTCoex], %s turn Rx RF Shrink = %s\n",
+		  (force_exec ? "force to" : ""),
+		  (rx_rf_shrink_on ? "ON" : "OFF"));
+	coex_dm->cur_rf_rx_lpf_shrink = rx_rf_shrink_on;
+
+	if (!force_exec) {
+		BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_DETAIL,
+			  "[BTCoex], bPreRfRxLpfShrink=%d, "
+			  "bCurRfRxLpfShrink=%d\n",
+			  coex_dm->pre_rf_rx_lpf_shrink,
+			  coex_dm->cur_rf_rx_lpf_shrink);
+
+		if (coex_dm->pre_rf_rx_lpf_shrink ==
+		    coex_dm->cur_rf_rx_lpf_shrink)
+			return;
+	}
+	halbtc8723b1ant_set_sw_rf_rx_lpf_corner(btcoexist,
+						coex_dm->cur_rf_rx_lpf_shrink);
+
+	coex_dm->pre_rf_rx_lpf_shrink = coex_dm->cur_rf_rx_lpf_shrink;
+}
+
+void halbtc8723b1ant_set_sw_penalty_tx_rate_adaptive(
+					struct btc_coexist *btcoexist,
+					bool low_penalty_ra)
+{
+	u8 h2c_parameter[6] = {0};
+
+	h2c_parameter[0] = 0x6;	/* opCode, 0x6= Retry_Penalty */
+
+	if (low_penalty_ra) {
+		h2c_parameter[1] |= BIT0;
+		/*normal rate except MCS7/6/5, OFDM54/48/36 */
+		h2c_parameter[2] = 0x00;
+		h2c_parameter[3] = 0xf7;  /*MCS7 or OFDM54 */
+		h2c_parameter[4] = 0xf8;  /*MCS6 or OFDM48 */
+		h2c_parameter[5] = 0xf9;  /*MCS5 or OFDM36 */
+	}
+
+	BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_EXEC,
+		  "[BTCoex], set WiFi Low-Penalty Retry: %s",
+		  (low_penalty_ra ? "ON!!" : "OFF!!"));
+
+	btcoexist->btc_fill_h2c(btcoexist, 0x69, 6, h2c_parameter);
+}
+
+void halbtc8723b1ant_low_penalty_ra(struct btc_coexist *btcoexist,
+				    bool force_exec, bool low_penalty_ra)
+{
+	coex_dm->cur_low_penalty_ra = low_penalty_ra;
+
+	if (!force_exec) {
+		if (coex_dm->pre_low_penalty_ra == coex_dm->cur_low_penalty_ra)
+			return;
+	}
+	halbtc8723b1ant_set_sw_penalty_tx_rate_adaptive(btcoexist,
+						coex_dm->cur_low_penalty_ra);
+
+	coex_dm->pre_low_penalty_ra = coex_dm->cur_low_penalty_ra;
+}
+
+void halbtc8723b1ant_set_dac_swing_reg(struct btc_coexist *btcoexist, u32 level)
+{
+	u8 val = (u8) level;
+
+	BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_EXEC,
+		  "[BTCoex], Write SwDacSwing = 0x%x\n", level);
+	btcoexist->btc_write_1byte_bitmask(btcoexist, 0x883, 0x3e, val);
+}
+
+void halbtc8723b1ant_set_sw_full_time_dac_swing(struct btc_coexist *btcoexist,
+						bool sw_dac_swing_on,
+						u32 sw_dac_swing_lvl)
+{
+	if (sw_dac_swing_on)
+		halbtc8723b1ant_set_dac_swing_reg(btcoexist, sw_dac_swing_lvl);
+	else
+		halbtc8723b1ant_set_dac_swing_reg(btcoexist, 0x18);
+}
+
+void halbtc8723b1ant_dac_swing(struct btc_coexist *btcoexist, bool force_exec,
+			       bool dac_swing_on, u32 dac_swing_lvl)
+{
+	BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW,
+		  "[BTCoex], %s turn DacSwing=%s, dac_swing_lvl=0x%x\n",
+		  (force_exec ? "force to" : ""), (dac_swing_on ? "ON" : "OFF"),
+		  dac_swing_lvl);
+
+	coex_dm->cur_dac_swing_on = dac_swing_on;
+	coex_dm->cur_dac_swing_lvl = dac_swing_lvl;
+
+	if (!force_exec) {
+		BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_DETAIL,
+			  "[BTCoex], bPreDacSwingOn=%d, preDacSwingLvl=0x%x, "
+			  "bCurDacSwingOn=%d, curDacSwingLvl=0x%x\n",
+			  coex_dm->pre_dac_swing_on, coex_dm->pre_dac_swing_lvl,
+			  coex_dm->cur_dac_swing_on,
+			  coex_dm->cur_dac_swing_lvl);
+
+		if ((coex_dm->pre_dac_swing_on == coex_dm->cur_dac_swing_on) &&
+		    (coex_dm->pre_dac_swing_lvl == coex_dm->cur_dac_swing_lvl))
+			return;
+	}
+	mdelay(30);
+	halbtc8723b1ant_set_sw_full_time_dac_swing(btcoexist, dac_swing_on,
+						   dac_swing_lvl);
+
+	coex_dm->pre_dac_swing_on = coex_dm->cur_dac_swing_on;
+	coex_dm->pre_dac_swing_lvl = coex_dm->cur_dac_swing_lvl;
+}
+
+void halbtc8723b1ant_set_adc_backoff(struct btc_coexist *btcoexist,
+				     bool adc_backoff)
+{
+	if (adc_backoff) {
+		BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_EXEC,
+			  "[BTCoex], BB BackOff Level On!\n");
+		btcoexist->btc_write_1byte_bitmask(btcoexist, 0x8db, 0x60, 0x3);
+	} else {
+		BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_EXEC,
+			  "[BTCoex], BB BackOff Level Off!\n");
+		btcoexist->btc_write_1byte_bitmask(btcoexist, 0x8db, 0x60, 0x1);
+	}
+}
+
+void halbtc8723b1ant_adc_backoff(struct btc_coexist *btcoexist,
+				 bool force_exec, bool adc_backoff)
+{
+	BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW,
+		  "[BTCoex], %s turn AdcBackOff = %s\n",
+		  (force_exec ? "force to" : ""), (adc_backoff ? "ON" : "OFF"));
+	coex_dm->cur_adc_backoff = adc_backoff;
+
+	if (!force_exec) {
+		BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_DETAIL,
+			  "[BTCoex], bPreAdcBackOff=%d, bCurAdcBackOff=%d\n",
+			  coex_dm->pre_adc_backoff, coex_dm->cur_adc_backoff);
+
+		if (coex_dm->pre_adc_backoff == coex_dm->cur_adc_backoff)
+			return;
+	}
+	halbtc8723b1ant_set_adc_backoff(btcoexist, coex_dm->cur_adc_backoff);
+
+	coex_dm->pre_adc_backoff = coex_dm->cur_adc_backoff;
+}
+
+void halbtc8723b1ant_set_agc_table(struct btc_coexist *btcoexist,
+				   bool adc_table_en)
+{
+	u8 rssi_adjust_val = 0;
+
+	btcoexist->btc_set_rf_reg(btcoexist, BTC_RF_A, 0xef,
+				  0xfffff, 0x02000);
+	if (adc_table_en) {
+		BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_EXEC,
+			  "[BTCoex], Agc Table On!\n");
+		btcoexist->btc_set_rf_reg(btcoexist, BTC_RF_A, 0x3b,
+					  0xfffff, 0x3fa58);
+		btcoexist->btc_set_rf_reg(btcoexist, BTC_RF_A, 0x3b,
+					  0xfffff, 0x37a58);
+		btcoexist->btc_set_rf_reg(btcoexist, BTC_RF_A, 0x3b,
+					  0xfffff, 0x2fa58);
+		rssi_adjust_val = 8;
+	} else {
+		BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_EXEC,
+			  "[BTCoex], Agc Table Off!\n");
+		btcoexist->btc_set_rf_reg(btcoexist, BTC_RF_A, 0x3b,
+					  0xfffff, 0x39258);
+		btcoexist->btc_set_rf_reg(btcoexist, BTC_RF_A, 0x3b,
+					  0xfffff, 0x31258);
+		btcoexist->btc_set_rf_reg(btcoexist, BTC_RF_A, 0x3b,
+					  0xfffff, 0x29258);
+	}
+	btcoexist->btc_set_rf_reg(btcoexist, BTC_RF_A, 0xef, 0xfffff, 0x0);
+
+	/* set rssi_adjust_val for wifi module. */
+	btcoexist->btc_set(btcoexist, BTC_SET_U1_RSSI_ADJ_VAL_FOR_AGC_TABLE_ON,
+			   &rssi_adjust_val);
+}
+
+void halbtc8723b1ant_agc_table(struct btc_coexist *btcoexist,
+			       bool force_exec, bool adc_table_en)
+{
+	BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW,
+		  "[BTCoex], %s %s Agc Table\n",
+		  (force_exec ? "force to" : ""),
+		  (adc_table_en ? "Enable" : "Disable"));
+	coex_dm->cur_agc_table_en = adc_table_en;
+
+	if (!force_exec) {
+		BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_DETAIL,
+			  "[BTCoex], bPreAgcTableEn=%d, bCurAgcTableEn=%d\n",
+			  coex_dm->pre_agc_table_en,
+			  coex_dm->cur_agc_table_en);
+
+		if (coex_dm->pre_agc_table_en == coex_dm->cur_agc_table_en)
+			return;
+	}
+	halbtc8723b1ant_set_agc_table(btcoexist, adc_table_en);
+
+	coex_dm->pre_agc_table_en = coex_dm->cur_agc_table_en;
+}
+
+void halbtc8723b1ant_set_coex_table(struct btc_coexist *btcoexist,
+				    u32 val0x6c0, u32 val0x6c4,
+				    u32 val0x6c8, u8 val0x6cc)
+{
+	BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_EXEC,
+		  "[BTCoex], set coex table, set 0x6c0=0x%x\n", val0x6c0);
+	btcoexist->btc_write_4byte(btcoexist, 0x6c0, val0x6c0);
+
+	BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_EXEC,
+		  "[BTCoex], set coex table, set 0x6c4=0x%x\n", val0x6c4);
+	btcoexist->btc_write_4byte(btcoexist, 0x6c4, val0x6c4);
+
+	BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_EXEC,
+		  "[BTCoex], set coex table, set 0x6c8=0x%x\n", val0x6c8);
+	btcoexist->btc_write_4byte(btcoexist, 0x6c8, val0x6c8);
+
+	BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_EXEC,
+		  "[BTCoex], set coex table, set 0x6cc=0x%x\n", val0x6cc);
+	btcoexist->btc_write_1byte(btcoexist, 0x6cc, val0x6cc);
+}
+
+void halbtc8723b1ant_coex_table(struct btc_coexist *btcoexist,
+				bool force_exec, u32 val0x6c0,
+				u32 val0x6c4, u32 val0x6c8,
+				u8 val0x6cc)
+{
+	BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW,
+		  "[BTCoex], %s write Coex Table 0x6c0=0x%x,"
+		  " 0x6c4=0x%x, 0x6cc=0x%x\n", (force_exec ? "force to" : ""),
+		  val0x6c0, val0x6c4, val0x6cc);
+	coex_dm->cur_val0x6c0 = val0x6c0;
+	coex_dm->cur_val0x6c4 = val0x6c4;
+	coex_dm->cur_val0x6c8 = val0x6c8;
+	coex_dm->cur_val0x6cc = val0x6cc;
+
+	if (!force_exec) {
+		if ((coex_dm->pre_val0x6c0 == coex_dm->cur_val0x6c0) &&
+		    (coex_dm->pre_val0x6c4 == coex_dm->cur_val0x6c4) &&
+		    (coex_dm->pre_val0x6c8 == coex_dm->cur_val0x6c8) &&
+		    (coex_dm->pre_val0x6cc == coex_dm->cur_val0x6cc))
+			return;
+	}
+	halbtc8723b1ant_set_coex_table(btcoexist, val0x6c0, val0x6c4,
+				       val0x6c8, val0x6cc);
+
+	coex_dm->pre_val0x6c0 = coex_dm->cur_val0x6c0;
+	coex_dm->pre_val0x6c4 = coex_dm->cur_val0x6c4;
+	coex_dm->pre_val0x6c8 = coex_dm->cur_val0x6c8;
+	coex_dm->pre_val0x6cc = coex_dm->cur_val0x6cc;
+}
+
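+/* Apply one of the predefined coex table presets to registers
+ * 0x6c0/0x6c4/0x6c8/0x6cc, selected by 'type'; unknown types are ignored. */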
+void halbtc8723b1ant_coex_table_with_type(struct btc_coexist *btcoexist,
+					  bool force_exec, u8 type)
+{
+	switch (type) {
+	case 0:
+		halbtc8723b1ant_coex_table(btcoexist, force_exec, 0x55555555,
+					   0x55555555, 0xffffff, 0x3);
+		break;
+	case 1:
+		halbtc8723b1ant_coex_table(btcoexist, force_exec, 0x55555555,
+					   0x5a5a5a5a, 0xffffff, 0x3);
+		break;
+	case 2:
+		halbtc8723b1ant_coex_table(btcoexist, force_exec, 0x5a5a5a5a,
+					   0x5a5a5a5a, 0xffffff, 0x3);
+		break;
+	case 3:
+		halbtc8723b1ant_coex_table(btcoexist, force_exec, 0x55555555,
+					   0xaaaaaaaa, 0xffffff, 0x3);
+		break;
+	case 4:
+		halbtc8723b1ant_coex_table(btcoexist, force_exec, 0x55555555,
+					   0x5aaa5aaa, 0xffffff, 0x3);
+		break;
+	case 5:
+		halbtc8723b1ant_coex_table(btcoexist, force_exec, 0x5a5a5a5a,
+					   0xaaaa5a5a, 0xffffff, 0x3);
+		break;
+	case 6:
+		halbtc8723b1ant_coex_table(btcoexist, force_exec, 0x55555555,
+					   0xaaaa5a5a, 0xffffff, 0x3);
+		break;
+	case 7:
+		halbtc8723b1ant_coex_table(btcoexist, force_exec, 0x5afa5afa,
+					   0x5afa5afa, 0xffffff, 0x3);
+		break;
+	default:
+		break;
+	}
+}
+
+void halbtc8723b1ant_SetFwIgnoreWlanAct(struct btc_coexist *btcoexist,
+					bool enable)
+{
+	u8 h2c_parameter[1] = {0};
+
+	if (enable)
+		h2c_parameter[0] |= BIT0;	/* function enable */
+
+	BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_EXEC,
+		  "[BTCoex], set FW for BT Ignore Wlan_Act,"
+		  " FW write 0x63=0x%x\n", h2c_parameter[0]);
+
+	btcoexist->btc_fill_h2c(btcoexist, 0x63, 1, h2c_parameter);
+}
+
+void halbtc8723b1ant_ignore_wlan_act(struct btc_coexist *btcoexist,
+				     bool force_exec, bool enable)
+{
+	BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW,
+		  "[BTCoex], %s turn Ignore WlanAct %s\n",
+		  (force_exec ? "force to" : ""), (enable ? "ON" : "OFF"));
+	coex_dm->cur_ignore_wlan_act = enable;
+
+	if (!force_exec) {
+		BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_DETAIL,
+			  "[BTCoex], bPreIgnoreWlanAct = %d, "
+			  "bCurIgnoreWlanAct = %d!!\n",
+			  coex_dm->pre_ignore_wlan_act,
+			  coex_dm->cur_ignore_wlan_act);
+
+		if (coex_dm->pre_ignore_wlan_act ==
+		    coex_dm->cur_ignore_wlan_act)
+			return;
+	}
+	halbtc8723b1ant_SetFwIgnoreWlanAct(btcoexist, enable);
+
+	coex_dm->pre_ignore_wlan_act = coex_dm->cur_ignore_wlan_act;
+}
+
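+/* Download the five PS-TDMA parameter bytes to the firmware via H2C
+ * command 0x60.  In WiFi AP mode byte1/byte5 are patched first (BIT4
+ * cleared and BIT5 set in byte1, BIT5 set and BIT6 cleared in byte5);
+ * the bytes actually sent are cached in coex_dm->ps_tdma_para[]. */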
+void halbtc8723b1ant_set_fw_ps_tdma(struct btc_coexist *btcoexist,
+				    u8 byte1, u8 byte2, u8 byte3,
+				    u8 byte4, u8 byte5)
+{
+	u8 h2c_parameter[5] = {0};
+	u8 real_byte1 = byte1, real_byte5 = byte5;
+	bool ap_enable = false;
+
+	btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_AP_MODE_ENABLE,
+			   &ap_enable);
+
+	if (ap_enable) {
+		if ((byte1 & BIT4) && !(byte1 & BIT5)) {
+			BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY,
+				  "[BTCoex], FW for 1Ant AP mode\n");
+			real_byte1 &= ~BIT4;
+			real_byte1 |= BIT5;
+
+			real_byte5 |= BIT5;
+			real_byte5 &= ~BIT6;
+		}
+	}
+
+	h2c_parameter[0] = real_byte1;
+	h2c_parameter[1] = byte2;
+	h2c_parameter[2] = byte3;
+	h2c_parameter[3] = byte4;
+	h2c_parameter[4] = real_byte5;
+
+	coex_dm->ps_tdma_para[0] = real_byte1;
+	coex_dm->ps_tdma_para[1] = byte2;
+	coex_dm->ps_tdma_para[2] = byte3;
+	coex_dm->ps_tdma_para[3] = byte4;
+	coex_dm->ps_tdma_para[4] = real_byte5;
+
+	BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_EXEC,
+		  "[BTCoex], PS-TDMA H2C cmd =0x%x%08x\n",
+		  h2c_parameter[0],
+		  h2c_parameter[1] << 24 |
+		  h2c_parameter[2] << 16 |
+		  h2c_parameter[3] << 8 |
+		  h2c_parameter[4]);
+
+	btcoexist->btc_fill_h2c(btcoexist, 0x60, 5, h2c_parameter);
+}
+
+void halbtc8723b1ant_SetLpsRpwm(struct btc_coexist *btcoexist,
+				u8 lps_val, u8 rpwm_val)
+{
+	u8 lps = lps_val;
+	u8 rpwm = rpwm_val;
+
+	btcoexist->btc_set(btcoexist, BTC_SET_U1_1ANT_LPS, &lps);
+	btcoexist->btc_set(btcoexist, BTC_SET_U1_1ANT_RPWM, &rpwm);
+}
+
+void halbtc8723b1ant_LpsRpwm(struct btc_coexist *btcoexist, bool force_exec,
+			     u8 lps_val, u8 rpwm_val)
+{
+	BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW,
+		  "[BTCoex], %s set lps/rpwm=0x%x/0x%x\n",
+		  (force_exec ? "force to" : ""), lps_val, rpwm_val);
+	coex_dm->cur_lps = lps_val;
+	coex_dm->cur_rpwm = rpwm_val;
+
+	if (!force_exec) {
+		BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_DETAIL,
+			  "[BTCoex], LPS-RxBeaconMode=0x%x , LPS-RPWM=0x%x!!\n",
+			  coex_dm->cur_lps, coex_dm->cur_rpwm);
+
+		if ((coex_dm->pre_lps == coex_dm->cur_lps) &&
+		    (coex_dm->pre_rpwm == coex_dm->cur_rpwm)) {
+			BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_DETAIL,
+				  "[BTCoex], LPS-RPWM_Last=0x%x"
+				  " , LPS-RPWM_Now=0x%x!!\n",
+				  coex_dm->pre_rpwm, coex_dm->cur_rpwm);
+
+			return;
+		}
+	}
+	halbtc8723b1ant_SetLpsRpwm(btcoexist, lps_val, rpwm_val);
+
+	coex_dm->pre_lps = coex_dm->cur_lps;
+	coex_dm->pre_rpwm = coex_dm->cur_rpwm;
+}
+
+void halbtc8723b1ant_sw_mechanism1(struct btc_coexist *btcoexist,
+				   bool shrink_rx_lpf, bool low_penalty_ra,
+				   bool limited_dig, bool bt_lna_constrain)
+{
+	BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_MONITOR,
+		  "[BTCoex], SM1[ShRf/ LpRA/ LimDig/ btLna] = %d %d %d %d\n",
+		  shrink_rx_lpf, low_penalty_ra, limited_dig, bt_lna_constrain);
+
+	halbtc8723b1ant_low_penalty_ra(btcoexist, NORMAL_EXEC, low_penalty_ra);
+}
+
+void halbtc8723b1ant_sw_mechanism2(struct btc_coexist *btcoexist,
+				   bool agc_table_shift, bool adc_backoff,
+				   bool sw_dac_swing, u32 dac_swing_lvl)
+{
+	BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_MONITOR,
+		  "[BTCoex], SM2[AgcT/ AdcB/ SwDacSwing(lvl)] = %d %d %d\n",
+		  agc_table_shift, adc_backoff, sw_dac_swing);
+}
+
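+/* Configure the antenna path.  The external antenna switch is used when
+ * the PG setting requests it or the WiFi firmware is older than 0xc0000,
+ * otherwise the internal switch is used.  init_hw_cfg programs the
+ * power-on defaults, while wifi_off forces GNT_BT high and hands antenna
+ * control back to BT. */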
+void halbtc8723b1ant_SetAntPath(struct btc_coexist *btcoexist,
+				u8 ant_pos_type, bool init_hw_cfg,
+				bool wifi_off)
+{
+	struct btc_board_info *board_info = &btcoexist->board_info;
+	u32 fw_ver = 0, u32tmp = 0;
+	bool pg_ext_switch = false;
+	bool use_ext_switch = false;
+	u8 h2c_parameter[2] = {0};
+
+	btcoexist->btc_get(btcoexist, BTC_GET_BL_EXT_SWITCH, &pg_ext_switch);
+	/* [31:16]=fw ver, [15:0]=fw sub ver */
+	btcoexist->btc_get(btcoexist, BTC_GET_U4_WIFI_FW_VER, &fw_ver);
+
+	if ((fw_ver < 0xc0000) || pg_ext_switch)
+		use_ext_switch = true;
+
+	if (init_hw_cfg) {
+		/*BT select s0/s1 is controlled by WiFi */
+		btcoexist->btc_write_1byte_bitmask(btcoexist, 0x67, 0x20, 0x1);
+
+		/*Force GNT_BT to Normal */
+		btcoexist->btc_write_1byte_bitmask(btcoexist, 0x765, 0x18, 0x0);
+	} else if (wifi_off) {
+		/*Force GNT_BT to High */
+		btcoexist->btc_write_1byte_bitmask(btcoexist, 0x765, 0x18, 0x3);
+		/*BT select s0/s1 is controlled by BT */
+		btcoexist->btc_write_1byte_bitmask(btcoexist, 0x67, 0x20, 0x0);
+
+		/* 0x4c[24:23]=00, Set Antenna control by BT_RFE_CTRL
+		 * BT Vendor 0xac=0xf002 */
+		u32tmp = btcoexist->btc_read_4byte(btcoexist, 0x4c);
+		u32tmp &= ~BIT23;
+		u32tmp &= ~BIT24;
+		btcoexist->btc_write_4byte(btcoexist, 0x4c, u32tmp);
+	}
+
+	if (use_ext_switch) {
+		if (init_hw_cfg) {
+			/* 0x4c[23]=0, 0x4c[24]=1  Antenna control by WL/BT */
+			u32tmp = btcoexist->btc_read_4byte(btcoexist, 0x4c);
+			u32tmp &= ~BIT23;
+			u32tmp |= BIT24;
+			btcoexist->btc_write_4byte(btcoexist, 0x4c, u32tmp);
+
+			if (board_info->btdm_ant_pos ==
+			    BTC_ANTENNA_AT_MAIN_PORT) {
+				/* Main Ant to  BT for IPS case 0x4c[23]=1 */
+				btcoexist->btc_write_1byte_bitmask(btcoexist,
+								   0x64, 0x1,
+								   0x1);
+
+				/*tell firmware "no antenna inverse"*/
+				h2c_parameter[0] = 0;
+				h2c_parameter[1] = 1;  /*ext switch type*/
+				btcoexist->btc_fill_h2c(btcoexist, 0x65, 2,
+							h2c_parameter);
+			} else {
+				/*Aux Ant to  BT for IPS case 0x4c[23]=1 */
+				btcoexist->btc_write_1byte_bitmask(btcoexist,
+								   0x64, 0x1,
+								   0x0);
+
+				/*tell firmware "antenna inverse"*/
+				h2c_parameter[0] = 1;
+				h2c_parameter[1] = 1;  /*ext switch type*/
+				btcoexist->btc_fill_h2c(btcoexist, 0x65, 2,
+							h2c_parameter);
+			}
+		}
+
+		/* fixed internal switch first*/
+		/* fixed internal switch S1->WiFi, S0->BT*/
+		if (board_info->btdm_ant_pos == BTC_ANTENNA_AT_MAIN_PORT)
+			btcoexist->btc_write_2byte(btcoexist, 0x948, 0x0);
+		else/* fixed internal switch S0->WiFi, S1->BT*/
+			btcoexist->btc_write_2byte(btcoexist, 0x948, 0x280);
+
+		/* ext switch setting */
+		switch (ant_pos_type) {
+		case BTC_ANT_PATH_WIFI:
+			if (board_info->btdm_ant_pos ==
+			    BTC_ANTENNA_AT_MAIN_PORT)
+				btcoexist->btc_write_1byte_bitmask(btcoexist,
+								   0x92c, 0x3,
+								   0x1);
+			else
+				btcoexist->btc_write_1byte_bitmask(btcoexist,
+								   0x92c, 0x3,
+								   0x2);
+			break;
+		case BTC_ANT_PATH_BT:
+			if (board_info->btdm_ant_pos ==
+			    BTC_ANTENNA_AT_MAIN_PORT)
+				btcoexist->btc_write_1byte_bitmask(btcoexist,
+								   0x92c, 0x3,
+								   0x2);
+			else
+				btcoexist->btc_write_1byte_bitmask(btcoexist,
+								   0x92c, 0x3,
+								   0x1);
+			break;
+		default:
+		case BTC_ANT_PATH_PTA:
+			if (board_info->btdm_ant_pos ==
+			    BTC_ANTENNA_AT_MAIN_PORT)
+				btcoexist->btc_write_1byte_bitmask(btcoexist,
+								   0x92c, 0x3,
+								   0x1);
+			else
+				btcoexist->btc_write_1byte_bitmask(btcoexist,
+								   0x92c, 0x3,
+								   0x2);
+			break;
+		}
+
+	} else {
+		if (init_hw_cfg) {
+			/* 0x4c[23]=1, 0x4c[24]=0  Antenna control by 0x64*/
+			u32tmp = btcoexist->btc_read_4byte(btcoexist, 0x4c);
+			u32tmp |= BIT23;
+			u32tmp &= ~BIT24;
+			btcoexist->btc_write_4byte(btcoexist, 0x4c, u32tmp);
+
+			if (board_info->btdm_ant_pos ==
+			    BTC_ANTENNA_AT_MAIN_PORT) {
+				/*Main Ant to  WiFi for IPS case 0x4c[23]=1*/
+				btcoexist->btc_write_1byte_bitmask(btcoexist,
+								   0x64, 0x1,
+								   0x0);
+
+				/*tell firmware "no antenna inverse"*/
+				h2c_parameter[0] = 0;
+				h2c_parameter[1] = 0;  /*internal switch type*/
+				btcoexist->btc_fill_h2c(btcoexist, 0x65, 2,
+							h2c_parameter);
+			} else {
+				/*Aux Ant to  BT for IPS case 0x4c[23]=1*/
+				btcoexist->btc_write_1byte_bitmask(btcoexist,
+								   0x64, 0x1,
+								   0x1);
+
+				/*tell firmware "antenna inverse"*/
+				h2c_parameter[0] = 1;
+				h2c_parameter[1] = 0;  /*internal switch type*/
+				btcoexist->btc_fill_h2c(btcoexist, 0x65, 2,
+							h2c_parameter);
+			}
+		}
+
+		/* fixed external switch first*/
+		/*Main->WiFi, Aux->BT*/
+		if (board_info->btdm_ant_pos == BTC_ANTENNA_AT_MAIN_PORT)
+			btcoexist->btc_write_1byte_bitmask(btcoexist, 0x92c,
+							   0x3, 0x1);
+		else/*Main->BT, Aux->WiFi */
+			btcoexist->btc_write_1byte_bitmask(btcoexist, 0x92c,
+							   0x3, 0x2);
+
+		/* internal switch setting*/
+		switch (ant_pos_type) {
+		case BTC_ANT_PATH_WIFI:
+			if (board_info->btdm_ant_pos == BTC_ANTENNA_AT_MAIN_PORT)
+				btcoexist->btc_write_2byte(btcoexist, 0x948,
+							   0x0);
+			else
+				btcoexist->btc_write_2byte(btcoexist, 0x948,
+							   0x280);
+			break;
+		case BTC_ANT_PATH_BT:
+			if (board_info->btdm_ant_pos == BTC_ANTENNA_AT_MAIN_PORT)
+				btcoexist->btc_write_2byte(btcoexist, 0x948,
+							   0x280);
+			else
+				btcoexist->btc_write_2byte(btcoexist, 0x948,
+							   0x0);
+			break;
+		default:
+		case BTC_ANT_PATH_PTA:
+			if (board_info->btdm_ant_pos == BTC_ANTENNA_AT_MAIN_PORT)
+				btcoexist->btc_write_2byte(btcoexist, 0x948,
+							   0x200);
+			else
+				btcoexist->btc_write_2byte(btcoexist, 0x948,
+							   0x80);
+			break;
+		}
+	}
+}
+
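+/* Enable PS-TDMA with one of the numbered firmware parameter sets, or
+ * disable it and route the antenna to PTA, BT or WiFi control
+ * (off types 8, 0 and 9 respectively). */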
+void halbtc8723b1ant_ps_tdma(struct btc_coexist *btcoexist, bool force_exec,
+			     bool turn_on, u8 type)
+{
+	bool wifi_busy = false;
+	u8 rssi_adjust_val = 0;
+
+	coex_dm->cur_ps_tdma_on = turn_on;
+	coex_dm->cur_ps_tdma = type;
+
+	btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_BUSY, &wifi_busy);
+
+	if (!force_exec) {
+		if (coex_dm->cur_ps_tdma_on)
+			BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_DETAIL,
+				  "[BTCoex], ******** TDMA(on, %d) *********\n",
+				  coex_dm->cur_ps_tdma);
+		else
+			BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_DETAIL,
+				  "[BTCoex], ******** TDMA(off, %d) ********\n",
+				  coex_dm->cur_ps_tdma);
+
+		if ((coex_dm->pre_ps_tdma_on == coex_dm->cur_ps_tdma_on) &&
+		    (coex_dm->pre_ps_tdma == coex_dm->cur_ps_tdma))
+			return;
+	}
+	if (turn_on) {
+		switch (type) {
+		default:
+			halbtc8723b1ant_set_fw_ps_tdma(btcoexist, 0x51, 0x1a,
+						       0x1a, 0x0, 0x50);
+			break;
+		case 1:
+			if (wifi_busy)
+				halbtc8723b1ant_set_fw_ps_tdma(btcoexist, 0x51,
+							       0x3a, 0x03,
+							       0x10, 0x50);
+			else
+				halbtc8723b1ant_set_fw_ps_tdma(btcoexist, 0x51,
+							       0x3a, 0x03,
+							       0x10, 0x51);
+
+			rssi_adjust_val = 11;
+			break;
+		case 2:
+			if (wifi_busy)
+				halbtc8723b1ant_set_fw_ps_tdma(btcoexist, 0x51,
+							       0x2b, 0x03,
+							       0x10, 0x50);
+			else
+				halbtc8723b1ant_set_fw_ps_tdma(btcoexist, 0x51,
+							       0x2b, 0x03,
+							       0x10, 0x51);
+			rssi_adjust_val = 14;
+			break;
+		case 3:
+			halbtc8723b1ant_set_fw_ps_tdma(btcoexist, 0x51, 0x1d,
+						       0x1d, 0x0, 0x52);
+			break;
+		case 4:
+			halbtc8723b1ant_set_fw_ps_tdma(btcoexist, 0x93, 0x15,
+						       0x3, 0x14, 0x0);
+			rssi_adjust_val = 17;
+			break;
+		case 5:
+			halbtc8723b1ant_set_fw_ps_tdma(btcoexist, 0x61, 0x15,
+						       0x3, 0x11, 0x10);
+			break;
+		case 6:
+			halbtc8723b1ant_set_fw_ps_tdma(btcoexist, 0x61, 0x20,
+						       0x3, 0x11, 0x13);
+			break;
+		case 7:
+			halbtc8723b1ant_set_fw_ps_tdma(btcoexist, 0x13, 0xc,
+						       0x5, 0x0, 0x0);
+			break;
+		case 8:
+			halbtc8723b1ant_set_fw_ps_tdma(btcoexist, 0x93, 0x25,
+						       0x3, 0x10, 0x0);
+			break;
+		case 9:
+			if (wifi_busy)
+				halbtc8723b1ant_set_fw_ps_tdma(btcoexist, 0x51,
+							       0x21, 0x3,
+							       0x10, 0x50);
+			else
+				halbtc8723b1ant_set_fw_ps_tdma(btcoexist, 0x51,
+							       0x21, 0x3,
+							       0x10, 0x50);
+			rssi_adjust_val = 18;
+			break;
+		case 10:
+			halbtc8723b1ant_set_fw_ps_tdma(btcoexist, 0x13, 0xa,
+						       0xa, 0x0, 0x40);
+			break;
+		case 11:
+			if (wifi_busy)
+				halbtc8723b1ant_set_fw_ps_tdma(btcoexist, 0x51,
+							       0x15, 0x03,
+							       0x10, 0x50);
+			else
+				halbtc8723b1ant_set_fw_ps_tdma(btcoexist, 0x51,
+							       0x15, 0x03,
+							       0x10, 0x50);
+			rssi_adjust_val = 20;
+			break;
+		case 12:
+			halbtc8723b1ant_set_fw_ps_tdma(btcoexist, 0x51, 0x0a,
+						       0x0a, 0x0, 0x50);
+			break;
+		case 13:
+			halbtc8723b1ant_set_fw_ps_tdma(btcoexist, 0x51, 0x15,
+						       0x15, 0x0, 0x50);
+			break;
+		case 14:
+			halbtc8723b1ant_set_fw_ps_tdma(btcoexist, 0x51, 0x21,
+						       0x3, 0x10, 0x52);
+			break;
+		case 15:
+			halbtc8723b1ant_set_fw_ps_tdma(btcoexist, 0x13, 0xa,
+						       0x3, 0x8, 0x0);
+			break;
+		case 16:
+			halbtc8723b1ant_set_fw_ps_tdma(btcoexist, 0x93, 0x15,
+						       0x3, 0x10, 0x0);
+			rssi_adjust_val = 18;
+			break;
+		case 18:
+			halbtc8723b1ant_set_fw_ps_tdma(btcoexist, 0x93, 0x25,
+						       0x3, 0x10, 0x0);
+			rssi_adjust_val = 14;
+			break;
+		case 20:
+			halbtc8723b1ant_set_fw_ps_tdma(btcoexist, 0x61, 0x35,
+						       0x03, 0x11, 0x10);
+			break;
+		case 21:
+			halbtc8723b1ant_set_fw_ps_tdma(btcoexist, 0x61, 0x15,
+						       0x03, 0x11, 0x10);
+			break;
+		case 22:
+			halbtc8723b1ant_set_fw_ps_tdma(btcoexist, 0x61, 0x25,
+						       0x03, 0x11, 0x10);
+			break;
+		case 23:
+			halbtc8723b1ant_set_fw_ps_tdma(btcoexist, 0xe3, 0x25,
+						       0x3, 0x31, 0x18);
+			rssi_adjust_val = 22;
+			break;
+		case 24:
+			halbtc8723b1ant_set_fw_ps_tdma(btcoexist, 0xe3, 0x15,
+						       0x3, 0x31, 0x18);
+			rssi_adjust_val = 22;
+			break;
+		case 25:
+			halbtc8723b1ant_set_fw_ps_tdma(btcoexist, 0xe3, 0xa,
+						       0x3, 0x31, 0x18);
+			rssi_adjust_val = 22;
+			break;
+		case 26:
+			halbtc8723b1ant_set_fw_ps_tdma(btcoexist, 0xe3, 0xa,
+						       0x3, 0x31, 0x18);
+			rssi_adjust_val = 22;
+			break;
+		case 27:
+			halbtc8723b1ant_set_fw_ps_tdma(btcoexist, 0xe3, 0x25,
+						       0x3, 0x31, 0x98);
+			rssi_adjust_val = 22;
+			break;
+		case 28:
+			halbtc8723b1ant_set_fw_ps_tdma(btcoexist, 0x69, 0x25,
+						       0x3, 0x31, 0x0);
+			break;
+		case 29:
+			halbtc8723b1ant_set_fw_ps_tdma(btcoexist, 0xab, 0x1a,
+						       0x1a, 0x1, 0x10);
+			break;
+		case 30:
+			halbtc8723b1ant_set_fw_ps_tdma(btcoexist, 0x51, 0x14,
+						       0x3, 0x10, 0x50);
+			break;
+		case 31:
+			halbtc8723b1ant_set_fw_ps_tdma(btcoexist, 0xd3, 0x1a,
+						       0x1a, 0, 0x58);
+			break;
+		case 32:
+			halbtc8723b1ant_set_fw_ps_tdma(btcoexist, 0x61, 0xa,
+						       0x3, 0x10, 0x0);
+			break;
+		case 33:
+			halbtc8723b1ant_set_fw_ps_tdma(btcoexist, 0xa3, 0x25,
+						       0x3, 0x30, 0x90);
+			break;
+		case 34:
+			halbtc8723b1ant_set_fw_ps_tdma(btcoexist, 0x53, 0x1a,
+						       0x1a, 0x0, 0x10);
+			break;
+		case 35:
+			halbtc8723b1ant_set_fw_ps_tdma(btcoexist, 0x63, 0x1a,
+						       0x1a, 0x0, 0x10);
+			break;
+		case 36:
+			halbtc8723b1ant_set_fw_ps_tdma(btcoexist, 0xd3, 0x12,
+						       0x3, 0x14, 0x50);
+			break;
+		/* SoftAP only with no STA associated, BT disabled:
+		 * TDMA mode for power saving.
+		 * In SoftAP mode with the screen off this costs the phone
+		 * 70-80 mA. */
+		case 40:
+			halbtc8723b1ant_set_fw_ps_tdma(btcoexist, 0x23, 0x18,
+						       0x00, 0x10, 0x24);
+			break;
+		}
+	} else {
+		switch (type) {
+		case 8: /*PTA Control */
+			halbtc8723b1ant_set_fw_ps_tdma(btcoexist, 0x8, 0x0,
+						       0x0, 0x0, 0x0);
+			halbtc8723b1ant_SetAntPath(btcoexist, BTC_ANT_PATH_PTA,
+						   false, false);
+			break;
+		case 0:
+		default:  /*Software control, Antenna at BT side */
+			halbtc8723b1ant_set_fw_ps_tdma(btcoexist, 0x0, 0x0,
+						       0x0, 0x0, 0x0);
+			halbtc8723b1ant_SetAntPath(btcoexist, BTC_ANT_PATH_BT,
+						   false, false);
+			break;
+		case 9:   /*Software control, Antenna at WiFi side */
+			halbtc8723b1ant_set_fw_ps_tdma(btcoexist, 0x0, 0x0,
+						       0x0, 0x0, 0x0);
+			halbtc8723b1ant_SetAntPath(btcoexist, BTC_ANT_PATH_WIFI,
+						   false, false);
+			break;
+		}
+	}
+	rssi_adjust_val = 0;
+	btcoexist->btc_set(btcoexist,
+			   BTC_SET_U1_RSSI_ADJ_VAL_FOR_1ANT_COEX_TYPE,
+			   &rssi_adjust_val);
+
+	/* update pre state */
+	coex_dm->pre_ps_tdma_on = coex_dm->cur_ps_tdma_on;
+	coex_dm->pre_ps_tdma = coex_dm->cur_ps_tdma;
+}
+
+void halbtc8723b1ant_coex_alloff(struct btc_coexist *btcoexist)
+{
+	/* fw all off */
+	halbtc8723b1ant_fw_dac_swing_lvl(btcoexist, NORMAL_EXEC, 6);
+	halbtc8723b1ant_dec_bt_pwr(btcoexist, NORMAL_EXEC, false);
+
+	/* sw all off */
+	halbtc8723b1ant_sw_mechanism1(btcoexist, false, false, false, false);
+	halbtc8723b1ant_sw_mechanism2(btcoexist, false, false, false, 0x18);
+
+	/* hw all off */
+	halbtc8723b1ant_coex_table_with_type(btcoexist, NORMAL_EXEC, 0);
+}
+
+bool halbtc8723b1ant_is_common_action(struct btc_coexist *btcoexist)
+{
+	bool common = false, wifi_connected = false;
+	bool wifi_busy = false;
+
+	btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_CONNECTED,
+			   &wifi_connected);
+	btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_BUSY, &wifi_busy);
+
+	if (!wifi_connected &&
+	    BT_8723B_1ANT_BT_STATUS_NON_CONNECTED_IDLE == coex_dm->bt_status) {
+		BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
+			  "[BTCoex], Wifi non connected-idle + "
+			  "BT non connected-idle!!\n");
+		halbtc8723b1ant_fw_dac_swing_lvl(btcoexist, NORMAL_EXEC, 6);
+		halbtc8723b1ant_dec_bt_pwr(btcoexist, NORMAL_EXEC, false);
+
+		halbtc8723b1ant_sw_mechanism1(btcoexist, false, false,
+					      false, false);
+		halbtc8723b1ant_sw_mechanism2(btcoexist, false, false,
+					      false, 0x18);
+
+		common = true;
+	} else if (wifi_connected &&
+		   (BT_8723B_1ANT_BT_STATUS_NON_CONNECTED_IDLE ==
+		    coex_dm->bt_status)) {
+		BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
+			  "[BTCoex], Wifi connected + "
+			  "BT non connected-idle!!\n");
+		halbtc8723b1ant_fw_dac_swing_lvl(btcoexist, NORMAL_EXEC, 6);
+		halbtc8723b1ant_dec_bt_pwr(btcoexist, NORMAL_EXEC, false);
+
+		halbtc8723b1ant_sw_mechanism1(btcoexist, false, false,
+					      false, false);
+		halbtc8723b1ant_sw_mechanism2(btcoexist, false, false,
+					      false, 0x18);
+
+		common = true;
+	} else if (!wifi_connected &&
+		   (BT_8723B_1ANT_BT_STATUS_CONNECTED_IDLE ==
+		    coex_dm->bt_status)) {
+		BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
+			  "[BTCoex], Wifi non connected-idle + "
+			  "BT connected-idle!!\n");
+		halbtc8723b1ant_fw_dac_swing_lvl(btcoexist, NORMAL_EXEC, 6);
+		halbtc8723b1ant_dec_bt_pwr(btcoexist, NORMAL_EXEC, false);
+
+		halbtc8723b1ant_sw_mechanism1(btcoexist, false, false,
+					      false, false);
+		halbtc8723b1ant_sw_mechanism2(btcoexist, false, false,
+					      false, 0x18);
+
+		common = true;
+	} else if (wifi_connected &&
+		   (BT_8723B_1ANT_BT_STATUS_CONNECTED_IDLE ==
+		    coex_dm->bt_status)) {
+		BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
+			  "[BTCoex], Wifi connected + BT connected-idle!!\n");
+		halbtc8723b1ant_fw_dac_swing_lvl(btcoexist, NORMAL_EXEC, 6);
+		halbtc8723b1ant_dec_bt_pwr(btcoexist, NORMAL_EXEC, false);
+
+		halbtc8723b1ant_sw_mechanism1(btcoexist, false, false,
+					      false, false);
+		halbtc8723b1ant_sw_mechanism2(btcoexist, false, false,
+					      false, 0x18);
+
+		common = true;
+	} else if (!wifi_connected &&
+		   (BT_8723B_1ANT_BT_STATUS_CONNECTED_IDLE !=
+		    coex_dm->bt_status)) {
+		BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
+			  "[BTCoex], Wifi non connected-idle + BT Busy!!\n");
+		halbtc8723b1ant_fw_dac_swing_lvl(btcoexist, NORMAL_EXEC, 6);
+		halbtc8723b1ant_dec_bt_pwr(btcoexist, NORMAL_EXEC, false);
+
+		halbtc8723b1ant_sw_mechanism1(btcoexist, false, false,
+					      false, false);
+		halbtc8723b1ant_sw_mechanism2(btcoexist, false, false,
+					      false, 0x18);
+
+		common = true;
+	} else {
+		if (wifi_busy)
+			BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
+				  "[BTCoex], Wifi Connected-Busy"
+				  " + BT Busy!!\n");
+		else
+			BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
+				  "[BTCoex], Wifi Connected-Idle"
+				  " + BT Busy!!\n");
+
+		common = false;
+	}
+
+	return common;
+}
+
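+/* Dynamically tune the WiFi slot duration for BT ACL traffic: the BT retry
+ * counter reported in BT_Info is sampled on every run, and the static
+ * up/dn/m/n counters decide whether to lengthen or shorten the WiFi
+ * duration by stepping between PS-TDMA cases 1, 2, 9 and 11. */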
+void halbtc8723b1ant_tdma_duration_adjust_for_acl(struct btc_coexist *btcoexist,
+						  u8 wifi_status)
+{
+	static s32 up, dn, m, n, wait_count;
+	/* 0: no change, +1: increase WiFi duration,
+	 * -1: decrease WiFi duration */
+	s32 result;
+	u8 retry_count = 0, bt_info_ext;
+	static bool pre_wifi_busy = false;
+	bool wifi_busy = false;
+
+	BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW,
+		  "[BTCoex], TdmaDurationAdjustForAcl()\n");
+
+	if (BT_8723B_1ANT_WIFI_STATUS_CONNECTED_BUSY == wifi_status)
+		wifi_busy = true;
+	else
+		wifi_busy = false;
+
+	if ((BT_8723B_1ANT_WIFI_STATUS_NON_CONNECTED_ASSO_AUTH_SCAN ==
+							 wifi_status) ||
+	    (BT_8723B_1ANT_WIFI_STATUS_CONNECTED_SCAN == wifi_status) ||
+	    (BT_8723B_1ANT_WIFI_STATUS_CONNECTED_SPECIAL_PKT == wifi_status)) {
+		if (coex_dm->cur_ps_tdma != 1 && coex_dm->cur_ps_tdma != 2 &&
+		    coex_dm->cur_ps_tdma != 3 && coex_dm->cur_ps_tdma != 9) {
+			halbtc8723b1ant_ps_tdma(btcoexist, NORMAL_EXEC,
+						true, 9);
+			coex_dm->ps_tdma_du_adj_type = 9;
+
+			up = 0;
+			dn = 0;
+			m = 1;
+			n = 3;
+			result = 0;
+			wait_count = 0;
+		}
+		return;
+	}
+
+	if (!coex_dm->auto_tdma_adjust) {
+		coex_dm->auto_tdma_adjust = true;
+		BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_DETAIL,
+			  "[BTCoex], first run TdmaDurationAdjust()!!\n");
+
+		halbtc8723b1ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 2);
+		coex_dm->ps_tdma_du_adj_type = 2;
+
+		up = 0;
+		dn = 0;
+		m = 1;
+		n = 3;
+		result = 0;
+		wait_count = 0;
+	} else {
+		/* acquire the BT TRx retry count from BT_Info byte2 */
+		retry_count = coex_sta->bt_retry_cnt;
+		bt_info_ext = coex_sta->bt_info_ext;
+		result = 0;
+		wait_count++;
+		/* no retry in the last 2-second duration */
+		if (retry_count == 0) {
+			up++;
+			dn--;
+
+			if (dn <= 0)
+				dn = 0;
+
+			if (up >= n) {
+				wait_count = 0;
+				n = 3;
+				up = 0;
+				dn = 0;
+				result = 1;
+				BTC_PRINT(BTC_MSG_ALGORITHM,
+					  ALGO_TRACE_FW_DETAIL,
+					  "[BTCoex], Increase wifi "
+					  "duration!!\n");
+			}
+		} else if (retry_count <= 3) {
+			up--;
+			dn++;
+
+			if (up <= 0)
+				up = 0;
+
+			if (dn == 2) {
+				if (wait_count <= 2)
+					m++;
+				else
+					m = 1;
+
+				if (m >= 20)
+					m = 20;
+
+				n = 3 * m;
+				up = 0;
+				dn = 0;
+				wait_count = 0;
+				result = -1;
+				BTC_PRINT(BTC_MSG_ALGORITHM,
+					  ALGO_TRACE_FW_DETAIL,
+					  "[BTCoex], Decrease wifi duration"
+					  " for retryCounter<3!!\n");
+			}
+		} else {
+			if (wait_count == 1)
+				m++;
+			else
+				m = 1;
+
+			if (m >= 20)
+				m = 20;
+
+			n = 3 * m;
+			up = 0;
+			dn = 0;
+			wait_count = 0;
+			result = -1;
+			BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_DETAIL,
+				  "[BTCoex], Decrease wifi duration"
+				  " for retryCounter>3!!\n");
+		}
+
+		if (result == -1) {
+			if ((BT_INFO_8723B_1ANT_A2DP_BASIC_RATE(bt_info_ext)) &&
+			    ((coex_dm->cur_ps_tdma == 1) ||
+			     (coex_dm->cur_ps_tdma == 2))) {
+				halbtc8723b1ant_ps_tdma(btcoexist, NORMAL_EXEC,
+							true, 9);
+				coex_dm->ps_tdma_du_adj_type = 9;
+			} else if (coex_dm->cur_ps_tdma == 1) {
+				halbtc8723b1ant_ps_tdma(btcoexist, NORMAL_EXEC,
+							true, 2);
+				coex_dm->ps_tdma_du_adj_type = 2;
+			} else if (coex_dm->cur_ps_tdma == 2) {
+				halbtc8723b1ant_ps_tdma(btcoexist, NORMAL_EXEC,
+							true, 9);
+				coex_dm->ps_tdma_du_adj_type = 9;
+			} else if (coex_dm->cur_ps_tdma == 9) {
+				halbtc8723b1ant_ps_tdma(btcoexist, NORMAL_EXEC,
+							true, 11);
+				coex_dm->ps_tdma_du_adj_type = 11;
+			}
+		} else if (result == 1) {
+			if ((BT_INFO_8723B_1ANT_A2DP_BASIC_RATE(bt_info_ext)) &&
+			    ((coex_dm->cur_ps_tdma == 1) ||
+			     (coex_dm->cur_ps_tdma == 2))) {
+				halbtc8723b1ant_ps_tdma(btcoexist, NORMAL_EXEC,
+							true, 9);
+				coex_dm->ps_tdma_du_adj_type = 9;
+			} else if (coex_dm->cur_ps_tdma == 11) {
+				halbtc8723b1ant_ps_tdma(btcoexist, NORMAL_EXEC,
+							true, 9);
+				coex_dm->ps_tdma_du_adj_type = 9;
+			} else if (coex_dm->cur_ps_tdma == 9) {
+				halbtc8723b1ant_ps_tdma(btcoexist, NORMAL_EXEC,
+							true, 2);
+				coex_dm->ps_tdma_du_adj_type = 2;
+			} else if (coex_dm->cur_ps_tdma == 2) {
+				halbtc8723b1ant_ps_tdma(btcoexist, NORMAL_EXEC,
+							true, 1);
+				coex_dm->ps_tdma_du_adj_type = 1;
+			}
+		} else {	  /*no change */
+			/*if busy / idle change */
+			if (wifi_busy != pre_wifi_busy) {
+				pre_wifi_busy = wifi_busy;
+				halbtc8723b1ant_ps_tdma(btcoexist, FORCE_EXEC,
+							true,
+							coex_dm->cur_ps_tdma);
+			}
+
+			BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_DETAIL,
+				  "[BTCoex],********* TDMA(on, %d) ********\n",
+				  coex_dm->cur_ps_tdma);
+		}
+
+		if (coex_dm->cur_ps_tdma != 1 && coex_dm->cur_ps_tdma != 2 &&
+		    coex_dm->cur_ps_tdma != 9 && coex_dm->cur_ps_tdma != 11) {
+			/* recover to previous adjust type */
+			halbtc8723b1ant_ps_tdma(btcoexist, NORMAL_EXEC, true,
+						coex_dm->ps_tdma_du_adj_type);
+		}
+	}
+}
+
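+/* Pick PS-TDMA case 25 or 26 from the WiFi RSSI trend: when the RSSI is
+ * rising it must exceed the threshold by an extra 5 before case 26 is
+ * chosen. */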
+u8 halbtc8723b1ant_ps_tdma_type_by_wifi_rssi(s32 wifi_rssi, s32 pre_wifi_rssi,
+					     u8 wifi_rssi_thresh)
+{
+	u8 ps_tdma_type = 0;
+
+	if (wifi_rssi > pre_wifi_rssi) {
+		if (wifi_rssi > (wifi_rssi_thresh + 5))
+			ps_tdma_type = 26;
+		else
+			ps_tdma_type = 25;
+	} else {
+		if (wifi_rssi > wifi_rssi_thresh)
+			ps_tdma_type = 26;
+		else
+			ps_tdma_type = 25;
+	}
+
+	return ps_tdma_type;
+}
+
+void halbtc8723b1ant_PsTdmaCheckForPowerSaveState(struct btc_coexist *btcoexist,
+						  bool new_ps_state)
+{
+	u8 lps_mode = 0x0;
+
+	btcoexist->btc_get(btcoexist, BTC_GET_U1_LPS_MODE, &lps_mode);
+
+	if (lps_mode) {	/* already under LPS state */
+		if (new_ps_state) {
+			/* keep state under LPS, do nothing. */
+		} else {
+			/* will leave LPS state, turn off psTdma first */
+			halbtc8723b1ant_ps_tdma(btcoexist, NORMAL_EXEC,
+						false, 0);
+		}
+	} else {	/* NO PS state */
+		if (new_ps_state) {
+			/* will enter LPS state, turn off psTdma first */
+			halbtc8723b1ant_ps_tdma(btcoexist, NORMAL_EXEC,
+						false, 0);
+		} else {
+			/* keep state under NO PS state, do nothing. */
+		}
+	}
+}
+
+void halbtc8723b1ant_power_save_state(struct btc_coexist *btcoexist,
+				      u8 ps_type, u8 lps_val,
+				      u8 rpwm_val)
+{
+	bool low_pwr_disable = false;
+
+	switch (ps_type) {
+	case BTC_PS_WIFI_NATIVE:
+		/* recover to original 32k low power setting */
+		low_pwr_disable = false;
+		btcoexist->btc_set(btcoexist, BTC_SET_ACT_DISABLE_LOW_POWER,
+				   &low_pwr_disable);
+		btcoexist->btc_set(btcoexist, BTC_SET_ACT_NORMAL_LPS, NULL);
+		break;
+	case BTC_PS_LPS_ON:
+		halbtc8723b1ant_PsTdmaCheckForPowerSaveState(btcoexist, true);
+		halbtc8723b1ant_LpsRpwm(btcoexist, NORMAL_EXEC, lps_val,
+					rpwm_val);
+		/* when coex forces LPS entry, do not enter 32k low power. */
+		low_pwr_disable = true;
+		btcoexist->btc_set(btcoexist, BTC_SET_ACT_DISABLE_LOW_POWER,
+				   &low_pwr_disable);
+		/* power save must be executed before psTdma. */
+		btcoexist->btc_set(btcoexist, BTC_SET_ACT_ENTER_LPS, NULL);
+		break;
+	case BTC_PS_LPS_OFF:
+		halbtc8723b1ant_PsTdmaCheckForPowerSaveState(btcoexist, false);
+		btcoexist->btc_set(btcoexist, BTC_SET_ACT_LEAVE_LPS, NULL);
+		break;
+	default:
+		break;
+	}
+}
+
+void halbtc8723b1ant_action_wifi_only(struct btc_coexist *btcoexist)
+{
+	halbtc8723b1ant_coex_table_with_type(btcoexist, NORMAL_EXEC, 0);
+	halbtc8723b1ant_ps_tdma(btcoexist, NORMAL_EXEC, false, 9);
+}
+
+void halbtc8723b1ant_monitor_bt_enable_disable(struct btc_coexist *btcoexist)
+{
+	static bool pre_bt_disabled = false;
+	static u32 bt_disable_cnt = 0;
+	bool bt_active = true, bt_disabled = false;
+
+	/* This function checks if BT is disabled */
+
+	if (coex_sta->high_priority_tx == 0 &&
+	    coex_sta->high_priority_rx == 0 &&
+	    coex_sta->low_priority_tx == 0 &&
+	    coex_sta->low_priority_rx == 0)
+		bt_active = false;
+
+	if (coex_sta->high_priority_tx == 0xffff &&
+	    coex_sta->high_priority_rx == 0xffff &&
+	    coex_sta->low_priority_tx == 0xffff &&
+	    coex_sta->low_priority_rx == 0xffff)
+		bt_active = false;
+
+	if (bt_active) {
+		bt_disable_cnt = 0;
+		bt_disabled = false;
+		btcoexist->btc_set(btcoexist, BTC_SET_BL_BT_DISABLE,
+				   &bt_disabled);
+		BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_MONITOR,
+			  "[BTCoex], BT is enabled !!\n");
+	} else {
+		bt_disable_cnt++;
+		BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_MONITOR,
+			  "[BTCoex], bt all counters=0, %d times!!\n",
+			  bt_disable_cnt);
+		if (bt_disable_cnt >= 2) {
+			bt_disabled = true;
+			btcoexist->btc_set(btcoexist, BTC_SET_BL_BT_DISABLE,
+					   &bt_disabled);
+			BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_MONITOR,
+				  "[BTCoex], BT is disabled !!\n");
+			halbtc8723b1ant_action_wifi_only(btcoexist);
+		}
+	}
+	if (pre_bt_disabled != bt_disabled) {
+		BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_MONITOR,
+			  "[BTCoex], BT is from %s to %s!!\n",
+			  (pre_bt_disabled ? "disabled" : "enabled"),
+			  (bt_disabled ? "disabled" : "enabled"));
+		pre_bt_disabled = bt_disabled;
+		if (bt_disabled) {
+			btcoexist->btc_set(btcoexist, BTC_SET_ACT_LEAVE_LPS,
+					   NULL);
+			btcoexist->btc_set(btcoexist, BTC_SET_ACT_NORMAL_LPS,
+					   NULL);
+		}
+	}
+}
+
+/***************************************************
+ *
+ *	Software Coex Mechanism start
+ *
+ ***************************************************/
+/* SCO only or SCO+PAN(HS) */
+void halbtc8723b1ant_action_sco(struct btc_coexist *btcoexist)
+{
+	u8 wifi_rssi_state;
+	u32 wifi_bw;
+
+	wifi_rssi_state =
+		halbtc8723b1ant_wifi_rssi_state(btcoexist, 0, 2, 25, 0);
+
+	halbtc8723b1ant_fw_dac_swing_lvl(btcoexist, NORMAL_EXEC, 4);
+
+	if (halbtc8723b1ant_need_to_dec_bt_pwr(btcoexist))
+		halbtc8723b1ant_dec_bt_pwr(btcoexist, NORMAL_EXEC, false);
+	else
+		halbtc8723b1ant_dec_bt_pwr(btcoexist, NORMAL_EXEC, false);
+
+	btcoexist->btc_get(btcoexist, BTC_GET_U4_WIFI_BW, &wifi_bw);
+
+	if (BTC_WIFI_BW_HT40 == wifi_bw) {
+		/* sw mechanism */
+		if ((wifi_rssi_state == BTC_RSSI_STATE_HIGH) ||
+		    (wifi_rssi_state == BTC_RSSI_STATE_STAY_HIGH)) {
+			halbtc8723b1ant_sw_mechanism1(btcoexist, false, true,
+						      false, false);
+			halbtc8723b1ant_sw_mechanism2(btcoexist, false, false,
+						      false, 0x18);
+		} else {
+			halbtc8723b1ant_sw_mechanism1(btcoexist, false, true,
+						      false, false);
+			halbtc8723b1ant_sw_mechanism2(btcoexist, false, false,
+						      false, 0x18);
+		}
+	} else {
+		/* sw mechanism */
+		if ((wifi_rssi_state == BTC_RSSI_STATE_HIGH) ||
+		    (wifi_rssi_state == BTC_RSSI_STATE_STAY_HIGH)) {
+			halbtc8723b1ant_sw_mechanism1(btcoexist, false, true,
+						      false, false);
+			halbtc8723b1ant_sw_mechanism2(btcoexist, false, false,
+						      false, 0x18);
+		} else {
+			halbtc8723b1ant_sw_mechanism1(btcoexist, false, true,
+						      false, false);
+			halbtc8723b1ant_sw_mechanism2(btcoexist, false, false,
+						      false, 0x18);
+		}
+	}
+}
+
+void halbtc8723b1ant_action_hid(struct btc_coexist *btcoexist)
+{
+	u8 wifi_rssi_state, bt_rssi_state;
+	u32 wifi_bw;
+
+	wifi_rssi_state = halbtc8723b1ant_wifi_rssi_state(btcoexist,
+							  0, 2, 25, 0);
+	bt_rssi_state = halbtc8723b1ant_bt_rssi_state(2, 50, 0);
+
+	halbtc8723b1ant_fw_dac_swing_lvl(btcoexist, NORMAL_EXEC, 6);
+
+	if (halbtc8723b1ant_need_to_dec_bt_pwr(btcoexist))
+		halbtc8723b1ant_dec_bt_pwr(btcoexist, NORMAL_EXEC, false);
+	else
+		halbtc8723b1ant_dec_bt_pwr(btcoexist, NORMAL_EXEC, false);
+
+	btcoexist->btc_get(btcoexist, BTC_GET_U4_WIFI_BW, &wifi_bw);
+
+	if (BTC_WIFI_BW_HT40 == wifi_bw) {
+		/* sw mechanism */
+		if ((wifi_rssi_state == BTC_RSSI_STATE_HIGH) ||
+		    (wifi_rssi_state == BTC_RSSI_STATE_STAY_HIGH)) {
+			halbtc8723b1ant_sw_mechanism1(btcoexist, false, true,
+						      false, false);
+			halbtc8723b1ant_sw_mechanism2(btcoexist, false, false,
+						      false, 0x18);
+		} else {
+			halbtc8723b1ant_sw_mechanism1(btcoexist, false, true,
+						      false, false);
+			halbtc8723b1ant_sw_mechanism2(btcoexist, false, false,
+						      false, 0x18);
+		}
+	} else {
+		/* sw mechanism */
+		if ((wifi_rssi_state == BTC_RSSI_STATE_HIGH) ||
+		    (wifi_rssi_state == BTC_RSSI_STATE_STAY_HIGH)) {
+			halbtc8723b1ant_sw_mechanism1(btcoexist, false, true,
+						      false, false);
+			halbtc8723b1ant_sw_mechanism2(btcoexist, false, false,
+						      false, 0x18);
+		} else {
+			halbtc8723b1ant_sw_mechanism1(btcoexist, false, true,
+						      false, false);
+			halbtc8723b1ant_sw_mechanism2(btcoexist, false, false,
+						      false, 0x18);
+		}
+	}
+}
+
+/*A2DP only / PAN(EDR) only/ A2DP+PAN(HS) */
+void halbtc8723b1ant_action_a2dp(struct btc_coexist *btcoexist)
+{
+	u8 wifi_rssi_state, bt_rssi_state;
+	u32 wifi_bw;
+
+	wifi_rssi_state = halbtc8723b1ant_wifi_rssi_state(btcoexist,
+							  0, 2, 25, 0);
+	bt_rssi_state = halbtc8723b1ant_bt_rssi_state(2, 50, 0);
+
+	halbtc8723b1ant_fw_dac_swing_lvl(btcoexist, NORMAL_EXEC, 6);
+
+	if (halbtc8723b1ant_need_to_dec_bt_pwr(btcoexist))
+		halbtc8723b1ant_dec_bt_pwr(btcoexist, NORMAL_EXEC, false);
+	else
+		halbtc8723b1ant_dec_bt_pwr(btcoexist, NORMAL_EXEC, false);
+
+	btcoexist->btc_get(btcoexist, BTC_GET_U4_WIFI_BW, &wifi_bw);
+
+	if (BTC_WIFI_BW_HT40 == wifi_bw) {
+		/* sw mechanism */
+		if ((wifi_rssi_state == BTC_RSSI_STATE_HIGH) ||
+		    (wifi_rssi_state == BTC_RSSI_STATE_STAY_HIGH)) {
+			halbtc8723b1ant_sw_mechanism1(btcoexist, false, false,
+						      false, false);
+			halbtc8723b1ant_sw_mechanism2(btcoexist, false, false,
+						      false, 0x18);
+		} else {
+			halbtc8723b1ant_sw_mechanism1(btcoexist, false, false,
+						      false, false);
+			halbtc8723b1ant_sw_mechanism2(btcoexist, false, false,
+						      false, 0x18);
+		}
+	} else {
+		/* sw mechanism */
+		if ((wifi_rssi_state == BTC_RSSI_STATE_HIGH) ||
+		    (wifi_rssi_state == BTC_RSSI_STATE_STAY_HIGH)) {
+			halbtc8723b1ant_sw_mechanism1(btcoexist, false, false,
+						      false, false);
+			halbtc8723b1ant_sw_mechanism2(btcoexist, false, false,
+						      false, 0x18);
+		} else {
+			halbtc8723b1ant_sw_mechanism1(btcoexist, false, false,
+						      false, false);
+			halbtc8723b1ant_sw_mechanism2(btcoexist, false, false,
+						      false, 0x18);
+		}
+	}
+}
+
+void halbtc8723b1ant_action_a2dp_pan_hs(struct btc_coexist *btcoexist)
+{
+	u8 wifi_rssi_state, bt_rssi_state, bt_info_ext;
+	u32 wifi_bw;
+
+	bt_info_ext = coex_sta->bt_info_ext;
+	wifi_rssi_state = halbtc8723b1ant_wifi_rssi_state(btcoexist,
+							  0, 2, 25, 0);
+	bt_rssi_state = halbtc8723b1ant_bt_rssi_state(2, 50, 0);
+
+	halbtc8723b1ant_fw_dac_swing_lvl(btcoexist, NORMAL_EXEC, 6);
+
+	if (halbtc8723b1ant_need_to_dec_bt_pwr(btcoexist))
+		halbtc8723b1ant_dec_bt_pwr(btcoexist, NORMAL_EXEC, false);
+	else
+		halbtc8723b1ant_dec_bt_pwr(btcoexist, NORMAL_EXEC, false);
+
+	btcoexist->btc_get(btcoexist, BTC_GET_U4_WIFI_BW, &wifi_bw);
+
+	if (BTC_WIFI_BW_HT40 == wifi_bw) {
+		/* sw mechanism */
+		if ((wifi_rssi_state == BTC_RSSI_STATE_HIGH) ||
+		    (wifi_rssi_state == BTC_RSSI_STATE_STAY_HIGH)) {
+			halbtc8723b1ant_sw_mechanism1(btcoexist, false, false,
+						      false, false);
+			halbtc8723b1ant_sw_mechanism2(btcoexist, false, false,
+						      false, 0x18);
+		} else {
+			halbtc8723b1ant_sw_mechanism1(btcoexist, false, false,
+						      false, false);
+			halbtc8723b1ant_sw_mechanism2(btcoexist, false, false,
+						      false, 0x18);
+		}
+	} else {
+		/* sw mechanism */
+		if ((wifi_rssi_state == BTC_RSSI_STATE_HIGH) ||
+		    (wifi_rssi_state == BTC_RSSI_STATE_STAY_HIGH)) {
+			halbtc8723b1ant_sw_mechanism1(btcoexist, false, false,
+						      false, false);
+			halbtc8723b1ant_sw_mechanism2(btcoexist, false, false,
+						      false, 0x18);
+		} else {
+			halbtc8723b1ant_sw_mechanism1(btcoexist, false, false,
+						      false, false);
+			halbtc8723b1ant_sw_mechanism2(btcoexist, false, false,
+						      false, 0x18);
+		}
+	}
+}
+
+void halbtc8723b1ant_action_pan_edr(struct btc_coexist *btcoexist)
+{
+	u8 wifi_rssi_state, bt_rssi_state;
+	u32 wifi_bw;
+
+	wifi_rssi_state = halbtc8723b1ant_wifi_rssi_state(btcoexist,
+							  0, 2, 25, 0);
+	bt_rssi_state = halbtc8723b1ant_bt_rssi_state(2, 50, 0);
+
+	halbtc8723b1ant_fw_dac_swing_lvl(btcoexist, NORMAL_EXEC, 6);
+
+	if (halbtc8723b1ant_need_to_dec_bt_pwr(btcoexist))
+		halbtc8723b1ant_dec_bt_pwr(btcoexist, NORMAL_EXEC, false);
+	else
+		halbtc8723b1ant_dec_bt_pwr(btcoexist, NORMAL_EXEC, false);
+
+	btcoexist->btc_get(btcoexist, BTC_GET_U4_WIFI_BW, &wifi_bw);
+
+	if (BTC_WIFI_BW_HT40 == wifi_bw) {
+		/* sw mechanism */
+		if ((wifi_rssi_state == BTC_RSSI_STATE_HIGH) ||
+		    (wifi_rssi_state == BTC_RSSI_STATE_STAY_HIGH)) {
+			halbtc8723b1ant_sw_mechanism1(btcoexist, false, false,
+						      false, false);
+			halbtc8723b1ant_sw_mechanism2(btcoexist, false, false,
+						      false, 0x18);
+		} else {
+			halbtc8723b1ant_sw_mechanism1(btcoexist, false, false,
+						      false, false);
+			halbtc8723b1ant_sw_mechanism2(btcoexist, false, false,
+						      false, 0x18);
+		}
+	} else {
+		/* sw mechanism */
+		if ((wifi_rssi_state == BTC_RSSI_STATE_HIGH) ||
+		    (wifi_rssi_state == BTC_RSSI_STATE_STAY_HIGH)) {
+			halbtc8723b1ant_sw_mechanism1(btcoexist, false, false,
+						      false, false);
+			halbtc8723b1ant_sw_mechanism2(btcoexist, false, false,
+						      false, 0x18);
+		} else {
+			halbtc8723b1ant_sw_mechanism1(btcoexist, false, false,
+						      false, false);
+			halbtc8723b1ant_sw_mechanism2(btcoexist, false, false,
+						      false, 0x18);
+		}
+	}
+}
+
+/* PAN(HS) only */
+void halbtc8723b1ant_action_pan_hs(struct btc_coexist *btcoexist)
+{
+	u8 wifi_rssi_state, bt_rssi_state;
+	u32 wifi_bw;
+
+	wifi_rssi_state = halbtc8723b1ant_wifi_rssi_state(btcoexist,
+							  0, 2, 25, 0);
+	bt_rssi_state = halbtc8723b1ant_bt_rssi_state(2, 50, 0);
+
+	halbtc8723b1ant_fw_dac_swing_lvl(btcoexist, NORMAL_EXEC, 6);
+
+	btcoexist->btc_get(btcoexist, BTC_GET_U4_WIFI_BW, &wifi_bw);
+
+	if (BTC_WIFI_BW_HT40 == wifi_bw) {
+		/* fw mechanism */
+		if ((wifi_rssi_state == BTC_RSSI_STATE_HIGH) ||
+		    (wifi_rssi_state == BTC_RSSI_STATE_STAY_HIGH))
+			halbtc8723b1ant_dec_bt_pwr(btcoexist, NORMAL_EXEC,
+						   false);
+		else
+			halbtc8723b1ant_dec_bt_pwr(btcoexist, NORMAL_EXEC,
+						   false);
+
+		/* sw mechanism */
+		if ((wifi_rssi_state == BTC_RSSI_STATE_HIGH) ||
+		    (wifi_rssi_state == BTC_RSSI_STATE_STAY_HIGH)) {
+			halbtc8723b1ant_sw_mechanism1(btcoexist, false, false,
+						      false, false);
+			halbtc8723b1ant_sw_mechanism2(btcoexist, false, false,
+						      false, 0x18);
+		} else {
+			halbtc8723b1ant_sw_mechanism1(btcoexist, false, false,
+						      false, false);
+			halbtc8723b1ant_sw_mechanism2(btcoexist, false, false,
+						      false, 0x18);
+		}
+	} else {
+		/* fw mechanism */
+		if ((wifi_rssi_state == BTC_RSSI_STATE_HIGH) ||
+		    (wifi_rssi_state == BTC_RSSI_STATE_STAY_HIGH))
+			halbtc8723b1ant_dec_bt_pwr(btcoexist, NORMAL_EXEC,
+						   false);
+		else
+			halbtc8723b1ant_dec_bt_pwr(btcoexist, NORMAL_EXEC,
+						   false);
+
+		/* sw mechanism */
+		if ((wifi_rssi_state == BTC_RSSI_STATE_HIGH) ||
+		    (wifi_rssi_state == BTC_RSSI_STATE_STAY_HIGH)) {
+			halbtc8723b1ant_sw_mechanism1(btcoexist, false, false,
+						      false, false);
+			halbtc8723b1ant_sw_mechanism2(btcoexist, false, false,
+						      false, 0x18);
+		} else {
+			halbtc8723b1ant_sw_mechanism1(btcoexist, false, false,
+						      false, false);
+			halbtc8723b1ant_sw_mechanism2(btcoexist, false, false,
+						      false, 0x18);
+		}
+	}
+}
+
+/*PAN(EDR)+A2DP */
+void halbtc8723b1ant_action_pan_edr_a2dp(struct btc_coexist *btcoexist)
+{
+	u8 wifi_rssi_state, bt_rssi_state, bt_info_ext;
+	u32 wifi_bw;
+
+	bt_info_ext = coex_sta->bt_info_ext;
+	wifi_rssi_state = halbtc8723b1ant_wifi_rssi_state(btcoexist,
+							  0, 2, 25, 0);
+	bt_rssi_state = halbtc8723b1ant_bt_rssi_state(2, 50, 0);
+
+	halbtc8723b1ant_fw_dac_swing_lvl(btcoexist, NORMAL_EXEC, 6);
+
+	if (halbtc8723b1ant_need_to_dec_bt_pwr(btcoexist))
+		halbtc8723b1ant_dec_bt_pwr(btcoexist, NORMAL_EXEC, false);
+	else
+		halbtc8723b1ant_dec_bt_pwr(btcoexist, NORMAL_EXEC, false);
+
+	btcoexist->btc_get(btcoexist, BTC_GET_U4_WIFI_BW, &wifi_bw);
+
+	if (BTC_WIFI_BW_HT40 == wifi_bw) {
+		/* sw mechanism */
+		if ((wifi_rssi_state == BTC_RSSI_STATE_HIGH) ||
+		    (wifi_rssi_state == BTC_RSSI_STATE_STAY_HIGH)) {
+			halbtc8723b1ant_sw_mechanism1(btcoexist, false, false,
+						      false, false);
+			halbtc8723b1ant_sw_mechanism2(btcoexist, false, false,
+						      false, 0x18);
+		} else {
+			halbtc8723b1ant_sw_mechanism1(btcoexist, false, false,
+						      false, false);
+			halbtc8723b1ant_sw_mechanism2(btcoexist, false, false,
+						      false, 0x18);
+		}
+	} else {
+		/* sw mechanism */
+		if ((wifi_rssi_state == BTC_RSSI_STATE_HIGH) ||
+		    (wifi_rssi_state == BTC_RSSI_STATE_STAY_HIGH)) {
+			halbtc8723b1ant_sw_mechanism1(btcoexist, false, false,
+						      false, false);
+			halbtc8723b1ant_sw_mechanism2(btcoexist, false, false,
+						      false, 0x18);
+		} else {
+			halbtc8723b1ant_sw_mechanism1(btcoexist, false, false,
+						      false, false);
+			halbtc8723b1ant_sw_mechanism2(btcoexist, false, false,
+						      false, 0x18);
+		}
+	}
+}
+
+void halbtc8723b1ant_action_pan_edr_hid(struct btc_coexist *btcoexist)
+{
+	u8 wifi_rssi_state, bt_rssi_state;
+	u32 wifi_bw;
+
+	wifi_rssi_state = halbtc8723b1ant_wifi_rssi_state(btcoexist,
+							  0, 2, 25, 0);
+	bt_rssi_state = halbtc8723b1ant_bt_rssi_state(2, 50, 0);
+
+	halbtc8723b1ant_fw_dac_swing_lvl(btcoexist, NORMAL_EXEC, 6);
+
+	if (halbtc8723b1ant_need_to_dec_bt_pwr(btcoexist))
+		halbtc8723b1ant_dec_bt_pwr(btcoexist, NORMAL_EXEC, false);
+	else
+		halbtc8723b1ant_dec_bt_pwr(btcoexist, NORMAL_EXEC, false);
+
+	btcoexist->btc_get(btcoexist, BTC_GET_U4_WIFI_BW, &wifi_bw);
+
+	if (BTC_WIFI_BW_HT40 == wifi_bw) {
+		/* sw mechanism */
+		if ((wifi_rssi_state == BTC_RSSI_STATE_HIGH) ||
+		    (wifi_rssi_state == BTC_RSSI_STATE_STAY_HIGH)) {
+			halbtc8723b1ant_sw_mechanism1(btcoexist, false, true,
+						      false, false);
+			halbtc8723b1ant_sw_mechanism2(btcoexist, false, false,
+						      false, 0x18);
+		} else {
+			halbtc8723b1ant_sw_mechanism1(btcoexist, false, true,
+						      false, false);
+			halbtc8723b1ant_sw_mechanism2(btcoexist, false, false,
+						      false, 0x18);
+		}
+	} else {
+		/* sw mechanism */
+		if ((wifi_rssi_state == BTC_RSSI_STATE_HIGH) ||
+		    (wifi_rssi_state == BTC_RSSI_STATE_STAY_HIGH)) {
+			halbtc8723b1ant_sw_mechanism1(btcoexist, false, true,
+						      false, false);
+			halbtc8723b1ant_sw_mechanism2(btcoexist, false, false,
+						      false, 0x18);
+		} else {
+			halbtc8723b1ant_sw_mechanism1(btcoexist, false, true,
+						      false, false);
+			halbtc8723b1ant_sw_mechanism2(btcoexist, false, false,
+						      false, 0x18);
+		}
+	}
+}
+
+/* HID+A2DP+PAN(EDR) */
+void halbtc8723b1ant_action_hid_a2dp_pan_edr(struct btc_coexist *btcoexist)
+{
+	u8 wifi_rssi_state, bt_rssi_state, bt_info_ext;
+	u32 wifi_bw;
+
+	bt_info_ext = coex_sta->bt_info_ext;
+	wifi_rssi_state = halbtc8723b1ant_wifi_rssi_state(btcoexist,
+							  0, 2, 25, 0);
+	bt_rssi_state = halbtc8723b1ant_bt_rssi_state(2, 50, 0);
+
+	halbtc8723b1ant_fw_dac_swing_lvl(btcoexist, NORMAL_EXEC, 6);
+
+	if (halbtc8723b1ant_need_to_dec_bt_pwr(btcoexist))
+		halbtc8723b1ant_dec_bt_pwr(btcoexist, NORMAL_EXEC, false);
+	else
+		halbtc8723b1ant_dec_bt_pwr(btcoexist, NORMAL_EXEC, false);
+
+	btcoexist->btc_get(btcoexist, BTC_GET_U4_WIFI_BW, &wifi_bw);
+
+	if (BTC_WIFI_BW_HT40 == wifi_bw) {
+		/* sw mechanism */
+		if ((wifi_rssi_state == BTC_RSSI_STATE_HIGH) ||
+		    (wifi_rssi_state == BTC_RSSI_STATE_STAY_HIGH)) {
+			halbtc8723b1ant_sw_mechanism1(btcoexist, false, true,
+						      false, false);
+			halbtc8723b1ant_sw_mechanism2(btcoexist, false, false,
+						      false, 0x18);
+		} else {
+			halbtc8723b1ant_sw_mechanism1(btcoexist, false, true,
+						      false, false);
+			halbtc8723b1ant_sw_mechanism2(btcoexist, false, false,
+						      false, 0x18);
+		}
+	} else {
+		/* sw mechanism */
+		if ((wifi_rssi_state == BTC_RSSI_STATE_HIGH) ||
+		    (wifi_rssi_state == BTC_RSSI_STATE_STAY_HIGH)) {
+			halbtc8723b1ant_sw_mechanism1(btcoexist, false, true,
+						      false, false);
+			halbtc8723b1ant_sw_mechanism2(btcoexist, false, false,
+						      false, 0x18);
+		} else {
+			halbtc8723b1ant_sw_mechanism1(btcoexist, false, true,
+						      false, false);
+			halbtc8723b1ant_sw_mechanism2(btcoexist, false, false,
+						      false, 0x18);
+		}
+	}
+}
+
+void halbtc8723b1ant_action_hid_a2dp(struct btc_coexist *btcoexist)
+{
+	u8 wifi_rssi_state, bt_rssi_state, bt_info_ext;
+	u32 wifi_bw;
+
+	bt_info_ext = coex_sta->bt_info_ext;
+	wifi_rssi_state = halbtc8723b1ant_wifi_rssi_state(btcoexist,
+							  0, 2, 25, 0);
+	bt_rssi_state = halbtc8723b1ant_bt_rssi_state(2, 50, 0);
+
+	if (halbtc8723b1ant_need_to_dec_bt_pwr(btcoexist))
+		halbtc8723b1ant_dec_bt_pwr(btcoexist, NORMAL_EXEC, false);
+	else
+		halbtc8723b1ant_dec_bt_pwr(btcoexist, NORMAL_EXEC, false);
+
+	btcoexist->btc_get(btcoexist, BTC_GET_U4_WIFI_BW, &wifi_bw);
+
+	if (BTC_WIFI_BW_HT40 == wifi_bw) {
+		/* sw mechanism */
+		if ((wifi_rssi_state == BTC_RSSI_STATE_HIGH) ||
+		    (wifi_rssi_state == BTC_RSSI_STATE_STAY_HIGH)) {
+			halbtc8723b1ant_sw_mechanism1(btcoexist, false, true,
+						      false, false);
+			halbtc8723b1ant_sw_mechanism2(btcoexist, false, false,
+						      false, 0x18);
+		} else {
+			halbtc8723b1ant_sw_mechanism1(btcoexist, false, true,
+						      false, false);
+			halbtc8723b1ant_sw_mechanism2(btcoexist, false, false,
+						      false, 0x18);
+		}
+	} else {
+		/* sw mechanism */
+		if ((wifi_rssi_state == BTC_RSSI_STATE_HIGH) ||
+		    (wifi_rssi_state == BTC_RSSI_STATE_STAY_HIGH)) {
+			halbtc8723b1ant_sw_mechanism1(btcoexist, false, true,
+						      false, false);
+			halbtc8723b1ant_sw_mechanism2(btcoexist, false, false,
+						      false, 0x18);
+		} else {
+			halbtc8723b1ant_sw_mechanism1(btcoexist, false, true,
+						      false, false);
+			halbtc8723b1ant_sw_mechanism2(btcoexist, false, false,
+						      false, 0x18);
+		}
+	}
+}
+
+/*****************************************************
+ *
+ *	Non-Software Coex Mechanism start
+ *
+ *****************************************************/
+void halbtc8723b1ant_action_hs(struct btc_coexist *btcoexist)
+{
+	halbtc8723b1ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 5);
+	halbtc8723b1ant_coex_table_with_type(btcoexist, FORCE_EXEC, 2);
+}
+
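+/* BT is under inquiry/page scan: pick power-save state, PS-TDMA case and
+ * coex table based on wifi connection state and SCO/HID activity.
+ */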
+void halbtc8723b1ant_action_bt_inquiry(struct btc_coexist *btcoexist)
+{
+	struct btc_bt_link_info *bt_link_info = &btcoexist->bt_link_info;
+	bool wifi_connected = false, ap_enable = false;
+
+	btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_AP_MODE_ENABLE,
+			   &ap_enable);
+	btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_CONNECTED,
+			   &wifi_connected);
+
+	if (!wifi_connected) {
+		halbtc8723b1ant_power_save_state(btcoexist,
+						 BTC_PS_WIFI_NATIVE, 0x0, 0x0);
+		halbtc8723b1ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 5);
+		halbtc8723b1ant_coex_table_with_type(btcoexist, NORMAL_EXEC, 1);
+	} else if (bt_link_info->sco_exist || bt_link_info->hid_only) {
+		/* SCO/HID-only busy */
+		halbtc8723b1ant_power_save_state(btcoexist, BTC_PS_WIFI_NATIVE,
+						 0x0, 0x0);
+		halbtc8723b1ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 32);
+		halbtc8723b1ant_coex_table_with_type(btcoexist, NORMAL_EXEC, 1);
+	} else {
+		if (ap_enable)
+			halbtc8723b1ant_power_save_state(btcoexist,
+							 BTC_PS_WIFI_NATIVE,
+							 0x0, 0x0);
+		else
+			halbtc8723b1ant_power_save_state(btcoexist,
+							 BTC_PS_LPS_ON,
+							 0x50, 0x4);
+
+		halbtc8723b1ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 30);
+		halbtc8723b1ant_coex_table_with_type(btcoexist, NORMAL_EXEC, 1);
+	}
+}
+
+void halbtc8723b1ant_action_bt_sco_hid_only_busy(struct btc_coexist *btcoexist,
+						 u8 wifi_status)
+{
+	struct btc_bt_link_info *bt_link_info = &btcoexist->bt_link_info;
+	bool wifi_connected = false;
+
+	btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_CONNECTED,
+			   &wifi_connected);
+
+	/* tdma and coex table */
+
+	if (bt_link_info->sco_exist) {
+		halbtc8723b1ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 5);
+		halbtc8723b1ant_coex_table_with_type(btcoexist, NORMAL_EXEC, 2);
+	} else { /* HID */
+		halbtc8723b1ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 6);
+		halbtc8723b1ant_coex_table_with_type(btcoexist, NORMAL_EXEC, 5);
+	}
+}
+
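+/* Wifi connected and BT ACL busy: pick PS-TDMA case and coex table from the
+ * combination of active BT profiles (HID/A2DP/PAN).
+ */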
+void halbtc8723b1ant_action_wifi_connected_bt_acl_busy(
+					struct btc_coexist *btcoexist,
+					u8 wifi_status)
+{
+	struct btc_bt_link_info *bt_link_info = &btcoexist->bt_link_info;
+	u8 bt_rssi_state;
+
+	bt_rssi_state = halbtc8723b1ant_bt_rssi_state(2, 28, 0);
+
+	if (bt_link_info->hid_only) {  /*HID */
+		halbtc8723b1ant_action_bt_sco_hid_only_busy(btcoexist,
+							    wifi_status);
+		coex_dm->auto_tdma_adjust = false;
+		return;
+	} else if (bt_link_info->a2dp_only) { /*A2DP */
+		if ((bt_rssi_state == BTC_RSSI_STATE_HIGH) ||
+		    (bt_rssi_state == BTC_RSSI_STATE_STAY_HIGH)) {
+			halbtc8723b1ant_tdma_duration_adjust_for_acl(btcoexist,
+								     wifi_status);
+		} else { /*for low BT RSSI */
+			halbtc8723b1ant_ps_tdma(btcoexist, NORMAL_EXEC,
+						true, 11);
+			coex_dm->auto_tdma_adjust = false;
+		}
+
+		halbtc8723b1ant_coex_table_with_type(btcoexist, NORMAL_EXEC, 1);
+	} else if (bt_link_info->hid_exist &&
+			bt_link_info->a2dp_exist) { /*HID+A2DP */
+		if ((bt_rssi_state == BTC_RSSI_STATE_HIGH) ||
+		    (bt_rssi_state == BTC_RSSI_STATE_STAY_HIGH)) {
+			halbtc8723b1ant_ps_tdma(btcoexist, NORMAL_EXEC,
+						true, 14);
+			coex_dm->auto_tdma_adjust = false;
+		} else { /*for low BT RSSI*/
+			halbtc8723b1ant_ps_tdma(btcoexist, NORMAL_EXEC,
+						true, 14);
+			coex_dm->auto_tdma_adjust = false;
+		}
+
+		halbtc8723b1ant_coex_table_with_type(btcoexist, NORMAL_EXEC, 6);
+	 /*PAN(OPP,FTP), HID+PAN(OPP,FTP) */
+	} else if (bt_link_info->pan_only ||
+		   (bt_link_info->hid_exist && bt_link_info->pan_exist)) {
+		halbtc8723b1ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 3);
+		halbtc8723b1ant_coex_table_with_type(btcoexist, NORMAL_EXEC, 6);
+		coex_dm->auto_tdma_adjust = false;
+	 /*A2DP+PAN(OPP,FTP), HID+A2DP+PAN(OPP,FTP)*/
+	} else if ((bt_link_info->a2dp_exist && bt_link_info->pan_exist) ||
+		   (bt_link_info->hid_exist && bt_link_info->a2dp_exist &&
+		    bt_link_info->pan_exist)) {
+		halbtc8723b1ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 13);
+		halbtc8723b1ant_coex_table_with_type(btcoexist, NORMAL_EXEC, 1);
+		coex_dm->auto_tdma_adjust = false;
+	} else {
+		halbtc8723b1ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 11);
+		halbtc8723b1ant_coex_table_with_type(btcoexist, NORMAL_EXEC, 1);
+		coex_dm->auto_tdma_adjust = false;
+	}
+}
+
+void halbtc8723b1ant_action_wifi_not_connected(struct btc_coexist *btcoexist)
+{
+	/* power save state */
+	halbtc8723b1ant_power_save_state(btcoexist, BTC_PS_WIFI_NATIVE,
+					 0x0, 0x0);
+
+	/* tdma and coex table */
+	halbtc8723b1ant_ps_tdma(btcoexist, NORMAL_EXEC, false, 8);
+	halbtc8723b1ant_coex_table_with_type(btcoexist, NORMAL_EXEC, 0);
+}
+
+void halbtc8723b1ant_action_wifi_not_connected_asso_auth_scan(
+						struct btc_coexist *btcoexist)
+{
+	halbtc8723b1ant_power_save_state(btcoexist, BTC_PS_WIFI_NATIVE,
+					 0x0, 0x0);
+
+	halbtc8723b1ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 22);
+	halbtc8723b1ant_coex_table_with_type(btcoexist, NORMAL_EXEC, 1);
+}
+
+void halbtc8723b1ant_ActionWifiConnectedScan(struct btc_coexist *btcoexist)
+{
+	struct btc_bt_link_info *bt_link_info = &btcoexist->bt_link_info;
+
+	halbtc8723b1ant_power_save_state(btcoexist, BTC_PS_WIFI_NATIVE,
+					 0x0, 0x0);
+
+	/* tdma and coex table */
+	if (BT_8723B_1ANT_BT_STATUS_ACL_BUSY == coex_dm->bt_status) {
+		if (bt_link_info->a2dp_exist &&
+		    bt_link_info->pan_exist) {
+			halbtc8723b1ant_ps_tdma(btcoexist, NORMAL_EXEC,
+						true, 22);
+			halbtc8723b1ant_coex_table_with_type(btcoexist,
+							     NORMAL_EXEC, 1);
+		} else {
+			halbtc8723b1ant_ps_tdma(btcoexist, NORMAL_EXEC,
+						true, 20);
+			halbtc8723b1ant_coex_table_with_type(btcoexist,
+							     NORMAL_EXEC, 1);
+		}
+	} else if ((BT_8723B_1ANT_BT_STATUS_SCO_BUSY == coex_dm->bt_status) ||
+		   (BT_8723B_1ANT_BT_STATUS_ACL_SCO_BUSY ==
+		    coex_dm->bt_status)) {
+		halbtc8723b1ant_action_bt_sco_hid_only_busy(btcoexist,
+				BT_8723B_1ANT_WIFI_STATUS_CONNECTED_SCAN);
+	} else {
+		halbtc8723b1ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 20);
+		halbtc8723b1ant_coex_table_with_type(btcoexist, NORMAL_EXEC, 1);
+	}
+}
+
+void halbtc8723b1ant_action_wifi_connected_special_packet(
+						struct btc_coexist *btcoexist)
+{
+	bool hs_connecting = false;
+	struct btc_bt_link_info *bt_link_info = &btcoexist->bt_link_info;
+
+	btcoexist->btc_get(btcoexist, BTC_GET_BL_HS_CONNECTING, &hs_connecting);
+
+	halbtc8723b1ant_power_save_state(btcoexist, BTC_PS_WIFI_NATIVE,
+					 0x0, 0x0);
+
+	/* tdma and coex table */
+	if (BT_8723B_1ANT_BT_STATUS_ACL_BUSY == coex_dm->bt_status) {
+		if (bt_link_info->a2dp_exist && bt_link_info->pan_exist) {
+			halbtc8723b1ant_ps_tdma(btcoexist, NORMAL_EXEC,
+						true, 22);
+			halbtc8723b1ant_coex_table_with_type(btcoexist,
+							     NORMAL_EXEC, 1);
+		} else {
+			halbtc8723b1ant_ps_tdma(btcoexist, NORMAL_EXEC,
+						true, 20);
+			halbtc8723b1ant_coex_table_with_type(btcoexist,
+							     NORMAL_EXEC, 1);
+		}
+	} else {
+		halbtc8723b1ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 20);
+		halbtc8723b1ant_coex_table_with_type(btcoexist, NORMAL_EXEC, 1);
+	}
+}
+
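+/* Wifi connected: handle the 4-way handshake and scan/link/roam cases first,
+ * then set power-save state and PS-TDMA/coex table according to BT status.
+ */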
+void halbtc8723b1ant_action_wifi_connected(struct btc_coexist *btcoexist)
+{
+	bool wifi_busy = false;
+	bool scan = false, link = false, roam = false;
+	bool under_4way = false, ap_enable = false;
+
+	BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
+		  "[BTCoex], CoexForWifiConnect()===>\n");
+
+	btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_4_WAY_PROGRESS,
+			   &under_4way);
+	if (under_4way) {
+		halbtc8723b1ant_action_wifi_connected_special_packet(btcoexist);
+		BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
+			  "[BTCoex], CoexForWifiConnect(), "
+			  "return for wifi is under 4way<===\n");
+		return;
+	}
+
+	btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_SCAN, &scan);
+	btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_LINK, &link);
+	btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_ROAM, &roam);
+
+	if (scan || link || roam) {
+		halbtc8723b1ant_ActionWifiConnectedScan(btcoexist);
+		BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
+			  "[BTCoex], CoexForWifiConnect(), "
+			  "return for wifi is under scan<===\n");
+		return;
+	}
+
+	btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_AP_MODE_ENABLE,
+			   &ap_enable);
+	/* power save state */
+	if (!ap_enable &&
+	    BT_8723B_1ANT_BT_STATUS_ACL_BUSY == coex_dm->bt_status &&
+	    !btcoexist->bt_link_info.hid_only)
+		halbtc8723b1ant_power_save_state(btcoexist, BTC_PS_LPS_ON,
+						 0x50, 0x4);
+	else
+		halbtc8723b1ant_power_save_state(btcoexist, BTC_PS_WIFI_NATIVE,
+						 0x0, 0x0);
+
+	/* tdma and coex table */
+	btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_BUSY, &wifi_busy);
+	if (!wifi_busy) {
+		if (BT_8723B_1ANT_BT_STATUS_ACL_BUSY == coex_dm->bt_status) {
+			halbtc8723b1ant_action_wifi_connected_bt_acl_busy(btcoexist,
+				      BT_8723B_1ANT_WIFI_STATUS_CONNECTED_IDLE);
+		} else if ((BT_8723B_1ANT_BT_STATUS_SCO_BUSY ==
+			    coex_dm->bt_status) ||
+			   (BT_8723B_1ANT_BT_STATUS_ACL_SCO_BUSY ==
+			    coex_dm->bt_status)) {
+			halbtc8723b1ant_action_bt_sco_hid_only_busy(btcoexist,
+				     BT_8723B_1ANT_WIFI_STATUS_CONNECTED_IDLE);
+		} else {
+			halbtc8723b1ant_ps_tdma(btcoexist, NORMAL_EXEC,
+						false, 8);
+			halbtc8723b1ant_coex_table_with_type(btcoexist,
+							     NORMAL_EXEC, 2);
+		}
+	} else {
+		if (BT_8723B_1ANT_BT_STATUS_ACL_BUSY == coex_dm->bt_status) {
+			halbtc8723b1ant_action_wifi_connected_bt_acl_busy(btcoexist,
+				    BT_8723B_1ANT_WIFI_STATUS_CONNECTED_BUSY);
+		} else if ((BT_8723B_1ANT_BT_STATUS_SCO_BUSY ==
+			    coex_dm->bt_status) ||
+			   (BT_8723B_1ANT_BT_STATUS_ACL_SCO_BUSY ==
+			    coex_dm->bt_status)) {
+			halbtc8723b1ant_action_bt_sco_hid_only_busy(btcoexist,
+				    BT_8723B_1ANT_WIFI_STATUS_CONNECTED_BUSY);
+		} else {
+			halbtc8723b1ant_ps_tdma(btcoexist, NORMAL_EXEC,
+						false, 8);
+			halbtc8723b1ant_coex_table_with_type(btcoexist,
+							     NORMAL_EXEC, 2);
+		}
+	}
+}
+
+void halbtc8723b1ant_run_sw_coexist_mechanism(struct btc_coexist *btcoexist)
+{
+	u8 algorithm = 0;
+
+	algorithm = halbtc8723b1ant_action_algorithm(btcoexist);
+	coex_dm->cur_algorithm = algorithm;
+
+	if (!halbtc8723b1ant_is_common_action(btcoexist)) {
+		switch (coex_dm->cur_algorithm) {
+		case BT_8723B_1ANT_COEX_ALGO_SCO:
+			BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
+				  "[BTCoex], Action algorithm = SCO.\n");
+			halbtc8723b1ant_action_sco(btcoexist);
+			break;
+		case BT_8723B_1ANT_COEX_ALGO_HID:
+			BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
+				  "[BTCoex], Action algorithm = HID.\n");
+			halbtc8723b1ant_action_hid(btcoexist);
+			break;
+		case BT_8723B_1ANT_COEX_ALGO_A2DP:
+			BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
+				  "[BTCoex], Action algorithm = A2DP.\n");
+			halbtc8723b1ant_action_a2dp(btcoexist);
+			break;
+		case BT_8723B_1ANT_COEX_ALGO_A2DP_PANHS:
+			BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
+				  "[BTCoex], Action algorithm = "
+				  "A2DP+PAN(HS).\n");
+			halbtc8723b1ant_action_a2dp_pan_hs(btcoexist);
+			break;
+		case BT_8723B_1ANT_COEX_ALGO_PANEDR:
+			BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
+				  "[BTCoex], Action algorithm = PAN(EDR).\n");
+			halbtc8723b1ant_action_pan_edr(btcoexist);
+			break;
+		case BT_8723B_1ANT_COEX_ALGO_PANHS:
+			BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
+				  "[BTCoex], Action algorithm = HS mode.\n");
+			halbtc8723b1ant_action_pan_hs(btcoexist);
+			break;
+		case BT_8723B_1ANT_COEX_ALGO_PANEDR_A2DP:
+			BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
+				  "[BTCoex], Action algorithm = PAN+A2DP.\n");
+			halbtc8723b1ant_action_pan_edr_a2dp(btcoexist);
+			break;
+		case BT_8723B_1ANT_COEX_ALGO_PANEDR_HID:
+			BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
+				  "[BTCoex], Action algorithm = "
+				  "PAN(EDR)+HID.\n");
+			halbtc8723b1ant_action_pan_edr_hid(btcoexist);
+			break;
+		case BT_8723B_1ANT_COEX_ALGO_HID_A2DP_PANEDR:
+			BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
+				  "[BTCoex], Action algorithm = "
+				  "HID+A2DP+PAN.\n");
+			halbtc8723b1ant_action_hid_a2dp_pan_edr(btcoexist);
+			break;
+		case BT_8723B_1ANT_COEX_ALGO_HID_A2DP:
+			BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
+				  "[BTCoex], Action algorithm = HID+A2DP.\n");
+			halbtc8723b1ant_action_hid_a2dp(btcoexist);
+			break;
+		default:
+			BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
+				  "[BTCoex], Action algorithm = "
+				  "coexist All Off!!\n");
+			break;
+		}
+		coex_dm->pre_algorithm = coex_dm->cur_algorithm;
+	}
+}
+
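+/* Top-level coex dispatcher: return early under manual control, stopped coex
+ * DM or IPS; apply wifi tx/rx limits according to the BT profiles, then
+ * branch to the inquiry/HS/not-connected/connected actions.
+ */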
+void halbtc8723b1ant_run_coexist_mechanism(struct btc_coexist *btcoexist)
+{
+	struct btc_bt_link_info *bt_link_info = &btcoexist->bt_link_info;
+	bool wifi_connected = false, bt_hs_on = false;
+	bool limited_dig = false, bIncreaseScanDevNum = false;
+	bool b_bt_ctrl_agg_buf_size = false;
+	u8 agg_buf_size = 5;
+	u8 wifi_rssi_state = BTC_RSSI_STATE_HIGH;
+
+	BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
+		  "[BTCoex], RunCoexistMechanism()===>\n");
+
+	if (btcoexist->manual_control) {
+		BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
+			  "[BTCoex], RunCoexistMechanism(), "
+			  "return for Manual CTRL <===\n");
+		return;
+	}
+
+	if (btcoexist->stop_coex_dm) {
+		BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
+			  "[BTCoex], RunCoexistMechanism(), "
+			  "return for Stop Coex DM <===\n");
+		return;
+	}
+
+	if (coex_sta->under_ips) {
+		BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
+			  "[BTCoex], wifi is under IPS !!!\n");
+		return;
+	}
+
+	if ((BT_8723B_1ANT_BT_STATUS_ACL_BUSY == coex_dm->bt_status) ||
+	    (BT_8723B_1ANT_BT_STATUS_SCO_BUSY == coex_dm->bt_status) ||
+	    (BT_8723B_1ANT_BT_STATUS_ACL_SCO_BUSY == coex_dm->bt_status)) {
+		limited_dig = true;
+		bIncreaseScanDevNum = true;
+	}
+
+	btcoexist->btc_set(btcoexist, BTC_SET_BL_BT_LIMITED_DIG, &limited_dig);
+	btcoexist->btc_set(btcoexist, BTC_SET_BL_INC_SCAN_DEV_NUM,
+			   &bIncreaseScanDevNum);
+
+	btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_CONNECTED,
+			   &wifi_connected);
+
+	if (!bt_link_info->sco_exist && !bt_link_info->hid_exist) {
+		halbtc8723b1ant_limited_tx(btcoexist, NORMAL_EXEC, 0, 0, 0, 0);
+	} else {
+		if (wifi_connected) {
+			wifi_rssi_state =
+				halbtc8723b1ant_wifi_rssi_state(btcoexist,
+								1, 2, 30, 0);
+			if ((wifi_rssi_state == BTC_RSSI_STATE_HIGH) ||
+			    (wifi_rssi_state == BTC_RSSI_STATE_STAY_HIGH)) {
+				halbtc8723b1ant_limited_tx(btcoexist,
+							   NORMAL_EXEC,
+							   1, 1, 1, 1);
+			} else {
+				halbtc8723b1ant_limited_tx(btcoexist,
+							   NORMAL_EXEC,
+							   1, 1, 1, 1);
+			}
+		} else {
+			halbtc8723b1ant_limited_tx(btcoexist, NORMAL_EXEC,
+						   0, 0, 0, 0);
+		}
+	}
+
+	if (bt_link_info->sco_exist) {
+		b_bt_ctrl_agg_buf_size = true;
+		agg_buf_size = 0x3;
+	} else if (bt_link_info->hid_exist) {
+		b_bt_ctrl_agg_buf_size = true;
+		agg_buf_size = 0x5;
+	} else if (bt_link_info->a2dp_exist || bt_link_info->pan_exist) {
+		b_bt_ctrl_agg_buf_size = true;
+		agg_buf_size = 0x8;
+	}
+	halbtc8723b1ant_limited_rx(btcoexist, NORMAL_EXEC, false,
+				   b_bt_ctrl_agg_buf_size, agg_buf_size);
+
+	halbtc8723b1ant_run_sw_coexist_mechanism(btcoexist);
+
+	btcoexist->btc_get(btcoexist, BTC_GET_BL_HS_OPERATION, &bt_hs_on);
+
+	if (coex_sta->c2h_bt_inquiry_page) {
+		halbtc8723b1ant_action_bt_inquiry(btcoexist);
+		return;
+	} else if (bt_hs_on) {
+		halbtc8723b1ant_action_hs(btcoexist);
+		return;
+	}
+
+	if (!wifi_connected) {
+		bool scan = false, link = false, roam = false;
+
+		BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
+			  "[BTCoex], wifi is non connected-idle !!!\n");
+
+		btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_SCAN, &scan);
+		btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_LINK, &link);
+		btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_ROAM, &roam);
+
+		if (scan || link || roam)
+			halbtc8723b1ant_action_wifi_not_connected_asso_auth_scan(btcoexist);
+		else
+			halbtc8723b1ant_action_wifi_not_connected(btcoexist);
+	} else { /* wifi LPS/Busy */
+		halbtc8723b1ant_action_wifi_connected(btcoexist);
+	}
+}
+
+void halbtc8723b1ant_init_coex_dm(struct btc_coexist *btcoexist)
+{
+	/* force to reset coex mechanism */
+	halbtc8723b1ant_fw_dac_swing_lvl(btcoexist, FORCE_EXEC, 6);
+	halbtc8723b1ant_dec_bt_pwr(btcoexist, FORCE_EXEC, false);
+
+	/* sw all off */
+	halbtc8723b1ant_sw_mechanism1(btcoexist, false, false, false, false);
+	halbtc8723b1ant_sw_mechanism2(btcoexist, false, false, false, 0x18);
+
+	halbtc8723b1ant_ps_tdma(btcoexist, FORCE_EXEC, false, 8);
+	halbtc8723b1ant_coex_table_with_type(btcoexist, FORCE_EXEC, 0);
+}
+
+void halbtc8723b1ant_init_hw_config(struct btc_coexist *btcoexist, bool backup)
+{
+	u32 u32tmp = 0;
+	u8 u8tmp = 0;
+	u32 cnt_bt_cal_chk = 0;
+
+	BTC_PRINT(BTC_MSG_INTERFACE, INTF_INIT,
+		  "[BTCoex], 1Ant Init HW Config!!\n");
+
+	if (backup) {/* backup rf 0x1e value */
+		coex_dm->bt_rf0x1e_backup =
+			btcoexist->btc_get_rf_reg(btcoexist,
+						  BTC_RF_A, 0x1e, 0xfffff);
+
+		coex_dm->backup_arfr_cnt1 =
+			btcoexist->btc_read_4byte(btcoexist, 0x430);
+		coex_dm->backup_arfr_cnt2 =
+			btcoexist->btc_read_4byte(btcoexist, 0x434);
+		coex_dm->backup_retry_limit =
+			btcoexist->btc_read_2byte(btcoexist, 0x42a);
+		coex_dm->backup_ampdu_max_time =
+			btcoexist->btc_read_1byte(btcoexist, 0x456);
+	}
+
+	/* WiFi goto standby while GNT_BT 0-->1 */
+	btcoexist->btc_set_rf_reg(btcoexist, BTC_RF_A, 0x1, 0xfffff, 0x780);
+	/* BT goto standby while GNT_BT 1-->0 */
+	btcoexist->btc_set_rf_reg(btcoexist, BTC_RF_A, 0x2, 0xfffff, 0x500);
+
+	btcoexist->btc_write_1byte(btcoexist, 0x974, 0xff);
+	btcoexist->btc_write_1byte_bitmask(btcoexist, 0x944, 0x3, 0x3);
+	btcoexist->btc_write_1byte(btcoexist, 0x930, 0x77);
+
+	/* BT calibration check */
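+	/* poll 0x49d bit0 up to 20 times, 50ms apart */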
+	while (cnt_bt_cal_chk <= 20) {
+		u32tmp = btcoexist->btc_read_4byte(btcoexist, 0x49d);
+		cnt_bt_cal_chk++;
+		if (u32tmp & BIT0) {
+			BTC_PRINT(BTC_MSG_INTERFACE, INTF_INIT,
+				  "[BTCoex], ########### BT "
+				  "calibration(cnt=%d) ###########\n",
+				  cnt_bt_cal_chk);
+			mdelay(50);
+		} else {
+			BTC_PRINT(BTC_MSG_INTERFACE, INTF_INIT,
+				  "[BTCoex], ********** BT NOT "
+				  "calibration (cnt=%d)**********\n",
+				  cnt_bt_cal_chk);
+			break;
+		}
+	}
+
+	/* 0x790[5:0]=0x5 */
+	u8tmp = btcoexist->btc_read_1byte(btcoexist, 0x790);
+	u8tmp &= 0xc0;
+	u8tmp |= 0x5;
+	btcoexist->btc_write_1byte(btcoexist, 0x790, u8tmp);
+
+	/* Enable counter statistics */
+	/*0x76e[3] =1, WLAN_Act control by PTA */
+	btcoexist->btc_write_1byte(btcoexist, 0x76e, 0xc);
+	btcoexist->btc_write_1byte(btcoexist, 0x778, 0x1);
+	btcoexist->btc_write_1byte_bitmask(btcoexist, 0x40, 0x20, 0x1);
+
+	/*Antenna config */
+	halbtc8723b1ant_SetAntPath(btcoexist, BTC_ANT_PATH_PTA, true, false);
+	/* PTA parameter */
+	halbtc8723b1ant_coex_table_with_type(btcoexist, FORCE_EXEC, 0);
+}
+
+void halbtc8723b1ant_wifi_off_hw_cfg(struct btc_coexist *btcoexist)
+{
+	/* set wlan_act to low */
+	btcoexist->btc_write_1byte(btcoexist, 0x76e, 0);
+}
+
+/**************************************************************
+ * workaround functions start with wa_halbtc8723b1ant_
+ **************************************************************/
+/**************************************************************
+ * extern functions start with ex_halbtc8723b1ant_
+ **************************************************************/
+
+void ex_halbtc8723b1ant_init_hwconfig(struct btc_coexist *btcoexist)
+{
+	halbtc8723b1ant_init_hw_config(btcoexist, true);
+}
+
+void ex_halbtc8723b1ant_init_coex_dm(struct btc_coexist *btcoexist)
+{
+	BTC_PRINT(BTC_MSG_INTERFACE, INTF_INIT,
+		  "[BTCoex], Coex Mechanism Init!!\n");
+
+	btcoexist->stop_coex_dm = false;
+
+	halbtc8723b1ant_init_coex_dm(btcoexist);
+
+	halbtc8723b1ant_query_bt_info(btcoexist);
+}
+
+void ex_halbtc8723b1ant_display_coex_info(struct btc_coexist *btcoexist)
+{
+	struct btc_board_info *board_info = &btcoexist->board_info;
+	struct btc_stack_info *stack_info = &btcoexist->stack_info;
+	struct btc_bt_link_info *bt_link_info = &btcoexist->bt_link_info;
+	u8 *cli_buf = btcoexist->cli_buf;
+	u8 u8tmp[4], i, bt_info_ext, psTdmaCase = 0;
+	u16 u16tmp[4];
+	u32 u32tmp[4];
+	bool roam = false, scan = false;
+	bool link = false, wifi_under_5g = false;
+	bool bt_hs_on = false, wifi_busy = false;
+	s32 wifi_rssi = 0, bt_hs_rssi = 0;
+	u32 wifi_bw, wifi_traffic_dir, fa_ofdm, fa_cck;
+	u8 wifi_dot11_chnl, wifi_hs_chnl;
+	u32 fw_ver = 0, bt_patch_ver = 0;
+
+	CL_SPRINTF(cli_buf, BT_TMP_BUF_SIZE,
+		   "\r\n ============[BT Coexist info]============");
+	CL_PRINTF(cli_buf);
+
+	if (btcoexist->manual_control) {
+		CL_SPRINTF(cli_buf, BT_TMP_BUF_SIZE,
+			   "\r\n ============[Under Manual Control]==========");
+		CL_PRINTF(cli_buf);
+		CL_SPRINTF(cli_buf, BT_TMP_BUF_SIZE,
+			   "\r\n ==========================================");
+		CL_PRINTF(cli_buf);
+	}
+	if (btcoexist->stop_coex_dm) {
+		CL_SPRINTF(cli_buf, BT_TMP_BUF_SIZE,
+			   "\r\n ============[Coex is STOPPED]============");
+		CL_PRINTF(cli_buf);
+		CL_SPRINTF(cli_buf, BT_TMP_BUF_SIZE,
+			   "\r\n ==========================================");
+		CL_PRINTF(cli_buf);
+	}
+
+	if (!board_info->bt_exist) {
+		CL_SPRINTF(cli_buf, BT_TMP_BUF_SIZE, "\r\n BT not exists !!!");
+		CL_PRINTF(cli_buf);
+		return;
+	}
+
+	CL_SPRINTF(cli_buf, BT_TMP_BUF_SIZE, "\r\n %-35s = %d/ %d/ %d",
+		   "Ant PG Num/ Ant Mech/ Ant Pos:",
+		   board_info->pg_ant_num, board_info->btdm_ant_num,
+		   board_info->btdm_ant_pos);
+	CL_PRINTF(cli_buf);
+
+	CL_SPRINTF(cli_buf, BT_TMP_BUF_SIZE, "\r\n %-35s = %s / %d",
+		   "BT stack/ hci ext ver",
+		   ((stack_info->profile_notified) ? "Yes" : "No"),
+		   stack_info->hci_version);
+	CL_PRINTF(cli_buf);
+
+	btcoexist->btc_get(btcoexist, BTC_GET_U4_BT_PATCH_VER, &bt_patch_ver);
+	btcoexist->btc_get(btcoexist, BTC_GET_U4_WIFI_FW_VER, &fw_ver);
+	CL_SPRINTF(cli_buf, BT_TMP_BUF_SIZE,
+		   "\r\n %-35s = %d_%x/ 0x%x/ 0x%x(%d)",
+		   "CoexVer/ FwVer/ PatchVer",
+		   glcoex_ver_date_8723b_1ant, glcoex_ver_8723b_1ant,
+		   fw_ver, bt_patch_ver, bt_patch_ver);
+	CL_PRINTF(cli_buf);
+
+	btcoexist->btc_get(btcoexist, BTC_GET_BL_HS_OPERATION, &bt_hs_on);
+	btcoexist->btc_get(btcoexist, BTC_GET_U1_WIFI_DOT11_CHNL,
+			   &wifi_dot11_chnl);
+	btcoexist->btc_get(btcoexist, BTC_GET_U1_WIFI_HS_CHNL, &wifi_hs_chnl);
+	CL_SPRINTF(cli_buf, BT_TMP_BUF_SIZE, "\r\n %-35s = %d / %d(%d)",
+		   "Dot11 channel / HsChnl(HsMode)",
+		   wifi_dot11_chnl, wifi_hs_chnl, bt_hs_on);
+	CL_PRINTF(cli_buf);
+
+	CL_SPRINTF(cli_buf, BT_TMP_BUF_SIZE, "\r\n %-35s = %02x %02x %02x ",
+		   "H2C Wifi inform bt chnl Info",
+		   coex_dm->wifi_chnl_info[0], coex_dm->wifi_chnl_info[1],
+		   coex_dm->wifi_chnl_info[2]);
+	CL_PRINTF(cli_buf);
+
+	btcoexist->btc_get(btcoexist, BTC_GET_S4_WIFI_RSSI, &wifi_rssi);
+	btcoexist->btc_get(btcoexist, BTC_GET_S4_HS_RSSI, &bt_hs_rssi);
+	CL_SPRINTF(cli_buf, BT_TMP_BUF_SIZE, "\r\n %-35s = %d/ %d",
+		   "Wifi rssi/ HS rssi", wifi_rssi, bt_hs_rssi);
+	CL_PRINTF(cli_buf);
+
+	btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_SCAN, &scan);
+	btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_LINK, &link);
+	btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_ROAM, &roam);
+	CL_SPRINTF(cli_buf, BT_TMP_BUF_SIZE, "\r\n %-35s = %d/ %d/ %d ",
+		"Wifi link/ roam/ scan", link, roam, scan);
+	CL_PRINTF(cli_buf);
+
+	btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_UNDER_5G,
+			   &wifi_under_5g);
+	btcoexist->btc_get(btcoexist, BTC_GET_U4_WIFI_BW, &wifi_bw);
+	btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_BUSY, &wifi_busy);
+	btcoexist->btc_get(btcoexist, BTC_GET_U4_WIFI_TRAFFIC_DIRECTION,
+			   &wifi_traffic_dir);
+
+	CL_SPRINTF(cli_buf, BT_TMP_BUF_SIZE, "\r\n %-35s = %s / %s/ %s ",
+		   "Wifi status", (wifi_under_5g ? "5G" : "2.4G"),
+		   ((BTC_WIFI_BW_LEGACY == wifi_bw) ? "Legacy" :
+			(((BTC_WIFI_BW_HT40 == wifi_bw) ? "HT40" : "HT20"))),
+		   ((!wifi_busy) ? "idle" :
+			((BTC_WIFI_TRAFFIC_TX == wifi_traffic_dir) ?
+				"uplink" : "downlink")));
+	CL_PRINTF(cli_buf);
+	CL_SPRINTF(cli_buf, BT_TMP_BUF_SIZE, "\r\n %-35s = [%s/ %d/ %d] ",
+		   "BT [status/ rssi/ retryCnt]",
+		   ((btcoexist->bt_info.bt_disabled) ? ("disabled") :
+		    ((coex_sta->c2h_bt_inquiry_page) ? ("inquiry/page scan") :
+		     ((BT_8723B_1ANT_BT_STATUS_NON_CONNECTED_IDLE ==
+		       coex_dm->bt_status) ? "non-connected idle" :
+		      ((BT_8723B_1ANT_BT_STATUS_CONNECTED_IDLE ==
+			coex_dm->bt_status) ? "connected-idle" : "busy")))),
+		   coex_sta->bt_rssi, coex_sta->bt_retry_cnt);
+	CL_PRINTF(cli_buf);
+
+	CL_SPRINTF(cli_buf, BT_TMP_BUF_SIZE, "\r\n %-35s = %d / %d / %d / %d",
+		"SCO/HID/PAN/A2DP", bt_link_info->sco_exist,
+		bt_link_info->hid_exist, bt_link_info->pan_exist,
+		bt_link_info->a2dp_exist);
+	CL_PRINTF(cli_buf);
+	btcoexist->btc_disp_dbg_msg(btcoexist, BTC_DBG_DISP_BT_LINK_INFO);
+
+	bt_info_ext = coex_sta->bt_info_ext;
+	CL_SPRINTF(cli_buf, BT_TMP_BUF_SIZE, "\r\n %-35s = %s",
+		   "BT Info A2DP rate",
+		   (bt_info_ext & BIT0) ? "Basic rate" : "EDR rate");
+	CL_PRINTF(cli_buf);
+
+	for (i = 0; i < BT_INFO_SRC_8723B_1ANT_MAX; i++) {
+		if (coex_sta->bt_info_c2h_cnt[i]) {
+			CL_SPRINTF(cli_buf, BT_TMP_BUF_SIZE,
+				   "\r\n %-35s = %02x %02x %02x "
+				   "%02x %02x %02x %02x(%d)",
+				   GLBtInfoSrc8723b1Ant[i],
+				   coex_sta->bt_info_c2h[i][0],
+				   coex_sta->bt_info_c2h[i][1],
+				   coex_sta->bt_info_c2h[i][2],
+				   coex_sta->bt_info_c2h[i][3],
+				   coex_sta->bt_info_c2h[i][4],
+				   coex_sta->bt_info_c2h[i][5],
+				   coex_sta->bt_info_c2h[i][6],
+				   coex_sta->bt_info_c2h_cnt[i]);
+			CL_PRINTF(cli_buf);
+		}
+	}
+	CL_SPRINTF(cli_buf, BT_TMP_BUF_SIZE,
+		   "\r\n %-35s = %s/%s, (0x%x/0x%x)",
+		   "PS state, IPS/LPS, (lps/rpwm)",
+		   ((coex_sta->under_ips ? "IPS ON" : "IPS OFF")),
+		   ((coex_sta->under_lps ? "LPS ON" : "LPS OFF")),
+		   btcoexist->bt_info.lps_1ant,
+		   btcoexist->bt_info.rpwm_1ant);
+	CL_PRINTF(cli_buf);
+	btcoexist->btc_disp_dbg_msg(btcoexist, BTC_DBG_DISP_FW_PWR_MODE_CMD);
+
+	if (!btcoexist->manual_control) {
+		/* Sw mechanism	*/
+		CL_SPRINTF(cli_buf, BT_TMP_BUF_SIZE, "\r\n %-35s",
+			   "============[Sw mechanism]============");
+		CL_PRINTF(cli_buf);
+
+		CL_SPRINTF(cli_buf, BT_TMP_BUF_SIZE, "\r\n %-35s = %d/ %d/ %d ",
+			   "SM1[ShRf/ LpRA/ LimDig]",
+			   coex_dm->cur_rf_rx_lpf_shrink,
+			   coex_dm->cur_low_penalty_ra,
+			   btcoexist->bt_info.limited_dig);
+		CL_PRINTF(cli_buf);
+		CL_SPRINTF(cli_buf, BT_TMP_BUF_SIZE,
+			   "\r\n %-35s = %d/ %d/ %d(0x%x) ",
+			   "SM2[AgcT/ AdcB/ SwDacSwing(lvl)]",
+			   coex_dm->cur_agc_table_en,
+			   coex_dm->cur_adc_backoff,
+			   coex_dm->cur_dac_swing_on,
+			   coex_dm->cur_dac_swing_lvl);
+		CL_PRINTF(cli_buf);
+
+		CL_SPRINTF(cli_buf, BT_TMP_BUF_SIZE, "\r\n %-35s = 0x%x ",
+			   "Rate Mask", btcoexist->bt_info.ra_mask);
+		CL_PRINTF(cli_buf);
+
+		/* Fw mechanism	*/
+		CL_SPRINTF(cli_buf, BT_TMP_BUF_SIZE, "\r\n %-35s",
+			   "============[Fw mechanism]============");
+		CL_PRINTF(cli_buf);
+
+		psTdmaCase = coex_dm->cur_ps_tdma;
+		CL_SPRINTF(cli_buf, BT_TMP_BUF_SIZE,
+			   "\r\n %-35s = %02x %02x %02x %02x %02x "
+			   "case-%d (auto:%d)",
+			   "PS TDMA", coex_dm->ps_tdma_para[0],
+			   coex_dm->ps_tdma_para[1], coex_dm->ps_tdma_para[2],
+			   coex_dm->ps_tdma_para[3], coex_dm->ps_tdma_para[4],
+			   psTdmaCase, coex_dm->auto_tdma_adjust);
+		CL_PRINTF(cli_buf);
+
+		CL_SPRINTF(cli_buf, BT_TMP_BUF_SIZE, "\r\n %-35s = 0x%x ",
+			   "Latest error condition (should be 0)",
+			   coex_dm->error_condition);
+		CL_PRINTF(cli_buf);
+
+		CL_SPRINTF(cli_buf, BT_TMP_BUF_SIZE, "\r\n %-35s = %d/ %d ",
+			   "DecBtPwr/ IgnWlanAct", coex_dm->cur_dec_bt_pwr,
+			   coex_dm->cur_ignore_wlan_act);
+		CL_PRINTF(cli_buf);
+	}
+
+	/* Hw setting */
+	CL_SPRINTF(cli_buf, BT_TMP_BUF_SIZE, "\r\n %-35s",
+		   "============[Hw setting]============");
+	CL_PRINTF(cli_buf);
+
+	CL_SPRINTF(cli_buf, BT_TMP_BUF_SIZE, "\r\n %-35s = 0x%x",
+		   "RF-A, 0x1e initVal", coex_dm->bt_rf0x1e_backup);
+	CL_PRINTF(cli_buf);
+	CL_SPRINTF(cli_buf, BT_TMP_BUF_SIZE, "\r\n %-35s = 0x%x/0x%x/0x%x/0x%x",
+		   "backup ARFR1/ARFR2/RL/AMaxTime", coex_dm->backup_arfr_cnt1,
+		   coex_dm->backup_arfr_cnt2, coex_dm->backup_retry_limit,
+		   coex_dm->backup_ampdu_max_time);
+	CL_PRINTF(cli_buf);
+
+	u32tmp[0] = btcoexist->btc_read_4byte(btcoexist, 0x430);
+	u32tmp[1] = btcoexist->btc_read_4byte(btcoexist, 0x434);
+	u16tmp[0] = btcoexist->btc_read_2byte(btcoexist, 0x42a);
+	u8tmp[0] = btcoexist->btc_read_1byte(btcoexist, 0x456);
+	CL_SPRINTF(cli_buf, BT_TMP_BUF_SIZE, "\r\n %-35s = 0x%x/0x%x/0x%x/0x%x",
+		   "0x430/0x434/0x42a/0x456",
+		   u32tmp[0], u32tmp[1], u16tmp[0], u8tmp[0]);
+	CL_PRINTF(cli_buf);
+
+	u8tmp[0] = btcoexist->btc_read_1byte(btcoexist, 0x778);
+	u32tmp[0] = btcoexist->btc_read_4byte(btcoexist, 0x6cc);
+	u32tmp[1] = btcoexist->btc_read_4byte(btcoexist, 0x880);
+	CL_SPRINTF(cli_buf, BT_TMP_BUF_SIZE, "\r\n %-35s = 0x%x/ 0x%x/ 0x%x",
+		   "0x778/0x6cc/0x880[29:25]", u8tmp[0], u32tmp[0],
+		   (u32tmp[1] & 0x3e000000) >> 25);
+	CL_PRINTF(cli_buf);
+
+	u32tmp[0] = btcoexist->btc_read_4byte(btcoexist, 0x948);
+	u8tmp[0] = btcoexist->btc_read_1byte(btcoexist, 0x67);
+	u8tmp[1] = btcoexist->btc_read_1byte(btcoexist, 0x765);
+	CL_SPRINTF(cli_buf, BT_TMP_BUF_SIZE, "\r\n %-35s = 0x%x/ 0x%x/ 0x%x",
+		   "0x948/ 0x67[5] / 0x765",
+		   u32tmp[0], ((u8tmp[0] & 0x20) >> 5), u8tmp[1]);
+	CL_PRINTF(cli_buf);
+
+	u32tmp[0] = btcoexist->btc_read_4byte(btcoexist, 0x92c);
+	u32tmp[1] = btcoexist->btc_read_4byte(btcoexist, 0x930);
+	u32tmp[2] = btcoexist->btc_read_4byte(btcoexist, 0x944);
+	CL_SPRINTF(cli_buf, BT_TMP_BUF_SIZE, "\r\n %-35s = 0x%x/ 0x%x/ 0x%x",
+		   "0x92c[1:0]/ 0x930[7:0]/0x944[1:0]",
+		   u32tmp[0] & 0x3, u32tmp[1] & 0xff, u32tmp[2] & 0x3);
+	CL_PRINTF(cli_buf);
+
+	u8tmp[0] = btcoexist->btc_read_1byte(btcoexist, 0x39);
+	u8tmp[1] = btcoexist->btc_read_1byte(btcoexist, 0x40);
+	u32tmp[0] = btcoexist->btc_read_4byte(btcoexist, 0x4c);
+	u8tmp[2] = btcoexist->btc_read_1byte(btcoexist, 0x64);
+	CL_SPRINTF(cli_buf, BT_TMP_BUF_SIZE,
+		   "\r\n %-35s = 0x%x/ 0x%x/ 0x%x/ 0x%x",
+		   "0x38[11]/0x40/0x4c[24:23]/0x64[0]",
+		   ((u8tmp[0] & 0x8) >> 3), u8tmp[1],
+		   ((u32tmp[0] & 0x01800000) >> 23), u8tmp[2] & 0x1);
+	CL_PRINTF(cli_buf);
+
+	u32tmp[0] = btcoexist->btc_read_4byte(btcoexist, 0x550);
+	u8tmp[0] = btcoexist->btc_read_1byte(btcoexist, 0x522);
+	CL_SPRINTF(cli_buf, BT_TMP_BUF_SIZE, "\r\n %-35s = 0x%x/ 0x%x",
+		   "0x550(bcn ctrl)/0x522", u32tmp[0], u8tmp[0]);
+	CL_PRINTF(cli_buf);
+
+	u32tmp[0] = btcoexist->btc_read_4byte(btcoexist, 0xc50);
+	u8tmp[0] = btcoexist->btc_read_1byte(btcoexist, 0x49c);
+	CL_SPRINTF(cli_buf, BT_TMP_BUF_SIZE, "\r\n %-35s = 0x%x/ 0x%x",
+		   "0xc50(dig)/0x49c(null-drop)", u32tmp[0] & 0xff, u8tmp[0]);
+	CL_PRINTF(cli_buf);
+
+	u32tmp[0] = btcoexist->btc_read_4byte(btcoexist, 0xda0);
+	u32tmp[1] = btcoexist->btc_read_4byte(btcoexist, 0xda4);
+	u32tmp[2] = btcoexist->btc_read_4byte(btcoexist, 0xda8);
+	u32tmp[3] = btcoexist->btc_read_4byte(btcoexist, 0xcf0);
+
+	u8tmp[0] = btcoexist->btc_read_1byte(btcoexist, 0xa5b);
+	u8tmp[1] = btcoexist->btc_read_1byte(btcoexist, 0xa5c);
+
+	fa_ofdm = ((u32tmp[0] & 0xffff0000) >> 16) +
+		  ((u32tmp[1] & 0xffff0000) >> 16) +
+		   (u32tmp[1] & 0xffff) +
+		   (u32tmp[2] & 0xffff) +
+		  ((u32tmp[3] & 0xffff0000) >> 16) +
+		   (u32tmp[3] & 0xffff);
+	fa_cck = (u8tmp[0] << 8) + u8tmp[1];
+
+	CL_SPRINTF(cli_buf, BT_TMP_BUF_SIZE, "\r\n %-35s = 0x%x/ 0x%x/ 0x%x",
+		   "OFDM-CCA/OFDM-FA/CCK-FA",
+		   u32tmp[0] & 0xffff, fa_ofdm, fa_cck);
+	CL_PRINTF(cli_buf);
+
+	u32tmp[0] = btcoexist->btc_read_4byte(btcoexist, 0x6c0);
+	u32tmp[1] = btcoexist->btc_read_4byte(btcoexist, 0x6c4);
+	u32tmp[2] = btcoexist->btc_read_4byte(btcoexist, 0x6c8);
+	CL_SPRINTF(cli_buf, BT_TMP_BUF_SIZE, "\r\n %-35s = 0x%x/ 0x%x/ 0x%x",
+		   "0x6c0/0x6c4/0x6c8(coexTable)",
+		   u32tmp[0], u32tmp[1], u32tmp[2]);
+	CL_PRINTF(cli_buf);
+
+	CL_SPRINTF(cli_buf, BT_TMP_BUF_SIZE, "\r\n %-35s = %d/ %d",
+		   "0x770(high-pri rx/tx)", coex_sta->high_priority_rx,
+		   coex_sta->high_priority_tx);
+	CL_PRINTF(cli_buf);
+	CL_SPRINTF(cli_buf, BT_TMP_BUF_SIZE, "\r\n %-35s = %d/ %d",
+		   "0x774(low-pri rx/tx)", coex_sta->low_priority_rx,
+		   coex_sta->low_priority_tx);
+	CL_PRINTF(cli_buf);
+#if (BT_AUTO_REPORT_ONLY_8723B_1ANT == 1)
+	halbtc8723b1ant_monitor_bt_ctr(btcoexist);
+#endif
+	btcoexist->btc_disp_dbg_msg(btcoexist, BTC_DBG_DISP_COEX_STATISTICS);
+}
+
+void ex_halbtc8723b1ant_ips_notify(struct btc_coexist *btcoexist, u8 type)
+{
+	if (btcoexist->manual_control || btcoexist->stop_coex_dm)
+		return;
+
+	if (BTC_IPS_ENTER == type) {
+		BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY,
+			  "[BTCoex], IPS ENTER notify\n");
+		coex_sta->under_ips = true;
+
+		halbtc8723b1ant_SetAntPath(btcoexist, BTC_ANT_PATH_BT,
+					   false, true);
+		/* set PTA control */
+		halbtc8723b1ant_ps_tdma(btcoexist, NORMAL_EXEC, false, 0);
+		halbtc8723b1ant_coex_table_with_type(btcoexist,
+						     NORMAL_EXEC, 0);
+	} else if (BTC_IPS_LEAVE == type) {
+		BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY,
+			  "[BTCoex], IPS LEAVE notify\n");
+		coex_sta->under_ips = false;
+
+		halbtc8723b1ant_run_coexist_mechanism(btcoexist);
+	}
+}
+
+void ex_halbtc8723b1ant_lps_notify(struct btc_coexist *btcoexist, u8 type)
+{
+	if (btcoexist->manual_control || btcoexist->stop_coex_dm)
+		return;
+
+	if (BTC_LPS_ENABLE == type) {
+		BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY,
+			  "[BTCoex], LPS ENABLE notify\n");
+		coex_sta->under_lps = true;
+	} else if (BTC_LPS_DISABLE == type) {
+		BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY,
+			  "[BTCoex], LPS DISABLE notify\n");
+		coex_sta->under_lps = false;
+	}
+}
+
+void ex_halbtc8723b1ant_scan_notify(struct btc_coexist *btcoexist, u8 type)
+{
+	bool wifi_connected = false, bt_hs_on = false;
+
+	if (btcoexist->manual_control || btcoexist->stop_coex_dm ||
+	    btcoexist->bt_info.bt_disabled)
+		return;
+
+	btcoexist->btc_get(btcoexist, BTC_GET_BL_HS_OPERATION, &bt_hs_on);
+	btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_CONNECTED,
+			   &wifi_connected);
+
+	halbtc8723b1ant_query_bt_info(btcoexist);
+
+	if (coex_sta->c2h_bt_inquiry_page) {
+		halbtc8723b1ant_action_bt_inquiry(btcoexist);
+		return;
+	} else if (bt_hs_on) {
+		halbtc8723b1ant_action_hs(btcoexist);
+		return;
+	}
+
+	if (BTC_SCAN_START == type) {
+		BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY,
+			  "[BTCoex], SCAN START notify\n");
+		if (!wifi_connected)	/* non-connected scan */
+			halbtc8723b1ant_action_wifi_not_connected_asso_auth_scan(btcoexist);
+		else	/* wifi is connected */
+			halbtc8723b1ant_ActionWifiConnectedScan(btcoexist);
+	} else if (BTC_SCAN_FINISH == type) {
+		BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY,
+			  "[BTCoex], SCAN FINISH notify\n");
+		if (!wifi_connected)	/* non-connected scan */
+			halbtc8723b1ant_action_wifi_not_connected(btcoexist);
+		else
+			halbtc8723b1ant_action_wifi_connected(btcoexist);
+	}
+}
+
+void ex_halbtc8723b1ant_connect_notify(struct btc_coexist *btcoexist, u8 type)
+{
+	bool wifi_connected = false, bt_hs_on = false;
+
+	if (btcoexist->manual_control || btcoexist->stop_coex_dm ||
+	    btcoexist->bt_info.bt_disabled)
+		return;
+
+	btcoexist->btc_get(btcoexist, BTC_GET_BL_HS_OPERATION, &bt_hs_on);
+	if (coex_sta->c2h_bt_inquiry_page) {
+		halbtc8723b1ant_action_bt_inquiry(btcoexist);
+		return;
+	} else if (bt_hs_on) {
+		halbtc8723b1ant_action_hs(btcoexist);
+		return;
+	}
+
+	if (BTC_ASSOCIATE_START == type) {
+		BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY,
+			  "[BTCoex], CONNECT START notify\n");
+		halbtc8723b1ant_action_wifi_not_connected_asso_auth_scan(btcoexist);
+	} else if (BTC_ASSOCIATE_FINISH == type) {
+		BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY,
+			  "[BTCoex], CONNECT FINISH notify\n");
+
+		btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_CONNECTED,
+				   &wifi_connected);
+		if (!wifi_connected) /* non-connected scan */
+			halbtc8723b1ant_action_wifi_not_connected(btcoexist);
+		else
+			halbtc8723b1ant_action_wifi_connected(btcoexist);
+	}
+}
+
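+/* Report the wifi central channel and bandwidth to BT via H2C 0x66; only
+ * the 2.4G channel mask is sent.
+ */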
+void ex_halbtc8723b1ant_media_status_notify(struct btc_coexist *btcoexist,
+					    u8 type)
+{
+	u8 h2c_parameter[3] = {0};
+	u32 wifi_bw;
+	u8 wifiCentralChnl;
+
+	if (btcoexist->manual_control || btcoexist->stop_coex_dm ||
+	    btcoexist->bt_info.bt_disabled)
+		return;
+
+	if (BTC_MEDIA_CONNECT == type)
+		BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY,
+			  "[BTCoex], MEDIA connect notify\n");
+	else
+		BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY,
+			  "[BTCoex], MEDIA disconnect notify\n");
+
+	/* only for 2.4G do we need to inform bt of the chnl mask */
+	btcoexist->btc_get(btcoexist, BTC_GET_U1_WIFI_CENTRAL_CHNL,
+			   &wifiCentralChnl);
+
+	if ((BTC_MEDIA_CONNECT == type) &&
+	    (wifiCentralChnl <= 14)) {
+		h2c_parameter[0] = 0x0;
+		h2c_parameter[1] = wifiCentralChnl;
+		btcoexist->btc_get(btcoexist, BTC_GET_U4_WIFI_BW, &wifi_bw);
+		if (BTC_WIFI_BW_HT40 == wifi_bw)
+			h2c_parameter[2] = 0x30;
+		else
+			h2c_parameter[2] = 0x20;
+	}
+
+	coex_dm->wifi_chnl_info[0] = h2c_parameter[0];
+	coex_dm->wifi_chnl_info[1] = h2c_parameter[1];
+	coex_dm->wifi_chnl_info[2] = h2c_parameter[2];
+
+	BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_EXEC,
+		  "[BTCoex], FW write 0x66=0x%x\n",
+		  h2c_parameter[0] << 16 | h2c_parameter[1] << 8 |
+		  h2c_parameter[2]);
+
+	btcoexist->btc_fill_h2c(btcoexist, 0x66, 3, h2c_parameter);
+}
+
+void ex_halbtc8723b1ant_special_packet_notify(struct btc_coexist *btcoexist,
+					      u8 type)
+{
+	bool bt_hs_on = false;
+
+	if (btcoexist->manual_control || btcoexist->stop_coex_dm ||
+	    btcoexist->bt_info.bt_disabled)
+		return;
+
+	coex_sta->special_pkt_period_cnt = 0;
+
+	btcoexist->btc_get(btcoexist, BTC_GET_BL_HS_OPERATION, &bt_hs_on);
+	if (coex_sta->c2h_bt_inquiry_page) {
+		halbtc8723b1ant_action_bt_inquiry(btcoexist);
+		return;
+	} else if (bt_hs_on) {
+		halbtc8723b1ant_action_hs(btcoexist);
+		return;
+	}
+
+	if (BTC_PACKET_DHCP == type ||
+		BTC_PACKET_EAPOL == type) {
+		BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY,
+			  "[BTCoex], special Packet(%d) notify\n", type);
+		halbtc8723b1ant_action_wifi_connected_special_packet(btcoexist);
+	}
+}
+
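+/* C2H BT info handler: parse the connection/profile bits, update BT link
+ * info and BT status, then re-run the coexist mechanism.
+ */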
+void ex_halbtc8723b1ant_bt_info_notify(struct btc_coexist *btcoexist,
+				       u8 *tmp_buf, u8 length)
+{
+	u8 bt_info = 0;
+	u8 i, rsp_source = 0;
+	bool wifi_connected = false;
+	bool bt_busy = false;
+
+	coex_sta->c2h_bt_info_req_sent = false;
+
+	rsp_source = tmp_buf[0] & 0xf;
+	if (rsp_source >= BT_INFO_SRC_8723B_1ANT_MAX)
+		rsp_source = BT_INFO_SRC_8723B_1ANT_WIFI_FW;
+	coex_sta->bt_info_c2h_cnt[rsp_source]++;
+
+	BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY,
+		  "[BTCoex], Bt info[%d], length=%d, hex data=[",
+		  rsp_source, length);
+	for (i = 0; i < length; i++) {
+		coex_sta->bt_info_c2h[rsp_source][i] = tmp_buf[i];
+		if (i == 1)
+			bt_info = tmp_buf[i];
+		if (i == length - 1)
+			BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY,
+				  "0x%02x]\n", tmp_buf[i]);
+		else
+			BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY,
+				  "0x%02x, ", tmp_buf[i]);
+	}
+
+	if (BT_INFO_SRC_8723B_1ANT_WIFI_FW != rsp_source) {
+		coex_sta->bt_retry_cnt =	/* [3:0] */
+			coex_sta->bt_info_c2h[rsp_source][2] & 0xf;
+
+		coex_sta->bt_rssi =
+			coex_sta->bt_info_c2h[rsp_source][3] * 2 + 10;
+
+		coex_sta->bt_info_ext =
+			coex_sta->bt_info_c2h[rsp_source][4];
+
+		/* Here we need to resend some wifi info to BT
+		 * because bt was reset and lost the info. */
+		if (coex_sta->bt_info_ext & BIT1) {
+			BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
+				  "[BTCoex], BT ext info bit1 check, "
+				  "send wifi BW&Chnl to BT!!\n");
+			btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_CONNECTED,
+					   &wifi_connected);
+			if (wifi_connected)
+				ex_halbtc8723b1ant_media_status_notify(btcoexist,
+							     BTC_MEDIA_CONNECT);
+			else
+				ex_halbtc8723b1ant_media_status_notify(btcoexist,
+							  BTC_MEDIA_DISCONNECT);
+		}
+
+		if (coex_sta->bt_info_ext & BIT3) {
+			if (!btcoexist->manual_control &&
+			    !btcoexist->stop_coex_dm) {
+				BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
+					  "[BTCoex], BT ext info bit3 check, "
+					  "set BT NOT ignore Wlan active!!\n");
+				halbtc8723b1ant_ignore_wlan_act(btcoexist,
+								FORCE_EXEC,
+								false);
+			}
+		} else {
+			/* BT already does NOT ignore Wlan active,
+			 * do nothing here. */
+		}
+#if (BT_AUTO_REPORT_ONLY_8723B_1ANT == 0)
+		if (coex_sta->bt_info_ext & BIT4) {
+			/* BT auto report already enabled, do nothing */
+		} else {
+			halbtc8723b1ant_bt_auto_report(btcoexist, FORCE_EXEC,
+						       true);
+		}
+#endif
+	}
+
+	/* check BIT2 first ==> check if bt is under inquiry or page scan */
+	if (bt_info & BT_INFO_8723B_1ANT_B_INQ_PAGE)
+		coex_sta->c2h_bt_inquiry_page = true;
+	else
+		coex_sta->c2h_bt_inquiry_page = false;
+
+	/* set link exist status */
+	if (!(bt_info & BT_INFO_8723B_1ANT_B_CONNECTION)) {
+		coex_sta->bt_link_exist = false;
+		coex_sta->pan_exist = false;
+		coex_sta->a2dp_exist = false;
+		coex_sta->hid_exist = false;
+		coex_sta->sco_exist = false;
+	} else { /* connection exists */
+		coex_sta->bt_link_exist = true;
+		if (bt_info & BT_INFO_8723B_1ANT_B_FTP)
+			coex_sta->pan_exist = true;
+		else
+			coex_sta->pan_exist = false;
+		if (bt_info & BT_INFO_8723B_1ANT_B_A2DP)
+			coex_sta->a2dp_exist = true;
+		else
+			coex_sta->a2dp_exist = false;
+		if (bt_info & BT_INFO_8723B_1ANT_B_HID)
+			coex_sta->hid_exist = true;
+		else
+			coex_sta->hid_exist = false;
+		if (bt_info & BT_INFO_8723B_1ANT_B_SCO_ESCO)
+			coex_sta->sco_exist = true;
+		else
+			coex_sta->sco_exist = false;
+	}
+
+	halbtc8723b1ant_update_bt_link_info(btcoexist);
+
+	if (!(bt_info & BT_INFO_8723B_1ANT_B_CONNECTION)) {
+		coex_dm->bt_status = BT_8723B_1ANT_BT_STATUS_NON_CONNECTED_IDLE;
+		BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
+			  "[BTCoex], BtInfoNotify(), "
+			  "BT Non-Connected idle!!!\n");
+	/* connection exists but not busy */
+	} else if (bt_info == BT_INFO_8723B_1ANT_B_CONNECTION) {
+		coex_dm->bt_status = BT_8723B_1ANT_BT_STATUS_CONNECTED_IDLE;
+		BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
+			  "[BTCoex], BtInfoNotify(), BT Connected-idle!!!\n");
+	} else if ((bt_info & BT_INFO_8723B_1ANT_B_SCO_ESCO) ||
+		(bt_info & BT_INFO_8723B_1ANT_B_SCO_BUSY)) {
+		coex_dm->bt_status = BT_8723B_1ANT_BT_STATUS_SCO_BUSY;
+		BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
+			  "[BTCoex], BtInfoNotify(), "
+			  "BT SCO busy!!!\n");
+	} else if (bt_info & BT_INFO_8723B_1ANT_B_ACL_BUSY) {
+		if (BT_8723B_1ANT_BT_STATUS_ACL_BUSY != coex_dm->bt_status)
+			coex_dm->auto_tdma_adjust = false;
+
+		coex_dm->bt_status = BT_8723B_1ANT_BT_STATUS_ACL_BUSY;
+		BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
+			  "[BTCoex], BtInfoNotify(), BT ACL busy!!!\n");
+	} else {
+		coex_dm->bt_status =
+			BT_8723B_1ANT_BT_STATUS_MAX;
+		BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
+			  "[BTCoex], BtInfoNotify(), BT Non-Defined state!!\n");
+	}
+
+	if ((BT_8723B_1ANT_BT_STATUS_ACL_BUSY == coex_dm->bt_status) ||
+	    (BT_8723B_1ANT_BT_STATUS_SCO_BUSY == coex_dm->bt_status) ||
+	    (BT_8723B_1ANT_BT_STATUS_ACL_SCO_BUSY == coex_dm->bt_status))
+		bt_busy = true;
+	else
+		bt_busy = false;
+	btcoexist->btc_set(btcoexist, BTC_SET_BL_BT_TRAFFIC_BUSY, &bt_busy);
+
+	halbtc8723b1ant_run_coexist_mechanism(btcoexist);
+}
+
+void ex_halbtc8723b1ant_halt_notify(struct btc_coexist *btcoexist)
+{
+	BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY, "[BTCoex], Halt notify\n");
+
+	btcoexist->stop_coex_dm = true;
+
+	halbtc8723b1ant_SetAntPath(btcoexist, BTC_ANT_PATH_BT, false, true);
+
+	halbtc8723b1ant_wifi_off_hw_cfg(btcoexist);
+	halbtc8723b1ant_ignore_wlan_act(btcoexist, FORCE_EXEC, true);
+
+	halbtc8723b1ant_power_save_state(btcoexist, BTC_PS_WIFI_NATIVE,
+					 0x0, 0x0);
+	halbtc8723b1ant_ps_tdma(btcoexist, FORCE_EXEC, false, 0);
+
+	ex_halbtc8723b1ant_media_status_notify(btcoexist, BTC_MEDIA_DISCONNECT);
+}
+
+void ex_halbtc8723b1ant_pnp_notify(struct btc_coexist *btcoexist, u8 pnp_state)
+{
+	BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY, "[BTCoex], Pnp notify\n");
+
+	if (BTC_WIFI_PNP_SLEEP == pnp_state) {
+		BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY,
+			  "[BTCoex], Pnp notify to SLEEP\n");
+		btcoexist->stop_coex_dm = true;
+		halbtc8723b1ant_ignore_wlan_act(btcoexist, FORCE_EXEC, true);
+		halbtc8723b1ant_power_save_state(btcoexist, BTC_PS_WIFI_NATIVE,
+						 0x0, 0x0);
+		halbtc8723b1ant_ps_tdma(btcoexist, NORMAL_EXEC, false, 9);
+	} else if (BTC_WIFI_PNP_WAKE_UP == pnp_state) {
+		BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY,
+			  "[BTCoex], Pnp notify to WAKE UP\n");
+		btcoexist->stop_coex_dm = false;
+		halbtc8723b1ant_init_hw_config(btcoexist, false);
+		halbtc8723b1ant_init_coex_dm(btcoexist);
+		halbtc8723b1ant_query_bt_info(btcoexist);
+	}
+}
+
+void ex_halbtc8723b1ant_periodical(struct btc_coexist *btcoexist)
+{
+	struct btc_board_info *board_info = &btcoexist->board_info;
+	struct btc_stack_info *stack_info = &btcoexist->stack_info;
+	static u8 dis_ver_info_cnt;
+	u32 fw_ver = 0, bt_patch_ver = 0;
+
+	BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
+		  "[BTCoex], =========================="
+		  "Periodical===========================\n");
+
+	if (dis_ver_info_cnt <= 5) {
+		dis_ver_info_cnt += 1;
+		BTC_PRINT(BTC_MSG_INTERFACE, INTF_INIT,
+			  "[BTCoex], *************************"
+			  "***************************************\n");
+		BTC_PRINT(BTC_MSG_INTERFACE, INTF_INIT,
+			  "[BTCoex], Ant PG Num/ Ant Mech/ "
+			  "Ant Pos = %d/ %d/ %d\n",
+			  board_info->pg_ant_num, board_info->btdm_ant_num,
+			  board_info->btdm_ant_pos);
+		BTC_PRINT(BTC_MSG_INTERFACE, INTF_INIT,
+			  "[BTCoex], BT stack/ hci ext ver = %s / %d\n",
+			  ((stack_info->profile_notified) ? "Yes" : "No"),
+			  stack_info->hci_version);
+		btcoexist->btc_get(btcoexist, BTC_GET_U4_BT_PATCH_VER,
+				   &bt_patch_ver);
+		btcoexist->btc_get(btcoexist, BTC_GET_U4_WIFI_FW_VER, &fw_ver);
+		BTC_PRINT(BTC_MSG_INTERFACE, INTF_INIT,
+			  "[BTCoex], CoexVer/ FwVer/ PatchVer "
+			  "= %d_%x/ 0x%x/ 0x%x(%d)\n",
+			  glcoex_ver_date_8723b_1ant,
+			  glcoex_ver_8723b_1ant, fw_ver,
+			  bt_patch_ver, bt_patch_ver);
+		BTC_PRINT(BTC_MSG_INTERFACE, INTF_INIT,
+			  "[BTCoex], *****************************"
+			  "***********************************\n");
+	}
+
+#if (BT_AUTO_REPORT_ONLY_8723B_1ANT == 0)
+	halbtc8723b1ant_query_bt_info(btcoexist);
+	halbtc8723b1ant_monitor_bt_ctr(btcoexist);
+	halbtc8723b1ant_monitor_bt_enable_disable(btcoexist);
+#else
+	if (halbtc8723b1ant_is_wifi_status_changed(btcoexist) ||
+	    coex_dm->auto_tdma_adjust) {
+		if (coex_sta->special_pkt_period_cnt > 2)
+			halbtc8723b1ant_run_coexist_mechanism(btcoexist);
+	}
+
+	coex_sta->special_pkt_period_cnt++;
+#endif
+}
+
+#endif
+
diff --git a/drivers/staging/rtl8821ae/btcoexist/halbtc8723b1ant.h b/drivers/staging/rtl8821ae/btcoexist/halbtc8723b1ant.h
new file mode 100644
index 0000000..5ce292f
--- /dev/null
+++ b/drivers/staging/rtl8821ae/btcoexist/halbtc8723b1ant.h
@@ -0,0 +1,175 @@
+/**********************************************************************
+ * The following is for 8723B 1ANT BT Co-exist definition
+ **********************************************************************/
+#define	BT_AUTO_REPORT_ONLY_8723B_1ANT			1
+
+#define	BT_INFO_8723B_1ANT_B_FTP			BIT7
+#define	BT_INFO_8723B_1ANT_B_A2DP			BIT6
+#define	BT_INFO_8723B_1ANT_B_HID			BIT5
+#define	BT_INFO_8723B_1ANT_B_SCO_BUSY			BIT4
+#define	BT_INFO_8723B_1ANT_B_ACL_BUSY			BIT3
+#define	BT_INFO_8723B_1ANT_B_INQ_PAGE			BIT2
+#define	BT_INFO_8723B_1ANT_B_SCO_ESCO			BIT1
+#define	BT_INFO_8723B_1ANT_B_CONNECTION			BIT0
+
+#define	BT_INFO_8723B_1ANT_A2DP_BASIC_RATE(_BT_INFO_EXT_)	\
+		(((_BT_INFO_EXT_) & BIT0) ? true : false)
+
+#define	BTC_RSSI_COEX_THRESH_TOL_8723B_1ANT		2
+
+typedef enum _BT_INFO_SRC_8723B_1ANT {
+	BT_INFO_SRC_8723B_1ANT_WIFI_FW			= 0x0,
+	BT_INFO_SRC_8723B_1ANT_BT_RSP			= 0x1,
+	BT_INFO_SRC_8723B_1ANT_BT_ACTIVE_SEND		= 0x2,
+	BT_INFO_SRC_8723B_1ANT_MAX
+} BT_INFO_SRC_8723B_1ANT, *PBT_INFO_SRC_8723B_1ANT;
+
+typedef enum _BT_8723B_1ANT_BT_STATUS {
+	BT_8723B_1ANT_BT_STATUS_NON_CONNECTED_IDLE	= 0x0,
+	BT_8723B_1ANT_BT_STATUS_CONNECTED_IDLE		= 0x1,
+	BT_8723B_1ANT_BT_STATUS_INQ_PAGE		= 0x2,
+	BT_8723B_1ANT_BT_STATUS_ACL_BUSY		= 0x3,
+	BT_8723B_1ANT_BT_STATUS_SCO_BUSY		= 0x4,
+	BT_8723B_1ANT_BT_STATUS_ACL_SCO_BUSY		= 0x5,
+	BT_8723B_1ANT_BT_STATUS_MAX
+} BT_8723B_1ANT_BT_STATUS, *PBT_8723B_1ANT_BT_STATUS;
+
+typedef enum _BT_8723B_1ANT_WIFI_STATUS {
+	BT_8723B_1ANT_WIFI_STATUS_NON_CONNECTED_IDLE			= 0x0,
+	BT_8723B_1ANT_WIFI_STATUS_NON_CONNECTED_ASSO_AUTH_SCAN		= 0x1,
+	BT_8723B_1ANT_WIFI_STATUS_CONNECTED_SCAN			= 0x2,
+	BT_8723B_1ANT_WIFI_STATUS_CONNECTED_SPECIAL_PKT			= 0x3,
+	BT_8723B_1ANT_WIFI_STATUS_CONNECTED_IDLE			= 0x4,
+	BT_8723B_1ANT_WIFI_STATUS_CONNECTED_BUSY			= 0x5,
+	BT_8723B_1ANT_WIFI_STATUS_MAX
+} BT_8723B_1ANT_WIFI_STATUS, *PBT_8723B_1ANT_WIFI_STATUS;
+
+typedef enum _BT_8723B_1ANT_COEX_ALGO {
+	BT_8723B_1ANT_COEX_ALGO_UNDEFINED		= 0x0,
+	BT_8723B_1ANT_COEX_ALGO_SCO			= 0x1,
+	BT_8723B_1ANT_COEX_ALGO_HID			= 0x2,
+	BT_8723B_1ANT_COEX_ALGO_A2DP			= 0x3,
+	BT_8723B_1ANT_COEX_ALGO_A2DP_PANHS		= 0x4,
+	BT_8723B_1ANT_COEX_ALGO_PANEDR			= 0x5,
+	BT_8723B_1ANT_COEX_ALGO_PANHS			= 0x6,
+	BT_8723B_1ANT_COEX_ALGO_PANEDR_A2DP		= 0x7,
+	BT_8723B_1ANT_COEX_ALGO_PANEDR_HID		= 0x8,
+	BT_8723B_1ANT_COEX_ALGO_HID_A2DP_PANEDR		= 0x9,
+	BT_8723B_1ANT_COEX_ALGO_HID_A2DP		= 0xa,
+	BT_8723B_1ANT_COEX_ALGO_MAX			= 0xb,
+} BT_8723B_1ANT_COEX_ALGO, *PBT_8723B_1ANT_COEX_ALGO;
+
+struct coex_dm_8723b_1ant {
+	/* fw mechanism */
+	bool pre_dec_bt_pwr;
+	bool cur_dec_bt_pwr;
+	u8 pre_fw_dac_swing_lvl;
+	u8 cur_fw_dac_swing_lvl;
+	bool cur_ignore_wlan_act;
+	bool pre_ignore_wlan_act;
+	u8 pre_ps_tdma;
+	u8 cur_ps_tdma;
+	u8 ps_tdma_para[5];
+	u8 ps_tdma_du_adj_type;
+	bool auto_tdma_adjust;
+	bool pre_ps_tdma_on;
+	bool cur_ps_tdma_on;
+	bool pre_bt_auto_report;
+	bool cur_bt_auto_report;
+	u8 pre_lps;
+	u8 cur_lps;
+	u8 pre_rpwm;
+	u8 cur_rpwm;
+
+	/* sw mechanism */
+	bool pre_rf_rx_lpf_shrink;
+	bool cur_rf_rx_lpf_shrink;
+	u32 bt_rf0x1e_backup;
+	bool pre_low_penalty_ra;
+	bool cur_low_penalty_ra;
+	bool pre_dac_swing_on;
+	u32 pre_dac_swing_lvl;
+	bool cur_dac_swing_on;
+	u32 cur_dac_swing_lvl;
+	bool pre_adc_backoff;
+	bool cur_adc_backoff;
+	bool pre_agc_table_en;
+	bool cur_agc_table_en;
+	u32 pre_val0x6c0;
+	u32 cur_val0x6c0;
+	u32 pre_val0x6c4;
+	u32 cur_val0x6c4;
+	u32 pre_val0x6c8;
+	u32 cur_val0x6c8;
+	u8 pre_val0x6cc;
+	u8 cur_val0x6cc;
+	bool limited_dig;
+
+	u32 backup_arfr_cnt1;	/* Auto Rate Fallback Retry cnt */
+	u32 backup_arfr_cnt2;	/* Auto Rate Fallback Retry cnt */
+	u16 backup_retry_limit;
+	u8 backup_ampdu_max_time;
+
+	/* algorithm related */
+	u8 pre_algorithm;
+	u8 cur_algorithm;
+	u8 bt_status;
+	u8 wifi_chnl_info[3];
+
+	u32 prera_mask;
+	u32 curra_mask;
+	u8 pre_arfr_type;
+	u8 cur_arfr_type;
+	u8 pre_retry_limit_type;
+	u8 cur_retry_limit_type;
+	u8 pre_ampdu_time_type;
+	u8 cur_ampdu_time_type;
+
+	u8 error_condition;
+};
+
+struct coex_sta_8723b_1ant {
+	bool bt_link_exist;
+	bool sco_exist;
+	bool a2dp_exist;
+	bool hid_exist;
+	bool pan_exist;
+
+	bool under_lps;
+	bool under_ips;
+	u32 special_pkt_period_cnt;
+	u32 high_priority_tx;
+	u32 high_priority_rx;
+	u32 low_priority_tx;
+	u32 low_priority_rx;
+	u8 bt_rssi;
+	u8 pre_bt_rssi_state;
+	u8 pre_wifi_rssi_state[4];
+	bool c2h_bt_info_req_sent;
+	u8 bt_info_c2h[BT_INFO_SRC_8723B_1ANT_MAX][10];
+	u32 bt_info_c2h_cnt[BT_INFO_SRC_8723B_1ANT_MAX];
+	bool c2h_bt_inquiry_page;
+	u8 bt_retry_cnt;
+	u8 bt_info_ext;
+};
+
+/*************************************************************************
+ * The following is interface which will notify coex module.
+ *************************************************************************/
+void ex_halbtc8723b1ant_init_hwconfig(struct btc_coexist *btcoexist);
+void ex_halbtc8723b1ant_init_coex_dm(struct btc_coexist *btcoexist);
+void ex_halbtc8723b1ant_ips_notify(struct btc_coexist *btcoexist, u8 type);
+void ex_halbtc8723b1ant_lps_notify(struct btc_coexist *btcoexist, u8 type);
+void ex_halbtc8723b1ant_scan_notify(struct btc_coexist *btcoexist, u8 type);
+void ex_halbtc8723b1ant_connect_notify(struct btc_coexist *btcoexist, u8 type);
+void ex_halbtc8723b1ant_media_status_notify(struct btc_coexist *btcoexist,
+					    u8 type);
+void ex_halbtc8723b1ant_special_packet_notify(struct btc_coexist *btcoexist,
+					      u8 type);
+void ex_halbtc8723b1ant_bt_info_notify(struct btc_coexist *btcoexist,
+				       u8 *tmpbuf, u8 length);
+void ex_halbtc8723b1ant_halt_notify(struct btc_coexist *btcoexist);
+void ex_halbtc8723b1ant_pnp_notify(struct btc_coexist *btcoexist, u8 pnp_state);
+void ex_halbtc8723b1ant_periodical(struct btc_coexist *btcoexist);
+void ex_halbtc8723b1ant_display_coex_info(struct btc_coexist *btcoexist);
+
diff --git a/drivers/staging/rtl8821ae/btcoexist/halbtc8723b2ant.c b/drivers/staging/rtl8821ae/btcoexist/halbtc8723b2ant.c
new file mode 100644
index 0000000..83b1b42
--- /dev/null
+++ b/drivers/staging/rtl8821ae/btcoexist/halbtc8723b2ant.c
@@ -0,0 +1,4185 @@
+/***************************************************************
+ * Description:
+ *
+ * This file is for RTL8723B Co-exist mechanism
+ *
+ * History
+ * 2012/11/15 Cosa first check in.
+ *
+ **************************************************************/
+/**************************************************************
+ * include files
+ **************************************************************/
+#include "halbt_precomp.h"
+#if 1
+/**************************************************************
+ * Global variables, these are static variables
+ **************************************************************/
+static struct coex_dm_8723b_2ant glcoex_dm_8723b_2ant;
+static struct coex_dm_8723b_2ant *coex_dm = &glcoex_dm_8723b_2ant;
+static struct coex_sta_8723b_2ant glcoex_sta_8723b_2ant;
+static struct coex_sta_8723b_2ant *coex_sta = &glcoex_sta_8723b_2ant;
+
+const char *const glbt_info_src_8723b_2ant[] = {
+	"BT Info[wifi fw]",
+	"BT Info[bt rsp]",
+	"BT Info[bt auto report]",
+};
+
+u32 glcoex_ver_date_8723b_2ant = 20131113;
+u32 glcoex_ver_8723b_2ant = 0x3f;
+
+/**************************************************************
+ * local function prototypes if needed
+ **************************************************************/
+/**************************************************************
+ * local functions start with halbtc8723b2ant_
+ **************************************************************/
+u8 halbtc8723b2ant_bt_rssi_state(u8 level_num, u8 rssi_thresh, u8 rssi_thresh1)
+{
+	s32 bt_rssi = 0;
+	u8 bt_rssi_state = coex_sta->pre_bt_rssi_state;
+
+	bt_rssi = coex_sta->bt_rssi;
+
+	if (level_num == 2) {
+		if ((coex_sta->pre_bt_rssi_state == BTC_RSSI_STATE_LOW) ||
+		    (coex_sta->pre_bt_rssi_state == BTC_RSSI_STATE_STAY_LOW)) {
+			if (bt_rssi >= rssi_thresh +
+				       BTC_RSSI_COEX_THRESH_TOL_8723B_2ANT) {
+				bt_rssi_state = BTC_RSSI_STATE_HIGH;
+				BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_RSSI_STATE,
+					  "[BTCoex], BT Rssi state "
+					  "switch to High\n");
+			} else {
+				bt_rssi_state = BTC_RSSI_STATE_STAY_LOW;
+				BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_RSSI_STATE,
+					  "[BTCoex], BT Rssi state "
+					  "stay at Low\n");
+			}
+		} else {
+			if (bt_rssi < rssi_thresh) {
+				bt_rssi_state = BTC_RSSI_STATE_LOW;
+				BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_RSSI_STATE,
+					  "[BTCoex], BT Rssi state "
+					  "switch to Low\n");
+			} else {
+				bt_rssi_state = BTC_RSSI_STATE_STAY_HIGH;
+				BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_RSSI_STATE,
+					  "[BTCoex], BT Rssi state "
+					  "stay at High\n");
+			}
+		}
+	} else if (level_num == 3) {
+		if (rssi_thresh > rssi_thresh1) {
+			BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_RSSI_STATE,
+				  "[BTCoex], BT Rssi thresh error!!\n");
+			return coex_sta->pre_bt_rssi_state;
+		}
+
+		if ((coex_sta->pre_bt_rssi_state == BTC_RSSI_STATE_LOW) ||
+		    (coex_sta->pre_bt_rssi_state == BTC_RSSI_STATE_STAY_LOW)) {
+			if (bt_rssi >= rssi_thresh +
+				       BTC_RSSI_COEX_THRESH_TOL_8723B_2ANT) {
+				bt_rssi_state = BTC_RSSI_STATE_MEDIUM;
+				BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_RSSI_STATE,
+					  "[BTCoex], BT Rssi state "
+					  "switch to Medium\n");
+			} else {
+				bt_rssi_state = BTC_RSSI_STATE_STAY_LOW;
+				BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_RSSI_STATE,
+					  "[BTCoex], BT Rssi state "
+					  "stay at Low\n");
+			}
+		} else if ((coex_sta->pre_bt_rssi_state ==
+						BTC_RSSI_STATE_MEDIUM) ||
+			   (coex_sta->pre_bt_rssi_state ==
+						BTC_RSSI_STATE_STAY_MEDIUM)) {
+			if (bt_rssi >= rssi_thresh1 +
+				       BTC_RSSI_COEX_THRESH_TOL_8723B_2ANT) {
+				bt_rssi_state = BTC_RSSI_STATE_HIGH;
+				BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_RSSI_STATE,
+					  "[BTCoex], BT Rssi state "
+					  "switch to High\n");
+			} else if (bt_rssi < rssi_thresh) {
+				bt_rssi_state = BTC_RSSI_STATE_LOW;
+				BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_RSSI_STATE,
+					  "[BTCoex], BT Rssi state "
+					  "switch to Low\n");
+			} else {
+				bt_rssi_state = BTC_RSSI_STATE_STAY_MEDIUM;
+				BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_RSSI_STATE,
+					  "[BTCoex], BT Rssi state "
+					  "stay at Medium\n");
+			}
+		} else {
+			if (bt_rssi < rssi_thresh1) {
+				bt_rssi_state = BTC_RSSI_STATE_MEDIUM;
+				BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_RSSI_STATE,
+					  "[BTCoex], BT Rssi state "
+					  "switch to Medium\n");
+			} else {
+				bt_rssi_state = BTC_RSSI_STATE_STAY_HIGH;
+				BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_RSSI_STATE,
+					  "[BTCoex], BT Rssi state "
+					  "stay at High\n");
+			}
+		}
+	}
+
+	coex_sta->pre_bt_rssi_state = bt_rssi_state;
+
+	return bt_rssi_state;
+}
+
+u8 halbtc8723b2ant_wifi_rssi_state(struct btc_coexist *btcoexist,
+				   u8 index, u8 level_num,
+				   u8 rssi_thresh, u8 rssi_thresh1)
+{
+	s32 wifi_rssi = 0;
+	u8 wifi_rssi_state = coex_sta->pre_wifi_rssi_state[index];
+
+	btcoexist->btc_get(btcoexist, BTC_GET_S4_WIFI_RSSI, &wifi_rssi);
+
+	if (level_num == 2) {
+		if ((coex_sta->pre_wifi_rssi_state[index] ==
+						BTC_RSSI_STATE_LOW) ||
+		    (coex_sta->pre_wifi_rssi_state[index] ==
+						BTC_RSSI_STATE_STAY_LOW)) {
+			if (wifi_rssi >= rssi_thresh +
+					 BTC_RSSI_COEX_THRESH_TOL_8723B_2ANT) {
+				wifi_rssi_state = BTC_RSSI_STATE_HIGH;
+				BTC_PRINT(BTC_MSG_ALGORITHM,
+					  ALGO_WIFI_RSSI_STATE,
+					  "[BTCoex], wifi RSSI state "
+					  "switch to High\n");
+			} else {
+				wifi_rssi_state = BTC_RSSI_STATE_STAY_LOW;
+				BTC_PRINT(BTC_MSG_ALGORITHM,
+					  ALGO_WIFI_RSSI_STATE,
+					  "[BTCoex], wifi RSSI state "
+					  "stay at Low\n");
+			}
+		} else {
+			if (wifi_rssi < rssi_thresh) {
+				wifi_rssi_state = BTC_RSSI_STATE_LOW;
+				BTC_PRINT(BTC_MSG_ALGORITHM,
+					  ALGO_WIFI_RSSI_STATE,
+					  "[BTCoex], wifi RSSI state "
+					  "switch to Low\n");
+			} else {
+				wifi_rssi_state = BTC_RSSI_STATE_STAY_HIGH;
+				BTC_PRINT(BTC_MSG_ALGORITHM,
+					  ALGO_WIFI_RSSI_STATE,
+					  "[BTCoex], wifi RSSI state "
+					  "stay at High\n");
+			}
+		}
+	} else if (level_num == 3) {
+		if (rssi_thresh > rssi_thresh1) {
+			BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_WIFI_RSSI_STATE,
+				  "[BTCoex], wifi RSSI thresh error!!\n");
+			return coex_sta->pre_wifi_rssi_state[index];
+		}
+
+		if ((coex_sta->pre_wifi_rssi_state[index] ==
+						BTC_RSSI_STATE_LOW) ||
+		    (coex_sta->pre_wifi_rssi_state[index] ==
+						BTC_RSSI_STATE_STAY_LOW)) {
+			if (wifi_rssi >= rssi_thresh +
+					BTC_RSSI_COEX_THRESH_TOL_8723B_2ANT) {
+				wifi_rssi_state = BTC_RSSI_STATE_MEDIUM;
+				BTC_PRINT(BTC_MSG_ALGORITHM,
+					  ALGO_WIFI_RSSI_STATE,
+					  "[BTCoex], wifi RSSI state "
+					  "switch to Medium\n");
+			} else {
+				wifi_rssi_state = BTC_RSSI_STATE_STAY_LOW;
+				BTC_PRINT(BTC_MSG_ALGORITHM,
+					  ALGO_WIFI_RSSI_STATE,
+					  "[BTCoex], wifi RSSI state "
+					  "stay at Low\n");
+			}
+		} else if ((coex_sta->pre_wifi_rssi_state[index] ==
+						BTC_RSSI_STATE_MEDIUM) ||
+			   (coex_sta->pre_wifi_rssi_state[index] ==
+						BTC_RSSI_STATE_STAY_MEDIUM)) {
+			if (wifi_rssi >= rssi_thresh1 +
+					 BTC_RSSI_COEX_THRESH_TOL_8723B_2ANT) {
+				wifi_rssi_state = BTC_RSSI_STATE_HIGH;
+				BTC_PRINT(BTC_MSG_ALGORITHM,
+					  ALGO_WIFI_RSSI_STATE,
+					  "[BTCoex], wifi RSSI state "
+					  "switch to High\n");
+			} else if (wifi_rssi < rssi_thresh) {
+				wifi_rssi_state = BTC_RSSI_STATE_LOW;
+				BTC_PRINT(BTC_MSG_ALGORITHM,
+					  ALGO_WIFI_RSSI_STATE,
+					  "[BTCoex], wifi RSSI state "
+					  "switch to Low\n");
+			} else {
+				wifi_rssi_state = BTC_RSSI_STATE_STAY_MEDIUM;
+				BTC_PRINT(BTC_MSG_ALGORITHM,
+					  ALGO_WIFI_RSSI_STATE,
+					  "[BTCoex], wifi RSSI state "
+					  "stay at Medium\n");
+			}
+		} else {
+			if (wifi_rssi < rssi_thresh1) {
+				wifi_rssi_state = BTC_RSSI_STATE_MEDIUM;
+				BTC_PRINT(BTC_MSG_ALGORITHM,
+					  ALGO_WIFI_RSSI_STATE,
+					  "[BTCoex], wifi RSSI state "
+					  "switch to Medium\n");
+			} else {
+				wifi_rssi_state = BTC_RSSI_STATE_STAY_HIGH;
+				BTC_PRINT(BTC_MSG_ALGORITHM,
+					  ALGO_WIFI_RSSI_STATE,
+					  "[BTCoex], wifi RSSI state "
+					  "stay at High\n");
+			}
+		}
+	}
+
+	coex_sta->pre_wifi_rssi_state[index] = wifi_rssi_state;
+
+	return wifi_rssi_state;
+}
+
+void halbtc8723b2ant_monitor_bt_enable_disable(struct btc_coexist *btcoexist)
+{
+	static bool pre_bt_disabled;
+	static u32 bt_disable_cnt;
+	bool bt_active = true, bt_disabled = false;
+
+	/* Check whether BT is disabled (all priority counters are zero) */
+	if (coex_sta->high_priority_tx == 0 &&
+	    coex_sta->high_priority_rx == 0 &&
+	    coex_sta->low_priority_tx == 0 &&
+	    coex_sta->low_priority_rx == 0)
+		bt_active = false;
+
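+	/* counters pegged at 0xffff are most likely an invalid read,
+	 * so keep treating BT as active */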
+	if (coex_sta->high_priority_tx == 0xffff &&
+	    coex_sta->high_priority_rx == 0xffff &&
+	    coex_sta->low_priority_tx == 0xffff &&
+	    coex_sta->low_priority_rx == 0xffff)
+		bt_active = true;
+
+	if (bt_active) {
+		bt_disable_cnt = 0;
+		bt_disabled = false;
+		btcoexist->btc_set(btcoexist, BTC_SET_BL_BT_DISABLE,
+				   &bt_disabled);
+		BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_MONITOR,
+			  "[BTCoex], BT is enabled !!\n");
+	} else {
+		bt_disable_cnt++;
+		BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_MONITOR,
+			  "[BTCoex], bt all counters=0, %d times!!\n",
+			  bt_disable_cnt);
+		if (bt_disable_cnt >= 2) {
+			bt_disabled = true;
+			btcoexist->btc_set(btcoexist, BTC_SET_BL_BT_DISABLE,
+					   &bt_disabled);
+			BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_MONITOR,
+				  "[BTCoex], BT is disabled !!\n");
+		}
+	}
+
+	if (pre_bt_disabled != bt_disabled) {
+		BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_MONITOR,
+			  "[BTCoex], BT is from %s to %s!!\n",
+			  (pre_bt_disabled ? "disabled":"enabled"),
+			  (bt_disabled ? "disabled":"enabled"));
+
+		pre_bt_disabled = bt_disabled;
+	}
+}
+
+void halbtc8723b2ant_monitor_bt_ctr(struct btc_coexist *btcoexist)
+{
+	u32 reg_hp_txrx, reg_lp_txrx, u32tmp;
+	u32 reg_hp_tx = 0, reg_hp_rx = 0;
+	u32 reg_lp_tx = 0, reg_lp_rx = 0;
+
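+	/* 0x770/0x774 hold the BT high/low priority Tx/Rx packet counters */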
+	reg_hp_txrx = 0x770;
+	reg_lp_txrx = 0x774;
+
+	u32tmp = btcoexist->btc_read_4byte(btcoexist, reg_hp_txrx);
+	reg_hp_tx = u32tmp & MASKLWORD;
+	reg_hp_rx = (u32tmp & MASKHWORD) >> 16;
+
+	u32tmp = btcoexist->btc_read_4byte(btcoexist, reg_lp_txrx);
+	reg_lp_tx = u32tmp & MASKLWORD;
+	reg_lp_rx = (u32tmp & MASKHWORD) >> 16;
+
+	coex_sta->high_priority_tx = reg_hp_tx;
+	coex_sta->high_priority_rx = reg_hp_rx;
+	coex_sta->low_priority_tx = reg_lp_tx;
+	coex_sta->low_priority_rx = reg_lp_rx;
+
+	BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_MONITOR,
+		  "[BTCoex], High Priority Tx/Rx(reg 0x%x)=0x%x(%d)/0x%x(%d)\n",
+		  reg_hp_txrx, reg_hp_tx, reg_hp_tx, reg_hp_rx, reg_hp_rx);
+	BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_MONITOR,
+		  "[BTCoex], Low Priority Tx/Rx(reg 0x%x)=0x%x(%d)/0x%x(%d)\n",
+		  reg_lp_txrx, reg_lp_tx, reg_lp_tx, reg_lp_rx, reg_lp_rx);
+
+	/* reset counter */
+	btcoexist->btc_write_1byte(btcoexist, 0x76e, 0xc);
+}
+
+void halbtc8723b2ant_query_bt_info(struct btc_coexist *btcoexist)
+{
+	u8 h2c_parameter[1] = {0};
+
+	coex_sta->c2h_bt_info_req_sent = true;
+
+	h2c_parameter[0] |= BIT0;	/* trigger */
+
+	BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_EXEC,
+		  "[BTCoex], Query Bt Info, FW write 0x61=0x%x\n",
+		  h2c_parameter[0]);
+
+	btcoexist->btc_fill_h2c(btcoexist, 0x61, 1, h2c_parameter);
+}
+
+bool halbtc8723b2ant_is_wifi_status_changed(struct btc_coexist *btcoexist)
+{
+	static bool pre_wifi_busy = false;
+	static bool pre_under_4way = false;
+	static bool pre_bt_hs_on = false;
+	bool wifi_busy = false, under_4way = false, bt_hs_on = false;
+	bool wifi_connected = false;
+
+	btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_CONNECTED,
+			   &wifi_connected);
+	btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_BUSY, &wifi_busy);
+	btcoexist->btc_get(btcoexist, BTC_GET_BL_HS_OPERATION, &bt_hs_on);
+	btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_4_WAY_PROGRESS,
+			   &under_4way);
+
+	if (wifi_connected) {
+		if (wifi_busy != pre_wifi_busy) {
+			pre_wifi_busy = wifi_busy;
+			return true;
+		}
+
+		if (under_4way != pre_under_4way) {
+			pre_under_4way = under_4way;
+			return true;
+		}
+
+		if (bt_hs_on != pre_bt_hs_on) {
+			pre_bt_hs_on = bt_hs_on;
+			return true;
+		}
+	}
+
+	return false;
+}
+
+void halbtc8723b2ant_update_bt_link_info(struct btc_coexist *btcoexist)
+{
+	/*struct btc_stack_info *stack_info = &btcoexist->stack_info;*/
+	struct btc_bt_link_info *bt_link_info = &btcoexist->bt_link_info;
+	bool bt_hs_on = false;
+
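+	/* profile information comes either from the BT firmware report or
+	 * from the host stack, depending on BT_AUTO_REPORT_ONLY_8723B_2ANT */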
+#if (BT_AUTO_REPORT_ONLY_8723B_2ANT == 1) /* profile from bt patch */
+	btcoexist->btc_get(btcoexist, BTC_GET_BL_HS_OPERATION, &bt_hs_on);
+
+	bt_link_info->bt_link_exist = coex_sta->bt_link_exist;
+	bt_link_info->sco_exist = coex_sta->sco_exist;
+	bt_link_info->a2dp_exist = coex_sta->a2dp_exist;
+	bt_link_info->pan_exist = coex_sta->pan_exist;
+	bt_link_info->hid_exist = coex_sta->hid_exist;
+
+	/* work around for HS mode. */
+	if (bt_hs_on) {
+		bt_link_info->pan_exist = true;
+		bt_link_info->bt_link_exist = true;
+	}
+#else	/* profile from bt stack */
+	bt_link_info->bt_link_exist = stack_info->bt_link_exist;
+	bt_link_info->sco_exist = stack_info->sco_exist;
+	bt_link_info->a2dp_exist = stack_info->a2dp_exist;
+	bt_link_info->pan_exist = stack_info->pan_exist;
+	bt_link_info->hid_exist = stack_info->hid_exist;
+
+	/*for win-8 stack HID report error*/
+	if (!stack_info->hid_exist)
+		stack_info->hid_exist = coex_sta->hid_exist;
+	/*sync  BTInfo with BT firmware and stack*/
+	/* when stack HID report error, here we use the info from bt fw.*/
+	if (!stack_info->bt_link_exist)
+		stack_info->bt_link_exist = coex_sta->bt_link_exist;
+#endif
+	/* check if Sco only */
+	if (bt_link_info->sco_exist && !bt_link_info->a2dp_exist &&
+	    !bt_link_info->pan_exist && !bt_link_info->hid_exist)
+		bt_link_info->sco_only = true;
+	else
+		bt_link_info->sco_only = false;
+
+	/* check if A2dp only */
+	if (!bt_link_info->sco_exist && bt_link_info->a2dp_exist &&
+	    !bt_link_info->pan_exist && !bt_link_info->hid_exist)
+		bt_link_info->a2dp_only = true;
+	else
+		bt_link_info->a2dp_only = false;
+
+	/* check if Pan only */
+	if (!bt_link_info->sco_exist && !bt_link_info->a2dp_exist &&
+	    bt_link_info->pan_exist && !bt_link_info->hid_exist)
+		bt_link_info->pan_only = true;
+	else
+		bt_link_info->pan_only = false;
+
+	/* check if Hid only */
+	if (!bt_link_info->sco_exist && !bt_link_info->a2dp_exist &&
+	    !bt_link_info->pan_exist && bt_link_info->hid_exist)
+		bt_link_info->hid_only = true;
+	else
+		bt_link_info->hid_only = false;
+}
+
+u8 halbtc8723b2ant_action_algorithm(struct btc_coexist *btcoexist)
+{
+	struct btc_bt_link_info *bt_link_info = &btcoexist->bt_link_info;
+	bool bt_hs_on = false;
+	u8 algorithm = BT_8723B_2ANT_COEX_ALGO_UNDEFINED;
+	u8 num_of_diff_profile = 0;
+
+	btcoexist->btc_get(btcoexist, BTC_GET_BL_HS_OPERATION, &bt_hs_on);
+
+	if (!bt_link_info->bt_link_exist) {
+		BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
+			  "[BTCoex], No BT link exists!!!\n");
+		return algorithm;
+	}
+
+	if (bt_link_info->sco_exist)
+		num_of_diff_profile++;
+	if (bt_link_info->hid_exist)
+		num_of_diff_profile++;
+	if (bt_link_info->pan_exist)
+		num_of_diff_profile++;
+	if (bt_link_info->a2dp_exist)
+		num_of_diff_profile++;
+
+	if (num_of_diff_profile == 1) {
+		if (bt_link_info->sco_exist) {
+			BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
+				  "[BTCoex], SCO only\n");
+			algorithm = BT_8723B_2ANT_COEX_ALGO_SCO;
+		} else {
+			if (bt_link_info->hid_exist) {
+				BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
+					  "[BTCoex], HID only\n");
+				algorithm = BT_8723B_2ANT_COEX_ALGO_HID;
+			} else if (bt_link_info->a2dp_exist) {
+				BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
+					  "[BTCoex], A2DP only\n");
+				algorithm = BT_8723B_2ANT_COEX_ALGO_A2DP;
+			} else if (bt_link_info->pan_exist) {
+				if (bt_hs_on) {
+					BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
+						  "[BTCoex], PAN(HS) only\n");
+					algorithm =
+						BT_8723B_2ANT_COEX_ALGO_PANHS;
+				} else {
+					BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
+						  "[BTCoex], PAN(EDR) only\n");
+					algorithm =
+						BT_8723B_2ANT_COEX_ALGO_PANEDR;
+				}
+			}
+		}
+	} else if (num_of_diff_profile == 2) {
+		if (bt_link_info->sco_exist) {
+			if (bt_link_info->hid_exist) {
+				BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
+					  "[BTCoex], SCO + HID\n");
+				algorithm = BT_8723B_2ANT_COEX_ALGO_PANEDR_HID;
+			} else if (bt_link_info->a2dp_exist) {
+				BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
+					  "[BTCoex], SCO + A2DP ==> SCO\n");
+				algorithm = BT_8723B_2ANT_COEX_ALGO_PANEDR_HID;
+			} else if (bt_link_info->pan_exist) {
+				if (bt_hs_on) {
+					BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
+						  "[BTCoex], SCO + PAN(HS)\n");
+					algorithm = BT_8723B_2ANT_COEX_ALGO_SCO;
+				} else {
+					BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
+						  "[BTCoex], SCO + PAN(EDR)\n");
+					algorithm =
+					    BT_8723B_2ANT_COEX_ALGO_PANEDR_HID;
+				}
+			}
+		} else {
+			if (bt_link_info->hid_exist &&
+			    bt_link_info->a2dp_exist) {
+				BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
+					  "[BTCoex], HID + A2DP\n");
+				algorithm = BT_8723B_2ANT_COEX_ALGO_HID_A2DP;
+			} else if (bt_link_info->hid_exist &&
+				   bt_link_info->pan_exist) {
+				if (bt_hs_on) {
+					BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
+						  "[BTCoex], HID + PAN(HS)\n");
+					algorithm = BT_8723B_2ANT_COEX_ALGO_HID;
+				} else {
+					BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
+						  "[BTCoex], HID + PAN(EDR)\n");
+					algorithm =
+					    BT_8723B_2ANT_COEX_ALGO_PANEDR_HID;
+				}
+			} else if (bt_link_info->pan_exist &&
+				   bt_link_info->a2dp_exist) {
+				if (bt_hs_on) {
+					BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
+						  "[BTCoex], A2DP + PAN(HS)\n");
+					algorithm =
+					    BT_8723B_2ANT_COEX_ALGO_A2DP_PANHS;
+				} else {
+					BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
+						  "[BTCoex],A2DP + PAN(EDR)\n");
+					algorithm =
+					    BT_8723B_2ANT_COEX_ALGO_PANEDR_A2DP;
+				}
+			}
+		}
+	} else if (num_of_diff_profile == 3) {
+		if (bt_link_info->sco_exist) {
+			if (bt_link_info->hid_exist &&
+			    bt_link_info->a2dp_exist) {
+				BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
+					  "[BTCoex], SCO + HID + A2DP"
+					  " ==> HID\n");
+				algorithm = BT_8723B_2ANT_COEX_ALGO_PANEDR_HID;
+			} else if (bt_link_info->hid_exist &&
+				   bt_link_info->pan_exist) {
+				if (bt_hs_on) {
+					BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
+						  "[BTCoex], SCO + HID + "
+						  "PAN(HS)\n");
+					algorithm =
+					    BT_8723B_2ANT_COEX_ALGO_PANEDR_HID;
+				} else {
+					BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
+						  "[BTCoex], SCO + HID + "
+						  "PAN(EDR)\n");
+					algorithm =
+					    BT_8723B_2ANT_COEX_ALGO_PANEDR_HID;
+				}
+			} else if (bt_link_info->pan_exist &&
+				   bt_link_info->a2dp_exist) {
+				if (bt_hs_on) {
+					BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
+						  "[BTCoex], SCO + A2DP + "
+						  "PAN(HS)\n");
+					algorithm =
+					    BT_8723B_2ANT_COEX_ALGO_PANEDR_HID;
+				} else {
+					BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
+						  "[BTCoex], SCO + A2DP + "
+						  "PAN(EDR) ==> HID\n");
+					algorithm =
+					    BT_8723B_2ANT_COEX_ALGO_PANEDR_HID;
+				}
+			}
+		} else {
+			if (bt_link_info->hid_exist &&
+			    bt_link_info->pan_exist &&
+			    bt_link_info->a2dp_exist) {
+				if (bt_hs_on) {
+					BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
+						  "[BTCoex], HID + A2DP + "
+						  "PAN(HS)\n");
+					algorithm =
+					    BT_8723B_2ANT_COEX_ALGO_HID_A2DP;
+				} else {
+					BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
+						  "[BTCoex], HID + A2DP + "
+						  "PAN(EDR)\n");
+					algorithm =
+					BT_8723B_2ANT_COEX_ALGO_HID_A2DP_PANEDR;
+				}
+			}
+		}
+	} else if (num_of_diff_profile >= 3) {
+		if (bt_link_info->sco_exist) {
+			if (bt_link_info->hid_exist &&
+			    bt_link_info->pan_exist &&
+			    bt_link_info->a2dp_exist) {
+				if (bt_hs_on) {
+					BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
+						  "[BTCoex], Error!!! SCO + HID"
+						  " + A2DP + PAN(HS)\n");
+				} else {
+					BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
+						  "[BTCoex], SCO + HID + A2DP +"
+						  " PAN(EDR)==>PAN(EDR)+HID\n");
+					algorithm =
+					    BT_8723B_2ANT_COEX_ALGO_PANEDR_HID;
+				}
+			}
+		}
+	}
+
+	return algorithm;
+}
+
+bool halbtc8723b2ant_need_to_dec_bt_pwr(struct btc_coexist *btcoexist)
+{
+	bool bRet = false;
+	bool bt_hs_on = false, wifi_connected = false;
+	s32 bt_hs_rssi = 0;
+	u8 bt_rssi_state;
+
+	if (!btcoexist->btc_get(btcoexist,
+			BTC_GET_BL_HS_OPERATION, &bt_hs_on))
+		return false;
+	if (!btcoexist->btc_get(btcoexist,
+			BTC_GET_BL_WIFI_CONNECTED, &wifi_connected))
+		return false;
+	if (!btcoexist->btc_get(btcoexist,
+			BTC_GET_S4_HS_RSSI, &bt_hs_rssi))
+		return false;
+
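+	/* two-level BT RSSI check with threshold 29; the second threshold is unused here */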
+	bt_rssi_state = halbtc8723b2ant_bt_rssi_state(2, 29, 0);
+
+	if (wifi_connected) {
+		if (bt_hs_on) {
+			if (bt_hs_rssi > 37) {
+				BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW,
+					  "[BTCoex], Need to decrease bt "
+					  "power for HS mode!!\n");
+				bRet = true;
+			}
+		} else {
+			if ((bt_rssi_state == BTC_RSSI_STATE_HIGH) ||
+			    (bt_rssi_state == BTC_RSSI_STATE_STAY_HIGH)) {
+				BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW,
+					  "[BTCoex], Need to decrease bt "
+					  "power for Wifi is connected!!\n");
+				bRet = true;
+			}
+		}
+	}
+
+	return bRet;
+}
+
+void halbtc8723b2ant_set_fw_dac_swing_level(struct btc_coexist *btcoexist,
+					    u8 dac_swing_lvl)
+{
+	u8 h2c_parameter[1] = {0};
+
+	/* There are several type of dacswing
+	 * 0x18/ 0x10/ 0xc/ 0x8/ 0x4/ 0x6 */
+	h2c_parameter[0] = dac_swing_lvl;
+
+	BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_EXEC,
+		  "[BTCoex], Set Dac Swing Level=0x%x\n", dac_swing_lvl);
+	BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_EXEC,
+		  "[BTCoex], FW write 0x64=0x%x\n", h2c_parameter[0]);
+
+	btcoexist->btc_fill_h2c(btcoexist, 0x64, 1, h2c_parameter);
+}
+
+void halbtc8723b2ant_set_fw_dec_bt_pwr(struct btc_coexist *btcoexist,
+				       bool dec_bt_pwr)
+{
+	u8 h2c_parameter[1] = {0};
+
+
+	if (dec_bt_pwr)
+		h2c_parameter[0] |= BIT1;
+
+	BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_EXEC,
+		  "[BTCoex], decrease Bt Power : %s, FW write 0x62=0x%x\n",
+		  (dec_bt_pwr? "Yes!!":"No!!"), h2c_parameter[0]);
+
+	btcoexist->btc_fill_h2c(btcoexist, 0x62, 1, h2c_parameter);
+}
+
+void halbtc8723b2ant_dec_bt_pwr(struct btc_coexist *btcoexist,
+				bool force_exec, bool dec_bt_pwr)
+{
+	BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW,
+		  "[BTCoex], %s Dec BT power = %s\n",
+		  (force_exec? "force to":""), (dec_bt_pwr? "ON":"OFF"));
+	coex_dm->cur_dec_bt_pwr = dec_bt_pwr;
+
+	if (!force_exec) {
+		BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_DETAIL,
+			  "[BTCoex], bPreDecBtPwr=%d, bCurDecBtPwr=%d\n",
+			  coex_dm->pre_dec_bt_pwr, coex_dm->cur_dec_bt_pwr);
+
+		if (coex_dm->pre_dec_bt_pwr == coex_dm->cur_dec_bt_pwr)
+			return;
+	}
+	halbtc8723b2ant_set_fw_dec_bt_pwr(btcoexist, coex_dm->cur_dec_bt_pwr);
+
+	coex_dm->pre_dec_bt_pwr = coex_dm->cur_dec_bt_pwr;
+}
+
+void halbtc8723b2ant_set_bt_auto_report(struct btc_coexist *btcoexist,
+					bool enable_auto_report)
+{
+	u8 h2c_parameter[1] = {0};
+
+	if (enable_auto_report)
+		h2c_parameter[0] |= BIT0;
+
+	BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_EXEC,
+		  "[BTCoex], BT FW auto report : %s, FW write 0x68=0x%x\n",
+		  (enable_auto_report? "Enabled!!":"Disabled!!"),
+		  h2c_parameter[0]);
+
+	btcoexist->btc_fill_h2c(btcoexist, 0x68, 1, h2c_parameter);
+}
+
+void halbtc8723b2ant_bt_auto_report(struct btc_coexist *btcoexist,
+				    bool force_exec, bool enable_auto_report)
+{
+	BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW,
+		  "[BTCoex], %s BT Auto report = %s\n",
+		  (force_exec? "force to":""),
+		  ((enable_auto_report)? "Enabled":"Disabled"));
+	coex_dm->cur_bt_auto_report = enable_auto_report;
+
+	if (!force_exec) {
+		BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_DETAIL,
+			  "[BTCoex], bPreBtAutoReport=%d, "
+			  "bCurBtAutoReport=%d\n",
+			  coex_dm->pre_bt_auto_report,
+			  coex_dm->cur_bt_auto_report);
+
+		if (coex_dm->pre_bt_auto_report == coex_dm->cur_bt_auto_report)
+			return;
+	}
+	halbtc8723b2ant_set_bt_auto_report(btcoexist,
+					   coex_dm->cur_bt_auto_report);
+
+	coex_dm->pre_bt_auto_report = coex_dm->cur_bt_auto_report;
+}
+
+void halbtc8723b2ant_fw_dac_swing_lvl(struct btc_coexist *btcoexist,
+				      bool force_exec, u8 fw_dac_swing_lvl)
+{
+	BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW,
+		  "[BTCoex], %s set FW Dac Swing level = %d\n",
+		  (force_exec? "force to":""), fw_dac_swing_lvl);
+	coex_dm->cur_fw_dac_swing_lvl = fw_dac_swing_lvl;
+
+	if (!force_exec) {
+		BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_DETAIL,
+			  "[BTCoex], preFwDacSwingLvl=%d, "
+			  "curFwDacSwingLvl=%d\n",
+			  coex_dm->pre_fw_dac_swing_lvl,
+			  coex_dm->cur_fw_dac_swing_lvl);
+
+		if (coex_dm->pre_fw_dac_swing_lvl ==
+		   coex_dm->cur_fw_dac_swing_lvl)
+			return;
+	}
+
+	halbtc8723b2ant_set_fw_dac_swing_level(btcoexist,
+					       coex_dm->cur_fw_dac_swing_lvl);
+	coex_dm->pre_fw_dac_swing_lvl = coex_dm->cur_fw_dac_swing_lvl;
+}
+
+void halbtc8723b2ant_set_sw_rf_rx_lpf_corner(struct btc_coexist *btcoexist,
+					     bool rx_rf_shrink_on)
+{
+	if (rx_rf_shrink_on) {
+		/* Shrink RF Rx LPF corner */
+		BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_EXEC,
+			  "[BTCoex], Shrink RF Rx LPF corner!!\n");
+		btcoexist->btc_set_rf_reg(btcoexist, BTC_RF_A, 0x1e,
+					  0xfffff, 0xffffc);
+	} else {
+		/* Resume RF Rx LPF corner */
+		/* After initialization, we can use coex_dm->bt_rf0x1e_backup */
+		if (btcoexist->initilized) {
+			BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_EXEC,
+				  "[BTCoex], Resume RF Rx LPF corner!!\n");
+			btcoexist->btc_set_rf_reg(btcoexist, BTC_RF_A, 0x1e,
+						  0xfffff,
+						  coex_dm->bt_rf0x1e_backup);
+		}
+	}
+}
+
+void halbtc8723b2ant_rf_shrink(struct btc_coexist *btcoexist,
+			       bool force_exec, bool rx_rf_shrink_on)
+{
+	BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW,
+		  "[BTCoex], %s turn Rx RF Shrink = %s\n",
+		  (force_exec? "force to":""), (rx_rf_shrink_on? "ON":"OFF"));
+	coex_dm->cur_rf_rx_lpf_shrink = rx_rf_shrink_on;
+
+	if (!force_exec) {
+		BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_DETAIL,
+			  "[BTCoex], bPreRfRxLpfShrink=%d, "
+			  "bCurRfRxLpfShrink=%d\n",
+			  coex_dm->pre_rf_rx_lpf_shrink,
+			  coex_dm->cur_rf_rx_lpf_shrink);
+
+		if (coex_dm->pre_rf_rx_lpf_shrink ==
+		    coex_dm->cur_rf_rx_lpf_shrink)
+			return;
+	}
+	halbtc8723b2ant_set_sw_rf_rx_lpf_corner(btcoexist,
+						coex_dm->cur_rf_rx_lpf_shrink);
+
+	coex_dm->pre_rf_rx_lpf_shrink = coex_dm->cur_rf_rx_lpf_shrink;
+}
+
+void halbtc8723b2ant_set_sw_penalty_txrate_adaptive(
+						struct btc_coexist *btcoexist,
+						bool low_penalty_ra)
+{
+	u8 h2c_parameter[6] = {0};
+
+	h2c_parameter[0] = 0x6;	/* opCode, 0x6= Retry_Penalty*/
+
+	if (low_penalty_ra) {
+		h2c_parameter[1] |= BIT0;
+		/*normal rate except MCS7/6/5, OFDM54/48/36*/
+		h2c_parameter[2] = 0x00;
+		h2c_parameter[3] = 0xf7;  /*MCS7 or OFDM54*/
+		h2c_parameter[4] = 0xf8;  /*MCS6 or OFDM48*/
+		h2c_parameter[5] = 0xf9;  /*MCS5 or OFDM36*/
+	}
+
+	BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_EXEC,
+		  "[BTCoex], set WiFi Low-Penalty Retry: %s",
+		  (low_penalty_ra? "ON!!":"OFF!!"));
+
+	btcoexist->btc_fill_h2c(btcoexist, 0x69, 6, h2c_parameter);
+}
+
+void halbtc8723b2ant_low_penalty_ra(struct btc_coexist *btcoexist,
+				    bool force_exec, bool low_penalty_ra)
+{
+	/*return; */
+	BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW,
+		  "[BTCoex], %s turn LowPenaltyRA = %s\n",
+		  (force_exec? "force to":""), (low_penalty_ra? "ON":"OFF"));
+	coex_dm->cur_low_penalty_ra = low_penalty_ra;
+
+	if (!force_exec) {
+		BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_DETAIL,
+			  "[BTCoex], bPreLowPenaltyRa=%d, "
+			  "bCurLowPenaltyRa=%d\n",
+			  coex_dm->pre_low_penalty_ra,
+			  coex_dm->cur_low_penalty_ra);
+
+		if (coex_dm->pre_low_penalty_ra == coex_dm->cur_low_penalty_ra)
+			return;
+	}
+	halbtc8723b2ant_set_sw_penalty_txrate_adaptive(btcoexist,
+						coex_dm->cur_low_penalty_ra);
+
+	coex_dm->pre_low_penalty_ra = coex_dm->cur_low_penalty_ra;
+}
+
+void halbtc8723b2ant_set_dac_swing_reg(struct btc_coexist *btcoexist,
+				       u32 level)
+{
+	u8 val = (u8)level;
+
+	BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_EXEC,
+		  "[BTCoex], Write SwDacSwing = 0x%x\n", level);
+	btcoexist->btc_write_1byte_bitmask(btcoexist, 0x883, 0x3e, val);
+}
+
+void halbtc8723b2ant_set_sw_fulltime_dac_swing(struct btc_coexist *btcoexist,
+					       bool sw_dac_swing_on,
+					       u32 sw_dac_swing_lvl)
+{
+	if (sw_dac_swing_on)
+		halbtc8723b2ant_set_dac_swing_reg(btcoexist, sw_dac_swing_lvl);
+	else
+		halbtc8723b2ant_set_dac_swing_reg(btcoexist, 0x18);
+}
+
+void halbtc8723b2ant_dac_swing(struct btc_coexist *btcoexist,
+			       bool force_exec, bool dac_swing_on,
+			       u32 dac_swing_lvl)
+{
+	BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW,
+		  "[BTCoex], %s turn DacSwing=%s, dac_swing_lvl=0x%x\n",
+		  (force_exec? "force to":""),
+		  (dac_swing_on? "ON":"OFF"), dac_swing_lvl);
+	coex_dm->cur_dac_swing_on = dac_swing_on;
+	coex_dm->cur_dac_swing_lvl = dac_swing_lvl;
+
+	if (!force_exec) {
+		BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_DETAIL,
+			  "[BTCoex], bPreDacSwingOn=%d, preDacSwingLvl=0x%x,"
+			  " bCurDacSwingOn=%d, curDacSwingLvl=0x%x\n",
+			  coex_dm->pre_dac_swing_on, coex_dm->pre_dac_swing_lvl,
+			  coex_dm->cur_dac_swing_on,
+			  coex_dm->cur_dac_swing_lvl);
+
+		if ((coex_dm->pre_dac_swing_on == coex_dm->cur_dac_swing_on) &&
+		    (coex_dm->pre_dac_swing_lvl == coex_dm->cur_dac_swing_lvl))
+			return;
+	}
+	mdelay(30);
+	halbtc8723b2ant_set_sw_fulltime_dac_swing(btcoexist, dac_swing_on,
+						  dac_swing_lvl);
+
+	coex_dm->pre_dac_swing_on = coex_dm->cur_dac_swing_on;
+	coex_dm->pre_dac_swing_lvl = coex_dm->cur_dac_swing_lvl;
+}
+
+void halbtc8723b2ant_set_adc_backoff(struct btc_coexist *btcoexist,
+				     bool adc_backoff)
+{
+	if (adc_backoff) {
+		BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_EXEC,
+			  "[BTCoex], BB BackOff Level On!\n");
+		btcoexist->btc_write_1byte_bitmask(btcoexist, 0xc05, 0x30, 0x3);
+	} else {
+		BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_EXEC,
+			  "[BTCoex], BB BackOff Level Off!\n");
+		btcoexist->btc_write_1byte_bitmask(btcoexist, 0xc05, 0x30, 0x1);
+	}
+}
+
+void halbtc8723b2ant_adc_backoff(struct btc_coexist *btcoexist,
+				 bool force_exec, bool adc_backoff)
+{
+	BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW,
+		  "[BTCoex], %s turn AdcBackOff = %s\n",
+		  (force_exec? "force to":""), (adc_backoff? "ON":"OFF"));
+	coex_dm->cur_adc_back_off = adc_backoff;
+
+	if (!force_exec) {
+		BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_DETAIL,
+			  "[BTCoex], bPreAdcBackOff=%d, bCurAdcBackOff=%d\n",
+			  coex_dm->pre_adc_back_off,
+			  coex_dm->cur_adc_back_off);
+
+		if (coex_dm->pre_adc_back_off == coex_dm->cur_adc_back_off)
+			return;
+	}
+	halbtc8723b2ant_set_adc_backoff(btcoexist, coex_dm->cur_adc_back_off);
+
+	coex_dm->pre_adc_back_off = coex_dm->cur_adc_back_off;
+}
+
+void halbtc8723b2ant_set_agc_table(struct btc_coexist *btcoexist,
+				   bool agc_table_en)
+{
+	u8 rssi_adjust_val = 0;
+
+	/*  BB AGC Gain Table */
+	if (agc_table_en) {
+		BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_EXEC,
+			  "[BTCoex], BB Agc Table On!\n");
+		btcoexist->btc_write_4byte(btcoexist, 0xc78, 0x6e1A0001);
+		btcoexist->btc_write_4byte(btcoexist, 0xc78, 0x6d1B0001);
+		btcoexist->btc_write_4byte(btcoexist, 0xc78, 0x6c1C0001);
+		btcoexist->btc_write_4byte(btcoexist, 0xc78, 0x6b1D0001);
+		btcoexist->btc_write_4byte(btcoexist, 0xc78, 0x6a1E0001);
+		btcoexist->btc_write_4byte(btcoexist, 0xc78, 0x691F0001);
+		btcoexist->btc_write_4byte(btcoexist, 0xc78, 0x68200001);
+	} else {
+		BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_EXEC,
+			  "[BTCoex], BB Agc Table Off!\n");
+		btcoexist->btc_write_4byte(btcoexist, 0xc78, 0xaa1A0001);
+		btcoexist->btc_write_4byte(btcoexist, 0xc78, 0xa91B0001);
+		btcoexist->btc_write_4byte(btcoexist, 0xc78, 0xa81C0001);
+		btcoexist->btc_write_4byte(btcoexist, 0xc78, 0xa71D0001);
+		btcoexist->btc_write_4byte(btcoexist, 0xc78, 0xa61E0001);
+		btcoexist->btc_write_4byte(btcoexist, 0xc78, 0xa51F0001);
+		btcoexist->btc_write_4byte(btcoexist, 0xc78, 0xa4200001);
+	}
+
+	/* RF Gain */
+	btcoexist->btc_set_rf_reg(btcoexist, BTC_RF_A, 0xef, 0xfffff, 0x02000);
+	if (agc_table_en) {
+		BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_EXEC,
+			  "[BTCoex], Agc Table On!\n");
+		btcoexist->btc_set_rf_reg(btcoexist, BTC_RF_A, 0x3b,
+					  0xfffff, 0x38fff);
+		btcoexist->btc_set_rf_reg(btcoexist, BTC_RF_A, 0x3b,
+					  0xfffff, 0x38ffe);
+	} else {
+		BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_EXEC,
+			  "[BTCoex], Agc Table Off!\n");
+		btcoexist->btc_set_rf_reg(btcoexist, BTC_RF_A, 0x3b,
+					  0xfffff, 0x380c3);
+		btcoexist->btc_set_rf_reg(btcoexist, BTC_RF_A, 0x3b,
+					  0xfffff, 0x28ce6);
+	}
+	btcoexist->btc_set_rf_reg(btcoexist, BTC_RF_A, 0xef, 0xfffff, 0x0);
+
+	btcoexist->btc_set_rf_reg(btcoexist, BTC_RF_A, 0xed, 0xfffff, 0x1);
+
+	if (agc_table_en) {
+		BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_EXEC,
+			  "[BTCoex], Agc Table On!\n");
+		btcoexist->btc_set_rf_reg(btcoexist, BTC_RF_A, 0x40,
+					  0xfffff, 0x38fff);
+		btcoexist->btc_set_rf_reg(btcoexist, BTC_RF_A, 0x40,
+					  0xfffff, 0x38ffe);
+	} else {
+		BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_EXEC,
+			  "[BTCoex], Agc Table Off!\n");
+		btcoexist->btc_set_rf_reg(btcoexist, BTC_RF_A, 0x40,
+					  0xfffff, 0x380c3);
+		btcoexist->btc_set_rf_reg(btcoexist, BTC_RF_A, 0x40,
+					  0xfffff, 0x28ce6);
+	}
+	btcoexist->btc_set_rf_reg(btcoexist, BTC_RF_A, 0xed, 0xfffff, 0x0);
+
+	/* set rssiAdjustVal for wifi module. */
+	if (agc_table_en)
+		rssi_adjust_val = 8;
+	btcoexist->btc_set(btcoexist, BTC_SET_U1_RSSI_ADJ_VAL_FOR_AGC_TABLE_ON,
+			   &rssi_adjust_val);
+}
+
+void halbtc8723b2ant_agc_table(struct btc_coexist *btcoexist,
+			       bool force_exec, bool agc_table_en)
+{
+	BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW,
+		  "[BTCoex], %s %s Agc Table\n",
+		  (force_exec? "force to":""),
+		  (agc_table_en? "Enable":"Disable"));
+	coex_dm->cur_agc_table_en = agc_table_en;
+
+	if (!force_exec) {
+		BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_DETAIL,
+			  "[BTCoex], bPreAgcTableEn=%d, bCurAgcTableEn=%d\n",
+			  coex_dm->pre_agc_table_en, coex_dm->cur_agc_table_en);
+
+		if (coex_dm->pre_agc_table_en == coex_dm->cur_agc_table_en)
+			return;
+	}
+	halbtc8723b2ant_set_agc_table(btcoexist, agc_table_en);
+
+	coex_dm->pre_agc_table_en = coex_dm->cur_agc_table_en;
+}
+
+void halbtc8723b2ant_set_coex_table(struct btc_coexist *btcoexist,
+				    u32 val0x6c0, u32 val0x6c4,
+				    u32 val0x6c8, u8 val0x6cc)
+{
+	BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_EXEC,
+		  "[BTCoex], set coex table, set 0x6c0=0x%x\n", val0x6c0);
+	btcoexist->btc_write_4byte(btcoexist, 0x6c0, val0x6c0);
+
+	BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_EXEC,
+		  "[BTCoex], set coex table, set 0x6c4=0x%x\n", val0x6c4);
+	btcoexist->btc_write_4byte(btcoexist, 0x6c4, val0x6c4);
+
+	BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_EXEC,
+		  "[BTCoex], set coex table, set 0x6c8=0x%x\n", val0x6c8);
+	btcoexist->btc_write_4byte(btcoexist, 0x6c8, val0x6c8);
+
+	BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_EXEC,
+		  "[BTCoex], set coex table, set 0x6cc=0x%x\n", val0x6cc);
+	btcoexist->btc_write_1byte(btcoexist, 0x6cc, val0x6cc);
+}
+
+void halbtc8723b2ant_coex_table(struct btc_coexist *btcoexist,
+				bool force_exec, u32 val0x6c0,
+				u32 val0x6c4, u32 val0x6c8,
+				u8 val0x6cc)
+{
+	BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW,
+		  "[BTCoex], %s write Coex Table 0x6c0=0x%x,"
+		  " 0x6c4=0x%x, 0x6c8=0x%x, 0x6cc=0x%x\n",
+		  (force_exec? "force to":""), val0x6c0,
+		  val0x6c4, val0x6c8, val0x6cc);
+	coex_dm->cur_val0x6c0 = val0x6c0;
+	coex_dm->cur_val0x6c4 = val0x6c4;
+	coex_dm->cur_val0x6c8 = val0x6c8;
+	coex_dm->cur_val0x6cc = val0x6cc;
+
+	if (!force_exec) {
+		BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_DETAIL,
+			  "[BTCoex], preVal0x6c0=0x%x, "
+			  "preVal0x6c4=0x%x, preVal0x6c8=0x%x, "
+			  "preVal0x6cc=0x%x !!\n",
+			  coex_dm->pre_val0x6c0, coex_dm->pre_val0x6c4,
+			  coex_dm->pre_val0x6c8, coex_dm->pre_val0x6cc);
+		BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_DETAIL,
+			  "[BTCoex], curVal0x6c0=0x%x, "
+			  "curVal0x6c4=0x%x, curVal0x6c8=0x%x, "
+			  "curVal0x6cc=0x%x !!\n",
+			  coex_dm->cur_val0x6c0, coex_dm->cur_val0x6c4,
+			  coex_dm->cur_val0x6c8, coex_dm->cur_val0x6cc);
+
+		if ((coex_dm->pre_val0x6c0 == coex_dm->cur_val0x6c0) &&
+		    (coex_dm->pre_val0x6c4 == coex_dm->cur_val0x6c4) &&
+		    (coex_dm->pre_val0x6c8 == coex_dm->cur_val0x6c8) &&
+		    (coex_dm->pre_val0x6cc == coex_dm->cur_val0x6cc))
+			return;
+	}
+	halbtc8723b2ant_set_coex_table(btcoexist, val0x6c0, val0x6c4,
+				       val0x6c8, val0x6cc);
+
+	coex_dm->pre_val0x6c0 = coex_dm->cur_val0x6c0;
+	coex_dm->pre_val0x6c4 = coex_dm->cur_val0x6c4;
+	coex_dm->pre_val0x6c8 = coex_dm->cur_val0x6c8;
+	coex_dm->pre_val0x6cc = coex_dm->cur_val0x6cc;
+}
+
+void halbtc8723b2ant_coex_table_with_type(struct btc_coexist *btcoexist,
+					  bool force_exec, u8 type)
+{
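+	/* each type selects a preset written to coex table regs 0x6c0/0x6c4/0x6c8/0x6cc */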
+	switch (type) {
+	case 0:
+		halbtc8723b2ant_coex_table(btcoexist, force_exec, 0x55555555,
+					   0x55555555, 0xffff, 0x3);
+		break;
+	case 1:
+		halbtc8723b2ant_coex_table(btcoexist, force_exec, 0x55555555,
+					   0x5afa5afa, 0xffff, 0x3);
+		break;
+	case 2:
+		halbtc8723b2ant_coex_table(btcoexist, force_exec, 0x5a5a5a5a,
+					   0x5a5a5a5a, 0xffff, 0x3);
+		break;
+	case 3:
+		halbtc8723b2ant_coex_table(btcoexist, force_exec, 0xaaaaaaaa,
+					   0xaaaaaaaa, 0xffff, 0x3);
+		break;
+	case 4:
+		halbtc8723b2ant_coex_table(btcoexist, force_exec, 0xffffffff,
+					   0xffffffff, 0xffff, 0x3);
+		break;
+	case 5:
+		halbtc8723b2ant_coex_table(btcoexist, force_exec, 0x5fff5fff,
+					   0x5fff5fff, 0xffff, 0x3);
+		break;
+	case 6:
+		halbtc8723b2ant_coex_table(btcoexist, force_exec, 0x55ff55ff,
+					   0x5a5a5a5a, 0xffff, 0x3);
+		break;
+	case 7:
+		halbtc8723b2ant_coex_table(btcoexist, force_exec, 0x55ff55ff,
+					   0x5afa5afa, 0xffff, 0x3);
+		break;
+	case 8:
+		halbtc8723b2ant_coex_table(btcoexist, force_exec, 0x5aea5aea,
+					   0x5aea5aea, 0xffff, 0x3);
+		break;
+	case 9:
+		halbtc8723b2ant_coex_table(btcoexist, force_exec, 0x55ff55ff,
+					   0x5aea5aea, 0xffff, 0x3);
+		break;
+	case 10:
+		halbtc8723b2ant_coex_table(btcoexist, force_exec, 0x55ff55ff,
+					   0x5aff5aff, 0xffff, 0x3);
+		break;
+	case 11:
+		halbtc8723b2ant_coex_table(btcoexist, force_exec, 0x55ff55ff,
+					   0x5a5f5a5f, 0xffff, 0x3);
+		break;
+	case 12:
+		halbtc8723b2ant_coex_table(btcoexist, force_exec, 0x55ff55ff,
+					   0x5f5f5f5f, 0xffff, 0x3);
+		break;
+	default:
+		break;
+	}
+}
+
+void halbtc8723b2ant_set_fw_ignore_wlan_act(struct btc_coexist *btcoexist,
+					    bool enable)
+{
+	u8 h2c_parameter[1] = {0};
+
+	if (enable)
+		h2c_parameter[0] |= BIT0;/* function enable*/
+
+	BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_EXEC,
+		  "[BTCoex], set FW for BT Ignore Wlan_Act, "
+		  "FW write 0x63=0x%x\n", h2c_parameter[0]);
+
+	btcoexist->btc_fill_h2c(btcoexist, 0x63, 1, h2c_parameter);
+}
+
+void halbtc8723b2ant_ignore_wlan_act(struct btc_coexist *btcoexist,
+				     bool force_exec, bool enable)
+{
+	BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW,
+		  "[BTCoex], %s turn Ignore WlanAct %s\n",
+		  (force_exec? "force to":""), (enable? "ON":"OFF"));
+	coex_dm->cur_ignore_wlan_act = enable;
+
+	if (!force_exec) {
+		BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_DETAIL,
+			  "[BTCoex], bPreIgnoreWlanAct = %d, "
+			  "bCurIgnoreWlanAct = %d!!\n",
+			  coex_dm->pre_ignore_wlan_act,
+			  coex_dm->cur_ignore_wlan_act);
+
+		if (coex_dm->pre_ignore_wlan_act ==
+		    coex_dm->cur_ignore_wlan_act)
+			return;
+	}
+	halbtc8723b2ant_set_fw_ignore_wlan_act(btcoexist, enable);
+
+	coex_dm->pre_ignore_wlan_act = coex_dm->cur_ignore_wlan_act;
+}
+
+void halbtc8723b2ant_set_fw_ps_tdma(struct btc_coexist *btcoexist, u8 byte1,
+				    u8 byte2, u8 byte3, u8 byte4, u8 byte5)
+{
+	u8 h2c_parameter[5] = {0};
+
+	h2c_parameter[0] = byte1;
+	h2c_parameter[1] = byte2;
+	h2c_parameter[2] = byte3;
+	h2c_parameter[3] = byte4;
+	h2c_parameter[4] = byte5;
+
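+	/* keep a copy of the current PS-TDMA parameters in coex_dm */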
+	coex_dm->ps_tdma_para[0] = byte1;
+	coex_dm->ps_tdma_para[1] = byte2;
+	coex_dm->ps_tdma_para[2] = byte3;
+	coex_dm->ps_tdma_para[3] = byte4;
+	coex_dm->ps_tdma_para[4] = byte5;
+
+	BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_EXEC,
+		  "[BTCoex], FW write 0x60(5bytes)=0x%x%08x\n",
+		  h2c_parameter[0],
+		  h2c_parameter[1] << 24 | h2c_parameter[2] << 16 |
+		  h2c_parameter[3] << 8 | h2c_parameter[4]);
+
+	btcoexist->btc_fill_h2c(btcoexist, 0x60, 5, h2c_parameter);
+}
+
+void halbtc8723b2ant_sw_mechanism1(struct btc_coexist *btcoexist,
+				   bool shrink_rx_lpf, bool low_penalty_ra,
+				   bool limited_dig, bool bt_lna_constrain)
+{
+	/*
+	u32	wifi_bw;
+
+	btcoexist->btc_get(btcoexist, BTC_GET_U4_WIFI_BW, &wifi_bw);
+
+	if(BTC_WIFI_BW_HT40 != wifi_bw)  //only shrink RF Rx LPF for HT40
+	{
+		if (shrink_rx_lpf)
+			shrink_rx_lpf = false;
+	}
+	*/
+
+	halbtc8723b2ant_rf_shrink(btcoexist, NORMAL_EXEC, shrink_rx_lpf);
+	halbtc8723b2ant_low_penalty_ra(btcoexist, NORMAL_EXEC, low_penalty_ra);
+}
+
+void halbtc8723b2ant_sw_mechanism2(struct btc_coexist *btcoexist,
+				   bool agc_table_shift, bool adc_backoff,
+				   bool sw_dac_swing, u32 dac_swing_lvl)
+{
+	halbtc8723b2ant_agc_table(btcoexist, NORMAL_EXEC, agc_table_shift);
+	/*halbtc8723b2ant_adc_backoff(btcoexist, NORMAL_EXEC, adc_backoff);*/
+	halbtc8723b2ant_dac_swing(btcoexist, NORMAL_EXEC, sw_dac_swing,
+				  dac_swing_lvl);
+}
+
+void halbtc8723b2ant_set_ant_path(struct btc_coexist *btcoexist,
+				  u8 antpos_type, bool init_hwcfg,
+				  bool wifi_off)
+{
+	struct btc_board_info *board_info = &btcoexist->board_info;
+	u32 fw_ver = 0, u32tmp = 0;
+	bool pg_ext_switch = false;
+	bool use_ext_switch = false;
+	u8 h2c_parameter[2] = {0};
+
+	btcoexist->btc_get(btcoexist, BTC_GET_BL_EXT_SWITCH, &pg_ext_switch);
+	btcoexist->btc_get(btcoexist, BTC_GET_U4_WIFI_FW_VER, &fw_ver);
+
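+	/* use the external antenna switch if the PG option requests it
+	 * or the firmware is older than 0xc0000 */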
+	if ((fw_ver < 0xc0000) || pg_ext_switch)
+		use_ext_switch = true;
+
+	if (init_hwcfg) {
+		/* 0x4c[23]=0, 0x4c[24]=1  Antenna control by WL/BT */
+		u32tmp = btcoexist->btc_read_4byte(btcoexist, 0x4c);
+		u32tmp &= ~BIT23;
+		u32tmp |= BIT24;
+		btcoexist->btc_write_4byte(btcoexist, 0x4c, u32tmp);
+
+		btcoexist->btc_write_1byte(btcoexist, 0x974, 0xff);
+		btcoexist->btc_write_1byte_bitmask(btcoexist, 0x944, 0x3, 0x3);
+		btcoexist->btc_write_1byte(btcoexist, 0x930, 0x77);
+		btcoexist->btc_write_1byte_bitmask(btcoexist, 0x67, 0x20, 0x1);
+
+		/* Force GNT_BT to low */
+		btcoexist->btc_write_1byte_bitmask(btcoexist, 0x765, 0x18, 0x0);
+		btcoexist->btc_write_2byte(btcoexist, 0x948, 0x0);
+
+		if (board_info->btdm_ant_pos == BTC_ANTENNA_AT_MAIN_PORT) {
+			/* tell firmware "no antenna inverse" */
+			h2c_parameter[0] = 0;
+			h2c_parameter[1] = 1;  /* ext switch type */
+			btcoexist->btc_fill_h2c(btcoexist, 0x65, 2,
+						h2c_parameter);
+		} else {
+			/* tell firmware "antenna inverse" */
+			h2c_parameter[0] = 1;
+			h2c_parameter[1] = 1;  /* ext switch type */
+			btcoexist->btc_fill_h2c(btcoexist, 0x65, 2,
+						h2c_parameter);
+		}
+	}
+
+	/* ext switch setting */
+	if (use_ext_switch) {
+		 /* fixed internal switch S1->WiFi, S0->BT */
+		btcoexist->btc_write_2byte(btcoexist, 0x948, 0x0);
+		switch (antpos_type) {
+		case BTC_ANT_WIFI_AT_MAIN:
+			/* ext switch main at wifi */
+			btcoexist->btc_write_1byte_bitmask(btcoexist, 0x92c,
+							   0x3, 0x1);
+			break;
+		case BTC_ANT_WIFI_AT_AUX:
+			/* ext switch aux at wifi */
+			btcoexist->btc_write_1byte_bitmask(btcoexist,
+							   0x92c, 0x3, 0x2);
+			break;
+		}
+	} else {	/* internal switch */
+		/* fixed ext switch */
+		btcoexist->btc_write_1byte_bitmask(btcoexist, 0x92c, 0x3, 0x1);
+		switch (antpos_type) {
+		case BTC_ANT_WIFI_AT_MAIN:
+			/* fixed internal switch S1->WiFi, S0->BT */
+			btcoexist->btc_write_2byte(btcoexist, 0x948, 0x0);
+			break;
+		case BTC_ANT_WIFI_AT_AUX:
+			/* fixed internal switch S0->WiFi, S1->BT */
+			btcoexist->btc_write_2byte(btcoexist, 0x948, 0x280);
+			break;
+		}
+	}
+}
+
+void halbtc8723b2ant_ps_tdma(struct btc_coexist *btcoexist, bool force_exec,
+			     bool turn_on, u8 type)
+{
+	BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW,
+		  "[BTCoex], %s turn %s PS TDMA, type=%d\n",
+		  (force_exec? "force to":""), (turn_on? "ON":"OFF"), type);
+	coex_dm->cur_ps_tdma_on = turn_on;
+	coex_dm->cur_ps_tdma = type;
+
+	if (!force_exec) {
+		BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_DETAIL,
+			  "[BTCoex], bPrePsTdmaOn = %d, bCurPsTdmaOn = %d!!\n",
+			  coex_dm->pre_ps_tdma_on, coex_dm->cur_ps_tdma_on);
+		BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_DETAIL,
+			  "[BTCoex], prePsTdma = %d, curPsTdma = %d!!\n",
+			  coex_dm->pre_ps_tdma, coex_dm->cur_ps_tdma);
+
+		if ((coex_dm->pre_ps_tdma_on == coex_dm->cur_ps_tdma_on) &&
+		    (coex_dm->pre_ps_tdma == coex_dm->cur_ps_tdma))
+			return;
+	}
+	if (turn_on) {
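+		/* each type is a predefined 5-byte TDMA parameter set for FW cmd 0x60 */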
+		switch (type) {
+		case 1:
+		default:
+			halbtc8723b2ant_set_fw_ps_tdma(btcoexist, 0xe3, 0x1a,
+						       0x1a, 0xe1, 0x90);
+			break;
+		case 2:
+			halbtc8723b2ant_set_fw_ps_tdma(btcoexist, 0xe3, 0x12,
+						       0x12, 0xe1, 0x90);
+			break;
+		case 3:
+			halbtc8723b2ant_set_fw_ps_tdma(btcoexist, 0xe3, 0x1c,
+						       0x3, 0xf1, 0x90);
+			break;
+		case 4:
+			halbtc8723b2ant_set_fw_ps_tdma(btcoexist, 0xe3, 0x10,
+						       0x03, 0xf1, 0x90);
+			break;
+		case 5:
+			halbtc8723b2ant_set_fw_ps_tdma(btcoexist, 0xe3, 0x1a,
+						       0x1a, 0x60, 0x90);
+			break;
+		case 6:
+			halbtc8723b2ant_set_fw_ps_tdma(btcoexist, 0xe3, 0x12,
+						       0x12, 0x60, 0x90);
+			break;
+		case 7:
+			halbtc8723b2ant_set_fw_ps_tdma(btcoexist, 0xe3, 0x1c,
+						       0x3, 0x70, 0x90);
+			break;
+		case 8:
+			halbtc8723b2ant_set_fw_ps_tdma(btcoexist, 0xa3, 0x10,
+						       0x3, 0x70, 0x90);
+			break;
+		case 9:
+			halbtc8723b2ant_set_fw_ps_tdma(btcoexist, 0xe3, 0x1a,
+						       0x1a, 0xe1, 0x90);
+			break;
+		case 10:
+			halbtc8723b2ant_set_fw_ps_tdma(btcoexist, 0xe3, 0x12,
+						       0x12, 0xe1, 0x90);
+			break;
+		case 11:
+			halbtc8723b2ant_set_fw_ps_tdma(btcoexist, 0xe3, 0xa,
+						       0xa, 0xe1, 0x90);
+			break;
+		case 12:
+			halbtc8723b2ant_set_fw_ps_tdma(btcoexist, 0xe3, 0x5,
+						       0x5, 0xe1, 0x90);
+			break;
+		case 13:
+			halbtc8723b2ant_set_fw_ps_tdma(btcoexist, 0xe3, 0x1a,
+						       0x1a, 0x60, 0x90);
+			break;
+		case 14:
+			halbtc8723b2ant_set_fw_ps_tdma(btcoexist, 0xe3, 0x12,
+						       0x12, 0x60, 0x90);
+			break;
+		case 15:
+			halbtc8723b2ant_set_fw_ps_tdma(btcoexist, 0xe3, 0xa,
+						       0xa, 0x60, 0x90);
+			break;
+		case 16:
+			halbtc8723b2ant_set_fw_ps_tdma(btcoexist, 0xe3, 0x5,
+						       0x5, 0x60, 0x90);
+			break;
+		case 17:
+			halbtc8723b2ant_set_fw_ps_tdma(btcoexist, 0xa3, 0x2f,
+						       0x2f, 0x60, 0x90);
+			break;
+		case 18:
+			halbtc8723b2ant_set_fw_ps_tdma(btcoexist, 0xe3, 0x5,
+						       0x5, 0xe1, 0x90);
+			break;
+		case 19:
+			halbtc8723b2ant_set_fw_ps_tdma(btcoexist, 0xe3, 0x25,
+						       0x25, 0xe1, 0x90);
+			break;
+		case 20:
+			halbtc8723b2ant_set_fw_ps_tdma(btcoexist, 0xe3, 0x25,
+						       0x25, 0x60, 0x90);
+			break;
+		case 21:
+			halbtc8723b2ant_set_fw_ps_tdma(btcoexist, 0xe3, 0x15,
+						       0x03, 0x70, 0x90);
+			break;
+		case 71:
+			halbtc8723b2ant_set_fw_ps_tdma(btcoexist, 0xe3, 0x1a,
+						       0x1a, 0xe1, 0x90);
+			break;
+		}
+	} else {
+		/* disable PS tdma */
+		switch (type) {
+		case 0:
+			halbtc8723b2ant_set_fw_ps_tdma(btcoexist, 0x0, 0x0, 0x0,
+						       0x40, 0x0);
+			break;
+		case 1:
+			halbtc8723b2ant_set_fw_ps_tdma(btcoexist, 0x0, 0x0, 0x0,
+						       0x48, 0x0);
+			break;
+		default:
+			halbtc8723b2ant_set_fw_ps_tdma(btcoexist, 0x0, 0x0, 0x0,
+						       0x40, 0x0);
+			break;
+		}
+	}
+
+	/* update pre state */
+	coex_dm->pre_ps_tdma_on = coex_dm->cur_ps_tdma_on;
+	coex_dm->pre_ps_tdma = coex_dm->cur_ps_tdma;
+}
+
+void halbtc8723b2ant_coex_alloff(struct btc_coexist *btcoexist)
+{
+	/* fw all off */
+	halbtc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC, false, 1);
+	halbtc8723b2ant_fw_dac_swing_lvl(btcoexist, NORMAL_EXEC, 6);
+	halbtc8723b2ant_dec_bt_pwr(btcoexist, NORMAL_EXEC, false);
+
+	/* sw all off */
+	halbtc8723b2ant_sw_mechanism1(btcoexist, false, false, false, false);
+	halbtc8723b2ant_sw_mechanism2(btcoexist, false, false, false, 0x18);
+
+	/* hw all off */
+	btcoexist->btc_set_rf_reg(btcoexist, BTC_RF_A, 0x1, 0xfffff, 0x0);
+	halbtc8723b2ant_coex_table_with_type(btcoexist, NORMAL_EXEC, 0);
+}
+
+void halbtc8723b2ant_init_coex_dm(struct btc_coexist *btcoexist)
+{
+	/* force to reset coex mechanism*/
+
+	halbtc8723b2ant_ps_tdma(btcoexist, FORCE_EXEC, false, 1);
+	halbtc8723b2ant_fw_dac_swing_lvl(btcoexist, FORCE_EXEC, 6);
+	halbtc8723b2ant_dec_bt_pwr(btcoexist, FORCE_EXEC, false);
+
+	halbtc8723b2ant_sw_mechanism1(btcoexist, false, false, false, false);
+	halbtc8723b2ant_sw_mechanism2(btcoexist, false, false, false, 0x18);
+}
+
+void halbtc8723b2ant_action_bt_inquiry(struct btc_coexist *btcoexist)
+{
+	bool wifi_connected = false;
+	bool low_pwr_disable = true;
+
+	btcoexist->btc_set(btcoexist, BTC_SET_ACT_DISABLE_LOW_POWER,
+			   &low_pwr_disable);
+	btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_CONNECTED,
+			   &wifi_connected);
+
+	if (wifi_connected) {
+		halbtc8723b2ant_coex_table_with_type(btcoexist, NORMAL_EXEC, 7);
+		halbtc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 3);
+	} else {
+		halbtc8723b2ant_coex_table_with_type(btcoexist, NORMAL_EXEC, 0);
+		halbtc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC, false, 1);
+	}
+	halbtc8723b2ant_fw_dac_swing_lvl(btcoexist, FORCE_EXEC, 6);
+	halbtc8723b2ant_dec_bt_pwr(btcoexist, NORMAL_EXEC, false);
+
+	halbtc8723b2ant_sw_mechanism1(btcoexist, false, false, false, false);
+	halbtc8723b2ant_sw_mechanism2(btcoexist, false, false, false, 0x18);
+
+	coex_dm->need_recover_0x948 = true;
+	coex_dm->backup_0x948 = btcoexist->btc_read_2byte(btcoexist, 0x948);
+
+	halbtc8723b2ant_set_ant_path(btcoexist, BTC_ANT_WIFI_AT_AUX,
+				     false, false);
+}
+
+bool halbtc8723b2ant_is_common_action(struct btc_coexist *btcoexist)
+{
+	bool bCommon = false, wifi_connected = false;
+	bool wifi_busy = false;
+	bool bt_hs_on = false, low_pwr_disable = false;
+
+	btcoexist->btc_get(btcoexist, BTC_GET_BL_HS_OPERATION, &bt_hs_on);
+	btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_CONNECTED,
+			   &wifi_connected);
+	btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_BUSY, &wifi_busy);
+
+	if (!wifi_connected) {
+		low_pwr_disable = false;
+		btcoexist->btc_set(btcoexist, BTC_SET_ACT_DISABLE_LOW_POWER,
+				   &low_pwr_disable);
+
+		BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
+			  "[BTCoex], Wifi non-connected idle!!\n");
+
+		btcoexist->btc_set_rf_reg(btcoexist, BTC_RF_A, 0x1, 0xfffff,
+					  0x0);
+		halbtc8723b2ant_coex_table_with_type(btcoexist, NORMAL_EXEC, 0);
+		halbtc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC, false, 1);
+		halbtc8723b2ant_fw_dac_swing_lvl(btcoexist, NORMAL_EXEC, 6);
+		halbtc8723b2ant_dec_bt_pwr(btcoexist, NORMAL_EXEC, false);
+
+		halbtc8723b2ant_sw_mechanism1(btcoexist, false, false, false,
+					      false);
+		halbtc8723b2ant_sw_mechanism2(btcoexist, false, false, false,
+					      0x18);
+
+		bCommon = true;
+	} else {
+		if (BT_8723B_2ANT_BT_STATUS_NON_CONNECTED_IDLE ==
+		    coex_dm->bt_status) {
+			low_pwr_disable = false;
+			btcoexist->btc_set(btcoexist,
+					   BTC_SET_ACT_DISABLE_LOW_POWER,
+					   &low_pwr_disable);
+
+			BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
+				  "[BTCoex], Wifi connected + "
+				  "BT non connected-idle!!\n");
+
+			btcoexist->btc_set_rf_reg(btcoexist, BTC_RF_A, 0x1,
+						  0xfffff, 0x0);
+			halbtc8723b2ant_coex_table_with_type(btcoexist,
+							     NORMAL_EXEC, 0);
+			halbtc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC, false,
+						1);
+			halbtc8723b2ant_fw_dac_swing_lvl(btcoexist, NORMAL_EXEC,
+							 0xb);
+			halbtc8723b2ant_dec_bt_pwr(btcoexist, NORMAL_EXEC,
+						   false);
+
+			halbtc8723b2ant_sw_mechanism1(btcoexist, false, false,
+						      false, false);
+			halbtc8723b2ant_sw_mechanism2(btcoexist, false, false,
+						      false, 0x18);
+
+			bCommon = true;
+		} else if (BT_8723B_2ANT_BT_STATUS_CONNECTED_IDLE ==
+			   coex_dm->bt_status) {
+			low_pwr_disable = true;
+			btcoexist->btc_set(btcoexist,
+					   BTC_SET_ACT_DISABLE_LOW_POWER,
+					   &low_pwr_disable);
+
+			if (bt_hs_on)
+				return false;
+			BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
+				  "[BTCoex], Wifi connected + "
+				  "BT connected-idle!!\n");
+
+			btcoexist->btc_set_rf_reg(btcoexist, BTC_RF_A, 0x1,
+						  0xfffff, 0x0);
+			halbtc8723b2ant_coex_table_with_type(btcoexist,
+							     NORMAL_EXEC, 0);
+			halbtc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC, false,
+						1);
+			halbtc8723b2ant_fw_dac_swing_lvl(btcoexist, NORMAL_EXEC,
+							 0xb);
+			halbtc8723b2ant_dec_bt_pwr(btcoexist, NORMAL_EXEC,
+						   false);
+
+			halbtc8723b2ant_sw_mechanism1(btcoexist, true, false,
+						      false, false);
+			halbtc8723b2ant_sw_mechanism2(btcoexist, false, false,
+						      false, 0x18);
+
+			bCommon = true;
+		} else {
+			low_pwr_disable = true;
+			btcoexist->btc_set(btcoexist,
+					   BTC_SET_ACT_DISABLE_LOW_POWER,
+					   &low_pwr_disable);
+
+			if (wifi_busy) {
+				BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
+					  "[BTCoex], Wifi Connected-Busy + "
+					  "BT Busy!!\n");
+				bCommon = false;
+			} else {
+				if (bt_hs_on)
+					return false;
+
+				BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
+					  "[BTCoex], Wifi Connected-Idle + "
+					  "BT Busy!!\n");
+
+				btcoexist->btc_set_rf_reg(btcoexist, BTC_RF_A,
+							  0x1, 0xfffff, 0x0);
+				halbtc8723b2ant_coex_table_with_type(btcoexist,
+								    NORMAL_EXEC,
+								    7);
+				halbtc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC,
+							true, 21);
+				halbtc8723b2ant_fw_dac_swing_lvl(btcoexist,
+								 NORMAL_EXEC,
+								 0xb);
+				if (halbtc8723b2ant_need_to_dec_bt_pwr(btcoexist))
+					halbtc8723b2ant_dec_bt_pwr(btcoexist,
+								   NORMAL_EXEC,
+								   true);
+				else
+					halbtc8723b2ant_dec_bt_pwr(btcoexist,
+								   NORMAL_EXEC,
+								   false);
+				halbtc8723b2ant_sw_mechanism1(btcoexist, false,
+							      false, false,
+							      false);
+				halbtc8723b2ant_sw_mechanism2(btcoexist, false,
+							      false, false,
+							      0x18);
+				bCommon = true;
+			}
+		}
+	}
+
+	return bCommon;
+}
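+
+/* Adaptive PS TDMA tuning: on the first run pick an initial TDMA case from
+ * sco_hid/tx_pause/max_interval; afterwards use the BT retry count reported
+ * in BT_Info to widen the WiFi slot when BT sees no retries and to shrink
+ * it when retries pile up, then step cur_ps_tdma through the matching cases.
+ */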
+void halbtc8723b2ant_tdma_duration_adjust(struct btc_coexist *btcoexist,
+					  bool sco_hid, bool tx_pause,
+					  u8 max_interval)
+{
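+	/* up/dn are hysteresis counters over the 2-second BT retry reports;
+	 * the WiFi slot is only widened after n good periods, and n (= 3 * m)
+	 * grows while earlier decreases have not settled things down
+	 */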
+	static s32 up, dn, m, n, wait_count;
+	/* 0: no change, +1: increase WiFi duration,
+	 * -1: decrease WiFi duration
+	 */
+	s32 result;
+	u8 retryCount = 0;
+
+	BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW,
+		  "[BTCoex], TdmaDurationAdjust()\n");
+
+	if (!coex_dm->auto_tdma_adjust) {
+		coex_dm->auto_tdma_adjust = true;
+		BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_DETAIL,
+			  "[BTCoex], first run TdmaDurationAdjust()!!\n");
+		if (sco_hid) {
+			if (tx_pause) {
+				if (max_interval == 1) {
+					halbtc8723b2ant_ps_tdma(btcoexist,
+								NORMAL_EXEC,
+								true, 13);
+					coex_dm->ps_tdma_du_adj_type = 13;
+				} else if (max_interval == 2) {
+					halbtc8723b2ant_ps_tdma(btcoexist,
+								NORMAL_EXEC,
+								true, 14);
+					coex_dm->ps_tdma_du_adj_type = 14;
+				} else if (max_interval == 3) {
+					halbtc8723b2ant_ps_tdma(btcoexist,
+								NORMAL_EXEC,
+								true, 15);
+					coex_dm->ps_tdma_du_adj_type = 15;
+				} else {
+					halbtc8723b2ant_ps_tdma(btcoexist,
+								NORMAL_EXEC,
+								true, 15);
+					coex_dm->ps_tdma_du_adj_type = 15;
+				}
+			} else {
+				if (max_interval == 1) {
+					halbtc8723b2ant_ps_tdma(btcoexist,
+								NORMAL_EXEC,
+								true, 9);
+					coex_dm->ps_tdma_du_adj_type = 9;
+				} else if (max_interval == 2) {
+					halbtc8723b2ant_ps_tdma(btcoexist,
+								NORMAL_EXEC,
+								true, 10);
+					coex_dm->ps_tdma_du_adj_type = 10;
+				} else if (max_interval == 3) {
+					halbtc8723b2ant_ps_tdma(btcoexist,
+								NORMAL_EXEC,
+								true, 11);
+					coex_dm->ps_tdma_du_adj_type = 11;
+				} else {
+					halbtc8723b2ant_ps_tdma(btcoexist,
+								NORMAL_EXEC,
+								true, 11);
+					coex_dm->ps_tdma_du_adj_type = 11;
+				}
+			}
+		} else {
+			if (tx_pause) {
+				if (max_interval == 1) {
+					halbtc8723b2ant_ps_tdma(btcoexist,
+								NORMAL_EXEC,
+								true, 5);
+					coex_dm->ps_tdma_du_adj_type = 5;
+				} else if (max_interval == 2) {
+					halbtc8723b2ant_ps_tdma(btcoexist,
+								NORMAL_EXEC,
+								true, 6);
+					coex_dm->ps_tdma_du_adj_type = 6;
+				} else if (max_interval == 3) {
+					halbtc8723b2ant_ps_tdma(btcoexist,
+								NORMAL_EXEC,
+								true, 7);
+					coex_dm->ps_tdma_du_adj_type = 7;
+				} else {
+					halbtc8723b2ant_ps_tdma(btcoexist,
+								NORMAL_EXEC,
+								true, 7);
+					coex_dm->ps_tdma_du_adj_type = 7;
+				}
+			} else {
+				if (max_interval == 1) {
+					halbtc8723b2ant_ps_tdma(btcoexist,
+								NORMAL_EXEC,
+								true, 1);
+					coex_dm->ps_tdma_du_adj_type = 1;
+				} else if (max_interval == 2) {
+					halbtc8723b2ant_ps_tdma(btcoexist,
+								NORMAL_EXEC,
+								true, 2);
+					coex_dm->ps_tdma_du_adj_type = 2;
+				} else if (max_interval == 3) {
+					halbtc8723b2ant_ps_tdma(btcoexist,
+								NORMAL_EXEC,
+								true, 3);
+					coex_dm->ps_tdma_du_adj_type = 3;
+				} else {
+					halbtc8723b2ant_ps_tdma(btcoexist,
+								NORMAL_EXEC,
+								true, 3);
+					coex_dm->ps_tdma_du_adj_type = 3;
+				}
+			}
+		}
+
+		up = 0;
+		dn = 0;
+		m = 1;
+		n = 3;
+		result = 0;
+		wait_count = 0;
+	} else {
+		/* acquire the BT TRx retry count from BT_Info byte2 */
+		retryCount = coex_sta->bt_retry_cnt;
+		BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_DETAIL,
+			  "[BTCoex], retryCount = %d\n", retryCount);
+		BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_DETAIL,
+			  "[BTCoex], up=%d, dn=%d, m=%d, n=%d, wait_count=%d\n",
+			  up, dn, m, n, wait_count);
+		result = 0;
+		wait_count++;
+		/* no retries in the last 2-second period */
+		if (retryCount == 0) {
+			up++;
+			dn--;
+
+			if (dn <= 0)
+				dn = 0;
+
+			if (up >= n) {
+				wait_count = 0;
+				n = 3;
+				up = 0;
+				dn = 0;
+				result = 1;
+				BTC_PRINT(BTC_MSG_ALGORITHM,
+					  ALGO_TRACE_FW_DETAIL,
+					  "[BTCoex], Increase wifi "
+					  "duration!!\n");
+			} /* <= 3 retries in the last 2-second period */
+		} else if (retryCount <= 3) {
+			up--;
+			dn++;
+
+			if (up <= 0)
+				up = 0;
+
+			if (dn == 2) {
+				if (wait_count <= 2)
+					m++;
+				else
+					m = 1;
+
+				if (m >= 20)
+					m = 20;
+
+				n = 3 * m;
+				up = 0;
+				dn = 0;
+				wait_count = 0;
+				result = -1;
+				BTC_PRINT(BTC_MSG_ALGORITHM,
+					  ALGO_TRACE_FW_DETAIL,
+					  "[BTCoex], Decrease wifi duration "
+					  "for retryCounter<3!!\n");
+			}
+		} else {
+			if (wait_count == 1)
+				m++;
+			else
+				m = 1;
+
+			if (m >= 20)
+				m = 20;
+
+			n = 3 * m;
+			up = 0;
+			dn = 0;
+			wait_count = 0;
+			result = -1;
+			BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_DETAIL,
+				  "[BTCoex], Decrease wifi duration "
+				  "for retryCounter>3!!\n");
+		}
+
+		BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_DETAIL,
+			  "[BTCoex], max Interval = %d\n", max_interval);
+		if (max_interval == 1) {
+			if (tx_pause) {
+				BTC_PRINT(BTC_MSG_ALGORITHM,
+					  ALGO_TRACE_FW_DETAIL,
+					  "[BTCoex], TxPause = 1\n");
+
+				if (coex_dm->cur_ps_tdma == 71) {
+					halbtc8723b2ant_ps_tdma(btcoexist,
+								NORMAL_EXEC,
+								true, 5);
+					coex_dm->ps_tdma_du_adj_type = 5;
+				} else if (coex_dm->cur_ps_tdma == 1) {
+					halbtc8723b2ant_ps_tdma(btcoexist,
+								NORMAL_EXEC,
+								true, 5);
+					coex_dm->ps_tdma_du_adj_type = 5;
+				} else if (coex_dm->cur_ps_tdma == 2) {
+					halbtc8723b2ant_ps_tdma(btcoexist,
+								NORMAL_EXEC,
+								true, 6);
+					coex_dm->ps_tdma_du_adj_type = 6;
+				} else if (coex_dm->cur_ps_tdma == 3) {
+					halbtc8723b2ant_ps_tdma(btcoexist,
+								NORMAL_EXEC,
+								true, 7);
+					coex_dm->ps_tdma_du_adj_type = 7;
+				} else if (coex_dm->cur_ps_tdma == 4) {
+					halbtc8723b2ant_ps_tdma(btcoexist,
+								NORMAL_EXEC,
+								true, 8);
+					coex_dm->ps_tdma_du_adj_type = 8;
+				}
+
+				if (coex_dm->cur_ps_tdma == 9) {
+					halbtc8723b2ant_ps_tdma(btcoexist,
+								NORMAL_EXEC,
+								true, 13);
+					coex_dm->ps_tdma_du_adj_type = 13;
+				} else if (coex_dm->cur_ps_tdma == 10) {
+					halbtc8723b2ant_ps_tdma(btcoexist,
+								NORMAL_EXEC,
+								true, 14);
+					coex_dm->ps_tdma_du_adj_type = 14;
+				} else if (coex_dm->cur_ps_tdma == 11) {
+					halbtc8723b2ant_ps_tdma(btcoexist,
+								NORMAL_EXEC,
+								true, 15);
+					coex_dm->ps_tdma_du_adj_type = 15;
+				} else if (coex_dm->cur_ps_tdma == 12) {
+					halbtc8723b2ant_ps_tdma(btcoexist,
+								NORMAL_EXEC,
+								true, 16);
+					coex_dm->ps_tdma_du_adj_type = 16;
+				}
+
+				if (result == -1) {
+					if (coex_dm->cur_ps_tdma == 5) {
+						halbtc8723b2ant_ps_tdma(
+								btcoexist,
+								NORMAL_EXEC,
+								true, 6);
+						coex_dm->ps_tdma_du_adj_type =
+									      6;
+					} else if (coex_dm->cur_ps_tdma == 6) {
+						halbtc8723b2ant_ps_tdma(
+								btcoexist,
+								NORMAL_EXEC,
+								true, 7);
+						coex_dm->ps_tdma_du_adj_type =
+									      7;
+					} else if (coex_dm->cur_ps_tdma == 7) {
+						halbtc8723b2ant_ps_tdma(
+								btcoexist,
+								NORMAL_EXEC,
+								true, 8);
+						coex_dm->ps_tdma_du_adj_type =
+									      8;
+					} else if (coex_dm->cur_ps_tdma == 13) {
+						halbtc8723b2ant_ps_tdma(
+								btcoexist,
+								NORMAL_EXEC,
+								true, 14);
+						coex_dm->ps_tdma_du_adj_type =
+									     14;
+					} else if (coex_dm->cur_ps_tdma == 14) {
+						halbtc8723b2ant_ps_tdma(
+								btcoexist,
+								NORMAL_EXEC,
+								true, 15);
+						coex_dm->ps_tdma_du_adj_type =
+									     15;
+					} else if (coex_dm->cur_ps_tdma == 15) {
+						halbtc8723b2ant_ps_tdma(
+								btcoexist,
+								NORMAL_EXEC,
+								true, 16);
+						coex_dm->ps_tdma_du_adj_type =
+									     16;
+					}
+				}  else if (result == 1) {
+					if (coex_dm->cur_ps_tdma == 8) {
+						halbtc8723b2ant_ps_tdma(
+								btcoexist,
+								NORMAL_EXEC,
+								true, 7);
+						coex_dm->ps_tdma_du_adj_type =
+									      7;
+					} else if (coex_dm->cur_ps_tdma == 7) {
+						halbtc8723b2ant_ps_tdma(
+								btcoexist,
+								NORMAL_EXEC,
+								true, 6);
+						coex_dm->ps_tdma_du_adj_type =
+									      6;
+					} else if (coex_dm->cur_ps_tdma == 6) {
+						halbtc8723b2ant_ps_tdma(
+								btcoexist,
+								NORMAL_EXEC,
+								true, 5);
+						coex_dm->ps_tdma_du_adj_type =
+									      5;
+					} else if (coex_dm->cur_ps_tdma == 16) {
+						halbtc8723b2ant_ps_tdma(
+								btcoexist,
+								NORMAL_EXEC,
+								true, 15);
+						coex_dm->ps_tdma_du_adj_type =
+									     15;
+					} else if (coex_dm->cur_ps_tdma == 15) {
+						halbtc8723b2ant_ps_tdma(
+								btcoexist,
+								NORMAL_EXEC,
+								true, 14);
+						coex_dm->ps_tdma_du_adj_type =
+									     14;
+					} else if (coex_dm->cur_ps_tdma == 14) {
+						halbtc8723b2ant_ps_tdma(
+								btcoexist,
+								NORMAL_EXEC,
+								true, 13);
+						coex_dm->ps_tdma_du_adj_type =
+									     13;
+					}
+				}
+			} else {
+				BTC_PRINT(BTC_MSG_ALGORITHM,
+					  ALGO_TRACE_FW_DETAIL,
+					  "[BTCoex], TxPause = 0\n");
+				if (coex_dm->cur_ps_tdma == 5) {
+					halbtc8723b2ant_ps_tdma(btcoexist,
+								NORMAL_EXEC,
+								true, 71);
+					coex_dm->ps_tdma_du_adj_type = 71;
+				} else if (coex_dm->cur_ps_tdma == 6) {
+					halbtc8723b2ant_ps_tdma(btcoexist,
+								NORMAL_EXEC,
+								true, 2);
+					coex_dm->ps_tdma_du_adj_type = 2;
+				} else if (coex_dm->cur_ps_tdma == 7) {
+					halbtc8723b2ant_ps_tdma(btcoexist,
+								NORMAL_EXEC,
+								true, 3);
+					coex_dm->ps_tdma_du_adj_type = 3;
+				} else if (coex_dm->cur_ps_tdma == 8) {
+					halbtc8723b2ant_ps_tdma(btcoexist,
+								NORMAL_EXEC,
+								true, 4);
+					coex_dm->ps_tdma_du_adj_type = 4;
+				}
+
+				if (coex_dm->cur_ps_tdma == 13) {
+					halbtc8723b2ant_ps_tdma(btcoexist,
+								NORMAL_EXEC,
+								true, 9);
+					coex_dm->ps_tdma_du_adj_type = 9;
+				} else if (coex_dm->cur_ps_tdma == 14) {
+					halbtc8723b2ant_ps_tdma(btcoexist,
+								NORMAL_EXEC,
+								true, 10);
+					coex_dm->ps_tdma_du_adj_type = 10;
+				} else if (coex_dm->cur_ps_tdma == 15) {
+					halbtc8723b2ant_ps_tdma(btcoexist,
+								NORMAL_EXEC,
+								true, 11);
+					coex_dm->ps_tdma_du_adj_type = 11;
+				} else if (coex_dm->cur_ps_tdma == 16) {
+					halbtc8723b2ant_ps_tdma(btcoexist,
+								NORMAL_EXEC,
+								true, 12);
+					coex_dm->ps_tdma_du_adj_type = 12;
+				}
+
+				if (result == -1) {
+					if (coex_dm->cur_ps_tdma == 71) {
+						halbtc8723b2ant_ps_tdma(
+								btcoexist,
+								NORMAL_EXEC,
+								true, 1);
+						coex_dm->ps_tdma_du_adj_type =
+									      1;
+					} else if (coex_dm->cur_ps_tdma == 1) {
+						halbtc8723b2ant_ps_tdma(
+								btcoexist,
+								NORMAL_EXEC,
+								true, 2);
+						coex_dm->ps_tdma_du_adj_type =
+									      2;
+					} else if (coex_dm->cur_ps_tdma == 2) {
+						halbtc8723b2ant_ps_tdma(
+								btcoexist,
+								NORMAL_EXEC,
+								true, 3);
+						coex_dm->ps_tdma_du_adj_type =
+									      3;
+					} else if (coex_dm->cur_ps_tdma == 3) {
+						halbtc8723b2ant_ps_tdma(
+								btcoexist,
+								NORMAL_EXEC,
+								true, 4);
+						coex_dm->ps_tdma_du_adj_type =
+									      4;
+					} else if (coex_dm->cur_ps_tdma == 9) {
+						halbtc8723b2ant_ps_tdma(
+								btcoexist,
+								NORMAL_EXEC,
+								true, 10);
+						coex_dm->ps_tdma_du_adj_type =
+									     10;
+					} else if (coex_dm->cur_ps_tdma == 10) {
+						halbtc8723b2ant_ps_tdma(
+								btcoexist,
+								NORMAL_EXEC,
+								true, 11);
+						coex_dm->ps_tdma_du_adj_type =
+									     11;
+					} else if (coex_dm->cur_ps_tdma == 11) {
+						halbtc8723b2ant_ps_tdma(
+								btcoexist,
+								NORMAL_EXEC,
+								true, 12);
+						coex_dm->ps_tdma_du_adj_type =
+									     12;
+					}
+				}  else if (result == 1) {
+					if (coex_dm->cur_ps_tdma == 4) {
+						halbtc8723b2ant_ps_tdma(
+								btcoexist,
+								NORMAL_EXEC,
+								true, 3);
+						coex_dm->ps_tdma_du_adj_type =
+									      3;
+					} else if (coex_dm->cur_ps_tdma == 3) {
+						halbtc8723b2ant_ps_tdma(
+								btcoexist,
+								NORMAL_EXEC,
+								true, 2);
+						coex_dm->ps_tdma_du_adj_type =
+									      2;
+					} else if (coex_dm->cur_ps_tdma == 2) {
+						halbtc8723b2ant_ps_tdma(
+								btcoexist,
+								NORMAL_EXEC,
+								true, 1);
+						coex_dm->ps_tdma_du_adj_type =
+									      1;
+					} else if (coex_dm->cur_ps_tdma == 1) {
+						halbtc8723b2ant_ps_tdma(
+								btcoexist,
+								NORMAL_EXEC,
+								true, 71);
+						coex_dm->ps_tdma_du_adj_type =
+									     71;
+					} else if (coex_dm->cur_ps_tdma == 12) {
+						halbtc8723b2ant_ps_tdma(
+								btcoexist,
+								NORMAL_EXEC,
+								true, 11);
+						coex_dm->ps_tdma_du_adj_type =
+									     11;
+					} else if (coex_dm->cur_ps_tdma == 11) {
+						halbtc8723b2ant_ps_tdma(
+								btcoexist,
+								NORMAL_EXEC,
+								true, 10);
+						coex_dm->ps_tdma_du_adj_type =
+									     10;
+					} else if (coex_dm->cur_ps_tdma == 10) {
+						halbtc8723b2ant_ps_tdma(
+								btcoexist,
+								NORMAL_EXEC,
+								true, 9);
+						coex_dm->ps_tdma_du_adj_type =
+									      9;
+					}
+				}
+			}
+		} else if (max_interval == 2) {
+			if (tx_pause) {
+				BTC_PRINT(BTC_MSG_ALGORITHM,
+					  ALGO_TRACE_FW_DETAIL,
+					  "[BTCoex], TxPause = 1\n");
+				if (coex_dm->cur_ps_tdma == 1) {
+					halbtc8723b2ant_ps_tdma(btcoexist,
+								NORMAL_EXEC,
+								true, 6);
+					coex_dm->ps_tdma_du_adj_type = 6;
+				} else if (coex_dm->cur_ps_tdma == 2) {
+					halbtc8723b2ant_ps_tdma(btcoexist,
+								NORMAL_EXEC,
+								true, 6);
+					coex_dm->ps_tdma_du_adj_type = 6;
+				} else if (coex_dm->cur_ps_tdma == 3) {
+					halbtc8723b2ant_ps_tdma(btcoexist,
+								NORMAL_EXEC,
+								true, 7);
+					coex_dm->ps_tdma_du_adj_type = 7;
+				} else if (coex_dm->cur_ps_tdma == 4) {
+					halbtc8723b2ant_ps_tdma(btcoexist,
+								NORMAL_EXEC,
+								true, 8);
+					coex_dm->ps_tdma_du_adj_type = 8;
+				}
+				if (coex_dm->cur_ps_tdma == 9) {
+					halbtc8723b2ant_ps_tdma(btcoexist,
+								NORMAL_EXEC,
+								true, 14);
+					coex_dm->ps_tdma_du_adj_type = 14;
+				} else if (coex_dm->cur_ps_tdma == 10) {
+					halbtc8723b2ant_ps_tdma(btcoexist,
+								NORMAL_EXEC,
+								true, 14);
+					coex_dm->ps_tdma_du_adj_type = 14;
+				} else if (coex_dm->cur_ps_tdma == 11) {
+					halbtc8723b2ant_ps_tdma(btcoexist,
+								NORMAL_EXEC,
+								true, 15);
+					coex_dm->ps_tdma_du_adj_type = 15;
+				} else if (coex_dm->cur_ps_tdma == 12) {
+					halbtc8723b2ant_ps_tdma(btcoexist,
+								NORMAL_EXEC,
+								true, 16);
+					coex_dm->ps_tdma_du_adj_type = 16;
+				}
+				if (result == -1) {
+					if (coex_dm->cur_ps_tdma == 5) {
+						halbtc8723b2ant_ps_tdma(
+								btcoexist,
+								NORMAL_EXEC,
+								true, 6);
+						coex_dm->ps_tdma_du_adj_type =
+									      6;
+					} else if (coex_dm->cur_ps_tdma == 6) {
+						halbtc8723b2ant_ps_tdma(
+								btcoexist,
+								NORMAL_EXEC,
+								true, 7);
+						coex_dm->ps_tdma_du_adj_type =
+									      7;
+					} else if (coex_dm->cur_ps_tdma == 7) {
+						halbtc8723b2ant_ps_tdma(
+								btcoexist,
+								NORMAL_EXEC,
+								true, 8);
+						coex_dm->ps_tdma_du_adj_type =
+									      8;
+					} else if (coex_dm->cur_ps_tdma == 13) {
+						halbtc8723b2ant_ps_tdma(
+								btcoexist,
+								NORMAL_EXEC,
+								true, 14);
+						coex_dm->ps_tdma_du_adj_type =
+									     14;
+					} else if (coex_dm->cur_ps_tdma == 14) {
+						halbtc8723b2ant_ps_tdma(
+								btcoexist,
+								NORMAL_EXEC,
+								true, 15);
+						coex_dm->ps_tdma_du_adj_type =
+									     15;
+					} else if (coex_dm->cur_ps_tdma == 15) {
+						halbtc8723b2ant_ps_tdma(
+								btcoexist,
+								NORMAL_EXEC,
+								true, 16);
+						coex_dm->ps_tdma_du_adj_type =
+									     16;
+					}
+				}  else if (result == 1) {
+					if (coex_dm->cur_ps_tdma == 8) {
+						halbtc8723b2ant_ps_tdma(
+								btcoexist,
+								NORMAL_EXEC,
+								true, 7);
+						coex_dm->ps_tdma_du_adj_type =
+									      7;
+					} else if (coex_dm->cur_ps_tdma == 7) {
+						halbtc8723b2ant_ps_tdma(
+								btcoexist,
+								NORMAL_EXEC,
+								true, 6);
+						coex_dm->ps_tdma_du_adj_type =
+									      6;
+					} else if (coex_dm->cur_ps_tdma == 6) {
+						halbtc8723b2ant_ps_tdma(
+								btcoexist,
+								NORMAL_EXEC,
+								true, 6);
+						coex_dm->ps_tdma_du_adj_type =
+									      6;
+					} else if (coex_dm->cur_ps_tdma == 16) {
+						halbtc8723b2ant_ps_tdma(
+								btcoexist,
+								NORMAL_EXEC,
+								true, 15);
+						coex_dm->ps_tdma_du_adj_type =
+									     15;
+					} else if (coex_dm->cur_ps_tdma == 15) {
+						halbtc8723b2ant_ps_tdma(
+								btcoexist,
+								NORMAL_EXEC,
+								true, 14);
+						coex_dm->ps_tdma_du_adj_type =
+									     14;
+					} else if (coex_dm->cur_ps_tdma == 14) {
+						halbtc8723b2ant_ps_tdma(
+								btcoexist,
+								NORMAL_EXEC,
+								true, 14);
+						coex_dm->ps_tdma_du_adj_type =
+									     14;
+					}
+				}
+			} else {
+				BTC_PRINT(BTC_MSG_ALGORITHM,
+					  ALGO_TRACE_FW_DETAIL,
+					  "[BTCoex], TxPause = 0\n");
+				if (coex_dm->cur_ps_tdma == 5) {
+					halbtc8723b2ant_ps_tdma(btcoexist,
+								NORMAL_EXEC,
+								true, 2);
+					coex_dm->ps_tdma_du_adj_type = 2;
+				} else if (coex_dm->cur_ps_tdma == 6) {
+					halbtc8723b2ant_ps_tdma(btcoexist,
+								NORMAL_EXEC,
+								true, 2);
+					coex_dm->ps_tdma_du_adj_type = 2;
+				} else if (coex_dm->cur_ps_tdma == 7) {
+					halbtc8723b2ant_ps_tdma(btcoexist,
+								NORMAL_EXEC,
+								true, 3);
+					coex_dm->ps_tdma_du_adj_type = 3;
+				} else if (coex_dm->cur_ps_tdma == 8) {
+					halbtc8723b2ant_ps_tdma(btcoexist,
+								NORMAL_EXEC,
+								true, 4);
+					coex_dm->ps_tdma_du_adj_type = 4;
+				}
+				if (coex_dm->cur_ps_tdma == 13) {
+					halbtc8723b2ant_ps_tdma(btcoexist,
+								NORMAL_EXEC,
+								true, 10);
+					coex_dm->ps_tdma_du_adj_type = 10;
+				} else if (coex_dm->cur_ps_tdma == 14) {
+					halbtc8723b2ant_ps_tdma(btcoexist,
+								NORMAL_EXEC,
+								true, 10);
+					coex_dm->ps_tdma_du_adj_type = 10;
+				} else if (coex_dm->cur_ps_tdma == 15) {
+					halbtc8723b2ant_ps_tdma(btcoexist,
+								NORMAL_EXEC,
+								true, 11);
+					coex_dm->ps_tdma_du_adj_type = 11;
+				} else if (coex_dm->cur_ps_tdma == 16) {
+					halbtc8723b2ant_ps_tdma(btcoexist,
+								NORMAL_EXEC,
+								true, 12);
+					coex_dm->ps_tdma_du_adj_type = 12;
+				}
+				if (result == -1) {
+					if (coex_dm->cur_ps_tdma == 1) {
+						halbtc8723b2ant_ps_tdma(
+								btcoexist,
+								NORMAL_EXEC,
+								true, 2);
+						coex_dm->ps_tdma_du_adj_type =
+									      2;
+					} else if (coex_dm->cur_ps_tdma == 2) {
+						halbtc8723b2ant_ps_tdma(
+								btcoexist,
+								NORMAL_EXEC,
+								true, 3);
+						coex_dm->ps_tdma_du_adj_type =
+									      3;
+					} else if (coex_dm->cur_ps_tdma == 3) {
+						halbtc8723b2ant_ps_tdma(
+								btcoexist,
+								NORMAL_EXEC,
+								true, 4);
+						coex_dm->ps_tdma_du_adj_type =
+									      4;
+					} else if (coex_dm->cur_ps_tdma == 9) {
+						halbtc8723b2ant_ps_tdma(
+								btcoexist,
+								NORMAL_EXEC,
+								true, 10);
+						coex_dm->ps_tdma_du_adj_type =
+									     10;
+					} else if (coex_dm->cur_ps_tdma == 10) {
+						halbtc8723b2ant_ps_tdma(
+								btcoexist,
+								NORMAL_EXEC,
+								true, 11);
+						coex_dm->ps_tdma_du_adj_type =
+									     11;
+					} else if (coex_dm->cur_ps_tdma == 11) {
+						halbtc8723b2ant_ps_tdma(
+								btcoexist,
+								NORMAL_EXEC,
+								true, 12);
+						coex_dm->ps_tdma_du_adj_type =
+									     12;
+					}
+				} else if (result == 1) {
+					if (coex_dm->cur_ps_tdma == 4) {
+						halbtc8723b2ant_ps_tdma(
+								btcoexist,
+								NORMAL_EXEC,
+								true, 3);
+						coex_dm->ps_tdma_du_adj_type =
+									      3;
+					} else if (coex_dm->cur_ps_tdma == 3) {
+						halbtc8723b2ant_ps_tdma(
+								btcoexist,
+								NORMAL_EXEC,
+								true, 2);
+						coex_dm->ps_tdma_du_adj_type =
+									      2;
+					} else if (coex_dm->cur_ps_tdma == 2) {
+						halbtc8723b2ant_ps_tdma(
+								btcoexist,
+								NORMAL_EXEC,
+								true, 2);
+						coex_dm->ps_tdma_du_adj_type =
+									      2;
+					} else if (coex_dm->cur_ps_tdma == 12) {
+						halbtc8723b2ant_ps_tdma(
+								btcoexist,
+								NORMAL_EXEC,
+								true, 11);
+						coex_dm->ps_tdma_du_adj_type =
+									     11;
+					} else if (coex_dm->cur_ps_tdma == 11) {
+						halbtc8723b2ant_ps_tdma(
+								btcoexist,
+								NORMAL_EXEC,
+								true, 10);
+						coex_dm->ps_tdma_du_adj_type =
+									     10;
+					} else if (coex_dm->cur_ps_tdma == 10) {
+						halbtc8723b2ant_ps_tdma(
+								btcoexist,
+								NORMAL_EXEC,
+								true, 10);
+						coex_dm->ps_tdma_du_adj_type =
+									     10;
+					}
+				}
+			}
+		} else if (max_interval == 3) {
+			if (tx_pause) {
+				BTC_PRINT(BTC_MSG_ALGORITHM,
+					  ALGO_TRACE_FW_DETAIL,
+					  "[BTCoex], TxPause = 1\n");
+				if (coex_dm->cur_ps_tdma == 1) {
+					halbtc8723b2ant_ps_tdma(btcoexist,
+								NORMAL_EXEC,
+								true, 7);
+					coex_dm->ps_tdma_du_adj_type = 7;
+				} else if (coex_dm->cur_ps_tdma == 2) {
+					halbtc8723b2ant_ps_tdma(btcoexist,
+								NORMAL_EXEC,
+								true, 7);
+					coex_dm->ps_tdma_du_adj_type = 7;
+				} else if (coex_dm->cur_ps_tdma == 3) {
+					halbtc8723b2ant_ps_tdma(btcoexist,
+								NORMAL_EXEC,
+								true, 7);
+					coex_dm->ps_tdma_du_adj_type = 7;
+				} else if (coex_dm->cur_ps_tdma == 4) {
+					halbtc8723b2ant_ps_tdma(btcoexist,
+								NORMAL_EXEC,
+								true, 8);
+					coex_dm->ps_tdma_du_adj_type = 8;
+				}
+				if (coex_dm->cur_ps_tdma == 9) {
+					halbtc8723b2ant_ps_tdma(btcoexist,
+								NORMAL_EXEC,
+								true, 15);
+					coex_dm->ps_tdma_du_adj_type = 15;
+				} else if (coex_dm->cur_ps_tdma == 10) {
+					halbtc8723b2ant_ps_tdma(btcoexist,
+								NORMAL_EXEC,
+								true, 15);
+					coex_dm->ps_tdma_du_adj_type = 15;
+				} else if (coex_dm->cur_ps_tdma == 11) {
+					halbtc8723b2ant_ps_tdma(btcoexist,
+								NORMAL_EXEC,
+								true, 15);
+					coex_dm->ps_tdma_du_adj_type = 15;
+				} else if (coex_dm->cur_ps_tdma == 12) {
+					halbtc8723b2ant_ps_tdma(btcoexist,
+								NORMAL_EXEC,
+								true, 16);
+					coex_dm->ps_tdma_du_adj_type = 16;
+				}
+				if (result == -1) {
+					if (coex_dm->cur_ps_tdma == 5) {
+						halbtc8723b2ant_ps_tdma(
+								btcoexist,
+								NORMAL_EXEC,
+								true, 7);
+						coex_dm->ps_tdma_du_adj_type =
+									      7;
+					} else if (coex_dm->cur_ps_tdma == 6) {
+						halbtc8723b2ant_ps_tdma(
+								btcoexist,
+								NORMAL_EXEC,
+								true, 7);
+						coex_dm->ps_tdma_du_adj_type =
+									      7;
+					} else if (coex_dm->cur_ps_tdma == 7) {
+						halbtc8723b2ant_ps_tdma(
+								btcoexist,
+								NORMAL_EXEC,
+								true, 8);
+						coex_dm->ps_tdma_du_adj_type =
+									      8;
+					} else if (coex_dm->cur_ps_tdma == 13) {
+						halbtc8723b2ant_ps_tdma(
+								btcoexist,
+								NORMAL_EXEC,
+								true, 15);
+						coex_dm->ps_tdma_du_adj_type =
+									     15;
+					} else if (coex_dm->cur_ps_tdma == 14) {
+						halbtc8723b2ant_ps_tdma(
+								btcoexist,
+								NORMAL_EXEC,
+								true, 15);
+						coex_dm->ps_tdma_du_adj_type =
+									     15;
+					} else if (coex_dm->cur_ps_tdma == 15) {
+						halbtc8723b2ant_ps_tdma(
+								btcoexist,
+								NORMAL_EXEC,
+								true, 16);
+						coex_dm->ps_tdma_du_adj_type =
+									     16;
+					}
+				}  else if (result == 1) {
+					if (coex_dm->cur_ps_tdma == 8) {
+						halbtc8723b2ant_ps_tdma(
+								btcoexist,
+								NORMAL_EXEC,
+								true, 7);
+						coex_dm->ps_tdma_du_adj_type =
+									      7;
+					} else if (coex_dm->cur_ps_tdma == 7) {
+						halbtc8723b2ant_ps_tdma(
+								btcoexist,
+								NORMAL_EXEC,
+								true, 7);
+						coex_dm->ps_tdma_du_adj_type =
+									      7;
+					} else if (coex_dm->cur_ps_tdma == 6) {
+						halbtc8723b2ant_ps_tdma(
+								btcoexist,
+								NORMAL_EXEC,
+								true, 7);
+						coex_dm->ps_tdma_du_adj_type =
+									      7;
+					} else if (coex_dm->cur_ps_tdma == 16) {
+						halbtc8723b2ant_ps_tdma(
+								btcoexist,
+								NORMAL_EXEC,
+								true, 15);
+						coex_dm->ps_tdma_du_adj_type =
+									     15;
+					} else if (coex_dm->cur_ps_tdma == 15) {
+						halbtc8723b2ant_ps_tdma(
+								btcoexist,
+								NORMAL_EXEC,
+								true, 15);
+						coex_dm->ps_tdma_du_adj_type =
+									     15;
+					} else if (coex_dm->cur_ps_tdma == 14) {
+						halbtc8723b2ant_ps_tdma(
+								btcoexist,
+								NORMAL_EXEC,
+								true, 15);
+						coex_dm->ps_tdma_du_adj_type =
+									     15;
+					}
+				}
+			} else {
+				BTC_PRINT(BTC_MSG_ALGORITHM,
+					  ALGO_TRACE_FW_DETAIL,
+					  "[BTCoex], TxPause = 0\n");
+				if (coex_dm->cur_ps_tdma == 5) {
+					halbtc8723b2ant_ps_tdma(btcoexist,
+								NORMAL_EXEC,
+								true, 3);
+					coex_dm->ps_tdma_du_adj_type = 3;
+				} else if (coex_dm->cur_ps_tdma == 6) {
+					halbtc8723b2ant_ps_tdma(btcoexist,
+								NORMAL_EXEC,
+								true, 3);
+					coex_dm->ps_tdma_du_adj_type = 3;
+				} else if (coex_dm->cur_ps_tdma == 7) {
+					halbtc8723b2ant_ps_tdma(btcoexist,
+								NORMAL_EXEC,
+								true, 3);
+					coex_dm->ps_tdma_du_adj_type = 3;
+				} else if (coex_dm->cur_ps_tdma == 8) {
+					halbtc8723b2ant_ps_tdma(btcoexist,
+								NORMAL_EXEC,
+								true, 4);
+					coex_dm->ps_tdma_du_adj_type = 4;
+				}
+				if (coex_dm->cur_ps_tdma == 13) {
+					halbtc8723b2ant_ps_tdma(btcoexist,
+								NORMAL_EXEC,
+								true, 11);
+					coex_dm->ps_tdma_du_adj_type = 11;
+				} else if (coex_dm->cur_ps_tdma == 14) {
+					halbtc8723b2ant_ps_tdma(btcoexist,
+								NORMAL_EXEC,
+								true, 11);
+					coex_dm->ps_tdma_du_adj_type = 11;
+				} else if (coex_dm->cur_ps_tdma == 15) {
+					halbtc8723b2ant_ps_tdma(btcoexist,
+								NORMAL_EXEC,
+								true, 11);
+					coex_dm->ps_tdma_du_adj_type = 11;
+				} else if (coex_dm->cur_ps_tdma == 16) {
+					halbtc8723b2ant_ps_tdma(btcoexist,
+								NORMAL_EXEC,
+								true, 12);
+					coex_dm->ps_tdma_du_adj_type = 12;
+				}
+				if (result == -1) {
+					if (coex_dm->cur_ps_tdma == 1) {
+						halbtc8723b2ant_ps_tdma(
+								btcoexist,
+								NORMAL_EXEC,
+								true, 3);
+						coex_dm->ps_tdma_du_adj_type =
+									      3;
+					} else if (coex_dm->cur_ps_tdma == 2) {
+						halbtc8723b2ant_ps_tdma(
+								btcoexist,
+								NORMAL_EXEC,
+								true, 3);
+						coex_dm->ps_tdma_du_adj_type =
+									      3;
+					} else if (coex_dm->cur_ps_tdma == 3) {
+						halbtc8723b2ant_ps_tdma(
+								btcoexist,
+								NORMAL_EXEC,
+								true, 4);
+						coex_dm->ps_tdma_du_adj_type =
+									      4;
+					} else if (coex_dm->cur_ps_tdma == 9) {
+						halbtc8723b2ant_ps_tdma(
+								btcoexist,
+								NORMAL_EXEC,
+								true, 11);
+						coex_dm->ps_tdma_du_adj_type =
+									     11;
+					} else if (coex_dm->cur_ps_tdma == 10) {
+						halbtc8723b2ant_ps_tdma(
+								btcoexist,
+								NORMAL_EXEC,
+								true, 11);
+						coex_dm->ps_tdma_du_adj_type =
+									     11;
+					} else if (coex_dm->cur_ps_tdma == 11) {
+						halbtc8723b2ant_ps_tdma(
+								btcoexist,
+								NORMAL_EXEC,
+								true, 12);
+						coex_dm->ps_tdma_du_adj_type =
+									     12;
+					}
+				} else if (result == 1) {
+					if (coex_dm->cur_ps_tdma == 4) {
+						halbtc8723b2ant_ps_tdma(
+								btcoexist,
+								NORMAL_EXEC,
+								true, 3);
+						coex_dm->ps_tdma_du_adj_type =
+									      3;
+					} else if (coex_dm->cur_ps_tdma == 3) {
+						halbtc8723b2ant_ps_tdma(
+								btcoexist,
+								NORMAL_EXEC,
+								true, 3);
+						coex_dm->ps_tdma_du_adj_type =
+									      3;
+					} else if (coex_dm->cur_ps_tdma == 2) {
+						halbtc8723b2ant_ps_tdma(
+								btcoexist,
+								NORMAL_EXEC,
+								true, 3);
+						coex_dm->ps_tdma_du_adj_type =
+									      3;
+					} else if (coex_dm->cur_ps_tdma == 12) {
+						halbtc8723b2ant_ps_tdma(
+								btcoexist,
+								NORMAL_EXEC,
+								true, 11);
+						coex_dm->ps_tdma_du_adj_type =
+									     11;
+					} else if (coex_dm->cur_ps_tdma == 11) {
+						halbtc8723b2ant_ps_tdma(
+								btcoexist,
+								NORMAL_EXEC,
+								true, 11);
+						coex_dm->ps_tdma_du_adj_type =
+									     11;
+					} else if (coex_dm->cur_ps_tdma == 10) {
+						halbtc8723b2ant_ps_tdma(
+								btcoexist,
+								NORMAL_EXEC,
+								true, 11);
+						coex_dm->ps_tdma_du_adj_type =
+									     11;
+					}
+				}
+			}
+		}
+	}
+
+	/* If the current PsTdma does not match the recorded one (e.g. during
+	 * scan or DHCP), adjust it back to the previously recorded setting.
+	 */
+	if (coex_dm->cur_ps_tdma != coex_dm->ps_tdma_du_adj_type) {
+		bool scan = false, link = false, roam = false;
+		BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_DETAIL,
+			  "[BTCoex], PsTdma type mismatch!!!, "
+			  "curPsTdma=%d, recordPsTdma=%d\n",
+			  coex_dm->cur_ps_tdma, coex_dm->ps_tdma_du_adj_type);
+
+		btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_SCAN, &scan);
+		btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_LINK, &link);
+		btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_ROAM, &roam);
+
+		if (!scan && !link && !roam)
+			halbtc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC, true,
+						coex_dm->ps_tdma_du_adj_type);
+		else
+			BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_DETAIL,
+				  "[BTCoex], roaming/link/scan is in"
+				  " progress, will adjust next time!!!\n");
+	}
+}
+
+/* SCO only or SCO+PAN(HS) */
+void halbtc8723b2ant_action_sco(struct btc_coexist *btcoexist)
+{
+	u8 wifi_rssi_state;
+	u32 wifi_bw;
+
+	wifi_rssi_state = halbtc8723b2ant_wifi_rssi_state(btcoexist,
+							  0, 2, 15, 0);
+
+	btcoexist->btc_set_rf_reg(btcoexist, BTC_RF_A, 0x1, 0xfffff, 0x0);
+
+	halbtc8723b2ant_fw_dac_swing_lvl(btcoexist, NORMAL_EXEC, 4);
+
+	if (halbtc8723b2ant_need_to_dec_bt_pwr(btcoexist))
+		halbtc8723b2ant_dec_bt_pwr(btcoexist, NORMAL_EXEC, true);
+	else
+		halbtc8723b2ant_dec_bt_pwr(btcoexist, NORMAL_EXEC, false);
+
+	btcoexist->btc_get(btcoexist, BTC_GET_U4_WIFI_BW, &wifi_bw);
+
+	/* for SCO quality at 11b/g mode */
+	if (BTC_WIFI_BW_LEGACY == wifi_bw)
+		halbtc8723b2ant_coex_table_with_type(btcoexist, NORMAL_EXEC, 2);
+	else /* for SCO quality & wifi performance balance at 11n mode */
+		halbtc8723b2ant_coex_table_with_type(btcoexist, NORMAL_EXEC, 8);
+
+	/* for voice quality */
+	halbtc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC, false, 0);
+
+	/* sw mechanism */
+	if (BTC_WIFI_BW_HT40 == wifi_bw) {
+		if ((wifi_rssi_state == BTC_RSSI_STATE_HIGH) ||
+		    (wifi_rssi_state == BTC_RSSI_STATE_STAY_HIGH)) {
+			halbtc8723b2ant_sw_mechanism1(btcoexist, true, true,
+						      false, false);
+			halbtc8723b2ant_sw_mechanism2(btcoexist, true, false,
+						      true, 0x4);
+		} else {
+			halbtc8723b2ant_sw_mechanism1(btcoexist, true, true,
+						      false, false);
+			halbtc8723b2ant_sw_mechanism2(btcoexist, false, false,
+						      true, 0x4);
+		}
+	} else {
+		if ((wifi_rssi_state == BTC_RSSI_STATE_HIGH) ||
+		    (wifi_rssi_state == BTC_RSSI_STATE_STAY_HIGH)) {
+			halbtc8723b2ant_sw_mechanism1(btcoexist, false, true,
+						      false, false);
+			halbtc8723b2ant_sw_mechanism2(btcoexist, true, false,
+						      true, 0x4);
+		} else {
+			halbtc8723b2ant_sw_mechanism1(btcoexist, false, true,
+						      false, false);
+			halbtc8723b2ant_sw_mechanism2(btcoexist, false, false,
+						      true, 0x4);
+		}
+	}
+}
+
+void halbtc8723b2ant_action_hid(struct btc_coexist *btcoexist)
+{
+	u8 wifi_rssi_state, bt_rssi_state;
+	u32 wifi_bw;
+
+	wifi_rssi_state = halbtc8723b2ant_wifi_rssi_state(btcoexist,
+							  0, 2, 15, 0);
+	bt_rssi_state = halbtc8723b2ant_bt_rssi_state(2, 29, 0);
+
+	btcoexist->btc_set_rf_reg(btcoexist, BTC_RF_A, 0x1, 0xfffff, 0x0);
+
+	halbtc8723b2ant_fw_dac_swing_lvl(btcoexist, NORMAL_EXEC, 6);
+
+	if (halbtc8723b2ant_need_to_dec_bt_pwr(btcoexist))
+		halbtc8723b2ant_dec_bt_pwr(btcoexist, NORMAL_EXEC, true);
+	else
+		halbtc8723b2ant_dec_bt_pwr(btcoexist, NORMAL_EXEC, false);
+
+	btcoexist->btc_get(btcoexist, BTC_GET_U4_WIFI_BW, &wifi_bw);
+
+	if (BTC_WIFI_BW_LEGACY == wifi_bw) /* for HID at 11b/g mode */
+		halbtc8723b2ant_coex_table_with_type(btcoexist, NORMAL_EXEC, 7);
+	else /* for HID quality & wifi performance balance at 11n mode */
+		halbtc8723b2ant_coex_table_with_type(btcoexist, NORMAL_EXEC, 9);
+
+	if ((bt_rssi_state == BTC_RSSI_STATE_HIGH) ||
+	    (bt_rssi_state == BTC_RSSI_STATE_STAY_HIGH))
+		halbtc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 9);
+	else
+		halbtc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 13);
+
+	/* sw mechanism */
+	if (BTC_WIFI_BW_HT40 == wifi_bw) {
+		if ((wifi_rssi_state == BTC_RSSI_STATE_HIGH) ||
+		    (wifi_rssi_state == BTC_RSSI_STATE_STAY_HIGH)) {
+			halbtc8723b2ant_sw_mechanism1(btcoexist, true, true,
+						      false, false);
+			halbtc8723b2ant_sw_mechanism2(btcoexist, true, false,
+						      false, 0x18);
+		} else {
+			halbtc8723b2ant_sw_mechanism1(btcoexist, true, true,
+						      false, false);
+			halbtc8723b2ant_sw_mechanism2(btcoexist, false, false,
+						      false, 0x18);
+		}
+	} else {
+		if ((wifi_rssi_state == BTC_RSSI_STATE_HIGH) ||
+		    (wifi_rssi_state == BTC_RSSI_STATE_STAY_HIGH)) {
+			halbtc8723b2ant_sw_mechanism1(btcoexist, false, true,
+						      false, false);
+			halbtc8723b2ant_sw_mechanism2(btcoexist, true, false,
+						      false, 0x18);
+		} else {
+			halbtc8723b2ant_sw_mechanism1(btcoexist, false, true,
+						      false, false);
+			halbtc8723b2ant_sw_mechanism2(btcoexist, false, false,
+						      false, 0x18);
+		}
+	}
+}
+
+/* A2DP only / PAN(EDR) only / A2DP+PAN(HS) */
+void halbtc8723b2ant_action_a2dp(struct btc_coexist *btcoexist)
+{
+	u8 wifi_rssi_state, wifi_rssi_state1, bt_rssi_state;
+	u32 wifi_bw;
+	u8 ap_num = 0;
+
+	wifi_rssi_state = halbtc8723b2ant_wifi_rssi_state(btcoexist,
+							  0, 2, 15, 0);
+	wifi_rssi_state1 = halbtc8723b2ant_wifi_rssi_state(btcoexist,
+							   1, 2, 40, 0);
+	bt_rssi_state = halbtc8723b2ant_bt_rssi_state(2, 29, 0);
+
+	btcoexist->btc_get(btcoexist, BTC_GET_U1_AP_NUM, &ap_num);
+
+	/* define the office environment
+	 * (the driver does not know the AP count on Linux, so this branch
+	 * is never entered)
+	 */
+	if (ap_num >= 10 && BTC_RSSI_HIGH(wifi_rssi_state1)) {
+		btcoexist->btc_set_rf_reg(btcoexist, BTC_RF_A, 0x1, 0xfffff,
+					  0x0);
+		halbtc8723b2ant_fw_dac_swing_lvl(btcoexist, NORMAL_EXEC, 6);
+		halbtc8723b2ant_dec_bt_pwr(btcoexist, NORMAL_EXEC, false);
+		halbtc8723b2ant_coex_table_with_type(btcoexist, NORMAL_EXEC, 0);
+		halbtc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC, false, 1);
+
+		/* sw mechanism */
+		btcoexist->btc_get(btcoexist, BTC_GET_U4_WIFI_BW, &wifi_bw);
+		if (BTC_WIFI_BW_HT40 == wifi_bw) {
+			halbtc8723b2ant_sw_mechanism1(btcoexist, true, false,
+						      false, false);
+			halbtc8723b2ant_sw_mechanism2(btcoexist, true, false,
+						      true, 0x18);
+		} else {
+			halbtc8723b2ant_sw_mechanism1(btcoexist, false, false,
+						      false, false);
+			halbtc8723b2ant_sw_mechanism2(btcoexist, true, false,
+						      true, 0x18);
+		}
+		return;
+	}
+
+	btcoexist->btc_set_rf_reg(btcoexist, BTC_RF_A, 0x1, 0xfffff, 0x0);
+
+	halbtc8723b2ant_fw_dac_swing_lvl(btcoexist, NORMAL_EXEC, 6);
+
+	if (halbtc8723b2ant_need_to_dec_bt_pwr(btcoexist))
+		halbtc8723b2ant_dec_bt_pwr(btcoexist, NORMAL_EXEC, true);
+	else
+		halbtc8723b2ant_dec_bt_pwr(btcoexist, NORMAL_EXEC, false);
+
+	halbtc8723b2ant_coex_table_with_type(btcoexist, NORMAL_EXEC, 7);
+
+	if ((bt_rssi_state == BTC_RSSI_STATE_HIGH) ||
+	    (bt_rssi_state == BTC_RSSI_STATE_STAY_HIGH))
+		halbtc8723b2ant_tdma_duration_adjust(btcoexist, false,
+						     false, 1);
+	else
+		halbtc8723b2ant_tdma_duration_adjust(btcoexist, false,
+						     true, 1);
+
+	/* sw mechanism */
+	btcoexist->btc_get(btcoexist, BTC_GET_U4_WIFI_BW, &wifi_bw);
+	if (BTC_WIFI_BW_HT40 == wifi_bw) {
+		if ((wifi_rssi_state == BTC_RSSI_STATE_HIGH) ||
+		    (wifi_rssi_state == BTC_RSSI_STATE_STAY_HIGH)) {
+			halbtc8723b2ant_sw_mechanism1(btcoexist, true, false,
+						      false, false);
+			halbtc8723b2ant_sw_mechanism2(btcoexist, true, false,
+						      false, 0x18);
+		} else {
+			halbtc8723b2ant_sw_mechanism1(btcoexist, true, false,
+						      false, false);
+			halbtc8723b2ant_sw_mechanism2(btcoexist, false, false,
+						      false, 0x18);
+		}
+	} else {
+		if ((wifi_rssi_state == BTC_RSSI_STATE_HIGH) ||
+		    (wifi_rssi_state == BTC_RSSI_STATE_STAY_HIGH)) {
+			halbtc8723b2ant_sw_mechanism1(btcoexist, false, false,
+						      false, false);
+			halbtc8723b2ant_sw_mechanism2(btcoexist, true, false,
+						      false, 0x18);
+		} else {
+			halbtc8723b2ant_sw_mechanism1(btcoexist, false, false,
+						      false, false);
+			halbtc8723b2ant_sw_mechanism2(btcoexist, false, false,
+						      false, 0x18);
+		}
+	}
+}
+
+void halbtc8723b2ant_action_a2dp_pan_hs(struct btc_coexist *btcoexist)
+{
+	u8 wifi_rssi_state;
+	u32 wifi_bw;
+
+	wifi_rssi_state = halbtc8723b2ant_wifi_rssi_state(btcoexist,
+							  0, 2, 15, 0);
+
+	btcoexist->btc_set_rf_reg(btcoexist, BTC_RF_A, 0x1, 0xfffff, 0x0);
+
+	halbtc8723b2ant_fw_dac_swing_lvl(btcoexist, NORMAL_EXEC, 6);
+
+	if (halbtc8723b2ant_need_to_dec_bt_pwr(btcoexist))
+		halbtc8723b2ant_dec_bt_pwr(btcoexist, NORMAL_EXEC, true);
+	else
+		halbtc8723b2ant_dec_bt_pwr(btcoexist, NORMAL_EXEC, false);
+
+	halbtc8723b2ant_coex_table_with_type(btcoexist, NORMAL_EXEC, 7);
+
+	halbtc8723b2ant_tdma_duration_adjust(btcoexist, false, true, 2);
+
+	/* sw mechanism */
+	btcoexist->btc_get(btcoexist, BTC_GET_U4_WIFI_BW, &wifi_bw);
+	if (BTC_WIFI_BW_HT40 == wifi_bw) {
+		if ((wifi_rssi_state == BTC_RSSI_STATE_HIGH) ||
+		    (wifi_rssi_state == BTC_RSSI_STATE_STAY_HIGH)) {
+			halbtc8723b2ant_sw_mechanism1(btcoexist, true, false,
+						      false, false);
+			halbtc8723b2ant_sw_mechanism2(btcoexist, true, false,
+						      false, 0x18);
+		} else {
+			halbtc8723b2ant_sw_mechanism1(btcoexist, true, false,
+						      false, false);
+			halbtc8723b2ant_sw_mechanism2(btcoexist, false, false,
+						      false, 0x18);
+		}
+	} else {
+		if ((wifi_rssi_state == BTC_RSSI_STATE_HIGH) ||
+		    (wifi_rssi_state == BTC_RSSI_STATE_STAY_HIGH)) {
+			halbtc8723b2ant_sw_mechanism1(btcoexist, false, false,
+						      false, false);
+			halbtc8723b2ant_sw_mechanism2(btcoexist, true, false,
+						      false, 0x18);
+		} else {
+			halbtc8723b2ant_sw_mechanism1(btcoexist, false, false,
+						      false, false);
+			halbtc8723b2ant_sw_mechanism2(btcoexist, false, false,
+						      false, 0x18);
+		}
+	}
+}
+
+void halbtc8723b2ant_action_pan_edr(struct btc_coexist *btcoexist)
+{
+	u8 wifi_rssi_state, bt_rssi_state;
+	u32 wifi_bw;
+
+	wifi_rssi_state = halbtc8723b2ant_wifi_rssi_state(btcoexist,
+							  0, 2, 15, 0);
+	bt_rssi_state = halbtc8723b2ant_bt_rssi_state(2, 29, 0);
+
+	btcoexist->btc_set_rf_reg(btcoexist, BTC_RF_A, 0x1, 0xfffff, 0x0);
+
+	halbtc8723b2ant_fw_dac_swing_lvl(btcoexist, NORMAL_EXEC, 6);
+
+	if (halbtc8723b2ant_need_to_dec_bt_pwr(btcoexist))
+		halbtc8723b2ant_dec_bt_pwr(btcoexist, NORMAL_EXEC, true);
+	else
+		halbtc8723b2ant_dec_bt_pwr(btcoexist, NORMAL_EXEC, false);
+
+	halbtc8723b2ant_coex_table_with_type(btcoexist, NORMAL_EXEC, 10);
+
+	if ((bt_rssi_state == BTC_RSSI_STATE_HIGH) ||
+	    (bt_rssi_state == BTC_RSSI_STATE_STAY_HIGH))
+		halbtc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 1);
+	else
+		halbtc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 5);
+
+	/* sw mechanism */
+	btcoexist->btc_get(btcoexist, BTC_GET_U4_WIFI_BW, &wifi_bw);
+	if (BTC_WIFI_BW_HT40 == wifi_bw) {
+		if ((wifi_rssi_state == BTC_RSSI_STATE_HIGH) ||
+		    (wifi_rssi_state == BTC_RSSI_STATE_STAY_HIGH)) {
+			halbtc8723b2ant_sw_mechanism1(btcoexist, true, false,
+						      false, false);
+			halbtc8723b2ant_sw_mechanism2(btcoexist, true, false,
+						      false, 0x18);
+		} else {
+			halbtc8723b2ant_sw_mechanism1(btcoexist, true, false,
+						      false, false);
+			halbtc8723b2ant_sw_mechanism2(btcoexist, false, false,
+						      false, 0x18);
+		}
+	} else {
+		if ((wifi_rssi_state == BTC_RSSI_STATE_HIGH) ||
+		    (wifi_rssi_state == BTC_RSSI_STATE_STAY_HIGH)) {
+			halbtc8723b2ant_sw_mechanism1(btcoexist, false, false,
+						      false, false);
+			halbtc8723b2ant_sw_mechanism2(btcoexist, true, false,
+						      false, 0x18);
+		} else {
+			halbtc8723b2ant_sw_mechanism1(btcoexist, false, false,
+						      false, false);
+			halbtc8723b2ant_sw_mechanism2(btcoexist, false, false,
+						      false, 0x18);
+		}
+	}
+}
+
+/* PAN(HS) only */
+void halbtc8723b2ant_action_pan_hs(struct btc_coexist *btcoexist)
+{
+	u8 wifi_rssi_state;
+	u32 wifi_bw;
+
+	wifi_rssi_state = halbtc8723b2ant_wifi_rssi_state(btcoexist,
+							  0, 2, 15, 0);
+
+	btcoexist->btc_set_rf_reg(btcoexist, BTC_RF_A, 0x1, 0xfffff, 0x0);
+
+	halbtc8723b2ant_fw_dac_swing_lvl(btcoexist, NORMAL_EXEC, 6);
+
+	if ((wifi_rssi_state == BTC_RSSI_STATE_HIGH) ||
+	    (wifi_rssi_state == BTC_RSSI_STATE_STAY_HIGH))
+		halbtc8723b2ant_dec_bt_pwr(btcoexist, NORMAL_EXEC, true);
+	else
+		halbtc8723b2ant_dec_bt_pwr(btcoexist, NORMAL_EXEC, false);
+
+	halbtc8723b2ant_coex_table_with_type(btcoexist, NORMAL_EXEC, 7);
+
+	halbtc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC, false, 1);
+
+	btcoexist->btc_get(btcoexist, BTC_GET_U4_WIFI_BW, &wifi_bw);
+	if (BTC_WIFI_BW_HT40 == wifi_bw) {
+		if ((wifi_rssi_state == BTC_RSSI_STATE_HIGH) ||
+		    (wifi_rssi_state == BTC_RSSI_STATE_STAY_HIGH)) {
+			halbtc8723b2ant_sw_mechanism1(btcoexist, true, false,
+						      false, false);
+			halbtc8723b2ant_sw_mechanism2(btcoexist, true, false,
+						      false, 0x18);
+		} else {
+			halbtc8723b2ant_sw_mechanism1(btcoexist, true, false,
+						      false, false);
+			halbtc8723b2ant_sw_mechanism2(btcoexist, false, false,
+						      false, 0x18);
+		}
+	} else {
+		if ((wifi_rssi_state == BTC_RSSI_STATE_HIGH) ||
+		    (wifi_rssi_state == BTC_RSSI_STATE_STAY_HIGH)) {
+			halbtc8723b2ant_sw_mechanism1(btcoexist, false, false,
+						      false, false);
+			halbtc8723b2ant_sw_mechanism2(btcoexist, true, false,
+						      false, 0x18);
+		} else {
+			halbtc8723b2ant_sw_mechanism1(btcoexist, false, false,
+						      false, false);
+			halbtc8723b2ant_sw_mechanism2(btcoexist, false, false,
+						      false, 0x18);
+		}
+	}
+}
+
+/* PAN(EDR)+A2DP */
+void halbtc8723b2ant_action_pan_edr_a2dp(struct btc_coexist *btcoexist)
+{
+	u8 wifi_rssi_state, bt_rssi_state;
+	u32 wifi_bw;
+
+	wifi_rssi_state = halbtc8723b2ant_wifi_rssi_state(btcoexist,
+							  0, 2, 15, 0);
+	bt_rssi_state = halbtc8723b2ant_bt_rssi_state(2, 29, 0);
+
+	btcoexist->btc_set_rf_reg(btcoexist, BTC_RF_A, 0x1, 0xfffff, 0x0);
+
+	halbtc8723b2ant_fw_dac_swing_lvl(btcoexist, NORMAL_EXEC, 6);
+
+	if (halbtc8723b2ant_need_to_dec_bt_pwr(btcoexist))
+		halbtc8723b2ant_dec_bt_pwr(btcoexist, NORMAL_EXEC, true);
+	else
+		halbtc8723b2ant_dec_bt_pwr(btcoexist, NORMAL_EXEC, false);
+
+	btcoexist->btc_get(btcoexist, BTC_GET_U4_WIFI_BW, &wifi_bw);
+
+	if ((bt_rssi_state == BTC_RSSI_STATE_HIGH) ||
+	    (bt_rssi_state == BTC_RSSI_STATE_STAY_HIGH)) {
+		halbtc8723b2ant_coex_table_with_type(btcoexist, NORMAL_EXEC,
+						     12);
+		if (BTC_WIFI_BW_HT40 == wifi_bw)
+			halbtc8723b2ant_tdma_duration_adjust(btcoexist, false,
+							     true, 3);
+		else
+			halbtc8723b2ant_tdma_duration_adjust(btcoexist, false,
+							     false, 3);
+	} else {
+		halbtc8723b2ant_coex_table_with_type(btcoexist, NORMAL_EXEC, 7);
+		halbtc8723b2ant_tdma_duration_adjust(btcoexist, false, true, 3);
+	}
+
+	/* sw mechanism	*/
+	if (BTC_WIFI_BW_HT40 == wifi_bw) {
+		if ((wifi_rssi_state == BTC_RSSI_STATE_HIGH) ||
+		    (wifi_rssi_state == BTC_RSSI_STATE_STAY_HIGH)) {
+			halbtc8723b2ant_sw_mechanism1(btcoexist, true, false,
+						      false, false);
+			halbtc8723b2ant_sw_mechanism2(btcoexist, true, false,
+						      false, 0x18);
+		} else {
+			halbtc8723b2ant_sw_mechanism1(btcoexist, true, false,
+						      false, false);
+			halbtc8723b2ant_sw_mechanism2(btcoexist, false, false,
+						      false, 0x18);
+		}
+	} else {
+		if ((wifi_rssi_state == BTC_RSSI_STATE_HIGH) ||
+		    (wifi_rssi_state == BTC_RSSI_STATE_STAY_HIGH)) {
+			halbtc8723b2ant_sw_mechanism1(btcoexist, false, false,
+						      false, false);
+			halbtc8723b2ant_sw_mechanism2(btcoexist, true, false,
+						      false, 0x18);
+		} else {
+			halbtc8723b2ant_sw_mechanism1(btcoexist, false, false,
+						      false, false);
+			halbtc8723b2ant_sw_mechanism2(btcoexist, false, false,
+						      false, 0x18);
+		}
+	}
+}
+
+void halbtc8723b2ant_action_pan_edr_hid(struct btc_coexist *btcoexist)
+{
+	u8 wifi_rssi_state, bt_rssi_state;
+	u32 wifi_bw;
+
+	wifi_rssi_state = halbtc8723b2ant_wifi_rssi_state(btcoexist,
+							  0, 2, 15, 0);
+	bt_rssi_state = halbtc8723b2ant_bt_rssi_state(2, 29, 0);
+	btcoexist->btc_get(btcoexist, BTC_GET_U4_WIFI_BW, &wifi_bw);
+
+	if (halbtc8723b2ant_need_to_dec_bt_pwr(btcoexist))
+		halbtc8723b2ant_dec_bt_pwr(btcoexist, NORMAL_EXEC, true);
+	else
+		halbtc8723b2ant_dec_bt_pwr(btcoexist, NORMAL_EXEC, false);
+
+	if ((bt_rssi_state == BTC_RSSI_STATE_HIGH) ||
+	    (bt_rssi_state == BTC_RSSI_STATE_STAY_HIGH)) {
+		if (BTC_WIFI_BW_HT40 == wifi_bw) {
+			halbtc8723b2ant_fw_dac_swing_lvl(btcoexist, NORMAL_EXEC,
+							 3);
+			halbtc8723b2ant_coex_table_with_type(btcoexist,
+							     NORMAL_EXEC, 11);
+			btcoexist->btc_set_rf_reg(btcoexist, BTC_RF_A, 0x1,
+						  0xfffff, 0x780);
+		} else {
+			halbtc8723b2ant_fw_dac_swing_lvl(btcoexist, NORMAL_EXEC,
+							 6);
+			halbtc8723b2ant_coex_table_with_type(btcoexist,
+							     NORMAL_EXEC, 7);
+			btcoexist->btc_set_rf_reg(btcoexist, BTC_RF_A, 0x1,
+						  0xfffff, 0x0);
+		}
+		halbtc8723b2ant_tdma_duration_adjust(btcoexist, true, false, 2);
+	} else {
+		halbtc8723b2ant_fw_dac_swing_lvl(btcoexist, NORMAL_EXEC, 6);
+		halbtc8723b2ant_coex_table_with_type(btcoexist, NORMAL_EXEC,
+						     11);
+		btcoexist->btc_set_rf_reg(btcoexist, BTC_RF_A, 0x1, 0xfffff,
+					  0x0);
+		halbtc8723b2ant_tdma_duration_adjust(btcoexist, true, true, 2);
+	}
+
+	/* sw mechanism */
+	if (BTC_WIFI_BW_HT40 == wifi_bw) {
+		if ((wifi_rssi_state == BTC_RSSI_STATE_HIGH) ||
+		    (wifi_rssi_state == BTC_RSSI_STATE_STAY_HIGH)) {
+			halbtc8723b2ant_sw_mechanism1(btcoexist, true, true,
+						      false, false);
+			halbtc8723b2ant_sw_mechanism2(btcoexist, true, false,
+						      false, 0x18);
+		} else {
+			halbtc8723b2ant_sw_mechanism1(btcoexist, true, true,
+						      false, false);
+			halbtc8723b2ant_sw_mechanism2(btcoexist, false, false,
+						      false, 0x18);
+		}
+	} else {
+		if ((wifi_rssi_state == BTC_RSSI_STATE_HIGH) ||
+		    (wifi_rssi_state == BTC_RSSI_STATE_STAY_HIGH)) {
+			halbtc8723b2ant_sw_mechanism1(btcoexist, false, true,
+						      false, false);
+			halbtc8723b2ant_sw_mechanism2(btcoexist, true, false,
+						      false, 0x18);
+		} else {
+			halbtc8723b2ant_sw_mechanism1(btcoexist, false, true,
+						      false, false);
+			halbtc8723b2ant_sw_mechanism2(btcoexist, false, false,
+						      false, 0x18);
+		}
+	}
+}
+
+/* HID+A2DP+PAN(EDR) */
+void halbtc8723b2ant_action_hid_a2dp_pan_edr(struct btc_coexist *btcoexist)
+{
+	u8 wifi_rssi_state, bt_rssi_state;
+	u32 wifi_bw;
+
+	wifi_rssi_state = halbtc8723b2ant_wifi_rssi_state(btcoexist,
+							  0, 2, 15, 0);
+	bt_rssi_state = halbtc8723b2ant_bt_rssi_state(2, 29, 0);
+
+	btcoexist->btc_set_rf_reg(btcoexist, BTC_RF_A, 0x1, 0xfffff, 0x0);
+
+	halbtc8723b2ant_fw_dac_swing_lvl(btcoexist, NORMAL_EXEC, 6);
+
+	if (halbtc8723b2ant_need_to_dec_bt_pwr(btcoexist))
+		halbtc8723b2ant_dec_bt_pwr(btcoexist, NORMAL_EXEC, true);
+	else
+		halbtc8723b2ant_dec_bt_pwr(btcoexist, NORMAL_EXEC, false);
+
+	btcoexist->btc_get(btcoexist, BTC_GET_U4_WIFI_BW, &wifi_bw);
+
+	halbtc8723b2ant_coex_table_with_type(btcoexist, NORMAL_EXEC, 7);
+
+	if ((bt_rssi_state == BTC_RSSI_STATE_HIGH) ||
+	    (bt_rssi_state == BTC_RSSI_STATE_STAY_HIGH)) {
+		if (BTC_WIFI_BW_HT40 == wifi_bw)
+			halbtc8723b2ant_tdma_duration_adjust(btcoexist, true,
+							     true, 2);
+		else
+			halbtc8723b2ant_tdma_duration_adjust(btcoexist, true,
+							     false, 3);
+	} else {
+		halbtc8723b2ant_tdma_duration_adjust(btcoexist, true, true, 3);
+	}
+
+	/* sw mechanism */
+	if (BTC_WIFI_BW_HT40 == wifi_bw) {
+		if ((wifi_rssi_state == BTC_RSSI_STATE_HIGH) ||
+		    (wifi_rssi_state == BTC_RSSI_STATE_STAY_HIGH)) {
+			halbtc8723b2ant_sw_mechanism1(btcoexist, true, true,
+						      false, false);
+			halbtc8723b2ant_sw_mechanism2(btcoexist, true, false,
+						      false, 0x18);
+		} else {
+			halbtc8723b2ant_sw_mechanism1(btcoexist, true, true,
+						      false, false);
+			halbtc8723b2ant_sw_mechanism2(btcoexist, false, false,
+						      false, 0x18);
+		}
+	} else {
+		if ((wifi_rssi_state == BTC_RSSI_STATE_HIGH) ||
+		    (wifi_rssi_state == BTC_RSSI_STATE_STAY_HIGH)) {
+			halbtc8723b2ant_sw_mechanism1(btcoexist, false, true,
+						      false, false);
+			halbtc8723b2ant_sw_mechanism2(btcoexist, true, false,
+						      false, 0x18);
+		} else {
+			halbtc8723b2ant_sw_mechanism1(btcoexist, false, true,
+						      false, false);
+			halbtc8723b2ant_sw_mechanism2(btcoexist, false, false,
+						      false, 0x18);
+		}
+	}
+}
+
+void halbtc8723b2ant_action_hid_a2dp(struct btc_coexist *btcoexist)
+{
+	u8 wifi_rssi_state, bt_rssi_state;
+	u32 wifi_bw;
+
+	wifi_rssi_state = halbtc8723b2ant_wifi_rssi_state(btcoexist,
+							  0, 2, 15, 0);
+	bt_rssi_state = halbtc8723b2ant_bt_rssi_state(2, 29, 0);
+
+	btcoexist->btc_set_rf_reg(btcoexist, BTC_RF_A, 0x1, 0xfffff, 0x0);
+
+	halbtc8723b2ant_fw_dac_swing_lvl(btcoexist, NORMAL_EXEC, 6);
+
+	if (halbtc8723b2ant_need_to_dec_bt_pwr(btcoexist))
+		halbtc8723b2ant_dec_bt_pwr(btcoexist, NORMAL_EXEC, true);
+	else
+		halbtc8723b2ant_dec_bt_pwr(btcoexist, NORMAL_EXEC, false);
+
+	btcoexist->btc_get(btcoexist, BTC_GET_U4_WIFI_BW, &wifi_bw);
+
+	halbtc8723b2ant_coex_table_with_type(btcoexist, NORMAL_EXEC, 7);
+
+	if ((bt_rssi_state == BTC_RSSI_STATE_HIGH) ||
+	    (bt_rssi_state == BTC_RSSI_STATE_STAY_HIGH))
+		halbtc8723b2ant_tdma_duration_adjust(btcoexist, true, false, 2);
+	else
+		halbtc8723b2ant_tdma_duration_adjust(btcoexist, true, true, 2);
+
+	/* sw mechanism */
+	if (BTC_WIFI_BW_HT40 == wifi_bw) {
+		if ((wifi_rssi_state == BTC_RSSI_STATE_HIGH) ||
+		    (wifi_rssi_state == BTC_RSSI_STATE_STAY_HIGH)) {
+			halbtc8723b2ant_sw_mechanism1(btcoexist, true, true,
+						      false, false);
+			halbtc8723b2ant_sw_mechanism2(btcoexist, true, false,
+						      false, 0x18);
+		} else {
+			halbtc8723b2ant_sw_mechanism1(btcoexist, true, true,
+						      false, false);
+			halbtc8723b2ant_sw_mechanism2(btcoexist, false, false,
+						      false, 0x18);
+		}
+	} else {
+		if ((wifi_rssi_state == BTC_RSSI_STATE_HIGH) ||
+		    (wifi_rssi_state == BTC_RSSI_STATE_STAY_HIGH)) {
+			halbtc8723b2ant_sw_mechanism1(btcoexist, false, true,
+						      false, false);
+			halbtc8723b2ant_sw_mechanism2(btcoexist, true, false,
+						      false, 0x18);
+		} else {
+			halbtc8723b2ant_sw_mechanism1(btcoexist, false, true,
+						      false, false);
+			halbtc8723b2ant_sw_mechanism2(btcoexist, false, false,
+						      false, 0x18);
+		}
+	}
+}
+
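+/* Top-level coexistence dispatcher: bail out under manual control or IPS,
+ * handle BT inquiry/page scan specially, try the common-action path, and
+ * otherwise run the per-profile action for the current algorithm.
+ */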
+void halbtc8723b2ant_run_coexist_mechanism(struct btc_coexist *btcoexist)
+{
+	u8 algorithm = 0;
+
+	BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
+		  "[BTCoex], RunCoexistMechanism()===>\n");
+
+	if (btcoexist->manual_control) {
+		BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
+			  "[BTCoex], RunCoexistMechanism(), "
+			  "return for Manual CTRL <===\n");
+		return;
+	}
+
+	if (coex_sta->under_ips) {
+		BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
+			  "[BTCoex], wifi is under IPS !!!\n");
+		return;
+	}
+
+	algorithm = halbtc8723b2ant_action_algorithm(btcoexist);
+	if (coex_sta->c2h_bt_inquiry_page &&
+	    (BT_8723B_2ANT_COEX_ALGO_PANHS != algorithm)) {
+		BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
+			  "[BTCoex], BT is under inquiry/page scan !!\n");
+		halbtc8723b2ant_action_bt_inquiry(btcoexist);
+		return;
+	} else {
+		if (coex_dm->need_recover_0x948) {
+			coex_dm->need_recover_0x948 = false;
+			btcoexist->btc_write_2byte(btcoexist, 0x948,
+						   coex_dm->backup_0x948);
+		}
+	}
+
+	coex_dm->cur_algorithm = algorithm;
+	BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, "[BTCoex], Algorithm = %d\n",
+		  coex_dm->cur_algorithm);
+
+	if (halbtc8723b2ant_is_common_action(btcoexist)) {
+		BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
+			  "[BTCoex], Action 2-Ant common.\n");
+		coex_dm->auto_tdma_adjust = false;
+	} else {
+		if (coex_dm->cur_algorithm != coex_dm->pre_algorithm) {
+			BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
+				  "[BTCoex], preAlgorithm=%d, "
+				  "curAlgorithm=%d\n", coex_dm->pre_algorithm,
+				  coex_dm->cur_algorithm);
+			coex_dm->auto_tdma_adjust = false;
+		}
+		switch (coex_dm->cur_algorithm) {
+		case BT_8723B_2ANT_COEX_ALGO_SCO:
+			BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
+				  "[BTCoex], Action 2-Ant, algorithm = SCO.\n");
+			halbtc8723b2ant_action_sco(btcoexist);
+			break;
+		case BT_8723B_2ANT_COEX_ALGO_HID:
+			BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
+				  "[BTCoex], Action 2-Ant, algorithm = HID.\n");
+			halbtc8723b2ant_action_hid(btcoexist);
+			break;
+		case BT_8723B_2ANT_COEX_ALGO_A2DP:
+			BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
+				  "[BTCoex], Action 2-Ant, "
+				  "algorithm = A2DP.\n");
+			halbtc8723b2ant_action_a2dp(btcoexist);
+			break;
+		case BT_8723B_2ANT_COEX_ALGO_A2DP_PANHS:
+			BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
+				  "[BTCoex], Action 2-Ant, "
+				  "algorithm = A2DP+PAN(HS).\n");
+			halbtc8723b2ant_action_a2dp_pan_hs(btcoexist);
+			break;
+		case BT_8723B_2ANT_COEX_ALGO_PANEDR:
+			BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
+				  "[BTCoex], Action 2-Ant, "
+				  "algorithm = PAN(EDR).\n");
+			halbtc8723b2ant_action_pan_edr(btcoexist);
+			break;
+		case BT_8723B_2ANT_COEX_ALGO_PANHS:
+			BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
+				  "[BTCoex], Action 2-Ant, "
+				  "algorithm = HS mode.\n");
+			halbtc8723b2ant_action_pan_hs(btcoexist);
+			break;
+		case BT_8723B_2ANT_COEX_ALGO_PANEDR_A2DP:
+			BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
+				  "[BTCoex], Action 2-Ant, "
+				  "algorithm = PAN+A2DP.\n");
+			halbtc8723b2ant_action_pan_edr_a2dp(btcoexist);
+			break;
+		case BT_8723B_2ANT_COEX_ALGO_PANEDR_HID:
+			BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
+				  "[BTCoex], Action 2-Ant, "
+				  "algorithm = PAN(EDR)+HID.\n");
+			halbtc8723b2ant_action_pan_edr_hid(btcoexist);
+			break;
+		case BT_8723B_2ANT_COEX_ALGO_HID_A2DP_PANEDR:
+			BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
+				  "[BTCoex], Action 2-Ant, "
+				  "algorithm = HID+A2DP+PAN.\n");
+			halbtc8723b2ant_action_hid_a2dp_pan_edr(btcoexist);
+			break;
+		case BT_8723B_2ANT_COEX_ALGO_HID_A2DP:
+			BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
+				  "[BTCoex], Action 2-Ant, "
+				  "algorithm = HID+A2DP.\n");
+			halbtc8723b2ant_action_hid_a2dp(btcoexist);
+			break;
+		default:
+			BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
+				  "[BTCoex], Action 2-Ant, "
+				  "algorithm = coexist All Off!!\n");
+			halbtc8723b2ant_coex_alloff(btcoexist);
+			break;
+		}
+		coex_dm->pre_algorithm = coex_dm->cur_algorithm;
+	}
+}
+
+void halbtc8723b2ant_wifioff_hwcfg(struct btc_coexist *btcoexist)
+{
+	/* set wlan_act to low */
+	btcoexist->btc_write_1byte(btcoexist, 0x76e, 0x4);
+	/* Force GNT_BT to High */
+	btcoexist->btc_write_1byte_bitmask(btcoexist, 0x765, 0x18, 0x3);
+	/* BT select s0/s1 is controlled by BT */
+	btcoexist->btc_write_1byte_bitmask(btcoexist, 0x67, 0x20, 0x0);
+}
+
+/*********************************************************************
+ *  work around function start with wa_halbtc8723b2ant_
+ *********************************************************************/
+/*********************************************************************
+ *  extern function start with EXhalbtc8723b2ant_
+ *********************************************************************/
+void ex_halbtc8723b2ant_init_hwconfig(struct btc_coexist *btcoexist)
+{
+	u8 u8tmp = 0;
+
+	BTC_PRINT(BTC_MSG_INTERFACE, INTF_INIT,
+		  "[BTCoex], 2Ant Init HW Config!!\n");
+	coex_dm->bt_rf0x1e_backup =
+		btcoexist->btc_get_rf_reg(btcoexist, BTC_RF_A, 0x1e, 0xfffff);
+
+	/* 0x790[5:0]=0x5 */
+	u8tmp = btcoexist->btc_read_1byte(btcoexist, 0x790);
+	u8tmp &= 0xc0;
+	u8tmp |= 0x5;
+	btcoexist->btc_write_1byte(btcoexist, 0x790, u8tmp);
+
+	/* Antenna config */
+	halbtc8723b2ant_set_ant_path(btcoexist, BTC_ANT_WIFI_AT_MAIN,
+				     true, false);
+
+	/* PTA parameter */
+	halbtc8723b2ant_coex_table_with_type(btcoexist, FORCE_EXEC, 0);
+
+	/* Enable counter statistics */
+	/* 0x76e[3] = 1, WLAN_Act controlled by PTA */
+	btcoexist->btc_write_1byte(btcoexist, 0x76e, 0xc);
+	btcoexist->btc_write_1byte(btcoexist, 0x778, 0x3);
+	btcoexist->btc_write_1byte_bitmask(btcoexist, 0x40, 0x20, 0x1);
+}
+
+void ex_halbtc8723b2ant_init_coex_dm(struct btc_coexist *btcoexist)
+{
+	BTC_PRINT(BTC_MSG_INTERFACE, INTF_INIT,
+		  "[BTCoex], Coex Mechanism Init!!\n");
+	halbtc8723b2ant_init_coex_dm(btcoexist);
+}
+
+void ex_halbtc8723b2ant_display_coex_info(struct btc_coexist *btcoexist)
+{
+	struct btc_board_info *board_info = &btcoexist->board_info;
+	struct btc_stack_info *stack_info = &btcoexist->stack_info;
+	struct btc_bt_link_info *bt_link_info = &btcoexist->bt_link_info;
+	u8 *cli_buf = btcoexist->cli_buf;
+	u8 u8tmp[4], i, bt_info_ext, ps_tdma_case = 0;
+	u32 u32tmp[4];
+	bool roam = false, scan = false;
+	bool link = false, wifi_under_5g = false;
+	bool bt_hs_on = false, wifi_busy = false;
+	s32 wifi_rssi = 0, bt_hs_rssi = 0;
+	u32 wifi_bw, wifi_traffic_dir, fa_ofdm, fa_cck;
+	u8 wifi_dot11_chnl, wifi_hs_chnl;
+	u32 fw_ver = 0, bt_patch_ver = 0;
+	u8 ap_num = 0;
+
+	CL_SPRINTF(cli_buf, BT_TMP_BUF_SIZE,
+		   "\r\n ============[BT Coexist info]============");
+	CL_PRINTF(cli_buf);
+
+	if (btcoexist->manual_control) {
+		CL_SPRINTF(cli_buf, BT_TMP_BUF_SIZE,
+			   "\r\n ==========[Under Manual Control]============");
+		CL_PRINTF(cli_buf);
+		CL_SPRINTF(cli_buf, BT_TMP_BUF_SIZE,
+			   "\r\n ==========================================");
+		CL_PRINTF(cli_buf);
+	}
+
+	if (!board_info->bt_exist) {
+		CL_SPRINTF(cli_buf, BT_TMP_BUF_SIZE, "\r\n BT does not exist!");
+		CL_PRINTF(cli_buf);
+		return;
+	}
+
+	CL_SPRINTF(cli_buf, BT_TMP_BUF_SIZE, "\r\n %-35s = %d/ %d ",
+		   "Ant PG number/ Ant mechanism:",
+		   board_info->pg_ant_num, board_info->btdm_ant_num);
+	CL_PRINTF(cli_buf);
+
+	CL_SPRINTF(cli_buf, BT_TMP_BUF_SIZE, "\r\n %-35s = %s / %d",
+		   "BT stack/ hci ext ver",
+		   ((stack_info->profile_notified)? "Yes":"No"),
+		   stack_info->hci_version);
+	CL_PRINTF(cli_buf);
+
+	btcoexist->btc_get(btcoexist, BTC_GET_U4_BT_PATCH_VER, &bt_patch_ver);
+	btcoexist->btc_get(btcoexist, BTC_GET_U4_WIFI_FW_VER, &fw_ver);
+	CL_SPRINTF(cli_buf, BT_TMP_BUF_SIZE,
+		   "\r\n %-35s = %d_%x/ 0x%x/ 0x%x(%d)",
+		   "CoexVer/ fw_ver/ PatchVer",
+		   glcoex_ver_date_8723b_2ant, glcoex_ver_8723b_2ant,
+		   fw_ver, bt_patch_ver, bt_patch_ver);
+	CL_PRINTF(cli_buf);
+
+	btcoexist->btc_get(btcoexist, BTC_GET_BL_HS_OPERATION, &bt_hs_on);
+	btcoexist->btc_get(btcoexist, BTC_GET_U1_WIFI_DOT11_CHNL,
+			   &wifi_dot11_chnl);
+	btcoexist->btc_get(btcoexist, BTC_GET_U1_WIFI_HS_CHNL, &wifi_hs_chnl);
+
+	CL_SPRINTF(cli_buf, BT_TMP_BUF_SIZE, "\r\n %-35s = %d / %d(%d)",
+		   "Dot11 channel / HsChnl(HsMode)",
+		   wifi_dot11_chnl, wifi_hs_chnl, bt_hs_on);
+	CL_PRINTF(cli_buf);
+
+	CL_SPRINTF(cli_buf, BT_TMP_BUF_SIZE, "\r\n %-35s = %02x %02x %02x ",
+		   "H2C Wifi inform bt chnl Info", coex_dm->wifi_chnl_info[0],
+		   coex_dm->wifi_chnl_info[1], coex_dm->wifi_chnl_info[2]);
+	CL_PRINTF(cli_buf);
+
+	btcoexist->btc_get(btcoexist, BTC_GET_S4_WIFI_RSSI, &wifi_rssi);
+	btcoexist->btc_get(btcoexist, BTC_GET_S4_HS_RSSI, &bt_hs_rssi);
+	btcoexist->btc_get(btcoexist, BTC_GET_U1_AP_NUM, &ap_num);
+	CL_SPRINTF(cli_buf, BT_TMP_BUF_SIZE, "\r\n %-35s = %d/ %d/ %d",
+		   "Wifi rssi/ HS rssi/ AP#", wifi_rssi, bt_hs_rssi, ap_num);
+	CL_PRINTF(cli_buf);
+
+	btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_SCAN, &scan);
+	btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_LINK, &link);
+	btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_ROAM, &roam);
+	CL_SPRINTF(cli_buf, BT_TMP_BUF_SIZE, "\r\n %-35s = %d/ %d/ %d ",
+		   "Wifi link/ roam/ scan", link, roam, scan);
+	CL_PRINTF(cli_buf);
+
+	btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_UNDER_5G, &wifi_under_5g);
+	btcoexist->btc_get(btcoexist, BTC_GET_U4_WIFI_BW, &wifi_bw);
+	btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_BUSY, &wifi_busy);
+	btcoexist->btc_get(btcoexist, BTC_GET_U4_WIFI_TRAFFIC_DIRECTION,
+			   &wifi_traffic_dir);
+	CL_SPRINTF(cli_buf, BT_TMP_BUF_SIZE, "\r\n %-35s = %s / %s/ %s ",
+		   "Wifi status", (wifi_under_5g ? "5G" : "2.4G"),
+		   ((BTC_WIFI_BW_LEGACY == wifi_bw) ? "Legacy" :
+		    ((BTC_WIFI_BW_HT40 == wifi_bw) ? "HT40" : "HT20")),
+		   ((!wifi_busy) ? "idle" :
+		    ((BTC_WIFI_TRAFFIC_TX == wifi_traffic_dir) ?
+		     "uplink" : "downlink")));
+	CL_PRINTF(cli_buf);
+
+
+	CL_SPRINTF(cli_buf, BT_TMP_BUF_SIZE, "\r\n %-35s = %d / %d / %d / %d",
+		   "SCO/HID/PAN/A2DP",
+		   bt_link_info->sco_exist, bt_link_info->hid_exist,
+		   bt_link_info->pan_exist, bt_link_info->a2dp_exist);
+	CL_PRINTF(cli_buf);
+	btcoexist->btc_disp_dbg_msg(btcoexist, BTC_DBG_DISP_BT_LINK_INFO);
+
+	bt_info_ext = coex_sta->bt_info_ext;
+	CL_SPRINTF(cli_buf, BT_TMP_BUF_SIZE, "\r\n %-35s = %s",
+		   "BT Info A2DP rate",
+		   (bt_info_ext&BIT0)? "Basic rate":"EDR rate");
+	CL_PRINTF(cli_buf);
+
+	for (i = 0; i < BT_INFO_SRC_8723B_2ANT_MAX; i++) {
+		if (coex_sta->bt_info_c2h_cnt[i]) {
+			CL_SPRINTF(cli_buf, BT_TMP_BUF_SIZE,
+				   "\r\n %-35s = %02x %02x %02x "
+				   "%02x %02x %02x %02x(%d)",
+				   glbt_info_src_8723b_2ant[i],
+				   coex_sta->bt_info_c2h[i][0],
+				   coex_sta->bt_info_c2h[i][1],
+				   coex_sta->bt_info_c2h[i][2],
+				   coex_sta->bt_info_c2h[i][3],
+				   coex_sta->bt_info_c2h[i][4],
+				   coex_sta->bt_info_c2h[i][5],
+				   coex_sta->bt_info_c2h[i][6],
+				   coex_sta->bt_info_c2h_cnt[i]);
+			CL_PRINTF(cli_buf);
+		}
+	}
+
+	CL_SPRINTF(cli_buf, BT_TMP_BUF_SIZE, "\r\n %-35s = %s/%s",
+		   "PS state, IPS/LPS",
+		   (coex_sta->under_ips ? "IPS ON" : "IPS OFF"),
+		   (coex_sta->under_lps ? "LPS ON" : "LPS OFF"));
+	CL_PRINTF(cli_buf);
+	btcoexist->btc_disp_dbg_msg(btcoexist, BTC_DBG_DISP_FW_PWR_MODE_CMD);
+
+	/* Sw mechanism	*/
+	CL_SPRINTF(cli_buf, BT_TMP_BUF_SIZE,
+		   "\r\n %-35s", "============[Sw mechanism]============");
+	CL_PRINTF(cli_buf);
+	CL_SPRINTF(cli_buf, BT_TMP_BUF_SIZE, "\r\n %-35s = %d/ %d/ %d ",
+		   "SM1[ShRf/ LpRA/ LimDig]", coex_dm->cur_rf_rx_lpf_shrink,
+		   coex_dm->cur_low_penalty_ra, coex_dm->limited_dig);
+	CL_PRINTF(cli_buf);
+	CL_SPRINTF(cli_buf, BT_TMP_BUF_SIZE, "\r\n %-35s = %d/ %d/ %d(0x%x) ",
+		   "SM2[AgcT/ AdcB/ SwDacSwing(lvl)]",
+		   coex_dm->cur_agc_table_en, coex_dm->cur_adc_back_off,
+		   coex_dm->cur_dac_swing_on, coex_dm->cur_dac_swing_lvl);
+	CL_PRINTF(cli_buf);
+
+	/* Fw mechanism	*/
+	CL_SPRINTF(cli_buf, BT_TMP_BUF_SIZE, "\r\n %-35s",
+		   "============[Fw mechanism]============");
+	CL_PRINTF(cli_buf);
+
+	ps_tdma_case = coex_dm->cur_ps_tdma;
+	CL_SPRINTF(cli_buf, BT_TMP_BUF_SIZE,
+		   "\r\n %-35s = %02x %02x %02x %02x %02x case-%d (auto:%d)",
+		   "PS TDMA", coex_dm->ps_tdma_para[0],
+		   coex_dm->ps_tdma_para[1], coex_dm->ps_tdma_para[2],
+		   coex_dm->ps_tdma_para[3], coex_dm->ps_tdma_para[4],
+		   ps_tdma_case, coex_dm->auto_tdma_adjust);
+	CL_PRINTF(cli_buf);
+
+	CL_SPRINTF(cli_buf, BT_TMP_BUF_SIZE, "\r\n %-35s = %d/ %d ",
+		   "DecBtPwr/ IgnWlanAct", coex_dm->cur_dec_bt_pwr,
+		   coex_dm->cur_ignore_wlan_act);
+	CL_PRINTF(cli_buf);
+
+	/* Hw setting */
+	CL_SPRINTF(cli_buf, BT_TMP_BUF_SIZE, "\r\n %-35s",
+		   "============[Hw setting]============");
+	CL_PRINTF(cli_buf);
+
+	CL_SPRINTF(cli_buf, BT_TMP_BUF_SIZE, "\r\n %-35s = 0x%x",
+		   "RF-A, 0x1e initVal", coex_dm->bt_rf0x1e_backup);
+	CL_PRINTF(cli_buf);
+
+	u8tmp[0] = btcoexist->btc_read_1byte(btcoexist, 0x778);
+	u32tmp[0] = btcoexist->btc_read_4byte(btcoexist, 0x880);
+	CL_SPRINTF(cli_buf, BT_TMP_BUF_SIZE, "\r\n %-35s = 0x%x/ 0x%x",
+		   "0x778/0x880[29:25]", u8tmp[0],
+		   (u32tmp[0]&0x3e000000) >> 25);
+	CL_PRINTF(cli_buf);
+
+
+	u32tmp[0] = btcoexist->btc_read_4byte(btcoexist, 0x948);
+	u8tmp[0] = btcoexist->btc_read_1byte(btcoexist, 0x67);
+	u8tmp[1] = btcoexist->btc_read_1byte(btcoexist, 0x765);
+	CL_SPRINTF(cli_buf, BT_TMP_BUF_SIZE, "\r\n %-35s = 0x%x/ 0x%x/ 0x%x",
+		   "0x948/ 0x67[5] / 0x765",
+		   u32tmp[0], ((u8tmp[0]&0x20)>> 5), u8tmp[1]);
+	CL_PRINTF(cli_buf);
+
+	u32tmp[0] = btcoexist->btc_read_4byte(btcoexist, 0x92c);
+	u32tmp[1] = btcoexist->btc_read_4byte(btcoexist, 0x930);
+	u32tmp[2] = btcoexist->btc_read_4byte(btcoexist, 0x944);
+	CL_SPRINTF(cli_buf, BT_TMP_BUF_SIZE, "\r\n %-35s = 0x%x/ 0x%x/ 0x%x",
+		   "0x92c[1:0]/ 0x930[7:0]/0x944[1:0]",
+		   u32tmp[0]&0x3, u32tmp[1]&0xff, u32tmp[2]&0x3);
+	CL_PRINTF(cli_buf);
+
+
+	u8tmp[0] = btcoexist->btc_read_1byte(btcoexist, 0x39);
+	u8tmp[1] = btcoexist->btc_read_1byte(btcoexist, 0x40);
+	u32tmp[0] = btcoexist->btc_read_4byte(btcoexist, 0x4c);
+	u8tmp[2] = btcoexist->btc_read_1byte(btcoexist, 0x64);
+	CL_SPRINTF(cli_buf, BT_TMP_BUF_SIZE,
+		   "\r\n %-35s = 0x%x/ 0x%x/ 0x%x/ 0x%x",
+		   "0x38[11]/0x40/0x4c[24:23]/0x64[0]",
+		   ((u8tmp[0] & 0x8)>>3), u8tmp[1],
+		   ((u32tmp[0]&0x01800000)>>23), u8tmp[2]&0x1);
+	CL_PRINTF(cli_buf);
+
+	u32tmp[0] = btcoexist->btc_read_4byte(btcoexist, 0x550);
+	u8tmp[0] = btcoexist->btc_read_1byte(btcoexist, 0x522);
+	CL_SPRINTF(cli_buf, BT_TMP_BUF_SIZE, "\r\n %-35s = 0x%x/ 0x%x",
+		   "0x550(bcn ctrl)/0x522", u32tmp[0], u8tmp[0]);
+	CL_PRINTF(cli_buf);
+
+	u32tmp[0] = btcoexist->btc_read_4byte(btcoexist, 0xc50);
+	u8tmp[0] = btcoexist->btc_read_1byte(btcoexist, 0x49c);
+	CL_SPRINTF(cli_buf, BT_TMP_BUF_SIZE, "\r\n %-35s = 0x%x/ 0x%x",
+		   "0xc50(dig)/0x49c(null-drop)", u32tmp[0]&0xff, u8tmp[0]);
+	CL_PRINTF(cli_buf);
+
+	u32tmp[0] = btcoexist->btc_read_4byte(btcoexist, 0xda0);
+	u32tmp[1] = btcoexist->btc_read_4byte(btcoexist, 0xda4);
+	u32tmp[2] = btcoexist->btc_read_4byte(btcoexist, 0xda8);
+	u32tmp[3] = btcoexist->btc_read_4byte(btcoexist, 0xcf0);
+
+	u8tmp[0] = btcoexist->btc_read_1byte(btcoexist, 0xa5b);
+	u8tmp[1] = btcoexist->btc_read_1byte(btcoexist, 0xa5c);
+
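+	/* OFDM false alarms are spread over the halfwords of 0xda0/0xda4/
+	 * 0xda8/0xcf0; CCK false alarms are the two bytes at 0xa5b/0xa5c.
+	 */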
+	fa_ofdm = ((u32tmp[0] & 0xffff0000) >> 16) +
+		  ((u32tmp[1] & 0xffff0000) >> 16) +
+		   (u32tmp[1] & 0xffff) +
+		   (u32tmp[2] & 0xffff) +
+		  ((u32tmp[3] & 0xffff0000) >> 16) +
+		   (u32tmp[3] & 0xffff);
+	fa_cck = (u8tmp[0] << 8) + u8tmp[1];
+
+	CL_SPRINTF(cli_buf, BT_TMP_BUF_SIZE, "\r\n %-35s = 0x%x/ 0x%x/ 0x%x",
+		   "OFDM-CCA/OFDM-FA/CCK-FA",
+		   u32tmp[0] & 0xffff, fa_ofdm, fa_cck);
+	CL_PRINTF(cli_buf);
+
+	u32tmp[0] = btcoexist->btc_read_4byte(btcoexist, 0x6c0);
+	u32tmp[1] = btcoexist->btc_read_4byte(btcoexist, 0x6c4);
+	u32tmp[2] = btcoexist->btc_read_4byte(btcoexist, 0x6c8);
+	u8tmp[0] = btcoexist->btc_read_1byte(btcoexist, 0x6cc);
+	CL_SPRINTF(cli_buf, BT_TMP_BUF_SIZE,
+		   "\r\n %-35s = 0x%x/ 0x%x/ 0x%x/ 0x%x",
+		   "0x6c0/0x6c4/0x6c8/0x6cc(coexTable)",
+		   u32tmp[0], u32tmp[1], u32tmp[2], u8tmp[0]);
+	CL_PRINTF(cli_buf);
+
+	CL_SPRINTF(cli_buf, BT_TMP_BUF_SIZE, "\r\n %-35s = %d/ %d",
+		   "0x770(high-pri rx/tx)",
+		   coex_sta->high_priority_rx, coex_sta->high_priority_tx);
+	CL_PRINTF(cli_buf);
+	CL_SPRINTF(cli_buf, BT_TMP_BUF_SIZE, "\r\n %-35s = %d/ %d",
+		   "0x774(low-pri rx/tx)", coex_sta->low_priority_rx,
+		   coex_sta->low_priority_tx);
+	CL_PRINTF(cli_buf);
+#if (BT_AUTO_REPORT_ONLY_8723B_2ANT == 1)
+	halbtc8723b2ant_monitor_bt_ctr(btcoexist);
+#endif
+	btcoexist->btc_disp_dbg_msg(btcoexist,
+				    BTC_DBG_DISP_COEX_STATISTICS);
+}
+
+void ex_halbtc8723b2ant_ips_notify(struct btc_coexist *btcoexist, u8 type)
+{
+	if (BTC_IPS_ENTER == type) {
+		BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY,
+			  "[BTCoex], IPS ENTER notify\n");
+		coex_sta->under_ips = true;
+		halbtc8723b2ant_wifioff_hwcfg(btcoexist);
+		halbtc8723b2ant_ignore_wlan_act(btcoexist, FORCE_EXEC, true);
+		halbtc8723b2ant_coex_alloff(btcoexist);
+	} else if (BTC_IPS_LEAVE == type) {
+		BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY,
+			  "[BTCoex], IPS LEAVE notify\n");
+		coex_sta->under_ips = false;
+		ex_halbtc8723b2ant_init_hwconfig(btcoexist);
+		halbtc8723b2ant_init_coex_dm(btcoexist);
+		halbtc8723b2ant_query_bt_info(btcoexist);
+	}
+}
+
+void ex_halbtc8723b2ant_lps_notify(struct btc_coexist *btcoexist, u8 type)
+{
+	if (BTC_LPS_ENABLE == type) {
+		BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY,
+			  "[BTCoex], LPS ENABLE notify\n");
+		coex_sta->under_lps = true;
+	} else if (BTC_LPS_DISABLE == type) {
+		BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY,
+			  "[BTCoex], LPS DISABLE notify\n");
+		coex_sta->under_lps = false;
+	}
+}
+
+void ex_halbtc8723b2ant_scan_notify(struct btc_coexist *btcoexist, u8 type)
+{
+	if (BTC_SCAN_START == type)
+		BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY,
+			  "[BTCoex], SCAN START notify\n");
+	else if (BTC_SCAN_FINISH == type)
+		BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY,
+			  "[BTCoex], SCAN FINISH notify\n");
+}
+
+void ex_halbtc8723b2ant_connect_notify(struct btc_coexist *btcoexist, u8 type)
+{
+	if (BTC_ASSOCIATE_START == type)
+		BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY,
+			  "[BTCoex], CONNECT START notify\n");
+	else if (BTC_ASSOCIATE_FINISH == type)
+		BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY,
+			  "[BTCoex], CONNECT FINISH notify\n");
+}
+
+void ex_halbtc8723b2ant_media_status_notify(struct btc_coexist *btcoexist,
+					    u8 type)
+{
+	u8 h2c_parameter[3] = {0};
+	u32 wifi_bw;
+	u8 wifi_central_chnl;
+
+	if (BTC_MEDIA_CONNECT == type)
+		BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY,
+			  "[BTCoex], MEDIA connect notify\n");
+	else
+		BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY,
+			  "[BTCoex], MEDIA disconnect notify\n");
+
+	/* only in 2.4G do we need to inform BT of the channel mask */
+	btcoexist->btc_get(btcoexist,
+		BTC_GET_U1_WIFI_CENTRAL_CHNL, &wifi_central_chnl);
+	if ((BTC_MEDIA_CONNECT == type) &&
+	    (wifi_central_chnl <= 14)) {
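+		/* h2c payload: byte0 = channel info valid, byte1 = central
+		 * channel, byte2 = bandwidth (0x30 for HT40, else 0x20).
+		 */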
+		h2c_parameter[0] = 0x1;
+		h2c_parameter[1] = wifi_central_chnl;
+		btcoexist->btc_get(btcoexist,
+			BTC_GET_U4_WIFI_BW, &wifi_bw);
+		if (BTC_WIFI_BW_HT40 == wifi_bw)
+			h2c_parameter[2] = 0x30;
+		else
+			h2c_parameter[2] = 0x20;
+	}
+
+	coex_dm->wifi_chnl_info[0] = h2c_parameter[0];
+	coex_dm->wifi_chnl_info[1] = h2c_parameter[1];
+	coex_dm->wifi_chnl_info[2] = h2c_parameter[2];
+
+	BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_EXEC,
+		  "[BTCoex], FW write 0x66=0x%x\n",
+		  h2c_parameter[0] << 16 | h2c_parameter[1] << 8 |
+		  h2c_parameter[2]);
+
+	btcoexist->btc_fill_h2c(btcoexist, 0x66, 3, h2c_parameter);
+}
+
+void ex_halbtc8723b2ant_special_packet_notify(struct btc_coexist *btcoexist,
+					      u8 type)
+{
+	if (type == BTC_PACKET_DHCP)
+		BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY,
+			  "[BTCoex], DHCP Packet notify\n");
+}
+
+void ex_halbtc8723b2ant_bt_info_notify(struct btc_coexist *btcoexist,
+				       u8 *tmpbuf, u8 length)
+{
+	u8 btInfo = 0;
+	u8 i, rsp_source = 0;
+	bool bt_busy = false, limited_dig = false;
+	bool wifi_connected = false;
+
+	coex_sta->c2h_bt_info_req_sent = false;
+
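+	/* C2H BT info layout (as parsed below): byte0[3:0] = response
+	 * source, byte1 = connection/profile bits, byte2[3:0] = retry
+	 * count, byte3 = raw RSSI, byte4 = extended info.
+	 */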
+	rsp_source = tmpbuf[0]&0xf;
+	if (rsp_source >= BT_INFO_SRC_8723B_2ANT_MAX)
+		rsp_source = BT_INFO_SRC_8723B_2ANT_WIFI_FW;
+	coex_sta->bt_info_c2h_cnt[rsp_source]++;
+
+	BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY,
+		  "[BTCoex], Bt info[%d], length=%d, hex data=[",
+		  rsp_source, length);
+	for (i = 0; i < length; i++) {
+		coex_sta->bt_info_c2h[rsp_source][i] = tmpbuf[i];
+		if (i == 1)
+			btInfo = tmpbuf[i];
+		if (i == length-1)
+			BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY,
+				  "0x%02x]\n", tmpbuf[i]);
+		else
+			BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY,
+				  "0x%02x, ", tmpbuf[i]);
+	}
+
+	if (btcoexist->manual_control) {
+		BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
+			  "[BTCoex], BtInfoNotify(), "
+			  "return for Manual CTRL<===\n");
+		return;
+	}
+
+	if (BT_INFO_SRC_8723B_2ANT_WIFI_FW != rsp_source) {
+		coex_sta->bt_retry_cnt =	/* [3:0]*/
+			coex_sta->bt_info_c2h[rsp_source][2] & 0xf;
+
+		coex_sta->bt_rssi =
+			coex_sta->bt_info_c2h[rsp_source][3] * 2 + 10;
+
+		coex_sta->bt_info_ext =
+			coex_sta->bt_info_c2h[rsp_source][4];
+
+		/* Here we need to resend some wifi info to BT
+		 * because BT has been reset and lost the info.
+		 */
+		if ((coex_sta->bt_info_ext & BIT1)) {
+			BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
+				  "[BTCoex], BT ext info bit1 check,"
+				  " send wifi BW&Chnl to BT!!\n");
+			btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_CONNECTED,
+					   &wifi_connected);
+			if (wifi_connected)
+				ex_halbtc8723b2ant_media_status_notify(
+							btcoexist,
+							BTC_MEDIA_CONNECT);
+			else
+				ex_halbtc8723b2ant_media_status_notify(
+							btcoexist,
+							BTC_MEDIA_DISCONNECT);
+		}
+
+		if ((coex_sta->bt_info_ext & BIT3)) {
+			BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
+				  "[BTCoex], BT ext info bit3 check, "
+				  "set BT NOT to ignore Wlan active!!\n");
+			halbtc8723b2ant_ignore_wlan_act(btcoexist, FORCE_EXEC,
+							false);
+		} else {
+			/* BT already does not ignore WLAN activity,
+			 * nothing to do here.
+			 */
+		}
+#if (BT_AUTO_REPORT_ONLY_8723B_2ANT == 0)
+		if ((coex_sta->bt_info_ext & BIT4)) {
+			/* BT auto report already enabled, do nothing*/
+		} else {
+			halbtc8723b2ant_bt_auto_report(btcoexist, FORCE_EXEC,
+						       true);
+		}
+#endif
+	}
+
+	/* check BIT2 first ==> check if bt is under inquiry or page scan*/
+	if (btInfo & BT_INFO_8723B_2ANT_B_INQ_PAGE)
+		coex_sta->c2h_bt_inquiry_page = true;
+	else
+		coex_sta->c2h_bt_inquiry_page = false;
+
+	/* set link exist status*/
+	if (!(btInfo & BT_INFO_8723B_2ANT_B_CONNECTION)) {
+		coex_sta->bt_link_exist = false;
+		coex_sta->pan_exist = false;
+		coex_sta->a2dp_exist = false;
+		coex_sta->hid_exist = false;
+		coex_sta->sco_exist = false;
+	} else { /* connection exists */
+		coex_sta->bt_link_exist = true;
+		if (btInfo & BT_INFO_8723B_2ANT_B_FTP)
+			coex_sta->pan_exist = true;
+		else
+			coex_sta->pan_exist = false;
+		if (btInfo & BT_INFO_8723B_2ANT_B_A2DP)
+			coex_sta->a2dp_exist = true;
+		else
+			coex_sta->a2dp_exist = false;
+		if (btInfo & BT_INFO_8723B_2ANT_B_HID)
+			coex_sta->hid_exist = true;
+		else
+			coex_sta->hid_exist = false;
+		if (btInfo & BT_INFO_8723B_2ANT_B_SCO_ESCO)
+			coex_sta->sco_exist = true;
+		else
+			coex_sta->sco_exist = false;
+	}
+
+	halbtc8723b2ant_update_bt_link_info(btcoexist);
+
+	if (!(btInfo & BT_INFO_8723B_2ANT_B_CONNECTION)) {
+		coex_dm->bt_status = BT_8723B_2ANT_BT_STATUS_NON_CONNECTED_IDLE;
+		BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
+			  "[BTCoex], BtInfoNotify(), "
+			  "BT Non-Connected idle!!!\n");
+	/* connection exists but no busy */
+	} else if (btInfo == BT_INFO_8723B_2ANT_B_CONNECTION) {
+		coex_dm->bt_status = BT_8723B_2ANT_BT_STATUS_CONNECTED_IDLE;
+		BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
+			  "[BTCoex], BtInfoNotify(), BT Connected-idle!!!\n");
+	} else if ((btInfo & BT_INFO_8723B_2ANT_B_SCO_ESCO) ||
+		   (btInfo & BT_INFO_8723B_2ANT_B_SCO_BUSY)) {
+		coex_dm->bt_status =
+			BT_8723B_2ANT_BT_STATUS_SCO_BUSY;
+		BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
+			  "[BTCoex], BtInfoNotify(), BT SCO busy!!!\n");
+	} else if (btInfo & BT_INFO_8723B_2ANT_B_ACL_BUSY) {
+		coex_dm->bt_status =
+			BT_8723B_2ANT_BT_STATUS_ACL_BUSY;
+		BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
+			  "[BTCoex], BtInfoNotify(), BT ACL busy!!!\n");
+	} else {
+		coex_dm->bt_status = BT_8723B_2ANT_BT_STATUS_MAX;
+		BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
+			  "[BTCoex], BtInfoNotify(), "
+			  "BT Non-Defined state!!!\n");
+	}
+
+	if ((BT_8723B_2ANT_BT_STATUS_ACL_BUSY == coex_dm->bt_status) ||
+	    (BT_8723B_2ANT_BT_STATUS_SCO_BUSY == coex_dm->bt_status) ||
+	    (BT_8723B_2ANT_BT_STATUS_ACL_SCO_BUSY == coex_dm->bt_status)) {
+		bt_busy = true;
+		limited_dig = true;
+	} else {
+		bt_busy = false;
+		limited_dig = false;
+	}
+
+	btcoexist->btc_set(btcoexist, BTC_SET_BL_BT_TRAFFIC_BUSY, &bt_busy);
+
+	coex_dm->limited_dig = limited_dig;
+	btcoexist->btc_set(btcoexist, BTC_SET_BL_BT_LIMITED_DIG, &limited_dig);
+
+	halbtc8723b2ant_run_coexist_mechanism(btcoexist);
+}
+
+void ex_halbtc8723b2ant_halt_notify(struct btc_coexist *btcoexist)
+{
+	BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY, "[BTCoex], Halt notify\n");
+
+	halbtc8723b2ant_wifioff_hwcfg(btcoexist);
+	halbtc8723b2ant_ignore_wlan_act(btcoexist, FORCE_EXEC, true);
+	ex_halbtc8723b2ant_media_status_notify(btcoexist, BTC_MEDIA_DISCONNECT);
+}
+
+void ex_halbtc8723b2ant_periodical(struct btc_coexist *btcoexist)
+{
+	struct btc_board_info *board_info = &btcoexist->board_info;
+	struct btc_stack_info *stack_info = &btcoexist->stack_info;
+	static u8 dis_ver_info_cnt = 0;
+	u32 fw_ver = 0, bt_patch_ver = 0;
+
+	BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
+		  "[BTCoex], =========================="
+		  "Periodical===========================\n");
+
+	if (dis_ver_info_cnt <= 5) {
+		dis_ver_info_cnt += 1;
+		BTC_PRINT(BTC_MSG_INTERFACE, INTF_INIT,
+			  "[BTCoex], ****************************"
+			  "************************************\n");
+		BTC_PRINT(BTC_MSG_INTERFACE, INTF_INIT,
+			  "[BTCoex], Ant PG Num/ Ant Mech/ "
+			  "Ant Pos = %d/ %d/ %d\n", board_info->pg_ant_num,
+			  board_info->btdm_ant_num, board_info->btdm_ant_pos);
+		BTC_PRINT(BTC_MSG_INTERFACE, INTF_INIT,
+			  "[BTCoex], BT stack/ hci ext ver = %s / %d\n",
+			  ((stack_info->profile_notified)? "Yes":"No"),
+			  stack_info->hci_version);
+		btcoexist->btc_get(btcoexist, BTC_GET_U4_BT_PATCH_VER,
+				   &bt_patch_ver);
+		btcoexist->btc_get(btcoexist, BTC_GET_U4_WIFI_FW_VER, &fw_ver);
+		BTC_PRINT(BTC_MSG_INTERFACE, INTF_INIT,
+			  "[BTCoex], CoexVer/ fw_ver/ PatchVer = "
+			  "%d_%x/ 0x%x/ 0x%x(%d)\n",
+			  glcoex_ver_date_8723b_2ant, glcoex_ver_8723b_2ant,
+			  fw_ver, bt_patch_ver, bt_patch_ver);
+		BTC_PRINT(BTC_MSG_INTERFACE, INTF_INIT,
+			  "[BTCoex], *****************************"
+			  "***********************************\n");
+	}
+
+#if (BT_AUTO_REPORT_ONLY_8723B_2ANT == 0)
+	halbtc8723b2ant_query_bt_info(btcoexist);
+	halbtc8723b2ant_monitor_bt_ctr(btcoexist);
+	halbtc8723b2ant_monitor_bt_enable_disable(btcoexist);
+#else
+	if (halbtc8723b2ant_is_wifi_status_changed(btcoexist) ||
+	    coex_dm->auto_tdma_adjust)
+		halbtc8723b2ant_run_coexist_mechanism(btcoexist);
+#endif
+}
+
+#endif
+
diff --git a/drivers/staging/rtl8821ae/btcoexist/halbtc8723b2ant.h b/drivers/staging/rtl8821ae/btcoexist/halbtc8723b2ant.h
new file mode 100644
index 0000000..fa3784a
--- /dev/null
+++ b/drivers/staging/rtl8821ae/btcoexist/halbtc8723b2ant.h
@@ -0,0 +1,145 @@
+/************************************************************************
+ * The following is for 8723B 2Ant BT Co-exist definition
+ ************************************************************************/
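+/* 1: rely on the BT firmware auto report; 0: poll BT info and counters
+ * from the periodical handler instead.
+ */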
+#define	BT_AUTO_REPORT_ONLY_8723B_2ANT			1
+
+
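+/* bits of byte 1 in the C2H BT info report */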
+#define	BT_INFO_8723B_2ANT_B_FTP			BIT7
+#define	BT_INFO_8723B_2ANT_B_A2DP			BIT6
+#define	BT_INFO_8723B_2ANT_B_HID			BIT5
+#define	BT_INFO_8723B_2ANT_B_SCO_BUSY			BIT4
+#define	BT_INFO_8723B_2ANT_B_ACL_BUSY			BIT3
+#define	BT_INFO_8723B_2ANT_B_INQ_PAGE			BIT2
+#define	BT_INFO_8723B_2ANT_B_SCO_ESCO			BIT1
+#define	BT_INFO_8723B_2ANT_B_CONNECTION			BIT0
+
+#define BTC_RSSI_COEX_THRESH_TOL_8723B_2ANT		2
+
+typedef enum _BT_INFO_SRC_8723B_2ANT {
+	BT_INFO_SRC_8723B_2ANT_WIFI_FW			= 0x0,
+	BT_INFO_SRC_8723B_2ANT_BT_RSP			= 0x1,
+	BT_INFO_SRC_8723B_2ANT_BT_ACTIVE_SEND		= 0x2,
+	BT_INFO_SRC_8723B_2ANT_MAX
+} BT_INFO_SRC_8723B_2ANT, *PBT_INFO_SRC_8723B_2ANT;
+
+typedef enum _BT_8723B_2ANT_BT_STATUS {
+	BT_8723B_2ANT_BT_STATUS_NON_CONNECTED_IDLE	= 0x0,
+	BT_8723B_2ANT_BT_STATUS_CONNECTED_IDLE		= 0x1,
+	BT_8723B_2ANT_BT_STATUS_INQ_PAGE		= 0x2,
+	BT_8723B_2ANT_BT_STATUS_ACL_BUSY		= 0x3,
+	BT_8723B_2ANT_BT_STATUS_SCO_BUSY		= 0x4,
+	BT_8723B_2ANT_BT_STATUS_ACL_SCO_BUSY		= 0x5,
+	BT_8723B_2ANT_BT_STATUS_MAX
+} BT_8723B_2ANT_BT_STATUS, *PBT_8723B_2ANT_BT_STATUS;
+
+typedef enum _BT_8723B_2ANT_COEX_ALGO {
+	BT_8723B_2ANT_COEX_ALGO_UNDEFINED		= 0x0,
+	BT_8723B_2ANT_COEX_ALGO_SCO			= 0x1,
+	BT_8723B_2ANT_COEX_ALGO_HID			= 0x2,
+	BT_8723B_2ANT_COEX_ALGO_A2DP			= 0x3,
+	BT_8723B_2ANT_COEX_ALGO_A2DP_PANHS		= 0x4,
+	BT_8723B_2ANT_COEX_ALGO_PANEDR			= 0x5,
+	BT_8723B_2ANT_COEX_ALGO_PANHS			= 0x6,
+	BT_8723B_2ANT_COEX_ALGO_PANEDR_A2DP		= 0x7,
+	BT_8723B_2ANT_COEX_ALGO_PANEDR_HID		= 0x8,
+	BT_8723B_2ANT_COEX_ALGO_HID_A2DP_PANEDR		= 0x9,
+	BT_8723B_2ANT_COEX_ALGO_HID_A2DP		= 0xa,
+	BT_8723B_2ANT_COEX_ALGO_MAX			= 0xb,
+} BT_8723B_2ANT_COEX_ALGO, *PBT_8723B_2ANT_COEX_ALGO;
+
+struct coex_dm_8723b_2ant {
+	/* fw mechanism */
+	bool pre_dec_bt_pwr;
+	bool cur_dec_bt_pwr;
+	u8 pre_fw_dac_swing_lvl;
+	u8 cur_fw_dac_swing_lvl;
+	bool cur_ignore_wlan_act;
+	bool pre_ignore_wlan_act;
+	u8 pre_ps_tdma;
+	u8 cur_ps_tdma;
+	u8 ps_tdma_para[5];
+	u8 ps_tdma_du_adj_type;
+	bool reset_tdma_adjust;
+	bool auto_tdma_adjust;
+	bool pre_ps_tdma_on;
+	bool cur_ps_tdma_on;
+	bool pre_bt_auto_report;
+	bool cur_bt_auto_report;
+
+	/* sw mechanism */
+	bool pre_rf_rx_lpf_shrink;
+	bool cur_rf_rx_lpf_shrink;
+	u32 bt_rf0x1e_backup;
+	bool pre_low_penalty_ra;
+	bool cur_low_penalty_ra;
+	bool pre_dac_swing_on;
+	u32 pre_dac_swing_lvl;
+	bool cur_dac_swing_on;
+	u32 cur_dac_swing_lvl;
+	bool pre_adc_back_off;
+	bool cur_adc_back_off;
+	bool pre_agc_table_en;
+	bool cur_agc_table_en;
+	u32 pre_val0x6c0;
+	u32 cur_val0x6c0;
+	u32 pre_val0x6c4;
+	u32 cur_val0x6c4;
+	u32 pre_val0x6c8;
+	u32 cur_val0x6c8;
+	u8 pre_val0x6cc;
+	u8 cur_val0x6cc;
+	bool limited_dig;
+
+	/* algorithm related */
+	u8 pre_algorithm;
+	u8 cur_algorithm;
+	u8 bt_status;
+	u8 wifi_chnl_info[3];
+
+	bool need_recover_0x948;
+	u16 backup_0x948;
+};
+
+struct coex_sta_8723b_2ant {
+	bool bt_link_exist;
+	bool sco_exist;
+	bool a2dp_exist;
+	bool hid_exist;
+	bool pan_exist;
+
+	bool under_lps;
+	bool under_ips;
+	u32 high_priority_tx;
+	u32 high_priority_rx;
+	u32 low_priority_tx;
+	u32 low_priority_rx;
+	u8 bt_rssi;
+	u8 pre_bt_rssi_state;
+	u8 pre_wifi_rssi_state[4];
+	bool c2h_bt_info_req_sent;
+	u8 bt_info_c2h[BT_INFO_SRC_8723B_2ANT_MAX][10];
+	u32 bt_info_c2h_cnt[BT_INFO_SRC_8723B_2ANT_MAX];
+	bool c2h_bt_inquiry_page;
+	u8 bt_retry_cnt;
+	u8 bt_info_ext;
+};
+
+/*********************************************************************
+ * The following is interface which will notify coex module.
+ *********************************************************************/
+void ex_halbtc8723b2ant_init_hwconfig(struct btc_coexist *btcoexist);
+void ex_halbtc8723b2ant_init_coex_dm(struct btc_coexist *btcoexist);
+void ex_halbtc8723b2ant_ips_notify(struct btc_coexist *btcoexist, u8 type);
+void ex_halbtc8723b2ant_lps_notify(struct btc_coexist *btcoexist, u8 type);
+void ex_halbtc8723b2ant_scan_notify(struct btc_coexist *btcoexist, u8 type);
+void ex_halbtc8723b2ant_connect_notify(struct btc_coexist *btcoexist, u8 type);
+void ex_halbtc8723b2ant_media_status_notify(struct btc_coexist *btcoexist,
+					    u8 type);
+void ex_halbtc8723b2ant_special_packet_notify(struct btc_coexist *btcoexist,
+					      u8 type);
+void ex_halbtc8723b2ant_bt_info_notify(struct btc_coexist *btcoexist,
+				       u8 *tmpbuf, u8 length);
+void ex_halbtc8723b2ant_halt_notify(struct btc_coexist *btcoexist);
+void ex_halbtc8723b2ant_periodical(struct btc_coexist *btcoexist);
+void ex_halbtc8723b2ant_display_coex_info(struct btc_coexist *btcoexist);
+
diff --git a/drivers/staging/rtl8821ae/btcoexist/halbtcoutsrc.c b/drivers/staging/rtl8821ae/btcoexist/halbtcoutsrc.c
new file mode 100644
index 0000000..9d9fa4d
--- /dev/null
+++ b/drivers/staging/rtl8821ae/btcoexist/halbtcoutsrc.c
@@ -0,0 +1,1181 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2007 - 2013 Realtek Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ *
+ ******************************************************************************/
+
+#include "halbt_precomp.h"
+
+/*#if(BT_30_SUPPORT == 1)*/
+#if 1
+/***********************************************
+ *		Global variables
+ ***********************************************/
+const char *const bt_profile_string[] = {
+	"NONE",
+	"A2DP",
+	"PAN",
+	"HID",
+	"SCO",
+};
+
+const char *const bt_spec_string[] = {
+	"1.0b",
+	"1.1",
+	"1.2",
+	"2.0+EDR",
+	"2.1+EDR",
+	"3.0+HS",
+	"4.0",
+};
+
+const char *const bt_link_role_string[] = {
+	"Master",
+	"Slave",
+};
+
+const char *const h2c_state_string[] = {
+	"successful",
+	"h2c busy",
+	"rf off",
+	"fw not read",
+};
+
+const char *const io_state_string[] = {
+	"IO_STATUS_SUCCESS",
+	"IO_STATUS_FAIL_CANNOT_IO",
+	"IO_STATUS_FAIL_RF_OFF",
+	"IO_STATUS_FAIL_FW_READ_CLEAR_TIMEOUT",
+	"IO_STATUS_FAIL_WAIT_IO_EVENT_TIMEOUT",
+	"IO_STATUS_INVALID_LEN",
+	"IO_STATUS_IO_IDLE_QUEUE_EMPTY",
+	"IO_STATUS_IO_INSERT_WAIT_QUEUE_FAIL",
+	"IO_STATUS_UNKNOWN_FAIL",
+	"IO_STATUS_WRONG_LEVEL",
+	"IO_STATUS_H2C_STOPPED",
+};
+
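+/* single global coexist object, bound to the wifi adapter in
+ * exhalbtc_initlize_variables()
+ */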
+struct btc_coexist gl_bt_coexist;
+
+u32 btc_dbg_type[BTC_MSG_MAX];
+u8 btc_dbg_buf[100];
+
+/***************************************************
+ *		Debug related function
+ ***************************************************/
+bool halbtc_is_bt_coexist_available(struct btc_coexist *btcoexist)
+{
+	if (!btcoexist->binded || NULL == btcoexist->adapter)
+		return false;
+	
+	return true;
+}
+
+bool halbtc_is_wifi_busy(struct rtl_priv *rtlpriv)
+{
+	if (rtlpriv->link_info.b_busytraffic)
+		return true;
+	else
+		return false;
+}
+
+
+void halbtc_dbg_init(void)
+{
+	u8 i;
+
+	for (i = 0; i < BTC_MSG_MAX; i++)
+		btc_dbg_type[i] = 0;
+
+	btc_dbg_type[BTC_MSG_INTERFACE] =
+			/* INTF_INIT | */
+			/* INTF_NOTIFY | */
+			0;
+
+	btc_dbg_type[BTC_MSG_ALGORITHM] =
+			/* ALGO_BT_RSSI_STATE | */
+			/* ALGO_WIFI_RSSI_STATE | */
+			/* ALGO_BT_MONITOR | */
+			/* ALGO_TRACE | */
+			/* ALGO_TRACE_FW | */
+			/* ALGO_TRACE_FW_DETAIL | */
+			/* ALGO_TRACE_FW_EXEC | */
+			/* ALGO_TRACE_SW | */
+			/* ALGO_TRACE_SW_DETAIL | */
+			/* ALGO_TRACE_SW_EXEC | */
+			0;
+}
+
+bool halbtc_is_hw_mailbox_exist(struct btc_coexist *btcoexist)
+{
+	return true;
+}
+
+bool halbtc_is_bt40(struct rtl_priv *adapter)
+{
+	struct rtl_priv *rtlpriv = adapter;
+	struct rtl_phy *rtlphy = &(rtlpriv->phy);
+	bool is_ht40 = true;
+	enum ht_channel_width bw = rtlphy->current_chan_bw;
+
+	if (bw == HT_CHANNEL_WIDTH_20)
+		is_ht40 = false;
+	else if (bw == HT_CHANNEL_WIDTH_20_40)
+		is_ht40 = true;
+
+	return is_ht40;
+}
+
+bool halbtc_legacy(struct rtl_priv *adapter)
+{
+	struct rtl_priv *rtlpriv = adapter;
+	struct rtl_mac *mac = rtl_mac(rtlpriv);
+	
+	bool is_legacy = false;
+
+	if ((mac->mode == WIRELESS_MODE_B) || (mac->mode == WIRELESS_MODE_G))
+		is_legacy = true;
+
+	return is_legacy;
+}
+
+bool halbtc_is_wifi_uplink(struct rtl_priv *adapter)
+{
+	struct rtl_priv *rtlpriv = adapter;
+	
+	if (rtlpriv->link_info.b_tx_busy_traffic)
+		return true;
+	else
+		return false;
+}
+
+u32 halbtc_get_wifi_bw(struct btc_coexist *btcoexist)
+{
+	struct rtl_priv *rtlpriv = (struct rtl_priv *)btcoexist->adapter;
+	u32 wifi_bw = BTC_WIFI_BW_HT20;
+
+	if (halbtc_is_bt40(rtlpriv)) {
+		wifi_bw = BTC_WIFI_BW_HT40;
+	} else {
+		if (halbtc_legacy(rtlpriv))
+			wifi_bw = BTC_WIFI_BW_LEGACY;
+		else
+			wifi_bw = BTC_WIFI_BW_HT20;
+	}
+	return wifi_bw;
+}
+
+u8 halbtc_get_wifi_central_chnl(struct btc_coexist *btcoexist)
+{
+	struct rtl_priv *rtlpriv = btcoexist->adapter;
+	struct rtl_phy *rtlphy = &(rtlpriv->phy);
+	u8 chnl = 1;
+
+	if (rtlphy->current_channel != 0)
+		chnl = rtlphy->current_channel;
+	BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
+		  "halbtc_get_wifi_central_chnl:%d\n", chnl);
+	return chnl;
+}
+
+void halbtc_leave_lps(struct btc_coexist *btcoexist)
+{
+	struct rtl_priv *rtlpriv;
+	struct rtl_ps_ctl *ppsc;
+	bool ap_enable = false;
+	
+	rtlpriv = btcoexist->adapter;
+	ppsc = rtl_psc(rtlpriv);
+	
+	btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_AP_MODE_ENABLE,
+			   &ap_enable);
+	
+	if (ap_enable) {
+		printk("halbtc_leave_lps()<--dont leave lps under AP mode\n");
+		return;
+	}
+
+	btcoexist->bt_info.bt_ctrl_lps = true;
+	btcoexist->bt_info.bt_lps_on = false;
+}
+
+void halbtc_enter_lps(struct btc_coexist *btcoexist)
+{
+	struct rtl_priv *rtlpriv;
+	struct rtl_ps_ctl *ppsc;
+	bool ap_enable = false;
+	
+	rtlpriv = btcoexist->adapter;
+	ppsc = rtl_psc(rtlpriv);
+	
+	btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_AP_MODE_ENABLE, 
+			   &ap_enable);
+	
+	if (ap_enable) {
+		printk("halbtc_enter_lps()<--dont enter lps under AP mode\n");
+		return;
+	}
+	
+	btcoexist->bt_info.bt_ctrl_lps = true;
+	btcoexist->bt_info.bt_lps_on = true;
+}
+
+void halbtc_normal_lps(struct btc_coexist *btcoexist)
+{
+	if (btcoexist->bt_info.bt_ctrl_lps) {
+		btcoexist->bt_info.bt_lps_on = false;
+		btcoexist->bt_info.bt_ctrl_lps = false;
+	}
+}
+
+void halbtc_leave_low_power(void)
+{
+}
+
+void halbtc_nomal_low_power(void)
+{
+}
+
+void halbtc_disable_low_power(void)
+{
+}
+
+void halbtc_aggregation_check(void)
+{
+}
+
+
+u32 halbtc_get_bt_patch_version(struct btc_coexist *btcoexist)
+{
+	return 0;
+}
+
+s32 halbtc_get_wifi_rssi(struct rtl_priv *adapter)
+{
+	struct rtl_priv *rtlpriv = adapter;
+	s32	undecorated_smoothed_pwdb = 0;
+	
+	if (rtlpriv->mac80211.link_state >= MAC80211_LINKED)
+		undecorated_smoothed_pwdb =
+			rtlpriv->dm.undecorated_smoothed_pwdb;
+	else /* associated entry pwdb */
+		undecorated_smoothed_pwdb =
+			rtlpriv->dm.undecorated_smoothed_pwdb;
+	return undecorated_smoothed_pwdb;
+}
+
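+/* Generic getter callback used by the chip specific algorithms; out_buf
+ * is interpreted according to get_type (bool / s32 / u32 / u8).
+ */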
+bool halbtc_get(void *void_btcoexist, u8 get_type, void *out_buf)
+{
+	struct btc_coexist *btcoexist = (struct btc_coexist *)void_btcoexist;
+	struct rtl_priv *rtlpriv = btcoexist->adapter;
+	struct rtl_phy *rtlphy = &(rtlpriv->phy);
+	struct rtl_mac *mac = rtl_mac(rtlpriv);
+	struct rtl_hal *rtlhal = rtl_hal(rtlpriv);
+	bool *bool_tmp = (bool *)out_buf;
+	int *s32_tmp = (int *)out_buf;
+	u32 *u32_tmp = (u32 *)out_buf;
+	u8 *u8_tmp = (u8 *)out_buf;
+	bool tmp = false;
+
+
+	if (!halbtc_is_bt_coexist_available(btcoexist))
+		return false;
+	
+
+	switch (get_type) {
+	case BTC_GET_BL_HS_OPERATION:
+		*bool_tmp = false;
+		break;
+	case BTC_GET_BL_HS_CONNECTING:
+		*bool_tmp = false;
+		break;
+	case BTC_GET_BL_WIFI_CONNECTED:
+		if (rtlpriv->mac80211.link_state >= MAC80211_LINKED)
+			tmp = true;
+
+		*bool_tmp = tmp;
+		break;
+	case BTC_GET_BL_WIFI_BUSY:
+		if (halbtc_is_wifi_busy(rtlpriv))
+			*bool_tmp = true;
+		else
+			*bool_tmp = false;
+		break;
+	case BTC_GET_BL_WIFI_SCAN:
+		if (mac->act_scanning)
+			*bool_tmp = true;
+		else
+			*bool_tmp = false;
+		break;
+	case BTC_GET_BL_WIFI_LINK:
+		if (mac->link_state == MAC80211_LINKING)
+			*bool_tmp = true;
+		else
+			*bool_tmp = false;
+		break;
+	case BTC_GET_BL_WIFI_ROAM:	/*TODO*/
+		if (mac->link_state == MAC80211_LINKING)
+			*bool_tmp = true;
+		else
+			*bool_tmp = false;
+		break;
+	case BTC_GET_BL_WIFI_4_WAY_PROGRESS:	/*TODO*/
+			*bool_tmp = false;
+
+		break;
+	case BTC_GET_BL_WIFI_UNDER_5G:
+		*bool_tmp = false; /*TODO*/
+		break;
+	case BTC_GET_BL_WIFI_DHCP:	/*TODO*/
+		break;
+	case BTC_GET_BL_WIFI_SOFTAP_IDLE:
+		*bool_tmp = true;
+		break;
+	case BTC_GET_BL_WIFI_SOFTAP_LINKING:
+		*bool_tmp = false;
+		break;
+	case BTC_GET_BL_WIFI_IN_EARLY_SUSPEND:
+		*bool_tmp = false;
+		break;
+	case BTC_GET_BL_WIFI_AP_MODE_ENABLE:
+		*bool_tmp = false;
+		break;
+	case BTC_GET_BL_WIFI_ENABLE_ENCRYPTION:
+		if (NO_ENCRYPTION == rtlpriv->sec.pairwise_enc_algorithm)
+			*bool_tmp = false;
+		else
+			*bool_tmp = true;
+		break;
+	case BTC_GET_BL_WIFI_UNDER_B_MODE:
+		*bool_tmp = false; /*TODO*/
+		break;
+	case BTC_GET_BL_EXT_SWITCH:
+		*bool_tmp = false;
+		break;
+	case BTC_GET_S4_WIFI_RSSI:
+		*s32_tmp = halbtc_get_wifi_rssi(rtlpriv);
+		break;
+	case BTC_GET_S4_HS_RSSI:	/*TODO*/
+		*s32_tmp = halbtc_get_wifi_rssi(rtlpriv);
+		break;
+	case BTC_GET_U4_WIFI_BW:
+		*u32_tmp = halbtc_get_wifi_bw(btcoexist);
+		break;
+	case BTC_GET_U4_WIFI_TRAFFIC_DIRECTION:
+		if (halbtc_is_wifi_uplink(rtlpriv))
+			*u32_tmp = BTC_WIFI_TRAFFIC_TX;
+		else
+			*u32_tmp = BTC_WIFI_TRAFFIC_RX;
+		break;
+	case BTC_GET_U4_WIFI_FW_VER:
+		*u32_tmp = rtlhal->fw_version;
+		break;
+	case BTC_GET_U4_BT_PATCH_VER:
+		*u32_tmp = halbtc_get_bt_patch_version(btcoexist);
+		break;
+	case BTC_GET_U1_WIFI_DOT11_CHNL:
+		*u8_tmp = rtlphy->current_channel;
+		break;
+	case BTC_GET_U1_WIFI_CENTRAL_CHNL:
+		*u8_tmp = halbtc_get_wifi_central_chnl(btcoexist);
+		break;
+	case BTC_GET_U1_WIFI_HS_CHNL:
+		*u8_tmp = 1;/* BT_OperateChnl(rtlpriv); */
+		break;
+	case BTC_GET_U1_MAC_PHY_MODE:
+		*u8_tmp = BTC_MP_UNKNOWN;
+		break;
+	case BTC_GET_U1_AP_NUM:
+		/* the driver does not know the AP count in Linux,
+		 * so the value returned here is not accurate
+		 */
+		*u8_tmp = 1;/* pDefMgntInfo->NumBssDesc4Query; */
+		break;
+
+	/************* 1Ant **************/
+	case BTC_GET_U1_LPS_MODE:
+		*u8_tmp = btcoexist->pwr_mode_val[0];
+		break;
+
+	default:
+		break;
+	}
+	
+	return true;
+}
+
+bool halbtc_set(void *void_btcoexist, u8 set_type, void *in_buf)
+{
+	struct btc_coexist *btcoexist = (struct btc_coexist *)void_btcoexist;
+	bool *bool_tmp = (bool *)in_buf;
+	u8 *u8_tmp = (u8 *)in_buf;
+	u32 *u32_tmp = (u32 *)in_buf;
+	
+	
+	if (!halbtc_is_bt_coexist_available(btcoexist))
+		return false;
+
+	switch (set_type) {
+	/* set some bool type variables. */
+	case BTC_SET_BL_BT_DISABLE:
+		btcoexist->bt_info.bt_disabled = *bool_tmp;
+		break;
+	case BTC_SET_BL_BT_TRAFFIC_BUSY:
+		btcoexist->bt_info.bt_busy = *bool_tmp;
+		break;
+	case BTC_SET_BL_BT_LIMITED_DIG:
+		btcoexist->bt_info.limited_dig = *bool_tmp;
+		break;
+	case BTC_SET_BL_FORCE_TO_ROAM:
+		btcoexist->bt_info.force_to_roam = *bool_tmp;
+		break;
+	case BTC_SET_BL_TO_REJ_AP_AGG_PKT:
+		btcoexist->bt_info.reject_agg_pkt = *bool_tmp;
+		break;
+	case BTC_SET_BL_BT_CTRL_AGG_SIZE:
+		btcoexist->bt_info.b_bt_ctrl_buf_size = *bool_tmp;
+		break;
+	case BTC_SET_BL_INC_SCAN_DEV_NUM:
+		btcoexist->bt_info.increase_scan_dev_num = *bool_tmp;
+		break;
+		/* set some u1Byte type variables. */
+	case BTC_SET_U1_RSSI_ADJ_VAL_FOR_AGC_TABLE_ON:
+		btcoexist->bt_info.rssi_adjust_for_agc_table_on = *u8_tmp;
+		break;
+	case BTC_SET_U1_AGG_BUF_SIZE:
+		btcoexist->bt_info.agg_buf_size = *u8_tmp;
+		break;
+		/* the following are some action which will be triggered */
+	case BTC_SET_ACT_GET_BT_RSSI:
+		/*BTHCI_SendGetBtRssiEvent(rtlpriv);*/
+		break;
+	case BTC_SET_ACT_AGGREGATE_CTRL:
+		halbtc_aggregation_check();
+		break;
+
+		/* 1Ant */
+	case BTC_SET_U1_RSSI_ADJ_VAL_FOR_1ANT_COEX_TYPE:
+		btcoexist->bt_info.rssi_adjust_for_1ant_coex_type = *u8_tmp;
+		break;
+	case BTC_SET_UI_SCAN_SIG_COMPENSATION:
+	/*	rtlpriv->mlmepriv.scan_compensation = *u8_tmp;  */
+		break;
+	case BTC_SET_U1_1ANT_LPS:
+		btcoexist->bt_info.lps_1ant = *u8_tmp;
+		break;
+	case BTC_SET_U1_1ANT_RPWM:
+		btcoexist->bt_info.rpwm_1ant = *u8_tmp;
+		break;
+	/* the following are some action which will be triggered  */
+	case BTC_SET_ACT_LEAVE_LPS:
+		halbtc_leave_lps(btcoexist);
+		break;
+	case BTC_SET_ACT_ENTER_LPS:
+		halbtc_enter_lps(btcoexist);
+		break;
+	case BTC_SET_ACT_NORMAL_LPS:
+		halbtc_normal_lps(btcoexist);
+		break;
+	case BTC_SET_ACT_DISABLE_LOW_POWER:
+		halbtc_disable_low_power();
+		break;
+	case BTC_SET_ACT_UPDATE_ra_mask:
+		btcoexist->bt_info.ra_mask = *u32_tmp;
+		break;
+	case BTC_SET_ACT_SEND_MIMO_PS:
+		break;
+	case BTC_SET_ACT_INC_FORCE_EXEC_PWR_CMD_CNT:
+		btcoexist->bt_info.force_exec_pwr_cmd_cnt++;
+		break;
+	case BTC_SET_ACT_CTRL_BT_INFO: /*wait for 8812/8821*/
+		break;
+	case BTC_SET_ACT_CTRL_BT_COEX:
+		break;
+	default:
+		break;
+	}
+	
+	return true;
+}
+
+void halbtc_display_coex_statistics(struct btc_coexist *btcoexist)
+{
+}
+
+void halbtc_display_bt_link_info(struct btc_coexist *btcoexist)
+{
+}
+
+void halbtc_display_bt_fw_info(struct btc_coexist *btcoexist)
+{
+}
+
+void halbtc_display_fw_pwr_mode_cmd(struct btc_coexist *btcoexist)
+{
+}
+
+/************************************************************
+ *		IO related function
+ ************************************************************/
+u8 halbtc_read_1byte(void *bt_context, u32 reg_addr)
+{
+	struct btc_coexist *btcoexist = (struct btc_coexist *)bt_context;
+	struct rtl_priv *rtlpriv = btcoexist->adapter;
+	
+	return	rtl_read_byte(rtlpriv, reg_addr);
+}
+
+
+u16 halbtc_read_2byte(void *bt_context, u32 reg_addr)
+{
+	struct btc_coexist *btcoexist = (struct btc_coexist *)bt_context;
+	struct rtl_priv *rtlpriv = btcoexist->adapter;
+
+	return	rtl_read_word(rtlpriv, reg_addr);
+}
+
+
+u32 halbtc_read_4byte(void *bt_context, u32 reg_addr)
+{
+	struct btc_coexist *btcoexist = (struct btc_coexist *)bt_context;
+	struct rtl_priv *rtlpriv = btcoexist->adapter;
+	
+	return	rtl_read_dword(rtlpriv, reg_addr);
+}
+
+
+void halbtc_write_1byte(void *bt_context, u32 reg_addr, u8 data)
+{
+	struct btc_coexist *btcoexist = (struct btc_coexist *)bt_context;
+	struct rtl_priv *rtlpriv = btcoexist->adapter;
+
+	rtl_write_byte(rtlpriv, reg_addr, data);
+}
+
+void halbtc_bitmask_write_1byte(void *bt_context, u32 reg_addr,
+				u8 bit_mask, u8 data)
+{
+	struct btc_coexist *btcoexist = (struct btc_coexist *)bt_context;
+	struct rtl_priv *rtlpriv = btcoexist->adapter;
+	u8 original_value, bit_shift = 0;
+	u8 i;
+
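+	/* read-modify-write: locate the lowest set bit of the mask, shift
+	 * the data into place and merge it with the current register value
+	 */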
+	if (bit_mask != MASKDWORD) { /* if not "double word" write */
+		original_value = rtl_read_byte(rtlpriv, reg_addr);
+		for (i = 0; i <= 7; i++) {
+			if ((bit_mask >> i) & 0x1)
+				break;
+		}
+		bit_shift = i;
+		data = (original_value & (~bit_mask)) |
+			((data << bit_shift) & bit_mask);
+	}
+	rtl_write_byte(rtlpriv, reg_addr, data);
+}
+
+
+void halbtc_write_2byte(void *bt_context, u32 reg_addr, u16 data)
+{
+	struct btc_coexist *btcoexist = (struct btc_coexist *)bt_context;
+	struct rtl_priv *rtlpriv = btcoexist->adapter;
+
+	rtl_write_word(rtlpriv, reg_addr, data);
+}
+
+
+void halbtc_write_4byte(void *bt_context, u32 reg_addr, u32 data)
+{
+	struct btc_coexist *btcoexist = (struct btc_coexist *)bt_context;
+	struct rtl_priv *rtlpriv = btcoexist->adapter;
+
+	rtl_write_dword(rtlpriv, reg_addr, data);
+}
+
+
+void halbtc_set_macreg(void *bt_context, u32 reg_addr, u32 bit_mask, u32 data)
+{
+	struct btc_coexist *btcoexist = (struct btc_coexist *)bt_context;
+	struct rtl_priv *rtlpriv = btcoexist->adapter;
+
+	rtl_set_bbreg(rtlpriv->mac80211.hw, reg_addr, bit_mask, data);
+}
+
+
+u32 halbtc_get_macreg(void *bt_context, u32 reg_addr, u32 bit_mask)
+{
+	struct btc_coexist *btcoexist = (struct btc_coexist *)bt_context;
+	struct rtl_priv *rtlpriv = btcoexist->adapter;
+
+	return rtl_get_bbreg(rtlpriv->mac80211.hw, reg_addr, bit_mask);
+}
+
+
+void halbtc_set_bbreg(void *bt_context, u32 reg_addr, u32 bit_mask, u32 data)
+{
+	struct btc_coexist *btcoexist = (struct btc_coexist *)bt_context;
+	struct rtl_priv *rtlpriv = btcoexist->adapter;
+
+	rtl_set_bbreg(rtlpriv->mac80211.hw, reg_addr, bit_mask, data);
+}
+
+
+u32 halbtc_get_bbreg(void *bt_context, u32 reg_addr, u32 bit_mask)
+{
+	struct btc_coexist *btcoexist = (struct btc_coexist *)bt_context;
+	struct rtl_priv *rtlpriv = btcoexist->adapter;
+
+	return rtl_get_bbreg(rtlpriv->mac80211.hw, reg_addr, bit_mask);
+}
+
+
+void halbtc_set_rfreg(void *bt_context, u8 rf_path, u32 reg_addr, 
+		      u32 bit_mask, u32 data)
+{
+	struct btc_coexist *btcoexist = (struct btc_coexist *)bt_context;
+	struct rtl_priv *rtlpriv = btcoexist->adapter;
+
+	rtl_set_rfreg(rtlpriv->mac80211.hw, rf_path, reg_addr, bit_mask, data);
+}
+
+
+u32 halbtc_get_rfreg(void *bt_context, u8 rf_path, u32 reg_addr, u32 bit_mask)
+{
+	struct btc_coexist *btcoexist = (struct btc_coexist *)bt_context;
+	struct rtl_priv *rtlpriv = btcoexist->adapter;
+
+	return rtl_get_rfreg(rtlpriv->mac80211.hw, rf_path, reg_addr, bit_mask);
+}
+
+
+void halbtc_fill_h2c_cmd(void *bt_context, u8 element_id, 
+			 u32 cmd_len, u8 *cmd_buf)
+{
+	struct btc_coexist *btcoexist = (struct btc_coexist *)bt_context;
+	struct rtl_priv *rtlpriv = btcoexist->adapter;
+	
+	rtlpriv->cfg->ops->fill_h2c_cmd(rtlpriv->mac80211.hw, element_id, 
+					cmd_len, cmd_buf);
+}
+
+void halbtc_display_dbg_msg(void *bt_context, u8 disp_type)
+{
+	struct btc_coexist *btcoexist =	(struct btc_coexist *)bt_context;
+	switch (disp_type) {
+	case BTC_DBG_DISP_COEX_STATISTICS:
+		halbtc_display_coex_statistics(btcoexist);
+		break;
+	case BTC_DBG_DISP_BT_LINK_INFO:
+		halbtc_display_bt_link_info(btcoexist);
+		break;
+	case BTC_DBG_DISP_BT_FW_VER:
+		halbtc_display_bt_fw_info(btcoexist);
+		break;
+	case BTC_DBG_DISP_FW_PWR_MODE_CMD:
+		halbtc_display_fw_pwr_mode_cmd(btcoexist);
+		break;
+	default:
+		break;
+	}
+}
+
+bool halbtc_under_ips(struct btc_coexist *btcoexist)
+{
+	struct rtl_priv *rtlpriv = btcoexist->adapter;
+	struct rtl_ps_ctl *ppsc = rtl_psc(rtlpriv);
+	enum rf_pwrstate rtstate;
+	
+	if (ppsc->b_inactiveps) {
+		rtstate = ppsc->rfpwr_state;
+
+		if (rtstate != ERFON &&
+		    ppsc->rfoff_reason == RF_CHANGE_BY_IPS) {
+
+			return true;
+		}
+	}
+
+	return false;
+}
+
+/*****************************************************************
+ *         Extern functions called by other module
+ *****************************************************************/
+bool exhalbtc_initlize_variables(struct rtl_priv *adapter)
+{
+	struct btc_coexist *btcoexist = &gl_bt_coexist;
+
+	btcoexist->statistics.cnt_bind++;
+	
+	halbtc_dbg_init();
+
+	if (btcoexist->binded)
+		return false;
+	else
+		btcoexist->binded = true;
+
+#if defined(CONFIG_PCI_HCI)
+	btcoexist->chip_interface = BTC_INTF_PCI;
+#elif defined(CONFIG_USB_HCI)
+	btcoexist->chip_interface = BTC_INTF_USB;
+#elif defined(CONFIG_SDIO_HCI)
+	btcoexist->chip_interface = BTC_INTF_SDIO;
+#elif defined(CONFIG_GSPI_HCI)
+	btcoexist->chip_interface = BTC_INTF_GSPI;
+#else
+	btcoexist->chip_interface = BTC_INTF_UNKNOWN;
+#endif
+
+	if (NULL == btcoexist->adapter)
+		btcoexist->adapter = adapter;
+
+	btcoexist->stack_info.profile_notified = false;
+
+	btcoexist->btc_read_1byte = halbtc_read_1byte;
+	btcoexist->btc_write_1byte = halbtc_write_1byte;
+	btcoexist->btc_write_1byte_bitmask = halbtc_bitmask_write_1byte;
+	btcoexist->btc_read_2byte = halbtc_read_2byte;
+	btcoexist->btc_write_2byte = halbtc_write_2byte;
+	btcoexist->btc_read_4byte = halbtc_read_4byte;
+	btcoexist->btc_write_4byte = halbtc_write_4byte;
+
+	btcoexist->btc_set_bb_reg = halbtc_set_bbreg;
+	btcoexist->btc_get_bb_reg = halbtc_get_bbreg;
+
+	btcoexist->btc_set_rf_reg = halbtc_set_rfreg;
+	btcoexist->btc_get_rf_reg = halbtc_get_rfreg;
+
+	btcoexist->btc_fill_h2c = halbtc_fill_h2c_cmd;
+	btcoexist->btc_disp_dbg_msg = halbtc_display_dbg_msg;
+	
+	btcoexist->btc_get = halbtc_get;
+	btcoexist->btc_set = halbtc_set;
+
+	btcoexist->cli_buf = &btc_dbg_buf[0];
+
+	btcoexist->bt_info.b_bt_ctrl_buf_size = false;
+	btcoexist->bt_info.agg_buf_size = 5;
+
+	btcoexist->bt_info.increase_scan_dev_num = false;
+	return true;
+}
+
+void exhalbtc_init_hw_config(struct btc_coexist *btcoexist)
+{
+	struct rtl_priv *rtlpriv = btcoexist->adapter;
+	struct rtl_hal *rtlhal = rtl_hal(rtlpriv);
+	
+	if (!halbtc_is_bt_coexist_available(btcoexist))
+		return;
+
+	btcoexist->statistics.cnt_init_hw_config++;
+
+	if (rtlhal->hw_type == HARDWARE_TYPE_RTL8723BE) {
+		if (btcoexist->board_info.btdm_ant_num == 2)
+			ex_halbtc8723b2ant_init_hwconfig(btcoexist);
+		else if (btcoexist->board_info.btdm_ant_num == 1)
+			ex_halbtc8723b1ant_init_hwconfig(btcoexist);
+	} else if (rtlhal->hw_type == HARDWARE_TYPE_RTL8192EE) {
+		ex_halbtc8192e2ant_init_hwconfig(btcoexist);
+	}
+}
+
+void exhalbtc_init_coex_dm(struct btc_coexist *btcoexist)
+{
+	struct rtl_priv *rtlpriv = btcoexist->adapter;
+	struct rtl_hal *rtlhal = rtl_hal(rtlpriv);
+	
+	if (!halbtc_is_bt_coexist_available(btcoexist))
+		return;
+
+	btcoexist->statistics.cnt_init_coex_dm++;
+
+	if (rtlhal->hw_type == HARDWARE_TYPE_RTL8723BE) {
+		if (btcoexist->board_info.btdm_ant_num == 2)
+			ex_halbtc8723b2ant_init_coex_dm(btcoexist);
+		else if (btcoexist->board_info.btdm_ant_num == 1)
+			ex_halbtc8723b1ant_init_coex_dm(btcoexist);
+	} else if (rtlhal->hw_type == HARDWARE_TYPE_RTL8192EE) {
+		ex_halbtc8192e2ant_init_coex_dm(btcoexist);
+	}
+	
+	btcoexist->initilized = true;
+}
+
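+/* The notify wrappers below translate rtlwifi events into BTC_* types and
+ * dispatch to the algorithm that matches the chip and antenna count.
+ */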
+void exhalbtc_ips_notify(struct btc_coexist *btcoexist, u8 type)
+{
+	struct rtl_priv *rtlpriv = btcoexist->adapter;
+	struct rtl_hal *rtlhal = rtl_hal(rtlpriv);
+	u8 ips_type;
+
+	if (!halbtc_is_bt_coexist_available(btcoexist))
+		return;
+	btcoexist->statistics.cnt_ips_notify++;
+	if (btcoexist->manual_control)
+		return;
+
+	if (ERFOFF == type)
+		ips_type = BTC_IPS_ENTER;
+	else
+		ips_type = BTC_IPS_LEAVE;
+
+	halbtc_leave_low_power();
+	
+	if (rtlhal->hw_type == HARDWARE_TYPE_RTL8723BE) {
+		if (btcoexist->board_info.btdm_ant_num == 2)
+			ex_halbtc8723b2ant_ips_notify(btcoexist, ips_type);
+		else if (btcoexist->board_info.btdm_ant_num == 1)
+			ex_halbtc8723b1ant_ips_notify(btcoexist, ips_type);
+	} else if (rtlhal->hw_type == HARDWARE_TYPE_RTL8192EE) {
+		ex_halbtc8192e2ant_ips_notify(btcoexist, ips_type);
+	}
+
+	halbtc_nomal_low_power();
+}
+
+void exhalbtc_lps_notify(struct btc_coexist *btcoexist, u8 type)
+{
+	struct rtl_priv *rtlpriv = btcoexist->adapter;
+	struct rtl_hal *rtlhal = rtl_hal(rtlpriv);
+	u8 lps_type;
+
+	if (!halbtc_is_bt_coexist_available(btcoexist))
+		return;
+	btcoexist->statistics.cnt_lps_notify++;
+	if (btcoexist->manual_control)
+		return;
+	
+	if (EACTIVE == type)
+		lps_type = BTC_LPS_DISABLE;
+	else
+		lps_type = BTC_LPS_ENABLE;
+
+	if (rtlhal->hw_type == HARDWARE_TYPE_RTL8723BE) {
+		if (btcoexist->board_info.btdm_ant_num == 2)
+			ex_halbtc8723b2ant_lps_notify(btcoexist, lps_type);
+		else if (btcoexist->board_info.btdm_ant_num == 1)
+			ex_halbtc8723b1ant_lps_notify(btcoexist, lps_type);
+	} else if (rtlhal->hw_type == HARDWARE_TYPE_RTL8192EE) {
+		ex_halbtc8192e2ant_lps_notify(btcoexist, lps_type);
+	}
+}
+
+void exhalbtc_scan_notify(struct btc_coexist *btcoexist, u8 type)
+{
+	struct rtl_priv *rtlpriv = btcoexist->adapter;
+	struct rtl_hal *rtlhal = rtl_hal(rtlpriv);
+	u8 scan_type;
+
+	if (!halbtc_is_bt_coexist_available(btcoexist))
+		return;
+	btcoexist->statistics.cnt_scan_notify++;
+	if (btcoexist->manual_control)
+		return;
+
+	if (type)
+		scan_type = BTC_SCAN_START;
+	else
+		scan_type = BTC_SCAN_FINISH;
+
+	halbtc_leave_low_power();
+	
+	if (rtlhal->hw_type == HARDWARE_TYPE_RTL8723BE) {
+		if (btcoexist->board_info.btdm_ant_num == 2)
+			ex_halbtc8723b2ant_scan_notify(btcoexist, scan_type);
+		else if (btcoexist->board_info.btdm_ant_num == 1)
+			ex_halbtc8723b1ant_scan_notify(btcoexist, scan_type);
+	} else if (rtlhal->hw_type == HARDWARE_TYPE_RTL8192EE) {
+		ex_halbtc8192e2ant_scan_notify(btcoexist, scan_type);
+	}
+
+	halbtc_nomal_low_power();
+}
+
+void exhalbtc_connect_notify(struct btc_coexist *btcoexist, u8 action)
+{
+	struct rtl_priv *rtlpriv = btcoexist->adapter;
+	struct rtl_hal *rtlhal = rtl_hal(rtlpriv);
+	u8 asso_type;
+
+	if (!halbtc_is_bt_coexist_available(btcoexist))
+		return;
+	btcoexist->statistics.cnt_connect_notify++;
+	if (btcoexist->manual_control)
+		return;
+
+	if (action)
+		asso_type = BTC_ASSOCIATE_START;
+	else
+		asso_type = BTC_ASSOCIATE_FINISH;
+
+	halbtc_leave_low_power();
+	
+	if (rtlhal->hw_type == HARDWARE_TYPE_RTL8723BE) {
+		if (btcoexist->board_info.btdm_ant_num == 2)
+			ex_halbtc8723b2ant_connect_notify(btcoexist, asso_type);
+		else if (btcoexist->board_info.btdm_ant_num == 1)
+			ex_halbtc8723b1ant_connect_notify(btcoexist, asso_type);
+	} else if (rtlhal->hw_type == HARDWARE_TYPE_RTL8192EE) {
+		ex_halbtc8192e2ant_connect_notify(btcoexist, asso_type);
+	}
+
+	halbtc_nomal_low_power();
+}
+
+void exhalbtc_mediastatus_notify(struct btc_coexist *btcoexist, 
+				 enum rt_media_status media_status)
+{
+	struct rtl_priv *rtlpriv = btcoexist->adapter;
+	struct rtl_hal *rtlhal = rtl_hal(rtlpriv);
+	u8 status;
+
+	if (!halbtc_is_bt_coexist_available(btcoexist))
+		return;
+	btcoexist->statistics.cnt_media_status_notify++;
+	if (btcoexist->manual_control)
+		return;
+
+	if (RT_MEDIA_CONNECT == media_status)
+		status = BTC_MEDIA_CONNECT;
+	else
+		status = BTC_MEDIA_DISCONNECT;
+
+	halbtc_leave_low_power();
+	
+	if (rtlhal->hw_type == HARDWARE_TYPE_RTL8723BE) {
+		if (btcoexist->board_info.btdm_ant_num == 2)
+			ex_halbtc8723b2ant_media_status_notify(btcoexist,
+							       status);
+		else if (btcoexist->board_info.btdm_ant_num == 1)
+			ex_halbtc8723b1ant_media_status_notify(btcoexist,
+							       status);
+	} else if (rtlhal->hw_type == HARDWARE_TYPE_RTL8192EE) {
+		ex_halbtc8192e2ant_media_status_notify(btcoexist, status);
+	}
+
+	halbtc_nomal_low_power();
+}
+
+void exhalbtc_special_packet_notify(struct btc_coexist *btcoexist, u8 pkt_type)
+{
+	u8 packet_type;
+
+	if (!halbtc_is_bt_coexist_available(btcoexist))
+		return;
+	btcoexist->statistics.cnt_special_packet_notify++;
+	if (btcoexist->manual_control)
+		return;
+
+	/*if (PACKET_DHCP == pkt_type)*/
+	packet_type = BTC_PACKET_DHCP;
+	/*else if (PACKET_EAPOL == pkt_type)
+	 *	packet_type = BTC_PACKET_EAPOL;
+	 *else
+	 *	packet_type = BTC_PACKET_UNKNOWN;
+	 */
+
+	halbtc_leave_low_power();
+
+	if (btcoexist->board_info.btdm_ant_num == 2)
+		ex_halbtc8723b2ant_special_packet_notify(btcoexist,
+							 packet_type);
+	else if (btcoexist->board_info.btdm_ant_num == 1)
+		ex_halbtc8723b1ant_special_packet_notify(btcoexist,
+							 packet_type);
+
+	halbtc_nomal_low_power();
+}
+
+void exhalbtc_bt_info_notify(struct btc_coexist *btcoexist,
+			     u8 *tmp_buf, u8 length)
+{
+	struct rtl_priv *rtlpriv = btcoexist->adapter;
+	struct rtl_hal *rtlhal = rtl_hal(rtlpriv);
+	if (!halbtc_is_bt_coexist_available(btcoexist))
+		return;
+	btcoexist->statistics.cnt_bt_info_notify++;
+
+	if (rtlhal->hw_type == HARDWARE_TYPE_RTL8723BE) {
+		if (btcoexist->board_info.btdm_ant_num == 2)
+			ex_halbtc8723b2ant_bt_info_notify(btcoexist, tmp_buf, length);
+		else if (btcoexist->board_info.btdm_ant_num == 1)
+			ex_halbtc8723b1ant_bt_info_notify(btcoexist, tmp_buf, length);
+	} else if (rtlhal->hw_type == HARDWARE_TYPE_RTL8192EE) {
+		/* ex_halbtc8192e2ant_bt_info_notify(btcoexist, tmp_buf, length); */
+	}
+}
+
+void exhalbtc_stack_operation_notify(struct btc_coexist *btcoexist, u8 type)
+{
+	if (!halbtc_is_bt_coexist_available(btcoexist))
+		return;
+	btcoexist->statistics.cnt_stack_operation_notify++;
+	if (btcoexist->manual_control)
+		return;
+
+	/* Stack operation handling is not implemented yet. */
+}
+
+void exhalbtc_halt_notify(struct btc_coexist *btcoexist)
+{
+	struct rtl_priv *rtlpriv = btcoexist->adapter;
+	struct rtl_hal *rtlhal = rtl_hal(rtlpriv);
+	if (!halbtc_is_bt_coexist_available(btcoexist))
+		return;
+
+	if (rtlhal->hw_type == HARDWARE_TYPE_RTL8723BE) {
+		if (btcoexist->board_info.btdm_ant_num == 2)
+			ex_halbtc8723b2ant_halt_notify(btcoexist);
+		else if (btcoexist->board_info.btdm_ant_num == 1)
+			ex_halbtc8723b1ant_halt_notify(btcoexist);
+	} else if (rtlhal->hw_type == HARDWARE_TYPE_RTL8192EE) {
+		ex_halbtc8192e2ant_halt_notify(btcoexist);
+	}
+}
+
+void exhalbtc_pnp_notify(struct btc_coexist *btcoexist, u8 pnp_state)
+{
+	if (!halbtc_is_bt_coexist_available(btcoexist))
+		return;
+}
+
+void exhalbtc_periodical(struct btc_coexist *btcoexist)
+{
+	struct rtl_priv *rtlpriv = btcoexist->adapter;
+	struct rtl_hal *rtlhal = rtl_hal(rtlpriv);
+	if (!halbtc_is_bt_coexist_available(btcoexist))
+		return;
+	btcoexist->statistics.cnt_periodical++;
+
+	halbtc_leave_low_power();
+
+	if (rtlhal->hw_type == HARDWARE_TYPE_RTL8723BE) {
+		if (btcoexist->board_info.btdm_ant_num == 2)
+			ex_halbtc8723b2ant_periodical(btcoexist);
+		else if (btcoexist->board_info.btdm_ant_num == 1)
+			ex_halbtc8723b1ant_periodical(btcoexist);
+	} else if (rtlhal->hw_type == HARDWARE_TYPE_RTL8192EE) {
+		ex_halbtc8192e2ant_periodical(btcoexist);
+	}
+
+	halbtc_nomal_low_power();
+}
+
+void exhalbtc_dbg_control(struct btc_coexist *btcoexist,
+			  u8 code, u8 len, u8 *data)
+{
+	if (!halbtc_is_bt_coexist_available(btcoexist))
+		return;
+	btcoexist->statistics.cnt_dbg_ctrl++;
+}
+
+void exhalbtc_stack_update_profile_info(void)
+{
+}
+
+void exhalbtc_update_min_bt_rssi(char bt_rssi)
+{
+	struct btc_coexist *btcoexist = &gl_bt_coexist;
+
+	if (!halbtc_is_bt_coexist_available(btcoexist))
+		return;
+
+	btcoexist->stack_info.min_bt_rssi = bt_rssi;
+}
+
+void exhalbtc_set_hci_version(u16 hci_version)
+{
+	struct btc_coexist *btcoexist = &gl_bt_coexist;
+
+	if (!halbtc_is_bt_coexist_available(btcoexist))
+		return;
+
+	btcoexist->stack_info.hci_version = hci_version;
+}
+
+void exhalbtc_set_bt_patch_version(u16 bt_hci_version, u16 bt_patch_version)
+{
+	struct btc_coexist *btcoexist = &gl_bt_coexist;
+
+	if (!halbtc_is_bt_coexist_available(btcoexist))
+		return;
+
+	btcoexist->bt_info.bt_real_fw_ver = bt_patch_version;
+	btcoexist->bt_info.bt_hci_ver = bt_hci_version;
+}
+
+void exhalbtc_set_bt_exist(bool bt_exist)
+{
+	gl_bt_coexist.board_info.bt_exist = bt_exist;
+}
+
+void exhalbtc_set_chip_type(u8 chip_type)
+{
+	switch (chip_type) {
+	default:
+	case BT_2WIRE:
+	case BT_ISSC_3WIRE:
+	case BT_ACCEL:
+	case BT_RTL8756:
+		gl_bt_coexist.board_info.bt_chip_type = BTC_CHIP_UNDEF;
+		break;
+	case BT_CSR_BC4:
+		gl_bt_coexist.board_info.bt_chip_type = BTC_CHIP_CSR_BC4;
+		break;
+	case BT_CSR_BC8:
+		gl_bt_coexist.board_info.bt_chip_type = BTC_CHIP_CSR_BC8;
+		break;
+	case BT_RTL8723A:
+		gl_bt_coexist.board_info.bt_chip_type = BTC_CHIP_RTL8723A;
+		break;
+	case BT_RTL8821A:
+		gl_bt_coexist.board_info.bt_chip_type = BTC_CHIP_RTL8821;
+		break;
+	case BT_RTL8723B:
+		gl_bt_coexist.board_info.bt_chip_type = BTC_CHIP_RTL8723B;
+		break;
+	}
+}
+
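+/*
+ * Record the antenna count: a BT_COEX_ANT_TYPE_PG report sets both
+ * pg_ant_num and btdm_ant_num, while a BT_COEX_ANT_TYPE_ANTDIV report
+ * only overrides btdm_ant_num.
+ */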
+void exhalbtc_set_ant_num(u8 type, u8 ant_num)
+{
+	if (BT_COEX_ANT_TYPE_PG == type) {
+		gl_bt_coexist.board_info.pg_ant_num = ant_num;
+		gl_bt_coexist.board_info.btdm_ant_num = ant_num;
+	} else if (BT_COEX_ANT_TYPE_ANTDIV == type) {
+		gl_bt_coexist.board_info.btdm_ant_num = ant_num;
+	}
+}
+
+void exhalbtc_display_bt_coex_info(struct btc_coexist *btcoexist)
+{
+	if (!halbtc_is_bt_coexist_available(btcoexist))
+		return;
+
+	if (btcoexist->board_info.btdm_ant_num == 2)
+		ex_halbtc8723b2ant_display_coex_info(btcoexist);
+	else if (btcoexist->board_info.btdm_ant_num == 1)
+		ex_halbtc8723b1ant_display_coex_info(btcoexist);
+}
+
+#endif
+
diff --git a/drivers/staging/rtl8821ae/btcoexist/halbtcoutsrc.h b/drivers/staging/rtl8821ae/btcoexist/halbtcoutsrc.h
new file mode 100644
index 0000000..787798e
--- /dev/null
+++ b/drivers/staging/rtl8821ae/btcoexist/halbtcoutsrc.h
@@ -0,0 +1,549 @@
+#ifndef	__HALBTC_OUT_SRC_H__
+#define __HALBTC_OUT_SRC_H__
+
+#include	"../wifi.h"
+
+#define		NORMAL_EXEC				false
+#define		FORCE_EXEC				true
+
+#define		BTC_RF_A				RF90_PATH_A
+#define		BTC_RF_B				RF90_PATH_B
+#define		BTC_RF_C				RF90_PATH_C
+#define		BTC_RF_D				RF90_PATH_D
+
+#define		BTC_SMSP				SINGLEMAC_SINGLEPHY
+#define		BTC_DMDP				DUALMAC_DUALPHY
+#define		BTC_DMSP				DUALMAC_SINGLEPHY
+#define		BTC_MP_UNKNOWN				0xff
+
+#define 	IN
+#define 	OUT
+
+#define 	BT_TMP_BUF_SIZE 			100
+
+#define		BT_COEX_ANT_TYPE_PG			0
+#define		BT_COEX_ANT_TYPE_ANTDIV			1
+#define		BT_COEX_ANT_TYPE_DETECTED		2
+
+#define		BTC_MIMO_PS_STATIC			0
+#define		BTC_MIMO_PS_DYNAMIC			1
+
+#define		BTC_RATE_DISABLE			0
+#define		BTC_RATE_ENABLE				1
+
+/* single Antenna definition */
+#define		BTC_ANT_PATH_WIFI			0
+#define		BTC_ANT_PATH_BT				1
+#define		BTC_ANT_PATH_PTA			2
+/* dual Antenna definition */
+#define		BTC_ANT_WIFI_AT_MAIN			0
+#define		BTC_ANT_WIFI_AT_AUX			1
+/* coupler Antenna definition */
+#define		BTC_ANT_WIFI_AT_CPL_MAIN		0
+#define		BTC_ANT_WIFI_AT_CPL_AUX			1
+
+enum btc_chip_interface {
+	BTC_INTF_UNKNOWN	= 0,
+	BTC_INTF_PCI		= 1,
+	BTC_INTF_USB		= 2,
+	BTC_INTF_SDIO		= 3,
+	BTC_INTF_GSPI		= 4,
+	BTC_INTF_MAX
+};
+
+enum btc_chip_type {
+	BTC_CHIP_UNDEF		= 0,
+	BTC_CHIP_CSR_BC4	= 1,
+	BTC_CHIP_CSR_BC8	= 2,
+	BTC_CHIP_RTL8723A 	= 3,
+	BTC_CHIP_RTL8821	= 4,
+	BTC_CHIP_RTL8723B 	= 5,
+	BTC_CHIP_MAX
+};
+
+enum btc_msg_type {
+	BTC_MSG_INTERFACE	= 0x0,
+	BTC_MSG_ALGORITHM	= 0x1,
+	BTC_MSG_MAX
+};
+
+extern u32 btc_dbg_type[];
+
+/* following is for BTC_MSG_INTERFACE */
+#define		INTF_INIT				BIT0
+#define		INTF_NOTIFY				BIT2
+
+/* following is for BTC_ALGORITHM */
+#define		ALGO_BT_RSSI_STATE			BIT0
+#define		ALGO_WIFI_RSSI_STATE			BIT1
+#define		ALGO_BT_MONITOR				BIT2
+#define		ALGO_TRACE				BIT3
+#define		ALGO_TRACE_FW				BIT4
+#define		ALGO_TRACE_FW_DETAIL			BIT5
+#define		ALGO_TRACE_FW_EXEC			BIT6
+#define		ALGO_TRACE_SW				BIT7
+#define		ALGO_TRACE_SW_DETAIL			BIT8
+#define		ALGO_TRACE_SW_EXEC			BIT9
+
+
+
+#define	CL_SPRINTF	snprintf
+#define	CL_PRINTF	printk
+
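+/*
+ * Debug print helpers: a message is emitted only when the matching flag
+ * is set in btc_dbg_type[dbgtype].  BTC_PRINT_F prefixes the calling
+ * function name, BTC_PRINT_ADDR dumps a 6-byte address and
+ * BTC_PRINT_DATA hex-dumps an arbitrary buffer.
+ */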
+#define	BTC_PRINT(dbgtype, dbgflag, printstr, ...)		\
+	do { 							\
+		if (unlikely(btc_dbg_type[dbgtype] & dbgflag)) {\
+			printk(printstr, ##__VA_ARGS__);	\
+		}						\
+	} while (0)
+
+#define	BTC_PRINT_F(dbgtype, dbgflag, printstr, ...)		\
+	do {							\
+		if (unlikely(btc_dbg_type[dbgtype] & dbgflag)) {\
+			printk(KERN_DEBUG "%s: ", __func__);	\
+			printk(printstr, ##__VA_ARGS__);	\
+		}						\
+	} while (0)
+
+#define	BTC_PRINT_ADDR(dbgtype, dbgflag, printstr, _ptr)	\
+	do {							\
+		if (unlikely(btc_dbg_type[dbgtype] & dbgflag)) {\
+			int __i;				\
+			u8 *__ptr = (u8 *)(_ptr);		\
+			printk printstr;			\
+			for (__i = 0; __i < 6; __i++)		\
+				printk("%02X%s", __ptr[__i], (__i == 5) ? "" : "-");\
+			printk(KERN_DEBUG "\n");		\
+		}						\
+	} while (0)
+
+#define BTC_PRINT_DATA(dbgtype, dbgflag, _titlestring, _hexdata, _hexdatalen) \
+	do {								\
+		if (unlikely(btc_dbg_type[dbgtype] & dbgflag)) {	\
+			int __i;					\
+			u8 *__ptr = (u8 *)(_hexdata);			\
+			printk(_titlestring);				\
+			for (__i = 0; __i < (int)_hexdatalen; __i++) {	\
+				printk("%02X%s", __ptr[__i], (((__i + 1) % 4) \
+							== 0) ? "  " : " ");\
+				if (((__i + 1) % 16) == 0)		\
+					printk("\n");			\
+			}						\
+			printk(KERN_DEBUG "\n");			\
+		}							\
+	} while (0)
+
+
+#define	BTC_RSSI_HIGH(_rssi_) \
+	((_rssi_ == BTC_RSSI_STATE_HIGH || \
+	  _rssi_ == BTC_RSSI_STATE_STAY_HIGH) ? true : false)
+
+#define	BTC_RSSI_MEDIUM(_rssi_) \
+	((_rssi_ == BTC_RSSI_STATE_MEDIUM || \
+	  _rssi_ == BTC_RSSI_STATE_STAY_MEDIUM) ? true : false)
+
+#define	BTC_RSSI_LOW(_rssi_) \
+	((_rssi_ == BTC_RSSI_STATE_LOW || \
+	  _rssi_ == BTC_RSSI_STATE_STAY_LOW) ? true : false)
+
+
+enum btc_power_save_type {
+	BTC_PS_WIFI_NATIVE = 0,
+	BTC_PS_LPS_ON = 1,
+	BTC_PS_LPS_OFF = 2,
+	BTC_PS_LPS_MAX
+};
+
+struct btc_board_info {
+	/* The following is some board information */
+	u8 bt_chip_type;
+	u8 pg_ant_num;	/* pg ant number */
+	u8 btdm_ant_num;	/* ant number for btdm */
+	u8 btdm_ant_pos;
+	bool bt_exist;
+};
+
+enum btc_dbg_opcode {
+	BTC_DBG_SET_COEX_NORMAL = 0x0,
+	BTC_DBG_SET_COEX_WIFI_ONLY = 0x1,
+	BTC_DBG_SET_COEX_BT_ONLY = 0x2,
+	BTC_DBG_MAX
+};
+
+enum btc_rssi_state {
+	BTC_RSSI_STATE_HIGH = 0x0,
+	BTC_RSSI_STATE_MEDIUM = 0x1,
+	BTC_RSSI_STATE_LOW = 0x2,
+	BTC_RSSI_STATE_STAY_HIGH = 0x3,
+	BTC_RSSI_STATE_STAY_MEDIUM = 0x4,
+	BTC_RSSI_STATE_STAY_LOW = 0x5,
+	BTC_RSSI_MAX
+};
+
+enum btc_wifi_role {
+	BTC_ROLE_STATION = 0x0,
+	BTC_ROLE_AP = 0x1,
+	BTC_ROLE_IBSS = 0x2,
+	BTC_ROLE_HS_MODE = 0x3,
+	BTC_ROLE_MAX
+};
+
+enum btc_wifi_bw_mode {
+	BTC_WIFI_BW_LEGACY = 0x0,
+	BTC_WIFI_BW_HT20 = 0x1,
+	BTC_WIFI_BW_HT40 = 0x2,
+	BTC_WIFI_BW_MAX
+};
+
+enum btc_wifi_traffic_dir {
+	BTC_WIFI_TRAFFIC_TX = 0x0,
+	BTC_WIFI_TRAFFIC_RX = 0x1,
+	BTC_WIFI_TRAFFIC_MAX
+};
+
+enum btc_wifi_pnp {
+	BTC_WIFI_PNP_WAKE_UP = 0x0,
+	BTC_WIFI_PNP_SLEEP = 0x1,
+	BTC_WIFI_PNP_MAX
+};
+
+
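+/*
+ * Item identifiers passed to the btc_get callback; the groups below are
+ * ordered by the type of value written to the output buffer (bool,
+ * signed 32-bit, u32, u8).
+ */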
+enum btc_get_type {
+	/* type bool */
+	BTC_GET_BL_HS_OPERATION,
+	BTC_GET_BL_HS_CONNECTING,
+	BTC_GET_BL_WIFI_CONNECTED,
+	BTC_GET_BL_WIFI_BUSY,
+	BTC_GET_BL_WIFI_SCAN,
+	BTC_GET_BL_WIFI_LINK,
+	BTC_GET_BL_WIFI_DHCP,
+	BTC_GET_BL_WIFI_SOFTAP_IDLE,
+	BTC_GET_BL_WIFI_SOFTAP_LINKING,
+	BTC_GET_BL_WIFI_IN_EARLY_SUSPEND,
+	BTC_GET_BL_WIFI_ROAM,
+	BTC_GET_BL_WIFI_4_WAY_PROGRESS,
+	BTC_GET_BL_WIFI_UNDER_5G,
+	BTC_GET_BL_WIFI_AP_MODE_ENABLE,
+	BTC_GET_BL_WIFI_ENABLE_ENCRYPTION,
+	BTC_GET_BL_WIFI_UNDER_B_MODE,
+	BTC_GET_BL_EXT_SWITCH,
+
+	/* type s4Byte */
+	BTC_GET_S4_WIFI_RSSI,
+	BTC_GET_S4_HS_RSSI,
+
+	/* type u32 */
+	BTC_GET_U4_WIFI_BW,
+	BTC_GET_U4_WIFI_TRAFFIC_DIRECTION,
+	BTC_GET_U4_WIFI_FW_VER,
+	BTC_GET_U4_BT_PATCH_VER,
+
+	/* type u1Byte */
+	BTC_GET_U1_WIFI_DOT11_CHNL,
+	BTC_GET_U1_WIFI_CENTRAL_CHNL,
+	BTC_GET_U1_WIFI_HS_CHNL,
+	BTC_GET_U1_MAC_PHY_MODE,
+	BTC_GET_U1_AP_NUM,
+
+	/* for 1Ant */
+	BTC_GET_U1_LPS_MODE,
+	BTC_GET_BL_BT_SCO_BUSY,
+
+	/* for test mode */
+	BTC_GET_DRIVER_TEST_CFG,
+#if 0
+	BTC_GET_U1_LPS,
+	BTC_GET_U1_RPWM,
+#endif
+	BTC_GET_MAX
+};
+
+
+enum btc_set_type {
+	/* type bool */
+	BTC_SET_BL_BT_DISABLE,
+	BTC_SET_BL_BT_TRAFFIC_BUSY,
+	BTC_SET_BL_BT_LIMITED_DIG,
+	BTC_SET_BL_FORCE_TO_ROAM,
+	BTC_SET_BL_TO_REJ_AP_AGG_PKT,
+	BTC_SET_BL_BT_CTRL_AGG_SIZE,
+	BTC_SET_BL_INC_SCAN_DEV_NUM,
+
+	/* type u1Byte */
+	BTC_SET_U1_RSSI_ADJ_VAL_FOR_AGC_TABLE_ON,
+	BTC_SET_U1_RSSI_ADJ_VAL_FOR_1ANT_COEX_TYPE,
+	BTC_SET_UI_SCAN_SIG_COMPENSATION,
+	BTC_SET_U1_AGG_BUF_SIZE,
+
+	/* type trigger some action */
+	BTC_SET_ACT_GET_BT_RSSI,
+	BTC_SET_ACT_AGGREGATE_CTRL,
+
+	/********* for 1Ant **********/
+	/* type bool */
+	BTC_SET_BL_BT_SCO_BUSY,
+	/* type u1Byte */
+	BTC_SET_U1_1ANT_LPS,
+	BTC_SET_U1_1ANT_RPWM,
+	/* type trigger some action */
+	BTC_SET_ACT_LEAVE_LPS,
+	BTC_SET_ACT_ENTER_LPS,
+	BTC_SET_ACT_NORMAL_LPS,
+	BTC_SET_ACT_INC_FORCE_EXEC_PWR_CMD_CNT,
+	BTC_SET_ACT_DISABLE_LOW_POWER,
+	BTC_SET_ACT_UPDATE_ra_mask,
+	BTC_SET_ACT_SEND_MIMO_PS,
+	/* BT Coex related */
+	BTC_SET_ACT_CTRL_BT_INFO,
+	BTC_SET_ACT_CTRL_BT_COEX,
+	/***************************/
+	BTC_SET_MAX
+};
+
+enum btc_dbg_disp_type {
+	BTC_DBG_DISP_COEX_STATISTICS = 0x0,
+	BTC_DBG_DISP_BT_LINK_INFO = 0x1,
+	BTC_DBG_DISP_BT_FW_VER = 0x2,
+	BTC_DBG_DISP_FW_PWR_MODE_CMD = 0x3,
+	BTC_DBG_DISP_MAX
+};
+
+enum btc_notify_type_ips {
+	BTC_IPS_LEAVE = 0x0,
+	BTC_IPS_ENTER = 0x1,
+	BTC_IPS_MAX
+};
+
+enum btc_notify_type_lps {
+	BTC_LPS_DISABLE = 0x0,
+	BTC_LPS_ENABLE = 0x1,
+	BTC_LPS_MAX
+};
+
+enum btc_notify_type_scan {
+	BTC_SCAN_FINISH = 0x0,
+	BTC_SCAN_START = 0x1,
+	BTC_SCAN_MAX
+};
+
+enum btc_notify_type_associate {
+	BTC_ASSOCIATE_FINISH = 0x0,
+	BTC_ASSOCIATE_START = 0x1,
+	BTC_ASSOCIATE_MAX
+};
+
+enum btc_notify_type_media_status {
+	BTC_MEDIA_DISCONNECT = 0x0,
+	BTC_MEDIA_CONNECT = 0x1,
+	BTC_MEDIA_MAX
+};
+
+enum btc_notify_type_special_packet {
+	BTC_PACKET_UNKNOWN = 0x0,
+	BTC_PACKET_DHCP = 0x1,
+	BTC_PACKET_ARP = 0x2,
+	BTC_PACKET_EAPOL = 0x3,
+	BTC_PACKET_MAX
+};
+
+enum btc_notify_type_stack_operation {
+	BTC_STACK_OP_NONE = 0x0,
+	BTC_STACK_OP_INQ_PAGE_PAIR_START = 0x1,
+	BTC_STACK_OP_INQ_PAGE_PAIR_FINISH = 0x2,
+	BTC_STACK_OP_MAX
+};
+
+
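+/*
+ * Callback types for the register, RF/BB and H2C accessors that the
+ * driver installs into struct btc_coexist below.
+ */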
+typedef u8 (*bfp_btc_r1)(void *btc_context, u32 reg_addr);
+
+typedef u16 (*bfp_btc_r2)(void *btc_context, u32 reg_addr);
+
+typedef u32 (*bfp_btc_r4)(void *btc_context, u32 reg_addr);
+
+typedef void (*bfp_btc_w1)(void *btc_context, u32 reg_addr, u8 data);
+
+typedef void (*bfp_btc_w1_bit_mak)(void *btc_context, u32 reg_addr,
+				   u8 bit_mask, u8 data1b);
+
+typedef void (*bfp_btc_w2)(void *btc_context, u32 reg_addr, u16 data);
+
+typedef void (*bfp_btc_w4)(void *btc_context, u32 reg_addr, u32 data);
+
+typedef void (*bfp_btc_wr_1byte_bit_mask)(void *btc_context, u32 reg_addr,
+					  u8 bit_mask, u8 data);
+
+typedef void (*bfp_btc_set_bb_reg)(void *btc_context, u32 reg_addr,
+				   u32 bit_mask, u32 data);
+
+typedef u32 (*bfp_btc_get_bb_reg)(void *btc_context, u32 reg_addr,
+				  u32 bit_mask);
+
+typedef void (*bfp_btc_set_rf_reg)(void *btc_context, u8 rf_path, u32 reg_addr,
+				   u32 bit_mask, u32 data);
+
+typedef u32 (*bfp_btc_get_rf_reg)(void *btc_context, u8 rf_path,
+				  u32 reg_addr, u32 bit_mask);
+
+typedef void (*bfp_btc_fill_h2c)(void *btc_context, u8 element_id,
+				 u32 cmd_len, u8 *cmd_buffer);
+
+typedef	bool (*bfp_btc_get)(void *btcoexist, u8 get_type, void *out_buf);
+
+typedef	bool (*bfp_btc_set)(void *btcoexist, u8 set_type, void *in_buf);
+
+typedef void (*bfp_btc_disp_dbg_msg)(void *btcoexist, u8 disp_type);
+
+struct btc_bt_info {
+	bool bt_disabled;
+	u8 rssi_adjust_for_agc_table_on;
+	u8 rssi_adjust_for_1ant_coex_type;
+	bool bt_busy;
+	u8 agg_buf_size;
+	bool limited_dig;
+	bool reject_agg_pkt;
+	bool b_bt_ctrl_buf_size;
+	bool increase_scan_dev_num;
+	u16 bt_hci_ver;
+	u16 bt_real_fw_ver;
+	u8 bt_fw_ver;
+
+	/* the following is for 1Ant solution */
+	bool bt_ctrl_lps;
+	bool bt_pwr_save_mode;
+	bool bt_lps_on;
+	bool force_to_roam;
+	u8 force_exec_pwr_cmd_cnt;
+	u8 lps_1ant;
+	u8 rpwm_1ant;
+	u32 ra_mask;
+};
+
+struct btc_stack_info {
+	bool profile_notified;
+	u16 hci_version;	/* stack hci version */
+	u8 num_of_link;
+	bool bt_link_exist;
+	bool sco_exist;
+	bool acl_exist;
+	bool a2dp_exist;
+	bool hid_exist;
+	u8 num_of_hid;
+	bool pan_exist;
+	bool unknown_acl_exist;
+	char min_bt_rssi;
+};
+
+struct btc_statistics {
+	u32 cnt_bind;
+	u32 cnt_init_hw_config;
+	u32 cnt_init_coex_dm;
+	u32 cnt_ips_notify;
+	u32 cnt_lps_notify;
+	u32 cnt_scan_notify;
+	u32 cnt_connect_notify;
+	u32 cnt_media_status_notify;
+	u32 cnt_special_packet_notify;
+	u32 cnt_bt_info_notify;
+	u32 cnt_periodical;
+	u32 cnt_stack_operation_notify;
+	u32 cnt_dbg_ctrl;
+};
+
+struct btc_bt_link_info {
+	bool bt_link_exist;
+	bool sco_exist;
+	bool sco_only;
+	bool a2dp_exist;
+	bool a2dp_only;
+	bool hid_exist;
+	bool hid_only;
+	bool pan_exist;
+	bool pan_only;
+};
+
+enum btc_antenna_pos {
+	BTC_ANTENNA_AT_MAIN_PORT = 0x1,
+	BTC_ANTENNA_AT_AUX_PORT = 0x2,
+};
+
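+/*
+ * Central coexistence context: board, BT and stack state together with
+ * the I/O and get/set callbacks supplied by the driver.
+ */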
+struct btc_coexist {
+	/* make sure only one adapter can bind the data context  */
+	bool binded;
+	/* default adapter */
+	void *adapter;
+	struct btc_board_info board_info;
+	/* some bt info referenced by non-bt module */
+	struct btc_bt_info bt_info;
+	struct btc_stack_info stack_info;
+	enum btc_chip_interface	chip_interface;
+	struct btc_bt_link_info bt_link_info;
+
+	bool initilized;
+	bool stop_coex_dm;
+	bool manual_control;
+	u8 *cli_buf;
+	struct btc_statistics statistics;
+	u8 pwr_mode_val[10];
+
+	/* function pointers
+	 * io related */
+	bfp_btc_r1 btc_read_1byte;
+	bfp_btc_w1 btc_write_1byte;
+	bfp_btc_w1_bit_mak btc_write_1byte_bitmask;
+	bfp_btc_r2 btc_read_2byte;
+	bfp_btc_w2 btc_write_2byte;
+	bfp_btc_r4 btc_read_4byte;
+	bfp_btc_w4 btc_write_4byte;
+
+	bfp_btc_set_bb_reg btc_set_bb_reg;
+	bfp_btc_get_bb_reg btc_get_bb_reg;
+
+
+	bfp_btc_set_rf_reg btc_set_rf_reg;
+	bfp_btc_get_rf_reg btc_get_rf_reg;
+
+
+	bfp_btc_fill_h2c btc_fill_h2c;
+
+	bfp_btc_disp_dbg_msg btc_disp_dbg_msg;
+
+	bfp_btc_get btc_get;
+	bfp_btc_set btc_set;
+};
+
+bool halbtc_is_wifi_uplink(struct rtl_priv *adapter);
+
+
+extern struct btc_coexist gl_bt_coexist;
+
+bool exhalbtc_initlize_variables(struct rtl_priv *adapter);
+void exhalbtc_init_hw_config(struct btc_coexist *btcoexist);
+void exhalbtc_init_coex_dm(struct btc_coexist *btcoexist);
+void exhalbtc_ips_notify(struct btc_coexist *btcoexist, u8 type);
+void exhalbtc_lps_notify(struct btc_coexist *btcoexist, u8 type);
+void exhalbtc_scan_notify(struct btc_coexist *btcoexist, u8 type);
+void exhalbtc_connect_notify(struct btc_coexist *btcoexist, u8 action);
+void exhalbtc_mediastatus_notify(struct btc_coexist *btcoexist,
+				 enum rt_media_status media_status);
+void exhalbtc_special_packet_notify(struct btc_coexist *btcoexist, u8 pkt_type);
+void exhalbtc_bt_info_notify(struct btc_coexist *btcoexist, u8 *tmp_buf,
+			     u8 length);
+void exhalbtc_stack_operation_notify(struct btc_coexist *btcoexist, u8 type);
+void exhalbtc_halt_notify(struct btc_coexist *btcoexist);
+void exhalbtc_pnp_notify(struct btc_coexist *btcoexist, u8 pnp_state);
+void exhalbtc_periodical(struct btc_coexist *btcoexist);
+void exhalbtc_dbg_control(struct btc_coexist *btcoexist, u8 code, u8 len,
+			  u8 *data);
+void exhalbtc_stack_update_profile_info(void);
+void exhalbtc_set_hci_version(u16 hci_version);
+void exhalbtc_set_bt_patch_version(u16 bt_hci_version, u16 bt_patch_version);
+void exhalbtc_update_min_bt_rssi(char bt_rssi);
+void exhalbtc_set_bt_exist(bool bt_exist);
+void exhalbtc_set_chip_type(u8 chip_type);
+void exhalbtc_set_ant_num(u8 type, u8 ant_num);
+void exhalbtc_display_bt_coex_info(struct btc_coexist *btcoexist);
+void exhalbtc_signal_compensation(struct btc_coexist *btcoexist,
+				  u8 *rssi_wifi, u8 *rssi_bt);
+void exhalbtc_lps_leave(struct btc_coexist *btcoexist);
+void exhalbtc_low_wifi_traffic_notify(struct btc_coexist *btcoexist);
+#endif
diff --git a/drivers/staging/rtl8821ae/btcoexist/rtl_btc.c b/drivers/staging/rtl8821ae/btcoexist/rtl_btc.c
new file mode 100644
index 0000000..6653f14
--- /dev/null
+++ b/drivers/staging/rtl8821ae/btcoexist/rtl_btc.c
@@ -0,0 +1,236 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2009-2010  Realtek Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ * The full GNU General Public License is included in this distribution in the
+ * file called LICENSE.
+ *
+ * Contact Information:
+ * wlanfae <wlanfae@realtek.com>
+ * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
+ * Hsinchu 300, Taiwan.
+ *
+ * Larry Finger <Larry.Finger@lwfinger.net>
+ *
+ *****************************************************************************/
+#include <linux/vmalloc.h>
+#include <linux/module.h>
+
+#include "rtl_btc.h"
+#include "halbt_precomp.h"
+
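+/* Coexistence callbacks handed to the rtlwifi core through
+ * rtl_btc_get_ops_pointer(). */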
+struct rtl_btc_ops rtl_btc_operation = {
+	.btc_init_variables = rtl_btc_init_variables,
+	.btc_init_hal_vars = rtl_btc_init_hal_vars,
+	.btc_init_hw_config = rtl_btc_init_hw_config,
+	.btc_ips_notify = rtl_btc_ips_notify,
+	.btc_scan_notify = rtl_btc_scan_notify,
+	.btc_connect_notify = rtl_btc_connect_notify,
+	.btc_mediastatus_notify = rtl_btc_mediastatus_notify,
+	.btc_periodical = rtl_btc_periodical,
+	.btc_halt_notify = rtl_btc_halt_notify,
+	.btc_btinfo_notify = rtl_btc_btinfo_notify,
+	.btc_is_limited_dig = rtl_btc_is_limited_dig,
+	.btc_is_disable_edca_turbo = rtl_btc_is_disable_edca_turbo,
+	.btc_is_bt_disabled = rtl_btc_is_bt_disabled,
+};
+
+
+void rtl_btc_init_variables(struct rtl_priv *rtlpriv)
+{
+	exhalbtc_initlize_variables(rtlpriv);
+}
+
+void rtl_btc_init_hal_vars(struct rtl_priv *rtlpriv)
+{
+	u8 ant_num;
+	u8 bt_exist;
+	u8 bt_type;
+
+	ant_num = rtl_get_hwpg_ant_num(rtlpriv);
+	RT_TRACE(COMP_INIT, DBG_DMESG,
+		 ("%s, antNum is %d\n", __func__, ant_num));
+
+	bt_exist = rtl_get_hwpg_bt_exist(rtlpriv);
+	RT_TRACE(COMP_INIT, DBG_DMESG,
+		 ("%s, bt_exist is %d\n", __func__, bt_exist));
+	exhalbtc_set_bt_exist(bt_exist);
+
+	bt_type = rtl_get_hwpg_bt_type(rtlpriv);
+	RT_TRACE(COMP_INIT, DBG_DMESG,
+		 ("%s, bt_type is %d\n", __func__, bt_type));
+	exhalbtc_set_chip_type(bt_type);
+
+	exhalbtc_set_ant_num(BT_COEX_ANT_TYPE_PG, ant_num);
+}
+
+
+void rtl_btc_init_hw_config(struct rtl_priv *rtlpriv)
+{
+	exhalbtc_init_hw_config(&gl_bt_coexist);
+	exhalbtc_init_coex_dm(&gl_bt_coexist);
+}
+
+
+void rtl_btc_ips_notify(struct rtl_priv *rtlpriv, u8 type)
+{
+	exhalbtc_ips_notify(&gl_bt_coexist, type);
+}
+
+
+void rtl_btc_scan_notify(struct rtl_priv *rtlpriv, u8 scantype)
+{
+	exhalbtc_scan_notify(&gl_bt_coexist, scantype);
+}
+
+
+void rtl_btc_connect_notify(struct rtl_priv *rtlpriv, u8 action)
+{
+	exhalbtc_connect_notify(&gl_bt_coexist, action);
+}
+
+
+void rtl_btc_mediastatus_notify(struct rtl_priv *rtlpriv, enum rt_media_status mstatus)
+{
+	exhalbtc_mediastatus_notify(&gl_bt_coexist, mstatus);
+}
+
+void rtl_btc_periodical(struct rtl_priv *rtlpriv)
+{
+	/* rtl_bt_dm_monitor(); */
+	exhalbtc_periodical(&gl_bt_coexist);
+}
+
+void rtl_btc_halt_notify(void)
+{
+	exhalbtc_halt_notify(&gl_bt_coexist);
+}
+
+void rtl_btc_btinfo_notify(struct rtl_priv *rtlpriv, u8 *tmp_buf, u8 length)
+{
+	exhalbtc_bt_info_notify(&gl_bt_coexist, tmp_buf, length);
+}
+
+bool rtl_btc_is_limited_dig(struct rtl_priv *rtlpriv)
+{
+	return gl_bt_coexist.bt_info.limited_dig;
+}
+
+bool rtl_btc_is_disable_edca_turbo(struct rtl_priv *rtlpriv)
+{
+	bool bt_change_edca = false;
+	u32 cur_edca_val;
+	u32 edca_bt_hs_uplink = 0x5ea42b, edca_bt_hs_downlink = 0x5ea42b;
+	u32 edca_hs = 0;
+	u32 edca_addr = 0x504;
+
+	cur_edca_val = rtl_read_dword(rtlpriv, edca_addr);
+	if (halbtc_is_wifi_uplink(rtlpriv)) {
+		if (cur_edca_val != edca_bt_hs_uplink) {
+			edca_hs = edca_bt_hs_uplink;
+			bt_change_edca = true;
+		}
+	} else {
+		if (cur_edca_val != edca_bt_hs_downlink) {
+			edca_hs = edca_bt_hs_downlink;
+			bt_change_edca = true;
+		}
+	}
+
+	if (bt_change_edca)
+		rtl_write_dword(rtlpriv, edca_addr, edca_hs);
+
+	return true;
+}
+
+bool rtl_btc_is_bt_disabled(struct rtl_priv *rtlpriv)
+{
+	return gl_bt_coexist.bt_info.bt_disabled;
+}
+
+struct rtl_btc_ops *rtl_btc_get_ops_pointer(void)
+{
+	return &rtl_btc_operation;
+}
+//EXPORT_SYMBOL(rtl_btc_get_ops_pointer);
+
+u8 rtl_get_hwpg_ant_num(struct rtl_priv *rtlpriv)
+{
+	u8 num;
+
+	if (rtlpriv->btcoexist.btc_info.ant_num == ANT_X2)
+		num = 2;
+	else
+		num = 1;
+
+	return num;
+}
+
+#if 0
+enum rt_media_status mgnt_link_status_query(struct ieee80211_hw *hw)
+{
+    struct rtl_priv *rtlpriv = rtl_priv(hw);
+    struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
+    enum rt_media_status    m_status = RT_MEDIA_DISCONNECT;
+
+    u8 bibss = (mac->opmode == NL80211_IFTYPE_ADHOC) ? 1 : 0;
+
+    if(bibss || rtlpriv->mac80211.link_state >= MAC80211_LINKED) {
+            m_status = RT_MEDIA_CONNECT;
+    }
+
+    return m_status;
+}
+#endif
+
+u8 rtl_get_hwpg_bt_exist(struct rtl_priv *rtlpriv)
+{
+	return rtlpriv->btcoexist.btc_info.btcoexist;
+}
+
+u8 rtl_get_hwpg_bt_type(struct rtl_priv *rtlpriv)
+{
+	return rtlpriv->btcoexist.btc_info.bt_type;
+}
+
+
+#if 0
+
+MODULE_AUTHOR("Page He	<page_he@realsil.com.cn>");
+MODULE_AUTHOR("Realtek WlanFAE	<wlanfae@realtek.com>");
+MODULE_AUTHOR("Larry Finger	<Larry.FInger@lwfinger.net>");
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("Realtek 802.11n PCI wireless core");
+
+static int __init rtl_btcoexist_module_init(void)
+{
+
+	//printk("%s, rtlpriv->btc_ops.btc_init_variables addr is %p\n", __func__, rtlpriv->btc_ops.btc_init_variables);
+
+	return 0;
+}
+
+static void __exit rtl_btcoexist_module_exit(void)
+{
+	return;
+}
+
+module_init(rtl_btcoexist_module_init);
+module_exit(rtl_btcoexist_module_exit);
+
+#endif
+
diff --git a/drivers/staging/rtl8821ae/btcoexist/rtl_btc.h b/drivers/staging/rtl8821ae/btcoexist/rtl_btc.h
new file mode 100644
index 0000000..452fbf1
--- /dev/null
+++ b/drivers/staging/rtl8821ae/btcoexist/rtl_btc.h
@@ -0,0 +1,66 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2009-2010  Realtek Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ * The full GNU General Public License is included in this distribution in the
+ * file called LICENSE.
+ *
+ * Contact Information:
+ * wlanfae <wlanfae@realtek.com>
+ * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
+ * Hsinchu 300, Taiwan.
+ * Larry Finger <Larry.Finger@lwfinger.net>
+ *
+ *****************************************************************************/
+
+#ifndef __RTL_BTC_H__
+#define __RTL_BTC_H__
+
+#include "halbt_precomp.h"
+
+
+
+void rtl_btc_init_variables(struct rtl_priv *rtlpriv);
+void rtl_btc_init_hal_vars(struct rtl_priv *rtlpriv);
+void rtl_btc_init_hw_config(struct rtl_priv *rtlpriv);
+void rtl_btc_ips_notify(struct rtl_priv *rtlpriv, u8 type);
+void rtl_btc_scan_notify(struct rtl_priv *rtlpriv, u8 scantype);
+void rtl_btc_connect_notify(struct rtl_priv *rtlpriv, u8 action);
+void rtl_btc_mediastatus_notify(struct rtl_priv *rtlpriv, enum rt_media_status mstatus);
+void rtl_btc_periodical(struct rtl_priv *rtlpriv);
+void rtl_btc_halt_notify(void);
+void rtl_btc_btinfo_notify(struct rtl_priv *rtlpriv, u8 *tmp_buf, u8 length);
+bool rtl_btc_is_limited_dig(struct rtl_priv *rtlpriv);
+bool rtl_btc_is_disable_edca_turbo(struct rtl_priv *rtlpriv);
+bool rtl_btc_is_bt_disabled(struct rtl_priv *rtlpriv);
+
+
+//extern struct rtl_btc_ops rtl_btc_operation;
+extern struct rtl_btc_ops *rtl_btc_get_ops_pointer(void);
+
+u8 rtl_get_hwpg_ant_num(struct rtl_priv *rtlpriv);
+u8 rtl_get_hwpg_bt_exist(struct rtl_priv *rtlpriv);
+u8 rtl_get_hwpg_bt_type(struct rtl_priv *rtlpriv);
+//enum rt_media_status mgnt_link_status_query(struct ieee80211_hw *hw);
+
+#endif
diff --git a/drivers/staging/rtl8821ae/cam.c b/drivers/staging/rtl8821ae/cam.c
new file mode 100644
index 0000000..72743e7
--- /dev/null
+++ b/drivers/staging/rtl8821ae/cam.c
@@ -0,0 +1,354 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2009-2010  Realtek Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ * The full GNU General Public License is included in this distribution in the
+ * file called LICENSE.
+ *
+ * Contact Information:
+ * wlanfae <wlanfae@realtek.com>
+ * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
+ * Hsinchu 300, Taiwan.
+ *
+ * Larry Finger <Larry.Finger@lwfinger.net>
+ *
+ *****************************************************************************/
+#include "wifi.h"
+#include "cam.h"
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,2,0))
+#include <linux/export.h>
+#endif
+
+void rtl_cam_reset_sec_info(struct ieee80211_hw *hw)
+{
+	struct rtl_priv *rtlpriv = rtl_priv(hw);
+
+	rtlpriv->sec.use_defaultkey = false;
+	rtlpriv->sec.pairwise_enc_algorithm = NO_ENCRYPTION;
+	rtlpriv->sec.group_enc_algorithm = NO_ENCRYPTION;
+	memset(rtlpriv->sec.key_buf, 0, KEY_BUF_SIZE * MAX_KEY_LEN);
+	memset(rtlpriv->sec.key_len, 0, KEY_BUF_SIZE);
+	rtlpriv->sec.pairwise_key = NULL;
+}
+
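+/*
+ * Write one hardware CAM entry: the first word carries the config bits
+ * and the first two MAC-address bytes, the second word the remaining
+ * MAC bytes, and the following words the 128-bit key, each pushed
+ * through the WCAMI/RWCAM registers.
+ */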
+static void rtl_cam_program_entry(struct ieee80211_hw *hw, u32 entry_no,
+			   u8 *mac_addr, u8 *key_cont_128, u16 us_config)
+{
+	struct rtl_priv *rtlpriv = rtl_priv(hw);
+
+	u32 target_command;
+	u32 target_content = 0;
+	u8 entry_i;
+
+	RT_PRINT_DATA(rtlpriv, COMP_SEC, DBG_DMESG, "Key content :",
+			key_cont_128, 16);
+
+	for (entry_i = 0; entry_i < CAM_CONTENT_COUNT; entry_i++) {
+		target_command = entry_i + CAM_CONTENT_COUNT * entry_no;
+		target_command = target_command | BIT(31) | BIT(16);
+
+		if (entry_i == 0) {
+			target_content = (u32) (*(mac_addr + 0)) << 16 |
+			    (u32) (*(mac_addr + 1)) << 24 | (u32) us_config;
+
+			rtl_write_dword(rtlpriv, rtlpriv->cfg->maps[WCAMI],
+					target_content);
+			rtl_write_dword(rtlpriv, rtlpriv->cfg->maps[RWCAM],
+					target_command);
+
+			RT_TRACE(COMP_SEC, DBG_LOUD,
+				 ("WRITE %x: %x \n",
+				  rtlpriv->cfg->maps[WCAMI], target_content));
+			RT_TRACE(COMP_SEC, DBG_LOUD,
+				 ("The Key ID is %d\n", entry_no));
+			RT_TRACE(COMP_SEC, DBG_LOUD,
+				 ("WRITE %x: %x \n",
+				  rtlpriv->cfg->maps[RWCAM], target_command));
+
+		} else if (entry_i == 1) {
+
+			target_content = (u32) (*(mac_addr + 5)) << 24 |
+			    (u32) (*(mac_addr + 4)) << 16 |
+			    (u32) (*(mac_addr + 3)) << 8 |
+			    (u32) (*(mac_addr + 2));
+
+			rtl_write_dword(rtlpriv, rtlpriv->cfg->maps[WCAMI],
+					target_content);
+			rtl_write_dword(rtlpriv, rtlpriv->cfg->maps[RWCAM],
+					target_command);
+
+			RT_TRACE(COMP_SEC, DBG_LOUD,
+				 ("WRITE A4: %x \n", target_content));
+			RT_TRACE(COMP_SEC, DBG_LOUD,
+				 ("WRITE A0: %x \n", target_command));
+
+		} else {
+
+			target_content =
+			    (u32) (*(key_cont_128 + (entry_i * 4 - 8) + 3)) <<
+			    24 | (u32) (*(key_cont_128 + (entry_i * 4 - 8) + 2))
+			    << 16 |
+			    (u32) (*(key_cont_128 + (entry_i * 4 - 8) + 1)) << 8
+			    | (u32) (*(key_cont_128 + (entry_i * 4 - 8) + 0));
+
+			rtl_write_dword(rtlpriv, rtlpriv->cfg->maps[WCAMI],
+					target_content);
+			rtl_write_dword(rtlpriv, rtlpriv->cfg->maps[RWCAM],
+					target_command);
+			udelay(100);
+
+			RT_TRACE(COMP_SEC, DBG_LOUD,
+				 ("WRITE A4: %x \n", target_content));
+			RT_TRACE(COMP_SEC, DBG_LOUD,
+				 ("WRITE A0: %x \n", target_command));
+		}
+	}
+
+	RT_TRACE(COMP_SEC, DBG_LOUD,
+		 ("after set key, usconfig:%x\n", us_config));
+}
+
+u8 rtl_cam_add_one_entry(struct ieee80211_hw *hw, u8 *mac_addr,
+			 u32 ul_key_id, u32 ul_entry_idx, u32 ul_enc_alg,
+			 u32 ul_default_key, u8 *key_content)
+{
+	u32 us_config;
+	struct rtl_priv *rtlpriv = rtl_priv(hw);
+
+	RT_TRACE(COMP_SEC, DBG_DMESG,
+		 ("EntryNo:%x, ulKeyId=%x, ulEncAlg=%x, "
+		  "ulUseDK=%x MacAddr %pM\n",
+		  ul_entry_idx, ul_key_id, ul_enc_alg,
+		  ul_default_key, mac_addr));
+
+	if (ul_key_id == TOTAL_CAM_ENTRY) {
+		RT_TRACE(COMP_ERR, DBG_WARNING,
+			 ("ulKeyId exceed!\n"));
+		return 0;
+	}
+
+	if (ul_default_key == 1) {
+		us_config = CFG_VALID | ((u16) (ul_enc_alg) << 2);
+	} else {
+		us_config = CFG_VALID | ((ul_enc_alg) << 2) | ul_key_id;
+	}
+
+	rtl_cam_program_entry(hw, ul_entry_idx, mac_addr,
+			      (u8 *) key_content, us_config);
+
+	RT_TRACE(COMP_SEC, DBG_DMESG, ("end \n"));
+
+	return 1;
+
+}
+//EXPORT_SYMBOL(rtl_cam_add_one_entry);
+
+int rtl_cam_delete_one_entry(struct ieee80211_hw *hw,
+			     u8 *mac_addr, u32 ul_key_id)
+{
+	u32 ul_command;
+	struct rtl_priv *rtlpriv = rtl_priv(hw);
+
+	RT_TRACE(COMP_SEC, DBG_DMESG, ("key_idx:%d\n", ul_key_id));
+
+	ul_command = ul_key_id * CAM_CONTENT_COUNT;
+	ul_command = ul_command | BIT(31) | BIT(16);
+
+	rtl_write_dword(rtlpriv, rtlpriv->cfg->maps[WCAMI], 0);
+	rtl_write_dword(rtlpriv, rtlpriv->cfg->maps[RWCAM], ul_command);
+
+	RT_TRACE(COMP_SEC, DBG_DMESG,
+		 ("rtl_cam_delete_one_entry(): WRITE A4: %x \n", 0));
+	RT_TRACE(COMP_SEC, DBG_DMESG,
+		 ("rtl_cam_delete_one_entry(): WRITE A0: %x \n", ul_command));
+
+	return 0;
+
+}
+//EXPORT_SYMBOL(rtl_cam_delete_one_entry);
+
+void rtl_cam_reset_all_entry(struct ieee80211_hw *hw)
+{
+	u32 ul_command;
+	struct rtl_priv *rtlpriv = rtl_priv(hw);
+
+	ul_command = BIT(31) | BIT(30);
+	rtl_write_dword(rtlpriv, rtlpriv->cfg->maps[RWCAM], ul_command);
+}
+//EXPORT_SYMBOL(rtl_cam_reset_all_entry);
+
+void rtl_cam_mark_invalid(struct ieee80211_hw *hw, u8 uc_index)
+{
+	struct rtl_priv *rtlpriv = rtl_priv(hw);
+
+	u32 ul_command;
+	u32 ul_content;
+	u32 ul_enc_algo = rtlpriv->cfg->maps[SEC_CAM_AES];
+
+	switch (rtlpriv->sec.pairwise_enc_algorithm) {
+	case WEP40_ENCRYPTION:
+		ul_enc_algo = rtlpriv->cfg->maps[SEC_CAM_WEP40];
+		break;
+	case WEP104_ENCRYPTION:
+		ul_enc_algo = rtlpriv->cfg->maps[SEC_CAM_WEP104];
+		break;
+	case TKIP_ENCRYPTION:
+		ul_enc_algo = rtlpriv->cfg->maps[SEC_CAM_TKIP];
+		break;
+	case AESCCMP_ENCRYPTION:
+		ul_enc_algo = rtlpriv->cfg->maps[SEC_CAM_AES];
+		break;
+	default:
+		ul_enc_algo = rtlpriv->cfg->maps[SEC_CAM_AES];
+	}
+
+	ul_content = (uc_index & 3) | ((u16) (ul_enc_algo) << 2);
+
+	ul_content |= BIT(15);
+	ul_command = CAM_CONTENT_COUNT * uc_index;
+	ul_command = ul_command | BIT(31) | BIT(16);
+
+	rtl_write_dword(rtlpriv, rtlpriv->cfg->maps[WCAMI], ul_content);
+	rtl_write_dword(rtlpriv, rtlpriv->cfg->maps[RWCAM], ul_command);
+
+	RT_TRACE(COMP_SEC, DBG_DMESG,
+		 ("rtl_cam_mark_invalid(): WRITE A4: %x \n", ul_content));
+	RT_TRACE(COMP_SEC, DBG_DMESG,
+		 ("rtl_cam_mark_invalid(): WRITE A0: %x \n", ul_command));
+}
+//EXPORT_SYMBOL(rtl_cam_mark_invalid);
+
+void rtl_cam_empty_entry(struct ieee80211_hw *hw, u8 uc_index)
+{
+	struct rtl_priv *rtlpriv = rtl_priv(hw);
+
+	u32 ul_command;
+	u32 ul_content;
+	u32 ul_encalgo = rtlpriv->cfg->maps[SEC_CAM_AES];
+	u8 entry_i;
+
+	switch (rtlpriv->sec.pairwise_enc_algorithm) {
+	case WEP40_ENCRYPTION:
+		ul_encalgo = rtlpriv->cfg->maps[SEC_CAM_WEP40];
+		break;
+	case WEP104_ENCRYPTION:
+		ul_encalgo = rtlpriv->cfg->maps[SEC_CAM_WEP104];
+		break;
+	case TKIP_ENCRYPTION:
+		ul_encalgo = rtlpriv->cfg->maps[SEC_CAM_TKIP];
+		break;
+	case AESCCMP_ENCRYPTION:
+		ul_encalgo = rtlpriv->cfg->maps[SEC_CAM_AES];
+		break;
+	default:
+		ul_encalgo = rtlpriv->cfg->maps[SEC_CAM_AES];
+	}
+
+	for (entry_i = 0; entry_i < CAM_CONTENT_COUNT; entry_i++) {
+
+		if (entry_i == 0) {
+			ul_content =
+			    (uc_index & 0x03) | ((u16) (ul_encalgo) << 2);
+			ul_content |= BIT(15);
+
+		} else {
+			ul_content = 0;
+		}
+
+		ul_command = CAM_CONTENT_COUNT * uc_index + entry_i;
+		ul_command = ul_command | BIT(31) | BIT(16);
+
+		rtl_write_dword(rtlpriv, rtlpriv->cfg->maps[WCAMI], ul_content);
+		rtl_write_dword(rtlpriv, rtlpriv->cfg->maps[RWCAM], ul_command);
+
+		RT_TRACE(COMP_SEC, DBG_LOUD,
+			 ("rtl_cam_empty_entry(): WRITE A4: %x \n",
+			  ul_content));
+		RT_TRACE(COMP_SEC, DBG_LOUD,
+			 ("rtl_cam_empty_entry(): WRITE A0: %x \n",
+			  ul_command));
+	}
+
+}
+//EXPORT_SYMBOL(rtl_cam_empty_entry);
+
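+/*
+ * Return the CAM index already associated with sta_addr, or claim the
+ * first free index starting at 4 (the first four entries are never
+ * handed out here).  Returns TOTAL_CAM_ENTRY when no entry is free.
+ */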
+u8 rtl_cam_get_free_entry(struct ieee80211_hw *hw, u8 *sta_addr)
+{
+	struct rtl_priv *rtlpriv = rtl_priv(hw);
+	u32 bitmap = (rtlpriv->sec.hwsec_cam_bitmap) >> 4;
+	u8 entry_idx = 0;
+	u8 i, *addr;
+
+	if (NULL == sta_addr) {
+		RT_TRACE(COMP_SEC, DBG_EMERG,
+			("sta_addr is NULL.\n"));
+		return TOTAL_CAM_ENTRY;
+	}
+	/* Does STA already exist? */
+	for (i = 4; i < TOTAL_CAM_ENTRY; i++) {
+		addr = rtlpriv->sec.hwsec_cam_sta_addr[i];
+		if (memcmp(addr, sta_addr, ETH_ALEN) == 0)
+			return i;
+	}
+	/* Get a free CAM entry. */
+	for (entry_idx = 4; entry_idx < TOTAL_CAM_ENTRY; entry_idx++) {
+		if ((bitmap & BIT(0)) == 0) {
+			RT_TRACE(COMP_SEC, DBG_EMERG,
+				("-----hwsec_cam_bitmap: 0x%x entry_idx=%d\n",
+				 rtlpriv->sec.hwsec_cam_bitmap, entry_idx));
+			rtlpriv->sec.hwsec_cam_bitmap |= BIT(0) << entry_idx;
+			memcpy(rtlpriv->sec.hwsec_cam_sta_addr[entry_idx],
+			       sta_addr, ETH_ALEN);
+			return entry_idx;
+		}
+		bitmap = bitmap >> 1;
+	}
+	return TOTAL_CAM_ENTRY;
+}
+//EXPORT_SYMBOL(rtl_cam_get_free_entry);
+
+void rtl_cam_del_entry(struct ieee80211_hw *hw, u8 *sta_addr)
+{
+	struct rtl_priv *rtlpriv = rtl_priv(hw);
+	u32 bitmap;
+	u8 i, *addr;
+
+	if (NULL == sta_addr) {
+		RT_TRACE(COMP_SEC, DBG_EMERG,
+			("sta_addr is NULL.\n"));
+		return;
+	}
+
+	if ((sta_addr[0]|sta_addr[1]|sta_addr[2]|sta_addr[3]|\
+				sta_addr[4]|sta_addr[5]) == 0) {
+		RT_TRACE(COMP_SEC, DBG_EMERG,
+			("sta_addr is 00:00:00:00:00:00.\n"));
+		return;
+	}
+	/* Does STA already exist? */
+	for (i = 4; i < TOTAL_CAM_ENTRY; i++) {
+		addr = rtlpriv->sec.hwsec_cam_sta_addr[i];
+		bitmap = (rtlpriv->sec.hwsec_cam_bitmap) >> i;
+		if (((bitmap & BIT(0)) == BIT(0)) &&
+		    (memcmp(addr, sta_addr, ETH_ALEN) == 0)) {
+			/* Remove from HW Security CAM */
+			memset(rtlpriv->sec.hwsec_cam_sta_addr[i], 0, ETH_ALEN);
+			rtlpriv->sec.hwsec_cam_bitmap &= ~(BIT(0) << i);
+			RT_TRACE(COMP_SEC, DBG_DMESG, ("del entry %d\n", i));
+		}
+	}
+}
+//EXPORT_SYMBOL(rtl_cam_del_entry);
diff --git a/drivers/staging/rtl8821ae/cam.h b/drivers/staging/rtl8821ae/cam.h
new file mode 100644
index 0000000..326fa67
--- /dev/null
+++ b/drivers/staging/rtl8821ae/cam.h
@@ -0,0 +1,56 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2009-2010  Realtek Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ * The full GNU General Public License is included in this distribution in the
+ * file called LICENSE.
+ *
+ * Contact Information:
+ * wlanfae <wlanfae@realtek.com>
+ * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
+ * Hsinchu 300, Taiwan.
+ *
+ * Larry Finger <Larry.Finger@lwfinger.net>
+ *
+ *****************************************************************************/
+
+#ifndef __RTL_CAM_H_
+#define __RTL_CAM_H_
+
+#define CAM_CONTENT_COUNT 				8
+
+#define CFG_DEFAULT_KEY  				BIT(5)
+#define CFG_VALID        				BIT(15)
+
+#define PAIRWISE_KEYIDX					0
+#define CAM_PAIRWISE_KEY_POSITION			4
+
+#define	CAM_CONFIG_USEDK				1
+#define	CAM_CONFIG_NO_USEDK				0
+
+extern void rtl_cam_reset_all_entry(struct ieee80211_hw *hw);
+extern u8 rtl_cam_add_one_entry(struct ieee80211_hw *hw, u8 *mac_addr,
+				u32 ul_key_id, u32 ul_entry_idx, u32 ul_enc_alg,
+				u32 ul_default_key, u8 *key_content);
+int rtl_cam_delete_one_entry(struct ieee80211_hw *hw, u8 *mac_addr,
+			     u32 ul_key_id);
+void rtl_cam_mark_invalid(struct ieee80211_hw *hw, u8 uc_index);
+void rtl_cam_empty_entry(struct ieee80211_hw *hw, u8 uc_index);
+void rtl_cam_reset_sec_info(struct ieee80211_hw *hw);
+u8 rtl_cam_get_free_entry(struct ieee80211_hw *hw, u8 *sta_addr);
+void rtl_cam_del_entry(struct ieee80211_hw *hw, u8 *sta_addr);
+
+#endif
diff --git a/drivers/staging/rtl8821ae/compat.h b/drivers/staging/rtl8821ae/compat.h
new file mode 100644
index 0000000..68269cc
--- /dev/null
+++ b/drivers/staging/rtl8821ae/compat.h
@@ -0,0 +1,125 @@
+#ifndef __RTL_COMPAT_H__
+#define __RTL_COMPAT_H__
+
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,29))
+/*
+ * Use this if you want to use the same suspend and resume callbacks for suspend
+ * to RAM and hibernation.
+ */
+#define SIMPLE_DEV_PM_OPS(name, suspend_fn, resume_fn) \
+struct dev_pm_ops name = { \
+	.suspend = suspend_fn, \
+	.resume = resume_fn, \
+	.freeze = suspend_fn, \
+	.thaw = resume_fn, \
+	.poweroff = suspend_fn, \
+	.restore = resume_fn, \
+}
+
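+/*
+ * Adapt a dev_pm_ops style suspend/resume callback to the legacy PCI
+ * suspend/resume hooks used by kernels older than 2.6.29.
+ */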
+#define compat_pci_suspend(fn)						\
+	int fn##_compat(struct pci_dev *pdev, pm_message_t state) 	\
+	{								\
+		int r;							\
+									\
+		r = fn(&pdev->dev);					\
+		if (r)							\
+			return r;					\
+									\
+		pci_save_state(pdev);					\
+		pci_disable_device(pdev);				\
+		pci_set_power_state(pdev, PCI_D3hot);			\
+									\
+		return 0;						\
+	}
+
+#define compat_pci_resume(fn)						\
+	int fn##_compat(struct pci_dev *pdev)				\
+	{								\
+		int r;							\
+									\
+		pci_set_power_state(pdev, PCI_D0);			\
+		r = pci_enable_device(pdev);				\
+		if (r)							\
+			return r;					\
+		pci_restore_state(pdev);				\
+									\
+		return fn(&pdev->dev);					\
+	}
+#endif
+
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,39))
+#define RX_FLAG_MACTIME_MPDU RX_FLAG_TSFT
+#else
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,8,0))
+#define RX_FLAG_MACTIME_MPDU RX_FLAG_MACTIME_START
+#else
+#endif
+//#define NETDEV_TX_OK
+#endif
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,7,0))
+#define IEEE80211_KEY_FLAG_SW_MGMT IEEE80211_KEY_FLAG_SW_MGMT_TX
+#endif
+
+struct ieee80211_mgmt_compat {
+	__le16 frame_control;
+	__le16 duration;
+	u8 da[6];
+	u8 sa[6];
+	u8 bssid[6];
+	__le16 seq_ctrl;
+	union {
+		struct {
+			u8 category;
+			union {
+				struct {
+					u8 action_code;
+					u8 dialog_token;
+					u8 status_code;
+					u8 variable[0];
+				} __attribute__ ((packed)) wme_action;
+				struct{
+					u8 action_code;
+					u8 dialog_token;
+					__le16 capab;
+					__le16 timeout;
+					__le16 start_seq_num;
+				} __attribute__((packed)) addba_req;
+				struct{
+					u8 action_code;
+					u8 dialog_token;
+					__le16 status;
+					__le16 capab;
+					__le16 timeout;
+				} __attribute__((packed)) addba_resp;
+				struct{
+					u8 action_code;
+					__le16 params;
+					__le16 reason_code;
+				} __attribute__((packed)) delba;
+				struct{
+					u8 action_code;
+					/* capab_info for open and confirm,
+					 * reason for close
+					 */
+					__le16 aux;
+					/* Followed in plink_confirm by status
+					 * code, AID and supported rates,
+					 * and directly by supported rates in
+					 * plink_open and plink_close
+					 */
+					u8 variable[0];
+				} __attribute__((packed)) plink_action;
+				struct{
+					u8 action_code;
+					u8 variable[0];
+				} __attribute__((packed)) mesh_action;
+				struct {
+					u8 action;
+					u8 smps_control;
+				} __attribute__ ((packed)) ht_smps;
+			} u;
+		} __attribute__ ((packed)) action;
+	} u;
+} __attribute__ ((packed));
+#endif
diff --git a/drivers/staging/rtl8821ae/core.c b/drivers/staging/rtl8821ae/core.c
new file mode 100644
index 0000000..40de608
--- /dev/null
+++ b/drivers/staging/rtl8821ae/core.c
@@ -0,0 +1,1464 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2009-2010  Realtek Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ * The full GNU General Public License is included in this distribution in the
+ * file called LICENSE.
+ *
+ * Contact Information:
+ * wlanfae <wlanfae@realtek.com>
+ * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
+ * Hsinchu 300, Taiwan.
+ *
+ * Larry Finger <Larry.Finger@lwfinger.net>
+ *
+ *****************************************************************************/
+
+#include "wifi.h"
+#include "core.h"
+#include "cam.h"
+#include "base.h"
+#include "ps.h"
+
+#include "btcoexist/rtl_btc.h"
+
+/* A mutex around start & stop is required here. */
+static int rtl_op_start(struct ieee80211_hw *hw)
+{
+	int err = 0;
+	struct rtl_priv *rtlpriv = rtl_priv(hw);
+	struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
+
+	if (!is_hal_stop(rtlhal))
+		return 0;
+	if (!test_bit(RTL_STATUS_INTERFACE_START, &rtlpriv->status))
+		return 0;
+	mutex_lock(&rtlpriv->locks.conf_mutex);
+	err = rtlpriv->intf_ops->adapter_start(hw);
+	if (err)
+		goto out;
+	rtl_watch_dog_timer_callback((unsigned long)hw);
+
+out:
+	mutex_unlock(&rtlpriv->locks.conf_mutex);
+	return err;
+}
+
+static void rtl_op_stop(struct ieee80211_hw *hw)
+{
+	struct rtl_priv *rtlpriv = rtl_priv(hw);
+	struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
+	struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
+	struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw));
+
+	if (is_hal_stop(rtlhal))
+		return;
+
+	/* This is required because ad-hoc mode does stop and start,
+	 * but stopping while the RF is off can cause problems such as
+	 * degraded ad-hoc throughput. */
+	if (unlikely(ppsc->rfpwr_state == ERFOFF))
+		rtl_ips_nic_on(hw);
+
+	mutex_lock(&rtlpriv->locks.conf_mutex);
+
+	mac->link_state = MAC80211_NOLINK;
+	memset(mac->bssid, 0, 6);
+	mac->vendor = PEER_UNKNOWN;
+
+	/*reset sec info */
+	rtl_cam_reset_sec_info(hw);
+
+	rtl_deinit_deferred_work(hw);
+	rtlpriv->intf_ops->adapter_stop(hw);
+
+	mutex_unlock(&rtlpriv->locks.conf_mutex);
+}
+
+/*<delete in kernel start>*/
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,39))
+static int rtl_op_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
+{
+	struct rtl_priv *rtlpriv = rtl_priv(hw);
+	struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
+	struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw));
+	struct rtl_tcb_desc tcb_desc;
+	memset(&tcb_desc, 0, sizeof(struct rtl_tcb_desc));
+
+	if (unlikely(is_hal_stop(rtlhal) || ppsc->rfpwr_state != ERFON))
+		goto err_free;
+
+	if (!test_bit(RTL_STATUS_INTERFACE_START, &rtlpriv->status))
+		goto err_free;
+
+	if (!rtlpriv->intf_ops->waitq_insert(hw, skb))
+		rtlpriv->intf_ops->adapter_tx(hw, skb, &tcb_desc);
+
+	return NETDEV_TX_OK;
+
+err_free:
+	dev_kfree_skb_any(skb);
+	return NETDEV_TX_OK;
+}
+#else
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(3,7,0))
+static void rtl_op_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
+#else
+/*<delete in kernel end>*/
+static void rtl_op_tx(struct ieee80211_hw *hw,
+		      struct ieee80211_tx_control *control,
+		      struct sk_buff *skb)
+/*<delete in kernel start>*/
+#endif
+/*<delete in kernel end>*/
+{
+	struct rtl_priv *rtlpriv = rtl_priv(hw);
+	struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
+	struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw));
+	struct rtl_tcb_desc tcb_desc;
+	memset(&tcb_desc, 0, sizeof(struct rtl_tcb_desc));
+
+	if (unlikely(is_hal_stop(rtlhal) || ppsc->rfpwr_state != ERFON))
+		goto err_free;
+
+	if (!test_bit(RTL_STATUS_INTERFACE_START, &rtlpriv->status))
+		goto err_free;
+
+/*<delete in kernel start>*/
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(3,7,0))
+	if (!rtlpriv->intf_ops->waitq_insert(hw, skb))
+		rtlpriv->intf_ops->adapter_tx(hw, skb, &tcb_desc);
+#else
+/*<delete in kernel end>*/
+	if (!rtlpriv->intf_ops->waitq_insert(hw, control->sta, skb))
+		rtlpriv->intf_ops->adapter_tx(hw, control->sta, skb, &tcb_desc);
+/*<delete in kernel start>*/
+#endif
+/*<delete in kernel end>*/
+	return;
+
+err_free:
+	dev_kfree_skb_any(skb);
+	return;
+}
+/*<delete in kernel start>*/
+#endif
+/*<delete in kernel end>*/
+
+static int rtl_op_add_interface(struct ieee80211_hw *hw,
+		struct ieee80211_vif *vif)
+{
+	struct rtl_priv *rtlpriv = rtl_priv(hw);
+	struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
+	int err = 0;
+
+	if (mac->vif) {
+		RT_TRACE(COMP_ERR, DBG_WARNING,
+			 ("vif has been set!! mac->vif = 0x%p\n", mac->vif));
+		return -EOPNOTSUPP;
+	}
+
+/*This flag is not defined before kernel 3.4*/
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,4,0))
+	vif->driver_flags |= IEEE80211_VIF_BEACON_FILTER;
+#endif
+
+	rtl_ips_nic_on(hw);
+
+	mutex_lock(&rtlpriv->locks.conf_mutex);
+/*<delete in kernel start>*/
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,37))
+	switch (ieee80211_vif_type_p2p(vif)) {
+	case NL80211_IFTYPE_P2P_CLIENT:
+		mac->p2p = P2P_ROLE_CLIENT;
+		/*fall through*/
+#else
+/*<delete in kernel end>*/
+	switch (vif->type) {
+/*<delete in kernel start>*/
+#endif
+/*<delete in kernel end>*/
+	case NL80211_IFTYPE_STATION:
+		if (mac->beacon_enabled == 1) {
+			RT_TRACE(COMP_MAC80211, DBG_LOUD,
+				 ("NL80211_IFTYPE_STATION \n"));
+			mac->beacon_enabled = 0;
+			rtlpriv->cfg->ops->update_interrupt_mask(hw, 0,
+					rtlpriv->cfg->maps[RTL_IBSS_INT_MASKS]);
+		}
+		break;
+	case NL80211_IFTYPE_ADHOC:
+		RT_TRACE(COMP_MAC80211, DBG_LOUD,
+			 ("NL80211_IFTYPE_ADHOC \n"));
+
+		mac->link_state = MAC80211_LINKED;
+		rtlpriv->cfg->ops->set_bcn_reg(hw);
+		if (rtlpriv->rtlhal.current_bandtype == BAND_ON_2_4G)
+			mac->basic_rates = 0xfff;
+		else
+			mac->basic_rates = 0xff0;
+		rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_BASIC_RATE,
+				(u8 *) (&mac->basic_rates));
+
+		break;
+/*<delete in kernel start>*/
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,37))
+	case NL80211_IFTYPE_P2P_GO:
+		mac->p2p = P2P_ROLE_GO;
+		/*fall through*/
+#endif
+/*<delete in kernel end>*/
+	case NL80211_IFTYPE_AP:
+		RT_TRACE(COMP_MAC80211, DBG_LOUD,
+			 ("NL80211_IFTYPE_AP \n"));
+
+		mac->link_state = MAC80211_LINKED;
+		rtlpriv->cfg->ops->set_bcn_reg(hw);
+		if (rtlpriv->rtlhal.current_bandtype == BAND_ON_2_4G)
+			mac->basic_rates = 0xfff;
+		else
+			mac->basic_rates = 0xff0;
+		rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_BASIC_RATE,
+					      (u8 *) (&mac->basic_rates));
+		break;
+	case NL80211_IFTYPE_MESH_POINT:
+		RT_TRACE(COMP_MAC80211, DBG_LOUD,
+			 ("NL80211_IFTYPE_MESH_POINT \n"));
+
+		mac->link_state = MAC80211_LINKED;
+		rtlpriv->cfg->ops->set_bcn_reg(hw);
+		if (rtlpriv->rtlhal.current_bandtype == BAND_ON_2_4G)
+			mac->basic_rates = 0xfff;
+		else
+			mac->basic_rates = 0xff0;
+		rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_BASIC_RATE,
+				(u8 *) (&mac->basic_rates));
+		break;
+	default:
+		RT_TRACE(COMP_ERR, DBG_EMERG,
+			 ("operation mode %d is not supported!\n", vif->type));
+		err = -EOPNOTSUPP;
+		goto out;
+	}
+
+#ifdef VIF_TODO
+	if (!rtl_set_vif_info(hw, vif))
+		goto out;
+#endif
+
+	if (mac->p2p) {
+		RT_TRACE(COMP_MAC80211, DBG_LOUD,
+			 ("p2p role %x\n", vif->type));
+		mac->basic_rates = 0xff0;/*disable cck rate for p2p*/
+		rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_BASIC_RATE,
+				(u8 *) (&mac->basic_rates));
+	}
+	mac->vif = vif;
+	mac->opmode = vif->type;
+	rtlpriv->cfg->ops->set_network_type(hw, vif->type);
+	memcpy(mac->mac_addr, vif->addr, ETH_ALEN);
+	rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_ETHER_ADDR, mac->mac_addr);
+
+out:
+	mutex_unlock(&rtlpriv->locks.conf_mutex);
+	return err;
+}
+
+static void rtl_op_remove_interface(struct ieee80211_hw *hw,
+		struct ieee80211_vif *vif)
+{
+	struct rtl_priv *rtlpriv = rtl_priv(hw);
+	struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
+
+	mutex_lock(&rtlpriv->locks.conf_mutex);
+
+	/* Free beacon resources */
+	if ((vif->type == NL80211_IFTYPE_AP) ||
+	    (vif->type == NL80211_IFTYPE_ADHOC) ||
+	    (vif->type == NL80211_IFTYPE_MESH_POINT)) {
+		if (mac->beacon_enabled == 1) {
+			mac->beacon_enabled = 0;
+			rtlpriv->cfg->ops->update_interrupt_mask(hw, 0,
+					rtlpriv->cfg->maps[RTL_IBSS_INT_MASKS]);
+		}
+	}
+
+	/*
+	 *Note: We assume NL80211_IFTYPE_UNSPECIFIED as
+	 *NO LINK for our hardware.
+	 */
+	mac->p2p = 0;
+	mac->vif = NULL;
+	mac->link_state = MAC80211_NOLINK;
+	memset(mac->bssid, 0, 6);
+	mac->vendor = PEER_UNKNOWN;
+	mac->opmode = NL80211_IFTYPE_UNSPECIFIED;
+	rtlpriv->cfg->ops->set_network_type(hw, mac->opmode);
+
+	mutex_unlock(&rtlpriv->locks.conf_mutex);
+}
+/*<delete in kernel start>*/
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,37))
+/*<delete in kernel end>*/
+static int rtl_op_change_interface(struct ieee80211_hw *hw,
+				   struct ieee80211_vif *vif,
+				   enum nl80211_iftype new_type, bool p2p)
+{
+	struct rtl_priv *rtlpriv = rtl_priv(hw);
+	int ret;
+	rtl_op_remove_interface(hw, vif);
+
+	vif->type = new_type;
+	vif->p2p = p2p;
+	ret = rtl_op_add_interface(hw, vif);
+	RT_TRACE(COMP_MAC80211, DBG_LOUD,
+		 ("p2p %x\n", p2p));
+	return ret;
+}
+/*<delete in kernel start>*/
+#endif
+/*<delete in kernel end>*/
+static int rtl_op_config(struct ieee80211_hw *hw, u32 changed)
+{
+	struct rtl_priv *rtlpriv = rtl_priv(hw);
+	struct rtl_phy *rtlphy = &(rtlpriv->phy);
+	struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
+	struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw));
+	struct ieee80211_conf *conf = &hw->conf;
+
+	if (mac->skip_scan)
+		return 1;
+
+
+	mutex_lock(&rtlpriv->locks.conf_mutex);
+	if (changed & IEEE80211_CONF_CHANGE_LISTEN_INTERVAL) {	/* BIT(2) */
+		RT_TRACE(COMP_MAC80211, DBG_LOUD,
+			 ("IEEE80211_CONF_CHANGE_LISTEN_INTERVAL\n"));
+	}
+
+	/*For IPS */
+	if (changed & IEEE80211_CONF_CHANGE_IDLE) {
+		if (hw->conf.flags & IEEE80211_CONF_IDLE)
+			rtl_ips_nic_off(hw);
+		else
+			rtl_ips_nic_on(hw);
+	} else {
+		/*
+		 * RF-off may not have been caused by IPS, but the reason
+		 * is checked in the set_rf_power_state function.
+		 */
+		if (unlikely(ppsc->rfpwr_state == ERFOFF))
+			rtl_ips_nic_on(hw);
+	}
+
+	/*For LPS */
+	if (changed & IEEE80211_CONF_CHANGE_PS) {
+		cancel_delayed_work(&rtlpriv->works.ps_work);
+		cancel_delayed_work(&rtlpriv->works.ps_rfon_wq);
+		if (conf->flags & IEEE80211_CONF_PS) {
+			rtlpriv->psc.sw_ps_enabled = true;
+			/* The sleep here is required; otherwise we may
+			 * receive a beacon and put mac80211 into the wrong
+			 * PS state, making the power-save nullfunc fail and
+			 * causing packet loss.  The sleep must be quick but
+			 * not immediate, or the nullfunc sent by mac80211
+			 * will fail and packets will be lost; testing shows
+			 * a 5 ms delay works well. */
+			if (!rtlpriv->psc.multi_buffered)
+				queue_delayed_work(rtlpriv->works.rtl_wq,
+						   &rtlpriv->works.ps_work,
+						   MSECS(5));
+		} else {
+			rtl_swlps_rf_awake(hw);
+			rtlpriv->psc.sw_ps_enabled = false;
+		}
+	}
+
+	if (changed & IEEE80211_CONF_CHANGE_RETRY_LIMITS) {
+		RT_TRACE(COMP_MAC80211, DBG_LOUD,
+			 ("IEEE80211_CONF_CHANGE_RETRY_LIMITS %x\n",
+			  hw->conf.long_frame_max_tx_count));
+		mac->retry_long = hw->conf.long_frame_max_tx_count;
+		mac->retry_short = hw->conf.long_frame_max_tx_count;
+		rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_RETRY_LIMIT,
+				(u8 *) (&hw->conf.long_frame_max_tx_count));
+	}
+
+	if (changed & IEEE80211_CONF_CHANGE_CHANNEL) {
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,10,0))
+		struct ieee80211_channel *channel = hw->conf.chandef.chan;
+		enum nl80211_channel_type channel_type =
+				cfg80211_get_chandef_type(&(hw->conf.chandef));
+#else
+		struct ieee80211_channel *channel = hw->conf.channel;
+		enum nl80211_channel_type channel_type = hw->conf.channel_type;
+#endif
+		u8 wide_chan = (u8) channel->hw_value;
+
+		if (mac->act_scanning)
+			mac->n_channels++;
+
+		if (rtlpriv->dm.supp_phymode_switch &&
+			mac->link_state < MAC80211_LINKED &&
+			!mac->act_scanning) {
+			if (rtlpriv->cfg->ops->check_switch_to_dmdp)
+				rtlpriv->cfg->ops->check_switch_to_dmdp(hw);
+		}
+
+		/*
+		 * While scanning we have to switch back to
+		 * current_network.chan, so the channel is set even if
+		 * set_chan == current_network.chan.
+		 * mac80211 reports wrong bw40 info for the Cisco 1253 in
+		 * bw20 mode, so we correct it here based on the
+		 * UPPER/LOWER secondary-channel offset.
+		 */
+		switch (channel_type) {
+		case NL80211_CHAN_HT20:
+		case NL80211_CHAN_NO_HT:
+			/* SC */
+			mac->cur_40_prime_sc = PRIME_CHNL_OFFSET_DONT_CARE;
+			rtlphy->current_chan_bw = HT_CHANNEL_WIDTH_20;
+			mac->bw_40 = false;
+			break;
+		case NL80211_CHAN_HT40MINUS:
+			/* SC */
+			mac->cur_40_prime_sc = PRIME_CHNL_OFFSET_UPPER;
+			rtlphy->current_chan_bw = HT_CHANNEL_WIDTH_20_40;
+			mac->bw_40 = true;
+
+			/* wide channel */
+			wide_chan -= 2;
+
+			break;
+		case NL80211_CHAN_HT40PLUS:
+			/* SC */
+			mac->cur_40_prime_sc = PRIME_CHNL_OFFSET_LOWER;
+			rtlphy->current_chan_bw = HT_CHANNEL_WIDTH_20_40;
+			mac->bw_40 = true;
+
+			/* wide channel */
+			wide_chan += 2;
+
+			break;
+		default:
+			mac->bw_40 = false;
+			RT_TRACE(COMP_ERR, DBG_EMERG,
+				 ("switch case not processed\n"));
+			break;
+		}
+
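+		/* wide_chan was moved to the centre of the 20+40 pair above,
+		 * so clamp it defensively in case the arithmetic ever drops
+		 * below channel 1. */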
+		if (wide_chan <= 0)
+			wide_chan = 1;
+
+		/* While scanning, before going off-channel we may send a
+		 * ps=1 null frame to the AP and then a ps=0 null frame very
+		 * quickly. The first null can make the AP queue a lot of
+		 * packets in its TX buffer; those packets must be transmitted
+		 * before we leave the channel, so wait a bit longer and let
+		 * the AP flush them, otherwise the AP may disassociate us or
+		 * delete the BA session.
+		 */
+		if (rtlpriv->mac80211.offchan_deley) {
+			rtlpriv->mac80211.offchan_deley = false;
+			mdelay(50);
+		}
+
+		rtlphy->current_channel = wide_chan;
+
+		rtlpriv->cfg->ops->switch_channel(hw);
+		rtlpriv->cfg->ops->set_channel_access(hw);
+		rtlpriv->cfg->ops->set_bw_mode(hw,
+			channel_type);
+	}
+
+	mutex_unlock(&rtlpriv->locks.conf_mutex);
+
+	return 0;
+}
+
+static void rtl_op_configure_filter(struct ieee80211_hw *hw,
+				    unsigned int changed_flags,
+				    unsigned int *new_flags, u64 multicast)
+{
+	struct rtl_priv *rtlpriv = rtl_priv(hw);
+	struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
+
+	*new_flags &= RTL_SUPPORTED_FILTERS;
+	if (0 == changed_flags)
+		return;
+
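+	/* Each supported FIF_* flag below simply toggles the matching
+	 * receive-configuration (RCR) bit cached in mac->rx_conf. */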
+	/* TODO: we disable broadcast now, so enable it here */
+	if (changed_flags & FIF_ALLMULTI) {
+		if (*new_flags & FIF_ALLMULTI) {
+			mac->rx_conf |= rtlpriv->cfg->maps[MAC_RCR_AM] |
+			    rtlpriv->cfg->maps[MAC_RCR_AB];
+			RT_TRACE(COMP_MAC80211, DBG_LOUD,
+				 ("Enable receive multicast frame.\n"));
+		} else {
+			mac->rx_conf &= ~(rtlpriv->cfg->maps[MAC_RCR_AM] |
+					  rtlpriv->cfg->maps[MAC_RCR_AB]);
+			RT_TRACE(COMP_MAC80211, DBG_LOUD,
+				 ("Disable receive multicast frame.\n"));
+		}
+	}
+
+	if (changed_flags & FIF_FCSFAIL) {
+		if (*new_flags & FIF_FCSFAIL) {
+			mac->rx_conf |= rtlpriv->cfg->maps[MAC_RCR_ACRC32];
+			RT_TRACE(COMP_MAC80211, DBG_LOUD,
+				 ("Enable receive FCS error frame.\n"));
+		} else {
+			mac->rx_conf &= ~rtlpriv->cfg->maps[MAC_RCR_ACRC32];
+			RT_TRACE(COMP_MAC80211, DBG_LOUD,
+				 ("Disable receive FCS error frame.\n"));
+		}
+	}
+
+	/* If the SSID is not set to hw, don't check the BSSID. This is only
+	 * used for scanning while linked; for the linked and no-link cases
+	 * check_bssid is set in set_network_type. */
+	if ((changed_flags & FIF_BCN_PRBRESP_PROMISC) &&
+		(mac->link_state >= MAC80211_LINKED)) {
+		if (mac->opmode != NL80211_IFTYPE_AP &&
+		    mac->opmode != NL80211_IFTYPE_MESH_POINT) {
+			if (*new_flags & FIF_BCN_PRBRESP_PROMISC)
+				rtlpriv->cfg->ops->set_chk_bssid(hw, false);
+			else
+				rtlpriv->cfg->ops->set_chk_bssid(hw, true);
+		}
+	}
+
+	if (changed_flags & FIF_CONTROL) {
+		if (*new_flags & FIF_CONTROL) {
+			mac->rx_conf |= rtlpriv->cfg->maps[MAC_RCR_ACF];
+
+			RT_TRACE(COMP_MAC80211, DBG_LOUD,
+				 ("Enable receive control frame.\n"));
+		} else {
+			mac->rx_conf &= ~rtlpriv->cfg->maps[MAC_RCR_ACF];
+			RT_TRACE(COMP_MAC80211, DBG_LOUD,
+				 ("Disable receive control frame.\n"));
+		}
+	}
+
+	if (changed_flags & FIF_OTHER_BSS) {
+		if (*new_flags & FIF_OTHER_BSS) {
+			mac->rx_conf |= rtlpriv->cfg->maps[MAC_RCR_AAP];
+			RT_TRACE(COMP_MAC80211, DBG_LOUD,
+				 ("Enable receive other BSS's frame.\n"));
+		} else {
+			mac->rx_conf &= ~rtlpriv->cfg->maps[MAC_RCR_AAP];
+			RT_TRACE(COMP_MAC80211, DBG_LOUD,
+				 ("Disable receive other BSS's frame.\n"));
+		}
+	}
+}
+
+static int rtl_op_sta_add(struct ieee80211_hw *hw,
+			 struct ieee80211_vif *vif,
+			 struct ieee80211_sta *sta)
+{
+	struct rtl_priv *rtlpriv = rtl_priv(hw);
+	struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
+	struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
+	struct rtl_sta_info *sta_entry;
+
+	if (sta) {
+		sta_entry = (struct rtl_sta_info *) sta->drv_priv;
+		spin_lock_bh(&rtlpriv->locks.entry_list_lock);
+		list_add_tail(&sta_entry->list, &rtlpriv->entry_list);
+		spin_unlock_bh(&rtlpriv->locks.entry_list_lock);
+		if (rtlhal->current_bandtype == BAND_ON_2_4G) {
+			sta_entry->wireless_mode = WIRELESS_MODE_G;
+			if (sta->supp_rates[0] <= 0xf)
+				sta_entry->wireless_mode = WIRELESS_MODE_B;
+			if (sta->ht_cap.ht_supported)
+				sta_entry->wireless_mode = WIRELESS_MODE_N_24G;
+
+			if (vif->type == NL80211_IFTYPE_ADHOC)
+				sta_entry->wireless_mode = WIRELESS_MODE_G;
+		} else if (rtlhal->current_bandtype == BAND_ON_5G) {
+			sta_entry->wireless_mode = WIRELESS_MODE_A;
+			if (sta->ht_cap.ht_supported)
+				sta_entry->wireless_mode = WIRELESS_MODE_N_5G;
+
+			if (vif->type == NL80211_IFTYPE_ADHOC)
+				sta_entry->wireless_mode = WIRELESS_MODE_A;
+		}
+		/*disable cck rate for p2p*/
+		if (mac->p2p)
+			sta->supp_rates[0] &= 0xfffffff0;
+
+		memcpy(sta_entry->mac_addr, sta->addr, ETH_ALEN);
+		RT_TRACE(COMP_MAC80211, DBG_DMESG,
+			("Add sta addr is %pM\n", sta->addr));
+		rtlpriv->cfg->ops->update_rate_tbl(hw, sta, 0);
+	}
+
+	return 0;
+}
+
+static int rtl_op_sta_remove(struct ieee80211_hw *hw,
+				struct ieee80211_vif *vif,
+				struct ieee80211_sta *sta)
+{
+	struct rtl_priv *rtlpriv = rtl_priv(hw);
+	struct rtl_sta_info *sta_entry;
+
+	if (sta) {
+		RT_TRACE(COMP_MAC80211, DBG_DMESG,
+			("Remove sta addr is %pM\n", sta->addr));
+		sta_entry = (struct rtl_sta_info *) sta->drv_priv;
+		sta_entry->wireless_mode = 0;
+		sta_entry->ratr_index = 0;
+		spin_lock_bh(&rtlpriv->locks.entry_list_lock);
+		list_del(&sta_entry->list);
+		spin_unlock_bh(&rtlpriv->locks.entry_list_lock);
+	}
+	return 0;
+}
+
+static int _rtl_get_hal_qnum(u16 queue)
+{
+	int qnum;
+
+	switch (queue) {
+	case 0:
+		qnum = AC3_VO;
+		break;
+	case 1:
+		qnum = AC2_VI;
+		break;
+	case 2:
+		qnum = AC0_BE;
+		break;
+	case 3:
+		qnum = AC1_BK;
+		break;
+	default:
+		qnum = AC0_BE;
+		break;
+	}
+	return qnum;
+}
+
+/*
+ * for mac80211: VO=0, VI=1, BE=2, BK=3
+ * for rtl819x:  BE=0, BK=1, VI=2, VO=3
+ */
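+/*
+ * Example: a conf_tx() call for mac80211 queue 0 (VO) is translated by
+ * _rtl_get_hal_qnum() to AC3_VO, so its EDCA parameters land in
+ * mac->ac[AC3_VO] and are programmed via set_qos(hw, AC3_VO).
+ */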
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,2,0))
+static int rtl_op_conf_tx(struct ieee80211_hw *hw,
+			  struct ieee80211_vif *vif, u16 queue,
+			  const struct ieee80211_tx_queue_params *param)
+#else
+static int rtl_op_conf_tx(struct ieee80211_hw *hw, u16 queue,
+			  const struct ieee80211_tx_queue_params *param)
+#endif
+{
+	struct rtl_priv *rtlpriv = rtl_priv(hw);
+	struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
+	int aci;
+
+	if (queue >= AC_MAX) {
+		RT_TRACE(COMP_ERR, DBG_WARNING,
+			 ("queue number %d is incorrect!\n", queue));
+		return -EINVAL;
+	}
+
+	aci = _rtl_get_hal_qnum(queue);
+	mac->ac[aci].aifs = param->aifs;
+	mac->ac[aci].cw_min = param->cw_min;
+	mac->ac[aci].cw_max = param->cw_max;
+	mac->ac[aci].tx_op = param->txop;
+	memcpy(&mac->edca_param[aci], param, sizeof(*param));
+	rtlpriv->cfg->ops->set_qos(hw, aci);
+	return 0;
+}
+
+static void rtl_op_bss_info_changed(struct ieee80211_hw *hw,
+				    struct ieee80211_vif *vif,
+				    struct ieee80211_bss_conf *bss_conf,
+				    u32 changed)
+{
+	struct rtl_priv *rtlpriv = rtl_priv(hw);
+	struct rtl_hal *rtlhal = rtl_hal(rtlpriv);
+	struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
+	struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw));
+
+	mutex_lock(&rtlpriv->locks.conf_mutex);
+	if ((vif->type == NL80211_IFTYPE_ADHOC) ||
+	    (vif->type == NL80211_IFTYPE_AP) ||
+	    (vif->type == NL80211_IFTYPE_MESH_POINT)) {
+		if ((changed & BSS_CHANGED_BEACON) ||
+		    (changed & BSS_CHANGED_BEACON_ENABLED &&
+		     bss_conf->enable_beacon)) {
+			if (mac->beacon_enabled == 0) {
+				RT_TRACE(COMP_MAC80211, DBG_DMESG,
+					 ("BSS_CHANGED_BEACON_ENABLED \n"));
+
+				/*start hw beacon interrupt. */
+				/*rtlpriv->cfg->ops->set_bcn_reg(hw); */
+				mac->beacon_enabled = 1;
+				rtlpriv->cfg->ops->update_interrupt_mask(hw,
+						rtlpriv->cfg->maps
+						[RTL_IBSS_INT_MASKS], 0);
+
+				if (rtlpriv->cfg->ops->linked_set_reg)
+					rtlpriv->cfg->ops->linked_set_reg(hw);
+			}
+		}
+		if ((changed & BSS_CHANGED_BEACON_ENABLED) &&
+		    !bss_conf->enable_beacon) {
+			if (mac->beacon_enabled == 1) {
+				RT_TRACE(COMP_MAC80211, DBG_DMESG,
+					 ("ADHOC DISABLE BEACON\n"));
+
+				mac->beacon_enabled = 0;
+				rtlpriv->cfg->ops->update_interrupt_mask(hw, 0,
+						rtlpriv->cfg->maps
+						[RTL_IBSS_INT_MASKS]);
+			}
+		}
+		if (changed & BSS_CHANGED_BEACON_INT) {
+			RT_TRACE(COMP_BEACON, DBG_TRACE,
+				 ("BSS_CHANGED_BEACON_INT\n"));
+			mac->beacon_interval = bss_conf->beacon_int;
+			rtlpriv->cfg->ops->set_bcn_intv(hw);
+		}
+	}
+
+	/*TODO: reference to enum ieee80211_bss_change */
+	if (changed & BSS_CHANGED_ASSOC) {
+		if (bss_conf->assoc) {
+			struct ieee80211_sta *sta = NULL;
+			/* We should reset all security info and the CAM
+			 * here, before setting the CAM after association.
+			 * Do not reset on disassociation: that would break
+			 * a TKIP->WEP change because some flags would be
+			 * left in the wrong state. */
+			/* reset sec info */
+			rtl_cam_reset_sec_info(hw);
+			/* reset cam to fix wep fail issue
+			 * when change from wpa to wep */
+			rtl_cam_reset_all_entry(hw);
+
+			mac->link_state = MAC80211_LINKED;
+			mac->cnt_after_linked = 0;
+			mac->assoc_id = bss_conf->aid;
+			memcpy(mac->bssid, bss_conf->bssid, 6);
+
+			if (rtlpriv->cfg->ops->linked_set_reg)
+				rtlpriv->cfg->ops->linked_set_reg(hw);
+
+			rcu_read_lock();
+			sta = ieee80211_find_sta(vif, (u8*)bss_conf->bssid);
+
+			if (vif->type == NL80211_IFTYPE_STATION && sta)
+				rtlpriv->cfg->ops->update_rate_tbl(hw, sta, 0);
+			RT_TRACE(COMP_EASY_CONCURRENT, DBG_LOUD,
+				 ("send PS STATIC frame\n"));
+			if (rtlpriv->dm.supp_phymode_switch) {
+				if (sta && sta->ht_cap.ht_supported)
+					rtl_send_smps_action(hw, sta,
+							IEEE80211_SMPS_STATIC);
+			}
+			rcu_read_unlock();
+
+			RT_TRACE(COMP_MAC80211, DBG_DMESG,
+				 ("BSS_CHANGED_ASSOC\n"));
+		} else {
+			if (mac->link_state == MAC80211_LINKED)
+				rtl_lps_leave(hw);
+			if (ppsc->p2p_ps_info.p2p_ps_mode > P2P_PS_NONE)
+				rtl_p2p_ps_cmd(hw, P2P_PS_DISABLE);
+			mac->link_state = MAC80211_NOLINK;
+			memset(mac->bssid, 0, 6);
+			mac->vendor = PEER_UNKNOWN;
+
+			if (rtlpriv->dm.supp_phymode_switch) {
+				if (rtlpriv->cfg->ops->check_switch_to_dmdp)
+					rtlpriv->cfg->ops->check_switch_to_dmdp(hw);
+			}
+			RT_TRACE(COMP_MAC80211, DBG_DMESG,
+				 ("BSS_CHANGED_UN_ASSOC\n"));
+		}
+	}
+
+	if (changed & BSS_CHANGED_ERP_CTS_PROT) {
+		RT_TRACE(COMP_MAC80211, DBG_TRACE,
+			 ("BSS_CHANGED_ERP_CTS_PROT\n"));
+		mac->use_cts_protect = bss_conf->use_cts_prot;
+	}
+
+	if (changed & BSS_CHANGED_ERP_PREAMBLE) {
+		RT_TRACE(COMP_MAC80211, DBG_LOUD,
+			 ("BSS_CHANGED_ERP_PREAMBLE use short preamble:%x \n",
+			  bss_conf->use_short_preamble));
+
+		mac->short_preamble = bss_conf->use_short_preamble;
+		rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_ACK_PREAMBLE,
+					      (u8 *) (&mac->short_preamble));
+	}
+
+	if (changed & BSS_CHANGED_ERP_SLOT) {
+		RT_TRACE(COMP_MAC80211, DBG_TRACE,
+			 ("BSS_CHANGED_ERP_SLOT\n"));
+
+		if (bss_conf->use_short_slot)
+			mac->slot_time = RTL_SLOT_TIME_9;
+		else
+			mac->slot_time = RTL_SLOT_TIME_20;
+
+		rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_SLOT_TIME,
+					      (u8 *) (&mac->slot_time));
+	}
+
+	if (changed & BSS_CHANGED_HT) {
+		struct ieee80211_sta *sta = NULL;
+
+		RT_TRACE(COMP_MAC80211, DBG_TRACE,
+			 ("BSS_CHANGED_HT\n"));
+
+		rcu_read_lock();
+		sta = ieee80211_find_sta(vif, (u8*)bss_conf->bssid);
+		if (sta) {
+			if (sta->ht_cap.ampdu_density >
+			    mac->current_ampdu_density)
+				mac->current_ampdu_density =
+				    sta->ht_cap.ampdu_density;
+			if (sta->ht_cap.ampdu_factor <
+			    mac->current_ampdu_factor)
+				mac->current_ampdu_factor =
+				    sta->ht_cap.ampdu_factor;
+		}
+		rcu_read_unlock();
+
+		rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_SHORTGI_DENSITY,
+					      (u8 *) (&mac->max_mss_density));
+		rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_AMPDU_FACTOR,
+					      &mac->current_ampdu_factor);
+		rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_AMPDU_MIN_SPACE,
+					      &mac->current_ampdu_density);
+	}
+
+	if (changed & BSS_CHANGED_BSSID) {
+		u32 basic_rates;
+		struct ieee80211_sta *sta = NULL;
+
+		rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_BSSID,
+					      (u8 *) bss_conf->bssid);
+
+		RT_TRACE(COMP_MAC80211, DBG_DMESG,
+			 ("bssid: %pM\n", bss_conf->bssid));
+
+		mac->vendor = PEER_UNKNOWN;
+		memcpy(mac->bssid, bss_conf->bssid, 6);
+		rtlpriv->cfg->ops->set_network_type(hw, vif->type);
+
+		rcu_read_lock();
+		sta = ieee80211_find_sta(vif, (u8*)bss_conf->bssid);
+		if (!sta) {
+			rcu_read_unlock();
+			goto out;
+		}
+
+		if (rtlhal->current_bandtype == BAND_ON_5G) {
+			mac->mode = WIRELESS_MODE_A;
+		} else {
+			if (sta->supp_rates[0] <= 0xf)
+				mac->mode = WIRELESS_MODE_B;
+			else
+				mac->mode = WIRELESS_MODE_G;
+		}
+
+		if (sta->ht_cap.ht_supported) {
+			if (rtlhal->current_bandtype == BAND_ON_2_4G)
+				mac->mode = WIRELESS_MODE_N_24G;
+			else
+				mac->mode = WIRELESS_MODE_N_5G;
+		}
+
+		/* Only station mode needs this; for IBSS and AP mode the
+		 * wireless mode is set in sta_add, and it would be NULL here. */
+		if (vif->type == NL80211_IFTYPE_STATION) {
+			struct rtl_sta_info *sta_entry;
+			sta_entry = (struct rtl_sta_info *) sta->drv_priv;
+			sta_entry->wireless_mode = mac->mode;
+		}
+
+		if (sta->ht_cap.ht_supported) {
+			mac->ht_enable = true;
+
+			/*
+			 * For the Cisco 1252 in bw20 mode this is wrong:
+			 * if (ht_cap & IEEE80211_HT_CAP_SUP_WIDTH_20_40)
+			 *	mac->bw_40 = true;
+			 */
+		}
+
+		if (changed & BSS_CHANGED_BASIC_RATES) {
+			/* For 5G the rates must be shifted left by
+			 * RATE_6M_INDEX (4) because 5G has no CCK rates. */
+			if (rtlhal->current_bandtype == BAND_ON_5G)
+				basic_rates = sta->supp_rates[1] << 4;
+			else
+				basic_rates = sta->supp_rates[0];
+
+			mac->basic_rates = basic_rates;
+			rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_BASIC_RATE,
+					(u8 *) (&basic_rates));
+		}
+		rcu_read_unlock();
+	}
+
+	/*
+	 * For FW LPS and Keep Alive:
+	 * To tell firmware we have connected
+	 * to an AP. For 92SE/CE power save v2.
+	 */
+	if (changed & BSS_CHANGED_ASSOC) {
+		if (bss_conf->assoc) {
+			u8 keep_alive = 10;
+			u8 mstatus = RT_MEDIA_CONNECT;
+			rtlpriv->cfg->ops->set_hw_reg(hw,
+						      HW_VAR_KEEP_ALIVE,
+						      (u8 *) (&keep_alive));
+
+			rtlpriv->cfg->ops->set_hw_reg(hw,
+						      HW_VAR_H2C_FW_JOINBSSRPT,
+						      (u8 *) (&mstatus));
+			ppsc->report_linked = true;
+
+		} else {
+
+			u8 mstatus = RT_MEDIA_DISCONNECT;
+			rtlpriv->cfg->ops->set_hw_reg(hw,
+						      HW_VAR_H2C_FW_JOINBSSRPT,
+						      (u8 *) (&mstatus));
+			ppsc->report_linked = false;
+
+		}
+
+		if (rtlpriv->cfg->ops->get_btc_status()) {
+			rtlpriv->btcoexist.btc_ops->btc_mediastatus_notify(
+						rtlpriv, ppsc->report_linked);
+		}
+	}
+
+out:
+	mutex_unlock(&rtlpriv->locks.conf_mutex);
+}
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,2,0))
+static u64 rtl_op_get_tsf(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
+#else
+static u64 rtl_op_get_tsf(struct ieee80211_hw *hw)
+#endif
+{
+	struct rtl_priv *rtlpriv = rtl_priv(hw);
+	u64 tsf;
+
+	rtlpriv->cfg->ops->get_hw_reg(hw, HW_VAR_CORRECT_TSF, (u8 *) (&tsf));
+	return tsf;
+}
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,2,0))
+static void rtl_op_set_tsf(struct ieee80211_hw *hw,
+			   struct ieee80211_vif *vif, u64 tsf)
+#else
+static void rtl_op_set_tsf(struct ieee80211_hw *hw, u64 tsf)
+#endif
+{
+	struct rtl_priv *rtlpriv = rtl_priv(hw);
+	struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
+	u8 bibss = (mac->opmode == NL80211_IFTYPE_ADHOC) ? 1 : 0;
+
+	mac->tsf = tsf;
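+	/* Only the "in IBSS" flag is passed to the hardware op here; the
+	 * HW-specific HW_VAR_CORRECT_TSF handler presumably picks the new
+	 * value up from mac->tsf (not visible in this file). */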
+	rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_CORRECT_TSF, (u8 *) (&bibss));
+}
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,2,0))
+static void rtl_op_reset_tsf(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
+#else
+static void rtl_op_reset_tsf(struct ieee80211_hw *hw)
+#endif
+{
+	struct rtl_priv *rtlpriv = rtl_priv(hw);
+	u8 tmp = 0;
+
+	rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_DUAL_TSF_RST, (u8 *) (&tmp));
+}
+
+static void rtl_op_sta_notify(struct ieee80211_hw *hw,
+			      struct ieee80211_vif *vif,
+			      enum sta_notify_cmd cmd,
+			      struct ieee80211_sta *sta)
+{
+	switch (cmd) {
+	case STA_NOTIFY_SLEEP:
+		break;
+	case STA_NOTIFY_AWAKE:
+		break;
+	default:
+		break;
+	}
+}
+
+static int rtl_op_ampdu_action(struct ieee80211_hw *hw,
+			       struct ieee80211_vif *vif,
+			       enum ieee80211_ampdu_mlme_action action,
+			       struct ieee80211_sta *sta, u16 tid, u16 *ssn
+/*<delete in kernel start>*/
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,39))
+/*<delete in kernel end>*/
+			       , u8 buf_size
+/*<delete in kernel start>*/
+#endif
+/*<delete in kernel end>*/
+			       )
+{
+	struct rtl_priv *rtlpriv = rtl_priv(hw);
+
+	switch (action) {
+	case IEEE80211_AMPDU_TX_START:
+		RT_TRACE(COMP_MAC80211, DBG_TRACE,
+			 ("IEEE80211_AMPDU_TX_START: TID:%d\n", tid));
+		return rtl_tx_agg_start(hw, vif, sta, tid, ssn);
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,9,0))
+	case IEEE80211_AMPDU_TX_STOP_CONT:
+	case IEEE80211_AMPDU_TX_STOP_FLUSH:
+	case IEEE80211_AMPDU_TX_STOP_FLUSH_CONT:
+#else
+	case IEEE80211_AMPDU_TX_STOP:
+#endif
+		RT_TRACE(COMP_MAC80211, DBG_TRACE,
+			 ("IEEE80211_AMPDU_TX_STOP: TID:%d\n", tid));
+		return rtl_tx_agg_stop(hw, vif, sta, tid);
+	case IEEE80211_AMPDU_TX_OPERATIONAL:
+		RT_TRACE(COMP_MAC80211, DBG_TRACE,
+			 ("IEEE80211_AMPDU_TX_OPERATIONAL:TID:%d\n", tid));
+		rtl_tx_agg_oper(hw, sta, tid);
+		break;
+	case IEEE80211_AMPDU_RX_START:
+		RT_TRACE(COMP_MAC80211, DBG_TRACE,
+			 ("IEEE80211_AMPDU_RX_START:TID:%d\n", tid));
+		return rtl_rx_agg_start(hw, sta, tid);
+	case IEEE80211_AMPDU_RX_STOP:
+		RT_TRACE(COMP_MAC80211, DBG_TRACE,
+			 ("IEEE80211_AMPDU_RX_STOP:TID:%d\n", tid));
+		return rtl_rx_agg_stop(hw, sta, tid);
+	default:
+		RT_TRACE(COMP_ERR, DBG_EMERG,
+			 ("IEEE80211_AMPDU_ERR!!!!:\n"));
+		return -EOPNOTSUPP;
+	}
+	return 0;
+}
+
+static void rtl_op_sw_scan_start(struct ieee80211_hw *hw)
+{
+	struct rtl_priv *rtlpriv = rtl_priv(hw);
+	struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
+
+	RT_TRACE(COMP_MAC80211, DBG_LOUD, ("\n"));
+	mac->act_scanning = true;
+	/*rtlpriv->btcops->btc_scan_notify(rtlpriv, 0); */
+	if (rtlpriv->link_info.b_higher_busytraffic) {
+		mac->skip_scan = true;
+		return;
+	}
+
+	if (rtlpriv->dm.supp_phymode_switch) {
+		if (rtlpriv->cfg->ops->check_switch_to_dmdp)
+			rtlpriv->cfg->ops->check_switch_to_dmdp(hw);
+	}
+
+	if (mac->link_state == MAC80211_LINKED) {
+		rtl_lps_leave(hw);
+		mac->link_state = MAC80211_LINKED_SCANNING;
+	} else {
+		rtl_ips_nic_on(hw);
+	}
+
+	/* Dual mac */
+	rtlpriv->rtlhal.b_load_imrandiqk_setting_for2g = false;
+
+	rtlpriv->cfg->ops->led_control(hw, LED_CTL_SITE_SURVEY);
+
+	rtlpriv->cfg->ops->scan_operation_backup(hw, SCAN_OPT_BACKUP_BAND0);
+}
+
+static void rtl_op_sw_scan_complete(struct ieee80211_hw *hw)
+{
+	struct rtl_priv *rtlpriv = rtl_priv(hw);
+	struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
+
+	RT_TRACE(COMP_MAC80211, DBG_LOUD, ("\n"));
+	mac->act_scanning = false;
+	mac->skip_scan = false;
+	if (rtlpriv->link_info.b_higher_busytraffic)
+		return;
+
+	/* p2p will use 1/6/11 to scan */
+	if (mac->n_channels == 3)
+		mac->p2p_in_use = true;
+	else
+		mac->p2p_in_use = false;
+	mac->n_channels = 0;
+	/* Dual mac */
+	rtlpriv->rtlhal.b_load_imrandiqk_setting_for2g = false;
+
+	if (mac->link_state == MAC80211_LINKED_SCANNING) {
+		mac->link_state = MAC80211_LINKED;
+		if (mac->opmode == NL80211_IFTYPE_STATION) {
+			/* fix fwlps issue */
+			rtlpriv->cfg->ops->set_network_type(hw, mac->opmode);
+		}
+	}
+
+	rtlpriv->cfg->ops->scan_operation_backup(hw, SCAN_OPT_RESTORE);
+	/* rtlpriv->btcops->btc_scan_notify(rtlpriv, 1); */
+}
+
+static int rtl_op_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
+			  struct ieee80211_vif *vif, struct ieee80211_sta *sta,
+			  struct ieee80211_key_conf *key)
+{
+	struct rtl_priv *rtlpriv = rtl_priv(hw);
+	u8 key_type = NO_ENCRYPTION;
+	u8 key_idx;
+	bool group_key = false;
+	bool wep_only = false;
+	int err = 0;
+	u8 mac_addr[ETH_ALEN];
+	u8 bcast_addr[ETH_ALEN] = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff };
+	u8 zero_addr[ETH_ALEN] = { 0 };
+
+	if (rtlpriv->cfg->mod_params->sw_crypto || rtlpriv->sec.use_sw_sec) {
+		RT_TRACE(COMP_ERR, DBG_WARNING,
+			 ("not open hw encryption\n"));
+		return -ENOSPC;	/*User disabled HW-crypto */
+	}
+	/* To support IBSS, use sw-crypto for GTK */
+	if (((vif->type == NL80211_IFTYPE_ADHOC) ||
+	    (vif->type == NL80211_IFTYPE_MESH_POINT)) &&
+	   !(key->flags & IEEE80211_KEY_FLAG_PAIRWISE))
+		return -ENOSPC;
+	RT_TRACE(COMP_SEC, DBG_DMESG,
+		 ("%s hardware based encryption for keyidx: %d, mac: %pM\n",
+		  cmd == SET_KEY ? "Using" : "Disabling", key->keyidx,
+		  sta ? sta->addr : bcast_addr));
+	rtlpriv->sec.being_setkey = true;
+	rtl_ips_nic_on(hw);
+	mutex_lock(&rtlpriv->locks.conf_mutex);
+	/* <1> get encryption alg */
+
+/*<delete in kernel start>*/
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,37))
+/*<delete in kernel end>*/
+	switch (key->cipher) {
+	case WLAN_CIPHER_SUITE_WEP40:
+		key_type = WEP40_ENCRYPTION;
+		RT_TRACE(COMP_SEC, DBG_DMESG, ("alg:WEP40\n"));
+		break;
+	case WLAN_CIPHER_SUITE_WEP104:
+		RT_TRACE(COMP_SEC, DBG_DMESG, ("alg:WEP104\n"));
+		key_type = WEP104_ENCRYPTION;
+		break;
+	case WLAN_CIPHER_SUITE_TKIP:
+		key_type = TKIP_ENCRYPTION;
+		RT_TRACE(COMP_SEC, DBG_DMESG, ("alg:TKIP\n"));
+		break;
+	case WLAN_CIPHER_SUITE_CCMP:
+		key_type = AESCCMP_ENCRYPTION;
+		RT_TRACE(COMP_SEC, DBG_DMESG, ("alg:CCMP\n"));
+		break;
+	case WLAN_CIPHER_SUITE_AES_CMAC:
+		/* The HW doesn't support CMAC encryption,
+		 * use software CMAC encryption instead. */
+		key_type = AESCMAC_ENCRYPTION;
+		RT_TRACE(COMP_SEC, DBG_DMESG, ("alg:CMAC\n"));
+		RT_TRACE(COMP_SEC, DBG_DMESG,
+			 ("HW doesn't support CMAC encryption, "
+			  "use software CMAC encryption\n"));
+		err = -EOPNOTSUPP;
+		goto out_unlock;
+	default:
+		RT_TRACE(COMP_ERR, DBG_EMERG,
+			 ("alg_err:%x!!!!:\n", key->cipher));
+		goto out_unlock;
+	}
+/*<delete in kernel start>*/
+#else
+	switch (key->alg) {
+	case ALG_WEP:
+		if (key->keylen == WLAN_KEY_LEN_WEP40) {
+			key_type = WEP40_ENCRYPTION;
+			RT_TRACE(COMP_SEC, DBG_DMESG, ("alg:WEP40\n"));
+		} else {
+			RT_TRACE(COMP_SEC, DBG_DMESG,
+				 ("alg:WEP104\n"));
+			key_type = WEP104_ENCRYPTION;
+		}
+		break;
+	case ALG_TKIP:
+		key_type = TKIP_ENCRYPTION;
+		RT_TRACE(COMP_SEC, DBG_DMESG, ("alg:TKIP\n"));
+		break;
+	case ALG_CCMP:
+		key_type = AESCCMP_ENCRYPTION;
+		RT_TRACE(COMP_SEC, DBG_DMESG, ("alg:CCMP\n"));
+		break;
+	case ALG_AES_CMAC:
+		/* The HW doesn't support CMAC encryption,
+		 * use software CMAC encryption instead. */
+		key_type = AESCMAC_ENCRYPTION;
+		RT_TRACE(COMP_SEC, DBG_DMESG, ("alg:CMAC\n"));
+		RT_TRACE(COMP_SEC, DBG_DMESG,
+			 ("HW doesn't support CMAC encryption, "
+			  "use software CMAC encryption\n"));
+		err = -EOPNOTSUPP;
+		goto out_unlock;
+	default:
+		RT_TRACE(COMP_ERR, DBG_EMERG,
+			 ("alg_err:%x!!!!:\n", key->alg));
+		goto out_unlock;
+	}
+#endif
+/*<delete in kernel end>*/
+	if (key_type == WEP40_ENCRYPTION ||
+	    key_type == WEP104_ENCRYPTION ||
+	    vif->type == NL80211_IFTYPE_ADHOC)
+		rtlpriv->sec.use_defaultkey = true;
+
+	/* <2> get key_idx */
+	key_idx = (u8) (key->keyidx);
+	if (key_idx > 3)
+		goto out_unlock;
+	/* <3> if pairwise key enable_hw_sec */
+	group_key = !(key->flags & IEEE80211_KEY_FLAG_PAIRWISE);
+
+	/* WEP keys are always group keys, but there are two cases:
+	 * 1) WEP only: everything uses WEP. In this case
+	 * rtlpriv->sec.pairwise_enc_algorithm == NO_ENCRYPTION
+	 * is true and enable_hw_sec is called when the WEP
+	 * key is set.
+	 * 2) WEP (group) + AES (pairwise): some APs, e.g. Cisco,
+	 * may use this; in that case enable_hw_sec is not called
+	 * when the WEP key is set. */
+	/* We must reset sec_info after linking and before setting the key,
+	 * or some flags will be wrong. */
+	if (vif->type == NL80211_IFTYPE_AP ||
+	    vif->type == NL80211_IFTYPE_MESH_POINT) {
+		if (!group_key || key_type == WEP40_ENCRYPTION ||
+		    key_type == WEP104_ENCRYPTION) {
+			if (group_key)
+				wep_only = true;
+			rtlpriv->cfg->ops->enable_hw_sec(hw);
+		}
+	} else {
+		if ((!group_key) || (vif->type == NL80211_IFTYPE_ADHOC) ||
+		    rtlpriv->sec.pairwise_enc_algorithm == NO_ENCRYPTION) {
+			if (rtlpriv->sec.pairwise_enc_algorithm ==
+			    NO_ENCRYPTION &&
+			    (key_type == WEP40_ENCRYPTION ||
+			     key_type == WEP104_ENCRYPTION))
+				wep_only = true;
+			rtlpriv->sec.pairwise_enc_algorithm = key_type;
+			RT_TRACE(COMP_SEC, DBG_DMESG,
+				 ("set enable_hw_sec, key_type:%x(OPEN:0 WEP40:"
+				  "1 TKIP:2 AES:4 WEP104:5)\n", key_type));
+			rtlpriv->cfg->ops->enable_hw_sec(hw);
+		}
+	}
+	/* <4> set key based on cmd */
+	switch (cmd) {
+	case SET_KEY:
+		if (wep_only) {
+			RT_TRACE(COMP_SEC, DBG_DMESG,
+				 ("set WEP(group/pairwise) key\n"));
+			/* Pairwise key with an assigned MAC address. */
+			rtlpriv->sec.pairwise_enc_algorithm = key_type;
+			rtlpriv->sec.group_enc_algorithm = key_type;
+			/*set local buf about wep key. */
+			memcpy(rtlpriv->sec.key_buf[key_idx],
+			       key->key, key->keylen);
+			rtlpriv->sec.key_len[key_idx] = key->keylen;
+			memcpy(mac_addr, zero_addr, ETH_ALEN);
+		} else if (group_key) {	/* group key */
+			RT_TRACE(COMP_SEC, DBG_DMESG,
+				 ("set group key\n"));
+			/* group key */
+			rtlpriv->sec.group_enc_algorithm = key_type;
+			/*set local buf about group key. */
+			memcpy(rtlpriv->sec.key_buf[key_idx],
+			       key->key, key->keylen);
+			rtlpriv->sec.key_len[key_idx] = key->keylen;
+			memcpy(mac_addr, bcast_addr, ETH_ALEN);
+		} else {	/* pairwise key */
+			RT_TRACE(COMP_SEC, DBG_DMESG,
+				 ("set pairwise key\n"));
+			if (!sta) {
+				RT_ASSERT(false, ("pairwise key without "
+						  "mac_addr\n"));
+
+				err = -EOPNOTSUPP;
+				goto out_unlock;
+			}
+			/* Pairwise key with an assigned MAC address. */
+			rtlpriv->sec.pairwise_enc_algorithm = key_type;
+			/*set local buf about pairwise key. */
+			memcpy(rtlpriv->sec.key_buf[PAIRWISE_KEYIDX],
+			       key->key, key->keylen);
+			rtlpriv->sec.key_len[PAIRWISE_KEYIDX] = key->keylen;
+			rtlpriv->sec.pairwise_key =
+			    rtlpriv->sec.key_buf[PAIRWISE_KEYIDX];
+			memcpy(mac_addr, sta->addr, ETH_ALEN);
+		}
+		rtlpriv->cfg->ops->set_key(hw, key_idx, mac_addr,
+					   group_key, key_type, wep_only,
+					   false);
+		/* <5> tell mac80211 do something: */
+		/* must use a sw-generated IV, otherwise it does not work */
+		key->flags |= IEEE80211_KEY_FLAG_GENERATE_IV;
+		key->hw_key_idx = key_idx;
+		if (key_type == TKIP_ENCRYPTION)
+			key->flags |= IEEE80211_KEY_FLAG_GENERATE_MMIC;
+		/*use software CCMP encryption for management frames (MFP) */
+		if (key_type == AESCCMP_ENCRYPTION)
+			key->flags |= IEEE80211_KEY_FLAG_SW_MGMT;
+		break;
+	case DISABLE_KEY:
+		RT_TRACE(COMP_SEC, DBG_DMESG,
+			 ("disable key delete one entry\n"));
+		/*set local buf about wep key. */
+		if (vif->type == NL80211_IFTYPE_AP ||
+			vif->type == NL80211_IFTYPE_MESH_POINT) {
+			if (sta)
+				rtl_cam_del_entry(hw, sta->addr);
+		}
+		memset(rtlpriv->sec.key_buf[key_idx], 0, key->keylen);
+		rtlpriv->sec.key_len[key_idx] = 0;
+		memcpy(mac_addr, zero_addr, ETH_ALEN);
+		/*
+		 * mac80211 will delete entries one by one,
+		 * so don't use rtl_cam_reset_all_entry
+		 * or clear all entries here.
+		 */
+		rtl_cam_delete_one_entry(hw, mac_addr, key_idx);
+		break;
+	default:
+		RT_TRACE(COMP_ERR, DBG_EMERG,
+			 ("cmd_err:%x!!!!:\n", cmd));
+	}
+out_unlock:
+	mutex_unlock(&rtlpriv->locks.conf_mutex);
+	rtlpriv->sec.being_setkey = false;
+	return err;
+}
+
+static void rtl_op_rfkill_poll(struct ieee80211_hw *hw)
+{
+	struct rtl_priv *rtlpriv = rtl_priv(hw);
+
+	bool radio_state;
+	bool blocked;
+	u8 valid = 0;
+
+	if (!test_bit(RTL_STATUS_INTERFACE_START, &rtlpriv->status))
+		return;
+
+	mutex_lock(&rtlpriv->locks.conf_mutex);
+
+	/*if Radio On return true here */
+	radio_state = rtlpriv->cfg->ops->radio_onoff_checking(hw, &valid);
+
+	if (valid) {
+		if (unlikely(radio_state != rtlpriv->rfkill.rfkill_state)) {
+			rtlpriv->rfkill.rfkill_state = radio_state;
+
+			RT_TRACE(COMP_RF, DBG_DMESG,
+				 (KERN_INFO "wireless radio switch turned %s\n",
+				  radio_state ? "on" : "off"));
+
+			blocked = (rtlpriv->rfkill.rfkill_state == 1) ? 0 : 1;
+			wiphy_rfkill_set_hw_state(hw->wiphy, blocked);
+		}
+	}
+
+	mutex_unlock(&rtlpriv->locks.conf_mutex);
+}
+
+/* This function is called by mac80211 to flush the TX buffer before a
+ * channel switch or power save. Otherwise, buffered packets might be sent
+ * after we have gone off-channel or the RF has gone to sleep, which can
+ * make the AP disassociate us. */
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,10,0))
+static void rtl_op_flush(struct ieee80211_hw *hw, u32 queues, bool drop)
+{
+	struct rtl_priv *rtlpriv = rtl_priv(hw);
+
+	if (rtlpriv->intf_ops->flush)
+		rtlpriv->intf_ops->flush(hw, queues, drop);
+}
+#else
+static void rtl_op_flush(struct ieee80211_hw *hw, bool drop)
+{
+	struct rtl_priv *rtlpriv = rtl_priv(hw);
+
+	if (rtlpriv->intf_ops->flush)
+		rtlpriv->intf_ops->flush(hw, drop);
+}
+#endif
+
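+/*
+ * mac80211 callback table for this driver. It is not registered here;
+ * presumably the bus-specific probe code hands it to ieee80211_alloc_hw()
+ * when the device is set up (that code is outside this file).
+ */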
+const struct ieee80211_ops rtl_ops = {
+	.start = rtl_op_start,
+	.stop = rtl_op_stop,
+	.tx = rtl_op_tx,
+	.add_interface = rtl_op_add_interface,
+	.remove_interface = rtl_op_remove_interface,
+/*<delete in kernel start>*/
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,37))
+/*<delete in kernel end>*/
+	.change_interface = rtl_op_change_interface,
+/*<delete in kernel start>*/
+#endif
+/*<delete in kernel end>*/
+	.config = rtl_op_config,
+	.configure_filter = rtl_op_configure_filter,
+	.set_key = rtl_op_set_key,
+	.conf_tx = rtl_op_conf_tx,
+	.bss_info_changed = rtl_op_bss_info_changed,
+	.get_tsf = rtl_op_get_tsf,
+	.set_tsf = rtl_op_set_tsf,
+	.reset_tsf = rtl_op_reset_tsf,
+	.sta_notify = rtl_op_sta_notify,
+	.ampdu_action = rtl_op_ampdu_action,
+	.sw_scan_start = rtl_op_sw_scan_start,
+	.sw_scan_complete = rtl_op_sw_scan_complete,
+	.rfkill_poll = rtl_op_rfkill_poll,
+	.sta_add = rtl_op_sta_add,
+	.sta_remove = rtl_op_sta_remove,
+	.flush = rtl_op_flush,
+};
diff --git a/drivers/staging/rtl8821ae/core.h b/drivers/staging/rtl8821ae/core.h
new file mode 100644
index 0000000..4b247db
--- /dev/null
+++ b/drivers/staging/rtl8821ae/core.h
@@ -0,0 +1,43 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2009-2010  Realtek Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ * The full GNU General Public License is included in this distribution in the
+ * file called LICENSE.
+ *
+ * Contact Information:
+ * wlanfae <wlanfae@realtek.com>
+ * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
+ * Hsinchu 300, Taiwan.
+ *
+ * Larry Finger <Larry.Finger@lwfinger.net>
+ *
+ *****************************************************************************/
+
+#ifndef __RTL_CORE_H__
+#define __RTL_CORE_H__
+
+#define RTL_SUPPORTED_FILTERS		\
+	(FIF_PROMISC_IN_BSS | \
+	FIF_ALLMULTI | FIF_CONTROL | \
+	FIF_OTHER_BSS | \
+	FIF_FCSFAIL | \
+	FIF_BCN_PRBRESP_PROMISC)
+
+#define RTL_SUPPORTED_CTRL_FILTER	0xFF
+
+extern const struct ieee80211_ops rtl_ops;
+#endif
diff --git a/drivers/staging/rtl8821ae/debug.c b/drivers/staging/rtl8821ae/debug.c
new file mode 100644
index 0000000..cb05122
--- /dev/null
+++ b/drivers/staging/rtl8821ae/debug.c
@@ -0,0 +1,988 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2009-2010  Realtek Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ * The full GNU General Public License is included in this distribution in the
+ * file called LICENSE.
+ *
+ * Contact Information:
+ * wlanfae <wlanfae@realtek.com>
+ * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
+ * Hsinchu 300, Taiwan.
+ *
+ * Larry Finger <Larry.Finger@lwfinger.net>
+ *
+ *****************************************************************************/
+
+#include "wifi.h"
+#include "cam.h"
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,10,0))
+#define GET_INODE_DATA(__node)		PDE_DATA(__node)
+#else
+#define GET_INODE_DATA(__node)		PDE(__node)->data
+#endif
+
+void rtl_dbgp_flag_init(struct ieee80211_hw *hw)
+{
+	struct rtl_priv *rtlpriv = rtl_priv(hw);
+	u8 i;
+
+	rtlpriv->dbg.global_debuglevel = DBG_DMESG;
+
+	rtlpriv->dbg.global_debugcomponents =
+		COMP_ERR |
+		COMP_FW |
+		COMP_INIT |
+		COMP_RECV |
+		COMP_SEND |
+		COMP_MLME |
+		COMP_SCAN |
+		COMP_INTR |
+		COMP_LED |
+		COMP_SEC |
+		COMP_BEACON |
+		COMP_RATE |
+		COMP_RXDESC |
+		COMP_DIG |
+		COMP_TXAGC |
+		COMP_POWER |
+		COMP_POWER_TRACKING |
+		COMP_BB_POWERSAVING |
+		COMP_SWAS |
+		COMP_RF |
+		COMP_TURBO |
+		COMP_RATR |
+		COMP_CMD |
+		COMP_EASY_CONCURRENT |
+		COMP_EFUSE |
+		COMP_QOS | COMP_MAC80211 | COMP_REGD |
+		COMP_CHAN |
+		COMP_BT_COEXIST |
+		COMP_IQK |
+		0;
+
+	for (i = 0; i < DBGP_TYPE_MAX; i++)
+		rtlpriv->dbg.dbgp_type[i] = 0;
+
+	/*Init Debug flag enable condition */
+}
+
+struct proc_dir_entry *proc_topdir;
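+
+/*
+ * Each mac-N proc entry below dumps 256 bytes of MAC registers starting at
+ * page N * 0x100, four 32-bit words per line; the bb-N entries do the same
+ * for baseband registers read via rtl_get_bbreg().
+ */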
+static int rtl_proc_get_mac_0(struct seq_file *m, void *v)
+{
+	struct ieee80211_hw *hw = m->private;
+	struct rtl_priv *rtlpriv = rtl_priv(hw);
+	int i, n, page;
+	int max = 0xff;
+	page = 0x000;
+
+	for (n = 0; n <= max; ) {
+		seq_printf(m, "\n%8.8x  ", n + page);
+		for (i = 0; i < 4 && n <= max; i++, n += 4)
+			seq_printf(m, "%8.8x    ",
+				   rtl_read_dword(rtlpriv, (page | n)));
+	}
+	seq_puts(m, "\n");
+	return 0;
+}
+
+static int dl_proc_open_mac_0(struct inode *inode, struct file *file)
+{
+	return single_open(file, rtl_proc_get_mac_0, GET_INODE_DATA(inode));
+}
+
+static const struct file_operations file_ops_mac_0 = {
+	.open = dl_proc_open_mac_0,
+	.read = seq_read,
+	.llseek = seq_lseek,
+	.release = seq_release,
+};
+
+static int rtl_proc_get_mac_1(struct seq_file *m, void *v)
+{
+	struct ieee80211_hw *hw = m->private;
+	struct rtl_priv *rtlpriv = rtl_priv(hw);
+	int i, n, page;
+	int max = 0xff;
+	page = 0x100;
+
+	for (n = 0; n <= max; ) {
+		seq_printf(m, "\n%8.8x  ", n + page);
+		for (i = 0; i < 4 && n <= max; i++, n += 4)
+			seq_printf(m, "%8.8x    ",
+				   rtl_read_dword(rtlpriv, (page | n)));
+	}
+	seq_puts(m, "\n");
+	return 0;
+}
+
+static int dl_proc_open_mac_1(struct inode *inode, struct file *file)
+{
+	return single_open(file, rtl_proc_get_mac_1, GET_INODE_DATA(inode));
+}
+
+static const struct file_operations file_ops_mac_1 = {
+	.open = dl_proc_open_mac_1,
+	.read = seq_read,
+	.llseek = seq_lseek,
+	.release = seq_release,
+};
+
+static int rtl_proc_get_mac_2(struct seq_file *m, void *v)
+{
+	struct ieee80211_hw *hw = m->private;
+	struct rtl_priv *rtlpriv = rtl_priv(hw);
+	int i, n, page;
+	int max = 0xff;
+	page = 0x200;
+
+	for (n = 0; n <= max; ) {
+		seq_printf(m, "\n%8.8x  ", n + page);
+		for (i = 0; i < 4 && n <= max; i++, n += 4)
+			seq_printf(m, "%8.8x    ",
+				   rtl_read_dword(rtlpriv, (page | n)));
+	}
+	seq_puts(m, "\n");
+	return 0;
+}
+
+static int dl_proc_open_mac_2(struct inode *inode, struct file *file)
+{
+	return single_open(file, rtl_proc_get_mac_2, GET_INODE_DATA(inode));
+}
+
+static const struct file_operations file_ops_mac_2 = {
+	.open = dl_proc_open_mac_2,
+	.read = seq_read,
+	.llseek = seq_lseek,
+	.release = seq_release,
+};
+
+static int rtl_proc_get_mac_3(struct seq_file *m, void *v)
+{
+	struct ieee80211_hw *hw = m->private;
+	struct rtl_priv *rtlpriv = rtl_priv(hw);
+	int i, n, page;
+	int max = 0xff;
+	page = 0x300;
+
+	for (n = 0; n <= max; ) {
+		seq_printf(m, "\n%8.8x  ", n + page);
+		for (i = 0; i < 4 && n <= max; i++, n += 4)
+			seq_printf(m, "%8.8x    ",
+				   rtl_read_dword(rtlpriv, (page | n)));
+	}
+	seq_puts(m, "\n");
+	return 0;
+}
+
+static int dl_proc_open_mac_3(struct inode *inode, struct file *file)
+{
+	return single_open(file, rtl_proc_get_mac_3, GET_INODE_DATA(inode));
+}
+
+static const struct file_operations file_ops_mac_3 = {
+	.open = dl_proc_open_mac_3,
+	.read = seq_read,
+	.llseek = seq_lseek,
+	.release = seq_release,
+};
+
+static int rtl_proc_get_mac_4(struct seq_file *m, void *v)
+{
+	struct ieee80211_hw *hw = m->private;
+	struct rtl_priv *rtlpriv = rtl_priv(hw);
+	int i, n, page;
+	int max = 0xff;
+	page = 0x400;
+
+	for (n = 0; n <= max; ) {
+		seq_printf(m, "\n%8.8x  ", n + page);
+		for (i = 0; i < 4 && n <= max; i++, n += 4)
+			seq_printf(m, "%8.8x    ",
+				   rtl_read_dword(rtlpriv, (page | n)));
+	}
+	seq_puts(m, "\n");
+	return 0;
+}
+
+static int dl_proc_open_mac_4(struct inode *inode, struct file *file)
+{
+	return single_open(file, rtl_proc_get_mac_4, GET_INODE_DATA(inode));
+}
+
+static const struct file_operations file_ops_mac_4 = {
+	.open = dl_proc_open_mac_4,
+	.read = seq_read,
+	.llseek = seq_lseek,
+	.release = seq_release,
+};
+
+static int rtl_proc_get_mac_5(struct seq_file *m, void *v)
+{
+	struct ieee80211_hw *hw = m->private;
+	struct rtl_priv *rtlpriv = rtl_priv(hw);
+	int i, n, page;
+	int max = 0xff;
+	page = 0x500;
+
+	for (n = 0; n <= max; ) {
+		seq_printf(m, "\n%8.8x  ", n + page);
+		for (i = 0; i < 4 && n <= max; i++, n += 4)
+			seq_printf(m, "%8.8x    ",
+				   rtl_read_dword(rtlpriv, (page | n)));
+	}
+	seq_puts(m, "\n");
+	return 0;
+}
+
+static int dl_proc_open_mac_5(struct inode *inode, struct file *file)
+{
+	return single_open(file, rtl_proc_get_mac_5, GET_INODE_DATA(inode));
+}
+
+static const struct file_operations file_ops_mac_5 = {
+	.open = dl_proc_open_mac_5,
+	.read = seq_read,
+	.llseek = seq_lseek,
+	.release = seq_release,
+};
+
+static int rtl_proc_get_mac_6(struct seq_file *m, void *v)
+{
+	struct ieee80211_hw *hw = m->private;
+	struct rtl_priv *rtlpriv = rtl_priv(hw);
+	int i, n, page;
+	int max = 0xff;
+	page = 0x600;
+
+	for (n = 0; n <= max; ) {
+		seq_printf(m, "\n%8.8x  ", n + page);
+		for (i = 0; i < 4 && n <= max; i++, n += 4)
+			seq_printf(m, "%8.8x    ",
+				   rtl_read_dword(rtlpriv, (page | n)));
+	}
+	seq_puts(m, "\n");
+	return 0;
+}
+
+static int dl_proc_open_mac_6(struct inode *inode, struct file *file)
+{
+	return single_open(file, rtl_proc_get_mac_6, GET_INODE_DATA(inode));
+}
+
+static const struct file_operations file_ops_mac_6 = {
+	.open = dl_proc_open_mac_6,
+	.read = seq_read,
+	.llseek = seq_lseek,
+	.release = seq_release,
+};
+
+static int rtl_proc_get_mac_7(struct seq_file *m, void *v)
+{
+	struct ieee80211_hw *hw = m->private;
+	struct rtl_priv *rtlpriv = rtl_priv(hw);
+	int i, n, page;
+	int max = 0xff;
+	page = 0x700;
+
+	for (n = 0; n <= max; ) {
+		seq_printf(m, "\n%8.8x  ", n + page);
+		for (i = 0; i < 4 && n <= max; i++, n += 4)
+			seq_printf(m, "%8.8x    ",
+				   rtl_read_dword(rtlpriv, (page | n)));
+	}
+	seq_puts(m, "\n");
+	return 0;
+}
+
+static int dl_proc_open_mac_7(struct inode *inode, struct file *file)
+{
+	return single_open(file, rtl_proc_get_mac_7, GET_INODE_DATA(inode));
+}
+
+static const struct file_operations file_ops_mac_7 = {
+	.open = dl_proc_open_mac_7,
+	.read = seq_read,
+	.llseek = seq_lseek,
+	.release = seq_release,
+};
+
+static int rtl_proc_get_bb_8(struct seq_file *m, void *v)
+{
+	struct ieee80211_hw *hw = m->private;
+	int i, n, page;
+	int max = 0xff;
+	page = 0x800;
+
+	for (n = 0; n <= max; ) {
+		seq_printf(m, "\n%8.8x  ", n + page);
+		for (i = 0; i < 4 && n <= max; i++, n += 4)
+			seq_printf(m, "%8.8x    ",
+				   rtl_get_bbreg(hw, (page | n), 0xffffffff));
+	}
+	seq_puts(m, "\n");
+	return 0;
+}
+
+static int dl_proc_open_bb_8(struct inode *inode, struct file *file)
+{
+	return single_open(file, rtl_proc_get_bb_8, GET_INODE_DATA(inode));
+}
+
+static const struct file_operations file_ops_bb_8 = {
+	.open = dl_proc_open_bb_8,
+	.read = seq_read,
+	.llseek = seq_lseek,
+	.release = seq_release,
+};
+
+static int rtl_proc_get_bb_9(struct seq_file *m, void *v)
+{
+	struct ieee80211_hw *hw = m->private;
+	int i, n, page;
+	int max = 0xff;
+	page = 0x900;
+
+	for (n = 0; n <= max; ) {
+		seq_printf(m, "\n%8.8x  ", n + page);
+		for (i = 0; i < 4 && n <= max; i++, n += 4)
+			seq_printf(m, "%8.8x    ",
+				   rtl_get_bbreg(hw, (page | n), 0xffffffff));
+	}
+	seq_puts(m, "\n");
+	return 0;
+}
+
+static int dl_proc_open_bb_9(struct inode *inode, struct file *file)
+{
+	return single_open(file, rtl_proc_get_bb_9, GET_INODE_DATA(inode));
+}
+
+static const struct file_operations file_ops_bb_9 = {
+	.open = dl_proc_open_bb_9,
+	.read = seq_read,
+	.llseek = seq_lseek,
+	.release = seq_release,
+};
+
+static int rtl_proc_get_bb_a(struct seq_file *m, void *v)
+{
+	struct ieee80211_hw *hw = m->private;
+	int i, n, page;
+	int max = 0xff;
+	page = 0xa00;
+
+	for (n = 0; n <= max; ) {
+		seq_printf(m, "\n%8.8x  ", n + page);
+		for (i = 0; i < 4 && n <= max; i++, n += 4)
+			seq_printf(m, "%8.8x    ",
+				   rtl_get_bbreg(hw, (page | n), 0xffffffff));
+	}
+	seq_puts(m, "\n");
+	return 0;
+}
+
+static int dl_proc_open_bb_a(struct inode *inode, struct file *file)
+{
+	return single_open(file, rtl_proc_get_bb_a, GET_INODE_DATA(inode));
+}
+
+static const struct file_operations file_ops_bb_a = {
+	.open = dl_proc_open_bb_a,
+	.read = seq_read,
+	.llseek = seq_lseek,
+	.release = seq_release,
+};
+
+static int rtl_proc_get_bb_b(struct seq_file *m, void *v)
+{
+	struct ieee80211_hw *hw = m->private;
+	int i, n, page;
+	int max = 0xff;
+	page = 0xb00;
+
+	for (n = 0; n <= max; ) {
+		seq_printf(m, "\n%8.8x  ", n + page);
+		for (i = 0; i < 4 && n <= max; i++, n += 4)
+			seq_printf(m, "%8.8x    ",
+				   rtl_get_bbreg(hw, (page | n), 0xffffffff));
+	}
+	seq_puts(m, "\n");
+	return 0;
+}
+
+static int dl_proc_open_bb_b(struct inode *inode, struct file *file)
+{
+	return single_open(file, rtl_proc_get_bb_b, GET_INODE_DATA(inode));
+}
+
+static const struct file_operations file_ops_bb_b = {
+	.open = dl_proc_open_bb_b,
+	.read = seq_read,
+	.llseek = seq_lseek,
+	.release = seq_release,
+};
+
+static int rtl_proc_get_bb_c(struct seq_file *m, void *v)
+{
+	struct ieee80211_hw *hw = m->private;
+	int i, n, page;
+	int max = 0xff;
+	page = 0xc00;
+
+	for (n = 0; n <= max; ) {
+		seq_printf(m, "\n%8.8x  ", n + page);
+		for (i = 0; i < 4 && n <= max; i++, n += 4)
+			seq_printf(m, "%8.8x    ",
+				   rtl_get_bbreg(hw, (page | n), 0xffffffff));
+	}
+	seq_puts(m, "\n");
+	return 0;
+}
+
+static int dl_proc_open_bb_c(struct inode *inode, struct file *file)
+{
+	return single_open(file, rtl_proc_get_bb_c, GET_INODE_DATA(inode));
+}
+
+static const struct file_operations file_ops_bb_c = {
+	.open = dl_proc_open_bb_c,
+	.read = seq_read,
+	.llseek = seq_lseek,
+	.release = seq_release,
+};
+
+static int rtl_proc_get_bb_d(struct seq_file *m, void *v)
+{
+	struct ieee80211_hw *hw = m->private;
+	int i, n, page;
+	int max = 0xff;
+	page = 0xd00;
+
+	for (n = 0; n <= max; ) {
+		seq_printf(m, "\n%8.8x  ", n + page);
+		for (i = 0; i < 4 && n <= max; i++, n += 4)
+			seq_printf(m, "%8.8x    ",
+				   rtl_get_bbreg(hw, (page | n), 0xffffffff));
+	}
+	seq_puts(m, "\n");
+	return 0;
+}
+
+static int dl_proc_open_bb_d(struct inode *inode, struct file *file)
+{
+	return single_open(file, rtl_proc_get_bb_d, GET_INODE_DATA(inode));
+}
+
+static const struct file_operations file_ops_bb_d = {
+	.open = dl_proc_open_bb_d,
+	.read = seq_read,
+	.llseek = seq_lseek,
+	.release = seq_release,
+};
+
+static int rtl_proc_get_bb_e(struct seq_file *m, void *v)
+{
+	struct ieee80211_hw *hw = m->private;
+	int i, n, page;
+	int max = 0xff;
+	page = 0xe00;
+
+	for (n = 0; n <= max; ) {
+		seq_printf(m, "\n%8.8x  ", n + page);
+		for (i = 0; i < 4 && n <= max; i++, n += 4)
+			seq_printf(m, "%8.8x    ",
+				   rtl_get_bbreg(hw, (page | n), 0xffffffff));
+	}
+	seq_puts(m, "\n");
+	return 0;
+}
+
+static int dl_proc_open_bb_e(struct inode *inode, struct file *file)
+{
+	return single_open(file, rtl_proc_get_bb_e, GET_INODE_DATA(inode));
+}
+
+static const struct file_operations file_ops_bb_e = {
+	.open = dl_proc_open_bb_e,
+	.read = seq_read,
+	.llseek = seq_lseek,
+	.release = seq_release,
+};
+
+static int rtl_proc_get_bb_f(struct seq_file *m, void *v)
+{
+	struct ieee80211_hw *hw = m->private;
+	int i, n, page;
+	int max = 0xff;
+	page = 0xf00;
+
+	for (n = 0; n <= max; ) {
+		seq_printf(m, "\n%8.8x  ", n + page);
+		for (i = 0; i < 4 && n <= max; i++, n += 4)
+			seq_printf(m, "%8.8x    ",
+				   rtl_get_bbreg(hw, (page | n), 0xffffffff));
+	}
+	seq_puts(m, "\n");
+	return 0;
+}
+
+static int dl_proc_open_bb_f(struct inode *inode, struct file *file)
+{
+	return single_open(file, rtl_proc_get_bb_f, GET_INODE_DATA(inode));
+}
+
+static const struct file_operations file_ops_bb_f = {
+	.open = dl_proc_open_bb_f,
+	.read = seq_read,
+	.llseek = seq_lseek,
+	.release = seq_release,
+};
+
+static int rtl_proc_get_reg_rf_a(struct seq_file *m, void *v)
+{
+	struct ieee80211_hw *hw = m->private;
+	int i, n;
+	int max = 0x40;
+
+	for (n = 0; n <= max; ) {
+		seq_printf(m, "\n%8.8x  ", n);
+		for (i = 0; i < 4 && n <= max; n += 1, i++)
+			seq_printf(m, "%8.8x    ",
+				   rtl_get_rfreg(hw, RF90_PATH_A, n, 0xffffffff));
+	}
+	seq_puts(m, "\n");
+	return 0;
+}
+
+static int dl_proc_open_rf_a(struct inode *inode, struct file *file)
+{
+	return single_open(file, rtl_proc_get_reg_rf_a, GET_INODE_DATA(inode));
+}
+
+static const struct file_operations file_ops_rf_a = {
+	.open = dl_proc_open_rf_a,
+	.read = seq_read,
+	.llseek = seq_lseek,
+	.release = seq_release,
+};
+
+static int rtl_proc_get_reg_rf_b(struct seq_file *m, void *v)
+{
+	struct ieee80211_hw *hw = m->private;
+	int i, n;
+	int max = 0x40;
+
+	for (n = 0; n <= max; ) {
+		seq_printf(m, "\n%8.8x  ", n);
+		for (i = 0; i < 4 && n <= max; n += 1, i++)
+			seq_printf(m, "%8.8x    ",
+				   rtl_get_rfreg(hw, RF90_PATH_B, n,
+				   		 0xffffffff));
+	}
+	seq_puts(m, "\n");
+	return 0;
+}
+
+static int dl_proc_open_rf_b(struct inode *inode, struct file *file)
+{
+	return single_open(file, rtl_proc_get_reg_rf_b, GET_INODE_DATA(inode));
+}
+
+static const struct file_operations file_ops_rf_b = {
+	.open = dl_proc_open_rf_b,
+	.read = seq_read,
+	.llseek = seq_lseek,
+	.release = seq_release,
+};
+
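+/*
+ * The cam-1/2/3 entries dump the hardware key CAM (entries 0-10, 11-21 and
+ * 22-31). Each entry is CAM_CONTENT_COUNT 32-bit words; a word is read by
+ * polling RWCAM until BIT(31) is clear, writing the word's address with
+ * BIT(31) set to RWCAM, and then reading the result back from RCAMO.
+ */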
+static int rtl_proc_get_cam_register_1(struct seq_file *m, void *v)
+{
+	struct ieee80211_hw *hw = m->private;
+	struct rtl_priv *rtlpriv = rtl_priv(hw);
+	u32 target_cmd = 0;
+	u32 target_val = 0;
+	u8 entry_i = 0;
+	u32 ulstatus;
+	int i = 100, j = 0;
+
+	/* This dumps the current register page */
+	seq_puts(m,
+	    "\n#################### SECURITY CAM (0-10) ##################\n ");
+
+	for (j = 0; j < 11; j++) {
+		seq_printf(m, "\nD:  %2x > ", j);
+		for (entry_i = 0; entry_i < CAM_CONTENT_COUNT; entry_i++) {
+			/* polling bit, no write enable, and the address */
+			target_cmd = entry_i + CAM_CONTENT_COUNT * j;
+			target_cmd = target_cmd | BIT(31);
+
+			/* Check that the polling bit is clear */
+			while ((i--) >= 0) {
+				ulstatus = rtl_read_dword(rtlpriv,
+						rtlpriv->cfg->maps[RWCAM]);
+				if (!(ulstatus & BIT(31)))
+					break;
+			}
+
+			rtl_write_dword(rtlpriv, rtlpriv->cfg->maps[RWCAM],
+					target_cmd);
+			target_val = rtl_read_dword(rtlpriv,
+						    rtlpriv->cfg->maps[RCAMO]);
+			seq_printf(m, "%8.8x ", target_val);
+		}
+	}
+	seq_puts(m, "\n");
+	return 0;
+}
+
+static int dl_proc_open_cam_1(struct inode *inode, struct file *file)
+{
+	return single_open(file, rtl_proc_get_cam_register_1,
+			   GET_INODE_DATA(inode));
+}
+
+static const struct file_operations file_ops_cam_1 = {
+	.open = dl_proc_open_cam_1,
+	.read = seq_read,
+	.llseek = seq_lseek,
+	.release = seq_release,
+};
+
+static int rtl_proc_get_cam_register_2(struct seq_file *m, void *v)
+{
+	struct ieee80211_hw *hw = m->private;
+	struct rtl_priv *rtlpriv = rtl_priv(hw);
+	u32 target_cmd = 0;
+	u32 target_val = 0;
+	u8 entry_i = 0;
+	u32 ulstatus;
+	int i = 100, j = 0;
+
+	/* This dumps the current register page */
+	seq_puts(m,
+	    "\n################### SECURITY CAM (11-21) ##################\n ");
+
+	for (j = 11; j < 22; j++) {
+		seq_printf(m, "\nD:  %2x > ", j);
+		for (entry_i = 0; entry_i < CAM_CONTENT_COUNT; entry_i++) {
+			target_cmd = entry_i + CAM_CONTENT_COUNT * j;
+			target_cmd = target_cmd | BIT(31);
+
+			while ((i--) >= 0) {
+				ulstatus = rtl_read_dword(rtlpriv,
+						rtlpriv->cfg->maps[RWCAM]);
+				if (!(ulstatus & BIT(31)))
+					break;
+			}
+
+			rtl_write_dword(rtlpriv, rtlpriv->cfg->maps[RWCAM],
+					target_cmd);
+			target_val = rtl_read_dword(rtlpriv,
+						    rtlpriv->cfg->maps[RCAMO]);
+			seq_printf(m, "%8.8x ", target_val);
+		}
+	}
+	seq_puts(m, "\n");
+	return 0;
+}
+
+static int dl_proc_open_cam_2(struct inode *inode, struct file *file)
+{
+	return single_open(file, rtl_proc_get_cam_register_2,
+			   GET_INODE_DATA(inode));
+}
+
+static const struct file_operations file_ops_cam_2 = {
+	.open = dl_proc_open_cam_2,
+	.read = seq_read,
+	.llseek = seq_lseek,
+	.release = seq_release,
+};
+
+static int rtl_proc_get_cam_register_3(struct seq_file *m, void *v)
+{
+	struct ieee80211_hw *hw = m->private;
+	struct rtl_priv *rtlpriv = rtl_priv(hw);
+	u32 target_cmd = 0;
+	u32 target_val = 0;
+	u8 entry_i = 0;
+	u32 ulstatus;
+	int i = 100, j = 0;
+
+	/* This dumps the current register page */
+	seq_puts(m,
+	    "\n################### SECURITY CAM (22-31) ##################\n ");
+
+	for (j = 22; j < TOTAL_CAM_ENTRY; j++) {
+		seq_printf(m, "\nD:  %2x > ", j);
+		for (entry_i = 0; entry_i < CAM_CONTENT_COUNT; entry_i++) {
+			target_cmd = entry_i + CAM_CONTENT_COUNT * j;
+			target_cmd = target_cmd | BIT(31);
+
+			while ((i--) >= 0) {
+				ulstatus = rtl_read_dword(rtlpriv,
+						rtlpriv->cfg->maps[RWCAM]);
+				if (!(ulstatus & BIT(31)))
+					break;
+			}
+
+			rtl_write_dword(rtlpriv, rtlpriv->cfg->maps[RWCAM],
+					target_cmd);
+			target_val = rtl_read_dword(rtlpriv,
+						    rtlpriv->cfg->maps[RCAMO]);
+			seq_printf(m, "%8.8x ", target_val);
+		}
+	}
+	seq_puts(m, "\n");
+	return 0;
+}
+
+static int dl_proc_open_cam_3(struct inode *inode, struct file *file)
+{
+	return single_open(file, rtl_proc_get_cam_register_3,
+			   GET_INODE_DATA(inode));
+}
+
+static const struct file_operations file_ops_cam_3 = {
+	.open = dl_proc_open_cam_3,
+	.read = seq_read,
+	.llseek = seq_lseek,
+	.release = seq_release,
+};
+
+void rtl_proc_add_one(struct ieee80211_hw *hw)
+{
+	struct rtl_priv *rtlpriv = rtl_priv(hw);
+	struct rtl_efuse *rtlefuse = rtl_efuse(rtl_priv(hw));
+	struct proc_dir_entry *entry;
+
+	snprintf(rtlpriv->dbg.proc_name, 18, "%x-%x-%x-%x-%x-%x",
+		rtlefuse->dev_addr[0], rtlefuse->dev_addr[1],
+		rtlefuse->dev_addr[2], rtlefuse->dev_addr[3],
+		rtlefuse->dev_addr[4], rtlefuse->dev_addr[5]);
+
+	rtlpriv->dbg.proc_dir = proc_mkdir(rtlpriv->dbg.proc_name, proc_topdir);
+	if (!rtlpriv->dbg.proc_dir) {
+		RT_TRACE(COMP_INIT, DBG_EMERG, ("Unable to init "
+			"/proc/net/%s/%s\n", rtlpriv->cfg->name,
+			rtlpriv->dbg.proc_name));
+		return;
+	}
+
+	entry = proc_create_data("mac-0", S_IFREG | S_IRUGO,
+				  rtlpriv->dbg.proc_dir, &file_ops_mac_0, hw);
+	if (!entry)
+		RT_TRACE(COMP_INIT, DBG_EMERG,
+			 ("Unable to initialize /proc/net/%s/%s/mac-0\n",
+			  rtlpriv->cfg->name, rtlpriv->dbg.proc_name));
+
+	entry = proc_create_data("mac-1", S_IFREG | S_IRUGO,
+				 rtlpriv->dbg.proc_dir, &file_ops_mac_1, hw);
+	if (!entry)
+		RT_TRACE(COMP_INIT, DBG_EMERG,
+			 ("Unable to initialize /proc/net/%s/%s/mac-1\n",
+			  rtlpriv->cfg->name, rtlpriv->dbg.proc_name));
+
+	entry = proc_create_data("mac-2", S_IFREG | S_IRUGO,
+				 rtlpriv->dbg.proc_dir, &file_ops_mac_2, hw);
+	if (!entry)
+		RT_TRACE(COMP_INIT, DBG_EMERG,
+			 ("Unable to initialize /proc/net/%s/%s/mac-2\n",
+			  rtlpriv->cfg->name, rtlpriv->dbg.proc_name));
+
+	entry = proc_create_data("mac-3", S_IFREG | S_IRUGO,
+				 rtlpriv->dbg.proc_dir, &file_ops_mac_3, hw);
+	if (!entry)
+		RT_TRACE(COMP_INIT, DBG_EMERG,
+			 ("Unable to initialize /proc/net/%s/%s/mac-3\n",
+			  rtlpriv->cfg->name, rtlpriv->dbg.proc_name));
+
+	entry = proc_create_data("mac-4", S_IFREG | S_IRUGO,
+				 rtlpriv->dbg.proc_dir, &file_ops_mac_4, hw);
+	if (!entry)
+		RT_TRACE(COMP_INIT, DBG_EMERG,
+			 ("Unable to initialize /proc/net/%s/%s/mac-4\n",
+			  rtlpriv->cfg->name, rtlpriv->dbg.proc_name));
+
+	entry = proc_create_data("mac-5", S_IFREG | S_IRUGO,
+				 rtlpriv->dbg.proc_dir, &file_ops_mac_5, hw);
+	if (!entry)
+		RT_TRACE(COMP_INIT, DBG_EMERG,
+			 ("Unable to initialize /proc/net/%s/%s/mac-5\n",
+			  rtlpriv->cfg->name, rtlpriv->dbg.proc_name));
+
+	entry = proc_create_data("mac-6", S_IFREG | S_IRUGO,
+				 rtlpriv->dbg.proc_dir, &file_ops_mac_6, hw);
+	if (!entry)
+		RT_TRACE(COMP_INIT, DBG_EMERG,
+			 ("Unable to initialize /proc/net/%s/%s/mac-6\n",
+			  rtlpriv->cfg->name, rtlpriv->dbg.proc_name));
+
+	entry = proc_create_data("mac-7", S_IFREG | S_IRUGO,
+				 rtlpriv->dbg.proc_dir, &file_ops_mac_7, hw);
+	if (!entry)
+		RT_TRACE(COMP_INIT, DBG_EMERG,
+			 ("Unable to initialize /proc/net/%s/%s/mac-7\n",
+			  rtlpriv->cfg->name, rtlpriv->dbg.proc_name));
+
+	entry = proc_create_data("bb-8", S_IFREG | S_IRUGO,
+				 rtlpriv->dbg.proc_dir, &file_ops_bb_8, hw);
+	if (!entry)
+		RT_TRACE(COMP_INIT, DBG_EMERG,
+			 ("Unable to initialize /proc/net/%s/%s/bb-8\n",
+			  rtlpriv->cfg->name, rtlpriv->dbg.proc_name));
+
+	entry = proc_create_data("bb-9", S_IFREG | S_IRUGO,
+				 rtlpriv->dbg.proc_dir, &file_ops_bb_9, hw);
+	if (!entry)
+		RT_TRACE(COMP_INIT, DBG_EMERG,
+			 ("Unable to initialize /proc/net/%s/%s/bb-9\n",
+			  rtlpriv->cfg->name, rtlpriv->dbg.proc_name));
+
+	entry = proc_create_data("bb-a", S_IFREG | S_IRUGO,
+				 rtlpriv->dbg.proc_dir, &file_ops_bb_a, hw);
+	if (!entry)
+		RT_TRACE(COMP_INIT, DBG_EMERG,
+			 ("Unable to initialize /proc/net/%s/%s/bb-a\n",
+			  rtlpriv->cfg->name, rtlpriv->dbg.proc_name));
+
+	entry = proc_create_data("bb-b", S_IFREG | S_IRUGO,
+				 rtlpriv->dbg.proc_dir, &file_ops_bb_b, hw);
+	if (!entry)
+		RT_TRACE(COMP_INIT, DBG_EMERG,
+			 ("Unable to initialize /proc/net/%s/%s/bb-b\n",
+		      rtlpriv->cfg->name, rtlpriv->dbg.proc_name));
+
+	entry = proc_create_data("bb-c", S_IFREG | S_IRUGO,
+				 rtlpriv->dbg.proc_dir, &file_ops_bb_c, hw);
+	if (!entry)
+		RT_TRACE(COMP_INIT, DBG_EMERG,
+			 ("Unable to initialize /proc/net/%s/%s/bb-c\n",
+			  rtlpriv->cfg->name, rtlpriv->dbg.proc_name));
+
+	entry = proc_create_data("bb-d", S_IFREG | S_IRUGO,
+				 rtlpriv->dbg.proc_dir, &file_ops_bb_d, hw);
+	if (!entry)
+		RT_TRACE(COMP_INIT, COMP_ERR,
+			 ("Unable to initialize /proc/net/%s/%s/bb-d\n",
+			  rtlpriv->cfg->name, rtlpriv->dbg.proc_name));
+
+	entry = proc_create_data("bb-e", S_IFREG | S_IRUGO,
+				 rtlpriv->dbg.proc_dir, &file_ops_bb_e, hw);
+	if (!entry)
+		RT_TRACE(COMP_INIT, COMP_ERR,
+			 ("Unable to initialize /proc/net/%s/%s/bb-e\n",
+			  rtlpriv->cfg->name, rtlpriv->dbg.proc_name));
+
+	entry = proc_create_data("bb-f", S_IFREG | S_IRUGO,
+				 rtlpriv->dbg.proc_dir, &file_ops_bb_f, hw);
+	if (!entry)
+		RT_TRACE(COMP_INIT, COMP_ERR,
+			 ("Unable to initialize /proc/net/%s/%s/bb-f\n",
+			  rtlpriv->cfg->name, rtlpriv->dbg.proc_name));
+
+	entry = proc_create_data("rf-a", S_IFREG | S_IRUGO,
+				 rtlpriv->dbg.proc_dir, &file_ops_rf_a, hw);
+	if (!entry)
+		RT_TRACE(COMP_INIT, COMP_ERR,
+			 ("Unable to initialize /proc/net/%s/%s/rf-a\n",
+			  rtlpriv->cfg->name, rtlpriv->dbg.proc_name));
+
+	entry = proc_create_data("rf-b", S_IFREG | S_IRUGO,
+				 rtlpriv->dbg.proc_dir, &file_ops_rf_b, hw);
+	if (!entry)
+		RT_TRACE(COMP_INIT, COMP_ERR,
+			 ("Unable to initialize /proc/net/%s/%s/rf-b\n",
+			  rtlpriv->cfg->name, rtlpriv->dbg.proc_name));
+
+	entry = proc_create_data("cam-1", S_IFREG | S_IRUGO,
+				 rtlpriv->dbg.proc_dir, &file_ops_cam_1, hw);
+	if (!entry)
+		RT_TRACE(COMP_INIT, COMP_ERR,
+			 ("Unable to initialize /proc/net/%s/%s/cam-1\n",
+			  rtlpriv->cfg->name, rtlpriv->dbg.proc_name));
+
+	entry = proc_create_data("cam-2", S_IFREG | S_IRUGO,
+				 rtlpriv->dbg.proc_dir, &file_ops_cam_2, hw);
+	if (!entry)
+		RT_TRACE(COMP_INIT, COMP_ERR,
+			 ("Unable to initialize /proc/net/%s/%s/cam-2\n",
+			  rtlpriv->cfg->name, rtlpriv->dbg.proc_name));
+
+	entry = proc_create_data("cam-3", S_IFREG | S_IRUGO,
+				 rtlpriv->dbg.proc_dir, &file_ops_cam_3, hw);
+	if (!entry)
+		RT_TRACE(COMP_INIT, COMP_ERR,
+			 ("Unable to initialize /proc/net/%s/%s/cam-3\n",
+			  rtlpriv->cfg->name, rtlpriv->dbg.proc_name));
+}
+
+void rtl_proc_remove_one(struct ieee80211_hw *hw)
+{
+	struct rtl_priv *rtlpriv = rtl_priv(hw);
+
+	if (rtlpriv->dbg.proc_dir) {
+		remove_proc_entry("mac-0", rtlpriv->dbg.proc_dir);
+		remove_proc_entry("mac-1", rtlpriv->dbg.proc_dir);
+		remove_proc_entry("mac-2", rtlpriv->dbg.proc_dir);
+		remove_proc_entry("mac-3", rtlpriv->dbg.proc_dir);
+		remove_proc_entry("mac-4", rtlpriv->dbg.proc_dir);
+		remove_proc_entry("mac-5", rtlpriv->dbg.proc_dir);
+		remove_proc_entry("mac-6", rtlpriv->dbg.proc_dir);
+		remove_proc_entry("mac-7", rtlpriv->dbg.proc_dir);
+		remove_proc_entry("bb-8", rtlpriv->dbg.proc_dir);
+		remove_proc_entry("bb-9", rtlpriv->dbg.proc_dir);
+		remove_proc_entry("bb-a", rtlpriv->dbg.proc_dir);
+		remove_proc_entry("bb-b", rtlpriv->dbg.proc_dir);
+		remove_proc_entry("bb-c", rtlpriv->dbg.proc_dir);
+		remove_proc_entry("bb-d", rtlpriv->dbg.proc_dir);
+		remove_proc_entry("bb-e", rtlpriv->dbg.proc_dir);
+		remove_proc_entry("bb-f", rtlpriv->dbg.proc_dir);
+		remove_proc_entry("rf-a", rtlpriv->dbg.proc_dir);
+		remove_proc_entry("rf-b", rtlpriv->dbg.proc_dir);
+		remove_proc_entry("cam-1", rtlpriv->dbg.proc_dir);
+		remove_proc_entry("cam-2", rtlpriv->dbg.proc_dir);
+		remove_proc_entry("cam-3", rtlpriv->dbg.proc_dir);
+
+		remove_proc_entry(rtlpriv->dbg.proc_name, proc_topdir);
+
+		rtlpriv->dbg.proc_dir = NULL;
+	}
+}
+
+void rtl_proc_add_topdir(void)
+{
+	proc_topdir = proc_mkdir("rtlwifi", init_net.proc_net);
+}
+
+void rtl_proc_remove_topdir(void)
+{
+	if (proc_topdir)
+		remove_proc_entry("rtlwifi", init_net.proc_net);
+}
\ No newline at end of file
diff --git a/drivers/staging/rtl8821ae/debug.h b/drivers/staging/rtl8821ae/debug.h
new file mode 100644
index 0000000..5eb6251
--- /dev/null
+++ b/drivers/staging/rtl8821ae/debug.h
@@ -0,0 +1,227 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2009-2010  Realtek Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ * The full GNU General Public License is included in this distribution in the
+ * file called LICENSE.
+ *
+ * Contact Information:
+ * wlanfae <wlanfae@realtek.com>
+ * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
+ * Hsinchu 300, Taiwan.
+ *
+ * Larry Finger <Larry.Finger@lwfinger.net>
+ *
+ *****************************************************************************/
+
+#ifndef __RTL_DEBUG_H__
+#define __RTL_DEBUG_H__
+
+/*--------------------------------------------------------------
+			Debug level
+--------------------------------------------------------------*/
+/*
+ *Fatal bug.
+ *For example, Tx/Rx/IO locked up,
+ *memory access violation,
+ *resource allocation failed,
+ *unexpected HW behavior, HW BUG
+ *and so on.
+ */
+#define DBG_EMERG			0
+
+/*
+ *Abnormal, rare, or unexpected cases.
+ *For example, Packet/IO Ctl canceled,
+ *surprise device removal and so on.
+ */
+#define	DBG_WARNING			2
+
+/*
+ *Normal case that driver developers should
+ *enable; shows link status such as
+ *assoc/AddBA/DHCP/adapter start and other
+ *basic and useful information.
+ */
+#define DBG_DMESG			3
+
+/*
+ *Normal case with useful information
+ *about current SW or HW state.
+ *For example, Tx/Rx descriptor to fill,
+ *Tx/Rx descriptor completed status,
+ *SW protocol state change, dynamic
+ *mechanism state change and so on.
+ */
+#define DBG_LOUD			4
+
+/*
+ *Normal case with detail execution
+ *flow or information.
+ */
+#define	DBG_TRACE			5
+
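+/*
+ * Note on how the levels above are used (illustrative, based on the
+ * RT_TRACE() macro defined later in this header): a debug message is
+ * printed only when its component bit is set in
+ * rtlpriv->dbg.global_debugcomponents and its level is less than or
+ * equal to rtlpriv->dbg.global_debuglevel.  For example, with the global
+ * level set to DBG_DMESG (3), DBG_EMERG/DBG_WARNING/DBG_DMESG messages
+ * are printed while DBG_LOUD and DBG_TRACE messages are suppressed.
+ */
+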
+/*--------------------------------------------------------------
+		Define the rt_trace components
+--------------------------------------------------------------*/
+#define COMP_ERR			BIT(0)
+#define COMP_FW				BIT(1)
+#define COMP_INIT			BIT(2)	/*For init/deinit */
+#define COMP_RECV			BIT(3)	/*For Rx. */
+#define COMP_SEND			BIT(4)	/*For Tx. */
+#define COMP_MLME			BIT(5)	/*For MLME. */
+#define COMP_SCAN			BIT(6)	/*For Scan. */
+#define COMP_INTR			BIT(7)	/*For interrupt Related. */
+#define COMP_LED			BIT(8)	/*For LED. */
+#define COMP_SEC			BIT(9)	/*For sec. */
+#define COMP_BEACON			BIT(10)	/*For beacon. */
+#define COMP_RATE			BIT(11)	/*For rate. */
+#define COMP_RXDESC			BIT(12)	/*For rx desc. */
+#define COMP_DIG			BIT(13)	/*For DIG */
+#define COMP_TXAGC			BIT(14)	/*For Tx power */
+#define COMP_HIPWR			BIT(15)	/*For High Power Mechanism */
+#define COMP_POWER			BIT(16)	/*For lps/ips/aspm. */
+#define COMP_POWER_TRACKING	BIT(17)	/*For TX POWER TRACKING */
+#define COMP_BB_POWERSAVING	BIT(18)
+#define COMP_SWAS			BIT(19)	/*For SW Antenna Switch */
+#define COMP_RF				BIT(20)	/*For RF. */
+#define COMP_TURBO			BIT(21)	/*For EDCA TURBO. */
+#define COMP_RATR			BIT(22)
+#define COMP_CMD			BIT(23)
+#define COMP_EFUSE			BIT(24)
+#define COMP_QOS			BIT(25)
+#define COMP_MAC80211		BIT(26)
+#define COMP_REGD			BIT(27)
+#define COMP_CHAN			BIT(28)
+#define COMP_EASY_CONCURRENT 		BIT(29)
+#define COMP_BT_COEXIST			BIT(30)
+#define COMP_IQK			BIT(31)
+
+/*--------------------------------------------------------------
+		Define the rt_print components
+--------------------------------------------------------------*/
+/* Define EEPROM and EFUSE  check module bit*/
+#define EEPROM_W			BIT(0)
+#define EFUSE_PG			BIT(1)
+#define EFUSE_READ_ALL		BIT(2)
+
+/* Define init check for module bit*/
+#define	INIT_EEPROM			BIT(0)
+#define	INIT_TxPower		BIT(1)
+#define	INIT_IQK			BIT(2)
+#define	INIT_RF				BIT(3)
+
+/* Define PHY-BB/RF/MAC check module bit */
+#define	PHY_BBR				BIT(0)
+#define	PHY_BBW				BIT(1)
+#define	PHY_RFR				BIT(2)
+#define	PHY_RFW				BIT(3)
+#define	PHY_MACR			BIT(4)
+#define	PHY_MACW			BIT(5)
+#define	PHY_ALLR			BIT(6)
+#define	PHY_ALLW			BIT(7)
+#define	PHY_TXPWR			BIT(8)
+#define	PHY_PWRDIFF			BIT(9)
+
+/* Define Dynamic Mechanism check module bit --> FDM */
+#define WA_IOT				BIT(0)
+#define DM_PWDB				BIT(1)
+#define DM_MONITOR			BIT(2)
+#define DM_DIG				BIT(3)
+#define DM_EDCA_TURBO		BIT(4)
+
+enum dbgp_flag_e {
+	FQOS = 0,
+	FTX = 1,
+	FRX = 2,
+	FSEC = 3,
+	FMGNT = 4,
+	FMLME = 5,
+	FRESOURCE = 6,
+	FBEACON = 7,
+	FISR = 8,
+	FPHY = 9,
+	FMP = 10,
+	FEEPROM = 11,
+	FPWR = 12,
+	FDM = 13,
+	FDBGCtrl = 14,
+	FC2H = 15,
+	FBT = 16,
+	FINIT = 17,
+	FIOCTL = 18,
+	DBGP_TYPE_MAX
+};
+
+#define RT_ASSERT(_exp,fmt)				\
+	do { \
+		if(!(_exp))	{			\
+			printk(KERN_DEBUG "%s:%s(): ", KBUILD_MODNAME, \
+			__func__);	\
+			printk fmt;			\
+		} \
+	} while (0)
+
+#define	RT_DISP(dbgtype, dbgflag, printstr)
+
+#define RT_TRACE(comp, level, fmt)\
+	do { \
+		if(unlikely(((comp) & rtlpriv->dbg.global_debugcomponents) && \
+			((level) <= rtlpriv->dbg.global_debuglevel))) {\
+			printk(KERN_DEBUG "%s-%d:%s():<%lx-%x> ", \
+			KBUILD_MODNAME, \
+			rtlpriv->rtlhal.interfaceindex, __func__, \
+			in_interrupt(), in_atomic());	\
+			printk fmt; 			\
+		}\
+	} while (0)
+
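+/*
+ * Usage sketch (illustrative only): RT_TRACE() expands "fmt" through
+ * "printk fmt", so call sites must wrap the format string and its
+ * arguments in an extra pair of parentheses, and a local rtlpriv must be
+ * in scope, e.g.
+ *
+ *	RT_TRACE(COMP_INIT, DBG_DMESG,
+ *		 ("adapter started, chip version 0x%x\n", version));
+ *
+ * where "version" stands for any value the caller wants to log.
+ */
+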
+#define RTPRINT(rtlpriv, dbgtype, dbgflag, printstr)	\
+	do {						\
+		if (unlikely(rtlpriv->dbg.dbgp_type[dbgtype] & dbgflag)) { \
+			printk(KERN_DEBUG "%s: ", KBUILD_MODNAME);	\
+			printk printstr;		\
+		}					\
+	} while (0)
+
+#define RT_PRINT_DATA(rtlpriv, _comp, _level, _titlestring, _hexdata, \
+		_hexdatalen) \
+	do {\
+		if(unlikely(((_comp) & rtlpriv->dbg.global_debugcomponents ) &&\
+			(_level <= rtlpriv->dbg.global_debuglevel )))	{ \
+			int __i;					\
+			u8*	ptr = (u8*)_hexdata;			\
+			printk(KERN_DEBUG "%s: ", KBUILD_MODNAME);	\
+			printk(KERN_DEBUG "In process \"%s\" (pid %i):", \
+					current->comm, 	\
+					current->pid); \
+			printk(_titlestring);		\
+			for( __i=0; __i<(int)_hexdatalen; __i++ ) {	\
+				printk("%02X%s", ptr[__i], (((__i + 1) % 4) \
+							== 0)?"  ":" ");\
+				if (((__i + 1) % 16) == 0)	\
+					printk("\n");	\
+			}				\
+			printk(KERN_DEBUG "\n");			\
+		} \
+	} while (0)
+
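+/*
+ * Hex-dump sketch (illustrative only): RT_PRINT_DATA() dumps a buffer
+ * when the component/level gate passes, e.g. dumping an 8-byte PG packet:
+ *
+ *	RT_PRINT_DATA(rtlpriv, COMP_EFUSE, DBG_LOUD,
+ *		      ("PG packet\n"), pkt_data, 8);
+ *
+ * where pkt_data is any u8 buffer of at least 8 bytes.
+ */
+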
+void rtl_dbgp_flag_init(struct ieee80211_hw *hw);
+void rtl_proc_add_one(struct ieee80211_hw *hw);
+void rtl_proc_remove_one(struct ieee80211_hw *hw);
+void rtl_proc_add_topdir(void);
+void rtl_proc_remove_topdir(void);
+#endif
diff --git a/drivers/staging/rtl8821ae/efuse.c b/drivers/staging/rtl8821ae/efuse.c
new file mode 100644
index 0000000..74c19ec
--- /dev/null
+++ b/drivers/staging/rtl8821ae/efuse.c
@@ -0,0 +1,1285 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2009-2010  Realtek Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ * The full GNU General Public License is included in this distribution in the
+ * file called LICENSE.
+ *
+ * Contact Information:
+ * wlanfae <wlanfae@realtek.com>
+ * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
+ * Hsinchu 300, Taiwan.
+ *
+ * Larry Finger <Larry.Finger@lwfinger.net>
+ *
+ *****************************************************************************/
+#include "wifi.h"
+#include "efuse.h"
+#include "btcoexist/halbt_precomp.h"
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,2,0))
+#include <linux/export.h>
+#endif
+
+static const u8 MAX_PGPKT_SIZE = 9;
+static const u8 PGPKT_DATA_SIZE = 8;
+static const int EFUSE_MAX_SIZE = 512;
+
+static const struct efuse_map RTL8712_SDIO_EFUSE_TABLE[] = {
+	{0, 0, 0, 2},
+	{0, 1, 0, 2},
+	{0, 2, 0, 2},
+	{1, 0, 0, 1},
+	{1, 0, 1, 1},
+	{1, 1, 0, 1},
+	{1, 1, 1, 3},
+	{1, 3, 0, 17},
+	{3, 3, 1, 48},
+	{10, 0, 0, 6},
+	{10, 3, 0, 1},
+	{10, 3, 1, 1},
+	{11, 0, 0, 28}
+};
+
+static void efuse_shadow_read_1byte(struct ieee80211_hw *hw, u16 offset,
+				    u8 * value);
+static void efuse_shadow_read_2byte(struct ieee80211_hw *hw, u16 offset,
+				    u16 * value);
+static void efuse_shadow_read_4byte(struct ieee80211_hw *hw, u16 offset,
+				    u32 * value);
+static void efuse_shadow_write_1byte(struct ieee80211_hw *hw, u16 offset,
+				     u8 value);
+static void efuse_shadow_write_2byte(struct ieee80211_hw *hw, u16 offset,
+				     u16 value);
+static void efuse_shadow_write_4byte(struct ieee80211_hw *hw, u16 offset,
+				     u32 value);
+static int efuse_one_byte_write(struct ieee80211_hw *hw, u16 addr,
+				u8 data);
+static void efuse_read_all_map(struct ieee80211_hw *hw, u8 * efuse);
+static int efuse_pg_packet_read(struct ieee80211_hw *hw, u8 offset,
+				u8 *data);
+static int efuse_pg_packet_write(struct ieee80211_hw *hw, u8 offset,
+				 u8 word_en, u8 * data);
+static void efuse_word_enable_data_read(u8 word_en, u8 * sourdata,
+					u8 * targetdata);
+static u8 efuse_word_enable_data_write(struct ieee80211_hw *hw,
+				       u16 efuse_addr, u8 word_en, u8 * data);
+static void efuse_power_switch(struct ieee80211_hw *hw, u8 bwrite,
+					u8 pwrstate);
+static u16 efuse_get_current_size(struct ieee80211_hw *hw);
+static u8 efuse_calculate_word_cnts(u8 word_en);
+
+void efuse_initialize(struct ieee80211_hw *hw)
+{
+	struct rtl_priv *rtlpriv = rtl_priv(hw);
+	u8 bytetemp;
+	u8 temp;
+
+	bytetemp = rtl_read_byte(rtlpriv, rtlpriv->cfg->maps[SYS_FUNC_EN] + 1);
+	temp = bytetemp | 0x20;
+	rtl_write_byte(rtlpriv, rtlpriv->cfg->maps[SYS_FUNC_EN] + 1, temp);
+
+	bytetemp = rtl_read_byte(rtlpriv, rtlpriv->cfg->maps[SYS_ISO_CTRL] + 1);
+	temp = bytetemp & 0xFE;
+	rtl_write_byte(rtlpriv, rtlpriv->cfg->maps[SYS_ISO_CTRL] + 1, temp);
+
+	bytetemp = rtl_read_byte(rtlpriv, rtlpriv->cfg->maps[EFUSE_TEST] + 3);
+	temp = bytetemp | 0x80;
+	rtl_write_byte(rtlpriv, rtlpriv->cfg->maps[EFUSE_TEST] + 3, temp);
+
+	rtl_write_byte(rtlpriv, 0x2F8, 0x3);
+
+	rtl_write_byte(rtlpriv, rtlpriv->cfg->maps[EFUSE_CTRL] + 3, 0x72);
+
+}
+
+u8 efuse_read_1byte(struct ieee80211_hw *hw, u16 address)
+{
+	struct rtl_priv *rtlpriv = rtl_priv(hw);
+	u8 data;
+	u8 bytetemp;
+	u8 temp;
+	u32 k = 0;
+	const u32 efuse_real_content_len =
+		rtlpriv->cfg->maps[EFUSE_REAL_CONTENT_SIZE];
+
+	if (address < efuse_real_content_len) {
+		temp = address & 0xFF;
+		rtl_write_byte(rtlpriv, rtlpriv->cfg->maps[EFUSE_CTRL] + 1,
+			       temp);
+		bytetemp = rtl_read_byte(rtlpriv,
+					 rtlpriv->cfg->maps[EFUSE_CTRL] + 2);
+		temp = ((address >> 8) & 0x03) | (bytetemp & 0xFC);
+		rtl_write_byte(rtlpriv, rtlpriv->cfg->maps[EFUSE_CTRL] + 2,
+			       temp);
+
+		bytetemp = rtl_read_byte(rtlpriv,
+					 rtlpriv->cfg->maps[EFUSE_CTRL] + 3);
+		temp = bytetemp & 0x7F;
+		rtl_write_byte(rtlpriv, rtlpriv->cfg->maps[EFUSE_CTRL] + 3,
+			       temp);
+
+		bytetemp = rtl_read_byte(rtlpriv,
+					 rtlpriv->cfg->maps[EFUSE_CTRL] + 3);
+		while (!(bytetemp & 0x80)) {
+			bytetemp = rtl_read_byte(rtlpriv,
+						 rtlpriv->cfg->
+						 maps[EFUSE_CTRL] + 3);
+			k++;
+			if (k == 1000) {
+				k = 0;
+				break;
+			}
+		}
+		data = rtl_read_byte(rtlpriv, rtlpriv->cfg->maps[EFUSE_CTRL]);
+		return data;
+	} else
+		return 0xFF;
+
+}
+//EXPORT_SYMBOL(efuse_read_1byte);
+
+void efuse_write_1byte(struct ieee80211_hw *hw, u16 address, u8 value)
+{
+	struct rtl_priv *rtlpriv = rtl_priv(hw);
+	u8 bytetemp;
+	u8 temp;
+	u32 k = 0;
+	const u32 efuse_real_content_len =
+		rtlpriv->cfg->maps[EFUSE_REAL_CONTENT_SIZE];
+
+	RT_TRACE(COMP_EFUSE, DBG_LOUD,
+		 ("Addr=%x Data =%x\n", address, value));
+
+	if (address < efuse_real_content_len) {
+		rtl_write_byte(rtlpriv, rtlpriv->cfg->maps[EFUSE_CTRL], value);
+
+		temp = address & 0xFF;
+		rtl_write_byte(rtlpriv, rtlpriv->cfg->maps[EFUSE_CTRL] + 1,
+			       temp);
+		bytetemp = rtl_read_byte(rtlpriv,
+					 rtlpriv->cfg->maps[EFUSE_CTRL] + 2);
+
+		temp = ((address >> 8) & 0x03) | (bytetemp & 0xFC);
+		rtl_write_byte(rtlpriv,
+			       rtlpriv->cfg->maps[EFUSE_CTRL] + 2, temp);
+
+		bytetemp = rtl_read_byte(rtlpriv,
+					 rtlpriv->cfg->maps[EFUSE_CTRL] + 3);
+		temp = bytetemp | 0x80;
+		rtl_write_byte(rtlpriv,
+			       rtlpriv->cfg->maps[EFUSE_CTRL] + 3, temp);
+
+		bytetemp = rtl_read_byte(rtlpriv,
+					 rtlpriv->cfg->maps[EFUSE_CTRL] + 3);
+
+		while (bytetemp & 0x80) {
+			bytetemp = rtl_read_byte(rtlpriv,
+						 rtlpriv->cfg->
+						 maps[EFUSE_CTRL] + 3);
+			k++;
+			if (k == 100) {
+				k = 0;
+				break;
+			}
+		}
+	}
+
+}
+
+void read_efuse_byte(struct ieee80211_hw *hw, u16 _offset, u8 *pbuf)
+{
+	struct rtl_priv *rtlpriv = rtl_priv(hw);
+	u32 value32;
+	u8 readbyte;
+	u16 retry;
+
+	rtl_write_byte(rtlpriv, rtlpriv->cfg->maps[EFUSE_CTRL] + 1,
+		       (_offset & 0xff));
+	readbyte = rtl_read_byte(rtlpriv, rtlpriv->cfg->maps[EFUSE_CTRL] + 2);
+	rtl_write_byte(rtlpriv, rtlpriv->cfg->maps[EFUSE_CTRL] + 2,
+		       ((_offset >> 8) & 0x03) | (readbyte & 0xfc));
+
+	readbyte = rtl_read_byte(rtlpriv, rtlpriv->cfg->maps[EFUSE_CTRL] + 3);
+	rtl_write_byte(rtlpriv, rtlpriv->cfg->maps[EFUSE_CTRL] + 3,
+		       (readbyte & 0x7f));
+
+	retry = 0;
+	value32 = rtl_read_dword(rtlpriv, rtlpriv->cfg->maps[EFUSE_CTRL]);
+	while (!(((value32 >> 24) & 0xff) & 0x80) && (retry < 10000)) {
+		value32 = rtl_read_dword(rtlpriv,
+					 rtlpriv->cfg->maps[EFUSE_CTRL]);
+		retry++;
+	}
+
+	udelay(50);
+	value32 = rtl_read_dword(rtlpriv, rtlpriv->cfg->maps[EFUSE_CTRL]);
+
+	*pbuf = (u8) (value32 & 0xff);
+}
+
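+/*
+ * Summary of the indirect read protocol implemented by read_efuse_byte()
+ * above (descriptive only): the 10-bit efuse address is split across
+ * EFUSE_CTRL+1 (address bits 7:0) and bits 1:0 of EFUSE_CTRL+2 (address
+ * bits 9:8); clearing bit 7 of EFUSE_CTRL+3 starts the read, the hardware
+ * sets that bit again when the access completes (polled up to 10000
+ * times), and the data byte is then taken from the low byte of EFUSE_CTRL.
+ */
+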
+void read_efuse(struct ieee80211_hw *hw, u16 _offset, u16 _size_byte, u8 *pbuf)
+{
+	struct rtl_priv *rtlpriv = rtl_priv(hw);
+	struct rtl_efuse *rtlefuse = rtl_efuse(rtl_priv(hw));
+	u8 efuse_tbl[rtlpriv->cfg->maps[EFUSE_HWSET_MAX_SIZE]];
+	u8 rtemp8[1];
+	u16 efuse_addr = 0;
+	u8 offset, wren;
+	u8 u1temp = 0;
+	u16 i;
+	u16 j;
+	const u16 efuse_max_section =
+		rtlpriv->cfg->maps[EFUSE_MAX_SECTION_MAP];
+	const u32 efuse_real_content_len =
+		rtlpriv->cfg->maps[EFUSE_REAL_CONTENT_SIZE];
+	u16 efuse_word[efuse_max_section][EFUSE_MAX_WORD_UNIT];
+	u16 efuse_utilized = 0;
+	u8 efuse_usage;
+
+	if ((_offset + _size_byte) > rtlpriv->cfg->maps[EFUSE_HWSET_MAX_SIZE]) {
+		RT_TRACE(COMP_EFUSE, DBG_LOUD,
+			 ("read_efuse(): Invalid offset(%#x) with read "
+			  "bytes(%#x)!!\n", _offset, _size_byte));
+		return;
+	}
+
+	for (i = 0; i < efuse_max_section; i++)
+		for (j = 0; j < EFUSE_MAX_WORD_UNIT; j++)
+			efuse_word[i][j] = 0xFFFF;
+
+	read_efuse_byte(hw, efuse_addr, rtemp8);
+	if (*rtemp8 != 0xFF) {
+		efuse_utilized++;
+		RTPRINT(rtlpriv, FEEPROM, EFUSE_READ_ALL,
+			("Addr=%d\n", efuse_addr));
+		efuse_addr++;
+	}
+
+	while ((*rtemp8 != 0xFF) && (efuse_addr < efuse_real_content_len)) {
+		/*  Check PG header for section num.  */
+		if ((*rtemp8 & 0x1F) == 0x0F) { /* extended header */
+			u1temp = ((*rtemp8 & 0xE0) >> 5);
+			read_efuse_byte(hw, efuse_addr, rtemp8);
+
+			if ((*rtemp8 & 0x0F) == 0x0F) {
+				efuse_addr++;
+				read_efuse_byte(hw, efuse_addr, rtemp8);
+
+				if (*rtemp8 != 0xFF &&
+				    (efuse_addr < efuse_real_content_len)) {
+					efuse_addr++;
+				}
+				continue;
+			} else {
+				offset = ((*rtemp8 & 0xF0) >> 1) | u1temp;
+				wren = (*rtemp8 & 0x0F);
+				efuse_addr++;
+			}
+		} else {
+			offset = ((*rtemp8 >> 4) & 0x0f);
+			wren = (*rtemp8 & 0x0f);
+		}
+
+		if (offset < efuse_max_section) {
+			RTPRINT(rtlpriv, FEEPROM, EFUSE_READ_ALL,
+				("offset-%d Worden=%x\n", offset, wren));
+
+			for (i = 0; i < EFUSE_MAX_WORD_UNIT; i++) {
+				if (!(wren & 0x01)) {
+					RTPRINT(rtlpriv, FEEPROM,
+						EFUSE_READ_ALL, ("Addr=%d\n",
+								 efuse_addr));
+
+					read_efuse_byte(hw, efuse_addr, rtemp8);
+					efuse_addr++;
+					efuse_utilized++;
+					efuse_word[offset][i] = (*rtemp8 &
+								 0xff);
+
+					if (efuse_addr >=
+					    efuse_real_content_len)
+						break;
+
+					RTPRINT(rtlpriv, FEEPROM,
+						EFUSE_READ_ALL, ("Addr=%d\n",
+								 efuse_addr));
+
+					read_efuse_byte(hw, efuse_addr, rtemp8);
+					efuse_addr++;
+					efuse_utilized++;
+					efuse_word[offset][i] |=
+					    (((u16)*rtemp8 << 8) & 0xff00);
+
+					if (efuse_addr >= efuse_real_content_len)
+						break;
+				}
+
+				wren >>= 1;
+			}
+		}
+
+		RTPRINT(rtlpriv, FEEPROM, EFUSE_READ_ALL,
+			("Addr=%d\n", efuse_addr));
+		read_efuse_byte(hw, efuse_addr, rtemp8);
+		if (*rtemp8 != 0xFF && (efuse_addr < efuse_real_content_len)) {
+			efuse_utilized++;
+			efuse_addr++;
+		}
+	}
+
+	for (i = 0; i < efuse_max_section; i++) {
+		for (j = 0; j < EFUSE_MAX_WORD_UNIT; j++) {
+			efuse_tbl[(i * 8) + (j * 2)] =
+			    (efuse_word[i][j] & 0xff);
+			efuse_tbl[(i * 8) + ((j * 2) + 1)] =
+			    ((efuse_word[i][j] >> 8) & 0xff);
+		}
+	}
+
+	for (i = 0; i < _size_byte; i++)
+		pbuf[i] = efuse_tbl[_offset + i];
+
+	rtlefuse->efuse_usedbytes = efuse_utilized;
+	efuse_usage = (u8) ((efuse_utilized * 100) / efuse_real_content_len);
+	rtlefuse->efuse_usedpercentage = efuse_usage;
+	rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_EFUSE_BYTES,
+				      (u8 *) & efuse_utilized);
+	rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_EFUSE_USAGE,
+				      (u8 *) & efuse_usage);
+}
+
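+/*
+ * Overview of read_efuse() above (descriptive only): the physical efuse
+ * is a stream of PG packets, each starting with a header byte whose high
+ * nibble is the section offset and whose low nibble is the active-low
+ * word-enable mask; a second extended-header byte may follow for large
+ * section offsets.  The loop walks that stream, collects every programmed
+ * 16-bit word into efuse_word[section][word], flattens the table into the
+ * logical map efuse_tbl[], and finally copies the caller's requested
+ * window (_offset, _size_byte) into pbuf.
+ */
+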
+bool efuse_shadow_update_chk(struct ieee80211_hw *hw)
+{
+	struct rtl_priv *rtlpriv = rtl_priv(hw);
+	struct rtl_efuse *rtlefuse = rtl_efuse(rtl_priv(hw));
+	u8 section_idx, i, Base;
+	u16 words_need = 0, hdr_num = 0, totalbytes, efuse_used;
+	bool bwordchanged, bresult = true;
+
+	for (section_idx = 0; section_idx < 16; section_idx++) {
+		Base = section_idx * 8;
+		bwordchanged = false;
+
+		for (i = 0; i < 8; i = i + 2) {
+			if ((rtlefuse->efuse_map[EFUSE_INIT_MAP][Base + i] !=
+			     rtlefuse->efuse_map[EFUSE_MODIFY_MAP][Base + i]) ||
+			    (rtlefuse->efuse_map[EFUSE_INIT_MAP][Base + i + 1] !=
+			     rtlefuse->efuse_map[EFUSE_MODIFY_MAP][Base + i +
+								   1])) {
+				words_need++;
+				bwordchanged = true;
+			}
+		}
+
+		if (bwordchanged == true)
+			hdr_num++;
+	}
+
+	totalbytes = hdr_num + words_need * 2;
+	efuse_used = rtlefuse->efuse_usedbytes;
+
+	if ((totalbytes + efuse_used) >= (EFUSE_MAX_SIZE -
+		rtlpriv->cfg->maps[EFUSE_OOB_PROTECT_BYTES_LEN]))
+		bresult = false;
+
+	RT_TRACE(COMP_EFUSE, DBG_LOUD,
+		 ("efuse_shadow_update_chk(): totalbytes(%#x), "
+		  "hdr_num(%#x), words_need(%#x), efuse_used(%d)\n",
+		  totalbytes, hdr_num, words_need, efuse_used));
+
+	return bresult;
+}
+
+void efuse_shadow_read(struct ieee80211_hw *hw, u8 type,
+		       u16 offset, u32 *value)
+{
+	if (type == 1)
+		efuse_shadow_read_1byte(hw, offset, (u8 *) value);
+	else if (type == 2)
+		efuse_shadow_read_2byte(hw, offset, (u16 *) value);
+	else if (type == 4)
+		efuse_shadow_read_4byte(hw, offset, (u32 *) value);
+
+}
+
+void efuse_shadow_write(struct ieee80211_hw *hw, u8 type, u16 offset,
+				u32 value)
+{
+	if (type == 1)
+		efuse_shadow_write_1byte(hw, offset, (u8) value);
+	else if (type == 2)
+		efuse_shadow_write_2byte(hw, offset, (u16) value);
+	else if (type == 4)
+		efuse_shadow_write_4byte(hw, offset, (u32) value);
+
+}
+
+bool efuse_shadow_update(struct ieee80211_hw *hw)
+{
+	struct rtl_priv *rtlpriv = rtl_priv(hw);
+	struct rtl_efuse *rtlefuse = rtl_efuse(rtl_priv(hw));
+	u16 i, offset, base;
+	u8 word_en = 0x0F;
+	u8 first_pg = false;
+
+	RT_TRACE(COMP_EFUSE, DBG_LOUD, ("\n"));
+
+	if (!efuse_shadow_update_chk(hw)) {
+		efuse_read_all_map(hw, &rtlefuse->efuse_map[EFUSE_INIT_MAP][0]);
+		memcpy(&rtlefuse->efuse_map[EFUSE_MODIFY_MAP][0],
+		       &rtlefuse->efuse_map[EFUSE_INIT_MAP][0],
+		       rtlpriv->cfg->maps[EFUSE_HWSET_MAX_SIZE]);
+
+		RT_TRACE(COMP_EFUSE, DBG_LOUD,
+			 ("efuse out of capacity!!\n"));
+		return false;
+	}
+	efuse_power_switch(hw, true, true);
+
+	for (offset = 0; offset < 16; offset++) {
+
+		word_en = 0x0F;
+		base = offset * 8;
+
+		for (i = 0; i < 8; i++) {
+			if (first_pg == true) {
+
+				word_en &= ~(BIT(i / 2));
+
+				rtlefuse->efuse_map[EFUSE_INIT_MAP][base + i] =
+				    rtlefuse->efuse_map[EFUSE_MODIFY_MAP][base + i];
+			} else {
+
+				if (rtlefuse->efuse_map[EFUSE_INIT_MAP][base + i] !=
+				    rtlefuse->efuse_map[EFUSE_MODIFY_MAP][base + i]) {
+					word_en &= ~(BIT(i / 2));
+
+					rtlefuse->efuse_map[EFUSE_INIT_MAP][base + i] =
+					    rtlefuse->efuse_map[EFUSE_MODIFY_MAP][base + i];
+				}
+			}
+		}
+
+		if (word_en != 0x0F) {
+			u8 tmpdata[8];
+			memcpy(tmpdata, (&rtlefuse->efuse_map[EFUSE_MODIFY_MAP][base]), 8);
+			RT_PRINT_DATA(rtlpriv, COMP_INIT, DBG_LOUD,
+				      ("U-efuse\n"), tmpdata, 8);
+
+			if (!efuse_pg_packet_write(hw, (u8) offset, word_en,
+						   tmpdata)) {
+				RT_TRACE(COMP_ERR, DBG_WARNING,
+					 ("PG section(%#x) fail!!\n", offset));
+				break;
+			}
+		}
+
+	}
+
+	efuse_power_switch(hw, true, false);
+	efuse_read_all_map(hw, &rtlefuse->efuse_map[EFUSE_INIT_MAP][0]);
+
+	memcpy(&rtlefuse->efuse_map[EFUSE_MODIFY_MAP][0],
+	       &rtlefuse->efuse_map[EFUSE_INIT_MAP][0],
+	       rtlpriv->cfg->maps[EFUSE_HWSET_MAX_SIZE]);
+
+	RT_TRACE(COMP_EFUSE, DBG_LOUD, ("\n"));
+	return true;
+}
+
+void rtl_efuse_shadow_map_update(struct ieee80211_hw *hw)
+{
+	struct rtl_priv *rtlpriv = rtl_priv(hw);
+	struct rtl_efuse *rtlefuse = rtl_efuse(rtl_priv(hw));
+
+	if (rtlefuse->autoload_failflag == true) {
+		memset((&rtlefuse->efuse_map[EFUSE_INIT_MAP][0]),
+			0xFF, rtlpriv->cfg->maps[EFUSE_HWSET_MAX_SIZE]);
+	} else {
+		efuse_read_all_map(hw, &rtlefuse->efuse_map[EFUSE_INIT_MAP][0]);
+	}
+
+	memcpy(&rtlefuse->efuse_map[EFUSE_MODIFY_MAP][0],
+			&rtlefuse->efuse_map[EFUSE_INIT_MAP][0],
+			rtlpriv->cfg->maps[EFUSE_HWSET_MAX_SIZE]);
+
+}
+//EXPORT_SYMBOL(rtl_efuse_shadow_map_update);
+
+void efuse_force_write_vendor_Id(struct ieee80211_hw *hw)
+{
+	u8 tmpdata[8] = { 0xFF, 0xFF, 0xEC, 0x10, 0xFF, 0xFF, 0xFF, 0xFF };
+
+	efuse_power_switch(hw, true, true);
+
+	efuse_pg_packet_write(hw, 1, 0xD, tmpdata);
+
+	efuse_power_switch(hw, true, false);
+
+}
+
+void efuse_re_pg_section(struct ieee80211_hw *hw, u8 section_idx)
+{
+}
+
+static void efuse_shadow_read_1byte(struct ieee80211_hw *hw,
+				    u16 offset, u8 *value)
+{
+	struct rtl_efuse *rtlefuse = rtl_efuse(rtl_priv(hw));
+	*value = rtlefuse->efuse_map[EFUSE_MODIFY_MAP][offset];
+}
+
+static void efuse_shadow_read_2byte(struct ieee80211_hw *hw,
+				    u16 offset, u16 *value)
+{
+	struct rtl_efuse *rtlefuse = rtl_efuse(rtl_priv(hw));
+
+	*value = rtlefuse->efuse_map[EFUSE_MODIFY_MAP][offset];
+	*value |= rtlefuse->efuse_map[EFUSE_MODIFY_MAP][offset + 1] << 8;
+
+}
+
+static void efuse_shadow_read_4byte(struct ieee80211_hw *hw,
+				    u16 offset, u32 *value)
+{
+	struct rtl_efuse *rtlefuse = rtl_efuse(rtl_priv(hw));
+
+	*value = rtlefuse->efuse_map[EFUSE_MODIFY_MAP][offset];
+	*value |= rtlefuse->efuse_map[EFUSE_MODIFY_MAP][offset + 1] << 8;
+	*value |= rtlefuse->efuse_map[EFUSE_MODIFY_MAP][offset + 2] << 16;
+	*value |= rtlefuse->efuse_map[EFUSE_MODIFY_MAP][offset + 3] << 24;
+}
+
+static void efuse_shadow_write_1byte(struct ieee80211_hw *hw,
+				     u16 offset, u8 value)
+{
+	struct rtl_efuse *rtlefuse = rtl_efuse(rtl_priv(hw));
+
+	rtlefuse->efuse_map[EFUSE_MODIFY_MAP][offset] = value;
+}
+
+static void efuse_shadow_write_2byte(struct ieee80211_hw *hw,
+				     u16 offset, u16 value)
+{
+	struct rtl_efuse *rtlefuse = rtl_efuse(rtl_priv(hw));
+
+	rtlefuse->efuse_map[EFUSE_MODIFY_MAP][offset] = value & 0x00FF;
+	rtlefuse->efuse_map[EFUSE_MODIFY_MAP][offset + 1] = value >> 8;
+
+}
+
+static void efuse_shadow_write_4byte(struct ieee80211_hw *hw,
+				     u16 offset, u32 value)
+{
+	struct rtl_efuse *rtlefuse = rtl_efuse(rtl_priv(hw));
+
+	rtlefuse->efuse_map[EFUSE_MODIFY_MAP][offset] =
+	    (u8) (value & 0x000000FF);
+	rtlefuse->efuse_map[EFUSE_MODIFY_MAP][offset + 1] =
+	    (u8) ((value >> 8) & 0x0000FF);
+	rtlefuse->efuse_map[EFUSE_MODIFY_MAP][offset + 2] =
+	    (u8) ((value >> 16) & 0x00FF);
+	rtlefuse->efuse_map[EFUSE_MODIFY_MAP][offset + 3] =
+	    (u8) ((value >> 24) & 0xFF);
+
+}
+
+int efuse_one_byte_read(struct ieee80211_hw *hw, u16 addr, u8 *data)
+{
+	struct rtl_priv *rtlpriv = rtl_priv(hw);
+	u8 tmpidx = 0;
+	int bresult;
+
+	rtl_write_byte(rtlpriv, rtlpriv->cfg->maps[EFUSE_CTRL] + 1,
+		       (u8) (addr & 0xff));
+	rtl_write_byte(rtlpriv, rtlpriv->cfg->maps[EFUSE_CTRL] + 2,
+		       ((u8) ((addr >> 8) & 0x03)) |
+		       (rtl_read_byte(rtlpriv,
+				      rtlpriv->cfg->maps[EFUSE_CTRL] + 2) &
+			0xFC));
+
+	rtl_write_byte(rtlpriv, rtlpriv->cfg->maps[EFUSE_CTRL] + 3, 0x72);
+
+	while (!(0x80 & rtl_read_byte(rtlpriv,
+				      rtlpriv->cfg->maps[EFUSE_CTRL] + 3))
+	       && (tmpidx < 100)) {
+		tmpidx++;
+	}
+
+	if (tmpidx < 100) {
+		*data = rtl_read_byte(rtlpriv, rtlpriv->cfg->maps[EFUSE_CTRL]);
+		bresult = true;
+	} else {
+		*data = 0xff;
+		bresult = false;
+	}
+	return bresult;
+}
+//EXPORT_SYMBOL(efuse_one_byte_read);
+
+static int efuse_one_byte_write(struct ieee80211_hw *hw, u16 addr, u8 data)
+{
+	struct rtl_priv *rtlpriv = rtl_priv(hw);
+	u8 tmpidx = 0;
+	bool bresult;
+
+	RT_TRACE(COMP_EFUSE, DBG_LOUD,
+		 ("Addr = %x Data=%x\n", addr, data));
+
+	rtl_write_byte(rtlpriv,
+		       rtlpriv->cfg->maps[EFUSE_CTRL] + 1, (u8) (addr & 0xff));
+	rtl_write_byte(rtlpriv, rtlpriv->cfg->maps[EFUSE_CTRL] + 2,
+		       (rtl_read_byte(rtlpriv,
+			 rtlpriv->cfg->maps[EFUSE_CTRL] +
+			 2) & 0xFC) | (u8) ((addr >> 8) & 0x03));
+
+	rtl_write_byte(rtlpriv, rtlpriv->cfg->maps[EFUSE_CTRL], data);
+	rtl_write_byte(rtlpriv, rtlpriv->cfg->maps[EFUSE_CTRL] + 3, 0xF2);
+
+	while ((0x80 & rtl_read_byte(rtlpriv,
+				     rtlpriv->cfg->maps[EFUSE_CTRL] + 3))
+	       && (tmpidx < 100)) {
+		tmpidx++;
+	}
+
+	if (tmpidx < 100)
+		bresult = true;
+	else
+		bresult = false;
+
+	return bresult;
+}
+
+static void efuse_read_all_map(struct ieee80211_hw *hw, u8 * efuse)
+{
+	struct rtl_priv *rtlpriv = rtl_priv(hw);
+	efuse_power_switch(hw, false, true);
+	read_efuse(hw, 0, rtlpriv->cfg->maps[EFUSE_HWSET_MAX_SIZE], efuse);
+	efuse_power_switch(hw, false, false);
+}
+
+static void efuse_read_data_case1(struct ieee80211_hw *hw, u16 *efuse_addr,
+				u8 efuse_data, u8 offset, u8 *tmpdata,
+				u8 *readstate)
+{
+	bool bdataempty = true;
+	u8 hoffset;
+	u8 tmpidx;
+	u8 hworden;
+	u8 word_cnts;
+
+	hoffset = (efuse_data >> 4) & 0x0F;
+	hworden = efuse_data & 0x0F;
+	word_cnts = efuse_calculate_word_cnts(hworden);
+
+	if (hoffset == offset) {
+		for (tmpidx = 0; tmpidx < word_cnts * 2; tmpidx++) {
+			if (efuse_one_byte_read(hw, *efuse_addr + 1 + tmpidx,
+						&efuse_data)) {
+				tmpdata[tmpidx] = efuse_data;
+				/* a non-0xff byte means the packet has data */
+				if (efuse_data != 0xff)
+					bdataempty = false;
+			}
+		}
+
+		if (bdataempty == false) {
+			*readstate = PG_STATE_DATA;
+		} else {
+			*efuse_addr = *efuse_addr + (word_cnts * 2) + 1;
+			*readstate = PG_STATE_HEADER;
+		}
+
+	} else {
+		*efuse_addr = *efuse_addr + (word_cnts * 2) + 1;
+		*readstate = PG_STATE_HEADER;
+	}
+}
+
+static int efuse_pg_packet_read(struct ieee80211_hw *hw, u8 offset, u8 *data)
+{
+	u8 readstate = PG_STATE_HEADER;
+
+	bool bcontinual = true;
+
+	u8 efuse_data, word_cnts = 0;
+	u16 efuse_addr = 0;
+	u8 hworden = 0;
+	u8 tmpdata[8];
+
+	if (data == NULL)
+		return false;
+	if (offset > 15)
+		return false;
+
+	memset(data, 0xff, PGPKT_DATA_SIZE * sizeof(u8));
+	memset(tmpdata, 0xff, PGPKT_DATA_SIZE * sizeof(u8));
+
+	while (bcontinual && (efuse_addr < EFUSE_MAX_SIZE)) {
+		if (readstate & PG_STATE_HEADER) {
+			if (efuse_one_byte_read(hw, efuse_addr, &efuse_data)
+			    && (efuse_data != 0xFF))
+				efuse_read_data_case1(hw, &efuse_addr, efuse_data, offset,
+							tmpdata, &readstate);
+			else
+				bcontinual = false;
+		} else if (readstate & PG_STATE_DATA) {
+			efuse_word_enable_data_read(hworden, tmpdata, data);
+			efuse_addr = efuse_addr + (word_cnts * 2) + 1;
+			readstate = PG_STATE_HEADER;
+		}
+
+	}
+
+	if ((data[0] == 0xff) && (data[1] == 0xff) &&
+	    (data[2] == 0xff) && (data[3] == 0xff) &&
+	    (data[4] == 0xff) && (data[5] == 0xff) &&
+	    (data[6] == 0xff) && (data[7] == 0xff))
+		return false;
+	else
+		return true;
+
+}
+
+static void efuse_write_data_case1(struct ieee80211_hw *hw, u16 *efuse_addr,
+				u8 efuse_data, u8 offset, int *bcontinual,
+				u8 *write_state, struct pgpkt_struct *target_pkt,
+				int *repeat_times, int *bresult, u8 word_en)
+{
+	struct rtl_priv *rtlpriv = rtl_priv(hw);
+	struct pgpkt_struct tmp_pkt;
+	int bdataempty = true;
+	u8 originaldata[8 * sizeof(u8)];
+	u8 badworden = 0x0F;
+	u8 match_word_en, tmp_word_en;
+	u8 tmpindex;
+	u8 tmp_header = efuse_data;
+	u8 tmp_word_cnts;
+
+	tmp_pkt.offset = (tmp_header >> 4) & 0x0F;
+	tmp_pkt.word_en = tmp_header & 0x0F;
+	tmp_word_cnts = efuse_calculate_word_cnts(tmp_pkt.word_en);
+
+	if (tmp_pkt.offset != target_pkt->offset) {
+		*efuse_addr = *efuse_addr + (tmp_word_cnts * 2) + 1;
+		*write_state = PG_STATE_HEADER;
+	} else {
+		for (tmpindex = 0; tmpindex < (tmp_word_cnts * 2); tmpindex++) {
+			if (efuse_one_byte_read(hw,
+						(*efuse_addr + 1 + tmpindex),
+						&efuse_data) && (efuse_data != 0xFF))
+				bdataempty = false;
+		}
+
+		if (bdataempty == false) {
+			*efuse_addr = *efuse_addr + (tmp_word_cnts * 2) + 1;
+			*write_state = PG_STATE_HEADER;
+		} else {
+			match_word_en = 0x0F;
+			if (!((target_pkt->word_en & BIT(0)) |
+			    (tmp_pkt.word_en & BIT(0))))
+				match_word_en &= (~BIT(0));
+
+			if (!((target_pkt->word_en & BIT(1)) |
+			    (tmp_pkt.word_en & BIT(1))))
+				match_word_en &= (~BIT(1));
+
+			if (!((target_pkt->word_en & BIT(2)) |
+			    (tmp_pkt.word_en & BIT(2))))
+				match_word_en &= (~BIT(2));
+
+			if (!((target_pkt->word_en & BIT(3)) |
+			    (tmp_pkt.word_en & BIT(3))))
+				match_word_en &= (~BIT(3));
+
+			if ((match_word_en & 0x0F) != 0x0F) {
+				badworden = efuse_word_enable_data_write(hw,
+							*efuse_addr + 1,
+							tmp_pkt.word_en,
+							target_pkt->data);
+
+				if (0x0F != (badworden & 0x0F))	{
+					u8 reorg_offset = offset;
+					u8 reorg_worden = badworden;
+					efuse_pg_packet_write(hw, reorg_offset,
+							      reorg_worden,
+							      originaldata);
+				}
+
+				tmp_word_en = 0x0F;
+				if ((target_pkt->word_en & BIT(0)) ^
+				    (match_word_en & BIT(0)))
+					tmp_word_en &= (~BIT(0));
+
+				if ((target_pkt->word_en & BIT(1)) ^
+				    (match_word_en & BIT(1)))
+					tmp_word_en &= (~BIT(1));
+
+				if ((target_pkt->word_en & BIT(2)) ^
+				    (match_word_en & BIT(2)))
+					tmp_word_en &= (~BIT(2));
+
+				if ((target_pkt->word_en & BIT(3)) ^
+				    (match_word_en & BIT(3)))
+					tmp_word_en &= (~BIT(3));
+
+				if ((tmp_word_en & 0x0F) != 0x0F) {
+					*efuse_addr = efuse_get_current_size(hw);
+					target_pkt->offset = offset;
+					target_pkt->word_en = tmp_word_en;
+				} else {
+					*bcontinual = false;
+				}
+				*write_state = PG_STATE_HEADER;
+				*repeat_times += 1;
+				if (*repeat_times > EFUSE_REPEAT_THRESHOLD_) {
+					*bcontinual = false;
+					*bresult = false;
+				}
+			} else {
+				*efuse_addr += (2 * tmp_word_cnts) + 1;
+				target_pkt->offset = offset;
+				target_pkt->word_en = word_en;
+				*write_state = PG_STATE_HEADER;
+			}
+		}
+	}
+	RTPRINT(rtlpriv, FEEPROM, EFUSE_PG, ("efuse PG_STATE_HEADER-1\n"));
+}
+
+static void efuse_write_data_case2(struct ieee80211_hw *hw, u16 *efuse_addr,
+				   int *bcontinual, u8 *write_state,
+				   struct pgpkt_struct target_pkt,
+				   int *repeat_times, int *bresult)
+{
+	struct rtl_priv *rtlpriv = rtl_priv(hw);
+	struct pgpkt_struct tmp_pkt;
+	u8 pg_header;
+	u8 tmp_header;
+	u8 originaldata[8 * sizeof(u8)];
+	u8 tmp_word_cnts;
+	u8 badworden = 0x0F;
+
+	pg_header = ((target_pkt.offset << 4) & 0xf0) | target_pkt.word_en;
+	efuse_one_byte_write(hw, *efuse_addr, pg_header);
+	efuse_one_byte_read(hw, *efuse_addr, &tmp_header);
+
+	if (tmp_header == pg_header) {
+		*write_state = PG_STATE_DATA;
+	} else if (tmp_header == 0xFF) {
+		*write_state = PG_STATE_HEADER;
+		*repeat_times += 1;
+		if (*repeat_times > EFUSE_REPEAT_THRESHOLD_) {
+			*bcontinual = false;
+			*bresult = false;
+		}
+	} else {
+		tmp_pkt.offset = (tmp_header >> 4) & 0x0F;
+		tmp_pkt.word_en = tmp_header & 0x0F;
+
+		tmp_word_cnts = efuse_calculate_word_cnts(tmp_pkt.word_en);
+
+		memset(originaldata, 0xff,  8 * sizeof(u8));
+
+		if (efuse_pg_packet_read(hw, tmp_pkt.offset, originaldata)) {
+			badworden = efuse_word_enable_data_write(hw,
+								*efuse_addr + 1,
+								tmp_pkt.word_en,
+								originaldata);
+
+			if (0x0F != (badworden & 0x0F)) {
+				u8 reorg_offset = tmp_pkt.offset;
+				u8 reorg_worden = badworden;
+				efuse_pg_packet_write(hw, reorg_offset,
+						      reorg_worden,
+						      originaldata);
+				*efuse_addr = efuse_get_current_size(hw);
+			} else {
+				*efuse_addr = *efuse_addr +
+					      (tmp_word_cnts * 2) + 1;
+			}
+		} else {
+			*efuse_addr = *efuse_addr + (tmp_word_cnts * 2) + 1;
+		}
+
+		*write_state = PG_STATE_HEADER;
+		*repeat_times += 1;
+		if (*repeat_times > EFUSE_REPEAT_THRESHOLD_) {
+			*bcontinual = false;
+			*bresult = false;
+		}
+
+		RTPRINT(rtlpriv, FEEPROM, EFUSE_PG,
+			("efuse PG_STATE_HEADER-2\n"));
+	}
+}
+
+static int efuse_pg_packet_write(struct ieee80211_hw *hw,
+				 u8 offset, u8 word_en, u8 *data)
+{
+	struct rtl_priv *rtlpriv = rtl_priv(hw);
+	struct pgpkt_struct target_pkt;
+	u8 write_state = PG_STATE_HEADER;
+	int bcontinual = true, bdataempty = true, bresult = true;
+	u16 efuse_addr = 0;
+	u8 efuse_data;
+	u8 target_word_cnts = 0;
+	u8 badworden = 0x0F;
+	static int repeat_times = 0;
+
+	if (efuse_get_current_size(hw) >= (EFUSE_MAX_SIZE -
+		rtlpriv->cfg->maps[EFUSE_OOB_PROTECT_BYTES_LEN])) {
+		RTPRINT(rtlpriv, FEEPROM, EFUSE_PG,
+			("efuse_pg_packet_write error \n"));
+		return false;
+	}
+
+	target_pkt.offset = offset;
+	target_pkt.word_en = word_en;
+
+	memset(target_pkt.data, 0xFF,  8 * sizeof(u8));
+
+	efuse_word_enable_data_read(word_en, data, target_pkt.data);
+	target_word_cnts = efuse_calculate_word_cnts(target_pkt.word_en);
+
+	RTPRINT(rtlpriv, FEEPROM, EFUSE_PG, ("efuse Power ON\n"));
+
+	while (bcontinual && (efuse_addr < (EFUSE_MAX_SIZE -
+		rtlpriv->cfg->maps[EFUSE_OOB_PROTECT_BYTES_LEN]))) {
+
+		if (write_state == PG_STATE_HEADER) {
+			bdataempty = true;
+			badworden = 0x0F;
+			RTPRINT(rtlpriv, FEEPROM, EFUSE_PG,
+				("efuse PG_STATE_HEADER\n"));
+
+			if (efuse_one_byte_read(hw, efuse_addr, &efuse_data) &&
+			    (efuse_data != 0xFF))
+				efuse_write_data_case1(hw, &efuse_addr,
+						       efuse_data, offset,
+						       &bcontinual,
+						       &write_state,
+						       &target_pkt,
+						       &repeat_times, &bresult,
+						       word_en);
+			else
+				efuse_write_data_case2(hw, &efuse_addr,
+						       &bcontinual,
+						       &write_state,
+						       target_pkt,
+						       &repeat_times,
+						       &bresult);
+
+		} else if (write_state == PG_STATE_DATA) {
+			RTPRINT(rtlpriv, FEEPROM, EFUSE_PG,
+				("efuse PG_STATE_DATA\n"));
+			badworden = 0x0f;
+			badworden =
+			    efuse_word_enable_data_write(hw, efuse_addr + 1,
+							 target_pkt.word_en,
+							 target_pkt.data);
+
+			if ((badworden & 0x0F) == 0x0F) {
+				bcontinual = false;
+			} else {
+				efuse_addr =
+				    efuse_addr + (2 * target_word_cnts) + 1;
+
+				target_pkt.offset = offset;
+				target_pkt.word_en = badworden;
+				target_word_cnts =
+				    efuse_calculate_word_cnts(target_pkt.
+							      word_en);
+				write_state = PG_STATE_HEADER;
+				repeat_times++;
+				if (repeat_times > EFUSE_REPEAT_THRESHOLD_) {
+					bcontinual = false;
+					bresult = false;
+				}
+				RTPRINT(rtlpriv, FEEPROM, EFUSE_PG,
+					("efuse PG_STATE_HEADER-3\n"));
+			}
+		}
+	}
+
+	if (efuse_addr >= (EFUSE_MAX_SIZE -
+		rtlpriv->cfg->maps[EFUSE_OOB_PROTECT_BYTES_LEN])) {
+		RT_TRACE(COMP_EFUSE, DBG_LOUD,
+			 ("efuse_addr(%#x) Out of size!!\n", efuse_addr));
+	}
+
+	return true;
+}
+
+static void efuse_word_enable_data_read(u8 word_en, u8 * sourdata,
+					u8 *targetdata)
+{
+	if (!(word_en & BIT(0))) {
+		targetdata[0] = sourdata[0];
+		targetdata[1] = sourdata[1];
+	}
+
+	if (!(word_en & BIT(1))) {
+		targetdata[2] = sourdata[2];
+		targetdata[3] = sourdata[3];
+	}
+
+	if (!(word_en & BIT(2))) {
+		targetdata[4] = sourdata[4];
+		targetdata[5] = sourdata[5];
+	}
+
+	if (!(word_en & BIT(3))) {
+		targetdata[6] = sourdata[6];
+		targetdata[7] = sourdata[7];
+	}
+}
+
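+/*
+ * Descriptive note: word_en is an active-low mask -- a cleared bit i
+ * means 16-bit word i of the 8-byte section is present/valid.  For
+ * example, word_en = 0x0D (binary 1101) selects only word 1, so two data
+ * bytes (data[2] and data[3]) are copied or written, and
+ * efuse_calculate_word_cnts(0x0D) returns 1.
+ */
+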
+static u8 efuse_word_enable_data_write(struct ieee80211_hw *hw,
+				       u16 efuse_addr, u8 word_en, u8 *data)
+{
+	struct rtl_priv *rtlpriv = rtl_priv(hw);
+	u16 tmpaddr;
+	u16 start_addr = efuse_addr;
+	u8 badworden = 0x0F;
+	u8 tmpdata[8];
+
+	memset(tmpdata, 0xff, PGPKT_DATA_SIZE);
+	RT_TRACE(COMP_EFUSE, DBG_LOUD,
+		 ("word_en = %x efuse_addr=%x\n", word_en, efuse_addr));
+
+	if (!(word_en & BIT(0))) {
+		tmpaddr = start_addr;
+		efuse_one_byte_write(hw, start_addr++, data[0]);
+		efuse_one_byte_write(hw, start_addr++, data[1]);
+
+		efuse_one_byte_read(hw, tmpaddr, &tmpdata[0]);
+		efuse_one_byte_read(hw, tmpaddr + 1, &tmpdata[1]);
+		if ((data[0] != tmpdata[0]) || (data[1] != tmpdata[1]))
+			badworden &= (~BIT(0));
+	}
+
+	if (!(word_en & BIT(1))) {
+		tmpaddr = start_addr;
+		efuse_one_byte_write(hw, start_addr++, data[2]);
+		efuse_one_byte_write(hw, start_addr++, data[3]);
+
+		efuse_one_byte_read(hw, tmpaddr, &tmpdata[2]);
+		efuse_one_byte_read(hw, tmpaddr + 1, &tmpdata[3]);
+		if ((data[2] != tmpdata[2]) || (data[3] != tmpdata[3]))
+			badworden &= (~BIT(1));
+	}
+
+	if (!(word_en & BIT(2))) {
+		tmpaddr = start_addr;
+		efuse_one_byte_write(hw, start_addr++, data[4]);
+		efuse_one_byte_write(hw, start_addr++, data[5]);
+
+		efuse_one_byte_read(hw, tmpaddr, &tmpdata[4]);
+		efuse_one_byte_read(hw, tmpaddr + 1, &tmpdata[5]);
+		if ((data[4] != tmpdata[4]) || (data[5] != tmpdata[5]))
+			badworden &= (~BIT(2));
+	}
+
+	if (!(word_en & BIT(3))) {
+		tmpaddr = start_addr;
+		efuse_one_byte_write(hw, start_addr++, data[6]);
+		efuse_one_byte_write(hw, start_addr++, data[7]);
+
+		efuse_one_byte_read(hw, tmpaddr, &tmpdata[6]);
+		efuse_one_byte_read(hw, tmpaddr + 1, &tmpdata[7]);
+		if ((data[6] != tmpdata[6]) || (data[7] != tmpdata[7]))
+			badworden &= (~BIT(3));
+	}
+
+	return badworden;
+}
+
+static void efuse_power_switch(struct ieee80211_hw *hw, u8 bwrite, u8 pwrstate)
+{
+	struct rtl_priv *rtlpriv = rtl_priv(hw);
+	struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
+	u8 tempval;
+	u16 tmpV16;
+
+	if (rtlhal->hw_type == HARDWARE_TYPE_RTL8812AE) {
+		if (pwrstate == true) {
+			rtl_write_byte(rtlpriv, rtlpriv->cfg->maps[EFUSE_ACCESS], 0x69);
+
+			/* 1.2V Power: From VDDON with Power Cut(0x0000h[15]), default valid */
+			tmpV16 = rtl_read_word(rtlpriv,
+					       rtlpriv->cfg->maps[SYS_ISO_CTRL]);
+
+			printk("SYS_ISO_CTRL=%04x.\n", tmpV16);
+			if (!(tmpV16 & PWC_EV12V)) {
+				tmpV16 |= PWC_EV12V;
+				/*PlatformEFIOWrite2Byte(pAdapter,REG_SYS_ISO_CTRL,tmpV16);*/
+			}
+			/* Reset: 0x0000h[28], default valid */
+			tmpV16 = rtl_read_word(rtlpriv, rtlpriv->cfg->maps[SYS_FUNC_EN]);
+			printk("SYS_FUNC_EN=%04x.\n", tmpV16);
+			if (!(tmpV16 & FEN_ELDR)) {
+				tmpV16 |= FEN_ELDR;
+				rtl_write_byte(rtlpriv, rtlpriv->cfg->maps[SYS_FUNC_EN], tmpV16);
+			}
+
+			/* Clock: Gated(0x0008h[5]) 8M(0x0008h[1]) clock from ANA, default valid */
+			tmpV16 = rtl_read_word(rtlpriv, rtlpriv->cfg->maps[SYS_CLK]);
+			printk("SYS_CLK=%04x.\n", tmpV16);
+			if ((!(tmpV16 & LOADER_CLK_EN)) || (!(tmpV16 & ANA8M))) {
+				tmpV16 |= (LOADER_CLK_EN | ANA8M);
+				rtl_write_byte(rtlpriv, rtlpriv->cfg->maps[SYS_CLK], tmpV16);
+			}
+
+			if (bwrite == true) {
+				/* Enable LDO 2.5V before read/write action */
+				tempval = rtl_read_word(rtlpriv, rtlpriv->cfg->maps[EFUSE_TEST] + 3);
+				printk("EFUSE_TEST=%04x.\n", tempval);
+				tempval &= ~(BIT(3) | BIT(4) | BIT(5) | BIT(6));
+				tempval |= (VOLTAGE_V25 << 3);
+				tempval |= BIT(7);
+				rtl_write_byte(rtlpriv, rtlpriv->cfg->maps[EFUSE_TEST] + 3, tempval);
+			}
+		} else {
+			rtl_write_byte(rtlpriv, rtlpriv->cfg->maps[EFUSE_ACCESS], 0x00);
+			if (bwrite == true) {
+				/* Disable LDO 2.5V after read/write action */
+				tempval = rtl_read_word(rtlpriv, rtlpriv->cfg->maps[EFUSE_TEST] + 3);
+				rtl_write_byte(rtlpriv, rtlpriv->cfg->maps[EFUSE_TEST] + 3, (tempval & 0x7F));
+			}
+		}
+	} else {
+		if (pwrstate == true && (rtlhal->hw_type !=
+			HARDWARE_TYPE_RTL8192SE)) {
+
+			if(rtlhal->hw_type == HARDWARE_TYPE_RTL8188EE)
+				rtl_write_byte(rtlpriv, rtlpriv->cfg->maps[EFUSE_ACCESS],
+						0x69);
+
+			tmpV16 = rtl_read_word(rtlpriv,
+					       rtlpriv->cfg->maps[SYS_ISO_CTRL]);
+			if (!(tmpV16 & rtlpriv->cfg->maps[EFUSE_PWC_EV12V])) {
+				tmpV16 |= rtlpriv->cfg->maps[EFUSE_PWC_EV12V];
+				rtl_write_word(rtlpriv,
+					       rtlpriv->cfg->maps[SYS_ISO_CTRL],
+					       tmpV16);
+			}
+
+			tmpV16 = rtl_read_word(rtlpriv,
+					       rtlpriv->cfg->maps[SYS_FUNC_EN]);
+			if (!(tmpV16 & rtlpriv->cfg->maps[EFUSE_FEN_ELDR])) {
+				tmpV16 |= rtlpriv->cfg->maps[EFUSE_FEN_ELDR];
+				rtl_write_word(rtlpriv,
+					       rtlpriv->cfg->maps[SYS_FUNC_EN], tmpV16);
+			}
+
+			tmpV16 = rtl_read_word(rtlpriv, rtlpriv->cfg->maps[SYS_CLK]);
+			if ((!(tmpV16 & rtlpriv->cfg->maps[EFUSE_LOADER_CLK_EN])) ||
+			    (!(tmpV16 & rtlpriv->cfg->maps[EFUSE_ANA8M]))) {
+				tmpV16 |= (rtlpriv->cfg->maps[EFUSE_LOADER_CLK_EN] |
+					   rtlpriv->cfg->maps[EFUSE_ANA8M]);
+				rtl_write_word(rtlpriv,
+					       rtlpriv->cfg->maps[SYS_CLK], tmpV16);
+			}
+		}
+
+		if (pwrstate == true) {
+			if (bwrite == true) {
+				tempval = rtl_read_byte(rtlpriv,
+							rtlpriv->cfg->maps[EFUSE_TEST] +
+							3);
+
+				if (rtlhal->hw_type != HARDWARE_TYPE_RTL8192SE) {
+					tempval &= 0x0F;
+					tempval |= (VOLTAGE_V25 << 4);
+				}
+
+				rtl_write_byte(rtlpriv,
+					       rtlpriv->cfg->maps[EFUSE_TEST] + 3,
+					       (tempval | 0x80));
+			}
+
+			if (rtlhal->hw_type == HARDWARE_TYPE_RTL8192SE) {
+				rtl_write_byte(rtlpriv, rtlpriv->cfg->maps[EFUSE_CLK],
+							0x03);
+			}
+
+		} else {
+			if(rtlhal->hw_type == HARDWARE_TYPE_RTL8188EE)
+				rtl_write_byte(rtlpriv, rtlpriv->cfg->maps[EFUSE_ACCESS], 0);
+
+			if (bwrite == true) {
+				tempval = rtl_read_byte(rtlpriv,
+							rtlpriv->cfg->maps[EFUSE_TEST] +
+							3);
+				rtl_write_byte(rtlpriv,
+					       rtlpriv->cfg->maps[EFUSE_TEST] + 3,
+					       (tempval & 0x7F));
+			}
+
+			if (rtlhal->hw_type == HARDWARE_TYPE_RTL8192SE) {
+				rtl_write_byte(rtlpriv, rtlpriv->cfg->maps[EFUSE_CLK],
+							0x02);
+			}
+
+		}
+	}
+
+}
+
+static u16 efuse_get_current_size(struct ieee80211_hw *hw)
+{
+	int bcontinual = true;
+	u16 efuse_addr = 0;
+	u8 hoffset, hworden;
+	u8 efuse_data, word_cnts;
+
+	while (bcontinual && efuse_one_byte_read(hw, efuse_addr, &efuse_data)
+	       && (efuse_addr < EFUSE_MAX_SIZE)) {
+		if (efuse_data != 0xFF) {
+			hoffset = (efuse_data >> 4) & 0x0F;
+			hworden = efuse_data & 0x0F;
+			word_cnts = efuse_calculate_word_cnts(hworden);
+			efuse_addr = efuse_addr + (word_cnts * 2) + 1;
+		} else {
+			bcontinual = false;
+		}
+	}
+
+	return efuse_addr;
+}
+
+static u8 efuse_calculate_word_cnts(u8 word_en)
+{
+	u8 word_cnts = 0;
+	if (!(word_en & BIT(0)))
+		word_cnts++;
+	if (!(word_en & BIT(1)))
+		word_cnts++;
+	if (!(word_en & BIT(2)))
+		word_cnts++;
+	if (!(word_en & BIT(3)))
+		word_cnts++;
+	return word_cnts;
+}
+
diff --git a/drivers/staging/rtl8821ae/efuse.h b/drivers/staging/rtl8821ae/efuse.h
new file mode 100644
index 0000000..a9fcbe0
--- /dev/null
+++ b/drivers/staging/rtl8821ae/efuse.h
@@ -0,0 +1,130 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2009-2010  Realtek Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ * The full GNU General Public License is included in this distribution in the
+ * file called LICENSE.
+ *
+ * Contact Information:
+ * wlanfae <wlanfae@realtek.com>
+ * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
+ * Hsinchu 300, Taiwan.
+ *
+ * Larry Finger <Larry.Finger@lwfinger.net>
+ *
+ *****************************************************************************/
+
+#ifndef __RTL_EFUSE_H_
+#define __RTL_EFUSE_H_
+
+#define EFUSE_IC_ID_OFFSET		506
+
+/*
+#define EFUSE_REAL_CONTENT_LEN	512
+#define EFUSE_MAP_LEN			128
+#define EFUSE_MAX_SECTION		16
+#define EFUSE_MAX_WORD_UNIT		4
+#define EFUSE_IC_ID_OFFSET		506
+*/
+
+#define EFUSE_MAX_WORD_UNIT		4
+
+#define EFUSE_INIT_MAP			0
+#define EFUSE_MODIFY_MAP		1
+
+#define PG_STATE_HEADER 		0x01
+#define PG_STATE_WORD_0			0x02
+#define PG_STATE_WORD_1			0x04
+#define PG_STATE_WORD_2			0x08
+#define PG_STATE_WORD_3			0x10
+#define PG_STATE_DATA			0x20
+
+#define PG_SWBYTE_H			0x01
+#define PG_SWBYTE_L			0x02
+
+#define _POWERON_DELAY_
+#define _PRE_EXECUTE_READ_CMD_
+
+#define EFUSE_REPEAT_THRESHOLD_		3
+#define EFUSE_ERROE_HANDLE		1
+
+struct efuse_map {
+	u8 offset;
+	u8 word_start;
+	u8 byte_start;
+	u8 byte_cnts;
+};
+
+struct pgpkt_struct {
+	u8 offset;
+	u8 word_en;
+	u8 data[8];
+};
+
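+/*
+ * Layout note (illustrative, matching efuse_pg_packet_read()/write() in
+ * efuse.c): a PG packet is one header byte -- section offset in the high
+ * nibble, active-low word_en in the low nibble -- followed by two data
+ * bytes for every enabled word.  A header of 0x3D (offset 3, word_en 0xD)
+ * is therefore followed by exactly two data bytes.
+ */
+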
+enum efuse_data_item {
+	EFUSE_CHIP_ID = 0,
+	EFUSE_LDO_SETTING,
+	EFUSE_CLK_SETTING,
+	EFUSE_SDIO_SETTING,
+	EFUSE_CCCR,
+	EFUSE_SDIO_MODE,
+	EFUSE_OCR,
+	EFUSE_F0CIS,
+	EFUSE_F1CIS,
+	EFUSE_MAC_ADDR,
+	EFUSE_EEPROM_VER,
+	EFUSE_CHAN_PLAN,
+	EFUSE_TXPW_TAB
+};
+
+enum {
+	VOLTAGE_V25 = 0x03,
+	LDOE25_SHIFT = 28,
+};
+
+struct efuse_priv {
+	u8 id[2];
+	u8 ldo_setting[2];
+	u8 clk_setting[2];
+	u8 cccr;
+	u8 sdio_mode;
+	u8 ocr[3];
+	u8 cis0[17];
+	u8 cis1[48];
+	u8 mac_addr[6];
+	u8 eeprom_verno;
+	u8 channel_plan;
+	u8 tx_power_b[14];
+	u8 tx_power_g[14];
+};
+
+extern void read_efuse_byte(struct ieee80211_hw *hw, u16 _offset, u8 *pbuf);
+extern void efuse_initialize(struct ieee80211_hw *hw);
+extern u8 efuse_read_1byte(struct ieee80211_hw *hw, u16 address);
+extern int efuse_one_byte_read(struct ieee80211_hw *hw, u16 addr, u8 *data);
+extern void efuse_write_1byte(struct ieee80211_hw *hw, u16 address, u8 value);
+extern void read_efuse(struct ieee80211_hw *hw, u16 _offset,
+		       u16 _size_byte, u8 * pbuf);
+extern void efuse_shadow_read(struct ieee80211_hw *hw, u8 type,
+			      u16 offset, u32 * value);
+extern void efuse_shadow_write(struct ieee80211_hw *hw, u8 type,
+			       u16 offset, u32 value);
+extern bool efuse_shadow_update(struct ieee80211_hw *hw);
+extern bool efuse_shadow_update_chk(struct ieee80211_hw *hw);
+extern void rtl_efuse_shadow_map_update(struct ieee80211_hw *hw);
+extern void efuse_force_write_vendor_Id(struct ieee80211_hw *hw);
+extern void efuse_re_pg_section(struct ieee80211_hw *hw, u8 section_idx);
+#endif
diff --git a/drivers/staging/rtl8821ae/pci.c b/drivers/staging/rtl8821ae/pci.c
new file mode 100644
index 0000000..cfa651e
--- /dev/null
+++ b/drivers/staging/rtl8821ae/pci.c
@@ -0,0 +1,2549 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2009-2010  Realtek Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ * The full GNU General Public License is included in this distribution in the
+ * file called LICENSE.
+ *
+ * Contact Information:
+ * wlanfae <wlanfae@realtek.com>
+ * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
+ * Hsinchu 300, Taiwan.
+ *
+ * Larry Finger <Larry.Finger@lwfinger.net>
+ *
+ *****************************************************************************/
+
+#include "core.h"
+#include "wifi.h"
+#include "pci.h"
+#include "base.h"
+#include "ps.h"
+#include "efuse.h"
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,2,0))
+#include <linux/export.h>
+#endif
+
+static const u16 pcibridge_vendors[PCI_BRIDGE_VENDOR_MAX] = {
+	INTEL_VENDOR_ID,
+	ATI_VENDOR_ID,
+	AMD_VENDOR_ID,
+	SIS_VENDOR_ID
+};
+
+static const u8 ac_to_hwq[] = {
+	VO_QUEUE,
+	VI_QUEUE,
+	BE_QUEUE,
+	BK_QUEUE
+};
+
+u8 _rtl_mac_to_hwqueue(struct ieee80211_hw *hw,
+		struct sk_buff *skb)
+{
+	struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
+	u16 fc = rtl_get_fc(skb);
+	u8 queue_index = skb_get_queue_mapping(skb);
+
+	if (unlikely(ieee80211_is_beacon(fc)))
+		return BEACON_QUEUE;
+	if (ieee80211_is_mgmt(fc) || ieee80211_is_ctl(fc))
+		return MGNT_QUEUE;
+	if (rtlhal->hw_type == HARDWARE_TYPE_RTL8192SE)
+		if (ieee80211_is_nullfunc(fc))
+			return HIGH_QUEUE;
+
+	return ac_to_hwq[queue_index];
+}
+
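+/*
+ * Mapping note (descriptive only): mac80211 queue indices 0..3 correspond
+ * to the VO/VI/BE/BK access categories and are translated through
+ * ac_to_hwq[] above, so e.g. a frame mapped to queue 2 is sent on
+ * BE_QUEUE.  Beacons and management/control frames bypass the table and
+ * use BEACON_QUEUE and MGNT_QUEUE, and on RTL8192SE null-function frames
+ * go to HIGH_QUEUE.
+ */
+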
+/* Update PCI dependent default settings*/
+static void _rtl_pci_update_default_setting(struct ieee80211_hw *hw)
+{
+	struct rtl_priv *rtlpriv = rtl_priv(hw);
+	struct rtl_pci_priv *pcipriv = rtl_pcipriv(hw);
+	struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw));
+	struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
+	u8 pcibridge_vendor = pcipriv->ndis_adapter.pcibridge_vendor;
+	u8 init_aspm;
+
+	ppsc->reg_rfps_level = 0;
+	ppsc->b_support_aspm = 0;
+
+	/*Update PCI ASPM setting */
+	ppsc->const_amdpci_aspm = rtlpci->const_amdpci_aspm;
+	switch (rtlpci->const_pci_aspm) {
+	case 0:
+		/*No ASPM */
+		break;
+
+	case 1:
+		/*ASPM dynamically enabled/disable. */
+		ppsc->reg_rfps_level |= RT_RF_LPS_LEVEL_ASPM;
+		break;
+
+	case 2:
+		/*ASPM with Clock Req dynamically enabled/disable. */
+		ppsc->reg_rfps_level |= (RT_RF_LPS_LEVEL_ASPM |
+					 RT_RF_OFF_LEVL_CLK_REQ);
+		break;
+
+	case 3:
+		/*
+		 * Always enable ASPM and Clock Req
+		 * from initialization to halt.
+		 * */
+		ppsc->reg_rfps_level &= ~(RT_RF_LPS_LEVEL_ASPM);
+		ppsc->reg_rfps_level |= (RT_RF_PS_LEVEL_ALWAYS_ASPM |
+					 RT_RF_OFF_LEVL_CLK_REQ);
+		break;
+
+	case 4:
+		/*
+		 * Always enable ASPM without Clock Req
+		 * from initialization to halt.
+		 * */
+		ppsc->reg_rfps_level &= ~(RT_RF_LPS_LEVEL_ASPM |
+					  RT_RF_OFF_LEVL_CLK_REQ);
+		ppsc->reg_rfps_level |= RT_RF_PS_LEVEL_ALWAYS_ASPM;
+		break;
+	}
+
+	ppsc->reg_rfps_level |= RT_RF_OFF_LEVL_HALT_NIC;
+
+	/*Update Radio OFF setting */
+	switch (rtlpci->const_hwsw_rfoff_d3) {
+	case 1:
+		if (ppsc->reg_rfps_level & RT_RF_LPS_LEVEL_ASPM)
+			ppsc->reg_rfps_level |= RT_RF_OFF_LEVL_ASPM;
+		break;
+
+	case 2:
+		if (ppsc->reg_rfps_level & RT_RF_LPS_LEVEL_ASPM)
+			ppsc->reg_rfps_level |= RT_RF_OFF_LEVL_ASPM;
+		ppsc->reg_rfps_level |= RT_RF_OFF_LEVL_HALT_NIC;
+		break;
+
+	case 3:
+		ppsc->reg_rfps_level |= RT_RF_OFF_LEVL_PCI_D3;
+		break;
+	}
+
+	/*Set HW definition to determine if it supports ASPM. */
+	switch (rtlpci->const_support_pciaspm) {
+	case 0:{
+			/*Not support ASPM. */
+			bool b_support_aspm = false;
+			ppsc->b_support_aspm = b_support_aspm;
+			break;
+		}
+	case 1:{
+			/*Support ASPM. */
+			bool b_support_aspm = true;
+			bool b_support_backdoor = true;
+			ppsc->b_support_aspm = b_support_aspm;
+
+			/*if(priv->oem_id == RT_CID_TOSHIBA &&
+			   !priv->ndis_adapter.amd_l1_patch)
+			   b_support_backdoor = false; */
+
+			ppsc->b_support_backdoor = b_support_backdoor;
+
+			break;
+		}
+	case 2:
+		/*ASPM value set by chipset. */
+		if (pcibridge_vendor == PCI_BRIDGE_VENDOR_INTEL) {
+			bool b_support_aspm = true;
+			ppsc->b_support_aspm = b_support_aspm;
+		}
+		break;
+	default:
+		RT_TRACE(COMP_ERR, DBG_EMERG,
+			 ("switch case not processed\n"));
+		break;
+	}
+
+	/* Toshiba ASPM issue: Toshiba platforms set ASPM themselves,
+	 * so we should not set ASPM in the driver */
+	pci_read_config_byte(rtlpci->pdev, 0x80, &init_aspm);
+	if (rtlpriv->rtlhal.hw_type == HARDWARE_TYPE_RTL8192SE &&
+		init_aspm == 0x43)
+		ppsc->b_support_aspm = false;
+}
+
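+/* Write the device ASPM setting to PCI config offset 0x80; on chips other
+ * than the 8192SE, bit 6 is forced on before writing. */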
+static bool _rtl_pci_platform_switch_device_pci_aspm(struct ieee80211_hw *hw,
+						     u8 value)
+{
+	struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
+	struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
+	bool bresult = false;
+
+	if (rtlhal->hw_type != HARDWARE_TYPE_RTL8192SE)
+		value |= 0x40;
+
+	pci_write_config_byte(rtlpci->pdev, 0x80, value);
+
+	return bresult;
+}
+
+/* Write 0x01 to enable the clk request, 0x00 to disable it. */
+static bool _rtl_pci_switch_clk_req(struct ieee80211_hw *hw, u8 value)
+{
+	struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
+	struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
+	bool bresult = false;
+
+	pci_write_config_byte(rtlpci->pdev, 0x81, value);
+	bresult = true;
+
+	if (rtlhal->hw_type == HARDWARE_TYPE_RTL8192SE)
+		udelay(100);
+
+	return bresult;
+}
+
+/*Disable RTL8192SE ASPM & Disable Pci Bridge ASPM*/
+static void rtl_pci_disable_aspm(struct ieee80211_hw *hw)
+{
+	struct rtl_priv *rtlpriv = rtl_priv(hw);
+	struct rtl_pci_priv *pcipriv = rtl_pcipriv(hw);
+	struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw));
+	struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
+	u8 pcibridge_vendor = pcipriv->ndis_adapter.pcibridge_vendor;
+	u32 pcicfg_addrport = pcipriv->ndis_adapter.pcicfg_addrport;
+	u8 num4bytes = pcipriv->ndis_adapter.num4bytes;
+	/*Retrieve original configuration settings. */
+	u8 linkctrl_reg = pcipriv->ndis_adapter.linkctrl_reg;
+	u16 pcibridge_linkctrlreg = pcipriv->ndis_adapter.
+				pcibridge_linkctrlreg;
+	u16 aspmlevel = 0;
+
+	if (!ppsc->b_support_aspm)
+		return;
+
+	if (pcibridge_vendor == PCI_BRIDGE_VENDOR_UNKNOWN) {
+		RT_TRACE(COMP_POWER, DBG_TRACE,
+			 ("PCI(Bridge) UNKNOWN.\n"));
+
+		return;
+	}
+
+	if (ppsc->reg_rfps_level & RT_RF_OFF_LEVL_CLK_REQ) {
+		RT_CLEAR_PS_LEVEL(ppsc, RT_RF_OFF_LEVL_CLK_REQ);
+		_rtl_pci_switch_clk_req(hw, 0x0);
+	}
+
+	if (1) {
+		/* ensure the device is in the L0 state after an I/O access */
+		u8 tmp_u1b;
+		pci_read_config_byte(rtlpci->pdev, 0x80, &tmp_u1b);
+	}
+
+	/*Set corresponding value. */
+	aspmlevel |= BIT(0) | BIT(1);
+	linkctrl_reg &= ~aspmlevel;
+	pcibridge_linkctrlreg &= ~(BIT(0) | BIT(1));
+
+	_rtl_pci_platform_switch_device_pci_aspm(hw, linkctrl_reg);
+	udelay(50);
+
+	/*4 Disable Pci Bridge ASPM */
+	rtl_pci_raw_write_port_ulong(PCI_CONF_ADDRESS,
+				     pcicfg_addrport + (num4bytes << 2));
+	rtl_pci_raw_write_port_uchar(PCI_CONF_DATA, pcibridge_linkctrlreg);
+
+	udelay(50);
+
+}
+
+/*
+ * Enable RTL8192SE ASPM & PCI bridge ASPM for power saving.
+ * We must follow the sequence: enable ASPM on the RTL8192SE first,
+ * then on the PCI bridge, or the system will bluescreen.
+ */
+static void rtl_pci_enable_aspm(struct ieee80211_hw *hw)
+{
+	struct rtl_priv *rtlpriv = rtl_priv(hw);
+	struct rtl_pci_priv *pcipriv = rtl_pcipriv(hw);
+	struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw));
+	struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
+	u8 pcibridge_vendor = pcipriv->ndis_adapter.pcibridge_vendor;
+	u32 pcicfg_addrport = pcipriv->ndis_adapter.pcicfg_addrport;
+	u8 num4bytes = pcipriv->ndis_adapter.num4bytes;
+	u16 aspmlevel;
+	u8 u_pcibridge_aspmsetting;
+	u8 u_device_aspmsetting;
+
+	if (!ppsc->b_support_aspm)
+		return;
+
+	if (pcibridge_vendor == PCI_BRIDGE_VENDOR_UNKNOWN) {
+		RT_TRACE(COMP_POWER, DBG_TRACE,
+			 ("PCI(Bridge) UNKNOWN.\n"));
+		return;
+	}
+
+	/*4 Enable Pci Bridge ASPM */
+	rtl_pci_raw_write_port_ulong(PCI_CONF_ADDRESS,
+				     pcicfg_addrport + (num4bytes << 2));
+
+	u_pcibridge_aspmsetting =
+	    pcipriv->ndis_adapter.pcibridge_linkctrlreg |
+	    rtlpci->const_hostpci_aspm_setting;
+
+	if (pcibridge_vendor == PCI_BRIDGE_VENDOR_INTEL)
+		u_pcibridge_aspmsetting &= ~BIT(0);
+
+	rtl_pci_raw_write_port_uchar(PCI_CONF_DATA, u_pcibridge_aspmsetting);
+
+	RT_TRACE(COMP_INIT, DBG_LOUD,
+		 ("PlatformEnableASPM(): Write reg[%x] = %x\n",
+		  (pcipriv->ndis_adapter.pcibridge_pciehdr_offset + 0x10),
+		  u_pcibridge_aspmsetting));
+
+	udelay(50);
+
+	/*Get ASPM level (with/without Clock Req) */
+	aspmlevel = rtlpci->const_devicepci_aspm_setting;
+	u_device_aspmsetting = pcipriv->ndis_adapter.linkctrl_reg;
+
+	/*_rtl_pci_platform_switch_device_pci_aspm(dev,*/
+	/*(priv->ndis_adapter.linkctrl_reg | ASPMLevel)); */
+
+	u_device_aspmsetting |= aspmlevel;
+
+	_rtl_pci_platform_switch_device_pci_aspm(hw, u_device_aspmsetting);
+
+	if (ppsc->reg_rfps_level & RT_RF_OFF_LEVL_CLK_REQ) {
+		_rtl_pci_switch_clk_req(hw, (ppsc->reg_rfps_level &
+					     RT_RF_OFF_LEVL_CLK_REQ) ? 1 : 0);
+		RT_SET_PS_LEVEL(ppsc, RT_RF_OFF_LEVL_CLK_REQ);
+	}
+	udelay(100);
+}
+
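+/* Check whether the AMD PCI bridge has the L1 patch applied, by probing
+ * its config registers 0xE0/0xE4 through the legacy config I/O ports. */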
+static bool rtl_pci_get_amd_l1_patch(struct ieee80211_hw *hw)
+{
+	struct rtl_pci_priv *pcipriv = rtl_pcipriv(hw);
+	u32 pcicfg_addrport = pcipriv->ndis_adapter.pcicfg_addrport;
+
+	bool status = false;
+	u8 offset_e0;
+	unsigned offset_e4;
+
+	rtl_pci_raw_write_port_ulong(PCI_CONF_ADDRESS,
+			pcicfg_addrport + 0xE0);
+	rtl_pci_raw_write_port_uchar(PCI_CONF_DATA, 0xA0);
+
+	rtl_pci_raw_write_port_ulong(PCI_CONF_ADDRESS,
+			pcicfg_addrport + 0xE0);
+	rtl_pci_raw_read_port_uchar(PCI_CONF_DATA, &offset_e0);
+
+	if (offset_e0 == 0xA0) {
+		rtl_pci_raw_write_port_ulong(PCI_CONF_ADDRESS,
+					     pcicfg_addrport + 0xE4);
+		rtl_pci_raw_read_port_ulong(PCI_CONF_DATA, &offset_e4);
+		if (offset_e4 & BIT(23))
+			status = true;
+	}
+
+	return status;
+}
+
+/*<delete in kernel start>*/
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,35))
+static u8 _rtl_pci_get_pciehdr_offset(struct ieee80211_hw *hw)
+{
+	u8 capability_offset;
+	u8 num4bytes = 0x34/4;
+	struct rtl_pci_priv *pcipriv = rtl_pcipriv(hw);
+	u32 pcicfg_addr_port = (pcipriv->ndis_adapter.pcibridge_busnum << 16)|
+			       (pcipriv->ndis_adapter.pcibridge_devnum << 11)|
+			       (pcipriv->ndis_adapter.pcibridge_funcnum << 8)|
+			       (1 << 31);
+
+	rtl_pci_raw_write_port_ulong(PCI_CONF_ADDRESS , pcicfg_addr_port
+							+ (num4bytes << 2));
+	rtl_pci_raw_read_port_uchar(PCI_CONF_DATA, &capability_offset);
+	while (capability_offset != 0) {
+		struct rtl_pci_capabilities_header capability_hdr;
+
+		num4bytes = capability_offset / 4;
+		/* Read the header of the capability at this offset.
+		 * If the retrieved capability is not the power management
+		 * capability that we are looking for, follow the link to
+		 * the next capability and continue looping.
+		 */
+		rtl_pci_raw_write_port_ulong(PCI_CONF_ADDRESS , 
+					     pcicfg_addr_port + 
+					     (num4bytes << 2));
+		rtl_pci_raw_read_port_ushort(PCI_CONF_DATA,
+					     (u16*)&capability_hdr);
+		/* Found the PCI express capability. */
+		if (capability_hdr.capability_id ==
+		    PCI_CAPABILITY_ID_PCI_EXPRESS)
+			break;
+		else
+			capability_offset = capability_hdr.next;
+	}
+	return capability_offset;
+}
+#endif
+/*<delete in kernel end>*/
+
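+/* Look for a "buddy" rtl_priv on the global private list: another function
+ * of the same PCI device (same bus/device number, different function). */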
+bool rtl_pci_check_buddy_priv(struct ieee80211_hw *hw,
+			      struct rtl_priv **buddy_priv)
+{
+	struct rtl_priv *rtlpriv = rtl_priv(hw);
+	struct rtl_pci_priv *pcipriv = rtl_pcipriv(hw);
+	bool b_find_buddy_priv = false;
+	struct rtl_priv *temp_priv = NULL;
+	struct rtl_pci_priv *temp_pcipriv = NULL;
+
+	if (!list_empty(&rtlpriv->glb_var->glb_priv_list)) {
+		list_for_each_entry(temp_priv, &rtlpriv->glb_var->glb_priv_list,
+			list) {
+			if (temp_priv) {
+				temp_pcipriv = 
+					(struct rtl_pci_priv *)temp_priv->priv;
+				RT_TRACE(COMP_INIT, DBG_LOUD,
+					(("pcipriv->ndis_adapter.funcnumber %x \n"),
+					pcipriv->ndis_adapter.funcnumber));
+				RT_TRACE(COMP_INIT, DBG_LOUD,
+					(("temp_pcipriv->ndis_adapter.funcnumber %x \n"),
+					temp_pcipriv->ndis_adapter.funcnumber));
+
+				if ((pcipriv->ndis_adapter.busnumber ==
+					temp_pcipriv->ndis_adapter.busnumber) &&
+				    (pcipriv->ndis_adapter.devnumber ==
+				    temp_pcipriv->ndis_adapter.devnumber) &&
+				    (pcipriv->ndis_adapter.funcnumber !=
+				    temp_pcipriv->ndis_adapter.funcnumber)) {
+					b_find_buddy_priv = true;
+					break;
+				}
+			}
+		}
+	}
+
+	RT_TRACE(COMP_INIT, DBG_LOUD,
+		(("b_find_buddy_priv %d \n"), b_find_buddy_priv));
+
+	if (b_find_buddy_priv)
+		*buddy_priv = temp_priv;
+
+	return b_find_buddy_priv;
+}
+
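+/* Read the PCI bridge's PCIe Link Control register (PCIe cap + 0x10)
+ * through the legacy config ports and cache it for later ASPM handling. */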
+void rtl_pci_get_linkcontrol_field(struct ieee80211_hw *hw)
+{
+	struct rtl_pci_priv *pcipriv = rtl_pcipriv(hw);
+	u8 capabilityoffset = pcipriv->ndis_adapter.pcibridge_pciehdr_offset;
+	u32 pcicfg_addrport = pcipriv->ndis_adapter.pcicfg_addrport;
+	u8 linkctrl_reg;
+	u8 num4bbytes;
+
+	num4bbytes = (capabilityoffset + 0x10) / 4;
+
+	/*Read  Link Control Register */
+	rtl_pci_raw_write_port_ulong(PCI_CONF_ADDRESS,
+				     pcicfg_addrport + (num4bbytes << 2));
+	rtl_pci_raw_read_port_uchar(PCI_CONF_DATA, &linkctrl_reg);
+
+	pcipriv->ndis_adapter.pcibridge_linkctrlreg = linkctrl_reg;
+}
+
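+/* Cache the device's own PCIe Link Control register and tweak the
+ * vendor-specific config registers 0x98 (set bit 4) and 0x70f. */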
+static void rtl_pci_parse_configuration(struct pci_dev *pdev,
+					struct ieee80211_hw *hw)
+{
+	struct rtl_priv *rtlpriv = rtl_priv(hw);
+	struct rtl_pci_priv *pcipriv = rtl_pcipriv(hw);
+
+	u8 tmp;
+	int pos;
+	u8 linkctrl_reg;
+
+	/*Link Control Register */
+	pos = pci_find_capability(pdev, PCI_CAP_ID_EXP);
+	pci_read_config_byte(pdev, pos + PCI_EXP_LNKCTL, &linkctrl_reg);
+	pcipriv->ndis_adapter.linkctrl_reg = linkctrl_reg;
+
+	RT_TRACE(COMP_INIT, DBG_TRACE,
+		 ("Link Control Register =%x\n",
+		  pcipriv->ndis_adapter.linkctrl_reg));
+
+	pci_read_config_byte(pdev, 0x98, &tmp);
+	tmp |= BIT(4);
+	pci_write_config_byte(pdev, 0x98, tmp);
+
+	tmp = 0x17;
+	pci_write_config_byte(pdev, 0x70f, tmp);
+}
+
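+/* Apply the default power-save policy and, when "always ASPM" is
+ * requested, enable ASPM immediately. */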
+static void rtl_pci_init_aspm(struct ieee80211_hw *hw)
+{
+	struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw));
+
+	_rtl_pci_update_default_setting(hw);
+
+	if (ppsc->reg_rfps_level & RT_RF_PS_LEVEL_ALWAYS_ASPM) {
+		/*Always enable ASPM & Clock Req. */
+		rtl_pci_enable_aspm(hw);
+		RT_SET_PS_LEVEL(ppsc, RT_RF_PS_LEVEL_ALWAYS_ASPM);
+	}
+
+}
+
+static void _rtl_pci_io_handler_init(struct device *dev,
+				     struct ieee80211_hw *hw)
+{
+	struct rtl_priv *rtlpriv = rtl_priv(hw);
+
+	rtlpriv->io.dev = dev;
+
+	rtlpriv->io.write8_async = pci_write8_async;
+	rtlpriv->io.write16_async = pci_write16_async;
+	rtlpriv->io.write32_async = pci_write32_async;
+
+	rtlpriv->io.read8_sync = pci_read8_sync;
+	rtlpriv->io.read16_sync = pci_read16_sync;
+	rtlpriv->io.read32_sync = pci_read32_sync;
+
+}
+
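+/* Record the lengths of the AMPDU frames still queued in skb_waitq[tid]
+ * into tcb_desc, so the early-mode header of this frame can describe them. */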
+static bool _rtl_pci_update_earlymode_info(struct ieee80211_hw *hw,
+					   struct sk_buff *skb, 
+					   struct rtl_tcb_desc *tcb_desc, 
+					   u8 tid)
+{
+	struct rtl_priv *rtlpriv = rtl_priv(hw);
+	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
+	struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
+	u8 additionlen = FCS_LEN;
+	struct sk_buff *next_skb;
+		
+	/* here open is 4, wep/tkip is 8, aes is 12*/
+	if (info->control.hw_key)
+		additionlen += info->control.hw_key->icv_len;
+
+	/* The maximum skb num is 6 */
+	tcb_desc->empkt_num = 0;
+	spin_lock_bh(&rtlpriv->locks.waitq_lock);
+	skb_queue_walk(&rtlpriv->mac80211.skb_waitq[tid], next_skb) {
+		struct ieee80211_tx_info *next_info = 
+					IEEE80211_SKB_CB(next_skb);
+		if (next_info->flags & IEEE80211_TX_CTL_AMPDU) {
+			tcb_desc->empkt_len[tcb_desc->empkt_num] =
+				next_skb->len + additionlen;
+			tcb_desc->empkt_num++;
+		} else {
+			break;
+		}
+
+		if (skb_queue_is_last(&rtlpriv->mac80211.skb_waitq[tid], 
+				      next_skb))
+			break;
+
+		if (tcb_desc->empkt_num >= rtlhal->max_earlymode_num)
+			break;
+	}
+	spin_unlock_bh(&rtlpriv->locks.waitq_lock);
+	return true;
+}
+
+/* just for early mode now */
+static void _rtl_pci_tx_chk_waitq(struct ieee80211_hw *hw)
+{
+	struct rtl_priv *rtlpriv = rtl_priv(hw);
+	struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
+	struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
+	struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
+	struct sk_buff *skb = NULL;
+	struct ieee80211_tx_info *info = NULL;
+	int tid; /* should be int */
+	
+	if (!rtlpriv->rtlhal.b_earlymode_enable)
+		return;	
+	if (rtlpriv->dm.supp_phymode_switch &&
+		(rtlpriv->easy_concurrent_ctl.bswitch_in_process ||
+		(rtlpriv->buddy_priv && 
+		 rtlpriv->buddy_priv->easy_concurrent_ctl.bswitch_in_process)))
+		return;
+	/* we just use early mode for BE/BK/VI/VO */
+	for (tid = 7; tid >= 0; tid--) {
+		u8 hw_queue = ac_to_hwq[rtl_tid_to_ac(hw, tid)];
+		struct rtl8192_tx_ring *ring = &rtlpci->tx_ring[hw_queue];
+		while (!mac->act_scanning && 
+		       rtlpriv->psc.rfpwr_state == ERFON) {
+			struct rtl_tcb_desc tcb_desc;
+			memset(&tcb_desc, 0, sizeof(struct rtl_tcb_desc));			
+
+			spin_lock_bh(&rtlpriv->locks.waitq_lock);
+			if (!skb_queue_empty(&mac->skb_waitq[tid]) &&
+			    (ring->entries - skb_queue_len(&ring->queue) > 
+			     rtlhal->max_earlymode_num)) {
+				skb = skb_dequeue(&mac->skb_waitq[tid]);
+			} else {
+				spin_unlock_bh(&rtlpriv->locks.waitq_lock);
+				break;
+			}
+			spin_unlock_bh(&rtlpriv->locks.waitq_lock);
+
+			/* Some frames can't use early mode, e.g.
+			 * multicast/broadcast/non-QoS data */
+			info = IEEE80211_SKB_CB(skb);
+			if (info->flags & IEEE80211_TX_CTL_AMPDU)
+				_rtl_pci_update_earlymode_info(hw, skb, 
+							       &tcb_desc, tid);
+
+/*<delete in kernel start>*/
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(3,7,0))
+			rtlpriv->intf_ops->adapter_tx(hw, skb, &tcb_desc);
+#else
+/*<delete in kernel end>*/
+			rtlpriv->intf_ops->adapter_tx(hw, NULL, skb, &tcb_desc);
+#endif
+/*<delete in kernel end>*/
+		}
+	}
+}
+
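+/* TX completion handler for one ring: unmap and free finished skbs,
+ * report their status to mac80211 and wake the queue when enough
+ * descriptors become free again. */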
+static void _rtl_pci_tx_isr(struct ieee80211_hw *hw, int prio)
+{
+	struct rtl_priv *rtlpriv = rtl_priv(hw);
+	struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
+	struct rtl8192_tx_ring *ring = &rtlpci->tx_ring[prio];
+
+	while (skb_queue_len(&ring->queue)) {
+		struct sk_buff *skb;
+		struct ieee80211_tx_info *info;
+		u16 fc;
+		u8 tid;
+		u8 *entry;
+
+		if (rtlpriv->use_new_trx_flow)
+			entry = (u8 *)(&ring->buffer_desc[ring->idx]);
+		else
+			entry = (u8 *)(&ring->desc[ring->idx]);
+
+		if (!rtlpriv->cfg->ops->is_tx_desc_closed(hw, prio, ring->idx))
+			return;
+
+		ring->idx = (ring->idx + 1) % ring->entries;
+
+		skb = __skb_dequeue(&ring->queue);
+
+		pci_unmap_single(rtlpci->pdev,
+				 le32_to_cpu(rtlpriv->cfg->ops->
+					     get_desc((u8 *) entry, true,
+						      HW_DESC_TXBUFF_ADDR)),
+				 skb->len, PCI_DMA_TODEVICE);
+
+		/* remove early mode header */
+		if(rtlpriv->rtlhal.b_earlymode_enable)
+			skb_pull(skb, EM_HDR_LEN);
+
+		RT_TRACE((COMP_INTR | COMP_SEND), DBG_TRACE,
+			 ("new ring->idx:%d, "
+			  "free: skb_queue_len:%d, free: seq:%d\n",
+			  ring->idx,
+			  skb_queue_len(&ring->queue),
+			  *(u16 *) (skb->data + 22)));
+
+		if(prio == TXCMD_QUEUE) {
+			dev_kfree_skb(skb);
+			goto tx_status_ok;
+
+		}
+
+		/* for sw LPS, just after the NULL skb is sent out, we can
+		 * be sure the AP knows we are asleep, or we should not let
+		 * the rf go to sleep */
+		fc = rtl_get_fc(skb);
+		if (ieee80211_is_nullfunc(fc)) {
+			if(ieee80211_has_pm(fc)) {
+				rtlpriv->mac80211.offchan_deley = true;
+				rtlpriv->psc.state_inap = 1;
+			} else {
+				rtlpriv->psc.state_inap = 0;
+			}
+		}
+		if (ieee80211_is_action(fc)) {
+			struct ieee80211_mgmt_compat *action_frame =
+				(struct ieee80211_mgmt_compat *)skb->data;
+			if (action_frame->u.action.u.ht_smps.action ==
+				WLAN_HT_ACTION_SMPS) {
+				dev_kfree_skb(skb);
+				goto tx_status_ok;
+			}
+		}
+
+		/* update tid tx pkt num */
+		tid = rtl_get_tid(skb);
+		if (tid <= 7)
+			rtlpriv->link_info.tidtx_inperiod[tid]++;
+
+		info = IEEE80211_SKB_CB(skb);
+		ieee80211_tx_info_clear_status(info);
+
+		info->flags |= IEEE80211_TX_STAT_ACK;
+		/*info->status.rates[0].count = 1; */
+
+		ieee80211_tx_status_irqsafe(hw, skb);
+
+		if ((ring->entries - skb_queue_len(&ring->queue))
+				== 2) {
+
+			RT_TRACE(COMP_ERR, DBG_LOUD,
+					("more desc left, wake "
+					 "skb_queue@%d, ring->idx = %d, "
+					 "skb_queue_len = %d\n",
+					 prio, ring->idx,
+					 skb_queue_len(&ring->queue)));
+
+			ieee80211_wake_queue(hw,
+					skb_get_queue_mapping
+					(skb));
+		}
+tx_status_ok:
+		skb = NULL;
+	}
+
+	if (((rtlpriv->link_info.num_rx_inperiod +
+		rtlpriv->link_info.num_tx_inperiod) > 8) ||
+		(rtlpriv->link_info.num_rx_inperiod > 2)) {
+		rtl_lps_leave(hw);
+	}
+}
+
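+/* Allocate and DMA-map one RX skb and attach it to the given RX
+ * descriptor; returns 1 on success and 0 on failure. */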
+static int _rtl_pci_init_one_rxdesc(struct ieee80211_hw *hw,
+	u8 *entry, int rxring_idx, int desc_idx)
+{
+	struct rtl_priv *rtlpriv = rtl_priv(hw);
+	struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
+	u32 bufferaddress;
+	u8 tmp_one = 1;
+	struct sk_buff *skb;
+
+	skb = dev_alloc_skb(rtlpci->rxbuffersize);	
+	if (!skb)
+		return 0;
+	rtlpci->rx_ring[rxring_idx].rx_buf[desc_idx] = skb;
+
+	/* just set skb->cb to mapping addr
+	 * for pci_unmap_single use */
+	*((dma_addr_t *) skb->cb) = pci_map_single(rtlpci->pdev,
+				skb_tail_pointer(skb), rtlpci->rxbuffersize,
+				PCI_DMA_FROMDEVICE);
+	bufferaddress = cpu_to_le32(*((dma_addr_t *) skb->cb));
+	if (pci_dma_mapping_error(rtlpci->pdev, bufferaddress))
+		return 0;
+	if (rtlpriv->use_new_trx_flow) {
+		rtlpriv->cfg->ops->set_desc(hw, (u8 *) entry, false, 
+					    HW_DESC_RX_PREPARE,
+					    (u8 *) & bufferaddress);
+	} else {	
+		rtlpriv->cfg->ops->set_desc(hw, (u8 *) entry, false, 
+					    HW_DESC_RXBUFF_ADDR,
+					    (u8 *) & bufferaddress);
+		rtlpriv->cfg->ops->set_desc(hw, (u8 *) entry, false, 
+					    HW_DESC_RXPKT_LEN,
+					    (u8 *) & rtlpci->rxbuffersize);
+		rtlpriv->cfg->ops->set_desc(hw, (u8 *) entry, false, 
+					    HW_DESC_RXOWN,
+					    (u8 *) & tmp_one);
+	}
+	
+	return 1;
+}
+
+/* In order to receive 8K AMSDUs we allocate 9100-byte skbs when the rx
+ * ring is initialized. If a packet is not an AMSDU, passing such a big
+ * skb to TCP/IP directly makes large-packet pings (e.g. "ping -s 65507")
+ * fail, so here we realloc the skb based on the true packet size. It
+ * would be better if mac80211 did this, but it doesn't yet. */
+
+/* On some platforms the skb allocation may occasionally fail. In that
+ * case we hand the old skb to mac80211 directly; this causes no other
+ * issue, the packet may only be dropped by TCP/IP. */
+static void _rtl_pci_rx_to_mac80211(struct ieee80211_hw *hw,
+	struct sk_buff *skb, struct ieee80211_rx_status rx_status)
+{
+	if (unlikely(!rtl_action_proc(hw, skb, false))) {
+		dev_kfree_skb_any(skb);
+	} else {
+		struct sk_buff *uskb = NULL;
+		u8 *pdata;
+		
+		uskb = dev_alloc_skb(skb->len + 128);
+		if (likely(uskb)) {
+			memcpy(IEEE80211_SKB_RXCB(uskb), &rx_status,
+					sizeof(rx_status));
+			pdata = (u8 *)skb_put(uskb, skb->len);
+			memcpy(pdata, skb->data, skb->len);
+			dev_kfree_skb_any(skb);
+
+			ieee80211_rx_irqsafe(hw, uskb);
+		} else {
+			ieee80211_rx_irqsafe(hw, skb);
+		}	
+	}
+}
+
+/*hsisr interrupt handler*/
+static void _rtl_pci_hs_interrupt(struct ieee80211_hw *hw)
+{
+	struct rtl_priv *rtlpriv = rtl_priv(hw);
+	struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
+
+	rtl_write_byte(rtlpriv, rtlpriv->cfg->maps[MAC_HSISR], 
+		       rtl_read_byte(rtlpriv, rtlpriv->cfg->maps[MAC_HSISR]) | 
+		       rtlpci->sys_irq_mask);
+}
+
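+/* RX interrupt handler: walk the RX ring, hand completed frames to
+ * mac80211 and refill the descriptors with fresh skbs. */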
+static void _rtl_pci_rx_interrupt(struct ieee80211_hw *hw)
+{
+	struct rtl_priv *rtlpriv = rtl_priv(hw);
+	struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
+	int rxring_idx = RTL_PCI_RX_MPDU_QUEUE;
+
+	struct ieee80211_rx_status rx_status = { 0 };
+	unsigned int count = rtlpci->rxringcount;
+	bool unicast = false;
+	u8 hw_queue = 0;
+	unsigned int rx_remained_cnt;
+	u8 own;
+	u8 tmp_one;
+
+	struct rtl_stats status = {
+		.signal = 0,
+		.noise = -98,
+		.rate = 0,
+	};
+	
+	/*RX NORMAL PKT */
+	while (count--) {
+		struct ieee80211_hdr *hdr;
+		u16 fc;
+		u16 len;
+		/*rx buffer descriptor */
+		struct rtl_rx_buffer_desc *buffer_desc = NULL;
+		/*if use new trx flow, it means wifi info */
+		struct rtl_rx_desc *pdesc = NULL;
+		/*rx pkt */
+		struct sk_buff *skb = rtlpci->rx_ring[rxring_idx].rx_buf[
+					rtlpci->rx_ring[rxring_idx].idx];
+		
+		if (rtlpriv->use_new_trx_flow) {
+			rx_remained_cnt = 
+				rtlpriv->cfg->ops->rx_desc_buff_remained_cnt(hw,
+								      hw_queue);
+			if (rx_remained_cnt < 1) 
+				return;
+			
+		} else {	/* rx descriptor */
+			pdesc = &rtlpci->rx_ring[rxring_idx].desc[
+				rtlpci->rx_ring[rxring_idx].idx];
+			
+			own = (u8) rtlpriv->cfg->ops->get_desc((u8 *) pdesc,
+							       false, 
+							       HW_DESC_OWN);
+			if (own) /* wait data to be filled by hardware */
+				return;
+		}
+		
+		/* Getting here means the data has been filled already */
+		/* Attention!
+		 * We can NOT access 'skb' before 'pci_unmap_single' */
+		pci_unmap_single(rtlpci->pdev, *((dma_addr_t *) skb->cb),
+			 	 rtlpci->rxbuffersize, PCI_DMA_FROMDEVICE);
+		
+		if (rtlpriv->use_new_trx_flow) {
+			buffer_desc = &rtlpci->rx_ring[rxring_idx].buffer_desc[
+				rtlpci->rx_ring[rxring_idx].idx];
+			/*means rx wifi info*/
+			pdesc = (struct rtl_rx_desc *)skb->data;
+		}
+		
+		rtlpriv->cfg->ops->query_rx_desc(hw, &status,
+						 &rx_status, (u8 *) pdesc, skb);
+			
+		if (rtlpriv->use_new_trx_flow)
+			rtlpriv->cfg->ops->rx_check_dma_ok(hw, 
+							   (u8 *)buffer_desc, 
+							   hw_queue);
+
+			
+		len = rtlpriv->cfg->ops->get_desc((u8 *)pdesc, false, 
+						  HW_DESC_RXPKT_LEN);
+			
+		if (skb->end - skb->tail > len) {
+			skb_put(skb, len);
+			if (rtlpriv->use_new_trx_flow)	
+				skb_reserve(skb, status.rx_drvinfo_size + 
+						 status.rx_bufshift + 24);
+			else
+				skb_reserve(skb, status.rx_drvinfo_size + 
+						 status.rx_bufshift);
+
+		} else {
+			printk("skb->end - skb->tail = %d, len is %d\n", 
+			       skb->end - skb->tail, len);
+			break;
+		}
+			
+		rtlpriv->cfg->ops->rx_command_packet_handler(hw, status, skb);
+
+		/*
+		 * NOTICE: this must not be done here for mac80211,
+		 * it is done in the mac80211 code; if you do it here,
+		 * secured DHCP will fail:
+		 * skb_trim(skb, skb->len - 4);
+		 */
+
+		hdr = rtl_get_hdr(skb);
+		fc = rtl_get_fc(skb);
+		
+		if (!status.b_crc && !status.b_hwerror) {
+			memcpy(IEEE80211_SKB_RXCB(skb), &rx_status, 
+			       sizeof(rx_status));
+
+			if (is_broadcast_ether_addr(hdr->addr1)) {
+				;/*TODO*/
+			} else if (is_multicast_ether_addr(hdr->addr1)) {
+			 	;/*TODO*/
+			} else {
+				unicast = true;
+				rtlpriv->stats.rxbytesunicast += skb->len;
+			}
+
+			rtl_is_special_data(hw, skb, false);
+
+			if (ieee80211_is_data(fc)) {
+				rtlpriv->cfg->ops->led_control(hw, LED_CTL_RX);
+
+				if (unicast)
+					rtlpriv->link_info.num_rx_inperiod++;
+			}
+
+			/* beacon statistics for roaming */
+			rtl_beacon_statistic(hw, skb);
+			rtl_p2p_info(hw, (void*)skb->data, skb->len);	
+			/* for sw lps */
+			rtl_swlps_beacon(hw, (void*)skb->data, skb->len);
+			rtl_recognize_peer(hw, (void*)skb->data, skb->len);
+			if ((rtlpriv->mac80211.opmode == NL80211_IFTYPE_AP) &&
+			    (rtlpriv->rtlhal.current_bandtype == BAND_ON_2_4G)&&
+			    (ieee80211_is_beacon(fc) || 
+			     ieee80211_is_probe_resp(fc))) {
+				dev_kfree_skb_any(skb);
+			} else {
+				_rtl_pci_rx_to_mac80211(hw, skb, rx_status);
+			}
+		} else {
+			dev_kfree_skb_any(skb);
+		}
+		if (rtlpriv->use_new_trx_flow) {
+			rtlpci->rx_ring[hw_queue].next_rx_rp += 1;
+			rtlpci->rx_ring[hw_queue].next_rx_rp %= 
+							RTL_PCI_MAX_RX_COUNT;
+
+
+			rx_remained_cnt--;
+			if (1/*rx_remained_cnt == 0*/) {
+				rtl_write_word(rtlpriv, 0x3B4, 
+					rtlpci->rx_ring[hw_queue].next_rx_rp);
+			}
+		}
+		if (((rtlpriv->link_info.num_rx_inperiod +
+		      rtlpriv->link_info.num_tx_inperiod) > 8) ||
+		    (rtlpriv->link_info.num_rx_inperiod > 2)) {
+			rtl_lps_leave(hw);
+		}
+
+		if (rtlpriv->use_new_trx_flow) {
+			_rtl_pci_init_one_rxdesc(hw, (u8 *)buffer_desc, 
+						 rxring_idx,
+					       rtlpci->rx_ring[rxring_idx].idx);			
+		} else {
+			_rtl_pci_init_one_rxdesc(hw, (u8 *)pdesc, rxring_idx,
+					       rtlpci->rx_ring[rxring_idx].idx);			
+
+			if (rtlpci->rx_ring[rxring_idx].idx == 
+			    rtlpci->rxringcount - 1)
+				rtlpriv->cfg->ops->set_desc(hw, (u8 *) pdesc, 
+							    false, 
+							    HW_DESC_RXERO,
+							    (u8 *) & tmp_one);
+		}
+		rtlpci->rx_ring[rxring_idx].idx = 
+				(rtlpci->rx_ring[rxring_idx].idx + 1) % 
+				rtlpci->rxringcount;
+	}
+}
+
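+/* Top-level interrupt handler: mask the interrupt registers, read the
+ * ISR and dispatch beacon/TX/RX/FW/HSISR events, then restore the mask. */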
+static irqreturn_t _rtl_pci_interrupt(int irq, void *dev_id)
+{
+	struct ieee80211_hw *hw = dev_id;
+	struct rtl_priv *rtlpriv = rtl_priv(hw);
+	struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
+	struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
+	unsigned long flags;
+	u32 inta = 0;
+	u32 intb = 0;
+
+	if (rtlpci->irq_enabled == 0)
+		return IRQ_HANDLED;
+
+	spin_lock_irqsave(&rtlpriv->locks.irq_th_lock, flags);
+
+	rtl_write_dword(rtlpriv, rtlpriv->cfg->maps[MAC_HIMR], 0x0);
+
+	rtl_write_dword(rtlpriv, rtlpriv->cfg->maps[MAC_HIMRE], 0x0);
+
+	/*read ISR: 4/8bytes */
+	rtlpriv->cfg->ops->interrupt_recognized(hw, &inta, &intb);
+
+	/*Shared IRQ or HW disappeared */
+	if (!inta || inta == 0xffff)
+		goto done;
+	/*<1> beacon related */
+	if (inta & rtlpriv->cfg->maps[RTL_IMR_TBDOK]) {
+		RT_TRACE(COMP_INTR, DBG_TRACE, ("beacon ok interrupt!\n"));
+	}
+
+	if (unlikely(inta & rtlpriv->cfg->maps[RTL_IMR_TBDER])) {
+		RT_TRACE(COMP_INTR, DBG_TRACE, ("beacon err interrupt!\n"));
+	}
+
+	if (inta & rtlpriv->cfg->maps[RTL_IMR_BDOK]) {
+		RT_TRACE(COMP_INTR, DBG_TRACE, ("beacon interrupt!\n"));
+	}
+
+	if (inta & rtlpriv->cfg->maps[RTL_IMR_BcnInt]) {
+		RT_TRACE(COMP_INTR, DBG_TRACE,
+			 ("prepare beacon for interrupt!\n"));
+		tasklet_schedule(&rtlpriv->works.irq_prepare_bcn_tasklet);
+	}
+
+
+	/*<2> tx related */
+	if (unlikely(intb & rtlpriv->cfg->maps[RTL_IMR_TXFOVW]))
+		RT_TRACE(COMP_ERR, DBG_TRACE, ("IMR_TXFOVW!\n"));
+
+	if (inta & rtlpriv->cfg->maps[RTL_IMR_MGNTDOK]) {
+		RT_TRACE(COMP_INTR, DBG_TRACE, ("Manage ok interrupt!\n"));
+		_rtl_pci_tx_isr(hw, MGNT_QUEUE);
+	}
+
+	if (inta & rtlpriv->cfg->maps[RTL_IMR_HIGHDOK]) {
+		RT_TRACE(COMP_INTR, DBG_TRACE, ("HIGH_QUEUE ok interrupt!\n"));
+		_rtl_pci_tx_isr(hw, HIGH_QUEUE);
+	}
+
+	if (inta & rtlpriv->cfg->maps[RTL_IMR_BKDOK]) {
+		rtlpriv->link_info.num_tx_inperiod++;
+
+		RT_TRACE(COMP_INTR, DBG_TRACE, ("BK Tx OK interrupt!\n"));
+		_rtl_pci_tx_isr(hw, BK_QUEUE);
+	}
+
+	if (inta & rtlpriv->cfg->maps[RTL_IMR_BEDOK]) {
+		rtlpriv->link_info.num_tx_inperiod++;
+
+		RT_TRACE(COMP_INTR, DBG_TRACE, ("BE TX OK interrupt!\n"));
+		_rtl_pci_tx_isr(hw, BE_QUEUE);
+	}
+
+	if (inta & rtlpriv->cfg->maps[RTL_IMR_VIDOK]) {
+		rtlpriv->link_info.num_tx_inperiod++;
+
+		RT_TRACE(COMP_INTR, DBG_TRACE, ("VI TX OK interrupt!\n"));
+		_rtl_pci_tx_isr(hw, VI_QUEUE);
+	}
+
+	if (inta & rtlpriv->cfg->maps[RTL_IMR_VODOK]) {
+		rtlpriv->link_info.num_tx_inperiod++;
+
+		RT_TRACE(COMP_INTR, DBG_TRACE, ("Vo TX OK interrupt!\n"));
+		_rtl_pci_tx_isr(hw, VO_QUEUE);
+	}
+
+	if (rtlhal->hw_type == HARDWARE_TYPE_RTL8192SE) {
+		if (inta & rtlpriv->cfg->maps[RTL_IMR_COMDOK]) {
+			rtlpriv->link_info.num_tx_inperiod++;
+
+			RT_TRACE(COMP_INTR, DBG_TRACE,
+				 ("CMD TX OK interrupt!\n"));
+			_rtl_pci_tx_isr(hw, TXCMD_QUEUE);
+		}
+	}
+
+	/*<3> rx related */
+	if (inta & rtlpriv->cfg->maps[RTL_IMR_ROK]) {
+		RT_TRACE(COMP_INTR, DBG_TRACE, ("Rx ok interrupt!\n"));
+
+		_rtl_pci_rx_interrupt(hw);
+
+	}
+
+	if (unlikely(inta & rtlpriv->cfg->maps[RTL_IMR_RDU])) {
+		RT_TRACE(COMP_ERR, DBG_WARNING,
+			 ("rx descriptor unavailable!\n"));
+		rtl_write_byte(rtlpriv, 0xb4, BIT(1) );
+		_rtl_pci_rx_interrupt(hw);
+	}
+
+	if (unlikely(intb & rtlpriv->cfg->maps[RTL_IMR_RXFOVW])) {
+		RT_TRACE(COMP_ERR, DBG_WARNING, ("rx overflow !\n"));
+		_rtl_pci_rx_interrupt(hw);
+	}
+
+	/*<4> fw related*/
+	if (rtlhal->hw_type == HARDWARE_TYPE_RTL8723AE) {
+		if (inta & rtlpriv->cfg->maps[RTL_IMR_C2HCMD]) {
+			RT_TRACE(COMP_INTR, DBG_TRACE, 
+				 ("firmware interrupt!\n"));
+			queue_delayed_work(rtlpriv->works.rtl_wq,
+					   &rtlpriv->works.fwevt_wq, 0);
+		}
+	}
+
+	/*<5> hsisr related*/
+	/* Only 8188EE & 8723BE are supported here.
+	 * If other ICs came in, the system would be corrupted,
+	 * because maps[RTL_IMR_HSISR_IND] & maps[MAC_HSISR]
+	 * are not initialized */
+	if (rtlhal->hw_type == HARDWARE_TYPE_RTL8188EE ||
+	    rtlhal->hw_type == HARDWARE_TYPE_RTL8723BE) {
+		if (unlikely(inta & rtlpriv->cfg->maps[RTL_IMR_HSISR_IND])) {
+			RT_TRACE(COMP_INTR, DBG_TRACE, 
+					 ("hsisr interrupt!\n"));
+			_rtl_pci_hs_interrupt(hw);
+		}
+	}
+
+	if (rtlpriv->rtlhal.b_earlymode_enable)
+		tasklet_schedule(&rtlpriv->works.irq_tasklet);
+
+	rtl_write_dword(rtlpriv, rtlpriv->cfg->maps[MAC_HIMR],
+			rtlpci->irq_mask[0]);
+	rtl_write_dword(rtlpriv, rtlpriv->cfg->maps[MAC_HIMRE],
+			rtlpci->irq_mask[1]);
+	spin_unlock_irqrestore(&rtlpriv->locks.irq_th_lock, flags);
+	
+	return IRQ_HANDLED;
+
+done:
+	spin_unlock_irqrestore(&rtlpriv->locks.irq_th_lock, flags);
+	return IRQ_HANDLED;
+}
+
+static void _rtl_pci_irq_tasklet(struct ieee80211_hw *hw)
+{
+	_rtl_pci_tx_chk_waitq(hw);
+}
+
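+/* Tasklet that prepares the next beacon: drop the previously queued
+ * beacon skb, get a fresh one from mac80211, fill its TX descriptor and
+ * hand it back to the hardware. */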
+static void _rtl_pci_prepare_bcn_tasklet(struct ieee80211_hw *hw)
+{
+	struct rtl_priv *rtlpriv = rtl_priv(hw);
+	struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
+	struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
+	struct rtl8192_tx_ring *ring = NULL;
+	struct ieee80211_hdr *hdr = NULL;
+	struct ieee80211_tx_info *info = NULL;
+	struct sk_buff *pskb = NULL;
+	struct rtl_tx_desc *pdesc = NULL;
+	struct rtl_tcb_desc tcb_desc;
+	/*This is for new trx flow*/
+	struct rtl_tx_buffer_desc *pbuffer_desc = NULL;
+	u8 temp_one = 1;
+
+	memset(&tcb_desc, 0, sizeof(struct rtl_tcb_desc));
+	ring = &rtlpci->tx_ring[BEACON_QUEUE];
+	pskb = __skb_dequeue(&ring->queue);
+	if (pskb)
+		kfree_skb(pskb);
+
+	/*NB: the beacon data buffer must be 32-bit aligned. */
+	pskb = ieee80211_beacon_get(hw, mac->vif);
+	if (pskb == NULL)
+		return;
+	hdr = rtl_get_hdr(pskb);
+	info = IEEE80211_SKB_CB(pskb);
+	pdesc = &ring->desc[0];
+	if (rtlpriv->use_new_trx_flow)
+		pbuffer_desc = &ring->buffer_desc[0];
+	
+/*<delete in kernel start>*/
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(3,7,0))
+	rtlpriv->cfg->ops->fill_tx_desc(hw, hdr, (u8 *) pdesc, 
+				        (u8 *)pbuffer_desc, info, pskb, 
+				        BEACON_QUEUE, &tcb_desc);
+#else
+/*<delete in kernel end>*/
+	rtlpriv->cfg->ops->fill_tx_desc(hw, hdr, (u8 *) pdesc,
+					(u8 *)pbuffer_desc, info, NULL, pskb, 
+					BEACON_QUEUE, &tcb_desc);
+/*<delete in kernel start>*/
+#endif
+/*<delete in kernel end>*/
+
+	__skb_queue_tail(&ring->queue, pskb);
+
+	rtlpriv->cfg->ops->set_desc(hw, (u8 *) pdesc, true, HW_DESC_OWN,
+				    (u8 *) & temp_one);
+
+	return;
+}
+
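+/* Set the default ring sizes: per-queue TX descriptor counts, a 2-entry
+ * beacon ring, the RX ring count and the 9100-byte RX buffer size. */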
+static void _rtl_pci_init_trx_var(struct ieee80211_hw *hw)
+{
+	struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
+	struct rtl_priv *rtlpriv = rtl_priv(hw);
+	struct rtl_hal *rtlhal = rtl_hal(rtlpriv);
+	u8 i;
+	u16 desc_num;
+
+	if (rtlhal->hw_type == HARDWARE_TYPE_RTL8192EE)
+		desc_num = TX_DESC_NUM_92E;
+	else
+		desc_num = RT_TXDESC_NUM;
+	
+	for (i = 0; i < RTL_PCI_MAX_TX_QUEUE_COUNT; i++) {
+		rtlpci->txringcount[i] = desc_num;
+	}
+	/*
+	 * we just alloc 2 desc for the beacon queue,
+	 * because we only need the first desc for the hw beacon.
+	 */
+	rtlpci->txringcount[BEACON_QUEUE] = 2;
+
+	/*
+	 * The BE queue needs more descriptors for performance reasons,
+	 * or "no more tx desc" will happen and may cause a mac80211
+	 * memory leak.
+	 */
+	if (rtl_priv(hw)->use_new_trx_flow == false)
+		rtlpci->txringcount[BE_QUEUE] = RT_TXDESC_NUM_BE_QUEUE;
+
+	rtlpci->rxbuffersize = 9100;	/*2048/1024; */
+	rtlpci->rxringcount = RTL_PCI_MAX_RX_COUNT;	/*64; */
+}
+
+static void _rtl_pci_init_struct(struct ieee80211_hw *hw,
+		struct pci_dev *pdev)
+{
+	struct rtl_priv *rtlpriv = rtl_priv(hw);
+	struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
+	struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
+	struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
+
+	rtlpriv->rtlhal.up_first_time = true;
+	rtlpriv->rtlhal.being_init_adapter = false;
+
+	rtlhal->hw = hw;
+	rtlpci->pdev = pdev;
+
+	/*Tx/Rx related var */
+	_rtl_pci_init_trx_var(hw);
+
+	/*IBSS*/
+	mac->beacon_interval = 100;
+
+	/*AMPDU*/
+	mac->min_space_cfg = 0;
+	mac->max_mss_density = 0;
+	/*set sane AMPDU defaults */
+	mac->current_ampdu_density = 7;
+	mac->current_ampdu_factor = 3;
+
+	/*QOS*/
+	rtlpci->acm_method = eAcmWay2_SW;
+
+	/*task */
+	tasklet_init(&rtlpriv->works.irq_tasklet,
+		     (void (*)(unsigned long))_rtl_pci_irq_tasklet,
+		     (unsigned long)hw);
+	tasklet_init(&rtlpriv->works.irq_prepare_bcn_tasklet,
+		     (void (*)(unsigned long))_rtl_pci_prepare_bcn_tasklet,
+		     (unsigned long)hw);
+}
+
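+/* Allocate the DMA descriptors (plus buffer descriptors for the new TRX
+ * flow) for one TX ring and, for the legacy flow, chain them together. */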
+static int _rtl_pci_init_tx_ring(struct ieee80211_hw *hw,
+				 unsigned int prio, unsigned int entries)
+{
+	struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
+	struct rtl_priv *rtlpriv = rtl_priv(hw);
+	struct rtl_tx_buffer_desc *buffer_desc;
+	struct rtl_tx_desc *desc;
+	dma_addr_t buffer_desc_dma, desc_dma;
+	u32 nextdescaddress;
+	int i;
+
+	/* alloc tx buffer desc for new trx flow*/
+	if (rtlpriv->use_new_trx_flow) {
+		buffer_desc = pci_alloc_consistent(rtlpci->pdev,
+					    sizeof(*buffer_desc) * entries, 
+					    &buffer_desc_dma);
+
+		if (!buffer_desc || (unsigned long)buffer_desc & 0xFF) {
+			RT_TRACE(COMP_ERR, DBG_EMERG,
+				 ("Cannot allocate TX ring (prio = %d)\n", 
+				 prio));
+			return -ENOMEM;
+		}
+
+		memset(buffer_desc, 0, sizeof(*buffer_desc) * entries);
+		rtlpci->tx_ring[prio].buffer_desc = buffer_desc;
+		rtlpci->tx_ring[prio].buffer_desc_dma = buffer_desc_dma;
+		
+		rtlpci->tx_ring[prio].cur_tx_rp = 0;
+		rtlpci->tx_ring[prio].cur_tx_wp = 0;
+		rtlpci->tx_ring[prio].avl_desc = entries;
+
+	}
+	
+	/* alloc dma for this ring */
+	desc = pci_alloc_consistent(rtlpci->pdev,
+				    sizeof(*desc) * entries, &desc_dma);
+
+	if (!desc || (unsigned long)desc & 0xFF) {
+		RT_TRACE(COMP_ERR, DBG_EMERG,
+			 ("Cannot allocate TX ring (prio = %d)\n", prio));
+		return -ENOMEM;
+	}
+
+	memset(desc, 0, sizeof(*desc) * entries);
+	rtlpci->tx_ring[prio].desc = desc;
+	rtlpci->tx_ring[prio].dma = desc_dma;
+	
+	rtlpci->tx_ring[prio].idx = 0;
+	rtlpci->tx_ring[prio].entries = entries;
+	skb_queue_head_init(&rtlpci->tx_ring[prio].queue);
+	RT_TRACE(COMP_INIT, DBG_LOUD,
+		 ("queue:%d, ring_addr:%p\n", prio, desc));
+
+	/* init every desc in this ring */
+	if (rtlpriv->use_new_trx_flow == false) {
+		for (i = 0; i < entries; i++) {
+			nextdescaddress = cpu_to_le32((u32) desc_dma +
+						      ((i +	1) % entries) *
+						      sizeof(*desc));
+
+			rtlpriv->cfg->ops->set_desc(hw, (u8 *) & (desc[i]),
+						    true, 
+						    HW_DESC_TX_NEXTDESC_ADDR,
+						    (u8 *) & nextdescaddress);
+		}
+	}
+	return 0;
+}
+
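+/* Allocate one RX ring (buffer descriptors for the new TRX flow, legacy
+ * descriptors otherwise) and populate every entry with a mapped skb. */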
+static int _rtl_pci_init_rx_ring(struct ieee80211_hw *hw, int rxring_idx)
+{
+	struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
+	struct rtl_priv *rtlpriv = rtl_priv(hw);
+
+	int i;
+	
+	if (rtlpriv->use_new_trx_flow) {
+		struct rtl_rx_buffer_desc *entry = NULL;
+		/* alloc dma for this ring */
+		rtlpci->rx_ring[rxring_idx].buffer_desc = 
+		    pci_alloc_consistent(rtlpci->pdev,
+					 sizeof(*rtlpci->rx_ring[rxring_idx].
+					        buffer_desc) * 
+					        rtlpci->rxringcount, 
+					 &rtlpci->rx_ring[rxring_idx].dma);
+		if (!rtlpci->rx_ring[rxring_idx].buffer_desc ||
+		    (unsigned long)rtlpci->rx_ring[rxring_idx].buffer_desc & 0xFF) {
+			RT_TRACE(COMP_ERR, DBG_EMERG, ("Cannot allocate RX ring\n"));
+			return -ENOMEM;
+		}
+
+		memset(rtlpci->rx_ring[rxring_idx].buffer_desc, 0,
+		       sizeof(*rtlpci->rx_ring[rxring_idx].buffer_desc) *
+		       rtlpci->rxringcount);
+
+		/* init every desc in this ring */
+		rtlpci->rx_ring[rxring_idx].idx = 0;
+		for (i = 0; i < rtlpci->rxringcount; i++) {			
+			entry = &rtlpci->rx_ring[rxring_idx].buffer_desc[i];
+			if (!_rtl_pci_init_one_rxdesc(hw, (u8 *)entry, 
+						      rxring_idx, i))
+				return -ENOMEM;
+		}
+	} else {
+		struct rtl_rx_desc *entry = NULL;
+		u8 tmp_one = 1;
+		/* alloc dma for this ring */
+		rtlpci->rx_ring[rxring_idx].desc = 
+		    pci_alloc_consistent(rtlpci->pdev,
+					 sizeof(*rtlpci->rx_ring[rxring_idx].
+					        desc) * rtlpci->rxringcount, 
+					 &rtlpci->rx_ring[rxring_idx].dma);
+		if (!rtlpci->rx_ring[rxring_idx].desc ||
+		    (unsigned long)rtlpci->rx_ring[rxring_idx].desc & 0xFF) {
+			RT_TRACE(COMP_ERR, DBG_EMERG, 
+				 ("Cannot allocate RX ring\n"));
+			return -ENOMEM;
+		}
+
+		memset(rtlpci->rx_ring[rxring_idx].desc, 0,
+		       sizeof(*rtlpci->rx_ring[rxring_idx].desc) *
+		       rtlpci->rxringcount);
+
+		/* init every desc in this ring */
+		rtlpci->rx_ring[rxring_idx].idx = 0;
+		for (i = 0; i < rtlpci->rxringcount; i++) {			
+			entry = &rtlpci->rx_ring[rxring_idx].desc[i];
+			if (!_rtl_pci_init_one_rxdesc(hw, (u8 *)entry, 
+						      rxring_idx, i))
+				return -ENOMEM;
+		}
+		rtlpriv->cfg->ops->set_desc(hw, (u8 *) entry, false,
+					    HW_DESC_RXERO, (u8 *) & tmp_one);
+	}
+	return 0;
+}
+
+static void _rtl_pci_free_tx_ring(struct ieee80211_hw *hw,
+				  unsigned int prio)
+{
+	struct rtl_priv *rtlpriv = rtl_priv(hw);
+	struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
+	struct rtl8192_tx_ring *ring = &rtlpci->tx_ring[prio];
+
+	/* free every desc in this ring */
+	while (skb_queue_len(&ring->queue)) {
+		u8 *entry;
+		struct sk_buff *skb = __skb_dequeue(&ring->queue);
+		if (rtlpriv->use_new_trx_flow)
+			entry = (u8 *)(&ring->buffer_desc[ring->idx]);
+		else
+			entry = (u8 *)(&ring->desc[ring->idx]);
+
+		pci_unmap_single(rtlpci->pdev,
+				 le32_to_cpu(rtlpriv->cfg->ops->get_desc(
+				 (u8 *) entry, true, HW_DESC_TXBUFF_ADDR)),
+				 skb->len, PCI_DMA_TODEVICE);
+		kfree_skb(skb);
+		ring->idx = (ring->idx + 1) % ring->entries;
+	}
+
+	/* free dma of this ring */
+	pci_free_consistent(rtlpci->pdev,
+			    sizeof(*ring->desc) * ring->entries,
+			    ring->desc, ring->dma);
+	ring->desc = NULL;
+	if (rtlpriv->use_new_trx_flow) {
+		pci_free_consistent(rtlpci->pdev,
+				    sizeof(*ring->buffer_desc) * ring->entries,
+				    ring->buffer_desc, ring->buffer_desc_dma);
+		ring->buffer_desc = NULL;
+	}	
+}
+
+static void _rtl_pci_free_rx_ring(struct ieee80211_hw *hw, int rxring_idx)
+{
+	struct rtl_priv *rtlpriv = rtl_priv(hw);
+	struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
+	int i;
+
+	/* free every desc in this ring */
+	for (i = 0; i < rtlpci->rxringcount; i++) {
+		struct sk_buff *skb = rtlpci->rx_ring[rxring_idx].rx_buf[i];
+		if (!skb)
+			continue;
+
+		pci_unmap_single(rtlpci->pdev, *((dma_addr_t *) skb->cb),
+				 rtlpci->rxbuffersize, PCI_DMA_FROMDEVICE);
+		kfree_skb(skb);
+	}
+
+	/* free dma of this ring */
+	if (rtlpriv->use_new_trx_flow) {
+		pci_free_consistent(rtlpci->pdev,
+				    sizeof(*rtlpci->rx_ring[rxring_idx].
+				           buffer_desc) * rtlpci->rxringcount,
+				    rtlpci->rx_ring[rxring_idx].buffer_desc,
+				    rtlpci->rx_ring[rxring_idx].dma);
+		rtlpci->rx_ring[rxring_idx].buffer_desc = NULL;
+	} else {
+		pci_free_consistent(rtlpci->pdev,
+				    sizeof(*rtlpci->rx_ring[rxring_idx].desc) *
+				    rtlpci->rxringcount,
+				    rtlpci->rx_ring[rxring_idx].desc,
+				    rtlpci->rx_ring[rxring_idx].dma);
+		rtlpci->rx_ring[rxring_idx].desc = NULL;
+	}
+}
+
+static int _rtl_pci_init_trx_ring(struct ieee80211_hw *hw)
+{
+	struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
+	int ret;
+	int i, rxring_idx;
+
+	/* rxring_idx 0:RX_MPDU_QUEUE
+	 * rxring_idx 1:RX_CMD_QUEUE */
+	for (rxring_idx = 0; rxring_idx < RTL_PCI_MAX_RX_QUEUE; rxring_idx++) {
+		ret = _rtl_pci_init_rx_ring(hw, rxring_idx);
+		if (ret)
+			return ret;
+	}
+
+	for (i = 0; i < RTL_PCI_MAX_TX_QUEUE_COUNT; i++) {
+		ret = _rtl_pci_init_tx_ring(hw, i, rtlpci->txringcount[i]);
+		if (ret)
+			goto err_free_rings;
+	}
+
+	return 0;
+
+err_free_rings:
+	for (rxring_idx = 0; rxring_idx < RTL_PCI_MAX_RX_QUEUE; rxring_idx++)
+		_rtl_pci_free_rx_ring(hw, rxring_idx);
+
+	for (i = 0; i < RTL_PCI_MAX_TX_QUEUE_COUNT; i++)
+		if (rtlpci->tx_ring[i].desc || 
+		    rtlpci->tx_ring[i].buffer_desc)
+			_rtl_pci_free_tx_ring(hw, i);
+
+	return 1;
+}
+
+static int _rtl_pci_deinit_trx_ring(struct ieee80211_hw *hw)
+{
+	u32 i, rxring_idx;
+
+	/*free rx rings */
+	for (rxring_idx = 0; rxring_idx < RTL_PCI_MAX_RX_QUEUE; rxring_idx++)
+		_rtl_pci_free_rx_ring(hw, rxring_idx);
+
+	/*free tx rings */
+	for (i = 0; i < RTL_PCI_MAX_TX_QUEUE_COUNT; i++)
+		_rtl_pci_free_tx_ring(hw, i);
+
+	return 0;
+}
+
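+/* Reset both RX rings and all TX rings: hand the legacy RX descriptors
+ * back to the hardware, free any pending TX skbs and rewind all indices. */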
+int rtl_pci_reset_trx_ring(struct ieee80211_hw *hw)
+{
+	struct rtl_priv *rtlpriv = rtl_priv(hw);
+	struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
+	int i, rxring_idx;
+	unsigned long flags;
+	u8 tmp_one = 1;
+	/* rxring_idx 0:RX_MPDU_QUEUE */
+	/* rxring_idx 1:RX_CMD_QUEUE */
+	for (rxring_idx = 0; rxring_idx < RTL_PCI_MAX_RX_QUEUE; rxring_idx++) {
+		/* force the rx_ring[RX_MPDU_QUEUE/
+		 * RX_CMD_QUEUE].idx to the first one */
+		/*new trx flow, do nothing*/
+		if ((rtlpriv->use_new_trx_flow == false) && 
+		     rtlpci->rx_ring[rxring_idx].desc) {
+			struct rtl_rx_desc *entry = NULL;
+
+			for (i = 0; i < rtlpci->rxringcount; i++) {
+				entry = &rtlpci->rx_ring[rxring_idx].desc[i];			
+				rtlpriv->cfg->ops->set_desc(hw, (u8 *) entry, 
+							    false,
+							    HW_DESC_RXOWN, 
+							    (u8 *) & tmp_one);		
+			}
+		}
+		rtlpci->rx_ring[rxring_idx].idx = 0;
+	}
+
+	/* after reset, release previous pending packet,
+	 * and force the tx idx to the first one */
+	spin_lock_irqsave(&rtlpriv->locks.irq_th_lock, flags);
+	for (i = 0; i < RTL_PCI_MAX_TX_QUEUE_COUNT; i++) {
+		if (rtlpci->tx_ring[i].desc || 
+			rtlpci->tx_ring[i].buffer_desc) {
+			struct rtl8192_tx_ring *ring = &rtlpci->tx_ring[i];
+
+			while (skb_queue_len(&ring->queue)) {
+				u8 *entry;
+				struct sk_buff *skb = 
+					__skb_dequeue(&ring->queue);
+				if (rtlpriv->use_new_trx_flow)
+					entry = (u8 *)(&ring->buffer_desc
+								[ring->idx]);
+				else
+					entry = (u8 *)(&ring->desc[ring->idx]);
+
+				pci_unmap_single(rtlpci->pdev,
+					le32_to_cpu(rtlpriv->cfg->ops->get_desc(
+							(u8 *)entry, true,
+							HW_DESC_TXBUFF_ADDR)),
+					skb->len, PCI_DMA_TODEVICE);
+				kfree_skb(skb);
+				ring->idx = (ring->idx + 1) % ring->entries;
+			}
+			ring->idx = 0;
+		}
+	}
+
+	spin_unlock_irqrestore(&rtlpriv->locks.irq_th_lock, flags);
+
+	return 0;
+}
+
+/*<delete in kernel start>*/
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(3,7,0))
+static bool rtl_pci_tx_chk_waitq_insert(struct ieee80211_hw *hw, 
+					struct sk_buff *skb)
+#else
+/*<delete in kernel end>*/
+static bool rtl_pci_tx_chk_waitq_insert(struct ieee80211_hw *hw,
+					struct ieee80211_sta *sta,
+					struct sk_buff *skb)
+/*<delete in kernel start>*/
+#endif
+/*<delete in kernel end>*/
+{
+	struct rtl_priv *rtlpriv = rtl_priv(hw);
+/*<delete in kernel start>*/
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(3,7,0))
+	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
+	struct ieee80211_sta *sta = info->control.sta;
+#endif
+/*<delete in kernel end>*/
+	struct rtl_sta_info *sta_entry = NULL;
+	u8 tid = rtl_get_tid(skb);
+	u16 fc = rtl_get_fc(skb);
+
+	if(!sta)
+		return false;
+	sta_entry = (struct rtl_sta_info *)sta->drv_priv;
+
+	if (!rtlpriv->rtlhal.b_earlymode_enable)
+		return false;
+	if (ieee80211_is_nullfunc(fc))
+		return false;
+	if (ieee80211_is_qos_nullfunc(fc))
+		return false;
+	if (ieee80211_is_pspoll(fc)) {
+		return false;
+	}
+
+	if (sta_entry->tids[tid].agg.agg_state != RTL_AGG_OPERATIONAL)
+		return false;
+	if (_rtl_mac_to_hwqueue(hw, skb) > VO_QUEUE)
+		return false;
+	if (tid > 7)
+		return false;
+	/* maybe every tid should be checked */
+	if (!rtlpriv->link_info.higher_busytxtraffic[tid])
+		return false;
+
+	spin_lock_bh(&rtlpriv->locks.waitq_lock);
+	skb_queue_tail(&rtlpriv->mac80211.skb_waitq[tid], skb);
+	spin_unlock_bh(&rtlpriv->locks.waitq_lock);
+
+	return true;
+}
+
+/*<delete in kernel start>*/
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(3,7,0))
+int rtl_pci_tx(struct ieee80211_hw *hw, struct sk_buff *skb,
+	       struct rtl_tcb_desc *ptcb_desc)
+#else
+/*<delete in kernel end>*/
+static int rtl_pci_tx(struct ieee80211_hw *hw,
+		      struct ieee80211_sta *sta,
+		      struct sk_buff *skb,
+		      struct rtl_tcb_desc *ptcb_desc)
+/*<delete in kernel start>*/
+#endif
+/*<delete in kernel end>*/
+{
+	struct rtl_priv *rtlpriv = rtl_priv(hw);
+	struct rtl_sta_info *sta_entry = NULL;
+	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
+/*<delete in kernel start>*/
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(3,7,0))
+	struct ieee80211_sta *sta = info->control.sta;
+#endif
+/*<delete in kernel end>*/
+	struct rtl8192_tx_ring *ring;
+	struct rtl_tx_desc *pdesc;
+	struct rtl_tx_buffer_desc *ptx_bd_desc = NULL;
+	u16 idx;
+	u8 own;
+	u8 temp_one = 1;
+	u8 hw_queue = _rtl_mac_to_hwqueue(hw, skb);
+	unsigned long flags;
+	struct ieee80211_hdr *hdr = rtl_get_hdr(skb);
+	u16 fc = rtl_get_fc(skb);
+	u8 *pda_addr = hdr->addr1;
+	struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
+	/*ssn */
+	u8 tid = 0;
+	u16 seq_number = 0;
+
+	if (ieee80211_is_mgmt(fc))
+		rtl_tx_mgmt_proc(hw, skb);
+
+	if (rtlpriv->psc.sw_ps_enabled) {
+		if (ieee80211_is_data(fc) && !ieee80211_is_nullfunc(fc) &&
+		    !ieee80211_has_pm(fc))
+			hdr->frame_control |= cpu_to_le16(IEEE80211_FCTL_PM);
+	}
+
+	rtl_action_proc(hw, skb, true);
+
+	if (is_multicast_ether_addr(pda_addr))
+		rtlpriv->stats.txbytesmulticast += skb->len;
+	else if (is_broadcast_ether_addr(pda_addr))
+		rtlpriv->stats.txbytesbroadcast += skb->len;
+	else
+		rtlpriv->stats.txbytesunicast += skb->len;
+
+	spin_lock_irqsave(&rtlpriv->locks.irq_th_lock, flags);
+	ring = &rtlpci->tx_ring[hw_queue];
+	if (hw_queue != BEACON_QUEUE) {
+		if (rtlpriv->use_new_trx_flow)
+			idx = ring->cur_tx_wp;
+		else
+			idx = (ring->idx + skb_queue_len(&ring->queue)) %
+			      ring->entries;
+	} else {
+		idx = 0;
+	}
+
+	pdesc = &ring->desc[idx];
+	
+	if (rtlpriv->use_new_trx_flow) {
+		ptx_bd_desc = &ring->buffer_desc[idx];
+	} else {	
+		own = (u8) rtlpriv->cfg->ops->get_desc((u8 *) pdesc,
+				true, HW_DESC_OWN);
+
+		if ((own == 1) && (hw_queue != BEACON_QUEUE)) {
+			RT_TRACE(COMP_ERR, DBG_WARNING,
+				 ("No more TX desc@%d, ring->idx = %d, "
+				  "idx = %d, skb_queue_len = %d\n",
+				  hw_queue, ring->idx, idx,
+				  skb_queue_len(&ring->queue)));
+
+			spin_unlock_irqrestore(&rtlpriv->locks.irq_th_lock, 
+					       flags);
+			return skb->len;
+		}
+	}
+	
+	if (ieee80211_is_data_qos(fc)) {
+		tid = rtl_get_tid(skb);
+		if (sta) {
+			sta_entry = (struct rtl_sta_info *)sta->drv_priv;
+			seq_number = (le16_to_cpu(hdr->seq_ctrl) & 
+				      IEEE80211_SCTL_SEQ) >> 4;
+			seq_number += 1;
+
+			if (!ieee80211_has_morefrags(hdr->frame_control))
+				sta_entry->tids[tid].seq_number = seq_number;
+		}
+	}
+
+	if (ieee80211_is_data(fc))
+		rtlpriv->cfg->ops->led_control(hw, LED_CTL_TX);
+
+/*<delete in kernel start>*/
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(3,7,0))
+	rtlpriv->cfg->ops->fill_tx_desc(hw, hdr, (u8 *) pdesc, 
+					(u8 *)ptx_bd_desc, info, skb, 
+					hw_queue, ptcb_desc);
+#else
+/*<delete in kernel end>*/
+	rtlpriv->cfg->ops->fill_tx_desc(hw, hdr, (u8 *) pdesc, 
+					(u8 *)ptx_bd_desc, info, sta, skb, 
+					hw_queue, ptcb_desc);
+/*<delete in kernel start>*/
+#endif
+/*<delete in kernel end>*/
+
+	__skb_queue_tail(&ring->queue, skb);
+	if (rtlpriv->use_new_trx_flow) {
+		rtlpriv->cfg->ops->set_desc(hw, (u8 *) pdesc, true,
+					    HW_DESC_OWN, (u8 *) & hw_queue);
+	} else {
+		rtlpriv->cfg->ops->set_desc(hw, (u8 *) pdesc, true,
+					    HW_DESC_OWN, (u8 *) & temp_one);
+	}
+
+	if ((ring->entries - skb_queue_len(&ring->queue)) < 2 &&
+	    hw_queue != BEACON_QUEUE) {
+
+		RT_TRACE(COMP_ERR, DBG_LOUD,
+			 ("less desc left, stop skb_queue@%d, "
+			  "ring->idx = %d, "
+			  "idx = %d, skb_queue_len = %d\n",
+			  hw_queue, ring->idx, idx,
+			  skb_queue_len(&ring->queue)));
+
+		ieee80211_stop_queue(hw, skb_get_queue_mapping(skb));
+	}
+
+	spin_unlock_irqrestore(&rtlpriv->locks.irq_th_lock, flags);
+
+	rtlpriv->cfg->ops->tx_polling(hw, hw_queue);
+
+	return 0;
+}
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,10,0))
+static void rtl_pci_flush(struct ieee80211_hw *hw, u32 queues, bool drop)
+#else
+static void rtl_pci_flush(struct ieee80211_hw *hw, bool drop)
+#endif
+{
+	struct rtl_priv *rtlpriv = rtl_priv(hw);
+	struct rtl_pci_priv *pcipriv = rtl_pcipriv(hw);
+	struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
+	struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
+	u16 i = 0;
+	int queue_id;
+	struct rtl8192_tx_ring *ring;
+	
+	if (mac->skip_scan)
+		return;
+	
+	for (queue_id = RTL_PCI_MAX_TX_QUEUE_COUNT - 1; queue_id >= 0;) {
+		u32 queue_len;
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,10,0))
+		if (((queues >> queue_id) & 0x1) == 0) {
+			queue_id--;
+			continue;
+		}
+#endif
+		ring = &pcipriv->dev.tx_ring[queue_id];
+		queue_len = skb_queue_len(&ring->queue);
+		if (queue_len == 0 || queue_id == BEACON_QUEUE ||
+			queue_id == TXCMD_QUEUE) {
+			queue_id--;
+			continue;
+		} else {
+			msleep(5);
+			i++;
+		}
+
+		/* we just wait 1s for all queues */
+		if (rtlpriv->psc.rfpwr_state == ERFOFF ||
+			is_hal_stop(rtlhal) || i >= 200)
+			return;
+	}
+}
+
+void rtl_pci_deinit(struct ieee80211_hw *hw)
+{
+	struct rtl_priv *rtlpriv = rtl_priv(hw);
+	struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
+
+	_rtl_pci_deinit_trx_ring(hw);
+
+	synchronize_irq(rtlpci->pdev->irq);
+	tasklet_kill(&rtlpriv->works.irq_tasklet);
+
+	flush_workqueue(rtlpriv->works.rtl_wq);
+	destroy_workqueue(rtlpriv->works.rtl_wq);
+
+}
+
+int rtl_pci_init(struct ieee80211_hw *hw, struct pci_dev *pdev)
+{
+	struct rtl_priv *rtlpriv = rtl_priv(hw);
+	int err;
+
+	_rtl_pci_init_struct(hw, pdev);
+
+	err = _rtl_pci_init_trx_ring(hw);
+	if (err) {
+		RT_TRACE(COMP_ERR, DBG_EMERG,
+			 ("tx/rx ring initialization failed"));
+		return err;
+	}
+
+	return 1;
+}
+
+int rtl_pci_start(struct ieee80211_hw *hw)
+{
+	struct rtl_priv *rtlpriv = rtl_priv(hw);
+	struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
+	struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw));
+
+	int err = 0;
+	RT_TRACE(COMP_INIT, DBG_DMESG, (" rtl_pci_start \n"));
+	rtl_pci_reset_trx_ring(hw);
+
+	rtlpriv->rtlhal.driver_is_goingto_unload = false;
+	err = rtlpriv->cfg->ops->hw_init(hw);
+	if (err) {
+		RT_TRACE(COMP_INIT, DBG_DMESG,
+			 ("Failed to config hardware err %x!\n",err));
+		return err;
+	}
+
+	rtlpriv->cfg->ops->enable_interrupt(hw);
+	RT_TRACE(COMP_INIT, DBG_LOUD, ("enable_interrupt OK\n"));
+
+	rtl_init_rx_config(hw);
+
+	/*should after adapter start and interrupt enable. */
+	set_hal_start(rtlhal);
+
+	RT_CLEAR_PS_LEVEL(ppsc, RT_RF_OFF_LEVL_HALT_NIC);
+
+	rtlpriv->rtlhal.up_first_time = false;
+
+	RT_TRACE(COMP_INIT, DBG_DMESG, ("rtl_pci_start OK\n"));
+	return 0;
+}
+
+void rtl_pci_stop(struct ieee80211_hw *hw)
+{
+	struct rtl_priv *rtlpriv = rtl_priv(hw);
+	struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw));
+	struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
+	u8 RFInProgressTimeOut = 0;
+
+	/*
+	 * should be done before disabling the interrupt & adapter,
+	 * and takes effect immediately.
+	 */
+	set_hal_stop(rtlhal);
+
+	rtlpriv->cfg->ops->disable_interrupt(hw);
+
+	spin_lock(&rtlpriv->locks.rf_ps_lock);
+	while (ppsc->rfchange_inprogress) {
+		spin_unlock(&rtlpriv->locks.rf_ps_lock);
+		if (RFInProgressTimeOut > 100) {
+			spin_lock(&rtlpriv->locks.rf_ps_lock);
+			break;
+		}
+		mdelay(1);
+		RFInProgressTimeOut++;
+		spin_lock(&rtlpriv->locks.rf_ps_lock);
+	}
+	ppsc->rfchange_inprogress = true;
+	spin_unlock(&rtlpriv->locks.rf_ps_lock);
+
+	rtlpriv->rtlhal.driver_is_goingto_unload = true;
+	rtlpriv->cfg->ops->hw_disable(hw);
+	rtlpriv->cfg->ops->led_control(hw, LED_CTL_POWER_OFF);
+
+	spin_lock(&rtlpriv->locks.rf_ps_lock);
+	ppsc->rfchange_inprogress = false;
+	spin_unlock(&rtlpriv->locks.rf_ps_lock);
+
+	rtl_pci_enable_aspm(hw);
+}
+
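+/* Identify the adapter from its PCI IDs: pick the hardware type and TRX
+ * flow, and record bus/bridge information used later for ASPM handling. */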
+static bool _rtl_pci_find_adapter(struct pci_dev *pdev,
+				  struct ieee80211_hw *hw)
+{
+	struct rtl_priv *rtlpriv = rtl_priv(hw);
+	struct rtl_pci_priv *pcipriv = rtl_pcipriv(hw);
+	struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
+	struct pci_dev *bridge_pdev = pdev->bus->self;
+	u16 venderid;
+	u16 deviceid;
+	u8 revisionid;
+	u16 irqline;
+	u8 tmp;
+
+	venderid = pdev->vendor;
+	deviceid = pdev->device;
+	pci_read_config_byte(pdev, 0x8, &revisionid);
+	pci_read_config_word(pdev, 0x3C, &irqline);
+
+	if (deviceid == RTL_PCI_8192_DID ||
+	    deviceid == RTL_PCI_0044_DID ||
+	    deviceid == RTL_PCI_0047_DID ||
+	    deviceid == RTL_PCI_8192SE_DID ||
+	    deviceid == RTL_PCI_8174_DID ||
+	    deviceid == RTL_PCI_8173_DID ||
+	    deviceid == RTL_PCI_8172_DID ||
+	    deviceid == RTL_PCI_8171_DID) {
+		switch (revisionid) {
+		case RTL_PCI_REVISION_ID_8192PCIE:
+			RT_TRACE(COMP_INIT, DBG_DMESG,
+				 ("8192E is found but not supported now-"
+				  "vid/did=%x/%x\n", venderid, deviceid));
+			rtlhal->hw_type = HARDWARE_TYPE_RTL8192E;
+			return false;
+			break;
+		case RTL_PCI_REVISION_ID_8192SE:
+			RT_TRACE(COMP_INIT, DBG_DMESG,
+				 ("8192SE is found - "
+				  "vid/did=%x/%x\n", venderid, deviceid));
+			rtlhal->hw_type = HARDWARE_TYPE_RTL8192SE;
+			break;
+		default:
+			RT_TRACE(COMP_ERR, DBG_WARNING,
+				 ("Err: Unknown device - "
+				  "vid/did=%x/%x\n", venderid, deviceid));
+			rtlhal->hw_type = HARDWARE_TYPE_RTL8192SE;
+			break;
+
+		}
+	} else if (deviceid == RTL_PCI_8723AE_DID) {
+		rtlhal->hw_type = HARDWARE_TYPE_RTL8723AE;
+		RT_TRACE(COMP_INIT, DBG_DMESG,
+			 ("8723AE PCI-E is found - "
+			  "vid/did=%x/%x\n", venderid, deviceid));
+	} else if (deviceid == RTL_PCI_8192CET_DID ||
+		   deviceid == RTL_PCI_8192CE_DID ||
+		   deviceid == RTL_PCI_8191CE_DID ||
+		   deviceid == RTL_PCI_8188CE_DID) {
+		rtlhal->hw_type = HARDWARE_TYPE_RTL8192CE;
+		RT_TRACE(COMP_INIT, DBG_DMESG,
+			 ("8192C PCI-E is found - "
+			  "vid/did=%x/%x\n", venderid, deviceid));
+	} else if (deviceid == RTL_PCI_8192DE_DID ||
+		   deviceid == RTL_PCI_8192DE_DID2) {
+		rtlhal->hw_type = HARDWARE_TYPE_RTL8192DE;
+		RT_TRACE(COMP_INIT, DBG_DMESG,
+			 ("8192D PCI-E is found - "
+			  "vid/did=%x/%x\n", venderid, deviceid));
+	} else if (deviceid == RTL_PCI_8188EE_DID) {
+		rtlhal->hw_type = HARDWARE_TYPE_RTL8188EE;
+		RT_TRACE(COMP_INIT, DBG_LOUD,
+			 ("Find adapter, Hardware type is 8188EE\n"));
+	} else if (deviceid == RTL_PCI_8723BE_DID) {
+		rtlhal->hw_type = HARDWARE_TYPE_RTL8723BE;
+		RT_TRACE(COMP_INIT, DBG_LOUD,
+			 ("Find adapter, Hardware type is 8723BE\n"));
+	} else if (deviceid == RTL_PCI_8192EE_DID) {
+		rtlhal->hw_type = HARDWARE_TYPE_RTL8192EE;
+		RT_TRACE(COMP_INIT, DBG_LOUD,
+			 ("Find adapter, Hardware type is 8192EE\n"));
+	} else if (deviceid == RTL_PCI_8821AE_DID) {
+		rtlhal->hw_type = HARDWARE_TYPE_RTL8821AE;
+		RT_TRACE(COMP_INIT, DBG_LOUD,
+			 ("Find adapter, Hardware type is 8821AE\n"));
+	} else if (deviceid == RTL_PCI_8812AE_DID) {
+		rtlhal->hw_type = HARDWARE_TYPE_RTL8812AE;
+		RT_TRACE(COMP_INIT, DBG_LOUD,
+			 ("Find adapter, Hardware type is 8812AE\n"));
+	} else {
+		RT_TRACE(COMP_ERR, DBG_WARNING,
+			 ("Err: Unknown device -"
+			  " vid/did=%x/%x\n", venderid, deviceid));
+
+		rtlhal->hw_type = RTL_DEFAULT_HARDWARE_TYPE;
+	}
+
+	if (rtlhal->hw_type == HARDWARE_TYPE_RTL8192DE) {
+		if (revisionid == 0 || revisionid == 1) {
+			if (revisionid == 0) {
+				RT_TRACE(COMP_INIT, DBG_LOUD,
+					 ("Find 92DE MAC0.\n"));
+				rtlhal->interfaceindex = 0;
+			} else if (revisionid == 1) {
+				RT_TRACE(COMP_INIT, DBG_LOUD,
+					 ("Find 92DE MAC1.\n"));
+				rtlhal->interfaceindex = 1;
+			}
+		} else {
+			RT_TRACE(COMP_INIT, DBG_LOUD, ("Unknown device - "
+				 "VendorID/DeviceID=%x/%x, Revision=%x\n",
+				 venderid, deviceid, revisionid));
+			rtlhal->interfaceindex = 0;
+		}
+	}
+	
+	/* 92ee use new trx flow */
+	if (rtlhal->hw_type == HARDWARE_TYPE_RTL8192EE)
+		rtlpriv->use_new_trx_flow = true;
+	else
+		rtlpriv->use_new_trx_flow = false;
+	
+	/*find bus info */
+	pcipriv->ndis_adapter.busnumber = pdev->bus->number;
+	pcipriv->ndis_adapter.devnumber = PCI_SLOT(pdev->devfn);
+	pcipriv->ndis_adapter.funcnumber = PCI_FUNC(pdev->devfn);
+
+	/*find bridge info */
+	pcipriv->ndis_adapter.pcibridge_vendor = PCI_BRIDGE_VENDOR_UNKNOWN;
+	/* some ARM platforms have no bridge_pdev and would crash here,
+	 * so we should check whether bridge_pdev is NULL */
+	if (bridge_pdev) {
+		pcipriv->ndis_adapter.pcibridge_vendorid = bridge_pdev->vendor;
+		for (tmp = 0; tmp < PCI_BRIDGE_VENDOR_MAX; tmp++) {
+			if (bridge_pdev->vendor == pcibridge_vendors[tmp]) {
+				pcipriv->ndis_adapter.pcibridge_vendor = tmp;
+				RT_TRACE(COMP_INIT, DBG_DMESG,
+					 ("Pci Bridge Vendor is found index: %d\n",
+					  tmp));
+				break;
+			}
+		}
+	}
+
+	if (pcipriv->ndis_adapter.pcibridge_vendor !=
+	    PCI_BRIDGE_VENDOR_UNKNOWN) {
+		pcipriv->ndis_adapter.pcibridge_busnum =
+		    bridge_pdev->bus->number;
+		pcipriv->ndis_adapter.pcibridge_devnum =
+		    PCI_SLOT(bridge_pdev->devfn);
+		pcipriv->ndis_adapter.pcibridge_funcnum =
+		    PCI_FUNC(bridge_pdev->devfn);
+		pcipriv->ndis_adapter.pcicfg_addrport =
+		    (pcipriv->ndis_adapter.pcibridge_busnum << 16) |
+		    (pcipriv->ndis_adapter.pcibridge_devnum << 11) |
+		    (pcipriv->ndis_adapter.pcibridge_funcnum << 8) | (1 << 31);
+/*<delete in kernel start>*/
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,35))
+/*<delete in kernel end>*/
+		pcipriv->ndis_adapter.pcibridge_pciehdr_offset =
+		    pci_pcie_cap(bridge_pdev);
+/*<delete in kernel start>*/
+#else
+		pcipriv->ndis_adapter.pcibridge_pciehdr_offset =
+			_rtl_pci_get_pciehdr_offset(hw);
+#endif
+/*<delete in kernel end>*/
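+		/* the PCIe Link Control register sits at offset 0x10 inside
+		 * the PCIe capability; num4bytes is its dword index for raw
+		 * CF8/CFC config space accesses */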
+		pcipriv->ndis_adapter.num4bytes =
+		    (pcipriv->ndis_adapter.pcibridge_pciehdr_offset + 0x10) / 4;
+
+		rtl_pci_get_linkcontrol_field(hw);
+
+		if (pcipriv->ndis_adapter.pcibridge_vendor ==
+		    PCI_BRIDGE_VENDOR_AMD) {
+			pcipriv->ndis_adapter.amd_l1_patch =
+			    rtl_pci_get_amd_l1_patch(hw);
+		}
+	}
+
+	RT_TRACE(COMP_INIT, DBG_DMESG,
+		 ("pcidev busnumber:devnumber:funcnumber:"
+		  "vendor:link_ctl %d:%d:%d:%x:%x\n",
+		  pcipriv->ndis_adapter.busnumber,
+		  pcipriv->ndis_adapter.devnumber,
+		  pcipriv->ndis_adapter.funcnumber,
+		  pdev->vendor, pcipriv->ndis_adapter.linkctrl_reg));
+
+	RT_TRACE(COMP_INIT, DBG_DMESG,
+		 ("pci_bridge busnumber:devnumber:funcnumber:vendor:"
+		  "pcie_cap:link_ctl_reg:amd %d:%d:%d:%x:%x:%x:%x\n",
+		  pcipriv->ndis_adapter.pcibridge_busnum,
+		  pcipriv->ndis_adapter.pcibridge_devnum,
+		  pcipriv->ndis_adapter.pcibridge_funcnum,
+		  pcibridge_vendors[pcipriv->ndis_adapter.pcibridge_vendor],
+		  pcipriv->ndis_adapter.pcibridge_pciehdr_offset,
+		  pcipriv->ndis_adapter.pcibridge_linkctrlreg,
+		  pcipriv->ndis_adapter.amd_l1_patch));
+
+	rtl_pci_parse_configuration(pdev, hw);
+	list_add_tail(&rtlpriv->list, &rtlpriv->glb_var->glb_priv_list);
+	return true;
+}
+
+static int rtl_pci_intr_mode_msi(struct ieee80211_hw *hw)
+{
+	struct rtl_priv *rtlpriv = rtl_priv(hw);
+	struct rtl_pci_priv *pcipriv = rtl_pcipriv(hw);
+	struct rtl_pci *rtlpci = rtl_pcidev(pcipriv);
+	int ret;
+	ret = pci_enable_msi(rtlpci->pdev);
+	if (ret < 0)
+		return ret;
+
+	ret = request_irq(rtlpci->pdev->irq, &_rtl_pci_interrupt,
+			  IRQF_SHARED, KBUILD_MODNAME, hw);
+	if (ret < 0) {
+		pci_disable_msi(rtlpci->pdev);
+		return ret;
+	}
+
+	rtlpci->using_msi = true;
+
+	RT_TRACE(COMP_INIT | COMP_INTR, DBG_DMESG, ("MSI Interrupt Mode!\n"));
+	return 0;
+}
+
+static int rtl_pci_intr_mode_legacy(struct ieee80211_hw *hw)
+{
+	struct rtl_priv *rtlpriv = rtl_priv(hw);
+	struct rtl_pci_priv *pcipriv = rtl_pcipriv(hw);
+	struct rtl_pci *rtlpci = rtl_pcidev(pcipriv);
+	int ret;
+
+	ret = request_irq(rtlpci->pdev->irq, &_rtl_pci_interrupt,
+			  IRQF_SHARED, KBUILD_MODNAME, hw);
+	if (ret < 0)
+		return ret;
+
+	rtlpci->using_msi = false;
+	RT_TRACE(COMP_INIT | COMP_INTR, DBG_DMESG,
+		 ("Pin-based Interrupt Mode!\n"));
+	return 0;
+}
+
+static int rtl_pci_intr_mode_decide(struct ieee80211_hw *hw)
+{
+	struct rtl_pci_priv *pcipriv = rtl_pcipriv(hw);
+	struct rtl_pci *rtlpci = rtl_pcidev(pcipriv);
+	int ret;
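+
+	/* prefer MSI when the HAL supports it; fall back to legacy
+	 * pin-based (INTx) interrupts if MSI setup fails */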
+	if (rtlpci->msi_support) {
+		ret = rtl_pci_intr_mode_msi(hw);
+		if (ret < 0)
+			ret = rtl_pci_intr_mode_legacy(hw);
+	} else {
+		ret = rtl_pci_intr_mode_legacy(hw);
+	}
+	return ret;
+}
+
+/* used by other modules to get the hw pointer
+ * via rtl_pci_get_hw_pointer() */
+struct ieee80211_hw *hw_export = NULL;
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,8,0))
+int rtl_pci_probe(struct pci_dev *pdev,
+                  const struct pci_device_id *id)
+
+#else
+int __devinit rtl_pci_probe(struct pci_dev *pdev,
+			    const struct pci_device_id *id)
+#endif
+{
+	struct ieee80211_hw *hw = NULL;
+
+	struct rtl_priv *rtlpriv = NULL;
+	struct rtl_pci_priv *pcipriv = NULL;
+	struct rtl_pci *rtlpci;
+	unsigned long pmem_start, pmem_len, pmem_flags;
+	int err;
+
+
+	err = pci_enable_device(pdev);
+	if (err) {
+		RT_ASSERT(false,
+			  ("%s : Cannot enable new PCI device\n",
+			   pci_name(pdev)));
+		return err;
+	}
+
+	if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(32))) {
+		if (pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32))) {
+			RT_ASSERT(false, ("Unable to obtain 32bit DMA "
+					  "for consistent allocations\n"));
+			pci_disable_device(pdev);
+			return -ENOMEM;
+		}
+	}
+
+	pci_set_master(pdev);
+
+	hw = ieee80211_alloc_hw(sizeof(struct rtl_pci_priv) +
+				sizeof(struct rtl_priv), &rtl_ops);
+	if (!hw) {
+		RT_ASSERT(false,
+			  ("%s : ieee80211 alloc failed\n", pci_name(pdev)));
+		err = -ENOMEM;
+		goto fail1;
+	}
+	hw_export = hw;
+
+	SET_IEEE80211_DEV(hw, &pdev->dev);
+	pci_set_drvdata(pdev, hw);
+
+	rtlpriv = hw->priv;
+	pcipriv = (void *)rtlpriv->priv;
+	pcipriv->dev.pdev = pdev;
+
+	/* init cfg & intf_ops */
+	rtlpriv->rtlhal.interface = INTF_PCI;
+	rtlpriv->cfg = (struct rtl_hal_cfg *)(id->driver_data);
+	rtlpriv->intf_ops = &rtl_pci_ops;
+	rtlpriv->glb_var = &global_var;
+
+	/*
+	 * init dbgp flags before all other functions, because we
+	 * will use them in other functions like
+	 * RT_TRACE/RT_PRINT/RTL_PRINT_DATA;
+	 * these macros cannot be used before this point
+	 */
+	rtl_dbgp_flag_init(hw);
+
+	/* MEM map */
+	err = pci_request_regions(pdev, KBUILD_MODNAME);
+	if (err) {
+		RT_ASSERT(false, ("Can't obtain PCI resources\n"));
+		return err;
+	}
+
+	pmem_start = pci_resource_start(pdev, rtlpriv->cfg->bar_id);
+	pmem_len = pci_resource_len(pdev, rtlpriv->cfg->bar_id);
+	pmem_flags = pci_resource_flags(pdev, rtlpriv->cfg->bar_id);
+
+	/*shared mem start */
+	rtlpriv->io.pci_mem_start =
+			(unsigned long)pci_iomap(pdev,
+			rtlpriv->cfg->bar_id, pmem_len);
+	if (rtlpriv->io.pci_mem_start == 0) {
+		RT_ASSERT(false, ("Can't map PCI mem\n"));
+		goto fail2;
+	}
+
+	RT_TRACE(COMP_INIT, DBG_DMESG,
+		 ("mem mapped space: start: 0x%08lx len:%08lx "
+		  "flags:%08lx, after map:0x%08lx\n",
+		  pmem_start, pmem_len, pmem_flags,
+		  rtlpriv->io.pci_mem_start));
+
+	/* Disable Clk Request */
+	pci_write_config_byte(pdev, 0x81, 0);
+	/* leave D3 mode */
+	pci_write_config_byte(pdev, 0x44, 0);
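+	/* PCI command register (offset 0x04): enable memory space and bus
+	 * mastering, then also I/O space */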
+	pci_write_config_byte(pdev, 0x04, 0x06);
+	pci_write_config_byte(pdev, 0x04, 0x07);
+
+	/* find adapter; returns false if the chip is not supported */
+	if (!_rtl_pci_find_adapter(pdev, hw))
+		goto fail3;
+
+	/* Init IO handler */
+	_rtl_pci_io_handler_init(&pdev->dev, hw);
+
+	/*like read eeprom and so on */
+	rtlpriv->cfg->ops->read_eeprom_info(hw);
+
+	if (rtlpriv->cfg->ops->init_sw_vars(hw)) {
+		RT_TRACE(COMP_ERR, DBG_EMERG, ("Can't init_sw_vars.\n"));
+		goto fail3;
+	}
+
+	rtlpriv->cfg->ops->init_sw_leds(hw);
+
+	/*aspm */
+	rtl_pci_init_aspm(hw);
+
+	/* Init mac80211 sw */
+	err = rtl_init_core(hw);
+	if (err) {
+		RT_TRACE(COMP_ERR, DBG_EMERG,
+			 ("Can't allocate sw for mac80211.\n"));
+		goto fail3;
+	}
+
+	/* Init PCI sw */
+	err = !rtl_pci_init(hw, pdev);
+	if (err) {
+		RT_TRACE(COMP_ERR, DBG_EMERG, ("Failed to init PCI.\n"));
+		goto fail3;
+	}
+
+	err = ieee80211_register_hw(hw);
+	if (err) {
+		RT_TRACE(COMP_ERR, DBG_EMERG,
+			 ("Can't register mac80211 hw.\n"));
+		goto fail3;
+	} else {
+		rtlpriv->mac80211.mac80211_registered = 1;
+	}
+	/* the wiphy must have been registered with
+	 * cfg80211 prior to regulatory_hint */
+	if (regulatory_hint(hw->wiphy, rtlpriv->regd.alpha2))
+		RT_TRACE(COMP_ERR, DBG_WARNING, ("regulatory_hint fail\n"));
+
+	err = sysfs_create_group(&pdev->dev.kobj, &rtl_attribute_group);
+	if (err) {
+		RT_TRACE(COMP_ERR, DBG_EMERG,
+			 ("failed to create sysfs device attributes\n"));
+		goto fail3;
+	}
+	/* add proc entry */
+	rtl_proc_add_one(hw);
+
+	/*init rfkill */
+	rtl_init_rfkill(hw);
+
+	rtlpci = rtl_pcidev(pcipriv);
+
+	err = rtl_pci_intr_mode_decide(hw);
+	if (err) {
+		RT_TRACE(COMP_INIT, DBG_DMESG,
+			 ("%s: failed to register IRQ handler\n",
+			  wiphy_name(hw->wiphy)));
+		goto fail3;
+	} else {
+		rtlpci->irq_alloc = 1;
+	}
+
+	set_bit(RTL_STATUS_INTERFACE_START, &rtlpriv->status);
+	return 0;
+
+fail3:
+	pci_set_drvdata(pdev, NULL);
+	rtl_deinit_core(hw);
+	ieee80211_free_hw(hw);
+
+	if (rtlpriv->io.pci_mem_start != 0)
+		pci_iounmap(pdev, (void *)rtlpriv->io.pci_mem_start);
+
+fail2:
+	pci_release_regions(pdev);
+
+fail1:
+
+	pci_disable_device(pdev);
+
+	return -ENODEV;
+
+}
+//EXPORT_SYMBOL(rtl_pci_probe);
+
+struct ieee80211_hw *rtl_pci_get_hw_pointer(void)
+{
+	return hw_export;
+}
+//EXPORT_SYMBOL(rtl_pci_get_hw_pointer);
+
+void rtl_pci_disconnect(struct pci_dev *pdev)
+{
+	struct ieee80211_hw *hw = pci_get_drvdata(pdev);
+	struct rtl_pci_priv *pcipriv = rtl_pcipriv(hw);
+	struct rtl_priv *rtlpriv = rtl_priv(hw);
+	struct rtl_pci *rtlpci = rtl_pcidev(pcipriv);
+	struct rtl_mac *rtlmac = rtl_mac(rtlpriv);
+
+	clear_bit(RTL_STATUS_INTERFACE_START, &rtlpriv->status);
+
+	sysfs_remove_group(&pdev->dev.kobj, &rtl_attribute_group);
+
+	/* remove proc entry */
+	rtl_proc_remove_one(hw);
+
+	/*ieee80211_unregister_hw will call ops_stop */
+	if (rtlmac->mac80211_registered == 1) {
+		ieee80211_unregister_hw(hw);
+		rtlmac->mac80211_registered = 0;
+	} else {
+		rtl_deinit_deferred_work(hw);
+		rtlpriv->intf_ops->adapter_stop(hw);
+	}
+
+	/*deinit rfkill */
+	rtl_deinit_rfkill(hw);
+
+	rtl_pci_deinit(hw);
+	rtl_deinit_core(hw);
+	rtlpriv->cfg->ops->deinit_sw_vars(hw);
+
+	if (rtlpci->irq_alloc) {
+		synchronize_irq(rtlpci->pdev->irq);
+		free_irq(rtlpci->pdev->irq, hw);
+		rtlpci->irq_alloc = 0;
+	}
+
+	if (rtlpci->using_msi)
+		pci_disable_msi(rtlpci->pdev);
+
+	list_del(&rtlpriv->list);
+	if (rtlpriv->io.pci_mem_start != 0) {
+		pci_iounmap(pdev, (void *)rtlpriv->io.pci_mem_start);
+		pci_release_regions(pdev);
+	}
+
+	pci_disable_device(pdev);
+
+	rtl_pci_disable_aspm(hw);
+
+	pci_set_drvdata(pdev, NULL);
+
+	ieee80211_free_hw(hw);
+}
+//EXPORT_SYMBOL(rtl_pci_disconnect);
+
+/***************************************
+kernel pci power state define:
+PCI_D0         ((pci_power_t __force) 0)
+PCI_D1         ((pci_power_t __force) 1)
+PCI_D2         ((pci_power_t __force) 2)
+PCI_D3hot      ((pci_power_t __force) 3)
+PCI_D3cold     ((pci_power_t __force) 4)
+PCI_UNKNOWN    ((pci_power_t __force) 5)
+
+This function is called when system
+goes into suspend state mac80211 will
+call rtl_mac_stop() from the mac80211
+suspend function first, So there is
+no need to call hw_disable here.
+****************************************/
+int rtl_pci_suspend(struct device *dev)
+{
+	struct pci_dev *pdev = to_pci_dev(dev);
+	struct ieee80211_hw *hw = pci_get_drvdata(pdev);
+	struct rtl_priv *rtlpriv = rtl_priv(hw);
+
+	rtlpriv->cfg->ops->hw_suspend(hw);
+	rtl_deinit_rfkill(hw);
+
+	return 0;
+}
+//EXPORT_SYMBOL(rtl_pci_suspend);
+
+int rtl_pci_resume(struct device *dev)
+{
+	struct pci_dev *pdev = to_pci_dev(dev);
+	struct ieee80211_hw *hw = pci_get_drvdata(pdev);
+	struct rtl_priv *rtlpriv = rtl_priv(hw);
+
+	rtlpriv->cfg->ops->hw_resume(hw);
+	rtl_init_rfkill(hw);
+
+	return 0;
+}
+//EXPORT_SYMBOL(rtl_pci_resume);
+
+struct rtl_intf_ops rtl_pci_ops = {
+	.read_efuse_byte = read_efuse_byte,
+	.adapter_start = rtl_pci_start,
+	.adapter_stop = rtl_pci_stop,
+	.check_buddy_priv = rtl_pci_check_buddy_priv,
+	.adapter_tx = rtl_pci_tx,
+	.flush = rtl_pci_flush,
+	.reset_trx_ring = rtl_pci_reset_trx_ring,
+	.waitq_insert = rtl_pci_tx_chk_waitq_insert,
+
+	.disable_aspm = rtl_pci_disable_aspm,
+	.enable_aspm = rtl_pci_enable_aspm,
+};
diff --git a/drivers/staging/rtl8821ae/pci.h b/drivers/staging/rtl8821ae/pci.h
new file mode 100644
index 0000000..9f20655
--- /dev/null
+++ b/drivers/staging/rtl8821ae/pci.h
@@ -0,0 +1,353 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2009-2010  Realtek Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ * The full GNU General Public License is included in this distribution in the
+ * file called LICENSE.
+ *
+ * Contact Information:
+ * wlanfae <wlanfae@realtek.com>
+ * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
+ * Hsinchu 300, Taiwan.
+ *
+ * Larry Finger <Larry.Finger@lwfinger.net>
+ *
+ *****************************************************************************/
+
+#ifndef __RTL_PCI_H__
+#define __RTL_PCI_H__
+
+#include <linux/pci.h>
+/*
+1: MSDU packet queue,
+2: Rx Command Queue
+*/
+#define RTL_PCI_RX_MPDU_QUEUE			0
+#define RTL_PCI_RX_CMD_QUEUE			1
+#define RTL_PCI_MAX_RX_QUEUE			2
+
+#define RTL_PCI_MAX_RX_COUNT			512 /* previously 64 */
+#define RTL_PCI_MAX_TX_QUEUE_COUNT		9
+
+#define RT_TXDESC_NUM				128
+#define TX_DESC_NUM_92E				512
+#define RT_TXDESC_NUM_BE_QUEUE			256
+
+#define BK_QUEUE				0
+#define BE_QUEUE				1
+#define VI_QUEUE				2
+#define VO_QUEUE				3
+#define BEACON_QUEUE				4
+#define TXCMD_QUEUE				5
+#define MGNT_QUEUE				6
+#define HIGH_QUEUE				7
+#define HCCA_QUEUE				8
+
+#define RTL_PCI_DEVICE(vend, dev, cfg)  \
+	.vendor = (vend), \
+	.device = (dev), \
+	.subvendor = PCI_ANY_ID, \
+	.subdevice = PCI_ANY_ID,\
+	.driver_data = (kernel_ulong_t)&(cfg)
+
+#define INTEL_VENDOR_ID				0x8086
+#define SIS_VENDOR_ID				0x1039
+#define ATI_VENDOR_ID				0x1002
+#define ATI_DEVICE_ID				0x7914
+#define AMD_VENDOR_ID				0x1022
+
+#define PCI_MAX_BRIDGE_NUMBER			255
+#define PCI_MAX_DEVICES				32
+#define PCI_MAX_FUNCTION			8
+
+#define PCI_CONF_ADDRESS   	0x0CF8	/*PCI Configuration Space Address */
+#define PCI_CONF_DATA		0x0CFC	/*PCI Configuration Space Data */
+
+#define PCI_CLASS_BRIDGE_DEV		0x06
+#define PCI_SUBCLASS_BR_PCI_TO_PCI	0x04
+#define PCI_CAPABILITY_ID_PCI_EXPRESS	0x10
+#define PCI_CAP_ID_EXP			0x10
+
+#define U1DONTCARE 			0xFF
+#define U2DONTCARE 			0xFFFF
+#define U4DONTCARE 			0xFFFFFFFF
+
+#define RTL_PCI_8192_DID	0x8192	/*8192 PCI-E */
+#define RTL_PCI_8192SE_DID	0x8192	/*8192 SE */
+#define RTL_PCI_8174_DID	0x8174	/*8192 SE */
+#define RTL_PCI_8173_DID	0x8173	/*8191 SE Crab */
+#define RTL_PCI_8172_DID	0x8172	/*8191 SE RE */
+#define RTL_PCI_8171_DID	0x8171	/*8191 SE Unicron */
+#define RTL_PCI_0045_DID	0x0045	/*8190 PCI for Ceraga */
+#define RTL_PCI_0046_DID	0x0046	/*8190 Cardbus for Ceraga */
+#define RTL_PCI_0044_DID	0x0044	/*8192e PCIE for Ceraga */
+#define RTL_PCI_0047_DID	0x0047	/*8192e Express Card for Ceraga */
+#define RTL_PCI_700F_DID	0x700F
+#define RTL_PCI_701F_DID	0x701F
+#define RTL_PCI_DLINK_DID	0x3304
+#define RTL_PCI_8723AE_DID	0x8723	/*8723e */
+#define RTL_PCI_8192CET_DID	0x8191	/*8192ce */
+#define RTL_PCI_8192CE_DID	0x8178	/*8192ce */
+#define RTL_PCI_8191CE_DID	0x8177	/*8192ce */
+#define RTL_PCI_8188CE_DID	0x8176	/*8192ce */
+#define RTL_PCI_8192CU_DID	0x8191	/*8192ce */
+#define RTL_PCI_8192DE_DID	0x8193	/*8192de */
+#define RTL_PCI_8192DE_DID2	0x002B	/*92DE*/
+#define RTL_PCI_8188EE_DID	0x8179  /*8188ee*/
+#define RTL_PCI_8723BE_DID	0xB723  /*8723be*/
+#define RTL_PCI_8192EE_DID	0x818B	/*8192ee*/
+#define RTL_PCI_8821AE_DID	0x8821	/*8821ae*/
+#define RTL_PCI_8812AE_DID	0x8812	/*8812ae*/
+
+/*8192 support 16 pages of IO registers*/
+#define RTL_MEM_MAPPED_IO_RANGE_8190PCI 	0x1000
+#define RTL_MEM_MAPPED_IO_RANGE_8192PCIE	0x4000
+#define RTL_MEM_MAPPED_IO_RANGE_8192SE		0x4000
+#define RTL_MEM_MAPPED_IO_RANGE_8192CE		0x4000
+#define RTL_MEM_MAPPED_IO_RANGE_8192DE		0x4000
+
+#define RTL_PCI_REVISION_ID_8190PCI		0x00
+#define RTL_PCI_REVISION_ID_8192PCIE		0x01
+#define RTL_PCI_REVISION_ID_8192SE		0x10
+#define RTL_PCI_REVISION_ID_8192CE		0x1
+#define RTL_PCI_REVISION_ID_8192DE		0x0
+
+#define PCI_VENDOR_ID_REALTEK		0x10ec
+
+#define RTL_DEFAULT_HARDWARE_TYPE	HARDWARE_TYPE_RTL8192CE
+
+enum pci_bridge_vendor {
+	PCI_BRIDGE_VENDOR_INTEL = 0x0,	/*0b'0000,0001 */
+	PCI_BRIDGE_VENDOR_ATI,		/*0b'0000,0010*/
+	PCI_BRIDGE_VENDOR_AMD,		/*0b'0000,0100*/
+	PCI_BRIDGE_VENDOR_SIS,		/*0b'0000,1000*/
+	PCI_BRIDGE_VENDOR_UNKNOWN,	/*0b'0100,0000*/
+	PCI_BRIDGE_VENDOR_MAX,
+};
+
+struct rtl_pci_capabilities_header {
+	u8 capability_id;
+	u8 next;
+};
+
+/* In the new TRX flow, the buffer descriptor is a new concept;
+ * TX wifi info corresponds to the TX descriptor in the old flow,
+ * RX wifi info corresponds to the RX descriptor in the old flow. */
+struct rtl_tx_buffer_desc {
+#if (RTL8192EE_SEG_NUM == 2)
+	u32 dword[2*(DMA_IS_64BIT + 1)*8]; /* seg = 8 */
+#elif (RTL8192EE_SEG_NUM == 1)
+	u32 dword[2*(DMA_IS_64BIT + 1)*4]; /* seg = 4 */
+#elif (RTL8192EE_SEG_NUM == 0)
+	u32 dword[2*(DMA_IS_64BIT + 1)*2]; /* seg = 2 */
+#endif
+} __packed;
+
+struct rtl_tx_desc {/*old: tx desc*//*new: tx wifi info*/
+	u32 dword[16];
+} __packed;
+
+struct rtl_rx_buffer_desc { /*rx buffer desc*/
+	u32 dword[2];
+} __packed;
+
+struct rtl_rx_desc { /*old: rx desc*//*new: rx wifi info*/
+	u32 dword[8];
+} __packed;
+
+struct rtl_tx_cmd_desc {
+	u32 dword[16];
+} __packed;
+
+struct rtl8192_tx_ring {
+	struct rtl_tx_desc *desc; /*tx desc / tx wifi info*/
+	dma_addr_t dma; /*tx desc dma memory / tx wifi info dma memory*/
+	unsigned int idx;
+	unsigned int entries;
+	struct sk_buff_head queue;
+	/*add for new trx flow*/
+	struct rtl_tx_buffer_desc *buffer_desc; /*tx buffer descriptor*/
+	dma_addr_t buffer_desc_dma; /* tx buffer desc dma memory */
+	u16 avl_desc; /* available_desc_to_write */
+	u16 cur_tx_wp; /* current_tx_write_point */
+	u16 cur_tx_rp; /* current_tx_read_point */
+};
+
+struct rtl8192_rx_ring {
+	struct rtl_rx_desc *desc; /* for old trx flow, not used in new trx */
+	/*dma matches either 'desc' or 'buffer_desc'*/
+	dma_addr_t dma;
+	unsigned int idx;
+	struct sk_buff *rx_buf[RTL_PCI_MAX_RX_COUNT];
+	/*add for new trx flow*/
+	struct rtl_rx_buffer_desc *buffer_desc; /*rx buffer descriptor*/
+	u16 next_rx_rp; /* next_rx_read_point */
+};
+
+struct rtl_pci {
+	struct pci_dev *pdev;
+	bool irq_enabled;
+
+	/*Tx */
+	struct rtl8192_tx_ring tx_ring[RTL_PCI_MAX_TX_QUEUE_COUNT];
+	int txringcount[RTL_PCI_MAX_TX_QUEUE_COUNT];
+	u32 transmit_config;
+
+	/*Rx */
+	struct rtl8192_rx_ring rx_ring[RTL_PCI_MAX_RX_QUEUE];
+	int rxringcount;
+	u16 rxbuffersize;
+	u32 receive_config;
+
+	/*irq */
+	u8 irq_alloc;
+	u32 irq_mask[2];
+	u32 sys_irq_mask;
+
+	/*Bcn control register setting */
+	u32 reg_bcn_ctrl_val;
+
+	/* ASPM */
+	u8 const_pci_aspm;
+	u8 const_amdpci_aspm;
+	u8 const_hwsw_rfoff_d3;
+	u8 const_support_pciaspm;
+	/*pci-e bridge */
+	u8 const_hostpci_aspm_setting;
+	/*pci-e device */
+	u8 const_devicepci_aspm_setting;
+	/*If it supports ASPM, Offset[560h] = 0x40,
+	   otherwise Offset[560h] = 0x00. */
+	bool b_support_aspm;
+	bool b_support_backdoor;
+
+	/*QOS & EDCA */
+	enum acm_method acm_method;
+
+	u16 shortretry_limit;
+	u16 longretry_limit;
+
+	/* MSI support */
+	bool msi_support;
+	bool using_msi;
+};
+
+struct mp_adapter {
+	u8 linkctrl_reg;
+
+	u8 busnumber;
+	u8 devnumber;
+	u8 funcnumber;
+
+	u8 pcibridge_busnum;
+	u8 pcibridge_devnum;
+	u8 pcibridge_funcnum;
+
+	u8 pcibridge_vendor;
+	u16 pcibridge_vendorid;
+	u16 pcibridge_deviceid;
+
+	u32 pcicfg_addrport;
+	u8 num4bytes;
+
+	u8 pcibridge_pciehdr_offset;
+	u8 pcibridge_linkctrlreg;
+
+	bool amd_l1_patch;
+};
+
+struct rtl_pci_priv {
+	struct rtl_pci dev;
+	struct mp_adapter ndis_adapter;
+	struct rtl_led_ctl ledctl;
+	struct bt_coexist_info btcoexist;
+};
+
+#define rtl_pcipriv(hw)		(((struct rtl_pci_priv *)(rtl_priv(hw))->priv))
+#define rtl_pcidev(pcipriv)	(&((pcipriv)->dev))
+
+int rtl_pci_reset_trx_ring(struct ieee80211_hw *hw);
+
+extern struct rtl_intf_ops rtl_pci_ops;
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,8,0))
+int rtl_pci_probe(struct pci_dev *pdev,
+                  const struct pci_device_id *id);
+#else
+int __devinit rtl_pci_probe(struct pci_dev *pdev,
+			    const struct pci_device_id *id);
+#endif
+void rtl_pci_disconnect(struct pci_dev *pdev);
+int rtl_pci_suspend(struct device *dev);
+int rtl_pci_resume(struct device *dev);
+
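+/* MMIO register accessors: addr is an offset into the BAR mapped at
+ * pci_mem_start */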
+static inline u8 pci_read8_sync(struct rtl_priv *rtlpriv, u32 addr)
+{
+	return 0xff & readb((u8 __iomem *) rtlpriv->io.pci_mem_start + addr);
+}
+
+static inline u16 pci_read16_sync(struct rtl_priv *rtlpriv, u32 addr)
+{
+	return readw((u8 __iomem *) rtlpriv->io.pci_mem_start + addr);
+}
+
+static inline u32 pci_read32_sync(struct rtl_priv *rtlpriv, u32 addr)
+{
+	return readl((u8 __iomem *) rtlpriv->io.pci_mem_start + addr);
+}
+
+static inline void pci_write8_async(struct rtl_priv *rtlpriv, u32 addr, u8 val)
+{
+	writeb(val, (u8 __iomem *) rtlpriv->io.pci_mem_start + addr);
+}
+
+static inline void pci_write16_async(struct rtl_priv *rtlpriv,
+				     u32 addr, u16 val)
+{
+	writew(val, (u8 __iomem *) rtlpriv->io.pci_mem_start + addr);
+}
+
+static inline void pci_write32_async(struct rtl_priv *rtlpriv,
+				     u32 addr, u32 val)
+{
+	writel(val, (u8 __iomem *) rtlpriv->io.pci_mem_start + addr);
+}
+
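+/* legacy x86 port I/O wrappers, used together with pcicfg_addrport for
+ * CF8/CFC configuration space accesses */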
+static inline void rtl_pci_raw_write_port_ulong(u32 port, u32 val)
+{
+	outl(val, port);
+}
+
+static inline void rtl_pci_raw_write_port_uchar(u32 port, u8 val)
+{
+	outb(val, port);
+}
+
+static inline void rtl_pci_raw_read_port_uchar(u32 port, u8 *pval)
+{
+	*pval = inb(port);
+}
+
+static inline void rtl_pci_raw_read_port_ushort(u32 port, u16 *pval)
+{
+	*pval = inw(port);
+}
+
+static inline void rtl_pci_raw_read_port_ulong(u32 port, u32 *pval)
+{
+	*pval = inl(port);
+}
+
+#endif
diff --git a/drivers/staging/rtl8821ae/ps.c b/drivers/staging/rtl8821ae/ps.c
new file mode 100644
index 0000000..f12ffa8
--- /dev/null
+++ b/drivers/staging/rtl8821ae/ps.c
@@ -0,0 +1,1025 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2009-2010  Realtek Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ * The full GNU General Public License is included in this distribution in the
+ * file called LICENSE.
+ *
+ * Contact Information:
+ * wlanfae <wlanfae@realtek.com>
+ * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
+ * Hsinchu 300, Taiwan.
+ *
+ * Larry Finger <Larry.Finger@lwfinger.net>
+ *
+ *****************************************************************************/
+
+#include "wifi.h"
+#include "base.h"
+#include "ps.h"
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,2,0))
+#include <linux/export.h>
+#endif
+#include "btcoexist/rtl_btc.h"
+
+bool rtl_ps_enable_nic(struct ieee80211_hw *hw)
+{
+	struct rtl_priv *rtlpriv = rtl_priv(hw);
+	struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw));
+	struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
+	bool init_status = true;
+
+	/*<1> reset trx ring */
+	if (rtlhal->interface == INTF_PCI)
+		rtlpriv->intf_ops->reset_trx_ring(hw);
+
+	if (is_hal_stop(rtlhal))
+		RT_TRACE(COMP_ERR, DBG_WARNING, ("Driver is already down!\n"));
+
+	/*<2> Enable Adapter */
+	rtlpriv->cfg->ops->hw_init(hw);
+	RT_CLEAR_PS_LEVEL(ppsc, RT_RF_OFF_LEVL_HALT_NIC);
+	/*init_status = false; */
+
+	/*<3> Enable Interrupt */
+	rtlpriv->cfg->ops->enable_interrupt(hw);
+
+	/*<enable timer> */
+	rtl_watch_dog_timer_callback((unsigned long)hw);
+
+	return init_status;
+}
+//EXPORT_SYMBOL(rtl_ps_enable_nic);
+
+bool rtl_ps_disable_nic(struct ieee80211_hw *hw)
+{
+	bool status = true;
+	struct rtl_priv *rtlpriv = rtl_priv(hw);
+
+	/*<1> Stop all timer */
+	rtl_deinit_deferred_work(hw);
+
+	/*<2> Disable Interrupt */
+	rtlpriv->cfg->ops->disable_interrupt(hw);
+
+	/*<3> Disable Adapter */
+	rtlpriv->cfg->ops->hw_disable(hw);
+
+	return status;
+}
+//EXPORT_SYMBOL(rtl_ps_disable_nic);
+
+bool rtl_ps_set_rf_state(struct ieee80211_hw *hw,
+			 enum rf_pwrstate state_toset,
+			 u32 changesource, bool protect_or_not)
+{
+	struct rtl_priv *rtlpriv = rtl_priv(hw);
+	struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw));
+	enum rf_pwrstate rtstate;
+	bool b_actionallowed = false;
+	u16 rfwait_cnt = 0;
+
+	/*protect_or_not = true; */
+
+	if (protect_or_not)
+		goto no_protect;
+
+	/*
+	 *Only one thread can change
+	 *the RF state at one time, and others
+	 *should wait to be executed.
+	 */
+	while (true) {
+		spin_lock(&rtlpriv->locks.rf_ps_lock);
+		if (ppsc->rfchange_inprogress) {
+			spin_unlock(&rtlpriv->locks.rf_ps_lock);
+
+			RT_TRACE(COMP_ERR, DBG_WARNING,
+				 ("RF Change in progress!"
+				  "Wait to set..state_toset(%d).\n",
+				  state_toset));
+
+			/* Set RF after the previous action is done.  */
+			while (ppsc->rfchange_inprogress) {
+				rfwait_cnt++;
+				mdelay(1);
+				/*
+				 *Wait too long, return false to avoid
+				 *to be stuck here.
+				 */
+				if (rfwait_cnt > 100)
+					return false;
+			}
+		} else {
+			ppsc->rfchange_inprogress = true;
+			spin_unlock(&rtlpriv->locks.rf_ps_lock);
+			break;
+		}
+	}
+
+no_protect:
+	rtstate = ppsc->rfpwr_state;
+
+	switch (state_toset) {
+	case ERFON:
+		ppsc->rfoff_reason &= (~changesource);
+
+		if ((changesource == RF_CHANGE_BY_HW) &&
+		    (ppsc->b_hwradiooff == true)) {
+			ppsc->b_hwradiooff = false;
+		}
+
+		if (!ppsc->rfoff_reason) {
+			ppsc->rfoff_reason = 0;
+			b_actionallowed = true;
+		}
+
+		break;
+
+	case ERFOFF:
+
+		if ((changesource == RF_CHANGE_BY_HW) &&
+		    (ppsc->b_hwradiooff == false)) {
+			ppsc->b_hwradiooff = true;
+		}
+
+		ppsc->rfoff_reason |= changesource;
+		b_actionallowed = true;
+		break;
+
+	case ERFSLEEP:
+		ppsc->rfoff_reason |= changesource;
+		b_actionallowed = true;
+		break;
+
+	default:
+		RT_TRACE(COMP_ERR, DBG_EMERG, ("switch case not processed\n"));
+		break;
+	}
+
+	if (b_actionallowed)
+		rtlpriv->cfg->ops->set_rf_power_state(hw, state_toset);
+
+	if (!protect_or_not) {
+		spin_lock(&rtlpriv->locks.rf_ps_lock);
+		ppsc->rfchange_inprogress = false;
+		spin_unlock(&rtlpriv->locks.rf_ps_lock);
+	}
+
+	return b_actionallowed;
+}
+//EXPORT_SYMBOL(rtl_ps_set_rf_state);
+
+static void _rtl_ps_inactive_ps(struct ieee80211_hw *hw)
+{
+	struct rtl_priv *rtlpriv = rtl_priv(hw);
+	struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
+	struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw));
+
+	ppsc->b_swrf_processing = true;
+
+	if (ppsc->inactive_pwrstate == ERFON && rtlhal->interface == INTF_PCI) {
+		if ((ppsc->reg_rfps_level & RT_RF_OFF_LEVL_ASPM) &&
+		    RT_IN_PS_LEVEL(ppsc, RT_PS_LEVEL_ASPM) &&
+		    rtlhal->interface == INTF_PCI) {
+			rtlpriv->intf_ops->disable_aspm(hw);
+			RT_CLEAR_PS_LEVEL(ppsc, RT_PS_LEVEL_ASPM);
+		}
+	}
+
+	if (rtlpriv->cfg->ops->get_btc_status()){
+		rtlpriv->btcoexist.btc_ops->btc_ips_notify(rtlpriv,
+						ppsc->inactive_pwrstate);
+	}
+	rtl_ps_set_rf_state(hw, ppsc->inactive_pwrstate,
+			    RF_CHANGE_BY_IPS, false);
+
+	if (ppsc->inactive_pwrstate == ERFOFF &&
+	    rtlhal->interface == INTF_PCI) {
+		if (ppsc->reg_rfps_level & RT_RF_OFF_LEVL_ASPM &&
+		    !RT_IN_PS_LEVEL(ppsc, RT_PS_LEVEL_ASPM)) {
+			rtlpriv->intf_ops->enable_aspm(hw);
+			RT_SET_PS_LEVEL(ppsc, RT_PS_LEVEL_ASPM);
+		}
+	}
+
+	ppsc->b_swrf_processing = false;
+}
+
+void rtl_ips_nic_off_wq_callback(void *data)
+{
+	struct rtl_works *rtlworks =
+	    container_of_dwork_rtl(data, struct rtl_works, ips_nic_off_wq);
+	struct ieee80211_hw *hw = rtlworks->hw;
+	struct rtl_priv *rtlpriv = rtl_priv(hw);
+	struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
+	struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
+	struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw));
+	enum rf_pwrstate rtstate;
+
+	if (mac->opmode != NL80211_IFTYPE_STATION) {
+		RT_TRACE(COMP_ERR, DBG_WARNING, ("not station return\n"));
+		return;
+	}
+
+	if (mac->p2p_in_use)
+		return;
+
+	if (mac->link_state > MAC80211_NOLINK)
+		return;
+
+	if (is_hal_stop(rtlhal))
+		return;
+
+	if (rtlpriv->sec.being_setkey)
+		return;
+
+	if (rtlpriv->cfg->ops->bt_turn_off_bt_coexist_before_enter_lps)
+		rtlpriv->cfg->ops->bt_turn_off_bt_coexist_before_enter_lps(hw);
+
+	if (ppsc->b_inactiveps) {
+		rtstate = ppsc->rfpwr_state;
+
+		/*
+		 *Do not enter IPS in the following conditions:
+		 *(1) RF is already OFF or Sleep
+		 *(2) b_swrf_processing (indicates the IPS is still under going)
+		 *(3) Connected (only disconnected can trigger IPS)
+		 *(4) IBSS (send Beacon)
+		 *(5) AP mode (send Beacon)
+		 *(6) monitor mode (rcv packet)
+		 */
+
+		if (rtstate == ERFON &&
+		    !ppsc->b_swrf_processing &&
+		    (mac->link_state == MAC80211_NOLINK) &&
+		    !mac->act_scanning) {
+			RT_TRACE(COMP_RF, DBG_LOUD,
+				 ("IPSEnter(): Turn off RF.\n"));
+
+			ppsc->inactive_pwrstate = ERFOFF;
+			ppsc->b_in_powersavemode = true;
+
+			/*rtl_pci_reset_trx_ring(hw); */
+			_rtl_ps_inactive_ps(hw);
+		}
+	}
+}
+
+void rtl_ips_nic_off(struct ieee80211_hw *hw)
+{
+	struct rtl_priv *rtlpriv = rtl_priv(hw);
+
+	/*
+	 * when associating with an AP, mac80211 asks us to disable the
+	 * nic quickly after scanning and before linking, which would
+	 * make the link fail, so delay the power-off by 100ms here
+	 */
+	queue_delayed_work(rtlpriv->works.rtl_wq,
+			   &rtlpriv->works.ips_nic_off_wq, MSECS(100));
+}
+
+/* NOTICE: any opmode should execute nic_on before use; disabling
+ * without nic_on may cause problems, e.g. adhoc throughput issues */
+void rtl_ips_nic_on(struct ieee80211_hw *hw)
+{
+	struct rtl_priv *rtlpriv = rtl_priv(hw);
+	struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw));
+	enum rf_pwrstate rtstate;
+
+	cancel_delayed_work(&rtlpriv->works.ips_nic_off_wq);
+
+	spin_lock(&rtlpriv->locks.ips_lock);
+	if (ppsc->b_inactiveps) {
+		rtstate = ppsc->rfpwr_state;
+
+		if (rtstate != ERFON &&
+		    !ppsc->b_swrf_processing &&
+		    ppsc->rfoff_reason <= RF_CHANGE_BY_IPS) {
+
+			ppsc->inactive_pwrstate = ERFON;
+			ppsc->b_in_powersavemode = false;
+			_rtl_ps_inactive_ps(hw);
+		}
+	}
+	spin_unlock(&rtlpriv->locks.ips_lock);
+}
+
+/*for FW LPS*/
+
+/*
+ *Determine if we can set Fw into PS mode
+ *in current condition.Return true if it
+ *can enter PS mode.
+ */
+static bool rtl_get_fwlps_doze(struct ieee80211_hw *hw)
+{
+	struct rtl_priv *rtlpriv = rtl_priv(hw);
+	struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
+	struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw));
+	u32 ps_timediff;
+
+	ps_timediff = jiffies_to_msecs(jiffies -
+				       ppsc->last_delaylps_stamp_jiffies);
+
+	if (ps_timediff < 2000) {
+		RT_TRACE(COMP_POWER, DBG_LOUD,
+			 ("Delay enter Fw LPS for DHCP, ARP,"
+			  " or EAPOL exchanging state.\n"));
+		return false;
+	}
+
+	if (mac->link_state != MAC80211_LINKED)
+		return false;
+
+	if (mac->opmode == NL80211_IFTYPE_ADHOC)
+		return false;
+
+	return true;
+}
+
+/* Change current and default preamble mode.*/
+void rtl_lps_set_psmode(struct ieee80211_hw *hw, u8 rt_psmode)
+{
+	struct rtl_priv *rtlpriv = rtl_priv(hw);
+	struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
+	struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw));
+	bool enter_fwlps;
+
+	if (mac->opmode == NL80211_IFTYPE_ADHOC)
+		return;
+
+	if (mac->link_state != MAC80211_LINKED)
+		return;
+
+	if (ppsc->dot11_psmode == rt_psmode)
+		return;
+
+	/* Update power save mode configured. */
+	ppsc->dot11_psmode = rt_psmode;
+
+	/*
+	 *<FW control LPS>
+	 *1. Enter PS mode
+	 *   Set RPWM to Fw to turn RF off and send H2C fw_pwrmode
+	 *   cmd to set Fw into PS mode.
+	 *2. Leave PS mode
+	 *   Send H2C fw_pwrmode cmd to Fw to set Fw into Active
+	 *   mode and set RPWM to turn RF on.
+	 */
+
+	if ((ppsc->b_fwctrl_lps) && ppsc->report_linked) {
+		if (ppsc->dot11_psmode == EACTIVE) {
+			RT_TRACE(COMP_RF, DBG_DMESG,
+				 ("FW LPS leave ps_mode:%x\n",
+				  FW_PS_ACTIVE_MODE));
+			enter_fwlps = false;
+			ppsc->pwr_mode = FW_PS_ACTIVE_MODE;
+			ppsc->smart_ps = 0;
+			rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_FW_LPS_ACTION,
+						      (u8 *)(&enter_fwlps));
+			if (ppsc->p2p_ps_info.opp_ps)
+				rtl_p2p_ps_cmd(hw,P2P_PS_ENABLE);
+
+		} else {
+			if (rtl_get_fwlps_doze(hw)) {
+				RT_TRACE(COMP_RF, DBG_DMESG,
+					 ("FW LPS enter ps_mode:%x\n",
+					 ppsc->fwctrl_psmode));
+				enter_fwlps = true;
+				ppsc->pwr_mode = ppsc->fwctrl_psmode;
+				ppsc->smart_ps = 2;
+				rtlpriv->cfg->ops->set_hw_reg(hw,
+							HW_VAR_FW_LPS_ACTION,
+							(u8 *)(&enter_fwlps));
+
+			} else {
+				/* Reset the power save related parameters. */
+				ppsc->dot11_psmode = EACTIVE;
+			}
+		}
+	}
+}
+
+/*Enter the leisure power save mode.*/
+void rtl_lps_enter(struct ieee80211_hw *hw)
+{
+	struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
+	struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw));
+	struct rtl_priv *rtlpriv = rtl_priv(hw);
+	unsigned long flag;
+
+	if (!ppsc->b_fwctrl_lps)
+		return;
+
+	if (rtlpriv->sec.being_setkey)
+		return;
+
+	if (rtlpriv->link_info.b_busytraffic)
+		return;
+
+	/* only sleep a while after associating, so DHCP and the 4-way handshake can complete */
+	if (mac->cnt_after_linked < 5)
+		return;
+
+	if (mac->opmode == NL80211_IFTYPE_ADHOC)
+		return;
+
+	if (mac->link_state != MAC80211_LINKED)
+		return;
+
+	spin_lock_irqsave(&rtlpriv->locks.lps_lock, flag);
+
+	/* Idle for a while if we connect to AP a while ago. */
+	if (mac->cnt_after_linked >= 2) {
+		if (ppsc->dot11_psmode == EACTIVE) {
+			RT_TRACE(COMP_POWER, DBG_LOUD,
+				 ("Enter 802.11 power save mode...\n"));
+
+			rtl_lps_set_psmode(hw, EAUTOPS);
+		}
+	}
+
+	spin_unlock_irqrestore(&rtlpriv->locks.lps_lock, flag);
+}
+
+/*Leave the leisure power save mode.*/
+void rtl_lps_leave(struct ieee80211_hw *hw)
+{
+	struct rtl_priv *rtlpriv = rtl_priv(hw);
+	struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw));
+	struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
+	unsigned long flag;
+
+	spin_lock_irqsave(&rtlpriv->locks.lps_lock, flag);
+
+	if (ppsc->b_fwctrl_lps) {
+		if (ppsc->dot11_psmode != EACTIVE) {
+
+			/*FIX ME */
+			rtlpriv->cfg->ops->enable_interrupt(hw);
+
+			if (ppsc->reg_rfps_level & RT_RF_LPS_LEVEL_ASPM &&
+			    RT_IN_PS_LEVEL(ppsc, RT_PS_LEVEL_ASPM) &&
+			    rtlhal->interface == INTF_PCI) {
+				rtlpriv->intf_ops->disable_aspm(hw);
+				RT_CLEAR_PS_LEVEL(ppsc, RT_PS_LEVEL_ASPM);
+			}
+
+			RT_TRACE(COMP_POWER, DBG_LOUD,
+				 ("Busy Traffic,Leave 802.11 power save..\n"));
+
+			rtl_lps_set_psmode(hw, EACTIVE);
+		}
+	}
+	spin_unlock_irqrestore(&rtlpriv->locks.lps_lock, flag);
+}
+
+/* For sw LPS*/
+void rtl_swlps_beacon(struct ieee80211_hw *hw, void *data, unsigned int len)
+{
+	struct rtl_priv *rtlpriv = rtl_priv(hw);
+	struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
+	struct ieee80211_hdr *hdr = (void *) data;
+	struct ieee80211_tim_ie *tim_ie;
+	u8 *tim;
+	u8 tim_len;
+	bool u_buffed;
+	bool m_buffed;
+
+	if (mac->opmode != NL80211_IFTYPE_STATION)
+		return;
+
+	if (!rtlpriv->psc.b_swctrl_lps)
+		return;
+
+	if (rtlpriv->mac80211.link_state != MAC80211_LINKED)
+		return;
+
+	if (!rtlpriv->psc.sw_ps_enabled)
+		return;
+
+	if (rtlpriv->psc.b_fwctrl_lps)
+		return;
+
+	if (likely(!(hw->conf.flags & IEEE80211_CONF_PS)))
+		return;
+
+	/* check if this really is a beacon */
+	if (!ieee80211_is_beacon(hdr->frame_control))
+		return;
+
+	/* min. beacon length + FCS_LEN */
+	if (len <= 40 + FCS_LEN)
+		return;
+
+	/* and only beacons from the associated BSSID, please */
+	if (!ether_addr_equal(hdr->addr3, rtlpriv->mac80211.bssid))
+		return;
+
+	rtlpriv->psc.last_beacon = jiffies;
+
+	tim = rtl_find_ie(data, len - FCS_LEN, WLAN_EID_TIM);
+	if (!tim)
+		return;
+
+	if (tim[1] < sizeof(*tim_ie))
+		return;
+
+	tim_len = tim[1];
+	tim_ie = (struct ieee80211_tim_ie *) &tim[2];
+
+/*<delete in kernel start>*/
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,35))
+/*<delete in kernel end>*/
+	if (!WARN_ON_ONCE(!hw->conf.ps_dtim_period))
+		rtlpriv->psc.dtim_counter = tim_ie->dtim_count;
+/*<delete in kernel start>*/
+#else
+	if (!WARN_ON_ONCE(!mac->vif->bss_conf.dtim_period))
+		rtlpriv->psc.dtim_counter = tim_ie->dtim_count;
+#endif
+/*<delete in kernel end>*/
+
+	/* Check whenever the PHY can be turned off again. */
+
+	/* 1. What about buffered unicast traffic for our AID? */
+	u_buffed = ieee80211_check_tim(tim_ie, tim_len,
+				       rtlpriv->mac80211.assoc_id);
+
+	/* 2. Maybe the AP wants to send multicast/broadcast data? */
+	m_buffed = tim_ie->bitmap_ctrl & 0x01;
+	rtlpriv->psc.multi_buffered = m_buffed;
+
+	/* unicast will process by mac80211 through
+	 * set ~IEEE80211_CONF_PS, So we just check
+	 * multicast frames here */
+	if (!m_buffed) {
+		/* back to low-power land; the delay helps prevent the
+		 * null power save frame tx from failing */
+		queue_delayed_work(rtlpriv->works.rtl_wq,
+				   &rtlpriv->works.ps_work, MSECS(5));
+	} else {
+		RT_TRACE(COMP_POWER, DBG_DMESG,
+			 ("u_buffed: %x, m_buffed: %x\n",
+			  u_buffed, m_buffed));
+	}
+}
+
+void rtl_swlps_rf_awake(struct ieee80211_hw *hw)
+{
+	struct rtl_priv *rtlpriv = rtl_priv(hw);
+	struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw));
+	struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
+	unsigned long flag;
+
+	if (!rtlpriv->psc.b_swctrl_lps)
+		return;
+	if (mac->link_state != MAC80211_LINKED)
+		return;
+
+	if (ppsc->reg_rfps_level & RT_RF_LPS_LEVEL_ASPM &&
+	    RT_IN_PS_LEVEL(ppsc, RT_PS_LEVEL_ASPM)) {
+		rtlpriv->intf_ops->disable_aspm(hw);
+		RT_CLEAR_PS_LEVEL(ppsc, RT_PS_LEVEL_ASPM);
+	}
+
+	spin_lock_irqsave(&rtlpriv->locks.lps_lock, flag);
+	rtl_ps_set_rf_state(hw, ERFON, RF_CHANGE_BY_PS, false);
+	spin_unlock_irqrestore(&rtlpriv->locks.lps_lock, flag);
+}
+
+void rtl_swlps_rfon_wq_callback(void *data)
+{
+	struct rtl_works *rtlworks =
+	    container_of_dwork_rtl(data, struct rtl_works, ps_rfon_wq);
+	struct ieee80211_hw *hw = rtlworks->hw;
+
+	rtl_swlps_rf_awake(hw);
+}
+
+void rtl_swlps_rf_sleep(struct ieee80211_hw *hw)
+{
+	struct rtl_priv *rtlpriv = rtl_priv(hw);
+	struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
+	struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw));
+	unsigned long flag;
+	u8 sleep_intv;
+
+	if (!rtlpriv->psc.sw_ps_enabled)
+		return;
+
+	if ((rtlpriv->sec.being_setkey) ||
+	    (mac->opmode == NL80211_IFTYPE_ADHOC))
+		return;
+
+	/* only sleep a while after associating, so DHCP and the 4-way handshake can complete */
+	if ((mac->link_state != MAC80211_LINKED) || (mac->cnt_after_linked < 5))
+		return;
+
+	if (rtlpriv->link_info.b_busytraffic)
+		return;
+
+	spin_lock(&rtlpriv->locks.rf_ps_lock);
+	if (rtlpriv->psc.rfchange_inprogress) {
+		spin_unlock(&rtlpriv->locks.rf_ps_lock);
+		return;
+	}
+	spin_unlock(&rtlpriv->locks.rf_ps_lock);
+
+	spin_lock_irqsave(&rtlpriv->locks.lps_lock, flag);
+	rtl_ps_set_rf_state(hw, ERFSLEEP, RF_CHANGE_BY_PS, false);
+	spin_unlock_irqrestore(&rtlpriv->locks.lps_lock, flag);
+
+	if (ppsc->reg_rfps_level & RT_RF_OFF_LEVL_ASPM &&
+	    !RT_IN_PS_LEVEL(ppsc, RT_PS_LEVEL_ASPM)) {
+		rtlpriv->intf_ops->enable_aspm(hw);
+		RT_SET_PS_LEVEL(ppsc, RT_PS_LEVEL_ASPM);
+	}
+
+	/* here is power save alg, when this beacon is DTIM
+	 * we will set sleep time to dtim_period * n;
+	 * when this beacon is not DTIM, we will set sleep
+	 * time to sleep_intv = rtlpriv->psc.dtim_counter or
+	 * MAX_SW_LPS_SLEEP_INTV(default set to 5) */
+
+/*<delete in kernel start>*/
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,35))
+/*<delete in kernel end>*/
+	if (rtlpriv->psc.dtim_counter == 0) {
+		if (hw->conf.ps_dtim_period == 1)
+			sleep_intv = hw->conf.ps_dtim_period * 2;
+		else
+			sleep_intv = hw->conf.ps_dtim_period;
+	} else {
+		sleep_intv = rtlpriv->psc.dtim_counter;
+	}
+/*<delete in kernel start>*/
+#else
+	if (rtlpriv->psc.dtim_counter == 0) {
+		if (mac->vif->bss_conf.dtim_period == 1)
+			sleep_intv = mac->vif->bss_conf.dtim_period * 2;
+		else
+			sleep_intv = mac->vif->bss_conf.dtim_period;
+	} else {
+		sleep_intv = rtlpriv->psc.dtim_counter;
+	}
+#endif
+/*<delete in kernel end>*/
+
+	if (sleep_intv > MAX_SW_LPS_SLEEP_INTV)
+		sleep_intv = MAX_SW_LPS_SLEEP_INTV;
+
+	/* this print should always show dtim_counter = 0 and
+	 * sleep = dtim_period, which means we wake up before
+	 * every dtim */
+	RT_TRACE(COMP_POWER, DBG_DMESG,
+		 ("dtim_counter:%x will sleep :%d beacon_intv\n",
+		  rtlpriv->psc.dtim_counter, sleep_intv));
+
+	/* we tested that 40ms is enough for sw & hw sw delay */
+	queue_delayed_work(rtlpriv->works.rtl_wq, &rtlpriv->works.ps_rfon_wq,
+			MSECS(sleep_intv * mac->vif->bss_conf.beacon_int - 40));
+}
+
+
+void rtl_swlps_wq_callback(void *data)
+{
+	struct rtl_works *rtlworks =
+		container_of_dwork_rtl(data, struct rtl_works, ps_work);
+	struct ieee80211_hw *hw = rtlworks->hw;
+	struct rtl_priv *rtlpriv = rtl_priv(hw);
+	bool ps = false;
+
+	ps = (hw->conf.flags & IEEE80211_CONF_PS);
+
+	/* we can sleep after ps null send ok */
+	if (rtlpriv->psc.state_inap) {
+		rtl_swlps_rf_sleep(hw);
+
+		if (rtlpriv->psc.state && !ps) {
+			rtlpriv->psc.sleep_ms =
+				jiffies_to_msecs(jiffies -
+						 rtlpriv->psc.last_action);
+		}
+
+		if (ps)
+			rtlpriv->psc.last_slept = jiffies;
+
+		rtlpriv->psc.last_action = jiffies;
+		rtlpriv->psc.state = ps;
+	}
+}
+
+
+void rtl_p2p_noa_ie(struct ieee80211_hw *hw, void *data, unsigned int len)
+{
+	struct rtl_priv *rtlpriv = rtl_priv(hw);
+	struct ieee80211_mgmt *mgmt = (void *)data;
+	struct rtl_p2p_ps_info *p2pinfo = &(rtlpriv->psc.p2p_ps_info);
+	u8 *pos, *end, *ie;
+	u16 noa_len;
+	static u8 p2p_oui_ie_type[4] = {0x50, 0x6f, 0x9a, 0x09};
+	u8 noa_num, index, i, noa_index = 0;
+	bool find_p2p_ie = false, find_p2p_ps_ie = false;
+	pos = (u8 *)mgmt->u.beacon.variable;
+	end = data + len;
+	ie = NULL;
+
+	while (pos + 1 < end) {
+
+		if (pos + 2 + pos[1] > end)
+			return;
+
+		if (pos[0] == 221 && pos[1] > 4) {
+			if (memcmp(&pos[2], p2p_oui_ie_type, 4) == 0) {
+				ie = pos + 2+4;
+				break;
+			}
+		}
+		pos += 2 + pos[1];
+	}
+
+	if (ie == NULL)
+		return;
+	find_p2p_ie = true;
+	/*to find noa ie*/
+	while (ie + 1 < end) {
+		noa_len = READEF2BYTE(&ie[1]);
+		if (ie + 3 + ie[1] > end)
+			return;
+
+		if (ie[0] == 12) {
+			find_p2p_ps_ie = true;
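+			/* each NoA descriptor is 13 bytes: count/type (1) +
+			 * duration (4) + interval (4) + start time (4) */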
+			if ((noa_len - 2) % 13 != 0) {
+				RT_TRACE(COMP_INIT, DBG_LOUD,
+					 ("P2P notice of absence: "
+					  "invalid length %d\n", noa_len));
+				return;
+			} else {
+				noa_num = (noa_len - 2) / 13;
+			}
+			noa_index = ie[3];
+			if (rtlpriv->psc.p2p_ps_info.p2p_ps_mode == P2P_PS_NONE
+			    || noa_index != p2pinfo->noa_index) {
+				RT_TRACE(COMP_FW, DBG_LOUD,
+					 ("update NOA ie.\n"));
+				p2pinfo->noa_index = noa_index;
+				p2pinfo->opp_ps = (ie[4] >> 7);
+				p2pinfo->ctwindow = ie[4] & 0x7F;
+				p2pinfo->noa_num = noa_num;
+				index = 5;
+				for (i = 0; i < noa_num; i++) {
+					p2pinfo->noa_count_type[i] =
+							READEF1BYTE(ie+index);
+					index += 1;
+					p2pinfo->noa_duration[i] =
+							READEF4BYTE(ie+index);
+					index += 4;
+					p2pinfo->noa_interval[i] =
+							READEF4BYTE(ie+index);
+					index += 4;
+					p2pinfo->noa_start_time[i] =
+							READEF4BYTE(ie+index);
+					index += 4;
+				}
+
+				if (p2pinfo->opp_ps == 1) {
+					p2pinfo->p2p_ps_mode = P2P_PS_CTWINDOW;
+					/* Driver should wait for LPS
+					 * to enter CTWindow */
+					if (rtlpriv->psc.b_fw_current_inpsmode) {
+						rtl_p2p_ps_cmd(hw,
+							       P2P_PS_ENABLE);
+					}
+				} else if (p2pinfo->noa_num > 0) {
+					p2pinfo->p2p_ps_mode = P2P_PS_NOA;
+					rtl_p2p_ps_cmd(hw, P2P_PS_ENABLE);
+				} else if (p2pinfo->p2p_ps_mode > P2P_PS_NONE) {
+					rtl_p2p_ps_cmd(hw, P2P_PS_DISABLE);
+				}
+			}
+
+			break;
+		}
+		ie += 3 + noa_len;
+	}
+
+	if (find_p2p_ie) {
+		if ((p2pinfo->p2p_ps_mode > P2P_PS_NONE) &&
+		    !find_p2p_ps_ie)
+			rtl_p2p_ps_cmd(hw, P2P_PS_DISABLE);
+	}
+}
+
+void rtl_p2p_action_ie(struct ieee80211_hw *hw, void *data, unsigned int len)
+{
+	struct rtl_priv *rtlpriv = rtl_priv(hw);
+	struct ieee80211_mgmt *mgmt = (void *)data;
+	struct rtl_p2p_ps_info *p2pinfo = &(rtlpriv->psc.p2p_ps_info);
+	bool find_p2p_ie = false , find_p2p_ps_ie = false;
+	u8 noa_num, index,i, noa_index = 0;
+	u8 *pos, *end, *ie;
+	u16 noa_len;
+	static u8 p2p_oui_ie_type[4] = {0x50, 0x6f, 0x9a, 0x09};
+
+	pos = (u8 *) &mgmt->u.action.category;
+	end = data + len;
+	ie = NULL;
+
+	if (pos[0] == 0x7f) {
+		if (memcmp(&pos[1], p2p_oui_ie_type, 4) == 0)
+			ie = pos + 3 + 4;
+	}
+
+	if (ie == NULL)
+		return;
+	find_p2p_ie = true;
+
+	RT_TRACE(COMP_FW, DBG_LOUD, ("action frame find P2P IE.\n"));
+	/*to find noa ie*/
+	while (ie + 1 < end) {
+		noa_len = READEF2BYTE(&ie[1]);
+		if (ie + 3 + ie[1] > end)
+			return;
+
+		if (ie[0] == 12) {
+			RT_TRACE(COMP_FW, DBG_LOUD, ("find NOA IE.\n"));
+			RT_PRINT_DATA(rtlpriv, COMP_FW, DBG_LOUD, ("noa ie "),
+				      ie, noa_len);
+			find_p2p_ps_ie = true;
+			if ((noa_len - 2) % 13 != 0) {
+				RT_TRACE(COMP_FW, DBG_LOUD,
+					 ("P2P notice of absence: "
+					  "invalid length %d\n", noa_len));
+				return;
+			} else {
+				noa_num = (noa_len - 2) / 13;
+			}
+			noa_index = ie[3];
+			if (rtlpriv->psc.p2p_ps_info.p2p_ps_mode == P2P_PS_NONE
+			    || noa_index != p2pinfo->noa_index) {
+				p2pinfo->noa_index = noa_index;
+				p2pinfo->opp_ps = (ie[4] >> 7);
+				p2pinfo->ctwindow = ie[4] & 0x7F;
+				p2pinfo->noa_num = noa_num;
+				index = 5;
+				for (i = 0; i < noa_num; i++) {
+					p2pinfo->noa_count_type[i] =
+							READEF1BYTE(ie+index);
+					index += 1;
+					p2pinfo->noa_duration[i] =
+							READEF4BYTE(ie+index);
+					index += 4;
+					p2pinfo->noa_interval[i] =
+							READEF4BYTE(ie+index);
+					index += 4;
+					p2pinfo->noa_start_time[i] =
+							READEF4BYTE(ie+index);
+					index += 4;
+				}
+
+				if (p2pinfo->opp_ps == 1) {
+					p2pinfo->p2p_ps_mode = P2P_PS_CTWINDOW;
+					/* Driver should wait for LPS
+					 * to enter CTWindow */
+					if (rtlpriv->psc.b_fw_current_inpsmode) {
+						rtl_p2p_ps_cmd(hw,
+							       P2P_PS_ENABLE);
+					}
+				} else if (p2pinfo->noa_num > 0) {
+					p2pinfo->p2p_ps_mode = P2P_PS_NOA;
+					rtl_p2p_ps_cmd(hw, P2P_PS_ENABLE);
+				} else if (p2pinfo->p2p_ps_mode > P2P_PS_NONE) {
+					rtl_p2p_ps_cmd(hw, P2P_PS_DISABLE);
+				}
+			}
+
+			break;
+		}
+		ie += 3 + noa_len;
+	}
+
+
+
+
+void rtl_p2p_ps_cmd(struct ieee80211_hw *hw, u8 p2p_ps_state)
+{
+	struct rtl_priv *rtlpriv = rtl_priv(hw);
+	struct rtl_ps_ctl *rtlps = rtl_psc(rtl_priv(hw));
+	struct rtl_p2p_ps_info  *p2pinfo = &(rtlpriv->psc.p2p_ps_info);
+
+	RT_TRACE(COMP_FW, DBG_LOUD, (" p2p state %x\n",p2p_ps_state));
+	switch (p2p_ps_state) {
+		case P2P_PS_DISABLE:
+			p2pinfo->p2p_ps_state = p2p_ps_state;
+			rtlpriv->cfg->ops->set_hw_reg(hw,
+						   HW_VAR_H2C_FW_P2P_PS_OFFLOAD,
+						   (u8 *)(&p2p_ps_state));
+
+			p2pinfo->noa_index = 0;
+			p2pinfo->ctwindow = 0;
+			p2pinfo->opp_ps = 0;
+			p2pinfo->noa_num = 0;
+			p2pinfo->p2p_ps_mode = P2P_PS_NONE;
+			if (rtlps->b_fw_current_inpsmode == true) {
+				if (rtlps->smart_ps == 0) {
+					rtlps->smart_ps = 2;
+					rtlpriv->cfg->ops->set_hw_reg(hw,
+						    HW_VAR_H2C_FW_PWRMODE,
+						    (u8 *)(&rtlps->pwr_mode));
+				}
+
+			}
+			break;
+		case P2P_PS_ENABLE:
+			if (p2pinfo->p2p_ps_mode > P2P_PS_NONE) {
+				p2pinfo->p2p_ps_state = p2p_ps_state;
+
+				if (p2pinfo->ctwindow > 0) {
+					if (rtlps->smart_ps != 0){
+						rtlps->smart_ps = 0;
+						rtlpriv->cfg->ops->set_hw_reg(
+						    hw, HW_VAR_H2C_FW_PWRMODE,
+						    (u8 *)(&rtlps->pwr_mode));
+					}
+				}
+				rtlpriv->cfg->ops->set_hw_reg(hw,
+						HW_VAR_H2C_FW_P2P_PS_OFFLOAD,
+						(u8 *)(&p2p_ps_state));
+
+			}
+			break;
+		case P2P_PS_SCAN:
+		case P2P_PS_SCAN_DONE:
+		case P2P_PS_ALLSTASLEEP:
+			if (p2pinfo->p2p_ps_mode > P2P_PS_NONE) {
+				p2pinfo->p2p_ps_state = p2p_ps_state;
+				rtlpriv->cfg->ops->set_hw_reg(hw,
+						HW_VAR_H2C_FW_P2P_PS_OFFLOAD,
+						(u8 *)(&p2p_ps_state));
+			}
+			break;
+		default:
+			break;
+
+	}
+	RT_TRACE(COMP_FW, DBG_LOUD, ("ctwindow %x opp_ps %x\n",
+				     p2pinfo->ctwindow, p2pinfo->opp_ps));
+	RT_TRACE(COMP_FW, DBG_LOUD, ("count %x duration %x index %x interval %x"
+				     " start time %x noa num %x\n",
+				     p2pinfo->noa_count_type[0],
+				     p2pinfo->noa_duration[0],
+				     p2pinfo->noa_index,
+				     p2pinfo->noa_interval[0],
+				     p2pinfo->noa_start_time[0],
+				     p2pinfo->noa_num));
+	RT_TRACE(COMP_FW, DBG_LOUD, ("end\n"));
+}
+
+void rtl_p2p_info(struct ieee80211_hw *hw, void *data, unsigned int len)
+{
+	struct rtl_priv *rtlpriv = rtl_priv(hw);
+	struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
+	struct ieee80211_hdr *hdr = (void *) data;
+
+	if (!mac->p2p)
+		return;
+	if (mac->link_state != MAC80211_LINKED)
+		return;
+	/* min. beacon length + FCS_LEN */
+	if (len <= 40 + FCS_LEN)
+		return;
+
+	/* and only beacons from the associated BSSID, please */
+	if (!ether_addr_equal(hdr->addr3, rtlpriv->mac80211.bssid))
+		return;
+
+	/* check if this really is a beacon */
+	if (!(ieee80211_is_beacon(hdr->frame_control) ||
+	      ieee80211_is_probe_resp(hdr->frame_control) ||
+	      ieee80211_is_action(hdr->frame_control)))
+		return;
+
+	if (ieee80211_is_action(hdr->frame_control))
+		rtl_p2p_action_ie(hw, data, len - FCS_LEN);
+	else
+		rtl_p2p_noa_ie(hw, data, len - FCS_LEN);
+}
diff --git a/drivers/staging/rtl8821ae/ps.h b/drivers/staging/rtl8821ae/ps.h
new file mode 100644
index 0000000..374ed77
--- /dev/null
+++ b/drivers/staging/rtl8821ae/ps.h
@@ -0,0 +1,55 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2009-2010  Realtek Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ * The full GNU General Public License is included in this distribution in the
+ * file called LICENSE.
+ *
+ * Contact Information:
+ * wlanfae <wlanfae@realtek.com>
+ * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
+ * Hsinchu 300, Taiwan.
+ *
+ * Larry Finger <Larry.Finger@lwfinger.net>
+ *
+ *****************************************************************************/
+
+#ifndef __REALTEK_RTL_PCI_PS_H__
+#define __REALTEK_RTL_PCI_PS_H__
+
+#define MAX_SW_LPS_SLEEP_INTV	5
+
+bool rtl_ps_set_rf_state(struct ieee80211_hw *hw,
+			 enum rf_pwrstate state_toset, u32 changesource,
+			 bool protect_or_not);
+bool rtl_ps_enable_nic(struct ieee80211_hw *hw);
+bool rtl_ps_disable_nic(struct ieee80211_hw *hw);
+void rtl_ips_nic_off(struct ieee80211_hw *hw);
+void rtl_ips_nic_on(struct ieee80211_hw *hw);
+void rtl_ips_nic_off_wq_callback(void *data);
+void rtl_lps_enter(struct ieee80211_hw *hw);
+void rtl_lps_leave(struct ieee80211_hw *hw);
+
+void rtl_lps_set_psmode(struct ieee80211_hw *hw, u8 rt_psmode);
+
+void rtl_swlps_beacon(struct ieee80211_hw *hw, void *data, unsigned int len);
+void rtl_swlps_wq_callback(void *data);
+void rtl_swlps_rfon_wq_callback(void *data);
+void rtl_swlps_rf_awake(struct ieee80211_hw *hw);
+void rtl_swlps_rf_sleep(struct ieee80211_hw *hw);
+void rtl_p2p_ps_cmd(struct ieee80211_hw *hw, u8 p2p_ps_state);
+void rtl_p2p_info(struct ieee80211_hw *hw, void *data, unsigned int len);
+#endif
diff --git a/drivers/staging/rtl8821ae/rc.c b/drivers/staging/rtl8821ae/rc.c
new file mode 100644
index 0000000..d387f13
--- /dev/null
+++ b/drivers/staging/rtl8821ae/rc.c
@@ -0,0 +1,309 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2009-2010  Realtek Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ * The full GNU General Public License is included in this distribution in the
+ * file called LICENSE.
+ *
+ * Contact Information:
+ * wlanfae <wlanfae@realtek.com>
+ * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
+ * Hsinchu 300, Taiwan.
+ *
+ * Larry Finger <Larry.Finger@lwfinger.net>
+ *
+ *****************************************************************************/
+
+#include "wifi.h"
+#include "base.h"
+#include "rc.h"
+
+/*
+ * Finds the highest rate index we can use.
+ * If the skb is special data like DHCP/EAPOL, we should set it
+ * to the lowest rate CCK_1M; otherwise we set the rate to the
+ * highest rate based on the wireless mode, used for the iwconfig
+ * Tx rate display.
+ */
+static u8 _rtl_rc_get_highest_rix(struct rtl_priv *rtlpriv,
+				  struct ieee80211_sta *sta,
+				  struct sk_buff *skb, bool not_data)
+{
+	struct rtl_mac *rtlmac = rtl_mac(rtlpriv);
+	struct rtl_hal *rtlhal = rtl_hal(rtlpriv);
+	struct rtl_phy *rtlphy = &(rtlpriv->phy);
+	struct rtl_sta_info *sta_entry = NULL;
+	u8 wireless_mode = 0;
+
+	/*
+	 * this rate is not used as the true rate; the firmware
+	 * controls the rate entirely.  It is only used to:
+	 * 1. show the rate in iwconfig in B/G mode
+	 * 2. make rtl_get_tcb_desc use the user rate instead of the
+	 *    FW rate when the rate it checks is 1M.
+	 */
+	if (rtlmac->opmode == NL80211_IFTYPE_AP ||
+	    rtlmac->opmode == NL80211_IFTYPE_ADHOC ||
+	    rtlmac->opmode == NL80211_IFTYPE_MESH_POINT) {
+		if (sta) {
+			sta_entry = (struct rtl_sta_info *) sta->drv_priv;
+			wireless_mode = sta_entry->wireless_mode;
+		} else {
+			return 0;
+		}
+	} else {
+		wireless_mode = rtlmac->mode;
+	}
+
+	if (rtl_is_special_data(rtlpriv->mac80211.hw, skb, true) || not_data) {
+		return 0;
+	} else {
+		if (rtlhal->current_bandtype == BAND_ON_2_4G) {
+			if (wireless_mode == WIRELESS_MODE_B) {
+				return B_MODE_MAX_RIX;
+			} else if (wireless_mode == WIRELESS_MODE_G) {
+				return G_MODE_MAX_RIX;
+			} else {
+				if (get_rf_type(rtlphy) != RF_2T2R)
+					return N_MODE_MCS7_RIX;
+				else
+					return N_MODE_MCS15_RIX;
+			}
+		} else {
+			if (wireless_mode == WIRELESS_MODE_A) {
+				return A_MODE_MAX_RIX;
+			} else {
+				if (get_rf_type(rtlphy) != RF_2T2R)
+					return N_MODE_MCS7_RIX;
+				else
+					return N_MODE_MCS15_RIX;
+			}
+		}
+	}
+}
+
+static void _rtl_rc_rate_set_series(struct rtl_priv *rtlpriv,
+				    struct ieee80211_sta *sta,
+				    struct ieee80211_tx_rate *rate,
+				    struct ieee80211_tx_rate_control *txrc,
+				    u8 tries, char rix, int rtsctsenable,
+				    bool not_data)
+{
+	struct rtl_mac *mac = rtl_mac(rtlpriv);
+	u8 sgi_20 = 0, sgi_40 = 0;
+
+	if (sta) {
+		sgi_20 = sta->ht_cap.cap & IEEE80211_HT_CAP_SGI_20;
+		sgi_40 = sta->ht_cap.cap & IEEE80211_HT_CAP_SGI_40;
+	}
+	rate->count = tries;
+	rate->idx = rix >= 0x00 ? rix : 0x00;
+
+	if (!not_data) {
+		if (txrc->short_preamble)
+			rate->flags |= IEEE80211_TX_RC_USE_SHORT_PREAMBLE;
+		if (mac->opmode == NL80211_IFTYPE_AP ||
+			mac->opmode == NL80211_IFTYPE_ADHOC) {
+			if (sta && (sta->ht_cap.cap &
+				    IEEE80211_HT_CAP_SUP_WIDTH_20_40))
+				rate->flags |= IEEE80211_TX_RC_40_MHZ_WIDTH;
+		} else {
+			if (mac->bw_40)
+				rate->flags |= IEEE80211_TX_RC_40_MHZ_WIDTH;
+		}
+		if (sgi_20 || sgi_40)
+			rate->flags |= IEEE80211_TX_RC_SHORT_GI;
+		if (sta && sta->ht_cap.ht_supported)
+			rate->flags |= IEEE80211_TX_RC_MCS;
+	}
+}
+
+static void rtl_get_rate(void *ppriv, struct ieee80211_sta *sta,
+			 void *priv_sta,
+			 struct ieee80211_tx_rate_control *txrc)
+{
+	struct rtl_priv *rtlpriv = ppriv;
+	struct sk_buff *skb = txrc->skb;
+	struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
+	struct ieee80211_tx_rate *rates = tx_info->control.rates;
+	__le16 fc = rtl_get_fc(skb);
+	u8 try_per_rate, i, rix;
+	bool not_data = !ieee80211_is_data(fc);
+
+	if (rate_control_send_low(sta, priv_sta, txrc))
+		return;
+
+	rix = _rtl_rc_get_highest_rix(rtlpriv, sta, skb, not_data);
+	try_per_rate = 1;
+	_rtl_rc_rate_set_series(rtlpriv, sta, &rates[0], txrc,
+				try_per_rate, rix, 1, not_data);
+
+	if (!not_data) {
+		for (i = 1; i < 4; i++)
+			_rtl_rc_rate_set_series(rtlpriv, sta, &rates[i],
+						txrc, i, (rix - i), 1,
+						not_data);
+	}
+}
+
+static bool _rtl_tx_aggr_check(struct rtl_priv *rtlpriv,
+			       struct rtl_sta_info *sta_entry, u16 tid)
+{
+	struct rtl_mac *mac = rtl_mac(rtlpriv);
+
+	if (mac->act_scanning)
+		return false;
+
+	if (mac->opmode == NL80211_IFTYPE_STATION &&
+	    mac->cnt_after_linked < 3)
+		return false;
+
+	if (sta_entry->tids[tid].agg.agg_state == RTL_AGG_STOP)
+		return true;
+
+	return false;
+}
+
+/* mac80211 Rate Control callbacks */
+static void rtl_tx_status(void *ppriv,
+			  struct ieee80211_supported_band *sband,
+			  struct ieee80211_sta *sta, void *priv_sta,
+			  struct sk_buff *skb)
+{
+	struct rtl_priv *rtlpriv = ppriv;
+	struct rtl_mac *mac = rtl_mac(rtlpriv);
+	struct ieee80211_hdr *hdr = rtl_get_hdr(skb);
+	__le16 fc = rtl_get_fc(skb);
+	struct rtl_sta_info *sta_entry;
+
+	if (!priv_sta || !ieee80211_is_data(fc))
+		return;
+
+	if (rtl_is_special_data(mac->hw, skb, true))
+		return;
+
+	if (is_multicast_ether_addr(ieee80211_get_DA(hdr)) ||
+	    is_broadcast_ether_addr(ieee80211_get_DA(hdr)))
+		return;
+
+	if (sta) {
+		/* Check if aggregation has to be enabled for this tid */
+		sta_entry = (struct rtl_sta_info *) sta->drv_priv;
+		if (sta->ht_cap.ht_supported &&
+		    skb->protocol != cpu_to_be16(ETH_P_PAE)) {
+			if (ieee80211_is_data_qos(fc)) {
+				u8 tid = rtl_get_tid(skb);
+				if (_rtl_tx_aggr_check(rtlpriv, sta_entry,
+						       tid)) {
+					sta_entry->tids[tid].agg.agg_state =
+						RTL_AGG_PROGRESS;
+					/*<delete in kernel start>*/
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,38))
+					/*<delete in kernel end>*/
+					ieee80211_start_tx_ba_session(sta, tid,
+								      5000);
+					/*<delete in kernel start>*/
+#else
+					ieee80211_start_tx_ba_session(sta, tid);
+#endif
+					/*<delete in kernel end>*/
+				}
+			}
+		}
+	}
+}
+
+static void rtl_rate_init(void *ppriv,
+			  struct ieee80211_supported_band *sband,
+			  struct cfg80211_chan_def *chandef,
+			  struct ieee80211_sta *sta, void *priv_sta)
+{
+}
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(3,5,0))
+static void rtl_rate_update(void *ppriv,
+			    struct ieee80211_supported_band *sband,
+			    struct ieee80211_sta *sta, void *priv_sta,
+			    u32 changed,
+			    enum nl80211_channel_type oper_chan_type)
+{
+}
+#else
+static void rtl_rate_update(void *ppriv,
+			    struct ieee80211_supported_band *sband,
+			    struct cfg80211_chan_def *chandef,
+			    struct ieee80211_sta *sta, void *priv_sta,
+			    u32 changed)
+{
+}
+#endif
+static void *rtl_rate_alloc(struct ieee80211_hw *hw, struct dentry *debugfsdir)
+{
+	struct rtl_priv *rtlpriv = rtl_priv(hw);
+	return rtlpriv;
+}
+
+static void rtl_rate_free(void *rtlpriv)
+{
+}
+
+static void *rtl_rate_alloc_sta(void *ppriv,
+				struct ieee80211_sta *sta, gfp_t gfp)
+{
+	struct rtl_priv *rtlpriv = ppriv;
+	struct rtl_rate_priv *rate_priv;
+
+	rate_priv = kzalloc(sizeof(struct rtl_rate_priv), gfp);
+	if (!rate_priv) {
+		RT_TRACE(COMP_ERR, DBG_EMERG,
+			 ("Unable to allocate private rc structure\n"));
+		return NULL;
+	}
+
+	rtlpriv->rate_priv = rate_priv;
+
+	return rate_priv;
+}
+
+static void rtl_rate_free_sta(void *rtlpriv,
+			      struct ieee80211_sta *sta, void *priv_sta)
+{
+	struct rtl_rate_priv *rate_priv = priv_sta;
+	kfree(rate_priv);
+}
+
+static struct rate_control_ops rtl_rate_ops = {
+	.module = NULL,
+	.name = "rtl_rc",
+	.alloc = rtl_rate_alloc,
+	.free = rtl_rate_free,
+	.alloc_sta = rtl_rate_alloc_sta,
+	.free_sta = rtl_rate_free_sta,
+	.rate_init = rtl_rate_init,
+	.rate_update = rtl_rate_update,
+	.tx_status = rtl_tx_status,
+	.get_rate = rtl_get_rate,
+};
+
+int rtl_rate_control_register(void)
+{
+	return ieee80211_rate_control_register(&rtl_rate_ops);
+}
+
+void rtl_rate_control_unregister(void)
+{
+	ieee80211_rate_control_unregister(&rtl_rate_ops);
+}
diff --git a/drivers/staging/rtl8821ae/rc.h b/drivers/staging/rtl8821ae/rc.h
new file mode 100644
index 0000000..4afa2c2
--- /dev/null
+++ b/drivers/staging/rtl8821ae/rc.h
@@ -0,0 +1,47 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2009-2010  Realtek Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ * The full GNU General Public License is included in this distribution in the
+ * file called LICENSE.
+ *
+ * Contact Information:
+ * wlanfae <wlanfae@realtek.com>
+ * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
+ * Hsinchu 300, Taiwan.
+ *
+ * Larry Finger <Larry.Finger@lwfinger.net>
+ *
+ *****************************************************************************/
+
+#ifndef __RTL_RC_H__
+#define __RTL_RC_H__
+
+#define B_MODE_MAX_RIX 3
+#define G_MODE_MAX_RIX 11
+#define A_MODE_MAX_RIX 7
+
+/* in mac80211 mcs0-mcs15 is idx0-idx15*/
+#define N_MODE_MCS7_RIX 7
+#define N_MODE_MCS15_RIX 15
+
+struct rtl_rate_priv {
+	u8 ht_cap;
+};
+
+int rtl_rate_control_register(void);
+void rtl_rate_control_unregister(void);
+#endif
diff --git a/drivers/staging/rtl8821ae/regd.c b/drivers/staging/rtl8821ae/regd.c
new file mode 100644
index 0000000..d89f15c
--- /dev/null
+++ b/drivers/staging/rtl8821ae/regd.c
@@ -0,0 +1,503 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2009-2010  Realtek Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ * The full GNU General Public License is included in this distribution in the
+ * file called LICENSE.
+ *
+ * Contact Information:
+ * wlanfae <wlanfae@realtek.com>
+ * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
+ * Hsinchu 300, Taiwan.
+ *
+ * Larry Finger <Larry.Finger@lwfinger.net>
+ *
+ *****************************************************************************/
+
+#include "wifi.h"
+#include "regd.h"
+
+static struct country_code_to_enum_rd allCountries[] = {
+	{COUNTRY_CODE_FCC, "US"},
+	{COUNTRY_CODE_IC, "US"},
+	{COUNTRY_CODE_ETSI, "EC"},
+	{COUNTRY_CODE_SPAIN, "EC"},
+	{COUNTRY_CODE_FRANCE, "EC"},
+	{COUNTRY_CODE_MKK, "JP"},
+	{COUNTRY_CODE_MKK1, "JP"},
+	{COUNTRY_CODE_ISRAEL, "EC"},
+	{COUNTRY_CODE_TELEC, "JP"},
+	{COUNTRY_CODE_MIC, "JP"},
+	{COUNTRY_CODE_GLOBAL_DOMAIN, "JP"},
+	{COUNTRY_CODE_WORLD_WIDE_13, "EC"},
+	{COUNTRY_CODE_TELEC_NETGEAR, "EC"},
+};
+
+/*
+ *Only these channels allow active
+ *scan on all world regulatory domains
+ */
+#define RTL819x_2GHZ_CH01_11	\
+	REG_RULE(2412-10, 2462+10, 40, 0, 20, 0)
+
+/*
+ *We enable active scan on these on a
+ *case-by-case basis by regulatory domain
+ */
+#define RTL819x_2GHZ_CH12_13	\
+	REG_RULE(2467-10, 2472+10, 40, 0, 20,\
+	NL80211_RRF_PASSIVE_SCAN)
+
+#define RTL819x_2GHZ_CH14	\
+	REG_RULE(2484-10, 2484+10, 40, 0, 20, \
+	NL80211_RRF_PASSIVE_SCAN | \
+	NL80211_RRF_NO_OFDM)
+
+/* 5G chan 36 - chan 64*/
+#define RTL819x_5GHZ_5150_5350	\
+	REG_RULE(5150-10, 5350+10, 40, 0, 30, \
+	NL80211_RRF_PASSIVE_SCAN | \
+	NL80211_RRF_NO_IBSS)
+
+/* 5G chan 100 - chan 165*/
+#define RTL819x_5GHZ_5470_5850	\
+	REG_RULE(5470-10, 5850+10, 40, 0, 30, \
+	NL80211_RRF_PASSIVE_SCAN | \
+	NL80211_RRF_NO_IBSS)
+
+/* 5G chan 149 - chan 165*/
+#define RTL819x_5GHZ_5725_5850	\
+	REG_RULE(5725-10, 5850+10, 40, 0, 30, \
+	NL80211_RRF_PASSIVE_SCAN | \
+	NL80211_RRF_NO_IBSS)
+
+#define RTL819x_5GHZ_ALL	\
+	RTL819x_5GHZ_5150_5350, RTL819x_5GHZ_5470_5850
+
+static const struct ieee80211_regdomain rtl_regdom_11 = {
+	.n_reg_rules = 1,
+	.alpha2 = "99",
+	.reg_rules = {
+		      RTL819x_2GHZ_CH01_11,
+		      }
+};
+
+static const struct ieee80211_regdomain rtl_regdom_12_13 = {
+	.n_reg_rules = 2,
+	.alpha2 = "99",
+	.reg_rules = {
+		      RTL819x_2GHZ_CH01_11,
+		      RTL819x_2GHZ_CH12_13,
+		      }
+};
+
+static const struct ieee80211_regdomain rtl_regdom_no_midband = {
+	.n_reg_rules = 3,
+	.alpha2 = "99",
+	.reg_rules = {
+		      RTL819x_2GHZ_CH01_11,
+		      RTL819x_5GHZ_5150_5350,
+		      RTL819x_5GHZ_5725_5850,
+		      }
+};
+
+static const struct ieee80211_regdomain rtl_regdom_60_64 = {
+	.n_reg_rules = 3,
+	.alpha2 = "99",
+	.reg_rules = {
+		      RTL819x_2GHZ_CH01_11,
+		      RTL819x_2GHZ_CH12_13,
+		      RTL819x_5GHZ_5725_5850,
+		      }
+};
+
+static const struct ieee80211_regdomain rtl_regdom_14_60_64 = {
+	.n_reg_rules = 4,
+	.alpha2 = "99",
+	.reg_rules = {
+		      RTL819x_2GHZ_CH01_11,
+		      RTL819x_2GHZ_CH12_13,
+		      RTL819x_2GHZ_CH14,
+		      RTL819x_5GHZ_5725_5850,
+		      }
+};
+
+static const struct ieee80211_regdomain rtl_regdom_14 = {
+	.n_reg_rules = 3,
+	.alpha2 = "99",
+	.reg_rules = {
+		      RTL819x_2GHZ_CH01_11,
+		      RTL819x_2GHZ_CH12_13,
+		      RTL819x_2GHZ_CH14,
+		      }
+};
+
+static bool _rtl_is_radar_freq(u16 center_freq)
+{
+	return (center_freq >= 5260 && center_freq <= 5700);
+}
+
+static void _rtl_reg_apply_beaconing_flags(struct wiphy *wiphy,
+					   enum nl80211_reg_initiator initiator)
+{
+	enum ieee80211_band band;
+	struct ieee80211_supported_band *sband;
+	const struct ieee80211_reg_rule *reg_rule;
+	struct ieee80211_channel *ch;
+	unsigned int i;
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(3,9,0))
+	u32 bandwidth = 0;
+	int r;
+#endif
+
+	for (band = 0; band < IEEE80211_NUM_BANDS; band++) {
+
+		if (!wiphy->bands[band])
+			continue;
+
+		sband = wiphy->bands[band];
+
+		for (i = 0; i < sband->n_channels; i++) {
+			ch = &sband->channels[i];
+			if (_rtl_is_radar_freq(ch->center_freq) ||
+			    (ch->flags & IEEE80211_CHAN_RADAR))
+				continue;
+			if (initiator == NL80211_REGDOM_SET_BY_COUNTRY_IE) {
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,9,0))
+				reg_rule = freq_reg_info(wiphy, ch->center_freq);
+				if (IS_ERR(reg_rule))
+					continue;
+#else
+				r = freq_reg_info(wiphy, ch->center_freq,
+						  bandwidth, &reg_rule);
+				if (r)
+					continue;
+#endif
+
+				/*
+				 *If 11d had a rule for this channel ensure
+				 *we enable adhoc/beaconing if it allows us to
+				 *use it. Note that we would have disabled it
+				 *by applying our static world regdomain by
+				 *default during init, prior to calling our
+				 *regulatory_hint().
+				 */
+
+				if (!(reg_rule->flags & NL80211_RRF_NO_IBSS))
+					ch->flags &= ~IEEE80211_CHAN_NO_IBSS;
+				if (!(reg_rule->flags &
+				      NL80211_RRF_PASSIVE_SCAN))
+					ch->flags &=
+					    ~IEEE80211_CHAN_PASSIVE_SCAN;
+			} else {
+				if (ch->beacon_found)
+					ch->flags &= ~(IEEE80211_CHAN_NO_IBSS |
+						   IEEE80211_CHAN_PASSIVE_SCAN);
+			}
+		}
+	}
+}
+
+/* Allows active scan on Ch 12 and 13 */
+static void _rtl_reg_apply_active_scan_flags(struct wiphy *wiphy,
+					     enum nl80211_reg_initiator
+					     initiator)
+{
+	struct ieee80211_supported_band *sband;
+	struct ieee80211_channel *ch;
+	const struct ieee80211_reg_rule *reg_rule;
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(3,9,0))
+	u32 bandwidth = 0;
+	int r;
+#endif
+
+	if (!wiphy->bands[IEEE80211_BAND_2GHZ])
+		return;
+	sband = wiphy->bands[IEEE80211_BAND_2GHZ];
+
+	/*
+	 *If no country IE has been received, always enable active scan
+	 *on these channels. This is only done for specific regulatory SKUs.
+	 */
+	if (initiator != NL80211_REGDOM_SET_BY_COUNTRY_IE) {
+		ch = &sband->channels[11];	/* CH 12 */
+		if (ch->flags & IEEE80211_CHAN_PASSIVE_SCAN)
+			ch->flags &= ~IEEE80211_CHAN_PASSIVE_SCAN;
+		ch = &sband->channels[12];	/* CH 13 */
+		if (ch->flags & IEEE80211_CHAN_PASSIVE_SCAN)
+			ch->flags &= ~IEEE80211_CHAN_PASSIVE_SCAN;
+		return;
+	}
+
+	/*
+	 *If a country IE has been received, check its rule for this
+	 *channel first before enabling active scan. The passive scan
+	 *would have been enforced by the initial processing of our
+	 *custom regulatory domain.
+	 */
+
+	ch = &sband->channels[11];	/* CH 12 */
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,9,0))
+	reg_rule = freq_reg_info(wiphy, ch->center_freq);
+	if (!IS_ERR(reg_rule)) {
+#else
+	r = freq_reg_info(wiphy, ch->center_freq, bandwidth, &reg_rule);
+	if (!r) {
+#endif
+		if (!(reg_rule->flags & NL80211_RRF_PASSIVE_SCAN))
+			if (ch->flags & IEEE80211_CHAN_PASSIVE_SCAN)
+				ch->flags &= ~IEEE80211_CHAN_PASSIVE_SCAN;
+	}
+
+	ch = &sband->channels[12];	/* CH 13 */
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,9,0))
+	reg_rule = freq_reg_info(wiphy, ch->center_freq);
+	if (!IS_ERR(reg_rule)) {
+#else
+	r = freq_reg_info(wiphy, ch->center_freq, bandwidth, &reg_rule);
+	if (!r) {
+#endif
+		if (!(reg_rule->flags & NL80211_RRF_PASSIVE_SCAN))
+			if (ch->flags & IEEE80211_CHAN_PASSIVE_SCAN)
+				ch->flags &= ~IEEE80211_CHAN_PASSIVE_SCAN;
+	}
+}
+
+/*
+ *Always apply Radar/DFS rules on
+ *freq range 5260 MHz - 5700 MHz
+ */
+static void _rtl_reg_apply_radar_flags(struct wiphy *wiphy)
+{
+	struct ieee80211_supported_band *sband;
+	struct ieee80211_channel *ch;
+	unsigned int i;
+
+	if (!wiphy->bands[IEEE80211_BAND_5GHZ])
+		return;
+
+	sband = wiphy->bands[IEEE80211_BAND_5GHZ];
+
+	for (i = 0; i < sband->n_channels; i++) {
+		ch = &sband->channels[i];
+		if (!_rtl_is_radar_freq(ch->center_freq))
+			continue;
+
+		/*
+		 *We always enable radar detection/DFS on this
+		 *frequency range. Additionally, on this frequency range we:
+		 *- disable active scanning if STA mode does not yet
+		 * support DFS
+		 *- disable adhoc on these frequencies if adhoc mode does
+		 * not support DFS yet
+		 *- do not allow AP mode if AP mode does not yet support
+		 * radar detection/DFS
+		 */
+		if (!(ch->flags & IEEE80211_CHAN_DISABLED))
+			ch->flags |= IEEE80211_CHAN_RADAR |
+			    IEEE80211_CHAN_NO_IBSS |
+			    IEEE80211_CHAN_PASSIVE_SCAN;
+	}
+}
+
+static void _rtl_reg_apply_world_flags(struct wiphy *wiphy,
+				       enum nl80211_reg_initiator initiator,
+				       struct rtl_regulatory *reg)
+{
+	_rtl_reg_apply_beaconing_flags(wiphy, initiator);
+	_rtl_reg_apply_active_scan_flags(wiphy, initiator);
+}
+
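+/*
+ * Walk every registered channel; nothing is logged yet, so this is
+ * effectively a placeholder for channel-map debugging.
+ */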
+static void _rtl_dump_channel_map(struct wiphy *wiphy)
+{
+	enum ieee80211_band band;
+	struct ieee80211_supported_band *sband;
+	struct ieee80211_channel *ch;
+	unsigned int i;
+
+	for (band = 0; band < IEEE80211_NUM_BANDS; band++) {
+		if (!wiphy->bands[band])
+			continue;
+		sband = wiphy->bands[band];
+		for (i = 0; i < sband->n_channels; i++)
+			ch = &sband->channels[i];
+	}
+}
+
+static int _rtl_reg_notifier_apply(struct wiphy *wiphy,
+				   struct regulatory_request *request,
+				   struct rtl_regulatory *reg)
+{
+	/* We always apply this */
+	_rtl_reg_apply_radar_flags(wiphy);
+
+	switch (request->initiator) {
+	case NL80211_REGDOM_SET_BY_DRIVER:
+	case NL80211_REGDOM_SET_BY_CORE:
+	case NL80211_REGDOM_SET_BY_USER:
+		break;
+	case NL80211_REGDOM_SET_BY_COUNTRY_IE:
+		_rtl_reg_apply_world_flags(wiphy, request->initiator, reg);
+		break;
+	}
+
+	_rtl_dump_channel_map(wiphy);
+
+	return 0;
+}
+
+static const struct ieee80211_regdomain *_rtl_regdomain_select(
+						struct rtl_regulatory *reg)
+{
+	switch (reg->country_code) {
+	case COUNTRY_CODE_FCC:
+		return &rtl_regdom_no_midband;
+	case COUNTRY_CODE_IC:
+		return &rtl_regdom_11;
+	case COUNTRY_CODE_ETSI:
+	case COUNTRY_CODE_TELEC_NETGEAR:
+		return &rtl_regdom_60_64;
+	case COUNTRY_CODE_SPAIN:
+	case COUNTRY_CODE_FRANCE:
+	case COUNTRY_CODE_ISRAEL:
+	case COUNTRY_CODE_WORLD_WIDE_13:
+		return &rtl_regdom_12_13;
+	case COUNTRY_CODE_MKK:
+	case COUNTRY_CODE_MKK1:
+	case COUNTRY_CODE_TELEC:
+	case COUNTRY_CODE_MIC:
+		return &rtl_regdom_14_60_64;
+	case COUNTRY_CODE_GLOBAL_DOMAIN:
+		return &rtl_regdom_14;
+	default:
+		return &rtl_regdom_no_midband;
+	}
+}
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,9,0))
+static int _rtl_regd_init_wiphy(struct rtl_regulatory *reg,
+				struct wiphy *wiphy,
+				void (*reg_notifier) (struct wiphy * wiphy,
+						     struct regulatory_request *
+						     request))
+#else
+static int _rtl_regd_init_wiphy(struct rtl_regulatory *reg,
+				struct wiphy *wiphy,
+				int (*reg_notifier) (struct wiphy * wiphy,
+						     struct regulatory_request *
+						     request))
+#endif
+{
+	const struct ieee80211_regdomain *regd;
+
+	wiphy->reg_notifier = reg_notifier;
+
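+	/* Use our own regdomain, do not enforce it strictly, and let
+	 * beacon hints re-enable channels where beacons are heard.
+	 */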
+	wiphy->flags |= WIPHY_FLAG_CUSTOM_REGULATORY;
+	wiphy->flags &= ~WIPHY_FLAG_STRICT_REGULATORY;
+	wiphy->flags &= ~WIPHY_FLAG_DISABLE_BEACON_HINTS;
+
+	regd = _rtl_regdomain_select(reg);
+	wiphy_apply_custom_regulatory(wiphy, regd);
+	_rtl_reg_apply_radar_flags(wiphy);
+	_rtl_reg_apply_world_flags(wiphy, NL80211_REGDOM_SET_BY_DRIVER, reg);
+	return 0;
+}
+
+static struct country_code_to_enum_rd *_rtl_regd_find_country(u16 countrycode)
+{
+	int i;
+
+	for (i = 0; i < ARRAY_SIZE(allCountries); i++) {
+		if (allCountries[i].countrycode == countrycode)
+			return &allCountries[i];
+	}
+	return NULL;
+}
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,9,0))
+int rtl_regd_init(struct ieee80211_hw *hw,
+		  void (*reg_notifier) (struct wiphy *wiphy,
+				        struct regulatory_request *request))
+#else
+int rtl_regd_init(struct ieee80211_hw *hw,
+		  int (*reg_notifier) (struct wiphy *wiphy,
+				       struct regulatory_request *request))
+#endif
+{
+	struct rtl_priv *rtlpriv = rtl_priv(hw);
+	struct wiphy *wiphy = hw->wiphy;
+	struct country_code_to_enum_rd *country = NULL;
+
+	if (wiphy == NULL)
+		return -EINVAL;
+
+	/* init country_code from efuse channel plan */
+	rtlpriv->regd.country_code = rtlpriv->efuse.channel_plan;
+
+	RT_TRACE(COMP_REGD, DBG_TRACE,
+		 (KERN_DEBUG "rtl: EEPROM regdomain: 0x%0x\n",
+		  rtlpriv->regd.country_code));
+
+	if (rtlpriv->regd.country_code >= COUNTRY_CODE_MAX) {
+		RT_TRACE(COMP_REGD, DBG_DMESG,
+			 (KERN_DEBUG "rtl: EEPROM indicates invalid country code, "
+			  "world wide 13 should be used\n"));
+
+		rtlpriv->regd.country_code = COUNTRY_CODE_WORLD_WIDE_13;
+	}
+
+	country = _rtl_regd_find_country(rtlpriv->regd.country_code);
+
+	if (country) {
+		rtlpriv->regd.alpha2[0] = country->iso_name[0];
+		rtlpriv->regd.alpha2[1] = country->iso_name[1];
+	} else {
+		rtlpriv->regd.alpha2[0] = '0';
+		rtlpriv->regd.alpha2[1] = '0';
+	}
+
+	RT_TRACE(COMP_REGD, DBG_TRACE,
+		 (KERN_DEBUG "rtl: Country alpha2 being used: %c%c\n",
+		  rtlpriv->regd.alpha2[0], rtlpriv->regd.alpha2[1]));
+
+	_rtl_regd_init_wiphy(&rtlpriv->regd, wiphy, reg_notifier);
+
+	return 0;
+}
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,9,0))
+void rtl_reg_notifier(struct wiphy *wiphy, struct regulatory_request *request)
+{
+	struct ieee80211_hw *hw = wiphy_to_ieee80211_hw(wiphy);
+	struct rtl_priv *rtlpriv = rtl_priv(hw);
+
+	RT_TRACE(COMP_REGD, DBG_LOUD, ("\n"));
+
+	_rtl_reg_notifier_apply(wiphy, request, &rtlpriv->regd);
+}
+#else
+int rtl_reg_notifier(struct wiphy *wiphy, struct regulatory_request *request)
+{
+	struct ieee80211_hw *hw = wiphy_to_ieee80211_hw(wiphy);
+	struct rtl_priv *rtlpriv = rtl_priv(hw);
+
+	RT_TRACE(COMP_REGD, DBG_LOUD, ("\n"));
+
+	return _rtl_reg_notifier_apply(wiphy, request, &rtlpriv->regd);
+}
+#endif
diff --git a/drivers/staging/rtl8821ae/regd.h b/drivers/staging/rtl8821ae/regd.h
new file mode 100644
index 0000000..abc60ab
--- /dev/null
+++ b/drivers/staging/rtl8821ae/regd.h
@@ -0,0 +1,75 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2009-2010  Realtek Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ * The full GNU General Public License is included in this distribution in the
+ * file called LICENSE.
+ *
+ * Contact Information:
+ * wlanfae <wlanfae@realtek.com>
+ * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
+ * Hsinchu 300, Taiwan.
+ *
+ * Larry Finger <Larry.Finger@lwfinger.net>
+ *
+ *****************************************************************************/
+
+#ifndef __RTL_REGD_H__
+#define __RTL_REGD_H__
+
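+/*
+ * Channel and wiphy flag bits used by regd.c; these are local
+ * definitions rather than the ones from the cfg80211 headers.
+ */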
+#define IEEE80211_CHAN_NO_IBSS		1<<2
+#define IEEE80211_CHAN_PASSIVE_SCAN	1<<1
+#define WIPHY_FLAG_CUSTOM_REGULATORY	BIT(0)
+#define WIPHY_FLAG_STRICT_REGULATORY	BIT(1)
+#define WIPHY_FLAG_DISABLE_BEACON_HINTS	BIT(2)
+
+struct country_code_to_enum_rd {
+	u16 countrycode;
+	const char *iso_name;
+};
+
+enum country_code_type_t {
+	COUNTRY_CODE_FCC = 0,
+	COUNTRY_CODE_IC = 1,
+	COUNTRY_CODE_ETSI = 2,
+	COUNTRY_CODE_SPAIN = 3,
+	COUNTRY_CODE_FRANCE = 4,
+	COUNTRY_CODE_MKK = 5,
+	COUNTRY_CODE_MKK1 = 6,
+	COUNTRY_CODE_ISRAEL = 7,
+	COUNTRY_CODE_TELEC = 8,
+	COUNTRY_CODE_MIC = 9,
+	COUNTRY_CODE_GLOBAL_DOMAIN = 10,
+	COUNTRY_CODE_WORLD_WIDE_13 = 11,
+	COUNTRY_CODE_TELEC_NETGEAR = 12,
+
+	/*add new channel plan above this line */
+	COUNTRY_CODE_MAX
+};
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,9,0))
+int rtl_regd_init(struct ieee80211_hw *hw,
+		  void (*reg_notifier) (struct wiphy *wiphy,
+				        struct regulatory_request *request));
+void rtl_reg_notifier(struct wiphy *wiphy, struct regulatory_request *request);
+#else
+int rtl_regd_init(struct ieee80211_hw *hw,
+		  int (*reg_notifier) (struct wiphy *wiphy,
+				       struct regulatory_request *request));
+int rtl_reg_notifier(struct wiphy *wiphy, struct regulatory_request *request);
+#endif
+
+#endif
diff --git a/drivers/staging/rtl8821ae/rtl8821ae/btc.h b/drivers/staging/rtl8821ae/rtl8821ae/btc.h
new file mode 100644
index 0000000..74ac189
--- /dev/null
+++ b/drivers/staging/rtl8821ae/rtl8821ae/btc.h
@@ -0,0 +1,87 @@
+
+/******************************************************************************
+ **
+ ** Copyright(c) 2009-2010  Realtek Corporation.
+ **
+ ** This program is free software; you can redistribute it and/or modify it
+ ** under the terms of version 2 of the GNU General Public License as
+ ** published by the Free Software Foundation.
+ **
+ ** This program is distributed in the hope that it will be useful, but WITHOUT
+ ** ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ ** FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ ** more details.
+ **
+ ** You should have received a copy of the GNU General Public License along with
+ ** this program; if not, write to the Free Software Foundation, Inc.,
+ ** 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ **
+ ** The full GNU General Public License is included in this distribution in the
+ ** file called LICENSE.
+ **
+ ** Contact Information:
+ ** wlanfae <wlanfae@realtek.com>
+ ** Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
+ ** Hsinchu 300, Taiwan.
+ ** Larry Finger <Larry.Finger@lwfinger.net>
+ **
+ ******************************************************************************/
+
+#ifndef __RTL8821AE_BTC_H__
+#define __RTL8821AE_BTC_H__
+
+#include "../wifi.h"
+#include "hal_bt_coexist.h"
+
+struct bt_coexist_c2h_info {
+	u8 no_parse_c2h;
+	u8 has_c2h;
+};
+
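+/*
+ * Dynamic BT-coexistence state for the 8821AE: TDMA settings, DAC swing
+ * levels, PTA register values and the related on/off flags.
+ */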
+struct btdm_8821ae {
+	bool b_all_off;
+	bool b_agc_table_en;
+	bool b_adc_back_off_on;
+	bool b2_ant_hid_en;
+	bool b_low_penalty_rate_adaptive;
+	bool b_rf_rx_lpf_shrink;
+	bool b_reject_aggre_pkt;
+	bool b_tra_tdma_on;
+	u8 tra_tdma_nav;
+	u8 tra_tdma_ant;
+	bool b_tdma_on;
+	u8 tdma_ant;
+	u8 tdma_nav;
+	u8 tdma_dac_swing;
+	u8 fw_dac_swing_lvl;
+	bool b_ps_tdma_on;
+	u8 ps_tdma_byte[5];
+	bool b_pta_on;
+	u32 val_0x6c0;
+	u32 val_0x6c8;
+	u32 val_0x6cc;
+	bool b_sw_dac_swing_on;
+	u32 sw_dac_swing_lvl;
+	u32 wlan_act_hi;
+	u32 wlan_act_lo;
+	u32 bt_retry_index;
+	bool b_dec_bt_pwr;
+	bool b_ignore_wlan_act;
+};
+
+struct bt_coexist_8821ae {
+	u32 high_priority_tx;
+	u32 high_priority_rx;
+	u32 low_priority_tx;
+	u32 low_priority_rx;
+	u8 c2h_bt_info;
+	bool b_c2h_bt_info_req_sent;
+	bool b_c2h_bt_inquiry_page;
+	u32 bt_inq_page_start_time;
+	u8 bt_retry_cnt;
+	u8 c2h_bt_info_original;
+	u8 bt_inquiry_page_cnt;
+	struct btdm_8821ae btdm;
+};
+
+#endif
diff --git a/drivers/staging/rtl8821ae/rtl8821ae/def.h b/drivers/staging/rtl8821ae/rtl8821ae/def.h
new file mode 100644
index 0000000..72ebdea
--- /dev/null
+++ b/drivers/staging/rtl8821ae/rtl8821ae/def.h
@@ -0,0 +1,442 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2009-2010  Realtek Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ * The full GNU General Public License is included in this distribution in the
+ * file called LICENSE.
+ *
+ * Contact Information:
+ * wlanfae <wlanfae@realtek.com>
+ * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
+ * Hsinchu 300, Taiwan.
+ *
+ * Larry Finger <Larry.Finger@lwfinger.net>
+ *
+ *****************************************************************************/
+
+#ifndef __RTL8821AE_DEF_H__
+#define __RTL8821AE_DEF_H__
+
+/*--------------------------Define -------------------------------------------*/
+/* BIT 7 HT Rate*/
+/*TxHT = 0*/
+#define	MGN_1M				0x02
+#define	MGN_2M				0x04
+#define	MGN_5_5M			0x0b
+#define	MGN_11M				0x16
+
+#define	MGN_6M				0x0c
+#define	MGN_9M				0x12
+#define	MGN_12M				0x18
+#define	MGN_18M				0x24
+#define	MGN_24M				0x30
+#define	MGN_36M				0x48
+#define	MGN_48M				0x60
+#define	MGN_54M				0x6c
+
+// TxHT = 1
+#define	MGN_MCS0			0x80
+#define	MGN_MCS1			0x81
+#define	MGN_MCS2			0x82
+#define	MGN_MCS3			0x83
+#define	MGN_MCS4			0x84
+#define	MGN_MCS5			0x85
+#define	MGN_MCS6			0x86
+#define	MGN_MCS7			0x87
+#define	MGN_MCS8			0x88
+#define	MGN_MCS9			0x89
+#define	MGN_MCS10			0x8a
+#define	MGN_MCS11			0x8b
+#define	MGN_MCS12			0x8c
+#define	MGN_MCS13			0x8d
+#define	MGN_MCS14			0x8e
+#define	MGN_MCS15			0x8f
+//VHT rate
+#define	MGN_VHT1SS_MCS0		0x90
+#define	MGN_VHT1SS_MCS1		0x91
+#define	MGN_VHT1SS_MCS2		0x92
+#define	MGN_VHT1SS_MCS3		0x93
+#define	MGN_VHT1SS_MCS4		0x94
+#define	MGN_VHT1SS_MCS5		0x95
+#define	MGN_VHT1SS_MCS6		0x96
+#define	MGN_VHT1SS_MCS7		0x97
+#define	MGN_VHT1SS_MCS8		0x98
+#define	MGN_VHT1SS_MCS9		0x99
+#define	MGN_VHT2SS_MCS0		0x9a
+#define	MGN_VHT2SS_MCS1		0x9b
+#define	MGN_VHT2SS_MCS2		0x9c
+#define	MGN_VHT2SS_MCS3		0x9d
+#define	MGN_VHT2SS_MCS4		0x9e
+#define	MGN_VHT2SS_MCS5		0x9f
+#define	MGN_VHT2SS_MCS6		0xa0
+#define	MGN_VHT2SS_MCS7		0xa1
+#define	MGN_VHT2SS_MCS8		0xa2
+#define	MGN_VHT2SS_MCS9		0xa3
+
+#define	MGN_VHT3SS_MCS0		0xa4
+#define	MGN_VHT3SS_MCS1		0xa5
+#define	MGN_VHT3SS_MCS2		0xa6
+#define	MGN_VHT3SS_MCS3		0xa7
+#define	MGN_VHT3SS_MCS4		0xa8
+#define	MGN_VHT3SS_MCS5		0xa9
+#define	MGN_VHT3SS_MCS6		0xaa
+#define	MGN_VHT3SS_MCS7		0xab
+#define	MGN_VHT3SS_MCS8		0xac
+#define	MGN_VHT3SS_MCS9		0xad
+
+#define	MGN_MCS0_SG			0xc0
+#define	MGN_MCS1_SG			0xc1
+#define	MGN_MCS2_SG			0xc2
+#define	MGN_MCS3_SG			0xc3
+#define	MGN_MCS4_SG			0xc4
+#define	MGN_MCS5_SG			0xc5
+#define	MGN_MCS6_SG			0xc6
+#define	MGN_MCS7_SG			0xc7
+#define	MGN_MCS8_SG			0xc8
+#define	MGN_MCS9_SG			0xc9
+#define	MGN_MCS10_SG		0xca
+#define	MGN_MCS11_SG		0xcb
+#define	MGN_MCS12_SG		0xcc
+#define	MGN_MCS13_SG		0xcd
+#define	MGN_MCS14_SG		0xce
+#define	MGN_MCS15_SG		0xcf
+
+#define	MGN_UNKNOWN			0xff
+
+
+/* 30 ms */
+#define	WIFI_NAV_UPPER_US				30000
+#define HAL_92C_NAV_UPPER_UNIT			128
+
+#define HAL_RETRY_LIMIT_INFRA				48
+#define HAL_RETRY_LIMIT_AP_ADHOC			7
+
+#define RESET_DELAY_8185					20
+
+#define RT_IBSS_INT_MASKS	(IMR_BCNINT | IMR_TBDOK | IMR_TBDER)
+#define RT_AC_INT_MASKS		(IMR_VIDOK | IMR_VODOK | IMR_BEDOK|IMR_BKDOK)
+
+#define NUM_OF_FIRMWARE_QUEUE				10
+#define NUM_OF_PAGES_IN_FW					0x100
+#define NUM_OF_PAGE_IN_FW_QUEUE_BK			0x07
+#define NUM_OF_PAGE_IN_FW_QUEUE_BE			0x07
+#define NUM_OF_PAGE_IN_FW_QUEUE_VI			0x07
+#define NUM_OF_PAGE_IN_FW_QUEUE_VO			0x07
+#define NUM_OF_PAGE_IN_FW_QUEUE_HCCA		0x0
+#define NUM_OF_PAGE_IN_FW_QUEUE_CMD			0x0
+#define NUM_OF_PAGE_IN_FW_QUEUE_MGNT		0x02
+#define NUM_OF_PAGE_IN_FW_QUEUE_HIGH		0x02
+#define NUM_OF_PAGE_IN_FW_QUEUE_BCN			0x2
+#define NUM_OF_PAGE_IN_FW_QUEUE_PUB			0xA1
+
+#define NUM_OF_PAGE_IN_FW_QUEUE_BK_DTM		0x026
+#define NUM_OF_PAGE_IN_FW_QUEUE_BE_DTM		0x048
+#define NUM_OF_PAGE_IN_FW_QUEUE_VI_DTM		0x048
+#define NUM_OF_PAGE_IN_FW_QUEUE_VO_DTM		0x026
+#define NUM_OF_PAGE_IN_FW_QUEUE_PUB_DTM		0x00
+
+#define MAX_RX_DMA_BUFFER_SIZE				0x3E80
+
+
+#define MAX_LINES_HWCONFIG_TXT				1000
+#define MAX_BYTES_LINE_HWCONFIG_TXT			256
+
+#define SW_THREE_WIRE						0
+#define HW_THREE_WIRE						2
+
+#define BT_DEMO_BOARD						0
+#define BT_QA_BOARD							1
+#define BT_FPGA								2
+
+#define HAL_PRIME_CHNL_OFFSET_DONT_CARE		0
+#define HAL_PRIME_CHNL_OFFSET_LOWER			1
+#define HAL_PRIME_CHNL_OFFSET_UPPER			2
+
+#define MAX_H2C_QUEUE_NUM					10
+
+#define RX_MPDU_QUEUE						0
+#define RX_CMD_QUEUE						1
+#define RX_MAX_QUEUE						2
+#define AC2QUEUEID(_AC)						(_AC)
+
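+/*
+ * C2H (chip-to-host) RX command header: little-endian bit fields holding
+ * the command length, element ID, sequence number and continuation flag,
+ * followed by the command payload.
+ */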
+#define	C2H_RX_CMD_HDR_LEN					8
+#define	GET_C2H_CMD_CMD_LEN(__prxhdr)		\
+	LE_BITS_TO_4BYTE((__prxhdr), 0, 16)
+#define	GET_C2H_CMD_ELEMENT_ID(__prxhdr)	\
+	LE_BITS_TO_4BYTE((__prxhdr), 16, 8)
+#define	GET_C2H_CMD_CMD_SEQ(__prxhdr)		\
+	LE_BITS_TO_4BYTE((__prxhdr), 24, 7)
+#define	GET_C2H_CMD_CONTINUE(__prxhdr)		\
+	LE_BITS_TO_4BYTE((__prxhdr), 31, 1)
+#define	GET_C2H_CMD_CONTENT(__prxhdr)		\
+	((u8*)(__prxhdr) + C2H_RX_CMD_HDR_LEN)
+
+#define	GET_C2H_CMD_FEEDBACK_ELEMENT_ID(__pcmdfbhdr)	\
+	LE_BITS_TO_4BYTE((__pcmdfbhdr), 0, 8)
+#define	GET_C2H_CMD_FEEDBACK_CCX_LEN(__pcmdfbhdr)		\
+	LE_BITS_TO_4BYTE((__pcmdfbhdr), 8, 8)
+#define	GET_C2H_CMD_FEEDBACK_CCX_CMD_CNT(__pcmdfbhdr)	\
+	LE_BITS_TO_4BYTE((__pcmdfbhdr), 16, 16)
+#define	GET_C2H_CMD_FEEDBACK_CCX_MAC_ID(__pcmdfbhdr)	\
+	LE_BITS_TO_4BYTE(((__pcmdfbhdr) + 4), 0, 5)
+#define	GET_C2H_CMD_FEEDBACK_CCX_VALID(__pcmdfbhdr)		\
+	LE_BITS_TO_4BYTE(((__pcmdfbhdr) + 4), 7, 1)
+#define	GET_C2H_CMD_FEEDBACK_CCX_RETRY_CNT(__pcmdfbhdr)	\
+	LE_BITS_TO_4BYTE(((__pcmdfbhdr) + 4), 8, 5)
+#define	GET_C2H_CMD_FEEDBACK_CCX_TOK(__pcmdfbhdr)		\
+	LE_BITS_TO_4BYTE(((__pcmdfbhdr) + 4), 15, 1)
+#define	GET_C2H_CMD_FEEDBACK_CCX_QSEL(__pcmdfbhdr)		\
+	LE_BITS_TO_4BYTE(((__pcmdfbhdr) + 4), 16, 4)
+#define	GET_C2H_CMD_FEEDBACK_CCX_SEQ(__pcmdfbhdr)		\
+	LE_BITS_TO_4BYTE(((__pcmdfbhdr) + 4), 20, 12)
+
+#define CHIP_BONDING_IDENTIFIER(_value)	(((_value)>>22)&0x3)
+
+#define CHIP_8812				BIT(2)
+#define CHIP_8821				(BIT(0)|BIT(2))
+
+#define CHIP_8821A						(BIT(0)|BIT(2))
+#define NORMAL_CHIP  					BIT(3)
+#define RF_TYPE_1T1R					(~(BIT(4)|BIT(5)|BIT(6)))
+#define RF_TYPE_1T2R					BIT(4)
+#define RF_TYPE_2T2R					BIT(5)
+#define CHIP_VENDOR_UMC					BIT(7)
+#define B_CUT_VERSION					BIT(12)
+#define C_CUT_VERSION					BIT(13)
+#define D_CUT_VERSION					((BIT(12)|BIT(13)))
+#define E_CUT_VERSION					BIT(14)
+#define	RF_RL_ID						(BIT(31)|BIT(30)|BIT(29)|BIT(28))
+
+
+
+enum version_8821ae {
+	VERSION_TEST_CHIP_1T1R_8812 = 0x0004,
+	VERSION_TEST_CHIP_2T2R_8812 = 0x0024,
+	VERSION_NORMAL_TSMC_CHIP_1T1R_8812 = 0x100c,
+	VERSION_NORMAL_TSMC_CHIP_2T2R_8812 = 0x102c,
+	VERSION_NORMAL_TSMC_CHIP_1T1R_8812_C_CUT = 0x200c,
+	VERSION_NORMAL_TSMC_CHIP_2T2R_8812_C_CUT = 0x202c,
+	VERSION_TEST_CHIP_8821 = 0x0005,
+	VERSION_NORMAL_TSMC_CHIP_8821 = 0x000d,
+	VERSION_NORMAL_TSMC_CHIP_8821_B_CUT = 0x100d,
+	VERSION_UNKNOWN = 0xFF,
+};
+
+enum vht_data_sc{
+	VHT_DATA_SC_DONOT_CARE = 0,
+	VHT_DATA_SC_20_UPPER_OF_80MHZ = 1,
+	VHT_DATA_SC_20_LOWER_OF_80MHZ = 2,
+	VHT_DATA_SC_20_UPPERST_OF_80MHZ = 3,
+	VHT_DATA_SC_20_LOWEST_OF_80MHZ = 4,
+	VHT_DATA_SC_20_RECV1 = 5,
+	VHT_DATA_SC_20_RECV2 = 6,
+	VHT_DATA_SC_20_RECV3 = 7,
+	VHT_DATA_SC_20_RECV4 = 8,
+	VHT_DATA_SC_40_UPPER_OF_80MHZ = 9,
+	VHT_DATA_SC_40_LOWER_OF_80MHZ = 10,
+};
+
+
+/* MASK */
+#define IC_TYPE_MASK					(BIT(0)|BIT(1)|BIT(2))
+#define CHIP_TYPE_MASK 					BIT(3)
+#define RF_TYPE_MASK					(BIT(4)|BIT(5)|BIT(6))
+#define MANUFACTUER_MASK				BIT(7)
+#define ROM_VERSION_MASK				(BIT(11)|BIT(10)|BIT(9)|BIT(8))
+#define CUT_VERSION_MASK				(BIT(15)|BIT(14)|BIT(13)|BIT(12))
+
+/* Get element */
+#define GET_CVID_IC_TYPE(version)			((version) & IC_TYPE_MASK)
+#define GET_CVID_CHIP_TYPE(version)			((version) & CHIP_TYPE_MASK)
+#define GET_CVID_RF_TYPE(version)			((version) & RF_TYPE_MASK)
+#define GET_CVID_MANUFACTUER(version)		((version) & MANUFACTUER_MASK)
+#define GET_CVID_ROM_VERSION(version)		((version) & ROM_VERSION_MASK)
+#define GET_CVID_CUT_VERSION(version)		((version) & CUT_VERSION_MASK)
+
+#define IS_1T1R(version)			((GET_CVID_RF_TYPE(version))? false : true)
+#define IS_1T2R(version)			((GET_CVID_RF_TYPE(version) == RF_TYPE_1T2R)\
+									? true : false)
+#define IS_2T2R(version)			((GET_CVID_RF_TYPE(version) == RF_TYPE_2T2R)\
+									? true : false)
+
+#define IS_8812_SERIES(version)			((GET_CVID_IC_TYPE(version) == CHIP_8812)? \
+										true : false)
+#define IS_8821_SERIES(version)			((GET_CVID_IC_TYPE(version) == CHIP_8821)? \
+										true : false)
+
+#define IS_VENDOR_8812A_TEST_CHIP(version)	((IS_8812_SERIES(version)) ? \
+												((IS_NORMAL_CHIP(version)) ? \
+												false : true) : false)
+#define IS_VENDOR_8812A_MP_CHIP(version)		((IS_8812_SERIES(version)) ? \
+												((IS_NORMAL_CHIP(version)) ? \
+												true : false) : false)
+#define IS_VENDOR_8812A_C_CUT(version)		((IS_8812_SERIES(version)) ? \
+											((GET_CVID_CUT_VERSION(version) == C_CUT_VERSION) ? \
+											true : false) : false)
+
+#define IS_VENDOR_8821A_TEST_CHIP(version)	((IS_8821_SERIES(version)) ? \
+												((IS_NORMAL_CHIP(version)) ? \
+												false : true) : false)
+#define IS_VENDOR_8821A_MP_CHIP(version)		((IS_8821_SERIES(version)) ? \
+												((IS_NORMAL_CHIP(version)) ? \
+												true : false) : false)
+#define IS_VENDOR_8821A_B_CUT(version)		((IS_8821_SERIES(version)) ? \
+												((GET_CVID_CUT_VERSION(version) == B_CUT_VERSION) ? \
+												true : false) : false)
+
+
+enum rf_optype {
+	RF_OP_BY_SW_3WIRE = 0,
+	RF_OP_BY_FW,
+	RF_OP_MAX
+};
+
+enum rf_power_state {
+	RF_ON,
+	RF_OFF,
+	RF_SLEEP,
+	RF_SHUT_DOWN,
+};
+
+enum power_save_mode {
+	POWER_SAVE_MODE_ACTIVE,
+	POWER_SAVE_MODE_SAVE,
+};
+
+enum power_polocy_config {
+	POWERCFG_MAX_POWER_SAVINGS,
+	POWERCFG_GLOBAL_POWER_SAVINGS,
+	POWERCFG_LOCAL_POWER_SAVINGS,
+	POWERCFG_LENOVO,
+};
+
+enum interface_select_pci {
+	INTF_SEL1_MINICARD = 0,
+	INTF_SEL0_PCIE = 1,
+	INTF_SEL2_RSV = 2,
+	INTF_SEL3_RSV = 3,
+};
+
+enum hal_fw_c2h_cmd_id {
+	HAL_FW_C2H_CMD_Read_MACREG = 0,
+	HAL_FW_C2H_CMD_Read_BBREG = 1,
+	HAL_FW_C2H_CMD_Read_RFREG = 2,
+	HAL_FW_C2H_CMD_Read_EEPROM = 3,
+	HAL_FW_C2H_CMD_Read_EFUSE = 4,
+	HAL_FW_C2H_CMD_Read_CAM = 5,
+	HAL_FW_C2H_CMD_Get_BasicRate = 6,
+	HAL_FW_C2H_CMD_Get_DataRate = 7,
+	HAL_FW_C2H_CMD_Survey = 8,
+	HAL_FW_C2H_CMD_SurveyDone = 9,
+	HAL_FW_C2H_CMD_JoinBss = 10,
+	HAL_FW_C2H_CMD_AddSTA = 11,
+	HAL_FW_C2H_CMD_DelSTA = 12,
+	HAL_FW_C2H_CMD_AtimDone = 13,
+	HAL_FW_C2H_CMD_TX_Report = 14,
+	HAL_FW_C2H_CMD_CCX_Report = 15,
+	HAL_FW_C2H_CMD_DTM_Report = 16,
+	HAL_FW_C2H_CMD_TX_Rate_Statistics = 17,
+	HAL_FW_C2H_CMD_C2HLBK = 18,
+	HAL_FW_C2H_CMD_C2HDBG = 19,
+	HAL_FW_C2H_CMD_C2HFEEDBACK = 20,
+	HAL_FW_C2H_CMD_MAX
+};
+
+enum rtl_desc_qsel {
+	QSLT_BK = 0x2,
+	QSLT_BE = 0x0,
+	QSLT_VI = 0x5,
+	QSLT_VO = 0x7,
+	QSLT_BEACON = 0x10,
+	QSLT_HIGH = 0x11,
+	QSLT_MGNT = 0x12,
+	QSLT_CMD = 0x13,
+};
+
+enum rtl_desc8821ae_rate {
+	DESC_RATE1M = 0x00,
+	DESC_RATE2M = 0x01,
+	DESC_RATE5_5M = 0x02,
+	DESC_RATE11M = 0x03,
+
+	DESC_RATE6M = 0x04,
+	DESC_RATE9M = 0x05,
+	DESC_RATE12M = 0x06,
+	DESC_RATE18M = 0x07,
+	DESC_RATE24M = 0x08,
+	DESC_RATE36M = 0x09,
+	DESC_RATE48M = 0x0a,
+	DESC_RATE54M = 0x0b,
+
+	DESC_RATEMCS0 = 0x0c,
+	DESC_RATEMCS1 = 0x0d,
+	DESC_RATEMCS2 = 0x0e,
+	DESC_RATEMCS3 = 0x0f,
+	DESC_RATEMCS4 = 0x10,
+	DESC_RATEMCS5 = 0x11,
+	DESC_RATEMCS6 = 0x12,
+	DESC_RATEMCS7 = 0x13,
+	DESC_RATEMCS8 = 0x14,
+	DESC_RATEMCS9 = 0x15,
+	DESC_RATEMCS10 = 0x16,
+	DESC_RATEMCS11 = 0x17,
+	DESC_RATEMCS12 = 0x18,
+	DESC_RATEMCS13 = 0x19,
+	DESC_RATEMCS14 = 0x1a,
+	DESC_RATEMCS15 = 0x1b,
+	DESC_RATEVHT1SS_MCS0 = 0x1c,
+	DESC_RATEVHT1SS_MCS1 = 0x1d,
+	DESC_RATEVHT1SS_MCS2 = 0x1e,
+	DESC_RATEVHT1SS_MCS3 = 0x1f,
+	DESC_RATEVHT1SS_MCS4 = 0x20,
+	DESC_RATEVHT1SS_MCS5 = 0x21,
+	DESC_RATEVHT1SS_MCS6 = 0x22,
+	DESC_RATEVHT1SS_MCS7 = 0x23,
+	DESC_RATEVHT1SS_MCS8 = 0x24,
+	DESC_RATEVHT1SS_MCS9 = 0x25,
+	DESC_RATEVHT2SS_MCS0 = 0x26,
+	DESC_RATEVHT2SS_MCS1 = 0x27,
+	DESC_RATEVHT2SS_MCS2 = 0x28,
+	DESC_RATEVHT2SS_MCS3 = 0x29,
+	DESC_RATEVHT2SS_MCS4 = 0x2a,
+	DESC_RATEVHT2SS_MCS5 = 0x2b,
+	DESC_RATEVHT2SS_MCS6 = 0x2c,
+	DESC_RATEVHT2SS_MCS7 = 0x2d,
+	DESC_RATEVHT2SS_MCS8 = 0x2e,
+	DESC_RATEVHT2SS_MCS9 = 0x2f,
+};
+
+enum rx_packet_type{
+	NORMAL_RX,
+	TX_REPORT1,
+	TX_REPORT2,
+	HIS_REPORT,
+	C2H_PACKET,
+};
+
+struct phy_sts_cck_8821ae_t {
+	u8 adc_pwdb_X[4];
+	u8 sq_rpt;
+	u8 cck_agc_rpt;
+};
+
+struct h2c_cmd_8821ae {
+	u8 element_id;
+	u32 cmd_len;
+	u8 *p_cmdbuffer;
+};
+
+#endif
diff --git a/drivers/staging/rtl8821ae/rtl8821ae/dm.c b/drivers/staging/rtl8821ae/rtl8821ae/dm.c
new file mode 100644
index 0000000..8634206
--- /dev/null
+++ b/drivers/staging/rtl8821ae/rtl8821ae/dm.c
@@ -0,0 +1,3045 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2009-2010  Realtek Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ * The full GNU General Public License is included in this distribution in the
+ * file called LICENSE.
+ *
+ * Contact Information:
+ * wlanfae <wlanfae@realtek.com>
+ * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
+ * Hsinchu 300, Taiwan.
+ *
+ * Larry Finger <Larry.Finger@lwfinger.net>
+ *
+ *****************************************************************************/
+
+#include "../wifi.h"
+#include "../base.h"
+#include "../pci.h"
+#include "reg.h"
+#include "def.h"
+#include "phy.h"
+#include "dm.h"
+#include "fw.h"
+#include "trx.h"
+#include "../btcoexist/rtl_btc.h"
+
+struct dig_t dm_digtable;
+static struct ps_t dm_pstable;
+
+static const u32 rtl8812ae_txscaling_table[TXSCALE_TABLE_SIZE] = {
+	0x081, // 0,  -12.0dB
+	0x088, // 1,  -11.5dB
+	0x090, // 2,  -11.0dB
+	0x099, // 3,  -10.5dB
+	0x0A2, // 4,  -10.0dB
+	0x0AC, // 5,  -9.5dB
+	0x0B6, // 6,  -9.0dB
+	0x0C0, // 7,  -8.5dB
+	0x0CC, // 8,  -8.0dB
+	0x0D8, // 9,  -7.5dB
+	0x0E5, // 10, -7.0dB
+	0x0F2, // 11, -6.5dB
+	0x101, // 12, -6.0dB
+	0x110, // 13, -5.5dB
+	0x120, // 14, -5.0dB
+	0x131, // 15, -4.5dB
+	0x143, // 16, -4.0dB
+	0x156, // 17, -3.5dB
+	0x16A, // 18, -3.0dB
+	0x180, // 19, -2.5dB
+	0x197, // 20, -2.0dB
+	0x1AF, // 21, -1.5dB
+	0x1C8, // 22, -1.0dB
+	0x1E3, // 23, -0.5dB
+	0x200, // 24, +0  dB
+	0x21E, // 25, +0.5dB
+	0x23E, // 26, +1.0dB
+	0x261, // 27, +1.5dB
+	0x285, // 28, +2.0dB
+	0x2AB, // 29, +2.5dB
+	0x2D3, // 30, +3.0dB
+	0x2FE, // 31, +3.5dB
+	0x32B, // 32, +4.0dB
+	0x35C, // 33, +4.5dB
+	0x38E, // 34, +5.0dB
+	0x3C4, // 35, +5.5dB
+	0x3FE  // 36, +6.0dB
+};
+
+static const u32 rtl8821ae_txscaling_table[TXSCALE_TABLE_SIZE] = {
+	0x081, // 0,  -12.0dB
+	0x088, // 1,  -11.5dB
+	0x090, // 2,  -11.0dB
+	0x099, // 3,  -10.5dB
+	0x0A2, // 4,  -10.0dB
+	0x0AC, // 5,  -9.5dB
+	0x0B6, // 6,  -9.0dB
+	0x0C0, // 7,  -8.5dB
+	0x0CC, // 8,  -8.0dB
+	0x0D8, // 9,  -7.5dB
+	0x0E5, // 10, -7.0dB
+	0x0F2, // 11, -6.5dB
+	0x101, // 12, -6.0dB
+	0x110, // 13, -5.5dB
+	0x120, // 14, -5.0dB
+	0x131, // 15, -4.5dB
+	0x143, // 16, -4.0dB
+	0x156, // 17, -3.5dB
+	0x16A, // 18, -3.0dB
+	0x180, // 19, -2.5dB
+	0x197, // 20, -2.0dB
+	0x1AF, // 21, -1.5dB
+	0x1C8, // 22, -1.0dB
+	0x1E3, // 23, -0.5dB
+	0x200, // 24, +0  dB
+	0x21E, // 25, +0.5dB
+	0x23E, // 26, +1.0dB
+	0x261, // 27, +1.5dB
+	0x285, // 28, +2.0dB
+	0x2AB, // 29, +2.5dB
+	0x2D3, // 30, +3.0dB
+	0x2FE, // 31, +3.5dB
+	0x32B, // 32, +4.0dB
+	0x35C, // 33, +4.5dB
+	0x38E, // 34, +5.0dB
+	0x3C4, // 35, +5.5dB
+	0x3FE  // 36, +6.0dB
+};
+
+static const u32 ofdmswing_table[] = {
+	0x0b40002d, // 0,  -15.0dB
+	0x0c000030, // 1,  -14.5dB
+	0x0cc00033, // 2,  -14.0dB
+	0x0d800036, // 3,  -13.5dB
+	0x0e400039, // 4,  -13.0dB
+	0x0f00003c, // 5,  -12.5dB
+	0x10000040, // 6,  -12.0dB
+	0x11000044, // 7,  -11.5dB
+	0x12000048, // 8,  -11.0dB
+	0x1300004c, // 9,  -10.5dB
+	0x14400051, // 10, -10.0dB
+	0x15800056, // 11, -9.5dB
+	0x16c0005b, // 12, -9.0dB
+	0x18000060, // 13, -8.5dB
+	0x19800066, // 14, -8.0dB
+	0x1b00006c, // 15, -7.5dB
+	0x1c800072, // 16, -7.0dB
+	0x1e400079, // 17, -6.5dB
+	0x20000080, // 18, -6.0dB
+	0x22000088, // 19, -5.5dB
+	0x24000090, // 20, -5.0dB
+	0x26000098, // 21, -4.5dB
+	0x288000a2, // 22, -4.0dB
+	0x2ac000ab, // 23, -3.5dB
+	0x2d4000b5, // 24, -3.0dB
+	0x300000c0, // 25, -2.5dB
+	0x32c000cb, // 26, -2.0dB
+	0x35c000d7, // 27, -1.5dB
+	0x390000e4, // 28, -1.0dB
+	0x3c8000f2, // 29, -0.5dB
+	0x40000100, // 30, +0dB
+	0x43c0010f, // 31, +0.5dB
+	0x47c0011f, // 32, +1.0dB
+	0x4c000130, // 33, +1.5dB
+	0x50800142, // 34, +2.0dB
+	0x55400155, // 35, +2.5dB
+	0x5a400169, // 36, +3.0dB
+	0x5fc0017f, // 37, +3.5dB
+	0x65400195, // 38, +4.0dB
+	0x6b8001ae, // 39, +4.5dB
+	0x71c001c7, // 40, +5.0dB
+	0x788001e2, // 41, +5.5dB
+	0x7f8001fe  // 42, +6.0dB
+};
+
+static const u8 cckswing_table_ch1ch13[CCK_TABLE_SIZE][8] = {
+	{0x09, 0x08, 0x07, 0x06, 0x04, 0x03, 0x01, 0x01},	//  0, -16.0dB
+	{0x09, 0x09, 0x08, 0x06, 0x05, 0x03, 0x01, 0x01},	//  1, -15.5dB
+	{0x0a, 0x09, 0x08, 0x07, 0x05, 0x03, 0x02, 0x01},	//  2, -15.0dB
+	{0x0a, 0x0a, 0x09, 0x07, 0x05, 0x03, 0x02, 0x01},	//  3, -14.5dB
+	{0x0b, 0x0a, 0x09, 0x08, 0x06, 0x04, 0x02, 0x01},	//  4, -14.0dB
+	{0x0b, 0x0b, 0x0a, 0x08, 0x06, 0x04, 0x02, 0x01},	//  5, -13.5dB
+	{0x0c, 0x0c, 0x0a, 0x09, 0x06, 0x04, 0x02, 0x01},	//  6, -13.0dB
+	{0x0d, 0x0c, 0x0b, 0x09, 0x07, 0x04, 0x02, 0x01},	//  7, -12.5dB
+	{0x0d, 0x0d, 0x0c, 0x0a, 0x07, 0x05, 0x02, 0x01},	//  8, -12.0dB
+	{0x0e, 0x0e, 0x0c, 0x0a, 0x08, 0x05, 0x02, 0x01},	//  9, -11.5dB
+	{0x0f, 0x0f, 0x0d, 0x0b, 0x08, 0x05, 0x03, 0x01},	// 10, -11.0dB
+	{0x10, 0x10, 0x0e, 0x0b, 0x08, 0x05, 0x03, 0x01},	// 11, -10.5dB
+	{0x11, 0x11, 0x0f, 0x0c, 0x09, 0x06, 0x03, 0x01},	// 12, -10.0dB
+	{0x12, 0x12, 0x0f, 0x0c, 0x09, 0x06, 0x03, 0x01},	// 13, -9.5dB
+	{0x13, 0x13, 0x10, 0x0d, 0x0a, 0x06, 0x03, 0x01},	// 14, -9.0dB
+	{0x14, 0x14, 0x11, 0x0e, 0x0b, 0x07, 0x03, 0x02},	// 15, -8.5dB
+	{0x16, 0x15, 0x12, 0x0f, 0x0b, 0x07, 0x04, 0x01},	// 16, -8.0dB
+	{0x17, 0x16, 0x13, 0x10, 0x0c, 0x08, 0x04, 0x02},	// 17, -7.5dB
+	{0x18, 0x17, 0x15, 0x11, 0x0c, 0x08, 0x04, 0x02},	// 18, -7.0dB
+	{0x1a, 0x19, 0x16, 0x12, 0x0d, 0x09, 0x04, 0x02},	// 19, -6.5dB
+	{0x1b, 0x1a, 0x17, 0x13, 0x0e, 0x09, 0x04, 0x02},	// 20, -6.0dB
+	{0x1d, 0x1c, 0x18, 0x14, 0x0f, 0x0a, 0x05, 0x02},	// 21, -5.5dB
+	{0x1f, 0x1e, 0x1a, 0x15, 0x10, 0x0a, 0x05, 0x02},	// 22, -5.0dB
+	{0x20, 0x20, 0x1b, 0x16, 0x11, 0x08, 0x05, 0x02},	// 23, -4.5dB
+	{0x22, 0x21, 0x1d, 0x18, 0x11, 0x0b, 0x06, 0x02},	// 24, -4.0dB
+	{0x24, 0x23, 0x1f, 0x19, 0x13, 0x0c, 0x06, 0x03},	// 25, -3.5dB
+	{0x26, 0x25, 0x21, 0x1b, 0x14, 0x0d, 0x06, 0x03},	// 26, -3.0dB
+	{0x28, 0x28, 0x22, 0x1c, 0x15, 0x0d, 0x07, 0x03},	// 27, -2.5dB
+	{0x2b, 0x2a, 0x25, 0x1e, 0x16, 0x0e, 0x07, 0x03},	// 28, -2.0dB
+	{0x2d, 0x2d, 0x27, 0x1f, 0x18, 0x0f, 0x08, 0x03},	// 29, -1.5dB
+	{0x30, 0x2f, 0x29, 0x21, 0x19, 0x10, 0x08, 0x03},	// 30, -1.0dB
+	{0x33, 0x32, 0x2b, 0x23, 0x1a, 0x11, 0x08, 0x04},	// 31, -0.5dB
+	{0x36, 0x35, 0x2e, 0x25, 0x1c, 0x12, 0x09, 0x04} 	// 32, +0dB
+};
+
+static const u8 cckswing_table_ch14[CCK_TABLE_SIZE][8] = {
+	{0x09, 0x08, 0x07, 0x04, 0x00, 0x00, 0x00, 0x00},	//  0, -16.0dB
+	{0x09, 0x09, 0x08, 0x05, 0x00, 0x00, 0x00, 0x00},	//  1, -15.5dB
+	{0x0a, 0x09, 0x08, 0x05, 0x00, 0x00, 0x00, 0x00},	//  2, -15.0dB
+	{0x0a, 0x0a, 0x09, 0x05, 0x00, 0x00, 0x00, 0x00},	//  3, -14.5dB
+	{0x0b, 0x0a, 0x09, 0x05, 0x00, 0x00, 0x00, 0x00},	//  4, -14.0dB
+	{0x0b, 0x0b, 0x0a, 0x06, 0x00, 0x00, 0x00, 0x00},	//  5, -13.5dB
+	{0x0c, 0x0c, 0x0a, 0x06, 0x00, 0x00, 0x00, 0x00},	//  6, -13.0dB
+	{0x0d, 0x0c, 0x0b, 0x06, 0x00, 0x00, 0x00, 0x00},	//  7, -12.5dB
+	{0x0d, 0x0d, 0x0c, 0x07, 0x00, 0x00, 0x00, 0x00},	//  8, -12.0dB
+	{0x0e, 0x0e, 0x0c, 0x07, 0x00, 0x00, 0x00, 0x00},	//  9, -11.5dB
+	{0x0f, 0x0f, 0x0d, 0x08, 0x00, 0x00, 0x00, 0x00},	// 10, -11.0dB
+	{0x10, 0x10, 0x0e, 0x08, 0x00, 0x00, 0x00, 0x00},	// 11, -10.5dB
+	{0x11, 0x11, 0x0f, 0x09, 0x00, 0x00, 0x00, 0x00},	// 12, -10.0dB
+	{0x12, 0x12, 0x0f, 0x09, 0x00, 0x00, 0x00, 0x00},	// 13, -9.5dB
+	{0x13, 0x13, 0x10, 0x0a, 0x00, 0x00, 0x00, 0x00},	// 14, -9.0dB
+	{0x14, 0x14, 0x11, 0x0a, 0x00, 0x00, 0x00, 0x00},	// 15, -8.5dB
+	{0x16, 0x15, 0x12, 0x0b, 0x00, 0x00, 0x00, 0x00},	// 16, -8.0dB
+	{0x17, 0x16, 0x13, 0x0b, 0x00, 0x00, 0x00, 0x00},	// 17, -7.5dB
+	{0x18, 0x17, 0x15, 0x0c, 0x00, 0x00, 0x00, 0x00},	// 18, -7.0dB
+	{0x1a, 0x19, 0x16, 0x0d, 0x00, 0x00, 0x00, 0x00},	// 19, -6.5dB
+	{0x1b, 0x1a, 0x17, 0x0e, 0x00, 0x00, 0x00, 0x00},	// 20, -6.0dB
+	{0x1d, 0x1c, 0x18, 0x0e, 0x00, 0x00, 0x00, 0x00},	// 21, -5.5dB
+	{0x1f, 0x1e, 0x1a, 0x0f, 0x00, 0x00, 0x00, 0x00},	// 22, -5.0dB
+	{0x20, 0x20, 0x1b, 0x10, 0x00, 0x00, 0x00, 0x00},	// 23, -4.5dB
+	{0x22, 0x21, 0x1d, 0x11, 0x00, 0x00, 0x00, 0x00},	// 24, -4.0dB
+	{0x24, 0x23, 0x1f, 0x12, 0x00, 0x00, 0x00, 0x00},	// 25, -3.5dB
+	{0x26, 0x25, 0x21, 0x13, 0x00, 0x00, 0x00, 0x00},	// 26, -3.0dB
+	{0x28, 0x28, 0x24, 0x14, 0x00, 0x00, 0x00, 0x00},	// 27, -2.5dB
+	{0x2b, 0x2a, 0x25, 0x15, 0x00, 0x00, 0x00, 0x00},	// 28, -2.0dB
+	{0x2d, 0x2d, 0x17, 0x17, 0x00, 0x00, 0x00, 0x00},	// 29, -1.5dB
+	{0x30, 0x2f, 0x29, 0x18, 0x00, 0x00, 0x00, 0x00},	// 30, -1.0dB
+	{0x33, 0x32, 0x2b, 0x19, 0x00, 0x00, 0x00, 0x00},	// 31, -0.5dB
+	{0x36, 0x35, 0x2e, 0x1b, 0x00, 0x00, 0x00, 0x00} 	// 32, +0dB
+};
+
+static const u32 edca_setting_dl[PEER_MAX] = {
+	0xa44f,		/* 0 UNKNOWN */
+	0x5ea44f,	/* 1 REALTEK_90 */
+	0x5e4322,	/* 2 REALTEK_92SE */
+	0x5ea42b,	/* 3 BROAD */
+	0xa44f,		/* 4 RAL */
+	0xa630,		/* 5 ATH */
+	0x5ea630,	/* 6 CISCO */
+	0x5ea42b,	/* 7 MARVELL */
+};
+
+static const u32 edca_setting_ul[PEER_MAX] = {
+	0x5e4322,	/* 0 UNKNOWN */
+	0xa44f,		/* 1 REALTEK_90 */
+	0x5ea44f,	/* 2 REALTEK_92SE */
+	0x5ea32b,	/* 3 BROAD */
+	0x5ea422,	/* 4 RAL */
+	0x5ea322,	/* 5 ATH */
+	0x3ea430,	/* 6 CISCO */
+	0x5ea44f,	/* 7 MARV */
+};
+
+static u8 rtl8818e_delta_swing_table_idx_24gb_p_txpwrtrack[] =
+	{0, 0, 0, 0, 1, 1, 2, 2, 3, 3, 4, 4, 4,  4,  4,  4,  4,  4,  5,  5,  7,  7,  8,  8,  8,  9,  9,  9,  9,  9};
+static u8 rtl8818e_delta_swing_table_idx_24gb_n_txpwrtrack[] =
+	{0, 0, 0, 2, 2, 3, 3, 4, 4, 4, 4, 5, 5,  6,  6,  7,  7,  7,  7,  8,  8,  9,  9, 10, 10, 10, 11, 11, 11, 11};
+
+
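+/*
+ * TX power tracking: per band/path lookup tables translating the thermal
+ * meter delta (the table index) into a BB swing index adjustment.
+ */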
+u8 rtl8812ae_delta_swing_table_idx_24gb_n_txpwrtrack[]    =
+	{0, 1, 1, 1, 2, 2, 2, 3, 3,  3,  4,  4,  5,  5,  5,  6,  6,  6,  7,  8,  9,  9,  9,  9, 10, 10, 10, 10, 11, 11};
+u8 rtl8812ae_delta_swing_table_idx_24gb_p_txpwrtrack[]    =
+	{0, 0, 1, 1, 2, 2, 2, 2, 3,  3,  3,  4,  4,  5,  5,  6,  6,  6,  7,  7,  7,  8,  8,  8,  9,  9,  9,  9,  9,  9};
+u8 rtl8812ae_delta_swing_table_idx_24ga_n_txpwrtrack[]    =
+	{0, 1, 1, 1, 2, 2, 2, 3, 3,  3,  4,  4,  5,  5,  5,  6,  6,  6,  7,  8,  8,  9,  9,  9, 10, 10, 10, 10, 11, 11};
+u8 rtl8812ae_delta_swing_table_idx_24ga_p_txpwrtrack[]    =
+	{0, 0, 1, 1, 2, 2, 2, 2, 3,  3,  3,  4,  4,  5,  5,  6,  6,  6,  7,  7,  7,  8,  8,  8,  9,  9,  9,  9,  9,  9};
+u8 rtl8812ae_delta_swing_table_idx_24gcckb_n_txpwrtrack[] =
+	{0, 1, 1, 1, 2, 2, 2, 3, 3,  3,  4,  4,  5,  5,  5,  6,  6,  6,  7,  8,  9,  9,  9,  9, 10, 10, 10, 10, 11, 11};
+u8 rtl8812ae_delta_swing_table_idx_24gcckb_p_txpwrtrack[] =
+	{0, 0, 1, 1, 2, 2, 2, 2, 3,  3,  3,  4,  4,  5,  5,  6,  6,  6,  7,  7,  7,  8,  8,  8,  9,  9,  9,  9,  9,  9};
+u8 rtl8812ae_delta_swing_table_idx_24gccka_n_txpwrtrack[] =
+	{0, 1, 1, 1, 2, 2, 2, 3, 3,  3,  4,  4,  5,  5,  5,  6,  6,  6,  7,  8,  8,  9,  9,  9, 10, 10, 10, 10, 11, 11};
+u8 rtl8812ae_delta_swing_table_idx_24gccka_p_txpwrtrack[] =
+	{0, 0, 1, 1, 2, 2, 2, 2, 3,  3,  3,  4,  4,  5,  5,  6,  6,  6,  7,  7,  7,  8,  8,  8,  9,  9,  9,  9,  9,  9};
+
+u8 rtl8812ae_delta_swing_table_idx_5gb_n_txpwrtrack[][DELTA_SWINGIDX_SIZE] = {
+	{0, 1, 1, 2, 2, 2, 3, 3, 4,  4,  5,  5,  6,  6,  6,  7,  7,  7,  8,  8,  9,  9,  9, 10, 10, 11, 11, 12, 12, 13},
+	{0, 1, 1, 2, 2, 2, 3, 3, 4,  4,  4,  5,  5,  6,  6,  7,  7,  8,  8,  9,  9, 10, 10, 11, 11, 12, 12, 12, 13, 13},
+	{0, 1, 1, 2, 3, 3, 4, 4, 5,  6,  6,  7,  8,  9, 10, 11, 12, 12, 13, 14, 14, 14, 15, 16, 17, 17, 17, 18, 18, 18},
+};
+u8 rtl8812ae_delta_swing_table_idx_5gb_p_txpwrtrack[][DELTA_SWINGIDX_SIZE] = {
+	{0, 1, 1, 2, 2, 3, 3, 4, 4,  5,  5,  6,  6,  6,  7,  7,  8,  8,  9,  9, 10, 10, 11, 11, 11, 11, 11, 11, 11, 11},
+	{0, 1, 1, 2, 2, 3, 3, 4, 4,  4,  5,  5,  6,  6,  7,  7,  8,  8,  9,  9, 10, 10, 11, 11, 11, 11, 11, 11, 11, 11},
+	{0, 1, 1, 2, 2, 3, 3, 4, 4,  5,  5,  6,  7,  7,  8,  8,  9,  9, 10, 10, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11},
+};
+u8 rtl8812ae_delta_swing_table_idx_5ga_n_txpwrtrack[][DELTA_SWINGIDX_SIZE] = {
+	{0, 1, 1, 2, 2, 3, 3, 4, 4,  4,  5,  5,  6,  6,  7,  7,  8,  8,  9,  9, 10, 10, 11, 11, 12, 12, 12, 13, 13, 13},
+	{0, 1, 1, 2, 2, 2, 3, 3, 4,  4,  5,  5,  6,  6,  7,  8,  9,  9, 10, 10, 11, 11, 11, 12, 12, 12, 12, 12, 13, 13},
+	{0, 1, 1, 2, 2, 3, 3, 4, 5,  6,  7,  8,  8,  9, 10, 11, 12, 13, 14, 14, 15, 15, 15, 16, 16, 16, 17, 17, 18, 18},
+};
+u8 rtl8812ae_delta_swing_table_idx_5ga_p_txpwrtrack[][DELTA_SWINGIDX_SIZE] = {
+	{0, 1, 1, 2, 2, 3, 3, 4, 4,  4,  4,  5,  5,  6,  7,  7,  8,  8,  9,  9, 10, 10, 11, 11, 11, 11, 11, 11, 11, 11},
+	{0, 1, 1, 2, 2, 3, 3, 4, 4,  4,  5,  5,  6,  6,  7,  7,  8,  9,  9, 10, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11},
+	{0, 1, 1, 2, 3, 3, 4, 4, 5,  6,  6,  7,  7,  8,  9,  9, 10, 10, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11},
+};
+
+u8 rtl8821ae_delta_swing_table_idx_24gb_n_txpwrtrack[] =
+	{0, 1, 1, 2, 2, 2, 3, 3, 3, 4, 4, 4, 5, 5, 5, 6, 6, 6, 7, 7, 7, 8, 8, 8, 9, 9, 9, 10, 10, 10};
+u8 rtl8821ae_delta_swing_table_idx_24gb_p_txpwrtrack[]  =
+	{0, 0, 1, 1, 2, 2, 3, 3, 4, 4, 5, 5, 6, 6, 7, 7, 8, 8, 9, 9, 10, 10, 11, 11, 12, 12, 12, 12, 12, 12};
+u8 rtl8821ae_delta_swing_table_idx_24ga_n_txpwrtrack[]  =
+	{0, 1, 1, 2, 2, 2, 3, 3, 3, 4, 4, 4, 5, 5, 5, 6, 6, 6, 7, 7, 7, 8, 8, 8, 9, 9, 9, 10, 10, 10};
+u8 rtl8821ae_delta_swing_table_idx_24ga_p_txpwrtrack[] =
+	{0, 0, 1, 1, 2, 2, 3, 3, 4, 4, 5, 5, 6, 6, 7, 7, 8, 8, 9, 9, 10, 10, 11, 11, 12, 12, 12, 12, 12, 12};
+u8 rtl8821ae_delta_swing_table_idx_24gcckb_n_txpwrtrack[] =
+	{0, 1, 1, 2, 2, 2, 3, 3, 3, 4, 4, 4, 5, 5, 5, 6, 6, 6, 7, 7, 7, 8, 8, 8, 9, 9, 9, 10, 10, 10};
+u8 rtl8821ae_delta_swing_table_idx_24gcckb_p_txpwrtrack[] =
+	{0, 0, 1, 1, 2, 2, 3, 3, 4, 4, 5, 5, 6, 6, 7, 7, 8, 8, 9, 9, 10, 10, 11, 11, 12, 12, 12, 12, 12, 12};
+u8 rtl8821ae_delta_swing_table_idx_24gccka_n_txpwrtrack[] =
+	{0, 1, 1, 2, 2, 2, 3, 3, 3, 4, 4, 4, 5, 5, 5, 6, 6, 6, 7, 7, 7, 8, 8, 8, 9, 9, 9, 10, 10, 10};
+u8 rtl8821ae_delta_swing_table_idx_24gccka_p_txpwrtrack[] =
+	{0, 0, 1, 1, 2, 2, 3, 3, 4, 4, 5, 5, 6, 6, 7, 7, 8, 8, 9, 9, 10, 10, 11, 11, 12, 12, 12, 12, 12, 12};
+
+u8 rtl8821ae_delta_swing_table_idx_5gb_n_txpwrtrack[][DELTA_SWINGIDX_SIZE] = {
+	{0, 0, 1, 2, 3, 3, 4, 5, 6, 6, 7, 8, 9, 9, 10, 11, 12, 12, 13, 14, 15, 15, 16, 16, 16, 16, 16, 16, 16, 16},
+	{0, 0, 1, 2, 3, 3, 4, 5, 6, 6, 7, 8, 9, 9, 10, 11, 12, 12, 13, 14, 15, 15, 16, 16, 16, 16, 16, 16, 16, 16},
+	{0, 0, 1, 2, 3, 3, 4, 5, 6, 6, 7, 8, 9, 9, 10, 11, 12, 12, 13, 14, 15, 15, 16, 16, 16, 16, 16, 16, 16, 16},
+};
+
+u8 rtl8821ae_delta_swing_table_idx_5gb_p_txpwrtrack[][DELTA_SWINGIDX_SIZE] = {
+	{0, 0, 1, 2, 3, 3, 4, 5, 6, 6, 7, 8, 9, 9, 10, 11, 12, 12, 13, 14, 15, 15, 16, 16, 16, 16, 16, 16, 16, 16},
+	{0, 0, 1, 2, 3, 3, 4, 5, 6, 6, 7, 8, 9, 9, 10, 11, 12, 12, 13, 14, 15, 15, 16, 16, 16, 16, 16, 16, 16, 16},
+	{0, 0, 1, 2, 3, 3, 4, 5, 6, 6, 7, 8, 9, 9, 10, 11, 12, 12, 13, 14, 15, 15, 16, 16, 16, 16, 16, 16, 16, 16},
+};
+
+u8 rtl8821ae_delta_swing_table_idx_5ga_n_txpwrtrack[][DELTA_SWINGIDX_SIZE] = {
+	{0, 0, 1, 2, 3, 3, 4, 5, 6, 6, 7, 8, 9, 9, 10, 11, 12, 12, 13, 14, 15, 15, 16, 16, 16, 16, 16, 16, 16, 16},
+	{0, 0, 1, 2, 3, 3, 4, 5, 6, 6, 7, 8, 9, 9, 10, 11, 12, 12, 13, 14, 15, 15, 16, 16, 16, 16, 16, 16, 16, 16},
+	{0, 0, 1, 2, 3, 3, 4, 5, 6, 6, 7, 8, 9, 9, 10, 11, 12, 12, 13, 14, 15, 15, 16, 16, 16, 16, 16, 16, 16, 16},
+};
+
+u8 rtl8821ae_delta_swing_table_idx_5ga_p_txpwrtrack[][DELTA_SWINGIDX_SIZE] = {
+	{0, 0, 1, 2, 3, 3, 4, 5, 6, 6, 7, 8, 9, 9, 10, 11, 12, 12, 13, 14, 15, 15, 16, 16, 16, 16, 16, 16, 16, 16},
+	{0, 0, 1, 2, 3, 3, 4, 5, 6, 6, 7, 8, 9, 9, 10, 11, 12, 12, 13, 14, 15, 15, 16, 16, 16, 16, 16, 16, 16, 16},
+	{0, 0, 1, 2, 3, 3, 4, 5, 6, 6, 7, 8, 9, 9, 10, 11, 12, 12, 13, 14, 15, 15, 16, 16, 16, 16, 16, 16, 16, 16},
+};
+
+void rtl8812ae_dm_read_and_config_txpower_track(
+	struct ieee80211_hw *hw)
+{
+	struct rtl_priv *rtlpriv = rtl_priv(hw);
+	struct rtl_dm *rtldm = rtl_dm(rtl_priv(hw));
+	RT_TRACE(COMP_POWER_TRACKING, DBG_LOUD,
+		("===> rtl8812ae_dm_read_and_config_txpower_track\n"));
+
+
+	memcpy(rtldm->delta_swing_table_idx_24ga_p,
+		rtl8812ae_delta_swing_table_idx_24ga_p_txpwrtrack, DELTA_SWINGIDX_SIZE);
+	memcpy(rtldm->delta_swing_table_idx_24ga_n,
+		rtl8812ae_delta_swing_table_idx_24ga_n_txpwrtrack, DELTA_SWINGIDX_SIZE);
+	memcpy(rtldm->delta_swing_table_idx_24gb_p,
+		rtl8812ae_delta_swing_table_idx_24gb_p_txpwrtrack, DELTA_SWINGIDX_SIZE);
+	memcpy(rtldm->delta_swing_table_idx_24gb_n,
+		rtl8812ae_delta_swing_table_idx_24gb_n_txpwrtrack, DELTA_SWINGIDX_SIZE);
+
+	memcpy(rtldm->delta_swing_table_idx_24gccka_p,
+		rtl8812ae_delta_swing_table_idx_24gccka_p_txpwrtrack, DELTA_SWINGIDX_SIZE);
+	memcpy(rtldm->delta_swing_table_idx_24gccka_n,
+		rtl8812ae_delta_swing_table_idx_24gccka_n_txpwrtrack, DELTA_SWINGIDX_SIZE);
+	memcpy(rtldm->delta_swing_table_idx_24gcckb_p,
+		rtl8812ae_delta_swing_table_idx_24gcckb_p_txpwrtrack, DELTA_SWINGIDX_SIZE);
+	memcpy(rtldm->delta_swing_table_idx_24gcckb_n,
+		rtl8812ae_delta_swing_table_idx_24gcckb_n_txpwrtrack, DELTA_SWINGIDX_SIZE);
+
+	memcpy(rtldm->delta_swing_table_idx_5ga_p,
+		rtl8812ae_delta_swing_table_idx_5ga_p_txpwrtrack, DELTA_SWINGIDX_SIZE*3);
+	memcpy(rtldm->delta_swing_table_idx_5ga_n,
+		rtl8812ae_delta_swing_table_idx_5ga_n_txpwrtrack, DELTA_SWINGIDX_SIZE*3);
+	memcpy(rtldm->delta_swing_table_idx_5gb_p,
+		rtl8812ae_delta_swing_table_idx_5gb_p_txpwrtrack, DELTA_SWINGIDX_SIZE*3);
+	memcpy(rtldm->delta_swing_table_idx_5gb_n,
+		rtl8812ae_delta_swing_table_idx_5gb_n_txpwrtrack, DELTA_SWINGIDX_SIZE*3);
+}
+
+void rtl8821ae_dm_read_and_config_txpower_track(
+	struct ieee80211_hw *hw)
+{
+	struct rtl_priv *rtlpriv = rtl_priv(hw);
+	struct rtl_dm *rtldm = rtl_dm(rtl_priv(hw));
+	RT_TRACE(COMP_POWER_TRACKING, DBG_LOUD,
+		("===> rtl8821ae_dm_read_and_config_txpower_track\n"));
+
+
+	memcpy(rtldm->delta_swing_table_idx_24ga_p,
+		rtl8821ae_delta_swing_table_idx_24ga_p_txpwrtrack, DELTA_SWINGIDX_SIZE);
+	memcpy(rtldm->delta_swing_table_idx_24ga_n,
+		rtl8821ae_delta_swing_table_idx_24ga_n_txpwrtrack, DELTA_SWINGIDX_SIZE);
+	memcpy(rtldm->delta_swing_table_idx_24gb_p,
+		rtl8821ae_delta_swing_table_idx_24gb_p_txpwrtrack, DELTA_SWINGIDX_SIZE);
+	memcpy(rtldm->delta_swing_table_idx_24gb_n,
+		rtl8821ae_delta_swing_table_idx_24gb_n_txpwrtrack, DELTA_SWINGIDX_SIZE);
+
+	memcpy(rtldm->delta_swing_table_idx_24gccka_p,
+		rtl8821ae_delta_swing_table_idx_24gccka_p_txpwrtrack, DELTA_SWINGIDX_SIZE);
+	memcpy(rtldm->delta_swing_table_idx_24gccka_n,
+		rtl8821ae_delta_swing_table_idx_24gccka_n_txpwrtrack, DELTA_SWINGIDX_SIZE);
+	memcpy(rtldm->delta_swing_table_idx_24gcckb_p,
+		rtl8821ae_delta_swing_table_idx_24gcckb_p_txpwrtrack, DELTA_SWINGIDX_SIZE);
+	memcpy(rtldm->delta_swing_table_idx_24gcckb_n,
+		rtl8821ae_delta_swing_table_idx_24gcckb_n_txpwrtrack, DELTA_SWINGIDX_SIZE);
+
+	memcpy(rtldm->delta_swing_table_idx_5ga_p,
+		rtl8821ae_delta_swing_table_idx_5ga_p_txpwrtrack, DELTA_SWINGIDX_SIZE*3);
+	memcpy(rtldm->delta_swing_table_idx_5ga_n,
+		rtl8821ae_delta_swing_table_idx_5ga_n_txpwrtrack, DELTA_SWINGIDX_SIZE*3);
+	memcpy(rtldm->delta_swing_table_idx_5gb_p,
+		rtl8821ae_delta_swing_table_idx_5gb_p_txpwrtrack, DELTA_SWINGIDX_SIZE*3);
+	memcpy(rtldm->delta_swing_table_idx_5gb_n,
+		rtl8821ae_delta_swing_table_idx_5gb_n_txpwrtrack, DELTA_SWINGIDX_SIZE*3);
+}
+
+
+
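+/*
+ * Scan one row of thermal_threshold[] and leave in _offset the index of the
+ * last threshold already crossed by _deltaThermal (clamped to _size - 1).
+ */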
+#define CALCULATE_SWINGTALBE_OFFSET(_offset, _direction, _size, _deltaThermal) \
+	do { \
+		for (_offset = 0; _offset < _size; _offset++) { \
+			if (_deltaThermal < thermal_threshold[_direction][_offset]) { \
+				if (_offset != 0) \
+					_offset--; \
+				break; \
+			} \
+		} \
+		if (_offset >= _size) \
+			_offset = _size - 1; \
+	} while (0)
+
+
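+/*
+ * Convert the gap between the current and base BB swing index (OFDM for
+ * type 0, CCK for type 1) into a direction flag (1 = at or below base,
+ * 2 = above base) and a power value replicated into all four bytes of
+ * *poutwrite_val.
+ */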
+void rtl8821ae_dm_txpower_track_adjust(struct ieee80211_hw *hw,
+												   u8 type,u8 *pdirection,
+												   u32 *poutwrite_val)
+{
+	struct rtl_priv *rtlpriv = rtl_priv(hw);
+	struct rtl_dm *rtldm = rtl_dm(rtl_priv(hw));
+	u8 pwr_val = 0;
+
+	if (type == 0){
+		if (rtlpriv->dm.bb_swing_idx_ofdm[RF90_PATH_A] <=
+			rtlpriv->dm.bb_swing_idx_ofdm_base[RF90_PATH_A]) {
+			*pdirection = 1;
+			pwr_val = rtldm->bb_swing_idx_ofdm_base[RF90_PATH_A] - rtldm->bb_swing_idx_ofdm[RF90_PATH_A];
+		} else {
+			*pdirection = 2;
+			pwr_val = rtldm->bb_swing_idx_ofdm[RF90_PATH_A] - rtldm->bb_swing_idx_ofdm_base[RF90_PATH_A];
+		}
+	} else if (type ==1) {
+		if (rtldm->bb_swing_idx_cck <= rtldm->bb_swing_idx_cck_base) {
+			*pdirection = 1;
+			pwr_val = rtldm->bb_swing_idx_cck_base - rtldm->bb_swing_idx_cck;
+		} else {
+			*pdirection = 2;
+			pwr_val = rtldm->bb_swing_idx_cck - rtldm->bb_swing_idx_cck_base;
+		}
+	}
+
+	if (pwr_val >= TXPWRTRACK_MAX_IDX && (*pdirection == 1))
+		pwr_val = TXPWRTRACK_MAX_IDX;
+
+	*poutwrite_val = pwr_val |(pwr_val << 8)|(pwr_val << 16) | (pwr_val << 24);
+}
+
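+/*
+ * Reset all power tracking state: restore the default CCK/OFDM swing
+ * indices on every RF path, clear the per-path offsets and mix-mode
+ * bookkeeping, and reload the thermal baseline from efuse.
+ */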
+void rtl8821ae_dm_clear_txpower_tracking_state(struct ieee80211_hw *hw)
+{
+	struct rtl_priv *rtlpriv = rtl_priv(hw);
+	struct rtl_dm *rtldm = rtl_dm(rtlpriv);
+	struct rtl_efuse *rtlefuse = rtl_efuse(rtlpriv);
+	u8 p = 0;
+	rtldm->bb_swing_idx_cck_base = rtldm->default_cck_index;
+	rtldm->bb_swing_idx_cck = rtldm->default_cck_index;
+	rtldm->cck_index = 0;
+
+	for (p = RF90_PATH_A; p < MAX_RF_PATH; ++p) {
+		rtldm->bb_swing_idx_ofdm_base[p] = rtldm->default_ofdm_index;
+		rtldm->bb_swing_idx_ofdm[p] = rtldm->default_ofdm_index;
+		rtldm->ofdm_index[p] = rtldm->default_ofdm_index;
+
+		rtldm->power_index_offset[p] = 0;
+		rtldm->delta_power_index[p] = 0;
+		rtldm->delta_power_index_last[p] = 0;
+
+		rtldm->aboslute_ofdm_swing_idx[p] = 0;    /*Initial Mix mode power tracking*/
+		rtldm->remnant_ofdm_swing_idx[p] = 0;
+	}
+
+	rtldm->modify_txagc_flag_path_a = false;       /*Initial at Modify Tx Scaling Mode*/
+	rtldm->modify_txagc_flag_path_b = false;       /*Initial at Modify Tx Scaling Mode*/
+	rtldm->remnant_cck_idx = 0;
+	rtldm->thermalvalue = rtlefuse->eeprom_thermalmeter;
+	rtldm->thermalvalue_iqk = rtlefuse->eeprom_thermalmeter;
+	rtldm->thermalvalue_lck = rtlefuse->eeprom_thermalmeter;
+}
+
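+/*
+ * Look up the current path A TX scaling value in rtl8821ae_txscaling_table;
+ * returns TXSCALE_TABLE_SIZE when no entry matches.
+ */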
+u8  rtl8821ae_dm_get_swing_index(struct ieee80211_hw *hw)
+{
+	struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
+	u8 i = 0;
+	u32  bb_swing;
+
+	bb_swing =rtl8821ae_phy_query_bb_reg(hw, rtlhal->current_bandtype, RF90_PATH_A);
+
+	for (i = 0; i < TXSCALE_TABLE_SIZE; ++i)
+		if ( bb_swing == rtl8821ae_txscaling_table[i])
+			break;
+
+	return i;
+}
+
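+/*
+ * Set up power tracking: load the chip-specific delta swing tables, derive
+ * the default OFDM swing index from the current TX scaling value (falling
+ * back to 24 when it is not in the table) and seed the per-path base indices.
+ */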
+void rtl8821ae_dm_initialize_txpower_tracking_thermalmeter(
+				struct ieee80211_hw *hw)
+{
+	struct rtl_priv *rtlpriv = rtl_priv(hw);
+	struct rtl_dm *rtldm = rtl_dm(rtlpriv);
+	struct rtl_efuse *rtlefuse = rtl_efuse(rtlpriv);
+	struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
+	u8 default_swing_index  = 0;
+	u8 p = 0;
+
+	rtlpriv->dm.txpower_track_control = true;
+	rtldm->thermalvalue = rtlefuse->eeprom_thermalmeter;
+	rtldm->thermalvalue_iqk = rtlefuse->eeprom_thermalmeter;
+	rtldm->thermalvalue_lck = rtlefuse->eeprom_thermalmeter;
+
+	if (rtlhal->hw_type == HARDWARE_TYPE_RTL8812AE)
+		rtl8812ae_dm_read_and_config_txpower_track(hw);
+	else
+		rtl8821ae_dm_read_and_config_txpower_track(hw);
+
+	default_swing_index = rtl8821ae_dm_get_swing_index(hw);
+
+	rtldm->default_ofdm_index = (default_swing_index == TXSCALE_TABLE_SIZE) ? 24 : default_swing_index;
+	rtldm->default_cck_index = 24;
+
+	rtldm->bb_swing_idx_cck_base = rtldm->default_cck_index;
+	rtldm->cck_index = rtldm->default_cck_index;
+
+	for (p = RF90_PATH_A; p < MAX_RF_PATH; ++p)
+	{
+		rtldm->bb_swing_idx_ofdm_base[p] = rtldm->default_ofdm_index;
+	   	rtldm->ofdm_index[p] = rtldm->default_ofdm_index;
+		rtldm->delta_power_index[p] = 0;
+		rtldm->power_index_offset[p] = 0;
+		rtldm->delta_power_index_last[p] = 0;
+	}
+}
+
+static void rtl8821ae_dm_init_dynamic_bb_powersaving(struct ieee80211_hw *hw)
+{
+	dm_pstable.pre_ccastate = CCA_MAX;
+	dm_pstable.cur_ccasate = CCA_MAX;
+	dm_pstable.pre_rfstate = RF_MAX;
+	dm_pstable.cur_rfstate = RF_MAX;
+	dm_pstable.rssi_val_min = 0;
+	dm_pstable.initialize = 0;
+}
+
+
+static void rtl8821ae_dm_diginit(struct ieee80211_hw *hw)
+{
+	struct rtl_priv *rtlpriv = rtl_priv(hw);
+	//dm_digtable.dig_enable_flag = true;
+	dm_digtable.cur_igvalue = rtl_get_bbreg(hw, ROFDM0_XAAGCCORE1, 0x7f);
+	/*dm_digtable.pre_igvalue = 0;
+	dm_digtable.cursta_connectctate = DIG_STA_DISCONNECT;
+	dm_digtable.presta_connectstate = DIG_STA_DISCONNECT;
+	dm_digtable.curmultista_connectstate = DIG_MULTISTA_DISCONNECT;*/
+	dm_digtable.rssi_lowthresh = DM_DIG_THRESH_LOW;
+	dm_digtable.rssi_highthresh = DM_DIG_THRESH_HIGH;
+	dm_digtable.fa_lowthresh = DM_FALSEALARM_THRESH_LOW;
+	dm_digtable.fa_highthresh = DM_FALSEALARM_THRESH_HIGH;
+	dm_digtable.rx_gain_range_max = DM_DIG_MAX;
+	dm_digtable.rx_gain_range_min = DM_DIG_MIN;
+	dm_digtable.backoff_val = DM_DIG_BACKOFF_DEFAULT;
+	dm_digtable.backoff_val_range_max = DM_DIG_BACKOFF_MAX;
+	dm_digtable.backoff_val_range_min = DM_DIG_BACKOFF_MIN;
+	dm_digtable.pre_cck_cca_thres = 0xff;
+	dm_digtable.cur_cck_cca_thres = 0x83;
+	dm_digtable.forbidden_igi = DM_DIG_MIN;
+	dm_digtable.large_fa_hit = 0;
+	dm_digtable.recover_cnt = 0;
+	dm_digtable.dig_dynamic_min_0 = DM_DIG_MIN;
+	dm_digtable.dig_dynamic_min_1 = DM_DIG_MIN;
+	dm_digtable.b_media_connect_0 = false;
+	dm_digtable.b_media_connect_1 = false;
+	rtlpriv->dm.b_dm_initialgain_enable = true;
+	dm_digtable.bt30_cur_igi = 0x32;
+}
+
+static void rtl8821ae_dm_init_dynamic_txpower(struct ieee80211_hw *hw)
+{
+	struct rtl_priv *rtlpriv = rtl_priv(hw);
+
+	rtlpriv->dm.bdynamic_txpower_enable = false;
+
+	rtlpriv->dm.last_dtp_lvl = TXHIGHPWRLEVEL_NORMAL;
+	rtlpriv->dm.dynamic_txhighpower_lvl = TXHIGHPWRLEVEL_NORMAL;
+}
+
+
+void rtl8821ae_dm_init_edca_turbo(struct ieee80211_hw *hw)
+{
+	struct rtl_priv *rtlpriv = rtl_priv(hw);
+	rtlpriv->dm.bcurrent_turbo_edca = false;
+	rtlpriv->dm.bis_any_nonbepkts = false;
+	rtlpriv->dm.bis_cur_rdlstate = false;
+}
+
+
+void rtl8821ae_dm_init_rate_adaptive_mask(struct ieee80211_hw *hw)
+{
+	struct rtl_priv *rtlpriv = rtl_priv(hw);
+	struct rate_adaptive *p_ra = &(rtlpriv->ra);
+
+	p_ra->ratr_state = DM_RATR_STA_INIT;
+	p_ra->pre_ratr_state = DM_RATR_STA_INIT;
+
+	rtlpriv->dm.dm_type = DM_TYPE_BYDRIVER;
+	if (rtlpriv->dm.dm_type == DM_TYPE_BYDRIVER)
+		rtlpriv->dm.b_useramask = true;
+	else
+		rtlpriv->dm.b_useramask = false;
+
+	p_ra->high_rssi_thresh_for_ra = 50;
+	p_ra->low_rssi_thresh_for_ra = 20;
+}
+
+
+static void rtl8821ae_dm_init_txpower_tracking(struct ieee80211_hw *hw)
+{
+	struct rtl_priv *rtlpriv = rtl_priv(hw);
+
+	rtlpriv->dm.btxpower_tracking = true;
+	rtlpriv->dm.btxpower_trackinginit = false;
+	rtlpriv->dm.txpowercount = 0;
+	rtlpriv->dm.txpower_track_control = true;
+	rtlpriv->dm.thermalvalue = 0;
+
+	rtlpriv->dm.ofdm_index[0] = 30;
+	rtlpriv->dm.cck_index = 20;
+
+	rtlpriv->dm.bb_swing_idx_cck_base = rtlpriv->dm.cck_index;
+
+
+	rtlpriv->dm.bb_swing_idx_ofdm[RF90_PATH_A] = rtlpriv->dm.ofdm_index[0];
+	rtlpriv->dm.bb_swing_idx_ofdm[RF90_PATH_B] = rtlpriv->dm.ofdm_index[0];
+	rtlpriv->dm.delta_power_index[0] = 0;
+	rtlpriv->dm.delta_power_index_last[0] = 0;
+	rtlpriv->dm.power_index_offset[0] = 0;
+
+	RT_TRACE(COMP_POWER_TRACKING, DBG_LOUD,
+		 ("  rtlpriv->dm.btxpower_tracking = %d\n",
+		  rtlpriv->dm.btxpower_tracking));
+}
+
+
+void rtl8821ae_dm_init_dynamic_atc_switch(struct ieee80211_hw *hw)
+{
+	struct rtl_priv *rtlpriv = rtl_priv(hw);
+
+	rtlpriv->dm.crystal_cap = rtlpriv->efuse.crystalcap;
+
+	rtlpriv->dm.atc_status = rtl_get_bbreg(hw, ROFDM1_CFOTRACKING, BIT(11));
+	rtlpriv->dm.cfo_threshold = CFO_THRESHOLD_XTAL;
+}
+
+
+void rtl8821ae_dm_init(struct ieee80211_hw *hw)
+{
+	struct rtl_priv *rtlpriv = rtl_priv(hw);
+	struct rtl_phy *rtlphy = &(rtlpriv->phy);
+
+	spin_lock(&rtlpriv->locks.iqk_lock);
+	rtlphy->b_iqk_in_progress = false;
+	spin_unlock(&rtlpriv->locks.iqk_lock);
+
+	rtlpriv->dm.dm_type = DM_TYPE_BYDRIVER;
+	rtl8821ae_dm_diginit(hw);
+	rtl8821ae_dm_init_rate_adaptive_mask(hw);
+	rtl8812ae_dm_path_diversity_init(hw);
+	rtl8821ae_dm_init_edca_turbo(hw);
+	rtl8821ae_dm_initialize_txpower_tracking_thermalmeter(hw);
+#if 1
+	rtl8821ae_dm_init_dynamic_bb_powersaving(hw);
+	rtl8821ae_dm_init_dynamic_txpower(hw);
+	rtl8821ae_dm_init_txpower_tracking(hw);
+#endif
+	rtl8821ae_dm_init_dynamic_atc_switch(hw);
+}
+
+void rtl8821ae_dm_find_minimum_rssi(struct ieee80211_hw *hw)
+{
+	struct rtl_priv *rtlpriv = rtl_priv(hw);
+	struct rtl_dig *rtl_dm_dig = &(rtlpriv->dm.dm_digtable);
+	struct rtl_mac *mac = rtl_mac(rtlpriv);
+
+	/* Determine the minimum RSSI  */
+	if ((mac->link_state < MAC80211_LINKED) &&
+	    (rtlpriv->dm.entry_min_undecoratedsmoothed_pwdb == 0)) {
+		rtl_dm_dig->min_undecorated_pwdb_for_dm = 0;
+		RT_TRACE(COMP_BB_POWERSAVING, DBG_LOUD,
+			 ("Not connected to any \n"));
+	}
+	if (mac->link_state >= MAC80211_LINKED) {
+		if (mac->opmode == NL80211_IFTYPE_AP ||
+			mac->opmode == NL80211_IFTYPE_ADHOC) {
+			rtl_dm_dig->min_undecorated_pwdb_for_dm =
+			    rtlpriv->dm.entry_min_undecoratedsmoothed_pwdb;
+			RT_TRACE(COMP_BB_POWERSAVING, DBG_LOUD,
+				 ("AP Client PWDB = 0x%lx \n",
+				  rtlpriv->dm.entry_min_undecoratedsmoothed_pwdb));
+		} else {
+			rtl_dm_dig->min_undecorated_pwdb_for_dm =
+			    rtlpriv->dm.undecorated_smoothed_pwdb;
+			RT_TRACE(COMP_BB_POWERSAVING, DBG_LOUD,
+				 ("STA Default Port PWDB = 0x%x \n",
+				  rtl_dm_dig->min_undecorated_pwdb_for_dm));
+		}
+	} else {
+		rtl_dm_dig->min_undecorated_pwdb_for_dm =
+		    rtlpriv->dm.entry_min_undecoratedsmoothed_pwdb;
+		RT_TRACE(COMP_BB_POWERSAVING, DBG_LOUD,
+			 ("AP Ext Port or disconnet PWDB = 0x%x \n",
+			  rtl_dm_dig->min_undecorated_pwdb_for_dm));
+	}
+	RT_TRACE(COMP_DIG, DBG_LOUD, ("MinUndecoratedPWDBForDM =%d\n",
+			rtl_dm_dig->min_undecorated_pwdb_for_dm));
+}
+
+#if 0
+void  rtl8812ae_dm_rssi_dump_to_register(
+	struct ieee80211_hw *hw
+	)
+{
+	struct rtl_priv *rtlpriv = rtl_priv(hw);
+
+	rtl_write_byte(rtlpriv, RA_RSSI_DUMP, Adapter->RxStats.RxRSSIPercentage[0]);
+	rtl_write_byte(rtlpriv, RB_RSSI_DUMP, Adapter->RxStats.RxRSSIPercentage[1]);
+
+	/* Rx EVM*/
+	rtl_write_byte(rtlpriv, RS1_RX_EVM_DUMP, Adapter->RxStats.RxEVMdbm[0]);
+	rtl_write_byte(rtlpriv, RS2_RX_EVM_DUMP, Adapter->RxStats.RxEVMdbm[1]);
+
+	/*Rx SNR*/
+	rtl_write_byte(rtlpriv, RA_RX_SNR_DUMP, (u1Byte)(Adapter->RxStats.RxSNRdB[0]));
+	rtl_write_byte(rtlpriv, RB_RX_SNR_DUMP, (u1Byte)(Adapter->RxStats.RxSNRdB[1]));
+
+	/*Rx Cfo_Short*/
+	rtl_write_word(rtlpriv, RA_CFO_SHORT_DUMP, Adapter->RxStats.RxCfoShort[0]);
+	rtl_write_word(rtlpriv, RB_CFO_SHORT_DUMP, Adapter->RxStats.RxCfoShort[1]);
+
+	/*Rx Cfo_Tail*/
+	rtl_write_word(rtlpriv, RA_CFO_LONG_DUMP, Adapter->RxStats.RxCfoTail[0]);
+	rtl_write_word(rtlpriv, RB_CFO_LONG_DUMP, Adapter->RxStats.RxCfoTail[1]);
+
+}
+#endif
+
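+/*
+ * Walk the station entry list for the minimum and maximum smoothed PWDB,
+ * report the RX signal strength to the firmware (or to register 0x4fe when
+ * the RA mask is not used) and refresh the minimum RSSI used by DIG.
+ */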
+static void rtl8821ae_dm_check_rssi_monitor(struct ieee80211_hw *hw)
+{
+	struct rtl_priv *rtlpriv = rtl_priv(hw);
+	struct rtl_sta_info *drv_priv;
+	u8 h2c_parameter[3] = { 0 };
+	long tmp_entry_max_pwdb = 0, tmp_entry_min_pwdb = 0xff;
+
+
+	/* AP & ADHOC & MESH */
+	spin_lock_bh(&rtlpriv->locks.entry_list_lock);
+	list_for_each_entry(drv_priv, &rtlpriv->entry_list, list) {
+		if(drv_priv->rssi_stat.undecorated_smoothed_pwdb < tmp_entry_min_pwdb)
+			tmp_entry_min_pwdb = drv_priv->rssi_stat.undecorated_smoothed_pwdb;
+		if(drv_priv->rssi_stat.undecorated_smoothed_pwdb > tmp_entry_max_pwdb)
+			tmp_entry_max_pwdb = drv_priv->rssi_stat.undecorated_smoothed_pwdb;
+
+		/*h2c_parameter[2] = (u8) (rtlpriv->dm.undecorated_smoothed_pwdb & 0xFF);
+		h2c_parameter[1] = 0x20;
+		h2c_parameter[0] =  drv_priv->rssi_stat;
+		rtl8821ae_fill_h2c_cmd(hw, H2C_RSSI_REPORT, 3, h2c_parameter);*/
+	}
+	spin_unlock_bh(&rtlpriv->locks.entry_list_lock);
+
+	/* If associated entry is found */
+	if (tmp_entry_max_pwdb != 0) {
+		rtlpriv->dm.entry_max_undecoratedsmoothed_pwdb = tmp_entry_max_pwdb;
+		RTPRINT(rtlpriv, FDM, DM_PWDB, ("EntryMaxPWDB = 0x%lx(%ld)\n",
+			tmp_entry_max_pwdb, tmp_entry_max_pwdb));
+	} else {
+		rtlpriv->dm.entry_max_undecoratedsmoothed_pwdb = 0;
+	}
+	/* If associated entry is found */
+	if (tmp_entry_min_pwdb != 0xff) {
+		rtlpriv->dm.entry_min_undecoratedsmoothed_pwdb = tmp_entry_min_pwdb;
+		RTPRINT(rtlpriv, FDM, DM_PWDB, ("EntryMinPWDB = 0x%lx(%ld)\n",
+					tmp_entry_min_pwdb, tmp_entry_min_pwdb));
+	} else {
+		rtlpriv->dm.entry_min_undecoratedsmoothed_pwdb = 0;
+	}
+	/* Indicate Rx signal strength to FW. */
+	if (rtlpriv->dm.b_useramask) {
+		h2c_parameter[2] = (u8) (rtlpriv->dm.undecorated_smoothed_pwdb & 0xFF);
+		h2c_parameter[1] = 0x20;
+		h2c_parameter[0] = 0;
+		rtl8821ae_fill_h2c_cmd(hw, H2C_RSSI_REPORT, 3, h2c_parameter);
+	} else {
+		rtl_write_byte(rtlpriv, 0x4fe, rtlpriv->dm.undecorated_smoothed_pwdb);
+	}
+	rtl8821ae_dm_find_minimum_rssi(hw);
+	dm_digtable.rssi_val_min = rtlpriv->dm.dm_digtable.min_undecorated_pwdb_for_dm;
+}
+
+void rtl8821ae_dm_write_cck_cca_thres(struct ieee80211_hw *hw, u8 current_cca)
+{
+	struct rtl_priv *rtlpriv = rtl_priv(hw);
+
+	if (dm_digtable.cur_cck_cca_thres != current_cca)
+		rtl_write_byte(rtlpriv, DM_REG_CCK_CCA_11AC, current_cca);
+
+	dm_digtable.pre_cck_cca_thres = dm_digtable.cur_cck_cca_thres;
+	dm_digtable.cur_cck_cca_thres = current_cca;
+}
+
+void rtl8821ae_dm_write_dig(struct ieee80211_hw *hw, u8 current_igi)
+{
+	struct rtl_priv *rtlpriv = rtl_priv(hw);
+	if(dm_digtable.stop_dig)
+		return;
+
+	if (dm_digtable.cur_igvalue != current_igi){
+		rtl_set_bbreg(hw, DM_REG_IGI_A_11AC, DM_BIT_IGI_11AC, current_igi);
+		if (rtlpriv->phy.rf_type != RF_1T1R)
+			rtl_set_bbreg(hw, DM_REG_IGI_B_11AC, DM_BIT_IGI_11AC, current_igi);
+	}
+	//dm_digtable.pre_igvalue = dm_digtable.cur_igvalue;
+	dm_digtable.cur_igvalue = current_igi;
+}
+
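+/*
+ * Dynamic initial gain control: derive the allowed IGI range from the
+ * minimum RSSI and the false alarm statistics, handle the first
+ * connect/disconnect transitions, and program the result through
+ * rtl8821ae_dm_write_dig().
+ */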
+static void rtl8821ae_dm_dig(struct ieee80211_hw *hw)
+{
+	struct rtl_priv *rtlpriv = rtl_priv(hw);
+	struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
+	struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
+	u8 dig_dynamic_min;
+	u8 dig_max_of_min;
+	bool first_connect, first_disconnect;
+	u8 dm_dig_max, dm_dig_min, offset;
+	u8 current_igi =dm_digtable.cur_igvalue;
+
+
+	RT_TRACE(COMP_DIG, DBG_LOUD,("rtl8821ae_dm_dig()==>\n"));
+
+
+	if (mac->act_scanning == true) {
+		RT_TRACE(COMP_DIG, DBG_LOUD,("rtl8821ae_dm_dig() Return: In Scan Progress \n"));
+	    	return;
+	}
+
+	/*add by Neil Chen to avoid PSD is processing*/
+	dig_dynamic_min = dm_digtable.dig_dynamic_min_0;
+	first_connect = (mac->link_state >= MAC80211_LINKED) &&
+			(dm_digtable.b_media_connect_0 == false);
+	first_disconnect = (mac->link_state < MAC80211_LINKED) &&
+			(dm_digtable.b_media_connect_0 == true);
+
+	/*1 Boundary Decision*/
+
+
+	dm_dig_max = 0x5A;
+
+	if (rtlhal->hw_type != HARDWARE_TYPE_RTL8821AE)
+		dm_dig_min = DM_DIG_MIN;
+	else
+		dm_dig_min = 0x1C;
+
+	dig_max_of_min = DM_DIG_MAX_AP;
+
+	if (mac->link_state >= MAC80211_LINKED) {
+		if (rtlhal->hw_type != HARDWARE_TYPE_RTL8821AE)
+			offset = 20;
+		else
+			offset = 10;
+
+		if ((dm_digtable.rssi_val_min + offset) > dm_dig_max)
+			dm_digtable.rx_gain_range_max = dm_dig_max;
+		else if ((dm_digtable.rssi_val_min + offset) < dm_dig_min)
+			dm_digtable.rx_gain_range_max = dm_dig_min;
+		else
+			dm_digtable.rx_gain_range_max = dm_digtable.rssi_val_min + offset;
+
+		if(rtlpriv->dm.b_one_entry_only){
+			offset = 0;
+
+			if (dm_digtable.rssi_val_min - offset < dm_dig_min)
+				dig_dynamic_min = dm_dig_min;
+			else if (dm_digtable.rssi_val_min - offset > dig_max_of_min)
+				dig_dynamic_min = dig_max_of_min;
+			else
+				dig_dynamic_min = dm_digtable.rssi_val_min - offset;
+
+			RT_TRACE(COMP_DIG, DBG_LOUD,
+				("rtl8821ae_dm_dig() : bOneEntryOnly=TRUE,  dig_dynamic_min=0x%x\n",
+				dig_dynamic_min));
+			RT_TRACE(COMP_DIG, DBG_LOUD,
+				("rtl8821ae_dm_dig() : dm_digtable.rssi_val_min=%d",dm_digtable.
+				rssi_val_min));
+		} else {
+			dig_dynamic_min = dm_dig_min;
+		}
+	} else {
+		dm_digtable.rx_gain_range_max = dm_dig_max;
+		dig_dynamic_min = dm_dig_min;
+		RT_TRACE(COMP_DIG, DBG_LOUD,
+			("rtl8821ae_dm_dig() : No Link\n"));
+	}
+
+	if (rtlpriv->falsealm_cnt.cnt_all > 10000) {
+		RT_TRACE(COMP_DIG, DBG_LOUD,
+			("rtl8821ae_dm_dig(): Abnornally false alarm case. \n"));
+
+		if (dm_digtable.large_fa_hit != 3)
+		        dm_digtable.large_fa_hit++;
+		if (dm_digtable.forbidden_igi < current_igi) {
+			dm_digtable.forbidden_igi = current_igi;
+			dm_digtable.large_fa_hit = 1;
+		}
+
+		if (dm_digtable.large_fa_hit >= 3) {
+			if((dm_digtable.forbidden_igi + 1) > dm_digtable.rx_gain_range_max)
+				dm_digtable.rx_gain_range_min = dm_digtable.rx_gain_range_max;
+			else
+				dm_digtable.rx_gain_range_min = (dm_digtable.forbidden_igi + 1);
+			dm_digtable.recover_cnt = 3600;
+		}
+
+	} else {
+		/*Recovery mechanism for IGI lower bound*/
+		if (dm_digtable.recover_cnt != 0)
+			dm_digtable.recover_cnt --;
+		else {
+			if (dm_digtable.large_fa_hit < 3) {
+				if ((dm_digtable.forbidden_igi -1) < dig_dynamic_min) {
+					dm_digtable.forbidden_igi = dig_dynamic_min;
+					dm_digtable.rx_gain_range_min = dig_dynamic_min;
+					RT_TRACE(COMP_DIG, DBG_LOUD,
+						("rtl8821ae_dm_dig(): Normal Case: At Lower Bound\n"));
+				} else {
+					dm_digtable.forbidden_igi --;
+					dm_digtable.rx_gain_range_min = (dm_digtable.forbidden_igi + 1);
+					RT_TRACE(COMP_DIG, DBG_LOUD,
+						("rtl8821ae_dm_dig(): Normal Case: Approach Lower Bound\n"));
+				}
+			} else {
+				dm_digtable.large_fa_hit = 0;
+			}
+		}
+	}
+	RT_TRACE(COMP_DIG, DBG_LOUD,
+		("rtl8821ae_dm_dig(): pDM_DigTable->LargeFAHit=%d\n",
+		dm_digtable.large_fa_hit));
+
+	if (rtlpriv->dm.dbginfo.num_qry_beacon_pkt < 10)
+		dm_digtable.rx_gain_range_min = dm_dig_min;
+
+	if (dm_digtable.rx_gain_range_min > dm_digtable.rx_gain_range_max)
+		dm_digtable.rx_gain_range_min = dm_digtable.rx_gain_range_max;
+
+	/*Adjust initial gain by false alarm*/
+	if (mac->link_state >= MAC80211_LINKED) {
+		RT_TRACE(COMP_DIG, DBG_LOUD,
+			("rtl8821ae_dm_dig(): DIG AfterLink\n"));
+		if (first_connect) {
+			if (dm_digtable.rssi_val_min <= dig_max_of_min)
+				current_igi = dm_digtable.rssi_val_min;
+			else
+				current_igi = dig_max_of_min;
+			RT_TRACE(COMP_DIG, DBG_LOUD,
+				("rtl8821ae_dm_dig: First Connect\n"));
+		} else {
+			if(rtlpriv->falsealm_cnt.cnt_all > DM_DIG_FA_TH2)
+				current_igi = current_igi + 4;
+			else if (rtlpriv->falsealm_cnt.cnt_all > DM_DIG_FA_TH1)
+				current_igi = current_igi + 2;
+			else if(rtlpriv->falsealm_cnt.cnt_all < DM_DIG_FA_TH0)
+				current_igi = current_igi - 2;
+
+			if((rtlpriv->dm.dbginfo.num_qry_beacon_pkt < 10)
+				&&(rtlpriv->falsealm_cnt.cnt_all < DM_DIG_FA_TH1)) {
+				current_igi = dm_digtable.rx_gain_range_min;
+				RT_TRACE(COMP_DIG, DBG_LOUD,
+					("rtl8821ae_dm_dig(): Beacon is less than 10 and FA is less than 768, IGI GOES TO 0x1E!!!!!!!!!!!!\n"));
+			}
+		}
+	}  else{
+		RT_TRACE(COMP_DIG, DBG_LOUD,
+			("rtl8821ae_dm_dig(): DIG BeforeLink\n"));
+		if (first_disconnect){
+			current_igi = dm_digtable.rx_gain_range_min;
+			RT_TRACE(COMP_DIG, DBG_LOUD,
+				("rtl8821ae_dm_dig(): First DisConnect \n"));
+		} else {
+			/*2012.03.30 LukeLee: enable DIG before link but with very high thresholds*/
+	       	if (rtlpriv->falsealm_cnt.cnt_all > 2000)
+				current_igi = current_igi + 4;
+			else if (rtlpriv->falsealm_cnt.cnt_all > 600)
+				current_igi = current_igi + 2;
+			else if(rtlpriv->falsealm_cnt.cnt_all < 300)
+				current_igi = current_igi - 2;
+			if (current_igi >= 0x3e)
+				current_igi = 0x3e;
+			RT_TRACE(COMP_DIG, DBG_LOUD,("rtl8821ae_dm_dig(): England DIG \n"));
+		}
+	}
+	RT_TRACE(COMP_DIG, DBG_LOUD,
+		("rtl8821ae_dm_dig(): DIG End Adjust IGI\n"));
+	/* Check initial gain by upper/lower bound*/
+
+	if (current_igi > dm_digtable.rx_gain_range_max)
+		current_igi = dm_digtable.rx_gain_range_max;
+	if (current_igi < dm_digtable.rx_gain_range_min)
+		current_igi = dm_digtable.rx_gain_range_min;
+
+	RT_TRACE(COMP_DIG, DBG_LOUD,
+		("rtl8821ae_dm_dig(): rx_gain_range_max=0x%x, rx_gain_range_min=0x%x\n",
+		dm_digtable.rx_gain_range_max, dm_digtable.rx_gain_range_min));
+	RT_TRACE(COMP_DIG, DBG_LOUD,
+		("rtl8821ae_dm_dig(): TotalFA=%d\n", rtlpriv->falsealm_cnt.cnt_all));
+	RT_TRACE(COMP_DIG, DBG_LOUD,
+		("rtl8821ae_dm_dig(): CurIGValue=0x%x\n", current_igi));
+
+	rtl8821ae_dm_write_dig(hw, current_igi);
+	dm_digtable.b_media_connect_0= ((mac->link_state >= MAC80211_LINKED) ? true :false);
+	dm_digtable.dig_dynamic_min_0 = dig_dynamic_min;
+}
+
+static void rtl8821ae_dm_common_info_self_update(struct ieee80211_hw *hw)
+{
+	struct rtl_priv *rtlpriv = rtl_priv(hw);
+	u8 cnt = 0;
+	struct rtl_sta_info *drv_priv;
+
+	rtlpriv->dm.b_one_entry_only = false;
+
+	if (rtlpriv->mac80211.opmode == NL80211_IFTYPE_STATION &&
+		rtlpriv->mac80211.link_state >= MAC80211_LINKED) {
+		rtlpriv->dm.b_one_entry_only = true;
+		return;
+	}
+
+	if (rtlpriv->mac80211.opmode == NL80211_IFTYPE_AP ||
+		rtlpriv->mac80211.opmode == NL80211_IFTYPE_ADHOC ||
+		rtlpriv->mac80211.opmode == NL80211_IFTYPE_MESH_POINT) {
+		spin_lock_bh(&rtlpriv->locks.entry_list_lock);
+		list_for_each_entry(drv_priv, &rtlpriv->entry_list, list) {
+			cnt ++;
+		}
+		spin_unlock_bh(&rtlpriv->locks.entry_list_lock);
+
+		if (cnt == 1)
+			rtlpriv->dm.b_one_entry_only = true;
+	}
+}
+
+
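+/*
+ * Read the OFDM and CCK false alarm counters (CCK only counts when the CCK
+ * RX path is enabled), accumulate the total, then pulse the reset bits so
+ * the next round starts from zero.
+ */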
+static void rtl8821ae_dm_false_alarm_counter_statistics(struct ieee80211_hw *hw)
+{
+	struct rtl_priv *rtlpriv = rtl_priv(hw);
+	struct false_alarm_statistics *falsealm_cnt = &(rtlpriv->falsealm_cnt);
+	u32 cck_enable =0;
+
+	/*read OFDM FA counter*/
+	falsealm_cnt->cnt_ofdm_fail = rtl_get_bbreg(hw, ODM_REG_OFDM_FA_11AC, BMASKLWORD);
+	falsealm_cnt->cnt_cck_fail = rtl_get_bbreg(hw, ODM_REG_CCK_FA_11AC, BMASKLWORD);
+
+	cck_enable =  rtl_get_bbreg(hw, ODM_REG_BB_RX_PATH_11AC, BIT(28));
+	if (cck_enable)  /*if(pDM_Odm->pBandType == ODM_BAND_2_4G)*/
+		falsealm_cnt->cnt_all = falsealm_cnt->cnt_ofdm_fail + falsealm_cnt->cnt_cck_fail;
+	else
+		falsealm_cnt->cnt_all = falsealm_cnt->cnt_ofdm_fail;
+
+	/*reset OFDM FA counter*/
+	rtl_set_bbreg(hw, ODM_REG_OFDM_FA_RST_11AC, BIT(17), 1);
+	rtl_set_bbreg(hw, ODM_REG_OFDM_FA_RST_11AC, BIT(17), 0);
+	/* reset CCK FA counter*/
+	rtl_set_bbreg(hw,  ODM_REG_CCK_FA_RST_11AC, BIT(15), 0);
+	rtl_set_bbreg(hw,  ODM_REG_CCK_FA_RST_11AC, BIT(15), 1);
+
+	RT_TRACE(COMP_DIG, DBG_LOUD, ("Cnt_Cck_fail=%d\n",
+			falsealm_cnt->cnt_cck_fail));
+	RT_TRACE(COMP_DIG, DBG_LOUD, ("cnt_ofdm_fail=%d\n",
+			falsealm_cnt->cnt_ofdm_fail));
+	RT_TRACE(COMP_DIG, DBG_LOUD, ("Total False Alarm=%d\n",
+			falsealm_cnt->cnt_all));
+}
+
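+/*
+ * Two-step thermal meter handling: the first call arms the RF thermal
+ * meter, the following call runs the power tracking callback directly.
+ */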
+void rtl8812ae_dm_check_txpower_tracking_thermalmeter(
+		struct ieee80211_hw *hw)
+{
+	struct rtl_priv *rtlpriv = rtl_priv(hw);
+	static u8 tm_trigger = 0;
+
+	if (!rtlpriv->dm.btxpower_tracking)
+		return;
+
+	if (!tm_trigger) {
+		rtl_set_rfreg(hw, RF90_PATH_A, RF_T_METER_88E, BIT(17)|BIT(16), 0x03);
+		RT_TRACE(COMP_POWER_TRACKING, DBG_LOUD,
+			 ("Trigger 8812 Thermal Meter!!\n"));
+		tm_trigger = 1;
+		return;
+	} else {
+		RT_TRACE(COMP_POWER_TRACKING, DBG_LOUD,
+			 ("Schedule TxPowerTracking direct call!!\n"));
+		rtl8812ae_dm_txpower_tracking_callback_thermalmeter(hw);
+		tm_trigger = 0;
+	}
+}
+
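+/*
+ * Run IQ calibration shortly after association: once linked_interval
+ * reaches 2, call the chip-specific IQK routine; reset the counter when
+ * the link drops.
+ */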
+static void rtl8821ae_dm_iq_calibrate(struct ieee80211_hw *hw)
+{
+	struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
+	struct rtl_dm *rtldm = rtl_dm(rtl_priv(hw));
+	struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
+
+	if (mac->link_state >= MAC80211_LINKED) {
+		/*if ((*rtldm->p_channel != rtldm->pre_channel )
+			&& (!mac->act_scanning)) {
+			rtldm->pre_channel = *rtldm->p_channel;
+			rtldm->linked_interval = 0;
+		}*/
+
+		if(rtldm->linked_interval < 3)
+			rtldm->linked_interval ++;
+
+		if(rtldm->linked_interval == 2)
+		{
+			if (rtlhal->hw_type == HARDWARE_TYPE_RTL8812AE)
+				rtl8812ae_phy_iq_calibrate(hw, false);
+			else
+				rtl8821ae_phy_iq_calibrate(hw, false);
+		}
+	} else {
+		rtldm->linked_interval = 0;
+	}
+}
+
+
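+/*
+ * Select the temperature up/down delta swing tables for both RF paths from
+ * the current channel (2.4G CCK vs. OFDM, or one of the three 5G sub-bands);
+ * out-of-range channels fall back to the 8818e 2.4G tables.
+ */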
+void rtl8812ae_get_delta_swing_table(
+	struct ieee80211_hw *hw,
+	u8 **temperature_up_a,
+	u8 **temperature_down_a,
+	u8 **temperature_up_b,
+	u8 **temperature_down_b
+	)
+{
+   	struct rtl_priv *rtlpriv = rtl_priv(hw);
+	struct rtl_phy *rtlphy = &(rtlpriv->phy);
+	struct rtl_dm *rtldm = rtl_dm(rtl_priv(hw));
+	u8 channel = rtlphy->current_channel;
+	u8 rate = rtldm->tx_rate;
+
+
+	if ( 1 <= channel && channel <= 14) {
+		if (RX_HAL_IS_CCK_RATE(rate)) {
+		        *temperature_up_a = rtldm->delta_swing_table_idx_24gccka_p;
+		        *temperature_down_a = rtldm->delta_swing_table_idx_24gccka_n;
+		        *temperature_up_b = rtldm->delta_swing_table_idx_24gcckb_p;
+		        *temperature_down_b = rtldm->delta_swing_table_idx_24gcckb_n;
+		} else {
+		        *temperature_up_a = rtldm->delta_swing_table_idx_24ga_p;
+		        *temperature_down_a = rtldm->delta_swing_table_idx_24ga_n;
+		        *temperature_up_b = rtldm->delta_swing_table_idx_24gb_p;
+		        *temperature_down_b = rtldm->delta_swing_table_idx_24gb_n;
+		}
+ 	} else if ( 36 <= channel && channel <= 64) {
+	        *temperature_up_a = rtldm->delta_swing_table_idx_5ga_p[0];
+	        *temperature_down_a = rtldm->delta_swing_table_idx_5ga_n[0];
+	        *temperature_up_b = rtldm->delta_swing_table_idx_5gb_p[0];
+	        *temperature_down_b = rtldm->delta_swing_table_idx_5gb_n[0];
+    	} else if ( 100 <= channel && channel <= 140) {
+		*temperature_up_a = rtldm->delta_swing_table_idx_5ga_p[1];
+		*temperature_down_a = rtldm->delta_swing_table_idx_5ga_n[1];
+		*temperature_up_b = rtldm->delta_swing_table_idx_5gb_p[1];
+		*temperature_down_b = rtldm->delta_swing_table_idx_5gb_n[1];
+    	} else if ( 149 <= channel && channel <= 173) {
+		*temperature_up_a = rtldm->delta_swing_table_idx_5ga_p[2];
+		*temperature_down_a = rtldm->delta_swing_table_idx_5ga_n[2];
+		*temperature_up_b = rtldm->delta_swing_table_idx_5gb_p[2];
+		*temperature_down_b = rtldm->delta_swing_table_idx_5gb_n[2];
+    	} else {
+	    *temperature_up_a = (u8*)rtl8818e_delta_swing_table_idx_24gb_p_txpwrtrack;
+	    *temperature_down_a =(u8*)rtl8818e_delta_swing_table_idx_24gb_n_txpwrtrack;
+	    *temperature_up_b = (u8*)rtl8818e_delta_swing_table_idx_24gb_p_txpwrtrack;
+	    *temperature_down_b = (u8*)rtl8818e_delta_swing_table_idx_24gb_n_txpwrtrack;
+    	}
+
+	return;
+}
+
+void rtl8812ae_phy_lccalibrate(
+	struct ieee80211_hw *hw)
+{
+	struct rtl_priv *rtlpriv = rtl_priv(hw);
+
+	RT_TRACE(COMP_POWER_TRACKING, DBG_LOUD, ("===> rtl8812ae_phy_lccalibrate\n"));
+
+	RT_TRACE(COMP_POWER_TRACKING, DBG_LOUD, ("<=== rtl8812ae_phy_lccalibrate\n"));
+
+}
+
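+/*
+ * C2H rate report handler: remember the latest TX rate and re-run power
+ * tracking (MIX_MODE on path A for the 8821AE, BBSWING on every path for
+ * the 8812AE) so the BB swing limit matches the new rate.
+ */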
+void rtl8812ae_dm_update_init_rate(
+	struct ieee80211_hw *hw,
+	u8 rate
+	)
+{
+	struct rtl_priv *rtlpriv = rtl_priv(hw);
+	struct rtl_dm	*rtldm = rtl_dm(rtl_priv(hw));
+	struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
+	u8 p = 0;
+
+	RT_TRACE(COMP_POWER_TRACKING, DBG_LOUD,
+		("Get C2H Command! Rate=0x%x\n", rate));
+
+	rtldm->tx_rate = rate;
+
+	if (rtlhal->hw_type == HARDWARE_TYPE_RTL8821AE){
+		rtl8821ae_dm_txpwr_track_set_pwr(hw, MIX_MODE, RF90_PATH_A, 0);
+	}
+	else
+	{
+		for (p = RF90_PATH_A; p < MAX_PATH_NUM_8812A; p++)
+		{
+			rtl8812ae_dm_txpwr_track_set_pwr(hw, BBSWING, p, 0);
+		}
+	}
+
+}
+
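+/*
+ * Map a hardware TX descriptor rate ID to the matching MGN_* rate constant;
+ * unknown values are logged and fall back to MGN_1M.
+ */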
+u8 rtl8812ae_hw_rate_to_mrate(
+	struct ieee80211_hw *hw,
+	u8 rate
+	)
+{
+	struct rtl_priv *rtlpriv = rtl_priv(hw);
+	u8 ret_rate = MGN_1M;
+
+
+	switch(rate)
+	{
+		case DESC_RATE1M:		ret_rate = MGN_1M;		break;
+		case DESC_RATE2M:		ret_rate = MGN_2M;		break;
+		case DESC_RATE5_5M:		ret_rate = MGN_5_5M;		break;
+		case DESC_RATE11M:		ret_rate = MGN_11M;		break;
+		case DESC_RATE6M:		ret_rate = MGN_6M;		break;
+		case DESC_RATE9M:		ret_rate = MGN_9M;		break;
+		case DESC_RATE12M:		ret_rate = MGN_12M;		break;
+		case DESC_RATE18M:		ret_rate = MGN_18M;		break;
+		case DESC_RATE24M:		ret_rate = MGN_24M;		break;
+		case DESC_RATE36M:		ret_rate = MGN_36M;		break;
+		case DESC_RATE48M:		ret_rate = MGN_48M;		break;
+		case DESC_RATE54M:		ret_rate = MGN_54M;		break;
+		case DESC_RATEMCS0:	ret_rate = MGN_MCS0;		break;
+		case DESC_RATEMCS1:	ret_rate = MGN_MCS1;		break;
+		case DESC_RATEMCS2:	ret_rate = MGN_MCS2;		break;
+		case DESC_RATEMCS3:	ret_rate = MGN_MCS3;		break;
+		case DESC_RATEMCS4:	ret_rate = MGN_MCS4;		break;
+		case DESC_RATEMCS5:	ret_rate = MGN_MCS5;		break;
+		case DESC_RATEMCS6:	ret_rate = MGN_MCS6;		break;
+		case DESC_RATEMCS7:	ret_rate = MGN_MCS7;		break;
+		case DESC_RATEMCS8:	ret_rate = MGN_MCS8;		break;
+		case DESC_RATEMCS9:	ret_rate = MGN_MCS9;		break;
+		case DESC_RATEMCS10:	ret_rate = MGN_MCS10;	break;
+		case DESC_RATEMCS11:	ret_rate = MGN_MCS11;	break;
+		case DESC_RATEMCS12:	ret_rate = MGN_MCS12;	break;
+		case DESC_RATEMCS13:	ret_rate = MGN_MCS13;	break;
+		case DESC_RATEMCS14:	ret_rate = MGN_MCS14;	break;
+		case DESC_RATEMCS15:	ret_rate = MGN_MCS15;	break;
+		case DESC_RATEVHT1SS_MCS0:	ret_rate = MGN_VHT1SS_MCS0;		break;
+		case DESC_RATEVHT1SS_MCS1:	ret_rate = MGN_VHT1SS_MCS1;		break;
+		case DESC_RATEVHT1SS_MCS2:	ret_rate = MGN_VHT1SS_MCS2;		break;
+		case DESC_RATEVHT1SS_MCS3:	ret_rate = MGN_VHT1SS_MCS3;		break;
+		case DESC_RATEVHT1SS_MCS4:	ret_rate = MGN_VHT1SS_MCS4;		break;
+		case DESC_RATEVHT1SS_MCS5:	ret_rate = MGN_VHT1SS_MCS5;		break;
+		case DESC_RATEVHT1SS_MCS6:	ret_rate = MGN_VHT1SS_MCS6;		break;
+		case DESC_RATEVHT1SS_MCS7:	ret_rate = MGN_VHT1SS_MCS7;		break;
+		case DESC_RATEVHT1SS_MCS8:	ret_rate = MGN_VHT1SS_MCS8;		break;
+		case DESC_RATEVHT1SS_MCS9:	ret_rate = MGN_VHT1SS_MCS9;		break;
+		case DESC_RATEVHT2SS_MCS0:	ret_rate = MGN_VHT2SS_MCS0;		break;
+		case DESC_RATEVHT2SS_MCS1:	ret_rate = MGN_VHT2SS_MCS1;		break;
+		case DESC_RATEVHT2SS_MCS2:	ret_rate = MGN_VHT2SS_MCS2;		break;
+		case DESC_RATEVHT2SS_MCS3:	ret_rate = MGN_VHT2SS_MCS3;		break;
+		case DESC_RATEVHT2SS_MCS4:	ret_rate = MGN_VHT2SS_MCS4;		break;
+		case DESC_RATEVHT2SS_MCS5:	ret_rate = MGN_VHT2SS_MCS5;		break;
+		case DESC_RATEVHT2SS_MCS6:	ret_rate = MGN_VHT2SS_MCS6;		break;
+		case DESC_RATEVHT2SS_MCS7:	ret_rate = MGN_VHT2SS_MCS7;		break;
+		case DESC_RATEVHT2SS_MCS8:	ret_rate = MGN_VHT2SS_MCS8;		break;
+		case DESC_RATEVHT2SS_MCS9:	ret_rate = MGN_VHT2SS_MCS9;		break;
+
+		default:
+			RT_TRACE(COMP_POWER_TRACKING, DBG_LOUD,
+				("HwRateToMRate8812(): Non supported Rate [%x]!!!\n",rate ));
+			break;
+	}
+	return ret_rate;
+}
+
+/*-----------------------------------------------------------------------------
+ * Function:	odm_TxPwrTrackSetPwr88E()
+ *
+ * Overview:	88E changes all channel tx power according to flag.
+ *				OFDM & CCK are all different.
+ *
+ * Input:		NONE
+ *
+ * Output:		NONE
+ *
+ * Return:		NONE
+ *
+ * Revised History:
+ *	When		Who		Remark
+ *	04/23/2012	MHC		Create Version 0.
+ *
+ *---------------------------------------------------------------------------*/
+void rtl8812ae_dm_txpwr_track_set_pwr(struct ieee80211_hw *hw,
+	enum pwr_track_control_method method, u8 rf_path, u8 channel_mapped_index)
+{
+	struct rtl_priv *rtlpriv = rtl_priv(hw);
+	struct rtl_dm	*rtldm = rtl_dm(rtl_priv(hw));
+	struct rtl_phy *rtlphy = &(rtlpriv->phy);
+	u32 final_bb_swing_idx[2];
+	u8 pwr_tracking_limit = 26; /*+1.0dB*/
+	u8 tx_rate = 0xFF;
+	s8 final_ofdm_swing_index = 0;
+
+	if(rtldm->tx_rate != 0xFF)
+		tx_rate = rtl8812ae_hw_rate_to_mrate(hw, rtldm->tx_rate);
+
+
+	RT_TRACE(COMP_POWER_TRACKING, DBG_LOUD,
+		("===>rtl8812ae_dm_txpwr_track_set_pwr\n"));
+
+	if(tx_rate != 0xFF) { /*20130429 Mimic Modify High Rate BBSwing Limit.*/
+		/*CCK*/
+		if((tx_rate >= MGN_1M) && (tx_rate <= MGN_11M))
+			pwr_tracking_limit = 32; /*+4dB*/
+		/*OFDM*/
+		else if((tx_rate >= MGN_6M) && (tx_rate <= MGN_48M))
+			pwr_tracking_limit = 30; /*+3dB*/
+		else if(tx_rate == MGN_54M)
+			pwr_tracking_limit = 28; /*+2dB*/
+		/*HT*/
+		else if((tx_rate >= MGN_MCS0) && (tx_rate <= MGN_MCS2)) /*QPSK/BPSK*/
+			pwr_tracking_limit = 34; /*+5dB*/
+		else if((tx_rate >= MGN_MCS3) && (tx_rate <= MGN_MCS4)) /*16QAM*/
+			pwr_tracking_limit = 30; /*+3dB*/
+		else if((tx_rate >= MGN_MCS5) && (tx_rate <= MGN_MCS7)) /*64QAM*/
+			pwr_tracking_limit = 28; /*+2dB*/
+
+		else if((tx_rate >= MGN_MCS8) && (tx_rate <= MGN_MCS10)) /*QPSK/BPSK*/
+			pwr_tracking_limit = 34; /*+5dB*/
+		else if((tx_rate >= MGN_MCS11) && (tx_rate <= MGN_MCS12)) /*16QAM*/
+			pwr_tracking_limit = 30; /*+3dB*/
+		else if((tx_rate >= MGN_MCS13) && (tx_rate <= MGN_MCS15)) /*64QAM*/
+			pwr_tracking_limit = 28; /*+2dB*/
+
+		/*2 VHT*/
+		else if((tx_rate >= MGN_VHT1SS_MCS0) && (tx_rate <= MGN_VHT1SS_MCS2)) /*QPSK/BPSK*/
+			pwr_tracking_limit = 34; /*+5dB*/
+		else if((tx_rate >= MGN_VHT1SS_MCS3) && (tx_rate <= MGN_VHT1SS_MCS4)) /*16QAM*/
+			pwr_tracking_limit = 30; /*+3dB*/
+		else if((tx_rate >= MGN_VHT1SS_MCS5)&&(tx_rate <= MGN_VHT1SS_MCS6)) /*64QAM*/
+			pwr_tracking_limit = 28; /*+2dB*/
+		else if(tx_rate == MGN_VHT1SS_MCS7) /*64QAM*/
+			pwr_tracking_limit = 26; /*+1dB*/
+		else if(tx_rate == MGN_VHT1SS_MCS8) /*256QAM*/
+			pwr_tracking_limit = 24; /*+0dB*/
+		else if(tx_rate == MGN_VHT1SS_MCS9) /*256QAM*/
+			pwr_tracking_limit = 22; /*-1dB*/
+
+		else if((tx_rate >= MGN_VHT2SS_MCS0)&&(tx_rate <= MGN_VHT2SS_MCS2)) /*QPSK/BPSK*/
+			pwr_tracking_limit = 34; /*+5dB*/
+		else if((tx_rate >= MGN_VHT2SS_MCS3)&&(tx_rate <= MGN_VHT2SS_MCS4)) /*16QAM*/
+			pwr_tracking_limit = 30; /*+3dB*/
+		else if((tx_rate >= MGN_VHT2SS_MCS5)&&(tx_rate <= MGN_VHT2SS_MCS6)) /*64QAM*/
+			pwr_tracking_limit = 28; /*+2dB*/
+		else if(tx_rate == MGN_VHT2SS_MCS7) /*64QAM*/
+			pwr_tracking_limit = 26; /*+1dB*/
+		else if(tx_rate == MGN_VHT2SS_MCS8) /*256QAM*/
+			pwr_tracking_limit = 24; /*+0dB*/
+		else if(tx_rate == MGN_VHT2SS_MCS9) /*256QAM*/
+			pwr_tracking_limit = 22; /*-1dB*/
+		else
+			pwr_tracking_limit = 24;
+	}
+	RT_TRACE(COMP_POWER_TRACKING, DBG_LOUD,
+		("TxRate=0x%x, PwrTrackingLimit=%d\n", tx_rate, pwr_tracking_limit));
+
+
+	if (method == BBSWING) {
+		RT_TRACE(COMP_POWER_TRACKING, DBG_LOUD,
+			("===>rtl8812ae_dm_txpwr_track_set_pwr\n"));
+
+		if (rf_path == RF90_PATH_A) {
+			final_bb_swing_idx[RF90_PATH_A] =
+				(rtldm->ofdm_index[RF90_PATH_A] > pwr_tracking_limit) ?
+				pwr_tracking_limit : rtldm->ofdm_index[RF90_PATH_A];
+			RT_TRACE(COMP_POWER_TRACKING, DBG_LOUD,
+				("pDM_Odm->RFCalibrateInfo.OFDM_index[ODM_RF_PATH_A]=%d, \
+				pDM_Odm->RealBbSwingIdx[ODM_RF_PATH_A]=%d\n",
+				rtldm->ofdm_index[RF90_PATH_A], final_bb_swing_idx[RF90_PATH_A]));
+
+			rtl_set_bbreg(hw, RA_TXSCALE, 0xFFE00000, rtl8812ae_txscaling_table[final_bb_swing_idx[RF90_PATH_A]]);
+		} else {
+			final_bb_swing_idx[RF90_PATH_B] =
+				rtldm->ofdm_index[RF90_PATH_B] > pwr_tracking_limit ? \
+				pwr_tracking_limit : rtldm->ofdm_index[RF90_PATH_B];
+			RT_TRACE(COMP_POWER_TRACKING, DBG_LOUD,
+				("pDM_Odm->RFCalibrateInfo.OFDM_index[ODM_RF_PATH_B]=%d, \
+				pDM_Odm->RealBbSwingIdx[ODM_RF_PATH_B]=%d\n",
+				rtldm->ofdm_index[RF90_PATH_B], final_bb_swing_idx[RF90_PATH_B]));
+
+			rtl_set_bbreg(hw, RB_TXSCALE, 0xFFE00000, rtl8812ae_txscaling_table[final_bb_swing_idx[RF90_PATH_B]]);
+		}
+	} else if (method == MIX_MODE) {
+		RT_TRACE(COMP_POWER_TRACKING, DBG_LOUD,
+			("pDM_Odm->DefaultOfdmIndex=%d, \
+			pDM_Odm->Aboslute_OFDMSwingIdx[RFPath]=%d, RF_Path = %d\n",
+			rtldm->default_ofdm_index, rtldm->aboslute_ofdm_swing_idx[rf_path],
+			rf_path ));
+
+
+		final_ofdm_swing_index = rtldm->default_ofdm_index + rtldm->aboslute_ofdm_swing_idx[rf_path];
+
+		if (rf_path == RF90_PATH_A) {
+			if(final_ofdm_swing_index > pwr_tracking_limit) {     /*BBSwing higher than limit*/
+
+				rtldm->remnant_cck_idx = final_ofdm_swing_index - pwr_tracking_limit;
+				/* CCK Follow the same compensate value as Path A*/
+				rtldm->remnant_ofdm_swing_idx[rf_path] = final_ofdm_swing_index - pwr_tracking_limit;
+
+				rtl_set_bbreg(hw, RA_TXSCALE, 0xFFE00000, rtl8812ae_txscaling_table[pwr_tracking_limit]);
+
+				rtldm->modify_txagc_flag_path_a = true;
+
+				/*Set TxAGC Page C{};*/
+				rtl8821ae_phy_set_txpower_level_by_path(hw, rtlphy->current_channel, RF90_PATH_A);
+
+				RT_TRACE(COMP_POWER_TRACKING, DBG_LOUD,
+					("******Path_A Over BBSwing Limit , PwrTrackingLimit = %d , Remnant TxAGC Value = %d \n",
+					pwr_tracking_limit, rtldm->remnant_ofdm_swing_idx[rf_path]));
+			} else if (final_ofdm_swing_index < 0) {
+				rtldm->remnant_cck_idx = final_ofdm_swing_index;
+				/* CCK Follow the same compensate value as Path A*/
+				rtldm->remnant_ofdm_swing_idx[rf_path] = final_ofdm_swing_index;
+
+				rtl_set_bbreg(hw, RA_TXSCALE, 0xFFE00000, rtl8812ae_txscaling_table[0]);
+
+				rtldm->modify_txagc_flag_path_a = true;
+
+				/*Set TxAGC Page C{};*/
+				rtl8821ae_phy_set_txpower_level_by_path(hw, rtlphy->current_channel, RF90_PATH_A);
+
+				RT_TRACE(COMP_POWER_TRACKING, DBG_LOUD,
+					("******Path_A Lower then BBSwing lower bound  0 , Remnant TxAGC Value = %d \n",
+					 rtldm->remnant_ofdm_swing_idx[rf_path]));
+			} else {
+				rtl_set_bbreg(hw, RA_TXSCALE, 0xFFE00000, rtl8812ae_txscaling_table[final_ofdm_swing_index]);
+
+				RT_TRACE(COMP_POWER_TRACKING, DBG_LOUD,
+					("******Path_A Compensate with BBSwing , Final_OFDM_Swing_Index = %d \n",
+					final_ofdm_swing_index));
+
+				if(rtldm->modify_txagc_flag_path_a) { /*If TxAGC has changed, reset TxAGC again*/
+					rtldm->remnant_cck_idx = 0;
+					rtldm->remnant_ofdm_swing_idx[rf_path] = 0;
+
+					/*Set TxAGC Page C{};*/
+					rtl8821ae_phy_set_txpower_level_by_path(hw, rtlphy->current_channel, RF90_PATH_A);
+
+					rtldm->modify_txagc_flag_path_a = false;
+
+					RT_TRACE(COMP_POWER_TRACKING, DBG_LOUD,
+						("******Path_A pDM_Odm->Modify_TxAGC_Flag = FALSE \n"));
+				}
+			}
+		}
+
+		if (rf_path == RF90_PATH_B) {
+			if(final_ofdm_swing_index > pwr_tracking_limit) {    /*BBSwing higher than limit*/
+				rtldm->remnant_ofdm_swing_idx[rf_path] = final_ofdm_swing_index - pwr_tracking_limit;
+
+				rtl_set_bbreg(hw, RB_TXSCALE, 0xFFE00000, rtl8812ae_txscaling_table[pwr_tracking_limit]);
+
+				rtldm->modify_txagc_flag_path_b = true;
+
+				/*Set TxAGC Page E{};*/
+				rtl8821ae_phy_set_txpower_level_by_path(hw, rtlphy->current_channel, RF90_PATH_B);
+
+				RT_TRACE(COMP_POWER_TRACKING, DBG_LOUD,
+					("******Path_B Over BBSwing Limit , PwrTrackingLimit = %d , Remnant TxAGC Value = %d \n",
+					pwr_tracking_limit, rtldm->remnant_ofdm_swing_idx[rf_path]));
+			} else if (final_ofdm_swing_index < 0) {
+				rtldm->remnant_ofdm_swing_idx[rf_path] = final_ofdm_swing_index;
+
+				rtl_set_bbreg(hw, RB_TXSCALE, 0xFFE00000, rtl8812ae_txscaling_table[0]);
+
+				rtldm->modify_txagc_flag_path_b = true;
+
+				/*Set TxAGC Page E{};*/
+				rtl8821ae_phy_set_txpower_level_by_path(hw, rtlphy->current_channel, RF90_PATH_B);
+
+				RT_TRACE(COMP_POWER_TRACKING, DBG_LOUD,
+					("******Path_B Lower then BBSwing lower bound  0 , Remnant TxAGC Value = %d \n",
+					rtldm->remnant_ofdm_swing_idx[rf_path] ));
+			} else {
+				rtl_set_bbreg(hw, RB_TXSCALE, 0xFFE00000, rtl8812ae_txscaling_table[final_ofdm_swing_index]);
+
+				RT_TRACE(COMP_POWER_TRACKING, DBG_LOUD,
+					("******Path_B Compensate with BBSwing , Final_OFDM_Swing_Index = %d \n",
+					final_ofdm_swing_index));
+
+				if(rtldm->modify_txagc_flag_path_b) { /*If TxAGC has changed, reset TxAGC again*/
+					rtldm->remnant_ofdm_swing_idx[rf_path] = 0;
+
+					/*Set TxAGC Page E{};*/
+					rtl8821ae_phy_set_txpower_level_by_path(hw, rtlphy->current_channel, RF90_PATH_B);
+
+					rtldm->modify_txagc_flag_path_b = false;
+
+					RT_TRACE(COMP_POWER_TRACKING, DBG_LOUD,
+						("******Path_B pDM_Odm->Modify_TxAGC_Flag = FALSE \n"));
+				}
+			}
+		}
+
+	} else {
+		return;
+	}
+}
+
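+/*
+ * Thermal meter based TX power tracking for the 8812AE: average the RF
+ * thermal readings, compare them with the efuse baseline, walk the delta
+ * swing tables to update the OFDM/CCK swing indices, apply the result in
+ * MIX_MODE on both paths, and trigger LCK/IQK when the temperature delta
+ * reaches IQK_THRESHOLD.
+ */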
+void rtl8812ae_dm_txpower_tracking_callback_thermalmeter
+	(struct ieee80211_hw *hw)
+{
+	struct rtl_priv *rtlpriv = rtl_priv(hw);
+	struct rtl_efuse *rtlefuse = rtl_efuse(rtl_priv(hw));
+	struct rtl_dm	*rtldm = rtl_dm(rtl_priv(hw));
+	struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
+	struct rtl_phy *rtlphy = &(rtlpriv->phy);
+
+	u8 thermal_value = 0, delta, delta_lck, delta_iqk, p = 0, i = 0;
+	u8 thermal_value_avg_count = 0;
+	u32 thermal_value_avg = 0;
+
+	u8 ofdm_min_index = 6;  /*OFDM BB Swing should be less than +3.0dB, which is required by Arthur*/
+	u8 index_for_channel = 0; /* GetRightChnlPlaceforIQK(pHalData->CurrentChannel)*/
+
+	/* 1. The following TWO tables decide the final index of OFDM/CCK swing table.*/
+	u8 *delta_swing_table_idx_tup_a;
+	u8 *delta_swing_table_idx_tdown_a;
+	u8 *delta_swing_table_idx_tup_b;
+	u8 *delta_swing_table_idx_tdown_b;
+
+	/*2. Initialization (7 steps in total)*/
+	rtl8812ae_get_delta_swing_table(hw, (u8**)&delta_swing_table_idx_tup_a,
+									(u8**)&delta_swing_table_idx_tdown_a,
+									  (u8**)&delta_swing_table_idx_tup_b,
+									  (u8**)&delta_swing_table_idx_tdown_b);
+
+	rtldm->btxpower_trackinginit = true;
+
+	RT_TRACE(COMP_POWER_TRACKING, DBG_LOUD,
+		("===>rtl8812ae_dm_txpower_tracking_callback_thermalmeter, \
+		 \n pDM_Odm->BbSwingIdxCckBase: %d, pDM_Odm->BbSwingIdxOfdmBase[A]:\
+		 %d, pDM_Odm->DefaultOfdmIndex: %d\n",
+		rtldm->bb_swing_idx_cck_base,
+		rtldm->bb_swing_idx_ofdm_base[RF90_PATH_A],
+		rtldm->default_ofdm_index));
+
+	thermal_value = (u8)rtl_get_rfreg(hw, RF90_PATH_A, RF_T_METER_8812A, 0xfc00);	/*0x42: RF Reg[15:10] 88E*/
+	if( ! rtldm->txpower_track_control || rtlefuse->eeprom_thermalmeter == 0 ||
+		rtlefuse->eeprom_thermalmeter == 0xFF)
+        	return;
+
+
+	/* 3. Initialize ThermalValues of RFCalibrateInfo*/
+
+	if(rtlhal->reloadtxpowerindex)
+	{
+		RT_TRACE(COMP_POWER_TRACKING, DBG_LOUD,
+			("reload ofdm index for band switch\n"));
+	}
+
+	/*4. Calculate average thermal meter*/
+	rtldm->thermalvalue_avg[rtldm->thermalvalue_avg_index] = thermal_value;
+	rtldm->thermalvalue_avg_index++;
+	if(rtldm->thermalvalue_avg_index == AVG_THERMAL_NUM_8812A)
+		/*Average times =  c.AverageThermalNum*/
+		rtldm->thermalvalue_avg_index = 0;
+
+	for(i = 0; i < AVG_THERMAL_NUM_8812A; i++)
+	{
+		if(rtldm->thermalvalue_avg[i])
+		{
+			thermal_value_avg += rtldm->thermalvalue_avg[i];
+			thermal_value_avg_count++;
+		}
+	}
+
+	if(thermal_value_avg_count) /*Calculate Average ThermalValue after average enough times*/
+	{
+		thermal_value = (u8)(thermal_value_avg / thermal_value_avg_count);
+		RT_TRACE(COMP_POWER_TRACKING, DBG_LOUD,
+			("AVG Thermal Meter = 0x%X, EFUSE Thermal Base = 0x%X\n",
+			thermal_value, rtlefuse->eeprom_thermalmeter));
+	}
+
+	/*5. Calculate delta, delta_LCK, delta_IQK.*/
+	/*"delta" here is used to determine whether thermal value changes or not.*/
+	delta = (thermal_value > rtldm->thermalvalue) ? \
+		(thermal_value - rtldm->thermalvalue): \
+		(rtldm->thermalvalue - thermal_value);
+	delta_lck = (thermal_value > rtldm->thermalvalue_lck) ? \
+		(thermal_value - rtldm->thermalvalue_lck) : \
+		(rtldm->thermalvalue_lck - thermal_value);
+	delta_iqk = (thermal_value > rtldm->thermalvalue_iqk) ? \
+		(thermal_value - rtldm->thermalvalue_iqk) : \
+		(rtldm->thermalvalue_iqk - thermal_value);
+
+	RT_TRACE(COMP_POWER_TRACKING, DBG_LOUD,
+		("(delta, delta_LCK, delta_IQK) = (%d, %d, %d)\n",
+		delta, delta_lck, delta_iqk));
+
+	/* 6. If necessary, do LCK.	*/
+
+	if (delta_lck >= IQK_THRESHOLD) /*Delta temperature is equal to or larger than 20 centigrade.*/
+	{
+		RT_TRACE(COMP_POWER_TRACKING, DBG_LOUD,
+			("delta_LCK(%d) >= Threshold_IQK(%d)\n",
+			delta_lck, IQK_THRESHOLD));
+		rtldm->thermalvalue_lck = thermal_value;
+		rtl8812ae_phy_lccalibrate(hw);
+	}
+
+	/*7. If necessary, move the index of swing table to adjust Tx power.*/
+
+	if (delta > 0 && rtldm->txpower_track_control)
+	{
+		/*"delta" here is used to record the absolute value of differrence.*/
+	    	delta = thermal_value > rtlefuse->eeprom_thermalmeter ? \
+		    	(thermal_value - rtlefuse->eeprom_thermalmeter) : \
+		    	(rtlefuse->eeprom_thermalmeter - thermal_value);
+
+		if (delta >= TXPWR_TRACK_TABLE_SIZE)
+			delta = TXPWR_TRACK_TABLE_SIZE - 1;
+
+		/*7.1 The Final Power Index = BaseIndex + PowerIndexOffset*/
+
+		if(thermal_value > rtlefuse->eeprom_thermalmeter) {
+
+			RT_TRACE(COMP_POWER_TRACKING, DBG_LOUD,
+			    		("delta_swing_table_idx_tup_a[%d] = %d\n",
+			    		delta, delta_swing_table_idx_tup_a[delta]));
+			rtldm->delta_power_index_last[RF90_PATH_A] = rtldm->delta_power_index[RF90_PATH_A];
+			rtldm->delta_power_index[RF90_PATH_A] = delta_swing_table_idx_tup_a[delta];
+
+			rtldm->aboslute_ofdm_swing_idx[RF90_PATH_A] =  delta_swing_table_idx_tup_a[delta];
+			/*Record delta swing for mix mode power tracking*/
+
+			RT_TRACE(COMP_POWER_TRACKING, DBG_LOUD,
+			("******Temp is higher and pDM_Odm->Aboslute_OFDMSwingIdx[ODM_RF_PATH_A] = %d\n",
+			rtldm->aboslute_ofdm_swing_idx[RF90_PATH_A]));
+
+
+			RT_TRACE(COMP_POWER_TRACKING, DBG_LOUD,
+	                                     ("delta_swing_table_idx_tup_b[%d] = %d\n",
+	                                     delta, delta_swing_table_idx_tup_b[delta]));
+			rtldm->delta_power_index_last[RF90_PATH_B] = rtldm->delta_power_index[RF90_PATH_B];
+			rtldm->delta_power_index[RF90_PATH_B] = delta_swing_table_idx_tup_b[delta];
+
+			rtldm->aboslute_ofdm_swing_idx[RF90_PATH_B] =  delta_swing_table_idx_tup_b[delta];
+			/*Record delta swing for mix mode power tracking*/
+
+			RT_TRACE(COMP_POWER_TRACKING, DBG_LOUD,
+			("******Temp is higher and pDM_Odm->Aboslute_OFDMSwingIdx[ODM_RF_PATH_B] = %d\n",
+			rtldm->aboslute_ofdm_swing_idx[RF90_PATH_B]));
+
+        	} else {
+        		RT_TRACE(COMP_POWER_TRACKING, DBG_LOUD,
+               	 	("delta_swing_table_idx_tdown_a[%d] = %d\n",
+               	 	delta, delta_swing_table_idx_tdown_a[delta]));
+
+			rtldm->delta_power_index_last[RF90_PATH_A] = rtldm->delta_power_index[RF90_PATH_A];
+			rtldm->delta_power_index[RF90_PATH_A] = -1 * delta_swing_table_idx_tdown_a[delta];
+
+			rtldm->aboslute_ofdm_swing_idx[RF90_PATH_A] =  -1 * delta_swing_table_idx_tdown_a[delta];
+			/* Record delta swing for mix mode power tracking*/
+			RT_TRACE(COMP_POWER_TRACKING, DBG_LOUD,
+			("******Temp is lower and pDM_Odm->Aboslute_OFDMSwingIdx[ODM_RF_PATH_A] = %d\n",
+			rtldm->aboslute_ofdm_swing_idx[RF90_PATH_A]));
+
+
+			RT_TRACE(COMP_POWER_TRACKING, DBG_LOUD,
+                		("deltaSwingTableIdx_TDOWN_B[%d] = %d\n",
+                		delta, delta_swing_table_idx_tdown_b[delta]));
+
+			rtldm->delta_power_index_last[RF90_PATH_B] = rtldm->delta_power_index[RF90_PATH_B];
+			rtldm->delta_power_index[RF90_PATH_B] = -1 * delta_swing_table_idx_tdown_b[delta];
+
+			rtldm->aboslute_ofdm_swing_idx[RF90_PATH_B] =  -1 * delta_swing_table_idx_tdown_b[delta];
+			/*Record delta swing for mix mode power tracking*/
+
+			RT_TRACE(COMP_POWER_TRACKING, DBG_LOUD,
+			("******Temp is lower and pDM_Odm->Aboslute_OFDMSwingIdx[ODM_RF_PATH_B] = %d\n",
+			rtldm->aboslute_ofdm_swing_idx[RF90_PATH_B]));
+ 		}
+
+		for (p = RF90_PATH_A; p < MAX_PATH_NUM_8812A; p++)
+        	{
+			RT_TRACE(COMP_POWER_TRACKING, DBG_LOUD,
+		                ("\n\n================================ [Path-%c] \
+		                Calculating PowerIndexOffset ================================\n",
+		                (p == RF90_PATH_A ? 'A' : 'B')));
+
+		    	if (rtldm->delta_power_index[p] == rtldm->delta_power_index_last[p])
+				/*If Thermal value changes but lookup table value still the same*/
+		    		rtldm->power_index_offset[p] = 0;
+		    	else
+		    		rtldm->power_index_offset[p] =
+		    			rtldm->delta_power_index[p] - rtldm->delta_power_index_last[p];
+				/*Power Index Diff between 2 times Power Tracking*/
+
+		    	RT_TRACE(COMP_POWER_TRACKING, DBG_LOUD,
+		    		("[Path-%c] PowerIndexOffset(%d) = DeltaPowerIndex(%d) - DeltaPowerIndexLast(%d)\n",
+                		(p == RF90_PATH_A ? 'A' : 'B'),
+                		rtldm->power_index_offset[p],
+                		rtldm->delta_power_index[p] ,
+                		rtldm->delta_power_index_last[p]));
+
+	    		rtldm->ofdm_index[p] =
+					rtldm->bb_swing_idx_ofdm_base[p] + rtldm->power_index_offset[p];
+		    	rtldm->cck_index =
+					rtldm->bb_swing_idx_cck_base + rtldm->power_index_offset[p];
+
+		    	rtldm->bb_swing_idx_cck = rtldm->cck_index;
+		   	rtldm->bb_swing_idx_ofdm[p] = rtldm->ofdm_index[p];
+
+	           	/*************Print BB Swing Base and Index Offset*************/
+
+		    	RT_TRACE(COMP_POWER_TRACKING, DBG_LOUD,
+		    		("The 'CCK' final index(%d) = BaseIndex(%d) + PowerIndexOffset(%d)\n",
+		    		rtldm->bb_swing_idx_cck,
+		    		rtldm->bb_swing_idx_cck_base,
+		    		rtldm->power_index_offset[p]));
+			RT_TRACE(COMP_POWER_TRACKING, DBG_LOUD,
+				("The 'OFDM' final index(%d) = BaseIndex[%c](%d) + PowerIndexOffset(%d)\n",
+		   		rtldm->bb_swing_idx_ofdm[p],
+		   		(p == RF90_PATH_A ? 'A' : 'B'),
+		   		rtldm->bb_swing_idx_ofdm_base[p],
+		   		rtldm->power_index_offset[p]));
+
+		    	/*7.1 Handle boundary conditions of index.*/
+
+
+			if(rtldm->ofdm_index[p] > TXSCALE_TABLE_SIZE -1)
+			{
+				rtldm->ofdm_index[p] = TXSCALE_TABLE_SIZE -1;
+			}
+			else if (rtldm->ofdm_index[p] < ofdm_min_index)
+			{
+				rtldm->ofdm_index[p] = ofdm_min_index;
+			}
+		}
+		RT_TRACE(COMP_POWER_TRACKING, DBG_LOUD,
+            		("\n\n======================================================\
+            		==================================================\n"));
+		if(rtldm->cck_index > TXSCALE_TABLE_SIZE -1)
+			rtldm->cck_index = TXSCALE_TABLE_SIZE -1;
+		else if (rtldm->cck_index < 0)
+			rtldm->cck_index = 0;
+	} else {
+		RT_TRACE(COMP_POWER_TRACKING, DBG_LOUD,
+			("The thermal meter is unchanged or TxPowerTracking OFF(%d): \
+			ThermalValue: %d , pDM_Odm->RFCalibrateInfo.ThermalValue: %d\n",
+			rtldm->txpower_track_control,
+			thermal_value,
+			rtldm->thermalvalue));
+
+	    	for (p = RF90_PATH_A; p < MAX_PATH_NUM_8812A; p++)
+		    	rtldm->power_index_offset[p] = 0;
+	}
+	RT_TRACE(COMP_POWER_TRACKING, DBG_LOUD,
+		("TxPowerTracking: [CCK] Swing Current Index: %d, Swing Base Index: %d\n",
+		rtldm->cck_index, rtldm->bb_swing_idx_cck_base));       /*Print Swing base & current*/
+	for (p = RF90_PATH_A; p < MAX_PATH_NUM_8812A; p++)
+	{
+		RT_TRACE(COMP_POWER_TRACKING, DBG_LOUD,
+			("TxPowerTracking: [OFDM] Swing Current Index: %d, Swing Base Index[%c]: %d\n",
+			rtldm->ofdm_index[p],
+			(p == RF90_PATH_A ? 'A' : 'B'),
+			rtldm->bb_swing_idx_ofdm_base[p]));
+	}
+
+	if ((rtldm->power_index_offset[RF90_PATH_A] != 0 ||
+		rtldm->power_index_offset[RF90_PATH_B] != 0 ) &&
+     	 	rtldm->txpower_track_control)
+	{
+		/*7.2 Configure the Swing Table to adjust Tx Power.*/
+		/*Always TRUE after Tx Power is adjusted by power tracking.*/
+		/*
+		  2012/04/23 MH According to Luke's suggestion, we can not write BB digital
+		  to increase TX power. Otherwise, EVM will be bad.
+
+		  2012/04/25 MH Add for tx power tracking to set tx power in tx agc for 88E.
+		*/
+		if (thermal_value > rtldm->thermalvalue)
+		{
+			RT_TRACE(COMP_POWER_TRACKING, DBG_LOUD,
+				("Temperature Increasing(A): delta_pi: %d , delta_t: %d, Now_t: %d, EFUSE_t: %d, Last_t: %d\n",
+				rtldm->power_index_offset[RF90_PATH_A],
+				delta, thermal_value,
+				rtlefuse->eeprom_thermalmeter,
+				rtldm->thermalvalue));
+
+			RT_TRACE(COMP_POWER_TRACKING, DBG_LOUD,
+				("Temperature Increasing(B): delta_pi: %d , delta_t: %d, Now_t: %d, EFUSE_t: %d, Last_t: %d\n",
+				rtldm->power_index_offset[RF90_PATH_B],
+				delta, thermal_value,
+				rtlefuse->eeprom_thermalmeter,
+				rtldm->thermalvalue));
+
+		} else if (thermal_value < rtldm->thermalvalue) { /*Low temperature*/
+			RT_TRACE(COMP_POWER_TRACKING, DBG_LOUD,
+				("Temperature Decreasing(A): delta_pi: %d , delta_t: %d, Now_t: %d, EFUSE_t: %d, Last_t: %d\n",
+				rtldm->power_index_offset[RF90_PATH_A],
+				delta, thermal_value,
+				rtlefuse->eeprom_thermalmeter,
+				rtldm->thermalvalue));
+
+			RT_TRACE(COMP_POWER_TRACKING, DBG_LOUD,
+				("Temperature Decreasing(B): delta_pi: %d , delta_t: %d, Now_t: %d, EFUSE_t: %d, Last_t: %d\n",
+				rtldm->power_index_offset[RF90_PATH_B],
+				delta, thermal_value,
+				rtlefuse->eeprom_thermalmeter,
+				rtldm->thermalvalue));
+		}
+
+		if (thermal_value > rtlefuse->eeprom_thermalmeter) {
+			RT_TRACE(COMP_POWER_TRACKING, DBG_LOUD,
+				("Temperature(%d) higher than PG value(%d)\n",
+				thermal_value, rtlefuse->eeprom_thermalmeter));
+
+
+			RT_TRACE(COMP_POWER_TRACKING, DBG_LOUD,
+				("**********Enter POWER Tracking MIX_MODE**********\n"));
+			for (p = RF90_PATH_A; p < MAX_PATH_NUM_8812A; p++)
+					rtl8812ae_dm_txpwr_track_set_pwr(hw, MIX_MODE, p, 0);
+
+		} else {
+			RT_TRACE(COMP_POWER_TRACKING, DBG_LOUD,
+				("Temperature(%d) lower than PG value(%d)\n",
+				thermal_value, rtlefuse->eeprom_thermalmeter));
+
+
+	            	RT_TRACE(COMP_POWER_TRACKING, DBG_LOUD,
+				("**********Enter POWER Tracking MIX_MODE**********\n"));
+			for (p = RF90_PATH_A; p < MAX_PATH_NUM_8812A; p++)
+				rtl8812ae_dm_txpwr_track_set_pwr(hw, MIX_MODE, p, index_for_channel);
+
+		}
+
+		rtldm->bb_swing_idx_cck_base = rtldm->bb_swing_idx_cck;   /*Record last time Power Tracking result as base.*/
+		for (p = RF90_PATH_A; p < MAX_PATH_NUM_8812A; p++)
+				rtldm->bb_swing_idx_ofdm_base[p] = rtldm->bb_swing_idx_ofdm[p];
+
+	 		RT_TRACE(COMP_POWER_TRACKING, DBG_LOUD,
+					("pDM_Odm->RFCalibrateInfo.ThermalValue = %d ThermalValue= %d\n",
+					rtldm->thermalvalue, thermal_value));
+
+		rtldm->thermalvalue = thermal_value;         /*Record last Power Tracking Thermal Value*/
+
+	}
+	/*Delta temperature is equal to or larger than 20 centigrade (When threshold is 8).*/
+	if ((delta_iqk >= IQK_THRESHOLD)) {
+
+		if ( !rtlphy->b_iqk_in_progress) {
+
+			spin_lock(&rtlpriv->locks.iqk_lock);
+			rtlphy->b_iqk_in_progress = true;
+			spin_unlock(&rtlpriv->locks.iqk_lock);
+
+			rtl8812ae_do_iqk(hw, delta_iqk, thermal_value, 8);
+
+			spin_lock(&rtlpriv->locks.iqk_lock);
+			rtlphy->b_iqk_in_progress = false;
+			spin_unlock(&rtlpriv->locks.iqk_lock);
+		}
+	}
+
+	RT_TRACE(COMP_POWER_TRACKING, DBG_LOUD,
+		("<===rtl8812ae_dm_txpower_tracking_callback_thermalmeter\n"));
+}
+
+
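+/* 8821AE variant of the per-band delta swing table selection above. */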
+void rtl8821ae_get_delta_swing_table(
+	struct ieee80211_hw *hw,
+	u8 **temperature_up_a,
+	u8 **temperature_down_a,
+	u8 **temperature_up_b,
+	u8 **temperature_down_b
+	)
+{
+   	struct rtl_priv *rtlpriv = rtl_priv(hw);
+	struct rtl_phy *rtlphy = &(rtlpriv->phy);
+	struct rtl_dm *rtldm = rtl_dm(rtl_priv(hw));
+	u8 channel = rtlphy->current_channel;
+	u8 rate = rtldm->tx_rate;
+
+
+	if ( 1 <= channel && channel <= 14) {
+		if (RX_HAL_IS_CCK_RATE(rate)) {
+		        *temperature_up_a = rtldm->delta_swing_table_idx_24gccka_p;
+		        *temperature_down_a = rtldm->delta_swing_table_idx_24gccka_n;
+		        *temperature_up_b = rtldm->delta_swing_table_idx_24gcckb_p;
+		        *temperature_down_b = rtldm->delta_swing_table_idx_24gcckb_n;
+		} else {
+		        *temperature_up_a = rtldm->delta_swing_table_idx_24ga_p;
+		        *temperature_down_a = rtldm->delta_swing_table_idx_24ga_n;
+		        *temperature_up_b = rtldm->delta_swing_table_idx_24gb_p;
+		        *temperature_down_b = rtldm->delta_swing_table_idx_24gb_n;
+		}
+ 	} else if ( 36 <= channel && channel <= 64) {
+	        *temperature_up_a = rtldm->delta_swing_table_idx_5ga_p[0];
+	        *temperature_down_a = rtldm->delta_swing_table_idx_5ga_n[0];
+	        *temperature_up_b = rtldm->delta_swing_table_idx_5gb_p[0];
+	        *temperature_down_b = rtldm->delta_swing_table_idx_5gb_n[0];
+    	} else if ( 100 <= channel && channel <= 140) {
+		*temperature_up_a = rtldm->delta_swing_table_idx_5ga_p[1];
+		*temperature_down_a = rtldm->delta_swing_table_idx_5ga_n[1];
+		*temperature_up_b = rtldm->delta_swing_table_idx_5gb_p[1];
+		*temperature_down_b = rtldm->delta_swing_table_idx_5gb_n[1];
+    	} else if ( 149 <= channel && channel <= 173) {
+		*temperature_up_a = rtldm->delta_swing_table_idx_5ga_p[2];
+		*temperature_down_a = rtldm->delta_swing_table_idx_5ga_n[2];
+		*temperature_up_b = rtldm->delta_swing_table_idx_5gb_p[2];
+		*temperature_down_b = rtldm->delta_swing_table_idx_5gb_n[2];
+    	} else {
+	    *temperature_up_a = (u8*)rtl8818e_delta_swing_table_idx_24gb_p_txpwrtrack;
+	    *temperature_down_a =(u8*)rtl8818e_delta_swing_table_idx_24gb_n_txpwrtrack;
+	    *temperature_up_b = (u8*)rtl8818e_delta_swing_table_idx_24gb_p_txpwrtrack;
+	    *temperature_down_b = (u8*)rtl8818e_delta_swing_table_idx_24gb_n_txpwrtrack;
+    	}
+
+	return;
+}
+
+void rtl8821ae_phy_lccalibrate(
+	struct ieee80211_hw *hw)
+{
+	struct rtl_priv *rtlpriv = rtl_priv(hw);
+
+	RT_TRACE(COMP_POWER_TRACKING, DBG_LOUD, ("===> rtl8821ae_phy_lccalibrate\n"));
+
+	RT_TRACE(COMP_POWER_TRACKING, DBG_LOUD, ("<=== rtl8821ae_phy_lccalibrate\n"));
+
+}
+
+/*-----------------------------------------------------------------------------
+ * Function:	odm_TxPwrTrackSetPwr88E()
+ *
+ * Overview:	88E change all channel tx power according to flag.
+ *				OFDM & CCK are all different.
+ *
+ * Input:		NONE
+ *
+ * Output:		NONE
+ *
+ * Return:		NONE
+ *
+ * Revised History:
+ *	When		Who		Remark
+ *	04/23/2012	MHC		Create Version 0.
+ *
+ *---------------------------------------------------------------------------*/
+void rtl8821ae_dm_txpwr_track_set_pwr(struct ieee80211_hw *hw,
+	enum pwr_track_control_method method, u8 rf_path, u8 channel_mapped_index)
+{
+	struct rtl_priv *rtlpriv = rtl_priv(hw);
+	struct rtl_dm	*rtldm = rtl_dm(rtl_priv(hw));
+	struct rtl_phy *rtlphy = &(rtlpriv->phy);
+	u32 final_bb_swing_idx[1];
+	u8 pwr_tracking_limit = 26; /*+1.0dB*/
+	u8 tx_rate = 0xFF;
+	s8 final_ofdm_swing_index = 0;
+
+	if(rtldm->tx_rate != 0xFF)
+		tx_rate = rtl8812ae_hw_rate_to_mrate(hw, rtldm->tx_rate);
+
+
+	RT_TRACE(COMP_POWER_TRACKING, DBG_LOUD,
+		("===>rtl8821ae_dm_txpwr_track_set_pwr\n"));
+
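+	/* Each BB swing index step is 0.5 dB and index 24 is the 0 dB reference,
+	 * so e.g. a limit of 32 allows up to +4 dB while 22 allows only -1 dB.
+	 * Higher-order modulations get tighter limits to keep EVM acceptable.
+	 */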
+	if(tx_rate != 0xFF) { /*20130429 Mimic Modify High Rate BBSwing Limit.*/
+		/*CCK*/
+		if((tx_rate >= MGN_1M) && (tx_rate <= MGN_11M))
+			pwr_tracking_limit = 32; /*+4dB*/
+		/*OFDM*/
+		else if((tx_rate >= MGN_6M) && (tx_rate <= MGN_48M))
+			pwr_tracking_limit = 30; /*+3dB*/
+		else if(tx_rate == MGN_54M)
+			pwr_tracking_limit = 28; /*+2dB*/
+		/*HT*/
+		else if((tx_rate >= MGN_MCS0) && (tx_rate <= MGN_MCS2)) /*QPSK/BPSK*/
+			pwr_tracking_limit = 34; /*+5dB*/
+		else if((tx_rate >= MGN_MCS3) && (tx_rate <= MGN_MCS4)) /*16QAM*/
+			pwr_tracking_limit = 30; /*+3dB*/
+		else if((tx_rate >= MGN_MCS5) && (tx_rate <= MGN_MCS7)) /*64QAM*/
+			pwr_tracking_limit = 28; /*+2dB*/
+#if 0
+		else if((tx_rate >= MGN_MCS8) && (tx_rate <= MGN_MCS10)) /*QPSK/BPSK*/
+			pwr_tracking_limit = 34; /*+5dB*/
+		else if((tx_rate >= MGN_MCS11) && (tx_rate <= MGN_MCS12)) /*16QAM*/
+			pwr_tracking_limit = 30; /*+3dB*/
+		else if((tx_rate >= MGN_MCS13) && (tx_rate <= MGN_MCS15)) /*64QAM*/
+			pwr_tracking_limit = 28; /*+2dB*/
+#endif
+		/*2 VHT*/
+		else if((tx_rate >= MGN_VHT1SS_MCS0) && (tx_rate <= MGN_VHT1SS_MCS2)) /*QPSK/BPSK*/
+			pwr_tracking_limit = 34; /*+5dB*/
+		else if((tx_rate >= MGN_VHT1SS_MCS3) && (tx_rate <= MGN_VHT1SS_MCS4)) /*16QAM*/
+			pwr_tracking_limit = 30; /*+3dB*/
+		else if((tx_rate >= MGN_VHT1SS_MCS5)&&(tx_rate <= MGN_VHT1SS_MCS6)) /*64QAM*/
+			pwr_tracking_limit = 28; /*+2dB*/
+		else if(tx_rate == MGN_VHT1SS_MCS7) /*64QAM*/
+			pwr_tracking_limit = 26; /*+1dB*/
+		else if(tx_rate == MGN_VHT1SS_MCS8) /*256QAM*/
+			pwr_tracking_limit = 24; /*+0dB*/
+		else if(tx_rate == MGN_VHT1SS_MCS9) /*256QAM*/
+			pwr_tracking_limit = 22; /*-1dB*/
+		else
+			pwr_tracking_limit = 24;
+	}
+	RT_TRACE(COMP_POWER_TRACKING, DBG_LOUD,
+		("TxRate=0x%x, PwrTrackingLimit=%d\n", tx_rate, pwr_tracking_limit));
+
+
+	if (method == BBSWING) {
+		RT_TRACE(COMP_POWER_TRACKING, DBG_LOUD,
+			("===>rtl8821ae_dm_txpwr_track_set_pwr\n"));
+
+		if (rf_path == RF90_PATH_A) {
+			final_bb_swing_idx[RF90_PATH_A] =
+				(rtldm->ofdm_index[RF90_PATH_A] > pwr_tracking_limit) ?
+				pwr_tracking_limit : rtldm->ofdm_index[RF90_PATH_A];
+			RT_TRACE(COMP_POWER_TRACKING, DBG_LOUD,
+				("pDM_Odm->RFCalibrateInfo.OFDM_index[ODM_RF_PATH_A]=%d, \
+				pDM_Odm->RealBbSwingIdx[ODM_RF_PATH_A]=%d\n",
+				rtldm->ofdm_index[RF90_PATH_A], final_bb_swing_idx[RF90_PATH_A]));
+
+			rtl_set_bbreg(hw, RA_TXSCALE, 0xFFE00000, rtl8812ae_txscaling_table[final_bb_swing_idx[RF90_PATH_A]]);
+		}
+	} else if (method == MIX_MODE) {
+		RT_TRACE(COMP_POWER_TRACKING, DBG_LOUD,
+			("pDM_Odm->DefaultOfdmIndex=%d, \
+			pDM_Odm->Aboslute_OFDMSwingIdx[RFPath]=%d, RF_Path = %d\n",
+			rtldm->default_ofdm_index, rtldm->aboslute_ofdm_swing_idx[rf_path],
+			rf_path ));
+
+
+		final_ofdm_swing_index = rtldm->default_ofdm_index + rtldm->aboslute_ofdm_swing_idx[rf_path];
+
+		if (rf_path == RF90_PATH_A) {
+			if(final_ofdm_swing_index > pwr_tracking_limit) {     /*BBSwing higher then Limit*/
+
+				rtldm->remnant_cck_idx = final_ofdm_swing_index - pwr_tracking_limit;
+				/* CCK Follow the same compensate value as Path A*/
+				rtldm->remnant_ofdm_swing_idx[rf_path] = final_ofdm_swing_index - pwr_tracking_limit;
+
+				rtl_set_bbreg(hw, RA_TXSCALE, 0xFFE00000, rtl8812ae_txscaling_table[pwr_tracking_limit]);
+
+				rtldm->modify_txagc_flag_path_a = true;
+
+				/*Set TxAGC Page C{};*/
+				rtl8821ae_phy_set_txpower_level_by_path(hw, rtlphy->current_channel, RF90_PATH_A);
+
+				RT_TRACE(COMP_POWER_TRACKING, DBG_LOUD,
+					("******Path_A Over BBSwing Limit , PwrTrackingLimit = %d , Remnant TxAGC Value = %d \n",
+					pwr_tracking_limit, rtldm->remnant_ofdm_swing_idx[rf_path]));
+			} else if (final_ofdm_swing_index < 0) {
+				rtldm->remnant_cck_idx = final_ofdm_swing_index;
+				/* CCK Follow the same compensate value as Path A*/
+				rtldm->remnant_ofdm_swing_idx[rf_path] = final_ofdm_swing_index;
+
+				rtl_set_bbreg(hw, RA_TXSCALE, 0xFFE00000, rtl8812ae_txscaling_table[0]);
+
+				rtldm->modify_txagc_flag_path_a = true;
+
+				/*Set TxAGC Page C{};*/
+				rtl8821ae_phy_set_txpower_level_by_path(hw, rtlphy->current_channel, RF90_PATH_A);
+
+				RT_TRACE(COMP_POWER_TRACKING, DBG_LOUD,
+					("******Path_A Lower then BBSwing lower bound  0 , Remnant TxAGC Value = %d \n",
+					 rtldm->remnant_ofdm_swing_idx[rf_path]));
+			} else {
+				rtl_set_bbreg(hw, RA_TXSCALE, 0xFFE00000, rtl8812ae_txscaling_table[final_ofdm_swing_index]);
+
+				RT_TRACE(COMP_POWER_TRACKING, DBG_LOUD,
+					("******Path_A Compensate with BBSwing , Final_OFDM_Swing_Index = %d \n",
+					final_ofdm_swing_index));
+
+				if(rtldm->modify_txagc_flag_path_a) { /*If TxAGC has changed, reset TxAGC again*/
+					rtldm->remnant_cck_idx = 0;
+					rtldm->remnant_ofdm_swing_idx[rf_path] = 0;
+
+					/*Set TxAGC Page C{};*/
+					rtl8821ae_phy_set_txpower_level_by_path(hw, rtlphy->current_channel, RF90_PATH_A);
+
+					rtldm->modify_txagc_flag_path_a = false;
+
+					RT_TRACE(COMP_POWER_TRACKING, DBG_LOUD,
+						("******Path_A pDM_Odm->Modify_TxAGC_Flag = FALSE \n"));
+				}
+			}
+		}
+
+	} else {
+		return;
+	}
+}
+
+
+void rtl8821ae_dm_txpower_tracking_callback_thermalmeter
+	(struct ieee80211_hw *hw)
+{
+	struct rtl_priv *rtlpriv = rtl_priv(hw);
+	struct rtl_efuse *rtlefuse = rtl_efuse(rtl_priv(hw));
+	struct rtl_dm	*rtldm = rtl_dm(rtl_priv(hw));
+	struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
+	struct rtl_phy *rtlphy = &(rtlpriv->phy);
+
+	u8 thermal_value = 0, delta, delta_lck, delta_iqk, p = 0, i = 0;
+	u8 thermal_value_avg_count = 0;
+	u32 thermal_value_avg = 0;
+
+	u8 ofdm_min_index = 6;  /*OFDM BB Swing should be less than +3.0dB, which is required by Arthur*/
+	u8 index_for_channel = 0; /* GetRightChnlPlaceforIQK(pHalData->CurrentChannel)*/
+
+	/* 1. The following TWO tables decide the final index of OFDM/CCK swing table.*/
+	u8 *delta_swing_table_idx_tup_a;
+	u8 *delta_swing_table_idx_tdown_a;
+	u8 *delta_swing_table_idx_tup_b;
+	u8 *delta_swing_table_idx_tdown_b;
+
+	/*2. Initialization (7 steps in total)*/
+	rtl8821ae_get_delta_swing_table(hw, (u8**)&delta_swing_table_idx_tup_a,
+									(u8**)&delta_swing_table_idx_tdown_a,
+									  (u8**)&delta_swing_table_idx_tup_b,
+									  (u8**)&delta_swing_table_idx_tdown_b);
+
+	rtldm->btxpower_trackinginit = true;
+
+	RT_TRACE(COMP_POWER_TRACKING, DBG_LOUD,
+		("===>rtl8821ae_dm_txpower_tracking_callback_thermalmeter, \
+		 \n pDM_Odm->BbSwingIdxCckBase: %d, pDM_Odm->BbSwingIdxOfdmBase[A]:\
+		 %d, pDM_Odm->DefaultOfdmIndex: %d\n",
+		rtldm->bb_swing_idx_cck_base,
+		rtldm->bb_swing_idx_ofdm_base[RF90_PATH_A],
+		rtldm->default_ofdm_index));
+
+	thermal_value = (u8)rtl_get_rfreg(hw, RF90_PATH_A, RF_T_METER_8812A, 0xfc00);	/*0x42: RF Reg[15:10] 88E*/
+	if (!rtldm->txpower_track_control || rtlefuse->eeprom_thermalmeter == 0 ||
+		rtlefuse->eeprom_thermalmeter == 0xFF)
+		return;
+
+
+	/* 3. Initialize ThermalValues of RFCalibrateInfo*/
+
+	if(rtlhal->reloadtxpowerindex)
+	{
+		RT_TRACE(COMP_POWER_TRACKING, DBG_LOUD,
+			("reload ofdm index for band switch\n"));
+	}
+
+	/*4. Calculate average thermal meter*/
+	rtldm->thermalvalue_avg[rtldm->thermalvalue_avg_index] = thermal_value;
+	rtldm->thermalvalue_avg_index++;
+	if(rtldm->thermalvalue_avg_index == AVG_THERMAL_NUM_8812A)
+		/*Average times =  c.AverageThermalNum*/
+		rtldm->thermalvalue_avg_index = 0;
+
+	for(i = 0; i < AVG_THERMAL_NUM_8812A; i++)
+	{
+		if(rtldm->thermalvalue_avg[i])
+		{
+			thermal_value_avg += rtldm->thermalvalue_avg[i];
+			thermal_value_avg_count++;
+		}
+	}
+
+	if(thermal_value_avg_count) /*Calculate Average ThermalValue after average enough times*/
+	{
+		thermal_value = (u8)(thermal_value_avg / thermal_value_avg_count);
+		RT_TRACE(COMP_POWER_TRACKING, DBG_LOUD,
+			("AVG Thermal Meter = 0x%X, EFUSE Thermal Base = 0x%X\n",
+			thermal_value, rtlefuse->eeprom_thermalmeter));
+	}
+
+	/*5. Calculate delta, delta_LCK, delta_IQK.*/
+	/*"delta" here is used to determine whether thermal value changes or not.*/
+	delta = (thermal_value > rtldm->thermalvalue) ? \
+		(thermal_value - rtldm->thermalvalue): \
+		(rtldm->thermalvalue - thermal_value);
+	delta_lck = (thermal_value > rtldm->thermalvalue_lck) ? \
+		(thermal_value - rtldm->thermalvalue_lck) : \
+		(rtldm->thermalvalue_lck - thermal_value);
+	delta_iqk = (thermal_value > rtldm->thermalvalue_iqk) ? \
+		(thermal_value - rtldm->thermalvalue_iqk) : \
+		(rtldm->thermalvalue_iqk - thermal_value);
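+	/* All three deltas are absolute differences in thermal meter units,
+	 * e.g. a current reading of 0x20 against a stored 0x1c gives a delta of 4.
+	 */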
+
+	RT_TRACE(COMP_POWER_TRACKING, DBG_LOUD,
+		("(delta, delta_LCK, delta_IQK) = (%d, %d, %d)\n",
+		delta, delta_lck, delta_iqk));
+
+	/* 6. If necessary, do LCK.	*/
+
+	if (delta_lck >= IQK_THRESHOLD) /*Delta temperature is equal to or larger than 20 centigrade.*/
+	{
+		RT_TRACE(COMP_POWER_TRACKING, DBG_LOUD,
+			("delta_LCK(%d) >= Threshold_IQK(%d)\n",
+			delta_lck, IQK_THRESHOLD));
+		rtldm->thermalvalue_lck = thermal_value;
+		rtl8821ae_phy_lccalibrate(hw);
+	}
+
+	/*7. If necessary, move the index of swing table to adjust Tx power.*/
+
+	if (delta > 0 && rtldm->txpower_track_control)
+	{
+		/*"delta" here is used to record the absolute value of the difference.*/
+		delta = thermal_value > rtlefuse->eeprom_thermalmeter ? \
+			(thermal_value - rtlefuse->eeprom_thermalmeter) : \
+			(rtlefuse->eeprom_thermalmeter - thermal_value);
+
+		if (delta >= TXSCALE_TABLE_SIZE)
+			delta = TXSCALE_TABLE_SIZE - 1;
+
+		/*7.1 The Final Power Index = BaseIndex + PowerIndexOffset*/
+
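+		/* Example: with a base OFDM index of 25 and a lookup offset of +2 the
+		 * new index is 27, later clamped to [ofdm_min_index, TXSCALE_TABLE_SIZE - 1].
+		 */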
+		if(thermal_value > rtlefuse->eeprom_thermalmeter) {
+
+			RT_TRACE(COMP_POWER_TRACKING, DBG_LOUD,
+			    		("delta_swing_table_idx_tup_a[%d] = %d\n",
+			    		delta, delta_swing_table_idx_tup_a[delta]));
+			rtldm->delta_power_index_last[RF90_PATH_A] = rtldm->delta_power_index[RF90_PATH_A];
+			rtldm->delta_power_index[RF90_PATH_A] = delta_swing_table_idx_tup_a[delta];
+
+			rtldm->aboslute_ofdm_swing_idx[RF90_PATH_A] =  delta_swing_table_idx_tup_a[delta];
+			/*Record delta swing for mix mode power tracking*/
+
+			RT_TRACE(COMP_POWER_TRACKING, DBG_LOUD,
+			("******Temp is higher and pDM_Odm->Aboslute_OFDMSwingIdx[ODM_RF_PATH_A] = %d\n",
+			rtldm->aboslute_ofdm_swing_idx[RF90_PATH_A]));
+
+        	} else {
+        		RT_TRACE(COMP_POWER_TRACKING, DBG_LOUD,
+               	 	("delta_swing_table_idx_tdown_a[%d] = %d\n",
+               	 	delta, delta_swing_table_idx_tdown_a[delta]));
+
+			rtldm->delta_power_index_last[RF90_PATH_A] = rtldm->delta_power_index[RF90_PATH_A];
+			rtldm->delta_power_index[RF90_PATH_A] = -1 * delta_swing_table_idx_tdown_a[delta];
+
+			rtldm->aboslute_ofdm_swing_idx[RF90_PATH_A] =  -1 * delta_swing_table_idx_tdown_a[delta];
+			/* Record delta swing for mix mode power tracking*/
+			RT_TRACE(COMP_POWER_TRACKING, DBG_LOUD,
+			("******Temp is lower and pDM_Odm->Aboslute_OFDMSwingIdx[ODM_RF_PATH_A] = %d\n",
+			rtldm->aboslute_ofdm_swing_idx[RF90_PATH_A]));
+ 		}
+
+		for (p = RF90_PATH_A; p < MAX_PATH_NUM_8821A; p++)
+        	{
+			RT_TRACE(COMP_POWER_TRACKING, DBG_LOUD,
+		                ("\n\n================================ [Path-%c] \
+		                Calculating PowerIndexOffset ================================\n",
+		                (p == RF90_PATH_A ? 'A' : 'B')));
+
+		    	if (rtldm->delta_power_index[p] == rtldm->delta_power_index_last[p])
+				/*If Thermal value changes but lookup table value still the same*/
+		    		rtldm->power_index_offset[p] = 0;
+		    	else
+		    		rtldm->power_index_offset[p] =
+		    			rtldm->delta_power_index[p] - rtldm->delta_power_index_last[p];
+				/*Power Index Diff between 2 times Power Tracking*/
+
+		    	RT_TRACE(COMP_POWER_TRACKING, DBG_LOUD,
+		    		("[Path-%c] PowerIndexOffset(%d) = DeltaPowerIndex(%d) - DeltaPowerIndexLast(%d)\n",
+                		(p == RF90_PATH_A ? 'A' : 'B'),
+                		rtldm->power_index_offset[p],
+                		rtldm->delta_power_index[p] ,
+                		rtldm->delta_power_index_last[p]));
+
+	    		rtldm->ofdm_index[p] =
+					rtldm->bb_swing_idx_ofdm_base[p] + rtldm->power_index_offset[p];
+		    	rtldm->cck_index =
+					rtldm->bb_swing_idx_cck_base + rtldm->power_index_offset[p];
+
+		    	rtldm->bb_swing_idx_cck = rtldm->cck_index;
+		   	rtldm->bb_swing_idx_ofdm[p] = rtldm->ofdm_index[p];
+
+	           	/*************Print BB Swing Base and Index Offset*************/
+
+		    	RT_TRACE(COMP_POWER_TRACKING, DBG_LOUD,
+		    		("The 'CCK' final index(%d) = BaseIndex(%d) + PowerIndexOffset(%d)\n",
+		    		rtldm->bb_swing_idx_cck,
+		    		rtldm->bb_swing_idx_cck_base,
+		    		rtldm->power_index_offset[p]));
+			RT_TRACE(COMP_POWER_TRACKING, DBG_LOUD,
+				("The 'OFDM' final index(%d) = BaseIndex[%c](%d) + PowerIndexOffset(%d)\n",
+		   		rtldm->bb_swing_idx_ofdm[p],
+		   		(p == RF90_PATH_A ? 'A' : 'B'),
+		   		rtldm->bb_swing_idx_ofdm_base[p],
+		   		rtldm->power_index_offset[p]));
+
+		    	/*7.1 Handle boundary conditions of index.*/
+
+
+			if(rtldm->ofdm_index[p] > TXSCALE_TABLE_SIZE -1)
+			{
+				rtldm->ofdm_index[p] = TXSCALE_TABLE_SIZE -1;
+			}
+			else if (rtldm->ofdm_index[p] < ofdm_min_index)
+			{
+				rtldm->ofdm_index[p] = ofdm_min_index;
+			}
+		}
+		RT_TRACE(COMP_POWER_TRACKING, DBG_LOUD,
+            		("\n\n======================================================\
+            		==================================================\n"));
+		if(rtldm->cck_index > TXSCALE_TABLE_SIZE -1)
+			rtldm->cck_index = TXSCALE_TABLE_SIZE -1;
+		else if (rtldm->cck_index < 0)
+			rtldm->cck_index = 0;
+	} else {
+		RT_TRACE(COMP_POWER_TRACKING, DBG_LOUD,
+			("The thermal meter is unchanged or TxPowerTracking OFF(%d): \
+			ThermalValue: %d , pDM_Odm->RFCalibrateInfo.ThermalValue: %d\n",
+			rtldm->txpower_track_control,
+			thermal_value,
+			rtldm->thermalvalue));
+
+	    	for (p = RF90_PATH_A; p < MAX_PATH_NUM_8821A; p++)
+		    	rtldm->power_index_offset[p] = 0;
+	}
+	RT_TRACE(COMP_POWER_TRACKING, DBG_LOUD,
+		("TxPowerTracking: [CCK] Swing Current Index: %d, Swing Base Index: %d\n",
+		rtldm->cck_index, rtldm->bb_swing_idx_cck_base));       /*Print Swing base & current*/
+	for (p = RF90_PATH_A; p < MAX_PATH_NUM_8821A; p++)
+	{
+		RT_TRACE(COMP_POWER_TRACKING, DBG_LOUD,
+			("TxPowerTracking: [OFDM] Swing Current Index: %d, Swing Base Index[%c]: %d\n",
+			rtldm->ofdm_index[p],
+			(p == RF90_PATH_A ? 'A' : 'B'),
+			rtldm->bb_swing_idx_ofdm_base[p]));
+	}
+
+	if ((rtldm->power_index_offset[RF90_PATH_A] != 0 ||
+		rtldm->power_index_offset[RF90_PATH_B] != 0 ) &&
+     	 	rtldm->txpower_track_control)
+	{
+		/*7.2 Configure the Swing Table to adjust Tx Power.*/
+		/*Always TRUE after Tx Power is adjusted by power tracking.*/
+		/*
+		  2012/04/23 MH According to Luke's suggestion, we can not write BB digital
+		  to increase TX power. Otherwise, EVM will be bad.
+
+		  2012/04/25 MH Add for tx power tracking to set tx power in tx agc for 88E.
+		*/
+		if (thermal_value > rtldm->thermalvalue)
+		{
+			RT_TRACE(COMP_POWER_TRACKING, DBG_LOUD,
+				("Temperature Increasing(A): delta_pi: %d , delta_t: %d, Now_t: %d, EFUSE_t: %d, Last_t: %d\n",
+				rtldm->power_index_offset[RF90_PATH_A],
+				delta, thermal_value,
+				rtlefuse->eeprom_thermalmeter,
+				rtldm->thermalvalue));
+		} else if (thermal_value < rtldm->thermalvalue) { /*Low temperature*/
+			RT_TRACE(COMP_POWER_TRACKING, DBG_LOUD,
+				("Temperature Decreasing(A): delta_pi: %d , delta_t: %d, Now_t: %d, EFUSE_t: %d, Last_t: %d\n",
+				rtldm->power_index_offset[RF90_PATH_A],
+				delta, thermal_value,
+				rtlefuse->eeprom_thermalmeter,
+				rtldm->thermalvalue));
+		}
+
+		if (thermal_value > rtlefuse->eeprom_thermalmeter) {
+			RT_TRACE(COMP_POWER_TRACKING, DBG_LOUD,
+				("Temperature(%d) higher than PG value(%d)\n",
+				thermal_value, rtlefuse->eeprom_thermalmeter));
+
+
+			RT_TRACE(COMP_POWER_TRACKING, DBG_LOUD,
+				("**********Enter POWER Tracking MIX_MODE**********\n"));
+			for (p = RF90_PATH_A; p < MAX_PATH_NUM_8821A; p++)
+					rtl8821ae_dm_txpwr_track_set_pwr(hw, MIX_MODE, p, index_for_channel);
+
+		} else {
+			RT_TRACE(COMP_POWER_TRACKING, DBG_LOUD,
+				("Temperature(%d) lower than PG value(%d)\n",
+				thermal_value, rtlefuse->eeprom_thermalmeter));
+
+
+			RT_TRACE(COMP_POWER_TRACKING, DBG_LOUD,
+				("**********Enter POWER Tracking MIX_MODE**********\n"));
+			for (p = RF90_PATH_A; p < MAX_PATH_NUM_8821A; p++)
+				rtl8821ae_dm_txpwr_track_set_pwr(hw, MIX_MODE, p, index_for_channel);
+
+		}
+
+		rtldm->bb_swing_idx_cck_base = rtldm->bb_swing_idx_cck;   /*Record last time Power Tracking result as base.*/
+		for (p = RF90_PATH_A; p < MAX_PATH_NUM_8821A; p++)
+				rtldm->bb_swing_idx_ofdm_base[p] = rtldm->bb_swing_idx_ofdm[p];
+
+		RT_TRACE(COMP_POWER_TRACKING, DBG_LOUD,
+			("pDM_Odm->RFCalibrateInfo.ThermalValue = %d ThermalValue= %d\n",
+			rtldm->thermalvalue, thermal_value));
+
+		rtldm->thermalvalue = thermal_value;         /*Record last Power Tracking Thermal Value*/
+
+	}
+	/*Delta temperature is equal to or larger than 20 centigrade (When threshold is 8).*/
+	if ((delta_iqk >= IQK_THRESHOLD)) {
+
+		if ( !rtlphy->b_iqk_in_progress) {
+
+			spin_lock(&rtlpriv->locks.iqk_lock);
+			rtlphy->b_iqk_in_progress = true;
+			spin_unlock(&rtlpriv->locks.iqk_lock);
+
+			rtl8821ae_do_iqk(hw, delta_iqk, thermal_value, 8);
+
+			spin_lock(&rtlpriv->locks.iqk_lock);
+			rtlphy->b_iqk_in_progress = false;
+			spin_unlock(&rtlpriv->locks.iqk_lock);
+		}
+	}
+
+	RT_TRACE(COMP_POWER_TRACKING, DBG_LOUD,
+		("<===rtl8821ae_dm_txpower_tracking_callback_thermalmeter\n"));
+}
+
+
+void rtl8821ae_dm_check_txpower_tracking_thermalmeter(struct ieee80211_hw *hw)
+{
+	struct rtl_priv *rtlpriv = rtl_priv(hw);
+	static u8 tm_trigger = 0;
+
+	//if (!rtlpriv->dm.btxpower_tracking)
+	//	return;
+
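+	/* Alternate between two watchdog passes: the first arms the RF thermal
+	 * meter, the second reads it back and runs the tracking callback.
+	 */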
+	if (!tm_trigger) {
+		rtl_set_rfreg(hw, RF90_PATH_A, RF_T_METER_88E, BIT(17)|BIT(16),
+			      0x03);
+		RT_TRACE(COMP_POWER_TRACKING, DBG_LOUD,
+			 ("Trigger 8821ae Thermal Meter!!\n"));
+		tm_trigger = 1;
+		return;
+	} else {
+		RT_TRACE(COMP_POWER_TRACKING, DBG_LOUD,
+			 ("Schedule TxPowerTracking !!\n"));
+
+		rtl8821ae_dm_txpower_tracking_callback_thermalmeter(hw);
+		tm_trigger = 0;
+	}
+}
+
+
+void rtl8821ae_dm_refresh_rate_adaptive_mask(struct ieee80211_hw *hw)
+{
+	struct rtl_priv *rtlpriv = rtl_priv(hw);
+	struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
+	struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
+	struct rate_adaptive *p_ra = &(rtlpriv->ra);
+	u32 low_rssithresh_for_ra = p_ra->low2high_rssi_thresh_for_ra;
+	u32 high_rssithresh_for_ra = p_ra->high_rssi_thresh_for_ra;
+	u8 go_up_gap = 5;
+	struct ieee80211_sta *sta = NULL;
+
+	if (is_hal_stop(rtlhal)) {
+		RT_TRACE(COMP_RATE, DBG_LOUD,
+			 ("driver is going to unload\n"));
+		return;
+	}
+
+	if (!rtlpriv->dm.b_useramask) {
+		RT_TRACE(COMP_RATE, DBG_LOUD,
+			 ("driver does not control rate adaptive mask\n"));
+		return;
+	}
+
+	if (mac->link_state == MAC80211_LINKED &&
+		mac->opmode == NL80211_IFTYPE_STATION) {
+
+		switch (p_ra->pre_ratr_state) {
+			case DM_RATR_STA_MIDDLE:
+				high_rssithresh_for_ra += go_up_gap;
+				break;
+			case DM_RATR_STA_LOW:
+				high_rssithresh_for_ra += go_up_gap;
+				low_rssithresh_for_ra += go_up_gap;
+				break;
+			default:
+				break;
+		}
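+		/* Raising the thresholds by go_up_gap when coming from a lower RA
+		 * state adds hysteresis, so the level does not flap on a noisy RSSI.
+		 */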
+
+		if (rtlpriv->dm.undecorated_smoothed_pwdb >
+		    (long)high_rssithresh_for_ra)
+			p_ra->ratr_state = DM_RATR_STA_HIGH;
+		else if (rtlpriv->dm.undecorated_smoothed_pwdb >
+			 (long)low_rssithresh_for_ra)
+			p_ra->ratr_state = DM_RATR_STA_MIDDLE;
+		else
+			p_ra->ratr_state = DM_RATR_STA_LOW;
+
+		if (p_ra->pre_ratr_state != p_ra->ratr_state ) {
+			RT_TRACE(COMP_RATE, DBG_LOUD,
+				 ("RSSI = %ld\n",
+				  rtlpriv->dm.undecorated_smoothed_pwdb));
+			RT_TRACE(COMP_RATE, DBG_LOUD,
+				 ("RSSI_LEVEL = %d\n", p_ra->ratr_state));
+			RT_TRACE(COMP_RATE, DBG_LOUD,
+				 ("PreState = %d, CurState = %d\n",
+				  p_ra->pre_ratr_state, p_ra->ratr_state));
+
+			rcu_read_lock();
+			sta = rtl_find_sta(hw, mac->bssid);
+			if (sta)
+				rtlpriv->cfg->ops->update_rate_tbl(hw, sta, p_ra->ratr_state);
+			rcu_read_unlock();
+
+			p_ra->pre_ratr_state = p_ra->ratr_state;
+		}
+	}
+}
+
+bool rtl8821ae_dm_is_edca_turbo_disable(struct ieee80211_hw *hw)
+{
+	struct rtl_priv *rtlpriv = rtl_priv(hw);
+
+	if (rtlpriv->btcoexist.btc_ops->btc_is_disable_edca_turbo(rtlpriv))
+		return true;
+	if (rtlpriv->mac80211.mode == WIRELESS_MODE_B)
+		return true;
+
+	return false;
+}
+
+void rtl8821ae_dm_edca_choose_traffic_idx(
+	struct ieee80211_hw *hw, u64 cur_tx_bytes, u64 cur_rx_bytes, bool b_bias_on_rx,
+	bool *pb_is_cur_rdl_state)
+{
+	struct rtl_priv *rtlpriv = rtl_priv(hw);
+
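+	/* Classify the link as uplink or downlink when one direction carries more
+	 * than 4x the bytes of the other; otherwise treat it as balanced traffic.
+	 */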
+	if(b_bias_on_rx)
+	{
+		if (cur_tx_bytes > (cur_rx_bytes*4)) {
+			*pb_is_cur_rdl_state = false;
+			RT_TRACE(COMP_TURBO, DBG_LOUD,
+				("Uplink Traffic\n "));
+		} else {
+			*pb_is_cur_rdl_state = true;
+			RT_TRACE(COMP_TURBO, DBG_LOUD,
+				("Balance Traffic\n"));
+		}
+	} else {
+		if (cur_rx_bytes > (cur_tx_bytes*4)) {
+			*pb_is_cur_rdl_state = true;
+			RT_TRACE(COMP_TURBO, DBG_LOUD,
+				("Downlink Traffic\n"));
+		} else {
+			*pb_is_cur_rdl_state = false;
+			RT_TRACE(COMP_TURBO, DBG_LOUD,
+				("Balance Traffic\n"));
+		}
+	}
+	return ;
+}
+
+static void rtl8821ae_dm_check_edca_turbo(struct ieee80211_hw *hw)
+{
+	struct rtl_priv *rtlpriv = rtl_priv(hw);
+	struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
+	struct rtl_dm *rtldm =  rtl_dm(rtl_priv(hw));
+
+	/*Keep past Tx/Rx packet count for RT-to-RT EDCA turbo.*/
+	unsigned long cur_tx_ok_cnt = 0;
+	unsigned long cur_rx_ok_cnt = 0;
+	u32 edca_be_ul = 0x5ea42b;
+	u32 edca_be_dl = 0x5ea42b;
+	u32 edca_be = 0x5ea42b;
+	u8 iot_peer = 0;
+	bool *pb_is_cur_rdl_state = NULL;
+	bool b_last_is_cur_rdl_state = false;
+	bool b_bias_on_rx = false;
+	bool b_edca_turbo_on = false;
+
+	RT_TRACE(COMP_TURBO, DBG_LOUD,
+		("rtl8821ae_dm_check_edca_turbo=====>"));
+	RT_TRACE(COMP_TURBO, DBG_LOUD,
+		("Original BE PARAM: 0x%x\n",
+		rtl_read_dword(rtlpriv, DM_REG_EDCA_BE_11N)));
+
+	/*===============================
+	list parameters for different platforms
+	===============================*/
+	b_last_is_cur_rdl_state = rtlpriv->dm.bis_cur_rdlstate;
+	pb_is_cur_rdl_state = &( rtlpriv->dm.bis_cur_rdlstate);
+
+	cur_tx_ok_cnt = rtlpriv->stats.txbytesunicast - rtldm->last_tx_ok_cnt;
+	cur_rx_ok_cnt = rtlpriv->stats.rxbytesunicast - rtldm->last_rx_ok_cnt;
+
+	rtldm->last_tx_ok_cnt = rtlpriv->stats.txbytesunicast;
+	rtldm->last_rx_ok_cnt = rtlpriv->stats.rxbytesunicast;
+
+	iot_peer = rtlpriv->mac80211.vendor;
+	b_bias_on_rx = (iot_peer == PEER_RAL || iot_peer == PEER_ATH) ?
+		       true : false;
+	b_edca_turbo_on = ((!rtlpriv->dm.bis_any_nonbepkts) &&
+			   (!rtlpriv->dm.b_disable_framebursting)) ?
+			   true : false;
+
+	/*if (rtl8821ae_dm_is_edca_turbo_disable(hw))
+		goto dm_CheckEdcaTurbo_EXIT;*/
+
+	if ((iot_peer == PEER_CISCO) && (mac->mode == WIRELESS_MODE_N_24G))
+	{
+		edca_be_dl = edca_setting_dl[iot_peer];
+		edca_be_ul = edca_setting_ul[iot_peer];
+	}
+
+	RT_TRACE(COMP_TURBO, DBG_LOUD,
+		("bIsAnyNonBEPkts : 0x%x  bDisableFrameBursting : 0x%x  \n",
+		rtlpriv->dm.bis_any_nonbepkts, rtlpriv->dm.b_disable_framebursting));
+
+	RT_TRACE(COMP_TURBO, DBG_LOUD,
+			("bEdcaTurboOn : 0x%x bBiasOnRx : 0x%x\n",
+			b_edca_turbo_on, b_bias_on_rx));
+
+	if (b_edca_turbo_on) {
+		RT_TRACE(COMP_TURBO, DBG_LOUD,
+			("curTxOkCnt : 0x%lx \n",cur_tx_ok_cnt));
+		RT_TRACE(COMP_TURBO, DBG_LOUD,
+			("curRxOkCnt : 0x%lx \n",cur_rx_ok_cnt));
+		if(b_bias_on_rx)
+			rtl8821ae_dm_edca_choose_traffic_idx(hw, cur_tx_ok_cnt,
+				cur_rx_ok_cnt, true, pb_is_cur_rdl_state);
+		else
+			rtl8821ae_dm_edca_choose_traffic_idx(hw, cur_tx_ok_cnt,
+				cur_rx_ok_cnt, false, pb_is_cur_rdl_state);
+
+		edca_be = ((*pb_is_cur_rdl_state) == true) ? edca_be_dl : edca_be_ul;
+
+		rtl_write_dword(rtlpriv, DM_REG_EDCA_BE_11N, edca_be);
+
+		RT_TRACE(COMP_TURBO, DBG_LOUD,
+			("EDCA Turbo on: EDCA_BE:0x%x\n", edca_be));
+
+		rtlpriv->dm.bcurrent_turbo_edca = true;
+
+		RT_TRACE(COMP_TURBO, DBG_LOUD,
+			("EDCA_BE_DL : 0x%x  EDCA_BE_UL : 0x%x  EDCA_BE : 0x%x  \n",
+			edca_be_dl, edca_be_ul, edca_be));
+	} else {
+		if (rtlpriv->dm.bcurrent_turbo_edca) {
+			u8 tmp = AC0_BE;
+			rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_AC_PARAM,
+						      (u8 *) (&tmp));
+		}
+		rtlpriv->dm.bcurrent_turbo_edca = false;
+	}
+
+/* dm_CheckEdcaTurbo_EXIT: */
+	rtlpriv->dm.bis_any_nonbepkts = false;
+	rtldm->last_tx_ok_cnt = rtlpriv->stats.txbytesunicast;
+	rtldm->last_rx_ok_cnt = rtlpriv->stats.rxbytesunicast;
+}
+
+static void rtl8821ae_dm_cck_packet_detection_thresh(struct ieee80211_hw *hw)
+{
+	struct rtl_priv *rtlpriv = rtl_priv(hw);
+	u8 cur_cck_cca_thresh;
+
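+	/* Pick the CCK CCA threshold from the minimum RSSI: 0xcd above 25, 0x83
+	 * between 10 and 25, and 0x40 otherwise, falling back to 0x83 whenever
+	 * the CCK false alarm count exceeds 1000.
+	 */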
+	if (rtlpriv->mac80211.link_state >= MAC80211_LINKED) {
+		/*dm_digtable.rssi_val_min = rtl8821ae_dm_initial_gain_min_pwdb(hw);*/
+		if (dm_digtable.rssi_val_min > 25)
+			cur_cck_cca_thresh = 0xcd;
+		else if ((dm_digtable.rssi_val_min <= 25) && (dm_digtable.rssi_val_min > 10))
+			cur_cck_cca_thresh = 0x83;
+		else {
+			if (rtlpriv->falsealm_cnt.cnt_cck_fail > 1000)
+				cur_cck_cca_thresh = 0x83;
+			else
+				cur_cck_cca_thresh = 0x40;
+		}
+
+	} else {
+		if (rtlpriv->falsealm_cnt.cnt_cck_fail > 1000)
+			cur_cck_cca_thresh = 0x83;
+		else
+			cur_cck_cca_thresh = 0x40;
+	}
+
+	if (dm_digtable.cur_cck_cca_thres != cur_cck_cca_thresh) {
+		rtl_set_bbreg(hw, RCCK0_CCA, MASKBYTE2, cur_cck_cca_thresh);
+	}
+
+	dm_digtable.pre_cck_cca_thres = dm_digtable.cur_cck_cca_thres;
+	dm_digtable.cur_cck_cca_thres = cur_cck_cca_thresh;
+	RT_TRACE(COMP_DIG, DBG_TRACE,
+		 ("CCK CCA threshold = %x\n", dm_digtable.cur_cck_cca_thres));
+
+}
+
+void rtl8821ae_dm_dynamic_edcca(struct ieee80211_hw *hw)
+{
+	struct rtl_priv *rtlpriv = rtl_priv(hw);
+	bool b_fw_current_in_ps_mode = false;
+
+	rtlpriv->cfg->ops->get_hw_reg(hw,HW_VAR_FW_PSMODE_STATUS, \
+		(u8*)(&b_fw_current_in_ps_mode));
+	if (b_fw_current_in_ps_mode)
+		return;
+}
+
+void rtl8812ae_dm_update_txpath(struct ieee80211_hw *hw, u8 path)
+{
+	struct rtl_dm *rtldm = rtl_dm(rtl_priv(hw));
+	struct rtl_priv *rtlpriv = rtl_priv(hw);
+
+	if (rtldm->resp_tx_path != path) {
+		RT_TRACE(COMP_DIG, DBG_LOUD, \
+			("Need to Update Tx Path\n"));
+		if (path == RF90_PATH_A) {
+			/*Tx by Reg*/
+			rtl_set_bbreg(hw, 0x80c, 0xFFF0, 0x111);
+			 /*Resp Tx by Txinfo*/
+			rtl_set_bbreg(hw, 0x6d8, BIT(7) | BIT(6), 1);
+		} else {
+			/*Tx by Reg*/
+			rtl_set_bbreg(hw, 0x80c, 0xFFF0, 0x222);
+			 /*Resp Tx by Txinfo*/
+			rtl_set_bbreg(hw, 0x6d8, BIT(7) |BIT(6), 2);
+		}
+	}
+	rtldm->resp_tx_path = path;
+	RT_TRACE(COMP_DIG, DBG_LOUD, \
+		("Path=%s\n",(path == RF90_PATH_A) ?  \
+		"RF90_PATH_A":"RF90_PATH_B"));
+}
+
+void rtl8812ae_dm_path_diversity_init(struct ieee80211_hw *hw)
+{
+	struct rtl_dm *rtldm = rtl_dm(rtl_priv(hw));
+
+	//rtl_set_bbreg(hw, 0x80c , BIT(29), 1); /*Tx path from Reg*/
+	rtl_set_bbreg(hw, 0x80c , 0xFFF0, 0x111); /*Tx by Reg*/
+	rtl_set_bbreg(hw, 0x6d8 , BIT(7) | BIT(6), 1); /*Resp Tx by Txinfo*/
+	rtl8812ae_dm_update_txpath(hw, RF90_PATH_A);
+
+	rtldm->path_sel = 1; /* TxInfo default at path-A*/
+}
+
+void rtl812ae_dm_set_txpath_by_txinfo(struct ieee80211_hw *hw,
+	u8 *pdesc)
+{
+	struct rtl_dm *rtldm = rtl_dm(rtl_priv(hw));
+
+	SET_TX_DESC_TX_ANT(pdesc, rtldm->path_sel);
+}
+
+void rtl8812ae_dm_path_statistics(struct ieee80211_hw *hw,
+	u32 rssi_a, u32 rssi_b)
+{
+	struct rtl_dm *rtldm = rtl_dm(rtl_priv(hw));
+
+	rtldm->patha_sum += rssi_a;
+	rtldm->patha_cnt ++;
+
+	rtldm->pathb_sum += rssi_b;
+	rtldm->pathb_cnt ++;
+}
+
+void rtl8812ae_dm_path_diversity(struct ieee80211_hw *hw)
+{
+	struct rtl_priv *rtlpriv = rtl_priv(hw);
+	struct rtl_dm *rtldm = rtl_dm(rtl_priv(hw));
+	struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
+	u32	rssi_avg_a = 0;
+	u32 rssi_avg_b = 0;
+	u32 local_min_rssi = 0;
+	u32 min_rssi = 0xFF;
+	u8 tx_resp_path=0, target_path;
+	struct ieee80211_sta *sta = NULL;
+
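+	/* Average the per-path RSSI accumulated by rtl8812ae_dm_path_statistics()
+	 * and steer response/TXINFO transmissions to the stronger path; a tie
+	 * keeps the currently selected response path.
+	 */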
+	sta = rtl_find_sta(hw, mac->bssid);
+	if (sta) {
+		/*Calculate RSSI per Path*/
+		rssi_avg_a = (rtldm->patha_cnt != 0) ? \
+			(rtldm->patha_sum / rtldm->patha_cnt) : 0;
+		rssi_avg_b = (rtldm->pathb_cnt != 0) ? \
+			(rtldm->pathb_sum / rtldm->pathb_cnt) : 0;
+
+		target_path = (rssi_avg_a == rssi_avg_b) ? rtldm->resp_tx_path : \
+			((rssi_avg_a>=rssi_avg_b) ? RF90_PATH_A : RF90_PATH_B);
+
+		RT_TRACE(COMP_DIG, DBG_TRACE, \
+			("assoc_id=%d, PathA_Sum=%d, PathA_Cnt=%d\n", \
+			mac->assoc_id, rtldm->patha_sum, rtldm->patha_cnt));
+		RT_TRACE(COMP_DIG, DBG_TRACE, \
+			("assoc_id=%d, PathB_Sum=%d, PathB_Cnt=%d\n", \
+			mac->assoc_id, rtldm->pathb_sum, rtldm->pathb_cnt));
+		RT_TRACE(COMP_DIG, DBG_TRACE, \
+			("assoc_id=%d, RssiAvgA= %d, RssiAvgB= %d\n", \
+			mac->assoc_id, rssi_avg_a, rssi_avg_b));
+
+		/*Select Resp Tx Path*/
+		local_min_rssi = (rssi_avg_a > rssi_avg_b) ?  rssi_avg_b : rssi_avg_a;
+		if(local_min_rssi  < min_rssi)
+		{
+			min_rssi = local_min_rssi;
+			tx_resp_path = target_path;
+		}
+
+		/*Select Tx DESC*/
+		if(target_path == RF90_PATH_A)
+			rtldm->path_sel = 1;
+		else
+			rtldm->path_sel = 2;
+
+		RT_TRACE(COMP_DIG, DBG_TRACE, \
+			("Tx from TxInfo, TargetPath=%s\n", \
+			(target_path==RF90_PATH_A) ? \
+			"ODM_RF_PATH_A":"ODM_RF_PATH_B"));
+		RT_TRACE(COMP_DIG, DBG_TRACE, \
+			("pDM_PathDiv->PathSel= %d\n", \
+			rtldm->path_sel));
+	}
+	rtldm->patha_cnt = 0;
+	rtldm->patha_sum = 0;
+	rtldm->pathb_cnt = 0;
+	rtldm->pathb_sum = 0;
+}
+
+void rtl8821ae_dm_dynamic_atc_switch(struct ieee80211_hw *hw)
+{
+	struct rtl_priv *rtlpriv = rtl_priv(hw);
+	struct rtl_dm *rtldm = rtl_dm(rtl_priv(hw));
+	u8 crystal_cap;
+	u32 packet_count;
+	int cfo_khz_a,cfo_khz_b,cfo_ave = 0, adjust_xtal = 0;
+	int cfo_ave_diff;
+
+	if (rtlpriv->mac80211.link_state < MAC80211_LINKED){
+		/*1.Enable ATC*/
+		if (rtldm->atc_status == ATC_STATUS_OFF)
+		{
+			rtl_set_bbreg(hw, RFC_AREA, BIT(14), ATC_STATUS_ON);
+			rtldm->atc_status = ATC_STATUS_ON;
+		}
+
+		RT_TRACE(COMP_DIG, DBG_LOUD, \
+			("rtl8821ae_dm_dynamic_atc_switch(): No link!!\n"));
+		RT_TRACE(COMP_DIG, DBG_LOUD, \
+			("rtl8821ae_dm_dynamic_atc_switch(): atc_status = %d\n", \
+			rtldm->atc_status));
+
+		if (rtldm->crystal_cap != rtlpriv->efuse.crystalcap)
+		{
+			rtldm->crystal_cap = rtlpriv->efuse.crystalcap;
+			crystal_cap = rtldm->crystal_cap & 0x3f;
+			crystal_cap = crystal_cap & 0x3f;
+			rtl_set_bbreg(hw, REG_MAC_PHY_CTRL, \
+				0x7ff80000, (crystal_cap | (crystal_cap << 6)));
+		}
+		RT_TRACE(COMP_DIG, DBG_LOUD, \
+			("rtl8821ae_dm_dynamic_atc_switch(): crystal_cap = 0x%x\n", \
+			rtldm->crystal_cap));
+	}else{
+		/*1. Calculate CFO for path-A & path-B*/
+		cfo_khz_a = (int)(rtldm->cfo_tail[0] * 3125) / 1280;
+		cfo_khz_b = (int)(rtldm->cfo_tail[1] * 3125) / 1280;
+		packet_count = rtldm->packet_count;
+
+		/*2.No new packet*/
+		if (packet_count == rtldm->packet_count_pre) {
+			RT_TRACE(COMP_DIG, DBG_LOUD, \
+				("rtl8821ae_dm_dynamic_atc_switch(): packet counter doesn't change\n"));
+			return;
+		}
+
+		rtldm->packet_count_pre = packet_count;
+		RT_TRACE(COMP_DIG, DBG_LOUD, \
+			("rtl8821ae_dm_dynamic_atc_switch(): packet counter = %d\n", \
+			rtldm->packet_count));
+
+		/*3.Average CFO*/
+		if (rtlpriv->phy.rf_type == RF_1T1R)
+			cfo_ave = cfo_khz_a;
+		else
+			cfo_ave = (cfo_khz_a + cfo_khz_b) >> 1;
+
+		RT_TRACE(COMP_DIG, DBG_LOUD, \
+			("rtl8821ae_dm_dynamic_atc_switch():"
+			"cfo_khz_a = %dkHz, cfo_khz_b = %dkHz, cfo_ave = %dkHz\n",
+			cfo_khz_a, cfo_khz_b, cfo_ave));
+
+		/*4.Avoid abnormal large CFO*/
+		cfo_ave_diff = (rtldm->cfo_ave_pre >= cfo_ave)?
+						(rtldm->cfo_ave_pre - cfo_ave):
+						(cfo_ave - rtldm->cfo_ave_pre);
+
+		if (cfo_ave_diff > 20 && rtldm->large_cfo_hit == 0){
+			RT_TRACE(COMP_DIG, DBG_LOUD, \
+				("rtl8821ae_dm_dynamic_atc_switch(): first large CFO hit\n"));
+			rtldm->large_cfo_hit = 1;
+			return;
+		}
+		else
+			rtldm->large_cfo_hit = 0;
+
+		rtldm->cfo_ave_pre = cfo_ave;
+
+		/*CFO tracking by adjusting Xtal cap.*/
+
+		/*1.Dynamic Xtal threshold*/
+		if (cfo_ave >= -rtldm->cfo_threshold &&
+			cfo_ave <= rtldm->cfo_threshold &&
+			rtldm->is_freeze == 0){
+			if (rtldm->cfo_threshold == CFO_THRESHOLD_XTAL){
+				rtldm->cfo_threshold = CFO_THRESHOLD_XTAL + 10;
+				rtldm->is_freeze = 1;
+			}
+			else
+				rtldm->cfo_threshold = CFO_THRESHOLD_XTAL;
+		}
+		RT_TRACE(COMP_DIG, DBG_LOUD, \
+			("rtl8821ae_dm_dynamic_atc_switch(): Dynamic threshold = %d\n", \
+			rtldm->cfo_threshold));
+
+		/* 2.Calculate Xtal offset*/
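+		/* Roughly one crystal-cap step per 4 kHz of residual CFO, e.g. an
+		 * average CFO of 30 kHz gives (30 - 10) / 4 + 1 = 6 steps upward.
+		 */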
+		if (cfo_ave > rtldm->cfo_threshold && rtldm->crystal_cap < 0x3f)
+			adjust_xtal = ((cfo_ave - CFO_THRESHOLD_XTAL) >> 2) + 1;
+		else if ((cfo_ave < -rtlpriv->dm.cfo_threshold) && rtlpriv->dm.crystal_cap > 0)
+			adjust_xtal = ((cfo_ave + CFO_THRESHOLD_XTAL) >> 2) - 1;
+		RT_TRACE(COMP_DIG, DBG_LOUD, \
+			("rtl8821ae_dm_dynamic_atc_switch(): "
+			"Crystal cap = 0x%x, Crystal cap offset = %d\n",
+			rtldm->crystal_cap, adjust_xtal));
+
+		/*3.Adjust Crystal Cap.*/
+		if (adjust_xtal != 0){
+			rtldm->is_freeze = 0;
+			rtldm->crystal_cap += adjust_xtal;
+
+			if (rtldm->crystal_cap > 0x3f)
+				rtldm->crystal_cap = 0x3f;
+			else if (rtldm->crystal_cap < 0)
+				rtldm->crystal_cap = 0;
+
+			crystal_cap = rtldm->crystal_cap & 0x3f;
+			crystal_cap = crystal_cap & 0x3f;
+			rtl_set_bbreg(hw, REG_MAC_PHY_CTRL, \
+				0x7ff80000, (crystal_cap | (crystal_cap << 6)));
+			RT_TRACE(COMP_DIG, DBG_LOUD, \
+				("rtl8821ae_dm_dynamic_atc_switch(): New crystal cap = 0x%x \n", \
+				rtldm->crystal_cap));
+		}
+	}
+
+}
+
+void rtl8821ae_dm_watchdog(struct ieee80211_hw *hw)
+{
+	struct rtl_priv *rtlpriv = rtl_priv(hw);
+	struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw));
+	struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
+	bool b_fw_current_inpsmode = false;
+	bool b_fw_ps_awake = true;
+
+	rtlpriv->cfg->ops->get_hw_reg(hw, HW_VAR_FW_PSMODE_STATUS,
+				      (u8 *) (&b_fw_current_inpsmode));
+
+	rtlpriv->cfg->ops->get_hw_reg(hw, HW_VAR_FWLPS_RF_ON,
+				      (u8 *) (&b_fw_ps_awake));
+
+	if(ppsc->p2p_ps_info.p2p_ps_mode)
+		b_fw_ps_awake = false;
+
+	if((ppsc->rfpwr_state == ERFON) &&
+		((!b_fw_current_inpsmode) && b_fw_ps_awake) &&
+		(!ppsc->rfchange_inprogress)) {
+		rtl8821ae_dm_common_info_self_update(hw);
+		rtl8821ae_dm_false_alarm_counter_statistics(hw);
+		rtl8821ae_dm_check_rssi_monitor(hw);
+		rtl8821ae_dm_dig(hw);
+		rtl8821ae_dm_dynamic_edcca(hw);
+		rtl8821ae_dm_cck_packet_detection_thresh(hw);
+		rtl8821ae_dm_refresh_rate_adaptive_mask(hw);
+		rtl8821ae_dm_check_edca_turbo(hw);
+		rtl8821ae_dm_dynamic_atc_switch(hw);
+		if (rtlhal->hw_type == HARDWARE_TYPE_RTL8812AE)
+			rtl8812ae_dm_check_txpower_tracking_thermalmeter(hw);
+		else
+			rtl8821ae_dm_check_txpower_tracking_thermalmeter(hw);
+		rtl8821ae_dm_iq_calibrate(hw);
+		if (rtlpriv->cfg->ops->get_btc_status()){
+			rtlpriv->btcoexist.btc_ops->btc_periodical(rtlpriv);
+		}
+	}
+
+	rtlpriv->dm.dbginfo.num_qry_beacon_pkt = 0;
+}
+
+void rtl8821ae_dm_set_tx_ant_by_tx_info(struct ieee80211_hw *hw,
+												   u8 *pdesc, u32 mac_id)
+{
+	struct rtl_efuse *rtlefuse = rtl_efuse(rtl_priv(hw));
+	struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
+	struct rtl_dm *rtldm = rtl_dm(rtl_priv(hw));
+	struct fast_ant_trainning *pfat_table= &(rtldm->fat_table);
+
+	if (rtlhal->hw_type != HARDWARE_TYPE_RTL8812AE)
+		return;
+
+	if ((rtlefuse->antenna_div_type == CG_TRX_HW_ANTDIV) ||
+		(rtlefuse->antenna_div_type == CG_TRX_HW_ANTDIV)){
+		SET_TX_DESC_TX_ANT(pdesc, pfat_table->antsel_a[mac_id]);
+	}
+}
diff --git a/drivers/staging/rtl8821ae/rtl8821ae/dm.h b/drivers/staging/rtl8821ae/rtl8821ae/dm.h
new file mode 100644
index 0000000..ebbff9b
--- /dev/null
+++ b/drivers/staging/rtl8821ae/rtl8821ae/dm.h
@@ -0,0 +1,426 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2009-2010  Realtek Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ * The full GNU General Public License is included in this distribution in the
+ * file called LICENSE.
+ *
+ * Contact Information:
+ * wlanfae <wlanfae@realtek.com>
+ * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
+ * Hsinchu 300, Taiwan.
+ *
+ * Larry Finger <Larry.Finger@lwfinger.net>
+ *
+ *****************************************************************************/
+
+#ifndef	__RTL8821AE_DM_H__
+#define __RTL8821AE_DM_H__
+
+#define	MAIN_ANT	0
+#define	AUX_ANT	1
+#define	MAIN_ANT_CG_TRX	1
+#define	AUX_ANT_CG_TRX	0
+#define	MAIN_ANT_CGCS_RX	0
+#define	AUX_ANT_CGCS_RX	1
+
+#define	TXSCALE_TABLE_SIZE 37
+
+/*RF REG LIST*/
+#define	DM_REG_RF_MODE_11N				0x00
+#define	DM_REG_RF_0B_11N				0x0B
+#define	DM_REG_CHNBW_11N				0x18
+#define	DM_REG_T_METER_11N				0x24
+#define	DM_REG_RF_25_11N				0x25
+#define	DM_REG_RF_26_11N				0x26
+#define	DM_REG_RF_27_11N				0x27
+#define	DM_REG_RF_2B_11N				0x2B
+#define	DM_REG_RF_2C_11N				0x2C
+#define	DM_REG_RXRF_A3_11N				0x3C
+#define	DM_REG_T_METER_92D_11N			0x42
+#define	DM_REG_T_METER_88E_11N			0x42
+
+
+
+/*BB REG LIST*/
+/*PAGE 8 */
+#define	DM_REG_BB_CTRL_11N				0x800
+#define	DM_REG_RF_PIN_11N				0x804
+#define	DM_REG_PSD_CTRL_11N				0x808
+#define	DM_REG_TX_ANT_CTRL_11N			0x80C
+#define	DM_REG_BB_PWR_SAV5_11N			0x818
+#define	DM_REG_CCK_RPT_FORMAT_11N		0x824
+#define	DM_REG_RX_DEFUALT_A_11N		0x858
+#define	DM_REG_RX_DEFUALT_B_11N		0x85A
+#define	DM_REG_BB_PWR_SAV3_11N			0x85C
+#define	DM_REG_ANTSEL_CTRL_11N			0x860
+#define	DM_REG_RX_ANT_CTRL_11N			0x864
+#define	DM_REG_PIN_CTRL_11N				0x870
+#define	DM_REG_BB_PWR_SAV1_11N			0x874
+#define	DM_REG_ANTSEL_PATH_11N			0x878
+#define	DM_REG_BB_3WIRE_11N			0x88C
+#define	DM_REG_SC_CNT_11N				0x8C4
+#define	DM_REG_PSD_DATA_11N			0x8B4
+/*PAGE 9*/
+#define	DM_REG_ANT_MAPPING1_11N		0x914
+#define	DM_REG_ANT_MAPPING2_11N		0x918
+/*PAGE A*/
+#define	DM_REG_CCK_ANTDIV_PARA1_11N	0xA00
+#define	DM_REG_CCK_CCA_11N			0xA0A
+#define	DM_REG_CCK_CCA_11AC			0xA0A
+#define	DM_REG_CCK_ANTDIV_PARA2_11N	0xA0C
+#define	DM_REG_CCK_ANTDIV_PARA3_11N	0xA10
+#define	DM_REG_CCK_ANTDIV_PARA4_11N	0xA14
+#define	DM_REG_CCK_FILTER_PARA1_11N	0xA22
+#define	DM_REG_CCK_FILTER_PARA2_11N	0xA23
+#define	DM_REG_CCK_FILTER_PARA3_11N	0xA24
+#define	DM_REG_CCK_FILTER_PARA4_11N	0xA25
+#define	DM_REG_CCK_FILTER_PARA5_11N	0xA26
+#define	DM_REG_CCK_FILTER_PARA6_11N	0xA27
+#define	DM_REG_CCK_FILTER_PARA7_11N	0xA28
+#define	DM_REG_CCK_FILTER_PARA8_11N	0xA29
+#define	DM_REG_CCK_FA_RST_11N			0xA2C
+#define	DM_REG_CCK_FA_MSB_11N			0xA58
+#define	DM_REG_CCK_FA_LSB_11N			0xA5C
+#define	DM_REG_CCK_CCA_CNT_11N			0xA60
+#define	DM_REG_BB_PWR_SAV4_11N			0xA74
+/*PAGE B */
+#define	DM_REG_LNA_SWITCH_11N			0xB2C
+#define	DM_REG_PATH_SWITCH_11N			0xB30
+#define	DM_REG_RSSI_CTRL_11N			0xB38
+#define	DM_REG_CONFIG_ANTA_11N			0xB68
+#define	DM_REG_RSSI_BT_11N				0xB9C
+/*PAGE C */
+#define	DM_REG_OFDM_FA_HOLDC_11N		0xC00
+#define	DM_REG_RX_PATH_11N				0xC04
+#define	DM_REG_TRMUX_11N				0xC08
+#define	DM_REG_OFDM_FA_RSTC_11N		0xC0C
+#define	DM_REG_RXIQI_MATRIX_11N		0xC14
+#define	DM_REG_TXIQK_MATRIX_LSB1_11N	0xC4C
+#define	DM_REG_IGI_A_11N				0xC50
+#define	DM_REG_IGI_A_11AC				0xC50
+#define	DM_REG_ANTDIV_PARA2_11N		0xC54
+#define	DM_REG_IGI_B_11N					0xC58
+#define	DM_REG_IGI_B_11AC					0xE50
+#define	DM_REG_ANTDIV_PARA3_11N		0xC5C
+#define	DM_REG_BB_PWR_SAV2_11N			0xC70
+#define	DM_REG_RX_OFF_11N				0xC7C
+#define	DM_REG_TXIQK_MATRIXA_11N		0xC80
+#define	DM_REG_TXIQK_MATRIXB_11N		0xC88
+#define	DM_REG_TXIQK_MATRIXA_LSB2_11N	0xC94
+#define	DM_REG_TXIQK_MATRIXB_LSB2_11N	0xC9C
+#define	DM_REG_RXIQK_MATRIX_LSB_11N	0xCA0
+#define	DM_REG_ANTDIV_PARA1_11N		0xCA4
+#define	DM_REG_OFDM_FA_TYPE1_11N		0xCF0
+/*PAGE D */
+#define	DM_REG_OFDM_FA_RSTD_11N		0xD00
+#define	DM_REG_OFDM_FA_TYPE2_11N		0xDA0
+#define	DM_REG_OFDM_FA_TYPE3_11N		0xDA4
+#define	DM_REG_OFDM_FA_TYPE4_11N		0xDA8
+/*PAGE E */
+#define	DM_REG_TXAGC_A_6_18_11N		0xE00
+#define	DM_REG_TXAGC_A_24_54_11N		0xE04
+#define	DM_REG_TXAGC_A_1_MCS32_11N	0xE08
+#define	DM_REG_TXAGC_A_MCS0_3_11N		0xE10
+#define	DM_REG_TXAGC_A_MCS4_7_11N		0xE14
+#define	DM_REG_TXAGC_A_MCS8_11_11N	0xE18
+#define	DM_REG_TXAGC_A_MCS12_15_11N	0xE1C
+#define	DM_REG_FPGA0_IQK_11N			0xE28
+#define	DM_REG_TXIQK_TONE_A_11N		0xE30
+#define	DM_REG_RXIQK_TONE_A_11N		0xE34
+#define	DM_REG_TXIQK_PI_A_11N			0xE38
+#define	DM_REG_RXIQK_PI_A_11N			0xE3C
+#define	DM_REG_TXIQK_11N				0xE40
+#define	DM_REG_RXIQK_11N				0xE44
+#define	DM_REG_IQK_AGC_PTS_11N			0xE48
+#define	DM_REG_IQK_AGC_RSP_11N			0xE4C
+#define	DM_REG_BLUETOOTH_11N			0xE6C
+#define	DM_REG_RX_WAIT_CCA_11N			0xE70
+#define	DM_REG_TX_CCK_RFON_11N			0xE74
+#define	DM_REG_TX_CCK_BBON_11N			0xE78
+#define	DM_REG_OFDM_RFON_11N			0xE7C
+#define	DM_REG_OFDM_BBON_11N			0xE80
+#define DM_REG_TX2RX_11N				0xE84
+#define	DM_REG_TX2TX_11N				0xE88
+#define	DM_REG_RX_CCK_11N				0xE8C
+#define	DM_REG_RX_OFDM_11N				0xED0
+#define	DM_REG_RX_WAIT_RIFS_11N		0xED4
+#define	DM_REG_RX2RX_11N				0xED8
+#define	DM_REG_STANDBY_11N				0xEDC
+#define	DM_REG_SLEEP_11N				0xEE0
+#define	DM_REG_PMPD_ANAEN_11N			0xEEC
+
+
+/*MAC REG LIST*/
+#define	DM_REG_BB_RST_11N				0x02
+#define	DM_REG_ANTSEL_PIN_11N			0x4C
+#define	DM_REG_EARLY_MODE_11N			0x4D0
+#define	DM_REG_RSSI_MONITOR_11N		0x4FE
+#define	DM_REG_EDCA_VO_11N				0x500
+#define	DM_REG_EDCA_VI_11N				0x504
+#define	DM_REG_EDCA_BE_11N				0x508
+#define	DM_REG_EDCA_BK_11N				0x50C
+#define	DM_REG_TXPAUSE_11N				0x522
+#define	DM_REG_RESP_TX_11N				0x6D8
+#define	DM_REG_ANT_TRAIN_PARA1_11N	0x7b0
+#define	DM_REG_ANT_TRAIN_PARA2_11N	0x7b4
+
+
+/*DIG Related*/
+#define	DM_BIT_IGI_11N					0x0000007F
+#define	DM_BIT_IGI_11AC					0xFFFFFFFF
+
+
+
+#define HAL_DM_DIG_DISABLE			BIT(0)
+#define HAL_DM_HIPWR_DISABLE		BIT(1)
+
+#define OFDM_TABLE_LENGTH 			43
+#define CCK_TABLE_LENGTH 			33
+
+#define OFDM_TABLE_SIZE 			37
+#define CCK_TABLE_SIZE				33
+
+#define BW_AUTO_SWITCH_HIGH_LOW		25
+#define BW_AUTO_SWITCH_LOW_HIGH		30
+
+#define DM_DIG_THRESH_HIGH			40
+#define DM_DIG_THRESH_LOW			35
+
+#define DM_FALSEALARM_THRESH_LOW	400
+#define DM_FALSEALARM_THRESH_HIGH	1000
+
+#define DM_DIG_MAX					0x3e
+#define DM_DIG_MIN					0x1e
+
+#define DM_DIG_MAX_AP				0x32
+#define DM_DIG_MIN_AP				0x20
+
+#define DM_DIG_FA_UPPER				0x3e
+#define DM_DIG_FA_LOWER				0x1e
+#define DM_DIG_FA_TH0				0x200
+#define DM_DIG_FA_TH1				0x300
+#define DM_DIG_FA_TH2				0x400
+
+#define DM_DIG_BACKOFF_MAX			12
+#define DM_DIG_BACKOFF_MIN			-4
+#define DM_DIG_BACKOFF_DEFAULT		10
+
+#define RXPATHSELECTION_SS_TH_lOW	30
+#define RXPATHSELECTION_DIFF_TH		18
+
+#define DM_RATR_STA_INIT			0
+#define DM_RATR_STA_HIGH			1
+#define DM_RATR_STA_MIDDLE			2
+#define DM_RATR_STA_LOW				3
+
+#define CTS2SELF_THVAL				30
+#define REGC38_TH					20
+
+#define WAIOTTHVal					25
+
+#define TXHIGHPWRLEVEL_NORMAL		0
+#define TXHIGHPWRLEVEL_LEVEL1		1
+#define TXHIGHPWRLEVEL_LEVEL2		2
+#define TXHIGHPWRLEVEL_BT1			3
+#define TXHIGHPWRLEVEL_BT2			4
+
+#define DM_TYPE_BYFW				0
+#define DM_TYPE_BYDRIVER			1
+
+#define TX_POWER_NEAR_FIELD_THRESH_LVL2	74
+#define TX_POWER_NEAR_FIELD_THRESH_LVL1	67
+#define TXPWRTRACK_MAX_IDX 6
+
+/* Dynamic ATC switch */
+#define ATC_STATUS_OFF				0x0			/* disable */
+#define	ATC_STATUS_ON				0x1			/* enable */
+#define	CFO_THRESHOLD_XTAL			10			/* kHz */
+#define	CFO_THRESHOLD_ATC			80			/* kHz */
+
+#define AVG_THERMAL_NUM_8812A	4
+#define TXPWR_TRACK_TABLE_SIZE 	30
+#define MAX_PATH_NUM_8812A		2
+#define MAX_PATH_NUM_8821A		1
+
+
+struct ps_t {
+	u8 pre_ccastate;
+	u8 cur_ccasate;
+	u8 pre_rfstate;
+	u8 cur_rfstate;
+	u8 initialize;
+	long rssi_val_min;
+
+};
+
+struct dig_t {
+	u8 dig_enable_flag;
+	u8 dig_ext_port_stage;
+	u32 rssi_lowthresh;
+	u32 rssi_highthresh;
+
+	u32 fa_lowthresh;
+	u32 fa_highthresh;
+
+	u8 cursta_connectctate;
+	u8 presta_connectstate;
+	u8 curmultista_connectstate;
+
+	u8 pre_igvalue;
+	u8 cur_igvalue;
+	u8 bt30_cur_igi;
+	u8 backup_igvalue;
+	u8 stop_dig;
+
+	char backoff_val;
+	char backoff_val_range_max;
+	char backoff_val_range_min;
+	u8 rx_gain_range_max;
+	u8 rx_gain_range_min;
+	u8 rssi_val_min;
+
+	u8 pre_cck_cca_thres;
+	u8 cur_cck_cca_thres;
+	u8 pre_cck_pd_state;
+	u8 cur_cck_pd_state;
+
+	u8 large_fa_hit;
+	u8 forbidden_igi;
+	u32 recover_cnt;
+
+	u8 dig_dynamic_min_0;
+	u8 dig_dynamic_min_1;
+	bool b_media_connect_0;
+	bool b_media_connect_1;
+
+	u32 antdiv_rssi_max;
+	u32 rssi_max;
+};
+
+
+enum FAT_STATE {
+	FAT_NORMAL_STATE	= 0,
+	FAT_TRAINING_STATE = 1,
+};
+
+enum tag_dynamic_init_gain_operation_type_definition {
+	DIG_TYPE_THRESH_HIGH = 0,
+	DIG_TYPE_THRESH_LOW = 1,
+	DIG_TYPE_BACKOFF = 2,
+	DIG_TYPE_RX_GAIN_MIN = 3,
+	DIG_TYPE_RX_GAIN_MAX = 4,
+	DIG_TYPE_ENABLE = 5,
+	DIG_TYPE_DISABLE = 6,
+	DIG_OP_TYPE_MAX
+};
+
+enum tag_cck_packet_detection_threshold_type_definition {
+	CCK_PD_STAGE_LowRssi = 0,
+	CCK_PD_STAGE_HighRssi = 1,
+	CCK_FA_STAGE_Low = 2,
+	CCK_FA_STAGE_High = 3,
+	CCK_PD_STAGE_MAX = 4,
+};
+
+enum dm_1r_cca_e {
+	CCA_1R = 0,
+	CCA_2R = 1,
+	CCA_MAX = 2,
+};
+
+enum dm_rf_e {
+	RF_SAVE = 0,
+	RF_NORMAL = 1,
+	RF_MAX = 2,
+};
+
+enum dm_sw_ant_switch_e {
+	ANS_ANTENNA_B = 1,
+	ANS_ANTENNA_A = 2,
+	ANS_ANTENNA_MAX = 3,
+};
+
+enum dm_dig_ext_port_alg_e {
+	DIG_EXT_PORT_STAGE_0 = 0,
+	DIG_EXT_PORT_STAGE_1 = 1,
+	DIG_EXT_PORT_STAGE_2 = 2,
+	DIG_EXT_PORT_STAGE_3 = 3,
+	DIG_EXT_PORT_STAGE_MAX = 4,
+};
+
+enum dm_dig_connect_e {
+	DIG_STA_DISCONNECT = 0,
+	DIG_STA_CONNECT = 1,
+	DIG_STA_BEFORE_CONNECT = 2,
+	DIG_MULTISTA_DISCONNECT = 3,
+	DIG_MULTISTA_CONNECT = 4,
+	DIG_CONNECT_MAX
+};
+
+enum pwr_track_control_method {
+	BBSWING,
+	TXAGC,
+	MIX_MODE
+};
+
+#define BT_RSSI_STATE_NORMAL_POWER      BIT_OFFSET_LEN_MASK_32(0, 1)
+#define BT_RSSI_STATE_AMDPU_OFF         BIT_OFFSET_LEN_MASK_32(1, 1)
+#define BT_RSSI_STATE_SPECIAL_LOW       BIT_OFFSET_LEN_MASK_32(2, 1)
+#define BT_RSSI_STATE_BG_EDCA_LOW       BIT_OFFSET_LEN_MASK_32(3, 1)
+#define BT_RSSI_STATE_TXPOWER_LOW       BIT_OFFSET_LEN_MASK_32(4, 1)
+#define GET_UNDECORATED_AVERAGE_RSSI(_priv)     \
+        ((((struct rtl_priv *)(_priv))->mac80211.opmode == NL80211_IFTYPE_ADHOC) ?  \
+        (((struct rtl_priv *)(_priv))->dm.entry_min_undecoratedsmoothed_pwdb) :  \
+        (((struct rtl_priv *)(_priv))->dm.undecorated_smoothed_pwdb))
+
+extern struct dig_t dm_digtable;
+void rtl8821ae_dm_set_tx_ant_by_tx_info(struct ieee80211_hw *hw,
+												   u8 *pdesc, u32 mac_id);
+void rtl8821ae_dm_ant_sel_statistics(struct ieee80211_hw *hw,
+											  u8 antsel_tr_mux, u32 mac_id,
+											  u32 rx_pwdb_all);
+void rtl8821ae_dm_fast_antenna_trainning_callback(unsigned long data);
+void rtl8821ae_dm_init(struct ieee80211_hw *hw);
+void rtl8821ae_dm_watchdog(struct ieee80211_hw *hw);
+void rtl8821ae_dm_write_dig(struct ieee80211_hw *hw, u8 current_igi);
+void rtl8821ae_dm_init_edca_turbo(struct ieee80211_hw *hw);
+void rtl8821ae_dm_check_txpower_tracking_thermalmeter(struct ieee80211_hw *hw);
+void rtl8821ae_dm_init_rate_adaptive_mask(struct ieee80211_hw *hw);
+void rtl8821ae_dm_txpower_track_adjust(struct ieee80211_hw *hw,
+												   u8 type,u8 *pdirection,
+												   u32 *poutwrite_val);
+void rtl8821ae_dm_clear_txpower_tracking_state(struct ieee80211_hw *hw);
+void rtl8821ae_dm_write_cck_cca_thres(struct ieee80211_hw *hw, u8 current_cca);
+void rtl8821ae_dm_initialize_txpower_tracking_thermalmeter(struct ieee80211_hw *hw);
+void rtl8812ae_dm_path_diversity(struct ieee80211_hw *hw);
+void rtl8812ae_dm_path_diversity_init(struct ieee80211_hw *hw);
+void rtl8812ae_dm_path_statistics(struct ieee80211_hw *hw,
+	u32 rssi_a, u32 rssi_b);
+void rtl812ae_dm_set_txpath_by_txinfo(struct ieee80211_hw *hw,
+	u8 *pdesc);
+void rtl8812ae_dm_txpwr_track_set_pwr(struct ieee80211_hw *hw,
+												enum pwr_track_control_method method,
+												u8 rf_path,
+												u8 channel_mapped_index);
+void rtl8821ae_dm_txpwr_track_set_pwr(struct ieee80211_hw *hw,
+	enum pwr_track_control_method method, u8 rf_path, u8 channel_mapped_index);
+
+void rtl8812ae_dm_update_init_rate(struct ieee80211_hw *hw, u8 rate);
+u8 rtl8812ae_hw_rate_to_mrate(struct ieee80211_hw *hw, u8 rate);
+void rtl8812ae_dm_txpower_tracking_callback_thermalmeter(struct ieee80211_hw *hw);
+void rtl8821ae_dm_txpower_tracking_callback_thermalmeter(struct ieee80211_hw *hw);
+#endif
diff --git a/drivers/staging/rtl8821ae/rtl8821ae/fw.c b/drivers/staging/rtl8821ae/rtl8821ae/fw.c
new file mode 100644
index 0000000..4083cab
--- /dev/null
+++ b/drivers/staging/rtl8821ae/rtl8821ae/fw.c
@@ -0,0 +1,1349 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2009-2010  Realtek Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ * The full GNU General Public License is included in this distribution in the
+ * file called LICENSE.
+ *
+ * Contact Information:
+ * wlanfae <wlanfae@realtek.com>
+ * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
+ * Hsinchu 300, Taiwan.
+ *
+ * Larry Finger <Larry.Finger@lwfinger.net>
+ *
+ *****************************************************************************/
+
+#include "../wifi.h"
+#include "../pci.h"
+#include "../base.h"
+#include "reg.h"
+#include "def.h"
+#include "fw.h"
+#include "dm.h"
+
+static void _rtl8821ae_enable_fw_download(struct ieee80211_hw *hw, bool enable)
+{
+	struct rtl_priv *rtlpriv = rtl_priv(hw);
+	u8 tmp;
+
+	if (enable) {
+		rtl_write_byte(rtlpriv, REG_MCUFWDL, 0x05);
+
+		tmp = rtl_read_byte(rtlpriv, REG_MCUFWDL + 2);
+		rtl_write_byte(rtlpriv, REG_MCUFWDL + 2, tmp & 0xf7);
+
+		tmp = rtl_read_byte(rtlpriv, REG_MCUFWDL);
+		//printk("0x80=%02x.\n",tmp);
+	} else {
+		tmp = rtl_read_byte(rtlpriv, REG_MCUFWDL);
+		rtl_write_byte(rtlpriv, REG_MCUFWDL, tmp & 0xfe);
+		tmp = rtl_read_byte(rtlpriv, REG_MCUFWDL);
+		//printk("0x80=%02x.\n",tmp);
+	}
+
+}
+
+static void _rtl8821ae_fw_block_write(struct ieee80211_hw *hw,
+				   const u8 *buffer, u32 size)
+{
+	struct rtl_priv *rtlpriv = rtl_priv(hw);
+	u32 blockSize = sizeof(u32);
+	u8 *bufferPtr = (u8 *) buffer;
+	u32 *pu4BytePtr = (u32 *) buffer;
+	u32 i, offset, blockCount, remainSize;
+
+	blockCount = size / blockSize;
+	remainSize = size % blockSize;
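+	/* Write the image as 32-bit words into the download window starting at
+	 * FW_8821AE_START_ADDRESS, then push any trailing bytes one at a time.
+	 */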
+
+	for (i = 0; i < blockCount; i++) {
+		offset = i * blockSize;
+		rtl_write_dword(rtlpriv, (FW_8821AE_START_ADDRESS + offset),
+				*(pu4BytePtr + i));
+	}
+
+	if (remainSize) {
+		offset = blockCount * blockSize;
+		bufferPtr += offset;
+		for (i = 0; i < remainSize; i++) {
+			rtl_write_byte(rtlpriv, (FW_8821AE_START_ADDRESS +
+						 offset + i), *(bufferPtr + i));
+		}
+	}
+}
+
+static void _rtl8821ae_fw_page_write(struct ieee80211_hw *hw,
+				  u32 page, const u8 *buffer, u32 size)
+{
+	struct rtl_priv *rtlpriv = rtl_priv(hw);
+	u8 value8;
+	u8 u8page = (u8) (page & 0x07);
+
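+	/* Select the firmware download page via the low 3 bits of REG_MCUFWDL+2,
+	 * then block-write the buffer into that page window.
+	 */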
+	value8 = (rtl_read_byte(rtlpriv, REG_MCUFWDL + 2) & 0xF8) | u8page;
+
+	rtl_write_byte(rtlpriv, (REG_MCUFWDL + 2), value8);
+	_rtl8821ae_fw_block_write(hw, buffer, size);
+}
+
+static void _rtl8821ae_fill_dummy(u8 *pfwbuf, u32 *pfwlen)
+{
+	u32 fwlen = *pfwlen;
+	u8 remain = (u8) (fwlen % 4);
+
+	remain = (remain == 0) ? 0 : (4 - remain);
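+	/* Zero-pad the image up to the next 4-byte boundary so the block writer
+	 * can always operate on whole 32-bit words.
+	 */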
+
+	while (remain > 0) {
+		pfwbuf[fwlen] = 0;
+		fwlen++;
+		remain--;
+	}
+
+	*pfwlen = fwlen;
+}
+
+static void _rtl8821ae_write_fw(struct ieee80211_hw *hw,
+			     					  enum version_8821ae version,
+			     					  u8 *buffer, u32 size)
+{
+	struct rtl_priv *rtlpriv = rtl_priv(hw);
+	u8 *bufferPtr = (u8 *) buffer;
+	u32 pageNums, remainSize;
+	u32 page, offset;
+
+	RT_TRACE(COMP_FW, DBG_LOUD, ("FW size is %d bytes,\n", size));
+
+	_rtl8821ae_fill_dummy(bufferPtr, &size);
+
+	pageNums = size / FW_8821AE_PAGE_SIZE;
+	remainSize = size % FW_8821AE_PAGE_SIZE;
+
+	if (pageNums > 8) {
+		RT_TRACE(COMP_ERR, DBG_EMERG,
+			 ("Page numbers should not be greater than 8\n"));
+	}
+
+	for (page = 0; page < pageNums; page++) {
+		offset = page * FW_8821AE_PAGE_SIZE;
+		_rtl8821ae_fw_page_write(hw, page, (bufferPtr + offset),
+				      FW_8821AE_PAGE_SIZE);
+	}
+
+	if (remainSize) {
+		offset = pageNums * FW_8821AE_PAGE_SIZE;
+		page = pageNums;
+		_rtl8821ae_fw_page_write(hw, page, (bufferPtr + offset),
+				      remainSize);
+	}
+
+}
+
+static int _rtl8821ae_fw_free_to_go(struct ieee80211_hw *hw)
+{
+	struct rtl_priv *rtlpriv = rtl_priv(hw);
+	int err = -EIO;
+	u32 counter = 0;
+	u32 value32;
+
+	do {
+		value32 = rtl_read_dword(rtlpriv, REG_MCUFWDL);
+	} while ((counter++ < FW_8821AE_POLLING_TIMEOUT_COUNT) &&
+		 (!(value32 & FWDL_CHKSUM_RPT)));
+
+	if (counter >= FW_8821AE_POLLING_TIMEOUT_COUNT) {
+		RT_TRACE(COMP_ERR, DBG_LOUD,
+			 ("chksum report fail! REG_MCUFWDL:0x%08x.\n",
+			  value32));
+		goto exit;
+	}
+
+	RT_TRACE(COMP_FW, DBG_EMERG,
+		 ("Checksum report OK ! REG_MCUFWDL:0x%08x .\n", value32));
+
+	value32 = rtl_read_dword(rtlpriv, REG_MCUFWDL);
+	value32 |= MCUFWDL_RDY;
+	value32 &= ~WINTINI_RDY;
+	rtl_write_dword(rtlpriv, REG_MCUFWDL, value32);
+
+	rtl8821ae_firmware_selfreset(hw);
+
+	counter = 0;
+	do {
+		value32 = rtl_read_dword(rtlpriv, REG_MCUFWDL);
+		if (value32 & WINTINI_RDY) {
+			RT_TRACE(COMP_FW, DBG_LOUD,
+				 ("Polling FW ready success!! REG_MCUFWDL:0x%08x .\n",
+				  value32));
+			err = 0;
+			goto exit;
+		}
+
+		udelay(FW_8821AE_POLLING_DELAY);
+
+	} while (counter++ < FW_8821AE_POLLING_TIMEOUT_COUNT);
+
+	RT_TRACE(COMP_ERR, DBG_EMERG,
+		 ("Polling FW ready fail!! REG_MCUFWDL:0x%08x .\n", value32));
+
+exit:
+	return err;
+}
+
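+/*
+ * Download the firmware image pointed to by rtlhal->pfirmware: skip the
+ * header if one is present, enable download mode, write the image and
+ * finally poll until the firmware reports it is ready to run.
+ */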
+int rtl8821ae_download_fw(struct ieee80211_hw *hw,
+			  bool buse_wake_on_wlan_fw)
+{
+	struct rtl_priv *rtlpriv = rtl_priv(hw);
+	struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
+	struct rtl8821a_firmware_header *pfwheader;
+	u8 *pfwdata;
+	u32 fwsize;
+	int err;
+	enum version_8821ae version = rtlhal->version;
+
+	if(!rtlhal->pfirmware)
+		return 1;
+
+	pfwheader = (struct rtl8821a_firmware_header *)rtlhal->pfirmware;
+	pfwdata = (u8 *) rtlhal->pfirmware;
+	fwsize = rtlhal->fwsize;
+	RT_TRACE(COMP_FW, DBG_DMESG,
+		 ("normal Firmware SIZE %d\n", fwsize));
+
+	if (IS_FW_HEADER_EXIST_8812(pfwheader) || IS_FW_HEADER_EXIST_8821(pfwheader)) {
+		RT_TRACE(COMP_FW, DBG_DMESG,
+			 ("Firmware Version(%d), Signature(%#x),Size(%d)\n",
+			  pfwheader->version, pfwheader->signature,
+			  (int)sizeof(struct rtl8821a_firmware_header)));
+
+		pfwdata = pfwdata + sizeof(struct rtl8821a_firmware_header);
+		fwsize = fwsize - sizeof(struct rtl8821a_firmware_header);
+	}
+
+	if(rtl_read_byte(rtlpriv, REG_MCUFWDL) & BIT(7)){
+		rtl_write_byte(rtlpriv, REG_MCUFWDL, 0x00);
+		rtl8821ae_firmware_selfreset(hw);
+	}
+	_rtl8821ae_enable_fw_download(hw, true);
+	_rtl8821ae_write_fw(hw, version, pfwdata, fwsize);
+	_rtl8821ae_enable_fw_download(hw, false);
+
+	err = _rtl8821ae_fw_free_to_go(hw);
+	if (err) {
+		RT_TRACE(COMP_ERR, DBG_EMERG,
+			 ("Firmware is not ready to run!\n"));
+	} else {
+		RT_TRACE(COMP_FW, DBG_LOUD,
+			 ("Firmware is ready to run!\n"));
+	}
+
+	return 0;
+}
+
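+/* Return true once the firmware has read (cleared) H2C mailbox 'boxnum'. */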
+static bool _rtl8821ae_check_fw_read_last_h2c(struct ieee80211_hw *hw, u8 boxnum)
+{
+	struct rtl_priv *rtlpriv = rtl_priv(hw);
+	u8 val_hmetfr;
+	bool result = false;
+
+	val_hmetfr = rtl_read_byte(rtlpriv, REG_HMETFR);
+	if (((val_hmetfr >> boxnum) & BIT(0)) == 0)
+		result = true;
+	return result;
+}
+
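+/*
+ * Write one H2C command into the next free host-to-MCU mailbox.  Commands
+ * longer than 3 bytes spill their extra bytes into the matching extension
+ * register; the mailbox index rotates through 0-3.
+ */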
+static void _rtl8821ae_fill_h2c_command(struct ieee80211_hw *hw,
+			      u8 element_id, u32 cmd_len, u8 *p_cmdbuffer)
+{
+	struct rtl_priv *rtlpriv = rtl_priv(hw);
+	struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
+	u8 boxnum =0;
+	u16 box_reg = 0, box_extreg = 0;
+	u8 u1b_tmp = 0;
+	bool isfw_read = false;
+	u8 buf_index = 0;
+	bool bwrite_sucess = false;
+	u8 wait_h2c_limmit = 100;
+	/*u8 wait_writeh2c_limmit = 100;*/
+	u8 boxcontent[4], boxextcontent[4];
+	u32 h2c_waitcounter = 0;
+	unsigned long flag =0;
+	u8 idx =0;
+
+	RT_TRACE(COMP_CMD, DBG_LOUD, ("come in\n"));
+
+	while (true) {
+		spin_lock_irqsave(&rtlpriv->locks.h2c_lock, flag);
+		if (rtlhal->b_h2c_setinprogress) {
+			RT_TRACE(COMP_CMD, DBG_LOUD,
+				 ("H2C set in progress! Wait to set.."
+				  "element_id(%d).\n", element_id));
+
+			while (rtlhal->b_h2c_setinprogress) {
+				spin_unlock_irqrestore(&rtlpriv->locks.h2c_lock,
+						       flag);
+				h2c_waitcounter++;
+				RT_TRACE(COMP_CMD, DBG_LOUD,
+					 ("Wait 100 us (%d times)...\n",
+					  h2c_waitcounter));
+				udelay(100);
+
+				if (h2c_waitcounter > 1000)
+					return;
+				spin_lock_irqsave(&rtlpriv->locks.h2c_lock,
+						  flag);
+			}
+			spin_unlock_irqrestore(&rtlpriv->locks.h2c_lock, flag);
+		} else {
+			rtlhal->b_h2c_setinprogress = true;
+			spin_unlock_irqrestore(&rtlpriv->locks.h2c_lock, flag);
+			break;
+		}
+	}
+
+	while (!bwrite_sucess) {
+	/*cosa remove this because never reach this.*/
+#if 0
+		wait_writeh2c_limmit--;
+		if (wait_writeh2c_limmit == 0) {
+			RT_TRACE(COMP_ERR, DBG_EMERG,
+				 ("Write H2C fail because no trigger "
+				  "for FW INT!\n"));
+			break;
+		}
+#endif
+
+		boxnum = rtlhal->last_hmeboxnum;
+		switch (boxnum) {
+		case 0:
+			box_reg = REG_HMEBOX_0;
+			box_extreg = REG_HMEBOX_EXT_0;
+			break;
+		case 1:
+			box_reg = REG_HMEBOX_1;
+			box_extreg = REG_HMEBOX_EXT_1;
+			break;
+		case 2:
+			box_reg = REG_HMEBOX_2;
+			box_extreg = REG_HMEBOX_EXT_2;
+			break;
+		case 3:
+			box_reg = REG_HMEBOX_3;
+			box_extreg = REG_HMEBOX_EXT_3;
+			break;
+		default:
+			RT_TRACE(COMP_ERR, DBG_EMERG,
+				 ("switch case not processed\n"));
+			break;
+		}
+
+		isfw_read = false;
+		u1b_tmp = rtl_read_byte(rtlpriv, REG_CR);
+
+		if (u1b_tmp != 0xEA)
+			isfw_read = true;
+		else {
+			if( rtl_read_byte(rtlpriv, REG_TXDMA_STATUS) == 0xEA ||
+				rtl_read_byte(rtlpriv, REG_TXPKT_EMPTY) == 0xEA)
+				rtl_write_byte(rtlpriv, REG_SYS_CFG1 + 3, 0xFF);
+		}
+
+		if (isfw_read == true) {
+			wait_h2c_limmit = 100;
+			isfw_read = _rtl8821ae_check_fw_read_last_h2c(hw, boxnum);
+			while (!isfw_read) {
+				/*wait until Fw read*/
+				wait_h2c_limmit--;
+				if (wait_h2c_limmit == 0) {
+					RT_TRACE(COMP_CMD, DBG_LOUD,
+						 ("Waiting too long for FW read "
+						  "clear HMEBox(%d)!\n", boxnum));
+					break;
+				}
+
+				udelay(10);
+
+				isfw_read = _rtl8821ae_check_fw_read_last_h2c(hw, boxnum);
+				u1b_tmp = rtl_read_byte(rtlpriv, 0x130);
+				RT_TRACE(COMP_CMD, DBG_LOUD,
+					 ("Waiting for FW read clear HMEBox(%d)!!! "
+					  "0x130 = %2x\n", boxnum, u1b_tmp));
+			}
+		}
+
+		if (!isfw_read) {
+			RT_TRACE(COMP_CMD, DBG_LOUD,
+				 ("Write H2C register BOX[%d] fail! "
+				  "FW did not read.\n", boxnum));
+			break;
+		}
+
+		memset(boxcontent, 0, sizeof(boxcontent));
+		memset(boxextcontent, 0, sizeof(boxextcontent));
+		boxcontent[0] = element_id;
+		RT_TRACE(COMP_CMD, DBG_LOUD,
+			 ("Write element_id box_reg(%4x) = %2x \n",
+			  box_reg, element_id));
+
+		switch (cmd_len) {
+		case 1:
+		case 2:
+		case 3:
+			/*boxcontent[0] &= ~(BIT(7));*/
+			memcpy((u8 *) (boxcontent) + 1,
+			       p_cmdbuffer + buf_index, cmd_len);
+
+			for (idx = 0; idx < 4; idx++) {
+				rtl_write_byte(rtlpriv, box_reg + idx,
+					       boxcontent[idx]);
+			}
+			break;
+		case 4:
+		case 5:
+		case 6:
+		case 7:
+			/*boxcontent[0] |= (BIT(7));*/
+			memcpy((u8 *) (boxextcontent),
+			       p_cmdbuffer + buf_index+3, cmd_len-3);
+			memcpy((u8 *) (boxcontent) + 1,
+			       p_cmdbuffer + buf_index, 3);
+
+			for (idx = 0; idx < 4; idx++) {
+				rtl_write_byte(rtlpriv, box_extreg + idx,
+					       boxextcontent[idx]);
+			}
+
+			for (idx = 0; idx < 4; idx++) {
+				rtl_write_byte(rtlpriv, box_reg + idx,
+					       boxcontent[idx]);
+			}
+			break;
+		default:
+			RT_TRACE(COMP_ERR, DBG_EMERG,
+				 ("switch case not processed\n"));
+			break;
+		}
+
+		bwrite_sucess = true;
+
+		rtlhal->last_hmeboxnum = boxnum + 1;
+		if (rtlhal->last_hmeboxnum == 4)
+			rtlhal->last_hmeboxnum = 0;
+
+		RT_TRACE(COMP_CMD, DBG_LOUD,
+			 ("pHalData->last_hmeboxnum  = %d\n",
+			  rtlhal->last_hmeboxnum));
+	}
+
+	spin_lock_irqsave(&rtlpriv->locks.h2c_lock, flag);
+	rtlhal->b_h2c_setinprogress = false;
+	spin_unlock_irqrestore(&rtlpriv->locks.h2c_lock, flag);
+
+	RT_TRACE(COMP_CMD, DBG_LOUD, ("go out\n"));
+}
+
+void rtl8821ae_fill_h2c_cmd(struct ieee80211_hw *hw,
+			 u8 element_id, u32 cmd_len, u8 *p_cmdbuffer)
+{
+	struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
+	u32 tmp_cmdbuf[2];
+
+	if (rtlhal->bfw_ready == false) {
+		RT_ASSERT(false, ("return H2C cmd because of Fw "
+				  "download fail!!!\n"));
+		return;
+	}
+
+	memset(tmp_cmdbuf, 0, 8);
+	memcpy(tmp_cmdbuf, p_cmdbuffer, cmd_len);
+	_rtl8821ae_fill_h2c_command(hw, element_id, cmd_len, (u8 *) & tmp_cmdbuf);
+
+	return;
+}
+
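+/*
+ * Reset the on-chip 8051: clear the relevant REG_RSV_CTRL bit (BIT(3) on
+ * 8812AE, BIT(0) otherwise), hold BIT(2) of REG_SYS_FUNC_EN + 1 low for
+ * 50 us and then restore both bits.
+ */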
+void rtl8821ae_firmware_selfreset(struct ieee80211_hw *hw)
+{
+	u8 u1b_tmp;
+	struct rtl_priv *rtlpriv = rtl_priv(hw);
+	struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
+
+	if(rtlhal->hw_type == HARDWARE_TYPE_RTL8812AE)
+	{
+		u1b_tmp = rtl_read_byte(rtlpriv, REG_RSV_CTRL+1);
+		rtl_write_byte(rtlpriv, REG_RSV_CTRL+1, (u1b_tmp & (~BIT(3))));
+	}else {
+		u1b_tmp = rtl_read_byte(rtlpriv, REG_RSV_CTRL+1);
+		rtl_write_byte(rtlpriv, REG_RSV_CTRL+1, (u1b_tmp & (~BIT(0))));
+	}
+
+	u1b_tmp = rtl_read_byte(rtlpriv, REG_SYS_FUNC_EN+1);
+	rtl_write_byte(rtlpriv, REG_SYS_FUNC_EN+1, (u1b_tmp & (~BIT(2))));
+	udelay(50);
+
+	if(rtlhal->hw_type == HARDWARE_TYPE_RTL8812AE)
+	{
+		u1b_tmp = rtl_read_byte(rtlpriv, REG_RSV_CTRL+1);
+		rtl_write_byte(rtlpriv, REG_RSV_CTRL+1, (u1b_tmp | BIT(3)));
+	}else {
+		u1b_tmp = rtl_read_byte(rtlpriv, REG_RSV_CTRL+1);
+		rtl_write_byte(rtlpriv, REG_RSV_CTRL+1, (u1b_tmp | BIT(0)));
+	}
+
+	u1b_tmp = rtl_read_byte(rtlpriv, REG_SYS_FUNC_EN+1);
+	rtl_write_byte(rtlpriv, REG_SYS_FUNC_EN+1, (u1b_tmp | BIT(2)));
+
+	RT_TRACE(COMP_INIT, DBG_LOUD, ("rtl8821ae_firmware_selfreset(): 8051 reset success.\n"));
+
+}
+
+void rtl8821ae_set_fw_pwrmode_cmd(struct ieee80211_hw *hw, u8 mode)
+{
+	struct rtl_priv *rtlpriv = rtl_priv(hw);
+	u8 u1_h2c_set_pwrmode[H2C_8821AE_PWEMODE_LENGTH] = { 0 };
+	struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw));
+	u8 rlbm, power_state = 0;
+
+	RT_TRACE(COMP_POWER, DBG_LOUD, ("FW LPS mode = %d\n", mode));
+
+	SET_H2CCMD_PWRMODE_PARM_MODE(u1_h2c_set_pwrmode, ((mode) ? 1 : 0));
+	rlbm = 0;/*YJ,temp,120316. FW now not support RLBM=2.*/
+	SET_H2CCMD_PWRMODE_PARM_RLBM(u1_h2c_set_pwrmode, rlbm);
+	SET_H2CCMD_PWRMODE_PARM_SMART_PS(u1_h2c_set_pwrmode, (rtlpriv->mac80211.p2p) ? ppsc->smart_ps : 1);
+	SET_H2CCMD_PWRMODE_PARM_AWAKE_INTERVAL(u1_h2c_set_pwrmode, ppsc->reg_max_lps_awakeintvl);
+	SET_H2CCMD_PWRMODE_PARM_ALL_QUEUE_UAPSD(u1_h2c_set_pwrmode, 0);
+	if (mode == FW_PS_ACTIVE_MODE)
+		power_state |= FW_PWR_STATE_ACTIVE;
+	else
+		power_state |= FW_PWR_STATE_RF_OFF;
+	SET_H2CCMD_PWRMODE_PARM_PWR_STATE(u1_h2c_set_pwrmode, power_state);
+
+	RT_PRINT_DATA(rtlpriv, COMP_CMD, DBG_DMESG,
+		      "rtl8821ae_set_fw_pwrmode_cmd(): u1_h2c_set_pwrmode\n",
+		      u1_h2c_set_pwrmode, H2C_8821AE_PWEMODE_LENGTH);
+	rtl8821ae_fill_h2c_cmd(hw, H2C_8821AE_SETPWRMODE, H2C_8821AE_PWEMODE_LENGTH, u1_h2c_set_pwrmode);
+
+}
+
+void rtl8821ae_set_fw_joinbss_report_cmd(struct ieee80211_hw *hw, u8 mstatus)
+{
+	u8 u1_joinbssrpt_parm[1] = { 0 };
+
+	SET_H2CCMD_JOINBSSRPT_PARM_OPMODE(u1_joinbssrpt_parm, mstatus);
+
+	rtl8821ae_fill_h2c_cmd(hw, H2C_8821AE_JOINBSSRPT, 1, u1_joinbssrpt_parm);
+}
+
+void rtl8821ae_set_fw_ap_off_load_cmd(struct ieee80211_hw *hw,  u8 ap_offload_enable)
+{
+	struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
+	u8 u1_apoffload_parm[H2C_8821AE_AP_OFFLOAD_LENGTH] = { 0 };
+
+	SET_H2CCMD_AP_OFFLOAD_ON(u1_apoffload_parm, ap_offload_enable);
+	SET_H2CCMD_AP_OFFLOAD_HIDDEN(u1_apoffload_parm, mac->bhiddenssid);
+	SET_H2CCMD_AP_OFFLOAD_DENYANY(u1_apoffload_parm, 0);
+
+	rtl8821ae_fill_h2c_cmd(hw, H2C_8821AE_AP_OFFLOAD, H2C_8821AE_AP_OFFLOAD_LENGTH, u1_apoffload_parm);
+
+}
+
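+/*
+ * Send a command packet (e.g. the reserved-page image) through the beacon
+ * TX ring: drop any packet already queued there, fill a TX command
+ * descriptor for the new skb and kick TX polling.
+ */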
+static bool _rtl8821ae_cmd_send_packet(struct ieee80211_hw *hw,
+				struct sk_buff *skb)
+{
+	struct rtl_priv *rtlpriv = rtl_priv(hw);
+	struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
+	struct rtl8192_tx_ring *ring;
+	struct rtl_tx_desc *pdesc;
+	u8 own;
+	unsigned long flags;
+	struct sk_buff *pskb = NULL;
+
+	ring = &rtlpci->tx_ring[BEACON_QUEUE];
+
+	pskb = __skb_dequeue(&ring->queue);
+	if (pskb)
+		kfree_skb(pskb);
+
+	spin_lock_irqsave(&rtlpriv->locks.irq_th_lock, flags);
+
+	pdesc = &ring->desc[0];
+	own = (u8) rtlpriv->cfg->ops->get_desc((u8 *) pdesc, true, HW_DESC_OWN);
+
+	rtlpriv->cfg->ops->fill_tx_cmddesc(hw, (u8 *) pdesc, 1, 1, skb);
+
+	__skb_queue_tail(&ring->queue, skb);
+
+	spin_unlock_irqrestore(&rtlpriv->locks.irq_th_lock, flags);
+
+	rtlpriv->cfg->ops->tx_polling(hw, BEACON_QUEUE);
+
+	return true;
+}
+
+#define BEACON_PG		0 /* ->1 */
+#define PSPOLL_PG		2
+#define NULL_PG			3
+#define PROBERSP_PG		4 /* ->5 */
+
+#define BEACON_PG_8812		0
+#define PSPOLL_PG_8812		1
+#define NULL_PG_8812			2
+#define PROBERSP_PG_8812		3
+
+#define BEACON_PG_8821		0
+#define PSPOLL_PG_8821		1
+#define NULL_PG_8821			2
+#define PROBERSP_PG_8821		3
+
+#define TOTAL_RESERVED_PKT_LEN_8812	2048
+#define TOTAL_RESERVED_PKT_LEN_8821	1024
+
+
+static u8 reserved_page_packet_8821[TOTAL_RESERVED_PKT_LEN_8821] = {
+	/* page 0 */
+	0x80, 0x00, 0x00, 0x00,  0xff, 0xff, 0xff, 0xff,
+	0xff, 0xff, 0x00, 0xe0,  0x4c, 0x02, 0xe2, 0x64,
+	0x40, 0x16, 0x9f, 0x23,  0xd4, 0x46, 0x20, 0x00,
+	0x00, 0x00, 0x00, 0x00,  0x00, 0x00, 0x00, 0x00,
+	0x64, 0x00, 0x20, 0x04,  0x00, 0x06, 0x64, 0x6c,
+	0x69, 0x6e, 0x6b, 0x31,  0x01, 0x08, 0x82, 0x84,
+	0x8b, 0x96, 0x0c, 0x18,  0x30, 0x48, 0x03, 0x01,
+	0x0b, 0x06, 0x02, 0x00,  0x00, 0x2a, 0x01, 0x8b,
+	0x32, 0x04, 0x12, 0x24,  0x60, 0x6c, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,  0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,  0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,  0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,  0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,  0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,  0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,  0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,  0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,  0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,  0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,  0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,  0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,  0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,  0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,  0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,  0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,  0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,  0x00, 0x00, 0x00, 0x00,
+	0x10, 0x00, 0x28, 0x8c,  0x00, 0x12, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,  0x00, 0x81, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,  0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,  0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,  0x00, 0x00, 0x00, 0x00,
+	/* page 1 */
+	0xa4, 0x10, 0x01, 0xc0,  0x40, 0x16, 0x9f, 0x23,
+	0xd4, 0x46, 0x00, 0xe0,  0x4c, 0x02, 0xe2, 0x64,
+	0x00, 0x00, 0x00, 0x00,  0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,  0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,  0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,  0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,  0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,  0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,  0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,  0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,  0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,  0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,  0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,  0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,  0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,  0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,  0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,  0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,  0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,  0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,  0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,  0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,  0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,  0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,  0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,  0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,  0x00, 0x00, 0x00, 0x00,
+	0x18, 0x00, 0x28, 0x8c,  0x00, 0x12, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,  0x00, 0x01, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,  0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,  0x00, 0x00, 0x00, 0x00,
+	0x00, 0x80, 0x00, 0x00,  0x00, 0x00, 0x00, 0x00,
+	/* page 2 */
+	0x48, 0x01, 0x00, 0x00,  0x40, 0x16, 0x9f, 0x23,
+	0xd4, 0x46, 0x00, 0xe0,  0x4c, 0x02, 0xe2, 0x64,
+	0x40, 0x16, 0x9f, 0x23,  0xd4, 0x46, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,  0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,  0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,  0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,  0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,  0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,  0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,  0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,  0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,  0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,  0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,  0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,  0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,  0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,  0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,  0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,  0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,  0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,  0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,  0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,  0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,  0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,  0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,  0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,  0x00, 0x00, 0x00, 0x00,
+	0x1a, 0x00, 0x28, 0x8c,  0x00, 0x12, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,  0x00, 0x01, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,  0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,  0x00, 0x00, 0x00, 0x00,
+	0x00, 0x80, 0x00, 0x00,  0x00, 0x00, 0x00, 0x00,
+	/* page 3 */
+	0xc8, 0x01, 0x00, 0x00,  0x40, 0x16, 0x9f, 0x23,
+	0xd4, 0x46, 0x00, 0xe0,  0x4c, 0x02, 0xe2, 0x64,
+	0x40, 0x16, 0x9f, 0x23,  0xd4, 0x46, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,  0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,  0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,  0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,  0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,  0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,  0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,  0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,  0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,  0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,  0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,  0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,  0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,  0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,  0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,  0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,  0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,  0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,  0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,  0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,  0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,  0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,  0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,  0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,  0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,  0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,  0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,  0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,  0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,  0x00, 0x00, 0x00, 0x00,
+};
+
+
+static u8 reserved_page_packet_8812[TOTAL_RESERVED_PKT_LEN_8812] = {
+ 	0x80, 0x00, 0x00, 0x00,  0xFF, 0xFF, 0xFF, 0xFF,
+	0xFF, 0xFF, 0x00, 0xE0,  0x4C, 0x02, 0x53, 0xE5,
+	0xE0, 0x46, 0x9A, 0x57,  0x71, 0x30, 0x20, 0x00,
+	0x00, 0x00, 0x00, 0x00,  0x00, 0x00, 0x00, 0x00,
+	0x64, 0x00, 0x30, 0x04,  0x00, 0x0C, 0x4E, 0x45,
+	0x54, 0x47, 0x45, 0x41,  0x52, 0x5F, 0x31, 0x35,
+	0x30, 0x4E, 0x01, 0x08,  0x82, 0x84, 0x8B, 0x96,
+	0x0C, 0x12, 0x18, 0x24,  0x03, 0x01, 0x03, 0x06,
+	0x02, 0x00, 0x00, 0x2A,  0x01, 0x8A, 0x32, 0x04,
+	0x30, 0x48, 0x60, 0x6C,  0xDD, 0x18, 0x00, 0x50,
+	0xF2, 0x01, 0x01, 0x00,  0x00, 0x50, 0xF2, 0x02,
+	0x01, 0x00, 0x00, 0x50,  0xF2, 0x02, 0x01, 0x00,
+	0x00, 0x50, 0xF2, 0x02,  0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,  0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,  0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,  0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,  0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,  0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,  0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,  0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,  0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,  0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,  0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,  0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,  0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,  0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,  0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,  0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,  0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,  0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,  0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,  0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,  0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,  0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,  0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,  0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,  0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,  0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,  0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,  0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,  0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,  0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,  0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,  0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,  0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,  0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,  0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,  0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,  0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,  0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,  0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,  0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,  0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,  0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,  0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,  0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,  0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,  0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,  0x00, 0x00, 0x00, 0x00,
+	0x10, 0x00, 0x28, 0x8C,  0x00, 0x12, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,  0x00, 0x81, 0x00, 0x00,
+	0x04, 0x00, 0x00, 0x00,  0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,  0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,  0x00, 0x00, 0x00, 0x00,
+
+	0xA4, 0x10, 0x02, 0xC0,  0xE0, 0x46, 0x9A, 0x57,
+	0x71, 0x30, 0x00, 0xE0,  0x4C, 0x02, 0x53, 0xE5,
+	0x00, 0x00, 0x00, 0x00,  0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,  0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,  0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,  0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,  0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,  0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,  0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,  0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,  0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,  0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,  0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,  0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,  0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,  0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,  0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,  0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,  0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,  0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,  0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,  0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,  0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,  0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,  0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,  0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,  0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,  0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,  0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,  0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,  0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,  0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,  0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,  0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,  0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,  0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,  0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,  0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,  0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,  0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,  0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,  0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,  0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,  0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,  0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,  0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,  0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,  0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,  0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,  0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,  0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,  0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,  0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,  0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,  0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,  0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,  0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,  0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,  0x00, 0x00, 0x00, 0x00,
+	0x18, 0x00, 0x28, 0x8C,  0x00, 0x12, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,  0x00, 0x01, 0x00, 0x00,
+	0x04, 0x00, 0x00, 0x00,  0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,  0x00, 0x00, 0x00, 0x00,
+	0x00, 0x80, 0x00, 0x00,  0x00, 0x00, 0x00, 0x00,
+
+	0x48, 0x01, 0x00, 0x00,  0xE0, 0x46, 0x9A, 0x57,
+	0x71, 0x30, 0x00, 0xE0,  0x4C, 0x02, 0x53, 0xE5,
+	0xE0, 0x46, 0x9A, 0x57,  0x71, 0x30, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,  0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,  0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,  0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,  0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,  0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,  0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,  0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,  0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,  0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,  0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,  0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,  0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,  0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,  0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,  0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,  0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,  0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,  0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,  0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,  0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,  0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,  0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,  0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,  0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,  0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,  0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,  0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,  0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,  0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,  0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,  0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,  0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,  0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,  0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,  0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,  0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,  0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,  0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,  0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,  0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,  0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,  0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,  0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,  0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,  0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,  0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,  0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,  0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,  0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,  0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,  0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,  0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,  0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,  0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,  0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,  0x00, 0x00, 0x00, 0x00,
+	0x1A, 0x00, 0x28, 0x8C,  0x00, 0x12, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,  0x00, 0x01, 0x00, 0x00,
+	0x04, 0x00, 0x00, 0x00,  0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,  0x00, 0x00, 0x00, 0x00,
+	0x00, 0x80, 0x00, 0x00,  0x00, 0x00, 0x00, 0x00,
+
+	0xC8, 0x01, 0x00, 0x00,  0xE0, 0x46, 0x9A, 0x57,
+	0x71, 0x30, 0x00, 0xE0,  0x4C, 0x02, 0x53, 0xE5,
+	0xE0, 0x46, 0x9A, 0x57,  0x71, 0x30, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,  0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,  0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,  0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,  0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,  0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,  0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,  0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,  0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,  0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,  0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,  0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,  0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,  0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,  0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,  0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,  0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,  0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,  0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,  0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,  0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,  0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,  0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,  0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,  0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,  0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,  0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,  0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,  0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,  0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,  0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,  0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,  0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,  0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,  0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,  0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,  0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,  0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,  0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,  0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,  0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,  0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,  0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,  0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,  0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,  0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,  0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,  0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,  0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,  0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,  0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,  0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,  0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,  0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,  0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,  0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,  0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,  0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,  0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,  0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,  0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,  0x00, 0x00, 0x00, 0x00,
+};
+
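+/*
+ * Download the 8812AE reserved-page packet (beacon, PS-Poll, null data and
+ * probe response templates, one 512-byte page each) through the beacon
+ * queue, then tell the firmware where each template lives via the
+ * RSVDPAGE H2C command.
+ */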
+void rtl8812ae_set_fw_rsvdpagepkt(struct ieee80211_hw *hw, bool b_dl_finished)
+{
+	struct rtl_priv *rtlpriv = rtl_priv(hw);
+	struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
+	struct sk_buff *skb = NULL;
+
+	u32 totalpacketlen;
+	bool rtstatus;
+	u8 u1RsvdPageLoc[5] = { 0 };
+	bool b_dlok = false;
+
+	u8 *beacon;
+	u8 *p_pspoll;
+	u8 *nullfunc;
+	u8 *p_probersp;
+	/*---------------------------------------------------------
+				(1) beacon
+	---------------------------------------------------------*/
+	beacon = &reserved_page_packet_8812[BEACON_PG_8812 * 512];
+	SET_80211_HDR_ADDRESS2(beacon, mac->mac_addr);
+	SET_80211_HDR_ADDRESS3(beacon, mac->bssid);
+
+	/*-------------------------------------------------------
+				(2) ps-poll
+	--------------------------------------------------------*/
+	p_pspoll = &reserved_page_packet_8812[PSPOLL_PG_8812 * 512];
+	SET_80211_PS_POLL_AID(p_pspoll, (mac->assoc_id | 0xc000));
+	SET_80211_PS_POLL_BSSID(p_pspoll, mac->bssid);
+	SET_80211_PS_POLL_TA(p_pspoll, mac->mac_addr);
+
+	SET_H2CCMD_RSVDPAGE_LOC_PSPOLL(u1RsvdPageLoc, PSPOLL_PG_8812);
+
+	/*--------------------------------------------------------
+				(3) null data
+	---------------------------------------------------------*/
+	nullfunc = &reserved_page_packet_8812[NULL_PG_8812 * 512];
+	SET_80211_HDR_ADDRESS1(nullfunc, mac->bssid);
+	SET_80211_HDR_ADDRESS2(nullfunc, mac->mac_addr);
+	SET_80211_HDR_ADDRESS3(nullfunc, mac->bssid);
+
+	SET_H2CCMD_RSVDPAGE_LOC_NULL_DATA(u1RsvdPageLoc, NULL_PG_8812);
+
+	/*---------------------------------------------------------
+				(4) probe response
+	----------------------------------------------------------*/
+	p_probersp = &reserved_page_packet_8812[PROBERSP_PG_8812 * 512];
+	SET_80211_HDR_ADDRESS1(p_probersp, mac->bssid);
+	SET_80211_HDR_ADDRESS2(p_probersp, mac->mac_addr);
+	SET_80211_HDR_ADDRESS3(p_probersp, mac->bssid);
+
+	SET_H2CCMD_RSVDPAGE_LOC_PROBE_RSP(u1RsvdPageLoc, PROBERSP_PG_8812);
+
+	totalpacketlen = TOTAL_RESERVED_PKT_LEN_8812;
+
+	RT_PRINT_DATA(rtlpriv, COMP_CMD, DBG_LOUD,
+		      "rtl8812ae_set_fw_rsvdpagepkt(): HW_VAR_SET_TX_CMD: ALL\n",
+		      &reserved_page_packet_8812[0], totalpacketlen);
+	RT_PRINT_DATA(rtlpriv, COMP_CMD, DBG_DMESG,
+		      "rtl8812ae_set_fw_rsvdpagepkt(): HW_VAR_SET_TX_CMD: ALL\n",
+		      u1RsvdPageLoc, 3);
+
+
+	skb = dev_alloc_skb(totalpacketlen);
+	if (!skb)
+		return;
+	memcpy((u8 *) skb_put(skb, totalpacketlen),
+	       &reserved_page_packet_8812, totalpacketlen);
+
+	rtstatus = _rtl8821ae_cmd_send_packet(hw, skb);
+
+	if (rtstatus)
+		b_dlok = true;
+
+	if (b_dlok) {
+		RT_TRACE(COMP_POWER, DBG_LOUD,
+			 ("Set RSVD page location to Fw.\n"));
+		RT_PRINT_DATA(rtlpriv, COMP_CMD, DBG_DMESG,
+				"H2C_RSVDPAGE:\n",
+				u1RsvdPageLoc, 3);
+		rtl8821ae_fill_h2c_cmd(hw, H2C_8821AE_RSVDPAGE,
+				    sizeof(u1RsvdPageLoc), u1RsvdPageLoc);
+	} else
+		RT_TRACE(COMP_ERR, DBG_WARNING,
+			 ("Set RSVD page location to Fw FAIL!!!!!!.\n"));
+}
+
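+/* 8821AE variant of the reserved-page download; its pages are 256 bytes. */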
+void rtl8821ae_set_fw_rsvdpagepkt(struct ieee80211_hw *hw, bool b_dl_finished)
+{
+	struct rtl_priv *rtlpriv = rtl_priv(hw);
+	struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
+	struct sk_buff *skb = NULL;
+
+	u32 totalpacketlen;
+	bool rtstatus;
+	u8 u1RsvdPageLoc[5] = { 0 };
+	bool b_dlok = false;
+
+	u8 *beacon;
+	u8 *p_pspoll;
+	u8 *nullfunc;
+	u8 *p_probersp;
+	/*---------------------------------------------------------
+				(1) beacon
+	---------------------------------------------------------*/
+	beacon = &reserved_page_packet_8821[BEACON_PG_8821 * 256];
+	SET_80211_HDR_ADDRESS2(beacon, mac->mac_addr);
+	SET_80211_HDR_ADDRESS3(beacon, mac->bssid);
+
+	/*-------------------------------------------------------
+				(2) ps-poll
+	--------------------------------------------------------*/
+	p_pspoll = &reserved_page_packet_8821[PSPOLL_PG_8821 * 256];
+	SET_80211_PS_POLL_AID(p_pspoll, (mac->assoc_id | 0xc000));
+	SET_80211_PS_POLL_BSSID(p_pspoll, mac->bssid);
+	SET_80211_PS_POLL_TA(p_pspoll, mac->mac_addr);
+
+	SET_H2CCMD_RSVDPAGE_LOC_PSPOLL(u1RsvdPageLoc, PSPOLL_PG_8821);
+
+	/*--------------------------------------------------------
+				(3) null data
+	---------------------------------------------------------*/
+	nullfunc = &reserved_page_packet_8821[NULL_PG_8821 * 256];
+	SET_80211_HDR_ADDRESS1(nullfunc, mac->bssid);
+	SET_80211_HDR_ADDRESS2(nullfunc, mac->mac_addr);
+	SET_80211_HDR_ADDRESS3(nullfunc, mac->bssid);
+
+	SET_H2CCMD_RSVDPAGE_LOC_NULL_DATA(u1RsvdPageLoc, NULL_PG_8821);
+
+	/*---------------------------------------------------------
+				(4) probe response
+	----------------------------------------------------------*/
+	p_probersp = &reserved_page_packet_8821[PROBERSP_PG_8821 * 256];
+	SET_80211_HDR_ADDRESS1(p_probersp, mac->bssid);
+	SET_80211_HDR_ADDRESS2(p_probersp, mac->mac_addr);
+	SET_80211_HDR_ADDRESS3(p_probersp, mac->bssid);
+
+	SET_H2CCMD_RSVDPAGE_LOC_PROBE_RSP(u1RsvdPageLoc, PROBERSP_PG_8821);
+
+	totalpacketlen = TOTAL_RESERVED_PKT_LEN_8821;
+
+	RT_PRINT_DATA(rtlpriv, COMP_CMD, DBG_LOUD,
+		      "rtl8821ae_set_fw_rsvdpagepkt(): HW_VAR_SET_TX_CMD: ALL \n",
+		      &reserved_page_packet_8821[0], totalpacketlen);
+	RT_PRINT_DATA(rtlpriv, COMP_CMD, DBG_DMESG,
+		      "rtl8821ae_set_fw_rsvdpagepkt(): HW_VAR_SET_TX_CMD: ALL \n",
+		      u1RsvdPageLoc, 3);
+
+
+	skb = dev_alloc_skb(totalpacketlen);
+	if (!skb)
+		return;
+	memcpy((u8 *) skb_put(skb, totalpacketlen),
+	       &reserved_page_packet_8821, totalpacketlen);
+
+	rtstatus = _rtl8821ae_cmd_send_packet(hw, skb);
+
+	if (rtstatus)
+		b_dlok = true;
+
+	if (b_dlok) {
+		RT_TRACE(COMP_POWER, DBG_LOUD,
+			 ("Set RSVD page location to Fw.\n"));
+		RT_PRINT_DATA(rtlpriv, COMP_CMD, DBG_DMESG,
+				"H2C_RSVDPAGE:\n",
+				u1RsvdPageLoc, 3);
+		rtl8821ae_fill_h2c_cmd(hw, H2C_8821AE_RSVDPAGE,
+				    sizeof(u1RsvdPageLoc), u1RsvdPageLoc);
+	} else
+		RT_TRACE(COMP_ERR, DBG_WARNING,
+			 ("Set RSVD page location to Fw FAIL!!!!!!.\n"));
+}
+
+/* Should check whether the FW supports P2P or not. */
+void rtl8821ae_set_p2p_ctw_period_cmd(struct ieee80211_hw *hw, u8 ctwindow)
+{
+	u8 u1_ctwindow_period[1] ={ ctwindow};
+
+	rtl8821ae_fill_h2c_cmd(hw, H2C_8821AE_P2P_PS_CTW_CMD, 1, u1_ctwindow_period);
+
+}
+
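+/*
+ * Program the P2P power-save offload: on enable, push the CTWindow and up
+ * to two NoA descriptors into the NoA registers, then hand the offload
+ * parameters to the firmware via H2C.
+ */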
+void rtl8821ae_set_p2p_ps_offload_cmd(struct ieee80211_hw *hw, u8 p2p_ps_state)
+{
+	struct rtl_priv *rtlpriv = rtl_priv(hw);
+	struct rtl_ps_ctl *rtlps = rtl_psc(rtl_priv(hw));
+	struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
+	struct rtl_p2p_ps_info *p2pinfo = &(rtlps->p2p_ps_info);
+	struct p2p_ps_offload_t *p2p_ps_offload = &rtlhal->p2p_ps_offload;
+	u8	i;
+	u16	ctwindow;
+	u32	start_time, tsf_low;
+
+	switch(p2p_ps_state)
+	{
+		case P2P_PS_DISABLE:
+			RT_TRACE(COMP_FW, DBG_LOUD,("P2P_PS_DISABLE \n"));
+			memset(p2p_ps_offload, 0, 1);
+			break;
+		case P2P_PS_ENABLE:
+			RT_TRACE(COMP_FW, DBG_LOUD,("P2P_PS_ENABLE \n"));
+			/* update CTWindow value. */
+			if( p2pinfo->ctwindow > 0 )
+			{
+				p2p_ps_offload->CTWindow_En = 1;
+				ctwindow = p2pinfo->ctwindow;
+				rtl8821ae_set_p2p_ctw_period_cmd(hw, ctwindow);
+			}
+
+			/* hw only support 2 set of NoA */
+			for( i=0 ; i<p2pinfo->noa_num ; i++)
+			{
+				/* To control the register setting for which NOA*/
+				rtl_write_byte(rtlpriv, 0x5cf, (i << 4));
+				if(i == 0)
+					p2p_ps_offload->NoA0_En = 1;
+				else
+					p2p_ps_offload->NoA1_En = 1;
+
+				/* config P2P NoA Descriptor Register */
+				rtl_write_dword(rtlpriv, 0x5E0, p2pinfo->noa_duration[i]);
+				rtl_write_dword(rtlpriv, 0x5E4, p2pinfo->noa_interval[i]);
+
+				/*Get Current TSF value */
+				tsf_low = rtl_read_dword(rtlpriv, REG_TSFTR);
+
+				start_time = p2pinfo->noa_start_time[i];
+				if(p2pinfo->noa_count_type[i] != 1)
+				{
+					while( start_time <= (tsf_low+(50*1024) ) ) {
+						start_time += p2pinfo->noa_interval[i];
+						if(p2pinfo->noa_count_type[i] != 255)
+							p2pinfo->noa_count_type[i]--;
+					}
+				}
+				rtl_write_dword(rtlpriv, 0x5E8, start_time);
+				rtl_write_dword(rtlpriv, 0x5EC, p2pinfo->noa_count_type[i] );
+
+			}
+
+			if( (p2pinfo->opp_ps == 1) || (p2pinfo->noa_num > 0) )
+			{
+				/* rst p2p circuit */
+				rtl_write_byte(rtlpriv, REG_DUAL_TSF_RST, BIT(4));
+
+				p2p_ps_offload->Offload_En = 1;
+
+				if(P2P_ROLE_GO == rtlpriv->mac80211.p2p)
+				{
+					p2p_ps_offload->role= 1;
+					p2p_ps_offload->AllStaSleep = 0;
+				}
+				else
+				{
+					p2p_ps_offload->role= 0;
+				}
+
+				p2p_ps_offload->discovery = 0;
+			}
+			break;
+		case P2P_PS_SCAN:
+			RT_TRACE(COMP_FW, DBG_LOUD,("P2P_PS_SCAN \n"));
+			p2p_ps_offload->discovery = 1;
+			break;
+		case P2P_PS_SCAN_DONE:
+			RT_TRACE(COMP_FW, DBG_LOUD,("P2P_PS_SCAN_DONE \n"));
+			p2p_ps_offload->discovery = 0;
+			p2pinfo->p2p_ps_state = P2P_PS_ENABLE;
+			break;
+		default:
+			break;
+	}
+
+	rtl8821ae_fill_h2c_cmd(hw, H2C_8821AE_P2P_PS_OFFLOAD, 1, (u8 *)p2p_ps_offload);
+
+}
+
+void rtl8812ae_c2h_ra_report_handler(struct ieee80211_hw *hw,
+				     u8 *cmd_buf, u8 cmd_len)
+{
+	struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
+	u8 rate = cmd_buf[0] & 0x3F;
+
+	rtlhal->current_ra_rate = rtl8812ae_hw_rate_to_mrate(hw, rate);
+
+	rtl8812ae_dm_update_init_rate(hw, rate);
+}
+
+
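+/* Dispatch a received C2H event to its handler based on the command id. */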
+void _rtl8812ae_c2h_content_parsing(struct ieee80211_hw *hw,
+				    u8 c2h_cmd_id, u8 c2h_cmd_len,
+				    u8 *tmp_buf)
+{
+	struct rtl_priv *rtlpriv = rtl_priv(hw);
+
+	switch (c2h_cmd_id) {
+	case C2H_8812_DBG:
+		RT_TRACE(COMP_FW, DBG_LOUD,("[C2H], C2H_8812_DBG!!\n"));
+		break;
+
+	case C2H_8812_RA_RPT:
+		rtl8812ae_c2h_ra_report_handler(hw, tmp_buf, c2h_cmd_len);
+		break;
+
+	default:
+		break;
+	}
+
+}
+
+void rtl8812ae_c2h_packet_handler(struct ieee80211_hw *hw, u8 *buffer,
+				  u8 length)
+{
+	struct rtl_priv *rtlpriv = rtl_priv(hw);
+	u8 c2h_cmd_id = 0, c2h_cmd_seq = 0, c2h_cmd_len = 0;
+	u8 *tmp_buf = NULL;
+
+	c2h_cmd_id = buffer[0];
+	c2h_cmd_seq = buffer[1];
+	c2h_cmd_len = length - 2;
+	tmp_buf = buffer + 2;
+
+	RT_TRACE(COMP_FW, DBG_LOUD,
+		("[C2H packet], c2hCmdId=0x%x, c2hCmdSeq=0x%x, c2hCmdLen=%d\n",
+		c2h_cmd_id, c2h_cmd_seq, c2h_cmd_len));
+
+	RT_PRINT_DATA(rtlpriv, COMP_FW, DBG_LOUD,
+		"[C2H packet], Content Hex:\n", tmp_buf, c2h_cmd_len);
+	_rtl8812ae_c2h_content_parsing(hw, c2h_cmd_id, c2h_cmd_len, tmp_buf);
+}
+
+
diff --git a/drivers/staging/rtl8821ae/rtl8821ae/fw.h b/drivers/staging/rtl8821ae/rtl8821ae/fw.h
new file mode 100644
index 0000000..30eec88
--- /dev/null
+++ b/drivers/staging/rtl8821ae/rtl8821ae/fw.h
@@ -0,0 +1,321 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2009-2010  Realtek Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ * The full GNU General Public License is included in this distribution in the
+ * file called LICENSE.
+ *
+ * Contact Information:
+ * wlanfae <wlanfae@realtek.com>
+ * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
+ * Hsinchu 300, Taiwan.
+ * Larry Finger <Larry.Finger@lwfinger.net>
+ *
+ *****************************************************************************/
+
+#ifndef __RTL8821AE__FW__H__
+#define __RTL8821AE__FW__H__
+
+#define FW_8821AE_SIZE					0x8000
+#define FW_8821AE_START_ADDRESS			0x1000
+#define FW_8821AE_END_ADDRESS			0x5FFF
+#define FW_8821AE_PAGE_SIZE				4096
+#define FW_8821AE_POLLING_DELAY			5
+#define FW_8821AE_POLLING_TIMEOUT_COUNT	6000
+
+#define IS_FW_HEADER_EXIST_8812(_pfwhdr)	\
+	((_pfwhdr->signature&0xFFF0) == 0x9500 )
+
+#define IS_FW_HEADER_EXIST_8821(_pfwhdr)	\
+	((_pfwhdr->signature&0xFFF0) == 0x2100 )
+
+#define USE_OLD_WOWLAN_DEBUG_FW 0
+
+#define H2C_8821AE_RSVDPAGE_LOC_LEN		5
+#define H2C_8821AE_PWEMODE_LENGTH			5
+#define H2C_8821AE_JOINBSSRPT_LENGTH		1
+#define H2C_8821AE_AP_OFFLOAD_LENGTH		3
+#define H2C_8821AE_WOWLAN_LENGTH			3
+#define H2C_8821AE_KEEP_ALIVE_CTRL_LENGTH	3
+#if(USE_OLD_WOWLAN_DEBUG_FW == 0)
+#define H2C_8821AE_REMOTE_WAKE_CTRL_LEN	1
+#else
+#define H2C_8821AE_REMOTE_WAKE_CTRL_LEN	3
+#endif
+#define H2C_8821AE_AOAC_GLOBAL_INFO_LEN	2
+#define H2C_8821AE_AOAC_RSVDPAGE_LOC_LEN	7
+
+
+/* Fw PS state for RPWM.
+*BIT[2:0] = HW state
+*BIT[3] = Protocol PS state,   1: register active state , 0: register sleep state
+*BIT[4] = sub-state
+*/
+#define	FW_PS_GO_ON			BIT(0)
+#define	FW_PS_TX_NULL			BIT(1)
+#define	FW_PS_RF_ON			BIT(2)
+#define	FW_PS_REGISTER_ACTIVE	BIT(3)
+
+#define	FW_PS_DPS    		BIT(0)
+#define	FW_PS_LCLK   		(FW_PS_DPS)
+#define	FW_PS_RF_OFF   		BIT(1)
+#define	FW_PS_ALL_ON   		BIT(2)
+#define	FW_PS_ST_ACTIVE  	BIT(3)
+#define	FW_PS_ISR_ENABLE    	BIT(4)
+#define	FW_PS_IMR_ENABLE	BIT(5)
+
+
+#define	FW_PS_ACK    		BIT(6)
+#define	FW_PS_TOGGLE   		BIT(7)
+
+ /* 8821AE RPWM value*/
+ /* BIT[0] = 1: 32k, 0: 40M*/
+#define	FW_PS_CLOCK_OFF		BIT(0)		/* 32k*/
+#define	FW_PS_CLOCK_ON		0		/*40M*/
+
+#define	FW_PS_STATE_MASK  		(0x0F)
+#define	FW_PS_STATE_HW_MASK 	(0x07)
+#define	FW_PS_STATE_INT_MASK 	(0x3F)	/*ISR_ENABLE, IMR_ENABLE, and PS mode should be inherited.*/
+
+#define	FW_PS_STATE(x)   		(FW_PS_STATE_MASK & (x))
+#define	FW_PS_STATE_HW(x) 		(FW_PS_STATE_HW_MASK & (x))
+#define	FW_PS_STATE_INT(x)   	(FW_PS_STATE_INT_MASK & (x))
+#define	FW_PS_ISR_VAL(x)		((x) & 0x70)
+#define	FW_PS_IMR_MASK(x)   	((x) & 0xDF)
+#define	FW_PS_KEEP_IMR(x)		((x) & 0x20)
+
+
+#define	FW_PS_STATE_S0		(FW_PS_DPS)
+#define	FW_PS_STATE_S1		(FW_PS_LCLK)
+#define	FW_PS_STATE_S2		(FW_PS_RF_OFF)
+#define	FW_PS_STATE_S3		(FW_PS_ALL_ON)
+#define	FW_PS_STATE_S4		((FW_PS_ST_ACTIVE) | (FW_PS_ALL_ON))
+
+#define	FW_PS_STATE_ALL_ON_8821AE	(FW_PS_CLOCK_ON) /* ((FW_PS_RF_ON) | (FW_PS_REGISTER_ACTIVE))*/
+#define	FW_PS_STATE_RF_ON_8821AE	(FW_PS_CLOCK_ON) /* (FW_PS_RF_ON)*/
+#define	FW_PS_STATE_RF_OFF_8821AE	(FW_PS_CLOCK_ON) /* 0x0*/
+#define	FW_PS_STATE_RF_OFF_LOW_PWR_8821AE	(FW_PS_CLOCK_OFF) /* (FW_PS_STATE_RF_OFF)*/
+
+#define	FW_PS_STATE_ALL_ON_92C	(FW_PS_STATE_S4)
+#define	FW_PS_STATE_RF_ON_92C		(FW_PS_STATE_S3)
+#define	FW_PS_STATE_RF_OFF_92C	(FW_PS_STATE_S2)
+#define	FW_PS_STATE_RF_OFF_LOW_PWR_92C	(FW_PS_STATE_S1)
+
+
+/* For 8821AE H2C PwrMode Cmd ID 5.*/
+#define	FW_PWR_STATE_ACTIVE	((FW_PS_RF_ON) | (FW_PS_REGISTER_ACTIVE))
+#define	FW_PWR_STATE_RF_OFF	0
+
+#define	FW_PS_IS_ACK(x)  		((x) & FW_PS_ACK )
+#define	FW_PS_IS_CLK_ON(x) 		((x) & (FW_PS_RF_OFF |FW_PS_ALL_ON ))
+#define	FW_PS_IS_RF_ON(x)  		((x) & (FW_PS_ALL_ON))
+#define	FW_PS_IS_ACTIVE(x)  		((x) & (FW_PS_ST_ACTIVE))
+#define	FW_PS_IS_CPWM_INT(x)	((x) & 0x40)
+
+#define	FW_CLR_PS_STATE(x) 		((x) = ((x) & (0xF0)))
+
+#define	IS_IN_LOW_POWER_STATE_8821AE(FwPSState)		\
+			(FW_PS_STATE(FwPSState) == FW_PS_CLOCK_OFF)
+
+struct rtl8821a_firmware_header {
+	u16 signature;
+	u8 category;
+	u8 function;
+	u16 version;
+	u8 subversion;
+	u8 rsvd1;
+	u8 month;
+	u8 date;
+	u8 hour;
+	u8 minute;
+	u16 ramcodeSize;
+	u16 rsvd2;
+	u32 svnindex;
+	u32 rsvd3;
+	u32 rsvd4;
+	u32 rsvd5;
+};
+
+enum rtl8812_c2h_evt{
+	C2H_8812_DBG = 0,
+	C2H_8812_LB = 1,
+	C2H_8812_TXBF = 2,
+	C2H_8812_TX_REPORT = 3,
+	C2H_8812_BT_INFO = 9,
+	C2H_8812_BT_MP = 11,
+	C2H_8812_RA_RPT = 12,
+
+	C2H_8812_FW_SWCHNL = 0x10,
+	C2H_8812_IQK_FINISH = 0x11,
+	MAX_8812_C2HEVENT
+};
+
+enum rtl8821a_h2c_cmd {
+	H2C_8821AE_RSVDPAGE = 0,
+	H2C_8821AE_JOINBSSRPT = 1,
+	H2C_8821AE_SCAN = 2,
+	H2C_8821AE_KEEP_ALIVE_CTRL = 3,
+	H2C_8821AE_DISCONNECT_DECISION = 4,
+#if(USE_OLD_WOWLAN_DEBUG_FW == 1)
+	H2C_8821AE_WO_WLAN = 5,
+#endif
+	H2C_8821AE_INIT_OFFLOAD = 6,
+#if(USE_OLD_WOWLAN_DEBUG_FW == 1)
+	H2C_8821AE_REMOTE_WAKE_CTRL = 7,
+#endif
+	H2C_8821AE_AP_OFFLOAD = 8,
+	H2C_8821AE_BCN_RSVDPAGE = 9,
+	H2C_8821AE_PROBERSP_RSVDPAGE = 10,
+
+	H2C_8821AE_SETPWRMODE = 0x20,
+	H2C_8821AE_PS_TUNING_PARA = 0x21,
+	H2C_8821AE_PS_TUNING_PARA2 = 0x22,
+	H2C_8821AE_PS_LPS_PARA = 0x23,
+	H2C_8821AE_P2P_PS_OFFLOAD = 0x24,
+
+#if(USE_OLD_WOWLAN_DEBUG_FW == 0)
+	H2C_8821AE_WO_WLAN = 0x80,
+	H2C_8821AE_REMOTE_WAKE_CTRL = 0x81,
+	H2C_8821AE_AOAC_GLOBAL_INFO = 0x82,
+	H2C_8821AE_AOAC_RSVDPAGE = 0x83,
+#endif
+	H2C_RSSI_REPORT = 0x42,
+	H2C_8821AE_RA_MASK = 0x40,
+	H2C_8821AE_SELECTIVE_SUSPEND_ROF_CMD,
+	H2C_8821AE_P2P_PS_MODE,
+	H2C_8821AE_PSD_RESULT,
+	/*Not defined CTW CMD for P2P yet*/
+	H2C_8821AE_P2P_PS_CTW_CMD,
+	MAX_8821AE_H2CCMD
+};
+
+#define pagenum_128(_len)		(u32)(((_len)>>7) + ((_len)&0x7F ? 1:0))
+
+#define SET_8821AE_H2CCMD_WOWLAN_FUNC_ENABLE(__pH2CCmd, __Value)		\
+	SET_BITS_TO_LE_1BYTE(__pH2CCmd, 0, 1, __Value)
+#define SET_8821AE_H2CCMD_WOWLAN_PATTERN_MATCH_ENABLE(__pH2CCmd, __Value)		\
+	SET_BITS_TO_LE_1BYTE(__pH2CCmd, 1, 1, __Value)
+#define SET_8821AE_H2CCMD_WOWLAN_MAGIC_PKT_ENABLE(__pH2CCmd, __Value)		\
+	SET_BITS_TO_LE_1BYTE(__pH2CCmd, 2, 1, __Value)
+#define SET_8821AE_H2CCMD_WOWLAN_UNICAST_PKT_ENABLE(__pH2CCmd, __Value)		\
+	SET_BITS_TO_LE_1BYTE(__pH2CCmd, 3, 1, __Value)
+#define SET_8821AE_H2CCMD_WOWLAN_ALL_PKT_DROP(__pH2CCmd, __Value)		\
+	SET_BITS_TO_LE_1BYTE(__pH2CCmd, 4, 1, __Value)
+#define SET_8821AE_H2CCMD_WOWLAN_GPIO_ACTIVE(__pH2CCmd, __Value)				\
+	SET_BITS_TO_LE_1BYTE(__pH2CCmd, 5, 1, __Value)
+#define SET_8821AE_H2CCMD_WOWLAN_REKEY_WAKE_UP(__pH2CCmd, __Value)		\
+	SET_BITS_TO_LE_1BYTE(__pH2CCmd, 6, 1, __Value)
+#define SET_8821AE_H2CCMD_WOWLAN_DISCONNECT_WAKE_UP(__pH2CCmd, __Value)		\
+	SET_BITS_TO_LE_1BYTE(__pH2CCmd, 7, 1, __Value)
+#define SET_8821AE_H2CCMD_WOWLAN_GPIONUM(__pH2CCmd, __Value)					\
+	SET_BITS_TO_LE_1BYTE((__pH2CCmd)+1, 0, 8, __Value)
+#define SET_8821AE_H2CCMD_WOWLAN_GPIO_DURATION(__pH2CCmd, __Value)			\
+	SET_BITS_TO_LE_1BYTE((__pH2CCmd)+2, 0, 8, __Value)
+
+
+#define SET_H2CCMD_PWRMODE_PARM_MODE(__ph2ccmd, __val)			\
+	SET_BITS_TO_LE_1BYTE(__ph2ccmd, 0, 8, __val)
+#define SET_H2CCMD_PWRMODE_PARM_RLBM(__pH2CCmd, __Value)		\
+	SET_BITS_TO_LE_1BYTE((__pH2CCmd)+1, 0, 4, __Value)
+#define SET_H2CCMD_PWRMODE_PARM_SMART_PS(__pH2CCmd, __Value)	\
+	SET_BITS_TO_LE_1BYTE((__pH2CCmd)+1, 4, 4, __Value)
+#define SET_H2CCMD_PWRMODE_PARM_AWAKE_INTERVAL(__pH2CCmd, __Value)	\
+	SET_BITS_TO_LE_1BYTE((__pH2CCmd)+2, 0, 8, __Value)
+#define SET_H2CCMD_PWRMODE_PARM_ALL_QUEUE_UAPSD(__pH2CCmd, __Value)		\
+	SET_BITS_TO_LE_1BYTE((__pH2CCmd)+3, 0, 8, __Value)
+#define SET_H2CCMD_PWRMODE_PARM_PWR_STATE(__pH2CCmd, __Value)	\
+	SET_BITS_TO_LE_1BYTE((__pH2CCmd)+4, 0, 8, __Value)
+#define GET_8821AE_H2CCMD_PWRMODE_PARM_MODE(__pH2CCmd)		\
+	LE_BITS_TO_1BYTE(__pH2CCmd, 0, 8)
+
+#define SET_H2CCMD_JOINBSSRPT_PARM_OPMODE(__ph2ccmd, __val)		\
+	SET_BITS_TO_LE_1BYTE(__ph2ccmd, 0, 8, __val)
+#define SET_H2CCMD_RSVDPAGE_LOC_PROBE_RSP(__ph2ccmd, __val)		\
+	SET_BITS_TO_LE_1BYTE(__ph2ccmd, 0, 8, __val)
+#define SET_H2CCMD_RSVDPAGE_LOC_PSPOLL(__ph2ccmd, __val)		\
+	SET_BITS_TO_LE_1BYTE((__ph2ccmd)+1, 0, 8, __val)
+#define SET_H2CCMD_RSVDPAGE_LOC_NULL_DATA(__ph2ccmd, __val)		\
+	SET_BITS_TO_LE_1BYTE((__ph2ccmd)+2, 0, 8, __val)
+
+/* AP_OFFLOAD */
+#define SET_H2CCMD_AP_OFFLOAD_ON(__pH2CCmd, __Value)	\
+	SET_BITS_TO_LE_1BYTE(__pH2CCmd, 0, 8, __Value)
+#define SET_H2CCMD_AP_OFFLOAD_HIDDEN(__pH2CCmd, __Value)		\
+	SET_BITS_TO_LE_1BYTE((__pH2CCmd)+1, 0, 8, __Value)
+#define SET_H2CCMD_AP_OFFLOAD_DENYANY(__pH2CCmd, __Value)	\
+	SET_BITS_TO_LE_1BYTE((__pH2CCmd)+2, 0, 8, __Value)
+#define SET_H2CCMD_AP_OFFLOAD_WAKEUP_EVT_RPT(__pH2CCmd, __Value)		\
+	SET_BITS_TO_LE_1BYTE((__pH2CCmd)+3, 0, 8, __Value)
+
+/* Keep Alive Control*/
+#define SET_8821AE_H2CCMD_KEEP_ALIVE_ENABLE(__pH2CCmd, __Value)				\
+	SET_BITS_TO_LE_1BYTE(__pH2CCmd, 0, 1, __Value)
+#define SET_8821AE_H2CCMD_KEEP_ALIVE_ACCPEPT_USER_DEFINED(__pH2CCmd, __Value)	\
+	SET_BITS_TO_LE_1BYTE(__pH2CCmd, 1, 1, __Value)
+#define SET_8821AE_H2CCMD_KEEP_ALIVE_PERIOD(__pH2CCmd, __Value)				\
+	SET_BITS_TO_LE_1BYTE((__pH2CCmd)+1, 0, 8, __Value)
+
+/*REMOTE_WAKE_CTRL */
+#define SET_8821AE_H2CCMD_REMOTE_WAKE_CTRL_EN(__pH2CCmd, __Value)				\
+	SET_BITS_TO_LE_1BYTE(__pH2CCmd, 0, 1, __Value)
+#if(USE_OLD_WOWLAN_DEBUG_FW == 0)
+#define SET_8821AE_H2CCMD_REMOTE_WAKE_CTRL_ARP_OFFLOAD_EN(__pH2CCmd, __Value)				\
+	SET_BITS_TO_LE_1BYTE(__pH2CCmd, 1, 1, __Value)
+#define SET_8821AE_H2CCMD_REMOTE_WAKE_CTRL_NDP_OFFLOAD_EN(__pH2CCmd, __Value)				\
+	SET_BITS_TO_LE_1BYTE(__pH2CCmd, 2, 1, __Value)
+#define SET_8821AE_H2CCMD_REMOTE_WAKE_CTRL_GTK_OFFLOAD_EN(__pH2CCmd, __Value)				\
+	SET_BITS_TO_LE_1BYTE(__pH2CCmd, 3, 1, __Value)
+#else
+#define SET_8821AE_H2CCMD_REMOTE_WAKE_CTRL_PAIRWISE_ENC_ALG(__pH2CCmd, __Value)		\
+	SET_BITS_TO_LE_1BYTE((__pH2CCmd)+1, 0, 8, __Value)
+#define SET_8821AE_H2CCMD_REMOTE_WAKE_CTRL_GROUP_ENC_ALG(__pH2CCmd, __Value)		\
+	SET_BITS_TO_LE_1BYTE((__pH2CCmd)+2, 0, 8, __Value)
+#endif
+
+/* GTK_OFFLOAD */
+#define SET_8821AE_H2CCMD_AOAC_GLOBAL_INFO_PAIRWISE_ENC_ALG(__pH2CCmd, __Value)		\
+	SET_BITS_TO_LE_1BYTE(__pH2CCmd, 0, 8, __Value)
+#define SET_8821AE_H2CCMD_AOAC_GLOBAL_INFO_GROUP_ENC_ALG(__pH2CCmd, __Value)		\
+	SET_BITS_TO_LE_1BYTE((__pH2CCmd)+1, 0, 8, __Value)
+
+/* AOAC_RSVDPAGE_LOC */
+#define SET_8821AE_H2CCMD_AOAC_RSVDPAGE_LOC_REMOTE_WAKE_CTRL_INFO(__pH2CCmd, __Value)		\
+	SET_BITS_TO_LE_1BYTE((__pH2CCmd), 0, 8, __Value)
+#define SET_8821AE_H2CCMD_AOAC_RSVDPAGE_LOC_ARP_RSP(__pH2CCmd, __Value)		\
+	SET_BITS_TO_LE_1BYTE((__pH2CCmd)+1, 0, 8, __Value)
+#define SET_8821AE_H2CCMD_AOAC_RSVDPAGE_LOC_NEIGHBOR_ADV(__pH2CCmd, __Value)		\
+	SET_BITS_TO_LE_1BYTE((__pH2CCmd)+2, 0, 8, __Value)
+#define SET_8821AE_H2CCMD_AOAC_RSVDPAGE_LOC_GTK_RSP(__pH2CCmd, __Value)		\
+	SET_BITS_TO_LE_1BYTE((__pH2CCmd)+3, 0, 8, __Value)
+#define SET_8821AE_H2CCMD_AOAC_RSVDPAGE_LOC_GTK_INFO(__pH2CCmd, __Value)			\
+	SET_BITS_TO_LE_1BYTE((__pH2CCmd)+4, 0, 8, __Value)
+
+int rtl8821ae_download_fw(struct ieee80211_hw *hw,
+				bool buse_wake_on_wlan_fw);
+void rtl8821ae_fill_h2c_cmd(struct ieee80211_hw *hw, u8 element_id,
+			 u32 cmd_len, u8 *p_cmdbuffer);
+void rtl8821ae_firmware_selfreset(struct ieee80211_hw *hw);
+void rtl8821ae_set_fw_pwrmode_cmd(struct ieee80211_hw *hw, u8 mode);
+void rtl8821ae_set_fw_joinbss_report_cmd(struct ieee80211_hw *hw, u8 mstatus);
+void rtl8821ae_set_fw_ap_off_load_cmd(struct ieee80211_hw *hw,  u8 ap_offload_enable);
+void rtl8821ae_set_fw_rsvdpagepkt(struct ieee80211_hw *hw, bool b_dl_finished);
+void rtl8812ae_set_fw_rsvdpagepkt(struct ieee80211_hw *hw, bool b_dl_finished);
+void rtl8821ae_set_p2p_ps_offload_cmd(struct ieee80211_hw *hw, u8 p2p_ps_state);
+void rtl8812ae_c2h_packet_handler(struct ieee80211_hw *hw, u8 *buffer, u8 length);
+#endif
diff --git a/drivers/staging/rtl8821ae/rtl8821ae/hal_bt_coexist.c b/drivers/staging/rtl8821ae/rtl8821ae/hal_bt_coexist.c
new file mode 100644
index 0000000..8bee772
--- /dev/null
+++ b/drivers/staging/rtl8821ae/rtl8821ae/hal_bt_coexist.c
@@ -0,0 +1,519 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2009-2010  Realtek Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ * The full GNU General Public License is included in this distribution in the
+ * file called LICENSE.
+ *
+ * Contact Information:
+ * wlanfae <wlanfae@realtek.com>
+ * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
+ * Hsinchu 300, Taiwan.
+ *
+ * Larry Finger <Larry.Finger@lwfinger.net>
+ *
+ *****************************************************************************/
+
+#include "hal_bt_coexist.h"
+#include "../pci.h"
+#include "dm.h"
+#include "fw.h"
+#include "phy.h"
+#include "reg.h"
+#include "hal_btc.h"
+
+static bool bt_operation_on = false;
+
+void rtl8821ae_dm_bt_reject_ap_aggregated_packet(struct ieee80211_hw *hw, bool b_reject)
+{
+#if 0
+	struct rtl_priv rtlpriv = rtl_priv(hw);
+	PRX_TS_RECORD			pRxTs = NULL;
+
+	if(b_reject){
+		// Do not allow receiving A-MPDU aggregation.
+		if (rtlpriv->mac80211.vendor == PEER_CISCO) {
+				if (pHTInfo->bAcceptAddbaReq) {
+					RTPRINT(FBT, BT_TRACE, ("BT_Disallow AMPDU \n"));
+					pHTInfo->bAcceptAddbaReq = FALSE;
+					if(GetTs(Adapter, (PTS_COMMON_INFO*)(&pRxTs), pMgntInfo->Bssid, 0, RX_DIR, FALSE))
+						TsInitDelBA(Adapter, (PTS_COMMON_INFO)pRxTs, RX_DIR);
+				}
+			} else {
+				if (!pHTInfo->bAcceptAddbaReq) {
+					RTPRINT(FBT, BT_TRACE, ("BT_Allow AMPDU BT Idle\n"));
+					pHTInfo->bAcceptAddbaReq = TRUE;
+				}
+			}
+		} else {
+			if(rtlpriv->mac80211.vendor == PEER_CISCO) {
+				if (!pHTInfo->bAcceptAddbaReq) {
+					RTPRINT(FBT, BT_TRACE, ("BT_Allow AMPDU \n"));
+					pHTInfo->bAcceptAddbaReq = TRUE;
+				}
+			}
+		}
+#endif
+}
+
+void _rtl8821ae_dm_bt_check_wifi_state(struct ieee80211_hw *hw)
+{
+	struct rtl_priv *rtlpriv = rtl_priv(hw);
+	struct rtl_pci_priv *rtlpcipriv = rtl_pcipriv(hw);
+	struct rtl_phy *rtlphy = &(rtlpriv->phy);
+
+	if (rtlpriv->link_info.b_busytraffic) {
+		rtlpcipriv->btcoexist.current_state &= ~BT_COEX_STATE_WIFI_IDLE;
+
+		if (rtlpriv->link_info.b_tx_busy_traffic)
+			rtlpcipriv->btcoexist.current_state |= BT_COEX_STATE_WIFI_UPLINK;
+		else
+			rtlpcipriv->btcoexist.current_state &= ~BT_COEX_STATE_WIFI_UPLINK;
+
+		if (rtlpriv->link_info.b_rx_busy_traffic)
+			rtlpcipriv->btcoexist.current_state |= BT_COEX_STATE_WIFI_DOWNLINK;
+		else
+			rtlpcipriv->btcoexist.current_state &= ~BT_COEX_STATE_WIFI_DOWNLINK;
+	} else {
+		rtlpcipriv->btcoexist.current_state |= BT_COEX_STATE_WIFI_IDLE;
+		rtlpcipriv->btcoexist.current_state &= ~BT_COEX_STATE_WIFI_UPLINK;
+		rtlpcipriv->btcoexist.current_state &= ~BT_COEX_STATE_WIFI_DOWNLINK;
+	}
+
+	if (rtlpriv->mac80211.mode == WIRELESS_MODE_G ||
+	    rtlpriv->mac80211.mode == WIRELESS_MODE_B) {
+		rtlpcipriv->btcoexist.current_state |= BT_COEX_STATE_WIFI_LEGACY;
+		rtlpcipriv->btcoexist.current_state &= ~BT_COEX_STATE_WIFI_HT20;
+		rtlpcipriv->btcoexist.current_state &= ~BT_COEX_STATE_WIFI_HT40;
+	} else {
+		rtlpcipriv->btcoexist.current_state &= ~BT_COEX_STATE_WIFI_LEGACY;
+		if (rtlphy->current_chan_bw == HT_CHANNEL_WIDTH_20_40) {
+			rtlpcipriv->btcoexist.current_state |= BT_COEX_STATE_WIFI_HT40;
+			rtlpcipriv->btcoexist.current_state &= ~BT_COEX_STATE_WIFI_HT20;
+		} else {
+			rtlpcipriv->btcoexist.current_state |= BT_COEX_STATE_WIFI_HT20;
+			rtlpcipriv->btcoexist.current_state &= ~BT_COEX_STATE_WIFI_HT40;
+		}
+	}
+
+	if (bt_operation_on)
+		rtlpcipriv->btcoexist.current_state |= BT_COEX_STATE_BT30;
+	else
+		rtlpcipriv->btcoexist.current_state &= ~BT_COEX_STATE_BT30;
+}
+
+
+u8 rtl8821ae_dm_bt_check_coex_rssi_state1(struct ieee80211_hw *hw,
+					   u8 level_num, u8 rssi_thresh,
+					   u8 rssi_thresh1)
+{
+	struct rtl_pci_priv *rtlpcipriv = rtl_pcipriv(hw);
+	struct rtl_priv *rtlpriv = rtl_priv(hw);
+	long undecoratedsmoothed_pwdb = 0;
+	u8 bt_rssi_state = 0;
+
+	undecoratedsmoothed_pwdb =  rtl8821ae_dm_bt_get_rx_ss(hw);
+
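+	/*
+	 * Two- or three-level RSSI state machine with hysteresis: the state
+	 * only moves up once the smoothed PWDB exceeds the threshold plus
+	 * BT_FW_COEX_THRESH_TOL, and only moves down once it drops below the
+	 * plain threshold, so small RSSI fluctuations do not toggle the state.
+	 */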
+	if(level_num == 2) {
+		rtlpcipriv->btcoexist.current_state &= ~BT_COEX_STATE_WIFI_RSSI_1_MEDIUM;
+
+		if( (rtlpcipriv->btcoexist.bt_pre_rssi_state == BT_RSSI_STATE_LOW) ||
+			(rtlpcipriv->btcoexist.bt_pre_rssi_state == BT_RSSI_STATE_STAY_LOW)) {
+			if(undecoratedsmoothed_pwdb >= (rssi_thresh + BT_FW_COEX_THRESH_TOL)) {
+				bt_rssi_state = BT_RSSI_STATE_HIGH;
+				rtlpcipriv->btcoexist.current_state |= BT_COEX_STATE_WIFI_RSSI_1_HIGH;
+				rtlpcipriv->btcoexist.current_state &= ~BT_COEX_STATE_WIFI_RSSI_1_LOW;
+				RT_TRACE(COMP_BT_COEXIST, DBG_TRACE, ("[DM][BT], RSSI_1 state switch to High\n"));
+			} else {
+				bt_rssi_state = BT_RSSI_STATE_STAY_LOW;
+				RT_TRACE(COMP_BT_COEXIST, DBG_TRACE, ("[DM][BT], RSSI_1 state stay at Low\n"));
+			}
+		} else {
+			if(undecoratedsmoothed_pwdb < rssi_thresh) {
+				bt_rssi_state = BT_RSSI_STATE_LOW;
+				rtlpcipriv->btcoexist.current_state |= BT_COEX_STATE_WIFI_RSSI_1_LOW;
+				rtlpcipriv->btcoexist.current_state &= ~BT_COEX_STATE_WIFI_RSSI_1_HIGH;
+				RT_TRACE(COMP_BT_COEXIST, DBG_TRACE, ("[DM][BT], RSSI_1 state switch to Low\n"));
+			} else {
+				bt_rssi_state = BT_RSSI_STATE_STAY_HIGH;
+				RT_TRACE(COMP_BT_COEXIST, DBG_TRACE, ("[DM][BT], RSSI_1 state stay at High\n"));
+			}
+		}
+	} else if(level_num == 3) {
+		if(rssi_thresh > rssi_thresh1) {
+			RT_TRACE(COMP_BT_COEXIST, DBG_TRACE, ("[DM][BT], RSSI_1 thresh error!!\n"));
+			return rtlpcipriv->btcoexist.bt_pre_rssi_state;
+		}
+
+		if( (rtlpcipriv->btcoexist.bt_pre_rssi_state == BT_RSSI_STATE_LOW) ||
+			(rtlpcipriv->btcoexist.bt_pre_rssi_state == BT_RSSI_STATE_STAY_LOW)) {
+			if(undecoratedsmoothed_pwdb >= (rssi_thresh+BT_FW_COEX_THRESH_TOL)) {
+				bt_rssi_state = BT_RSSI_STATE_MEDIUM;
+				rtlpcipriv->btcoexist.current_state |= BT_COEX_STATE_WIFI_RSSI_1_MEDIUM;
+				rtlpcipriv->btcoexist.current_state &= ~BT_COEX_STATE_WIFI_RSSI_1_LOW;
+				rtlpcipriv->btcoexist.current_state &= ~BT_COEX_STATE_WIFI_RSSI_1_HIGH;
+				RT_TRACE(COMP_BT_COEXIST, DBG_TRACE, ("[DM][BT], RSSI_1 state switch to Medium\n"));
+			} else {
+				bt_rssi_state = BT_RSSI_STATE_STAY_LOW;
+				RT_TRACE(COMP_BT_COEXIST, DBG_TRACE, ("[DM][BT], RSSI_1 state stay at Low\n"));
+			}
+		} else if( (rtlpcipriv->btcoexist.bt_pre_rssi_state == BT_RSSI_STATE_MEDIUM) ||
+			(rtlpcipriv->btcoexist.bt_pre_rssi_state == BT_RSSI_STATE_STAY_MEDIUM)) {
+			if(undecoratedsmoothed_pwdb >= (rssi_thresh1 + BT_FW_COEX_THRESH_TOL)) {
+				bt_rssi_state = BT_RSSI_STATE_HIGH;
+				rtlpcipriv->btcoexist.current_state |= BT_COEX_STATE_WIFI_RSSI_1_HIGH;
+				rtlpcipriv->btcoexist.current_state &= ~BT_COEX_STATE_WIFI_RSSI_1_LOW;
+				rtlpcipriv->btcoexist.current_state &= ~BT_COEX_STATE_WIFI_RSSI_1_MEDIUM;
+				RT_TRACE(COMP_BT_COEXIST, DBG_TRACE, ("[DM][BT], RSSI_1 state switch to High\n"));
+			} else if(undecoratedsmoothed_pwdb < rssi_thresh) {
+				bt_rssi_state = BT_RSSI_STATE_LOW;
+				rtlpcipriv->btcoexist.current_state |= BT_COEX_STATE_WIFI_RSSI_1_LOW;
+				rtlpcipriv->btcoexist.current_state &= ~BT_COEX_STATE_WIFI_RSSI_1_HIGH;
+				rtlpcipriv->btcoexist.current_state &= ~BT_COEX_STATE_WIFI_RSSI_1_MEDIUM;
+				RT_TRACE(COMP_BT_COEXIST, DBG_TRACE, ("[DM][BT], RSSI_1 state switch to Low\n"));
+			} else {
+				bt_rssi_state = BT_RSSI_STATE_STAY_MEDIUM;
+				RT_TRACE(COMP_BT_COEXIST, DBG_TRACE, ("[DM][BT], RSSI_1 state stay at Medium\n"));
+			}
+		} else {
+			if(undecoratedsmoothed_pwdb < rssi_thresh1) {
+				bt_rssi_state = BT_RSSI_STATE_MEDIUM;
+				rtlpcipriv->btcoexist.current_state |= BT_COEX_STATE_WIFI_RSSI_1_MEDIUM;
+				rtlpcipriv->btcoexist.current_state &= ~BT_COEX_STATE_WIFI_RSSI_1_HIGH;
+				rtlpcipriv->btcoexist.current_state &= ~BT_COEX_STATE_WIFI_RSSI_1_LOW;
+				RT_TRACE(COMP_BT_COEXIST, DBG_TRACE,("[DM][BT], RSSI_1 state switch to Medium\n"));
+			} else {
+				bt_rssi_state = BT_RSSI_STATE_STAY_HIGH;
+				RT_TRACE(COMP_BT_COEXIST, DBG_TRACE, ("[DM][BT], RSSI_1 state stay at High\n"));
+			}
+		}
+	}
+
+	rtlpcipriv->btcoexist.bt_pre_rssi_state1 = bt_rssi_state;
+
+	return bt_rssi_state;
+}
+
+u8 rtl8821ae_dm_bt_check_coex_rssi_state(struct ieee80211_hw *hw,
+						u8	level_num, u8	rssi_thresh, u8 rssi_thresh1)
+{
+	struct rtl_pci_priv *rtlpcipriv = rtl_pcipriv(hw);
+	struct rtl_priv *rtlpriv = rtl_priv(hw);
+	long undecoratedsmoothed_pwdb = 0;
+	u8 bt_rssi_state = 0;
+
+	undecoratedsmoothed_pwdb = rtl8821ae_dm_bt_get_rx_ss(hw);
+
+	if (level_num == 2) {
+		rtlpcipriv->btcoexist.current_state &= ~BT_COEX_STATE_WIFI_RSSI_MEDIUM;
+
+		if ((rtlpcipriv->btcoexist.bt_pre_rssi_state == BT_RSSI_STATE_LOW) ||
+			(rtlpcipriv->btcoexist.bt_pre_rssi_state == BT_RSSI_STATE_STAY_LOW)){
+			if (undecoratedsmoothed_pwdb
+				>= (rssi_thresh + BT_FW_COEX_THRESH_TOL)) {
+				bt_rssi_state = BT_RSSI_STATE_HIGH;
+				rtlpcipriv->btcoexist.current_state
+					|= BT_COEX_STATE_WIFI_RSSI_HIGH;
+				rtlpcipriv->btcoexist.current_state
+					&= ~BT_COEX_STATE_WIFI_RSSI_LOW;
+				RT_TRACE(COMP_BT_COEXIST, DBG_TRACE,
+					("[DM][BT], RSSI state switch to High\n"));
+			} else {
+				bt_rssi_state = BT_RSSI_STATE_STAY_LOW;
+				RT_TRACE(COMP_BT_COEXIST, DBG_TRACE,
+					("[DM][BT], RSSI state stay at Low\n"));
+			}
+		} else {
+			if (undecoratedsmoothed_pwdb < rssi_thresh) {
+				bt_rssi_state = BT_RSSI_STATE_LOW;
+				rtlpcipriv->btcoexist.current_state
+					|= BT_COEX_STATE_WIFI_RSSI_LOW;
+				rtlpcipriv->btcoexist.current_state
+					&= ~BT_COEX_STATE_WIFI_RSSI_HIGH;
+				RT_TRACE(COMP_BT_COEXIST, DBG_TRACE,
+					("[DM][BT], RSSI state switch to Low\n"));
+			} else {
+				bt_rssi_state = BT_RSSI_STATE_STAY_HIGH;
+				RT_TRACE(COMP_BT_COEXIST, DBG_TRACE,
+					("[DM][BT], RSSI state stay at High\n"));
+			}
+		}
+	} else if (level_num == 3) {
+		if (rssi_thresh > rssi_thresh1) {
+			RT_TRACE(COMP_BT_COEXIST, DBG_TRACE,
+				("[DM][BT], RSSI thresh error!!\n"));
+			return rtlpcipriv->btcoexist.bt_pre_rssi_state;
+		}
+		if ((rtlpcipriv->btcoexist.bt_pre_rssi_state == BT_RSSI_STATE_LOW) ||
+			(rtlpcipriv->btcoexist.bt_pre_rssi_state == BT_RSSI_STATE_STAY_LOW)) {
+			if(undecoratedsmoothed_pwdb
+				>= (rssi_thresh + BT_FW_COEX_THRESH_TOL)) {
+				bt_rssi_state = BT_RSSI_STATE_MEDIUM;
+				rtlpcipriv->btcoexist.current_state
+					|= BT_COEX_STATE_WIFI_RSSI_MEDIUM;
+				rtlpcipriv->btcoexist.current_state
+					&= ~BT_COEX_STATE_WIFI_RSSI_LOW;
+				rtlpcipriv->btcoexist.current_state
+					&= ~BT_COEX_STATE_WIFI_RSSI_HIGH;
+				RT_TRACE(COMP_BT_COEXIST, DBG_TRACE,
+					("[DM][BT], RSSI state switch to Medium\n"));
+			} else {
+				bt_rssi_state = BT_RSSI_STATE_STAY_LOW;
+				RT_TRACE(COMP_BT_COEXIST, DBG_TRACE,
+					("[DM][BT], RSSI state stay at Low\n"));
+			}
+		} else if ((rtlpcipriv->btcoexist.bt_pre_rssi_state == BT_RSSI_STATE_MEDIUM) ||
+			(rtlpcipriv->btcoexist.bt_pre_rssi_state == BT_RSSI_STATE_STAY_MEDIUM)) {
+			if (undecoratedsmoothed_pwdb
+				>= (rssi_thresh1 + BT_FW_COEX_THRESH_TOL)) {
+				bt_rssi_state = BT_RSSI_STATE_HIGH;
+				rtlpcipriv->btcoexist.current_state
+					|= BT_COEX_STATE_WIFI_RSSI_HIGH;
+				rtlpcipriv->btcoexist.current_state
+					&= ~BT_COEX_STATE_WIFI_RSSI_LOW;
+				rtlpcipriv->btcoexist.current_state
+					&= ~BT_COEX_STATE_WIFI_RSSI_MEDIUM;
+				RT_TRACE(COMP_BT_COEXIST, DBG_TRACE,
+					("[DM][BT], RSSI state switch to High\n"));
+			} else if (undecoratedsmoothed_pwdb < rssi_thresh) {
+				bt_rssi_state = BT_RSSI_STATE_LOW;
+				rtlpcipriv->btcoexist.current_state
+					|= BT_COEX_STATE_WIFI_RSSI_LOW;
+				rtlpcipriv->btcoexist.current_state
+					&= ~BT_COEX_STATE_WIFI_RSSI_HIGH;
+				rtlpcipriv->btcoexist.current_state
+					&= ~BT_COEX_STATE_WIFI_RSSI_MEDIUM;
+				RT_TRACE(COMP_BT_COEXIST, DBG_TRACE,
+					("[DM][BT], RSSI state switch to Low\n"));
+			} else {
+				bt_rssi_state = BT_RSSI_STATE_STAY_MEDIUM;
+				RT_TRACE(COMP_BT_COEXIST, DBG_TRACE,
+					("[DM][BT], RSSI state stay at Medium\n"));
+			}
+		} else {
+			if(undecoratedsmoothed_pwdb < rssi_thresh1) {
+				bt_rssi_state = BT_RSSI_STATE_MEDIUM;
+				rtlpcipriv->btcoexist.current_state
+					|= BT_COEX_STATE_WIFI_RSSI_MEDIUM;
+				rtlpcipriv->btcoexist.current_state
+					&= ~BT_COEX_STATE_WIFI_RSSI_HIGH;
+				rtlpcipriv->btcoexist.current_state
+					&= ~BT_COEX_STATE_WIFI_RSSI_LOW;
+				RT_TRACE(COMP_BT_COEXIST, DBG_TRACE,
+					("[DM][BT], RSSI state switch to Medium\n"));
+			} else {
+				bt_rssi_state = BT_RSSI_STATE_STAY_HIGH;
+				RT_TRACE(COMP_BT_COEXIST, DBG_TRACE,
+					("[DM][BT], RSSI state stay at High\n"));
+			}
+		}
+	}
+
+	rtlpcipriv->btcoexist.bt_pre_rssi_state = bt_rssi_state;
+	return bt_rssi_state;
+}
+long rtl8821ae_dm_bt_get_rx_ss(struct ieee80211_hw *hw)
+{
+	struct rtl_priv *rtlpriv = rtl_priv(hw);
+	long undecoratedsmoothed_pwdb = 0;
+
+	if (rtlpriv->mac80211.link_state >= MAC80211_LINKED) {
+		undecoratedsmoothed_pwdb = GET_UNDECORATED_AVERAGE_RSSI(rtlpriv);
+	} else {
+		undecoratedsmoothed_pwdb
+			= rtlpriv->dm.entry_min_undecoratedsmoothed_pwdb;
+	}
+	RT_TRACE(COMP_BT_COEXIST, DBG_TRACE,
+		("rtl8821ae_dm_bt_get_rx_ss() = %ld\n", undecoratedsmoothed_pwdb));
+
+	return undecoratedsmoothed_pwdb;
+}
+
+void rtl8821ae_dm_bt_balance(struct ieee80211_hw *hw,
+							bool b_balance_on, u8 ms0, u8 ms1)
+{
+	struct rtl_pci_priv *rtlpcipriv = rtl_pcipriv(hw);
+	struct rtl_priv *rtlpriv = rtl_priv(hw);
+	u8 h2c_parameter[3] ={0};
+
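+	/* H2C command 0xc payload: byte0 = ms0, byte1 = ms1, byte2 = balance on/off. */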
+	if (b_balance_on) {
+		h2c_parameter[2] = 1;
+		h2c_parameter[1] = ms1;
+		h2c_parameter[0] = ms0;
+		rtlpcipriv->btcoexist.b_fw_coexist_all_off = false;
+	} else {
+		h2c_parameter[2] = 0;
+		h2c_parameter[1] = 0;
+		h2c_parameter[0] = 0;
+	}
+	rtlpcipriv->btcoexist.b_balance_on = b_balance_on;
+
+	RT_TRACE(COMP_BT_COEXIST, DBG_TRACE,
+		("[DM][BT], Balance=[%s:%dms:%dms], write 0xc=0x%x\n",
+		b_balance_on?"ON":"OFF", ms0, ms1,
+		h2c_parameter[0]<<16 | h2c_parameter[1]<<8 | h2c_parameter[2]));
+
+	rtl8821ae_fill_h2c_cmd(hw, 0xc, 3, h2c_parameter);
+}
+
+
+void rtl8821ae_dm_bt_agc_table(struct ieee80211_hw *hw, u8 type)
+{
+	struct rtl_priv *rtlpriv = rtl_priv(hw);
+	struct rtl_pci_priv *rtlpcipriv = rtl_pcipriv(hw);
+
+	if (type == BT_AGCTABLE_OFF) {
+		RT_TRACE(COMP_BT_COEXIST, DBG_TRACE, ("[BT]AGCTable Off!\n"));
+		rtl_write_dword(rtlpriv, 0xc78,0x641c0001);
+		rtl_write_dword(rtlpriv, 0xc78,0x631d0001);
+		rtl_write_dword(rtlpriv, 0xc78,0x621e0001);
+		rtl_write_dword(rtlpriv, 0xc78,0x611f0001);
+		rtl_write_dword(rtlpriv, 0xc78,0x60200001);
+
+		rtl8821ae_phy_set_rf_reg(hw, RF90_PATH_A,
+					RF_RX_AGC_HP, 0xfffff, 0x32000);
+		rtl8821ae_phy_set_rf_reg(hw, RF90_PATH_A,
+					RF_RX_AGC_HP, 0xfffff, 0x71000);
+		rtl8821ae_phy_set_rf_reg(hw, RF90_PATH_A,
+					RF_RX_AGC_HP, 0xfffff, 0xb0000);
+		rtl8821ae_phy_set_rf_reg(hw, RF90_PATH_A,
+					RF_RX_AGC_HP, 0xfffff, 0xfc000);
+		rtl8821ae_phy_set_rf_reg(hw, RF90_PATH_A,
+					RF_RX_G1, 0xfffff, 0x30355);
+	} else if (type == BT_AGCTABLE_ON) {
+		RT_TRACE(COMP_BT_COEXIST, DBG_TRACE, ("[BT]AGCTable On!\n"));
+		rtl_write_dword(rtlpriv, 0xc78,0x4e1c0001);
+		rtl_write_dword(rtlpriv, 0xc78,0x4d1d0001);
+		rtl_write_dword(rtlpriv, 0xc78,0x4c1e0001);
+		rtl_write_dword(rtlpriv, 0xc78,0x4b1f0001);
+		rtl_write_dword(rtlpriv, 0xc78,0x4a200001);
+
+		rtl8821ae_phy_set_rf_reg(hw, RF90_PATH_A,
+					RF_RX_AGC_HP, 0xfffff, 0xdc000);
+		rtl8821ae_phy_set_rf_reg(hw, RF90_PATH_A,
+					RF_RX_AGC_HP, 0xfffff, 0x90000);
+		rtl8821ae_phy_set_rf_reg(hw, RF90_PATH_A,
+					RF_RX_AGC_HP, 0xfffff, 0x51000);
+		rtl8821ae_phy_set_rf_reg(hw, RF90_PATH_A,
+					RF_RX_AGC_HP, 0xfffff, 0x12000);
+		rtl8821ae_phy_set_rf_reg(hw, RF90_PATH_A,
+					RF_RX_G1, 0xfffff, 0x00355);
+
+		rtlpcipriv->btcoexist.b_sw_coexist_all_off = false;
+	}
+}
+
+void rtl8821ae_dm_bt_bb_back_off_level(struct ieee80211_hw *hw, u8 type)
+{
+	struct rtl_priv *rtlpriv = rtl_priv(hw);
+	struct rtl_pci_priv *rtlpcipriv = rtl_pcipriv(hw);
+
+	if (type == BT_BB_BACKOFF_OFF) {
+		RT_TRACE(COMP_BT_COEXIST, DBG_TRACE, ("[BT]BBBackOffLevel Off!\n"));
+		rtl_write_dword(rtlpriv, 0xc04,0x3a05611);
+	} else if (type == BT_BB_BACKOFF_ON) {
+		RT_TRACE(COMP_BT_COEXIST, DBG_TRACE, ("[BT]BBBackOffLevel On!\n"));
+		rtl_write_dword(rtlpriv, 0xc04,0x3a07611);
+		rtlpcipriv->btcoexist.b_sw_coexist_all_off = false;
+	}
+}
+
+void rtl8821ae_dm_bt_fw_coex_all_off(struct ieee80211_hw *hw)
+{
+	struct rtl_pci_priv *rtlpcipriv = rtl_pcipriv(hw);
+	struct rtl_priv *rtlpriv = rtl_priv(hw);
+	RT_TRACE(COMP_BT_COEXIST, DBG_TRACE,
+		("rtl8821ae_dm_bt_fw_coex_all_off()\n"));
+
+	if(rtlpcipriv->btcoexist.b_fw_coexist_all_off)
+		return;
+
+	RT_TRACE(COMP_BT_COEXIST, DBG_TRACE,
+		("rtl8821ae_dm_bt_fw_coex_all_off(), real Do\n"));
+	rtl8821ae_dm_bt_fw_coex_all_off_8723a(hw);
+	rtlpcipriv->btcoexist.b_fw_coexist_all_off = true;
+}
+
+void rtl8821ae_dm_bt_sw_coex_all_off(struct ieee80211_hw *hw)
+{
+	struct rtl_pci_priv *rtlpcipriv = rtl_pcipriv(hw);
+	struct rtl_priv *rtlpriv = rtl_priv(hw);
+
+	RT_TRACE(COMP_BT_COEXIST, DBG_TRACE,
+		("rtl8821ae_dm_bt_sw_coex_all_off()\n"));
+
+	if(rtlpcipriv->btcoexist.b_sw_coexist_all_off)
+		return;
+
+	RT_TRACE(COMP_BT_COEXIST, DBG_TRACE,
+		("rtl8821ae_dm_bt_sw_coex_all_off(), real Do\n"));
+	rtl8821ae_dm_bt_sw_coex_all_off_8723a(hw);
+	rtlpcipriv->btcoexist.b_sw_coexist_all_off = true;
+}
+
+void rtl8821ae_dm_bt_hw_coex_all_off(struct ieee80211_hw *hw)
+{
+	struct rtl_pci_priv *rtlpcipriv = rtl_pcipriv(hw);
+	struct rtl_priv *rtlpriv = rtl_priv(hw);
+
+	RT_TRACE(COMP_BT_COEXIST, DBG_TRACE,
+		("rtl8821ae_dm_bt_hw_coex_all_off()\n"));
+
+	if(rtlpcipriv->btcoexist.b_hw_coexist_all_off)
+		return;
+	RT_TRACE(COMP_BT_COEXIST, DBG_TRACE,
+		("rtl8821ae_dm_bt_hw_coex_all_off(), real Do\n"));
+
+	rtl8821ae_dm_bt_hw_coex_all_off_8723a(hw);
+
+	rtlpcipriv->btcoexist.b_hw_coexist_all_off = true;
+}
+
+void rtl8821ae_btdm_coex_all_off(struct ieee80211_hw *hw)
+{
+	rtl8821ae_dm_bt_fw_coex_all_off(hw);
+	rtl8821ae_dm_bt_sw_coex_all_off(hw);
+	rtl8821ae_dm_bt_hw_coex_all_off(hw);
+}
+
+bool rtl8821ae_dm_bt_is_coexist_state_changed(struct ieee80211_hw *hw)
+{
+	struct rtl_pci_priv *rtlpcipriv = rtl_pcipriv(hw);
+
+	if((rtlpcipriv->btcoexist.previous_state
+		== rtlpcipriv->btcoexist.current_state)
+		&& (rtlpcipriv->btcoexist.previous_state_h
+		== rtlpcipriv->btcoexist.current_state_h))
+		return false;
+	else
+		return true;
+}
+
+bool rtl8821ae_dm_bt_is_wifi_up_link(struct ieee80211_hw *hw)
+{
+	struct rtl_priv *rtlpriv = rtl_priv(hw);
+
+	if (rtlpriv->link_info.b_tx_busy_traffic)
+		return true;
+	else
+		return false;
+}
+
diff --git a/drivers/staging/rtl8821ae/rtl8821ae/hal_bt_coexist.h b/drivers/staging/rtl8821ae/rtl8821ae/hal_bt_coexist.h
new file mode 100644
index 0000000..799cc6f
--- /dev/null
+++ b/drivers/staging/rtl8821ae/rtl8821ae/hal_bt_coexist.h
@@ -0,0 +1,169 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2009-2010  Realtek Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ * The full GNU General Public License is included in this distribution in the
+ * file called LICENSE.
+ *
+ * Contact Information:
+ * wlanfae <wlanfae@realtek.com>
+ * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
+ * Hsinchu 300, Taiwan.
+ * Larry Finger <Larry.Finger@lwfinger.net>
+ *
+ *****************************************************************************/
+
+#ifndef __RTL8821AE_HAL_BT_COEXIST_H__
+#define __RTL8821AE_HAL_BT_COEXIST_H__
+
+#include "../wifi.h"
+
+/* The reg define is for 8723 */
+#define	REG_HIGH_PRIORITY_TXRX			0x770
+#define	REG_LOW_PRIORITY_TXRX			0x774
+
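+/*
+ * RSSI thresholds and hysteresis tolerance used by the
+ * rtl8821ae_dm_bt_check_coex_rssi_state*() state machines.
+ */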
+#define BT_FW_COEX_THRESH_TOL			6
+#define BT_FW_COEX_THRESH_20			20
+#define BT_FW_COEX_THRESH_23			23
+#define BT_FW_COEX_THRESH_25			25
+#define BT_FW_COEX_THRESH_30			30
+#define BT_FW_COEX_THRESH_35			35
+#define BT_FW_COEX_THRESH_40			40
+#define BT_FW_COEX_THRESH_45			45
+#define BT_FW_COEX_THRESH_47			47
+#define BT_FW_COEX_THRESH_50			50
+#define BT_FW_COEX_THRESH_55			55
+
+#define BT_COEX_STATE_BT30				BIT(0)
+#define BT_COEX_STATE_WIFI_HT20			BIT(1)
+#define BT_COEX_STATE_WIFI_HT40			BIT(2)
+#define BT_COEX_STATE_WIFI_LEGACY		BIT(3)
+
+#define BT_COEX_STATE_WIFI_RSSI_LOW		BIT(4)
+#define BT_COEX_STATE_WIFI_RSSI_MEDIUM	BIT(5)
+#define BT_COEX_STATE_WIFI_RSSI_HIGH	BIT(6)
+#define BT_COEX_STATE_DEC_BT_POWER		BIT(7)
+
+#define BT_COEX_STATE_WIFI_IDLE			BIT(8)
+#define BT_COEX_STATE_WIFI_UPLINK		BIT(9)
+#define BT_COEX_STATE_WIFI_DOWNLINK		BIT(10)
+
+#define BT_COEX_STATE_BT_INQ_PAGE			BIT(11)
+#define BT_COEX_STATE_BT_IDLE			BIT(12)
+#define BT_COEX_STATE_BT_UPLINK			BIT(13)
+#define BT_COEX_STATE_BT_DOWNLINK		BIT(14)
+
+#define BT_COEX_STATE_HOLD_FOR_BT_OPERATION	BIT(15)
+#define BT_COEX_STATE_BT_RSSI_LOW		BIT(19)
+
+#define BT_COEX_STATE_PROFILE_HID		BIT(20)
+#define BT_COEX_STATE_PROFILE_A2DP		BIT(21)
+#define BT_COEX_STATE_PROFILE_PAN		BIT(22)
+#define BT_COEX_STATE_PROFILE_SCO		BIT(23)
+
+#define BT_COEX_STATE_WIFI_RSSI_1_LOW		BIT(24)
+#define BT_COEX_STATE_WIFI_RSSI_1_MEDIUM	BIT(25)
+#define BT_COEX_STATE_WIFI_RSSI_1_HIGH		BIT(26)
+
+#define BT_COEX_STATE_BTINFO_COMMON			BIT(30)
+#define BT_COEX_STATE_BTINFO_B_HID_SCOESCO	BIT(31)
+#define BT_COEX_STATE_BTINFO_B_FTP_A2DP		BIT(29)
+
+#define BT_COEX_STATE_BT_CNT_LEVEL_0		BIT(0)
+#define BT_COEX_STATE_BT_CNT_LEVEL_1		BIT(1)
+#define BT_COEX_STATE_BT_CNT_LEVEL_2		BIT(2)
+#define BT_COEX_STATE_BT_CNT_LEVEL_3		BIT(3)
+
+#define BT_RSSI_STATE_HIGH			0
+#define BT_RSSI_STATE_MEDIUM		1
+#define BT_RSSI_STATE_LOW			2
+#define BT_RSSI_STATE_STAY_HIGH		3
+#define BT_RSSI_STATE_STAY_MEDIUM	4
+#define BT_RSSI_STATE_STAY_LOW		5
+
+#define	BT_AGCTABLE_OFF				0
+#define	BT_AGCTABLE_ON				1
+#define	BT_BB_BACKOFF_OFF			0
+#define	BT_BB_BACKOFF_ON			1
+#define	BT_FW_NAV_OFF				0
+#define	BT_FW_NAV_ON				1
+
+#define	BT_COEX_MECH_NONE			0
+#define	BT_COEX_MECH_SCO			1
+#define	BT_COEX_MECH_HID			2
+#define	BT_COEX_MECH_A2DP			3
+#define	BT_COEX_MECH_PAN			4
+#define	BT_COEX_MECH_HID_A2DP		5
+#define	BT_COEX_MECH_HID_PAN		6
+#define	BT_COEX_MECH_PAN_A2DP		7
+#define	BT_COEX_MECH_HID_SCO_ESCO	8
+#define	BT_COEX_MECH_FTP_A2DP		9
+#define	BT_COEX_MECH_COMMON			10
+#define	BT_COEX_MECH_MAX			11
+
+#define	BT_DBG_PROFILE_NONE			0
+#define	BT_DBG_PROFILE_SCO			1
+#define	BT_DBG_PROFILE_HID			2
+#define	BT_DBG_PROFILE_A2DP			3
+#define	BT_DBG_PROFILE_PAN			4
+#define	BT_DBG_PROFILE_HID_A2DP		5
+#define	BT_DBG_PROFILE_HID_PAN		6
+#define	BT_DBG_PROFILE_PAN_A2DP		7
+#define	BT_DBG_PROFILE_MAX			9
+
+#define	BTINFO_B_FTP					BIT(7)
+#define	BTINFO_B_A2DP					BIT(6)
+#define	BTINFO_B_HID					BIT(5)
+#define	BTINFO_B_SCO_BUSY				BIT(4)
+#define	BTINFO_B_ACL_BUSY				BIT(3)
+#define	BTINFO_B_INQ_PAGE				BIT(2)
+#define	BTINFO_B_SCO_ESCO				BIT(1)
+#define	BTINFO_B_CONNECTION				BIT(0)
+
+
+void rtl8821ae_btdm_coex_all_off(struct ieee80211_hw *hw);
+void rtl8821ae_dm_bt_fw_coex_all_off(struct ieee80211_hw *hw);
+
+void rtl8821ae_dm_bt_sw_coex_all_off(struct ieee80211_hw *hw);
+void rtl8821ae_dm_bt_hw_coex_all_off(struct ieee80211_hw *hw);
+long rtl8821ae_dm_bt_get_rx_ss(struct ieee80211_hw *hw);
+void rtl8821ae_dm_bt_balance(struct ieee80211_hw *hw,
+			bool b_balance_on, u8 ms0, u8 ms1);
+void rtl8821ae_dm_bt_agc_table(struct ieee80211_hw *hw, u8 type);
+void rtl8821ae_dm_bt_bb_back_off_level(struct ieee80211_hw *hw, u8 type);
+u8 rtl8821ae_dm_bt_check_coex_rssi_state(struct ieee80211_hw *hw,
+					  u8 level_num, u8 rssi_thresh,
+					  u8 rssi_thresh1);
+u8 rtl8821ae_dm_bt_check_coex_rssi_state1(struct ieee80211_hw *hw,
+					   u8 level_num, u8 rssi_thresh,
+					   u8 rssi_thresh1);
+void _rtl8821ae_dm_bt_check_wifi_state(struct ieee80211_hw *hw);
+void rtl8821ae_dm_bt_reject_ap_aggregated_packet(struct ieee80211_hw *hw,
+			bool b_reject);
+
+#if 0
+VOID
+BTDM_PWDBMonitor(
+	 	PADAPTER	Adapter
+	);
+
+BOOLEAN
+BTDM_DIGByBTRSSI(
+	 		PADAPTER	Adapter
+	);
+#endif
+bool rtl8821ae_dm_bt_is_coexist_state_changed(struct ieee80211_hw *hw);
+bool rtl8821ae_dm_bt_is_wifi_up_link(struct ieee80211_hw *hw);
+#endif
+
diff --git a/drivers/staging/rtl8821ae/rtl8821ae/hal_btc.c b/drivers/staging/rtl8821ae/rtl8821ae/hal_btc.c
new file mode 100644
index 0000000..79386ee
--- /dev/null
+++ b/drivers/staging/rtl8821ae/rtl8821ae/hal_btc.c
@@ -0,0 +1,2069 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2009-2010  Realtek Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ * The full GNU General Public License is included in this distribution in the
+ * file called LICENSE.
+ *
+ * Contact Information:
+ * wlanfae <wlanfae@realtek.com>
+ * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
+ * Hsinchu 300, Taiwan.
+ *
+ * Larry Finger <Larry.Finger@lwfinger.net>
+ *
+ *****************************************************************************/
+#include "hal_btc.h"
+#include "../pci.h"
+#include "phy.h"
+#include "fw.h"
+#include "reg.h"
+#include "def.h"
+#include "../btcoexist/rtl_btc.h"
+
+static struct bt_coexist_8821ae hal_coex_8821ae;
+
+void rtl8821ae_dm_bt_turn_off_bt_coexist_before_enter_lps(struct ieee80211_hw *hw)
+{
+	struct rtl_priv *rtlpriv = rtl_priv(hw);
+	struct rtl_pci_priv *rtlpcipriv = rtl_pcipriv(hw);
+	struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw));
+
+	if(!rtlpcipriv->btcoexist.bt_coexistence)
+		return;
+
+	if(ppsc->b_inactiveps) {
+		RT_TRACE(COMP_BT_COEXIST, DBG_DMESG,("[BT][DM], Before enter IPS, turn off all Coexist DM\n"));
+		rtlpcipriv->btcoexist.current_state = 0;
+		rtlpcipriv->btcoexist.previous_state = 0;
+		rtlpcipriv->btcoexist.current_state_h = 0;
+		rtlpcipriv->btcoexist.previous_state_h = 0;
+		rtl8821ae_btdm_coex_all_off(hw);
+	}
+}
+
+
+enum rt_media_status mgnt_link_status_query(struct ieee80211_hw *hw)
+{
+	struct rtl_priv *rtlpriv = rtl_priv(hw);
+	struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
+	enum rt_media_status m_status = RT_MEDIA_DISCONNECT;
+	u8 bibss = (mac->opmode == NL80211_IFTYPE_ADHOC) ? 1 : 0;
+
+	if (bibss || rtlpriv->mac80211.link_state >= MAC80211_LINKED)
+		m_status = RT_MEDIA_CONNECT;
+
+	return m_status;
+}
+
+void rtl_8821ae_bt_wifi_media_status_notify(struct ieee80211_hw *hw, bool mstatus)
+{
+	struct rtl_priv *rtlpriv = rtl_priv(hw);
+	struct rtl_pci_priv *rtlpcipriv = rtl_pcipriv(hw);
+	struct rtl_phy *rtlphy = &(rtlpriv->phy);
+	u8 h2c_parameter[3] ={0};
+	u8 chnl;
+
+	if(!rtlpcipriv->btcoexist.bt_coexistence)
+		return;
+
+	if (mstatus == RT_MEDIA_CONNECT)
+		h2c_parameter[0] = 0x1; /* 0: disconnected, 1: connected */
+	else
+		h2c_parameter[0] = 0x0;
+
+	if (mgnt_link_status_query(hw)) {
+		chnl = rtlphy->current_channel;
+		h2c_parameter[1] = chnl;
+	}
+
+	if (rtlphy->current_chan_bw == HT_CHANNEL_WIDTH_20_40) {
+		h2c_parameter[2] = 0x30;
+	} else {
+		h2c_parameter[2] = 0x20;
+	}
+
+	RT_TRACE(COMP_BT_COEXIST, DBG_DMESG,("[BTCoex], FW write 0x19=0x%x\n",
+		h2c_parameter[0]<<16|h2c_parameter[1]<<8|h2c_parameter[2]));
+
+	rtl8821ae_fill_h2c_cmd(hw, 0x19, 3, h2c_parameter);
+
+}
+
+
+bool rtl8821ae_dm_bt_is_wifi_busy(struct ieee80211_hw *hw)
+{
+	struct rtl_priv *rtlpriv = rtl_priv(hw);
+	if(rtlpriv->link_info.b_busytraffic ||
+		rtlpriv->link_info.b_rx_busy_traffic ||
+		rtlpriv->link_info.b_tx_busy_traffic)
+		return true;
+	else
+		return false;
+}
+void rtl8821ae_dm_bt_set_fw_3a(struct ieee80211_hw *hw,
+						u8 byte1, u8 byte2, u8 byte3, u8 byte4, u8 byte5)
+{
+	struct rtl_priv *rtlpriv = rtl_priv(hw);
+	u8 h2c_parameter[5] ={0};
+	h2c_parameter[0] = byte1;
+	h2c_parameter[1] = byte2;
+	h2c_parameter[2] = byte3;
+	h2c_parameter[3] = byte4;
+	h2c_parameter[4] = byte5;
+	RT_TRACE(COMP_BT_COEXIST, DBG_TRACE, ("[BTCoex], FW write 0x3a(4bytes)=0x%x%8x\n",
+		h2c_parameter[0], h2c_parameter[1]<<24 | h2c_parameter[2]<<16 | h2c_parameter[3]<<8 | h2c_parameter[4]));
+	rtl8821ae_fill_h2c_cmd(hw, 0x3a, 5, h2c_parameter);
+}
+
+bool rtl8821ae_dm_bt_need_to_dec_bt_pwr(struct ieee80211_hw *hw)
+{
+	struct rtl_pci_priv *rtlpcipriv = rtl_pcipriv(hw);
+	struct rtl_priv *rtlpriv = rtl_priv(hw);
+
+	if (mgnt_link_status_query(hw) == RT_MEDIA_CONNECT) {
+		RT_TRACE(COMP_BT_COEXIST, DBG_DMESG, ("Need to decrease bt power\n"));
+		rtlpcipriv->btcoexist.current_state |= BT_COEX_STATE_DEC_BT_POWER;
+		return true;
+	}
+
+	rtlpcipriv->btcoexist.current_state &= ~BT_COEX_STATE_DEC_BT_POWER;
+	return false;
+}
+
+
+bool rtl8821ae_dm_bt_is_same_coexist_state(struct ieee80211_hw *hw)
+{
+	struct rtl_priv *rtlpriv = rtl_priv(hw);
+	struct rtl_pci_priv *rtlpcipriv = rtl_pcipriv(hw);
+
+	if ((rtlpcipriv->btcoexist.previous_state
+		== rtlpcipriv->btcoexist.current_state)
+		&&(rtlpcipriv->btcoexist.previous_state_h
+		== rtlpcipriv->btcoexist.current_state_h)) {
+		RT_TRACE(COMP_BT_COEXIST, DBG_DMESG,
+				("[DM][BT], Coexist state did not change!!\n"));
+		return true;
+	} else {
+		RT_TRACE(COMP_BT_COEXIST, DBG_DMESG,
+				("[DM][BT], Coexist state changed!!\n"));
+		return false;
+	}
+}
+
+void rtl8821ae_dm_bt_set_coex_table(struct ieee80211_hw *hw,
+						u32 val_0x6c0, u32 val_0x6c8, u32 val_0x6cc)
+{
+	struct rtl_priv *rtlpriv = rtl_priv(hw);
+
+	RT_TRACE(COMP_BT_COEXIST, DBG_TRACE, ("set coex table, set 0x6c0=0x%x\n", val_0x6c0));
+	rtl_write_dword(rtlpriv, 0x6c0, val_0x6c0);
+
+	RT_TRACE(COMP_BT_COEXIST, DBG_TRACE, ("set coex table, set 0x6c8=0x%x\n", val_0x6c8));
+	rtl_write_dword(rtlpriv, 0x6c8, val_0x6c8);
+
+	RT_TRACE(COMP_BT_COEXIST, DBG_TRACE, ("set coex table, set 0x6cc=0x%x\n", val_0x6cc));
+	rtl_write_byte(rtlpriv, 0x6cc, val_0x6cc);
+}
+
+void rtl8821ae_dm_bt_set_hw_pta_mode(struct ieee80211_hw *hw, bool b_mode)
+{
+	struct rtl_pci_priv *rtlpcipriv = rtl_pcipriv(hw);
+	struct rtl_priv *rtlpriv = rtl_priv(hw);
+
+	if (BT_PTA_MODE_ON == b_mode) {
+		RT_TRACE(COMP_BT_COEXIST, DBG_TRACE, ("PTA mode on\n"));
+		/*  Enable GPIO 0/1/2/3/8 pins for bt */
+		rtl_write_byte(rtlpriv, 0x40, 0x20);
+		rtlpcipriv->btcoexist.b_hw_coexist_all_off = false;
+	} else {
+		RT_TRACE(COMP_BT_COEXIST, DBG_TRACE, ("PTA mode off\n"));
+		rtl_write_byte(rtlpriv, 0x40, 0x0);
+	}
+}
+
+void rtl8821ae_dm_bt_set_sw_rf_rx_lpf_corner(struct ieee80211_hw *hw, u8 type)
+{
+	struct rtl_pci_priv *rtlpcipriv = rtl_pcipriv(hw);
+	struct rtl_priv *rtlpriv = rtl_priv(hw);
+
+	if (BT_RF_RX_LPF_CORNER_SHRINK == type) {
+		/* Shrink RF Rx LPF corner, 0x1e[7:4]=1111 ==> [11:4] by Jenyu */
+		RT_TRACE(COMP_BT_COEXIST, DBG_TRACE, ("Shrink RF Rx LPF corner!!\n"));
+		/* PHY_SetRFReg(Adapter, (RF_RADIO_PATH_E)PathA, 0x1e, 0xf0, 0xf); */
+		rtl8821ae_phy_set_rf_reg(hw, RF90_PATH_A, 0x1e, 0xfffff, 0xf0ff7);
+		rtlpcipriv->btcoexist.b_sw_coexist_all_off = false;
+	} else if(BT_RF_RX_LPF_CORNER_RESUME == type) {
+		/*Resume RF Rx LPF corner*/
+		RT_TRACE(COMP_BT_COEXIST, DBG_TRACE, ("Resume RF Rx LPF corner!!\n"));
+		/* PHY_SetRFReg(Adapter, (RF_RADIO_PATH_E)PathA, 0x1e, 0xf0,
+		 * pHalData->btcoexist.BtRfRegOrigin1E); */
+		rtl8821ae_phy_set_rf_reg(hw, RF90_PATH_A, 0x1e, 0xfffff,
+			rtlpcipriv->btcoexist.bt_rfreg_origin_1e);
+	}
+}
+
+void rtl8821ae_dm_bt_set_sw_penalty_tx_rate_adaptive(struct ieee80211_hw *hw,
+																	u8 ra_type)
+{
+	struct rtl_priv *rtlpriv = rtl_priv(hw);
+	struct rtl_pci_priv *rtlpcipriv = rtl_pcipriv(hw);
+	u8 tmp_u1;
+
+	tmp_u1 = rtl_read_byte(rtlpriv, 0x4fd);
+	tmp_u1 |= BIT(0);
+	if (BT_TX_RATE_ADAPTIVE_LOW_PENALTY == ra_type) {
+		RT_TRACE(COMP_BT_COEXIST, DBG_TRACE, ("Tx rate adaptive, set low penalty!!\n"));
+		tmp_u1 &= ~BIT(2);
+		rtlpcipriv->btcoexist.b_sw_coexist_all_off = false;
+	} else if(BT_TX_RATE_ADAPTIVE_NORMAL == ra_type) {
+		RT_TRACE(COMP_BT_COEXIST, DBG_TRACE, ("Tx rate adaptive, set normal!!\n"));
+		tmp_u1 |= BIT(2);
+	}
+
+	rtl_write_byte(rtlpriv, 0x4fd, tmp_u1);
+}
+
+void rtl8821ae_dm_bt_btdm_structure_reload(struct ieee80211_hw *hw,
+													struct btdm_8821ae	*p_btdm)
+{
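+	/*
+	 * Reset the BT dynamic-mechanism control block to its default values;
+	 * callers adjust individual fields before passing it to
+	 * rtl8821ae_dm_bt_set_bt_dm().
+	 */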
+	p_btdm->b_all_off = false;
+	p_btdm->b_agc_table_en = false;
+	p_btdm->b_adc_back_off_on = false;
+	p_btdm->b2_ant_hid_en = false;
+	p_btdm->b_low_penalty_rate_adaptive = false;
+	p_btdm->b_rf_rx_lpf_shrink = false;
+	p_btdm->b_reject_aggre_pkt = false;
+
+	p_btdm->b_tdma_on = false;
+	p_btdm->tdma_ant = TDMA_2ANT;
+	p_btdm->tdma_nav = TDMA_NAV_OFF;
+	p_btdm->tdma_dac_swing = TDMA_DAC_SWING_OFF;
+	p_btdm->fw_dac_swing_lvl = 0x20;
+
+	p_btdm->b_tra_tdma_on = false;
+	p_btdm->tra_tdma_ant = TDMA_2ANT;
+	p_btdm->tra_tdma_nav = TDMA_NAV_OFF;
+	p_btdm->b_ignore_wlan_act = false;
+
+	p_btdm->b_ps_tdma_on = false;
+	p_btdm->ps_tdma_byte[0] = 0x0;
+	p_btdm->ps_tdma_byte[1] = 0x0;
+	p_btdm->ps_tdma_byte[2] = 0x0;
+	p_btdm->ps_tdma_byte[3] = 0x8;
+	p_btdm->ps_tdma_byte[4] = 0x0;
+
+	p_btdm->b_pta_on = true;
+	p_btdm->val_0x6c0 = 0x5a5aaaaa;
+	p_btdm->val_0x6c8 = 0xcc;
+	p_btdm->val_0x6cc = 0x3;
+
+	p_btdm->b_sw_dac_swing_on = false;
+	p_btdm->sw_dac_swing_lvl = 0xc0;
+	p_btdm->wlan_act_hi = 0x20;
+	p_btdm->wlan_act_lo = 0x10;
+	p_btdm->bt_retry_index = 2;
+
+	p_btdm->b_dec_bt_pwr = false;
+}
+
+void rtl8821ae_dm_bt_btdm_structure_reload_all_off(struct ieee80211_hw *hw,
+													struct btdm_8821ae	*p_btdm)
+{
+	rtl8821ae_dm_bt_btdm_structure_reload(hw, p_btdm);
+	p_btdm->b_all_off = true;
+	p_btdm->b_pta_on = false;
+	p_btdm->wlan_act_hi = 0x10;
+}
+
+bool rtl8821ae_dm_bt_is_2_ant_common_action(struct ieee80211_hw *hw)
+{
+	struct rtl_pci_priv *rtlpcipriv = rtl_pcipriv(hw);
+	struct rtl_priv *rtlpriv = rtl_priv(hw);
+	struct btdm_8821ae btdm8821ae;
+	bool b_common = false;
+
+	rtl8821ae_dm_bt_btdm_structure_reload(hw, &btdm8821ae);
+
+	if(!rtl8821ae_dm_bt_is_wifi_busy(hw)
+		&& !rtlpcipriv->btcoexist.b_bt_busy) {
+		RT_TRACE(COMP_BT_COEXIST, DBG_DMESG,
+			("Wifi idle + Bt idle, bt coex mechanism always off!!\n"));
+		rtl8821ae_dm_bt_btdm_structure_reload_all_off(hw, &btdm8821ae);
+		b_common = true;
+	} else if (rtl8821ae_dm_bt_is_wifi_busy(hw)
+		&& !rtlpcipriv->btcoexist.b_bt_busy) {
+		RT_TRACE(COMP_BT_COEXIST, DBG_DMESG,
+			("Wifi non-idle + Bt disabled/idle!!\n"));
+		btdm8821ae.b_low_penalty_rate_adaptive = true;
+		btdm8821ae.b_rf_rx_lpf_shrink = false;
+		btdm8821ae.b_reject_aggre_pkt = false;
+
+		/* sw mechanism */
+		btdm8821ae.b_agc_table_en = false;
+		btdm8821ae.b_adc_back_off_on = false;
+		btdm8821ae.b_sw_dac_swing_on = false;
+
+		btdm8821ae.b_pta_on = true;
+		btdm8821ae.val_0x6c0 = 0x5a5aaaaa;
+		btdm8821ae.val_0x6c8 = 0xcccc;
+		btdm8821ae.val_0x6cc = 0x3;
+
+		btdm8821ae.b_tdma_on = false;
+		btdm8821ae.tdma_dac_swing = TDMA_DAC_SWING_OFF;
+		btdm8821ae.b2_ant_hid_en = false;
+
+		b_common = true;
+	} else if (rtlpcipriv->btcoexist.b_bt_busy) {
+		RT_TRACE(COMP_BT_COEXIST, DBG_DMESG,
+			("Bt non-idle!\n"));
+		if (mgnt_link_status_query(hw) == RT_MEDIA_CONNECT) {
+			RT_TRACE(COMP_BT_COEXIST, DBG_DMESG, ("Wifi connection exists\n"));
+			b_common = false;
+		} else {
+			RT_TRACE(COMP_BT_COEXIST, DBG_DMESG,
+				("No Wifi connection!\n"));
+			btdm8821ae.b_rf_rx_lpf_shrink = true;
+			btdm8821ae.b_low_penalty_rate_adaptive = false;
+			btdm8821ae.b_reject_aggre_pkt = false;
+
+			/* sw mechanism */
+			btdm8821ae.b_agc_table_en = false;
+			btdm8821ae.b_adc_back_off_on = false;
+			btdm8821ae.b_sw_dac_swing_on = false;
+
+			btdm8821ae.b_pta_on = true;
+			btdm8821ae.val_0x6c0 = 0x55555555;
+			btdm8821ae.val_0x6c8 = 0x0000ffff;
+			btdm8821ae.val_0x6cc = 0x3;
+
+			btdm8821ae.b_tdma_on = false;
+			btdm8821ae.tdma_dac_swing = TDMA_DAC_SWING_OFF;
+			btdm8821ae.b2_ant_hid_en = false;
+
+			b_common = true;
+		}
+	}
+
+	if (rtl8821ae_dm_bt_need_to_dec_bt_pwr(hw)) {
+		btdm8821ae.b_dec_bt_pwr = true;
+	}
+
+	if (b_common)
+		rtlpcipriv->btcoexist.current_state |= BT_COEX_STATE_BTINFO_COMMON;
+
+	if (b_common && rtl8821ae_dm_bt_is_coexist_state_changed(hw))
+		rtl8821ae_dm_bt_set_bt_dm(hw, &btdm8821ae);
+
+	return b_common;
+}
+
+void rtl8821ae_dm_bt_set_sw_full_time_dac_swing(
+		struct ieee80211_hw *hw, bool b_sw_dac_swing_on, u32 sw_dac_swing_lvl)
+{
+	struct rtl_pci_priv *rtlpcipriv = rtl_pcipriv(hw);
+	struct rtl_priv *rtlpriv = rtl_priv(hw);
+
+	if (b_sw_dac_swing_on) {
+		RT_TRACE(COMP_BT_COEXIST, DBG_TRACE,
+			("[BTCoex], SwDacSwing = 0x%x\n", sw_dac_swing_lvl));
+		rtl8821ae_phy_set_bb_reg(hw, 0x880, 0xff000000, sw_dac_swing_lvl);
+		rtlpcipriv->btcoexist.b_sw_coexist_all_off = false;
+	} else {
+		RT_TRACE(COMP_BT_COEXIST, DBG_TRACE, ("[BTCoex], SwDacSwing Off!\n"));
+		rtl8821ae_phy_set_bb_reg(hw, 0x880, 0xff000000, 0xc0);
+	}
+}
+
+void rtl8821ae_dm_bt_set_fw_dec_bt_pwr(
+		struct ieee80211_hw *hw, bool b_dec_bt_pwr)
+{
+	struct rtl_pci_priv *rtlpcipriv = rtl_pcipriv(hw);
+	struct rtl_priv *rtlpriv = rtl_priv(hw);
+	u8 h2c_parameter[1] ={0};
+
+	h2c_parameter[0] = 0;
+
+	if (b_dec_bt_pwr) {
+		h2c_parameter[0] |= BIT(1);
+		rtlpcipriv->btcoexist.b_fw_coexist_all_off = false;
+	}
+
+	RT_TRACE(COMP_BT_COEXIST, DBG_TRACE,
+		("[BTCoex], decrease Bt Power : %s, write 0x21=0x%x\n",
+		(b_dec_bt_pwr? "Yes!!":"No!!"), h2c_parameter[0]));
+
+	rtl8821ae_fill_h2c_cmd(hw, 0x21, 1, h2c_parameter);
+}
+
+
+void rtl8821ae_dm_bt_set_fw_2_ant_hid(struct ieee80211_hw *hw,
+									bool b_enable, bool b_dac_swing_on)
+{
+	struct rtl_pci_priv *rtlpcipriv = rtl_pcipriv(hw);
+	struct rtl_priv *rtlpriv = rtl_priv(hw);
+	u8 h2c_parameter[1] ={0};
+
+	if (b_enable) {
+		h2c_parameter[0] |= BIT(0);
+		rtlpcipriv->btcoexist.b_fw_coexist_all_off = false;
+	}
+	if (b_dac_swing_on) {
+		h2c_parameter[0] |= BIT(1); /* Dac Swing default enable */
+	}
+	RT_TRACE(COMP_BT_COEXIST, DBG_TRACE,
+		("[BTCoex], turn 2-Ant+HID mode %s, DACSwing:%s, write 0x15=0x%x\n",
+		(b_enable ? "ON!!":"OFF!!"), (b_dac_swing_on ? "ON":"OFF"),
+		h2c_parameter[0]));
+
+	rtl8821ae_fill_h2c_cmd(hw, 0x15, 1, h2c_parameter);
+}
+
+void rtl8821ae_dm_bt_set_fw_tdma_ctrl(struct ieee80211_hw *hw,
+				bool b_enable, u8 ant_num, u8 nav_en, u8 dac_swing_en)
+{
+	struct rtl_priv *rtlpriv = rtl_priv(hw);
+	struct rtl_pci_priv *rtlpcipriv = rtl_pcipriv(hw);
+	u8 h2c_parameter[1] ={0};
+	u8 h2c_parameter1[1] = {0};
+
+	h2c_parameter[0] = 0;
+	h2c_parameter1[0] = 0;
+
+	if(b_enable) {
+		RT_TRACE(COMP_BT_COEXIST, DBG_TRACE,
+			("[BTCoex], set BT PTA update manager to trigger update!!\n"));
+		h2c_parameter1[0] |= BIT(0);
+
+		RT_TRACE(COMP_BT_COEXIST, DBG_TRACE,
+			("[BTCoex], turn TDMA mode ON!!\n"));
+		h2c_parameter[0] |= BIT(0);		/* function enable */
+		if (TDMA_1ANT == ant_num) {
+			RT_TRACE(COMP_BT_COEXIST, DBG_TRACE, ("[BTCoex], TDMA_1ANT\n"));
+			h2c_parameter[0] |= BIT(1);
+		} else if(TDMA_2ANT == ant_num) {
+			RT_TRACE(COMP_BT_COEXIST, DBG_TRACE, ("[BTCoex], TDMA_2ANT\n"));
+		} else {
+			RT_TRACE(COMP_BT_COEXIST, DBG_TRACE, ("[BTCoex], Unknown Ant\n"));
+		}
+
+		if (TDMA_NAV_OFF == nav_en) {
+			RT_TRACE(COMP_BT_COEXIST, DBG_TRACE, ("[BTCoex], TDMA_NAV_OFF\n"));
+		} else if (TDMA_NAV_ON == nav_en) {
+			RT_TRACE(COMP_BT_COEXIST, DBG_TRACE, ("[BTCoex], TDMA_NAV_ON\n"));
+			h2c_parameter[0] |= BIT(2);
+		}
+
+		if (TDMA_DAC_SWING_OFF == dac_swing_en) {
+			RT_TRACE(COMP_BT_COEXIST, DBG_TRACE,
+				("[BTCoex], TDMA_DAC_SWING_OFF\n"));
+		} else if(TDMA_DAC_SWING_ON == dac_swing_en) {
+			RT_TRACE(COMP_BT_COEXIST, DBG_TRACE,
+				("[BTCoex], TDMA_DAC_SWING_ON\n"));
+			h2c_parameter[0] |= BIT(4);
+		}
+		rtlpcipriv->btcoexist.b_fw_coexist_all_off = false;
+	} else {
+		RT_TRACE(COMP_BT_COEXIST, DBG_TRACE,
+			("[BTCoex], set BT PTA update manager to no update!!\n"));
+		RT_TRACE(COMP_BT_COEXIST, DBG_TRACE,
+			("[BTCoex], turn TDMA mode OFF!!\n"));
+	}
+
+	RT_TRACE(COMP_BT_COEXIST, DBG_TRACE,
+		("[BTCoex], FW2AntTDMA, write 0x26=0x%x\n", h2c_parameter1[0]));
+	rtl8821ae_fill_h2c_cmd(hw, 0x26, 1, h2c_parameter1);
+
+	RT_TRACE(COMP_BT_COEXIST, DBG_TRACE,
+		("[BTCoex], FW2AntTDMA, write 0x14=0x%x\n", h2c_parameter[0]));
+	rtl8821ae_fill_h2c_cmd(hw, 0x14, 1, h2c_parameter);
+
+	if (!b_enable) {
+		/* delay_ms(2);
+		 * PlatformEFIOWrite1Byte(Adapter, 0x778, 0x1); */
+	}
+}
+
+
+void rtl8821ae_dm_bt_set_fw_ignore_wlan_act( struct ieee80211_hw *hw, bool b_enable)
+{
+	struct rtl_priv *rtlpriv = rtl_priv(hw);
+	struct rtl_pci_priv *rtlpcipriv = rtl_pcipriv(hw);
+	u8 h2c_parameter[1] ={0};
+
+	if (b_enable) {
+		RT_TRACE(COMP_BT_COEXIST, DBG_TRACE, ("[BTCoex], BT Ignore Wlan_Act !!\n"));
+		h2c_parameter[0] |= BIT(0);		// function enable
+		rtlpcipriv->btcoexist.b_fw_coexist_all_off = false;
+	} else {
+		RT_TRACE(COMP_BT_COEXIST, DBG_TRACE, ("[BTCoex], BT don't ignore Wlan_Act !!\n"));
+	}
+
+	RT_TRACE(COMP_BT_COEXIST, DBG_TRACE,
+		("[BTCoex], set FW for BT Ignore Wlan_Act, write 0x25=0x%x\n",
+		h2c_parameter[0]));
+
+	rtl8821ae_fill_h2c_cmd(hw, 0x25, 1, h2c_parameter);
+}
+
+
+void rtl8821ae_dm_bt_set_fw_tra_tdma_ctrl(struct ieee80211_hw *hw,
+		bool b_enable, u8 ant_num, u8 nav_en)
+{
+	struct rtl_priv *rtlpriv = rtl_priv(hw);
+	struct rtl_pci_priv *rtlpcipriv = rtl_pcipriv(hw);
+	//struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
+
+	u8 h2c_parameter[2] ={0};
+
+
+	if (b_enable) {
+		RT_TRACE(COMP_BT_COEXIST, DBG_TRACE,
+			("[BTCoex], turn TTDMA mode ON!!\n"));
+		h2c_parameter[0] |= BIT(0);		// function enable
+		if (TDMA_1ANT == ant_num) {
+			RT_TRACE(COMP_BT_COEXIST, DBG_TRACE, ("[BTCoex], TTDMA_1ANT\n"));
+			h2c_parameter[0] |= BIT(1);
+		} else if (TDMA_2ANT == ant_num) {
+			RT_TRACE(COMP_BT_COEXIST, DBG_TRACE, ("[BTCoex], TTDMA_2ANT\n"));
+		} else {
+			RT_TRACE(COMP_BT_COEXIST, DBG_TRACE, ("[BTCoex], Unknown Ant\n"));
+		}
+
+		if (TDMA_NAV_OFF == nav_en) {
+			RT_TRACE(COMP_BT_COEXIST, DBG_TRACE, ("[BTCoex], TTDMA_NAV_OFF\n"));
+		} else if (TDMA_NAV_ON == nav_en) {
+			RT_TRACE(COMP_BT_COEXIST, DBG_TRACE, ("[BTCoex], TTDMA_NAV_ON\n"));
+			h2c_parameter[1] |= BIT(0);
+		}
+
+		rtlpcipriv->btcoexist.b_fw_coexist_all_off = false;
+	} else {
+		RT_TRACE(COMP_BT_COEXIST, DBG_TRACE,
+			("[BTCoex], turn TTDMA mode OFF!!\n"));
+	}
+
+	RT_TRACE(COMP_BT_COEXIST, DBG_TRACE,
+		("[BTCoex], FW Traditional TDMA, write 0x33=0x%x\n",
+		h2c_parameter[0] << 8| h2c_parameter[1]));
+
+	rtl8821ae_fill_h2c_cmd(hw, 0x33, 2, h2c_parameter);
+}
+
+
+void rtl8821ae_dm_bt_set_fw_dac_swing_level(struct ieee80211_hw *hw,
+													u8 dac_swing_lvl)
+{
+	struct rtl_priv *rtlpriv = rtl_priv(hw);
+	u8 h2c_parameter[1] ={0};
+	h2c_parameter[0] = dac_swing_lvl;
+
+	RT_TRACE(COMP_BT_COEXIST, DBG_TRACE,
+		("[BTCoex], Set Dac Swing Level=0x%x\n", dac_swing_lvl));
+	RT_TRACE(COMP_BT_COEXIST, DBG_TRACE,
+		("[BTCoex], write 0x29=0x%x\n", h2c_parameter[0]));
+
+	rtl8821ae_fill_h2c_cmd(hw, 0x29, 1, h2c_parameter);
+}
+
+void rtl8821ae_dm_bt_set_fw_bt_hid_info(struct ieee80211_hw *hw, bool b_enable)
+{
+	struct rtl_pci_priv *rtlpcipriv = rtl_pcipriv(hw);
+	struct rtl_priv *rtlpriv = rtl_priv(hw);
+	u8 h2c_parameter[1] ={0};
+	h2c_parameter[0] = 0;
+
+	if(b_enable){
+		h2c_parameter[0] |= BIT(0);
+		rtlpcipriv->btcoexist.b_fw_coexist_all_off = false;
+	}
+	RT_TRACE(COMP_BT_COEXIST, DBG_TRACE,
+		("[BTCoex], Set BT HID information=0x%x\n", b_enable));
+	RT_TRACE(COMP_BT_COEXIST, DBG_TRACE,
+		("[BTCoex], write 0x24=0x%x\n", h2c_parameter[0]));
+
+	rtl8821ae_fill_h2c_cmd(hw, 0x24, 1, h2c_parameter);
+}
+
+void rtl8821ae_dm_bt_set_fw_bt_retry_index(struct ieee80211_hw *hw,
+													u8 retry_index)
+{
+	struct rtl_priv *rtlpriv = rtl_priv(hw);
+	u8 h2c_parameter[1] ={0};
+	h2c_parameter[0] = retry_index;
+
+	RT_TRACE(COMP_BT_COEXIST, DBG_TRACE,
+		("[BTCoex], Set BT Retry Index=%d\n", retry_index));
+	RT_TRACE(COMP_BT_COEXIST, DBG_TRACE,
+		("[BTCoex], write 0x23=0x%x\n", h2c_parameter[0]));
+
+	rtl8821ae_fill_h2c_cmd(hw, 0x23, 1, h2c_parameter);
+}
+
+void rtl8821ae_dm_bt_set_fw_wlan_act(struct ieee80211_hw *hw,
+											u8 wlan_act_hi, u8 wlan_act_lo)
+{
+	struct rtl_priv *rtlpriv = rtl_priv(hw);
+	u8 h2c_parameter_hi[1] ={0};
+	u8 h2c_parameter_lo[1] ={0};
+	h2c_parameter_hi[0] = wlan_act_hi;
+	h2c_parameter_lo[0] = wlan_act_lo;
+
+	RT_TRACE(COMP_BT_COEXIST, DBG_TRACE,
+		("[BTCoex], Set WLAN_ACT Hi:Lo=0x%x/0x%x\n", wlan_act_hi, wlan_act_lo));
+	RT_TRACE(COMP_BT_COEXIST, DBG_TRACE,
+		("[BTCoex], write 0x22=0x%x\n", h2c_parameter_hi[0]));
+	RT_TRACE(COMP_BT_COEXIST, DBG_TRACE,
+		("[BTCoex], write 0x11=0x%x\n", h2c_parameter_lo[0]));
+
+	/* WLAN_ACT = High duration, unit:ms */
+	rtl8821ae_fill_h2c_cmd(hw, 0x22, 1, h2c_parameter_hi);
+	/*  WLAN_ACT = Low duration, unit:3*625us */
+	rtl8821ae_fill_h2c_cmd(hw, 0x11, 1, h2c_parameter_lo);
+}
+
+void rtl8821ae_dm_bt_set_bt_dm(struct ieee80211_hw *hw, struct btdm_8821ae *p_btdm)
+{
+	struct rtl_pci_priv	*rtlpcipriv = rtl_pcipriv(hw);
+	struct rtl_priv	*rtlpriv = rtl_priv(hw);
+	struct btdm_8821ae *p_btdm_8821ae = &hal_coex_8821ae.btdm;
+	u8 i;
+
+	bool b_fw_current_inpsmode = false;
+	bool b_fw_ps_awake = true;
+
+	rtlpriv->cfg->ops->get_hw_reg(hw, HW_VAR_FW_PSMODE_STATUS,
+				      (u8 *)(&b_fw_current_inpsmode));
+	rtlpriv->cfg->ops->get_hw_reg(hw, HW_VAR_FWLPS_RF_ON,
+				      (u8 *)(&b_fw_ps_awake));
+
+	/*
+	 * Check whether the new setting differs from the old one;
+	 * if they are identical, do not apply it again.
+	 */
+	if (memcmp(p_btdm_8821ae, p_btdm, sizeof(struct btdm_8821ae)) == 0) {
+		RT_TRACE(COMP_BT_COEXIST, DBG_DMESG,
+			("[BTCoex], the same coexist setting, return!!\n"));
+		return;
+	} else {	/* save the new coexist setting */
+		RT_TRACE(COMP_BT_COEXIST, DBG_DMESG,
+			("[BTCoex], UPDATE TO NEW COEX SETTING!!\n"));
+		RT_TRACE(COMP_BT_COEXIST, DBG_DMESG,
+			("[BTCoex], original/new bAllOff=0x%x/ 0x%x \n",
+			p_btdm_8821ae->b_all_off, p_btdm->b_all_off));
+		RT_TRACE(COMP_BT_COEXIST, DBG_DMESG,
+			("[BTCoex], original/new b_agc_table_en=0x%x/ 0x%x \n",
+			p_btdm_8821ae->b_agc_table_en, p_btdm->b_agc_table_en));
+		RT_TRACE(COMP_BT_COEXIST, DBG_DMESG,
+			("[BTCoex], original/new b_adc_back_off_on=0x%x/ 0x%x \n",
+			p_btdm_8821ae->b_adc_back_off_on, p_btdm->b_adc_back_off_on));
+		RT_TRACE(COMP_BT_COEXIST, DBG_DMESG,
+			("[BTCoex], original/new b2_ant_hid_en=0x%x/ 0x%x \n",
+			p_btdm_8821ae->b2_ant_hid_en, p_btdm->b2_ant_hid_en));
+		RT_TRACE(COMP_BT_COEXIST, DBG_DMESG,
+			("[BTCoex], original/new bLowPenaltyRateAdaptive=0x%x/ 0x%x \n",
+			p_btdm_8821ae->b_low_penalty_rate_adaptive,
+			p_btdm->b_low_penalty_rate_adaptive));
+		RT_TRACE(COMP_BT_COEXIST, DBG_DMESG,
+			("[BTCoex], original/new bRfRxLpfShrink=0x%x/ 0x%x \n",
+			p_btdm_8821ae->b_rf_rx_lpf_shrink, p_btdm->b_rf_rx_lpf_shrink));
+		RT_TRACE(COMP_BT_COEXIST, DBG_DMESG,
+			("[BTCoex], original/new bRejectAggrePkt=0x%x/ 0x%x \n",
+			p_btdm_8821ae->b_reject_aggre_pkt, p_btdm->b_reject_aggre_pkt));
+		RT_TRACE(COMP_BT_COEXIST, DBG_DMESG,
+			("[BTCoex], original/new b_tdma_on=0x%x/ 0x%x \n",
+			p_btdm_8821ae->b_tdma_on, p_btdm->b_tdma_on));
+		RT_TRACE(COMP_BT_COEXIST, DBG_DMESG,
+			("[BTCoex], original/new tdmaAnt=0x%x/ 0x%x \n",
+			p_btdm_8821ae->tdma_ant, p_btdm->tdma_ant));
+		RT_TRACE(COMP_BT_COEXIST, DBG_DMESG,
+			("[BTCoex], original/new tdmaNav=0x%x/ 0x%x \n",
+			p_btdm_8821ae->tdma_nav, p_btdm->tdma_nav));
+		RT_TRACE(COMP_BT_COEXIST, DBG_DMESG,
+			("[BTCoex], original/new tdma_dac_swing=0x%x/ 0x%x \n",
+			p_btdm_8821ae->tdma_dac_swing, p_btdm->tdma_dac_swing));
+		RT_TRACE(COMP_BT_COEXIST, DBG_DMESG,
+			("[BTCoex], original/new fw_dac_swing_lvl=0x%x/ 0x%x \n",
+			p_btdm_8821ae->fw_dac_swing_lvl, p_btdm->fw_dac_swing_lvl));
+
+		RT_TRACE(COMP_BT_COEXIST, DBG_DMESG,
+			("[BTCoex], original/new bTraTdmaOn=0x%x/ 0x%x \n",
+			p_btdm_8821ae->b_tra_tdma_on, p_btdm->b_tra_tdma_on));
+		RT_TRACE(COMP_BT_COEXIST, DBG_DMESG,
+			("[BTCoex], original/new traTdmaAnt=0x%x/ 0x%x \n",
+			p_btdm_8821ae->tra_tdma_ant, p_btdm->tra_tdma_ant));
+		RT_TRACE(COMP_BT_COEXIST, DBG_DMESG,
+			("[BTCoex], original/new traTdmaNav=0x%x/ 0x%x \n",
+			p_btdm_8821ae->tra_tdma_nav, p_btdm->tra_tdma_nav));
+		RT_TRACE(COMP_BT_COEXIST, DBG_DMESG,
+			("[BTCoex], original/new bPsTdmaOn=0x%x/ 0x%x \n",
+			p_btdm_8821ae->b_ps_tdma_on, p_btdm->b_ps_tdma_on));
+		for (i = 0; i < 5; i++) {
+			RT_TRACE(COMP_BT_COEXIST, DBG_DMESG,
+				("[BTCoex], original/new psTdmaByte[%d]=0x%x/ 0x%x \n",
+				i, p_btdm_8821ae->ps_tdma_byte[i], p_btdm->ps_tdma_byte[i]));
+		}
+		RT_TRACE(COMP_BT_COEXIST, DBG_DMESG,
+			("[BTCoex], original/new bIgnoreWlanAct=0x%x/ 0x%x \n",
+			p_btdm_8821ae->b_ignore_wlan_act, p_btdm->b_ignore_wlan_act));
+
+
+		RT_TRACE(COMP_BT_COEXIST, DBG_DMESG,
+			("[BTCoex], original/new bPtaOn=0x%x/ 0x%x \n",
+			p_btdm_8821ae->b_pta_on, p_btdm->b_pta_on));
+		RT_TRACE(COMP_BT_COEXIST, DBG_DMESG,
+			("[BTCoex], original/new val_0x6c0=0x%x/ 0x%x \n",
+			p_btdm_8821ae->val_0x6c0, p_btdm->val_0x6c0));
+		RT_TRACE(COMP_BT_COEXIST, DBG_DMESG,
+			("[BTCoex], original/new val_0x6c8=0x%x/ 0x%x \n",
+			p_btdm_8821ae->val_0x6c8, p_btdm->val_0x6c8));
+		RT_TRACE(COMP_BT_COEXIST, DBG_DMESG,
+			("[BTCoex], original/new val_0x6cc=0x%x/ 0x%x \n",
+			p_btdm_8821ae->val_0x6cc, p_btdm->val_0x6cc));
+		RT_TRACE(COMP_BT_COEXIST, DBG_DMESG,
+			("[BTCoex], original/new b_sw_dac_swing_on=0x%x/ 0x%x \n",
+			p_btdm_8821ae->b_sw_dac_swing_on, p_btdm->b_sw_dac_swing_on));
+		RT_TRACE(COMP_BT_COEXIST, DBG_DMESG,
+			("[BTCoex], original/new sw_dac_swing_lvl=0x%x/ 0x%x \n",
+			p_btdm_8821ae->sw_dac_swing_lvl, p_btdm->sw_dac_swing_lvl));
+		RT_TRACE(COMP_BT_COEXIST, DBG_DMESG,
+			("[BTCoex], original/new wlanActHi=0x%x/ 0x%x \n",
+			p_btdm_8821ae->wlan_act_hi, p_btdm->wlan_act_hi));
+		RT_TRACE(COMP_BT_COEXIST, DBG_DMESG,
+			("[BTCoex], original/new wlanActLo=0x%x/ 0x%x \n",
+			p_btdm_8821ae->wlan_act_lo, p_btdm->wlan_act_lo));
+		RT_TRACE(COMP_BT_COEXIST, DBG_DMESG,
+			("[BTCoex], original/new btRetryIndex=0x%x/ 0x%x \n",
+			p_btdm_8821ae->bt_retry_index, p_btdm->bt_retry_index));
+
+		memcpy(p_btdm_8821ae, p_btdm, sizeof(struct btdm_8821ae));
+	}
+	/*
+	 * Here we only consider the case when a BT operation
+	 * (inquiry/paging/pairing) is ongoing;
+	 * then we only need to turn off TDMA.
+	 */
+
+	if (rtlpcipriv->btcoexist.b_hold_for_bt_operation) {
+		RT_TRACE(COMP_BT_COEXIST, DBG_TRACE,
+			("[BTCoex], set to ignore wlanAct for BT OP!!\n"));
+		rtl8821ae_dm_bt_set_fw_ignore_wlan_act(hw, true);
+		return;
+	}
+
+	if (p_btdm->b_all_off) {
+		RT_TRACE(COMP_BT_COEXIST, DBG_TRACE,
+			("[BTCoex], disable all coexist mechanism !!\n"));
+		rtl8821ae_btdm_coex_all_off(hw);
+		return;
+	}
+
+	rtl8821ae_dm_bt_reject_ap_aggregated_packet(hw, p_btdm->b_reject_aggre_pkt);
+
+	if(p_btdm->b_low_penalty_rate_adaptive)
+		rtl8821ae_dm_bt_set_sw_penalty_tx_rate_adaptive(hw,
+			BT_TX_RATE_ADAPTIVE_LOW_PENALTY);
+	else
+		rtl8821ae_dm_bt_set_sw_penalty_tx_rate_adaptive(hw,
+			BT_TX_RATE_ADAPTIVE_NORMAL);
+
+	if(p_btdm->b_rf_rx_lpf_shrink)
+		rtl8821ae_dm_bt_set_sw_rf_rx_lpf_corner(hw, BT_RF_RX_LPF_CORNER_SHRINK);
+	else
+		rtl8821ae_dm_bt_set_sw_rf_rx_lpf_corner(hw, BT_RF_RX_LPF_CORNER_RESUME);
+
+	if(p_btdm->b_agc_table_en)
+		rtl8821ae_dm_bt_agc_table(hw, BT_AGCTABLE_ON);
+	else
+		rtl8821ae_dm_bt_agc_table(hw, BT_AGCTABLE_OFF);
+
+	if(p_btdm->b_adc_back_off_on)
+		rtl8821ae_dm_bt_bb_back_off_level(hw, BT_BB_BACKOFF_ON);
+	else
+		rtl8821ae_dm_bt_bb_back_off_level(hw, BT_BB_BACKOFF_OFF);
+
+	rtl8821ae_dm_bt_set_fw_bt_retry_index(hw, p_btdm->bt_retry_index);
+
+	rtl8821ae_dm_bt_set_fw_dac_swing_level(hw, p_btdm->fw_dac_swing_lvl);
+	rtl8821ae_dm_bt_set_fw_wlan_act(hw, p_btdm->wlan_act_hi, p_btdm->wlan_act_lo);
+
+	rtl8821ae_dm_bt_set_coex_table(hw, p_btdm->val_0x6c0,
+		p_btdm->val_0x6c8, p_btdm->val_0x6cc);
+	rtl8821ae_dm_bt_set_hw_pta_mode(hw, p_btdm->b_pta_on);
+
+	/*
+	 * Note: There is a constraint between TDMA and 2AntHID:
+	 * only one of them can be turned on at a time.
+	 * Turn off the mechanisms that must be disabled first,
+	 * then turn on the ones that must be enabled.
+	 */
+#if 1
+	if(p_btdm->b2_ant_hid_en) {
+		// turn off tdma
+		rtl8821ae_dm_bt_set_fw_tra_tdma_ctrl(hw, p_btdm->b_tra_tdma_on,
+							p_btdm->tra_tdma_ant, p_btdm->tra_tdma_nav);
+		rtl8821ae_dm_bt_set_fw_tdma_ctrl(hw, false, p_btdm->tdma_ant,
+							p_btdm->tdma_nav, p_btdm->tdma_dac_swing);
+
+		// turn off Pstdma
+		rtl8821ae_dm_bt_set_fw_ignore_wlan_act(hw, p_btdm->b_ignore_wlan_act);
+		rtl8821ae_dm_bt_set_fw_3a(hw, 0x0, 0x0, 0x0, 0x8, 0x0); 	// Antenna control by PTA, 0x870 = 0x300.
+
+		// turn on 2AntHid
+		rtl8821ae_dm_bt_set_fw_bt_hid_info(hw, true);
+		rtl8821ae_dm_bt_set_fw_2_ant_hid(hw, true, true);
+	} else if(p_btdm->b_tdma_on) {
+		// turn off 2AntHid
+		rtl8821ae_dm_bt_set_fw_bt_hid_info(hw, false);
+		rtl8821ae_dm_bt_set_fw_2_ant_hid(hw, false, false);
+
+		// turn off pstdma
+		rtl8821ae_dm_bt_set_fw_ignore_wlan_act(hw, p_btdm->b_ignore_wlan_act);
+		rtl8821ae_dm_bt_set_fw_3a(hw, 0x0, 0x0, 0x0, 0x8, 0x0); 	// Antenna control by PTA, 0x870 = 0x300.
+
+		// turn on tdma
+		rtl8821ae_dm_bt_set_fw_tra_tdma_ctrl(hw, p_btdm->b_tra_tdma_on, p_btdm->tra_tdma_ant, p_btdm->tra_tdma_nav);
+		rtl8821ae_dm_bt_set_fw_tdma_ctrl(hw, true, p_btdm->tdma_ant, p_btdm->tdma_nav, p_btdm->tdma_dac_swing);
+	} else if(p_btdm->b_ps_tdma_on) {
+		// turn off 2AntHid
+		rtl8821ae_dm_bt_set_fw_bt_hid_info(hw, false);
+		rtl8821ae_dm_bt_set_fw_2_ant_hid(hw, false, false);
+
+		// turn off tdma
+		rtl8821ae_dm_bt_set_fw_tra_tdma_ctrl(hw, p_btdm->b_tra_tdma_on, p_btdm->tra_tdma_ant, p_btdm->tra_tdma_nav);
+		rtl8821ae_dm_bt_set_fw_tdma_ctrl(hw, false, p_btdm->tdma_ant, p_btdm->tdma_nav, p_btdm->tdma_dac_swing);
+
+		// turn on pstdma
+		rtl8821ae_dm_bt_set_fw_ignore_wlan_act(hw, p_btdm->b_ignore_wlan_act);
+		rtl8821ae_dm_bt_set_fw_3a(hw,
+			p_btdm->ps_tdma_byte[0],
+			p_btdm->ps_tdma_byte[1],
+			p_btdm->ps_tdma_byte[2],
+			p_btdm->ps_tdma_byte[3],
+			p_btdm->ps_tdma_byte[4]);
+	} else {
+		// turn off 2AntHid
+		rtl8821ae_dm_bt_set_fw_bt_hid_info(hw, false);
+		rtl8821ae_dm_bt_set_fw_2_ant_hid(hw, false, false);
+
+		// turn off tdma
+		rtl8821ae_dm_bt_set_fw_tra_tdma_ctrl(hw, p_btdm->b_tra_tdma_on, p_btdm->tra_tdma_ant, p_btdm->tra_tdma_nav);
+		rtl8821ae_dm_bt_set_fw_tdma_ctrl(hw, false, p_btdm->tdma_ant, p_btdm->tdma_nav, p_btdm->tdma_dac_swing);
+
+		// turn off pstdma
+		rtl8821ae_dm_bt_set_fw_ignore_wlan_act(hw, p_btdm->b_ignore_wlan_act);
+		rtl8821ae_dm_bt_set_fw_3a(hw, 0x0, 0x0, 0x0, 0x8, 0x0); 	// Antenna control by PTA, 0x870 = 0x300.
+	}
+#else
+	if (p_btdm->b_tdma_on) {
+		if(p_btdm->b_ps_tdma_on) {
+		} else {
+			rtl8821ae_dm_bt_set_fw_3a(hw, 0x0, 0x0, 0x0, 0x8, 0x0);
+		}
+		/* Turn off 2AntHID first then turn tdma ON */
+		rtl8821ae_dm_bt_set_fw_bt_hid_info(hw, false);
+		rtl8821ae_dm_bt_set_fw_2_ant_hid(hw, false, false);
+		rtl8821ae_dm_bt_set_fw_tra_tdma_ctrl(hw, p_btdm->b_tra_tdma_on, p_btdm->tra_tdma_ant, p_btdm->tra_tdma_nav);
+		rtl8821ae_dm_bt_set_fw_tdma_ctrl(hw, true,
+			p_btdm->tdma_ant, p_btdm->tdma_nav, p_btdm->tdma_dac_swing);
+	} else {
+		/* Turn off tdma first then turn 2AntHID ON if need */
+		rtl8821ae_dm_bt_set_fw_tra_tdma_ctrl(hw, p_btdm->b_tra_tdma_on, p_btdm->tra_tdma_ant, p_btdm->tra_tdma_nav);
+		rtl8821ae_dm_bt_set_fw_tdma_ctrl(hw, false, p_btdm->tdma_ant,
+			p_btdm->tdma_nav, p_btdm->tdma_dac_swing);
+		if (p_btdm->b2_ant_hid_en) {
+			rtl8821ae_dm_bt_set_fw_bt_hid_info(hw, true);
+			rtl8821ae_dm_bt_set_fw_2_ant_hid(hw, true, true);
+		} else {
+			rtl8821ae_dm_bt_set_fw_bt_hid_info(hw, false);
+			rtl8821ae_dm_bt_set_fw_2_ant_hid(hw, false, false);
+		}
+		if(p_btdm->b_ps_tdma_on) {
+			rtl8821ae_dm_bt_set_fw_3a(hw, p_btdm->ps_tdma_byte[0], p_btdm->ps_tdma_byte[1],
+				p_btdm->ps_tdma_byte[2], p_btdm->ps_tdma_byte[3], p_btdm->ps_tdma_byte[4]);
+		} else {
+			rtl8821ae_dm_bt_set_fw_3a(hw, 0x0, 0x0, 0x0, 0x8, 0x0);
+		}
+	}
+#endif
+
+	/*
+	 * Note:
+	 * Add a delay to make sure the SW DAC swing can be set successfully,
+	 * because rtl8821ae_dm_bt_set_fw_2_ant_hid() and
+	 * rtl8821ae_dm_bt_set_fw_tdma_ctrl() will overwrite register 0x880.
+	 */
+	mdelay(30);
+	rtl8821ae_dm_bt_set_sw_full_time_dac_swing(hw,
+		p_btdm->b_sw_dac_swing_on, p_btdm->sw_dac_swing_lvl);
+	rtl8821ae_dm_bt_set_fw_dec_bt_pwr(hw, p_btdm->b_dec_bt_pwr);
+}
+
+void rtl8821ae_dm_bt_bt_state_update_2_ant_hid(struct ieee80211_hw *hw)
+{
+	struct rtl_pci_priv *rtlpcipriv = rtl_pcipriv(hw);
+	struct rtl_priv *rtlpriv = rtl_priv(hw);
+
+	RT_TRACE(COMP_BT_COEXIST, DBG_TRACE, ("[BTCoex], HID busy!!\n"));
+	rtlpcipriv->btcoexist.b_bt_busy = true;
+	rtlpcipriv->btcoexist.current_state &= ~BT_COEX_STATE_BT_IDLE;
+}
+
+void rtl8821ae_dm_bt_bt_state_update_2_ant_pan(struct ieee80211_hw *hw)
+{
+	struct rtl_priv *rtlpriv = rtl_priv(hw);
+	struct rtl_pci_priv *rtlpcipriv = rtl_pcipriv(hw);
+	bool b_idle = false;
+
+	if (hal_coex_8821ae.low_priority_tx >=
+		hal_coex_8821ae.low_priority_rx) {
+		if((hal_coex_8821ae.low_priority_tx/
+			hal_coex_8821ae.low_priority_rx) > 10) {
+			b_idle = true;
+		}
+	} else {
+		if((hal_coex_8821ae.low_priority_rx/
+			hal_coex_8821ae.low_priority_tx) > 10) {
+			b_idle = true;
+		}
+	}
+
+	if(!b_idle) {
+		RT_TRACE(COMP_BT_COEXIST, DBG_TRACE, ("[BTCoex], PAN busy!!\n"));
+		rtlpcipriv->btcoexist.b_bt_busy = true;
+		rtlpcipriv->btcoexist.current_state &= ~BT_COEX_STATE_BT_IDLE;
+	} else {
+		RT_TRACE(COMP_BT_COEXIST, DBG_TRACE, ("[BTCoex], PAN idle!!\n"));
+	}
+}
+
+void rtl8821ae_dm_bt_2_ant_sco_action(struct ieee80211_hw *hw)
+{
+	struct rtl_priv *rtlpriv = rtl_priv(hw);
+	struct rtl_phy *rtlphy = &(rtlpriv->phy);
+	struct btdm_8821ae btdm8821ae;
+	u8 bt_rssi_state;
+
+	rtl8821ae_dm_bt_btdm_structure_reload(hw, &btdm8821ae);
+	btdm8821ae.b_rf_rx_lpf_shrink = true;
+	btdm8821ae.b_low_penalty_rate_adaptive = true;
+	btdm8821ae.b_reject_aggre_pkt = false;
+
+	if (rtlphy->current_chan_bw == HT_CHANNEL_WIDTH_20_40) {
+		RT_TRACE(COMP_BT_COEXIST, DBG_DMESG, ("HT40\n"));
+		/*  coex table */
+		btdm8821ae.val_0x6c0 = 0x5a5aaaaa;
+		btdm8821ae.val_0x6c8 = 0xcc;
+		btdm8821ae.val_0x6cc = 0x3;
+		/* sw mechanism */
+		btdm8821ae.b_agc_table_en = false;
+		btdm8821ae.b_adc_back_off_on = true;
+		btdm8821ae.b_sw_dac_swing_on = false;
+		/* fw mechanism */
+		btdm8821ae.b_tdma_on = false;
+		btdm8821ae.tdma_dac_swing = TDMA_DAC_SWING_OFF;
+	} else {
+		RT_TRACE(COMP_BT_COEXIST, DBG_DMESG, ("HT20 or Legacy\n"));
+		bt_rssi_state
+			= rtl8821ae_dm_bt_check_coex_rssi_state(hw, 2, BT_FW_COEX_THRESH_47, 0);
+
+		/* coex table */
+		btdm8821ae.val_0x6c0 = 0x5a5aaaaa;
+		btdm8821ae.val_0x6c8 = 0xcc;
+		btdm8821ae.val_0x6cc = 0x3;
+		/* sw mechanism */
+		if ((bt_rssi_state == BT_RSSI_STATE_HIGH) ||
+			(bt_rssi_state == BT_RSSI_STATE_STAY_HIGH) ) {
+			btdm8821ae.b_agc_table_en = true;
+			btdm8821ae.b_adc_back_off_on = true;
+			btdm8821ae.b_sw_dac_swing_on = false;
+		} else {
+			btdm8821ae.b_agc_table_en = false;
+			btdm8821ae.b_adc_back_off_on = false;
+			btdm8821ae.b_sw_dac_swing_on = false;
+		}
+		/* fw mechanism */
+		btdm8821ae.b_tdma_on = false;
+		btdm8821ae.tdma_dac_swing = TDMA_DAC_SWING_OFF;
+	}
+
+	if (rtl8821ae_dm_bt_need_to_dec_bt_pwr(hw)) {
+		btdm8821ae.b_dec_bt_pwr = true;
+	}
+
+	if(rtl8821ae_dm_bt_is_coexist_state_changed(hw))
+		rtl8821ae_dm_bt_set_bt_dm(hw, &btdm8821ae);
+}
+
+void rtl8821ae_dm_bt_2_ant_hid_action(struct ieee80211_hw *hw)
+{
+	struct rtl_priv *rtlpriv = rtl_priv(hw);
+	struct rtl_phy *rtlphy = &(rtlpriv->phy);
+	struct btdm_8821ae btdm8821ae;
+	u8 bt_rssi_state;
+
+	rtl8821ae_dm_bt_btdm_structure_reload(hw, &btdm8821ae);
+
+	btdm8821ae.b_rf_rx_lpf_shrink = true;
+	btdm8821ae.b_low_penalty_rate_adaptive = true;
+	btdm8821ae.b_reject_aggre_pkt = false;
+
+	// coex table
+	btdm8821ae.val_0x6c0 = 0x55555555;
+	btdm8821ae.val_0x6c8 = 0xffff;
+	btdm8821ae.val_0x6cc = 0x3;
+	btdm8821ae.b_ignore_wlan_act = true;
+
+	if(rtlphy->current_chan_bw == HT_CHANNEL_WIDTH_20_40) {
+		RT_TRACE(COMP_BT_COEXIST, DBG_DMESG, ("HT40\n"));
+		// sw mechanism
+		btdm8821ae.b_agc_table_en = false;
+		btdm8821ae.b_adc_back_off_on = false;
+		btdm8821ae.b_sw_dac_swing_on = false;
+
+		// fw mechanism
+		btdm8821ae.b_ps_tdma_on = true;
+		btdm8821ae.ps_tdma_byte[0] = 0xa3;
+		btdm8821ae.ps_tdma_byte[1] = 0xf;
+		btdm8821ae.ps_tdma_byte[2] = 0xf;
+		btdm8821ae.ps_tdma_byte[3] = 0x0;
+		btdm8821ae.ps_tdma_byte[4] = 0x80;
+
+		btdm8821ae.b_tra_tdma_on = false;
+		btdm8821ae.b_tdma_on = false;
+		btdm8821ae.tdma_dac_swing = TDMA_DAC_SWING_OFF;
+		btdm8821ae.b2_ant_hid_en = false;
+	} else {
+		RT_TRACE(COMP_BT_COEXIST, DBG_DMESG, ("HT20 or Legacy\n"));
+		bt_rssi_state =
+			rtl8821ae_dm_bt_check_coex_rssi_state(hw, 2, 47, 0);
+
+		if( (bt_rssi_state == BT_RSSI_STATE_HIGH) ||
+			(bt_rssi_state == BT_RSSI_STATE_STAY_HIGH) ) {
+			RT_TRACE(COMP_BT_COEXIST, DBG_DMESG, ("Wifi rssi high \n"));
+			// sw mechanism
+			btdm8821ae.b_agc_table_en = false;
+			btdm8821ae.b_adc_back_off_on = false;
+			btdm8821ae.b_sw_dac_swing_on = true;
+			btdm8821ae.sw_dac_swing_lvl = 0x20;
+
+			// fw mechanism
+			btdm8821ae.b_ps_tdma_on = false;
+			btdm8821ae.b_tdma_on = false;
+			btdm8821ae.tdma_dac_swing = TDMA_DAC_SWING_OFF;
+			btdm8821ae.b2_ant_hid_en = false;
+		} else {
+			RT_TRACE(COMP_BT_COEXIST, DBG_DMESG, ("Wifi rssi low \n"));
+			// sw mechanism
+			btdm8821ae.b_agc_table_en = false;
+			btdm8821ae.b_adc_back_off_on = false;
+			btdm8821ae.b_sw_dac_swing_on = false;
+
+			// fw mechanism
+			btdm8821ae.b_ps_tdma_on = false;
+			btdm8821ae.b_tdma_on = false;
+			btdm8821ae.tdma_dac_swing = TDMA_DAC_SWING_OFF;
+			btdm8821ae.b2_ant_hid_en = true;
+			btdm8821ae.fw_dac_swing_lvl = 0x20;
+		}
+	}
+
+	if (rtl8821ae_dm_bt_need_to_dec_bt_pwr(hw)) {
+		btdm8821ae.b_dec_bt_pwr = true;
+	}
+
+	if (rtl8821ae_dm_bt_is_coexist_state_changed(hw)) {
+		rtl8821ae_dm_bt_set_bt_dm(hw, &btdm8821ae);
+	}
+}
+
+
+void rtl8821ae_dm_bt_2_ant_2_dp_action_no_profile(struct ieee80211_hw *hw)
+{
+	struct rtl_priv *rtlpriv = rtl_priv(hw);
+	struct rtl_phy *rtlphy = &(rtlpriv->phy);
+	struct btdm_8821ae btdm8821ae;
+	u8 bt_rssi_state;
+
+	rtl8821ae_dm_bt_btdm_structure_reload(hw, &btdm8821ae);
+
+	btdm8821ae.b_rf_rx_lpf_shrink = true;
+	btdm8821ae.b_low_penalty_rate_adaptive = true;
+	btdm8821ae.b_reject_aggre_pkt = false;
+
+	if (rtlphy->current_chan_bw == HT_CHANNEL_WIDTH_20_40) {
+		RT_TRACE(COMP_BT_COEXIST, DBG_TRACE, ("HT40\n"));
+		if (rtl8821ae_dm_bt_is_wifi_up_link(hw)) {
+			RT_TRACE(COMP_BT_COEXIST, DBG_TRACE, ("Wifi Uplink\n"));
+			/* coex table */
+			btdm8821ae.val_0x6c0 = 0x5a5a5a5a;
+			btdm8821ae.val_0x6c8 = 0xcccc;
+			btdm8821ae.val_0x6cc = 0x3;
+			// sw mechanism
+			btdm8821ae.b_agc_table_en = false;
+			btdm8821ae.b_adc_back_off_on = true;
+			btdm8821ae.b_sw_dac_swing_on = false;
+			// fw mechanism
+			btdm8821ae.b_tra_tdma_on = true;
+			btdm8821ae.b_tdma_on = true;
+			btdm8821ae.tdma_dac_swing = TDMA_DAC_SWING_ON;
+			btdm8821ae.b2_ant_hid_en = false;
+			//btSpec = BTHCI_GetBTCoreSpecByProf(Adapter, BT_PROFILE_A2DP);
+			//if(btSpec >= BT_SPEC_2_1_EDR)
+			{
+				btdm8821ae.wlan_act_hi = 0x10;
+				btdm8821ae.wlan_act_lo = 0x10;
+			}
+			//else
+			//{
+				//btdm8821ae.wlanActHi = 0x20;
+				//btdm8821ae.wlanActLo = 0x20;
+			//}
+			btdm8821ae.bt_retry_index = 2;
+			btdm8821ae.fw_dac_swing_lvl = 0x18;
+		} else {
+			RT_TRACE(COMP_BT_COEXIST, DBG_TRACE, ("Wifi Downlink\n"));
+			// coex table
+			btdm8821ae.val_0x6c0 = 0x5a5a5a5a;
+			btdm8821ae.val_0x6c8 = 0xcc;
+			btdm8821ae.val_0x6cc = 0x3;
+			// sw mechanism
+			btdm8821ae.b_agc_table_en = false;
+			btdm8821ae.b_adc_back_off_on = true;
+			btdm8821ae.b_sw_dac_swing_on = false;
+			// fw mechanism
+			btdm8821ae.b_tra_tdma_on = true;
+			btdm8821ae.b_tdma_on = true;
+			btdm8821ae.tdma_dac_swing = TDMA_DAC_SWING_ON;
+			btdm8821ae.b2_ant_hid_en = false;
+			//btSpec = BTHCI_GetBTCoreSpecByProf(Adapter, BT_PROFILE_A2DP);
+			//if(btSpec >= BT_SPEC_2_1_EDR)
+			{
+				btdm8821ae.wlan_act_hi = 0x10;
+				btdm8821ae.wlan_act_lo = 0x10;
+			}
+			//else
+			//{
+			//	btdm8821ae.wlanActHi = 0x20;
+			//	btdm8821ae.wlanActLo = 0x20;
+			//}
+			btdm8821ae.bt_retry_index = 2;
+			btdm8821ae.fw_dac_swing_lvl = 0x40;
+		}
+	} else {
+		RT_TRACE(COMP_BT_COEXIST, DBG_TRACE, ("HT20 or Legacy\n"));
+		bt_rssi_state = rtl8821ae_dm_bt_check_coex_rssi_state(hw, 2, BT_FW_COEX_THRESH_47, 0);
+
+		if(rtl8821ae_dm_bt_is_wifi_up_link(hw))
+		{
+			RT_TRACE(COMP_BT_COEXIST, DBG_TRACE, ("Wifi Uplink\n"));
+			// coex table
+			btdm8821ae.val_0x6c0 = 0x5a5a5a5a;
+			btdm8821ae.val_0x6c8 = 0xcccc;
+			btdm8821ae.val_0x6cc = 0x3;
+			// sw mechanism
+			if( (bt_rssi_state == BT_RSSI_STATE_HIGH) ||
+				(bt_rssi_state == BT_RSSI_STATE_STAY_HIGH) )
+			{
+				RT_TRACE(COMP_BT_COEXIST, DBG_TRACE, ("Wifi rssi high \n"));
+				btdm8821ae.b_agc_table_en = true;
+				btdm8821ae.b_adc_back_off_on = true;
+				btdm8821ae.b_sw_dac_swing_on = false;
+			} else {
+				RT_TRACE(COMP_BT_COEXIST, DBG_TRACE, ("Wifi rssi low \n"));
+				btdm8821ae.b_agc_table_en = false;
+				btdm8821ae.b_adc_back_off_on = false;
+				btdm8821ae.b_sw_dac_swing_on = false;
+			}
+			// fw mechanism
+			btdm8821ae.b_tra_tdma_on = true;
+			btdm8821ae.b_tdma_on = true;
+			btdm8821ae.tdma_dac_swing = TDMA_DAC_SWING_ON;
+			btdm8821ae.b2_ant_hid_en = false;
+			//btSpec = BTHCI_GetBTCoreSpecByProf(Adapter, BT_PROFILE_A2DP);
+			//if(btSpec >= BT_SPEC_2_1_EDR)
+			{
+				btdm8821ae.wlan_act_hi = 0x10;
+				btdm8821ae.wlan_act_lo = 0x10;
+			}
+			//else
+			//{
+				//btdm8821ae.wlanActHi = 0x20;
+				//btdm8821ae.wlanActLo = 0x20;
+			//}
+			btdm8821ae.bt_retry_index = 2;
+			btdm8821ae.fw_dac_swing_lvl = 0x18;
+		}
+		else
+		{
+			RT_TRACE(COMP_BT_COEXIST, DBG_TRACE, ("Wifi Downlink\n"));
+			// coex table
+			btdm8821ae.val_0x6c0 = 0x5a5a5a5a;
+			btdm8821ae.val_0x6c8 = 0xcc;
+			btdm8821ae.val_0x6cc = 0x3;
+			// sw mechanism
+			if( (bt_rssi_state == BT_RSSI_STATE_HIGH) ||
+				(bt_rssi_state == BT_RSSI_STATE_STAY_HIGH) )
+			{
+				RT_TRACE(COMP_BT_COEXIST, DBG_TRACE, ("Wifi rssi high \n"));
+				btdm8821ae.b_agc_table_en = true;
+				btdm8821ae.b_adc_back_off_on = true;
+				btdm8821ae.b_sw_dac_swing_on = false;
+			}
+			else
+			{
+				RT_TRACE(COMP_BT_COEXIST, DBG_TRACE, ("Wifi rssi low \n"));
+				btdm8821ae.b_agc_table_en = false;
+				btdm8821ae.b_adc_back_off_on = false;
+				btdm8821ae.b_sw_dac_swing_on = false;
+			}
+			// fw mechanism
+			btdm8821ae.b_tra_tdma_on = true;
+			btdm8821ae.b_tdma_on = true;
+			btdm8821ae.tdma_dac_swing = TDMA_DAC_SWING_ON;
+			btdm8821ae.b2_ant_hid_en = false;
+			//btSpec = BTHCI_GetBTCoreSpecByProf(Adapter, BT_PROFILE_A2DP);
+			//if(btSpec >= BT_SPEC_2_1_EDR)
+			{
+				btdm8821ae.wlan_act_hi = 0x10;
+				btdm8821ae.wlan_act_lo = 0x10;
+			}
+			//else
+			//{
+				//btdm8821ae.wlanActHi = 0x20;
+				//btdm8821ae.wlanActLo = 0x20;
+			//}
+			btdm8821ae.bt_retry_index = 2;
+			btdm8821ae.fw_dac_swing_lvl = 0x40;
+		}
+	}
+
+	if (rtl8821ae_dm_bt_need_to_dec_bt_pwr(hw)) {
+		btdm8821ae.b_dec_bt_pwr = true;
+	}
+
+	if (rtl8821ae_dm_bt_is_coexist_state_changed(hw)) {
+		rtl8821ae_dm_bt_set_bt_dm(hw, &btdm8821ae);
+	}
+}
+
+
+//============================================================
+// extern functions starting with BTDM_
+//============================================================
+u32 rtl8821ae_dm_bt_tx_rx_couter_h(struct ieee80211_hw *hw)
+{
+	u32	counters=0;
+
+	counters = hal_coex_8821ae.high_priority_tx + hal_coex_8821ae.high_priority_rx ;
+	return counters;
+}
+
+u32 rtl8821ae_dm_bt_tx_rx_couter_l(struct ieee80211_hw *hw)
+{
+	u32 counters=0;
+
+	counters = hal_coex_8821ae.low_priority_tx + hal_coex_8821ae.low_priority_rx ;
+	return counters;
+}
+
+u8 rtl8821ae_dm_bt_bt_tx_rx_counter_level(struct ieee80211_hw *hw)
+{
+	struct rtl_priv *rtlpriv = rtl_priv(hw);
+	struct rtl_pci_priv *rtlpcipriv = rtl_pcipriv(hw);
+	u32	bt_tx_rx_cnt = 0;
+	u8	bt_tx_rx_cnt_lvl = 0;
+
+	bt_tx_rx_cnt = rtl8821ae_dm_bt_tx_rx_couter_h(hw)
+				+ rtl8821ae_dm_bt_tx_rx_couter_l(hw);
+	RT_TRACE(COMP_BT_COEXIST, DBG_DMESG,
+		("[BTCoex], BT TxRx Counters = %d\n", bt_tx_rx_cnt));
+
+	rtlpcipriv->btcoexist.current_state_h &= ~\
+		 (BT_COEX_STATE_BT_CNT_LEVEL_0 | BT_COEX_STATE_BT_CNT_LEVEL_1|
+		  BT_COEX_STATE_BT_CNT_LEVEL_2);
+
+	if (bt_tx_rx_cnt >= BT_TXRX_CNT_THRES_3) {
+		RT_TRACE(COMP_BT_COEXIST, DBG_DMESG,
+			("[BTCoex], BT TxRx Counters at level 3\n"));
+		bt_tx_rx_cnt_lvl = BT_TXRX_CNT_LEVEL_3;
+		rtlpcipriv->btcoexist.current_state_h |= BT_COEX_STATE_BT_CNT_LEVEL_3;
+	} else if(bt_tx_rx_cnt >= BT_TXRX_CNT_THRES_2) {
+		RT_TRACE(COMP_BT_COEXIST, DBG_DMESG,
+			("[BTCoex], BT TxRx Counters at level 2\n"));
+		bt_tx_rx_cnt_lvl = BT_TXRX_CNT_LEVEL_2;
+		rtlpcipriv->btcoexist.current_state_h |= BT_COEX_STATE_BT_CNT_LEVEL_2;
+	} else if(bt_tx_rx_cnt >= BT_TXRX_CNT_THRES_1) {
+		RT_TRACE(COMP_BT_COEXIST, DBG_DMESG,
+			("[BTCoex], BT TxRx Counters at level 1\n"));
+		bt_tx_rx_cnt_lvl = BT_TXRX_CNT_LEVEL_1;
+		rtlpcipriv->btcoexist.current_state_h  |= BT_COEX_STATE_BT_CNT_LEVEL_1;
+	} else {
+		RT_TRACE(COMP_BT_COEXIST, DBG_DMESG,
+			("[BTCoex], BT TxRx Counters at level 0\n"));
+		bt_tx_rx_cnt_lvl = BT_TXRX_CNT_LEVEL_0;
+		rtlpcipriv->btcoexist.current_state_h |= BT_COEX_STATE_BT_CNT_LEVEL_0;
+	}
+	return bt_tx_rx_cnt_lvl;
+}
+
+
+void rtl8821ae_dm_bt_2_ant_hid_sco_esco(struct ieee80211_hw *hw)
+{
+	struct rtl_priv *rtlpriv = rtl_priv(hw);
+	struct rtl_phy *rtlphy = &(rtlpriv->phy);
+	struct btdm_8821ae btdm8821ae;
+
+	u8 bt_rssi_state, bt_rssi_state1;
+	u8	bt_tx_rx_cnt_lvl = 0;
+
+	rtl8821ae_dm_bt_btdm_structure_reload(hw, &btdm8821ae);
+
+
+	btdm8821ae.b_rf_rx_lpf_shrink = true;
+	btdm8821ae.b_low_penalty_rate_adaptive = true;
+	btdm8821ae.b_reject_aggre_pkt = false;
+
+	bt_tx_rx_cnt_lvl = rtl8821ae_dm_bt_bt_tx_rx_counter_level(hw);
+	RT_TRACE(COMP_BT_COEXIST, DBG_DMESG, ("[BTCoex], BT TxRx Counters = %d\n", bt_tx_rx_cnt_lvl));
+
+	if(rtlphy->current_chan_bw == HT_CHANNEL_WIDTH_20_40)
+	{
+		RT_TRACE(COMP_BT_COEXIST, DBG_DMESG, ("HT40\n"));
+		// coex table
+		btdm8821ae.val_0x6c0 = 0x55555555;
+		btdm8821ae.val_0x6c8 = 0xffff;
+		btdm8821ae.val_0x6cc = 0x3;
+
+		// sw mechanism
+		btdm8821ae.b_agc_table_en = false;
+		btdm8821ae.b_adc_back_off_on = false;
+		btdm8821ae.b_sw_dac_swing_on = false;
+
+		// fw mechanism
+		btdm8821ae.b_ps_tdma_on = true;
+		if (bt_tx_rx_cnt_lvl == BT_TXRX_CNT_LEVEL_2) {
+			RT_TRACE(COMP_BT_COEXIST, DBG_DMESG, ("[BTCoex], BT TxRx Counters >= 1400\n"));
+			btdm8821ae.ps_tdma_byte[0] = 0xa3;
+			btdm8821ae.ps_tdma_byte[1] = 0x5;
+			btdm8821ae.ps_tdma_byte[2] = 0x5;
+			btdm8821ae.ps_tdma_byte[3] = 0x2;
+			btdm8821ae.ps_tdma_byte[4] = 0x80;
+		} else if(bt_tx_rx_cnt_lvl == BT_TXRX_CNT_LEVEL_1) {
+			RT_TRACE(COMP_BT_COEXIST, DBG_DMESG, ("[BTCoex], BT TxRx Counters >= 1200 && < 1400\n"));
+			btdm8821ae.ps_tdma_byte[0] = 0xa3;
+			btdm8821ae.ps_tdma_byte[1] = 0xa;
+			btdm8821ae.ps_tdma_byte[2] = 0xa;
+			btdm8821ae.ps_tdma_byte[3] = 0x2;
+			btdm8821ae.ps_tdma_byte[4] = 0x80;
+		} else {
+			RT_TRACE(COMP_BT_COEXIST, DBG_DMESG, ("[BTCoex], BT TxRx Counters < 1200\n"));
+			btdm8821ae.ps_tdma_byte[0] = 0xa3;
+			btdm8821ae.ps_tdma_byte[1] = 0xf;
+			btdm8821ae.ps_tdma_byte[2] = 0xf;
+			btdm8821ae.ps_tdma_byte[3] = 0x2;
+			btdm8821ae.ps_tdma_byte[4] = 0x80;
+		}
+	} else {
+		RT_TRACE(COMP_BT_COEXIST, DBG_DMESG, ("HT20 or Legacy\n"));
+		bt_rssi_state = rtl8821ae_dm_bt_check_coex_rssi_state(hw, 2, 47, 0);
+		bt_rssi_state1 = rtl8821ae_dm_bt_check_coex_rssi_state1(hw, 2, 27, 0);
+
+		// coex table
+		btdm8821ae.val_0x6c0 = 0x55555555;
+		btdm8821ae.val_0x6c8 = 0xffff;
+		btdm8821ae.val_0x6cc = 0x3;
+
+		// sw mechanism
+		if( (bt_rssi_state == BT_RSSI_STATE_HIGH) ||
+			(bt_rssi_state == BT_RSSI_STATE_STAY_HIGH) ) {
+			RT_TRACE(COMP_BT_COEXIST, DBG_DMESG, ("Wifi rssi high \n"));
+			btdm8821ae.b_agc_table_en = true;
+			btdm8821ae.b_adc_back_off_on = true;
+			btdm8821ae.b_sw_dac_swing_on = false;
+		} else {
+			RT_TRACE(COMP_BT_COEXIST, DBG_DMESG, ("Wifi rssi low \n"));
+			btdm8821ae.b_agc_table_en = false;
+			btdm8821ae.b_adc_back_off_on = false;
+			btdm8821ae.b_sw_dac_swing_on = false;
+		}
+
+		// fw mechanism
+		btdm8821ae.b_ps_tdma_on = true;
+		if( (bt_rssi_state1 == BT_RSSI_STATE_HIGH) ||
+			(bt_rssi_state1 == BT_RSSI_STATE_STAY_HIGH) ) {
+			RT_TRACE(COMP_BT_COEXIST, DBG_DMESG,("Wifi rssi-1 high \n"));
+			// Only needed when rssi is high; when rssi is low,
+			// the value will be modified by the fw.
+			rtl_write_byte(rtlpriv, 0x883, 0x40);
+			if(bt_tx_rx_cnt_lvl == BT_TXRX_CNT_LEVEL_2) {
+				RT_TRACE(COMP_BT_COEXIST, DBG_DMESG, ("[BTCoex], BT TxRx Counters >= 1400\n"));
+				btdm8821ae.ps_tdma_byte[0] = 0xa3;
+				btdm8821ae.ps_tdma_byte[1] = 0x5;
+				btdm8821ae.ps_tdma_byte[2] = 0x5;
+				btdm8821ae.ps_tdma_byte[3] = 0x83;
+				btdm8821ae.ps_tdma_byte[4] = 0x80;
+			} else if(bt_tx_rx_cnt_lvl == BT_TXRX_CNT_LEVEL_1) {
+				RT_TRACE(COMP_BT_COEXIST, DBG_DMESG, ("[BTCoex], BT TxRx Counters >= 1200 && < 1400\n"));
+				btdm8821ae.ps_tdma_byte[0] = 0xa3;
+				btdm8821ae.ps_tdma_byte[1] = 0xa;
+				btdm8821ae.ps_tdma_byte[2] = 0xa;
+				btdm8821ae.ps_tdma_byte[3] = 0x83;
+				btdm8821ae.ps_tdma_byte[4] = 0x80;
+			} else {
+				RT_TRACE(COMP_BT_COEXIST, DBG_DMESG, ("[BTCoex], BT TxRx Counters < 1200\n"));
+				btdm8821ae.ps_tdma_byte[0] = 0xa3;
+				btdm8821ae.ps_tdma_byte[1] = 0xf;
+				btdm8821ae.ps_tdma_byte[2] = 0xf;
+				btdm8821ae.ps_tdma_byte[3] = 0x83;
+				btdm8821ae.ps_tdma_byte[4] = 0x80;
+			}
+		} else {
+			RT_TRACE(COMP_BT_COEXIST, DBG_DMESG, ("Wifi rssi-1 low \n"));
+			if(bt_tx_rx_cnt_lvl == BT_TXRX_CNT_LEVEL_2)
+			{
+				RT_TRACE(COMP_BT_COEXIST, DBG_DMESG, ("[BTCoex], BT TxRx Counters >= 1400\n"));
+				btdm8821ae.ps_tdma_byte[0] = 0xa3;
+				btdm8821ae.ps_tdma_byte[1] = 0x5;
+				btdm8821ae.ps_tdma_byte[2] = 0x5;
+				btdm8821ae.ps_tdma_byte[3] = 0x2;
+				btdm8821ae.ps_tdma_byte[4] = 0x80;
+			} else if(bt_tx_rx_cnt_lvl == BT_TXRX_CNT_LEVEL_1) {
+				RT_TRACE(COMP_BT_COEXIST, DBG_DMESG, ("[BTCoex], BT TxRx Counters >= 1200 && < 1400\n"));
+				btdm8821ae.ps_tdma_byte[0] = 0xa3;
+				btdm8821ae.ps_tdma_byte[1] = 0xa;
+				btdm8821ae.ps_tdma_byte[2] = 0xa;
+				btdm8821ae.ps_tdma_byte[3] = 0x2;
+				btdm8821ae.ps_tdma_byte[4] = 0x80;
+			} else {
+				RT_TRACE(COMP_BT_COEXIST, DBG_DMESG, ("[BTCoex], BT TxRx Counters < 1200\n"));
+				btdm8821ae.ps_tdma_byte[0] = 0xa3;
+				btdm8821ae.ps_tdma_byte[1] = 0xf;
+				btdm8821ae.ps_tdma_byte[2] = 0xf;
+				btdm8821ae.ps_tdma_byte[3] = 0x2;
+				btdm8821ae.ps_tdma_byte[4] = 0x80;
+			}
+		}
+	}
+
+	if (rtl8821ae_dm_bt_need_to_dec_bt_pwr(hw)) {
+		btdm8821ae.b_dec_bt_pwr = true;
+	}
+
+	// Always ignore WlanAct if bHid|bSCOBusy|bSCOeSCO
+
+	RT_TRACE(COMP_BT_COEXIST, DBG_DMESG,
+		("[BTCoex], BT btInqPageStartTime = 0x%x, btTxRxCntLvl = %d\n",
+		hal_coex_8821ae.bt_inq_page_start_time, bt_tx_rx_cnt_lvl));
+	if( (hal_coex_8821ae.bt_inq_page_start_time) ||
+		(BT_TXRX_CNT_LEVEL_3 == bt_tx_rx_cnt_lvl) ) {
+		RT_TRACE(COMP_BT_COEXIST, DBG_DMESG,
+			("[BTCoex], Set BT inquiry / page scan 0x3a setting\n"));
+		btdm8821ae.b_ps_tdma_on = true;
+		btdm8821ae.ps_tdma_byte[0] = 0xa3;
+		btdm8821ae.ps_tdma_byte[1] = 0x5;
+		btdm8821ae.ps_tdma_byte[2] = 0x5;
+		btdm8821ae.ps_tdma_byte[3] = 0x2;
+		btdm8821ae.ps_tdma_byte[4] = 0x80;
+	}
+
+	if(rtl8821ae_dm_bt_is_coexist_state_changed(hw)) {
+		rtl8821ae_dm_bt_set_bt_dm(hw, &btdm8821ae);
+	}
+}
+
+void rtl8821ae_dm_bt_2_ant_ftp_a2dp(struct ieee80211_hw *hw)
+{
+	struct rtl_priv *rtlpriv = rtl_priv(hw);
+	struct rtl_phy *rtlphy = &(rtlpriv->phy);
+	struct btdm_8821ae btdm8821ae;
+
+	u8 bt_rssi_state, bt_rssi_state1;
+	u32 bt_tx_rx_cnt_lvl = 0;
+
+	rtl8821ae_dm_bt_btdm_structure_reload(hw, &btdm8821ae);
+
+	btdm8821ae.b_rf_rx_lpf_shrink = true;
+	btdm8821ae.b_low_penalty_rate_adaptive = true;
+	btdm8821ae.b_reject_aggre_pkt = false;
+
+	bt_tx_rx_cnt_lvl = rtl8821ae_dm_bt_bt_tx_rx_counter_level(hw);
+
+	RT_TRACE(COMP_BT_COEXIST, DBG_DMESG, ("[BTCoex], BT TxRx Counters = %d\n", bt_tx_rx_cnt_lvl));
+
+	if(rtlphy->current_chan_bw == HT_CHANNEL_WIDTH_20_40)
+	{
+		RT_TRACE(COMP_BT_COEXIST, DBG_DMESG, ("HT40\n"));
+		bt_rssi_state = rtl8821ae_dm_bt_check_coex_rssi_state(hw, 2, 37, 0);
+
+		// coex table
+		btdm8821ae.val_0x6c0 = 0x55555555;
+		btdm8821ae.val_0x6c8 = 0xffff;
+		btdm8821ae.val_0x6cc = 0x3;
+
+		// sw mechanism
+		btdm8821ae.b_agc_table_en = false;
+		btdm8821ae.b_adc_back_off_on = true;
+		btdm8821ae.b_sw_dac_swing_on = false;
+
+		// fw mechanism
+		btdm8821ae.b_ps_tdma_on = true;
+		if ((bt_rssi_state == BT_RSSI_STATE_HIGH) ||
+			(bt_rssi_state == BT_RSSI_STATE_STAY_HIGH) ) {
+			RT_TRACE(COMP_BT_COEXIST, DBG_DMESG, ("Wifi rssi high \n"));
+			if (bt_tx_rx_cnt_lvl == BT_TXRX_CNT_LEVEL_2) {
+				RT_TRACE(COMP_BT_COEXIST, DBG_DMESG, ("[BTCoex], BT TxRx Counters >= 1400\n"));
+				btdm8821ae.ps_tdma_byte[0] = 0xa3;
+				btdm8821ae.ps_tdma_byte[1] = 0x5;
+				btdm8821ae.ps_tdma_byte[2] = 0x5;
+				btdm8821ae.ps_tdma_byte[3] = 0x81;
+				btdm8821ae.ps_tdma_byte[4] = 0x80;
+			} else if(bt_tx_rx_cnt_lvl == BT_TXRX_CNT_LEVEL_1) {
+				RT_TRACE(COMP_BT_COEXIST, DBG_DMESG, ("[BTCoex], BT TxRx Counters >= 1200 && < 1400\n"));
+				btdm8821ae.ps_tdma_byte[0] = 0xa3;
+				btdm8821ae.ps_tdma_byte[1] = 0xa;
+				btdm8821ae.ps_tdma_byte[2] = 0xa;
+				btdm8821ae.ps_tdma_byte[3] = 0x81;
+				btdm8821ae.ps_tdma_byte[4] = 0x80;
+			} else {
+				RT_TRACE(COMP_BT_COEXIST, DBG_DMESG, ("[BTCoex], BT TxRx Counters < 1200\n"));
+				btdm8821ae.ps_tdma_byte[0] = 0xa3;
+				btdm8821ae.ps_tdma_byte[1] = 0xf;
+				btdm8821ae.ps_tdma_byte[2] = 0xf;
+				btdm8821ae.ps_tdma_byte[3] = 0x81;
+				btdm8821ae.ps_tdma_byte[4] = 0x80;
+			}
+		} else {
+			RT_TRACE(COMP_BT_COEXIST, DBG_DMESG, ("Wifi rssi low \n"));
+			if(bt_tx_rx_cnt_lvl == BT_TXRX_CNT_LEVEL_2) {
+				RT_TRACE(COMP_BT_COEXIST, DBG_DMESG, ("[BTCoex], BT TxRx Counters >= 1400\n"));
+				btdm8821ae.ps_tdma_byte[0] = 0xa3;
+				btdm8821ae.ps_tdma_byte[1] = 0x5;
+				btdm8821ae.ps_tdma_byte[2] = 0x5;
+				btdm8821ae.ps_tdma_byte[3] = 0x0;
+				btdm8821ae.ps_tdma_byte[4] = 0x80;
+			} else if(bt_tx_rx_cnt_lvl == BT_TXRX_CNT_LEVEL_1) {
+				RT_TRACE(COMP_BT_COEXIST, DBG_DMESG, ("[BTCoex], BT TxRx Counters >= 1200 && < 1400\n"));
+				btdm8821ae.ps_tdma_byte[0] = 0xa3;
+				btdm8821ae.ps_tdma_byte[1] = 0xa;
+				btdm8821ae.ps_tdma_byte[2] = 0xa;
+				btdm8821ae.ps_tdma_byte[3] = 0x0;
+				btdm8821ae.ps_tdma_byte[4] = 0x80;
+			} else {
+				RT_TRACE(COMP_BT_COEXIST, DBG_DMESG, ("[BTCoex], BT TxRx Counters < 1200\n"));
+				btdm8821ae.ps_tdma_byte[0] = 0xa3;
+				btdm8821ae.ps_tdma_byte[1] = 0xf;
+				btdm8821ae.ps_tdma_byte[2] = 0xf;
+				btdm8821ae.ps_tdma_byte[3] = 0x0;
+				btdm8821ae.ps_tdma_byte[4] = 0x80;
+			}
+		}
+	} else {
+		RT_TRACE(COMP_BT_COEXIST, DBG_DMESG, ("HT20 or Legacy\n"));
+		bt_rssi_state = rtl8821ae_dm_bt_check_coex_rssi_state(hw, 2, 47, 0);
+		bt_rssi_state1 = rtl8821ae_dm_bt_check_coex_rssi_state1(hw, 2, 27, 0);
+
+		// coex table
+		btdm8821ae.val_0x6c0 = 0x55555555;
+		btdm8821ae.val_0x6c8 = 0xffff;
+		btdm8821ae.val_0x6cc = 0x3;
+
+		// sw mechanism
+		if( (bt_rssi_state == BT_RSSI_STATE_HIGH) ||
+			(bt_rssi_state == BT_RSSI_STATE_STAY_HIGH) ) {
+			RT_TRACE(COMP_BT_COEXIST, DBG_DMESG, ("Wifi rssi high \n"));
+			btdm8821ae.b_agc_table_en = true;
+			btdm8821ae.b_adc_back_off_on = true;
+			btdm8821ae.b_sw_dac_swing_on = false;
+		} else {
+			RT_TRACE(COMP_BT_COEXIST, DBG_DMESG, ("Wifi rssi low \n"));
+			btdm8821ae.b_agc_table_en = false;
+			btdm8821ae.b_adc_back_off_on = false;
+			btdm8821ae.b_sw_dac_swing_on = false;
+		}
+
+		// fw mechanism
+		btdm8821ae.b_ps_tdma_on = true;
+		if( (bt_rssi_state1 == BT_RSSI_STATE_HIGH) ||
+			(bt_rssi_state1 == BT_RSSI_STATE_STAY_HIGH) ) {
+			RT_TRACE(COMP_BT_COEXIST, DBG_DMESG, ("Wifi rssi-1 high \n"));
+			// Only needed when rssi is high; when rssi is low,
+			// the value will be modified by the fw.
+			rtl_write_byte(rtlpriv, 0x883, 0x40);
+			if (bt_tx_rx_cnt_lvl == BT_TXRX_CNT_LEVEL_2) {
+				RT_TRACE(COMP_BT_COEXIST, DBG_DMESG, ("[BTCoex], BT TxRx Counters >= 1400\n"));
+				btdm8821ae.ps_tdma_byte[0] = 0xa3;
+				btdm8821ae.ps_tdma_byte[1] = 0x5;
+				btdm8821ae.ps_tdma_byte[2] = 0x5;
+				btdm8821ae.ps_tdma_byte[3] = 0x81;
+				btdm8821ae.ps_tdma_byte[4] = 0x80;
+			} else if(bt_tx_rx_cnt_lvl == BT_TXRX_CNT_LEVEL_1) {
+				RT_TRACE(COMP_BT_COEXIST, DBG_DMESG, ("[BTCoex], BT TxRx Counters >= 1200 && < 1400\n"));
+				btdm8821ae.ps_tdma_byte[0] = 0xa3;
+				btdm8821ae.ps_tdma_byte[1] = 0xa;
+				btdm8821ae.ps_tdma_byte[2] = 0xa;
+				btdm8821ae.ps_tdma_byte[3] = 0x81;
+				btdm8821ae.ps_tdma_byte[4] = 0x80;
+			} else {
+				RT_TRACE(COMP_BT_COEXIST, DBG_DMESG, ("[BTCoex], BT TxRx Counters < 1200\n"));
+				btdm8821ae.ps_tdma_byte[0] = 0xa3;
+				btdm8821ae.ps_tdma_byte[1] = 0xf;
+				btdm8821ae.ps_tdma_byte[2] = 0xf;
+				btdm8821ae.ps_tdma_byte[3] = 0x81;
+				btdm8821ae.ps_tdma_byte[4] = 0x80;
+			}
+		} else {
+			RT_TRACE(COMP_BT_COEXIST, DBG_DMESG, ("Wifi rssi-1 low \n"));
+			if(bt_tx_rx_cnt_lvl == BT_TXRX_CNT_LEVEL_2) {
+				RT_TRACE(COMP_BT_COEXIST, DBG_DMESG, ("[BTCoex], BT TxRx Counters >= 1400\n"));
+				btdm8821ae.ps_tdma_byte[0] = 0xa3;
+				btdm8821ae.ps_tdma_byte[1] = 0x5;
+				btdm8821ae.ps_tdma_byte[2] = 0x5;
+				btdm8821ae.ps_tdma_byte[3] = 0x0;
+				btdm8821ae.ps_tdma_byte[4] = 0x80;
+			} else if(bt_tx_rx_cnt_lvl == BT_TXRX_CNT_LEVEL_1) {
+				RT_TRACE(COMP_BT_COEXIST, DBG_DMESG, ("[BTCoex], BT TxRx Counters >= 1200 && < 1400\n"));
+				btdm8821ae.ps_tdma_byte[0] = 0xa3;
+				btdm8821ae.ps_tdma_byte[1] = 0xa;
+				btdm8821ae.ps_tdma_byte[2] = 0xa;
+				btdm8821ae.ps_tdma_byte[3] = 0x0;
+				btdm8821ae.ps_tdma_byte[4] = 0x80;
+			} else {
+				RT_TRACE(COMP_BT_COEXIST, DBG_DMESG, ("[BTCoex], BT TxRx Counters < 1200\n"));
+				btdm8821ae.ps_tdma_byte[0] = 0xa3;
+				btdm8821ae.ps_tdma_byte[1] = 0xf;
+				btdm8821ae.ps_tdma_byte[2] = 0xf;
+				btdm8821ae.ps_tdma_byte[3] = 0x0;
+				btdm8821ae.ps_tdma_byte[4] = 0x80;
+			}
+		}
+	}
+
+	if(rtl8821ae_dm_bt_need_to_dec_bt_pwr(hw)) {
+		btdm8821ae.b_dec_bt_pwr = true;
+	}
+
+	RT_TRACE(COMP_BT_COEXIST, DBG_DMESG,
+	       ("[BTCoex], BT btInqPageStartTime = 0x%x, btTxRxCntLvl = %d\n",
+	        hal_coex_8821ae.bt_inq_page_start_time, bt_tx_rx_cnt_lvl));
+
+	if( (hal_coex_8821ae.bt_inq_page_start_time) ||
+		(BT_TXRX_CNT_LEVEL_3 == bt_tx_rx_cnt_lvl) )
+	{
+		RT_TRACE(COMP_BT_COEXIST, DBG_DMESG,
+				("[BTCoex], Set BT inquiry / page scan 0x3a setting\n"));
+		btdm8821ae.b_ps_tdma_on = true;
+		btdm8821ae.ps_tdma_byte[0] = 0xa3;
+		btdm8821ae.ps_tdma_byte[1] = 0x5;
+		btdm8821ae.ps_tdma_byte[2] = 0x5;
+		btdm8821ae.ps_tdma_byte[3] = 0x83;
+		btdm8821ae.ps_tdma_byte[4] = 0x80;
+	}
+
+	if(rtl8821ae_dm_bt_is_coexist_state_changed(hw)){
+		rtl8821ae_dm_bt_set_bt_dm(hw, &btdm8821ae);
+	}
+}
+
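+/*
+ * Track the BT inquiry/page activity reported via C2H: when an inquiry or
+ * page scan starts, record the start time and set BT_COEX_STATE_BT_INQ_PAGE;
+ * once it has been pending for 10 seconds or more, clear the state again.
+ */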
+void rtl8821ae_dm_bt_inq_page_monitor(struct ieee80211_hw *hw)
+{
+	struct rtl_pci_priv *rtlpcipriv = rtl_pcipriv(hw);
+	struct rtl_priv *rtlpriv = rtl_priv(hw);
+	u32 cur_time;
+	cur_time = jiffies;
+	if (hal_coex_8821ae.b_c2h_bt_inquiry_page) {
+		//pHalData->btcoexist.halCoex8821ae.btInquiryPageCnt++;
+		// bt inquiry or page is started.
+		if(hal_coex_8821ae.bt_inq_page_start_time == 0){
+			rtlpcipriv->btcoexist.current_state  |= BT_COEX_STATE_BT_INQ_PAGE;
+			hal_coex_8821ae.bt_inq_page_start_time = cur_time;
+			RT_TRACE(COMP_BT_COEXIST, DBG_DMESG,
+				("[BTCoex], BT Inquiry/page is started at time : 0x%x \n",
+				hal_coex_8821ae.bt_inq_page_start_time));
+		}
+	}
+	RT_TRACE(COMP_BT_COEXIST, DBG_DMESG,
+		("[BTCoex], BT Inquiry/page started time : 0x%x, cur_time : 0x%x \n",
+		hal_coex_8821ae.bt_inq_page_start_time, cur_time));
+
+	if (hal_coex_8821ae.bt_inq_page_start_time) {
+		if ((((long)cur_time - (long)hal_coex_8821ae.bt_inq_page_start_time) / HZ) >= 10) {
+			RT_TRACE(COMP_BT_COEXIST, DBG_DMESG,  ("[BTCoex], BT Inquiry/page >= 10sec!!!"));
+			hal_coex_8821ae.bt_inq_page_start_time = 0;
+			rtlpcipriv->btcoexist.current_state &=~ BT_COEX_STATE_BT_INQ_PAGE;
+		}
+	}
+
+#if 0
+	if (hal_coex_8821ae.b_c2h_bt_inquiry_page) {
+		hal_coex_8821ae.b_c2h_bt_inquiry_page++;
+		// bt inquiry or page is started.
+	} if(hal_coex_8821ae.b_c2h_bt_inquiry_page) {
+		rtlpcipriv->btcoexist.current_state |= BT_COEX_STATE_BT_INQ_PAGE;
+		if(hal_coex_8821ae.bt_inquiry_page_cnt >= 4)
+			hal_coex_8821ae.bt_inquiry_page_cnt = 0;
+		hal_coex_8821ae.bt_inquiry_page_cnt++;
+	} else {
+		rtlpcipriv->btcoexist.current_state &=~ BT_COEX_STATE_BT_INQ_PAGE;
+	}
+#endif
+}
+
+void rtl8821ae_dm_bt_reset_action_profile_state(struct ieee80211_hw *hw)
+{
+	struct rtl_pci_priv *rtlpcipriv = rtl_pcipriv(hw);
+
+	rtlpcipriv->btcoexist.current_state &= ~\
+		(BT_COEX_STATE_PROFILE_HID | BT_COEX_STATE_PROFILE_A2DP|
+		BT_COEX_STATE_PROFILE_PAN | BT_COEX_STATE_PROFILE_SCO);
+
+	rtlpcipriv->btcoexist.current_state &= ~\
+		(BT_COEX_STATE_BTINFO_COMMON | BT_COEX_STATE_BTINFO_B_HID_SCOESCO|
+		BT_COEX_STATE_BTINFO_B_FTP_A2DP);
+}
+
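+/*
+ * 2-antenna coexistence dispatcher: refresh the wifi state, monitor BT
+ * inquiry/page activity, then pick a coexistence action from the BT info
+ * byte (HID/SCO/eSCO vs. FTP/A2DP), falling back to the common action.
+ */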
+void _rtl8821ae_dm_bt_coexist_2_ant(struct ieee80211_hw *hw)
+{
+	struct rtl_priv *rtlpriv = rtl_priv(hw);
+	struct rtl_pci_priv *rtlpcipriv = rtl_pcipriv(hw);
+	u8 bt_retry_cnt;
+	u8 bt_info_original;
+	RT_TRACE(COMP_BT_COEXIST, DBG_DMESG, ("[BTCoex] Get bt info by fw!!\n"));
+
+	_rtl8821ae_dm_bt_check_wifi_state(hw);
+
+	if (hal_coex_8821ae.b_c2h_bt_info_req_sent) {
+			RT_TRACE(COMP_BT_COEXIST, DBG_TRACE,
+				("[BTCoex] c2h for bt_info not rcvd yet!!\n"));
+	}
+
+	bt_retry_cnt = hal_coex_8821ae.bt_retry_cnt;
+	bt_info_original = hal_coex_8821ae.c2h_bt_info_original;
+
+	// When bt is doing inquiry or page scan, we have to set h2c 0x25
+	// to ignore wlan act for continuous 4x2 secs.
+	rtl8821ae_dm_bt_inq_page_monitor(hw);
+	rtl8821ae_dm_bt_reset_action_profile_state(hw);
+
+	if(rtl8821ae_dm_bt_is_2_ant_common_action(hw)) {
+		rtlpcipriv->btcoexist.bt_profile_case = BT_COEX_MECH_COMMON;
+		rtlpcipriv->btcoexist.bt_profile_action= BT_COEX_MECH_COMMON;
+		RT_TRACE(COMP_BT_COEXIST, DBG_DMESG, ("Action 2-Ant common.\n"));
+	} else {
+		if( (bt_info_original & BTINFO_B_HID) ||
+			(bt_info_original & BTINFO_B_SCO_BUSY) ||
+			(bt_info_original & BTINFO_B_SCO_ESCO) ) {
+				rtlpcipriv->btcoexist.current_state |= BT_COEX_STATE_BTINFO_B_HID_SCOESCO;
+				rtlpcipriv->btcoexist.bt_profile_case = BT_COEX_MECH_HID_SCO_ESCO;
+				rtlpcipriv->btcoexist.bt_profile_action = BT_COEX_MECH_HID_SCO_ESCO;
+				RT_TRACE(COMP_BT_COEXIST, DBG_DMESG, ("[BTCoex], BTInfo: bHid|bSCOBusy|bSCOeSCO\n"));
+				rtl8821ae_dm_bt_2_ant_hid_sco_esco(hw);
+		} else if( (bt_info_original & BTINFO_B_FTP) ||
+				(bt_info_original & BTINFO_B_A2DP) ) {
+				rtlpcipriv->btcoexist.current_state |= BT_COEX_STATE_BTINFO_B_FTP_A2DP;
+				rtlpcipriv->btcoexist.bt_profile_case = BT_COEX_MECH_FTP_A2DP;
+				rtlpcipriv->btcoexist.bt_profile_action = BT_COEX_MECH_FTP_A2DP;
+				RT_TRACE(COMP_BT_COEXIST, DBG_DMESG, ("BTInfo: bFTP|bA2DP\n"));
+				rtl8821ae_dm_bt_2_ant_ftp_a2dp(hw);
+		} else {
+				rtlpcipriv->btcoexist.current_state |= BT_COEX_STATE_BTINFO_B_HID_SCOESCO;
+				rtlpcipriv->btcoexist.bt_profile_case = BT_COEX_MECH_NONE;
+				rtlpcipriv->btcoexist.bt_profile_action= BT_COEX_MECH_NONE;
+				RT_TRACE(COMP_BT_COEXIST, DBG_DMESG, ("[BTCoex], BTInfo: undefined case!!!!\n"));
+				rtl8821ae_dm_bt_2_ant_hid_sco_esco(hw);
+		}
+	}
+}
+
+void _rtl8821ae_dm_bt_coexist_1_ant(struct ieee80211_hw *hw)
+{
+	return;
+}
+
+void rtl8821ae_dm_bt_hw_coex_all_off_8723a(struct ieee80211_hw *hw)
+{
+	rtl8821ae_dm_bt_set_coex_table(hw, 0x5a5aaaaa, 0xcc, 0x3);
+	rtl8821ae_dm_bt_set_hw_pta_mode(hw, true);
+}
+
+void rtl8821ae_dm_bt_fw_coex_all_off_8723a(struct ieee80211_hw *hw)
+{
+	rtl8821ae_dm_bt_set_fw_ignore_wlan_act(hw, false);
+	rtl8821ae_dm_bt_set_fw_3a(hw, 0x0, 0x0, 0x0, 0x8, 0x0);
+	rtl8821ae_dm_bt_set_fw_2_ant_hid(hw, false, false);
+	rtl8821ae_dm_bt_set_fw_tra_tdma_ctrl(hw, false, TDMA_2ANT, TDMA_NAV_OFF);
+	rtl8821ae_dm_bt_set_fw_tdma_ctrl(hw, false, TDMA_2ANT,
+				TDMA_NAV_OFF, TDMA_DAC_SWING_OFF);
+	rtl8821ae_dm_bt_set_fw_dac_swing_level(hw, 0);
+	rtl8821ae_dm_bt_set_fw_bt_hid_info(hw, false);
+	rtl8821ae_dm_bt_set_fw_bt_retry_index(hw, 2);
+	rtl8821ae_dm_bt_set_fw_wlan_act(hw, 0x10, 0x10);
+	rtl8821ae_dm_bt_set_fw_dec_bt_pwr(hw, false);
+}
+
+void rtl8821ae_dm_bt_sw_coex_all_off_8723a(struct ieee80211_hw *hw)
+{
+	rtl8821ae_dm_bt_agc_table(hw, BT_AGCTABLE_OFF);
+	rtl8821ae_dm_bt_bb_back_off_level(hw, BT_BB_BACKOFF_OFF);
+	rtl8821ae_dm_bt_reject_ap_aggregated_packet(hw, false);
+
+	rtl8821ae_dm_bt_set_sw_penalty_tx_rate_adaptive(hw,
+							BT_TX_RATE_ADAPTIVE_NORMAL);
+	rtl8821ae_dm_bt_set_sw_rf_rx_lpf_corner(hw, BT_RF_RX_LPF_CORNER_RESUME);
+	rtl8821ae_dm_bt_set_sw_full_time_dac_swing(hw, false, 0xc0);
+}
+
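+/* Ask the firmware (H2C command 0x38, bit 0 set) to report BT information. */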
+void rtl8821ae_dm_bt_query_bt_information(struct ieee80211_hw *hw)
+{
+	struct rtl_priv *rtlpriv = rtl_priv(hw);
+	u8 h2c_parameter[1] = {0};
+
+	hal_coex_8821ae.b_c2h_bt_info_req_sent = true;
+
+	h2c_parameter[0] |=  BIT(0);
+
+	RT_TRACE(COMP_BT_COEXIST, DBG_TRACE,
+		("Query Bt information, write 0x38=0x%x\n", h2c_parameter[0]));
+
+	rtl8821ae_fill_h2c_cmd(hw, 0x38, 1, h2c_parameter);
+}
+
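+/*
+ * Sample the BT high/low priority TX/RX counters.  The low word of each
+ * register holds the TX count and the high word the RX count; the results
+ * are cached in hal_coex_8821ae for the coexistence state machines.
+ */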
+void rtl8821ae_dm_bt_bt_hw_counters_monitor(struct ieee80211_hw *hw)
+{
+	struct rtl_priv *rtlpriv = rtl_priv(hw);
+	struct rtl_pci_priv *rtlpcipriv = rtl_pcipriv(hw);
+	u32 reg_hp_tx_rx, reg_lp_tx_rx, u32_tmp;
+	u32 reg_hp_tx=0, reg_hp_rx=0, reg_lp_tx=0, reg_lp_rx=0;
+
+	reg_hp_tx_rx = REG_HIGH_PRIORITY_TXRX;
+	reg_lp_tx_rx = REG_LOW_PRIORITY_TXRX;
+
+	u32_tmp = rtl_read_dword(rtlpriv, reg_hp_tx_rx);
+	reg_hp_tx = u32_tmp & MASKLWORD;
+	reg_hp_rx = (u32_tmp & MASKHWORD)>>16;
+
+	u32_tmp = rtl_read_dword(rtlpriv, reg_lp_tx_rx);
+	reg_lp_tx = u32_tmp & MASKLWORD;
+	reg_lp_rx = (u32_tmp & MASKHWORD)>>16;
+
+	if(rtlpcipriv->btcoexist.lps_counter > 1) {
+		reg_hp_tx %= rtlpcipriv->btcoexist.lps_counter;
+		reg_hp_rx %= rtlpcipriv->btcoexist.lps_counter;
+		reg_lp_tx %= rtlpcipriv->btcoexist.lps_counter;
+		reg_lp_rx %= rtlpcipriv->btcoexist.lps_counter;
+	}
+
+	hal_coex_8821ae.high_priority_tx = reg_hp_tx;
+	hal_coex_8821ae.high_priority_rx = reg_hp_rx;
+	hal_coex_8821ae.low_priority_tx = reg_lp_tx;
+	hal_coex_8821ae.low_priority_rx = reg_lp_rx;
+
+	RT_TRACE(COMP_BT_COEXIST, DBG_DMESG,
+		("High Priority Tx/Rx (reg 0x%x)=%x(%d)/%x(%d)\n",
+		reg_hp_tx_rx, reg_hp_tx, reg_hp_tx, reg_hp_rx, reg_hp_rx));
+	RT_TRACE(COMP_BT_COEXIST, DBG_DMESG,
+		("Low Priority Tx/Rx (reg 0x%x)=%x(%d)/%x(%d)\n",
+		reg_lp_tx_rx, reg_lp_tx, reg_lp_tx, reg_lp_rx, reg_lp_rx));
+	rtlpcipriv->btcoexist.lps_counter = 0;
+	//rtl_write_byte(rtlpriv, 0x76e, 0xc);
+}
+
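+/*
+ * Heuristic BT enable/disable detection: if all four priority counters read
+ * 0, 0xeaea or 0xffff on two consecutive polls, BT is assumed disabled.
+ */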
+void rtl8821ae_dm_bt_bt_enable_disable_check(struct ieee80211_hw *hw)
+{
+	struct rtl_priv *rtlpriv = rtl_priv(hw);
+	struct rtl_pci_priv *rtlpcipriv = rtl_pcipriv(hw);
+	bool bt_alive = true;
+
+	if (hal_coex_8821ae.high_priority_tx == 0 &&
+		hal_coex_8821ae.high_priority_rx == 0 &&
+		hal_coex_8821ae.low_priority_tx == 0 &&
+		hal_coex_8821ae.low_priority_rx == 0) {
+		bt_alive = false;
+	}
+	if (hal_coex_8821ae.high_priority_tx == 0xeaea &&
+		hal_coex_8821ae.high_priority_rx == 0xeaea &&
+		hal_coex_8821ae.low_priority_tx == 0xeaea &&
+		hal_coex_8821ae.low_priority_rx == 0xeaea) {
+		bt_alive = false;
+	}
+	if (hal_coex_8821ae.high_priority_tx == 0xffff &&
+		hal_coex_8821ae.high_priority_rx == 0xffff &&
+		hal_coex_8821ae.low_priority_tx == 0xffff &&
+		hal_coex_8821ae.low_priority_rx == 0xffff) {
+		bt_alive = false;
+	}
+	if (bt_alive) {
+		rtlpcipriv->btcoexist.bt_active_zero_cnt = 0;
+		rtlpcipriv->btcoexist.b_cur_bt_disabled = false;
+		RT_TRACE(COMP_BT_COEXIST, DBG_TRACE, ("8821AE BT is enabled !!\n"));
+	} else {
+		rtlpcipriv->btcoexist.bt_active_zero_cnt++;
+		RT_TRACE(COMP_BT_COEXIST, DBG_TRACE,
+			("8821AE bt all counters=0, %d times!!\n",
+			rtlpcipriv->btcoexist.bt_active_zero_cnt));
+		if (rtlpcipriv->btcoexist.bt_active_zero_cnt >= 2) {
+			rtlpcipriv->btcoexist.b_cur_bt_disabled = true;
+			RT_TRACE(COMP_BT_COEXIST, DBG_TRACE, ("8821AE BT is disabled !!\n"));
+		}
+	}
+	if (rtlpcipriv->btcoexist.b_pre_bt_disabled !=
+		rtlpcipriv->btcoexist.b_cur_bt_disabled) {
+		RT_TRACE(COMP_BT_COEXIST, DBG_TRACE, ("8821AE BT is from %s to %s!!\n",
+			(rtlpcipriv->btcoexist.b_pre_bt_disabled ? "disabled":"enabled"),
+			(rtlpcipriv->btcoexist.b_cur_bt_disabled ? "disabled":"enabled")));
+		rtlpcipriv->btcoexist.b_pre_bt_disabled
+			= rtlpcipriv->btcoexist.b_cur_bt_disabled;
+	}
+}
+
+
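+/*
+ * Main BT coexistence work: query BT info, sample the HW counters, check
+ * whether BT is still enabled, then run the 1- or 2-antenna mechanism and
+ * log any change in the coexistence state bitmap.
+ */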
+void rtl8821ae_dm_bt_coexist(struct ieee80211_hw *hw)
+{
+	struct rtl_priv *rtlpriv = rtl_priv(hw);
+	struct rtl_pci_priv *rtlpcipriv = rtl_pcipriv(hw);
+
+	rtl8821ae_dm_bt_query_bt_information(hw);
+	rtl8821ae_dm_bt_bt_hw_counters_monitor(hw);
+	rtl8821ae_dm_bt_bt_enable_disable_check(hw);
+
+	if (rtlpcipriv->btcoexist.bt_ant_num == ANT_X2) {
+		RT_TRACE(COMP_BT_COEXIST, DBG_DMESG, ("[BTCoex], 2 Ant mechanism\n"));
+		_rtl8821ae_dm_bt_coexist_2_ant(hw);
+	} else {
+		RT_TRACE(COMP_BT_COEXIST, DBG_TRACE, ("[BTCoex], 1 Ant mechanism\n"));
+		_rtl8821ae_dm_bt_coexist_1_ant(hw);
+	}
+
+	if (!rtl8821ae_dm_bt_is_same_coexist_state(hw)) {
+		RT_TRACE(COMP_BT_COEXIST, DBG_DMESG,
+			("[BTCoex], Coexist State[bitMap] change from 0x%x%8x to 0x%x%8x\n",
+			rtlpcipriv->btcoexist.previous_state_h,
+			rtlpcipriv->btcoexist.previous_state,
+			rtlpcipriv->btcoexist.current_state_h,
+			rtlpcipriv->btcoexist.current_state));
+		rtlpcipriv->btcoexist.previous_state
+			= rtlpcipriv->btcoexist.current_state;
+		rtlpcipriv->btcoexist.previous_state_h
+			= rtlpcipriv->btcoexist.current_state_h;
+	}
+}
+
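+/*
+ * Parse the BT_INFO C2H payload: byte 0 carries the BT status flags
+ * (BIT(2) = inquiry/page in progress, BTINFO_B_CONNECTION = link up) and
+ * byte 1 the BT retry count.
+ */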
+void rtl8821ae_dm_bt_parse_bt_info(struct ieee80211_hw *hw, u8 *tmp_buf, u8 len)
+{
+	struct rtl_priv *rtlpriv = rtl_priv(hw);
+	struct rtl_pci_priv *rtlpcipriv = rtl_pcipriv(hw);
+	u8 bt_info;
+	u8 i;
+
+	hal_coex_8821ae.b_c2h_bt_info_req_sent = false;
+	hal_coex_8821ae.bt_retry_cnt = 0;
+	for (i = 0; i < len; i++) {
+		if (i == 0) {
+			hal_coex_8821ae.c2h_bt_info_original = tmp_buf[i];
+		} else if (i == 1) {
+			hal_coex_8821ae.bt_retry_cnt = tmp_buf[i];
+		}
+		if(i == len-1) {
+			RT_TRACE(COMP_BT_COEXIST, DBG_TRACE, ("0x%2x]", tmp_buf[i]));
+		} else {
+			RT_TRACE(COMP_BT_COEXIST, DBG_TRACE, ("0x%2x, ", tmp_buf[i]));
+		}
+	}
+	RT_TRACE(COMP_BT_COEXIST, DBG_DMESG,
+		("BT info bt_info (Data)= 0x%x\n",hal_coex_8821ae.c2h_bt_info_original));
+	bt_info = hal_coex_8821ae.c2h_bt_info_original;
+
+	if(bt_info & BIT(2)){
+		hal_coex_8821ae.b_c2h_bt_inquiry_page = true;
+	} else {
+		hal_coex_8821ae.b_c2h_bt_inquiry_page = false;
+	}
+
+	if (bt_info & BTINFO_B_CONNECTION) {
+		RT_TRACE(COMP_BT_COEXIST, DBG_DMESG, ("[BTC2H], BTInfo: bConnect=true\n"));
+		rtlpcipriv->btcoexist.b_bt_busy = true;
+		rtlpcipriv->btcoexist.current_state &= ~BT_COEX_STATE_BT_IDLE;
+	} else {
+		RT_TRACE(COMP_BT_COEXIST, DBG_DMESG, ("[BTC2H], BTInfo: bConnect=false\n"));
+		rtlpcipriv->btcoexist.b_bt_busy = false;
+		rtlpcipriv->btcoexist.current_state |= BT_COEX_STATE_BT_IDLE;
+	}
+}
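+
+/*
+ * Read one C2H event from the mailbox at REG_C2HEVT_MSG_NORMAL: the first
+ * byte packs the command id in the low nibble and the payload length in the
+ * high nibble, the second byte is the sequence number and the payload starts
+ * at offset 2.  Register 0x1AF is the handshake byte: the handler only
+ * proceeds when it reads 0xFF (C2H_EVT_FW_CLOSE) and writes 0x00
+ * (C2H_EVT_HOST_CLOSE) back once the event has been consumed.
+ */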
+void rtl_8821ae_c2h_command_handle(struct ieee80211_hw *hw)
+{
+	struct rtl_priv *rtlpriv = rtl_priv(hw);
+	struct c2h_evt_hdr c2h_event;
+	u8 *ptmp_buf = NULL;
+	u8 index = 0;
+	u8 u1b_tmp = 0;
+	memset(&c2h_event, 0, sizeof(c2h_event));
+	u1b_tmp = rtl_read_byte(rtlpriv, REG_C2HEVT_MSG_NORMAL);
+	RT_TRACE(COMP_FW, DBG_DMESG,
+		("&&&&&&: REG_C2HEVT_MSG_NORMAL is 0x%x\n", u1b_tmp));
+	c2h_event.cmd_id = u1b_tmp & 0xF;
+	c2h_event.cmd_len = (u1b_tmp & 0xF0) >> 4;
+	c2h_event.cmd_seq = rtl_read_byte(rtlpriv, REG_C2HEVT_MSG_NORMAL + 1);
+	RT_TRACE(COMP_FW, DBG_DMESG, ("cmd_id: %d, cmd_len: %d, cmd_seq: %d\n",
+		c2h_event.cmd_id , c2h_event.cmd_len, c2h_event.cmd_seq));
+	u1b_tmp = rtl_read_byte(rtlpriv, 0x01AF);
+	if (u1b_tmp == C2H_EVT_HOST_CLOSE) {
+		return;
+	} else if (u1b_tmp != C2H_EVT_FW_CLOSE) {
+		rtl_write_byte(rtlpriv, 0x1AF, 0x00);
+		return;
+	}
+	ptmp_buf = kmalloc(c2h_event.cmd_len, GFP_KERNEL);
+	if (ptmp_buf == NULL) {
+		RT_TRACE(COMP_FW, DBG_TRACE, ("malloc cmd buf failed\n"));
+		return;
+	}
+
+	/* Read the content */
+	for (index = 0; index < c2h_event.cmd_len; index++) {
+		ptmp_buf[index] = rtl_read_byte(rtlpriv, REG_C2HEVT_MSG_NORMAL + 2 + index);
+	}
+
+	switch (c2h_event.cmd_id) {
+	case C2H_BT_RSSI:
+		break;
+
+	case C2H_BT_OP_MODE:
+		break;
+
+	case BT_INFO:
+		RT_TRACE(COMP_FW, DBG_TRACE,
+			("BT info Byte[0] (ID) is 0x%x\n", c2h_event.cmd_id));
+		RT_TRACE(COMP_FW, DBG_TRACE,
+			("BT info Byte[1] (Seq) is 0x%x\n", c2h_event.cmd_seq));
+		RT_TRACE(COMP_FW, DBG_TRACE,
+			("BT info Byte[2] (Data)= 0x%x\n", ptmp_buf[0]));
+
+		if (rtlpriv->cfg->ops->get_btc_status()){
+			rtlpriv->btcoexist.btc_ops->btc_btinfo_notify(rtlpriv, ptmp_buf, c2h_event.cmd_len);
+		}
+		break;
+	default:
+		break;
+	}
+
+	kfree(ptmp_buf);
+
+	rtl_write_byte(rtlpriv, 0x01AF, C2H_EVT_HOST_CLOSE);
+}
+
+
+
diff --git a/drivers/staging/rtl8821ae/rtl8821ae/hal_btc.h b/drivers/staging/rtl8821ae/rtl8821ae/hal_btc.h
new file mode 100644
index 0000000..a94474f
--- /dev/null
+++ b/drivers/staging/rtl8821ae/rtl8821ae/hal_btc.h
@@ -0,0 +1,160 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2009-2010  Realtek Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ * The full GNU General Public License is included in this distribution in the
+ * file called LICENSE.
+ *
+ * Contact Information:
+ * wlanfae <wlanfae@realtek.com>
+ * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
+ * Hsinchu 300, Taiwan.
+ * Larry Finger <Larry.Finger@lwfinger.net>
+ *
+ *****************************************************************************/
+
+#ifndef __RTL8821AE_HAL_BTC_H__
+#define __RTL8821AE_HAL_BTC_H__
+
+#include "../wifi.h"
+#include "btc.h"
+#include "hal_bt_coexist.h"
+
+#define	BT_TXRX_CNT_THRES_1				1200
+#define	BT_TXRX_CNT_THRES_2				1400
+#define	BT_TXRX_CNT_THRES_3				3000
+#define	BT_TXRX_CNT_LEVEL_0				0	// < 1200
+#define	BT_TXRX_CNT_LEVEL_1				1	// >= 1200 && < 1400
+#define	BT_TXRX_CNT_LEVEL_2				2	// >= 1400
+#define	BT_TXRX_CNT_LEVEL_3				3	// >= 3000
+
+
+
+#define	BT_COEX_DISABLE		0
+#define	BT_Q_PKT_OFF		0
+#define	BT_Q_PKT_ON			1
+
+#define	BT_TX_PWR_OFF		0
+#define	BT_TX_PWR_ON		1
+
+/* TDMA mode definition */
+#define	TDMA_2ANT			0
+#define	TDMA_1ANT			1
+#define	TDMA_NAV_OFF		0
+#define	TDMA_NAV_ON			1
+#define	TDMA_DAC_SWING_OFF	0
+#define	TDMA_DAC_SWING_ON	1
+
+/* PTA mode related definition */
+#define	BT_PTA_MODE_OFF		0
+#define	BT_PTA_MODE_ON		1
+
+/* Penalty Tx Rate Adaptive */
+#define	BT_TX_RATE_ADAPTIVE_NORMAL	0
+#define	BT_TX_RATE_ADAPTIVE_LOW_PENALTY	1
+
+/* RF Corner */
+#define	BT_RF_RX_LPF_CORNER_RESUME	0
+#define	BT_RF_RX_LPF_CORNER_SHRINK	1
+
+#define C2H_EVT_HOST_CLOSE		0x00
+#define C2H_EVT_FW_CLOSE		0xFF
+
+enum bt_traffic_mode {
+	BT_MOTOR_EXT_BE = 0x00,
+	BT_MOTOR_EXT_GUL = 0x01,
+	BT_MOTOR_EXT_GUB = 0x02,
+	BT_MOTOR_EXT_GULB = 0x03
+};
+
+enum bt_traffic_mode_profile {
+	BT_PROFILE_NONE,
+	BT_PROFILE_A2DP,
+	BT_PROFILE_PAN,
+	BT_PROFILE_HID,
+	BT_PROFILE_SCO
+};
+
+enum hci_ext_bt_operation {
+	HCI_BT_OP_NONE = 0x0,
+	HCI_BT_OP_INQUIRE_START	= 0x1,
+	HCI_BT_OP_INQUIRE_FINISH = 0x2,
+	HCI_BT_OP_PAGING_START = 0x3,
+	HCI_BT_OP_PAGING_SUCCESS = 0x4,
+	HCI_BT_OP_PAGING_UNSUCCESS = 0x5,
+	HCI_BT_OP_PAIRING_START = 0x6,
+	HCI_BT_OP_PAIRING_FINISH = 0x7,
+	HCI_BT_OP_BT_DEV_ENABLE = 0x8,
+	HCI_BT_OP_BT_DEV_DISABLE = 0x9,
+	HCI_BT_OP_MAX,
+};
+
+enum bt_spec {
+	BT_SPEC_1_0_b = 0x00,
+	BT_SPEC_1_1 = 0x01,
+	BT_SPEC_1_2 = 0x02,
+	BT_SPEC_2_0_EDR = 0x03,
+	BT_SPEC_2_1_EDR = 0x04,
+	BT_SPEC_3_0_HS = 0x05,
+	BT_SPEC_4_0 = 0x06
+};
+
+struct c2h_evt_hdr {
+	u8 cmd_id;
+	u8 cmd_len;
+	u8 cmd_seq;
+};
+
+enum bt_state {
+	BT_INFO_STATE_DISABLED = 0,
+	BT_INFO_STATE_NO_CONNECTION = 1,
+	BT_INFO_STATE_CONNECT_IDLE = 2,
+	BT_INFO_STATE_INQ_OR_PAG = 3,
+	BT_INFO_STATE_ACL_ONLY_BUSY = 4,
+	BT_INFO_STATE_SCO_ONLY_BUSY = 5,
+	BT_INFO_STATE_ACL_SCO_BUSY = 6,
+	BT_INFO_STATE_HID_BUSY = 7,
+	BT_INFO_STATE_HID_SCO_BUSY = 8,
+	BT_INFO_STATE_MAX = 7
+};
+
+enum rtl8723be_c2h_evt {
+	C2H_DBG = 0,
+	C2H_TSF = 1,
+	C2H_AP_RPT_RSP = 2,
+	C2H_CCX_TX_RPT = 3,	// The FW notifies the host of the tx report for a specific packet.
+	C2H_BT_RSSI = 4,
+	C2H_BT_OP_MODE = 5,
+	C2H_HW_INFO_EXCH = 10,
+	C2H_C2H_H2C_TEST = 11,
+	BT_INFO = 9,
+	MAX_C2HEVENT
+};
+
+
+
+void rtl8821ae_dm_bt_fw_coex_all_off_8723a(struct ieee80211_hw *hw);
+void rtl8821ae_dm_bt_sw_coex_all_off_8723a(struct ieee80211_hw *hw);
+void rtl8821ae_dm_bt_hw_coex_all_off_8723a(struct ieee80211_hw *hw);
+void rtl8821ae_dm_bt_coexist(struct ieee80211_hw *hw);
+void rtl8821ae_dm_bt_set_bt_dm(struct ieee80211_hw *hw, struct btdm_8821ae *p_btdm);
+void rtl_8821ae_c2h_command_handle(struct ieee80211_hw *hw);
+void rtl_8821ae_bt_wifi_media_status_notify(struct ieee80211_hw *hw, bool mstatus);
+void rtl8821ae_dm_bt_turn_off_bt_coexist_before_enter_lps(struct ieee80211_hw *hw);
+
+
+
+#endif
diff --git a/drivers/staging/rtl8821ae/rtl8821ae/hw.c b/drivers/staging/rtl8821ae/rtl8821ae/hw.c
new file mode 100644
index 0000000..5ed7a11
--- /dev/null
+++ b/drivers/staging/rtl8821ae/rtl8821ae/hw.c
@@ -0,0 +1,3346 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2009-2010  Realtek Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ * The full GNU General Public License is included in this distribution in the
+ * file called LICENSE.
+ *
+ * Contact Information:
+ * wlanfae <wlanfae@realtek.com>
+ * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
+ * Hsinchu 300, Taiwan.
+ *
+ * Larry Finger <Larry.Finger@lwfinger.net>
+ *
+ *****************************************************************************/
+
+#include "../wifi.h"
+#include "../efuse.h"
+#include "../base.h"
+#include "../regd.h"
+#include "../cam.h"
+#include "../ps.h"
+#include "../pci.h"
+#include "reg.h"
+#include "def.h"
+#include "phy.h"
+#include "dm.h"
+#include "fw.h"
+#include "led.h"
+#include "hw.h"
+#include "pwrseqcmd.h"
+#include "pwrseq.h"
+#include "btc.h"
+#include "../btcoexist/rtl_btc.h"
+
+#define LLT_CONFIG	5
+
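+/*
+ * Drain any skbs still queued on the beacon TX ring, unmapping their DMA
+ * buffers before freeing them.
+ */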
+static void _rtl8821ae_return_beacon_queue_skb(struct ieee80211_hw *hw)
+{
+	struct rtl_priv *rtlpriv = rtl_priv(hw);
+	struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
+	struct rtl8192_tx_ring *ring = &rtlpci->tx_ring[BEACON_QUEUE];
+
+	while (skb_queue_len(&ring->queue)) {
+		struct rtl_tx_desc *entry = &ring->desc[ring->idx];
+		struct sk_buff *skb = __skb_dequeue(&ring->queue);
+
+		pci_unmap_single(rtlpci->pdev,
+				 le32_to_cpu(rtlpriv->cfg->ops->get_desc(
+				 (u8 *) entry, true, HW_DESC_TXBUFF_ADDR)),
+				 skb->len, PCI_DMA_TODEVICE);
+		kfree_skb(skb);
+		ring->idx = (ring->idx + 1) % ring->entries;
+	}
+
+}
+
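+/*
+ * Update the cached beacon control value (set_bits set, clear_bits cleared)
+ * and write the result to REG_BCN_CTRL.
+ */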
+static void _rtl8821ae_set_bcn_ctrl_reg(struct ieee80211_hw *hw,
+				      u8 set_bits, u8 clear_bits)
+{
+	struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
+	struct rtl_priv *rtlpriv = rtl_priv(hw);
+
+	rtlpci->reg_bcn_ctrl_val |= set_bits;
+	rtlpci->reg_bcn_ctrl_val &= ~clear_bits;
+
+	rtl_write_byte(rtlpriv, REG_BCN_CTRL, (u8) rtlpci->reg_bcn_ctrl_val);
+}
+
+void _rtl8821ae_stop_tx_beacon(struct ieee80211_hw *hw)
+{
+	struct rtl_priv *rtlpriv = rtl_priv(hw);
+	u8 tmp1byte;
+
+	tmp1byte = rtl_read_byte(rtlpriv, REG_FWHW_TXQ_CTRL + 2);
+	rtl_write_byte(rtlpriv, REG_FWHW_TXQ_CTRL + 2, tmp1byte & (~BIT(6)));
+	rtl_write_byte(rtlpriv, REG_TBTT_PROHIBIT + 1, 0x64);
+	tmp1byte = rtl_read_byte(rtlpriv, REG_TBTT_PROHIBIT + 2);
+	tmp1byte &= ~(BIT(0));
+	rtl_write_byte(rtlpriv, REG_TBTT_PROHIBIT + 2, tmp1byte);
+}
+
+void _rtl8821ae_resume_tx_beacon(struct ieee80211_hw *hw)
+{
+	struct rtl_priv *rtlpriv = rtl_priv(hw);
+	u8 tmp1byte;
+
+	tmp1byte = rtl_read_byte(rtlpriv, REG_FWHW_TXQ_CTRL + 2);
+	rtl_write_byte(rtlpriv, REG_FWHW_TXQ_CTRL + 2, tmp1byte | BIT(6));
+	rtl_write_byte(rtlpriv, REG_TBTT_PROHIBIT + 1, 0xff);
+	tmp1byte = rtl_read_byte(rtlpriv, REG_TBTT_PROHIBIT + 2);
+	tmp1byte |= BIT(0);
+	rtl_write_byte(rtlpriv, REG_TBTT_PROHIBIT + 2, tmp1byte);
+}
+
+static void _rtl8821ae_enable_bcn_sub_func(struct ieee80211_hw *hw)
+{
+	_rtl8821ae_set_bcn_ctrl_reg(hw, 0, BIT(1));
+}
+
+static void _rtl8821ae_disable_bcn_sub_func(struct ieee80211_hw *hw)
+{
+	_rtl8821ae_set_bcn_ctrl_reg(hw, BIT(1), 0);
+}
+
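+/*
+ * Turn the firmware clock back on while in FW power save: claim the
+ * fw-clock-change flag, pass the RPWM value down and, when an ACK is
+ * expected, poll HISR for the CPWM interrupt before marking the firmware
+ * power state as RF on.
+ */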
+static void _rtl8821ae_set_fw_clock_on(struct ieee80211_hw *hw,
+	u8 rpwm_val, bool b_need_turn_off_ckk)
+{
+	struct rtl_priv *rtlpriv = rtl_priv(hw);
+	struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
+	bool b_support_remote_wake_up;
+	u32 count = 0, isr_regaddr, content;
+	bool b_schedule_timer = b_need_turn_off_ckk;
+	rtlpriv->cfg->ops->get_hw_reg(hw, HAL_DEF_WOWLAN,
+					(u8 *) (&b_support_remote_wake_up));
+
+	if (!rtlhal->bfw_ready)
+		return;
+	if (!rtlpriv->psc.b_fw_current_inpsmode)
+		return;
+
+	while (1) {
+		spin_lock_bh(&rtlpriv->locks.fw_ps_lock);
+		if (rtlhal->bfw_clk_change_in_progress) {
+			while (rtlhal->bfw_clk_change_in_progress) {
+				spin_unlock_bh(&rtlpriv->locks.fw_ps_lock);
+				count++;
+				udelay(100);
+				if (count > 1000)
+					return;
+				spin_lock_bh(&rtlpriv->locks.fw_ps_lock);
+			}
+			spin_unlock_bh(&rtlpriv->locks.fw_ps_lock);
+		} else {
+			rtlhal->bfw_clk_change_in_progress = true;
+			spin_unlock_bh(&rtlpriv->locks.fw_ps_lock);
+			break;
+		}
+	}
+
+	if (IS_IN_LOW_POWER_STATE_8821AE(rtlhal->fw_ps_state)) {
+		rtlpriv->cfg->ops->get_hw_reg(hw, HW_VAR_SET_RPWM,
+					(u8 *) (&rpwm_val));
+		if (FW_PS_IS_ACK(rpwm_val)) {
+			isr_regaddr = REG_HISR;
+			content = rtl_read_dword(rtlpriv, isr_regaddr);
+			while (!(content & IMR_CPWM) && (count < 500)) {
+				udelay(50);
+				count++;
+				content = rtl_read_dword(rtlpriv, isr_regaddr);
+			}
+
+			if (content & IMR_CPWM) {
+				rtl_write_word(rtlpriv, isr_regaddr, 0x0100);
+				rtlhal->fw_ps_state = FW_PS_STATE_RF_ON_8821AE;
+				RT_TRACE(COMP_POWER, DBG_LOUD,
+					("Receive CPWM INT!!! Set pHalData->FwPSState = %X\n",
+					rtlhal->fw_ps_state));
+			}
+		}
+
+		spin_lock_bh(&rtlpriv->locks.fw_ps_lock);
+		rtlhal->bfw_clk_change_in_progress = false;
+		spin_unlock_bh(&rtlpriv->locks.fw_ps_lock);
+		if (b_schedule_timer) {
+			mod_timer(&rtlpriv->works.fw_clockoff_timer,
+				  jiffies + MSECS(10));
+		}
+
+	} else  {
+		spin_lock_bh(&rtlpriv->locks.fw_ps_lock);
+		rtlhal->bfw_clk_change_in_progress = false;
+		spin_unlock_bh(&rtlpriv->locks.fw_ps_lock);
+	}
+
+
+}
+
+static void _rtl8821ae_set_fw_clock_off(struct ieee80211_hw *hw,
+	u8 rpwm_val)
+{
+	struct rtl_priv *rtlpriv = rtl_priv(hw);
+	struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
+	struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
+	struct rtl8192_tx_ring *ring;
+	enum rf_pwrstate rtstate;
+	bool b_schedule_timer = false;
+	u8 queue;
+
+	if (!rtlhal->bfw_ready)
+		return;
+	if (!rtlpriv->psc.b_fw_current_inpsmode)
+		return;
+	if (!rtlhal->ballow_sw_to_change_hwclc)
+		return;
+	rtlpriv->cfg->ops->get_hw_reg(hw, HW_VAR_RF_STATE, (u8 *)(&rtstate));
+	if (rtstate == ERFOFF || rtlpriv->psc.inactive_pwrstate == ERFOFF)
+		return;
+
+	for (queue = 0; queue < RTL_PCI_MAX_TX_QUEUE_COUNT; queue++) {
+		ring = &rtlpci->tx_ring[queue];
+		if (skb_queue_len(&ring->queue)) {
+			b_schedule_timer = true;
+			break;
+		}
+	}
+
+	if (b_schedule_timer) {
+		mod_timer(&rtlpriv->works.fw_clockoff_timer,
+					  jiffies + MSECS(10));
+		return;
+	}
+
+	if (FW_PS_STATE(rtlhal->fw_ps_state) != FW_PS_STATE_RF_OFF_LOW_PWR_8821AE) {
+		spin_lock_bh(&rtlpriv->locks.fw_ps_lock);
+		if (!rtlhal->bfw_clk_change_in_progress) {
+			rtlhal->bfw_clk_change_in_progress = true;
+			spin_unlock_bh(&rtlpriv->locks.fw_ps_lock);
+			rtlhal->fw_ps_state = FW_PS_STATE(rpwm_val);
+			rtl_write_word(rtlpriv, REG_HISR, 0x0100);
+			rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_SET_RPWM,
+				(u8 *) (&rpwm_val));
+			spin_lock_bh(&rtlpriv->locks.fw_ps_lock);
+			rtlhal->bfw_clk_change_in_progress = false;
+			spin_unlock_bh(&rtlpriv->locks.fw_ps_lock);
+		} else {
+			spin_unlock_bh(&rtlpriv->locks.fw_ps_lock);
+			mod_timer(&rtlpriv->works.fw_clockoff_timer,
+					  jiffies + MSECS(10));
+		}
+	}
+
+}
+
+static void _rtl8821ae_set_fw_ps_rf_on(struct ieee80211_hw *hw)
+{
+	u8 rpwm_val = 0;
+	rpwm_val |= (FW_PS_STATE_RF_OFF_8821AE | FW_PS_ACK);
+	_rtl8821ae_set_fw_clock_on(hw, rpwm_val, true);
+}
+
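+/*
+ * Leave firmware LPS: bring the clock/RF back on via RPWM and tell the
+ * firmware to switch to active power save mode.
+ */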
+static void _rtl8821ae_fwlps_leave(struct ieee80211_hw *hw)
+{
+	struct rtl_priv *rtlpriv = rtl_priv(hw);
+	struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw));
+	struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
+	bool b_fw_current_inps = false;
+	u8 rpwm_val = 0,fw_pwrmode = FW_PS_ACTIVE_MODE;
+
+	if (ppsc->b_low_power_enable){
+		rpwm_val = (FW_PS_STATE_ALL_ON_8821AE|FW_PS_ACK);/* RF on */
+		_rtl8821ae_set_fw_clock_on(hw, rpwm_val, false);
+		rtlhal->ballow_sw_to_change_hwclc = false;
+		rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_H2C_FW_PWRMODE,
+				(u8 *) (&fw_pwrmode));
+		rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_FW_PSMODE_STATUS,
+				(u8 *) (&b_fw_current_inps));
+	} else {
+		rpwm_val = FW_PS_STATE_ALL_ON_8821AE;	/* RF on */
+		rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_SET_RPWM,
+				(u8 *) (&rpwm_val));
+		rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_H2C_FW_PWRMODE,
+				(u8 *) (&fw_pwrmode));
+		rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_FW_PSMODE_STATUS,
+				(u8 *) (&b_fw_current_inps));
+	}
+
+}
+
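+/*
+ * Enter firmware LPS: mark the FW power-save state, hand the configured
+ * power-save mode to the firmware and switch the RF off through RPWM.
+ */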
+static void _rtl8821ae_fwlps_enter(struct ieee80211_hw *hw)
+{
+	struct rtl_priv *rtlpriv = rtl_priv(hw);
+	struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw));
+	struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
+	bool b_fw_current_inps = true;
+	u8 rpwm_val;
+
+	if (ppsc->b_low_power_enable){
+		rpwm_val = FW_PS_STATE_RF_OFF_LOW_PWR_8821AE;	/* RF off */
+		rtlpriv->cfg->ops->set_hw_reg(hw,
+				HW_VAR_FW_PSMODE_STATUS,
+				(u8 *) (&b_fw_current_inps));
+		rtlpriv->cfg->ops->set_hw_reg(hw,
+				HW_VAR_H2C_FW_PWRMODE,
+				(u8 *) (&ppsc->fwctrl_psmode));
+		rtlhal->ballow_sw_to_change_hwclc = true;
+		_rtl8821ae_set_fw_clock_off(hw, rpwm_val);
+
+
+	} else {
+		rpwm_val = FW_PS_STATE_RF_OFF_8821AE;	/* RF off */
+		rtlpriv->cfg->ops->set_hw_reg(hw,
+				HW_VAR_FW_PSMODE_STATUS,
+				(u8 *) (&b_fw_current_inps));
+		rtlpriv->cfg->ops->set_hw_reg(hw,
+				HW_VAR_H2C_FW_PWRMODE,
+				(u8 *) (&ppsc->fwctrl_psmode));
+		rtlpriv->cfg->ops->set_hw_reg(hw,
+				HW_VAR_SET_RPWM,
+				(u8 *) (&rpwm_val));
+	}
+
+}
+
+void rtl8821ae_get_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val)
+{
+	struct rtl_priv *rtlpriv = rtl_priv(hw);
+	struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw));
+	struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
+	struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
+
+	switch (variable) {
+	case HW_VAR_ETHER_ADDR:
+		*((u32 *)(val)) = rtl_read_dword(rtlpriv, REG_MACID);
+		*((u16 *)(val+4)) = rtl_read_word(rtlpriv, REG_MACID + 4);
+		break;
+	case HW_VAR_BSSID:
+		*((u32 *)(val)) = rtl_read_dword(rtlpriv, REG_BSSID);
+		*((u16 *)(val+4)) = rtl_read_word(rtlpriv, REG_BSSID+4);
+		break;
+	case HW_VAR_MEDIA_STATUS:
+		val[0] = rtl_read_byte(rtlpriv, REG_CR+2) & 0x3;
+		break;
+	case HW_VAR_SLOT_TIME:
+		*((u8 *)(val)) = mac->slot_time;
+		break;
+	case HW_VAR_BEACON_INTERVAL:
+		*((u16 *)(val)) = rtl_read_word(rtlpriv, REG_BCN_INTERVAL);
+		break;
+	case HW_VAR_ATIM_WINDOW:
+		*((u16 *)(val)) =  rtl_read_word(rtlpriv, REG_ATIMWND);
+		break;
+	case HW_VAR_RCR:
+		*((u32 *) (val)) = rtlpci->receive_config;
+		break;
+	case HW_VAR_RF_STATE:
+		*((enum rf_pwrstate *)(val)) = ppsc->rfpwr_state;
+		break;
+	case HW_VAR_FWLPS_RF_ON:{
+			enum rf_pwrstate rfState;
+			u32 val_rcr;
+
+			rtlpriv->cfg->ops->get_hw_reg(hw,
+						      HW_VAR_RF_STATE,
+						      (u8 *) (&rfState));
+			if (rfState == ERFOFF) {
+				*((bool *) (val)) = true;
+			} else {
+				val_rcr = rtl_read_dword(rtlpriv, REG_RCR);
+				val_rcr &= 0x00070000;
+				if (val_rcr)
+					*((bool *) (val)) = false;
+				else
+					*((bool *) (val)) = true;
+			}
+			break;
+		}
+	case HW_VAR_FW_PSMODE_STATUS:
+		*((bool *) (val)) = ppsc->b_fw_current_inpsmode;
+		break;
+	case HW_VAR_CORRECT_TSF:{
+			u64 tsf;
+			u32 *ptsf_low = (u32 *) & tsf;
+			u32 *ptsf_high = ((u32 *) & tsf) + 1;
+
+			*ptsf_high = rtl_read_dword(rtlpriv, (REG_TSFTR + 4));
+			*ptsf_low = rtl_read_dword(rtlpriv, REG_TSFTR);
+
+			*((u64 *) (val)) = tsf;
+
+			break;
+		}
+	default:
+		RT_TRACE(COMP_ERR, DBG_EMERG,
+			 ("switch case not process %x\n",variable));
+		break;
+	}
+}
+
+
+void rtl8821ae_set_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val)
+{
+	struct rtl_priv *rtlpriv = rtl_priv(hw);
+	struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
+	struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
+	struct rtl_efuse *rtlefuse = rtl_efuse(rtl_priv(hw));
+	struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw));
+	struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
+	u8 idx;
+
+	switch (variable) {
+	case HW_VAR_ETHER_ADDR:{
+			for (idx = 0; idx < ETH_ALEN; idx++) {
+				rtl_write_byte(rtlpriv, (REG_MACID + idx),
+					       val[idx]);
+			}
+			break;
+		}
+	case HW_VAR_BASIC_RATE:{
+			u16 b_rate_cfg = ((u16 *) val)[0];
+			u8 rate_index = 0;
+			b_rate_cfg = b_rate_cfg & 0x15f;
+			b_rate_cfg |= 0x01;
+			rtl_write_byte(rtlpriv, REG_RRSR, b_rate_cfg & 0xff);
+			rtl_write_byte(rtlpriv, REG_RRSR + 1,
+				       (b_rate_cfg >> 8) & 0xff);
+			while (b_rate_cfg > 0x1) {
+				b_rate_cfg = (b_rate_cfg >> 1);
+				rate_index++;
+			}
+			rtl_write_byte(rtlpriv, REG_INIRTS_RATE_SEL,
+				       rate_index);
+			break;
+		}
+	case HW_VAR_BSSID:{
+			for (idx = 0; idx < ETH_ALEN; idx++) {
+				rtl_write_byte(rtlpriv, (REG_BSSID + idx),
+					       val[idx]);
+			}
+			break;
+		}
+	case HW_VAR_SIFS:{
+			rtl_write_byte(rtlpriv, REG_SIFS_CTX + 1, val[0]);
+			rtl_write_byte(rtlpriv, REG_SIFS_TRX + 1, val[1]);
+
+			rtl_write_byte(rtlpriv, REG_SPEC_SIFS + 1, val[0]);
+			rtl_write_byte(rtlpriv, REG_MAC_SPEC_SIFS + 1, val[0]);
+
+			if (!mac->ht_enable)
+				rtl_write_word(rtlpriv, REG_RESP_SIFS_OFDM,
+					       0x0e0e);
+			else
+				rtl_write_word(rtlpriv, REG_RESP_SIFS_OFDM,
+					       *((u16 *) val));
+			break;
+		}
+	case HW_VAR_SLOT_TIME:{
+			u8 e_aci;
+
+			RT_TRACE(COMP_MLME, DBG_LOUD,
+				 ("HW_VAR_SLOT_TIME %x\n", val[0]));
+
+			rtl_write_byte(rtlpriv, REG_SLOT, val[0]);
+
+			for (e_aci = 0; e_aci < AC_MAX; e_aci++) {
+				rtlpriv->cfg->ops->set_hw_reg(hw,
+							      HW_VAR_AC_PARAM,
+							      (u8 *) (&e_aci));
+			}
+			break;
+		}
+	case HW_VAR_ACK_PREAMBLE:{
+			u8 reg_tmp;
+			u8 short_preamble = (bool) (*(u8 *) val);
+			reg_tmp = rtl_read_byte(rtlpriv, REG_TRXPTCL_CTL+2);
+			if (short_preamble){
+				reg_tmp |= BIT(1);
+				rtl_write_byte(rtlpriv, REG_TRXPTCL_CTL + 2, reg_tmp);
+			} else {
+				reg_tmp &= (~BIT(1));
+				rtl_write_byte(rtlpriv, REG_TRXPTCL_CTL + 2, reg_tmp);
+			}
+			break;
+		}
+	case HW_VAR_WPA_CONFIG:
+		rtl_write_byte(rtlpriv, REG_SECCFG, *((u8 *) val));
+		break;
+	case HW_VAR_AMPDU_MIN_SPACE:{
+			u8 min_spacing_to_set;
+			u8 sec_min_space;
+
+			min_spacing_to_set = *((u8 *) val);
+			if (min_spacing_to_set <= 7) {
+				sec_min_space = 0;
+
+				if (min_spacing_to_set < sec_min_space)
+					min_spacing_to_set = sec_min_space;
+
+				mac->min_space_cfg = ((mac->min_space_cfg &
+						       0xf8) |
+						      min_spacing_to_set);
+
+				*val = min_spacing_to_set;
+
+				RT_TRACE(COMP_MLME, DBG_LOUD,
+					 ("Set HW_VAR_AMPDU_MIN_SPACE: %#x\n",
+					  mac->min_space_cfg));
+
+				rtl_write_byte(rtlpriv, REG_AMPDU_MIN_SPACE,
+					       mac->min_space_cfg);
+			}
+			break;
+		}
+	case HW_VAR_SHORTGI_DENSITY:{
+			u8 density_to_set;
+
+			density_to_set = *((u8 *) val);
+			mac->min_space_cfg |= (density_to_set << 3);
+
+			RT_TRACE(COMP_MLME, DBG_LOUD,
+				 ("Set HW_VAR_SHORTGI_DENSITY: %#x\n",
+				  mac->min_space_cfg));
+
+			rtl_write_byte(rtlpriv, REG_AMPDU_MIN_SPACE,
+				       mac->min_space_cfg);
+
+			break;
+		}
+	case HW_VAR_AMPDU_FACTOR:{
+			u32	ampdu_len =  (*((u8 *)val));
+			if(rtlhal->hw_type == HARDWARE_TYPE_RTL8812AE) {
+				if(ampdu_len < VHT_AGG_SIZE_128K)
+					ampdu_len = (0x2000 << (*((u8 *)val))) -1;
+				else
+					ampdu_len = 0x1ffff;
+			} else if(rtlhal->hw_type == HARDWARE_TYPE_RTL8821AE) {
+				if(ampdu_len < HT_AGG_SIZE_64K)
+					ampdu_len = (0x2000 << (*((u8 *)val))) -1;
+				else
+					ampdu_len = 0xffff;
+			}
+			ampdu_len |= BIT(31);
+
+			rtl_write_dword(rtlpriv,
+				REG_AMPDU_MAX_LENGTH_8812, ampdu_len);
+			break;
+		}
+	case HW_VAR_AC_PARAM:{
+			u8 e_aci = *((u8 *) val);
+			rtl8821ae_dm_init_edca_turbo(hw);
+
+			if (rtlpci->acm_method != eAcmWay2_SW)
+				rtlpriv->cfg->ops->set_hw_reg(hw,
+							      HW_VAR_ACM_CTRL,
+							      (u8 *) (&e_aci));
+			break;
+		}
+	case HW_VAR_ACM_CTRL:{
+			u8 e_aci = *((u8 *) val);
+			union aci_aifsn *p_aci_aifsn =
+			    (union aci_aifsn *)(&(mac->ac[0].aifs));
+			u8 acm = p_aci_aifsn->f.acm;
+			u8 acm_ctrl = rtl_read_byte(rtlpriv, REG_ACMHWCTRL);
+
+			acm_ctrl =
+			    acm_ctrl | ((rtlpci->acm_method == 2) ? 0x0 : 0x1);
+
+			if (acm) {
+				switch (e_aci) {
+				case AC0_BE:
+					acm_ctrl |= AcmHw_BeqEn;
+					break;
+				case AC2_VI:
+					acm_ctrl |= AcmHw_ViqEn;
+					break;
+				case AC3_VO:
+					acm_ctrl |= AcmHw_VoqEn;
+					break;
+				default:
+					RT_TRACE(COMP_ERR, DBG_WARNING,
+						 ("HW_VAR_ACM_CTRL acm set "
+						  "failed: eACI is %d\n", acm));
+					break;
+				}
+			} else {
+				switch (e_aci) {
+				case AC0_BE:
+					acm_ctrl &= (~AcmHw_BeqEn);
+					break;
+				case AC2_VI:
+					acm_ctrl &= (~AcmHw_ViqEn);
+					break;
+				case AC3_VO:
+					acm_ctrl &= (~AcmHw_VoqEn);
+					break;
+				default:
+					RT_TRACE(COMP_ERR, DBG_EMERG,
+						 ("switch case not processed\n"));
+					break;
+				}
+			}
+
+			RT_TRACE(COMP_QOS, DBG_TRACE,
+				 ("SetHwReg8190pci(): [HW_VAR_ACM_CTRL] "
+				  "Write 0x%X\n", acm_ctrl));
+			rtl_write_byte(rtlpriv, REG_ACMHWCTRL, acm_ctrl);
+			break;
+		}
+	case HW_VAR_RCR:{
+			rtl_write_dword(rtlpriv, REG_RCR, ((u32 *) (val))[0]);
+			rtlpci->receive_config = ((u32 *) (val))[0];
+			break;
+		}
+	case HW_VAR_RETRY_LIMIT:{
+			u8 retry_limit = ((u8 *) (val))[0];
+
+			rtl_write_word(rtlpriv, REG_RL,
+				       retry_limit << RETRY_LIMIT_SHORT_SHIFT |
+				       retry_limit << RETRY_LIMIT_LONG_SHIFT);
+			break;
+		}
+	case HW_VAR_DUAL_TSF_RST:
+		rtl_write_byte(rtlpriv, REG_DUAL_TSF_RST, (BIT(0) | BIT(1)));
+		break;
+	case HW_VAR_EFUSE_BYTES:
+		rtlefuse->efuse_usedbytes = *((u16 *) val);
+		break;
+	case HW_VAR_EFUSE_USAGE:
+		rtlefuse->efuse_usedpercentage = *((u8 *) val);
+		break;
+	case HW_VAR_IO_CMD:
+		rtl8821ae_phy_set_io_cmd(hw, (*(enum io_type *)val));
+		break;
+	case HW_VAR_SET_RPWM:{
+			u8 rpwm_val;
+
+			rpwm_val = rtl_read_byte(rtlpriv, REG_PCIE_HRPWM);
+			udelay(1);
+
+			if (rpwm_val & BIT(7)) {
+				rtl_write_byte(rtlpriv, REG_PCIE_HRPWM,
+					       (*(u8 *) val));
+			} else {
+				rtl_write_byte(rtlpriv, REG_PCIE_HRPWM,
+					       ((*(u8 *) val) | BIT(7)));
+			}
+
+			break;
+		}
+	case HW_VAR_H2C_FW_PWRMODE:{
+			rtl8821ae_set_fw_pwrmode_cmd(hw, (*(u8 *) val));
+			break;
+		}
+	case HW_VAR_FW_PSMODE_STATUS:
+		ppsc->b_fw_current_inpsmode = *((bool *) val);
+		break;
+
+	case HW_VAR_RESUME_CLK_ON:
+		_rtl8821ae_set_fw_ps_rf_on(hw);
+		break;
+
+	case HW_VAR_FW_LPS_ACTION:{
+		bool b_enter_fwlps = *((bool *) val);
+
+		if (b_enter_fwlps)
+			_rtl8821ae_fwlps_enter(hw);
+		 else
+			_rtl8821ae_fwlps_leave(hw);
+
+		 break;
+		}
+
+	case HW_VAR_H2C_FW_JOINBSSRPT:{
+			u8 mstatus = (*(u8 *) val);
+			u8 tmp_regcr, tmp_reg422,bcnvalid_reg;
+			u8 count = 0, dlbcn_count = 0;
+			bool b_recover = false;
+
+			if (mstatus == RT_MEDIA_CONNECT) {
+				rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_AID,
+							      NULL);
+
+				tmp_regcr = rtl_read_byte(rtlpriv, REG_CR + 1);
+				rtl_write_byte(rtlpriv, REG_CR + 1,
+					       (tmp_regcr | BIT(0)));
+
+				_rtl8821ae_set_bcn_ctrl_reg(hw, 0, BIT(3));
+				_rtl8821ae_set_bcn_ctrl_reg(hw, BIT(4), 0);
+
+				tmp_reg422 =
+				    rtl_read_byte(rtlpriv,
+						  REG_FWHW_TXQ_CTRL + 2);
+				rtl_write_byte(rtlpriv, REG_FWHW_TXQ_CTRL + 2,
+					       tmp_reg422 & (~BIT(6)));
+				if (tmp_reg422 & BIT(6))
+					b_recover = true;
+
+				do {
+					bcnvalid_reg = rtl_read_byte(rtlpriv, REG_TDECTRL+2);
+					rtl_write_byte(rtlpriv, REG_TDECTRL+2,(bcnvalid_reg | BIT(0)));
+					_rtl8821ae_return_beacon_queue_skb(hw);
+
+					if (rtlhal->hw_type == HARDWARE_TYPE_RTL8812AE)
+						rtl8812ae_set_fw_rsvdpagepkt(hw, 0);
+					else
+						rtl8821ae_set_fw_rsvdpagepkt(hw, 0);
+					bcnvalid_reg = rtl_read_byte(rtlpriv, REG_TDECTRL+2);
+					count = 0;
+					while (!(bcnvalid_reg & BIT(0)) && count <20){
+						count++;
+						udelay(10);
+						bcnvalid_reg = rtl_read_byte(rtlpriv, REG_TDECTRL+2);
+					}
+					dlbcn_count++;
+				} while (!(bcnvalid_reg & BIT(0)) && dlbcn_count <5);
+
+				if (bcnvalid_reg & BIT(0))
+					rtl_write_byte(rtlpriv, REG_TDECTRL+2, BIT(0));
+
+				_rtl8821ae_set_bcn_ctrl_reg(hw, BIT(3), 0);
+				_rtl8821ae_set_bcn_ctrl_reg(hw, 0, BIT(4));
+
+				if (b_recover) {
+					rtl_write_byte(rtlpriv,
+						       REG_FWHW_TXQ_CTRL + 2,
+						       tmp_reg422);
+				}
+
+				rtl_write_byte(rtlpriv, REG_CR + 1,
+					       (tmp_regcr & ~(BIT(0))));
+			}
+			rtl8821ae_set_fw_joinbss_report_cmd(hw, (*(u8 *) val));
+
+			break;
+		}
+	case HW_VAR_H2C_FW_P2P_PS_OFFLOAD:{
+		rtl8821ae_set_p2p_ps_offload_cmd(hw, (*(u8 *) val));
+		break;
+	}
+
+	case HW_VAR_AID:{
+			u16 u2btmp;
+			u2btmp = rtl_read_word(rtlpriv, REG_BCN_PSR_RPT);
+			u2btmp &= 0xC000;
+			rtl_write_word(rtlpriv, REG_BCN_PSR_RPT, (u2btmp |
+						mac->assoc_id));
+
+			break;
+		}
+	case HW_VAR_CORRECT_TSF:{
+			u8 btype_ibss = ((u8 *) (val))[0];
+
+			if (btype_ibss == true)
+				_rtl8821ae_stop_tx_beacon(hw);
+
+			_rtl8821ae_set_bcn_ctrl_reg(hw, 0, BIT(3));
+
+			rtl_write_dword(rtlpriv, REG_TSFTR,
+					(u32) (mac->tsf & 0xffffffff));
+			rtl_write_dword(rtlpriv, REG_TSFTR + 4,
+					(u32) ((mac->tsf >> 32) & 0xffffffff));
+
+			_rtl8821ae_set_bcn_ctrl_reg(hw, BIT(3), 0);
+
+			if (btype_ibss == true)
+				_rtl8821ae_resume_tx_beacon(hw);
+
+			break;
+
+		}
+	case HW_VAR_NAV_UPPER: {
+			u32	us_nav_upper = ((u32)*val);
+
+			if(us_nav_upper > HAL_92C_NAV_UPPER_UNIT * 0xFF)
+			{
+				RT_TRACE(COMP_INIT , DBG_WARNING,
+					("The setting value (0x%08X us) of NAV_UPPER"
+					 " is larger than (%d * 0xFF)!!!\n",
+					 us_nav_upper, HAL_92C_NAV_UPPER_UNIT));
+				break;
+			}
+			rtl_write_byte(rtlpriv, REG_NAV_UPPER,
+				((u8)((us_nav_upper + HAL_92C_NAV_UPPER_UNIT - 1) / HAL_92C_NAV_UPPER_UNIT)));
+			break;
+		}
+	case HW_VAR_KEEP_ALIVE: {
+			u8 array[2];
+			array[0] = 0xff;
+			array[1] = *((u8 *)val);
+			rtl8821ae_fill_h2c_cmd(hw, H2C_8821AE_KEEP_ALIVE_CTRL, 2, array);
+			break;
+		}
+	default:
+		RT_TRACE(COMP_ERR, DBG_EMERG,
+			 ("switch case %#x not processed\n", variable));
+		break;
+	}
+}
+
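+/* Write one LLT (link list table) entry and poll REG_LLT_INIT until the
+ * controller reports the operation done or POLLING_LLT_THRESHOLD iterations
+ * have elapsed. */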
+static bool _rtl8821ae_llt_write(struct ieee80211_hw *hw, u32 address, u32 data)
+{
+	struct rtl_priv *rtlpriv = rtl_priv(hw);
+	bool status = true;
+	long count = 0;
+	u32 value = _LLT_INIT_ADDR(address) |
+	    _LLT_INIT_DATA(data) | _LLT_OP(_LLT_WRITE_ACCESS);
+
+	rtl_write_dword(rtlpriv, REG_LLT_INIT, value);
+
+	do {
+		value = rtl_read_dword(rtlpriv, REG_LLT_INIT);
+		if (_LLT_NO_ACTIVE == _LLT_OP_VALUE(value))
+			break;
+
+		if (count > POLLING_LLT_THRESHOLD) {
+			RT_TRACE(COMP_ERR, DBG_EMERG,
+				 ("Failed to polling write LLT done at "
+				  "address %d!\n", address));
+			status = false;
+			break;
+		}
+	} while (++count);
+
+	return status;
+}
+
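+/* Initialize the LLT: chain the pages below txpktbuf_bndy into one list,
+ * link the remaining pages into a ring, then program the queue page
+ * numbers (REG_RQPN). */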
+static bool _rtl8821ae_llt_table_init(struct ieee80211_hw *hw)
+{
+	struct rtl_priv *rtlpriv = rtl_priv(hw);
+	unsigned short i;
+	u8 txpktbuf_bndy;
+	u8 maxPage;
+	bool status;
+
+	maxPage = 255;
+	txpktbuf_bndy = 0xF8;
+
+
+	rtl_write_byte(rtlpriv, REG_TRXFF_BNDY, txpktbuf_bndy);
+	rtl_write_word(rtlpriv, REG_TRXFF_BNDY + 2, MAX_RX_DMA_BUFFER_SIZE - 1);
+
+	rtl_write_byte(rtlpriv, REG_TDECTRL + 1, txpktbuf_bndy);
+
+	rtl_write_byte(rtlpriv, REG_TXPKTBUF_BCNQ_BDNY, txpktbuf_bndy);
+	rtl_write_byte(rtlpriv, REG_TXPKTBUF_MGQ_BDNY, txpktbuf_bndy);
+
+	rtl_write_byte(rtlpriv, REG_PBP, 0x31);
+	rtl_write_byte(rtlpriv, REG_RX_DRVINFO_SZ, 0x4);
+
+	for (i = 0; i < (txpktbuf_bndy - 1); i++) {
+		status = _rtl8821ae_llt_write(hw, i, i + 1);
+		if (true != status)
+			return status;
+	}
+
+	status = _rtl8821ae_llt_write(hw, (txpktbuf_bndy - 1), 0xFF);
+	if (true != status)
+		return status;
+
+	for (i = txpktbuf_bndy; i < maxPage; i++) {
+		status = _rtl8821ae_llt_write(hw, i, (i + 1));
+		if (true != status)
+			return status;
+	}
+
+	status = _rtl8821ae_llt_write(hw, maxPage, txpktbuf_bndy);
+	if (true != status)
+		return status;
+
+	rtl_write_dword(rtlpriv, REG_RQPN, 0x80e70808);
+	rtl_write_byte(rtlpriv, REG_RQPN_NPQ, 0x00);
+
+	return true;
+}
+
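+/* Refresh the software LED state according to why the RF was last turned
+ * off (IPS, init, or other). */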
+static void _rtl8821ae_gen_refresh_led_state(struct ieee80211_hw *hw)
+{
+	struct rtl_priv *rtlpriv = rtl_priv(hw);
+	struct rtl_pci_priv *pcipriv = rtl_pcipriv(hw);
+	struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw));
+	struct rtl_led *pLed0 = &(pcipriv->ledctl.sw_led0);
+	struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
+
+	if (rtlpriv->rtlhal.up_first_time)
+		return;
+
+	if (ppsc->rfoff_reason == RF_CHANGE_BY_IPS) {
+		if (rtlhal->hw_type == HARDWARE_TYPE_RTL8812AE)
+			rtl8812ae_sw_led_on(hw, pLed0);
+		else
+			rtl8821ae_sw_led_on(hw, pLed0);
+	} else if (ppsc->rfoff_reason == RF_CHANGE_BY_INIT) {
+		if (rtlhal->hw_type == HARDWARE_TYPE_RTL8812AE)
+			rtl8812ae_sw_led_on(hw, pLed0);
+		else
+			rtl8821ae_sw_led_on(hw, pLed0);
+	} else {
+		if (rtlhal->hw_type == HARDWARE_TYPE_RTL8812AE)
+			rtl8812ae_sw_led_off(hw, pLed0);
+		else
+			rtl8821ae_sw_led_off(hw, pLed0);
+	}
+}
+
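+/* MAC init: run the chip power-on command sequence, set up the LLT when the
+ * MAC was not already enabled, and program the interrupt, TRX DMA and
+ * descriptor base address registers. */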
+static bool _rtl8821ae_init_mac(struct ieee80211_hw *hw)
+{
+	struct rtl_priv *rtlpriv = rtl_priv(hw);
+	struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
+	struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
+
+	u8 bytetmp = 0;
+	u16 wordtmp = 0;
+	bool b_mac_func_enable = rtlhal->b_mac_func_enable;
+
+	rtl_write_byte(rtlpriv, REG_RSV_CTRL, 0x00);
+
+	/*Auto Power Down to CHIP-off State*/
+	bytetmp = rtl_read_byte(rtlpriv, REG_APS_FSMCO + 1) & (~BIT(7));
+	rtl_write_byte(rtlpriv, REG_APS_FSMCO + 1, bytetmp);
+
+	if (rtlhal->hw_type == HARDWARE_TYPE_RTL8812AE) {
+		/* HW Power on sequence*/
+		if(!rtl_hal_pwrseqcmdparsing(rtlpriv, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK,
+			PWR_INTF_PCI_MSK, RTL8812_NIC_ENABLE_FLOW)) {
+				RT_TRACE(COMP_INIT,DBG_LOUD,("init 8812 MAC Fail as power on failure\n"));
+				return false;
+		}
+	} else {
+		/* HW Power on sequence */
+		if (!rtl_hal_pwrseqcmdparsing(rtlpriv, PWR_CUT_A_MSK, PWR_FAB_ALL_MSK,
+			PWR_INTF_PCI_MSK, RTL8821A_NIC_ENABLE_FLOW)){
+			RT_TRACE(COMP_INIT,DBG_LOUD,("init 8821 MAC Fail as power on failure\n"));
+			return false;
+		}
+	}
+
+	bytetmp = rtl_read_byte(rtlpriv, REG_APS_FSMCO) | BIT(4);
+	rtl_write_byte(rtlpriv, REG_APS_FSMCO, bytetmp);
+
+	bytetmp = rtl_read_byte(rtlpriv, REG_CR);
+	bytetmp = 0xff;
+	rtl_write_byte(rtlpriv, REG_CR, bytetmp);
+	mdelay(2);
+
+	bytetmp |= 0x7f;
+	rtl_write_byte(rtlpriv, REG_HWSEQ_CTRL, bytetmp);
+	mdelay(2);
+
+	if (rtlhal->hw_type == HARDWARE_TYPE_RTL8821AE) {
+		bytetmp = rtl_read_byte(rtlpriv, REG_SYS_CFG + 3);
+		if (bytetmp & BIT(0)) {
+			bytetmp = rtl_read_byte(rtlpriv, 0x7c);
+			bytetmp |= BIT(6);
+			rtl_write_byte(rtlpriv, 0x7c, bytetmp);
+		}
+	}
+
+	bytetmp = rtl_read_byte(rtlpriv, REG_GPIO_MUXCFG + 1);
+	bytetmp &= ~BIT(4);
+	rtl_write_byte(rtlpriv, REG_GPIO_MUXCFG + 1, bytetmp);
+
+	rtl_write_word(rtlpriv, REG_CR, 0x2ff);
+
+	if (!b_mac_func_enable) {
+		if (!_rtl8821ae_llt_table_init(hw))
+			return false;
+	}
+
+	rtl_write_dword(rtlpriv, REG_HISR, 0xffffffff);
+	rtl_write_dword(rtlpriv, REG_HISRE, 0xffffffff);
+
+	/* Enable FW Beamformer Interrupt */
+	bytetmp = rtl_read_byte(rtlpriv, REG_FWIMR + 3);
+	rtl_write_byte(rtlpriv, REG_FWIMR + 3, bytetmp | BIT(6));
+
+	wordtmp = rtl_read_word(rtlpriv, REG_TRXDMA_CTRL);
+	wordtmp &= 0xf;
+	wordtmp |= 0xF5B1;
+	rtl_write_word(rtlpriv, REG_TRXDMA_CTRL, wordtmp);
+
+	rtl_write_byte(rtlpriv, REG_FWHW_TXQ_CTRL + 1, 0x1F);
+	rtl_write_dword(rtlpriv, REG_RCR, rtlpci->receive_config);
+	rtl_write_word(rtlpriv, REG_RXFLTMAP2, 0xFFFF);
+	/*low address*/
+	rtl_write_dword(rtlpriv, REG_BCNQ_DESA,
+			rtlpci->tx_ring[BEACON_QUEUE].dma & DMA_BIT_MASK(32));
+	rtl_write_dword(rtlpriv, REG_MGQ_DESA,
+			rtlpci->tx_ring[MGNT_QUEUE].dma & DMA_BIT_MASK(32));
+	rtl_write_dword(rtlpriv, REG_VOQ_DESA,
+			rtlpci->tx_ring[VO_QUEUE].dma & DMA_BIT_MASK(32));
+	rtl_write_dword(rtlpriv, REG_VIQ_DESA,
+			rtlpci->tx_ring[VI_QUEUE].dma & DMA_BIT_MASK(32));
+	rtl_write_dword(rtlpriv, REG_BEQ_DESA,
+			rtlpci->tx_ring[BE_QUEUE].dma & DMA_BIT_MASK(32));
+	rtl_write_dword(rtlpriv, REG_BKQ_DESA,
+			rtlpci->tx_ring[BK_QUEUE].dma & DMA_BIT_MASK(32));
+	rtl_write_dword(rtlpriv, REG_HQ_DESA,
+			rtlpci->tx_ring[HIGH_QUEUE].dma & DMA_BIT_MASK(32));
+	rtl_write_dword(rtlpriv, REG_RX_DESA,
+			rtlpci->rx_ring[RX_MPDU_QUEUE].dma & DMA_BIT_MASK(32));
+
+	rtl_write_byte(rtlpriv, REG_PCIE_CTRL_REG + 3, 0x77);
+
+	rtl_write_dword(rtlpriv, REG_INT_MIG, 0);
+
+	rtl_write_byte(rtlpriv, REG_SECONDARY_CCA_CTRL, 0x3);
+	_rtl8821ae_gen_refresh_led_state(hw);
+
+	return true;
+}
+
+static void _rtl8821ae_hw_configure(struct ieee80211_hw *hw)
+{
+	struct rtl_priv *rtlpriv = rtl_priv(hw);
+	struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
+	u32 reg_rrsr;
+
+	reg_rrsr = RATE_ALL_CCK | RATE_ALL_OFDM_AG;
+
+	rtl_write_dword(rtlpriv, REG_RRSR, reg_rrsr);
+	/* ARFB table 9 for 11ac 5G 2SS */
+	rtl_write_dword(rtlpriv, REG_ARFR0 + 4, 0xfffff000);
+	/* ARFB table 10 for 11ac 5G 1SS */
+	rtl_write_dword(rtlpriv, REG_ARFR1 + 4, 0x003ff000);
+	/* ARFB table 11 for 11ac 24G 1SS */
+	rtl_write_dword(rtlpriv, REG_ARFR2, 0x00000015);
+	rtl_write_dword(rtlpriv, REG_ARFR2 + 4, 0x003ff000);
+	/* ARFB table 12 for 11ac 24G 1SS */
+	rtl_write_dword(rtlpriv, REG_ARFR3, 0x00000015);
+	rtl_write_dword(rtlpriv, REG_ARFR3 + 4, 0xffcff000);
+	/* 0x420[7] = 0, enable retry AMPDU in new AMPDU, not single MPDU. */
+	rtl_write_word(rtlpriv, REG_FWHW_TXQ_CTRL, 0x1F00);
+	rtl_write_byte(rtlpriv, REG_AMPDU_MAX_TIME, 0x70);
+
+	/*Set retry limit*/
+	rtl_write_word(rtlpriv, REG_RL, 0x0707);
+
+
+	/* Set Data / Response auto rate fallback retry count*/
+	rtl_write_dword(rtlpriv, REG_DARFRC, 0x01000000);
+	rtl_write_dword(rtlpriv, REG_DARFRC + 4, 0x07060504);
+	rtl_write_dword(rtlpriv, REG_RARFRC, 0x01000000);
+	rtl_write_dword(rtlpriv, REG_RARFRC + 4, 0x07060504);
+
+	rtlpci->reg_bcn_ctrl_val = 0x1d;
+	rtl_write_byte(rtlpriv, REG_BCN_CTRL, rtlpci->reg_bcn_ctrl_val);
+
+	/* TBTT prohibit hold time. Suggested by designer TimChen. */
+	rtl_write_byte(rtlpriv, REG_TBTT_PROHIBIT + 1, 0xff); /* 8 ms */
+
+	/* AGGR_BK_TIME Reg51A 0x16 */
+	rtl_write_word(rtlpriv, REG_NAV_PROT_LEN, 0x0040);
+
+	/*For Rx TP. Suggested by SD1 Richard. Added by tynli. 2010.04.12.*/
+	rtl_write_dword(rtlpriv, REG_FAST_EDCA_CTRL, 0x03086666);
+
+	rtl_write_byte(rtlpriv, REG_HT_SINGLE_AMPDU, 0x80);
+	rtl_write_byte(rtlpriv, REG_RX_PKT_LIMIT, 0x20);
+	rtl_write_word(rtlpriv, REG_MAX_AGGR_NUM, 0x1F1F);
+}
+
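+/* MDIO access to the PCIe PHY: trigger the transfer via REG_MDIO_CTL and
+ * poll the busy bit (up to 20 * 10us) before reading the result. */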
+static u16 _rtl8821ae_mdio_read(struct rtl_priv *rtlpriv, u8 addr)
+{
+	u16 ret = 0;
+	u8 tmp = 0, count = 0;
+
+	rtl_write_byte(rtlpriv, REG_MDIO_CTL, addr | BIT(6));
+	tmp = rtl_read_byte(rtlpriv, REG_MDIO_CTL) & BIT(6) ;
+	count = 0;
+	while (tmp && count < 20) {
+		udelay(10);
+		tmp = rtl_read_byte(rtlpriv, REG_MDIO_CTL) & BIT(6);
+		count++;
+	}
+	if (0 == tmp)
+		ret = rtl_read_word(rtlpriv, REG_MDIO_RDATA);
+
+	return ret;
+}
+
+void _rtl8821ae_mdio_write(struct rtl_priv *rtlpriv, u8 addr, u16 data)
+{
+	u8 tmp = 0, count = 0;
+
+	rtl_write_word(rtlpriv, REG_MDIO_WDATA, data);
+	rtl_write_byte(rtlpriv, REG_MDIO_CTL, addr | BIT(5));
+	tmp = rtl_read_byte(rtlpriv, REG_MDIO_CTL) & BIT(5) ;
+	count = 0;
+	while (tmp && count < 20) {
+		udelay(10);
+		tmp = rtl_read_byte(rtlpriv, REG_MDIO_CTL) & BIT(5);
+		count++;
+	}
+}
+
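+/* DBI access to the device's PCIe configuration space: the address is
+ * dword-aligned and the byte lane is selected from the low address bits. */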
+static u8 _rtl8821ae_dbi_read(struct rtl_priv *rtlpriv, u16 addr)
+{
+	u16 read_addr = addr & 0xfffc;
+	u8 tmp = 0, count = 0, ret = 0;
+
+	rtl_write_word(rtlpriv, REG_DBI_ADDR, read_addr);
+	rtl_write_byte(rtlpriv, REG_DBI_FLAG, 0x2);
+	tmp = rtl_read_byte(rtlpriv, REG_DBI_FLAG);
+	count = 0;
+	while (tmp && count < 20) {
+		udelay(10);
+		tmp = rtl_read_byte(rtlpriv, REG_DBI_FLAG);
+		count++;
+	}
+	if (0 == tmp) {
+		read_addr = REG_DBI_RDATA + addr % 4;
+		ret = rtl_read_word(rtlpriv, read_addr);
+	}
+	return ret;
+}
+
+void _rtl8821ae_dbi_write(struct rtl_priv *rtlpriv, u16 addr, u8 data)
+{
+	u8 tmp = 0, count = 0;
+	u16 write_addr, remainder = addr % 4;
+
+	write_addr = REG_DBI_WDATA + remainder;
+	rtl_write_byte(rtlpriv, write_addr, data);
+
+	write_addr = (addr & 0xfffc) | (BIT(0) << (remainder + 12));
+	rtl_write_word(rtlpriv, REG_DBI_ADDR, write_addr);
+
+	rtl_write_byte(rtlpriv, REG_DBI_FLAG, 0x1);
+
+	tmp = rtl_read_byte(rtlpriv, REG_DBI_FLAG);
+	count = 0;
+	while (tmp && count < 20) {
+		udelay(10);
+		tmp = rtl_read_byte(rtlpriv, REG_DBI_FLAG);
+		count++;
+	}
+
+}
+
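+/* Adjust PCIe PHY (MDIO) and configuration-space (DBI) registers through the
+ * back-door interfaces before ASPM is enabled. */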
+static void _rtl8821ae_enable_aspm_back_door(struct ieee80211_hw *hw)
+{
+	struct rtl_priv *rtlpriv = rtl_priv(hw);
+	struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
+	u8 tmp;
+
+	if (rtlhal->hw_type == HARDWARE_TYPE_RTL8821AE) {
+		if (_rtl8821ae_mdio_read(rtlpriv, 0x04) != 0x8544)
+			_rtl8821ae_mdio_write(rtlpriv, 0x04, 0x8544);
+
+		if (_rtl8821ae_mdio_read(rtlpriv, 0x0b) != 0x0070)
+			_rtl8821ae_mdio_write(rtlpriv, 0x0b, 0x0070);
+	}
+
+	tmp = _rtl8821ae_dbi_read(rtlpriv, 0x70f);
+	_rtl8821ae_dbi_write(rtlpriv, 0x70f, tmp | BIT(7));
+
+	tmp = _rtl8821ae_dbi_read(rtlpriv, 0x719);
+	_rtl8821ae_dbi_write(rtlpriv, 0x719, tmp | BIT(3) | BIT(4));
+
+	if(rtlhal->hw_type == HARDWARE_TYPE_RTL8812AE)
+	{
+		tmp  = _rtl8821ae_dbi_read(rtlpriv, 0x718);
+		_rtl8821ae_dbi_write(rtlpriv, 0x718, tmp|BIT(4));
+	}
+}
+
+void rtl8821ae_enable_hw_security_config(struct ieee80211_hw *hw)
+{
+	struct rtl_priv *rtlpriv = rtl_priv(hw);
+	u8 sec_reg_value;
+	u8 tmp;
+
+	RT_TRACE(COMP_INIT, DBG_DMESG,
+		 ("PairwiseEncAlgorithm = %d GroupEncAlgorithm = %d\n",
+		  rtlpriv->sec.pairwise_enc_algorithm,
+		  rtlpriv->sec.group_enc_algorithm));
+
+	if (rtlpriv->cfg->mod_params->sw_crypto || rtlpriv->sec.use_sw_sec) {
+		RT_TRACE(COMP_SEC, DBG_DMESG, ("not open hw encryption\n"));
+		return;
+	}
+
+	sec_reg_value = SCR_TxEncEnable | SCR_RxDecEnable;
+
+	if (rtlpriv->sec.use_defaultkey) {
+		sec_reg_value |= SCR_TxUseDK;
+		sec_reg_value |= SCR_RxUseDK;
+	}
+
+	sec_reg_value |= (SCR_RXBCUSEDK | SCR_TXBCUSEDK);
+
+	tmp = rtl_read_byte(rtlpriv, REG_CR + 1);
+	rtl_write_byte(rtlpriv, REG_CR + 1, tmp | BIT(1));
+
+	RT_TRACE(COMP_SEC, DBG_DMESG,
+		 ("The SECR-value %x \n", sec_reg_value));
+
+	rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_WPA_CONFIG, &sec_reg_value);
+
+}
+
+#if 0
+bool _rtl8821ae_check_pcie_dma_hang(struct ieee80211_hw *hw)
+{
+	struct rtl_priv *rtlpriv = rtl_priv(hw);
+	u8 tmp;
+	tmp = rtl_read_byte(rtlpriv, REG_DBI_CTRL+3);
+	if (!(tmp&BIT(2))) {
+		rtl_write_byte(rtlpriv, REG_DBI_CTRL+3, tmp|BIT(2));
+		mdelay(100);
+	}
+
+	tmp = rtl_read_byte(rtlpriv, REG_DBI_CTRL+3);
+	if (tmp&BIT(0) || tmp&BIT(1)) {
+		RT_TRACE(COMP_INIT, DBG_LOUD,
+			("rtl8821ae_check_pcie_dma_hang(): TRUE! Reset PCIE DMA!\n"));
+		return true;
+	} else {
+		return false;
+	}
+}
+
+void _rtl8821ae_reset_pcie_interface_dma(struct ieee80211_hw *hw,
+													bool mac_power_on, bool watch_dog)
+{
+	struct rtl_priv *rtlpriv = rtl_priv(hw);
+	u8 tmp;
+	bool release_mac_rx_pause;
+	u8 backup_pcie_dma_pause;
+
+	RT_TRACE(COMP_INIT, DBG_LOUD, ("_rtl8821ae_reset_pcie_interface_dma()\n"));
+
+	tmp = rtl_read_byte(rtlpriv, REG_RSV_CTRL);
+	tmp &= ~BIT(1);
+	rtl_write_byte(rtlpriv, REG_RSV_CTRL, tmp);
+	tmp = rtl_read_byte(rtlpriv, REG_PMC_DBG_CTRL2);
+	tmp |= BIT2;
+	rtl_write_byte(rtlpriv, REG_PMC_DBG_CTRL2, tmp);
+
+	tmp = rtl_read_byte(rtlpriv, REG_RXDMA_CONTROL);
+	if (tmp & BIT(2)) {
+		release_mac_rx_pause = false;
+	} else {
+		rtl_write_byte(rtlpriv, REG_RXDMA_CONTROL, tmp | BIT(2));
+		release_mac_rx_pause = true;
+	}
+	backup_pcie_dma_pause = rtl_read_byte(rtlpriv, REG_PCIE_CTRL_REG+1);
+	if (backup_pcie_dma_pause != 0xFF)
+		rtl_write_byte(rtlpriv, REG_PCIE_CTRL_REG+1, 0xFF);
+
+	if (mac_power_on)
+		rtl_write_byte(rtlpriv, REG_CR, 0);
+
+	tmp = rtl_read_byte(rtlpriv, REG_SYS_FUNC_EN+1);
+	tmp &= ~BIT(0);
+	rtl_write_byte(rtlpriv, REG_SYS_FUNC_EN+1, tmp);
+
+	tmp = rtl_read_byte(rtlpriv, REG_SYS_FUNC_EN+1);
+	tmp |= ~BIT(0);
+	rtl_write_byte(rtlpriv, REG_SYS_FUNC_EN+1, tmp);
+
+	if (mac_power_on)
+		rtl_write_byte(rtlpriv, REG_CR, 0xFF);
+
+	tmp = rtl_read_byte(rtlpriv, REG_MAC_PHY_CTRL_NORMAL+2);
+	tmp |= BIT1;
+	rtl_write_byte(rtlpriv, REG_MAC_PHY_CTRL_NORMAL+2, tmp);
+
+	if (watch_dog) {
+		u32 rqpn = 0;
+		u32 rqpn_npq = 0;
+		u8 tx_page_boundary = _RQPN_Init_8812E(Adapter, &rqpn_npq, &rqpn);
+
+		if(LLT_table_init_8812(Adapter, TX_PAGE_BOUNDARY, RQPN, RQPN_NPQ) == RT_STATUS_FAILURE)
+			return false;
+
+			PlatformAcquireSpinLock(Adapter, RT_RX_SPINLOCK);
+			PlatformAcquireSpinLock(Adapter, RT_TX_SPINLOCK);
+
+			// <1> Reset Tx descriptor
+			Adapter->HalFunc.ResetTxDescHandler(Adapter,Adapter->NumTxDesc);
+
+			// <2> Reset Rx descriptor
+			Adapter->HalFunc.ResetRxDescHandler(Adapter,Adapter->NumRxDesc);
+
+			// <3> Reset RFDs
+			FreeRFDs( Adapter, TRUE);
+
+			// <4> Reset TCBs
+			FreeTCBs( Adapter, TRUE);
+
+			// We should set all Rx desc own bit to 1 to prevent from RDU after enable Rx DMA. 2013.02.18, by tynli.
+			PrepareAllRxDescBuffer(Adapter);
+
+			PlatformReleaseSpinLock(Adapter, RT_TX_SPINLOCK);
+			PlatformReleaseSpinLock(Adapter, RT_RX_SPINLOCK);
+
+			//
+			// Initialize TRx DMA address.
+			//
+			// Because set 0x100 to 0x0 will cause the Rx descriptor address 0x340 be cleared to zero on 88EE,
+			// we should re-initialize Rx desc. address before enable DMA. 2012.11.07. by tynli.
+			InitTRxDescHwAddress8812AE(Adapter);
+		}
+
+		// In MAC power on state, BB and RF maybe in ON state, if we release TRx DMA here
+		// it will cause packets to be started to Tx/Rx, so we release Tx/Rx DMA later.
+		if(!bInMACPowerOn || bInWatchDog)
+		{
+			// 8. release TRX DMA
+			//write 0x284 bit[18] = 1'b0
+			//write 0x301 = 0x00
+			if(bReleaseMACRxPause)
+			{
+				u1Tmp = PlatformEFIORead1Byte(Adapter, REG_RXDMA_CONTROL);
+				PlatformEFIOWrite1Byte(Adapter, REG_RXDMA_CONTROL, (u1Tmp&~BIT2));
+			}
+			PlatformEFIOWrite1Byte(Adapter, 	REG_PCIE_CTRL_REG+1, BackUpPcieDMAPause);
+		}
+
+		if(IS_HARDWARE_TYPE_8821E(Adapter))
+		{
+			//9. lock system register
+			//	 write 0xCC bit[2] = 1'b0
+			u1Tmp = PlatformEFIORead1Byte(Adapter, REG_PMC_DBG_CTRL2_8723B);
+			u1Tmp &= ~(BIT2);
+			PlatformEFIOWrite1Byte(Adapter, REG_PMC_DBG_CTRL2_8723B, u1Tmp);
+		}
+
+		return RT_STATUS_SUCCESS;
+}
+#endif
+
+// Static MacID Mapping (cf. Used in MacIdDoStaticMapping) ----------
+#define MAC_ID_STATIC_FOR_DEFAULT_PORT				0
+#define MAC_ID_STATIC_FOR_BROADCAST_MULTICAST		1
+#define MAC_ID_STATIC_FOR_BT_CLIENT_START				2
+#define MAC_ID_STATIC_FOR_BT_CLIENT_END				3
+// -----------------------------------------------------------
+
+void rtl8821ae_macid_initialize_mediastatus(struct ieee80211_hw *hw)
+{
+	struct rtl_priv *rtlpriv = rtl_priv(hw);
+	u8 media_rpt[4] = {RT_MEDIA_CONNECT, 1,
+			   MAC_ID_STATIC_FOR_BROADCAST_MULTICAST,
+			   MAC_ID_STATIC_FOR_BT_CLIENT_END};
+
+	rtlpriv->cfg->ops->set_hw_reg(hw,
+		HW_VAR_H2C_FW_MEDIASTATUSRPT, media_rpt);
+
+	RT_TRACE(COMP_INIT, DBG_LOUD,
+		("Initialize MacId media status: from %d to %d\n",
+		MAC_ID_STATIC_FOR_BROADCAST_MULTICAST,
+		MAC_ID_STATIC_FOR_BT_CLIENT_END));
+}
+
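+/* Full hardware bring-up: init the MAC, download the firmware, configure
+ * MAC/BB/RF and hardware security, then release RX/TX PCIE DMA and start
+ * the dynamic mechanism (DM) code. */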
+int rtl8821ae_hw_init(struct ieee80211_hw *hw)
+{
+	struct rtl_priv *rtlpriv = rtl_priv(hw);
+	struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
+	struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
+	struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw));
+	bool rtstatus = true;
+	int err;
+	u8 tmp_u1b;
+	u32 nav_upper = WIFI_NAV_UPPER_US;
+
+	rtlpriv->rtlhal.being_init_adapter = true;
+	rtlpriv->intf_ops->disable_aspm(hw);
+
+	/*YP wowlan not considered*/
+
+	tmp_u1b = rtl_read_byte(rtlpriv, REG_CR);
+	if (tmp_u1b!=0 && tmp_u1b != 0xEA) {
+		rtlhal->b_mac_func_enable = true;
+		RT_TRACE(COMP_INIT,DBG_LOUD,(" MAC has already power on.\n"));
+	} else {
+		rtlhal->b_mac_func_enable = false;
+		rtlhal->fw_ps_state = FW_PS_STATE_ALL_ON_8821AE;
+	}
+
+/*	if (_rtl8821ae_check_pcie_dma_hang(hw)) {
+		_rtl8821ae_reset_pcie_interface_dma(hw,rtlhal->b_mac_func_enable,false);
+		rtlhal->b_mac_func_enable = false;
+	} */
+
+	rtstatus = _rtl8821ae_init_mac(hw);
+	if (rtstatus != true) {
+		RT_TRACE(COMP_ERR, DBG_EMERG, ("Init MAC failed\n"));
+		err = 1;
+		return err;
+	}
+
+	tmp_u1b = rtl_read_byte(rtlpriv, REG_SYS_CFG);
+	tmp_u1b &= 0x7F;
+	rtl_write_byte(rtlpriv, REG_SYS_CFG, tmp_u1b);
+
+	err = rtl8821ae_download_fw(hw, false);
+	if (err) {
+		RT_TRACE(COMP_ERR, DBG_WARNING,
+			 ("Failed to download FW. Init HW "
+			  "without FW now..\n"));
+		err = 1;
+		rtlhal->bfw_ready = false;
+		return err;
+	} else {
+		rtlhal->bfw_ready = true;
+	}
+	rtlhal->fw_ps_state = FW_PS_STATE_ALL_ON_8821AE;
+	rtlhal->bfw_clk_change_in_progress = false;
+	rtlhal->ballow_sw_to_change_hwclc = false;
+	rtlhal->last_hmeboxnum = 0;
+
+	/*SIC_Init(Adapter);
+	if(pHalData->AMPDUBurstMode)
+		PlatformEFIOWrite1Byte(Adapter,REG_AMPDU_BURST_MODE_8812,  0x7F);*/
+
+	rtl8821ae_phy_mac_config(hw);
+	/* The previous call modifies the RCR, so update the cached
+	 * receive_config here; otherwise RX throughput becomes unstable:
+	 * RCR_ACRC32 makes throughput unstable and RCR_APP_ICV makes mac80211
+	 * disassociate from Cisco 1252 APs.
+	rtlpci->receive_config = rtl_read_dword(rtlpriv, REG_RCR);
+	rtlpci->receive_config &= ~(RCR_ACRC32 | RCR_AICV);
+	rtl_write_dword(rtlpriv, REG_RCR, rtlpci->receive_config);*/
+	rtl8821ae_phy_bb_config(hw);
+
+	rtl8821ae_phy_rf_config(hw);
+
+	_rtl8821ae_hw_configure(hw);
+
+	rtl8821ae_phy_switch_wirelessband(hw, BAND_ON_2_4G);
+
+	/*set wireless mode*/
+
+	rtlhal->b_mac_func_enable = true;
+
+	rtl_cam_reset_all_entry(hw);
+
+	rtl8821ae_enable_hw_security_config(hw);
+
+	ppsc->rfpwr_state = ERFON;
+
+	rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_ETHER_ADDR, mac->mac_addr);
+	_rtl8821ae_enable_aspm_back_door(hw);
+	rtlpriv->intf_ops->enable_aspm(hw);
+
+	//rtl8821ae_bt_hw_init(hw);
+	rtlpriv->rtlhal.being_init_adapter = false;
+
+	rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_NAV_UPPER, (u8 *)&nav_upper);
+
+	//rtl8821ae_dm_check_txpower_tracking(hw);
+	//rtl8821ae_phy_lc_calibrate(hw);
+
+	/* Release Rx DMA*/
+	tmp_u1b = rtl_read_byte(rtlpriv, REG_RXDMA_CONTROL);
+	if (tmp_u1b & BIT(2)) {
+		/* Release Rx DMA if needed*/
+		tmp_u1b &= ~BIT(2);
+		rtl_write_byte(rtlpriv, REG_RXDMA_CONTROL, tmp_u1b);
+	}
+
+	/* Release Tx/Rx PCIE DMA */
+	rtl_write_byte(rtlpriv, REG_PCIE_CTRL_REG + 1, 0);
+
+	rtl8821ae_dm_init(hw);
+	rtl8821ae_macid_initialize_mediastatus(hw);
+
+	RT_TRACE(COMP_INIT, DBG_LOUD, ("rtl8821ae_hw_init() <====\n"));
+	return err;
+}
+
+static enum version_8821ae _rtl8821ae_read_chip_version(struct ieee80211_hw *hw)
+{
+	struct rtl_priv *rtlpriv = rtl_priv(hw);
+	struct rtl_phy *rtlphy = &(rtlpriv->phy);
+	struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
+	enum version_8821ae version = VERSION_UNKNOWN;
+	u32 value32;
+
+	value32 = rtl_read_dword(rtlpriv, REG_SYS_CFG1);
+	RT_TRACE(COMP_INIT, DBG_LOUD, ("ReadChipVersion8812A 0xF0 = 0x%x \n", value32));
+
+
+
+	if(rtlhal->hw_type == HARDWARE_TYPE_RTL8812AE)
+		rtlphy->rf_type = RF_2T2R;
+	else if(rtlhal->hw_type == HARDWARE_TYPE_RTL8821AE)
+		rtlphy->rf_type = RF_1T1R;
+
+	RT_TRACE(COMP_INIT, DBG_LOUD, ("RF_Type is %x!!\n", rtlphy->rf_type));
+
+
+	if (value32 & TRP_VAUX_EN)
+	{
+		if(rtlhal->hw_type == HARDWARE_TYPE_RTL8812AE)
+		{
+			if(rtlphy->rf_type == RF_2T2R)
+				version = VERSION_TEST_CHIP_2T2R_8812;
+			else
+				version = VERSION_TEST_CHIP_1T1R_8812;
+		}
+		else
+			version = VERSION_TEST_CHIP_8821;
+	} else {
+		if(rtlhal->hw_type == HARDWARE_TYPE_RTL8812AE)
+		{
+			u32	rtl_id = ((value32 & CHIP_VER_RTL_MASK) >> 12) +1 ;
+
+			if(rtlphy->rf_type == RF_2T2R)
+				version = (enum version_8821ae)(CHIP_8812 | NORMAL_CHIP | RF_TYPE_2T2R);
+			else
+				version = (enum version_8821ae)(CHIP_8812 | NORMAL_CHIP);
+
+			version = (enum version_8821ae)(version| (rtl_id << 12));
+		}
+		else if(rtlhal->hw_type == HARDWARE_TYPE_RTL8821AE)
+		{
+			u32	rtl_id = value32 & CHIP_VER_RTL_MASK;
+
+			version = (enum version_8821ae)(CHIP_8821 | NORMAL_CHIP | rtl_id);
+		}
+	}
+
+	RT_TRACE(COMP_INIT, DBG_LOUD,
+		 ("Chip RF Type: %s\n", (rtlphy->rf_type == RF_2T2R) ?
+		  "RF_2T2R" : "RF_1T1R"));
+
+	if(rtlhal->hw_type == HARDWARE_TYPE_RTL8821AE)
+	{
+		/*WL_HWROF_EN.*/
+		value32 = rtl_read_dword(rtlpriv, REG_MULTI_FUNC_CTRL);
+		rtlphy->hw_rof_enable= ((value32 & WL_HWROF_EN) ? 1 : 0);
+	}
+
+	switch(version)
+	{
+		case VERSION_TEST_CHIP_1T1R_8812:
+			RT_TRACE(COMP_INIT, DBG_LOUD,
+				("Chip Version ID: VERSION_TEST_CHIP_1T1R_8812.\n"));
+			break;
+		case VERSION_TEST_CHIP_2T2R_8812:
+			RT_TRACE(COMP_INIT, DBG_LOUD,
+				("Chip Version ID: VERSION_TEST_CHIP_2T2R_8812.\n"));
+			break;
+		case VERSION_NORMAL_TSMC_CHIP_1T1R_8812:
+			RT_TRACE(COMP_INIT, DBG_LOUD,
+				("Chip Version ID: VERSION_NORMAL_TSMC_CHIP_1T1R_8812.\n"));
+			break;
+		case VERSION_NORMAL_TSMC_CHIP_2T2R_8812:
+			RT_TRACE(COMP_INIT, DBG_LOUD,
+				("Chip Version ID: VERSION_NORMAL_TSMC_CHIP_2T2R_8812.\n"));
+			break;
+		case VERSION_NORMAL_TSMC_CHIP_1T1R_8812_C_CUT:
+			RT_TRACE(COMP_INIT, DBG_LOUD,
+				("Chip Version ID: VERSION_NORMAL_TSMC_CHIP_1T1R_8812 C CUT.\n"));
+			break;
+		case VERSION_NORMAL_TSMC_CHIP_2T2R_8812_C_CUT:
+			RT_TRACE(COMP_INIT, DBG_LOUD,
+				("Chip Version ID: VERSION_NORMAL_TSMC_CHIP_2T2R_8812 C CUT.\n"));
+			break;
+		case VERSION_TEST_CHIP_8821:
+			RT_TRACE(COMP_INIT, DBG_LOUD,
+				("Chip Version ID: VERSION_TEST_CHIP_8821.\n"));
+			break;
+		case VERSION_NORMAL_TSMC_CHIP_8821:
+			RT_TRACE(COMP_INIT, DBG_LOUD,
+				("Chip Version ID: VERSION_NORMAL_TSMC_CHIP_8821 A CUT.\n"));
+			break;
+		case VERSION_NORMAL_TSMC_CHIP_8821_B_CUT:
+			RT_TRACE(COMP_INIT, DBG_LOUD,
+				("Chip Version ID: VERSION_NORMAL_TSMC_CHIP_8821 B CUT.\n"));
+			break;
+		default:
+			RT_TRACE(COMP_INIT, DBG_LOUD,
+				("Chip Version ID: Unknown (0x%X).\n", version));
+			break;
+	}
+
+	return version;
+}
+
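+/* Program the MSR network-type bits and beacon TX/sub-functions for the
+ * requested interface type (no-link, ad-hoc, station or AP). */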
+static int _rtl8821ae_set_media_status(struct ieee80211_hw *hw,
+				     enum nl80211_iftype type)
+{
+	struct rtl_priv *rtlpriv = rtl_priv(hw);
+	u8 bt_msr = rtl_read_byte(rtlpriv, MSR);
+	enum led_ctl_mode ledaction = LED_CTL_NO_LINK;
+	bt_msr &= 0xfc;
+
+	rtl_write_dword(rtlpriv, REG_BCN_CTRL, 0);
+	RT_TRACE(COMP_BEACON, DBG_LOUD,
+		("clear 0x550 when set HW_VAR_MEDIA_STATUS\n"));
+
+	if (type == NL80211_IFTYPE_UNSPECIFIED ||
+	    type == NL80211_IFTYPE_STATION) {
+		_rtl8821ae_stop_tx_beacon(hw);
+		_rtl8821ae_enable_bcn_sub_func(hw);
+	} else if (type == NL80211_IFTYPE_ADHOC ||
+		type == NL80211_IFTYPE_AP) {
+		_rtl8821ae_resume_tx_beacon(hw);
+		_rtl8821ae_disable_bcn_sub_func(hw);
+	} else {
+		RT_TRACE(COMP_ERR, DBG_WARNING,("Set HW_VAR_MEDIA_STATUS: "
+			  "No such media status(%x).\n", type));
+	}
+
+	switch (type) {
+	case NL80211_IFTYPE_UNSPECIFIED:
+		bt_msr |= MSR_NOLINK;
+		ledaction = LED_CTL_LINK;
+		RT_TRACE(COMP_INIT, DBG_TRACE, ("Set Network type to NO LINK!\n"));
+		break;
+	case NL80211_IFTYPE_ADHOC:
+		bt_msr |= MSR_ADHOC;
+		RT_TRACE(COMP_INIT, DBG_TRACE, ("Set Network type to Ad Hoc!\n"));
+		break;
+	case NL80211_IFTYPE_STATION:
+		bt_msr |= MSR_INFRA;
+		ledaction = LED_CTL_LINK;
+		RT_TRACE(COMP_INIT, DBG_TRACE, ("Set Network type to STA!\n"));
+		break;
+	case NL80211_IFTYPE_AP:
+		bt_msr |= MSR_AP;
+		RT_TRACE(COMP_INIT, DBG_TRACE, ("Set Network type to AP!\n"));
+		break;
+	default:
+		RT_TRACE(COMP_ERR, DBG_EMERG,
+			 ("Network type %d not supported!\n", type));
+		return 1;
+
+	}
+
+	rtl_write_byte(rtlpriv, (MSR), bt_msr);
+	rtlpriv->cfg->ops->led_control(hw, ledaction);
+	if ((bt_msr & 0xfc) == MSR_AP)
+		rtl_write_byte(rtlpriv, REG_BCNTCFG + 1, 0x00);
+	else
+		rtl_write_byte(rtlpriv, REG_BCNTCFG + 1, 0x66);
+
+	return 0;
+}
+
+void rtl8821ae_set_check_bssid(struct ieee80211_hw *hw, bool check_bssid)
+{
+	struct rtl_priv *rtlpriv = rtl_priv(hw);
+	struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
+	u32 reg_rcr = rtlpci->receive_config;
+
+	if (rtlpriv->psc.rfpwr_state != ERFON)
+		return;
+
+	if (check_bssid == true) {
+		reg_rcr |= (RCR_CBSSID_DATA | RCR_CBSSID_BCN);
+		rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_RCR,
+					      (u8 *) (&reg_rcr));
+		_rtl8821ae_set_bcn_ctrl_reg(hw, 0, BIT(4));
+	} else if (check_bssid == false) {
+		reg_rcr &= (~(RCR_CBSSID_DATA | RCR_CBSSID_BCN));
+		_rtl8821ae_set_bcn_ctrl_reg(hw, BIT(4), 0);
+		rtlpriv->cfg->ops->set_hw_reg(hw,
+			HW_VAR_RCR, (u8 *) (&reg_rcr));
+	}
+
+}
+
+int rtl8821ae_set_network_type(struct ieee80211_hw *hw, enum nl80211_iftype type)
+{
+	struct rtl_priv *rtlpriv = rtl_priv(hw);
+
+	RT_TRACE(COMP_INIT, DBG_LOUD, ("rtl8821ae_set_network_type!\n"));
+
+	if (_rtl8821ae_set_media_status(hw, type))
+		return -EOPNOTSUPP;
+
+	if (rtlpriv->mac80211.link_state == MAC80211_LINKED) {
+		if (type != NL80211_IFTYPE_AP)
+			rtl8821ae_set_check_bssid(hw, true);
+	} else {
+		rtl8821ae_set_check_bssid(hw, false);
+	}
+
+	return 0;
+}
+
+/* don't set REG_EDCA_BE_PARAM here because mac80211 will send pkt when scan */
+void rtl8821ae_set_qos(struct ieee80211_hw *hw, int aci)
+{
+	struct rtl_priv *rtlpriv = rtl_priv(hw);
+	rtl8821ae_dm_init_edca_turbo(hw);
+	switch (aci) {
+	case AC1_BK:
+		rtl_write_dword(rtlpriv, REG_EDCA_BK_PARAM, 0xa44f);
+		break;
+	case AC0_BE:
+		/* rtl_write_dword(rtlpriv, REG_EDCA_BE_PARAM, u4b_ac_param); */
+		break;
+	case AC2_VI:
+		rtl_write_dword(rtlpriv, REG_EDCA_VI_PARAM, 0x5e4322);
+		break;
+	case AC3_VO:
+		rtl_write_dword(rtlpriv, REG_EDCA_VO_PARAM, 0x2f3222);
+		break;
+	default:
+		RT_ASSERT(false, ("invalid aci: %d !\n", aci));
+		break;
+	}
+}
+
+void rtl8821ae_enable_interrupt(struct ieee80211_hw *hw)
+{
+	struct rtl_priv *rtlpriv = rtl_priv(hw);
+	struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
+
+	rtl_write_dword(rtlpriv, REG_HIMR, rtlpci->irq_mask[0] & 0xFFFFFFFF);
+	rtl_write_dword(rtlpriv, REG_HIMRE, rtlpci->irq_mask[1] & 0xFFFFFFFF);
+	rtlpci->irq_enabled = true;
+	/* Some C2H commands (e.g. C2H, CPWM) may already have been sent before
+	 * the system interrupt is enabled, so clear all C2H events the FW has
+	 * notified; otherwise the FW will not schedule any more commands.
+	 */
+	//rtl_write_byte(rtlpriv, REG_C2HEVT_CLEAR, 0);
+	/*enable system interrupt*/
+	rtl_write_dword(rtlpriv, REG_HSIMR, rtlpci->sys_irq_mask & 0xFFFFFFFF);
+}
+
+void rtl8821ae_disable_interrupt(struct ieee80211_hw *hw)
+{
+	struct rtl_priv *rtlpriv = rtl_priv(hw);
+	struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
+
+	rtl_write_dword(rtlpriv, REG_HIMR, IMR_DISABLED);
+	rtl_write_dword(rtlpriv, REG_HIMRE, IMR_DISABLED);
+	rtlpci->irq_enabled = false;
+	synchronize_irq(rtlpci->pdev->irq);
+}
+
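+/* Card-disable sequence: run the LPS/card-disable power command flows, reset
+ * the on-chip MCU and finally lock the ISO/CLK/power control register. */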
+static void _rtl8821ae_poweroff_adapter(struct ieee80211_hw *hw)
+{
+	struct rtl_priv *rtlpriv = rtl_priv(hw);
+	struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
+	u8 u1b_tmp;
+
+	rtlhal->b_mac_func_enable = false;
+
+	if (rtlhal->hw_type == HARDWARE_TYPE_RTL8821AE) {
+	/* Combo (PCIe + USB) Card and PCIe-MF Card */
+	/* 1. Run LPS WL RFOFF flow */
+		//RT_TRACE(COMP_INIT, DBG_LOUD, ("=====>CardDisableRTL8812E,RTL8821A_NIC_LPS_ENTER_FLOW\n"));
+		rtl_hal_pwrseqcmdparsing(rtlpriv, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK,
+			PWR_INTF_PCI_MSK, RTL8821A_NIC_LPS_ENTER_FLOW);
+	}
+	/* 2. 0x1F[7:0] = 0 */
+	/* turn off RF */
+	//rtl_write_byte(rtlpriv, REG_RF_CTRL, 0x00);
+	if ((rtl_read_byte(rtlpriv, REG_MCUFWDL) & BIT(7)) &&
+		rtlhal->bfw_ready ) {
+		rtl8821ae_firmware_selfreset(hw);
+	}
+
+	/* Reset MCU. Suggested by Filen. */
+	u1b_tmp = rtl_read_byte(rtlpriv, REG_SYS_FUNC_EN+1);
+	rtl_write_byte(rtlpriv, REG_SYS_FUNC_EN+1, (u1b_tmp & (~BIT(2))));
+
+	/* g.	MCUFWDL 0x80[1:0]=0	 */
+	/* reset MCU ready status */
+	rtl_write_byte(rtlpriv, REG_MCUFWDL, 0x00);
+
+	if (rtlhal->hw_type == HARDWARE_TYPE_RTL8821AE) {
+		/* HW card disable configuration. */
+		rtl_hal_pwrseqcmdparsing(rtlpriv, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK,
+			PWR_INTF_PCI_MSK, RTL8821A_NIC_DISABLE_FLOW);
+	} else {
+		/* HW card disable configuration. */
+		rtl_hal_pwrseqcmdparsing(rtlpriv, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK,
+			PWR_INTF_PCI_MSK, RTL8812_NIC_DISABLE_FLOW);
+	}
+
+	/* Reset MCU IO Wrapper */
+	u1b_tmp = rtl_read_byte(rtlpriv, REG_RSV_CTRL + 1);
+	rtl_write_byte(rtlpriv, REG_RSV_CTRL + 1, (u1b_tmp & (~BIT(0))));
+	u1b_tmp = rtl_read_byte(rtlpriv, REG_RSV_CTRL + 1);
+	rtl_write_byte(rtlpriv, REG_RSV_CTRL + 1, u1b_tmp | BIT(0));
+
+	/* 7. RSV_CTRL 0x1C[7:0] = 0x0E */
+	/* lock ISO/CLK/Power control register */
+	rtl_write_byte(rtlpriv, REG_RSV_CTRL, 0x0e);
+}
+
+void rtl8821ae_card_disable(struct ieee80211_hw *hw)
+{
+	struct rtl_priv *rtlpriv = rtl_priv(hw);
+	struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw));
+	struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
+	enum nl80211_iftype opmode;
+
+	RT_TRACE(COMP_INIT, DBG_LOUD,
+		 ("rtl8821ae_card_disable.\n"));
+
+	mac->link_state = MAC80211_NOLINK;
+	opmode = NL80211_IFTYPE_UNSPECIFIED;
+	_rtl8821ae_set_media_status(hw, opmode);
+	if (rtlpriv->rtlhal.driver_is_goingto_unload ||
+	    ppsc->rfoff_reason > RF_CHANGE_BY_PS)
+		rtlpriv->cfg->ops->led_control(hw, LED_CTL_POWER_OFF);
+	RT_SET_PS_LEVEL(ppsc, RT_RF_OFF_LEVL_HALT_NIC);
+	_rtl8821ae_poweroff_adapter(hw);
+
+	/* after power off we should do iqk again */
+	rtlpriv->phy.iqk_initialized = false;
+}
+
+void rtl8821ae_interrupt_recognized(struct ieee80211_hw *hw,
+				  u32 *p_inta, u32 *p_intb)
+{
+	struct rtl_priv *rtlpriv = rtl_priv(hw);
+	struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
+
+	*p_inta = rtl_read_dword(rtlpriv, ISR) & rtlpci->irq_mask[0];
+	rtl_write_dword(rtlpriv, ISR, *p_inta);
+
+
+	*p_intb = rtl_read_dword(rtlpriv, REG_HISRE) & rtlpci->irq_mask[1];
+	rtl_write_dword(rtlpriv, REG_HISRE, *p_intb);
+
+}
+
+
+void rtl8821ae_set_beacon_related_registers(struct ieee80211_hw *hw)
+{
+
+	struct rtl_priv *rtlpriv = rtl_priv(hw);
+	struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
+	struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
+	u16 bcn_interval, atim_window;
+
+	bcn_interval = mac->beacon_interval;
+	atim_window = 2;	/*FIX MERGE */
+	rtl8821ae_disable_interrupt(hw);
+	rtl_write_word(rtlpriv, REG_ATIMWND, atim_window);
+	rtl_write_word(rtlpriv, REG_BCN_INTERVAL, bcn_interval);
+	rtl_write_word(rtlpriv, REG_BCNTCFG, 0x660f);
+	rtl_write_byte(rtlpriv, REG_RXTSF_OFFSET_CCK, 0x18);
+	rtl_write_byte(rtlpriv, REG_RXTSF_OFFSET_OFDM, 0x18);
+	rtl_write_byte(rtlpriv, 0x606, 0x30);
+	rtlpci->reg_bcn_ctrl_val |= BIT(3);
+	rtl_write_byte(rtlpriv, REG_BCN_CTRL, (u8) rtlpci->reg_bcn_ctrl_val);
+	rtl8821ae_enable_interrupt(hw);
+}
+
+void rtl8821ae_set_beacon_interval(struct ieee80211_hw *hw)
+{
+	struct rtl_priv *rtlpriv = rtl_priv(hw);
+	struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
+	u16 bcn_interval = mac->beacon_interval;
+
+	RT_TRACE(COMP_BEACON, DBG_DMESG,
+		 ("beacon_interval:%d\n", bcn_interval));
+	rtl8821ae_disable_interrupt(hw);
+	rtl_write_word(rtlpriv, REG_BCN_INTERVAL, bcn_interval);
+	rtl8821ae_enable_interrupt(hw);
+}
+
+void rtl8821ae_update_interrupt_mask(struct ieee80211_hw *hw,
+				   u32 add_msr, u32 rm_msr)
+{
+	struct rtl_priv *rtlpriv = rtl_priv(hw);
+	struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
+
+	RT_TRACE(COMP_INTR, DBG_LOUD,
+		 ("add_msr:%x, rm_msr:%x\n", add_msr, rm_msr));
+
+	if (add_msr)
+		rtlpci->irq_mask[0] |= add_msr;
+	if (rm_msr)
+		rtlpci->irq_mask[0] &= (~rm_msr);
+	rtl8821ae_disable_interrupt(hw);
+	rtl8821ae_enable_interrupt(hw);
+}
+
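+/* Map a 2.4G/5G channel number to the efuse TX power calibration group it
+ * belongs to. */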
+static u8 _rtl8821ae_get_chnl_group(u8 chnl)
+{
+	u8 group = 0;
+
+	if (chnl <= 14) {
+		if (1 <= chnl && chnl <= 2)
+			group = 0;
+		else if (3 <= chnl && chnl <= 5)
+			group = 1;
+		else if (6 <= chnl && chnl <= 8)
+			group = 2;
+		else if (9 <= chnl && chnl <= 11)
+			group = 3;
+		else /*if (12 <= chnl && chnl <= 14)*/
+			group = 4;
+	} else {
+		if (36 <= chnl && chnl <= 42)
+			group = 0;
+		else if (44 <= chnl && chnl <= 48)
+			group = 1;
+		else if (50 <= chnl && chnl <= 58)
+			group = 2;
+		else if (60 <= chnl && chnl <= 64)
+			group = 3;
+		else if (100 <= chnl && chnl <= 106)
+			group = 4;
+		else if (108 <= chnl && chnl <= 114)
+			group = 5;
+		else if (116 <= chnl && chnl <= 122)
+			group = 6;
+		else if (124 <= chnl && chnl <= 130)
+			group = 7;
+		else if (132 <= chnl && chnl <= 138)
+			group = 8;
+		else if (140 <= chnl && chnl <= 144)
+			group = 9;
+		else if (149 <= chnl && chnl <= 155)
+			group = 10;
+		else if (157 <= chnl && chnl <= 161)
+			group = 11;
+		else if (165 <= chnl && chnl <= 171)
+			group = 12;
+		else if (173 <= chnl && chnl <= 177)
+			group = 13;
+		else
+			/*RT_TRACE(COMP_EFUSE, DBG_LOUD,
+				("5G, Channel %d in Group not found\n", chnl));*/
+			RT_ASSERT(!COMP_EFUSE,
+				("5G, Channel %d in Group not found\n", chnl));
+	}
+	return group;
+}
+
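+/* Parse the per-path TX power base indexes and per-rate diff values (4-bit
+ * signed, sign-extended to 8 bits) from the efuse map; fall back to default
+ * values when autoload failed. */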
+static void _rtl8821ae_read_power_value_fromprom(struct ieee80211_hw *hw,
+	struct txpower_info_2g *pwrinfo24g,
+	struct txpower_info_5g *pwrinfo5g,
+	bool autoload_fail,
+	u8 *hwinfo)
+{
+	struct rtl_priv *rtlpriv = rtl_priv(hw);
+	u32 rfPath, eeAddr=EEPROM_TX_PWR_INX, group,TxCount=0;
+
+	RT_TRACE(COMP_INIT, DBG_LOUD, ("hal_ReadPowerValueFromPROM8821ae(): PROMContent[0x%x]=0x%x\n", (eeAddr+1), hwinfo[eeAddr+1]));
+	if (0xFF == hwinfo[eeAddr+1])  /*YJ,add,120316*/
+		autoload_fail = true;
+
+	if (autoload_fail)
+	{
+		RT_TRACE(COMP_INIT, DBG_LOUD, ("auto load fail : Use Default value!\n"));
+		for (rfPath = 0 ; rfPath < MAX_RF_PATH ; rfPath++) {
+			/*2.4G default value*/
+			for (group = 0 ; group < MAX_CHNL_GROUP_24G; group++) {
+				pwrinfo24g->index_cck_base[rfPath][group] =	0x2D;
+				pwrinfo24g->index_bw40_base[rfPath][group] = 0x2D;
+			}
+			for (TxCount = 0;TxCount < MAX_TX_COUNT;TxCount++) {
+				if (TxCount == 0) {
+					pwrinfo24g->bw20_diff[rfPath][0] = 0x02;
+					pwrinfo24g->ofdm_diff[rfPath][0] = 0x04;
+				} else {
+					pwrinfo24g->bw20_diff[rfPath][TxCount] = 0xFE;
+					pwrinfo24g->bw40_diff[rfPath][TxCount] = 0xFE;
+					pwrinfo24g->cck_diff[rfPath][TxCount] =	0xFE;
+					pwrinfo24g->ofdm_diff[rfPath][TxCount] = 0xFE;
+				}
+			}
+			/*5G default value*/
+			for (group = 0 ; group < MAX_CHNL_GROUP_5G; group++)
+				pwrinfo5g->index_bw40_base[rfPath][group] = 0x2A;
+
+			for (TxCount = 0; TxCount < MAX_TX_COUNT; TxCount++) {
+				if (TxCount == 0) {
+					pwrinfo5g->ofdm_diff[rfPath][0] = 0x04;
+					pwrinfo5g->bw20_diff[rfPath][0] = 0x00;
+					pwrinfo5g->bw80_diff[rfPath][0] = 0xFE;
+					pwrinfo5g->bw160_diff[rfPath][0] = 0xFE;
+				} else {
+					pwrinfo5g->ofdm_diff[rfPath][TxCount] = 0xFE;
+					pwrinfo5g->bw20_diff[rfPath][TxCount] = 0xFE;
+					pwrinfo5g->bw40_diff[rfPath][TxCount] = 0xFE;
+					pwrinfo5g->bw80_diff[rfPath][TxCount] = 0xFE;
+					pwrinfo5g->bw160_diff[rfPath][TxCount] = 0xFE;
+				}
+			}
+		}
+		return;
+	}
+
+	rtl_priv(hw)->efuse.b_txpwr_fromeprom = true;
+
+	for (rfPath = 0 ; rfPath < MAX_RF_PATH ; rfPath++) {
+		/*2.4G default value*/
+		for (group = 0 ; group < MAX_CHNL_GROUP_24G; group++) {
+			pwrinfo24g->index_cck_base[rfPath][group] = hwinfo[eeAddr++];
+			if (pwrinfo24g->index_cck_base[rfPath][group] == 0xFF)
+				pwrinfo24g->index_cck_base[rfPath][group] = 0x2D;
+		}
+		for (group = 0 ; group < MAX_CHNL_GROUP_24G - 1; group++) {
+			pwrinfo24g->index_bw40_base[rfPath][group] = hwinfo[eeAddr++];
+			if (pwrinfo24g->index_bw40_base[rfPath][group] == 0xFF)
+				pwrinfo24g->index_bw40_base[rfPath][group] = 0x2D;
+		}
+		for (TxCount = 0; TxCount < MAX_TX_COUNT; TxCount ++) {
+			if (TxCount == 0) {
+				pwrinfo24g->bw40_diff[rfPath][TxCount] = 0;
+				if (hwinfo[eeAddr] == 0xFF) {
+					pwrinfo24g->bw20_diff[rfPath][TxCount] = 0x02;
+				} else {
+					pwrinfo24g->bw20_diff[rfPath][TxCount] = (hwinfo[eeAddr] & 0xf0) >> 4;
+					if (pwrinfo24g->bw20_diff[rfPath][TxCount] & BIT(3))	/*bit sign number to 8 bit sign number*/
+						pwrinfo24g->bw20_diff[rfPath][TxCount] |= 0xF0;
+				}
+
+				if (hwinfo[eeAddr] == 0xFF) {
+					pwrinfo24g->ofdm_diff[rfPath][TxCount] = 0x04;
+				} else {
+					pwrinfo24g->ofdm_diff[rfPath][TxCount] = (hwinfo[eeAddr] & 0x0f);
+					if(pwrinfo24g->ofdm_diff[rfPath][TxCount] & BIT(3))		/*bit sign number to 8 bit sign number*/
+						pwrinfo24g->ofdm_diff[rfPath][TxCount] |= 0xF0;
+				}
+				pwrinfo24g->cck_diff[rfPath][TxCount] = 0;
+				eeAddr++;
+			} else {
+				if (hwinfo[eeAddr] == 0xFF) {
+					pwrinfo24g->bw40_diff[rfPath][TxCount] = 0xFE;
+				} else {
+					pwrinfo24g->bw40_diff[rfPath][TxCount] = (hwinfo[eeAddr]&0xf0) >> 4;
+					if (pwrinfo24g->bw40_diff[rfPath][TxCount] & BIT(3))
+						pwrinfo24g->bw40_diff[rfPath][TxCount] |= 0xF0;
+				}
+
+				if (hwinfo[eeAddr] == 0xFF) {
+					pwrinfo24g->bw20_diff[rfPath][TxCount] = 0xFE;
+				} else {
+					pwrinfo24g->bw20_diff[rfPath][TxCount] = (hwinfo[eeAddr] & 0x0f);
+					if (pwrinfo24g->bw20_diff[rfPath][TxCount] & BIT(3))
+						pwrinfo24g->bw20_diff[rfPath][TxCount] |= 0xF0;
+				}
+
+				eeAddr++;
+
+				if (hwinfo[eeAddr] == 0xFF) {
+					pwrinfo24g->ofdm_diff[rfPath][TxCount] = 0xFE;
+				} else {
+					pwrinfo24g->ofdm_diff[rfPath][TxCount] = (hwinfo[eeAddr] & 0xf0) >> 4;
+					if(pwrinfo24g->ofdm_diff[rfPath][TxCount] & BIT(3))
+						pwrinfo24g->ofdm_diff[rfPath][TxCount] |= 0xF0;
+				}
+
+				if (hwinfo[eeAddr] == 0xFF) {
+					pwrinfo24g->cck_diff[rfPath][TxCount] = 0xFE;
+				} else {
+					pwrinfo24g->cck_diff[rfPath][TxCount] =	(hwinfo[eeAddr] & 0x0f);
+					if(pwrinfo24g->cck_diff[rfPath][TxCount] & BIT(3))
+						pwrinfo24g->cck_diff[rfPath][TxCount] |= 0xF0;
+				}
+				eeAddr++;
+			}
+		}
+
+		/*5G default value*/
+		for (group = 0 ; group < MAX_CHNL_GROUP_5G; group ++) {
+			pwrinfo5g->index_bw40_base[rfPath][group] = hwinfo[eeAddr++];
+			if (pwrinfo5g->index_bw40_base[rfPath][group] == 0xFF)
+				pwrinfo5g->index_bw40_base[rfPath][group] = 0xFE;
+		}
+
+		for (TxCount = 0; TxCount < MAX_TX_COUNT; TxCount++) {
+			if (TxCount == 0) {
+				pwrinfo5g->bw40_diff[rfPath][TxCount] = 0;
+				if (hwinfo[eeAddr] == 0xFF) {
+					pwrinfo5g->bw20_diff[rfPath][TxCount] = 0x0;
+				} else {
+					pwrinfo5g->bw20_diff[rfPath][0] = (hwinfo[eeAddr] & 0xf0) >> 4;
+					if(pwrinfo5g->bw20_diff[rfPath][TxCount] & BIT(3))
+						pwrinfo5g->bw20_diff[rfPath][TxCount] |= 0xF0;
+				}
+
+				if (hwinfo[eeAddr] == 0xFF) {
+					pwrinfo5g->ofdm_diff[rfPath][TxCount] = 0x4;
+				} else {
+					pwrinfo5g->ofdm_diff[rfPath][0] = (hwinfo[eeAddr] & 0x0f);
+					if(pwrinfo5g->ofdm_diff[rfPath][TxCount] & BIT(3))
+						pwrinfo5g->ofdm_diff[rfPath][TxCount] |= 0xF0;
+				}
+				eeAddr++;
+			} else {
+				if (hwinfo[eeAddr] == 0xFF) {
+					pwrinfo5g->bw40_diff[rfPath][TxCount] = 0xFE;
+				} else {
+					pwrinfo5g->bw40_diff[rfPath][TxCount]= (hwinfo[eeAddr] & 0xf0) >> 4;
+					if(pwrinfo5g->bw40_diff[rfPath][TxCount] & BIT(3))
+						pwrinfo5g->bw40_diff[rfPath][TxCount] |= 0xF0;
+				}
+
+				if (hwinfo[eeAddr] == 0xFF) {
+					pwrinfo5g->bw20_diff[rfPath][TxCount] = 0xFE;
+				} else {
+					pwrinfo5g->bw20_diff[rfPath][TxCount] = (hwinfo[eeAddr] & 0x0f);
+					if(pwrinfo5g->bw20_diff[rfPath][TxCount] & BIT(3))
+						pwrinfo5g->bw20_diff[rfPath][TxCount] |= 0xF0;
+				}
+				eeAddr++;
+			}
+		}
+
+		if (hwinfo[eeAddr] == 0xFF) {
+			pwrinfo5g->ofdm_diff[rfPath][1] = 0xFE;
+			pwrinfo5g->ofdm_diff[rfPath][2] = 0xFE;
+		} else {
+			pwrinfo5g->ofdm_diff[rfPath][1] =	(hwinfo[eeAddr] & 0xf0) >> 4;
+			pwrinfo5g->ofdm_diff[rfPath][2] =	(hwinfo[eeAddr] & 0x0f);
+		}
+		eeAddr++;
+		if (hwinfo[eeAddr] == 0xFF)
+			pwrinfo5g->ofdm_diff[rfPath][3] = 0xFE;
+		else
+			pwrinfo5g->ofdm_diff[rfPath][3] = (hwinfo[eeAddr] & 0x0f);
+
+		eeAddr++;
+
+		for (TxCount = 1; TxCount < MAX_TX_COUNT; TxCount++) {
+			if (pwrinfo5g->ofdm_diff[rfPath][TxCount] == 0xFF)
+				pwrinfo5g->ofdm_diff[rfPath][TxCount] = 0xFE;
+			else if(pwrinfo5g->ofdm_diff[rfPath][TxCount] & BIT(3))
+				pwrinfo5g->ofdm_diff[rfPath][TxCount] |= 0xF0;
+		}
+		for (TxCount = 0; TxCount < MAX_TX_COUNT; TxCount++) {
+			if (hwinfo[eeAddr] == 0xFF) {
+				pwrinfo5g->bw80_diff[rfPath][TxCount] = 0xFE;
+			} else {
+				pwrinfo5g->bw80_diff[rfPath][TxCount] =	(hwinfo[eeAddr] & 0xf0) >> 4;
+				if(pwrinfo5g->bw80_diff[rfPath][TxCount] & BIT(3))		//4bit sign number to 8 bit sign number
+					pwrinfo5g->bw80_diff[rfPath][TxCount] |= 0xF0;
+			}
+
+			if (hwinfo[eeAddr] == 0xFF) {
+				pwrinfo5g->bw160_diff[rfPath][TxCount] = 0xFE;
+			} else {
+				pwrinfo5g->bw160_diff[rfPath][TxCount]=	(hwinfo[eeAddr] & 0x0f);
+				if(pwrinfo5g->bw160_diff[rfPath][TxCount] & BIT(3))		//4bit sign number to 8 bit sign number
+					pwrinfo5g->bw160_diff[rfPath][TxCount] |= 0xF0;
+			}
+			eeAddr++;
+		}
+	}
+}
+
+static void _rtl8812ae_read_txpower_info_from_hwpg(struct ieee80211_hw *hw,
+						 bool autoload_fail,
+						 u8 *hwinfo)
+{
+	struct rtl_priv *rtlpriv = rtl_priv(hw);
+	struct rtl_efuse *rtlefuse = rtl_efuse(rtl_priv(hw));
+	struct txpower_info_2g pwrinfo24g;
+	struct txpower_info_5g pwrinfo5g;
+	u8 channel5g[CHANNEL_MAX_NUMBER_5G] =
+				 {36,38,40,42,44,46,48,50,52,54,56,58,60,62,64,100,102,104,106,108,110,112,
+                          114,116,118,120,122,124,126,128,130,132,134,136,138,140,142,144,149,151,
+                          153,155,157,159,161,163,165,167,168,169,171,173,175,177};
+	u8 channel5g_80m[CHANNEL_MAX_NUMBER_5G_80M] = {42, 58, 106, 122, 138, 155, 171};
+	u8 rf_path, index;
+	u8 i;
+
+	_rtl8821ae_read_power_value_fromprom(hw, &pwrinfo24g, &pwrinfo5g, autoload_fail, hwinfo);
+
+	for (rf_path = 0; rf_path < 2; rf_path++) {
+		for (i = 0; i < CHANNEL_MAX_NUMBER_2G; i++) {
+			index = _rtl8821ae_get_chnl_group(i + 1);
+
+			if (i == CHANNEL_MAX_NUMBER_2G - 1) {
+				rtlefuse->txpwrlevel_cck[rf_path][i] =
+					pwrinfo24g.index_cck_base[rf_path][5];
+				rtlefuse->txpwrlevel_ht40_1s[rf_path][i] =
+					pwrinfo24g.index_bw40_base[rf_path][index];
+			} else {
+				rtlefuse->txpwrlevel_cck[rf_path][i] =
+					pwrinfo24g.index_cck_base[rf_path][index];
+				rtlefuse->txpwrlevel_ht40_1s[rf_path][i] =
+					pwrinfo24g.index_bw40_base[rf_path][index];
+			}
+		}
+
+		for (i = 0; i < CHANNEL_MAX_NUMBER_5G; i++) {
+			index = _rtl8821ae_get_chnl_group(channel5g[i]);
+			rtlefuse->txpwr_5g_bw40base[rf_path][i] = pwrinfo5g.index_bw40_base[rf_path][index];
+		}
+		for (i = 0; i < CHANNEL_MAX_NUMBER_5G_80M; i++) {
+			u8 upper, lower;
+			index = _rtl8821ae_get_chnl_group(channel5g_80m[i]);
+			upper = pwrinfo5g.index_bw40_base[rf_path][index];
+			lower = pwrinfo5g.index_bw40_base[rf_path][index + 1];
+
+			rtlefuse->txpwr_5g_bw80base[rf_path][i] = (upper + lower) / 2;
+		}
+		for (i = 0; i < MAX_TX_COUNT; i++) {
+			rtlefuse->txpwr_cckdiff[rf_path][i] = pwrinfo24g.cck_diff[rf_path][i];
+			rtlefuse->txpwr_legacyhtdiff[rf_path][i] = pwrinfo24g.ofdm_diff[rf_path][i];
+			rtlefuse->txpwr_ht20diff[rf_path][i] = pwrinfo24g.bw20_diff[rf_path][i];
+			rtlefuse->txpwr_ht40diff[rf_path][i] = pwrinfo24g.bw40_diff[rf_path][i];
+
+			rtlefuse->txpwr_5g_ofdmdiff[rf_path][i] = pwrinfo5g.ofdm_diff[rf_path][i];
+			rtlefuse->txpwr_5g_bw20diff[rf_path][i] = pwrinfo5g.bw20_diff[rf_path][i];
+			rtlefuse->txpwr_5g_bw40diff[rf_path][i] = pwrinfo5g.bw40_diff[rf_path][i];
+			rtlefuse->txpwr_5g_bw80diff[rf_path][i] = pwrinfo5g.bw80_diff[rf_path][i];
+		}
+	}
+
+	if (!autoload_fail){
+		rtlefuse->eeprom_regulatory =
+			hwinfo[EEPROM_RF_BOARD_OPTION] & 0x07;/*bit0~2*/
+		if (hwinfo[EEPROM_RF_BOARD_OPTION] == 0xFF)
+			rtlefuse->eeprom_regulatory = 0;
+	} else {
+		rtlefuse->eeprom_regulatory = 0;
+	}
+
+	RTPRINT(rtlpriv, FINIT, INIT_TxPower,
+	("eeprom_regulatory = 0x%x\n", rtlefuse->eeprom_regulatory ));
+}
+
+static void _rtl8821ae_read_txpower_info_from_hwpg(struct ieee80211_hw *hw,
+						 bool autoload_fail,
+						 u8 *hwinfo)
+{
+	struct rtl_priv *rtlpriv = rtl_priv(hw);
+	struct rtl_efuse *rtlefuse = rtl_efuse(rtl_priv(hw));
+	struct txpower_info_2g pwrinfo24g;
+	struct txpower_info_5g pwrinfo5g;
+	u8 channel5g[CHANNEL_MAX_NUMBER_5G] =
+				{36,38,40,42,44,46,48,50,52,54,56,58,60,62,64,100,102,104,106,108,110,112,
+                 114,116,118,120,122,124,126,128,130,132,134,136,138,140,142,144,149,151,
+                 153,155,157,159,161,163,165,167,168,169,171,173,175,177};
+	u8 channel5g_80m[CHANNEL_MAX_NUMBER_5G_80M] = {42, 58, 106, 122, 138, 155, 171};
+	u8 rf_path, index;
+	u8 i;
+
+	_rtl8821ae_read_power_value_fromprom(hw, &pwrinfo24g, &pwrinfo5g, autoload_fail, hwinfo);
+
+	for (rf_path = 0; rf_path < 2; rf_path++) {
+		for (i = 0; i < CHANNEL_MAX_NUMBER_2G; i++) {
+			index = _rtl8821ae_get_chnl_group(i + 1);
+
+			if (i == CHANNEL_MAX_NUMBER_2G - 1) {
+				rtlefuse->txpwrlevel_cck[rf_path][i] = pwrinfo24g.index_cck_base[rf_path][5];
+				rtlefuse->txpwrlevel_ht40_1s[rf_path][i] = pwrinfo24g.index_bw40_base[rf_path][index];
+			} else {
+				rtlefuse->txpwrlevel_cck[rf_path][i] = pwrinfo24g.index_cck_base[rf_path][index];
+				rtlefuse->txpwrlevel_ht40_1s[rf_path][i] = pwrinfo24g.index_bw40_base[rf_path][index];
+			}
+		}
+
+		for (i = 0; i < CHANNEL_MAX_NUMBER_5G; i++) {
+			index = _rtl8821ae_get_chnl_group(channel5g[i]);
+			rtlefuse->txpwr_5g_bw40base[rf_path][i] = pwrinfo5g.index_bw40_base[rf_path][index];
+		}
+		for (i = 0; i < CHANNEL_MAX_NUMBER_5G_80M; i++) {
+			u8 upper, lower;
+			index = _rtl8821ae_get_chnl_group(channel5g_80m[i]);
+			upper = pwrinfo5g.index_bw40_base[rf_path][index];
+			lower = pwrinfo5g.index_bw40_base[rf_path][index + 1];
+
+			rtlefuse->txpwr_5g_bw80base[rf_path][i] = (upper + lower) / 2;
+		}
+		for (i = 0; i < MAX_TX_COUNT; i++) {
+			rtlefuse->txpwr_cckdiff[rf_path][i] = pwrinfo24g.cck_diff[rf_path][i];
+			rtlefuse->txpwr_legacyhtdiff[rf_path][i] = pwrinfo24g.ofdm_diff[rf_path][i];
+			rtlefuse->txpwr_ht20diff[rf_path][i] = pwrinfo24g.bw20_diff[rf_path][i];
+			rtlefuse->txpwr_ht40diff[rf_path][i] = pwrinfo24g.bw40_diff[rf_path][i];
+
+			rtlefuse->txpwr_5g_ofdmdiff[rf_path][i] = pwrinfo5g.ofdm_diff[rf_path][i];
+			rtlefuse->txpwr_5g_bw20diff[rf_path][i] = pwrinfo5g.bw20_diff[rf_path][i];
+			rtlefuse->txpwr_5g_bw40diff[rf_path][i] = pwrinfo5g.bw40_diff[rf_path][i];
+			rtlefuse->txpwr_5g_bw80diff[rf_path][i] = pwrinfo5g.bw80_diff[rf_path][i];
+		}
+	}
+
+	if (!autoload_fail){
+		rtlefuse->eeprom_regulatory = hwinfo[EEPROM_RF_BOARD_OPTION] & 0x07;/*bit0~2*/
+		if (hwinfo[EEPROM_RF_BOARD_OPTION] == 0xFF)
+			rtlefuse->eeprom_regulatory = 0;
+	} else {
+		rtlefuse->eeprom_regulatory = 0;
+	}
+
+	RTPRINT(rtlpriv, FINIT, INIT_TxPower,
+		("eeprom_regulatory = 0x%x\n", rtlefuse->eeprom_regulatory));
+}
+
+static void _rtl8812ae_read_adapter_info(struct ieee80211_hw *hw, bool b_pseudo_test)
+{
+	struct rtl_priv *rtlpriv = rtl_priv(hw);
+	struct rtl_efuse *rtlefuse = rtl_efuse(rtl_priv(hw));
+	struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
+	struct rtl_pci_priv *pcipriv = rtl_pcipriv(hw);
+	u16 i, usvalue;
+	u8 hwinfo[HWSET_MAX_SIZE];
+	u16 eeprom_id;
+
+	if (b_pseudo_test) {
+		/* need add */
+	}
+
+	if (rtlefuse->epromtype == EEPROM_BOOT_EFUSE) {
+		rtl_efuse_shadow_map_update(hw);
+		memcpy(hwinfo, &rtlefuse->efuse_map[EFUSE_INIT_MAP][0],
+		       HWSET_MAX_SIZE);
+	} else if (rtlefuse->epromtype == EEPROM_93C46) {
+		RT_TRACE(COMP_ERR, DBG_EMERG,
+			 ("RTL819X Not boot from eeprom, check it !!"));
+	}
+
+	RT_PRINT_DATA(rtlpriv, COMP_INIT, DBG_DMESG, ("MAP \n"),
+		      hwinfo, HWSET_MAX_SIZE);
+
+	eeprom_id = *((u16 *) & hwinfo[0]);
+	if (eeprom_id != RTL_EEPROM_ID) {
+		RT_TRACE(COMP_ERR, DBG_WARNING,
+			 ("EEPROM ID(%#x) is invalid!!\n", eeprom_id));
+		rtlefuse->autoload_failflag = true;
+	} else {
+		RT_TRACE(COMP_INIT, DBG_LOUD, ("Autoload OK\n"));
+		rtlefuse->autoload_failflag = false;
+	}
+
+	if (rtlefuse->autoload_failflag == true) {
+		RT_TRACE(COMP_ERR, DBG_EMERG,
+			 ("RTL8812AE autoload_failflag, check it !!"));
+		return;
+	}
+
+	rtlefuse->eeprom_version = *(u8 *) & hwinfo[EEPROM_VERSION];
+	if (rtlefuse->eeprom_version == 0xff)
+			rtlefuse->eeprom_version = 0;
+
+	RT_TRACE(COMP_INIT, DBG_LOUD,
+		 ("EEPROM version: 0x%2x\n", rtlefuse->eeprom_version));
+
+	rtlefuse->eeprom_vid = *(u16 *) &hwinfo[EEPROM_VID];
+	rtlefuse->eeprom_did = *(u16 *) &hwinfo[EEPROM_DID];
+	rtlefuse->eeprom_svid = *(u16 *) &hwinfo[EEPROM_SVID];
+	rtlefuse->eeprom_smid = *(u16 *) &hwinfo[EEPROM_SMID];
+	RT_TRACE(COMP_INIT, DBG_LOUD,
+		 ("EEPROMId = 0x%4x\n", eeprom_id));
+	RT_TRACE(COMP_INIT, DBG_LOUD,
+		 ("EEPROM VID = 0x%4x\n", rtlefuse->eeprom_vid));
+	RT_TRACE(COMP_INIT, DBG_LOUD,
+		 ("EEPROM DID = 0x%4x\n", rtlefuse->eeprom_did));
+	RT_TRACE(COMP_INIT, DBG_LOUD,
+		 ("EEPROM SVID = 0x%4x\n", rtlefuse->eeprom_svid));
+	RT_TRACE(COMP_INIT, DBG_LOUD,
+		 ("EEPROM SMID = 0x%4x\n", rtlefuse->eeprom_smid));
+
+	/*customer ID*/
+	rtlefuse->eeprom_oemid = *(u8 *) & hwinfo[EEPROM_CUSTOMER_ID];
+	if (rtlefuse->eeprom_oemid == 0xFF)
+		rtlefuse->eeprom_oemid = 0;
+
+	RT_TRACE(COMP_INIT, DBG_LOUD,
+		 ("EEPROM Customer ID: 0x%2x\n", rtlefuse->eeprom_oemid));
+
+	for (i = 0; i < 6; i += 2) {
+		usvalue = *(u16 *) & hwinfo[EEPROM_MAC_ADDR + i];
+		*((u16 *) (&rtlefuse->dev_addr[i])) = usvalue;
+	}
+
+	RT_TRACE(COMP_INIT, DBG_DMESG,
+		 ("dev_addr: %pM\n", rtlefuse->dev_addr));
+
+	_rtl8812ae_read_txpower_info_from_hwpg(hw,
+		rtlefuse->autoload_failflag, hwinfo);
+
+	/*board type*/
+	rtlefuse->board_type = (((*(u8 *) & hwinfo[EEPROM_RF_BOARD_OPTION]) & 0xE0 ) >> 5);
+	if ((*(u8 *) & hwinfo[EEPROM_RF_BOARD_OPTION]) == 0xff )
+		rtlefuse->board_type = 0;
+	rtlhal->boad_type = rtlefuse->board_type;
+
+	rtl8812ae_read_bt_coexist_info_from_hwpg(hw,
+			rtlefuse->autoload_failflag, hwinfo);
+
+	rtlefuse->eeprom_channelplan = *(u8 *) & hwinfo[EEPROM_CHANNELPLAN];
+	if (rtlefuse->eeprom_channelplan == 0xff)
+		rtlefuse->eeprom_channelplan = 0x7F;
+
+	/* set channel plan to world wide 13 */
+	//rtlefuse->channel_plan = (u8) rtlefuse->eeprom_channelplan;
+
+	/*parse xtal*/
+	rtlefuse->crystalcap = hwinfo[EEPROM_XTAL_8821AE];
+	if (rtlefuse->crystalcap == 0xFF)
+		rtlefuse->crystalcap = 0x20;
+
+	rtlefuse->eeprom_thermalmeter = *(u8 *) & hwinfo[EEPROM_THERMAL_METER];
+	if ((rtlefuse->eeprom_thermalmeter == 0xff) || rtlefuse->autoload_failflag) {
+		rtlefuse->b_apk_thermalmeterignore = true;
+		rtlefuse->eeprom_thermalmeter = 0xff;
+	}
+
+	rtlefuse->thermalmeter[0] = rtlefuse->eeprom_thermalmeter;
+	RT_TRACE(COMP_INIT, DBG_LOUD,
+		 ("thermalmeter = 0x%x\n", rtlefuse->eeprom_thermalmeter));
+
+	if (rtlefuse->autoload_failflag == false) {
+		rtlefuse->antenna_div_cfg = (*(u8 *)&hwinfo[EEPROM_RF_BOARD_OPTION] & 0x18) >> 3; /* bits [4:3] */
+		if (*(u8 *) & hwinfo[EEPROM_RF_BOARD_OPTION] == 0xff)
+			rtlefuse->antenna_div_cfg = 0x00;
+		/*if (BT_1ant())
+			rtlefuse->antenna_div_cfg = 0;*/
+		rtlefuse->antenna_div_type = *(u8 *) & hwinfo[EEPROM_RF_ANTENNA_OPT_88E];
+		if (rtlefuse->antenna_div_type == 0xFF)
+			rtlefuse->antenna_div_type = FIXED_HW_ANTDIV;
+	} else {
+		rtlefuse->antenna_div_cfg = 0;
+		rtlefuse->antenna_div_type = 0;
+	}
+
+	RT_TRACE(COMP_INIT, DBG_LOUD,
+		("SWAS: bHwAntDiv = %x, TRxAntDivType = %x\n",
+                rtlefuse->antenna_div_cfg, rtlefuse->antenna_div_type));
+
+	/*Hal_ReadPAType_8821A()*/
+	/*Hal_EfuseParseRateIndicationOption8821A()*/
+	/*Hal_ReadEfusePCIeCap8821AE()*/
+
+	pcipriv->ledctl.bled_opendrain = true;
+
+	if (rtlhal->oem_id == RT_CID_DEFAULT) {
+		switch (rtlefuse->eeprom_oemid) {
+		case RT_CID_DEFAULT:
+			break;
+		case EEPROM_CID_TOSHIBA:
+			rtlhal->oem_id = RT_CID_TOSHIBA;
+			break;
+		case EEPROM_CID_CCX:
+			rtlhal->oem_id = RT_CID_CCX;
+			break;
+		case EEPROM_CID_QMI:
+			rtlhal->oem_id = RT_CID_819x_QMI;
+			break;
+		case EEPROM_CID_WHQL:
+			break;
+		default:
+			break;
+
+		}
+	}
+}
+
+static void _rtl8821ae_read_adapter_info(struct ieee80211_hw *hw, bool b_pseudo_test)
+{
+	struct rtl_priv *rtlpriv = rtl_priv(hw);
+	struct rtl_efuse *rtlefuse = rtl_efuse(rtl_priv(hw));
+	struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
+	struct rtl_pci_priv *pcipriv = rtl_pcipriv(hw);
+	u16 i, usvalue;
+	u8 hwinfo[HWSET_MAX_SIZE];
+	u16 eeprom_id;
+
+	if (b_pseudo_test) {
+		/* need add */
+	}
+
+	if (rtlefuse->epromtype == EEPROM_BOOT_EFUSE) {
+		rtl_efuse_shadow_map_update(hw);
+		memcpy(hwinfo, &rtlefuse->efuse_map[EFUSE_INIT_MAP][0],
+		       HWSET_MAX_SIZE);
+	} else if (rtlefuse->epromtype == EEPROM_93C46) {
+		RT_TRACE(COMP_ERR, DBG_EMERG,
+			 ("RTL819X Not boot from eeprom, check it !!"));
+	}
+
+	RT_PRINT_DATA(rtlpriv, COMP_INIT, DBG_DMESG, ("MAP \n"),
+		      hwinfo, HWSET_MAX_SIZE);
+
+	eeprom_id = *((u16 *) & hwinfo[0]);
+	if (eeprom_id != RTL_EEPROM_ID) {
+		RT_TRACE(COMP_ERR, DBG_WARNING,
+			 ("EEPROM ID(%#x) is invalid!!\n", eeprom_id));
+		rtlefuse->autoload_failflag = true;
+	} else {
+		RT_TRACE(COMP_INIT, DBG_LOUD, ("Autoload OK\n"));
+		rtlefuse->autoload_failflag = false;
+	}
+
+	if (rtlefuse->autoload_failflag == true) {
+		RT_TRACE(COMP_ERR, DBG_EMERG,
+			 ("RTL8821AE autoload_failflag, check it !!"));
+		return;
+	}
+
+	rtlefuse->eeprom_version = *(u8 *) & hwinfo[EEPROM_VERSION];
+	if (rtlefuse->eeprom_version == 0xff)
+			rtlefuse->eeprom_version = 0;
+
+	RT_TRACE(COMP_INIT, DBG_LOUD,
+		 ("EEPROM version: 0x%2x\n", rtlefuse->eeprom_version));
+
+	rtlefuse->eeprom_vid = *(u16 *) &hwinfo[EEPROM_VID];
+	rtlefuse->eeprom_did = *(u16 *) &hwinfo[EEPROM_DID];
+	rtlefuse->eeprom_svid = *(u16 *) &hwinfo[EEPROM_SVID];
+	rtlefuse->eeprom_smid = *(u16 *) &hwinfo[EEPROM_SMID];
+	RT_TRACE(COMP_INIT, DBG_LOUD,
+		 ("EEPROMId = 0x%4x\n", eeprom_id));
+	RT_TRACE(COMP_INIT, DBG_LOUD,
+		 ("EEPROM VID = 0x%4x\n", rtlefuse->eeprom_vid));
+	RT_TRACE(COMP_INIT, DBG_LOUD,
+		 ("EEPROM DID = 0x%4x\n", rtlefuse->eeprom_did));
+	RT_TRACE(COMP_INIT, DBG_LOUD,
+		 ("EEPROM SVID = 0x%4x\n", rtlefuse->eeprom_svid));
+	RT_TRACE(COMP_INIT, DBG_LOUD,
+		 ("EEPROM SMID = 0x%4x\n", rtlefuse->eeprom_smid));
+
+	/*customer ID*/
+	rtlefuse->eeprom_oemid = *(u8 *) & hwinfo[EEPROM_CUSTOMER_ID];
+	if (rtlefuse->eeprom_oemid == 0xFF)
+		rtlefuse->eeprom_oemid = 0;
+
+	RT_TRACE(COMP_INIT, DBG_LOUD,
+		 ("EEPROM Customer ID: 0x%2x\n", rtlefuse->eeprom_oemid));
+
+	for (i = 0; i < 6; i += 2) {
+		usvalue = *(u16 *) & hwinfo[EEPROM_MAC_ADDR + i];
+		*((u16 *) (&rtlefuse->dev_addr[i])) = usvalue;
+	}
+
+	RT_TRACE(COMP_INIT, DBG_DMESG,
+		 ("dev_addr: %pM\n", rtlefuse->dev_addr));
+
+	_rtl8821ae_read_txpower_info_from_hwpg(hw,
+		rtlefuse->autoload_failflag, hwinfo);
+
+	/*board type*/
+	rtlefuse->board_type = (((*(u8 *) & hwinfo[EEPROM_RF_BOARD_OPTION]) & 0xE0 ) >> 5);
+	if ((*(u8 *) & hwinfo[EEPROM_RF_BOARD_OPTION]) == 0xff )
+		rtlefuse->board_type = 0;
+	rtlhal->boad_type = rtlefuse->board_type;
+
+	rtl8821ae_read_bt_coexist_info_from_hwpg(hw,
+			rtlefuse->autoload_failflag, hwinfo);
+
+	rtlefuse->eeprom_channelplan = *(u8 *) & hwinfo[EEPROM_CHANNELPLAN];
+	if (rtlefuse->eeprom_channelplan == 0xff)
+		rtlefuse->eeprom_channelplan = 0x7F;
+
+	/* set channel plan to world wide 13 */
+	//rtlefuse->channel_plan = (u8) rtlefuse->eeprom_channelplan;
+
+	/*parse xtal*/
+	rtlefuse->crystalcap = hwinfo[EEPROM_XTAL_8821AE];
+	if (rtlefuse->crystalcap == 0xFF)
+		rtlefuse->crystalcap = 0x20;
+
+	rtlefuse->eeprom_thermalmeter = *(u8 *) & hwinfo[EEPROM_THERMAL_METER];
+	if ((rtlefuse->eeprom_thermalmeter == 0xff) || rtlefuse->autoload_failflag) {
+		rtlefuse->b_apk_thermalmeterignore = true;
+		rtlefuse->eeprom_thermalmeter = 0x18;
+	}
+
+	rtlefuse->thermalmeter[0] = rtlefuse->eeprom_thermalmeter;
+	RT_TRACE(COMP_INIT, DBG_LOUD,
+		 ("thermalmeter = 0x%x\n", rtlefuse->eeprom_thermalmeter));
+
+	if (rtlefuse->autoload_failflag == false) {
+		rtlefuse->antenna_div_cfg = (*(u8 *)&hwinfo[EEPROM_RF_BOARD_OPTION] & BIT(3)) ? true : false;
+		/*if (BT_1ant())
+			rtlefuse->antenna_div_cfg = 0;*/
+
+		rtlefuse->antenna_div_type = CG_TRX_HW_ANTDIV;
+	} else {
+		rtlefuse->antenna_div_cfg = 0;
+		rtlefuse->antenna_div_type = 0;
+	}
+
+	RT_TRACE(COMP_INIT, DBG_LOUD,
+		("SWAS: bHwAntDiv = %x, TRxAntDivType = %x\n",
+                rtlefuse->antenna_div_cfg, rtlefuse->antenna_div_type));
+
+	pcipriv->ledctl.bled_opendrain = true;
+
+	if (rtlhal->oem_id == RT_CID_DEFAULT) {
+		switch (rtlefuse->eeprom_oemid) {
+		case RT_CID_DEFAULT:
+			break;
+		case EEPROM_CID_TOSHIBA:
+			rtlhal->oem_id = RT_CID_TOSHIBA;
+			break;
+		case EEPROM_CID_CCX:
+			rtlhal->oem_id = RT_CID_CCX;
+			break;
+		case EEPROM_CID_QMI:
+			rtlhal->oem_id = RT_CID_819x_QMI;
+			break;
+		case EEPROM_CID_WHQL:
+			break;
+		default:
+			break;
+		}
+	}
+}
+
+
+/*static void _rtl8821ae_hal_customized_behavior(struct ieee80211_hw *hw)
+{
+	struct rtl_priv *rtlpriv = rtl_priv(hw);
+	struct rtl_pci_priv *pcipriv = rtl_pcipriv(hw);
+	struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
+
+	pcipriv->ledctl.bled_opendrain = true;
+	switch (rtlhal->oem_id) {
+	case RT_CID_819x_HP:
+		pcipriv->ledctl.bled_opendrain = true;
+		break;
+	case RT_CID_819x_Lenovo:
+	case RT_CID_DEFAULT:
+	case RT_CID_TOSHIBA:
+	case RT_CID_CCX:
+	case RT_CID_819x_Acer:
+	case RT_CID_WHQL:
+	default:
+		break;
+	}
+	RT_TRACE(COMP_INIT, DBG_DMESG,
+		 ("RT Customized ID: 0x%02X\n", rtlhal->oem_id));
+}*/
+
+void rtl8821ae_read_eeprom_info(struct ieee80211_hw *hw)
+{
+	struct rtl_priv *rtlpriv = rtl_priv(hw);
+	struct rtl_efuse *rtlefuse = rtl_efuse(rtl_priv(hw));
+	struct rtl_phy *rtlphy = &(rtlpriv->phy);
+	struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
+	u8 tmp_u1b;
+
+	rtlhal->version = _rtl8821ae_read_chip_version(hw);
+
+	if (get_rf_type(rtlphy) == RF_1T1R)
+		rtlpriv->dm.brfpath_rxenable[0] = true;
+	else
+		rtlpriv->dm.brfpath_rxenable[0] =
+		    rtlpriv->dm.brfpath_rxenable[1] = true;
+	RT_TRACE(COMP_INIT, DBG_LOUD, ("VersionID = 0x%4x\n",
+						rtlhal->version));
+
+	tmp_u1b = rtl_read_byte(rtlpriv, REG_9346CR);
+	if (tmp_u1b & BIT(4)) {
+		RT_TRACE(COMP_INIT, DBG_DMESG, ("Boot from EEPROM\n"));
+		rtlefuse->epromtype = EEPROM_93C46;
+	} else {
+		RT_TRACE(COMP_INIT, DBG_DMESG, ("Boot from EFUSE\n"));
+		rtlefuse->epromtype = EEPROM_BOOT_EFUSE;
+	}
+
+	if (tmp_u1b & BIT(5)) {
+		RT_TRACE(COMP_INIT, DBG_LOUD, ("Autoload OK\n"));
+		rtlefuse->autoload_failflag = false;
+		if (rtlhal->hw_type == HARDWARE_TYPE_RTL8812AE)
+			_rtl8812ae_read_adapter_info(hw, false);
+		else if (rtlhal->hw_type == HARDWARE_TYPE_RTL8821AE)
+			_rtl8821ae_read_adapter_info(hw, false);
+	} else {
+			RT_TRACE(COMP_ERR, DBG_EMERG, ("Autoload ERR!!\n"));
+	}
+	/*hal_ReadRFType_8812A()*/
+	//_rtl8821ae_hal_customized_behavior(hw);
+}
+
+static void rtl8821ae_update_hal_rate_table(struct ieee80211_hw *hw,
+		struct ieee80211_sta *sta)
+{
+	struct rtl_priv *rtlpriv = rtl_priv(hw);
+	struct rtl_pci_priv *rtlpcipriv = rtl_pcipriv(hw);
+	struct rtl_phy *rtlphy = &(rtlpriv->phy);
+	struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
+	struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
+	u32 ratr_value;
+	u8 ratr_index = 0;
+	u8 b_nmode = mac->ht_enable;
+	u8 mimo_ps = IEEE80211_SMPS_OFF;
+	u16 shortgi_rate;
+	u32 tmp_ratr_value;
+	u8 b_curtxbw_40mhz = mac->bw_40;
+	u8 b_curshortgi_40mhz = (sta->ht_cap.cap & IEEE80211_HT_CAP_SGI_40) ?
+				1 : 0;
+	u8 b_curshortgi_20mhz = (sta->ht_cap.cap & IEEE80211_HT_CAP_SGI_20) ?
+				1 : 0;
+	enum wireless_mode wirelessmode = mac->mode;
+
+	if (rtlhal->current_bandtype == BAND_ON_5G)
+		ratr_value = sta->supp_rates[1] << 4;
+	else
+		ratr_value = sta->supp_rates[0];
+	if (mac->opmode == NL80211_IFTYPE_ADHOC)
+		ratr_value = 0xfff;
+	ratr_value |= (sta->ht_cap.mcs.rx_mask[1] << 20 |
+			sta->ht_cap.mcs.rx_mask[0] << 12);
+	switch (wirelessmode) {
+	case WIRELESS_MODE_B:
+		if (ratr_value & 0x0000000c)
+			ratr_value &= 0x0000000d;
+		else
+			ratr_value &= 0x0000000f;
+		break;
+	case WIRELESS_MODE_G:
+		ratr_value &= 0x00000FF5;
+		break;
+	case WIRELESS_MODE_N_24G:
+	case WIRELESS_MODE_N_5G:
+		b_nmode = 1;
+		if (mimo_ps == IEEE80211_SMPS_STATIC) {
+			ratr_value &= 0x0007F005;
+		} else {
+			u32 ratr_mask;
+
+			if (get_rf_type(rtlphy) == RF_1T2R ||
+			    get_rf_type(rtlphy) == RF_1T1R)
+				ratr_mask = 0x000ff005;
+			else
+				ratr_mask = 0x0f0ff005;
+
+			ratr_value &= ratr_mask;
+		}
+		break;
+	default:
+		if (rtlphy->rf_type == RF_1T2R)
+			ratr_value &= 0x000ff0ff;
+		else
+			ratr_value &= 0x0f0ff0ff;
+
+		break;
+	}
+
+	if ((rtlpcipriv->btcoexist.bt_coexistence) &&
+	    (rtlpcipriv->btcoexist.bt_coexist_type == BT_CSR_BC4) &&
+	    (rtlpcipriv->btcoexist.bt_cur_state) &&
+	    (rtlpcipriv->btcoexist.bt_ant_isolation) &&
+	    ((rtlpcipriv->btcoexist.bt_service == BT_SCO) ||
+	     (rtlpcipriv->btcoexist.bt_service == BT_BUSY)))
+		ratr_value &= 0x0fffcfc0;
+	else
+		ratr_value &= 0x0FFFFFFF;
+
+	if (b_nmode && ((b_curtxbw_40mhz &&
+			 b_curshortgi_40mhz) || (!b_curtxbw_40mhz &&
+						 b_curshortgi_20mhz))) {
+
+		ratr_value |= 0x10000000;
+		tmp_ratr_value = (ratr_value >> 12);
+
+		for (shortgi_rate = 15; shortgi_rate > 0; shortgi_rate--) {
+			if ((1 << shortgi_rate) & tmp_ratr_value)
+				break;
+		}
+
+		shortgi_rate = (shortgi_rate << 12) | (shortgi_rate << 8) |
+		    (shortgi_rate << 4) | (shortgi_rate);
+	}
+
+	rtl_write_dword(rtlpriv, REG_ARFR0 + ratr_index * 4, ratr_value);
+
+	RT_TRACE(COMP_RATR, DBG_DMESG,
+		 ("%x\n", rtl_read_dword(rtlpriv, REG_ARFR0)));
+}
+
+
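+/* Maps the driver's rate-adaptive (RATR) index to the ARFR id byte that is
+ * placed in rate_mask[1] of the H2C RA-mask command below; the id also
+ * depends on whether the PHY is 1T1R or 2T2R. */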
+static u8 _rtl8821ae_mrate_idx_to_arfr_id(
+	struct ieee80211_hw *hw, u8 rate_index,
+	enum wireless_mode wirelessmode)
+{
+	struct rtl_priv *rtlpriv = rtl_priv(hw);
+	struct rtl_phy *rtlphy = &(rtlpriv->phy);
+	u8 ret = 0;
+	switch (rate_index) {
+		case RATR_INX_WIRELESS_NGB:
+			if (rtlphy->rf_type == RF_1T1R)
+				ret = 1;
+			else
+				ret = 0;
+			break;
+		case RATR_INX_WIRELESS_N:
+		case RATR_INX_WIRELESS_NG:
+			if (rtlphy->rf_type == RF_1T1R)
+				ret = 5;
+			else
+				ret = 4;
+			break;
+		case RATR_INX_WIRELESS_NB:
+			if (rtlphy->rf_type == RF_1T1R)
+				ret = 3;
+			else
+				ret = 2;
+			break;
+		case RATR_INX_WIRELESS_GB:
+			ret = 6;
+			break;
+		case RATR_INX_WIRELESS_G:
+			ret = 7;
+			break;
+		case RATR_INX_WIRELESS_B:
+			ret = 8;
+			break;
+		case RATR_INX_WIRELESS_MC:
+			if ((wirelessmode == WIRELESS_MODE_B)
+				|| (wirelessmode == WIRELESS_MODE_G)
+				|| (wirelessmode == WIRELESS_MODE_N_24G)
+				|| (wirelessmode == WIRELESS_MODE_AC_24G))
+				ret = 6;
+			else
+				ret = 7;
+			break;
+		case RATR_INX_WIRELESS_AC_5N:
+			if (rtlphy->rf_type == RF_1T1R)
+				ret = 10;
+			else
+				ret = 9;
+			break;
+		case RATR_INX_WIRELESS_AC_24N:
+			if (rtlphy->current_chan_bw == HT_CHANNEL_WIDTH_80) {
+				if (rtlphy->rf_type == RF_1T1R)
+					ret = 10;
+				else
+					ret = 9;
+			} else {
+				if (rtlphy->rf_type == RF_1T1R)
+					ret = 11;
+				else
+					ret = 12;
+			}
+			break;
+		default:
+			ret = 0;
+			break;
+	}
+	return ret;
+}
+
+static void rtl8821ae_update_hal_rate_mask(struct ieee80211_hw *hw,
+		struct ieee80211_sta *sta, u8 rssi_level)
+{
+	struct rtl_priv *rtlpriv = rtl_priv(hw);
+	struct rtl_phy *rtlphy = &(rtlpriv->phy);
+	struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
+	struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
+	struct rtl_sta_info * sta_entry = NULL;
+	u32 ratr_bitmap;
+	u8 ratr_index;
+	u8 b_curtxbw_40mhz = (sta->ht_cap.cap & IEEE80211_HT_CAP_SUP_WIDTH_20_40)
+				? 1 : 0;
+	u8 b_curshortgi_40mhz = (sta->ht_cap.cap & IEEE80211_HT_CAP_SGI_40) ?
+				1 : 0;
+	u8 b_curshortgi_20mhz = (sta->ht_cap.cap & IEEE80211_HT_CAP_SGI_20) ?
+				1 : 0;
+	enum wireless_mode wirelessmode = 0;
+	bool b_shortgi = false;
+	u8 rate_mask[7];
+	u8 macid = 0;
+	u8 mimo_ps = IEEE80211_SMPS_OFF;
+
+	sta_entry = (struct rtl_sta_info *) sta->drv_priv;
+	wirelessmode = sta_entry->wireless_mode;
+	if (mac->opmode == NL80211_IFTYPE_STATION ||
+		mac->opmode == NL80211_IFTYPE_MESH_POINT)
+		b_curtxbw_40mhz = mac->bw_40;
+	else if (mac->opmode == NL80211_IFTYPE_AP ||
+		mac->opmode == NL80211_IFTYPE_ADHOC)
+		macid = sta->aid + 1;
+
+	ratr_bitmap = sta->supp_rates[0];
+
+	if (mac->opmode == NL80211_IFTYPE_ADHOC)
+		ratr_bitmap = 0xfff;
+
+	ratr_bitmap |= (sta->ht_cap.mcs.rx_mask[1] << 20 |
+			sta->ht_cap.mcs.rx_mask[0] << 12);
+/*mac id owner*/
+	switch (wirelessmode) {
+	case WIRELESS_MODE_B:
+		ratr_index = RATR_INX_WIRELESS_B;
+		if (ratr_bitmap & 0x0000000c)
+			ratr_bitmap &= 0x0000000d;
+		else
+			ratr_bitmap &= 0x0000000f;
+		break;
+	case WIRELESS_MODE_G:
+		ratr_index = RATR_INX_WIRELESS_GB;
+
+		if (rssi_level == 1)
+			ratr_bitmap &= 0x00000f00;
+		else if (rssi_level == 2)
+			ratr_bitmap &= 0x00000ff0;
+		else
+			ratr_bitmap &= 0x00000ff5;
+		break;
+	case WIRELESS_MODE_A:
+		ratr_index = RATR_INX_WIRELESS_G;
+		ratr_bitmap &= 0x00000ff0;
+		break;
+	case WIRELESS_MODE_N_24G:
+	case WIRELESS_MODE_N_5G:
+		if (wirelessmode == WIRELESS_MODE_N_24G)
+			ratr_index = RATR_INX_WIRELESS_NGB;
+		else
+			ratr_index = RATR_INX_WIRELESS_NG;
+
+		if (mimo_ps == IEEE80211_SMPS_STATIC  || mimo_ps == IEEE80211_SMPS_DYNAMIC) {
+			if (rssi_level == 1)
+				ratr_bitmap &= 0x00070000;
+			else if (rssi_level == 2)
+				ratr_bitmap &= 0x0007f000;
+			else
+				ratr_bitmap &= 0x0007f005;
+		} else {
+			if (rtlphy->rf_type == RF_1T1R) {
+				if (b_curtxbw_40mhz) {
+					if (rssi_level == 1)
+						ratr_bitmap &= 0x000f0000;
+					else if (rssi_level == 2)
+						ratr_bitmap &= 0x000ff000;
+					else
+						ratr_bitmap &= 0x000ff015;
+				} else {
+					if (rssi_level == 1)
+						ratr_bitmap &= 0x000f0000;
+					else if (rssi_level == 2)
+						ratr_bitmap &= 0x000ff000;
+					else
+						ratr_bitmap &= 0x000ff005;
+				}
+			} else {
+				if (b_curtxbw_40mhz) {
+					if (rssi_level == 1)
+						ratr_bitmap &= 0x0fff0000;
+					else if (rssi_level == 2)
+						ratr_bitmap &= 0x0ffff000;
+					else
+						ratr_bitmap &= 0x0ffff015;
+				} else {
+					if (rssi_level == 1)
+						ratr_bitmap &= 0x0fff0000;
+					else if (rssi_level == 2)
+						ratr_bitmap &= 0x0ffff000;
+					else
+						ratr_bitmap &= 0x0ffff005;
+				}
+			}
+		}
+		if ((b_curtxbw_40mhz && b_curshortgi_40mhz) ||
+		    (!b_curtxbw_40mhz && b_curshortgi_20mhz)) {
+
+			if (macid == 0)
+				b_shortgi = true;
+			else if (macid == 1)
+				b_shortgi = false;
+		}
+		break;
+
+	case WIRELESS_MODE_AC_24G:
+		ratr_index = RATR_INX_WIRELESS_AC_24N;
+		if (rssi_level == 1)
+			ratr_bitmap &= 0xfc3f0000;
+		else if (rssi_level == 2)
+			ratr_bitmap &= 0xfffff000;
+		else
+			ratr_bitmap &= 0xffffffff;
+		break;
+
+	case WIRELESS_MODE_AC_5G:
+		ratr_index = RATR_INX_WIRELESS_AC_5N;
+
+		if (rtlphy->rf_type == RF_1T1R) {
+			if (rtlhal->hw_type == HARDWARE_TYPE_RTL8812AE) {
+				if (rssi_level == 1)	/* add by Gary for ac-series */
+					ratr_bitmap &= 0x003f8000;
+				else if (rssi_level == 2)
+					ratr_bitmap &= 0x003ff000;
+				else
+					ratr_bitmap &= 0x003ff010;
+			} else {
+				ratr_bitmap &= 0x000ff010;
+			}
+		} else {
+			if (rssi_level == 1)	/* add by Gary for ac-series */
+				ratr_bitmap &= 0xfe3f8000;	/* VHT 2SS MCS3~9 */
+			else if (rssi_level == 2)
+				ratr_bitmap &= 0xfffff000;	/* VHT 2SS MCS0~9 */
+			else
+				ratr_bitmap &= 0xfffff010;	/* All */
+		}
+		break;
+
+	default:
+		ratr_index = RATR_INX_WIRELESS_NGB;
+
+		if (rtlphy->rf_type == RF_1T2R)
+			ratr_bitmap &= 0x000ff0ff;
+		else
+			ratr_bitmap &= 0x0f0ff0ff;
+		break;
+
+	}
+
+	sta_entry->ratr_index = ratr_index;
+
+	RT_TRACE(COMP_RATR, DBG_DMESG,
+		 ("ratr_bitmap :%x\n", ratr_bitmap));
+	*(u32 *) & rate_mask = EF4BYTE((ratr_bitmap & 0x0fffffff) |
+				       (ratr_index << 28));
+	rate_mask[0] = macid;
+	rate_mask[1] = _rtl8821ae_mrate_idx_to_arfr_id(hw, ratr_index, wirelessmode) | (b_shortgi ? 0x80 : 0x00);
+	rate_mask[2] = b_curtxbw_40mhz;
+	/* if (prox_priv->proxim_modeinfo->power_output > 0)
+		rate_mask[2] |= BIT(6); */
+
+	rate_mask[3] = (u8)(ratr_bitmap & 0x000000ff);
+	rate_mask[4] = (u8)((ratr_bitmap & 0x0000ff00) >> 8);
+	rate_mask[5] = (u8)((ratr_bitmap & 0x00ff0000) >> 16);
+	rate_mask[6] = (u8)((ratr_bitmap & 0xff000000) >> 24);
+
+	RT_TRACE(COMP_RATR, DBG_DMESG, ("Rate_index:%x, "
+						 "ratr_val:%x, %x:%x:%x:%x:%x:%x:%x\n",
+						 ratr_index, ratr_bitmap,
+						 rate_mask[0], rate_mask[1],
+						 rate_mask[2], rate_mask[3],
+						 rate_mask[4], rate_mask[5],
+						 rate_mask[6]));
+	rtl8821ae_fill_h2c_cmd(hw, H2C_8821AE_RA_MASK, 7, rate_mask);
+	_rtl8821ae_set_bcn_ctrl_reg(hw, BIT(3), 0);
+}
+
+void rtl8821ae_update_hal_rate_tbl(struct ieee80211_hw *hw,
+		struct ieee80211_sta *sta, u8 rssi_level)
+{
+	struct rtl_priv *rtlpriv = rtl_priv(hw);
+	if (rtlpriv->dm.b_useramask)
+		rtl8821ae_update_hal_rate_mask(hw, sta, rssi_level);
+	else
+		/*RT_TRACE(COMP_RATR,DBG_LOUD,("rtl8821ae_update_hal_rate_tbl(): Error! 8821ae FW RA Only"));*/
+		rtl8821ae_update_hal_rate_table(hw, sta);
+}
+
+void rtl8821ae_update_channel_access_setting(struct ieee80211_hw *hw)
+{
+	struct rtl_priv *rtlpriv = rtl_priv(hw);
+	struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
+	u16 sifs_timer;
+
+	rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_SLOT_TIME,
+				      (u8 *) & mac->slot_time);
+	if (!mac->ht_enable)
+		sifs_timer = 0x0a0a;
+	else
+		sifs_timer = 0x0e0e;
+	rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_SIFS, (u8 *) & sifs_timer);
+}
+
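+/* Checks the hardware RF-kill state: reads GPIO pin control register bit 1
+ * (with the polarity taken from rtlphy->polarity_ctl) and toggles
+ * ppsc->b_hwradiooff when the radio switch has changed. */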
+bool rtl8821ae_gpio_radio_on_off_checking(struct ieee80211_hw *hw, u8 * valid)
+{
+	struct rtl_priv *rtlpriv = rtl_priv(hw);
+	struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw));
+	struct rtl_phy *rtlphy = &(rtlpriv->phy);
+	enum rf_pwrstate e_rfpowerstate_toset, cur_rfstate;
+	u8 u1tmp = 0;
+	bool b_actuallyset = false;
+
+	if (rtlpriv->rtlhal.being_init_adapter)
+		return false;
+
+	if (ppsc->b_swrf_processing)
+		return false;
+
+	spin_lock(&rtlpriv->locks.rf_ps_lock);
+	if (ppsc->rfchange_inprogress) {
+		spin_unlock(&rtlpriv->locks.rf_ps_lock);
+		return false;
+	} else {
+		ppsc->rfchange_inprogress = true;
+		spin_unlock(&rtlpriv->locks.rf_ps_lock);
+	}
+
+	cur_rfstate = ppsc->rfpwr_state;
+
+	rtl_write_byte(rtlpriv, REG_GPIO_IO_SEL_2,
+					rtl_read_byte(rtlpriv, REG_GPIO_IO_SEL_2) & ~(BIT(1)));
+
+	u1tmp = rtl_read_byte(rtlpriv, REG_GPIO_PIN_CTRL_2);
+
+	if (rtlphy->polarity_ctl) {
+		e_rfpowerstate_toset = (u1tmp & BIT(1)) ? ERFOFF : ERFON;
+	} else {
+		e_rfpowerstate_toset = (u1tmp & BIT(1)) ? ERFON : ERFOFF;
+	}
+
+	if ((ppsc->b_hwradiooff == true) && (e_rfpowerstate_toset == ERFON)) {
+		RT_TRACE(COMP_RF, DBG_DMESG,
+			 ("GPIOChangeRF  - HW Radio ON, RF ON\n"));
+
+		e_rfpowerstate_toset = ERFON;
+		ppsc->b_hwradiooff = false;
+		b_actuallyset = true;
+	} else if ((ppsc->b_hwradiooff == false)
+		   && (e_rfpowerstate_toset == ERFOFF)) {
+		RT_TRACE(COMP_RF, DBG_DMESG,
+			 ("GPIOChangeRF  - HW Radio OFF, RF OFF\n"));
+
+		e_rfpowerstate_toset = ERFOFF;
+		ppsc->b_hwradiooff = true;
+		b_actuallyset = true;
+	}
+
+	if (b_actuallyset) {
+		spin_lock(&rtlpriv->locks.rf_ps_lock);
+		ppsc->rfchange_inprogress = false;
+		spin_unlock(&rtlpriv->locks.rf_ps_lock);
+	} else {
+		if (ppsc->reg_rfps_level & RT_RF_OFF_LEVL_HALT_NIC)
+			RT_SET_PS_LEVEL(ppsc, RT_RF_OFF_LEVL_HALT_NIC);
+
+		spin_lock(&rtlpriv->locks.rf_ps_lock);
+		ppsc->rfchange_inprogress = false;
+		spin_unlock(&rtlpriv->locks.rf_ps_lock);
+	}
+
+	*valid = 1;
+	return !ppsc->b_hwradiooff;
+
+}
+
+void rtl8821ae_set_key(struct ieee80211_hw *hw, u32 key_index,
+		     u8 *p_macaddr, bool is_group, u8 enc_algo,
+		     bool is_wepkey, bool clear_all)
+{
+	struct rtl_priv *rtlpriv = rtl_priv(hw);
+	struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
+	struct rtl_efuse *rtlefuse = rtl_efuse(rtl_priv(hw));
+	u8 *macaddr = p_macaddr;
+	u32 entry_id = 0;
+	bool is_pairwise = false;
+
+	static u8 cam_const_addr[4][6] = {
+		{0x00, 0x00, 0x00, 0x00, 0x00, 0x00},
+		{0x00, 0x00, 0x00, 0x00, 0x00, 0x01},
+		{0x00, 0x00, 0x00, 0x00, 0x00, 0x02},
+		{0x00, 0x00, 0x00, 0x00, 0x00, 0x03}
+	};
+	static u8 cam_const_broad[] = {
+		0xff, 0xff, 0xff, 0xff, 0xff, 0xff
+	};
+
+	if (clear_all) {
+		u8 idx = 0;
+		u8 cam_offset = 0;
+		u8 clear_number = 5;
+
+		RT_TRACE(COMP_SEC, DBG_DMESG, ("clear_all\n"));
+
+		for (idx = 0; idx < clear_number; idx++) {
+			rtl_cam_mark_invalid(hw, cam_offset + idx);
+			rtl_cam_empty_entry(hw, cam_offset + idx);
+
+			if (idx < 5) {
+				memset(rtlpriv->sec.key_buf[idx], 0,
+				       MAX_KEY_LEN);
+				rtlpriv->sec.key_len[idx] = 0;
+			}
+		}
+
+	} else {
+		switch (enc_algo) {
+		case WEP40_ENCRYPTION:
+			enc_algo = CAM_WEP40;
+			break;
+		case WEP104_ENCRYPTION:
+			enc_algo = CAM_WEP104;
+			break;
+		case TKIP_ENCRYPTION:
+			enc_algo = CAM_TKIP;
+			break;
+		case AESCCMP_ENCRYPTION:
+			enc_algo = CAM_AES;
+			break;
+		default:
+			RT_TRACE(COMP_ERR, DBG_EMERG, ("switch case "
+								"not process \n"));
+			enc_algo = CAM_TKIP;
+			break;
+		}
+
+		if (is_wepkey || rtlpriv->sec.use_defaultkey) {
+			macaddr = cam_const_addr[key_index];
+			entry_id = key_index;
+		} else {
+			if (is_group) {
+				macaddr = cam_const_broad;
+				entry_id = key_index;
+			} else {
+				if (mac->opmode == NL80211_IFTYPE_AP) {
+					entry_id = rtl_cam_get_free_entry(hw, p_macaddr);
+					if (entry_id >=  TOTAL_CAM_ENTRY) {
+						RT_TRACE(COMP_SEC, DBG_EMERG,
+								("Can not find free hw security cam entry\n"));
+						return;
+					}
+				} else {
+					entry_id = CAM_PAIRWISE_KEY_POSITION;
+				}
+
+				key_index = PAIRWISE_KEYIDX;
+				is_pairwise = true;
+			}
+		}
+
+		if (rtlpriv->sec.key_len[key_index] == 0) {
+			RT_TRACE(COMP_SEC, DBG_DMESG,
+				 ("delete one entry, entry_id is %d\n",entry_id));
+			if (mac->opmode == NL80211_IFTYPE_AP)
+				rtl_cam_del_entry(hw, p_macaddr);
+			rtl_cam_delete_one_entry(hw, p_macaddr, entry_id);
+		} else {
+			RT_TRACE(COMP_SEC, DBG_DMESG, ("add one entry\n"));
+			if (is_pairwise) {
+				RT_TRACE(COMP_SEC, DBG_DMESG, ("set pairwise key\n"));
+
+				rtl_cam_add_one_entry(hw, macaddr, key_index,
+						      entry_id, enc_algo,
+						      CAM_CONFIG_NO_USEDK,
+						      rtlpriv->sec.key_buf[key_index]);
+			} else {
+				RT_TRACE(COMP_SEC, DBG_DMESG, ("set group key\n"));
+
+				if (mac->opmode == NL80211_IFTYPE_ADHOC) {
+					rtl_cam_add_one_entry(hw,
+							rtlefuse->dev_addr,
+							PAIRWISE_KEYIDX,
+							CAM_PAIRWISE_KEY_POSITION,
+							enc_algo,
+							CAM_CONFIG_NO_USEDK,
+							rtlpriv->sec.key_buf
+							[entry_id]);
+				}
+
+				rtl_cam_add_one_entry(hw, macaddr, key_index,
+						entry_id, enc_algo,
+						CAM_CONFIG_NO_USEDK,
+						rtlpriv->sec.key_buf[entry_id]);
+			}
+
+		}
+	}
+}
+
+
+void rtl8812ae_read_bt_coexist_info_from_hwpg(struct ieee80211_hw *hw,
+					      bool auto_load_fail, u8 *hwinfo)
+{
+	struct rtl_priv *rtlpriv = rtl_priv(hw);
+	u8 value;
+
+	if (!auto_load_fail) {
+		value = *(u8 *) & hwinfo[EEPROM_RF_BOARD_OPTION];
+		if (((value & 0xe0) >> 5) == 0x1)
+			rtlpriv->btcoexist.btc_info.btcoexist = 1;
+		else
+			rtlpriv->btcoexist.btc_info.btcoexist = 0;
+		rtlpriv->btcoexist.btc_info.bt_type = BT_RTL8812A;
+
+		value = hwinfo[EEPROM_RF_BT_SETTING];
+		rtlpriv->btcoexist.btc_info.ant_num = (value & 0x1);
+	} else {
+		rtlpriv->btcoexist.btc_info.btcoexist = 0;
+		rtlpriv->btcoexist.btc_info.bt_type = BT_RTL8812A;
+		rtlpriv->btcoexist.btc_info.ant_num = ANT_X2;
+	}
+	/*move BT_InitHalVars() to init_sw_vars*/
+}
+
+void rtl8821ae_read_bt_coexist_info_from_hwpg(struct ieee80211_hw *hw,
+					      bool auto_load_fail, u8 *hwinfo)
+{
+	struct rtl_priv *rtlpriv = rtl_priv(hw);
+	u8 value;
+	u32 tmpu_32;
+
+	if (!auto_load_fail) {
+		tmpu_32 = rtl_read_dword(rtlpriv, REG_MULTI_FUNC_CTRL);
+		if(tmpu_32 & BIT(18))
+			rtlpriv->btcoexist.btc_info.btcoexist = 1;
+		else
+			rtlpriv->btcoexist.btc_info.btcoexist = 0;
+		rtlpriv->btcoexist.btc_info.bt_type = BT_RTL8821A;
+
+		value = hwinfo[EEPROM_RF_BT_SETTING];
+		rtlpriv->btcoexist.btc_info.ant_num = (value & 0x1);
+	} else {
+		rtlpriv->btcoexist.btc_info.btcoexist = 0;
+		rtlpriv->btcoexist.btc_info.bt_type = BT_RTL8821A;
+		rtlpriv->btcoexist.btc_info.ant_num = ANT_X2;
+	}
+	/*move BT_InitHalVars() to init_sw_vars*/
+}
+
+void rtl8821ae_bt_reg_init(struct ieee80211_hw* hw)
+{
+	struct rtl_pci_priv *rtlpcipriv = rtl_pcipriv(hw);
+
+	/* 0:Low, 1:High, 2:From Efuse. */
+	rtlpcipriv->btcoexist.b_reg_bt_iso = 2;
+	/* 0:Idle, 1:None-SCO, 2:SCO, 3:From Counter. */
+	rtlpcipriv->btcoexist.b_reg_bt_sco = 3;
+	/* 0:Disable BT control A-MPDU, 1:Enable BT control A-MPDU. */
+	rtlpcipriv->btcoexist.b_reg_bt_sco = 0;
+}
+
+
+void rtl8821ae_bt_hw_init(struct ieee80211_hw* hw)
+{
+	struct rtl_priv *rtlpriv = rtl_priv(hw);
+
+	if (rtlpriv->cfg->ops->get_btc_status()) {
+		rtlpriv->btcoexist.btc_ops->btc_init_hw_config(rtlpriv);
+	}
+}
+
+void rtl8821ae_suspend(struct ieee80211_hw *hw)
+{
+}
+
+void rtl8821ae_resume(struct ieee80211_hw *hw)
+{
+}
+
+/* Turn on AAP (RCR:bit 0) for promiscuous mode. */
+void rtl8821ae_allow_all_destaddr(struct ieee80211_hw *hw,
+	bool allow_all_da, bool write_into_reg)
+{
+	struct rtl_priv *rtlpriv = rtl_priv(hw);
+	struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
+
+	if (allow_all_da) /* Set BIT0 */
+		rtlpci->receive_config |= RCR_AAP;
+	else /* Clear BIT0 */
+		rtlpci->receive_config &= ~RCR_AAP;
+
+	if (write_into_reg)
+		rtl_write_dword(rtlpriv, REG_RCR, rtlpci->receive_config);
+
+	RT_TRACE(COMP_TURBO | COMP_INIT, DBG_LOUD,
+		("receive_config=0x%08X, write_into_reg=%d\n",
+		rtlpci->receive_config, write_into_reg));
+}
+
diff --git a/drivers/staging/rtl8821ae/rtl8821ae/hw.h b/drivers/staging/rtl8821ae/rtl8821ae/hw.h
new file mode 100644
index 0000000..4fb6bf0
--- /dev/null
+++ b/drivers/staging/rtl8821ae/rtl8821ae/hw.h
@@ -0,0 +1,75 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2009-2010  Realtek Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ * The full GNU General Public License is included in this distribution in the
+ * file called LICENSE.
+ *
+ * Contact Information:
+ * wlanfae <wlanfae@realtek.com>
+ * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
+ * Hsinchu 300, Taiwan.
+ *
+ * Larry Finger <Larry.Finger@lwfinger.net>
+ *
+ *****************************************************************************/
+
+#ifndef __RTL8821AE_HW_H__
+#define __RTL8821AE_HW_H__
+
+void rtl8821ae_get_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val);
+void rtl8821ae_read_eeprom_info(struct ieee80211_hw *hw);
+
+void rtl8821ae_interrupt_recognized(struct ieee80211_hw *hw,
+											 u32 *p_inta, u32 *p_intb);
+int rtl8821ae_hw_init(struct ieee80211_hw *hw);
+void rtl8821ae_card_disable(struct ieee80211_hw *hw);
+void rtl8821ae_enable_interrupt(struct ieee80211_hw *hw);
+void rtl8821ae_disable_interrupt(struct ieee80211_hw *hw);
+int rtl8821ae_set_network_type(struct ieee80211_hw *hw, enum nl80211_iftype type);
+void rtl8821ae_set_check_bssid(struct ieee80211_hw *hw, bool check_bssid);
+void rtl8821ae_set_qos(struct ieee80211_hw *hw, int aci);
+void rtl8821ae_set_beacon_related_registers(struct ieee80211_hw *hw);
+void rtl8821ae_set_beacon_interval(struct ieee80211_hw *hw);
+void rtl8821ae_update_interrupt_mask(struct ieee80211_hw *hw,
+												u32 add_msr, u32 rm_msr);
+void rtl8821ae_set_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val);
+void rtl8821ae_update_hal_rate_tbl(struct ieee80211_hw *hw,
+											struct ieee80211_sta *sta,
+											u8 rssi_level);
+void rtl8821ae_update_channel_access_setting(struct ieee80211_hw *hw);
+bool rtl8821ae_gpio_radio_on_off_checking(struct ieee80211_hw *hw, u8 *valid);
+void rtl8821ae_enable_hw_security_config(struct ieee80211_hw *hw);
+void rtl8821ae_set_key(struct ieee80211_hw *hw, u32 key_index,
+		     				u8 *p_macaddr, bool is_group, u8 enc_algo,
+		     				bool is_wepkey, bool clear_all);
+
+void rtl8821ae_read_bt_coexist_info_from_hwpg(struct ieee80211_hw *hw,
+															bool autoload_fail,
+															u8* hwinfo);
+void rtl8812ae_read_bt_coexist_info_from_hwpg(struct ieee80211_hw *hw,
+															bool autoload_fail,
+															u8* hwinfo);
+void rtl8821ae_bt_reg_init(struct ieee80211_hw* hw);
+void rtl8821ae_bt_hw_init(struct ieee80211_hw* hw);
+void rtl8821ae_suspend(struct ieee80211_hw *hw);
+void rtl8821ae_resume(struct ieee80211_hw *hw);
+void rtl8821ae_allow_all_destaddr(struct ieee80211_hw *hw,
+										  bool allow_all_da,
+										  bool write_into_reg);
+void _rtl8821ae_stop_tx_beacon(struct ieee80211_hw *hw);
+void _rtl8821ae_resume_tx_beacon(struct ieee80211_hw *hw);
+#endif
diff --git a/drivers/staging/rtl8821ae/rtl8821ae/led.c b/drivers/staging/rtl8821ae/rtl8821ae/led.c
new file mode 100644
index 0000000..130a4f4
--- /dev/null
+++ b/drivers/staging/rtl8821ae/rtl8821ae/led.c
@@ -0,0 +1,239 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2009-2010  Realtek Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ * The full GNU General Public License is included in this distribution in the
+ * file called LICENSE.
+ *
+ * Contact Information:
+ * wlanfae <wlanfae@realtek.com>
+ * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
+ * Hsinchu 300, Taiwan.
+ *
+ * Larry Finger <Larry.Finger@lwfinger.net>
+ *
+ *****************************************************************************/
+
+#include "../wifi.h"
+#include "../pci.h"
+#include "reg.h"
+
+static void _rtl8821ae_init_led(struct ieee80211_hw *hw,
+			      					 struct rtl_led *pled,
+			      					 enum rtl_led_pin ledpin)
+{
+	pled->hw = hw;
+	pled->ledpin = ledpin;
+	pled->b_ledon = false;
+}
+
+void rtl8821ae_sw_led_on(struct ieee80211_hw *hw, struct rtl_led *pled)
+{
+	u8 ledcfg;
+	struct rtl_priv *rtlpriv = rtl_priv(hw);
+
+	RT_TRACE(COMP_LED, DBG_LOUD,
+		 ("LedAddr:%X ledpin=%d\n", REG_LEDCFG2, pled->ledpin));
+
+	switch (pled->ledpin) {
+	case LED_PIN_GPIO0:
+		break;
+	case LED_PIN_LED0:
+		ledcfg = rtl_read_byte(rtlpriv, REG_LEDCFG2);
+		ledcfg &= ~BIT(6);
+		rtl_write_byte(rtlpriv,
+			       REG_LEDCFG2, (ledcfg & 0xf0) | BIT(5));
+		break;
+	case LED_PIN_LED1:
+		ledcfg = rtl_read_byte(rtlpriv, REG_LEDCFG1);
+		rtl_write_byte(rtlpriv, REG_LEDCFG1, ledcfg & 0x10);
+		break;
+	default:
+		RT_TRACE(COMP_ERR, DBG_EMERG,
+			 ("switch case not process \n"));
+		break;
+	}
+	pled->b_ledon = true;
+}
+
+void rtl8812ae_sw_led_on(struct ieee80211_hw *hw, struct rtl_led *pled)
+{
+	u16 	ledreg = REG_LEDCFG1;
+	u8 	ledcfg = 0;
+	struct rtl_priv *rtlpriv = rtl_priv(hw);
+
+	switch (pled->ledpin) {
+		case LED_PIN_LED0:
+			ledreg = REG_LEDCFG1;
+			break;
+
+		case LED_PIN_LED1:
+			ledreg = REG_LEDCFG2;
+			break;
+
+		case LED_PIN_GPIO0:
+		default:
+			break;
+	}
+
+	RT_TRACE(COMP_LED, DBG_LOUD, ("In SwLedOn, LedAddr:%X LEDPIN=%d \n", ledreg, pled->ledpin));
+
+	ledcfg =  rtl_read_byte(rtlpriv, ledreg);
+	ledcfg |= BIT(5); /*Set 0x4c[21]*/
+	ledcfg &= ~(BIT(7) | BIT(6) | BIT(3) | BIT(2) | BIT(1) | BIT(0));
+		/*Clear 0x4c[23:22] and 0x4c[19:16]*/
+	rtl_write_byte(rtlpriv, ledreg, ledcfg); /*SW control led0 on.*/
+	pled->b_ledon = true;
+}
+
+void rtl8821ae_sw_led_off(struct ieee80211_hw *hw, struct rtl_led *pled)
+{
+	struct rtl_priv *rtlpriv = rtl_priv(hw);
+	struct rtl_pci_priv *pcipriv = rtl_pcipriv(hw);
+	u8 ledcfg;
+
+	RT_TRACE(COMP_LED, DBG_LOUD,
+		 ("LedAddr:%X ledpin=%d\n", REG_LEDCFG2, pled->ledpin));
+
+	ledcfg = rtl_read_byte(rtlpriv, REG_LEDCFG2);
+
+	switch (pled->ledpin) {
+	case LED_PIN_GPIO0:
+		break;
+	case LED_PIN_LED0:
+		ledcfg &= 0xf0;
+		if (pcipriv->ledctl.bled_opendrain == true) {
+			ledcfg &= 0x90; /* Set to software control. */
+			rtl_write_byte(rtlpriv, REG_LEDCFG2, (ledcfg|BIT(3)));
+			ledcfg = rtl_read_byte(rtlpriv, REG_MAC_PINMUX_CFG);
+			ledcfg &= 0xFE;
+			rtl_write_byte(rtlpriv, REG_MAC_PINMUX_CFG, ledcfg);
+		} else {
+			ledcfg &= ~BIT(6);
+			rtl_write_byte(rtlpriv, REG_LEDCFG2,
+					(ledcfg | BIT(3) | BIT(5)));
+		}
+		break;
+	case LED_PIN_LED1:
+		ledcfg = rtl_read_byte(rtlpriv, REG_LEDCFG1);
+		ledcfg &= 0x10; /* Set to software control. */
+		rtl_write_byte(rtlpriv, REG_LEDCFG1, ledcfg|BIT(3));
+
+		break;
+	default:
+		RT_TRACE(COMP_ERR, DBG_EMERG,
+			 ("switch case not process \n"));
+		break;
+	}
+	pled->b_ledon = false;
+}
+
+void rtl8812ae_sw_led_off(struct ieee80211_hw *hw, struct rtl_led *pled)
+{
+	u16 ledreg = REG_LEDCFG1;
+	struct rtl_priv *rtlpriv = rtl_priv(hw);
+	struct rtl_pci_priv *pcipriv = rtl_pcipriv(hw);
+
+	switch (pled->ledpin) {
+		case LED_PIN_LED0:
+			ledreg = REG_LEDCFG1;
+			break;
+
+		case LED_PIN_LED1:
+			ledreg = REG_LEDCFG2;
+			break;
+
+		case LED_PIN_GPIO0:
+		default:
+			break;
+	}
+
+	RT_TRACE(COMP_LED, DBG_LOUD, ("In SwLedOff, LedAddr:%X LEDPIN=%d\n", ledreg, pled->ledpin));
+
+	if (pcipriv->ledctl.bled_opendrain == true) {
+		/* Open-drain arrangement for controlling the LED */
+		u8 ledcfg = rtl_read_byte(rtlpriv, ledreg);
+
+		ledcfg &= 0xd0; /* Set to software control. */
+		rtl_write_byte(rtlpriv, ledreg, (ledcfg | BIT(3)));
+
+		/*Open-drain arrangement*/
+		ledcfg = rtl_read_byte(rtlpriv, REG_MAC_PINMUX_CFG);
+		ledcfg &= 0xFE;/*Set GPIO[8] to input mode*/
+		rtl_write_byte(rtlpriv, REG_MAC_PINMUX_CFG, ledcfg);
+	}
+	} else {
+	}
+
+	pled->b_ledon = false;
+}
+
+void rtl8821ae_init_sw_leds(struct ieee80211_hw *hw)
+{
+	struct rtl_pci_priv *pcipriv = rtl_pcipriv(hw);
+	_rtl8821ae_init_led(hw, &(pcipriv->ledctl.sw_led0), LED_PIN_LED0);
+	_rtl8821ae_init_led(hw, &(pcipriv->ledctl.sw_led1), LED_PIN_LED1);
+}
+
+static void _rtl8821ae_sw_led_control(struct ieee80211_hw *hw,
+				    enum led_ctl_mode ledaction)
+{
+	struct rtl_pci_priv *pcipriv = rtl_pcipriv(hw);
+	struct rtl_led *pLed0 = &(pcipriv->ledctl.sw_led0);
+	struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
+	switch (ledaction) {
+	case LED_CTL_POWER_ON:
+	case LED_CTL_LINK:
+	case LED_CTL_NO_LINK:
+		if (rtlhal->hw_type == HARDWARE_TYPE_RTL8812AE)
+			rtl8812ae_sw_led_on(hw, pLed0);
+		else
+			rtl8821ae_sw_led_on(hw, pLed0);
+		break;
+	case LED_CTL_POWER_OFF:
+		if (rtlhal->hw_type == HARDWARE_TYPE_RTL8812AE)
+			rtl8812ae_sw_led_off(hw, pLed0);
+		else
+			rtl8821ae_sw_led_off(hw, pLed0);
+		break;
+	default:
+		break;
+	}
+}
+
+void rtl8821ae_led_control(struct ieee80211_hw *hw,
+			enum led_ctl_mode ledaction)
+{
+	struct rtl_priv *rtlpriv = rtl_priv(hw);
+	struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw));
+
+	if ((ppsc->rfoff_reason > RF_CHANGE_BY_PS) &&
+	    (ledaction == LED_CTL_TX ||
+	     ledaction == LED_CTL_RX ||
+	     ledaction == LED_CTL_SITE_SURVEY ||
+	     ledaction == LED_CTL_LINK ||
+	     ledaction == LED_CTL_NO_LINK ||
+	     ledaction == LED_CTL_START_TO_LINK ||
+	     ledaction == LED_CTL_POWER_ON)) {
+		return;
+	}
+	RT_TRACE(COMP_LED, DBG_LOUD, ("ledaction %d\n",
+				ledaction));
+	_rtl8821ae_sw_led_control(hw, ledaction);
+}
diff --git a/drivers/staging/rtl8821ae/rtl8821ae/led.h b/drivers/staging/rtl8821ae/rtl8821ae/led.h
new file mode 100644
index 0000000..44be401
--- /dev/null
+++ b/drivers/staging/rtl8821ae/rtl8821ae/led.h
@@ -0,0 +1,40 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2009-2010  Realtek Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ * The full GNU General Public License is included in this distribution in the
+ * file called LICENSE.
+ *
+ * Contact Information:
+ * wlanfae <wlanfae@realtek.com>
+ * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
+ * Hsinchu 300, Taiwan.
+ *
+ * Larry Finger <Larry.Finger@lwfinger.net>
+ *
+ *****************************************************************************/
+
+#ifndef __RTL8821AE_LED_H__
+#define __RTL8821AE_LED_H__
+
+void rtl8821ae_init_sw_leds(struct ieee80211_hw *hw);
+void rtl8821ae_sw_led_on(struct ieee80211_hw *hw, struct rtl_led *pled);
+void rtl8812ae_sw_led_on(struct ieee80211_hw *hw, struct rtl_led *pled);
+void rtl8821ae_sw_led_off(struct ieee80211_hw *hw, struct rtl_led *pled);
+void rtl8812ae_sw_led_off(struct ieee80211_hw *hw, struct rtl_led *pled);
+void rtl8821ae_led_control(struct ieee80211_hw *hw, enum led_ctl_mode ledaction);
+
+#endif
diff --git a/drivers/staging/rtl8821ae/rtl8821ae/phy.c b/drivers/staging/rtl8821ae/rtl8821ae/phy.c
new file mode 100644
index 0000000..d02fca3
--- /dev/null
+++ b/drivers/staging/rtl8821ae/rtl8821ae/phy.c
@@ -0,0 +1,5525 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2009-2010  Realtek Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ * The full GNU General Public License is included in this distribution in the
+ * file called LICENSE.
+ *
+ * Contact Information:
+ * wlanfae <wlanfae@realtek.com>
+ * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
+ * Hsinchu 300, Taiwan.
+ *
+ * Larry Finger <Larry.Finger@lwfinger.net>
+ *
+ *****************************************************************************/
+
+#include "../wifi.h"
+#include "../pci.h"
+#include "../ps.h"
+#include "reg.h"
+#include "def.h"
+#include "phy.h"
+#include "rf.h"
+#include "dm.h"
+#include "table.h"
+#include "trx.h"
+#include "../btcoexist/halbt_precomp.h"
+#include "hw.h"
+
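+/* Step through a register/value table loaded from the header-file arrays:
+ * advance the index by two and fetch the next (v1, v2) pair. */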
+#define READ_NEXT_PAIR(array_table, v1, v2, i) \
+	do { i += 2; v1 = array_table[i]; v2 = array_table[i+1]; } while (0)
+
+static u32 _rtl8821ae_phy_rf_serial_read(struct ieee80211_hw *hw,
+				      enum radio_path rfpath, u32 offset);
+static void _rtl8821ae_phy_rf_serial_write(struct ieee80211_hw *hw,
+					enum radio_path rfpath, u32 offset,
+					u32 data);
+static u32 _rtl8821ae_phy_calculate_bit_shift(u32 bitmask);
+static bool _rtl8821ae_phy_bb8821a_config_parafile(struct ieee80211_hw *hw);
+static bool _rtl8812ae_phy_config_mac_with_headerfile(struct ieee80211_hw *hw);
+static bool _rtl8821ae_phy_config_mac_with_headerfile(struct ieee80211_hw *hw);
+static bool _rtl8821ae_phy_config_bb_with_headerfile(struct ieee80211_hw *hw,
+						u8 configtype);
+static bool _rtl8812ae_phy_config_bb_with_headerfile(struct ieee80211_hw *hw,
+						u8 configtype);
+static bool _rtl8821ae_phy_config_bb_with_pgheaderfile(struct ieee80211_hw *hw,
+						  u8 configtype);
+static bool _rtl8812ae_phy_config_bb_with_pgheaderfile(struct ieee80211_hw *hw,
+						u8 configtype);
+static void _rtl8821ae_phy_init_bb_rf_register_definition(struct ieee80211_hw *hw);
+
+static long _rtl8821ae_phy_txpwr_idx_to_dbm(struct ieee80211_hw *hw,
+					 enum wireless_mode wirelessmode,
+					 u8 txpwridx);
+static void rtl8821ae_phy_set_rf_on(struct ieee80211_hw *hw);
+static void rtl8821ae_phy_set_io(struct ieee80211_hw *hw);
+
+void rtl8812ae_fixspur(
+	struct ieee80211_hw *hw,
+	enum ht_channel_width band_width,
+	u8 channel
+)
+{
+	struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
+
+	/*C cut Item12 ADC FIFO CLOCK*/
+	if (IS_VENDOR_8812A_C_CUT(rtlhal->version)) {
+		if (band_width == HT_CHANNEL_WIDTH_20_40 && channel == 11)
+			rtl_set_bbreg(hw, RRFMOD, 0xC00, 0x3);
+			/* 0x8AC[11:10] = 2'b11*/
+		else
+			rtl_set_bbreg(hw, RRFMOD, 0xC00, 0x2);
+			/* 0x8AC[11:10] = 2'b10*/
+
+		/* <20120914, Kordan> A workaround to resolve the
+		2480 MHz spur by setting the ADC clock to 160M. (Asked by Binson) */
+		if (band_width == HT_CHANNEL_WIDTH_20 &&
+			(channel == 13 || channel == 14)) {
+			rtl_set_bbreg(hw, RRFMOD, 0x300, 0x3);
+			/*0x8AC[9:8] = 2'b11*/
+			rtl_set_bbreg(hw, RADC_BUF_CLK, BIT(30), 1);
+			/* 0x8C4[30] = 1*/
+		} else if (band_width == HT_CHANNEL_WIDTH_20_40 &&
+			channel == 11) {
+			rtl_set_bbreg(hw, RADC_BUF_CLK, BIT(30), 1);
+			/*0x8C4[30] = 1*/
+		} else if (band_width != HT_CHANNEL_WIDTH_80) {
+			rtl_set_bbreg(hw, RRFMOD, 0x300, 0x2);
+			/*0x8AC[9:8] = 2'b10*/
+			rtl_set_bbreg(hw, RADC_BUF_CLK, BIT(30), 0);
+			/*0x8C4[30] = 0*/
+		}
+	} else if (rtlhal->hw_type == HARDWARE_TYPE_RTL8812AE) {
+		/* <20120914, Kordan> A workaround to resolve the
+		2480 MHz spur by setting the ADC clock to 160M. (Asked by Binson) */
+		if (band_width == HT_CHANNEL_WIDTH_20 &&
+			(channel == 13 || channel == 14))
+			rtl_set_bbreg(hw, RRFMOD, 0x300, 0x3);
+			/*0x8AC[9:8] = 11*/
+		else if (channel <= 14) /* 2.4G only */
+			rtl_set_bbreg(hw, RRFMOD, 0x300, 0x2);
+			/*0x8AC[9:8] = 10*/
+	}
+
+}
+
+u32 rtl8821ae_phy_query_bb_reg(struct ieee80211_hw *hw, u32 regaddr, u32 bitmask)
+{
+	struct rtl_priv *rtlpriv = rtl_priv(hw);
+	u32 returnvalue, originalvalue, bitshift;
+
+	RT_TRACE(COMP_RF, DBG_TRACE, ("regaddr(%#x), "
+					       "bitmask(%#x)\n", regaddr,
+					       bitmask));
+	originalvalue = rtl_read_dword(rtlpriv, regaddr);
+	bitshift = _rtl8821ae_phy_calculate_bit_shift(bitmask);
+	returnvalue = (originalvalue & bitmask) >> bitshift;
+
+	RT_TRACE(COMP_RF, DBG_TRACE, ("BBR MASK=0x%x "
+					       "Addr[0x%x]=0x%x\n", bitmask,
+					       regaddr, originalvalue));
+
+	return returnvalue;
+
+}
+
+void rtl8821ae_phy_set_bb_reg(struct ieee80211_hw *hw,
+			   u32 regaddr, u32 bitmask, u32 data)
+{
+	struct rtl_priv *rtlpriv = rtl_priv(hw);
+	u32 originalvalue, bitshift;
+
+	RT_TRACE(COMP_RF, DBG_TRACE, ("regaddr(%#x), bitmask(%#x),"
+					       " data(%#x)\n", regaddr, bitmask,
+					       data));
+
+	if (bitmask != MASKDWORD) {
+		originalvalue = rtl_read_dword(rtlpriv, regaddr);
+		bitshift = _rtl8821ae_phy_calculate_bit_shift(bitmask);
+		data = ((originalvalue & (~bitmask)) | ((data << bitshift) & bitmask));
+	}
+
+	rtl_write_dword(rtlpriv, regaddr, data);
+
+	RT_TRACE(COMP_RF, DBG_TRACE, ("regaddr(%#x), bitmask(%#x),"
+					       " data(%#x)\n", regaddr, bitmask,
+					       data));
+
+}
+
+u32 rtl8821ae_phy_query_rf_reg(struct ieee80211_hw *hw,
+			    enum radio_path rfpath, u32 regaddr, u32 bitmask)
+{
+	struct rtl_priv *rtlpriv = rtl_priv(hw);
+	u32 original_value, readback_value, bitshift;
+	unsigned long flags;
+
+	RT_TRACE(COMP_RF, DBG_TRACE, ("regaddr(%#x), "
+					       "rfpath(%#x), bitmask(%#x)\n",
+					       regaddr, rfpath, bitmask));
+
+	spin_lock_irqsave(&rtlpriv->locks.rf_lock, flags);
+
+
+	original_value = _rtl8821ae_phy_rf_serial_read(hw,rfpath, regaddr);
+	bitshift = _rtl8821ae_phy_calculate_bit_shift(bitmask);
+	readback_value = (original_value & bitmask) >> bitshift;
+
+	spin_unlock_irqrestore(&rtlpriv->locks.rf_lock, flags);
+
+	RT_TRACE(COMP_RF, DBG_TRACE,
+		 ("regaddr(%#x), rfpath(%#x), "
+		  "bitmask(%#x), original_value(%#x)\n",
+		  regaddr, rfpath, bitmask, original_value));
+
+	return readback_value;
+}
+
+void rtl8821ae_phy_set_rf_reg(struct ieee80211_hw *hw,
+			   enum radio_path rfpath,
+			   u32 regaddr, u32 bitmask, u32 data)
+{
+	struct rtl_priv *rtlpriv = rtl_priv(hw);
+	u32 original_value, bitshift;
+	unsigned long flags;
+
+	RT_TRACE(COMP_RF, DBG_TRACE,
+		 ("regaddr(%#x), bitmask(%#x), data(%#x), rfpath(%#x)\n",
+		  regaddr, bitmask, data, rfpath));
+
+	spin_lock_irqsave(&rtlpriv->locks.rf_lock, flags);
+
+	if (bitmask != RFREG_OFFSET_MASK) {
+			original_value = _rtl8821ae_phy_rf_serial_read(hw,
+								    rfpath,
+								    regaddr);
+			bitshift = _rtl8821ae_phy_calculate_bit_shift(bitmask);
+			data =
+			    ((original_value & (~bitmask)) |
+			     (data << bitshift));
+		}
+
+	_rtl8821ae_phy_rf_serial_write(hw, rfpath, regaddr, data);
+
+
+	spin_unlock_irqrestore(&rtlpriv->locks.rf_lock, flags);
+
+	RT_TRACE(COMP_RF, DBG_TRACE, ("regaddr(%#x), "
+					       "bitmask(%#x), data(%#x), rfpath(%#x)\n",
+					       regaddr, bitmask, data, rfpath));
+
+}
+
+static u32 _rtl8821ae_phy_rf_serial_read(struct ieee80211_hw *hw,
+				      enum radio_path rfpath, u32 offset)
+{
+	struct rtl_priv *rtlpriv = rtl_priv(hw);
+	struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
+	bool b_is_pi_mode = false;
+	u32 retvalue = 0;
+
+	/* 2009/06/17 MH We can not execute IO for power save or other accident mode.*/
+	if (RT_CANNOT_IO(hw)) {
+		RT_TRACE(COMP_ERR, DBG_EMERG, ("return all one\n"));
+		return 0xFFFFFFFF;
+	}
+
+	/* <20120809, Kordan> CCA OFF(when entering), asked by James to avoid reading the wrong value.
+	    <20120828, Kordan> Toggling CCA would affect RF 0x0, skip it!*/
+	if (offset != 0x0 &&
+		!((rtlhal->hw_type == HARDWARE_TYPE_RTL8821AE)
+		|| (IS_VENDOR_8812A_C_CUT(rtlhal->version))))
+		rtl_set_bbreg(hw, RCCAONSEC, 0x8, 1);
+
+	offset &= 0xff;
+
+	if (rfpath == RF90_PATH_A)
+		b_is_pi_mode = (bool) rtl_get_bbreg(hw, 0xC00, 0x4);
+	else if (rfpath == RF90_PATH_B)
+		b_is_pi_mode = (bool) rtl_get_bbreg(hw, 0xE00, 0x4);
+
+	rtl_set_bbreg(hw, RHSSIREAD_8821AE, 0xff, offset);
+
+	if ((rtlhal->hw_type == HARDWARE_TYPE_RTL8821AE)
+		|| (IS_VENDOR_8812A_C_CUT(rtlhal->version)))
+		udelay(20);
+
+	if (b_is_pi_mode) {
+		if (rfpath == RF90_PATH_A)
+			retvalue = rtl_get_bbreg(hw, RA_PIREAD_8821A, BLSSIREADBACKDATA);
+		else if (rfpath == RF90_PATH_B)
+			retvalue = rtl_get_bbreg(hw, RB_PIREAD_8821A, BLSSIREADBACKDATA);
+	} else {
+		if (rfpath == RF90_PATH_A)
+			retvalue = rtl_get_bbreg(hw, RA_SIREAD_8821A, BLSSIREADBACKDATA);
+		else if (rfpath == RF90_PATH_B)
+			retvalue = rtl_get_bbreg(hw, RB_SIREAD_8821A, BLSSIREADBACKDATA);
+	}
+
+	/*<20120809, Kordan> CCA ON(when exiting), asked by James to avoid reading the wrong value.
+	    <20120828, Kordan> Toggling CCA would affect RF 0x0, skip it!*/
+	if (offset != 0x0 && !((rtlhal->hw_type == HARDWARE_TYPE_RTL8821AE)
+		|| (IS_VENDOR_8812A_C_CUT(rtlhal->version))))
+		rtl_set_bbreg(hw, RCCAONSEC, 0x8, 0);
+	return retvalue;
+}
+
+#if 0
+static u32 _rtl8821ae_phy_rf_serial_read(struct ieee80211_hw *hw,
+				      enum radio_path rfpath, u32 offset)
+{
+	struct rtl_priv *rtlpriv = rtl_priv(hw);
+	struct rtl_phy *rtlphy = &(rtlpriv->phy);
+	struct bb_reg_def *pphyreg = &rtlphy->phyreg_def[rfpath];
+	u32 newoffset;
+	u32 tmplong, tmplong2;
+	u8 rfpi_enable = 0;
+	u32 retvalue;
+
+	offset &= 0xff;
+	newoffset = offset;
+	if (RT_CANNOT_IO(hw)) {
+		RT_TRACE(COMP_ERR, DBG_EMERG, ("return all one\n"));
+		return 0xFFFFFFFF;
+	}
+	tmplong = rtl_get_bbreg(hw, RFPGA0_XA_HSSIPARAMETER2, MASKDWORD);
+	if (rfpath == RF90_PATH_A)
+		tmplong2 = tmplong;
+	else
+		tmplong2 = rtl_get_bbreg(hw, pphyreg->rfhssi_para2, MASKDWORD);
+	tmplong2 = (tmplong2 & (~BLSSIREADADDRESS)) |
+	    (newoffset << 23) | BLSSIREADEDGE;
+	rtl_set_bbreg(hw, RFPGA0_XA_HSSIPARAMETER2, MASKDWORD,
+		      tmplong & (~BLSSIREADEDGE));
+	mdelay(1);
+	rtl_set_bbreg(hw, pphyreg->rfhssi_para2, MASKDWORD, tmplong2);
+	mdelay(1);
+	/*rtl_set_bbreg(hw, RFPGA0_XA_HSSIPARAMETER2, MASKDWORD,
+		      tmplong | BLSSIREADEDGE);*/
+	mdelay(1);
+	if (rfpath == RF90_PATH_A)
+		rfpi_enable = (u8) rtl_get_bbreg(hw, RFPGA0_XA_HSSIPARAMETER1,
+						 BIT(8));
+	else if (rfpath == RF90_PATH_B)
+		rfpi_enable = (u8) rtl_get_bbreg(hw, RFPGA0_XB_HSSIPARAMETER1,
+						 BIT(8));
+	if (rfpi_enable)
+		retvalue = rtl_get_bbreg(hw, pphyreg->rflssi_readbackpi,
+					 BLSSIREADBACKDATA);
+	else
+		retvalue = rtl_get_bbreg(hw, pphyreg->rflssi_readback,
+					 BLSSIREADBACKDATA);
+	RT_TRACE(COMP_RF, DBG_TRACE, ("RFR-%d Addr[0x%x]=0x%x\n",
+					       rfpath, pphyreg->rflssi_readback,
+					       retvalue));
+	return retvalue;
+}
+#endif
+
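+/* Writes an RF register over the 3-wire serial interface: the register
+ * offset is packed into bits [27:20] and the data into bits [19:0] of the
+ * rf3wire_offset BB register. */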
+static void _rtl8821ae_phy_rf_serial_write(struct ieee80211_hw *hw,
+					enum radio_path rfpath, u32 offset,
+					u32 data)
+{
+	u32 data_and_addr;
+	u32 newoffset;
+	struct rtl_priv *rtlpriv = rtl_priv(hw);
+	struct rtl_phy *rtlphy = &(rtlpriv->phy);
+	struct bb_reg_def *pphyreg = &rtlphy->phyreg_def[rfpath];
+
+	if (RT_CANNOT_IO(hw)) {
+		RT_TRACE(COMP_ERR, DBG_EMERG, ("stop\n"));
+		return;
+	}
+	offset &= 0xff;
+	newoffset = offset;
+	data_and_addr = ((newoffset << 20) | (data & 0x000fffff)) & 0x0fffffff;
+	rtl_set_bbreg(hw, pphyreg->rf3wire_offset, MASKDWORD, data_and_addr);
+	RT_TRACE(COMP_RF, DBG_TRACE, ("RFW-%d Addr[0x%x]=0x%x\n",
+					       rfpath, pphyreg->rf3wire_offset,
+					       data_and_addr));
+}
+
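+/* Returns the position of the least significant set bit in bitmask; the
+ * BB/RF query and set helpers use it to shift field values into place. */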
+static u32 _rtl8821ae_phy_calculate_bit_shift(u32 bitmask)
+{
+	u32 i;
+
+	for (i = 0; i <= 31; i++) {
+		if (((bitmask >> i) & 0x1) == 1)
+			break;
+	}
+	return i;
+}
+
+bool rtl8821ae_phy_mac_config(struct ieee80211_hw *hw)
+{
+	struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
+	bool rtstatus = 0;
+
+	if (rtlhal->hw_type == HARDWARE_TYPE_RTL8812AE)
+		rtstatus = _rtl8812ae_phy_config_mac_with_headerfile(hw);
+	else
+		rtstatus = _rtl8821ae_phy_config_mac_with_headerfile(hw);
+
+	return rtstatus;
+}
+
+bool rtl8821ae_phy_bb_config(struct ieee80211_hw *hw)
+{
+	bool rtstatus = true;
+	struct rtl_priv *rtlpriv = rtl_priv(hw);
+	struct rtl_efuse *rtlefuse = rtl_efuse(rtl_priv(hw));
+	struct rtl_phy *rtlphy = &(rtlpriv->phy);
+	struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
+	u8 regval;
+	u8 crystal_cap;
+	//u32 tmp;
+
+	_rtl8821ae_phy_init_bb_rf_register_definition(hw);
+
+	regval = rtl_read_byte(rtlpriv, REG_SYS_FUNC_EN);
+	regval |= FEN_PCIEA;
+	rtl_write_byte(rtlpriv, REG_SYS_FUNC_EN, regval);
+	rtl_write_byte(rtlpriv, REG_SYS_FUNC_EN,
+				regval | FEN_BB_GLB_RSTN | FEN_BBRSTB);
+
+	rtl_write_byte(rtlpriv, REG_RF_CTRL, 0x7);/*RF_EN | RF_RSTB | RF_SDMRSTB*/
+	rtl_write_byte(rtlpriv, REG_OPT_CTRL + 2, 0x7);/*RF_EN | RF_RSTB | RF_SDMRSTB*/
+
+	rtstatus = _rtl8821ae_phy_bb8821a_config_parafile(hw);
+
+	if (rtlhal->hw_type == HARDWARE_TYPE_RTL8812AE) {
+		crystal_cap = rtlefuse->crystalcap & 0x3F;
+		rtl_set_bbreg(hw, REG_MAC_PHY_CTRL, 0x7FF80000, (crystal_cap | (crystal_cap << 6)));
+	} else {
+		crystal_cap = rtlefuse->crystalcap & 0x3F;
+		rtl_set_bbreg(hw, REG_MAC_PHY_CTRL, 0xFFF000, (crystal_cap | (crystal_cap << 6)));
+	}
+	rtlphy->reg_837 = rtl_read_byte(rtlpriv, 0x837);
+
+	return rtstatus;
+}
+
+bool rtl8821ae_phy_rf_config(struct ieee80211_hw *hw)
+{
+	return rtl8821ae_phy_rf6052_config(hw);
+}
+
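+/*
+ * Select the TX BB swing value that gets programmed into the 0xC1C/0xE1C
+ * scale field when switching bands.  If efuse autoload failed, hard-coded
+ * per-band defaults are used; otherwise the swing index is read back from
+ * efuse offsets 0xC6 (2.4G) and 0xC7 (5G).
+ */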
+u32 phy_get_tx_bb_swing_8812A(struct ieee80211_hw *hw, u8 band, u8 rf_path)
+{
+	struct rtl_priv *rtlpriv = rtl_priv(hw);
+	struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
+	struct rtl_dm *rtldm = rtl_dm(rtlpriv);
+	struct rtl_efuse *rtlefuse = rtl_efuse(rtl_priv(hw));
+
+	char bb_swing_2g = (char) (-1 * 0xFF);
+	char bb_swing_5g = (char) (-1 * 0xFF);
+	u32  out = 0x200;
+	const char auto_temp = -1;
+
+	RT_TRACE(COMP_SCAN, DBG_LOUD,
+		("===> PHY_GetTxBBSwing_8812A, bbSwing_2G: %d, bbSwing_5G: %d\n",
+		(int)bb_swing_2g, (int)bb_swing_5g));
+
+	if (rtlefuse->autoload_failflag) {
+		if (band == BAND_ON_2_4G) {
+			rtldm->bb_swing_diff_2g = bb_swing_2g;
+			if      (bb_swing_2g == 0)  out = 0x200; //  0 dB
+			else if (bb_swing_2g == -3) out = 0x16A; // -3 dB
+			else if (bb_swing_2g == -6) out = 0x101; // -6 dB
+			else if (bb_swing_2g == -9) out = 0x0B6; // -9 dB
+			else {
+			        rtldm->bb_swing_diff_2g = 0;
+			        out = 0x200;
+			}
+
+		} else if (band == BAND_ON_5G) {
+			rtldm->bb_swing_diff_5g = bb_swing_5g;
+			if      (bb_swing_5g == 0)  out = 0x200; //  0 dB
+			else if (bb_swing_5g == -3) out = 0x16A; // -3 dB
+			else if (bb_swing_5g == -6) out = 0x101; // -6 dB
+			else if (bb_swing_5g == -9) out = 0x0B6; // -9 dB
+			else {
+				if ( rtlhal->hw_type == HARDWARE_TYPE_RTL8821AE) {
+					rtldm->bb_swing_diff_5g = -3;
+					out = 0x16A;
+				} else  {
+					rtldm->bb_swing_diff_5g = 0;
+					out = 0x200;
+				}
+			}
+		} else  {
+				rtldm->bb_swing_diff_2g = -3;
+				rtldm->bb_swing_diff_5g = -3;
+				out = 0x16A; // -3 dB
+		}
+	} else {
+		u32 swing = 0, swing_a = 0, swing_b = 0;
+
+		if (band == BAND_ON_2_4G) {
+			if (0xFF == auto_temp) {
+				efuse_shadow_read(hw, 1, 0xC6, (u32 *)&swing);
+				swing = (swing == 0xFF) ? 0x00 : swing;
+			}
+			else if (bb_swing_2g ==  0) swing = 0x00; //  0 dB
+			else if (bb_swing_2g == -3) swing = 0x05; // -3 dB
+			else if (bb_swing_2g == -6) swing = 0x0A; // -6 dB
+			else if (bb_swing_2g == -9) swing = 0xFF; // -9 dB
+			else swing = 0x00;
+		} else {
+			if (0xFF == auto_temp) {
+				efuse_shadow_read(hw, 1, 0xC7, (u32 *)&swing);
+				swing = (swing == 0xFF) ? 0x00 : swing;
+			}
+			else if (bb_swing_5g ==  0) swing = 0x00; //  0 dB
+			else if (bb_swing_5g == -3) swing = 0x05; // -3 dB
+			else if (bb_swing_5g == -6) swing = 0x0A; // -6 dB
+			else if (bb_swing_5g == -9) swing = 0xFF; // -9 dB
+			else swing = 0x00;
+		}
+
+		swing_a = (swing & 0x3) >> 0; // 0xC6/C7[1:0]
+		swing_b = (swing & 0xC) >> 2; // 0xC6/C7[3:2]
+		RT_TRACE(COMP_SCAN, DBG_LOUD,
+		("===> PHY_GetTxBBSwing_8812A, swingA: 0x%X, swingB: 0x%X\n",
+		swing_a, swing_b));
+
+		//3 Path-A
+		if (swing_a == 0x0) {
+			if (band == BAND_ON_2_4G)
+				rtldm->bb_swing_diff_2g = 0;
+			else
+				rtldm->bb_swing_diff_5g = 0;
+			out = 0x200; // 0 dB
+		} else if (swing_a == 0x1) {
+			if (band == BAND_ON_2_4G)
+				rtldm->bb_swing_diff_2g = -3;
+			else
+				rtldm->bb_swing_diff_5g = -3;
+			out = 0x16A; // -3 dB
+		} else if (swing_a == 0x2) {
+			if (band == BAND_ON_2_4G)
+				rtldm->bb_swing_diff_2g = -6;
+			else
+				rtldm->bb_swing_diff_5g = -6;
+			out = 0x101; // -6 dB
+		} else if (swing_a == 0x3) {
+			if (band == BAND_ON_2_4G)
+				rtldm->bb_swing_diff_2g = -9;
+			else
+				rtldm->bb_swing_diff_5g = -9;
+			out = 0x0B6; // -9 dB
+		}
+
+			//3 Path-B
+		if (swing_b == 0x0) {
+			if (band == BAND_ON_2_4G)
+				rtldm->bb_swing_diff_2g = 0;
+			else
+				rtldm->bb_swing_diff_5g = 0;
+			out = 0x200; // 0 dB
+		} else if (swing_b == 0x1) {
+			if (band == BAND_ON_2_4G)
+				rtldm->bb_swing_diff_2g = -3;
+			else
+				rtldm->bb_swing_diff_5g = -3;
+			out = 0x16A; // -3 dB
+		} else if (swing_b == 0x2) {
+			if (band == BAND_ON_2_4G)
+				rtldm->bb_swing_diff_2g = -6;
+			else
+				rtldm->bb_swing_diff_5g = -6;
+			out = 0x101; // -6 dB
+		} else if (swing_b == 0x3) {
+			if (band == BAND_ON_2_4G)
+				rtldm->bb_swing_diff_2g = -9;
+			else
+				rtldm->bb_swing_diff_5g = -9;
+			out = 0x0B6; // -9 dB
+		}
+	}
+
+	RT_TRACE(COMP_SCAN, DBG_LOUD,
+		("<=== PHY_GetTxBBSwing_8812A, out = 0x%X\n", out));
+	return out;
+}
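+
+/*
+ * Reprogram the BB/RF front end when moving between 2.4 GHz and 5 GHz:
+ * RFE pin mux, AGC table select, CCK check, TX/RX path masks and the
+ * per-band TX BB swing, then reset the TX power tracking state.
+ */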
+void rtl8821ae_phy_switch_wirelessband(struct ieee80211_hw *hw, u8 band)
+{
+	struct rtl_priv *rtlpriv = rtl_priv(hw);
+	struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
+	struct rtl_dm *rtldm = rtl_dm(rtlpriv);
+	u8 current_band = rtlhal->current_bandtype;
+	u32 txpath, rxpath;
+	//u8 i, value8;
+	char bb_diff_between_band;
+
+	RT_TRACE(COMP_INIT, DBG_LOUD, ("\n"));
+	txpath = rtl8821ae_phy_query_bb_reg(hw, RTXPATH, 0xf0);
+	rxpath = rtl8821ae_phy_query_bb_reg(hw, RCCK_RX, 0x0f000000);
+	rtlhal->current_bandtype = (enum band_type) band;
+	/* reconfig BB/RF according to wireless mode */
+	if (rtlhal->current_bandtype == BAND_ON_2_4G) {
+		/* BB & RF Config */
+		RT_TRACE(COMP_CMD, DBG_DMESG, ("2.4G\n"));
+		rtl_set_bbreg(hw, ROFDMCCKEN, BOFDMEN|BCCKEN, 0x03);
+
+		if (rtlhal->hw_type == HARDWARE_TYPE_RTL8821AE) {
+			/* 0xCB0[15:12] = 0x7 (LNA_On)*/
+			rtl_set_bbreg(hw, RA_RFE_PINMUX, 0xF000, 0x7);
+			/* 0xCB0[7:4] = 0x7 (PAPE_A)*/
+			rtl_set_bbreg(hw, RA_RFE_PINMUX, 0xF0, 0x7);
+		}
+
+		if (rtlhal->hw_type == HARDWARE_TYPE_RTL8812AE) {
+			rtl_set_bbreg(hw, 0x830, 0xE, 0x4);	/*0x830[3:1] = 0x4*/
+			rtl_set_bbreg(hw, 0x834, 0x3, 0x1);	/*0x834[1:0] = 0x1*/
+		}
+
+		if (rtlhal->hw_type == HARDWARE_TYPE_RTL8821AE)
+			rtl_set_bbreg(hw, RA_TXSCALE, 0xF00, 0); // 0xC1C[11:8] = 0
+		else
+			rtl_set_bbreg(hw, 0x82c, 0x3, 0);	// 0x82C[1:0] = 2b'00
+
+		if (rtlhal->hw_type == HARDWARE_TYPE_RTL8812AE) {
+			rtl_set_bbreg(hw, RA_RFE_PINMUX, BMASKDWORD, 0x77777777);
+			rtl_set_bbreg(hw, RB_RFE_PINMUX, BMASKDWORD, 0x77777777);
+			rtl_set_bbreg(hw, RA_RFE_INV, 0x3ff00000, 0x000);
+			rtl_set_bbreg(hw, RB_RFE_INV, 0x3ff00000, 0x000);
+		}
+
+		rtl_set_bbreg(hw, RTXPATH, 0xf0, txpath);
+		rtl_set_bbreg(hw, RCCK_RX, 0x0f000000, rxpath);
+
+		rtl_write_byte(rtlpriv, REG_CCK_CHECK, 0x0);
+	} else {/* 5G band */
+		u16 count, reg_41a;
+		RT_TRACE(COMP_CMD, DBG_DMESG, ("5G\n"));
+
+		if (rtlhal->hw_type == HARDWARE_TYPE_RTL8821AE) {
+			/*0xCB0[15:12] = 0x5 (LNA_On)*/
+			rtl_set_bbreg(hw, RA_RFE_PINMUX, 0xF000, 0x5);
+			/*0xCB0[7:4] = 0x4 (PAPE_A)*/
+			rtl_set_bbreg(hw, RA_RFE_PINMUX, 0xF0, 0x4);
+		}
+		/*CCK_CHECK_en*/
+		rtl_write_byte(rtlpriv, REG_CCK_CHECK, 0x80);
+
+		count = 0;
+		reg_41a = rtl_read_word(rtlpriv, REG_TXPKT_EMPTY);
+		RT_TRACE(COMP_SCAN, DBG_LOUD, ("Reg41A value %d", reg_41a));
+		reg_41a &= 0x30;
+		while ((reg_41a != 0x30) && (count < 50)) {
+			udelay(50);
+			RT_TRACE(COMP_SCAN, DBG_LOUD, ("Delay 50us \n"));
+
+			reg_41a = rtl_read_word(rtlpriv, REG_TXPKT_EMPTY);
+			reg_41a &= 0x30;
+			count++;
+			RT_TRACE(COMP_SCAN, DBG_LOUD, ("Reg41A value %d", reg_41a));
+		}
+		if (count != 0)
+			RT_TRACE(COMP_MLME, DBG_LOUD,
+			("PHY_SwitchWirelessBand8812(): Switch to 5G Band. "
+			"Count = %d reg41A=0x%x\n", count, reg_41a));
+
+		// 2012/02/01, Sinda add registry to switch workaround without long-run verification for scan issue.
+		rtl_set_bbreg(hw, ROFDMCCKEN, BOFDMEN|BCCKEN, 0x03);
+
+		if (rtlhal->hw_type == HARDWARE_TYPE_RTL8812AE) {
+			rtl_set_bbreg(hw, 0x830, 0xE, 0x3);	/*0x830[3:1] = 0x3*/
+			rtl_set_bbreg(hw, 0x834, 0x3, 0x2);	/*0x834[1:0] = 0x2*/
+		}
+
+		if (rtlhal->hw_type == HARDWARE_TYPE_RTL8821AE) {
+			/* AGC table select */
+			rtl_set_bbreg(hw, RA_TXSCALE, 0xF00, 1); /* 0xC1C[11:8] = 1*/
+		} else
+			rtl_set_bbreg(hw, 0x82c, 0x3, 1);	/* 0x82C[1:0] = 2'b01 */
+
+		if (rtlhal->hw_type == HARDWARE_TYPE_RTL8812AE) {
+			rtl_set_bbreg(hw, RA_RFE_PINMUX, BMASKDWORD, 0x77337777);
+			rtl_set_bbreg(hw, RB_RFE_PINMUX, BMASKDWORD, 0x77337777);
+			rtl_set_bbreg(hw, RA_RFE_INV, 0x3ff00000, 0x010);
+			rtl_set_bbreg(hw, RB_RFE_INV, 0x3ff00000, 0x010);
+		}
+
+		rtl_set_bbreg(hw, RTXPATH, 0xf0, txpath);
+		rtl_set_bbreg(hw, RCCK_RX, 0x0f000000, rxpath);
+
+		RT_TRACE(COMP_SCAN, DBG_LOUD,
+			("==>PHY_SwitchWirelessBand8812() BAND_ON_5G settings OFDM index 0x%x\n",
+			rtlpriv->dm.ofdm_index[RF90_PATH_A]));
+	}
+
+	if ((rtlhal->hw_type == HARDWARE_TYPE_RTL8821AE) ||
+			(rtlhal->hw_type == HARDWARE_TYPE_RTL8812AE)) {
+		rtl_set_bbreg(hw, RA_TXSCALE, 0xFFE00000,
+						 phy_get_tx_bb_swing_8812A(hw, band, RF90_PATH_A)); // 0xC1C[31:21]
+		rtl_set_bbreg(hw, RB_TXSCALE, 0xFFE00000,
+						 phy_get_tx_bb_swing_8812A(hw, band, RF90_PATH_B)); // 0xE1C[31:21]
+
+		/* <20121005, Kordan> When TxPowerTrack is ON, we should take care of the change of BB swing.
+		    That is, reset all info to trigger Tx power tracking.*/
+		if (band != current_band) {
+			bb_diff_between_band = (rtldm->bb_swing_diff_2g - rtldm->bb_swing_diff_5g);
+			bb_diff_between_band = (band == BAND_ON_2_4G) ? bb_diff_between_band : (-1 * bb_diff_between_band);
+			rtldm->default_ofdm_index += bb_diff_between_band * 2;
+		}
+		rtl8821ae_dm_clear_txpower_tracking_state(hw);
+	}
+
+	RT_TRACE(COMP_SCAN, DBG_TRACE,
+		("<==rtl8821ae_phy_switch_wirelessband():Switch Band OK.\n"));
+	return;
+}
+
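+/*
+ * Evaluate a conditional marker from the register arrays.  The low three
+ * bytes encode board type, interface and platform masks; 0xCDCDCDCD means
+ * "unconditional".
+ */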
+static bool _rtl8821ae_check_condition(struct ieee80211_hw *hw,
+				       const u32 Condition)
+{
+	struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
+	struct rtl_efuse *rtlefuse = rtl_efuse(rtl_priv(hw));
+	u32 _board = rtlefuse->board_type; /*need efuse define*/
+	u32 _interface = rtlhal->interface;
+	u32 _platform = 0x08;/*SupportPlatform */
+	u32 cond = Condition;
+
+	if (Condition == 0xCDCDCDCD)
+		return true;
+
+	cond = Condition & 0xFF;
+	if ((_board != cond) == 0 && cond != 0xFF)
+		return false;
+
+	cond = Condition & 0xFF00;
+	cond = cond >> 8;
+	if ((_interface & cond) == 0 && cond != 0x07)
+		return false;
+
+	cond = Condition & 0xFF0000;
+	cond = cond >> 16;
+	if ((_platform & cond) == 0 && cond != 0x0F)
+		return false;
+	return true;
+}
+
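+/* Addresses 0xfe/0xffe in the radio arrays are delay markers; anything else
+ * is written to the RF register via rtl_set_rfreg(). */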
+static void _rtl8821ae_config_rf_reg(struct ieee80211_hw *hw,
+				     u32 addr, u32 data,
+				     enum radio_path rfpath, u32 regaddr)
+{
+	if (addr == 0xfe || addr == 0xffe) {
+		mdelay(50);
+	} else {
+		rtl_set_rfreg(hw, rfpath, regaddr, RFREG_OFFSET_MASK, data);
+		udelay(1);
+	}
+}
+
+static void _rtl8821ae_config_rf_radio_a(struct ieee80211_hw *hw,
+	u32 addr, u32 data)
+{
+	u32 content = 0x1000; /*RF Content: radio_a_txt*/
+	u32 maskforphyset = (u32)(content & 0xE000);
+
+	_rtl8821ae_config_rf_reg(hw, addr, data, RF90_PATH_A, addr | maskforphyset);
+}
+
+static void _rtl8821ae_config_rf_radio_b(struct ieee80211_hw *hw,
+	u32 addr, u32 data)
+{
+	u32 content = 0x1001; /*RF Content: radio_b_txt*/
+	u32 maskforphyset = (u32)(content & 0xE000);
+
+	_rtl8821ae_config_rf_reg(hw, addr, data, RF90_PATH_B, addr | maskforphyset);
+}
+
+static void _rtl8812ae_config_bb_reg(struct ieee80211_hw *hw,
+	u32 addr, u32 data)
+{
+	if (addr == 0xfe)
+		mdelay(50);
+	else if (addr == 0xfd)
+		mdelay(5);
+	else if (addr == 0xfc)
+		mdelay(1);
+	else if (addr == 0xfb)
+		udelay(50);
+	else if (addr == 0xfa)
+		udelay(5);
+	else if (addr == 0xf9)
+		udelay(1);
+	else
+		rtl_set_bbreg(hw, addr, MASKDWORD, data);
+	udelay(1);
+}
+
+static void _rtl8821ae_config_bb_reg(struct ieee80211_hw *hw,
+	u32 addr, u32 data)
+{
+	if (addr == 0xfe)
+		mdelay(50);
+	else if (addr == 0xfd)
+		mdelay(5);
+	else if (addr == 0xfc)
+		mdelay(1);
+	else if (addr == 0xfb)
+		udelay(50);
+	else if (addr == 0xfa)
+		udelay(5);
+	else if (addr == 0xf9)
+		udelay(1);
+
+	rtl_set_bbreg(hw, addr, MASKDWORD, data);
+	udelay(1);
+}
+
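+/* Clear the per-band/path/TX-count/rate-section power-by-rate table before
+ * it is refilled from the PHY_REG_PG arrays. */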
+static void _rtl8821ae_phy_init_tx_power_by_rate(struct ieee80211_hw *hw)
+{
+	struct rtl_priv *rtlpriv = rtl_priv(hw);
+	struct rtl_phy *rtlphy = &(rtlpriv->phy);
+
+	u8 band, rfpath, txnum, rate_section;
+
+	for (band = BAND_ON_2_4G; band <= BAND_ON_5G; ++band)
+		for (rfpath = 0; rfpath < TX_PWR_BY_RATE_NUM_RF; ++rfpath)
+			for (txnum = 0; txnum < TX_PWR_BY_RATE_NUM_RF; ++txnum)
+				for (rate_section = 0; rate_section < TX_PWR_BY_RATE_NUM_SECTION; ++rate_section)
+					rtlphy->tx_power_by_rate_offset[band][rfpath][txnum][rate_section] = 0;
+}
+
+void _rtl8821ae_phy_set_txpower_by_rate_base(struct ieee80211_hw *hw,
+					     u8 band, u8 path, u8 rate_section,
+					     u8 txnum, u8 value)
+{
+	struct rtl_priv *rtlpriv = rtl_priv(hw);
+	struct rtl_phy *rtlphy = &(rtlpriv->phy);
+
+	if (path > RF90_PATH_D) {
+		RT_TRACE(COMP_INIT, DBG_LOUD,
+			("Invalid Rf Path %d in phy_SetTxPowerByRatBase()\n", path));
+		return;
+	}
+
+	if (band == BAND_ON_2_4G) {
+		switch (rate_section) {
+		case CCK:
+			rtlphy->txpwr_by_rate_base_24g[path][txnum][0] = value;
+			break;
+		case OFDM:
+			rtlphy->txpwr_by_rate_base_24g[path][txnum][1] = value;
+			break;
+		case HT_MCS0_MCS7:
+			rtlphy->txpwr_by_rate_base_24g[path][txnum][2] = value;
+			break;
+		case HT_MCS8_MCS15:
+			rtlphy->txpwr_by_rate_base_24g[path][txnum][3] = value;
+			break;
+		case VHT_1SSMCS0_1SSMCS9:
+			rtlphy->txpwr_by_rate_base_24g[path][txnum][4] = value;
+			break;
+		case VHT_2SSMCS0_2SSMCS9:
+			rtlphy->txpwr_by_rate_base_24g[path][txnum][5] = value;
+			break;
+		default:
+			RT_TRACE(COMP_INIT, DBG_LOUD, ( "Invalid RateSection %d in Band 2.4G, Rf Path %d, %dTx in PHY_SetTxPowerByRateBase()\n",
+					 rate_section, path, txnum ) );
+			break;
+		}
+	} else if (band == BAND_ON_5G) {
+		switch (rate_section) {
+		case OFDM:
+			rtlphy->txpwr_by_rate_base_5g[path][txnum][0] = value;
+			break;
+		case HT_MCS0_MCS7:
+			rtlphy->txpwr_by_rate_base_5g[path][txnum][1] = value;
+			break;
+		case HT_MCS8_MCS15:
+			rtlphy->txpwr_by_rate_base_5g[path][txnum][2] = value;
+			break;
+		case VHT_1SSMCS0_1SSMCS9:
+			rtlphy->txpwr_by_rate_base_5g[path][txnum][3] = value;
+			break;
+		case VHT_2SSMCS0_2SSMCS9:
+			rtlphy->txpwr_by_rate_base_5g[path][txnum][4] = value;
+			break;
+		default:
+			RT_TRACE(COMP_INIT, DBG_LOUD,
+				("Invalid RateSection %d in Band 5G, Rf Path %d, "
+				"%dTx in PHY_SetTxPowerByRateBase()\n",
+				rate_section, path, txnum));
+			break;
+		}
+	} else {
+		RT_TRACE(COMP_INIT, DBG_LOUD,
+			("Invalid Band %d in PHY_SetTxPowerByRateBase()\n", band));
+	}
+
+}
+
+u8 _rtl8821ae_phy_get_txpower_by_rate_base(struct ieee80211_hw *hw,
+					   u8 band, u8 path,
+					   u8 txnum, u8 rate_section)
+{
+	struct rtl_priv *rtlpriv = rtl_priv(hw);
+	struct rtl_phy *rtlphy = &(rtlpriv->phy);
+	u8 value = 0;
+
+	if (path > RF90_PATH_D) {
+		RT_TRACE(COMP_INIT, DBG_LOUD,
+			("Invalid Rf Path %d in PHY_GetTxPowerByRateBase()\n", path));
+		return 0;
+	}
+
+	if (band == BAND_ON_2_4G) {
+		switch (rate_section) {
+		case CCK:
+			value = rtlphy->txpwr_by_rate_base_24g[path][txnum][0];
+			break;
+		case OFDM:
+			value = rtlphy->txpwr_by_rate_base_24g[path][txnum][1];
+			break;
+		case HT_MCS0_MCS7:
+			value = rtlphy->txpwr_by_rate_base_24g[path][txnum][2];
+			break;
+		case HT_MCS8_MCS15:
+			value = rtlphy->txpwr_by_rate_base_24g[path][txnum][3];
+			break;
+		case VHT_1SSMCS0_1SSMCS9:
+			value = rtlphy->txpwr_by_rate_base_24g[path][txnum][4];
+			break;
+		case VHT_2SSMCS0_2SSMCS9:
+			value = rtlphy->txpwr_by_rate_base_24g[path][txnum][5];
+			break;
+		default:
+			RT_TRACE(COMP_INIT, DBG_LOUD,
+				("Invalid RateSection %d in Band 2.4G, Rf Path %d,"
+				" %dTx in PHY_GetTxPowerByRateBase()\n",
+				rate_section, path, txnum));
+			break;
+		}
+	} else if (band == BAND_ON_5G) {
+		switch (rate_section) {
+		case OFDM:
+			value = rtlphy->txpwr_by_rate_base_5g[path][txnum][0];
+			break;
+		case HT_MCS0_MCS7:
+			value = rtlphy->txpwr_by_rate_base_5g[path][txnum][1];
+			break;
+		case HT_MCS8_MCS15:
+			value = rtlphy->txpwr_by_rate_base_5g[path][txnum][2];
+			break;
+		case VHT_1SSMCS0_1SSMCS9:
+			value = rtlphy->txpwr_by_rate_base_5g[path][txnum][3];
+			break;
+		case VHT_2SSMCS0_2SSMCS9:
+			value = rtlphy->txpwr_by_rate_base_5g[path][txnum][4];
+			break;
+		default:
+			RT_TRACE(COMP_INIT, DBG_LOUD,
+				("Invalid RateSection %d in Band 5G, Rf Path %d,"
+				" %dTx in PHY_GetTxPowerByRateBase()\n",
+				rate_section, path, txnum));
+			break;
+		}
+	} else {
+		RT_TRACE(COMP_INIT, DBG_LOUD,
+			("Invalid Band %d in PHY_GetTxPowerByRateBase()\n", band));
+	}
+
+	return value;
+
+}
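+
+/*
+ * Decode the BCD-coded base power (two 4-bit digits) for each rate section
+ * from the power-by-rate table and cache it per band/path/TX number.
+ */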
+void _rtl8821ae_phy_store_txpower_by_rate_base(struct ieee80211_hw *hw)
+{
+	struct rtl_priv *rtlpriv = rtl_priv(hw);
+	struct rtl_phy *rtlphy = &(rtlpriv->phy);
+	u16 rawValue = 0;
+	u8 base = 0, path = 0;
+
+	for (path = RF90_PATH_A; path <= RF90_PATH_B; ++path) {
+		rawValue = (u16) (rtlphy->tx_power_by_rate_offset[BAND_ON_2_4G][path][RF_1TX][0] >> 24) & 0xFF;
+		base = (rawValue >> 4) * 10 + (rawValue & 0xF);
+		_rtl8821ae_phy_set_txpower_by_rate_base(hw, BAND_ON_2_4G, path, CCK, RF_1TX, base);
+
+		rawValue = (u16) (rtlphy->tx_power_by_rate_offset[BAND_ON_2_4G][path][RF_1TX][2] >> 24) & 0xFF;
+		base = (rawValue >> 4) * 10 + (rawValue & 0xF);
+		_rtl8821ae_phy_set_txpower_by_rate_base(hw, BAND_ON_2_4G, path, OFDM, RF_1TX, base );
+
+		rawValue = (u16) (rtlphy->tx_power_by_rate_offset[BAND_ON_2_4G][path][RF_1TX][4] >> 24) & 0xFF;
+		base = (rawValue >> 4) * 10 + (rawValue & 0xF);
+		_rtl8821ae_phy_set_txpower_by_rate_base(hw, BAND_ON_2_4G, path, HT_MCS0_MCS7, RF_1TX, base );
+
+		rawValue = (u16) (rtlphy->tx_power_by_rate_offset[BAND_ON_2_4G][path][RF_2TX][6] >> 24) & 0xFF;
+		base = (rawValue >> 4) * 10 + (rawValue & 0xF);
+		_rtl8821ae_phy_set_txpower_by_rate_base(hw, BAND_ON_2_4G, path, HT_MCS8_MCS15, RF_2TX, base );
+
+		rawValue = (u16) (rtlphy->tx_power_by_rate_offset[BAND_ON_2_4G][path][RF_1TX][8] >> 24) & 0xFF;
+		base = (rawValue >> 4) * 10 + (rawValue & 0xF);
+		_rtl8821ae_phy_set_txpower_by_rate_base(hw, BAND_ON_2_4G, path, VHT_1SSMCS0_1SSMCS9, RF_1TX, base );
+
+		rawValue = (u16) (rtlphy->tx_power_by_rate_offset[BAND_ON_2_4G][path][RF_2TX][11] >> 8) & 0xFF;
+		base = (rawValue >> 4) * 10 + (rawValue & 0xF);
+		_rtl8821ae_phy_set_txpower_by_rate_base(hw, BAND_ON_2_4G, path, VHT_2SSMCS0_2SSMCS9, RF_2TX, base );
+
+		rawValue = (u16) (rtlphy->tx_power_by_rate_offset[BAND_ON_5G][path][RF_1TX][2] >> 24) & 0xFF;
+		base = (rawValue >> 4) * 10 + (rawValue & 0xF);
+		_rtl8821ae_phy_set_txpower_by_rate_base(hw, BAND_ON_5G, path, OFDM, RF_1TX, base );
+
+		rawValue = (u16) (rtlphy->tx_power_by_rate_offset[BAND_ON_5G][path][RF_1TX][4] >> 24) & 0xFF;
+		base = (rawValue >> 4) * 10 + (rawValue & 0xF);
+		_rtl8821ae_phy_set_txpower_by_rate_base(hw, BAND_ON_5G, path, HT_MCS0_MCS7, RF_1TX, base );
+
+		rawValue = (u16) (rtlphy->tx_power_by_rate_offset[BAND_ON_5G][path][RF_2TX][6] >> 24) & 0xFF;
+		base = (rawValue >> 4) * 10 + (rawValue & 0xF);
+		_rtl8821ae_phy_set_txpower_by_rate_base(hw, BAND_ON_5G, path, HT_MCS8_MCS15, RF_2TX, base );
+
+		rawValue = (u16) (rtlphy->tx_power_by_rate_offset[BAND_ON_5G][path][RF_1TX][8] >> 24) & 0xFF;
+		base = (rawValue >> 4) * 10 + (rawValue & 0xF);
+		_rtl8821ae_phy_set_txpower_by_rate_base(hw, BAND_ON_5G, path, VHT_1SSMCS0_1SSMCS9, RF_1TX, base );
+
+		rawValue = (u16) (rtlphy->tx_power_by_rate_offset[BAND_ON_5G][path][RF_2TX][11] >> 8) & 0xFF;
+		base = (rawValue >> 4) * 10 + (rawValue & 0xF);
+		_rtl8821ae_phy_set_txpower_by_rate_base(hw, BAND_ON_5G, path, VHT_2SSMCS0_2SSMCS9, RF_2TX, base );
+	}
+}
+
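+/*
+ * Turn the BCD-coded absolute power bytes in *data (byte positions @start to
+ * @end) into differences from @base_val; other bytes are passed through
+ * unchanged.
+ */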
+void _phy_convert_txpower_dbm_to_relative_value(u32 *data, u8 start,
+						u8 end, u8 base_val)
+{
+	s8 i = 0;
+	u8 temp_value = 0;
+	u32 temp_data = 0;
+
+	for (i = 3; i >= 0; --i) {
+		if (i >= start && i <= end) {
+			// Get the exact value
+			temp_value = (u8) (*data >> (i * 8)) & 0xF;
+			temp_value += ((u8) ((*data >> (i * 8 + 4)) & 0xF)) * 10;
+
+			// Change the value to a relative value
+			temp_value = (temp_value > base_val) ? temp_value - base_val : base_val - temp_value;
+		} else {
+			temp_value = (u8) (*data >> (i * 8)) & 0xFF;
+		}
+		temp_data <<= 8;
+		temp_data |= temp_value;
+	}
+	*data = temp_data;
+}
+
+void _rtl8821ae_phy_convert_txpower_dbm_to_relative_value(struct ieee80211_hw *hw)
+{
+	struct rtl_priv *rtlpriv = rtl_priv(hw);
+	struct rtl_phy *rtlphy = &(rtlpriv->phy);
+	u8 base = 0, rfPath = 0;
+
+	for (rfPath = RF90_PATH_A; rfPath <= RF90_PATH_B; ++rfPath) {
+		base = _rtl8821ae_phy_get_txpower_by_rate_base(hw, BAND_ON_2_4G, rfPath, RF_1TX, CCK);
+		RT_DISP( FPHY, PHY_TXPWR, ( "base of 2.4G CCK 1TX: %d\n", base ) );
+		_phy_convert_txpower_dbm_to_relative_value(
+			&(rtlphy->tx_power_by_rate_offset[BAND_ON_2_4G][rfPath][RF_1TX][0] ),
+			0, 3, base );
+
+		base = _rtl8821ae_phy_get_txpower_by_rate_base(hw, BAND_ON_2_4G, rfPath, RF_1TX, OFDM );
+		RT_DISP( FPHY, PHY_TXPWR, ( "base of 2.4G OFDM 1TX: %d\n", base ) );
+		_phy_convert_txpower_dbm_to_relative_value(
+			&(rtlphy->tx_power_by_rate_offset[BAND_ON_2_4G][rfPath][RF_1TX][1] ),
+			0, 3, base );
+		_phy_convert_txpower_dbm_to_relative_value(
+			&(rtlphy->tx_power_by_rate_offset[BAND_ON_2_4G][rfPath][RF_1TX][2] ),
+			0, 3, base );
+
+		base = _rtl8821ae_phy_get_txpower_by_rate_base(hw, BAND_ON_2_4G, rfPath, RF_1TX, HT_MCS0_MCS7 );
+		RT_DISP( FPHY, PHY_TXPWR, ( "base of 2.4G HTMCS0-7 1TX: %d\n", base ) );
+		_phy_convert_txpower_dbm_to_relative_value(
+			&(rtlphy->tx_power_by_rate_offset[BAND_ON_2_4G][rfPath][RF_1TX][3] ),
+			0, 3, base );
+		_phy_convert_txpower_dbm_to_relative_value(
+			&(rtlphy->tx_power_by_rate_offset[BAND_ON_2_4G][rfPath][RF_1TX][4] ),
+			0, 3, base );
+
+		base = _rtl8821ae_phy_get_txpower_by_rate_base(hw, BAND_ON_2_4G, rfPath, RF_2TX, HT_MCS8_MCS15 );
+		RT_DISP( FPHY, PHY_TXPWR, ( "base of 2.4G HTMCS8-15 2TX: %d\n", base ) );
+
+		_phy_convert_txpower_dbm_to_relative_value(
+			&(rtlphy->tx_power_by_rate_offset[BAND_ON_2_4G][rfPath][RF_2TX][5] ),
+			0, 3, base );
+
+		_phy_convert_txpower_dbm_to_relative_value(
+			&(rtlphy->tx_power_by_rate_offset[BAND_ON_2_4G][rfPath][RF_2TX][6] ),
+			0, 3, base );
+
+		base = _rtl8821ae_phy_get_txpower_by_rate_base(hw, BAND_ON_2_4G, rfPath, RF_1TX, VHT_1SSMCS0_1SSMCS9 );
+		RT_DISP( FPHY, PHY_TXPWR, ( "base of 2.4G VHT1SSMCS0-9 1TX: %d\n", base ) );
+		_phy_convert_txpower_dbm_to_relative_value(
+			&(rtlphy->tx_power_by_rate_offset[BAND_ON_2_4G][rfPath][RF_1TX][7] ),
+			0, 3, base );
+		_phy_convert_txpower_dbm_to_relative_value(
+			&(rtlphy->tx_power_by_rate_offset[BAND_ON_2_4G][rfPath][RF_1TX][8] ),
+			0, 3, base );
+		_phy_convert_txpower_dbm_to_relative_value(
+			&(rtlphy->tx_power_by_rate_offset[BAND_ON_2_4G][rfPath][RF_1TX][9] ),
+			0, 1, base );
+
+		base = _rtl8821ae_phy_get_txpower_by_rate_base(hw, BAND_ON_2_4G, rfPath, RF_2TX, VHT_2SSMCS0_2SSMCS9 );
+		RT_DISP( FPHY, PHY_TXPWR, ( "base of 2.4G VHT2SSMCS0-9 2TX: %d\n", base ) );
+		_phy_convert_txpower_dbm_to_relative_value(
+			&(rtlphy->tx_power_by_rate_offset[BAND_ON_2_4G][rfPath][RF_1TX][9] ),
+			2, 3, base );
+		_phy_convert_txpower_dbm_to_relative_value(
+			&(rtlphy->tx_power_by_rate_offset[BAND_ON_2_4G][rfPath][RF_2TX][10] ),
+			0, 3, base );
+		_phy_convert_txpower_dbm_to_relative_value(
+			&(rtlphy->tx_power_by_rate_offset[BAND_ON_2_4G][rfPath][RF_2TX][11] ),
+			0, 3, base );
+
+		base = _rtl8821ae_phy_get_txpower_by_rate_base(hw, BAND_ON_5G, rfPath, RF_1TX, OFDM );
+		RT_DISP( FPHY, PHY_TXPWR, ( "base of 5G OFDM 1TX: %d\n", base ) );
+		_phy_convert_txpower_dbm_to_relative_value(
+			&(rtlphy->tx_power_by_rate_offset[BAND_ON_5G][rfPath][RF_1TX][1] ),
+			0, 3, base );
+		_phy_convert_txpower_dbm_to_relative_value(
+			&(rtlphy->tx_power_by_rate_offset[BAND_ON_5G][rfPath][RF_1TX][2] ),
+			0, 3, base );
+
+		base = _rtl8821ae_phy_get_txpower_by_rate_base(hw, BAND_ON_5G, rfPath, RF_1TX, HT_MCS0_MCS7 );
+		RT_DISP( FPHY, PHY_TXPWR, ( "base of 5G HTMCS0-7 1TX: %d\n", base ) );
+		_phy_convert_txpower_dbm_to_relative_value(
+			&(rtlphy->tx_power_by_rate_offset[BAND_ON_5G][rfPath][RF_1TX][3] ),
+			0, 3, base );
+		_phy_convert_txpower_dbm_to_relative_value(
+			&(rtlphy->tx_power_by_rate_offset[BAND_ON_5G][rfPath][RF_1TX][4] ),
+			0, 3, base );
+
+		base = _rtl8821ae_phy_get_txpower_by_rate_base(hw, BAND_ON_5G, rfPath, RF_2TX, HT_MCS8_MCS15 );
+		RT_DISP( FPHY, PHY_TXPWR, ( "base of 5G HTMCS8-15 2TX: %d\n", base ) );
+		_phy_convert_txpower_dbm_to_relative_value(
+			&(rtlphy->tx_power_by_rate_offset[BAND_ON_5G][rfPath][RF_2TX][5] ),
+			0, 3, base );
+		_phy_convert_txpower_dbm_to_relative_value(
+			&(rtlphy->tx_power_by_rate_offset[BAND_ON_5G][rfPath][RF_2TX][6] ),
+			0, 3, base );
+
+		base = _rtl8821ae_phy_get_txpower_by_rate_base(hw, BAND_ON_5G, rfPath, RF_1TX, VHT_1SSMCS0_1SSMCS9 );
+		RT_DISP( FPHY, PHY_TXPWR, ( "base of 5G VHT1SSMCS0-9 1TX: %d\n", base ) );
+		_phy_convert_txpower_dbm_to_relative_value(
+			&(rtlphy->tx_power_by_rate_offset[BAND_ON_5G][rfPath][RF_1TX][7] ),
+			0, 3, base );
+		_phy_convert_txpower_dbm_to_relative_value(
+			&(rtlphy->tx_power_by_rate_offset[BAND_ON_5G][rfPath][RF_1TX][8] ),
+			0, 3, base );
+		_phy_convert_txpower_dbm_to_relative_value(
+			&(rtlphy->tx_power_by_rate_offset[BAND_ON_5G][rfPath][RF_1TX][9] ),
+			0, 1, base );
+
+		base = _rtl8821ae_phy_get_txpower_by_rate_base(hw, BAND_ON_5G, rfPath, RF_2TX, VHT_2SSMCS0_2SSMCS9 );
+		RT_DISP( FPHY, PHY_TXPWR, ( "base of 5G VHT2SSMCS0-9 2TX: %d\n", base ) );
+		_phy_convert_txpower_dbm_to_relative_value(
+			&(rtlphy->tx_power_by_rate_offset[BAND_ON_5G][rfPath][RF_1TX][9] ),
+			2, 3, base );
+		_phy_convert_txpower_dbm_to_relative_value(
+			&(rtlphy->tx_power_by_rate_offset[BAND_ON_5G][rfPath][RF_2TX][10] ),
+			0, 3, base );
+		_phy_convert_txpower_dbm_to_relative_value(
+			&(rtlphy->tx_power_by_rate_offset[BAND_ON_5G][rfPath][RF_2TX][11] ),
+			0, 3, base );
+	}
+
+	RT_TRACE(COMP_POWER, DBG_TRACE,
+		("<===_rtl8821ae_phy_convert_txpower_dbm_to_relative_value()\n"));
+
+}
+
+void _rtl8821ae_phy_txpower_by_rate_configuration(struct ieee80211_hw *hw)
+{
+	_rtl8821ae_phy_store_txpower_by_rate_base(hw);
+	_rtl8821ae_phy_convert_txpower_dbm_to_relative_value(hw);
+}
+
+static bool _rtl8821ae_phy_bb8821a_config_parafile(struct ieee80211_hw *hw)
+{
+	struct rtl_priv *rtlpriv = rtl_priv(hw);
+	struct rtl_phy *rtlphy = &(rtlpriv->phy);
+	struct rtl_efuse *rtlefuse = rtl_efuse(rtl_priv(hw));
+	struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
+	bool rtstatus;
+
+	/*TX POWER LIMIT
+	    PHY_InitTxPowerLimit
+	    PHY_ConfigRFWithCustomPowerLimitTableParaFile*/
+	if (rtlhal->hw_type == HARDWARE_TYPE_RTL8812AE)
+		rtstatus = _rtl8812ae_phy_config_bb_with_headerfile(hw,
+								BASEBAND_CONFIG_PHY_REG);
+	else
+		rtstatus = _rtl8821ae_phy_config_bb_with_headerfile(hw,
+								BASEBAND_CONFIG_PHY_REG);
+	if (rtstatus != true) {
+		RT_TRACE(COMP_ERR, DBG_EMERG, ("Write BB Reg Fail!!"));
+		return false;
+	}
+	_rtl8821ae_phy_init_tx_power_by_rate(hw);
+	if (rtlefuse->autoload_failflag == false) {
+		//rtlphy->pwrgroup_cnt = 0;
+		if (rtlhal->hw_type == HARDWARE_TYPE_RTL8812AE)
+			rtstatus = _rtl8812ae_phy_config_bb_with_pgheaderfile(hw,
+								   BASEBAND_CONFIG_PHY_REG);
+		else
+			rtstatus = _rtl8821ae_phy_config_bb_with_pgheaderfile(hw,
+								   BASEBAND_CONFIG_PHY_REG);
+	}
+	if (rtstatus != true) {
+		RT_TRACE(COMP_ERR, DBG_EMERG, ("BB_PG Reg Fail!!"));
+		return false;
+	}
+
+	_rtl8821ae_phy_txpower_by_rate_configuration(hw);
+
+	if (rtlhal->hw_type == HARDWARE_TYPE_RTL8812AE)
+		rtstatus = _rtl8812ae_phy_config_bb_with_headerfile(hw,
+							 BASEBAND_CONFIG_AGC_TAB);
+	else
+		rtstatus = _rtl8821ae_phy_config_bb_with_headerfile(hw,
+							 BASEBAND_CONFIG_AGC_TAB);
+
+	if (rtstatus != true) {
+		RT_TRACE(COMP_ERR, DBG_EMERG, ("AGC Table Fail\n"));
+		return false;
+	}
+	rtlphy->bcck_high_power = (bool) (rtl_get_bbreg(hw,
+							RFPGA0_XA_HSSIPARAMETER2,
+							0x200));
+	return true;
+}
+
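+/*
+ * Walk the (address, value) MAC register array.  Entries >= 0xCDCDCDCD start
+ * a conditional block (checked with _rtl8821ae_check_condition()) that is
+ * terminated by a 0xDEAD/0xCDEF/0xCDCD marker.
+ */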
+static bool _rtl8812ae_phy_config_mac_with_headerfile(struct ieee80211_hw *hw)
+{
+	struct rtl_priv *rtlpriv = rtl_priv(hw);
+	u32 i, v1, v2;
+	u32 arraylength;
+	u32 *ptrarray;
+
+	RT_TRACE(COMP_INIT, DBG_TRACE, ("Read rtl8812AE_MAC_REG_Array\n"));
+	arraylength = RTL8812AEMAC_1T_ARRAYLEN;
+	ptrarray = RTL8812AE_MAC_REG_ARRAY;
+	RT_TRACE(COMP_INIT, DBG_LOUD,
+		 ("Img:RTL8812AE_MAC_REG_ARRAY LEN %d\n",arraylength));
+	for (i = 0; i < arraylength; i += 2) {
+		v1 = ptrarray[i];
+		v2 = (u8) ptrarray[i + 1];
+		if (v1<0xCDCDCDCD) {
+			rtl_write_byte(rtlpriv, v1, (u8) v2);
+		} else {
+			if (!_rtl8821ae_check_condition(hw,v1)) {
+				/*Discard the following (offset, data) pairs*/
+				READ_NEXT_PAIR(ptrarray, v1, v2, i);
+				while (v2 != 0xDEAD &&
+					   v2 != 0xCDEF &&
+					   v2 != 0xCDCD && i < arraylength -2)
+					READ_NEXT_PAIR(ptrarray, v1, v2, i);
+
+				i -= 2; /* prevent from for-loop += 2*/
+			} else {/*Configure matched pairs and skip to end of if-else.*/
+				READ_NEXT_PAIR(ptrarray, v1, v2, i);
+				while (v2 != 0xDEAD &&
+					   v2 != 0xCDEF &&
+					   v2 != 0xCDCD && i < arraylength -2) {
+					rtl_write_byte(rtlpriv,v1,v2);
+					READ_NEXT_PAIR(ptrarray, v1, v2, i);
+				}
+
+				while (v2 != 0xDEAD && i < arraylength -2)
+					READ_NEXT_PAIR(ptrarray, v1, v2, i);
+			}
+		}
+	}
+	return true;
+}
+
+static bool _rtl8821ae_phy_config_mac_with_headerfile(struct ieee80211_hw *hw)
+{
+	struct rtl_priv *rtlpriv = rtl_priv(hw);
+	u32 i, v1, v2;
+	u32 arraylength;
+	u32 *ptrarray;
+
+	RT_TRACE(COMP_INIT, DBG_TRACE, ("Read rtl8821AE_MAC_REG_Array\n"));
+	arraylength = RTL8821AEMAC_1T_ARRAYLEN;
+	ptrarray = RTL8821AE_MAC_REG_ARRAY;
+	RT_TRACE(COMP_INIT, DBG_LOUD,
+		 ("Img:RTL8821AE_MAC_REG_ARRAY LEN %d\n",arraylength));
+	for (i = 0; i < arraylength; i += 2) {
+		v1 = ptrarray[i];
+		v2 = (u8) ptrarray[i + 1];
+		if (v1<0xCDCDCDCD) {
+			rtl_write_byte(rtlpriv, v1, (u8) v2);
+			continue;
+		} else {
+			if (!_rtl8821ae_check_condition(hw,v1)) {
+				/*Discard the following (offset, data) pairs*/
+				READ_NEXT_PAIR(ptrarray, v1, v2, i);
+				while (v2 != 0xDEAD &&
+					   v2 != 0xCDEF &&
+					   v2 != 0xCDCD && i < arraylength -2)
+					READ_NEXT_PAIR(ptrarray, v1, v2, i);
+
+				i -= 2; /* prevent from for-loop += 2*/
+			} else {/*Configure matched pairs and skip to end of if-else.*/
+				READ_NEXT_PAIR(ptrarray, v1, v2, i);
+				while (v2 != 0xDEAD &&
+					   v2 != 0xCDEF &&
+					   v2 != 0xCDCD && i < arraylength -2) {
+					rtl_write_byte(rtlpriv,v1,v2);
+					READ_NEXT_PAIR(ptrarray, v1, v2, i);
+				}
+
+				while (v2 != 0xDEAD && i < arraylength -2)
+					READ_NEXT_PAIR(ptrarray, v1, v2, i);
+			}
+		}
+	}
+	return true;
+}
+
+static bool _rtl8812ae_phy_config_bb_with_headerfile(struct ieee80211_hw *hw,
+						  u8 configtype)
+{
+	int i;
+	u32 *array_table;
+	u16 arraylen;
+	struct rtl_priv *rtlpriv = rtl_priv(hw);
+	u32 v1 = 0, v2 = 0;
+
+	if (configtype == BASEBAND_CONFIG_PHY_REG) {
+		arraylen = RTL8812AEPHY_REG_1TARRAYLEN;
+		array_table = RTL8812AE_PHY_REG_ARRAY;
+
+		for (i = 0; i < arraylen; i += 2) {
+			v1 = array_table[i];
+			v2 = array_table[i+1];
+			if (v1<0xCDCDCDCD) {
+				_rtl8812ae_config_bb_reg(hw, v1, v2);
+				continue;
+			} else {/*This line is the start line of branch.*/
+				if (!_rtl8821ae_check_condition(hw,v1)) {
+					/*Discard the following (offset, data) pairs*/
+					READ_NEXT_PAIR(array_table,v1, v2, i);
+					while (v2 != 0xDEAD &&
+					       v2 != 0xCDEF &&
+					       v2 != 0xCDCD && i < arraylen -2)
+					    READ_NEXT_PAIR(array_table,v1, v2, i);
+
+					i -= 2; /* prevent from for-loop += 2*/
+				} else {/*Configure matched pairs and skip to end of if-else.*/
+					READ_NEXT_PAIR(array_table,v1, v2, i);
+					while (v2 != 0xDEAD &&
+					       v2 != 0xCDEF &&
+					       v2 != 0xCDCD && i < arraylen -2) {
+						_rtl8812ae_config_bb_reg(hw,v1,v2);
+						READ_NEXT_PAIR(array_table,v1, v2, i);
+					}
+
+					while (v2 != 0xDEAD && i < arraylen -2)
+						READ_NEXT_PAIR(array_table,v1, v2, i);
+				}
+			}
+		}
+	} else if (configtype == BASEBAND_CONFIG_AGC_TAB) {
+			arraylen = RTL8812AEAGCTAB_1TARRAYLEN;
+			array_table = RTL8812AE_AGC_TAB_ARRAY;
+
+			for (i = 0; i < arraylen; i = i + 2) {
+				v1 = array_table[i];
+				v2 = array_table[i+1];
+				if (v1 < 0xCDCDCDCD) {
+					rtl_set_bbreg(hw, v1, MASKDWORD, v2);
+					udelay(1);
+					continue;
+			    } else {/*This line is the start line of branch.*/
+					if (!_rtl8821ae_check_condition(hw,v1)) {
+						/*Discard the following (offset, data) pairs*/
+						READ_NEXT_PAIR(array_table,v1, v2, i);
+						while (v2 != 0xDEAD &&
+						       v2 != 0xCDEF &&
+						       v2 != 0xCDCD && i < arraylen -2)
+						    READ_NEXT_PAIR(array_table,v1, v2, i);
+
+						i -= 2; /* prevent from for-loop += 2*/
+					}else{/*Configure matched pairs and skip to end of if-else.*/
+						READ_NEXT_PAIR(array_table,v1, v2, i);
+						while (v2 != 0xDEAD &&
+						       v2 != 0xCDEF &&
+						       v2 != 0xCDCD && i < arraylen -2)
+						{
+							rtl_set_bbreg(hw, v1, MASKDWORD, v2);
+							udelay(1);
+							READ_NEXT_PAIR(array_table,v1, v2, i);
+						}
+
+						while (v2 != 0xDEAD && i < arraylen -2)
+							READ_NEXT_PAIR(array_table,v1, v2, i);
+					}
+				}
+				RT_TRACE(COMP_INIT, DBG_TRACE,
+				 ("The agctab_array_table[0] is "
+				  "%x Rtl818EEPHY_REGArray[1] is %x \n",
+				  array_table[i],
+				  array_table[i + 1]));
+		}
+	}
+	return true;
+}
+
+static bool _rtl8821ae_phy_config_bb_with_headerfile(struct ieee80211_hw *hw,
+						  u8 configtype)
+{
+	int i;
+	u32 *array_table;
+	u16 arraylen;
+	struct rtl_priv *rtlpriv = rtl_priv(hw);
+	u32 v1 = 0, v2 = 0;
+
+	if (configtype == BASEBAND_CONFIG_PHY_REG) {
+		arraylen = RTL8821AEPHY_REG_1TARRAYLEN;
+		array_table = RTL8821AE_PHY_REG_ARRAY;
+
+		for (i = 0; i < arraylen; i += 2) {
+			v1 = array_table[i];
+			v2 = array_table[i+1];
+			if (v1<0xCDCDCDCD) {
+				_rtl8821ae_config_bb_reg(hw, v1, v2);
+				continue;
+			} else {/*This line is the start line of branch.*/
+				if (!_rtl8821ae_check_condition(hw,v1)) {
+					/*Discard the following (offset, data) pairs*/
+					READ_NEXT_PAIR(array_table, v1, v2, i);
+					while (v2 != 0xDEAD &&
+					       v2 != 0xCDEF &&
+					       v2 != 0xCDCD && i < arraylen -2)
+					    READ_NEXT_PAIR(array_table, v1, v2, i);
+
+					i -= 2; /* prevent from for-loop += 2*/
+				} else {/*Configure matched pairs and skip to end of if-else.*/
+					READ_NEXT_PAIR(array_table, v1, v2, i);
+					while (v2 != 0xDEAD &&
+					       v2 != 0xCDEF &&
+					       v2 != 0xCDCD && i < arraylen -2) {
+						_rtl8821ae_config_bb_reg(hw,v1,v2);
+						READ_NEXT_PAIR(array_table, v1, v2, i);
+					}
+
+					while (v2 != 0xDEAD && i < arraylen -2)
+						READ_NEXT_PAIR(array_table, v1, v2, i);
+				}
+			}
+		}
+	} else if (configtype == BASEBAND_CONFIG_AGC_TAB) {
+			arraylen = RTL8821AEAGCTAB_1TARRAYLEN;
+			array_table = RTL8821AE_AGC_TAB_ARRAY;
+
+			for (i = 0; i < arraylen; i = i + 2) {
+				v1 = array_table[i];
+				v2 = array_table[i+1];
+				if (v1 < 0xCDCDCDCD) {
+					rtl_set_bbreg(hw, v1, MASKDWORD, v2);
+					udelay(1);
+					continue;
+			    } else {/*This line is the start line of branch.*/
+					if (!_rtl8821ae_check_condition(hw,v1)) {
+						/*Discard the following (offset, data) pairs*/
+						READ_NEXT_PAIR(array_table, v1, v2, i);
+						while (v2 != 0xDEAD &&
+						       v2 != 0xCDEF &&
+						       v2 != 0xCDCD && i < arraylen -2)
+						    READ_NEXT_PAIR(array_table, v1, v2, i);
+
+						i -= 2; /* prevent from for-loop += 2*/
+					}else{/*Configure matched pairs and skip to end of if-else.*/
+						READ_NEXT_PAIR(array_table, v1, v2, i);
+						while (v2 != 0xDEAD &&
+						       v2 != 0xCDEF &&
+						       v2 != 0xCDCD && i < arraylen -2)
+						{
+							rtl_set_bbreg(hw, v1, MASKDWORD, v2);
+							udelay(1);
+							READ_NEXT_PAIR(array_table, v1, v2, i);
+						}
+
+						while (v2 != 0xDEAD && i < arraylen -2)
+							READ_NEXT_PAIR(array_table, v1, v2, i);
+					}
+				}
+				RT_TRACE(COMP_INIT, DBG_TRACE,
+				 ("The agctab_array_table[0] is "
+				  "%x Rtl818EEPHY_REGArray[1] is %x \n",
+				  array_table[i],
+				  array_table[i + 1]));
+		}
+	}
+	return true;
+}
+
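+/* Map a TX power-by-rate register (0xC20-0xC4C or 0xE20-0xE4C) to its
+ * rate-section index in the power-by-rate table. */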
+static u8 _rtl8821ae_get_rate_selection_index(u32 regaddr)
+{
+	u8 index = 0;
+
+	regaddr &= 0xFFF;
+	if (regaddr >= 0xC20 && regaddr <= 0xC4C)
+		index = (u8) ((regaddr - 0xC20) / 4);
+	else if (regaddr >= 0xE20 && regaddr <= 0xE4C)
+		index = (u8) ((regaddr - 0xE20) / 4);
+	else
+		RT_ASSERT(!COMP_INIT,
+			("Invalid RegAddr 0x%x in"
+			 "PHY_GetRateSectionIndexOfTxPowerByRate()\n",regaddr));
+
+	return index;
+}
+
+static void _rtl8821ae_store_tx_power_by_rate(struct ieee80211_hw *hw,
+					      u32 band, u32 rfpath,
+					      u32 txnum, u32 regaddr,
+					      u32 bitmask, u32 data)
+{
+	struct rtl_priv *rtlpriv = rtl_priv(hw);
+	struct rtl_phy *rtlphy = &(rtlpriv->phy);
+	u8 rate_section = _rtl8821ae_get_rate_selection_index(regaddr);
+
+	if (band != BAND_ON_2_4G && band != BAND_ON_5G)
+		RT_TRACE(COMP_INIT, DBG_WARNING, ("Invalid Band %d\n", band));
+
+	if (rfpath > MAX_RF_PATH)
+		RT_TRACE(COMP_INIT, DBG_WARNING, ("Invalid RfPath %d\n", rfpath));
+
+	if (txnum > MAX_RF_PATH)
+		RT_TRACE(COMP_INIT, DBG_WARNING, ("Invalid TxNum %d\n", txnum ) );
+
+	rtlphy->tx_power_by_rate_offset[band][rfpath][txnum][rate_section] = data;
+	RT_TRACE(COMP_INIT, DBG_WARNING,( "pHalData->TxPwrByRateOffset[Band %d][RfPath %d][TxNum %d][RateSection %d] = 0x%x\n",
+			band, rfpath, txnum, rate_section, rtlphy->tx_power_by_rate_offset[band][rfpath][txnum][rate_section]));
+
+}
+
+static bool _rtl8812ae_phy_config_bb_with_pgheaderfile(struct ieee80211_hw *hw,
+							u8 configtype)
+{
+	struct rtl_priv *rtlpriv = rtl_priv(hw);
+	int i;
+	u32 *phy_regarray_table_pg;
+	u16 phy_regarray_pg_len;
+	u32 v1, v2, v3, v4, v5, v6;
+
+	phy_regarray_pg_len = RTL8812AEPHY_REG_ARRAY_PGLEN;
+	phy_regarray_table_pg = RTL8812AE_PHY_REG_ARRAY_PG;
+
+	if (configtype == BASEBAND_CONFIG_PHY_REG) {
+		for (i = 0; i < phy_regarray_pg_len; i += 6) {
+			v1 = phy_regarray_table_pg[i];
+			v2 = phy_regarray_table_pg[i+1];
+			v3 = phy_regarray_table_pg[i+2];
+			v4 = phy_regarray_table_pg[i+3];
+			v5 = phy_regarray_table_pg[i+4];
+			v6 = phy_regarray_table_pg[i+5];
+
+			if (v1<0xCDCDCDCD) {
+				if ( (v4 == 0xfe) || (v4 == 0xffe))
+					mdelay(50);
+				else
+					/*_rtl8821ae_store_pwrIndex_diffrate_offset*/
+					_rtl8821ae_store_tx_power_by_rate(hw, v1, v2, v3, v4, v5, v6);
+				continue;
+			} else {
+				if (!_rtl8821ae_check_condition(hw, v1)) {
+					/*don't need the hw_body*/
+					i += 2; /* skip the pair of expression*/
+					v1 = phy_regarray_table_pg[i];
+					v2 = phy_regarray_table_pg[i+1];
+					v3 = phy_regarray_table_pg[i+2];
+					while (v2 != 0xDEAD) {
+						i += 3;
+						v1 = phy_regarray_table_pg[i];
+						v2 = phy_regarray_table_pg[i+1];
+						v3 = phy_regarray_table_pg[i+2];
+					}
+				}
+			}
+		}
+	} else {
+
+		RT_TRACE(COMP_SEND, DBG_TRACE,
+			 ("configtype != BaseBand_Config_PHY_REG\n"));
+	}
+	return true;
+}
+
+static bool _rtl8821ae_phy_config_bb_with_pgheaderfile(struct ieee80211_hw *hw,
+							u8 configtype)
+{
+	struct rtl_priv *rtlpriv = rtl_priv(hw);
+	int i;
+	u32 *phy_regarray_table_pg;
+	u16 phy_regarray_pg_len;
+	u32 v1, v2, v3, v4, v5, v6;
+
+	phy_regarray_pg_len = RTL8821AEPHY_REG_ARRAY_PGLEN;
+	phy_regarray_table_pg = RTL8821AE_PHY_REG_ARRAY_PG;
+
+	if (configtype == BASEBAND_CONFIG_PHY_REG) {
+		for (i = 0; i < phy_regarray_pg_len; i += 6) {
+			v1 = phy_regarray_table_pg[i];
+			v2 = phy_regarray_table_pg[i+1];
+			v3 = phy_regarray_table_pg[i+2];
+			v4 = phy_regarray_table_pg[i+3];
+			v5 = phy_regarray_table_pg[i+4];
+			v6 = phy_regarray_table_pg[i+5];
+
+			if (v1<0xCDCDCDCD) {
+				if (v4 == 0xfe)
+					mdelay(50);
+				else if (v4 == 0xfd)
+					mdelay(5);
+				else if (v4 == 0xfc)
+					mdelay(1);
+				else if (v4 == 0xfb)
+					udelay(50);
+				else if (v4 == 0xfa)
+					udelay(5);
+				else if (v4 == 0xf9)
+					udelay(1);
+
+				/*_rtl8821ae_store_pwrIndex_diffrate_offset*/
+				_rtl8821ae_store_tx_power_by_rate(hw, v1, v2, v3, v4, v5, v6);
+				continue;
+			} else {
+				if (!_rtl8821ae_check_condition(hw, v1)) {
+					/*don't need the hw_body*/
+					i += 2; /* skip the pair of expression*/
+					v1 = phy_regarray_table_pg[i];
+					v2 = phy_regarray_table_pg[i+1];
+					v3 = phy_regarray_table_pg[i+2];
+					while (v2 != 0xDEAD) {
+						i += 3;
+						v1 = phy_regarray_table_pg[i];
+						v2 = phy_regarray_table_pg[i+1];
+						v3 = phy_regarray_table_pg[i+2];
+					}
+				}
+			}
+		}
+	} else {
+
+		RT_TRACE(COMP_SEND, DBG_TRACE,
+			 ("configtype != BaseBand_Config_PHY_REG\n"));
+	}
+	return true;
+}
+
+bool rtl8812ae_phy_config_rf_with_headerfile(struct ieee80211_hw *hw,
+					     enum radio_path rfpath)
+{
+#define READ_NEXT_RF_PAIR_8812(radioa_array_table, v1, v2, i) \
+	do { i += 2; v1 = radioa_array_table[i]; v2 = radioa_array_table[i+1]; } while (0)
+
+	int i;
+	bool rtstatus = true;
+	u32 *radioa_array_table_a, *radioa_array_table_b;
+	u16 radioa_arraylen_a, radioa_arraylen_b;
+	struct rtl_priv *rtlpriv = rtl_priv(hw);
+	u32 v1 = 0, v2 = 0;
+
+	radioa_arraylen_a = RTL8812AE_RADIOA_1TARRAYLEN;
+	radioa_array_table_a= RTL8812AE_RADIOA_ARRAY;
+	radioa_arraylen_b= RTL8812AE_RADIOB_1TARRAYLEN;
+	radioa_array_table_b = RTL8812AE_RADIOB_ARRAY;
+	RT_TRACE(COMP_INIT, DBG_LOUD,
+		 ("Radio_A:RTL8821AE_RADIOA_ARRAY %d\n",radioa_arraylen_a));
+	RT_TRACE(COMP_INIT, DBG_LOUD, ("Radio No %x\n", rfpath));
+	rtstatus = true;
+	switch (rfpath) {
+	case RF90_PATH_A:
+		for (i = 0; i < radioa_arraylen_a; i = i + 2) {
+			v1 = radioa_array_table_a[i];
+			v2 = radioa_array_table_a[i+1];
+			if (v1<0xcdcdcdcd) {
+				_rtl8821ae_config_rf_radio_a(hw,v1,v2);
+				continue;
+			}else{/*This line is the start line of branch.*/
+				if(!_rtl8821ae_check_condition(hw,v1)){
+					/*Discard the following (offset, data) pairs*/
+					READ_NEXT_RF_PAIR_8812(radioa_array_table_a,v1, v2, i);
+					while (v2 != 0xDEAD &&
+					       v2 != 0xCDEF &&
+					       v2 != 0xCDCD && i < radioa_arraylen_a-2)
+					    READ_NEXT_RF_PAIR_8812(radioa_array_table_a,v1, v2, i);
+
+					i -= 2; /* prevent from for-loop += 2*/
+				} else {/*Configure matched pairs and skip to end of if-else.*/
+					READ_NEXT_RF_PAIR_8812(radioa_array_table_a,v1, v2, i);
+					while (v2 != 0xDEAD &&
+					       v2 != 0xCDEF &&
+					       v2 != 0xCDCD && i < radioa_arraylen_a -2) {
+						_rtl8821ae_config_rf_radio_a(hw,v1,v2);
+						READ_NEXT_RF_PAIR_8812(radioa_array_table_a,v1, v2, i);
+					}
+
+					while (v2 != 0xDEAD && i < radioa_arraylen_a-2)
+						READ_NEXT_RF_PAIR_8812(radioa_array_table_a,v1, v2, i);
+				}
+			}
+		}
+		break;
+	case RF90_PATH_B:
+		for (i = 0; i < radioa_arraylen_b; i = i + 2) {
+			v1 = radioa_array_table_b[i];
+			v2 = radioa_array_table_b[i+1];
+			if (v1<0xcdcdcdcd) {
+				_rtl8821ae_config_rf_radio_b(hw,v1,v2);
+				continue;
+			}else{/*This line is the start line of branch.*/
+				if(!_rtl8821ae_check_condition(hw,v1)){
+					/*Discard the following (offset, data) pairs*/
+					READ_NEXT_RF_PAIR_8812(radioa_array_table_b,v1, v2, i);
+					while (v2 != 0xDEAD &&
+					       v2 != 0xCDEF &&
+					       v2 != 0xCDCD && i < radioa_arraylen_b-2)
+					    READ_NEXT_RF_PAIR_8812(radioa_array_table_b,v1, v2, i);
+
+					i -= 2; /* prevent from for-loop += 2*/
+				} else {/*Configure matched pairs and skip to end of if-else.*/
+					READ_NEXT_RF_PAIR_8812(radioa_array_table_b,v1, v2, i);
+					while (v2 != 0xDEAD &&
+					       v2 != 0xCDEF &&
+					       v2 != 0xCDCD && i < radioa_arraylen_b-2) {
+						_rtl8821ae_config_rf_radio_b(hw,v1,v2);
+						READ_NEXT_RF_PAIR_8812(radioa_array_table_b,v1, v2, i);
+					}
+
+					while (v2 != 0xDEAD && i < radioa_arraylen_b-2)
+						READ_NEXT_RF_PAIR_8812(radioa_array_table_b,v1, v2, i);
+				}
+			}
+		}
+		break;
+	case RF90_PATH_C:
+		RT_TRACE(COMP_ERR, DBG_EMERG,
+			 ("switch case not process \n"));
+		break;
+	case RF90_PATH_D:
+		RT_TRACE(COMP_ERR, DBG_EMERG,
+			 ("switch case not process \n"));
+		break;
+	}
+	return true;
+}
+
+
+bool rtl8821ae_phy_config_rf_with_headerfile(struct ieee80211_hw *hw,
+					     enum radio_path rfpath)
+{
+#define READ_NEXT_RF_PAIR(v1, v2, i) \
+	do { i += 2; v1 = radioa_array_table[i]; v2 = radioa_array_table[i+1]; } while (0)
+
+	int i;
+	bool rtstatus = true;
+	u32 *radioa_array_table;
+	u16 radioa_arraylen;
+	struct rtl_priv *rtlpriv = rtl_priv(hw);
+	//struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
+	u32 v1 = 0, v2 = 0;
+
+	radioa_arraylen = RTL8821AE_RADIOA_1TARRAYLEN;
+	radioa_array_table = RTL8821AE_RADIOA_ARRAY;
+	RT_TRACE(COMP_INIT, DBG_LOUD,
+		 ("Radio_A:RTL8821AE_RADIOA_ARRAY %d\n",radioa_arraylen));
+	RT_TRACE(COMP_INIT, DBG_LOUD, ("Radio No %x\n", rfpath));
+	rtstatus = true;
+	switch (rfpath) {
+	case RF90_PATH_A:
+		for (i = 0; i < radioa_arraylen; i = i + 2) {
+			v1 = radioa_array_table[i];
+			v2 = radioa_array_table[i+1];
+			if (v1<0xcdcdcdcd) {
+				_rtl8821ae_config_rf_radio_a(hw,v1,v2);
+			}else{/*This line is the start line of branch.*/
+				if(!_rtl8821ae_check_condition(hw,v1)){
+					/*Discard the following (offset, data) pairs*/
+					READ_NEXT_RF_PAIR(v1, v2, i);
+					while (v2 != 0xDEAD &&
+					       v2 != 0xCDEF &&
+					       v2 != 0xCDCD && i < radioa_arraylen -2)
+					    READ_NEXT_RF_PAIR(v1, v2, i);
+
+					i -= 2; /* prevent from for-loop += 2*/
+				} else {/*Configure matched pairs and skip to end of if-else.*/
+					READ_NEXT_RF_PAIR(v1, v2, i);
+					while (v2 != 0xDEAD &&
+					       v2 != 0xCDEF &&
+					       v2 != 0xCDCD && i < radioa_arraylen -2) {
+						_rtl8821ae_config_rf_radio_a(hw,v1,v2);
+						READ_NEXT_RF_PAIR(v1, v2, i);
+					}
+
+					while (v2 != 0xDEAD && i < radioa_arraylen -2)
+						READ_NEXT_RF_PAIR(v1, v2, i);
+				}
+			}
+		}
+		break;
+
+	case RF90_PATH_B:
+		RT_TRACE(COMP_ERR, DBG_EMERG,
+			 ("switch case not process \n"));
+		break;
+	case RF90_PATH_C:
+		RT_TRACE(COMP_ERR, DBG_EMERG,
+			 ("switch case not process \n"));
+		break;
+	case RF90_PATH_D:
+		RT_TRACE(COMP_ERR, DBG_EMERG,
+			 ("switch case not process \n"));
+		break;
+	}
+	return true;
+}
+
+void rtl8821ae_phy_get_hw_reg_originalvalue(struct ieee80211_hw *hw)
+{
+	struct rtl_priv *rtlpriv = rtl_priv(hw);
+	struct rtl_phy *rtlphy = &(rtlpriv->phy);
+
+	rtlphy->default_initialgain[0] =
+	    (u8) rtl_get_bbreg(hw, ROFDM0_XAAGCCORE1, MASKBYTE0);
+	rtlphy->default_initialgain[1] =
+	    (u8) rtl_get_bbreg(hw, ROFDM0_XBAGCCORE1, MASKBYTE0);
+	rtlphy->default_initialgain[2] =
+	    (u8) rtl_get_bbreg(hw, ROFDM0_XCAGCCORE1, MASKBYTE0);
+	rtlphy->default_initialgain[3] =
+	    (u8) rtl_get_bbreg(hw, ROFDM0_XDAGCCORE1, MASKBYTE0);
+
+	RT_TRACE(COMP_INIT, DBG_TRACE,
+		 ("Default initial gain (c50=0x%x, "
+		  "c58=0x%x, c60=0x%x, c68=0x%x \n",
+		  rtlphy->default_initialgain[0],
+		  rtlphy->default_initialgain[1],
+		  rtlphy->default_initialgain[2],
+		  rtlphy->default_initialgain[3]));
+
+	rtlphy->framesync = (u8) rtl_get_bbreg(hw,
+					       ROFDM0_RXDETECTOR3, MASKBYTE0);
+	rtlphy->framesync_c34 = rtl_get_bbreg(hw,
+					      ROFDM0_RXDETECTOR2, MASKDWORD);
+
+	RT_TRACE(COMP_INIT, DBG_TRACE,
+		 ("Default framesync (0x%x) = 0x%x \n",
+		  ROFDM0_RXDETECTOR3, rtlphy->framesync));
+}
+
+static void _rtl8821ae_phy_init_bb_rf_register_definition(struct ieee80211_hw *hw)
+{
+	struct rtl_priv *rtlpriv = rtl_priv(hw);
+	struct rtl_phy *rtlphy = &(rtlpriv->phy);
+
+	rtlphy->phyreg_def[RF90_PATH_A].rfintfs = RFPGA0_XAB_RFINTERFACESW;
+	rtlphy->phyreg_def[RF90_PATH_B].rfintfs = RFPGA0_XAB_RFINTERFACESW;
+
+	rtlphy->phyreg_def[RF90_PATH_A].rfintfo = RFPGA0_XA_RFINTERFACEOE;
+	rtlphy->phyreg_def[RF90_PATH_B].rfintfo = RFPGA0_XB_RFINTERFACEOE;
+
+	rtlphy->phyreg_def[RF90_PATH_A].rfintfe = RFPGA0_XA_RFINTERFACEOE;
+	rtlphy->phyreg_def[RF90_PATH_B].rfintfe = RFPGA0_XB_RFINTERFACEOE;
+
+	rtlphy->phyreg_def[RF90_PATH_A].rf3wire_offset = RA_LSSIWRITE_8821A;
+	rtlphy->phyreg_def[RF90_PATH_B].rf3wire_offset = RB_LSSIWRITE_8821A;
+
+	rtlphy->phyreg_def[RF90_PATH_A].rfhssi_para2 = RHSSIREAD_8821AE;
+	rtlphy->phyreg_def[RF90_PATH_B].rfhssi_para2 = RHSSIREAD_8821AE;
+
+	rtlphy->phyreg_def[RF90_PATH_A].rflssi_readback = RA_SIREAD_8821A;
+	rtlphy->phyreg_def[RF90_PATH_B].rflssi_readback = RB_SIREAD_8821A;
+
+	rtlphy->phyreg_def[RF90_PATH_A].rflssi_readbackpi = RA_PIREAD_8821A;
+	rtlphy->phyreg_def[RF90_PATH_B].rflssi_readbackpi = RB_PIREAD_8821A;
+}
+
+void rtl8821ae_phy_get_txpower_level(struct ieee80211_hw *hw, long *powerlevel)
+{
+	struct rtl_priv *rtlpriv = rtl_priv(hw);
+	struct rtl_phy *rtlphy = &(rtlpriv->phy);
+	u8 txpwr_level;
+	long txpwr_dbm;
+
+	txpwr_level = rtlphy->cur_cck_txpwridx;
+	txpwr_dbm = _rtl8821ae_phy_txpwr_idx_to_dbm(hw,
+						 WIRELESS_MODE_B, txpwr_level);
+	txpwr_level = rtlphy->cur_ofdm24g_txpwridx;
+	if (_rtl8821ae_phy_txpwr_idx_to_dbm(hw,
+					 WIRELESS_MODE_G,
+					 txpwr_level) > txpwr_dbm)
+		txpwr_dbm =
+		    _rtl8821ae_phy_txpwr_idx_to_dbm(hw, WIRELESS_MODE_G,
+						 txpwr_level);
+	txpwr_level = rtlphy->cur_ofdm24g_txpwridx;
+	if (_rtl8821ae_phy_txpwr_idx_to_dbm(hw,
+					 WIRELESS_MODE_N_24G,
+					 txpwr_level) > txpwr_dbm)
+		txpwr_dbm =
+		    _rtl8821ae_phy_txpwr_idx_to_dbm(hw, WIRELESS_MODE_N_24G,
+						 txpwr_level);
+	*powerlevel = txpwr_dbm;
+}
+
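+/* Return true for a 2.4 GHz channel and fill *chnl_index; 5 GHz channels are
+ * looked up in the channel_5g table. */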
+static bool _rtl8821ae_phy_get_chnl_index(u8 channel, u8 *chnl_index)
+{
+	u8 channel_5g[CHANNEL_MAX_NUMBER_5G] = {
+		36, 38, 40, 42, 44, 46, 48, 50, 52, 54, 56, 58, 60, 62, 64,
+		100, 102, 104, 106, 108, 110, 112, 114, 116, 118, 120, 122,
+		124, 126, 128, 130, 132, 134, 136, 138, 140, 142, 144, 149,
+		151, 153, 155, 157, 159, 161, 163, 165, 167, 168, 169, 171,
+		173, 175, 177};
+	u8 i = 0;
+	bool in_24g = true;
+
+	if (channel <= 14) {
+		in_24g = true;
+		*chnl_index = channel - 1;
+	} else {
+		in_24g = false;
+
+		for (i = 0; i < sizeof(channel_5g) / sizeof(u8); ++i) {
+			if (channel_5g[i] == channel) {
+				*chnl_index = i;
+				return in_24g;
+			}
+		}
+	}
+	return in_24g;
+}
+
+static char _rtl8821ae_phy_get_ratesection_intxpower_byrate(u8 path, u8 rate)
+{
+	char rate_section = 0;
+	switch (rate) {
+	case DESC_RATE1M:
+	case DESC_RATE2M:
+	case DESC_RATE5_5M:
+	case DESC_RATE11M:
+		rate_section = 0;
+		break;
+
+	case DESC_RATE6M:
+	case DESC_RATE9M:
+	case DESC_RATE12M:
+	case DESC_RATE18M:
+		rate_section = 1;
+		break;
+
+	case DESC_RATE24M:
+	case DESC_RATE36M:
+	case DESC_RATE48M:
+	case DESC_RATE54M:
+		rate_section = 2;
+		break;
+
+	case DESC_RATEMCS0:
+	case DESC_RATEMCS1:
+	case DESC_RATEMCS2:
+	case DESC_RATEMCS3:
+		rate_section = 3;
+		break;
+
+	case DESC_RATEMCS4:
+	case DESC_RATEMCS5:
+	case DESC_RATEMCS6:
+	case DESC_RATEMCS7:
+		rate_section = 4;
+		break;
+
+	case DESC_RATEMCS8:
+	case DESC_RATEMCS9:
+	case DESC_RATEMCS10:
+	case DESC_RATEMCS11:
+		rate_section = 5;
+		break;
+
+	case DESC_RATEMCS12:
+	case DESC_RATEMCS13:
+	case DESC_RATEMCS14:
+	case DESC_RATEMCS15:
+		rate_section = 6;
+		break;
+
+	case DESC_RATEVHT1SS_MCS0:
+	case DESC_RATEVHT1SS_MCS1:
+	case DESC_RATEVHT1SS_MCS2:
+	case DESC_RATEVHT1SS_MCS3:
+		rate_section = 7;
+		break;
+
+	case DESC_RATEVHT1SS_MCS4:
+	case DESC_RATEVHT1SS_MCS5:
+	case DESC_RATEVHT1SS_MCS6:
+	case DESC_RATEVHT1SS_MCS7:
+		rate_section = 8;
+		break;
+
+	case DESC_RATEVHT1SS_MCS8:
+	case DESC_RATEVHT1SS_MCS9:
+	case DESC_RATEVHT2SS_MCS0:
+	case DESC_RATEVHT2SS_MCS1:
+		rate_section = 9;
+		break;
+
+	case DESC_RATEVHT2SS_MCS2:
+	case DESC_RATEVHT2SS_MCS3:
+	case DESC_RATEVHT2SS_MCS4:
+	case DESC_RATEVHT2SS_MCS5:
+		rate_section = 10;
+		break;
+
+	case DESC_RATEVHT2SS_MCS6:
+	case DESC_RATEVHT2SS_MCS7:
+	case DESC_RATEVHT2SS_MCS8:
+	case DESC_RATEVHT2SS_MCS9:
+		rate_section = 11;
+		break;
+
+	default:
+		RT_ASSERT(true, ("Rate_Section is Illegal\n"));
+		break;
+	}
+
+	return rate_section;
+}
+
+static char _rtl8821ae_phy_get_txpower_by_rate(struct ieee80211_hw *hw,
+					       u8 band, u8 path, u8 rate)
+{
+	struct rtl_priv *rtlpriv = rtl_priv(hw);
+	struct rtl_phy *rtlphy = &(rtlpriv->phy);
+	u8 shift = 0, rate_section, tx_num;
+	char tx_pwr_diff = 0;
+
+	rate_section = _rtl8821ae_phy_get_ratesection_intxpower_byrate(path, rate);
+	tx_num = RF_TX_NUM_NONIMPLEMENT;
+
+	if (tx_num == RF_TX_NUM_NONIMPLEMENT) {
+		if ((rate >= DESC_RATEMCS8 && rate <= DESC_RATEMCS15 ) ||
+			(rate >= DESC_RATEVHT2SS_MCS2 && rate <= DESC_RATEVHT2SS_MCS9))
+			 tx_num = RF_2TX;
+		else
+			tx_num = RF_1TX;
+	}
+
+	switch (rate) {
+	case DESC_RATE1M:	shift = 0;		break;
+	case DESC_RATE2M:	shift = 8;		break;
+	case DESC_RATE5_5M:	shift = 16;		break;
+	case DESC_RATE11M:	shift = 24;		break;
+
+	case DESC_RATE6M:	shift = 0;		break;
+	case DESC_RATE9M:	shift = 8;      break;
+	case DESC_RATE12M:	shift = 16;     break;
+	case DESC_RATE18M:	shift = 24;     break;
+
+	case DESC_RATE24M:	shift = 0;    	break;
+	case DESC_RATE36M:	shift = 8;      break;
+	case DESC_RATE48M:	shift = 16;     break;
+	case DESC_RATE54M:	shift = 24;     break;
+
+	case DESC_RATEMCS0:	shift = 0; 		break;
+	case DESC_RATEMCS1:	shift = 8;      break;
+	case DESC_RATEMCS2:	shift = 16;     break;
+	case DESC_RATEMCS3:	shift = 24;     break;
+
+	case DESC_RATEMCS4:	shift = 0; 		break;
+	case DESC_RATEMCS5:	shift = 8;      break;
+	case DESC_RATEMCS6:	shift = 16;     break;
+	case DESC_RATEMCS7:	shift = 24;     break;
+
+	case DESC_RATEMCS8:	shift = 0; 		break;
+	case DESC_RATEMCS9:	shift = 8;      break;
+	case DESC_RATEMCS10:	shift = 16;     break;
+	case DESC_RATEMCS11:	shift = 24;     break;
+
+	case DESC_RATEMCS12:	shift = 0; 		break;
+	case DESC_RATEMCS13:	shift = 8;      break;
+	case DESC_RATEMCS14:	shift = 16;     break;
+	case DESC_RATEMCS15:	shift = 24;     break;
+
+	case DESC_RATEVHT1SS_MCS0:	shift = 0; 		break;
+	case DESC_RATEVHT1SS_MCS1:	shift = 8;      break;
+	case DESC_RATEVHT1SS_MCS2:	shift = 16;     break;
+	case DESC_RATEVHT1SS_MCS3:	shift = 24;     break;
+
+	case DESC_RATEVHT1SS_MCS4:	shift = 0; 		break;
+	case DESC_RATEVHT1SS_MCS5:	shift = 8;      break;
+	case DESC_RATEVHT1SS_MCS6:	shift = 16;     break;
+	case DESC_RATEVHT1SS_MCS7:	shift = 24;     break;
+
+	case DESC_RATEVHT1SS_MCS8:	shift = 0; 		break;
+	case DESC_RATEVHT1SS_MCS9:	shift = 8;      break;
+	case DESC_RATEVHT2SS_MCS0:	shift = 16;     break;
+	case DESC_RATEVHT2SS_MCS1:	shift = 24;     break;
+
+	case DESC_RATEVHT2SS_MCS2:	shift = 0; 		break;
+	case DESC_RATEVHT2SS_MCS3:	shift = 8;      break;
+	case DESC_RATEVHT2SS_MCS4:	shift = 16;     break;
+	case DESC_RATEVHT2SS_MCS5:	shift = 24;     break;
+
+	case DESC_RATEVHT2SS_MCS6:	shift = 0; 		break;
+	case DESC_RATEVHT2SS_MCS7:	shift = 8;      break;
+	case DESC_RATEVHT2SS_MCS8:	shift = 16;     break;
+	case DESC_RATEVHT2SS_MCS9:	shift = 24;     break;
+
+	default:
+		RT_ASSERT(true, ("Rate_Section is Illegal\n"));
+		break;
+	}
+
+	tx_pwr_diff = (u8) (rtlphy->tx_power_by_rate_offset[band][path][tx_num][rate_section] >> shift) & 0xff;
+
+	return	tx_pwr_diff;
+}
+
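+/*
+ * Compose the final TX power index for one path/rate/bandwidth/channel from
+ * the efuse base power, the bandwidth diff, the per-rate offset and the
+ * power-tracking remnant, clamped to MAX_POWER_INDEX.
+ */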
+static u8 _rtl8821ae_get_txpower_index(struct ieee80211_hw *hw, u8 path,
+				       u8 rate, u8 bandwidth, u8 channel)
+{
+	struct rtl_priv *rtlpriv = rtl_priv(hw);
+	struct rtl_hal *rtlhal = rtl_hal(rtlpriv);
+	struct rtl_efuse *rtlefuse = rtl_efuse(rtl_priv(hw));
+	u8 index = (channel - 1);
+	u8 txpower = 0;
+	bool in_24g = false;
+	char powerdiff_byrate = 0;
+
+	if (((rtlhal->current_bandtype == BAND_ON_2_4G) && (channel > 14 || channel < 1)) ||
+		((rtlhal->current_bandtype == BAND_ON_5G) && (channel <= 14))) {
+		index = 0;
+		RT_TRACE(COMP_POWER_TRACKING, DBG_LOUD, ("Illegal channel!!\n"));
+	}
+
+	in_24g = _rtl8821ae_phy_get_chnl_index(channel, &index);
+	if (in_24g) {
+		if (RX_HAL_IS_CCK_RATE(rate))
+			txpower = rtlefuse->txpwrlevel_cck[path][index];
+		else if ( DESC_RATE6M <= rate )
+			txpower = rtlefuse->txpwrlevel_ht40_1s[path][index];
+		else
+			RT_TRACE(COMP_POWER_TRACKING, DBG_LOUD, ("invalid rate\n"));
+
+		if (DESC_RATE6M <= rate && rate <= DESC_RATE54M && !RX_HAL_IS_CCK_RATE(rate))
+			txpower += rtlefuse->txpwr_legacyhtdiff[path][TX_1S];
+
+		if (bandwidth == HT_CHANNEL_WIDTH_20) {
+			if ((DESC_RATEMCS0 <= rate && rate <= DESC_RATEMCS15) ||
+				(DESC_RATEVHT1SS_MCS0 <= rate && rate <= DESC_RATEVHT2SS_MCS9))
+				txpower += rtlefuse->txpwr_ht20diff[path][TX_1S];
+			if ((DESC_RATEMCS8 <= rate && rate <= DESC_RATEMCS15) ||
+				(DESC_RATEVHT2SS_MCS0 <= rate && rate <= DESC_RATEVHT2SS_MCS9))
+				txpower += rtlefuse->txpwr_ht20diff[path][TX_2S];
+		} else if (bandwidth == HT_CHANNEL_WIDTH_20_40) {
+			if ((DESC_RATEMCS0 <= rate && rate <= DESC_RATEMCS15) ||
+				(DESC_RATEVHT1SS_MCS0 <= rate && rate <= DESC_RATEVHT2SS_MCS9))
+				txpower += rtlefuse->txpwr_ht40diff[path][TX_1S];
+			if ((DESC_RATEMCS8 <= rate && rate <= DESC_RATEMCS15) ||
+				(DESC_RATEVHT2SS_MCS0 <= rate && rate <= DESC_RATEVHT2SS_MCS9))
+				txpower += rtlefuse->txpwr_ht40diff[path][TX_2S];
+		} else if (bandwidth == HT_CHANNEL_WIDTH_80) {
+			if ((DESC_RATEMCS0 <= rate && rate <= DESC_RATEMCS15) ||
+				(DESC_RATEVHT1SS_MCS0 <= rate && rate <= DESC_RATEVHT2SS_MCS9))
+				txpower += rtlefuse->txpwr_ht40diff[path][TX_1S];
+			if ((DESC_RATEMCS8 <= rate && rate <= DESC_RATEMCS15) ||
+				(DESC_RATEVHT2SS_MCS0 <= rate && rate <= DESC_RATEVHT2SS_MCS9))
+				txpower += rtlefuse->txpwr_ht40diff[path][TX_2S];
+		}
+
+	} else {
+		if (DESC_RATE6M <= rate)
+			txpower = rtlefuse->txpwr_5g_bw40base[path][index];
+		else
+			RT_TRACE(COMP_POWER_TRACKING, DBG_WARNING, ("INVALID Rate.\n"));
+
+		if (DESC_RATE6M <= rate && rate <= DESC_RATE54M && !RX_HAL_IS_CCK_RATE(rate))
+			txpower += rtlefuse->txpwr_5g_ofdmdiff[path][TX_1S];
+
+		if (bandwidth == HT_CHANNEL_WIDTH_20) {
+			if ((DESC_RATEMCS0 <= rate && rate <= DESC_RATEMCS15) ||
+			    (DESC_RATEVHT1SS_MCS0 <= rate && rate <= DESC_RATEVHT2SS_MCS9))
+				txpower += rtlefuse->txpwr_5g_bw20diff[path][TX_1S];
+			if ((DESC_RATEMCS8 <= rate && rate <= DESC_RATEMCS15) ||
+			    (DESC_RATEVHT2SS_MCS0 <= rate && rate <= DESC_RATEVHT2SS_MCS9))
+				txpower += rtlefuse->txpwr_5g_bw20diff[path][TX_2S];
+		} else if (bandwidth == HT_CHANNEL_WIDTH_20_40) {
+			if ((DESC_RATEMCS0 <= rate && rate <= DESC_RATEMCS15) ||
+			    (DESC_RATEVHT1SS_MCS0 <= rate && rate <= DESC_RATEVHT2SS_MCS9))
+				txpower += rtlefuse->txpwr_5g_bw40diff[path][TX_1S];
+			if ((DESC_RATEMCS8 <= rate && rate <= DESC_RATEMCS15) ||
+			    (DESC_RATEVHT2SS_MCS0 <= rate && rate <= DESC_RATEVHT2SS_MCS9))
+				txpower += rtlefuse->txpwr_5g_bw40diff[path][TX_2S];
+		} else if (bandwidth == HT_CHANNEL_WIDTH_80) {
+			u8 channel_5g_80m[CHANNEL_MAX_NUMBER_5G_80M] = {42, 58, 106, 122, 138, 155, 171};
+			u8 i = 0;
+
+			for (i = 0; i < sizeof(channel_5g_80m) / sizeof(u8); ++i)
+				if (channel_5g_80m[i] == channel)
+					index = i;
+
+			if ((DESC_RATEMCS0 <= rate && rate <= DESC_RATEMCS15) ||
+			    (DESC_RATEVHT1SS_MCS0 <= rate && rate <= DESC_RATEVHT2SS_MCS9))
+				txpower = rtlefuse->txpwr_5g_bw80base[path][index] +
+					  rtlefuse->txpwr_5g_bw80diff[path][TX_1S];
+			if ((DESC_RATEMCS8 <= rate && rate <= DESC_RATEMCS15) ||
+			    (DESC_RATEVHT2SS_MCS0 <= rate && rate <= DESC_RATEVHT2SS_MCS9))
+				txpower = rtlefuse->txpwr_5g_bw80base[path][index] +
+					  rtlefuse->txpwr_5g_bw80diff[path][TX_1S] +
+					  rtlefuse->txpwr_5g_bw80diff[path][TX_2S];
+		}
+	}
+	if (rtlefuse->eeprom_regulatory != 2)
+		powerdiff_byrate = _rtl8821ae_phy_get_txpower_by_rate(hw,
+									(u8)(!in_24g), path, rate);
+
+	if (rate == DESC_RATEVHT1SS_MCS8 || rate == DESC_RATEVHT1SS_MCS9 ||
+		rate == DESC_RATEVHT2SS_MCS8 || rate == DESC_RATEVHT2SS_MCS9)
+		txpower -= powerdiff_byrate;
+	else
+		txpower += powerdiff_byrate;
+
+	if (rate > DESC_RATE11M)
+		txpower += rtlpriv->dm.remnant_ofdm_swing_idx[path];
+	else
+		txpower += rtlpriv->dm.remnant_cck_idx;
+
+	if (txpower > MAX_POWER_INDEX)
+		txpower = MAX_POWER_INDEX;
+
+	return txpower;
+}
+
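+/*
+ * Write one power index into the per-rate TXAGC byte of the path A/B
+ * baseband registers (four rates share each 32-bit register).
+ */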
+static void _rtl8821ae_phy_set_txpower_index(struct ieee80211_hw *hw,
+					     u8 power_index, u8 path, u8 rate)
+{
+	struct rtl_priv *rtlpriv = rtl_priv(hw);
+
+	if (path == RF90_PATH_A) {
+        switch (rate) {
+        case DESC_RATE1M:
+		rtl_set_bbreg(hw, RTXAGC_A_CCK11_CCK1, MASKBYTE0, power_index);
+		break;
+        case DESC_RATE2M:
+		rtl_set_bbreg(hw, RTXAGC_A_CCK11_CCK1, MASKBYTE1, power_index);
+		break;
+        case DESC_RATE5_5M:
+		rtl_set_bbreg(hw, RTXAGC_A_CCK11_CCK1, MASKBYTE2, power_index);
+		break;
+        case DESC_RATE11M:
+		rtl_set_bbreg(hw, RTXAGC_A_CCK11_CCK1, MASKBYTE3, power_index);
+		break;
+
+        case DESC_RATE6M:
+		rtl_set_bbreg(hw, RTXAGC_A_OFDM18_OFDM6, MASKBYTE0, power_index);
+		break;
+        case DESC_RATE9M:
+		rtl_set_bbreg(hw, RTXAGC_A_OFDM18_OFDM6, MASKBYTE1, power_index);
+		break;
+        case DESC_RATE12M:
+		rtl_set_bbreg(hw, RTXAGC_A_OFDM18_OFDM6, MASKBYTE2, power_index);
+		break;
+        case DESC_RATE18M:
+		rtl_set_bbreg(hw, RTXAGC_A_OFDM18_OFDM6, MASKBYTE3, power_index);
+		break;
+
+        case DESC_RATE24M:
+		rtl_set_bbreg(hw, RTXAGC_A_OFDM54_OFDM24, MASKBYTE0, power_index);
+		break;
+        case DESC_RATE36M:
+		rtl_set_bbreg(hw, RTXAGC_A_OFDM54_OFDM24, MASKBYTE1, power_index);
+		break;
+        case DESC_RATE48M:
+		rtl_set_bbreg(hw, RTXAGC_A_OFDM54_OFDM24, MASKBYTE2, power_index);
+		break;
+        case DESC_RATE54M:
+		rtl_set_bbreg(hw, RTXAGC_A_OFDM54_OFDM24, MASKBYTE3, power_index);
+		break;
+
+        case DESC_RATEMCS0:
+		rtl_set_bbreg(hw, RTXAGC_A_MCS03_MCS00, MASKBYTE0, power_index);
+		break;
+        case DESC_RATEMCS1:
+		rtl_set_bbreg(hw, RTXAGC_A_MCS03_MCS00, MASKBYTE1, power_index);
+		break;
+        case DESC_RATEMCS2:
+		rtl_set_bbreg(hw, RTXAGC_A_MCS03_MCS00, MASKBYTE2, power_index);
+		break;
+        case DESC_RATEMCS3:
+		rtl_set_bbreg(hw, RTXAGC_A_MCS03_MCS00, MASKBYTE3, power_index);
+		break;
+
+        case DESC_RATEMCS4:
+		rtl_set_bbreg(hw, RTXAGC_A_MCS07_MCS04, MASKBYTE0, power_index);
+		break;
+        case DESC_RATEMCS5:
+		rtl_set_bbreg(hw, RTXAGC_A_MCS07_MCS04, MASKBYTE1, power_index);
+		break;
+        case DESC_RATEMCS6:
+		rtl_set_bbreg(hw, RTXAGC_A_MCS07_MCS04, MASKBYTE2, power_index);
+		break;
+        case DESC_RATEMCS7:
+		rtl_set_bbreg(hw, RTXAGC_A_MCS07_MCS04, MASKBYTE3, power_index);
+		break;
+
+        case DESC_RATEMCS8:
+		rtl_set_bbreg(hw, RTXAGC_A_MCS11_MCS08, MASKBYTE0, power_index);
+		break;
+        case DESC_RATEMCS9:
+		rtl_set_bbreg(hw, RTXAGC_A_MCS11_MCS08, MASKBYTE1, power_index);
+		break;
+        case DESC_RATEMCS10:
+		rtl_set_bbreg(hw, RTXAGC_A_MCS11_MCS08, MASKBYTE2, power_index);
+		break;
+        case DESC_RATEMCS11:
+		rtl_set_bbreg(hw, RTXAGC_A_MCS11_MCS08, MASKBYTE3, power_index);
+		break;
+
+        case DESC_RATEMCS12:
+		rtl_set_bbreg(hw, RTXAGC_A_MCS15_MCS12, MASKBYTE0, power_index);
+		break;
+        case DESC_RATEMCS13:
+		rtl_set_bbreg(hw, RTXAGC_A_MCS15_MCS12, MASKBYTE1, power_index);
+		break;
+        case DESC_RATEMCS14:
+		rtl_set_bbreg(hw, RTXAGC_A_MCS15_MCS12, MASKBYTE2, power_index);
+		break;
+        case DESC_RATEMCS15:
+		rtl_set_bbreg(hw, RTXAGC_A_MCS15_MCS12, MASKBYTE3, power_index);
+		break;
+
+	case DESC_RATEVHT1SS_MCS0:
+		rtl_set_bbreg(hw, RTXAGC_A_NSS1INDEX3_NSS1INDEX0, MASKBYTE0, power_index);
+		break;
+	case DESC_RATEVHT1SS_MCS1:
+		rtl_set_bbreg(hw, RTXAGC_A_NSS1INDEX3_NSS1INDEX0, MASKBYTE1, power_index);
+		break;
+	case DESC_RATEVHT1SS_MCS2:
+		rtl_set_bbreg(hw, RTXAGC_A_NSS1INDEX3_NSS1INDEX0, MASKBYTE2, power_index);
+		break;
+	case DESC_RATEVHT1SS_MCS3:
+		rtl_set_bbreg(hw, RTXAGC_A_NSS1INDEX3_NSS1INDEX0, MASKBYTE3, power_index);
+		break;
+
+	case DESC_RATEVHT1SS_MCS4:
+		rtl_set_bbreg(hw, RTXAGC_A_NSS1INDEX7_NSS1INDEX4, MASKBYTE0, power_index);
+		break;
+	case DESC_RATEVHT1SS_MCS5:
+		rtl_set_bbreg(hw, RTXAGC_A_NSS1INDEX7_NSS1INDEX4, MASKBYTE1, power_index);
+		break;
+	case DESC_RATEVHT1SS_MCS6:
+		rtl_set_bbreg(hw, RTXAGC_A_NSS1INDEX7_NSS1INDEX4, MASKBYTE2, power_index);
+		break;
+	case DESC_RATEVHT1SS_MCS7:
+		rtl_set_bbreg(hw, RTXAGC_A_NSS1INDEX7_NSS1INDEX4, MASKBYTE3, power_index);
+		break;
+
+	case DESC_RATEVHT1SS_MCS8:
+		rtl_set_bbreg(hw, RTXAGC_A_NSS2INDEX1_NSS1INDEX8, MASKBYTE0, power_index);
+		break;
+	case DESC_RATEVHT1SS_MCS9:
+		rtl_set_bbreg(hw, RTXAGC_A_NSS2INDEX1_NSS1INDEX8, MASKBYTE1, power_index);
+		break;
+	case DESC_RATEVHT2SS_MCS0:
+		rtl_set_bbreg(hw, RTXAGC_A_NSS2INDEX1_NSS1INDEX8, MASKBYTE2, power_index);
+		break;
+	case DESC_RATEVHT2SS_MCS1:
+		rtl_set_bbreg(hw, RTXAGC_A_NSS2INDEX1_NSS1INDEX8, MASKBYTE3, power_index);
+		break;
+
+	case DESC_RATEVHT2SS_MCS2:
+		rtl_set_bbreg(hw, RTXAGC_A_NSS2INDEX5_NSS2INDEX2, MASKBYTE0, power_index);
+		break;
+	case DESC_RATEVHT2SS_MCS3:
+		rtl_set_bbreg(hw, RTXAGC_A_NSS2INDEX5_NSS2INDEX2, MASKBYTE1, power_index);
+		break;
+	case DESC_RATEVHT2SS_MCS4:
+		rtl_set_bbreg(hw, RTXAGC_A_NSS2INDEX5_NSS2INDEX2, MASKBYTE2, power_index);
+		break;
+	case DESC_RATEVHT2SS_MCS5:
+		rtl_set_bbreg(hw, RTXAGC_A_NSS2INDEX5_NSS2INDEX2, MASKBYTE3, power_index);
+		break;
+
+	case DESC_RATEVHT2SS_MCS6:
+		rtl_set_bbreg(hw, RTXAGC_A_NSS2INDEX9_NSS2INDEX6, MASKBYTE0, power_index);
+		break;
+	case DESC_RATEVHT2SS_MCS7:
+		rtl_set_bbreg(hw, RTXAGC_A_NSS2INDEX9_NSS2INDEX6, MASKBYTE1, power_index);
+		break;
+	case DESC_RATEVHT2SS_MCS8:
+		rtl_set_bbreg(hw, RTXAGC_A_NSS2INDEX9_NSS2INDEX6, MASKBYTE2, power_index);
+		break;
+	case DESC_RATEVHT2SS_MCS9:
+		rtl_set_bbreg(hw, RTXAGC_A_NSS2INDEX9_NSS2INDEX6, MASKBYTE3, power_index);
+		break;
+
+        default:
+			RT_TRACE(COMP_POWER, DBG_LOUD, ("Invalid Rate!!\n"));
+            break;
+        }
+    } else if (path == RF90_PATH_B) {
+	switch (rate) {
+	case DESC_RATE1M:
+		rtl_set_bbreg(hw, RTXAGC_B_CCK11_CCK1, MASKBYTE0, power_index);
+		break;
+	case DESC_RATE2M:
+		rtl_set_bbreg(hw, RTXAGC_B_CCK11_CCK1, MASKBYTE1, power_index);
+		break;
+	case DESC_RATE5_5M:
+		rtl_set_bbreg(hw, RTXAGC_B_CCK11_CCK1, MASKBYTE2, power_index);
+		break;
+	case DESC_RATE11M:
+		rtl_set_bbreg(hw, RTXAGC_B_CCK11_CCK1, MASKBYTE3, power_index);
+		break;
+
+	case DESC_RATE6M:
+		rtl_set_bbreg(hw, RTXAGC_B_OFDM18_OFDM6, MASKBYTE0, power_index);
+		break;
+	case DESC_RATE9M:
+		rtl_set_bbreg(hw, RTXAGC_B_OFDM18_OFDM6, MASKBYTE1, power_index);
+		break;
+	case DESC_RATE12M:
+		rtl_set_bbreg(hw, RTXAGC_B_OFDM18_OFDM6, MASKBYTE2, power_index);
+		break;
+	case DESC_RATE18M:
+		rtl_set_bbreg(hw, RTXAGC_B_OFDM18_OFDM6, MASKBYTE3, power_index);
+		break;
+
+	case DESC_RATE24M:
+		rtl_set_bbreg(hw, RTXAGC_B_OFDM54_OFDM24, MASKBYTE0, power_index);
+		break;
+	case DESC_RATE36M:
+		rtl_set_bbreg(hw, RTXAGC_B_OFDM54_OFDM24, MASKBYTE1, power_index);
+		break;
+	case DESC_RATE48M:
+		rtl_set_bbreg(hw, RTXAGC_B_OFDM54_OFDM24, MASKBYTE2, power_index);
+		break;
+	case DESC_RATE54M:
+		rtl_set_bbreg(hw, RTXAGC_B_OFDM54_OFDM24, MASKBYTE3, power_index);
+		break;
+
+	case DESC_RATEMCS0:
+		rtl_set_bbreg(hw, RTXAGC_B_MCS03_MCS00, MASKBYTE0, power_index);
+		break;
+	case DESC_RATEMCS1:
+		rtl_set_bbreg(hw, RTXAGC_B_MCS03_MCS00, MASKBYTE1, power_index);
+		break;
+	case DESC_RATEMCS2:
+		rtl_set_bbreg(hw, RTXAGC_B_MCS03_MCS00, MASKBYTE2, power_index);
+		break;
+	case DESC_RATEMCS3:
+		rtl_set_bbreg(hw, RTXAGC_B_MCS03_MCS00, MASKBYTE3, power_index);
+		break;
+
+	case DESC_RATEMCS4:
+		rtl_set_bbreg(hw, RTXAGC_B_MCS07_MCS04, MASKBYTE0, power_index);
+		break;
+	case DESC_RATEMCS5:
+		rtl_set_bbreg(hw, RTXAGC_B_MCS07_MCS04, MASKBYTE1, power_index);
+		break;
+	case DESC_RATEMCS6:
+		rtl_set_bbreg(hw, RTXAGC_B_MCS07_MCS04, MASKBYTE2, power_index);
+		break;
+	case DESC_RATEMCS7:
+		rtl_set_bbreg(hw, RTXAGC_B_MCS07_MCS04, MASKBYTE3, power_index);
+		break;
+
+	case DESC_RATEMCS8:
+		rtl_set_bbreg(hw, RTXAGC_B_MCS11_MCS08, MASKBYTE0, power_index);
+		break;
+	case DESC_RATEMCS9:
+		rtl_set_bbreg(hw, RTXAGC_B_MCS11_MCS08, MASKBYTE1, power_index);
+		break;
+	case DESC_RATEMCS10:
+		rtl_set_bbreg(hw, RTXAGC_B_MCS11_MCS08, MASKBYTE2, power_index);
+		break;
+	case DESC_RATEMCS11:
+		rtl_set_bbreg(hw, RTXAGC_B_MCS11_MCS08, MASKBYTE3, power_index);
+		break;
+
+	case DESC_RATEMCS12:
+		rtl_set_bbreg(hw, RTXAGC_B_MCS15_MCS12, MASKBYTE0, power_index);
+		break;
+	case DESC_RATEMCS13:
+		rtl_set_bbreg(hw, RTXAGC_B_MCS15_MCS12, MASKBYTE1, power_index);
+		break;
+	case DESC_RATEMCS14:
+		rtl_set_bbreg(hw, RTXAGC_B_MCS15_MCS12, MASKBYTE2, power_index);
+		break;
+	case DESC_RATEMCS15:
+		rtl_set_bbreg(hw, RTXAGC_B_MCS15_MCS12, MASKBYTE3, power_index);
+		break;
+
+	case DESC_RATEVHT1SS_MCS0:
+		rtl_set_bbreg(hw, RTXAGC_B_NSS1INDEX3_NSS1INDEX0, MASKBYTE0, power_index);
+		break;
+	case DESC_RATEVHT1SS_MCS1:
+		rtl_set_bbreg(hw, RTXAGC_B_NSS1INDEX3_NSS1INDEX0, MASKBYTE1, power_index);
+		break;
+	case DESC_RATEVHT1SS_MCS2:
+		rtl_set_bbreg(hw, RTXAGC_B_NSS1INDEX3_NSS1INDEX0, MASKBYTE2, power_index);
+		break;
+	case DESC_RATEVHT1SS_MCS3:
+		rtl_set_bbreg(hw, RTXAGC_B_NSS1INDEX3_NSS1INDEX0, MASKBYTE3, power_index);
+		break;
+
+	case DESC_RATEVHT1SS_MCS4:
+		rtl_set_bbreg(hw, RTXAGC_B_NSS1INDEX7_NSS1INDEX4, MASKBYTE0, power_index);
+		break;
+	case DESC_RATEVHT1SS_MCS5:
+		rtl_set_bbreg(hw, RTXAGC_B_NSS1INDEX7_NSS1INDEX4, MASKBYTE1, power_index);
+		break;
+	case DESC_RATEVHT1SS_MCS6:
+		rtl_set_bbreg(hw, RTXAGC_B_NSS1INDEX7_NSS1INDEX4, MASKBYTE2, power_index);
+		break;
+	case DESC_RATEVHT1SS_MCS7:
+		rtl_set_bbreg(hw, RTXAGC_B_NSS1INDEX7_NSS1INDEX4, MASKBYTE3, power_index);
+		break;
+
+	case DESC_RATEVHT1SS_MCS8:
+		rtl_set_bbreg(hw, RTXAGC_B_NSS2INDEX1_NSS1INDEX8, MASKBYTE0, power_index);
+		break;
+	case DESC_RATEVHT1SS_MCS9:
+		rtl_set_bbreg(hw, RTXAGC_B_NSS2INDEX1_NSS1INDEX8, MASKBYTE1, power_index);
+		break;
+	case DESC_RATEVHT2SS_MCS0:
+		rtl_set_bbreg(hw, RTXAGC_B_NSS2INDEX1_NSS1INDEX8, MASKBYTE2, power_index);
+		break;
+	case DESC_RATEVHT2SS_MCS1:
+		rtl_set_bbreg(hw, RTXAGC_B_NSS2INDEX1_NSS1INDEX8, MASKBYTE3, power_index);
+		break;
+
+	case DESC_RATEVHT2SS_MCS2:
+		rtl_set_bbreg(hw, RTXAGC_B_NSS2INDEX5_NSS2INDEX2, MASKBYTE0, power_index);
+		break;
+	case DESC_RATEVHT2SS_MCS3:
+		rtl_set_bbreg(hw, RTXAGC_B_NSS2INDEX5_NSS2INDEX2, MASKBYTE1, power_index);
+		break;
+	case DESC_RATEVHT2SS_MCS4:
+		rtl_set_bbreg(hw, RTXAGC_B_NSS2INDEX5_NSS2INDEX2, MASKBYTE2, power_index);
+		break;
+	case DESC_RATEVHT2SS_MCS5:
+		rtl_set_bbreg(hw, RTXAGC_B_NSS2INDEX5_NSS2INDEX2, MASKBYTE3, power_index);
+		break;
+
+	case DESC_RATEVHT2SS_MCS6:
+		rtl_set_bbreg(hw, RTXAGC_B_NSS2INDEX9_NSS2INDEX6, MASKBYTE0, power_index);
+		break;
+	case DESC_RATEVHT2SS_MCS7:
+		rtl_set_bbreg(hw, RTXAGC_B_NSS2INDEX9_NSS2INDEX6, MASKBYTE1, power_index);
+		break;
+	case DESC_RATEVHT2SS_MCS8:
+		rtl_set_bbreg(hw, RTXAGC_B_NSS2INDEX9_NSS2INDEX6, MASKBYTE2, power_index);
+		break;
+	case DESC_RATEVHT2SS_MCS9:
+		rtl_set_bbreg(hw, RTXAGC_B_NSS2INDEX9_NSS2INDEX6, MASKBYTE3, power_index);
+		break;
+
+	default:
+		RT_TRACE(COMP_POWER, DBG_LOUD, ("Invalid Rate!!\n"));
+		break;
+	}
+	} else {
+		RT_TRACE(COMP_POWER, DBG_LOUD, ("Invalid RFPath!!\n"));
+	}
+}
+
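+/* Program the TXAGC power index for every rate in @array on @path. */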
+void _rtl8821ae_phy_set_txpower_level_by_path(struct ieee80211_hw *hw,
+					      u8 *array, u8 path, u8 channel,
+					      u8 size)
+{
+	struct rtl_phy *rtlphy = &(rtl_priv(hw)->phy);
+	u8 i;
+	u8 power_index;
+	for (i = 0; i < size; i++) {
+		power_index = _rtl8821ae_get_txpower_index(hw, path, array[i],
+									rtlphy->current_chan_bw, channel);
+		_rtl8821ae_phy_set_txpower_index(hw, power_index, path, array[i]);
+	}
+}
+
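+/*
+ * Power-training setup: take the MCS7 power index for this bandwidth and
+ * channel and program three reduced levels (-10/-8/-6, floored at 2) into
+ * the RA/RB_TXPWRTRAING register.
+ */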
+static void _rtl8821ae_phy_txpower_training_by_path(struct ieee80211_hw *hw,
+						    u8 bw, u8 channel, u8 path)
+{
+	struct rtl_priv *rtlpriv = rtl_priv(hw);
+	struct rtl_phy *rtlphy = &(rtlpriv->phy);
+
+	u8 i;
+	u32 power_level, data, offset;
+
+	if (path >= rtlphy->num_total_rfpath)
+		return;
+
+	data = 0;
+	if (path == RF90_PATH_A) {
+		power_level =
+			_rtl8821ae_get_txpower_index(hw, RF90_PATH_A,
+			DESC_RATEMCS7, bw, channel);
+		offset = RA_TXPWRTRAING;
+	} else {
+		power_level =
+			_rtl8821ae_get_txpower_index(hw, RF90_PATH_A,
+			DESC_RATEMCS7, bw, channel);
+		offset = RB_TXPWRTRAING;
+	}
+
+	for (i = 0; i < 3; i++) {
+		if (i == 0)
+			power_level = power_level - 10;
+		else if (i == 1)
+			power_level = power_level - 8;
+		else
+			power_level = power_level - 6;
+
+		data |= (((power_level > 2) ? (power_level) : 2) << (i * 8));
+	}
+	rtl_set_bbreg(hw, offset, 0xffffff, data);
+}
+
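+/*
+ * Set TX power for all rate groups on one path (CCK on 2.4G only, OFDM,
+ * HT and VHT; 1SS always and 2SS when a second RF path exists), then run
+ * the power-training setup for the current bandwidth.
+ */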
+void rtl8821ae_phy_set_txpower_level_by_path(struct ieee80211_hw *hw,
+					     u8 channel, u8 path)
+{
+	struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
+	struct rtl_phy *rtlphy = &(rtl_priv(hw)->phy);
+	u8 cck_rates[]  = {DESC_RATE1M, DESC_RATE2M, DESC_RATE5_5M, DESC_RATE11M};
+	u8 ofdm_rates[]  = {DESC_RATE6M, DESC_RATE9M, DESC_RATE12M, DESC_RATE18M,
+						DESC_RATE24M, DESC_RATE36M, DESC_RATE48M, DESC_RATE54M};
+	u8 ht_rates_1t[]  = {DESC_RATEMCS0, DESC_RATEMCS1, DESC_RATEMCS2, DESC_RATEMCS3,
+						 DESC_RATEMCS4, DESC_RATEMCS5, DESC_RATEMCS6, DESC_RATEMCS7};
+	u8 ht_rates_2t[]  = {DESC_RATEMCS8, DESC_RATEMCS9, DESC_RATEMCS10, DESC_RATEMCS11,
+						 DESC_RATEMCS12, DESC_RATEMCS13, DESC_RATEMCS14, DESC_RATEMCS15};
+	u8 vht_rates_1t[]  = {DESC_RATEVHT1SS_MCS0, DESC_RATEVHT1SS_MCS1, DESC_RATEVHT1SS_MCS2,
+						DESC_RATEVHT1SS_MCS3, DESC_RATEVHT1SS_MCS4,
+						  DESC_RATEVHT1SS_MCS5, DESC_RATEVHT1SS_MCS6, DESC_RATEVHT1SS_MCS7,
+						  DESC_RATEVHT1SS_MCS8, DESC_RATEVHT1SS_MCS9};
+	u8 vht_rates_2t[]  = {DESC_RATEVHT2SS_MCS0, DESC_RATEVHT2SS_MCS1, DESC_RATEVHT2SS_MCS2,
+						DESC_RATEVHT2SS_MCS3, DESC_RATEVHT2SS_MCS4,
+						  DESC_RATEVHT2SS_MCS5, DESC_RATEVHT2SS_MCS6, DESC_RATEVHT2SS_MCS7,
+						  DESC_RATEVHT2SS_MCS8, DESC_RATEVHT2SS_MCS9};
+
+	if (rtlhal->current_bandtype == BAND_ON_2_4G)
+		_rtl8821ae_phy_set_txpower_level_by_path(hw, cck_rates, path, channel,
+							 sizeof(cck_rates) / sizeof(u8));
+
+	_rtl8821ae_phy_set_txpower_level_by_path(hw, ofdm_rates, path, channel,
+						 sizeof(ofdm_rates) / sizeof(u8));
+	_rtl8821ae_phy_set_txpower_level_by_path(hw, ht_rates_1t, path, channel,
+						 sizeof(ht_rates_1t) / sizeof(u8));
+	_rtl8821ae_phy_set_txpower_level_by_path(hw, vht_rates_1t, path, channel,
+						 sizeof(vht_rates_1t) / sizeof(u8));
+
+	if (rtlphy->num_total_rfpath >= 2) {
+		_rtl8821ae_phy_set_txpower_level_by_path(hw, ht_rates_2t, path, channel,
+							 sizeof(ht_rates_2t) / sizeof(u8));
+		_rtl8821ae_phy_set_txpower_level_by_path(hw, vht_rates_2t, path, channel,
+							 sizeof(vht_rates_2t) / sizeof(u8));
+	}
+
+	_rtl8821ae_phy_txpower_training_by_path(hw, rtlphy->current_chan_bw, channel, path);
+}
+/*just in case, write txpower in DW, to reduce time*/
+#if 0
+void _rtl8821ae_phy_get_txpower_index_by_rate_array(struct ieee80211_hw *hw, u8 channel,
+											u8 *rate, u8 path, u8 bw, u8 *power_index, u8 size)
+{
+	u8 i;
+	for (i = 0; i < size; i++)
+		power_index[i] = _rtl8821ae_get_txpower_index(hw, path, rate[i], bw, channel);
+}
+
+void rtl8821ae_phy_set_txpower_level_by_path2(struct ieee80211_hw *hw, u8 channel, u8 path)
+{
+	struct rtl_efuse *rtlefuse = rtl_efuse(rtl_priv(hw));
+	struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
+	struct rtl_phy *rtlphy = &(rtl_priv(hw)->phy);
+	u8 cck_rates[]  = {DESC_RATE1M, DESC_RATE2M, DESC_RATE5_5M, DESC_RATE11M};
+	u8 ofdm_rates[]  = {DESC_RATE6M, DESC_RATE9M, DESC_RATE12M, DESC_RATE18M,
+						DESC_RATE24M, DESC_RATE36M, DESC_RATE48M, DESC_RATE54M};
+	u8 ht_rates_1t[]  = {DESC_RATEMCS0, DESC_RATEMCS1, DESC_RATEMCS2, DESC_RATEMCS3,
+						 DESC_RATEMCS4, DESC_RATEMCS5, DESC_RATEMCS6, DESC_RATEMCS7};
+	u8 ht_rates_2t[]  = {DESC_RATEMCS8, DESC_RATEMCS9, DESC_RATEMCS10, DESC_RATEMCS11,
+						 DESC_RATEMCS12, DESC_RATEMCS13, DESC_RATEMCS14, DESC_RATEMCS15};
+	u8 vht_rates_1t[]  = {DESC_RATEVHT1SS_MCS0, DESC_RATEVHT1SS_MCS1, DESC_RATEVHT1SS_MCS2, DESC_RATEVHT1SS_MCS3, DESC_RATEVHT1SS_MCS4,
+						  DESC_RATEVHT1SS_MCS5, DESC_RATEVHT1SS_MCS6, DESC_RATEVHT1SS_MCS7, DESC_RATEVHT1SS_MCS8, DESC_RATEVHT1SS_MCS9};
+	u8 vht_rates_2t[]  = {DESC_RATEVHT2SS_MCS0, DESC_RATEVHT2SS_MCS1, DESC_RATEVHT2SS_MCS2, DESC_RATEVHT2SS_MCS3, DESC_RATEVHT2SS_MCS4,
+						  DESC_RATEVHT2SS_MCS5, DESC_RATEVHT2SS_MCS6, DESC_RATEVHT2SS_MCS7, DESC_RATEVHT2SS_MCS8, DESC_RATEVHT2SS_MCS9};
+	u8 i, j;
+	u8 pwridx[48] = {0};
+	u8 cs = sizeof(cck_rates) / sizeof(u8);
+	u8 os = sizeof(ofdm_rates) / sizeof(u8);
+	u8 h1s = sizeof(ht_rates_1t) / sizeof(u8);
+	u8 h2s = sizeof(ht_rates_2t) / sizeof(u8);
+	u8 v1s = sizeof(vht_rates_1t) / sizeof(u8);
+	u8 v2s = sizeof(vht_rates_2t) / sizeof(u8);
+
+	u8 len, start;
+	u32 reg_addr, power_index;
+	u8 bw = rtlphy->current_chan_bw;
+
+	_rtl8821ae_phy_get_txpower_index_by_rate_array(hw, channel,
+		ofdm_rates, path, bw, &pwridx[cs], os);
+
+	_rtl8821ae_phy_get_txpower_index_by_rate_array(hw, channel,
+		ht_rates_1t, path, bw, &pwridx[cs+os], h1s);
+
+	_rtl8821ae_phy_get_txpower_index_by_rate_array(hw, channel,
+		vht_rates_1t, path, bw, &pwridx[cs+os+h1s+h2s], v1s);
+
+
+	if (rtlhal->current_bandtype == BAND_ON_2_4G) {
+		_rtl8821ae_phy_get_txpower_index_by_rate_array(hw, channel,
+			cck_rates, path, bw, pwridx, cs);
+
+		start = 0;
+	} else {
+		start = cs;
+	}
+
+	reg_addr = (path == 0) ? RTXAGC_A_CCK11_CCK1 : RTXAGC_B_CCK11_CCK1;
+	reg_addr += start;
+
+	len = cs + os + h1s + h2s + v1s;
+	if (rtlphy->num_total_rfpath >= 2) {
+		_rtl8821ae_phy_get_txpower_index_by_rate_array(hw, channel,
+			ht_rates_2t, path, bw, &pwridx[cs+os+h1s], h2s);
+
+		_rtl8821ae_phy_get_txpower_index_by_rate_array(hw, channel,
+			vht_rates_2t, path, bw, &pwridx[cs+os+h1s+h2s+v1s], v2s);
+
+		len += v2s;
+	}
+	for (i = start; i < len; i += 4) {
+		power_index = 0;
+		for (j = 0; j < 4; j++)
+			power_index |= (pwridx[i+j] << (j*8));
+		rtl_set_bbreg(hw, reg_addr + i, MASKDWORD, power_index);
+	}
+
+	_rtl8821ae_phy_txpower_training_by_path(hw, rtlphy->current_chan_bw, channel, path);
+}
+#endif
+
+void rtl8821ae_phy_set_txpower_level(struct ieee80211_hw *hw, u8 channel)
+{
+	struct rtl_priv *rtlpriv = rtl_priv(hw);
+	struct rtl_phy *rtlphy = &(rtlpriv->phy);
+	u8 path = 0;
+
+	for (path = RF90_PATH_A; path < rtlphy->num_total_rfpath; ++path)
+		rtl8821ae_phy_set_txpower_level_by_path(hw, channel, path);
+}
+
+static long _rtl8821ae_phy_txpwr_idx_to_dbm(struct ieee80211_hw *hw,
+					 enum wireless_mode wirelessmode,
+					 u8 txpwridx)
+{
+	long offset;
+	long pwrout_dbm;
+
+	switch (wirelessmode) {
+	case WIRELESS_MODE_B:
+		offset = -7;
+		break;
+	case WIRELESS_MODE_G:
+	case WIRELESS_MODE_N_24G:
+		offset = -8;
+		break;
+	default:
+		offset = -8;
+		break;
+	}
+	pwrout_dbm = txpwridx / 2 + offset;
+	return pwrout_dbm;
+}
+
+void rtl8821ae_phy_scan_operation_backup(struct ieee80211_hw *hw, u8 operation)
+{
+	struct rtl_priv *rtlpriv = rtl_priv(hw);
+	struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
+	enum io_type iotype = IO_CMD_PAUSE_BAND0_DM_BY_SCAN;
+
+	if (!is_hal_stop(rtlhal)) {
+		switch (operation) {
+		case SCAN_OPT_BACKUP_BAND0:
+			iotype = IO_CMD_PAUSE_BAND0_DM_BY_SCAN;
+			rtlpriv->cfg->ops->set_hw_reg(hw,
+						      HW_VAR_IO_CMD,
+						      (u8 *) & iotype);
+
+			break;
+		case SCAN_OPT_BACKUP_BAND1:
+			iotype = IO_CMD_PAUSE_BAND1_DM_BY_SCAN;
+			rtlpriv->cfg->ops->set_hw_reg(hw,
+						      HW_VAR_IO_CMD,
+						      (u8 *) & iotype);
+
+			break;
+		case SCAN_OPT_RESTORE:
+			iotype = IO_CMD_RESUME_DM_BY_SCAN;
+			rtlpriv->cfg->ops->set_hw_reg(hw,
+						      HW_VAR_IO_CMD,
+						      (u8 *) & iotype);
+			break;
+		default:
+			RT_TRACE(COMP_ERR, DBG_EMERG,
+				 ("Unknown Scan Backup operation.\n"));
+			break;
+		}
+	}
+}
+
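+/* Select the 20/40/80 MHz RF mode bits in REG_TRXPTCL_CTL. */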
+static void _rtl8821ae_phy_set_reg_bw(struct rtl_priv *rtlpriv, u8 bw)
+{
+	u16 reg_rf_mode_bw, tmp = 0;
+	reg_rf_mode_bw = rtl_read_word(rtlpriv, REG_TRXPTCL_CTL);
+	switch (bw) {
+	case HT_CHANNEL_WIDTH_20:
+		rtl_write_word(rtlpriv, REG_TRXPTCL_CTL, reg_rf_mode_bw & 0xFE7F);
+		break;
+	case HT_CHANNEL_WIDTH_20_40:
+		tmp = reg_rf_mode_bw | BIT(7);
+		rtl_write_word(rtlpriv, REG_TRXPTCL_CTL, tmp & 0xFEFF);
+		break;
+	case HT_CHANNEL_WIDTH_80:
+		tmp = reg_rf_mode_bw | BIT(8);
+		rtl_write_word(rtlpriv, REG_TRXPTCL_CTL, tmp & 0xFF7F);
+		break;
+	default:
+		RT_TRACE(COMP_ERR, DBG_WARNING,("unknown Bandwidth: 0x%x\n",bw));
+		break;
+	}
+}
+
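+/*
+ * Map the primary 20/40 MHz channel offsets onto the VHT secondary-channel
+ * encoding: the 40 MHz sub-channel in the high nibble, 20 MHz in the low.
+ */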
+static u8 _rtl8821ae_phy_get_secondary_chnl(struct rtl_priv *rtlpriv)
+{
+	struct rtl_phy *rtlphy = &(rtlpriv->phy);
+	struct rtl_mac *mac = rtl_mac(rtlpriv);
+	u8 sc_set_40 = 0, sc_set_20 = 0;
+
+	if (rtlphy->current_chan_bw == HT_CHANNEL_WIDTH_80) {
+		if (mac->cur_80_prime_sc == PRIME_CHNL_OFFSET_LOWER)
+			sc_set_40 = VHT_DATA_SC_40_LOWER_OF_80MHZ;
+		else if (mac->cur_80_prime_sc == PRIME_CHNL_OFFSET_UPPER)
+			sc_set_40 = VHT_DATA_SC_40_UPPER_OF_80MHZ;
+		else
+			RT_TRACE(COMP_ERR, DBG_EMERG,
+				("SCMapping: Not Correct Primary40MHz Setting\n"));
+
+		if ((mac->cur_40_prime_sc == PRIME_CHNL_OFFSET_LOWER) &&
+		    (mac->cur_80_prime_sc == HAL_PRIME_CHNL_OFFSET_LOWER))
+			sc_set_20 = VHT_DATA_SC_20_LOWEST_OF_80MHZ;
+		else if ((mac->cur_40_prime_sc == PRIME_CHNL_OFFSET_UPPER) &&
+			 (mac->cur_80_prime_sc == HAL_PRIME_CHNL_OFFSET_LOWER))
+			sc_set_20 = VHT_DATA_SC_20_LOWER_OF_80MHZ;
+		else if ((mac->cur_40_prime_sc == PRIME_CHNL_OFFSET_LOWER) &&
+			 (mac->cur_80_prime_sc == HAL_PRIME_CHNL_OFFSET_UPPER))
+			sc_set_20 = VHT_DATA_SC_20_UPPER_OF_80MHZ;
+		else if ((mac->cur_40_prime_sc == PRIME_CHNL_OFFSET_UPPER) &&
+			 (mac->cur_80_prime_sc == HAL_PRIME_CHNL_OFFSET_UPPER))
+			sc_set_20 = VHT_DATA_SC_20_UPPERST_OF_80MHZ;
+		else
+			RT_TRACE(COMP_ERR, DBG_EMERG,
+				("SCMapping: Not Correct Primary40MHz Setting\n"));
+	} else if (rtlphy->current_chan_bw == HT_CHANNEL_WIDTH_20_40) {
+		if (mac->cur_40_prime_sc == PRIME_CHNL_OFFSET_UPPER)
+			sc_set_20 = VHT_DATA_SC_20_UPPER_OF_80MHZ;
+		else if (mac->cur_40_prime_sc == PRIME_CHNL_OFFSET_LOWER)
+			sc_set_20 = VHT_DATA_SC_20_LOWER_OF_80MHZ;
+		else
+			RT_TRACE(COMP_ERR, DBG_EMERG,
+			 ("SCMapping: Not Correct Primary40MHz Setting \n"));
+	}
+	return ((sc_set_40 << 4) | sc_set_20);
+}
+
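+/*
+ * Apply the pending bandwidth change: program the MAC/BB bandwidth and
+ * secondary-channel registers and the L1 peak threshold, then run the
+ * 8812AE spur workaround and the RF6052 bandwidth setting.
+ */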
+void rtl8821ae_phy_set_bw_mode_callback(struct ieee80211_hw *hw)
+{
+	struct rtl_priv *rtlpriv = rtl_priv(hw);
+	struct rtl_phy *rtlphy = &(rtlpriv->phy);
+	u8 sub_chnl = 0;
+	u8 l1pk_val = 0;
+
+	RT_TRACE(COMP_SCAN, DBG_TRACE,
+		 ("Switch to %s bandwidth\n",
+		  (rtlphy->current_chan_bw == HT_CHANNEL_WIDTH_20 ?
+		  "20MHz" :
+		  (rtlphy->current_chan_bw == HT_CHANNEL_WIDTH_20_40 ?
+		  "40MHz" : "80MHz"))));
+
+	_rtl8821ae_phy_set_reg_bw(rtlpriv, rtlphy->current_chan_bw);
+	sub_chnl = _rtl8821ae_phy_get_secondary_chnl(rtlpriv);
+	rtl_write_byte(rtlpriv, 0x0483, sub_chnl);
+
+	switch (rtlphy->current_chan_bw) {
+	case HT_CHANNEL_WIDTH_20:
+		rtl_set_bbreg(hw, RRFMOD, 0x003003C3, 0x00300200);
+		rtl_set_bbreg(hw, RADC_BUF_CLK, BIT(30), 0);
+
+		if (rtlphy->rf_type == RF_2T2R)
+			rtl_set_bbreg(hw, RL1PEAKTH, 0x03C00000, 7);
+		else
+			rtl_set_bbreg(hw, RL1PEAKTH, 0x03C00000, 8);
+		break;
+	case HT_CHANNEL_WIDTH_20_40:
+		rtl_set_bbreg(hw, RRFMOD, 0x003003C3, 0x00300201);
+		rtl_set_bbreg(hw, RADC_BUF_CLK, BIT(30), 0);
+		rtl_set_bbreg(hw, RRFMOD, 0x3C, sub_chnl);
+		rtl_set_bbreg(hw, RCCAONSEC, 0xf0000000, sub_chnl);
+
+		if (rtlphy->reg_837 & BIT(2)) {
+			l1pk_val = 6;
+		} else {
+			if (rtlphy->rf_type == RF_2T2R)
+				l1pk_val = 7;
+			else
+				l1pk_val = 8;
+		}
+		/* 0x848[25:22] = l1pk_val */
+		rtl_set_bbreg(hw, RL1PEAKTH, 0x03C00000, l1pk_val);
+
+		if (sub_chnl == VHT_DATA_SC_20_UPPER_OF_80MHZ)
+			rtl_set_bbreg(hw, RCCK_SYSTEM, BCCK_SYSTEM, 1);
+		else
+			rtl_set_bbreg(hw, RCCK_SYSTEM, BCCK_SYSTEM, 0);
+		break;
+
+	case HT_CHANNEL_WIDTH_80:
+		/* 0x8ac[21,20,9:6,1,0] = 8'b11100010 */
+		rtl_set_bbreg(hw, RRFMOD, 0x003003C3, 0x00300202);
+		/* 0x8c4[30] = 1 */
+		rtl_set_bbreg(hw, RADC_BUF_CLK, BIT(30), 1);
+		rtl_set_bbreg(hw, RRFMOD, 0x3C, sub_chnl);
+		rtl_set_bbreg(hw, RCCAONSEC, 0xf0000000, sub_chnl);
+
+		if (rtlphy->reg_837 & BIT(2)) {
+			l1pk_val = 5;
+		} else {
+			if (rtlphy->rf_type == RF_2T2R)
+				l1pk_val = 6;
+			else
+				l1pk_val = 7;
+		}
+		rtl_set_bbreg(hw, RL1PEAKTH, 0x03C00000, l1pk_val);
+
+		break;
+	default:
+		RT_TRACE(COMP_ERR, DBG_EMERG,
+			 ("unknown bandwidth: %#X\n", rtlphy->current_chan_bw));
+		break;
+	}
+
+	rtl8812ae_fixspur(hw, rtlphy->current_chan_bw, rtlphy->current_channel);
+
+	rtl8821ae_phy_rf6052_set_bandwidth(hw, rtlphy->current_chan_bw);
+	rtlphy->set_bwmode_inprogress = false;
+
+	RT_TRACE(COMP_SCAN, DBG_LOUD, (" \n"));
+}
+
+void rtl8821ae_phy_set_bw_mode(struct ieee80211_hw *hw,
+			    enum nl80211_channel_type ch_type)
+{
+	struct rtl_priv *rtlpriv = rtl_priv(hw);
+	struct rtl_phy *rtlphy = &(rtlpriv->phy);
+	struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
+	u8 tmp_bw = rtlphy->current_chan_bw;
+
+	if (rtlphy->set_bwmode_inprogress)
+		return;
+	rtlphy->set_bwmode_inprogress = true;
+	if ((!is_hal_stop(rtlhal)) && !(RT_CANNOT_IO(hw))) {
+		rtl8821ae_phy_set_bw_mode_callback(hw);
+	} else {
+		RT_TRACE(COMP_ERR, DBG_WARNING,
+			 ("FALSE driver sleep or unload\n"));
+		rtlphy->set_bwmode_inprogress = false;
+		rtlphy->current_chan_bw = tmp_bw;
+	}
+}
+
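+/*
+ * Apply the channel switch: program the RFC_AREA window and the per-path
+ * RF channel/band bits for the current channel, plus the RF_APK tuning
+ * value on 5 GHz for the 8821AE.
+ */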
+void rtl8821ae_phy_sw_chnl_callback(struct ieee80211_hw *hw)
+{
+	struct rtl_priv *rtlpriv = rtl_priv(hw);
+	struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
+	struct rtl_phy *rtlphy = &(rtlpriv->phy);
+	u8 channel = rtlphy->current_channel;
+	u8 path;
+	u32 data;
+
+	RT_TRACE(COMP_SCAN, DBG_TRACE,
+		 ("switch to channel%d\n", rtlphy->current_channel));
+	if (is_hal_stop(rtlhal))
+		return;
+
+	if (36 <= channel && channel <= 48)
+		data = 0x494;
+	else if (50 <= channel && channel <= 64)
+		data = 0x453;
+	else if (100 <= channel && channel <= 116)
+		data = 0x452;
+	else if (118 <= channel)
+		data = 0x412;
+	else
+		data = 0x96a;
+	rtl_set_bbreg(hw, RFC_AREA, 0x1ffe0000, data);
+
+	for (path = RF90_PATH_A; path < rtlphy->num_total_rfpath; path++) {
+		if (36 <= channel && channel <= 64)
+			data = 0x101;
+		else if (100 <= channel && channel <= 140)
+			data = 0x301;
+		else if (140 < channel)
+			data = 0x501;
+		else
+			data = 0x000;
+		rtl8821ae_phy_set_rf_reg(hw, path, RF_CHNLBW,
+			BIT(18)|BIT(17)|BIT(16)|BIT(9)|BIT(8), data);
+
+		rtl8821ae_phy_set_rf_reg(hw, path, RF_CHNLBW,
+			BMASKBYTE0, channel);
+
+		if (channel > 14) {
+			if (rtlhal->hw_type == HARDWARE_TYPE_RTL8821AE) {
+				if (36 <= channel && channel <= 64)
+					data = 0x114E9;
+				else if (100 <= channel && channel <= 140)
+					data = 0x110E9;
+				else
+					data = 0x110E9;
+				rtl8821ae_phy_set_rf_reg(hw, path, RF_APK,
+					BRFREGOFFSETMASK, data);
+			}
+		}
+	}
+	RT_TRACE(COMP_SCAN, DBG_TRACE, ("\n"));
+}
+
+u8 rtl8821ae_phy_sw_chnl(struct ieee80211_hw *hw)
+{
+	struct rtl_priv *rtlpriv = rtl_priv(hw);
+	struct rtl_phy *rtlphy = &(rtlpriv->phy);
+	struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
+	u32 timeout = 1000, timecount = 0;
+	u8 channel = rtlphy->current_channel;
+
+	if (rtlphy->sw_chnl_inprogress)
+		return 0;
+	if (rtlphy->set_bwmode_inprogress)
+		return 0;
+
+	if ((is_hal_stop(rtlhal)) || (RT_CANNOT_IO(hw))) {
+		RT_TRACE(COMP_CHAN, DBG_LOUD,
+			 ("sw_chnl_inprogress false driver sleep or unload\n"));
+		return 0;
+	}
+	while (rtlphy->lck_inprogress && timecount < timeout) {
+		mdelay(50);
+		timecount += 50;
+	}
+
+	if (rtlphy->current_channel > 14 && rtlhal->current_bandtype != BAND_ON_5G)
+		rtl8821ae_phy_switch_wirelessband(hw, BAND_ON_5G);
+	else if (rtlphy->current_channel <= 14 && rtlhal->current_bandtype != BAND_ON_2_4G)
+		rtl8821ae_phy_switch_wirelessband(hw, BAND_ON_2_4G);
+
+	rtlphy->sw_chnl_inprogress = true;
+	if (channel == 0)
+		channel = 1;
+
+	RT_TRACE(COMP_SCAN, DBG_TRACE,
+		 ("switch to channel%d, band type is %d\n", rtlphy->current_channel, rtlhal->current_bandtype));
+
+	rtl8821ae_phy_sw_chnl_callback(hw);
+
+	rtl8821ae_dm_clear_txpower_tracking_state(hw);
+	rtl8821ae_phy_set_txpower_level(hw, rtlphy->current_channel);
+
+	RT_TRACE(COMP_SCAN, DBG_TRACE, ("\n"));
+	rtlphy->sw_chnl_inprogress = false;
+	return 1;
+}
+
+#if 0
+static u8 _rtl8821ae_phy_path_b_iqk(struct ieee80211_hw *hw)
+{
+	u32 reg_eac, reg_eb4, reg_ebc, reg_ec4, reg_ecc;
+	u8 result = 0x00;
+
+	rtl_set_bbreg(hw, 0xe60, MASKDWORD, 0x00000002);
+	rtl_set_bbreg(hw, 0xe60, MASKDWORD, 0x00000000);
+	mdelay(IQK_DELAY_TIME);
+	reg_eac = rtl_get_bbreg(hw, 0xeac, MASKDWORD);
+	reg_eb4 = rtl_get_bbreg(hw, 0xeb4, MASKDWORD);
+	reg_ebc = rtl_get_bbreg(hw, 0xebc, MASKDWORD);
+	reg_ec4 = rtl_get_bbreg(hw, 0xec4, MASKDWORD);
+	reg_ecc = rtl_get_bbreg(hw, 0xecc, MASKDWORD);
+
+	if (!(reg_eac & BIT(31)) &&
+	    (((reg_eb4 & 0x03FF0000) >> 16) != 0x142) &&
+	    (((reg_ebc & 0x03FF0000) >> 16) != 0x42))
+		result |= 0x01;
+	else
+		return result;
+	if (!(reg_eac & BIT(30)) &&
+	    (((reg_ec4 & 0x03FF0000) >> 16) != 0x132) &&
+	    (((reg_ecc & 0x03FF0000) >> 16) != 0x36))
+		result |= 0x02;
+	return result;
+}
+
+static u8 _rtl8821ae_phy_path_a_rx_iqk(struct ieee80211_hw *hw, bool config_pathb)
+{
+	u32 reg_eac, reg_e94, reg_e9c, reg_ea4, u32temp;
+	u8 result = 0x00;
+
+	/*Get TXIMR Setting*/
+	/*Modify RX IQK mode table*/
+	rtl_set_bbreg(hw, RFPGA0_IQK, MASKDWORD, 0x00000000);
+	rtl_set_rfreg(hw, RF90_PATH_A, RF_WE_LUT, RFREG_OFFSET_MASK, 0x800a0);
+	rtl_set_rfreg(hw, RF90_PATH_A, RF_RCK_OS, RFREG_OFFSET_MASK, 0x30000);
+	rtl_set_rfreg(hw, RF90_PATH_A, RF_TXPA_G1, RFREG_OFFSET_MASK, 0x0000f);
+	rtl_set_rfreg(hw, RF90_PATH_A, RF_TXPA_G2, RFREG_OFFSET_MASK, 0xf117b);
+	rtl_set_bbreg(hw, RFPGA0_IQK, MASKDWORD, 0x80800000);
+
+	/*IQK Setting*/
+	rtl_set_bbreg(hw, RTx_IQK, MASKDWORD, 0x01007c00);
+	rtl_set_bbreg(hw, RRx_IQK, MASKDWORD, 0x81004800);
+
+	/*path a IQK setting*/
+	rtl_set_bbreg(hw, RTx_IQK_Tone_A, MASKDWORD, 0x10008c1c);
+	rtl_set_bbreg(hw, RRx_IQK_Tone_A, MASKDWORD, 0x30008c1c);
+	rtl_set_bbreg(hw, RTx_IQK_PI_A, MASKDWORD, 0x82160804);
+	rtl_set_bbreg(hw, RRx_IQK_PI_A, MASKDWORD, 0x28160000);
+
+	/*LO calibration Setting*/
+	rtl_set_bbreg(hw, RIQK_AGC_Rsp, MASKDWORD, 0x0046a911);
+	/*one shot,path A LOK & iqk*/
+	rtl_set_bbreg(hw, RIQK_AGC_Pts, MASKDWORD, 0xf9000000);
+	rtl_set_bbreg(hw, RIQK_AGC_Pts, MASKDWORD, 0xf8000000);
+
+	mdelay(IQK_DELAY_TIME);
+
+	reg_eac = rtl_get_bbreg(hw, RRx_Power_After_IQK_A_2, MASKDWORD);
+	reg_e94 = rtl_get_bbreg(hw, RTx_Power_Before_IQK_A, MASKDWORD);
+	reg_e9c = rtl_get_bbreg(hw, RTx_Power_After_IQK_A, MASKDWORD);
+
+
+	if (!(reg_eac & BIT(28)) &&
+	    (((reg_e94 & 0x03FF0000) >> 16) != 0x142) &&
+	    (((reg_e9c & 0x03FF0000) >> 16) != 0x42))
+		result |= 0x01;
+	else
+		return result;
+
+	u32temp = 0x80007C00 | (reg_e94&0x3FF0000)  | ((reg_e9c&0x3FF0000) >> 16);
+	rtl_set_bbreg(hw, RTx_IQK, MASKDWORD, u32temp);
+	/*RX IQK*/
+	/*Modify RX IQK mode table*/
+	rtl_set_bbreg(hw, RFPGA0_IQK, MASKDWORD, 0x00000000);
+	rtl_set_rfreg(hw, RF90_PATH_A, RF_WE_LUT, RFREG_OFFSET_MASK, 0x800a0);
+	rtl_set_rfreg(hw, RF90_PATH_A, RF_RCK_OS, RFREG_OFFSET_MASK, 0x30000);
+	rtl_set_rfreg(hw, RF90_PATH_A, RF_TXPA_G1, RFREG_OFFSET_MASK, 0x0000f);
+	rtl_set_rfreg(hw, RF90_PATH_A, RF_TXPA_G2, RFREG_OFFSET_MASK, 0xf7ffa);
+	rtl_set_bbreg(hw, RFPGA0_IQK, MASKDWORD, 0x80800000);
+
+	/*IQK Setting*/
+	rtl_set_bbreg(hw, RRx_IQK, MASKDWORD, 0x01004800);
+
+	/*path a IQK setting*/
+	rtl_set_bbreg(hw, RTx_IQK_Tone_A, MASKDWORD, 0x30008c1c);
+	rtl_set_bbreg(hw, RRx_IQK_Tone_A, MASKDWORD, 0x10008c1c);
+	rtl_set_bbreg(hw, RTx_IQK_PI_A, MASKDWORD, 0x82160c05);
+	rtl_set_bbreg(hw, RRx_IQK_PI_A, MASKDWORD, 0x28160c05);
+
+	/*LO calibration Setting*/
+	rtl_set_bbreg(hw, RIQK_AGC_Rsp, MASKDWORD, 0x0046a911);
+	/*one shot,path A LOK & iqk*/
+	rtl_set_bbreg(hw, RIQK_AGC_Pts, MASKDWORD, 0xf9000000);
+	rtl_set_bbreg(hw, RIQK_AGC_Pts, MASKDWORD, 0xf8000000);
+
+	mdelay(IQK_DELAY_TIME);
+
+	reg_eac = rtl_get_bbreg(hw, RRx_Power_After_IQK_A_2, MASKDWORD);
+	reg_e94 = rtl_get_bbreg(hw, RTx_Power_Before_IQK_A, MASKDWORD);
+	reg_e9c = rtl_get_bbreg(hw, RTx_Power_After_IQK_A, MASKDWORD);
+	reg_ea4 = rtl_get_bbreg(hw, RRx_Power_Before_IQK_A_2, MASKDWORD);
+
+	if (!(reg_eac & BIT(27)) &&
+	    (((reg_ea4 & 0x03FF0000) >> 16) != 0x132) &&
+	    (((reg_eac & 0x03FF0000) >> 16) != 0x36))
+		result |= 0x02;
+	return result;
+}
+#endif
+
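+/*
+ * Return the IQK table slot for a channel: 0 for 2.4 GHz channels, otherwise
+ * the channel's 1-based position in the 5 GHz part of the channel list.
+ */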
+u8 _rtl8812ae_get_right_chnl_place_for_iqk(u8 chnl)
+{
+	u8 channel_all[TARGET_CHNL_NUM_2G_5G_8812] = {
+		1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14,
+		36, 38, 40, 42, 44, 46, 48, 50, 52, 54, 56, 58, 60, 62, 64,
+		100, 102, 104, 106, 108, 110, 112, 114, 116, 118, 120, 122,
+		124, 126, 128, 130, 132, 134, 136, 138, 140,
+		149, 151, 153, 155, 157, 159, 161, 163, 165};
+	u8 place = chnl;
+
+	if (chnl > 14) {
+		for (place = 14; place < sizeof(channel_all); place++) {
+			if (channel_all[place] == chnl)
+				return place - 13;
+		}
+	}
+
+	return 0;
+}
+
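+/*
+ * Write the RX IQ-imbalance correction pair (X/Y) into the path A/B RX IQC
+ * register, falling back to the default (0x100, 0) when the result matches
+ * the rejected 0x112/0x3ee pattern.
+ */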
+void _rtl8812ae_iqk_rx_fill_iqc(
+	struct ieee80211_hw *hw,
+	enum radio_path path,
+	u32 rx_x,
+	u32 rx_y
+	)
+{
+	struct rtl_priv *rtlpriv = rtl_priv(hw);
+
+	switch (path) {
+	case RF90_PATH_A:
+		{
+			rtl_set_bbreg(hw, 0x82c, BIT(31), 0x0); /*[31] = 0 --> Page C*/
+			if (rx_x >> 1 == 0x112 || rx_y >> 1 == 0x3ee) {
+				rtl_set_bbreg(hw, 0xc10, 0x000003ff, 0x100);
+				rtl_set_bbreg(hw, 0xc10, 0x03ff0000, 0);
+				RT_TRACE(COMP_IQK, DBG_LOUD,
+					("RX_X = %x;;RX_Y = %x ====>fill to IQC\n",
+					rx_x >> 1 & 0x000003ff, rx_y >> 1 & 0x000003ff));
+			} else {
+				rtl_set_bbreg(hw, 0xc10, 0x000003ff, rx_x >> 1);
+				rtl_set_bbreg(hw, 0xc10, 0x03ff0000, rx_y >> 1);
+				RT_TRACE(COMP_IQK, DBG_LOUD,
+					("RX_X = %x;;RX_Y = %x ====>fill to IQC\n",
+					rx_x >> 1 & 0x000003ff, rx_y >> 1 & 0x000003ff));
+				RT_TRACE(COMP_IQK, DBG_LOUD,
+					("0xc10 = %x ====>fill to IQC\n",
+					rtl_read_dword(rtlpriv, 0xc10)));
+			}
+		}
+		break;
+	case RF90_PATH_B:
+		{
+			rtl_set_bbreg(hw, 0x82c, BIT(31), 0x0); /*[31] = 0 --> Page C*/
+			if (rx_x >> 1 == 0x112 || rx_y >> 1 == 0x3ee) {
+				rtl_set_bbreg(hw, 0xe10, 0x000003ff, 0x100);
+				rtl_set_bbreg(hw, 0xe10, 0x03ff0000, 0);
+				RT_TRACE(COMP_IQK, DBG_LOUD,
+					("RX_X = %x;;RX_Y = %x ====>fill to IQC\n",
+					rx_x >> 1 & 0x000003ff, rx_y >> 1 & 0x000003ff));
+			} else {
+				rtl_set_bbreg(hw, 0xe10, 0x000003ff, rx_x >> 1);
+				rtl_set_bbreg(hw, 0xe10, 0x03ff0000, rx_y >> 1);
+				RT_TRACE(COMP_IQK, DBG_LOUD,
+					("RX_X = %x;;RX_Y = %x====>fill to IQC\n ",
+					rx_x >> 1 & 0x000003ff, rx_y >> 1 & 0x000003ff));
+				RT_TRACE(COMP_IQK, DBG_LOUD,
+					("0xe10 = %x====>fill to IQC\n",
+					rtl_read_dword(rtlpriv, 0xe10)));
+			}
+		}
+		break;
+	default:
+		break;
+	};
+}
+
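+/* Write the TX IQ-imbalance correction pair (X/Y) into the path A/B TX IQC registers. */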
+void _rtl8812ae_iqk_tx_fill_iqc(
+	struct ieee80211_hw *hw,
+	enum radio_path path,
+	u32 tx_x,
+	u32 tx_y
+	)
+{
+	struct rtl_priv *rtlpriv = rtl_priv(hw);
+
+	switch (path) {
+	case RF90_PATH_A:
+		{
+			rtl_set_bbreg(hw, 0x82c, BIT(31), 0x1); /*[31] = 1 --> Page C1*/
+			rtl_write_dword(rtlpriv, 0xc90, 0x00000080);
+			rtl_write_dword(rtlpriv, 0xcc4, 0x20040000);
+			rtl_write_dword(rtlpriv, 0xcc8, 0x20000000);
+			rtl_set_bbreg(hw, 0xccc, 0x000007ff, tx_y);
+			rtl_set_bbreg(hw, 0xcd4, 0x000007ff, tx_x);
+			RT_TRACE(COMP_IQK, DBG_LOUD,
+				("TX_X = %x;;TX_Y = %x =====> fill to IQC\n",
+				tx_x & 0x000007ff, tx_y & 0x000007ff));
+			RT_TRACE(COMP_IQK, DBG_LOUD,
+				("0xcd4 = %x;;0xccc = %x ====>fill to IQC\n",
+				rtl_get_bbreg(hw, 0xcd4, 0x000007ff),
+				rtl_get_bbreg(hw, 0xccc, 0x000007ff)));
+		}
+		break;
+	case RF90_PATH_B:
+		{
+			rtl_set_bbreg(hw, 0x82c, BIT(31), 0x1); /*[31] = 1 --> Page C1*/
+			rtl_write_dword(rtlpriv, 0xe90, 0x00000080);
+			rtl_write_dword(rtlpriv, 0xec4, 0x20040000);
+			rtl_write_dword(rtlpriv, 0xec8, 0x20000000);
+			rtl_set_bbreg(hw, 0xecc, 0x000007ff, tx_y);
+			rtl_set_bbreg(hw, 0xed4, 0x000007ff, tx_x);
+			RT_TRACE(COMP_IQK, DBG_LOUD,
+				("TX_X = %x;;TX_Y = %x =====> fill to IQC\n",
+				tx_x&0x000007ff, tx_y&0x000007ff));
+			RT_TRACE(COMP_IQK, DBG_LOUD,
+				("0xed4 = %x;;0xecc = %x ====>fill to IQC\n",
+				rtl_get_bbreg(hw, 0xed4, 0x000007ff),
+				rtl_get_bbreg(hw, 0xecc, 0x000007ff)));
+		}
+		break;
+	default:
+		break;
+	};
+}
+
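+/* Save the listed MAC/BB registers so they can be restored after IQK. */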
+void _rtl8812ae_iqk_backup_macbb(
+	struct ieee80211_hw *hw,
+	u32 *macbb_backup,
+	u32 *backup_macbb_reg,
+	u32 mac_bb_num
+	)
+{
+	struct rtl_priv *rtlpriv = rtl_priv(hw);
+	u32 i;
+
+	rtl_set_bbreg(hw, 0x82c, BIT(31), 0x0); /*[31] = 0 --> Page C*/
+	 /*save MACBB default value*/
+	for (i = 0; i < mac_bb_num; i++)
+		macbb_backup[i] = rtl_read_dword(rtlpriv, backup_macbb_reg[i]);
+
+	RT_TRACE(COMP_IQK, DBG_LOUD, ("BackupMacBB Success!!!!\n"));
+}
+
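+/* Save the listed AFE registers before IQK. */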
+void _rtl8812ae_iqk_backup_afe(
+	struct ieee80211_hw *hw,
+	u32 *afe_backup,
+	u32 *backup_afe_REG,
+	u32 afe_num
+	)
+{
+	struct rtl_priv *rtlpriv = rtl_priv(hw);
+	u32 i;
+
+	rtl_set_bbreg(hw, 0x82c, BIT(31), 0x0); /*[31] = 0 --> Page C*/
+	/*Save AFE Parameters */
+	for (i = 0; i < afe_num; i++)
+		afe_backup[i] = rtl_read_dword(rtlpriv, backup_afe_REG[i]);
+
+	RT_TRACE(COMP_IQK, DBG_LOUD, ("BackupAFE Success!!!!\n"));
+}
+
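+/* Save the listed RF registers on both paths before IQK. */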
+void _rtl8812ae_iqk_backup_rf(
+	struct ieee80211_hw *hw,
+	u32 *rfa_backup,
+	u32 *rfb_backup,
+	u32 *backup_rf_reg,
+	u32 rf_num
+	)
+{
+
+	struct rtl_priv *rtlpriv = rtl_priv(hw);
+	u32 i;
+
+	rtl_set_bbreg(hw, 0x82c, BIT(31), 0x0); /*[31] = 0 --> Page C*/
+	/*Save RF Parameters*/
+	for (i = 0; i < rf_num; i++) {
+		rfa_backup[i] = rtl_get_rfreg(hw, RF90_PATH_A, backup_rf_reg[i],
+					      BMASKDWORD);
+		rfb_backup[i] = rtl_get_rfreg(hw, RF90_PATH_B, backup_rf_reg[i],
+					      BMASKDWORD);
+	}
+	RT_TRACE(COMP_IQK, DBG_LOUD, ("BackupRF Success!!!!\n"));
+}
+
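+/* MAC register setup for IQK: pause TX, RX antenna off, CCA off. */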
+void _rtl8812ae_iqk_configure_mac(
+	struct ieee80211_hw *hw
+	)
+{
+	struct rtl_priv *rtlpriv = rtl_priv(hw);
+	/* ========MAC register setting========*/
+	rtl_set_bbreg(hw, 0x82c, BIT(31), 0x0); /*[31] = 0 --> Page C*/
+	rtl_write_byte(rtlpriv, 0x522, 0x3f);
+	rtl_set_bbreg(hw, 0x550, BIT(11) | BIT(3), 0x0);
+	rtl_write_byte(rtlpriv, 0x808, 0x00);		/*RX ante off*/
+	rtl_set_bbreg(hw, 0x838, 0xf, 0xc);		/*CCA off*/
+}
+
+#define cal_num 10
+
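+/*
+ * Run TX (and then RX) IQ calibration on both paths for the current channel:
+ * repeatedly trigger one-shot IQK, collect up to cal_num results, accept a
+ * pair of runs that agree within +/-4, average them and program the result
+ * into the IQC registers (or the defaults on failure).
+ */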
+void _rtl8812ae_iqk_tx(
+	struct ieee80211_hw *hw,
+	u8 chnl_idx
+	)
+{
+	struct rtl_priv *rtlpriv = rtl_priv(hw);
+	struct rtl_phy *rtlphy = &(rtlpriv->phy);
+	struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
+
+	u8 delay_count;
+	u8 cal0_retry, cal1_retry;
+	u8 tx0_average = 0, tx1_average = 0, rx0_average = 0, rx1_average = 0;
+	int tx0_x = 0, tx0_y = 0, rx0_x = 0, rx0_y = 0;
+	int tx_x0[cal_num], tx_y0[cal_num], rx_x0[cal_num], rx_y0[cal_num];
+	int tx1_x = 0, tx1_y = 0, rx1_x = 0, rx1_y = 0;
+	int tx_x1[cal_num], tx_y1[cal_num], rx_x1[cal_num], rx_y1[cal_num];
+	bool tx0iqkok = false, rx0iqkok = false, tx0_fail = true, rx0_fail;
+	bool iqk0_ready = false, tx0_finish = false, rx0_finish = false;
+	bool tx1iqkok = false, rx1iqkok = false, tx1_fail = true, rx1_fail;
+	bool iqk1_ready = false, tx1_finish = false, rx1_finish = false, vdf_enable = false;
+	int i, tx_dt[3] = {0}, rx_dt[3] = {0}, ii, dx = 0, dy = 0;
+
+	RT_TRACE(COMP_IQK, DBG_LOUD,
+			("BandWidth = %d.\n",
+			rtlphy->current_chan_bw));
+	if (rtlphy->current_chan_bw == HT_CHANNEL_WIDTH_80)
+		vdf_enable = true;
+	/* VDF calibration is not used here; force it off. */
+	vdf_enable = false;
+
+
+	rtl_set_bbreg(hw, 0x82c, BIT(31), 0x0); /*[31] = 0 --> Page C*/
+	/*========Path-A AFE all on========*/
+	/*Port 0 DAC/ADC on*/
+	rtl_write_dword(rtlpriv, 0xc60, 0x77777777);
+	rtl_write_dword(rtlpriv, 0xc64, 0x77777777);
+
+	/* Port 1 DAC/ADC off*/
+	rtl_write_dword(rtlpriv, 0xe60, 0x77777777);
+	rtl_write_dword(rtlpriv, 0xe64, 0x77777777);
+
+	rtl_write_dword(rtlpriv, 0xc68, 0x19791979);
+	rtl_write_dword(rtlpriv, 0xe68, 0x19791979);
+	rtl_set_bbreg(hw, 0xc00, 0xf, 0x4);	/* hardware 3-wire off */
+	rtl_set_bbreg(hw, 0xe00, 0xf, 0x4);	/* hardware 3-wire off */
+
+	/*DAC/ADC sampling rate (160 MHz)*/
+	rtl_set_bbreg(hw, 0xc5c, BIT(26) | BIT(25) | BIT(24), 0x7);
+	rtl_set_bbreg(hw, 0xe5c, BIT(26) | BIT(25) | BIT(24), 0x7);
+	rtl_set_bbreg(hw, 0x8c4, BIT(30), 0x1);
+
+	/*====== Path A TX IQK RF Setting ======*/
+	rtl_set_bbreg(hw, 0x82c, BIT(31), 0x0); /* [31] = 0 --> Page C */
+	rtl_set_rfreg(hw, RF90_PATH_A, 0xef, BRFREGOFFSETMASK, 0x80002);
+	rtl_set_rfreg(hw, RF90_PATH_A, 0x30, BRFREGOFFSETMASK, 0x20000);
+	rtl_set_rfreg(hw, RF90_PATH_A, 0x31, BRFREGOFFSETMASK, 0x3fffd);
+	rtl_set_rfreg(hw, RF90_PATH_A, 0x32, BRFREGOFFSETMASK, 0xfe83f);
+	rtl_set_rfreg(hw, RF90_PATH_A, 0x65, BRFREGOFFSETMASK, 0x931d5);
+	rtl_set_rfreg(hw, RF90_PATH_A, 0x8f, BRFREGOFFSETMASK, 0x8a001);
+	/*====== Path B TX IQK RF Setting ======*/
+	rtl_set_rfreg(hw, RF90_PATH_B, 0xef, BRFREGOFFSETMASK, 0x80002);
+	rtl_set_rfreg(hw, RF90_PATH_B, 0x30, BRFREGOFFSETMASK, 0x20000);
+	rtl_set_rfreg(hw, RF90_PATH_B, 0x31, BRFREGOFFSETMASK, 0x3fffd);
+	rtl_set_rfreg(hw, RF90_PATH_B, 0x32, BRFREGOFFSETMASK, 0xfe83f);
+	rtl_set_rfreg(hw, RF90_PATH_B, 0x65, BRFREGOFFSETMASK, 0x931d5);
+	rtl_set_rfreg(hw, RF90_PATH_B, 0x8f, BRFREGOFFSETMASK, 0x8a001);
+	rtl_write_dword(rtlpriv, 0x90c, 0x00008000);
+	rtl_write_dword(rtlpriv, 0xb00, 0x03000100);
+	rtl_set_bbreg(hw, 0xc94, BIT(0), 0x1);
+	rtl_set_bbreg(hw, 0xe94, BIT(0), 0x1);
+	rtl_write_dword(rtlpriv, 0x978, 0x29002000);/* TX (X,Y)*/
+	rtl_write_dword(rtlpriv, 0x97c, 0xa9002000);/* RX (X,Y)*/
+	rtl_write_dword(rtlpriv, 0x984, 0x00462910);/*[0]:AGC_en, [15]:idac_K_Mask*/
+	rtl_set_bbreg(hw, 0x82c, BIT(31), 0x1); /* [31] = 1 --> Page C1*/
+
+	/*ExternalPA_5G == 0*/
+	rtl_write_dword(rtlpriv, 0xc88, 0x821403f1);
+	rtl_write_dword(rtlpriv, 0xe88, 0x821403f1);
+
+	if (rtlhal->current_bandtype) {
+		rtl_write_dword(rtlpriv, 0xc8c, 0x68163e96);
+		rtl_write_dword(rtlpriv, 0xe8c, 0x68163e96);
+	} else {
+		rtl_write_dword(rtlpriv, 0xc8c, 0x28163e96);
+		rtl_write_dword(rtlpriv, 0xe8c, 0x28163e96);
+	}
+
+	if (!vdf_enable) {
+		rtl_write_dword(rtlpriv, 0xc80, 0x18008c10);/*TX_Tone_idx[9:0], TxK_Mask[29] TX_Tone = 16*/
+		rtl_write_dword(rtlpriv, 0xc84, 0x38008c10);/*RX_Tone_idx[9:0], RxK_Mask[29]*/
+		rtl_write_dword(rtlpriv, 0xce8, 0x00000000);
+		rtl_write_dword(rtlpriv, 0xe80, 0x18008c10);/*TX_Tone_idx[9:0], TxK_Mask[29] TX_Tone = 16*/
+		rtl_write_dword(rtlpriv, 0xe84, 0x38008c10);/*RX_Tone_idx[9:0], RxK_Mask[29]*/
+		rtl_write_dword(rtlpriv, 0xee8, 0x00000000);
+
+		cal0_retry = 0;
+		cal1_retry = 0;
+		while (1) {
+			/*one shot*/
+			/* cb8[20]: hand SI/PI control over to the iqk_dpk module */
+			rtl_write_dword(rtlpriv, 0xcb8, 0x00100000);
+			rtl_write_dword(rtlpriv, 0xeb8, 0x00100000);
+			rtl_write_dword(rtlpriv, 0x980, 0xfa000000);
+			rtl_write_dword(rtlpriv, 0x980, 0xf8000000);
+
+			mdelay(10); /* delay 10 ms */
+			rtl_write_dword(rtlpriv, 0xcb8, 0x00000000);
+			rtl_write_dword(rtlpriv, 0xeb8, 0x00000000);
+			delay_count = 0;
+			while (1) {
+				if (!tx0_finish)
+					iqk0_ready = (bool) rtl_get_bbreg(hw, 0xd00, BIT(10));
+				if (!tx1_finish)
+					iqk1_ready = (bool) rtl_get_bbreg(hw, 0xd40, BIT(10));
+				if ((iqk0_ready && iqk1_ready) || (delay_count > 20))
+					break;
+				mdelay(1);
+				delay_count++;
+			}
+			RT_TRACE(COMP_IQK, DBG_LOUD, ("TX delay_count = %d\n", delay_count));
+			/* If 20ms No Result, then cal_retry++ */
+			if (delay_count < 20) {
+				/* ============TXIQK Check==============*/
+				tx0_fail = (bool) rtl_get_bbreg(hw, 0xd00, BIT(12));
+				tx1_fail = (bool) rtl_get_bbreg(hw, 0xd40, BIT(12));
+				if (!(tx0_fail || tx0_finish)){
+					rtl_write_dword(rtlpriv, 0xcb8, 0x02000000);
+					tx_x0[tx0_average] = rtl_get_bbreg(hw, 0xd00, 0x07ff0000) << 21;
+					rtl_write_dword(rtlpriv, 0xcb8, 0x04000000);
+					tx_y0[tx0_average] = rtl_get_bbreg(hw, 0xd00, 0x07ff0000) << 21;
+					tx0iqkok = true;
+					RT_TRACE(COMP_IQK, DBG_LOUD,
+						("TX_X0[%d] = %x ;; TX_Y0[%d] = %x\n",
+						tx0_average, (tx_x0[tx0_average]) >> 21 & 0x000007ff,
+						tx0_average, (tx_y0[tx0_average]) >> 21 & 0x000007ff));
+
+					tx0_average++;
+				} else {
+					tx0iqkok = false;
+					cal0_retry++;
+					if (cal0_retry == 10)
+						break;
+				}
+				if (!(tx1_fail || tx1_finish)) {
+					rtl_write_dword(rtlpriv, 0xeb8, 0x02000000);
+					tx_x1[tx1_average] = rtl_get_bbreg(hw, 0xd40, 0x07ff0000) << 21;
+					rtl_write_dword(rtlpriv, 0xeb8, 0x04000000);
+					tx_y1[tx1_average] = rtl_get_bbreg(hw, 0xd40, 0x07ff0000) << 21;
+					tx1iqkok = true;
+					RT_TRACE(COMP_IQK, DBG_LOUD,
+						("TX_X1[%d] = %x ;; TX_Y1[%d] = %x\n",
+						tx1_average, (tx_x1[tx1_average]) >> 21 & 0x000007ff,
+						tx1_average, (tx_y1[tx1_average]) >> 21 & 0x000007ff));
+
+					tx1_average++;
+				} else {
+					tx1iqkok = false;
+					cal1_retry++;
+					if (cal1_retry == 10)
+						break;
+				}
+			} else {
+				tx0iqkok = false;
+				tx1iqkok = false;
+				cal0_retry++;
+				cal1_retry++;
+				RT_TRACE(COMP_IQK, DBG_LOUD,
+					("Delay 20ms TX IQK Not Ready!!!!!\n"));
+				if (cal0_retry == 10)
+					break;
+			}
+			if (tx0_average >= 2){
+				for (i = 0; i < tx0_average; i++){
+					for (ii = i+1; ii <tx0_average; ii++){
+						dx = (tx_x0[i] >> 21) - (tx_x0[ii] >> 21);
+						if (dx < 4 && dx > -4){
+							dy = (tx_y0[i]>>21) - (tx_y0[ii]>>21);
+							if (dy < 4 && dy > -4){
+								tx0_x = ((tx_x0[i] >> 21) + (tx_x0[ii] >> 21)) / 2;
+								tx0_y = ((tx_y0[i] >> 21) + (tx_y0[ii] >> 21)) / 2;
+								tx_x0[0] = tx_x0[i];
+								tx_y0[1] = tx_y0[ii];
+								RT_TRACE(COMP_IQK, DBG_LOUD,
+									("TX0_X = %x;;TX0_Y = %x\n",
+									tx0_x & 0x000007ff, tx0_y & 0x000007ff));
+								if ((rtlphy->current_chan_bw == HT_CHANNEL_WIDTH_80)
+									&& vdf_enable) {
+									tx_dt[0] = (tx_dt[i] + tx_dt[ii]) / 2;
+								}
+								tx0_finish = true;
+							}
+						}
+					}
+				}
+			}
+			if (tx1_average >= 2){
+				for (i = 0; i < tx1_average; i++){
+					for (ii = i+1; ii < tx1_average; ii++){
+						dx = (tx_x1[i] >> 21) - (tx_x1[ii] >> 21);
+						if (dx < 4 && dx > -4){
+							dy = (tx_y1[i] >> 21) - (tx_y1[ii] >> 21);
+							if (dy < 4 && dy > -4){
+								tx1_x = ((tx_x1[i] >> 21) + (tx_x1[ii] >> 21)) / 2;
+								tx1_y = ((tx_y1[i] >> 21) + (tx_y1[ii] >> 21)) / 2;
+								tx_x1[0] = tx_x1[i];
+								tx_y1[1] = tx_y1[ii];
+								RT_TRACE(COMP_IQK, DBG_LOUD,
+									("TX1_X = %x;;TX1_Y = %x\n",
+									tx1_x & 0x000007ff, tx1_y & 0x000007ff));
+								if ((rtlphy->current_chan_bw == HT_CHANNEL_WIDTH_80)
+									&& vdf_enable) {
+									tx_dt[0] = (tx_dt[i] +  tx_dt[ii]) / 2;
+								}
+								tx1_finish = true;
+							}
+						}
+					}
+				}
+			}
+			RT_TRACE(COMP_IQK, DBG_LOUD,
+				("TX0_Average = %d, TX1_Average = %d\n",
+				tx0_average, tx1_average));
+			RT_TRACE(COMP_IQK, DBG_LOUD,
+				("TX0_finish = %d, TX1_finish = %d\n",
+				tx0_finish, tx1_finish));
+			if (tx0_finish && tx1_finish)
+				break;
+			if ((cal0_retry + tx0_average) >= 10
+				|| (cal1_retry + tx1_average) >= 10 )
+				break;
+		}
+		RT_TRACE(COMP_IQK, DBG_LOUD,
+			("TXA_cal_retry = %d\n", cal0_retry));
+		RT_TRACE(COMP_IQK, DBG_LOUD,
+			("TXB_cal_retry = %d\n", cal1_retry));
+
+	}
+
+	rtl_set_bbreg(hw, 0x82c, BIT(31), 0x0); /* [31] = 0 --> Page C*/
+	rtl_set_rfreg(hw, RF90_PATH_A, 0x58, 0x7fe00,
+		rtl_get_rfreg(hw, RF90_PATH_A, 0x8, 0xffc00)); /*Load LOK*/
+	rtl_set_rfreg(hw, RF90_PATH_B, 0x58, 0x7fe00,
+		rtl_get_rfreg(hw, RF90_PATH_B, 0x8, 0xffc00)); /* Load LOK*/
+	rtl_set_bbreg(hw, 0x82c, BIT(31), 0x1); /*[31] = 1 --> Page C1*/
+
+
+	if (vdf_enable) {}
+	else{
+		rtl_set_bbreg(hw, 0x82c, BIT(31), 0x0); /*[31] = 0 --> Page C*/
+		if (tx0_finish) {
+			/*====== Path A RX IQK RF Setting======*/
+			rtl_set_rfreg(hw, RF90_PATH_A, 0xef, BRFREGOFFSETMASK, 0x80000);
+			rtl_set_rfreg(hw, RF90_PATH_A, 0x18, 0x00c00, 0x3);     /* BW 20M*/
+			rtl_set_rfreg(hw, RF90_PATH_A, 0x30, BRFREGOFFSETMASK, 0x30000);
+			rtl_set_rfreg(hw, RF90_PATH_A, 0x31, BRFREGOFFSETMASK, 0x3f7ff);
+			rtl_set_rfreg(hw, RF90_PATH_A, 0x32, BRFREGOFFSETMASK, 0xfe7bf);
+			rtl_set_rfreg(hw, RF90_PATH_A, 0x8f, BRFREGOFFSETMASK, 0x88001);
+			rtl_set_rfreg(hw, RF90_PATH_A, 0x65, BRFREGOFFSETMASK, 0x931d6);
+			rtl_set_rfreg(hw, RF90_PATH_A, 0xef, BRFREGOFFSETMASK, 0x00000);
+		}
+		if (tx1_finish){
+			/*====== Path B RX IQK RF Setting======*/
+			rtl_set_rfreg(hw, RF90_PATH_B, 0xef, BRFREGOFFSETMASK, 0x80000);
+			rtl_set_rfreg(hw, RF90_PATH_B, 0x30, BRFREGOFFSETMASK, 0x30000);
+			rtl_set_rfreg(hw, RF90_PATH_B, 0x31, BRFREGOFFSETMASK, 0x3f7ff);
+			rtl_set_rfreg(hw, RF90_PATH_B, 0x32, BRFREGOFFSETMASK, 0xfe7bf);
+			rtl_set_rfreg(hw, RF90_PATH_B, 0x8f, BRFREGOFFSETMASK, 0x88001);
+			rtl_set_rfreg(hw, RF90_PATH_B, 0x65, BRFREGOFFSETMASK, 0x931d1);
+			rtl_set_rfreg(hw, RF90_PATH_B, 0xef, BRFREGOFFSETMASK, 0x00000);
+		}
+		rtl_set_bbreg(hw, 0x978, BIT(31), 0x1);
+		rtl_set_bbreg(hw, 0x97c, BIT(31), 0x0);
+		rtl_write_dword(rtlpriv, 0x90c, 0x00008000);
+		rtl_write_dword(rtlpriv, 0x984, 0x0046a890);
+
+		rtl_set_bbreg(hw, 0x82c, BIT(31), 0x1); /*[31] = 1 --> Page C1*/
+		if (tx0_finish) {
+			rtl_write_dword(rtlpriv, 0xc80, 0x38008c10);/*TX_Tone_idx[9:0], TxK_Mask[29] TX_Tone = 16*/
+			rtl_write_dword(rtlpriv, 0xc84, 0x18008c10);/*RX_Tone_idx[9:0], RxK_Mask[29]*/
+			rtl_write_dword(rtlpriv, 0xc88, 0x02140119);
+			rtl_write_dword(rtlpriv, 0xc8c, 0x28160cc0);
+		}
+		if (tx1_finish){
+			rtl_write_dword(rtlpriv, 0xe80, 0x38008c10);/*TX_Tone_idx[9:0], TxK_Mask[29] TX_Tone = 16*/
+			rtl_write_dword(rtlpriv, 0xe84, 0x18008c10);/*RX_Tone_idx[9:0], RxK_Mask[29]*/
+			rtl_write_dword(rtlpriv, 0xe88, 0x02140119);
+			rtl_write_dword(rtlpriv, 0xe8c, 0x28160ca0);
+		}
+		cal0_retry = 0;
+		cal1_retry = 0;
+		while (1) {
+			/* one shot */
+			rtl_set_bbreg(hw, 0x82c, BIT(31), 0x0); /* [31] = 0 --> Page C */
+			if (tx0_finish) {
+				rtl_set_bbreg(hw, 0x978, 0x03FF8000, (tx_x0[rx0_average % 2]) >> 21 & 0x000007ff);
+				rtl_set_bbreg(hw, 0x978, 0x000007FF, (tx_y0[rx0_average % 2]) >> 21 & 0x000007ff);
+				rtl_set_bbreg(hw, 0x82c, BIT(31), 0x1); /* [31] = 1 --> Page C1*/
+				/* cb8[20]: hand SI/PI control over to the iqk_dpk module */
+				rtl_write_dword(rtlpriv, 0xcb8, 0x00300000);
+				rtl_write_dword(rtlpriv, 0xcb8, 0x00100000);
+				mdelay(5); /* delay 5 ms */
+		    	}
+			if (tx1_finish){
+				rtl_set_bbreg(hw, 0x82c, BIT(31), 0x0); /*[31] = 0 --> Page C*/
+				rtl_set_bbreg(hw, 0x978, 0x03FF8000, (tx_x1[rx1_average % 2]) >> 21 & 0x000007ff);
+				rtl_set_bbreg(hw, 0x978, 0x000007FF, (tx_y1[rx1_average % 2]) >> 21 & 0x000007ff);
+				rtl_set_bbreg(hw, 0x82c, BIT(31), 0x1); /*[31] = 1 --> Page C1*/
+				/* eb8[20]: hand SI/PI control over to the iqk_dpk module */
+				rtl_write_dword(rtlpriv, 0xeb8, 0x00300000);
+				rtl_write_dword(rtlpriv, 0xeb8, 0x00100000);
+			}
+			mdelay(10); /*Delay 10ms*/
+			rtl_write_dword(rtlpriv, 0xcb8, 0x00000000);
+			rtl_write_dword(rtlpriv, 0xeb8, 0x00000000);
+			delay_count = 0;
+			while (1){
+				if (!rx0_finish && tx0_finish)
+					iqk0_ready = (bool) rtl_get_bbreg(hw, 0xd00, BIT(10));
+				if (!rx1_finish && tx1_finish)
+					iqk1_ready = (bool) rtl_get_bbreg(hw, 0xd40, BIT(10));
+				if ((iqk0_ready && iqk1_ready)||(delay_count>20))
+					break;
+				else{
+					mdelay(1);
+					delay_count++;
+				}
+			}
+			RT_TRACE(COMP_IQK, DBG_LOUD,
+				("RX delay_count = %d\n", delay_count));
+			/* If 20ms No Result, then cal_retry++ */
+			if (delay_count < 20) {
+				/* ============ RXIQK Check ============== */
+				rx0_fail = (bool) rtl_get_bbreg(hw, 0xd00, BIT(11));
+				rx1_fail = (bool) rtl_get_bbreg(hw, 0xd40, BIT(11));
+				if (!(rx0_fail || rx0_finish) && tx0_finish){
+					rtl_write_dword(rtlpriv, 0xcb8, 0x06000000);
+					rx_x0[rx0_average] = rtl_get_bbreg(hw, 0xd00, 0x07ff0000) << 21;
+					rtl_write_dword(rtlpriv, 0xcb8, 0x08000000);
+					rx_y0[rx0_average] = rtl_get_bbreg(hw, 0xd00, 0x07ff0000) << 21;
+					rx0iqkok= true;
+					RT_TRACE(COMP_IQK, DBG_LOUD,
+						("RX_X0[%d] = %x ;; RX_Y0[%d] = %x\n",
+						rx0_average, (rx_x0[rx0_average]) >> 21 & 0x000007ff,
+						rx0_average, (rx_y0[rx0_average]) >> 21 & 0x000007ff));
+
+					rx0_average++;
+				}
+				else{
+					RT_TRACE(COMP_IQK, DBG_LOUD,
+						("1. RXA_cal_retry = %d\n", cal0_retry));
+					rx0iqkok = false;
+					cal0_retry++;
+					if (cal0_retry == 10)
+					if (cal0_retry == 10)
+						break;
+				if (!(rx1_fail || rx1_finish) && tx1_finish) {
+					rtl_write_dword(rtlpriv, 0xeb8, 0x06000000);
+					rx_x1[rx1_average] = rtl_get_bbreg(hw, 0xd40, 0x07ff0000) << 21;
+					rtl_write_dword(rtlpriv, 0xeb8, 0x08000000);
+					rx_y1[rx1_average] = rtl_get_bbreg(hw, 0xd40, 0x07ff0000) << 21;
+					rx1iqkok = true;
+					RT_TRACE(COMP_IQK, DBG_LOUD,
+						("RX_X1[%d] = %x ;; RX_Y1[%d] = %x\n",
+						rx1_average, (rx_x1[rx1_average]) >> 21 & 0x000007ff,
+						rx1_average, (rx_y1[rx1_average]) >> 21 & 0x000007ff));
+
+					rx1_average++;
+				} else {
+					rx1iqkok = false;
+					cal1_retry++;
+					if (cal1_retry == 10)
+						break;
+				}
+
+			} else {
+				RT_TRACE(COMP_IQK, DBG_LOUD,
+					("2. RXA_cal_retry = %d\n", cal0_retry));
+				rx0iqkok = false;
+				rx1iqkok = false;
+				cal0_retry++;
+				cal1_retry++;
+				RT_TRACE(COMP_IQK, DBG_LOUD,
+					("Delay 20ms RX IQK Not Ready!!!!!\n"));
+				if (cal0_retry == 10)
+					break;
+			}
+			RT_TRACE(COMP_IQK, DBG_LOUD,
+				("3. RXA_cal_retry = %d\n", cal0_retry));
+			if (rx0_average >= 2){
+				for (i = 0; i < rx0_average; i++){
+					for (ii = i+1; ii < rx0_average; ii++){
+						dx = (rx_x0[i] >> 21) - (rx_x0[ii] >> 21);
+						if (dx < 4 && dx > -4){
+							dy = (rx_y0[i] >> 21) - (rx_y0[ii] >> 21);
+							if (dy < 4 && dy > -4){
+								rx0_x = ((rx_x0[i]>>21) + (rx_x0[ii] >> 21)) / 2;
+								rx0_y = ((rx_y0[i]>>21) + (rx_y0[ii] >> 21)) / 2;
+								if ((rtlphy->current_chan_bw == HT_CHANNEL_WIDTH_80)
+									&& vdf_enable) {
+									rx_dt[0] = (rx_dt[i] + rx_dt[ii]) / 2;
+								}
+								rx0_finish = true;
+								break;
+							}
+						}
+					}
+				}
+			}
+			if (rx1_average >= 2){
+				for (i = 0; i < rx1_average; i++){
+					for (ii = i+1; ii < rx1_average; ii++){
+						dx = (rx_x1[i] >> 21) - (rx_x1[ii] >> 21);
+						if (dx < 4 && dx > -4){
+							dy = (rx_y1[i] >> 21) - (rx_y1[ii] >> 21);
+							if (dy < 4 && dy > -4){
+								rx1_x = ((rx_x1[i] >> 21) + (rx_x1[ii] >> 21)) / 2;
+								rx1_y = ((rx_y1[i] >> 21) + (rx_y1[ii] >> 21)) / 2;
+								if ((rtlphy->current_chan_bw == HT_CHANNEL_WIDTH_80)
+									&& vdf_enable) {
+									rx_dt[0] = (rx_dt[i] + rx_dt[ii]) / 2;
+								}
+								rx1_finish = true;
+								break;
+							}
+						}
+					}
+				}
+			}
+			RT_TRACE(COMP_IQK, DBG_LOUD,
+				("RX0_Average = %d, RX1_Average = %d\n",
+				rx0_average, rx1_average));
+			RT_TRACE(COMP_IQK, DBG_LOUD,
+				("RX0_finish = %d, RX1_finish = %d\n",
+				rx0_finish, rx1_finish));
+			if ((rx0_finish || !tx0_finish) && (rx1_finish || !tx1_finish))
+				break;
+			if ((cal0_retry + rx0_average) >= 10
+				|| (cal1_retry + rx1_average) >= 10
+				|| rx0_average == 3
+				|| rx1_average == 3)
+				break;
+		}
+		RT_TRACE(COMP_IQK, DBG_LOUD,
+			("RXA_cal_retry = %d\n", cal0_retry));
+		RT_TRACE(COMP_IQK, DBG_LOUD,
+			("RXB_cal_retry = %d\n", cal1_retry));
+	}
+	rtl_set_bbreg(hw, 0x82c, BIT(31), 0x0); /* [31] = 0 --> Page C*/
+	switch (rtlphy->current_chan_bw)
+	{
+	case HT_CHANNEL_WIDTH_20_40:
+		{
+		rtl_set_rfreg(hw, RF90_PATH_A, 0x18, 0x00c00, 0x1);
+		}
+		break;
+	case HT_CHANNEL_WIDTH_80:
+		{
+		rtl_set_rfreg(hw, RF90_PATH_A, 0x18, 0x00c00, 0x0);
+		}
+		break;
+	default:
+		break;
+
+	}
+
+	rtl_set_bbreg(hw, 0x82c, BIT(31), 0x1); /* [31] = 1 --> Page C1 */
+	/*FillIQK Result*/
+	RT_TRACE(COMP_IQK, DBG_LOUD,
+		("========Path_A =======\n"));
+
+	if (tx0_finish){
+		_rtl8812ae_iqk_tx_fill_iqc(hw, RF90_PATH_A, tx0_x, tx0_y);
+	}
+	else{
+		_rtl8812ae_iqk_tx_fill_iqc(hw, RF90_PATH_A, 0x200, 0x0);
+	}
+
+	if (rtlphy->current_chan_bw == HT_CHANNEL_WIDTH_80
+		|| vdf_enable){
+		rtl_set_bbreg(hw, 0x82c, BIT(31), 0x1); /* [31] = 1 --> Page C1 */
+		rtl_set_bbreg(hw, 0xce8, 0x3fff0000, tx_dt[0] & 0x00003fff);
+		rtl_set_bbreg(hw, 0x82c, BIT(31), 0x0); /*[31] = 0 --> Page C*/
+	}
+
+	if (rx0_finish == 1){
+		_rtl8812ae_iqk_rx_fill_iqc(hw, RF90_PATH_A, rx0_x, rx0_y);
+	}
+	else{
+		_rtl8812ae_iqk_rx_fill_iqc(hw, RF90_PATH_A, 0x200, 0x0);
+	}
+
+	if (rtlphy->current_chan_bw == HT_CHANNEL_WIDTH_80
+		|| vdf_enable){
+		rtl_set_bbreg(hw, 0x82c, BIT(31), 0x1); /* [31] = 1 --> Page C1 */
+		rtl_set_bbreg(hw, 0xce8, 0x00003fff, rx_dt[0] & 0x00003fff);
+		rtl_set_bbreg(hw, 0x82c, BIT(31), 0x0); /* [31] = 0 --> Page C*/
+	}
+
+	RT_TRACE(COMP_IQK, DBG_LOUD,
+		("========Path_B =======\n"));
+
+	if (tx1_finish){
+		_rtl8812ae_iqk_tx_fill_iqc(hw, RF90_PATH_B, tx1_x, tx1_y);
+	}
+	else{
+		_rtl8812ae_iqk_tx_fill_iqc(hw, RF90_PATH_B, 0x200, 0x0);
+	}
+
+	if (rtlphy->current_chan_bw == HT_CHANNEL_WIDTH_80
+		|| vdf_enable){
+		rtl_set_bbreg(hw, 0x82c, BIT(31), 0x1); /* [31] = 1 --> Page C1 */
+		rtl_set_bbreg(hw, 0xee8, 0x3fff0000, tx_dt[0] & 0x00003fff);
+		rtl_set_bbreg(hw, 0x82c, BIT(31), 0x0); /* [31] = 0 --> Page C*/
+	}
+
+	if (rx1_finish == 1){
+		_rtl8812ae_iqk_rx_fill_iqc(hw, RF90_PATH_B, rx1_x, rx1_y);
+	}
+	else{
+		_rtl8812ae_iqk_rx_fill_iqc(hw, RF90_PATH_B, 0x200, 0x0);
+	}
+
+	if (rtlphy->current_chan_bw == HT_CHANNEL_WIDTH_80
+		|| vdf_enable){
+		rtl_set_bbreg(hw, 0x82c, BIT(31), 0x1); /* [31] = 1 --> Page C1 */
+		rtl_set_bbreg(hw, 0xee8, 0x00003fff, rx_dt[0] & 0x00003fff);
+		rtl_set_bbreg(hw, 0x82c, BIT(31), 0x0); /* [31] = 0 --> Page C*/
+	}
+}
+
+void _rtl8812ae_iqk_restore_rf(
+	struct ieee80211_hw *hw,
+	enum radio_path path,
+	u32 *backup_rf_reg,
+	u32 *rf_backup,
+	u32 rf_reg_num
+	)
+{
+	struct rtl_priv *rtlpriv = rtl_priv(hw);
+	u32 i;
+
+	rtl_set_bbreg(hw, 0x82c, BIT(31), 0x0); /*[31] = 0 --> Page C*/
+	for (i = 0; i < rf_reg_num; i++)
+		rtl_set_rfreg(hw, path, backup_rf_reg[i], BRFREGOFFSETMASK, rf_backup[i]);
+
+	rtl_set_rfreg(hw, path, 0xef, BRFREGOFFSETMASK, 0x0);
+
+	switch (path) {
+	case RF90_PATH_A:
+		RT_TRACE(COMP_IQK, DBG_LOUD,
+			("RestoreRF Path A Success!!!!\n"));
+		break;
+	case RF90_PATH_B:
+		RT_TRACE(COMP_IQK, DBG_LOUD,
+			("RestoreRF Path B Success!!!!\n"));
+		break;
+	default:
+		break;
+	}
+}
+
+void _rtl8812ae_iqk_restore_afe(
+	struct ieee80211_hw *hw,
+	u32 *afe_backup,
+	u32 *backup_afe_reg,
+	u32 afe_num
+	)
+{
+	struct rtl_priv *rtlpriv = rtl_priv(hw);
+	u32 i;
+	rtl_set_bbreg(hw, 0x82c, BIT(31), 0x0); /*[31] = 0 --> Page C*/
+	/*Reload AFE Parameters */
+	for (i = 0; i < afe_num; i++){
+		rtl_write_dword(rtlpriv, backup_afe_reg[i], afe_backup[i]);
+	}
+	rtl_set_bbreg(hw, 0x82c, BIT(31), 0x1); /* [31] = 1 --> Page C1*/
+	rtl_write_dword(rtlpriv, 0xc80, 0x0);
+	rtl_write_dword(rtlpriv, 0xc84, 0x0);
+	rtl_write_dword(rtlpriv, 0xc88, 0x0);
+	rtl_write_dword(rtlpriv, 0xc8c, 0x3c000000);
+	rtl_write_dword(rtlpriv, 0xc90, 0x00000080);
+	rtl_write_dword(rtlpriv, 0xc94, 0x00000000);
+	rtl_write_dword(rtlpriv, 0xcc4, 0x20040000);
+	rtl_write_dword(rtlpriv, 0xcc8, 0x20000000);
+	rtl_write_dword(rtlpriv, 0xcb8, 0x0);
+	rtl_write_dword(rtlpriv, 0xe80, 0x0);
+	rtl_write_dword(rtlpriv, 0xe84, 0x0);
+	rtl_write_dword(rtlpriv, 0xe88, 0x0);
+	rtl_write_dword(rtlpriv, 0xe8c, 0x3c000000);
+	rtl_write_dword(rtlpriv, 0xe90, 0x00000080);
+	rtl_write_dword(rtlpriv, 0xe94, 0x00000000);
+	rtl_write_dword(rtlpriv, 0xec4, 0x20040000);
+	rtl_write_dword(rtlpriv, 0xec8, 0x20000000);
+	rtl_write_dword(rtlpriv, 0xeb8, 0x0);
+	RT_TRACE(COMP_IQK, DBG_LOUD,
+		("RestoreAFE Success!!!!\n"));
+}
+
+void _rtl8812ae_iqk_restore_macbb(
+	struct ieee80211_hw *hw,
+	u32 *macbb_backup,
+	u32 *backup_macbb_reg,
+	u32 macbb_num
+	)
+{
+	struct rtl_priv *rtlpriv = rtl_priv(hw);
+	u32 i;
+	rtl_set_bbreg(hw, 0x82c, BIT(31), 0x0); /* [31] = 0 --> Page C*/
+	/* Reload MAC/BB parameters */
+	for (i = 0; i < macbb_num; i++){
+		rtl_write_dword(rtlpriv, backup_macbb_reg[i], macbb_backup[i]);
+	}
+	RT_TRACE(COMP_IQK, DBG_LOUD,
+		("RestoreMacBB Success!!!!\n"));
+}
+
+#define MACBB_REG_NUM 10
+#define AFE_REG_NUM 14
+#define RF_REG_NUM 3
+
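+/*
+ * Top-level IQ calibration for the 8812AE: back up the MAC/BB, AFE and RF
+ * registers touched by the calibration, switch the MAC into the IQK
+ * configuration, run the TX/RX IQK on both paths, then restore everything.
+ */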
+static void _rtl8812ae_phy_iq_calibrate(
+		struct ieee80211_hw *hw,
+		u8 channel)
+{
+	u32	macbb_backup[MACBB_REG_NUM];
+	u32 afe_backup[AFE_REG_NUM];
+	u32 rfa_backup[RF_REG_NUM];
+	u32 rfb_backup[RF_REG_NUM];
+	u32 	backup_macbb_reg[MACBB_REG_NUM] = {0xb00, 0x520, 0x550,
+											0x808, 0x90c, 0xc00, 0xe00,
+											0x8c4, 0x838, 0x82c};
+	u32	backup_afe_reg[AFE_REG_NUM] = {0xc5c, 0xc60, 0xc64, 0xc68,
+										0xcb8, 0xcb0, 0xcb4, 0xe5c,
+										0xe60, 0xe64, 0xe68, 0xeb8,
+										0xeb0, 0xeb4};
+	u32 	backup_rf_reg[RF_REG_NUM] = {0x65, 0x8f, 0x0};
+	u8 	chnl_idx = _rtl8812ae_get_right_chnl_place_for_iqk(channel);
+
+	_rtl8812ae_iqk_backup_macbb(hw, macbb_backup, backup_macbb_reg, MACBB_REG_NUM);
+	_rtl8812ae_iqk_backup_afe(hw, afe_backup, backup_afe_reg, AFE_REG_NUM);
+	_rtl8812ae_iqk_backup_rf(hw, rfa_backup, rfb_backup, backup_rf_reg, RF_REG_NUM);
+
+	_rtl8812ae_iqk_configure_mac(hw);
+	_rtl8812ae_iqk_tx(hw, chnl_idx);
+	_rtl8812ae_iqk_restore_rf(hw, RF90_PATH_A, backup_rf_reg, rfa_backup, RF_REG_NUM);
+	_rtl8812ae_iqk_restore_rf(hw, RF90_PATH_B, backup_rf_reg, rfb_backup, RF_REG_NUM);
+
+	_rtl8812ae_iqk_restore_afe(hw, afe_backup, backup_afe_reg, AFE_REG_NUM);
+	_rtl8812ae_iqk_restore_macbb(hw, macbb_backup, backup_macbb_reg, MACBB_REG_NUM);
+}
+
+
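+/*
+ * 8821AE variants of the IQK helpers: the backup routines below save the
+ * MAC/BB, AFE and RF registers listed by the caller so that the matching
+ * restore routines can put them back once calibration is done.
+ */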
+void _rtl8821ae_iqk_backup_macbb(
+		struct ieee80211_hw *hw,
+		u32 *macbb_backup,
+		u32 *backup_macbb_reg,
+		u32 mac_bb_num
+		)
+{
+	struct rtl_priv *rtlpriv = rtl_priv(hw);
+	u32 i;
+
+	rtl_set_bbreg(hw, 0x82c, BIT(31), 0x0); /*[31] = 0 --> Page C*/
+	/*save MACBB default value*/
+	for (i = 0; i < mac_bb_num; i++) {
+		macbb_backup[i] = rtl_read_dword(rtlpriv, backup_macbb_reg[i]);
+	}
+
+	RT_TRACE(COMP_IQK, DBG_LOUD, ("BackupMacBB Success!!!!\n"));
+}
+
+void _rtl8821ae_iqk_backup_afe(
+		struct ieee80211_hw *hw,
+		u32 *afe_backup,
+		u32 *backup_afe_REG,
+		u32 afe_num
+		)
+{
+	struct rtl_priv *rtlpriv = rtl_priv(hw);
+	u32 i;
+
+	rtl_set_bbreg(hw, 0x82c, BIT(31), 0x0); /*[31] = 0 --> Page C*/
+	/*Save AFE Parameters */
+	for (i = 0; i < afe_num; i++){
+		afe_backup[i] = rtl_read_dword(rtlpriv, backup_afe_REG[i]);
+	}
+	RT_TRACE(COMP_IQK, DBG_LOUD, ("BackupAFE Success!!!!\n"));
+}
+
+void _rtl8821ae_iqk_backup_rf(
+		struct ieee80211_hw *hw,
+		u32 *rfa_backup,
+		u32 *rfb_backup,
+		u32 *backup_rf_reg,
+		u32 rf_num
+		)
+{
+
+	struct rtl_priv *rtlpriv = rtl_priv(hw);
+	u32 i;
+
+	rtl_set_bbreg(hw, 0x82c, BIT(31), 0x0); /*[31] = 0 --> Page C*/
+	/*Save RF Parameters*/
+	for (i = 0; i < rf_num; i++){
+		rfa_backup[i] = rtl_get_rfreg(hw, RF90_PATH_A, backup_rf_reg[i], BMASKDWORD);
+		rfb_backup[i] = rtl_get_rfreg(hw, RF90_PATH_B, backup_rf_reg[i], BMASKDWORD);
+	}
+	RT_TRACE(COMP_IQK, DBG_LOUD, ("BackupRF Success!!!!\n"));
+}
+
+void _rtl8821ae_iqk_configure_mac(
+		struct ieee80211_hw *hw
+		)
+{
+	struct rtl_priv *rtlpriv = rtl_priv(hw);
+	/* ========MAC register setting========*/
+	rtl_set_bbreg(hw, 0x82c, BIT(31), 0x0); /*[31] = 0 --> Page C*/
+	rtl_write_byte(rtlpriv, 0x522, 0x3f);
+	rtl_set_bbreg(hw, 0x550, BIT(11) | BIT(3), 0x0);
+	rtl_write_byte(rtlpriv, 0x808, 0x00);		/*RX ante off*/
+	rtl_set_bbreg(hw, 0x838, 0xf, 0xc);		/*CCA off*/
+}
+
+
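+/*
+ * Write the TX IQ correction (X/Y) for path A into the page-C1 IQC
+ * registers (0xccc/0xcd4); the matching RX routine below writes the RX
+ * correction into 0xc10 on page C.
+ */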
+void _rtl8821ae_iqk_tx_fill_iqc(
+		struct ieee80211_hw *hw,
+		enum radio_path path,
+		u32 tx_x,
+		u32 tx_y
+		)
+{
+	struct rtl_priv *rtlpriv = rtl_priv(hw);
+	switch (path) {
+		case RF90_PATH_A:
+			{
+				rtl_set_bbreg(hw, 0x82c, BIT(31), 0x1); // [31] = 1 --> Page C1
+				rtl_write_dword(rtlpriv, 0xc90, 0x00000080);
+				rtl_write_dword(rtlpriv, 0xcc4, 0x20040000);
+				rtl_write_dword(rtlpriv, 0xcc8, 0x20000000);
+				rtl_set_bbreg(hw, 0xccc, 0x000007ff, tx_y);
+				rtl_set_bbreg(hw, 0xcd4, 0x000007ff, tx_x);
+				RT_TRACE(COMP_IQK, DBG_LOUD, ("TX_X = %x;;TX_Y = %x =====> fill to IQC\n", tx_x, tx_y));
+				RT_TRACE(COMP_IQK, DBG_LOUD, ("0xcd4 = %x;;0xccc = %x ====>fill to IQC\n", rtl_get_bbreg(hw, 0xcd4, 0x000007ff), rtl_get_bbreg(hw, 0xccc, 0x000007ff)));
+			}
+			break;
+		default:
+			break;
+	};
+}
+
+
+void _rtl8821ae_iqk_rx_fill_iqc(
+		struct ieee80211_hw *hw,
+		enum radio_path path,
+		u32 rx_x,
+		u32 rx_y
+		)
+{
+	struct rtl_priv *rtlpriv = rtl_priv(hw);
+	switch (path) {
+		case RF90_PATH_A:
+			{
+				rtl_set_bbreg(hw, 0x82c, BIT(31), 0x0); // [31] = 0 --> Page C
+				rtl_set_bbreg(hw, 0xc10, 0x000003ff, rx_x>>1);
+				rtl_set_bbreg(hw, 0xc10, 0x03ff0000, rx_y>>1);
+				RT_TRACE(COMP_IQK, DBG_LOUD, ("rx_x = %x;;rx_y = %x ====>fill to IQC\n", rx_x>>1, rx_y>>1));
+				RT_TRACE(COMP_IQK, DBG_LOUD, ("0xc10 = %x ====>fill to IQC\n", rtl_read_dword(rtlpriv, 0xc10)));
+			}
+			break;
+		default:
+			break;
+	};
+}
+
+
+
+#define cal_num 10
+
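+/*
+ * Run TX/RX IQ calibration on the given path (only RF90_PATH_A is handled).
+ * Up to cal_num attempts are made; each attempt performs LOK, a TX IQK and
+ * an RX IQK, switching to VDF mode for 80 MHz channels.  The results are
+ * accepted once two attempts agree closely, and their average is written
+ * into the IQC registers (or a default of 0x200/0x0 on failure).
+ */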
+void _rtl8821ae_iqk_tx(
+		struct ieee80211_hw *hw,
+		enum radio_path path
+		)
+{
+	struct rtl_priv *rtlpriv = rtl_priv(hw);
+	struct rtl_phy *rtlphy = &(rtlpriv->phy);
+	struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
+
+	u32 	tx_fail, rx_fail, delay_count, iqk_ready, cal_retry, cal = 0, temp_reg65;
+	int	tx_x = 0, tx_y = 0, rx_x = 0, rx_y = 0, tx_average = 0, rx_average = 0;
+	int	tx_x0[cal_num], tx_y0[cal_num], tx_x0_rxk[cal_num], tx_y0_rxk[cal_num], rx_x0[cal_num], rx_y0[cal_num];
+	bool 	tx0iqkok = false, rx0iqkok = false;
+	bool  	vdf_enable = false;
+	int	i, k, vdf_y[3], vdf_x[3], tx_dt[3], rx_dt[3], ii, dx = 0, dy = 0, tx_finish = 0, rx_finish = 0;
+
+
+	RT_TRACE(COMP_IQK, DBG_LOUD,
+			("BandWidth = %d.\n",
+			 rtlphy->current_chan_bw));
+	if (rtlphy->current_chan_bw == HT_CHANNEL_WIDTH_80){
+		vdf_enable = true;
+	}
+
+	while (cal < cal_num) {
+		switch (path) {
+			case RF90_PATH_A:
+				{
+					temp_reg65 = rtl_get_rfreg(hw, path, 0x65, 0xffffffff);
+					//Path-A LOK
+					rtl_set_bbreg(hw, 0x82c, BIT(31), 0x0); /*[31] = 0 --> Page C*/
+					/*========Path-A AFE all on========*/
+					/*Port 0 DAC/ADC on*/
+					rtl_write_dword(rtlpriv, 0xc60, 0x77777777);
+					rtl_write_dword(rtlpriv, 0xc64, 0x77777777);
+					rtl_write_dword(rtlpriv, 0xc68, 0x19791979);
+					rtl_write_dword(rtlpriv, 0xc6c, 0x19791979);
+					rtl_write_dword(rtlpriv, 0xc70, 0x19791979);
+					rtl_write_dword(rtlpriv, 0xc74, 0x19791979);
+					rtl_write_dword(rtlpriv, 0xc78, 0x19791979);
+					rtl_write_dword(rtlpriv, 0xc7c, 0x19791979);
+					rtl_write_dword(rtlpriv, 0xc80, 0x19791979);
+					rtl_write_dword(rtlpriv, 0xc84, 0x19791979);
+
+					rtl_set_bbreg(hw, 0xc00, 0xf, 0x4); /*hardware 3-wire off*/
+
+					// LOK Setting
+					//====== LOK ======
+					/*DAC/ADC sampling rate (160 MHz)*/
+					rtl_set_bbreg(hw, 0xc5c, BIT(26) | BIT(25) | BIT(24), 0x7);
+
+					// 2. LoK RF Setting (at BW = 20M)
+					rtl_set_rfreg(hw, path, 0xef, RFREG_OFFSET_MASK, 0x80002);
+					rtl_set_rfreg(hw, path, 0x18, 0x00c00, 0x3);     // BW 20M
+					rtl_set_rfreg(hw, path, 0x30, RFREG_OFFSET_MASK, 0x20000);
+					rtl_set_rfreg(hw, path, 0x31, RFREG_OFFSET_MASK, 0x0003f);
+					rtl_set_rfreg(hw, path, 0x32, RFREG_OFFSET_MASK, 0xf3fc3);
+					rtl_set_rfreg(hw, path, 0x65, RFREG_OFFSET_MASK, 0x931d5);
+					rtl_set_rfreg(hw, path, 0x8f, RFREG_OFFSET_MASK, 0x8a001);
+					rtl_set_bbreg(hw, 0xcb8, 0xf, 0xd);
+					rtl_write_dword(rtlpriv, 0x90c, 0x00008000);
+					rtl_write_dword(rtlpriv, 0xb00, 0x03000100);
+					rtl_set_bbreg(hw, 0xc94, BIT(0), 0x1);
+					rtl_write_dword(rtlpriv, 0x978, 0x29002000);// TX (X,Y)
+					rtl_write_dword(rtlpriv, 0x97c, 0xa9002000);// RX (X,Y)
+					rtl_write_dword(rtlpriv, 0x984, 0x00462910);// [0]:AGC_en, [15]:idac_K_Mask
+
+					rtl_set_bbreg(hw, 0x82c, BIT(31), 0x1); // [31] = 1 --> Page C1
+					rtl_write_dword(rtlpriv, 0xc88, 0x821403f4);
+
+					if (rtlhal->current_bandtype)
+						rtl_write_dword(rtlpriv, 0xc8c, 0x68163e96);
+					else
+						rtl_write_dword(rtlpriv, 0xc8c, 0x28163e96);
+
+					rtl_write_dword(rtlpriv, 0xc80, 0x18008c10);// TX_Tone_idx[9:0], TxK_Mask[29] TX_Tone = 16
+					rtl_write_dword(rtlpriv, 0xc84, 0x38008c10);// RX_Tone_idx[9:0], RxK_Mask[29]
+					rtl_write_dword(rtlpriv, 0xcb8, 0x00100000);// 0xcb8[20]: hand SI/PI control over to the iqk_dpk module
+					rtl_write_dword(rtlpriv, 0x980, 0xfa000000);
+					rtl_write_dword(rtlpriv, 0x980, 0xf8000000);
+
+					mdelay(10); //Delay 10ms
+					rtl_write_dword(rtlpriv, 0xcb8, 0x00000000);
+
+					rtl_set_bbreg(hw, 0x82c, BIT(31), 0x0); // [31] = 0 --> Page C
+					rtl_set_rfreg(hw, path, 0x58, 0x7fe00, rtl_get_rfreg(hw, path, 0x8, 0xffc00)); // Load LOK
+
+					switch (rtlphy->current_chan_bw)
+					{
+						case HT_CHANNEL_WIDTH_20_40:
+							{
+								rtl_set_rfreg(hw, path, 0x18, 0x00c00, 0x1);
+							}
+							break;
+						case HT_CHANNEL_WIDTH_80:
+							{
+								rtl_set_rfreg(hw, path, 0x18, 0x00c00, 0x0);
+							}
+							break;
+						default:
+							break;
+
+					}
+
+					rtl_set_bbreg(hw, 0x82c, BIT(31), 0x1); // [31] = 1 --> Page C1
+
+					// 3. TX RF Setting
+					rtl_set_bbreg(hw, 0x82c, BIT(31), 0x0); // [31] = 0 --> Page C
+					rtl_set_rfreg(hw, path, 0xef, RFREG_OFFSET_MASK, 0x80000);
+					rtl_set_rfreg(hw, path, 0x30, RFREG_OFFSET_MASK, 0x20000);
+					rtl_set_rfreg(hw, path, 0x31, RFREG_OFFSET_MASK, 0x0003f);
+					rtl_set_rfreg(hw, path, 0x32, RFREG_OFFSET_MASK, 0xf3fc3);
+					rtl_set_rfreg(hw, path, 0x65, RFREG_OFFSET_MASK, 0x931d5);
+					rtl_set_rfreg(hw, path, 0x8f, RFREG_OFFSET_MASK, 0x8a001);
+					rtl_set_rfreg(hw, path, 0xef, RFREG_OFFSET_MASK, 0x00000);
+					//ODM_SetBBReg(pDM_Odm, 0xcb8, 0xf, 0xd);
+					rtl_write_dword(rtlpriv, 0x90c, 0x00008000);
+					rtl_write_dword(rtlpriv, 0xb00, 0x03000100);
+					rtl_set_bbreg(hw, 0xc94, BIT(0), 0x1);
+					rtl_write_dword(rtlpriv, 0x978, 0x29002000);// TX (X,Y)
+					rtl_write_dword(rtlpriv, 0x97c, 0xa9002000);// RX (X,Y)
+					rtl_write_dword(rtlpriv, 0x984, 0x0046a910);// [0]:AGC_en, [15]:idac_K_Mask
+
+					rtl_set_bbreg(hw, 0x82c, BIT(31), 0x1); // [31] = 1 --> Page C1
+					rtl_write_dword(rtlpriv, 0xc88, 0x821403f1);
+					if (rtlhal->current_bandtype)
+						rtl_write_dword(rtlpriv, 0xc8c, 0x40163e96);
+					else
+						rtl_write_dword(rtlpriv, 0xc8c, 0x00163e96);
+
+					if (vdf_enable == 1){
+						RT_TRACE(COMP_IQK, DBG_LOUD, ("VDF_enable\n"));
+						for (k = 0;k <= 2; k++){
+							switch (k){
+								case 0:
+									{
+										rtl_write_dword(rtlpriv, 0xc80, 0x18008c38);// TX_Tone_idx[9:0], TxK_Mask[29] TX_Tone = 16
+										rtl_write_dword(rtlpriv, 0xc84, 0x38008c38);// RX_Tone_idx[9:0], RxK_Mask[29]
+										rtl_set_bbreg(hw, 0xce8, BIT(31), 0x0);
+									}
+									break;
+								case 1:
+									{
+										rtl_set_bbreg(hw, 0xc80, BIT(28), 0x0);
+										rtl_set_bbreg(hw, 0xc84, BIT(28), 0x0);
+										rtl_set_bbreg(hw, 0xce8, BIT(31), 0x0);
+									}
+									break;
+								case 2:
+									{
+										RT_TRACE(COMP_IQK, DBG_LOUD, ("vdf_y[1] = %x;;;vdf_y[0] = %x\n", vdf_y[1]>>21 & 0x00007ff, vdf_y[0]>>21 & 0x00007ff));
+										RT_TRACE(COMP_IQK, DBG_LOUD, ("vdf_x[1] = %x;;;vdf_x[0] = %x\n", vdf_x[1]>>21 & 0x00007ff, vdf_x[0]>>21 & 0x00007ff));
+										tx_dt[cal] = (vdf_y[1]>>20)-(vdf_y[0]>>20);
+										tx_dt[cal] = ((16*tx_dt[cal])*10000/15708);
+										tx_dt[cal] = (tx_dt[cal] >> 1 )+(tx_dt[cal] & BIT(0));
+										rtl_write_dword(rtlpriv, 0xc80, 0x18008c20);// TX_Tone_idx[9:0], TxK_Mask[29] TX_Tone = 16
+										rtl_write_dword(rtlpriv, 0xc84, 0x38008c20);// RX_Tone_idx[9:0], RxK_Mask[29]
+										rtl_set_bbreg(hw, 0xce8, BIT(31), 0x1);
+										rtl_set_bbreg(hw, 0xce8, 0x3fff0000, tx_dt[cal] & 0x00003fff);
+									}
+									break;
+								default:
+									break;
+							}
+							rtl_write_dword(rtlpriv, 0xcb8, 0x00100000);// 0xcb8[20]: hand SI/PI control over to the iqk_dpk module
+							cal_retry = 0;
+							while(1){
+								// one shot
+								rtl_write_dword(rtlpriv, 0x980, 0xfa000000);
+								rtl_write_dword(rtlpriv, 0x980, 0xf8000000);
+
+								mdelay(10); //Delay 10ms
+								rtl_write_dword(rtlpriv, 0xcb8, 0x00000000);
+								delay_count = 0;
+								while (1){
+									iqk_ready = rtl_get_bbreg(hw, 0xd00, BIT(10));
+									if (iqk_ready || (delay_count > 20)){
+										break;
+									}
+									else{
+										mdelay(1);
+										delay_count++;
+									}
+								}
+
+								if (delay_count < 20){							// If 20ms No Result, then cal_retry++
+									// ============TXIQK Check==============
+									tx_fail = rtl_get_bbreg(hw, 0xd00, BIT(12));
+
+									if (!tx_fail){
+										rtl_write_dword(rtlpriv, 0xcb8, 0x02000000);
+										vdf_x[k] = rtl_get_bbreg(hw, 0xd00, 0x07ff0000)<<21;
+										rtl_write_dword(rtlpriv, 0xcb8, 0x04000000);
+										vdf_y[k] = rtl_get_bbreg(hw, 0xd00, 0x07ff0000)<<21;
+										tx0iqkok = true;
+										break;
+									}
+									else{
+										rtl_set_bbreg(hw, 0xccc, 0x000007ff, 0x0);
+										rtl_set_bbreg(hw, 0xcd4, 0x000007ff, 0x200);
+										tx0iqkok = false;
+										cal_retry++;
+										if (cal_retry == 10) {
+											break;
+										}
+									}
+								}
+								else{
+									tx0iqkok = false;
+									cal_retry++;
+									if (cal_retry == 10){
+										break;
+									}
+								}
+							}
+						}
+						if (k == 3){
+							tx_x0[cal] = vdf_x[k-1] ;
+							tx_y0[cal] = vdf_y[k-1];
+						}
+					}
+
+					else {
+						rtl_write_dword(rtlpriv, 0xc80, 0x18008c10);// TX_Tone_idx[9:0], TxK_Mask[29] TX_Tone = 16
+						rtl_write_dword(rtlpriv, 0xc84, 0x38008c10);// RX_Tone_idx[9:0], RxK_Mask[29]
+						rtl_write_dword(rtlpriv, 0xcb8, 0x00100000);// 0xcb8[20]: hand SI/PI control over to the iqk_dpk module
+						cal_retry = 0;
+						while(1){
+							// one shot
+							rtl_write_dword(rtlpriv, 0x980, 0xfa000000);
+							rtl_write_dword(rtlpriv, 0x980, 0xf8000000);
+
+							mdelay(10); //Delay 10ms
+							rtl_write_dword(rtlpriv, 0xcb8, 0x00000000);
+							delay_count = 0;
+							while (1){
+								iqk_ready = rtl_get_bbreg(hw, 0xd00, BIT(10));
+								if (iqk_ready || (delay_count > 20)) {
+									break;
+								}
+								else{
+									mdelay(1);
+									delay_count++;
+								}
+							}
+
+							if (delay_count < 20){							// If 20ms No Result, then cal_retry++
+								// ============TXIQK Check==============
+								tx_fail = rtl_get_bbreg(hw, 0xd00, BIT(12));
+
+								if (!tx_fail){
+									rtl_write_dword(rtlpriv, 0xcb8, 0x02000000);
+									tx_x0[cal] = rtl_get_bbreg(hw, 0xd00, 0x07ff0000)<<21;
+									rtl_write_dword(rtlpriv, 0xcb8, 0x04000000);
+									tx_y0[cal] = rtl_get_bbreg(hw, 0xd00, 0x07ff0000)<<21;
+									tx0iqkok = true;
+									break;
+								}
+								else{
+									rtl_set_bbreg(hw, 0xccc, 0x000007ff, 0x0);
+									rtl_set_bbreg(hw, 0xcd4, 0x000007ff, 0x200);
+									tx0iqkok = false;
+									cal_retry++;
+									if (cal_retry == 10) {
+										break;
+									}
+								}
+							}
+							else{
+								tx0iqkok = false;
+								cal_retry++;
+								if (cal_retry == 10)
+									break;
+							}
+						}
+					}
+
+
+					if (tx0iqkok == false)
+						break;				// TXK fail, Don't do RXK
+
+					if (vdf_enable == 1){
+						rtl_set_bbreg(hw, 0xce8, BIT(31), 0x0);    // TX VDF Disable
+						RT_TRACE(COMP_IQK, DBG_LOUD, ("RXVDF Start\n"));
+						for (k = 0;k <= 2; k++){
+							//====== RX mode TXK (RXK Step 1) ======
+							rtl_set_bbreg(hw, 0x82c, BIT(31), 0x0); // [31] = 0 --> Page C
+							// 1. TX RF Setting
+							rtl_set_rfreg(hw, path, 0xef, RFREG_OFFSET_MASK, 0x80000);
+							rtl_set_rfreg(hw, path, 0x30, RFREG_OFFSET_MASK, 0x30000);
+							rtl_set_rfreg(hw, path, 0x31, RFREG_OFFSET_MASK, 0x00029);
+							rtl_set_rfreg(hw, path, 0x32, RFREG_OFFSET_MASK, 0xd7ffb);
+							rtl_set_rfreg(hw, path, 0x65, RFREG_OFFSET_MASK, temp_reg65);
+							rtl_set_rfreg(hw, path, 0x8f, RFREG_OFFSET_MASK, 0x8a001);
+							rtl_set_rfreg(hw, path, 0xef, RFREG_OFFSET_MASK, 0x00000);
+
+							rtl_set_bbreg(hw, 0xcb8, 0xf, 0xd);
+							rtl_write_dword(rtlpriv, 0x978, 0x29002000);// TX (X,Y)
+							rtl_write_dword(rtlpriv, 0x97c, 0xa9002000);// RX (X,Y)
+							rtl_write_dword(rtlpriv, 0x984, 0x0046a910);// [0]:AGC_en, [15]:idac_K_Mask
+							rtl_write_dword(rtlpriv, 0x90c, 0x00008000);
+							rtl_write_dword(rtlpriv, 0xb00, 0x03000100);
+							rtl_set_bbreg(hw, 0x82c, BIT(31), 0x1); // [31] = 1 --> Page C1
+							switch (k){
+								case 0:
+									{
+										rtl_write_dword(rtlpriv, 0xc80, 0x18008c38);// TX_Tone_idx[9:0], TxK_Mask[29] TX_Tone = 16
+										rtl_write_dword(rtlpriv, 0xc84, 0x38008c38);// RX_Tone_idx[9:0], RxK_Mask[29]
+										rtl_set_bbreg(hw, 0xce8, BIT(30), 0x0);
+									}
+									break;
+								case 1:
+									{
+										rtl_write_dword(rtlpriv, 0xc80, 0x08008c38);// TX_Tone_idx[9:0], TxK_Mask[29] TX_Tone = 16
+										rtl_write_dword(rtlpriv, 0xc84, 0x28008c38);// RX_Tone_idx[9:0], RxK_Mask[29]
+										rtl_set_bbreg(hw, 0xce8, BIT(30), 0x0);
+									}
+									break;
+								case 2:
+									{
+										RT_TRACE(COMP_IQK, DBG_LOUD, ("VDF_Y[1] = %x;;;VDF_Y[0] = %x\n", vdf_y[1]>>21 & 0x00007ff, vdf_y[0]>>21 & 0x00007ff));
+										RT_TRACE(COMP_IQK, DBG_LOUD, ("VDF_X[1] = %x;;;VDF_X[0] = %x\n", vdf_x[1]>>21 & 0x00007ff, vdf_x[0]>>21 & 0x00007ff));
+										rx_dt[cal] = (vdf_y[1]>>20)-(vdf_y[0]>>20);
+										RT_TRACE(COMP_IQK, DBG_LOUD, ("Rx_dt = %d\n", rx_dt[cal]));
+										rx_dt[cal] = ((16*rx_dt[cal])*10000/13823);
+										rx_dt[cal] = (rx_dt[cal] >> 1 )+(rx_dt[cal] & BIT(0));
+										rtl_write_dword(rtlpriv, 0xc80, 0x18008c20);// TX_Tone_idx[9:0], TxK_Mask[29] TX_Tone = 16
+										rtl_write_dword(rtlpriv, 0xc84, 0x38008c20);// RX_Tone_idx[9:0], RxK_Mask[29]
+										rtl_set_bbreg(hw, 0xce8, 0x00003fff, rx_dt[cal] & 0x00003fff);
+									}
+									break;
+								default:
+									break;
+							}
+							rtl_write_dword(rtlpriv, 0xc88, 0x821603e0);
+							rtl_write_dword(rtlpriv, 0xc8c, 0x68163e96);
+							rtl_write_dword(rtlpriv, 0xcb8, 0x00100000);// 0xcb8[20]: hand SI/PI control over to the iqk_dpk module
+							cal_retry = 0;
+							while(1){
+								// one shot
+								rtl_write_dword(rtlpriv, 0x980, 0xfa000000);
+								rtl_write_dword(rtlpriv, 0x980, 0xf8000000);
+
+								mdelay(10); //Delay 10ms
+								rtl_write_dword(rtlpriv, 0xcb8, 0x00000000);
+								delay_count = 0;
+								while (1){
+									iqk_ready = rtl_get_bbreg(hw, 0xd00, BIT(10));
+									if (iqk_ready || (delay_count > 20)){
+										break;
+									}
+									else{
+										mdelay(1);
+										delay_count++;
+									}
+								}
+
+								if (delay_count < 20){							// If 20ms No Result, then cal_retry++
+									// ============TXIQK Check==============
+									tx_fail = rtl_get_bbreg(hw, 0xd00, BIT(12));
+
+									if (!tx_fail){
+										rtl_write_dword(rtlpriv, 0xcb8, 0x02000000);
+										tx_x0_rxk[cal] = rtl_get_bbreg(hw, 0xd00, 0x07ff0000)<<21;
+										rtl_write_dword(rtlpriv, 0xcb8, 0x04000000);
+										tx_y0_rxk[cal] = rtl_get_bbreg(hw, 0xd00, 0x07ff0000)<<21;
+										tx0iqkok = true;
+										break;
+									}
+									else{
+										tx0iqkok = false;
+										cal_retry++;
+										if (cal_retry == 10)
+											break;
+									}
+								}
+								else{
+									tx0iqkok = false;
+									cal_retry++;
+									if (cal_retry == 10)
+										break;
+								}
+							}
+
+							if (tx0iqkok == false){   //If RX mode TXK fail, then take TXK Result
+								tx_x0_rxk[cal] = tx_x0[cal];
+								tx_y0_rxk[cal] = tx_y0[cal];
+								tx0iqkok = true;
+								RT_TRACE(COMP_IQK, DBG_LOUD, ("RXK Step 1 fail\n"));
+							}
+
+
+							//====== RX IQK ======
+							rtl_set_bbreg(hw, 0x82c, BIT(31), 0x0); // [31] = 0 --> Page C
+							// 1. RX RF Setting
+							rtl_set_rfreg(hw, path, 0xef, RFREG_OFFSET_MASK, 0x80000);
+							rtl_set_rfreg(hw, path, 0x30, RFREG_OFFSET_MASK, 0x30000);
+							rtl_set_rfreg(hw, path, 0x31, RFREG_OFFSET_MASK, 0x0002f);
+							rtl_set_rfreg(hw, path, 0x32, RFREG_OFFSET_MASK, 0xfffbb);
+							rtl_set_rfreg(hw, path, 0x8f, RFREG_OFFSET_MASK, 0x88001);
+							rtl_set_rfreg(hw, path, 0x65, RFREG_OFFSET_MASK, 0x931d8);
+							rtl_set_rfreg(hw, path, 0xef, RFREG_OFFSET_MASK, 0x00000);
+
+							rtl_set_bbreg(hw, 0x978, 0x03FF8000, (tx_x0_rxk[cal])>>21&0x000007ff);
+							rtl_set_bbreg(hw, 0x978, 0x000007FF, (tx_y0_rxk[cal])>>21&0x000007ff);
+							rtl_set_bbreg(hw, 0x978, BIT(31), 0x1);
+							rtl_set_bbreg(hw, 0x97c, BIT(31), 0x0);
+							rtl_set_bbreg(hw, 0xcb8, 0xF, 0xe);
+							rtl_write_dword(rtlpriv, 0x90c, 0x00008000);
+							rtl_write_dword(rtlpriv, 0x984, 0x0046a911);
+
+							rtl_set_bbreg(hw, 0x82c, BIT(31), 0x1); // [31] = 1 --> Page C1
+							rtl_set_bbreg(hw, 0xc80, BIT(29), 0x1);
+							rtl_set_bbreg(hw, 0xc84, BIT(29), 0x0);
+							rtl_write_dword(rtlpriv, 0xc88, 0x02140119);
+
+							rtl_write_dword(rtlpriv, 0xc8c, 0x28160d00); /* pDM_Odm->SupportInterface == 1 */
+
+							if (k==2){
+								rtl_set_bbreg(hw, 0xce8, BIT(30), 0x1);  //RX VDF Enable
+							}
+							rtl_write_dword(rtlpriv, 0xcb8, 0x00100000);// 0xcb8[20]: hand SI/PI control over to the iqk_dpk module
+
+							cal_retry = 0;
+							while(1){
+								// one shot
+								rtl_write_dword(rtlpriv, 0x980, 0xfa000000);
+								rtl_write_dword(rtlpriv, 0x980, 0xf8000000);
+
+								mdelay(10); //Delay 10ms
+								rtl_write_dword(rtlpriv, 0xcb8, 0x00000000);
+								delay_count = 0;
+								while (1){
+									iqk_ready = rtl_get_bbreg(hw, 0xd00, BIT(10));
+									if (iqk_ready || (delay_count > 20)){
+										break;
+									}
+									else{
+										mdelay(1);
+										delay_count++;
+									}
+								}
+
+								if (delay_count < 20){	// If 20ms No Result, then cal_retry++
+									// ============RXIQK Check==============
+									rx_fail = rtl_get_bbreg(hw, 0xd00, BIT(11));
+									if (rx_fail == 0){
+										rtl_write_dword(rtlpriv, 0xcb8, 0x06000000);
+										vdf_x[k] = rtl_get_bbreg(hw, 0xd00, 0x07ff0000)<<21;
+										rtl_write_dword(rtlpriv, 0xcb8, 0x08000000);
+										vdf_y[k] = rtl_get_bbreg(hw, 0xd00, 0x07ff0000)<<21;
+										rx0iqkok = true;
+										break;
+									}
+									else{
+										rtl_set_bbreg(hw, 0xc10, 0x000003ff, 0x200>>1);
+										rtl_set_bbreg(hw, 0xc10, 0x03ff0000, 0x0>>1);
+										rx0iqkok = false;
+										cal_retry++;
+										if (cal_retry == 10)
+											break;
+
+									}
+								}
+								else{
+									rx0iqkok = false;
+									cal_retry++;
+									if (cal_retry == 10)
+										break;
+								}
+							}
+
+						}
+						if (k == 3){
+							rx_x0[cal] = vdf_x[k-1] ;
+							rx_y0[cal] = vdf_y[k-1];
+						}
+						rtl_set_bbreg(hw, 0xce8, BIT(31), 0x1);    // TX VDF Enable
+					}
+
+					else{
+						//====== RX mode TXK (RXK Step 1) ======
+						rtl_set_bbreg(hw, 0x82c, BIT(31), 0x0); // [31] = 0 --> Page C
+						// 1. TX RF Setting
+						rtl_set_rfreg(hw, path, 0xef, RFREG_OFFSET_MASK, 0x80000);
+						rtl_set_rfreg(hw, path, 0x30, RFREG_OFFSET_MASK, 0x30000);
+						rtl_set_rfreg(hw, path, 0x31, RFREG_OFFSET_MASK, 0x00029);
+						rtl_set_rfreg(hw, path, 0x32, RFREG_OFFSET_MASK, 0xd7ffb);
+						rtl_set_rfreg(hw, path, 0x65, RFREG_OFFSET_MASK, temp_reg65);
+						rtl_set_rfreg(hw, path, 0x8f, RFREG_OFFSET_MASK, 0x8a001);
+						rtl_set_rfreg(hw, path, 0xef, RFREG_OFFSET_MASK, 0x00000);
+						rtl_write_dword(rtlpriv, 0x90c, 0x00008000);
+						rtl_write_dword(rtlpriv, 0xb00, 0x03000100);
+						rtl_write_dword(rtlpriv, 0x984, 0x0046a910);// [0]:AGC_en, [15]:idac_K_Mask
+
+						rtl_set_bbreg(hw, 0x82c, BIT(31), 0x1); // [31] = 1 --> Page C1
+						rtl_write_dword(rtlpriv, 0xc80, 0x18008c10);// TX_Tone_idx[9:0], TxK_Mask[29] TX_Tone = 16
+						rtl_write_dword(rtlpriv, 0xc84, 0x38008c10);// RX_Tone_idx[9:0], RxK_Mask[29]
+						rtl_write_dword(rtlpriv, 0xc88, 0x821603e0);
+						//ODM_Write4Byte(pDM_Odm, 0xc8c, 0x68163e96);
+						rtl_write_dword(rtlpriv, 0xcb8, 0x00100000);// 0xcb8[20]: hand SI/PI control over to the iqk_dpk module
+						cal_retry = 0;
+						while(1){
+							// one shot
+							rtl_write_dword(rtlpriv, 0x980, 0xfa000000);
+							rtl_write_dword(rtlpriv, 0x980, 0xf8000000);
+
+							mdelay(10); //Delay 10ms
+							rtl_write_dword(rtlpriv, 0xcb8, 0x00000000);
+							delay_count = 0;
+							while (1){
+								iqk_ready = rtl_get_bbreg(hw, 0xd00, BIT(10));
+								if (iqk_ready || (delay_count > 20)){
+									break;
+								}
+								else{
+									mdelay(1);
+									delay_count++;
+								}
+							}
+
+							if (delay_count < 20){							// If 20ms No Result, then cal_retry++
+								// ============TXIQK Check==============
+								tx_fail = rtl_get_bbreg(hw, 0xd00, BIT(12));
+
+								if (!tx_fail){
+									rtl_write_dword(rtlpriv, 0xcb8, 0x02000000);
+									tx_x0_rxk[cal] = rtl_get_bbreg(hw, 0xd00, 0x07ff0000)<<21;
+									rtl_write_dword(rtlpriv, 0xcb8, 0x04000000);
+									tx_y0_rxk[cal] = rtl_get_bbreg(hw, 0xd00, 0x07ff0000)<<21;
+									tx0iqkok = true;
+									break;
+								}
+								else{
+									tx0iqkok = false;
+									cal_retry++;
+									if (cal_retry == 10)
+										break;
+								}
+							}
+							else{
+								tx0iqkok = false;
+								cal_retry++;
+								if (cal_retry == 10)
+									break;
+							}
+						}
+
+
+						if (tx0iqkok == false){   //If RX mode TXK fail, then take TXK Result
+							tx_x0_rxk[cal] = tx_x0[cal];
+							tx_y0_rxk[cal] = tx_y0[cal];
+							tx0iqkok = true;
+							RT_TRACE(COMP_IQK, DBG_LOUD, ("RXK Step 1 fail\n"));
+						}
+
+
+						//====== RX IQK ======
+						rtl_set_bbreg(hw, 0x82c, BIT(31), 0x0); // [31] = 0 --> Page C
+						// 1. RX RF Setting
+						rtl_set_rfreg(hw, path, 0xef, RFREG_OFFSET_MASK, 0x80000);
+						rtl_set_rfreg(hw, path, 0x30, RFREG_OFFSET_MASK, 0x30000);
+						rtl_set_rfreg(hw, path, 0x31, RFREG_OFFSET_MASK, 0x0002f);
+						rtl_set_rfreg(hw, path, 0x32, RFREG_OFFSET_MASK, 0xfffbb);
+						rtl_set_rfreg(hw, path, 0x8f, RFREG_OFFSET_MASK, 0x88001);
+						rtl_set_rfreg(hw, path, 0x65, RFREG_OFFSET_MASK, 0x931d8);
+						rtl_set_rfreg(hw, path, 0xef, RFREG_OFFSET_MASK, 0x00000);
+
+						rtl_set_bbreg(hw, 0x978, 0x03FF8000, (tx_x0_rxk[cal])>>21&0x000007ff);
+						rtl_set_bbreg(hw, 0x978, 0x000007FF, (tx_y0_rxk[cal])>>21&0x000007ff);
+						rtl_set_bbreg(hw, 0x978, BIT(31), 0x1);
+						rtl_set_bbreg(hw, 0x97c, BIT(31), 0x0);
+						//ODM_SetBBReg(pDM_Odm, 0xcb8, 0xF, 0xe);
+						rtl_write_dword(rtlpriv, 0x90c, 0x00008000);
+						rtl_write_dword(rtlpriv, 0x984, 0x0046a911);
+
+						rtl_set_bbreg(hw, 0x82c, BIT(31), 0x1); // [31] = 1 --> Page C1
+						rtl_write_dword(rtlpriv, 0xc80, 0x38008c10);// TX_Tone_idx[9:0], TxK_Mask[29] TX_Tone = 16
+						rtl_write_dword(rtlpriv, 0xc84, 0x18008c10);// RX_Tone_idx[9:0], RxK_Mask[29]
+						rtl_write_dword(rtlpriv, 0xc88, 0x02140119);
+
+						rtl_write_dword(rtlpriv, 0xc8c, 0x28160d00); /*pDM_Odm->SupportInterface == 1*/
+
+						rtl_write_dword(rtlpriv, 0xcb8, 0x00100000);// 0xcb8[20]: hand SI/PI control over to the iqk_dpk module
+
+						cal_retry = 0;
+						while(1){
+							// one shot
+							rtl_write_dword(rtlpriv, 0x980, 0xfa000000);
+							rtl_write_dword(rtlpriv, 0x980, 0xf8000000);
+
+							mdelay(10); //Delay 10ms
+							rtl_write_dword(rtlpriv, 0xcb8, 0x00000000);
+							delay_count = 0;
+							while (1){
+								iqk_ready = rtl_get_bbreg(hw, 0xd00, BIT(10));
+								if (iqk_ready || (delay_count > 20)){
+									break;
+								}
+								else{
+									mdelay(1);
+									delay_count++;
+								}
+							}
+
+							if (delay_count < 20){	// If 20ms No Result, then cal_retry++
+								// ============RXIQK Check==============
+								rx_fail = rtl_get_bbreg(hw, 0xd00, BIT(11));
+								if (rx_fail == 0){
+									/*
+									   ODM_Write4Byte(pDM_Odm, 0xcb8, 0x05000000);
+									   reg1 = ODM_GetBBReg(pDM_Odm, 0xd00, 0xffffffff);
+									   ODM_Write4Byte(pDM_Odm, 0xcb8, 0x06000000);
+									   reg2 = ODM_GetBBReg(pDM_Odm, 0xd00, 0x0000001f);
+									   DbgPrint("reg1 = %d, reg2 = %d", reg1, reg2);
+									   Image_Power = (reg2<<32)+reg1;
+									   DbgPrint("Before PW = %d\n", Image_Power);
+									   ODM_Write4Byte(pDM_Odm, 0xcb8, 0x07000000);
+									   reg1 = ODM_GetBBReg(pDM_Odm, 0xd00, 0xffffffff);
+									   ODM_Write4Byte(pDM_Odm, 0xcb8, 0x08000000);
+									   reg2 = ODM_GetBBReg(pDM_Odm, 0xd00, 0x0000001f);
+									   Image_Power = (reg2<<32)+reg1;
+									   DbgPrint("After PW = %d\n", Image_Power);
+									   */
+
+									rtl_write_dword(rtlpriv, 0xcb8, 0x06000000);
+									rx_x0[cal] = rtl_get_bbreg(hw, 0xd00, 0x07ff0000)<<21;
+									rtl_write_dword(rtlpriv, 0xcb8, 0x08000000);
+									rx_y0[cal] = rtl_get_bbreg(hw, 0xd00, 0x07ff0000)<<21;
+									rx0iqkok = true;
+									break;
+								}
+								else{
+									rtl_set_bbreg(hw, 0xc10, 0x000003ff, 0x200>>1);
+									rtl_set_bbreg(hw, 0xc10, 0x03ff0000, 0x0>>1);
+									rx0iqkok = false;
+									cal_retry++;
+									if (cal_retry == 10)
+										break;
+
+								}
+							}
+							else{
+								rx0iqkok = false;
+								cal_retry++;
+								if (cal_retry == 10)
+									break;
+							}
+						}
+					}
+
+					if (tx0iqkok)
+						tx_average++;
+					if (rx0iqkok)
+						rx_average++;
+					rtl_set_bbreg(hw, 0x82c, BIT(31), 0x0); // [31] = 0 --> Page C
+					rtl_set_rfreg(hw, path, 0x65, RFREG_OFFSET_MASK, temp_reg65);
+				}
+				break;
+			default:
+				break;
+		}
+		cal++;
+	}
+
+	// FillIQK Result
+	switch (path){
+		case RF90_PATH_A:
+			{
+				RT_TRACE(COMP_IQK, DBG_LOUD, ("========Path_A =======\n"));
+				if (tx_average == 0)
+					break;
+
+				for (i = 0; i < tx_average; i++){
+					RT_TRACE(COMP_IQK, DBG_LOUD, (" TX_X0_RXK[%d] = %x ;; TX_Y0_RXK[%d] = %x\n", i, (tx_x0_rxk[i])>>21&0x000007ff, i, (tx_y0_rxk[i])>>21&0x000007ff));
+					RT_TRACE(COMP_IQK, DBG_LOUD, ("TX_X0[%d] = %x ;; TX_Y0[%d] = %x\n", i, (tx_x0[i])>>21&0x000007ff, i, (tx_y0[i])>>21&0x000007ff));
+				}
+				for (i = 0; i < tx_average; i++){
+					for (ii = i+1; ii <tx_average; ii++){
+						dx = (tx_x0[i]>>21) - (tx_x0[ii]>>21);
+						if (dx < 3 && dx > -3){
+							dy = (tx_y0[i]>>21) - (tx_y0[ii]>>21);
+							if (dy < 3 && dy > -3){
+								tx_x = ((tx_x0[i]>>21) + (tx_x0[ii]>>21))/2;
+								tx_y = ((tx_y0[i]>>21) + (tx_y0[ii]>>21))/2;
+								tx_finish = 1;
+								break;
+							}
+						}
+					}
+					if (tx_finish == 1)
+						break;
+				}
+
+				if (tx_finish == 1){
+					_rtl8821ae_iqk_tx_fill_iqc(hw, path, tx_x, tx_y); // ?
+				}
+				else{
+					_rtl8821ae_iqk_tx_fill_iqc(hw, path, 0x200, 0x0);
+				}
+
+				if (rx_average == 0)
+					break;
+
+				for (i = 0; i < rx_average; i++){
+					RT_TRACE(COMP_IQK, DBG_LOUD, ("RX_X0[%d] = %x ;; RX_Y0[%d] = %x\n", i, (rx_x0[i])>>21&0x000007ff, i, (rx_y0[i])>>21&0x000007ff));
+				}
+				for (i = 0; i < rx_average; i++){
+					for (ii = i+1; ii <rx_average; ii++){
+						dx = (rx_x0[i]>>21) - (rx_x0[ii]>>21);
+						if (dx < 4 && dx > -4){
+							dy = (rx_y0[i]>>21) - (rx_y0[ii]>>21);
+							if (dy < 4 && dy > -4){
+								rx_x = ((rx_x0[i]>>21) + (rx_x0[ii]>>21))/2;
+								rx_y = ((rx_y0[i]>>21) + (rx_y0[ii]>>21))/2;
+								rx_finish = 1;
+								break;
+							}
+						}
+					}
+					if (rx_finish == 1)
+						break;
+				}
+
+				if (rx_finish == 1){
+					_rtl8821ae_iqk_rx_fill_iqc(hw, path, rx_x, rx_y);
+				}
+				else{
+					_rtl8821ae_iqk_rx_fill_iqc(hw, path, 0x200, 0x0);
+				}
+			}
+			break;
+		default:
+			break;
+	}
+}
+
+void _rtl8821ae_iqk_restore_rf(
+		struct ieee80211_hw *hw,
+		enum radio_path path,
+		u32*			backup_rf_reg,
+		u32* 			rf_backup,
+		u32			rf_reg_num
+		)
+{
+	u32 i;
+	struct rtl_priv* rtlpriv = rtl_priv(hw);
+
+	rtl_set_bbreg(hw, 0x82c, BIT(31), 0x0); // [31] = 0 --> Page C
+	for (i = 0; i < rf_reg_num; i++)
+		rtl_set_rfreg(hw, path, backup_rf_reg[i], RFREG_OFFSET_MASK, rf_backup[i]);
+
+	switch(path){
+		case RF90_PATH_A:
+			{
+				RT_TRACE(COMP_IQK, DBG_LOUD, ("RestoreRF Path A Success!!!!\n"));
+			}
+			break;
+		default:
+			break;
+	}
+}
+
+void _rtl8821ae_iqk_restore_afe(
+		struct ieee80211_hw *hw,
+		u32*		afe_backup,
+		u32*		backup_afe_reg,
+		u32		afe_num
+		)
+{
+	u32 i;
+	struct rtl_priv* rtlpriv = rtl_priv(hw);
+
+	rtl_set_bbreg(hw, 0x82c, BIT(31), 0x0); // [31] = 0 --> Page C
+	//Reload AFE Parameters
+	for (i = 0; i < afe_num; i++){
+		rtl_write_dword(rtlpriv, backup_afe_reg[i], afe_backup[i]);
+	}
+	rtl_set_bbreg(hw, 0x82c, BIT(31), 0x1); // [31] = 1 --> Page C1
+	rtl_write_dword(rtlpriv, 0xc80, 0x0);
+	rtl_write_dword(rtlpriv, 0xc84, 0x0);
+	rtl_write_dword(rtlpriv, 0xc88, 0x0);
+	rtl_write_dword(rtlpriv, 0xc8c, 0x3c000000);
+	rtl_write_dword(rtlpriv, 0xc90, 0x00000080);
+	rtl_write_dword(rtlpriv, 0xc94, 0x00000000);
+	rtl_write_dword(rtlpriv, 0xcc4, 0x20040000);
+	rtl_write_dword(rtlpriv, 0xcc8, 0x20000000);
+	rtl_write_dword(rtlpriv, 0xcb8, 0x0);
+	RT_TRACE(COMP_IQK, DBG_LOUD, ("RestoreAFE Success!!!!\n"));
+}
+
+void _rtl8821ae_iqk_restore_macbb(
+		struct ieee80211_hw *hw,
+		u32*		macbb_backup,
+		u32*		backup_macbb_reg,
+		u32		macbb_num
+		)
+{
+	u32 i;
+	struct rtl_priv* rtlpriv = rtl_priv(hw);
+
+	rtl_set_bbreg(hw, 0x82c, BIT(31), 0x0); // [31] = 0 --> Page C
+	//Reload MacBB Parameters
+	for (i = 0; i < macbb_num; i++){
+		rtl_write_dword(rtlpriv, backup_macbb_reg[i], macbb_backup[i]);
+	}
+	RT_TRACE(COMP_IQK, DBG_LOUD, ("RestoreMacBB Success!!!!\n"));
+}
+
+
+#undef MACBB_REG_NUM
+#undef AFE_REG_NUM
+#undef RF_REG_NUM
+
+#define MACBB_REG_NUM 11
+#define AFE_REG_NUM 12
+#define RF_REG_NUM 3
+
+static void _rtl8821ae_phy_iq_calibrate(struct ieee80211_hw *hw)
+{
+	u32	macbb_backup[MACBB_REG_NUM];
+	u32 afe_backup[AFE_REG_NUM];
+	u32 rfa_backup[RF_REG_NUM];
+	u32 rfb_backup[RF_REG_NUM];
+	u32 	backup_macbb_reg[MACBB_REG_NUM] = {0xb00, 0x520, 0x550, 0x808, 0x90c, 0xc00, 0xc50,
+							0xe00, 0xe50, 0x838, 0x82c};
+	u32	backup_afe_reg[AFE_REG_NUM] = {0xc5c, 0xc60, 0xc64, 0xc68, 0xc6c, 0xc70, 0xc74,
+							0xc78, 0xc7c, 0xc80, 0xc84, 0xcb8};
+	u32 	backup_rf_reg[RF_REG_NUM] = {0x65, 0x8f, 0x0};
+
+	_rtl8821ae_iqk_backup_macbb(hw, macbb_backup, backup_macbb_reg, MACBB_REG_NUM);
+	_rtl8821ae_iqk_backup_afe(hw, afe_backup, backup_afe_reg, AFE_REG_NUM);
+	_rtl8821ae_iqk_backup_rf(hw, rfa_backup, rfb_backup, backup_rf_reg, RF_REG_NUM);
+
+	_rtl8821ae_iqk_configure_mac(hw);
+	_rtl8821ae_iqk_tx(hw, RF90_PATH_A);
+	_rtl8821ae_iqk_restore_rf(hw, RF90_PATH_A, backup_rf_reg, rfa_backup, RF_REG_NUM);
+
+	_rtl8821ae_iqk_restore_afe(hw, afe_backup, backup_afe_reg, AFE_REG_NUM);
+	_rtl8821ae_iqk_restore_macbb(hw, macbb_backup, backup_macbb_reg, MACBB_REG_NUM);
+}
+
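+/*
+ * LC calibration: pause TX (via 0xd03 or REG_TXPAUSE), move the RF chains
+ * into a safe mode, start the calibration through RF registers 0xb0/0x18,
+ * wait 100 ms for it to finish, then restore the original RF mode and TX.
+ */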
+static void _rtl8821ae_phy_lc_calibrate(struct ieee80211_hw *hw, bool is2t)
+{
+	u8 tmpreg;
+	u32 rf_a_mode = 0, rf_b_mode = 0, lc_cal;
+	struct rtl_priv *rtlpriv = rtl_priv(hw);
+
+	tmpreg = rtl_read_byte(rtlpriv, 0xd03);
+
+	if ((tmpreg & 0x70) != 0)
+		rtl_write_byte(rtlpriv, 0xd03, tmpreg & 0x8F);
+	else
+		rtl_write_byte(rtlpriv, REG_TXPAUSE, 0xFF);
+
+	if ((tmpreg & 0x70) != 0) {
+		rf_a_mode = rtl_get_rfreg(hw, RF90_PATH_A, 0x00, MASK12BITS);
+
+		if (is2t)
+			rf_b_mode = rtl_get_rfreg(hw, RF90_PATH_B, 0x00,
+						  MASK12BITS);
+
+		rtl_set_rfreg(hw, RF90_PATH_A, 0x00, MASK12BITS,
+			      (rf_a_mode & 0x8FFFF) | 0x10000);
+
+		if (is2t)
+			rtl_set_rfreg(hw, RF90_PATH_B, 0x00, MASK12BITS,
+				      (rf_b_mode & 0x8FFFF) | 0x10000);
+	}
+	lc_cal = rtl_get_rfreg(hw, RF90_PATH_A, 0x18, MASK12BITS);
+
+	rtl_set_rfreg(hw, RF90_PATH_A, 0xb0, RFREG_OFFSET_MASK, 0xdfbe0);
+	/* rtl_set_rfreg(hw, RF90_PATH_A, 0x18, MASK12BITS, lc_cal | 0x08000); */
+	rtl_set_rfreg(hw, RF90_PATH_A, 0x18, MASK12BITS, 0x8c0a);
+
+	mdelay(100);
+
+	rtl_set_rfreg(hw, RF90_PATH_A, 0xb0, RFREG_OFFSET_MASK, 0xdffe0);
+
+	if ((tmpreg & 0x70) != 0) {
+		rtl_write_byte(rtlpriv, 0xd03, tmpreg);
+		rtl_set_rfreg(hw, RF90_PATH_A, 0x00, MASK12BITS, rf_a_mode);
+
+		if (is2t)
+			rtl_set_rfreg(hw, RF90_PATH_B, 0x00, MASK12BITS, rf_b_mode);
+	} else {
+		rtl_write_byte(rtlpriv, REG_TXPAUSE, 0x00);
+	}
+	RT_TRACE(COMP_INIT, DBG_LOUD, ("\n"));
+}
+
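+/* Route the RF path through the RFE pinmux: 0x1 when the main antenna is selected, 0x2 otherwise. */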
+static void _rtl8821ae_phy_set_rfpath_switch(struct ieee80211_hw *hw, bool main)
+{
+	struct rtl_priv *rtlpriv = rtl_priv(hw);
+	//struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
+	//struct rtl_efuse *rtlefuse = rtl_efuse(rtl_priv(hw));
+	RT_TRACE(COMP_INIT, DBG_LOUD, ("\n"));
+
+	if (main)
+		rtl_set_bbreg(hw, RA_RFE_PINMUX + 4, BIT(29) | BIT(28), 0x1);
+	else
+		rtl_set_bbreg(hw, RA_RFE_PINMUX + 4, BIT(29) | BIT(28), 0x2);
+}
+
+#undef IQK_ADDA_REG_NUM
+#undef IQK_DELAY_TIME
+
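+/*
+ * Public IQK entry points: the b_iqk_in_progress flag, updated under
+ * iqk_lock, keeps a second calibration from starting while one is already
+ * running; the b_recovery argument is currently unused.
+ */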
+void rtl8812ae_phy_iq_calibrate(struct ieee80211_hw *hw, bool b_recovery)
+{
+	struct rtl_priv *rtlpriv = rtl_priv(hw);
+	struct rtl_phy *rtlphy = &(rtlpriv->phy);
+
+	if (!rtlphy->b_iqk_in_progress)
+	{
+		spin_lock(&rtlpriv->locks.iqk_lock);
+		rtlphy->b_iqk_in_progress = true;
+		spin_unlock(&rtlpriv->locks.iqk_lock);
+
+		_rtl8812ae_phy_iq_calibrate(hw, rtlphy->current_channel);
+
+		spin_lock(&rtlpriv->locks.iqk_lock);
+		rtlphy->b_iqk_in_progress = false;
+		spin_unlock(&rtlpriv->locks.iqk_lock);
+	}
+}
+
+void rtl8812ae_reset_iqk_result(struct ieee80211_hw *hw)
+{
+	struct rtl_priv *rtlpriv = rtl_priv(hw);
+	struct rtl_phy *rtlphy = &(rtlpriv->phy);
+	u8 i;
+
+	RT_TRACE(COMP_IQK, DBG_LOUD,
+		("rtl8812ae_reset_iqk_result:: settings regs %d default regs %d\n",
+		(int)(sizeof(rtlphy->iqk_matrix_regsetting) /
+		sizeof(struct iqk_matrix_regs)),
+		IQK_MATRIX_SETTINGS_NUM));
+
+	for(i = 0; i < IQK_MATRIX_SETTINGS_NUM; i++) {
+		{
+			rtlphy->iqk_matrix_regsetting[i].value[0][0] =
+				rtlphy->iqk_matrix_regsetting[i].value[0][2] =
+				rtlphy->iqk_matrix_regsetting[i].value[0][4] =
+				rtlphy->iqk_matrix_regsetting[i].value[0][6] = 0x100;
+
+			rtlphy->iqk_matrix_regsetting[i].value[0][1] =
+				rtlphy->iqk_matrix_regsetting[i].value[0][3] =
+				rtlphy->iqk_matrix_regsetting[i].value[0][5] =
+				rtlphy->iqk_matrix_regsetting[i].value[0][7] = 0x0;
+
+			rtlphy->iqk_matrix_regsetting[i].b_iqk_done = false;
+
+		}
+	}
+}
+
+void rtl8812ae_do_iqk(struct ieee80211_hw *hw,u8 delta_thermal_index,
+	u8 thermal_value, u8 threshold)
+{
+	struct rtl_dm	*rtldm = rtl_dm(rtl_priv(hw));
+
+	rtl8812ae_reset_iqk_result(hw);
+
+	rtldm->thermalvalue_iqk = thermal_value;
+	rtl8812ae_phy_iq_calibrate(hw, false);
+}
+
+void rtl8821ae_phy_iq_calibrate(struct ieee80211_hw *hw, bool b_recovery)
+{
+	struct rtl_priv *rtlpriv = rtl_priv(hw);
+	struct rtl_phy *rtlphy = &(rtlpriv->phy);
+
+	if (!rtlphy->b_iqk_in_progress)
+	{
+		spin_lock(&rtlpriv->locks.iqk_lock);
+		rtlphy->b_iqk_in_progress = true;
+		spin_unlock(&rtlpriv->locks.iqk_lock);
+
+		_rtl8821ae_phy_iq_calibrate(hw);
+
+		spin_lock(&rtlpriv->locks.iqk_lock);
+		rtlphy->b_iqk_in_progress = false;
+		spin_unlock(&rtlpriv->locks.iqk_lock);
+	}
+}
+
+void rtl8821ae_reset_iqk_result(struct ieee80211_hw *hw)
+{
+	struct rtl_priv *rtlpriv = rtl_priv(hw);
+	struct rtl_phy *rtlphy = &(rtlpriv->phy);
+	u8 i;
+
+	RT_TRACE(COMP_IQK, DBG_LOUD,
+		("rtl8821ae_reset_iqk_result:: settings regs %d default regs %d\n",
+		(int)(sizeof(rtlphy->iqk_matrix_regsetting) /
+		sizeof(struct iqk_matrix_regs)),
+		IQK_MATRIX_SETTINGS_NUM));
+
+	for(i = 0; i < IQK_MATRIX_SETTINGS_NUM; i++) {
+		{
+			rtlphy->iqk_matrix_regsetting[i].value[0][0] =
+				rtlphy->iqk_matrix_regsetting[i].value[0][2] =
+				rtlphy->iqk_matrix_regsetting[i].value[0][4] =
+				rtlphy->iqk_matrix_regsetting[i].value[0][6] = 0x100;
+
+			rtlphy->iqk_matrix_regsetting[i].value[0][1] =
+				rtlphy->iqk_matrix_regsetting[i].value[0][3] =
+				rtlphy->iqk_matrix_regsetting[i].value[0][5] =
+				rtlphy->iqk_matrix_regsetting[i].value[0][7] = 0x0;
+
+			rtlphy->iqk_matrix_regsetting[i].b_iqk_done = false;
+
+		}
+	}
+}
+
+void rtl8821ae_do_iqk(struct ieee80211_hw *hw,u8 delta_thermal_index,
+	u8 thermal_value, u8 threshold)
+{
+	struct rtl_dm	*rtldm = rtl_dm(rtl_priv(hw));
+
+	rtl8821ae_reset_iqk_result(hw);
+
+	rtldm->thermalvalue_iqk = thermal_value;
+	rtl8821ae_phy_iq_calibrate(hw, false);
+}
+
+void rtl8821ae_phy_lc_calibrate(struct ieee80211_hw *hw)
+{
+	struct rtl_priv *rtlpriv = rtl_priv(hw);
+	struct rtl_phy *rtlphy = &(rtlpriv->phy);
+	struct rtl_hal *rtlhal = &(rtlpriv->rtlhal);
+	u32 timeout = 2000, timecount = 0;
+
+
+	while (rtlpriv->mac80211.act_scanning && timecount < timeout) {
+		udelay(50);
+		timecount += 50;
+	}
+
+	rtlphy->lck_inprogress = true;
+	RTPRINT(rtlpriv, FINIT, INIT_IQK,
+		("LCK:Start!!! currentband %x delay %d ms\n",
+		 rtlhal->current_bandtype, timecount));
+
+	_rtl8821ae_phy_lc_calibrate(hw, false);
+
+	rtlphy->lck_inprogress = false;
+}
+
+void rtl8821ae_phy_ap_calibrate(struct ieee80211_hw *hw, char delta)
+{
+	struct rtl_priv *rtlpriv = rtl_priv(hw);
+	struct rtl_phy *rtlphy = &(rtlpriv->phy);
+
+	if (rtlphy->b_apk_done)
+		return;
+
+	return;
+}
+
+void rtl8821ae_phy_set_rfpath_switch(struct ieee80211_hw *hw, bool bmain)
+{
+	_rtl8821ae_phy_set_rfpath_switch(hw, bmain);
+}
+
+bool rtl8821ae_phy_set_io_cmd(struct ieee80211_hw *hw, enum io_type iotype)
+{
+	struct rtl_priv *rtlpriv = rtl_priv(hw);
+	struct rtl_phy *rtlphy = &(rtlpriv->phy);
+	bool b_postprocessing = false;
+
+	RT_TRACE(COMP_CMD, DBG_TRACE,
+		 ("-->IO Cmd(%#x), set_io_inprogress(%d)\n",
+		  iotype, rtlphy->set_io_inprogress));
+	do {
+		switch (iotype) {
+		case IO_CMD_RESUME_DM_BY_SCAN:
+			RT_TRACE(COMP_CMD, DBG_TRACE,
+				 ("[IO CMD] Resume DM after scan.\n"));
+			b_postprocessing = true;
+			break;
+		case IO_CMD_PAUSE_BAND0_DM_BY_SCAN:
+		case IO_CMD_PAUSE_BAND1_DM_BY_SCAN:
+			RT_TRACE(COMP_CMD, DBG_TRACE,
+				 ("[IO CMD] Pause DM before scan.\n"));
+			b_postprocessing = true;
+			break;
+		default:
+			RT_TRACE(COMP_ERR, DBG_EMERG,
+				 ("switch case not processed\n"));
+			break;
+		}
+	} while (false);
+	if (b_postprocessing && !rtlphy->set_io_inprogress) {
+		rtlphy->set_io_inprogress = true;
+		rtlphy->current_io_type = iotype;
+	} else {
+		return false;
+	}
+	rtl8821ae_phy_set_io(hw);
+	RT_TRACE(COMP_CMD, DBG_TRACE, ("IO Type(%#x)\n", iotype));
+	return true;
+}
+
+static void rtl8821ae_phy_set_io(struct ieee80211_hw *hw)
+{
+	struct rtl_priv *rtlpriv = rtl_priv(hw);
+	struct rtl_phy *rtlphy = &(rtlpriv->phy);
+
+	RT_TRACE(COMP_CMD, DBG_TRACE,
+		 ("--->Cmd(%#x), set_io_inprogress(%d)\n",
+		  rtlphy->current_io_type, rtlphy->set_io_inprogress));
+	switch (rtlphy->current_io_type) {
+	case IO_CMD_RESUME_DM_BY_SCAN:
+		if (rtlpriv->mac80211.opmode == NL80211_IFTYPE_ADHOC)
+			_rtl8821ae_resume_tx_beacon(hw);
+		rtl8821ae_dm_write_dig(hw, rtlphy->initgain_backup.xaagccore1);
+		rtl8821ae_dm_write_cck_cca_thres(hw, rtlphy->initgain_backup.cca);
+		break;
+	case IO_CMD_PAUSE_BAND0_DM_BY_SCAN:
+		if (rtlpriv->mac80211.opmode == NL80211_IFTYPE_ADHOC)
+			_rtl8821ae_stop_tx_beacon(hw);
+		rtlphy->initgain_backup.xaagccore1 = dm_digtable.cur_igvalue;
+		rtl8821ae_dm_write_dig(hw, 0x17);
+		rtlphy->initgain_backup.cca = dm_digtable.cur_cck_cca_thres;
+		rtl8821ae_dm_write_cck_cca_thres(hw, 0x40);
+		break;
+	case IO_CMD_PAUSE_BAND1_DM_BY_SCAN:
+		break;
+	default:
+		RT_TRACE(COMP_ERR, DBG_EMERG,
+			 ("switch case not processed\n"));
+		break;
+	}
+	rtlphy->set_io_inprogress = false;
+	RT_TRACE(COMP_CMD, DBG_TRACE,
+		 ("(%#x)\n", rtlphy->current_io_type));
+}
+
+static void rtl8821ae_phy_set_rf_on(struct ieee80211_hw *hw)
+{
+	struct rtl_priv *rtlpriv = rtl_priv(hw);
+
+	rtl_write_byte(rtlpriv, REG_SPS0_CTRL, 0x2b);
+	rtl_write_byte(rtlpriv, REG_SYS_FUNC_EN, 0xE3);
+	rtl_write_byte(rtlpriv, REG_SYS_FUNC_EN, 0xE2);
+	rtl_write_byte(rtlpriv, REG_SYS_FUNC_EN, 0xE3);
+	rtl_write_byte(rtlpriv, REG_TXPAUSE, 0x00);
+}
+
+#if 0
+static void _rtl8821ae_phy_set_rf_sleep(struct ieee80211_hw *hw)
+{
+	struct rtl_priv *rtlpriv = rtl_priv(hw);
+
+	rtl_write_byte(rtlpriv, REG_TXPAUSE, 0xFF);
+	rtl_set_rfreg(hw, RF90_PATH_A, 0x00, RFREG_OFFSET_MASK, 0x00);
+	/*rtl_write_byte(rtlpriv, REG_APSD_CTRL, 0x40);
+	u4b_tmp = rtl_get_rfreg(hw, RF90_PATH_A, 0, RFREG_OFFSET_MASK);
+	while (u4b_tmp != 0 && delay > 0) {
+		rtl_write_byte(rtlpriv, REG_APSD_CTRL, 0x0);
+		rtl_set_rfreg(hw, RF90_PATH_A, 0x00, RFREG_OFFSET_MASK, 0x00);
+		rtl_write_byte(rtlpriv, REG_APSD_CTRL, 0x40);
+		u4b_tmp = rtl_get_rfreg(hw, RF90_PATH_A, 0, RFREG_OFFSET_MASK);
+		delay--;
+	}
+	if (delay == 0) {
+		rtl_write_byte(rtlpriv, REG_APSD_CTRL, 0x00);
+		rtl_write_byte(rtlpriv, REG_SYS_FUNC_EN, 0xE2);
+		rtl_write_byte(rtlpriv, REG_SYS_FUNC_EN, 0xE3);
+		rtl_write_byte(rtlpriv, REG_TXPAUSE, 0x00);
+		RT_TRACE(COMP_POWER, DBG_TRACE,
+			 ("Switch RF timeout !!!.\n"));
+		return;
+	}*/
+	rtl_write_byte(rtlpriv, REG_SYS_FUNC_EN, 0xE2);
+	rtl_write_byte(rtlpriv, REG_SPS0_CTRL, 0x22);
+}
+#endif
+
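+/*
+ * RF power state handling: ERFON either re-initializes the NIC (when it was
+ * halted for IPS) or simply turns the RF back on; ERFOFF waits for the TX
+ * queues to drain before halting the NIC or switching the LED off.  The
+ * ERFSLEEP case is kept but commented out.
+ */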
+static bool _rtl8821ae_phy_set_rf_power_state(struct ieee80211_hw *hw,
+					    enum rf_pwrstate rfpwr_state)
+{
+	struct rtl_priv *rtlpriv = rtl_priv(hw);
+	struct rtl_pci_priv *pcipriv = rtl_pcipriv(hw);
+	struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
+	struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw));
+	bool bresult = true;
+	u8 i, queue_id;
+	struct rtl8192_tx_ring *ring = NULL;
+
+	switch (rfpwr_state) {
+	case ERFON:{
+			if ((ppsc->rfpwr_state == ERFOFF) &&
+			    RT_IN_PS_LEVEL(ppsc, RT_RF_OFF_LEVL_HALT_NIC)) {
+				bool rtstatus = false;
+				u32 InitializeCount = 0;
+				do {
+					InitializeCount++;
+					RT_TRACE(COMP_RF, DBG_DMESG,
+						 ("IPS Set eRf nic enable\n"));
+					rtstatus = rtl_ps_enable_nic(hw);
+				} while ((rtstatus != true)
+					 && (InitializeCount < 10));
+				RT_CLEAR_PS_LEVEL(ppsc,
+						  RT_RF_OFF_LEVL_HALT_NIC);
+			} else {
+				RT_TRACE(COMP_RF, DBG_DMESG,
+					 ("Set ERFON slept:%d ms\n",
+					  jiffies_to_msecs(jiffies -
+							   ppsc->
+							   last_sleep_jiffies)));
+				ppsc->last_awake_jiffies = jiffies;
+				rtl8821ae_phy_set_rf_on(hw);
+			}
+			if (mac->link_state == MAC80211_LINKED) {
+				rtlpriv->cfg->ops->led_control(hw,
+							       LED_CTL_LINK);
+			} else {
+				rtlpriv->cfg->ops->led_control(hw,
+							       LED_CTL_NO_LINK);
+			}
+			break;
+		}
+	case ERFOFF:{
+			for (queue_id = 0, i = 0;
+			     queue_id < RTL_PCI_MAX_TX_QUEUE_COUNT;) {
+				ring = &pcipriv->dev.tx_ring[queue_id];
+				if (skb_queue_len(&ring->queue) == 0) {
+					queue_id++;
+					continue;
+				} else {
+					RT_TRACE(COMP_ERR, DBG_WARNING,
+						 ("eRf Off/Sleep: %d times "
+						  "TcbBusyQueue[%d] =%d before "
+						  "doze!\n", (i + 1), queue_id,
+						  skb_queue_len(&ring->queue)));
+
+					udelay(10);
+					i++;
+				}
+				if (i >= MAX_DOZE_WAITING_TIMES_9x) {
+					RT_TRACE(COMP_ERR, DBG_WARNING,
+						 ("\n ERFSLEEP: %d times "
+						  "TcbBusyQueue[%d] = %d !\n",
+						  MAX_DOZE_WAITING_TIMES_9x,
+						  queue_id,
+						  skb_queue_len(&ring->queue)));
+					break;
+				}
+			}
+
+			if (ppsc->reg_rfps_level & RT_RF_OFF_LEVL_HALT_NIC) {
+				RT_TRACE(COMP_RF, DBG_DMESG,
+					 ("IPS Set eRf nic disable\n"));
+				rtl_ps_disable_nic(hw);
+				RT_SET_PS_LEVEL(ppsc, RT_RF_OFF_LEVL_HALT_NIC);
+			} else {
+				if (ppsc->rfoff_reason == RF_CHANGE_BY_IPS) {
+					rtlpriv->cfg->ops->led_control(hw,
+								       LED_CTL_NO_LINK);
+				} else {
+					rtlpriv->cfg->ops->led_control(hw,
+								       LED_CTL_POWER_OFF);
+				}
+			}
+			break;
+		}
+	/*case ERFSLEEP:{
+			if (ppsc->rfpwr_state == ERFOFF)
+				break;
+			for (queue_id = 0, i = 0;
+			     queue_id < RTL_PCI_MAX_TX_QUEUE_COUNT;) {
+				ring = &pcipriv->dev.tx_ring[queue_id];
+				if (skb_queue_len(&ring->queue) == 0) {
+					queue_id++;
+					continue;
+				} else {
+					RT_TRACE(COMP_ERR, DBG_WARNING,
+						 ("eRf Off/Sleep: %d times "
+						  "TcbBusyQueue[%d] =%d before "
+						  "doze!\n", (i + 1), queue_id,
+						  skb_queue_len(&ring->queue)));
+
+					udelay(10);
+					i++;
+				}
+				if (i >= MAX_DOZE_WAITING_TIMES_9x) {
+					RT_TRACE(COMP_ERR, DBG_WARNING,
+						 ("\n ERFSLEEP: %d times "
+						  "TcbBusyQueue[%d] = %d !\n",
+						  MAX_DOZE_WAITING_TIMES_9x,
+						  queue_id,
+						  skb_queue_len(&ring->queue)));
+					break;
+				}
+			}
+			RT_TRACE(COMP_RF, DBG_DMESG,
+				 ("Set ERFSLEEP awaked:%d ms\n",
+				  jiffies_to_msecs(jiffies -
+						   ppsc->last_awake_jiffies)));
+			ppsc->last_sleep_jiffies = jiffies;
+			_rtl8821ae_phy_set_rf_sleep(hw);
+			break;
+		}*/
+	default:
+		RT_TRACE(COMP_ERR, DBG_EMERG,
+			 ("switch case not processed\n"));
+		bresult = false;
+		break;
+	}
+	if (bresult)
+		ppsc->rfpwr_state = rfpwr_state;
+	return bresult;
+}
+
+bool rtl8821ae_phy_set_rf_power_state(struct ieee80211_hw *hw,
+				   enum rf_pwrstate rfpwr_state)
+{
+	struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw));
+
+	bool bresult = false;
+
+	if (rfpwr_state == ppsc->rfpwr_state)
+		return bresult;
+	bresult = _rtl8821ae_phy_set_rf_power_state(hw, rfpwr_state);
+	return bresult;
+}
diff --git a/drivers/staging/rtl8821ae/rtl8821ae/phy.h b/drivers/staging/rtl8821ae/rtl8821ae/phy.h
new file mode 100644
index 0000000..a932d8c
--- /dev/null
+++ b/drivers/staging/rtl8821ae/rtl8821ae/phy.h
@@ -0,0 +1,258 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2009-2010  Realtek Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ * The full GNU General Public License is included in this distribution in the
+ * file called LICENSE.
+ *
+ * Contact Information:
+ * wlanfae <wlanfae@realtek.com>
+ * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
+ * Hsinchu 300, Taiwan.
+ *
+ * Larry Finger <Larry.Finger@lwfinger.net>
+ *
+ *****************************************************************************/
+
+#ifndef __RTL8821AE_PHY_H__
+#define __RTL8821AE_PHY_H__
+
+/* It must always be set to 4, otherwise the efuse table read sequence will be wrong. */
+#define MAX_TX_COUNT	4
+#define	TX_1S			0
+#define	TX_2S			1
+#define	TX_3S			2
+#define	TX_4S			3
+
+#define	MAX_POWER_INDEX	0x3F
+
+#define MAX_PRECMD_CNT 				16
+#define MAX_RFDEPENDCMD_CNT 		16
+#define MAX_POSTCMD_CNT 			16
+
+#define MAX_DOZE_WAITING_TIMES_9x 	64
+
+#define RT_CANNOT_IO(hw)			false
+#define HIGHPOWER_RADIOA_ARRAYLEN 	22
+
+#define IQK_ADDA_REG_NUM			16
+#define IQK_BB_REG_NUM				9
+#define MAX_TOLERANCE				5
+#define	IQK_DELAY_TIME				10
+#define	index_mapping_NUM	15
+
+#define	APK_BB_REG_NUM				5
+#define	APK_AFE_REG_NUM				16
+#define	APK_CURVE_REG_NUM 			4
+#define	PATH_NUM					2
+
+#define LOOP_LIMIT					5
+#define MAX_STALL_TIME				50
+#define AntennaDiversityValue		0x80
+#define MAX_TXPWR_IDX_NMODE_92S		63
+#define Reset_Cnt_Limit				3
+
+#define IQK_ADDA_REG_NUM			16
+#define IQK_MAC_REG_NUM				4
+
+#define RF6052_MAX_PATH				2
+
+#define CT_OFFSET_MAC_ADDR			0x16
+
+#define CT_OFFSET_CCK_TX_PWR_IDX			0x5A
+#define CT_OFFSET_HT401S_TX_PWR_IDX			0x60
+#define CT_OFFSET_HT402S_TX_PWR_IDX_DIFF	0x66
+#define CT_OFFSET_HT20_TX_PWR_IDX_DIFF		0x69
+#define CT_OFFSET_OFDM_TX_PWR_IDX_DIFF		0x6C
+
+#define CT_OFFSET_HT40_MAX_PWR_OFFSET		0x6F
+#define CT_OFFSET_HT20_MAX_PWR_OFFSET		0x72
+
+#define CT_OFFSET_CHANNEL_PLAH				0x75
+#define CT_OFFSET_THERMAL_METER				0x78
+#define CT_OFFSET_RF_OPTION					0x79
+#define CT_OFFSET_VERSION					0x7E
+#define CT_OFFSET_CUSTOMER_ID				0x7F
+
+#define RTL8821AE_MAX_PATH_NUM					2
+
+#define TARGET_CHNL_NUM_2G_5G_8812	59
+
+enum swchnlcmd_id {
+	CMDID_END,
+	CMDID_SET_TXPOWEROWER_LEVEL,
+	CMDID_BBREGWRITE10,
+	CMDID_WRITEPORT_ULONG,
+	CMDID_WRITEPORT_USHORT,
+	CMDID_WRITEPORT_UCHAR,
+	CMDID_RF_WRITEREG,
+};
+
+struct swchnlcmd {
+	enum swchnlcmd_id cmdid;
+	u32 para1;
+	u32 para2;
+	u32 msdelay;
+};
+
+enum hw90_block_e {
+	HW90_BLOCK_MAC = 0,
+	HW90_BLOCK_PHY0 = 1,
+	HW90_BLOCK_PHY1 = 2,
+	HW90_BLOCK_RF = 3,
+	HW90_BLOCK_MAXIMUM = 4,
+};
+
+enum baseband_config_type {
+	BASEBAND_CONFIG_PHY_REG = 0,
+	BASEBAND_CONFIG_AGC_TAB = 1,
+};
+
+enum ra_offset_area {
+	RA_OFFSET_LEGACY_OFDM1,
+	RA_OFFSET_LEGACY_OFDM2,
+	RA_OFFSET_HT_OFDM1,
+	RA_OFFSET_HT_OFDM2,
+	RA_OFFSET_HT_OFDM3,
+	RA_OFFSET_HT_OFDM4,
+	RA_OFFSET_HT_CCK,
+};
+
+enum antenna_path {
+	ANTENNA_NONE,
+	ANTENNA_D,
+	ANTENNA_C,
+	ANTENNA_CD,
+	ANTENNA_B,
+	ANTENNA_BD,
+	ANTENNA_BC,
+	ANTENNA_BCD,
+	ANTENNA_A,
+	ANTENNA_AD,
+	ANTENNA_AC,
+	ANTENNA_ACD,
+	ANTENNA_AB,
+	ANTENNA_ABD,
+	ANTENNA_ABC,
+	ANTENNA_ABCD
+};
+
+struct r_antenna_select_ofdm {
+	u32 r_tx_antenna:4;
+	u32 r_ant_l:4;
+	u32 r_ant_non_ht:4;
+	u32 r_ant_ht1:4;
+	u32 r_ant_ht2:4;
+	u32 r_ant_ht_s1:4;
+	u32 r_ant_non_ht_s1:4;
+	u32 ofdm_txsc:2;
+	u32 reserved:2;
+};
+
+struct r_antenna_select_cck {
+	u8 r_cckrx_enable_2:2;
+	u8 r_cckrx_enable:2;
+	u8 r_ccktx_enable:4;
+};
+
+
+struct efuse_contents {
+	u8 mac_addr[ETH_ALEN];
+	u8 cck_tx_power_idx[6];
+	u8 ht40_1s_tx_power_idx[6];
+	u8 ht40_2s_tx_power_idx_diff[3];
+	u8 ht20_tx_power_idx_diff[3];
+	u8 ofdm_tx_power_idx_diff[3];
+	u8 ht40_max_power_offset[3];
+	u8 ht20_max_power_offset[3];
+	u8 channel_plan;
+	u8 thermal_meter;
+	u8 rf_option[5];
+	u8 version;
+	u8 oem_id;
+	u8 regulatory;
+};
+
+struct tx_power_struct {
+	u8 cck[RTL8821AE_MAX_PATH_NUM][CHANNEL_MAX_NUMBER];
+	u8 ht40_1s[RTL8821AE_MAX_PATH_NUM][CHANNEL_MAX_NUMBER];
+	u8 ht40_2s[RTL8821AE_MAX_PATH_NUM][CHANNEL_MAX_NUMBER];
+	u8 ht20_diff[RTL8821AE_MAX_PATH_NUM][CHANNEL_MAX_NUMBER];
+	u8 legacy_ht_diff[RTL8821AE_MAX_PATH_NUM][CHANNEL_MAX_NUMBER];
+	u8 legacy_ht_txpowerdiff;
+	u8 groupht20[RTL8821AE_MAX_PATH_NUM][CHANNEL_MAX_NUMBER];
+	u8 groupht40[RTL8821AE_MAX_PATH_NUM][CHANNEL_MAX_NUMBER];
+	u8 pwrgroup_cnt;
+	u32 mcs_original_offset[4][16];
+};
+
+enum _ANT_DIV_TYPE {
+	NO_ANTDIV		= 0xFF,
+	CG_TRX_HW_ANTDIV	= 0x01,
+	CGCS_RX_HW_ANTDIV	= 0x02,
+	FIXED_HW_ANTDIV		= 0x03,
+	CG_TRX_SMART_ANTDIV	= 0x04,
+	CGCS_RX_SW_ANTDIV	= 0x05,
+};
+
+extern u32 rtl8821ae_phy_query_bb_reg(struct ieee80211_hw *hw,
+				   u32 regaddr, u32 bitmask);
+extern void rtl8821ae_phy_set_bb_reg(struct ieee80211_hw *hw,
+				  u32 regaddr, u32 bitmask, u32 data);
+extern u32 rtl8821ae_phy_query_rf_reg(struct ieee80211_hw *hw,
+				   enum radio_path rfpath, u32 regaddr,
+				   u32 bitmask);
+extern void rtl8821ae_phy_set_rf_reg(struct ieee80211_hw *hw,
+				  enum radio_path rfpath, u32 regaddr,
+				  u32 bitmask, u32 data);
+extern bool rtl8821ae_phy_mac_config(struct ieee80211_hw *hw);
+extern bool rtl8821ae_phy_bb_config(struct ieee80211_hw *hw);
+extern bool rtl8821ae_phy_rf_config(struct ieee80211_hw *hw);
+extern void rtl8821ae_phy_switch_wirelessband(struct ieee80211_hw *hw, u8 band);
+extern void rtl8821ae_phy_get_hw_reg_originalvalue(struct ieee80211_hw *hw);
+extern void rtl8821ae_phy_get_txpower_level(struct ieee80211_hw *hw,
+					 long *powerlevel);
+extern void rtl8821ae_phy_set_txpower_level(struct ieee80211_hw *hw, u8 channel);
+extern void rtl8821ae_phy_scan_operation_backup(struct ieee80211_hw *hw,
+					     u8 operation);
+extern void rtl8821ae_phy_set_bw_mode_callback(struct ieee80211_hw *hw);
+extern void rtl8821ae_phy_set_bw_mode(struct ieee80211_hw *hw,
+				   enum nl80211_channel_type ch_type);
+extern void rtl8821ae_phy_sw_chnl_callback(struct ieee80211_hw *hw);
+extern u8 rtl8821ae_phy_sw_chnl(struct ieee80211_hw *hw);
+extern void rtl8821ae_phy_iq_calibrate(struct ieee80211_hw *hw, bool b_recovery);
+extern void rtl8812ae_phy_iq_calibrate(struct ieee80211_hw *hw, bool b_recovery);
+void rtl8821ae_phy_ap_calibrate(struct ieee80211_hw *hw, char delta);
+void rtl8821ae_phy_lc_calibrate(struct ieee80211_hw *hw);
+void rtl8821ae_phy_set_rfpath_switch(struct ieee80211_hw *hw, bool bmain);
+bool rtl8812ae_phy_config_rf_with_headerfile(struct ieee80211_hw *hw,
+					  enum radio_path rfpath);
+bool rtl8821ae_phy_config_rf_with_headerfile(struct ieee80211_hw *hw,
+					  enum radio_path rfpath);
+bool rtl8821ae_phy_set_io_cmd(struct ieee80211_hw *hw, enum io_type iotype);
+extern bool rtl8821ae_phy_set_rf_power_state(struct ieee80211_hw *hw,
+					  enum rf_pwrstate rfpwr_state);
+u8 _rtl8812ae_get_right_chnl_place_for_iqk(u8 chnl);
+void rtl8821ae_phy_set_txpower_level_by_path(struct ieee80211_hw *hw, u8 channel, u8 path);
+void rtl8812ae_do_iqk(struct ieee80211_hw *hw,u8 delta_thermal_index,
+	u8 thermal_value, u8 threshold);
+void rtl8821ae_do_iqk(struct ieee80211_hw *hw,u8 delta_thermal_index,
+	u8 thermal_value, u8 threshold);
+void rtl8821ae_reset_iqk_result(struct ieee80211_hw *hw);
+
+
+#endif
diff --git a/drivers/staging/rtl8821ae/rtl8821ae/pwrseq.c b/drivers/staging/rtl8821ae/rtl8821ae/pwrseq.c
new file mode 100644
index 0000000..a2e4a01
--- /dev/null
+++ b/drivers/staging/rtl8821ae/rtl8821ae/pwrseq.c
@@ -0,0 +1,199 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2009-2010  Realtek Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ * The full GNU General Public License is included in this distribution in the
+ * file called LICENSE.
+ *
+ * Contact Information:
+ * wlanfae <wlanfae@realtek.com>
+ * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
+ * Hsinchu 300, Taiwan.
+ *
+ * Larry Finger <Larry.Finger@lwfinger.net>
+ *
+ *****************************************************************************/
+
+#include "pwrseqcmd.h"
+#include "pwrseq.h"
+
+/*
+    drivers should parse the arrays below and perform the corresponding actions
+    (an illustrative call is sketched at the end of this file)
+*/
+/* 3 Power on Array */
+struct wlan_pwr_cfg rtl8812_power_on_flow[RTL8812_TRANS_CARDEMU_TO_ACT_STEPS+RTL8812_TRANS_END_STEPS]=
+{
+	RTL8812_TRANS_CARDEMU_TO_ACT
+	RTL8812_TRANS_END
+};
+
+/* 3 Radio off GPIO Array */
+struct wlan_pwr_cfg rtl8812_radio_off_flow[RTL8812_TRANS_ACT_TO_CARDEMU_STEPS+RTL8812_TRANS_END_STEPS]=
+{
+	RTL8812_TRANS_ACT_TO_CARDEMU
+	RTL8812_TRANS_END
+};
+
+/* 3 Card Disable Array */
+struct wlan_pwr_cfg rtl8812_card_disable_flow[ RTL8812_TRANS_ACT_TO_CARDEMU_STEPS
+	+ RTL8812_TRANS_CARDEMU_TO_PDN_STEPS
+	+ RTL8812_TRANS_END_STEPS ] =
+{
+	RTL8812_TRANS_ACT_TO_CARDEMU
+	RTL8812_TRANS_CARDEMU_TO_CARDDIS
+	RTL8812_TRANS_END
+};
+
+/* 3 Card Enable Array */
+struct wlan_pwr_cfg rtl8812_card_enable_flow[ RTL8812_TRANS_ACT_TO_CARDEMU_STEPS
+	+ RTL8812_TRANS_CARDEMU_TO_PDN_STEPS
+	+ RTL8812_TRANS_END_STEPS ] =
+{
+	RTL8812_TRANS_CARDDIS_TO_CARDEMU
+	RTL8812_TRANS_CARDEMU_TO_ACT
+	RTL8812_TRANS_END
+};
+
+/* 3 Suspend Array */
+struct wlan_pwr_cfg rtl8812_suspend_flow[RTL8812_TRANS_ACT_TO_CARDEMU_STEPS+RTL8812_TRANS_CARDEMU_TO_SUS_STEPS+RTL8812_TRANS_END_STEPS]=
+{
+	RTL8812_TRANS_ACT_TO_CARDEMU
+	RTL8812_TRANS_CARDEMU_TO_SUS
+	RTL8812_TRANS_END
+};
+
+/* 3 Resume Array */
+struct wlan_pwr_cfg rtl8812_resume_flow[RTL8812_TRANS_ACT_TO_CARDEMU_STEPS+RTL8812_TRANS_CARDEMU_TO_SUS_STEPS+RTL8812_TRANS_END_STEPS]=
+{
+	RTL8812_TRANS_SUS_TO_CARDEMU
+	RTL8812_TRANS_CARDEMU_TO_ACT
+	RTL8812_TRANS_END
+};
+
+
+/* 3 HWPDN Array */
+struct wlan_pwr_cfg rtl8812_hwpdn_flow[RTL8812_TRANS_ACT_TO_CARDEMU_STEPS+RTL8812_TRANS_CARDEMU_TO_PDN_STEPS+RTL8812_TRANS_END_STEPS]=
+{
+	RTL8812_TRANS_ACT_TO_CARDEMU
+	RTL8812_TRANS_CARDEMU_TO_PDN
+	RTL8812_TRANS_END
+};
+
+/* 3 Enter LPS */
+struct wlan_pwr_cfg rtl8812_enter_lps_flow[RTL8812_TRANS_ACT_TO_LPS_STEPS+RTL8812_TRANS_END_STEPS]=
+{
+	/* FW behavior */
+	RTL8812_TRANS_ACT_TO_LPS
+	RTL8812_TRANS_END
+};
+
+/* 3 Leave LPS */
+struct wlan_pwr_cfg rtl8812_leave_lps_flow[RTL8812_TRANS_LPS_TO_ACT_STEPS+RTL8812_TRANS_END_STEPS]=
+{
+	/* FW behavior */
+	RTL8812_TRANS_LPS_TO_ACT
+	RTL8812_TRANS_END
+};
+
+
+/*
+    drivers should parse the arrays below and perform the corresponding actions
+*/
+/*3 Power on  Array*/
+struct wlan_pwr_cfg rtl8821A_power_on_flow[RTL8821A_TRANS_CARDEMU_TO_ACT_STEPS
+					+ RTL8821A_TRANS_END_STEPS] =
+{
+	RTL8821A_TRANS_CARDEMU_TO_ACT
+	RTL8821A_TRANS_END
+};
+
+/*3Radio off GPIO Array */
+struct wlan_pwr_cfg rtl8821A_radio_off_flow[RTL8821A_TRANS_ACT_TO_CARDEMU_STEPS
+					+ RTL8821A_TRANS_END_STEPS] =
+{
+	RTL8821A_TRANS_ACT_TO_CARDEMU
+	RTL8821A_TRANS_END
+};
+
+/*3Card Disable Array*/
+struct wlan_pwr_cfg rtl8821A_card_disable_flow[RTL8821A_TRANS_ACT_TO_CARDEMU_STEPS
+					+ RTL8821A_TRANS_CARDEMU_TO_PDN_STEPS
+					+ RTL8821A_TRANS_END_STEPS] =
+{
+	RTL8821A_TRANS_ACT_TO_CARDEMU
+	RTL8821A_TRANS_CARDEMU_TO_CARDDIS
+	RTL8821A_TRANS_END
+};
+
+/*3 Card Enable Array*/
+struct wlan_pwr_cfg rtl8821A_card_enable_flow[RTL8821A_TRANS_ACT_TO_CARDEMU_STEPS
+					+ RTL8821A_TRANS_CARDEMU_TO_ACT_STEPS /*RTL8821A_TRANS_CARDEMU_TO_PDN_STEPS*/
+					+ RTL8821A_TRANS_END_STEPS] =
+{
+	RTL8821A_TRANS_CARDDIS_TO_CARDEMU
+	RTL8821A_TRANS_CARDEMU_TO_ACT
+	RTL8821A_TRANS_END
+};
+
+/*3Suspend Array*/
+struct wlan_pwr_cfg rtl8821A_suspend_flow[RTL8821A_TRANS_ACT_TO_CARDEMU_STEPS
+					+ RTL8821A_TRANS_CARDEMU_TO_SUS_STEPS
+					+ RTL8821A_TRANS_END_STEPS] =
+{
+	RTL8821A_TRANS_ACT_TO_CARDEMU
+	RTL8821A_TRANS_CARDEMU_TO_SUS
+	RTL8821A_TRANS_END
+};
+
+/*3 Resume Array*/
+struct wlan_pwr_cfg rtl8821A_resume_flow[RTL8821A_TRANS_ACT_TO_CARDEMU_STEPS
+					+ RTL8821A_TRANS_CARDEMU_TO_SUS_STEPS
+					+ RTL8821A_TRANS_END_STEPS] =
+{
+	RTL8821A_TRANS_SUS_TO_CARDEMU
+	RTL8821A_TRANS_CARDEMU_TO_ACT
+	RTL8821A_TRANS_END
+};
+
+/*3HWPDN Array*/
+struct wlan_pwr_cfg rtl8821A_hwpdn_flow[RTL8821A_TRANS_ACT_TO_CARDEMU_STEPS
+				+ RTL8821A_TRANS_CARDEMU_TO_PDN_STEPS
+				+ RTL8821A_TRANS_END_STEPS] =
+{
+	RTL8821A_TRANS_ACT_TO_CARDEMU
+	RTL8821A_TRANS_CARDEMU_TO_PDN
+	RTL8821A_TRANS_END
+};
+
+/*3 Enter LPS */
+struct wlan_pwr_cfg rtl8821A_enter_lps_flow[RTL8821A_TRANS_ACT_TO_LPS_STEPS
+					+ RTL8821A_TRANS_END_STEPS] =
+{
+	/*FW behavior*/
+	RTL8821A_TRANS_ACT_TO_LPS
+	RTL8821A_TRANS_END
+};
+
+/*3 Leave LPS */
+struct wlan_pwr_cfg rtl8821A_leave_lps_flow[RTL8821A_TRANS_LPS_TO_ACT_STEPS
+					+ RTL8821A_TRANS_END_STEPS] =
+{
+	/*FW behavior*/
+	RTL8821A_TRANS_LPS_TO_ACT
+	RTL8821A_TRANS_END
+};
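+
+/*
+ * Illustrative usage sketch (not part of the vendor power tables above):
+ * each flow array is the concatenation of the per-transition entry macros
+ * from pwrseq.h plus the single END entry, and it is consumed by
+ * rtl_hal_pwrseqcmdparsing() in pwrseqcmd.c.  A PCIe bring-up path would
+ * run the power-on table roughly as follows, where rtlpriv comes from the
+ * caller and the two mask arguments stand in for the detected chip cut and
+ * fab version:
+ *
+ *	if (!rtl_hal_pwrseqcmdparsing(rtlpriv, PWR_CUT_ALL_MSK,
+ *				      PWR_FAB_ALL_MSK, PWR_INTF_PCI_MSK,
+ *				      RTL8821A_NIC_PWR_ON_FLOW))
+ *		RT_TRACE(COMP_INIT, DBG_WARNING,
+ *			 ("power on flow failed\n"));
+ */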
+
diff --git a/drivers/staging/rtl8821ae/rtl8821ae/pwrseq.h b/drivers/staging/rtl8821ae/rtl8821ae/pwrseq.h
new file mode 100644
index 0000000..8b39c04
--- /dev/null
+++ b/drivers/staging/rtl8821ae/rtl8821ae/pwrseq.h
@@ -0,0 +1,413 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2009-2010  Realtek Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ * The full GNU General Public License is included in this distribution in the
+ * file called LICENSE.
+ *
+ * Contact Information:
+ * wlanfae <wlanfae@realtek.com>
+ * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
+ * Hsinchu 300, Taiwan.
+ *
+ * Larry Finger <Larry.Finger@lwfinger.net>
+ *
+ *****************************************************************************/
+
+#ifndef __RTL8821AE_PWRSEQ_H__
+#define __RTL8821AE_PWRSEQ_H__
+
+#include "pwrseqcmd.h"
+#include "../btcoexist/halbt_precomp.h"
+
+#define	RTL8812_TRANS_CARDEMU_TO_ACT_STEPS	15
+#define	RTL8812_TRANS_ACT_TO_CARDEMU_STEPS	15
+#define	RTL8812_TRANS_CARDEMU_TO_SUS_STEPS	15
+#define	RTL8812_TRANS_SUS_TO_CARDEMU_STEPS	15
+#define	RTL8812_TRANS_CARDEMU_TO_PDN_STEPS	25
+#define	RTL8812_TRANS_PDN_TO_CARDEMU_STEPS	15
+#define	RTL8812_TRANS_ACT_TO_LPS_STEPS	15
+#define	RTL8812_TRANS_LPS_TO_ACT_STEPS	15
+#define	RTL8812_TRANS_END_STEPS	1
+
+
+#define RTL8812_TRANS_CARDEMU_TO_ACT 														\
+	/* format */																\
+	/* { offset, cut_msk, fab_msk|interface_msk, base|cmd, msk, value }, // comments here*/								\
+	{0x0005, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK,PWR_BASEADDR_MAC,PWR_CMD_WRITE, BIT2, 0},/* disable SW LPS 0x04[10]=0*/	\
+	{0x0006, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK,PWR_BASEADDR_MAC,PWR_CMD_POLLING, BIT1, BIT1},/* wait till 0x04[17] = 1    power ready*/	\
+	{0x0005, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK,PWR_BASEADDR_MAC,PWR_CMD_WRITE, BIT7, 0},/* disable HWPDN 0x04[15]=0*/ \
+	{0x0005, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK,PWR_BASEADDR_MAC,PWR_CMD_WRITE, BIT3, 0},/* disable WL suspend*/	\
+	{0x0005, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK,PWR_BASEADDR_MAC,PWR_CMD_WRITE, BIT0, BIT0},/* polling until return 0*/	\
+	{0x0005, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK,PWR_BASEADDR_MAC,PWR_CMD_POLLING, BIT0, 0},/**/
+
+#define RTL8812_TRANS_ACT_TO_CARDEMU													\
+	/* format */																\
+	/* { offset, cut_msk, fab_msk|interface_msk, base|cmd, msk, value }, // comments here*/								\
+	{0x0c00, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK,PWR_BASEADDR_MAC,PWR_CMD_WRITE, 0xFF, 0x04}, /* 0xc00[7:0] = 4	turn off 3-wire */	\
+	{0x0e00, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK,PWR_BASEADDR_MAC,PWR_CMD_WRITE, 0xFF, 0x04}, /* 0xe00[7:0] = 4	turn off 3-wire */	\
+	{0x0002, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK,PWR_BASEADDR_MAC,PWR_CMD_WRITE, BIT0, 0}, /* 0x2[0] = 0	 RESET BB, CLOSE RF */	\
+	{0x0002, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK,PWR_BASEADDR_MAC,PWR_CMD_DELAY, 0, PWRSEQ_DELAY_US},/*Delay 1us*/	\
+	{0x0002, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_PCI_MSK,PWR_BASEADDR_MAC,PWR_CMD_WRITE, BIT1, 0},  /* Whole BB is reset*/			\
+	/*{0x001F, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK,PWR_BASEADDR_MAC,PWR_CMD_WRITE, 0xFF, 0},//0x1F[7:0] = 0 turn off RF*/	\
+	/*{0x004E, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK,PWR_BASEADDR_MAC,PWR_CMD_WRITE, BIT7, 0},//0x4C[23] = 0x4E[7] = 0, switch DPDT_SEL_P output from register 0x65[2] */	\
+	{0x0007, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK,PWR_BASEADDR_MAC,PWR_CMD_WRITE, 0xFF, 0x2A}, /* 0x07[7:0] = 0x28 sps pwm mode 0x2a for BT coex*/	\
+	{0x0008, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_USB_MSK,PWR_BASEADDR_MAC,PWR_CMD_WRITE, 0x02, 0},/*0x8[1] = 0 ANA clk =500k */	\
+	/*{0x0002, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK,PWR_BASEADDR_MAC,PWR_CMD_WRITE, BIT0|BIT1, 0}, //  0x02[1:0] = 0	reset BB */	\
+	{0x0005, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK,PWR_BASEADDR_MAC,PWR_CMD_WRITE, BIT1, BIT1}, /*0x04[9] = 1 turn off MAC by HW state machine*/	\
+	{0x0005, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK,PWR_BASEADDR_MAC,PWR_CMD_POLLING, BIT1, 0}, /*wait till 0x04[9] = 0 polling until return 0 to disable*/
+
+#define RTL8812_TRANS_CARDEMU_TO_SUS													\
+	/* format */								\
+	/* { offset, cut_msk, fab_msk|interface_msk, base|cmd, msk, value }, // comments here*/								\
+	{0x0042, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_USB_MSK,PWR_BASEADDR_MAC,PWR_CMD_WRITE, 0xF0, 0xcc},\
+	{0x0042, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_PCI_MSK,PWR_BASEADDR_MAC,PWR_CMD_WRITE, 0xF0, 0xEC},\
+	{0x0043, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK,PWR_BASEADDR_MAC,PWR_CMD_WRITE, 0xFF, 0x07},/* gpio11 input mode, gpio10~8 output mode */	\
+	{0x0045, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK,PWR_BASEADDR_MAC,PWR_CMD_WRITE, 0xFF, 0x00},/* gpio 0~7 output same value as input ?? */	\
+	{0x0046, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK,PWR_BASEADDR_MAC,PWR_CMD_WRITE, 0xFF, 0xff},/* gpio0~7 output mode */	\
+	{0x0047, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK,PWR_BASEADDR_MAC,PWR_CMD_WRITE, 0xFF, 0},/* 0x47[7:0] = 00 gpio mode */	\
+	{0x0007, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_USB_MSK,PWR_BASEADDR_MAC,PWR_CMD_WRITE, 0xFF, 0},/* suspend option all off */	\
+	{0x0014, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK,PWR_BASEADDR_MAC,PWR_CMD_WRITE, 0x80, BIT7},/*0x14[7] = 1 turn on ZCD */	\
+	{0x0015, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK,PWR_BASEADDR_MAC,PWR_CMD_WRITE, 0x01, BIT0},/* 0x15[0] =1 turn on ZCD */	\
+	{0x0023, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK,PWR_BASEADDR_MAC,PWR_CMD_WRITE, 0x10, BIT4},/*0x23[4] = 1 hpon LDO sleep mode */	\
+	{0x0008, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_USB_MSK,PWR_BASEADDR_MAC,PWR_CMD_WRITE, 0x02, 0},/*0x8[1] = 0 ANA clk =500k */	\
+	{0x0005, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK,PWR_BASEADDR_MAC,PWR_CMD_WRITE, BIT3, BIT3}, /*0x04[11] = 2b'11 enable WL suspend for PCIe*/
+
+#define RTL8812_TRANS_SUS_TO_CARDEMU													\
+	/* format */								\
+	/* { offset, cut_msk, fab_msk|interface_msk, base|cmd, msk, value }, // comments here*/								\
+	{0x0005, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK,PWR_BASEADDR_MAC,PWR_CMD_WRITE, BIT3, 0}, /*0x04[11] = 2b'01enable WL suspend*/   \
+	{0x0023, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK,PWR_BASEADDR_MAC,PWR_CMD_WRITE, 0x10, 0},/*0x23[4] = 0 hpon LDO sleep mode leave */	\
+	{0x0015, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK,PWR_BASEADDR_MAC,PWR_CMD_WRITE, 0x01, 0},/* 0x15[0] =0 turn off ZCD */	\
+	{0x0014, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK,PWR_BASEADDR_MAC,PWR_CMD_WRITE, 0x80, 0},/*0x14[7] = 0 turn off ZCD */	\
+	{0x0046, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK,PWR_BASEADDR_MAC,PWR_CMD_WRITE, 0xFF, 0x00},/* gpio0~7 input mode */	\
+	{0x0043, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK,PWR_BASEADDR_MAC,PWR_CMD_WRITE, 0xFF, 0x00},/* gpio11 input mode, gpio10~8 input mode */
+
+#define RTL8812_TRANS_CARDEMU_TO_CARDDIS													\
+	/* format */																\
+	/* { offset, cut_msk, fab_msk|interface_msk, base|cmd, msk, value }, // comments here*/								\
+	/**{0x0194, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_PCI_MSK,PWR_BASEADDR_MAC,PWR_CMD_WRITE, BIT0, 0}, //0x194[0]=0 , disable 32K clock*/	\
+	/**{0x0093, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_PCI_MSK,PWR_BASEADDR_MAC,PWR_CMD_WRITE, 0xFF, 0x94}, //0x93=0x94 , 90[30] =0 enable 500k ANA clock .switch clock from 12M to 500K , 90 [26] =0 disable EEprom loader clock*/	\
+	{0x0003, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK,PWR_BASEADDR_MAC,PWR_CMD_WRITE, BIT2, 0}, /*0x03[2] = 0, reset 8051*/	\
+	{0x0080, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK,PWR_BASEADDR_MAC,PWR_CMD_WRITE, 0xFF, 0x05}, /*0x80=05h if reload fw, fill the default value of host_CPU handshake field*/	\
+	{0x0042, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_USB_MSK,PWR_BASEADDR_MAC,PWR_CMD_WRITE, 0xF0, 0xcc},\
+	{0x0042, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_PCI_MSK,PWR_BASEADDR_MAC,PWR_CMD_WRITE, 0xF0, 0xEC},\
+	{0x0043, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK,PWR_BASEADDR_MAC,PWR_CMD_WRITE, 0xFF, 0x07},/* gpio11 input mode, gpio10~8 output mode */	\
+	{0x0045, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK,PWR_BASEADDR_MAC,PWR_CMD_WRITE, 0xFF, 0x00},/* gpio 0~7 output same value as input ?? */	\
+	{0x0046, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK,PWR_BASEADDR_MAC,PWR_CMD_WRITE, 0xFF, 0xff},/* gpio0~7 output mode */	\
+	{0x0047, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK,PWR_BASEADDR_MAC,PWR_CMD_WRITE, 0xFF, 0},/* 0x47[7:0] = 00 gpio mode */	\
+	{0x0014, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK,PWR_BASEADDR_MAC,PWR_CMD_WRITE, 0x80, BIT7},/*0x14[7] = 1 turn on ZCD */	\
+	{0x0015, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK,PWR_BASEADDR_MAC,PWR_CMD_WRITE, 0x01, BIT0},/* 0x15[0] =1 turn on ZCD */	\
+	{0x0012, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK,PWR_BASEADDR_MAC,PWR_CMD_WRITE, 0x01, 0},/*0x12[0] = 0 force PFM mode */	\
+	{0x0023, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK,PWR_BASEADDR_MAC,PWR_CMD_WRITE, 0x10, BIT4},/*0x23[4] = 1 hpon LDO sleep mode */	\
+	{0x0008, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_USB_MSK,PWR_BASEADDR_MAC,PWR_CMD_WRITE, 0x02, 0},/*0x8[1] = 0 ANA clk =500k */	\
+	{0x0007, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_USB_MSK,PWR_BASEADDR_MAC,PWR_CMD_WRITE, 0xFF, 0x20}, /*0x07=0x20 , SOP option to disable BG/MB*/	\
+	{0x001f, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_USB_MSK,PWR_BASEADDR_MAC,PWR_CMD_WRITE, BIT1, 0}, /*0x01f[1]=0 , disable RFC_0  control  REG_RF_CTRL_8812 */	\
+	{0x0076, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_USB_MSK,PWR_BASEADDR_MAC,PWR_CMD_WRITE, BIT1, 0}, /*0x076[1]=0 , disable RFC_1  control REG_OPT_CTRL_8812 +2 */	\
+	{0x0005, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK,PWR_BASEADDR_MAC,PWR_CMD_WRITE, BIT3, BIT3}, /*0x04[11] = 2b'01 enable WL suspend*/
+
+#define RTL8812_TRANS_CARDDIS_TO_CARDEMU													\
+	/* format */																\
+	/* { offset, cut_msk, fab_msk|interface_msk, base|cmd, msk, value }, // comments here*/                       \
+	{0x0012, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK,PWR_BASEADDR_MAC,PWR_CMD_WRITE, BIT0, BIT0},/*0x12[0] = 1 force PWM mode */	\
+	{0x0014, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK,PWR_BASEADDR_MAC,PWR_CMD_WRITE, 0x80, 0},/*0x14[7] = 0 turn off ZCD */	\
+	{0x0015, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK,PWR_BASEADDR_MAC,PWR_CMD_WRITE, 0x01, 0},/* 0x15[0] =0 turn off ZCD */	\
+	{0x0023, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK,PWR_BASEADDR_MAC,PWR_CMD_WRITE, 0x10, 0},/*0x23[4] = 0 hpon LDO leave sleep mode */	\
+	{0x0046, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK,PWR_BASEADDR_MAC,PWR_CMD_WRITE, 0xFF, 0x00},/* gpio0~7 input mode */	\
+	{0x0043, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK,PWR_BASEADDR_MAC,PWR_CMD_WRITE, 0xFF, 0x00},/* gpio11 input mode, gpio10~8 input mode */ \
+	{0x0005, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_PCI_MSK,PWR_BASEADDR_MAC,PWR_CMD_WRITE, BIT2, 0}, /*0x04[10] = 0, enable SW LPS PCIE only*/	\
+	{0x0005, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK,PWR_BASEADDR_MAC,PWR_CMD_WRITE, BIT3, 0}, /*0x04[11] = 2b'01enable WL suspend*/	\
+	{0x0003, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK,PWR_BASEADDR_MAC,PWR_CMD_WRITE, BIT2, BIT2}, /*0x03[2] = 1, enable 8051*/	\
+	{0x0301, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_PCI_MSK,PWR_BASEADDR_MAC,PWR_CMD_WRITE, 0xFF, 0},/*PCIe DMA start*/
+
+
+#define RTL8812_TRANS_CARDEMU_TO_PDN												\
+	/* format */																\
+	/* { offset, cut_msk, fab_msk|interface_msk, base|cmd, msk, value }, // comments here*/	\
+	{0x0005, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK,PWR_BASEADDR_MAC,PWR_CMD_WRITE, BIT7, BIT7},/* 0x04[15] = 1*/
+
+#define RTL8812_TRANS_PDN_TO_CARDEMU												\
+	/* format */																\
+	/* { offset, cut_msk, fab_msk|interface_msk, base|cmd, msk, value }, // comments here*/	\
+	{0x0005, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK,PWR_BASEADDR_MAC,PWR_CMD_WRITE, BIT7, 0},/* 0x04[15] = 0*/
+
+#define RTL8812_TRANS_ACT_TO_LPS														\
+	/* format */																\
+	/* { offset, cut_msk, fab_msk|interface_msk, base|cmd, msk, value }, // comments here*/								\
+	{0x0301, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_PCI_MSK,PWR_BASEADDR_MAC,PWR_CMD_WRITE, 0xFF, 0xFF},/*PCIe DMA stop*/	\
+	{0x0522, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK,PWR_BASEADDR_MAC,PWR_CMD_WRITE, 0xFF, 0x7F},/*Tx Pause*/		\
+	{0x05F8, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK,PWR_BASEADDR_MAC,PWR_CMD_POLLING, 0xFF, 0},/*Should be zero if no packet is transmitting*/	\
+	{0x05F9, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK,PWR_BASEADDR_MAC,PWR_CMD_POLLING, 0xFF, 0},/*Should be zero if no packet is transmitting*/	\
+	{0x05FA, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK,PWR_BASEADDR_MAC,PWR_CMD_POLLING, 0xFF, 0},/*Should be zero if no packet is transmitting*/	\
+	{0x05FB, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK,PWR_BASEADDR_MAC,PWR_CMD_POLLING, 0xFF, 0},/*Should be zero if no packet is transmitting*/	\
+	{0x0c00, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK,PWR_BASEADDR_MAC,PWR_CMD_WRITE, 0xFF, 0x04}, /* 0xc00[7:0] = 4	turn off 3-wire */	\
+	{0x0e00, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK,PWR_BASEADDR_MAC,PWR_CMD_WRITE, 0xFF, 0x04}, /* 0xe00[7:0] = 4	turn off 3-wire */	\
+	{0x0002, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK,PWR_BASEADDR_MAC,PWR_CMD_WRITE, BIT0, 0},/*CCK and OFDM are disabled,and clock are gated,and RF closed*/	\
+	{0x0002, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK,PWR_BASEADDR_MAC,PWR_CMD_DELAY, 0, PWRSEQ_DELAY_US},/*Delay 1us*/	\
+	{0x0002, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_USB_MSK,PWR_BASEADDR_MAC,PWR_CMD_WRITE, BIT1, 0},  /* Whole BB is reset*/			\
+	{0x0100, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK,PWR_BASEADDR_MAC,PWR_CMD_WRITE, 0xFF, 0x03},/*Reset MAC TRX*/			\
+	{0x0101, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK,PWR_BASEADDR_MAC,PWR_CMD_WRITE, BIT1, 0},/*check if removed later*/		\
+	{0x0553, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK,PWR_BASEADDR_MAC,PWR_CMD_WRITE, BIT5, BIT5},/*Respond TxOK to scheduler*/
+
+
+#define RTL8812_TRANS_LPS_TO_ACT															\
+	/* format */																\
+	/* { offset, cut_msk, fab_msk|interface_msk, base|cmd, msk, value }, // comments here*/	\
+	{0x0080, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_SDIO_MSK,PWR_BASEADDR_SDIO,PWR_CMD_WRITE, 0xFF, 0x84}, /*SDIO RPWM*/	\
+	{0xFE58, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_USB_MSK,PWR_BASEADDR_MAC,PWR_CMD_WRITE, 0xFF, 0x84}, /*USB RPWM*/	\
+	{0x0361, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_PCI_MSK,PWR_BASEADDR_MAC,PWR_CMD_WRITE, 0xFF, 0x84}, /*PCIe RPWM*/	\
+	{0x0002, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK,PWR_BASEADDR_MAC,PWR_CMD_DELAY, 0, PWRSEQ_DELAY_MS}, /*Delay*/	\
+	{0x0008, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK,PWR_BASEADDR_MAC,PWR_CMD_WRITE, BIT4, 0}, /*.	0x08[4] = 0		 switch TSF to 40M*/	\
+	{0x0109, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK,PWR_BASEADDR_MAC,PWR_CMD_POLLING, BIT7, 0}, /*Polling 0x109[7]=0  TSF in 40M*/			\
+	{0x0029, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK,PWR_BASEADDR_MAC,PWR_CMD_WRITE, BIT6|BIT7, 0}, /*.	0x29[7:6] = 2b'00	 enable BB clock*/	\
+	{0x0101, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK,PWR_BASEADDR_MAC,PWR_CMD_WRITE, BIT1, BIT1}, /*.	0x101[1] = 1*/					\
+	{0x0100, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK,PWR_BASEADDR_MAC,PWR_CMD_WRITE, 0xFF, 0xFF}, /*.	0x100[7:0] = 0xFF	 enable WMAC TRX*/	\
+	{0x0002, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK,PWR_BASEADDR_MAC,PWR_CMD_WRITE, BIT1|BIT0, BIT1|BIT0}, /*.	0x02[1:0] = 2b'11	 enable BB macro*/	\
+	{0x0522, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK,PWR_BASEADDR_MAC,PWR_CMD_WRITE, 0xFF, 0}, /*.	0x522 = 0*/
+
+#define RTL8812_TRANS_END																\
+	/* format */																\
+	/* { offset, cut_msk, fab_msk|interface_msk, base|cmd, msk, value }, // comments here*/		\
+	{0xFFFF, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK,0,PWR_CMD_END, 0, 0}, //
+
+
+extern struct wlan_pwr_cfg  rtl8812_power_on_flow[RTL8812_TRANS_CARDEMU_TO_ACT_STEPS+RTL8812_TRANS_END_STEPS];
+extern struct wlan_pwr_cfg  rtl8812_radio_off_flow[RTL8812_TRANS_ACT_TO_CARDEMU_STEPS+RTL8812_TRANS_END_STEPS];
+extern struct wlan_pwr_cfg  rtl8812_card_disable_flow[RTL8812_TRANS_ACT_TO_CARDEMU_STEPS+RTL8812_TRANS_CARDEMU_TO_PDN_STEPS+RTL8812_TRANS_END_STEPS];
+extern struct wlan_pwr_cfg  rtl8812_card_enable_flow[RTL8812_TRANS_ACT_TO_CARDEMU_STEPS+RTL8812_TRANS_CARDEMU_TO_PDN_STEPS+RTL8812_TRANS_END_STEPS];
+extern struct wlan_pwr_cfg  rtl8812_suspend_flow[RTL8812_TRANS_ACT_TO_CARDEMU_STEPS+RTL8812_TRANS_CARDEMU_TO_SUS_STEPS+RTL8812_TRANS_END_STEPS];
+extern struct wlan_pwr_cfg  rtl8812_resume_flow[RTL8812_TRANS_ACT_TO_CARDEMU_STEPS+RTL8812_TRANS_CARDEMU_TO_SUS_STEPS+RTL8812_TRANS_END_STEPS];
+extern struct wlan_pwr_cfg  rtl8812_hwpdn_flow[RTL8812_TRANS_ACT_TO_CARDEMU_STEPS+RTL8812_TRANS_CARDEMU_TO_PDN_STEPS+RTL8812_TRANS_END_STEPS];
+extern struct wlan_pwr_cfg  rtl8812_enter_lps_flow[RTL8812_TRANS_ACT_TO_LPS_STEPS+RTL8812_TRANS_END_STEPS];
+extern struct wlan_pwr_cfg  rtl8812_leave_lps_flow[RTL8812_TRANS_LPS_TO_ACT_STEPS+RTL8812_TRANS_END_STEPS];
+
+/*
+	Check document WM-20130516-JackieLau-RTL8821A_Power_Architecture-R10.vsd
+	There are 6 HW Power States:
+	0: POFF--Power Off
+	1: PDN--Power Down
+	2: CARDEMU--Card Emulation
+	3: ACT--Active Mode
+	4: LPS--Low Power State
+	5: SUS--Suspend
+
+	The transitions between the different states are defined below
+	TRANS_CARDEMU_TO_ACT
+	TRANS_ACT_TO_CARDEMU
+	TRANS_CARDEMU_TO_SUS
+	TRANS_SUS_TO_CARDEMU
+	TRANS_CARDEMU_TO_PDN
+	TRANS_ACT_TO_LPS
+	TRANS_LPS_TO_ACT
+
+	TRANS_END
+*/
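+
+/*
+ * Illustrative mapping (derived from the transition macros and the tables
+ * in pwrseq.c, not an additional vendor table): a card disable, for
+ * example, walks ACT -> CARDEMU -> CARDDIS, so rtl8821A_card_disable_flow
+ * is built as
+ *
+ *	RTL8821A_TRANS_ACT_TO_CARDEMU
+ *	RTL8821A_TRANS_CARDEMU_TO_CARDDIS
+ *	RTL8821A_TRANS_END
+ *
+ * and the *_STEPS defines below size each array as an upper bound on the
+ * number of entries contributed by every transition.
+ */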
+#define	RTL8821A_TRANS_CARDEMU_TO_ACT_STEPS	25
+#define	RTL8821A_TRANS_ACT_TO_CARDEMU_STEPS	15
+#define	RTL8821A_TRANS_CARDEMU_TO_SUS_STEPS	15
+#define	RTL8821A_TRANS_SUS_TO_CARDEMU_STEPS	15
+#define RTL8821A_TRANS_CARDDIS_TO_CARDEMU_STEPS	15
+#define	RTL8821A_TRANS_CARDEMU_TO_PDN_STEPS	15
+#define	RTL8821A_TRANS_PDN_TO_CARDEMU_STEPS	15
+#define	RTL8821A_TRANS_ACT_TO_LPS_STEPS	15
+#define	RTL8821A_TRANS_LPS_TO_ACT_STEPS	15
+#define	RTL8821A_TRANS_END_STEPS	1
+
+
+#define RTL8821A_TRANS_CARDEMU_TO_ACT 														\
+	/* format */																\
+	/* { offset, cut_msk, fab_msk|interface_msk, base|cmd, msk, value }, // comments here*/								\
+	{0x0020, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_USB_MSK|PWR_INTF_SDIO_MSK,PWR_BASEADDR_MAC,PWR_CMD_WRITE, BIT0, BIT0}, /*0x20[0] = 1b'1 enable LDOA12 MACRO block for all interface*/   \
+	{0x0067, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_USB_MSK|PWR_INTF_SDIO_MSK,PWR_BASEADDR_MAC,PWR_CMD_WRITE, BIT4, 0}, /*0x67[0] = 0 to disable BT_GPS_SEL pins*/	\
+	{0x0001, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_USB_MSK|PWR_INTF_SDIO_MSK,PWR_BASEADDR_MAC,PWR_CMD_DELAY, 1, PWRSEQ_DELAY_MS},/*Delay 1ms*/   \
+	{0x0000, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_USB_MSK|PWR_INTF_SDIO_MSK,PWR_BASEADDR_MAC,PWR_CMD_WRITE, BIT5, 0}, /*0x00[5] = 1b'0 release analog Ips to digital ,1:isolation*/   \
+	{0x0005, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK,PWR_BASEADDR_MAC,PWR_CMD_WRITE, (BIT4|BIT3|BIT2), 0},/* disable SW LPS 0x04[10]=0 and WLSUS_EN 0x04[12:11]=0*/	\
+	{0x0075, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_PCI_MSK,PWR_BASEADDR_MAC,PWR_CMD_WRITE, BIT0 , BIT0},/* Disable USB suspend */	\
+	{0x0006, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK,PWR_BASEADDR_MAC,PWR_CMD_POLLING, BIT1, BIT1},/* wait till 0x04[17] = 1    power ready*/	\
+	{0x0075, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_PCI_MSK,PWR_BASEADDR_MAC,PWR_CMD_WRITE, BIT0 , 0},/* Enable USB suspend */	\
+	{0x0006, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK,PWR_BASEADDR_MAC,PWR_CMD_WRITE, BIT0, BIT0},/* release WLON reset  0x04[16]=1*/	\
+	{0x0005, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK,PWR_BASEADDR_MAC,PWR_CMD_WRITE, BIT7, 0},/* disable HWPDN 0x04[15]=0*/	\
+	{0x0005, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK,PWR_BASEADDR_MAC,PWR_CMD_WRITE, (BIT4|BIT3), 0},/* disable WL suspend*/	\
+	{0x0005, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK,PWR_BASEADDR_MAC,PWR_CMD_WRITE, BIT0, BIT0},/* polling until return 0*/	\
+	{0x0005, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK,PWR_BASEADDR_MAC,PWR_CMD_POLLING, BIT0, 0},/**/	\
+	{0x004F, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK,PWR_BASEADDR_MAC,PWR_CMD_WRITE, BIT0, BIT0},/*0x4C[24] = 0x4F[0] = 1, switch DPDT_SEL_P output from WL BB */\
+	{0x0067, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK,PWR_BASEADDR_MAC,PWR_CMD_WRITE, (BIT5|BIT4), (BIT5|BIT4)},/*0x66[13] = 0x67[5] = 1, switch for PAPE_G/PAPE_A from WL BB ; 0x66[12] = 0x67[4] = 1, switch LNAON from WL BB */\
+	{0x0025, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK,PWR_BASEADDR_MAC,PWR_CMD_WRITE, BIT6, 0},/*anapar_mac<118> , 0x25[6]=0 by wlan single function*/\
+	{0x0049, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK,PWR_BASEADDR_MAC,PWR_CMD_WRITE, BIT1, BIT1},/*Enable falling edge triggering interrupt*/\
+	{0x0063, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK,PWR_BASEADDR_MAC,PWR_CMD_WRITE, BIT1, BIT1},/*Enable GPIO9 interrupt mode*/\
+	{0x0062, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK,PWR_BASEADDR_MAC,PWR_CMD_WRITE, BIT1, 0},/*Enable GPIO9 input mode*/\
+	{0x0058, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK,PWR_BASEADDR_MAC,PWR_CMD_WRITE, BIT0, BIT0},/*Enable HSISR GPIO[C:0] interrupt*/\
+	{0x005A, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK,PWR_BASEADDR_MAC,PWR_CMD_WRITE, BIT1, BIT1},/*Enable HSISR GPIO9 interrupt*/\
+	{0x007A, PWR_CUT_TESTCHIP_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK,PWR_BASEADDR_MAC,PWR_CMD_WRITE, 0xFF, 0x3A},/*0x7A = 0x3A start BT*/\
+	{0x002E, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK,PWR_BASEADDR_MAC,PWR_CMD_WRITE, 0xFF , 0x82 },/* 0x2C[23:12]=0x820 ; XTAL trim */ \
+	{0x0010, PWR_CUT_A_MSK , PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK,PWR_BASEADDR_MAC,PWR_CMD_WRITE, BIT6 , BIT6 },/* 0x10[6]=1 ; MP chips gate control of 0x2C behind this bit, so 0x10[6] must be set to 1 before WLAN is allowed to control it */ \
+
+
+#define RTL8821A_TRANS_ACT_TO_CARDEMU													\
+	/* format */																\
+	/* { offset, cut_msk, fab_msk|interface_msk, base|cmd, msk, value }, // comments here*/								\
+	{0x001F, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK,PWR_BASEADDR_MAC,PWR_CMD_WRITE, 0xFF, 0},/*0x1F[7:0] = 0 turn off RF*/	\
+	{0x004F, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK,PWR_BASEADDR_MAC,PWR_CMD_WRITE, BIT0, 0},/*0x4C[24] = 0x4F[0] = 0, switch DPDT_SEL_P output from register 0x65[2] */\
+	{0x0049, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK,PWR_BASEADDR_MAC,PWR_CMD_WRITE, BIT1, 0},/*Enable rising edge triggering interrupt*/ \
+	{0x0005, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK,PWR_BASEADDR_MAC,PWR_CMD_WRITE, BIT1, BIT1}, /*0x04[9] = 1 turn off MAC by HW state machine*/	\
+	{0x0005, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK,PWR_BASEADDR_MAC,PWR_CMD_POLLING, BIT1, 0}, /*wait till 0x04[9] = 0 polling until return 0 to disable*/	\
+	{0x0000, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_USB_MSK|PWR_INTF_SDIO_MSK,PWR_BASEADDR_MAC,PWR_CMD_WRITE, BIT5, BIT5}, /*0x00[5] = 1b'1 analog Ips to digital ,1:isolation*/   \
+	{0x0020, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_USB_MSK|PWR_INTF_SDIO_MSK,PWR_BASEADDR_MAC,PWR_CMD_WRITE, BIT0, 0}, /*0x20[0] = 1b'0 disable LDOA12 MACRO block*/   \
+
+
+#define RTL8821A_TRANS_CARDEMU_TO_SUS													\
+	/* format */																\
+	/* { offset, cut_msk, fab_msk|interface_msk, base|cmd, msk, value }, // comments here*/								\
+	{0x0005, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_PCI_MSK,PWR_BASEADDR_MAC,PWR_CMD_WRITE, BIT4|BIT3, (BIT4|BIT3)}, /*0x04[12:11] = 2b'11 enable WL suspend for PCIe*/	\
+	{0x0005, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_USB_MSK|PWR_INTF_SDIO_MSK,PWR_BASEADDR_MAC,PWR_CMD_WRITE, BIT3|BIT4, BIT3}, /*0x04[12:11] = 2b'01 enable WL suspend*/	\
+	{0x0023, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_SDIO_MSK,PWR_BASEADDR_MAC,PWR_CMD_WRITE, BIT4, BIT4}, /*0x23[4] = 1b'1 12H LDO enter sleep mode*/   \
+	{0x0007, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_SDIO_MSK,PWR_BASEADDR_MAC,PWR_CMD_WRITE, 0xFF, 0x20}, /*0x07[7:0] = 0x20 SDIO SOP option to disable BG/MB/ACK/SWR*/   \
+	{0x0005, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_PCI_MSK,PWR_BASEADDR_MAC,PWR_CMD_WRITE, BIT3|BIT4, BIT3|BIT4}, /*0x04[12:11] = 2b'11 enable WL suspend for PCIe*/	\
+	{0x0086, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_SDIO_MSK,PWR_BASEADDR_SDIO,PWR_CMD_WRITE, BIT0, BIT0}, /*Set SDIO suspend local register*/	\
+	{0x0086, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_SDIO_MSK,PWR_BASEADDR_SDIO,PWR_CMD_POLLING, BIT1, 0}, /*wait power state to suspend*/
+
+#define RTL8821A_TRANS_SUS_TO_CARDEMU													\
+	/* format */																\
+	/* { offset, cut_msk, fab_msk|interface_msk, base|cmd, msk, value }, // comments here*/								\
+	{0x0005, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK,PWR_BASEADDR_MAC,PWR_CMD_WRITE, BIT3 | BIT7, 0}, /*clear suspend enable and power down enable*/	\
+	{0x0086, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_SDIO_MSK,PWR_BASEADDR_SDIO,PWR_CMD_WRITE, BIT0, 0}, /*Set SDIO suspend local register*/	\
+	{0x0086, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_SDIO_MSK,PWR_BASEADDR_SDIO,PWR_CMD_POLLING, BIT1, BIT1}, /*wait power state to suspend*/\
+	{0x0023, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_SDIO_MSK,PWR_BASEADDR_MAC,PWR_CMD_WRITE, BIT4, 0}, /*0x23[4] = 1b'0 12H LDO enter normal mode*/   \
+	{0x0005, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK,PWR_BASEADDR_MAC,PWR_CMD_WRITE, BIT3|BIT4, 0}, /*0x04[12:11] = 2b'01enable WL suspend*/
+
+#define RTL8821A_TRANS_CARDEMU_TO_CARDDIS													\
+	/* format */																\
+	/* { offset, cut_msk, fab_msk|interface_msk, base|cmd, msk, value }, // comments here*/								\
+	{0x0007, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_SDIO_MSK,PWR_BASEADDR_MAC,PWR_CMD_WRITE, 0xFF, 0x20}, /*0x07=0x20 , SOP option to disable BG/MB*/	\
+	{0x0005, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_USB_MSK|PWR_INTF_SDIO_MSK,PWR_BASEADDR_MAC,PWR_CMD_WRITE, BIT3|BIT4, BIT3}, /*0x04[12:11] = 2b'01 enable WL suspend*/	\
+	{0x0005, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_PCI_MSK,PWR_BASEADDR_MAC,PWR_CMD_WRITE, BIT2, BIT2}, /*0x04[10] = 1, enable SW LPS*/	\
+	{0x004A, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_USB_MSK,PWR_BASEADDR_MAC,PWR_CMD_WRITE, BIT0, 1}, /*0x48[16] = 1 to enable GPIO9 as EXT WAKEUP*/   \
+	{0x0023, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_SDIO_MSK,PWR_BASEADDR_MAC,PWR_CMD_WRITE, BIT4, BIT4}, /*0x23[4] = 1b'1 12H LDO enter sleep mode*/   \
+	{0x0086, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_SDIO_MSK,PWR_BASEADDR_SDIO,PWR_CMD_WRITE, BIT0, BIT0}, /*Set SDIO suspend local register*/	\
+	{0x0086, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_SDIO_MSK,PWR_BASEADDR_SDIO,PWR_CMD_POLLING, BIT1, 0}, /*wait power state to suspend*/
+
+#define RTL8821A_TRANS_CARDDIS_TO_CARDEMU													\
+	/* format */																\
+	/* { offset, cut_msk, fab_msk|interface_msk, base|cmd, msk, value }, // comments here*/								\
+	{0x0005, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK,PWR_BASEADDR_MAC,PWR_CMD_WRITE, BIT3 | BIT7, 0}, /*clear suspend enable and power down enable*/	\
+	{0x0086, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_SDIO_MSK,PWR_BASEADDR_SDIO,PWR_CMD_WRITE, BIT0, 0}, /*Set SDIO suspend local register*/	\
+	{0x0086, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_SDIO_MSK,PWR_BASEADDR_SDIO,PWR_CMD_POLLING, BIT1, BIT1}, /*wait power state to suspend*/\
+	{0x004A, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_USB_MSK,PWR_BASEADDR_MAC,PWR_CMD_WRITE, BIT0, 0}, /*0x48[16] = 0 to disable GPIO9 as EXT WAKEUP*/   \
+	{0x0005, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK,PWR_BASEADDR_MAC,PWR_CMD_WRITE, BIT3|BIT4, 0}, /*0x04[12:11] = 2b'01enable WL suspend*/\
+	{0x0023, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_SDIO_MSK,PWR_BASEADDR_MAC,PWR_CMD_WRITE, BIT4, 0}, /*0x23[4] = 1b'0 12H LDO enter normal mode*/   \
+	{0x0301, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_PCI_MSK,PWR_BASEADDR_MAC,PWR_CMD_WRITE, 0xFF, 0},/*PCIe DMA start*/
+
+
+#define RTL8821A_TRANS_CARDEMU_TO_PDN												\
+	/* format */																\
+	/* { offset, cut_msk, fab_msk|interface_msk, base|cmd, msk, value }, // comments here*/								\
+	{0x0023, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_SDIO_MSK,PWR_BASEADDR_MAC,PWR_CMD_WRITE, BIT4, BIT4}, /*0x23[4] = 1b'1 12H LDO enter sleep mode*/   \
+	{0x0007, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_SDIO_MSK|PWR_INTF_USB_MSK,PWR_BASEADDR_MAC,PWR_CMD_WRITE, 0xFF, 0x20}, /*0x07[7:0] = 0x20 SOP option to disable BG/MB/ACK/SWR*/   \
+	{0x0006, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK,PWR_BASEADDR_MAC,PWR_CMD_WRITE, BIT0, 0},/* 0x04[16] = 0*/\
+	{0x0005, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK,PWR_BASEADDR_MAC,PWR_CMD_WRITE, BIT7, BIT7},/* 0x04[15] = 1*/
+
+#define RTL8821A_TRANS_PDN_TO_CARDEMU												\
+	/* format */																\
+	/* { offset, cut_msk, fab_msk|interface_msk, base|cmd, msk, value }, // comments here*/								\
+	{0x0005, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK,PWR_BASEADDR_MAC,PWR_CMD_WRITE, BIT7, 0},/* 0x04[15] = 0*/
+
+#define RTL8821A_TRANS_ACT_TO_LPS														\
+	/* format */																\
+	/* { offset, cut_msk, fab_msk|interface_msk, base|cmd, msk, value }, // comments here*/								\
+	{0x0301, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_PCI_MSK,PWR_BASEADDR_MAC,PWR_CMD_WRITE, 0xFF, 0xFF},/*PCIe DMA stop*/	\
+	{0x0522, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK,PWR_BASEADDR_MAC,PWR_CMD_WRITE, 0xFF, 0xFF},/*Tx Pause*/	\
+	{0x05F8, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK,PWR_BASEADDR_MAC,PWR_CMD_POLLING, 0xFF, 0},/*Should be zero if no packet is transmitting*/	\
+	{0x05F9, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK,PWR_BASEADDR_MAC,PWR_CMD_POLLING, 0xFF, 0},/*Should be zero if no packet is transmitting*/	\
+	{0x05FA, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK,PWR_BASEADDR_MAC,PWR_CMD_POLLING, 0xFF, 0},/*Should be zero if no packet is transmitting*/	\
+	{0x05FB, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK,PWR_BASEADDR_MAC,PWR_CMD_POLLING, 0xFF, 0},/*Should be zero if no packet is transmitting*/	\
+	{0x0002, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK,PWR_BASEADDR_MAC,PWR_CMD_WRITE, BIT0, 0},/*CCK and OFDM are disabled,and clock are gated*/	\
+	{0x0002, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK,PWR_BASEADDR_MAC,PWR_CMD_DELAY, 0, PWRSEQ_DELAY_US},/*Delay 1us*/	\
+	{0x0002, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK,PWR_BASEADDR_MAC,PWR_CMD_WRITE, BIT1, 0},/*Whole BB is reset*/	\
+	{0x0100, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK,PWR_BASEADDR_MAC,PWR_CMD_WRITE, 0xFF, 0x03},/*Reset MAC TRX*/	\
+	{0x0101, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK,PWR_BASEADDR_MAC,PWR_CMD_WRITE, BIT1, 0},/*check if removed later*/	\
+	{0x0093, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_SDIO_MSK,PWR_BASEADDR_MAC,PWR_CMD_WRITE, 0xFF, 0x00},/*When driver enter Sus/ Disable, enable LOP for BT*/	\
+	{0x0553, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK,PWR_BASEADDR_MAC,PWR_CMD_WRITE, BIT5, BIT5},/*Respond TxOK to scheduler*/	\
+
+
+#define RTL8821A_TRANS_LPS_TO_ACT															\
+	/* format */																\
+	/* { offset, cut_msk, fab_msk|interface_msk, base|cmd, msk, value }, // comments here*/								\
+	{0x0080, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_SDIO_MSK,PWR_BASEADDR_SDIO,PWR_CMD_WRITE, 0xFF, 0x84}, /*SDIO RPWM*/\
+	{0xFE58, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_USB_MSK,PWR_BASEADDR_MAC,PWR_CMD_WRITE, 0xFF, 0x84}, /*USB RPWM*/\
+	{0x0361, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_PCI_MSK,PWR_BASEADDR_MAC,PWR_CMD_WRITE, 0xFF, 0x84}, /*PCIe RPWM*/\
+	{0x0002, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK,PWR_BASEADDR_MAC,PWR_CMD_DELAY, 0, PWRSEQ_DELAY_MS}, /*Delay*/\
+	{0x0008, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK,PWR_BASEADDR_MAC,PWR_CMD_WRITE, BIT4, 0}, /*.	0x08[4] = 0		 switch TSF to 40M*/\
+	{0x0109, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK,PWR_BASEADDR_MAC,PWR_CMD_POLLING, BIT7, 0}, /*Polling 0x109[7]=0  TSF in 40M*/\
+	{0x0029, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK,PWR_BASEADDR_MAC,PWR_CMD_WRITE, BIT6|BIT7, 0}, /*.	0x29[7:6] = 2b'00	 enable BB clock*/\
+	{0x0101, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK,PWR_BASEADDR_MAC,PWR_CMD_WRITE, BIT1, BIT1}, /*.	0x101[1] = 1*/\
+	{0x0100, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK,PWR_BASEADDR_MAC,PWR_CMD_WRITE, 0xFF, 0xFF}, /*.	0x100[7:0] = 0xFF	 enable WMAC TRX*/\
+	{0x0002, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK,PWR_BASEADDR_MAC,PWR_CMD_WRITE, BIT1|BIT0, BIT1|BIT0}, /*.	0x02[1:0] = 2b'11	 enable BB macro*/\
+	{0x0522, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK,PWR_BASEADDR_MAC,PWR_CMD_WRITE, 0xFF, 0}, /*.	0x522 = 0*/
+
+#define RTL8821A_TRANS_END															\
+	/* format */																\
+	/* { offset, cut_msk, fab_msk|interface_msk, base|cmd, msk, value }, // comments here*/								\
+	{0xFFFF, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK,0,PWR_CMD_END, 0, 0}, //
+
+extern struct wlan_pwr_cfg rtl8821A_power_on_flow[RTL8821A_TRANS_CARDEMU_TO_ACT_STEPS
+						+ RTL8821A_TRANS_END_STEPS];
+extern struct wlan_pwr_cfg rtl8821A_radio_off_flow[RTL8821A_TRANS_ACT_TO_CARDEMU_STEPS
+						+ RTL8821A_TRANS_END_STEPS];
+extern struct wlan_pwr_cfg rtl8821A_card_disable_flow[RTL8821A_TRANS_ACT_TO_CARDEMU_STEPS
+						+ RTL8821A_TRANS_CARDEMU_TO_PDN_STEPS
+						+ RTL8821A_TRANS_END_STEPS];
+extern struct wlan_pwr_cfg rtl8821A_card_enable_flow[RTL8821A_TRANS_ACT_TO_CARDEMU_STEPS
+						+ RTL8821A_TRANS_CARDEMU_TO_ACT_STEPS/*RTL8821A_TRANS_CARDEMU_TO_PDN_STEPS*/
+						+ RTL8821A_TRANS_END_STEPS];
+extern struct wlan_pwr_cfg rtl8821A_suspend_flow[RTL8821A_TRANS_ACT_TO_CARDEMU_STEPS
+						+ RTL8821A_TRANS_CARDEMU_TO_SUS_STEPS
+						+ RTL8821A_TRANS_END_STEPS];
+extern struct wlan_pwr_cfg rtl8821A_resume_flow[RTL8821A_TRANS_ACT_TO_CARDEMU_STEPS
+						+ RTL8821A_TRANS_CARDEMU_TO_SUS_STEPS
+						+ RTL8821A_TRANS_END_STEPS];
+extern struct wlan_pwr_cfg rtl8821A_hwpdn_flow[RTL8821A_TRANS_ACT_TO_CARDEMU_STEPS
+						+ RTL8821A_TRANS_CARDEMU_TO_PDN_STEPS
+						+ RTL8821A_TRANS_END_STEPS];
+extern struct wlan_pwr_cfg rtl8821A_enter_lps_flow[RTL8821A_TRANS_ACT_TO_LPS_STEPS
+						+ RTL8821A_TRANS_END_STEPS];
+extern struct wlan_pwr_cfg rtl8821A_leave_lps_flow[RTL8821A_TRANS_LPS_TO_ACT_STEPS
+						+ RTL8821A_TRANS_END_STEPS];
+
+/*RTL8812 Power Configuration CMDs for PCIe interface*/
+#define RTL8812_NIC_PWR_ON_FLOW				rtl8812_power_on_flow
+#define RTL8812_NIC_RF_OFF_FLOW				rtl8812_radio_off_flow
+#define RTL8812_NIC_DISABLE_FLOW 			rtl8812_card_disable_flow
+#define RTL8812_NIC_ENABLE_FLOW				rtl8812_card_enable_flow
+#define RTL8812_NIC_SUSPEND_FLOW			rtl8812_suspend_flow
+#define RTL8812_NIC_RESUME_FLOW				rtl8812_resume_flow
+#define RTL8812_NIC_PDN_FLOW					rtl8812_hwpdn_flow
+#define RTL8812_NIC_LPS_ENTER_FLOW			rtl8812_enter_lps_flow
+#define RTL8812_NIC_LPS_LEAVE_FLOW			rtl8812_leave_lps_flow
+
+/* RTL8821 Power Configuration CMDs for PCIe interface */
+#define RTL8821A_NIC_PWR_ON_FLOW			rtl8821A_power_on_flow
+#define RTL8821A_NIC_RF_OFF_FLOW			rtl8821A_radio_off_flow
+#define RTL8821A_NIC_DISABLE_FLOW			rtl8821A_card_disable_flow
+#define RTL8821A_NIC_ENABLE_FLOW			rtl8821A_card_enable_flow
+#define RTL8821A_NIC_SUSPEND_FLOW			rtl8821A_suspend_flow
+#define RTL8821A_NIC_RESUME_FLOW			rtl8821A_resume_flow
+#define RTL8821A_NIC_PDN_FLOW				rtl8821A_hwpdn_flow
+#define RTL8821A_NIC_LPS_ENTER_FLOW			rtl8821A_enter_lps_flow
+#define RTL8821A_NIC_LPS_LEAVE_FLOW			rtl8821A_leave_lps_flow
+
+
+#endif
+
diff --git a/drivers/staging/rtl8821ae/rtl8821ae/pwrseqcmd.c b/drivers/staging/rtl8821ae/rtl8821ae/pwrseqcmd.c
new file mode 100644
index 0000000..710bc01
--- /dev/null
+++ b/drivers/staging/rtl8821ae/rtl8821ae/pwrseqcmd.c
@@ -0,0 +1,140 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2009-2010  Realtek Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ * The full GNU General Public License is included in this distribution in the
+ * file called LICENSE.
+ *
+ * Contact Information:
+ * wlanfae <wlanfae@realtek.com>
+ * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
+ * Hsinchu 300, Taiwan.
+ *
+ * Larry Finger <Larry.Finger@lwfinger.net>
+ *
+ *****************************************************************************/
+
+#include "pwrseq.h"
+
+
+/*
+ *	Description:
+ *		This routine deals with parsing the Power Configuration CMDs
+ *		for the RTL8723/RTL8188E series ICs.
+ *	Assumption:
+ *		We should follow the specific format released by HW SD.
+ *
+ *	2011.07.07, added by Roger.
+ */
+bool rtl_hal_pwrseqcmdparsing(struct rtl_priv *rtlpriv, u8 cut_version,
+			      u8 fab_version, u8 interface_type,
+			      struct wlan_pwr_cfg pwrcfgcmd[])
+{
+	struct wlan_pwr_cfg pwr_cfg_cmd = {0};
+	bool polling_bit = false;
+	u32 ary_idx = 0;
+	u8 value = 0;
+	u32 offset = 0;
+	u32 polling_count = 0;
+	u32 max_polling_cnt = 5000;
+
+	do {
+		pwr_cfg_cmd = pwrcfgcmd[ary_idx];
+		RT_TRACE(COMP_INIT, DBG_TRACE,
+			("rtl_hal_pwrseqcmdparsing(): offset(%#x),cut_msk(%#x), fab_msk(%#x),"
+			"interface_msk(%#x), base(%#x), cmd(%#x), msk(%#x), value(%#x)\n",
+			GET_PWR_CFG_OFFSET(pwr_cfg_cmd), GET_PWR_CFG_CUT_MASK(pwr_cfg_cmd),
+			GET_PWR_CFG_FAB_MASK(pwr_cfg_cmd), GET_PWR_CFG_INTF_MASK(pwr_cfg_cmd),
+			GET_PWR_CFG_BASE(pwr_cfg_cmd), GET_PWR_CFG_CMD(pwr_cfg_cmd),
+			GET_PWR_CFG_MASK(pwr_cfg_cmd), GET_PWR_CFG_VALUE(pwr_cfg_cmd)));
+
+		if ((GET_PWR_CFG_FAB_MASK(pwr_cfg_cmd) & fab_version) &&
+		    (GET_PWR_CFG_CUT_MASK(pwr_cfg_cmd) & cut_version) &&
+		    (GET_PWR_CFG_INTF_MASK(pwr_cfg_cmd) & interface_type)) {
+			switch (GET_PWR_CFG_CMD(pwr_cfg_cmd)) {
+			case PWR_CMD_READ:
+				RT_TRACE(COMP_INIT, DBG_TRACE,
+					("rtl_hal_pwrseqcmdparsing(): PWR_CMD_READ\n"));
+				break;
+
+			case PWR_CMD_WRITE:
+				RT_TRACE(COMP_INIT, DBG_TRACE,
+					("rtl_hal_pwrseqcmdparsing(): PWR_CMD_WRITE\n"));
+				offset = GET_PWR_CFG_OFFSET(pwr_cfg_cmd);
+
+				/* Read the value from the system register */
+				value = rtl_read_byte(rtlpriv, offset);
+				value &= ~(GET_PWR_CFG_MASK(pwr_cfg_cmd));
+				value |= (GET_PWR_CFG_VALUE(pwr_cfg_cmd) &
+					  GET_PWR_CFG_MASK(pwr_cfg_cmd));
+
+				/* Write the value back to the system register */
+				rtl_write_byte(rtlpriv, offset, value);
+				break;
+
+			case PWR_CMD_POLLING:
+				RT_TRACE(COMP_INIT, DBG_TRACE,
+					("rtl_hal_pwrseqcmdparsing(): PWR_CMD_POLLING\n"));
+				polling_bit = false;
+				offset = GET_PWR_CFG_OFFSET(pwr_cfg_cmd);
+
+				do {
+					value = rtl_read_byte(rtlpriv, offset);
+
+					value = value & GET_PWR_CFG_MASK(pwr_cfg_cmd);
+					if (value == (GET_PWR_CFG_VALUE(pwr_cfg_cmd)
+							& GET_PWR_CFG_MASK(pwr_cfg_cmd)))
+						polling_bit = true;
+					else
+						udelay(10);
+
+					if (polling_count++ > max_polling_cnt)
+						return false;
+				} while (!polling_bit);
+
+				break;
+
+			case PWR_CMD_DELAY:
+				RT_TRACE(COMP_INIT, DBG_TRACE,
+					("rtl_hal_pwrseqcmdparsing(): PWR_CMD_DELAY\n"));
+				if (GET_PWR_CFG_VALUE(pwr_cfg_cmd) == PWRSEQ_DELAY_US)
+					udelay(GET_PWR_CFG_OFFSET(pwr_cfg_cmd));
+				else
+					mdelay(GET_PWR_CFG_OFFSET(pwr_cfg_cmd));
+				break;
+
+			case PWR_CMD_END:
+				RT_TRACE(COMP_INIT, DBG_TRACE,
+					("rtl_hal_pwrseqcmdparsing(): PWR_CMD_END\n"));
+				return true;
+
+			default:
+				RT_ASSERT(false,
+					("rtl_hal_pwrseqcmdparsing(): Unknown CMD!!\n"));
+				break;
+			}
+
+		}
+
+		ary_idx++;
+	} while (1);
+
+	return true;
+}
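+
+/*
+ * Worked example (illustrative only): for a table entry such as
+ *
+ *	{0x0005, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK,
+ *	 PWR_BASEADDR_MAC, PWR_CMD_WRITE, BIT7, 0}
+ *
+ * the cut/fab/interface masks match everything, so the loop above reads
+ * byte 0x0005, clears BIT7 in it (mask BIT7, value 0) and writes it back,
+ * i.e. "disable HWPDN, 0x04[15] = 0" as the power-on tables comment it.
+ */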
diff --git a/drivers/staging/rtl8821ae/rtl8821ae/pwrseqcmd.h b/drivers/staging/rtl8821ae/rtl8821ae/pwrseqcmd.h
new file mode 100644
index 0000000..571e7e5
--- /dev/null
+++ b/drivers/staging/rtl8821ae/rtl8821ae/pwrseqcmd.h
@@ -0,0 +1,71 @@
+#ifndef __RTL8821AE_PWRSEQCMD_H__
+#define __RTL8821AE_PWRSEQCMD_H__
+
+#include "../wifi.h"
+/*---------------------------------------------*/
+/*The value of cmd: 4 bits */
+/*---------------------------------------------*/
+#define	PWR_CMD_READ 		0x00
+#define	PWR_CMD_WRITE	0x01
+#define	PWR_CMD_POLLING	0x02
+#define	PWR_CMD_DELAY	0x03
+#define	PWR_CMD_END		0x04
+
+/* define the base address of each block */
+#define	PWR_BASEADDR_MAC	0x00
+#define	PWR_BASEADDR_USB	0x01
+#define	PWR_BASEADDR_PCIE	0x02
+#define	PWR_BASEADDR_SDIO	0x03
+
+#define	PWR_INTF_SDIO_MSK	BIT(0)
+#define	PWR_INTF_USB_MSK	BIT(1)
+#define	PWR_INTF_PCI_MSK	BIT(2)
+#define	PWR_INTF_ALL_MSK	(BIT(0)|BIT(1)|BIT(2)|BIT(3))
+
+#define	PWR_FAB_TSMC_MSK	BIT(0)
+#define	PWR_FAB_UMC_MSK		BIT(1)
+#define	PWR_FAB_ALL_MSK		(BIT(0)|BIT(1)|BIT(2)|BIT(3))
+
+#define	PWR_CUT_TESTCHIP_MSK	BIT(0)
+#define	PWR_CUT_A_MSK		BIT(1)
+#define	PWR_CUT_B_MSK		BIT(2)
+#define	PWR_CUT_C_MSK		BIT(3)
+#define	PWR_CUT_D_MSK		BIT(4)
+#define	PWR_CUT_E_MSK		BIT(5)
+#define	PWR_CUT_F_MSK		BIT(6)
+#define	PWR_CUT_G_MSK		BIT(7)
+#define	PWR_CUT_ALL_MSK		0xFF
+
+
+enum pwrseq_delay_unit {
+	PWRSEQ_DELAY_US,
+	PWRSEQ_DELAY_MS,
+};
+
+struct wlan_pwr_cfg {
+	u16 offset;
+	u8 cut_msk;
+	u8 fab_msk:4;
+	u8 interface_msk:4;
+	u8 base:4;
+	u8 cmd:4;
+	u8 msk;
+	u8 value;
+};
+
+#define	GET_PWR_CFG_OFFSET(__PWR_CMD)	__PWR_CMD.offset
+#define	GET_PWR_CFG_CUT_MASK(__PWR_CMD)	__PWR_CMD.cut_msk
+#define	GET_PWR_CFG_FAB_MASK(__PWR_CMD)	__PWR_CMD.fab_msk
+#define	GET_PWR_CFG_INTF_MASK(__PWR_CMD)	__PWR_CMD.interface_msk
+#define	GET_PWR_CFG_BASE(__PWR_CMD)	__PWR_CMD.base
+#define	GET_PWR_CFG_CMD(__PWR_CMD)	__PWR_CMD.cmd
+#define	GET_PWR_CFG_MASK(__PWR_CMD)	__PWR_CMD.msk
+#define	GET_PWR_CFG_VALUE(__PWR_CMD)	__PWR_CMD.value
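+
+/*
+ * Illustrative note: a wlan_pwr_cfg entry packs the "fab_msk|interface_msk"
+ * and "base|cmd" columns of the table format into pairs of 4-bit fields, so
+ * PWR_BASEADDR_MAC together with PWR_CMD_WRITE shares one byte and is read
+ * back through GET_PWR_CFG_BASE()/GET_PWR_CFG_CMD().
+ */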
+
+bool rtl_hal_pwrseqcmdparsing(struct rtl_priv *rtlpriv, u8 cut_version,
+			      u8 fab_version, u8 interface_type,
+			      struct wlan_pwr_cfg pwrcfgcmd[]);
+
+#endif
+
diff --git a/drivers/staging/rtl8821ae/rtl8821ae/reg.h b/drivers/staging/rtl8821ae/rtl8821ae/reg.h
new file mode 100644
index 0000000..09c5f00
--- /dev/null
+++ b/drivers/staging/rtl8821ae/rtl8821ae/reg.h
@@ -0,0 +1,2427 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2009-2010  Realtek Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ * The full GNU General Public License is included in this distribution in the
+ * file called LICENSE.
+ *
+ * Contact Information:
+ * wlanfae <wlanfae@realtek.com>
+ * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
+ * Hsinchu 300, Taiwan.
+ *
+ * Larry Finger <Larry.Finger@lwfinger.net>
+ *
+ *****************************************************************************/
+
+#ifndef __RTL8821AE_REG_H__
+#define __RTL8821AE_REG_H__
+
+#define TXPKT_BUF_SELECT				0x69
+#define RXPKT_BUF_SELECT				0xA5
+#define DISABLE_TRXPKT_BUF_ACCESS			0x0
+
+#define REG_SYS_ISO_CTRL			0x0000
+#define REG_SYS_FUNC_EN				0x0002
+#define REG_APS_FSMCO				0x0004
+#define REG_SYS_CLKR				0x0008
+#define REG_9346CR					0x000A
+#define REG_EE_VPD					0x000C
+#define REG_AFE_MISC				0x0010
+#define REG_SPS0_CTRL				0x0011
+#define REG_SPS_OCP_CFG				0x0018
+#define REG_RSV_CTRL				0x001C
+#define REG_RF_CTRL					0x001F
+#define REG_LDOA15_CTRL				0x0020
+#define REG_LDOV12D_CTRL			0x0021
+#define REG_LDOHCI12_CTRL			0x0022
+#define REG_LPLDO_CTRL				0x0023
+#define REG_AFE_XTAL_CTRL			0x0024
+#define REG_AFE_LDO_CTRL                	0x0027 /* 1.5v for 8188EE test chip, 1.4v for MP chip */
+#define REG_AFE_PLL_CTRL			0x0028
+#define REG_MAC_PHY_CTRL			0x002c
+#define REG_EFUSE_CTRL				0x0030
+#define REG_EFUSE_TEST				0x0034
+#define REG_PWR_DATA				0x0038
+#define REG_CAL_TIMER				0x003C
+#define REG_ACLK_MON				0x003E
+#define REG_GPIO_MUXCFG			0x0040
+#define REG_GPIO_IO_SEL				0x0042
+#define REG_MAC_PINMUX_CFG		0x0043
+#define REG_GPIO_PIN_CTRL			0x0044
+#define REG_GPIO_INTM				0x0048
+#define REG_LEDCFG0					0x004C
+#define REG_LEDCFG1					0x004D
+#define REG_LEDCFG2					0x004E
+#define REG_LEDCFG3					0x004F
+#define REG_FSIMR					0x0050
+#define REG_FSISR					0x0054
+#define REG_HSIMR					0x0058
+#define REG_HSISR					0x005c
+#define REG_GPIO_PIN_CTRL_2		0x0060
+#define REG_GPIO_IO_SEL_2			0x0062
+#define REG_MULTI_FUNC_CTRL			0x0068
+#define REG_GPIO_OUTPUT			0x006c
+#define REG_OPT_CTRL			0x0074
+#define REG_AFE_XTAL_CTRL_EXT		0x0078
+#define REG_XCK_OUT_CTRL			0x007c
+#define REG_MCUFWDL				0x0080
+#define REG_WOL_EVENT				0x0081
+#define REG_MCUTSTCFG				0x0084
+
+
+#define REG_HIMR					0x00B0
+#define REG_HISR					0x00B4
+#define REG_HIMRE					0x00B8
+#define REG_HISRE					0x00BC
+
+#define REG_PMC_DBG_CTRL2			0x00CC
+
+#define REG_EFUSE_ACCESS			0x00CF
+
+#define REG_BIST_SCAN				0x00D0
+#define REG_BIST_RPT				0x00D4
+#define REG_BIST_ROM_RPT			0x00D8
+#define REG_USB_SIE_INTF			0x00E0
+#define REG_PCIE_MIO_INTF			0x00E4
+#define REG_PCIE_MIO_INTD			0x00E8
+#define REG_HPON_FSM				0x00EC
+#define REG_SYS_CFG					0x00F0
+#define REG_GPIO_OUTSTS				0x00F4
+#define REG_SYS_CFG1				0x00FC
+#define REG_ROM_VERSION				0x00FD
+
+#define REG_CR						0x0100
+#define REG_PBP						0x0104
+#define REG_PKT_BUFF_ACCESS_CTRL			0x0106
+#define REG_TRXDMA_CTRL				0x010C
+#define REG_TRXFF_BNDY				0x0114
+#define REG_TRXFF_STATUS			0x0118
+#define REG_RXFF_PTR				0x011C
+
+#define REG_CPWM					0x012F
+#define REG_FWIMR					0x0130
+#define REG_FWISR					0x0134
+#define REG_PKTBUF_DBG_CTRL			0x0140
+#define REG_PKTBUF_DBG_DATA_L		0x0144
+#define REG_PKTBUF_DBG_DATA_H		0x0148
+#define REG_RXPKTBUF_CTRL				(REG_PKTBUF_DBG_CTRL+2)
+
+#define REG_TC0_CTRL				0x0150
+#define REG_TC1_CTRL				0x0154
+#define REG_TC2_CTRL				0x0158
+#define REG_TC3_CTRL				0x015C
+#define REG_TC4_CTRL				0x0160
+#define REG_TCUNIT_BASE				0x0164
+#define REG_MBIST_START				0x0174
+#define REG_MBIST_DONE				0x0178
+#define REG_MBIST_FAIL				0x017C
+#define REG_32K_CTRL					0x0194
+#define REG_C2HEVT_MSG_NORMAL		0x01A0
+#define REG_C2HEVT_CLEAR			0x01AF
+#define REG_C2HEVT_MSG_TEST			0x01B8
+#define REG_MCUTST_1				0x01c0
+#define REG_FMETHR					0x01C8
+#define REG_HMETFR					0x01CC
+#define REG_HMEBOX_0				0x01D0
+#define REG_HMEBOX_1				0x01D4
+#define REG_HMEBOX_2				0x01D8
+#define REG_HMEBOX_3				0x01DC
+
+#define REG_LLT_INIT				0x01E0
+#define REG_BB_ACCEESS_CTRL			0x01E8
+#define REG_BB_ACCESS_DATA			0x01EC
+
+#define REG_HMEBOX_EXT_0			0x01F0
+#define REG_HMEBOX_EXT_1			0x01F4
+#define REG_HMEBOX_EXT_2			0x01F8
+#define REG_HMEBOX_EXT_3			0x01FC
+
+#define REG_RQPN					0x0200
+#define REG_FIFOPAGE				0x0204
+#define REG_TDECTRL					0x0208
+#define REG_TXDMA_OFFSET_CHK		0x020C
+#define REG_TXDMA_STATUS			0x0210
+#define REG_RQPN_NPQ				0x0214
+
+#define REG_RXDMA_AGG_PG_TH		0x0280
+#define REG_FW_UPD_RDPTR			0x0284 /* FW shall update this register before FW writes RXPKT_RELEASE_POLL to 1 */
+#define REG_RXDMA_CONTROL			0x0286 /* Control the RX DMA.*/
+#define REG_RXPKT_NUM				0x0287 /* The number of packets in RXPKTBUF.	*/
+
+#define	REG_PCIE_CTRL_REG			0x0300
+#define	REG_INT_MIG					0x0304
+#define	REG_BCNQ_DESA				0x0308
+#define	REG_HQ_DESA					0x0310
+#define	REG_MGQ_DESA				0x0318
+#define	REG_VOQ_DESA				0x0320
+#define	REG_VIQ_DESA				0x0328
+#define	REG_BEQ_DESA				0x0330
+#define	REG_BKQ_DESA				0x0338
+#define	REG_RX_DESA					0x0340
+
+#define	REG_DBI_WDATA				0x0348
+#define	REG_DBI_RDATA				0x034C
+#define	REG_DBI_ADDR				0x0350
+#define	REG_DBI_FLAG				0x0352
+#define	REG_MDIO_WDATA				0x0354
+#define	REG_MDIO_RDATA				0x0356
+#define	REG_MDIO_CTL				0x0358
+#define	REG_DBG_SEL					0x0360
+#define	REG_PCIE_HRPWM				0x0361
+#define	REG_PCIE_HCPWM				0x0363
+#define	REG_UART_CTRL				0x0364
+#define	REG_WATCH_DOG				0x0368
+#define	REG_UART_TX_DESA			0x0370
+#define	REG_UART_RX_DESA			0x0378
+
+
+#define	REG_HDAQ_DESA_NODEF			0x0000
+#define	REG_CMDQ_DESA_NODEF			0x0000
+
+#define REG_VOQ_INFORMATION			0x0400
+#define REG_VIQ_INFORMATION			0x0404
+#define REG_BEQ_INFORMATION			0x0408
+#define REG_BKQ_INFORMATION			0x040C
+#define REG_MGQ_INFORMATION			0x0410
+#define REG_HGQ_INFORMATION			0x0414
+#define REG_BCNQ_INFORMATION			0x0418
+#define REG_TXPKT_EMPTY				0x041A
+
+
+#define REG_CPU_MGQ_INFORMATION		0x041C
+#define REG_FWHW_TXQ_CTRL			0x0420
+#define REG_HWSEQ_CTRL				0x0423
+#define REG_TXPKTBUF_BCNQ_BDNY		0x0424
+#define REG_TXPKTBUF_MGQ_BDNY		0x0425
+#define REG_MULTI_BCNQ_EN			0x0426
+#define REG_MULTI_BCNQ_OFFSET		0x0427
+#define REG_SPEC_SIFS				0x0428
+#define REG_RL						0x042A
+#define REG_DARFRC					0x0430
+#define REG_RARFRC					0x0438
+#define REG_RRSR					0x0440
+#define REG_ARFR0					0x0444
+#define REG_ARFR1					0x044C
+#define REG_CCK_CHECK				0x0454
+#define REG_AMPDU_MAX_TIME			0x0456
+#define REG_AGGLEN_LMT				0x0458
+#define REG_AMPDU_MIN_SPACE			0x045C
+#define REG_TXPKTBUF_WMAC_LBK_BF_HD	0x045D
+#define REG_FAST_EDCA_CTRL			0x0460
+#define REG_RD_RESP_PKT_TH			0x0463
+#define REG_INIRTS_RATE_SEL			0x0480
+#define REG_INIDATA_RATE_SEL		0x0484
+#define REG_ARFR2					0x048C
+#define REG_ARFR3					0x0494
+#define REG_POWER_STATUS			0x04A4
+#define REG_POWER_STAGE1			0x04B4
+#define REG_POWER_STAGE2			0x04B8
+#define REG_PKT_LIFE_TIME			0x04C0
+#define REG_STBC_SETTING			0x04C4
+#define REG_HT_SINGLE_AMPDU			0x04C7
+#define REG_PROT_MODE_CTRL			0x04C8
+#define REG_MAX_AGGR_NUM			0x04CA
+#define REG_BAR_MODE_CTRL			0x04CC
+#define REG_RA_TRY_RATE_AGG_LMT		0x04CF
+#define REG_EARLY_MODE_CONTROL			0x04D0
+#define REG_NQOS_SEQ				0x04DC
+#define REG_QOS_SEQ					0x04DE
+#define REG_NEED_CPU_HANDLE			0x04E0
+#define REG_PKT_LOSE_RPT			0x04E1
+#define REG_PTCL_ERR_STATUS			0x04E2
+#define REG_TX_RPT_CTRL				0x04EC
+#define REG_TX_RPT_TIME				0x04F0
+#define REG_DUMMY					0x04FC
+
+#define REG_EDCA_VO_PARAM			0x0500
+#define REG_EDCA_VI_PARAM			0x0504
+#define REG_EDCA_BE_PARAM			0x0508
+#define REG_EDCA_BK_PARAM			0x050C
+#define REG_BCNTCFG					0x0510
+#define REG_PIFS					0x0512
+#define REG_RDG_PIFS				0x0513
+#define REG_SIFS_CTX				0x0514
+#define REG_SIFS_TRX				0x0516
+#define REG_AGGR_BREAK_TIME			0x051A
+#define REG_SLOT					0x051B
+#define REG_TX_PTCL_CTRL			0x0520
+#define REG_TXPAUSE					0x0522
+#define REG_DIS_TXREQ_CLR			0x0523
+#define REG_RD_CTRL					0x0524
+#define REG_TBTT_PROHIBIT			0x0540
+#define REG_RD_NAV_NXT				0x0544
+#define REG_NAV_PROT_LEN			0x0546
+#define REG_BCN_CTRL				0x0550
+#define REG_USTIME_TSF				0x0551
+#define REG_MBID_NUM				0x0552
+#define REG_DUAL_TSF_RST			0x0553
+#define REG_BCN_INTERVAL			0x0554
+#define REG_MBSSID_BCN_SPACE		0x0554
+#define REG_DRVERLYINT				0x0558
+#define REG_BCNDMATIM				0x0559
+#define REG_ATIMWND					0x055A
+#define REG_BCN_MAX_ERR				0x055D
+#define REG_RXTSF_OFFSET_CCK		0x055E
+#define REG_RXTSF_OFFSET_OFDM		0x055F
+#define REG_TSFTR					0x0560
+#define REG_INIT_TSFTR				0x0564
+#define REG_SECONDARY_CCA_CTRL		0x0577
+#define REG_PSTIMER					0x0580
+#define REG_TIMER0					0x0584
+#define REG_TIMER1					0x0588
+#define REG_ACMHWCTRL				0x05C0
+#define REG_ACMRSTCTRL				0x05C1
+#define REG_ACMAVG					0x05C2
+#define REG_VO_ADMTIME				0x05C4
+#define REG_VI_ADMTIME				0x05C6
+#define REG_BE_ADMTIME				0x05C8
+#define REG_EDCA_RANDOM_GEN			0x05CC
+#define REG_NOA_DESC_SEL			0x05CF
+#define REG_NOA_DESC_DURATION		0x05E0
+#define REG_NOA_DESC_INTERVAL		0x05E4
+#define REG_NOA_DESC_START			0x05E8
+#define REG_NOA_DESC_COUNT			0x05EC
+#define REG_SCH_TX_CMD				0x05F8
+
+#define REG_APSD_CTRL				0x0600
+#define REG_BWOPMODE				0x0603
+#define REG_TCR						0x0604
+#define REG_RCR						0x0608
+#define REG_RX_PKT_LIMIT			0x060C
+#define REG_RX_DLK_TIME				0x060D
+#define REG_RX_DRVINFO_SZ			0x060F
+
+#define REG_MACID					0x0610
+#define REG_BSSID					0x0618
+#define REG_MAR						0x0620
+#define REG_MBIDCAMCFG				0x0628
+
+#define REG_USTIME_EDCA				0x0638
+#define REG_MAC_SPEC_SIFS			0x063A
+#define REG_RESP_SIFS_CCK			0x063C
+#define REG_RESP_SIFS_OFDM			0x063E
+#define REG_ACKTO					0x0640
+#define REG_CTS2TO					0x0641
+#define REG_EIFS					0x0642
+
+#define REG_NAV_CTRL				0x0650
+#define REG_NAV_UPPER				0x0652
+#define REG_BACAMCMD				0x0654
+#define REG_BACAMCONTENT			0x0658
+#define REG_LBDLY					0x0660
+#define REG_FWDLY					0x0661
+#define REG_RXERR_RPT				0x0664
+#define REG_TRXPTCL_CTL				0x0668
+
+#define REG_CAMCMD					0x0670
+#define REG_CAMWRITE				0x0674
+#define REG_CAMREAD					0x0678
+#define REG_CAMDBG					0x067C
+#define REG_SECCFG					0x0680
+
+#define REG_WOW_CTRL				0x0690
+#define REG_PSSTATUS				0x0691
+#define REG_PS_RX_INFO				0x0692
+#define REG_UAPSD_TID				0x0693
+#define REG_LPNAV_CTRL				0x0694
+#define REG_WKFMCAM_NUM				0x0698
+#define REG_WKFMCAM_RWD				0x069C
+#define REG_RXFLTMAP0				0x06A0
+#define REG_RXFLTMAP1				0x06A2
+#define REG_RXFLTMAP2				0x06A4
+#define REG_BCN_PSR_RPT				0x06A8
+#define REG_CALB32K_CTRL			0x06AC
+#define REG_PKT_MON_CTRL			0x06B4
+#define REG_BT_COEX_TABLE			0x06C0
+#define REG_WMAC_RESP_TXINFO		0x06D8
+
+#define REG_USB_INFO				0xFE17
+#define REG_USB_SPECIAL_OPTION		0xFE55
+#define REG_USB_DMA_AGG_TO			0xFE5B
+#define REG_USB_AGG_TO				0xFE5C
+#define REG_USB_AGG_TH				0xFE5D
+
+#define REG_TEST_USB_TXQS			0xFE48
+#define REG_TEST_SIE_VID			0xFE60
+#define REG_TEST_SIE_PID			0xFE62
+#define REG_TEST_SIE_OPTIONAL		0xFE64
+#define REG_TEST_SIE_CHIRP_K		0xFE65
+#define REG_TEST_SIE_PHY			0xFE66
+#define REG_TEST_SIE_MAC_ADDR		0xFE70
+#define REG_TEST_SIE_STRING			0xFE80
+
+#define REG_NORMAL_SIE_VID			0xFE60
+#define REG_NORMAL_SIE_PID			0xFE62
+#define REG_NORMAL_SIE_OPTIONAL		0xFE64
+#define REG_NORMAL_SIE_EP			0xFE65
+#define REG_NORMAL_SIE_PHY			0xFE68
+#define REG_NORMAL_SIE_MAC_ADDR		0xFE70
+#define REG_NORMAL_SIE_STRING		0xFE80
+
+#define	CR9346				REG_9346CR
+#define	MSR				(REG_CR + 2)
+#define	ISR				REG_HISR
+#define	TSFR				REG_TSFTR
+
+#define	MACIDR0				REG_MACID
+#define	MACIDR4				(REG_MACID + 4)
+
+#define PBP				REG_PBP
+
+#define	IDR0				MACIDR0
+#define	IDR4				MACIDR4
+
+#define	UNUSED_REGISTER			0x1BF
+#define	DCAM				UNUSED_REGISTER
+#define	PSR				UNUSED_REGISTER
+#define BBADDR				UNUSED_REGISTER
+#define	PHYDATAR			UNUSED_REGISTER
+
+#define	INVALID_BBRF_VALUE		0x12345678
+
+#define	MAX_MSS_DENSITY_2T 		0x13
+#define	MAX_MSS_DENSITY_1T 		0x0A
+
+#define	CMDEEPROM_EN			BIT(5)
+#define	CMDEEPROM_SEL			BIT(4)
+#define	CMD9346CR_9356SEL		BIT(4)
+#define	AUTOLOAD_EEPROM			(CMDEEPROM_EN|CMDEEPROM_SEL)
+#define	AUTOLOAD_EFUSE			CMDEEPROM_EN
+
+#define	GPIOSEL_GPIO			0
+#define	GPIOSEL_ENBT			BIT(5)
+
+#define	GPIO_IN				REG_GPIO_PIN_CTRL
+#define	GPIO_OUT			(REG_GPIO_PIN_CTRL+1)
+#define	GPIO_IO_SEL			(REG_GPIO_PIN_CTRL+2)
+#define	GPIO_MOD			(REG_GPIO_PIN_CTRL+3)
+
+/* 8723/8188E Host System Interrupt Mask Register (offset 0x58, 32 bits) */
+#define	HSIMR_GPIO12_0_INT_EN			BIT(0)
+#define	HSIMR_SPS_OCP_INT_EN			BIT(5)
+#define	HSIMR_RON_INT_EN			BIT(6)
+#define	HSIMR_PDN_INT_EN			BIT(7)
+#define	HSIMR_GPIO9_INT_EN			BIT(25)
+
+
+/*
+ * 8723/8188E Host System Interrupt Status Register (offset 0x5C, 32 bits)
+ */
+#define	HSISR_GPIO12_0_INT			BIT(0)
+#define	HSISR_SPS_OCP_INT			BIT(5)
+#define	HSISR_RON_INT_EN			BIT(6)
+#define	HSISR_PDNINT				BIT(7)
+#define	HSISR_GPIO9_INT				BIT(25)
+
+#define	MSR_NOLINK					0x00
+#define	MSR_ADHOC					0x01
+#define	MSR_INFRA					0x02
+#define	MSR_AP						0x03
+
+#define	RRSR_RSC_OFFSET				21
+#define	RRSR_SHORT_OFFSET			23
+#define	RRSR_RSC_BW_40M				0x600000
+#define	RRSR_RSC_UPSUBCHNL			0x400000
+#define	RRSR_RSC_LOWSUBCHNL			0x200000
+#define	RRSR_SHORT					0x800000
+#define	RRSR_1M						BIT(0)
+#define	RRSR_2M						BIT(1)
+#define	RRSR_5_5M					BIT(2)
+#define	RRSR_11M					BIT(3)
+#define	RRSR_6M						BIT(4)
+#define	RRSR_9M						BIT(5)
+#define	RRSR_12M					BIT(6)
+#define	RRSR_18M					BIT(7)
+#define	RRSR_24M					BIT(8)
+#define	RRSR_36M					BIT(9)
+#define	RRSR_48M					BIT(10)
+#define	RRSR_54M					BIT(11)
+#define	RRSR_MCS0					BIT(12)
+#define	RRSR_MCS1					BIT(13)
+#define	RRSR_MCS2					BIT(14)
+#define	RRSR_MCS3					BIT(15)
+#define	RRSR_MCS4					BIT(16)
+#define	RRSR_MCS5					BIT(17)
+#define	RRSR_MCS6					BIT(18)
+#define	RRSR_MCS7					BIT(19)
+#define	BRSR_ACKSHORTPMB			BIT(23)
+
+#define	RATR_1M						0x00000001
+#define	RATR_2M						0x00000002
+#define	RATR_55M					0x00000004
+#define	RATR_11M					0x00000008
+#define	RATR_6M						0x00000010
+#define	RATR_9M						0x00000020
+#define	RATR_12M					0x00000040
+#define	RATR_18M					0x00000080
+#define	RATR_24M					0x00000100
+#define	RATR_36M					0x00000200
+#define	RATR_48M					0x00000400
+#define	RATR_54M					0x00000800
+#define	RATR_MCS0					0x00001000
+#define	RATR_MCS1					0x00002000
+#define	RATR_MCS2					0x00004000
+#define	RATR_MCS3					0x00008000
+#define	RATR_MCS4					0x00010000
+#define	RATR_MCS5					0x00020000
+#define	RATR_MCS6					0x00040000
+#define	RATR_MCS7					0x00080000
+#define	RATR_MCS8					0x00100000
+#define	RATR_MCS9					0x00200000
+#define	RATR_MCS10					0x00400000
+#define	RATR_MCS11					0x00800000
+#define	RATR_MCS12					0x01000000
+#define	RATR_MCS13					0x02000000
+#define	RATR_MCS14					0x04000000
+#define	RATR_MCS15					0x08000000
+
+#define RATE_1M						BIT(0)
+#define RATE_2M						BIT(1)
+#define RATE_5_5M					BIT(2)
+#define RATE_11M					BIT(3)
+#define RATE_6M						BIT(4)
+#define RATE_9M						BIT(5)
+#define RATE_12M					BIT(6)
+#define RATE_18M					BIT(7)
+#define RATE_24M					BIT(8)
+#define RATE_36M					BIT(9)
+#define RATE_48M					BIT(10)
+#define RATE_54M					BIT(11)
+#define RATE_MCS0					BIT(12)
+#define RATE_MCS1					BIT(13)
+#define RATE_MCS2					BIT(14)
+#define RATE_MCS3					BIT(15)
+#define RATE_MCS4					BIT(16)
+#define RATE_MCS5					BIT(17)
+#define RATE_MCS6					BIT(18)
+#define RATE_MCS7					BIT(19)
+#define RATE_MCS8					BIT(20)
+#define RATE_MCS9					BIT(21)
+#define RATE_MCS10					BIT(22)
+#define RATE_MCS11					BIT(23)
+#define RATE_MCS12					BIT(24)
+#define RATE_MCS13					BIT(25)
+#define RATE_MCS14					BIT(26)
+#define RATE_MCS15					BIT(27)
+
+#define	RATE_ALL_CCK		(RATR_1M | RATR_2M | RATR_55M | RATR_11M)
+#define	RATE_ALL_OFDM_AG	(RATR_6M | RATR_9M | RATR_12M | RATR_18M |\
+							RATR_24M| RATR_36M | RATR_48M | RATR_54M)
+#define	RATE_ALL_OFDM_1SS	(RATR_MCS0 | RATR_MCS1 | RATR_MCS2 |\
+							RATR_MCS3 | RATR_MCS4 | RATR_MCS5 |\
+							RATR_MCS6 | RATR_MCS7)
+#define	RATE_ALL_OFDM_2SS	(RATR_MCS8 | RATR_MCS9 | RATR_MCS10 |\
+							RATR_MCS11| RATR_MCS12 | RATR_MCS13 |\
+							RATR_MCS14 | RATR_MCS15)
+
+#define	BW_OPMODE_20MHZ				BIT(2)
+#define	BW_OPMODE_5G				BIT(1)
+#define	BW_OPMODE_11J				BIT(0)
+
+#define	CAM_VALID					BIT(15)
+#define	CAM_NOTVALID				0x0000
+#define	CAM_USEDK					BIT(5)
+
+#define	CAM_NONE					0x0
+#define	CAM_WEP40					0x01
+#define	CAM_TKIP					0x02
+#define	CAM_AES						0x04
+#define	CAM_WEP104					0x05
+
+#define	TOTAL_CAM_ENTRY				32
+#define	HALF_CAM_ENTRY				16
+
+#define	CAM_WRITE					BIT(16)
+#define	CAM_READ					0x00000000
+#define	CAM_POLLINIG				BIT(31)
+
+#define	SCR_USEDK					0x01
+#define	SCR_TXSEC_ENABLE			0x02
+#define	SCR_RXSEC_ENABLE			0x04
+
+#define	WOW_PMEN					BIT(0)
+#define	WOW_WOMEN					BIT(1)
+#define	WOW_MAGIC					BIT(2)
+#define	WOW_UWF						BIT(3)
+
+/*********************************************
+*       8188 IMR/ISR bits
+**********************************************/
+#define	IMR_DISABLED			0x0
+/* IMR DW0(0x0060-0063) Bit 0-31 */
+#define	IMR_TXCCK				BIT(30)		/* TXRPT interrupt when CCX bit of the packet is set	*/
+#define	IMR_PSTIMEOUT			BIT(29)		/* Power Save Time Out Interrupt */
+#define	IMR_GTINT4				BIT(28)		/* When GTIMER4 expires, this bit is set to 1	*/
+#define	IMR_GTINT3				BIT(27)		/* When GTIMER3 expires, this bit is set to 1	*/
+#define	IMR_TBDER				BIT(26)		/* Transmit Beacon0 Error			*/
+#define	IMR_TBDOK				BIT(25)		/* Transmit Beacon0 OK			*/
+#define	IMR_TSF_BIT32_TOGGLE		BIT(24)		/* TSF Timer BIT32 toggle indication interrupt		*/
+#define	IMR_BCNDMAINT0			BIT(20)		/* Beacon DMA Interrupt 0			*/
+#define	IMR_BCNDOK0				BIT(16)		/* Beacon Queue DMA OK0			*/
+#define	IMR_HSISR_IND_ON_INT		BIT(15)		/* HSISR Indicator (when HSIMR & HSISR is true, this bit is set to 1) */
+#define	IMR_BCNDMAINT_E			BIT(14)		/* Beacon DMA Interrupt Extension for Win7 */
+#define	IMR_ATIMEND				BIT(12)		/* CTWindow End or ATIM Window End */
+#define	IMR_HISR1_IND_INT			BIT(11)		/* HISR1 Indicator (when HISR1 & HIMR1 is true, this bit is set to 1) */
+#define	IMR_C2HCMD				BIT(10)		/* CPU to Host Command INT Status, Write 1 clear	*/
+#define	IMR_CPWM2			BIT(9)			/* CPU power Mode exchange INT Status, Write 1 clear	*/
+#define	IMR_CPWM				BIT(8)			/* CPU power Mode exchange INT Status, Write 1 clear	*/
+#define	IMR_HIGHDOK				BIT(7)			/* High Queue DMA OK	*/
+#define	IMR_MGNTDOK				BIT(6)			/* Management Queue DMA OK	*/
+#define	IMR_BKDOK				BIT(5)			/* AC_BK DMA OK		*/
+#define	IMR_BEDOK				BIT(4)		/* AC_BE DMA OK	*/
+#define	IMR_VIDOK				BIT(3)			/* AC_VI DMA OK	*/
+#define	IMR_VODOK				BIT(2)			/* AC_VO DMA OK	*/
+#define	IMR_RDU				BIT(1)			/* Rx Descriptor Unavailable	*/
+#define	IMR_ROK				BIT(0)			/* Receive DMA OK */
+
+/* IMR DW1(0x00B4-00B7) Bit 0-31 */
+#define	IMR_BCNDMAINT7			BIT(27)		/* Beacon DMA Interrupt 7 	*/
+#define	IMR_BCNDMAINT6			BIT(26)		/* Beacon DMA Interrupt 6		*/
+#define	IMR_BCNDMAINT5			BIT(25)		/* Beacon DMA Interrupt 5		*/
+#define	IMR_BCNDMAINT4			BIT(24)		/* Beacon DMA Interrupt 4		*/
+#define	IMR_BCNDMAINT3			BIT(23)		/* Beacon DMA Interrupt 3		*/
+#define	IMR_BCNDMAINT2			BIT(22)		/* Beacon DMA Interrupt 2		*/
+#define	IMR_BCNDMAINT1			BIT(21)		/* Beacon DMA Interrupt 1		*/
+#define	IMR_BCNDOK7				BIT(20)		/* Beacon Queue DMA OK Interrupt 7 */
+#define	IMR_BCNDOK6				BIT(19)		/* Beacon Queue DMA OK Interrupt 6 */
+#define	IMR_BCNDOK5				BIT(18)		/* Beacon Queue DMA OK Interrupt 5 */
+#define	IMR_BCNDOK4				BIT(17)		/* Beacon Queue DMA OK Interrupt 4 */
+#define	IMR_BCNDOK3				BIT(16)		/* Beacon Queue DMA OK Interrupt 3 */
+#define	IMR_BCNDOK2				BIT(15)		/* Beacon Queue DMA OK Interrupt 2 */
+#define	IMR_BCNDOK1				BIT(14)		/* Beacon Queue DMA OK Interrupt 1 */
+#define	IMR_ATIMEND_E		BIT(13)		/* ATIM Window End Extension for Win7 */
+#define	IMR_TXERR				BIT(11)		/* Tx Error Flag Interrupt Status, write 1 clear. */
+#define	IMR_RXERR				BIT(10)		/* Rx Error Flag INT Status, Write 1 clear */
+#define	IMR_TXFOVW				BIT(9)			/* Transmit FIFO Overflow */
+#define	IMR_RXFOVW				BIT(8)			/* Receive FIFO Overflow */
+
+
+#define	HWSET_MAX_SIZE				512
+#define	EFUSE_MAX_SECTION			64
+#define	EFUSE_REAL_CONTENT_LEN			256
+#define	EFUSE_OOB_PROTECT_BYTES			18	/* PG data excludes header, dummy 7 bytes from CP test and reserved 1 byte. */
+
+
+#define	EEPROM_DEFAULT_TSSI					0x0
+#define EEPROM_DEFAULT_TXPOWERDIFF			0x0
+#define EEPROM_DEFAULT_CRYSTALCAP			0x5
+#define EEPROM_DEFAULT_BOARDTYPE			0x02
+#define EEPROM_DEFAULT_TXPOWER				0x1010
+#define	EEPROM_DEFAULT_HT2T_TXPWR			0x10
+
+#define	EEPROM_DEFAULT_LEGACYHTTXPOWERDIFF	0x3
+#define	EEPROM_DEFAULT_THERMALMETER			0x18
+#define	EEPROM_DEFAULT_ANTTXPOWERDIFF		0x0
+#define	EEPROM_DEFAULT_TXPWDIFF_CRYSTALCAP	0x5
+#define	EEPROM_DEFAULT_TXPOWERLEVEL			0x22
+#define	EEPROM_DEFAULT_HT40_2SDIFF			0x0
+#define EEPROM_DEFAULT_HT20_DIFF			2
+#define EEPROM_DEFAULT_HT40_PWRMAXOFFSET	0
+#define EEPROM_DEFAULT_HT20_PWRMAXOFFSET	0
+
+#define RF_OPTION1							0x79
+#define RF_OPTION2							0x7A
+#define RF_OPTION3							0x7B
+#define RF_OPTION4							0xC3
+
+#define EEPROM_DEFAULT_PID					0x1234
+#define EEPROM_DEFAULT_VID					0x5678
+#define EEPROM_DEFAULT_CUSTOMERID			0xAB
+#define EEPROM_DEFAULT_SUBCUSTOMERID		0xCD
+#define EEPROM_DEFAULT_VERSION				0
+
+#define	EEPROM_CHANNEL_PLAN_FCC				0x0
+#define	EEPROM_CHANNEL_PLAN_IC				0x1
+#define	EEPROM_CHANNEL_PLAN_ETSI			0x2
+#define	EEPROM_CHANNEL_PLAN_SPAIN			0x3
+#define	EEPROM_CHANNEL_PLAN_FRANCE			0x4
+#define	EEPROM_CHANNEL_PLAN_MKK				0x5
+#define	EEPROM_CHANNEL_PLAN_MKK1			0x6
+#define	EEPROM_CHANNEL_PLAN_ISRAEL			0x7
+#define	EEPROM_CHANNEL_PLAN_TELEC			0x8
+#define	EEPROM_CHANNEL_PLAN_GLOBAL_DOMAIN	0x9
+#define	EEPROM_CHANNEL_PLAN_WORLD_WIDE_13	0xA
+#define	EEPROM_CHANNEL_PLAN_NCC				0xB
+#define	EEPROM_CHANNEL_PLAN_BY_HW_MASK		0x80
+
+#define EEPROM_CID_DEFAULT					0x0
+#define EEPROM_CID_TOSHIBA					0x4
+#define	EEPROM_CID_CCX						0x10
+#define	EEPROM_CID_QMI						0x0D
+#define EEPROM_CID_WHQL 					0xFE
+
+#define	RTL_EEPROM_ID					0x8129
+
+#define EEPROM_HPON							0x02
+#define EEPROM_CLK							0x06
+#define EEPROM_TESTR						0x08
+
+
+#define EEPROM_TXPOWERCCK			0x10
+#define	EEPROM_TXPOWERHT40_1S		0x16
+#define EEPROM_TXPOWERHT20DIFF		0x1B
+#define EEPROM_TXPOWER_OFDMDIFF		0x1B
+
+
+
+#define	EEPROM_TX_PWR_INX				0x10
+
+#define	EEPROM_CHANNELPLAN					0xB8
+#define	EEPROM_XTAL_8821AE					0xB9
+#define	EEPROM_THERMAL_METER				0xBA
+#define	EEPROM_IQK_LCK_88E					0xBB
+
+#define	EEPROM_RF_BOARD_OPTION			0xC1
+#define	EEPROM_RF_FEATURE_OPTION_88E		0xC2
+#define	EEPROM_RF_BT_SETTING				0xC3
+#define	EEPROM_VERSION					0xC4
+#define	EEPROM_CUSTOMER_ID					0xC5
+#define	EEPROM_RF_ANTENNA_OPT_88E			0xC9
+
+#define	EEPROM_MAC_ADDR					0xD0
+#define EEPROM_VID							0xD6
+#define EEPROM_DID							0xD8
+#define EEPROM_SVID							0xDA
+#define EEPROM_SMID						0xDC
+
+#define	STOPBECON					BIT(6)
+#define	STOPHIGHT					BIT(5)
+#define	STOPMGT						BIT(4)
+#define	STOPVO						BIT(3)
+#define	STOPVI						BIT(2)
+#define	STOPBE						BIT(1)
+#define	STOPBK						BIT(0)
+
+#define	RCR_APPFCS					BIT(31)
+#define	RCR_APP_MIC					BIT(30)
+#define	RCR_APP_ICV					BIT(29)
+#define	RCR_APP_PHYST_RXFF			BIT(28)
+#define	RCR_APP_BA_SSN				BIT(27)
+#define	RCR_NONQOS_VHT				BIT(26)
+#define	RCR_ENMBID					BIT(24)
+#define	RCR_LSIGEN					BIT(23)
+#define	RCR_MFBEN					BIT(22)
+#define	RCR_HTC_LOC_CTRL			BIT(14)
+#define	RCR_AMF						BIT(13)
+#define	RCR_ACF						BIT(12)
+#define	RCR_ADF						BIT(11)
+#define	RCR_AICV					BIT(9)
+#define	RCR_ACRC32					BIT(8)
+#define	RCR_CBSSID_BCN				BIT(7)
+#define	RCR_CBSSID_DATA				BIT(6)
+#define	RCR_CBSSID					RCR_CBSSID_DATA
+#define	RCR_APWRMGT					BIT(5)
+#define	RCR_ADD3					BIT(4)
+#define	RCR_AB						BIT(3)
+#define	RCR_AM						BIT(2)
+#define	RCR_APM						BIT(1)
+#define	RCR_AAP						BIT(0)
+#define	RCR_MXDMA_OFFSET			8
+#define	RCR_FIFO_OFFSET				13
+
+#define RSV_CTRL					0x001C
+#define RD_CTRL						0x0524
+
+#define REG_USB_VID					0xFE60
+#define REG_USB_PID					0xFE62
+#define REG_USB_OPTIONAL			0xFE64
+#define REG_USB_CHIRP_K				0xFE65
+#define REG_USB_PHY					0xFE66
+#define REG_USB_MAC_ADDR			0xFE70
+#define REG_USB_HRPWM				0xFE58
+#define REG_USB_HCPWM				0xFE57
+
+#define SW18_FPWM					BIT(3)
+
+#define ISO_MD2PP					BIT(0)
+#define ISO_UA2USB					BIT(1)
+#define ISO_UD2CORE					BIT(2)
+#define ISO_PA2PCIE					BIT(3)
+#define ISO_PD2CORE					BIT(4)
+#define ISO_IP2MAC					BIT(5)
+#define ISO_DIOP					BIT(6)
+#define ISO_DIOE					BIT(7)
+#define ISO_EB2CORE					BIT(8)
+#define ISO_DIOR					BIT(9)
+
+#define PWC_EV25V					BIT(14)
+#define PWC_EV12V					BIT(15)
+
+#define FEN_BBRSTB					BIT(0)
+#define FEN_BB_GLB_RSTN				BIT(1)
+#define FEN_USBA					BIT(2)
+#define FEN_UPLL					BIT(3)
+#define FEN_USBD					BIT(4)
+#define FEN_DIO_PCIE				BIT(5)
+#define FEN_PCIEA					BIT(6)
+#define FEN_PPLL					BIT(7)
+#define FEN_PCIED					BIT(8)
+#define FEN_DIOE					BIT(9)
+#define FEN_CPUEN					BIT(10)
+#define FEN_DCORE					BIT(11)
+#define FEN_ELDR					BIT(12)
+#define FEN_DIO_RF					BIT(13)
+#define FEN_HWPDN					BIT(14)
+#define FEN_MREGEN					BIT(15)
+
+#define PFM_LDALL					BIT(0)
+#define PFM_ALDN					BIT(1)
+#define PFM_LDKP					BIT(2)
+#define PFM_WOWL					BIT(3)
+#define EnPDN						BIT(4)
+#define PDN_PL						BIT(5)
+#define APFM_ONMAC					BIT(8)
+#define APFM_OFF					BIT(9)
+#define APFM_RSM					BIT(10)
+#define AFSM_HSUS					BIT(11)
+#define AFSM_PCIE					BIT(12)
+#define APDM_MAC					BIT(13)
+#define APDM_HOST					BIT(14)
+#define APDM_HPDN					BIT(15)
+#define RDY_MACON					BIT(16)
+#define SUS_HOST					BIT(17)
+#define ROP_ALD						BIT(20)
+#define ROP_PWR						BIT(21)
+#define ROP_SPS						BIT(22)
+#define SOP_MRST					BIT(25)
+#define SOP_FUSE					BIT(26)
+#define SOP_ABG						BIT(27)
+#define SOP_AMB						BIT(28)
+#define SOP_RCK						BIT(29)
+#define SOP_A8M						BIT(30)
+#define XOP_BTCK					BIT(31)
+
+#define ANAD16V_EN					BIT(0)
+#define ANA8M						BIT(1)
+#define MACSLP						BIT(4)
+#define LOADER_CLK_EN				BIT(5)
+#define _80M_SSC_DIS				BIT(7)
+#define _80M_SSC_EN_HO				BIT(8)
+#define PHY_SSC_RSTB				BIT(9)
+#define SEC_CLK_EN					BIT(10)
+#define MAC_CLK_EN					BIT(11)
+#define SYS_CLK_EN					BIT(12)
+#define RING_CLK_EN					BIT(13)
+
+#define	BOOT_FROM_EEPROM			BIT(4)
+#define	EEPROM_EN					BIT(5)
+
+#define AFE_BGEN					BIT(0)
+#define AFE_MBEN					BIT(1)
+#define MAC_ID_EN					BIT(7)
+
+#define WLOCK_ALL					BIT(0)
+#define WLOCK_00					BIT(1)
+#define WLOCK_04					BIT(2)
+#define WLOCK_08					BIT(3)
+#define WLOCK_40					BIT(4)
+#define R_DIS_PRST_0				BIT(5)
+#define R_DIS_PRST_1				BIT(6)
+#define LOCK_ALL_EN					BIT(7)
+
+#define RF_EN						BIT(0)
+#define RF_RSTB						BIT(1)
+#define RF_SDMRSTB					BIT(2)
+
+#define LDA15_EN					BIT(0)
+#define LDA15_STBY					BIT(1)
+#define LDA15_OBUF					BIT(2)
+#define LDA15_REG_VOS				BIT(3)
+#define _LDA15_VOADJ(x)				(((x) & 0x7) << 4)
+
+#define LDV12_EN					BIT(0)
+#define LDV12_SDBY					BIT(1)
+#define LPLDO_HSM					BIT(2)
+#define LPLDO_LSM_DIS				BIT(3)
+#define _LDV12_VADJ(x)				(((x) & 0xF) << 4)
+
+#define XTAL_EN						BIT(0)
+#define XTAL_BSEL					BIT(1)
+#define _XTAL_BOSC(x)				(((x) & 0x3) << 2)
+#define _XTAL_CADJ(x)				(((x) & 0xF) << 4)
+#define XTAL_GATE_USB				BIT(8)
+#define _XTAL_USB_DRV(x)			(((x) & 0x3) << 9)
+#define XTAL_GATE_AFE				BIT(11)
+#define _XTAL_AFE_DRV(x)			(((x) & 0x3) << 12)
+#define XTAL_RF_GATE				BIT(14)
+#define _XTAL_RF_DRV(x)				(((x) & 0x3) << 15)
+#define XTAL_GATE_DIG				BIT(17)
+#define _XTAL_DIG_DRV(x)			(((x) & 0x3) << 18)
+#define XTAL_BT_GATE				BIT(20)
+#define _XTAL_BT_DRV(x)				(((x) & 0x3) << 21)
+#define _XTAL_GPIO(x)				(((x) & 0x7) << 23)
+
+#define CKDLY_AFE					BIT(26)
+#define CKDLY_USB					BIT(27)
+#define CKDLY_DIG					BIT(28)
+#define CKDLY_BT					BIT(29)
+
+#define APLL_EN						BIT(0)
+#define APLL_320_EN					BIT(1)
+#define APLL_FREF_SEL				BIT(2)
+#define APLL_EDGE_SEL				BIT(3)
+#define APLL_WDOGB					BIT(4)
+#define APLL_LPFEN					BIT(5)
+
+#define APLL_REF_CLK_13MHZ			0x1
+#define APLL_REF_CLK_19_2MHZ		0x2
+#define APLL_REF_CLK_20MHZ			0x3
+#define APLL_REF_CLK_25MHZ			0x4
+#define APLL_REF_CLK_26MHZ			0x5
+#define APLL_REF_CLK_38_4MHZ		0x6
+#define APLL_REF_CLK_40MHZ			0x7
+
+#define APLL_320EN					BIT(14)
+#define APLL_80EN					BIT(15)
+#define APLL_1MEN					BIT(24)
+
+#define ALD_EN						BIT(18)
+#define EF_PD						BIT(19)
+#define EF_FLAG						BIT(31)
+
+#define EF_TRPT						BIT(7)
+#define LDOE25_EN					BIT(31)
+
+#define RSM_EN						BIT(0)
+#define Timer_EN					BIT(4)
+
+#define TRSW0EN						BIT(2)
+#define TRSW1EN						BIT(3)
+#define EROM_EN						BIT(4)
+#define EnBT						BIT(5)
+#define EnUart						BIT(8)
+#define Uart_910					BIT(9)
+#define EnPMAC						BIT(10)
+#define SIC_SWRST					BIT(11)
+#define EnSIC						BIT(12)
+#define SIC_23						BIT(13)
+#define EnHDP						BIT(14)
+#define SIC_LBK						BIT(15)
+
+#define LED0PL 						BIT(4)
+#define LED1PL 						BIT(12)
+#define LED0DIS						BIT(7)
+
+#define MCUFWDL_EN					BIT(0)
+#define MCUFWDL_RDY					BIT(1)
+#define FWDL_CHKSUM_RPT				BIT(2)
+#define MACINI_RDY					BIT(3)
+#define BBINI_RDY					BIT(4)
+#define RFINI_RDY					BIT(5)
+#define WINTINI_RDY					BIT(6)
+#define CPRST						BIT(23)
+
+#define XCLK_VLD					BIT(0)
+#define ACLK_VLD					BIT(1)
+#define UCLK_VLD					BIT(2)
+#define PCLK_VLD					BIT(3)
+#define PCIRSTB						BIT(4)
+#define V15_VLD						BIT(5)
+#define TRP_B15V_EN					BIT(7)
+#define SIC_IDLE					BIT(8)
+#define BD_MAC2						BIT(9)
+#define BD_MAC1						BIT(10)
+#define IC_MACPHY_MODE				BIT(11)
+#define VENDOR_ID					BIT(19)
+#define PAD_HWPD_IDN				BIT(22)
+#define TRP_VAUX_EN					BIT(23)
+#define TRP_BT_EN					BIT(24)
+#define BD_PKG_SEL					BIT(25)
+#define BD_HCI_SEL					BIT(26)
+#define TYPE_ID						BIT(27)
+
+#define CHIP_VER_RTL_MASK			0xF000
+#define CHIP_VER_RTL_SHIFT			12
+
+#define REG_LBMODE					(REG_CR + 3)
+
+#define HCI_TXDMA_EN				BIT(0)
+#define HCI_RXDMA_EN				BIT(1)
+#define TXDMA_EN					BIT(2)
+#define RXDMA_EN					BIT(3)
+#define PROTOCOL_EN					BIT(4)
+#define SCHEDULE_EN					BIT(5)
+#define MACTXEN						BIT(6)
+#define MACRXEN						BIT(7)
+#define ENSWBCN						BIT(8)
+#define ENSEC						BIT(9)
+
+#define _NETTYPE(x)					(((x) & 0x3) << 16)
+#define MASK_NETTYPE				0x30000
+#define NT_NO_LINK					0x0
+#define NT_LINK_AD_HOC				0x1
+#define NT_LINK_AP					0x2
+#define NT_AS_AP					0x3
+
+#define _LBMODE(x)					(((x) & 0xF) << 24)
+#define MASK_LBMODE					0xF000000
+#define LOOPBACK_NORMAL				0x0
+#define LOOPBACK_IMMEDIATELY		0xB
+#define LOOPBACK_MAC_DELAY			0x3
+#define LOOPBACK_PHY				0x1
+#define LOOPBACK_DMA				0x7
+
+#define GET_RX_PAGE_SIZE(value)		((value) & 0xF)
+#define GET_TX_PAGE_SIZE(value)		(((value) & 0xF0) >> 4)
+#define _PSRX_MASK					0xF
+#define _PSTX_MASK					0xF0
+#define _PSRX(x)					(x)
+#define _PSTX(x)					((x) << 4)
+
+#define PBP_64						0x0
+#define PBP_128						0x1
+#define PBP_256						0x2
+#define PBP_512						0x3
+#define PBP_1024					0x4
+
+#define RXDMA_ARBBW_EN				BIT(0)
+#define RXSHFT_EN					BIT(1)
+#define RXDMA_AGG_EN				BIT(2)
+#define QS_VO_QUEUE					BIT(8)
+#define QS_VI_QUEUE					BIT(9)
+#define QS_BE_QUEUE					BIT(10)
+#define QS_BK_QUEUE					BIT(11)
+#define QS_MANAGER_QUEUE			BIT(12)
+#define QS_HIGH_QUEUE				BIT(13)
+
+#define HQSEL_VOQ					BIT(0)
+#define HQSEL_VIQ					BIT(1)
+#define HQSEL_BEQ					BIT(2)
+#define HQSEL_BKQ					BIT(3)
+#define HQSEL_MGTQ					BIT(4)
+#define HQSEL_HIQ					BIT(5)
+
+#define _TXDMA_HIQ_MAP(x)			(((x) & 0x3) << 14)
+#define _TXDMA_MGQ_MAP(x)			(((x) & 0x3) << 12)
+#define _TXDMA_BKQ_MAP(x)			(((x) & 0x3) << 10)
+#define _TXDMA_BEQ_MAP(x)			(((x) & 0x3) << 8)
+#define _TXDMA_VIQ_MAP(x)			(((x) & 0x3) << 6)
+#define _TXDMA_VOQ_MAP(x)			(((x) & 0x3) << 4)
+
+#define QUEUE_LOW					1
+#define QUEUE_NORMAL				2
+#define QUEUE_HIGH					3
+
+#define _LLT_NO_ACTIVE				0x0
+#define _LLT_WRITE_ACCESS			0x1
+#define _LLT_READ_ACCESS			0x2
+
+#define _LLT_INIT_DATA(x)			((x) & 0xFF)
+#define _LLT_INIT_ADDR(x)			(((x) & 0xFF) << 8)
+#define _LLT_OP(x)					(((x) & 0x3) << 30)
+#define _LLT_OP_VALUE(x)			(((x) >> 30) & 0x3)
+
+#define BB_WRITE_READ_MASK			(BIT(31) | BIT(30))
+#define BB_WRITE_EN					BIT(30)
+#define BB_READ_EN					BIT(31)
+
+#define _HPQ(x)			((x) & 0xFF)
+#define _LPQ(x)			(((x) & 0xFF) << 8)
+#define _PUBQ(x)		(((x) & 0xFF) << 16)
+#define _NPQ(x)			((x) & 0xFF)
+
+#define HPQ_PUBLIC_DIS		BIT(24)
+#define LPQ_PUBLIC_DIS		BIT(25)
+#define LD_RQPN			BIT(31)
+
+#define BCN_VALID		BIT(16)
+#define BCN_HEAD(x)		(((x) & 0xFF) << 8)
+#define	BCN_HEAD_MASK		0xFF00
+
+#define BLK_DESC_NUM_SHIFT			4
+#define BLK_DESC_NUM_MASK			0xF
+
+#define DROP_DATA_EN				BIT(9)
+
+#define EN_AMPDU_RTY_NEW			BIT(7)
+
+#define _INIRTSMCS_SEL(x)			((x) & 0x3F)
+
+#define _SPEC_SIFS_CCK(x)			((x) & 0xFF)
+#define _SPEC_SIFS_OFDM(x)			(((x) & 0xFF) << 8)
+
+#define RATE_REG_BITMAP_ALL			0xFFFFF
+
+#define _RRSC_BITMAP(x)				((x) & 0xFFFFF)
+
+#define _RRSR_RSC(x)				(((x) & 0x3) << 21)
+#define RRSR_RSC_RESERVED			0x0
+#define RRSR_RSC_UPPER_SUBCHANNEL	0x1
+#define RRSR_RSC_LOWER_SUBCHANNEL	0x2
+#define RRSR_RSC_DUPLICATE_MODE		0x3
+
+#define USE_SHORT_G1				BIT(20)
+
+#define _AGGLMT_MCS0(x)				((x) & 0xF)
+#define _AGGLMT_MCS1(x)				(((x) & 0xF) << 4)
+#define _AGGLMT_MCS2(x)				(((x) & 0xF) << 8)
+#define _AGGLMT_MCS3(x)				(((x) & 0xF) << 12)
+#define _AGGLMT_MCS4(x)				(((x) & 0xF) << 16)
+#define _AGGLMT_MCS5(x)				(((x) & 0xF) << 20)
+#define _AGGLMT_MCS6(x)				(((x) & 0xF) << 24)
+#define _AGGLMT_MCS7(x)				(((x) & 0xF) << 28)
+
+#define	RETRY_LIMIT_SHORT_SHIFT		8
+#define	RETRY_LIMIT_LONG_SHIFT		0
+
+#define _DARF_RC1(x)				((x) & 0x1F)
+#define _DARF_RC2(x)				(((x) & 0x1F) << 8)
+#define _DARF_RC3(x)				(((x) & 0x1F) << 16)
+#define _DARF_RC4(x)				(((x) & 0x1F) << 24)
+#define _DARF_RC5(x)				((x) & 0x1F)
+#define _DARF_RC6(x)				(((x) & 0x1F) << 8)
+#define _DARF_RC7(x)				(((x) & 0x1F) << 16)
+#define _DARF_RC8(x)				(((x) & 0x1F) << 24)
+
+#define _RARF_RC1(x)				((x) & 0x1F)
+#define _RARF_RC2(x)				(((x) & 0x1F) << 8)
+#define _RARF_RC3(x)				(((x) & 0x1F) << 16)
+#define _RARF_RC4(x)				(((x) & 0x1F) << 24)
+#define _RARF_RC5(x)				((x) & 0x1F)
+#define _RARF_RC6(x)				(((x) & 0x1F) << 8)
+#define _RARF_RC7(x)				(((x) & 0x1F) << 16)
+#define _RARF_RC8(x)				(((x) & 0x1F) << 24)
+
+#define AC_PARAM_TXOP_LIMIT_OFFSET	16
+#define AC_PARAM_ECW_MAX_OFFSET		12
+#define AC_PARAM_ECW_MIN_OFFSET		8
+#define AC_PARAM_AIFS_OFFSET		0
+
+#define _AIFS(x)					(x)
+#define _ECW_MAX_MIN(x)				((x) << 8)
+#define _TXOP_LIMIT(x)				((x) << 16)
+
+#define _BCNIFS(x)					((x) & 0xFF)
+#define _BCNECW(x)					((((x) & 0xF))<< 8)
+
+#define _LRL(x)						((x) & 0x3F)
+#define _SRL(x)						(((x) & 0x3F) << 8)
+
+#define _SIFS_CCK_CTX(x)			((x) & 0xFF)
+#define _SIFS_CCK_TRX(x)			(((x) & 0xFF) << 8)
+
+#define _SIFS_OFDM_CTX(x)			((x) & 0xFF)
+#define _SIFS_OFDM_TRX(x)			(((x) & 0xFF) << 8)
+
+#define _TBTT_PROHIBIT_HOLD(x)		(((x) & 0xFF) << 8)
+
+#define DIS_EDCA_CNT_DWN			BIT(11)
+
+#define EN_MBSSID					BIT(1)
+#define EN_TXBCN_RPT				BIT(2)
+#define	EN_BCN_FUNCTION				BIT(3)
+
+#define TSFTR_RST					BIT(0)
+#define TSFTR1_RST					BIT(1)
+
+#define STOP_BCNQ					BIT(6)
+
+#define	DIS_TSF_UDT0_NORMAL_CHIP	BIT(4)
+#define	DIS_TSF_UDT0_TEST_CHIP		BIT(5)
+
+#define	AcmHw_HwEn					BIT(0)
+#define	AcmHw_BeqEn					BIT(1)
+#define	AcmHw_ViqEn					BIT(2)
+#define	AcmHw_VoqEn					BIT(3)
+#define	AcmHw_BeqStatus				BIT(4)
+#define	AcmHw_ViqStatus				BIT(5)
+#define	AcmHw_VoqStatus				BIT(6)
+
+#define APSDOFF						BIT(6)
+#define APSDOFF_STATUS				BIT(7)
+
+#define BW_20MHZ					BIT(2)
+
+#define RATE_BITMAP_ALL				0xFFFFF
+
+#define RATE_RRSR_CCK_ONLY_1M		0xFFFF1
+
+#define TSFRST						BIT(0)
+#define DIS_GCLK					BIT(1)
+#define PAD_SEL						BIT(2)
+#define PWR_ST						BIT(6)
+#define PWRBIT_OW_EN				BIT(7)
+#define ACRC						BIT(8)
+#define CFENDFORM					BIT(9)
+#define ICV							BIT(10)
+
+#define AAP							BIT(0)
+#define APM							BIT(1)
+#define AM							BIT(2)
+#define AB							BIT(3)
+#define ADD3						BIT(4)
+#define APWRMGT						BIT(5)
+#define CBSSID						BIT(6)
+#define CBSSID_DATA					BIT(6)
+#define CBSSID_BCN					BIT(7)
+#define ACRC32						BIT(8)
+#define AICV						BIT(9)
+#define ADF							BIT(11)
+#define ACF							BIT(12)
+#define AMF							BIT(13)
+#define HTC_LOC_CTRL				BIT(14)
+#define UC_DATA_EN					BIT(16)
+#define BM_DATA_EN					BIT(17)
+#define MFBEN						BIT(22)
+#define LSIGEN						BIT(23)
+#define EnMBID						BIT(24)
+#define APP_BASSN					BIT(27)
+#define APP_PHYSTS					BIT(28)
+#define APP_ICV						BIT(29)
+#define APP_MIC						BIT(30)
+#define APP_FCS						BIT(31)
+
+#define _MIN_SPACE(x)				((x) & 0x7)
+#define _SHORT_GI_PADDING(x)		(((x) & 0x1F) << 3)
+
+#define RXERR_TYPE_OFDM_PPDU		0
+#define RXERR_TYPE_OFDM_FALSE_ALARM	1
+#define	RXERR_TYPE_OFDM_MPDU_OK		2
+#define RXERR_TYPE_OFDM_MPDU_FAIL	3
+#define RXERR_TYPE_CCK_PPDU			4
+#define RXERR_TYPE_CCK_FALSE_ALARM	5
+#define RXERR_TYPE_CCK_MPDU_OK		6
+#define RXERR_TYPE_CCK_MPDU_FAIL	7
+#define RXERR_TYPE_HT_PPDU			8
+#define RXERR_TYPE_HT_FALSE_ALARM	9
+#define RXERR_TYPE_HT_MPDU_TOTAL	10
+#define RXERR_TYPE_HT_MPDU_OK		11
+#define RXERR_TYPE_HT_MPDU_FAIL		12
+#define RXERR_TYPE_RX_FULL_DROP		15
+
+#define RXERR_COUNTER_MASK			0xFFFFF
+#define RXERR_RPT_RST				BIT(27)
+#define _RXERR_RPT_SEL(type)		((type) << 28)
+
+#define	SCR_TxUseDK					BIT(0)
+#define	SCR_RxUseDK					BIT(1)
+#define	SCR_TxEncEnable				BIT(2)
+#define	SCR_RxDecEnable				BIT(3)
+#define	SCR_SKByA2					BIT(4)
+#define	SCR_NoSKMC					BIT(5)
+#define SCR_TXBCUSEDK				BIT(6)
+#define SCR_RXBCUSEDK				BIT(7)
+
+#define XCLK_VLD					BIT(0)
+#define ACLK_VLD					BIT(1)
+#define UCLK_VLD					BIT(2)
+#define PCLK_VLD					BIT(3)
+#define PCIRSTB						BIT(4)
+#define V15_VLD						BIT(5)
+#define TRP_B15V_EN					BIT(7)
+#define SIC_IDLE					BIT(8)
+#define BD_MAC2						BIT(9)
+#define BD_MAC1						BIT(10)
+#define IC_MACPHY_MODE				BIT(11)
+#define BT_FUNC						BIT(16)
+#define VENDOR_ID					BIT(19)
+#define PAD_HWPD_IDN				BIT(22)
+#define TRP_VAUX_EN					BIT(23)
+#define TRP_BT_EN					BIT(24)
+#define BD_PKG_SEL					BIT(25)
+#define BD_HCI_SEL					BIT(26)
+#define TYPE_ID						BIT(27)
+
+#define USB_IS_HIGH_SPEED			0
+#define USB_IS_FULL_SPEED			1
+#define USB_SPEED_MASK				BIT(5)
+
+#define USB_NORMAL_SIE_EP_MASK		0xF
+#define USB_NORMAL_SIE_EP_SHIFT		4
+
+#define USB_TEST_EP_MASK			0x30
+#define USB_TEST_EP_SHIFT			4
+
+#define USB_AGG_EN					BIT(3)
+
+#define MAC_ADDR_LEN				6
+#define LAST_ENTRY_OF_TX_PKT_BUFFER	175/*255    88e*/
+
+#define POLLING_LLT_THRESHOLD		20
+#define POLLING_READY_TIMEOUT_COUNT		3000
+
+#define	MAX_MSS_DENSITY_2T			0x13
+#define	MAX_MSS_DENSITY_1T			0x0A
+
+#define EPROM_CMD_OPERATING_MODE_MASK 	((1<<7)|(1<<6))
+#define EPROM_CMD_CONFIG 			0x3
+#define EPROM_CMD_LOAD 				1
+
+#define	HWSET_MAX_SIZE_92S			HWSET_MAX_SIZE
+
+#define	HAL_8192C_HW_GPIO_WPS_BIT	BIT(2)
+
+#define RA_LSSIWRITE_8821A			0xc90
+#define RB_LSSIWRITE_8821A			0xe90
+
+#define	RA_PIREAD_8821A		0xd04
+#define	RB_PIREAD_8821A		0xd44
+#define	RA_SIREAD_8821A		0xd08
+#define	RB_SIREAD_8821A		0xd48
+
+#define	RPMAC_RESET					0x100
+#define	RPMAC_TXSTART				0x104
+#define	RPMAC_TXLEGACYSIG			0x108
+#define	RPMAC_TXHTSIG1				0x10c
+#define	RPMAC_TXHTSIG2				0x110
+#define	RPMAC_PHYDEBUG				0x114
+#define	RPMAC_TXPACKETNUM			0x118
+#define	RPMAC_TXIDLE				0x11c
+#define	RPMAC_TXMACHEADER0			0x120
+#define	RPMAC_TXMACHEADER1			0x124
+#define	RPMAC_TXMACHEADER2			0x128
+#define	RPMAC_TXMACHEADER3			0x12c
+#define	RPMAC_TXMACHEADER4			0x130
+#define	RPMAC_TXMACHEADER5			0x134
+#define	RPMAC_TXDADATYPE			0x138
+#define	RPMAC_TXRANDOMSEED			0x13c
+#define	RPMAC_CCKPLCPPREAMBLE		0x140
+#define	RPMAC_CCKPLCPHEADER			0x144
+#define	RPMAC_CCKCRC16				0x148
+#define	RPMAC_OFDMRXCRC32OK			0x170
+#define	RPMAC_OFDMRXCRC32Er			0x174
+#define	RPMAC_OFDMRXPARITYER		0x178
+#define	RPMAC_OFDMRXCRC8ER			0x17c
+#define	RPMAC_CCKCRXRC16ER			0x180
+#define	RPMAC_CCKCRXRC32ER			0x184
+#define	RPMAC_CCKCRXRC32OK			0x188
+#define	RPMAC_TXSTATUS				0x18c
+
+#define	RFPGA0_RFMOD				0x800
+
+#define	RFPGA0_TXINFO				0x804
+#define	RFPGA0_PSDFUNCTION			0x808
+
+#define	RFPGA0_TXGAINSTAGE			0x80c
+
+#define	RFPGA0_RFTIMING1			0x810
+#define	RFPGA0_RFTIMING2			0x814
+
+#define	RFPGA0_XA_HSSIPARAMETER1	0x820
+#define	RFPGA0_XA_HSSIPARAMETER2	0x824
+#define	RFPGA0_XB_HSSIPARAMETER1	0x828
+#define	RFPGA0_XB_HSSIPARAMETER2	0x82c
+#define 	RCCAONSEC					0x838
+
+#define	RFPGA0_XA_LSSIPARAMETER		0x840
+#define	RFPGA0_XB_LSSIPARAMETER		0x844
+#define	RL1PEAKTH					0x848
+
+#define	RFPGA0_RFWAKEUPPARAMETER	0x850
+#define	RFPGA0_RFSLEEPUPPARAMETER	0x854
+
+#define	RFPGA0_XAB_SWITCHCONTROL	0x858
+#define	RFPGA0_XCD_SWITCHCONTROL	0x85c
+
+#define	RFPGA0_XA_RFINTERFACEOE		0x860
+#define RFC_AREA					0x860
+#define	RFPGA0_XB_RFINTERFACEOE		0x864
+
+#define	RFPGA0_XAB_RFINTERFACESW	0x870
+#define	RFPGA0_XCD_RFINTERFACESW	0x874
+
+#define	rFPGA0_XAB_RFPARAMETER		0x878
+#define	rFPGA0_XCD_RFPARAMETER		0x87c
+
+#define	RFPGA0_ANALOGPARAMETER1		0x880
+#define	RFPGA0_ANALOGPARAMETER2		0x884
+#define	RFPGA0_ANALOGPARAMETER3		0x888
+#define	RFPGA0_ANALOGPARAMETER4		0x88c
+
+#define	RFPGA0_XA_LSSIREADBACK		0x8a0
+#define	RFPGA0_XB_LSSIREADBACK		0x8a4
+#define	RFPGA0_XC_LSSIREADBACK		0x8a8
+/* #define	RFPGA0_XD_LSSIREADBACK		0x8ac */
+#define RRFMOD						0x8ac
+#define	RHSSIREAD_8821AE			0x8b0
+
+#define	RFPGA0_PSDREPORT			0x8b4
+#define	TRANSCEIVEA_HSPI_READBACK	0x8b8
+#define	TRANSCEIVEB_HSPI_READBACK	0x8bc
+/* #define	REG_SC_CNT					0x8c4 */
+#define RADC_BUF_CLK				0x8c4
+#define	RFPGA0_XAB_RFINTERFACERB	0x8e0
+#define	RFPGA0_XCD_RFINTERFACERB	0x8e4
+
+#define	RFPGA1_RFMOD				0x900
+
+#define	RFPGA1_TXBLOCK				0x904
+#define	RFPGA1_DEBUGSELECT			0x908
+#define	RFPGA1_TXINFO				0x90c
+
+#define	RCCK_SYSTEM					0xa00
+#define	BCCK_SYSTEM					0x10
+
+
+#define	RCCK0_AFESETTING			0xa04
+#define	RCCK0_CCA					0xa08
+
+#define	RCCK0_RXAGC1				0xa0c
+#define	RCCK0_RXAGC2				0xa10
+
+#define	RCCK0_RXHP					0xa14
+
+#define	RCCK0_DSPPARAMETER1			0xa18
+#define	RCCK0_DSPPARAMETER2			0xa1c
+
+#define	RCCK0_TXFILTER1				0xa20
+#define	RCCK0_TXFILTER2				0xa24
+#define	RCCK0_DEBUGPORT				0xa28
+#define	RCCK0_FALSEALARMREPORT		0xa2c
+#define	RCCK0_TRSSIREPORT         	0xa50
+#define	RCCK0_RXREPORT            	0xa54
+#define	RCCK0_FACOUNTERLOWER      	0xa5c
+#define	RCCK0_FACOUNTERUPPER      	0xa58
+#define	RCCK0_CCA_CNT			0xa60
+
+
+/* PageB(0xB00) */
+#define	rPdp_AntA      					0xb00
+#define	rPdp_AntA_4    				0xb04
+#define	rPdp_AntA_8    				0xb08
+#define	rPdp_AntA_C    				0xb0c
+#define	rPdp_AntA_10    				0xb10
+#define	rPdp_AntA_14    				0xb14
+#define	rPdp_AntA_18    				0xb18
+#define	rPdp_AntA_1C    				0xb1c
+#define	rPdp_AntA_20    				0xb20
+#define	rPdp_AntA_24    				0xb24
+
+#define	rConfig_Pmpd_AntA 			0xb28
+#define	rConfig_ram64x16				0xb2c
+
+#define	rBndA						0xb30
+#define	rHssiPar						0xb34
+
+#define	rConfig_AntA 					0xb68
+#define	rConfig_AntB 					0xb6c
+
+#define	rPdp_AntB 					0xb70
+#define	rPdp_AntB_4 					0xb74
+#define	rPdp_AntB_8 					0xb78
+#define	rPdp_AntB_C 					0xb7c
+#define	rPdp_AntB_10 					0xb80
+#define	rPdp_AntB_14 					0xb84
+#define	rPdp_AntB_18 					0xb88
+#define	rPdp_AntB_1C 					0xb8c
+#define	rPdp_AntB_20 					0xb90
+#define	rPdp_AntB_24 					0xb94
+
+#define	rConfig_Pmpd_AntB			0xb98
+
+#define	rBndB						0xba0
+
+#define	rAPK							0xbd8
+#define	rPm_Rx0_AntA				0xbdc
+#define	rPm_Rx1_AntA				0xbe0
+#define	rPm_Rx2_AntA				0xbe4
+#define	rPm_Rx3_AntA				0xbe8
+#define	rPm_Rx0_AntB				0xbec
+#define	rPm_Rx1_AntB				0xbf0
+#define	rPm_Rx2_AntB				0xbf4
+#define	rPm_Rx3_AntB				0xbf8
+
+/*RSSI Dump*/
+#define		RA_RSSI_DUMP		0xBF0
+#define		RB_RSSI_DUMP		0xBF1
+#define		RS1_RX_EVM_DUMP	0xBF4
+#define		RS2_RX_EVM_DUMP	0xBF5
+#define		RA_RX_SNR_DUMP		0xBF6
+#define		RB_RX_SNR_DUMP		0xBF7
+#define		RA_CFO_SHORT_DUMP	0xBF8
+#define		RB_CFO_SHORT_DUMP	0xBFA
+#define		RA_CFO_LONG_DUMP	0xBEC
+#define		RB_CFO_LONG_DUMP	0xBEE
+
+/*Page C*/
+#define	ROFDM0_LSTF					0xc00
+
+#define	ROFDM0_TRXPATHENABLE		0xc04
+#define	ROFDM0_TRMUXPAR				0xc08
+#define	ROFDM0_TRSWISOLATION		0xc0c
+
+#define	ROFDM0_XARXAFE				0xc10
+#define	ROFDM0_XARXIQIMBALANCE    	0xc14
+#define	ROFDM0_XBRXAFE            	0xc18
+#define	ROFDM0_XBRXIQIMBALANCE    	0xc1c
+#define	ROFDM0_XCRXAFE            	0xc20
+#define	ROFDM0_XCRXIQIMBANLANCE    	0xc24
+#define	ROFDM0_XDRXAFE            	0xc28
+#define	ROFDM0_XDRXIQIMBALANCE    	0xc2c
+
+#define	ROFDM0_RXDETECTOR1			0xc30
+#define	ROFDM0_RXDETECTOR2			0xc34
+#define	ROFDM0_RXDETECTOR3			0xc38
+#define	ROFDM0_RXDETECTOR4			0xc3c
+
+#define	ROFDM0_RXDSP				0xc40
+#define	ROFDM0_CFOANDDAGC			0xc44
+#define	ROFDM0_CCADROPTHRESHOLD		0xc48
+#define	ROFDM0_ECCATHRESHOLD		0xc4c
+
+#define	ROFDM0_XAAGCCORE1			0xc50
+#define	ROFDM0_XAAGCCORE2			0xc54
+#define	ROFDM0_XBAGCCORE1			0xc58
+#define	ROFDM0_XBAGCCORE2			0xc5c
+#define	ROFDM0_XCAGCCORE1			0xc60
+#define	ROFDM0_XCAGCCORE2			0xc64
+#define	ROFDM0_XDAGCCORE1			0xc68
+#define	ROFDM0_XDAGCCORE2			0xc6c
+
+#define	ROFDM0_AGCPARAMETER1		0xc70
+#define	ROFDM0_AGCPARAMETER2		0xc74
+#define	ROFDM0_AGCRSSITABLE			0xc78
+#define	ROFDM0_HTSTFAGC				0xc7c
+
+#define	ROFDM0_XATXIQIMBALANCE		0xc80
+#define	ROFDM0_XATXAFE				0xc84
+#define	ROFDM0_XBTXIQIMBALANCE		0xc88
+#define	ROFDM0_XBTXAFE				0xc8c
+#define	ROFDM0_XCTXIQIMBALANCE		0xc90
+#define	ROFDM0_XCTXAFE            	0xc94
+#define	ROFDM0_XDTXIQIMBALANCE		0xc98
+#define	ROFDM0_XDTXAFE				0xc9c
+
+#define ROFDM0_RXIQEXTANTA			0xca0
+#define	ROFDM0_TXCOEFF1				0xca4
+#define	ROFDM0_TXCOEFF2				0xca8
+#define	ROFDM0_TXCOEFF3				0xcac
+#define	ROFDM0_TXCOEFF4				0xcb0
+#define	ROFDM0_TXCOEFF5				0xcb4
+#define	ROFDM0_TXCOEFF6				0xcb8
+
+/* Path_A RFE control */
+#define	RA_RFE_CTRL_8812				0xcb8
+/* Path_B RFE control */
+#define	RB_RFE_CTRL_8812				0xeb8
+
+#define	ROFDM0_RXHPPARAMETER		0xce0
+#define	ROFDM0_TXPSEUDONOISEWGT		0xce4
+#define	ROFDM0_FRAMESYNC			0xcf0
+#define	ROFDM0_DFSREPORT			0xcf4
+
+
+#define	ROFDM1_LSTF					0xd00
+#define	ROFDM1_TRXPATHENABLE		0xd04
+
+#define	ROFDM1_CF0					0xd08
+#define	ROFDM1_CSI1					0xd10
+#define	ROFDM1_SBD					0xd14
+#define	ROFDM1_CSI2					0xd18
+#define	ROFDM1_CFOTRACKING			0xd2c
+#define	ROFDM1_TRXMESAURE1			0xd34
+#define	ROFDM1_INTFDET				0xd3c
+#define	ROFDM1_PSEUDONOISESTATEAB	0xd50
+#define	ROFDM1_PSEUDONOISESTATECD	0xd54
+#define	ROFDM1_RXPSEUDONOISEWGT		0xd58
+
+#define	ROFDM_PHYCOUNTER1			0xda0
+#define	ROFDM_PHYCOUNTER2			0xda4
+#define	ROFDM_PHYCOUNTER3			0xda8
+
+#define	ROFDM_SHORTCFOAB			0xdac
+#define	ROFDM_SHORTCFOCD			0xdb0
+#define	ROFDM_LONGCFOAB				0xdb4
+#define	ROFDM_LONGCFOCD				0xdb8
+#define	ROFDM_TAILCF0AB				0xdbc
+#define	ROFDM_TAILCF0CD				0xdc0
+#define	ROFDM_PWMEASURE1          	0xdc4
+#define	ROFDM_PWMEASURE2          	0xdc8
+#define	ROFDM_BWREPORT				0xdcc
+#define	ROFDM_AGCREPORT				0xdd0
+#define	ROFDM_RXSNR					0xdd4
+#define	ROFDM_RXEVMCSI				0xdd8
+#define	ROFDM_SIGREPORT				0xddc
+
+#define RTXAGC_A_CCK11_CCK1			0xc20
+#define RTXAGC_A_OFDM18_OFDM6		0xc24
+#define RTXAGC_A_OFDM54_OFDM24		0xc28
+#define RTXAGC_A_MCS03_MCS00			0xc2c
+#define RTXAGC_A_MCS07_MCS04			0xc30
+#define RTXAGC_A_MCS11_MCS08			0xc34
+#define RTXAGC_A_MCS15_MCS12		0xc38
+#define RTXAGC_A_NSS1INDEX3_NSS1INDEX0	0xc3c
+#define	RTXAGC_A_NSS1INDEX7_NSS1INDEX4	0xc40
+#define	RTXAGC_A_NSS2INDEX1_NSS1INDEX8	0xc44
+#define	RTXAGC_A_NSS2INDEX5_NSS2INDEX2	0xc48
+#define	RTXAGC_A_NSS2INDEX9_NSS2INDEX6	0xc4c
+#define	RTXAGC_B_CCK11_CCK1			0xe20
+#define	RTXAGC_B_OFDM18_OFDM6		0xe24
+#define	RTXAGC_B_OFDM54_OFDM24		0xe28
+#define	RTXAGC_B_MCS03_MCS00			0xe2c
+#define	RTXAGC_B_MCS07_MCS04			0xe30
+#define	RTXAGC_B_MCS11_MCS08			0xe34
+#define	RTXAGC_B_MCS15_MCS12		0xe38
+#define	RTXAGC_B_NSS1INDEX3_NSS1INDEX0	0xe3c
+#define	RTXAGC_B_NSS1INDEX7_NSS1INDEX4	0xe40
+#define	RTXAGC_B_NSS2INDEX1_NSS1INDEX8	0xe44
+#define	RTXAGC_B_NSS2INDEX5_NSS2INDEX2	0xe48
+#define	RTXAGC_B_NSS2INDEX9_NSS2INDEX6	0xe4c
+
+#define	RA_TXPWRTRAING		0xc54
+#define	RB_TXPWRTRAING		0xe54
+
+
+#define	RFPGA0_IQK					0xe28
+#define	RTx_IQK_Tone_A				0xe30
+#define	RRx_IQK_Tone_A				0xe34
+#define	RTx_IQK_PI_A					0xe38
+#define	RRx_IQK_PI_A					0xe3c
+
+#define	RTx_IQK 						0xe40
+#define	RRx_IQK						0xe44
+#define	RIQK_AGC_Pts					0xe48
+#define	RIQK_AGC_Rsp					0xe4c
+#define	RTx_IQK_Tone_B				0xe50
+#define	RRx_IQK_Tone_B				0xe54
+#define	RTx_IQK_PI_B					0xe58
+#define	RRx_IQK_PI_B					0xe5c
+#define	RIQK_AGC_Cont				0xe60
+
+#define	RBlue_Tooth					0xe6c
+#define	RRx_Wait_CCA					0xe70
+#define	RTx_CCK_RFON					0xe74
+#define	RTx_CCK_BBON				0xe78
+#define	RTx_OFDM_RFON				0xe7c
+#define	RTx_OFDM_BBON				0xe80
+#define	RTx_To_Rx					0xe84
+#define	RTx_To_Tx					0xe88
+#define	RRx_CCK						0xe8c
+
+#define	RTx_Power_Before_IQK_A		0xe94
+#define	RTx_Power_After_IQK_A			0xe9c
+
+#define	RRx_Power_Before_IQK_A		0xea0
+#define	RRx_Power_Before_IQK_A_2		0xea4
+#define	RRx_Power_After_IQK_A			0xea8
+#define	RRx_Power_After_IQK_A_2		0xeac
+
+#define	RTx_Power_Before_IQK_B		0xeb4
+#define	RTx_Power_After_IQK_B			0xebc
+
+#define	RRx_Power_Before_IQK_B		0xec0
+#define	RRx_Power_Before_IQK_B_2		0xec4
+#define	RRx_Power_After_IQK_B			0xec8
+#define	RRx_Power_After_IQK_B_2		0xecc
+
+#define	RRx_OFDM					0xed0
+#define	RRx_Wait_RIFS 				0xed4
+#define	RRx_TO_Rx 					0xed8
+#define	RStandby 						0xedc
+#define	RSleep 						0xee0
+#define	RPMPD_ANAEN				0xeec
+
+#define	RZEBRA1_HSSIENABLE			0x0
+#define	RZEBRA1_TRXENABLE1			0x1
+#define	RZEBRA1_TRXENABLE2			0x2
+#define	RZEBRA1_AGC					0x4
+#define	RZEBRA1_CHARGEPUMP			0x5
+#define	RZEBRA1_CHANNEL				0x7
+
+#define	RZEBRA1_TXGAIN				0x8
+#define	RZEBRA1_TXLPF				0x9
+#define	RZEBRA1_RXLPF				0xb
+#define	RZEBRA1_RXHPFCORNER			0xc
+
+#define	RGLOBALCTRL					0
+#define	RRTL8256_TXLPF				19
+#define	RRTL8256_RXLPF				11
+#define	RRTL8258_TXLPF				0x11
+#define	RRTL8258_RXLPF				0x13
+#define	RRTL8258_RSSILPF			0xa
+
+#define	RF_AC						0x00
+
+#define	RF_IQADJ_G1					0x01
+#define	RF_IQADJ_G2					0x02
+#define	RF_POW_TRSW					0x05
+
+#define	RF_GAIN_RX					0x06
+#define	RF_GAIN_TX					0x07
+
+#define	RF_TXM_IDAC					0x08
+#define	RF_BS_IQGEN					0x0F
+
+#define	RF_MODE1					0x10
+#define	RF_MODE2					0x11
+
+#define	RF_RX_AGC_HP				0x12
+#define	RF_TX_AGC					0x13
+#define	RF_BIAS						0x14
+#define	RF_IPA						0x15
+#define	RF_POW_ABILITY				0x17
+#define	RF_MODE_AG					0x18
+#define	RRFCHANNEL					0x18
+#define	RF_CHNLBW					0x18
+#define	RF_TOP						0x19
+
+#define	RF_RX_G1					0x1A
+#define	RF_RX_G2					0x1B
+
+#define	RF_RX_BB2					0x1C
+#define	RF_RX_BB1					0x1D
+
+#define	RF_RCK1						0x1E
+#define	RF_RCK2						0x1F
+
+#define	RF_TX_G1					0x20
+#define	RF_TX_G2					0x21
+#define	RF_TX_G3					0x22
+
+#define	RF_TX_BB1					0x23
+#define	RF_T_METER					0x24
+#define	RF_T_METER_88E				0x42
+#define  RF_T_METER_8812A 		0x42
+
+#define	RF_SYN_G1					0x25
+#define	RF_SYN_G2					0x26
+#define	RF_SYN_G3					0x27
+#define	RF_SYN_G4					0x28
+#define	RF_SYN_G5					0x29
+#define	RF_SYN_G6					0x2A
+#define	RF_SYN_G7					0x2B
+#define	RF_SYN_G8					0x2C
+
+#define	RF_RCK_OS					0x30
+#define	RF_TXPA_G1					0x31
+#define	RF_TXPA_G2					0x32
+#define	RF_TXPA_G3					0x33
+
+#define	RF_TX_BIAS_A					0x35
+#define	RF_TX_BIAS_D					0x36
+#define	RF_LOBF_9					0x38
+#define	RF_RXRF_A3					0x3C
+#define	RF_TRSW						0x3F
+
+#define	RF_TXRF_A2					0x41
+#define	RF_TXPA_G4					0x46
+#define	RF_TXPA_A4					0x4B
+
+#define RF_APK						0x63
+
+#define	RF_WE_LUT					0xEF
+
+#define	BBBRESETB					0x100
+#define	BGLOBALRESETB				0x200
+#define	BOFDMTXSTART				0x4
+#define	BCCKTXSTART					0x8
+#define	BCRC32DEBUG					0x100
+#define	BPMACLOOPBACK				0x10
+#define	BTXLSIG						0xffffff
+#define	BOFDMTXRATE					0xf
+#define	BOFDMTXRESERVED				0x10
+#define	BOFDMTXLENGTH				0x1ffe0
+#define	BOFDMTXPARITY				0x20000
+#define	BTXHTSIG1					0xffffff
+#define	BTXHTMCSRATE				0x7f
+#define	BTXHTBW						0x80
+#define	BTXHTLENGTH					0xffff00
+#define	BTXHTSIG2					0xffffff
+#define	BTXHTSMOOTHING				0x1
+#define	BTXHTSOUNDING				0x2
+#define	BTXHTRESERVED				0x4
+#define	BTXHTAGGREATION				0x8
+#define	BTXHTSTBC					0x30
+#define	BTXHTADVANCECODING			0x40
+#define	BTXHTSHORTGI				0x80
+#define	BTXHTNUMBERHT_LTF			0x300
+#define	BTXHTCRC8					0x3fc00
+#define	BCOUNTERRESET				0x10000
+#define	BNUMOFOFDMTX				0xffff
+#define	BNUMOFCCKTX					0xffff0000
+#define	BTXIDLEINTERVAL				0xffff
+#define	BOFDMSERVICE				0xffff0000
+#define	BTXMACHEADER				0xffffffff
+#define	BTXDATAINIT					0xff
+#define	BTXHTMODE					0x100
+#define	BTXDATATYPE					0x30000
+#define	BTXRANDOMSEED				0xffffffff
+#define	BCCKTXPREAMBLE				0x1
+#define	BCCKTXSFD					0xffff0000
+#define	BCCKTXSIG					0xff
+#define	BCCKTXSERVICE				0xff00
+#define	BCCKLENGTHEXT				0x8000
+#define	BCCKTXLENGHT				0xffff0000
+#define	BCCKTXCRC16					0xffff
+#define	BCCKTXSTATUS				0x1
+#define	BOFDMTXSTATUS				0x2
+#define IS_BB_REG_OFFSET_92S(_Offset)	\
+	(((_Offset) >= 0x800) && ((_Offset) <= 0xfff))
+
+#define	BRFMOD						0x1
+#define	BJAPANMODE					0x2
+#define	BCCKTXSC					0x30
+/* Block & Path enable*/
+#define ROFDMCCKEN					0x808
+#define	BCCKEN						0x10000000
+#define	BOFDMEN						0x20000000
+#define	RRXPATH						0x808	/* Rx antenna*/
+#define	BRXPATH						0xff
+#define	RTXPATH						0x80c	/* Tx antenna*/
+#define	BTXPATH						0x0fffffff
+#define	RCCK_RX						0xa04	/* for cck rx path selection*/
+#define	BCCK_RX						0x0c000000
+#define	RVHTLEN_USE_LSIG			0x8c3	/* Use LSIG for VHT length*/
+
+
+#define	BOFDMRXADCPHASE           	0x10000
+#define	BOFDMTXDACPHASE           	0x40000
+#define	BXATXAGC                  	0x3f
+
+#define	BXBTXAGC                  	0xf00
+#define	BXCTXAGC                  	0xf000
+#define	BXDTXAGC                  	0xf0000
+
+#define	BPASTART                  	0xf0000000
+#define	BTRSTART                  	0x00f00000
+#define	BRFSTART                  	0x0000f000
+#define	BBBSTART                  	0x000000f0
+#define	BBBCCKSTART               	0x0000000f
+#define	BPAEND                    	0xf
+#define	BTREND                    	0x0f000000
+#define	BRFEND                    	0x000f0000
+#define	BCCAMASK                  	0x000000f0
+#define	BR2RCCAMASK               	0x00000f00
+#define	BHSSI_R2TDELAY            	0xf8000000
+#define	BHSSI_T2RDELAY            	0xf80000
+#define	BCONTXHSSI               	0x400
+#define	BIGFROMCCK                	0x200
+#define	BAGCADDRESS               	0x3f
+#define	BRXHPTX                   	0x7000
+#define	BRXHP2RX                  	0x38000
+#define	BRXHPCCKINI               	0xc0000
+#define	BAGCTXCODE                	0xc00000
+#define	BAGCRXCODE                	0x300000
+
+#define	B3WIREDATALENGTH          	0x800
+#define	B3WIREADDREAALENGTH       	0x400
+
+#define	B3WIRERFPOWERDOWN         	0x1
+#define	B5GPAPEPOLARITY           	0x40000000
+#define	B2GPAPEPOLARITY           	0x80000000
+#define	BRFSW_TXDEFAULTANT        	0x3
+#define	BRFSW_TXOPTIONANT         	0x30
+#define	BRFSW_RXDEFAULTANT        	0x300
+#define	BRFSW_RXOPTIONANT         	0x3000
+#define	BRFSI_3WIREDATA           	0x1
+#define	BRFSI_3WIRECLOCK          	0x2
+#define	BRFSI_3WIRELOAD           	0x4
+#define	BRFSI_3WIRERW             	0x8
+#define	BRFSI_3WIRE               	0xf
+
+#define	BRFSI_RFENV               	0x10
+
+#define	BRFSI_TRSW                	0x20
+#define	BRFSI_TRSWB               	0x40
+#define	BRFSI_ANTSW               	0x100
+#define	BRFSI_ANTSWB              	0x200
+#define	BRFSI_PAPE                	0x400
+#define	BRFSI_PAPE5G              	0x800
+#define	BBANDSELECT               	0x1
+#define	BHTSIG2_GI                	0x80
+#define	BHTSIG2_SMOOTHING         	0x01
+#define	BHTSIG2_SOUNDING          	0x02
+#define	BHTSIG2_AGGREATON         	0x08
+#define	BHTSIG2_STBC              	0x30
+#define	BHTSIG2_ADVCODING         	0x40
+#define	BHTSIG2_NUMOFHTLTF        	0x300
+#define	BHTSIG2_CRC8              	0x3fc
+#define	BHTSIG1_MCS               	0x7f
+#define	BHTSIG1_BANDWIDTH         	0x80
+#define	BHTSIG1_HTLENGTH          	0xffff
+#define	BLSIG_RATE                	0xf
+#define	BLSIG_RESERVED            	0x10
+#define	BLSIG_LENGTH              	0x1fffe
+#define	BLSIG_PARITY              	0x20
+#define	BCCKRXPHASE               	0x4
+
+#define	BLSSIREADADDRESS          	0x7f800000
+#define	BLSSIREADEDGE             	0x80000000
+
+#define	BLSSIREADBACKDATA         	0xfffff
+
+#define	BLSSIREADOKFLAG           	0x1000
+#define	BCCKSAMPLERATE            	0x8
+#define	BREGULATOR0STANDBY        	0x1
+#define	BREGULATORPLLSTANDBY      	0x2
+#define	BREGULATOR1STANDBY        	0x4
+#define	BPLLPOWERUP               	0x8
+#define	BDPLLPOWERUP              	0x10
+#define	BDA10POWERUP              	0x20
+#define	BAD7POWERUP               	0x200
+#define	BDA6POWERUP               	0x2000
+#define	BXTALPOWERUP              	0x4000
+#define	B40MDCLKPOWERUP           	0x8000
+#define	BDA6DEBUGMODE             	0x20000
+#define	BDA6SWING                 	0x380000
+
+#define	BADCLKPHASE               	0x4000000
+#define	B80MCLKDELAY              	0x18000000
+#define	BAFEWATCHDOGENABLE        	0x20000000
+
+#define	BXTALCAP01                	0xc0000000
+#define	BXTALCAP23                	0x3
+#define	BXTALCAP92X					0x0f000000
+#define BXTALCAP                	0x0f000000
+
+#define	BINTDIFCLKENABLE          	0x400
+#define	BEXTSIGCLKENABLE         	0x800
+#define	BBANDGAP_MBIAS_POWERUP      0x10000
+#define	BAD11SH_GAIN               	0xc0000
+#define	BAD11NPUT_RANGE           	0x700000
+#define	BAD110P_CURRENT            	0x3800000
+#define	BLPATH_LOOPBACK            	0x4000000
+#define	BQPATH_LOOPBACK            	0x8000000
+#define	BAFE_LOOPBACK              	0x10000000
+#define	BDA10_SWING                	0x7e0
+#define	BDA10_REVERSE              	0x800
+#define	BDA_CLK_SOURCE              0x1000
+#define	BDA7INPUT_RANGE            	0x6000
+#define	BDA7_GAIN                  	0x38000
+#define	BDA7OUTPUT_CM_MODE          0x40000
+#define	BDA7INPUT_CM_MODE           0x380000
+#define	BDA7CURRENT               	0xc00000
+#define	BREGULATOR_ADJUST          	0x7000000
+#define	BAD11POWERUP_ATTX          	0x1
+#define	BDA10PS_ATTX               	0x10
+#define	BAD11POWERUP_ATRX          	0x100
+#define	BDA10PS_ATRX               	0x1000
+#define	BCCKRX_AGC_FORMAT           0x200
+#define	BPSDFFT_SAMPLE_POINT       	0xc000
+#define	BPSD_AVERAGE_NUM            0x3000
+#define	BIQPATH_CONTROL            	0xc00
+#define	BPSD_FREQ                  	0x3ff
+#define	BPSD_ANTENNA_PATH           0x30
+#define	BPSD_IQ_SWITCH              0x40
+#define	BPSD_RX_TRIGGER             0x400000
+#define	BPSD_TX_TRIGGER             0x80000000
+#define	BPSD_SINE_TONE_SCALE        0x7f000000
+#define	BPSD_REPORT                	0xffff
+
+#define	BOFDM_TXSC                	0x30000000
+#define	BCCK_TXON                  	0x1
+#define	BOFDM_TXON                 	0x2
+#define	BDEBUG_PAGE                	0xfff
+#define	BDEBUG_ITEM                	0xff
+#define	BANTL              	       	0x10
+#define	BANT_NONHT           	    0x100
+#define	BANT_HT1               		0x1000
+#define	BANT_HT2                   	0x10000
+#define	BANT_HT1S1                 	0x100000
+#define	BANT_NONHTS1               	0x1000000
+
+#define	BCCK_BBMODE                	0x3
+#define	BCCK_TXPOWERSAVING         	0x80
+#define	BCCK_RXPOWERSAVING         	0x40
+
+#define	BCCK_SIDEBAND              	0x10
+
+#define	BCCK_SCRAMBLE              	0x8
+#define	BCCK_ANTDIVERSITY    		0x8000
+#define	BCCK_CARRIER_RECOVERY   	0x4000
+#define	BCCK_TXRATE           		0x3000
+#define	BCCK_DCCANCEL             	0x0800
+#define	BCCK_ISICANCEL             	0x0400
+#define	BCCK_MATCH_FILTER           0x0200
+#define	BCCK_EQUALIZER             	0x0100
+#define	BCCK_PREAMBLE_DETECT       	0x800000
+#define	BCCK_FAST_FALSECCA          0x400000
+#define	BCCK_CH_ESTSTART            0x300000
+#define	BCCK_CCA_COUNT              0x080000
+#define	BCCK_CS_LIM                	0x070000
+#define	BCCK_BIST_MODE              0x80000000
+#define	BCCK_CCAMASK             	0x40000000
+#define	BCCK_TX_DAC_PHASE         	0x4
+#define	BCCK_RX_ADC_PHASE         	0x20000000
+#define	BCCKR_CP_MODE         	   	0x0100
+#define	BCCK_TXDC_OFFSET           	0xf0
+#define	BCCK_RXDC_OFFSET           	0xf
+#define	BCCK_CCA_MODE              	0xc000
+#define	BCCK_FALSECS_LIM           	0x3f00
+#define	BCCK_CS_RATIO              	0xc00000
+#define	BCCK_CORGBIT_SEL           	0x300000
+#define	BCCK_PD_LIM                	0x0f0000
+#define	BCCK_NEWCCA                	0x80000000
+#define	BCCK_RXHP_OF_IG             0x8000
+#define	BCCK_RXIG                  	0x7f00
+#define	BCCK_LNA_POLARITY           0x800000
+#define	BCCK_RX1ST_BAIN             0x7f0000
+#define	BCCK_RF_EXTEND              0x20000000
+#define	BCCK_RXAGC_SATLEVEL        	0x1f000000
+#define	BCCK_RXAGC_SATCOUNT       	0xe0
+#define	bCCKRxRFSettle            	0x1f
+#define	BCCK_FIXED_RXAGC           	0x8000
+#define	BCCK_ANTENNA_POLARITY      	0x2000
+#define	BCCK_TXFILTER_TYPE          0x0c00
+#define	BCCK_RXAGC_REPORTTYPE   	0x0300
+#define	BCCK_RXDAGC_EN              0x80000000
+#define	BCCK_RXDAGC_PERIOD        	0x20000000
+#define	BCCK_RXDAGC_SATLEVEL     	0x1f000000
+#define	BCCK_TIMING_RECOVERY       	0x800000
+#define	BCCK_TXC0                  	0x3f0000
+#define	BCCK_TXC1                  	0x3f000000
+#define	BCCK_TXC2                  	0x3f
+#define	BCCK_TXC3                  	0x3f00
+#define	BCCK_TXC4                  	0x3f0000
+#define	BCCK_TXC5                  	0x3f000000
+#define	BCCK_TXC6                  	0x3f
+#define	BCCK_TXC7                  	0x3f00
+#define	BCCK_DEBUGPORT             	0xff0000
+#define	BCCK_DAC_DEBUG              0x0f000000
+#define	BCCK_FALSEALARM_ENABLE      0x8000
+#define	BCCK_FALSEALARM_READ        0x4000
+#define	BCCK_TRSSI                 	0x7f
+#define	BCCK_RXAGC_REPORT           0xfe
+#define	BCCK_RXREPORT_ANTSEL       	0x80000000
+#define	BCCK_RXREPORT_MFOFF        	0x40000000
+#define	BCCK_RXREPORT_SQLOSS     	0x20000000
+#define	BCCK_RXREPORT_PKTLOSS      	0x10000000
+#define	BCCK_RXREPORT_LOCKEDBIT    	0x08000000
+#define	BCCK_RXREPORT_RATEERROR    	0x04000000
+#define	BCCK_RXREPORT_RXRATE       	0x03000000
+#define	BCCK_RXFA_COUNTER_LOWER     0xff
+#define	BCCK_RXFA_COUNTER_UPPER     0xff000000
+#define	BCCK_RXHPAGC_START          0xe000
+#define	BCCK_RXHPAGC_FINAL          0x1c00
+#define	BCCK_RXFALSEALARM_ENABLE    0x8000
+#define	BCCK_FACOUNTER_FREEZE       0x4000
+#define	BCCK_TXPATH_SEL             0x10000000
+#define	BCCK_DEFAULT_RXPATH         0xc000000
+#define	BCCK_OPTION_RXPATH          0x3000000
+
+#define	BNUM_OFSTF                	0x3
+#define	BSHIFT_L                 	0xc0
+#define	BGI_TH                   	0xc
+#define	BRXPATH_A                 	0x1
+#define	BRXPATH_B                 	0x2
+#define	BRXPATH_C                 	0x4
+#define	BRXPATH_D                 	0x8
+#define	BTXPATH_A                 	0x1
+#define	BTXPATH_B                 	0x2
+#define	BTXPATH_C                 	0x4
+#define	BTXPATH_D                 	0x8
+#define	BTRSSI_FREQ               	0x200
+#define	BADC_BACKOFF              	0x3000
+#define	BDFIR_BACKOFF             	0xc000
+#define	BTRSSI_LATCH_PHASE         	0x10000
+#define	BRX_LDC_OFFSET             	0xff
+#define	BRX_QDC_OFFSET             	0xff00
+#define	BRX_DFIR_MODE              	0x1800000
+#define	BRX_DCNF_TYPE              	0xe000000
+#define	BRXIQIMB_A               	0x3ff
+#define	BRXIQIMB_B               	0xfc00
+#define	BRXIQIMB_C               	0x3f0000
+#define	BRXIQIMB_D               	0xffc00000
+#define	BDC_DC_NOTCH             	0x60000
+#define	BRXNB_NOTCH              	0x1f000000
+#define	BPD_TH                   	0xf
+#define	BPD_TH_OPT2              	0xc000
+#define	BPWED_TH                 	0x700
+#define	BIFMF_WIN_L              	0x800
+#define	BPD_OPTION               	0x1000
+#define	BMF_WIN_L                	0xe000
+#define	BBW_SEARCH_L             	0x30000
+#define	BWIN_ENH_L               	0xc0000
+#define	BBW_TH                   	0x700000
+#define	BED_TH2                  	0x3800000
+#define	BBW_OPTION               	0x4000000
+#define	BRADIO_TH                	0x18000000
+#define	BWINDOW_L                	0xe0000000
+#define	BSBD_OPTION              	0x1
+#define	BFRAME_TH                	0x1c
+#define	BFS_OPTION               	0x60
+#define	BDC_SLOPE_CHECK          	0x80
+#define	BFGUARD_COUNTER_DC_L     	0xe00
+#define	BFRAME_WEIGHT_SHORT      	0x7000
+#define	BSUB_TUNE                	0xe00000
+#define	BFRAME_DC_LENGTH         	0xe000000
+#define	BSBD_START_OFFSET        	0x30000000
+#define	BFRAME_TH_2              	0x7
+#define	BFRAME_GI2_TH            	0x38
+#define	BGI2_SYNC_EN             	0x40
+#define	BSARCH_SHORT_EARLY       	0x300
+#define	BSARCH_SHORT_LATE        	0xc00
+#define	BSARCH_GI2_LATE          	0x70000
+#define	BCFOANTSUM               	0x1
+#define	BCFOACC                  	0x2
+#define	BCFOSTARTOFFSET          	0xc
+#define	BCFOLOOPBACK             	0x70
+#define	BCFOSUMWEIGHT            	0x80
+#define	BDAGCENABLE              	0x10000
+#define	BTXIQIMB_A               	0x3ff
+#define	BTXIQIMB_b               	0xfc00
+#define	BTXIQIMB_C               	0x3f0000
+#define	BTXIQIMB_D               	0xffc00000
+#define	BTXIDCOFFSET             	0xff
+#define	BTXIQDCOFFSET             	0xff00
+#define	BTXDFIRMODE              	0x10000
+#define	BTXPESUDO_NOISEON         	0x4000000
+#define	BTXPESUDO_NOISE_A         	0xff
+#define	BTXPESUDO_NOISE_B         	0xff00
+#define	BTXPESUDO_NOISE_C         	0xff0000
+#define	BTXPESUDO_NOISE_D         	0xff000000
+#define	BCCA_DROPOPTION           	0x20000
+#define	BCCA_DROPTHRES            	0xfff00000
+#define	BEDCCA_H                 	0xf
+#define	BEDCCA_L                 	0xf0
+#define	BLAMBDA_ED               	0x300
+#define	BRX_INITIALGAIN           	0x7f
+#define	BRX_ANTDIV_EN              	0x80
+#define	BRX_AGC_ADDRESS_FOR_LNA     0x7f00
+#define	BRX_HIGHPOWER_FLOW         	0x8000
+#define	BRX_AGC_FREEZE_THRES        0xc0000
+#define	BRX_FREEZESTEP_AGC1       	0x300000
+#define	BRX_FREEZESTEP_AGC2       	0xc00000
+#define	BRX_FREEZESTEP_AGC3       	0x3000000
+#define	BRX_FREEZESTEP_AGC0       	0xc000000
+#define	BRXRSSI_CMP_EN           	0x10000000
+#define	BRXQUICK_AGCEN            	0x20000000
+#define	BRXAGC_FREEZE_THRES_MODE    0x40000000
+#define	BRX_OVERFLOW_CHECKTYPE     	0x80000000
+#define	BRX_AGCSHIFT              	0x7f
+#define	BTRSW_TRI_ONLY           	0x80
+#define	BPOWER_THRES              	0x300
+#define	BRXAGC_EN                 	0x1
+#define	BRXAGC_TOGETHER_EN         	0x2
+#define	BRXAGC_MIN                	0x4
+#define	BRXHP_INI                	0x7
+#define	BRXHP_TRLNA              	0x70
+#define	BRXHP_RSSI               	0x700
+#define	BRXHP_BBP1               	0x7000
+#define	BRXHP_BBP2               	0x70000
+#define	BRXHP_BBP3               	0x700000
+#define	BRSSI_H                  	0x7f0000
+#define	BRSSI_GEN                	0x7f000000
+#define	BRXSETTLE_TRSW           	0x7
+#define	BRXSETTLE_LNA            	0x38
+#define	BRXSETTLE_RSSI           	0x1c0
+#define	BRXSETTLE_BBP            	0xe00
+#define	BRXSETTLE_RXHP           	0x7000
+#define	BRXSETTLE_ANTSW_RSSI     	0x38000
+#define	BRXSETTLE_ANTSW          	0xc0000
+#define	BRXPROCESS_TIME_DAGC      	0x300000
+#define	BRXSETTLE_HSSI           	0x400000
+#define	BRXPROCESS_TIME_BBPPW     	0x800000
+#define	BRXANTENNA_POWER_SHIFT     	0x3000000
+#define	BRSSI_TABLE_SELECT         	0xc000000
+#define	BRXHP_FINAL              	0x7000000
+#define	BRXHPSETTLE_BBP          	0x7
+#define	BRXHTSETTLE_HSSI         	0x8
+#define	BRXHTSETTLE_RXHP         	0x70
+#define	BRXHTSETTLE_BBPPW        	0x80
+#define	BRXHTSETTLE_IDLE         	0x300
+#define	BRXHTSETTLE_RESERVED     	0x1c00
+#define	BRXHT_RXHP_EN              	0x8000
+#define	BRXAGC_FREEZE_THRES      	0x30000
+#define	BRXAGC_TOGETHEREN       	0x40000
+#define	BRXHTAGC_MIN              	0x80000
+#define	BRXHTAGC_EN               	0x100000
+#define	BRXHTDAGC_EN              	0x200000
+#define	BRXHT_RXHP_BBP            	0x1c00000
+#define	BRXHT_RXHP_FINAL          	0xe0000000
+#define	BRXPW_RADIO_TH             	0x3
+#define	BRXPW_RADIO_EN             	0x4
+#define	BRXMF_HOLD                	0x3800
+#define	BRXPD_DELAY_TH1          	0x38
+#define	BRXPD_DELAY_TH2          	0x1c0
+#define	BRXPD_DC_COUNT_MAX       	0x600
+#define	BRXPD_DELAY_TH           	0x8000
+#define	BRXPROCESS_DELAY         	0xf0000
+#define	BRXSEARCHRANGE_GI2_EARLY 	0x700000
+#define	BRXFRAME_FUARD_COUNTER_L 	0x3800000
+#define	BRXSGI_GUARD_L           	0xc000000
+#define	BRXSGI_SEARCH_L          	0x30000000
+#define	BRXSGI_TH                	0xc0000000
+#define	BDFSCNT0                 	0xff
+#define	BDFSCNT1                 	0xff00
+#define	BDFSFLAG                 	0xf0000
+#define	BMF_WEIGHT_SUM             	0x300000
+#define	BMINIDX_TH                	0x7f000000
+#define	BDAFORMAT                	0x40000
+#define	BTXCH_EMU_ENABLE           	0x01000000
+#define	BTRSW_ISOLATION_A         	0x7f
+#define	BTRSW_ISOLATION_B         	0x7f00
+#define	BTRSW_ISOLATION_C         	0x7f0000
+#define	BTRSW_ISOLATION_D         	0x7f000000
+#define	BEXT_LNA_GAIN              	0x7c00
+
+#define	BSTBC_EN                  	0x4
+#define	BANTENNA_MAPPING          	0x10
+#define	BNSS                     	0x20
+#define	BCFO_ANTSUM_ID              0x200
+#define	BPHY_COUNTER_RESET         	0x8000000
+#define	BCFO_REPORT_GET            	0x4000000
+#define	BOFDM_CONTINUE_TX          	0x10000000
+#define	BOFDM_SINGLE_CARRIER       	0x20000000
+#define	BOFDM_SINGLE_TONE          	0x40000000
+#define	BHT_DETECT                	0x100
+#define	BCFOEN                   	0x10000
+#define	BCFOVALUE                	0xfff00000
+#define	BSIGTONE_RE              	0x3f
+#define	BSIGTONE_IM              	0x7f00
+#define	BCOUNTER_CCA             	0xffff
+#define	BCOUNTER_PARITYFAIL      	0xffff0000
+#define	BCOUNTER_RATEILLEGAL     	0xffff
+#define	BCOUNTER_CRC8FAIL        	0xffff0000
+#define	BCOUNTER_MCSNOSUPPORT    	0xffff
+#define	BCOUNTER_FASTSYNC        	0xffff
+#define	BSHORTCFO                	0xfff
+#define	BSHORTCFOT_LENGTH         	12
+#define	BSHORTCFOF_LENGTH         	11
+#define	BLONGCFO                 	0x7ff
+#define	BLONGCFOT_LENGTH          	11
+#define	BLONGCFOF_LENGTH          	11
+#define	BTAILCFO                 	0x1fff
+#define	BTAILCFOT_LENGTH          	13
+#define	BTAILCFOF_LENGTH          	12
+#define	BNOISE_EN_PWDB             	0xffff
+#define	BCC_POWER_DB             	0xffff0000
+#define	BMOISE_PWDB              	0xffff
+#define	BPOWERMEAST_LENGTH        	10
+#define	BPOWERMEASF_LENGTH        	3
+#define	BRX_HT_BW                	0x1
+#define	BRXSC                    	0x6
+#define	BRX_HT                   	0x8
+#define	BNB_INTF_DET_ON          	0x1
+#define	BINTF_WIN_LEN_CFG        	0x30
+#define	BNB_INTF_TH_CFG          	0x1c0
+#define	BRFGAIN                  	0x3f
+#define	BTABLESEL                	0x40
+#define	BTRSW                    	0x80
+#define	BRXSNR_A                 	0xff
+#define	BRXSNR_B                 	0xff00
+#define	BRXSNR_C                 	0xff0000
+#define	BRXSNR_D                 	0xff000000
+#define	BSNR_EVMT_LENGTH           	8
+#define	BSNR_EVMF_LENGTH           	1
+#define	BCSI1ST                  	0xff
+#define	BCSI2ND                  	0xff00
+#define	BRXEVM1ST                	0xff0000
+#define	BRXEVM2ND                	0xff000000
+#define	BSIGEVM                  	0xff
+#define	BPWDB                    	0xff00
+#define	BSGIEN                   	0x10000
+
+#define	BSFACTOR_QMA1             	0xf
+#define	BSFACTOR_QMA2             	0xf0
+#define	BSFACTOR_QMA3             	0xf00
+#define	BSFACTOR_QMA4             	0xf000
+#define	BSFACTOR_QMA5             	0xf0000
+#define	BSFACTOR_QMA6             	0xf0000
+#define	BSFACTOR_QMA7             	0xf00000
+#define	BSFACTOR_QMA8             	0xf000000
+#define	BSFACTOR_QMA9             	0xf0000000
+#define	BCSI_SCHEME               	0x100000
+
+#define	BNOISE_LVL_TOP_SET          0x3
+#define	BCHSMOOTH                	0x4
+#define	BCHSMOOTH_CFG1            	0x38
+#define	BCHSMOOTH_CFG2            	0x1c0
+#define	BCHSMOOTH_CFG3            	0xe00
+#define	BCHSMOOTH_CFG4            	0x7000
+#define	BMRCMODE                 	0x800000
+#define	BTHEVMCFG                	0x7000000
+
+#define	BLOOP_FIT_TYPE             	0x1
+#define	BUPD_CFO                  	0x40
+#define	BUPD_CFO_OFFDATA           	0x80
+#define	BADV_UPD_CFO               	0x100
+#define	BADV_TIME_CTRL             	0x800
+#define	BUPD_CLKO                 	0x1000
+#define	BFC                      	0x6000
+#define	BTRACKING_MODE            	0x8000
+#define	BPHCMP_ENABLE             	0x10000
+#define	BUPD_CLKO_LTF              	0x20000
+#define	BCOM_CH_CFO                	0x40000
+#define	BCSI_ESTI_MODE             	0x80000
+#define	BADV_UPD_EQZ               	0x100000
+#define	BUCHCFG                  	0x7000000
+#define	BUPDEQZ                  	0x8000000
+
+#define	BRX_PESUDO_NOISE_ON         0x20000000
+#define	BRX_PESUDO_NOISE_A         	0xff
+#define	BRX_PESUDO_NOISE_B         	0xff00
+#define	BRX_PESUDO_NOISE_C         	0xff0000
+#define	BRX_PESUDO_NOISE_D         	0xff000000
+#define	BRX_PESUDO_NOISESTATE_A     0xffff
+#define	BRX_PESUDO_NOISESTATE_B     0xffff0000
+#define	BRX_PESUDO_NOISESTATE_C     0xffff
+#define	BRX_PESUDO_NOISESTATE_D     0xffff0000
+
+#define	BZEBRA1_HSSIENABLE        	0x8
+#define	BZEBRA1_TRXCONTROL        	0xc00
+#define	BZEBRA1_TRXGAINSETTING    	0x07f
+#define	BZEBRA1_RXCOUNTER          	0xc00
+#define	BZEBRA1_TXCHANGEPUMP      	0x38
+#define	BZEBRA1_RXCHANGEPUMP      	0x7
+#define	BZEBRA1_CHANNEL_NUM        	0xf80
+#define	BZEBRA1_TXLPFBW           	0x400
+#define	BZEBRA1_RXLPFBW           	0x600
+
+#define	BRTL8256REG_MODE_CTRL1      0x100
+#define	BRTL8256REG_MODE_CTRL0      0x40
+#define	BRTL8256REG_TXLPFBW         0x18
+#define	BRTL8256REG_RXLPFBW         0x600
+
+#define	BRTL8258_TXLPFBW          	0xc
+#define	BRTL8258_RXLPFBW          	0xc00
+#define	BRTL8258_RSSILPFBW        	0xc0
+
+#define	BBYTE0                    	0x1
+#define	BBYTE1                    	0x2
+#define	BBYTE2                    	0x4
+#define	BBYTE3                    	0x8
+#define	BWORD0                    	0x3
+#define	BWORD1                    	0xc
+#define	BWORD                    	0xf
+
+#define	MASKBYTE0                	0xff
+#define	MASKBYTE1                	0xff00
+#define	MASKBYTE2                	0xff0000
+#define	MASKBYTE3                	0xff000000
+#define	MASKHWORD                	0xffff0000
+#define	MASKLWORD                	0x0000ffff
+#define	MASKDWORD					0xffffffff
+#define	MASK12BITS					0xfff
+#define	MASKH4BITS					0xf0000000
+#define MASKOFDM_D					0xffc00000
+#define	MASKCCK						0x3f3f3f3f
+
+#define	MASK4BITS              		0x0f
+#define	MASK20BITS             		0xfffff
+#define RFREG_OFFSET_MASK			0xfffff
+
+#define	BENABLE                   	0x1
+#define	BDISABLE                  	0x0
+
+#define	LEFT_ANTENNA               	0x0
+#define	RIGHT_ANTENNA              	0x1
+
+#define	TCHECK_TXSTATUS            	500
+#define	TUPDATE_RXCOUNTER          	100
+
+#define	REG_UN_used_register		0x01bf
+
+/* WOL bit information */
+#define	HAL92C_WOL_PTK_UPDATE_EVENT		BIT(0)
+#define	HAL92C_WOL_GTK_UPDATE_EVENT		BIT(1)
+#define	HAL92C_WOL_DISASSOC_EVENT		BIT(2)
+#define	HAL92C_WOL_DEAUTH_EVENT			BIT(3)
+#define	HAL92C_WOL_FW_DISCONNECT_EVENT	BIT(4)
+
+#define		WOL_REASON_PTK_UPDATE		BIT(0)
+#define		WOL_REASON_GTK_UPDATE		BIT(1)
+#define		WOL_REASON_DISASSOC			BIT(2)
+#define		WOL_REASON_DEAUTH			BIT(3)
+#define		WOL_REASON_FW_DISCONNECT	BIT(4)
+
+#define		RA_RFE_PINMUX	0xcb0	/* Path_A RFE control pinmux */
+#define		RB_RFE_PINMUX	0xeb0	/* Path_B RFE control pinmux */
+
+#define		RA_RFE_INV 0xcb4
+#define		RB_RFE_INV 0xeb4
+
+/* RXIQC */
+#define		RA_RXIQC_AB	0xc10	/* RxIQ imbalance matrix coeff. A & B */
+#define		RA_RXIQC_CD	0xc14	/* RxIQ imbalance matrix coeff. C & D */
+#define		RA_TXSCALE	0xc1c	/* Path_A TX scaling factor */
+#define		RB_TXSCALE	0xe1c	/* Path_B TX scaling factor */
+#define		RB_RXIQC_AB	0xe10	/* RxIQ imbalance matrix coeff. A & B */
+#define		RB_RXIQC_CD	0xe14	/* RxIQ imbalance matrix coeff. C & D */
+#define		RXIQC_AC	0x02ff		/* bit mask for IQC matrix elements A & C */
+#define		RXIQC_BD	0x02ff0000	/* bit mask for IQC matrix elements B & D */
+
+/* EFUSE_TEST (for RTL8723, partially) */
+#define EFUSE_SEL(x)					(((x) & 0x3) << 8)
+#define EFUSE_SEL_MASK				0x300
+#define EFUSE_WIFI_SEL_0				0x0
+
+/* REG_MULTI_FUNC_CTRL (for RTL8723 only) */
+#define	WL_HWPDN_EN				BIT(0)	/* Enable GPIO[9] as WiFi HW PDn source */
+#define	WL_HWPDN_SL				BIT(1)	/* WiFi HW PDn polarity control */
+#define	WL_FUNC_EN				BIT(2)	/* WiFi function enable */
+#define	WL_HWROF_EN				BIT(3)	/* Enable GPIO[9] as WiFi RF HW PDn source */
+#define	BT_HWPDN_EN				BIT(16)	/* Enable GPIO[11] as BT HW PDn source */
+#define	BT_HWPDN_SL				BIT(17)	/* BT HW PDn polarity control */
+#define	BT_FUNC_EN				BIT(18)	/* BT function enable */
+#define	BT_HWROF_EN				BIT(19)	/* Enable GPIO[11] as BT/GPS RF HW PDn source */
+#define	GPS_HWPDN_EN				BIT(20)	/* Enable GPIO[10] as GPS HW PDn source */
+#define	GPS_HWPDN_SL				BIT(21)	/* GPS HW PDn polarity control */
+#define	GPS_FUNC_EN				BIT(22)	/* GPS function enable */
+
+
+#define	BMASKBYTE0                	0xff
+#define	BMASKBYTE1                	0xff00
+#define	BMASKBYTE2                	0xff0000
+#define	BMASKBYTE3                	0xff000000
+#define	BMASKHWORD                	0xffff0000
+#define	BMASKLWORD                	0x0000ffff
+#define	BMASKDWORD                	0xffffffff
+#define	BMASK12BITS				  	0xfff
+#define	BMASKH4BITS				  	0xf0000000
+#define BMASKOFDM_D					0xffc00000
+#define	BMASKCCK				  	0x3f3f3f3f
+
+#define BRFREGOFFSETMASK			0xfffff
+
+#define	ODM_REG_CCK_RPT_FORMAT_11AC	0x804
+#define	ODM_REG_BB_RX_PATH_11AC			0x808
+/*PAGE 9*/
+#define	ODM_REG_OFDM_FA_RST_11AC		0x9A4
+/*PAGE A*/
+#define	ODM_REG_CCK_CCA_11AC			0xA0A
+#define	ODM_REG_CCK_FA_RST_11AC			0xA2C
+#define	ODM_REG_CCK_FA_11AC				0xA5C
+/*PAGE C*/
+#define	ODM_REG_IGI_A_11AC				0xC50
+/*PAGE E*/
+#define	ODM_REG_IGI_B_11AC				0xE50
+/*PAGE F*/
+#define	ODM_REG_OFDM_FA_11AC			0xF48
+
+
+/* MAC register list */
+
+/* DIG related */
+#define	ODM_BIT_IGI_11AC					0xFFFFFFFF
+#define	ODM_BIT_CCK_RPT_FORMAT_11AC		BIT16
+#define	ODM_BIT_BB_RX_PATH_11AC			0xF
+
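+/* A-MPDU maximum aggregation buffer sizes: 8K-64K for HT, 128K-1024K for VHT. */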
+typedef enum AGGRE_SIZE {
+	HT_AGG_SIZE_8K = 0,
+	HT_AGG_SIZE_16K = 1,
+	HT_AGG_SIZE_32K = 2,
+	HT_AGG_SIZE_64K = 3,
+	VHT_AGG_SIZE_128K = 4,
+	VHT_AGG_SIZE_256K = 5,
+	VHT_AGG_SIZE_512K = 6,
+	VHT_AGG_SIZE_1024K = 7,
+} AGGRE_SIZE_E, *PAGGRE_SIZE_E;
+
+#define REG_AMPDU_MAX_LENGTH_8812	0x0458
+
+#endif
diff --git a/drivers/staging/rtl8821ae/rtl8821ae/rf.c b/drivers/staging/rtl8821ae/rtl8821ae/rf.c
new file mode 100644
index 0000000..87c1c97
--- /dev/null
+++ b/drivers/staging/rtl8821ae/rtl8821ae/rf.c
@@ -0,0 +1,464 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2009-2010  Realtek Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ * The full GNU General Public License is included in this distribution in the
+ * file called LICENSE.
+ *
+ * Contact Information:
+ * wlanfae <wlanfae@realtek.com>
+ * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
+ * Hsinchu 300, Taiwan.
+ *
+ * Larry Finger <Larry.Finger@lwfinger.net>
+ *
+ *****************************************************************************/
+
+#include "../wifi.h"
+#include "reg.h"
+#include "def.h"
+#include "phy.h"
+#include "rf.h"
+#include "dm.h"
+
+static bool _rtl8821ae_phy_rf6052_config_parafile(struct ieee80211_hw *hw);
+
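+/*
+ * Program the RF6052 channel-bandwidth field (RF_CHNLBW bits 11:10) on both
+ * RF paths: 3 selects 20 MHz, 1 selects 40 MHz and 0 selects 80 MHz operation.
+ */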
+void rtl8821ae_phy_rf6052_set_bandwidth(struct ieee80211_hw *hw, u8 bandwidth)
+{
+	struct rtl_priv *rtlpriv = rtl_priv(hw);
+
+	switch (bandwidth) {
+	case HT_CHANNEL_WIDTH_20:
+		rtl_set_rfreg(hw, RF90_PATH_A, RF_CHNLBW, BIT(11)|BIT(10), 3);
+		rtl_set_rfreg(hw, RF90_PATH_B, RF_CHNLBW, BIT(11)|BIT(10), 3);
+		break;
+	case HT_CHANNEL_WIDTH_20_40:
+		rtl_set_rfreg(hw, RF90_PATH_A, RF_CHNLBW, BIT(11)|BIT(10), 1);
+		rtl_set_rfreg(hw, RF90_PATH_B, RF_CHNLBW, BIT(11)|BIT(10), 1);
+		break;
+	case HT_CHANNEL_WIDTH_80:
+		rtl_set_rfreg(hw, RF90_PATH_A, RF_CHNLBW, BIT(11)|BIT(10), 0);
+		rtl_set_rfreg(hw, RF90_PATH_B, RF_CHNLBW, BIT(11)|BIT(10), 0);
+		break;
+	default:
+		RT_TRACE(COMP_ERR, DBG_EMERG,
+			 ("unknown bandwidth: %#X\n", bandwidth));
+		break;
+	}
+}
+
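+/*
+ * Build the per-path CCK TX AGC words from the requested power levels (forcing
+ * 0x3f3f3f3f while scanning unless turbo scan is disabled), clamp every byte
+ * to RF6052_MAX_TX_PWR, apply the power-tracking offset and write the result
+ * to RTXAGC_A/B_CCK11_CCK1.
+ */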
+void rtl8821ae_phy_rf6052_set_cck_txpower(struct ieee80211_hw *hw,
+				       u8 *ppowerlevel)
+{
+	struct rtl_priv *rtlpriv = rtl_priv(hw);
+	struct rtl_phy *rtlphy = &(rtlpriv->phy);
+	struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
+	struct rtl_efuse *rtlefuse = rtl_efuse(rtl_priv(hw));
+	u32 tx_agc[2] = {0, 0}, tmpval;
+	bool turbo_scanoff = false;
+	u8 idx1, idx2;
+	u8 *ptr;
+	u8 direction;
+	u32 pwrtrac_value;
+
+	if (rtlefuse->eeprom_regulatory != 0)
+		turbo_scanoff = true;
+
+	if (mac->act_scanning) {
+		tx_agc[RF90_PATH_A] = 0x3f3f3f3f;
+		tx_agc[RF90_PATH_B] = 0x3f3f3f3f;
+
+		if (turbo_scanoff) {
+			for (idx1 = RF90_PATH_A; idx1 <= RF90_PATH_B; idx1++) {
+				tx_agc[idx1] = ppowerlevel[idx1] |
+				    (ppowerlevel[idx1] << 8) |
+				    (ppowerlevel[idx1] << 16) |
+				    (ppowerlevel[idx1] << 24);
+			}
+		}
+	} else {
+		for (idx1 = RF90_PATH_A; idx1 <= RF90_PATH_B; idx1++) {
+			tx_agc[idx1] = ppowerlevel[idx1] |
+			    (ppowerlevel[idx1] << 8) |
+			    (ppowerlevel[idx1] << 16) |
+			    (ppowerlevel[idx1] << 24);
+		}
+
+		if (rtlefuse->eeprom_regulatory == 0) {
+			tmpval =
+			    (rtlphy->mcs_txpwrlevel_origoffset[0][6]) +
+			    (rtlphy->mcs_txpwrlevel_origoffset[0][7] <<
+			     8);
+			tx_agc[RF90_PATH_A] += tmpval;
+
+			tmpval = (rtlphy->mcs_txpwrlevel_origoffset[0][14]) +
+			    (rtlphy->mcs_txpwrlevel_origoffset[0][15] <<
+			     24);
+			tx_agc[RF90_PATH_B] += tmpval;
+		}
+	}
+
+	for (idx1 = RF90_PATH_A; idx1 <= RF90_PATH_B; idx1++) {
+		ptr = (u8 *) (&(tx_agc[idx1]));
+		for (idx2 = 0; idx2 < 4; idx2++) {
+			if (*ptr > RF6052_MAX_TX_PWR)
+				*ptr = RF6052_MAX_TX_PWR;
+			ptr++;
+		}
+	}
+	rtl8821ae_dm_txpower_track_adjust(hw, 1, &direction, &pwrtrac_value);
+	if (direction == 1) {
+		tx_agc[0] += pwrtrac_value;
+		tx_agc[1] += pwrtrac_value;
+	} else if (direction == 2) {
+		tx_agc[0] -= pwrtrac_value;
+		tx_agc[1] -= pwrtrac_value;
+	}
+	tmpval = tx_agc[RF90_PATH_A];
+	rtl_set_bbreg(hw, RTXAGC_A_CCK11_CCK1, MASKDWORD, tmpval);
+
+	RTPRINT(rtlpriv, FPHY, PHY_TXPWR,
+		("CCK PWR 1~11M (rf-A) = 0x%x (reg 0x%x)\n", tmpval,
+		 RTXAGC_A_CCK11_CCK1));
+
+	tmpval = tx_agc[RF90_PATH_B];
+	rtl_set_bbreg(hw, RTXAGC_B_CCK11_CCK1, MASKDWORD, tmpval);
+
+	RTPRINT(rtlpriv, FPHY, PHY_TXPWR,
+		("CCK PWR 1~11M (rf-B) = 0x%x (reg 0x%x)\n", tmpval,
+		 RTXAGC_B_CCK11_CCK1));
+}
+
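+/*
+ * Derive the per-path OFDM and MCS base power words: each 8-bit power level is
+ * replicated into all four bytes of the 32-bit base value, using the BW20 or
+ * BW40 level for MCS rates depending on the current channel bandwidth.
+ */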
+static void rtl8821ae_phy_get_power_base(struct ieee80211_hw *hw,
+					 u8 *ppowerlevel_ofdm,
+					 u8 *ppowerlevel_bw20,
+					 u8 *ppowerlevel_bw40,
+					 u8 channel,
+					 u32 *ofdmbase, u32 *mcsbase)
+{
+	struct rtl_priv *rtlpriv = rtl_priv(hw);
+	struct rtl_phy *rtlphy = &(rtlpriv->phy);
+	u32 powerBase0, powerBase1;
+	u8 i, powerlevel[2];
+
+	for (i = 0; i < 2; i++) {
+		powerBase0 = ppowerlevel_ofdm[i];
+
+		powerBase0 = (powerBase0 << 24) | (powerBase0 << 16) |
+		    (powerBase0 << 8) | powerBase0;
+		*(ofdmbase + i) = powerBase0;
+		RTPRINT(rtlpriv, FPHY, PHY_TXPWR,
+			(" [OFDM power base index rf(%c) = 0x%x]\n",
+			 ((i == 0) ? 'A' : 'B'), *(ofdmbase + i)));
+	}
+
+	for (i = 0; i < 2; i++) {
+		if (rtlphy->current_chan_bw == HT_CHANNEL_WIDTH_20)
+			powerlevel[i] = ppowerlevel_bw20[i];
+		else
+			powerlevel[i] = ppowerlevel_bw40[i];
+		powerBase1 = powerlevel[i];
+		powerBase1 = (powerBase1 << 24) |
+		    (powerBase1 << 16) | (powerBase1 << 8) | powerBase1;
+
+		*(mcsbase + i) = powerBase1;
+
+		RTPRINT(rtlpriv, FPHY, PHY_TXPWR,
+			(" [MCS power base index rf(%c) = 0x%x]\n",
+			 ((i == 0) ? 'A' : 'B'), *(mcsbase + i)));
+	}
+}
+
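+/*
+ * Combine the base power with the per-channel-group offsets according to
+ * eeprom_regulatory: 0/default uses the Realtek offsets, 1 groups channels,
+ * 2 writes the base value only, and 3 applies the customer power limits.
+ * The result is lowered further when a BT high-power level is active.
+ */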
+static void _rtl8821ae_get_txpower_writeval_by_regulatory(struct ieee80211_hw *hw,
+						       u8 channel, u8 index,
+						       u32 *powerBase0,
+						       u32 *powerBase1,
+						       u32 *p_outwriteval)
+{
+	struct rtl_priv *rtlpriv = rtl_priv(hw);
+	struct rtl_phy *rtlphy = &(rtlpriv->phy);
+	struct rtl_efuse *rtlefuse = rtl_efuse(rtl_priv(hw));
+	u8 i, chnlgroup = 0, pwr_diff_limit[4], pwr_diff = 0, customer_pwr_diff;
+	u32 writeVal, customer_limit, rf;
+
+	for (rf = 0; rf < 2; rf++) {
+		switch (rtlefuse->eeprom_regulatory) {
+		case 0:
+			chnlgroup = 0;
+
+			writeVal =
+			    rtlphy->mcs_txpwrlevel_origoffset[chnlgroup][index +
+									 (rf ? 8 : 0)]
+			    + ((index < 2) ? powerBase0[rf] : powerBase1[rf]);
+
+			RTPRINT(rtlpriv, FPHY, PHY_TXPWR,
+				("RTK better performance, "
+				 "writeVal(%c) = 0x%x\n",
+				 ((rf == 0) ? 'A' : 'B'), writeVal));
+			break;
+		case 1:
+			if (rtlphy->pwrgroup_cnt == 1)
+				chnlgroup = 0;
+			else {
+				if (channel < 3)
+					chnlgroup = 0;
+				else if (channel < 6)
+					chnlgroup = 1;
+				else if (channel < 9)
+					chnlgroup = 2;
+				else if (channel < 12)
+					chnlgroup = 3;
+				else if (channel < 14)
+					chnlgroup = 4;
+				else if (channel == 14)
+					chnlgroup = 5;
+			}
+
+			writeVal =
+			    rtlphy->mcs_txpwrlevel_origoffset[chnlgroup]
+			    [index + (rf ? 8 : 0)] + ((index < 2) ?
+						      powerBase0[rf] :
+						      powerBase1[rf]);
+
+			RTPRINT(rtlpriv, FPHY, PHY_TXPWR,
+				("Realtek regulatory, 20MHz, "
+				 "writeVal(%c) = 0x%x\n",
+				 ((rf == 0) ? 'A' : 'B'), writeVal));
+
+			break;
+		case 2:
+			writeVal =
+			    ((index < 2) ? powerBase0[rf] : powerBase1[rf]);
+
+			RTPRINT(rtlpriv, FPHY, PHY_TXPWR,
+				("Better regulatory, "
+				 "writeVal(%c) = 0x%x\n",
+				 ((rf == 0) ? 'A' : 'B'), writeVal));
+			break;
+		case 3:
+			chnlgroup = 0;
+
+			if (rtlphy->current_chan_bw == HT_CHANNEL_WIDTH_20_40) {
+				RTPRINT(rtlpriv, FPHY, PHY_TXPWR,
+					("customer's limit, 40MHz "
+					 "rf(%c) = 0x%x\n",
+					 ((rf == 0) ? 'A' : 'B'),
+					 rtlefuse->pwrgroup_ht40[rf][channel -
+								     1]));
+			} else {
+				RTPRINT(rtlpriv, FPHY, PHY_TXPWR,
+					("customer's limit, 20MHz "
+					 "rf(%c) = 0x%x\n",
+					 ((rf == 0) ? 'A' : 'B'),
+					 rtlefuse->pwrgroup_ht20[rf][channel -
+								     1]));
+			}
+
+			if (index < 2)
+				pwr_diff = rtlefuse->txpwr_legacyhtdiff[rf][channel-1];
+			else if (rtlphy->current_chan_bw ==  HT_CHANNEL_WIDTH_20)
+				pwr_diff = rtlefuse->txpwr_ht20diff[rf][channel-1];
+
+			if (rtlphy->current_chan_bw == HT_CHANNEL_WIDTH_20_40)
+				customer_pwr_diff = rtlefuse->pwrgroup_ht40[rf][channel-1];
+			else
+				customer_pwr_diff = rtlefuse->pwrgroup_ht20[rf][channel-1];
+
+			if (pwr_diff > customer_pwr_diff)
+				pwr_diff = 0;
+			else
+				pwr_diff = customer_pwr_diff - pwr_diff;
+
+			for (i = 0; i < 4; i++) {
+				pwr_diff_limit[i] =
+				    (u8) ((rtlphy->mcs_txpwrlevel_origoffset
+					   [chnlgroup][index + (rf ? 8 : 0)] & (0x7f <<
+									(i * 8))) >> (i * 8));
+
+				if (pwr_diff_limit[i] > pwr_diff)
+					pwr_diff_limit[i] = pwr_diff;
+			}
+
+			customer_limit = (pwr_diff_limit[3] << 24) |
+			    (pwr_diff_limit[2] << 16) |
+			    (pwr_diff_limit[1] << 8) | (pwr_diff_limit[0]);
+
+			RTPRINT(rtlpriv, FPHY, PHY_TXPWR,
+				("Customer's limit rf(%c) = 0x%x\n",
+				 ((rf == 0) ? 'A' : 'B'), customer_limit));
+
+			writeVal = customer_limit +
+			    ((index < 2) ? powerBase0[rf] : powerBase1[rf]);
+
+			RTPRINT(rtlpriv, FPHY, PHY_TXPWR,
+				("Customer, writeVal rf(%c)= 0x%x\n",
+				 ((rf == 0) ? 'A' : 'B'), writeVal));
+			break;
+		default:
+			chnlgroup = 0;
+			writeVal =
+			    rtlphy->mcs_txpwrlevel_origoffset[chnlgroup]
+			    [index + (rf ? 8 : 0)]
+			    + ((index < 2) ? powerBase0[rf] : powerBase1[rf]);
+
+			RTPRINT(rtlpriv, FPHY, PHY_TXPWR,
+				("RTK better performance, writeVal "
+				 "rf(%c) = 0x%x\n",
+				 ((rf == 0) ? 'A' : 'B'), writeVal));
+			break;
+		}
+
+		if (rtlpriv->dm.dynamic_txhighpower_lvl == TXHIGHPWRLEVEL_BT1)
+			writeVal = writeVal - 0x06060606;
+		else if (rtlpriv->dm.dynamic_txhighpower_lvl ==
+			 TXHIGHPWRLEVEL_BT2)
+			writeVal = writeVal - 0x0c0c0c0c;
+		*(p_outwriteval + rf) = writeVal;
+	}
+}
+
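+/*
+ * Clamp each byte of the computed TX AGC word to RF6052_MAX_TX_PWR and write
+ * it to the per-path OFDM/MCS TX AGC register selected by 'index'.
+ */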
+static void _rtl8821ae_write_ofdm_power_reg(struct ieee80211_hw *hw,
+					 u8 index, u32 *pValue)
+{
+	struct rtl_priv *rtlpriv = rtl_priv(hw);
+	u16 regoffset_a[6] = {
+		RTXAGC_A_OFDM18_OFDM6, RTXAGC_A_OFDM54_OFDM24,
+		RTXAGC_A_MCS03_MCS00, RTXAGC_A_MCS07_MCS04,
+		RTXAGC_A_MCS11_MCS08, RTXAGC_A_MCS15_MCS12
+	};
+	u16 regoffset_b[6] = {
+		RTXAGC_B_OFDM18_OFDM6, RTXAGC_B_OFDM54_OFDM24,
+		RTXAGC_B_MCS03_MCS00, RTXAGC_B_MCS07_MCS04,
+		RTXAGC_B_MCS11_MCS08, RTXAGC_B_MCS15_MCS12
+	};
+	u8 i, rf, pwr_val[4];
+	u32 writeVal;
+	u16 regoffset;
+
+	for (rf = 0; rf < 2; rf++) {
+		writeVal = pValue[rf];
+		for (i = 0; i < 4; i++) {
+			pwr_val[i] = (u8) ((writeVal & (0x7f <<
+							(i * 8))) >> (i * 8));
+
+			if (pwr_val[i] > RF6052_MAX_TX_PWR)
+				pwr_val[i] = RF6052_MAX_TX_PWR;
+		}
+		writeVal = (pwr_val[3] << 24) | (pwr_val[2] << 16) |
+		    (pwr_val[1] << 8) | pwr_val[0];
+
+		if (rf == 0)
+			regoffset = regoffset_a[index];
+		else
+			regoffset = regoffset_b[index];
+		rtl_set_bbreg(hw, regoffset, MASKDWORD, writeVal);
+
+		RTPRINT(rtlpriv, FPHY, PHY_TXPWR,
+			("Set 0x%x = %08x\n", regoffset, writeVal));
+	}
+}
+
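+/*
+ * Program the six OFDM/MCS TX AGC register groups for both RF paths: compute
+ * the regulatory-adjusted value for each group, apply the power-tracking
+ * offset and write it out.
+ */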
+void rtl8821ae_phy_rf6052_set_ofdm_txpower(struct ieee80211_hw *hw,
+					   u8 *ppowerlevel_ofdm,
+					   u8 *ppowerlevel_bw20,
+					   u8 *ppowerlevel_bw40,
+					   u8 channel)
+{
+	u32 writeVal[2], powerBase0[2], powerBase1[2];
+	u8 index;
+	u8 direction;
+	u32 pwrtrac_value;
+
+	rtl8821ae_phy_get_power_base(hw, ppowerlevel_ofdm, ppowerlevel_bw20, ppowerlevel_bw40,
+				  channel, &powerBase0[0], &powerBase1[0]);
+
+	rtl8821ae_dm_txpower_track_adjust(hw, 1, &direction, &pwrtrac_value);
+
+	for (index = 0; index < 6; index++) {
+		_rtl8821ae_get_txpower_writeval_by_regulatory(hw,
+							   channel, index,
+							   &powerBase0[0],
+							   &powerBase1[0],
+							   &writeVal[0]);
+		if (direction == 1) {
+			writeVal[0] += pwrtrac_value;
+			writeVal[1] += pwrtrac_value;
+		} else if (direction == 2) {
+			writeVal[0] -= pwrtrac_value;
+			writeVal[1] -= pwrtrac_value;
+		}
+		_rtl8821ae_write_ofdm_power_reg(hw, index, &writeVal[0]);
+	}
+}
+
+bool rtl8821ae_phy_rf6052_config(struct ieee80211_hw *hw)
+{
+	struct rtl_priv *rtlpriv = rtl_priv(hw);
+	struct rtl_phy *rtlphy = &(rtlpriv->phy);
+
+	if (rtlphy->rf_type == RF_1T1R)
+		rtlphy->num_total_rfpath = 1;
+	else
+		rtlphy->num_total_rfpath = 2;
+
+	return _rtl8821ae_phy_rf6052_config_parafile(hw);
+}
+
+static bool _rtl8821ae_phy_rf6052_config_parafile(struct ieee80211_hw *hw)
+{
+	struct rtl_priv *rtlpriv = rtl_priv(hw);
+	struct rtl_phy *rtlphy = &(rtlpriv->phy);
+	struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
+	/* u32 u4_regvalue = 0; */
+	u8 rfpath;
+	bool rtstatus = true;
+	/* struct bb_reg_def *pphyreg; */
+
+	for (rfpath = 0; rfpath < rtlphy->num_total_rfpath; rfpath++) {
+		switch (rfpath) {
+		case RF90_PATH_A:
+		case RF90_PATH_B:
+			if (rtlhal->hw_type == HARDWARE_TYPE_RTL8812AE)
+				rtstatus = rtl8812ae_phy_config_rf_with_headerfile(hw,
+						(enum radio_path)rfpath);
+			else
+				rtstatus = rtl8821ae_phy_config_rf_with_headerfile(hw,
+						(enum radio_path)rfpath);
+			break;
+		case RF90_PATH_C:
+		case RF90_PATH_D:
+			break;
+		}
+
+		if (!rtstatus) {
+			RT_TRACE(COMP_INIT, DBG_TRACE,
+				 ("Radio[%d] Fail!!", rfpath));
+			return false;
+		}
+	}
+
+	/*put arrays in dm.c*/
+	/*_rtl8821ae_config_rf_txpwr_track_headerfile(hw);*/
+	RT_TRACE(COMP_INIT, DBG_TRACE, ("\n"));
+	return rtstatus;
+}
diff --git a/drivers/staging/rtl8821ae/rtl8821ae/rf.h b/drivers/staging/rtl8821ae/rtl8821ae/rf.h
new file mode 100644
index 0000000..b665c0f
--- /dev/null
+++ b/drivers/staging/rtl8821ae/rtl8821ae/rf.h
@@ -0,0 +1,46 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2009-2010  Realtek Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ * The full GNU General Public License is included in this distribution in the
+ * file called LICENSE.
+ *
+ * Contact Information:
+ * wlanfae <wlanfae@realtek.com>
+ * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
+ * Hsinchu 300, Taiwan.
+ *
+ * Larry Finger <Larry.Finger@lwfinger.net>
+ *
+ *****************************************************************************/
+
+#ifndef __RTL8821AE_RF_H__
+#define __RTL8821AE_RF_H__
+
+#define RF6052_MAX_TX_PWR		0x3F
+#define RF6052_MAX_REG			0x3F
+
+void rtl8821ae_phy_rf6052_set_bandwidth(struct ieee80211_hw *hw,
+					u8 bandwidth);
+void rtl8821ae_phy_rf6052_set_cck_txpower(struct ieee80211_hw *hw,
+					  u8 *ppowerlevel);
+void rtl8821ae_phy_rf6052_set_ofdm_txpower(struct ieee80211_hw *hw,
+					   u8 *ppowerlevel_ofdm,
+					   u8 *ppowerlevel_bw20,
+					   u8 *ppowerlevel_bw40,
+					   u8 channel);
+bool rtl8821ae_phy_rf6052_config(struct ieee80211_hw *hw);
+#endif
diff --git a/drivers/staging/rtl8821ae/rtl8821ae/sw.c b/drivers/staging/rtl8821ae/rtl8821ae/sw.c
new file mode 100644
index 0000000..85a3474
--- /dev/null
+++ b/drivers/staging/rtl8821ae/rtl8821ae/sw.c
@@ -0,0 +1,499 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2009-2010  Realtek Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ * The full GNU General Public License is included in this distribution in the
+ * file called LICENSE.
+ *
+ * Contact Information:
+ * wlanfae <wlanfae@realtek.com>
+ * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
+ * Hsinchu 300, Taiwan.
+ *
+ * Larry Finger <Larry.Finger@lwfinger.net>
+ *
+ *****************************************************************************/
+
+#include <linux/vmalloc.h>
+#include <linux/module.h>
+
+#include "../wifi.h"
+#include "../core.h"
+#include "../pci.h"
+#include "reg.h"
+#include "def.h"
+#include "phy.h"
+#include "dm.h"
+#include "hw.h"
+#include "sw.h"
+#include "trx.h"
+#include "led.h"
+#include "table.h"
+#include "hal_btc.h"
+#include "../btcoexist/rtl_btc.h"
+
+void rtl8821ae_init_aspm_vars(struct ieee80211_hw *hw)
+{
+	struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
+
+	/* Disable ASPM for AMD by default. */
+	rtlpci->const_amdpci_aspm = 0;
+
+	/*
+	 * ASPM PS mode.
+	 * 0 - Disable ASPM,
+	 * 1 - Enable ASPM without Clock Req,
+	 * 2 - Enable ASPM with Clock Req,
+	 * 3 - Always Enable ASPM with Clock Req,
+	 * 4 - Always Enable ASPM without Clock Req.
+	 * Set default to RTL8192CE:3 RTL8192E:2.
+	 */
+	rtlpci->const_pci_aspm = 3;
+
+	/*Setting for PCI-E device */
+	rtlpci->const_devicepci_aspm_setting = 0x03;
+
+	/*Setting for PCI-E bridge */
+	rtlpci->const_hostpci_aspm_setting = 0x02;
+
+	/*
+	 * In Hw/Sw Radio Off situation.
+	 * 0 - Default,
+	 * 1 - From ASPM setting without low Mac Pwr,
+	 * 2 - From ASPM setting with low Mac Pwr,
+	 * 3 - Bus D3
+	 * set default to RTL8192CE:0 RTL8192SE:2
+	 */
+	rtlpci->const_hwsw_rfoff_d3 = 0;
+
+	/*
+	 * This setting works for those device with
+	 * backdoor ASPM setting such as EPHY setting.
+	 * 0 - Not support ASPM,
+	 * 1 - Support ASPM,
+	 * 2 - According to chipset.
+	 */
+	rtlpci->const_support_pciaspm = 1;
+}
+
+/*InitializeVariables8812E*/
+int rtl8821ae_init_sw_vars(struct ieee80211_hw *hw)
+{
+	int err = 0;
+	struct rtl_priv *rtlpriv = rtl_priv(hw);
+	struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
+	const struct firmware *firmware;
+	struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
+	char *fw_name = NULL;
+
+	rtl8821ae_bt_reg_init(hw);
+	rtlpriv->btcoexist.btc_ops = rtl_btc_get_ops_pointer();
+
+	rtlpriv->dm.b_dm_initialgain_enable = 1;
+	rtlpriv->dm.dm_flag = 0;
+	rtlpriv->dm.b_disable_framebursting = 0;
+	rtlpriv->dm.thermalvalue = 0;
+	rtlpci->transmit_config = CFENDFORM | BIT(15) | BIT(24) | BIT(25);
+
+	mac->ht_enable = true;
+
+	rtlpriv->rtlhal.current_bandtype = BAND_ON_2_4G;
+	/* The following two settings register the 5G band; refer to _rtl_init_mac80211(). */
+	rtlpriv->rtlhal.bandset = BAND_ON_BOTH;
+	rtlpriv->rtlhal.macphymode = SINGLEMAC_SINGLEPHY;
+
+	rtlpci->receive_config = (RCR_APPFCS			|
+							  RCR_APP_MIC 			|
+							  RCR_APP_ICV 			|
+							  RCR_APP_PHYST_RXFF	|
+							  RCR_NONQOS_VHT		|
+							  RCR_HTC_LOC_CTRL		|
+							  RCR_AMF 				|
+							  RCR_ACF 				|
+							  RCR_ADF 				|	/*This bit controls the PS-Poll packet filter.*/
+							  RCR_AICV				|
+							  RCR_ACRC32			|
+							  RCR_AB				|
+							  RCR_AM				|
+							  RCR_APM 				|
+							  0);
+
+
+	rtlpci->irq_mask[0] =
+	     (u32) (IMR_PSTIMEOUT			|
+				IMR_GTINT3 				|
+				/*IMR_TBDER				|
+				IMR_TBDOK				|
+				IMR_BCNDMAINT0 			|*/
+				IMR_HSISR_IND_ON_INT	|
+				IMR_C2HCMD 				|
+				IMR_HIGHDOK				|
+				IMR_MGNTDOK				|
+				IMR_BKDOK				|
+				IMR_BEDOK				|
+				IMR_VIDOK				|
+				IMR_VODOK				|
+				IMR_RDU					|
+				IMR_ROK					|
+				0);
+
+	rtlpci->irq_mask[1]	=
+		 (u32)(	IMR_RXFOVW |
+		 		IMR_TXFOVW |
+				0);
+
+	/* for LPS & IPS */
+	rtlpriv->psc.b_inactiveps = rtlpriv->cfg->mod_params->b_inactiveps;
+	rtlpriv->psc.b_swctrl_lps = rtlpriv->cfg->mod_params->b_swctrl_lps;
+	rtlpriv->psc.b_fwctrl_lps = rtlpriv->cfg->mod_params->b_fwctrl_lps;
+	rtlpriv->psc.b_reg_fwctrl_lps = 3;
+	rtlpriv->psc.reg_max_lps_awakeintvl = 5;
+	/* For ASPM, you can disable ASPM by setting
+	 * const_support_pciaspm = 0.
+	 */
+	rtl8821ae_init_aspm_vars(hw);
+
+	if (rtlpriv->psc.b_reg_fwctrl_lps == 1)
+		rtlpriv->psc.fwctrl_psmode = FW_PS_MIN_MODE;
+	else if (rtlpriv->psc.b_reg_fwctrl_lps == 2)
+		rtlpriv->psc.fwctrl_psmode = FW_PS_MAX_MODE;
+	else if (rtlpriv->psc.b_reg_fwctrl_lps == 3)
+		rtlpriv->psc.fwctrl_psmode = FW_PS_DTIM_MODE;
+
+	/* for firmware buf */
+	rtlpriv->rtlhal.pfirmware = vmalloc(0x8000);
+	if (!rtlpriv->rtlhal.pfirmware) {
+		RT_TRACE(COMP_ERR, DBG_EMERG,
+			 ("Can't alloc buffer for fw.\n"));
+		return 1;
+	}
+
+	fw_name = "rtlwifi/rtl8821aefw.bin";
+	err = request_firmware(&firmware, fw_name, rtlpriv->io.dev);
+	if (err) {
+		RT_TRACE(COMP_ERR, DBG_EMERG,
+			 ("Failed to request firmware!\n"));
+		return 1;
+	}
+
+	if (firmware->size > 0x8000) {
+		RT_TRACE(COMP_ERR, DBG_EMERG,
+			 ("Firmware is too big!\n"));
+		release_firmware(firmware);
+		return 1;
+	}
+
+	memcpy(rtlpriv->rtlhal.pfirmware, firmware->data, firmware->size);
+	rtlpriv->rtlhal.fwsize = firmware->size;
+	release_firmware(firmware);
+
+	if (rtlpriv->cfg->ops->get_btc_status()) {
+		rtlpriv->btcoexist.btc_ops->btc_init_variables(rtlpriv);
+		rtlpriv->btcoexist.btc_ops->btc_init_hal_vars(rtlpriv);
+	}
+
+	RT_TRACE(COMP_INIT, DBG_LOUD, ("Firmware download OK\n"));
+	return err;
+}
+
+void rtl8821ae_deinit_sw_vars(struct ieee80211_hw *hw)
+{
+	struct rtl_priv *rtlpriv = rtl_priv(hw);
+
+	if (rtlpriv->cfg->ops->get_btc_status())
+		rtlpriv->btcoexist.btc_ops->btc_halt_notify();
+	if (rtlpriv->rtlhal.pfirmware) {
+		vfree(rtlpriv->rtlhal.pfirmware);
+		rtlpriv->rtlhal.pfirmware = NULL;
+	}
+}
+
+u32 rtl8812ae_rx_command_packet_handler(struct ieee80211_hw *hw,
+					struct rtl_stats status,
+					struct sk_buff *skb)
+{
+	u32 result = 0;
+	struct rtl_priv *rtlpriv = rtl_priv(hw);
+
+	switch (status.packet_report_type) {
+	case NORMAL_RX:
+		result = 0;
+		break;
+	case C2H_PACKET:
+		rtl8812ae_c2h_packet_handler(hw, skb->data, (u8) skb->len);
+		result = 1;
+		RT_TRACE(COMP_RECV, DBG_LOUD,
+			("===>rtl8812ae_rx_command_packet_handler(): skb->len=%d\n\n",
+			 skb->len));
+		break;
+	default:
+		RT_TRACE(COMP_RECV, DBG_LOUD,
+			("===>rtl8812ae_rx_command_packet_handler(): unknown packet type\n"));
+		break;
+	}
+
+	return result;
+}
+
+
+/* get bt coexist status */
+bool rtl8821ae_get_btc_status(void)
+{
+	return true;
+}
+
+struct rtl_hal_ops rtl8821ae_hal_ops = {
+	.init_sw_vars = rtl8821ae_init_sw_vars,
+	.deinit_sw_vars = rtl8821ae_deinit_sw_vars,
+	.read_eeprom_info = rtl8821ae_read_eeprom_info,
+	.interrupt_recognized = rtl8821ae_interrupt_recognized,
+	.hw_init = rtl8821ae_hw_init,
+	.hw_disable = rtl8821ae_card_disable,
+	.hw_suspend = rtl8821ae_suspend,
+	.hw_resume = rtl8821ae_resume,
+	.enable_interrupt = rtl8821ae_enable_interrupt,
+	.disable_interrupt = rtl8821ae_disable_interrupt,
+	.set_network_type = rtl8821ae_set_network_type,
+	.set_chk_bssid = rtl8821ae_set_check_bssid,
+	.set_qos = rtl8821ae_set_qos,
+	.set_bcn_reg = rtl8821ae_set_beacon_related_registers,
+	.set_bcn_intv = rtl8821ae_set_beacon_interval,
+	.update_interrupt_mask = rtl8821ae_update_interrupt_mask,
+	.get_hw_reg = rtl8821ae_get_hw_reg,
+	.set_hw_reg = rtl8821ae_set_hw_reg,
+	.update_rate_tbl = rtl8821ae_update_hal_rate_tbl,
+	.fill_tx_desc = rtl8821ae_tx_fill_desc,
+	.fill_tx_cmddesc = rtl8821ae_tx_fill_cmddesc,
+	.query_rx_desc = rtl8821ae_rx_query_desc,
+	.set_channel_access = rtl8821ae_update_channel_access_setting,
+	.radio_onoff_checking = rtl8821ae_gpio_radio_on_off_checking,
+	.set_bw_mode = rtl8821ae_phy_set_bw_mode,
+	.switch_channel = rtl8821ae_phy_sw_chnl,
+	.dm_watchdog = rtl8821ae_dm_watchdog,
+	.scan_operation_backup = rtl8821ae_phy_scan_operation_backup,
+	.set_rf_power_state = rtl8821ae_phy_set_rf_power_state,
+	.led_control = rtl8821ae_led_control,
+	.set_desc = rtl8821ae_set_desc,
+	.get_desc = rtl8821ae_get_desc,
+	.is_tx_desc_closed = rtl8821ae_is_tx_desc_closed,
+	.tx_polling = rtl8821ae_tx_polling,
+	.enable_hw_sec = rtl8821ae_enable_hw_security_config,
+	.set_key = rtl8821ae_set_key,
+	.init_sw_leds = rtl8821ae_init_sw_leds,
+	.allow_all_destaddr = rtl8821ae_allow_all_destaddr,
+	.get_bbreg = rtl8821ae_phy_query_bb_reg,
+	.set_bbreg = rtl8821ae_phy_set_bb_reg,
+	.get_rfreg = rtl8821ae_phy_query_rf_reg,
+	.set_rfreg = rtl8821ae_phy_set_rf_reg,
+	.c2h_command_handle = rtl_8821ae_c2h_command_handle,
+	.bt_wifi_media_status_notify = rtl_8821ae_bt_wifi_media_status_notify,
+	.bt_turn_off_bt_coexist_before_enter_lps = rtl8821ae_dm_bt_turn_off_bt_coexist_before_enter_lps,
+	.fill_h2c_cmd = rtl8821ae_fill_h2c_cmd,
+	.get_btc_status = rtl8821ae_get_btc_status,
+	.rx_command_packet_handler = rtl8812ae_rx_command_packet_handler,
+};
+
+struct rtl_mod_params rtl8821ae_mod_params = {
+	.sw_crypto = false,
+	.b_inactiveps = false,
+	.b_swctrl_lps = false,
+	.b_fwctrl_lps = false,
+};
+
+struct rtl_hal_cfg rtl8821ae_hal_cfg = {
+	.bar_id = 2,
+	.write_readback = true,
+	.name = "rtl8821ae_pci",
+	.fw_name = "rtlwifi/rtl8821aefw.bin",
+	.ops = &rtl8821ae_hal_ops,
+	.mod_params = &rtl8821ae_mod_params,
+	.maps[SYS_ISO_CTRL] = REG_SYS_ISO_CTRL,
+	.maps[SYS_FUNC_EN] = REG_SYS_FUNC_EN,
+	.maps[SYS_CLK] = REG_SYS_CLKR,
+	.maps[MAC_RCR_AM] = AM,
+	.maps[MAC_RCR_AB] = AB,
+	.maps[MAC_RCR_ACRC32] = ACRC32,
+	.maps[MAC_RCR_ACF] = ACF,
+	.maps[MAC_RCR_AAP] = AAP,
+	.maps[MAC_HIMR] = REG_HIMR,
+	.maps[MAC_HIMRE] = REG_HIMRE,
+
+	.maps[EFUSE_ACCESS] = REG_EFUSE_ACCESS,
+
+	.maps[EFUSE_TEST] = REG_EFUSE_TEST,
+	.maps[EFUSE_CTRL] = REG_EFUSE_CTRL,
+	.maps[EFUSE_CLK] = 0,
+	.maps[EFUSE_CLK_CTRL] = REG_EFUSE_CTRL,
+	.maps[EFUSE_PWC_EV12V] = PWC_EV12V,
+	.maps[EFUSE_FEN_ELDR] = FEN_ELDR,
+	.maps[EFUSE_LOADER_CLK_EN] = LOADER_CLK_EN,
+	.maps[EFUSE_ANA8M] = ANA8M,
+	.maps[EFUSE_HWSET_MAX_SIZE] = HWSET_MAX_SIZE,
+	.maps[EFUSE_MAX_SECTION_MAP] = EFUSE_MAX_SECTION,
+	.maps[EFUSE_REAL_CONTENT_SIZE] = EFUSE_REAL_CONTENT_LEN,
+	.maps[EFUSE_OOB_PROTECT_BYTES_LEN] = EFUSE_OOB_PROTECT_BYTES,
+
+	.maps[RWCAM] = REG_CAMCMD,
+	.maps[WCAMI] = REG_CAMWRITE,
+	.maps[RCAMO] = REG_CAMREAD,
+	.maps[CAMDBG] = REG_CAMDBG,
+	.maps[SECR] = REG_SECCFG,
+	.maps[SEC_CAM_NONE] = CAM_NONE,
+	.maps[SEC_CAM_WEP40] = CAM_WEP40,
+	.maps[SEC_CAM_TKIP] = CAM_TKIP,
+	.maps[SEC_CAM_AES] = CAM_AES,
+	.maps[SEC_CAM_WEP104] = CAM_WEP104,
+
+	.maps[RTL_IMR_BCNDMAINT6] = IMR_BCNDMAINT6,
+	.maps[RTL_IMR_BCNDMAINT5] = IMR_BCNDMAINT5,
+	.maps[RTL_IMR_BCNDMAINT4] = IMR_BCNDMAINT4,
+	.maps[RTL_IMR_BCNDMAINT3] = IMR_BCNDMAINT3,
+	.maps[RTL_IMR_BCNDMAINT2] = IMR_BCNDMAINT2,
+	.maps[RTL_IMR_BCNDMAINT1] = IMR_BCNDMAINT1,
+/*	.maps[RTL_IMR_BCNDOK8] = IMR_BCNDOK8,     */   /*need check*/
+	.maps[RTL_IMR_BCNDOK7] = IMR_BCNDOK7,
+	.maps[RTL_IMR_BCNDOK6] = IMR_BCNDOK6,
+	.maps[RTL_IMR_BCNDOK5] = IMR_BCNDOK5,
+	.maps[RTL_IMR_BCNDOK4] = IMR_BCNDOK4,
+	.maps[RTL_IMR_BCNDOK3] = IMR_BCNDOK3,
+	.maps[RTL_IMR_BCNDOK2] = IMR_BCNDOK2,
+	.maps[RTL_IMR_BCNDOK1] = IMR_BCNDOK1,
+/*	.maps[RTL_IMR_TIMEOUT2] = IMR_TIMEOUT2,*/
+/*	.maps[RTL_IMR_TIMEOUT1] = IMR_TIMEOUT1,*/
+
+	.maps[RTL_IMR_TXFOVW] = IMR_TXFOVW,
+	.maps[RTL_IMR_PSTIMEOUT] = IMR_PSTIMEOUT,
+	.maps[RTL_IMR_BcnInt] = IMR_BCNDMAINT0,
+	.maps[RTL_IMR_RXFOVW] = IMR_RXFOVW,
+	.maps[RTL_IMR_RDU] = IMR_RDU,
+	.maps[RTL_IMR_ATIMEND] = IMR_ATIMEND,
+	.maps[RTL_IMR_BDOK] = IMR_BCNDOK0,
+	.maps[RTL_IMR_MGNTDOK] = IMR_MGNTDOK,
+	.maps[RTL_IMR_TBDER] = IMR_TBDER,
+	.maps[RTL_IMR_HIGHDOK] = IMR_HIGHDOK,
+	.maps[RTL_IMR_TBDOK] = IMR_TBDOK,
+	.maps[RTL_IMR_BKDOK] = IMR_BKDOK,
+	.maps[RTL_IMR_BEDOK] = IMR_BEDOK,
+	.maps[RTL_IMR_VIDOK] = IMR_VIDOK,
+	.maps[RTL_IMR_VODOK] = IMR_VODOK,
+	.maps[RTL_IMR_ROK] = IMR_ROK,
+	.maps[RTL_IBSS_INT_MASKS] = (IMR_BCNDMAINT0 | IMR_TBDOK | IMR_TBDER),
+
+	.maps[RTL_RC_CCK_RATE1M] = DESC_RATE1M,
+	.maps[RTL_RC_CCK_RATE2M] =  DESC_RATE2M,
+	.maps[RTL_RC_CCK_RATE5_5M] =  DESC_RATE5_5M,
+	.maps[RTL_RC_CCK_RATE11M] =  DESC_RATE11M,
+	.maps[RTL_RC_OFDM_RATE6M] =  DESC_RATE6M,
+	.maps[RTL_RC_OFDM_RATE9M] =  DESC_RATE9M,
+	.maps[RTL_RC_OFDM_RATE12M] =  DESC_RATE12M,
+	.maps[RTL_RC_OFDM_RATE18M] =  DESC_RATE18M,
+	.maps[RTL_RC_OFDM_RATE24M] =  DESC_RATE24M,
+	.maps[RTL_RC_OFDM_RATE36M] =  DESC_RATE36M,
+	.maps[RTL_RC_OFDM_RATE48M] =  DESC_RATE48M,
+	.maps[RTL_RC_OFDM_RATE54M] =  DESC_RATE54M,
+
+	.maps[RTL_RC_HT_RATEMCS7] =  DESC_RATEMCS7,
+	.maps[RTL_RC_HT_RATEMCS15] =  DESC_RATEMCS15,
+};
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,8,0))
+static struct pci_device_id rtl8821ae_pci_ids[] = {
+	{RTL_PCI_DEVICE(PCI_VENDOR_ID_REALTEK, 0x8812, rtl8821ae_hal_cfg)},
+	{RTL_PCI_DEVICE(PCI_VENDOR_ID_REALTEK, 0x8821, rtl8821ae_hal_cfg)},
+	{},
+};
+#else
+static struct pci_device_id rtl8821ae_pci_ids[] __devinitdata = {
+	{RTL_PCI_DEVICE(PCI_VENDOR_ID_REALTEK, 0x8812, rtl8821ae_hal_cfg)},
+	{RTL_PCI_DEVICE(PCI_VENDOR_ID_REALTEK, 0x8821, rtl8821ae_hal_cfg)},
+	{},
+};
+#endif
+
+MODULE_DEVICE_TABLE(pci, rtl8821ae_pci_ids);
+
+MODULE_AUTHOR("Ping Yan<ping_yan@realsil.com.cn>");
+MODULE_AUTHOR("Realtek WlanFAE	<wlanfae@realtek.com>");
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("Realtek 8821ae 802.11ac PCI wireless");
+MODULE_FIRMWARE("rtlwifi/rtl8821aefw.bin");
+
+module_param_named(swenc, rtl8821ae_mod_params.sw_crypto, bool, 0444);
+module_param_named(ips, rtl8821ae_mod_params.b_inactiveps, bool, 0444);
+module_param_named(swlps, rtl8821ae_mod_params.b_swctrl_lps, bool, 0444);
+module_param_named(fwlps, rtl8821ae_mod_params.b_fwctrl_lps, bool, 0444);
+MODULE_PARM_DESC(swenc, "Set to 1 to use software crypto (default 0 [hardware crypto])\n");
+MODULE_PARM_DESC(ips, "Set to 1 to use inactive power save (default 0 [disabled])\n");
+MODULE_PARM_DESC(fwlps, "Set to 1 to use firmware-controlled power save (default 0 [disabled])\n");
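+MODULE_PARM_DESC(swlps, "Set to 1 to use software-controlled link power save (default 0 [disabled])\n");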
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,29))
+static const SIMPLE_DEV_PM_OPS(rtlwifi_pm_ops, rtl_pci_suspend, rtl_pci_resume);
+#endif
+
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,29))
+compat_pci_suspend(rtl_pci_suspend)
+compat_pci_resume(rtl_pci_resume)
+#endif
+
+static struct pci_driver rtl8821ae_driver = {
+	.name = KBUILD_MODNAME,
+	.id_table = rtl8821ae_pci_ids,
+	.probe = rtl_pci_probe,
+	.remove = rtl_pci_disconnect,
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,29))
+	.driver.pm = &rtlwifi_pm_ops,
+#elif defined(CONFIG_PM)
+	.suspend = rtl_pci_suspend_compat,
+	.resume = rtl_pci_resume_compat,
+#endif
+
+};
+
+
+extern int rtl_core_module_init(void);
+extern void rtl_core_module_exit(void);
+
+static int __init rtl8821ae_module_init(void)
+{
+	int ret;
+
+	ret = rtl_core_module_init();
+	if (ret)
+		return ret;
+
+	ret = pci_register_driver(&rtl8821ae_driver);
+	if (ret)
+		RT_ASSERT(false, (": No device found\n"));
+
+	return ret;
+}
+
+static void __exit rtl8821ae_module_exit(void)
+{
+	pci_unregister_driver(&rtl8821ae_driver);
+	rtl_core_module_exit();
+}
+
+module_init(rtl8821ae_module_init);
+module_exit(rtl8821ae_module_exit);
diff --git a/drivers/staging/rtl8821ae/rtl8821ae/sw.h b/drivers/staging/rtl8821ae/rtl8821ae/sw.h
new file mode 100644
index 0000000..3d49b2f
--- /dev/null
+++ b/drivers/staging/rtl8821ae/rtl8821ae/sw.h
@@ -0,0 +1,39 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2009-2010  Realtek Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ * The full GNU General Public License is included in this distribution in the
+ * file called LICENSE.
+ *
+ * Contact Information:
+ * wlanfae <wlanfae@realtek.com>
+ * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
+ * Hsinchu 300, Taiwan.
+ *
+ * Larry Finger <Larry.Finger@lwfinger.net>
+ *
+ *****************************************************************************/
+
+#ifndef __RTL8821AE_SW_H__
+#define __RTL8821AE_SW_H__
+
+int rtl8821ae_init_sw_vars(struct ieee80211_hw *hw);
+void rtl8821ae_deinit_sw_vars(struct ieee80211_hw *hw);
+void rtl8821ae_init_var_map(struct ieee80211_hw *hw);
+bool rtl8821ae_get_btc_status(void);
+
+
+#endif
diff --git a/drivers/staging/rtl8821ae/rtl8821ae/table.c b/drivers/staging/rtl8821ae/rtl8821ae/table.c
new file mode 100644
index 0000000..a6c4ca4
--- /dev/null
+++ b/drivers/staging/rtl8821ae/rtl8821ae/table.c
@@ -0,0 +1,4002 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2009-2010  Realtek Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ * The full GNU General Public License is included in this distribution in the
+ * file called LICENSE.
+ *
+ * Contact Information:
+ * wlanfae <wlanfae@realtek.com>
+ * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
+ * Hsinchu 300, Taiwan.
+ *
+ * Created on  2010/ 5/18,  1:41
+ *
+ * Larry Finger <Larry.Finger@lwfinger.net>
+ *
+ *****************************************************************************/
+
+#include "table.h"
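+
+/*
+ * PHY baseband initialization tables: flat arrays of (register offset,
+ * value) pairs written in order by the PHY configuration code.  Entries
+ * of the form 0xFF0Fxxxx/0xABCD, 0xCDEF, 0xCDCDCDCD/0xCDCD and 0xDEAD
+ * look like condition markers selecting board-specific branches rather
+ * than real register writes (an assumption from the value patterns, not
+ * something stated in this file).
+ */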
+u32 RTL8812AE_PHY_REG_ARRAY[] = {
+		0x800, 0x8020D010,
+		0x804, 0x080112E0,
+		0x808, 0x0E028233,
+		0x80C, 0x12131113,
+		0x810, 0x20101263,
+		0x814, 0x020C3D10,
+		0x818, 0x03A00385,
+		0x820, 0x00000000,
+		0x824, 0x00030FE0,
+		0x828, 0x00000000,
+		0x82C, 0x002083DD,
+		0x830, 0x2AAA6C86,
+		0x834, 0x0037A706,
+		0x838, 0x06C89B44,
+		0x83C, 0x0000095B,
+		0x840, 0xC0000001,
+		0x844, 0x40003CDE,
+		0x848, 0x6210FF8B,
+		0x84C, 0x6CFDFFB8,
+		0x850, 0x28874706,
+		0x854, 0x0001520C,
+		0x858, 0x8060E000,
+		0x85C, 0x74210168,
+		0x860, 0x6929C321,
+		0x864, 0x79727432,
+		0x868, 0x8CA7A314,
+		0x86C, 0x338C2878,
+		0x870, 0x03333333,
+		0x874, 0x31602C2E,
+		0x878, 0x00003152,
+		0x87C, 0x000FC000,
+		0x8A0, 0x00000013,
+		0x8A4, 0x7F7F7F7F,
+		0x8A8, 0xA202033E,
+		0x8AC, 0x0FF0FA0A,
+		0x8B0, 0x00000600,
+		0x8B4, 0x000FC080,
+		0x8B8, 0x6C0057FF,
+		0x8BC, 0x4CA520A3,
+		0x8C0, 0x27F00020,
+		0x8C4, 0x00000000,
+		0x8C8, 0x00013169,
+		0x8CC, 0x08248492,
+		0x8D0, 0x0000B800,
+		0x8DC, 0x00000000,
+		0x8D4, 0x940008A0,
+		0x8D8, 0x290B5612,
+		0x8F8, 0x400002C0,
+		0x8FC, 0x00000000,
+	0xFF0F07D8, 0xABCD,
+		0x900, 0x00000700,
+	0xFF0F07D0, 0xCDEF,
+		0x900, 0x00000700,
+	0xCDCDCDCD, 0xCDCD,
+		0x900, 0x00000700,
+	0xFF0F07D8, 0xDEAD,
+		0x90C, 0x00000000,
+		0x910, 0x0000FC00,
+		0x914, 0x00000404,
+		0x918, 0x1C1028C0,
+		0x91C, 0x64B11A1C,
+		0x920, 0xE0767233,
+		0x924, 0x055AA500,
+		0x928, 0x00000004,
+		0x92C, 0xFFFE0000,
+		0x930, 0xFFFFFFFE,
+		0x934, 0x001FFFFF,
+		0x960, 0x00000000,
+		0x964, 0x00000000,
+		0x968, 0x00000000,
+		0x96C, 0x00000000,
+		0x970, 0x801FFFFF,
+		0x978, 0x00000000,
+		0x97C, 0x00000000,
+		0x980, 0x00000000,
+		0x984, 0x00000000,
+		0x988, 0x00000000,
+		0x990, 0x27100000,
+		0x994, 0xFFFF0100,
+		0x998, 0xFFFFFF5C,
+		0x99C, 0xFFFFFFFF,
+		0x9A0, 0x000000FF,
+		0x9A4, 0x00080080,
+		0x9A8, 0x00000000,
+		0x9AC, 0x00000000,
+		0x9B0, 0x81081008,
+		0x9B4, 0x00000000,
+		0x9B8, 0x01081008,
+		0x9BC, 0x01081008,
+		0x9D0, 0x00000000,
+		0x9D4, 0x00000000,
+		0x9D8, 0x00000000,
+		0x9DC, 0x00000000,
+		0x9E4, 0x00000002,
+		0x9E8, 0x000002D5,
+		0xA00, 0x00D047C8,
+		0xA04, 0x01FF000C,
+		0xA08, 0x8C838300,
+		0xA0C, 0x2E7F000F,
+		0xA10, 0x9500BB78,
+		0xA14, 0x11144028,
+		0xA18, 0x00881117,
+		0xA1C, 0x89140F00,
+		0xA20, 0x1A1B0000,
+		0xA24, 0x090E1317,
+		0xA28, 0x00000204,
+		0xA2C, 0x00900000,
+		0xA70, 0x101FFF00,
+		0xA74, 0x00000008,
+		0xA78, 0x00000900,
+		0xA7C, 0x225B0606,
+		0xA80, 0x218075B2,
+		0xA84, 0x001F8C80,
+		0xB00, 0x03100000,
+		0xB04, 0x0000B000,
+		0xB08, 0xAE0201EB,
+		0xB0C, 0x01003207,
+		0xB10, 0x00009807,
+		0xB14, 0x01000000,
+		0xB18, 0x00000002,
+		0xB1C, 0x00000002,
+		0xB20, 0x0000001F,
+		0xB24, 0x03020100,
+		0xB28, 0x07060504,
+		0xB2C, 0x0B0A0908,
+		0xB30, 0x0F0E0D0C,
+		0xB34, 0x13121110,
+		0xB38, 0x17161514,
+		0xB3C, 0x0000003A,
+		0xB40, 0x00000000,
+		0xB44, 0x00000000,
+		0xB48, 0x13000032,
+		0xB4C, 0x48080000,
+		0xB50, 0x00000000,
+		0xB54, 0x00000000,
+		0xB58, 0x00000000,
+		0xB5C, 0x00000000,
+		0xC00, 0x00000007,
+		0xC04, 0x00042020,
+		0xC08, 0x80410231,
+		0xC0C, 0x00000000,
+		0xC10, 0x00000100,
+		0xC14, 0x01000000,
+		0xC1C, 0x40000003,
+		0xC20, 0x12121212,
+		0xC24, 0x12121212,
+		0xC28, 0x12121212,
+		0xC2C, 0x12121212,
+		0xC30, 0x12121212,
+		0xC34, 0x12121212,
+		0xC38, 0x12121212,
+		0xC3C, 0x12121212,
+		0xC40, 0x12121212,
+		0xC44, 0x12121212,
+		0xC48, 0x12121212,
+		0xC4C, 0x12121212,
+		0xC50, 0x00000020,
+		0xC54, 0x0008121C,
+		0xC58, 0x30000C1C,
+		0xC5C, 0x00000058,
+		0xC60, 0x34344443,
+		0xC64, 0x07003333,
+		0xC68, 0x59791979,
+		0xC6C, 0x59795979,
+		0xC70, 0x19795979,
+		0xC74, 0x19795979,
+		0xC78, 0x19791979,
+		0xC7C, 0x19791979,
+		0xC80, 0x19791979,
+		0xC84, 0x19791979,
+		0xC94, 0x0100005C,
+		0xC98, 0x00000000,
+		0xC9C, 0x00000000,
+		0xCA0, 0x00000029,
+		0xCA4, 0x08040201,
+		0xCA8, 0x80402010,
+	0xFF0F0740, 0xABCD,
+		0xCB0, 0x77547717,
+	0xFF0F01C0, 0xCDEF,
+		0xCB0, 0x77547717,
+	0xFF0F02C0, 0xCDEF,
+		0xCB0, 0x77547717,
+	0xFF0F07D8, 0xCDEF,
+		0xCB0, 0x54547710,
+	0xFF0F07D0, 0xCDEF,
+		0xCB0, 0x54547710,
+	0xCDCDCDCD, 0xCDCD,
+		0xCB0, 0x77547777,
+	0xFF0F0740, 0xDEAD,
+		0xCB4, 0x00000077,
+		0xCB8, 0x00508242,
+		0xE00, 0x00000007,
+		0xE04, 0x00042020,
+		0xE08, 0x80410231,
+		0xE0C, 0x00000000,
+		0xE10, 0x00000100,
+		0xE14, 0x01000000,
+		0xE1C, 0x40000003,
+		0xE20, 0x12121212,
+		0xE24, 0x12121212,
+		0xE28, 0x12121212,
+		0xE2C, 0x12121212,
+		0xE30, 0x12121212,
+		0xE34, 0x12121212,
+		0xE38, 0x12121212,
+		0xE3C, 0x12121212,
+		0xE40, 0x12121212,
+		0xE44, 0x12121212,
+		0xE48, 0x12121212,
+		0xE4C, 0x12121212,
+		0xE50, 0x00000020,
+		0xE54, 0x0008121C,
+		0xE58, 0x30000C1C,
+		0xE5C, 0x00000058,
+		0xE60, 0x34344443,
+		0xE64, 0x07003333,
+		0xE68, 0x59791979,
+		0xE6C, 0x59795979,
+		0xE70, 0x19795979,
+		0xE74, 0x19795979,
+		0xE78, 0x19791979,
+		0xE7C, 0x19791979,
+		0xE80, 0x19791979,
+		0xE84, 0x19791979,
+		0xE94, 0x0100005C,
+		0xE98, 0x00000000,
+		0xE9C, 0x00000000,
+		0xEA0, 0x00000029,
+		0xEA4, 0x08040201,
+		0xEA8, 0x80402010,
+	0xFF0F0740, 0xABCD,
+		0xEB0, 0x77547717,
+	0xFF0F01C0, 0xCDEF,
+		0xEB0, 0x77547717,
+	0xFF0F02C0, 0xCDEF,
+		0xEB0, 0x77547717,
+	0xFF0F07D8, 0xCDEF,
+		0xEB0, 0x54547710,
+	0xFF0F07D0, 0xCDEF,
+		0xEB0, 0x54547710,
+	0xCDCDCDCD, 0xCDCD,
+		0xEB0, 0x77547777,
+	0xFF0F0740, 0xDEAD,
+		0xEB4, 0x00000077,
+		0xEB8, 0x00508242,
+};
+
+u32 RTL8821AE_PHY_REG_ARRAY[] = {
+	0x800, 0x0020D090,
+	0x804, 0x080112E0,
+	0x808, 0x0E028211,
+	0x80C, 0x92131111,
+	0x810, 0x20101261,
+	0x814, 0x020C3D10,
+	0x818, 0x03A00385,
+	0x820, 0x00000000,
+	0x824, 0x00030FE0,
+	0x828, 0x00000000,
+	0x82C, 0x002081DD,
+	0x830, 0x2AAA8E24,
+	0x834, 0x0037A706,
+	0x838, 0x06489B44,
+	0x83C, 0x0000095B,
+	0x840, 0xC0000001,
+	0x844, 0x40003CDE,
+	0x848, 0x62103F8B,
+	0x84C, 0x6CFDFFB8,
+	0x850, 0x28874706,
+	0x854, 0x0001520C,
+	0x858, 0x8060E000,
+	0x85C, 0x74210168,
+	0x860, 0x6929C321,
+	0x864, 0x79727432,
+	0x868, 0x8CA7A314,
+	0x86C, 0x888C2878,
+	0x870, 0x08888888,
+	0x874, 0x31612C2E,
+	0x878, 0x00000152,
+	0x87C, 0x000FD000,
+	0x8A0, 0x00000013,
+	0x8A4, 0x7F7F7F7F,
+	0x8A8, 0xA2000338,
+	0x8AC, 0x0FF0FA0A,
+	0x8B4, 0x000FC080,
+	0x8B8, 0x6C10D7FF,
+	0x8BC, 0x0CA52090,
+	0x8C0, 0x1BF00020,
+	0x8C4, 0x00000000,
+	0x8C8, 0x00013169,
+	0x8CC, 0x08248492,
+	0x8D4, 0x940008A0,
+	0x8D8, 0x290B5612,
+	0x8F8, 0x400002C0,
+	0x8FC, 0x00000000,
+	0x900, 0x00000700,
+	0x90C, 0x00000000,
+	0x910, 0x0000FC00,
+	0x914, 0x00000404,
+	0x918, 0x1C1028C0,
+	0x91C, 0x64B11A1C,
+	0x920, 0xE0767233,
+	0x924, 0x055AA500,
+	0x928, 0x00000004,
+	0x92C, 0xFFFE0000,
+	0x930, 0xFFFFFFFE,
+	0x934, 0x001FFFFF,
+	0x960, 0x00000000,
+	0x964, 0x00000000,
+	0x968, 0x00000000,
+	0x96C, 0x00000000,
+	0x970, 0x801FFFFF,
+	0x974, 0x000003FF,
+	0x978, 0x00000000,
+	0x97C, 0x00000000,
+	0x980, 0x00000000,
+	0x984, 0x00000000,
+	0x988, 0x00000000,
+	0x990, 0x27100000,
+	0x994, 0xFFFF0100,
+	0x998, 0xFFFFFF5C,
+	0x99C, 0xFFFFFFFF,
+	0x9A0, 0x000000FF,
+	0x9A4, 0x00480080,
+	0x9A8, 0x00000000,
+	0x9AC, 0x00000000,
+	0x9B0, 0x81081008,
+	0x9B4, 0x01081008,
+	0x9B8, 0x01081008,
+	0x9BC, 0x01081008,
+	0x9D0, 0x00000000,
+	0x9D4, 0x00000000,
+	0x9D8, 0x00000000,
+	0x9DC, 0x00000000,
+	0x9E0, 0x00005D00,
+	0x9E4, 0x00000002,
+	0x9E8, 0x00000001,
+	0xA00, 0x00D047C8,
+	0xA04, 0x01FF000C,
+	0xA08, 0x8C8A8300,
+	0xA0C, 0x2E68000F,
+	0xA10, 0x9500BB78,
+	0xA14, 0x11144028,
+	0xA18, 0x00881117,
+	0xA1C, 0x89140F00,
+	0xA20, 0x1A1B0000,
+	0xA24, 0x090E1317,
+	0xA28, 0x00000204,
+	0xA2C, 0x00900000,
+	0xA70, 0x101FFF00,
+	0xA74, 0x00000008,
+	0xA78, 0x00000900,
+	0xA7C, 0x225B0606,
+	0xA80, 0x21805490,
+	0xA84, 0x001F0000,
+	0xB00, 0x03100040,
+	0xB04, 0x0000B000,
+	0xB08, 0xAE0201EB,
+	0xB0C, 0x01003207,
+	0xB10, 0x00009807,
+	0xB14, 0x01000000,
+	0xB18, 0x00000002,
+	0xB1C, 0x00000002,
+	0xB20, 0x0000001F,
+	0xB24, 0x03020100,
+	0xB28, 0x07060504,
+	0xB2C, 0x0B0A0908,
+	0xB30, 0x0F0E0D0C,
+	0xB34, 0x13121110,
+	0xB38, 0x17161514,
+	0xB3C, 0x0000003A,
+	0xB40, 0x00000000,
+	0xB44, 0x00000000,
+	0xB48, 0x13000032,
+	0xB4C, 0x48080000,
+	0xB50, 0x00000000,
+	0xB54, 0x00000000,
+	0xB58, 0x00000000,
+	0xB5C, 0x00000000,
+	0xC00, 0x00000007,
+	0xC04, 0x00042020,
+	0xC08, 0x80410231,
+	0xC0C, 0x00000000,
+	0xC10, 0x00000100,
+	0xC14, 0x01000000,
+	0xC1C, 0x40000003,
+	0xC20, 0x2C2C2C2C,
+	0xC24, 0x30303030,
+	0xC28, 0x30303030,
+	0xC2C, 0x2C2C2C2C,
+	0xC30, 0x2C2C2C2C,
+	0xC34, 0x2C2C2C2C,
+	0xC38, 0x2C2C2C2C,
+	0xC3C, 0x2A2A2A2A,
+	0xC40, 0x2A2A2A2A,
+	0xC44, 0x2A2A2A2A,
+	0xC48, 0x2A2A2A2A,
+	0xC4C, 0x2A2A2A2A,
+	0xC50, 0x00000020,
+	0xC54, 0x001C1208,
+	0xC58, 0x30000C1C,
+	0xC5C, 0x00000058,
+	0xC60, 0x34344443,
+	0xC64, 0x07003333,
+	0xC68, 0x19791979,
+	0xC6C, 0x19791979,
+	0xC70, 0x19791979,
+	0xC74, 0x19791979,
+	0xC78, 0x19791979,
+	0xC7C, 0x19791979,
+	0xC80, 0x19791979,
+	0xC84, 0x19791979,
+	0xC94, 0x0100005C,
+	0xC98, 0x00000000,
+	0xC9C, 0x00000000,
+	0xCA0, 0x00000029,
+	0xCA4, 0x08040201,
+	0xCA8, 0x80402010,
+	0xCB0, 0x77775747,
+	0xCB4, 0x10000077,
+	0xCB8, 0x00508240,
+};
+
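+/*
+ * Power-by-rate ("PG") tables.  Each row appears to be
+ * (band, RF path, tx num, register, bitmask, value), with the last column
+ * packing several per-rate power indexes into byte fields (an assumption
+ * from the column layout, not documented here).
+ */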
+u32 RTL8812AE_PHY_REG_ARRAY_PG[] = {
+	0, 0, 0, 0x00000c20, 0xffffffff, 0x34363840,
+	0, 0, 0, 0x00000c24, 0xffffffff, 0x42424444,
+	0, 0, 0, 0x00000c28, 0xffffffff, 0x30323638,
+	0, 0, 0, 0x00000c2c, 0xffffffff, 0x40424444,
+	0, 0, 0, 0x00000c30, 0xffffffff, 0x28303236,
+	0, 0, 1, 0x00000c34, 0xffffffff, 0x38404242,
+	0, 0, 1, 0x00000c38, 0xffffffff, 0x26283034,
+	0, 0, 0, 0x00000c3c, 0xffffffff, 0x40424444,
+	0, 0, 0, 0x00000c40, 0xffffffff, 0x28303236,
+	0, 0, 0, 0x00000c44, 0xffffffff, 0x42422426,
+	0, 0, 1, 0x00000c48, 0xffffffff, 0x30343840,
+	0, 0, 1, 0x00000c4c, 0xffffffff, 0x22242628,
+	0, 1, 0, 0x00000e20, 0xffffffff, 0x34363840,
+	0, 1, 0, 0x00000e24, 0xffffffff, 0x42424444,
+	0, 1, 0, 0x00000e28, 0xffffffff, 0x30323638,
+	0, 1, 0, 0x00000e2c, 0xffffffff, 0x40424444,
+	0, 1, 0, 0x00000e30, 0xffffffff, 0x28303236,
+	0, 1, 1, 0x00000e34, 0xffffffff, 0x38404242,
+	0, 1, 1, 0x00000e38, 0xffffffff, 0x26283034,
+	0, 1, 0, 0x00000e3c, 0xffffffff, 0x40424444,
+	0, 1, 0, 0x00000e40, 0xffffffff, 0x28303236,
+	0, 1, 0, 0x00000e44, 0xffffffff, 0x42422426,
+	0, 1, 1, 0x00000e48, 0xffffffff, 0x30343840,
+	0, 1, 1, 0x00000e4c, 0xffffffff, 0x22242628,
+	1, 0, 0, 0x00000c24, 0xffffffff, 0x42424444,
+	1, 0, 0, 0x00000c28, 0xffffffff, 0x30323640,
+	1, 0, 0, 0x00000c2c, 0xffffffff, 0x40424444,
+	1, 0, 0, 0x00000c30, 0xffffffff, 0x28303236,
+	1, 0, 1, 0x00000c34, 0xffffffff, 0x38404242,
+	1, 0, 1, 0x00000c38, 0xffffffff, 0x26283034,
+	1, 0, 0, 0x00000c3c, 0xffffffff, 0x40424444,
+	1, 0, 0, 0x00000c40, 0xffffffff, 0x28303236,
+	1, 0, 0, 0x00000c44, 0xffffffff, 0x42422426,
+	1, 0, 1, 0x00000c48, 0xffffffff, 0x30343840,
+	1, 0, 1, 0x00000c4c, 0xffffffff, 0x22242628,
+	1, 1, 0, 0x00000e24, 0xffffffff, 0x42424444,
+	1, 1, 0, 0x00000e28, 0xffffffff, 0x30323640,
+	1, 1, 0, 0x00000e2c, 0xffffffff, 0x40424444,
+	1, 1, 0, 0x00000e30, 0xffffffff, 0x28303236,
+	1, 1, 1, 0x00000e34, 0xffffffff, 0x38404242,
+	1, 1, 1, 0x00000e38, 0xffffffff, 0x26283034,
+	1, 1, 0, 0x00000e3c, 0xffffffff, 0x40424444,
+	1, 1, 0, 0x00000e40, 0xffffffff, 0x28303236,
+	1, 1, 0, 0x00000e44, 0xffffffff, 0x42422426,
+	1, 1, 1, 0x00000e48, 0xffffffff, 0x30343840,
+	1, 1, 1, 0x00000e4c, 0xffffffff, 0x22242628
+};
+
+u32 RTL8821AE_PHY_REG_ARRAY_PG[] = {
+	0, 0, 0, 0x00000c20, 0xffffffff, 0x32343638,
+	0, 0, 0, 0x00000c24, 0xffffffff, 0x36363838,
+	0, 0, 0, 0x00000c28, 0xffffffff, 0x28303234,
+	0, 0, 0, 0x00000c2c, 0xffffffff, 0x34363838,
+	0, 0, 0, 0x00000c30, 0xffffffff, 0x26283032,
+	0, 0, 0, 0x00000c3c, 0xffffffff, 0x32343636,
+	0, 0, 0, 0x00000c40, 0xffffffff, 0x24262830,
+	0, 0, 0, 0x00000c44, 0x0000ffff, 0x00002022,
+	1, 0, 0, 0x00000c24, 0xffffffff, 0x34343636,
+	1, 0, 0, 0x00000c28, 0xffffffff, 0x26283032,
+	1, 0, 0, 0x00000c2c, 0xffffffff, 0x32343636,
+	1, 0, 0, 0x00000c30, 0xffffffff, 0x24262830,
+	1, 0, 0, 0x00000c3c, 0xffffffff, 0x32343636,
+	1, 0, 0, 0x00000c40, 0xffffffff, 0x24262830,
+	1, 0, 0, 0x00000c44, 0x0000ffff, 0x00002022
+};
+
+/* This table appears to be unused:
+u8 *RTL8821AE_TXPWR_LMT_ARRAY[] = {
+	"FCC", "2.4G", "20M", "CCK", "1T", "01", "32",
+	"ETSI", "2.4G", "20M", "CCK", "1T", "01", "32",
+	"MKK", "2.4G", "20M", "CCK", "1T", "01", "32",
+	"FCC", "2.4G", "20M", "CCK", "1T", "02", "32",
+	"ETSI", "2.4G", "20M", "CCK", "1T", "02", "32",
+	"MKK", "2.4G", "20M", "CCK", "1T", "02", "32",
+	"FCC", "2.4G", "20M", "CCK", "1T", "03", "32",
+	"ETSI", "2.4G", "20M", "CCK", "1T", "03", "32",
+	"MKK", "2.4G", "20M", "CCK", "1T", "03", "32",
+	"FCC", "2.4G", "20M", "CCK", "1T", "04", "34",
+	"ETSI", "2.4G", "20M", "CCK", "1T", "04", "32",
+	"MKK", "2.4G", "20M", "CCK", "1T", "04", "32",
+	"FCC", "2.4G", "20M", "CCK", "1T", "05", "34",
+	"ETSI", "2.4G", "20M", "CCK", "1T", "05", "32",
+	"MKK", "2.4G", "20M", "CCK", "1T", "05", "32",
+	"FCC", "2.4G", "20M", "CCK", "1T", "06", "34",
+	"ETSI", "2.4G", "20M", "CCK", "1T", "06", "32",
+	"MKK", "2.4G", "20M", "CCK", "1T", "06", "32",
+	"FCC", "2.4G", "20M", "CCK", "1T", "07", "34",
+	"ETSI", "2.4G", "20M", "CCK", "1T", "07", "32",
+	"MKK", "2.4G", "20M", "CCK", "1T", "07", "32",
+	"FCC", "2.4G", "20M", "CCK", "1T", "08", "34",
+	"ETSI", "2.4G", "20M", "CCK", "1T", "08", "32",
+	"MKK", "2.4G", "20M", "CCK", "1T", "08", "32",
+	"FCC", "2.4G", "20M", "CCK", "1T", "09", "34",
+	"ETSI", "2.4G", "20M", "CCK", "1T", "09", "32",
+	"MKK", "2.4G", "20M", "CCK", "1T", "09", "32",
+	"FCC", "2.4G", "20M", "CCK", "1T", "10", "32",
+	"ETSI", "2.4G", "20M", "CCK", "1T", "10", "32",
+	"MKK", "2.4G", "20M", "CCK", "1T", "10", "32",
+	"FCC", "2.4G", "20M", "CCK", "1T", "11", "32",
+	"ETSI", "2.4G", "20M", "CCK", "1T", "11", "32",
+	"MKK", "2.4G", "20M", "CCK", "1T", "11", "32",
+	"FCC", "2.4G", "20M", "CCK", "1T", "12", "63",
+	"ETSI", "2.4G", "20M", "CCK", "1T", "12", "32",
+	"MKK", "2.4G", "20M", "CCK", "1T", "12", "32",
+	"FCC", "2.4G", "20M", "CCK", "1T", "13", "63",
+	"ETSI", "2.4G", "20M", "CCK", "1T", "13", "32",
+	"MKK", "2.4G", "20M", "CCK", "1T", "13", "32",
+	"FCC", "2.4G", "20M", "CCK", "1T", "14", "63",
+	"ETSI", "2.4G", "20M", "CCK", "1T", "14", "63",
+	"MKK", "2.4G", "20M", "CCK", "1T", "14", "32",
+	"FCC", "2.4G", "20M", "OFDM", "1T", "01", "30",
+	"ETSI", "2.4G", "20M", "OFDM", "1T", "01", "32",
+	"MKK", "2.4G", "20M", "OFDM", "1T", "01", "32",
+	"FCC", "2.4G", "20M", "OFDM", "1T", "02", "30",
+	"ETSI", "2.4G", "20M", "OFDM", "1T", "02", "32",
+	"MKK", "2.4G", "20M", "OFDM", "1T", "02", "32",
+	"FCC", "2.4G", "20M", "OFDM", "1T", "03", "30",
+	"ETSI", "2.4G", "20M", "OFDM", "1T", "03", "32",
+	"MKK", "2.4G", "20M", "OFDM", "1T", "03", "32",
+	"FCC", "2.4G", "20M", "OFDM", "1T", "04", "32",
+	"ETSI", "2.4G", "20M", "OFDM", "1T", "04", "32",
+	"MKK", "2.4G", "20M", "OFDM", "1T", "04", "32",
+	"FCC", "2.4G", "20M", "OFDM", "1T", "05", "32",
+	"ETSI", "2.4G", "20M", "OFDM", "1T", "05", "32",
+	"MKK", "2.4G", "20M", "OFDM", "1T", "05", "32",
+	"FCC", "2.4G", "20M", "OFDM", "1T", "06", "32",
+	"ETSI", "2.4G", "20M", "OFDM", "1T", "06", "32",
+	"MKK", "2.4G", "20M", "OFDM", "1T", "06", "32",
+	"FCC", "2.4G", "20M", "OFDM", "1T", "07", "32",
+	"ETSI", "2.4G", "20M", "OFDM", "1T", "07", "32",
+	"MKK", "2.4G", "20M", "OFDM", "1T", "07", "32",
+	"FCC", "2.4G", "20M", "OFDM", "1T", "08", "32",
+	"ETSI", "2.4G", "20M", "OFDM", "1T", "08", "32",
+	"MKK", "2.4G", "20M", "OFDM", "1T", "08", "32",
+	"FCC", "2.4G", "20M", "OFDM", "1T", "09", "32",
+	"ETSI", "2.4G", "20M", "OFDM", "1T", "09", "32",
+	"MKK", "2.4G", "20M", "OFDM", "1T", "09", "32",
+	"FCC", "2.4G", "20M", "OFDM", "1T", "10", "30",
+	"ETSI", "2.4G", "20M", "OFDM", "1T", "10", "32",
+	"MKK", "2.4G", "20M", "OFDM", "1T", "10", "32",
+	"FCC", "2.4G", "20M", "OFDM", "1T", "11", "30",
+	"ETSI", "2.4G", "20M", "OFDM", "1T", "11", "32",
+	"MKK", "2.4G", "20M", "OFDM", "1T", "11", "32",
+	"FCC", "2.4G", "20M", "OFDM", "1T", "12", "63",
+	"ETSI", "2.4G", "20M", "OFDM", "1T", "12", "32",
+	"MKK", "2.4G", "20M", "OFDM", "1T", "12", "32",
+	"FCC", "2.4G", "20M", "OFDM", "1T", "13", "63",
+	"ETSI", "2.4G", "20M", "OFDM", "1T", "13", "32",
+	"MKK", "2.4G", "20M", "OFDM", "1T", "13", "32",
+	"FCC", "2.4G", "20M", "OFDM", "1T", "14", "63",
+	"ETSI", "2.4G", "20M", "OFDM", "1T", "14", "63",
+	"MKK", "2.4G", "20M", "OFDM", "1T", "14", "63",
+	"FCC", "2.4G", "20M", "HT", "1T", "01", "26",
+	"ETSI", "2.4G", "20M", "HT", "1T", "01", "32",
+	"MKK", "2.4G", "20M", "HT", "1T", "01", "32",
+	"FCC", "2.4G", "20M", "HT", "1T", "02", "26",
+	"ETSI", "2.4G", "20M", "HT", "1T", "02", "32",
+	"MKK", "2.4G", "20M", "HT", "1T", "02", "32",
+	"FCC", "2.4G", "20M", "HT", "1T", "03", "26",
+	"ETSI", "2.4G", "20M", "HT", "1T", "03", "32",
+	"MKK", "2.4G", "20M", "HT", "1T", "03", "32",
+	"FCC", "2.4G", "20M", "HT", "1T", "04", "32",
+	"ETSI", "2.4G", "20M", "HT", "1T", "04", "32",
+	"MKK", "2.4G", "20M", "HT", "1T", "04", "32",
+	"FCC", "2.4G", "20M", "HT", "1T", "05", "32",
+	"ETSI", "2.4G", "20M", "HT", "1T", "05", "32",
+	"MKK", "2.4G", "20M", "HT", "1T", "05", "32",
+	"FCC", "2.4G", "20M", "HT", "1T", "06", "32",
+	"ETSI", "2.4G", "20M", "HT", "1T", "06", "32",
+	"MKK", "2.4G", "20M", "HT", "1T", "06", "32",
+	"FCC", "2.4G", "20M", "HT", "1T", "07", "32",
+	"ETSI", "2.4G", "20M", "HT", "1T", "07", "32",
+	"MKK", "2.4G", "20M", "HT", "1T", "07", "32",
+	"FCC", "2.4G", "20M", "HT", "1T", "08", "32",
+	"ETSI", "2.4G", "20M", "HT", "1T", "08", "32",
+	"MKK", "2.4G", "20M", "HT", "1T", "08", "32",
+	"FCC", "2.4G", "20M", "HT", "1T", "09", "32",
+	"ETSI", "2.4G", "20M", "HT", "1T", "09", "32",
+	"MKK", "2.4G", "20M", "HT", "1T", "09", "32",
+	"FCC", "2.4G", "20M", "HT", "1T", "10", "26",
+	"ETSI", "2.4G", "20M", "HT", "1T", "10", "32",
+	"MKK", "2.4G", "20M", "HT", "1T", "10", "32",
+	"FCC", "2.4G", "20M", "HT", "1T", "11", "26",
+	"ETSI", "2.4G", "20M", "HT", "1T", "11", "32",
+	"MKK", "2.4G", "20M", "HT", "1T", "11", "32",
+	"FCC", "2.4G", "20M", "HT", "1T", "12", "63",
+	"ETSI", "2.4G", "20M", "HT", "1T", "12", "32",
+	"MKK", "2.4G", "20M", "HT", "1T", "12", "32",
+	"FCC", "2.4G", "20M", "HT", "1T", "13", "63",
+	"ETSI", "2.4G", "20M", "HT", "1T", "13", "32",
+	"MKK", "2.4G", "20M", "HT", "1T", "13", "32",
+	"FCC", "2.4G", "20M", "HT", "1T", "14", "63",
+	"ETSI", "2.4G", "20M", "HT", "1T", "14", "63",
+	"MKK", "2.4G", "20M", "HT", "1T", "14", "63",
+	"FCC", "2.4G", "20M", "HT", "2T", "01", "30",
+	"ETSI", "2.4G", "20M", "HT", "2T", "01", "32",
+	"MKK", "2.4G", "20M", "HT", "2T", "01", "32",
+	"FCC", "2.4G", "20M", "HT", "2T", "02", "32",
+	"ETSI", "2.4G", "20M", "HT", "2T", "02", "32",
+	"MKK", "2.4G", "20M", "HT", "2T", "02", "32",
+	"FCC", "2.4G", "20M", "HT", "2T", "03", "32",
+	"ETSI", "2.4G", "20M", "HT", "2T", "03", "32",
+	"MKK", "2.4G", "20M", "HT", "2T", "03", "32",
+	"FCC", "2.4G", "20M", "HT", "2T", "04", "32",
+	"ETSI", "2.4G", "20M", "HT", "2T", "04", "32",
+	"MKK", "2.4G", "20M", "HT", "2T", "04", "32",
+	"FCC", "2.4G", "20M", "HT", "2T", "05", "32",
+	"ETSI", "2.4G", "20M", "HT", "2T", "05", "32",
+	"MKK", "2.4G", "20M", "HT", "2T", "05", "32",
+	"FCC", "2.4G", "20M", "HT", "2T", "06", "32",
+	"ETSI", "2.4G", "20M", "HT", "2T", "06", "32",
+	"MKK", "2.4G", "20M", "HT", "2T", "06", "32",
+	"FCC", "2.4G", "20M", "HT", "2T", "07", "32",
+	"ETSI", "2.4G", "20M", "HT", "2T", "07", "32",
+	"MKK", "2.4G", "20M", "HT", "2T", "07", "32",
+	"FCC", "2.4G", "20M", "HT", "2T", "08", "32",
+	"ETSI", "2.4G", "20M", "HT", "2T", "08", "32",
+	"MKK", "2.4G", "20M", "HT", "2T", "08", "32",
+	"FCC", "2.4G", "20M", "HT", "2T", "09", "32",
+	"ETSI", "2.4G", "20M", "HT", "2T", "09", "32",
+	"MKK", "2.4G", "20M", "HT", "2T", "09", "32",
+	"FCC", "2.4G", "20M", "HT", "2T", "10", "32",
+	"ETSI", "2.4G", "20M", "HT", "2T", "10", "32",
+	"MKK", "2.4G", "20M", "HT", "2T", "10", "32",
+	"FCC", "2.4G", "20M", "HT", "2T", "11", "30",
+	"ETSI", "2.4G", "20M", "HT", "2T", "11", "32",
+	"MKK", "2.4G", "20M", "HT", "2T", "11", "32",
+	"FCC", "2.4G", "20M", "HT", "2T", "12", "63",
+	"ETSI", "2.4G", "20M", "HT", "2T", "12", "32",
+	"MKK", "2.4G", "20M", "HT", "2T", "12", "32",
+	"FCC", "2.4G", "20M", "HT", "2T", "13", "63",
+	"ETSI", "2.4G", "20M", "HT", "2T", "13", "32",
+	"MKK", "2.4G", "20M", "HT", "2T", "13", "32",
+	"FCC", "2.4G", "20M", "HT", "2T", "14", "63",
+	"ETSI", "2.4G", "20M", "HT", "2T", "14", "63",
+	"MKK", "2.4G", "20M", "HT", "2T", "14", "63",
+	"FCC", "2.4G", "40M", "HT", "1T", "01", "63",
+	"ETSI", "2.4G", "40M", "HT", "1T", "01", "63",
+	"MKK", "2.4G", "40M", "HT", "1T", "01", "63",
+	"FCC", "2.4G", "40M", "HT", "1T", "02", "63",
+	"ETSI", "2.4G", "40M", "HT", "1T", "02", "63",
+	"MKK", "2.4G", "40M", "HT", "1T", "02", "63",
+	"FCC", "2.4G", "40M", "HT", "1T", "03", "26",
+	"ETSI", "2.4G", "40M", "HT", "1T", "03", "32",
+	"MKK", "2.4G", "40M", "HT", "1T", "03", "32",
+	"FCC", "2.4G", "40M", "HT", "1T", "04", "26",
+	"ETSI", "2.4G", "40M", "HT", "1T", "04", "32",
+	"MKK", "2.4G", "40M", "HT", "1T", "04", "32",
+	"FCC", "2.4G", "40M", "HT", "1T", "05", "32",
+	"ETSI", "2.4G", "40M", "HT", "1T", "05", "32",
+	"MKK", "2.4G", "40M", "HT", "1T", "05", "32",
+	"FCC", "2.4G", "40M", "HT", "1T", "06", "32",
+	"ETSI", "2.4G", "40M", "HT", "1T", "06", "32",
+	"MKK", "2.4G", "40M", "HT", "1T", "06", "32",
+	"FCC", "2.4G", "40M", "HT", "1T", "07", "32",
+	"ETSI", "2.4G", "40M", "HT", "1T", "07", "32",
+	"MKK", "2.4G", "40M", "HT", "1T", "07", "32",
+	"FCC", "2.4G", "40M", "HT", "1T", "08", "26",
+	"ETSI", "2.4G", "40M", "HT", "1T", "08", "32",
+	"MKK", "2.4G", "40M", "HT", "1T", "08", "32",
+	"FCC", "2.4G", "40M", "HT", "1T", "09", "26",
+	"ETSI", "2.4G", "40M", "HT", "1T", "09", "32",
+	"MKK", "2.4G", "40M", "HT", "1T", "09", "32",
+	"FCC", "2.4G", "40M", "HT", "1T", "10", "26",
+	"ETSI", "2.4G", "40M", "HT", "1T", "10", "32",
+	"MKK", "2.4G", "40M", "HT", "1T", "10", "32",
+	"FCC", "2.4G", "40M", "HT", "1T", "11", "26",
+	"ETSI", "2.4G", "40M", "HT", "1T", "11", "32",
+	"MKK", "2.4G", "40M", "HT", "1T", "11", "32",
+	"FCC", "2.4G", "40M", "HT", "1T", "12", "63",
+	"ETSI", "2.4G", "40M", "HT", "1T", "12", "32",
+	"MKK", "2.4G", "40M", "HT", "1T", "12", "32",
+	"FCC", "2.4G", "40M", "HT", "1T", "13", "63",
+	"ETSI", "2.4G", "40M", "HT", "1T", "13", "32",
+	"MKK", "2.4G", "40M", "HT", "1T", "13", "32",
+	"FCC", "2.4G", "40M", "HT", "1T", "14", "63",
+	"ETSI", "2.4G", "40M", "HT", "1T", "14", "63",
+	"MKK", "2.4G", "40M", "HT", "1T", "14", "63",
+	"FCC", "2.4G", "40M", "HT", "2T", "01", "63",
+	"ETSI", "2.4G", "40M", "HT", "2T", "01", "63",
+	"MKK", "2.4G", "40M", "HT", "2T", "01", "63",
+	"FCC", "2.4G", "40M", "HT", "2T", "02", "63",
+	"ETSI", "2.4G", "40M", "HT", "2T", "02", "63",
+	"MKK", "2.4G", "40M", "HT", "2T", "02", "63",
+	"FCC", "2.4G", "40M", "HT", "2T", "03", "30",
+	"ETSI", "2.4G", "40M", "HT", "2T", "03", "30",
+	"MKK", "2.4G", "40M", "HT", "2T", "03", "30",
+	"FCC", "2.4G", "40M", "HT", "2T", "04", "32",
+	"ETSI", "2.4G", "40M", "HT", "2T", "04", "30",
+	"MKK", "2.4G", "40M", "HT", "2T", "04", "30",
+	"FCC", "2.4G", "40M", "HT", "2T", "05", "32",
+	"ETSI", "2.4G", "40M", "HT", "2T", "05", "30",
+	"MKK", "2.4G", "40M", "HT", "2T", "05", "30",
+	"FCC", "2.4G", "40M", "HT", "2T", "06", "32",
+	"ETSI", "2.4G", "40M", "HT", "2T", "06", "30",
+	"MKK", "2.4G", "40M", "HT", "2T", "06", "30",
+	"FCC", "2.4G", "40M", "HT", "2T", "07", "32",
+	"ETSI", "2.4G", "40M", "HT", "2T", "07", "30",
+	"MKK", "2.4G", "40M", "HT", "2T", "07", "30",
+	"FCC", "2.4G", "40M", "HT", "2T", "08", "32",
+	"ETSI", "2.4G", "40M", "HT", "2T", "08", "30",
+	"MKK", "2.4G", "40M", "HT", "2T", "08", "30",
+	"FCC", "2.4G", "40M", "HT", "2T", "09", "32",
+	"ETSI", "2.4G", "40M", "HT", "2T", "09", "30",
+	"MKK", "2.4G", "40M", "HT", "2T", "09", "30",
+	"FCC", "2.4G", "40M", "HT", "2T", "10", "32",
+	"ETSI", "2.4G", "40M", "HT", "2T", "10", "30",
+	"MKK", "2.4G", "40M", "HT", "2T", "10", "30",
+	"FCC", "2.4G", "40M", "HT", "2T", "11", "30",
+	"ETSI", "2.4G", "40M", "HT", "2T", "11", "30",
+	"MKK", "2.4G", "40M", "HT", "2T", "11", "30",
+	"FCC", "2.4G", "40M", "HT", "2T", "12", "63",
+	"ETSI", "2.4G", "40M", "HT", "2T", "12", "32",
+	"MKK", "2.4G", "40M", "HT", "2T", "12", "32",
+	"FCC", "2.4G", "40M", "HT", "2T", "13", "63",
+	"ETSI", "2.4G", "40M", "HT", "2T", "13", "32",
+	"MKK", "2.4G", "40M", "HT", "2T", "13", "32",
+	"FCC", "2.4G", "40M", "HT", "2T", "14", "63",
+	"ETSI", "2.4G", "40M", "HT", "2T", "14", "63",
+	"MKK", "2.4G", "40M", "HT", "2T", "14", "63",
+	"FCC", "5G", "20M", "OFDM", "1T", "36", "30",
+	"ETSI", "5G", "20M", "OFDM", "1T", "36", "30",
+	"MKK", "5G", "20M", "OFDM", "1T", "36", "30",
+	"FCC", "5G", "20M", "OFDM", "1T", "40", "30",
+	"ETSI", "5G", "20M", "OFDM", "1T", "40", "30",
+	"MKK", "5G", "20M", "OFDM", "1T", "40", "30",
+	"FCC", "5G", "20M", "OFDM", "1T", "44", "30",
+	"ETSI", "5G", "20M", "OFDM", "1T", "44", "30",
+	"MKK", "5G", "20M", "OFDM", "1T", "44", "30",
+	"FCC", "5G", "20M", "OFDM", "1T", "48", "30",
+	"ETSI", "5G", "20M", "OFDM", "1T", "48", "30",
+	"MKK", "5G", "20M", "OFDM", "1T", "48", "30",
+	"FCC", "5G", "20M", "OFDM", "1T", "52", "30",
+	"ETSI", "5G", "20M", "OFDM", "1T", "52", "30",
+	"MKK", "5G", "20M", "OFDM", "1T", "52", "30",
+	"FCC", "5G", "20M", "OFDM", "1T", "56", "30",
+	"ETSI", "5G", "20M", "OFDM", "1T", "56", "30",
+	"MKK", "5G", "20M", "OFDM", "1T", "56", "30",
+	"FCC", "5G", "20M", "OFDM", "1T", "60", "30",
+	"ETSI", "5G", "20M", "OFDM", "1T", "60", "30",
+	"MKK", "5G", "20M", "OFDM", "1T", "60", "30",
+	"FCC", "5G", "20M", "OFDM", "1T", "64", "30",
+	"ETSI", "5G", "20M", "OFDM", "1T", "64", "30",
+	"MKK", "5G", "20M", "OFDM", "1T", "64", "30",
+	"FCC", "5G", "20M", "OFDM", "1T", "100", "30",
+	"ETSI", "5G", "20M", "OFDM", "1T", "100", "30",
+	"MKK", "5G", "20M", "OFDM", "1T", "100", "30",
+	"FCC", "5G", "20M", "OFDM", "1T", "114", "30",
+	"ETSI", "5G", "20M", "OFDM", "1T", "114", "30",
+	"MKK", "5G", "20M", "OFDM", "1T", "114", "30",
+	"FCC", "5G", "20M", "OFDM", "1T", "108", "30",
+	"ETSI", "5G", "20M", "OFDM", "1T", "108", "30",
+	"MKK", "5G", "20M", "OFDM", "1T", "108", "30",
+	"FCC", "5G", "20M", "OFDM", "1T", "112", "30",
+	"ETSI", "5G", "20M", "OFDM", "1T", "112", "30",
+	"MKK", "5G", "20M", "OFDM", "1T", "112", "30",
+	"FCC", "5G", "20M", "OFDM", "1T", "116", "30",
+	"ETSI", "5G", "20M", "OFDM", "1T", "116", "30",
+	"MKK", "5G", "20M", "OFDM", "1T", "116", "30",
+	"FCC", "5G", "20M", "OFDM", "1T", "120", "30",
+	"ETSI", "5G", "20M", "OFDM", "1T", "120", "30",
+	"MKK", "5G", "20M", "OFDM", "1T", "120", "30",
+	"FCC", "5G", "20M", "OFDM", "1T", "124", "30",
+	"ETSI", "5G", "20M", "OFDM", "1T", "124", "30",
+	"MKK", "5G", "20M", "OFDM", "1T", "124", "30",
+	"FCC", "5G", "20M", "OFDM", "1T", "128", "30",
+	"ETSI", "5G", "20M", "OFDM", "1T", "128", "30",
+	"MKK", "5G", "20M", "OFDM", "1T", "128", "30",
+	"FCC", "5G", "20M", "OFDM", "1T", "132", "30",
+	"ETSI", "5G", "20M", "OFDM", "1T", "132", "30",
+	"MKK", "5G", "20M", "OFDM", "1T", "132", "30",
+	"FCC", "5G", "20M", "OFDM", "1T", "136", "30",
+	"ETSI", "5G", "20M", "OFDM", "1T", "136", "30",
+	"MKK", "5G", "20M", "OFDM", "1T", "136", "30",
+	"FCC", "5G", "20M", "OFDM", "1T", "140", "30",
+	"ETSI", "5G", "20M", "OFDM", "1T", "140", "30",
+	"MKK", "5G", "20M", "OFDM", "1T", "140", "30",
+	"FCC", "5G", "20M", "OFDM", "1T", "149", "30",
+	"ETSI", "5G", "20M", "OFDM", "1T", "149", "30",
+	"MKK", "5G", "20M", "OFDM", "1T", "149", "63",
+	"FCC", "5G", "20M", "OFDM", "1T", "153", "30",
+	"ETSI", "5G", "20M", "OFDM", "1T", "153", "30",
+	"MKK", "5G", "20M", "OFDM", "1T", "153", "63",
+	"FCC", "5G", "20M", "OFDM", "1T", "157", "30",
+	"ETSI", "5G", "20M", "OFDM", "1T", "157", "30",
+	"MKK", "5G", "20M", "OFDM", "1T", "157", "63",
+	"FCC", "5G", "20M", "OFDM", "1T", "161", "30",
+	"ETSI", "5G", "20M", "OFDM", "1T", "161", "30",
+	"MKK", "5G", "20M", "OFDM", "1T", "161", "63",
+	"FCC", "5G", "20M", "OFDM", "1T", "165", "30",
+	"ETSI", "5G", "20M", "OFDM", "1T", "165", "30",
+	"MKK", "5G", "20M", "OFDM", "1T", "165", "63",
+	"FCC", "5G", "20M", "HT", "1T", "36", "30",
+	"ETSI", "5G", "20M", "HT", "1T", "36", "30",
+	"MKK", "5G", "20M", "HT", "1T", "36", "30",
+	"FCC", "5G", "20M", "HT", "1T", "40", "30",
+	"ETSI", "5G", "20M", "HT", "1T", "40", "30",
+	"MKK", "5G", "20M", "HT", "1T", "40", "30",
+	"FCC", "5G", "20M", "HT", "1T", "44", "30",
+	"ETSI", "5G", "20M", "HT", "1T", "44", "30",
+	"MKK", "5G", "20M", "HT", "1T", "44", "30",
+	"FCC", "5G", "20M", "HT", "1T", "48", "30",
+	"ETSI", "5G", "20M", "HT", "1T", "48", "30",
+	"MKK", "5G", "20M", "HT", "1T", "48", "30",
+	"FCC", "5G", "20M", "HT", "1T", "52", "30",
+	"ETSI", "5G", "20M", "HT", "1T", "52", "30",
+	"MKK", "5G", "20M", "HT", "1T", "52", "30",
+	"FCC", "5G", "20M", "HT", "1T", "56", "30",
+	"ETSI", "5G", "20M", "HT", "1T", "56", "30",
+	"MKK", "5G", "20M", "HT", "1T", "56", "30",
+	"FCC", "5G", "20M", "HT", "1T", "60", "30",
+	"ETSI", "5G", "20M", "HT", "1T", "60", "30",
+	"MKK", "5G", "20M", "HT", "1T", "60", "30",
+	"FCC", "5G", "20M", "HT", "1T", "64", "30",
+	"ETSI", "5G", "20M", "HT", "1T", "64", "30",
+	"MKK", "5G", "20M", "HT", "1T", "64", "30",
+	"FCC", "5G", "20M", "HT", "1T", "100", "30",
+	"ETSI", "5G", "20M", "HT", "1T", "100", "30",
+	"MKK", "5G", "20M", "HT", "1T", "100", "30",
+	"FCC", "5G", "20M", "HT", "1T", "114", "30",
+	"ETSI", "5G", "20M", "HT", "1T", "114", "30",
+	"MKK", "5G", "20M", "HT", "1T", "114", "30",
+	"FCC", "5G", "20M", "HT", "1T", "108", "30",
+	"ETSI", "5G", "20M", "HT", "1T", "108", "30",
+	"MKK", "5G", "20M", "HT", "1T", "108", "30",
+	"FCC", "5G", "20M", "HT", "1T", "112", "30",
+	"ETSI", "5G", "20M", "HT", "1T", "112", "30",
+	"MKK", "5G", "20M", "HT", "1T", "112", "30",
+	"FCC", "5G", "20M", "HT", "1T", "116", "30",
+	"ETSI", "5G", "20M", "HT", "1T", "116", "30",
+	"MKK", "5G", "20M", "HT", "1T", "116", "30",
+	"FCC", "5G", "20M", "HT", "1T", "120", "30",
+	"ETSI", "5G", "20M", "HT", "1T", "120", "30",
+	"MKK", "5G", "20M", "HT", "1T", "120", "30",
+	"FCC", "5G", "20M", "HT", "1T", "124", "30",
+	"ETSI", "5G", "20M", "HT", "1T", "124", "30",
+	"MKK", "5G", "20M", "HT", "1T", "124", "30",
+	"FCC", "5G", "20M", "HT", "1T", "128", "30",
+	"ETSI", "5G", "20M", "HT", "1T", "128", "30",
+	"MKK", "5G", "20M", "HT", "1T", "128", "30",
+	"FCC", "5G", "20M", "HT", "1T", "132", "30",
+	"ETSI", "5G", "20M", "HT", "1T", "132", "30",
+	"MKK", "5G", "20M", "HT", "1T", "132", "30",
+	"FCC", "5G", "20M", "HT", "1T", "136", "30",
+	"ETSI", "5G", "20M", "HT", "1T", "136", "30",
+	"MKK", "5G", "20M", "HT", "1T", "136", "30",
+	"FCC", "5G", "20M", "HT", "1T", "140", "30",
+	"ETSI", "5G", "20M", "HT", "1T", "140", "30",
+	"MKK", "5G", "20M", "HT", "1T", "140", "30",
+	"FCC", "5G", "20M", "HT", "1T", "149", "30",
+	"ETSI", "5G", "20M", "HT", "1T", "149", "30",
+	"MKK", "5G", "20M", "HT", "1T", "149", "63",
+	"FCC", "5G", "20M", "HT", "1T", "153", "30",
+	"ETSI", "5G", "20M", "HT", "1T", "153", "30",
+	"MKK", "5G", "20M", "HT", "1T", "153", "63",
+	"FCC", "5G", "20M", "HT", "1T", "157", "30",
+	"ETSI", "5G", "20M", "HT", "1T", "157", "30",
+	"MKK", "5G", "20M", "HT", "1T", "157", "63",
+	"FCC", "5G", "20M", "HT", "1T", "161", "30",
+	"ETSI", "5G", "20M", "HT", "1T", "161", "30",
+	"MKK", "5G", "20M", "HT", "1T", "161", "63",
+	"FCC", "5G", "20M", "HT", "1T", "165", "30",
+	"ETSI", "5G", "20M", "HT", "1T", "165", "30",
+	"MKK", "5G", "20M", "HT", "1T", "165", "63",
+	"FCC", "5G", "20M", "HT", "2T", "36", "28",
+	"ETSI", "5G", "20M", "HT", "2T", "36", "30",
+	"MKK", "5G", "20M", "HT", "2T", "36", "30",
+	"FCC", "5G", "20M", "HT", "2T", "40", "28",
+	"ETSI", "5G", "20M", "HT", "2T", "40", "30",
+	"MKK", "5G", "20M", "HT", "2T", "40", "30",
+	"FCC", "5G", "20M", "HT", "2T", "44", "28",
+	"ETSI", "5G", "20M", "HT", "2T", "44", "30",
+	"MKK", "5G", "20M", "HT", "2T", "44", "30",
+	"FCC", "5G", "20M", "HT", "2T", "48", "28",
+	"ETSI", "5G", "20M", "HT", "2T", "48", "30",
+	"MKK", "5G", "20M", "HT", "2T", "48", "30",
+	"FCC", "5G", "20M", "HT", "2T", "52", "34",
+	"ETSI", "5G", "20M", "HT", "2T", "52", "30",
+	"MKK", "5G", "20M", "HT", "2T", "52", "30",
+	"FCC", "5G", "20M", "HT", "2T", "56", "32",
+	"ETSI", "5G", "20M", "HT", "2T", "56", "30",
+	"MKK", "5G", "20M", "HT", "2T", "56", "30",
+	"FCC", "5G", "20M", "HT", "2T", "60", "30",
+	"ETSI", "5G", "20M", "HT", "2T", "60", "30",
+	"MKK", "5G", "20M", "HT", "2T", "60", "30",
+	"FCC", "5G", "20M", "HT", "2T", "64", "26",
+	"ETSI", "5G", "20M", "HT", "2T", "64", "30",
+	"MKK", "5G", "20M", "HT", "2T", "64", "30",
+	"FCC", "5G", "20M", "HT", "2T", "100", "28",
+	"ETSI", "5G", "20M", "HT", "2T", "100", "30",
+	"MKK", "5G", "20M", "HT", "2T", "100", "30",
+	"FCC", "5G", "20M", "HT", "2T", "114", "28",
+	"ETSI", "5G", "20M", "HT", "2T", "114", "30",
+	"MKK", "5G", "20M", "HT", "2T", "114", "30",
+	"FCC", "5G", "20M", "HT", "2T", "108", "30",
+	"ETSI", "5G", "20M", "HT", "2T", "108", "30",
+	"MKK", "5G", "20M", "HT", "2T", "108", "30",
+	"FCC", "5G", "20M", "HT", "2T", "112", "32",
+	"ETSI", "5G", "20M", "HT", "2T", "112", "30",
+	"MKK", "5G", "20M", "HT", "2T", "112", "30",
+	"FCC", "5G", "20M", "HT", "2T", "116", "32",
+	"ETSI", "5G", "20M", "HT", "2T", "116", "30",
+	"MKK", "5G", "20M", "HT", "2T", "116", "30",
+	"FCC", "5G", "20M", "HT", "2T", "120", "34",
+	"ETSI", "5G", "20M", "HT", "2T", "120", "30",
+	"MKK", "5G", "20M", "HT", "2T", "120", "30",
+	"FCC", "5G", "20M", "HT", "2T", "124", "32",
+	"ETSI", "5G", "20M", "HT", "2T", "124", "30",
+	"MKK", "5G", "20M", "HT", "2T", "124", "30",
+	"FCC", "5G", "20M", "HT", "2T", "128", "30",
+	"ETSI", "5G", "20M", "HT", "2T", "128", "30",
+	"MKK", "5G", "20M", "HT", "2T", "128", "30",
+	"FCC", "5G", "20M", "HT", "2T", "132", "28",
+	"ETSI", "5G", "20M", "HT", "2T", "132", "30",
+	"MKK", "5G", "20M", "HT", "2T", "132", "30",
+	"FCC", "5G", "20M", "HT", "2T", "136", "28",
+	"ETSI", "5G", "20M", "HT", "2T", "136", "30",
+	"MKK", "5G", "20M", "HT", "2T", "136", "30",
+	"FCC", "5G", "20M", "HT", "2T", "140", "26",
+	"ETSI", "5G", "20M", "HT", "2T", "140", "30",
+	"MKK", "5G", "20M", "HT", "2T", "140", "30",
+	"FCC", "5G", "20M", "HT", "2T", "149", "34",
+	"ETSI", "5G", "20M", "HT", "2T", "149", "30",
+	"MKK", "5G", "20M", "HT", "2T", "149", "63",
+	"FCC", "5G", "20M", "HT", "2T", "153", "34",
+	"ETSI", "5G", "20M", "HT", "2T", "153", "30",
+	"MKK", "5G", "20M", "HT", "2T", "153", "63",
+	"FCC", "5G", "20M", "HT", "2T", "157", "34",
+	"ETSI", "5G", "20M", "HT", "2T", "157", "30",
+	"MKK", "5G", "20M", "HT", "2T", "157", "63",
+	"FCC", "5G", "20M", "HT", "2T", "161", "34",
+	"ETSI", "5G", "20M", "HT", "2T", "161", "30",
+	"MKK", "5G", "20M", "HT", "2T", "161", "63",
+	"FCC", "5G", "20M", "HT", "2T", "165", "34",
+	"ETSI", "5G", "20M", "HT", "2T", "165", "30",
+	"MKK", "5G", "20M", "HT", "2T", "165", "63",
+	"FCC", "5G", "40M", "HT", "1T", "38", "26",
+	"ETSI", "5G", "40M", "HT", "1T", "38", "30",
+	"MKK", "5G", "40M", "HT", "1T", "38", "30",
+	"FCC", "5G", "40M", "HT", "1T", "46", "30",
+	"ETSI", "5G", "40M", "HT", "1T", "46", "30",
+	"MKK", "5G", "40M", "HT", "1T", "46", "30",
+	"FCC", "5G", "40M", "HT", "1T", "54", "30",
+	"ETSI", "5G", "40M", "HT", "1T", "54", "30",
+	"MKK", "5G", "40M", "HT", "1T", "54", "30",
+	"FCC", "5G", "40M", "HT", "1T", "62", "26",
+	"ETSI", "5G", "40M", "HT", "1T", "62", "30",
+	"MKK", "5G", "40M", "HT", "1T", "62", "30",
+	"FCC", "5G", "40M", "HT", "1T", "102", "24",
+	"ETSI", "5G", "40M", "HT", "1T", "102", "30",
+	"MKK", "5G", "40M", "HT", "1T", "102", "30",
+	"FCC", "5G", "40M", "HT", "1T", "110", "30",
+	"ETSI", "5G", "40M", "HT", "1T", "110", "30",
+	"MKK", "5G", "40M", "HT", "1T", "110", "30",
+	"FCC", "5G", "40M", "HT", "1T", "118", "30",
+	"ETSI", "5G", "40M", "HT", "1T", "118", "30",
+	"MKK", "5G", "40M", "HT", "1T", "118", "30",
+	"FCC", "5G", "40M", "HT", "1T", "126", "30",
+	"ETSI", "5G", "40M", "HT", "1T", "126", "30",
+	"MKK", "5G", "40M", "HT", "1T", "126", "30",
+	"FCC", "5G", "40M", "HT", "1T", "134", "30",
+	"ETSI", "5G", "40M", "HT", "1T", "134", "30",
+	"MKK", "5G", "40M", "HT", "1T", "134", "30",
+	"FCC", "5G", "40M", "HT", "1T", "151", "30",
+	"ETSI", "5G", "40M", "HT", "1T", "151", "30",
+	"MKK", "5G", "40M", "HT", "1T", "151", "63",
+	"FCC", "5G", "40M", "HT", "1T", "159", "30",
+	"ETSI", "5G", "40M", "HT", "1T", "159", "30",
+	"MKK", "5G", "40M", "HT", "1T", "159", "63",
+	"FCC", "5G", "40M", "HT", "2T", "38", "28",
+	"ETSI", "5G", "40M", "HT", "2T", "38", "30",
+	"MKK", "5G", "40M", "HT", "2T", "38", "30",
+	"FCC", "5G", "40M", "HT", "2T", "46", "28",
+	"ETSI", "5G", "40M", "HT", "2T", "46", "30",
+	"MKK", "5G", "40M", "HT", "2T", "46", "30",
+	"FCC", "5G", "40M", "HT", "2T", "54", "30",
+	"ETSI", "5G", "40M", "HT", "2T", "54", "30",
+	"MKK", "5G", "40M", "HT", "2T", "54", "30",
+	"FCC", "5G", "40M", "HT", "2T", "62", "30",
+	"ETSI", "5G", "40M", "HT", "2T", "62", "30",
+	"MKK", "5G", "40M", "HT", "2T", "62", "30",
+	"FCC", "5G", "40M", "HT", "2T", "102", "26",
+	"ETSI", "5G", "40M", "HT", "2T", "102", "30",
+	"MKK", "5G", "40M", "HT", "2T", "102", "30",
+	"FCC", "5G", "40M", "HT", "2T", "110", "30",
+	"ETSI", "5G", "40M", "HT", "2T", "110", "30",
+	"MKK", "5G", "40M", "HT", "2T", "110", "30",
+	"FCC", "5G", "40M", "HT", "2T", "118", "34",
+	"ETSI", "5G", "40M", "HT", "2T", "118", "30",
+	"MKK", "5G", "40M", "HT", "2T", "118", "30",
+	"FCC", "5G", "40M", "HT", "2T", "126", "32",
+	"ETSI", "5G", "40M", "HT", "2T", "126", "30",
+	"MKK", "5G", "40M", "HT", "2T", "126", "30",
+	"FCC", "5G", "40M", "HT", "2T", "134", "30",
+	"ETSI", "5G", "40M", "HT", "2T", "134", "30",
+	"MKK", "5G", "40M", "HT", "2T", "134", "30",
+	"FCC", "5G", "40M", "HT", "2T", "151", "34",
+	"ETSI", "5G", "40M", "HT", "2T", "151", "30",
+	"MKK", "5G", "40M", "HT", "2T", "151", "63",
+	"FCC", "5G", "40M", "HT", "2T", "159", "34",
+	"ETSI", "5G", "40M", "HT", "2T", "159", "30",
+	"MKK", "5G", "40M", "HT", "2T", "159", "63",
+	"FCC", "5G", "80M", "VHT", "1T", "42", "22",
+	"ETSI", "5G", "80M", "VHT", "1T", "42", "30",
+	"MKK", "5G", "80M", "VHT", "1T", "42", "30",
+	"FCC", "5G", "80M", "VHT", "1T", "58", "20",
+	"ETSI", "5G", "80M", "VHT", "1T", "58", "30",
+	"MKK", "5G", "80M", "VHT", "1T", "58", "30",
+	"FCC", "5G", "80M", "VHT", "1T", "106", "20",
+	"ETSI", "5G", "80M", "VHT", "1T", "106", "30",
+	"MKK", "5G", "80M", "VHT", "1T", "106", "30",
+	"FCC", "5G", "80M", "VHT", "1T", "122", "28",
+	"ETSI", "5G", "80M", "VHT", "1T", "122", "30",
+	"MKK", "5G", "80M", "VHT", "1T", "122", "30",
+	"FCC", "5G", "80M", "VHT", "1T", "155", "30",
+	"ETSI", "5G", "80M", "VHT", "1T", "155", "30",
+	"MKK", "5G", "80M", "VHT", "1T", "155", "63",
+	"FCC", "5G", "80M", "VHT", "2T", "42", "28",
+	"ETSI", "5G", "80M", "VHT", "2T", "42", "30",
+	"MKK", "5G", "80M", "VHT", "2T", "42", "30",
+	"FCC", "5G", "80M", "VHT", "2T", "58", "26",
+	"ETSI", "5G", "80M", "VHT", "2T", "58", "30",
+	"MKK", "5G", "80M", "VHT", "2T", "58", "30",
+	"FCC", "5G", "80M", "VHT", "2T", "106", "28",
+	"ETSI", "5G", "80M", "VHT", "2T", "106", "30",
+	"MKK", "5G", "80M", "VHT", "2T", "106", "30",
+	"FCC", "5G", "80M", "VHT", "2T", "122", "32",
+	"ETSI", "5G", "80M", "VHT", "2T", "122", "30",
+	"MKK", "5G", "80M", "VHT", "2T", "122", "30",
+	"FCC", "5G", "80M", "VHT", "2T", "155", "34",
+	"ETSI", "5G", "80M", "VHT", "2T", "155", "30",
+	"MKK", "5G", "80M", "VHT", "2T", "155", "63"
+};*/
+
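+/*
+ * RF (radio) register writes for path A, in the same (offset, value) and
+ * conditional-marker layout as the PHY tables above.
+ */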
+u32 RTL8812AE_RADIOA_ARRAY[] = {
+		0x000, 0x00010000,
+		0x018, 0x0001712A,
+		0x056, 0x00051CF2,
+		0x066, 0x00040000,
+		0x01E, 0x00080000,
+		0x089, 0x00000080,
+	0xFF0F0740, 0xABCD,
+		0x086, 0x00014B38,
+	0xFF0F02C0, 0xCDEF,
+		0x086, 0x00014B38,
+	0xFF0F01C0, 0xCDEF,
+		0x086, 0x00014B38,
+	0xFF0F07D8, 0xCDEF,
+		0x086, 0x00014B3A,
+	0xFF0F07D0, 0xCDEF,
+		0x086, 0x00014B3A,
+	0xCDCDCDCD, 0xCDCD,
+		0x086, 0x00014B38,
+	0xFF0F0740, 0xDEAD,
+		0x0B1, 0x0001FC1A,
+		0x0B3, 0x000F0810,
+		0x0B4, 0x0001A78D,
+		0x0BA, 0x00086180,
+		0x018, 0x00000006,
+		0x0EF, 0x00002000,
+	0xFF0F07D8, 0xABCD,
+		0x03B, 0x0003F218,
+		0x03B, 0x00030A58,
+		0x03B, 0x0002FA58,
+		0x03B, 0x00022590,
+		0x03B, 0x0001FA50,
+		0x03B, 0x00010248,
+		0x03B, 0x00008240,
+	0xFF0F07D0, 0xCDEF,
+		0x03B, 0x0003F218,
+		0x03B, 0x00030A58,
+		0x03B, 0x0002FA58,
+		0x03B, 0x00022590,
+		0x03B, 0x0001FA50,
+		0x03B, 0x00010248,
+		0x03B, 0x00008240,
+	0xCDCDCDCD, 0xCDCD,
+		0x03B, 0x00038A58,
+		0x03B, 0x00037A58,
+		0x03B, 0x0002A590,
+		0x03B, 0x00027A50,
+		0x03B, 0x00018248,
+		0x03B, 0x00010240,
+		0x03B, 0x00008240,
+	0xFF0F07D8, 0xDEAD,
+		0x0EF, 0x00000100,
+	0xFF0F07D8, 0xABCD,
+		0x034, 0x0000A4EE,
+		0x034, 0x00009076,
+		0x034, 0x00008073,
+		0x034, 0x00007070,
+		0x034, 0x0000606D,
+		0x034, 0x0000506A,
+		0x034, 0x00004049,
+		0x034, 0x00003046,
+		0x034, 0x00002028,
+		0x034, 0x00001025,
+		0x034, 0x00000022,
+	0xCDCDCDCD, 0xCDCD,
+		0x034, 0x0000ADF4,
+		0x034, 0x00009DF1,
+		0x034, 0x00008DEE,
+		0x034, 0x00007DEB,
+		0x034, 0x00006DE8,
+		0x034, 0x00005CEC,
+		0x034, 0x00004CE9,
+		0x034, 0x000034EA,
+		0x034, 0x000024E7,
+		0x034, 0x0000146B,
+		0x034, 0x0000006D,
+	0xFF0F07D8, 0xDEAD,
+		0x0EF, 0x00000000,
+		0x0EF, 0x000020A2,
+		0x0DF, 0x00000080,
+		0x035, 0x00000192,
+		0x035, 0x00008192,
+		0x035, 0x00010192,
+		0x036, 0x00000024,
+		0x036, 0x00008024,
+		0x036, 0x00010024,
+		0x036, 0x00018024,
+		0x0EF, 0x00000000,
+		0x051, 0x00000C21,
+		0x052, 0x000006D9,
+		0x053, 0x000FC649,
+		0x054, 0x0000017E,
+		0x0EF, 0x00000002,
+		0x008, 0x00008400,
+		0x018, 0x0001712A,
+		0x0EF, 0x00001000,
+		0x03A, 0x00000080,
+		0x03B, 0x0003A02C,
+		0x03C, 0x00004000,
+		0x03A, 0x00000400,
+		0x03B, 0x0003202C,
+		0x03C, 0x00010000,
+		0x03A, 0x000000A0,
+		0x03B, 0x0002B064,
+		0x03C, 0x00004000,
+		0x03A, 0x000000D8,
+		0x03B, 0x00023070,
+		0x03C, 0x00004000,
+		0x03A, 0x00000468,
+		0x03B, 0x0001B870,
+		0x03C, 0x00010000,
+		0x03A, 0x00000098,
+		0x03B, 0x00012085,
+		0x03C, 0x000E4000,
+		0x03A, 0x00000418,
+		0x03B, 0x0000A080,
+		0x03C, 0x000F0000,
+		0x03A, 0x00000418,
+		0x03B, 0x00002080,
+		0x03C, 0x00010000,
+		0x03A, 0x00000080,
+		0x03B, 0x0007A02C,
+		0x03C, 0x00004000,
+		0x03A, 0x00000400,
+		0x03B, 0x0007202C,
+		0x03C, 0x00010000,
+		0x03A, 0x000000A0,
+		0x03B, 0x0006B064,
+		0x03C, 0x00004000,
+		0x03A, 0x000000D8,
+		0x03B, 0x00023070,
+		0x03C, 0x00004000,
+		0x03A, 0x00000468,
+		0x03B, 0x0005B870,
+		0x03C, 0x00010000,
+		0x03A, 0x00000098,
+		0x03B, 0x00052085,
+		0x03C, 0x000E4000,
+		0x03A, 0x00000418,
+		0x03B, 0x0004A080,
+		0x03C, 0x000F0000,
+		0x03A, 0x00000418,
+		0x03B, 0x00042080,
+		0x03C, 0x00010000,
+		0x03A, 0x00000080,
+		0x03B, 0x000BA02C,
+		0x03C, 0x00004000,
+		0x03A, 0x00000400,
+		0x03B, 0x000B202C,
+		0x03C, 0x00010000,
+		0x03A, 0x000000A0,
+		0x03B, 0x000AB064,
+		0x03C, 0x00004000,
+		0x03A, 0x000000D8,
+		0x03B, 0x000A3070,
+		0x03C, 0x00004000,
+		0x03A, 0x00000468,
+		0x03B, 0x0009B870,
+		0x03C, 0x00010000,
+		0x03A, 0x00000098,
+		0x03B, 0x00092085,
+		0x03C, 0x000E4000,
+		0x03A, 0x00000418,
+		0x03B, 0x0008A080,
+		0x03C, 0x000F0000,
+		0x03A, 0x00000418,
+		0x03B, 0x00082080,
+		0x03C, 0x00010000,
+		0x0EF, 0x00001100,
+	0xFF0F0740, 0xABCD,
+		0x034, 0x0004A0B2,
+		0x034, 0x000490AF,
+		0x034, 0x00048070,
+		0x034, 0x0004706D,
+		0x034, 0x00046050,
+		0x034, 0x0004504D,
+		0x034, 0x0004404A,
+		0x034, 0x00043047,
+		0x034, 0x0004200A,
+		0x034, 0x00041007,
+		0x034, 0x00040004,
+	0xFF0F02C0, 0xCDEF,
+		0x034, 0x0004A0B2,
+		0x034, 0x000490AF,
+		0x034, 0x00048070,
+		0x034, 0x0004706D,
+		0x034, 0x00046050,
+		0x034, 0x0004504D,
+		0x034, 0x0004404A,
+		0x034, 0x00043047,
+		0x034, 0x0004200A,
+		0x034, 0x00041007,
+		0x034, 0x00040004,
+	0xFF0F01C0, 0xCDEF,
+		0x034, 0x0004A0B2,
+		0x034, 0x000490AF,
+		0x034, 0x00048070,
+		0x034, 0x0004706D,
+		0x034, 0x00046050,
+		0x034, 0x0004504D,
+		0x034, 0x0004404A,
+		0x034, 0x00043047,
+		0x034, 0x0004200A,
+		0x034, 0x00041007,
+		0x034, 0x00040004,
+	0xFF0F07D8, 0xCDEF,
+		0x034, 0x0004A0B2,
+		0x034, 0x000490AF,
+		0x034, 0x00048070,
+		0x034, 0x0004706D,
+		0x034, 0x00046050,
+		0x034, 0x0004504D,
+		0x034, 0x0004404A,
+		0x034, 0x00043047,
+		0x034, 0x0004200A,
+		0x034, 0x00041007,
+		0x034, 0x00040004,
+	0xFF0F07D0, 0xCDEF,
+		0x034, 0x0004A0B2,
+		0x034, 0x000490AF,
+		0x034, 0x00048070,
+		0x034, 0x0004706D,
+		0x034, 0x00046050,
+		0x034, 0x0004504D,
+		0x034, 0x0004404A,
+		0x034, 0x00043047,
+		0x034, 0x0004200A,
+		0x034, 0x00041007,
+		0x034, 0x00040004,
+	0xCDCDCDCD, 0xCDCD,
+		0x034, 0x0004ADF5,
+		0x034, 0x00049DF2,
+		0x034, 0x00048DEF,
+		0x034, 0x00047DEC,
+		0x034, 0x00046DE9,
+		0x034, 0x00045DC9,
+		0x034, 0x00044CE8,
+		0x034, 0x000438CA,
+		0x034, 0x00042889,
+		0x034, 0x0004184A,
+		0x034, 0x0004044A,
+	0xFF0F0740, 0xDEAD,
+	0xFF0F0740, 0xABCD,
+		0x034, 0x0002A0B2,
+		0x034, 0x000290AF,
+		0x034, 0x00028070,
+		0x034, 0x0002706D,
+		0x034, 0x00026050,
+		0x034, 0x0002504D,
+		0x034, 0x0002404A,
+		0x034, 0x00023047,
+		0x034, 0x0002200A,
+		0x034, 0x00021007,
+		0x034, 0x00020004,
+	0xFF0F02C0, 0xCDEF,
+		0x034, 0x0002A0B2,
+		0x034, 0x000290AF,
+		0x034, 0x00028070,
+		0x034, 0x0002706D,
+		0x034, 0x00026050,
+		0x034, 0x0002504D,
+		0x034, 0x0002404A,
+		0x034, 0x00023047,
+		0x034, 0x0002200A,
+		0x034, 0x00021007,
+		0x034, 0x00020004,
+	0xFF0F01C0, 0xCDEF,
+		0x034, 0x0002A0B2,
+		0x034, 0x000290AF,
+		0x034, 0x00028070,
+		0x034, 0x0002706D,
+		0x034, 0x00026050,
+		0x034, 0x0002504D,
+		0x034, 0x0002404A,
+		0x034, 0x00023047,
+		0x034, 0x0002200A,
+		0x034, 0x00021007,
+		0x034, 0x00020004,
+	0xFF0F07D8, 0xCDEF,
+		0x034, 0x0002A0B2,
+		0x034, 0x000290AF,
+		0x034, 0x00028070,
+		0x034, 0x0002706D,
+		0x034, 0x00026050,
+		0x034, 0x0002504D,
+		0x034, 0x0002404A,
+		0x034, 0x00023047,
+		0x034, 0x0002200A,
+		0x034, 0x00021007,
+		0x034, 0x00020004,
+	0xFF0F07D0, 0xCDEF,
+		0x034, 0x0002A0B2,
+		0x034, 0x000290AF,
+		0x034, 0x00028070,
+		0x034, 0x0002706D,
+		0x034, 0x00026050,
+		0x034, 0x0002504D,
+		0x034, 0x0002404A,
+		0x034, 0x00023047,
+		0x034, 0x0002200A,
+		0x034, 0x00021007,
+		0x034, 0x00020004,
+	0xCDCDCDCD, 0xCDCD,
+		0x034, 0x0002ADF5,
+		0x034, 0x00029DF2,
+		0x034, 0x00028DEF,
+		0x034, 0x00027DEC,
+		0x034, 0x00026DE9,
+		0x034, 0x00025DC9,
+		0x034, 0x00024CE8,
+		0x034, 0x000238CA,
+		0x034, 0x00022889,
+		0x034, 0x0002184A,
+		0x034, 0x0002044A,
+	0xFF0F0740, 0xDEAD,
+	0xFF0F0740, 0xABCD,
+		0x034, 0x0000A0B2,
+		0x034, 0x000090AF,
+		0x034, 0x00008070,
+		0x034, 0x0000706D,
+		0x034, 0x00006050,
+		0x034, 0x0000504D,
+		0x034, 0x0000404A,
+		0x034, 0x00003047,
+		0x034, 0x0000200A,
+		0x034, 0x00001007,
+		0x034, 0x00000004,
+	0xFF0F02C0, 0xCDEF,
+		0x034, 0x0000A0B2,
+		0x034, 0x000090AF,
+		0x034, 0x00008070,
+		0x034, 0x0000706D,
+		0x034, 0x00006050,
+		0x034, 0x0000504D,
+		0x034, 0x0000404A,
+		0x034, 0x00003047,
+		0x034, 0x0000200A,
+		0x034, 0x00001007,
+		0x034, 0x00000004,
+	0xFF0F01C0, 0xCDEF,
+		0x034, 0x0000A0B2,
+		0x034, 0x000090AF,
+		0x034, 0x00008070,
+		0x034, 0x0000706D,
+		0x034, 0x00006050,
+		0x034, 0x0000504D,
+		0x034, 0x0000404A,
+		0x034, 0x00003047,
+		0x034, 0x0000200A,
+		0x034, 0x00001007,
+		0x034, 0x00000004,
+	0xFF0F07D8, 0xCDEF,
+		0x034, 0x0000A0B2,
+		0x034, 0x000090AF,
+		0x034, 0x00008070,
+		0x034, 0x0000706D,
+		0x034, 0x00006050,
+		0x034, 0x0000504D,
+		0x034, 0x0000404A,
+		0x034, 0x00003047,
+		0x034, 0x0000200A,
+		0x034, 0x00001007,
+		0x034, 0x00000004,
+	0xFF0F07D0, 0xCDEF,
+		0x034, 0x0000A0B2,
+		0x034, 0x000090AF,
+		0x034, 0x00008070,
+		0x034, 0x0000706D,
+		0x034, 0x00006050,
+		0x034, 0x0000504D,
+		0x034, 0x0000404A,
+		0x034, 0x00003047,
+		0x034, 0x0000200A,
+		0x034, 0x00001007,
+		0x034, 0x00000004,
+	0xCDCDCDCD, 0xCDCD,
+		0x034, 0x0000AFF7,
+		0x034, 0x00009DF7,
+		0x034, 0x00008DF4,
+		0x034, 0x00007DF1,
+		0x034, 0x00006DEE,
+		0x034, 0x00005DCD,
+		0x034, 0x00004CEB,
+		0x034, 0x000038CC,
+		0x034, 0x0000288B,
+		0x034, 0x0000184C,
+		0x034, 0x0000044C,
+	0xFF0F0740, 0xDEAD,
+		0x0EF, 0x00000000,
+	0xFF0F0740, 0xABCD,
+		0x018, 0x0001712A,
+		0x0EF, 0x00000040,
+		0x035, 0x000001D4,
+		0x035, 0x000081D4,
+		0x035, 0x000101D4,
+		0x035, 0x000201B4,
+		0x035, 0x000281B4,
+		0x035, 0x000301B4,
+		0x035, 0x000401B4,
+		0x035, 0x000481B4,
+		0x035, 0x000501B4,
+	0xFF0F02C0, 0xCDEF,
+		0x018, 0x0001712A,
+		0x0EF, 0x00000040,
+		0x035, 0x000001D4,
+		0x035, 0x000081D4,
+		0x035, 0x000101D4,
+		0x035, 0x000201B4,
+		0x035, 0x000281B4,
+		0x035, 0x000301B4,
+		0x035, 0x000401B4,
+		0x035, 0x000481B4,
+		0x035, 0x000501B4,
+	0xFF0F01C0, 0xCDEF,
+		0x018, 0x0001712A,
+		0x0EF, 0x00000040,
+		0x035, 0x000001D4,
+		0x035, 0x000081D4,
+		0x035, 0x000101D4,
+		0x035, 0x000201B4,
+		0x035, 0x000281B4,
+		0x035, 0x000301B4,
+		0x035, 0x000401B4,
+		0x035, 0x000481B4,
+		0x035, 0x000501B4,
+	0xFF0F07D8, 0xCDEF,
+		0x018, 0x0001712A,
+		0x0EF, 0x00000040,
+		0x035, 0x000001D4,
+		0x035, 0x000081D4,
+		0x035, 0x000101D4,
+		0x035, 0x000201B4,
+		0x035, 0x000281B4,
+		0x035, 0x000301B4,
+		0x035, 0x000401B4,
+		0x035, 0x000481B4,
+		0x035, 0x000501B4,
+	0xFF0F07D0, 0xCDEF,
+		0x018, 0x0001712A,
+		0x0EF, 0x00000040,
+		0x035, 0x000001D4,
+		0x035, 0x000081D4,
+		0x035, 0x000101D4,
+		0x035, 0x000201B4,
+		0x035, 0x000281B4,
+		0x035, 0x000301B4,
+		0x035, 0x000401B4,
+		0x035, 0x000481B4,
+		0x035, 0x000501B4,
+	0xCDCDCDCD, 0xCDCD,
+		0x018, 0x0001712A,
+		0x0EF, 0x00000040,
+		0x035, 0x00000188,
+		0x035, 0x00008147,
+		0x035, 0x00010147,
+		0x035, 0x000201D7,
+		0x035, 0x000281D7,
+		0x035, 0x000301D7,
+		0x035, 0x000401D8,
+		0x035, 0x000481D8,
+		0x035, 0x000501D8,
+	0xFF0F0740, 0xDEAD,
+		0x0EF, 0x00000000,
+	0xFF0F0740, 0xABCD,
+		0x018, 0x0001712A,
+		0x0EF, 0x00000010,
+		0x036, 0x00004BFB,
+		0x036, 0x0000CBFB,
+		0x036, 0x00014BFB,
+		0x036, 0x0001CBFB,
+		0x036, 0x00024F4B,
+		0x036, 0x0002CF4B,
+		0x036, 0x00034F4B,
+		0x036, 0x0003CF4B,
+		0x036, 0x00044F4B,
+		0x036, 0x0004CF4B,
+		0x036, 0x00054F4B,
+		0x036, 0x0005CF4B,
+	0xFF0F02C0, 0xCDEF,
+		0x018, 0x0001712A,
+		0x0EF, 0x00000010,
+		0x036, 0x00004BFB,
+		0x036, 0x0000CBFB,
+		0x036, 0x00014BFB,
+		0x036, 0x0001CBFB,
+		0x036, 0x00024F4B,
+		0x036, 0x0002CF4B,
+		0x036, 0x00034F4B,
+		0x036, 0x0003CF4B,
+		0x036, 0x00044F4B,
+		0x036, 0x0004CF4B,
+		0x036, 0x00054F4B,
+		0x036, 0x0005CF4B,
+	0xFF0F01C0, 0xCDEF,
+		0x018, 0x0001712A,
+		0x0EF, 0x00000010,
+		0x036, 0x00004BFB,
+		0x036, 0x0000CBFB,
+		0x036, 0x00014BFB,
+		0x036, 0x0001CBFB,
+		0x036, 0x00024F4B,
+		0x036, 0x0002CF4B,
+		0x036, 0x00034F4B,
+		0x036, 0x0003CF4B,
+		0x036, 0x00044F4B,
+		0x036, 0x0004CF4B,
+		0x036, 0x00054F4B,
+		0x036, 0x0005CF4B,
+	0xFF0F07D8, 0xCDEF,
+		0x018, 0x0001712A,
+		0x0EF, 0x00000010,
+		0x036, 0x00004BFB,
+		0x036, 0x0000CBFB,
+		0x036, 0x00014BFB,
+		0x036, 0x0001CBFB,
+		0x036, 0x00024F4B,
+		0x036, 0x0002CF4B,
+		0x036, 0x00034F4B,
+		0x036, 0x0003CF4B,
+		0x036, 0x00044F4B,
+		0x036, 0x0004CF4B,
+		0x036, 0x00054F4B,
+		0x036, 0x0005CF4B,
+	0xFF0F07D0, 0xCDEF,
+		0x018, 0x0001712A,
+		0x0EF, 0x00000010,
+		0x036, 0x00004BFB,
+		0x036, 0x0000CBFB,
+		0x036, 0x00014BFB,
+		0x036, 0x0001CBFB,
+		0x036, 0x00024F4B,
+		0x036, 0x0002CF4B,
+		0x036, 0x00034F4B,
+		0x036, 0x0003CF4B,
+		0x036, 0x00044F4B,
+		0x036, 0x0004CF4B,
+		0x036, 0x00054F4B,
+		0x036, 0x0005CF4B,
+	0xCDCDCDCD, 0xCDCD,
+		0x018, 0x0001712A,
+		0x0EF, 0x00000010,
+		0x036, 0x00084EB4,
+		0x036, 0x0008CC35,
+		0x036, 0x00094C35,
+		0x036, 0x0009CC35,
+		0x036, 0x000A4935,
+		0x036, 0x000ACC35,
+		0x036, 0x000B4C35,
+		0x036, 0x000BCC35,
+		0x036, 0x000C4EB4,
+		0x036, 0x000CCEB5,
+		0x036, 0x000D4EB5,
+		0x036, 0x000DCEB5,
+	0xFF0F0740, 0xDEAD,
+		0x0EF, 0x00000000,
+		0x0EF, 0x00000008,
+	0xFF0F0740, 0xABCD,
+		0x03C, 0x000002CC,
+		0x03C, 0x00000522,
+		0x03C, 0x00000902,
+	0xFF0F02C0, 0xCDEF,
+		0x03C, 0x000002CC,
+		0x03C, 0x00000522,
+		0x03C, 0x00000902,
+	0xFF0F01C0, 0xCDEF,
+		0x03C, 0x000002CC,
+		0x03C, 0x00000522,
+		0x03C, 0x00000902,
+	0xFF0F07D8, 0xCDEF,
+		0x03C, 0x000002CC,
+		0x03C, 0x00000522,
+		0x03C, 0x00000902,
+	0xFF0F07D0, 0xCDEF,
+		0x03C, 0x000002CC,
+		0x03C, 0x00000522,
+		0x03C, 0x00000902,
+	0xCDCDCDCD, 0xCDCD,
+		0x03C, 0x000002A8,
+		0x03C, 0x000005A2,
+		0x03C, 0x00000880,
+	0xFF0F0740, 0xDEAD,
+		0x0EF, 0x00000000,
+		0x018, 0x0001712A,
+		0x0EF, 0x00000002,
+		0x0DF, 0x00000080,
+		0x01F, 0x00040064,
+	0xFF0F0740, 0xABCD,
+		0x061, 0x000FDD43,
+		0x062, 0x00038F4B,
+		0x063, 0x00032117,
+		0x064, 0x000194AC,
+		0x065, 0x000931D1,
+	0xFF0F02C0, 0xCDEF,
+		0x061, 0x000FDD43,
+		0x062, 0x00038F4B,
+		0x063, 0x00032117,
+		0x064, 0x000194AC,
+		0x065, 0x000931D1,
+	0xFF0F01C0, 0xCDEF,
+		0x061, 0x000FDD43,
+		0x062, 0x00038F4B,
+		0x063, 0x00032117,
+		0x064, 0x000194AC,
+		0x065, 0x000931D1,
+	0xFF0F07D8, 0xCDEF,
+		0x061, 0x000FDD43,
+		0x062, 0x00038F4B,
+		0x063, 0x00032117,
+		0x064, 0x000194AC,
+		0x065, 0x000931D1,
+	0xFF0F07D0, 0xCDEF,
+		0x061, 0x000FDD43,
+		0x062, 0x00038F4B,
+		0x063, 0x00032117,
+		0x064, 0x000194AC,
+		0x065, 0x000931D1,
+	0xCDCDCDCD, 0xCDCD,
+		0x061, 0x000E5D53,
+		0x062, 0x00038FCD,
+		0x063, 0x000314EB,
+		0x064, 0x000196AC,
+		0x065, 0x000911D7,
+	0xFF0F0740, 0xDEAD,
+		0x008, 0x00008400,
+		0x01C, 0x000739D2,
+		0x0B4, 0x0001E78D,
+		0x018, 0x0001F12A,
+		0x0FE, 0x00000000,
+		0x0FE, 0x00000000,
+		0x0FE, 0x00000000,
+		0x0FE, 0x00000000,
+		0x0B4, 0x0001A78D,
+		0x018, 0x0001712A,
+};
+
+u32 RTL8812AE_RADIOB_ARRAY[] = {
+		0x056, 0x00051CF2,
+		0x066, 0x00040000,
+		0x089, 0x00000080,
+	0xFF0F0740, 0xABCD,
+		0x086, 0x00014B38,
+	0xFF0F01C0, 0xCDEF,
+		0x086, 0x00014B38,
+	0xFF0F02C0, 0xCDEF,
+		0x086, 0x00014B38,
+	0xFF0F07D8, 0xCDEF,
+		0x086, 0x00014B3A,
+	0xFF0F07D0, 0xCDEF,
+		0x086, 0x00014B3A,
+	0xCDCDCDCD, 0xCDCD,
+		0x086, 0x00014B38,
+	0xFF0F0740, 0xDEAD,
+		0x018, 0x00000006,
+		0x0EF, 0x00002000,
+	0xFF0F07D8, 0xABCD,
+		0x03B, 0x0003F218,
+		0x03B, 0x00030A58,
+		0x03B, 0x0002FA58,
+		0x03B, 0x00022590,
+		0x03B, 0x0001FA50,
+		0x03B, 0x00010248,
+		0x03B, 0x00008240,
+	0xFF0F07D0, 0xCDEF,
+		0x03B, 0x0003F218,
+		0x03B, 0x00030A58,
+		0x03B, 0x0002FA58,
+		0x03B, 0x00022590,
+		0x03B, 0x0001FA50,
+		0x03B, 0x00010248,
+		0x03B, 0x00008240,
+	0xCDCDCDCD, 0xCDCD,
+		0x03B, 0x00038A58,
+		0x03B, 0x00037A58,
+		0x03B, 0x0002A590,
+		0x03B, 0x00027A50,
+		0x03B, 0x00018248,
+		0x03B, 0x00010240,
+		0x03B, 0x00008240,
+	0xFF0F07D8, 0xDEAD,
+		0x0EF, 0x00000100,
+	0xFF0F07D8, 0xABCD,
+		0x034, 0x0000A4EE,
+		0x034, 0x00009076,
+		0x034, 0x00008073,
+		0x034, 0x00007070,
+		0x034, 0x0000606D,
+		0x034, 0x0000506A,
+		0x034, 0x00004049,
+		0x034, 0x00003046,
+		0x034, 0x00002028,
+		0x034, 0x00001025,
+		0x034, 0x00000022,
+	0xCDCDCDCD, 0xCDCD,
+		0x034, 0x0000ADF4,
+		0x034, 0x00009DF1,
+		0x034, 0x00008DEE,
+		0x034, 0x00007DEB,
+		0x034, 0x00006DE8,
+		0x034, 0x00005CEC,
+		0x034, 0x00004CE9,
+		0x034, 0x000034EA,
+		0x034, 0x000024E7,
+		0x034, 0x0000146B,
+		0x034, 0x0000006D,
+	0xFF0F07D8, 0xDEAD,
+		0x0EF, 0x00000000,
+		0x0EF, 0x000020A2,
+		0x0DF, 0x00000080,
+		0x035, 0x00000192,
+		0x035, 0x00008192,
+		0x035, 0x00010192,
+		0x036, 0x00000024,
+		0x036, 0x00008024,
+		0x036, 0x00010024,
+		0x036, 0x00018024,
+		0x0EF, 0x00000000,
+		0x051, 0x00000C21,
+		0x052, 0x000006D9,
+		0x053, 0x000FC649,
+		0x054, 0x0000017E,
+		0x0EF, 0x00000002,
+		0x008, 0x00008400,
+		0x018, 0x0001712A,
+		0x0EF, 0x00001000,
+		0x03A, 0x00000080,
+		0x03B, 0x0003A02C,
+		0x03C, 0x00004000,
+		0x03A, 0x00000400,
+		0x03B, 0x0003202C,
+		0x03C, 0x00010000,
+		0x03A, 0x000000A0,
+		0x03B, 0x0002B064,
+		0x03C, 0x00004000,
+		0x03A, 0x000000D8,
+		0x03B, 0x00023070,
+		0x03C, 0x00004000,
+		0x03A, 0x00000468,
+		0x03B, 0x0001B870,
+		0x03C, 0x00010000,
+		0x03A, 0x00000098,
+		0x03B, 0x00012085,
+		0x03C, 0x000E4000,
+		0x03A, 0x00000418,
+		0x03B, 0x0000A080,
+		0x03C, 0x000F0000,
+		0x03A, 0x00000418,
+		0x03B, 0x00002080,
+		0x03C, 0x00010000,
+		0x03A, 0x00000080,
+		0x03B, 0x0007A02C,
+		0x03C, 0x00004000,
+		0x03A, 0x00000400,
+		0x03B, 0x0007202C,
+		0x03C, 0x00010000,
+		0x03A, 0x000000A0,
+		0x03B, 0x0006B064,
+		0x03C, 0x00004000,
+		0x03A, 0x000000D8,
+		0x03B, 0x00063070,
+		0x03C, 0x00004000,
+		0x03A, 0x00000468,
+		0x03B, 0x0005B870,
+		0x03C, 0x00010000,
+		0x03A, 0x00000098,
+		0x03B, 0x00052085,
+		0x03C, 0x000E4000,
+		0x03A, 0x00000418,
+		0x03B, 0x0004A080,
+		0x03C, 0x000F0000,
+		0x03A, 0x00000418,
+		0x03B, 0x00042080,
+		0x03C, 0x00010000,
+		0x03A, 0x00000080,
+		0x03B, 0x000BA02C,
+		0x03C, 0x00004000,
+		0x03A, 0x00000400,
+		0x03B, 0x000B202C,
+		0x03C, 0x00010000,
+		0x03A, 0x000000A0,
+		0x03B, 0x000AB064,
+		0x03C, 0x00004000,
+		0x03A, 0x000000D8,
+		0x03B, 0x000A3070,
+		0x03C, 0x00004000,
+		0x03A, 0x00000468,
+		0x03B, 0x0009B870,
+		0x03C, 0x00010000,
+		0x03A, 0x00000098,
+		0x03B, 0x00092085,
+		0x03C, 0x000E4000,
+		0x03A, 0x00000418,
+		0x03B, 0x0008A080,
+		0x03C, 0x000F0000,
+		0x03A, 0x00000418,
+		0x03B, 0x00082080,
+		0x03C, 0x00010000,
+		0x0EF, 0x00001100,
+	0xFF0F0740, 0xABCD,
+		0x034, 0x0004A0B2,
+		0x034, 0x000490AF,
+		0x034, 0x00048070,
+		0x034, 0x0004706D,
+		0x034, 0x00046050,
+		0x034, 0x0004504D,
+		0x034, 0x0004404A,
+		0x034, 0x00043047,
+		0x034, 0x0004200A,
+		0x034, 0x00041007,
+		0x034, 0x00040004,
+	0xFF0F01C0, 0xCDEF,
+		0x034, 0x0004A0B2,
+		0x034, 0x000490AF,
+		0x034, 0x00048070,
+		0x034, 0x0004706D,
+		0x034, 0x00046050,
+		0x034, 0x0004504D,
+		0x034, 0x0004404A,
+		0x034, 0x00043047,
+		0x034, 0x0004200A,
+		0x034, 0x00041007,
+		0x034, 0x00040004,
+	0xFF0F02C0, 0xCDEF,
+		0x034, 0x0004A0B2,
+		0x034, 0x000490AF,
+		0x034, 0x00048070,
+		0x034, 0x0004706D,
+		0x034, 0x00046050,
+		0x034, 0x0004504D,
+		0x034, 0x0004404A,
+		0x034, 0x00043047,
+		0x034, 0x0004200A,
+		0x034, 0x00041007,
+		0x034, 0x00040004,
+	0xFF0F07D8, 0xCDEF,
+		0x034, 0x0004A0B2,
+		0x034, 0x000490AF,
+		0x034, 0x00048070,
+		0x034, 0x0004706D,
+		0x034, 0x00046050,
+		0x034, 0x0004504D,
+		0x034, 0x0004404A,
+		0x034, 0x00043047,
+		0x034, 0x0004200A,
+		0x034, 0x00041007,
+		0x034, 0x00040004,
+	0xFF0F07D0, 0xCDEF,
+		0x034, 0x0004A0B2,
+		0x034, 0x000490AF,
+		0x034, 0x00048070,
+		0x034, 0x0004706D,
+		0x034, 0x00046050,
+		0x034, 0x0004504D,
+		0x034, 0x0004404A,
+		0x034, 0x00043047,
+		0x034, 0x0004200A,
+		0x034, 0x00041007,
+		0x034, 0x00040004,
+	0xCDCDCDCD, 0xCDCD,
+		0x034, 0x0004ADF5,
+		0x034, 0x00049DF2,
+		0x034, 0x00048DEF,
+		0x034, 0x00047DEC,
+		0x034, 0x00046DE9,
+		0x034, 0x00045DC9,
+		0x034, 0x00044CE8,
+		0x034, 0x000438CA,
+		0x034, 0x00042889,
+		0x034, 0x0004184A,
+		0x034, 0x0004044A,
+	0xFF0F0740, 0xDEAD,
+	0xFF0F0740, 0xABCD,
+		0x034, 0x0002A0B2,
+		0x034, 0x000290AF,
+		0x034, 0x00028070,
+		0x034, 0x0002706D,
+		0x034, 0x00026050,
+		0x034, 0x0002504D,
+		0x034, 0x0002404A,
+		0x034, 0x00023047,
+		0x034, 0x0002200A,
+		0x034, 0x00021007,
+		0x034, 0x00020004,
+	0xFF0F01C0, 0xCDEF,
+		0x034, 0x0002A0B2,
+		0x034, 0x000290AF,
+		0x034, 0x00028070,
+		0x034, 0x0002706D,
+		0x034, 0x00026050,
+		0x034, 0x0002504D,
+		0x034, 0x0002404A,
+		0x034, 0x00023047,
+		0x034, 0x0002200A,
+		0x034, 0x00021007,
+		0x034, 0x00020004,
+	0xFF0F02C0, 0xCDEF,
+		0x034, 0x0002A0B2,
+		0x034, 0x000290AF,
+		0x034, 0x00028070,
+		0x034, 0x0002706D,
+		0x034, 0x00026050,
+		0x034, 0x0002504D,
+		0x034, 0x0002404A,
+		0x034, 0x00023047,
+		0x034, 0x0002200A,
+		0x034, 0x00021007,
+		0x034, 0x00020004,
+	0xFF0F07D8, 0xCDEF,
+		0x034, 0x0002A0B2,
+		0x034, 0x000290AF,
+		0x034, 0x00028070,
+		0x034, 0x0002706D,
+		0x034, 0x00026050,
+		0x034, 0x0002504D,
+		0x034, 0x0002404A,
+		0x034, 0x00023047,
+		0x034, 0x0002200A,
+		0x034, 0x00021007,
+		0x034, 0x00020004,
+	0xFF0F07D0, 0xCDEF,
+		0x034, 0x0002A0B2,
+		0x034, 0x000290AF,
+		0x034, 0x00028070,
+		0x034, 0x0002706D,
+		0x034, 0x00026050,
+		0x034, 0x0002504D,
+		0x034, 0x0002404A,
+		0x034, 0x00023047,
+		0x034, 0x0002200A,
+		0x034, 0x00021007,
+		0x034, 0x00020004,
+	0xCDCDCDCD, 0xCDCD,
+		0x034, 0x0002ADF5,
+		0x034, 0x00029DF2,
+		0x034, 0x00028DEF,
+		0x034, 0x00027DEC,
+		0x034, 0x00026DE9,
+		0x034, 0x00025DC9,
+		0x034, 0x00024CE8,
+		0x034, 0x000238CA,
+		0x034, 0x00022889,
+		0x034, 0x0002184A,
+		0x034, 0x0002044A,
+	0xFF0F0740, 0xDEAD,
+	0xFF0F0740, 0xABCD,
+		0x034, 0x0000A0B2,
+		0x034, 0x000090AF,
+		0x034, 0x00008070,
+		0x034, 0x0000706D,
+		0x034, 0x00006050,
+		0x034, 0x0000504D,
+		0x034, 0x0000404A,
+		0x034, 0x00003047,
+		0x034, 0x0000200A,
+		0x034, 0x00001007,
+		0x034, 0x00000004,
+	0xFF0F01C0, 0xCDEF,
+		0x034, 0x0000A0B2,
+		0x034, 0x000090AF,
+		0x034, 0x00008070,
+		0x034, 0x0000706D,
+		0x034, 0x00006050,
+		0x034, 0x0000504D,
+		0x034, 0x0000404A,
+		0x034, 0x00003047,
+		0x034, 0x0000200A,
+		0x034, 0x00001007,
+		0x034, 0x00000004,
+	0xFF0F02C0, 0xCDEF,
+		0x034, 0x0000A0B2,
+		0x034, 0x000090AF,
+		0x034, 0x00008070,
+		0x034, 0x0000706D,
+		0x034, 0x00006050,
+		0x034, 0x0000504D,
+		0x034, 0x0000404A,
+		0x034, 0x00003047,
+		0x034, 0x0000200A,
+		0x034, 0x00001007,
+		0x034, 0x00000004,
+	0xFF0F07D8, 0xCDEF,
+		0x034, 0x0000A0B2,
+		0x034, 0x000090AF,
+		0x034, 0x00008070,
+		0x034, 0x0000706D,
+		0x034, 0x00006050,
+		0x034, 0x0000504D,
+		0x034, 0x0000404A,
+		0x034, 0x00003047,
+		0x034, 0x0000200A,
+		0x034, 0x00001007,
+		0x034, 0x00000004,
+	0xFF0F07D0, 0xCDEF,
+		0x034, 0x0000A0B2,
+		0x034, 0x000090AF,
+		0x034, 0x00008070,
+		0x034, 0x0000706D,
+		0x034, 0x00006050,
+		0x034, 0x0000504D,
+		0x034, 0x0000404A,
+		0x034, 0x00003047,
+		0x034, 0x0000200A,
+		0x034, 0x00001007,
+		0x034, 0x00000004,
+	0xCDCDCDCD, 0xCDCD,
+		0x034, 0x0000AFF7,
+		0x034, 0x00009DF7,
+		0x034, 0x00008DF4,
+		0x034, 0x00007DF1,
+		0x034, 0x00006DEE,
+		0x034, 0x00005DCD,
+		0x034, 0x00004CEB,
+		0x034, 0x000038CC,
+		0x034, 0x0000288B,
+		0x034, 0x0000184C,
+		0x034, 0x0000044C,
+	0xFF0F0740, 0xDEAD,
+		0x0EF, 0x00000000,
+	0xFF0F0740, 0xABCD,
+		0x018, 0x0001712A,
+		0x0EF, 0x00000040,
+		0x035, 0x000001C5,
+		0x035, 0x000081C5,
+		0x035, 0x000101C5,
+		0x035, 0x00020174,
+		0x035, 0x00028174,
+		0x035, 0x00030174,
+		0x035, 0x00040185,
+		0x035, 0x00048185,
+		0x035, 0x00050185,
+		0x0EF, 0x00000000,
+	0xFF0F01C0, 0xCDEF,
+		0x018, 0x0001712A,
+		0x0EF, 0x00000040,
+		0x035, 0x000001C5,
+		0x035, 0x000081C5,
+		0x035, 0x000101C5,
+		0x035, 0x00020174,
+		0x035, 0x00028174,
+		0x035, 0x00030174,
+		0x035, 0x00040185,
+		0x035, 0x00048185,
+		0x035, 0x00050185,
+		0x0EF, 0x00000000,
+	0xFF0F02C0, 0xCDEF,
+		0x018, 0x0001712A,
+		0x0EF, 0x00000040,
+		0x035, 0x000001C5,
+		0x035, 0x000081C5,
+		0x035, 0x000101C5,
+		0x035, 0x00020174,
+		0x035, 0x00028174,
+		0x035, 0x00030174,
+		0x035, 0x00040185,
+		0x035, 0x00048185,
+		0x035, 0x00050185,
+		0x0EF, 0x00000000,
+	0xFF0F07D8, 0xCDEF,
+		0x018, 0x0001712A,
+		0x0EF, 0x00000040,
+		0x035, 0x000001C5,
+		0x035, 0x000081C5,
+		0x035, 0x000101C5,
+		0x035, 0x00020174,
+		0x035, 0x00028174,
+		0x035, 0x00030174,
+		0x035, 0x00040185,
+		0x035, 0x00048185,
+		0x035, 0x00050185,
+		0x0EF, 0x00000000,
+	0xFF0F07D0, 0xCDEF,
+		0x018, 0x0001712A,
+		0x0EF, 0x00000040,
+		0x035, 0x000001C5,
+		0x035, 0x000081C5,
+		0x035, 0x000101C5,
+		0x035, 0x00020174,
+		0x035, 0x00028174,
+		0x035, 0x00030174,
+		0x035, 0x00040185,
+		0x035, 0x00048185,
+		0x035, 0x00050185,
+		0x0EF, 0x00000000,
+	0xCDCDCDCD, 0xCDCD,
+		0x018, 0x0001712A,
+		0x0EF, 0x00000040,
+		0x035, 0x00000186,
+		0x035, 0x00008186,
+		0x035, 0x00010185,
+		0x035, 0x000201D5,
+		0x035, 0x000281D5,
+		0x035, 0x000301D5,
+		0x035, 0x000401D5,
+		0x035, 0x000481D5,
+		0x035, 0x000501D5,
+		0x0EF, 0x00000000,
+	0xFF0F0740, 0xDEAD,
+	0xFF0F0740, 0xABCD,
+		0x018, 0x0001712A,
+		0x0EF, 0x00000010,
+		0x036, 0x00005B8B,
+		0x036, 0x0000DB8B,
+		0x036, 0x00015B8B,
+		0x036, 0x0001DB8B,
+		0x036, 0x000262DB,
+		0x036, 0x0002E2DB,
+		0x036, 0x000362DB,
+		0x036, 0x0003E2DB,
+		0x036, 0x0004553B,
+		0x036, 0x0004D53B,
+		0x036, 0x0005553B,
+		0x036, 0x0005D53B,
+	0xFF0F01C0, 0xCDEF,
+		0x018, 0x0001712A,
+		0x0EF, 0x00000010,
+		0x036, 0x00005B8B,
+		0x036, 0x0000DB8B,
+		0x036, 0x00015B8B,
+		0x036, 0x0001DB8B,
+		0x036, 0x000262DB,
+		0x036, 0x0002E2DB,
+		0x036, 0x000362DB,
+		0x036, 0x0003E2DB,
+		0x036, 0x0004553B,
+		0x036, 0x0004D53B,
+		0x036, 0x0005553B,
+		0x036, 0x0005D53B,
+	0xFF0F02C0, 0xCDEF,
+		0x018, 0x0001712A,
+		0x0EF, 0x00000010,
+		0x036, 0x00005B8B,
+		0x036, 0x0000DB8B,
+		0x036, 0x00015B8B,
+		0x036, 0x0001DB8B,
+		0x036, 0x000262DB,
+		0x036, 0x0002E2DB,
+		0x036, 0x000362DB,
+		0x036, 0x0003E2DB,
+		0x036, 0x0004553B,
+		0x036, 0x0004D53B,
+		0x036, 0x0005553B,
+		0x036, 0x0005D53B,
+	0xFF0F07D8, 0xCDEF,
+		0x018, 0x0001712A,
+		0x0EF, 0x00000010,
+		0x036, 0x00005B8B,
+		0x036, 0x0000DB8B,
+		0x036, 0x00015B8B,
+		0x036, 0x0001DB8B,
+		0x036, 0x000262DB,
+		0x036, 0x0002E2DB,
+		0x036, 0x000362DB,
+		0x036, 0x0003E2DB,
+		0x036, 0x0004553B,
+		0x036, 0x0004D53B,
+		0x036, 0x0005553B,
+		0x036, 0x0005D53B,
+	0xFF0F07D0, 0xCDEF,
+		0x018, 0x0001712A,
+		0x0EF, 0x00000010,
+		0x036, 0x00005B8B,
+		0x036, 0x0000DB8B,
+		0x036, 0x00015B8B,
+		0x036, 0x0001DB8B,
+		0x036, 0x000262DB,
+		0x036, 0x0002E2DB,
+		0x036, 0x000362DB,
+		0x036, 0x0003E2DB,
+		0x036, 0x0004553B,
+		0x036, 0x0004D53B,
+		0x036, 0x0005553B,
+		0x036, 0x0005D53B,
+	0xCDCDCDCD, 0xCDCD,
+		0x018, 0x0001712A,
+		0x0EF, 0x00000010,
+		0x036, 0x00084EB4,
+		0x036, 0x0008C9B4,
+		0x036, 0x000949B4,
+		0x036, 0x0009C9B4,
+		0x036, 0x000A4935,
+		0x036, 0x000AC935,
+		0x036, 0x000B4935,
+		0x036, 0x000BC935,
+		0x036, 0x000C4EB4,
+		0x036, 0x000CCEB4,
+		0x036, 0x000D4EB4,
+		0x036, 0x000DCEB4,
+	0xFF0F0740, 0xDEAD,
+		0x0EF, 0x00000000,
+		0x0EF, 0x00000008,
+	0xFF0F0740, 0xABCD,
+		0x03C, 0x000002DC,
+		0x03C, 0x00000524,
+		0x03C, 0x00000902,
+	0xFF0F01C0, 0xCDEF,
+		0x03C, 0x000002DC,
+		0x03C, 0x00000524,
+		0x03C, 0x00000902,
+	0xFF0F02C0, 0xCDEF,
+		0x03C, 0x000002DC,
+		0x03C, 0x00000524,
+		0x03C, 0x00000902,
+	0xFF0F07D8, 0xCDEF,
+		0x03C, 0x000002DC,
+		0x03C, 0x00000524,
+		0x03C, 0x00000902,
+	0xFF0F07D0, 0xCDEF,
+		0x03C, 0x000002DC,
+		0x03C, 0x00000524,
+		0x03C, 0x00000902,
+	0xCDCDCDCD, 0xCDCD,
+		0x03C, 0x000002AA,
+		0x03C, 0x000005A2,
+		0x03C, 0x00000880,
+	0xFF0F0740, 0xDEAD,
+		0x0EF, 0x00000000,
+		0x018, 0x0001712A,
+		0x0EF, 0x00000002,
+		0x0DF, 0x00000080,
+	0xFF0F0740, 0xABCD,
+		0x061, 0x000EAC43,
+		0x062, 0x00038F47,
+		0x063, 0x00031157,
+		0x064, 0x0001C4AC,
+		0x065, 0x000931D1,
+	0xFF0F01C0, 0xCDEF,
+		0x061, 0x000EAC43,
+		0x062, 0x00038F47,
+		0x063, 0x00031157,
+		0x064, 0x0001C4AC,
+		0x065, 0x000931D1,
+	0xFF0F02C0, 0xCDEF,
+		0x061, 0x000EAC43,
+		0x062, 0x00038F47,
+		0x063, 0x00031157,
+		0x064, 0x0001C4AC,
+		0x065, 0x000931D1,
+	0xFF0F07D8, 0xCDEF,
+		0x061, 0x000EAC43,
+		0x062, 0x00038F47,
+		0x063, 0x00031157,
+		0x064, 0x0001C4AC,
+		0x065, 0x000931D1,
+	0xFF0F07D0, 0xCDEF,
+		0x061, 0x000EAC43,
+		0x062, 0x00038F47,
+		0x063, 0x00031157,
+		0x064, 0x0001C4AC,
+		0x065, 0x000931D1,
+	0xCDCDCDCD, 0xCDCD,
+		0x061, 0x000E5D53,
+		0x062, 0x00038FCD,
+		0x063, 0x000314EB,
+		0x064, 0x000196AC,
+		0x065, 0x000931D7,
+	0xFF0F0740, 0xDEAD,
+		0x008, 0x00008400,
+};
+
+u32 RTL8821AE_RADIOA_ARRAY[] = {
+		0x018, 0x0001712A,
+		0x056, 0x00051CF2,
+		0x066, 0x00040000,
+		0x000, 0x00010000,
+		0x01E, 0x00080000,
+		0x082, 0x00000830,
+		0x083, 0x00021800,
+		0x084, 0x00028000,
+		0x085, 0x00048000,
+		0x086, 0x00094838,
+		0x087, 0x00044980,
+		0x088, 0x00048000,
+		0x089, 0x0000D480,
+		0x08A, 0x00042240,
+		0x08B, 0x000F0380,
+		0x08C, 0x00090000,
+		0x08D, 0x00022852,
+		0x08E, 0x00065540,
+		0x08F, 0x00088001,
+		0x0EF, 0x00020000,
+		0x03E, 0x00000380,
+		0x03F, 0x00090018,
+		0x03E, 0x00020380,
+		0x03F, 0x000A0018,
+		0x03E, 0x00040308,
+		0x03F, 0x000A0018,
+		0x03E, 0x00060018,
+		0x03F, 0x000A0018,
+		0x0EF, 0x00000000,
+		0x018, 0x0001712A,
+		0x089, 0x00000080,
+		0x08B, 0x00080180,
+		0x0EF, 0x00001000,
+		0x03A, 0x00000244,
+		0x03B, 0x00038027,
+		0x03C, 0x00082000,
+		0x03A, 0x00000244,
+		0x03B, 0x00030113,
+		0x03C, 0x00082000,
+		0x03A, 0x0000014C,
+		0x03B, 0x00028027,
+		0x03C, 0x00082000,
+		0x03A, 0x000000CC,
+		0x03B, 0x00027027,
+		0x03C, 0x00042000,
+		0x03A, 0x0000014C,
+		0x03B, 0x0001F913,
+		0x03C, 0x00042000,
+		0x03A, 0x0000010C,
+		0x03B, 0x00017F10,
+		0x03C, 0x00012000,
+		0x03A, 0x000000D0,
+		0x03B, 0x00008027,
+		0x03C, 0x000CA000,
+		0x03A, 0x00000244,
+		0x03B, 0x00078027,
+		0x03C, 0x00082000,
+		0x03A, 0x00000244,
+		0x03B, 0x00070113,
+		0x03C, 0x00082000,
+		0x03A, 0x0000014C,
+		0x03B, 0x00068027,
+		0x03C, 0x00082000,
+		0x03A, 0x000000CC,
+		0x03B, 0x00067027,
+		0x03C, 0x00042000,
+		0x03A, 0x0000014C,
+		0x03B, 0x0005F913,
+		0x03C, 0x00042000,
+		0x03A, 0x0000010C,
+		0x03B, 0x00057F10,
+		0x03C, 0x00012000,
+		0x03A, 0x000000D0,
+		0x03B, 0x00048027,
+		0x03C, 0x000CA000,
+		0x03A, 0x00000244,
+		0x03B, 0x000B8027,
+		0x03C, 0x00082000,
+		0x03A, 0x00000244,
+		0x03B, 0x000B0113,
+		0x03C, 0x00082000,
+		0x03A, 0x0000014C,
+		0x03B, 0x000A8027,
+		0x03C, 0x00082000,
+		0x03A, 0x000000CC,
+		0x03B, 0x000A7027,
+		0x03C, 0x00042000,
+		0x03A, 0x0000014C,
+		0x03B, 0x0009F913,
+		0x03C, 0x00042000,
+		0x03A, 0x0000010C,
+		0x03B, 0x00097F10,
+		0x03C, 0x00012000,
+		0x03A, 0x000000D0,
+		0x03B, 0x00088027,
+		0x03C, 0x000CA000,
+		0x0EF, 0x00000000,
+		0x0EF, 0x00001100,
+	0xFF0F0104, 0xABCD,
+		0x034, 0x0004ADF3,
+		0x034, 0x00049DF0,
+	0xFF0F0204, 0xCDEF,
+		0x034, 0x0004ADF3,
+		0x034, 0x00049DF0,
+	0xFF0F0404, 0xCDEF,
+		0x034, 0x0004ADF3,
+		0x034, 0x00049DF0,
+	0xFF0F0200, 0xCDEF,
+		0x034, 0x0004ADF5,
+		0x034, 0x00049DF2,
+	0xFF0F02C0, 0xCDEF,
+		0x034, 0x0004A0F3,
+		0x034, 0x000490B1,
+	0xCDCDCDCD, 0xCDCD,
+		0x034, 0x0004ADF7,
+		0x034, 0x00049DF3,
+	0xFF0F0104, 0xDEAD,
+	0xFF0F0104, 0xABCD,
+		0x034, 0x00048DED,
+		0x034, 0x00047DEA,
+		0x034, 0x00046DE7,
+		0x034, 0x00045CE9,
+		0x034, 0x00044CE6,
+		0x034, 0x000438C6,
+		0x034, 0x00042886,
+		0x034, 0x00041486,
+		0x034, 0x00040447,
+	0xFF0F0204, 0xCDEF,
+		0x034, 0x00048DED,
+		0x034, 0x00047DEA,
+		0x034, 0x00046DE7,
+		0x034, 0x00045CE9,
+		0x034, 0x00044CE6,
+		0x034, 0x000438C6,
+		0x034, 0x00042886,
+		0x034, 0x00041486,
+		0x034, 0x00040447,
+	0xFF0F0404, 0xCDEF,
+		0x034, 0x00048DED,
+		0x034, 0x00047DEA,
+		0x034, 0x00046DE7,
+		0x034, 0x00045CE9,
+		0x034, 0x00044CE6,
+		0x034, 0x000438C6,
+		0x034, 0x00042886,
+		0x034, 0x00041486,
+		0x034, 0x00040447,
+	0xFF0F02C0, 0xCDEF,
+		0x034, 0x000480AE,
+		0x034, 0x000470AB,
+		0x034, 0x0004608B,
+		0x034, 0x00045069,
+		0x034, 0x00044048,
+		0x034, 0x00043045,
+		0x034, 0x00042026,
+		0x034, 0x00041023,
+		0x034, 0x00040002,
+	0xCDCDCDCD, 0xCDCD,
+		0x034, 0x00048DEF,
+		0x034, 0x00047DEC,
+		0x034, 0x00046DE9,
+		0x034, 0x00045CCB,
+		0x034, 0x0004488D,
+		0x034, 0x0004348D,
+		0x034, 0x0004248A,
+		0x034, 0x0004108D,
+		0x034, 0x0004008A,
+	0xFF0F0104, 0xDEAD,
+	0xFF0F0200, 0xABCD,
+		0x034, 0x0002ADF4,
+	0xFF0F02C0, 0xCDEF,
+		0x034, 0x0002A0F3,
+	0xCDCDCDCD, 0xCDCD,
+		0x034, 0x0002ADF7,
+	0xFF0F0200, 0xDEAD,
+	0xFF0F0104, 0xABCD,
+		0x034, 0x00029DF4,
+	0xFF0F0204, 0xCDEF,
+		0x034, 0x00029DF4,
+	0xFF0F0404, 0xCDEF,
+		0x034, 0x00029DF4,
+	0xFF0F0200, 0xCDEF,
+		0x034, 0x00029DF1,
+	0xFF0F02C0, 0xCDEF,
+		0x034, 0x000290F0,
+	0xCDCDCDCD, 0xCDCD,
+		0x034, 0x00029DF2,
+	0xFF0F0104, 0xDEAD,
+	0xFF0F0104, 0xABCD,
+		0x034, 0x00028DF1,
+		0x034, 0x00027DEE,
+		0x034, 0x00026DEB,
+		0x034, 0x00025CEC,
+		0x034, 0x00024CE9,
+		0x034, 0x000238CA,
+		0x034, 0x00022889,
+		0x034, 0x00021489,
+		0x034, 0x0002044A,
+	0xFF0F0204, 0xCDEF,
+		0x034, 0x00028DF1,
+		0x034, 0x00027DEE,
+		0x034, 0x00026DEB,
+		0x034, 0x00025CEC,
+		0x034, 0x00024CE9,
+		0x034, 0x000238CA,
+		0x034, 0x00022889,
+		0x034, 0x00021489,
+		0x034, 0x0002044A,
+	0xFF0F0404, 0xCDEF,
+		0x034, 0x00028DF1,
+		0x034, 0x00027DEE,
+		0x034, 0x00026DEB,
+		0x034, 0x00025CEC,
+		0x034, 0x00024CE9,
+		0x034, 0x000238CA,
+		0x034, 0x00022889,
+		0x034, 0x00021489,
+		0x034, 0x0002044A,
+	0xFF0F02C0, 0xCDEF,
+		0x034, 0x000280AF,
+		0x034, 0x000270AC,
+		0x034, 0x0002608B,
+		0x034, 0x00025069,
+		0x034, 0x00024048,
+		0x034, 0x00023045,
+		0x034, 0x00022026,
+		0x034, 0x00021023,
+		0x034, 0x00020002,
+	0xCDCDCDCD, 0xCDCD,
+		0x034, 0x00028DEE,
+		0x034, 0x00027DEB,
+		0x034, 0x00026CCD,
+		0x034, 0x00025CCA,
+		0x034, 0x0002488C,
+		0x034, 0x0002384C,
+		0x034, 0x00022849,
+		0x034, 0x00021449,
+		0x034, 0x0002004D,
+	0xFF0F0104, 0xDEAD,
+	0xFF0F02C0, 0xABCD,
+		0x034, 0x0000A0D7,
+		0x034, 0x000090D3,
+		0x034, 0x000080B1,
+		0x034, 0x000070AE,
+	0xCDCDCDCD, 0xCDCD,
+		0x034, 0x0000ADF7,
+		0x034, 0x00009DF4,
+		0x034, 0x00008DF1,
+		0x034, 0x00007DEE,
+	0xFF0F02C0, 0xDEAD,
+	0xFF0F0104, 0xABCD,
+		0x034, 0x00006DEB,
+		0x034, 0x00005CEC,
+		0x034, 0x00004CE9,
+		0x034, 0x000038CA,
+		0x034, 0x00002889,
+		0x034, 0x00001489,
+		0x034, 0x0000044A,
+	0xFF0F0204, 0xCDEF,
+		0x034, 0x00006DEB,
+		0x034, 0x00005CEC,
+		0x034, 0x00004CE9,
+		0x034, 0x000038CA,
+		0x034, 0x00002889,
+		0x034, 0x00001489,
+		0x034, 0x0000044A,
+	0xFF0F0404, 0xCDEF,
+		0x034, 0x00006DEB,
+		0x034, 0x00005CEC,
+		0x034, 0x00004CE9,
+		0x034, 0x000038CA,
+		0x034, 0x00002889,
+		0x034, 0x00001489,
+		0x034, 0x0000044A,
+	0xFF0F02C0, 0xCDEF,
+		0x034, 0x0000608D,
+		0x034, 0x0000506B,
+		0x034, 0x0000404A,
+		0x034, 0x00003047,
+		0x034, 0x00002044,
+		0x034, 0x00001025,
+		0x034, 0x00000004,
+	0xCDCDCDCD, 0xCDCD,
+		0x034, 0x00006DCD,
+		0x034, 0x00005CCD,
+		0x034, 0x00004CCA,
+		0x034, 0x0000388C,
+		0x034, 0x00002888,
+		0x034, 0x00001488,
+		0x034, 0x00000486,
+	0xFF0F0104, 0xDEAD,
+		0x0EF, 0x00000000,
+		0x018, 0x0001712A,
+		0x0EF, 0x00000040,
+	0xFF0F0104, 0xABCD,
+		0x035, 0x00000187,
+		0x035, 0x00008187,
+		0x035, 0x00010187,
+		0x035, 0x00020188,
+		0x035, 0x00028188,
+		0x035, 0x00030188,
+		0x035, 0x00040188,
+		0x035, 0x00048188,
+		0x035, 0x00050188,
+	0xFF0F0204, 0xCDEF,
+		0x035, 0x00000187,
+		0x035, 0x00008187,
+		0x035, 0x00010187,
+		0x035, 0x00020188,
+		0x035, 0x00028188,
+		0x035, 0x00030188,
+		0x035, 0x00040188,
+		0x035, 0x00048188,
+		0x035, 0x00050188,
+	0xFF0F0404, 0xCDEF,
+		0x035, 0x00000187,
+		0x035, 0x00008187,
+		0x035, 0x00010187,
+		0x035, 0x00020188,
+		0x035, 0x00028188,
+		0x035, 0x00030188,
+		0x035, 0x00040188,
+		0x035, 0x00048188,
+		0x035, 0x00050188,
+	0xCDCDCDCD, 0xCDCD,
+		0x035, 0x00000145,
+		0x035, 0x00008145,
+		0x035, 0x00010145,
+		0x035, 0x00020196,
+		0x035, 0x00028196,
+		0x035, 0x00030196,
+		0x035, 0x000401C7,
+		0x035, 0x000481C7,
+		0x035, 0x000501C7,
+	0xFF0F0104, 0xDEAD,
+		0x0EF, 0x00000000,
+		0x018, 0x0001712A,
+		0x0EF, 0x00000010,
+	0xFF0F0104, 0xABCD,
+		0x036, 0x00085733,
+		0x036, 0x0008D733,
+		0x036, 0x00095733,
+		0x036, 0x0009D733,
+		0x036, 0x000A64B4,
+		0x036, 0x000AE4B4,
+		0x036, 0x000B64B4,
+		0x036, 0x000BE4B4,
+		0x036, 0x000C64B4,
+		0x036, 0x000CE4B4,
+		0x036, 0x000D64B4,
+		0x036, 0x000DE4B4,
+	0xFF0F0204, 0xCDEF,
+		0x036, 0x00085733,
+		0x036, 0x0008D733,
+		0x036, 0x00095733,
+		0x036, 0x0009D733,
+		0x036, 0x000A64B4,
+		0x036, 0x000AE4B4,
+		0x036, 0x000B64B4,
+		0x036, 0x000BE4B4,
+		0x036, 0x000C64B4,
+		0x036, 0x000CE4B4,
+		0x036, 0x000D64B4,
+		0x036, 0x000DE4B4,
+	0xFF0F0404, 0xCDEF,
+		0x036, 0x00085733,
+		0x036, 0x0008D733,
+		0x036, 0x00095733,
+		0x036, 0x0009D733,
+		0x036, 0x000A64B4,
+		0x036, 0x000AE4B4,
+		0x036, 0x000B64B4,
+		0x036, 0x000BE4B4,
+		0x036, 0x000C64B4,
+		0x036, 0x000CE4B4,
+		0x036, 0x000D64B4,
+		0x036, 0x000DE4B4,
+	0xCDCDCDCD, 0xCDCD,
+		0x036, 0x000056B3,
+		0x036, 0x0000D6B3,
+		0x036, 0x000156B3,
+		0x036, 0x0001D6B3,
+		0x036, 0x00026634,
+		0x036, 0x0002E634,
+		0x036, 0x00036634,
+		0x036, 0x0003E634,
+		0x036, 0x000467B4,
+		0x036, 0x0004E7B4,
+		0x036, 0x000567B4,
+		0x036, 0x0005E7B4,
+	0xFF0F0104, 0xDEAD,
+		0x0EF, 0x00000000,
+		0x0EF, 0x00000008,
+	0xFF0F0104, 0xABCD,
+		0x03C, 0x000001C8,
+		0x03C, 0x00000492,
+	0xFF0F0204, 0xCDEF,
+		0x03C, 0x000001C8,
+		0x03C, 0x00000492,
+	0xFF0F0404, 0xCDEF,
+		0x03C, 0x000001C8,
+		0x03C, 0x00000492,
+	0xCDCDCDCD, 0xCDCD,
+		0x03C, 0x0000022A,
+		0x03C, 0x00000594,
+	0xFF0F0104, 0xDEAD,
+	0xFF0F0104, 0xABCD,
+		0x03C, 0x00000800,
+	0xFF0F0204, 0xCDEF,
+		0x03C, 0x00000800,
+	0xFF0F0404, 0xCDEF,
+		0x03C, 0x00000800,
+	0xFF0F02C0, 0xCDEF,
+		0x03C, 0x00000820,
+	0xCDCDCDCD, 0xCDCD,
+		0x03C, 0x00000900,
+	0xFF0F0104, 0xDEAD,
+		0x0EF, 0x00000000,
+		0x018, 0x0001712A,
+		0x0EF, 0x00000002,
+	0xFF0F0104, 0xABCD,
+		0x008, 0x0004E400,
+	0xFF0F0204, 0xCDEF,
+		0x008, 0x0004E400,
+	0xFF0F0404, 0xCDEF,
+		0x008, 0x0004E400,
+	0xCDCDCDCD, 0xCDCD,
+		0x008, 0x00002000,
+	0xFF0F0104, 0xDEAD,
+		0x0EF, 0x00000000,
+		0x0DF, 0x000000C0,
+		0x01F, 0x00040064,
+	0xFF0F0104, 0xABCD,
+		0x058, 0x000A7284,
+		0x059, 0x000600EC,
+	0xFF0F0204, 0xCDEF,
+		0x058, 0x000A7284,
+		0x059, 0x000600EC,
+	0xFF0F0404, 0xCDEF,
+		0x058, 0x000A7284,
+		0x059, 0x000600EC,
+	0xCDCDCDCD, 0xCDCD,
+		0x058, 0x00081184,
+		0x059, 0x0006016C,
+	0xFF0F0104, 0xDEAD,
+	0xFF0F0104, 0xABCD,
+		0x061, 0x000E8D73,
+		0x062, 0x00093FC5,
+	0xFF0F0204, 0xCDEF,
+		0x061, 0x000E8D73,
+		0x062, 0x00093FC5,
+	0xFF0F0404, 0xCDEF,
+		0x061, 0x000E8D73,
+		0x062, 0x00093FC5,
+	0xCDCDCDCD, 0xCDCD,
+		0x061, 0x000EAD53,
+		0x062, 0x00093BC4,
+	0xFF0F0104, 0xDEAD,
+	0xFF0F0104, 0xABCD,
+		0x063, 0x000110E9,
+	0xFF0F0204, 0xCDEF,
+		0x063, 0x000110E9,
+	0xFF0F0404, 0xCDEF,
+		0x063, 0x000110E9,
+	0xFF0F0200, 0xCDEF,
+		0x063, 0x000710E9,
+	0xFF0F02C0, 0xCDEF,
+		0x063, 0x000110E9,
+	0xCDCDCDCD, 0xCDCD,
+		0x063, 0x000714E9,
+	0xFF0F0104, 0xDEAD,
+	0xFF0F0104, 0xABCD,
+		0x064, 0x0001C27C,
+	0xFF0F0204, 0xCDEF,
+		0x064, 0x0001C27C,
+	0xFF0F0404, 0xCDEF,
+		0x064, 0x0001C27C,
+	0xCDCDCDCD, 0xCDCD,
+		0x064, 0x0001C67C,
+	0xFF0F0104, 0xDEAD,
+	0xFF0F0200, 0xABCD,
+		0x065, 0x00093016,
+	0xFF0F02C0, 0xCDEF,
+		0x065, 0x00093015,
+	0xCDCDCDCD, 0xCDCD,
+		0x065, 0x00091016,
+	0xFF0F0200, 0xDEAD,
+		0x018, 0x00000006,
+		0x0EF, 0x00002000,
+		0x03B, 0x0003824B,
+		0x03B, 0x0003024B,
+		0x03B, 0x0002844B,
+		0x03B, 0x00020F4B,
+		0x03B, 0x00018F4B,
+		0x03B, 0x000104B2,
+		0x03B, 0x00008049,
+		0x03B, 0x00000148,
+		0x03B, 0x0007824B,
+		0x03B, 0x0007024B,
+		0x03B, 0x0006824B,
+		0x03B, 0x00060F4B,
+		0x03B, 0x00058F4B,
+		0x03B, 0x000504B2,
+		0x03B, 0x00048049,
+		0x03B, 0x00040148,
+		0x0EF, 0x00000000,
+		0x0EF, 0x00000100,
+		0x034, 0x0000ADF3,
+		0x034, 0x00009DEF,
+		0x034, 0x00008DEC,
+		0x034, 0x00007DE9,
+		0x034, 0x00006CED,
+		0x034, 0x00005CE9,
+		0x034, 0x000044E9,
+		0x034, 0x000034E6,
+		0x034, 0x0000246A,
+		0x034, 0x00001467,
+		0x034, 0x00000068,
+		0x0EF, 0x00000000,
+		0x0ED, 0x00000010,
+		0x044, 0x0000ADF2,
+		0x044, 0x00009DEF,
+		0x044, 0x00008DEC,
+		0x044, 0x00007DE9,
+		0x044, 0x00006CEC,
+		0x044, 0x00005CE9,
+		0x044, 0x000044EC,
+		0x044, 0x000034E9,
+		0x044, 0x0000246C,
+		0x044, 0x00001469,
+		0x044, 0x0000006C,
+		0x0ED, 0x00000000,
+		0x0ED, 0x00000001,
+		0x040, 0x00038DA7,
+		0x040, 0x000300C2,
+		0x040, 0x000288E2,
+		0x040, 0x000200B8,
+		0x040, 0x000188A5,
+		0x040, 0x00010FBC,
+		0x040, 0x00008F71,
+		0x040, 0x00000240,
+		0x0ED, 0x00000000,
+		0x0EF, 0x000020A2,
+		0x0DF, 0x00000080,
+		0x035, 0x00000120,
+		0x035, 0x00008120,
+		0x035, 0x00010120,
+		0x036, 0x00000085,
+		0x036, 0x00008085,
+		0x036, 0x00010085,
+		0x036, 0x00018085,
+		0x0EF, 0x00000000,
+		0x051, 0x00000C31,
+		0x052, 0x00000622,
+		0x053, 0x000FC70B,
+		0x054, 0x0000017E,
+		0x056, 0x00051DF3,
+		0x051, 0x00000C01,
+		0x052, 0x000006D6,
+		0x053, 0x000FC649,
+		0x070, 0x00049661,
+		0x071, 0x0007843E,
+		0x072, 0x00000382,
+		0x074, 0x00051400,
+		0x035, 0x00000160,
+		0x035, 0x00008160,
+		0x035, 0x00010160,
+		0x036, 0x00000124,
+		0x036, 0x00008124,
+		0x036, 0x00010124,
+		0x036, 0x00018124,
+		0x0ED, 0x0000000C,
+		0x045, 0x00000140,
+		0x045, 0x00008140,
+		0x045, 0x00010140,
+		0x046, 0x00000124,
+		0x046, 0x00008124,
+		0x046, 0x00010124,
+		0x046, 0x00018124,
+		0x0DF, 0x00000088,
+		0x0B3, 0x000F0E18,
+		0x0B4, 0x0001214C,
+		0x0B7, 0x0003000C,
+		0x01C, 0x000539D2,
+		0x018, 0x0001F12A,
+		0x0FE, 0x00000000,
+		0x0FE, 0x00000000,
+		0x018, 0x0001712A,
+};
+
+u32 RTL8812AE_MAC_REG_ARRAY[] = {
+		0x010, 0x0000000C,
+	0xFF0F0180, 0xABCD,
+		0x025, 0x0000000F,
+	0xFF0F01C0, 0xCDEF,
+		0x025, 0x0000000F,
+	0xCDCDCDCD, 0xCDCD,
+		0x025, 0x0000006F,
+	0xFF0F0180, 0xDEAD,
+		0x072, 0x00000000,
+		0x428, 0x0000000A,
+		0x429, 0x00000010,
+		0x430, 0x00000000,
+		0x431, 0x00000000,
+		0x432, 0x00000000,
+		0x433, 0x00000001,
+		0x434, 0x00000004,
+		0x435, 0x00000005,
+		0x436, 0x00000007,
+		0x437, 0x00000008,
+		0x43C, 0x00000004,
+		0x43D, 0x00000005,
+		0x43E, 0x00000007,
+		0x43F, 0x00000008,
+		0x440, 0x0000005D,
+		0x441, 0x00000001,
+		0x442, 0x00000000,
+		0x444, 0x00000010,
+		0x445, 0x00000000,
+		0x446, 0x00000000,
+		0x447, 0x00000000,
+		0x448, 0x00000000,
+		0x449, 0x000000F0,
+		0x44A, 0x0000000F,
+		0x44B, 0x0000003E,
+		0x44C, 0x00000010,
+		0x44D, 0x00000000,
+		0x44E, 0x00000000,
+		0x44F, 0x00000000,
+		0x450, 0x00000000,
+		0x451, 0x000000F0,
+		0x452, 0x0000000F,
+		0x453, 0x00000000,
+		0x45B, 0x00000080,
+		0x460, 0x00000066,
+		0x461, 0x00000066,
+		0x4C8, 0x000000FF,
+		0x4C9, 0x00000008,
+		0x4CC, 0x000000FF,
+		0x4CD, 0x000000FF,
+		0x4CE, 0x00000001,
+		0x500, 0x00000026,
+		0x501, 0x000000A2,
+		0x502, 0x0000002F,
+		0x503, 0x00000000,
+		0x504, 0x00000028,
+		0x505, 0x000000A3,
+		0x506, 0x0000005E,
+		0x507, 0x00000000,
+		0x508, 0x0000002B,
+		0x509, 0x000000A4,
+		0x50A, 0x0000005E,
+		0x50B, 0x00000000,
+		0x50C, 0x0000004F,
+		0x50D, 0x000000A4,
+		0x50E, 0x00000000,
+		0x50F, 0x00000000,
+		0x512, 0x0000001C,
+		0x514, 0x0000000A,
+		0x516, 0x0000000A,
+		0x525, 0x0000004F,
+		0x550, 0x00000010,
+		0x551, 0x00000010,
+		0x559, 0x00000002,
+		0x55C, 0x00000050,
+		0x55D, 0x000000FF,
+		0x604, 0x00000001,
+		0x605, 0x00000030,
+		0x607, 0x00000003,
+		0x608, 0x0000000E,
+		0x609, 0x0000002A,
+		0x620, 0x000000FF,
+		0x621, 0x000000FF,
+		0x622, 0x000000FF,
+		0x623, 0x000000FF,
+		0x624, 0x000000FF,
+		0x625, 0x000000FF,
+		0x626, 0x000000FF,
+		0x627, 0x000000FF,
+		0x638, 0x00000050,
+		0x63C, 0x0000000A,
+		0x63D, 0x0000000A,
+		0x63E, 0x0000000E,
+		0x63F, 0x0000000E,
+		0x640, 0x00000080,
+		0x642, 0x00000040,
+		0x643, 0x00000000,
+		0x652, 0x000000C8,
+		0x66E, 0x00000005,
+		0x700, 0x00000021,
+		0x701, 0x00000043,
+		0x702, 0x00000065,
+		0x703, 0x00000087,
+		0x708, 0x00000021,
+		0x709, 0x00000043,
+		0x70A, 0x00000065,
+		0x70B, 0x00000087,
+		0x718, 0x00000040,
+};
+
+u32 RTL8821AE_MAC_REG_ARRAY[] = {
+		0x428, 0x0000000A,
+		0x429, 0x00000010,
+		0x430, 0x00000000,
+		0x431, 0x00000000,
+		0x432, 0x00000000,
+		0x433, 0x00000001,
+		0x434, 0x00000004,
+		0x435, 0x00000005,
+		0x436, 0x00000007,
+		0x437, 0x00000008,
+		0x43C, 0x00000004,
+		0x43D, 0x00000005,
+		0x43E, 0x00000007,
+		0x43F, 0x00000008,
+		0x440, 0x0000005D,
+		0x441, 0x00000001,
+		0x442, 0x00000000,
+		0x444, 0x00000010,
+		0x445, 0x00000000,
+		0x446, 0x00000000,
+		0x447, 0x00000000,
+		0x448, 0x00000000,
+		0x449, 0x000000F0,
+		0x44A, 0x0000000F,
+		0x44B, 0x0000003E,
+		0x44C, 0x00000010,
+		0x44D, 0x00000000,
+		0x44E, 0x00000000,
+		0x44F, 0x00000000,
+		0x450, 0x00000000,
+		0x451, 0x000000F0,
+		0x452, 0x0000000F,
+		0x453, 0x00000000,
+		0x456, 0x0000005E,
+		0x460, 0x00000066,
+		0x461, 0x00000066,
+		0x4C8, 0x0000003F,
+		0x4C9, 0x000000FF,
+		0x4CC, 0x000000FF,
+		0x4CD, 0x000000FF,
+		0x4CE, 0x00000001,
+		0x500, 0x00000026,
+		0x501, 0x000000A2,
+		0x502, 0x0000002F,
+		0x503, 0x00000000,
+		0x504, 0x00000028,
+		0x505, 0x000000A3,
+		0x506, 0x0000005E,
+		0x507, 0x00000000,
+		0x508, 0x0000002B,
+		0x509, 0x000000A4,
+		0x50A, 0x0000005E,
+		0x50B, 0x00000000,
+		0x50C, 0x0000004F,
+		0x50D, 0x000000A4,
+		0x50E, 0x00000000,
+		0x50F, 0x00000000,
+		0x512, 0x0000001C,
+		0x514, 0x0000000A,
+		0x516, 0x0000000A,
+		0x525, 0x0000004F,
+		0x550, 0x00000010,
+		0x551, 0x00000010,
+		0x559, 0x00000002,
+		0x55C, 0x00000050,
+		0x55D, 0x000000FF,
+		0x605, 0x00000030,
+		0x607, 0x00000007,
+		0x608, 0x0000000E,
+		0x609, 0x0000002A,
+		0x620, 0x000000FF,
+		0x621, 0x000000FF,
+		0x622, 0x000000FF,
+		0x623, 0x000000FF,
+		0x624, 0x000000FF,
+		0x625, 0x000000FF,
+		0x626, 0x000000FF,
+		0x627, 0x000000FF,
+		0x638, 0x00000050,
+		0x63C, 0x0000000A,
+		0x63D, 0x0000000A,
+		0x63E, 0x0000000E,
+		0x63F, 0x0000000E,
+		0x640, 0x00000040,
+		0x642, 0x00000040,
+		0x643, 0x00000000,
+		0x652, 0x000000C8,
+		0x66E, 0x00000005,
+		0x700, 0x00000021,
+		0x701, 0x00000043,
+		0x702, 0x00000065,
+		0x703, 0x00000087,
+		0x708, 0x00000021,
+		0x709, 0x00000043,
+		0x70A, 0x00000065,
+		0x70B, 0x00000087,
+		0x718, 0x00000040,
+};
+
+u32 RTL8812AE_AGC_TAB_ARRAY[] = {
+	0xFF0F07D8, 0xABCD,
+		0x81C, 0xFC000001,
+		0x81C, 0xFB020001,
+		0x81C, 0xFA040001,
+		0x81C, 0xF9060001,
+		0x81C, 0xF8080001,
+		0x81C, 0xF70A0001,
+		0x81C, 0xF60C0001,
+		0x81C, 0xF50E0001,
+		0x81C, 0xF4100001,
+		0x81C, 0xF3120001,
+		0x81C, 0xF2140001,
+		0x81C, 0xF1160001,
+		0x81C, 0xF0180001,
+		0x81C, 0xEF1A0001,
+		0x81C, 0xEE1C0001,
+		0x81C, 0xED1E0001,
+		0x81C, 0xEC200001,
+		0x81C, 0xEB220001,
+		0x81C, 0xEA240001,
+		0x81C, 0xCD260001,
+		0x81C, 0xCC280001,
+		0x81C, 0xCB2A0001,
+		0x81C, 0xCA2C0001,
+		0x81C, 0xC92E0001,
+		0x81C, 0xC8300001,
+		0x81C, 0xA6320001,
+		0x81C, 0xA5340001,
+		0x81C, 0xA4360001,
+		0x81C, 0xA3380001,
+		0x81C, 0xA23A0001,
+		0x81C, 0x883C0001,
+		0x81C, 0x873E0001,
+		0x81C, 0x86400001,
+		0x81C, 0x85420001,
+		0x81C, 0x84440001,
+		0x81C, 0x83460001,
+		0x81C, 0x82480001,
+		0x81C, 0x814A0001,
+		0x81C, 0x484C0001,
+		0x81C, 0x474E0001,
+		0x81C, 0x46500001,
+		0x81C, 0x45520001,
+		0x81C, 0x44540001,
+		0x81C, 0x43560001,
+		0x81C, 0x42580001,
+		0x81C, 0x415A0001,
+		0x81C, 0x255C0001,
+		0x81C, 0x245E0001,
+		0x81C, 0x23600001,
+		0x81C, 0x22620001,
+		0x81C, 0x21640001,
+		0x81C, 0x21660001,
+		0x81C, 0x21680001,
+		0x81C, 0x216A0001,
+		0x81C, 0x216C0001,
+		0x81C, 0x216E0001,
+		0x81C, 0x21700001,
+		0x81C, 0x21720001,
+		0x81C, 0x21740001,
+		0x81C, 0x21760001,
+		0x81C, 0x21780001,
+		0x81C, 0x217A0001,
+		0x81C, 0x217C0001,
+		0x81C, 0x217E0001,
+	0xFF0F07D0, 0xCDEF,
+		0x81C, 0xF9000001,
+		0x81C, 0xF8020001,
+		0x81C, 0xF7040001,
+		0x81C, 0xF6060001,
+		0x81C, 0xF5080001,
+		0x81C, 0xF40A0001,
+		0x81C, 0xF30C0001,
+		0x81C, 0xF20E0001,
+		0x81C, 0xF1100001,
+		0x81C, 0xF0120001,
+		0x81C, 0xEF140001,
+		0x81C, 0xEE160001,
+		0x81C, 0xED180001,
+		0x81C, 0xEC1A0001,
+		0x81C, 0xEB1C0001,
+		0x81C, 0xEA1E0001,
+		0x81C, 0xCD200001,
+		0x81C, 0xCC220001,
+		0x81C, 0xCB240001,
+		0x81C, 0xCA260001,
+		0x81C, 0xC9280001,
+		0x81C, 0xC82A0001,
+		0x81C, 0xC72C0001,
+		0x81C, 0xC62E0001,
+		0x81C, 0xA5300001,
+		0x81C, 0xA4320001,
+		0x81C, 0xA3340001,
+		0x81C, 0xA2360001,
+		0x81C, 0x88380001,
+		0x81C, 0x873A0001,
+		0x81C, 0x863C0001,
+		0x81C, 0x853E0001,
+		0x81C, 0x84400001,
+		0x81C, 0x83420001,
+		0x81C, 0x82440001,
+		0x81C, 0x81460001,
+		0x81C, 0x48480001,
+		0x81C, 0x474A0001,
+		0x81C, 0x464C0001,
+		0x81C, 0x454E0001,
+		0x81C, 0x44500001,
+		0x81C, 0x43520001,
+		0x81C, 0x42540001,
+		0x81C, 0x41560001,
+		0x81C, 0x25580001,
+		0x81C, 0x245A0001,
+		0x81C, 0x235C0001,
+		0x81C, 0x225E0001,
+		0x81C, 0x21600001,
+		0x81C, 0x21620001,
+		0x81C, 0x21640001,
+		0x81C, 0x21660001,
+		0x81C, 0x21680001,
+		0x81C, 0x216A0001,
+		0x81C, 0x236C0001,
+		0x81C, 0x226E0001,
+		0x81C, 0x21700001,
+		0x81C, 0x21720001,
+		0x81C, 0x21740001,
+		0x81C, 0x21760001,
+		0x81C, 0x21780001,
+		0x81C, 0x217A0001,
+		0x81C, 0x217C0001,
+		0x81C, 0x217E0001,
+	0xCDCDCDCD, 0xCDCD,
+		0x81C, 0xFF000001,
+		0x81C, 0xFF020001,
+		0x81C, 0xFF040001,
+		0x81C, 0xFF060001,
+		0x81C, 0xFF080001,
+		0x81C, 0xFE0A0001,
+		0x81C, 0xFD0C0001,
+		0x81C, 0xFC0E0001,
+		0x81C, 0xFB100001,
+		0x81C, 0xFA120001,
+		0x81C, 0xF9140001,
+		0x81C, 0xF8160001,
+		0x81C, 0xF7180001,
+		0x81C, 0xF61A0001,
+		0x81C, 0xF51C0001,
+		0x81C, 0xF41E0001,
+		0x81C, 0xF3200001,
+		0x81C, 0xF2220001,
+		0x81C, 0xF1240001,
+		0x81C, 0xF0260001,
+		0x81C, 0xEF280001,
+		0x81C, 0xEE2A0001,
+		0x81C, 0xED2C0001,
+		0x81C, 0xEC2E0001,
+		0x81C, 0xEB300001,
+		0x81C, 0xEA320001,
+		0x81C, 0xE9340001,
+		0x81C, 0xE8360001,
+		0x81C, 0xE7380001,
+		0x81C, 0xE63A0001,
+		0x81C, 0xE53C0001,
+		0x81C, 0xC73E0001,
+		0x81C, 0xC6400001,
+		0x81C, 0xC5420001,
+		0x81C, 0xC4440001,
+		0x81C, 0xC3460001,
+		0x81C, 0xC2480001,
+		0x81C, 0xC14A0001,
+		0x81C, 0xA74C0001,
+		0x81C, 0xA64E0001,
+		0x81C, 0xA5500001,
+		0x81C, 0xA4520001,
+		0x81C, 0xA3540001,
+		0x81C, 0xA2560001,
+		0x81C, 0xA1580001,
+		0x81C, 0x675A0001,
+		0x81C, 0x665C0001,
+		0x81C, 0x655E0001,
+		0x81C, 0x64600001,
+		0x81C, 0x63620001,
+		0x81C, 0x48640001,
+		0x81C, 0x47660001,
+		0x81C, 0x46680001,
+		0x81C, 0x456A0001,
+		0x81C, 0x446C0001,
+		0x81C, 0x436E0001,
+		0x81C, 0x42700001,
+		0x81C, 0x41720001,
+		0x81C, 0x41740001,
+		0x81C, 0x41760001,
+		0x81C, 0x41780001,
+		0x81C, 0x417A0001,
+		0x81C, 0x417C0001,
+		0x81C, 0x417E0001,
+	0xFF0F07D8, 0xDEAD,
+	0xFF0F0180, 0xABCD,
+		0x81C, 0xFC800001,
+		0x81C, 0xFB820001,
+		0x81C, 0xFA840001,
+		0x81C, 0xF9860001,
+		0x81C, 0xF8880001,
+		0x81C, 0xF78A0001,
+		0x81C, 0xF68C0001,
+		0x81C, 0xF58E0001,
+		0x81C, 0xF4900001,
+		0x81C, 0xF3920001,
+		0x81C, 0xF2940001,
+		0x81C, 0xF1960001,
+		0x81C, 0xF0980001,
+		0x81C, 0xEF9A0001,
+		0x81C, 0xEE9C0001,
+		0x81C, 0xED9E0001,
+		0x81C, 0xECA00001,
+		0x81C, 0xEBA20001,
+		0x81C, 0xEAA40001,
+		0x81C, 0xE9A60001,
+		0x81C, 0xE8A80001,
+		0x81C, 0xE7AA0001,
+		0x81C, 0xE6AC0001,
+		0x81C, 0xE5AE0001,
+		0x81C, 0xE4B00001,
+		0x81C, 0xE3B20001,
+		0x81C, 0xA8B40001,
+		0x81C, 0xA7B60001,
+		0x81C, 0xA6B80001,
+		0x81C, 0xA5BA0001,
+		0x81C, 0xA4BC0001,
+		0x81C, 0xA3BE0001,
+		0x81C, 0xA2C00001,
+		0x81C, 0xA1C20001,
+		0x81C, 0x68C40001,
+		0x81C, 0x67C60001,
+		0x81C, 0x66C80001,
+		0x81C, 0x65CA0001,
+		0x81C, 0x64CC0001,
+		0x81C, 0x47CE0001,
+		0x81C, 0x46D00001,
+		0x81C, 0x45D20001,
+		0x81C, 0x44D40001,
+		0x81C, 0x43D60001,
+		0x81C, 0x42D80001,
+		0x81C, 0x08DA0001,
+		0x81C, 0x07DC0001,
+		0x81C, 0x06DE0001,
+		0x81C, 0x05E00001,
+		0x81C, 0x04E20001,
+		0x81C, 0x03E40001,
+		0x81C, 0x02E60001,
+		0x81C, 0x01E80001,
+		0x81C, 0x01EA0001,
+		0x81C, 0x01EC0001,
+		0x81C, 0x01EE0001,
+		0x81C, 0x01F00001,
+		0x81C, 0x01F20001,
+		0x81C, 0x01F40001,
+		0x81C, 0x01F60001,
+		0x81C, 0x01F80001,
+		0x81C, 0x01FA0001,
+		0x81C, 0x01FC0001,
+		0x81C, 0x01FE0001,
+	0xFF0F0280, 0xCDEF,
+		0x81C, 0xFC800001,
+		0x81C, 0xFB820001,
+		0x81C, 0xFA840001,
+		0x81C, 0xF9860001,
+		0x81C, 0xF8880001,
+		0x81C, 0xF78A0001,
+		0x81C, 0xF68C0001,
+		0x81C, 0xF58E0001,
+		0x81C, 0xF4900001,
+		0x81C, 0xF3920001,
+		0x81C, 0xF2940001,
+		0x81C, 0xF1960001,
+		0x81C, 0xF0980001,
+		0x81C, 0xEF9A0001,
+		0x81C, 0xEE9C0001,
+		0x81C, 0xED9E0001,
+		0x81C, 0xECA00001,
+		0x81C, 0xEBA20001,
+		0x81C, 0xEAA40001,
+		0x81C, 0xE9A60001,
+		0x81C, 0xE8A80001,
+		0x81C, 0xE7AA0001,
+		0x81C, 0xE6AC0001,
+		0x81C, 0xE5AE0001,
+		0x81C, 0xE4B00001,
+		0x81C, 0xE3B20001,
+		0x81C, 0xA8B40001,
+		0x81C, 0xA7B60001,
+		0x81C, 0xA6B80001,
+		0x81C, 0xA5BA0001,
+		0x81C, 0xA4BC0001,
+		0x81C, 0xA3BE0001,
+		0x81C, 0xA2C00001,
+		0x81C, 0xA1C20001,
+		0x81C, 0x68C40001,
+		0x81C, 0x67C60001,
+		0x81C, 0x66C80001,
+		0x81C, 0x65CA0001,
+		0x81C, 0x64CC0001,
+		0x81C, 0x47CE0001,
+		0x81C, 0x46D00001,
+		0x81C, 0x45D20001,
+		0x81C, 0x44D40001,
+		0x81C, 0x43D60001,
+		0x81C, 0x42D80001,
+		0x81C, 0x08DA0001,
+		0x81C, 0x07DC0001,
+		0x81C, 0x06DE0001,
+		0x81C, 0x05E00001,
+		0x81C, 0x04E20001,
+		0x81C, 0x03E40001,
+		0x81C, 0x02E60001,
+		0x81C, 0x01E80001,
+		0x81C, 0x01EA0001,
+		0x81C, 0x01EC0001,
+		0x81C, 0x01EE0001,
+		0x81C, 0x01F00001,
+		0x81C, 0x01F20001,
+		0x81C, 0x01F40001,
+		0x81C, 0x01F60001,
+		0x81C, 0x01F80001,
+		0x81C, 0x01FA0001,
+		0x81C, 0x01FC0001,
+		0x81C, 0x01FE0001,
+	0xFF0F01C0, 0xCDEF,
+		0x81C, 0xFC800001,
+		0x81C, 0xFB820001,
+		0x81C, 0xFA840001,
+		0x81C, 0xF9860001,
+		0x81C, 0xF8880001,
+		0x81C, 0xF78A0001,
+		0x81C, 0xF68C0001,
+		0x81C, 0xF58E0001,
+		0x81C, 0xF4900001,
+		0x81C, 0xF3920001,
+		0x81C, 0xF2940001,
+		0x81C, 0xF1960001,
+		0x81C, 0xF0980001,
+		0x81C, 0xEF9A0001,
+		0x81C, 0xEE9C0001,
+		0x81C, 0xED9E0001,
+		0x81C, 0xECA00001,
+		0x81C, 0xEBA20001,
+		0x81C, 0xEAA40001,
+		0x81C, 0xE9A60001,
+		0x81C, 0xE8A80001,
+		0x81C, 0xE7AA0001,
+		0x81C, 0xE6AC0001,
+		0x81C, 0xE5AE0001,
+		0x81C, 0xE4B00001,
+		0x81C, 0xE3B20001,
+		0x81C, 0xA8B40001,
+		0x81C, 0xA7B60001,
+		0x81C, 0xA6B80001,
+		0x81C, 0xA5BA0001,
+		0x81C, 0xA4BC0001,
+		0x81C, 0xA3BE0001,
+		0x81C, 0xA2C00001,
+		0x81C, 0xA1C20001,
+		0x81C, 0x68C40001,
+		0x81C, 0x67C60001,
+		0x81C, 0x66C80001,
+		0x81C, 0x65CA0001,
+		0x81C, 0x64CC0001,
+		0x81C, 0x47CE0001,
+		0x81C, 0x46D00001,
+		0x81C, 0x45D20001,
+		0x81C, 0x44D40001,
+		0x81C, 0x43D60001,
+		0x81C, 0x42D80001,
+		0x81C, 0x08DA0001,
+		0x81C, 0x07DC0001,
+		0x81C, 0x06DE0001,
+		0x81C, 0x05E00001,
+		0x81C, 0x04E20001,
+		0x81C, 0x03E40001,
+		0x81C, 0x02E60001,
+		0x81C, 0x01E80001,
+		0x81C, 0x01EA0001,
+		0x81C, 0x01EC0001,
+		0x81C, 0x01EE0001,
+		0x81C, 0x01F00001,
+		0x81C, 0x01F20001,
+		0x81C, 0x01F40001,
+		0x81C, 0x01F60001,
+		0x81C, 0x01F80001,
+		0x81C, 0x01FA0001,
+		0x81C, 0x01FC0001,
+		0x81C, 0x01FE0001,
+	0xFF0F02C0, 0xCDEF,
+		0x81C, 0xFC800001,
+		0x81C, 0xFB820001,
+		0x81C, 0xFA840001,
+		0x81C, 0xF9860001,
+		0x81C, 0xF8880001,
+		0x81C, 0xF78A0001,
+		0x81C, 0xF68C0001,
+		0x81C, 0xF58E0001,
+		0x81C, 0xF4900001,
+		0x81C, 0xF3920001,
+		0x81C, 0xF2940001,
+		0x81C, 0xF1960001,
+		0x81C, 0xF0980001,
+		0x81C, 0xEF9A0001,
+		0x81C, 0xEE9C0001,
+		0x81C, 0xED9E0001,
+		0x81C, 0xECA00001,
+		0x81C, 0xEBA20001,
+		0x81C, 0xEAA40001,
+		0x81C, 0xE9A60001,
+		0x81C, 0xE8A80001,
+		0x81C, 0xE7AA0001,
+		0x81C, 0xE6AC0001,
+		0x81C, 0xE5AE0001,
+		0x81C, 0xE4B00001,
+		0x81C, 0xE3B20001,
+		0x81C, 0xA8B40001,
+		0x81C, 0xA7B60001,
+		0x81C, 0xA6B80001,
+		0x81C, 0xA5BA0001,
+		0x81C, 0xA4BC0001,
+		0x81C, 0xA3BE0001,
+		0x81C, 0xA2C00001,
+		0x81C, 0xA1C20001,
+		0x81C, 0x68C40001,
+		0x81C, 0x67C60001,
+		0x81C, 0x66C80001,
+		0x81C, 0x65CA0001,
+		0x81C, 0x64CC0001,
+		0x81C, 0x47CE0001,
+		0x81C, 0x46D00001,
+		0x81C, 0x45D20001,
+		0x81C, 0x44D40001,
+		0x81C, 0x43D60001,
+		0x81C, 0x42D80001,
+		0x81C, 0x08DA0001,
+		0x81C, 0x07DC0001,
+		0x81C, 0x06DE0001,
+		0x81C, 0x05E00001,
+		0x81C, 0x04E20001,
+		0x81C, 0x03E40001,
+		0x81C, 0x02E60001,
+		0x81C, 0x01E80001,
+		0x81C, 0x01EA0001,
+		0x81C, 0x01EC0001,
+		0x81C, 0x01EE0001,
+		0x81C, 0x01F00001,
+		0x81C, 0x01F20001,
+		0x81C, 0x01F40001,
+		0x81C, 0x01F60001,
+		0x81C, 0x01F80001,
+		0x81C, 0x01FA0001,
+		0x81C, 0x01FC0001,
+		0x81C, 0x01FE0001,
+	0xFF0F07D8, 0xCDEF,
+		0x81C, 0xFC800001,
+		0x81C, 0xFB820001,
+		0x81C, 0xFA840001,
+		0x81C, 0xF9860001,
+		0x81C, 0xF8880001,
+		0x81C, 0xF78A0001,
+		0x81C, 0xF68C0001,
+		0x81C, 0xF58E0001,
+		0x81C, 0xF4900001,
+		0x81C, 0xF3920001,
+		0x81C, 0xF2940001,
+		0x81C, 0xF1960001,
+		0x81C, 0xF0980001,
+		0x81C, 0xEF9A0001,
+		0x81C, 0xEE9C0001,
+		0x81C, 0xED9E0001,
+		0x81C, 0xECA00001,
+		0x81C, 0xEBA20001,
+		0x81C, 0xEAA40001,
+		0x81C, 0xE9A60001,
+		0x81C, 0xE8A80001,
+		0x81C, 0xE7AA0001,
+		0x81C, 0xE6AC0001,
+		0x81C, 0xE5AE0001,
+		0x81C, 0xE4B00001,
+		0x81C, 0xE3B20001,
+		0x81C, 0xA8B40001,
+		0x81C, 0xA7B60001,
+		0x81C, 0xA6B80001,
+		0x81C, 0xA5BA0001,
+		0x81C, 0xA4BC0001,
+		0x81C, 0xA3BE0001,
+		0x81C, 0xA2C00001,
+		0x81C, 0xA1C20001,
+		0x81C, 0x68C40001,
+		0x81C, 0x67C60001,
+		0x81C, 0x66C80001,
+		0x81C, 0x65CA0001,
+		0x81C, 0x64CC0001,
+		0x81C, 0x47CE0001,
+		0x81C, 0x46D00001,
+		0x81C, 0x45D20001,
+		0x81C, 0x44D40001,
+		0x81C, 0x43D60001,
+		0x81C, 0x42D80001,
+		0x81C, 0x08DA0001,
+		0x81C, 0x07DC0001,
+		0x81C, 0x06DE0001,
+		0x81C, 0x05E00001,
+		0x81C, 0x04E20001,
+		0x81C, 0x03E40001,
+		0x81C, 0x02E60001,
+		0x81C, 0x01E80001,
+		0x81C, 0x01EA0001,
+		0x81C, 0x01EC0001,
+		0x81C, 0x01EE0001,
+		0x81C, 0x01F00001,
+		0x81C, 0x01F20001,
+		0x81C, 0x01F40001,
+		0x81C, 0x01F60001,
+		0x81C, 0x01F80001,
+		0x81C, 0x01FA0001,
+		0x81C, 0x01FC0001,
+		0x81C, 0x01FE0001,
+	0xFF0F07D0, 0xCDEF,
+		0x81C, 0xFC800001,
+		0x81C, 0xFB820001,
+		0x81C, 0xFA840001,
+		0x81C, 0xF9860001,
+		0x81C, 0xF8880001,
+		0x81C, 0xF78A0001,
+		0x81C, 0xF68C0001,
+		0x81C, 0xF58E0001,
+		0x81C, 0xF4900001,
+		0x81C, 0xF3920001,
+		0x81C, 0xF2940001,
+		0x81C, 0xF1960001,
+		0x81C, 0xF0980001,
+		0x81C, 0xEF9A0001,
+		0x81C, 0xEE9C0001,
+		0x81C, 0xED9E0001,
+		0x81C, 0xECA00001,
+		0x81C, 0xEBA20001,
+		0x81C, 0xEAA40001,
+		0x81C, 0xE9A60001,
+		0x81C, 0xE8A80001,
+		0x81C, 0xE7AA0001,
+		0x81C, 0xE6AC0001,
+		0x81C, 0xE5AE0001,
+		0x81C, 0xE4B00001,
+		0x81C, 0xE3B20001,
+		0x81C, 0xA8B40001,
+		0x81C, 0xA7B60001,
+		0x81C, 0xA6B80001,
+		0x81C, 0xA5BA0001,
+		0x81C, 0xA4BC0001,
+		0x81C, 0xA3BE0001,
+		0x81C, 0xA2C00001,
+		0x81C, 0xA1C20001,
+		0x81C, 0x68C40001,
+		0x81C, 0x67C60001,
+		0x81C, 0x66C80001,
+		0x81C, 0x65CA0001,
+		0x81C, 0x64CC0001,
+		0x81C, 0x47CE0001,
+		0x81C, 0x46D00001,
+		0x81C, 0x45D20001,
+		0x81C, 0x44D40001,
+		0x81C, 0x43D60001,
+		0x81C, 0x42D80001,
+		0x81C, 0x08DA0001,
+		0x81C, 0x07DC0001,
+		0x81C, 0x06DE0001,
+		0x81C, 0x05E00001,
+		0x81C, 0x04E20001,
+		0x81C, 0x03E40001,
+		0x81C, 0x02E60001,
+		0x81C, 0x01E80001,
+		0x81C, 0x01EA0001,
+		0x81C, 0x01EC0001,
+		0x81C, 0x01EE0001,
+		0x81C, 0x01F00001,
+		0x81C, 0x01F20001,
+		0x81C, 0x01F40001,
+		0x81C, 0x01F60001,
+		0x81C, 0x01F80001,
+		0x81C, 0x01FA0001,
+		0x81C, 0x01FC0001,
+		0x81C, 0x01FE0001,
+	0xCDCDCDCD, 0xCDCD,
+		0x81C, 0xFF800001,
+		0x81C, 0xFF820001,
+		0x81C, 0xFF840001,
+		0x81C, 0xFE860001,
+		0x81C, 0xFD880001,
+		0x81C, 0xFC8A0001,
+		0x81C, 0xFB8C0001,
+		0x81C, 0xFA8E0001,
+		0x81C, 0xF9900001,
+		0x81C, 0xF8920001,
+		0x81C, 0xF7940001,
+		0x81C, 0xF6960001,
+		0x81C, 0xF5980001,
+		0x81C, 0xF49A0001,
+		0x81C, 0xF39C0001,
+		0x81C, 0xF29E0001,
+		0x81C, 0xF1A00001,
+		0x81C, 0xF0A20001,
+		0x81C, 0xEFA40001,
+		0x81C, 0xEEA60001,
+		0x81C, 0xEDA80001,
+		0x81C, 0xECAA0001,
+		0x81C, 0xEBAC0001,
+		0x81C, 0xEAAE0001,
+		0x81C, 0xE9B00001,
+		0x81C, 0xE8B20001,
+		0x81C, 0xE7B40001,
+		0x81C, 0xE6B60001,
+		0x81C, 0xE5B80001,
+		0x81C, 0xE4BA0001,
+		0x81C, 0xE3BC0001,
+		0x81C, 0xA8BE0001,
+		0x81C, 0xA7C00001,
+		0x81C, 0xA6C20001,
+		0x81C, 0xA5C40001,
+		0x81C, 0xA4C60001,
+		0x81C, 0xA3C80001,
+		0x81C, 0xA2CA0001,
+		0x81C, 0xA1CC0001,
+		0x81C, 0x68CE0001,
+		0x81C, 0x67D00001,
+		0x81C, 0x66D20001,
+		0x81C, 0x65D40001,
+		0x81C, 0x64D60001,
+		0x81C, 0x47D80001,
+		0x81C, 0x46DA0001,
+		0x81C, 0x45DC0001,
+		0x81C, 0x44DE0001,
+		0x81C, 0x43E00001,
+		0x81C, 0x42E20001,
+		0x81C, 0x08E40001,
+		0x81C, 0x07E60001,
+		0x81C, 0x06E80001,
+		0x81C, 0x05EA0001,
+		0x81C, 0x04EC0001,
+		0x81C, 0x03EE0001,
+		0x81C, 0x02F00001,
+		0x81C, 0x01F20001,
+		0x81C, 0x01F40001,
+		0x81C, 0x01F60001,
+		0x81C, 0x01F80001,
+		0x81C, 0x01FA0001,
+		0x81C, 0x01FC0001,
+		0x81C, 0x01FE0001,
+	0xFF0F0180, 0xDEAD,
+		0xC50, 0x00000022,
+		0xC50, 0x00000020,
+		0xE50, 0x00000022,
+		0xE50, 0x00000020,
+};
+
+u32 RTL8821AE_AGC_TAB_ARRAY[] = {
+		0x81C, 0xBF000001,
+		0x81C, 0xBF020001,
+		0x81C, 0xBF040001,
+		0x81C, 0xBF060001,
+		0x81C, 0xBE080001,
+		0x81C, 0xBD0A0001,
+		0x81C, 0xBC0C0001,
+		0x81C, 0xBA0E0001,
+		0x81C, 0xB9100001,
+		0x81C, 0xB8120001,
+		0x81C, 0xB7140001,
+		0x81C, 0xB6160001,
+		0x81C, 0xB5180001,
+		0x81C, 0xB41A0001,
+		0x81C, 0xB31C0001,
+		0x81C, 0xB21E0001,
+		0x81C, 0xB1200001,
+		0x81C, 0xB0220001,
+		0x81C, 0xAF240001,
+		0x81C, 0xAE260001,
+		0x81C, 0xAD280001,
+		0x81C, 0xAC2A0001,
+		0x81C, 0xAB2C0001,
+		0x81C, 0xAA2E0001,
+		0x81C, 0xA9300001,
+		0x81C, 0xA8320001,
+		0x81C, 0xA7340001,
+		0x81C, 0xA6360001,
+		0x81C, 0xA5380001,
+		0x81C, 0xA43A0001,
+		0x81C, 0xA33C0001,
+		0x81C, 0x673E0001,
+		0x81C, 0x66400001,
+		0x81C, 0x65420001,
+		0x81C, 0x64440001,
+		0x81C, 0x63460001,
+		0x81C, 0x62480001,
+		0x81C, 0x614A0001,
+		0x81C, 0x474C0001,
+		0x81C, 0x464E0001,
+		0x81C, 0x45500001,
+		0x81C, 0x44520001,
+		0x81C, 0x43540001,
+		0x81C, 0x42560001,
+		0x81C, 0x41580001,
+		0x81C, 0x285A0001,
+		0x81C, 0x275C0001,
+		0x81C, 0x265E0001,
+		0x81C, 0x25600001,
+		0x81C, 0x24620001,
+		0x81C, 0x0A640001,
+		0x81C, 0x09660001,
+		0x81C, 0x08680001,
+		0x81C, 0x076A0001,
+		0x81C, 0x066C0001,
+		0x81C, 0x056E0001,
+		0x81C, 0x04700001,
+		0x81C, 0x03720001,
+		0x81C, 0x02740001,
+		0x81C, 0x01760001,
+		0x81C, 0x01780001,
+		0x81C, 0x017A0001,
+		0x81C, 0x017C0001,
+		0x81C, 0x017E0001,
+	0xFF0F02C0, 0xABCD,
+		0x81C, 0xFB000101,
+		0x81C, 0xFA020101,
+		0x81C, 0xF9040101,
+		0x81C, 0xF8060101,
+		0x81C, 0xF7080101,
+		0x81C, 0xF60A0101,
+		0x81C, 0xF50C0101,
+		0x81C, 0xF40E0101,
+		0x81C, 0xF3100101,
+		0x81C, 0xF2120101,
+		0x81C, 0xF1140101,
+		0x81C, 0xF0160101,
+		0x81C, 0xEF180101,
+		0x81C, 0xEE1A0101,
+		0x81C, 0xED1C0101,
+		0x81C, 0xEC1E0101,
+		0x81C, 0xEB200101,
+		0x81C, 0xEA220101,
+		0x81C, 0xE9240101,
+		0x81C, 0xE8260101,
+		0x81C, 0xE7280101,
+		0x81C, 0xE62A0101,
+		0x81C, 0xE52C0101,
+		0x81C, 0xE42E0101,
+		0x81C, 0xE3300101,
+		0x81C, 0xA5320101,
+		0x81C, 0xA4340101,
+		0x81C, 0xA3360101,
+		0x81C, 0x87380101,
+		0x81C, 0x863A0101,
+		0x81C, 0x853C0101,
+		0x81C, 0x843E0101,
+		0x81C, 0x69400101,
+		0x81C, 0x68420101,
+		0x81C, 0x67440101,
+		0x81C, 0x66460101,
+		0x81C, 0x49480101,
+		0x81C, 0x484A0101,
+		0x81C, 0x474C0101,
+		0x81C, 0x2A4E0101,
+		0x81C, 0x29500101,
+		0x81C, 0x28520101,
+		0x81C, 0x27540101,
+		0x81C, 0x26560101,
+		0x81C, 0x25580101,
+		0x81C, 0x245A0101,
+		0x81C, 0x235C0101,
+		0x81C, 0x055E0101,
+		0x81C, 0x04600101,
+		0x81C, 0x03620101,
+		0x81C, 0x02640101,
+		0x81C, 0x01660101,
+		0x81C, 0x01680101,
+		0x81C, 0x016A0101,
+		0x81C, 0x016C0101,
+		0x81C, 0x016E0101,
+		0x81C, 0x01700101,
+		0x81C, 0x01720101,
+	0xCDCDCDCD, 0xCDCD,
+		0x81C, 0xFF000101,
+		0x81C, 0xFF020101,
+		0x81C, 0xFE040101,
+		0x81C, 0xFD060101,
+		0x81C, 0xFC080101,
+		0x81C, 0xFD0A0101,
+		0x81C, 0xFC0C0101,
+		0x81C, 0xFB0E0101,
+		0x81C, 0xFA100101,
+		0x81C, 0xF9120101,
+		0x81C, 0xF8140101,
+		0x81C, 0xF7160101,
+		0x81C, 0xF6180101,
+		0x81C, 0xF51A0101,
+		0x81C, 0xF41C0101,
+		0x81C, 0xF31E0101,
+		0x81C, 0xF2200101,
+		0x81C, 0xF1220101,
+		0x81C, 0xF0240101,
+		0x81C, 0xEF260101,
+		0x81C, 0xEE280101,
+		0x81C, 0xED2A0101,
+		0x81C, 0xEC2C0101,
+		0x81C, 0xEB2E0101,
+		0x81C, 0xEA300101,
+		0x81C, 0xE9320101,
+		0x81C, 0xE8340101,
+		0x81C, 0xE7360101,
+		0x81C, 0xE6380101,
+		0x81C, 0xE53A0101,
+		0x81C, 0xE43C0101,
+		0x81C, 0xE33E0101,
+		0x81C, 0xA5400101,
+		0x81C, 0xA4420101,
+		0x81C, 0xA3440101,
+		0x81C, 0x87460101,
+		0x81C, 0x86480101,
+		0x81C, 0x854A0101,
+		0x81C, 0x844C0101,
+		0x81C, 0x694E0101,
+		0x81C, 0x68500101,
+		0x81C, 0x67520101,
+		0x81C, 0x66540101,
+		0x81C, 0x49560101,
+		0x81C, 0x48580101,
+		0x81C, 0x475A0101,
+		0x81C, 0x2A5C0101,
+		0x81C, 0x295E0101,
+		0x81C, 0x28600101,
+		0x81C, 0x27620101,
+		0x81C, 0x26640101,
+		0x81C, 0x25660101,
+		0x81C, 0x24680101,
+		0x81C, 0x236A0101,
+		0x81C, 0x056C0101,
+		0x81C, 0x046E0101,
+		0x81C, 0x03700101,
+		0x81C, 0x02720101,
+	0xFF0F02C0, 0xDEAD,
+		0x81C, 0x01740101,
+		0x81C, 0x01760101,
+		0x81C, 0x01780101,
+		0x81C, 0x017A0101,
+		0x81C, 0x017C0101,
+		0x81C, 0x017E0101,
+		0xC50, 0x00000022,
+		0xC50, 0x00000020,
+
+};
diff --git a/drivers/staging/rtl8821ae/rtl8821ae/table.h b/drivers/staging/rtl8821ae/rtl8821ae/table.h
new file mode 100644
index 0000000..b9d7b26
--- /dev/null
+++ b/drivers/staging/rtl8821ae/rtl8821ae/table.h
@@ -0,0 +1,62 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2009-2010  Realtek Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ * The full GNU General Public License is included in this distribution in the
+ * file called LICENSE.
+ *
+ * Contact Information:
+ * wlanfae <wlanfae@realtek.com>
+ * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
+ * Hsinchu 300, Taiwan.
+ *
+ * Created on  2010/ 5/18,  1:41
+ *
+ * Larry Finger <Larry.Finger@lwfinger.net>
+ *
+ *****************************************************************************/
+
+#ifndef __RTL8821AE_TABLE__H_
+#define __RTL8821AE_TABLE__H_
+
+#include <linux/types.h>
+#define  RTL8821AEPHY_REG_1TARRAYLEN	344
+extern u32 RTL8821AE_PHY_REG_ARRAY[];
+#define  RTL8812AEPHY_REG_1TARRAYLEN	490
+extern u32 RTL8812AE_PHY_REG_ARRAY[];
+#define RTL8821AEPHY_REG_ARRAY_PGLEN 	90
+extern u32 RTL8821AE_PHY_REG_ARRAY_PG[];
+#define RTL8812AEPHY_REG_ARRAY_PGLEN 	276
+extern u32 RTL8812AE_PHY_REG_ARRAY_PG[];
+//#define	RTL8723BE_RADIOA_1TARRAYLEN 	206
+//extern u8 *RTL8821AE_TXPWR_LMT_ARRAY[];
+#define	RTL8812AE_RADIOA_1TARRAYLEN 	1264
+extern u32 RTL8812AE_RADIOA_ARRAY[];
+#define	RTL8812AE_RADIOB_1TARRAYLEN 	1240
+extern u32 RTL8812AE_RADIOB_ARRAY[];
+#define	RTL8821AE_RADIOA_1TARRAYLEN 	1176
+extern u32 RTL8821AE_RADIOA_ARRAY[];
+#define RTL8821AEMAC_1T_ARRAYLEN  		194
+extern u32 RTL8821AE_MAC_REG_ARRAY[];
+#define RTL8812AEMAC_1T_ARRAYLEN  		214
+extern u32 RTL8812AE_MAC_REG_ARRAY[];
+#define RTL8821AEAGCTAB_1TARRAYLEN 		382
+extern u32 RTL8821AE_AGC_TAB_ARRAY[];
+#define RTL8812AEAGCTAB_1TARRAYLEN 		1312
+extern u32 RTL8812AE_AGC_TAB_ARRAY[];
+
+
+#endif
diff --git a/drivers/staging/rtl8821ae/rtl8821ae/trx.c b/drivers/staging/rtl8821ae/rtl8821ae/trx.c
new file mode 100644
index 0000000..75ae438
--- /dev/null
+++ b/drivers/staging/rtl8821ae/rtl8821ae/trx.c
@@ -0,0 +1,1050 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2009-2010  Realtek Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ * The full GNU General Public License is included in this distribution in the
+ * file called LICENSE.
+ *
+ * Contact Information:
+ * wlanfae <wlanfae@realtek.com>
+ * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
+ * Hsinchu 300, Taiwan.
+ *
+ * Larry Finger <Larry.Finger@lwfinger.net>
+ *
+ *****************************************************************************/
+
+#include "../wifi.h"
+#include "../pci.h"
+#include "../base.h"
+#include "../stats.h"
+#include "reg.h"
+#include "def.h"
+#include "phy.h"
+#include "trx.h"
+#include "led.h"
+#include "dm.h"
+#include "phy.h"
+u8 _rtl8821ae_map_hwqueue_to_fwqueue(struct sk_buff *skb, u8 hw_queue)
+{
+	u16 fc = rtl_get_fc(skb);
+
+	if (unlikely(ieee80211_is_beacon(fc)))
+		return QSLT_BEACON;
+	if (ieee80211_is_mgmt(fc) || ieee80211_is_ctl(fc))
+		return QSLT_MGNT;
+
+	return skb->priority;
+}
+
+/* mac80211's rate_idx is like this:
+ *
+ * 2.4G band:rx_status->band == IEEE80211_BAND_2GHZ
+ *
+ * B/G rate:
+ * (rx_status->flag & RX_FLAG_HT) = 0,
+ * DESC_RATE1M-->DESC_RATE54M ==> idx is 0-->11,
+ *
+ * N rate:
+ * (rx_status->flag & RX_FLAG_HT) = 1,
+ * DESC_RATEMCS0-->DESC_RATEMCS15 ==> idx is 0-->15
+ *
+ * 5G band:rx_status->band == IEEE80211_BAND_5GHZ
+ * A rate:
+ * (rx_status->flag & RX_FLAG_HT) = 0,
+ * DESC_RATE6M-->DESC_RATE54M ==> idx is 0-->7,
+ *
+ * N rate:
+ * (rx_status->flag & RX_FLAG_HT) = 1,
+ * DESC_RATEMCS0-->DESC_RATEMCS15 ==> idx is 0-->15
+ */
+static int _rtl8821ae_rate_mapping(struct ieee80211_hw *hw,
+	bool isht, u8 desc_rate)
+{
+	int rate_idx;
+
+	if (false == isht) {
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,10,0))
+		if (IEEE80211_BAND_2GHZ == hw->conf.chandef.chan->band) {
+#else
+		if (IEEE80211_BAND_2GHZ == hw->conf.channel->band) {
+#endif
+			switch (desc_rate) {
+			case DESC_RATE1M:
+				rate_idx = 0;
+				break;
+			case DESC_RATE2M:
+				rate_idx = 1;
+				break;
+			case DESC_RATE5_5M:
+				rate_idx = 2;
+				break;
+			case DESC_RATE11M:
+				rate_idx = 3;
+				break;
+			case DESC_RATE6M:
+				rate_idx = 4;
+				break;
+			case DESC_RATE9M:
+				rate_idx = 5;
+				break;
+			case DESC_RATE12M:
+				rate_idx = 6;
+				break;
+			case DESC_RATE18M:
+				rate_idx = 7;
+				break;
+			case DESC_RATE24M:
+				rate_idx = 8;
+				break;
+			case DESC_RATE36M:
+				rate_idx = 9;
+				break;
+			case DESC_RATE48M:
+				rate_idx = 10;
+				break;
+			case DESC_RATE54M:
+				rate_idx = 11;
+				break;
+			default:
+				rate_idx = 0;
+				break;
+			}
+		} else {
+			switch (desc_rate) {
+			case DESC_RATE6M:
+				rate_idx = 0;
+				break;
+			case DESC_RATE9M:
+				rate_idx = 1;
+				break;
+			case DESC_RATE12M:
+				rate_idx = 2;
+				break;
+			case DESC_RATE18M:
+				rate_idx = 3;
+				break;
+			case DESC_RATE24M:
+				rate_idx = 4;
+				break;
+			case DESC_RATE36M:
+				rate_idx = 5;
+				break;
+			case DESC_RATE48M:
+				rate_idx = 6;
+				break;
+			case DESC_RATE54M:
+				rate_idx = 7;
+				break;
+			default:
+				rate_idx = 0;
+				break;
+			}
+		}
+	} else {
+		switch(desc_rate) {
+		case DESC_RATEMCS0:
+			rate_idx = 0;
+			break;
+		case DESC_RATEMCS1:
+			rate_idx = 1;
+			break;
+		case DESC_RATEMCS2:
+			rate_idx = 2;
+			break;
+		case DESC_RATEMCS3:
+			rate_idx = 3;
+			break;
+		case DESC_RATEMCS4:
+			rate_idx = 4;
+			break;
+		case DESC_RATEMCS5:
+			rate_idx = 5;
+			break;
+		case DESC_RATEMCS6:
+			rate_idx = 6;
+			break;
+		case DESC_RATEMCS7:
+			rate_idx = 7;
+			break;
+		case DESC_RATEMCS8:
+			rate_idx = 8;
+			break;
+		case DESC_RATEMCS9:
+			rate_idx = 9;
+			break;
+		case DESC_RATEMCS10:
+			rate_idx = 10;
+			break;
+		case DESC_RATEMCS11:
+			rate_idx = 11;
+			break;
+		case DESC_RATEMCS12:
+			rate_idx = 12;
+			break;
+		case DESC_RATEMCS13:
+			rate_idx = 13;
+			break;
+		case DESC_RATEMCS14:
+			rate_idx = 14;
+			break;
+		case DESC_RATEMCS15:
+			rate_idx = 15;
+			break;
+		default:
+			rate_idx = 0;
+			break;
+		}
+	}
+	return rate_idx;
+}
+
+static void _rtl8821ae_query_rxphystatus(struct ieee80211_hw *hw,
+			struct rtl_stats *pstatus, u8 *pdesc,
+			struct rx_fwinfo_8821ae *p_drvinfo, bool bpacket_match_bssid,
+			bool bpacket_toself, bool b_packet_beacon)
+{
+	struct rtl_priv *rtlpriv = rtl_priv(hw);
+	struct rtl_ps_ctl *ppsc = rtl_psc(rtlpriv);
+	struct phy_sts_cck_8821ae_t *cck_buf;
+	struct phy_status_rpt *p_phystRpt = (struct phy_status_rpt *)p_drvinfo;
+	struct rtl_dm *rtldm = rtl_dm(rtl_priv(hw));
+	char rx_pwr_all = 0, rx_pwr[4];
+	u8 rf_rx_num = 0, evm, pwdb_all;
+	u8 i, max_spatial_stream;
+	u32 rssi, total_rssi = 0;
+	bool b_is_cck = pstatus->b_is_cck;
+	u8 lan_idx,vga_idx;
+
+	/* Record it for next packet processing */
+	pstatus->b_packet_matchbssid = bpacket_match_bssid;
+	pstatus->b_packet_toself = bpacket_toself;
+	pstatus->b_packet_beacon = b_packet_beacon;
+	pstatus->rx_mimo_signalquality[0] = -1;
+	pstatus->rx_mimo_signalquality[1] = -1;
+
+	if (b_is_cck) {
+		u8 cck_highpwr;
+		u8 cck_agc_rpt;
+		/* CCK Driver info Structure is not the same as OFDM packet. */
+		cck_buf = (struct phy_sts_cck_8821ae_t *)p_drvinfo;
+		cck_agc_rpt = cck_buf->cck_agc_rpt;
+
+		/* (1)Hardware does not provide RSSI for CCK */
+		/* (2)PWDB, Average PWDB calculated by
+		 * hardware (for rate adaptive) */
+		if (ppsc->rfpwr_state == ERFON)
+			cck_highpwr = (u8) rtl_get_bbreg(hw, RFPGA0_XA_HSSIPARAMETER2,
+							 BIT(9));
+		else
+			cck_highpwr = false;
+
+		lan_idx = ((cck_agc_rpt & 0xE0) >> 5);
+		vga_idx = (cck_agc_rpt & 0x1f);
+		switch (lan_idx) {
+			case 7:
+				if(vga_idx <= 27)
+					rx_pwr_all = -100 + 2*(27-vga_idx); /*VGA_idx = 27~2*/
+				else
+					rx_pwr_all = -100;
+				break;
+			case 6:
+				rx_pwr_all = -48 + 2*(2-vga_idx); /*VGA_idx = 2~0*/
+				break;
+			case 5:
+				rx_pwr_all = -42 + 2*(7-vga_idx); /*VGA_idx = 7~5*/
+				break;
+			case 4:
+				rx_pwr_all = -36 + 2*(7-vga_idx); /*VGA_idx = 7~4*/
+				break;
+			case 3:
+				rx_pwr_all = -24 + 2*(7-vga_idx); /*VGA_idx = 7~0*/
+				break;
+			case 2:
+				if(cck_highpwr)
+					rx_pwr_all = -12 + 2*(5-vga_idx); /*VGA_idx = 5~0*/
+				else
+					rx_pwr_all = -6+ 2*(5-vga_idx);
+				break;
+			case 1:
+				rx_pwr_all = 8-2*vga_idx;
+				break;
+			case 0:
+				rx_pwr_all = 14-2*vga_idx;
+				break;
+			default:
+				break;
+		}
+		rx_pwr_all += 6;
+		pwdb_all = rtl_query_rxpwrpercentage(rx_pwr_all);
+		/* CCK gain is smaller than OFDM/MCS gain, so add
+		 * an empirically determined offset of 6. */
+		pwdb_all += 6;
+		if (pwdb_all > 100)
+			pwdb_all = 100;
+		/* Adjust the offset so the gain index
+		 * matches OFDM. */
+		if (pwdb_all > 34 && pwdb_all <= 42)
+			pwdb_all -= 2;
+		else if (pwdb_all > 26 && pwdb_all <= 34)
+			pwdb_all -= 6;
+		else if (pwdb_all > 14 && pwdb_all <= 26)
+			pwdb_all -= 8;
+		else if (pwdb_all > 4 && pwdb_all <= 14)
+			pwdb_all -= 4;
+		if (!cck_highpwr) {
+			if (pwdb_all >= 80)
+				pwdb_all = ((pwdb_all - 80) << 1) +
+					   ((pwdb_all - 80) >> 1) + 80;
+			else if ((pwdb_all <= 78) && (pwdb_all >= 20))
+				pwdb_all += 3;
+			if (pwdb_all > 100)
+				pwdb_all = 100;
+		}
+
+		pstatus->rx_pwdb_all = pwdb_all;
+		pstatus->recvsignalpower = rx_pwr_all;
+
+		/* (3) Get Signal Quality (EVM) */
+		if (bpacket_match_bssid) {
+			u8 sq;
+
+			if (pstatus->rx_pwdb_all > 40)
+				sq = 100;
+			else {
+				sq = cck_buf->sq_rpt;
+				if (sq > 64)
+					sq = 0;
+				else if (sq < 20)
+					sq = 100;
+				else
+					sq = ((64 - sq) * 100) / 44;
+			}
+
+			pstatus->signalquality = sq;
+			pstatus->rx_mimo_signalquality[0] = sq;
+			pstatus->rx_mimo_signalquality[1] = -1;
+		}
+	} else {
+		rtlpriv->dm.brfpath_rxenable[0] =
+		    rtlpriv->dm.brfpath_rxenable[1] = true;
+
+		/* (1)Get RSSI for HT rate */
+		for (i = RF90_PATH_A; i < RF6052_MAX_PATH; i++) {
+
+			/* Count this RF RX path if it is enabled. */
+			if (rtlpriv->dm.brfpath_rxenable[i])
+				rf_rx_num++;
+
+			rx_pwr[i] = ((p_drvinfo->gain_trsw[i] & 0x3f) * 2) - 110;
+
+			/* Translate DBM to percentage. */
+			rssi = rtl_query_rxpwrpercentage(rx_pwr[i]);
+			total_rssi += rssi;
+
+			/* Get Rx snr value in DB */
+			rtlpriv->stats.rx_snr_db[i] = (long)(p_drvinfo->rxsnr[i] / 2);
+
+			/* Record Signal Strength for next packet */
+			if (bpacket_match_bssid)
+				pstatus->rx_mimo_signalstrength[i] = (u8) rssi;
+		}
+
+		/* (2)PWDB, Average PWDB calculated by
+		 * hardware (for rate adaptive) */
+		rx_pwr_all = ((p_drvinfo->pwdb_all >> 1) & 0x7f) - 110;
+
+		pwdb_all = rtl_query_rxpwrpercentage(rx_pwr_all);
+		pstatus->rx_pwdb_all = pwdb_all;
+		pstatus->rxpower = rx_pwr_all;
+		pstatus->recvsignalpower = rx_pwr_all;
+
+		/* (3)EVM of HT rate */
+		if (pstatus->b_is_ht && pstatus->rate >= DESC_RATEMCS8 &&
+		    pstatus->rate <= DESC_RATEMCS15)
+			max_spatial_stream = 2;
+		else
+			max_spatial_stream = 1;
+
+		for (i = 0; i < max_spatial_stream; i++) {
+			evm = rtl_evm_db_to_percentage(p_drvinfo->rxevm[i]);
+
+			if (bpacket_match_bssid) {
+				/* Fill value in RFD; take the signal
+				 * quality from the first spatial stream only. */
+				if (i == 0)
+					pstatus->signalquality = (u8) (evm & 0xff);
+				pstatus->rx_mimo_signalquality[i] = (u8) (evm & 0xff);
+			}
+		}
+	}
+
+	/* UI BSS list signal strength (in percent):
+	 * scale it into the 0~100 range for display. */
+	if (b_is_cck)
+		pstatus->signalstrength = (u8)(rtl_signal_scale_mapping(hw,
+			pwdb_all));
+	else if (rf_rx_num != 0)
+		pstatus->signalstrength = (u8)(rtl_signal_scale_mapping(hw,
+			total_rssi /= rf_rx_num));
+	/*HW antenna diversity*/
+	rtldm->fat_table.antsel_rx_keep_0 = p_phystRpt->ant_sel;
+	rtldm->fat_table.antsel_rx_keep_1 = p_phystRpt->ant_sel_b;
+	rtldm->fat_table.antsel_rx_keep_2 = p_phystRpt->antsel_rx_keep_2;
+
+}
+#if 0
+static void _rtl8821ae_smart_antenna(struct ieee80211_hw *hw,
+	struct rtl_stats *pstatus)
+{
+	struct rtl_dm *rtldm= rtl_dm(rtl_priv(hw));
+	struct rtl_efuse *rtlefuse =rtl_efuse(rtl_priv(hw));
+	u8 antsel_tr_mux;
+	struct fast_ant_trainning *pfat_table = &(rtldm->fat_table);
+
+	if (rtlefuse->antenna_div_type == CG_TRX_SMART_ANTDIV) {
+		if (pfat_table->fat_state == FAT_TRAINING_STATE) {
+			if (pstatus->b_packet_toself) {
+				antsel_tr_mux = (pfat_table->antsel_rx_keep_2 << 2) |
+					(pfat_table->antsel_rx_keep_1 << 1) | pfat_table->antsel_rx_keep_0;
+				pfat_table->ant_sum_rssi[antsel_tr_mux] += pstatus->rx_pwdb_all;
+				pfat_table->ant_rssi_cnt[antsel_tr_mux]++;
+			}
+		}
+	} else if ((rtlefuse->antenna_div_type == CG_TRX_HW_ANTDIV) ||
+	(rtlefuse->antenna_div_type == CGCS_RX_HW_ANTDIV)) {
+		if (pstatus->b_packet_toself || pstatus->b_packet_matchbssid) {
+			antsel_tr_mux = (pfat_table->antsel_rx_keep_2 << 2) |
+					(pfat_table->antsel_rx_keep_1 << 1) | pfat_table->antsel_rx_keep_0;
+			rtl8821ae_dm_ant_sel_statistics(hw, antsel_tr_mux, 0, pstatus->rx_pwdb_all);
+		}
+
+	}
+}
+#endif
+static void _rtl8821ae_translate_rx_signal_stuff(struct ieee80211_hw *hw,
+		struct sk_buff *skb, struct rtl_stats *pstatus,
+		u8 *pdesc, struct rx_fwinfo_8821ae *p_drvinfo)
+{
+	struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
+	struct rtl_efuse *rtlefuse = rtl_efuse(rtl_priv(hw));
+	struct ieee80211_hdr *hdr;
+	u8 *tmp_buf;
+	u8 *praddr;
+	u8 *psaddr;
+	u16 fc, type;
+	bool b_packet_matchbssid, b_packet_toself, b_packet_beacon;
+
+	tmp_buf = skb->data + pstatus->rx_drvinfo_size + pstatus->rx_bufshift;
+
+	hdr = (struct ieee80211_hdr *)tmp_buf;
+	fc = le16_to_cpu(hdr->frame_control);
+	type = WLAN_FC_GET_TYPE(fc);
+	praddr = hdr->addr1;
+	psaddr = ieee80211_get_SA(hdr);
+	memcpy(pstatus->psaddr, psaddr, ETH_ALEN);
+
+	b_packet_matchbssid = ((IEEE80211_FTYPE_CTL != type) &&
+	     (!ether_addr_equal(mac->bssid, (fc & IEEE80211_FCTL_TODS) ?
+				  hdr->addr1 : (fc & IEEE80211_FCTL_FROMDS) ?
+				  hdr->addr2 : hdr->addr3)) && (!pstatus->b_hwerror) &&
+				  (!pstatus->b_crc) && (!pstatus->b_icv));
+
+	b_packet_toself = b_packet_matchbssid &&
+	    (!ether_addr_equal(praddr, rtlefuse->dev_addr));
+
+	if (ieee80211_is_beacon(fc))
+		b_packet_beacon = true;
+	else
+		b_packet_beacon = false;
+
+	if (b_packet_beacon && b_packet_matchbssid)
+		rtl_priv(hw)->dm.dbginfo.num_qry_beacon_pkt++;
+
+	_rtl8821ae_query_rxphystatus(hw, pstatus, pdesc, p_drvinfo,
+				   b_packet_matchbssid, b_packet_toself,
+				   b_packet_beacon);
+	/*_rtl8821ae_smart_antenna(hw, pstatus); */
+	rtl_process_phyinfo(hw, tmp_buf, pstatus);
+}
+
+static void _rtl8821ae_insert_emcontent(struct rtl_tcb_desc *ptcb_desc,
+				      u8 *virtualaddress)
+{
+	u32 dwtmp = 0;
+	memset(virtualaddress, 0, 8);
+
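+	/* Each early-mode LEN field carries either one queued packet
+	 * length or the sum of a pair, with the first length rounded
+	 * up to a 4-byte boundary plus 4. */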
+	SET_EARLYMODE_PKTNUM(virtualaddress, ptcb_desc->empkt_num);
+	if (ptcb_desc->empkt_num == 1) {
+		dwtmp = ptcb_desc->empkt_len[0];
+	} else {
+		dwtmp = ptcb_desc->empkt_len[0];
+		dwtmp += ((dwtmp % 4) ? (4 - dwtmp % 4) : 0) + 4;
+		dwtmp += ptcb_desc->empkt_len[1];
+	}
+	SET_EARLYMODE_LEN0(virtualaddress, dwtmp);
+
+	if (ptcb_desc->empkt_num <= 3) {
+		dwtmp = ptcb_desc->empkt_len[2];
+	} else {
+		dwtmp = ptcb_desc->empkt_len[2];
+		dwtmp += ((dwtmp % 4) ? (4 - dwtmp % 4) : 0) + 4;
+		dwtmp += ptcb_desc->empkt_len[3];
+	}
+	SET_EARLYMODE_LEN1(virtualaddress, dwtmp);
+	if (ptcb_desc->empkt_num <= 5) {
+		dwtmp = ptcb_desc->empkt_len[4];
+	} else {
+		dwtmp = ptcb_desc->empkt_len[4];
+		dwtmp += ((dwtmp % 4) ? (4 - dwtmp % 4) : 0) + 4;
+		dwtmp += ptcb_desc->empkt_len[5];
+	}
+	SET_EARLYMODE_LEN2_1(virtualaddress, dwtmp & 0xF);
+	SET_EARLYMODE_LEN2_2(virtualaddress, dwtmp >> 4);
+	if (ptcb_desc->empkt_num <= 7) {
+		dwtmp = ptcb_desc->empkt_len[6];
+	} else {
+		dwtmp = ptcb_desc->empkt_len[6];
+		dwtmp += ((dwtmp % 4) ? (4 - dwtmp % 4) : 0) + 4;
+		dwtmp += ptcb_desc->empkt_len[7];
+	}
+	SET_EARLYMODE_LEN3(virtualaddress, dwtmp);
+	if (ptcb_desc->empkt_num <= 9) {
+		dwtmp = ptcb_desc->empkt_len[8];
+	} else {
+		dwtmp = ptcb_desc->empkt_len[8];
+		dwtmp += ((dwtmp % 4) ? (4 - dwtmp % 4) : 0) + 4;
+		dwtmp += ptcb_desc->empkt_len[9];
+	}
+	SET_EARLYMODE_LEN4(virtualaddress, dwtmp);
+}
+
+bool rtl8821ae_rx_query_desc(struct ieee80211_hw *hw,
+			   struct rtl_stats *status,
+			   struct ieee80211_rx_status *rx_status,
+			   u8 *pdesc, struct sk_buff *skb)
+{
+	struct rtl_priv *rtlpriv = rtl_priv(hw);
+	struct rx_fwinfo_8821ae *p_drvinfo;
+	struct ieee80211_hdr *hdr;
+
+	u32 phystatus = GET_RX_DESC_PHYST(pdesc);
+
+	status->length = (u16) GET_RX_DESC_PKT_LEN(pdesc);
+	status->rx_drvinfo_size = (u8) GET_RX_DESC_DRV_INFO_SIZE(pdesc) *
+	    RX_DRV_INFO_SIZE_UNIT;
+	status->rx_bufshift = (u8) (GET_RX_DESC_SHIFT(pdesc) & 0x03);
+	status->b_icv = (u16) GET_RX_DESC_ICV(pdesc);
+	status->b_crc = (u16) GET_RX_DESC_CRC32(pdesc);
+	status->b_hwerror = (status->b_crc | status->b_icv);
+	status->decrypted = !GET_RX_DESC_SWDEC(pdesc);
+	status->rate = (u8) GET_RX_DESC_RXMCS(pdesc);
+	status->b_shortpreamble = (u16) GET_RX_DESC_SPLCP(pdesc);
+	status->b_isampdu = (bool) (GET_RX_DESC_PAGGR(pdesc) == 1);
+	status->b_isfirst_ampdu = (bool) (GET_RX_DESC_PAGGR(pdesc) == 1);
+	status->timestamp_low = GET_RX_DESC_TSFL(pdesc);
+	status->rx_is40Mhzpacket = (bool) GET_RX_DESC_BW(pdesc);
+	status->macid = GET_RX_DESC_MACID(pdesc);
+	status->b_is_ht = (bool)GET_RX_DESC_RXHT(pdesc);
+
+	status->b_is_cck = RX_HAL_IS_CCK_RATE(status->rate);
+
+	if (GET_RX_STATUS_DESC_RPT_SEL(pdesc))
+		status->packet_report_type = C2H_PACKET;
+	else
+		status->packet_report_type = NORMAL_RX;
+
+	if (GET_RX_STATUS_DESC_PATTERN_MATCH(pdesc))
+		status->wake_match = BIT(2);
+	else if (GET_RX_STATUS_DESC_MAGIC_MATCH(pdesc))
+		status->wake_match = BIT(1);
+	else if (GET_RX_STATUS_DESC_UNICAST_MATCH(pdesc))
+		status->wake_match = BIT(0);
+	else
+		status->wake_match = 0;
+
+	if (status->wake_match)
+		RT_TRACE(COMP_RXDESC, DBG_LOUD,
+			 ("Get Wakeup Packet! WakeMatch=%d\n", status->wake_match));
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,10,0))
+	rx_status->freq = hw->conf.chandef.chan->center_freq;
+	rx_status->band = hw->conf.chandef.chan->band;
+#else
+	rx_status->freq = hw->conf.channel->center_freq;
+	rx_status->band = hw->conf.channel->band;
+#endif
+
+	hdr = (struct ieee80211_hdr *)(skb->data + status->rx_drvinfo_size
+			+ status->rx_bufshift);
+
+	if (status->b_crc)
+		rx_status->flag |= RX_FLAG_FAILED_FCS_CRC;
+
+	if (status->rx_is40Mhzpacket)
+		rx_status->flag |= RX_FLAG_40MHZ;
+
+	if (status->b_is_ht)
+		rx_status->flag |= RX_FLAG_HT;
+
+	rx_status->flag |= RX_FLAG_MACTIME_MPDU;
+
+	/* hw will set status->decrypted true if it finds the
+	 * frame is an open data frame or a mgmt frame. */
+	/* The hw cannot decrypt robust management frames
+	 * (IEEE 802.11w), but it still sets status->decrypted
+	 * true, so mark such frames as undecrypted here and
+	 * let mac80211 decrypt them in software. */
+	if (status->decrypted) {
+		if (!hdr) {
+			WARN_ON_ONCE(true);
+			pr_err("decrypted is true but hdr NULL, from skb %p\n",
+			       rtl_get_hdr(skb));
+			return false;
+		}
+
+		if ((ieee80211_is_robust_mgmt_frame(hdr)) &&
+			(ieee80211_has_protected(hdr->frame_control)))
+			rx_status->flag &= ~RX_FLAG_DECRYPTED;
+		else
+			rx_status->flag |= RX_FLAG_DECRYPTED;
+	}
+
+	/* rate_idx: index of the data rate into the band's
+	 * supported rates, or the MCS index if HT rates
+	 * are used (RX_FLAG_HT). */
+	/* Note: this differs from the Windows driver's definition. */
+	rx_status->rate_idx = _rtl8821ae_rate_mapping(hw,
+				status->b_is_ht, status->rate);
+
+	rx_status->mactime = status->timestamp_low;
+	if (phystatus) {
+		p_drvinfo = (struct rx_fwinfo_8821ae *)(skb->data +
+						     status->rx_bufshift);
+
+		_rtl8821ae_translate_rx_signal_stuff(hw,
+						   skb, status, pdesc,
+						   p_drvinfo);
+	}
+
+	/*rx_status->qual = status->signal; */
+	rx_status->signal = status->recvsignalpower + 10;
+	/*rx_status->noise = -status->noise; */
+	if (status->packet_report_type == TX_REPORT2) {
+		status->macid_valid_entry[0] = GET_RX_RPT2_DESC_MACID_VALID_1(pdesc);
+		status->macid_valid_entry[1] = GET_RX_RPT2_DESC_MACID_VALID_2(pdesc);
+	}
+	return true;
+}
+
+/*<delete in kernel start>*/
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(3,7,0))
+void rtl8821ae_tx_fill_desc(struct ieee80211_hw *hw,
+			  struct ieee80211_hdr *hdr, u8 *pdesc_tx, u8 *txbd,
+			  struct ieee80211_tx_info *info, struct sk_buff *skb,
+			  u8 hw_queue, struct rtl_tcb_desc *ptcb_desc)
+#else
+/*<delete in kernel end>*/
+void rtl8821ae_tx_fill_desc(struct ieee80211_hw *hw,
+			  struct ieee80211_hdr *hdr, u8 *pdesc_tx, u8 *txbd,
+			  struct ieee80211_tx_info *info,
+			  struct ieee80211_sta *sta,
+			  struct sk_buff *skb,
+			  u8 hw_queue, struct rtl_tcb_desc *ptcb_desc)
+/*<delete in kernel start>*/
+#endif
+/*<delete in kernel end>*/
+{
+	struct rtl_priv *rtlpriv = rtl_priv(hw);
+	struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
+	struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
+	struct rtl_hal *rtlhal = rtl_hal(rtlpriv);
+/*<delete in kernel start>*/
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(3,7,0))
+	struct ieee80211_sta *sta = info->control.sta;
+#endif
+/*<delete in kernel end>*/
+	u8 *pdesc = (u8 *) pdesc_tx;
+	u16 seq_number;
+	u16 fc = le16_to_cpu(hdr->frame_control);
+	unsigned int buf_len = 0;
+	unsigned int skb_len = skb->len;
+	u8 fw_qsel = _rtl8821ae_map_hwqueue_to_fwqueue(skb, hw_queue);
+	bool b_firstseg = ((hdr->seq_ctrl &
+			    cpu_to_le16(IEEE80211_SCTL_FRAG)) == 0);
+	bool b_lastseg = ((hdr->frame_control &
+			   cpu_to_le16(IEEE80211_FCTL_MOREFRAGS)) == 0);
+	dma_addr_t mapping;
+	u8 bw_40 = 0;
+	u8 short_gi = 0;
+
+	if (mac->opmode == NL80211_IFTYPE_STATION) {
+		bw_40 = mac->bw_40;
+	} else if (mac->opmode == NL80211_IFTYPE_AP ||
+		mac->opmode == NL80211_IFTYPE_ADHOC) {
+		if (sta)
+			bw_40 = sta->ht_cap.cap & IEEE80211_HT_CAP_SUP_WIDTH_20_40;
+	}
+	seq_number = (le16_to_cpu(hdr->seq_ctrl) & IEEE80211_SCTL_SEQ) >> 4;
+	rtl_get_tcb_desc(hw, info, sta, skb, ptcb_desc);
+	/* reserve 8 byte for AMPDU early mode */
+	if (rtlhal->b_earlymode_enable) {
+		skb_push(skb, EM_HDR_LEN);
+		memset(skb->data, 0, EM_HDR_LEN);
+	}
+	buf_len = skb->len;
+	mapping = pci_map_single(rtlpci->pdev, skb->data, skb->len,
+				 PCI_DMA_TODEVICE);
+	if (pci_dma_mapping_error(rtlpci->pdev, mapping)) {
+		RT_TRACE(COMP_SEND, DBG_TRACE,
+			 ("DMA mapping error"));
+		return;
+	}
+	CLEAR_PCI_TX_DESC_CONTENT(pdesc, sizeof(struct tx_desc_8821ae));
+	if (ieee80211_is_nullfunc(fc) || ieee80211_is_ctl(fc)) {
+		b_firstseg = true;
+		b_lastseg = true;
+	}
+	if (b_firstseg) {
+		if (rtlhal->b_earlymode_enable) {
+			SET_TX_DESC_PKT_OFFSET(pdesc, 1);
+			SET_TX_DESC_OFFSET(pdesc, USB_HWDESC_HEADER_LEN + EM_HDR_LEN);
+			if (ptcb_desc->empkt_num) {
+				RT_TRACE(COMP_SEND, DBG_TRACE,
+					 ("Insert 8 byte.pTcb->EMPktNum:%d\n",
+					  ptcb_desc->empkt_num));
+				_rtl8821ae_insert_emcontent(ptcb_desc, (u8 *)(skb->data));
+			}
+		} else {
+			SET_TX_DESC_OFFSET(pdesc, USB_HWDESC_HEADER_LEN);
+		}
+
+		/* ptcb_desc->use_driver_rate = true; */
+		SET_TX_DESC_TX_RATE(pdesc, ptcb_desc->hw_rate);
+		if (ptcb_desc->hw_rate > DESC_RATEMCS0)
+			short_gi = (ptcb_desc->use_shortgi) ? 1 : 0;
+		else
+			short_gi = (ptcb_desc->use_shortpreamble) ? 1 : 0;
+		SET_TX_DESC_DATA_SHORTGI(pdesc, short_gi);
+
+		if (info->flags & IEEE80211_TX_CTL_AMPDU) {
+			SET_TX_DESC_AGG_ENABLE(pdesc, 1);
+			SET_TX_DESC_MAX_AGG_NUM(pdesc, 0x14);
+		}
+		SET_TX_DESC_SEQ(pdesc, seq_number);
+		SET_TX_DESC_RTS_ENABLE(pdesc, ((ptcb_desc->b_rts_enable &&
+						!ptcb_desc->b_cts_enable) ? 1 : 0));
+		SET_TX_DESC_HW_RTS_ENABLE(pdesc, 0);
+		SET_TX_DESC_CTS2SELF(pdesc, ((ptcb_desc->b_cts_enable) ? 1 : 0));
+	/*	SET_TX_DESC_RTS_STBC(pdesc, ((ptcb_desc->b_rts_stbc) ? 1 : 0));*/
+
+		SET_TX_DESC_RTS_RATE(pdesc, ptcb_desc->rts_rate);
+	/*	SET_TX_DESC_RTS_BW(pdesc, 0);*/
+		SET_TX_DESC_RTS_SC(pdesc, ptcb_desc->rts_sc);
+		SET_TX_DESC_RTS_SHORT(pdesc, ((ptcb_desc->rts_rate <= DESC_RATE54M) ?
+			(ptcb_desc->b_rts_use_shortpreamble ? 1 : 0) :
+			(ptcb_desc->b_rts_use_shortgi ? 1 : 0)));
+
+		if (ptcb_desc->btx_enable_sw_calc_duration)
+			SET_TX_DESC_NAV_USE_HDR(pdesc, 1);
+
+		if (bw_40) {
+			if (ptcb_desc->b_packet_bw) {
+				SET_TX_DESC_DATA_BW(pdesc, 1);
+				SET_TX_DESC_TX_SUB_CARRIER(pdesc, 3);
+			} else {
+				SET_TX_DESC_DATA_BW(pdesc, 0);
+				SET_TX_DESC_TX_SUB_CARRIER(pdesc, mac->cur_40_prime_sc);
+			}
+		} else {
+			SET_TX_DESC_DATA_BW(pdesc, 0);
+			SET_TX_DESC_TX_SUB_CARRIER(pdesc, 0);
+		}
+
+		SET_TX_DESC_LINIP(pdesc, 0);
+		SET_TX_DESC_PKT_SIZE(pdesc, (u16) skb_len);
+		if (sta) {
+			u8 ampdu_density = sta->ht_cap.ampdu_density;
+			SET_TX_DESC_AMPDU_DENSITY(pdesc, ampdu_density);
+		}
+		if (info->control.hw_key) {
+			struct ieee80211_key_conf *keyconf = info->control.hw_key;
+/*<delete in kernel start>*/
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,37))
+/*<delete in kernel end>*/
+			switch (keyconf->cipher) {
+			case WLAN_CIPHER_SUITE_WEP40:
+			case WLAN_CIPHER_SUITE_WEP104:
+			case WLAN_CIPHER_SUITE_TKIP:
+				SET_TX_DESC_SEC_TYPE(pdesc, 0x1);
+				break;
+			case WLAN_CIPHER_SUITE_CCMP:
+				SET_TX_DESC_SEC_TYPE(pdesc, 0x3);
+				break;
+			default:
+				SET_TX_DESC_SEC_TYPE(pdesc, 0x0);
+				break;
+
+			}
+/*<delete in kernel start>*/
+#else
+			switch (keyconf->alg) {
+			case ALG_WEP:
+			case ALG_TKIP:
+				SET_TX_DESC_SEC_TYPE(pdesc, 0x1);
+				break;
+			case ALG_CCMP:
+				SET_TX_DESC_SEC_TYPE(pdesc, 0x3);
+				break;
+			default:
+				SET_TX_DESC_SEC_TYPE(pdesc, 0x0);
+				break;
+
+			}
+#endif
+/*<delete in kernel end>*/
+		}
+
+		SET_TX_DESC_QUEUE_SEL(pdesc, fw_qsel);
+		SET_TX_DESC_DATA_RATE_FB_LIMIT(pdesc, 0x1F);
+		SET_TX_DESC_RTS_RATE_FB_LIMIT(pdesc, 0xF);
+		SET_TX_DESC_DISABLE_FB(pdesc, ptcb_desc->disable_ratefallback ? 1 : 0);
+		SET_TX_DESC_USE_RATE(pdesc, ptcb_desc->use_driver_rate ? 1 : 0);
+
+#if 0
+		SET_TX_DESC_USE_RATE(pdesc, 1);
+		SET_TX_DESC_TX_RATE(pdesc, 0x04);
+
+		SET_TX_DESC_RETRY_LIMIT_ENABLE(pdesc, 1);
+		SET_TX_DESC_DATA_RETRY_LIMIT(pdesc, 0x3f);
+#endif
+
+		/*SET_TX_DESC_PWR_STATUS(pdesc, pwr_status);*/
+		/* Set TxRate and RTSRate in TxDesc. */
+		/* This prevents the initial Tx rate of new packets
+		 * from being overwritten by the retried packet rate. */
+		if (!ptcb_desc->use_driver_rate) {
+			/*SET_TX_DESC_RTS_RATE(pdesc, 0x08); */
+			/* SET_TX_DESC_TX_RATE(pdesc, 0x0b); */
+		}
+		if (ieee80211_is_data_qos(fc)) {
+			if (mac->rdg_en) {
+				RT_TRACE(COMP_SEND, DBG_TRACE,
+					("Enable RDG function.\n"));
+				SET_TX_DESC_RDG_ENABLE(pdesc, 1);
+				SET_TX_DESC_HTC(pdesc, 1);
+			}
+		}
+	}
+
+	SET_TX_DESC_FIRST_SEG(pdesc, (b_firstseg ? 1 : 0));
+	SET_TX_DESC_LAST_SEG(pdesc, (b_lastseg ? 1 : 0));
+	SET_TX_DESC_TX_BUFFER_SIZE(pdesc, (u16) buf_len);
+	SET_TX_DESC_TX_BUFFER_ADDRESS(pdesc, cpu_to_le32(mapping));
+	/* if (rtlpriv->dm.b_useramask) */
+	if (1) {
+		SET_TX_DESC_RATE_ID(pdesc, ptcb_desc->ratr_index);
+		SET_TX_DESC_MACID(pdesc, ptcb_desc->mac_id);
+	} else {
+		SET_TX_DESC_RATE_ID(pdesc, 0xC + ptcb_desc->ratr_index);
+		SET_TX_DESC_MACID(pdesc, ptcb_desc->mac_id);
+	}
+/*	if (ieee80211_is_data_qos(fc))
+		SET_TX_DESC_QOS(pdesc, 1);
+*/
+	if (!ieee80211_is_data_qos(fc))  {
+		SET_TX_DESC_HWSEQ_EN(pdesc, 1);
+		SET_TX_DESC_HWSEQ_SEL(pdesc, 0);
+	}
+	SET_TX_DESC_MORE_FRAG(pdesc, (b_lastseg ? 0 : 1));
+	if (is_multicast_ether_addr(ieee80211_get_DA(hdr)) ||
+	    is_broadcast_ether_addr(ieee80211_get_DA(hdr))) {
+		SET_TX_DESC_BMC(pdesc, 1);
+	}
+
+	rtl8821ae_dm_set_tx_ant_by_tx_info(hw, pdesc, ptcb_desc->mac_id);
+	RT_TRACE(COMP_SEND, DBG_TRACE, ("\n"));
+}
+
+void rtl8821ae_tx_fill_cmddesc(struct ieee80211_hw *hw,
+			     u8 *pdesc, bool b_firstseg,
+			     bool b_lastseg, struct sk_buff *skb)
+{
+	struct rtl_priv *rtlpriv = rtl_priv(hw);
+	struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
+	u8 fw_queue = QSLT_BEACON;
+
+	dma_addr_t mapping = pci_map_single(rtlpci->pdev,
+					    skb->data, skb->len,
+					    PCI_DMA_TODEVICE);
+
+	if (pci_dma_mapping_error(rtlpci->pdev, mapping)) {
+		RT_TRACE(COMP_SEND, DBG_TRACE,
+			 ("DMA mapping error"));
+		return;
+	}
+	CLEAR_PCI_TX_DESC_CONTENT(pdesc, TX_DESC_SIZE);
+
+	SET_TX_DESC_FIRST_SEG(pdesc, 1);
+	SET_TX_DESC_LAST_SEG(pdesc, 1);
+
+	SET_TX_DESC_PKT_SIZE((u8 *) pdesc, (u16) (skb->len));
+
+	SET_TX_DESC_OFFSET(pdesc, USB_HWDESC_HEADER_LEN);
+
+	SET_TX_DESC_USE_RATE(pdesc, 1);
+	SET_TX_DESC_TX_RATE(pdesc, DESC_RATE1M);
+	SET_TX_DESC_DISABLE_FB(pdesc, 1);
+
+	SET_TX_DESC_DATA_BW(pdesc, 0);
+
+	SET_TX_DESC_HWSEQ_EN(pdesc, 1);
+
+	SET_TX_DESC_QUEUE_SEL(pdesc, fw_queue);
+/*
+	if(IsCtrlNDPA(VirtualAddress) || IsMgntNDPA(VirtualAddress))
+	{
+		SET_TX_DESC_DATA_RETRY_LIMIT_8812(pDesc, 5);
+		SET_TX_DESC_RETRY_LIMIT_ENABLE_8812(pDesc, 1);
+
+		if(IsMgntNDPA(VirtualAddress))
+		{
+			SET_TX_DESC_NDPA_8812(pDesc, 1);
+			SET_TX_DESC_RTS_SC_8812(pDesc, SCMapping_8812(Adapter, pTcb));
+		}
+		else
+		{
+			SET_TX_DESC_NDPA_8812(pDesc, 2);
+			SET_TX_DESC_RTS_SC_8812(pDesc, SCMapping_8812(Adapter, pTcb));
+		}
+	}*/
+
+	SET_TX_DESC_TX_BUFFER_SIZE(pdesc, (u16) (skb->len));
+
+	SET_TX_DESC_TX_BUFFER_ADDRESS(pdesc, cpu_to_le32(mapping));
+
+	SET_TX_DESC_MACID(pdesc, 0);
+
+	SET_TX_DESC_OWN(pdesc, 1);
+
+	RT_PRINT_DATA(rtlpriv, COMP_CMD, DBG_LOUD,
+		      "H2C Tx Cmd Content\n",
+		      pdesc, TX_DESC_SIZE);
+}
+
+void rtl8821ae_set_desc(struct ieee80211_hw *hw, u8 *pdesc,
+			bool istx, u8 desc_name, u8 *val)
+{
+	if (istx) {
+		switch (desc_name) {
+		case HW_DESC_OWN:
+			SET_TX_DESC_OWN(pdesc, 1);
+			break;
+		case HW_DESC_TX_NEXTDESC_ADDR:
+			SET_TX_DESC_NEXT_DESC_ADDRESS(pdesc, *(u32 *) val);
+			break;
+		default:
+			RT_ASSERT(false, ("ERR txdesc :%d not processed\n",
+					  desc_name));
+			break;
+		}
+	} else {
+		switch (desc_name) {
+		case HW_DESC_RXOWN:
+			SET_RX_DESC_OWN(pdesc, 1);
+			break;
+		case HW_DESC_RXBUFF_ADDR:
+			SET_RX_DESC_BUFF_ADDR(pdesc, *(u32 *) val);
+			break;
+		case HW_DESC_RXPKT_LEN:
+			SET_RX_DESC_PKT_LEN(pdesc, *(u32 *) val);
+			break;
+		case HW_DESC_RXERO:
+			SET_RX_DESC_EOR(pdesc, 1);
+			break;
+		default:
+			RT_ASSERT(false, ("ERR rxdesc :%d not processed\n",
+					  desc_name));
+			break;
+		}
+	}
+}
+
+u32 rtl8821ae_get_desc(u8 *pdesc, bool istx, u8 desc_name)
+{
+	u32 ret = 0;
+
+	if (istx) {
+		switch (desc_name) {
+		case HW_DESC_OWN:
+			ret = GET_TX_DESC_OWN(pdesc);
+			break;
+		case HW_DESC_TXBUFF_ADDR:
+			ret = GET_TX_DESC_TX_BUFFER_ADDRESS(pdesc);
+			break;
+		default:
+			RT_ASSERT(false, ("ERR txdesc :%d not processed\n",
+					  desc_name));
+			break;
+		}
+	} else {
+		switch (desc_name) {
+		case HW_DESC_OWN:
+			ret = GET_RX_DESC_OWN(pdesc);
+			break;
+		case HW_DESC_RXPKT_LEN:
+			ret = GET_RX_DESC_PKT_LEN(pdesc);
+			break;
+		default:
+			RT_ASSERT(false, ("ERR rxdesc :%d not processed\n",
+					  desc_name));
+			break;
+		}
+	}
+	return ret;
+}
+
+bool rtl8821ae_is_tx_desc_closed(struct ieee80211_hw *hw,
+				 u8 hw_queue, u16 index)
+{
+	struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
+	struct rtl8192_tx_ring *ring = &rtlpci->tx_ring[hw_queue];
+	u8 *entry = (u8 *)(&ring->desc[ring->idx]);
+	u8 own = (u8) rtl8821ae_get_desc(entry, true, HW_DESC_OWN);
+
+	/*
+	 * A beacon packet will only use the first
+	 * descriptor by default, and the own bit may not
+	 * be cleared by the hardware.
+	 */
+	return !own;
+}
+
+
+void rtl8821ae_tx_polling(struct ieee80211_hw *hw, u8 hw_queue)
+{
+	struct rtl_priv *rtlpriv = rtl_priv(hw);
+
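+	/* Kick the hardware to poll the TX ring of this queue by
+	 * writing its polling bit to the PCIe control register
+	 * (the beacon queue has its own bit). */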
+	if (hw_queue == BEACON_QUEUE) {
+		rtl_write_word(rtlpriv, REG_PCIE_CTRL_REG, BIT(4));
+	} else {
+		rtl_write_word(rtlpriv, REG_PCIE_CTRL_REG,
+			       BIT(0) << (hw_queue));
+	}
+}
diff --git a/drivers/staging/rtl8821ae/rtl8821ae/trx.h b/drivers/staging/rtl8821ae/rtl8821ae/trx.h
new file mode 100644
index 0000000..da93e5c
--- /dev/null
+++ b/drivers/staging/rtl8821ae/rtl8821ae/trx.h
@@ -0,0 +1,641 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2009-2010  Realtek Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ * The full GNU General Public License is included in this distribution in the
+ * file called LICENSE.
+ *
+ * Contact Information:
+ * wlanfae <wlanfae@realtek.com>
+ * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
+ * Hsinchu 300, Taiwan.
+ *
+ * Larry Finger <Larry.Finger@lwfinger.net>
+ *
+ *****************************************************************************/
+
+#ifndef __RTL8821AE_TRX_H__
+#define __RTL8821AE_TRX_H__
+
+#define TX_DESC_SIZE					40
+#define TX_DESC_AGGR_SUBFRAME_SIZE		32
+
+#define RX_DESC_SIZE					32
+#define RX_DRV_INFO_SIZE_UNIT			8
+
+#define	TX_DESC_NEXT_DESC_OFFSET		40
+#define USB_HWDESC_HEADER_LEN			40
+#define CRCLENGTH						4
+
+#define SET_TX_DESC_PKT_SIZE(__pdesc, __val) 		\
+	SET_BITS_TO_LE_4BYTE(__pdesc, 0, 16, __val)
+#define SET_TX_DESC_OFFSET(__pdesc, __val) 			\
+	SET_BITS_TO_LE_4BYTE(__pdesc, 16, 8, __val)
+#define SET_TX_DESC_BMC(__pdesc, __val) 			\
+	SET_BITS_TO_LE_4BYTE(__pdesc, 24, 1, __val)
+#define SET_TX_DESC_HTC(__pdesc, __val) 			\
+	SET_BITS_TO_LE_4BYTE(__pdesc, 25, 1, __val)
+#define SET_TX_DESC_LAST_SEG(__pdesc, __val) 		\
+	SET_BITS_TO_LE_4BYTE(__pdesc, 26, 1, __val)
+#define SET_TX_DESC_FIRST_SEG(__pdesc, __val) 		\
+	SET_BITS_TO_LE_4BYTE(__pdesc, 27, 1, __val)
+#define SET_TX_DESC_LINIP(__pdesc, __val) 			\
+	SET_BITS_TO_LE_4BYTE(__pdesc, 28, 1, __val)
+#define SET_TX_DESC_NO_ACM(__pdesc, __val) 			\
+	SET_BITS_TO_LE_4BYTE(__pdesc, 29, 1, __val)
+#define SET_TX_DESC_GF(__pdesc, __val) 				\
+	SET_BITS_TO_LE_4BYTE(__pdesc, 30, 1, __val)
+#define SET_TX_DESC_OWN(__pdesc, __val) 			\
+	SET_BITS_TO_LE_4BYTE(__pdesc, 31, 1, __val)
+
+#define GET_TX_DESC_PKT_SIZE(__pdesc) 				\
+	LE_BITS_TO_4BYTE(__pdesc, 0, 16)
+#define GET_TX_DESC_OFFSET(__pdesc)					\
+	LE_BITS_TO_4BYTE(__pdesc, 16, 8)
+#define GET_TX_DESC_BMC(__pdesc)					\
+	LE_BITS_TO_4BYTE(__pdesc, 24, 1)
+#define GET_TX_DESC_HTC(__pdesc)					\
+	LE_BITS_TO_4BYTE(__pdesc, 25, 1)
+#define GET_TX_DESC_LAST_SEG(__pdesc) 				\
+	LE_BITS_TO_4BYTE(__pdesc, 26, 1)
+#define GET_TX_DESC_FIRST_SEG(__pdesc) 				\
+	LE_BITS_TO_4BYTE(__pdesc, 27, 1)
+#define GET_TX_DESC_LINIP(__pdesc) 					\
+	LE_BITS_TO_4BYTE(__pdesc, 28, 1)
+#define GET_TX_DESC_NO_ACM(__pdesc)					\
+	LE_BITS_TO_4BYTE(__pdesc, 29, 1)
+#define GET_TX_DESC_GF(__pdesc)						\
+	LE_BITS_TO_4BYTE(__pdesc, 30, 1)
+#define GET_TX_DESC_OWN(__pdesc)					\
+	LE_BITS_TO_4BYTE(__pdesc, 31, 1)
+
+#define SET_TX_DESC_MACID(__pdesc, __val) 			\
+	SET_BITS_TO_LE_4BYTE(__pdesc+4, 0, 7, __val)
+#define SET_TX_DESC_QUEUE_SEL(__pdesc, __val) 		\
+	SET_BITS_TO_LE_4BYTE(__pdesc+4, 8, 5, __val)
+#define SET_TX_DESC_RDG_NAV_EXT(__pdesc, __val) 	\
+	SET_BITS_TO_LE_4BYTE(__pdesc+4, 13, 1, __val)
+#define SET_TX_DESC_LSIG_TXOP_EN(__pdesc, __val) 	\
+	SET_BITS_TO_LE_4BYTE(__pdesc+4, 14, 1, __val)
+#define SET_TX_DESC_PIFS(__pdesc, __val) 			\
+	SET_BITS_TO_LE_4BYTE(__pdesc+4, 15, 1, __val)
+#define SET_TX_DESC_RATE_ID(__pdesc, __val) 		\
+	SET_BITS_TO_LE_4BYTE(__pdesc+4, 16, 5, __val)
+#define SET_TX_DESC_EN_DESC_ID(__pdesc, __val) 		\
+	SET_BITS_TO_LE_4BYTE(__pdesc+4, 21, 1, __val)
+#define SET_TX_DESC_SEC_TYPE(__pdesc, __val) 		\
+	SET_BITS_TO_LE_4BYTE(__pdesc+4, 22, 2, __val)
+#define SET_TX_DESC_PKT_OFFSET(__pdesc, __val) 		\
+	SET_BITS_TO_LE_4BYTE(__pdesc+4, 24, 5, __val)
+
+
+#define SET_TX_DESC_PAID(__pdesc, __val) 			\
+	SET_BITS_TO_LE_4BYTE(__pdesc+8, 0, 9, __val)
+#define SET_TX_DESC_CCA_RTS(__pdesc, __val) 		\
+	SET_BITS_TO_LE_4BYTE(__pdesc+8, 10, 2, __val)
+#define SET_TX_DESC_AGG_ENABLE(__pdesc, __val)	\
+	SET_BITS_TO_LE_4BYTE(__pdesc+8, 12, 1, __val)
+#define SET_TX_DESC_RDG_ENABLE(__pdesc, __val)	\
+	SET_BITS_TO_LE_4BYTE(__pdesc+8, 13, 1, __val)
+#define SET_TX_DESC_BAR_RTY_TH(__pdesc, __val) 		\
+	SET_BITS_TO_LE_4BYTE(__pdesc+8, 14, 2, __val)
+#define SET_TX_DESC_AGG_BREAK(__pdesc, __val) 		\
+	SET_BITS_TO_LE_4BYTE(__pdesc+8, 16, 1, __val)
+#define SET_TX_DESC_MORE_FRAG(__pdesc, __val) 		\
+	SET_BITS_TO_LE_4BYTE(__pdesc+8, 17, 1, __val)
+#define SET_TX_DESC_RAW(__pdesc, __val) 			\
+	SET_BITS_TO_LE_4BYTE(__pdesc+8, 18, 1, __val)
+#define SET_TX_DESC_SPE_RPT(__pdesc, __val) 			\
+	SET_BITS_TO_LE_4BYTE(__pdesc+8, 19, 1, __val)
+#define SET_TX_DESC_AMPDU_DENSITY(__pdesc, __val) 	\
+	SET_BITS_TO_LE_4BYTE(__pdesc+8, 20, 3, __val)
+#define SET_TX_DESC_BT_INT(__pdesc, __val) 	\
+	SET_BITS_TO_LE_4BYTE(__pdesc+8, 23, 1, __val)
+#define SET_TX_DESC_GID(__pdesc, __val) 		\
+	SET_BITS_TO_LE_4BYTE(__pdesc+8, 24, 6, __val)
+
+
+#define SET_TX_DESC_WHEADER_LEN(__pdesc, __val) 	\
+	SET_BITS_TO_LE_4BYTE(__pdesc+12, 0, 4, __val)
+#define SET_TX_DESC_CHK_EN(__pdesc, __val) 		\
+	SET_BITS_TO_LE_4BYTE(__pdesc+12, 4, 1, __val)
+#define SET_TX_DESC_EARLY_MODE(__pdesc, __val) 		\
+	SET_BITS_TO_LE_4BYTE(__pdesc+12, 5, 1, __val)
+#define SET_TX_DESC_HWSEQ_SEL(__pdesc, __val) 		\
+	SET_BITS_TO_LE_4BYTE(__pdesc+12, 6, 2, __val)
+#define SET_TX_DESC_USE_RATE(__pdesc, __val) 		\
+	SET_BITS_TO_LE_4BYTE(__pdesc+12, 8, 1, __val)
+#define SET_TX_DESC_DISABLE_RTS_FB(__pdesc, __val) 		\
+	SET_BITS_TO_LE_4BYTE(__pdesc+12, 9, 1, __val)
+#define SET_TX_DESC_DISABLE_FB(__pdesc, __val) 		\
+	SET_BITS_TO_LE_4BYTE(__pdesc+12, 10, 1, __val)
+#define SET_TX_DESC_CTS2SELF(__pdesc, __val) 		\
+	SET_BITS_TO_LE_4BYTE(__pdesc+12, 11, 1, __val)
+#define SET_TX_DESC_RTS_ENABLE(__pdesc, __val) 		\
+	SET_BITS_TO_LE_4BYTE(__pdesc+12, 12, 1, __val)
+#define SET_TX_DESC_HW_RTS_ENABLE(__pdesc, __val) 		\
+	SET_BITS_TO_LE_4BYTE(__pdesc+12, 13, 1, __val)
+#define SET_TX_DESC_NAV_USE_HDR(__pdesc, __val) 		\
+	SET_BITS_TO_LE_4BYTE(__pdesc+12, 15, 1, __val)
+#define SET_TX_DESC_USE_MAX_LEN(__pdesc, __val) 		\
+	SET_BITS_TO_LE_4BYTE(__pdesc+12, 16, 1, __val)
+#define SET_TX_DESC_MAX_AGG_NUM(__pdesc, __val) 		\
+	SET_BITS_TO_LE_4BYTE(__pdesc+12, 17, 5, __val)
+#define SET_TX_DESC_NDPA(__pdesc, __val) 		\
+	SET_BITS_TO_LE_4BYTE(__pdesc+12, 22, 2, __val)
+#define SET_TX_DESC_AMPDU_MAX_TIME(__pdesc, __val) 		\
+	SET_BITS_TO_LE_4BYTE(__pdesc+12, 24, 8, __val)
+#define SET_TX_DESC_TX_ANT(__pdesc, __val) 		\
+	SET_BITS_TO_LE_4BYTE(__pdesc+20, 24, 4, __val)
+
+#define SET_TX_DESC_TX_RATE(__pdesc, __val) 		\
+	SET_BITS_TO_LE_4BYTE(__pdesc+16, 0, 7, __val)
+#define SET_TX_DESC_DATA_RATE_FB_LIMIT(__pdesc, __val) 		\
+	SET_BITS_TO_LE_4BYTE(__pdesc+16, 8, 5, __val)
+#define SET_TX_DESC_RTS_RATE_FB_LIMIT(__pdesc, __val) 		\
+	SET_BITS_TO_LE_4BYTE(__pdesc+16, 13, 4, __val)
+#define SET_TX_DESC_RETRY_LIMIT_ENABLE(__pdesc, __val) 			\
+	SET_BITS_TO_LE_4BYTE(__pdesc+16, 17, 1, __val)
+#define SET_TX_DESC_DATA_RETRY_LIMIT(__pdesc, __val) 		\
+	SET_BITS_TO_LE_4BYTE(__pdesc+16, 18, 6, __val)
+#define SET_TX_DESC_RTS_RATE(__pdesc, __val) 		\
+	SET_BITS_TO_LE_4BYTE(__pdesc+16, 24, 5, __val)
+
+
+#define SET_TX_DESC_TX_SUB_CARRIER(__pdesc, __val) 		\
+	SET_BITS_TO_LE_4BYTE(__pdesc+20, 0, 4, __val)
+#define SET_TX_DESC_DATA_SHORTGI(__pdesc, __val) 	\
+	SET_BITS_TO_LE_1BYTE(__pdesc+20, 6, 1, __val)
+#define SET_TX_DESC_DATA_BW(__pdesc, __val) 		\
+	SET_BITS_TO_LE_4BYTE(__pdesc+20, 5, 2, __val)
+#define SET_TX_DESC_DATA_LDPC(__pdesc, __val) 	\
+	SET_BITS_TO_LE_4BYTE(__pdesc+20, 7, 1, __val)
+#define SET_TX_DESC_DATA_STBC(__pdesc, __val) 	\
+	SET_BITS_TO_LE_4BYTE(__pdesc+20, 8, 2, __val)
+#define SET_TX_DESC_CTROL_STBC(__pdesc, __val) 	\
+	SET_BITS_TO_LE_4BYTE(__pdesc+20, 10, 2, __val)
+#define SET_TX_DESC_RTS_SHORT(__pdesc, __val) 	\
+	SET_BITS_TO_LE_4BYTE(__pdesc+20, 12, 1, __val)
+#define SET_TX_DESC_RTS_SC(__pdesc, __val) 	\
+	SET_BITS_TO_LE_4BYTE(__pdesc+20, 13, 4, __val)
+
+
+#define SET_TX_DESC_TX_BUFFER_SIZE(__pdesc, __val) 	\
+	SET_BITS_TO_LE_4BYTE(__pdesc+28, 0, 16, __val)
+
+#define GET_TX_DESC_TX_BUFFER_SIZE(__pdesc) 		\
+	LE_BITS_TO_4BYTE(__pdesc+28, 0, 16)
+
+#define SET_TX_DESC_HWSEQ_EN(__pdesc, __val) 	\
+	SET_BITS_TO_LE_4BYTE(__pdesc+32, 15, 1, __val)
+
+#define SET_TX_DESC_SEQ(__pdesc, __val) 	\
+	SET_BITS_TO_LE_4BYTE(__pdesc+36, 12, 12, __val)
+
+#define SET_TX_DESC_TX_BUFFER_ADDRESS(__pdesc, __val) 	\
+	SET_BITS_TO_LE_4BYTE(__pdesc+40, 0, 32, __val)
+
+#define GET_TX_DESC_TX_BUFFER_ADDRESS(__pdesc) 		\
+	LE_BITS_TO_4BYTE(__pdesc+40, 0, 32)
+
+
+#define SET_TX_DESC_NEXT_DESC_ADDRESS(__pdesc, __val) 	\
+	SET_BITS_TO_LE_4BYTE(__pdesc+48, 0, 32, __val)
+
+#define GET_TX_DESC_NEXT_DESC_ADDRESS(__pdesc) 		\
+	LE_BITS_TO_4BYTE(__pdesc+48, 0, 32)
+
+#define GET_RX_DESC_PKT_LEN(__pdesc) 			\
+	LE_BITS_TO_4BYTE(__pdesc, 0, 14)
+#define GET_RX_DESC_CRC32(__pdesc) 				\
+	LE_BITS_TO_4BYTE(__pdesc, 14, 1)
+#define GET_RX_DESC_ICV(__pdesc) 				\
+	LE_BITS_TO_4BYTE(__pdesc, 15, 1)
+#define GET_RX_DESC_DRV_INFO_SIZE(__pdesc) 		\
+	LE_BITS_TO_4BYTE(__pdesc, 16, 4)
+#define GET_RX_DESC_SECURITY(__pdesc) 			\
+	LE_BITS_TO_4BYTE(__pdesc, 20, 3)
+#define GET_RX_DESC_QOS(__pdesc) 				\
+	LE_BITS_TO_4BYTE(__pdesc, 23, 1)
+#define GET_RX_DESC_SHIFT(__pdesc) 				\
+	LE_BITS_TO_4BYTE(__pdesc, 24, 2)
+#define GET_RX_DESC_PHYST(__pdesc) 				\
+	LE_BITS_TO_4BYTE(__pdesc, 26, 1)
+#define GET_RX_DESC_SWDEC(__pdesc) 				\
+	LE_BITS_TO_4BYTE(__pdesc, 27, 1)
+#define GET_RX_DESC_LS(__pdesc) 				\
+	LE_BITS_TO_4BYTE(__pdesc, 28, 1)
+#define GET_RX_DESC_FS(__pdesc) 				\
+	LE_BITS_TO_4BYTE(__pdesc, 29, 1)
+#define GET_RX_DESC_EOR(__pdesc) 				\
+	LE_BITS_TO_4BYTE(__pdesc, 30, 1)
+#define GET_RX_DESC_OWN(__pdesc) 				\
+	LE_BITS_TO_4BYTE(__pdesc, 31, 1)
+
+#define SET_RX_DESC_PKT_LEN(__pdesc, __val) 	\
+	SET_BITS_TO_LE_4BYTE(__pdesc, 0, 14, __val)
+#define SET_RX_DESC_EOR(__pdesc, __val) 		\
+	SET_BITS_TO_LE_4BYTE(__pdesc, 30, 1, __val)
+#define SET_RX_DESC_OWN(__pdesc, __val) 		\
+	SET_BITS_TO_LE_4BYTE(__pdesc, 31, 1, __val)
+
+#define GET_RX_DESC_MACID(__pdesc) 				\
+	LE_BITS_TO_4BYTE(__pdesc+4, 0, 7)
+#define GET_RX_DESC_TID(__pdesc) 				\
+	LE_BITS_TO_4BYTE(__pdesc+4, 8, 4)
+#define GET_RX_DESC_AMSDU(__pdesc) 				\
+	LE_BITS_TO_4BYTE(__pdesc+4, 13, 1)
+#define GET_RX_STATUS_DESC_RXID_MATCH(__pdesc) 				\
+	LE_BITS_TO_4BYTE(__pdesc+4, 14, 1)
+#define GET_RX_DESC_PAGGR(__pdesc) 				\
+	LE_BITS_TO_4BYTE(__pdesc+4, 15, 1)
+#define GET_RX_DESC_A1_FIT(__pdesc) 			\
+	LE_BITS_TO_4BYTE(__pdesc+4, 16, 4)
+#define GET_RX_DESC_CHKERR(__pdesc) 			\
+	LE_BITS_TO_4BYTE(__pdesc+4, 20, 1)
+#define GET_RX_DESC_IPVER(__pdesc) 				\
+	LE_BITS_TO_4BYTE(__pdesc+4, 21, 1)
+#define GET_RX_STATUS_DESC_IS_TCPUDP(__pdesc) 				\
+	LE_BITS_TO_4BYTE(__pdesc+4, 22, 1)
+#define GET_RX_STATUS_DESC_CHK_VLD(__pdesc) 				\
+	LE_BITS_TO_4BYTE(__pdesc+4, 23, 1)
+#define GET_RX_DESC_PAM(__pdesc)				\
+	LE_BITS_TO_4BYTE(__pdesc+4, 24, 1)
+#define GET_RX_DESC_PWR(__pdesc) 				\
+	LE_BITS_TO_4BYTE(__pdesc+4, 25, 1)
+#define GET_RX_DESC_MD(__pdesc) 				\
+	LE_BITS_TO_4BYTE(__pdesc+4, 26, 1)
+#define GET_RX_DESC_MF(__pdesc) 				\
+	LE_BITS_TO_4BYTE(__pdesc+4, 27, 1)
+#define GET_RX_DESC_TYPE(__pdesc) 				\
+	LE_BITS_TO_4BYTE(__pdesc+4, 28, 2)
+#define GET_RX_DESC_MC(__pdesc) 				\
+	LE_BITS_TO_4BYTE(__pdesc+4, 30, 1)
+#define GET_RX_DESC_BC(__pdesc) 				\
+	LE_BITS_TO_4BYTE(__pdesc+4, 31, 1)
+
+
+#define GET_RX_DESC_SEQ(__pdesc) 				\
+	LE_BITS_TO_4BYTE(__pdesc+8, 0, 12)
+#define GET_RX_DESC_FRAG(__pdesc) 				\
+	LE_BITS_TO_4BYTE(__pdesc+8, 12, 4)
+#define GET_RX_STATUS_DESC_RX_IS_QOS(__pdesc) 				\
+	LE_BITS_TO_4BYTE(__pdesc+8, 16, 1)
+#define GET_RX_STATUS_DESC_WLANHD_IV_LEN(__pdesc) 				\
+	LE_BITS_TO_4BYTE(__pdesc+8, 18, 6)
+#define GET_RX_STATUS_DESC_RPT_SEL(__pdesc) 				\
+	LE_BITS_TO_4BYTE(__pdesc+8, 28, 1)
+
+
+#define GET_RX_DESC_RXMCS(__pdesc) 				\
+	LE_BITS_TO_4BYTE(__pdesc+12, 0, 7)
+#define GET_RX_DESC_RXHT(__pdesc) 				\
+	LE_BITS_TO_4BYTE(__pdesc+12, 6, 1)
+#define GET_RX_STATUS_DESC_RX_GF(__pdesc) 				\
+	LE_BITS_TO_4BYTE(__pdesc+12, 7, 1)
+#define GET_RX_DESC_HTC(__pdesc) 				\
+	LE_BITS_TO_4BYTE(__pdesc+12, 10, 1)
+#define GET_RX_STATUS_DESC_EOSP(__pdesc)		\
+	LE_BITS_TO_4BYTE(__pdesc+12, 11, 1)
+#define GET_RX_STATUS_DESC_BSSID_FIT(__pdesc)		\
+	LE_BITS_TO_4BYTE(__pdesc+12, 12, 2)
+
+#define GET_RX_STATUS_DESC_PATTERN_MATCH(__pdesc)	\
+	LE_BITS_TO_4BYTE(__pdesc+12, 29, 1)
+#define GET_RX_STATUS_DESC_UNICAST_MATCH(__pdesc)	\
+	LE_BITS_TO_4BYTE(__pdesc+12, 30, 1)
+#define GET_RX_STATUS_DESC_MAGIC_MATCH(__pdesc)	\
+	LE_BITS_TO_4BYTE(__pdesc+12, 31, 1)
+
+#define GET_RX_DESC_SPLCP(__pdesc) 				\
+	LE_BITS_TO_4BYTE(__pdesc+16, 0, 1)
+#define GET_RX_STATUS_DESC_LDPC(__pdesc) 				\
+	LE_BITS_TO_4BYTE(__pdesc+16, 1, 1)
+#define GET_RX_STATUS_DESC_STBC(__pdesc) 				\
+	LE_BITS_TO_4BYTE(__pdesc+16, 2, 1)
+#define GET_RX_DESC_BW(__pdesc) 				\
+	LE_BITS_TO_4BYTE(__pdesc+16, 4, 2)
+
+#define GET_RX_DESC_TSFL(__pdesc) 				\
+	LE_BITS_TO_4BYTE(__pdesc+20, 0, 32)
+
+#define GET_RX_DESC_BUFF_ADDR(__pdesc) 			\
+	LE_BITS_TO_4BYTE(__pdesc+24, 0, 32)
+#define GET_RX_DESC_BUFF_ADDR64(__pdesc) 		\
+	LE_BITS_TO_4BYTE(__pdesc+28, 0, 32)
+
+#define SET_RX_DESC_BUFF_ADDR(__pdesc, __val) 	\
+	SET_BITS_TO_LE_4BYTE(__pdesc+24, 0, 32, __val)
+#define SET_RX_DESC_BUFF_ADDR64(__pdesc, __val) \
+	SET_BITS_TO_LE_4BYTE(__pdesc+28, 0, 32, __val)
+
+
+/* TX report 2 format in Rx desc*/
+
+#define GET_RX_RPT2_DESC_PKT_LEN(__pRxStatusDesc)	\
+	LE_BITS_TO_4BYTE(__pRxStatusDesc, 0, 9)
+#define GET_RX_RPT2_DESC_MACID_VALID_1(__pRxStatusDesc)	\
+	LE_BITS_TO_4BYTE(__pRxStatusDesc+16, 0, 32)
+#define GET_RX_RPT2_DESC_MACID_VALID_2(__pRxStatusDesc)	\
+	LE_BITS_TO_4BYTE(__pRxStatusDesc+20, 0, 32)
+
+#define SET_EARLYMODE_PKTNUM(__paddr, __value) 	\
+	SET_BITS_TO_LE_4BYTE(__paddr, 0, 4, __value)
+#define SET_EARLYMODE_LEN0(__paddr, __value) 	\
+	SET_BITS_TO_LE_4BYTE(__paddr, 4, 12, __value)
+#define SET_EARLYMODE_LEN1(__paddr, __value) 	\
+	SET_BITS_TO_LE_4BYTE(__paddr, 16, 12, __value)
+#define SET_EARLYMODE_LEN2_1(__paddr, __value) 	\
+	SET_BITS_TO_LE_4BYTE(__paddr, 28, 4, __value)
+#define SET_EARLYMODE_LEN2_2(__paddr, __value) 	\
+	SET_BITS_TO_LE_4BYTE(__paddr+4, 0, 8, __value)
+#define SET_EARLYMODE_LEN3(__paddr, __value) 	\
+	SET_BITS_TO_LE_4BYTE(__paddr+4, 8, 12, __value)
+#define SET_EARLYMODE_LEN4(__paddr, __value) 	\
+	SET_BITS_TO_LE_4BYTE(__paddr+4, 20, 12, __value)
+
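+/* Clear the TX descriptor body, but never beyond TX_DESC_NEXT_DESC_OFFSET,
+ * so the link to the next descriptor (stored past that offset) is kept. */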
+#define CLEAR_PCI_TX_DESC_CONTENT(__pdesc, _size)		\
+do {								\
+	if (_size > TX_DESC_NEXT_DESC_OFFSET)			\
+		memset(__pdesc, 0, TX_DESC_NEXT_DESC_OFFSET);	\
+	else							\
+		memset(__pdesc, 0, _size);			\
+} while (0)
+
+#define RX_HAL_IS_CCK_RATE(rxmcs)\
+	(rxmcs == DESC_RATE1M ||\
+	 rxmcs == DESC_RATE2M ||\
+	 rxmcs == DESC_RATE5_5M ||\
+	 rxmcs == DESC_RATE11M)
+
+#define IS_LITTLE_ENDIAN	1
+
+struct phy_rx_agc_info_t {
+	#if IS_LITTLE_ENDIAN
+		u8	gain:7, trsw:1;
+	#else
+		u8	trsw:1, gain:7;
+	#endif
+};
+struct phy_status_rpt {
+	struct phy_rx_agc_info_t path_agc[2];
+	u8	ch_corr[2];
+	u8	cck_sig_qual_ofdm_pwdb_all;
+	u8	cck_agc_rpt_ofdm_cfosho_a;
+	u8	cck_rpt_b_ofdm_cfosho_b;
+	u8	rsvd_1;		/* ch_corr_msb */
+	u8	noise_power_db_msb;
+	u8	path_cfotail[2];
+	u8	pcts_mask[2];
+	u8	stream_rxevm[2];
+	u8	path_rxsnr[2];
+	u8 	noise_power_db_lsb;
+	u8	rsvd_2[3];
+	u8 	stream_csi[2];
+	u8 	stream_target_csi[2];
+	u8 	sig_evm;
+	u8 	rsvd_3;
+#if IS_LITTLE_ENDIAN
+	u8 	antsel_rx_keep_2:1;	/*ex_intf_flg:1;*/
+	u8 	sgi_en:1;
+	u8 	rxsc:2;
+	u8 	idle_long:1;
+	u8 	r_ant_train_en:1;
+	u8 	ant_sel_b:1;
+	u8 	ant_sel:1;
+#else	/* _BIG_ENDIAN_	*/
+	u8 	ant_sel:1;
+	u8 	ant_sel_b:1;
+	u8 	r_ant_train_en:1;
+	u8 	idle_long:1;
+	u8 	rxsc:2;
+	u8 	sgi_en:1;
+	u8 	antsel_rx_keep_2:1;	/*ex_intf_flg:1;*/
+#endif
+} __packed;
+
+struct rx_fwinfo_8821ae {
+	u8 gain_trsw[4];
+	u8 pwdb_all;
+	u8 cfosho[4];
+	u8 cfotail[4];
+	char rxevm[2];
+	char rxsnr[4];
+	u8 pdsnr[2];
+	u8 csi_current[2];
+	u8 csi_target[2];
+	u8 sigevm;
+	u8 max_ex_pwr;
+	u8 ex_intf_flag:1;
+	u8 sgi_en:1;
+	u8 rxsc:2;
+	u8 reserve:4;
+} __packed;
+
+struct tx_desc_8821ae {
+	u32 pktsize:16;
+	u32 offset:8;
+	u32 bmc:1;
+	u32 htc:1;
+	u32 lastseg:1;
+	u32 firstseg:1;
+	u32 linip:1;
+	u32 noacm:1;
+	u32 gf:1;
+	u32 own:1;
+
+	u32 macid:6;
+	u32 rsvd0:2;
+	u32 queuesel:5;
+	u32 rd_nav_ext:1;
+	u32 lsig_txop_en:1;
+	u32 pifs:1;
+	u32 rateid:4;
+	u32 nav_usehdr:1;
+	u32 en_descid:1;
+	u32 sectype:2;
+	u32 pktoffset:8;
+
+	u32 rts_rc:6;
+	u32 data_rc:6;
+	u32 agg_en:1;
+	u32 rdg_en:1;
+	u32 bar_retryht:2;
+	u32 agg_break:1;
+	u32 morefrag:1;
+	u32 raw:1;
+	u32 ccx:1;
+	u32 ampdudensity:3;
+	u32 bt_int:1;
+	u32 ant_sela:1;
+	u32 ant_selb:1;
+	u32 txant_cck:2;
+	u32 txant_l:2;
+	u32 txant_ht:2;
+
+	u32 nextheadpage:8;
+	u32 tailpage:8;
+	u32 seq:12;
+	u32 cpu_handle:1;
+	u32 tag1:1;
+	u32 trigger_int:1;
+	u32 hwseq_en:1;
+
+	u32 rtsrate:5;
+	u32 apdcfe:1;
+	u32 qos:1;
+	u32 hwseq_ssn:1;
+	u32 userrate:1;
+	u32 dis_rtsfb:1;
+	u32 dis_datafb:1;
+	u32 cts2self:1;
+	u32 rts_en:1;
+	u32 hwrts_en:1;
+	u32 portid:1;
+	u32 pwr_status:3;
+	u32 waitdcts:1;
+	u32 cts2ap_en:1;
+	u32 txsc:2;
+	u32 stbc:2;
+	u32 txshort:1;
+	u32 txbw:1;
+	u32 rtsshort:1;
+	u32 rtsbw:1;
+	u32 rtssc:2;
+	u32 rtsstbc:2;
+
+	u32 txrate:6;
+	u32 shortgi:1;
+	u32 ccxt:1;
+	u32 txrate_fb_lmt:5;
+	u32 rtsrate_fb_lmt:4;
+	u32 retrylmt_en:1;
+	u32 txretrylmt:6;
+	u32 usb_txaggnum:8;
+
+	u32 txagca:5;
+	u32 txagcb:5;
+	u32 usemaxlen:1;
+	u32 maxaggnum:5;
+	u32 mcsg1maxlen:4;
+	u32 mcsg2maxlen:4;
+	u32 mcsg3maxlen:4;
+	u32 mcs7sgimaxlen:4;
+
+	u32 txbuffersize:16;
+	u32 sw_offset30:8;
+	u32 sw_offset31:4;
+	u32 rsvd1:1;
+	u32 antsel_c:1;
+	u32 null_0:1;
+	u32 null_1:1;
+
+	u32 txbuffaddr;
+	u32 txbufferaddr64;
+	u32 nextdescaddress;
+	u32 nextdescaddress64;
+
+	u32 reserve_pass_pcie_mm_limit[4];
+} __packed;
+
+struct rx_desc_8821ae {
+	u32 length:14;
+	u32 crc32:1;
+	u32 icverror:1;
+	u32 drv_infosize:4;
+	u32 security:3;
+	u32 qos:1;
+	u32 shift:2;
+	u32 phystatus:1;
+	u32 swdec:1;
+	u32 lastseg:1;
+	u32 firstseg:1;
+	u32 eor:1;
+	u32 own:1;
+
+	u32 macid:6;
+	u32 tid:4;
+	u32 hwrsvd:5;
+	u32 paggr:1;
+	u32 faggr:1;
+	u32 a1_fit:4;
+	u32 a2_fit:4;
+	u32 pam:1;
+	u32 pwr:1;
+	u32 moredata:1;
+	u32 morefrag:1;
+	u32 type:2;
+	u32 mc:1;
+	u32 bc:1;
+
+	u32 seq:12;
+	u32 frag:4;
+	u32 nextpktlen:14;
+	u32 nextind:1;
+	u32 rsvd:1;
+
+	u32 rxmcs:6;
+	u32 rxht:1;
+	u32 amsdu:1;
+	u32 splcp:1;
+	u32 bandwidth:1;
+	u32 htc:1;
+	u32 tcpchk_rpt:1;
+	u32 ipcchk_rpt:1;
+	u32 tcpchk_valid:1;
+	u32 hwpcerr:1;
+	u32 hwpcind:1;
+	u32 iv0:16;
+
+	u32 iv1;
+
+	u32 tsfl;
+
+	u32 bufferaddress;
+	u32 bufferaddress64;
+
+} __packed;
+
+/*<delete in kernel start>*/
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(3,7,0))
+void rtl8821ae_tx_fill_desc(struct ieee80211_hw *hw,
+			  struct ieee80211_hdr *hdr, u8 *pdesc_tx, u8 *txbd,
+			  struct ieee80211_tx_info *info, struct sk_buff *skb,
+			  u8 hw_queue, struct rtl_tcb_desc *ptcb_desc);
+#else
+/*<delete in kernel end>*/
+void rtl8821ae_tx_fill_desc(struct ieee80211_hw *hw,
+			  struct ieee80211_hdr *hdr, u8 *pdesc_tx, u8 *txbd,
+			  struct ieee80211_tx_info *info,
+			  struct ieee80211_sta *sta,
+			  struct sk_buff *skb,
+			  u8 hw_queue, struct rtl_tcb_desc *ptcb_desc);
+/*<delete in kernel start>*/
+#endif
+/*<delete in kernel end>*/
+bool rtl8821ae_rx_query_desc(struct ieee80211_hw *hw,
+			   struct rtl_stats *status,
+			   struct ieee80211_rx_status *rx_status,
+			   u8 *pdesc, struct sk_buff *skb);
+void rtl8821ae_set_desc(struct ieee80211_hw *hw, u8 *pdesc,
+			bool istx, u8 desc_name, u8 *val);
+u32 rtl8821ae_get_desc(u8 *pdesc, bool istx, u8 desc_name);
+bool rtl8821ae_is_tx_desc_closed(struct ieee80211_hw *hw,
+				 u8 hw_queue, u16 index);
+void rtl8821ae_tx_polling(struct ieee80211_hw *hw, u8 hw_queue);
+void rtl8821ae_tx_fill_cmddesc(struct ieee80211_hw *hw, u8 *pdesc,
+			     bool b_firstseg, bool b_lastseg,
+			     struct sk_buff *skb);
+#endif
diff --git a/drivers/staging/rtl8821ae/stats.c b/drivers/staging/rtl8821ae/stats.c
new file mode 100644
index 0000000..a20c0f8
--- /dev/null
+++ b/drivers/staging/rtl8821ae/stats.c
@@ -0,0 +1,283 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2009-2010  Realtek Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ * The full GNU General Public License is included in this distribution in the
+ * file called LICENSE.
+ *
+ * Contact Information:
+ * wlanfae <wlanfae@realtek.com>
+ * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
+ * Hsinchu 300, Taiwan.
+ *
+ * Larry Finger <Larry.Finger@lwfinger.net>
+ *
+ *****************************************************************************/
+#include "wifi.h"
+#include "stats.h"
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,2,0))
+#include <linux/export.h>
+#endif
+
+u8 rtl_query_rxpwrpercentage(char antpower)
+{
+	if ((antpower <= -100) || (antpower >= 20))
+		return 0;
+	else if (antpower >= 0)
+		return 100;
+	else
+		return (100 + antpower);
+}
+//EXPORT_SYMBOL(rtl_query_rxpwrpercentage);
+
+u8 rtl_evm_db_to_percentage(char value)
+{
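+	/* EVM is reported as a negative dB value: clamp it to [-33, 0],
+	 * negate and scale by 3 to get a rough 0~100 percentage
+	 * (-33 dB maps to 100). */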
+	char ret_val;
+	ret_val = value;
+
+	if (ret_val >= 0)
+		ret_val = 0;
+	if (ret_val <= -33)
+		ret_val = -33;
+	ret_val = 0 - ret_val;
+	ret_val *= 3;
+	if (ret_val == 99)
+		ret_val = 100;
+
+	return ret_val;
+}
+//EXPORT_SYMBOL(rtl_evm_db_to_percentage);
+
+long rtl_translate_todbm(struct ieee80211_hw *hw,
+			 u8 signal_strength_index)
+{
+	long signal_power;
+
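+	/* Map a 0~100 signal strength index to roughly -95~-45 dBm. */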
+	signal_power = (long)((signal_strength_index + 1) >> 1);
+	signal_power -= 95;
+	return signal_power;
+}
+
+long rtl_signal_scale_mapping(struct ieee80211_hw *hw, long currsig)
+{
+	long retsig;
+
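+	/* Piecewise-linear remap of the raw 0~100 signal percentage
+	 * onto a scale that looks more natural in the UI. */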
+	if (currsig >= 61 && currsig <= 100)
+		retsig = 90 + ((currsig - 60) / 4);
+	else if (currsig >= 41 && currsig <= 60)
+		retsig = 78 + ((currsig - 40) / 2);
+	else if (currsig >= 31 && currsig <= 40)
+		retsig = 66 + (currsig - 30);
+	else if (currsig >= 21 && currsig <= 30)
+		retsig = 54 + (currsig - 20);
+	else if (currsig >= 5 && currsig <= 20)
+		retsig = 42 + (((currsig - 5) * 2) / 3);
+	else if (currsig == 4)
+		retsig = 36;
+	else if (currsig == 3)
+		retsig = 27;
+	else if (currsig == 2)
+		retsig = 18;
+	else if (currsig == 1)
+		retsig = 9;
+	else
+		retsig = currsig;
+
+	return retsig;
+}
+//EXPORT_SYMBOL(rtl_signal_scale_mapping);
+
+void rtl_process_ui_rssi(struct ieee80211_hw *hw, struct rtl_stats *pstatus)
+{
+	struct rtl_priv *rtlpriv = rtl_priv(hw);
+	struct rtl_phy *rtlphy = &(rtlpriv->phy);
+	u8 rfpath;
+	u32 last_rssi, tmpval;
+
+	if (!pstatus->b_packet_toself && !pstatus->b_packet_beacon)
+		return;
+
+	rtlpriv->stats.pwdb_all_cnt += pstatus->rx_pwdb_all;
+	rtlpriv->stats.rssi_calculate_cnt++;
+
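+	/* Sliding-window average of the last PHY_RSSI_SLID_WIN_MAX
+	 * signal strength samples, converted to dBm for the UI. */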
+	if (rtlpriv->stats.ui_rssi.total_num++ >= PHY_RSSI_SLID_WIN_MAX) {
+		rtlpriv->stats.ui_rssi.total_num = PHY_RSSI_SLID_WIN_MAX;
+		last_rssi = rtlpriv->stats.ui_rssi.elements[
+			rtlpriv->stats.ui_rssi.index];
+		rtlpriv->stats.ui_rssi.total_val -= last_rssi;
+	}
+	rtlpriv->stats.ui_rssi.total_val += pstatus->signalstrength;
+	rtlpriv->stats.ui_rssi.elements[rtlpriv->stats.ui_rssi.index++] =
+	    pstatus->signalstrength;
+	if (rtlpriv->stats.ui_rssi.index >= PHY_RSSI_SLID_WIN_MAX)
+		rtlpriv->stats.ui_rssi.index = 0;
+	tmpval = rtlpriv->stats.ui_rssi.total_val /
+		rtlpriv->stats.ui_rssi.total_num;
+	rtlpriv->stats.signal_strength = rtl_translate_todbm(hw,
+		(u8) tmpval);
+	pstatus->rssi = rtlpriv->stats.signal_strength;
+
+	if (pstatus->b_is_cck)
+		return;
+
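+	/* Exponentially smooth the per-path RSSI with RX_SMOOTH_FACTOR,
+	 * rounding up when the new sample exceeds the running average. */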
+	for (rfpath = RF90_PATH_A; rfpath < rtlphy->num_total_rfpath;
+	     rfpath++) {
+		if (rtlpriv->stats.rx_rssi_percentage[rfpath] == 0) {
+			rtlpriv->stats.rx_rssi_percentage[rfpath] =
+			    pstatus->rx_mimo_signalstrength[rfpath];
+
+		}
+		if (pstatus->rx_mimo_signalstrength[rfpath] >
+		    rtlpriv->stats.rx_rssi_percentage[rfpath]) {
+			rtlpriv->stats.rx_rssi_percentage[rfpath] =
+			    ((rtlpriv->stats.rx_rssi_percentage[rfpath] *
+			      (RX_SMOOTH_FACTOR - 1)) +
+			     (pstatus->rx_mimo_signalstrength[rfpath])) /
+			    (RX_SMOOTH_FACTOR);
+			rtlpriv->stats.rx_rssi_percentage[rfpath] =
+			    rtlpriv->stats.rx_rssi_percentage[rfpath] + 1;
+		} else {
+			rtlpriv->stats.rx_rssi_percentage[rfpath] =
+			    ((rtlpriv->stats.rx_rssi_percentage[rfpath] *
+			      (RX_SMOOTH_FACTOR - 1)) +
+			     (pstatus->rx_mimo_signalstrength[rfpath])) /
+			    (RX_SMOOTH_FACTOR);
+		}
+		rtlpriv->stats.rx_snr_db[rfpath] = pstatus->rx_snr[rfpath];
+		rtlpriv->stats.rx_evm_dbm[rfpath] =
+					pstatus->rx_mimo_evm_dbm[rfpath];
+		rtlpriv->stats.rx_cfo_short[rfpath] =
+					pstatus->cfo_short[rfpath];
+		rtlpriv->stats.rx_cfo_tail[rfpath] = pstatus->cfo_tail[rfpath];
+	}
+}
+
+static void rtl_update_rxsignalstatistics(struct ieee80211_hw *hw,
+					  struct rtl_stats *pstatus)
+{
+	struct rtl_priv *rtlpriv = rtl_priv(hw);
+	int weighting = 0;
+
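+	/* Weighted moving average of the received signal power, biased
+	 * by +/-5 toward the direction the new sample is moving. */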
+	if (rtlpriv->stats.recv_signal_power == 0)
+		rtlpriv->stats.recv_signal_power = pstatus->recvsignalpower;
+	if (pstatus->recvsignalpower > rtlpriv->stats.recv_signal_power)
+		weighting = 5;
+	else if (pstatus->recvsignalpower < rtlpriv->stats.recv_signal_power)
+		weighting = (-5);
+	rtlpriv->stats.recv_signal_power = (rtlpriv->stats.recv_signal_power *
+		5 + pstatus->recvsignalpower + weighting) / 6;
+}
+
+static void rtl_process_pwdb(struct ieee80211_hw *hw, struct rtl_stats *pstatus)
+{
+	struct rtl_priv *rtlpriv = rtl_priv(hw);
+	struct rtl_sta_info *drv_priv = NULL;
+	struct ieee80211_sta *sta = NULL;
+	long undecorated_smoothed_pwdb;
+
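+	/* Smooth the PWDB with RX_SMOOTH_FACTOR, per station in AP/IBSS
+	 * mode or globally in station mode, then update the RX signal
+	 * statistics. */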
+	rcu_read_lock();
+	if (rtlpriv->mac80211.opmode != NL80211_IFTYPE_STATION)
+		sta = rtl_find_sta(hw, pstatus->psaddr);
+
+	/* adhoc or ap mode */
+	if (sta) {
+		drv_priv = (struct rtl_sta_info *) sta->drv_priv;
+		undecorated_smoothed_pwdb =
+			drv_priv->rssi_stat.undecorated_smoothed_pwdb;
+	} else {
+		undecorated_smoothed_pwdb =
+			rtlpriv->dm.undecorated_smoothed_pwdb;
+	}
+
+	if (undecorated_smoothed_pwdb < 0)
+		undecorated_smoothed_pwdb = pstatus->rx_pwdb_all;
+	if (pstatus->rx_pwdb_all > (u32) undecorated_smoothed_pwdb) {
+		undecorated_smoothed_pwdb = (((undecorated_smoothed_pwdb) *
+		      (RX_SMOOTH_FACTOR - 1)) +
+		     (pstatus->rx_pwdb_all)) / (RX_SMOOTH_FACTOR);
+		undecorated_smoothed_pwdb = undecorated_smoothed_pwdb + 1;
+	} else {
+		undecorated_smoothed_pwdb = (((undecorated_smoothed_pwdb) *
+		      (RX_SMOOTH_FACTOR - 1)) +
+		     (pstatus->rx_pwdb_all)) / (RX_SMOOTH_FACTOR);
+	}
+
+	if (sta) {
+		drv_priv->rssi_stat.undecorated_smoothed_pwdb =
+			undecorated_smoothed_pwdb;
+	} else {
+		rtlpriv->dm.undecorated_smoothed_pwdb = undecorated_smoothed_pwdb;
+	}
+	rcu_read_unlock();
+
+	rtl_update_rxsignalstatistics(hw, pstatus);
+}
+
+static void rtl_process_ui_link_quality(struct ieee80211_hw *hw,
+					struct rtl_stats *pstatus)
+{
+	struct rtl_priv *rtlpriv = rtl_priv(hw);
+	u32 last_evm, n_stream, tmpval;
+
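+	/* Sliding-window average of the reported signal quality plus
+	 * per-stream EVM smoothing with RX_SMOOTH_FACTOR. */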
+	if (pstatus->signalquality == 0)
+		return;
+
+	if (rtlpriv->stats.ui_link_quality.total_num++ >=
+	    PHY_LINKQUALITY_SLID_WIN_MAX) {
+		rtlpriv->stats.ui_link_quality.total_num =
+		    PHY_LINKQUALITY_SLID_WIN_MAX;
+		last_evm = rtlpriv->stats.ui_link_quality.elements[
+			rtlpriv->stats.ui_link_quality.index];
+		rtlpriv->stats.ui_link_quality.total_val -= last_evm;
+	}
+	rtlpriv->stats.ui_link_quality.total_val += pstatus->signalquality;
+	rtlpriv->stats.ui_link_quality.elements[
+		rtlpriv->stats.ui_link_quality.index++] =
+							pstatus->signalquality;
+	if (rtlpriv->stats.ui_link_quality.index >=
+	    PHY_LINKQUALITY_SLID_WIN_MAX)
+		rtlpriv->stats.ui_link_quality.index = 0;
+	tmpval = rtlpriv->stats.ui_link_quality.total_val /
+	    rtlpriv->stats.ui_link_quality.total_num;
+	rtlpriv->stats.signal_quality = tmpval;
+	rtlpriv->stats.last_sigstrength_inpercent = tmpval;
+	for (n_stream = 0; n_stream < 2; n_stream++) {
+		if (pstatus->rx_mimo_signalquality[n_stream] != -1) {
+			if (rtlpriv->stats.rx_evm_percentage[n_stream] == 0) {
+				rtlpriv->stats.rx_evm_percentage[n_stream] =
+				    pstatus->rx_mimo_signalquality[n_stream];
+			}
+			rtlpriv->stats.rx_evm_percentage[n_stream] =
+			    ((rtlpriv->stats.rx_evm_percentage[n_stream]
+			      * (RX_SMOOTH_FACTOR - 1)) +
+			     (pstatus->rx_mimo_signalquality[n_stream] * 1)) /
+			    (RX_SMOOTH_FACTOR);
+		}
+	}
+}
+
+void rtl_process_phyinfo(struct ieee80211_hw *hw, u8 *buffer,
+			 struct rtl_stats *pstatus)
+{
+
+	if (!pstatus->b_packet_matchbssid)
+		return;
+
+	rtl_process_ui_rssi(hw, pstatus);
+	rtl_process_pwdb(hw, pstatus);
+	rtl_process_ui_link_quality(hw, pstatus);
+}
+//EXPORT_SYMBOL(rtl_process_phyinfo);
diff --git a/drivers/staging/rtl8821ae/stats.h b/drivers/staging/rtl8821ae/stats.h
new file mode 100644
index 0000000..d69d0cf
--- /dev/null
+++ b/drivers/staging/rtl8821ae/stats.h
@@ -0,0 +1,46 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2009-2010  Realtek Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ * The full GNU General Public License is included in this distribution in the
+ * file called LICENSE.
+ *
+ * Contact Information:
+ * wlanfae <wlanfae@realtek.com>
+ * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
+ * Hsinchu 300, Taiwan.
+ *
+ * Larry Finger <Larry.Finger@lwfinger.net>
+ *
+ *****************************************************************************/
+
+#ifndef __RTL_STATS_H__
+#define __RTL_STATS_H__
+
+#define	PHY_RSSI_SLID_WIN_MAX			100
+#define	PHY_LINKQUALITY_SLID_WIN_MAX		20
+#define	PHY_BEACON_RSSI_SLID_WIN_MAX		10
+
+/* Rx smooth factor */
+#define	RX_SMOOTH_FACTOR			20
+
+u8 rtl_query_rxpwrpercentage(char antpower);
+u8 rtl_evm_db_to_percentage(char value);
+long rtl_signal_scale_mapping(struct ieee80211_hw *hw, long currsig);
+void rtl_process_phyinfo(struct ieee80211_hw *hw, u8 *buffer,
+			 struct rtl_stats *pstatus);
+
+#endif
diff --git a/drivers/staging/rtl8821ae/wifi.h b/drivers/staging/rtl8821ae/wifi.h
new file mode 100644
index 0000000..76bef93
--- /dev/null
+++ b/drivers/staging/rtl8821ae/wifi.h
@@ -0,0 +1,2532 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2009-2010  Realtek Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ * The full GNU General Public License is included in this distribution in the
+ * file called LICENSE.
+ *
+ * Contact Information:
+ * wlanfae <wlanfae@realtek.com>
+ * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
+ * Hsinchu 300, Taiwan.
+ *
+ * Larry Finger <Larry.Finger@lwfinger.net>
+ *
+ *****************************************************************************/
+
+#ifndef __RTL_WIFI_H__
+#define __RTL_WIFI_H__
+
+#include <linux/interrupt.h>
+#include <linux/sched.h>
+#include <linux/firmware.h>
+#include <linux/version.h>
+#include <linux/etherdevice.h>
+#include <net/mac80211.h>
+#include "debug.h"
+
+
+#define RF_CHANGE_BY_INIT		0
+#define RF_CHANGE_BY_IPS 		BIT(28)
+#define RF_CHANGE_BY_PS 		BIT(29)
+#define RF_CHANGE_BY_HW 		BIT(30)
+#define RF_CHANGE_BY_SW 		BIT(31)
+
+#define IQK_ADDA_REG_NUM		16
+#define IQK_MAC_REG_NUM			4
+#define IQK_THRESHOLD			8
+
+#define MAX_KEY_LEN			61
+#define KEY_BUF_SIZE			5
+
+/* QoS related. */
+/*aci: 0x00	Best Effort*/
+/*aci: 0x01	Background*/
+/*aci: 0x10	Video*/
+/*aci: 0x11	Voice*/
+/*Max: define total number.*/
+#define AC0_BE				0
+#define AC1_BK				1
+#define AC2_VI				2
+#define AC3_VO				3
+#define AC_MAX				4
+#define QOS_QUEUE_NUM			4
+#define RTL_MAC80211_NUM_QUEUE		5
+
+#define QBSS_LOAD_SIZE			5
+#define MAX_WMMELE_LENGTH		64
+
+#define TOTAL_CAM_ENTRY 		32
+
+/*slot time for 11g. */
+#define RTL_SLOT_TIME_9			9
+#define RTL_SLOT_TIME_20		20
+
+/*related with tcp/ip. */
+/*if_ehther.h*/
+#define ETH_P_PAE 			0x888E	/*Port Access Entity
+						 *(IEEE 802.1X) */
+#define ETH_P_IP        		0x0800	/*Internet Protocol packet */
+#define ETH_P_ARP       		0x0806	/*Address Resolution packet */
+#define SNAP_SIZE 			6
+#define PROTOC_TYPE_SIZE		2
+
+/*related with 802.11 frame*/
+#define MAC80211_3ADDR_LEN 		24
+#define MAC80211_4ADDR_LEN 		30
+
+#define CHANNEL_MAX_NUMBER		(14 + 24 + 21)	/* 14 is the max
+							 * channel number */
+#define CHANNEL_MAX_NUMBER_2G		14
+#define CHANNEL_MAX_NUMBER_5G	    	54 /* Please refer to
+					    *"phy_GetChnlGroup8812A" and
+					    * "Hal_ReadTxPowerInfo8812A"*/
+#define CHANNEL_MAX_NUMBER_5G_80M	7
+#define CHANNEL_GROUP_MAX		(3 + 9)	/* ch1~3, ch4~9, ch10~14
+						 * total three groups */
+#define MAX_PG_GROUP 			13
+#define	CHANNEL_GROUP_MAX_2G		3
+#define	CHANNEL_GROUP_IDX_5GL		3
+#define	CHANNEL_GROUP_IDX_5GM		6
+#define	CHANNEL_GROUP_IDX_5GH		9
+#define	CHANNEL_GROUP_MAX_5G		9
+#define CHANNEL_MAX_NUMBER_2G		14
+#define AVG_THERMAL_NUM			8
+#define AVG_THERMAL_NUM_92E		4
+#define AVG_THERMAL_NUM_88E		4
+#define AVG_THERMAL_NUM_8723BE		4
+#define MAX_TID_COUNT			9
+#define MAX_NUM_RATES			264
+
+/*for 88E use*/
+/*It must always be set to 4; otherwise the efuse table read sequence will be wrong.*/
+#define MAX_TX_COUNT			4
+#define	MAX_RF_PATH			4
+#define	MAX_CHNL_GROUP_24G		6
+#define	MAX_CHNL_GROUP_5G		14
+
+/* BK, BE, VI, VO, HCCA, MANAGEMENT, COMMAND, HIGH, BEACON. */
+#define MAX_TX_QUEUE 			9
+
+#define TX_PWR_BY_RATE_NUM_BAND		2
+#define TX_PWR_BY_RATE_NUM_RF		4
+#define TX_PWR_BY_RATE_NUM_SECTION	12
+#define MAX_BASE_NUM_IN_PHY_REG_PG_24G  6
+#define MAX_BASE_NUM_IN_PHY_REG_PG_5G	5
+
+#define DELTA_SWINGIDX_SIZE	30
+#define BAND_NUM 				3
+/*For now this is only for the 8192ee;
+ *it is not working yet, so keep it 0.*/
+#define DMA_IS_64BIT 0
+#define RTL8192EE_SEG_NUM		1 /* 0:2 seg, 1: 4 seg, 2: 8 seg */
+
+struct txpower_info_2g {
+	u8 index_cck_base[MAX_RF_PATH][MAX_CHNL_GROUP_24G];
+	u8 index_bw40_base[MAX_RF_PATH][MAX_CHNL_GROUP_24G];
+	/*If only one tx, only BW20 and OFDM are used.*/
+	u8 cck_diff[MAX_RF_PATH][MAX_TX_COUNT];
+	u8 ofdm_diff[MAX_RF_PATH][MAX_TX_COUNT];
+	u8 bw20_diff[MAX_RF_PATH][MAX_TX_COUNT];
+	u8 bw40_diff[MAX_RF_PATH][MAX_TX_COUNT];
+};
+
+struct txpower_info_5g {
+	u8 index_bw40_base[MAX_RF_PATH][MAX_CHNL_GROUP_5G];
+	/*If only one tx, only BW20, OFDM, BW80 and BW160 are used.*/
+	u8 ofdm_diff[MAX_RF_PATH][MAX_TX_COUNT];
+	u8 bw20_diff[MAX_RF_PATH][MAX_TX_COUNT];
+	u8 bw40_diff[MAX_RF_PATH][MAX_TX_COUNT];
+	u8 bw80_diff[MAX_RF_PATH][MAX_TX_COUNT];
+	u8 bw160_diff[MAX_RF_PATH][MAX_TX_COUNT];
+};
+
+
+/* for early mode */
+#define EM_HDR_LEN			8
+#define FCS_LEN				4
+
+#define MAX_VIRTUAL_MAC			1
+
+enum rf_tx_num {
+	RF_1TX = 0,
+	RF_2TX,
+	RF_MAX_TX_NUM,
+	RF_TX_NUM_NONIMPLEMENT,
+};
+
+enum rate_section {
+	CCK = 0,
+	OFDM,
+	HT_MCS0_MCS7,
+	HT_MCS8_MCS15,
+	VHT_1SSMCS0_1SSMCS9,
+	VHT_2SSMCS0_2SSMCS9,
+};
+
+enum intf_type {
+	INTF_PCI = 0,
+	INTF_USB = 1,
+};
+
+enum radio_path {
+	RF90_PATH_A = 0,
+	RF90_PATH_B = 1,
+	RF90_PATH_C = 2,
+	RF90_PATH_D = 3,
+};
+
+enum rt_eeprom_type {
+	EEPROM_93C46,
+	EEPROM_93C56,
+	EEPROM_BOOT_EFUSE,
+};
+
+enum rtl_status {
+	RTL_STATUS_INTERFACE_START = 0,
+};
+
+enum hardware_type {
+	HARDWARE_TYPE_RTL8192E,
+	HARDWARE_TYPE_RTL8192U,
+	HARDWARE_TYPE_RTL8192SE,
+	HARDWARE_TYPE_RTL8192SU,
+	HARDWARE_TYPE_RTL8192CE,
+	HARDWARE_TYPE_RTL8192CU,
+	HARDWARE_TYPE_RTL8192DE,
+	HARDWARE_TYPE_RTL8192DU,
+	HARDWARE_TYPE_RTL8723AE,
+	HARDWARE_TYPE_RTL8188EE,
+	HARDWARE_TYPE_RTL8723BE,
+	HARDWARE_TYPE_RTL8192EE,
+	HARDWARE_TYPE_RTL8821AE,
+	HARDWARE_TYPE_RTL8812AE,
+	/* keep it last */
+	HARDWARE_TYPE_NUM
+};
+
+enum scan_operation_backup_opt {
+	SCAN_OPT_BACKUP_BAND0=0,
+	SCAN_OPT_BACKUP_BAND1,
+	SCAN_OPT_RESTORE,
+	SCAN_OPT_MAX
+};
+
+/*RF state.*/
+enum rf_pwrstate {
+	ERFON,
+	ERFSLEEP,
+	ERFOFF
+};
+
+struct bb_reg_def {
+	u32 rfintfs;
+	u32 rfintfi;
+	u32 rfintfo;
+	u32 rfintfe;
+	u32 rf3wire_offset;
+	u32 rflssi_select;
+	u32 rftxgain_stage;
+	u32 rfhssi_para1;
+	u32 rfhssi_para2;
+	u32 rfswitch_control;
+	u32 rfagc_control1;
+	u32 rfagc_control2;
+	u32 rfrxiq_imbalance;
+	u32 rfrx_afe;
+	u32 rftxiq_imbalance;
+	u32 rftx_afe;
+	u32 rflssi_readback;
+	u32 rflssi_readbackpi;
+};
+
+enum io_type {
+	IO_CMD_PAUSE_BAND0_DM_BY_SCAN = 0,
+	IO_CMD_PAUSE_BAND1_DM_BY_SCAN = 1,
+	IO_CMD_RESUME_DM_BY_SCAN = 2,
+};
+
+enum hw_variables {
+	HW_VAR_ETHER_ADDR,
+	HW_VAR_MULTICAST_REG,
+	HW_VAR_BASIC_RATE,
+	HW_VAR_BSSID,
+	HW_VAR_MEDIA_STATUS,
+	HW_VAR_SECURITY_CONF,
+	HW_VAR_BEACON_INTERVAL,
+	HW_VAR_ATIM_WINDOW,
+	HW_VAR_LISTEN_INTERVAL,
+	HW_VAR_CS_COUNTER,
+	HW_VAR_DEFAULTKEY0,
+	HW_VAR_DEFAULTKEY1,
+	HW_VAR_DEFAULTKEY2,
+	HW_VAR_DEFAULTKEY3,
+	HW_VAR_SIFS,
+	HW_VAR_DIFS,
+	HW_VAR_EIFS,
+	HW_VAR_SLOT_TIME,
+	HW_VAR_ACK_PREAMBLE,
+	HW_VAR_CW_CONFIG,
+	HW_VAR_CW_VALUES,
+	HW_VAR_RATE_FALLBACK_CONTROL,
+	HW_VAR_CONTENTION_WINDOW,
+	HW_VAR_RETRY_COUNT,
+	HW_VAR_TR_SWITCH,
+	HW_VAR_COMMAND,
+	HW_VAR_WPA_CONFIG,
+	HW_VAR_AMPDU_MIN_SPACE,
+	HW_VAR_SHORTGI_DENSITY,
+	HW_VAR_AMPDU_FACTOR,
+	HW_VAR_MCS_RATE_AVAILABLE,
+	HW_VAR_AC_PARAM,
+	HW_VAR_ACM_CTRL,
+	HW_VAR_DIS_Req_Qsize,
+	HW_VAR_CCX_CHNL_LOAD,
+	HW_VAR_CCX_NOISE_HISTOGRAM,
+	HW_VAR_CCX_CLM_NHM,
+	HW_VAR_TxOPLimit,
+	HW_VAR_TURBO_MODE,
+	HW_VAR_RF_STATE,
+	HW_VAR_RF_OFF_BY_HW,
+	HW_VAR_BUS_SPEED,
+	HW_VAR_SET_DEV_POWER,
+
+	HW_VAR_RCR,
+	HW_VAR_RATR_0,
+	HW_VAR_RRSR,
+	HW_VAR_CPU_RST,
+	HW_VAR_CECHK_BSSID,
+	HW_VAR_LBK_MODE,
+	HW_VAR_AES_11N_FIX,
+	HW_VAR_USB_RX_AGGR,
+	HW_VAR_USER_CONTROL_TURBO_MODE,
+	HW_VAR_RETRY_LIMIT,
+	HW_VAR_INIT_TX_RATE,
+	HW_VAR_TX_RATE_REG,
+	HW_VAR_EFUSE_USAGE,
+	HW_VAR_EFUSE_BYTES,
+	HW_VAR_AUTOLOAD_STATUS,
+	HW_VAR_RF_2R_DISABLE,
+	HW_VAR_SET_RPWM,
+	HW_VAR_H2C_FW_PWRMODE,
+	HW_VAR_H2C_FW_JOINBSSRPT,
+	HW_VAR_H2C_FW_MEDIASTATUSRPT,
+	HW_VAR_H2C_FW_P2P_PS_OFFLOAD,
+	HW_VAR_FW_PSMODE_STATUS,
+	HW_VAR_RESUME_CLK_ON,
+	HW_VAR_FW_LPS_ACTION,
+	HW_VAR_1X1_RECV_COMBINE,
+	HW_VAR_STOP_SEND_BEACON,
+	HW_VAR_TSF_TIMER,
+	HW_VAR_IO_CMD,
+
+	HW_VAR_RF_RECOVERY,
+	HW_VAR_H2C_FW_UPDATE_GTK,
+	HW_VAR_WF_MASK,
+	HW_VAR_WF_CRC,
+	HW_VAR_WF_IS_MAC_ADDR,
+	HW_VAR_H2C_FW_OFFLOAD,
+	HW_VAR_RESET_WFCRC,
+
+	HW_VAR_HANDLE_FW_C2H,
+	HW_VAR_DL_FW_RSVD_PAGE,
+	HW_VAR_AID,
+	HW_VAR_HW_SEQ_ENABLE,
+	HW_VAR_CORRECT_TSF,
+	HW_VAR_BCN_VALID,
+	HW_VAR_FWLPS_RF_ON,
+	HW_VAR_DUAL_TSF_RST,
+	HW_VAR_SWITCH_EPHY_WoWLAN,
+	HW_VAR_INT_MIGRATION,
+	HW_VAR_INT_AC,
+	HW_VAR_RF_TIMING,
+
+	HAL_DEF_WOWLAN,
+	HW_VAR_MRC,
+	HW_VAR_KEEP_ALIVE,
+	HW_VAR_NAV_UPPER,
+};
+
+enum rt_media_status {
+	RT_MEDIA_DISCONNECT = 0,
+	RT_MEDIA_CONNECT = 1
+};
+
+enum rt_oem_id {
+	RT_CID_DEFAULT = 0,
+	RT_CID_8187_ALPHA0 = 1,
+	RT_CID_8187_SERCOMM_PS = 2,
+	RT_CID_8187_HW_LED = 3,
+	RT_CID_8187_NETGEAR = 4,
+	RT_CID_WHQL = 5,
+	RT_CID_819x_CAMEO = 6,
+	RT_CID_819x_RUNTOP = 7,
+	RT_CID_819x_Senao = 8,
+	RT_CID_TOSHIBA = 9,
+	RT_CID_819x_Netcore = 10,
+	RT_CID_Nettronix = 11,
+	RT_CID_DLINK = 12,
+	RT_CID_PRONET = 13,
+	RT_CID_COREGA = 14,
+	RT_CID_819x_ALPHA = 15,
+	RT_CID_819x_Sitecom = 16,
+	RT_CID_CCX = 17,
+	RT_CID_819x_Lenovo = 18,
+	RT_CID_819x_QMI = 19,
+	RT_CID_819x_Edimax_Belkin = 20,
+	RT_CID_819x_Sercomm_Belkin = 21,
+	RT_CID_819x_CAMEO1 = 22,
+	RT_CID_819x_MSI = 23,
+	RT_CID_819x_Acer = 24,
+	RT_CID_819x_HP = 27,
+	RT_CID_819x_CLEVO = 28,
+	RT_CID_819x_Arcadyan_Belkin = 29,
+	RT_CID_819x_SAMSUNG = 30,
+	RT_CID_819x_WNC_COREGA = 31,
+	RT_CID_819x_Foxcoon = 32,
+	RT_CID_819x_DELL = 33,
+	RT_CID_819x_PRONETS = 34,
+	RT_CID_819x_Edimax_ASUS = 35,
+	RT_CID_NETGEAR = 36,
+	RT_CID_PLANEX = 37,
+	RT_CID_CC_C = 38,
+};
+
+enum hw_descs {
+	HW_DESC_OWN,
+	HW_DESC_RXOWN,
+	HW_DESC_TX_NEXTDESC_ADDR,
+	HW_DESC_TXBUFF_ADDR,
+	HW_DESC_RXBUFF_ADDR,
+	HW_DESC_RXPKT_LEN,
+	HW_DESC_RXERO,
+	HW_DESC_RX_PREPARE,
+};
+
+enum prime_sc {
+	PRIME_CHNL_OFFSET_DONT_CARE = 0,
+	PRIME_CHNL_OFFSET_LOWER = 1,
+	PRIME_CHNL_OFFSET_UPPER = 2,
+};
+
+enum rf_type {
+	RF_1T1R = 0,
+	RF_1T2R = 1,
+	RF_2T2R = 2,
+	RF_2T2R_GREEN = 3,
+};
+
+enum ht_channel_width {
+	HT_CHANNEL_WIDTH_20 = 0,
+	HT_CHANNEL_WIDTH_20_40 = 1,
+	HT_CHANNEL_WIDTH_80 = 2,
+};
+
+/* Ref: 802.11i spec D10.0 7.3.2.25.1
+Cipher Suites Encryption Algorithms */
+enum rt_enc_alg {
+	NO_ENCRYPTION = 0,
+	WEP40_ENCRYPTION = 1,
+	TKIP_ENCRYPTION = 2,
+	RSERVED_ENCRYPTION = 3,
+	AESCCMP_ENCRYPTION = 4,
+	WEP104_ENCRYPTION = 5,
+	AESCMAC_ENCRYPTION = 6,	/*IEEE802.11w */
+};
+
+enum rtl_hal_state {
+	_HAL_STATE_STOP = 0,
+	_HAL_STATE_START = 1,
+};
+
+enum rtl_var_map {
+	/*reg map */
+	SYS_ISO_CTRL = 0,
+	SYS_FUNC_EN,
+	SYS_CLK,
+	MAC_RCR_AM,
+	MAC_RCR_AB,
+	MAC_RCR_ACRC32,
+	MAC_RCR_ACF,
+	MAC_RCR_AAP,
+	MAC_HIMR,
+	MAC_HIMRE,
+	MAC_HSISR,
+
+	/*efuse map */
+	EFUSE_TEST,
+	EFUSE_CTRL,
+	EFUSE_CLK,
+	EFUSE_CLK_CTRL,
+	EFUSE_PWC_EV12V,
+	EFUSE_FEN_ELDR,
+	EFUSE_LOADER_CLK_EN,
+	EFUSE_ANA8M,
+	EFUSE_HWSET_MAX_SIZE,
+	EFUSE_MAX_SECTION_MAP,
+	EFUSE_REAL_CONTENT_SIZE,
+	EFUSE_OOB_PROTECT_BYTES_LEN,
+	EFUSE_ACCESS,
+	/*CAM map */
+	RWCAM,
+	WCAMI,
+	RCAMO,
+	CAMDBG,
+	SECR,
+	SEC_CAM_NONE,
+	SEC_CAM_WEP40,
+	SEC_CAM_TKIP,
+	SEC_CAM_AES,
+	SEC_CAM_WEP104,
+
+	/*IMR map */
+	RTL_IMR_BCNDMAINT6,	/*Beacon DMA Interrupt 6 */
+	RTL_IMR_BCNDMAINT5,	/*Beacon DMA Interrupt 5 */
+	RTL_IMR_BCNDMAINT4,	/*Beacon DMA Interrupt 4 */
+	RTL_IMR_BCNDMAINT3,	/*Beacon DMA Interrupt 3 */
+	RTL_IMR_BCNDMAINT2,	/*Beacon DMA Interrupt 2 */
+	RTL_IMR_BCNDMAINT1,	/*Beacon DMA Interrupt 1 */
+	RTL_IMR_BCNDOK8,	/*Beacon Queue DMA OK Interrupt 8 */
+	RTL_IMR_BCNDOK7,	/*Beacon Queue DMA OK Interrupt 7 */
+	RTL_IMR_BCNDOK6,	/*Beacon Queue DMA OK Interrupt 6 */
+	RTL_IMR_BCNDOK5,	/*Beacon Queue DMA OK Interrupt 5 */
+	RTL_IMR_BCNDOK4,	/*Beacon Queue DMA OK Interrupt 4 */
+	RTL_IMR_BCNDOK3,	/*Beacon Queue DMA OK Interrupt 3 */
+	RTL_IMR_BCNDOK2,	/*Beacon Queue DMA OK Interrupt 2 */
+	RTL_IMR_BCNDOK1,	/*Beacon Queue DMA OK Interrupt 1 */
+	RTL_IMR_TIMEOUT2,	/*Timeout interrupt 2 */
+	RTL_IMR_TIMEOUT1,	/*Timeout interrupt 1 */
+	RTL_IMR_TXFOVW,		/*Transmit FIFO Overflow */
+	RTL_IMR_PSTIMEOUT,	/*Power save time out interrupt */
+	RTL_IMR_BcnInt,		/*Beacon DMA Interrupt 0 */
+	RTL_IMR_RXFOVW,		/*Receive FIFO Overflow */
+	RTL_IMR_RDU,		/*Receive Descriptor Unavailable */
+	RTL_IMR_ATIMEND,	/*For 92C, ATIM Window End Interrupt */
+	RTL_IMR_BDOK,		/*Beacon Queue DMA OK Interrupt */
+	RTL_IMR_HIGHDOK,	/*High Queue DMA OK Interrupt */
+	RTL_IMR_COMDOK,		/*Command Queue DMA OK Interrupt*/
+	RTL_IMR_TBDOK,		/*Transmit Beacon OK Interrupt */
+	RTL_IMR_MGNTDOK,	/*Management Queue DMA OK Interrupt */
+	RTL_IMR_TBDER,		/*For 92C, Transmit Beacon Error Interrupt */
+	RTL_IMR_BKDOK,		/*AC_BK DMA OK Interrupt */
+	RTL_IMR_BEDOK,		/*AC_BE DMA OK Interrupt */
+	RTL_IMR_VIDOK,		/*AC_VI DMA OK Interrupt */
+	RTL_IMR_VODOK,		/*AC_VO DMA Interrupt */
+	RTL_IMR_ROK,		/*Receive DMA OK Interrupt */
+	RTL_IMR_HSISR_IND,  /*HSISR Interrupt*/
+	RTL_IBSS_INT_MASKS,	/*(RTL_IMR_BcnInt | RTL_IMR_TBDOK |
+				 * RTL_IMR_TBDER) */
+	RTL_IMR_C2HCMD,		/*fw interrupt*/
+
+	/*CCK Rates, TxHT = 0 */
+	RTL_RC_CCK_RATE1M,
+	RTL_RC_CCK_RATE2M,
+	RTL_RC_CCK_RATE5_5M,
+	RTL_RC_CCK_RATE11M,
+
+	/*OFDM Rates, TxHT = 0 */
+	RTL_RC_OFDM_RATE6M,
+	RTL_RC_OFDM_RATE9M,
+	RTL_RC_OFDM_RATE12M,
+	RTL_RC_OFDM_RATE18M,
+	RTL_RC_OFDM_RATE24M,
+	RTL_RC_OFDM_RATE36M,
+	RTL_RC_OFDM_RATE48M,
+	RTL_RC_OFDM_RATE54M,
+
+	RTL_RC_HT_RATEMCS7,
+	RTL_RC_HT_RATEMCS15,
+
+	/*keep it last */
+	RTL_VAR_MAP_MAX,
+};
+
+/*Firmware PS mode for control LPS.*/
+enum _fw_ps_mode {
+	FW_PS_ACTIVE_MODE = 0,
+	FW_PS_MIN_MODE = 1,
+	FW_PS_MAX_MODE = 2,
+	FW_PS_DTIM_MODE = 3,
+	FW_PS_VOIP_MODE = 4,
+	FW_PS_UAPSD_WMM_MODE = 5,
+	FW_PS_UAPSD_MODE = 6,
+	FW_PS_IBSS_MODE = 7,
+	FW_PS_WWLAN_MODE = 8,
+	FW_PS_PM_Radio_Off = 9,
+	FW_PS_PM_Card_Disable = 10,
+};
+
+enum rt_psmode {
+	EACTIVE,		/*Active/Continuous access. */
+	EMAXPS,			/*Max power save mode. */
+	EFASTPS,		/*Fast power save mode. */
+	EAUTOPS,		/*Auto power save mode. */
+};
+
+/*LED related.*/
+enum led_ctl_mode {
+	LED_CTL_POWER_ON = 1,
+	LED_CTL_LINK = 2,
+	LED_CTL_NO_LINK = 3,
+	LED_CTL_TX = 4,
+	LED_CTL_RX = 5,
+	LED_CTL_SITE_SURVEY = 6,
+	LED_CTL_POWER_OFF = 7,
+	LED_CTL_START_TO_LINK = 8,
+	LED_CTL_START_WPS = 9,
+	LED_CTL_STOP_WPS = 10,
+};
+
+enum rtl_led_pin {
+	LED_PIN_GPIO0,
+	LED_PIN_LED0,
+	LED_PIN_LED1,
+	LED_PIN_LED2
+};
+
+/*QoS related.*/
+/*acm implementation method.*/
+enum acm_method {
+	eAcmWay0_SwAndHw = 0,
+	eAcmWay1_HW = 1,
+	eAcmWay2_SW = 2,
+};
+
+enum macphy_mode {
+	SINGLEMAC_SINGLEPHY = 0,
+	DUALMAC_DUALPHY,
+	DUALMAC_SINGLEPHY,
+};
+
+enum band_type {
+	BAND_ON_2_4G = 0,
+	BAND_ON_5G,
+	BAND_ON_BOTH,
+	BANDMAX
+};
+
+/*aci/aifsn Field.
+Ref: WMM spec 2.2.2: WME Parameter Element, p.12.*/
+union aci_aifsn {
+	u8 char_data;
+
+	struct {
+		u8 aifsn:4;
+		u8 acm:1;
+		u8 aci:2;
+		u8 reserved:1;
+	} f;			/* Field */
+};
+
+/*mlme related.*/
+enum wireless_mode {
+	WIRELESS_MODE_UNKNOWN = 0x00,
+	WIRELESS_MODE_A = 0x01,
+	WIRELESS_MODE_B = 0x02,
+	WIRELESS_MODE_G = 0x04,
+	WIRELESS_MODE_AUTO = 0x08,
+	WIRELESS_MODE_N_24G = 0x10,
+	WIRELESS_MODE_N_5G = 0x20,
+	WIRELESS_MODE_AC_5G = 0x40,
+	WIRELESS_MODE_AC_24G  = 0x80
+};
+
+enum ratr_table_mode {
+	RATR_INX_WIRELESS_NGB = 0,		// BGN 40 Mhz 2SS 1SS
+	RATR_INX_WIRELESS_NG = 1,		// GN or N
+	RATR_INX_WIRELESS_NB = 2,		// BGN 20 Mhz 2SS 1SS  or BN
+	RATR_INX_WIRELESS_N = 3,
+	RATR_INX_WIRELESS_GB = 4,
+	RATR_INX_WIRELESS_G = 5,
+	RATR_INX_WIRELESS_B = 6,
+	RATR_INX_WIRELESS_MC = 7,
+	RATR_INX_WIRELESS_AC_5N = 8,
+	RATR_INX_WIRELESS_AC_24N = 9,
+};
+
+enum rtl_link_state {
+	MAC80211_NOLINK = 0,
+	MAC80211_LINKING = 1,
+	MAC80211_LINKED = 2,
+	MAC80211_LINKED_SCANNING = 3,
+};
+
+enum act_category {
+	ACT_CAT_QOS = 1,
+	ACT_CAT_DLS = 2,
+	ACT_CAT_BA = 3,
+	ACT_CAT_HT = 7,
+	ACT_CAT_WMM = 17,
+};
+
+enum ba_action {
+	ACT_ADDBAREQ = 0,
+	ACT_ADDBARSP = 1,
+	ACT_DELBA = 2,
+};
+
+enum rt_polarity_ctl {
+	RT_POLARITY_LOW_ACT = 0,
+	RT_POLARITY_HIGH_ACT = 1,
+};
+
+
+struct octet_string {
+	u8 *octet;
+	u16 length;
+};
+
+struct rtl_hdr_3addr {
+	__le16 frame_ctl;
+	__le16 duration_id;
+	u8 addr1[ETH_ALEN];
+	u8 addr2[ETH_ALEN];
+	u8 addr3[ETH_ALEN];
+	__le16 seq_ctl;
+	u8 payload[0];
+} __packed;
+
+struct rtl_info_element {
+	u8 id;
+	u8 len;
+	u8 data[0];
+} __packed;
+
+struct rtl_probe_rsp {
+	struct rtl_hdr_3addr header;
+	u32 time_stamp[2];
+	__le16 beacon_interval;
+	__le16 capability;
+	/*SSID, supported rates, FH params, DS params,
+	   CF params, IBSS params, TIM (if beacon), RSN */
+	struct rtl_info_element info_element[0];
+} __packed;
+
+/*LED related.*/
+/*ledpin identifies how to implement this SW LED.*/
+struct rtl_led {
+	void *hw;
+	enum rtl_led_pin ledpin;
+	bool b_ledon;
+};
+
+struct rtl_led_ctl {
+	bool bled_opendrain;
+	struct rtl_led sw_led0;
+	struct rtl_led sw_led1;
+};
+
+struct rtl_qos_parameters {
+	__le16 cw_min;
+	__le16 cw_max;
+	u8 aifs;
+	u8 flag;
+	__le16 tx_op;
+} __packed;
+
+struct rt_smooth_data {
+	u32 elements[100];	/*array to store values */
+	u32 index;		/*index to current array to store */
+	u32 total_num;		/*num of valid elements */
+	u32 total_val;		/*sum of valid elements */
+};
+
+struct rtl_ht_agg {
+	u16 txq_id;
+	u16 wait_for_ba;
+	u16 start_idx;
+	u64 bitmap;
+	u32 rate_n_flags;
+	u8 agg_state;
+	u8 rx_agg_state;
+};
+
+struct rtl_tid_data {
+	u16 seq_number;
+	struct rtl_ht_agg agg;
+};
+
+struct rssi_sta{
+	long undecorated_smoothed_pwdb;
+};
+
+struct rtl_sta_info {
+	struct list_head list;
+	u8 ratr_index;
+	u8 wireless_mode;
+	u8 mimo_ps;
+	u8 mac_addr[6];
+	struct rtl_tid_data tids[MAX_TID_COUNT];
+
+	/* just used for ap adhoc or mesh*/
+	struct rssi_sta rssi_stat;
+} __packed;
+
+#ifdef VIF_TODO
+struct rtl_vif {
+	unsigned int id;
+	/* struct ieee80211_vif __rcu *vif; */
+	struct ieee80211_vif *vif;
+};
+
+struct rtl_vif_info {
+	struct list_head list;
+	bool active;
+	unsigned int id;
+	struct sk_buff *beacon;
+	bool enable_beacon;
+};
+
+struct vif_priv {
+	struct list_head vif_list;
+
+	/* interface mode settings */
+	unsigned long vif_bitmap;
+	unsigned int vifs;
+	struct rtl_vif vif[MAX_VIRTUAL_MAC];
+
+	/* beaconing */
+	spinlock_t beacon_lock;
+	unsigned int global_pretbtt;
+	unsigned int global_beacon_int;
+	/* struct rtl_vif_info __rcu *beacon_iter; */
+	struct rtl_vif_info *beacon_iter;
+	unsigned int beacon_enabled;
+};
+#endif
+
+struct false_alarm_statistics {
+	u32 cnt_parity_fail;
+	u32 cnt_rate_illegal;
+	u32 cnt_crc8_fail;
+	u32 cnt_mcs_fail;
+	u32 cnt_fast_fsync_fail;
+	u32 cnt_sb_search_fail;
+	u32 cnt_ofdm_fail;
+	u32 cnt_cck_fail;
+	u32 cnt_all;
+	u32 cnt_ofdm_cca;
+	u32 cnt_cck_cca;
+	u32 cnt_cca_all;
+	u32 cnt_bw_usc;
+	u32 cnt_bw_lsc;
+};
+
+struct init_gain {
+	u8 xaagccore1;
+	u8 xbagccore1;
+	u8 xcagccore1;
+	u8 xdagccore1;
+	u8 cca;
+
+};
+
+struct wireless_stats {
+	unsigned long txbytesunicast;
+	unsigned long txbytesmulticast;
+	unsigned long txbytesbroadcast;
+	unsigned long rxbytesunicast;
+
+	long rx_snr_db[4];
+	/*Correct smoothed ss in dBm, only used
+	   in driver to report real power now. */
+	long recv_signal_power;
+	long signal_quality;
+	long last_sigstrength_inpercent;
+
+	u32 rssi_calculate_cnt;
+	u32 pwdb_all_cnt;
+
+	/*Transformed, in dBm. Beautified signal
+	   strength for UI, not correct. */
+	long signal_strength;
+
+	u8 rx_rssi_percentage[4];
+	u8 rx_evm_dbm[4];
+	u8 rx_evm_percentage[2];
+
+	u16 rx_cfo_short[4];
+	u16 rx_cfo_tail[4];
+
+	struct rt_smooth_data ui_rssi;
+	struct rt_smooth_data ui_link_quality;
+};
+
+struct rate_adaptive {
+	u8 rate_adaptive_disabled;
+	u8 ratr_state;
+	u16 reserve;
+
+	u32 high_rssi_thresh_for_ra;
+	u32 high2low_rssi_thresh_for_ra;
+	u8 low2high_rssi_thresh_for_ra;
+	u32 low_rssi_thresh_for_ra;
+	u32 upper_rssi_threshold_ratr;
+	u32 middleupper_rssi_threshold_ratr;
+	u32 middle_rssi_threshold_ratr;
+	u32 middlelow_rssi_threshold_ratr;
+	u32 low_rssi_threshold_ratr;
+	u32 ultralow_rssi_threshold_ratr;
+	u32 low_rssi_threshold_ratr_40m;
+	u32 low_rssi_threshold_ratr_20m;
+	u8 ping_rssi_enable;
+	u32 ping_rssi_ratr;
+	u32 ping_rssi_thresh_for_ra;
+	u32 last_ratr;
+	u8 pre_ratr_state;
+	u8 ldpc_thres;
+	bool use_ldpc;
+	bool lower_rts_rate;
+	bool is_special_data;
+};
+
+struct regd_pair_mapping {
+	u16 reg_dmnenum;
+	u16 reg_5ghz_ctl;
+	u16 reg_2ghz_ctl;
+};
+
+struct dynamic_primary_cca{
+	u8 pricca_flag;
+	u8 intf_flag;
+	u8 intf_type;
+	u8 dup_rts_flag;
+	u8 monitor_flag;
+	u8 ch_offset;
+	u8 mf_state;
+};
+
+struct rtl_regulatory {
+	char alpha2[2];
+	u16 country_code;
+	u16 max_power_level;
+	u32 tp_scale;
+	u16 current_rd;
+	u16 current_rd_ext;
+	int16_t power_limit;
+	struct regd_pair_mapping *regpair;
+};
+
+struct rtl_rfkill {
+	bool rfkill_state;	/*0 is off, 1 is on */
+};
+
+/*for P2P PS*/
+#define	P2P_MAX_NOA_NUM		2
+
+enum p2p_role {
+	P2P_ROLE_DISABLE = 0,
+	P2P_ROLE_DEVICE = 1,
+	P2P_ROLE_CLIENT = 2,
+	P2P_ROLE_GO = 3
+};
+
+enum p2p_ps_state {
+	P2P_PS_DISABLE = 0,
+	P2P_PS_ENABLE = 1,
+	P2P_PS_SCAN = 2,
+	P2P_PS_SCAN_DONE = 3,
+	P2P_PS_ALLSTASLEEP = 4, // for P2P GO
+};
+
+enum p2p_ps_mode {
+	P2P_PS_NONE = 0,
+	P2P_PS_CTWINDOW = 1,
+	P2P_PS_NOA = 2,
+	P2P_PS_MIX = 3, // CTWindow and NoA
+};
+
+struct rtl_p2p_ps_info {
+	enum p2p_ps_mode p2p_ps_mode; /* indicate p2p ps mode */
+	enum p2p_ps_state p2p_ps_state; /* indicate p2p ps state */
+	u8 noa_index; /* Identifies an instance of Notice of Absence timing. */
+	/* Client traffic window. A period of time in TU after TBTT. */
+	u8 ctwindow;
+	u8 opp_ps; /* opportunistic power save. */
+	u8 noa_num; /* number of NoA descriptor in P2P IE. */
+	/* Count for owner, Type of client. */
+	u8 noa_count_type[P2P_MAX_NOA_NUM];
+	/* Max duration for owner, preferred or
+	 * min acceptable duration for client. */
+	u32 noa_duration[P2P_MAX_NOA_NUM];
+	/* Length of interval for owner, preferred or
+	 * max acceptable interval of client. */
+	u32 noa_interval[P2P_MAX_NOA_NUM];
+	/* schedule expressed in terms of the lower 4 bytes of the TSF timer. */
+	u32 noa_start_time[P2P_MAX_NOA_NUM];
+};
+
+struct p2p_ps_offload_t {
+	u8 Offload_En:1;
+	u8 role:1; /* 1: Owner, 0: Client */
+	u8 CTWindow_En:1;
+	u8 NoA0_En:1;
+	u8 NoA1_En:1;
+	u8 AllStaSleep:1;
+	u8 discovery:1;
+	u8 reserved:1;
+};
+
+#define IQK_MATRIX_REG_NUM	8
+#define IQK_MATRIX_SETTINGS_NUM	 (14+24+21) // Channels_2_4G_NUM + Channels_5G_20M_NUM + Channels_5G
+struct iqk_matrix_regs {
+	bool b_iqk_done;
+	long value[1][IQK_MATRIX_REG_NUM];
+};
+
+struct rtl_phy {
+	struct bb_reg_def phyreg_def[4];	/*Radio A/B/C/D */
+	struct init_gain initgain_backup;
+	enum io_type current_io_type;
+
+	u8 rf_mode;
+	u8 rf_type;
+	u8 current_chan_bw;
+	u8 set_bwmode_inprogress;
+	u8 sw_chnl_inprogress;
+	u8 sw_chnl_stage;
+	u8 sw_chnl_step;
+	u8 current_channel;
+	u8 h2c_box_num;
+	u8 set_io_inprogress;
+	u8 lck_inprogress;
+
+	/* record for power tracking */
+	s32 reg_e94;
+	s32 reg_e9c;
+	s32 reg_ea4;
+	s32 reg_eac;
+	s32 reg_eb4;
+	s32 reg_ebc;
+	s32 reg_ec4;
+	s32 reg_ecc;
+	u8 rfpienable;
+	u8 reserve_0;
+	u16 reserve_1;
+	u32 reg_c04, reg_c08, reg_874;
+	u32 adda_backup[16];
+	u32 iqk_mac_backup[IQK_MAC_REG_NUM];
+	u32 iqk_bb_backup[10];
+	bool iqk_initialized;
+
+	bool rfpath_rx_enable[MAX_RF_PATH];
+	/*Jaguar*/
+	u8 reg_837;
+	/* Dual mac */
+	bool b_need_iqk;
+	struct iqk_matrix_regs iqk_matrix_regsetting[IQK_MATRIX_SETTINGS_NUM];
+
+	bool b_rfpi_enable;
+
+	bool b_iqk_in_progress;
+
+	u8 pwrgroup_cnt;
+	u8 bcck_high_power;
+	/* this is for 88E & 8723A */
+	u32 mcs_txpwrlevel_origoffset[MAX_PG_GROUP][16];
+	/* this is for 92EE */
+	u32 tx_power_by_rate_offset[TX_PWR_BY_RATE_NUM_BAND]
+				   [TX_PWR_BY_RATE_NUM_RF]
+				   [TX_PWR_BY_RATE_NUM_RF]
+				   [TX_PWR_BY_RATE_NUM_SECTION];
+	u8 txpwr_by_rate_base_24g[TX_PWR_BY_RATE_NUM_RF]
+				 [TX_PWR_BY_RATE_NUM_RF]
+				 [MAX_BASE_NUM_IN_PHY_REG_PG_24G];
+
+	u8 txpwr_by_rate_base_5g[TX_PWR_BY_RATE_NUM_RF]
+				[TX_PWR_BY_RATE_NUM_RF]
+				[MAX_BASE_NUM_IN_PHY_REG_PG_5G];
+	u8 default_initialgain[4];
+
+	/* the current Tx power level */
+	u8 cur_cck_txpwridx;
+	u8 cur_ofdm24g_txpwridx;
+	u8 cur_bw20_txpwridx;
+	u8 cur_bw40_txpwridx;
+
+	u32 rfreg_chnlval[2];
+	bool b_apk_done;
+	u32 reg_rf3c[2];	/* pathA / pathB  */
+
+	u32 backup_rf_0x1a;/*92ee*/
+	/* bfsync */
+	u8 framesync;
+	u32 framesync_c34;
+
+	u8 num_total_rfpath;
+	u16 rf_pathmap;
+
+	u8 hw_rof_enable; /*Enable GPIO[9] as WL RF HW PDn source*/
+
+	enum rt_polarity_ctl polarity_ctl;
+};
+
+#define RTL_AGG_STOP 						0
+#define RTL_AGG_PROGRESS					1
+#define RTL_AGG_START 						2
+#define RTL_AGG_OPERATIONAL					3
+#define RTL_RX_AGG_START 					1
+#define RTL_RX_AGG_STOP 					0
+
+struct rtl_priv;
+struct rtl_io {
+	struct device *dev;
+
+	/*PCI MEM map */
+	unsigned long pci_mem_end;	/*shared mem end        */
+	unsigned long pci_mem_start;	/*shared mem start */
+
+	/*PCI IO map */
+	unsigned long pci_base_addr;	/*device I/O address */
+
+	void (*write8_async) (struct rtl_priv * rtlpriv, u32 addr, u8 val);
+	void (*write16_async) (struct rtl_priv * rtlpriv, u32 addr, u16 val);
+	void (*write32_async) (struct rtl_priv * rtlpriv, u32 addr, u32 val);
+
+	u8(*read8_sync) (struct rtl_priv * rtlpriv, u32 addr);
+	u16(*read16_sync) (struct rtl_priv * rtlpriv, u32 addr);
+	u32(*read32_sync) (struct rtl_priv * rtlpriv, u32 addr);
+
+};
+
+struct rtl_mac {
+	u8 mac_addr[ETH_ALEN];
+	u8 mac80211_registered;
+	u8 beacon_enabled;
+
+	u32 tx_ss_num;
+	u32 rx_ss_num;
+
+	struct ieee80211_supported_band bands[IEEE80211_NUM_BANDS];
+	struct ieee80211_hw *hw;
+	struct ieee80211_vif *vif;
+	enum nl80211_iftype opmode;
+
+	/*Probe Beacon management */
+	enum rtl_link_state link_state;
+
+	int n_channels;
+	int n_bitrates;
+
+	bool offchan_deley;
+	u8 p2p;	/*using p2p role*/
+	bool p2p_in_use;
+
+	/*filters */
+	u32 rx_conf;
+
+	bool act_scanning;
+	u8 cnt_after_linked;
+	bool skip_scan;
+
+	/* early mode */
+	/* skb wait queue */
+	struct sk_buff_head skb_waitq[MAX_TID_COUNT];
+
+	/*RDG*/
+	bool rdg_en;
+
+	/*AP*/
+	u8 bssid[6];
+	u32 vendor;
+	u32 basic_rates; /* b/g rates */
+	u8 ht_enable;
+	u8 bw_40;
+	u8 mode;		/* wireless mode */
+	u8 slot_time;
+	u8 short_preamble;
+	u8 use_cts_protect;
+	u8 cur_40_prime_sc;
+	u8 cur_40_prime_sc_bk;
+	u8 cur_80_prime_sc;
+	u64 tsf;
+	u8 retry_short;
+	u8 retry_long;
+	u16 assoc_id;
+	bool bhiddenssid;
+
+	/*IBSS*/
+	int beacon_interval;
+
+	/*AMPDU*/
+	u8 min_space_cfg;	/*For Min spacing configurations */
+	u8 max_mss_density;
+	u8 current_ampdu_factor;
+	u8 current_ampdu_density;
+
+	/*QOS & EDCA */
+	struct ieee80211_tx_queue_params edca_param[RTL_MAC80211_NUM_QUEUE];
+	struct rtl_qos_parameters ac[AC_MAX];
+};
+
+struct rtl_hal {
+	struct ieee80211_hw *hw;
+
+	bool driver_is_goingto_unload;
+	bool up_first_time;
+	bool bfirst_init;
+	bool being_init_adapter;
+	bool b_bbrf_ready;
+	bool b_mac_func_enable;
+	bool b_pre_edcca_enable;
+
+	enum intf_type interface;
+	u16 hw_type;		/*92c or 92d or 92s and so on */
+	u8 ic_class;
+	u8 oem_id;
+	u32 version;		/*version of chip */
+	u8 state;		/*stop 0, start 1 */
+	u8 boad_type;
+
+	/*firmware */
+	u32 fwsize;
+	u8 *pfirmware;
+	u16 fw_version;
+	u16 fw_subversion;
+	bool b_h2c_setinprogress;
+	u8 last_hmeboxnum;
+	bool bfw_ready;
+
+	/*Reserve page start offset except beacon in TxQ. */
+	u8 fw_rsvdpage_startoffset;
+	u8 h2c_txcmd_seq;
+	u8 current_ra_rate;
+
+	/* FW Cmd IO related */
+	u16 fwcmd_iomap;
+	u32 fwcmd_ioparam;
+	bool set_fwcmd_inprogress;
+	u8 current_fwcmd_io;
+
+	bool bfw_clk_change_in_progress;
+	bool ballow_sw_to_change_hwclc;
+	u8 fw_ps_state;
+	struct p2p_ps_offload_t p2p_ps_offload;
+	/**/
+	bool driver_going2unload;
+
+	/*AMPDU init min space*/
+	u8 minspace_cfg;	/*For Min spacing configurations */
+
+	/* Dual mac */
+	enum macphy_mode macphymode;
+	enum band_type current_bandtype;	/* 0:2.4G, 1:5G */
+	enum band_type current_bandtypebackup;
+	enum band_type bandset;
+	/* dual MAC 0--Mac0 1--Mac1 */
+	u32 interfaceindex;
+	/* just for dual-MAC S3S4 */
+	u8 macphyctl_reg;
+	bool b_earlymode_enable;
+	u8 max_earlymode_num;
+	/* Dual mac */
+	bool during_mac0init_radiob;
+	bool during_mac1init_radioa;
+	bool reloadtxpowerindex;
+	/* True if IMR or IQK have been done
+	   for 2.4G during scan */
+	bool b_load_imrandiqk_setting_for2g;
+
+	bool disable_amsdu_8k;
+	bool bmaster_of_dmsp;
+	bool bslave_of_dmsp;
+
+	u16 rx_tag;/*for 92ee*/
+	u8 rts_en;
+};
+
+struct rtl_security {
+	/*default 0 */
+	bool use_sw_sec;
+
+	bool being_setkey;
+	bool use_defaultkey;
+	/*Encryption Algorithm for Unicast Packet */
+	enum rt_enc_alg pairwise_enc_algorithm;
+	/*Encryption Algorithm for Broadcast/Multicast */
+	enum rt_enc_alg group_enc_algorithm;
+	/*Cam Entry Bitmap */
+	u32 hwsec_cam_bitmap;
+	u8 hwsec_cam_sta_addr[TOTAL_CAM_ENTRY][ETH_ALEN];
+	/*local key buffer, index 0 is for the
+	   pairwise key, 1-4 are for group keys. */
+	u8 key_buf[KEY_BUF_SIZE][MAX_KEY_LEN];
+	u8 key_len[KEY_BUF_SIZE];
+
+	/*The pointer of Pairwise Key,
+	   it always points to KeyBuf[4] */
+	u8 *pairwise_key;
+};
+
+struct rtl_dig {
+	u8 dig_enable_flag;
+	u8 dig_ext_port_stage;
+
+	u32 rssi_lowthresh;
+	u32 rssi_highthresh;
+
+	u32 fa_lowthresh;
+	u32 fa_highthresh;
+
+	u8 cursta_connectstate;
+	u8 presta_connectstate;
+	u8 curmultista_connectstate;
+
+	u8 pre_igvalue;
+	u8 cur_igvalue;
+
+	char backoff_val;
+	char backoff_val_range_max;
+	char backoff_val_range_min;
+	u8 rx_gain_range_max;
+	u8 rx_gain_range_min;
+	u8 rssi_val_min;
+	u8 min_undecorated_pwdb_for_dm;
+	long last_min_undecorated_pwdb_for_dm;
+
+	u8 pre_cck_pd_state;
+	u8 cur_cck_pd_state;
+
+	u8 large_fa_hit;
+	u8 forbidden_igi;
+	u32 recover_cnt;
+
+};
+
+struct rtl_pstbl {
+	u8 pre_ccastate;
+	u8 cur_ccasate;
+
+	u8 pre_rfstate;
+	u8 cur_rfstate;
+
+	long rssi_val_min;
+
+};
+
+#define ASSOCIATE_ENTRY_NUM	(32 + 1)
+
+struct fast_ant_trainning{
+	u8 bssid[6];
+	u8 antsel_rx_keep_0;
+	u8 antsel_rx_keep_1;
+	u8 antsel_rx_keep_2;
+	u32 ant_sum_rssi[7];
+	u32 ant_rssi_cnt[7];
+	u32 ant_ave_rssi[7];
+	u8 fat_state;
+	u32 train_idx;
+	u8 antsel_a[ASSOCIATE_ENTRY_NUM];
+	u8 antsel_b[ASSOCIATE_ENTRY_NUM];
+	u8 antsel_c[ASSOCIATE_ENTRY_NUM];
+	u32 main_ant_sum[ASSOCIATE_ENTRY_NUM];
+	u32 aux_ant_sum[ASSOCIATE_ENTRY_NUM];
+	u32 main_ant_cnt[ASSOCIATE_ENTRY_NUM];
+	u32 aux_ant_cnt[ASSOCIATE_ENTRY_NUM];
+	u8 rx_idle_ant;
+	bool b_becomelinked;
+};
+
+struct dm_phy_dbg_info {
+	char rx_snrdb[4];
+	u64 num_qry_phy_status;
+	u64 num_qry_phy_status_cck;
+	u64 num_qry_phy_status_ofdm;
+	u16 num_qry_beacon_pkt;
+	u16 num_non_be_pkt;
+	s32 rx_evm[4];
+};
+
+struct rtl_dm {
+	/*PHY status for DM */
+	long entry_min_undecoratedsmoothed_pwdb;
+	long undecorated_smoothed_pwdb;	/*out dm */
+	long entry_max_undecoratedsmoothed_pwdb;
+	bool b_dm_initialgain_enable;
+	bool bdynamic_txpower_enable;
+	bool bcurrent_turbo_edca;
+	bool bis_any_nonbepkts;	/*out dm */
+	bool bis_cur_rdlstate;
+	bool btxpower_trackinginit;
+	bool b_disable_framebursting;
+	bool b_cck_inch14;
+	bool btxpower_tracking;
+	bool b_useramask;
+	bool brfpath_rxenable[4];
+	bool binform_fw_driverctrldm;
+	bool bcurrent_mrc_switch;
+	u8 txpowercount;
+
+	u8 thermalvalue_rxgain;
+	u8 thermalvalue_iqk;
+	u8 thermalvalue_lck;
+	u8 thermalvalue;
+	u8 thermalvalue_avg[AVG_THERMAL_NUM];
+	u8 thermalvalue_avg_index;
+	bool bdone_txpower;
+	u8 last_dtp_lvl;
+	u8 dynamic_txhighpower_lvl;	/*Tx high power level */
+	u8 dm_flag;	/*Indicates each dynamic mechanism's status. */
+	u8 dm_type;
+	u8 txpower_track_control;
+	bool binterrupt_migration;
+	bool bdisable_tx_int;
+	char ofdm_index[MAX_RF_PATH];
+	u8 default_ofdm_index;
+	u8 default_cck_index;
+	char cck_index;
+	char delta_power_index[MAX_RF_PATH];
+	char delta_power_index_last[MAX_RF_PATH];
+	char power_index_offset[MAX_RF_PATH];
+	char aboslute_ofdm_swing_idx[MAX_RF_PATH];
+	char remnant_ofdm_swing_idx[MAX_RF_PATH];
+	char remnant_cck_idx;
+	bool modify_txagc_flag_path_a;
+	bool modify_txagc_flag_path_b;
+
+	bool b_one_entry_only;
+	struct dm_phy_dbg_info dbginfo;
+	/* Dynamic ATC switch */
+
+	bool atc_status;
+	bool large_cfo_hit;
+	bool is_freeze;
+	int cfo_tail[2];
+	int cfo_ave_pre;
+	int crystal_cap;
+	u8 cfo_threshold;
+	u32 packet_count;
+	u32 packet_count_pre;
+	u8 tx_rate;
+
+
+	/*88e tx power tracking*/
+	u8 bb_swing_idx_ofdm[MAX_RF_PATH];
+	u8 bb_swing_idx_ofdm_current;
+	u8 bb_swing_idx_ofdm_base[MAX_RF_PATH];
+	bool bb_swing_flag_Ofdm;
+	u8 bb_swing_idx_cck;
+	u8 bb_swing_idx_cck_current;
+	u8 bb_swing_idx_cck_base;
+	bool bb_swing_flag_cck;
+
+	char bb_swing_diff_2g;
+	char bb_swing_diff_5g;
+
+	u8 delta_swing_table_idx_24gccka_p[DELTA_SWINGIDX_SIZE];
+	u8 delta_swing_table_idx_24gccka_n[DELTA_SWINGIDX_SIZE];
+	u8 delta_swing_table_idx_24gcckb_p[DELTA_SWINGIDX_SIZE];
+	u8 delta_swing_table_idx_24gcckb_n[DELTA_SWINGIDX_SIZE];
+	u8 delta_swing_table_idx_24ga_p[DELTA_SWINGIDX_SIZE];
+	u8 delta_swing_table_idx_24ga_n[DELTA_SWINGIDX_SIZE];
+	u8 delta_swing_table_idx_24gb_p[DELTA_SWINGIDX_SIZE];
+	u8 delta_swing_table_idx_24gb_n[DELTA_SWINGIDX_SIZE];
+	u8 delta_swing_table_idx_5ga_p[BAND_NUM][DELTA_SWINGIDX_SIZE];
+	u8 delta_swing_table_idx_5ga_n[BAND_NUM][DELTA_SWINGIDX_SIZE];
+	u8 delta_swing_table_idx_5gb_p[BAND_NUM][DELTA_SWINGIDX_SIZE];
+	u8 delta_swing_table_idx_5gb_n[BAND_NUM][DELTA_SWINGIDX_SIZE];
+	u8 delta_swing_table_idx_24ga_p_8188e[DELTA_SWINGIDX_SIZE];
+	u8 delta_swing_table_idx_24ga_n_8188e[DELTA_SWINGIDX_SIZE];
+
+
+	/* DMSP */
+	bool supp_phymode_switch;
+
+	/* DulMac */
+	struct rtl_dig dm_digtable;
+	struct rtl_pstbl dm_pstable;
+	struct fast_ant_trainning fat_table;
+
+	u8	resp_tx_path;
+	u8 	path_sel;
+	u32	patha_sum;
+	u32	pathb_sum;
+	u32	patha_cnt;
+	u32	pathb_cnt;
+
+	u8 pre_channel;
+	u8 *p_channel;
+	u8 linked_interval;
+
+	u64 last_tx_ok_cnt;
+	u64 last_rx_ok_cnt;
+};
+
+#define	EFUSE_MAX_LOGICAL_SIZE		256
+
+struct rtl_efuse {
+	bool bautoLoad_ok;
+	bool bootfromefuse;
+	u16 max_physical_size;
+
+	u8 efuse_map[2][EFUSE_MAX_LOGICAL_SIZE];
+	u16 efuse_usedbytes;
+	u8 efuse_usedpercentage;
+#ifdef EFUSE_REPG_WORKAROUND
+	bool efuse_re_pg_sec1flag;
+	u8 efuse_re_pg_data[8];
+#endif
+
+	u8 autoload_failflag;
+	u8 autoload_status;
+
+	short epromtype;
+	u16 eeprom_vid;
+	u16 eeprom_did;
+	u16 eeprom_svid;
+	u16 eeprom_smid;
+	u8 eeprom_oemid;
+	u16 eeprom_channelplan;
+	u8 eeprom_version;
+
+	u8 dev_addr[6];
+	u8 board_type;
+	u8 wowlan_enable;
+	u8 antenna_div_cfg;
+	u8 antenna_div_type;
+
+	bool b_txpwr_fromeprom;
+	u8 eeprom_crystalcap;
+	u8 eeprom_tssi[2];
+	u8 eeprom_tssi_5g[3][2]; /* for 5GL/5GM/5GH band. */
+	u8 eeprom_pwrlimit_ht20[CHANNEL_GROUP_MAX];
+	u8 eeprom_pwrlimit_ht40[CHANNEL_GROUP_MAX];
+	u8 eeprom_chnlarea_txpwr_cck[2][CHANNEL_GROUP_MAX_2G];
+	u8 eeprom_chnlarea_txpwr_ht40_1s[2][CHANNEL_GROUP_MAX];
+	u8 eeprom_chnlarea_txpwr_ht40_2sdiif[2][CHANNEL_GROUP_MAX];
+
+
+	u8 internal_pa_5g[2];	/* pathA / pathB */
+	u8 eeprom_c9;
+	u8 eeprom_cc;
+
+	/*For power group */
+	u8 eeprom_pwrgroup[2][3];
+	u8 pwrgroup_ht20[2][CHANNEL_MAX_NUMBER];
+	u8 pwrgroup_ht40[2][CHANNEL_MAX_NUMBER];
+
+	u8 txpwrlevel_cck[MAX_RF_PATH][CHANNEL_MAX_NUMBER_2G];
+	/*For HT 40MHZ pwr */
+	u8 txpwrlevel_ht40_1s[MAX_RF_PATH][CHANNEL_MAX_NUMBER];
+	/*For HT 40MHZ pwr */
+	u8 txpwrlevel_ht40_2s[MAX_RF_PATH][CHANNEL_MAX_NUMBER];
+	char txpwr_cckdiff[MAX_RF_PATH][MAX_TX_COUNT]; /*CCK_24G_Diff*/
+	/*HT 20<->40 Pwr diff */
+	char txpwr_ht20diff[MAX_RF_PATH][MAX_TX_COUNT];	/*BW20_24G_Diff*/
+	char txpwr_ht40diff[MAX_RF_PATH][MAX_TX_COUNT];/*BW40_24G_Diff*/
+	/*For HT<->legacy pwr diff */
+	char txpwr_legacyhtdiff[MAX_RF_PATH][MAX_TX_COUNT];/*OFDM_24G_Diff*/
+
+	u8 txpwr_5g_bw40base[MAX_RF_PATH][CHANNEL_MAX_NUMBER];
+	u8 txpwr_5g_bw80base[MAX_RF_PATH][CHANNEL_MAX_NUMBER_5G_80M];
+	char txpwr_5g_ofdmdiff[MAX_RF_PATH][MAX_TX_COUNT];
+	char txpwr_5g_bw20diff[MAX_RF_PATH][MAX_TX_COUNT];
+	char txpwr_5g_bw40diff[MAX_RF_PATH][MAX_TX_COUNT];
+	char txpwr_5g_bw80diff[MAX_RF_PATH][MAX_TX_COUNT];
+
+	u8 txpwr_safetyflag;		/* Band edge enable flag */
+	u16 eeprom_txpowerdiff;
+	u8 legacy_httxpowerdiff;	/* Legacy to HT rate power diff */
+	u8 antenna_txpwdiff[3];
+
+	u8 eeprom_regulatory;
+	u8 eeprom_thermalmeter;
+	u8 thermalmeter[2];/*ThermalMeter, index 0 for RFIC0, and 1 for RFIC1 */
+	u16 tssi_13dbm;
+	u8 crystalcap;		/* CrystalCap. */
+	u8 delta_iqk;
+	u8 delta_lck;
+
+	u8 legacy_ht_txpowerdiff;	/*Legacy to HT rate power diff */
+	bool b_apk_thermalmeterignore;
+
+	bool b1x1_recvcombine;
+	bool b1ss_support;
+
+	/*channel plan */
+	u8 channel_plan;
+};
+
+struct rtl_ps_ctl {
+	bool pwrdomain_protect;
+	bool b_in_powersavemode;
+	bool rfchange_inprogress;
+	bool b_swrf_processing;
+	bool b_hwradiooff;
+	/*
+	 * just for PCIE ASPM
+	 * If it supports ASPM, Offset[560h] = 0x40,
+	 * otherwise Offset[560h] = 0x00.
+	 * */
+	bool b_support_aspm;
+	bool b_support_backdoor;
+
+	/*for LPS */
+	enum rt_psmode dot11_psmode;	/*Power save mode configured. */
+	bool b_swctrl_lps;
+	bool b_fwctrl_lps;
+	u8 fwctrl_psmode;
+	/*For Fw control LPS mode */
+	u8 b_reg_fwctrl_lps;
+	/*Record Fw PS mode status. */
+	bool b_fw_current_inpsmode;
+	u8 reg_max_lps_awakeintvl;
+	bool report_linked;
+	bool b_low_power_enable;/*for 32k*/
+
+	/*for IPS */
+	bool b_inactiveps;
+
+	u32 rfoff_reason;
+
+	/*RF OFF Level */
+	u32 cur_ps_level;
+	u32 reg_rfps_level;
+
+	/*just for PCIE ASPM */
+	u8 const_amdpci_aspm;
+
+	enum rf_pwrstate inactive_pwrstate;
+	enum rf_pwrstate rfpwr_state;	/*cur power state */
+
+	/* for SW LPS*/
+	bool sw_ps_enabled;
+	bool state;
+	bool state_inap;
+	bool multi_buffered;
+	u16 nullfunc_seq;
+	unsigned int dtim_counter;
+	unsigned int sleep_ms;
+	unsigned long last_sleep_jiffies;
+	unsigned long last_awake_jiffies;
+	unsigned long last_delaylps_stamp_jiffies;
+	unsigned long last_dtim;
+	unsigned long last_beacon;
+	unsigned long last_action;
+	unsigned long last_slept;
+
+	/*For P2P PS */
+	struct rtl_p2p_ps_info p2p_ps_info;
+	u8 pwr_mode;
+	u8 smart_ps;
+};
+
+struct rtl_stats {
+	u8 psaddr[ETH_ALEN];
+	u32 mac_time[2];
+	s8 rssi;
+	u8 signal;
+	u8 noise;
+	u8 rate;		/* hw desc rate */
+	u8 rawdata;
+	u8 received_channel;
+	u8 control;
+	u8 mask;
+	u8 freq;
+	u16 len;
+	u64 tsf;
+	u32 beacon_time;
+	u8 nic_type;
+	u16 length;
+	u8 signalquality;	/*in 0-100 index. */
+	/*
+	 * Real power in dBm for this packet,
+	 * no beautification and aggregation.
+	 * */
+	s32 recvsignalpower;
+	s8 rxpower;		/*in dBm Translate from PWdB */
+	u8 signalstrength;	/*in 0-100 index. */
+	u16 b_hwerror:1;
+	u16 b_crc:1;
+	u16 b_icv:1;
+	u16 b_shortpreamble:1;
+	u16 antenna:1;
+	u16 decrypted:1;
+	u16 wakeup:1;
+	u32 timestamp_low;
+	u32 timestamp_high;
+	bool b_shift;
+
+	u8 rx_drvinfo_size;
+	u8 rx_bufshift;
+	bool b_isampdu;
+	bool b_isfirst_ampdu;
+	bool rx_is40Mhzpacket;
+	u32 rx_pwdb_all;
+	u8 rx_mimo_signalstrength[4];	/*in 0~100 index */
+	s8 rx_mimo_signalquality[4];
+	u8 rx_mimo_evm_dbm[4];
+	u16 cfo_short[4]; 		/* per-path's Cfo_short */
+	u16 cfo_tail[4];
+
+	u8 rx_pwr[4]; /* per-path's pwdb */
+	u8 rx_snr[4]; /* per-path's SNR */
+	u8 bandwidth;
+	u8 bt_coex_pwr_adjust;
+	bool b_packet_matchbssid;
+	bool b_is_cck;
+	bool b_is_ht;
+	bool b_packet_toself;
+	bool b_packet_beacon;	/*for rssi */
+	char cck_adc_pwdb[4];	/*for rx path selection */
+
+	u8 packet_report_type;
+
+	u32 macid;
+	u8 wake_match;
+	u32 bt_rx_rssi_percentage;
+	u32 macid_valid_entry[2];
+};
+
+struct rt_link_detect {
+	/* count for roaming */
+	u32 bcn_rx_inperiod;
+	u32 roam_times;
+
+	u32 num_tx_in4period[4];
+	u32 num_rx_in4period[4];
+
+	u32 num_tx_inperiod;
+	u32 num_rx_inperiod;
+
+	bool b_busytraffic;
+	bool b_tx_busy_traffic;
+	bool b_rx_busy_traffic;
+	bool b_higher_busytraffic;
+	bool b_higher_busyrxtraffic;
+
+	u32 tidtx_in4period[MAX_TID_COUNT][4];
+	u32 tidtx_inperiod[MAX_TID_COUNT];
+	bool higher_busytxtraffic[MAX_TID_COUNT];
+};
+
+struct rtl_tcb_desc {
+	u8 b_packet_bw:1;
+	u8 b_multicast:1;
+	u8 b_broadcast:1;
+
+	u8 b_rts_stbc:1;
+	u8 b_rts_enable:1;
+	u8 b_cts_enable:1;
+	u8 b_rts_use_shortpreamble:1;
+	u8 b_rts_use_shortgi:1;
+	u8 rts_sc:1;
+	u8 b_rts_bw:1;
+	u8 rts_rate;
+
+	u8 use_shortgi:1;
+	u8 use_shortpreamble:1;
+	u8 use_driver_rate:1;
+	u8 disable_ratefallback:1;
+
+	u8 ratr_index;
+	u8 mac_id;
+	u8 hw_rate;
+
+	u8 b_last_inipkt:1;
+	u8 b_cmd_or_init:1;
+	u8 queue_index;
+
+	/* early mode */
+	u8 empkt_num;
+	/* The max value by HW */
+	u32 empkt_len[10];
+	bool btx_enable_sw_calc_duration;
+	/* used when the HAL constructs the packet;
+	 * the descriptor may be set at TX time */
+	u8 self_desc;
+};
+
+struct proxim {
+	bool proxim_on;
+
+	void *proximity_priv;
+	int (*proxim_rx)(struct ieee80211_hw *hw, struct rtl_stats *status,
+			 struct sk_buff *skb);
+	u8  (*proxim_get_var)(struct ieee80211_hw *hw, u8 type);
+};
+
+struct rtl_hal_ops {
+	int (*init_sw_vars) (struct ieee80211_hw * hw);
+	void (*deinit_sw_vars) (struct ieee80211_hw * hw);
+	void (*read_eeprom_info) (struct ieee80211_hw * hw);
+	void (*interrupt_recognized) (struct ieee80211_hw * hw,
+				      u32 * p_inta, u32 * p_intb);
+	int (*hw_init) (struct ieee80211_hw * hw);
+	void (*hw_disable) (struct ieee80211_hw * hw);
+	void (*hw_suspend) (struct ieee80211_hw * hw);
+	void (*hw_resume) (struct ieee80211_hw * hw);
+	void (*enable_interrupt) (struct ieee80211_hw * hw);
+	void (*disable_interrupt) (struct ieee80211_hw * hw);
+	int (*set_network_type) (struct ieee80211_hw * hw,
+				 enum nl80211_iftype type);
+	void (*set_chk_bssid)(struct ieee80211_hw *hw,
+			      bool check_bssid);
+	void (*set_bw_mode) (struct ieee80211_hw * hw,
+			     enum nl80211_channel_type ch_type);
+	 u8(*switch_channel) (struct ieee80211_hw * hw);
+	void (*set_qos) (struct ieee80211_hw * hw, int aci);
+	void (*set_bcn_reg) (struct ieee80211_hw * hw);
+	void (*set_bcn_intv) (struct ieee80211_hw * hw);
+	void (*update_interrupt_mask) (struct ieee80211_hw * hw,
+				       u32 add_msr, u32 rm_msr);
+	void (*get_hw_reg) (struct ieee80211_hw * hw, u8 variable, u8 * val);
+	void (*set_hw_reg) (struct ieee80211_hw * hw, u8 variable, u8 * val);
+	void (*update_rate_tbl) (struct ieee80211_hw * hw,
+				 struct ieee80211_sta *sta, u8 rssi_level);
+	void (*pre_fill_tx_bd_desc) (struct ieee80211_hw *hw, u8 *tx_bd_desc,
+				     u8 *desc, u8 queue_index,
+				     struct sk_buff *skb, dma_addr_t addr);
+	u16 (*rx_desc_buff_remained_cnt) (struct ieee80211_hw *hw,
+					  u8 queue_index);
+	void (*rx_check_dma_ok) (struct ieee80211_hw *hw, u8 *header_desc,
+				 u8 queue_index);
+	void (*fill_tx_desc) (struct ieee80211_hw * hw,
+			      struct ieee80211_hdr * hdr,
+			      u8 * pdesc_tx, u8 * pbd_desc,
+			      struct ieee80211_tx_info * info,
+/*<delete in kernel start>*/
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,7,0))
+/*<delete in kernel end>*/
+			      struct ieee80211_sta *sta,
+/*<delete in kernel start>*/
+#endif
+/*<delete in kernel end>*/
+			      struct sk_buff * skb, u8 hw_queue,
+			      struct rtl_tcb_desc *ptcb_desc);
+	void (*fill_tx_cmddesc) (struct ieee80211_hw * hw, u8 * pdesc,
+				 bool b_firstseg, bool b_lastseg,
+				 struct sk_buff * skb);
+	 bool(*query_rx_desc) (struct ieee80211_hw * hw,
+			       struct rtl_stats * status,
+			       struct ieee80211_rx_status * rx_status,
+			       u8 * pdesc, struct sk_buff * skb);
+	void (*set_channel_access) (struct ieee80211_hw * hw);
+	 bool(*radio_onoff_checking) (struct ieee80211_hw * hw, u8 * valid);
+	void (*dm_watchdog) (struct ieee80211_hw * hw);
+	void (*scan_operation_backup) (struct ieee80211_hw * hw, u8 operation);
+	 bool(*set_rf_power_state) (struct ieee80211_hw * hw,
+				    enum rf_pwrstate rfpwr_state);
+	void (*led_control) (struct ieee80211_hw * hw,
+			     enum led_ctl_mode ledaction);
+	void (*set_desc) (struct ieee80211_hw *hw, u8 * pdesc, bool istx,
+			  u8 desc_name, u8 * val);
+	 u32(*get_desc) (u8 * pdesc, bool istx, u8 desc_name);
+	bool (*is_tx_desc_closed) (struct ieee80211_hw *hw,
+				   u8 hw_queue, u16 index);
+	void (*tx_polling) (struct ieee80211_hw * hw, u8 hw_queue);
+	void (*enable_hw_sec) (struct ieee80211_hw * hw);
+	void (*set_key) (struct ieee80211_hw * hw, u32 key_index,
+			 u8 * p_macaddr, bool is_group, u8 enc_algo,
+			 bool is_wepkey, bool clear_all);
+	void (*init_sw_leds) (struct ieee80211_hw * hw);
+	 u32(*get_bbreg) (struct ieee80211_hw * hw, u32 regaddr, u32 bitmask);
+	void (*set_bbreg) (struct ieee80211_hw * hw, u32 regaddr, u32 bitmask,
+			   u32 data);
+	 u32(*get_rfreg) (struct ieee80211_hw * hw, enum radio_path rfpath,
+			  u32 regaddr, u32 bitmask);
+	void (*set_rfreg) (struct ieee80211_hw * hw, enum radio_path rfpath,
+			   u32 regaddr, u32 bitmask, u32 data);
+	void (*allow_all_destaddr)(struct ieee80211_hw *hw,
+		bool allow_all_da, bool write_into_reg);
+	void (*linked_set_reg) (struct ieee80211_hw * hw);
+	void (*check_switch_to_dmdp) (struct ieee80211_hw * hw);
+	void (*dualmac_easy_concurrent) (struct ieee80211_hw *hw);
+	void (*dualmac_switch_to_dmdp) (struct ieee80211_hw *hw);
+	void (*c2h_command_handle) (struct ieee80211_hw *hw);
+	void (*bt_wifi_media_status_notify) (struct ieee80211_hw *hw, bool mstate);
+	void (*bt_turn_off_bt_coexist_before_enter_lps) (struct ieee80211_hw *hw);
+	void (*fill_h2c_cmd) (struct ieee80211_hw *hw, u8 element_id,
+			      u32 cmd_len, u8 *p_cmdbuffer);
+	bool (*get_btc_status) (void);
+	u32 (*rx_command_packet_handler)(struct ieee80211_hw *hw,
+					 struct rtl_stats status,
+					 struct sk_buff *skb);
+};
+
+struct rtl_intf_ops {
+	/*com */
+	void (*read_efuse_byte)(struct ieee80211_hw *hw, u16 _offset, u8 *pbuf);
+	int (*adapter_start) (struct ieee80211_hw * hw);
+	void (*adapter_stop) (struct ieee80211_hw * hw);
+	bool (*check_buddy_priv)(struct ieee80211_hw *hw,
+			struct rtl_priv **buddy_priv);
+
+/*<delete in kernel start>*/
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(3,7,0))
+	int (*adapter_tx) (struct ieee80211_hw * hw, struct sk_buff * skb,
+			   struct rtl_tcb_desc *ptcb_desc);
+#else
+/*<delete in kernel end>*/
+	int (*adapter_tx) (struct ieee80211_hw *hw,
+			   struct ieee80211_sta *sta,
+			   struct sk_buff *skb,
+			   struct rtl_tcb_desc *ptcb_desc);
+/*<delete in kernel start>*/
+#endif
+/*<delete in kernel end>*/
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,10,0))
+	void (*flush)(struct ieee80211_hw *hw, u32 queues, bool drop);
+#else
+	void (*flush)(struct ieee80211_hw *hw, bool drop);
+#endif
+	int (*reset_trx_ring) (struct ieee80211_hw * hw);
+/*<delete in kernel start>*/
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(3,7,0))
+	bool (*waitq_insert) (struct ieee80211_hw *hw, struct sk_buff *skb);
+#else
+/*<delete in kernel end>*/
+	bool (*waitq_insert) (struct ieee80211_hw *hw,
+			      struct ieee80211_sta *sta,
+			      struct sk_buff *skb);
+/*<delete in kernel start>*/
+#endif
+/*<delete in kernel end>*/
+
+	/*pci */
+	void (*disable_aspm) (struct ieee80211_hw * hw);
+	void (*enable_aspm) (struct ieee80211_hw * hw);
+
+	/*usb */
+};
+
+struct rtl_mod_params {
+	/* default: 0 = using hardware encryption */
+	bool sw_crypto;
+
+	/* default: 1 = using no linked power save */
+	bool b_inactiveps;
+
+	/* default: 1 = using linked sw power save */
+	bool b_swctrl_lps;
+
+	/* default: 1 = using linked fw power save */
+	bool b_fwctrl_lps;
+};
+
+struct rtl_hal_cfg {
+	u8 bar_id;
+	bool write_readback;
+	char *name;
+	char *fw_name;
+	struct rtl_hal_ops *ops;
+	struct rtl_mod_params *mod_params;
+
+	/*this map is used for some registers or vars
+	   defined in HAL but used in MAIN */
+	u32 maps[RTL_VAR_MAP_MAX];
+
+};
+
+struct rtl_locks {
+	/* mutex */
+	struct mutex conf_mutex;
+
+	/*spin lock */
+	spinlock_t ips_lock;
+	spinlock_t irq_th_lock;
+	spinlock_t h2c_lock;
+	spinlock_t rf_ps_lock;
+	spinlock_t rf_lock;
+	spinlock_t lps_lock;
+	spinlock_t waitq_lock;
+	spinlock_t entry_list_lock;
+
+	/*FW clock change */
+	spinlock_t fw_ps_lock;
+
+	/*Dual mac*/
+	spinlock_t cck_and_rw_pagea_lock;
+
+	/*Easy concurrent*/
+	spinlock_t check_sendpkt_lock;
+
+	spinlock_t iqk_lock;
+};
+
+struct rtl_works {
+	struct ieee80211_hw *hw;
+
+	/*timer */
+	struct timer_list watchdog_timer;
+	struct timer_list dualmac_easyconcurrent_retrytimer;
+	struct timer_list fw_clockoff_timer;
+	struct timer_list fast_antenna_trainning_timer;
+	/*task */
+	struct tasklet_struct irq_tasklet;
+	struct tasklet_struct irq_prepare_bcn_tasklet;
+
+	/*work queue */
+	struct workqueue_struct *rtl_wq;
+	struct delayed_work watchdog_wq;
+	struct delayed_work ips_nic_off_wq;
+
+	/* For SW LPS */
+	struct delayed_work ps_work;
+	struct delayed_work ps_rfon_wq;
+	struct delayed_work fwevt_wq;
+};
+
+struct rtl_debug {
+	u32 dbgp_type[DBGP_TYPE_MAX];
+	u32 global_debuglevel;
+	u64 global_debugcomponents;
+
+	/* add for proc debug */
+	struct proc_dir_entry *proc_dir;
+	char proc_name[20];
+};
+
+#define MIMO_PS_STATIC			0
+#define MIMO_PS_DYNAMIC			1
+#define MIMO_PS_NOLIMIT			3
+
+struct rtl_dualmac_easy_concurrent_ctl {
+	enum band_type currentbandtype_backfordmdp;
+	bool bclose_bbandrf_for_dmsp;
+	bool bchange_to_dmdp;
+	bool bchange_to_dmsp;
+	bool bswitch_in_process;
+};
+
+struct rtl_dmsp_ctl {
+	bool bactivescan_for_slaveofdmsp;
+	bool bscan_for_anothermac_fordmsp;
+	bool bscan_for_itself_fordmsp;
+	bool bwritedig_for_anothermacofdmsp;
+	u32 curdigvalue_for_anothermacofdmsp;
+	bool bchangecckpdstate_for_anothermacofdmsp;
+	u8 curcckpdstate_for_anothermacofdmsp;
+	bool bchangetxhighpowerlvl_for_anothermacofdmsp;
+	u8 curtxhighlvl_for_anothermacofdmsp;
+	long rssivalmin_for_anothermacofdmsp;
+};
+
+struct rtl_global_var {
+	/* from this list we can get
+	 * other adapter's rtl_priv */
+	struct list_head glb_priv_list;
+	spinlock_t glb_list_lock;
+};
+
+struct rtl_btc_info {
+	u8 bt_type;
+	u8 btcoexist;
+	u8 ant_num;
+};
+
+struct rtl_btc_ops {
+	void (*btc_init_variables) (struct rtl_priv *rtlpriv);
+	void (*btc_init_hal_vars) (struct rtl_priv *rtlpriv);
+	void (*btc_init_hw_config) (struct rtl_priv *rtlpriv);
+	void (*btc_ips_notify) (struct rtl_priv *rtlpriv, u8 type);
+	void (*btc_scan_notify) (struct rtl_priv *rtlpriv, u8 scantype);
+	void (*btc_connect_notify) (struct rtl_priv *rtlpriv, u8 action);
+	void (*btc_mediastatus_notify) (struct rtl_priv *rtlpriv,
+					enum rt_media_status mstatus);
+	void (*btc_periodical) (struct rtl_priv *rtlpriv);
+	void (*btc_halt_notify) (void);
+	void (*btc_btinfo_notify) (struct rtl_priv *rtlpriv,
+				   u8 * tmp_buf, u8 length);
+	bool (*btc_is_limited_dig) (struct rtl_priv *rtlpriv);
+	bool (*btc_is_disable_edca_turbo) (struct rtl_priv *rtlpriv);
+	bool (*btc_is_bt_disabled) (struct rtl_priv *rtlpriv);
+};
+
+struct rtl_bt_coexist {
+	struct rtl_btc_ops *btc_ops;
+	struct rtl_btc_info btc_info;
+};
+
+
+struct rtl_priv {
+	struct list_head list;
+#ifdef VIF_TODO
+	struct vif_priv vif_priv;
+#endif
+	struct rtl_priv *buddy_priv;
+	struct rtl_global_var *glb_var;
+	struct rtl_dualmac_easy_concurrent_ctl easy_concurrent_ctl;
+	struct rtl_dmsp_ctl dmsp_ctl;
+	struct rtl_locks locks;
+	struct rtl_works works;
+	struct rtl_mac mac80211;
+	struct rtl_hal rtlhal;
+	struct rtl_regulatory regd;
+	struct rtl_rfkill rfkill;
+	struct rtl_io io;
+	struct rtl_phy phy;
+	struct rtl_dm dm;
+	struct rtl_security sec;
+	struct rtl_efuse efuse;
+
+	struct rtl_ps_ctl psc;
+	struct rate_adaptive ra;
+	struct dynamic_primary_cca primarycca;
+	struct wireless_stats stats;
+	struct rt_link_detect link_info;
+	struct false_alarm_statistics falsealm_cnt;
+
+	struct rtl_rate_priv *rate_priv;
+
+	struct rtl_debug dbg;
+
+	/* sta entry list for ap adhoc or mesh */
+	struct list_head entry_list;
+
+	/*
+	 *hal_cfg : for diff cards
+	 *intf_ops : for diff interface usb/pcie
+	 */
+	struct rtl_hal_cfg *cfg;
+	struct rtl_intf_ops *intf_ops;
+
+	/*this var will be set by set_bit,
+	   and is used to indicate the status of
+	   the interface or hardware */
+	unsigned long status;
+
+	/* Intel Proximity: memory should be allocated
+	 * in the Intel Proximity module and can only
+	 * be used in Intel Proximity mode */
+	struct proxim proximity;
+
+	/*for bt coexist use*/
+	struct rtl_bt_coexist btcoexist;
+
+	/* separate 92ee from other ICs,
+	 * 92ee uses the new trx flow. */
+	bool use_new_trx_flow;
+	/*This must be the last item so
+	   that it points to the data allocated
+	   beyond  this structure like:
+	   rtl_pci_priv or rtl_usb_priv */
+	u8 priv[0];
+};
+
+#define rtl_priv(hw)		(((struct rtl_priv *)(hw)->priv))
+#define rtl_mac(rtlpriv)	(&((rtlpriv)->mac80211))
+#define rtl_hal(rtlpriv)	(&((rtlpriv)->rtlhal))
+#define rtl_efuse(rtlpriv)	(&((rtlpriv)->efuse))
+#define rtl_psc(rtlpriv)	(&((rtlpriv)->psc))
+#define rtl_sec(rtlpriv) 	(&((rtlpriv)->sec))
+#define rtl_dm(rtlpriv)	(&((rtlpriv)->dm))
+/***************************************
+    Bluetooth Co-existence Related
+****************************************/
+
+enum bt_ant_num {
+        ANT_X2 = 0,
+        ANT_X1 = 1,
+};
+
+enum bt_co_type {
+        BT_2WIRE = 0,
+        BT_ISSC_3WIRE = 1,
+        BT_ACCEL = 2,
+        BT_CSR_BC4 = 3,
+        BT_CSR_BC8 = 4,
+        BT_RTL8756 = 5,
+        BT_RTL8723A = 6,
+        BT_RTL8821A = 7,
+        BT_RTL8723B = 8,
+        BT_RTL8192E = 9,
+        BT_RTL8812A = 11,
+};
+
+enum bt_total_ant_num{
+	ANT_TOTAL_X2 = 0,
+	ANT_TOTAL_X1 = 1
+};
+
+enum bt_cur_state {
+        BT_OFF = 0,
+        BT_ON = 1,
+};
+
+enum bt_service_type {
+        BT_SCO = 0,
+        BT_A2DP = 1,
+        BT_HID = 2,
+        BT_HID_IDLE = 3,
+        BT_SCAN = 4,
+        BT_IDLE = 5,
+        BT_OTHER_ACTION = 6,
+        BT_BUSY = 7,
+        BT_OTHERBUSY = 8,
+        BT_PAN = 9,
+};
+
+enum bt_radio_shared {
+        BT_RADIO_SHARED = 0,
+        BT_RADIO_INDIVIDUAL = 1,
+};
+
+struct bt_coexist_info {
+
+	/* EEPROM BT info. */
+	u8 eeprom_bt_coexist;
+	u8 eeprom_bt_type;
+	u8 eeprom_bt_ant_num;
+	u8 eeprom_bt_ant_isolation;
+	u8 eeprom_bt_radio_shared;
+
+	u8 bt_coexistence;
+	u8 bt_ant_num;
+	u8 bt_coexist_type;
+	u8 bt_state;
+	u8 bt_cur_state;	/* 0:on, 1:off */
+	u8 bt_ant_isolation;	/* 0:good, 1:bad */
+	u8 bt_pape_ctrl;	/* 0:SW, 1:SW/HW dynamic */
+	u8 bt_service;
+	u8 bt_radio_shared_type;
+	u8 bt_rfreg_origin_1e;
+	u8 bt_rfreg_origin_1f;
+	u8 bt_rssi_state;
+	u32 ratio_tx;
+	u32 ratio_pri;
+	u32 bt_edca_ul;
+	u32 bt_edca_dl;
+
+	bool b_init_set;
+	bool b_bt_busy_traffic;
+	bool b_bt_traffic_mode_set;
+	bool b_bt_non_traffic_mode_set;
+
+	bool b_fw_coexist_all_off;
+	bool b_sw_coexist_all_off;
+	bool b_hw_coexist_all_off;
+	u32 current_state;
+	u32 previous_state;
+	u32 current_state_h;
+	u32 previous_state_h;
+
+	u8 bt_pre_rssi_state;
+	u8 bt_pre_rssi_state1;
+
+	u8 b_reg_bt_iso;
+	u8 b_reg_bt_sco;
+	bool b_balance_on;
+	u8 bt_active_zero_cnt;
+	bool b_cur_bt_disabled;
+	bool b_pre_bt_disabled;
+
+	u8 bt_profile_case;
+	u8 bt_profile_action;
+	bool b_bt_busy;
+	bool b_hold_for_bt_operation;
+	u8 lps_counter;
+};
+
+
+/****************************************
+	mem access macro define start
+	Call endian free function when
+	1. Read/write packet content.
+	2. Before write integer to IO.
+	3. After read integer from IO.
+****************************************/
+/* Convert little data endian to host */
+#define EF1BYTE(_val)		\
+	((u8)(_val))
+#define EF2BYTE(_val)		\
+	(le16_to_cpu(_val))
+#define EF4BYTE(_val)		\
+	(le32_to_cpu(_val))
+
+/* Read data from memory */
+#define READEF1BYTE(_ptr)	\
+	EF1BYTE(*((u8 *)(_ptr)))
+#define READEF2BYTE(_ptr)	\
+	EF2BYTE(*((u16 *)(_ptr)))
+#define READEF4BYTE(_ptr)	\
+	EF4BYTE(*((u32 *)(_ptr)))
+
+/* Write data to memory */
+#define WRITEEF1BYTE(_ptr, _val)	\
+	(*((u8 *)(_ptr)))=EF1BYTE(_val)
+#define WRITEEF2BYTE(_ptr, _val)	\
+	(*((u16 *)(_ptr)))=EF2BYTE(_val)
+#define WRITEEF4BYTE(_ptr, _val)	\
+	(*((u32 *)(_ptr)))=EF4BYTE(_val)
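+/*
+ * Illustrative sketch (not part of the original header): per the note above,
+ * these helpers are meant for packet content and IO integers.  The buffer
+ * name "payload" and the field offset used here are hypothetical.
+ *
+ *	u16 reason = READEF2BYTE(payload + 2);
+ *	WRITEEF2BYTE(payload + 2, reason);
+ */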
+
+/*Example:
+BIT_LEN_MASK_32(0) => 0x00000000
+BIT_LEN_MASK_32(1) => 0x00000001
+BIT_LEN_MASK_32(2) => 0x00000003
+BIT_LEN_MASK_32(32) => 0xFFFFFFFF*/
+#define BIT_LEN_MASK_32(__bitlen)	 \
+	(0xFFFFFFFF >> (32 - (__bitlen)))
+#define BIT_LEN_MASK_16(__bitlen)	 \
+	(0xFFFF >> (16 - (__bitlen)))
+#define BIT_LEN_MASK_8(__bitlen) \
+	(0xFF >> (8 - (__bitlen)))
+
+/*Example:
+BIT_OFFSET_LEN_MASK_32(0, 2) => 0x00000003
+BIT_OFFSET_LEN_MASK_32(16, 2) => 0x00030000*/
+#define BIT_OFFSET_LEN_MASK_32(__bitoffset, __bitlen) \
+	(BIT_LEN_MASK_32(__bitlen) << (__bitoffset))
+#define BIT_OFFSET_LEN_MASK_16(__bitoffset, __bitlen) \
+	(BIT_LEN_MASK_16(__bitlen) << (__bitoffset))
+#define BIT_OFFSET_LEN_MASK_8(__bitoffset, __bitlen) \
+	(BIT_LEN_MASK_8(__bitlen) << (__bitoffset))
+
+/*Description:
+Return 4-byte value in host byte ordering from
+4-byte pointer in little-endian system.*/
+#define LE_P4BYTE_TO_HOST_4BYTE(__pstart) \
+	(EF4BYTE(*((u32 *)(__pstart))))
+#define LE_P2BYTE_TO_HOST_2BYTE(__pstart) \
+	(EF2BYTE(*((u16 *)(__pstart))))
+#define LE_P1BYTE_TO_HOST_1BYTE(__pstart) \
+	(EF1BYTE(*((u8 *)(__pstart))))
+
+/*Description:
+Translate subfield (continuous bits in little-endian) of 4-byte
+value to host byte ordering.*/
+#define LE_BITS_TO_4BYTE(__pstart, __bitoffset, __bitlen) \
+	( \
+		( LE_P4BYTE_TO_HOST_4BYTE(__pstart) >> (__bitoffset) )  & \
+		BIT_LEN_MASK_32(__bitlen) \
+	)
+#define LE_BITS_TO_2BYTE(__pstart, __bitoffset, __bitlen) \
+	( \
+		( LE_P2BYTE_TO_HOST_2BYTE(__pstart) >> (__bitoffset) ) & \
+		BIT_LEN_MASK_16(__bitlen) \
+	)
+#define LE_BITS_TO_1BYTE(__pstart, __bitoffset, __bitlen) \
+	( \
+		( LE_P1BYTE_TO_HOST_1BYTE(__pstart) >> (__bitoffset) ) & \
+		BIT_LEN_MASK_8(__bitlen) \
+	)
+
+/*Description:
+Mask subfield (continuous bits in little-endian) of 4-byte value
+and return the result in 4-byte value in host byte ordering.*/
+#define LE_BITS_CLEARED_TO_4BYTE(__pstart, __bitoffset, __bitlen) \
+	( \
+		LE_P4BYTE_TO_HOST_4BYTE(__pstart)  & \
+		( ~BIT_OFFSET_LEN_MASK_32(__bitoffset, __bitlen) ) \
+	)
+#define LE_BITS_CLEARED_TO_2BYTE(__pstart, __bitoffset, __bitlen) \
+	( \
+		LE_P2BYTE_TO_HOST_2BYTE(__pstart) & \
+		( ~BIT_OFFSET_LEN_MASK_16(__bitoffset, __bitlen) ) \
+	)
+#define LE_BITS_CLEARED_TO_1BYTE(__pstart, __bitoffset, __bitlen) \
+	( \
+		LE_P1BYTE_TO_HOST_1BYTE(__pstart) & \
+		( ~BIT_OFFSET_LEN_MASK_8(__bitoffset, __bitlen) ) \
+	)
+
+/*Description:
+Set subfield of little-endian 4-byte value to specified value.	*/
+#define SET_BITS_TO_LE_4BYTE(__pstart, __bitoffset, __bitlen, __val) \
+	*((u32 *)(__pstart)) = EF4BYTE \
+	( \
+		LE_BITS_CLEARED_TO_4BYTE(__pstart, __bitoffset, __bitlen) | \
+		( (((u32)__val) & BIT_LEN_MASK_32(__bitlen)) << (__bitoffset) )\
+       );
+#define SET_BITS_TO_LE_2BYTE(__pstart, __bitoffset, __bitlen, __val) \
+	*((u16 *)(__pstart)) = EF2BYTE \
+	( \
+		LE_BITS_CLEARED_TO_2BYTE(__pstart, __bitoffset, __bitlen) | \
+		( (((u16)__val) & BIT_LEN_MASK_16(__bitlen)) << (__bitoffset) )\
+       );
+#define SET_BITS_TO_LE_1BYTE(__pstart, __bitoffset, __bitlen, __val) \
+	*((u8 *)(__pstart)) = EF1BYTE \
+	( \
+		LE_BITS_CLEARED_TO_1BYTE(__pstart, __bitoffset, __bitlen) | \
+		( (((u8)__val) & BIT_LEN_MASK_8(__bitlen)) << (__bitoffset) ) \
+       );
+
+#define	N_BYTE_ALIGMENT(__value, __aligment) ((__aligment == 1) ? \
+	(__value) : (((__value + __aligment - 1) / __aligment) * __aligment))
+
+/****************************************
+	mem access macro define end
+****************************************/
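+
+/*
+ * Minimal usage sketch for the macros above (hypothetical buffer, not
+ * taken from the driver): write a 2-bit field at bit offset 4 of a
+ * little-endian descriptor word, then read it back.
+ *
+ *	u8 desc[4] = { 0 };
+ *	u32 field;
+ *
+ *	SET_BITS_TO_LE_4BYTE(desc, 4, 2, 0x2);
+ *	field = LE_BITS_TO_4BYTE(desc, 4, 2);	-> field == 0x2
+ */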
+
+#define byte(x,n) ((x >> (8 * n)) & 0xff)
+
+#define packet_get_type(_packet) (EF1BYTE((_packet).octet[0]) & 0xFC)
+#define RTL_WATCH_DOG_TIME    	2000
+#define MSECS(t)		msecs_to_jiffies(t)
+#define WLAN_FC_GET_VERS(fc) 	((fc) & IEEE80211_FCTL_VERS)
+#define WLAN_FC_GET_TYPE(fc) 	((fc) & IEEE80211_FCTL_FTYPE)
+#define WLAN_FC_GET_STYPE(fc) 	((fc) & IEEE80211_FCTL_STYPE)
+#define WLAN_FC_MORE_DATA(fc) 	((fc) & IEEE80211_FCTL_MOREDATA)
+#define SEQ_TO_SN(seq) 		(((seq) & IEEE80211_SCTL_SEQ) >> 4)
+#define SN_TO_SEQ(ssn) 		(((ssn) << 4) & IEEE80211_SCTL_SEQ)
+#define MAX_SN 			((IEEE80211_SCTL_SEQ) >> 4)
+
+#define	RT_RF_OFF_LEVL_ASPM		BIT(0)	/*PCI ASPM */
+#define	RT_RF_OFF_LEVL_CLK_REQ		BIT(1)	/*PCI clock request */
+#define	RT_RF_OFF_LEVL_PCI_D3		BIT(2)	/*PCI D3 mode */
+/*NIC halt, re-initialize hw parameters*/
+#define	RT_RF_OFF_LEVL_HALT_NIC		BIT(3)
+#define	RT_RF_OFF_LEVL_FREE_FW		BIT(4)	/*FW free, re-download the FW */
+#define	RT_RF_OFF_LEVL_FW_32K		BIT(5)	/*FW in 32k */
+/*Always enable ASPM and Clock Req in initialization.*/
+#define	RT_RF_PS_LEVEL_ALWAYS_ASPM	BIT(6)
+/* no matter RFOFF or SLEEP we set PS_ASPM_LEVL*/
+#define	RT_PS_LEVEL_ASPM		BIT(7)
+/*When LPS is on, disable 2R if no packet is received or transmitted.*/
+#define	RT_RF_LPS_DISALBE_2R		BIT(30)
+#define	RT_RF_LPS_LEVEL_ASPM		BIT(31)	/*LPS with ASPM */
+#define	RT_IN_PS_LEVEL(ppsc, _ps_flg)		\
+	((ppsc->cur_ps_level & _ps_flg) ? true : false)
+#define	RT_CLEAR_PS_LEVEL(ppsc, _ps_flg)	\
+	(ppsc->cur_ps_level &= (~(_ps_flg)))
+#define	RT_SET_PS_LEVEL(ppsc, _ps_flg)		\
+	(ppsc->cur_ps_level |= _ps_flg)
+
+#define container_of_dwork_rtl(x,y,z) \
+	container_of(container_of(x, struct delayed_work, work), y, z)
+
+#define FILL_OCTET_STRING(_os,_octet,_len)	\
+		(_os).octet=(u8*)(_octet);		\
+		(_os).length=(_len);
+
+#define CP_MACADDR(des,src)	\
+	((des)[0]=(src)[0],(des)[1]=(src)[1],\
+	(des)[2]=(src)[2],(des)[3]=(src)[3],\
+	(des)[4]=(src)[4],(des)[5]=(src)[5])
+
+static inline u8 rtl_read_byte(struct rtl_priv *rtlpriv, u32 addr)
+{
+	return rtlpriv->io.read8_sync(rtlpriv, addr);
+}
+
+static inline u16 rtl_read_word(struct rtl_priv *rtlpriv, u32 addr)
+{
+	return rtlpriv->io.read16_sync(rtlpriv, addr);
+}
+
+static inline u32 rtl_read_dword(struct rtl_priv *rtlpriv, u32 addr)
+{
+	return rtlpriv->io.read32_sync(rtlpriv, addr);
+}
+
+static inline void rtl_write_byte(struct rtl_priv *rtlpriv, u32 addr, u8 val8)
+{
+	rtlpriv->io.write8_async(rtlpriv, addr, val8);
+
+	if (rtlpriv->cfg->write_readback)
+		rtlpriv->io.read8_sync(rtlpriv, addr);
+}
+
+static inline void rtl_write_word(struct rtl_priv *rtlpriv, u32 addr, u16 val16)
+{
+	rtlpriv->io.write16_async(rtlpriv, addr, val16);
+
+	if (rtlpriv->cfg->write_readback)
+		rtlpriv->io.read16_sync(rtlpriv, addr);
+}
+
+static inline void rtl_write_dword(struct rtl_priv *rtlpriv,
+				   u32 addr, u32 val32)
+{
+	rtlpriv->io.write32_async(rtlpriv, addr, val32);
+
+	if (rtlpriv->cfg->write_readback)
+		rtlpriv->io.read32_sync(rtlpriv, addr);
+}
+
+static inline u32 rtl_get_bbreg(struct ieee80211_hw *hw,
+				u32 regaddr, u32 bitmask)
+{
+	return ((struct rtl_priv *)(hw)->priv)->cfg->ops->get_bbreg(hw,
+								    regaddr,
+								    bitmask);
+}
+
+static inline void rtl_set_bbreg(struct ieee80211_hw *hw, u32 regaddr,
+				 u32 bitmask, u32 data)
+{
+	((struct rtl_priv *)(hw)->priv)->cfg->ops->set_bbreg(hw,
+							     regaddr, bitmask,
+							     data);
+
+}
+
+static inline u32 rtl_get_rfreg(struct ieee80211_hw *hw,
+				enum radio_path rfpath, u32 regaddr,
+				u32 bitmask)
+{
+	return ((struct rtl_priv *)(hw)->priv)->cfg->ops->get_rfreg(hw,
+								    rfpath,
+								    regaddr,
+								    bitmask);
+}
+
+static inline void rtl_set_rfreg(struct ieee80211_hw *hw,
+				 enum radio_path rfpath, u32 regaddr,
+				 u32 bitmask, u32 data)
+{
+	((struct rtl_priv *)(hw)->priv)->cfg->ops->set_rfreg(hw,
+							     rfpath, regaddr,
+							     bitmask, data);
+}
+
+static inline bool is_hal_stop(struct rtl_hal *rtlhal)
+{
+	return (_HAL_STATE_STOP == rtlhal->state);
+}
+
+static inline void set_hal_start(struct rtl_hal *rtlhal)
+{
+	rtlhal->state = _HAL_STATE_START;
+}
+
+static inline void set_hal_stop(struct rtl_hal *rtlhal)
+{
+	rtlhal->state = _HAL_STATE_STOP;
+}
+
+static inline u8 get_rf_type(struct rtl_phy *rtlphy)
+{
+	return rtlphy->rf_type;
+}
+
+static inline struct ieee80211_hdr *rtl_get_hdr(struct sk_buff *skb)
+{
+	return (struct ieee80211_hdr *)(skb->data);
+}
+
+static inline u16 rtl_get_fc(struct sk_buff *skb)
+{
+	return le16_to_cpu(rtl_get_hdr(skb)->frame_control);
+}
+
+static inline u16 rtl_get_tid_h(struct ieee80211_hdr *hdr)
+{
+	return (ieee80211_get_qos_ctl(hdr))[0] & IEEE80211_QOS_CTL_TID_MASK;
+}
+
+static inline u16 rtl_get_tid(struct sk_buff *skb)
+{
+	return rtl_get_tid_h(rtl_get_hdr(skb));
+}
+
+static inline struct ieee80211_sta *rtl_find_sta(struct ieee80211_hw *hw,
+						 u8 *mac_addr)
+{
+	struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
+	return ieee80211_find_sta(mac->vif, mac_addr);
+}
+
+struct ieee80211_hw *rtl_pci_get_hw_pointer(void);
+#endif
diff --git a/drivers/staging/usbip/userspace/libsrc/names.c b/drivers/staging/usbip/userspace/libsrc/names.c
index 3c8d28b..81ff852 100644
--- a/drivers/staging/usbip/userspace/libsrc/names.c
+++ b/drivers/staging/usbip/userspace/libsrc/names.c
@@ -169,15 +169,15 @@
 	struct pool *p;
 
 	p = calloc(1, sizeof(struct pool));
-	if (!p) {
+	if (!p)
+		return NULL;
+
+	p->mem = calloc(1, size);
+	if (!p->mem) {
 		free(p);
 		return NULL;
 	}
 
-	p->mem = calloc(1, size);
-	if (!p->mem)
-		return NULL;
-
 	p->next = pool_head;
 	pool_head = p;
 
diff --git a/drivers/staging/usbip/vhci_sysfs.c b/drivers/staging/usbip/vhci_sysfs.c
index 9b51586..0141bc3 100644
--- a/drivers/staging/usbip/vhci_sysfs.c
+++ b/drivers/staging/usbip/vhci_sysfs.c
@@ -149,7 +149,8 @@
 	case USB_SPEED_WIRELESS:
 		break;
 	default:
-		pr_err("speed %d\n", speed);
+		pr_err("Failed attach request for unsupported USB speed: %s\n",
+			usb_speed_string(speed));
 		return -EINVAL;
 	}
 
diff --git a/drivers/staging/wlags49_h2/wl_wext.c b/drivers/staging/wlags49_h2/wl_wext.c
index 4a1ddaf..187fc06 100644
--- a/drivers/staging/wlags49_h2/wl_wext.c
+++ b/drivers/staging/wlags49_h2/wl_wext.c
@@ -1061,7 +1061,7 @@
 		goto out;
 	}
 
-	if (data->flags != 0 && data->length > HCF_MAX_NAME_LEN + 1) {
+	if (data->flags != 0 && data->length > HCF_MAX_NAME_LEN) {
 		ret = -EINVAL;
 		goto out;
 	}
diff --git a/drivers/staging/zsmalloc/Kconfig b/drivers/staging/zsmalloc/Kconfig
deleted file mode 100644
index 9d1f2a2..0000000
--- a/drivers/staging/zsmalloc/Kconfig
+++ /dev/null
@@ -1,24 +0,0 @@
-config ZSMALLOC
-	bool "Memory allocator for compressed pages"
-	depends on MMU
-	default n
-	help
-	  zsmalloc is a slab-based memory allocator designed to store
-	  compressed RAM pages.  zsmalloc uses virtual memory mapping
-	  in order to reduce fragmentation.  However, this results in a
-	  non-standard allocator interface where a handle, not a pointer, is
-	  returned by an alloc().  This handle must be mapped in order to
-	  access the allocated space.
-
-config PGTABLE_MAPPING
-	bool "Use page table mapping to access object in zsmalloc"
-	depends on ZSMALLOC
-	help
-	  By default, zsmalloc uses a copy-based object mapping method to
-	  access allocations that span two pages. However, if a particular
-	  architecture (ex, ARM) performs VM mapping faster than copying,
-	  then you should select this. This causes zsmalloc to use page table
-	  mapping rather than copying for object mapping.
-
-	  You can check speed with zsmalloc benchmark[1].
-	  [1] https://github.com/spartacus06/zsmalloc
diff --git a/drivers/staging/zsmalloc/Makefile b/drivers/staging/zsmalloc/Makefile
deleted file mode 100644
index b134848..0000000
--- a/drivers/staging/zsmalloc/Makefile
+++ /dev/null
@@ -1,3 +0,0 @@
-zsmalloc-y 		:= zsmalloc-main.o
-
-obj-$(CONFIG_ZSMALLOC)	+= zsmalloc.o
diff --git a/drivers/target/Kconfig b/drivers/target/Kconfig
index 1830368..dc2d84a 100644
--- a/drivers/target/Kconfig
+++ b/drivers/target/Kconfig
@@ -3,6 +3,7 @@
 	tristate "Generic Target Core Mod (TCM) and ConfigFS Infrastructure"
 	depends on SCSI && BLOCK
 	select CONFIGFS_FS
+	select CRC_T10DIF
 	default n
 	help
 	Say Y or M here to enable the TCM Storage Engine and ConfigFS enabled
@@ -13,6 +14,7 @@
 
 config TCM_IBLOCK
 	tristate "TCM/IBLOCK Subsystem Plugin for Linux/BLOCK"
+	select BLK_DEV_INTEGRITY
 	help
 	Say Y here to enable the TCM/IBLOCK subsystem plugin for non-buffered
 	access to Linux/Block devices using BIO
diff --git a/drivers/target/iscsi/iscsi_target.c b/drivers/target/iscsi/iscsi_target.c
index 0086719..7f1a7ce 100644
--- a/drivers/target/iscsi/iscsi_target.c
+++ b/drivers/target/iscsi/iscsi_target.c
@@ -52,7 +52,7 @@
 static LIST_HEAD(g_tiqn_list);
 static LIST_HEAD(g_np_list);
 static DEFINE_SPINLOCK(tiqn_lock);
-static DEFINE_SPINLOCK(np_lock);
+static DEFINE_MUTEX(np_lock);
 
 static struct idr tiqn_idr;
 struct idr sess_idr;
@@ -307,6 +307,9 @@
 	return false;
 }
 
+/*
+ * Called with mutex np_lock held
+ */
 static struct iscsi_np *iscsit_get_np(
 	struct __kernel_sockaddr_storage *sockaddr,
 	int network_transport)
@@ -314,11 +317,10 @@
 	struct iscsi_np *np;
 	bool match;
 
-	spin_lock_bh(&np_lock);
 	list_for_each_entry(np, &g_np_list, np_list) {
-		spin_lock(&np->np_thread_lock);
+		spin_lock_bh(&np->np_thread_lock);
 		if (np->np_thread_state != ISCSI_NP_THREAD_ACTIVE) {
-			spin_unlock(&np->np_thread_lock);
+			spin_unlock_bh(&np->np_thread_lock);
 			continue;
 		}
 
@@ -330,13 +332,11 @@
 			 * while iscsi_tpg_add_network_portal() is called.
 			 */
 			np->np_exports++;
-			spin_unlock(&np->np_thread_lock);
-			spin_unlock_bh(&np_lock);
+			spin_unlock_bh(&np->np_thread_lock);
 			return np;
 		}
-		spin_unlock(&np->np_thread_lock);
+		spin_unlock_bh(&np->np_thread_lock);
 	}
-	spin_unlock_bh(&np_lock);
 
 	return NULL;
 }
@@ -350,16 +350,22 @@
 	struct sockaddr_in6 *sock_in6;
 	struct iscsi_np *np;
 	int ret;
+
+	mutex_lock(&np_lock);
+
 	/*
 	 * Locate the existing struct iscsi_np if already active..
 	 */
 	np = iscsit_get_np(sockaddr, network_transport);
-	if (np)
+	if (np) {
+		mutex_unlock(&np_lock);
 		return np;
+	}
 
 	np = kzalloc(sizeof(struct iscsi_np), GFP_KERNEL);
 	if (!np) {
 		pr_err("Unable to allocate memory for struct iscsi_np\n");
+		mutex_unlock(&np_lock);
 		return ERR_PTR(-ENOMEM);
 	}
 
@@ -382,6 +388,7 @@
 	ret = iscsi_target_setup_login_socket(np, sockaddr);
 	if (ret != 0) {
 		kfree(np);
+		mutex_unlock(&np_lock);
 		return ERR_PTR(ret);
 	}
 
@@ -390,6 +397,7 @@
 		pr_err("Unable to create kthread: iscsi_np\n");
 		ret = PTR_ERR(np->np_thread);
 		kfree(np);
+		mutex_unlock(&np_lock);
 		return ERR_PTR(ret);
 	}
 	/*
@@ -400,10 +408,10 @@
 	 * point because iscsi_np has not been added to g_np_list yet.
 	 */
 	np->np_exports = 1;
+	np->np_thread_state = ISCSI_NP_THREAD_ACTIVE;
 
-	spin_lock_bh(&np_lock);
 	list_add_tail(&np->np_list, &g_np_list);
-	spin_unlock_bh(&np_lock);
+	mutex_unlock(&np_lock);
 
 	pr_debug("CORE[0] - Added Network Portal: %s:%hu on %s\n",
 		np->np_ip, np->np_port, np->np_transport->name);
@@ -470,9 +478,9 @@
 
 	np->np_transport->iscsit_free_np(np);
 
-	spin_lock_bh(&np_lock);
+	mutex_lock(&np_lock);
 	list_del(&np->np_list);
-	spin_unlock_bh(&np_lock);
+	mutex_unlock(&np_lock);
 
 	pr_debug("CORE[0] - Removed Network Portal: %s:%hu on %s\n",
 		np->np_ip, np->np_port, np->np_transport->name);
@@ -622,7 +630,7 @@
 {
 	struct iscsi_cmd *cmd;
 
-	cmd = iscsit_allocate_cmd(conn, GFP_KERNEL);
+	cmd = iscsit_allocate_cmd(conn, TASK_INTERRUPTIBLE);
 	if (!cmd)
 		return -1;
 
@@ -2475,7 +2483,7 @@
 	if (!conn_p)
 		return;
 
-	cmd = iscsit_allocate_cmd(conn_p, GFP_ATOMIC);
+	cmd = iscsit_allocate_cmd(conn_p, TASK_RUNNING);
 	if (!cmd) {
 		iscsit_dec_conn_usage_count(conn_p);
 		return;
@@ -3951,7 +3959,7 @@
 
 	switch (hdr->opcode & ISCSI_OPCODE_MASK) {
 	case ISCSI_OP_SCSI_CMD:
-		cmd = iscsit_allocate_cmd(conn, GFP_KERNEL);
+		cmd = iscsit_allocate_cmd(conn, TASK_INTERRUPTIBLE);
 		if (!cmd)
 			goto reject;
 
@@ -3963,28 +3971,28 @@
 	case ISCSI_OP_NOOP_OUT:
 		cmd = NULL;
 		if (hdr->ttt == cpu_to_be32(0xFFFFFFFF)) {
-			cmd = iscsit_allocate_cmd(conn, GFP_KERNEL);
+			cmd = iscsit_allocate_cmd(conn, TASK_INTERRUPTIBLE);
 			if (!cmd)
 				goto reject;
 		}
 		ret = iscsit_handle_nop_out(conn, cmd, buf);
 		break;
 	case ISCSI_OP_SCSI_TMFUNC:
-		cmd = iscsit_allocate_cmd(conn, GFP_KERNEL);
+		cmd = iscsit_allocate_cmd(conn, TASK_INTERRUPTIBLE);
 		if (!cmd)
 			goto reject;
 
 		ret = iscsit_handle_task_mgt_cmd(conn, cmd, buf);
 		break;
 	case ISCSI_OP_TEXT:
-		cmd = iscsit_allocate_cmd(conn, GFP_KERNEL);
+		cmd = iscsit_allocate_cmd(conn, TASK_INTERRUPTIBLE);
 		if (!cmd)
 			goto reject;
 
 		ret = iscsit_handle_text_cmd(conn, cmd, buf);
 		break;
 	case ISCSI_OP_LOGOUT:
-		cmd = iscsit_allocate_cmd(conn, GFP_KERNEL);
+		cmd = iscsit_allocate_cmd(conn, TASK_INTERRUPTIBLE);
 		if (!cmd)
 			goto reject;
 
diff --git a/drivers/target/iscsi/iscsi_target_erl1.c b/drivers/target/iscsi/iscsi_target_erl1.c
index e048d64..cda4d80 100644
--- a/drivers/target/iscsi/iscsi_target_erl1.c
+++ b/drivers/target/iscsi/iscsi_target_erl1.c
@@ -507,7 +507,9 @@
 	u32 last_statsn;
 	int found_cmd;
 
-	if (conn->exp_statsn > begrun) {
+	if (!begrun) {
+		begrun = conn->exp_statsn;
+	} else if (conn->exp_statsn > begrun) {
 		pr_err("Got Status SNACK Begrun: 0x%08x, RunLength:"
 			" 0x%08x but already got ExpStatSN: 0x%08x on CID:"
 			" %hu.\n", begrun, runlength, conn->exp_statsn,
diff --git a/drivers/target/iscsi/iscsi_target_nego.c b/drivers/target/iscsi/iscsi_target_nego.c
index 83c965c..582ba84 100644
--- a/drivers/target/iscsi/iscsi_target_nego.c
+++ b/drivers/target/iscsi/iscsi_target_nego.c
@@ -1192,7 +1192,7 @@
 	 */
 alloc_tags:
 	tag_num = max_t(u32, ISCSIT_MIN_TAGS, queue_depth);
-	tag_num += (tag_num / 2) + ISCSIT_EXTRA_TAGS;
+	tag_num = (tag_num * 2) + ISCSIT_EXTRA_TAGS;
 	tag_size = sizeof(struct iscsi_cmd) + conn->conn_transport->priv_size;
 
 	ret = transport_alloc_session_tags(sess->se_sess, tag_num, tag_size);
diff --git a/drivers/target/iscsi/iscsi_target_util.c b/drivers/target/iscsi/iscsi_target_util.c
index 0819e68..e655b04 100644
--- a/drivers/target/iscsi/iscsi_target_util.c
+++ b/drivers/target/iscsi/iscsi_target_util.c
@@ -152,13 +152,16 @@
  * May be called from software interrupt (timer) context for allocating
  * iSCSI NopINs.
  */
-struct iscsi_cmd *iscsit_allocate_cmd(struct iscsi_conn *conn, gfp_t gfp_mask)
+struct iscsi_cmd *iscsit_allocate_cmd(struct iscsi_conn *conn, int state)
 {
 	struct iscsi_cmd *cmd;
 	struct se_session *se_sess = conn->sess->se_sess;
 	int size, tag;
 
-	tag = percpu_ida_alloc(&se_sess->sess_tag_pool, gfp_mask);
+	tag = percpu_ida_alloc(&se_sess->sess_tag_pool, state);
+	if (tag < 0)
+		return NULL;
+
 	size = sizeof(struct iscsi_cmd) + conn->conn_transport->priv_size;
 	cmd = (struct iscsi_cmd *)(se_sess->sess_cmd_map + (tag * size));
 	memset(cmd, 0, size);
@@ -926,7 +929,7 @@
 	u8 state;
 	struct iscsi_cmd *cmd;
 
-	cmd = iscsit_allocate_cmd(conn, GFP_ATOMIC);
+	cmd = iscsit_allocate_cmd(conn, TASK_RUNNING);
 	if (!cmd)
 		return -1;
 
diff --git a/drivers/target/iscsi/iscsi_target_util.h b/drivers/target/iscsi/iscsi_target_util.h
index e4fc34a..561a424 100644
--- a/drivers/target/iscsi/iscsi_target_util.h
+++ b/drivers/target/iscsi/iscsi_target_util.h
@@ -9,7 +9,7 @@
 extern void iscsit_free_r2t(struct iscsi_r2t *, struct iscsi_cmd *);
 extern void iscsit_free_r2ts_from_list(struct iscsi_cmd *);
 extern struct iscsi_cmd *iscsit_alloc_cmd(struct iscsi_conn *, gfp_t);
-extern struct iscsi_cmd *iscsit_allocate_cmd(struct iscsi_conn *, gfp_t);
+extern struct iscsi_cmd *iscsit_allocate_cmd(struct iscsi_conn *, int);
 extern struct iscsi_seq *iscsit_get_seq_holder_for_datain(struct iscsi_cmd *, u32);
 extern struct iscsi_seq *iscsit_get_seq_holder_for_r2t(struct iscsi_cmd *);
 extern struct iscsi_r2t *iscsit_get_holder_for_r2tsn(struct iscsi_cmd *, u32);
diff --git a/drivers/target/loopback/tcm_loop.c b/drivers/target/loopback/tcm_loop.c
index 1b41e67..fadad7c 100644
--- a/drivers/target/loopback/tcm_loop.c
+++ b/drivers/target/loopback/tcm_loop.c
@@ -217,7 +217,8 @@
 			scsi_bufflen(sc), tcm_loop_sam_attr(sc),
 			sc->sc_data_direction, 0,
 			scsi_sglist(sc), scsi_sg_count(sc),
-			sgl_bidi, sgl_bidi_count);
+			sgl_bidi, sgl_bidi_count,
+			scsi_prot_sglist(sc), scsi_prot_sg_count(sc));
 	if (rc < 0) {
 		set_host_byte(sc, DID_NO_CONNECT);
 		goto out_done;
@@ -462,7 +463,7 @@
 {
 	struct tcm_loop_hba *tl_hba;
 	struct Scsi_Host *sh;
-	int error;
+	int error, host_prot;
 
 	tl_hba = to_tcm_loop_hba(dev);
 
@@ -486,6 +487,13 @@
 	sh->max_channel = 0;
 	sh->max_cmd_len = TL_SCSI_MAX_CMD_LEN;
 
+	host_prot = SHOST_DIF_TYPE1_PROTECTION | SHOST_DIF_TYPE2_PROTECTION |
+		    SHOST_DIF_TYPE3_PROTECTION | SHOST_DIX_TYPE1_PROTECTION |
+		    SHOST_DIX_TYPE2_PROTECTION | SHOST_DIX_TYPE3_PROTECTION;
+
+	scsi_host_set_prot(sh, host_prot);
+	scsi_host_set_guard(sh, SHOST_DIX_GUARD_CRC);
+
 	error = scsi_add_host(sh, &tl_hba->dev);
 	if (error) {
 		pr_err("%s: scsi_add_host failed\n", __func__);
@@ -1228,7 +1236,7 @@
 
 /* Start items for tcm_loop_naa_cit */
 
-struct se_portal_group *tcm_loop_make_naa_tpg(
+static struct se_portal_group *tcm_loop_make_naa_tpg(
 	struct se_wwn *wwn,
 	struct config_group *group,
 	const char *name)
@@ -1273,7 +1281,7 @@
 	return &tl_tpg->tl_se_tpg;
 }
 
-void tcm_loop_drop_naa_tpg(
+static void tcm_loop_drop_naa_tpg(
 	struct se_portal_group *se_tpg)
 {
 	struct se_wwn *wwn = se_tpg->se_tpg_wwn;
@@ -1305,7 +1313,7 @@
 
 /* Start items for tcm_loop_cit */
 
-struct se_wwn *tcm_loop_make_scsi_hba(
+static struct se_wwn *tcm_loop_make_scsi_hba(
 	struct target_fabric_configfs *tf,
 	struct config_group *group,
 	const char *name)
@@ -1375,7 +1383,7 @@
 	return ERR_PTR(ret);
 }
 
-void tcm_loop_drop_scsi_hba(
+static void tcm_loop_drop_scsi_hba(
 	struct se_wwn *wwn)
 {
 	struct tcm_loop_hba *tl_hba = container_of(wwn,
diff --git a/drivers/target/target_core_alua.c b/drivers/target/target_core_alua.c
index fdcee32..c3d9df6 100644
--- a/drivers/target/target_core_alua.c
+++ b/drivers/target/target_core_alua.c
@@ -41,11 +41,14 @@
 #include "target_core_alua.h"
 #include "target_core_ua.h"
 
-static sense_reason_t core_alua_check_transition(int state, int *primary);
+static sense_reason_t core_alua_check_transition(int state, int valid,
+						 int *primary);
 static int core_alua_set_tg_pt_secondary_state(
 		struct t10_alua_tg_pt_gp_member *tg_pt_gp_mem,
 		struct se_port *port, int explicit, int offline);
 
+static char *core_alua_dump_state(int state);
+
 static u16 alua_lu_gps_counter;
 static u32 alua_lu_gps_count;
 
@@ -55,6 +58,86 @@
 struct t10_alua_lu_gp *default_lu_gp;
 
 /*
+ * REPORT REFERRALS
+ *
+ * See sbc3r35 section 5.23
+ */
+sense_reason_t
+target_emulate_report_referrals(struct se_cmd *cmd)
+{
+	struct se_device *dev = cmd->se_dev;
+	struct t10_alua_lba_map *map;
+	struct t10_alua_lba_map_member *map_mem;
+	unsigned char *buf;
+	u32 rd_len = 0, off;
+
+	if (cmd->data_length < 4) {
+		pr_warn("REPORT REFERRALS allocation length %u too"
+			" small\n", cmd->data_length);
+		return TCM_INVALID_CDB_FIELD;
+	}
+
+	buf = transport_kmap_data_sg(cmd);
+	if (!buf)
+		return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+
+	off = 4;
+	spin_lock(&dev->t10_alua.lba_map_lock);
+	if (list_empty(&dev->t10_alua.lba_map_list)) {
+		spin_unlock(&dev->t10_alua.lba_map_lock);
+		transport_kunmap_data_sg(cmd);
+
+		return TCM_UNSUPPORTED_SCSI_OPCODE;
+	}
+
+	list_for_each_entry(map, &dev->t10_alua.lba_map_list,
+			    lba_map_list) {
+		int desc_num = off + 3;
+		int pg_num;
+
+		off += 4;
+		if (cmd->data_length > off)
+			put_unaligned_be64(map->lba_map_first_lba, &buf[off]);
+		off += 8;
+		if (cmd->data_length > off)
+			put_unaligned_be64(map->lba_map_last_lba, &buf[off]);
+		off += 8;
+		rd_len += 20;
+		pg_num = 0;
+		list_for_each_entry(map_mem, &map->lba_map_mem_list,
+				    lba_map_mem_list) {
+			int alua_state = map_mem->lba_map_mem_alua_state;
+			int alua_pg_id = map_mem->lba_map_mem_alua_pg_id;
+
+			if (cmd->data_length > off)
+				buf[off] = alua_state & 0x0f;
+			off += 2;
+			if (cmd->data_length > off)
+				buf[off] = (alua_pg_id >> 8) & 0xff;
+			off++;
+			if (cmd->data_length > off)
+				buf[off] = (alua_pg_id & 0xff);
+			off++;
+			rd_len += 4;
+			pg_num++;
+		}
+		if (cmd->data_length > desc_num)
+			buf[desc_num] = pg_num;
+	}
+	spin_unlock(&dev->t10_alua.lba_map_lock);
+
+	/*
+	 * Set the RETURN DATA LENGTH in the header of the DataIN Payload
+	 */
+	put_unaligned_be16(rd_len, &buf[2]);
+
+	transport_kunmap_data_sg(cmd);
+
+	target_complete_cmd(cmd, GOOD);
+	return 0;
+}
+
+/*
  * REPORT_TARGET_PORT_GROUPS
  *
  * See spc4r17 section 6.27
@@ -210,7 +293,7 @@
 	unsigned char *ptr;
 	sense_reason_t rc = TCM_NO_SENSE;
 	u32 len = 4; /* Skip over RESERVED area in header */
-	int alua_access_state, primary = 0;
+	int alua_access_state, primary = 0, valid_states;
 	u16 tg_pt_id, rtpi;
 
 	if (!l_port)
@@ -252,6 +335,7 @@
 		rc = TCM_UNSUPPORTED_SCSI_OPCODE;
 		goto out;
 	}
+	valid_states = l_tg_pt_gp->tg_pt_gp_alua_supported_states;
 
 	ptr = &buf[4]; /* Skip over RESERVED area in header */
 
@@ -263,7 +347,8 @@
 		 * the state is a primary or secondary target port asymmetric
 		 * access state.
 		 */
-		rc = core_alua_check_transition(alua_access_state, &primary);
+		rc = core_alua_check_transition(alua_access_state,
+						valid_states, &primary);
 		if (rc) {
 			/*
 			 * If the SET TARGET PORT GROUPS attempts to establish
@@ -386,6 +471,81 @@
 	return 0;
 }
 
+static inline int core_alua_state_lba_dependent(
+	struct se_cmd *cmd,
+	struct t10_alua_tg_pt_gp *tg_pt_gp,
+	u8 *alua_ascq)
+{
+	struct se_device *dev = cmd->se_dev;
+	u64 segment_size, segment_mult, sectors, lba;
+
+	/* Only need to check for cdb actually containing LBAs */
+	if (!(cmd->se_cmd_flags & SCF_SCSI_DATA_CDB))
+		return 0;
+
+	spin_lock(&dev->t10_alua.lba_map_lock);
+	segment_size = dev->t10_alua.lba_map_segment_size;
+	segment_mult = dev->t10_alua.lba_map_segment_multiplier;
+	sectors = cmd->data_length / dev->dev_attrib.block_size;
+
+	lba = cmd->t_task_lba;
+	while (lba < cmd->t_task_lba + sectors) {
+		struct t10_alua_lba_map *cur_map = NULL, *map;
+		struct t10_alua_lba_map_member *map_mem;
+
+		list_for_each_entry(map, &dev->t10_alua.lba_map_list,
+				    lba_map_list) {
+			u64 start_lba, last_lba;
+			u64 first_lba = map->lba_map_first_lba;
+
+			if (segment_mult) {
+				u64 tmp = lba;
+				start_lba = do_div(tmp, segment_size * segment_mult);
+
+				last_lba = first_lba + segment_size - 1;
+				if (start_lba >= first_lba &&
+				    start_lba <= last_lba) {
+					lba += segment_size;
+					cur_map = map;
+					break;
+				}
+			} else {
+				last_lba = map->lba_map_last_lba;
+				if (lba >= first_lba && lba <= last_lba) {
+					lba = last_lba + 1;
+					cur_map = map;
+					break;
+				}
+			}
+		}
+		if (!cur_map) {
+			spin_unlock(&dev->t10_alua.lba_map_lock);
+			*alua_ascq = ASCQ_04H_ALUA_TG_PT_UNAVAILABLE;
+			return 1;
+		}
+		list_for_each_entry(map_mem, &cur_map->lba_map_mem_list,
+				    lba_map_mem_list) {
+			if (map_mem->lba_map_mem_alua_pg_id !=
+			    tg_pt_gp->tg_pt_gp_id)
+				continue;
+			switch(map_mem->lba_map_mem_alua_state) {
+			case ALUA_ACCESS_STATE_STANDBY:
+				spin_unlock(&dev->t10_alua.lba_map_lock);
+				*alua_ascq = ASCQ_04H_ALUA_TG_PT_STANDBY;
+				return 1;
+			case ALUA_ACCESS_STATE_UNAVAILABLE:
+				spin_unlock(&dev->t10_alua.lba_map_lock);
+				*alua_ascq = ASCQ_04H_ALUA_TG_PT_UNAVAILABLE;
+				return 1;
+			default:
+				break;
+			}
+		}
+	}
+	spin_unlock(&dev->t10_alua.lba_map_lock);
+	return 0;
+}
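+
+/*
+ * Worked example of the segment_mult folding above (hypothetical values):
+ * with lba_map_segment_size 64 and lba_map_segment_multiplier 4 the map
+ * repeats every 256 LBAs, so LBA 300 is reduced to offset 300 % 256 = 44
+ * and matched against the entry whose lba_map_first_lba is 0 (covering
+ * offsets 0-63).
+ */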
+
 static inline int core_alua_state_standby(
 	struct se_cmd *cmd,
 	unsigned char *cdb,
@@ -583,6 +743,9 @@
 	case ALUA_ACCESS_STATE_TRANSITION:
 		ret = core_alua_state_transition(cmd, cdb, &alua_ascq);
 		break;
+	case ALUA_ACCESS_STATE_LBA_DEPENDENT:
+		ret = core_alua_state_lba_dependent(cmd, tg_pt_gp, &alua_ascq);
+		break;
 	/*
 	 * OFFLINE is a secondary ALUA target port group access state, that is
 	 * handled above with struct se_port->sep_tg_pt_secondary_offline=1
@@ -618,17 +781,36 @@
  * Check implicit and explicit ALUA state change request.
  */
 static sense_reason_t
-core_alua_check_transition(int state, int *primary)
+core_alua_check_transition(int state, int valid, int *primary)
 {
+	/*
+	 * OPTIMIZED, NON-OPTIMIZED, STANDBY and UNAVAILABLE are
+	 * defined as primary target port asymmetric access states.
+	 */
 	switch (state) {
 	case ALUA_ACCESS_STATE_ACTIVE_OPTIMIZED:
+		if (!(valid & ALUA_AO_SUP))
+			goto not_supported;
+		*primary = 1;
+		break;
 	case ALUA_ACCESS_STATE_ACTIVE_NON_OPTIMIZED:
+		if (!(valid & ALUA_AN_SUP))
+			goto not_supported;
+		*primary = 1;
+		break;
 	case ALUA_ACCESS_STATE_STANDBY:
+		if (!(valid & ALUA_S_SUP))
+			goto not_supported;
+		*primary = 1;
+		break;
 	case ALUA_ACCESS_STATE_UNAVAILABLE:
-		/*
-		 * OPTIMIZED, NON-OPTIMIZED, STANDBY and UNAVAILABLE are
-		 * defined as primary target port asymmetric access states.
-		 */
+		if (!(valid & ALUA_U_SUP))
+			goto not_supported;
+		*primary = 1;
+		break;
+	case ALUA_ACCESS_STATE_LBA_DEPENDENT:
+		if (!(valid & ALUA_LBD_SUP))
+			goto not_supported;
 		*primary = 1;
 		break;
 	case ALUA_ACCESS_STATE_OFFLINE:
@@ -636,14 +818,27 @@
 		 * OFFLINE state is defined as a secondary target port
 		 * asymmetric access state.
 		 */
+		if (!(valid & ALUA_O_SUP))
+			goto not_supported;
 		*primary = 0;
 		break;
+	case ALUA_ACCESS_STATE_TRANSITION:
+		/*
+		 * Transitioning is set internally, and
+		 * cannot be selected manually.
+		 */
+		goto not_supported;
 	default:
 		pr_err("Unknown ALUA access state: 0x%02x\n", state);
 		return TCM_INVALID_PARAMETER_LIST;
 	}
 
 	return 0;
+
+not_supported:
+	pr_err("ALUA access state %s not supported\n",
+	       core_alua_dump_state(state));
+	return TCM_INVALID_PARAMETER_LIST;
 }
 
 static char *core_alua_dump_state(int state)
@@ -653,12 +848,16 @@
 		return "Active/Optimized";
 	case ALUA_ACCESS_STATE_ACTIVE_NON_OPTIMIZED:
 		return "Active/NonOptimized";
+	case ALUA_ACCESS_STATE_LBA_DEPENDENT:
+		return "LBA Dependent";
 	case ALUA_ACCESS_STATE_STANDBY:
 		return "Standby";
 	case ALUA_ACCESS_STATE_UNAVAILABLE:
 		return "Unavailable";
 	case ALUA_ACCESS_STATE_OFFLINE:
 		return "Offline";
+	case ALUA_ACCESS_STATE_TRANSITION:
+		return "Transitioning";
 	default:
 		return "Unknown";
 	}
@@ -735,58 +934,49 @@
  * Called with tg_pt_gp->tg_pt_gp_md_mutex held
  */
 static int core_alua_update_tpg_primary_metadata(
-	struct t10_alua_tg_pt_gp *tg_pt_gp,
-	int primary_state,
-	unsigned char *md_buf)
+	struct t10_alua_tg_pt_gp *tg_pt_gp)
 {
+	unsigned char *md_buf;
 	struct t10_wwn *wwn = &tg_pt_gp->tg_pt_gp_dev->t10_wwn;
 	char path[ALUA_METADATA_PATH_LEN];
-	int len;
+	int len, rc;
+
+	md_buf = kzalloc(ALUA_MD_BUF_LEN, GFP_KERNEL);
+	if (!md_buf) {
+		pr_err("Unable to allocate buf for ALUA metadata\n");
+		return -ENOMEM;
+	}
 
 	memset(path, 0, ALUA_METADATA_PATH_LEN);
 
-	len = snprintf(md_buf, tg_pt_gp->tg_pt_gp_md_buf_len,
+	len = snprintf(md_buf, ALUA_MD_BUF_LEN,
 			"tg_pt_gp_id=%hu\n"
 			"alua_access_state=0x%02x\n"
 			"alua_access_status=0x%02x\n",
-			tg_pt_gp->tg_pt_gp_id, primary_state,
+			tg_pt_gp->tg_pt_gp_id,
+			tg_pt_gp->tg_pt_gp_alua_pending_state,
 			tg_pt_gp->tg_pt_gp_alua_access_status);
 
 	snprintf(path, ALUA_METADATA_PATH_LEN,
 		"/var/target/alua/tpgs_%s/%s", &wwn->unit_serial[0],
 		config_item_name(&tg_pt_gp->tg_pt_gp_group.cg_item));
 
-	return core_alua_write_tpg_metadata(path, md_buf, len);
+	rc = core_alua_write_tpg_metadata(path, md_buf, len);
+	kfree(md_buf);
+	return rc;
 }
 
-static int core_alua_do_transition_tg_pt(
-	struct t10_alua_tg_pt_gp *tg_pt_gp,
-	struct se_port *l_port,
-	struct se_node_acl *nacl,
-	unsigned char *md_buf,
-	int new_state,
-	int explicit)
+static void core_alua_do_transition_tg_pt_work(struct work_struct *work)
 {
+	struct t10_alua_tg_pt_gp *tg_pt_gp = container_of(work,
+		struct t10_alua_tg_pt_gp, tg_pt_gp_transition_work.work);
+	struct se_device *dev = tg_pt_gp->tg_pt_gp_dev;
 	struct se_dev_entry *se_deve;
 	struct se_lun_acl *lacl;
 	struct se_port *port;
 	struct t10_alua_tg_pt_gp_member *mem;
-	int old_state = 0;
-	/*
-	 * Save the old primary ALUA access state, and set the current state
-	 * to ALUA_ACCESS_STATE_TRANSITION.
-	 */
-	old_state = atomic_read(&tg_pt_gp->tg_pt_gp_alua_access_state);
-	atomic_set(&tg_pt_gp->tg_pt_gp_alua_access_state,
-			ALUA_ACCESS_STATE_TRANSITION);
-	tg_pt_gp->tg_pt_gp_alua_access_status = (explicit) ?
-				ALUA_STATUS_ALTERED_BY_EXPLICIT_STPG :
-				ALUA_STATUS_ALTERED_BY_IMPLICIT_ALUA;
-	/*
-	 * Check for the optional ALUA primary state transition delay
-	 */
-	if (tg_pt_gp->tg_pt_gp_trans_delay_msecs != 0)
-		msleep_interruptible(tg_pt_gp->tg_pt_gp_trans_delay_msecs);
+	bool explicit = (tg_pt_gp->tg_pt_gp_alua_access_status ==
+			 ALUA_STATUS_ALTERED_BY_EXPLICIT_STPG);
 
 	spin_lock(&tg_pt_gp->tg_pt_gp_lock);
 	list_for_each_entry(mem, &tg_pt_gp->tg_pt_gp_mem_list,
@@ -821,9 +1011,12 @@
 			if (!lacl)
 				continue;
 
-			if (explicit &&
-			   (nacl != NULL) && (nacl == lacl->se_lun_nacl) &&
-			   (l_port != NULL) && (l_port == port))
+			if ((tg_pt_gp->tg_pt_gp_alua_access_status ==
+			     ALUA_STATUS_ALTERED_BY_EXPLICIT_STPG) &&
+			   (tg_pt_gp->tg_pt_gp_alua_nacl != NULL) &&
+			    (tg_pt_gp->tg_pt_gp_alua_nacl == lacl->se_lun_nacl) &&
+			   (tg_pt_gp->tg_pt_gp_alua_port != NULL) &&
+			    (tg_pt_gp->tg_pt_gp_alua_port == port))
 				continue;
 
 			core_scsi3_ua_allocate(lacl->se_lun_nacl,
@@ -851,20 +1044,102 @@
 	 */
 	if (tg_pt_gp->tg_pt_gp_write_metadata) {
 		mutex_lock(&tg_pt_gp->tg_pt_gp_md_mutex);
-		core_alua_update_tpg_primary_metadata(tg_pt_gp,
-					new_state, md_buf);
+		core_alua_update_tpg_primary_metadata(tg_pt_gp);
 		mutex_unlock(&tg_pt_gp->tg_pt_gp_md_mutex);
 	}
 	/*
 	 * Set the current primary ALUA access state to the requested new state
 	 */
-	atomic_set(&tg_pt_gp->tg_pt_gp_alua_access_state, new_state);
+	atomic_set(&tg_pt_gp->tg_pt_gp_alua_access_state,
+		   tg_pt_gp->tg_pt_gp_alua_pending_state);
 
 	pr_debug("Successful %s ALUA transition TG PT Group: %s ID: %hu"
 		" from primary access state %s to %s\n", (explicit) ? "explicit" :
 		"implicit", config_item_name(&tg_pt_gp->tg_pt_gp_group.cg_item),
-		tg_pt_gp->tg_pt_gp_id, core_alua_dump_state(old_state),
-		core_alua_dump_state(new_state));
+		tg_pt_gp->tg_pt_gp_id,
+		core_alua_dump_state(tg_pt_gp->tg_pt_gp_alua_previous_state),
+		core_alua_dump_state(tg_pt_gp->tg_pt_gp_alua_pending_state));
+	spin_lock(&dev->t10_alua.tg_pt_gps_lock);
+	atomic_dec(&tg_pt_gp->tg_pt_gp_ref_cnt);
+	smp_mb__after_atomic_dec();
+	spin_unlock(&dev->t10_alua.tg_pt_gps_lock);
+
+	if (tg_pt_gp->tg_pt_gp_transition_complete)
+		complete(tg_pt_gp->tg_pt_gp_transition_complete);
+}
+
+static int core_alua_do_transition_tg_pt(
+	struct t10_alua_tg_pt_gp *tg_pt_gp,
+	int new_state,
+	int explicit)
+{
+	struct se_device *dev = tg_pt_gp->tg_pt_gp_dev;
+	DECLARE_COMPLETION_ONSTACK(wait);
+
+	/* Nothing to be done here */
+	if (atomic_read(&tg_pt_gp->tg_pt_gp_alua_access_state) == new_state)
+		return 0;
+
+	if (new_state == ALUA_ACCESS_STATE_TRANSITION)
+		return -EAGAIN;
+
+	/*
+	 * Flush any pending transitions
+	 */
+	if (!explicit && tg_pt_gp->tg_pt_gp_implicit_trans_secs &&
+	    atomic_read(&tg_pt_gp->tg_pt_gp_alua_access_state) ==
+	    ALUA_ACCESS_STATE_TRANSITION) {
+		/* Just in case */
+		tg_pt_gp->tg_pt_gp_alua_pending_state = new_state;
+		tg_pt_gp->tg_pt_gp_transition_complete = &wait;
+		flush_delayed_work(&tg_pt_gp->tg_pt_gp_transition_work);
+		wait_for_completion(&wait);
+		tg_pt_gp->tg_pt_gp_transition_complete = NULL;
+		return 0;
+	}
+
+	/*
+	 * Save the old primary ALUA access state, and set the current state
+	 * to ALUA_ACCESS_STATE_TRANSITION.
+	 */
+	tg_pt_gp->tg_pt_gp_alua_previous_state =
+		atomic_read(&tg_pt_gp->tg_pt_gp_alua_access_state);
+	tg_pt_gp->tg_pt_gp_alua_pending_state = new_state;
+
+	atomic_set(&tg_pt_gp->tg_pt_gp_alua_access_state,
+			ALUA_ACCESS_STATE_TRANSITION);
+	tg_pt_gp->tg_pt_gp_alua_access_status = (explicit) ?
+				ALUA_STATUS_ALTERED_BY_EXPLICIT_STPG :
+				ALUA_STATUS_ALTERED_BY_IMPLICIT_ALUA;
+
+	/*
+	 * Check for the optional ALUA primary state transition delay
+	 */
+	if (tg_pt_gp->tg_pt_gp_trans_delay_msecs != 0)
+		msleep_interruptible(tg_pt_gp->tg_pt_gp_trans_delay_msecs);
+
+	/*
+	 * Take a reference for workqueue item
+	 */
+	spin_lock(&dev->t10_alua.tg_pt_gps_lock);
+	atomic_inc(&tg_pt_gp->tg_pt_gp_ref_cnt);
+	smp_mb__after_atomic_inc();
+	spin_unlock(&dev->t10_alua.tg_pt_gps_lock);
+
+	if (!explicit && tg_pt_gp->tg_pt_gp_implicit_trans_secs) {
+		unsigned long transition_tmo;
+
+		transition_tmo = tg_pt_gp->tg_pt_gp_implicit_trans_secs * HZ;
+		queue_delayed_work(tg_pt_gp->tg_pt_gp_dev->tmr_wq,
+				   &tg_pt_gp->tg_pt_gp_transition_work,
+				   transition_tmo);
+	} else {
+		tg_pt_gp->tg_pt_gp_transition_complete = &wait;
+		queue_delayed_work(tg_pt_gp->tg_pt_gp_dev->tmr_wq,
+				   &tg_pt_gp->tg_pt_gp_transition_work, 0);
+		wait_for_completion(&wait);
+		tg_pt_gp->tg_pt_gp_transition_complete = NULL;
+	}
 
 	return 0;
 }
@@ -878,23 +1153,15 @@
 	int explicit)
 {
 	struct se_device *dev;
-	struct se_port *port;
-	struct se_node_acl *nacl;
 	struct t10_alua_lu_gp *lu_gp;
 	struct t10_alua_lu_gp_member *lu_gp_mem, *local_lu_gp_mem;
 	struct t10_alua_tg_pt_gp *tg_pt_gp;
-	unsigned char *md_buf;
-	int primary;
+	int primary, valid_states, rc = 0;
 
-	if (core_alua_check_transition(new_state, &primary) != 0)
+	valid_states = l_tg_pt_gp->tg_pt_gp_alua_supported_states;
+	if (core_alua_check_transition(new_state, valid_states, &primary) != 0)
 		return -EINVAL;
 
-	md_buf = kzalloc(l_tg_pt_gp->tg_pt_gp_md_buf_len, GFP_KERNEL);
-	if (!md_buf) {
-		pr_err("Unable to allocate buf for ALUA metadata\n");
-		return -ENOMEM;
-	}
-
 	local_lu_gp_mem = l_dev->dev_alua_lu_gp_mem;
 	spin_lock(&local_lu_gp_mem->lu_gp_mem_lock);
 	lu_gp = local_lu_gp_mem->lu_gp;
@@ -911,12 +1178,13 @@
 		 * core_alua_do_transition_tg_pt() will always return
 		 * success.
 		 */
-		core_alua_do_transition_tg_pt(l_tg_pt_gp, l_port, l_nacl,
-					md_buf, new_state, explicit);
+		l_tg_pt_gp->tg_pt_gp_alua_port = l_port;
+		l_tg_pt_gp->tg_pt_gp_alua_nacl = l_nacl;
+		rc = core_alua_do_transition_tg_pt(l_tg_pt_gp,
+						   new_state, explicit);
 		atomic_dec(&lu_gp->lu_gp_ref_cnt);
 		smp_mb__after_atomic_dec();
-		kfree(md_buf);
-		return 0;
+		return rc;
 	}
 	/*
 	 * For all other LU groups aside from 'default_lu_gp', walk all of
@@ -951,11 +1219,11 @@
 				continue;
 
 			if (l_tg_pt_gp == tg_pt_gp) {
-				port = l_port;
-				nacl = l_nacl;
+				tg_pt_gp->tg_pt_gp_alua_port = l_port;
+				tg_pt_gp->tg_pt_gp_alua_nacl = l_nacl;
 			} else {
-				port = NULL;
-				nacl = NULL;
+				tg_pt_gp->tg_pt_gp_alua_port = NULL;
+				tg_pt_gp->tg_pt_gp_alua_nacl = NULL;
 			}
 			atomic_inc(&tg_pt_gp->tg_pt_gp_ref_cnt);
 			smp_mb__after_atomic_inc();
@@ -964,12 +1232,14 @@
 			 * core_alua_do_transition_tg_pt() will always return
 			 * success.
 			 */
-			core_alua_do_transition_tg_pt(tg_pt_gp, port,
-					nacl, md_buf, new_state, explicit);
+			rc = core_alua_do_transition_tg_pt(tg_pt_gp,
+					new_state, explicit);
 
 			spin_lock(&dev->t10_alua.tg_pt_gps_lock);
 			atomic_dec(&tg_pt_gp->tg_pt_gp_ref_cnt);
 			smp_mb__after_atomic_dec();
+			if (rc)
+				break;
 		}
 		spin_unlock(&dev->t10_alua.tg_pt_gps_lock);
 
@@ -979,16 +1249,18 @@
 	}
 	spin_unlock(&lu_gp->lu_gp_lock);
 
-	pr_debug("Successfully processed LU Group: %s all ALUA TG PT"
-		" Group IDs: %hu %s transition to primary state: %s\n",
-		config_item_name(&lu_gp->lu_gp_group.cg_item),
-		l_tg_pt_gp->tg_pt_gp_id, (explicit) ? "explicit" : "implicit",
-		core_alua_dump_state(new_state));
+	if (!rc) {
+		pr_debug("Successfully processed LU Group: %s all ALUA TG PT"
+			 " Group IDs: %hu %s transition to primary state: %s\n",
+			 config_item_name(&lu_gp->lu_gp_group.cg_item),
+			 l_tg_pt_gp->tg_pt_gp_id,
+			 (explicit) ? "explicit" : "implicit",
+			 core_alua_dump_state(new_state));
+	}
 
 	atomic_dec(&lu_gp->lu_gp_ref_cnt);
 	smp_mb__after_atomic_dec();
-	kfree(md_buf);
-	return 0;
+	return rc;
 }
 
 /*
@@ -996,13 +1268,18 @@
  */
 static int core_alua_update_tpg_secondary_metadata(
 	struct t10_alua_tg_pt_gp_member *tg_pt_gp_mem,
-	struct se_port *port,
-	unsigned char *md_buf,
-	u32 md_buf_len)
+	struct se_port *port)
 {
+	unsigned char *md_buf;
 	struct se_portal_group *se_tpg = port->sep_tpg;
 	char path[ALUA_METADATA_PATH_LEN], wwn[ALUA_SECONDARY_METADATA_WWN_LEN];
-	int len;
+	int len, rc;
+
+	md_buf = kzalloc(ALUA_MD_BUF_LEN, GFP_KERNEL);
+	if (!md_buf) {
+		pr_err("Unable to allocate buf for ALUA metadata\n");
+		return -ENOMEM;
+	}
 
 	memset(path, 0, ALUA_METADATA_PATH_LEN);
 	memset(wwn, 0, ALUA_SECONDARY_METADATA_WWN_LEN);
@@ -1014,7 +1291,7 @@
 		snprintf(wwn+len, ALUA_SECONDARY_METADATA_WWN_LEN-len, "+%hu",
 				se_tpg->se_tpg_tfo->tpg_get_tag(se_tpg));
 
-	len = snprintf(md_buf, md_buf_len, "alua_tg_pt_offline=%d\n"
+	len = snprintf(md_buf, ALUA_MD_BUF_LEN, "alua_tg_pt_offline=%d\n"
 			"alua_tg_pt_status=0x%02x\n",
 			atomic_read(&port->sep_tg_pt_secondary_offline),
 			port->sep_tg_pt_secondary_stat);
@@ -1023,7 +1300,10 @@
 			se_tpg->se_tpg_tfo->get_fabric_name(), wwn,
 			port->sep_lun->unpacked_lun);
 
-	return core_alua_write_tpg_metadata(path, md_buf, len);
+	rc = core_alua_write_tpg_metadata(path, md_buf, len);
+	kfree(md_buf);
+
+	return rc;
 }
 
 static int core_alua_set_tg_pt_secondary_state(
@@ -1033,8 +1313,6 @@
 	int offline)
 {
 	struct t10_alua_tg_pt_gp *tg_pt_gp;
-	unsigned char *md_buf;
-	u32 md_buf_len;
 	int trans_delay_msecs;
 
 	spin_lock(&tg_pt_gp_mem->tg_pt_gp_mem_lock);
@@ -1055,7 +1333,6 @@
 	else
 		atomic_set(&port->sep_tg_pt_secondary_offline, 0);
 
-	md_buf_len = tg_pt_gp->tg_pt_gp_md_buf_len;
 	port->sep_tg_pt_secondary_stat = (explicit) ?
 			ALUA_STATUS_ALTERED_BY_EXPLICIT_STPG :
 			ALUA_STATUS_ALTERED_BY_IMPLICIT_ALUA;
@@ -1077,23 +1354,115 @@
 	 * secondary state and status
 	 */
 	if (port->sep_tg_pt_secondary_write_md) {
-		md_buf = kzalloc(md_buf_len, GFP_KERNEL);
-		if (!md_buf) {
-			pr_err("Unable to allocate md_buf for"
-				" secondary ALUA access metadata\n");
-			return -ENOMEM;
-		}
 		mutex_lock(&port->sep_tg_pt_md_mutex);
-		core_alua_update_tpg_secondary_metadata(tg_pt_gp_mem, port,
-				md_buf, md_buf_len);
+		core_alua_update_tpg_secondary_metadata(tg_pt_gp_mem, port);
 		mutex_unlock(&port->sep_tg_pt_md_mutex);
-
-		kfree(md_buf);
 	}
 
 	return 0;
 }
 
+struct t10_alua_lba_map *
+core_alua_allocate_lba_map(struct list_head *list,
+			   u64 first_lba, u64 last_lba)
+{
+	struct t10_alua_lba_map *lba_map;
+
+	lba_map = kmem_cache_zalloc(t10_alua_lba_map_cache, GFP_KERNEL);
+	if (!lba_map) {
+		pr_err("Unable to allocate struct t10_alua_lba_map\n");
+		return ERR_PTR(-ENOMEM);
+	}
+	INIT_LIST_HEAD(&lba_map->lba_map_mem_list);
+	lba_map->lba_map_first_lba = first_lba;
+	lba_map->lba_map_last_lba = last_lba;
+
+	list_add_tail(&lba_map->lba_map_list, list);
+	return lba_map;
+}
+
+int
+core_alua_allocate_lba_map_mem(struct t10_alua_lba_map *lba_map,
+			       int pg_id, int state)
+{
+	struct t10_alua_lba_map_member *lba_map_mem;
+
+	list_for_each_entry(lba_map_mem, &lba_map->lba_map_mem_list,
+			    lba_map_mem_list) {
+		if (lba_map_mem->lba_map_mem_alua_pg_id == pg_id) {
+			pr_err("Duplicate pg_id %d in lba_map\n", pg_id);
+			return -EINVAL;
+		}
+	}
+
+	lba_map_mem = kmem_cache_zalloc(t10_alua_lba_map_mem_cache, GFP_KERNEL);
+	if (!lba_map_mem) {
+		pr_err("Unable to allocate struct t10_alua_lba_map_mem\n");
+		return -ENOMEM;
+	}
+	lba_map_mem->lba_map_mem_alua_state = state;
+	lba_map_mem->lba_map_mem_alua_pg_id = pg_id;
+
+	list_add_tail(&lba_map_mem->lba_map_mem_list,
+		      &lba_map->lba_map_mem_list);
+	return 0;
+}
+
+void
+core_alua_free_lba_map(struct list_head *lba_list)
+{
+	struct t10_alua_lba_map *lba_map, *lba_map_tmp;
+	struct t10_alua_lba_map_member *lba_map_mem, *lba_map_mem_tmp;
+
+	list_for_each_entry_safe(lba_map, lba_map_tmp, lba_list,
+				 lba_map_list) {
+		list_for_each_entry_safe(lba_map_mem, lba_map_mem_tmp,
+					 &lba_map->lba_map_mem_list,
+					 lba_map_mem_list) {
+			list_del(&lba_map_mem->lba_map_mem_list);
+			kmem_cache_free(t10_alua_lba_map_mem_cache,
+					lba_map_mem);
+		}
+		list_del(&lba_map->lba_map_list);
+		kmem_cache_free(t10_alua_lba_map_cache, lba_map);
+	}
+}
+
+void
+core_alua_set_lba_map(struct se_device *dev, struct list_head *lba_map_list,
+		      int segment_size, int segment_mult)
+{
+	struct list_head old_lba_map_list;
+	struct t10_alua_tg_pt_gp *tg_pt_gp;
+	int activate = 0, supported;
+
+	INIT_LIST_HEAD(&old_lba_map_list);
+	spin_lock(&dev->t10_alua.lba_map_lock);
+	dev->t10_alua.lba_map_segment_size = segment_size;
+	dev->t10_alua.lba_map_segment_multiplier = segment_mult;
+	list_splice_init(&dev->t10_alua.lba_map_list, &old_lba_map_list);
+	if (lba_map_list) {
+		list_splice_init(lba_map_list, &dev->t10_alua.lba_map_list);
+		activate = 1;
+	}
+	spin_unlock(&dev->t10_alua.lba_map_lock);
+	spin_lock(&dev->t10_alua.tg_pt_gps_lock);
+	list_for_each_entry(tg_pt_gp, &dev->t10_alua.tg_pt_gps_list,
+			    tg_pt_gp_list) {
+
+		if (!tg_pt_gp->tg_pt_gp_valid_id)
+			continue;
+		supported = tg_pt_gp->tg_pt_gp_alua_supported_states;
+		if (activate)
+			supported |= ALUA_LBD_SUP;
+		else
+			supported &= ~ALUA_LBD_SUP;
+		tg_pt_gp->tg_pt_gp_alua_supported_states = supported;
+	}
+	spin_unlock(&dev->t10_alua.tg_pt_gps_lock);
+	core_alua_free_lba_map(&old_lba_map_list);
+}
+
 struct t10_alua_lu_gp *
 core_alua_allocate_lu_gp(const char *name, int def_group)
 {
@@ -1346,8 +1715,9 @@
 	mutex_init(&tg_pt_gp->tg_pt_gp_md_mutex);
 	spin_lock_init(&tg_pt_gp->tg_pt_gp_lock);
 	atomic_set(&tg_pt_gp->tg_pt_gp_ref_cnt, 0);
+	INIT_DELAYED_WORK(&tg_pt_gp->tg_pt_gp_transition_work,
+			  core_alua_do_transition_tg_pt_work);
 	tg_pt_gp->tg_pt_gp_dev = dev;
-	tg_pt_gp->tg_pt_gp_md_buf_len = ALUA_MD_BUF_LEN;
 	atomic_set(&tg_pt_gp->tg_pt_gp_alua_access_state,
 		ALUA_ACCESS_STATE_ACTIVE_OPTIMIZED);
 	/*
@@ -1475,6 +1845,8 @@
 	dev->t10_alua.alua_tg_pt_gps_counter--;
 	spin_unlock(&dev->t10_alua.tg_pt_gps_lock);
 
+	flush_delayed_work(&tg_pt_gp->tg_pt_gp_transition_work);
+
 	/*
 	 * Allow a struct t10_alua_tg_pt_gp_member * referenced by
 	 * core_alua_get_tg_pt_gp_by_name() in
diff --git a/drivers/target/target_core_alua.h b/drivers/target/target_core_alua.h
index 88e2e83..0a7d65e 100644
--- a/drivers/target/target_core_alua.h
+++ b/drivers/target/target_core_alua.h
@@ -13,12 +13,13 @@
 /*
  * ASYMMETRIC ACCESS STATE field
  *
- * from spc4r17 section 6.27 Table 245
+ * from spc4r36j section 6.37 Table 307
  */
 #define ALUA_ACCESS_STATE_ACTIVE_OPTIMIZED	0x0
 #define ALUA_ACCESS_STATE_ACTIVE_NON_OPTIMIZED	0x1
 #define ALUA_ACCESS_STATE_STANDBY		0x2
 #define ALUA_ACCESS_STATE_UNAVAILABLE		0x3
+#define ALUA_ACCESS_STATE_LBA_DEPENDENT		0x4
 #define ALUA_ACCESS_STATE_OFFLINE		0xe
 #define ALUA_ACCESS_STATE_TRANSITION		0xf
 
@@ -78,18 +79,30 @@
  */
 #define ALUA_SECONDARY_METADATA_WWN_LEN			256
 
+/* Used by core_alua_update_tpg_(primary,secondary)_metadata */
+#define ALUA_MD_BUF_LEN					1024
+
 extern struct kmem_cache *t10_alua_lu_gp_cache;
 extern struct kmem_cache *t10_alua_lu_gp_mem_cache;
 extern struct kmem_cache *t10_alua_tg_pt_gp_cache;
 extern struct kmem_cache *t10_alua_tg_pt_gp_mem_cache;
+extern struct kmem_cache *t10_alua_lba_map_cache;
+extern struct kmem_cache *t10_alua_lba_map_mem_cache;
 
 extern sense_reason_t target_emulate_report_target_port_groups(struct se_cmd *);
 extern sense_reason_t target_emulate_set_target_port_groups(struct se_cmd *);
+extern sense_reason_t target_emulate_report_referrals(struct se_cmd *);
 extern int core_alua_check_nonop_delay(struct se_cmd *);
 extern int core_alua_do_port_transition(struct t10_alua_tg_pt_gp *,
 				struct se_device *, struct se_port *,
 				struct se_node_acl *, int, int);
 extern char *core_alua_dump_status(int);
+extern struct t10_alua_lba_map *core_alua_allocate_lba_map(
+				struct list_head *, u64, u64);
+extern int core_alua_allocate_lba_map_mem(struct t10_alua_lba_map *, int, int);
+extern void core_alua_free_lba_map(struct list_head *);
+extern void core_alua_set_lba_map(struct se_device *, struct list_head *,
+				int, int);
 extern struct t10_alua_lu_gp *core_alua_allocate_lu_gp(const char *, int);
 extern int core_alua_set_lu_gp_id(struct t10_alua_lu_gp *, u16);
 extern void core_alua_free_lu_gp(struct t10_alua_lu_gp *);
diff --git a/drivers/target/target_core_configfs.c b/drivers/target/target_core_configfs.c
index 272755d..f0e85b1 100644
--- a/drivers/target/target_core_configfs.c
+++ b/drivers/target/target_core_configfs.c
@@ -643,6 +643,15 @@
 DEF_DEV_ATTRIB(emulate_3pc);
 SE_DEV_ATTR(emulate_3pc, S_IRUGO | S_IWUSR);
 
+DEF_DEV_ATTRIB(pi_prot_type);
+SE_DEV_ATTR(pi_prot_type, S_IRUGO | S_IWUSR);
+
+DEF_DEV_ATTRIB_RO(hw_pi_prot_type);
+SE_DEV_ATTR_RO(hw_pi_prot_type);
+
+DEF_DEV_ATTRIB(pi_prot_format);
+SE_DEV_ATTR(pi_prot_format, S_IRUGO | S_IWUSR);
+
 DEF_DEV_ATTRIB(enforce_pr_isids);
 SE_DEV_ATTR(enforce_pr_isids, S_IRUGO | S_IWUSR);
 
@@ -702,6 +711,9 @@
 	&target_core_dev_attrib_emulate_tpws.attr,
 	&target_core_dev_attrib_emulate_caw.attr,
 	&target_core_dev_attrib_emulate_3pc.attr,
+	&target_core_dev_attrib_pi_prot_type.attr,
+	&target_core_dev_attrib_hw_pi_prot_type.attr,
+	&target_core_dev_attrib_pi_prot_format.attr,
 	&target_core_dev_attrib_enforce_pr_isids.attr,
 	&target_core_dev_attrib_is_nonrot.attr,
 	&target_core_dev_attrib_emulate_rest_reord.attr,
@@ -1741,6 +1753,176 @@
 	.store	= target_core_store_alua_lu_gp,
 };
 
+static ssize_t target_core_show_dev_lba_map(void *p, char *page)
+{
+	struct se_device *dev = p;
+	struct t10_alua_lba_map *map;
+	struct t10_alua_lba_map_member *mem;
+	char *b = page;
+	int bl = 0;
+	char state;
+
+	spin_lock(&dev->t10_alua.lba_map_lock);
+	if (!list_empty(&dev->t10_alua.lba_map_list))
+	    bl += sprintf(b + bl, "%u %u\n",
+			  dev->t10_alua.lba_map_segment_size,
+			  dev->t10_alua.lba_map_segment_multiplier);
+	list_for_each_entry(map, &dev->t10_alua.lba_map_list, lba_map_list) {
+		bl += sprintf(b + bl, "%llu %llu",
+			      map->lba_map_first_lba, map->lba_map_last_lba);
+		list_for_each_entry(mem, &map->lba_map_mem_list,
+				    lba_map_mem_list) {
+			switch (mem->lba_map_mem_alua_state) {
+			case ALUA_ACCESS_STATE_ACTIVE_OPTIMIZED:
+				state = 'O';
+				break;
+			case ALUA_ACCESS_STATE_ACTIVE_NON_OPTIMIZED:
+				state = 'A';
+				break;
+			case ALUA_ACCESS_STATE_STANDBY:
+				state = 'S';
+				break;
+			case ALUA_ACCESS_STATE_UNAVAILABLE:
+				state = 'U';
+				break;
+			default:
+				state = '.';
+				break;
+			}
+			bl += sprintf(b + bl, " %d:%c",
+				      mem->lba_map_mem_alua_pg_id, state);
+		}
+		bl += sprintf(b + bl, "\n");
+	}
+	spin_unlock(&dev->t10_alua.lba_map_lock);
+	return bl;
+}
+
+static ssize_t target_core_store_dev_lba_map(
+	void *p,
+	const char *page,
+	size_t count)
+{
+	struct se_device *dev = p;
+	struct t10_alua_lba_map *lba_map = NULL;
+	struct list_head lba_list;
+	char *map_entries, *ptr;
+	char state;
+	int pg_num = -1, pg;
+	int ret = 0, num = 0, pg_id, alua_state;
+	unsigned long start_lba = -1, end_lba = -1;
+	unsigned long segment_size = -1, segment_mult = -1;
+
+	map_entries = kstrdup(page, GFP_KERNEL);
+	if (!map_entries)
+		return -ENOMEM;
+
+	INIT_LIST_HEAD(&lba_list);
+	while ((ptr = strsep(&map_entries, "\n")) != NULL) {
+		if (!*ptr)
+			continue;
+
+		if (num == 0) {
+			if (sscanf(ptr, "%lu %lu\n",
+				   &segment_size, &segment_mult) != 2) {
+				pr_err("Invalid line %d\n", num);
+				ret = -EINVAL;
+				break;
+			}
+			num++;
+			continue;
+		}
+		if (sscanf(ptr, "%lu %lu", &start_lba, &end_lba) != 2) {
+			pr_err("Invalid line %d\n", num);
+			ret = -EINVAL;
+			break;
+		}
+		ptr = strchr(ptr, ' ');
+		if (!ptr) {
+			pr_err("Invalid line %d, missing end lba\n", num);
+			ret = -EINVAL;
+			break;
+		}
+		ptr++;
+		ptr = strchr(ptr, ' ');
+		if (!ptr) {
+			pr_err("Invalid line %d, missing state definitions\n",
+			       num);
+			ret = -EINVAL;
+			break;
+		}
+		ptr++;
+		lba_map = core_alua_allocate_lba_map(&lba_list,
+						     start_lba, end_lba);
+		if (IS_ERR(lba_map)) {
+			ret = PTR_ERR(lba_map);
+			break;
+		}
+		pg = 0;
+		while (sscanf(ptr, "%d:%c", &pg_id, &state) == 2) {
+			switch (state) {
+			case 'O':
+				alua_state = ALUA_ACCESS_STATE_ACTIVE_OPTIMIZED;
+				break;
+			case 'A':
+				alua_state = ALUA_ACCESS_STATE_ACTIVE_NON_OPTIMIZED;
+				break;
+			case 'S':
+				alua_state = ALUA_ACCESS_STATE_STANDBY;
+				break;
+			case 'U':
+				alua_state = ALUA_ACCESS_STATE_UNAVAILABLE;
+				break;
+			default:
+				pr_err("Invalid ALUA state '%c'\n", state);
+				ret = -EINVAL;
+				goto out;
+			}
+
+			ret = core_alua_allocate_lba_map_mem(lba_map,
+							     pg_id, alua_state);
+			if (ret) {
+				pr_err("Invalid target descriptor %d:%c "
+				       "at line %d\n",
+				       pg_id, state, num);
+				break;
+			}
+			pg++;
+			ptr = strchr(ptr, ' ');
+			if (ptr)
+				ptr++;
+			else
+				break;
+		}
+		if (pg_num == -1)
+		    pg_num = pg;
+		else if (pg != pg_num) {
+			pr_err("Only %d out of %d port group definitions "
+			       "at line %d\n", pg, pg_num, num);
+			ret = -EINVAL;
+			break;
+		}
+		num++;
+	}
+out:
+	if (ret) {
+		core_alua_free_lba_map(&lba_list);
+		count = ret;
+	} else
+		core_alua_set_lba_map(dev, &lba_list,
+				      segment_size, segment_mult);
+	kfree(map_entries);
+	return count;
+}
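+
+/*
+ * Illustrative input for the parser above (hypothetical values): the first
+ * line carries "<segment_size> <segment_multiplier>", each following line
+ * carries "<first_lba> <last_lba>" plus one "<pg_id>:<state>" pair per
+ * target port group, with states O/A/S/U decoded as in the switch above.
+ *
+ *	64 2
+ *	0 63 0:O 1:S
+ *	64 127 0:S 1:O
+ */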
+
+static struct target_core_configfs_attribute target_core_attr_dev_lba_map = {
+	.attr	= { .ca_owner = THIS_MODULE,
+		    .ca_name = "lba_map",
+		    .ca_mode = S_IRUGO | S_IWUSR },
+	.show	= target_core_show_dev_lba_map,
+	.store	= target_core_store_dev_lba_map,
+};
+
 static struct configfs_attribute *lio_core_dev_attrs[] = {
 	&target_core_attr_dev_info.attr,
 	&target_core_attr_dev_control.attr,
@@ -1748,6 +1930,7 @@
 	&target_core_attr_dev_udev_path.attr,
 	&target_core_attr_dev_enable.attr,
 	&target_core_attr_dev_alua_lu_gp.attr,
+	&target_core_attr_dev_lba_map.attr,
 	NULL,
 };
 
@@ -2054,6 +2237,13 @@
 			" transition while TPGS_IMPLICIT_ALUA is disabled\n");
 		return -EINVAL;
 	}
+	if (tg_pt_gp->tg_pt_gp_alua_access_type & TPGS_EXPLICIT_ALUA &&
+	    new_state == ALUA_ACCESS_STATE_LBA_DEPENDENT) {
+		/* LBA DEPENDENT is only allowed with implicit ALUA */
+		pr_err("Unable to process implicit configfs ALUA transition"
+		       " while explicit ALUA management is enabled\n");
+		return -EINVAL;
+	}
 
 	ret = core_alua_do_port_transition(tg_pt_gp, dev,
 					NULL, NULL, new_state, 0);
@@ -2188,7 +2378,7 @@
 			       tg_pt_gp_alua_supported_states, ALUA_LBD_SUP);
 SE_DEV_ALUA_SUPPORT_STATE_STORE(lba_dependent,
 				tg_pt_gp_alua_supported_states, ALUA_LBD_SUP);
-SE_DEV_ALUA_TG_PT_ATTR(alua_support_lba_dependent, S_IRUGO | S_IWUSR);
+SE_DEV_ALUA_TG_PT_ATTR(alua_support_lba_dependent, S_IRUGO);
 
 SE_DEV_ALUA_SUPPORT_STATE_SHOW(unavailable,
 			       tg_pt_gp_alua_supported_states, ALUA_U_SUP);
@@ -2937,7 +3127,7 @@
 	 * and ALUA Logical Unit Group and Target Port Group infrastructure.
 	 */
 	target_cg = &subsys->su_group;
-	target_cg->default_groups = kmalloc(sizeof(struct config_group) * 2,
+	target_cg->default_groups = kmalloc(sizeof(struct config_group *) * 2,
 				GFP_KERNEL);
 	if (!target_cg->default_groups) {
 		pr_err("Unable to allocate target_cg->default_groups\n");
diff --git a/drivers/target/target_core_device.c b/drivers/target/target_core_device.c
index d06de84..65001e1 100644
--- a/drivers/target/target_core_device.c
+++ b/drivers/target/target_core_device.c
@@ -918,6 +918,90 @@
 	return 0;
 }
 
+int se_dev_set_pi_prot_type(struct se_device *dev, int flag)
+{
+	int rc, old_prot = dev->dev_attrib.pi_prot_type;
+
+	if (flag != 0 && flag != 1 && flag != 2 && flag != 3) {
+		pr_err("Illegal value %d for pi_prot_type\n", flag);
+		return -EINVAL;
+	}
+	if (flag == 2) {
+		pr_err("DIF TYPE2 protection currently not supported\n");
+		return -ENOSYS;
+	}
+	if (dev->dev_attrib.hw_pi_prot_type) {
+		pr_warn("DIF protection enabled on underlying hardware,"
+			" ignoring\n");
+		return 0;
+	}
+	if (!dev->transport->init_prot || !dev->transport->free_prot) {
+		pr_err("DIF protection not supported by backend: %s\n",
+		       dev->transport->name);
+		return -ENOSYS;
+	}
+	if (!(dev->dev_flags & DF_CONFIGURED)) {
+		pr_err("DIF protection requires device to be configured\n");
+		return -ENODEV;
+	}
+	if (dev->export_count) {
+		pr_err("dev[%p]: Unable to change SE Device PROT type while"
+		       " export_count is %d\n", dev, dev->export_count);
+		return -EINVAL;
+	}
+
+	dev->dev_attrib.pi_prot_type = flag;
+
+	if (flag && !old_prot) {
+		rc = dev->transport->init_prot(dev);
+		if (rc) {
+			dev->dev_attrib.pi_prot_type = old_prot;
+			return rc;
+		}
+
+	} else if (!flag && old_prot) {
+		dev->transport->free_prot(dev);
+	}
+	pr_debug("dev[%p]: SE Device Protection Type: %d\n", dev, flag);
+
+	return 0;
+}
+
+int se_dev_set_pi_prot_format(struct se_device *dev, int flag)
+{
+	int rc;
+
+	if (!flag)
+		return 0;
+
+	if (flag != 1) {
+		pr_err("Illegal value %d for pi_prot_format\n", flag);
+		return -EINVAL;
+	}
+	if (!dev->transport->format_prot) {
+		pr_err("DIF protection format not supported by backend %s\n",
+		       dev->transport->name);
+		return -ENOSYS;
+	}
+	if (!(dev->dev_flags & DF_CONFIGURED)) {
+		pr_err("DIF protection format requires device to be configured\n");
+		return -ENODEV;
+	}
+	if (dev->export_count) {
+		pr_err("dev[%p]: Unable to format SE Device PROT type while"
+		       " export_count is %d\n", dev, dev->export_count);
+		return -EINVAL;
+	}
+
+	rc = dev->transport->format_prot(dev);
+	if (rc)
+		return rc;
+
+	pr_debug("dev[%p]: SE Device Protection Format complete\n", dev);
+
+	return 0;
+}
+
 int se_dev_set_enforce_pr_isids(struct se_device *dev, int flag)
 {
 	if ((flag != 0) && (flag != 1)) {
@@ -1117,23 +1201,23 @@
 struct se_lun *core_dev_add_lun(
 	struct se_portal_group *tpg,
 	struct se_device *dev,
-	u32 lun)
+	u32 unpacked_lun)
 {
-	struct se_lun *lun_p;
+	struct se_lun *lun;
 	int rc;
 
-	lun_p = core_tpg_pre_addlun(tpg, lun);
-	if (IS_ERR(lun_p))
-		return lun_p;
+	lun = core_tpg_alloc_lun(tpg, unpacked_lun);
+	if (IS_ERR(lun))
+		return lun;
 
-	rc = core_tpg_post_addlun(tpg, lun_p,
+	rc = core_tpg_add_lun(tpg, lun,
 				TRANSPORT_LUNFLAGS_READ_WRITE, dev);
 	if (rc < 0)
 		return ERR_PTR(rc);
 
 	pr_debug("%s_TPG[%u]_LUN[%u] - Activated %s Logical Unit from"
 		" CORE HBA: %u\n", tpg->se_tpg_tfo->get_fabric_name(),
-		tpg->se_tpg_tfo->tpg_get_tag(tpg), lun_p->unpacked_lun,
+		tpg->se_tpg_tfo->tpg_get_tag(tpg), lun->unpacked_lun,
 		tpg->se_tpg_tfo->get_fabric_name(), dev->se_hba->hba_id);
 	/*
 	 * Update LUN maps for dynamically added initiators when
@@ -1154,7 +1238,7 @@
 		spin_unlock_irq(&tpg->acl_node_lock);
 	}
 
-	return lun_p;
+	return lun;
 }
 
 /*      core_dev_del_lun():
@@ -1420,6 +1504,7 @@
 	dev->dev_link_magic = SE_DEV_LINK_MAGIC;
 	dev->se_hba = hba;
 	dev->transport = hba->transport;
+	dev->prot_length = sizeof(struct se_dif_v1_tuple);
 
 	INIT_LIST_HEAD(&dev->dev_list);
 	INIT_LIST_HEAD(&dev->dev_sep_list);
@@ -1444,6 +1529,8 @@
 	spin_lock_init(&dev->t10_pr.aptpl_reg_lock);
 	INIT_LIST_HEAD(&dev->t10_alua.tg_pt_gps_list);
 	spin_lock_init(&dev->t10_alua.tg_pt_gps_lock);
+	INIT_LIST_HEAD(&dev->t10_alua.lba_map_list);
+	spin_lock_init(&dev->t10_alua.lba_map_lock);
 
 	dev->t10_wwn.t10_dev = dev;
 	dev->t10_alua.t10_dev = dev;
@@ -1460,6 +1547,7 @@
 	dev->dev_attrib.emulate_tpws = DA_EMULATE_TPWS;
 	dev->dev_attrib.emulate_caw = DA_EMULATE_CAW;
 	dev->dev_attrib.emulate_3pc = DA_EMULATE_3PC;
+	dev->dev_attrib.pi_prot_type = TARGET_DIF_TYPE0_PROT;
 	dev->dev_attrib.enforce_pr_isids = DA_ENFORCE_PR_ISIDS;
 	dev->dev_attrib.is_nonrot = DA_IS_NONROT;
 	dev->dev_attrib.emulate_rest_reord = DA_EMULATE_REST_REORD;
@@ -1588,9 +1676,13 @@
 	}
 
 	core_alua_free_lu_gp_mem(dev);
+	core_alua_set_lba_map(dev, NULL, 0, 0);
 	core_scsi3_free_all_registrations(dev);
 	se_release_vpd_for_dev(dev);
 
+	if (dev->transport->free_prot)
+		dev->transport->free_prot(dev);
+
 	dev->transport->free_device(dev);
 }
 
diff --git a/drivers/target/target_core_fabric_configfs.c b/drivers/target/target_core_fabric_configfs.c
index dae2ad6..7de9f04 100644
--- a/drivers/target/target_core_fabric_configfs.c
+++ b/drivers/target/target_core_fabric_configfs.c
@@ -906,7 +906,7 @@
 	lun_cg->default_groups[1] = NULL;
 
 	port_stat_grp = &lun->port_stat_grps.stat_group;
-	port_stat_grp->default_groups =  kzalloc(sizeof(struct config_group) * 3,
+	port_stat_grp->default_groups =  kzalloc(sizeof(struct config_group *) * 4,
 				GFP_KERNEL);
 	if (!port_stat_grp->default_groups) {
 		pr_err("Unable to allocate port_stat_grp->default_groups\n");
diff --git a/drivers/target/target_core_file.c b/drivers/target/target_core_file.c
index 78241a5..cf991a9 100644
--- a/drivers/target/target_core_file.c
+++ b/drivers/target/target_core_file.c
@@ -257,6 +257,72 @@
 	kfree(fd_dev);
 }
 
+static int fd_do_prot_rw(struct se_cmd *cmd, struct fd_prot *fd_prot,
+			 int is_write)
+{
+	struct se_device *se_dev = cmd->se_dev;
+	struct fd_dev *dev = FD_DEV(se_dev);
+	struct file *prot_fd = dev->fd_prot_file;
+	struct scatterlist *sg;
+	loff_t pos = (cmd->t_task_lba * se_dev->prot_length);
+	unsigned char *buf;
+	u32 prot_size, len, size;
+	int rc, ret = 1, i;
+
+	prot_size = (cmd->data_length / se_dev->dev_attrib.block_size) *
+		     se_dev->prot_length;
+
+	if (!is_write) {
+		fd_prot->prot_buf = vzalloc(prot_size);
+		if (!fd_prot->prot_buf) {
+			pr_err("Unable to allocate fd_prot->prot_buf\n");
+			return -ENOMEM;
+		}
+		buf = fd_prot->prot_buf;
+
+		fd_prot->prot_sg_nents = cmd->t_prot_nents;
+		fd_prot->prot_sg = kzalloc(sizeof(struct scatterlist) *
+					   fd_prot->prot_sg_nents, GFP_KERNEL);
+		if (!fd_prot->prot_sg) {
+			pr_err("Unable to allocate fd_prot->prot_sg\n");
+			vfree(fd_prot->prot_buf);
+			return -ENOMEM;
+		}
+		size = prot_size;
+
+		for_each_sg(fd_prot->prot_sg, sg, fd_prot->prot_sg_nents, i) {
+
+			len = min_t(u32, PAGE_SIZE, size);
+			sg_set_buf(sg, buf, len);
+			size -= len;
+			buf += len;
+		}
+	}
+
+	if (is_write) {
+		rc = kernel_write(prot_fd, fd_prot->prot_buf, prot_size, pos);
+		if (rc < 0 || prot_size != rc) {
+			pr_err("kernel_write() for fd_do_prot_rw failed:"
+			       " %d\n", rc);
+			ret = -EINVAL;
+		}
+	} else {
+		rc = kernel_read(prot_fd, pos, fd_prot->prot_buf, prot_size);
+		if (rc < 0) {
+			pr_err("kernel_read() for fd_do_prot_rw failed:"
+			       " %d\n", rc);
+			ret = -EINVAL;
+		}
+	}
+
+	if (is_write || ret < 0) {
+		kfree(fd_prot->prot_sg);
+		vfree(fd_prot->prot_buf);
+	}
+
+	return ret;
+}
+
 static int fd_do_rw(struct se_cmd *cmd, struct scatterlist *sgl,
 		u32 sgl_nents, int is_write)
 {
@@ -551,6 +617,8 @@
 	      enum dma_data_direction data_direction)
 {
 	struct se_device *dev = cmd->se_dev;
+	struct fd_prot fd_prot;
+	sense_reason_t rc;
 	int ret = 0;
 
 	/*
@@ -558,8 +626,48 @@
 	 * physical memory addresses to struct iovec virtual memory.
 	 */
 	if (data_direction == DMA_FROM_DEVICE) {
+		memset(&fd_prot, 0, sizeof(struct fd_prot));
+
+		if (cmd->prot_type) {
+			ret = fd_do_prot_rw(cmd, &fd_prot, false);
+			if (ret < 0)
+				return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+		}
+
 		ret = fd_do_rw(cmd, sgl, sgl_nents, 0);
+
+		if (ret > 0 && cmd->prot_type) {
+			u32 sectors = cmd->data_length / dev->dev_attrib.block_size;
+
+			rc = sbc_dif_verify_read(cmd, cmd->t_task_lba, sectors,
+						 0, fd_prot.prot_sg, 0);
+			if (rc) {
+				kfree(fd_prot.prot_sg);
+				vfree(fd_prot.prot_buf);
+				return rc;
+			}
+			kfree(fd_prot.prot_sg);
+			vfree(fd_prot.prot_buf);
+		}
 	} else {
+		memset(&fd_prot, 0, sizeof(struct fd_prot));
+
+		if (cmd->prot_type) {
+			u32 sectors = cmd->data_length / dev->dev_attrib.block_size;
+
+			ret = fd_do_prot_rw(cmd, &fd_prot, false);
+			if (ret < 0)
+				return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+
+			rc = sbc_dif_verify_write(cmd, cmd->t_task_lba, sectors,
+						  0, fd_prot.prot_sg, 0);
+			if (rc) {
+				kfree(fd_prot.prot_sg);
+				vfree(fd_prot.prot_buf);
+				return rc;
+			}
+		}
+
 		ret = fd_do_rw(cmd, sgl, sgl_nents, 1);
 		/*
 		 * Perform implicit vfs_fsync_range() for fd_do_writev() ops
@@ -576,10 +684,19 @@
 
 			vfs_fsync_range(fd_dev->fd_file, start, end, 1);
 		}
+
+		if (ret > 0 && cmd->prot_type) {
+			ret = fd_do_prot_rw(cmd, &fd_prot, true);
+			if (ret < 0)
+				return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+		}
 	}
 
-	if (ret < 0)
+	if (ret < 0) {
+		kfree(fd_prot.prot_sg);
+		vfree(fd_prot.prot_buf);
 		return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+	}
 
 	if (ret)
 		target_complete_cmd(cmd, SAM_STAT_GOOD);
@@ -700,6 +817,140 @@
 		       dev->dev_attrib.block_size);
 }
 
+static int fd_init_prot(struct se_device *dev)
+{
+	struct fd_dev *fd_dev = FD_DEV(dev);
+	struct file *prot_file, *file = fd_dev->fd_file;
+	struct inode *inode;
+	int ret, flags = O_RDWR | O_CREAT | O_LARGEFILE | O_DSYNC;
+	char buf[FD_MAX_DEV_PROT_NAME];
+
+	if (!file) {
+		pr_err("Unable to locate fd_dev->fd_file\n");
+		return -ENODEV;
+	}
+
+	inode = file->f_mapping->host;
+	if (S_ISBLK(inode->i_mode)) {
+		pr_err("FILEIO Protection emulation only supported on"
+		       " !S_ISBLK\n");
+		return -ENOSYS;
+	}
+
+	if (fd_dev->fbd_flags & FDBD_HAS_BUFFERED_IO_WCE)
+		flags &= ~O_DSYNC;
+
+	snprintf(buf, FD_MAX_DEV_PROT_NAME, "%s.protection",
+		 fd_dev->fd_dev_name);
+
+	prot_file = filp_open(buf, flags, 0600);
+	if (IS_ERR(prot_file)) {
+		pr_err("filp_open(%s) failed\n", buf);
+		ret = PTR_ERR(prot_file);
+		return ret;
+	}
+	fd_dev->fd_prot_file = prot_file;
+
+	return 0;
+}
+
+static void fd_init_format_buf(struct se_device *dev, unsigned char *buf,
+			       u32 unit_size, u32 *ref_tag, u16 app_tag,
+			       bool inc_reftag)
+{
+	unsigned char *p = buf;
+	int i;
+
+	for (i = 0; i < unit_size; i += dev->prot_length) {
+		*((u16 *)&p[0]) = 0xffff;
+		*((__be16 *)&p[2]) = cpu_to_be16(app_tag);
+		*((__be32 *)&p[4]) = cpu_to_be32(*ref_tag);
+
+		if (inc_reftag)
+			(*ref_tag)++;
+
+		p += dev->prot_length;
+	}
+}
+
+static int fd_format_prot(struct se_device *dev)
+{
+	struct fd_dev *fd_dev = FD_DEV(dev);
+	struct file *prot_fd = fd_dev->fd_prot_file;
+	sector_t prot_length, prot;
+	unsigned char *buf;
+	loff_t pos = 0;
+	u32 ref_tag = 0;
+	int unit_size = FDBD_FORMAT_UNIT_SIZE * dev->dev_attrib.block_size;
+	int rc, ret = 0, size, len;
+	bool inc_reftag = false;
+
+	if (!dev->dev_attrib.pi_prot_type) {
+		pr_err("Unable to format_prot while pi_prot_type == 0\n");
+		return -ENODEV;
+	}
+	if (!prot_fd) {
+		pr_err("Unable to locate fd_dev->fd_prot_file\n");
+		return -ENODEV;
+	}
+
+	switch (dev->dev_attrib.pi_prot_type) {
+	case TARGET_DIF_TYPE3_PROT:
+		ref_tag = 0xffffffff;
+		break;
+	case TARGET_DIF_TYPE2_PROT:
+	case TARGET_DIF_TYPE1_PROT:
+		inc_reftag = true;
+		break;
+	default:
+		break;
+	}
+
+	buf = vzalloc(unit_size);
+	if (!buf) {
+		pr_err("Unable to allocate FILEIO prot buf\n");
+		return -ENOMEM;
+	}
+
+	prot_length = (dev->transport->get_blocks(dev) + 1) * dev->prot_length;
+	size = prot_length;
+
+	pr_debug("Using FILEIO prot_length: %llu\n",
+		 (unsigned long long)prot_length);
+
+	for (prot = 0; prot < prot_length; prot += unit_size) {
+
+		fd_init_format_buf(dev, buf, unit_size, &ref_tag, 0xffff,
+				   inc_reftag);
+
+		len = min(unit_size, size);
+
+		rc = kernel_write(prot_fd, buf, len, pos);
+		if (rc != len) {
+			pr_err("vfs_write to prot file failed: %d\n", rc);
+			ret = -ENODEV;
+			goto out;
+		}
+		pos += len;
+		size -= len;
+	}
+
+out:
+	vfree(buf);
+	return ret;
+}
+
+static void fd_free_prot(struct se_device *dev)
+{
+	struct fd_dev *fd_dev = FD_DEV(dev);
+
+	if (!fd_dev->fd_prot_file)
+		return;
+
+	filp_close(fd_dev->fd_prot_file, NULL);
+	fd_dev->fd_prot_file = NULL;
+}
+
 static struct sbc_ops fd_sbc_ops = {
 	.execute_rw		= fd_execute_rw,
 	.execute_sync_cache	= fd_execute_sync_cache,
@@ -730,6 +981,9 @@
 	.show_configfs_dev_params = fd_show_configfs_dev_params,
 	.get_device_type	= sbc_get_device_type,
 	.get_blocks		= fd_get_blocks,
+	.init_prot		= fd_init_prot,
+	.format_prot		= fd_format_prot,
+	.free_prot		= fd_free_prot,
 };
 
 static int __init fileio_module_init(void)
diff --git a/drivers/target/target_core_file.h b/drivers/target/target_core_file.h
index d7772c1..182cbb2 100644
--- a/drivers/target/target_core_file.h
+++ b/drivers/target/target_core_file.h
@@ -4,6 +4,7 @@
 #define FD_VERSION		"4.0"
 
 #define FD_MAX_DEV_NAME		256
+#define FD_MAX_DEV_PROT_NAME	(FD_MAX_DEV_NAME + 16)
 #define FD_DEVICE_QUEUE_DEPTH	32
 #define FD_MAX_DEVICE_QUEUE_DEPTH 128
 #define FD_BLOCKSIZE		512
@@ -18,6 +19,13 @@
 #define FBDF_HAS_PATH		0x01
 #define FBDF_HAS_SIZE		0x02
 #define FDBD_HAS_BUFFERED_IO_WCE 0x04
+#define FDBD_FORMAT_UNIT_SIZE	2048
+
+struct fd_prot {
+	unsigned char	*prot_buf;
+	struct scatterlist *prot_sg;
+	u32 prot_sg_nents;
+};
 
 struct fd_dev {
 	struct se_device dev;
@@ -32,6 +40,7 @@
 	u32		fd_block_size;
 	unsigned long long fd_dev_size;
 	struct file	*fd_file;
+	struct file	*fd_prot_file;
 	/* FILEIO HBA device is connected to */
 	struct fd_host *fd_host;
 } ____cacheline_aligned;
diff --git a/drivers/target/target_core_iblock.c b/drivers/target/target_core_iblock.c
index c87959f..554d4f7 100644
--- a/drivers/target/target_core_iblock.c
+++ b/drivers/target/target_core_iblock.c
@@ -91,6 +91,7 @@
 	struct iblock_dev *ib_dev = IBLOCK_DEV(dev);
 	struct request_queue *q;
 	struct block_device *bd = NULL;
+	struct blk_integrity *bi;
 	fmode_t mode;
 	int ret = -ENOMEM;
 
@@ -155,8 +156,40 @@
 	if (blk_queue_nonrot(q))
 		dev->dev_attrib.is_nonrot = 1;
 
+	bi = bdev_get_integrity(bd);
+	if (bi) {
+		struct bio_set *bs = ib_dev->ibd_bio_set;
+
+		if (!strcmp(bi->name, "T10-DIF-TYPE3-IP") ||
+		    !strcmp(bi->name, "T10-DIF-TYPE1-IP")) {
+			pr_err("IBLOCK export of blk_integrity: %s not"
+			       " supported\n", bi->name);
+			ret = -ENOSYS;
+			goto out_blkdev_put;
+		}
+
+		if (!strcmp(bi->name, "T10-DIF-TYPE3-CRC")) {
+			dev->dev_attrib.pi_prot_type = TARGET_DIF_TYPE3_PROT;
+		} else if (!strcmp(bi->name, "T10-DIF-TYPE1-CRC")) {
+			dev->dev_attrib.pi_prot_type = TARGET_DIF_TYPE1_PROT;
+		}
+
+		if (dev->dev_attrib.pi_prot_type) {
+			if (bioset_integrity_create(bs, IBLOCK_BIO_POOL_SIZE) < 0) {
+				pr_err("Unable to allocate bioset for PI\n");
+				ret = -ENOMEM;
+				goto out_blkdev_put;
+			}
+			pr_debug("IBLOCK setup BIP bs->bio_integrity_pool: %p\n",
+				 bs->bio_integrity_pool);
+		}
+		dev->dev_attrib.hw_pi_prot_type = dev->dev_attrib.pi_prot_type;
+	}
+
 	return 0;
 
+out_blkdev_put:
+	blkdev_put(ib_dev->ibd_bd, FMODE_WRITE|FMODE_READ|FMODE_EXCL);
 out_free_bioset:
 	bioset_free(ib_dev->ibd_bio_set);
 	ib_dev->ibd_bio_set = NULL;
@@ -170,8 +203,10 @@
 
 	if (ib_dev->ibd_bd != NULL)
 		blkdev_put(ib_dev->ibd_bd, FMODE_WRITE|FMODE_READ|FMODE_EXCL);
-	if (ib_dev->ibd_bio_set != NULL)
+	if (ib_dev->ibd_bio_set != NULL) {
+		bioset_integrity_free(ib_dev->ibd_bio_set);
 		bioset_free(ib_dev->ibd_bio_set);
+	}
 	kfree(ib_dev);
 }
 
@@ -319,7 +354,7 @@
 	bio->bi_bdev = ib_dev->ibd_bd;
 	bio->bi_private = cmd;
 	bio->bi_end_io = &iblock_bio_done;
-	bio->bi_sector = lba;
+	bio->bi_iter.bi_sector = lba;
 
 	return bio;
 }
@@ -586,13 +621,58 @@
 	return bl;
 }
 
+static int
+iblock_alloc_bip(struct se_cmd *cmd, struct bio *bio)
+{
+	struct se_device *dev = cmd->se_dev;
+	struct blk_integrity *bi;
+	struct bio_integrity_payload *bip;
+	struct iblock_dev *ib_dev = IBLOCK_DEV(dev);
+	struct scatterlist *sg;
+	int i, rc;
+
+	bi = bdev_get_integrity(ib_dev->ibd_bd);
+	if (!bi) {
+		pr_err("Unable to locate bio_integrity\n");
+		return -ENODEV;
+	}
+
+	bip = bio_integrity_alloc(bio, GFP_NOIO, cmd->t_prot_nents);
+	if (!bip) {
+		pr_err("Unable to allocate bio_integrity_payload\n");
+		return -ENOMEM;
+	}
+
+	bip->bip_iter.bi_size = (cmd->data_length / dev->dev_attrib.block_size) *
+			 dev->prot_length;
+	bip->bip_iter.bi_sector = bio->bi_iter.bi_sector;
+
+	pr_debug("IBLOCK BIP Size: %u Sector: %llu\n", bip->bip_iter.bi_size,
+		 (unsigned long long)bip->bip_iter.bi_sector);
+
+	for_each_sg(cmd->t_prot_sg, sg, cmd->t_prot_nents, i) {
+
+		rc = bio_integrity_add_page(bio, sg_page(sg), sg->length,
+					    sg->offset);
+		if (rc != sg->length) {
+			pr_err("bio_integrity_add_page() failed: %d\n", rc);
+			return -ENOMEM;
+		}
+
+		pr_debug("Added bio integrity page: %p length: %d offset: %d\n",
+			 sg_page(sg), sg->length, sg->offset);
+	}
+
+	return 0;
+}
+
 static sense_reason_t
 iblock_execute_rw(struct se_cmd *cmd, struct scatterlist *sgl, u32 sgl_nents,
 		  enum dma_data_direction data_direction)
 {
 	struct se_device *dev = cmd->se_dev;
 	struct iblock_req *ibr;
-	struct bio *bio;
+	struct bio *bio, *bio_start;
 	struct bio_list list;
 	struct scatterlist *sg;
 	u32 sg_num = sgl_nents;
@@ -655,6 +735,7 @@
 	if (!bio)
 		goto fail_free_ibr;
 
+	bio_start = bio;
 	bio_list_init(&list);
 	bio_list_add(&list, bio);
 
@@ -688,6 +769,12 @@
 		sg_num--;
 	}
 
+	if (cmd->prot_type) {
+		int rc = iblock_alloc_bip(cmd, bio_start);
+		if (rc)
+			goto fail_put_bios;
+	}
+
 	iblock_submit_bios(&list, rw);
 	iblock_complete_cmd(cmd);
 	return 0;
@@ -763,7 +850,7 @@
 	return sbc_parse_cdb(cmd, &iblock_sbc_ops);
 }
 
-bool iblock_get_write_cache(struct se_device *dev)
+static bool iblock_get_write_cache(struct se_device *dev)
 {
 	struct iblock_dev *ib_dev = IBLOCK_DEV(dev);
 	struct block_device *bd = ib_dev->ibd_bd;
diff --git a/drivers/target/target_core_internal.h b/drivers/target/target_core_internal.h
index 47b63b0..de9cab7 100644
--- a/drivers/target/target_core_internal.h
+++ b/drivers/target/target_core_internal.h
@@ -35,6 +35,8 @@
 int	se_dev_set_emulate_tpws(struct se_device *, int);
 int	se_dev_set_emulate_caw(struct se_device *, int);
 int	se_dev_set_emulate_3pc(struct se_device *, int);
+int	se_dev_set_pi_prot_type(struct se_device *, int);
+int	se_dev_set_pi_prot_format(struct se_device *, int);
 int	se_dev_set_enforce_pr_isids(struct se_device *, int);
 int	se_dev_set_is_nonrot(struct se_device *, int);
 int	se_dev_set_emulate_rest_reord(struct se_device *dev, int);
@@ -77,9 +79,9 @@
 		const char *);
 void	core_tpg_add_node_to_devs(struct se_node_acl *, struct se_portal_group *);
 void	core_tpg_wait_for_nacl_pr_ref(struct se_node_acl *);
-struct se_lun *core_tpg_pre_addlun(struct se_portal_group *, u32);
-int	core_tpg_post_addlun(struct se_portal_group *, struct se_lun *,
-		u32, void *);
+struct se_lun *core_tpg_alloc_lun(struct se_portal_group *, u32);
+int	core_tpg_add_lun(struct se_portal_group *, struct se_lun *,
+		u32, struct se_device *);
 struct se_lun *core_tpg_pre_dellun(struct se_portal_group *, u32 unpacked_lun);
 int	core_tpg_post_dellun(struct se_portal_group *, struct se_lun *);
 
diff --git a/drivers/target/target_core_pr.c b/drivers/target/target_core_pr.c
index 2f5d779..3013287 100644
--- a/drivers/target/target_core_pr.c
+++ b/drivers/target/target_core_pr.c
@@ -2009,7 +2009,7 @@
 	struct t10_reservation *pr_tmpl = &dev->t10_pr;
 	unsigned char isid_buf[PR_REG_ISID_LEN], *isid_ptr = NULL;
 	sense_reason_t ret = TCM_NO_SENSE;
-	int pr_holder = 0;
+	int pr_holder = 0, type;
 
 	if (!se_sess || !se_lun) {
 		pr_err("SPC-3 PR: se_sess || struct se_lun is NULL!\n");
@@ -2131,6 +2131,7 @@
 			ret = TCM_RESERVATION_CONFLICT;
 			goto out;
 		}
+		type = pr_reg->pr_res_type;
 
 		spin_lock(&pr_tmpl->registration_lock);
 		/*
@@ -2161,6 +2162,7 @@
 		 * Release the calling I_T Nexus registration now..
 		 */
 		__core_scsi3_free_registration(cmd->se_dev, pr_reg, NULL, 1);
+		pr_reg = NULL;
 
 		/*
 		 * From spc4r17, section 5.7.11.3 Unregistering
@@ -2174,8 +2176,8 @@
 		 * RESERVATIONS RELEASED.
 		 */
 		if (pr_holder &&
-		    (pr_reg->pr_res_type == PR_TYPE_WRITE_EXCLUSIVE_REGONLY ||
-		     pr_reg->pr_res_type == PR_TYPE_EXCLUSIVE_ACCESS_REGONLY)) {
+		    (type == PR_TYPE_WRITE_EXCLUSIVE_REGONLY ||
+		     type == PR_TYPE_EXCLUSIVE_ACCESS_REGONLY)) {
 			list_for_each_entry(pr_reg_p,
 					&pr_tmpl->registration_list,
 					pr_reg_list) {
@@ -2194,7 +2196,8 @@
 	ret = core_scsi3_update_and_write_aptpl(dev, aptpl);
 
 out:
-	core_scsi3_put_pr_reg(pr_reg);
+	if (pr_reg)
+		core_scsi3_put_pr_reg(pr_reg);
 	return ret;
 }
 
diff --git a/drivers/target/target_core_pr.h b/drivers/target/target_core_pr.h
index ed75cdd..2ee2936 100644
--- a/drivers/target/target_core_pr.h
+++ b/drivers/target/target_core_pr.h
@@ -43,6 +43,11 @@
 #define PR_APTPL_MAX_IPORT_LEN			256
 #define PR_APTPL_MAX_TPORT_LEN			256
 
+/*
+ *  Function defined in target_core_spc.c
+ */
+void spc_parse_naa_6h_vendor_specific(struct se_device *, unsigned char *);
+
 extern struct kmem_cache *t10_pr_reg_cache;
 
 extern void core_pr_dump_initiator_port(struct t10_pr_registration *,
diff --git a/drivers/target/target_core_rd.c b/drivers/target/target_core_rd.c
index 4ffe5f2..66a5aba 100644
--- a/drivers/target/target_core_rd.c
+++ b/drivers/target/target_core_rd.c
@@ -78,23 +78,14 @@
 	hba->hba_ptr = NULL;
 }
 
-/*	rd_release_device_space():
- *
- *
- */
-static void rd_release_device_space(struct rd_dev *rd_dev)
+static u32 rd_release_sgl_table(struct rd_dev *rd_dev, struct rd_dev_sg_table *sg_table,
+				 u32 sg_table_count)
 {
-	u32 i, j, page_count = 0, sg_per_table;
-	struct rd_dev_sg_table *sg_table;
 	struct page *pg;
 	struct scatterlist *sg;
+	u32 i, j, page_count = 0, sg_per_table;
 
-	if (!rd_dev->sg_table_array || !rd_dev->sg_table_count)
-		return;
-
-	sg_table = rd_dev->sg_table_array;
-
-	for (i = 0; i < rd_dev->sg_table_count; i++) {
+	for (i = 0; i < sg_table_count; i++) {
 		sg = sg_table[i].sg_table;
 		sg_per_table = sg_table[i].rd_sg_count;
 
@@ -105,16 +96,28 @@
 				page_count++;
 			}
 		}
-
 		kfree(sg);
 	}
 
+	kfree(sg_table);
+	return page_count;
+}
+
+static void rd_release_device_space(struct rd_dev *rd_dev)
+{
+	u32 page_count;
+
+	if (!rd_dev->sg_table_array || !rd_dev->sg_table_count)
+		return;
+
+	page_count = rd_release_sgl_table(rd_dev, rd_dev->sg_table_array,
+					  rd_dev->sg_table_count);
+
 	pr_debug("CORE_RD[%u] - Released device space for Ramdisk"
 		" Device ID: %u, pages %u in %u tables total bytes %lu\n",
 		rd_dev->rd_host->rd_host_id, rd_dev->rd_dev_id, page_count,
 		rd_dev->sg_table_count, (unsigned long)page_count * PAGE_SIZE);
 
-	kfree(sg_table);
 	rd_dev->sg_table_array = NULL;
 	rd_dev->sg_table_count = 0;
 }
@@ -124,38 +127,15 @@
  *
  *
  */
-static int rd_build_device_space(struct rd_dev *rd_dev)
+static int rd_allocate_sgl_table(struct rd_dev *rd_dev, struct rd_dev_sg_table *sg_table,
+				 u32 total_sg_needed, unsigned char init_payload)
 {
-	u32 i = 0, j, page_offset = 0, sg_per_table, sg_tables, total_sg_needed;
+	u32 i = 0, j, page_offset = 0, sg_per_table;
 	u32 max_sg_per_table = (RD_MAX_ALLOCATION_SIZE /
 				sizeof(struct scatterlist));
-	struct rd_dev_sg_table *sg_table;
 	struct page *pg;
 	struct scatterlist *sg;
-
-	if (rd_dev->rd_page_count <= 0) {
-		pr_err("Illegal page count: %u for Ramdisk device\n",
-			rd_dev->rd_page_count);
-		return -EINVAL;
-	}
-
-	/* Don't need backing pages for NULLIO */
-	if (rd_dev->rd_flags & RDF_NULLIO)
-		return 0;
-
-	total_sg_needed = rd_dev->rd_page_count;
-
-	sg_tables = (total_sg_needed / max_sg_per_table) + 1;
-
-	sg_table = kzalloc(sg_tables * sizeof(struct rd_dev_sg_table), GFP_KERNEL);
-	if (!sg_table) {
-		pr_err("Unable to allocate memory for Ramdisk"
-			" scatterlist tables\n");
-		return -ENOMEM;
-	}
-
-	rd_dev->sg_table_array = sg_table;
-	rd_dev->sg_table_count = sg_tables;
+	unsigned char *p;
 
 	while (total_sg_needed) {
 		sg_per_table = (total_sg_needed > max_sg_per_table) ?
@@ -186,16 +166,114 @@
 			}
 			sg_assign_page(&sg[j], pg);
 			sg[j].length = PAGE_SIZE;
+
+			p = kmap(pg);
+			memset(p, init_payload, PAGE_SIZE);
+			kunmap(pg);
 		}
 
 		page_offset += sg_per_table;
 		total_sg_needed -= sg_per_table;
 	}
 
+	return 0;
+}
+
+static int rd_build_device_space(struct rd_dev *rd_dev)
+{
+	struct rd_dev_sg_table *sg_table;
+	u32 sg_tables, total_sg_needed;
+	u32 max_sg_per_table = (RD_MAX_ALLOCATION_SIZE /
+				sizeof(struct scatterlist));
+	int rc;
+
+	if (rd_dev->rd_page_count <= 0) {
+		pr_err("Illegal page count: %u for Ramdisk device\n",
+		       rd_dev->rd_page_count);
+		return -EINVAL;
+	}
+
+	/* Don't need backing pages for NULLIO */
+	if (rd_dev->rd_flags & RDF_NULLIO)
+		return 0;
+
+	total_sg_needed = rd_dev->rd_page_count;
+
+	sg_tables = (total_sg_needed / max_sg_per_table) + 1;
+
+	sg_table = kzalloc(sg_tables * sizeof(struct rd_dev_sg_table), GFP_KERNEL);
+	if (!sg_table) {
+		pr_err("Unable to allocate memory for Ramdisk"
+		       " scatterlist tables\n");
+		return -ENOMEM;
+	}
+
+	rd_dev->sg_table_array = sg_table;
+	rd_dev->sg_table_count = sg_tables;
+
+	rc = rd_allocate_sgl_table(rd_dev, sg_table, total_sg_needed, 0x00);
+	if (rc)
+		return rc;
+
 	pr_debug("CORE_RD[%u] - Built Ramdisk Device ID: %u space of"
-		" %u pages in %u tables\n", rd_dev->rd_host->rd_host_id,
-		rd_dev->rd_dev_id, rd_dev->rd_page_count,
-		rd_dev->sg_table_count);
+		 " %u pages in %u tables\n", rd_dev->rd_host->rd_host_id,
+		 rd_dev->rd_dev_id, rd_dev->rd_page_count,
+		 rd_dev->sg_table_count);
+
+	return 0;
+}
+
+static void rd_release_prot_space(struct rd_dev *rd_dev)
+{
+	u32 page_count;
+
+	if (!rd_dev->sg_prot_array || !rd_dev->sg_prot_count)
+		return;
+
+	page_count = rd_release_sgl_table(rd_dev, rd_dev->sg_prot_array,
+					  rd_dev->sg_prot_count);
+
+	pr_debug("CORE_RD[%u] - Released protection space for Ramdisk"
+		 " Device ID: %u, pages %u in %u tables total bytes %lu\n",
+		 rd_dev->rd_host->rd_host_id, rd_dev->rd_dev_id, page_count,
+		 rd_dev->sg_table_count, (unsigned long)page_count * PAGE_SIZE);
+
+	rd_dev->sg_prot_array = NULL;
+	rd_dev->sg_prot_count = 0;
+}
+
+static int rd_build_prot_space(struct rd_dev *rd_dev, int prot_length)
+{
+	struct rd_dev_sg_table *sg_table;
+	u32 total_sg_needed, sg_tables;
+	u32 max_sg_per_table = (RD_MAX_ALLOCATION_SIZE /
+				sizeof(struct scatterlist));
+	int rc;
+
+	if (rd_dev->rd_flags & RDF_NULLIO)
+		return 0;
+
+	total_sg_needed = rd_dev->rd_page_count / prot_length;
+
+	sg_tables = (total_sg_needed / max_sg_per_table) + 1;
+
+	sg_table = kzalloc(sg_tables * sizeof(struct rd_dev_sg_table), GFP_KERNEL);
+	if (!sg_table) {
+		pr_err("Unable to allocate memory for Ramdisk protection"
+		       " scatterlist tables\n");
+		return -ENOMEM;
+	}
+
+	rd_dev->sg_prot_array = sg_table;
+	rd_dev->sg_prot_count = sg_tables;
+
+	rc = rd_allocate_sgl_table(rd_dev, sg_table, total_sg_needed, 0xff);
+	if (rc)
+		return rc;
+
+	pr_debug("CORE_RD[%u] - Built Ramdisk Device ID: %u prot space of"
+		 " %u pages in %u tables\n", rd_dev->rd_host->rd_host_id,
+		 rd_dev->rd_dev_id, total_sg_needed, rd_dev->sg_prot_count);
 
 	return 0;
 }
@@ -278,6 +356,26 @@
 	return NULL;
 }
 
+static struct rd_dev_sg_table *rd_get_prot_table(struct rd_dev *rd_dev, u32 page)
+{
+	struct rd_dev_sg_table *sg_table;
+	u32 i, sg_per_table = (RD_MAX_ALLOCATION_SIZE /
+				sizeof(struct scatterlist));
+
+	i = page / sg_per_table;
+	if (i < rd_dev->sg_prot_count) {
+		sg_table = &rd_dev->sg_prot_array[i];
+		if ((sg_table->page_start_offset <= page) &&
+		     (sg_table->page_end_offset >= page))
+			return sg_table;
+	}
+
+	pr_err("Unable to locate struct prot rd_dev_sg_table for page: %u\n",
+			page);
+
+	return NULL;
+}
+
 static sense_reason_t
 rd_execute_rw(struct se_cmd *cmd, struct scatterlist *sgl, u32 sgl_nents,
 	      enum dma_data_direction data_direction)
@@ -292,6 +390,7 @@
 	u32 rd_page;
 	u32 src_len;
 	u64 tmp;
+	sense_reason_t rc;
 
 	if (dev->rd_flags & RDF_NULLIO) {
 		target_complete_cmd(cmd, SAM_STAT_GOOD);
@@ -314,6 +413,28 @@
 			data_direction == DMA_FROM_DEVICE ? "Read" : "Write",
 			cmd->t_task_lba, rd_size, rd_page, rd_offset);
 
+	if (cmd->prot_type && data_direction == DMA_TO_DEVICE) {
+		struct rd_dev_sg_table *prot_table;
+		struct scatterlist *prot_sg;
+		u32 sectors = cmd->data_length / se_dev->dev_attrib.block_size;
+		u32 prot_offset, prot_page;
+
+		tmp = cmd->t_task_lba * se_dev->prot_length;
+		prot_offset = do_div(tmp, PAGE_SIZE);
+		prot_page = tmp;
+
+		prot_table = rd_get_prot_table(dev, prot_page);
+		if (!prot_table)
+			return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+
+		prot_sg = &prot_table->sg_table[prot_page - prot_table->page_start_offset];
+
+		rc = sbc_dif_verify_write(cmd, cmd->t_task_lba, sectors, 0,
+					  prot_sg, prot_offset);
+		if (rc)
+			return rc;
+	}
+
 	src_len = PAGE_SIZE - rd_offset;
 	sg_miter_start(&m, sgl, sgl_nents,
 			data_direction == DMA_FROM_DEVICE ?
@@ -375,6 +496,28 @@
 	}
 	sg_miter_stop(&m);
 
+	if (cmd->prot_type && data_direction == DMA_FROM_DEVICE) {
+		struct rd_dev_sg_table *prot_table;
+		struct scatterlist *prot_sg;
+		u32 sectors = cmd->data_length / se_dev->dev_attrib.block_size;
+		u32 prot_offset, prot_page;
+
+		tmp = cmd->t_task_lba * se_dev->prot_length;
+		prot_offset = do_div(tmp, PAGE_SIZE);
+		prot_page = tmp;
+
+		prot_table = rd_get_prot_table(dev, prot_page);
+		if (!prot_table)
+			return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+
+		prot_sg = &prot_table->sg_table[prot_page - prot_table->page_start_offset];
+
+		rc = sbc_dif_verify_read(cmd, cmd->t_task_lba, sectors, 0,
+					 prot_sg, prot_offset);
+		if (rc)
+			return rc;
+	}
+
 	target_complete_cmd(cmd, SAM_STAT_GOOD);
 	return 0;
 }
@@ -456,6 +599,23 @@
 	return blocks_long;
 }
 
+static int rd_init_prot(struct se_device *dev)
+{
+	struct rd_dev *rd_dev = RD_DEV(dev);
+
+	if (!dev->dev_attrib.pi_prot_type)
+		return 0;
+
+	return rd_build_prot_space(rd_dev, dev->prot_length);
+}
+
+static void rd_free_prot(struct se_device *dev)
+{
+	struct rd_dev *rd_dev = RD_DEV(dev);
+
+	rd_release_prot_space(rd_dev);
+}
+
 static struct sbc_ops rd_sbc_ops = {
 	.execute_rw		= rd_execute_rw,
 };
@@ -481,6 +641,8 @@
 	.show_configfs_dev_params = rd_show_configfs_dev_params,
 	.get_device_type	= sbc_get_device_type,
 	.get_blocks		= rd_get_blocks,
+	.init_prot		= rd_init_prot,
+	.free_prot		= rd_free_prot,
 };
 
 int __init rd_module_init(void)
diff --git a/drivers/target/target_core_rd.h b/drivers/target/target_core_rd.h
index 1789d1e..cc46a6a 100644
--- a/drivers/target/target_core_rd.h
+++ b/drivers/target/target_core_rd.h
@@ -33,8 +33,12 @@
 	u32		rd_page_count;
 	/* Number of SG tables in sg_table_array */
 	u32		sg_table_count;
+	/* Number of SG tables in sg_prot_array */
+	u32		sg_prot_count;
 	/* Array of rd_dev_sg_table_t containing scatterlists */
 	struct rd_dev_sg_table *sg_table_array;
+	/* Array of rd_dev_sg_table containing protection scatterlists */
+	struct rd_dev_sg_table *sg_prot_array;
 	/* Ramdisk HBA device is connected to */
 	struct rd_host *rd_host;
 } ____cacheline_aligned;
diff --git a/drivers/target/target_core_sbc.c b/drivers/target/target_core_sbc.c
index 52ae54e..a448944 100644
--- a/drivers/target/target_core_sbc.c
+++ b/drivers/target/target_core_sbc.c
@@ -23,6 +23,7 @@
 #include <linux/kernel.h>
 #include <linux/module.h>
 #include <linux/ratelimit.h>
+#include <linux/crc-t10dif.h>
 #include <asm/unaligned.h>
 #include <scsi/scsi.h>
 #include <scsi/scsi_tcq.h>
@@ -33,7 +34,7 @@
 
 #include "target_core_internal.h"
 #include "target_core_ua.h"
-
+#include "target_core_alua.h"
 
 static sense_reason_t
 sbc_emulate_readcapacity(struct se_cmd *cmd)
@@ -105,6 +106,11 @@
 	buf[9] = (dev->dev_attrib.block_size >> 16) & 0xff;
 	buf[10] = (dev->dev_attrib.block_size >> 8) & 0xff;
 	buf[11] = dev->dev_attrib.block_size & 0xff;
+	/*
+	 * Set P_TYPE and PROT_EN bits for DIF support
+	 */
+	if (dev->dev_attrib.pi_prot_type)
+		buf[12] = (dev->dev_attrib.pi_prot_type - 1) << 1 | 0x1;
 
 	if (dev->transport->get_lbppbe)
 		buf[13] = dev->transport->get_lbppbe(dev) & 0x0f;
@@ -563,6 +569,44 @@
 	return TCM_NO_SENSE;
 }
 
+static bool
+sbc_check_prot(struct se_device *dev, struct se_cmd *cmd, unsigned char *cdb,
+	       u32 sectors)
+{
+	if (!cmd->t_prot_sg || !cmd->t_prot_nents)
+		return true;
+
+	switch (dev->dev_attrib.pi_prot_type) {
+	case TARGET_DIF_TYPE3_PROT:
+		if (!(cdb[1] & 0xe0))
+			return true;
+
+		cmd->reftag_seed = 0xffffffff;
+		break;
+	case TARGET_DIF_TYPE2_PROT:
+		if (cdb[1] & 0xe0)
+			return false;
+
+		cmd->reftag_seed = cmd->t_task_lba;
+		break;
+	case TARGET_DIF_TYPE1_PROT:
+		if (!(cdb[1] & 0xe0))
+			return true;
+
+		cmd->reftag_seed = cmd->t_task_lba;
+		break;
+	case TARGET_DIF_TYPE0_PROT:
+	default:
+		return true;
+	}
+
+	cmd->prot_type = dev->dev_attrib.pi_prot_type;
+	cmd->prot_length = dev->prot_length * sectors;
+	cmd->prot_handover = PROT_SEPERATED;
+
+	return true;
+}
+
 sense_reason_t
 sbc_parse_cdb(struct se_cmd *cmd, struct sbc_ops *ops)
 {
@@ -583,6 +627,10 @@
 	case READ_10:
 		sectors = transport_get_sectors_10(cdb);
 		cmd->t_task_lba = transport_lba_32(cdb);
+
+		if (!sbc_check_prot(dev, cmd, cdb, sectors))
+			return TCM_UNSUPPORTED_SCSI_OPCODE;
+
 		cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB;
 		cmd->execute_rw = ops->execute_rw;
 		cmd->execute_cmd = sbc_execute_rw;
@@ -590,6 +638,10 @@
 	case READ_12:
 		sectors = transport_get_sectors_12(cdb);
 		cmd->t_task_lba = transport_lba_32(cdb);
+
+		if (!sbc_check_prot(dev, cmd, cdb, sectors))
+			return TCM_UNSUPPORTED_SCSI_OPCODE;
+
 		cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB;
 		cmd->execute_rw = ops->execute_rw;
 		cmd->execute_cmd = sbc_execute_rw;
@@ -597,6 +649,10 @@
 	case READ_16:
 		sectors = transport_get_sectors_16(cdb);
 		cmd->t_task_lba = transport_lba_64(cdb);
+
+		if (!sbc_check_prot(dev, cmd, cdb, sectors))
+			return TCM_UNSUPPORTED_SCSI_OPCODE;
+
 		cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB;
 		cmd->execute_rw = ops->execute_rw;
 		cmd->execute_cmd = sbc_execute_rw;
@@ -612,6 +668,10 @@
 	case WRITE_VERIFY:
 		sectors = transport_get_sectors_10(cdb);
 		cmd->t_task_lba = transport_lba_32(cdb);
+
+		if (!sbc_check_prot(dev, cmd, cdb, sectors))
+			return TCM_UNSUPPORTED_SCSI_OPCODE;
+
 		if (cdb[1] & 0x8)
 			cmd->se_cmd_flags |= SCF_FUA;
 		cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB;
@@ -621,6 +681,10 @@
 	case WRITE_12:
 		sectors = transport_get_sectors_12(cdb);
 		cmd->t_task_lba = transport_lba_32(cdb);
+
+		if (!sbc_check_prot(dev, cmd, cdb, sectors))
+			return TCM_UNSUPPORTED_SCSI_OPCODE;
+
 		if (cdb[1] & 0x8)
 			cmd->se_cmd_flags |= SCF_FUA;
 		cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB;
@@ -630,6 +694,10 @@
 	case WRITE_16:
 		sectors = transport_get_sectors_16(cdb);
 		cmd->t_task_lba = transport_lba_64(cdb);
+
+		if (!sbc_check_prot(dev, cmd, cdb, sectors))
+			return TCM_UNSUPPORTED_SCSI_OPCODE;
+
 		if (cdb[1] & 0x8)
 			cmd->se_cmd_flags |= SCF_FUA;
 		cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB;
@@ -731,6 +799,9 @@
 		case SAI_READ_CAPACITY_16:
 			cmd->execute_cmd = sbc_emulate_readcapacity_16;
 			break;
+		case SAI_REPORT_REFERRALS:
+			cmd->execute_cmd = target_emulate_report_referrals;
+			break;
 		default:
 			pr_err("Unsupported SA: 0x%02x\n",
 				cmd->t_task_cdb[1] & 0x1f);
@@ -959,3 +1030,190 @@
 	return ret;
 }
 EXPORT_SYMBOL(sbc_execute_unmap);
+
+static sense_reason_t
+sbc_dif_v1_verify(struct se_device *dev, struct se_dif_v1_tuple *sdt,
+		  const void *p, sector_t sector, unsigned int ei_lba)
+{
+	int block_size = dev->dev_attrib.block_size;
+	__be16 csum;
+
+	csum = cpu_to_be16(crc_t10dif(p, block_size));
+
+	if (sdt->guard_tag != csum) {
+		pr_err("DIFv1 checksum failed on sector %llu guard tag 0x%04x"
+			" csum 0x%04x\n", (unsigned long long)sector,
+			be16_to_cpu(sdt->guard_tag), be16_to_cpu(csum));
+		return TCM_LOGICAL_BLOCK_GUARD_CHECK_FAILED;
+	}
+
+	if (dev->dev_attrib.pi_prot_type == TARGET_DIF_TYPE1_PROT &&
+	    be32_to_cpu(sdt->ref_tag) != (sector & 0xffffffff)) {
+		pr_err("DIFv1 Type 1 reference failed on sector: %llu tag: 0x%08x"
+		       " sector MSB: 0x%08x\n", (unsigned long long)sector,
+		       be32_to_cpu(sdt->ref_tag), (u32)(sector & 0xffffffff));
+		return TCM_LOGICAL_BLOCK_REF_TAG_CHECK_FAILED;
+	}
+
+	if (dev->dev_attrib.pi_prot_type == TARGET_DIF_TYPE2_PROT &&
+	    be32_to_cpu(sdt->ref_tag) != ei_lba) {
+		pr_err("DIFv1 Type 2 reference failed on sector: %llu tag: 0x%08x"
+		       " ei_lba: 0x%08x\n", (unsigned long long)sector,
+			be32_to_cpu(sdt->ref_tag), ei_lba);
+		return TCM_LOGICAL_BLOCK_REF_TAG_CHECK_FAILED;
+	}
+
+	return 0;
+}
+
+static void
+sbc_dif_copy_prot(struct se_cmd *cmd, unsigned int sectors, bool read,
+		  struct scatterlist *sg, int sg_off)
+{
+	struct se_device *dev = cmd->se_dev;
+	struct scatterlist *psg;
+	void *paddr, *addr;
+	unsigned int i, len, left;
+	unsigned int offset = 0;
+
+	left = sectors * dev->prot_length;
+
+	for_each_sg(cmd->t_prot_sg, psg, cmd->t_prot_nents, i) {
+
+		len = min(psg->length, left);
+		if (offset >= sg->length) {
+			sg = sg_next(sg);
+			offset = 0;
+			sg_off = sg->offset;
+		}
+
+		paddr = kmap_atomic(sg_page(psg)) + psg->offset;
+		addr = kmap_atomic(sg_page(sg)) + sg_off;
+
+		if (read)
+			memcpy(paddr, addr, len);
+		else
+			memcpy(addr, paddr, len);
+
+		left -= len;
+		offset += len;
+		kunmap_atomic(paddr);
+		kunmap_atomic(addr);
+	}
+}
+
+sense_reason_t
+sbc_dif_verify_write(struct se_cmd *cmd, sector_t start, unsigned int sectors,
+		     unsigned int ei_lba, struct scatterlist *sg, int sg_off)
+{
+	struct se_device *dev = cmd->se_dev;
+	struct se_dif_v1_tuple *sdt;
+	struct scatterlist *dsg, *psg = cmd->t_prot_sg;
+	sector_t sector = start;
+	void *daddr, *paddr;
+	int i, j, offset = 0;
+	sense_reason_t rc;
+
+	for_each_sg(cmd->t_data_sg, dsg, cmd->t_data_nents, i) {
+		daddr = kmap_atomic(sg_page(dsg)) + dsg->offset;
+		paddr = kmap_atomic(sg_page(psg)) + psg->offset;
+
+		for (j = 0; j < dsg->length; j += dev->dev_attrib.block_size) {
+
+			if (offset >= psg->length) {
+				kunmap_atomic(paddr);
+				psg = sg_next(psg);
+				paddr = kmap_atomic(sg_page(psg)) + psg->offset;
+				offset = 0;
+			}
+
+			sdt = paddr + offset;
+
+			pr_debug("DIF WRITE sector: %llu guard_tag: 0x%04x"
+				 " app_tag: 0x%04x ref_tag: %u\n",
+				 (unsigned long long)sector, sdt->guard_tag,
+				 sdt->app_tag, be32_to_cpu(sdt->ref_tag));
+
+			rc = sbc_dif_v1_verify(dev, sdt, daddr + j, sector,
+					       ei_lba);
+			if (rc) {
+				kunmap_atomic(paddr);
+				kunmap_atomic(daddr);
+				cmd->bad_sector = sector;
+				return rc;
+			}
+
+			sector++;
+			ei_lba++;
+			offset += sizeof(struct se_dif_v1_tuple);
+		}
+
+		kunmap_atomic(paddr);
+		kunmap_atomic(daddr);
+	}
+	sbc_dif_copy_prot(cmd, sectors, false, sg, sg_off);
+
+	return 0;
+}
+EXPORT_SYMBOL(sbc_dif_verify_write);
+
+sense_reason_t
+sbc_dif_verify_read(struct se_cmd *cmd, sector_t start, unsigned int sectors,
+		    unsigned int ei_lba, struct scatterlist *sg, int sg_off)
+{
+	struct se_device *dev = cmd->se_dev;
+	struct se_dif_v1_tuple *sdt;
+	struct scatterlist *dsg;
+	sector_t sector = start;
+	void *daddr, *paddr;
+	int i, j, offset = sg_off;
+	sense_reason_t rc;
+
+	for_each_sg(cmd->t_data_sg, dsg, cmd->t_data_nents, i) {
+		daddr = kmap_atomic(sg_page(dsg)) + dsg->offset;
+		paddr = kmap_atomic(sg_page(sg)) + sg->offset;
+
+		for (j = 0; j < dsg->length; j += dev->dev_attrib.block_size) {
+
+			if (offset >= sg->length) {
+				kunmap_atomic(paddr);
+				sg = sg_next(sg);
+				paddr = kmap_atomic(sg_page(sg)) + sg->offset;
+				offset = 0;
+			}
+
+			sdt = paddr + offset;
+
+			pr_debug("DIF READ sector: %llu guard_tag: 0x%04x"
+				 " app_tag: 0x%04x ref_tag: %u\n",
+				 (unsigned long long)sector, sdt->guard_tag,
+				 sdt->app_tag, be32_to_cpu(sdt->ref_tag));
+
+			if (sdt->app_tag == cpu_to_be16(0xffff)) {
+				sector++;
+				offset += sizeof(struct se_dif_v1_tuple);
+				continue;
+			}
+
+			rc = sbc_dif_v1_verify(dev, sdt, daddr + j, sector,
+					       ei_lba);
+			if (rc) {
+				kunmap_atomic(paddr);
+				kunmap_atomic(daddr);
+				cmd->bad_sector = sector;
+				return rc;
+			}
+
+			sector++;
+			ei_lba++;
+			offset += sizeof(struct se_dif_v1_tuple);
+		}
+
+		kunmap_atomic(paddr);
+		kunmap_atomic(daddr);
+	}
+	sbc_dif_copy_prot(cmd, sectors, true, sg, sg_off);
+
+	return 0;
+}
+EXPORT_SYMBOL(sbc_dif_verify_read);
diff --git a/drivers/target/target_core_spc.c b/drivers/target/target_core_spc.c
index 021c3f4..3bebc71 100644
--- a/drivers/target/target_core_spc.c
+++ b/drivers/target/target_core_spc.c
@@ -100,6 +100,11 @@
 	 */
 	if (dev->dev_attrib.emulate_3pc)
 		buf[5] |= 0x8;
+	/*
+	 * Set Protection (PROTECT) bit when DIF has been enabled.
+	 */
+	if (dev->dev_attrib.pi_prot_type)
+		buf[5] |= 0x1;
 
 	buf[7] = 0x2; /* CmdQue=1 */
 
@@ -267,7 +272,7 @@
 	port = lun->lun_sep;
 	if (port) {
 		struct t10_alua_lu_gp *lu_gp;
-		u32 padding, scsi_name_len;
+		u32 padding, scsi_name_len, scsi_target_len;
 		u16 lu_gp_id = 0;
 		u16 tg_pt_gp_id = 0;
 		u16 tpgt;
@@ -365,16 +370,6 @@
 		 * section 7.5.1 Table 362
 		 */
 check_scsi_name:
-		scsi_name_len = strlen(tpg->se_tpg_tfo->tpg_get_wwn(tpg));
-		/* UTF-8 ",t,0x<16-bit TPGT>" + NULL Terminator */
-		scsi_name_len += 10;
-		/* Check for 4-byte padding */
-		padding = ((-scsi_name_len) & 3);
-		if (padding != 0)
-			scsi_name_len += padding;
-		/* Header size + Designation descriptor */
-		scsi_name_len += 4;
-
 		buf[off] =
 			(tpg->se_tpg_tfo->get_fabric_proto_ident(tpg) << 4);
 		buf[off++] |= 0x3; /* CODE SET == UTF-8 */
@@ -402,13 +397,57 @@
 		 * shall be no larger than 256 and shall be a multiple
 		 * of four.
 		 */
+		padding = ((-scsi_name_len) & 3);
 		if (padding)
 			scsi_name_len += padding;
+		if (scsi_name_len > 256)
+			scsi_name_len = 256;
 
 		buf[off-1] = scsi_name_len;
 		off += scsi_name_len;
 		/* Header size + Designation descriptor */
 		len += (scsi_name_len + 4);
+
+		/*
+		 * Target device designator
+		 */
+		buf[off] =
+			(tpg->se_tpg_tfo->get_fabric_proto_ident(tpg) << 4);
+		buf[off++] |= 0x3; /* CODE SET == UTF-8 */
+		buf[off] = 0x80; /* Set PIV=1 */
+		/* Set ASSOCIATION == target device: 10b */
+		buf[off] |= 0x20;
+		/* DESIGNATOR TYPE == SCSI name string */
+		buf[off++] |= 0x8;
+		off += 2; /* Skip over Reserved and length */
+		/*
+		 * SCSI name string identifier containing $FABRIC_MOD
+		 * dependent information.  For LIO-Target and iSCSI
+		 * Target Port, this means "<iSCSI name>" in
+		 * UTF-8 encoding.
+		 */
+		scsi_target_len = sprintf(&buf[off], "%s",
+					  tpg->se_tpg_tfo->tpg_get_wwn(tpg));
+		scsi_target_len += 1 /* Include NULL terminator */;
+		/*
+		 * The null-terminated, null-padded (see 4.4.2) SCSI
+		 * NAME STRING field contains a UTF-8 format string.
+		 * The number of bytes in the SCSI NAME STRING field
+		 * (i.e., the value in the DESIGNATOR LENGTH field)
+		 * shall be no larger than 256 and shall be a multiple
+		 * of four.
+		 */
+		padding = ((-scsi_target_len) & 3);
+		if (padding)
+			scsi_target_len += padding;
+		if (scsi_target_len > 256)
+			scsi_target_len = 256;
+
+		buf[off-1] = scsi_target_len;
+		off += scsi_target_len;
+
+		/* Header size + Designation descriptor */
+		len += (scsi_target_len + 4);
 	}
 	buf[2] = ((len >> 8) & 0xff);
 	buf[3] = (len & 0xff); /* Page Length for VPD 0x83 */
@@ -436,12 +475,26 @@
 	struct se_device *dev = cmd->se_dev;
 
 	buf[3] = 0x3c;
+	/*
+	 * Set GRD_CHK + REF_CHK for TYPE1 protection, or GRD_CHK
+	 * only for TYPE3 protection.
+	 */
+	if (dev->dev_attrib.pi_prot_type == TARGET_DIF_TYPE1_PROT)
+		buf[4] = 0x5;
+	else if (dev->dev_attrib.pi_prot_type == TARGET_DIF_TYPE3_PROT)
+		buf[4] = 0x4;
+
 	/* Set HEADSUP, ORDSUP, SIMPSUP */
 	buf[5] = 0x07;
 
 	/* If WriteCache emulation is enabled, set V_SUP */
 	if (spc_check_dev_wce(dev))
 		buf[6] = 0x01;
+	/* If an LBA map is present set R_SUP */
+	spin_lock(&cmd->se_dev->t10_alua.lba_map_lock);
+	if (!list_empty(&dev->t10_alua.lba_map_list))
+		buf[8] = 0x10;
+	spin_unlock(&cmd->se_dev->t10_alua.lba_map_lock);
 	return 0;
 }
 
@@ -600,6 +653,20 @@
 	return 0;
 }
 
+/* Referrals VPD page */
+static sense_reason_t
+spc_emulate_evpd_b3(struct se_cmd *cmd, unsigned char *buf)
+{
+	struct se_device *dev = cmd->se_dev;
+
+	buf[0] = dev->transport->get_device_type(dev);
+	buf[3] = 0x0c;
+	put_unaligned_be32(dev->t10_alua.lba_map_segment_size, &buf[8]);
+	put_unaligned_be32(dev->t10_alua.lba_map_segment_size, &buf[12]);
+
+	return 0;
+}
+
 static sense_reason_t
 spc_emulate_evpd_00(struct se_cmd *cmd, unsigned char *buf);
 
@@ -614,6 +681,7 @@
 	{ .page = 0xb0, .emulate = spc_emulate_evpd_b0 },
 	{ .page = 0xb1, .emulate = spc_emulate_evpd_b1 },
 	{ .page = 0xb2, .emulate = spc_emulate_evpd_b2 },
+	{ .page = 0xb3, .emulate = spc_emulate_evpd_b3 },
 };
 
 /* supported vital product data pages */
@@ -643,11 +711,15 @@
 	struct se_portal_group *tpg = cmd->se_lun->lun_sep->sep_tpg;
 	unsigned char *rbuf;
 	unsigned char *cdb = cmd->t_task_cdb;
-	unsigned char buf[SE_INQUIRY_BUF];
+	unsigned char *buf;
 	sense_reason_t ret;
 	int p;
 
-	memset(buf, 0, SE_INQUIRY_BUF);
+	buf = kzalloc(SE_INQUIRY_BUF, GFP_KERNEL);
+	if (!buf) {
+		pr_err("Unable to allocate response buffer for INQUIRY\n");
+		return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+	}
 
 	if (dev == tpg->tpg_virt_lun0.lun_se_dev)
 		buf[0] = 0x3f; /* Not connected */
@@ -680,9 +752,10 @@
 out:
 	rbuf = transport_kmap_data_sg(cmd);
 	if (rbuf) {
-		memcpy(rbuf, buf, min_t(u32, sizeof(buf), cmd->data_length));
+		memcpy(rbuf, buf, min_t(u32, SE_INQUIRY_BUF, cmd->data_length));
 		transport_kunmap_data_sg(cmd);
 	}
+	kfree(buf);
 
 	if (!ret)
 		target_complete_cmd(cmd, GOOD);
@@ -785,6 +858,19 @@
 	 * status (see SAM-4).
 	 */
 	p[5] = (dev->dev_attrib.emulate_tas) ? 0x40 : 0x00;
+	/*
+	 * From spc4r30, section 7.5.7 Control mode page
+	 *
+	 * Application Tag Owner (ATO) bit set to one.
+	 *
+	 * If the ATO bit is set to one the device server shall not modify the
+	 * LOGICAL BLOCK APPLICATION TAG field and, depending on the protection
+	 * type, shall not modify the contents of the LOGICAL BLOCK REFERENCE
+	 * TAG field.
+	 */
+	if (dev->dev_attrib.pi_prot_type)
+		p[5] |= 0x80;
+
 	p[8] = 0xff;
 	p[9] = 0xff;
 	p[11] = 30;
diff --git a/drivers/target/target_core_tpg.c b/drivers/target/target_core_tpg.c
index 2a573de..c036595 100644
--- a/drivers/target/target_core_tpg.c
+++ b/drivers/target/target_core_tpg.c
@@ -656,7 +656,7 @@
 	spin_lock_init(&lun->lun_sep_lock);
 	init_completion(&lun->lun_ref_comp);
 
-	ret = core_tpg_post_addlun(se_tpg, lun, lun_access, dev);
+	ret = core_tpg_add_lun(se_tpg, lun, lun_access, dev);
 	if (ret < 0)
 		return ret;
 
@@ -781,7 +781,7 @@
 }
 EXPORT_SYMBOL(core_tpg_deregister);
 
-struct se_lun *core_tpg_pre_addlun(
+struct se_lun *core_tpg_alloc_lun(
 	struct se_portal_group *tpg,
 	u32 unpacked_lun)
 {
@@ -811,11 +811,11 @@
 	return lun;
 }
 
-int core_tpg_post_addlun(
+int core_tpg_add_lun(
 	struct se_portal_group *tpg,
 	struct se_lun *lun,
 	u32 lun_access,
-	void *lun_ptr)
+	struct se_device *dev)
 {
 	int ret;
 
@@ -823,7 +823,7 @@
 	if (ret < 0)
 		return ret;
 
-	ret = core_dev_export(lun_ptr, tpg, lun);
+	ret = core_dev_export(dev, tpg, lun);
 	if (ret < 0) {
 		percpu_ref_cancel_init(&lun->lun_ref);
 		return ret;
diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c
index 91953da..24b4f65d 100644
--- a/drivers/target/target_core_transport.c
+++ b/drivers/target/target_core_transport.c
@@ -62,6 +62,8 @@
 struct kmem_cache *t10_alua_lu_gp_mem_cache;
 struct kmem_cache *t10_alua_tg_pt_gp_cache;
 struct kmem_cache *t10_alua_tg_pt_gp_mem_cache;
+struct kmem_cache *t10_alua_lba_map_cache;
+struct kmem_cache *t10_alua_lba_map_mem_cache;
 
 static void transport_complete_task_attr(struct se_cmd *cmd);
 static void transport_handle_queue_full(struct se_cmd *cmd,
@@ -128,14 +130,36 @@
 				"mem_t failed\n");
 		goto out_free_tg_pt_gp_cache;
 	}
+	t10_alua_lba_map_cache = kmem_cache_create(
+			"t10_alua_lba_map_cache",
+			sizeof(struct t10_alua_lba_map),
+			__alignof__(struct t10_alua_lba_map), 0, NULL);
+	if (!t10_alua_lba_map_cache) {
+		pr_err("kmem_cache_create() for t10_alua_lba_map_"
+				"cache failed\n");
+		goto out_free_tg_pt_gp_mem_cache;
+	}
+	t10_alua_lba_map_mem_cache = kmem_cache_create(
+			"t10_alua_lba_map_mem_cache",
+			sizeof(struct t10_alua_lba_map_member),
+			__alignof__(struct t10_alua_lba_map_member), 0, NULL);
+	if (!t10_alua_lba_map_mem_cache) {
+		pr_err("kmem_cache_create() for t10_alua_lba_map_mem_"
+				"cache failed\n");
+		goto out_free_lba_map_cache;
+	}
 
 	target_completion_wq = alloc_workqueue("target_completion",
 					       WQ_MEM_RECLAIM, 0);
 	if (!target_completion_wq)
-		goto out_free_tg_pt_gp_mem_cache;
+		goto out_free_lba_map_mem_cache;
 
 	return 0;
 
+out_free_lba_map_mem_cache:
+	kmem_cache_destroy(t10_alua_lba_map_mem_cache);
+out_free_lba_map_cache:
+	kmem_cache_destroy(t10_alua_lba_map_cache);
 out_free_tg_pt_gp_mem_cache:
 	kmem_cache_destroy(t10_alua_tg_pt_gp_mem_cache);
 out_free_tg_pt_gp_cache:
@@ -164,6 +188,8 @@
 	kmem_cache_destroy(t10_alua_lu_gp_mem_cache);
 	kmem_cache_destroy(t10_alua_tg_pt_gp_cache);
 	kmem_cache_destroy(t10_alua_tg_pt_gp_mem_cache);
+	kmem_cache_destroy(t10_alua_lba_map_cache);
+	kmem_cache_destroy(t10_alua_lba_map_mem_cache);
 }
 
 /* This code ensures unique mib indexes are handed out. */
@@ -568,10 +594,11 @@
 {
 	struct se_lun *lun = cmd->se_lun;
 
-	if (!lun || !cmd->lun_ref_active)
+	if (!lun)
 		return;
 
-	percpu_ref_put(&lun->lun_ref);
+	if (cmpxchg(&cmd->lun_ref_active, true, false))
+		percpu_ref_put(&lun->lun_ref);
 }
 
 void transport_cmd_finish_abort(struct se_cmd *cmd, int remove)
@@ -642,9 +669,6 @@
 		return;
 	}
 
-	if (!success)
-		cmd->transport_state |= CMD_T_FAILED;
-
 	/*
 	 * Check for case where an explicit ABORT_TASK has been received
 	 * and transport_wait_for_tasks() will be waiting for completion..
@@ -654,7 +678,7 @@
 		spin_unlock_irqrestore(&cmd->t_state_lock, flags);
 		complete(&cmd->t_transport_stop_comp);
 		return;
-	} else if (cmd->transport_state & CMD_T_FAILED) {
+	} else if (!success) {
 		INIT_WORK(&cmd->work, target_complete_failure_work);
 	} else {
 		INIT_WORK(&cmd->work, target_complete_ok_work);
@@ -1284,6 +1308,8 @@
  * @sgl_count: scatterlist count for unidirectional mapping
  * @sgl_bidi: struct scatterlist memory for bidirectional READ mapping
  * @sgl_bidi_count: scatterlist count for bidirectional READ mapping
+ * @sgl_prot: struct scatterlist memory for protection information
+ * @sgl_prot_count: scatterlist count for protection information
  *
  * Returns non zero to signal active I/O shutdown failure.  All other
  * setup exceptions will be returned as a SCSI CHECK_CONDITION response,
@@ -1296,7 +1322,8 @@
 		unsigned char *cdb, unsigned char *sense, u32 unpacked_lun,
 		u32 data_length, int task_attr, int data_dir, int flags,
 		struct scatterlist *sgl, u32 sgl_count,
-		struct scatterlist *sgl_bidi, u32 sgl_bidi_count)
+		struct scatterlist *sgl_bidi, u32 sgl_bidi_count,
+		struct scatterlist *sgl_prot, u32 sgl_prot_count)
 {
 	struct se_portal_group *se_tpg;
 	sense_reason_t rc;
@@ -1338,6 +1365,14 @@
 		target_put_sess_cmd(se_sess, se_cmd);
 		return 0;
 	}
+	/*
+	 * Save pointers for SGLs containing protection information,
+	 * if present.
+	 */
+	if (sgl_prot_count) {
+		se_cmd->t_prot_sg = sgl_prot;
+		se_cmd->t_prot_nents = sgl_prot_count;
+	}
 
 	rc = target_setup_cmd_from_cdb(se_cmd, cdb);
 	if (rc != 0) {
@@ -1380,6 +1415,7 @@
 			return 0;
 		}
 	}
+
 	/*
 	 * Check if we need to delay processing because of ALUA
 	 * Active/NonOptimized primary access state..
@@ -1419,7 +1455,7 @@
 {
 	return target_submit_cmd_map_sgls(se_cmd, se_sess, cdb, sense,
 			unpacked_lun, data_length, task_attr, data_dir,
-			flags, NULL, 0, NULL, 0);
+			flags, NULL, 0, NULL, 0, NULL, 0);
 }
 EXPORT_SYMBOL(target_submit_cmd);
 
@@ -2455,6 +2491,19 @@
 	return 0;
 }
 
+static
+void transport_err_sector_info(unsigned char *buffer, sector_t bad_sector)
+{
+	/* Place failed LBA in sense data information descriptor 0. */
+	buffer[SPC_ADD_SENSE_LEN_OFFSET] = 0xc;
+	buffer[SPC_DESC_TYPE_OFFSET] = 0; /* Information */
+	buffer[SPC_ADDITIONAL_DESC_LEN_OFFSET] = 0xa;
+	buffer[SPC_VALIDITY_OFFSET] = 0x80;
+
+	/* Descriptor Information: failing sector */
+	put_unaligned_be64(bad_sector, &buffer[12]);
+}
+
 int
 transport_send_check_condition_and_sense(struct se_cmd *cmd,
 		sense_reason_t reason, int from_transport)
@@ -2648,6 +2697,39 @@
 		buffer[SPC_ASC_KEY_OFFSET] = 0x1d;
 		buffer[SPC_ASCQ_KEY_OFFSET] = 0x00;
 		break;
+	case TCM_LOGICAL_BLOCK_GUARD_CHECK_FAILED:
+		/* CURRENT ERROR */
+		buffer[0] = 0x70;
+		buffer[SPC_ADD_SENSE_LEN_OFFSET] = 10;
+		/* ILLEGAL REQUEST */
+		buffer[SPC_SENSE_KEY_OFFSET] = ILLEGAL_REQUEST;
+		/* LOGICAL BLOCK GUARD CHECK FAILED */
+		buffer[SPC_ASC_KEY_OFFSET] = 0x10;
+		buffer[SPC_ASCQ_KEY_OFFSET] = 0x01;
+		transport_err_sector_info(buffer, cmd->bad_sector);
+		break;
+	case TCM_LOGICAL_BLOCK_APP_TAG_CHECK_FAILED:
+		/* CURRENT ERROR */
+		buffer[0] = 0x70;
+		buffer[SPC_ADD_SENSE_LEN_OFFSET] = 10;
+		/* ILLEGAL REQUEST */
+		buffer[SPC_SENSE_KEY_OFFSET] = ILLEGAL_REQUEST;
+		/* LOGICAL BLOCK APPLICATION TAG CHECK FAILED */
+		buffer[SPC_ASC_KEY_OFFSET] = 0x10;
+		buffer[SPC_ASCQ_KEY_OFFSET] = 0x02;
+		transport_err_sector_info(buffer, cmd->bad_sector);
+		break;
+	case TCM_LOGICAL_BLOCK_REF_TAG_CHECK_FAILED:
+		/* CURRENT ERROR */
+		buffer[0] = 0x70;
+		buffer[SPC_ADD_SENSE_LEN_OFFSET] = 10;
+		/* ILLEGAL REQUEST */
+		buffer[SPC_SENSE_KEY_OFFSET] = ILLEGAL_REQUEST;
+		/* LOGICAL BLOCK REFERENCE TAG CHECK FAILED */
+		buffer[SPC_ASC_KEY_OFFSET] = 0x10;
+		buffer[SPC_ASCQ_KEY_OFFSET] = 0x03;
+		transport_err_sector_info(buffer, cmd->bad_sector);
+		break;
 	case TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE:
 	default:
 		/* CURRENT ERROR */
diff --git a/drivers/target/target_core_ua.c b/drivers/target/target_core_ua.c
index b04467e..505519b 100644
--- a/drivers/target/target_core_ua.c
+++ b/drivers/target/target_core_ua.c
@@ -98,7 +98,6 @@
 		pr_err("Unable to allocate struct se_ua\n");
 		return -ENOMEM;
 	}
-	INIT_LIST_HEAD(&ua->ua_dev_list);
 	INIT_LIST_HEAD(&ua->ua_nacl_list);
 
 	ua->ua_nacl = nacl;
diff --git a/drivers/target/target_core_xcopy.c b/drivers/target/target_core_xcopy.c
index 6b88a99..669c536 100644
--- a/drivers/target/target_core_xcopy.c
+++ b/drivers/target/target_core_xcopy.c
@@ -40,10 +40,6 @@
 
 static struct workqueue_struct *xcopy_wq = NULL;
 /*
- * From target_core_spc.c
- */
-extern void spc_parse_naa_6h_vendor_specific(struct se_device *, unsigned char *);
-/*
  * From target_core_device.c
  */
 extern struct mutex g_device_mutex;
diff --git a/drivers/target/tcm_fc/tfc_cmd.c b/drivers/target/tcm_fc/tfc_cmd.c
index 479ec56..8b2c1aa 100644
--- a/drivers/target/tcm_fc/tfc_cmd.c
+++ b/drivers/target/tcm_fc/tfc_cmd.c
@@ -438,7 +438,7 @@
 	struct se_session *se_sess = sess->se_sess;
 	int tag;
 
-	tag = percpu_ida_alloc(&se_sess->sess_tag_pool, GFP_ATOMIC);
+	tag = percpu_ida_alloc(&se_sess->sess_tag_pool, TASK_RUNNING);
 	if (tag < 0)
 		goto busy;
 
diff --git a/drivers/target/tcm_fc/tfc_conf.c b/drivers/target/tcm_fc/tfc_conf.c
index c6932fb..e879da8 100644
--- a/drivers/target/tcm_fc/tfc_conf.c
+++ b/drivers/target/tcm_fc/tfc_conf.c
@@ -267,7 +267,7 @@
 	return found;
 }
 
-struct se_node_acl *ft_tpg_alloc_fabric_acl(struct se_portal_group *se_tpg)
+static struct se_node_acl *ft_tpg_alloc_fabric_acl(struct se_portal_group *se_tpg)
 {
 	struct ft_node_acl *acl;
 
@@ -552,7 +552,7 @@
 	.fabric_drop_nodeacl =		&ft_del_acl,
 };
 
-int ft_register_configfs(void)
+static int ft_register_configfs(void)
 {
 	struct target_fabric_configfs *fabric;
 	int ret;
@@ -599,7 +599,7 @@
 	return 0;
 }
 
-void ft_deregister_configfs(void)
+static void ft_deregister_configfs(void)
 {
 	if (!ft_configfs)
 		return;
diff --git a/drivers/tty/hvc/hvc_opal.c b/drivers/tty/hvc/hvc_opal.c
index 6496872..b01659b 100644
--- a/drivers/tty/hvc/hvc_opal.c
+++ b/drivers/tty/hvc/hvc_opal.c
@@ -255,13 +255,7 @@
 	/* Register as a vio device to receive callbacks */
 	return platform_driver_register(&hvc_opal_driver);
 }
-module_init(hvc_opal_init);
-
-static void __exit hvc_opal_exit(void)
-{
-	platform_driver_unregister(&hvc_opal_driver);
-}
-module_exit(hvc_opal_exit);
+device_initcall(hvc_opal_init);
 
 static void udbg_opal_putc(char c)
 {
diff --git a/drivers/tty/hvc/hvc_rtas.c b/drivers/tty/hvc/hvc_rtas.c
index 0069bb8..08c8792 100644
--- a/drivers/tty/hvc/hvc_rtas.c
+++ b/drivers/tty/hvc/hvc_rtas.c
@@ -102,17 +102,7 @@
 
 	return 0;
 }
-module_init(hvc_rtas_init);
-
-/* This will tear down the tty portion of the driver */
-static void __exit hvc_rtas_exit(void)
-{
-	/* Really the fun isn't over until the worker thread breaks down and
-	 * the tty cleans up */
-	if (hvc_rtas_dev)
-		hvc_remove(hvc_rtas_dev);
-}
-module_exit(hvc_rtas_exit);
+device_initcall(hvc_rtas_init);
 
 /* This will happen prior to module init.  There is no tty at this time? */
 static int __init hvc_rtas_console_init(void)
diff --git a/drivers/tty/hvc/hvc_udbg.c b/drivers/tty/hvc/hvc_udbg.c
index 7222827..9cf573d 100644
--- a/drivers/tty/hvc/hvc_udbg.c
+++ b/drivers/tty/hvc/hvc_udbg.c
@@ -80,14 +80,7 @@
 
 	return 0;
 }
-module_init(hvc_udbg_init);
-
-static void __exit hvc_udbg_exit(void)
-{
-	if (hvc_udbg_dev)
-		hvc_remove(hvc_udbg_dev);
-}
-module_exit(hvc_udbg_exit);
+device_initcall(hvc_udbg_init);
 
 static int __init hvc_udbg_console_init(void)
 {
diff --git a/drivers/tty/hvc/hvc_xen.c b/drivers/tty/hvc/hvc_xen.c
index 636c9ba..2dc2831 100644
--- a/drivers/tty/hvc/hvc_xen.c
+++ b/drivers/tty/hvc/hvc_xen.c
@@ -561,18 +561,7 @@
 #endif
 	return r;
 }
-
-static void __exit xen_hvc_fini(void)
-{
-	struct xencons_info *entry, *next;
-
-	if (list_empty(&xenconsoles))
-			return;
-
-	list_for_each_entry_safe(entry, next, &xenconsoles, list) {
-		xen_console_remove(entry);
-	}
-}
+device_initcall(xen_hvc_init);
 
 static int xen_cons_init(void)
 {
@@ -598,10 +587,6 @@
 	hvc_instantiate(HVC_COOKIE, 0, ops);
 	return 0;
 }
-
-
-module_init(xen_hvc_init);
-module_exit(xen_hvc_fini);
 console_initcall(xen_cons_init);
 
 #ifdef CONFIG_EARLY_PRINTK
diff --git a/drivers/tty/n_gsm.c b/drivers/tty/n_gsm.c
index f34461c..2ebe47b 100644
--- a/drivers/tty/n_gsm.c
+++ b/drivers/tty/n_gsm.c
@@ -1090,6 +1090,7 @@
 {
 	unsigned int addr = 0;
 	unsigned int modem = 0;
+	unsigned int brk = 0;
 	struct gsm_dlci *dlci;
 	int len = clen;
 	u8 *dp = data;
@@ -1116,6 +1117,16 @@
 		if (len == 0)
 			return;
 	}
+	len--;
+	if (len > 0) {
+		while (gsm_read_ea(&brk, *dp++) == 0) {
+			len--;
+			if (len == 0)
+				return;
+		}
+		modem <<= 7;
+		modem |= (brk & 0x7f);
+	}
 	tty = tty_port_tty_get(&dlci->port);
 	gsm_process_modem(tty, dlci, modem, clen);
 	if (tty) {
diff --git a/drivers/tty/n_tty.c b/drivers/tty/n_tty.c
index cb8017a..d15624c 100644
--- a/drivers/tty/n_tty.c
+++ b/drivers/tty/n_tty.c
@@ -817,8 +817,7 @@
 	struct n_tty_data *ldata = tty->disc_data;
 	size_t echoed;
 
-	if ((!L_ECHO(tty) && !L_ECHONL(tty)) ||
-	    ldata->echo_mark == ldata->echo_tail)
+	if (ldata->echo_mark == ldata->echo_tail)
 		return;
 
 	mutex_lock(&ldata->output_lock);
@@ -1244,7 +1243,8 @@
 	if (L_ECHO(tty)) {
 		echo_char(c, tty);
 		commit_echoes(tty);
-	}
+	} else
+		process_echoes(tty);
 	isig(signal, tty);
 	return;
 }
@@ -1274,7 +1274,7 @@
 	if (I_IXON(tty)) {
 		if (c == START_CHAR(tty)) {
 			start_tty(tty);
-			commit_echoes(tty);
+			process_echoes(tty);
 			return 0;
 		}
 		if (c == STOP_CHAR(tty)) {
@@ -1820,8 +1820,10 @@
 	 * Fix tty hang when I_IXON(tty) is cleared, but the tty
 	 * been stopped by STOP_CHAR(tty) before it.
 	 */
-	if (!I_IXON(tty) && old && (old->c_iflag & IXON) && !tty->flow_stopped)
+	if (!I_IXON(tty) && old && (old->c_iflag & IXON) && !tty->flow_stopped) {
 		start_tty(tty);
+		process_echoes(tty);
+	}
 
 	/* The termios change make the tty ready for I/O */
 	if (waitqueue_active(&tty->write_wait))
@@ -1896,7 +1898,7 @@
 static inline int input_available_p(struct tty_struct *tty, int poll)
 {
 	struct n_tty_data *ldata = tty->disc_data;
-	int amt = poll && !TIME_CHAR(tty) ? MIN_CHAR(tty) : 1;
+	int amt = poll && !TIME_CHAR(tty) && MIN_CHAR(tty) ? MIN_CHAR(tty) : 1;
 
 	if (ldata->icanon && !L_EXTPROC(tty)) {
 		if (ldata->canon_head != ldata->read_tail)
diff --git a/drivers/tty/serial/8250/8250_core.c b/drivers/tty/serial/8250/8250_core.c
index 61ecd70..69932b7 100644
--- a/drivers/tty/serial/8250/8250_core.c
+++ b/drivers/tty/serial/8250/8250_core.c
@@ -2433,6 +2433,24 @@
 	serial_dl_write(up, quot);
 
 	/*
+	 * XR17V35x UARTs have an extra fractional divisor register (DLD)
+	 *
+	 * We need to recalculate all of the registers, because DLM and DLL
+	 * are already rounded to a whole integer.
+	 *
+	 * When recalculating we use a 32x clock instead of a 16x clock to
+	 * allow 1-bit for rounding in the fractional part.
+	 */
+	if (up->port.type == PORT_XR17V35X) {
+		unsigned int baud_x32 = (port->uartclk * 2) / baud;
+		u16 quot = baud_x32 / 32;
+		u8 quot_frac = DIV_ROUND_CLOSEST(baud_x32 % 32, 2);
+
+		serial_dl_write(up, quot);
+		serial_port_out(port, 0x2, quot_frac & 0xf);
+	}
+
+	/*
 	 * LCR DLAB must be set to enable 64-byte FIFO mode. If the FCR
 	 * is written without DLAB set, this mode will be disabled.
 	 */
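
The DLD arithmetic above is easy to sanity-check outside the driver.  A
standalone sketch of the same computation, with a 125 MHz input clock assumed
purely for illustration (the serial_dl_write()/serial_port_out() register
accesses are kernel-side and omitted):

	#include <stdio.h>

	#define DIV_ROUND_CLOSEST(x, d)	(((x) + ((d) / 2)) / (d))

	int main(void)
	{
		unsigned int uartclk = 125000000;	/* assumed example clock */
		unsigned int baud = 115200;

		/* 32x oversampling leaves one extra bit for rounding. */
		unsigned int baud_x32 = (uartclk * 2) / baud;
		unsigned int quot = baud_x32 / 32;
		unsigned int quot_frac = DIV_ROUND_CLOSEST(baud_x32 % 32, 2);

		printf("whole divisor %u, fraction %u/16\n", quot, quot_frac & 0xf);
		return 0;
	}

This prints a whole divisor of 67 with a fraction of 13/16, i.e. about 67.81,
against an ideal 16x divisor of 125000000 / (16 * 115200) = 67.82.
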
diff --git a/drivers/tty/serial/8250/8250_dw.c b/drivers/tty/serial/8250/8250_dw.c
index faa64e6..ed31135 100644
--- a/drivers/tty/serial/8250/8250_dw.c
+++ b/drivers/tty/serial/8250/8250_dw.c
@@ -391,7 +391,7 @@
 	return 0;
 }
 
-#ifdef CONFIG_PM
+#ifdef CONFIG_PM_SLEEP
 static int dw8250_suspend(struct device *dev)
 {
 	struct dw8250_data *data = dev_get_drvdata(dev);
@@ -409,7 +409,7 @@
 
 	return 0;
 }
-#endif /* CONFIG_PM */
+#endif /* CONFIG_PM_SLEEP */
 
 #ifdef CONFIG_PM_RUNTIME
 static int dw8250_runtime_suspend(struct device *dev)
diff --git a/drivers/tty/serial/8250/8250_pci.c b/drivers/tty/serial/8250/8250_pci.c
index 50228ee..0ff3e36 100644
--- a/drivers/tty/serial/8250/8250_pci.c
+++ b/drivers/tty/serial/8250/8250_pci.c
@@ -783,7 +783,8 @@
 {
 	unsigned int bar;
 
-	if ((priv->dev->subsystem_device & 0xff00) == 0x3000) {
+	if ((priv->dev->device != PCI_DEVICE_ID_NETMOS_9865) &&
+	    (priv->dev->subsystem_device & 0xff00) == 0x3000) {
 		/* netmos apparently orders BARs by datasheet layout, so serial
 		 * ports get BARs 0 and 3 (or 1 and 4 for memmapped)
 		 */
diff --git a/drivers/tty/serial/bcm63xx_uart.c b/drivers/tty/serial/bcm63xx_uart.c
index 649d512..78e82b0 100644
--- a/drivers/tty/serial/bcm63xx_uart.c
+++ b/drivers/tty/serial/bcm63xx_uart.c
@@ -29,10 +29,7 @@
 #include <linux/sysrq.h>
 #include <linux/serial.h>
 #include <linux/serial_core.h>
-
-#include <bcm63xx_irq.h>
-#include <bcm63xx_regs.h>
-#include <bcm63xx_io.h>
+#include <linux/serial_bcm63xx.h>
 
 #define BCM63XX_NR_UARTS	2
 
@@ -81,13 +78,13 @@
 static inline unsigned int bcm_uart_readl(struct uart_port *port,
 					 unsigned int offset)
 {
-	return bcm_readl(port->membase + offset);
+	return __raw_readl(port->membase + offset);
 }
 
 static inline void bcm_uart_writel(struct uart_port *port,
 				  unsigned int value, unsigned int offset)
 {
-	bcm_writel(value, port->membase + offset);
+	__raw_writel(value, port->membase + offset);
 }
 
 /*
diff --git a/drivers/tty/serial/mpc52xx_uart.c b/drivers/tty/serial/mpc52xx_uart.c
index ec06505..97888f4 100644
--- a/drivers/tty/serial/mpc52xx_uart.c
+++ b/drivers/tty/serial/mpc52xx_uart.c
@@ -421,6 +421,7 @@
 
 static struct psc_fifoc __iomem *psc_fifoc;
 static unsigned int psc_fifoc_irq;
+static struct clk *psc_fifoc_clk;
 
 static void mpc512x_psc_fifo_init(struct uart_port *port)
 {
@@ -568,36 +569,73 @@
 /* Init PSC FIFO Controller */
 static int __init mpc512x_psc_fifoc_init(void)
 {
+	int err;
 	struct device_node *np;
+	struct clk *clk;
+
+	/* default error code, potentially overwritten by clock calls */
+	err = -ENODEV;
 
 	np = of_find_compatible_node(NULL, NULL,
 				     "fsl,mpc5121-psc-fifo");
 	if (!np) {
 		pr_err("%s: Can't find FIFOC node\n", __func__);
-		return -ENODEV;
+		goto out_err;
 	}
 
+	clk = of_clk_get(np, 0);
+	if (IS_ERR(clk)) {
+		/* backwards compat with device trees that lack clock specs */
+		clk = clk_get_sys(np->name, "ipg");
+	}
+	if (IS_ERR(clk)) {
+		pr_err("%s: Can't lookup FIFO clock\n", __func__);
+		err = PTR_ERR(clk);
+		goto out_ofnode_put;
+	}
+	if (clk_prepare_enable(clk)) {
+		pr_err("%s: Can't enable FIFO clock\n", __func__);
+		clk_put(clk);
+		goto out_ofnode_put;
+	}
+	psc_fifoc_clk = clk;
+
 	psc_fifoc = of_iomap(np, 0);
 	if (!psc_fifoc) {
 		pr_err("%s: Can't map FIFOC\n", __func__);
-		of_node_put(np);
-		return -ENODEV;
+		goto out_clk_disable;
 	}
 
 	psc_fifoc_irq = irq_of_parse_and_map(np, 0);
-	of_node_put(np);
 	if (psc_fifoc_irq == 0) {
 		pr_err("%s: Can't get FIFOC irq\n", __func__);
-		iounmap(psc_fifoc);
-		return -ENODEV;
+		goto out_unmap;
 	}
 
+	of_node_put(np);
 	return 0;
+
+out_unmap:
+	iounmap(psc_fifoc);
+out_clk_disable:
+	clk_disable_unprepare(psc_fifoc_clk);
+	clk_put(psc_fifoc_clk);
+out_ofnode_put:
+	of_node_put(np);
+out_err:
+	return err;
 }
 
 static void __exit mpc512x_psc_fifoc_uninit(void)
 {
 	iounmap(psc_fifoc);
+
+	/* disable the clock, errors are not fatal */
+	if (psc_fifoc_clk) {
+		clk_disable_unprepare(psc_fifoc_clk);
+		clk_put(psc_fifoc_clk);
+		psc_fifoc_clk = NULL;
+	}
 }
 
 /* 512x specific interrupt handler. The caller holds the port lock */
@@ -619,29 +657,55 @@
 }
 
 static struct clk *psc_mclk_clk[MPC52xx_PSC_MAXNUM];
+static struct clk *psc_ipg_clk[MPC52xx_PSC_MAXNUM];
 
 /* called from within the .request_port() callback (allocation) */
 static int mpc512x_psc_alloc_clock(struct uart_port *port)
 {
 	int psc_num;
-	char clk_name[16];
 	struct clk *clk;
 	int err;
 
 	psc_num = (port->mapbase & 0xf00) >> 8;
-	snprintf(clk_name, sizeof(clk_name), "psc%d_mclk", psc_num);
-	clk = devm_clk_get(port->dev, clk_name);
+
+	clk = devm_clk_get(port->dev, "mclk");
 	if (IS_ERR(clk)) {
 		dev_err(port->dev, "Failed to get MCLK!\n");
-		return PTR_ERR(clk);
+		err = PTR_ERR(clk);
+		goto out_err;
 	}
 	err = clk_prepare_enable(clk);
 	if (err) {
 		dev_err(port->dev, "Failed to enable MCLK!\n");
-		return err;
+		goto out_err;
 	}
 	psc_mclk_clk[psc_num] = clk;
+
+	clk = devm_clk_get(port->dev, "ipg");
+	if (IS_ERR(clk)) {
+		dev_err(port->dev, "Failed to get IPG clock!\n");
+		err = PTR_ERR(clk);
+		goto out_err;
+	}
+	err = clk_prepare_enable(clk);
+	if (err) {
+		dev_err(port->dev, "Failed to enable IPG clock!\n");
+		goto out_err;
+	}
+	psc_ipg_clk[psc_num] = clk;
+
 	return 0;
+
+out_err:
+	if (psc_mclk_clk[psc_num]) {
+		clk_disable_unprepare(psc_mclk_clk[psc_num]);
+		psc_mclk_clk[psc_num] = NULL;
+	}
+	if (psc_ipg_clk[psc_num]) {
+		clk_disable_unprepare(psc_ipg_clk[psc_num]);
+		psc_ipg_clk[psc_num] = NULL;
+	}
+	return err;
 }
 
 /* called from within the .release_port() callback (release) */
@@ -656,6 +720,10 @@
 		clk_disable_unprepare(clk);
 		psc_mclk_clk[psc_num] = NULL;
 	}
+	if (psc_ipg_clk[psc_num]) {
+		clk_disable_unprepare(psc_ipg_clk[psc_num]);
+		psc_ipg_clk[psc_num] = NULL;
+	}
 }
 
 /* implementation of the .clock() callback (enable/disable) */
diff --git a/drivers/tty/serial/omap-serial.c b/drivers/tty/serial/omap-serial.c
index fa511eb..77f0351 100644
--- a/drivers/tty/serial/omap-serial.c
+++ b/drivers/tty/serial/omap-serial.c
@@ -738,9 +738,6 @@
 			return retval;
 		}
 		disable_irq(up->wakeirq);
-	} else {
-		dev_info(up->port.dev, "no wakeirq for uart%d\n",
-			 up->port.line);
 	}
 
 	dev_dbg(up->port.dev, "serial_omap_startup+%d\n", up->port.line);
@@ -1604,8 +1601,11 @@
 					    flags & SER_RS485_RTS_AFTER_SEND);
 		if (ret < 0)
 			return ret;
-	} else
+	} else if (up->rts_gpio == -EPROBE_DEFER) {
+		return -EPROBE_DEFER;
+	} else {
 		up->rts_gpio = -EINVAL;
+	}
 
 	if (of_property_read_u32_array(np, "rs485-rts-delay",
 				    rs485_delay, 2) == 0) {
@@ -1687,6 +1687,9 @@
 	up->port.iotype = UPIO_MEM;
 	up->port.irq = uartirq;
 	up->wakeirq = wakeirq;
+	if (!up->wakeirq)
+		dev_info(up->port.dev, "no wakeirq for uart%d\n",
+			 up->port.line);
 
 	up->port.regshift = 2;
 	up->port.fifosize = 64;
diff --git a/drivers/tty/serial/sirfsoc_uart.c b/drivers/tty/serial/sirfsoc_uart.c
index 49a2ffd..b7bfe24 100644
--- a/drivers/tty/serial/sirfsoc_uart.c
+++ b/drivers/tty/serial/sirfsoc_uart.c
@@ -542,8 +542,10 @@
 	wr_regl(port, ureg->sirfsoc_rx_dma_io_ctrl,
 			rd_regl(port, ureg->sirfsoc_rx_dma_io_ctrl) |
 			SIRFUART_IO_MODE);
-	sirfsoc_uart_pio_rx_chars(port, 4 - sirfport->rx_io_count);
 	spin_unlock_irqrestore(&sirfport->rx_lock, flags);
+	spin_lock(&port->lock);
+	sirfsoc_uart_pio_rx_chars(port, 4 - sirfport->rx_io_count);
+	spin_unlock(&port->lock);
 	if (sirfport->rx_io_count == 4) {
 		spin_lock_irqsave(&sirfport->rx_lock, flags);
 		sirfport->rx_io_count = 0;
diff --git a/drivers/tty/tty_io.c b/drivers/tty/tty_io.c
index c74a00a..bd2715a 100644
--- a/drivers/tty/tty_io.c
+++ b/drivers/tty/tty_io.c
@@ -1267,16 +1267,17 @@
  *	@p: output buffer of at least 7 bytes
  *
  *	Generate a name from a driver reference and write it to the output
- *	buffer.
+ *	buffer. Return the number of bytes written.
  *
  *	Locking: None
  */
-static void tty_line_name(struct tty_driver *driver, int index, char *p)
+static ssize_t tty_line_name(struct tty_driver *driver, int index, char *p)
 {
 	if (driver->flags & TTY_DRIVER_UNNUMBERED_NODE)
-		strcpy(p, driver->name);
+		return sprintf(p, "%s", driver->name);
 	else
-		sprintf(p, "%s%d", driver->name, index + driver->name_base);
+		return sprintf(p, "%s%d", driver->name,
+			       index + driver->name_base);
 }
 
 /**
@@ -3545,9 +3546,19 @@
 		if (i >= ARRAY_SIZE(cs))
 			break;
 	}
-	while (i--)
-		count += sprintf(buf + count, "%s%d%c",
-				 cs[i]->name, cs[i]->index, i ? ' ':'\n');
+	while (i--) {
+		struct tty_driver *driver;
+		const char *name = cs[i]->name;
+		int index = cs[i]->index;
+
+		driver = cs[i]->device(cs[i], &index);
+		if (driver) {
+			count += tty_line_name(driver, index, buf + count);
+			count += sprintf(buf + count, "%c", i ? ' ':'\n');
+		} else
+			count += sprintf(buf + count, "%s%d%c",
+					 name, index, i ? ' ':'\n');
+	}
 	console_unlock();
 
 	return count;
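
The rewritten loop asks each console's device() hook for its tty driver, so the
listed entry uses the device node name (for example "ttyS0") and only falls
back to the old "%s%d" formatting when no driver is available.  The position
bookkeeping is plain sprintf arithmetic, as in this minimal sketch of the
fallback path (names and indices invented for illustration):

	#include <stdio.h>

	int main(void)
	{
		char buf[64];
		int count = 0;
		const char *name[] = { "ttyS", "tty" };
		const int index[] = { 0, 1 };
		int i = 2;

		while (i--)
			count += sprintf(buf + count, "%s%d%c",
					 name[i], index[i], i ? ' ' : '\n');
		fputs(buf, stdout);	/* prints "tty1 ttyS0" */
		return 0;
	}
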
diff --git a/drivers/tty/vt/vt.c b/drivers/tty/vt/vt.c
index 61b1137..23b5d32 100644
--- a/drivers/tty/vt/vt.c
+++ b/drivers/tty/vt/vt.c
@@ -1164,6 +1164,8 @@
 			scr_memsetw(vc->vc_screenbuf, vc->vc_video_erase_char,
 				    vc->vc_screenbuf_size >> 1);
 			set_origin(vc);
+			if (CON_IS_VISIBLE(vc))
+				update_screen(vc);
 			/* fall through */
 		case 2: /* erase whole display */
 			count = vc->vc_cols * vc->vc_rows;
diff --git a/drivers/usb/core/driver.c b/drivers/usb/core/driver.c
index 5d01558..ab90a01 100644
--- a/drivers/usb/core/driver.c
+++ b/drivers/usb/core/driver.c
@@ -63,8 +63,10 @@
 	dynid->id.idProduct = idProduct;
 	dynid->id.match_flags = USB_DEVICE_ID_MATCH_DEVICE;
 	if (fields > 2 && bInterfaceClass) {
-		if (bInterfaceClass > 255)
-			return -EINVAL;
+		if (bInterfaceClass > 255) {
+			retval = -EINVAL;
+			goto fail;
+		}
 
 		dynid->id.bInterfaceClass = (u8)bInterfaceClass;
 		dynid->id.match_flags |= USB_DEVICE_ID_MATCH_INT_CLASS;
@@ -73,17 +75,21 @@
 	if (fields > 4) {
 		const struct usb_device_id *id = id_table;
 
-		if (!id)
-			return -ENODEV;
+		if (!id) {
+			retval = -ENODEV;
+			goto fail;
+		}
 
 		for (; id->match_flags; id++)
 			if (id->idVendor == refVendor && id->idProduct == refProduct)
 				break;
 
-		if (id->match_flags)
+		if (id->match_flags) {
 			dynid->id.driver_info = id->driver_info;
-		else
-			return -ENODEV;
+		} else {
+			retval = -ENODEV;
+			goto fail;
+		}
 	}
 
 	spin_lock(&dynids->lock);
@@ -95,6 +101,10 @@
 	if (retval)
 		return retval;
 	return count;
+
+fail:
+	kfree(dynid);
+	return retval;
 }
 EXPORT_SYMBOL_GPL(usb_store_new_id);
 
diff --git a/drivers/usb/core/hcd.c b/drivers/usb/core/hcd.c
index 199aaea..2518c32 100644
--- a/drivers/usb/core/hcd.c
+++ b/drivers/usb/core/hcd.c
@@ -1032,7 +1032,6 @@
 					dev_name(&usb_dev->dev), retval);
 			return retval;
 		}
-		usb_dev->lpm_capable = usb_device_supports_lpm(usb_dev);
 	}
 
 	retval = usb_new_device (usb_dev);
diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c
index babba88..64ea219 100644
--- a/drivers/usb/core/hub.c
+++ b/drivers/usb/core/hub.c
@@ -128,7 +128,7 @@
 	return usb_get_intfdata(hdev->actconfig->interface[0]);
 }
 
-int usb_device_supports_lpm(struct usb_device *udev)
+static int usb_device_supports_lpm(struct usb_device *udev)
 {
 	/* USB 2.1 (and greater) devices indicate LPM support through
 	 * their USB 2.0 Extended Capabilities BOS descriptor.
@@ -149,11 +149,6 @@
 				"Power management will be impacted.\n");
 		return 0;
 	}
-
-	/* udev is root hub */
-	if (!udev->parent)
-		return 1;
-
 	if (udev->parent->lpm_capable)
 		return 1;
 
diff --git a/drivers/usb/core/usb.h b/drivers/usb/core/usb.h
index c493836..8238577 100644
--- a/drivers/usb/core/usb.h
+++ b/drivers/usb/core/usb.h
@@ -35,7 +35,6 @@
 		unsigned int size);
 extern int usb_get_bos_descriptor(struct usb_device *dev);
 extern void usb_release_bos_descriptor(struct usb_device *dev);
-extern int usb_device_supports_lpm(struct usb_device *udev);
 extern char *usb_cache_string(struct usb_device *udev, int index);
 extern int usb_set_configuration(struct usb_device *dev, int configuration);
 extern int usb_choose_configuration(struct usb_device *udev);
diff --git a/drivers/usb/dwc2/core.c b/drivers/usb/dwc2/core.c
index 8565d87..1d12988 100644
--- a/drivers/usb/dwc2/core.c
+++ b/drivers/usb/dwc2/core.c
@@ -216,7 +216,7 @@
 	int retval = 0;
 
 	if (!select_phy)
-		return -ENODEV;
+		return 0;
 
 	usbcfg = readl(hsotg->regs + GUSBCFG);
 
diff --git a/drivers/usb/dwc2/hcd.c b/drivers/usb/dwc2/hcd.c
index f59484d..4d918ed 100644
--- a/drivers/usb/dwc2/hcd.c
+++ b/drivers/usb/dwc2/hcd.c
@@ -2565,25 +2565,14 @@
 				     struct usb_host_endpoint *ep)
 {
 	struct dwc2_hsotg *hsotg = dwc2_hcd_to_hsotg(hcd);
-	int is_control = usb_endpoint_xfer_control(&ep->desc);
-	int is_out = usb_endpoint_dir_out(&ep->desc);
-	int epnum = usb_endpoint_num(&ep->desc);
-	struct usb_device *udev;
 	unsigned long flags;
 
 	dev_dbg(hsotg->dev,
 		"DWC OTG HCD EP RESET: bEndpointAddress=0x%02x\n",
 		ep->desc.bEndpointAddress);
 
-	udev = to_usb_device(hsotg->dev);
-
 	spin_lock_irqsave(&hsotg->lock, flags);
-
-	usb_settoggle(udev, epnum, is_out, 0);
-	if (is_control)
-		usb_settoggle(udev, epnum, !is_out, 0);
 	dwc2_hcd_endpoint_reset(hsotg, ep);
-
 	spin_unlock_irqrestore(&hsotg->lock, flags);
 }
 
diff --git a/drivers/usb/dwc2/platform.c b/drivers/usb/dwc2/platform.c
index d01d0d3..eaba547 100644
--- a/drivers/usb/dwc2/platform.c
+++ b/drivers/usb/dwc2/platform.c
@@ -124,6 +124,9 @@
 	int retval;
 	int irq;
 
+	if (usb_disabled())
+		return -ENODEV;
+
 	match = of_match_device(dwc2_of_match_table, &dev->dev);
 	if (match && match->data) {
 		params = match->data;
diff --git a/drivers/usb/host/fsl-mph-dr-of.c b/drivers/usb/host/fsl-mph-dr-of.c
index abd5050..9162d1b 100644
--- a/drivers/usb/host/fsl-mph-dr-of.c
+++ b/drivers/usb/host/fsl-mph-dr-of.c
@@ -261,19 +261,8 @@
 	struct fsl_usb2_platform_data *pdata = dev_get_platdata(&pdev->dev);
 	struct clk *clk;
 	int err;
-	char clk_name[10];
-	int base, clk_num;
 
-	base = pdev->resource->start & 0xf000;
-	if (base == 0x3000)
-		clk_num = 1;
-	else if (base == 0x4000)
-		clk_num = 2;
-	else
-		return -ENODEV;
-
-	snprintf(clk_name, sizeof(clk_name), "usb%d_clk", clk_num);
-	clk = devm_clk_get(pdev->dev.parent, clk_name);
+	clk = devm_clk_get(pdev->dev.parent, "ipg");
 	if (IS_ERR(clk)) {
 		dev_err(&pdev->dev, "failed to get clk\n");
 		return PTR_ERR(clk);
diff --git a/drivers/usb/host/xhci-dbg.c b/drivers/usb/host/xhci-dbg.c
index b016d38..eb009a4 100644
--- a/drivers/usb/host/xhci-dbg.c
+++ b/drivers/usb/host/xhci-dbg.c
@@ -203,12 +203,12 @@
 				addr, (unsigned int)temp);
 
 	addr = &ir_set->erst_base;
-	temp_64 = readq(addr);
+	temp_64 = xhci_read_64(xhci, addr);
 	xhci_dbg(xhci, "  %p: ir_set.erst_base = @%08llx\n",
 			addr, temp_64);
 
 	addr = &ir_set->erst_dequeue;
-	temp_64 = readq(addr);
+	temp_64 = xhci_read_64(xhci, addr);
 	xhci_dbg(xhci, "  %p: ir_set.erst_dequeue = @%08llx\n",
 			addr, temp_64);
 }
@@ -412,7 +412,7 @@
 {
 	u64 val;
 
-	val = readq(&xhci->op_regs->cmd_ring);
+	val = xhci_read_64(xhci, &xhci->op_regs->cmd_ring);
 	xhci_dbg(xhci, "// xHC command ring deq ptr low bits + flags = @%08x\n",
 			lower_32_bits(val));
 	xhci_dbg(xhci, "// xHC command ring deq ptr high bits = @%08x\n",
diff --git a/drivers/usb/host/xhci-mem.c b/drivers/usb/host/xhci-mem.c
index 873c272..bce4391 100644
--- a/drivers/usb/host/xhci-mem.c
+++ b/drivers/usb/host/xhci-mem.c
@@ -1958,7 +1958,7 @@
 		xhci_warn(xhci, "WARN something wrong with SW event ring "
 				"dequeue ptr.\n");
 	/* Update HC event ring dequeue pointer */
-	temp = readq(&xhci->ir_set->erst_dequeue);
+	temp = xhci_read_64(xhci, &xhci->ir_set->erst_dequeue);
 	temp &= ERST_PTR_MASK;
 	/* Don't clear the EHB bit (which is RW1C) because
 	 * there might be more events to service.
@@ -1967,7 +1967,7 @@
 	xhci_dbg_trace(xhci, trace_xhci_dbg_init,
 			"// Write event ring dequeue pointer, "
 			"preserving EHB bit");
-	writeq(((u64) deq & (u64) ~ERST_PTR_MASK) | temp,
+	xhci_write_64(xhci, ((u64) deq & (u64) ~ERST_PTR_MASK) | temp,
 			&xhci->ir_set->erst_dequeue);
 }
 
@@ -2269,7 +2269,7 @@
 	xhci_dbg_trace(xhci, trace_xhci_dbg_init,
 			"// Device context base array address = 0x%llx (DMA), %p (virt)",
 			(unsigned long long)xhci->dcbaa->dma, xhci->dcbaa);
-	writeq(dma, &xhci->op_regs->dcbaa_ptr);
+	xhci_write_64(xhci, dma, &xhci->op_regs->dcbaa_ptr);
 
 	/*
 	 * Initialize the ring segment pool.  The ring must be a contiguous
@@ -2312,13 +2312,13 @@
 			(unsigned long long)xhci->cmd_ring->first_seg->dma);
 
 	/* Set the address in the Command Ring Control register */
-	val_64 = readq(&xhci->op_regs->cmd_ring);
+	val_64 = xhci_read_64(xhci, &xhci->op_regs->cmd_ring);
 	val_64 = (val_64 & (u64) CMD_RING_RSVD_BITS) |
 		(xhci->cmd_ring->first_seg->dma & (u64) ~CMD_RING_RSVD_BITS) |
 		xhci->cmd_ring->cycle_state;
 	xhci_dbg_trace(xhci, trace_xhci_dbg_init,
 			"// Setting command ring address to 0x%x", val);
-	writeq(val_64, &xhci->op_regs->cmd_ring);
+	xhci_write_64(xhci, val_64, &xhci->op_regs->cmd_ring);
 	xhci_dbg_cmd_ptrs(xhci);
 
 	xhci->lpm_command = xhci_alloc_command(xhci, true, true, flags);
@@ -2396,10 +2396,10 @@
 	xhci_dbg_trace(xhci, trace_xhci_dbg_init,
 			"// Set ERST base address for ir_set 0 = 0x%llx",
 			(unsigned long long)xhci->erst.erst_dma_addr);
-	val_64 = readq(&xhci->ir_set->erst_base);
+	val_64 = xhci_read_64(xhci, &xhci->ir_set->erst_base);
 	val_64 &= ERST_PTR_MASK;
 	val_64 |= (xhci->erst.erst_dma_addr & (u64) ~ERST_PTR_MASK);
-	writeq(val_64, &xhci->ir_set->erst_base);
+	xhci_write_64(xhci, val_64, &xhci->ir_set->erst_base);
 
 	/* Set the event ring dequeue address */
 	xhci_set_hc_event_deq(xhci);
diff --git a/drivers/usb/host/xhci-pci.c b/drivers/usb/host/xhci-pci.c
index 3c898c1..04f986d 100644
--- a/drivers/usb/host/xhci-pci.c
+++ b/drivers/usb/host/xhci-pci.c
@@ -142,6 +142,11 @@
 				"QUIRK: Resetting on resume");
 		xhci->quirks |= XHCI_TRUST_TX_LENGTH;
 	}
+	if (pdev->vendor == PCI_VENDOR_ID_RENESAS &&
+			pdev->device == 0x0015 &&
+			pdev->subsystem_vendor == PCI_VENDOR_ID_SAMSUNG &&
+			pdev->subsystem_device == 0xc0cd)
+		xhci->quirks |= XHCI_RESET_ON_RESUME;
 	if (pdev->vendor == PCI_VENDOR_ID_VIA)
 		xhci->quirks |= XHCI_RESET_ON_RESUME;
 }
diff --git a/drivers/usb/host/xhci-ring.c b/drivers/usb/host/xhci-ring.c
index a0b248c..0ed64eb 100644
--- a/drivers/usb/host/xhci-ring.c
+++ b/drivers/usb/host/xhci-ring.c
@@ -307,13 +307,14 @@
 		return 0;
 	}
 
-	temp_64 = readq(&xhci->op_regs->cmd_ring);
+	temp_64 = xhci_read_64(xhci, &xhci->op_regs->cmd_ring);
 	if (!(temp_64 & CMD_RING_RUNNING)) {
 		xhci_dbg(xhci, "Command ring had been stopped\n");
 		return 0;
 	}
 	xhci->cmd_ring_state = CMD_RING_STATE_ABORTED;
-	writeq(temp_64 | CMD_RING_ABORT, &xhci->op_regs->cmd_ring);
+	xhci_write_64(xhci, temp_64 | CMD_RING_ABORT,
+			&xhci->op_regs->cmd_ring);
 
 	/* Section 4.6.1.2 of xHCI 1.0 spec says software should
 	 * time the completion od all xHCI commands, including
@@ -2864,8 +2865,9 @@
 		/* Clear the event handler busy flag (RW1C);
 		 * the event ring should be empty.
 		 */
-		temp_64 = readq(&xhci->ir_set->erst_dequeue);
-		writeq(temp_64 | ERST_EHB, &xhci->ir_set->erst_dequeue);
+		temp_64 = xhci_read_64(xhci, &xhci->ir_set->erst_dequeue);
+		xhci_write_64(xhci, temp_64 | ERST_EHB,
+				&xhci->ir_set->erst_dequeue);
 		spin_unlock(&xhci->lock);
 
 		return IRQ_HANDLED;
@@ -2877,7 +2879,7 @@
 	 */
 	while (xhci_handle_event(xhci) > 0) {}
 
-	temp_64 = readq(&xhci->ir_set->erst_dequeue);
+	temp_64 = xhci_read_64(xhci, &xhci->ir_set->erst_dequeue);
 	/* If necessary, update the HW's version of the event ring deq ptr. */
 	if (event_ring_deq != xhci->event_ring->dequeue) {
 		deq = xhci_trb_virt_to_dma(xhci->event_ring->deq_seg,
@@ -2892,7 +2894,7 @@
 
 	/* Clear the event handler busy flag (RW1C); event ring is empty. */
 	temp_64 |= ERST_EHB;
-	writeq(temp_64, &xhci->ir_set->erst_dequeue);
+	xhci_write_64(xhci, temp_64, &xhci->ir_set->erst_dequeue);
 
 	spin_unlock(&xhci->lock);
 
@@ -2965,58 +2967,8 @@
 	}
 
 	while (1) {
-		if (room_on_ring(xhci, ep_ring, num_trbs)) {
-			union xhci_trb *trb = ep_ring->enqueue;
-			unsigned int usable = ep_ring->enq_seg->trbs +
-					TRBS_PER_SEGMENT - 1 - trb;
-			u32 nop_cmd;
-
-			/*
-			 * Section 4.11.7.1 TD Fragments states that a link
-			 * TRB must only occur at the boundary between
-			 * data bursts (eg 512 bytes for 480M).
-			 * While it is possible to split a large fragment
-			 * we don't know the size yet.
-			 * Simplest solution is to fill the trb before the
-			 * LINK with nop commands.
-			 */
-			if (num_trbs == 1 || num_trbs <= usable || usable == 0)
-				break;
-
-			if (ep_ring->type != TYPE_BULK)
-				/*
-				 * While isoc transfers might have a buffer that
-				 * crosses a 64k boundary it is unlikely.
-				 * Since we can't add NOPs without generating
-				 * gaps in the traffic just hope it never
-				 * happens at the end of the ring.
-				 * This could be fixed by writing a LINK TRB
-				 * instead of the first NOP - however the
-				 * TRB_TYPE_LINK_LE32() calls would all need
-				 * changing to check the ring length.
-				 */
-				break;
-
-			if (num_trbs >= TRBS_PER_SEGMENT) {
-				xhci_err(xhci, "Too many fragments %d, max %d\n",
-						num_trbs, TRBS_PER_SEGMENT - 1);
-				return -EINVAL;
-			}
-
-			nop_cmd = cpu_to_le32(TRB_TYPE(TRB_TR_NOOP) |
-					ep_ring->cycle_state);
-			ep_ring->num_trbs_free -= usable;
-			do {
-				trb->generic.field[0] = 0;
-				trb->generic.field[1] = 0;
-				trb->generic.field[2] = 0;
-				trb->generic.field[3] = nop_cmd;
-				trb++;
-			} while (--usable);
-			ep_ring->enqueue = trb;
-			if (room_on_ring(xhci, ep_ring, num_trbs))
-				break;
-		}
+		if (room_on_ring(xhci, ep_ring, num_trbs))
+			break;
 
 		if (ep_ring == xhci->cmd_ring) {
 			xhci_err(xhci, "Do not support expand command ring\n");
diff --git a/drivers/usb/host/xhci.c b/drivers/usb/host/xhci.c
index ad36439..6fe577d 100644
--- a/drivers/usb/host/xhci.c
+++ b/drivers/usb/host/xhci.c
@@ -611,7 +611,7 @@
 	xhci_dbg(xhci, "Event ring:\n");
 	xhci_debug_ring(xhci, xhci->event_ring);
 	xhci_dbg_ring_ptrs(xhci, xhci->event_ring);
-	temp_64 = readq(&xhci->ir_set->erst_dequeue);
+	temp_64 = xhci_read_64(xhci, &xhci->ir_set->erst_dequeue);
 	temp_64 &= ~ERST_PTR_MASK;
 	xhci_dbg_trace(xhci, trace_xhci_dbg_init,
 			"ERST deq = 64'h%0lx", (long unsigned int) temp_64);
@@ -756,11 +756,11 @@
 {
 	xhci->s3.command = readl(&xhci->op_regs->command);
 	xhci->s3.dev_nt = readl(&xhci->op_regs->dev_notification);
-	xhci->s3.dcbaa_ptr = readq(&xhci->op_regs->dcbaa_ptr);
+	xhci->s3.dcbaa_ptr = xhci_read_64(xhci, &xhci->op_regs->dcbaa_ptr);
 	xhci->s3.config_reg = readl(&xhci->op_regs->config_reg);
 	xhci->s3.erst_size = readl(&xhci->ir_set->erst_size);
-	xhci->s3.erst_base = readq(&xhci->ir_set->erst_base);
-	xhci->s3.erst_dequeue = readq(&xhci->ir_set->erst_dequeue);
+	xhci->s3.erst_base = xhci_read_64(xhci, &xhci->ir_set->erst_base);
+	xhci->s3.erst_dequeue = xhci_read_64(xhci, &xhci->ir_set->erst_dequeue);
 	xhci->s3.irq_pending = readl(&xhci->ir_set->irq_pending);
 	xhci->s3.irq_control = readl(&xhci->ir_set->irq_control);
 }
@@ -769,11 +769,11 @@
 {
 	writel(xhci->s3.command, &xhci->op_regs->command);
 	writel(xhci->s3.dev_nt, &xhci->op_regs->dev_notification);
-	writeq(xhci->s3.dcbaa_ptr, &xhci->op_regs->dcbaa_ptr);
+	xhci_write_64(xhci, xhci->s3.dcbaa_ptr, &xhci->op_regs->dcbaa_ptr);
 	writel(xhci->s3.config_reg, &xhci->op_regs->config_reg);
 	writel(xhci->s3.erst_size, &xhci->ir_set->erst_size);
-	writeq(xhci->s3.erst_base, &xhci->ir_set->erst_base);
-	writeq(xhci->s3.erst_dequeue, &xhci->ir_set->erst_dequeue);
+	xhci_write_64(xhci, xhci->s3.erst_base, &xhci->ir_set->erst_base);
+	xhci_write_64(xhci, xhci->s3.erst_dequeue, &xhci->ir_set->erst_dequeue);
 	writel(xhci->s3.irq_pending, &xhci->ir_set->irq_pending);
 	writel(xhci->s3.irq_control, &xhci->ir_set->irq_control);
 }
@@ -783,7 +783,7 @@
 	u64	val_64;
 
 	/* step 2: initialize command ring buffer */
-	val_64 = readq(&xhci->op_regs->cmd_ring);
+	val_64 = xhci_read_64(xhci, &xhci->op_regs->cmd_ring);
 	val_64 = (val_64 & (u64) CMD_RING_RSVD_BITS) |
 		(xhci_trb_virt_to_dma(xhci->cmd_ring->deq_seg,
 				      xhci->cmd_ring->dequeue) &
@@ -792,7 +792,7 @@
 	xhci_dbg_trace(xhci, trace_xhci_dbg_init,
 			"// Setting command ring address to 0x%llx",
 			(long unsigned long) val_64);
-	writeq(val_64, &xhci->op_regs->cmd_ring);
+	xhci_write_64(xhci, val_64, &xhci->op_regs->cmd_ring);
 }
 
 /*
@@ -3842,7 +3842,7 @@
 	if (ret) {
 		return ret;
 	}
-	temp_64 = readq(&xhci->op_regs->dcbaa_ptr);
+	temp_64 = xhci_read_64(xhci, &xhci->op_regs->dcbaa_ptr);
 	xhci_dbg_trace(xhci, trace_xhci_dbg_address,
 			"Op regs DCBAA ptr = %#016llx", temp_64);
 	xhci_dbg_trace(xhci, trace_xhci_dbg_address,
@@ -4730,11 +4730,8 @@
 	struct device		*dev = hcd->self.controller;
 	int			retval;
 
-	/* Limit the block layer scatter-gather lists to half a segment. */
-	hcd->self.sg_tablesize = TRBS_PER_SEGMENT / 2;
-
-	/* support to build packet from discontinuous buffers */
-	hcd->self.no_sg_constraint = 1;
+	/* Accept arbitrarily long scatter-gather lists */
+	hcd->self.sg_tablesize = ~0;
 
 	/* XHCI controllers don't stop the ep queue on short packets :| */
 	hcd->self.no_stop_on_short = 1;
@@ -4760,6 +4757,14 @@
 		/* xHCI private pointer was set in xhci_pci_probe for the second
 		 * registered roothub.
 		 */
+		xhci = hcd_to_xhci(hcd);
+		/*
+		 * Support arbitrarily aligned sg-list entries on hosts without
+		 * TD fragment rules (which are currently unsupported).
+		 */
+		if (xhci->hci_version < 0x100)
+			hcd->self.no_sg_constraint = 1;
+
 		return 0;
 	}
 
@@ -4788,6 +4793,9 @@
 	if (xhci->hci_version > 0x96)
 		xhci->quirks |= XHCI_SPURIOUS_SUCCESS;
 
+	if (xhci->hci_version < 0x100)
+		hcd->self.no_sg_constraint = 1;
+
 	/* Make sure the HC is halted. */
 	retval = xhci_halt(xhci);
 	if (retval)
diff --git a/drivers/usb/host/xhci.h b/drivers/usb/host/xhci.h
index f8416639..58ed9d0 100644
--- a/drivers/usb/host/xhci.h
+++ b/drivers/usb/host/xhci.h
@@ -28,17 +28,6 @@
 #include <linux/kernel.h>
 #include <linux/usb/hcd.h>
 
-/*
- * Registers should always be accessed with double word or quad word accesses.
- *
- * Some xHCI implementations may support 64-bit address pointers.  Registers
- * with 64-bit address pointers should be written to with dword accesses by
- * writing the low dword first (ptr[0]), then the high dword (ptr[1]) second.
- * xHCI implementations that do not support 64-bit address pointers will ignore
- * the high dword, and write order is irrelevant.
- */
-#include <asm-generic/io-64-nonatomic-lo-hi.h>
-
 /* Code sharing between pci-quirks and xhci hcd */
 #include	"xhci-ext-caps.h"
 #include "pci-quirks.h"
@@ -1279,7 +1268,7 @@
  * since the command ring is 64-byte aligned.
  * It must also be greater than 16.
  */
-#define TRBS_PER_SEGMENT	256
+#define TRBS_PER_SEGMENT	64
 /* Allow two commands + a link TRB, along with any reserved command TRBs */
 #define MAX_RSVD_CMD_TRBS	(TRBS_PER_SEGMENT - 3)
 #define TRB_SEGMENT_SIZE	(TRBS_PER_SEGMENT*16)
@@ -1614,6 +1603,34 @@
 #define xhci_warn_ratelimited(xhci, fmt, args...) \
 	dev_warn_ratelimited(xhci_to_hcd(xhci)->self.controller , fmt , ## args)
 
+/*
+ * Registers should always be accessed with double word or quad word accesses.
+ *
+ * Some xHCI implementations may support 64-bit address pointers.  Registers
+ * with 64-bit address pointers should be written to with dword accesses by
+ * writing the low dword first (ptr[0]), then the high dword (ptr[1]) second.
+ * xHCI implementations that do not support 64-bit address pointers will ignore
+ * the high dword, and write order is irrelevant.
+ */
+static inline u64 xhci_read_64(const struct xhci_hcd *xhci,
+		__le64 __iomem *regs)
+{
+	__u32 __iomem *ptr = (__u32 __iomem *) regs;
+	u64 val_lo = readl(ptr);
+	u64 val_hi = readl(ptr + 1);
+	return val_lo + (val_hi << 32);
+}
+static inline void xhci_write_64(struct xhci_hcd *xhci,
+				 const u64 val, __le64 __iomem *regs)
+{
+	__u32 __iomem *ptr = (__u32 __iomem *) regs;
+	u32 val_lo = lower_32_bits(val);
+	u32 val_hi = upper_32_bits(val);
+
+	writel(val_lo, ptr);
+	writel(val_hi, ptr + 1);
+}
+
 static inline int xhci_link_trb_quirk(struct xhci_hcd *xhci)
 {
 	return xhci->quirks & XHCI_LINK_TRB_QUIRK;
diff --git a/drivers/usb/phy/phy.c b/drivers/usb/phy/phy.c
index e6f61e4..8afa813 100644
--- a/drivers/usb/phy/phy.c
+++ b/drivers/usb/phy/phy.c
@@ -130,7 +130,7 @@
 
 	phy = __usb_find_phy(&phy_list, type);
 	if (IS_ERR(phy) || !try_module_get(phy->dev->driver->owner)) {
-		pr_err("unable to find transceiver of type %s\n",
+		pr_debug("PHY: unable to find transceiver of type %s\n",
 			usb_phy_type_string(type));
 		goto err0;
 	}
@@ -228,7 +228,7 @@
 
 	phy = __usb_find_phy_dev(dev, &phy_bind_list, index);
 	if (IS_ERR(phy) || !try_module_get(phy->dev->driver->owner)) {
-		pr_err("unable to find transceiver\n");
+		dev_dbg(dev, "unable to find transceiver\n");
 		goto err0;
 	}
 
@@ -424,10 +424,8 @@
 	unsigned long flags;
 
 	phy_bind = kzalloc(sizeof(*phy_bind), GFP_KERNEL);
-	if (!phy_bind) {
-		pr_err("phy_bind(): No memory for phy_bind");
+	if (!phy_bind)
 		return -ENOMEM;
-	}
 
 	phy_bind->dev_name = dev_name;
 	phy_bind->phy_dev_name = phy_dev_name;
diff --git a/drivers/usb/serial/ftdi_sio.c b/drivers/usb/serial/ftdi_sio.c
index ce0d7b0..ee1f00f 100644
--- a/drivers/usb/serial/ftdi_sio.c
+++ b/drivers/usb/serial/ftdi_sio.c
@@ -152,6 +152,7 @@
 	{ USB_DEVICE(FTDI_VID, FTDI_CANUSB_PID) },
 	{ USB_DEVICE(FTDI_VID, FTDI_CANDAPTER_PID) },
 	{ USB_DEVICE(FTDI_VID, FTDI_NXTCAM_PID) },
+	{ USB_DEVICE(FTDI_VID, FTDI_EV3CON_PID) },
 	{ USB_DEVICE(FTDI_VID, FTDI_SCS_DEVICE_0_PID) },
 	{ USB_DEVICE(FTDI_VID, FTDI_SCS_DEVICE_1_PID) },
 	{ USB_DEVICE(FTDI_VID, FTDI_SCS_DEVICE_2_PID) },
@@ -191,6 +192,8 @@
 	{ USB_DEVICE(INTERBIOMETRICS_VID, INTERBIOMETRICS_IOBOARD_PID) },
 	{ USB_DEVICE(INTERBIOMETRICS_VID, INTERBIOMETRICS_MINI_IOBOARD_PID) },
 	{ USB_DEVICE(FTDI_VID, FTDI_SPROG_II) },
+	{ USB_DEVICE(FTDI_VID, FTDI_TAGSYS_LP101_PID) },
+	{ USB_DEVICE(FTDI_VID, FTDI_TAGSYS_P200X_PID) },
 	{ USB_DEVICE(FTDI_VID, FTDI_LENZ_LIUSB_PID) },
 	{ USB_DEVICE(FTDI_VID, FTDI_XF_632_PID) },
 	{ USB_DEVICE(FTDI_VID, FTDI_XF_634_PID) },
diff --git a/drivers/usb/serial/ftdi_sio_ids.h b/drivers/usb/serial/ftdi_sio_ids.h
index a7019d1..1e2d369 100644
--- a/drivers/usb/serial/ftdi_sio_ids.h
+++ b/drivers/usb/serial/ftdi_sio_ids.h
@@ -50,6 +50,7 @@
 #define TI_XDS100V2_PID		0xa6d0
 
 #define FTDI_NXTCAM_PID		0xABB8 /* NXTCam for Mindstorms NXT */
+#define FTDI_EV3CON_PID		0xABB9 /* Mindstorms EV3 Console Adapter */
 
 /* US Interface Navigator (http://www.usinterface.com/) */
 #define FTDI_USINT_CAT_PID	0xb810	/* Navigator CAT and 2nd PTT lines */
@@ -363,6 +364,12 @@
 /* Sprog II (Andrew Crosland's SprogII DCC interface) */
 #define FTDI_SPROG_II		0xF0C8
 
+/*
+ * Two of the Tagsys RFID Readers
+ */
+#define FTDI_TAGSYS_LP101_PID	0xF0E9	/* Tagsys L-P101 RFID */
+#define FTDI_TAGSYS_P200X_PID	0xF0EE	/* Tagsys Medio P200x RFID */
+
 /* an infrared receiver for user access control with IR tags */
 #define FTDI_PIEGROUP_PID	0xF208	/* Product Id */
 
diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c
index 5c86f57..216d20a 100644
--- a/drivers/usb/serial/option.c
+++ b/drivers/usb/serial/option.c
@@ -1362,7 +1362,8 @@
 	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1267, 0xff, 0xff, 0xff) },
 	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1268, 0xff, 0xff, 0xff) },
 	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1269, 0xff, 0xff, 0xff) },
-	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1270, 0xff, 0xff, 0xff) },
+	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1270, 0xff, 0xff, 0xff),
+	  .driver_info = (kernel_ulong_t)&net_intf5_blacklist },
 	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1271, 0xff, 0xff, 0xff) },
 	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1272, 0xff, 0xff, 0xff) },
 	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1273, 0xff, 0xff, 0xff) },
diff --git a/drivers/usb/serial/qcserial.c b/drivers/usb/serial/qcserial.c
index c65437c..968a402 100644
--- a/drivers/usb/serial/qcserial.c
+++ b/drivers/usb/serial/qcserial.c
@@ -139,6 +139,9 @@
 	{USB_DEVICE_INTERFACE_NUMBER(0x1199, 0x901c, 0)},	/* Sierra Wireless EM7700 Device Management */
 	{USB_DEVICE_INTERFACE_NUMBER(0x1199, 0x901c, 2)},	/* Sierra Wireless EM7700 NMEA */
 	{USB_DEVICE_INTERFACE_NUMBER(0x1199, 0x901c, 3)},	/* Sierra Wireless EM7700 Modem */
+	{USB_DEVICE_INTERFACE_NUMBER(0x1199, 0x9051, 0)},	/* Netgear AirCard 340U Device Management */
+	{USB_DEVICE_INTERFACE_NUMBER(0x1199, 0x9051, 2)},	/* Netgear AirCard 340U NMEA */
+	{USB_DEVICE_INTERFACE_NUMBER(0x1199, 0x9051, 3)},	/* Netgear AirCard 340U Modem */
 
 	{ }				/* Terminating entry */
 };
diff --git a/drivers/usb/serial/usb-serial-simple.c b/drivers/usb/serial/usb-serial-simple.c
index f112b07..fb79775 100644
--- a/drivers/usb/serial/usb-serial-simple.c
+++ b/drivers/usb/serial/usb-serial-simple.c
@@ -71,7 +71,8 @@
 
 /* Suunto ANT+ USB Driver */
 #define SUUNTO_IDS()			\
-	{ USB_DEVICE(0x0fcf, 0x1008) }
+	{ USB_DEVICE(0x0fcf, 0x1008) },	\
+	{ USB_DEVICE(0x0fcf, 0x1009) } /* Dynastream ANT USB-m Stick */
 DEVICE(suunto, SUUNTO_IDS);
 
 /* Siemens USB/MPI adapter */
diff --git a/drivers/usb/storage/Kconfig b/drivers/usb/storage/Kconfig
index 8470e1b..1dd0604 100644
--- a/drivers/usb/storage/Kconfig
+++ b/drivers/usb/storage/Kconfig
@@ -18,7 +18,9 @@
 
 	  This option depends on 'SCSI' support being enabled, but you
 	  probably also need 'SCSI device support: SCSI disk support'
-	  (BLK_DEV_SD) for most USB storage devices.
+	  (BLK_DEV_SD) for most USB storage devices.  Some devices will
+	  also require 'Probe all LUNs on each SCSI device'
+	  (SCSI_MULTI_LUN).
 
 	  To compile this driver as a module, choose M here: the
 	  module will be called usb-storage.
diff --git a/drivers/usb/storage/scsiglue.c b/drivers/usb/storage/scsiglue.c
index 18509e6..9d38ddc 100644
--- a/drivers/usb/storage/scsiglue.c
+++ b/drivers/usb/storage/scsiglue.c
@@ -78,6 +78,8 @@
 
 static int slave_alloc (struct scsi_device *sdev)
 {
+	struct us_data *us = host_to_us(sdev->host);
+
 	/*
 	 * Set the INQUIRY transfer length to 36.  We don't use any of
 	 * the extra data and many devices choke if asked for more or
@@ -102,6 +104,10 @@
 	 */
 	blk_queue_update_dma_alignment(sdev->request_queue, (512 - 1));
 
+	/* Tell the SCSI layer if we know there is more than one LUN */
+	if (us->protocol == USB_PR_BULK && us->max_lun > 0)
+		sdev->sdev_bflags |= BLIST_FORCELUN;
+
 	return 0;
 }
 
diff --git a/drivers/usb/storage/unusual_cypress.h b/drivers/usb/storage/unusual_cypress.h
index 65a6a75..82e8ed0 100644
--- a/drivers/usb/storage/unusual_cypress.h
+++ b/drivers/usb/storage/unusual_cypress.h
@@ -31,7 +31,7 @@
 		"Cypress ISD-300LP",
 		USB_SC_CYP_ATACB, USB_PR_DEVICE, NULL, 0),
 
-UNUSUAL_DEV( 0x14cd, 0x6116, 0x0000, 0x0219,
+UNUSUAL_DEV( 0x14cd, 0x6116, 0x0160, 0x0160,
 		"Super Top",
 		"USB 2.0  SATA BRIDGE",
 		USB_SC_CYP_ATACB, USB_PR_DEVICE, NULL, 0),
diff --git a/drivers/usb/storage/unusual_devs.h b/drivers/usb/storage/unusual_devs.h
index ad06255..adbeb25 100644
--- a/drivers/usb/storage/unusual_devs.h
+++ b/drivers/usb/storage/unusual_devs.h
@@ -1455,6 +1455,13 @@
 		USB_SC_DEVICE, USB_PR_DEVICE, NULL,
 		US_FL_FIX_CAPACITY ),
 
+/* Reported by Moritz Moeller-Herrmann <moritz-kernel@moeller-herrmann.de> */
+UNUSUAL_DEV(  0x0fca, 0x8004, 0x0201, 0x0201,
+		"Research In Motion",
+		"BlackBerry Bold 9000",
+		USB_SC_DEVICE, USB_PR_DEVICE, NULL,
+		US_FL_MAX_SECTORS_64 ),
+
 /* Reported by Michael Stattmann <michael@stattmann.com> */
 UNUSUAL_DEV(  0x0fce, 0xd008, 0x0000, 0x0000,
 		"Sony Ericsson",
diff --git a/drivers/vhost/net.c b/drivers/vhost/net.c
index 9a68409..a0fa5de 100644
--- a/drivers/vhost/net.c
+++ b/drivers/vhost/net.c
@@ -70,7 +70,12 @@
 };
 
 struct vhost_net_ubuf_ref {
-	struct kref kref;
+	/* refcount follows semantics similar to kref:
+	 *  0: object is released
+	 *  1: no outstanding ubufs
+	 * >1: outstanding ubufs
+	 */
+	atomic_t refcount;
 	wait_queue_head_t wait;
 	struct vhost_virtqueue *vq;
 };
@@ -116,14 +121,6 @@
 	vhost_net_zcopy_mask |= 0x1 << vq;
 }
 
-static void vhost_net_zerocopy_done_signal(struct kref *kref)
-{
-	struct vhost_net_ubuf_ref *ubufs;
-
-	ubufs = container_of(kref, struct vhost_net_ubuf_ref, kref);
-	wake_up(&ubufs->wait);
-}
-
 static struct vhost_net_ubuf_ref *
 vhost_net_ubuf_alloc(struct vhost_virtqueue *vq, bool zcopy)
 {
@@ -134,21 +131,24 @@
 	ubufs = kmalloc(sizeof(*ubufs), GFP_KERNEL);
 	if (!ubufs)
 		return ERR_PTR(-ENOMEM);
-	kref_init(&ubufs->kref);
+	atomic_set(&ubufs->refcount, 1);
 	init_waitqueue_head(&ubufs->wait);
 	ubufs->vq = vq;
 	return ubufs;
 }
 
-static void vhost_net_ubuf_put(struct vhost_net_ubuf_ref *ubufs)
+static int vhost_net_ubuf_put(struct vhost_net_ubuf_ref *ubufs)
 {
-	kref_put(&ubufs->kref, vhost_net_zerocopy_done_signal);
+	int r = atomic_sub_return(1, &ubufs->refcount);
+	if (unlikely(!r))
+		wake_up(&ubufs->wait);
+	return r;
 }
 
 static void vhost_net_ubuf_put_and_wait(struct vhost_net_ubuf_ref *ubufs)
 {
-	kref_put(&ubufs->kref, vhost_net_zerocopy_done_signal);
-	wait_event(ubufs->wait, !atomic_read(&ubufs->kref.refcount));
+	vhost_net_ubuf_put(ubufs);
+	wait_event(ubufs->wait, !atomic_read(&ubufs->refcount));
 }
 
 static void vhost_net_ubuf_put_wait_and_free(struct vhost_net_ubuf_ref *ubufs)
@@ -306,23 +306,26 @@
 {
 	struct vhost_net_ubuf_ref *ubufs = ubuf->ctx;
 	struct vhost_virtqueue *vq = ubufs->vq;
-	int cnt = atomic_read(&ubufs->kref.refcount);
+	int cnt;
+
+	rcu_read_lock_bh();
 
 	/* set len to mark this desc buffers done DMA */
 	vq->heads[ubuf->desc].len = success ?
 		VHOST_DMA_DONE_LEN : VHOST_DMA_FAILED_LEN;
-	vhost_net_ubuf_put(ubufs);
+	cnt = vhost_net_ubuf_put(ubufs);
 
 	/*
 	 * Trigger polling thread if guest stopped submitting new buffers:
-	 * in this case, the refcount after decrement will eventually reach 1
-	 * so here it is 2.
+	 * in this case, the refcount after decrement will eventually reach 1.
 	 * We also trigger polling periodically after each 16 packets
 	 * (the value 16 here is more or less arbitrary, it's tuned to trigger
 	 * less than 10% of times).
 	 */
-	if (cnt <= 2 || !(cnt % 16))
+	if (cnt <= 1 || !(cnt % 16))
 		vhost_poll_queue(&vq->poll);
+
+	rcu_read_unlock_bh();
 }
 
 /* Expects to be always run from workqueue - which acts as
@@ -420,7 +423,7 @@
 			msg.msg_control = ubuf;
 			msg.msg_controllen = sizeof(ubuf);
 			ubufs = nvq->ubufs;
-			kref_get(&ubufs->kref);
+			atomic_inc(&ubufs->refcount);
 			nvq->upend_idx = (nvq->upend_idx + 1) % UIO_MAXIOV;
 		} else {
 			msg.msg_control = NULL;
@@ -780,7 +783,7 @@
 		vhost_net_ubuf_put_and_wait(n->vqs[VHOST_NET_VQ_TX].ubufs);
 		mutex_lock(&n->vqs[VHOST_NET_VQ_TX].vq.mutex);
 		n->tx_flush = false;
-		kref_init(&n->vqs[VHOST_NET_VQ_TX].ubufs->kref);
+		atomic_set(&n->vqs[VHOST_NET_VQ_TX].ubufs->refcount, 1);
 		mutex_unlock(&n->vqs[VHOST_NET_VQ_TX].vq.mutex);
 	}
 }
@@ -800,6 +803,8 @@
 		fput(tx_sock->file);
 	if (rx_sock)
 		fput(rx_sock->file);
+	/* Make sure no callbacks are outstanding */
+	synchronize_rcu_bh();
 	/* We do an extra flush before freeing memory,
 	 * since jobs can re-queue themselves. */
 	vhost_net_flush(n);
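
The switch from kref to a bare atomic keeps the same lifecycle, just with
explicit numbers: the counter starts at 1 (no zerocopy buffers in flight),
each submitted ubuf takes a reference, and the release path drops the base
reference and waits for the counter to hit 0.  A rough userspace sketch of
that pattern, with names invented for illustration (the real code wakes a
wait queue instead of printing):

	#include <stdatomic.h>
	#include <stdio.h>

	/* 1: idle, >1: ubufs in flight, 0: released */
	static atomic_int refcount = ATOMIC_VAR_INIT(1);

	static void ubuf_get(void)
	{
		atomic_fetch_add(&refcount, 1);
	}

	static int ubuf_put(void)
	{
		int r = atomic_fetch_sub(&refcount, 1) - 1;

		if (r == 0)
			printf("wake waiters\n");	/* wake_up(&ubufs->wait) */
		return r;
	}

	int main(void)
	{
		ubuf_get();	/* zerocopy packet submitted */
		ubuf_put();	/* its completion callback */
		ubuf_put();	/* teardown: drop the base reference ... */
		/* ... then wait until the counter reads 0 before freeing */
		return 0;
	}
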
diff --git a/drivers/vhost/scsi.c b/drivers/vhost/scsi.c
index 1e4c75c..0a025b8 100644
--- a/drivers/vhost/scsi.c
+++ b/drivers/vhost/scsi.c
@@ -728,7 +728,7 @@
 	}
 	se_sess = tv_nexus->tvn_se_sess;
 
-	tag = percpu_ida_alloc(&se_sess->sess_tag_pool, GFP_ATOMIC);
+	tag = percpu_ida_alloc(&se_sess->sess_tag_pool, TASK_RUNNING);
 	if (tag < 0) {
 		pr_err("Unable to obtain tag for tcm_vhost_cmd\n");
 		return ERR_PTR(-ENOMEM);
@@ -889,7 +889,7 @@
 			cmd->tvc_lun, cmd->tvc_exp_data_len,
 			cmd->tvc_task_attr, cmd->tvc_data_direction,
 			TARGET_SCF_ACK_KREF, sg_ptr, cmd->tvc_sgl_count,
-			sg_bidi_ptr, sg_no_bidi);
+			sg_bidi_ptr, sg_no_bidi, NULL, 0);
 	if (rc < 0) {
 		transport_send_check_condition_and_sense(se_cmd,
 				TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE, 0);
diff --git a/drivers/video/Kconfig b/drivers/video/Kconfig
index 22262a3..dade5b7 100644
--- a/drivers/video/Kconfig
+++ b/drivers/video/Kconfig
@@ -364,7 +364,7 @@
 
 config FB_IMX
 	tristate "Freescale i.MX1/21/25/27 LCD support"
-	depends on FB && IMX_HAVE_PLATFORM_IMX_FB
+	depends on FB && ARCH_MXC
 	select FB_CFB_FILLRECT
 	select FB_CFB_COPYAREA
 	select FB_CFB_IMAGEBLIT
diff --git a/drivers/video/backlight/lcd.c b/drivers/video/backlight/lcd.c
index 93cf15e..7de847d 100644
--- a/drivers/video/backlight/lcd.c
+++ b/drivers/video/backlight/lcd.c
@@ -228,7 +228,7 @@
 
 	rc = device_register(&new_ld->dev);
 	if (rc) {
-		kfree(new_ld);
+		put_device(&new_ld->dev);
 		return ERR_PTR(rc);
 	}
 
diff --git a/drivers/video/console/sticore.c b/drivers/video/console/sticore.c
index 4ad24f2..cecd3de 100644
--- a/drivers/video/console/sticore.c
+++ b/drivers/video/console/sticore.c
@@ -488,7 +488,7 @@
 	return 0;
 }
 
-#ifdef CONFIG_FONTS
+#ifdef CONFIG_FONT_SUPPORT
 static struct sti_cooked_font *
 sti_select_fbfont(struct sti_cooked_rom *cooked_rom, const char *fbfont_name)
 {
diff --git a/drivers/video/exynos/Kconfig b/drivers/video/exynos/Kconfig
index 1129d0e..75c8a8e 100644
--- a/drivers/video/exynos/Kconfig
+++ b/drivers/video/exynos/Kconfig
@@ -22,7 +22,8 @@
 
 config EXYNOS_LCD_S6E8AX0
 	bool "S6E8AX0 MIPI AMOLED LCD Driver"
-	depends on (EXYNOS_MIPI_DSI && BACKLIGHT_CLASS_DEVICE && LCD_CLASS_DEVICE)
+	depends on EXYNOS_MIPI_DSI && BACKLIGHT_CLASS_DEVICE
+	depends on (LCD_CLASS_DEVICE = y)
 	default n
 	help
 	  If you have an S6E8AX0 MIPI AMOLED LCD Panel, say Y to enable its
diff --git a/drivers/video/fbmem.c b/drivers/video/fbmem.c
index cde4619..7309ac7 100644
--- a/drivers/video/fbmem.c
+++ b/drivers/video/fbmem.c
@@ -1577,10 +1577,10 @@
 static int do_unregister_framebuffer(struct fb_info *fb_info);
 
 #define VGA_FB_PHYS 0xA0000
-static void do_remove_conflicting_framebuffers(struct apertures_struct *a,
-				     const char *name, bool primary)
+static int do_remove_conflicting_framebuffers(struct apertures_struct *a,
+					      const char *name, bool primary)
 {
-	int i;
+	int i, ret;
 
 	/* check all firmware fbs and kick off if the base addr overlaps */
 	for (i = 0 ; i < FB_MAX; i++) {
@@ -1599,22 +1599,29 @@
 			printk(KERN_INFO "fb: conflicting fb hw usage "
 			       "%s vs %s - removing generic driver\n",
 			       name, registered_fb[i]->fix.id);
-			do_unregister_framebuffer(registered_fb[i]);
+			ret = do_unregister_framebuffer(registered_fb[i]);
+			if (ret)
+				return ret;
 		}
 	}
+
+	return 0;
 }
 
 static int do_register_framebuffer(struct fb_info *fb_info)
 {
-	int i;
+	int i, ret;
 	struct fb_event event;
 	struct fb_videomode mode;
 
 	if (fb_check_foreignness(fb_info))
 		return -ENOSYS;
 
-	do_remove_conflicting_framebuffers(fb_info->apertures, fb_info->fix.id,
-					 fb_is_primary_device(fb_info));
+	ret = do_remove_conflicting_framebuffers(fb_info->apertures,
+						 fb_info->fix.id,
+						 fb_is_primary_device(fb_info));
+	if (ret)
+		return ret;
 
 	if (num_registered_fb == FB_MAX)
 		return -ENXIO;
@@ -1739,12 +1746,16 @@
 }
 EXPORT_SYMBOL(unlink_framebuffer);
 
-void remove_conflicting_framebuffers(struct apertures_struct *a,
-				     const char *name, bool primary)
+int remove_conflicting_framebuffers(struct apertures_struct *a,
+				    const char *name, bool primary)
 {
+	int ret;
+
 	mutex_lock(&registration_lock);
-	do_remove_conflicting_framebuffers(a, name, primary);
+	ret = do_remove_conflicting_framebuffers(a, name, primary);
 	mutex_unlock(&registration_lock);
+
+	return ret;
 }
 EXPORT_SYMBOL(remove_conflicting_framebuffers);
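
With remove_conflicting_framebuffers() now returning an int, callers that kick
out a firmware framebuffer can propagate an unregister failure instead of
silently continuing.  A hypothetical probe-path fragment (driver name,
aperture values and the surrounding function are invented for illustration;
it needs <linux/fb.h> and <linux/slab.h>):

	static int examplefb_kick_firmware_fb(resource_size_t base,
					      resource_size_t size)
	{
		struct apertures_struct *ap;
		int ret;

		ap = alloc_apertures(1);
		if (!ap)
			return -ENOMEM;
		ap->ranges[0].base = base;
		ap->ranges[0].size = size;

		ret = remove_conflicting_framebuffers(ap, "examplefb", false);
		kfree(ap);
		return ret;
	}
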
 
diff --git a/drivers/video/omap2/dss/dispc.c b/drivers/video/omap2/dss/dispc.c
index f51646f..77d6221 100644
--- a/drivers/video/omap2/dss/dispc.c
+++ b/drivers/video/omap2/dss/dispc.c
@@ -2160,8 +2160,8 @@
 	*five_taps = false;
 
 	do {
-		in_height = DIV_ROUND_UP(height, *decim_y);
-		in_width = DIV_ROUND_UP(width, *decim_x);
+		in_height = height / *decim_y;
+		in_width = width / *decim_x;
 		*core_clk = dispc.feat->calc_core_clk(pclk, in_width,
 				in_height, out_width, out_height, mem_to_mem);
 		error = (in_width > maxsinglelinewidth || !*core_clk ||
@@ -2199,8 +2199,8 @@
 			dss_feat_get_param_max(FEAT_PARAM_LINEWIDTH);
 
 	do {
-		in_height = DIV_ROUND_UP(height, *decim_y);
-		in_width = DIV_ROUND_UP(width, *decim_x);
+		in_height = height / *decim_y;
+		in_width = width / *decim_x;
 		*five_taps = in_height > out_height;
 
 		if (in_width > maxsinglelinewidth)
@@ -2268,7 +2268,7 @@
 {
 	u16 in_width, in_width_max;
 	int decim_x_min = *decim_x;
-	u16 in_height = DIV_ROUND_UP(height, *decim_y);
+	u16 in_height = height / *decim_y;
 	const int maxsinglelinewidth =
 				dss_feat_get_param_max(FEAT_PARAM_LINEWIDTH);
 	const int maxdownscale = dss_feat_get_param_max(FEAT_PARAM_DOWNSCALE);
@@ -2287,7 +2287,7 @@
 		return -EINVAL;
 
 	do {
-		in_width = DIV_ROUND_UP(width, *decim_x);
+		in_width = width / *decim_x;
 	} while (*decim_x <= *x_predecim &&
 			in_width > maxsinglelinewidth && ++*decim_x);
 
@@ -2466,8 +2466,8 @@
 	if (r)
 		return r;
 
-	in_width = DIV_ROUND_UP(in_width, x_predecim);
-	in_height = DIV_ROUND_UP(in_height, y_predecim);
+	in_width = in_width / x_predecim;
+	in_height = in_height / y_predecim;
 
 	if (color_mode == OMAP_DSS_COLOR_YUV2 ||
 			color_mode == OMAP_DSS_COLOR_UYVY ||
@@ -3726,7 +3726,6 @@
 	}
 
 	pm_runtime_enable(&pdev->dev);
-	pm_runtime_irq_safe(&pdev->dev);
 
 	r = dispc_runtime_get();
 	if (r)
diff --git a/drivers/video/omap2/dss/dpi.c b/drivers/video/omap2/dss/dpi.c
index 7411f26..23ef21f 100644
--- a/drivers/video/omap2/dss/dpi.c
+++ b/drivers/video/omap2/dss/dpi.c
@@ -117,7 +117,7 @@
 	/* outputs */
 
 	struct dsi_clock_info dsi_cinfo;
-	unsigned long long fck;
+	unsigned long fck;
 	struct dispc_clock_info dispc_cinfo;
 };
 
diff --git a/drivers/video/omap2/dss/sdi.c b/drivers/video/omap2/dss/sdi.c
index efb9ee9..ba806c9 100644
--- a/drivers/video/omap2/dss/sdi.c
+++ b/drivers/video/omap2/dss/sdi.c
@@ -46,7 +46,7 @@
 struct sdi_clk_calc_ctx {
 	unsigned long pck_min, pck_max;
 
-	unsigned long long fck;
+	unsigned long fck;
 	struct dispc_clock_info dispc_cinfo;
 };
 
diff --git a/drivers/vme/bridges/vme_ca91cx42.c b/drivers/vme/bridges/vme_ca91cx42.c
index a06edbf..1b5d48c 100644
--- a/drivers/vme/bridges/vme_ca91cx42.c
+++ b/drivers/vme/bridges/vme_ca91cx42.c
@@ -884,7 +884,7 @@
 		if (done == count)
 			goto out;
 	}
-	if ((uintptr_t)addr & 0x2) {
+	if ((uintptr_t)(addr + done) & 0x2) {
 		if ((count - done) < 2) {
 			*(u8 *)(buf + done) = ioread8(addr + done);
 			done += 1;
@@ -938,7 +938,7 @@
 		if (done == count)
 			goto out;
 	}
-	if ((uintptr_t)addr & 0x2) {
+	if ((uintptr_t)(addr + done) & 0x2) {
 		if ((count - done) < 2) {
 			iowrite8(*(u8 *)(buf + done), addr + done);
 			done += 1;
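
The fix matters because the single-byte stage just above may already have
advanced "done" to re-align the transfer, so the 16-bit stage has to test the
alignment of the current position, addr + done, rather than the original addr.
A tiny sketch of the difference (addresses invented for illustration):

	#include <stdint.h>
	#include <stdio.h>

	int main(void)
	{
		uintptr_t addr = 0x1001;	/* odd start address */
		size_t done = 0;

		if (addr & 0x1)
			done += 1;		/* byte stage consumed one byte */

		printf("old check: %s, fixed check: %s\n",
		       (addr & 0x2) ? "16-bit copy" : "skip",
		       ((addr + done) & 0x2) ? "16-bit copy" : "skip");
		return 0;
	}

	/* prints "old check: skip, fixed check: 16-bit copy" */
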
diff --git a/drivers/vme/bridges/vme_tsi148.c b/drivers/vme/bridges/vme_tsi148.c
index 16830d8..9911cd5 100644
--- a/drivers/vme/bridges/vme_tsi148.c
+++ b/drivers/vme/bridges/vme_tsi148.c
@@ -1289,7 +1289,7 @@
 		if (done == count)
 			goto out;
 	}
-	if ((uintptr_t)addr & 0x2) {
+	if ((uintptr_t)(addr + done) & 0x2) {
 		if ((count - done) < 2) {
 			*(u8 *)(buf + done) = ioread8(addr + done);
 			done += 1;
@@ -1371,7 +1371,7 @@
 		if (done == count)
 			goto out;
 	}
-	if ((uintptr_t)addr & 0x2) {
+	if ((uintptr_t)(addr + done) & 0x2) {
 		if ((count - done) < 2) {
 			iowrite8(*(u8 *)(buf + done), addr + done);
 			done += 1;
diff --git a/drivers/watchdog/Kconfig b/drivers/watchdog/Kconfig
index 5be6e91..79d2589 100644
--- a/drivers/watchdog/Kconfig
+++ b/drivers/watchdog/Kconfig
@@ -87,6 +87,14 @@
 	  This driver can also be built as a module.  If so, the module
 	  will be called da9055_wdt.
 
+config GPIO_WATCHDOG
+	tristate "Watchdog device controlled through GPIO-line"
+	depends on OF_GPIO
+	select WATCHDOG_CORE
+	help
+	  If you say yes here you get support for a watchdog device
+	  controlled through a GPIO line.
+
 config WM831X_WATCHDOG
 	tristate "WM831x watchdog"
 	depends on MFD_WM831X
@@ -109,7 +117,7 @@
 
 config ARM_SP805_WATCHDOG
 	tristate "ARM SP805 Watchdog"
-	depends on ARM && ARM_AMBA
+	depends on (ARM || ARM64) && ARM_AMBA
 	select WATCHDOG_CORE
 	help
 	  ARM Primecell SP805 Watchdog timer. This will reboot your system when
@@ -188,6 +196,7 @@
 	tristate "S3C2410 Watchdog"
 	depends on HAVE_S3C2410_WATCHDOG
 	select WATCHDOG_CORE
+	select MFD_SYSCON if ARCH_EXYNOS5
 	help
 	  Watchdog timer block in the Samsung SoCs. This will reboot
 	  the system when the timer expires with the watchdog enabled.
@@ -214,10 +223,10 @@
 
 config DW_WATCHDOG
 	tristate "Synopsys DesignWare watchdog"
-	depends on ARM && HAVE_CLK
+	depends on HAS_IOMEM
 	help
 	  Say Y here if to include support for the Synopsys DesignWare
-	  watchdog timer found in many ARM chips.
+	  watchdog timer found in many chips.
 	  To compile this driver as a module, choose M here: the
 	  module will be called dw_wdt.
 
@@ -270,10 +279,11 @@
 
 config DAVINCI_WATCHDOG
 	tristate "DaVinci watchdog"
-	depends on ARCH_DAVINCI
+	depends on ARCH_DAVINCI || ARCH_KEYSTONE
+	select WATCHDOG_CORE
 	help
 	  Say Y here if to include support for the watchdog timer
-	  in the DaVinci DM644x/DM646x processors.
+	  in the DaVinci DM644x/DM646x or Keystone processors.
 	  To compile this driver as a module, choose M here: the
 	  module will be called davinci_wdt.
 
@@ -883,13 +893,22 @@
 	Most people will say N.
 
 config W83627HF_WDT
-	tristate "W83627HF/W83627DHG Watchdog Timer"
+	tristate "Watchdog timer for W83627HF/W83627DHG and compatibles"
 	depends on X86
 	select WATCHDOG_CORE
 	---help---
-	  This is the driver for the hardware watchdog on the W83627HF chipset
-	  as used in Advantech PC-9578 and Tyan S2721-533 motherboards
-	  (and likely others). The driver also supports the W83627DHG chip.
+	  This is the driver for the hardware watchdog on the following
+	  Super I/O chips.
+		W83627DHG/DHG-P/EHF/EHG/F/G/HF/S/SF/THF/UHG/UG
+		W83637HF
+		W83667HG/HG-B
+		W83687THF
+		W83697HF
+		W83697UG
+		NCT6775
+		NCT6776
+		NCT6779
+
 	  This watchdog simply watches your kernel to make sure it doesn't
 	  freeze, and if it does, it reboots your computer after a certain
 	  amount of time.
@@ -1139,6 +1158,28 @@
 	  To compile this driver as a loadable module, choose M here.
 	  The module will be called bcm2835_wdt.
 
+config BCM_KONA_WDT
+	tristate "BCM Kona Watchdog"
+	depends on ARCH_BCM
+	select WATCHDOG_CORE
+	help
+	  Support for the watchdog timer on the Broadcom BCM281xx family,
+	  which includes the BCM11130, BCM11140, BCM11351, BCM28145 and
+	  BCM28155 variants.
+
+	  Say 'Y' or 'M' here to enable the driver. The module will be called
+	  bcm_kona_wdt.
+
+config BCM_KONA_WDT_DEBUG
+	bool "DEBUGFS support for BCM Kona Watchdog"
+	depends on BCM_KONA_WDT
+	help
+	  If enabled, adds /sys/kernel/debug/bcm_kona_wdt/info which provides
+	  access to the driver's internal data structures as well as watchdog
+	  timer hardware registers.
+
+	  If in doubt, say 'N'.
+
 config LANTIQ_WDT
 	tristate "Lantiq SoC watchdog"
 	depends on LANTIQ
@@ -1171,6 +1212,7 @@
 config 8xxx_WDT
 	tristate "MPC8xxx Platform Watchdog Timer"
 	depends on PPC_8xx || PPC_83xx || PPC_86xx
+	select WATCHDOG_CORE
 	help
 	  This driver is for a SoC level watchdog that exists on some
 	  Freescale PowerPC processors. So far this driver supports:
diff --git a/drivers/watchdog/Makefile b/drivers/watchdog/Makefile
index 91bd95a..985a66c 100644
--- a/drivers/watchdog/Makefile
+++ b/drivers/watchdog/Makefile
@@ -57,6 +57,7 @@
 obj-$(CONFIG_BCM2835_WDT) += bcm2835_wdt.o
 obj-$(CONFIG_MOXART_WDT) += moxart_wdt.o
 obj-$(CONFIG_SIRFSOC_WATCHDOG) += sirfsoc_wdt.o
+obj-$(CONFIG_BCM_KONA_WDT) += bcm_kona_wdt.o
 
 # AVR32 Architecture
 obj-$(CONFIG_AT32AP700X_WDT) += at32ap700x_wdt.o
@@ -171,6 +172,7 @@
 # Architecture Independent
 obj-$(CONFIG_DA9052_WATCHDOG) += da9052_wdt.o
 obj-$(CONFIG_DA9055_WATCHDOG) += da9055_wdt.o
+obj-$(CONFIG_GPIO_WATCHDOG)	+= gpio_wdt.o
 obj-$(CONFIG_WM831X_WATCHDOG) += wm831x_wdt.o
 obj-$(CONFIG_WM8350_WATCHDOG) += wm8350_wdt.o
 obj-$(CONFIG_MAX63XX_WATCHDOG) += max63xx_wdt.o
diff --git a/drivers/watchdog/alim1535_wdt.c b/drivers/watchdog/alim1535_wdt.c
index fbb7b94..3a17fbd 100644
--- a/drivers/watchdog/alim1535_wdt.c
+++ b/drivers/watchdog/alim1535_wdt.c
@@ -301,7 +301,7 @@
  *	want to register another driver on the same PCI id.
  */
 
-static DEFINE_PCI_DEVICE_TABLE(ali_pci_tbl) __used = {
+static const struct pci_device_id ali_pci_tbl[] __used = {
 	{ PCI_VENDOR_ID_AL, 0x1533, PCI_ANY_ID, PCI_ANY_ID,},
 	{ PCI_VENDOR_ID_AL, 0x1535, PCI_ANY_ID, PCI_ANY_ID,},
 	{ 0, },
diff --git a/drivers/watchdog/alim7101_wdt.c b/drivers/watchdog/alim7101_wdt.c
index 12f0b76..996b2f7 100644
--- a/drivers/watchdog/alim7101_wdt.c
+++ b/drivers/watchdog/alim7101_wdt.c
@@ -414,7 +414,7 @@
 module_init(alim7101_wdt_init);
 module_exit(alim7101_wdt_unload);
 
-static DEFINE_PCI_DEVICE_TABLE(alim7101_pci_tbl) __used = {
+static const struct pci_device_id alim7101_pci_tbl[] __used = {
 	{ PCI_DEVICE(PCI_VENDOR_ID_AL, PCI_DEVICE_ID_AL_M1533) },
 	{ PCI_DEVICE(PCI_VENDOR_ID_AL, PCI_DEVICE_ID_AL_M7101) },
 	{ }
diff --git a/drivers/watchdog/at91sam9_wdt.c b/drivers/watchdog/at91sam9_wdt.c
index be37dde..489729b 100644
--- a/drivers/watchdog/at91sam9_wdt.c
+++ b/drivers/watchdog/at91sam9_wdt.c
@@ -19,11 +19,13 @@
 
 #include <linux/errno.h>
 #include <linux/init.h>
+#include <linux/interrupt.h>
 #include <linux/io.h>
 #include <linux/kernel.h>
 #include <linux/module.h>
 #include <linux/moduleparam.h>
 #include <linux/platform_device.h>
+#include <linux/reboot.h>
 #include <linux/types.h>
 #include <linux/watchdog.h>
 #include <linux/jiffies.h>
@@ -31,22 +33,33 @@
 #include <linux/bitops.h>
 #include <linux/uaccess.h>
 #include <linux/of.h>
+#include <linux/of_irq.h>
 
 #include "at91sam9_wdt.h"
 
 #define DRV_NAME "AT91SAM9 Watchdog"
 
-#define wdt_read(field) \
-	__raw_readl(at91wdt_private.base + field)
-#define wdt_write(field, val) \
-	__raw_writel((val), at91wdt_private.base + field)
+#define wdt_read(wdt, field) \
+	__raw_readl((wdt)->base + (field))
+#define wdt_write(wdt, field, val) \
+	__raw_writel((val), (wdt)->base + (field))
 
 /* AT91SAM9 watchdog runs a 12bit counter @ 256Hz,
  * use this to convert a watchdog
  * value from/to milliseconds.
  */
-#define ms_to_ticks(t)	(((t << 8) / 1000) - 1)
-#define ticks_to_ms(t)	(((t + 1) * 1000) >> 8)
+#define ticks_to_hz_rounddown(t)	((((t) + 1) * HZ) >> 8)
+#define ticks_to_hz_roundup(t)		(((((t) + 1) * HZ) + 255) >> 8)
+#define ticks_to_secs(t)		(((t) + 1) >> 8)
+#define secs_to_ticks(s)		((s) ? (((s) << 8) - 1) : 0)
+
+#define WDT_MR_RESET	0x3FFF2FFF
+
+/* Watchdog max counter value in ticks */
+#define WDT_COUNTER_MAX_TICKS	0xFFF
+
+/* Watchdog max delta/value in secs */
+#define WDT_COUNTER_MAX_SECS	ticks_to_secs(WDT_COUNTER_MAX_TICKS)
 
 /* Hardware timeout in seconds */
 #define WDT_HW_TIMEOUT 2
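
The new helpers encode the 12-bit, 256 Hz counter directly, and the driver's
old observation that the maximum period is 4096 / 256 = 16 seconds can be
re-derived from them.  A standalone sanity check (nothing kernel-specific,
just the same macros):

	#include <stdio.h>

	#define ticks_to_secs(t)	(((t) + 1) >> 8)
	#define secs_to_ticks(s)	((s) ? (((s) << 8) - 1) : 0)

	int main(void)
	{
		unsigned int max_ticks = 0xFFF;	/* WDT_COUNTER_MAX_TICKS */

		printf("max timeout: %u s\n", ticks_to_secs(max_ticks));	/* 16 */
		printf("2 s timeout: %u ticks\n", secs_to_ticks(2u));		/* 511 */
		return 0;
	}
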
@@ -66,23 +79,40 @@
 MODULE_PARM_DESC(nowayout, "Watchdog cannot be stopped once started "
 	"(default=" __MODULE_STRING(WATCHDOG_NOWAYOUT) ")");
 
-static struct watchdog_device at91_wdt_dev;
-static void at91_ping(unsigned long data);
-
-static struct {
+#define to_wdt(wdd) container_of(wdd, struct at91wdt, wdd)
+struct at91wdt {
+	struct watchdog_device wdd;
 	void __iomem *base;
 	unsigned long next_heartbeat;	/* the next_heartbeat for the timer */
 	struct timer_list timer;	/* The timer that pings the watchdog */
-} at91wdt_private;
+	u32 mr;
+	u32 mr_mask;
+	unsigned long heartbeat;	/* WDT heartbeat in jiffies */
+	bool nowayout;
+	unsigned int irq;
+};
 
 /* ......................................................................... */
 
+static irqreturn_t wdt_interrupt(int irq, void *dev_id)
+{
+	struct at91wdt *wdt = (struct at91wdt *)dev_id;
+
+	if (wdt_read(wdt, AT91_WDT_SR)) {
+		pr_crit("at91sam9 WDT software reset\n");
+		emergency_restart();
+		pr_crit("Reboot didn't happen\n");
+	}
+
+	return IRQ_HANDLED;
+}
+
 /*
  * Reload the watchdog timer.  (ie, pat the watchdog)
  */
-static inline void at91_wdt_reset(void)
+static inline void at91_wdt_reset(struct at91wdt *wdt)
 {
-	wdt_write(AT91_WDT_CR, AT91_WDT_KEY | AT91_WDT_WDRSTT);
+	wdt_write(wdt, AT91_WDT_CR, AT91_WDT_KEY | AT91_WDT_WDRSTT);
 }
 
 /*
@@ -90,26 +120,21 @@
  */
 static void at91_ping(unsigned long data)
 {
-	if (time_before(jiffies, at91wdt_private.next_heartbeat) ||
-	    (!watchdog_active(&at91_wdt_dev))) {
-		at91_wdt_reset();
-		mod_timer(&at91wdt_private.timer, jiffies + WDT_TIMEOUT);
-	} else
+	struct at91wdt *wdt = (struct at91wdt *)data;
+	if (time_before(jiffies, wdt->next_heartbeat) ||
+	    !watchdog_active(&wdt->wdd)) {
+		at91_wdt_reset(wdt);
+		mod_timer(&wdt->timer, jiffies + wdt->heartbeat);
+	} else {
 		pr_crit("I will reset your machine !\n");
-}
-
-static int at91_wdt_ping(struct watchdog_device *wdd)
-{
-	/* calculate when the next userspace timeout will be */
-	at91wdt_private.next_heartbeat = jiffies + wdd->timeout * HZ;
-	return 0;
+	}
 }
 
 static int at91_wdt_start(struct watchdog_device *wdd)
 {
-	/* calculate the next userspace timeout and modify the timer */
-	at91_wdt_ping(wdd);
-	mod_timer(&at91wdt_private.timer, jiffies + WDT_TIMEOUT);
+	struct at91wdt *wdt = to_wdt(wdd);
+	/* calculate when the next userspace timeout will be */
+	wdt->next_heartbeat = jiffies + wdd->timeout * HZ;
 	return 0;
 }
 
@@ -122,39 +147,104 @@
 static int at91_wdt_set_timeout(struct watchdog_device *wdd, unsigned int new_timeout)
 {
 	wdd->timeout = new_timeout;
-	return 0;
+	return at91_wdt_start(wdd);
 }
 
-/*
- * Set the watchdog time interval in 1/256Hz (write-once)
- * Counter is 12 bit.
- */
-static int at91_wdt_settimeout(unsigned int timeout)
+static int at91_wdt_init(struct platform_device *pdev, struct at91wdt *wdt)
 {
-	unsigned int reg;
-	unsigned int mr;
+	u32 tmp;
+	u32 delta;
+	u32 value;
+	int err;
+	u32 mask = wdt->mr_mask;
+	unsigned long min_heartbeat = 1;
+	unsigned long max_heartbeat;
+	struct device *dev = &pdev->dev;
 
-	/* Check if disabled */
-	mr = wdt_read(AT91_WDT_MR);
-	if (mr & AT91_WDT_WDDIS) {
-		pr_err("sorry, watchdog is disabled\n");
-		return -EIO;
+	tmp = wdt_read(wdt, AT91_WDT_MR);
+	if ((tmp & mask) != (wdt->mr & mask)) {
+		if (tmp == WDT_MR_RESET) {
+			wdt_write(wdt, AT91_WDT_MR, wdt->mr);
+			tmp = wdt_read(wdt, AT91_WDT_MR);
+		}
+	}
+
+	if (tmp & AT91_WDT_WDDIS) {
+		if (wdt->mr & AT91_WDT_WDDIS)
+			return 0;
+		dev_err(dev, "watchdog is disabled\n");
+		return -EINVAL;
+	}
+
+	value = tmp & AT91_WDT_WDV;
+	delta = (tmp & AT91_WDT_WDD) >> 16;
+
+	if (delta < value)
+		min_heartbeat = ticks_to_hz_roundup(value - delta);
+
+	max_heartbeat = ticks_to_hz_rounddown(value);
+	if (!max_heartbeat) {
+		dev_err(dev,
+			"heartbeat is too small for the system to handle it correctly\n");
+		return -EINVAL;
 	}
 
 	/*
-	 * All counting occurs at SLOW_CLOCK / 128 = 256 Hz
-	 *
-	 * Since WDV is a 12-bit counter, the maximum period is
-	 * 4096 / 256 = 16 seconds.
+	 * Try to reset the watchdog counter 4 or 2 times more often than
+	 * actually requested, to avoid spurious watchdog reset.
+	 * If this is not possible because of the min_heartbeat value, reset
+	 * it at the min_heartbeat period.
 	 */
-	reg = AT91_WDT_WDRSTEN	/* causes watchdog reset */
-		/* | AT91_WDT_WDRPROC	causes processor reset only */
-		| AT91_WDT_WDDBGHLT	/* disabled in debug mode */
-		| AT91_WDT_WDD		/* restart at any time */
-		| (timeout & AT91_WDT_WDV);  /* timer value */
-	wdt_write(AT91_WDT_MR, reg);
+	if ((max_heartbeat / 4) >= min_heartbeat)
+		wdt->heartbeat = max_heartbeat / 4;
+	else if ((max_heartbeat / 2) >= min_heartbeat)
+		wdt->heartbeat = max_heartbeat / 2;
+	else
+		wdt->heartbeat = min_heartbeat;
+
+	if (max_heartbeat < min_heartbeat + 4)
+		dev_warn(dev,
+			 "min heartbeat and max heartbeat might be too close for the system to handle it correctly\n");
+
+	if ((tmp & AT91_WDT_WDFIEN) && wdt->irq) {
+		err = request_irq(wdt->irq, wdt_interrupt,
+				  IRQF_SHARED | IRQF_IRQPOLL,
+				  pdev->name, wdt);
+		if (err)
+			return err;
+	}
+
+	if ((tmp & wdt->mr_mask) != (wdt->mr & wdt->mr_mask))
+		dev_warn(dev,
+			 "watchdog already configured differently (mr = %x expecting %x)\n",
+			 tmp & wdt->mr_mask, wdt->mr & wdt->mr_mask);
+
+	setup_timer(&wdt->timer, at91_ping, (unsigned long)wdt);
+
+	/*
+	 * Use min_heartbeat the first time to avoid spurious watchdog reset:
+	 * we don't know for how long the watchdog counter is running, and
+	 *  - resetting it right now might trigger a watchdog fault reset
+	 *  - waiting for heartbeat time might lead to a watchdog timeout
+	 *    reset
+	 */
+	mod_timer(&wdt->timer, jiffies + min_heartbeat);
+
+	/* Try to set timeout from device tree first */
+	if (watchdog_init_timeout(&wdt->wdd, 0, dev))
+		watchdog_init_timeout(&wdt->wdd, heartbeat, dev);
+	watchdog_set_nowayout(&wdt->wdd, wdt->nowayout);
+	err = watchdog_register_device(&wdt->wdd);
+	if (err)
+		goto out_stop_timer;
+
+	wdt->next_heartbeat = jiffies + wdt->wdd.timeout * HZ;
 
 	return 0;
+
+out_stop_timer:
+	del_timer(&wdt->timer);
+	return err;
 }
 
 /* ......................................................................... */
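The heartbeat selection in the previous hunk is easiest to follow with concrete numbers; the values below are invented purely for illustration:

	/*
	 * Hypothetical example: suppose the boot loader left WDV = 0x800 and
	 * WDD = 0x400 in AT91_WDT_MR (made-up values).
	 *
	 *   max_heartbeat = ticks_to_hz_rounddown(0x800)        ~ 8 s
	 *   min_heartbeat = ticks_to_hz_roundup(0x800 - 0x400)  ~ 4 s
	 *
	 *   max_heartbeat / 4 = ~2 s  <  min_heartbeat  -> rejected
	 *   max_heartbeat / 2 = ~4 s  >= min_heartbeat  -> chosen
	 *
	 * so wdt->heartbeat ends up at roughly half the hardware period, and
	 * the kernel timer pings the controller inside the [min, max] window
	 * that the WDD/WDV fields enforce.
	 */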
@@ -169,61 +259,123 @@
 	.owner =	THIS_MODULE,
 	.start =	at91_wdt_start,
 	.stop =		at91_wdt_stop,
-	.ping =		at91_wdt_ping,
 	.set_timeout =	at91_wdt_set_timeout,
 };
 
-static struct watchdog_device at91_wdt_dev = {
-	.info =		&at91_wdt_info,
-	.ops =		&at91_wdt_ops,
-	.timeout =	WDT_HEARTBEAT,
-	.min_timeout =	1,
-	.max_timeout =	0xFFFF,
-};
+#if defined(CONFIG_OF)
+static int of_at91wdt_init(struct device_node *np, struct at91wdt *wdt)
+{
+	u32 min = 0;
+	u32 max = WDT_COUNTER_MAX_SECS;
+	const char *tmp;
+
+	/* Get the interrupts property */
+	wdt->irq = irq_of_parse_and_map(np, 0);
+	if (!wdt->irq)
+		dev_warn(wdt->wdd.parent, "failed to get IRQ from DT\n");
+
+	if (!of_property_read_u32_index(np, "atmel,max-heartbeat-sec", 0,
+					&max)) {
+		if (!max || max > WDT_COUNTER_MAX_SECS)
+			max = WDT_COUNTER_MAX_SECS;
+
+		if (!of_property_read_u32_index(np, "atmel,min-heartbeat-sec",
+						0, &min)) {
+			if (min >= max)
+				min = max - 1;
+		}
+	}
+
+	min = secs_to_ticks(min);
+	max = secs_to_ticks(max);
+
+	wdt->mr_mask = 0x3FFFFFFF;
+	wdt->mr = 0;
+	if (!of_property_read_string(np, "atmel,watchdog-type", &tmp) &&
+	    !strcmp(tmp, "software")) {
+		wdt->mr |= AT91_WDT_WDFIEN;
+		wdt->mr_mask &= ~AT91_WDT_WDRPROC;
+	} else {
+		wdt->mr |= AT91_WDT_WDRSTEN;
+	}
+
+	if (!of_property_read_string(np, "atmel,reset-type", &tmp) &&
+	    !strcmp(tmp, "proc"))
+		wdt->mr |= AT91_WDT_WDRPROC;
+
+	if (of_property_read_bool(np, "atmel,disable")) {
+		wdt->mr |= AT91_WDT_WDDIS;
+		wdt->mr_mask &= AT91_WDT_WDDIS;
+	}
+
+	if (of_property_read_bool(np, "atmel,idle-halt"))
+		wdt->mr |= AT91_WDT_WDIDLEHLT;
+
+	if (of_property_read_bool(np, "atmel,dbg-halt"))
+		wdt->mr |= AT91_WDT_WDDBGHLT;
+
+	wdt->mr |= max | ((max - min) << 16);
+
+	return 0;
+}
+#else
+static inline int of_at91wdt_init(struct device_node *np, struct at91wdt *wdt)
+{
+	return 0;
+}
+#endif
 
 static int __init at91wdt_probe(struct platform_device *pdev)
 {
 	struct resource	*r;
-	int res;
+	int err;
+	struct at91wdt *wdt;
+
+	wdt = devm_kzalloc(&pdev->dev, sizeof(*wdt), GFP_KERNEL);
+	if (!wdt)
+		return -ENOMEM;
+
+	wdt->mr = (WDT_HW_TIMEOUT * 256) | AT91_WDT_WDRSTEN | AT91_WDT_WDD |
+		  AT91_WDT_WDDBGHLT | AT91_WDT_WDIDLEHLT;
+	wdt->mr_mask = 0x3FFFFFFF;
+	wdt->nowayout = nowayout;
+	wdt->wdd.parent = &pdev->dev;
+	wdt->wdd.info = &at91_wdt_info;
+	wdt->wdd.ops = &at91_wdt_ops;
+	wdt->wdd.timeout = WDT_HEARTBEAT;
+	wdt->wdd.min_timeout = 1;
+	wdt->wdd.max_timeout = 0xFFFF;
 
 	r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-	if (!r)
-		return -ENODEV;
-	at91wdt_private.base = ioremap(r->start, resource_size(r));
-	if (!at91wdt_private.base) {
-		dev_err(&pdev->dev, "failed to map registers, aborting.\n");
-		return -ENOMEM;
+	wdt->base = devm_ioremap_resource(&pdev->dev, r);
+	if (IS_ERR(wdt->base))
+		return PTR_ERR(wdt->base);
+
+	if (pdev->dev.of_node) {
+		err = of_at91wdt_init(pdev->dev.of_node, wdt);
+		if (err)
+			return err;
 	}
 
-	at91_wdt_dev.parent = &pdev->dev;
-	watchdog_init_timeout(&at91_wdt_dev, heartbeat, &pdev->dev);
-	watchdog_set_nowayout(&at91_wdt_dev, nowayout);
+	err = at91_wdt_init(pdev, wdt);
+	if (err)
+		return err;
 
-	/* Set watchdog */
-	res = at91_wdt_settimeout(ms_to_ticks(WDT_HW_TIMEOUT * 1000));
-	if (res)
-		return res;
-
-	res = watchdog_register_device(&at91_wdt_dev);
-	if (res)
-		return res;
-
-	at91wdt_private.next_heartbeat = jiffies + at91_wdt_dev.timeout * HZ;
-	setup_timer(&at91wdt_private.timer, at91_ping, 0);
-	mod_timer(&at91wdt_private.timer, jiffies + WDT_TIMEOUT);
+	platform_set_drvdata(pdev, wdt);
 
 	pr_info("enabled (heartbeat=%d sec, nowayout=%d)\n",
-		at91_wdt_dev.timeout, nowayout);
+		wdt->wdd.timeout, wdt->nowayout);
 
 	return 0;
 }
 
 static int __exit at91wdt_remove(struct platform_device *pdev)
 {
-	watchdog_unregister_device(&at91_wdt_dev);
+	struct at91wdt *wdt = platform_get_drvdata(pdev);
+	watchdog_unregister_device(&wdt->wdd);
 
 	pr_warn("I quit now, hardware will probably reboot!\n");
-	del_timer(&at91wdt_private.timer);
+	del_timer(&wdt->timer);
 
 	return 0;
 }
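For reference, the DT parsing in of_at91wdt_init() packs the requested window directly into the WDV and WDD fields; a hypothetical device tree (values invented for illustration) would be translated as follows:

	/*
	 * atmel,max-heartbeat-sec = <16>;
	 * atmel,min-heartbeat-sec = <4>;
	 *
	 *   max = secs_to_ticks(16) = (16 << 8) - 1 = 0xFFF   -> WDV field
	 *   min = secs_to_ticks(4)  = (4 << 8) - 1  = 0x3FF
	 *   WDD = max - min = 0xC00                           -> bits 16..27
	 *
	 * i.e. the counter runs for 16 s and may only be reloaded during the
	 * last ~12 s of that period, matching the 4 s minimum heartbeat.
	 */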
diff --git a/drivers/watchdog/bcm_kona_wdt.c b/drivers/watchdog/bcm_kona_wdt.c
new file mode 100644
index 0000000..9c24809
--- /dev/null
+++ b/drivers/watchdog/bcm_kona_wdt.c
@@ -0,0 +1,368 @@
+/*
+ * Copyright (C) 2013 Broadcom Corporation
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation version 2.
+ *
+ * This program is distributed "as is" WITHOUT ANY WARRANTY of any
+ * kind, whether express or implied; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/debugfs.h>
+#include <linux/delay.h>
+#include <linux/err.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/of_address.h>
+#include <linux/platform_device.h>
+#include <linux/watchdog.h>
+
+#define SECWDOG_CTRL_REG		0x00000000
+#define SECWDOG_COUNT_REG		0x00000004
+
+#define SECWDOG_RESERVED_MASK		0x1dffffff
+#define SECWDOG_WD_LOAD_FLAG		0x10000000
+#define SECWDOG_EN_MASK			0x08000000
+#define SECWDOG_SRSTEN_MASK		0x04000000
+#define SECWDOG_RES_MASK		0x00f00000
+#define SECWDOG_COUNT_MASK		0x000fffff
+
+#define SECWDOG_MAX_COUNT		SECWDOG_COUNT_MASK
+#define SECWDOG_CLKS_SHIFT		20
+#define SECWDOG_MAX_RES			15
+#define SECWDOG_DEFAULT_RESOLUTION	4
+#define SECWDOG_MAX_TRY			1000
+
+#define SECS_TO_TICKS(x, w)		((x) << (w)->resolution)
+#define TICKS_TO_SECS(x, w)		((x) >> (w)->resolution)
+
+#define BCM_KONA_WDT_NAME		"bcm_kona_wdt"
+
+struct bcm_kona_wdt {
+	void __iomem *base;
+	/*
+	 * One watchdog tick is 1/(2^resolution) seconds. Resolution can take
+	 * the values 0-15, meaning one tick can be 1s to 30.52us. Our default
+	 * resolution of 4 means one tick is 62.5ms.
+	 *
+	 * The watchdog counter is 20 bits. Depending on resolution, the maximum
+	 * counter value of 0xfffff expires after about 12 days (resolution 0)
+	 * down to only 32s (resolution 15). The default resolution of 4 gives
+	 * us a maximum of about 18 hours and 12 minutes before the watchdog
+	 * times out.
+	 */
+	int resolution;
+	spinlock_t lock;
+#ifdef CONFIG_BCM_KONA_WDT_DEBUG
+	unsigned long busy_count;
+	struct dentry *debugfs;
+#endif
+};
+
+static int secure_register_read(struct bcm_kona_wdt *wdt, uint32_t offset)
+{
+	uint32_t val;
+	unsigned count = 0;
+
+	/*
+	 * If the WD_LOAD_FLAG is set, the watchdog counter field is being
+	 * updated in hardware. Once the WD timer is updated in hardware, it
+	 * gets cleared.
+	 */
+	do {
+		if (unlikely(count > 1))
+			udelay(5);
+		val = readl_relaxed(wdt->base + offset);
+		count++;
+	} while ((val & SECWDOG_WD_LOAD_FLAG) && count < SECWDOG_MAX_TRY);
+
+#ifdef CONFIG_BCM_KONA_WDT_DEBUG
+	/* Remember the maximum number of iterations due to WD_LOAD_FLAG */
+	if (count > wdt->busy_count)
+		wdt->busy_count = count;
+#endif
+
+	/* This is the only place we return a negative value. */
+	if (val & SECWDOG_WD_LOAD_FLAG)
+		return -ETIMEDOUT;
+
+	/* We always mask out reserved bits. */
+	val &= SECWDOG_RESERVED_MASK;
+
+	return val;
+}
+
+#ifdef CONFIG_BCM_KONA_WDT_DEBUG
+
+static int bcm_kona_wdt_dbg_show(struct seq_file *s, void *data)
+{
+	int ctl_val, cur_val, ret;
+	unsigned long flags;
+	struct bcm_kona_wdt *wdt = s->private;
+
+	if (!wdt)
+		return seq_puts(s, "No device pointer\n");
+
+	spin_lock_irqsave(&wdt->lock, flags);
+	ctl_val = secure_register_read(wdt, SECWDOG_CTRL_REG);
+	cur_val = secure_register_read(wdt, SECWDOG_COUNT_REG);
+	spin_unlock_irqrestore(&wdt->lock, flags);
+
+	if (ctl_val < 0 || cur_val < 0) {
+		ret = seq_puts(s, "Error accessing hardware\n");
+	} else {
+		int ctl, cur, ctl_sec, cur_sec, res;
+
+		ctl = ctl_val & SECWDOG_COUNT_MASK;
+		res = (ctl_val & SECWDOG_RES_MASK) >> SECWDOG_CLKS_SHIFT;
+		cur = cur_val & SECWDOG_COUNT_MASK;
+		ctl_sec = TICKS_TO_SECS(ctl, wdt);
+		cur_sec = TICKS_TO_SECS(cur, wdt);
+		ret = seq_printf(s, "Resolution: %d / %d\n"
+				"Control: %d s / %d (%#x) ticks\n"
+				"Current: %d s / %d (%#x) ticks\n"
+				"Busy count: %lu\n", res,
+				wdt->resolution, ctl_sec, ctl, ctl, cur_sec,
+				cur, cur, wdt->busy_count);
+	}
+
+	return ret;
+}
+
+static int bcm_kona_dbg_open(struct inode *inode, struct file *file)
+{
+	return single_open(file, bcm_kona_wdt_dbg_show, inode->i_private);
+}
+
+static const struct file_operations bcm_kona_dbg_operations = {
+	.open		= bcm_kona_dbg_open,
+	.read		= seq_read,
+	.llseek		= seq_lseek,
+	.release	= single_release,
+};
+
+static void bcm_kona_wdt_debug_init(struct platform_device *pdev)
+{
+	struct dentry *dir;
+	struct bcm_kona_wdt *wdt = platform_get_drvdata(pdev);
+
+	if (!wdt)
+		return;
+
+	wdt->debugfs = NULL;
+
+	dir = debugfs_create_dir(BCM_KONA_WDT_NAME, NULL);
+	if (IS_ERR_OR_NULL(dir))
+		return;
+
+	if (debugfs_create_file("info", S_IFREG | S_IRUGO, dir, wdt,
+				&bcm_kona_dbg_operations))
+		wdt->debugfs = dir;
+	else
+		debugfs_remove_recursive(dir);
+}
+
+static void bcm_kona_wdt_debug_exit(struct platform_device *pdev)
+{
+	struct bcm_kona_wdt *wdt = platform_get_drvdata(pdev);
+
+	if (wdt && wdt->debugfs) {
+		debugfs_remove_recursive(wdt->debugfs);
+		wdt->debugfs = NULL;
+	}
+}
+
+#else
+
+static void bcm_kona_wdt_debug_init(struct platform_device *pdev) {}
+static void bcm_kona_wdt_debug_exit(struct platform_device *pdev) {}
+
+#endif /* CONFIG_BCM_KONA_WDT_DEBUG */
+
+static int bcm_kona_wdt_ctrl_reg_modify(struct bcm_kona_wdt *wdt,
+					unsigned mask, unsigned newval)
+{
+	int val;
+	unsigned long flags;
+	int ret = 0;
+
+	spin_lock_irqsave(&wdt->lock, flags);
+
+	val = secure_register_read(wdt, SECWDOG_CTRL_REG);
+	if (val < 0) {
+		ret = val;
+	} else {
+		val &= ~mask;
+		val |= newval;
+		writel_relaxed(val, wdt->base + SECWDOG_CTRL_REG);
+	}
+
+	spin_unlock_irqrestore(&wdt->lock, flags);
+
+	return ret;
+}
+
+static int bcm_kona_wdt_set_resolution_reg(struct bcm_kona_wdt *wdt)
+{
+	if (wdt->resolution > SECWDOG_MAX_RES)
+		return -EINVAL;
+
+	return bcm_kona_wdt_ctrl_reg_modify(wdt, SECWDOG_RES_MASK,
+					wdt->resolution << SECWDOG_CLKS_SHIFT);
+}
+
+static int bcm_kona_wdt_set_timeout_reg(struct watchdog_device *wdog,
+					unsigned watchdog_flags)
+{
+	struct bcm_kona_wdt *wdt = watchdog_get_drvdata(wdog);
+
+	return bcm_kona_wdt_ctrl_reg_modify(wdt, SECWDOG_COUNT_MASK,
+					SECS_TO_TICKS(wdog->timeout, wdt) |
+					watchdog_flags);
+}
+
+static int bcm_kona_wdt_set_timeout(struct watchdog_device *wdog,
+	unsigned int t)
+{
+	wdog->timeout = t;
+	return 0;
+}
+
+static unsigned int bcm_kona_wdt_get_timeleft(struct watchdog_device *wdog)
+{
+	struct bcm_kona_wdt *wdt = watchdog_get_drvdata(wdog);
+	int val;
+	unsigned long flags;
+
+	spin_lock_irqsave(&wdt->lock, flags);
+	val = secure_register_read(wdt, SECWDOG_COUNT_REG);
+	spin_unlock_irqrestore(&wdt->lock, flags);
+
+	if (val < 0)
+		return val;
+
+	return TICKS_TO_SECS(val & SECWDOG_COUNT_MASK, wdt);
+}
+
+static int bcm_kona_wdt_start(struct watchdog_device *wdog)
+{
+	return bcm_kona_wdt_set_timeout_reg(wdog,
+					SECWDOG_EN_MASK | SECWDOG_SRSTEN_MASK);
+}
+
+static int bcm_kona_wdt_stop(struct watchdog_device *wdog)
+{
+	struct bcm_kona_wdt *wdt = watchdog_get_drvdata(wdog);
+
+	return bcm_kona_wdt_ctrl_reg_modify(wdt, SECWDOG_EN_MASK |
+					    SECWDOG_SRSTEN_MASK, 0);
+}
+
+static struct watchdog_ops bcm_kona_wdt_ops = {
+	.owner =	THIS_MODULE,
+	.start =	bcm_kona_wdt_start,
+	.stop =		bcm_kona_wdt_stop,
+	.set_timeout =	bcm_kona_wdt_set_timeout,
+	.get_timeleft =	bcm_kona_wdt_get_timeleft,
+};
+
+static struct watchdog_info bcm_kona_wdt_info = {
+	.options =	WDIOF_SETTIMEOUT | WDIOF_MAGICCLOSE |
+			WDIOF_KEEPALIVEPING,
+	.identity =	"Broadcom Kona Watchdog Timer",
+};
+
+static struct watchdog_device bcm_kona_wdt_wdd = {
+	.info =		&bcm_kona_wdt_info,
+	.ops =		&bcm_kona_wdt_ops,
+	.min_timeout =	1,
+	.max_timeout =	SECWDOG_MAX_COUNT >> SECWDOG_DEFAULT_RESOLUTION,
+	.timeout =	SECWDOG_MAX_COUNT >> SECWDOG_DEFAULT_RESOLUTION,
+};
+
+static void bcm_kona_wdt_shutdown(struct platform_device *pdev)
+{
+	bcm_kona_wdt_stop(&bcm_kona_wdt_wdd);
+}
+
+static int bcm_kona_wdt_probe(struct platform_device *pdev)
+{
+	struct device *dev = &pdev->dev;
+	struct bcm_kona_wdt *wdt;
+	struct resource *res;
+	int ret;
+
+	wdt = devm_kzalloc(dev, sizeof(*wdt), GFP_KERNEL);
+	if (!wdt)
+		return -ENOMEM;
+
+	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	wdt->base = devm_ioremap_resource(dev, res);
+	if (IS_ERR(wdt->base))
+		return -ENODEV;
+
+	wdt->resolution = SECWDOG_DEFAULT_RESOLUTION;
+	ret = bcm_kona_wdt_set_resolution_reg(wdt);
+	if (ret) {
+		dev_err(dev, "Failed to set resolution (error: %d)", ret);
+		return ret;
+	}
+
+	spin_lock_init(&wdt->lock);
+	platform_set_drvdata(pdev, wdt);
+	watchdog_set_drvdata(&bcm_kona_wdt_wdd, wdt);
+
+	ret = bcm_kona_wdt_set_timeout_reg(&bcm_kona_wdt_wdd, 0);
+	if (ret) {
+		dev_err(dev, "Failed set watchdog timeout");
+		return ret;
+	}
+
+	ret = watchdog_register_device(&bcm_kona_wdt_wdd);
+	if (ret) {
+		dev_err(dev, "Failed to register watchdog device");
+		return ret;
+	}
+
+	bcm_kona_wdt_debug_init(pdev);
+	dev_dbg(dev, "Broadcom Kona Watchdog Timer");
+
+	return 0;
+}
+
+static int bcm_kona_wdt_remove(struct platform_device *pdev)
+{
+	bcm_kona_wdt_debug_exit(pdev);
+	bcm_kona_wdt_shutdown(pdev);
+	watchdog_unregister_device(&bcm_kona_wdt_wdd);
+	dev_dbg(&pdev->dev, "Watchdog driver disabled");
+
+	return 0;
+}
+
+static const struct of_device_id bcm_kona_wdt_of_match[] = {
+	{ .compatible = "brcm,kona-wdt", },
+	{},
+};
+MODULE_DEVICE_TABLE(of, bcm_kona_wdt_of_match);
+
+static struct platform_driver bcm_kona_wdt_driver = {
+	.driver = {
+			.name = BCM_KONA_WDT_NAME,
+			.owner = THIS_MODULE,
+			.of_match_table = bcm_kona_wdt_of_match,
+		  },
+	.probe = bcm_kona_wdt_probe,
+	.remove = bcm_kona_wdt_remove,
+	.shutdown = bcm_kona_wdt_shutdown,
+};
+
+module_platform_driver(bcm_kona_wdt_driver);
+
+MODULE_ALIAS("platform:" BCM_KONA_WDT_NAME);
+MODULE_AUTHOR("Markus Mayer <mmayer@broadcom.com>");
+MODULE_DESCRIPTION("Broadcom Kona Watchdog Driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/watchdog/davinci_wdt.c b/drivers/watchdog/davinci_wdt.c
index 12591f6..b1bae03 100644
--- a/drivers/watchdog/davinci_wdt.c
+++ b/drivers/watchdog/davinci_wdt.c
@@ -3,7 +3,7 @@
  *
  * Watchdog driver for DaVinci DM644x/DM646x processors
  *
- * Copyright (C) 2006 Texas Instruments.
+ * Copyright (C) 2006-2013 Texas Instruments.
  *
  * 2007 (c) MontaVista Software, Inc. This file is licensed under
  * the terms of the GNU General Public License version 2. This program
@@ -15,18 +15,12 @@
 #include <linux/moduleparam.h>
 #include <linux/types.h>
 #include <linux/kernel.h>
-#include <linux/fs.h>
-#include <linux/miscdevice.h>
 #include <linux/watchdog.h>
 #include <linux/init.h>
-#include <linux/bitops.h>
 #include <linux/platform_device.h>
-#include <linux/spinlock.h>
-#include <linux/uaccess.h>
 #include <linux/io.h>
 #include <linux/device.h>
 #include <linux/clk.h>
-#include <linux/slab.h>
 #include <linux/err.h>
 
 #define MODULE_NAME "DAVINCI-WDT: "
@@ -61,142 +55,103 @@
 #define WDKEY_SEQ0		(0xa5c6 << 16)
 #define WDKEY_SEQ1		(0xda7e << 16)
 
-static int heartbeat = DEFAULT_HEARTBEAT;
+static int heartbeat;
 
-static DEFINE_SPINLOCK(io_lock);
-static unsigned long wdt_status;
-#define WDT_IN_USE        0
-#define WDT_OK_TO_CLOSE   1
-#define WDT_REGION_INITED 2
-#define WDT_DEVICE_INITED 3
+/*
+ * struct to hold data for each WDT device
+ * @base - base io address of WDT device
+ * @clk - source clock of WDT
+ * @wdd - watchdog device as registered with the WDT core
+ */
+struct davinci_wdt_device {
+	void __iomem		*base;
+	struct clk		*clk;
+	struct watchdog_device	wdd;
+};
 
-static void __iomem	*wdt_base;
-struct clk		*wdt_clk;
-
-static void wdt_service(void)
-{
-	spin_lock(&io_lock);
-
-	/* put watchdog in service state */
-	iowrite32(WDKEY_SEQ0, wdt_base + WDTCR);
-	/* put watchdog in active state */
-	iowrite32(WDKEY_SEQ1, wdt_base + WDTCR);
-
-	spin_unlock(&io_lock);
-}
-
-static void wdt_enable(void)
+static int davinci_wdt_start(struct watchdog_device *wdd)
 {
 	u32 tgcr;
 	u32 timer_margin;
 	unsigned long wdt_freq;
+	struct davinci_wdt_device *davinci_wdt = watchdog_get_drvdata(wdd);
 
-	wdt_freq = clk_get_rate(wdt_clk);
-
-	spin_lock(&io_lock);
+	wdt_freq = clk_get_rate(davinci_wdt->clk);
 
 	/* disable, internal clock source */
-	iowrite32(0, wdt_base + TCR);
+	iowrite32(0, davinci_wdt->base + TCR);
 	/* reset timer, set mode to 64-bit watchdog, and unreset */
-	iowrite32(0, wdt_base + TGCR);
+	iowrite32(0, davinci_wdt->base + TGCR);
 	tgcr = TIMMODE_64BIT_WDOG | TIM12RS_UNRESET | TIM34RS_UNRESET;
-	iowrite32(tgcr, wdt_base + TGCR);
+	iowrite32(tgcr, davinci_wdt->base + TGCR);
 	/* clear counter regs */
-	iowrite32(0, wdt_base + TIM12);
-	iowrite32(0, wdt_base + TIM34);
+	iowrite32(0, davinci_wdt->base + TIM12);
+	iowrite32(0, davinci_wdt->base + TIM34);
 	/* set timeout period */
-	timer_margin = (((u64)heartbeat * wdt_freq) & 0xffffffff);
-	iowrite32(timer_margin, wdt_base + PRD12);
-	timer_margin = (((u64)heartbeat * wdt_freq) >> 32);
-	iowrite32(timer_margin, wdt_base + PRD34);
+	timer_margin = (((u64)wdd->timeout * wdt_freq) & 0xffffffff);
+	iowrite32(timer_margin, davinci_wdt->base + PRD12);
+	timer_margin = (((u64)wdd->timeout * wdt_freq) >> 32);
+	iowrite32(timer_margin, davinci_wdt->base + PRD34);
 	/* enable run continuously */
-	iowrite32(ENAMODE12_PERIODIC, wdt_base + TCR);
+	iowrite32(ENAMODE12_PERIODIC, davinci_wdt->base + TCR);
 	/* Once the WDT is in pre-active state write to
 	 * TIM12, TIM34, PRD12, PRD34, TCR, TGCR, WDTCR are
 	 * write protected (except for the WDKEY field)
 	 */
 	/* put watchdog in pre-active state */
-	iowrite32(WDKEY_SEQ0 | WDEN, wdt_base + WDTCR);
+	iowrite32(WDKEY_SEQ0 | WDEN, davinci_wdt->base + WDTCR);
 	/* put watchdog in active state */
-	iowrite32(WDKEY_SEQ1 | WDEN, wdt_base + WDTCR);
-
-	spin_unlock(&io_lock);
-}
-
-static int davinci_wdt_open(struct inode *inode, struct file *file)
-{
-	if (test_and_set_bit(WDT_IN_USE, &wdt_status))
-		return -EBUSY;
-
-	wdt_enable();
-
-	return nonseekable_open(inode, file);
-}
-
-static ssize_t
-davinci_wdt_write(struct file *file, const char *data, size_t len,
-		  loff_t *ppos)
-{
-	if (len)
-		wdt_service();
-
-	return len;
-}
-
-static const struct watchdog_info ident = {
-	.options = WDIOF_KEEPALIVEPING,
-	.identity = "DaVinci Watchdog",
-};
-
-static long davinci_wdt_ioctl(struct file *file,
-					unsigned int cmd, unsigned long arg)
-{
-	int ret = -ENOTTY;
-
-	switch (cmd) {
-	case WDIOC_GETSUPPORT:
-		ret = copy_to_user((struct watchdog_info *)arg, &ident,
-				   sizeof(ident)) ? -EFAULT : 0;
-		break;
-
-	case WDIOC_GETSTATUS:
-	case WDIOC_GETBOOTSTATUS:
-		ret = put_user(0, (int *)arg);
-		break;
-
-	case WDIOC_KEEPALIVE:
-		wdt_service();
-		ret = 0;
-		break;
-
-	case WDIOC_GETTIMEOUT:
-		ret = put_user(heartbeat, (int *)arg);
-		break;
-	}
-	return ret;
-}
-
-static int davinci_wdt_release(struct inode *inode, struct file *file)
-{
-	wdt_service();
-	clear_bit(WDT_IN_USE, &wdt_status);
-
+	iowrite32(WDKEY_SEQ1 | WDEN, davinci_wdt->base + WDTCR);
 	return 0;
 }
 
-static const struct file_operations davinci_wdt_fops = {
-	.owner = THIS_MODULE,
-	.llseek = no_llseek,
-	.write = davinci_wdt_write,
-	.unlocked_ioctl = davinci_wdt_ioctl,
-	.open = davinci_wdt_open,
-	.release = davinci_wdt_release,
+static int davinci_wdt_ping(struct watchdog_device *wdd)
+{
+	struct davinci_wdt_device *davinci_wdt = watchdog_get_drvdata(wdd);
+
+	/* put watchdog in service state */
+	iowrite32(WDKEY_SEQ0, davinci_wdt->base + WDTCR);
+	/* put watchdog in active state */
+	iowrite32(WDKEY_SEQ1, davinci_wdt->base + WDTCR);
+	return 0;
+}
+
+static unsigned int davinci_wdt_get_timeleft(struct watchdog_device *wdd)
+{
+	u64 timer_counter;
+	unsigned long freq;
+	u32 val;
+	struct davinci_wdt_device *davinci_wdt = watchdog_get_drvdata(wdd);
+
+	/* if timeout has occurred then return 0 */
+	val = ioread32(davinci_wdt->base + WDTCR);
+	if (val & WDFLAG)
+		return 0;
+
+	freq = clk_get_rate(davinci_wdt->clk);
+
+	if (!freq)
+		return 0;
+
+	timer_counter = ioread32(davinci_wdt->base + TIM12);
+	timer_counter |= ((u64)ioread32(davinci_wdt->base + TIM34) << 32);
+
+	do_div(timer_counter, freq);
+
+	return wdd->timeout - timer_counter;
+}
+
+static const struct watchdog_info davinci_wdt_info = {
+	.options = WDIOF_KEEPALIVEPING,
+	.identity = "DaVinci/Keystone Watchdog",
 };
 
-static struct miscdevice davinci_wdt_miscdev = {
-	.minor = WATCHDOG_MINOR,
-	.name = "watchdog",
-	.fops = &davinci_wdt_fops,
+static const struct watchdog_ops davinci_wdt_ops = {
+	.owner		= THIS_MODULE,
+	.start		= davinci_wdt_start,
+	.stop		= davinci_wdt_ping,
+	.ping		= davinci_wdt_ping,
+	.get_timeleft	= davinci_wdt_get_timeleft,
 };
 
 static int davinci_wdt_probe(struct platform_device *pdev)
@@ -204,37 +159,53 @@
 	int ret = 0;
 	struct device *dev = &pdev->dev;
 	struct resource  *wdt_mem;
+	struct watchdog_device *wdd;
+	struct davinci_wdt_device *davinci_wdt;
 
-	wdt_clk = devm_clk_get(dev, NULL);
-	if (WARN_ON(IS_ERR(wdt_clk)))
-		return PTR_ERR(wdt_clk);
+	davinci_wdt = devm_kzalloc(dev, sizeof(*davinci_wdt), GFP_KERNEL);
+	if (!davinci_wdt)
+		return -ENOMEM;
 
-	clk_prepare_enable(wdt_clk);
+	davinci_wdt->clk = devm_clk_get(dev, NULL);
+	if (WARN_ON(IS_ERR(davinci_wdt->clk)))
+		return PTR_ERR(davinci_wdt->clk);
 
-	if (heartbeat < 1 || heartbeat > MAX_HEARTBEAT)
-		heartbeat = DEFAULT_HEARTBEAT;
+	clk_prepare_enable(davinci_wdt->clk);
 
-	dev_info(dev, "heartbeat %d sec\n", heartbeat);
+	platform_set_drvdata(pdev, davinci_wdt);
+
+	wdd			= &davinci_wdt->wdd;
+	wdd->info		= &davinci_wdt_info;
+	wdd->ops		= &davinci_wdt_ops;
+	wdd->min_timeout	= 1;
+	wdd->max_timeout	= MAX_HEARTBEAT;
+	wdd->timeout		= DEFAULT_HEARTBEAT;
+
+	watchdog_init_timeout(wdd, heartbeat, dev);
+
+	dev_info(dev, "heartbeat %d sec\n", wdd->timeout);
+
+	watchdog_set_drvdata(wdd, davinci_wdt);
+	watchdog_set_nowayout(wdd, 1);
 
 	wdt_mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-	wdt_base = devm_ioremap_resource(dev, wdt_mem);
-	if (IS_ERR(wdt_base))
-		return PTR_ERR(wdt_base);
+	davinci_wdt->base = devm_ioremap_resource(dev, wdt_mem);
+	if (IS_ERR(davinci_wdt->base))
+		return PTR_ERR(davinci_wdt->base);
 
-	ret = misc_register(&davinci_wdt_miscdev);
-	if (ret < 0) {
-		dev_err(dev, "cannot register misc device\n");
-	} else {
-		set_bit(WDT_DEVICE_INITED, &wdt_status);
-	}
+	ret = watchdog_register_device(wdd);
+	if (ret < 0)
+		dev_err(dev, "cannot register watchdog device\n");
 
 	return ret;
 }
 
 static int davinci_wdt_remove(struct platform_device *pdev)
 {
-	misc_deregister(&davinci_wdt_miscdev);
-	clk_disable_unprepare(wdt_clk);
+	struct davinci_wdt_device *davinci_wdt = platform_get_drvdata(pdev);
+
+	watchdog_unregister_device(&davinci_wdt->wdd);
+	clk_disable_unprepare(davinci_wdt->clk);
 
 	return 0;
 }
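The new davinci_wdt_get_timeleft() above stitches the two 32-bit counter halves back into a 64-bit value and divides by the timer clock; a small standalone sketch of the same arithmetic with made-up register contents:

	#include <stdio.h>
	#include <stdint.h>

	int main(void)
	{
		/* hypothetical register reads and clock rate, illustration only */
		uint32_t tim12 = 0x40000000;		/* low 32 bits of counter */
		uint32_t tim34 = 0x00000002;		/* high 32 bits of counter */
		unsigned long freq = 24000000;		/* 24 MHz timer clock */
		unsigned int timeout = 600;		/* wdd->timeout in seconds */

		uint64_t counter = ((uint64_t)tim34 << 32) | tim12;
		uint64_t elapsed = counter / freq;	/* seconds already counted */

		printf("elapsed %llu s, time left %llu s\n",
		       (unsigned long long)elapsed,
		       (unsigned long long)(timeout - elapsed));
		return 0;
	}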
diff --git a/drivers/watchdog/dw_wdt.c b/drivers/watchdog/dw_wdt.c
index a46f5c7..ee4f86b 100644
--- a/drivers/watchdog/dw_wdt.c
+++ b/drivers/watchdog/dw_wdt.c
@@ -8,7 +8,7 @@
  * 2 of the License, or (at your option) any later version.
  *
  * This file implements a driver for the Synopsys DesignWare watchdog device
- * in the many ARM subsystems. The watchdog has 16 different timeout periods
+ * in many subsystems. The watchdog has 16 different timeout periods
  * and these are a function of the input clock frequency.
  *
  * The DesignWare watchdog cannot be stopped once it has been started so we
diff --git a/drivers/watchdog/gpio_wdt.c b/drivers/watchdog/gpio_wdt.c
new file mode 100644
index 0000000..220a9e0
--- /dev/null
+++ b/drivers/watchdog/gpio_wdt.c
@@ -0,0 +1,254 @@
+/*
+ * Driver for watchdog device controlled through GPIO-line
+ *
+ * Author: 2013, Alexander Shiyan <shc_work@mail.ru>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include <linux/err.h>
+#include <linux/delay.h>
+#include <linux/module.h>
+#include <linux/notifier.h>
+#include <linux/of_gpio.h>
+#include <linux/platform_device.h>
+#include <linux/reboot.h>
+#include <linux/watchdog.h>
+
+#define SOFT_TIMEOUT_MIN	1
+#define SOFT_TIMEOUT_DEF	60
+#define SOFT_TIMEOUT_MAX	0xffff
+
+enum {
+	HW_ALGO_TOGGLE,
+	HW_ALGO_LEVEL,
+};
+
+struct gpio_wdt_priv {
+	int			gpio;
+	bool			active_low;
+	bool			state;
+	unsigned int		hw_algo;
+	unsigned int		hw_margin;
+	unsigned long		last_jiffies;
+	struct notifier_block	notifier;
+	struct timer_list	timer;
+	struct watchdog_device	wdd;
+};
+
+static void gpio_wdt_disable(struct gpio_wdt_priv *priv)
+{
+	gpio_set_value_cansleep(priv->gpio, !priv->active_low);
+
+	/* Put GPIO back to tristate */
+	if (priv->hw_algo == HW_ALGO_TOGGLE)
+		gpio_direction_input(priv->gpio);
+}
+
+static int gpio_wdt_start(struct watchdog_device *wdd)
+{
+	struct gpio_wdt_priv *priv = watchdog_get_drvdata(wdd);
+
+	priv->state = priv->active_low;
+	gpio_direction_output(priv->gpio, priv->state);
+	priv->last_jiffies = jiffies;
+	mod_timer(&priv->timer, priv->last_jiffies + priv->hw_margin);
+
+	return 0;
+}
+
+static int gpio_wdt_stop(struct watchdog_device *wdd)
+{
+	struct gpio_wdt_priv *priv = watchdog_get_drvdata(wdd);
+
+	mod_timer(&priv->timer, 0);
+	gpio_wdt_disable(priv);
+
+	return 0;
+}
+
+static int gpio_wdt_ping(struct watchdog_device *wdd)
+{
+	struct gpio_wdt_priv *priv = watchdog_get_drvdata(wdd);
+
+	priv->last_jiffies = jiffies;
+
+	return 0;
+}
+
+static int gpio_wdt_set_timeout(struct watchdog_device *wdd, unsigned int t)
+{
+	wdd->timeout = t;
+
+	return gpio_wdt_ping(wdd);
+}
+
+static void gpio_wdt_hwping(unsigned long data)
+{
+	struct watchdog_device *wdd = (struct watchdog_device *)data;
+	struct gpio_wdt_priv *priv = watchdog_get_drvdata(wdd);
+
+	if (time_after(jiffies, priv->last_jiffies +
+		       msecs_to_jiffies(wdd->timeout * 1000))) {
+		dev_crit(wdd->dev, "Timer expired. System will reboot soon!\n");
+		return;
+	}
+
+	/* Restart timer */
+	mod_timer(&priv->timer, jiffies + priv->hw_margin);
+
+	switch (priv->hw_algo) {
+	case HW_ALGO_TOGGLE:
+		/* Toggle output pin */
+		priv->state = !priv->state;
+		gpio_set_value_cansleep(priv->gpio, priv->state);
+		break;
+	case HW_ALGO_LEVEL:
+		/* Pulse */
+		gpio_set_value_cansleep(priv->gpio, !priv->active_low);
+		udelay(1);
+		gpio_set_value_cansleep(priv->gpio, priv->active_low);
+		break;
+	}
+}
+
+static int gpio_wdt_notify_sys(struct notifier_block *nb, unsigned long code,
+			       void *unused)
+{
+	struct gpio_wdt_priv *priv = container_of(nb, struct gpio_wdt_priv,
+						  notifier);
+
+	mod_timer(&priv->timer, 0);
+
+	switch (code) {
+	case SYS_HALT:
+	case SYS_POWER_OFF:
+		gpio_wdt_disable(priv);
+		break;
+	default:
+		break;
+	}
+
+	return NOTIFY_DONE;
+}
+
+static const struct watchdog_info gpio_wdt_ident = {
+	.options	= WDIOF_MAGICCLOSE | WDIOF_KEEPALIVEPING |
+			  WDIOF_SETTIMEOUT,
+	.identity	= "GPIO Watchdog",
+};
+
+static const struct watchdog_ops gpio_wdt_ops = {
+	.owner		= THIS_MODULE,
+	.start		= gpio_wdt_start,
+	.stop		= gpio_wdt_stop,
+	.ping		= gpio_wdt_ping,
+	.set_timeout	= gpio_wdt_set_timeout,
+};
+
+static int gpio_wdt_probe(struct platform_device *pdev)
+{
+	struct gpio_wdt_priv *priv;
+	enum of_gpio_flags flags;
+	unsigned int hw_margin;
+	unsigned long f = 0;
+	const char *algo;
+	int ret;
+
+	priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
+	if (!priv)
+		return -ENOMEM;
+
+	priv->gpio = of_get_gpio_flags(pdev->dev.of_node, 0, &flags);
+	if (!gpio_is_valid(priv->gpio))
+		return priv->gpio;
+
+	priv->active_low = flags & OF_GPIO_ACTIVE_LOW;
+
+	ret = of_property_read_string(pdev->dev.of_node, "hw_algo", &algo);
+	if (ret)
+		return ret;
+	if (!strncmp(algo, "toggle", 6)) {
+		priv->hw_algo = HW_ALGO_TOGGLE;
+		f = GPIOF_IN;
+	} else if (!strncmp(algo, "level", 5)) {
+		priv->hw_algo = HW_ALGO_LEVEL;
+		f = priv->active_low ? GPIOF_OUT_INIT_HIGH : GPIOF_OUT_INIT_LOW;
+	} else {
+		return -EINVAL;
+	}
+
+	ret = devm_gpio_request_one(&pdev->dev, priv->gpio, f,
+				    dev_name(&pdev->dev));
+	if (ret)
+		return ret;
+
+	ret = of_property_read_u32(pdev->dev.of_node,
+				   "hw_margin_ms", &hw_margin);
+	if (ret)
+		return ret;
+	/* Disallow values lower than 2 and higher than 65535 ms */
+	if (hw_margin < 2 || hw_margin > 65535)
+		return -EINVAL;
+
+	/* Use safe value (1/2 of real timeout) */
+	priv->hw_margin = msecs_to_jiffies(hw_margin / 2);
+
+	watchdog_set_drvdata(&priv->wdd, priv);
+
+	priv->wdd.info		= &gpio_wdt_ident;
+	priv->wdd.ops		= &gpio_wdt_ops;
+	priv->wdd.min_timeout	= SOFT_TIMEOUT_MIN;
+	priv->wdd.max_timeout	= SOFT_TIMEOUT_MAX;
+
+	if (watchdog_init_timeout(&priv->wdd, 0, &pdev->dev) < 0)
+		priv->wdd.timeout = SOFT_TIMEOUT_DEF;
+
+	setup_timer(&priv->timer, gpio_wdt_hwping, (unsigned long)&priv->wdd);
+
+	ret = watchdog_register_device(&priv->wdd);
+	if (ret)
+		return ret;
+
+	priv->notifier.notifier_call = gpio_wdt_notify_sys;
+	ret = register_reboot_notifier(&priv->notifier);
+	if (ret)
+		watchdog_unregister_device(&priv->wdd);
+
+	return ret;
+}
+
+static int gpio_wdt_remove(struct platform_device *pdev)
+{
+	struct gpio_wdt_priv *priv = platform_get_drvdata(pdev);
+
+	del_timer_sync(&priv->timer);
+	unregister_reboot_notifier(&priv->notifier);
+	watchdog_unregister_device(&priv->wdd);
+
+	return 0;
+}
+
+static const struct of_device_id gpio_wdt_dt_ids[] = {
+	{ .compatible = "linux,wdt-gpio", },
+	{ }
+};
+MODULE_DEVICE_TABLE(of, gpio_wdt_dt_ids);
+
+static struct platform_driver gpio_wdt_driver = {
+	.driver	= {
+		.name		= "gpio-wdt",
+		.owner		= THIS_MODULE,
+		.of_match_table	= gpio_wdt_dt_ids,
+	},
+	.probe	= gpio_wdt_probe,
+	.remove	= gpio_wdt_remove,
+};
+module_platform_driver(gpio_wdt_driver);
+
+MODULE_AUTHOR("Alexander Shiyan <shc_work@mail.ru>");
+MODULE_DESCRIPTION("GPIO Watchdog");
+MODULE_LICENSE("GPL");
diff --git a/drivers/watchdog/hpwdt.c b/drivers/watchdog/hpwdt.c
index 45b979d..2b75e8b 100644
--- a/drivers/watchdog/hpwdt.c
+++ b/drivers/watchdog/hpwdt.c
@@ -39,7 +39,7 @@
 #endif /* CONFIG_HPWDT_NMI_DECODING */
 #include <asm/nmi.h>
 
-#define HPWDT_VERSION			"1.3.2"
+#define HPWDT_VERSION			"1.3.3"
 #define SECS_TO_TICKS(secs)		((secs) * 1000 / 128)
 #define TICKS_TO_SECS(ticks)		((ticks) * 128 / 1000)
 #define HPWDT_MAX_TIMER			TICKS_TO_SECS(65535)
@@ -55,7 +55,7 @@
 static unsigned long __iomem *hpwdt_timer_reg;
 static unsigned long __iomem *hpwdt_timer_con;
 
-static DEFINE_PCI_DEVICE_TABLE(hpwdt_devices) = {
+static const struct pci_device_id hpwdt_devices[] = {
 	{ PCI_DEVICE(PCI_VENDOR_ID_COMPAQ, 0xB203) },	/* iLO2 */
 	{ PCI_DEVICE(PCI_VENDOR_ID_HP, 0x3306) },	/* iLO3 */
 	{0},			/* terminate list */
@@ -501,8 +501,13 @@
 				"but unable to determine source.\n");
 		}
 	}
-	panic("An NMI occurred, please see the Integrated "
-		"Management Log for details.\n");
+	panic("An NMI occurred. Depending on your system the reason "
+		"for the NMI is logged in any one of the following "
+		"resources:\n"
+		"1. Integrated Management Log (IML)\n"
+		"2. OA Syslog\n"
+		"3. OA Forward Progress Log\n"
+		"4. iLO Event Log");
 
 out:
 	return NMI_DONE;
diff --git a/drivers/watchdog/i6300esb.c b/drivers/watchdog/i6300esb.c
index a72fe93..25a2bfd 100644
--- a/drivers/watchdog/i6300esb.c
+++ b/drivers/watchdog/i6300esb.c
@@ -334,7 +334,7 @@
 /*
  * Data for PCI driver interface
  */
-static DEFINE_PCI_DEVICE_TABLE(esb_pci_tbl) = {
+static const struct pci_device_id esb_pci_tbl[] = {
 	{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ESB_9), },
 	{ 0, },                 /* End of list */
 };
diff --git a/drivers/watchdog/imx2_wdt.c b/drivers/watchdog/imx2_wdt.c
index b4786bc..dd51d95 100644
--- a/drivers/watchdog/imx2_wdt.c
+++ b/drivers/watchdog/imx2_wdt.c
@@ -2,6 +2,7 @@
  * Watchdog driver for IMX2 and later processors
  *
  *  Copyright (C) 2010 Wolfram Sang, Pengutronix e.K. <w.sang@pengutronix.de>
+ *  Copyright (C) 2014 Freescale Semiconductor, Inc.
  *
  * some parts adapted by similar drivers from Darius Augulis and Vladimir
  * Zapolskiy, additional improvements by Wim Van Sebroeck.
@@ -40,6 +41,7 @@
 #define IMX2_WDT_WCR_WT		(0xFF << 8)	/* -> Watchdog Timeout Field */
 #define IMX2_WDT_WCR_WRE	(1 << 3)	/* -> WDOG Reset Enable */
 #define IMX2_WDT_WCR_WDE	(1 << 2)	/* -> Watchdog Enable */
+#define IMX2_WDT_WCR_WDZST	(1 << 0)	/* -> Watchdog timer Suspend */
 
 #define IMX2_WDT_WSR		0x02		/* Service Register */
 #define IMX2_WDT_SEQ1		0x5555		/* -> service sequence 1 */
@@ -87,6 +89,8 @@
 {
 	u16 val = __raw_readw(imx2_wdt.base + IMX2_WDT_WCR);
 
+	/* Suspend timer in low power mode, write once-only */
+	val |= IMX2_WDT_WCR_WDZST;
 	/* Strip the old watchdog Time-Out value */
 	val &= ~IMX2_WDT_WCR_WT;
 	/* Generate reset if WDOG times out */
diff --git a/drivers/watchdog/moxart_wdt.c b/drivers/watchdog/moxart_wdt.c
index 4166e4d..4aa3a8a 100644
--- a/drivers/watchdog/moxart_wdt.c
+++ b/drivers/watchdog/moxart_wdt.c
@@ -19,6 +19,8 @@
 #include <linux/watchdog.h>
 #include <linux/moduleparam.h>
 
+#include <asm/system_misc.h>
+
 #define REG_COUNT			0x4
 #define REG_MODE			0x8
 #define REG_ENABLE			0xC
@@ -29,8 +31,17 @@
 	unsigned int clock_frequency;
 };
 
+static struct moxart_wdt_dev *moxart_restart_ctx;
+
 static int heartbeat;
 
+static void moxart_wdt_restart(enum reboot_mode reboot_mode, const char *cmd)
+{
+	writel(1, moxart_restart_ctx->base + REG_COUNT);
+	writel(0x5ab9, moxart_restart_ctx->base + REG_MODE);
+	writel(0x03, moxart_restart_ctx->base + REG_ENABLE);
+}
+
 static int moxart_wdt_stop(struct watchdog_device *wdt_dev)
 {
 	struct moxart_wdt_dev *moxart_wdt = watchdog_get_drvdata(wdt_dev);
@@ -125,6 +136,9 @@
 	if (err)
 		return err;
 
+	moxart_restart_ctx = moxart_wdt;
+	arm_pm_restart = moxart_wdt_restart;
+
 	dev_dbg(dev, "Watchdog enabled (heartbeat=%d sec, nowayout=%d)\n",
 		moxart_wdt->dev.timeout, nowayout);
 
@@ -135,6 +149,7 @@
 {
 	struct moxart_wdt_dev *moxart_wdt = platform_get_drvdata(pdev);
 
+	arm_pm_restart = NULL;
 	moxart_wdt_stop(&moxart_wdt->dev);
 	watchdog_unregister_device(&moxart_wdt->dev);
 
diff --git a/drivers/watchdog/mpc8xxx_wdt.c b/drivers/watchdog/mpc8xxx_wdt.c
index d821520..c1f65b4 100644
--- a/drivers/watchdog/mpc8xxx_wdt.c
+++ b/drivers/watchdog/mpc8xxx_wdt.c
@@ -73,9 +73,7 @@
  * to 0
  */
 static int prescale = 1;
-static unsigned int timeout_sec;
 
-static unsigned long wdt_is_open;
 static DEFINE_SPINLOCK(wdt_spinlock);
 
 static void mpc8xxx_wdt_keepalive(void)
@@ -87,39 +85,23 @@
 	spin_unlock(&wdt_spinlock);
 }
 
+static struct watchdog_device mpc8xxx_wdt_dev;
 static void mpc8xxx_wdt_timer_ping(unsigned long arg);
-static DEFINE_TIMER(wdt_timer, mpc8xxx_wdt_timer_ping, 0, 0);
+static DEFINE_TIMER(wdt_timer, mpc8xxx_wdt_timer_ping, 0,
+		(unsigned long)&mpc8xxx_wdt_dev);
 
 static void mpc8xxx_wdt_timer_ping(unsigned long arg)
 {
+	struct watchdog_device *w = (struct watchdog_device *)arg;
+
 	mpc8xxx_wdt_keepalive();
 	/* We're pinging it twice faster than needed, just to be sure. */
-	mod_timer(&wdt_timer, jiffies + HZ * timeout_sec / 2);
+	mod_timer(&wdt_timer, jiffies + HZ * w->timeout / 2);
 }
 
-static void mpc8xxx_wdt_pr_warn(const char *msg)
-{
-	pr_crit("%s, expect the %s soon!\n", msg,
-		reset ? "reset" : "machine check exception");
-}
-
-static ssize_t mpc8xxx_wdt_write(struct file *file, const char __user *buf,
-				 size_t count, loff_t *ppos)
-{
-	if (count)
-		mpc8xxx_wdt_keepalive();
-	return count;
-}
-
-static int mpc8xxx_wdt_open(struct inode *inode, struct file *file)
+static int mpc8xxx_wdt_start(struct watchdog_device *w)
 {
 	u32 tmp = SWCRR_SWEN;
-	if (test_and_set_bit(0, &wdt_is_open))
-		return -EBUSY;
-
-	/* Once we start the watchdog we can't stop it */
-	if (nowayout)
-		__module_get(THIS_MODULE);
 
 	/* Good, fire up the show */
 	if (prescale)
@@ -133,59 +115,37 @@
 
 	del_timer_sync(&wdt_timer);
 
-	return nonseekable_open(inode, file);
-}
-
-static int mpc8xxx_wdt_release(struct inode *inode, struct file *file)
-{
-	if (!nowayout)
-		mpc8xxx_wdt_timer_ping(0);
-	else
-		mpc8xxx_wdt_pr_warn("watchdog closed");
-	clear_bit(0, &wdt_is_open);
 	return 0;
 }
 
-static long mpc8xxx_wdt_ioctl(struct file *file, unsigned int cmd,
-							unsigned long arg)
+static int mpc8xxx_wdt_ping(struct watchdog_device *w)
 {
-	void __user *argp = (void __user *)arg;
-	int __user *p = argp;
-	static const struct watchdog_info ident = {
-		.options = WDIOF_KEEPALIVEPING,
-		.firmware_version = 1,
-		.identity = "MPC8xxx",
-	};
-
-	switch (cmd) {
-	case WDIOC_GETSUPPORT:
-		return copy_to_user(argp, &ident, sizeof(ident)) ? -EFAULT : 0;
-	case WDIOC_GETSTATUS:
-	case WDIOC_GETBOOTSTATUS:
-		return put_user(0, p);
-	case WDIOC_KEEPALIVE:
-		mpc8xxx_wdt_keepalive();
-		return 0;
-	case WDIOC_GETTIMEOUT:
-		return put_user(timeout_sec, p);
-	default:
-		return -ENOTTY;
-	}
+	mpc8xxx_wdt_keepalive();
+	return 0;
 }
 
-static const struct file_operations mpc8xxx_wdt_fops = {
-	.owner		= THIS_MODULE,
-	.llseek		= no_llseek,
-	.write		= mpc8xxx_wdt_write,
-	.unlocked_ioctl	= mpc8xxx_wdt_ioctl,
-	.open		= mpc8xxx_wdt_open,
-	.release	= mpc8xxx_wdt_release,
+static int mpc8xxx_wdt_stop(struct watchdog_device *w)
+{
+	mod_timer(&wdt_timer, jiffies);
+	return 0;
+}
+
+static struct watchdog_info mpc8xxx_wdt_info = {
+	.options = WDIOF_KEEPALIVEPING,
+	.firmware_version = 1,
+	.identity = "MPC8xxx",
 };
 
-static struct miscdevice mpc8xxx_wdt_miscdev = {
-	.minor	= WATCHDOG_MINOR,
-	.name	= "watchdog",
-	.fops	= &mpc8xxx_wdt_fops,
+static struct watchdog_ops mpc8xxx_wdt_ops = {
+	.owner = THIS_MODULE,
+	.start = mpc8xxx_wdt_start,
+	.ping = mpc8xxx_wdt_ping,
+	.stop = mpc8xxx_wdt_stop,
+};
+
+static struct watchdog_device mpc8xxx_wdt_dev = {
+	.info = &mpc8xxx_wdt_info,
+	.ops = &mpc8xxx_wdt_ops,
 };
 
 static const struct of_device_id mpc8xxx_wdt_match[];
@@ -197,6 +157,7 @@
 	const struct mpc8xxx_wdt_type *wdt_type;
 	u32 freq = fsl_get_sys_freq();
 	bool enabled;
+	unsigned int timeout_sec;
 
 	match = of_match_device(mpc8xxx_wdt_match, &ofdev->dev);
 	if (!match)
@@ -223,6 +184,7 @@
 	else
 		timeout_sec = timeout / freq;
 
+	mpc8xxx_wdt_dev.timeout = timeout_sec;
 #ifdef MODULE
 	ret = mpc8xxx_wdt_init_late();
 	if (ret)
@@ -238,7 +200,7 @@
 	 * userspace handles it.
 	 */
 	if (enabled)
-		mpc8xxx_wdt_timer_ping(0);
+		mod_timer(&wdt_timer, jiffies);
 	return 0;
 err_unmap:
 	iounmap(wd_base);
@@ -248,9 +210,10 @@
 
 static int mpc8xxx_wdt_remove(struct platform_device *ofdev)
 {
-	mpc8xxx_wdt_pr_warn("watchdog removed");
+	pr_crit("Watchdog removed, expect the %s soon!\n",
+		reset ? "reset" : "machine check exception");
 	del_timer_sync(&wdt_timer);
-	misc_deregister(&mpc8xxx_wdt_miscdev);
+	watchdog_unregister_device(&mpc8xxx_wdt_dev);
 	iounmap(wd_base);
 
 	return 0;
@@ -302,10 +265,11 @@
 	if (!wd_base)
 		return -ENODEV;
 
-	ret = misc_register(&mpc8xxx_wdt_miscdev);
+	watchdog_set_nowayout(&mpc8xxx_wdt_dev, nowayout);
+
+	ret = watchdog_register_device(&mpc8xxx_wdt_dev);
 	if (ret) {
-		pr_err("cannot register miscdev on minor=%d (err=%d)\n",
-		       WATCHDOG_MINOR, ret);
+		pr_err("cannot register watchdog device (err=%d)\n", ret);
 		return ret;
 	}
 	return 0;
diff --git a/drivers/watchdog/nv_tco.c b/drivers/watchdog/nv_tco.c
index 231e5b9..0b9ec61 100644
--- a/drivers/watchdog/nv_tco.c
+++ b/drivers/watchdog/nv_tco.c
@@ -289,7 +289,7 @@
  * register a pci_driver, because someone else might one day
  * want to register another driver on the same PCI id.
  */
-static DEFINE_PCI_DEVICE_TABLE(tco_pci_tbl) = {
+static const struct pci_device_id tco_pci_tbl[] = {
 	{ PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP51_SMBUS,
 	  PCI_ANY_ID, PCI_ANY_ID, },
 	{ PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP55_SMBUS,
diff --git a/drivers/watchdog/pcwd_pci.c b/drivers/watchdog/pcwd_pci.c
index b4864f2..c0d07ee 100644
--- a/drivers/watchdog/pcwd_pci.c
+++ b/drivers/watchdog/pcwd_pci.c
@@ -801,7 +801,7 @@
 	cards_found--;
 }
 
-static DEFINE_PCI_DEVICE_TABLE(pcipcwd_pci_tbl) = {
+static const struct pci_device_id pcipcwd_pci_tbl[] = {
 	{ PCI_VENDOR_ID_QUICKLOGIC, PCI_DEVICE_ID_WATCHDOG_PCIPCWD,
 		PCI_ANY_ID, PCI_ANY_ID, },
 	{ 0 },			/* End of list */
diff --git a/drivers/watchdog/s3c2410_wdt.c b/drivers/watchdog/s3c2410_wdt.c
index 7d8fd04..aec946d 100644
--- a/drivers/watchdog/s3c2410_wdt.c
+++ b/drivers/watchdog/s3c2410_wdt.c
@@ -40,6 +40,8 @@
 #include <linux/slab.h>
 #include <linux/err.h>
 #include <linux/of.h>
+#include <linux/mfd/syscon.h>
+#include <linux/regmap.h>
 
 #define S3C2410_WTCON		0x00
 #define S3C2410_WTDAT		0x04
@@ -60,6 +62,16 @@
 #define CONFIG_S3C2410_WATCHDOG_ATBOOT		(0)
 #define CONFIG_S3C2410_WATCHDOG_DEFAULT_TIME	(15)
 
+#define EXYNOS5_RST_STAT_REG_OFFSET		0x0404
+#define EXYNOS5_WDT_DISABLE_REG_OFFSET		0x0408
+#define EXYNOS5_WDT_MASK_RESET_REG_OFFSET	0x040c
+#define QUIRK_HAS_PMU_CONFIG			(1 << 0)
+#define QUIRK_HAS_RST_STAT			(1 << 1)
+
+/* These quirks require that we have a PMU register map */
+#define QUIRKS_HAVE_PMUREG			(QUIRK_HAS_PMU_CONFIG | \
+						 QUIRK_HAS_RST_STAT)
+
 static bool nowayout	= WATCHDOG_NOWAYOUT;
 static int tmr_margin;
 static int tmr_atboot	= CONFIG_S3C2410_WATCHDOG_ATBOOT;
@@ -83,6 +95,30 @@
 			"0 to reboot (default 0)");
 MODULE_PARM_DESC(debug, "Watchdog debug, set to >1 for debug (default 0)");
 
+/**
+ * struct s3c2410_wdt_variant - Per-variant config data
+ *
+ * @disable_reg: Offset in pmureg for the register that disables the watchdog
+ * timer reset functionality.
+ * @mask_reset_reg: Offset in pmureg for the register that masks the watchdog
+ * timer reset functionality.
+ * @mask_bit: Bit number for the watchdog timer in the disable register and the
+ * mask reset register.
+ * @rst_stat_reg: Offset in pmureg for the register that has the reset status.
+ * @rst_stat_bit: Bit number in the rst_stat register indicating a watchdog
+ * reset.
+ * @quirks: A bitfield of quirks.
+ */
+
+struct s3c2410_wdt_variant {
+	int disable_reg;
+	int mask_reset_reg;
+	int mask_bit;
+	int rst_stat_reg;
+	int rst_stat_bit;
+	u32 quirks;
+};
+
 struct s3c2410_wdt {
 	struct device		*dev;
 	struct clk		*clock;
@@ -93,8 +129,54 @@
 	unsigned long		wtdat_save;
 	struct watchdog_device	wdt_device;
 	struct notifier_block	freq_transition;
+	struct s3c2410_wdt_variant *drv_data;
+	struct regmap *pmureg;
 };
 
+static const struct s3c2410_wdt_variant drv_data_s3c2410 = {
+	.quirks = 0
+};
+
+#ifdef CONFIG_OF
+static const struct s3c2410_wdt_variant drv_data_exynos5250  = {
+	.disable_reg = EXYNOS5_WDT_DISABLE_REG_OFFSET,
+	.mask_reset_reg = EXYNOS5_WDT_MASK_RESET_REG_OFFSET,
+	.mask_bit = 20,
+	.rst_stat_reg = EXYNOS5_RST_STAT_REG_OFFSET,
+	.rst_stat_bit = 20,
+	.quirks = QUIRK_HAS_PMU_CONFIG | QUIRK_HAS_RST_STAT,
+};
+
+static const struct s3c2410_wdt_variant drv_data_exynos5420 = {
+	.disable_reg = EXYNOS5_WDT_DISABLE_REG_OFFSET,
+	.mask_reset_reg = EXYNOS5_WDT_MASK_RESET_REG_OFFSET,
+	.mask_bit = 0,
+	.rst_stat_reg = EXYNOS5_RST_STAT_REG_OFFSET,
+	.rst_stat_bit = 9,
+	.quirks = QUIRK_HAS_PMU_CONFIG | QUIRK_HAS_RST_STAT,
+};
+
+static const struct of_device_id s3c2410_wdt_match[] = {
+	{ .compatible = "samsung,s3c2410-wdt",
+	  .data = &drv_data_s3c2410 },
+	{ .compatible = "samsung,exynos5250-wdt",
+	  .data = &drv_data_exynos5250 },
+	{ .compatible = "samsung,exynos5420-wdt",
+	  .data = &drv_data_exynos5420 },
+	{},
+};
+MODULE_DEVICE_TABLE(of, s3c2410_wdt_match);
+#endif
+
+static const struct platform_device_id s3c2410_wdt_ids[] = {
+	{
+		.name = "s3c2410-wdt",
+		.driver_data = (unsigned long)&drv_data_s3c2410,
+	},
+	{}
+};
+MODULE_DEVICE_TABLE(platform, s3c2410_wdt_ids);
+
 /* watchdog control routines */
 
 #define DBG(fmt, ...)					\
@@ -110,6 +192,35 @@
 	return container_of(nb, struct s3c2410_wdt, freq_transition);
 }
 
+static int s3c2410wdt_mask_and_disable_reset(struct s3c2410_wdt *wdt, bool mask)
+{
+	int ret;
+	u32 mask_val = 1 << wdt->drv_data->mask_bit;
+	u32 val = 0;
+
+	/* No need to do anything if no PMU CONFIG needed */
+	if (!(wdt->drv_data->quirks & QUIRK_HAS_PMU_CONFIG))
+		return 0;
+
+	if (mask)
+		val = mask_val;
+
+	ret = regmap_update_bits(wdt->pmureg,
+			wdt->drv_data->disable_reg,
+			mask_val, val);
+	if (ret < 0)
+		goto error;
+
+	ret = regmap_update_bits(wdt->pmureg,
+			wdt->drv_data->mask_reset_reg,
+			mask_val, val);
+ error:
+	if (ret < 0)
+		dev_err(wdt->dev, "failed to update reg(%d)\n", ret);
+
+	return ret;
+}
+
 static int s3c2410wdt_keepalive(struct watchdog_device *wdd)
 {
 	struct s3c2410_wdt *wdt = watchdog_get_drvdata(wdd);
@@ -188,7 +299,7 @@
 	if (timeout < 1)
 		return -EINVAL;
 
-	freq /= 128;
+	freq = DIV_ROUND_UP(freq, 128);
 	count = timeout * freq;
 
 	DBG("%s: count=%d, timeout=%d, freq=%lu\n",
@@ -200,21 +311,18 @@
 	*/
 
 	if (count >= 0x10000) {
-		for (divisor = 1; divisor <= 0x100; divisor++) {
-			if ((count / divisor) < 0x10000)
-				break;
-		}
+		divisor = DIV_ROUND_UP(count, 0xffff);
 
-		if ((count / divisor) >= 0x10000) {
+		if (divisor > 0x100) {
 			dev_err(wdt->dev, "timeout %d too big\n", timeout);
 			return -EINVAL;
 		}
 	}
 
 	DBG("%s: timeout=%d, divisor=%d, count=%d (%08x)\n",
-	    __func__, timeout, divisor, count, count/divisor);
+	    __func__, timeout, divisor, count, DIV_ROUND_UP(count, divisor));
 
-	count /= divisor;
+	count = DIV_ROUND_UP(count, divisor);
 	wdt->count = count;
 
 	/* update the pre-scaler */
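Switching the count and divisor maths to DIV_ROUND_UP means the programmed period can no longer come out shorter than the requested timeout; a standalone sketch of the corrected computation with an assumed PCLK of 66 MHz (value chosen only for illustration):

	#include <stdio.h>

	#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))

	int main(void)
	{
		unsigned long pclk = 66000000;	/* assumed input clock */
		unsigned int timeout = 30;	/* requested timeout, seconds */
		unsigned int divisor = 1;
		unsigned long freq, count;

		freq = DIV_ROUND_UP(pclk, 128);		/* 515625 */
		count = timeout * freq;			/* 15468750 */

		if (count >= 0x10000)
			divisor = DIV_ROUND_UP(count, 0xffff);	/* 237 */

		count = DIV_ROUND_UP(count, divisor);	/* 65269, fits in WTCNT */
		printf("divisor=%u count=%lu\n", divisor, count);
		return 0;
	}

The old plain divisions could each shave a fraction of a tick off the period; rounding up at every step trades that for a period at most marginally longer than requested.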
@@ -264,7 +372,7 @@
 	return IRQ_HANDLED;
 }
 
-#ifdef CONFIG_CPU_FREQ
+#ifdef CONFIG_ARM_S3C24XX_CPUFREQ
 
 static int s3c2410wdt_cpufreq_transition(struct notifier_block *nb,
 					  unsigned long val, void *data)
@@ -331,6 +439,37 @@
 }
 #endif
 
+static inline unsigned int s3c2410wdt_get_bootstatus(struct s3c2410_wdt *wdt)
+{
+	unsigned int rst_stat;
+	int ret;
+
+	if (!(wdt->drv_data->quirks & QUIRK_HAS_RST_STAT))
+		return 0;
+
+	ret = regmap_read(wdt->pmureg, wdt->drv_data->rst_stat_reg, &rst_stat);
+	if (ret)
+		dev_warn(wdt->dev, "Couldn't get RST_STAT register\n");
+	else if (rst_stat & BIT(wdt->drv_data->rst_stat_bit))
+		return WDIOF_CARDRESET;
+
+	return 0;
+}
+
+/* s3c2410_get_wdt_driver_data */
+static inline struct s3c2410_wdt_variant *
+get_wdt_drv_data(struct platform_device *pdev)
+{
+	if (pdev->dev.of_node) {
+		const struct of_device_id *match;
+		match = of_match_node(s3c2410_wdt_match, pdev->dev.of_node);
+		return (struct s3c2410_wdt_variant *)match->data;
+	} else {
+		return (struct s3c2410_wdt_variant *)
+			platform_get_device_id(pdev)->driver_data;
+	}
+}
+
 static int s3c2410wdt_probe(struct platform_device *pdev)
 {
 	struct device *dev;
@@ -353,6 +492,16 @@
 	spin_lock_init(&wdt->lock);
 	wdt->wdt_device = s3c2410_wdd;
 
+	wdt->drv_data = get_wdt_drv_data(pdev);
+	if (wdt->drv_data->quirks & QUIRKS_HAVE_PMUREG) {
+		wdt->pmureg = syscon_regmap_lookup_by_phandle(dev->of_node,
+						"samsung,syscon-phandle");
+		if (IS_ERR(wdt->pmureg)) {
+			dev_err(dev, "syscon regmap lookup failed.\n");
+			return PTR_ERR(wdt->pmureg);
+		}
+	}
+
 	wdt_irq = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
 	if (wdt_irq == NULL) {
 		dev_err(dev, "no irq resource specified\n");
@@ -415,12 +564,18 @@
 
 	watchdog_set_nowayout(&wdt->wdt_device, nowayout);
 
+	wdt->wdt_device.bootstatus = s3c2410wdt_get_bootstatus(wdt);
+
 	ret = watchdog_register_device(&wdt->wdt_device);
 	if (ret) {
 		dev_err(dev, "cannot register watchdog (%d)\n", ret);
 		goto err_cpufreq;
 	}
 
+	ret = s3c2410wdt_mask_and_disable_reset(wdt, false);
+	if (ret < 0)
+		goto err_unregister;
+
 	if (tmr_atboot && started == 0) {
 		dev_info(dev, "starting watchdog timer\n");
 		s3c2410wdt_start(&wdt->wdt_device);
@@ -445,6 +600,9 @@
 
 	return 0;
 
+ err_unregister:
+	watchdog_unregister_device(&wdt->wdt_device);
+
  err_cpufreq:
 	s3c2410wdt_cpufreq_deregister(wdt);
 
@@ -458,8 +616,13 @@
 
 static int s3c2410wdt_remove(struct platform_device *dev)
 {
+	int ret;
 	struct s3c2410_wdt *wdt = platform_get_drvdata(dev);
 
+	ret = s3c2410wdt_mask_and_disable_reset(wdt, true);
+	if (ret < 0)
+		return ret;
+
 	watchdog_unregister_device(&wdt->wdt_device);
 
 	s3c2410wdt_cpufreq_deregister(wdt);
@@ -474,6 +637,8 @@
 {
 	struct s3c2410_wdt *wdt = platform_get_drvdata(dev);
 
+	s3c2410wdt_mask_and_disable_reset(wdt, true);
+
 	s3c2410wdt_stop(&wdt->wdt_device);
 }
 
@@ -481,12 +646,17 @@
 
 static int s3c2410wdt_suspend(struct device *dev)
 {
+	int ret;
 	struct s3c2410_wdt *wdt = dev_get_drvdata(dev);
 
 	/* Save watchdog state, and turn it off. */
 	wdt->wtcon_save = readl(wdt->reg_base + S3C2410_WTCON);
 	wdt->wtdat_save = readl(wdt->reg_base + S3C2410_WTDAT);
 
+	ret = s3c2410wdt_mask_and_disable_reset(wdt, true);
+	if (ret < 0)
+		return ret;
+
 	/* Note that WTCNT doesn't need to be saved. */
 	s3c2410wdt_stop(&wdt->wdt_device);
 
@@ -495,6 +665,7 @@
 
 static int s3c2410wdt_resume(struct device *dev)
 {
+	int ret;
 	struct s3c2410_wdt *wdt = dev_get_drvdata(dev);
 
 	/* Restore watchdog state. */
@@ -502,6 +673,10 @@
 	writel(wdt->wtdat_save, wdt->reg_base + S3C2410_WTCNT);/* Reset count */
 	writel(wdt->wtcon_save, wdt->reg_base + S3C2410_WTCON);
 
+	ret = s3c2410wdt_mask_and_disable_reset(wdt, false);
+	if (ret < 0)
+		return ret;
+
 	dev_info(dev, "watchdog %sabled\n",
 		(wdt->wtcon_save & S3C2410_WTCON_ENABLE) ? "en" : "dis");
 
@@ -512,18 +687,11 @@
 static SIMPLE_DEV_PM_OPS(s3c2410wdt_pm_ops, s3c2410wdt_suspend,
 			s3c2410wdt_resume);
 
-#ifdef CONFIG_OF
-static const struct of_device_id s3c2410_wdt_match[] = {
-	{ .compatible = "samsung,s3c2410-wdt" },
-	{},
-};
-MODULE_DEVICE_TABLE(of, s3c2410_wdt_match);
-#endif
-
 static struct platform_driver s3c2410wdt_driver = {
 	.probe		= s3c2410wdt_probe,
 	.remove		= s3c2410wdt_remove,
 	.shutdown	= s3c2410wdt_shutdown,
+	.id_table	= s3c2410_wdt_ids,
 	.driver		= {
 		.owner	= THIS_MODULE,
 		.name	= "s3c2410-wdt",
@@ -538,4 +706,3 @@
 	      "Dimitry Andric <dimitry.andric@tomtom.com>");
 MODULE_DESCRIPTION("S3C2410 Watchdog Device Driver");
 MODULE_LICENSE("GPL");
-MODULE_ALIAS("platform:s3c2410-wdt");
diff --git a/drivers/watchdog/sirfsoc_wdt.c b/drivers/watchdog/sirfsoc_wdt.c
index ced3edc..702d078 100644
--- a/drivers/watchdog/sirfsoc_wdt.c
+++ b/drivers/watchdog/sirfsoc_wdt.c
@@ -212,7 +212,7 @@
 		.name = "sirfsoc-wdt",
 		.owner = THIS_MODULE,
 		.pm = &sirfsoc_wdt_pm_ops,
-		.of_match_table	= of_match_ptr(sirfsoc_wdt_of_match),
+		.of_match_table	= sirfsoc_wdt_of_match,
 	},
 	.probe = sirfsoc_wdt_probe,
 	.remove = sirfsoc_wdt_remove,
diff --git a/drivers/watchdog/sp5100_tco.c b/drivers/watchdog/sp5100_tco.c
index ce63a1b..5cca9cd 100644
--- a/drivers/watchdog/sp5100_tco.c
+++ b/drivers/watchdog/sp5100_tco.c
@@ -303,7 +303,7 @@
  * register a pci_driver, because someone else might
  * want to register another driver on the same PCI id.
  */
-static DEFINE_PCI_DEVICE_TABLE(sp5100_tco_pci_tbl) = {
+static const struct pci_device_id sp5100_tco_pci_tbl[] = {
 	{ PCI_VENDOR_ID_ATI, PCI_DEVICE_ID_ATI_SBX00_SMBUS, PCI_ANY_ID,
 	  PCI_ANY_ID, },
 	{ 0, },			/* End of list */
diff --git a/drivers/watchdog/via_wdt.c b/drivers/watchdog/via_wdt.c
index 1a68f76..d2cd9f0 100644
--- a/drivers/watchdog/via_wdt.c
+++ b/drivers/watchdog/via_wdt.c
@@ -239,7 +239,7 @@
 	pci_disable_device(pdev);
 }
 
-static DEFINE_PCI_DEVICE_TABLE(wdt_pci_table) = {
+static const struct pci_device_id wdt_pci_table[] = {
 	{ PCI_DEVICE(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_CX700) },
 	{ PCI_DEVICE(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_VX800) },
 	{ PCI_DEVICE(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_VX855) },
diff --git a/drivers/watchdog/w83627hf_wdt.c b/drivers/watchdog/w83627hf_wdt.c
index e24b210..b1da0c1 100644
--- a/drivers/watchdog/w83627hf_wdt.c
+++ b/drivers/watchdog/w83627hf_wdt.c
@@ -44,10 +44,13 @@
 #define WATCHDOG_NAME "w83627hf/thf/hg/dhg WDT"
 #define WATCHDOG_TIMEOUT 60		/* 60 sec default timeout */
 
-/* You must set this - there is no sane way to probe for this board. */
-static int wdt_io = 0x2E;
-module_param(wdt_io, int, 0);
-MODULE_PARM_DESC(wdt_io, "w83627hf/thf WDT io port (default 0x2E)");
+static int wdt_io;
+static int cr_wdt_timeout;	/* WDT timeout register */
+static int cr_wdt_control;	/* WDT control register */
+
+enum chips { w83627hf, w83627s, w83697hf, w83697ug, w83637hf, w83627thf,
+	     w83687thf, w83627ehf, w83627dhg, w83627uhg, w83667hg, w83627dhg_p,
+	     w83667hg_b, nct6775, nct6776, nct6779 };
 
 static int timeout;			/* in seconds */
 module_param(timeout, int, 0);
@@ -72,6 +75,29 @@
 
 #define W83627HF_LD_WDT		0x08
 
+#define W83627HF_ID		0x52
+#define W83627S_ID		0x59
+#define W83697HF_ID		0x60
+#define W83697UG_ID		0x68
+#define W83637HF_ID		0x70
+#define W83627THF_ID		0x82
+#define W83687THF_ID		0x85
+#define W83627EHF_ID		0x88
+#define W83627DHG_ID		0xa0
+#define W83627UHG_ID		0xa2
+#define W83667HG_ID		0xa5
+#define W83627DHG_P_ID		0xb0
+#define W83667HG_B_ID		0xb3
+#define NCT6775_ID		0xb4
+#define NCT6776_ID		0xc3
+#define NCT6779_ID		0xc5
+
+#define W83627HF_WDT_TIMEOUT	0xf6
+#define W83697HF_WDT_TIMEOUT	0xf4
+
+#define W83627HF_WDT_CONTROL	0xf5
+#define W83697HF_WDT_CONTROL	0xf3
+
 static void superio_outb(int reg, int val)
 {
 	outb(reg, WDT_EFER);
@@ -106,10 +132,7 @@
 	release_region(wdt_io, 2);
 }
 
-/* tyan motherboards seem to set F5 to 0x4C ?
- * So explicitly init to appropriate value. */
-
-static int w83627hf_init(struct watchdog_device *wdog)
+static int w83627hf_init(struct watchdog_device *wdog, enum chips chip)
 {
 	int ret;
 	unsigned char t;
@@ -119,35 +142,83 @@
 		return ret;
 
 	superio_select(W83627HF_LD_WDT);
-	t = superio_inb(0x20);	/* check chip version	*/
-	if (t == 0x82) {	/* W83627THF		*/
-		t = (superio_inb(0x2b) & 0xf7);
-		superio_outb(0x2b, t | 0x04); /* set GPIO3 to WDT0 */
-	} else if (t == 0x88 || t == 0xa0) {	/* W83627EHF / W83627DHG */
-		t = superio_inb(0x2d);
-		superio_outb(0x2d, t & ~0x01);	/* set GPIO5 to WDT0 */
-	}
 
 	/* set CR30 bit 0 to activate GPIO2 */
 	t = superio_inb(0x30);
 	if (!(t & 0x01))
 		superio_outb(0x30, t | 0x01);
 
-	t = superio_inb(0xF6);
+	switch (chip) {
+	case w83627hf:
+	case w83627s:
+		t = superio_inb(0x2B) & ~0x10;
+		superio_outb(0x2B, t); /* set GPIO24 to WDT0 */
+		break;
+	case w83697hf:
+		/* Set pin 119 to WDTO# mode (= CR29, WDT0) */
+		t = superio_inb(0x29) & ~0x60;
+		t |= 0x20;
+		superio_outb(0x29, t);
+		break;
+	case w83697ug:
+		/* Set pin 118 to WDTO# mode */
+		t = superio_inb(0x2b) & ~0x04;
+		superio_outb(0x2b, t);
+		break;
+	case w83627thf:
+		t = (superio_inb(0x2B) & ~0x08) | 0x04;
+		superio_outb(0x2B, t); /* set GPIO3 to WDT0 */
+		break;
+	case w83627dhg:
+	case w83627dhg_p:
+		t = superio_inb(0x2D) & ~0x01; /* PIN77 -> WDT0# */
+		superio_outb(0x2D, t); /* set GPIO5 to WDT0 */
+		t = superio_inb(cr_wdt_control);
+		t |= 0x02;	/* enable the WDTO# output low pulse
+				 * to the KBRST# pin */
+		superio_outb(cr_wdt_control, t);
+		break;
+	case w83637hf:
+		break;
+	case w83687thf:
+		t = superio_inb(0x2C) & ~0x80; /* PIN47 -> WDT0# */
+		superio_outb(0x2C, t);
+		break;
+	case w83627ehf:
+	case w83627uhg:
+	case w83667hg:
+	case w83667hg_b:
+	case nct6775:
+	case nct6776:
+	case nct6779:
+		/*
+		 * These chips have a fixed WDTO# output pin (W83627UHG),
+		 * or support more than one WDTO# output pin.
+		 * Don't touch its configuration, and hope the BIOS
+		 * does the right thing.
+		 */
+		t = superio_inb(cr_wdt_control);
+		t |= 0x02;	/* enable the WDTO# output low pulse
+				 * to the KBRST# pin */
+		superio_outb(cr_wdt_control, t);
+		break;
+	default:
+		break;
+	}
+
+	t = superio_inb(cr_wdt_timeout);
 	if (t != 0) {
 		pr_info("Watchdog already running. Resetting timeout to %d sec\n",
 			wdog->timeout);
-		superio_outb(0xF6, wdog->timeout);
+		superio_outb(cr_wdt_timeout, wdog->timeout);
 	}
 
 	/* set second mode & disable keyboard turning off watchdog */
-	t = superio_inb(0xF5) & ~0x0C;
-	/* enable the WDTO# output low pulse to the KBRST# pin */
-	t |= 0x02;
-	superio_outb(0xF5, t);
+	t = superio_inb(cr_wdt_control) & ~0x0C;
+	superio_outb(cr_wdt_control, t);
 
-	/* disable keyboard & mouse turning off watchdog */
-	t = superio_inb(0xF7) & ~0xC0;
+	/* reset trigger, disable keyboard & mouse turning off watchdog */
+	t = superio_inb(0xF7) & ~0xD0;
 	superio_outb(0xF7, t);
 
 	superio_exit();
@@ -164,7 +235,7 @@
 		return ret;
 
 	superio_select(W83627HF_LD_WDT);
-	superio_outb(0xF6, timeout);
+	superio_outb(cr_wdt_timeout, timeout);
 	superio_exit();
 
 	return 0;
@@ -197,7 +268,7 @@
 		return 0;
 
 	superio_select(W83627HF_LD_WDT);
-	timeleft = superio_inb(0xF6);
+	timeleft = superio_inb(cr_wdt_timeout);
 	superio_exit();
 
 	return timeleft;
@@ -249,16 +320,123 @@
 	.notifier_call = wdt_notify_sys,
 };
 
+static int wdt_find(int addr)
+{
+	u8 val;
+	int ret;
+
+	cr_wdt_timeout = W83627HF_WDT_TIMEOUT;
+	cr_wdt_control = W83627HF_WDT_CONTROL;
+
+	ret = superio_enter();
+	if (ret)
+		return ret;
+	superio_select(W83627HF_LD_WDT);
+	val = superio_inb(0x20);
+	switch (val) {
+	case W83627HF_ID:
+		ret = w83627hf;
+		break;
+	case W83627S_ID:
+		ret = w83627s;
+		break;
+	case W83697HF_ID:
+		ret = w83697hf;
+		cr_wdt_timeout = W83697HF_WDT_TIMEOUT;
+		cr_wdt_control = W83697HF_WDT_CONTROL;
+		break;
+	case W83697UG_ID:
+		ret = w83697ug;
+		cr_wdt_timeout = W83697HF_WDT_TIMEOUT;
+		cr_wdt_control = W83697HF_WDT_CONTROL;
+		break;
+	case W83637HF_ID:
+		ret = w83637hf;
+		break;
+	case W83627THF_ID:
+		ret = w83627thf;
+		break;
+	case W83687THF_ID:
+		ret = w83687thf;
+		break;
+	case W83627EHF_ID:
+		ret = w83627ehf;
+		break;
+	case W83627DHG_ID:
+		ret = w83627dhg;
+		break;
+	case W83627DHG_P_ID:
+		ret = w83627dhg_p;
+		break;
+	case W83627UHG_ID:
+		ret = w83627uhg;
+		break;
+	case W83667HG_ID:
+		ret = w83667hg;
+		break;
+	case W83667HG_B_ID:
+		ret = w83667hg_b;
+		break;
+	case NCT6775_ID:
+		ret = nct6775;
+		break;
+	case NCT6776_ID:
+		ret = nct6776;
+		break;
+	case NCT6779_ID:
+		ret = nct6779;
+		break;
+	case 0xff:
+		ret = -ENODEV;
+		break;
+	default:
+		ret = -ENODEV;
+		pr_err("Unsupported chip ID: 0x%02x\n", val);
+		break;
+	}
+	superio_exit();
+	return ret;
+}
+
 static int __init wdt_init(void)
 {
 	int ret;
+	int chip;
+	const char * const chip_name[] = {
+		"W83627HF",
+		"W83627S",
+		"W83697HF",
+		"W83697UG",
+		"W83637HF",
+		"W83627THF",
+		"W83687THF",
+		"W83627EHF",
+		"W83627DHG",
+		"W83627UHG",
+		"W83667HG",
+		"W83667DHG-P",
+		"W83667HG-B",
+		"NCT6775",
+		"NCT6776",
+		"NCT6779",
+	};
 
-	pr_info("WDT driver for the Winbond(TM) W83627HF/THF/HG/DHG Super I/O chip initialising\n");
+	wdt_io = 0x2e;
+	chip = wdt_find(0x2e);
+	if (chip < 0) {
+		wdt_io = 0x4e;
+		chip = wdt_find(0x4e);
+		if (chip < 0)
+			return chip;
+	}
+
+	pr_info("WDT driver for %s Super I/O chip initialising\n",
+		chip_name[chip]);
 
 	watchdog_init_timeout(&wdt_dev, timeout, NULL);
 	watchdog_set_nowayout(&wdt_dev, nowayout);
 
-	ret = w83627hf_init(&wdt_dev);
+	ret = w83627hf_init(&wdt_dev, chip);
 	if (ret) {
 		pr_err("failed to initialize watchdog (err=%d)\n", ret);
 		return ret;
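wdt_find() identifies the chip through the usual Super I/O extended-function-register handshake: the register index is written to the EFER port (wdt_io) and the value is transferred through the data port one address above it. A minimal sketch of the chip-ID read, with the vendor unlock sequence performed by superio_enter() elided:

	/* Roughly what superio_inb(0x20) boils down to for wdt_io = 0x2e
	 * (illustrative only; superio_enter() must have unlocked the chip first).
	 */
	outb(0x20, wdt_io);		/* select CR20, the chip ID register */
	chip_id = inb(wdt_io + 1);	/* read it back through the data port */
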
diff --git a/drivers/watchdog/watchdog_core.c b/drivers/watchdog/watchdog_core.c
index 461336c..cec9b55 100644
--- a/drivers/watchdog/watchdog_core.c
+++ b/drivers/watchdog/watchdog_core.c
@@ -78,7 +78,7 @@
 	watchdog_check_min_max_timeout(wdd);
 
 	/* try to get the timeout module parameter first */
-	if (!watchdog_timeout_invalid(wdd, timeout_parm)) {
+	if (!watchdog_timeout_invalid(wdd, timeout_parm) && timeout_parm) {
 		wdd->timeout = timeout_parm;
 		return ret;
 	}
@@ -89,7 +89,7 @@
 	if (dev == NULL || dev->of_node == NULL)
 		return ret;
 	of_property_read_u32(dev->of_node, "timeout-sec", &t);
-	if (!watchdog_timeout_invalid(wdd, t))
+	if (!watchdog_timeout_invalid(wdd, t) && t)
 		wdd->timeout = t;
 	else
 		ret = -EINVAL;
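With the two added checks, a zero module parameter or a zero/absent "timeout-sec" property now falls through instead of being accepted, so the driver's own default survives. A minimal sketch of the calling convention this assumes (DEFAULT_TIMEOUT and pdev are illustrative names, not from this patch):

	static unsigned int timeout;		/* 0 means: take DT value or driver default */
	module_param(timeout, uint, 0);

	wdd->timeout = DEFAULT_TIMEOUT;		/* kept when both param and DT are 0/absent */
	watchdog_init_timeout(wdd, timeout, &pdev->dev);
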
diff --git a/drivers/watchdog/wdt_pci.c b/drivers/watchdog/wdt_pci.c
index ee89ba4..3dc578e 100644
--- a/drivers/watchdog/wdt_pci.c
+++ b/drivers/watchdog/wdt_pci.c
@@ -720,7 +720,7 @@
 }
 
 
-static DEFINE_PCI_DEVICE_TABLE(wdtpci_pci_tbl) = {
+static const struct pci_device_id wdtpci_pci_tbl[] = {
 	{
 		.vendor	   = PCI_VENDOR_ID_ACCESSIO,
 		.device	   = PCI_DEVICE_ID_ACCESSIO_WDG_CSM,
diff --git a/drivers/xen/Makefile b/drivers/xen/Makefile
index d75c811..45e00af 100644
--- a/drivers/xen/Makefile
+++ b/drivers/xen/Makefile
@@ -16,7 +16,6 @@
 dom0-$(CONFIG_X86) += pcpu.o
 obj-$(CONFIG_XEN_DOM0)			+= $(dom0-y)
 obj-$(CONFIG_BLOCK)			+= biomerge.o
-obj-$(CONFIG_XEN_XENCOMM)		+= xencomm.o
 obj-$(CONFIG_XEN_BALLOON)		+= xen-balloon.o
 obj-$(CONFIG_XEN_SELFBALLOONING)	+= xen-selfballoon.o
 obj-$(CONFIG_XEN_DEV_EVTCHN)		+= xen-evtchn.o
diff --git a/drivers/xen/events/events_base.c b/drivers/xen/events/events_base.c
index 4672e00..f4a9e33 100644
--- a/drivers/xen/events/events_base.c
+++ b/drivers/xen/events/events_base.c
@@ -862,6 +862,8 @@
 			irq = ret;
 			goto out;
 		}
+		/* New interdomain events are bound to VCPU 0. */
+		bind_evtchn_to_cpu(evtchn, 0);
 	} else {
 		struct irq_info *info = info_for_irq(irq);
 		WARN_ON(info == NULL || info->type != IRQT_EVTCHN);
diff --git a/drivers/xen/grant-table.c b/drivers/xen/grant-table.c
index 1ce1c403..b84e3ab 100644
--- a/drivers/xen/grant-table.c
+++ b/drivers/xen/grant-table.c
@@ -837,7 +837,7 @@
 }
 EXPORT_SYMBOL_GPL(gnttab_max_grant_frames);
 
-int gnttab_setup_auto_xlat_frames(unsigned long addr)
+int gnttab_setup_auto_xlat_frames(phys_addr_t addr)
 {
 	xen_pfn_t *pfn;
 	unsigned int max_nr_gframes = __max_nr_grant_frames();
@@ -849,8 +849,8 @@
 
 	vaddr = xen_remap(addr, PAGE_SIZE * max_nr_gframes);
 	if (vaddr == NULL) {
-		pr_warn("Failed to ioremap gnttab share frames (addr=0x%08lx)!\n",
-			addr);
+		pr_warn("Failed to ioremap gnttab share frames (addr=%pa)!\n",
+			&addr);
 		return -ENOMEM;
 	}
 	pfn = kcalloc(max_nr_gframes, sizeof(pfn[0]), GFP_KERNEL);
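The switch to %pa matters because phys_addr_t may be 32 or 64 bits depending on the configuration; %pa always prints it at the right width but takes its argument by reference, hence the &addr at the call site. A minimal usage sketch (value is illustrative):

	phys_addr_t base = 0xb0000000;			/* illustrative address */

	pr_info("grant table frames at %pa\n", &base);	/* note: pass a pointer */
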
diff --git a/drivers/xen/swiotlb-xen.c b/drivers/xen/swiotlb-xen.c
index 1eac073..ebd8f21 100644
--- a/drivers/xen/swiotlb-xen.c
+++ b/drivers/xen/swiotlb-xen.c
@@ -75,14 +75,32 @@
 
 static u64 start_dma_addr;
 
+/*
+ * Both of these functions should avoid PFN_PHYS because phys_addr_t
+ * can be 32bit when dma_addr_t is 64bit leading to a loss in
+ * information if the shift is done before casting to 64bit.
+ */
 static inline dma_addr_t xen_phys_to_bus(phys_addr_t paddr)
 {
-	return phys_to_machine(XPADDR(paddr)).maddr;
+	unsigned long mfn = pfn_to_mfn(PFN_DOWN(paddr));
+	dma_addr_t dma = (dma_addr_t)mfn << PAGE_SHIFT;
+
+	dma |= paddr & ~PAGE_MASK;
+
+	return dma;
 }
 
 static inline phys_addr_t xen_bus_to_phys(dma_addr_t baddr)
 {
-	return machine_to_phys(XMADDR(baddr)).paddr;
+	unsigned long pfn = mfn_to_pfn(PFN_DOWN(baddr));
+	dma_addr_t dma = (dma_addr_t)pfn << PAGE_SHIFT;
+	phys_addr_t paddr = dma;
+
+	BUG_ON(paddr != dma); /* truncation has occurred, should never happen */
+
+	paddr |= baddr & ~PAGE_MASK;
+
+	return paddr;
 }
 
 static inline dma_addr_t xen_virt_to_bus(void *address)
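A worked example of the truncation the new comment guards against, assuming PAGE_SHIFT == 12, a 32-bit phys_addr_t and a 64-bit dma_addr_t:

	unsigned long mfn = 0x100000;			/* machine frame at the 4 GiB boundary */

	/* PFN_PHYS()-style: the shift happens in 32-bit phys_addr_t and wraps to 0 */
	dma_addr_t wrong = (phys_addr_t)mfn << PAGE_SHIFT;

	/* xen_phys_to_bus()-style: cast to the wider dma_addr_t first, then shift */
	dma_addr_t right = (dma_addr_t)mfn << PAGE_SHIFT;	/* == 0x100000000 */
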
diff --git a/drivers/xen/xen-selfballoon.c b/drivers/xen/xen-selfballoon.c
index 21e18c1..745ad79 100644
--- a/drivers/xen/xen-selfballoon.c
+++ b/drivers/xen/xen-selfballoon.c
@@ -175,6 +175,7 @@
 #endif /* CONFIG_FRONTSWAP */
 
 #define MB2PAGES(mb)	((mb) << (20 - PAGE_SHIFT))
+#define PAGES2MB(pages) ((pages) >> (20 - PAGE_SHIFT))
 
 /*
  * Use current balloon size, the goal (vm_committed_as), and hysteresis
@@ -525,6 +526,7 @@
 int xen_selfballoon_init(bool use_selfballooning, bool use_frontswap_selfshrink)
 {
 	bool enable = false;
+	unsigned long reserve_pages;
 
 	if (!xen_domain())
 		return -ENODEV;
@@ -549,6 +551,26 @@
 	if (!enable)
 		return -ENODEV;
 
+	/*
+	 * Give selfballoon_reserved_mb a default value (10% of total ram pages)
+	 * to make selfballoon not so aggressive.
+	 *
+	 * There are mainly two reasons:
+	 * 1) The original goal_page didn't consider some pages used by kernel
+	 *    space, like slab pages and memory used by device drivers.
+	 *
+	 * 2) The balloon driver may not give back memory to guest OS fast
+	 *    enough when the workload suddenly acquires a lot of physical memory.
+	 *
+	 * In both cases, the guest OS will suffer from memory pressure and
+	 * OOM killer may be triggered.
+	 * By reserving an extra 10% of total ram pages, we can keep the system
+	 * much more reliable and make it respond faster in some cases.
+	 */
+	if (!selfballoon_reserved_mb) {
+		reserve_pages = totalram_pages / 10;
+		selfballoon_reserved_mb = PAGES2MB(reserve_pages);
+	}
 	schedule_delayed_work(&selfballoon_worker, selfballoon_interval * HZ);
 
 	return 0;
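As a worked example of the new default, assume 4 KiB pages (PAGE_SHIFT == 12) on a guest with 4 GiB of RAM: totalram_pages is 1048576, so reserve_pages = 1048576 / 10 = 104857 and PAGES2MB(104857) = 104857 >> 8 = 409, i.e. roughly 409 MB is kept out of selfballooning unless the admin sets selfballoon_reserved_mb explicitly.
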
diff --git a/drivers/xen/xencomm.c b/drivers/xen/xencomm.c
deleted file mode 100644
index 4793fc5..0000000
--- a/drivers/xen/xencomm.c
+++ /dev/null
@@ -1,219 +0,0 @@
-/*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307 USA
- *
- * Copyright (C) IBM Corp. 2006
- *
- * Authors: Hollis Blanchard <hollisb@us.ibm.com>
- */
-
-#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
-
-#include <linux/mm.h>
-#include <linux/slab.h>
-#include <asm/page.h>
-#include <xen/xencomm.h>
-#include <xen/interface/xen.h>
-#include <asm/xen/xencomm.h>	/* for xencomm_is_phys_contiguous() */
-
-static int xencomm_init(struct xencomm_desc *desc,
-			void *buffer, unsigned long bytes)
-{
-	unsigned long recorded = 0;
-	int i = 0;
-
-	while ((recorded < bytes) && (i < desc->nr_addrs)) {
-		unsigned long vaddr = (unsigned long)buffer + recorded;
-		unsigned long paddr;
-		int offset;
-		int chunksz;
-
-		offset = vaddr % PAGE_SIZE; /* handle partial pages */
-		chunksz = min(PAGE_SIZE - offset, bytes - recorded);
-
-		paddr = xencomm_vtop(vaddr);
-		if (paddr == ~0UL) {
-			printk(KERN_DEBUG "%s: couldn't translate vaddr %lx\n",
-			       __func__, vaddr);
-			return -EINVAL;
-		}
-
-		desc->address[i++] = paddr;
-		recorded += chunksz;
-	}
-
-	if (recorded < bytes) {
-		printk(KERN_DEBUG
-		       "%s: could only translate %ld of %ld bytes\n",
-		       __func__, recorded, bytes);
-		return -ENOSPC;
-	}
-
-	/* mark remaining addresses invalid (just for safety) */
-	while (i < desc->nr_addrs)
-		desc->address[i++] = XENCOMM_INVALID;
-
-	desc->magic = XENCOMM_MAGIC;
-
-	return 0;
-}
-
-static struct xencomm_desc *xencomm_alloc(gfp_t gfp_mask,
-					  void *buffer, unsigned long bytes)
-{
-	struct xencomm_desc *desc;
-	unsigned long buffer_ulong = (unsigned long)buffer;
-	unsigned long start = buffer_ulong & PAGE_MASK;
-	unsigned long end = (buffer_ulong + bytes) | ~PAGE_MASK;
-	unsigned long nr_addrs = (end - start + 1) >> PAGE_SHIFT;
-	unsigned long size = sizeof(*desc) +
-		sizeof(desc->address[0]) * nr_addrs;
-
-	/*
-	 * slab allocator returns at least sizeof(void*) aligned pointer.
-	 * When sizeof(*desc) > sizeof(void*), struct xencomm_desc might
-	 * cross page boundary.
-	 */
-	if (sizeof(*desc) > sizeof(void *)) {
-		unsigned long order = get_order(size);
-		desc = (struct xencomm_desc *)__get_free_pages(gfp_mask,
-							       order);
-		if (desc == NULL)
-			return NULL;
-
-		desc->nr_addrs =
-			((PAGE_SIZE << order) - sizeof(struct xencomm_desc)) /
-			sizeof(*desc->address);
-	} else {
-		desc = kmalloc(size, gfp_mask);
-		if (desc == NULL)
-			return NULL;
-
-		desc->nr_addrs = nr_addrs;
-	}
-	return desc;
-}
-
-void xencomm_free(struct xencomm_handle *desc)
-{
-	if (desc && !((ulong)desc & XENCOMM_INLINE_FLAG)) {
-		struct xencomm_desc *desc__ = (struct xencomm_desc *)desc;
-		if (sizeof(*desc__) > sizeof(void *)) {
-			unsigned long size = sizeof(*desc__) +
-				sizeof(desc__->address[0]) * desc__->nr_addrs;
-			unsigned long order = get_order(size);
-			free_pages((unsigned long)__va(desc), order);
-		} else
-			kfree(__va(desc));
-	}
-}
-
-static int xencomm_create(void *buffer, unsigned long bytes,
-			  struct xencomm_desc **ret, gfp_t gfp_mask)
-{
-	struct xencomm_desc *desc;
-	int rc;
-
-	pr_debug("%s: %p[%ld]\n", __func__, buffer, bytes);
-
-	if (bytes == 0) {
-		/* don't create a descriptor; Xen recognizes NULL. */
-		BUG_ON(buffer != NULL);
-		*ret = NULL;
-		return 0;
-	}
-
-	BUG_ON(buffer == NULL); /* 'bytes' is non-zero */
-
-	desc = xencomm_alloc(gfp_mask, buffer, bytes);
-	if (!desc) {
-		printk(KERN_DEBUG "%s failure\n", "xencomm_alloc");
-		return -ENOMEM;
-	}
-
-	rc = xencomm_init(desc, buffer, bytes);
-	if (rc) {
-		printk(KERN_DEBUG "%s failure: %d\n", "xencomm_init", rc);
-		xencomm_free((struct xencomm_handle *)__pa(desc));
-		return rc;
-	}
-
-	*ret = desc;
-	return 0;
-}
-
-static struct xencomm_handle *xencomm_create_inline(void *ptr)
-{
-	unsigned long paddr;
-
-	BUG_ON(!xencomm_is_phys_contiguous((unsigned long)ptr));
-
-	paddr = (unsigned long)xencomm_pa(ptr);
-	BUG_ON(paddr & XENCOMM_INLINE_FLAG);
-	return (struct xencomm_handle *)(paddr | XENCOMM_INLINE_FLAG);
-}
-
-/* "mini" routine, for stack-based communications: */
-static int xencomm_create_mini(void *buffer,
-	unsigned long bytes, struct xencomm_mini *xc_desc,
-	struct xencomm_desc **ret)
-{
-	int rc = 0;
-	struct xencomm_desc *desc;
-	BUG_ON(((unsigned long)xc_desc) % sizeof(*xc_desc) != 0);
-
-	desc = (void *)xc_desc;
-
-	desc->nr_addrs = XENCOMM_MINI_ADDRS;
-
-	rc = xencomm_init(desc, buffer, bytes);
-	if (!rc)
-		*ret = desc;
-
-	return rc;
-}
-
-struct xencomm_handle *xencomm_map(void *ptr, unsigned long bytes)
-{
-	int rc;
-	struct xencomm_desc *desc;
-
-	if (xencomm_is_phys_contiguous((unsigned long)ptr))
-		return xencomm_create_inline(ptr);
-
-	rc = xencomm_create(ptr, bytes, &desc, GFP_KERNEL);
-
-	if (rc || desc == NULL)
-		return NULL;
-
-	return xencomm_pa(desc);
-}
-
-struct xencomm_handle *__xencomm_map_no_alloc(void *ptr, unsigned long bytes,
-			struct xencomm_mini *xc_desc)
-{
-	int rc;
-	struct xencomm_desc *desc = NULL;
-
-	if (xencomm_is_phys_contiguous((unsigned long)ptr))
-		return xencomm_create_inline(ptr);
-
-	rc = xencomm_create_mini(ptr, bytes, xc_desc,
-				&desc);
-
-	if (rc)
-		return NULL;
-
-	return xencomm_pa(desc);
-}
diff --git a/fs/afs/proc.c b/fs/afs/proc.c
index bddc512..24a905b 100644
--- a/fs/afs/proc.c
+++ b/fs/afs/proc.c
@@ -130,8 +130,8 @@
 	if (!proc_afs)
 		goto error_dir;
 
-	if (!proc_create("cells", 0, proc_afs, &afs_proc_cells_fops) ||
-	    !proc_create("rootcell", 0, proc_afs, &afs_proc_rootcell_fops))
+	if (!proc_create("cells", 0644, proc_afs, &afs_proc_cells_fops) ||
+	    !proc_create("rootcell", 0644, proc_afs, &afs_proc_rootcell_fops))
 		goto error_tree;
 
 	_leave(" = 0");
diff --git a/fs/bio-integrity.c b/fs/bio-integrity.c
index fc60b31..0129b78 100644
--- a/fs/bio-integrity.c
+++ b/fs/bio-integrity.c
@@ -114,6 +114,14 @@
 }
 EXPORT_SYMBOL(bio_integrity_free);
 
+static inline unsigned int bip_integrity_vecs(struct bio_integrity_payload *bip)
+{
+	if (bip->bip_slab == BIO_POOL_NONE)
+		return BIP_INLINE_VECS;
+
+	return bvec_nr_vecs(bip->bip_slab);
+}
+
 /**
  * bio_integrity_add_page - Attach integrity metadata
  * @bio:	bio to update
@@ -129,13 +137,12 @@
 	struct bio_integrity_payload *bip = bio->bi_integrity;
 	struct bio_vec *iv;
 
-	if (bip->bip_vcnt >= bvec_nr_vecs(bip->bip_slab)) {
+	if (bip->bip_vcnt >= bip_integrity_vecs(bip)) {
 		printk(KERN_ERR "%s: bip_vec full\n", __func__);
 		return 0;
 	}
 
-	iv = bip_vec_idx(bip, bip->bip_vcnt);
-	BUG_ON(iv == NULL);
+	iv = bip->bip_vec + bip->bip_vcnt;
 
 	iv->bv_page = page;
 	iv->bv_len = len;
@@ -203,6 +210,12 @@
 	return sectors;
 }
 
+static inline unsigned int bio_integrity_bytes(struct blk_integrity *bi,
+					       unsigned int sectors)
+{
+	return bio_integrity_hw_sectors(bi, sectors) * bi->tuple_size;
+}
+
 /**
  * bio_integrity_tag_size - Retrieve integrity tag space
  * @bio:	bio to inspect
@@ -215,13 +228,14 @@
 {
 	struct blk_integrity *bi = bdev_get_integrity(bio->bi_bdev);
 
-	BUG_ON(bio->bi_size == 0);
+	BUG_ON(bio->bi_iter.bi_size == 0);
 
-	return bi->tag_size * (bio->bi_size / bi->sector_size);
+	return bi->tag_size * (bio->bi_iter.bi_size / bi->sector_size);
 }
 EXPORT_SYMBOL(bio_integrity_tag_size);
 
-int bio_integrity_tag(struct bio *bio, void *tag_buf, unsigned int len, int set)
+static int bio_integrity_tag(struct bio *bio, void *tag_buf, unsigned int len,
+			     int set)
 {
 	struct bio_integrity_payload *bip = bio->bi_integrity;
 	struct blk_integrity *bi = bdev_get_integrity(bio->bi_bdev);
@@ -235,9 +249,9 @@
 	nr_sectors = bio_integrity_hw_sectors(bi,
 					DIV_ROUND_UP(len, bi->tag_size));
 
-	if (nr_sectors * bi->tuple_size > bip->bip_size) {
-		printk(KERN_ERR "%s: tag too big for bio: %u > %u\n",
-		       __func__, nr_sectors * bi->tuple_size, bip->bip_size);
+	if (nr_sectors * bi->tuple_size > bip->bip_iter.bi_size) {
+		printk(KERN_ERR "%s: tag too big for bio: %u > %u\n", __func__,
+		       nr_sectors * bi->tuple_size, bip->bip_iter.bi_size);
 		return -1;
 	}
 
@@ -299,29 +313,30 @@
 {
 	struct blk_integrity *bi = bdev_get_integrity(bio->bi_bdev);
 	struct blk_integrity_exchg bix;
-	struct bio_vec *bv;
-	sector_t sector = bio->bi_sector;
-	unsigned int i, sectors, total;
+	struct bio_vec bv;
+	struct bvec_iter iter;
+	sector_t sector = bio->bi_iter.bi_sector;
+	unsigned int sectors, total;
 	void *prot_buf = bio->bi_integrity->bip_buf;
 
 	total = 0;
 	bix.disk_name = bio->bi_bdev->bd_disk->disk_name;
 	bix.sector_size = bi->sector_size;
 
-	bio_for_each_segment(bv, bio, i) {
-		void *kaddr = kmap_atomic(bv->bv_page);
-		bix.data_buf = kaddr + bv->bv_offset;
-		bix.data_size = bv->bv_len;
+	bio_for_each_segment(bv, bio, iter) {
+		void *kaddr = kmap_atomic(bv.bv_page);
+		bix.data_buf = kaddr + bv.bv_offset;
+		bix.data_size = bv.bv_len;
 		bix.prot_buf = prot_buf;
 		bix.sector = sector;
 
 		bi->generate_fn(&bix);
 
-		sectors = bv->bv_len / bi->sector_size;
+		sectors = bv.bv_len / bi->sector_size;
 		sector += sectors;
 		prot_buf += sectors * bi->tuple_size;
 		total += sectors * bi->tuple_size;
-		BUG_ON(total > bio->bi_integrity->bip_size);
+		BUG_ON(total > bio->bi_integrity->bip_iter.bi_size);
 
 		kunmap_atomic(kaddr);
 	}
@@ -386,8 +401,8 @@
 
 	bip->bip_owns_buf = 1;
 	bip->bip_buf = buf;
-	bip->bip_size = len;
-	bip->bip_sector = bio->bi_sector;
+	bip->bip_iter.bi_size = len;
+	bip->bip_iter.bi_sector = bio->bi_iter.bi_sector;
 
 	/* Map it */
 	offset = offset_in_page(buf);
@@ -442,16 +457,18 @@
 	struct blk_integrity *bi = bdev_get_integrity(bio->bi_bdev);
 	struct blk_integrity_exchg bix;
 	struct bio_vec *bv;
-	sector_t sector = bio->bi_integrity->bip_sector;
-	unsigned int i, sectors, total, ret;
+	sector_t sector = bio->bi_integrity->bip_iter.bi_sector;
+	unsigned int sectors, total, ret;
 	void *prot_buf = bio->bi_integrity->bip_buf;
+	int i;
 
 	ret = total = 0;
 	bix.disk_name = bio->bi_bdev->bd_disk->disk_name;
 	bix.sector_size = bi->sector_size;
 
-	bio_for_each_segment(bv, bio, i) {
+	bio_for_each_segment_all(bv, bio, i) {
 		void *kaddr = kmap_atomic(bv->bv_page);
+
 		bix.data_buf = kaddr + bv->bv_offset;
 		bix.data_size = bv->bv_len;
 		bix.prot_buf = prot_buf;
@@ -468,7 +485,7 @@
 		sector += sectors;
 		prot_buf += sectors * bi->tuple_size;
 		total += sectors * bi->tuple_size;
-		BUG_ON(total > bio->bi_integrity->bip_size);
+		BUG_ON(total > bio->bi_integrity->bip_iter.bi_size);
 
 		kunmap_atomic(kaddr);
 	}
@@ -495,7 +512,7 @@
 
 	/* Restore original bio completion handler */
 	bio->bi_end_io = bip->bip_end_io;
-	bio_endio(bio, error);
+	bio_endio_nodec(bio, error);
 }
 
 /**
@@ -533,56 +550,6 @@
 EXPORT_SYMBOL(bio_integrity_endio);
 
 /**
- * bio_integrity_mark_head - Advance bip_vec skip bytes
- * @bip:	Integrity vector to advance
- * @skip:	Number of bytes to advance it
- */
-void bio_integrity_mark_head(struct bio_integrity_payload *bip,
-			     unsigned int skip)
-{
-	struct bio_vec *iv;
-	unsigned int i;
-
-	bip_for_each_vec(iv, bip, i) {
-		if (skip == 0) {
-			bip->bip_idx = i;
-			return;
-		} else if (skip >= iv->bv_len) {
-			skip -= iv->bv_len;
-		} else { /* skip < iv->bv_len) */
-			iv->bv_offset += skip;
-			iv->bv_len -= skip;
-			bip->bip_idx = i;
-			return;
-		}
-	}
-}
-
-/**
- * bio_integrity_mark_tail - Truncate bip_vec to be len bytes long
- * @bip:	Integrity vector to truncate
- * @len:	New length of integrity vector
- */
-void bio_integrity_mark_tail(struct bio_integrity_payload *bip,
-			     unsigned int len)
-{
-	struct bio_vec *iv;
-	unsigned int i;
-
-	bip_for_each_vec(iv, bip, i) {
-		if (len == 0) {
-			bip->bip_vcnt = i;
-			return;
-		} else if (len >= iv->bv_len) {
-			len -= iv->bv_len;
-		} else { /* len < iv->bv_len) */
-			iv->bv_len = len;
-			len = 0;
-		}
-	}
-}
-
-/**
  * bio_integrity_advance - Advance integrity vector
  * @bio:	bio whose integrity vector to update
  * @bytes_done:	number of data bytes that have been completed
@@ -595,13 +562,9 @@
 {
 	struct bio_integrity_payload *bip = bio->bi_integrity;
 	struct blk_integrity *bi = bdev_get_integrity(bio->bi_bdev);
-	unsigned int nr_sectors;
+	unsigned bytes = bio_integrity_bytes(bi, bytes_done >> 9);
 
-	BUG_ON(bip == NULL);
-	BUG_ON(bi == NULL);
-
-	nr_sectors = bio_integrity_hw_sectors(bi, bytes_done >> 9);
-	bio_integrity_mark_head(bip, nr_sectors * bi->tuple_size);
+	bvec_iter_advance(bip->bip_vec, &bip->bip_iter, bytes);
 }
 EXPORT_SYMBOL(bio_integrity_advance);
 
@@ -621,64 +584,13 @@
 {
 	struct bio_integrity_payload *bip = bio->bi_integrity;
 	struct blk_integrity *bi = bdev_get_integrity(bio->bi_bdev);
-	unsigned int nr_sectors;
 
-	BUG_ON(bip == NULL);
-	BUG_ON(bi == NULL);
-	BUG_ON(!bio_flagged(bio, BIO_CLONED));
-
-	nr_sectors = bio_integrity_hw_sectors(bi, sectors);
-	bip->bip_sector = bip->bip_sector + offset;
-	bio_integrity_mark_head(bip, offset * bi->tuple_size);
-	bio_integrity_mark_tail(bip, sectors * bi->tuple_size);
+	bio_integrity_advance(bio, offset << 9);
+	bip->bip_iter.bi_size = bio_integrity_bytes(bi, sectors);
 }
 EXPORT_SYMBOL(bio_integrity_trim);
 
 /**
- * bio_integrity_split - Split integrity metadata
- * @bio:	Protected bio
- * @bp:		Resulting bio_pair
- * @sectors:	Offset
- *
- * Description: Splits an integrity page into a bio_pair.
- */
-void bio_integrity_split(struct bio *bio, struct bio_pair *bp, int sectors)
-{
-	struct blk_integrity *bi;
-	struct bio_integrity_payload *bip = bio->bi_integrity;
-	unsigned int nr_sectors;
-
-	if (bio_integrity(bio) == 0)
-		return;
-
-	bi = bdev_get_integrity(bio->bi_bdev);
-	BUG_ON(bi == NULL);
-	BUG_ON(bip->bip_vcnt != 1);
-
-	nr_sectors = bio_integrity_hw_sectors(bi, sectors);
-
-	bp->bio1.bi_integrity = &bp->bip1;
-	bp->bio2.bi_integrity = &bp->bip2;
-
-	bp->iv1 = bip->bip_vec[bip->bip_idx];
-	bp->iv2 = bip->bip_vec[bip->bip_idx];
-
-	bp->bip1.bip_vec = &bp->iv1;
-	bp->bip2.bip_vec = &bp->iv2;
-
-	bp->iv1.bv_len = sectors * bi->tuple_size;
-	bp->iv2.bv_offset += sectors * bi->tuple_size;
-	bp->iv2.bv_len -= sectors * bi->tuple_size;
-
-	bp->bip1.bip_sector = bio->bi_integrity->bip_sector;
-	bp->bip2.bip_sector = bio->bi_integrity->bip_sector + nr_sectors;
-
-	bp->bip1.bip_vcnt = bp->bip2.bip_vcnt = 1;
-	bp->bip1.bip_idx = bp->bip2.bip_idx = 0;
-}
-EXPORT_SYMBOL(bio_integrity_split);
-
-/**
  * bio_integrity_clone - Callback for cloning bios with integrity metadata
  * @bio:	New bio
  * @bio_src:	Original bio
@@ -702,9 +614,8 @@
 	memcpy(bip->bip_vec, bip_src->bip_vec,
 	       bip_src->bip_vcnt * sizeof(struct bio_vec));
 
-	bip->bip_sector = bip_src->bip_sector;
 	bip->bip_vcnt = bip_src->bip_vcnt;
-	bip->bip_idx = bip_src->bip_idx;
+	bip->bip_iter = bip_src->bip_iter;
 
 	return 0;
 }
diff --git a/fs/bio.c b/fs/bio.c
index 33d79a4..8754e7b 100644
--- a/fs/bio.c
+++ b/fs/bio.c
@@ -38,8 +38,6 @@
  */
 #define BIO_INLINE_VECS		4
 
-static mempool_t *bio_split_pool __read_mostly;
-
 /*
  * if you change this list, also change bvec_alloc or things will
  * break badly! cannot be bigger than what you can fit into an
@@ -273,6 +271,7 @@
 {
 	memset(bio, 0, sizeof(*bio));
 	bio->bi_flags = 1 << BIO_UPTODATE;
+	atomic_set(&bio->bi_remaining, 1);
 	atomic_set(&bio->bi_cnt, 1);
 }
 EXPORT_SYMBOL(bio_init);
@@ -295,9 +294,35 @@
 
 	memset(bio, 0, BIO_RESET_BYTES);
 	bio->bi_flags = flags|(1 << BIO_UPTODATE);
+	atomic_set(&bio->bi_remaining, 1);
 }
 EXPORT_SYMBOL(bio_reset);
 
+static void bio_chain_endio(struct bio *bio, int error)
+{
+	bio_endio(bio->bi_private, error);
+	bio_put(bio);
+}
+
+/**
+ * bio_chain - chain bio completions
+ *
+ * The caller won't have a bi_end_io called when @bio completes - instead,
+ * @parent's bi_end_io won't be called until both @parent and @bio have
+ * completed; the chained bio will also be freed when it completes.
+ *
+ * The caller must not set bi_private or bi_end_io in @bio.
+ */
+void bio_chain(struct bio *bio, struct bio *parent)
+{
+	BUG_ON(bio->bi_private || bio->bi_end_io);
+
+	bio->bi_private = parent;
+	bio->bi_end_io	= bio_chain_endio;
+	atomic_inc(&parent->bi_remaining);
+}
+EXPORT_SYMBOL(bio_chain);
+
 static void bio_alloc_rescue(struct work_struct *work)
 {
 	struct bio_set *bs = container_of(work, struct bio_set, rescue_work);
@@ -473,13 +498,13 @@
 void zero_fill_bio(struct bio *bio)
 {
 	unsigned long flags;
-	struct bio_vec *bv;
-	int i;
+	struct bio_vec bv;
+	struct bvec_iter iter;
 
-	bio_for_each_segment(bv, bio, i) {
-		char *data = bvec_kmap_irq(bv, &flags);
-		memset(data, 0, bv->bv_len);
-		flush_dcache_page(bv->bv_page);
+	bio_for_each_segment(bv, bio, iter) {
+		char *data = bvec_kmap_irq(&bv, &flags);
+		memset(data, 0, bv.bv_len);
+		flush_dcache_page(bv.bv_page);
 		bvec_kunmap_irq(data, &flags);
 	}
 }
@@ -515,51 +540,49 @@
 EXPORT_SYMBOL(bio_phys_segments);
 
 /**
- * 	__bio_clone	-	clone a bio
+ * 	__bio_clone_fast - clone a bio that shares the original bio's biovec
  * 	@bio: destination bio
  * 	@bio_src: bio to clone
  *
  *	Clone a &bio. Caller will own the returned bio, but not
  *	the actual data it points to. Reference count of returned
  * 	bio will be one.
+ *
+ * 	Caller must ensure that @bio_src is not freed before @bio.
  */
-void __bio_clone(struct bio *bio, struct bio *bio_src)
+void __bio_clone_fast(struct bio *bio, struct bio *bio_src)
 {
-	memcpy(bio->bi_io_vec, bio_src->bi_io_vec,
-		bio_src->bi_max_vecs * sizeof(struct bio_vec));
+	BUG_ON(bio->bi_pool && BIO_POOL_IDX(bio) != BIO_POOL_NONE);
 
 	/*
 	 * most users will be overriding ->bi_bdev with a new target,
 	 * so we don't set nor calculate new physical/hw segment counts here
 	 */
-	bio->bi_sector = bio_src->bi_sector;
 	bio->bi_bdev = bio_src->bi_bdev;
 	bio->bi_flags |= 1 << BIO_CLONED;
 	bio->bi_rw = bio_src->bi_rw;
-	bio->bi_vcnt = bio_src->bi_vcnt;
-	bio->bi_size = bio_src->bi_size;
-	bio->bi_idx = bio_src->bi_idx;
+	bio->bi_iter = bio_src->bi_iter;
+	bio->bi_io_vec = bio_src->bi_io_vec;
 }
-EXPORT_SYMBOL(__bio_clone);
+EXPORT_SYMBOL(__bio_clone_fast);
 
 /**
- *	bio_clone_bioset -	clone a bio
+ *	bio_clone_fast - clone a bio that shares the original bio's biovec
  *	@bio: bio to clone
  *	@gfp_mask: allocation priority
  *	@bs: bio_set to allocate from
  *
- * 	Like __bio_clone, only also allocates the returned bio
+ * 	Like __bio_clone_fast, only also allocates the returned bio
  */
-struct bio *bio_clone_bioset(struct bio *bio, gfp_t gfp_mask,
-			     struct bio_set *bs)
+struct bio *bio_clone_fast(struct bio *bio, gfp_t gfp_mask, struct bio_set *bs)
 {
 	struct bio *b;
 
-	b = bio_alloc_bioset(gfp_mask, bio->bi_max_vecs, bs);
+	b = bio_alloc_bioset(gfp_mask, 0, bs);
 	if (!b)
 		return NULL;
 
-	__bio_clone(b, bio);
+	__bio_clone_fast(b, bio);
 
 	if (bio_integrity(bio)) {
 		int ret;
@@ -574,6 +597,79 @@
 
 	return b;
 }
+EXPORT_SYMBOL(bio_clone_fast);
+
+/**
+ * 	bio_clone_bioset - clone a bio
+ * 	@bio_src: bio to clone
+ *	@gfp_mask: allocation priority
+ *	@bs: bio_set to allocate from
+ *
+ *	Clone bio. Caller will own the returned bio, but not the actual data it
+ *	points to. Reference count of returned bio will be one.
+ */
+struct bio *bio_clone_bioset(struct bio *bio_src, gfp_t gfp_mask,
+			     struct bio_set *bs)
+{
+	struct bvec_iter iter;
+	struct bio_vec bv;
+	struct bio *bio;
+
+	/*
+	 * Pre immutable biovecs, __bio_clone() used to just do a memcpy from
+	 * bio_src->bi_io_vec to bio->bi_io_vec.
+	 *
+	 * We can't do that anymore, because:
+	 *
+	 *  - The point of cloning the biovec is to produce a bio with a biovec
+	 *    the caller can modify: bi_idx and bi_bvec_done should be 0.
+	 *
+	 *  - The original bio could've had more than BIO_MAX_PAGES biovecs; if
+	 *    we tried to clone the whole thing bio_alloc_bioset() would fail.
+	 *    But the clone should succeed as long as the number of biovecs we
+	 *    actually need to allocate is fewer than BIO_MAX_PAGES.
+	 *
+	 *  - Lastly, bi_vcnt should not be looked at or relied upon by code
+	 *    that does not own the bio - reason being drivers don't use it for
+	 *    iterating over the biovec anymore, so expecting it to be kept up
+	 *    to date (i.e. for clones that share the parent biovec) is just
+	 *    asking for trouble and would force extra work on
+	 *    __bio_clone_fast() anyways.
+	 */
+
+	bio = bio_alloc_bioset(gfp_mask, bio_segments(bio_src), bs);
+	if (!bio)
+		return NULL;
+
+	bio->bi_bdev		= bio_src->bi_bdev;
+	bio->bi_rw		= bio_src->bi_rw;
+	bio->bi_iter.bi_sector	= bio_src->bi_iter.bi_sector;
+	bio->bi_iter.bi_size	= bio_src->bi_iter.bi_size;
+
+	if (bio->bi_rw & REQ_DISCARD)
+		goto integrity_clone;
+
+	if (bio->bi_rw & REQ_WRITE_SAME) {
+		bio->bi_io_vec[bio->bi_vcnt++] = bio_src->bi_io_vec[0];
+		goto integrity_clone;
+	}
+
+	bio_for_each_segment(bv, bio_src, iter)
+		bio->bi_io_vec[bio->bi_vcnt++] = bv;
+
+integrity_clone:
+	if (bio_integrity(bio_src)) {
+		int ret;
+
+		ret = bio_integrity_clone(bio, bio_src, gfp_mask);
+		if (ret < 0) {
+			bio_put(bio);
+			return NULL;
+		}
+	}
+
+	return bio;
+}
 EXPORT_SYMBOL(bio_clone_bioset);
 
 /**
@@ -612,7 +708,7 @@
 	if (unlikely(bio_flagged(bio, BIO_CLONED)))
 		return 0;
 
-	if (((bio->bi_size + len) >> 9) > max_sectors)
+	if (((bio->bi_iter.bi_size + len) >> 9) > max_sectors)
 		return 0;
 
 	/*
@@ -635,8 +731,9 @@
 					   simulate merging updated prev_bvec
 					   as new bvec. */
 					.bi_bdev = bio->bi_bdev,
-					.bi_sector = bio->bi_sector,
-					.bi_size = bio->bi_size - prev_bv_len,
+					.bi_sector = bio->bi_iter.bi_sector,
+					.bi_size = bio->bi_iter.bi_size -
+						prev_bv_len,
 					.bi_rw = bio->bi_rw,
 				};
 
@@ -684,8 +781,8 @@
 	if (q->merge_bvec_fn) {
 		struct bvec_merge_data bvm = {
 			.bi_bdev = bio->bi_bdev,
-			.bi_sector = bio->bi_sector,
-			.bi_size = bio->bi_size,
+			.bi_sector = bio->bi_iter.bi_sector,
+			.bi_size = bio->bi_iter.bi_size,
 			.bi_rw = bio->bi_rw,
 		};
 
@@ -708,7 +805,7 @@
 	bio->bi_vcnt++;
 	bio->bi_phys_segments++;
  done:
-	bio->bi_size += len;
+	bio->bi_iter.bi_size += len;
 	return len;
 }
 
@@ -807,28 +904,7 @@
 	if (bio_integrity(bio))
 		bio_integrity_advance(bio, bytes);
 
-	bio->bi_sector += bytes >> 9;
-	bio->bi_size -= bytes;
-
-	if (bio->bi_rw & BIO_NO_ADVANCE_ITER_MASK)
-		return;
-
-	while (bytes) {
-		if (unlikely(bio->bi_idx >= bio->bi_vcnt)) {
-			WARN_ONCE(1, "bio idx %d >= vcnt %d\n",
-				  bio->bi_idx, bio->bi_vcnt);
-			break;
-		}
-
-		if (bytes >= bio_iovec(bio)->bv_len) {
-			bytes -= bio_iovec(bio)->bv_len;
-			bio->bi_idx++;
-		} else {
-			bio_iovec(bio)->bv_len -= bytes;
-			bio_iovec(bio)->bv_offset += bytes;
-			bytes = 0;
-		}
-	}
+	bio_advance_iter(bio, &bio->bi_iter, bytes);
 }
 EXPORT_SYMBOL(bio_advance);
 
@@ -874,117 +950,80 @@
  */
 void bio_copy_data(struct bio *dst, struct bio *src)
 {
-	struct bio_vec *src_bv, *dst_bv;
-	unsigned src_offset, dst_offset, bytes;
+	struct bvec_iter src_iter, dst_iter;
+	struct bio_vec src_bv, dst_bv;
 	void *src_p, *dst_p;
+	unsigned bytes;
 
-	src_bv = bio_iovec(src);
-	dst_bv = bio_iovec(dst);
-
-	src_offset = src_bv->bv_offset;
-	dst_offset = dst_bv->bv_offset;
+	src_iter = src->bi_iter;
+	dst_iter = dst->bi_iter;
 
 	while (1) {
-		if (src_offset == src_bv->bv_offset + src_bv->bv_len) {
-			src_bv++;
-			if (src_bv == bio_iovec_idx(src, src->bi_vcnt)) {
-				src = src->bi_next;
-				if (!src)
-					break;
+		if (!src_iter.bi_size) {
+			src = src->bi_next;
+			if (!src)
+				break;
 
-				src_bv = bio_iovec(src);
-			}
-
-			src_offset = src_bv->bv_offset;
+			src_iter = src->bi_iter;
 		}
 
-		if (dst_offset == dst_bv->bv_offset + dst_bv->bv_len) {
-			dst_bv++;
-			if (dst_bv == bio_iovec_idx(dst, dst->bi_vcnt)) {
-				dst = dst->bi_next;
-				if (!dst)
-					break;
+		if (!dst_iter.bi_size) {
+			dst = dst->bi_next;
+			if (!dst)
+				break;
 
-				dst_bv = bio_iovec(dst);
-			}
-
-			dst_offset = dst_bv->bv_offset;
+			dst_iter = dst->bi_iter;
 		}
 
-		bytes = min(dst_bv->bv_offset + dst_bv->bv_len - dst_offset,
-			    src_bv->bv_offset + src_bv->bv_len - src_offset);
+		src_bv = bio_iter_iovec(src, src_iter);
+		dst_bv = bio_iter_iovec(dst, dst_iter);
 
-		src_p = kmap_atomic(src_bv->bv_page);
-		dst_p = kmap_atomic(dst_bv->bv_page);
+		bytes = min(src_bv.bv_len, dst_bv.bv_len);
 
-		memcpy(dst_p + dst_offset,
-		       src_p + src_offset,
+		src_p = kmap_atomic(src_bv.bv_page);
+		dst_p = kmap_atomic(dst_bv.bv_page);
+
+		memcpy(dst_p + dst_bv.bv_offset,
+		       src_p + src_bv.bv_offset,
 		       bytes);
 
 		kunmap_atomic(dst_p);
 		kunmap_atomic(src_p);
 
-		src_offset += bytes;
-		dst_offset += bytes;
+		bio_advance_iter(src, &src_iter, bytes);
+		bio_advance_iter(dst, &dst_iter, bytes);
 	}
 }
 EXPORT_SYMBOL(bio_copy_data);
 
 struct bio_map_data {
-	struct bio_vec *iovecs;
-	struct sg_iovec *sgvecs;
 	int nr_sgvecs;
 	int is_our_pages;
+	struct sg_iovec sgvecs[];
 };
 
 static void bio_set_map_data(struct bio_map_data *bmd, struct bio *bio,
 			     struct sg_iovec *iov, int iov_count,
 			     int is_our_pages)
 {
-	memcpy(bmd->iovecs, bio->bi_io_vec, sizeof(struct bio_vec) * bio->bi_vcnt);
 	memcpy(bmd->sgvecs, iov, sizeof(struct sg_iovec) * iov_count);
 	bmd->nr_sgvecs = iov_count;
 	bmd->is_our_pages = is_our_pages;
 	bio->bi_private = bmd;
 }
 
-static void bio_free_map_data(struct bio_map_data *bmd)
-{
-	kfree(bmd->iovecs);
-	kfree(bmd->sgvecs);
-	kfree(bmd);
-}
-
 static struct bio_map_data *bio_alloc_map_data(int nr_segs,
 					       unsigned int iov_count,
 					       gfp_t gfp_mask)
 {
-	struct bio_map_data *bmd;
-
 	if (iov_count > UIO_MAXIOV)
 		return NULL;
 
-	bmd = kmalloc(sizeof(*bmd), gfp_mask);
-	if (!bmd)
-		return NULL;
-
-	bmd->iovecs = kmalloc(sizeof(struct bio_vec) * nr_segs, gfp_mask);
-	if (!bmd->iovecs) {
-		kfree(bmd);
-		return NULL;
-	}
-
-	bmd->sgvecs = kmalloc(sizeof(struct sg_iovec) * iov_count, gfp_mask);
-	if (bmd->sgvecs)
-		return bmd;
-
-	kfree(bmd->iovecs);
-	kfree(bmd);
-	return NULL;
+	return kmalloc(sizeof(struct bio_map_data) +
+		       sizeof(struct sg_iovec) * iov_count, gfp_mask);
 }
 
-static int __bio_copy_iov(struct bio *bio, struct bio_vec *iovecs,
-			  struct sg_iovec *iov, int iov_count,
+static int __bio_copy_iov(struct bio *bio, struct sg_iovec *iov, int iov_count,
 			  int to_user, int from_user, int do_free_page)
 {
 	int ret = 0, i;
@@ -994,7 +1033,7 @@
 
 	bio_for_each_segment_all(bvec, bio, i) {
 		char *bv_addr = page_address(bvec->bv_page);
-		unsigned int bv_len = iovecs[i].bv_len;
+		unsigned int bv_len = bvec->bv_len;
 
 		while (bv_len && iov_idx < iov_count) {
 			unsigned int bytes;
@@ -1054,14 +1093,14 @@
 		 * don't copy into a random user address space, just free.
 		 */
 		if (current->mm)
-			ret = __bio_copy_iov(bio, bmd->iovecs, bmd->sgvecs,
-					     bmd->nr_sgvecs, bio_data_dir(bio) == READ,
+			ret = __bio_copy_iov(bio, bmd->sgvecs, bmd->nr_sgvecs,
+					     bio_data_dir(bio) == READ,
 					     0, bmd->is_our_pages);
 		else if (bmd->is_our_pages)
 			bio_for_each_segment_all(bvec, bio, i)
 				__free_page(bvec->bv_page);
 	}
-	bio_free_map_data(bmd);
+	kfree(bmd);
 	bio_put(bio);
 	return ret;
 }
@@ -1175,7 +1214,7 @@
 	 */
 	if ((!write_to_vm && (!map_data || !map_data->null_mapped)) ||
 	    (map_data && map_data->from_user)) {
-		ret = __bio_copy_iov(bio, bio->bi_io_vec, iov, iov_count, 0, 1, 0);
+		ret = __bio_copy_iov(bio, iov, iov_count, 0, 1, 0);
 		if (ret)
 			goto cleanup;
 	}
@@ -1189,7 +1228,7 @@
 
 	bio_put(bio);
 out_bmd:
-	bio_free_map_data(bmd);
+	kfree(bmd);
 	return ERR_PTR(ret);
 }
 
@@ -1485,7 +1524,7 @@
 	if (IS_ERR(bio))
 		return bio;
 
-	if (bio->bi_size == len)
+	if (bio->bi_iter.bi_size == len)
 		return bio;
 
 	/*
@@ -1506,16 +1545,15 @@
 
 	bio_for_each_segment_all(bvec, bio, i) {
 		char *addr = page_address(bvec->bv_page);
-		int len = bmd->iovecs[i].bv_len;
 
 		if (read)
-			memcpy(p, addr, len);
+			memcpy(p, addr, bvec->bv_len);
 
 		__free_page(bvec->bv_page);
-		p += len;
+		p += bvec->bv_len;
 	}
 
-	bio_free_map_data(bmd);
+	kfree(bmd);
 	bio_put(bio);
 }
 
@@ -1686,11 +1724,11 @@
 #if ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE
 void bio_flush_dcache_pages(struct bio *bi)
 {
-	int i;
-	struct bio_vec *bvec;
+	struct bio_vec bvec;
+	struct bvec_iter iter;
 
-	bio_for_each_segment(bvec, bi, i)
-		flush_dcache_page(bvec->bv_page);
+	bio_for_each_segment(bvec, bi, iter)
+		flush_dcache_page(bvec.bv_page);
 }
 EXPORT_SYMBOL(bio_flush_dcache_pages);
 #endif
@@ -1711,96 +1749,86 @@
  **/
 void bio_endio(struct bio *bio, int error)
 {
-	if (error)
-		clear_bit(BIO_UPTODATE, &bio->bi_flags);
-	else if (!test_bit(BIO_UPTODATE, &bio->bi_flags))
-		error = -EIO;
+	while (bio) {
+		BUG_ON(atomic_read(&bio->bi_remaining) <= 0);
 
-	if (bio->bi_end_io)
-		bio->bi_end_io(bio, error);
+		if (error)
+			clear_bit(BIO_UPTODATE, &bio->bi_flags);
+		else if (!test_bit(BIO_UPTODATE, &bio->bi_flags))
+			error = -EIO;
+
+		if (!atomic_dec_and_test(&bio->bi_remaining))
+			return;
+
+		/*
+		 * Need to have a real endio function for chained bios,
+		 * otherwise various corner cases will break (like stacking
+		 * block devices that save/restore bi_end_io) - however, we want
+		 * to avoid unbounded recursion and blowing the stack. Tail call
+		 * optimization would handle this, but compiling with frame
+		 * pointers also disables gcc's sibling call optimization.
+		 */
+		if (bio->bi_end_io == bio_chain_endio) {
+			struct bio *parent = bio->bi_private;
+			bio_put(bio);
+			bio = parent;
+		} else {
+			if (bio->bi_end_io)
+				bio->bi_end_io(bio, error);
+			bio = NULL;
+		}
+	}
 }
 EXPORT_SYMBOL(bio_endio);
 
-void bio_pair_release(struct bio_pair *bp)
+/**
+ * bio_endio_nodec - end I/O on a bio, without decrementing bi_remaining
+ * @bio:	bio
+ * @error:	error, if any
+ *
+ * For code that has saved and restored bi_end_io; think hard before using this
+ * function - you probably should've cloned the entire bio instead.
+ **/
+void bio_endio_nodec(struct bio *bio, int error)
 {
-	if (atomic_dec_and_test(&bp->cnt)) {
-		struct bio *master = bp->bio1.bi_private;
-
-		bio_endio(master, bp->error);
-		mempool_free(bp, bp->bio2.bi_private);
-	}
+	atomic_inc(&bio->bi_remaining);
+	bio_endio(bio, error);
 }
-EXPORT_SYMBOL(bio_pair_release);
+EXPORT_SYMBOL(bio_endio_nodec);
 
-static void bio_pair_end_1(struct bio *bi, int err)
-{
-	struct bio_pair *bp = container_of(bi, struct bio_pair, bio1);
-
-	if (err)
-		bp->error = err;
-
-	bio_pair_release(bp);
-}
-
-static void bio_pair_end_2(struct bio *bi, int err)
-{
-	struct bio_pair *bp = container_of(bi, struct bio_pair, bio2);
-
-	if (err)
-		bp->error = err;
-
-	bio_pair_release(bp);
-}
-
-/*
- * split a bio - only worry about a bio with a single page in its iovec
+/**
+ * bio_split - split a bio
+ * @bio:	bio to split
+ * @sectors:	number of sectors to split from the front of @bio
+ * @gfp:	gfp mask
+ * @bs:		bio set to allocate from
+ *
+ * Allocates and returns a new bio which represents @sectors from the start of
+ * @bio, and updates @bio to represent the remaining sectors.
+ *
+ * The newly allocated bio will point to @bio's bi_io_vec; it is the caller's
+ * responsibility to ensure that @bio is not freed before the split.
  */
-struct bio_pair *bio_split(struct bio *bi, int first_sectors)
+struct bio *bio_split(struct bio *bio, int sectors,
+		      gfp_t gfp, struct bio_set *bs)
 {
-	struct bio_pair *bp = mempool_alloc(bio_split_pool, GFP_NOIO);
+	struct bio *split = NULL;
 
-	if (!bp)
-		return bp;
+	BUG_ON(sectors <= 0);
+	BUG_ON(sectors >= bio_sectors(bio));
 
-	trace_block_split(bdev_get_queue(bi->bi_bdev), bi,
-				bi->bi_sector + first_sectors);
+	split = bio_clone_fast(bio, gfp, bs);
+	if (!split)
+		return NULL;
 
-	BUG_ON(bio_segments(bi) > 1);
-	atomic_set(&bp->cnt, 3);
-	bp->error = 0;
-	bp->bio1 = *bi;
-	bp->bio2 = *bi;
-	bp->bio2.bi_sector += first_sectors;
-	bp->bio2.bi_size -= first_sectors << 9;
-	bp->bio1.bi_size = first_sectors << 9;
+	split->bi_iter.bi_size = sectors << 9;
 
-	if (bi->bi_vcnt != 0) {
-		bp->bv1 = *bio_iovec(bi);
-		bp->bv2 = *bio_iovec(bi);
+	if (bio_integrity(split))
+		bio_integrity_trim(split, 0, sectors);
 
-		if (bio_is_rw(bi)) {
-			bp->bv2.bv_offset += first_sectors << 9;
-			bp->bv2.bv_len -= first_sectors << 9;
-			bp->bv1.bv_len = first_sectors << 9;
-		}
+	bio_advance(bio, split->bi_iter.bi_size);
 
-		bp->bio1.bi_io_vec = &bp->bv1;
-		bp->bio2.bi_io_vec = &bp->bv2;
-
-		bp->bio1.bi_max_vecs = 1;
-		bp->bio2.bi_max_vecs = 1;
-	}
-
-	bp->bio1.bi_end_io = bio_pair_end_1;
-	bp->bio2.bi_end_io = bio_pair_end_2;
-
-	bp->bio1.bi_private = bi;
-	bp->bio2.bi_private = bio_split_pool;
-
-	if (bio_integrity(bi))
-		bio_integrity_split(bi, bp, first_sectors);
-
-	return bp;
+	return split;
 }
 EXPORT_SYMBOL(bio_split);
 
@@ -1814,80 +1842,20 @@
 {
 	/* 'bio' is a cloned bio which we need to trim to match
 	 * the given offset and size.
-	 * This requires adjusting bi_sector, bi_size, and bi_io_vec
 	 */
-	int i;
-	struct bio_vec *bvec;
-	int sofar = 0;
 
 	size <<= 9;
-	if (offset == 0 && size == bio->bi_size)
+	if (offset == 0 && size == bio->bi_iter.bi_size)
 		return;
 
 	clear_bit(BIO_SEG_VALID, &bio->bi_flags);
 
 	bio_advance(bio, offset << 9);
 
-	bio->bi_size = size;
-
-	/* avoid any complications with bi_idx being non-zero*/
-	if (bio->bi_idx) {
-		memmove(bio->bi_io_vec, bio->bi_io_vec+bio->bi_idx,
-			(bio->bi_vcnt - bio->bi_idx) * sizeof(struct bio_vec));
-		bio->bi_vcnt -= bio->bi_idx;
-		bio->bi_idx = 0;
-	}
-	/* Make sure vcnt and last bv are not too big */
-	bio_for_each_segment(bvec, bio, i) {
-		if (sofar + bvec->bv_len > size)
-			bvec->bv_len = size - sofar;
-		if (bvec->bv_len == 0) {
-			bio->bi_vcnt = i;
-			break;
-		}
-		sofar += bvec->bv_len;
-	}
+	bio->bi_iter.bi_size = size;
 }
 EXPORT_SYMBOL_GPL(bio_trim);
 
-/**
- *      bio_sector_offset - Find hardware sector offset in bio
- *      @bio:           bio to inspect
- *      @index:         bio_vec index
- *      @offset:        offset in bv_page
- *
- *      Return the number of hardware sectors between beginning of bio
- *      and an end point indicated by a bio_vec index and an offset
- *      within that vector's page.
- */
-sector_t bio_sector_offset(struct bio *bio, unsigned short index,
-			   unsigned int offset)
-{
-	unsigned int sector_sz;
-	struct bio_vec *bv;
-	sector_t sectors;
-	int i;
-
-	sector_sz = queue_logical_block_size(bio->bi_bdev->bd_disk->queue);
-	sectors = 0;
-
-	if (index >= bio->bi_idx)
-		index = bio->bi_vcnt - 1;
-
-	bio_for_each_segment_all(bv, bio, i) {
-		if (i == index) {
-			if (offset > bv->bv_offset)
-				sectors += (offset - bv->bv_offset) / sector_sz;
-			break;
-		}
-
-		sectors += bv->bv_len / sector_sz;
-	}
-
-	return sectors;
-}
-EXPORT_SYMBOL(bio_sector_offset);
-
 /*
  * create memory pools for biovec's in a bio_set.
  * use the global biovec slabs created for general use.
@@ -2065,11 +2033,6 @@
 	if (bioset_integrity_create(fs_bio_set, BIO_POOL_SIZE))
 		panic("bio: can't create integrity pool\n");
 
-	bio_split_pool = mempool_create_kmalloc_pool(BIO_SPLIT_ENTRIES,
-						     sizeof(struct bio_pair));
-	if (!bio_split_pool)
-		panic("bio: can't create split pool\n");
-
 	return 0;
 }
 subsys_initcall(init_bio);
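Taken together, bio_chain(), bio_clone_fast() and the reworked bio_split() replace the old bio_pair machinery. A minimal sketch of how a caller might now carve the front off a bio and submit both halves (names are illustrative; fs_bio_set is the generic bio_set, and error handling is reduced to the essentials):

	/* Hand the first `sectors` sectors of `bio` off separately; `sectors`
	 * must be non-zero and strictly less than bio_sectors(bio).
	 */
	struct bio *split = bio_split(bio, sectors, GFP_NOIO, fs_bio_set);

	if (!split)
		return -ENOMEM;

	/* Parent completes only after both the split and the remainder do. */
	bio_chain(split, bio);

	generic_make_request(split);
	generic_make_request(bio);
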
diff --git a/fs/btrfs/Kconfig b/fs/btrfs/Kconfig
index aa976ec..a66768e 100644
--- a/fs/btrfs/Kconfig
+++ b/fs/btrfs/Kconfig
@@ -1,6 +1,7 @@
 config BTRFS_FS
 	tristate "Btrfs filesystem support"
-	select LIBCRC32C
+	select CRYPTO
+	select CRYPTO_CRC32C
 	select ZLIB_INFLATE
 	select ZLIB_DEFLATE
 	select LZO_COMPRESS
diff --git a/fs/btrfs/Makefile b/fs/btrfs/Makefile
index 1a44e42..f341a98 100644
--- a/fs/btrfs/Makefile
+++ b/fs/btrfs/Makefile
@@ -9,7 +9,7 @@
 	   export.o tree-log.o free-space-cache.o zlib.o lzo.o \
 	   compression.o delayed-ref.o relocation.o delayed-inode.o scrub.o \
 	   reada.o backref.o ulist.o qgroup.o send.o dev-replace.o raid56.o \
-	   uuid-tree.o
+	   uuid-tree.o props.o hash.o
 
 btrfs-$(CONFIG_BTRFS_FS_POSIX_ACL) += acl.o
 btrfs-$(CONFIG_BTRFS_FS_CHECK_INTEGRITY) += check-integrity.o
diff --git a/fs/btrfs/backref.c b/fs/btrfs/backref.c
index 3775947..aded3ef 100644
--- a/fs/btrfs/backref.c
+++ b/fs/btrfs/backref.c
@@ -66,6 +66,16 @@
 	return 0;
 }
 
+static void free_inode_elem_list(struct extent_inode_elem *eie)
+{
+	struct extent_inode_elem *eie_next;
+
+	for (; eie; eie = eie_next) {
+		eie_next = eie->next;
+		kfree(eie);
+	}
+}
+
 static int find_extent_in_eb(struct extent_buffer *eb, u64 wanted_disk_byte,
 				u64 extent_item_pos,
 				struct extent_inode_elem **eie)
@@ -209,18 +219,19 @@
 }
 
 static int add_all_parents(struct btrfs_root *root, struct btrfs_path *path,
-				struct ulist *parents, int level,
-				struct btrfs_key *key_for_search, u64 time_seq,
-				u64 wanted_disk_byte,
-				const u64 *extent_item_pos)
+			   struct ulist *parents, struct __prelim_ref *ref,
+			   int level, u64 time_seq, const u64 *extent_item_pos)
 {
 	int ret = 0;
 	int slot;
 	struct extent_buffer *eb;
 	struct btrfs_key key;
+	struct btrfs_key *key_for_search = &ref->key_for_search;
 	struct btrfs_file_extent_item *fi;
 	struct extent_inode_elem *eie = NULL, *old = NULL;
 	u64 disk_byte;
+	u64 wanted_disk_byte = ref->wanted_disk_byte;
+	u64 count = 0;
 
 	if (level != 0) {
 		eb = path->nodes[level];
@@ -238,7 +249,7 @@
 	if (path->slots[0] >= btrfs_header_nritems(path->nodes[0]))
 		ret = btrfs_next_old_leaf(root, path, time_seq);
 
-	while (!ret) {
+	while (!ret && count < ref->count) {
 		eb = path->nodes[0];
 		slot = path->slots[0];
 
@@ -254,6 +265,7 @@
 		if (disk_byte == wanted_disk_byte) {
 			eie = NULL;
 			old = NULL;
+			count++;
 			if (extent_item_pos) {
 				ret = check_extent_in_eb(&key, eb, fi,
 						*extent_item_pos,
@@ -273,6 +285,7 @@
 					old = old->next;
 				old->next = eie;
 			}
+			eie = NULL;
 		}
 next:
 		ret = btrfs_next_old_item(root, path, time_seq);
@@ -280,6 +293,8 @@
 
 	if (ret > 0)
 		ret = 0;
+	else if (ret < 0)
+		free_inode_elem_list(eie);
 	return ret;
 }
 
@@ -299,23 +314,34 @@
 	int ret = 0;
 	int root_level;
 	int level = ref->level;
+	int index;
 
 	root_key.objectid = ref->root_id;
 	root_key.type = BTRFS_ROOT_ITEM_KEY;
 	root_key.offset = (u64)-1;
+
+	index = srcu_read_lock(&fs_info->subvol_srcu);
+
 	root = btrfs_read_fs_root_no_name(fs_info, &root_key);
 	if (IS_ERR(root)) {
+		srcu_read_unlock(&fs_info->subvol_srcu, index);
 		ret = PTR_ERR(root);
 		goto out;
 	}
 
 	root_level = btrfs_old_root_level(root, time_seq);
 
-	if (root_level + 1 == level)
+	if (root_level + 1 == level) {
+		srcu_read_unlock(&fs_info->subvol_srcu, index);
 		goto out;
+	}
 
 	path->lowest_level = level;
 	ret = btrfs_search_old_slot(root, &ref->key_for_search, path, time_seq);
+
+	/* root node has been locked, we can release @subvol_srcu safely here */
+	srcu_read_unlock(&fs_info->subvol_srcu, index);
+
 	pr_debug("search slot in root %llu (level %d, ref count %d) returned "
 		 "%d for key (%llu %u %llu)\n",
 		 ref->root_id, level, ref->count, ret,
@@ -334,9 +360,8 @@
 		eb = path->nodes[level];
 	}
 
-	ret = add_all_parents(root, path, parents, level, &ref->key_for_search,
-				time_seq, ref->wanted_disk_byte,
-				extent_item_pos);
+	ret = add_all_parents(root, path, parents, ref, level, time_seq,
+			      extent_item_pos);
 out:
 	path->lowest_level = 0;
 	btrfs_release_path(path);
@@ -376,10 +401,16 @@
 			continue;
 		err = __resolve_indirect_ref(fs_info, path, time_seq, ref,
 					     parents, extent_item_pos);
-		if (err == -ENOMEM)
-			goto out;
-		if (err)
+		/*
+		 * We can only tolerate ENOENT; otherwise, we should catch the
+		 * error and return directly.
+		 */
+		if (err == -ENOENT) {
 			continue;
+		} else if (err) {
+			ret = err;
+			goto out;
+		}
 
 		/* we put the first parent into the ref at hand */
 		ULIST_ITER_INIT(&uiter);
@@ -538,14 +569,13 @@
 	if (extent_op && extent_op->update_key)
 		btrfs_disk_key_to_cpu(&op_key, &extent_op->key);
 
-	while ((n = rb_prev(n))) {
+	spin_lock(&head->lock);
+	n = rb_first(&head->ref_root);
+	while (n) {
 		struct btrfs_delayed_ref_node *node;
 		node = rb_entry(n, struct btrfs_delayed_ref_node,
 				rb_node);
-		if (node->bytenr != head->node.bytenr)
-			break;
-		WARN_ON(node->is_head);
-
+		n = rb_next(n);
 		if (node->seq > seq)
 			continue;
 
@@ -612,10 +642,10 @@
 			WARN_ON(1);
 		}
 		if (ret)
-			return ret;
+			break;
 	}
-
-	return 0;
+	spin_unlock(&head->lock);
+	return ret;
 }
 
 /*
@@ -828,6 +858,7 @@
 	struct list_head prefs_delayed;
 	struct list_head prefs;
 	struct __prelim_ref *ref;
+	struct extent_inode_elem *eie = NULL;
 
 	INIT_LIST_HEAD(&prefs);
 	INIT_LIST_HEAD(&prefs_delayed);
@@ -882,15 +913,15 @@
 				btrfs_put_delayed_ref(&head->node);
 				goto again;
 			}
+			spin_unlock(&delayed_refs->lock);
 			ret = __add_delayed_refs(head, time_seq,
 						 &prefs_delayed);
 			mutex_unlock(&head->mutex);
-			if (ret) {
-				spin_unlock(&delayed_refs->lock);
+			if (ret)
 				goto out;
-			}
+		} else {
+			spin_unlock(&delayed_refs->lock);
 		}
-		spin_unlock(&delayed_refs->lock);
 	}
 
 	if (path->slots[0]) {
@@ -941,7 +972,6 @@
 				goto out;
 		}
 		if (ref->count && ref->parent) {
-			struct extent_inode_elem *eie = NULL;
 			if (extent_item_pos && !ref->inode_list) {
 				u32 bsz;
 				struct extent_buffer *eb;
@@ -976,6 +1006,7 @@
 					eie = eie->next;
 				eie->next = ref->inode_list;
 			}
+			eie = NULL;
 		}
 		list_del(&ref->list);
 		kmem_cache_free(btrfs_prelim_ref_cache, ref);
@@ -994,7 +1025,8 @@
 		list_del(&ref->list);
 		kmem_cache_free(btrfs_prelim_ref_cache, ref);
 	}
-
+	if (ret < 0)
+		free_inode_elem_list(eie);
 	return ret;
 }
 
@@ -1002,7 +1034,6 @@
 {
 	struct ulist_node *node = NULL;
 	struct extent_inode_elem *eie;
-	struct extent_inode_elem *eie_next;
 	struct ulist_iterator uiter;
 
 	ULIST_ITER_INIT(&uiter);
@@ -1010,10 +1041,7 @@
 		if (!node->aux)
 			continue;
 		eie = (struct extent_inode_elem *)(uintptr_t)node->aux;
-		for (; eie; eie = eie_next) {
-			eie_next = eie->next;
-			kfree(eie);
-		}
+		free_inode_elem_list(eie);
 		node->aux = 0;
 	}
 
@@ -1101,44 +1129,13 @@
 		if (!node)
 			break;
 		bytenr = node->val;
+		cond_resched();
 	}
 
 	ulist_free(tmp);
 	return 0;
 }
 
-
-static int __inode_info(u64 inum, u64 ioff, u8 key_type,
-			struct btrfs_root *fs_root, struct btrfs_path *path,
-			struct btrfs_key *found_key)
-{
-	int ret;
-	struct btrfs_key key;
-	struct extent_buffer *eb;
-
-	key.type = key_type;
-	key.objectid = inum;
-	key.offset = ioff;
-
-	ret = btrfs_search_slot(NULL, fs_root, &key, path, 0, 0);
-	if (ret < 0)
-		return ret;
-
-	eb = path->nodes[0];
-	if (ret && path->slots[0] >= btrfs_header_nritems(eb)) {
-		ret = btrfs_next_leaf(fs_root, path);
-		if (ret)
-			return ret;
-		eb = path->nodes[0];
-	}
-
-	btrfs_item_key_to_cpu(eb, found_key, path->slots[0]);
-	if (found_key->type != key.type || found_key->objectid != key.objectid)
-		return 1;
-
-	return 0;
-}
-
 /*
  * this makes the path point to (inum INODE_ITEM ioff)
  */
@@ -1146,16 +1143,16 @@
 			struct btrfs_path *path)
 {
 	struct btrfs_key key;
-	return __inode_info(inum, ioff, BTRFS_INODE_ITEM_KEY, fs_root, path,
-				&key);
+	return btrfs_find_item(fs_root, path, inum, ioff,
+			BTRFS_INODE_ITEM_KEY, &key);
 }
 
 static int inode_ref_info(u64 inum, u64 ioff, struct btrfs_root *fs_root,
 				struct btrfs_path *path,
 				struct btrfs_key *found_key)
 {
-	return __inode_info(inum, ioff, BTRFS_INODE_REF_KEY, fs_root, path,
-				found_key);
+	return btrfs_find_item(fs_root, path, inum, ioff,
+			BTRFS_INODE_REF_KEY, found_key);
 }
 
 int btrfs_find_one_extref(struct btrfs_root *root, u64 inode_objectid,
@@ -1335,20 +1332,45 @@
 	ret = btrfs_search_slot(NULL, fs_info->extent_root, &key, path, 0, 0);
 	if (ret < 0)
 		return ret;
-	ret = btrfs_previous_item(fs_info->extent_root, path,
-					0, BTRFS_EXTENT_ITEM_KEY);
-	if (ret < 0)
-		return ret;
 
-	btrfs_item_key_to_cpu(path->nodes[0], found_key, path->slots[0]);
+	while (1) {
+		u32 nritems;
+		if (path->slots[0] == 0) {
+			btrfs_set_path_blocking(path);
+			ret = btrfs_prev_leaf(fs_info->extent_root, path);
+			if (ret != 0) {
+				if (ret > 0) {
+					pr_debug("logical %llu is not within "
+						 "any extent\n", logical);
+					ret = -ENOENT;
+				}
+				return ret;
+			}
+		} else {
+			path->slots[0]--;
+		}
+		nritems = btrfs_header_nritems(path->nodes[0]);
+		if (nritems == 0) {
+			pr_debug("logical %llu is not within any extent\n",
+				 logical);
+			return -ENOENT;
+		}
+		if (path->slots[0] == nritems)
+			path->slots[0]--;
+
+		btrfs_item_key_to_cpu(path->nodes[0], found_key,
+				      path->slots[0]);
+		if (found_key->type == BTRFS_EXTENT_ITEM_KEY ||
+		    found_key->type == BTRFS_METADATA_ITEM_KEY)
+			break;
+	}
+
 	if (found_key->type == BTRFS_METADATA_ITEM_KEY)
 		size = fs_info->extent_root->leafsize;
 	else if (found_key->type == BTRFS_EXTENT_ITEM_KEY)
 		size = found_key->offset;
 
-	if ((found_key->type != BTRFS_EXTENT_ITEM_KEY &&
-	     found_key->type != BTRFS_METADATA_ITEM_KEY) ||
-	    found_key->objectid > logical ||
+	if (found_key->objectid > logical ||
 	    found_key->objectid + size <= logical) {
 		pr_debug("logical %llu is not within any extent\n", logical);
 		return -ENOENT;
@@ -1601,7 +1623,6 @@
 	struct btrfs_key found_key;
 
 	while (!ret) {
-		path->leave_spinning = 1;
 		ret = inode_ref_info(inum, parent ? parent+1 : 0, fs_root, path,
 				     &found_key);
 		if (ret < 0)
@@ -1614,9 +1635,12 @@
 
 		parent = found_key.offset;
 		slot = path->slots[0];
-		eb = path->nodes[0];
-		/* make sure we can use eb after releasing the path */
-		atomic_inc(&eb->refs);
+		eb = btrfs_clone_extent_buffer(path->nodes[0]);
+		if (!eb) {
+			ret = -ENOMEM;
+			break;
+		}
+		extent_buffer_get(eb);
 		btrfs_tree_read_lock(eb);
 		btrfs_set_lock_blocking_rw(eb, BTRFS_READ_LOCK);
 		btrfs_release_path(path);
@@ -1674,17 +1698,20 @@
 		++found;
 
 		slot = path->slots[0];
-		eb = path->nodes[0];
-		/* make sure we can use eb after releasing the path */
-		atomic_inc(&eb->refs);
+		eb = btrfs_clone_extent_buffer(path->nodes[0]);
+		if (!eb) {
+			ret = -ENOMEM;
+			break;
+		}
+		extent_buffer_get(eb);
 
 		btrfs_tree_read_lock(eb);
 		btrfs_set_lock_blocking_rw(eb, BTRFS_READ_LOCK);
 		btrfs_release_path(path);
 
 		leaf = path->nodes[0];
-		item_size = btrfs_item_size_nr(leaf, path->slots[0]);
-		ptr = btrfs_item_ptr_offset(leaf, path->slots[0]);
+		item_size = btrfs_item_size_nr(leaf, slot);
+		ptr = btrfs_item_ptr_offset(leaf, slot);
 		cur_offset = 0;
 
 		while (cur_offset < item_size) {
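
The backref.c hunks above hoist the extent_inode_elem list head out of the per-ref loop, free it with free_inode_elem_list() on the error path, and replace the open-coded chain-freeing loop further down with the same helper. A rough userspace sketch of that helper pattern (the struct layout, field names and helper name here are illustrative, not the kernel's exact definitions):

#include <stdlib.h>

/* Illustrative stand-in for struct extent_inode_elem: a singly-linked chain. */
struct inode_elem {
	struct inode_elem *next;
	unsigned long long inum;
	unsigned long long offset;
};

/* Free an entire chain; a NULL head is a no-op, which is what lets the
 * error path call it unconditionally. */
static void free_inode_elem_list(struct inode_elem *eie)
{
	struct inode_elem *next;

	for (; eie; eie = next) {
		next = eie->next;
		free(eie);
	}
}
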
diff --git a/fs/btrfs/btrfs_inode.h b/fs/btrfs/btrfs_inode.h
index ac0b39d..8fed212 100644
--- a/fs/btrfs/btrfs_inode.h
+++ b/fs/btrfs/btrfs_inode.h
@@ -43,6 +43,7 @@
 #define BTRFS_INODE_COPY_EVERYTHING		8
 #define BTRFS_INODE_IN_DELALLOC_LIST		9
 #define BTRFS_INODE_READDIO_NEED_LOCK		10
+#define BTRFS_INODE_HAS_PROPS		        11
 
 /* in memory btrfs inode */
 struct btrfs_inode {
@@ -135,6 +136,9 @@
 	 */
 	u64 index_cnt;
 
+	/* Cache the directory index number to speed up dir/file removal */
+	u64 dir_index;
+
 	/* the fsync log has some corner cases that mean we have to check
 	 * directories to see if any unlinks have been done before
 	 * the directory was logged.  See tree-log.c for all the
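
The btrfs_inode.h hunks above add a cached dir_index to the in-memory inode so a later unlink can skip the dir-index lookup, plus a new BTRFS_INODE_HAS_PROPS runtime flag bit. A minimal userspace sketch of the caching idea (the struct, helper and fallback here are illustrative, not the kernel's code):

/* Illustrative only: cache the directory index assigned when the entry is
 * created, and use it on removal instead of searching the tree again. */
struct mem_inode {
	unsigned long long dir_index;	/* 0 means "not cached yet" */
};

static unsigned long long
dir_index_for_delete(struct mem_inode *mi,
		     unsigned long long (*slow_lookup)(void *ctx), void *ctx)
{
	if (mi->dir_index)
		return mi->dir_index;	/* fast path: cached at create/rename time */
	return slow_lookup(ctx);	/* fall back to an on-disk search */
}
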
diff --git a/fs/btrfs/check-integrity.c b/fs/btrfs/check-integrity.c
index 131d828..0e8388e 100644
--- a/fs/btrfs/check-integrity.c
+++ b/fs/btrfs/check-integrity.c
@@ -92,11 +92,11 @@
 #include <linux/slab.h>
 #include <linux/buffer_head.h>
 #include <linux/mutex.h>
-#include <linux/crc32c.h>
 #include <linux/genhd.h>
 #include <linux/blkdev.h>
 #include "ctree.h"
 #include "disk-io.h"
+#include "hash.h"
 #include "transaction.h"
 #include "extent_io.h"
 #include "volumes.h"
@@ -1456,10 +1456,14 @@
 	btrfsic_read_from_block_data(block_ctx, &file_extent_item,
 				     file_extent_item_offset,
 				     sizeof(struct btrfs_file_extent_item));
-	next_bytenr = btrfs_stack_file_extent_disk_bytenr(&file_extent_item) +
-		      btrfs_stack_file_extent_offset(&file_extent_item);
-	generation = btrfs_stack_file_extent_generation(&file_extent_item);
-	num_bytes = btrfs_stack_file_extent_num_bytes(&file_extent_item);
+	next_bytenr = btrfs_stack_file_extent_disk_bytenr(&file_extent_item);
+	if (btrfs_stack_file_extent_compression(&file_extent_item) ==
+	    BTRFS_COMPRESS_NONE) {
+		next_bytenr += btrfs_stack_file_extent_offset(&file_extent_item);
+		num_bytes = btrfs_stack_file_extent_num_bytes(&file_extent_item);
+	} else {
+		num_bytes = btrfs_stack_file_extent_disk_num_bytes(&file_extent_item);
+	}
 	generation = btrfs_stack_file_extent_generation(&file_extent_item);
 
 	if (state->print_mask & BTRFSIC_PRINT_MASK_VERY_VERBOSE)
@@ -1695,7 +1699,7 @@
 			return -1;
 		}
 		bio->bi_bdev = block_ctx->dev->bdev;
-		bio->bi_sector = dev_bytenr >> 9;
+		bio->bi_iter.bi_sector = dev_bytenr >> 9;
 
 		for (j = i; j < num_pages; j++) {
 			ret = bio_add_page(bio, block_ctx->pagev[j],
@@ -1819,7 +1823,7 @@
 		size_t sublen = i ? PAGE_CACHE_SIZE :
 				    (PAGE_CACHE_SIZE - BTRFS_CSUM_SIZE);
 
-		crc = crc32c(crc, data, sublen);
+		crc = btrfs_crc32c(crc, data, sublen);
 	}
 	btrfs_csum_final(crc, csum);
 	if (memcmp(csum, h->csum, state->csum_size))
@@ -3013,7 +3017,7 @@
 		int bio_is_patched;
 		char **mapped_datav;
 
-		dev_bytenr = 512 * bio->bi_sector;
+		dev_bytenr = 512 * bio->bi_iter.bi_sector;
 		bio_is_patched = 0;
 		if (dev_state->state->print_mask &
 		    BTRFSIC_PRINT_MASK_SUBMIT_BIO_BH)
@@ -3021,8 +3025,8 @@
 			       "submit_bio(rw=0x%x, bi_vcnt=%u,"
 			       " bi_sector=%llu (bytenr %llu), bi_bdev=%p)\n",
 			       rw, bio->bi_vcnt,
-			       (unsigned long long)bio->bi_sector, dev_bytenr,
-			       bio->bi_bdev);
+			       (unsigned long long)bio->bi_iter.bi_sector,
+			       dev_bytenr, bio->bi_bdev);
 
 		mapped_datav = kmalloc(sizeof(*mapped_datav) * bio->bi_vcnt,
 				       GFP_NOFS);
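
The check-integrity.c hunk above fixes how the on-disk range of a file extent is derived: only uncompressed extents may be addressed at disk_bytenr + offset with num_bytes length, while compressed extents must be taken whole (disk_bytenr, disk_num_bytes). A self-contained sketch of that rule (the struct and helper are illustrative, not the kernel's types):

struct file_extent {
	unsigned long long disk_bytenr;		/* start of the on-disk extent */
	unsigned long long disk_num_bytes;	/* on-disk (compressed) length */
	unsigned long long offset;		/* logical offset into the extent */
	unsigned long long num_bytes;		/* uncompressed length referenced */
	int compression;			/* 0 == none */
};

/* Mirror of the fixed logic: only uncompressed extents may be addressed at
 * disk_bytenr + offset; compressed ones must be read in full. */
static void extent_disk_range(const struct file_extent *fe,
			      unsigned long long *start,
			      unsigned long long *len)
{
	*start = fe->disk_bytenr;
	if (fe->compression == 0) {
		*start += fe->offset;
		*len = fe->num_bytes;
	} else {
		*len = fe->disk_num_bytes;
	}
}
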
diff --git a/fs/btrfs/compression.c b/fs/btrfs/compression.c
index 1499b27..b01fb6c 100644
--- a/fs/btrfs/compression.c
+++ b/fs/btrfs/compression.c
@@ -128,11 +128,10 @@
 		kunmap_atomic(kaddr);
 
 		if (csum != *cb_sum) {
-			printk(KERN_INFO "btrfs csum failed ino %llu "
-			       "extent %llu csum %u "
-			       "wanted %u mirror %d\n",
-			       btrfs_ino(inode), disk_start, csum, *cb_sum,
-			       cb->mirror_num);
+			btrfs_info(BTRFS_I(inode)->root->fs_info,
+			   "csum failed ino %llu extent %llu csum %u wanted %u mirror %d",
+			   btrfs_ino(inode), disk_start, csum, *cb_sum,
+			   cb->mirror_num);
 			ret = -EIO;
 			goto fail;
 		}
@@ -172,7 +171,8 @@
 		goto out;
 
 	inode = cb->inode;
-	ret = check_compressed_csum(inode, cb, (u64)bio->bi_sector << 9);
+	ret = check_compressed_csum(inode, cb,
+				    (u64)bio->bi_iter.bi_sector << 9);
 	if (ret)
 		goto csum_failed;
 
@@ -201,18 +201,16 @@
 	if (cb->errors) {
 		bio_io_error(cb->orig_bio);
 	} else {
-		int bio_index = 0;
-		struct bio_vec *bvec = cb->orig_bio->bi_io_vec;
+		int i;
+		struct bio_vec *bvec;
 
 		/*
 		 * we have verified the checksum already, set page
 		 * checked so the end_io handlers know about it
 		 */
-		while (bio_index < cb->orig_bio->bi_vcnt) {
+		bio_for_each_segment_all(bvec, cb->orig_bio, i)
 			SetPageChecked(bvec->bv_page);
-			bvec++;
-			bio_index++;
-		}
+
 		bio_endio(cb->orig_bio, 0);
 	}
 
@@ -372,7 +370,7 @@
 	for (pg_index = 0; pg_index < cb->nr_pages; pg_index++) {
 		page = compressed_pages[pg_index];
 		page->mapping = inode->i_mapping;
-		if (bio->bi_size)
+		if (bio->bi_iter.bi_size)
 			ret = io_tree->ops->merge_bio_hook(WRITE, page, 0,
 							   PAGE_CACHE_SIZE,
 							   bio, 0);
@@ -412,7 +410,8 @@
 			bio_add_page(bio, page, PAGE_CACHE_SIZE, 0);
 		}
 		if (bytes_left < PAGE_CACHE_SIZE) {
-			printk("bytes left %lu compress len %lu nr %lu\n",
+			btrfs_info(BTRFS_I(inode)->root->fs_info,
+					"bytes left %lu compress len %lu nr %lu",
 			       bytes_left, cb->compressed_len, cb->nr_pages);
 		}
 		bytes_left -= PAGE_CACHE_SIZE;
@@ -506,7 +505,7 @@
 
 		if (!em || last_offset < em->start ||
 		    (last_offset + PAGE_CACHE_SIZE > extent_map_end(em)) ||
-		    (em->block_start >> 9) != cb->orig_bio->bi_sector) {
+		    (em->block_start >> 9) != cb->orig_bio->bi_iter.bi_sector) {
 			free_extent_map(em);
 			unlock_extent(tree, last_offset, end);
 			unlock_page(page);
@@ -552,7 +551,7 @@
  * in it.  We don't actually do IO on those pages but allocate new ones
  * to hold the compressed pages on disk.
  *
- * bio->bi_sector points to the compressed extent on disk
+ * bio->bi_iter.bi_sector points to the compressed extent on disk
  * bio->bi_io_vec points to all of the inode pages
  * bio->bi_vcnt is a count of pages
  *
@@ -573,7 +572,7 @@
 	struct page *page;
 	struct block_device *bdev;
 	struct bio *comp_bio;
-	u64 cur_disk_byte = (u64)bio->bi_sector << 9;
+	u64 cur_disk_byte = (u64)bio->bi_iter.bi_sector << 9;
 	u64 em_len;
 	u64 em_start;
 	struct extent_map *em;
@@ -659,7 +658,7 @@
 		page->mapping = inode->i_mapping;
 		page->index = em_start >> PAGE_CACHE_SHIFT;
 
-		if (comp_bio->bi_size)
+		if (comp_bio->bi_iter.bi_size)
 			ret = tree->ops->merge_bio_hook(READ, page, 0,
 							PAGE_CACHE_SIZE,
 							comp_bio, 0);
@@ -687,8 +686,8 @@
 							comp_bio, sums);
 				BUG_ON(ret); /* -ENOMEM */
 			}
-			sums += (comp_bio->bi_size + root->sectorsize - 1) /
-				root->sectorsize;
+			sums += (comp_bio->bi_iter.bi_size +
+				 root->sectorsize - 1) / root->sectorsize;
 
 			ret = btrfs_map_bio(root, READ, comp_bio,
 					    mirror_num, 0);
@@ -1011,6 +1010,8 @@
 		bytes = min(bytes, working_bytes);
 		kaddr = kmap_atomic(page_out);
 		memcpy(kaddr + *pg_offset, buf + buf_offset, bytes);
+		if (*pg_index == (vcnt - 1) && *pg_offset == 0)
+			memset(kaddr + bytes, 0, PAGE_CACHE_SIZE - bytes);
 		kunmap_atomic(kaddr);
 		flush_dcache_page(page_out);
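
The two lines added to the decompression copy loop above zero the tail of the final destination page when the copy starts at page offset 0, so stale page contents cannot leak past the decompressed data. A userspace sketch of the same idea (PAGE_SIZE and the helper name are illustrative):

#include <string.h>

#define PAGE_SIZE 4096

/* After copying 'bytes' of decompressed data into the last destination
 * page, zero the remainder of that page. */
static void copy_last_page(char *page_out, const char *buf, size_t bytes)
{
	memcpy(page_out, buf, bytes);
	if (bytes < PAGE_SIZE)
		memset(page_out + bytes, 0, PAGE_SIZE - bytes);
}
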
 
diff --git a/fs/btrfs/ctree.c b/fs/btrfs/ctree.c
index 316136b..cbd3a7d 100644
--- a/fs/btrfs/ctree.c
+++ b/fs/btrfs/ctree.c
@@ -39,9 +39,8 @@
 			      struct extent_buffer *src_buf);
 static void del_ptr(struct btrfs_root *root, struct btrfs_path *path,
 		    int level, int slot);
-static void tree_mod_log_free_eb(struct btrfs_fs_info *fs_info,
+static int tree_mod_log_free_eb(struct btrfs_fs_info *fs_info,
 				 struct extent_buffer *eb);
-static int btrfs_prev_leaf(struct btrfs_root *root, struct btrfs_path *path);
 
 struct btrfs_path *btrfs_alloc_path(void)
 {
@@ -475,6 +474,8 @@
  * the index is the shifted logical of the *new* root node for root replace
  * operations, or the shifted logical of the affected block for all other
  * operations.
+ *
+ * Note: must be called with write lock (tree_mod_log_write_lock).
  */
 static noinline int
 __tree_mod_log_insert(struct btrfs_fs_info *fs_info, struct tree_mod_elem *tm)
@@ -483,24 +484,9 @@
 	struct rb_node **new;
 	struct rb_node *parent = NULL;
 	struct tree_mod_elem *cur;
-	int ret = 0;
 
 	BUG_ON(!tm);
 
-	tree_mod_log_write_lock(fs_info);
-	if (list_empty(&fs_info->tree_mod_seq_list)) {
-		tree_mod_log_write_unlock(fs_info);
-		/*
-		 * Ok we no longer care about logging modifications, free up tm
-		 * and return 0.  Any callers shouldn't be using tm after
-		 * calling tree_mod_log_insert, but if they do we can just
-		 * change this to return a special error code to let the callers
-		 * do their own thing.
-		 */
-		kfree(tm);
-		return 0;
-	}
-
 	spin_lock(&fs_info->tree_mod_seq_lock);
 	tm->seq = btrfs_inc_tree_mod_seq_minor(fs_info);
 	spin_unlock(&fs_info->tree_mod_seq_lock);
@@ -518,18 +504,13 @@
 			new = &((*new)->rb_left);
 		else if (cur->seq > tm->seq)
 			new = &((*new)->rb_right);
-		else {
-			ret = -EEXIST;
-			kfree(tm);
-			goto out;
-		}
+		else
+			return -EEXIST;
 	}
 
 	rb_link_node(&tm->node, parent, new);
 	rb_insert_color(&tm->node, tm_root);
-out:
-	tree_mod_log_write_unlock(fs_info);
-	return ret;
+	return 0;
 }
 
 /*
@@ -545,19 +526,38 @@
 		return 1;
 	if (eb && btrfs_header_level(eb) == 0)
 		return 1;
+
+	tree_mod_log_write_lock(fs_info);
+	if (list_empty(&(fs_info)->tree_mod_seq_list)) {
+		tree_mod_log_write_unlock(fs_info);
+		return 1;
+	}
+
 	return 0;
 }
 
-static inline int
-__tree_mod_log_insert_key(struct btrfs_fs_info *fs_info,
-			  struct extent_buffer *eb, int slot,
-			  enum mod_log_op op, gfp_t flags)
+/* Similar to tree_mod_dont_log, but doesn't acquire any locks. */
+static inline int tree_mod_need_log(const struct btrfs_fs_info *fs_info,
+				    struct extent_buffer *eb)
+{
+	smp_mb();
+	if (list_empty(&(fs_info)->tree_mod_seq_list))
+		return 0;
+	if (eb && btrfs_header_level(eb) == 0)
+		return 0;
+
+	return 1;
+}
+
+static struct tree_mod_elem *
+alloc_tree_mod_elem(struct extent_buffer *eb, int slot,
+		    enum mod_log_op op, gfp_t flags)
 {
 	struct tree_mod_elem *tm;
 
 	tm = kzalloc(sizeof(*tm), flags);
 	if (!tm)
-		return -ENOMEM;
+		return NULL;
 
 	tm->index = eb->start >> PAGE_CACHE_SHIFT;
 	if (op != MOD_LOG_KEY_ADD) {
@@ -567,8 +567,9 @@
 	tm->op = op;
 	tm->slot = slot;
 	tm->generation = btrfs_node_ptr_generation(eb, slot);
+	RB_CLEAR_NODE(&tm->node);
 
-	return __tree_mod_log_insert(fs_info, tm);
+	return tm;
 }
 
 static noinline int
@@ -576,10 +577,27 @@
 			struct extent_buffer *eb, int slot,
 			enum mod_log_op op, gfp_t flags)
 {
-	if (tree_mod_dont_log(fs_info, eb))
+	struct tree_mod_elem *tm;
+	int ret;
+
+	if (!tree_mod_need_log(fs_info, eb))
 		return 0;
 
-	return __tree_mod_log_insert_key(fs_info, eb, slot, op, flags);
+	tm = alloc_tree_mod_elem(eb, slot, op, flags);
+	if (!tm)
+		return -ENOMEM;
+
+	if (tree_mod_dont_log(fs_info, eb)) {
+		kfree(tm);
+		return 0;
+	}
+
+	ret = __tree_mod_log_insert(fs_info, tm);
+	tree_mod_log_write_unlock(fs_info);
+	if (ret)
+		kfree(tm);
+
+	return ret;
 }
 
 static noinline int
@@ -587,27 +605,24 @@
 			 struct extent_buffer *eb, int dst_slot, int src_slot,
 			 int nr_items, gfp_t flags)
 {
-	struct tree_mod_elem *tm;
-	int ret;
+	struct tree_mod_elem *tm = NULL;
+	struct tree_mod_elem **tm_list = NULL;
+	int ret = 0;
 	int i;
+	int locked = 0;
 
-	if (tree_mod_dont_log(fs_info, eb))
+	if (!tree_mod_need_log(fs_info, eb))
 		return 0;
 
-	/*
-	 * When we override something during the move, we log these removals.
-	 * This can only happen when we move towards the beginning of the
-	 * buffer, i.e. dst_slot < src_slot.
-	 */
-	for (i = 0; i + dst_slot < src_slot && i < nr_items; i++) {
-		ret = __tree_mod_log_insert_key(fs_info, eb, i + dst_slot,
-				MOD_LOG_KEY_REMOVE_WHILE_MOVING, GFP_NOFS);
-		BUG_ON(ret < 0);
-	}
+	tm_list = kzalloc(nr_items * sizeof(struct tree_mod_elem *), flags);
+	if (!tm_list)
+		return -ENOMEM;
 
 	tm = kzalloc(sizeof(*tm), flags);
-	if (!tm)
-		return -ENOMEM;
+	if (!tm) {
+		ret = -ENOMEM;
+		goto free_tms;
+	}
 
 	tm->index = eb->start >> PAGE_CACHE_SHIFT;
 	tm->slot = src_slot;
@@ -615,25 +630,70 @@
 	tm->move.nr_items = nr_items;
 	tm->op = MOD_LOG_MOVE_KEYS;
 
-	return __tree_mod_log_insert(fs_info, tm);
+	for (i = 0; i + dst_slot < src_slot && i < nr_items; i++) {
+		tm_list[i] = alloc_tree_mod_elem(eb, i + dst_slot,
+		    MOD_LOG_KEY_REMOVE_WHILE_MOVING, flags);
+		if (!tm_list[i]) {
+			ret = -ENOMEM;
+			goto free_tms;
+		}
+	}
+
+	if (tree_mod_dont_log(fs_info, eb))
+		goto free_tms;
+	locked = 1;
+
+	/*
+	 * When we override something during the move, we log these removals.
+	 * This can only happen when we move towards the beginning of the
+	 * buffer, i.e. dst_slot < src_slot.
+	 */
+	for (i = 0; i + dst_slot < src_slot && i < nr_items; i++) {
+		ret = __tree_mod_log_insert(fs_info, tm_list[i]);
+		if (ret)
+			goto free_tms;
+	}
+
+	ret = __tree_mod_log_insert(fs_info, tm);
+	if (ret)
+		goto free_tms;
+	tree_mod_log_write_unlock(fs_info);
+	kfree(tm_list);
+
+	return 0;
+free_tms:
+	for (i = 0; i < nr_items; i++) {
+		if (tm_list[i] && !RB_EMPTY_NODE(&tm_list[i]->node))
+			rb_erase(&tm_list[i]->node, &fs_info->tree_mod_log);
+		kfree(tm_list[i]);
+	}
+	if (locked)
+		tree_mod_log_write_unlock(fs_info);
+	kfree(tm_list);
+	kfree(tm);
+
+	return ret;
 }
 
-static inline void
-__tree_mod_log_free_eb(struct btrfs_fs_info *fs_info, struct extent_buffer *eb)
+static inline int
+__tree_mod_log_free_eb(struct btrfs_fs_info *fs_info,
+		       struct tree_mod_elem **tm_list,
+		       int nritems)
 {
-	int i;
-	u32 nritems;
+	int i, j;
 	int ret;
 
-	if (btrfs_header_level(eb) == 0)
-		return;
-
-	nritems = btrfs_header_nritems(eb);
 	for (i = nritems - 1; i >= 0; i--) {
-		ret = __tree_mod_log_insert_key(fs_info, eb, i,
-				MOD_LOG_KEY_REMOVE_WHILE_FREEING, GFP_NOFS);
-		BUG_ON(ret < 0);
+		ret = __tree_mod_log_insert(fs_info, tm_list[i]);
+		if (ret) {
+			for (j = nritems - 1; j > i; j--)
+				rb_erase(&tm_list[j]->node,
+					 &fs_info->tree_mod_log);
+			return ret;
+		}
 	}
+
+	return 0;
 }
 
 static noinline int
@@ -642,17 +702,38 @@
 			 struct extent_buffer *new_root, gfp_t flags,
 			 int log_removal)
 {
-	struct tree_mod_elem *tm;
+	struct tree_mod_elem *tm = NULL;
+	struct tree_mod_elem **tm_list = NULL;
+	int nritems = 0;
+	int ret = 0;
+	int i;
 
-	if (tree_mod_dont_log(fs_info, NULL))
+	if (!tree_mod_need_log(fs_info, NULL))
 		return 0;
 
-	if (log_removal)
-		__tree_mod_log_free_eb(fs_info, old_root);
+	if (log_removal && btrfs_header_level(old_root) > 0) {
+		nritems = btrfs_header_nritems(old_root);
+		tm_list = kzalloc(nritems * sizeof(struct tree_mod_elem *),
+				  flags);
+		if (!tm_list) {
+			ret = -ENOMEM;
+			goto free_tms;
+		}
+		for (i = 0; i < nritems; i++) {
+			tm_list[i] = alloc_tree_mod_elem(old_root, i,
+			    MOD_LOG_KEY_REMOVE_WHILE_FREEING, flags);
+			if (!tm_list[i]) {
+				ret = -ENOMEM;
+				goto free_tms;
+			}
+		}
+	}
 
 	tm = kzalloc(sizeof(*tm), flags);
-	if (!tm)
-		return -ENOMEM;
+	if (!tm) {
+		ret = -ENOMEM;
+		goto free_tms;
+	}
 
 	tm->index = new_root->start >> PAGE_CACHE_SHIFT;
 	tm->old_root.logical = old_root->start;
@@ -660,7 +741,30 @@
 	tm->generation = btrfs_header_generation(old_root);
 	tm->op = MOD_LOG_ROOT_REPLACE;
 
-	return __tree_mod_log_insert(fs_info, tm);
+	if (tree_mod_dont_log(fs_info, NULL))
+		goto free_tms;
+
+	if (tm_list)
+		ret = __tree_mod_log_free_eb(fs_info, tm_list, nritems);
+	if (!ret)
+		ret = __tree_mod_log_insert(fs_info, tm);
+
+	tree_mod_log_write_unlock(fs_info);
+	if (ret)
+		goto free_tms;
+	kfree(tm_list);
+
+	return ret;
+
+free_tms:
+	if (tm_list) {
+		for (i = 0; i < nritems; i++)
+			kfree(tm_list[i]);
+		kfree(tm_list);
+	}
+	kfree(tm);
+
+	return ret;
 }
 
 static struct tree_mod_elem *
@@ -729,31 +833,75 @@
 	return __tree_mod_log_search(fs_info, start, min_seq, 0);
 }
 
-static noinline void
+static noinline int
 tree_mod_log_eb_copy(struct btrfs_fs_info *fs_info, struct extent_buffer *dst,
 		     struct extent_buffer *src, unsigned long dst_offset,
 		     unsigned long src_offset, int nr_items)
 {
-	int ret;
+	int ret = 0;
+	struct tree_mod_elem **tm_list = NULL;
+	struct tree_mod_elem **tm_list_add, **tm_list_rem;
 	int i;
+	int locked = 0;
 
-	if (tree_mod_dont_log(fs_info, NULL))
-		return;
+	if (!tree_mod_need_log(fs_info, NULL))
+		return 0;
 
 	if (btrfs_header_level(dst) == 0 && btrfs_header_level(src) == 0)
-		return;
+		return 0;
+
+	tm_list = kzalloc(nr_items * 2 * sizeof(struct tree_mod_elem *),
+			  GFP_NOFS);
+	if (!tm_list)
+		return -ENOMEM;
+
+	tm_list_add = tm_list;
+	tm_list_rem = tm_list + nr_items;
+	for (i = 0; i < nr_items; i++) {
+		tm_list_rem[i] = alloc_tree_mod_elem(src, i + src_offset,
+		    MOD_LOG_KEY_REMOVE, GFP_NOFS);
+		if (!tm_list_rem[i]) {
+			ret = -ENOMEM;
+			goto free_tms;
+		}
+
+		tm_list_add[i] = alloc_tree_mod_elem(dst, i + dst_offset,
+		    MOD_LOG_KEY_ADD, GFP_NOFS);
+		if (!tm_list_add[i]) {
+			ret = -ENOMEM;
+			goto free_tms;
+		}
+	}
+
+	if (tree_mod_dont_log(fs_info, NULL))
+		goto free_tms;
+	locked = 1;
 
 	for (i = 0; i < nr_items; i++) {
-		ret = __tree_mod_log_insert_key(fs_info, src,
-						i + src_offset,
-						MOD_LOG_KEY_REMOVE, GFP_NOFS);
-		BUG_ON(ret < 0);
-		ret = __tree_mod_log_insert_key(fs_info, dst,
-						     i + dst_offset,
-						     MOD_LOG_KEY_ADD,
-						     GFP_NOFS);
-		BUG_ON(ret < 0);
+		ret = __tree_mod_log_insert(fs_info, tm_list_rem[i]);
+		if (ret)
+			goto free_tms;
+		ret = __tree_mod_log_insert(fs_info, tm_list_add[i]);
+		if (ret)
+			goto free_tms;
 	}
+
+	tree_mod_log_write_unlock(fs_info);
+	kfree(tm_list);
+
+	return 0;
+
+free_tms:
+	for (i = 0; i < nr_items * 2; i++) {
+		if (tm_list[i] && !RB_EMPTY_NODE(&tm_list[i]->node))
+			rb_erase(&tm_list[i]->node, &fs_info->tree_mod_log);
+		kfree(tm_list[i]);
+	}
+	if (locked)
+		tree_mod_log_write_unlock(fs_info);
+	kfree(tm_list);
+
+	return ret;
 }
 
 static inline void
@@ -772,18 +920,58 @@
 {
 	int ret;
 
-	ret = __tree_mod_log_insert_key(fs_info, eb, slot,
+	ret = tree_mod_log_insert_key(fs_info, eb, slot,
 					MOD_LOG_KEY_REPLACE,
 					atomic ? GFP_ATOMIC : GFP_NOFS);
 	BUG_ON(ret < 0);
 }
 
-static noinline void
+static noinline int
 tree_mod_log_free_eb(struct btrfs_fs_info *fs_info, struct extent_buffer *eb)
 {
+	struct tree_mod_elem **tm_list = NULL;
+	int nritems = 0;
+	int i;
+	int ret = 0;
+
+	if (btrfs_header_level(eb) == 0)
+		return 0;
+
+	if (!tree_mod_need_log(fs_info, NULL))
+		return 0;
+
+	nritems = btrfs_header_nritems(eb);
+	tm_list = kzalloc(nritems * sizeof(struct tree_mod_elem *),
+			  GFP_NOFS);
+	if (!tm_list)
+		return -ENOMEM;
+
+	for (i = 0; i < nritems; i++) {
+		tm_list[i] = alloc_tree_mod_elem(eb, i,
+		    MOD_LOG_KEY_REMOVE_WHILE_FREEING, GFP_NOFS);
+		if (!tm_list[i]) {
+			ret = -ENOMEM;
+			goto free_tms;
+		}
+	}
+
 	if (tree_mod_dont_log(fs_info, eb))
-		return;
-	__tree_mod_log_free_eb(fs_info, eb);
+		goto free_tms;
+
+	ret = __tree_mod_log_free_eb(fs_info, tm_list, nritems);
+	tree_mod_log_write_unlock(fs_info);
+	if (ret)
+		goto free_tms;
+	kfree(tm_list);
+
+	return 0;
+
+free_tms:
+	for (i = 0; i < nritems; i++)
+		kfree(tm_list[i]);
+	kfree(tm_list);
+
+	return ret;
 }
 
 static noinline void
@@ -1041,8 +1229,13 @@
 		btrfs_set_node_ptr_generation(parent, parent_slot,
 					      trans->transid);
 		btrfs_mark_buffer_dirty(parent);
-		if (last_ref)
-			tree_mod_log_free_eb(root->fs_info, buf);
+		if (last_ref) {
+			ret = tree_mod_log_free_eb(root->fs_info, buf);
+			if (ret) {
+				btrfs_abort_transaction(trans, root, ret);
+				return ret;
+			}
+		}
 		btrfs_free_tree_block(trans, root, buf, parent_start,
 				      last_ref);
 	}
@@ -1287,8 +1480,8 @@
 		old = read_tree_block(root, logical, blocksize, 0);
 		if (WARN_ON(!old || !extent_buffer_uptodate(old))) {
 			free_extent_buffer(old);
-			pr_warn("btrfs: failed to read tree block %llu from get_old_root\n",
-				logical);
+			btrfs_warn(root->fs_info,
+				"failed to read tree block %llu from get_old_root", logical);
 		} else {
 			eb = btrfs_clone_extent_buffer(old);
 			free_extent_buffer(old);
@@ -2462,6 +2655,49 @@
 	return 0;
 }
 
+int btrfs_find_item(struct btrfs_root *fs_root, struct btrfs_path *found_path,
+		u64 iobjectid, u64 ioff, u8 key_type,
+		struct btrfs_key *found_key)
+{
+	int ret;
+	struct btrfs_key key;
+	struct extent_buffer *eb;
+	struct btrfs_path *path;
+
+	key.type = key_type;
+	key.objectid = iobjectid;
+	key.offset = ioff;
+
+	if (found_path == NULL) {
+		path = btrfs_alloc_path();
+		if (!path)
+			return -ENOMEM;
+	} else
+		path = found_path;
+
+	ret = btrfs_search_slot(NULL, fs_root, &key, path, 0, 0);
+	if ((ret < 0) || (found_key == NULL)) {
+		if (path != found_path)
+			btrfs_free_path(path);
+		return ret;
+	}
+
+	eb = path->nodes[0];
+	if (ret && path->slots[0] >= btrfs_header_nritems(eb)) {
+		ret = btrfs_next_leaf(fs_root, path);
+		if (ret)
+			return ret;
+		eb = path->nodes[0];
+	}
+
+	btrfs_item_key_to_cpu(eb, found_key, path->slots[0]);
+	if (found_key->type != key.type ||
+			found_key->objectid != key.objectid)
+		return 1;
+
+	return 0;
+}
+
 /*
  * look for key in the tree.  path is filled in with nodes along the way
  * if key is found, we return zero and you can find the item in the leaf
@@ -2495,6 +2731,7 @@
 	lowest_level = p->lowest_level;
 	WARN_ON(lowest_level && ins_len > 0);
 	WARN_ON(p->nodes[0] != NULL);
+	BUG_ON(!cow && ins_len);
 
 	if (ins_len < 0) {
 		lowest_unlock = 2;
@@ -2603,8 +2840,6 @@
 			}
 		}
 cow_done:
-		BUG_ON(!cow && ins_len);
-
 		p->nodes[level] = b;
 		btrfs_clear_path_blocking(p, NULL, 0);
 
@@ -2614,13 +2849,19 @@
 		 * It is safe to drop the lock on our parent before we
 		 * go through the expensive btree search on b.
 		 *
-		 * If cow is true, then we might be changing slot zero,
-		 * which may require changing the parent.  So, we can't
-		 * drop the lock until after we know which slot we're
-		 * operating on.
+		 * If we're inserting or deleting (ins_len != 0), then we might
+		 * be changing slot zero, which may require changing the parent.
+		 * So, we can't drop the lock until after we know which slot
+		 * we're operating on.
 		 */
-		if (!cow)
-			btrfs_unlock_up_safe(p, level + 1);
+		if (!ins_len && !p->keep_locks) {
+			int u = level + 1;
+
+			if (u < BTRFS_MAX_LEVEL && p->locks[u]) {
+				btrfs_tree_unlock_rw(p->nodes[u], p->locks[u]);
+				p->locks[u] = 0;
+			}
+		}
 
 		ret = key_search(b, key, level, &prev_cmp, &slot);
 
@@ -2648,7 +2889,7 @@
 			 * which means we must have a write lock
 			 * on the parent
 			 */
-			if (slot == 0 && cow &&
+			if (slot == 0 && ins_len &&
 			    write_lock_level < level + 1) {
 				write_lock_level = level + 1;
 				btrfs_release_path(p);
@@ -2901,7 +3142,9 @@
 			if (ret < 0)
 				return ret;
 			if (!ret) {
-				p->slots[0] = btrfs_header_nritems(leaf) - 1;
+				leaf = p->nodes[0];
+				if (p->slots[0] == btrfs_header_nritems(leaf))
+					p->slots[0]--;
 				return 0;
 			}
 			if (!return_any)
@@ -3022,8 +3265,12 @@
 	} else
 		push_items = min(src_nritems - 8, push_items);
 
-	tree_mod_log_eb_copy(root->fs_info, dst, src, dst_nritems, 0,
-			     push_items);
+	ret = tree_mod_log_eb_copy(root->fs_info, dst, src, dst_nritems, 0,
+				   push_items);
+	if (ret) {
+		btrfs_abort_transaction(trans, root, ret);
+		return ret;
+	}
 	copy_extent_buffer(dst, src,
 			   btrfs_node_key_ptr_offset(dst_nritems),
 			   btrfs_node_key_ptr_offset(0),
@@ -3093,8 +3340,12 @@
 				      (dst_nritems) *
 				      sizeof(struct btrfs_key_ptr));
 
-	tree_mod_log_eb_copy(root->fs_info, dst, src, 0,
-			     src_nritems - push_items, push_items);
+	ret = tree_mod_log_eb_copy(root->fs_info, dst, src, 0,
+				   src_nritems - push_items, push_items);
+	if (ret) {
+		btrfs_abort_transaction(trans, root, ret);
+		return ret;
+	}
 	copy_extent_buffer(dst, src,
 			   btrfs_node_key_ptr_offset(0),
 			   btrfs_node_key_ptr_offset(src_nritems - push_items),
@@ -3295,7 +3546,12 @@
 			    btrfs_header_chunk_tree_uuid(split),
 			    BTRFS_UUID_SIZE);
 
-	tree_mod_log_eb_copy(root->fs_info, split, c, 0, mid, c_nritems - mid);
+	ret = tree_mod_log_eb_copy(root->fs_info, split, c, 0,
+				   mid, c_nritems - mid);
+	if (ret) {
+		btrfs_abort_transaction(trans, root, ret);
+		return ret;
+	}
 	copy_extent_buffer(split, c,
 			   btrfs_node_key_ptr_offset(0),
 			   btrfs_node_key_ptr_offset(mid),
@@ -3362,8 +3618,8 @@
 	int ret;
 	ret = BTRFS_LEAF_DATA_SIZE(root) - leaf_space_used(leaf, 0, nritems);
 	if (ret < 0) {
-		printk(KERN_CRIT "leaf free space ret %d, leaf data size %lu, "
-		       "used %d nritems %d\n",
+		btrfs_crit(root->fs_info,
+			"leaf free space ret %d, leaf data size %lu, used %d nritems %d",
 		       ret, (unsigned long) BTRFS_LEAF_DATA_SIZE(root),
 		       leaf_space_used(leaf, 0, nritems), nritems);
 	}
@@ -3571,6 +3827,19 @@
 	if (left_nritems == 0)
 		goto out_unlock;
 
+	if (path->slots[0] == left_nritems && !empty) {
+		/* Key greater than all keys in the leaf, right neighbor has
+		 * enough room for it and we're not emptying our leaf to delete
+		 * it, therefore use right neighbor to insert the new item and
+		 * no need to touch/dirty our left leaf. */
+		btrfs_tree_unlock(left);
+		free_extent_buffer(left);
+		path->nodes[0] = right;
+		path->slots[0] = 0;
+		path->slots[1]++;
+		return 0;
+	}
+
 	return __push_leaf_right(trans, root, path, min_data_size, empty,
 				right, free_space, left_nritems, min_slot);
 out_unlock:
@@ -3887,14 +4156,17 @@
 	int progress = 0;
 	int slot;
 	u32 nritems;
+	int space_needed = data_size;
 
 	slot = path->slots[0];
+	if (slot < btrfs_header_nritems(path->nodes[0]))
+		space_needed -= btrfs_leaf_free_space(root, path->nodes[0]);
 
 	/*
 	 * try to push all the items after our slot into the
 	 * right leaf
 	 */
-	ret = push_leaf_right(trans, root, path, 1, data_size, 0, slot);
+	ret = push_leaf_right(trans, root, path, 1, space_needed, 0, slot);
 	if (ret < 0)
 		return ret;
 
@@ -3914,7 +4186,7 @@
 
 	/* try to push all the items before our slot into the next leaf */
 	slot = path->slots[0];
-	ret = push_leaf_left(trans, root, path, 1, data_size, 0, slot);
+	ret = push_leaf_left(trans, root, path, 1, space_needed, 0, slot);
 	if (ret < 0)
 		return ret;
 
@@ -3958,13 +4230,18 @@
 
 	/* first try to make some room by pushing left and right */
 	if (data_size && path->nodes[1]) {
-		wret = push_leaf_right(trans, root, path, data_size,
-				       data_size, 0, 0);
+		int space_needed = data_size;
+
+		if (slot < btrfs_header_nritems(l))
+			space_needed -= btrfs_leaf_free_space(root, l);
+
+		wret = push_leaf_right(trans, root, path, space_needed,
+				       space_needed, 0, 0);
 		if (wret < 0)
 			return wret;
 		if (wret) {
-			wret = push_leaf_left(trans, root, path, data_size,
-					      data_size, 0, (u32)-1);
+			wret = push_leaf_left(trans, root, path, space_needed,
+					      space_needed, 0, (u32)-1);
 			if (wret < 0)
 				return wret;
 		}
@@ -4432,7 +4709,7 @@
 	BUG_ON(slot < 0);
 	if (slot >= nritems) {
 		btrfs_print_leaf(root, leaf);
-		printk(KERN_CRIT "slot %d too large, nritems %d\n",
+		btrfs_crit(root->fs_info, "slot %d too large, nritems %d",
 		       slot, nritems);
 		BUG_ON(1);
 	}
@@ -4495,7 +4772,7 @@
 
 	if (btrfs_leaf_free_space(root, leaf) < total_size) {
 		btrfs_print_leaf(root, leaf);
-		printk(KERN_CRIT "not enough freespace need %u have %d\n",
+		btrfs_crit(root->fs_info, "not enough freespace need %u have %d",
 		       total_size, btrfs_leaf_free_space(root, leaf));
 		BUG();
 	}
@@ -4505,7 +4782,7 @@
 
 		if (old_data < data_end) {
 			btrfs_print_leaf(root, leaf);
-			printk(KERN_CRIT "slot %d old_data %d data_end %d\n",
+			btrfs_crit(root->fs_info, "slot %d old_data %d data_end %d",
 			       slot, old_data, data_end);
 			BUG_ON(1);
 		}
@@ -4817,7 +5094,7 @@
  * This may release the path, and so you may lose any locks held at the
  * time you call it.
  */
-static int btrfs_prev_leaf(struct btrfs_root *root, struct btrfs_path *path)
+int btrfs_prev_leaf(struct btrfs_root *root, struct btrfs_path *path)
 {
 	struct btrfs_key key;
 	struct btrfs_disk_key found_key;
@@ -5240,7 +5517,7 @@
 
 			if (!left_start_ctransid || !right_start_ctransid) {
 				WARN(1, KERN_WARNING
-					"btrfs: btrfs_compare_tree detected "
+					"BTRFS: btrfs_compare_tree detected "
 					"a change in one of the trees while "
 					"iterating. This is probably a "
 					"bug.\n");
@@ -5680,3 +5957,46 @@
 	}
 	return 1;
 }
+
+/*
+ * search in the extent tree to find a previous Metadata/Data extent item with
+ * min objectid.
+ *
+ * returns 0 if something is found, 1 if nothing was found and < 0 on error
+ */
+int btrfs_previous_extent_item(struct btrfs_root *root,
+			struct btrfs_path *path, u64 min_objectid)
+{
+	struct btrfs_key found_key;
+	struct extent_buffer *leaf;
+	u32 nritems;
+	int ret;
+
+	while (1) {
+		if (path->slots[0] == 0) {
+			btrfs_set_path_blocking(path);
+			ret = btrfs_prev_leaf(root, path);
+			if (ret != 0)
+				return ret;
+		} else {
+			path->slots[0]--;
+		}
+		leaf = path->nodes[0];
+		nritems = btrfs_header_nritems(leaf);
+		if (nritems == 0)
+			return 1;
+		if (path->slots[0] == nritems)
+			path->slots[0]--;
+
+		btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
+		if (found_key.objectid < min_objectid)
+			break;
+		if (found_key.type == BTRFS_EXTENT_ITEM_KEY ||
+		    found_key.type == BTRFS_METADATA_ITEM_KEY)
+			return 0;
+		if (found_key.objectid == min_objectid &&
+		    found_key.type < BTRFS_EXTENT_ITEM_KEY)
+			break;
+	}
+	return 1;
+}
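
The ctree.c rework above changes every tree-mod-log insertion from "BUG_ON() on failure inside the lock" to "allocate all elements first, take tree_mod_log_write_lock(), insert, and unwind on error"; callers such as tree_mod_log_eb_copy() now propagate the error and abort the transaction instead. A compact userspace analogue of that shape, using a pthread mutex and a trivial one-slot log in place of the kernel's rwlock and rbtree (every name here is illustrative):

#include <pthread.h>
#include <stdlib.h>

struct mod_elem { unsigned long long seq; };

static pthread_mutex_t log_lock = PTHREAD_MUTEX_INITIALIZER;
static int log_enabled;			/* stands in for !list_empty(&tree_mod_seq_list) */
static struct mod_elem *log_head;	/* trivial one-slot "log" */

static int do_insert(struct mod_elem *tm)	/* __tree_mod_log_insert() stand-in */
{
	if (log_head)
		return -1;			/* -EEXIST in the kernel */
	log_head = tm;
	return 0;
}

static int log_one_key(void)
{
	struct mod_elem *tm;
	int ret;

	/* Allocate before taking the lock, as the reworked helpers do. */
	tm = calloc(1, sizeof(*tm));
	if (!tm)
		return -1;			/* -ENOMEM */

	pthread_mutex_lock(&log_lock);		/* tree_mod_log_write_lock() analogue */
	if (!log_enabled) {
		pthread_mutex_unlock(&log_lock);
		free(tm);			/* logging is off: drop the element, succeed */
		return 0;
	}
	ret = do_insert(tm);
	pthread_mutex_unlock(&log_lock);
	if (ret)
		free(tm);			/* insert failed, tm was never linked */
	return ret;
}

The multi-element variants in the patch (the tm_list arrays) follow the same outline and additionally rb_erase() any already-inserted elements before freeing them on the free_tms path.
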
diff --git a/fs/btrfs/ctree.h b/fs/btrfs/ctree.h
index 7506825..2c1a42c 100644
--- a/fs/btrfs/ctree.h
+++ b/fs/btrfs/ctree.h
@@ -521,9 +521,15 @@
 #define BTRFS_FEATURE_INCOMPAT_EXTENDED_IREF	(1ULL << 6)
 #define BTRFS_FEATURE_INCOMPAT_RAID56		(1ULL << 7)
 #define BTRFS_FEATURE_INCOMPAT_SKINNY_METADATA	(1ULL << 8)
+#define BTRFS_FEATURE_INCOMPAT_NO_HOLES		(1ULL << 9)
 
 #define BTRFS_FEATURE_COMPAT_SUPP		0ULL
+#define BTRFS_FEATURE_COMPAT_SAFE_SET		0ULL
+#define BTRFS_FEATURE_COMPAT_SAFE_CLEAR		0ULL
 #define BTRFS_FEATURE_COMPAT_RO_SUPP		0ULL
+#define BTRFS_FEATURE_COMPAT_RO_SAFE_SET	0ULL
+#define BTRFS_FEATURE_COMPAT_RO_SAFE_CLEAR	0ULL
+
 #define BTRFS_FEATURE_INCOMPAT_SUPP			\
 	(BTRFS_FEATURE_INCOMPAT_MIXED_BACKREF |		\
 	 BTRFS_FEATURE_INCOMPAT_DEFAULT_SUBVOL |	\
@@ -532,7 +538,12 @@
 	 BTRFS_FEATURE_INCOMPAT_COMPRESS_LZO |		\
 	 BTRFS_FEATURE_INCOMPAT_RAID56 |		\
 	 BTRFS_FEATURE_INCOMPAT_EXTENDED_IREF |		\
-	 BTRFS_FEATURE_INCOMPAT_SKINNY_METADATA)
+	 BTRFS_FEATURE_INCOMPAT_SKINNY_METADATA |	\
+	 BTRFS_FEATURE_INCOMPAT_NO_HOLES)
+
+#define BTRFS_FEATURE_INCOMPAT_SAFE_SET			\
+	(BTRFS_FEATURE_INCOMPAT_EXTENDED_IREF)
+#define BTRFS_FEATURE_INCOMPAT_SAFE_CLEAR		0ULL
 
 /*
  * A leaf is full of items. offset and size tell us where to find
@@ -1094,7 +1105,7 @@
 } __attribute__ ((__packed__));
 
 struct btrfs_space_info {
-	u64 flags;
+	spinlock_t lock;
 
 	u64 total_bytes;	/* total bytes in the space,
 				   this doesn't take mirrors into account */
@@ -1104,14 +1115,25 @@
 				   transaction finishes */
 	u64 bytes_reserved;	/* total bytes the allocator has reserved for
 				   current allocations */
-	u64 bytes_readonly;	/* total bytes that are read only */
-
 	u64 bytes_may_use;	/* number of bytes that may be used for
 				   delalloc/allocations */
+	u64 bytes_readonly;	/* total bytes that are read only */
+
+	unsigned int full:1;	/* indicates that we cannot allocate any more
+				   chunks for this space */
+	unsigned int chunk_alloc:1;	/* set if we are allocating a chunk */
+
+	unsigned int flush:1;		/* set if we are trying to make space */
+
+	unsigned int force_alloc;	/* set if we need to force a chunk
+					   alloc for this space */
+
 	u64 disk_used;		/* total bytes used on disk */
 	u64 disk_total;		/* total bytes on disk, takes mirrors into
 				   account */
 
+	u64 flags;
+
 	/*
 	 * bytes_pinned is kept in line with what is actually pinned, as in
 	 * we've called update_block_group and dropped the bytes_used counter
@@ -1124,22 +1146,15 @@
 	 */
 	struct percpu_counter total_bytes_pinned;
 
-	unsigned int full:1;	/* indicates that we cannot allocate any more
-				   chunks for this space */
-	unsigned int chunk_alloc:1;	/* set if we are allocating a chunk */
-
-	unsigned int flush:1;		/* set if we are trying to make space */
-
-	unsigned int force_alloc;	/* set if we need to force a chunk
-					   alloc for this space */
-
 	struct list_head list;
 
+	struct rw_semaphore groups_sem;
 	/* for block groups in our same type */
 	struct list_head block_groups[BTRFS_NR_RAID_TYPES];
-	spinlock_t lock;
-	struct rw_semaphore groups_sem;
 	wait_queue_head_t wait;
+
+	struct kobject kobj;
+	struct kobject block_group_kobjs[BTRFS_NR_RAID_TYPES];
 };
 
 #define	BTRFS_BLOCK_RSV_GLOBAL		1
@@ -1346,6 +1361,7 @@
 
 	u64 generation;
 	u64 last_trans_committed;
+	u64 avg_delayed_ref_runtime;
 
 	/*
 	 * this is updated to the current trans every time a full commit
@@ -1448,7 +1464,6 @@
 	spinlock_t tree_mod_seq_lock;
 	atomic64_t tree_mod_seq;
 	struct list_head tree_mod_seq_list;
-	struct seq_list tree_mod_seq_elem;
 
 	/* this protects tree_mod_log */
 	rwlock_t tree_mod_log_lock;
@@ -1515,6 +1530,8 @@
 	int thread_pool_size;
 
 	struct kobject super_kobj;
+	struct kobject *space_info_kobj;
+	struct kobject *device_dir_kobj;
 	struct completion kobj_unregister;
 	int do_barriers;
 	int closing;
@@ -1643,6 +1660,10 @@
 	spinlock_t reada_lock;
 	struct radix_tree_root reada_tree;
 
+	/* Extent buffer radix tree */
+	spinlock_t buffer_lock;
+	struct radix_tree_root buffer_radix;
+
 	/* next backup root to be overwritten */
 	int backup_root_index;
 
@@ -1795,6 +1816,12 @@
 	struct list_head ordered_extents;
 	struct list_head ordered_root;
 	u64 nr_ordered_extents;
+
+	/*
+	 * Number of currently running SEND ioctls to prevent
+	 * manipulation with the read-only status via SUBVOL_SETFLAGS
+	 */
+	int send_in_progress;
 };
 
 struct btrfs_ioctl_defrag_range_args {
@@ -1997,6 +2024,7 @@
 #define BTRFS_MOUNT_CHECK_INTEGRITY_INCLUDING_EXTENT_DATA (1 << 21)
 #define BTRFS_MOUNT_PANIC_ON_FATAL_ERROR	(1 << 22)
 #define BTRFS_MOUNT_RESCAN_UUID_TREE	(1 << 23)
+#define	BTRFS_MOUNT_CHANGE_INODE_CACHE	(1 << 24)
 
 #define BTRFS_DEFAULT_COMMIT_INTERVAL	(30)
 
@@ -2925,6 +2953,10 @@
 			 struct btrfs_file_extent_item, generation, 64);
 BTRFS_SETGET_STACK_FUNCS(stack_file_extent_num_bytes,
 			 struct btrfs_file_extent_item, num_bytes, 64);
+BTRFS_SETGET_STACK_FUNCS(stack_file_extent_disk_num_bytes,
+			 struct btrfs_file_extent_item, disk_num_bytes, 64);
+BTRFS_SETGET_STACK_FUNCS(stack_file_extent_compression,
+			 struct btrfs_file_extent_item, compression, 8);
 
 static inline unsigned long
 btrfs_file_extent_inline_start(struct btrfs_file_extent_item *e)
@@ -2958,15 +2990,6 @@
 BTRFS_SETGET_FUNCS(file_extent_other_encoding, struct btrfs_file_extent_item,
 		   other_encoding, 16);
 
-/* this returns the number of file bytes represented by the inline item.
- * If an item is compressed, this is the uncompressed size
- */
-static inline u32 btrfs_file_extent_inline_len(struct extent_buffer *eb,
-					       struct btrfs_file_extent_item *e)
-{
-	return btrfs_file_extent_ram_bytes(eb, e);
-}
-
 /*
  * this returns the number of bytes used by the item on disk, minus the
  * size of any extent headers.  If a file is compressed on disk, this is
@@ -2980,6 +3003,32 @@
 	return btrfs_item_size(eb, e) - offset;
 }
 
+/* this returns the number of file bytes represented by the inline item.
+ * If an item is compressed, this is the uncompressed size
+ */
+static inline u32 btrfs_file_extent_inline_len(struct extent_buffer *eb,
+					       int slot,
+					       struct btrfs_file_extent_item *fi)
+{
+	struct btrfs_map_token token;
+
+	btrfs_init_map_token(&token);
+	/*
+	 * return the space used on disk if this item isn't
+	 * compressed or encoded
+	 */
+	if (btrfs_token_file_extent_compression(eb, fi, &token) == 0 &&
+	    btrfs_token_file_extent_encryption(eb, fi, &token) == 0 &&
+	    btrfs_token_file_extent_other_encoding(eb, fi, &token) == 0) {
+		return btrfs_file_extent_inline_item_len(eb,
+							 btrfs_item_nr(slot));
+	}
+
+	/* otherwise use the ram bytes field */
+	return btrfs_token_file_extent_ram_bytes(eb, fi, &token);
+}
+
+
 /* btrfs_dev_stats_item */
 static inline u64 btrfs_dev_stats_value(struct extent_buffer *eb,
 					struct btrfs_dev_stats_item *ptr,
@@ -3143,6 +3192,8 @@
 
 int btrfs_should_throttle_delayed_refs(struct btrfs_trans_handle *trans,
 				       struct btrfs_root *root);
+int btrfs_check_space_for_delayed_refs(struct btrfs_trans_handle *trans,
+				       struct btrfs_root *root);
 void btrfs_put_block_group(struct btrfs_block_group_cache *cache);
 int btrfs_run_delayed_refs(struct btrfs_trans_handle *trans,
 			   struct btrfs_root *root, unsigned long count);
@@ -3163,6 +3214,7 @@
 						 struct btrfs_fs_info *info,
 						 u64 bytenr);
 void btrfs_put_block_group(struct btrfs_block_group_cache *cache);
+int get_block_group_index(struct btrfs_block_group_cache *cache);
 struct extent_buffer *btrfs_alloc_free_block(struct btrfs_trans_handle *trans,
 					struct btrfs_root *root, u32 blocksize,
 					u64 parent, u64 root_objectid,
@@ -3301,6 +3353,8 @@
 int btrfs_previous_item(struct btrfs_root *root,
 			struct btrfs_path *path, u64 min_objectid,
 			int type);
+int btrfs_previous_extent_item(struct btrfs_root *root,
+			struct btrfs_path *path, u64 min_objectid);
 void btrfs_set_item_key_safe(struct btrfs_root *root, struct btrfs_path *path,
 			     struct btrfs_key *new_key);
 struct extent_buffer *btrfs_root_node(struct btrfs_root *root);
@@ -3350,6 +3404,8 @@
 			 struct btrfs_root *root,
 			 struct btrfs_path *path,
 			 struct btrfs_key *new_key);
+int btrfs_find_item(struct btrfs_root *fs_root, struct btrfs_path *path,
+		u64 inum, u64 ioff, u8 key_type, struct btrfs_key *found_key);
 int btrfs_search_slot(struct btrfs_trans_handle *trans, struct btrfs_root
 		      *root, struct btrfs_key *key, struct btrfs_path *p, int
 		      ins_len, int cow);
@@ -3399,6 +3455,7 @@
 }
 
 int btrfs_next_leaf(struct btrfs_root *root, struct btrfs_path *path);
+int btrfs_prev_leaf(struct btrfs_root *root, struct btrfs_path *path);
 int btrfs_next_old_leaf(struct btrfs_root *root, struct btrfs_path *path,
 			u64 time_seq);
 static inline int btrfs_next_old_item(struct btrfs_root *root,
@@ -3563,12 +3620,6 @@
 			   struct btrfs_root *root,
 			   const char *name, int name_len,
 			   u64 inode_objectid, u64 ref_objectid, u64 *index);
-int btrfs_get_inode_ref_index(struct btrfs_trans_handle *trans,
-			      struct btrfs_root *root,
-			      struct btrfs_path *path,
-			      const char *name, int name_len,
-			      u64 inode_objectid, u64 ref_objectid, int mod,
-			      u64 *ret_index);
 int btrfs_insert_empty_inode(struct btrfs_trans_handle *trans,
 			     struct btrfs_root *root,
 			     struct btrfs_path *path, u64 objectid);
@@ -3676,7 +3727,9 @@
 int btrfs_set_extent_delalloc(struct inode *inode, u64 start, u64 end,
 			      struct extent_state **cached_state);
 int btrfs_create_subvol_root(struct btrfs_trans_handle *trans,
-			     struct btrfs_root *new_root, u64 new_dirid);
+			     struct btrfs_root *new_root,
+			     struct btrfs_root *parent_root,
+			     u64 new_dirid);
 int btrfs_merge_bio_hook(int rw, struct page *page, unsigned long offset,
 			 size_t size, struct bio *bio,
 			 unsigned long bio_flags);
@@ -3745,7 +3798,10 @@
 int __btrfs_drop_extents(struct btrfs_trans_handle *trans,
 			 struct btrfs_root *root, struct inode *inode,
 			 struct btrfs_path *path, u64 start, u64 end,
-			 u64 *drop_end, int drop_cache);
+			 u64 *drop_end, int drop_cache,
+			 int replace_extent,
+			 u32 extent_item_size,
+			 int *key_inserted);
 int btrfs_drop_extents(struct btrfs_trans_handle *trans,
 		       struct btrfs_root *root, struct inode *inode, u64 start,
 		       u64 end, int drop_cache);
@@ -3764,6 +3820,8 @@
 /* sysfs.c */
 int btrfs_init_sysfs(void);
 void btrfs_exit_sysfs(void);
+int btrfs_sysfs_add_one(struct btrfs_fs_info *fs_info);
+void btrfs_sysfs_remove_one(struct btrfs_fs_info *fs_info);
 
 /* xattr.c */
 ssize_t btrfs_listxattr(struct dentry *dentry, char *buffer, size_t size);
@@ -3796,14 +3854,20 @@
 	btrfs_printk(fs_info, KERN_NOTICE fmt, ##args)
 #define btrfs_info(fs_info, fmt, args...) \
 	btrfs_printk(fs_info, KERN_INFO fmt, ##args)
+
+#ifdef DEBUG
 #define btrfs_debug(fs_info, fmt, args...) \
 	btrfs_printk(fs_info, KERN_DEBUG fmt, ##args)
+#else
+#define btrfs_debug(fs_info, fmt, args...) \
+    no_printk(KERN_DEBUG fmt, ##args)
+#endif
 
 #ifdef CONFIG_BTRFS_ASSERT
 
 static inline void assfail(char *expr, char *file, int line)
 {
-	printk(KERN_ERR "BTRFS assertion failed: %s, file: %s, line: %d",
+	pr_err("BTRFS: assertion failed: %s, file: %s, line: %d",
 	       expr, file, line);
 	BUG();
 }
@@ -3841,7 +3905,7 @@
 		if (!(features & flag)) {
 			features |= flag;
 			btrfs_set_super_incompat_flags(disk_super, features);
-			printk(KERN_INFO "btrfs: setting %llu feature flag\n",
+			btrfs_info(fs_info, "setting %llu feature flag",
 					 flag);
 		}
 		spin_unlock(&fs_info->super_lock);
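
ctree.h above reworks btrfs_file_extent_inline_len(): when the inline extent is neither compressed, encrypted, nor otherwise encoded it now returns the actual inline item length, and only falls back to ram_bytes for encoded data. A userspace sketch of that decision (types and field names are illustrative, not the kernel's accessors):

struct inline_extent {
	unsigned int item_bytes;	/* bytes of file data stored in the item */
	unsigned long long ram_bytes;	/* uncompressed size */
	int compression, encryption, other_encoding;
};

static unsigned long long inline_len(const struct inline_extent *e)
{
	if (e->compression == 0 && e->encryption == 0 && e->other_encoding == 0)
		return e->item_bytes;	/* plain inline data: item length is the file length */
	return e->ram_bytes;		/* encoded: report the uncompressed size */
}
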
diff --git a/fs/btrfs/delayed-inode.c b/fs/btrfs/delayed-inode.c
index 8d292fb..451b00c 100644
--- a/fs/btrfs/delayed-inode.c
+++ b/fs/btrfs/delayed-inode.c
@@ -55,8 +55,7 @@
 	delayed_node->inode_id = inode_id;
 	atomic_set(&delayed_node->refs, 0);
 	delayed_node->count = 0;
-	delayed_node->in_list = 0;
-	delayed_node->inode_dirty = 0;
+	delayed_node->flags = 0;
 	delayed_node->ins_root = RB_ROOT;
 	delayed_node->del_root = RB_ROOT;
 	mutex_init(&delayed_node->mutex);
@@ -172,7 +171,7 @@
 				     int mod)
 {
 	spin_lock(&root->lock);
-	if (node->in_list) {
+	if (test_bit(BTRFS_DELAYED_NODE_IN_LIST, &node->flags)) {
 		if (!list_empty(&node->p_list))
 			list_move_tail(&node->p_list, &root->prepare_list);
 		else if (mod)
@@ -182,7 +181,7 @@
 		list_add_tail(&node->p_list, &root->prepare_list);
 		atomic_inc(&node->refs);	/* inserted into list */
 		root->nodes++;
-		node->in_list = 1;
+		set_bit(BTRFS_DELAYED_NODE_IN_LIST, &node->flags);
 	}
 	spin_unlock(&root->lock);
 }
@@ -192,13 +191,13 @@
 				       struct btrfs_delayed_node *node)
 {
 	spin_lock(&root->lock);
-	if (node->in_list) {
+	if (test_bit(BTRFS_DELAYED_NODE_IN_LIST, &node->flags)) {
 		root->nodes--;
 		atomic_dec(&node->refs);	/* not in the list */
 		list_del_init(&node->n_list);
 		if (!list_empty(&node->p_list))
 			list_del_init(&node->p_list);
-		node->in_list = 0;
+		clear_bit(BTRFS_DELAYED_NODE_IN_LIST, &node->flags);
 	}
 	spin_unlock(&root->lock);
 }
@@ -231,7 +230,8 @@
 
 	delayed_root = node->root->fs_info->delayed_root;
 	spin_lock(&delayed_root->lock);
-	if (!node->in_list) {	/* not in the list */
+	if (!test_bit(BTRFS_DELAYED_NODE_IN_LIST, &node->flags)) {
+		/* not in the list */
 		if (list_empty(&delayed_root->node_list))
 			goto out;
 		p = delayed_root->node_list.next;
@@ -1004,9 +1004,10 @@
 {
 	struct btrfs_delayed_root *delayed_root;
 
-	if (delayed_node && delayed_node->inode_dirty) {
+	if (delayed_node &&
+	    test_bit(BTRFS_DELAYED_NODE_INODE_DIRTY, &delayed_node->flags)) {
 		BUG_ON(!delayed_node->root);
-		delayed_node->inode_dirty = 0;
+		clear_bit(BTRFS_DELAYED_NODE_INODE_DIRTY, &delayed_node->flags);
 		delayed_node->count--;
 
 		delayed_root = delayed_node->root->fs_info->delayed_root;
@@ -1014,6 +1015,18 @@
 	}
 }
 
+static void btrfs_release_delayed_iref(struct btrfs_delayed_node *delayed_node)
+{
+	struct btrfs_delayed_root *delayed_root;
+
+	ASSERT(delayed_node->root);
+	clear_bit(BTRFS_DELAYED_NODE_DEL_IREF, &delayed_node->flags);
+	delayed_node->count--;
+
+	delayed_root = delayed_node->root->fs_info->delayed_root;
+	finish_one_item(delayed_root);
+}
+
 static int __btrfs_update_delayed_inode(struct btrfs_trans_handle *trans,
 					struct btrfs_root *root,
 					struct btrfs_path *path,
@@ -1022,13 +1035,19 @@
 	struct btrfs_key key;
 	struct btrfs_inode_item *inode_item;
 	struct extent_buffer *leaf;
+	int mod;
 	int ret;
 
 	key.objectid = node->inode_id;
 	btrfs_set_key_type(&key, BTRFS_INODE_ITEM_KEY);
 	key.offset = 0;
 
-	ret = btrfs_lookup_inode(trans, root, path, &key, 1);
+	if (test_bit(BTRFS_DELAYED_NODE_DEL_IREF, &node->flags))
+		mod = -1;
+	else
+		mod = 1;
+
+	ret = btrfs_lookup_inode(trans, root, path, &key, mod);
 	if (ret > 0) {
 		btrfs_release_path(path);
 		return -ENOENT;
@@ -1036,19 +1055,58 @@
 		return ret;
 	}
 
-	btrfs_unlock_up_safe(path, 1);
 	leaf = path->nodes[0];
 	inode_item = btrfs_item_ptr(leaf, path->slots[0],
 				    struct btrfs_inode_item);
 	write_extent_buffer(leaf, &node->inode_item, (unsigned long)inode_item,
 			    sizeof(struct btrfs_inode_item));
 	btrfs_mark_buffer_dirty(leaf);
-	btrfs_release_path(path);
 
+	if (!test_bit(BTRFS_DELAYED_NODE_DEL_IREF, &node->flags))
+		goto no_iref;
+
+	path->slots[0]++;
+	if (path->slots[0] >= btrfs_header_nritems(leaf))
+		goto search;
+again:
+	btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
+	if (key.objectid != node->inode_id)
+		goto out;
+
+	if (key.type != BTRFS_INODE_REF_KEY &&
+	    key.type != BTRFS_INODE_EXTREF_KEY)
+		goto out;
+
+	/*
+	 * Delayed iref deletion is for the inode who has only one link,
+	 * Delayed iref deletion is for an inode that has only one link,
+	 * so there is only one iref. The case where several irefs share
+	 * the same item does not exist.
+	btrfs_del_item(trans, root, path);
+out:
+	btrfs_release_delayed_iref(node);
+no_iref:
+	btrfs_release_path(path);
+err_out:
 	btrfs_delayed_inode_release_metadata(root, node);
 	btrfs_release_delayed_inode(node);
 
-	return 0;
+	return ret;
+
+search:
+	btrfs_release_path(path);
+
+	btrfs_set_key_type(&key, BTRFS_INODE_EXTREF_KEY);
+	key.offset = -1;
+	ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
+	if (ret < 0)
+		goto err_out;
+	ASSERT(ret);
+
+	ret = 0;
+	leaf = path->nodes[0];
+	path->slots[0]--;
+	goto again;
 }
 
 static inline int btrfs_update_delayed_inode(struct btrfs_trans_handle *trans,
@@ -1059,7 +1117,7 @@
 	int ret;
 
 	mutex_lock(&node->mutex);
-	if (!node->inode_dirty) {
+	if (!test_bit(BTRFS_DELAYED_NODE_INODE_DIRTY, &node->flags)) {
 		mutex_unlock(&node->mutex);
 		return 0;
 	}
@@ -1203,7 +1261,7 @@
 		return 0;
 
 	mutex_lock(&delayed_node->mutex);
-	if (!delayed_node->inode_dirty) {
+	if (!test_bit(BTRFS_DELAYED_NODE_INODE_DIRTY, &delayed_node->flags)) {
 		mutex_unlock(&delayed_node->mutex);
 		btrfs_release_delayed_node(delayed_node);
 		return 0;
@@ -1227,7 +1285,7 @@
 	trans->block_rsv = &delayed_node->root->fs_info->delayed_block_rsv;
 
 	mutex_lock(&delayed_node->mutex);
-	if (delayed_node->inode_dirty)
+	if (test_bit(BTRFS_DELAYED_NODE_INODE_DIRTY, &delayed_node->flags))
 		ret = __btrfs_update_delayed_inode(trans, delayed_node->root,
 						   path, delayed_node);
 	else
@@ -1300,36 +1358,9 @@
 	trans->block_rsv = &root->fs_info->delayed_block_rsv;
 
 	__btrfs_commit_inode_delayed_items(trans, path, delayed_node);
-	/*
-	 * Maybe new delayed items have been inserted, so we need requeue
-	 * the work. Besides that, we must dequeue the empty delayed nodes
-	 * to avoid the race between delayed items balance and the worker.
-	 * The race like this:
-	 * 	Task1				Worker thread
-	 * 					count == 0, needn't requeue
-	 * 					  also needn't insert the
-	 * 					  delayed node into prepare
-	 * 					  list again.
-	 * 	add lots of delayed items
-	 * 	queue the delayed node
-	 * 	  already in the list,
-	 * 	  and not in the prepare
-	 * 	  list, it means the delayed
-	 * 	  node is being dealt with
-	 * 	  by the worker.
-	 * 	do delayed items balance
-	 * 	  the delayed node is being
-	 * 	  dealt with by the worker
-	 * 	  now, just wait.
-	 * 	  				the worker goto idle.
-	 * Task1 will sleep until the transaction is commited.
-	 */
-	mutex_lock(&delayed_node->mutex);
-	btrfs_dequeue_delayed_node(root->fs_info->delayed_root, delayed_node);
-	mutex_unlock(&delayed_node->mutex);
 
 	trans->block_rsv = block_rsv;
-	btrfs_end_transaction_dmeta(trans, root);
+	btrfs_end_transaction(trans, root);
 	btrfs_btree_balance_dirty_nodelay(root);
 
 release_path:
@@ -1376,52 +1407,41 @@
 	WARN_ON(btrfs_first_delayed_node(delayed_root));
 }
 
-static int refs_newer(struct btrfs_delayed_root *delayed_root,
-		      int seq, int count)
+static int could_end_wait(struct btrfs_delayed_root *delayed_root, int seq)
 {
 	int val = atomic_read(&delayed_root->items_seq);
 
-	if (val < seq || val >= seq + count)
+	if (val < seq || val >= seq + BTRFS_DELAYED_BATCH)
 		return 1;
+
+	if (atomic_read(&delayed_root->items) < BTRFS_DELAYED_BACKGROUND)
+		return 1;
+
 	return 0;
 }
 
 void btrfs_balance_delayed_items(struct btrfs_root *root)
 {
 	struct btrfs_delayed_root *delayed_root;
-	int seq;
 
 	delayed_root = btrfs_get_delayed_root(root);
 
 	if (atomic_read(&delayed_root->items) < BTRFS_DELAYED_BACKGROUND)
 		return;
 
-	seq = atomic_read(&delayed_root->items_seq);
-
 	if (atomic_read(&delayed_root->items) >= BTRFS_DELAYED_WRITEBACK) {
+		int seq;
 		int ret;
-		DEFINE_WAIT(__wait);
+
+		seq = atomic_read(&delayed_root->items_seq);
 
 		ret = btrfs_wq_run_delayed_node(delayed_root, root, 0);
 		if (ret)
 			return;
 
-		while (1) {
-			prepare_to_wait(&delayed_root->wait, &__wait,
-					TASK_INTERRUPTIBLE);
-
-			if (refs_newer(delayed_root, seq,
-				       BTRFS_DELAYED_BATCH) ||
-			    atomic_read(&delayed_root->items) <
-			    BTRFS_DELAYED_BACKGROUND) {
-				break;
-			}
-			if (!signal_pending(current))
-				schedule();
-			else
-				break;
-		}
-		finish_wait(&delayed_root->wait, &__wait);
+		wait_event_interruptible(delayed_root->wait,
+					 could_end_wait(delayed_root, seq));
+		return;
 	}
 
 	btrfs_wq_run_delayed_node(delayed_root, root, BTRFS_DELAYED_BATCH);
@@ -1472,9 +1492,9 @@
 	mutex_lock(&delayed_node->mutex);
 	ret = __btrfs_add_delayed_insertion_item(delayed_node, delayed_item);
 	if (unlikely(ret)) {
-		printk(KERN_ERR "err add delayed dir index item(name: %.*s) "
+		btrfs_err(root->fs_info, "err add delayed dir index item(name: %.*s) "
 				"into the insertion tree of the delayed node"
-				"(root id: %llu, inode id: %llu, errno: %d)\n",
+				"(root id: %llu, inode id: %llu, errno: %d)",
 				name_len, name, delayed_node->root->objectid,
 				delayed_node->inode_id, ret);
 		BUG();
@@ -1544,9 +1564,9 @@
 	mutex_lock(&node->mutex);
 	ret = __btrfs_add_delayed_deletion_item(node, item);
 	if (unlikely(ret)) {
-		printk(KERN_ERR "err add delayed dir index item(index: %llu) "
+		btrfs_err(root->fs_info, "err add delayed dir index item(index: %llu) "
 				"into the deletion tree of the delayed node"
-				"(root id: %llu, inode id: %llu, errno: %d)\n",
+				"(root id: %llu, inode id: %llu, errno: %d)",
 				index, node->root->objectid, node->inode_id,
 				ret);
 		BUG();
@@ -1759,7 +1779,7 @@
 		return -ENOENT;
 
 	mutex_lock(&delayed_node->mutex);
-	if (!delayed_node->inode_dirty) {
+	if (!test_bit(BTRFS_DELAYED_NODE_INODE_DIRTY, &delayed_node->flags)) {
 		mutex_unlock(&delayed_node->mutex);
 		btrfs_release_delayed_node(delayed_node);
 		return -ENOENT;
@@ -1810,7 +1830,7 @@
 		return PTR_ERR(delayed_node);
 
 	mutex_lock(&delayed_node->mutex);
-	if (delayed_node->inode_dirty) {
+	if (test_bit(BTRFS_DELAYED_NODE_INODE_DIRTY, &delayed_node->flags)) {
 		fill_stack_inode_item(trans, &delayed_node->inode_item, inode);
 		goto release_node;
 	}
@@ -1821,7 +1841,7 @@
 		goto release_node;
 
 	fill_stack_inode_item(trans, &delayed_node->inode_item, inode);
-	delayed_node->inode_dirty = 1;
+	set_bit(BTRFS_DELAYED_NODE_INODE_DIRTY, &delayed_node->flags);
 	delayed_node->count++;
 	atomic_inc(&root->fs_info->delayed_root->items);
 release_node:
@@ -1830,6 +1850,41 @@
 	return ret;
 }
 
+int btrfs_delayed_delete_inode_ref(struct inode *inode)
+{
+	struct btrfs_delayed_node *delayed_node;
+
+	delayed_node = btrfs_get_or_create_delayed_node(inode);
+	if (IS_ERR(delayed_node))
+		return PTR_ERR(delayed_node);
+
+	/*
+	 * We don't reserve space for inode ref deletion because:
+	 * - We ONLY do async inode ref deletion for an inode that has only
+	 *   one link (i_nlink == 1), which means there is only one inode ref.
+	 *   In most cases the inode ref and the inode item are in the same
+	 *   leaf, and we deal with them at the same time. Since we are sure
+	 *   we will reserve space for the inode item, it is unnecessary to
+	 *   reserve space for the inode ref deletion as well.
+	 * - If the inode ref and the inode item are not in the same leaf,
+	 *   we still needn't worry about an enospc problem, because we reserve
+	 *   much more space for the inode update than it needs.
+	 * - At worst, we can steal some space from the global reservation.
+	 *   That is very rare.
+	 */
+	mutex_lock(&delayed_node->mutex);
+	if (test_bit(BTRFS_DELAYED_NODE_DEL_IREF, &delayed_node->flags))
+		goto release_node;
+
+	set_bit(BTRFS_DELAYED_NODE_DEL_IREF, &delayed_node->flags);
+	delayed_node->count++;
+	atomic_inc(&BTRFS_I(inode)->root->fs_info->delayed_root->items);
+release_node:
+	mutex_unlock(&delayed_node->mutex);
+	btrfs_release_delayed_node(delayed_node);
+	return 0;
+}
+
 static void __btrfs_kill_delayed_node(struct btrfs_delayed_node *delayed_node)
 {
 	struct btrfs_root *root = delayed_node->root;
@@ -1852,7 +1907,10 @@
 		btrfs_release_delayed_item(prev_item);
 	}
 
-	if (delayed_node->inode_dirty) {
+	if (test_bit(BTRFS_DELAYED_NODE_DEL_IREF, &delayed_node->flags))
+		btrfs_release_delayed_iref(delayed_node);
+
+	if (test_bit(BTRFS_DELAYED_NODE_INODE_DIRTY, &delayed_node->flags)) {
 		btrfs_delayed_inode_release_metadata(root, delayed_node);
 		btrfs_release_delayed_inode(delayed_node);
 	}
diff --git a/fs/btrfs/delayed-inode.h b/fs/btrfs/delayed-inode.h
index a4b38f9..f70119f 100644
--- a/fs/btrfs/delayed-inode.h
+++ b/fs/btrfs/delayed-inode.h
@@ -48,6 +48,10 @@
 	wait_queue_head_t wait;
 };
 
+#define BTRFS_DELAYED_NODE_IN_LIST	0
+#define BTRFS_DELAYED_NODE_INODE_DIRTY	1
+#define BTRFS_DELAYED_NODE_DEL_IREF	2
+
 struct btrfs_delayed_node {
 	u64 inode_id;
 	u64 bytes_reserved;
@@ -65,8 +69,7 @@
 	struct btrfs_inode_item inode_item;
 	atomic_t refs;
 	u64 index_cnt;
-	bool in_list;
-	bool inode_dirty;
+	unsigned long flags;
 	int count;
 };
 
@@ -125,6 +128,7 @@
 int btrfs_delayed_update_inode(struct btrfs_trans_handle *trans,
 			       struct btrfs_root *root, struct inode *inode);
 int btrfs_fill_inode(struct inode *inode, u32 *rdev);
+int btrfs_delayed_delete_inode_ref(struct inode *inode);
 
 /* Used for drop dead root */
 void btrfs_kill_all_delayed_nodes(struct btrfs_root *root);
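The delayed-inode changes above replace the two bool fields with a single flags word so the state can be tested and set with the kernel's bit helpers (test_bit()/set_bit()). The standalone sketch below mirrors that pattern in plain C; the demo_* names and the simplified, non-atomic helpers are illustrative stand-ins, not btrfs or kernel APIs.

#include <stdio.h>

/* Illustrative flag bits, mirroring the new BTRFS_DELAYED_NODE_* values. */
#define NODE_IN_LIST     0
#define NODE_INODE_DIRTY 1
#define NODE_DEL_IREF    2

struct demo_node {
	unsigned long flags;	/* replaces separate bool in_list/inode_dirty */
};

/* Simplified, non-atomic stand-ins for the kernel's set_bit()/test_bit(). */
static void demo_set_bit(int nr, unsigned long *addr)
{
	*addr |= 1UL << nr;
}

static int demo_test_bit(int nr, const unsigned long *addr)
{
	return !!(*addr & (1UL << nr));
}

int main(void)
{
	struct demo_node node = { .flags = 0 };

	demo_set_bit(NODE_INODE_DIRTY, &node.flags);
	if (demo_test_bit(NODE_INODE_DIRTY, &node.flags))
		printf("inode item is dirty, delayed update pending\n");
	if (!demo_test_bit(NODE_DEL_IREF, &node.flags))
		printf("no delayed inode-ref deletion queued\n");
	return 0;
}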
diff --git a/fs/btrfs/delayed-ref.c b/fs/btrfs/delayed-ref.c
index e4d467b..f3bff89 100644
--- a/fs/btrfs/delayed-ref.c
+++ b/fs/btrfs/delayed-ref.c
@@ -161,35 +161,61 @@
 	return NULL;
 }
 
+/* insert a new ref to head ref rbtree */
+static struct btrfs_delayed_ref_head *htree_insert(struct rb_root *root,
+						   struct rb_node *node)
+{
+	struct rb_node **p = &root->rb_node;
+	struct rb_node *parent_node = NULL;
+	struct btrfs_delayed_ref_head *entry;
+	struct btrfs_delayed_ref_head *ins;
+	u64 bytenr;
+
+	ins = rb_entry(node, struct btrfs_delayed_ref_head, href_node);
+	bytenr = ins->node.bytenr;
+	while (*p) {
+		parent_node = *p;
+		entry = rb_entry(parent_node, struct btrfs_delayed_ref_head,
+				 href_node);
+
+		if (bytenr < entry->node.bytenr)
+			p = &(*p)->rb_left;
+		else if (bytenr > entry->node.bytenr)
+			p = &(*p)->rb_right;
+		else
+			return entry;
+	}
+
+	rb_link_node(node, parent_node, p);
+	rb_insert_color(node, root);
+	return NULL;
+}
+
 /*
  * find an head entry based on bytenr. This returns the delayed ref
  * head if it was able to find one, or NULL if nothing was in that spot.
  * If return_bigger is given, the next bigger entry is returned if no exact
  * match is found.
  */
-static struct btrfs_delayed_ref_node *find_ref_head(struct rb_root *root,
-				  u64 bytenr,
-				  struct btrfs_delayed_ref_node **last,
-				  int return_bigger)
+static struct btrfs_delayed_ref_head *
+find_ref_head(struct rb_root *root, u64 bytenr,
+	      struct btrfs_delayed_ref_head **last, int return_bigger)
 {
 	struct rb_node *n;
-	struct btrfs_delayed_ref_node *entry;
+	struct btrfs_delayed_ref_head *entry;
 	int cmp = 0;
 
 again:
 	n = root->rb_node;
 	entry = NULL;
 	while (n) {
-		entry = rb_entry(n, struct btrfs_delayed_ref_node, rb_node);
-		WARN_ON(!entry->in_tree);
+		entry = rb_entry(n, struct btrfs_delayed_ref_head, href_node);
 		if (last)
 			*last = entry;
 
-		if (bytenr < entry->bytenr)
+		if (bytenr < entry->node.bytenr)
 			cmp = -1;
-		else if (bytenr > entry->bytenr)
-			cmp = 1;
-		else if (!btrfs_delayed_ref_is_head(entry))
+		else if (bytenr > entry->node.bytenr)
 			cmp = 1;
 		else
 			cmp = 0;
@@ -203,12 +229,12 @@
 	}
 	if (entry && return_bigger) {
 		if (cmp > 0) {
-			n = rb_next(&entry->rb_node);
+			n = rb_next(&entry->href_node);
 			if (!n)
 				n = rb_first(root);
-			entry = rb_entry(n, struct btrfs_delayed_ref_node,
-					 rb_node);
-			bytenr = entry->bytenr;
+			entry = rb_entry(n, struct btrfs_delayed_ref_head,
+					 href_node);
+			bytenr = entry->node.bytenr;
 			return_bigger = 0;
 			goto again;
 		}
@@ -243,33 +269,38 @@
 
 static inline void drop_delayed_ref(struct btrfs_trans_handle *trans,
 				    struct btrfs_delayed_ref_root *delayed_refs,
+				    struct btrfs_delayed_ref_head *head,
 				    struct btrfs_delayed_ref_node *ref)
 {
-	rb_erase(&ref->rb_node, &delayed_refs->root);
+	if (btrfs_delayed_ref_is_head(ref)) {
+		head = btrfs_delayed_node_to_head(ref);
+		rb_erase(&head->href_node, &delayed_refs->href_root);
+	} else {
+		assert_spin_locked(&head->lock);
+		rb_erase(&ref->rb_node, &head->ref_root);
+	}
 	ref->in_tree = 0;
 	btrfs_put_delayed_ref(ref);
-	delayed_refs->num_entries--;
+	atomic_dec(&delayed_refs->num_entries);
 	if (trans->delayed_ref_updates)
 		trans->delayed_ref_updates--;
 }
 
 static int merge_ref(struct btrfs_trans_handle *trans,
 		     struct btrfs_delayed_ref_root *delayed_refs,
+		     struct btrfs_delayed_ref_head *head,
 		     struct btrfs_delayed_ref_node *ref, u64 seq)
 {
 	struct rb_node *node;
-	int merged = 0;
 	int mod = 0;
 	int done = 0;
 
-	node = rb_prev(&ref->rb_node);
-	while (node) {
+	node = rb_next(&ref->rb_node);
+	while (!done && node) {
 		struct btrfs_delayed_ref_node *next;
 
 		next = rb_entry(node, struct btrfs_delayed_ref_node, rb_node);
-		node = rb_prev(node);
-		if (next->bytenr != ref->bytenr)
-			break;
+		node = rb_next(node);
 		if (seq && next->seq >= seq)
 			break;
 		if (comp_entry(ref, next, 0))
@@ -289,12 +320,11 @@
 			mod = -next->ref_mod;
 		}
 
-		merged++;
-		drop_delayed_ref(trans, delayed_refs, next);
+		drop_delayed_ref(trans, delayed_refs, head, next);
 		ref->ref_mod += mod;
 		if (ref->ref_mod == 0) {
-			drop_delayed_ref(trans, delayed_refs, ref);
-			break;
+			drop_delayed_ref(trans, delayed_refs, head, ref);
+			done = 1;
 		} else {
 			/*
 			 * You can't have multiples of the same ref on a tree
@@ -303,13 +333,8 @@
 			WARN_ON(ref->type == BTRFS_TREE_BLOCK_REF_KEY ||
 				ref->type == BTRFS_SHARED_BLOCK_REF_KEY);
 		}
-
-		if (done)
-			break;
-		node = rb_prev(&ref->rb_node);
 	}
-
-	return merged;
+	return done;
 }
 
 void btrfs_merge_delayed_refs(struct btrfs_trans_handle *trans,
@@ -320,6 +345,14 @@
 	struct rb_node *node;
 	u64 seq = 0;
 
+	assert_spin_locked(&head->lock);
+	/*
+	 * We don't have too many refs to merge in the case of delayed data
+	 * refs.
+	 */
+	if (head->is_data)
+		return;
+
 	spin_lock(&fs_info->tree_mod_seq_lock);
 	if (!list_empty(&fs_info->tree_mod_seq_list)) {
 		struct seq_list *elem;
@@ -330,22 +363,19 @@
 	}
 	spin_unlock(&fs_info->tree_mod_seq_lock);
 
-	node = rb_prev(&head->node.rb_node);
+	node = rb_first(&head->ref_root);
 	while (node) {
 		struct btrfs_delayed_ref_node *ref;
 
 		ref = rb_entry(node, struct btrfs_delayed_ref_node,
 			       rb_node);
-		if (ref->bytenr != head->node.bytenr)
-			break;
-
 		/* We can't merge refs that are outside of our seq count */
 		if (seq && ref->seq >= seq)
 			break;
-		if (merge_ref(trans, delayed_refs, ref, seq))
-			node = rb_prev(&head->node.rb_node);
+		if (merge_ref(trans, delayed_refs, head, ref, seq))
+			node = rb_first(&head->ref_root);
 		else
-			node = rb_prev(node);
+			node = rb_next(&ref->rb_node);
 	}
 }
 
@@ -373,71 +403,52 @@
 	return ret;
 }
 
-int btrfs_find_ref_cluster(struct btrfs_trans_handle *trans,
-			   struct list_head *cluster, u64 start)
+struct btrfs_delayed_ref_head *
+btrfs_select_ref_head(struct btrfs_trans_handle *trans)
 {
-	int count = 0;
 	struct btrfs_delayed_ref_root *delayed_refs;
-	struct rb_node *node;
-	struct btrfs_delayed_ref_node *ref;
 	struct btrfs_delayed_ref_head *head;
+	u64 start;
+	bool loop = false;
 
 	delayed_refs = &trans->transaction->delayed_refs;
-	if (start == 0) {
-		node = rb_first(&delayed_refs->root);
-	} else {
-		ref = NULL;
-		find_ref_head(&delayed_refs->root, start + 1, &ref, 1);
-		if (ref) {
-			node = &ref->rb_node;
-		} else
-			node = rb_first(&delayed_refs->root);
-	}
+
 again:
-	while (node && count < 32) {
-		ref = rb_entry(node, struct btrfs_delayed_ref_node, rb_node);
-		if (btrfs_delayed_ref_is_head(ref)) {
-			head = btrfs_delayed_node_to_head(ref);
-			if (list_empty(&head->cluster)) {
-				list_add_tail(&head->cluster, cluster);
-				delayed_refs->run_delayed_start =
-					head->node.bytenr;
-				count++;
-
-				WARN_ON(delayed_refs->num_heads_ready == 0);
-				delayed_refs->num_heads_ready--;
-			} else if (count) {
-				/* the goal of the clustering is to find extents
-				 * that are likely to end up in the same extent
-				 * leaf on disk.  So, we don't want them spread
-				 * all over the tree.  Stop now if we've hit
-				 * a head that was already in use
-				 */
-				break;
-			}
-		}
-		node = rb_next(node);
-	}
-	if (count) {
-		return 0;
-	} else if (start) {
-		/*
-		 * we've gone to the end of the rbtree without finding any
-		 * clusters.  start from the beginning and try again
-		 */
+	start = delayed_refs->run_delayed_start;
+	head = find_ref_head(&delayed_refs->href_root, start, NULL, 1);
+	if (!head && !loop) {
+		delayed_refs->run_delayed_start = 0;
 		start = 0;
-		node = rb_first(&delayed_refs->root);
-		goto again;
+		loop = true;
+		head = find_ref_head(&delayed_refs->href_root, start, NULL, 1);
+		if (!head)
+			return NULL;
+	} else if (!head && loop) {
+		return NULL;
 	}
-	return 1;
-}
 
-void btrfs_release_ref_cluster(struct list_head *cluster)
-{
-	struct list_head *pos, *q;
+	while (head->processing) {
+		struct rb_node *node;
 
-	list_for_each_safe(pos, q, cluster)
-		list_del_init(pos);
+		node = rb_next(&head->href_node);
+		if (!node) {
+			if (loop)
+				return NULL;
+			delayed_refs->run_delayed_start = 0;
+			start = 0;
+			loop = true;
+			goto again;
+		}
+		head = rb_entry(node, struct btrfs_delayed_ref_head,
+				href_node);
+	}
+
+	head->processing = 1;
+	WARN_ON(delayed_refs->num_heads_ready == 0);
+	delayed_refs->num_heads_ready--;
+	delayed_refs->run_delayed_start = head->node.bytenr +
+		head->node.num_bytes;
+	return head;
 }
 
 /*
@@ -451,6 +462,7 @@
 static noinline void
 update_existing_ref(struct btrfs_trans_handle *trans,
 		    struct btrfs_delayed_ref_root *delayed_refs,
+		    struct btrfs_delayed_ref_head *head,
 		    struct btrfs_delayed_ref_node *existing,
 		    struct btrfs_delayed_ref_node *update)
 {
@@ -463,7 +475,7 @@
 		 */
 		existing->ref_mod--;
 		if (existing->ref_mod == 0)
-			drop_delayed_ref(trans, delayed_refs, existing);
+			drop_delayed_ref(trans, delayed_refs, head, existing);
 		else
 			WARN_ON(existing->type == BTRFS_TREE_BLOCK_REF_KEY ||
 				existing->type == BTRFS_SHARED_BLOCK_REF_KEY);
@@ -533,9 +545,13 @@
 		}
 	}
 	/*
-	 * update the reference mod on the head to reflect this new operation
+	 * update the reference mod on the head to reflect this new operation.
+	 * We only need the lock in this case because we could be processing it
+	 * currently; for refs we just added we know we're fine.
 	 */
+	spin_lock(&existing_ref->lock);
 	existing->ref_mod += update->ref_mod;
+	spin_unlock(&existing_ref->lock);
 }
 
 /*
@@ -543,13 +559,13 @@
  * this does all the dirty work in terms of maintaining the correct
  * overall modification count.
  */
-static noinline void add_delayed_ref_head(struct btrfs_fs_info *fs_info,
-					struct btrfs_trans_handle *trans,
-					struct btrfs_delayed_ref_node *ref,
-					u64 bytenr, u64 num_bytes,
-					int action, int is_data)
+static noinline struct btrfs_delayed_ref_head *
+add_delayed_ref_head(struct btrfs_fs_info *fs_info,
+		     struct btrfs_trans_handle *trans,
+		     struct btrfs_delayed_ref_node *ref, u64 bytenr,
+		     u64 num_bytes, int action, int is_data)
 {
-	struct btrfs_delayed_ref_node *existing;
+	struct btrfs_delayed_ref_head *existing;
 	struct btrfs_delayed_ref_head *head_ref = NULL;
 	struct btrfs_delayed_ref_root *delayed_refs;
 	int count_mod = 1;
@@ -596,38 +612,43 @@
 	head_ref = btrfs_delayed_node_to_head(ref);
 	head_ref->must_insert_reserved = must_insert_reserved;
 	head_ref->is_data = is_data;
+	head_ref->ref_root = RB_ROOT;
+	head_ref->processing = 0;
 
-	INIT_LIST_HEAD(&head_ref->cluster);
+	spin_lock_init(&head_ref->lock);
 	mutex_init(&head_ref->mutex);
 
 	trace_add_delayed_ref_head(ref, head_ref, action);
 
-	existing = tree_insert(&delayed_refs->root, &ref->rb_node);
-
+	existing = htree_insert(&delayed_refs->href_root,
+				&head_ref->href_node);
 	if (existing) {
-		update_existing_head_ref(existing, ref);
+		update_existing_head_ref(&existing->node, ref);
 		/*
 		 * we've updated the existing ref, free the newly
 		 * allocated ref
 		 */
 		kmem_cache_free(btrfs_delayed_ref_head_cachep, head_ref);
+		head_ref = existing;
 	} else {
 		delayed_refs->num_heads++;
 		delayed_refs->num_heads_ready++;
-		delayed_refs->num_entries++;
+		atomic_inc(&delayed_refs->num_entries);
 		trans->delayed_ref_updates++;
 	}
+	return head_ref;
 }
 
 /*
  * helper to insert a delayed tree ref into the rbtree.
  */
-static noinline void add_delayed_tree_ref(struct btrfs_fs_info *fs_info,
-					 struct btrfs_trans_handle *trans,
-					 struct btrfs_delayed_ref_node *ref,
-					 u64 bytenr, u64 num_bytes, u64 parent,
-					 u64 ref_root, int level, int action,
-					 int for_cow)
+static noinline void
+add_delayed_tree_ref(struct btrfs_fs_info *fs_info,
+		     struct btrfs_trans_handle *trans,
+		     struct btrfs_delayed_ref_head *head_ref,
+		     struct btrfs_delayed_ref_node *ref, u64 bytenr,
+		     u64 num_bytes, u64 parent, u64 ref_root, int level,
+		     int action, int for_cow)
 {
 	struct btrfs_delayed_ref_node *existing;
 	struct btrfs_delayed_tree_ref *full_ref;
@@ -663,30 +684,33 @@
 
 	trace_add_delayed_tree_ref(ref, full_ref, action);
 
-	existing = tree_insert(&delayed_refs->root, &ref->rb_node);
-
+	spin_lock(&head_ref->lock);
+	existing = tree_insert(&head_ref->ref_root, &ref->rb_node);
 	if (existing) {
-		update_existing_ref(trans, delayed_refs, existing, ref);
+		update_existing_ref(trans, delayed_refs, head_ref, existing,
+				    ref);
 		/*
 		 * we've updated the existing ref, free the newly
 		 * allocated ref
 		 */
 		kmem_cache_free(btrfs_delayed_tree_ref_cachep, full_ref);
 	} else {
-		delayed_refs->num_entries++;
+		atomic_inc(&delayed_refs->num_entries);
 		trans->delayed_ref_updates++;
 	}
+	spin_unlock(&head_ref->lock);
 }
 
 /*
  * helper to insert a delayed data ref into the rbtree.
  */
-static noinline void add_delayed_data_ref(struct btrfs_fs_info *fs_info,
-					 struct btrfs_trans_handle *trans,
-					 struct btrfs_delayed_ref_node *ref,
-					 u64 bytenr, u64 num_bytes, u64 parent,
-					 u64 ref_root, u64 owner, u64 offset,
-					 int action, int for_cow)
+static noinline void
+add_delayed_data_ref(struct btrfs_fs_info *fs_info,
+		     struct btrfs_trans_handle *trans,
+		     struct btrfs_delayed_ref_head *head_ref,
+		     struct btrfs_delayed_ref_node *ref, u64 bytenr,
+		     u64 num_bytes, u64 parent, u64 ref_root, u64 owner,
+		     u64 offset, int action, int for_cow)
 {
 	struct btrfs_delayed_ref_node *existing;
 	struct btrfs_delayed_data_ref *full_ref;
@@ -724,19 +748,21 @@
 
 	trace_add_delayed_data_ref(ref, full_ref, action);
 
-	existing = tree_insert(&delayed_refs->root, &ref->rb_node);
-
+	spin_lock(&head_ref->lock);
+	existing = tree_insert(&head_ref->ref_root, &ref->rb_node);
 	if (existing) {
-		update_existing_ref(trans, delayed_refs, existing, ref);
+		update_existing_ref(trans, delayed_refs, head_ref, existing,
+				    ref);
 		/*
 		 * we've updated the existing ref, free the newly
 		 * allocated ref
 		 */
 		kmem_cache_free(btrfs_delayed_data_ref_cachep, full_ref);
 	} else {
-		delayed_refs->num_entries++;
+		atomic_inc(&delayed_refs->num_entries);
 		trans->delayed_ref_updates++;
 	}
+	spin_unlock(&head_ref->lock);
 }
 
 /*
@@ -775,10 +801,10 @@
 	 * insert both the head node and the new ref without dropping
 	 * the spin lock
 	 */
-	add_delayed_ref_head(fs_info, trans, &head_ref->node, bytenr,
-				   num_bytes, action, 0);
+	head_ref = add_delayed_ref_head(fs_info, trans, &head_ref->node,
+					bytenr, num_bytes, action, 0);
 
-	add_delayed_tree_ref(fs_info, trans, &ref->node, bytenr,
+	add_delayed_tree_ref(fs_info, trans, head_ref, &ref->node, bytenr,
 				   num_bytes, parent, ref_root, level, action,
 				   for_cow);
 	spin_unlock(&delayed_refs->lock);
@@ -823,10 +849,10 @@
 	 * insert both the head node and the new ref without dropping
 	 * the spin lock
 	 */
-	add_delayed_ref_head(fs_info, trans, &head_ref->node, bytenr,
-				   num_bytes, action, 1);
+	head_ref = add_delayed_ref_head(fs_info, trans, &head_ref->node,
+					bytenr, num_bytes, action, 1);
 
-	add_delayed_data_ref(fs_info, trans, &ref->node, bytenr,
+	add_delayed_data_ref(fs_info, trans, head_ref, &ref->node, bytenr,
 				   num_bytes, parent, ref_root, owner, offset,
 				   action, for_cow);
 	spin_unlock(&delayed_refs->lock);
@@ -869,14 +895,10 @@
 struct btrfs_delayed_ref_head *
 btrfs_find_delayed_ref_head(struct btrfs_trans_handle *trans, u64 bytenr)
 {
-	struct btrfs_delayed_ref_node *ref;
 	struct btrfs_delayed_ref_root *delayed_refs;
 
 	delayed_refs = &trans->transaction->delayed_refs;
-	ref = find_ref_head(&delayed_refs->root, bytenr, NULL, 0);
-	if (ref)
-		return btrfs_delayed_node_to_head(ref);
-	return NULL;
+	return find_ref_head(&delayed_refs->href_root, bytenr, NULL, 0);
 }
 
 void btrfs_delayed_ref_exit(void)
diff --git a/fs/btrfs/delayed-ref.h b/fs/btrfs/delayed-ref.h
index 70b962c..4ba9b93 100644
--- a/fs/btrfs/delayed-ref.h
+++ b/fs/btrfs/delayed-ref.h
@@ -81,7 +81,10 @@
 	 */
 	struct mutex mutex;
 
-	struct list_head cluster;
+	spinlock_t lock;
+	struct rb_root ref_root;
+
+	struct rb_node href_node;
 
 	struct btrfs_delayed_extent_op *extent_op;
 	/*
@@ -98,6 +101,7 @@
 	 */
 	unsigned int must_insert_reserved:1;
 	unsigned int is_data:1;
+	unsigned int processing:1;
 };
 
 struct btrfs_delayed_tree_ref {
@@ -116,7 +120,8 @@
 };
 
 struct btrfs_delayed_ref_root {
-	struct rb_root root;
+	/* head ref rbtree */
+	struct rb_root href_root;
 
 	/* this spin lock protects the rbtree and the entries inside */
 	spinlock_t lock;
@@ -124,7 +129,7 @@
 	/* how many delayed ref updates we've queued, used by the
 	 * throttling code
 	 */
-	unsigned long num_entries;
+	atomic_t num_entries;
 
 	/* total number of head nodes in tree */
 	unsigned long num_heads;
@@ -133,15 +138,6 @@
 	unsigned long num_heads_ready;
 
 	/*
-	 * bumped when someone is making progress on the delayed
-	 * refs, so that other procs know they are just adding to
-	 * contention intead of helping
-	 */
-	atomic_t procs_running_refs;
-	atomic_t ref_seq;
-	wait_queue_head_t wait;
-
-	/*
 	 * set when the tree is flushing before a transaction commit,
 	 * used by the throttling code to decide if new updates need
 	 * to be run right away
@@ -226,9 +222,9 @@
 	mutex_unlock(&head->mutex);
 }
 
-int btrfs_find_ref_cluster(struct btrfs_trans_handle *trans,
-			   struct list_head *cluster, u64 search_start);
-void btrfs_release_ref_cluster(struct list_head *cluster);
+
+struct btrfs_delayed_ref_head *
+btrfs_select_ref_head(struct btrfs_trans_handle *trans);
 
 int btrfs_check_delayed_seq(struct btrfs_fs_info *fs_info,
 			    struct btrfs_delayed_ref_root *delayed_refs,
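The delayed-ref.h changes above split the old single rbtree into two levels: href_root keeps one btrfs_delayed_ref_head per bytenr, and each head carries its own ref_root rbtree, a per-head spinlock, and a processing flag, so a head can be worked on without holding delayed_refs->lock. A rough userspace model of that shape follows; it uses arrays and pthread mutexes instead of rbtrees and spinlocks, and all demo_* names are invented for illustration.

#include <pthread.h>
#include <stdint.h>
#include <stdio.h>

/* Per-bytenr head; in btrfs this is btrfs_delayed_ref_head in href_root. */
struct demo_head {
	uint64_t bytenr;
	pthread_mutex_t lock;	/* stands in for the new per-head spinlock */
	int nr_refs;		/* stands in for the head's ref_root rbtree */
	int processing;		/* mirrors the new head->processing flag */
};

/* Global delayed-ref root; in btrfs this is btrfs_delayed_ref_root. */
struct demo_root {
	pthread_mutex_t lock;	/* stands in for delayed_refs->lock */
	struct demo_head heads[4];
	int nr_heads;
};

/* Pick one unprocessed head under the global lock and mark it, in the
 * spirit of btrfs_select_ref_head(); its refs are then handled under
 * head->lock only, without the global lock. */
static struct demo_head *demo_select_head(struct demo_root *root)
{
	struct demo_head *head = NULL;

	pthread_mutex_lock(&root->lock);
	for (int i = 0; i < root->nr_heads; i++) {
		if (!root->heads[i].processing) {
			head = &root->heads[i];
			head->processing = 1;
			break;
		}
	}
	pthread_mutex_unlock(&root->lock);
	return head;
}

int main(void)
{
	struct demo_root root;
	struct demo_head *head;

	pthread_mutex_init(&root.lock, NULL);
	root.nr_heads = 1;
	root.heads[0].bytenr = 4096;
	root.heads[0].nr_refs = 2;
	root.heads[0].processing = 0;
	pthread_mutex_init(&root.heads[0].lock, NULL);

	head = demo_select_head(&root);
	if (head) {
		pthread_mutex_lock(&head->lock);
		printf("processing head %llu with %d queued refs\n",
		       (unsigned long long)head->bytenr, head->nr_refs);
		pthread_mutex_unlock(&head->lock);
	}
	return 0;
}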
diff --git a/fs/btrfs/dev-replace.c b/fs/btrfs/dev-replace.c
index 2cfc3df..564c926 100644
--- a/fs/btrfs/dev-replace.c
+++ b/fs/btrfs/dev-replace.c
@@ -102,7 +102,8 @@
 	ptr = btrfs_item_ptr(eb, slot, struct btrfs_dev_replace_item);
 
 	if (item_size != sizeof(struct btrfs_dev_replace_item)) {
-		pr_warn("btrfs: dev_replace entry found has unexpected size, ignore entry\n");
+		btrfs_warn(fs_info,
+			"dev_replace entry found has unexpected size, ignore entry");
 		goto no_valid_dev_replace_entry_found;
 	}
 
@@ -145,13 +146,19 @@
 		if (!dev_replace->srcdev &&
 		    !btrfs_test_opt(dev_root, DEGRADED)) {
 			ret = -EIO;
-			pr_warn("btrfs: cannot mount because device replace operation is ongoing and\n" "srcdev (devid %llu) is missing, need to run 'btrfs dev scan'?\n",
-				src_devid);
+			btrfs_warn(fs_info,
+			   "cannot mount because device replace operation is ongoing and");
+			btrfs_warn(fs_info,
+			   "srcdev (devid %llu) is missing, need to run 'btrfs dev scan'?",
+			   src_devid);
 		}
 		if (!dev_replace->tgtdev &&
 		    !btrfs_test_opt(dev_root, DEGRADED)) {
 			ret = -EIO;
-			pr_warn("btrfs: cannot mount because device replace operation is ongoing and\n" "tgtdev (devid %llu) is missing, need to run btrfs dev scan?\n",
+			btrfs_warn(fs_info,
+			   "cannot mount because device replace operation is ongoing and");
+			btrfs_warn(fs_info,
+			   "tgtdev (devid %llu) is missing, need to run 'btrfs dev scan'?",
 				BTRFS_DEV_REPLACE_DEVID);
 		}
 		if (dev_replace->tgtdev) {
@@ -210,7 +217,7 @@
 	}
 	ret = btrfs_search_slot(trans, dev_root, &key, path, -1, 1);
 	if (ret < 0) {
-		pr_warn("btrfs: error %d while searching for dev_replace item!\n",
+		btrfs_warn(fs_info, "error %d while searching for dev_replace item!",
 			ret);
 		goto out;
 	}
@@ -230,7 +237,7 @@
 		 */
 		ret = btrfs_del_item(trans, dev_root, path);
 		if (ret != 0) {
-			pr_warn("btrfs: delete too small dev_replace item failed %d!\n",
+			btrfs_warn(fs_info, "delete too small dev_replace item failed %d!",
 				ret);
 			goto out;
 		}
@@ -243,7 +250,7 @@
 		ret = btrfs_insert_empty_item(trans, dev_root, path,
 					      &key, sizeof(*ptr));
 		if (ret < 0) {
-			pr_warn("btrfs: insert dev_replace item failed %d!\n",
+			btrfs_warn(fs_info, "insert dev_replace item failed %d!",
 				ret);
 			goto out;
 		}
@@ -305,7 +312,7 @@
 	struct btrfs_device *src_device = NULL;
 
 	if (btrfs_fs_incompat(fs_info, RAID56)) {
-		pr_warn("btrfs: dev_replace cannot yet handle RAID5/RAID6\n");
+		btrfs_warn(fs_info, "dev_replace cannot yet handle RAID5/RAID6");
 		return -EINVAL;
 	}
 
@@ -325,7 +332,7 @@
 	ret = btrfs_init_dev_replace_tgtdev(root, args->start.tgtdev_name,
 					    &tgt_device);
 	if (ret) {
-		pr_err("btrfs: target device %s is invalid!\n",
+		btrfs_err(fs_info, "target device %s is invalid!",
 		       args->start.tgtdev_name);
 		mutex_unlock(&fs_info->volume_mutex);
 		return -EINVAL;
@@ -341,7 +348,7 @@
 	}
 
 	if (tgt_device->total_bytes < src_device->total_bytes) {
-		pr_err("btrfs: target device is smaller than source device!\n");
+		btrfs_err(fs_info, "target device is smaller than source device!");
 		ret = -EINVAL;
 		goto leave_no_lock;
 	}
@@ -366,7 +373,7 @@
 	dev_replace->tgtdev = tgt_device;
 
 	printk_in_rcu(KERN_INFO
-		      "btrfs: dev_replace from %s (devid %llu) to %s started\n",
+		      "BTRFS: dev_replace from %s (devid %llu) to %s started\n",
 		      src_device->missing ? "<missing disk>" :
 		        rcu_str_deref(src_device->name),
 		      src_device->devid,
@@ -489,7 +496,7 @@
 
 	if (scrub_ret) {
 		printk_in_rcu(KERN_ERR
-			      "btrfs: btrfs_scrub_dev(%s, %llu, %s) failed %d\n",
+			      "BTRFS: btrfs_scrub_dev(%s, %llu, %s) failed %d\n",
 			      src_device->missing ? "<missing disk>" :
 			        rcu_str_deref(src_device->name),
 			      src_device->devid,
@@ -504,7 +511,7 @@
 	}
 
 	printk_in_rcu(KERN_INFO
-		      "btrfs: dev_replace from %s (devid %llu) to %s) finished\n",
+		      "BTRFS: dev_replace from %s (devid %llu) to %s finished\n",
 		      src_device->missing ? "<missing disk>" :
 		        rcu_str_deref(src_device->name),
 		      src_device->devid,
@@ -699,7 +706,7 @@
 			BTRFS_IOCTL_DEV_REPLACE_STATE_SUSPENDED;
 		dev_replace->time_stopped = get_seconds();
 		dev_replace->item_needs_writeback = 1;
-		pr_info("btrfs: suspending dev_replace for unmount\n");
+		btrfs_info(fs_info, "suspending dev_replace for unmount");
 		break;
 	}
 
@@ -728,8 +735,9 @@
 		break;
 	}
 	if (!dev_replace->tgtdev || !dev_replace->tgtdev->bdev) {
-		pr_info("btrfs: cannot continue dev_replace, tgtdev is missing\n"
-			"btrfs: you may cancel the operation after 'mount -o degraded'\n");
+		btrfs_info(fs_info, "cannot continue dev_replace, tgtdev is missing");
+		btrfs_info(fs_info,
+			"you may cancel the operation after 'mount -o degraded'");
 		btrfs_dev_replace_unlock(dev_replace);
 		return 0;
 	}
@@ -755,14 +763,14 @@
 		kfree(status_args);
 		do_div(progress, 10);
 		printk_in_rcu(KERN_INFO
-			      "btrfs: continuing dev_replace from %s (devid %llu) to %s @%u%%\n",
-			      dev_replace->srcdev->missing ? "<missing disk>" :
-				rcu_str_deref(dev_replace->srcdev->name),
-			      dev_replace->srcdev->devid,
-			      dev_replace->tgtdev ?
-				rcu_str_deref(dev_replace->tgtdev->name) :
-				"<missing target disk>",
-			      (unsigned int)progress);
+			"BTRFS: continuing dev_replace from %s (devid %llu) to %s @%u%%\n",
+			dev_replace->srcdev->missing ? "<missing disk>" :
+			rcu_str_deref(dev_replace->srcdev->name),
+			dev_replace->srcdev->devid,
+			dev_replace->tgtdev ?
+			rcu_str_deref(dev_replace->tgtdev->name) :
+			"<missing target disk>",
+			(unsigned int)progress);
 	}
 	btrfs_dev_replace_continue_on_mount(fs_info);
 	atomic_set(&fs_info->mutually_exclusive_operation_running, 0);
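The dev-replace.c hunks above are part of the wider conversion from bare printk()/pr_*() calls to the fs_info-aware btrfs_warn()/btrfs_info() helpers, so every message identifies the filesystem it concerns. A rough userspace analogue of that pattern is sketched below; the demo_* types and the exact message prefix are assumptions for illustration, not the btrfs macros.

#include <stdarg.h>
#include <stdio.h>

/* Stand-in for btrfs_fs_info: just enough state to identify the fs. */
struct demo_fs_info {
	const char *sb_id;	/* e.g. the block device name */
};

/* Illustrative counterpart of btrfs_warn(fs_info, fmt, ...): every
 * message is tagged with the filesystem it came from and the trailing
 * newline is added by the helper, not by each format string. */
static void demo_fs_warn(const struct demo_fs_info *fs_info,
			 const char *fmt, ...)
{
	va_list args;

	fprintf(stderr, "BTRFS warning (device %s): ", fs_info->sb_id);
	va_start(args, fmt);
	vfprintf(stderr, fmt, args);
	va_end(args);
	fputc('\n', stderr);
}

int main(void)
{
	struct demo_fs_info fs = { .sb_id = "sda1" };

	demo_fs_warn(&fs, "dev_replace entry found has unexpected size, ignore entry");
	return 0;
}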
diff --git a/fs/btrfs/dir-item.c b/fs/btrfs/dir-item.c
index c031ea3..a0691df 100644
--- a/fs/btrfs/dir-item.c
+++ b/fs/btrfs/dir-item.c
@@ -261,7 +261,7 @@
 	 * see if there is room in the item to insert this
 	 * name
 	 */
-	data_size = sizeof(*di) + name_len + sizeof(struct btrfs_item);
+	data_size = sizeof(*di) + name_len;
 	leaf = path->nodes[0];
 	slot = path->slots[0];
 	if (data_size + btrfs_item_size_nr(leaf, slot) +
@@ -459,7 +459,7 @@
 	u8 type = btrfs_dir_type(leaf, dir_item);
 
 	if (type >= BTRFS_FT_MAX) {
-		printk(KERN_CRIT "btrfs: invalid dir item type: %d\n",
+		btrfs_crit(root->fs_info, "invalid dir item type: %d",
 		       (int)type);
 		return 1;
 	}
@@ -468,7 +468,7 @@
 		namelen = XATTR_NAME_MAX;
 
 	if (btrfs_dir_name_len(leaf, dir_item) > namelen) {
-		printk(KERN_CRIT "btrfs: invalid dir item name len: %u\n",
+		btrfs_crit(root->fs_info, "invalid dir item name len: %u",
 		       (unsigned)btrfs_dir_data_len(leaf, dir_item));
 		return 1;
 	}
@@ -476,7 +476,7 @@
 	/* BTRFS_MAX_XATTR_SIZE is the same for all dir items */
 	if ((btrfs_dir_data_len(leaf, dir_item) +
 	     btrfs_dir_name_len(leaf, dir_item)) > BTRFS_MAX_XATTR_SIZE(root)) {
-		printk(KERN_CRIT "btrfs: invalid dir item name + data len: %u + %u\n",
+		btrfs_crit(root->fs_info, "invalid dir item name + data len: %u + %u",
 		       (unsigned)btrfs_dir_name_len(leaf, dir_item),
 		       (unsigned)btrfs_dir_data_len(leaf, dir_item));
 		return 1;
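For reference, the verify_dir_item() checks converted to btrfs_crit() above reject a dir item whose type, name length, or combined name+data length is out of range. A hedged standalone approximation of that validation is sketched below; the constants and the simplified struct are illustrative only, and the real code caps the name length against XATTR_NAME_MAX only for xattr items.

#include <stdio.h>

/* Illustrative limits; the real values come from the btrfs/VFS headers. */
#define DEMO_FT_MAX		9	/* number of valid dir-entry types */
#define DEMO_XATTR_NAME_MAX	255	/* kernel XATTR_NAME_MAX */
#define DEMO_MAX_XATTR_SIZE	3500	/* stand-in for BTRFS_MAX_XATTR_SIZE() */

struct demo_dir_item {
	unsigned char type;
	unsigned int name_len;
	unsigned int data_len;
};

/* Returns 1 if the item looks corrupt, 0 if it passes the basic checks,
 * mirroring the structure of verify_dir_item() in dir-item.c. */
static int demo_verify_dir_item(const struct demo_dir_item *di)
{
	if (di->type >= DEMO_FT_MAX) {
		fprintf(stderr, "invalid dir item type: %d\n", di->type);
		return 1;
	}
	if (di->name_len > DEMO_XATTR_NAME_MAX) {
		fprintf(stderr, "invalid dir item name len: %u\n", di->name_len);
		return 1;
	}
	if (di->name_len + di->data_len > DEMO_MAX_XATTR_SIZE) {
		fprintf(stderr, "invalid dir item name + data len: %u + %u\n",
			di->name_len, di->data_len);
		return 1;
	}
	return 0;
}

int main(void)
{
	struct demo_dir_item ok = { .type = 1, .name_len = 10, .data_len = 0 };
	struct demo_dir_item bad = { .type = 200, .name_len = 10, .data_len = 0 };

	printf("ok item corrupt? %d\n", demo_verify_dir_item(&ok));
	printf("bad item corrupt? %d\n", demo_verify_dir_item(&bad));
	return 0;
}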
diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c
index 8072cfa..81ea553 100644
--- a/fs/btrfs/disk-io.c
+++ b/fs/btrfs/disk-io.c
@@ -26,7 +26,6 @@
 #include <linux/workqueue.h>
 #include <linux/kthread.h>
 #include <linux/freezer.h>
-#include <linux/crc32c.h>
 #include <linux/slab.h>
 #include <linux/migrate.h>
 #include <linux/ratelimit.h>
@@ -35,6 +34,7 @@
 #include <asm/unaligned.h>
 #include "ctree.h"
 #include "disk-io.h"
+#include "hash.h"
 #include "transaction.h"
 #include "btrfs_inode.h"
 #include "volumes.h"
@@ -48,6 +48,7 @@
 #include "rcu-string.h"
 #include "dev-replace.h"
 #include "raid56.h"
+#include "sysfs.h"
 
 #ifdef CONFIG_X86
 #include <asm/cpufeature.h>
@@ -243,7 +244,7 @@
 
 u32 btrfs_csum_data(char *data, u32 seed, size_t len)
 {
-	return crc32c(seed, data, len);
+	return btrfs_crc32c(seed, data, len);
 }
 
 void btrfs_csum_final(u32 crc, char *result)
@@ -299,11 +300,11 @@
 			memcpy(&found, result, csum_size);
 
 			read_extent_buffer(buf, &val, 0, csum_size);
-			printk_ratelimited(KERN_INFO "btrfs: %s checksum verify "
-				       "failed on %llu wanted %X found %X "
-				       "level %d\n",
-				       root->fs_info->sb->s_id, buf->start,
-				       val, found, btrfs_header_level(buf));
+			printk_ratelimited(KERN_INFO
+				"BTRFS: %s checksum verify failed on %llu wanted %X found %X "
+				"level %d\n",
+				root->fs_info->sb->s_id, buf->start,
+				val, found, btrfs_header_level(buf));
 			if (result != (char *)&inline_result)
 				kfree(result);
 			return 1;
@@ -382,13 +383,14 @@
 			ret = 1;
 
 		if (ret && btrfs_super_generation(disk_sb) < 10) {
-			printk(KERN_WARNING "btrfs: super block crcs don't match, older mkfs detected\n");
+			printk(KERN_WARNING
+				"BTRFS: super block crcs don't match, older mkfs detected\n");
 			ret = 0;
 		}
 	}
 
 	if (csum_type >= ARRAY_SIZE(btrfs_csum_sizes)) {
-		printk(KERN_ERR "btrfs: unsupported checksum algorithm %u\n",
+		printk(KERN_ERR "BTRFS: unsupported checksum algorithm %u\n",
 				csum_type);
 		ret = 1;
 	}
@@ -464,13 +466,10 @@
 
 static int csum_dirty_buffer(struct btrfs_root *root, struct page *page)
 {
-	struct extent_io_tree *tree;
 	u64 start = page_offset(page);
 	u64 found_start;
 	struct extent_buffer *eb;
 
-	tree = &BTRFS_I(page->mapping->host)->io_tree;
-
 	eb = (struct extent_buffer *)page->private;
 	if (page != eb->pages[0])
 		return 0;
@@ -500,8 +499,8 @@
 }
 
 #define CORRUPT(reason, eb, root, slot)				\
-	printk(KERN_CRIT "btrfs: corrupt leaf, %s: block=%llu,"	\
-	       "root=%llu, slot=%d\n", reason,			\
+	btrfs_crit(root->fs_info, "corrupt leaf, %s: block=%llu,"	\
+		   "root=%llu, slot=%d", reason,			\
 	       btrfs_header_bytenr(eb),	root->objectid, slot)
 
 static noinline int check_leaf(struct btrfs_root *root,
@@ -569,7 +568,6 @@
 				      u64 phy_offset, struct page *page,
 				      u64 start, u64 end, int mirror)
 {
-	struct extent_io_tree *tree;
 	u64 found_start;
 	int found_level;
 	struct extent_buffer *eb;
@@ -580,7 +578,6 @@
 	if (!page->private)
 		goto out;
 
-	tree = &BTRFS_I(page->mapping->host)->io_tree;
 	eb = (struct extent_buffer *)page->private;
 
 	/* the pending IO might have been the only thing that kept this buffer
@@ -600,21 +597,21 @@
 
 	found_start = btrfs_header_bytenr(eb);
 	if (found_start != eb->start) {
-		printk_ratelimited(KERN_INFO "btrfs bad tree block start "
+		printk_ratelimited(KERN_INFO "BTRFS: bad tree block start "
 			       "%llu %llu\n",
 			       found_start, eb->start);
 		ret = -EIO;
 		goto err;
 	}
 	if (check_tree_block_fsid(root, eb)) {
-		printk_ratelimited(KERN_INFO "btrfs bad fsid on block %llu\n",
+		printk_ratelimited(KERN_INFO "BTRFS: bad fsid on block %llu\n",
 			       eb->start);
 		ret = -EIO;
 		goto err;
 	}
 	found_level = btrfs_header_level(eb);
 	if (found_level >= BTRFS_MAX_LEVEL) {
-		btrfs_info(root->fs_info, "bad tree block level %d\n",
+		btrfs_info(root->fs_info, "bad tree block level %d",
 			   (int)btrfs_header_level(eb));
 		ret = -EIO;
 		goto err;
@@ -842,20 +839,17 @@
 
 static int btree_csum_one_bio(struct bio *bio)
 {
-	struct bio_vec *bvec = bio->bi_io_vec;
-	int bio_index = 0;
+	struct bio_vec *bvec;
 	struct btrfs_root *root;
-	int ret = 0;
+	int i, ret = 0;
 
-	WARN_ON(bio->bi_vcnt <= 0);
-	while (bio_index < bio->bi_vcnt) {
+	bio_for_each_segment_all(bvec, bio, i) {
 		root = BTRFS_I(bvec->bv_page->mapping->host)->root;
 		ret = csum_dirty_buffer(root, bvec->bv_page);
 		if (ret)
 			break;
-		bio_index++;
-		bvec++;
 	}
+
 	return ret;
 }
 
@@ -967,11 +961,9 @@
 static int btree_writepages(struct address_space *mapping,
 			    struct writeback_control *wbc)
 {
-	struct extent_io_tree *tree;
 	struct btrfs_fs_info *fs_info;
 	int ret;
 
-	tree = &BTRFS_I(mapping->host)->io_tree;
 	if (wbc->sync_mode == WB_SYNC_NONE) {
 
 		if (wbc->for_kupdate)
@@ -1010,8 +1002,9 @@
 	extent_invalidatepage(tree, page, offset);
 	btree_releasepage(page, GFP_NOFS);
 	if (PagePrivate(page)) {
-		printk(KERN_WARNING "btrfs warning page private not zero "
-		       "on page %llu\n", (unsigned long long)page_offset(page));
+		btrfs_warn(BTRFS_I(page->mapping->host)->root->fs_info,
+			   "page private not zero on page %llu",
+			   (unsigned long long)page_offset(page));
 		ClearPagePrivate(page);
 		set_page_private(page, 0);
 		page_cache_release(page);
@@ -1095,21 +1088,13 @@
 struct extent_buffer *btrfs_find_tree_block(struct btrfs_root *root,
 					    u64 bytenr, u32 blocksize)
 {
-	struct inode *btree_inode = root->fs_info->btree_inode;
-	struct extent_buffer *eb;
-	eb = find_extent_buffer(&BTRFS_I(btree_inode)->io_tree, bytenr);
-	return eb;
+	return find_extent_buffer(root->fs_info, bytenr);
 }
 
 struct extent_buffer *btrfs_find_create_tree_block(struct btrfs_root *root,
 						 u64 bytenr, u32 blocksize)
 {
-	struct inode *btree_inode = root->fs_info->btree_inode;
-	struct extent_buffer *eb;
-
-	eb = alloc_extent_buffer(&BTRFS_I(btree_inode)->io_tree,
-				 bytenr, blocksize);
-	return eb;
+	return alloc_extent_buffer(root->fs_info, bytenr, blocksize);
 }
 
 
@@ -1273,7 +1258,6 @@
 	struct btrfs_root *root;
 	struct btrfs_key key;
 	int ret = 0;
-	u64 bytenr;
 	uuid_le uuid;
 
 	root = btrfs_alloc_root(fs_info);
@@ -1295,7 +1279,6 @@
 		goto fail;
 	}
 
-	bytenr = leaf->start;
 	memset_extent_buffer(leaf, 0, 0, sizeof(struct btrfs_header));
 	btrfs_set_header_bytenr(leaf, leaf->start);
 	btrfs_set_header_generation(leaf, trans->transid);
@@ -1616,7 +1599,8 @@
 	if (ret)
 		goto fail;
 
-	ret = btrfs_find_orphan_item(fs_info->tree_root, location->objectid);
+	ret = btrfs_find_item(fs_info->tree_root, NULL, BTRFS_ORPHAN_OBJECTID,
+			location->objectid, BTRFS_ORPHAN_ITEM_KEY, NULL);
 	if (ret < 0)
 		goto fail;
 	if (ret == 0)
@@ -1684,18 +1668,16 @@
 {
 	struct bio *bio;
 	struct end_io_wq *end_io_wq;
-	struct btrfs_fs_info *fs_info;
 	int error;
 
 	end_io_wq = container_of(work, struct end_io_wq, work);
 	bio = end_io_wq->bio;
-	fs_info = end_io_wq->info;
 
 	error = end_io_wq->error;
 	bio->bi_private = end_io_wq->private;
 	bio->bi_end_io = end_io_wq->end_io;
 	kfree(end_io_wq);
-	bio_endio(bio, error);
+	bio_endio_nodec(bio, error);
 }
 
 static int cleaner_kthread(void *arg)
@@ -2080,6 +2062,12 @@
 		for (i = 0; i < ret; i++)
 			btrfs_drop_and_free_fs_root(fs_info, gang[i]);
 	}
+
+	if (test_bit(BTRFS_FS_STATE_ERROR, &fs_info->fs_state)) {
+		btrfs_free_log_root_tree(NULL, fs_info);
+		btrfs_destroy_pinned_extent(fs_info->tree_root,
+					    fs_info->pinned_extents);
+	}
 }
 
 int open_ctree(struct super_block *sb,
@@ -2154,6 +2142,7 @@
 	mapping_set_gfp_mask(fs_info->btree_inode->i_mapping, GFP_NOFS);
 
 	INIT_RADIX_TREE(&fs_info->fs_roots_radix, GFP_ATOMIC);
+	INIT_RADIX_TREE(&fs_info->buffer_radix, GFP_ATOMIC);
 	INIT_LIST_HEAD(&fs_info->trans_list);
 	INIT_LIST_HEAD(&fs_info->dead_roots);
 	INIT_LIST_HEAD(&fs_info->delayed_iputs);
@@ -2167,6 +2156,7 @@
 	spin_lock_init(&fs_info->free_chunk_lock);
 	spin_lock_init(&fs_info->tree_mod_seq_lock);
 	spin_lock_init(&fs_info->super_lock);
+	spin_lock_init(&fs_info->buffer_lock);
 	rwlock_init(&fs_info->tree_mod_log_lock);
 	mutex_init(&fs_info->reloc_mutex);
 	seqlock_init(&fs_info->profiles_lock);
@@ -2198,7 +2188,7 @@
 	fs_info->free_chunk_space = 0;
 	fs_info->tree_mod_log = RB_ROOT;
 	fs_info->commit_interval = BTRFS_DEFAULT_COMMIT_INTERVAL;
-
+	fs_info->avg_delayed_ref_runtime = div64_u64(NSEC_PER_SEC, 64);
 	/* readahead state */
 	INIT_RADIX_TREE(&fs_info->reada_tree, GFP_NOFS & ~__GFP_WAIT);
 	spin_lock_init(&fs_info->reada_lock);
@@ -2337,7 +2327,7 @@
 	 * Pass the whole disk block of size BTRFS_SUPER_INFO_SIZE (4k).
 	 */
 	if (btrfs_check_super_csum(bh->b_data)) {
-		printk(KERN_ERR "btrfs: superblock checksum mismatch\n");
+		printk(KERN_ERR "BTRFS: superblock checksum mismatch\n");
 		err = -EINVAL;
 		goto fail_alloc;
 	}
@@ -2356,7 +2346,7 @@
 
 	ret = btrfs_check_super_valid(fs_info, sb->s_flags & MS_RDONLY);
 	if (ret) {
-		printk(KERN_ERR "btrfs: superblock contains fatal errors\n");
+		printk(KERN_ERR "BTRFS: superblock contains fatal errors\n");
 		err = -EINVAL;
 		goto fail_alloc;
 	}
@@ -2421,7 +2411,7 @@
 		features |= BTRFS_FEATURE_INCOMPAT_COMPRESS_LZO;
 
 	if (features & BTRFS_FEATURE_INCOMPAT_SKINNY_METADATA)
-		printk(KERN_ERR "btrfs: has skinny extents\n");
+		printk(KERN_ERR "BTRFS: has skinny extents\n");
 
 	/*
 	 * flag our filesystem as having big metadata blocks if
@@ -2429,7 +2419,7 @@
 	 */
 	if (btrfs_super_leafsize(disk_super) > PAGE_CACHE_SIZE) {
 		if (!(features & BTRFS_FEATURE_INCOMPAT_BIG_METADATA))
-			printk(KERN_INFO "btrfs flagging fs with big metadata feature\n");
+			printk(KERN_INFO "BTRFS: flagging fs with big metadata feature\n");
 		features |= BTRFS_FEATURE_INCOMPAT_BIG_METADATA;
 	}
 
@@ -2446,7 +2436,7 @@
 	 */
 	if ((features & BTRFS_FEATURE_INCOMPAT_MIXED_GROUPS) &&
 	    (sectorsize != leafsize)) {
-		printk(KERN_WARNING "btrfs: unequal leaf/node/sector sizes "
+		printk(KERN_WARNING "BTRFS: unequal leaf/node/sector sizes "
 				"are not allowed for mixed block groups on %s\n",
 				sb->s_id);
 		goto fail_alloc;
@@ -2583,12 +2573,12 @@
 	sb->s_blocksize_bits = blksize_bits(sectorsize);
 
 	if (btrfs_super_magic(disk_super) != BTRFS_MAGIC) {
-		printk(KERN_INFO "btrfs: valid FS not found on %s\n", sb->s_id);
+		printk(KERN_INFO "BTRFS: valid FS not found on %s\n", sb->s_id);
 		goto fail_sb_buffer;
 	}
 
 	if (sectorsize != PAGE_SIZE) {
-		printk(KERN_WARNING "btrfs: Incompatible sector size(%lu) "
+		printk(KERN_WARNING "BTRFS: Incompatible sector size (%lu) "
 		       "found on %s\n", (unsigned long)sectorsize, sb->s_id);
 		goto fail_sb_buffer;
 	}
@@ -2597,7 +2587,7 @@
 	ret = btrfs_read_sys_array(tree_root);
 	mutex_unlock(&fs_info->chunk_mutex);
 	if (ret) {
-		printk(KERN_WARNING "btrfs: failed to read the system "
+		printk(KERN_WARNING "BTRFS: failed to read the system "
 		       "array on %s\n", sb->s_id);
 		goto fail_sb_buffer;
 	}
@@ -2614,7 +2604,7 @@
 					   blocksize, generation);
 	if (!chunk_root->node ||
 	    !test_bit(EXTENT_BUFFER_UPTODATE, &chunk_root->node->bflags)) {
-		printk(KERN_WARNING "btrfs: failed to read chunk root on %s\n",
+		printk(KERN_WARNING "BTRFS: failed to read chunk root on %s\n",
 		       sb->s_id);
 		goto fail_tree_roots;
 	}
@@ -2626,7 +2616,7 @@
 
 	ret = btrfs_read_chunk_tree(chunk_root);
 	if (ret) {
-		printk(KERN_WARNING "btrfs: failed to read chunk tree on %s\n",
+		printk(KERN_WARNING "BTRFS: failed to read chunk tree on %s\n",
 		       sb->s_id);
 		goto fail_tree_roots;
 	}
@@ -2638,7 +2628,7 @@
 	btrfs_close_extra_devices(fs_info, fs_devices, 0);
 
 	if (!fs_devices->latest_bdev) {
-		printk(KERN_CRIT "btrfs: failed to read devices on %s\n",
+		printk(KERN_CRIT "BTRFS: failed to read devices on %s\n",
 		       sb->s_id);
 		goto fail_tree_roots;
 	}
@@ -2653,7 +2643,7 @@
 					  blocksize, generation);
 	if (!tree_root->node ||
 	    !test_bit(EXTENT_BUFFER_UPTODATE, &tree_root->node->bflags)) {
-		printk(KERN_WARNING "btrfs: failed to read tree root on %s\n",
+		printk(KERN_WARNING "BTRFS: failed to read tree root on %s\n",
 		       sb->s_id);
 
 		goto recovery_tree_root;
@@ -2724,50 +2714,56 @@
 
 	ret = btrfs_recover_balance(fs_info);
 	if (ret) {
-		printk(KERN_WARNING "btrfs: failed to recover balance\n");
+		printk(KERN_WARNING "BTRFS: failed to recover balance\n");
 		goto fail_block_groups;
 	}
 
 	ret = btrfs_init_dev_stats(fs_info);
 	if (ret) {
-		printk(KERN_ERR "btrfs: failed to init dev_stats: %d\n",
+		printk(KERN_ERR "BTRFS: failed to init dev_stats: %d\n",
 		       ret);
 		goto fail_block_groups;
 	}
 
 	ret = btrfs_init_dev_replace(fs_info);
 	if (ret) {
-		pr_err("btrfs: failed to init dev_replace: %d\n", ret);
+		pr_err("BTRFS: failed to init dev_replace: %d\n", ret);
 		goto fail_block_groups;
 	}
 
 	btrfs_close_extra_devices(fs_info, fs_devices, 1);
 
+	ret = btrfs_sysfs_add_one(fs_info);
+	if (ret) {
+		pr_err("BTRFS: failed to init sysfs interface: %d\n", ret);
+		goto fail_block_groups;
+	}
+
 	ret = btrfs_init_space_info(fs_info);
 	if (ret) {
-		printk(KERN_ERR "Failed to initial space info: %d\n", ret);
-		goto fail_block_groups;
+		printk(KERN_ERR "BTRFS: Failed to initialize space info: %d\n", ret);
+		goto fail_sysfs;
 	}
 
 	ret = btrfs_read_block_groups(extent_root);
 	if (ret) {
-		printk(KERN_ERR "Failed to read block groups: %d\n", ret);
-		goto fail_block_groups;
+		printk(KERN_ERR "BTRFS: Failed to read block groups: %d\n", ret);
+		goto fail_sysfs;
 	}
 	fs_info->num_tolerated_disk_barrier_failures =
 		btrfs_calc_num_tolerated_disk_barrier_failures(fs_info);
 	if (fs_info->fs_devices->missing_devices >
 	     fs_info->num_tolerated_disk_barrier_failures &&
 	    !(sb->s_flags & MS_RDONLY)) {
-		printk(KERN_WARNING
-		       "Btrfs: too many missing devices, writeable mount is not allowed\n");
-		goto fail_block_groups;
+		printk(KERN_WARNING "BTRFS: "
+			"too many missing devices, writeable mount is not allowed\n");
+		goto fail_sysfs;
 	}
 
 	fs_info->cleaner_kthread = kthread_run(cleaner_kthread, tree_root,
 					       "btrfs-cleaner");
 	if (IS_ERR(fs_info->cleaner_kthread))
-		goto fail_block_groups;
+		goto fail_sysfs;
 
 	fs_info->transaction_kthread = kthread_run(transaction_kthread,
 						   tree_root,
@@ -2778,11 +2774,15 @@
 	if (!btrfs_test_opt(tree_root, SSD) &&
 	    !btrfs_test_opt(tree_root, NOSSD) &&
 	    !fs_info->fs_devices->rotating) {
-		printk(KERN_INFO "Btrfs detected SSD devices, enabling SSD "
+		printk(KERN_INFO "BTRFS: detected SSD devices, enabling SSD "
 		       "mode\n");
 		btrfs_set_opt(fs_info->mount_opt, SSD);
 	}
 
+	/* Set the real inode map cache flag */
+	if (btrfs_test_opt(tree_root, CHANGE_INODE_CACHE))
+		btrfs_set_opt(tree_root->fs_info->mount_opt, INODE_MAP_CACHE);
+
 #ifdef CONFIG_BTRFS_FS_CHECK_INTEGRITY
 	if (btrfs_test_opt(tree_root, CHECK_INTEGRITY)) {
 		ret = btrfsic_mount(tree_root, fs_devices,
@@ -2791,7 +2791,7 @@
 				    1 : 0,
 				    fs_info->check_integrity_print_mask);
 		if (ret)
-			printk(KERN_WARNING "btrfs: failed to initialize"
+			printk(KERN_WARNING "BTRFS: failed to initialize"
 			       " integrity check module %s\n", sb->s_id);
 	}
 #endif
@@ -2804,7 +2804,7 @@
 		u64 bytenr = btrfs_super_log_root(disk_super);
 
 		if (fs_devices->rw_devices == 0) {
-			printk(KERN_WARNING "Btrfs log replay required "
+			printk(KERN_WARNING "BTRFS: log replay required "
 			       "on RO media\n");
 			err = -EIO;
 			goto fail_qgroup;
@@ -2827,7 +2827,7 @@
 						      generation + 1);
 		if (!log_tree_root->node ||
 		    !extent_buffer_uptodate(log_tree_root->node)) {
-			printk(KERN_ERR "btrfs: failed to read log tree\n");
+			printk(KERN_ERR "BTRFS: failed to read log tree\n");
 			free_extent_buffer(log_tree_root->node);
 			kfree(log_tree_root);
 			goto fail_trans_kthread;
@@ -2861,7 +2861,7 @@
 		ret = btrfs_recover_relocation(tree_root);
 		if (ret < 0) {
 			printk(KERN_WARNING
-			       "btrfs: failed to recover relocation\n");
+			       "BTRFS: failed to recover relocation\n");
 			err = -EINVAL;
 			goto fail_qgroup;
 		}
@@ -2891,14 +2891,14 @@
 
 	ret = btrfs_resume_balance_async(fs_info);
 	if (ret) {
-		printk(KERN_WARNING "btrfs: failed to resume balance\n");
+		printk(KERN_WARNING "BTRFS: failed to resume balance\n");
 		close_ctree(tree_root);
 		return ret;
 	}
 
 	ret = btrfs_resume_dev_replace_async(fs_info);
 	if (ret) {
-		pr_warn("btrfs: failed to resume dev_replace\n");
+		pr_warn("BTRFS: failed to resume dev_replace\n");
 		close_ctree(tree_root);
 		return ret;
 	}
@@ -2906,20 +2906,20 @@
 	btrfs_qgroup_rescan_resume(fs_info);
 
 	if (create_uuid_tree) {
-		pr_info("btrfs: creating UUID tree\n");
+		pr_info("BTRFS: creating UUID tree\n");
 		ret = btrfs_create_uuid_tree(fs_info);
 		if (ret) {
-			pr_warn("btrfs: failed to create the UUID tree %d\n",
+			pr_warn("BTRFS: failed to create the UUID tree %d\n",
 				ret);
 			close_ctree(tree_root);
 			return ret;
 		}
 	} else if (check_uuid_tree ||
 		   btrfs_test_opt(tree_root, RESCAN_UUID_TREE)) {
-		pr_info("btrfs: checking UUID tree\n");
+		pr_info("BTRFS: checking UUID tree\n");
 		ret = btrfs_check_uuid_tree(fs_info);
 		if (ret) {
-			pr_warn("btrfs: failed to check the UUID tree %d\n",
+			pr_warn("BTRFS: failed to check the UUID tree %d\n",
 				ret);
 			close_ctree(tree_root);
 			return ret;
@@ -2945,6 +2945,9 @@
 	 */
 	filemap_write_and_wait(fs_info->btree_inode->i_mapping);
 
+fail_sysfs:
+	btrfs_sysfs_remove_one(fs_info);
+
 fail_block_groups:
 	btrfs_put_block_group_cache(fs_info);
 	btrfs_free_block_groups(fs_info);
@@ -3000,7 +3003,7 @@
 		struct btrfs_device *device = (struct btrfs_device *)
 			bh->b_private;
 
-		printk_ratelimited_in_rcu(KERN_WARNING "lost page write due to "
+		printk_ratelimited_in_rcu(KERN_WARNING "BTRFS: lost page write due to "
 					  "I/O error on %s\n",
 					  rcu_str_deref(device->name));
 		/* note, we dont' set_buffer_write_io_error because we have
@@ -3119,7 +3122,7 @@
 			bh = __getblk(device->bdev, bytenr / 4096,
 				      BTRFS_SUPER_INFO_SIZE);
 			if (!bh) {
-				printk(KERN_ERR "btrfs: couldn't get super "
+				printk(KERN_ERR "BTRFS: couldn't get super "
 				       "buffer head for bytenr %Lu\n", bytenr);
 				errors++;
 				continue;
@@ -3140,7 +3143,10 @@
 		 * we fua the first super.  The others we allow
 		 * to go down lazy.
 		 */
-		ret = btrfsic_submit_bh(WRITE_FUA, bh);
+		if (i == 0)
+			ret = btrfsic_submit_bh(WRITE_FUA, bh);
+		else
+			ret = btrfsic_submit_bh(WRITE_SYNC, bh);
 		if (ret)
 			errors++;
 	}
@@ -3186,7 +3192,7 @@
 		wait_for_completion(&device->flush_wait);
 
 		if (bio_flagged(bio, BIO_EOPNOTSUPP)) {
-			printk_in_rcu("btrfs: disabling barriers on dev %s\n",
+			printk_in_rcu("BTRFS: disabling barriers on dev %s\n",
 				      rcu_str_deref(device->name));
 			device->nobarriers = 1;
 		} else if (!bio_flagged(bio, BIO_UPTODATE)) {
@@ -3407,7 +3413,7 @@
 			total_errors++;
 	}
 	if (total_errors > max_errors) {
-		printk(KERN_ERR "btrfs: %d errors while writing supers\n",
+		btrfs_err(root->fs_info, "%d errors while writing supers",
 		       total_errors);
 		mutex_unlock(&root->fs_info->fs_devices->device_list_mutex);
 
@@ -3455,10 +3461,8 @@
 	if (btrfs_root_refs(&root->root_item) == 0)
 		synchronize_srcu(&fs_info->subvol_srcu);
 
-	if (test_bit(BTRFS_FS_STATE_ERROR, &fs_info->fs_state)) {
+	if (test_bit(BTRFS_FS_STATE_ERROR, &fs_info->fs_state))
 		btrfs_free_log(NULL, root);
-		btrfs_free_log_root_tree(NULL, fs_info);
-	}
 
 	__btrfs_remove_free_space_cache(root->free_ino_pinned);
 	__btrfs_remove_free_space_cache(root->free_ino_ctl);
@@ -3563,14 +3567,12 @@
 	if (!(fs_info->sb->s_flags & MS_RDONLY)) {
 		ret = btrfs_commit_super(root);
 		if (ret)
-			printk(KERN_ERR "btrfs: commit super ret %d\n", ret);
+			btrfs_err(root->fs_info, "commit super ret %d", ret);
 	}
 
 	if (test_bit(BTRFS_FS_STATE_ERROR, &fs_info->fs_state))
 		btrfs_error_commit_super(root);
 
-	btrfs_put_block_group_cache(fs_info);
-
 	kthread_stop(fs_info->transaction_kthread);
 	kthread_stop(fs_info->cleaner_kthread);
 
@@ -3580,12 +3582,16 @@
 	btrfs_free_qgroup_config(root->fs_info);
 
 	if (percpu_counter_sum(&fs_info->delalloc_bytes)) {
-		printk(KERN_INFO "btrfs: at unmount delalloc count %lld\n",
+		btrfs_info(root->fs_info, "at unmount delalloc count %lld",
 		       percpu_counter_sum(&fs_info->delalloc_bytes));
 	}
 
+	btrfs_sysfs_remove_one(fs_info);
+
 	del_fs_roots(fs_info);
 
+	btrfs_put_block_group_cache(fs_info);
+
 	btrfs_free_block_groups(fs_info);
 
 	btrfs_stop_all_workers(fs_info);
@@ -3803,55 +3809,54 @@
 	delayed_refs = &trans->delayed_refs;
 
 	spin_lock(&delayed_refs->lock);
-	if (delayed_refs->num_entries == 0) {
+	if (atomic_read(&delayed_refs->num_entries) == 0) {
 		spin_unlock(&delayed_refs->lock);
-		printk(KERN_INFO "delayed_refs has NO entry\n");
+		btrfs_info(root->fs_info, "delayed_refs has NO entry");
 		return ret;
 	}
 
-	while ((node = rb_first(&delayed_refs->root)) != NULL) {
-		struct btrfs_delayed_ref_head *head = NULL;
+	while ((node = rb_first(&delayed_refs->href_root)) != NULL) {
+		struct btrfs_delayed_ref_head *head;
 		bool pin_bytes = false;
 
-		ref = rb_entry(node, struct btrfs_delayed_ref_node, rb_node);
-		atomic_set(&ref->refs, 1);
-		if (btrfs_delayed_ref_is_head(ref)) {
+		head = rb_entry(node, struct btrfs_delayed_ref_head,
+				href_node);
+		if (!mutex_trylock(&head->mutex)) {
+			atomic_inc(&head->node.refs);
+			spin_unlock(&delayed_refs->lock);
 
-			head = btrfs_delayed_node_to_head(ref);
-			if (!mutex_trylock(&head->mutex)) {
-				atomic_inc(&ref->refs);
-				spin_unlock(&delayed_refs->lock);
-
-				/* Need to wait for the delayed ref to run */
-				mutex_lock(&head->mutex);
-				mutex_unlock(&head->mutex);
-				btrfs_put_delayed_ref(ref);
-
-				spin_lock(&delayed_refs->lock);
-				continue;
-			}
-
-			if (head->must_insert_reserved)
-				pin_bytes = true;
-			btrfs_free_delayed_extent_op(head->extent_op);
-			delayed_refs->num_heads--;
-			if (list_empty(&head->cluster))
-				delayed_refs->num_heads_ready--;
-			list_del_init(&head->cluster);
-		}
-
-		ref->in_tree = 0;
-		rb_erase(&ref->rb_node, &delayed_refs->root);
-		delayed_refs->num_entries--;
-		spin_unlock(&delayed_refs->lock);
-		if (head) {
-			if (pin_bytes)
-				btrfs_pin_extent(root, ref->bytenr,
-						 ref->num_bytes, 1);
+			mutex_lock(&head->mutex);
 			mutex_unlock(&head->mutex);
+			btrfs_put_delayed_ref(&head->node);
+			spin_lock(&delayed_refs->lock);
+			continue;
 		}
-		btrfs_put_delayed_ref(ref);
+		spin_lock(&head->lock);
+		while ((node = rb_first(&head->ref_root)) != NULL) {
+			ref = rb_entry(node, struct btrfs_delayed_ref_node,
+				       rb_node);
+			ref->in_tree = 0;
+			rb_erase(&ref->rb_node, &head->ref_root);
+			atomic_dec(&delayed_refs->num_entries);
+			btrfs_put_delayed_ref(ref);
+		}
+		if (head->must_insert_reserved)
+			pin_bytes = true;
+		btrfs_free_delayed_extent_op(head->extent_op);
+		delayed_refs->num_heads--;
+		if (head->processing == 0)
+			delayed_refs->num_heads_ready--;
+		atomic_dec(&delayed_refs->num_entries);
+		head->node.in_tree = 0;
+		rb_erase(&head->href_node, &delayed_refs->href_root);
+		spin_unlock(&head->lock);
+		spin_unlock(&delayed_refs->lock);
+		mutex_unlock(&head->mutex);
 
+		if (pin_bytes)
+			btrfs_pin_extent(root, head->node.bytenr,
+					 head->node.num_bytes, 1);
+		btrfs_put_delayed_ref(&head->node);
 		cond_resched();
 		spin_lock(&delayed_refs->lock);
 	}
diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c
index 9c01509..32312e0 100644
--- a/fs/btrfs/extent-tree.c
+++ b/fs/btrfs/extent-tree.c
@@ -35,6 +35,7 @@
 #include "locking.h"
 #include "free-space-cache.h"
 #include "math.h"
+#include "sysfs.h"
 
 #undef SCRAMBLE_DELAYED_REFS
 
@@ -441,7 +442,8 @@
 			if (ret)
 				break;
 
-			if (need_resched()) {
+			if (need_resched() ||
+			    rwsem_is_contended(&fs_info->extent_commit_sem)) {
 				caching_ctl->progress = last;
 				btrfs_release_path(path);
 				up_read(&fs_info->extent_commit_sem);
@@ -855,12 +857,14 @@
 			btrfs_put_delayed_ref(&head->node);
 			goto search_again;
 		}
+		spin_lock(&head->lock);
 		if (head->extent_op && head->extent_op->update_flags)
 			extent_flags |= head->extent_op->flags_to_set;
 		else
 			BUG_ON(num_refs == 0);
 
 		num_refs += head->node.ref_mod;
+		spin_unlock(&head->lock);
 		mutex_unlock(&head->mutex);
 	}
 	spin_unlock(&delayed_refs->lock);
@@ -1070,11 +1074,11 @@
 	__le64 lenum;
 
 	lenum = cpu_to_le64(root_objectid);
-	high_crc = crc32c(high_crc, &lenum, sizeof(lenum));
+	high_crc = btrfs_crc32c(high_crc, &lenum, sizeof(lenum));
 	lenum = cpu_to_le64(owner);
-	low_crc = crc32c(low_crc, &lenum, sizeof(lenum));
+	low_crc = btrfs_crc32c(low_crc, &lenum, sizeof(lenum));
 	lenum = cpu_to_le64(offset);
-	low_crc = crc32c(low_crc, &lenum, sizeof(lenum));
+	low_crc = btrfs_crc32c(low_crc, &lenum, sizeof(lenum));
 
 	return ((u64)high_crc << 31) ^ (u64)low_crc;
 }
@@ -2285,64 +2289,62 @@
 select_delayed_ref(struct btrfs_delayed_ref_head *head)
 {
 	struct rb_node *node;
-	struct btrfs_delayed_ref_node *ref;
-	int action = BTRFS_ADD_DELAYED_REF;
-again:
+	struct btrfs_delayed_ref_node *ref, *last = NULL;
+
 	/*
 	 * select delayed ref of type BTRFS_ADD_DELAYED_REF first.
 	 * this prevents ref count from going down to zero when
 	 * there still are pending delayed ref.
 	 */
-	node = rb_prev(&head->node.rb_node);
-	while (1) {
-		if (!node)
-			break;
+	node = rb_first(&head->ref_root);
+	while (node) {
 		ref = rb_entry(node, struct btrfs_delayed_ref_node,
 				rb_node);
-		if (ref->bytenr != head->node.bytenr)
-			break;
-		if (ref->action == action)
+		if (ref->action == BTRFS_ADD_DELAYED_REF)
 			return ref;
-		node = rb_prev(node);
+		else if (last == NULL)
+			last = ref;
+		node = rb_next(node);
 	}
-	if (action == BTRFS_ADD_DELAYED_REF) {
-		action = BTRFS_DROP_DELAYED_REF;
-		goto again;
-	}
-	return NULL;
+	return last;
 }
 
 /*
  * Returns 0 on success or if called with an already aborted transaction.
  * Returns -ENOMEM or -EIO on failure and will abort the transaction.
  */
-static noinline int run_clustered_refs(struct btrfs_trans_handle *trans,
-				       struct btrfs_root *root,
-				       struct list_head *cluster)
+static noinline int __btrfs_run_delayed_refs(struct btrfs_trans_handle *trans,
+					     struct btrfs_root *root,
+					     unsigned long nr)
 {
 	struct btrfs_delayed_ref_root *delayed_refs;
 	struct btrfs_delayed_ref_node *ref;
 	struct btrfs_delayed_ref_head *locked_ref = NULL;
 	struct btrfs_delayed_extent_op *extent_op;
 	struct btrfs_fs_info *fs_info = root->fs_info;
+	ktime_t start = ktime_get();
 	int ret;
-	int count = 0;
+	unsigned long count = 0;
+	unsigned long actual_count = 0;
 	int must_insert_reserved = 0;
 
 	delayed_refs = &trans->transaction->delayed_refs;
 	while (1) {
 		if (!locked_ref) {
-			/* pick a new head ref from the cluster list */
-			if (list_empty(cluster))
+			if (count >= nr)
 				break;
 
-			locked_ref = list_entry(cluster->next,
-				     struct btrfs_delayed_ref_head, cluster);
+			spin_lock(&delayed_refs->lock);
+			locked_ref = btrfs_select_ref_head(trans);
+			if (!locked_ref) {
+				spin_unlock(&delayed_refs->lock);
+				break;
+			}
 
 			/* grab the lock that says we are going to process
 			 * all the refs for this head */
 			ret = btrfs_delayed_ref_lock(trans, locked_ref);
-
+			spin_unlock(&delayed_refs->lock);
 			/*
 			 * we may have dropped the spin lock to get the head
 			 * mutex lock, and that might have given someone else
@@ -2363,6 +2365,7 @@
 		 * finish.  If we merged anything we need to re-loop so we can
 		 * get a good ref.
 		 */
+		spin_lock(&locked_ref->lock);
 		btrfs_merge_delayed_refs(trans, fs_info, delayed_refs,
 					 locked_ref);
 
@@ -2374,17 +2377,15 @@
 
 		if (ref && ref->seq &&
 		    btrfs_check_delayed_seq(fs_info, delayed_refs, ref->seq)) {
-			/*
-			 * there are still refs with lower seq numbers in the
-			 * process of being added. Don't run this ref yet.
-			 */
-			list_del_init(&locked_ref->cluster);
+			spin_unlock(&locked_ref->lock);
 			btrfs_delayed_ref_unlock(locked_ref);
-			locked_ref = NULL;
+			spin_lock(&delayed_refs->lock);
+			locked_ref->processing = 0;
 			delayed_refs->num_heads_ready++;
 			spin_unlock(&delayed_refs->lock);
+			locked_ref = NULL;
 			cond_resched();
-			spin_lock(&delayed_refs->lock);
+			count++;
 			continue;
 		}
 
@@ -2399,6 +2400,8 @@
 		locked_ref->extent_op = NULL;
 
 		if (!ref) {
 			/* All delayed refs have been processed, Go ahead
 			 * and send the head node to run_one_delayed_ref,
 			 * so that any accounting fixes can happen
@@ -2411,8 +2414,7 @@
 			}
 
 			if (extent_op) {
-				spin_unlock(&delayed_refs->lock);
-
+				spin_unlock(&locked_ref->lock);
 				ret = run_delayed_extent_op(trans, root,
 							    ref, extent_op);
 				btrfs_free_delayed_extent_op(extent_op);
@@ -2426,19 +2428,39 @@
 					 */
 					if (must_insert_reserved)
 						locked_ref->must_insert_reserved = 1;
+					locked_ref->processing = 0;
 					btrfs_debug(fs_info, "run_delayed_extent_op returned %d", ret);
-					spin_lock(&delayed_refs->lock);
 					btrfs_delayed_ref_unlock(locked_ref);
 					return ret;
 				}
-
-				goto next;
+				continue;
 			}
-		}
 
-		ref->in_tree = 0;
-		rb_erase(&ref->rb_node, &delayed_refs->root);
-		delayed_refs->num_entries--;
+			/*
+			 * Need to drop our head ref lock and re-acquire the
+			 * delayed ref lock and then re-check to make sure
+			 * nobody got added.
+			 */
+			spin_unlock(&locked_ref->lock);
+			spin_lock(&delayed_refs->lock);
+			spin_lock(&locked_ref->lock);
+			if (rb_first(&locked_ref->ref_root)) {
+				spin_unlock(&locked_ref->lock);
+				spin_unlock(&delayed_refs->lock);
+				continue;
+			}
+			ref->in_tree = 0;
+			delayed_refs->num_heads--;
+			rb_erase(&locked_ref->href_node,
+				 &delayed_refs->href_root);
+			spin_unlock(&delayed_refs->lock);
+		} else {
+			actual_count++;
+			ref->in_tree = 0;
+			rb_erase(&ref->rb_node, &locked_ref->ref_root);
+		}
+		atomic_dec(&delayed_refs->num_entries);
+
 		if (!btrfs_delayed_ref_is_head(ref)) {
 			/*
 			 * when we play the delayed ref, also correct the
@@ -2455,20 +2477,18 @@
 			default:
 				WARN_ON(1);
 			}
-		} else {
-			list_del_init(&locked_ref->cluster);
 		}
-		spin_unlock(&delayed_refs->lock);
+		spin_unlock(&locked_ref->lock);
 
 		ret = run_one_delayed_ref(trans, root, ref, extent_op,
 					  must_insert_reserved);
 
 		btrfs_free_delayed_extent_op(extent_op);
 		if (ret) {
+			locked_ref->processing = 0;
 			btrfs_delayed_ref_unlock(locked_ref);
 			btrfs_put_delayed_ref(ref);
 			btrfs_debug(fs_info, "run_one_delayed_ref returned %d", ret);
-			spin_lock(&delayed_refs->lock);
 			return ret;
 		}
 
@@ -2484,11 +2504,29 @@
 		}
 		btrfs_put_delayed_ref(ref);
 		count++;
-next:
 		cond_resched();
-		spin_lock(&delayed_refs->lock);
 	}
-	return count;
+
+	/*
+	 * We don't want to include ref heads, since empty ref heads would
+	 * drastically skew our runtime down: for those we only do accounting,
+	 * no actual extent tree updates.
+	 */
+	if (actual_count > 0) {
+		u64 runtime = ktime_to_ns(ktime_sub(ktime_get(), start));
+		u64 avg;
+
+		/*
+		 * We weigh the current average higher than our current runtime
+		 * to avoid large swings in the average.
+		 */
+		spin_lock(&delayed_refs->lock);
+		avg = fs_info->avg_delayed_ref_runtime * 3 + runtime;
+		avg = div64_u64(avg, 4);
+		fs_info->avg_delayed_ref_runtime = avg;
+		spin_unlock(&delayed_refs->lock);
+	}
+	return 0;
 }
 
 #ifdef SCRAMBLE_DELAYED_REFS
@@ -2570,16 +2608,6 @@
 	return ret;
 }
 
-static int refs_newer(struct btrfs_delayed_ref_root *delayed_refs, int seq,
-		      int count)
-{
-	int val = atomic_read(&delayed_refs->ref_seq);
-
-	if (val < seq || val >= seq + count)
-		return 1;
-	return 0;
-}
-
 static inline u64 heads_to_leaves(struct btrfs_root *root, u64 heads)
 {
 	u64 num_bytes;
@@ -2596,7 +2624,7 @@
 	return div64_u64(num_bytes, BTRFS_LEAF_DATA_SIZE(root));
 }
 
-int btrfs_should_throttle_delayed_refs(struct btrfs_trans_handle *trans,
+int btrfs_check_space_for_delayed_refs(struct btrfs_trans_handle *trans,
 				       struct btrfs_root *root)
 {
 	struct btrfs_block_rsv *global_rsv;
@@ -2625,6 +2653,22 @@
 	return ret;
 }
 
+int btrfs_should_throttle_delayed_refs(struct btrfs_trans_handle *trans,
+				       struct btrfs_root *root)
+{
+	struct btrfs_fs_info *fs_info = root->fs_info;
+	u64 num_entries =
+		atomic_read(&trans->transaction->delayed_refs.num_entries);
+	u64 avg_runtime;
+
+	smp_mb();
+	avg_runtime = fs_info->avg_delayed_ref_runtime;
+	if (num_entries * avg_runtime >= NSEC_PER_SEC)
+		return 1;
+
+	return btrfs_check_space_for_delayed_refs(trans, root);
+}
+
 /*
  * this starts processing the delayed reference count updates and
  * extent insertions we have queued up so far.  count can be
@@ -2640,13 +2684,10 @@
 {
 	struct rb_node *node;
 	struct btrfs_delayed_ref_root *delayed_refs;
-	struct btrfs_delayed_ref_node *ref;
-	struct list_head cluster;
+	struct btrfs_delayed_ref_head *head;
 	int ret;
-	u64 delayed_start;
 	int run_all = count == (unsigned long)-1;
 	int run_most = 0;
-	int loops;
 
 	/* We'll clean this up in btrfs_cleanup_transaction */
 	if (trans->aborted)
@@ -2658,130 +2699,40 @@
 	btrfs_delayed_refs_qgroup_accounting(trans, root->fs_info);
 
 	delayed_refs = &trans->transaction->delayed_refs;
-	INIT_LIST_HEAD(&cluster);
 	if (count == 0) {
-		count = delayed_refs->num_entries * 2;
+		count = atomic_read(&delayed_refs->num_entries) * 2;
 		run_most = 1;
 	}
 
-	if (!run_all && !run_most) {
-		int old;
-		int seq = atomic_read(&delayed_refs->ref_seq);
-
-progress:
-		old = atomic_cmpxchg(&delayed_refs->procs_running_refs, 0, 1);
-		if (old) {
-			DEFINE_WAIT(__wait);
-			if (delayed_refs->flushing ||
-			    !btrfs_should_throttle_delayed_refs(trans, root))
-				return 0;
-
-			prepare_to_wait(&delayed_refs->wait, &__wait,
-					TASK_UNINTERRUPTIBLE);
-
-			old = atomic_cmpxchg(&delayed_refs->procs_running_refs, 0, 1);
-			if (old) {
-				schedule();
-				finish_wait(&delayed_refs->wait, &__wait);
-
-				if (!refs_newer(delayed_refs, seq, 256))
-					goto progress;
-				else
-					return 0;
-			} else {
-				finish_wait(&delayed_refs->wait, &__wait);
-				goto again;
-			}
-		}
-
-	} else {
-		atomic_inc(&delayed_refs->procs_running_refs);
-	}
-
 again:
-	loops = 0;
-	spin_lock(&delayed_refs->lock);
-
 #ifdef SCRAMBLE_DELAYED_REFS
 	delayed_refs->run_delayed_start = find_middle(&delayed_refs->root);
 #endif
-
-	while (1) {
-		if (!(run_all || run_most) &&
-		    !btrfs_should_throttle_delayed_refs(trans, root))
-			break;
-
-		/*
-		 * go find something we can process in the rbtree.  We start at
-		 * the beginning of the tree, and then build a cluster
-		 * of refs to process starting at the first one we are able to
-		 * lock
-		 */
-		delayed_start = delayed_refs->run_delayed_start;
-		ret = btrfs_find_ref_cluster(trans, &cluster,
-					     delayed_refs->run_delayed_start);
-		if (ret)
-			break;
-
-		ret = run_clustered_refs(trans, root, &cluster);
-		if (ret < 0) {
-			btrfs_release_ref_cluster(&cluster);
-			spin_unlock(&delayed_refs->lock);
-			btrfs_abort_transaction(trans, root, ret);
-			atomic_dec(&delayed_refs->procs_running_refs);
-			wake_up(&delayed_refs->wait);
-			return ret;
-		}
-
-		atomic_add(ret, &delayed_refs->ref_seq);
-
-		count -= min_t(unsigned long, ret, count);
-
-		if (count == 0)
-			break;
-
-		if (delayed_start >= delayed_refs->run_delayed_start) {
-			if (loops == 0) {
-				/*
-				 * btrfs_find_ref_cluster looped. let's do one
-				 * more cycle. if we don't run any delayed ref
-				 * during that cycle (because we can't because
-				 * all of them are blocked), bail out.
-				 */
-				loops = 1;
-			} else {
-				/*
-				 * no runnable refs left, stop trying
-				 */
-				BUG_ON(run_all);
-				break;
-			}
-		}
-		if (ret) {
-			/* refs were run, let's reset staleness detection */
-			loops = 0;
-		}
+	ret = __btrfs_run_delayed_refs(trans, root, count);
+	if (ret < 0) {
+		btrfs_abort_transaction(trans, root, ret);
+		return ret;
 	}
 
 	if (run_all) {
-		if (!list_empty(&trans->new_bgs)) {
-			spin_unlock(&delayed_refs->lock);
+		if (!list_empty(&trans->new_bgs))
 			btrfs_create_pending_block_groups(trans, root);
-			spin_lock(&delayed_refs->lock);
-		}
 
-		node = rb_first(&delayed_refs->root);
-		if (!node)
+		spin_lock(&delayed_refs->lock);
+		node = rb_first(&delayed_refs->href_root);
+		if (!node) {
+			spin_unlock(&delayed_refs->lock);
 			goto out;
+		}
 		count = (unsigned long)-1;
 
 		while (node) {
-			ref = rb_entry(node, struct btrfs_delayed_ref_node,
-				       rb_node);
-			if (btrfs_delayed_ref_is_head(ref)) {
-				struct btrfs_delayed_ref_head *head;
+			head = rb_entry(node, struct btrfs_delayed_ref_head,
+					href_node);
+			if (btrfs_delayed_ref_is_head(&head->node)) {
+				struct btrfs_delayed_ref_node *ref;
 
-				head = btrfs_delayed_node_to_head(ref);
+				ref = &head->node;
 				atomic_inc(&ref->refs);
 
 				spin_unlock(&delayed_refs->lock);
@@ -2795,20 +2746,16 @@
 				btrfs_put_delayed_ref(ref);
 				cond_resched();
 				goto again;
+			} else {
+				WARN_ON(1);
 			}
 			node = rb_next(node);
 		}
 		spin_unlock(&delayed_refs->lock);
-		schedule_timeout(1);
+		cond_resched();
 		goto again;
 	}
 out:
-	atomic_dec(&delayed_refs->procs_running_refs);
-	smp_mb();
-	if (waitqueue_active(&delayed_refs->wait))
-		wake_up(&delayed_refs->wait);
-
-	spin_unlock(&delayed_refs->lock);
 	assert_qgroups_uptodate(trans);
 	return 0;
 }
@@ -2850,12 +2797,13 @@
 	struct rb_node *node;
 	int ret = 0;
 
-	ret = -ENOENT;
 	delayed_refs = &trans->transaction->delayed_refs;
 	spin_lock(&delayed_refs->lock);
 	head = btrfs_find_delayed_ref_head(trans, bytenr);
-	if (!head)
-		goto out;
+	if (!head) {
+		spin_unlock(&delayed_refs->lock);
+		return 0;
+	}
 
 	if (!mutex_trylock(&head->mutex)) {
 		atomic_inc(&head->node.refs);
@@ -2872,40 +2820,35 @@
 		btrfs_put_delayed_ref(&head->node);
 		return -EAGAIN;
 	}
-
-	node = rb_prev(&head->node.rb_node);
-	if (!node)
-		goto out_unlock;
-
-	ref = rb_entry(node, struct btrfs_delayed_ref_node, rb_node);
-
-	if (ref->bytenr != bytenr)
-		goto out_unlock;
-
-	ret = 1;
-	if (ref->type != BTRFS_EXTENT_DATA_REF_KEY)
-		goto out_unlock;
-
-	data_ref = btrfs_delayed_node_to_data_ref(ref);
-
-	node = rb_prev(node);
-	if (node) {
-		int seq = ref->seq;
-
-		ref = rb_entry(node, struct btrfs_delayed_ref_node, rb_node);
-		if (ref->bytenr == bytenr && ref->seq == seq)
-			goto out_unlock;
-	}
-
-	if (data_ref->root != root->root_key.objectid ||
-	    data_ref->objectid != objectid || data_ref->offset != offset)
-		goto out_unlock;
-
-	ret = 0;
-out_unlock:
-	mutex_unlock(&head->mutex);
-out:
 	spin_unlock(&delayed_refs->lock);
+
+	spin_lock(&head->lock);
+	node = rb_first(&head->ref_root);
+	while (node) {
+		ref = rb_entry(node, struct btrfs_delayed_ref_node, rb_node);
+		node = rb_next(node);
+
+		/* If it's a shared ref we know a cross reference exists */
+		if (ref->type != BTRFS_EXTENT_DATA_REF_KEY) {
+			ret = 1;
+			break;
+		}
+
+		data_ref = btrfs_delayed_node_to_data_ref(ref);
+
+		/*
+		 * If our ref doesn't match the one we're currently looking at
+		 * then we have a cross reference.
+		 */
+		if (data_ref->root != root->root_key.objectid ||
+		    data_ref->objectid != objectid ||
+		    data_ref->offset != offset) {
+			ret = 1;
+			break;
+		}
+	}
+	spin_unlock(&head->lock);
+	mutex_unlock(&head->mutex);
 	return ret;
 }
 
@@ -3402,6 +3345,23 @@
 	return readonly;
 }
 
+static const char *alloc_name(u64 flags)
+{
+	switch (flags) {
+	case BTRFS_BLOCK_GROUP_METADATA|BTRFS_BLOCK_GROUP_DATA:
+		return "mixed";
+	case BTRFS_BLOCK_GROUP_METADATA:
+		return "metadata";
+	case BTRFS_BLOCK_GROUP_DATA:
+		return "data";
+	case BTRFS_BLOCK_GROUP_SYSTEM:
+		return "system";
+	default:
+		WARN_ON(1);
+		return "invalid-combination";
+	}
+}
+
 static int update_space_info(struct btrfs_fs_info *info, u64 flags,
 			     u64 total_bytes, u64 bytes_used,
 			     struct btrfs_space_info **space_info)
@@ -3439,8 +3399,10 @@
 		return ret;
 	}
 
-	for (i = 0; i < BTRFS_NR_RAID_TYPES; i++)
+	for (i = 0; i < BTRFS_NR_RAID_TYPES; i++) {
 		INIT_LIST_HEAD(&found->block_groups[i]);
+		kobject_init(&found->block_group_kobjs[i], &btrfs_raid_ktype);
+	}
 	init_rwsem(&found->groups_sem);
 	spin_lock_init(&found->lock);
 	found->flags = flags & BTRFS_BLOCK_GROUP_TYPE_MASK;
@@ -3457,11 +3419,21 @@
 	found->chunk_alloc = 0;
 	found->flush = 0;
 	init_waitqueue_head(&found->wait);
+
+	ret = kobject_init_and_add(&found->kobj, &space_info_ktype,
+				    info->space_info_kobj, "%s",
+				    alloc_name(found->flags));
+	if (ret) {
+		kfree(found);
+		return ret;
+	}
+
 	*space_info = found;
 	list_add_rcu(&found->list, &info->space_info);
 	if (flags & BTRFS_BLOCK_GROUP_DATA)
 		info->data_sinfo = found;
-	return 0;
+
+	return ret;
 }
 
 static void set_avail_alloc_bits(struct btrfs_fs_info *fs_info, u64 flags)
@@ -4637,7 +4609,7 @@
 			     u64 num_bytes)
 {
 	struct btrfs_block_rsv *global_rsv = &root->fs_info->global_block_rsv;
-	if (global_rsv->full || global_rsv == block_rsv ||
+	if (global_rsv == block_rsv ||
 	    block_rsv->space_info != global_rsv->space_info)
 		global_rsv = NULL;
 	block_rsv_release_bytes(root->fs_info, block_rsv, global_rsv,
@@ -5916,24 +5888,16 @@
 {
 	struct btrfs_delayed_ref_head *head;
 	struct btrfs_delayed_ref_root *delayed_refs;
-	struct btrfs_delayed_ref_node *ref;
-	struct rb_node *node;
 	int ret = 0;
 
 	delayed_refs = &trans->transaction->delayed_refs;
 	spin_lock(&delayed_refs->lock);
 	head = btrfs_find_delayed_ref_head(trans, bytenr);
 	if (!head)
-		goto out;
+		goto out_delayed_unlock;
 
-	node = rb_prev(&head->node.rb_node);
-	if (!node)
-		goto out;
-
-	ref = rb_entry(node, struct btrfs_delayed_ref_node, rb_node);
-
-	/* there are still entries for this ref, we can't drop it */
-	if (ref->bytenr == bytenr)
+	spin_lock(&head->lock);
+	if (rb_first(&head->ref_root))
 		goto out;
 
 	if (head->extent_op) {
@@ -5955,19 +5919,19 @@
 	 * ahead and process it.
 	 */
 	head->node.in_tree = 0;
-	rb_erase(&head->node.rb_node, &delayed_refs->root);
+	rb_erase(&head->href_node, &delayed_refs->href_root);
 
-	delayed_refs->num_entries--;
+	atomic_dec(&delayed_refs->num_entries);
 
 	/*
 	 * we don't take a ref on the node because we're removing it from the
 	 * tree, so we just steal the ref the tree was holding.
 	 */
 	delayed_refs->num_heads--;
-	if (list_empty(&head->cluster))
+	if (head->processing == 0)
 		delayed_refs->num_heads_ready--;
-
-	list_del_init(&head->cluster);
+	head->processing = 0;
+	spin_unlock(&head->lock);
 	spin_unlock(&delayed_refs->lock);
 
 	BUG_ON(head->extent_op);
@@ -5978,6 +5942,9 @@
 	btrfs_put_delayed_ref(&head->node);
 	return ret;
 out:
+	spin_unlock(&head->lock);
+
+out_delayed_unlock:
 	spin_unlock(&delayed_refs->lock);
 	return 0;
 }
@@ -6145,11 +6112,29 @@
 	return BTRFS_RAID_SINGLE; /* BTRFS_BLOCK_GROUP_SINGLE */
 }
 
-static int get_block_group_index(struct btrfs_block_group_cache *cache)
+int get_block_group_index(struct btrfs_block_group_cache *cache)
 {
 	return __get_raid_index(cache->flags);
 }
 
+static const char *btrfs_raid_type_names[BTRFS_NR_RAID_TYPES] = {
+	[BTRFS_RAID_RAID10]	= "raid10",
+	[BTRFS_RAID_RAID1]	= "raid1",
+	[BTRFS_RAID_DUP]	= "dup",
+	[BTRFS_RAID_RAID0]	= "raid0",
+	[BTRFS_RAID_SINGLE]	= "single",
+	[BTRFS_RAID_RAID5]	= "raid5",
+	[BTRFS_RAID_RAID6]	= "raid6",
+};
+
+static const char *get_raid_name(enum btrfs_raid_types type)
+{
+	if (type >= BTRFS_NR_RAID_TYPES)
+		return NULL;
+
+	return btrfs_raid_type_names[type];
+}
+
 enum btrfs_loop_type {
 	LOOP_CACHING_NOWAIT = 0,
 	LOOP_CACHING_WAIT = 1,
@@ -6177,7 +6162,6 @@
 	struct btrfs_root *root = orig_root->fs_info->extent_root;
 	struct btrfs_free_cluster *last_ptr = NULL;
 	struct btrfs_block_group_cache *block_group = NULL;
-	struct btrfs_block_group_cache *used_block_group;
 	u64 search_start = 0;
 	u64 max_extent_size = 0;
 	int empty_cluster = 2 * 1024 * 1024;
@@ -6186,7 +6170,6 @@
 	int index = __get_raid_index(flags);
 	int alloc_type = (flags & BTRFS_BLOCK_GROUP_DATA) ?
 		RESERVE_ALLOC_NO_ACCOUNT : RESERVE_ALLOC;
-	bool found_uncached_bg = false;
 	bool failed_cluster_refill = false;
 	bool failed_alloc = false;
 	bool use_cluster = true;
@@ -6239,7 +6222,6 @@
 	if (search_start == hint_byte) {
 		block_group = btrfs_lookup_block_group(root->fs_info,
 						       search_start);
-		used_block_group = block_group;
 		/*
 		 * we don't want to use the block group if it doesn't match our
 		 * allocation bits, or if its not cached.
@@ -6276,7 +6258,6 @@
 		u64 offset;
 		int cached;
 
-		used_block_group = block_group;
 		btrfs_get_block_group(block_group);
 		search_start = block_group->key.objectid;
 
@@ -6304,7 +6285,6 @@
 have_block_group:
 		cached = block_group_cache_done(block_group);
 		if (unlikely(!cached)) {
-			found_uncached_bg = true;
 			ret = cache_block_group(block_group, 0);
 			BUG_ON(ret < 0);
 			ret = 0;
@@ -6320,6 +6300,7 @@
 		 * lets look there
 		 */
 		if (last_ptr) {
+			struct btrfs_block_group_cache *used_block_group;
 			unsigned long aligned_cluster;
 			/*
 			 * the refill lock keeps out other
@@ -6330,10 +6311,8 @@
 			if (used_block_group != block_group &&
 			    (!used_block_group ||
 			     used_block_group->ro ||
-			     !block_group_bits(used_block_group, flags))) {
-				used_block_group = block_group;
+			     !block_group_bits(used_block_group, flags)))
 				goto refill_cluster;
-			}
 
 			if (used_block_group != block_group)
 				btrfs_get_block_group(used_block_group);
@@ -6347,17 +6326,19 @@
 				/* we have a block, we're done */
 				spin_unlock(&last_ptr->refill_lock);
 				trace_btrfs_reserve_extent_cluster(root,
-					block_group, search_start, num_bytes);
+						used_block_group,
+						search_start, num_bytes);
+				if (used_block_group != block_group) {
+					btrfs_put_block_group(block_group);
+					block_group = used_block_group;
+				}
 				goto checks;
 			}
 
 			WARN_ON(last_ptr->block_group != used_block_group);
-			if (used_block_group != block_group) {
+			if (used_block_group != block_group)
 				btrfs_put_block_group(used_block_group);
-				used_block_group = block_group;
-			}
 refill_cluster:
-			BUG_ON(used_block_group != block_group);
 			/* If we are on LOOP_NO_EMPTY_SIZE, we can't
 			 * set up a new clusters, so lets just skip it
 			 * and let the allocator find whatever block
@@ -6476,25 +6457,25 @@
 			goto loop;
 		}
 checks:
-		search_start = stripe_align(root, used_block_group,
+		search_start = stripe_align(root, block_group,
 					    offset, num_bytes);
 
 		/* move on to the next group */
 		if (search_start + num_bytes >
-		    used_block_group->key.objectid + used_block_group->key.offset) {
-			btrfs_add_free_space(used_block_group, offset, num_bytes);
+		    block_group->key.objectid + block_group->key.offset) {
+			btrfs_add_free_space(block_group, offset, num_bytes);
 			goto loop;
 		}
 
 		if (offset < search_start)
-			btrfs_add_free_space(used_block_group, offset,
+			btrfs_add_free_space(block_group, offset,
 					     search_start - offset);
 		BUG_ON(offset > search_start);
 
-		ret = btrfs_update_reserved_bytes(used_block_group, num_bytes,
+		ret = btrfs_update_reserved_bytes(block_group, num_bytes,
 						  alloc_type);
 		if (ret == -EAGAIN) {
-			btrfs_add_free_space(used_block_group, offset, num_bytes);
+			btrfs_add_free_space(block_group, offset, num_bytes);
 			goto loop;
 		}
 
@@ -6504,16 +6485,12 @@
 
 		trace_btrfs_reserve_extent(orig_root, block_group,
 					   search_start, num_bytes);
-		if (used_block_group != block_group)
-			btrfs_put_block_group(used_block_group);
 		btrfs_put_block_group(block_group);
 		break;
 loop:
 		failed_cluster_refill = false;
 		failed_alloc = false;
 		BUG_ON(index != get_block_group_index(block_group));
-		if (used_block_group != block_group)
-			btrfs_put_block_group(used_block_group);
 		btrfs_put_block_group(block_group);
 	}
 	up_read(&space_info->groups_sem);
@@ -6584,12 +6561,12 @@
 	int index = 0;
 
 	spin_lock(&info->lock);
-	printk(KERN_INFO "space_info %llu has %llu free, is %sfull\n",
+	printk(KERN_INFO "BTRFS: space_info %llu has %llu free, is %sfull\n",
 	       info->flags,
 	       info->total_bytes - info->bytes_used - info->bytes_pinned -
 	       info->bytes_reserved - info->bytes_readonly,
 	       (info->full) ? "" : "not ");
-	printk(KERN_INFO "space_info total=%llu, used=%llu, pinned=%llu, "
+	printk(KERN_INFO "BTRFS: space_info total=%llu, used=%llu, pinned=%llu, "
 	       "reserved=%llu, may_use=%llu, readonly=%llu\n",
 	       info->total_bytes, info->bytes_used, info->bytes_pinned,
 	       info->bytes_reserved, info->bytes_may_use,
@@ -6603,7 +6580,9 @@
 again:
 	list_for_each_entry(cache, &info->block_groups[index], list) {
 		spin_lock(&cache->lock);
-		printk(KERN_INFO "block group %llu has %llu bytes, %llu used %llu pinned %llu reserved %s\n",
+		printk(KERN_INFO "BTRFS: "
+			   "block group %llu has %llu bytes, "
+			   "%llu used %llu pinned %llu reserved %s\n",
 		       cache->key.objectid, cache->key.offset,
 		       btrfs_block_group_used(&cache->item), cache->pinned,
 		       cache->reserved, cache->ro ? "[readonly]" : "");
@@ -6966,7 +6945,7 @@
 				/*DEFAULT_RATELIMIT_BURST*/ 1);
 		if (__ratelimit(&_rs))
 			WARN(1, KERN_DEBUG
-				"btrfs: block rsv returned %d\n", ret);
+				"BTRFS: block rsv returned %d\n", ret);
 	}
 try_reserve:
 	ret = reserve_metadata_bytes(root, block_rsv, blocksize,
@@ -7714,7 +7693,7 @@
 
 			btrfs_end_transaction_throttle(trans, tree_root);
 			if (!for_reloc && btrfs_need_cleaner_sleep(root)) {
-				pr_debug("btrfs: drop snapshot early exit\n");
+				pr_debug("BTRFS: drop snapshot early exit\n");
 				err = -EAGAIN;
 				goto out_free;
 			}
@@ -7779,7 +7758,7 @@
 	 */
 	if (!for_reloc && root_dropped == false)
 		btrfs_add_dead_root(root);
-	if (err)
+	if (err && err != -EAGAIN)
 		btrfs_std_error(root->fs_info, err);
 	return err;
 }
@@ -8333,6 +8312,8 @@
 	release_global_block_rsv(info);
 
 	while (!list_empty(&info->space_info)) {
+		int i;
+
 		space_info = list_entry(info->space_info.next,
 					struct btrfs_space_info,
 					list);
@@ -8343,9 +8324,17 @@
 				dump_space_info(space_info, 0, 0);
 			}
 		}
-		percpu_counter_destroy(&space_info->total_bytes_pinned);
 		list_del(&space_info->list);
-		kfree(space_info);
+		for (i = 0; i < BTRFS_NR_RAID_TYPES; i++) {
+			struct kobject *kobj;
+			kobj = &space_info->block_group_kobjs[i];
+			if (kobj->parent) {
+				kobject_del(kobj);
+				kobject_put(kobj);
+			}
+		}
+		kobject_del(&space_info->kobj);
+		kobject_put(&space_info->kobj);
 	}
 	return 0;
 }
@@ -8356,10 +8345,57 @@
 	int index = get_block_group_index(cache);
 
 	down_write(&space_info->groups_sem);
+	if (list_empty(&space_info->block_groups[index])) {
+		struct kobject *kobj = &space_info->block_group_kobjs[index];
+		int ret;
+
+		kobject_get(&space_info->kobj); /* put in release */
+		ret = kobject_add(kobj, &space_info->kobj, "%s",
+				  get_raid_name(index));
+		if (ret) {
+			pr_warn("BTRFS: failed to add kobject for block cache. ignoring.\n");
+			kobject_put(&space_info->kobj);
+		}
+	}
 	list_add_tail(&cache->list, &space_info->block_groups[index]);
 	up_write(&space_info->groups_sem);
 }
 
+static struct btrfs_block_group_cache *
+btrfs_create_block_group_cache(struct btrfs_root *root, u64 start, u64 size)
+{
+	struct btrfs_block_group_cache *cache;
+
+	cache = kzalloc(sizeof(*cache), GFP_NOFS);
+	if (!cache)
+		return NULL;
+
+	cache->free_space_ctl = kzalloc(sizeof(*cache->free_space_ctl),
+					GFP_NOFS);
+	if (!cache->free_space_ctl) {
+		kfree(cache);
+		return NULL;
+	}
+
+	cache->key.objectid = start;
+	cache->key.offset = size;
+	cache->key.type = BTRFS_BLOCK_GROUP_ITEM_KEY;
+
+	cache->sectorsize = root->sectorsize;
+	cache->fs_info = root->fs_info;
+	cache->full_stripe_len = btrfs_full_stripe_len(root,
+					       &root->fs_info->mapping_tree,
+					       start);
+	atomic_set(&cache->count, 1);
+	spin_lock_init(&cache->lock);
+	INIT_LIST_HEAD(&cache->list);
+	INIT_LIST_HEAD(&cache->cluster_list);
+	INIT_LIST_HEAD(&cache->new_bg_list);
+	btrfs_init_free_space_ctl(cache);
+
+	return cache;
+}
+
 int btrfs_read_block_groups(struct btrfs_root *root)
 {
 	struct btrfs_path *path;
@@ -8395,26 +8431,16 @@
 			break;
 		if (ret != 0)
 			goto error;
+
 		leaf = path->nodes[0];
 		btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
-		cache = kzalloc(sizeof(*cache), GFP_NOFS);
+
+		cache = btrfs_create_block_group_cache(root, found_key.objectid,
+						       found_key.offset);
 		if (!cache) {
 			ret = -ENOMEM;
 			goto error;
 		}
-		cache->free_space_ctl = kzalloc(sizeof(*cache->free_space_ctl),
-						GFP_NOFS);
-		if (!cache->free_space_ctl) {
-			kfree(cache);
-			ret = -ENOMEM;
-			goto error;
-		}
-
-		atomic_set(&cache->count, 1);
-		spin_lock_init(&cache->lock);
-		cache->fs_info = info;
-		INIT_LIST_HEAD(&cache->list);
-		INIT_LIST_HEAD(&cache->cluster_list);
 
 		if (need_clear) {
 			/*
@@ -8435,16 +8461,10 @@
 		read_extent_buffer(leaf, &cache->item,
 				   btrfs_item_ptr_offset(leaf, path->slots[0]),
 				   sizeof(cache->item));
-		memcpy(&cache->key, &found_key, sizeof(found_key));
+		cache->flags = btrfs_block_group_flags(&cache->item);
 
 		key.objectid = found_key.objectid + found_key.offset;
 		btrfs_release_path(path);
-		cache->flags = btrfs_block_group_flags(&cache->item);
-		cache->sectorsize = root->sectorsize;
-		cache->full_stripe_len = btrfs_full_stripe_len(root,
-					       &root->fs_info->mapping_tree,
-					       found_key.objectid);
-		btrfs_init_free_space_ctl(cache);
 
 		/*
 		 * We need to exclude the super stripes now so that the space
@@ -8458,8 +8478,7 @@
 			 * case.
 			 */
 			free_excluded_extents(root, cache);
-			kfree(cache->free_space_ctl);
-			kfree(cache);
+			btrfs_put_block_group(cache);
 			goto error;
 		}
 
@@ -8590,38 +8609,15 @@
 
 	root->fs_info->last_trans_log_full_commit = trans->transid;
 
-	cache = kzalloc(sizeof(*cache), GFP_NOFS);
+	cache = btrfs_create_block_group_cache(root, chunk_offset, size);
 	if (!cache)
 		return -ENOMEM;
-	cache->free_space_ctl = kzalloc(sizeof(*cache->free_space_ctl),
-					GFP_NOFS);
-	if (!cache->free_space_ctl) {
-		kfree(cache);
-		return -ENOMEM;
-	}
-
-	cache->key.objectid = chunk_offset;
-	cache->key.offset = size;
-	cache->key.type = BTRFS_BLOCK_GROUP_ITEM_KEY;
-	cache->sectorsize = root->sectorsize;
-	cache->fs_info = root->fs_info;
-	cache->full_stripe_len = btrfs_full_stripe_len(root,
-					       &root->fs_info->mapping_tree,
-					       chunk_offset);
-
-	atomic_set(&cache->count, 1);
-	spin_lock_init(&cache->lock);
-	INIT_LIST_HEAD(&cache->list);
-	INIT_LIST_HEAD(&cache->cluster_list);
-	INIT_LIST_HEAD(&cache->new_bg_list);
-
-	btrfs_init_free_space_ctl(cache);
 
 	btrfs_set_block_group_used(&cache->item, bytes_used);
 	btrfs_set_block_group_chunk_objectid(&cache->item, chunk_objectid);
-	cache->flags = type;
 	btrfs_set_block_group_flags(&cache->item, type);
 
+	cache->flags = type;
 	cache->last_byte_to_unpin = (u64)-1;
 	cache->cached = BTRFS_CACHE_FINISHED;
 	ret = exclude_super_stripes(root, cache);
@@ -8631,8 +8627,7 @@
 		 * case.
 		 */
 		free_excluded_extents(root, cache);
-		kfree(cache->free_space_ctl);
-		kfree(cache);
+		btrfs_put_block_group(cache);
 		return ret;
 	}
 
@@ -8796,8 +8791,11 @@
 	 * are still on the list after taking the semaphore
 	 */
 	list_del_init(&block_group->list);
-	if (list_empty(&block_group->space_info->block_groups[index]))
+	if (list_empty(&block_group->space_info->block_groups[index])) {
+		kobject_del(&block_group->space_info->block_group_kobjs[index]);
+		kobject_put(&block_group->space_info->block_group_kobjs[index]);
 		clear_avail_alloc_bits(root->fs_info, block_group->flags);
+	}
 	up_write(&block_group->space_info->groups_sem);
 
 	if (block_group->cached == BTRFS_CACHE_STARTED)
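
The delayed-ref changes above keep a running average of how long a batch of ref updates takes (weighted 3:1 toward the previous average) and throttle new work once the queued backlog times that average reaches roughly a second. The following stand-alone user-space C sketch models just that arithmetic; the struct and helper names are illustrative, not kernel API.

/*
 * Illustrative sketch of the runtime accounting above; not kernel code.
 * The average is weighted 3:1 toward its previous value, and throttling
 * starts once draining the backlog would take >= 1 second.
 */
#include <stdint.h>
#include <stdio.h>

#define NSEC_PER_SEC 1000000000ULL

struct ref_stats {
	uint64_t avg_runtime_ns;	/* running average per batch */
	uint64_t num_entries;		/* queued delayed ref updates */
};

/* Fold a newly measured batch runtime into the running average. */
static void update_avg(struct ref_stats *s, uint64_t runtime_ns)
{
	s->avg_runtime_ns = (s->avg_runtime_ns * 3 + runtime_ns) / 4;
}

/* Mirror of the throttle test: estimated drain time >= one second. */
static int should_throttle(const struct ref_stats *s)
{
	return s->num_entries * s->avg_runtime_ns >= NSEC_PER_SEC;
}

int main(void)
{
	struct ref_stats s = { .avg_runtime_ns = 20000, .num_entries = 40000 };

	update_avg(&s, 100000);	/* one slow batch nudges the average up */
	printf("avg=%llu ns, throttle=%d\n",
	       (unsigned long long)s.avg_runtime_ns, should_throttle(&s));
	return 0;
}
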
diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c
index ff43802..85bbd01 100644
--- a/fs/btrfs/extent_io.c
+++ b/fs/btrfs/extent_io.c
@@ -59,7 +59,7 @@
 
 	while (!list_empty(&states)) {
 		state = list_entry(states.next, struct extent_state, leak_list);
-		printk(KERN_ERR "btrfs state leak: start %llu end %llu "
+		printk(KERN_ERR "BTRFS: state leak: start %llu end %llu "
 		       "state %lu in tree %p refs %d\n",
 		       state->start, state->end, state->state, state->tree,
 		       atomic_read(&state->refs));
@@ -69,7 +69,7 @@
 
 	while (!list_empty(&buffers)) {
 		eb = list_entry(buffers.next, struct extent_buffer, leak_list);
-		printk(KERN_ERR "btrfs buffer leak start %llu len %lu "
+		printk(KERN_ERR "BTRFS: buffer leak start %llu len %lu "
 		       "refs %d\n",
 		       eb->start, eb->len, atomic_read(&eb->refs));
 		list_del(&eb->leak_list);
@@ -77,16 +77,22 @@
 	}
 }
 
-#define btrfs_debug_check_extent_io_range(inode, start, end)		\
-	__btrfs_debug_check_extent_io_range(__func__, (inode), (start), (end))
+#define btrfs_debug_check_extent_io_range(tree, start, end)		\
+	__btrfs_debug_check_extent_io_range(__func__, (tree), (start), (end))
 static inline void __btrfs_debug_check_extent_io_range(const char *caller,
-		struct inode *inode, u64 start, u64 end)
+		struct extent_io_tree *tree, u64 start, u64 end)
 {
-	u64 isize = i_size_read(inode);
+	struct inode *inode;
+	u64 isize;
 
+	if (!tree->mapping)
+		return;
+
+	inode = tree->mapping->host;
+	isize = i_size_read(inode);
 	if (end >= PAGE_SIZE && (end % 2) == 0 && end != isize - 1) {
 		printk_ratelimited(KERN_DEBUG
-		    "btrfs: %s: ino %llu isize %llu odd range [%llu,%llu]\n",
+		    "BTRFS: %s: ino %llu isize %llu odd range [%llu,%llu]\n",
 				caller, btrfs_ino(inode), isize, start, end);
 	}
 }
@@ -124,6 +130,8 @@
 static inline struct btrfs_fs_info *
 tree_fs_info(struct extent_io_tree *tree)
 {
+	if (!tree->mapping)
+		return NULL;
 	return btrfs_sb(tree->mapping->host->i_sb);
 }
 
@@ -186,11 +194,9 @@
 			 struct address_space *mapping)
 {
 	tree->state = RB_ROOT;
-	INIT_RADIX_TREE(&tree->buffer, GFP_ATOMIC);
 	tree->ops = NULL;
 	tree->dirty_bytes = 0;
 	spin_lock_init(&tree->lock);
-	spin_lock_init(&tree->buffer_lock);
 	tree->mapping = mapping;
 }
 
@@ -224,12 +230,20 @@
 }
 
 static struct rb_node *tree_insert(struct rb_root *root, u64 offset,
-				   struct rb_node *node)
+				   struct rb_node *node,
+				   struct rb_node ***p_in,
+				   struct rb_node **parent_in)
 {
 	struct rb_node **p = &root->rb_node;
 	struct rb_node *parent = NULL;
 	struct tree_entry *entry;
 
+	if (p_in && parent_in) {
+		p = *p_in;
+		parent = *parent_in;
+		goto do_insert;
+	}
+
 	while (*p) {
 		parent = *p;
 		entry = rb_entry(parent, struct tree_entry, rb_node);
@@ -242,35 +256,43 @@
 			return parent;
 	}
 
+do_insert:
 	rb_link_node(node, parent, p);
 	rb_insert_color(node, root);
 	return NULL;
 }
 
 static struct rb_node *__etree_search(struct extent_io_tree *tree, u64 offset,
-				     struct rb_node **prev_ret,
-				     struct rb_node **next_ret)
+				      struct rb_node **prev_ret,
+				      struct rb_node **next_ret,
+				      struct rb_node ***p_ret,
+				      struct rb_node **parent_ret)
 {
 	struct rb_root *root = &tree->state;
-	struct rb_node *n = root->rb_node;
+	struct rb_node **n = &root->rb_node;
 	struct rb_node *prev = NULL;
 	struct rb_node *orig_prev = NULL;
 	struct tree_entry *entry;
 	struct tree_entry *prev_entry = NULL;
 
-	while (n) {
-		entry = rb_entry(n, struct tree_entry, rb_node);
-		prev = n;
+	while (*n) {
+		prev = *n;
+		entry = rb_entry(prev, struct tree_entry, rb_node);
 		prev_entry = entry;
 
 		if (offset < entry->start)
-			n = n->rb_left;
+			n = &(*n)->rb_left;
 		else if (offset > entry->end)
-			n = n->rb_right;
+			n = &(*n)->rb_right;
 		else
-			return n;
+			return *n;
 	}
 
+	if (p_ret)
+		*p_ret = n;
+	if (parent_ret)
+		*parent_ret = prev;
+
 	if (prev_ret) {
 		orig_prev = prev;
 		while (prev && offset > prev_entry->end) {
@@ -292,18 +314,27 @@
 	return NULL;
 }
 
-static inline struct rb_node *tree_search(struct extent_io_tree *tree,
-					  u64 offset)
+static inline struct rb_node *
+tree_search_for_insert(struct extent_io_tree *tree,
+		       u64 offset,
+		       struct rb_node ***p_ret,
+		       struct rb_node **parent_ret)
 {
 	struct rb_node *prev = NULL;
 	struct rb_node *ret;
 
-	ret = __etree_search(tree, offset, &prev, NULL);
+	ret = __etree_search(tree, offset, &prev, NULL, p_ret, parent_ret);
 	if (!ret)
 		return prev;
 	return ret;
 }
 
+static inline struct rb_node *tree_search(struct extent_io_tree *tree,
+					  u64 offset)
+{
+	return tree_search_for_insert(tree, offset, NULL, NULL);
+}
+
 static void merge_cb(struct extent_io_tree *tree, struct extent_state *new,
 		     struct extent_state *other)
 {
@@ -385,23 +416,25 @@
  */
 static int insert_state(struct extent_io_tree *tree,
 			struct extent_state *state, u64 start, u64 end,
+			struct rb_node ***p,
+			struct rb_node **parent,
 			unsigned long *bits)
 {
 	struct rb_node *node;
 
 	if (end < start)
-		WARN(1, KERN_ERR "btrfs end < start %llu %llu\n",
+		WARN(1, KERN_ERR "BTRFS: end < start %llu %llu\n",
 		       end, start);
 	state->start = start;
 	state->end = end;
 
 	set_state_bits(tree, state, bits);
 
-	node = tree_insert(&tree->state, end, &state->rb_node);
+	node = tree_insert(&tree->state, end, &state->rb_node, p, parent);
 	if (node) {
 		struct extent_state *found;
 		found = rb_entry(node, struct extent_state, rb_node);
-		printk(KERN_ERR "btrfs found node %llu %llu on insert of "
+		printk(KERN_ERR "BTRFS: found node %llu %llu on insert of "
 		       "%llu %llu\n",
 		       found->start, found->end, start, end);
 		return -EEXIST;
@@ -444,7 +477,8 @@
 	prealloc->state = orig->state;
 	orig->start = split;
 
-	node = tree_insert(&tree->state, prealloc->end, &prealloc->rb_node);
+	node = tree_insert(&tree->state, prealloc->end, &prealloc->rb_node,
+			   NULL, NULL);
 	if (node) {
 		free_extent_state(prealloc);
 		return -EEXIST;
@@ -542,7 +576,7 @@
 	int err;
 	int clear = 0;
 
-	btrfs_debug_check_extent_io_range(tree->mapping->host, start, end);
+	btrfs_debug_check_extent_io_range(tree, start, end);
 
 	if (bits & EXTENT_DELALLOC)
 		bits |= EXTENT_NORESERVE;
@@ -702,7 +736,7 @@
 	struct extent_state *state;
 	struct rb_node *node;
 
-	btrfs_debug_check_extent_io_range(tree->mapping->host, start, end);
+	btrfs_debug_check_extent_io_range(tree, start, end);
 
 	spin_lock(&tree->lock);
 again:
@@ -783,11 +817,13 @@
 	struct extent_state *state;
 	struct extent_state *prealloc = NULL;
 	struct rb_node *node;
+	struct rb_node **p;
+	struct rb_node *parent;
 	int err = 0;
 	u64 last_start;
 	u64 last_end;
 
-	btrfs_debug_check_extent_io_range(tree->mapping->host, start, end);
+	btrfs_debug_check_extent_io_range(tree, start, end);
 
 	bits |= EXTENT_FIRST_DELALLOC;
 again:
@@ -809,14 +845,16 @@
 	 * this search will find all the extents that end after
 	 * our range starts.
 	 */
-	node = tree_search(tree, start);
+	node = tree_search_for_insert(tree, start, &p, &parent);
 	if (!node) {
 		prealloc = alloc_extent_state_atomic(prealloc);
 		BUG_ON(!prealloc);
-		err = insert_state(tree, prealloc, start, end, &bits);
+		err = insert_state(tree, prealloc, start, end,
+				   &p, &parent, &bits);
 		if (err)
 			extent_io_tree_panic(tree, err);
 
+		cache_state(prealloc, cached_state);
 		prealloc = NULL;
 		goto out;
 	}
@@ -919,7 +957,7 @@
 		 * the later extent.
 		 */
 		err = insert_state(tree, prealloc, start, this_end,
-				   &bits);
+				   NULL, NULL, &bits);
 		if (err)
 			extent_io_tree_panic(tree, err);
 
@@ -1005,11 +1043,13 @@
 	struct extent_state *state;
 	struct extent_state *prealloc = NULL;
 	struct rb_node *node;
+	struct rb_node **p;
+	struct rb_node *parent;
 	int err = 0;
 	u64 last_start;
 	u64 last_end;
 
-	btrfs_debug_check_extent_io_range(tree->mapping->host, start, end);
+	btrfs_debug_check_extent_io_range(tree, start, end);
 
 again:
 	if (!prealloc && (mask & __GFP_WAIT)) {
@@ -1032,17 +1072,19 @@
 	 * this search will find all the extents that end after
 	 * our range starts.
 	 */
-	node = tree_search(tree, start);
+	node = tree_search_for_insert(tree, start, &p, &parent);
 	if (!node) {
 		prealloc = alloc_extent_state_atomic(prealloc);
 		if (!prealloc) {
 			err = -ENOMEM;
 			goto out;
 		}
-		err = insert_state(tree, prealloc, start, end, &bits);
-		prealloc = NULL;
+		err = insert_state(tree, prealloc, start, end,
+				   &p, &parent, &bits);
 		if (err)
 			extent_io_tree_panic(tree, err);
+		cache_state(prealloc, cached_state);
+		prealloc = NULL;
 		goto out;
 	}
 	state = rb_entry(node, struct extent_state, rb_node);
@@ -1135,7 +1177,7 @@
 		 * the later extent.
 		 */
 		err = insert_state(tree, prealloc, start, this_end,
-				   &bits);
+				   NULL, NULL, &bits);
 		if (err)
 			extent_io_tree_panic(tree, err);
 		cache_state(prealloc, cached_state);
@@ -1984,7 +2026,7 @@
 	bio = btrfs_io_bio_alloc(GFP_NOFS, 1);
 	if (!bio)
 		return -EIO;
-	bio->bi_size = 0;
+	bio->bi_iter.bi_size = 0;
 	map_length = length;
 
 	ret = btrfs_map_block(fs_info, WRITE, logical,
@@ -1995,7 +2037,7 @@
 	}
 	BUG_ON(mirror_num != bbio->mirror_num);
 	sector = bbio->stripes[mirror_num-1].physical >> 9;
-	bio->bi_sector = sector;
+	bio->bi_iter.bi_sector = sector;
 	dev = bbio->stripes[mirror_num-1].dev;
 	kfree(bbio);
 	if (!dev || !dev->bdev || !dev->writeable) {
@@ -2012,9 +2054,10 @@
 		return -EIO;
 	}
 
-	printk_ratelimited_in_rcu(KERN_INFO "btrfs read error corrected: ino %lu off %llu "
-		      "(dev %s sector %llu)\n", page->mapping->host->i_ino,
-		      start, rcu_str_deref(dev->name), sector);
+	printk_ratelimited_in_rcu(KERN_INFO
+			"BTRFS: read error corrected: ino %lu off %llu "
+		    "(dev %s sector %llu)\n", page->mapping->host->i_ino,
+		    start, rcu_str_deref(dev->name), sector);
 
 	bio_put(bio);
 	return 0;
@@ -2156,7 +2199,7 @@
 			return -EIO;
 		}
 
-		if (em->start > start || em->start + em->len < start) {
+		if (em->start > start || em->start + em->len <= start) {
 			free_extent_map(em);
 			em = NULL;
 		}
@@ -2268,9 +2311,9 @@
 		return -EIO;
 	}
 	bio->bi_end_io = failed_bio->bi_end_io;
-	bio->bi_sector = failrec->logical >> 9;
+	bio->bi_iter.bi_sector = failrec->logical >> 9;
 	bio->bi_bdev = BTRFS_I(inode)->root->fs_info->fs_devices->latest_bdev;
-	bio->bi_size = 0;
+	bio->bi_iter.bi_size = 0;
 
 	btrfs_failed_bio = btrfs_io_bio(failed_bio);
 	if (btrfs_failed_bio->csum) {
@@ -2332,37 +2375,39 @@
  */
 static void end_bio_extent_writepage(struct bio *bio, int err)
 {
-	struct bio_vec *bvec = bio->bi_io_vec + bio->bi_vcnt - 1;
-	struct extent_io_tree *tree;
+	struct bio_vec *bvec;
 	u64 start;
 	u64 end;
+	int i;
 
-	do {
+	bio_for_each_segment_all(bvec, bio, i) {
 		struct page *page = bvec->bv_page;
-		tree = &BTRFS_I(page->mapping->host)->io_tree;
 
 		/* We always issue full-page reads, but if some block
 		 * in a page fails to read, blk_update_request() will
 		 * advance bv_offset and adjust bv_len to compensate.
 		 * Print a warning for nonzero offsets, and an error
 		 * if they don't add up to a full page.  */
-		if (bvec->bv_offset || bvec->bv_len != PAGE_CACHE_SIZE)
-			printk("%s page write in btrfs with offset %u and length %u\n",
-			       bvec->bv_offset + bvec->bv_len != PAGE_CACHE_SIZE
-			       ? KERN_ERR "partial" : KERN_INFO "incomplete",
-			       bvec->bv_offset, bvec->bv_len);
+		if (bvec->bv_offset || bvec->bv_len != PAGE_CACHE_SIZE) {
+			if (bvec->bv_offset + bvec->bv_len != PAGE_CACHE_SIZE)
+				btrfs_err(BTRFS_I(page->mapping->host)->root->fs_info,
+				   "partial page write in btrfs with offset %u and length %u",
+					bvec->bv_offset, bvec->bv_len);
+			else
+				btrfs_info(BTRFS_I(page->mapping->host)->root->fs_info,
+				   "incomplete page write in btrfs with offset %u and "
+				   "length %u",
+					bvec->bv_offset, bvec->bv_len);
+		}
 
 		start = page_offset(page);
 		end = start + bvec->bv_offset + bvec->bv_len - 1;
 
-		if (--bvec >= bio->bi_io_vec)
-			prefetchw(&bvec->bv_page->flags);
-
 		if (end_extent_writepage(page, err, start, end))
 			continue;
 
 		end_page_writeback(page);
-	} while (bvec >= bio->bi_io_vec);
+	}
 
 	bio_put(bio);
 }
@@ -2392,9 +2437,8 @@
  */
 static void end_bio_extent_readpage(struct bio *bio, int err)
 {
+	struct bio_vec *bvec;
 	int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
-	struct bio_vec *bvec_end = bio->bi_io_vec + bio->bi_vcnt - 1;
-	struct bio_vec *bvec = bio->bi_io_vec;
 	struct btrfs_io_bio *io_bio = btrfs_io_bio(bio);
 	struct extent_io_tree *tree;
 	u64 offset = 0;
@@ -2405,16 +2449,17 @@
 	u64 extent_len = 0;
 	int mirror;
 	int ret;
+	int i;
 
 	if (err)
 		uptodate = 0;
 
-	do {
+	bio_for_each_segment_all(bvec, bio, i) {
 		struct page *page = bvec->bv_page;
 		struct inode *inode = page->mapping->host;
 
 		pr_debug("end_bio_extent_readpage: bi_sector=%llu, err=%d, "
-			 "mirror=%lu\n", (u64)bio->bi_sector, err,
+			 "mirror=%lu\n", (u64)bio->bi_iter.bi_sector, err,
 			 io_bio->mirror_num);
 		tree = &BTRFS_I(inode)->io_tree;
 
@@ -2423,19 +2468,22 @@
 		 * advance bv_offset and adjust bv_len to compensate.
 		 * Print a warning for nonzero offsets, and an error
 		 * if they don't add up to a full page.  */
-		if (bvec->bv_offset || bvec->bv_len != PAGE_CACHE_SIZE)
-			printk("%s page read in btrfs with offset %u and length %u\n",
-			       bvec->bv_offset + bvec->bv_len != PAGE_CACHE_SIZE
-			       ? KERN_ERR "partial" : KERN_INFO "incomplete",
-			       bvec->bv_offset, bvec->bv_len);
+		if (bvec->bv_offset || bvec->bv_len != PAGE_CACHE_SIZE) {
+			if (bvec->bv_offset + bvec->bv_len != PAGE_CACHE_SIZE)
+				btrfs_err(BTRFS_I(page->mapping->host)->root->fs_info,
+				   "partial page read in btrfs with offset %u and length %u",
+					bvec->bv_offset, bvec->bv_len);
+			else
+				btrfs_info(BTRFS_I(page->mapping->host)->root->fs_info,
+				   "incomplete page read in btrfs with offset %u and "
+				   "length %u",
+					bvec->bv_offset, bvec->bv_len);
+		}
 
 		start = page_offset(page);
 		end = start + bvec->bv_offset + bvec->bv_len - 1;
 		len = bvec->bv_len;
 
-		if (++bvec <= bvec_end)
-			prefetchw(&bvec->bv_page->flags);
-
 		mirror = io_bio->mirror_num;
 		if (likely(uptodate && tree->ops &&
 			   tree->ops->readpage_end_io_hook)) {
@@ -2516,7 +2564,7 @@
 			extent_start = start;
 			extent_len = end + 1 - start;
 		}
-	} while (bvec <= bvec_end);
+	}
 
 	if (extent_len)
 		endio_readpage_release_extent(tree, extent_start, extent_len,
@@ -2547,9 +2595,8 @@
 	}
 
 	if (bio) {
-		bio->bi_size = 0;
 		bio->bi_bdev = bdev;
-		bio->bi_sector = first_sector;
+		bio->bi_iter.bi_sector = first_sector;
 		btrfs_bio = btrfs_io_bio(bio);
 		btrfs_bio->csum = NULL;
 		btrfs_bio->csum_allocated = NULL;
@@ -2643,7 +2690,7 @@
 	if (bio_ret && *bio_ret) {
 		bio = *bio_ret;
 		if (old_compressed)
-			contig = bio->bi_sector == sector;
+			contig = bio->bi_iter.bi_sector == sector;
 		else
 			contig = bio_end_sector(bio) == sector;
 
@@ -3287,8 +3334,8 @@
 
 			set_range_writeback(tree, cur, cur + iosize - 1);
 			if (!PageWriteback(page)) {
-				printk(KERN_ERR "btrfs warning page %lu not "
-				       "writeback, cur %llu end %llu\n",
+				btrfs_err(BTRFS_I(inode)->root->fs_info,
+					   "page %lu not writeback, cur %llu end %llu",
 				       page->index, cur, end);
 			}
 
@@ -3410,20 +3457,18 @@
 
 static void end_bio_extent_buffer_writepage(struct bio *bio, int err)
 {
-	int uptodate = err == 0;
-	struct bio_vec *bvec = bio->bi_io_vec + bio->bi_vcnt - 1;
+	struct bio_vec *bvec;
 	struct extent_buffer *eb;
-	int done;
+	int i, done;
 
-	do {
+	bio_for_each_segment_all(bvec, bio, i) {
 		struct page *page = bvec->bv_page;
 
-		bvec--;
 		eb = (struct extent_buffer *)page->private;
 		BUG_ON(!eb);
 		done = atomic_dec_and_test(&eb->io_pages);
 
-		if (!uptodate || test_bit(EXTENT_BUFFER_IOERR, &eb->bflags)) {
+		if (err || test_bit(EXTENT_BUFFER_IOERR, &eb->bflags)) {
 			set_bit(EXTENT_BUFFER_IOERR, &eb->bflags);
 			ClearPageUptodate(page);
 			SetPageError(page);
@@ -3435,10 +3480,9 @@
 			continue;
 
 		end_extent_buffer_writeback(eb);
-	} while (bvec >= bio->bi_io_vec);
+	}
 
 	bio_put(bio);
-
 }
 
 static int write_one_eb(struct extent_buffer *eb,
@@ -3447,6 +3491,7 @@
 			struct extent_page_data *epd)
 {
 	struct block_device *bdev = fs_info->fs_devices->latest_bdev;
+	struct extent_io_tree *tree = &BTRFS_I(fs_info->btree_inode)->io_tree;
 	u64 offset = eb->start;
 	unsigned long i, num_pages;
 	unsigned long bio_flags = 0;
@@ -3464,7 +3509,7 @@
 
 		clear_page_dirty_for_io(p);
 		set_page_writeback(p);
-		ret = submit_extent_page(rw, eb->tree, p, offset >> 9,
+		ret = submit_extent_page(rw, tree, p, offset >> 9,
 					 PAGE_CACHE_SIZE, 0, bdev, &epd->bio,
 					 -1, end_bio_extent_buffer_writepage,
 					 0, epd->bio_flags, bio_flags);
@@ -4082,12 +4127,10 @@
 	struct extent_map *em = NULL;
 	struct extent_state *cached_state = NULL;
 	struct btrfs_path *path;
-	struct btrfs_file_extent_item *item;
 	int end = 0;
 	u64 em_start = 0;
 	u64 em_len = 0;
 	u64 em_end = 0;
-	unsigned long emflags;
 
 	if (len == 0)
 		return -EINVAL;
@@ -4112,8 +4155,6 @@
 	}
 	WARN_ON(!ret);
 	path->slots[0]--;
-	item = btrfs_item_ptr(path->nodes[0], path->slots[0],
-			      struct btrfs_file_extent_item);
 	btrfs_item_key_to_cpu(path->nodes[0], &found_key, path->slots[0]);
 	found_type = btrfs_key_type(&found_key);
 
@@ -4181,7 +4222,6 @@
 			offset_in_extent = em_start - em->start;
 		em_end = extent_map_end(em);
 		em_len = em_end - em_start;
-		emflags = em->flags;
 		disko = 0;
 		flags = 0;
 
@@ -4333,10 +4373,9 @@
 	__free_extent_buffer(eb);
 }
 
-static struct extent_buffer *__alloc_extent_buffer(struct extent_io_tree *tree,
-						   u64 start,
-						   unsigned long len,
-						   gfp_t mask)
+static struct extent_buffer *
+__alloc_extent_buffer(struct btrfs_fs_info *fs_info, u64 start,
+		      unsigned long len, gfp_t mask)
 {
 	struct extent_buffer *eb = NULL;
 
@@ -4345,7 +4384,7 @@
 		return NULL;
 	eb->start = start;
 	eb->len = len;
-	eb->tree = tree;
+	eb->fs_info = fs_info;
 	eb->bflags = 0;
 	rwlock_init(&eb->lock);
 	atomic_set(&eb->write_locks, 0);
@@ -4477,13 +4516,14 @@
 	}
 }
 
-struct extent_buffer *find_extent_buffer(struct extent_io_tree *tree,
-					 		u64 start)
+struct extent_buffer *find_extent_buffer(struct btrfs_fs_info *fs_info,
+					 u64 start)
 {
 	struct extent_buffer *eb;
 
 	rcu_read_lock();
-	eb = radix_tree_lookup(&tree->buffer, start >> PAGE_CACHE_SHIFT);
+	eb = radix_tree_lookup(&fs_info->buffer_radix,
+			       start >> PAGE_CACHE_SHIFT);
 	if (eb && atomic_inc_not_zero(&eb->refs)) {
 		rcu_read_unlock();
 		mark_extent_buffer_accessed(eb);
@@ -4494,7 +4534,7 @@
 	return NULL;
 }
 
-struct extent_buffer *alloc_extent_buffer(struct extent_io_tree *tree,
+struct extent_buffer *alloc_extent_buffer(struct btrfs_fs_info *fs_info,
 					  u64 start, unsigned long len)
 {
 	unsigned long num_pages = num_extent_pages(start, len);
@@ -4503,16 +4543,15 @@
 	struct extent_buffer *eb;
 	struct extent_buffer *exists = NULL;
 	struct page *p;
-	struct address_space *mapping = tree->mapping;
+	struct address_space *mapping = fs_info->btree_inode->i_mapping;
 	int uptodate = 1;
 	int ret;
 
-
-	eb = find_extent_buffer(tree, start);
+	eb = find_extent_buffer(fs_info, start);
 	if (eb)
 		return eb;
 
-	eb = __alloc_extent_buffer(tree, start, len, GFP_NOFS);
+	eb = __alloc_extent_buffer(fs_info, start, len, GFP_NOFS);
 	if (!eb)
 		return NULL;
 
@@ -4567,12 +4606,13 @@
 	if (ret)
 		goto free_eb;
 
-	spin_lock(&tree->buffer_lock);
-	ret = radix_tree_insert(&tree->buffer, start >> PAGE_CACHE_SHIFT, eb);
-	spin_unlock(&tree->buffer_lock);
+	spin_lock(&fs_info->buffer_lock);
+	ret = radix_tree_insert(&fs_info->buffer_radix,
+				start >> PAGE_CACHE_SHIFT, eb);
+	spin_unlock(&fs_info->buffer_lock);
 	radix_tree_preload_end();
 	if (ret == -EEXIST) {
-		exists = find_extent_buffer(tree, start);
+		exists = find_extent_buffer(fs_info, start);
 		if (exists)
 			goto free_eb;
 		else
@@ -4580,6 +4620,7 @@
 	}
 	/* add one reference for the tree */
 	check_buffer_tree_ref(eb);
+	set_bit(EXTENT_BUFFER_IN_TREE, &eb->bflags);
 
 	/*
 	 * there is a race where release page may have
@@ -4623,17 +4664,17 @@
 {
 	WARN_ON(atomic_read(&eb->refs) == 0);
 	if (atomic_dec_and_test(&eb->refs)) {
-		if (test_bit(EXTENT_BUFFER_DUMMY, &eb->bflags)) {
-			spin_unlock(&eb->refs_lock);
-		} else {
-			struct extent_io_tree *tree = eb->tree;
+		if (test_and_clear_bit(EXTENT_BUFFER_IN_TREE, &eb->bflags)) {
+			struct btrfs_fs_info *fs_info = eb->fs_info;
 
 			spin_unlock(&eb->refs_lock);
 
-			spin_lock(&tree->buffer_lock);
-			radix_tree_delete(&tree->buffer,
+			spin_lock(&fs_info->buffer_lock);
+			radix_tree_delete(&fs_info->buffer_radix,
 					  eb->start >> PAGE_CACHE_SHIFT);
-			spin_unlock(&tree->buffer_lock);
+			spin_unlock(&fs_info->buffer_lock);
+		} else {
+			spin_unlock(&eb->refs_lock);
 		}
 
 		/* Should be safe to release our pages at this point */
@@ -5112,12 +5153,12 @@
 	unsigned long src_i;
 
 	if (src_offset + len > dst->len) {
-		printk(KERN_ERR "btrfs memmove bogus src_offset %lu move "
+		printk(KERN_ERR "BTRFS: memmove bogus src_offset %lu move "
 		       "len %lu dst len %lu\n", src_offset, len, dst->len);
 		BUG_ON(1);
 	}
 	if (dst_offset + len > dst->len) {
-		printk(KERN_ERR "btrfs memmove bogus dst_offset %lu move "
+		printk(KERN_ERR "BTRFS: memmove bogus dst_offset %lu move "
 		       "len %lu dst len %lu\n", dst_offset, len, dst->len);
 		BUG_ON(1);
 	}
@@ -5159,12 +5200,12 @@
 	unsigned long src_i;
 
 	if (src_offset + len > dst->len) {
-		printk(KERN_ERR "btrfs memmove bogus src_offset %lu move "
+		printk(KERN_ERR "BTRFS: memmove bogus src_offset %lu move "
 		       "len %lu len %lu\n", src_offset, len, dst->len);
 		BUG_ON(1);
 	}
 	if (dst_offset + len > dst->len) {
-		printk(KERN_ERR "btrfs memmove bogus dst_offset %lu move "
+		printk(KERN_ERR "BTRFS: memmove bogus dst_offset %lu move "
 		       "len %lu len %lu\n", dst_offset, len, dst->len);
 		BUG_ON(1);
 	}
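
The extent state tree changes above make the search remember where it stopped (the rb_node link, plus the parent needed for rb_link_node()) so the following insert can link the new state without walking the tree a second time. Below is a minimal stand-alone sketch of that pattern, using a plain unbalanced BST rather than the kernel rbtree; all names are illustrative.

/*
 * Stand-alone sketch (plain unbalanced BST, not the kernel rbtree) of the
 * "search once, remember the insertion point" pattern used above.
 */
#include <stdio.h>
#include <stdlib.h>

struct node {
	unsigned long key;
	struct node *left, *right;
};

/* Search for key; on a miss, report the link where a new node belongs. */
static struct node *search_for_insert(struct node **root, unsigned long key,
				      struct node ***link_ret)
{
	struct node **link = root;

	while (*link) {
		struct node *cur = *link;

		if (key < cur->key)
			link = &cur->left;
		else if (key > cur->key)
			link = &cur->right;
		else
			return cur;		/* already present */
	}
	*link_ret = link;
	return NULL;
}

static struct node *insert(struct node **root, unsigned long key)
{
	struct node **link;
	struct node *n = search_for_insert(root, key, &link);

	if (n)
		return n;		/* found it, nothing to link */

	n = calloc(1, sizeof(*n));
	if (!n)
		return NULL;
	n->key = key;
	*link = n;		/* reuse the remembered link, no re-walk */
	return n;
}

int main(void)
{
	struct node *root = NULL;

	insert(&root, 10);
	insert(&root, 5);
	printf("root=%lu, left=%lu\n", root->key, root->left->key);
	return 0;
}
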
diff --git a/fs/btrfs/extent_io.h b/fs/btrfs/extent_io.h
index 19620c5..58b27e5 100644
--- a/fs/btrfs/extent_io.h
+++ b/fs/btrfs/extent_io.h
@@ -43,6 +43,7 @@
 #define EXTENT_BUFFER_WRITEBACK 7
 #define EXTENT_BUFFER_IOERR 8
 #define EXTENT_BUFFER_DUMMY 9
+#define EXTENT_BUFFER_IN_TREE 10
 
 /* these are flags for extent_clear_unlock_delalloc */
 #define PAGE_UNLOCK		(1 << 0)
@@ -94,12 +95,10 @@
 
 struct extent_io_tree {
 	struct rb_root state;
-	struct radix_tree_root buffer;
 	struct address_space *mapping;
 	u64 dirty_bytes;
 	int track_uptodate;
 	spinlock_t lock;
-	spinlock_t buffer_lock;
 	struct extent_io_ops *ops;
 };
 
@@ -130,7 +129,7 @@
 	unsigned long map_start;
 	unsigned long map_len;
 	unsigned long bflags;
-	struct extent_io_tree *tree;
+	struct btrfs_fs_info *fs_info;
 	spinlock_t refs_lock;
 	atomic_t refs;
 	atomic_t io_pages;
@@ -266,11 +265,11 @@
 int get_state_private(struct extent_io_tree *tree, u64 start, u64 *private);
 void set_page_extent_mapped(struct page *page);
 
-struct extent_buffer *alloc_extent_buffer(struct extent_io_tree *tree,
+struct extent_buffer *alloc_extent_buffer(struct btrfs_fs_info *fs_info,
 					  u64 start, unsigned long len);
 struct extent_buffer *alloc_dummy_extent_buffer(u64 start, unsigned long len);
 struct extent_buffer *btrfs_clone_extent_buffer(struct extent_buffer *src);
-struct extent_buffer *find_extent_buffer(struct extent_io_tree *tree,
+struct extent_buffer *find_extent_buffer(struct btrfs_fs_info *fs_info,
 					 u64 start);
 void free_extent_buffer(struct extent_buffer *eb);
 void free_extent_buffer_stale(struct extent_buffer *eb);
diff --git a/fs/btrfs/extent_map.c b/fs/btrfs/extent_map.c
index a4a7a1a..996ad56b 100644
--- a/fs/btrfs/extent_map.c
+++ b/fs/btrfs/extent_map.c
@@ -79,12 +79,21 @@
 	}
 }
 
-static struct rb_node *tree_insert(struct rb_root *root, u64 offset,
-				   struct rb_node *node)
+/* simple helper to do math around the end of an extent, handling wrap */
+static u64 range_end(u64 start, u64 len)
+{
+	if (start + len < start)
+		return (u64)-1;
+	return start + len;
+}
+
+static int tree_insert(struct rb_root *root, struct extent_map *em)
 {
 	struct rb_node **p = &root->rb_node;
 	struct rb_node *parent = NULL;
-	struct extent_map *entry;
+	struct extent_map *entry = NULL;
+	struct rb_node *orig_parent = NULL;
+	u64 end = range_end(em->start, em->len);
 
 	while (*p) {
 		parent = *p;
@@ -92,19 +101,37 @@
 
 		WARN_ON(!entry->in_tree);
 
-		if (offset < entry->start)
+		if (em->start < entry->start)
 			p = &(*p)->rb_left;
-		else if (offset >= extent_map_end(entry))
+		else if (em->start >= extent_map_end(entry))
 			p = &(*p)->rb_right;
 		else
-			return parent;
+			return -EEXIST;
 	}
 
-	entry = rb_entry(node, struct extent_map, rb_node);
-	entry->in_tree = 1;
-	rb_link_node(node, parent, p);
-	rb_insert_color(node, root);
-	return NULL;
+	orig_parent = parent;
+	while (parent && em->start >= extent_map_end(entry)) {
+		parent = rb_next(parent);
+		entry = rb_entry(parent, struct extent_map, rb_node);
+	}
+	if (parent)
+		if (end > entry->start && em->start < extent_map_end(entry))
+			return -EEXIST;
+
+	parent = orig_parent;
+	entry = rb_entry(parent, struct extent_map, rb_node);
+	while (parent && em->start < entry->start) {
+		parent = rb_prev(parent);
+		entry = rb_entry(parent, struct extent_map, rb_node);
+	}
+	if (parent)
+		if (end > entry->start && em->start < extent_map_end(entry))
+			return -EEXIST;
+
+	em->in_tree = 1;
+	rb_link_node(&em->rb_node, orig_parent, p);
+	rb_insert_color(&em->rb_node, root);
+	return 0;
 }
 
 /*
@@ -228,7 +255,7 @@
 		merge = rb_entry(rb, struct extent_map, rb_node);
 	if (rb && mergable_maps(em, merge)) {
 		em->len += merge->len;
-		em->block_len += merge->len;
+		em->block_len += merge->block_len;
 		rb_erase(&merge->rb_node, &tree->map);
 		merge->in_tree = 0;
 		em->mod_len = (merge->mod_start + merge->mod_len) - em->mod_start;
@@ -310,20 +337,11 @@
 		       struct extent_map *em, int modified)
 {
 	int ret = 0;
-	struct rb_node *rb;
-	struct extent_map *exist;
 
-	exist = lookup_extent_mapping(tree, em->start, em->len);
-	if (exist) {
-		free_extent_map(exist);
-		ret = -EEXIST;
+	ret = tree_insert(&tree->map, em);
+	if (ret)
 		goto out;
-	}
-	rb = tree_insert(&tree->map, em->start, &em->rb_node);
-	if (rb) {
-		ret = -EEXIST;
-		goto out;
-	}
+
 	atomic_inc(&em->refs);
 
 	em->mod_start = em->start;
@@ -337,14 +355,6 @@
 	return ret;
 }
 
-/* simple helper to do math around the end of an extent, handling wrap */
-static u64 range_end(u64 start, u64 len)
-{
-	if (start + len < start)
-		return (u64)-1;
-	return start + len;
-}
-
 static struct extent_map *
 __lookup_extent_mapping(struct extent_map_tree *tree,
 			u64 start, u64 len, int strict)
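
tree_insert() in extent_map.c now computes a wrap-safe end for the new mapping and checks it against the nearest existing neighbors during the same walk, instead of doing a separate lookup first. Here is a small stand-alone sketch of the two checks it relies on (wrap-safe end, half-open interval overlap); function names mirror the patch but the program itself is illustrative only.

/* Stand-alone sketch of the range helpers behind the overlap check above. */
#include <stdint.h>
#include <stdio.h>

/* Saturate instead of wrapping when start + len overflows a u64. */
static uint64_t range_end(uint64_t start, uint64_t len)
{
	if (start + len < start)
		return (uint64_t)-1;
	return start + len;
}

/*
 * Two half-open ranges [start, end) overlap iff each one starts before
 * the other one ends.
 */
static int ranges_overlap(uint64_t a_start, uint64_t a_len,
			  uint64_t b_start, uint64_t b_len)
{
	return range_end(a_start, a_len) > b_start &&
	       range_end(b_start, b_len) > a_start;
}

int main(void)
{
	printf("wrap-safe end: %llu\n",
	       (unsigned long long)range_end((uint64_t)-2, 16));
	printf("overlap [0,10) vs [8,12):  %d\n", ranges_overlap(0, 10, 8, 4));
	printf("overlap [0,10) vs [10,14): %d\n", ranges_overlap(0, 10, 10, 4));
	return 0;
}
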
diff --git a/fs/btrfs/file-item.c b/fs/btrfs/file-item.c
index 6f38488..127555b 100644
--- a/fs/btrfs/file-item.c
+++ b/fs/btrfs/file-item.c
@@ -182,7 +182,7 @@
 	if (!path)
 		return -ENOMEM;
 
-	nblocks = bio->bi_size >> inode->i_sb->s_blocksize_bits;
+	nblocks = bio->bi_iter.bi_size >> inode->i_sb->s_blocksize_bits;
 	if (!dst) {
 		if (nblocks * csum_size > BTRFS_BIO_INLINE_CSUM_SIZE) {
 			btrfs_bio->csum_allocated = kmalloc(nblocks * csum_size,
@@ -201,7 +201,7 @@
 		csum = (u8 *)dst;
 	}
 
-	if (bio->bi_size > PAGE_CACHE_SIZE * 8)
+	if (bio->bi_iter.bi_size > PAGE_CACHE_SIZE * 8)
 		path->reada = 2;
 
 	WARN_ON(bio->bi_vcnt <= 0);
@@ -217,7 +217,7 @@
 		path->skip_locking = 1;
 	}
 
-	disk_bytenr = (u64)bio->bi_sector << 9;
+	disk_bytenr = (u64)bio->bi_iter.bi_sector << 9;
 	if (dio)
 		offset = logical_offset;
 	while (bio_index < bio->bi_vcnt) {
@@ -246,8 +246,8 @@
 						offset + bvec->bv_len - 1,
 						EXTENT_NODATASUM, GFP_NOFS);
 				} else {
-					printk(KERN_INFO "btrfs no csum found "
-					       "for inode %llu start %llu\n",
+					btrfs_info(BTRFS_I(inode)->root->fs_info,
+						   "no csum found for inode %llu start %llu",
 					       btrfs_ino(inode), offset);
 				}
 				item = NULL;
@@ -302,7 +302,7 @@
 			      struct btrfs_dio_private *dip, struct bio *bio,
 			      u64 offset)
 {
-	int len = (bio->bi_sector << 9) - dip->disk_bytenr;
+	int len = (bio->bi_iter.bi_sector << 9) - dip->disk_bytenr;
 	u16 csum_size = btrfs_super_csum_size(root->fs_info->super_copy);
 	int ret;
 
@@ -447,11 +447,12 @@
 	u64 offset;
 
 	WARN_ON(bio->bi_vcnt <= 0);
-	sums = kzalloc(btrfs_ordered_sum_size(root, bio->bi_size), GFP_NOFS);
+	sums = kzalloc(btrfs_ordered_sum_size(root, bio->bi_iter.bi_size),
+		       GFP_NOFS);
 	if (!sums)
 		return -ENOMEM;
 
-	sums->len = bio->bi_size;
+	sums->len = bio->bi_iter.bi_size;
 	INIT_LIST_HEAD(&sums->list);
 
 	if (contig)
@@ -461,7 +462,7 @@
 
 	ordered = btrfs_lookup_ordered_extent(inode, offset);
 	BUG_ON(!ordered); /* Logic error */
-	sums->bytenr = (u64)bio->bi_sector << 9;
+	sums->bytenr = (u64)bio->bi_iter.bi_sector << 9;
 	index = 0;
 
 	while (bio_index < bio->bi_vcnt) {
@@ -476,7 +477,7 @@
 			btrfs_add_ordered_sum(inode, ordered, sums);
 			btrfs_put_ordered_extent(ordered);
 
-			bytes_left = bio->bi_size - total_bytes;
+			bytes_left = bio->bi_iter.bi_size - total_bytes;
 
 			sums = kzalloc(btrfs_ordered_sum_size(root, bytes_left),
 				       GFP_NOFS);
@@ -484,7 +485,7 @@
 			sums->len = bytes_left;
 			ordered = btrfs_lookup_ordered_extent(inode, offset);
 			BUG_ON(!ordered); /* Logic error */
-			sums->bytenr = ((u64)bio->bi_sector << 9) +
+			sums->bytenr = ((u64)bio->bi_iter.bi_sector << 9) +
 				       total_bytes;
 			index = 0;
 		}
diff --git a/fs/btrfs/file.c b/fs/btrfs/file.c
index 82d0342..0165b86 100644
--- a/fs/btrfs/file.c
+++ b/fs/btrfs/file.c
@@ -692,7 +692,10 @@
 int __btrfs_drop_extents(struct btrfs_trans_handle *trans,
 			 struct btrfs_root *root, struct inode *inode,
 			 struct btrfs_path *path, u64 start, u64 end,
-			 u64 *drop_end, int drop_cache)
+			 u64 *drop_end, int drop_cache,
+			 int replace_extent,
+			 u32 extent_item_size,
+			 int *key_inserted)
 {
 	struct extent_buffer *leaf;
 	struct btrfs_file_extent_item *fi;
@@ -712,6 +715,7 @@
 	int modify_tree = -1;
 	int update_refs = (root->ref_cows || root == root->fs_info->tree_root);
 	int found = 0;
+	int leafs_visited = 0;
 
 	if (drop_cache)
 		btrfs_drop_extent_cache(inode, start, end - 1, 0);
@@ -733,6 +737,7 @@
 				path->slots[0]--;
 		}
 		ret = 0;
+		leafs_visited++;
 next_slot:
 		leaf = path->nodes[0];
 		if (path->slots[0] >= btrfs_header_nritems(leaf)) {
@@ -744,6 +749,7 @@
 				ret = 0;
 				break;
 			}
+			leafs_visited++;
 			leaf = path->nodes[0];
 			recow = 1;
 		}
@@ -766,7 +772,8 @@
 				btrfs_file_extent_num_bytes(leaf, fi);
 		} else if (extent_type == BTRFS_FILE_EXTENT_INLINE) {
 			extent_end = key.offset +
-				btrfs_file_extent_inline_len(leaf, fi);
+				btrfs_file_extent_inline_len(leaf,
+						     path->slots[0], fi);
 		} else {
 			WARN_ON(1);
 			extent_end = search_start;
@@ -927,14 +934,44 @@
 	}
 
 	if (!ret && del_nr > 0) {
+		/*
+		 * Set path->slots[0] to the first slot, so that after the
+		 * delete, if items are moved off from our leaf to its
+		 * immediate left or right neighbor leafs, we end up with a
+		 * correct and adjusted path->slots[0] for our insertion.
+		 */
+		path->slots[0] = del_slot;
 		ret = btrfs_del_items(trans, root, path, del_slot, del_nr);
 		if (ret)
 			btrfs_abort_transaction(trans, root, ret);
+
+		leaf = path->nodes[0];
+		/*
+		 * leaf eb has flag EXTENT_BUFFER_STALE if it was deleted (that
+		 * is, its contents got pushed to its neighbors), in which case
+		 * it means path->locks[0] == 0
+		 */
+		if (!ret && replace_extent && leafs_visited == 1 &&
+		    path->locks[0] &&
+		    btrfs_leaf_free_space(root, leaf) >=
+		    sizeof(struct btrfs_item) + extent_item_size) {
+
+			key.objectid = ino;
+			key.type = BTRFS_EXTENT_DATA_KEY;
+			key.offset = start;
+			setup_items_for_insert(root, path, &key,
+					       &extent_item_size,
+					       extent_item_size,
+					       sizeof(struct btrfs_item) +
+					       extent_item_size, 1);
+			*key_inserted = 1;
+		}
 	}
 
+	if (!replace_extent || !(*key_inserted))
+		btrfs_release_path(path);
 	if (drop_end)
 		*drop_end = found ? min(end, extent_end) : end;
-	btrfs_release_path(path);
 	return ret;
 }
 
@@ -949,7 +986,7 @@
 	if (!path)
 		return -ENOMEM;
 	ret = __btrfs_drop_extents(trans, root, inode, path, start, end, NULL,
-				   drop_cache);
+				   drop_cache, 0, 0, NULL);
 	btrfs_free_path(path);
 	return ret;
 }
@@ -1235,29 +1272,18 @@
 }
 
 /*
- * this gets pages into the page cache and locks them down, it also properly
- * waits for data=ordered extents to finish before allowing the pages to be
- * modified.
+ * this just gets pages into the page cache and locks them down.
  */
-static noinline int prepare_pages(struct btrfs_root *root, struct file *file,
-			 struct page **pages, size_t num_pages,
-			 loff_t pos, unsigned long first_index,
-			 size_t write_bytes, bool force_uptodate)
+static noinline int prepare_pages(struct inode *inode, struct page **pages,
+				  size_t num_pages, loff_t pos,
+				  size_t write_bytes, bool force_uptodate)
 {
-	struct extent_state *cached_state = NULL;
 	int i;
 	unsigned long index = pos >> PAGE_CACHE_SHIFT;
-	struct inode *inode = file_inode(file);
 	gfp_t mask = btrfs_alloc_write_mask(inode->i_mapping);
 	int err = 0;
-	int faili = 0;
-	u64 start_pos;
-	u64 last_pos;
+	int faili;
 
-	start_pos = pos & ~((u64)root->sectorsize - 1);
-	last_pos = ((u64)index + num_pages) << PAGE_CACHE_SHIFT;
-
-again:
 	for (i = 0; i < num_pages; i++) {
 		pages[i] = find_or_create_page(inode->i_mapping, index + i,
 					       mask | __GFP_WRITE);
@@ -1280,48 +1306,7 @@
 		}
 		wait_on_page_writeback(pages[i]);
 	}
-	faili = num_pages - 1;
-	err = 0;
-	if (start_pos < inode->i_size) {
-		struct btrfs_ordered_extent *ordered;
-		lock_extent_bits(&BTRFS_I(inode)->io_tree,
-				 start_pos, last_pos - 1, 0, &cached_state);
-		ordered = btrfs_lookup_first_ordered_extent(inode,
-							    last_pos - 1);
-		if (ordered &&
-		    ordered->file_offset + ordered->len > start_pos &&
-		    ordered->file_offset < last_pos) {
-			btrfs_put_ordered_extent(ordered);
-			unlock_extent_cached(&BTRFS_I(inode)->io_tree,
-					     start_pos, last_pos - 1,
-					     &cached_state, GFP_NOFS);
-			for (i = 0; i < num_pages; i++) {
-				unlock_page(pages[i]);
-				page_cache_release(pages[i]);
-			}
-			err = btrfs_wait_ordered_range(inode, start_pos,
-						       last_pos - start_pos);
-			if (err)
-				goto fail;
-			goto again;
-		}
-		if (ordered)
-			btrfs_put_ordered_extent(ordered);
 
-		clear_extent_bit(&BTRFS_I(inode)->io_tree, start_pos,
-				  last_pos - 1, EXTENT_DIRTY | EXTENT_DELALLOC |
-				  EXTENT_DO_ACCOUNTING | EXTENT_DEFRAG,
-				  0, 0, &cached_state, GFP_NOFS);
-		unlock_extent_cached(&BTRFS_I(inode)->io_tree,
-				     start_pos, last_pos - 1, &cached_state,
-				     GFP_NOFS);
-	}
-	for (i = 0; i < num_pages; i++) {
-		if (clear_page_dirty_for_io(pages[i]))
-			account_page_redirty(pages[i]);
-		set_page_extent_mapped(pages[i]);
-		WARN_ON(!PageLocked(pages[i]));
-	}
 	return 0;
 fail:
 	while (faili >= 0) {
@@ -1333,6 +1318,75 @@
 
 }
 
+/*
+ * This function locks the extent and properly waits for data=ordered extents
+ * to finish before allowing the pages to be modified if needed.
+ *
+ * The return value:
+ * 1 - the extent is locked
+ * 0 - the extent is not locked, and everything is OK
+ * -EAGAIN - we need to re-prepare the pages
+ * any other negative value - something went wrong
+ */
+static noinline int
+lock_and_cleanup_extent_if_need(struct inode *inode, struct page **pages,
+				size_t num_pages, loff_t pos,
+				u64 *lockstart, u64 *lockend,
+				struct extent_state **cached_state)
+{
+	u64 start_pos;
+	u64 last_pos;
+	int i;
+	int ret = 0;
+
+	start_pos = pos & ~((u64)PAGE_CACHE_SIZE - 1);
+	last_pos = start_pos + ((u64)num_pages << PAGE_CACHE_SHIFT) - 1;
+
+	if (start_pos < inode->i_size) {
+		struct btrfs_ordered_extent *ordered;
+		lock_extent_bits(&BTRFS_I(inode)->io_tree,
+				 start_pos, last_pos, 0, cached_state);
+		ordered = btrfs_lookup_first_ordered_extent(inode, last_pos);
+		if (ordered &&
+		    ordered->file_offset + ordered->len > start_pos &&
+		    ordered->file_offset <= last_pos) {
+			btrfs_put_ordered_extent(ordered);
+			unlock_extent_cached(&BTRFS_I(inode)->io_tree,
+					     start_pos, last_pos,
+					     cached_state, GFP_NOFS);
+			for (i = 0; i < num_pages; i++) {
+				unlock_page(pages[i]);
+				page_cache_release(pages[i]);
+			}
+			ret = btrfs_wait_ordered_range(inode, start_pos,
+						last_pos - start_pos + 1);
+			if (ret)
+				return ret;
+			else
+				return -EAGAIN;
+		}
+		if (ordered)
+			btrfs_put_ordered_extent(ordered);
+
+		clear_extent_bit(&BTRFS_I(inode)->io_tree, start_pos,
+				  last_pos, EXTENT_DIRTY | EXTENT_DELALLOC |
+				  EXTENT_DO_ACCOUNTING | EXTENT_DEFRAG,
+				  0, 0, cached_state, GFP_NOFS);
+		*lockstart = start_pos;
+		*lockend = last_pos;
+		ret = 1;
+	}
+
+	for (i = 0; i < num_pages; i++) {
+		if (clear_page_dirty_for_io(pages[i]))
+			account_page_redirty(pages[i]);
+		set_page_extent_mapped(pages[i]);
+		WARN_ON(!PageLocked(pages[i]));
+	}
+
+	return ret;
+}
+
 static noinline int check_can_nocow(struct inode *inode, loff_t pos,
 				    size_t *write_bytes)
 {
@@ -1381,13 +1435,17 @@
 	struct inode *inode = file_inode(file);
 	struct btrfs_root *root = BTRFS_I(inode)->root;
 	struct page **pages = NULL;
+	struct extent_state *cached_state = NULL;
 	u64 release_bytes = 0;
+	u64 lockstart;
+	u64 lockend;
 	unsigned long first_index;
 	size_t num_written = 0;
 	int nrptrs;
 	int ret = 0;
 	bool only_release_metadata = false;
 	bool force_page_uptodate = false;
+	bool need_unlock;
 
 	nrptrs = min((iov_iter_count(i) + PAGE_CACHE_SIZE - 1) /
 		     PAGE_CACHE_SIZE, PAGE_CACHE_SIZE /
@@ -1456,18 +1514,31 @@
 		}
 
 		release_bytes = reserve_bytes;
-
+		need_unlock = false;
+again:
 		/*
 		 * This is going to setup the pages array with the number of
 		 * pages we want, so we don't really need to worry about the
 		 * contents of pages from loop to loop
 		 */
-		ret = prepare_pages(root, file, pages, num_pages,
-				    pos, first_index, write_bytes,
+		ret = prepare_pages(inode, pages, num_pages,
+				    pos, write_bytes,
 				    force_page_uptodate);
 		if (ret)
 			break;
 
+		ret = lock_and_cleanup_extent_if_need(inode, pages, num_pages,
+						      pos, &lockstart, &lockend,
+						      &cached_state);
+		if (ret < 0) {
+			if (ret == -EAGAIN)
+				goto again;
+			break;
+		} else if (ret > 0) {
+			need_unlock = true;
+			ret = 0;
+		}
+
 		copied = btrfs_copy_from_user(pos, num_pages,
 					   write_bytes, pages, i);
 
@@ -1512,19 +1583,21 @@
 		}
 
 		release_bytes = dirty_pages << PAGE_CACHE_SHIFT;
-		if (copied > 0) {
+
+		if (copied > 0)
 			ret = btrfs_dirty_pages(root, inode, pages,
 						dirty_pages, pos, copied,
 						NULL);
-			if (ret) {
-				btrfs_drop_pages(pages, num_pages);
-				break;
-			}
+		if (need_unlock)
+			unlock_extent_cached(&BTRFS_I(inode)->io_tree,
+					     lockstart, lockend, &cached_state,
+					     GFP_NOFS);
+		if (ret) {
+			btrfs_drop_pages(pages, num_pages);
+			break;
 		}
 
 		release_bytes = 0;
-		btrfs_drop_pages(pages, num_pages);
-
 		if (only_release_metadata && copied > 0) {
 			u64 lockstart = round_down(pos, root->sectorsize);
 			u64 lockend = lockstart +
@@ -1536,6 +1609,8 @@
 			only_release_metadata = false;
 		}
 
+		btrfs_drop_pages(pages, num_pages);
+
 		cond_resched();
 
 		balance_dirty_pages_ratelimited(inode->i_mapping);
@@ -1857,12 +1932,24 @@
 	if (file->private_data)
 		btrfs_ioctl_trans_end(file);
 
+	/*
+	 * We use start here because we will need to wait on the IO to complete
+	 * in btrfs_sync_log, which could require joining a transaction (for
+	 * example checking cross references in the nocow path).  If we use join
+	 * here we could get into a situation where we're waiting on IO to
+	 * happen that is blocked on a transaction trying to commit.  With start
+	 * we inc the extwriter counter, so we wait for all extwriters to exit
+	 * before we start blocking join'ers.  This comment is to keep somebody
+	 * from thinking they are super smart and changing this to
+	 * btrfs_join_transaction *cough*Josef*cough*.
+	 */
 	trans = btrfs_start_transaction(root, 0);
 	if (IS_ERR(trans)) {
 		ret = PTR_ERR(trans);
 		mutex_unlock(&inode->i_mutex);
 		goto out;
 	}
+	trans->sync = true;
 
 	ret = btrfs_log_dentry_safe(trans, root, dentry);
 	if (ret < 0) {
@@ -1963,11 +2050,13 @@
 	struct btrfs_key key;
 	int ret;
 
+	if (btrfs_fs_incompat(root->fs_info, NO_HOLES))
+		goto out;
+
 	key.objectid = btrfs_ino(inode);
 	key.type = BTRFS_EXTENT_DATA_KEY;
 	key.offset = offset;
 
-
 	ret = btrfs_search_slot(trans, root, &key, path, 0, 1);
 	if (ret < 0)
 		return ret;
@@ -2064,8 +2153,10 @@
 	u64 drop_end;
 	int ret = 0;
 	int err = 0;
+	int rsv_count;
 	bool same_page = ((offset >> PAGE_CACHE_SHIFT) ==
 			  ((offset + len - 1) >> PAGE_CACHE_SHIFT));
+	bool no_holes = btrfs_fs_incompat(root->fs_info, NO_HOLES);
 
 	ret = btrfs_wait_ordered_range(inode, offset, len);
 	if (ret)
@@ -2125,7 +2216,7 @@
 		 * we need to try again.
 		 */
 		if ((!ordered ||
-		    (ordered->file_offset + ordered->len < lockstart ||
+		    (ordered->file_offset + ordered->len <= lockstart ||
 		     ordered->file_offset > lockend)) &&
 		     !test_range_bit(&BTRFS_I(inode)->io_tree, lockstart,
 				     lockend, EXTENT_UPTODATE, 0,
@@ -2163,9 +2254,10 @@
 	/*
 	 * 1 - update the inode
 	 * 1 - removing the extents in the range
-	 * 1 - adding the hole extent
+	 * 1 - adding the hole extent if no_holes isn't set
 	 */
-	trans = btrfs_start_transaction(root, 3);
+	rsv_count = no_holes ? 2 : 3;
+	trans = btrfs_start_transaction(root, rsv_count);
 	if (IS_ERR(trans)) {
 		err = PTR_ERR(trans);
 		goto out_free;
@@ -2179,7 +2271,7 @@
 	while (cur_offset < lockend) {
 		ret = __btrfs_drop_extents(trans, root, inode, path,
 					   cur_offset, lockend + 1,
-					   &drop_end, 1);
+					   &drop_end, 1, 0, 0, NULL);
 		if (ret != -ENOSPC)
 			break;
 
@@ -2202,7 +2294,7 @@
 		btrfs_end_transaction(trans, root);
 		btrfs_btree_balance_dirty(root);
 
-		trans = btrfs_start_transaction(root, 3);
+		trans = btrfs_start_transaction(root, rsv_count);
 		if (IS_ERR(trans)) {
 			ret = PTR_ERR(trans);
 			trans = NULL;
diff --git a/fs/btrfs/free-space-cache.c b/fs/btrfs/free-space-cache.c
index 057be95..73f3de7 100644
--- a/fs/btrfs/free-space-cache.c
+++ b/fs/btrfs/free-space-cache.c
@@ -347,8 +347,8 @@
 			btrfs_readpage(NULL, page);
 			lock_page(page);
 			if (!PageUptodate(page)) {
-				printk(KERN_ERR "btrfs: error reading free "
-				       "space cache\n");
+				btrfs_err(BTRFS_I(inode)->root->fs_info,
+					   "error reading free space cache");
 				io_ctl_drop_pages(io_ctl);
 				return -EIO;
 			}
@@ -405,7 +405,7 @@
 
 	gen = io_ctl->cur;
 	if (le64_to_cpu(*gen) != generation) {
-		printk_ratelimited(KERN_ERR "btrfs: space cache generation "
+		printk_ratelimited(KERN_ERR "BTRFS: space cache generation "
 				   "(%Lu) does not match inode (%Lu)\n", *gen,
 				   generation);
 		io_ctl_unmap_page(io_ctl);
@@ -463,7 +463,7 @@
 			      PAGE_CACHE_SIZE - offset);
 	btrfs_csum_final(crc, (char *)&crc);
 	if (val != crc) {
-		printk_ratelimited(KERN_ERR "btrfs: csum mismatch on free "
+		printk_ratelimited(KERN_ERR "BTRFS: csum mismatch on free "
 				   "space cache\n");
 		io_ctl_unmap_page(io_ctl);
 		return -EIO;
@@ -1902,7 +1902,7 @@
 	spin_unlock(&ctl->tree_lock);
 
 	if (ret) {
-		printk(KERN_CRIT "btrfs: unable to add free space :%d\n", ret);
+		printk(KERN_CRIT "BTRFS: unable to add free space :%d\n", ret);
 		ASSERT(ret != -EEXIST);
 	}
 
@@ -2011,14 +2011,15 @@
 		info = rb_entry(n, struct btrfs_free_space, offset_index);
 		if (info->bytes >= bytes && !block_group->ro)
 			count++;
-		printk(KERN_CRIT "entry offset %llu, bytes %llu, bitmap %s\n",
-		       info->offset, info->bytes,
+		btrfs_crit(block_group->fs_info,
+			   "entry offset %llu, bytes %llu, bitmap %s",
+			   info->offset, info->bytes,
 		       (info->bitmap) ? "yes" : "no");
 	}
-	printk(KERN_INFO "block group has cluster?: %s\n",
+	btrfs_info(block_group->fs_info, "block group has cluster?: %s",
 	       list_empty(&block_group->cluster_list) ? "no" : "yes");
-	printk(KERN_INFO "%d blocks of free space at or bigger than bytes is"
-	       "\n", count);
+	btrfs_info(block_group->fs_info,
+		   "%d blocks of free space at or bigger than bytes is", count);
 }
 
 void btrfs_init_free_space_ctl(struct btrfs_block_group_cache *block_group)
@@ -2421,7 +2422,6 @@
 	struct btrfs_free_space *entry = NULL;
 	struct btrfs_free_space *last;
 	struct rb_node *node;
-	u64 window_start;
 	u64 window_free;
 	u64 max_extent;
 	u64 total_size = 0;
@@ -2443,7 +2443,6 @@
 		entry = rb_entry(node, struct btrfs_free_space, offset_index);
 	}
 
-	window_start = entry->offset;
 	window_free = entry->bytes;
 	max_extent = entry->bytes;
 	first = entry;
diff --git a/fs/btrfs/hash.c b/fs/btrfs/hash.c
new file mode 100644
index 0000000..85889aa
--- /dev/null
+++ b/fs/btrfs/hash.c
@@ -0,0 +1,50 @@
+/*
+ * Copyright (C) 2014 Filipe David Borba Manana <fdmanana@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public
+ * License v2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ */
+
+#include <crypto/hash.h>
+#include <linux/err.h>
+#include "hash.h"
+
+static struct crypto_shash *tfm;
+
+int __init btrfs_hash_init(void)
+{
+	tfm = crypto_alloc_shash("crc32c", 0, 0);
+	if (IS_ERR(tfm))
+		return PTR_ERR(tfm);
+
+	return 0;
+}
+
+void btrfs_hash_exit(void)
+{
+	crypto_free_shash(tfm);
+}
+
+u32 btrfs_crc32c(u32 crc, const void *address, unsigned int length)
+{
+	struct {
+		struct shash_desc shash;
+		char ctx[crypto_shash_descsize(tfm)];
+	} desc;
+	int err;
+
+	desc.shash.tfm = tfm;
+	desc.shash.flags = 0;
+	*(u32 *)desc.ctx = crc;
+
+	err = crypto_shash_update(&desc.shash, address, length);
+	BUG_ON(err);
+
+	return *(u32 *)desc.ctx;
+}
diff --git a/fs/btrfs/hash.h b/fs/btrfs/hash.h
index 1d98281..118a231 100644
--- a/fs/btrfs/hash.h
+++ b/fs/btrfs/hash.h
@@ -19,10 +19,15 @@
 #ifndef __HASH__
 #define __HASH__
 
-#include <linux/crc32c.h>
+int __init btrfs_hash_init(void);
+
+void btrfs_hash_exit(void);
+
+u32 btrfs_crc32c(u32 crc, const void *address, unsigned int length);
+
 static inline u64 btrfs_name_hash(const char *name, int len)
 {
-	return crc32c((u32)~1, name, len);
+	return btrfs_crc32c((u32)~1, name, len);
 }
 
 /*
@@ -31,7 +36,7 @@
 static inline u64 btrfs_extref_hash(u64 parent_objectid, const char *name,
 				    int len)
 {
-	return (u64) crc32c(parent_objectid, name, len);
+	return (u64) btrfs_crc32c(parent_objectid, name, len);
 }
 
 #endif
diff --git a/fs/btrfs/inode-item.c b/fs/btrfs/inode-item.c
index ec82fae..2be38df 100644
--- a/fs/btrfs/inode-item.c
+++ b/fs/btrfs/inode-item.c
@@ -91,32 +91,6 @@
 	return 0;
 }
 
-static struct btrfs_inode_ref *
-btrfs_lookup_inode_ref(struct btrfs_trans_handle *trans,
-		       struct btrfs_root *root,
-		       struct btrfs_path *path,
-		       const char *name, int name_len,
-		       u64 inode_objectid, u64 ref_objectid, int ins_len,
-		       int cow)
-{
-	int ret;
-	struct btrfs_key key;
-	struct btrfs_inode_ref *ref;
-
-	key.objectid = inode_objectid;
-	key.type = BTRFS_INODE_REF_KEY;
-	key.offset = ref_objectid;
-
-	ret = btrfs_search_slot(trans, root, &key, path, ins_len, cow);
-	if (ret < 0)
-		return ERR_PTR(ret);
-	if (ret > 0)
-		return NULL;
-	if (!find_name_in_backref(path, name, name_len, &ref))
-		return NULL;
-	return ref;
-}
-
 /* Returns NULL if no extref found */
 struct btrfs_inode_extref *
 btrfs_lookup_inode_extref(struct btrfs_trans_handle *trans,
@@ -144,45 +118,6 @@
 	return extref;
 }
 
-int btrfs_get_inode_ref_index(struct btrfs_trans_handle *trans,
-			      struct btrfs_root *root,
-			      struct btrfs_path *path,
-			      const char *name, int name_len,
-			      u64 inode_objectid, u64 ref_objectid, int mod,
-			      u64 *ret_index)
-{
-	struct btrfs_inode_ref *ref;
-	struct btrfs_inode_extref *extref;
-	int ins_len = mod < 0 ? -1 : 0;
-	int cow = mod != 0;
-
-	ref = btrfs_lookup_inode_ref(trans, root, path, name, name_len,
-				     inode_objectid, ref_objectid, ins_len,
-				     cow);
-	if (IS_ERR(ref))
-		return PTR_ERR(ref);
-
-	if (ref != NULL) {
-		*ret_index = btrfs_inode_ref_index(path->nodes[0], ref);
-		return 0;
-	}
-
-	btrfs_release_path(path);
-
-	extref = btrfs_lookup_inode_extref(trans, root, path, name,
-					   name_len, inode_objectid,
-					   ref_objectid, ins_len, cow);
-	if (IS_ERR(extref))
-		return PTR_ERR(extref);
-
-	if (extref) {
-		*ret_index = btrfs_inode_extref_index(path->nodes[0], extref);
-		return 0;
-	}
-
-	return -ENOENT;
-}
-
 static int btrfs_del_inode_extref(struct btrfs_trans_handle *trans,
 				  struct btrfs_root *root,
 				  const char *name, int name_len,
diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
index 514b291..d3d4448 100644
--- a/fs/btrfs/inode.c
+++ b/fs/btrfs/inode.c
@@ -58,9 +58,10 @@
 #include "inode-map.h"
 #include "backref.h"
 #include "hash.h"
+#include "props.h"
 
 struct btrfs_iget_args {
-	u64 ino;
+	struct btrfs_key *location;
 	struct btrfs_root *root;
 };
 
@@ -125,13 +126,12 @@
  * no overlapping inline items exist in the btree
  */
 static noinline int insert_inline_extent(struct btrfs_trans_handle *trans,
+				struct btrfs_path *path, int extent_inserted,
 				struct btrfs_root *root, struct inode *inode,
 				u64 start, size_t size, size_t compressed_size,
 				int compress_type,
 				struct page **compressed_pages)
 {
-	struct btrfs_key key;
-	struct btrfs_path *path;
 	struct extent_buffer *leaf;
 	struct page *page = NULL;
 	char *kaddr;
@@ -140,29 +140,29 @@
 	int err = 0;
 	int ret;
 	size_t cur_size = size;
-	size_t datasize;
 	unsigned long offset;
 
 	if (compressed_size && compressed_pages)
 		cur_size = compressed_size;
 
-	path = btrfs_alloc_path();
-	if (!path)
-		return -ENOMEM;
-
-	path->leave_spinning = 1;
-
-	key.objectid = btrfs_ino(inode);
-	key.offset = start;
-	btrfs_set_key_type(&key, BTRFS_EXTENT_DATA_KEY);
-	datasize = btrfs_file_extent_calc_inline_size(cur_size);
-
 	inode_add_bytes(inode, size);
-	ret = btrfs_insert_empty_item(trans, root, path, &key,
-				      datasize);
-	if (ret) {
-		err = ret;
-		goto fail;
+
+	if (!extent_inserted) {
+		struct btrfs_key key;
+		size_t datasize;
+
+		key.objectid = btrfs_ino(inode);
+		key.offset = start;
+		btrfs_set_key_type(&key, BTRFS_EXTENT_DATA_KEY);
+
+		datasize = btrfs_file_extent_calc_inline_size(cur_size);
+		path->leave_spinning = 1;
+		ret = btrfs_insert_empty_item(trans, root, path, &key,
+					      datasize);
+		if (ret) {
+			err = ret;
+			goto fail;
+		}
 	}
 	leaf = path->nodes[0];
 	ei = btrfs_item_ptr(leaf, path->slots[0],
@@ -203,7 +203,7 @@
 		page_cache_release(page);
 	}
 	btrfs_mark_buffer_dirty(leaf);
-	btrfs_free_path(path);
+	btrfs_release_path(path);
 
 	/*
 	 * we're an inline extent, so nobody can
@@ -219,7 +219,6 @@
 
 	return ret;
 fail:
-	btrfs_free_path(path);
 	return err;
 }
 
@@ -242,6 +241,9 @@
 	u64 aligned_end = ALIGN(end, root->sectorsize);
 	u64 data_len = inline_len;
 	int ret;
+	struct btrfs_path *path;
+	int extent_inserted = 0;
+	u32 extent_item_size;
 
 	if (compressed_size)
 		data_len = compressed_size;
@@ -256,12 +258,27 @@
 		return 1;
 	}
 
+	path = btrfs_alloc_path();
+	if (!path)
+		return -ENOMEM;
+
 	trans = btrfs_join_transaction(root);
-	if (IS_ERR(trans))
+	if (IS_ERR(trans)) {
+		btrfs_free_path(path);
 		return PTR_ERR(trans);
+	}
 	trans->block_rsv = &root->fs_info->delalloc_block_rsv;
 
-	ret = btrfs_drop_extents(trans, root, inode, start, aligned_end, 1);
+	if (compressed_size && compressed_pages)
+		extent_item_size = btrfs_file_extent_calc_inline_size(
+		   compressed_size);
+	else
+		extent_item_size = btrfs_file_extent_calc_inline_size(
+		    inline_len);
+
+	ret = __btrfs_drop_extents(trans, root, inode, path,
+				   start, aligned_end, NULL,
+				   1, 1, extent_item_size, &extent_inserted);
 	if (ret) {
 		btrfs_abort_transaction(trans, root, ret);
 		goto out;
@@ -269,7 +286,8 @@
 
 	if (isize > actual_end)
 		inline_len = min_t(u64, isize, actual_end);
-	ret = insert_inline_extent(trans, root, inode, start,
+	ret = insert_inline_extent(trans, path, extent_inserted,
+				   root, inode, start,
 				   inline_len, compressed_size,
 				   compress_type, compressed_pages);
 	if (ret && ret != -ENOSPC) {
@@ -284,6 +302,7 @@
 	btrfs_delalloc_release_metadata(inode, end + 1 - start);
 	btrfs_drop_extent_cache(inode, start, aligned_end - 1, 0);
 out:
+	btrfs_free_path(path);
 	btrfs_end_transaction(trans, root);
 	return ret;
 }
@@ -1262,7 +1281,8 @@
 			nocow = 1;
 		} else if (extent_type == BTRFS_FILE_EXTENT_INLINE) {
 			extent_end = found_key.offset +
-				btrfs_file_extent_inline_len(leaf, fi);
+				btrfs_file_extent_inline_len(leaf,
+						     path->slots[0], fi);
 			extent_end = ALIGN(extent_end, root->sectorsize);
 		} else {
 			BUG_ON(1);
@@ -1577,7 +1597,7 @@
 			 unsigned long bio_flags)
 {
 	struct btrfs_root *root = BTRFS_I(page->mapping->host)->root;
-	u64 logical = (u64)bio->bi_sector << 9;
+	u64 logical = (u64)bio->bi_iter.bi_sector << 9;
 	u64 length = 0;
 	u64 map_length;
 	int ret;
@@ -1585,7 +1605,7 @@
 	if (bio_flags & EXTENT_BIO_COMPRESSED)
 		return 0;
 
-	length = bio->bi_size;
+	length = bio->bi_iter.bi_size;
 	map_length = length;
 	ret = btrfs_map_block(root->fs_info, rw, logical,
 			      &map_length, NULL, 0);
@@ -1841,14 +1861,13 @@
 	struct btrfs_path *path;
 	struct extent_buffer *leaf;
 	struct btrfs_key ins;
+	int extent_inserted = 0;
 	int ret;
 
 	path = btrfs_alloc_path();
 	if (!path)
 		return -ENOMEM;
 
-	path->leave_spinning = 1;
-
 	/*
 	 * we may be replacing one extent in the tree with another.
 	 * The new extent is pinned in the extent map, and we don't want
@@ -1858,17 +1877,23 @@
 	 * the caller is expected to unpin it and allow it to be merged
 	 * with the others.
 	 */
-	ret = btrfs_drop_extents(trans, root, inode, file_pos,
-				 file_pos + num_bytes, 0);
+	ret = __btrfs_drop_extents(trans, root, inode, path, file_pos,
+				   file_pos + num_bytes, NULL, 0,
+				   1, sizeof(*fi), &extent_inserted);
 	if (ret)
 		goto out;
 
-	ins.objectid = btrfs_ino(inode);
-	ins.offset = file_pos;
-	ins.type = BTRFS_EXTENT_DATA_KEY;
-	ret = btrfs_insert_empty_item(trans, root, path, &ins, sizeof(*fi));
-	if (ret)
-		goto out;
+	if (!extent_inserted) {
+		ins.objectid = btrfs_ino(inode);
+		ins.offset = file_pos;
+		ins.type = BTRFS_EXTENT_DATA_KEY;
+
+		path->leave_spinning = 1;
+		ret = btrfs_insert_empty_item(trans, root, path, &ins,
+					      sizeof(*fi));
+		if (ret)
+			goto out;
+	}
 	leaf = path->nodes[0];
 	fi = btrfs_item_ptr(leaf, path->slots[0],
 			    struct btrfs_file_extent_item);
@@ -2290,7 +2315,7 @@
 		u64 extent_len;
 		struct btrfs_key found_key;
 
-		ret = btrfs_search_slot(trans, root, &key, path, 1, 1);
+		ret = btrfs_search_slot(trans, root, &key, path, 0, 1);
 		if (ret < 0)
 			goto out_free_path;
 
@@ -2543,12 +2568,6 @@
 	return NULL;
 }
 
-/*
- * helper function for btrfs_finish_ordered_io, this
- * just reads in some of the csum leaves to prime them into ram
- * before we start the transaction.  It limits the amount of btree
- * reads required while inside the transaction.
- */
 /* as ordered data IO finishes, this gets called so we can finish
  * an ordered extent if the range of bytes in the file it covers are
  * fully written.
@@ -2610,7 +2629,7 @@
 			EXTENT_DEFRAG, 1, cached_state);
 	if (ret) {
 		u64 last_snapshot = btrfs_root_last_snapshot(&root->root_item);
-		if (last_snapshot >= BTRFS_I(inode)->generation)
+		if (0 && last_snapshot >= BTRFS_I(inode)->generation)
 			/* the inode is shared */
 			new = record_old_file_extents(inode, ordered_extent);
 
@@ -3248,7 +3267,8 @@
  * slot is the slot the inode is in, objectid is the objectid of the inode
  */
 static noinline int acls_after_inode_item(struct extent_buffer *leaf,
-					  int slot, u64 objectid)
+					  int slot, u64 objectid,
+					  int *first_xattr_slot)
 {
 	u32 nritems = btrfs_header_nritems(leaf);
 	struct btrfs_key found_key;
@@ -3264,6 +3284,7 @@
 	}
 
 	slot++;
+	*first_xattr_slot = -1;
 	while (slot < nritems) {
 		btrfs_item_key_to_cpu(leaf, &found_key, slot);
 
@@ -3273,6 +3294,8 @@
 
 		/* we found an xattr, assume we've got an acl */
 		if (found_key.type == BTRFS_XATTR_ITEM_KEY) {
+			if (*first_xattr_slot == -1)
+				*first_xattr_slot = slot;
 			if (found_key.offset == xattr_access ||
 			    found_key.offset == xattr_default)
 				return 1;
@@ -3301,6 +3324,8 @@
 	 * something larger than an xattr.  We have to assume the inode
 	 * has acls
 	 */
+	if (*first_xattr_slot == -1)
+		*first_xattr_slot = slot;
 	return 1;
 }
 
@@ -3315,10 +3340,12 @@
 	struct btrfs_timespec *tspec;
 	struct btrfs_root *root = BTRFS_I(inode)->root;
 	struct btrfs_key location;
+	unsigned long ptr;
 	int maybe_acls;
 	u32 rdev;
 	int ret;
 	bool filled = false;
+	int first_xattr_slot;
 
 	ret = btrfs_fill_inode(inode, &rdev);
 	if (!ret)
@@ -3328,7 +3355,6 @@
 	if (!path)
 		goto make_bad;
 
-	path->leave_spinning = 1;
 	memcpy(&location, &BTRFS_I(inode)->location, sizeof(location));
 
 	ret = btrfs_lookup_inode(NULL, root, path, &location, 0);
@@ -3338,7 +3364,7 @@
 	leaf = path->nodes[0];
 
 	if (filled)
-		goto cache_acl;
+		goto cache_index;
 
 	inode_item = btrfs_item_ptr(leaf, path->slots[0],
 				    struct btrfs_inode_item);
@@ -3381,18 +3407,51 @@
 
 	BTRFS_I(inode)->index_cnt = (u64)-1;
 	BTRFS_I(inode)->flags = btrfs_inode_flags(leaf, inode_item);
+
+cache_index:
+	path->slots[0]++;
+	if (inode->i_nlink != 1 ||
+	    path->slots[0] >= btrfs_header_nritems(leaf))
+		goto cache_acl;
+
+	btrfs_item_key_to_cpu(leaf, &location, path->slots[0]);
+	if (location.objectid != btrfs_ino(inode))
+		goto cache_acl;
+
+	ptr = btrfs_item_ptr_offset(leaf, path->slots[0]);
+	if (location.type == BTRFS_INODE_REF_KEY) {
+		struct btrfs_inode_ref *ref;
+
+		ref = (struct btrfs_inode_ref *)ptr;
+		BTRFS_I(inode)->dir_index = btrfs_inode_ref_index(leaf, ref);
+	} else if (location.type == BTRFS_INODE_EXTREF_KEY) {
+		struct btrfs_inode_extref *extref;
+
+		extref = (struct btrfs_inode_extref *)ptr;
+		BTRFS_I(inode)->dir_index = btrfs_inode_extref_index(leaf,
+								     extref);
+	}
 cache_acl:
 	/*
 	 * try to precache a NULL acl entry for files that don't have
 	 * any xattrs or acls
 	 */
 	maybe_acls = acls_after_inode_item(leaf, path->slots[0],
-					   btrfs_ino(inode));
+					   btrfs_ino(inode), &first_xattr_slot);
+	if (first_xattr_slot != -1) {
+		path->slots[0] = first_xattr_slot;
+		ret = btrfs_load_inode_props(inode, path);
+		if (ret)
+			btrfs_err(root->fs_info,
+				  "error loading props for ino %llu (root %llu): %d\n",
+				  btrfs_ino(inode),
+				  root->root_key.objectid, ret);
+	}
+	btrfs_free_path(path);
+
 	if (!maybe_acls)
 		cache_no_acl(inode);
 
-	btrfs_free_path(path);
-
 	switch (inode->i_mode & S_IFMT) {
 	case S_IFREG:
 		inode->i_mapping->a_ops = &btrfs_aops;
@@ -3496,7 +3555,6 @@
 		goto failed;
 	}
 
-	btrfs_unlock_up_safe(path, 1);
 	leaf = path->nodes[0];
 	inode_item = btrfs_item_ptr(leaf, path->slots[0],
 				    struct btrfs_inode_item);
@@ -3593,6 +3651,24 @@
 		goto err;
 	btrfs_release_path(path);
 
+	/*
+	 * If we don't have the dir index cached, we have to get it by looking
+	 * up the inode ref. Since that lookup already gives us the inode ref,
+	 * we remove it directly and there is no need for a delayed deletion.
+	 *
+	 * But if we do have the dir index, we don't need to search the inode
+	 * ref to get it. Since the inode ref is close to the inode item, it is
+	 * better to delay its deletion and do it when we update the inode
+	 * item.
+	 */
+	if (BTRFS_I(inode)->dir_index) {
+		ret = btrfs_delayed_delete_inode_ref(inode);
+		if (!ret) {
+			index = BTRFS_I(inode)->dir_index;
+			goto skip_backref;
+		}
+	}
+
 	ret = btrfs_del_inode_ref(trans, root, name, name_len, ino,
 				  dir_ino, &index);
 	if (ret) {
@@ -3602,7 +3678,7 @@
 		btrfs_abort_transaction(trans, root, ret);
 		goto err;
 	}
-
+skip_backref:
 	ret = btrfs_delete_delayed_dir_index(trans, root, dir, index);
 	if (ret) {
 		btrfs_abort_transaction(trans, root, ret);
@@ -3948,7 +4024,7 @@
 				    btrfs_file_extent_num_bytes(leaf, fi);
 			} else if (extent_type == BTRFS_FILE_EXTENT_INLINE) {
 				item_end += btrfs_file_extent_inline_len(leaf,
-									 fi);
+							 path->slots[0], fi);
 			}
 			item_end--;
 		}
@@ -4018,6 +4094,12 @@
 					inode_sub_bytes(inode, item_end + 1 -
 							new_size);
 				}
+
+				/*
+				 * update the ram bytes to properly reflect
+				 * the new size of our item
+				 */
+				btrfs_set_file_extent_ram_bytes(leaf, fi, size);
 				size =
 				    btrfs_file_extent_calc_inline_size(size);
 				btrfs_truncate_item(root, path, size, 1);
@@ -4203,6 +4285,49 @@
 	return ret;
 }
 
+static int maybe_insert_hole(struct btrfs_root *root, struct inode *inode,
+			     u64 offset, u64 len)
+{
+	struct btrfs_trans_handle *trans;
+	int ret;
+
+	/*
+	 * Still need to make sure the inode looks like it's been updated so
+	 * that any holes get logged if we fsync.
+	 */
+	if (btrfs_fs_incompat(root->fs_info, NO_HOLES)) {
+		BTRFS_I(inode)->last_trans = root->fs_info->generation;
+		BTRFS_I(inode)->last_sub_trans = root->log_transid;
+		BTRFS_I(inode)->last_log_commit = root->last_log_commit;
+		return 0;
+	}
+
+	/*
+	 * 1 - for the one we're dropping
+	 * 1 - for the one we're adding
+	 * 1 - for updating the inode.
+	 */
+	trans = btrfs_start_transaction(root, 3);
+	if (IS_ERR(trans))
+		return PTR_ERR(trans);
+
+	ret = btrfs_drop_extents(trans, root, inode, offset, offset + len, 1);
+	if (ret) {
+		btrfs_abort_transaction(trans, root, ret);
+		btrfs_end_transaction(trans, root);
+		return ret;
+	}
+
+	ret = btrfs_insert_file_extent(trans, root, btrfs_ino(inode), offset,
+				       0, 0, len, 0, len, 0, 0, 0);
+	if (ret)
+		btrfs_abort_transaction(trans, root, ret);
+	else
+		btrfs_update_inode(trans, root, inode);
+	btrfs_end_transaction(trans, root);
+	return ret;
+}
+
 /*
  * This function puts in dummy file extents for the area we're creating a hole
  * for.  So if we are truncating this file to a larger size we need to insert
@@ -4211,7 +4336,6 @@
  */
 int btrfs_cont_expand(struct inode *inode, loff_t oldsize, loff_t size)
 {
-	struct btrfs_trans_handle *trans;
 	struct btrfs_root *root = BTRFS_I(inode)->root;
 	struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
 	struct extent_map *em = NULL;
@@ -4266,31 +4390,10 @@
 			struct extent_map *hole_em;
 			hole_size = last_byte - cur_offset;
 
-			trans = btrfs_start_transaction(root, 3);
-			if (IS_ERR(trans)) {
-				err = PTR_ERR(trans);
+			err = maybe_insert_hole(root, inode, cur_offset,
+						hole_size);
+			if (err)
 				break;
-			}
-
-			err = btrfs_drop_extents(trans, root, inode,
-						 cur_offset,
-						 cur_offset + hole_size, 1);
-			if (err) {
-				btrfs_abort_transaction(trans, root, err);
-				btrfs_end_transaction(trans, root);
-				break;
-			}
-
-			err = btrfs_insert_file_extent(trans, root,
-					btrfs_ino(inode), cur_offset, 0,
-					0, hole_size, 0, hole_size,
-					0, 0, 0);
-			if (err) {
-				btrfs_abort_transaction(trans, root, err);
-				btrfs_end_transaction(trans, root);
-				break;
-			}
-
 			btrfs_drop_extent_cache(inode, cur_offset,
 						cur_offset + hole_size - 1, 0);
 			hole_em = alloc_extent_map();
@@ -4309,7 +4412,7 @@
 			hole_em->ram_bytes = hole_size;
 			hole_em->bdev = root->fs_info->fs_devices->latest_bdev;
 			hole_em->compress_type = BTRFS_COMPRESS_NONE;
-			hole_em->generation = trans->transid;
+			hole_em->generation = root->fs_info->generation;
 
 			while (1) {
 				write_lock(&em_tree->lock);
@@ -4322,17 +4425,14 @@
 							hole_size - 1, 0);
 			}
 			free_extent_map(hole_em);
-next:
-			btrfs_update_inode(trans, root, inode);
-			btrfs_end_transaction(trans, root);
 		}
+next:
 		free_extent_map(em);
 		em = NULL;
 		cur_offset = last_byte;
 		if (cur_offset >= block_end)
 			break;
 	}
-
 	free_extent_map(em);
 	unlock_extent_cached(io_tree, hole_start, block_end - 1, &cached_state,
 			     GFP_NOFS);
@@ -4474,6 +4574,64 @@
 	return err;
 }
 
+/*
+ * While truncating the inode pages during eviction, we get the VFS calling
+ * btrfs_invalidatepage() against each page of the inode. This is slow because
+ * the calls to btrfs_invalidatepage() result in a huge amount of calls to
+ * lock_extent_bits() and clear_extent_bit(), which keep merging and splitting
+ * extent_state structures over and over, wasting lots of time.
+ *
+ * Therefore if the inode is being evicted, let btrfs_invalidatepage() skip all
+ * those expensive operations on a per page basis and do only the ordered io
+ * finishing, while we release here the extent_map and extent_state structures,
+ * without the excessive merging and splitting.
+ */
+static void evict_inode_truncate_pages(struct inode *inode)
+{
+	struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
+	struct extent_map_tree *map_tree = &BTRFS_I(inode)->extent_tree;
+	struct rb_node *node;
+
+	ASSERT(inode->i_state & I_FREEING);
+	truncate_inode_pages(&inode->i_data, 0);
+
+	write_lock(&map_tree->lock);
+	while (!RB_EMPTY_ROOT(&map_tree->map)) {
+		struct extent_map *em;
+
+		node = rb_first(&map_tree->map);
+		em = rb_entry(node, struct extent_map, rb_node);
+		clear_bit(EXTENT_FLAG_PINNED, &em->flags);
+		clear_bit(EXTENT_FLAG_LOGGING, &em->flags);
+		remove_extent_mapping(map_tree, em);
+		free_extent_map(em);
+	}
+	write_unlock(&map_tree->lock);
+
+	spin_lock(&io_tree->lock);
+	while (!RB_EMPTY_ROOT(&io_tree->state)) {
+		struct extent_state *state;
+		struct extent_state *cached_state = NULL;
+
+		node = rb_first(&io_tree->state);
+		state = rb_entry(node, struct extent_state, rb_node);
+		atomic_inc(&state->refs);
+		spin_unlock(&io_tree->lock);
+
+		lock_extent_bits(io_tree, state->start, state->end,
+				 0, &cached_state);
+		clear_extent_bit(io_tree, state->start, state->end,
+				 EXTENT_LOCKED | EXTENT_DIRTY |
+				 EXTENT_DELALLOC | EXTENT_DO_ACCOUNTING |
+				 EXTENT_DEFRAG, 1, 1,
+				 &cached_state, GFP_NOFS);
+		free_extent_state(state);
+
+		spin_lock(&io_tree->lock);
+	}
+	spin_unlock(&io_tree->lock);
+}
+
 void btrfs_evict_inode(struct inode *inode)
 {
 	struct btrfs_trans_handle *trans;
@@ -4484,7 +4642,8 @@
 
 	trace_btrfs_inode_evict(inode);
 
-	truncate_inode_pages(&inode->i_data, 0);
+	evict_inode_truncate_pages(inode);
+
 	if (inode->i_nlink &&
 	    ((btrfs_root_refs(&root->root_item) != 0 &&
 	      root->root_key.objectid != BTRFS_ROOT_TREE_OBJECTID) ||
@@ -4659,9 +4818,9 @@
 	}
 
 	err = -ENOENT;
-	ret = btrfs_find_root_ref(root->fs_info->tree_root, path,
-				  BTRFS_I(dir)->root->root_key.objectid,
-				  location->objectid);
+	ret = btrfs_find_item(root->fs_info->tree_root, path,
+				BTRFS_I(dir)->root->root_key.objectid,
+				location->objectid, BTRFS_ROOT_REF_KEY, NULL);
 	if (ret) {
 		if (ret < 0)
 			err = ret;
@@ -4822,7 +4981,9 @@
 static int btrfs_init_locked_inode(struct inode *inode, void *p)
 {
 	struct btrfs_iget_args *args = p;
-	inode->i_ino = args->ino;
+	inode->i_ino = args->location->objectid;
+	memcpy(&BTRFS_I(inode)->location, args->location,
+	       sizeof(*args->location));
 	BTRFS_I(inode)->root = args->root;
 	return 0;
 }
@@ -4830,19 +4991,19 @@
 static int btrfs_find_actor(struct inode *inode, void *opaque)
 {
 	struct btrfs_iget_args *args = opaque;
-	return args->ino == btrfs_ino(inode) &&
+	return args->location->objectid == BTRFS_I(inode)->location.objectid &&
 		args->root == BTRFS_I(inode)->root;
 }
 
 static struct inode *btrfs_iget_locked(struct super_block *s,
-				       u64 objectid,
+				       struct btrfs_key *location,
 				       struct btrfs_root *root)
 {
 	struct inode *inode;
 	struct btrfs_iget_args args;
-	unsigned long hashval = btrfs_inode_hash(objectid, root);
+	unsigned long hashval = btrfs_inode_hash(location->objectid, root);
 
-	args.ino = objectid;
+	args.location = location;
 	args.root = root;
 
 	inode = iget5_locked(s, hashval, btrfs_find_actor,
@@ -4859,13 +5020,11 @@
 {
 	struct inode *inode;
 
-	inode = btrfs_iget_locked(s, location->objectid, root);
+	inode = btrfs_iget_locked(s, location, root);
 	if (!inode)
 		return ERR_PTR(-ENOMEM);
 
 	if (inode->i_state & I_NEW) {
-		BTRFS_I(inode)->root = root;
-		memcpy(&BTRFS_I(inode)->location, location, sizeof(*location));
 		btrfs_read_locked_inode(inode);
 		if (!is_bad_inode(inode)) {
 			inode_tree_add(inode);
@@ -4921,7 +5080,7 @@
 		return ERR_PTR(ret);
 
 	if (location.objectid == 0)
-		return NULL;
+		return ERR_PTR(-ENOENT);
 
 	if (location.type == BTRFS_INODE_ITEM_KEY) {
 		inode = btrfs_iget(dir->i_sb, &location, root, NULL);
@@ -4985,10 +5144,17 @@
 static struct dentry *btrfs_lookup(struct inode *dir, struct dentry *dentry,
 				   unsigned int flags)
 {
-	struct dentry *ret;
+	struct inode *inode;
 
-	ret = d_splice_alias(btrfs_lookup_dentry(dir, dentry), dentry);
-	return ret;
+	inode = btrfs_lookup_dentry(dir, dentry);
+	if (IS_ERR(inode)) {
+		if (PTR_ERR(inode) == -ENOENT)
+			inode = NULL;
+		else
+			return ERR_CAST(inode);
+	}
+
+	return d_materialise_unique(dentry, inode);
 }
 
 unsigned char btrfs_filetype_table[] = {
@@ -5358,7 +5524,6 @@
 	u32 sizes[2];
 	unsigned long ptr;
 	int ret;
-	int owner;
 
 	path = btrfs_alloc_path();
 	if (!path)
@@ -5392,6 +5557,7 @@
 	 * number
 	 */
 	BTRFS_I(inode)->index_cnt = 2;
+	BTRFS_I(inode)->dir_index = *index;
 	BTRFS_I(inode)->root = root;
 	BTRFS_I(inode)->generation = trans->transid;
 	inode->i_generation = BTRFS_I(inode)->generation;
@@ -5404,11 +5570,6 @@
 	 */
 	set_bit(BTRFS_INODE_NEEDS_FULL_SYNC, &BTRFS_I(inode)->runtime_flags);
 
-	if (S_ISDIR(mode))
-		owner = 0;
-	else
-		owner = 1;
-
 	key[0].objectid = objectid;
 	btrfs_set_key_type(&key[0], BTRFS_INODE_ITEM_KEY);
 	key[0].offset = 0;
@@ -5473,6 +5634,12 @@
 
 	btrfs_update_root_times(trans, root);
 
+	ret = btrfs_inode_inherit_props(trans, inode, dir);
+	if (ret)
+		btrfs_err(root->fs_info,
+			  "error inheriting props for ino %llu (root %llu): %d",
+			  btrfs_ino(inode), root->root_key.objectid, ret);
+
 	return inode;
 fail:
 	if (dir)
@@ -5741,6 +5908,8 @@
 		goto fail;
 	}
 
+	/* There are several dir indexes for this inode, clear the cache. */
+	BTRFS_I(inode)->dir_index = 0ULL;
 	inc_nlink(inode);
 	inode_inc_iversion(inode);
 	inode->i_ctime = CURRENT_TIME;
@@ -6004,7 +6173,7 @@
 		       btrfs_file_extent_num_bytes(leaf, item);
 	} else if (found_type == BTRFS_FILE_EXTENT_INLINE) {
 		size_t size;
-		size = btrfs_file_extent_inline_len(leaf, item);
+		size = btrfs_file_extent_inline_len(leaf, path->slots[0], item);
 		extent_end = ALIGN(extent_start + size, root->sectorsize);
 	}
 next:
@@ -6073,7 +6242,7 @@
 			goto out;
 		}
 
-		size = btrfs_file_extent_inline_len(leaf, item);
+		size = btrfs_file_extent_inline_len(leaf, path->slots[0], item);
 		extent_offset = page_offset(page) + pg_offset - extent_start;
 		copy_size = min_t(u64, PAGE_CACHE_SIZE - pg_offset,
 				size - extent_offset);
@@ -6390,6 +6559,7 @@
 	int slot;
 	int found_type;
 	bool nocow = (BTRFS_I(inode)->flags & BTRFS_INODE_NODATACOW);
+
 	path = btrfs_alloc_path();
 	if (!path)
 		return -ENOMEM;
@@ -6433,6 +6603,10 @@
 	if (!nocow && found_type == BTRFS_FILE_EXTENT_REG)
 		goto out;
 
+	extent_end = key.offset + btrfs_file_extent_num_bytes(leaf, fi);
+	if (extent_end <= offset)
+		goto out;
+
 	disk_bytenr = btrfs_file_extent_disk_bytenr(leaf, fi);
 	if (disk_bytenr == 0)
 		goto out;
@@ -6450,8 +6624,6 @@
 		*ram_bytes = btrfs_file_extent_ram_bytes(leaf, fi);
 	}
 
-	extent_end = key.offset + btrfs_file_extent_num_bytes(leaf, fi);
-
 	if (btrfs_extent_readonly(root, disk_bytenr))
 		goto out;
 	btrfs_release_path(path);
@@ -6783,17 +6955,16 @@
 static void btrfs_endio_direct_read(struct bio *bio, int err)
 {
 	struct btrfs_dio_private *dip = bio->bi_private;
-	struct bio_vec *bvec_end = bio->bi_io_vec + bio->bi_vcnt - 1;
-	struct bio_vec *bvec = bio->bi_io_vec;
+	struct bio_vec *bvec;
 	struct inode *inode = dip->inode;
 	struct btrfs_root *root = BTRFS_I(inode)->root;
 	struct bio *dio_bio;
 	u32 *csums = (u32 *)dip->csum;
-	int index = 0;
 	u64 start;
+	int i;
 
 	start = dip->logical_offset;
-	do {
+	bio_for_each_segment_all(bvec, bio, i) {
 		if (!(BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM)) {
 			struct page *page = bvec->bv_page;
 			char *kaddr;
@@ -6809,18 +6980,16 @@
 			local_irq_restore(flags);
 
 			flush_dcache_page(bvec->bv_page);
-			if (csum != csums[index]) {
+			if (csum != csums[i]) {
 				btrfs_err(root->fs_info, "csum failed ino %llu off %llu csum %u expected csum %u",
 					  btrfs_ino(inode), start, csum,
-					  csums[index]);
+					  csums[i]);
 				err = -EIO;
 			}
 		}
 
 		start += bvec->bv_len;
-		bvec++;
-		index++;
-	} while (bvec <= bvec_end);
+	}
 
 	unlock_extent(&BTRFS_I(inode)->io_tree, dip->logical_offset,
 		      dip->logical_offset + dip->bytes - 1);
@@ -6898,10 +7067,11 @@
 	struct btrfs_dio_private *dip = bio->bi_private;
 
 	if (err) {
-		printk(KERN_ERR "btrfs direct IO failed ino %llu rw %lu "
-		      "sector %#Lx len %u err no %d\n",
+		btrfs_err(BTRFS_I(dip->inode)->root->fs_info,
+			  "direct IO failed ino %llu rw %lu sector %#Lx len %u err no %d",
 		      btrfs_ino(dip->inode), bio->bi_rw,
-		      (unsigned long long)bio->bi_sector, bio->bi_size, err);
+		      (unsigned long long)bio->bi_iter.bi_sector,
+		      bio->bi_iter.bi_size, err);
 		dip->errors = 1;
 
 		/*
@@ -6992,7 +7162,7 @@
 	struct bio *bio;
 	struct bio *orig_bio = dip->orig_bio;
 	struct bio_vec *bvec = orig_bio->bi_io_vec;
-	u64 start_sector = orig_bio->bi_sector;
+	u64 start_sector = orig_bio->bi_iter.bi_sector;
 	u64 file_offset = dip->logical_offset;
 	u64 submit_len = 0;
 	u64 map_length;
@@ -7000,7 +7170,7 @@
 	int ret = 0;
 	int async_submit = 0;
 
-	map_length = orig_bio->bi_size;
+	map_length = orig_bio->bi_iter.bi_size;
 	ret = btrfs_map_block(root->fs_info, rw, start_sector << 9,
 			      &map_length, NULL, 0);
 	if (ret) {
@@ -7008,7 +7178,7 @@
 		return -EIO;
 	}
 
-	if (map_length >= orig_bio->bi_size) {
+	if (map_length >= orig_bio->bi_iter.bi_size) {
 		bio = orig_bio;
 		goto submit;
 	}
@@ -7060,7 +7230,7 @@
 			bio->bi_private = dip;
 			bio->bi_end_io = btrfs_end_dio_bio;
 
-			map_length = orig_bio->bi_size;
+			map_length = orig_bio->bi_iter.bi_size;
 			ret = btrfs_map_block(root->fs_info, rw,
 					      start_sector << 9,
 					      &map_length, NULL, 0);
@@ -7118,7 +7288,8 @@
 
 	if (!skip_sum && !write) {
 		csum_size = btrfs_super_csum_size(root->fs_info->super_copy);
-		sum_len = dio_bio->bi_size >> inode->i_sb->s_blocksize_bits;
+		sum_len = dio_bio->bi_iter.bi_size >>
+			inode->i_sb->s_blocksize_bits;
 		sum_len *= csum_size;
 	} else {
 		sum_len = 0;
@@ -7133,8 +7304,8 @@
 	dip->private = dio_bio->bi_private;
 	dip->inode = inode;
 	dip->logical_offset = file_offset;
-	dip->bytes = dio_bio->bi_size;
-	dip->disk_bytenr = (u64)dio_bio->bi_sector << 9;
+	dip->bytes = dio_bio->bi_iter.bi_size;
+	dip->disk_bytenr = (u64)dio_bio->bi_iter.bi_sector << 9;
 	io_bio->bi_private = dip;
 	dip->errors = 0;
 	dip->orig_bio = io_bio;
@@ -7371,6 +7542,7 @@
 	struct extent_state *cached_state = NULL;
 	u64 page_start = page_offset(page);
 	u64 page_end = page_start + PAGE_CACHE_SIZE - 1;
+	int inode_evicting = inode->i_state & I_FREEING;
 
 	/*
 	 * we have the page locked, so new writeback can't start,
@@ -7386,17 +7558,21 @@
 		btrfs_releasepage(page, GFP_NOFS);
 		return;
 	}
-	lock_extent_bits(tree, page_start, page_end, 0, &cached_state);
-	ordered = btrfs_lookup_ordered_extent(inode, page_offset(page));
+
+	if (!inode_evicting)
+		lock_extent_bits(tree, page_start, page_end, 0, &cached_state);
+	ordered = btrfs_lookup_ordered_extent(inode, page_start);
 	if (ordered) {
 		/*
 		 * IO on this page will never be started, so we need
 		 * to account for any ordered extents now
 		 */
-		clear_extent_bit(tree, page_start, page_end,
-				 EXTENT_DIRTY | EXTENT_DELALLOC |
-				 EXTENT_LOCKED | EXTENT_DO_ACCOUNTING |
-				 EXTENT_DEFRAG, 1, 0, &cached_state, GFP_NOFS);
+		if (!inode_evicting)
+			clear_extent_bit(tree, page_start, page_end,
+					 EXTENT_DIRTY | EXTENT_DELALLOC |
+					 EXTENT_LOCKED | EXTENT_DO_ACCOUNTING |
+					 EXTENT_DEFRAG, 1, 0, &cached_state,
+					 GFP_NOFS);
 		/*
 		 * whoever cleared the private bit is responsible
 		 * for the finish_ordered_io
@@ -7420,14 +7596,22 @@
 				btrfs_finish_ordered_io(ordered);
 		}
 		btrfs_put_ordered_extent(ordered);
-		cached_state = NULL;
-		lock_extent_bits(tree, page_start, page_end, 0, &cached_state);
+		if (!inode_evicting) {
+			cached_state = NULL;
+			lock_extent_bits(tree, page_start, page_end, 0,
+					 &cached_state);
+		}
 	}
-	clear_extent_bit(tree, page_start, page_end,
-		 EXTENT_LOCKED | EXTENT_DIRTY | EXTENT_DELALLOC |
-		 EXTENT_DO_ACCOUNTING | EXTENT_DEFRAG, 1, 1,
-		 &cached_state, GFP_NOFS);
-	__btrfs_releasepage(page, GFP_NOFS);
+
+	if (!inode_evicting) {
+		clear_extent_bit(tree, page_start, page_end,
+				 EXTENT_LOCKED | EXTENT_DIRTY |
+				 EXTENT_DELALLOC | EXTENT_DO_ACCOUNTING |
+				 EXTENT_DEFRAG, 1, 1,
+				 &cached_state, GFP_NOFS);
+
+		__btrfs_releasepage(page, GFP_NOFS);
+	}
 
 	ClearPageChecked(page);
 	if (PagePrivate(page)) {
@@ -7737,7 +7921,9 @@
  * create a new subvolume directory/inode (helper for the ioctl).
  */
 int btrfs_create_subvol_root(struct btrfs_trans_handle *trans,
-			     struct btrfs_root *new_root, u64 new_dirid)
+			     struct btrfs_root *new_root,
+			     struct btrfs_root *parent_root,
+			     u64 new_dirid)
 {
 	struct inode *inode;
 	int err;
@@ -7755,6 +7941,12 @@
 	set_nlink(inode, 1);
 	btrfs_i_size_write(inode, 0);
 
+	err = btrfs_subvol_inherit_props(trans, new_root, parent_root);
+	if (err)
+		btrfs_err(new_root->fs_info,
+			  "error inheriting subvolume %llu properties: %d\n",
+			  new_root->root_key.objectid, err);
+
 	err = btrfs_update_inode(trans, new_root, inode);
 
 	iput(inode);
@@ -7780,6 +7972,7 @@
 	ei->flags = 0;
 	ei->csum_bytes = 0;
 	ei->index_cnt = (u64)-1;
+	ei->dir_index = 0;
 	ei->last_unlink_trans = 0;
 	ei->last_log_commit = 0;
 
@@ -8067,6 +8260,7 @@
 	if (ret)
 		goto out_fail;
 
+	BTRFS_I(old_inode)->dir_index = 0ULL;
 	if (unlikely(old_ino == BTRFS_FIRST_FREE_OBJECTID)) {
 		/* force full log commit if subvolume involved. */
 		root->fs_info->last_trans_log_full_commit = trans->transid;
@@ -8155,6 +8349,9 @@
 		goto out_fail;
 	}
 
+	if (old_inode->i_nlink == 1)
+		BTRFS_I(old_inode)->dir_index = index;
+
 	if (old_ino != BTRFS_FIRST_FREE_OBJECTID) {
 		struct dentry *parent = new_dentry->d_parent;
 		btrfs_log_new_name(trans, old_inode, old_dir, parent);
@@ -8290,7 +8487,7 @@
 {
 	int ret;
 
-	if (root->fs_info->sb->s_flags & MS_RDONLY)
+	if (test_bit(BTRFS_FS_STATE_ERROR, &root->fs_info->fs_state))
 		return -EROFS;
 
 	ret = __start_delalloc_inodes(root, delay_iput);
@@ -8316,7 +8513,7 @@
 	struct list_head splice;
 	int ret;
 
-	if (fs_info->sb->s_flags & MS_RDONLY)
+	if (test_bit(BTRFS_FS_STATE_ERROR, &fs_info->fs_state))
 		return -EROFS;
 
 	INIT_LIST_HEAD(&splice);
diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c
index ad27dce..a6d8efa 100644
--- a/fs/btrfs/ioctl.c
+++ b/fs/btrfs/ioctl.c
@@ -56,6 +56,8 @@
 #include "rcu-string.h"
 #include "send.h"
 #include "dev-replace.h"
+#include "props.h"
+#include "sysfs.h"
 
 static int btrfs_clone(struct inode *src, struct inode *inode,
 		       u64 off, u64 olen, u64 olen_aligned, u64 destoff);
@@ -190,6 +192,9 @@
 	unsigned int i_oldflags;
 	umode_t mode;
 
+	if (!inode_owner_or_capable(inode))
+		return -EPERM;
+
 	if (btrfs_root_readonly(root))
 		return -EROFS;
 
@@ -200,9 +205,6 @@
 	if (ret)
 		return ret;
 
-	if (!inode_owner_or_capable(inode))
-		return -EACCES;
-
 	ret = mnt_want_write_file(file);
 	if (ret)
 		return ret;
@@ -280,9 +282,25 @@
 	if (flags & FS_NOCOMP_FL) {
 		ip->flags &= ~BTRFS_INODE_COMPRESS;
 		ip->flags |= BTRFS_INODE_NOCOMPRESS;
+
+		ret = btrfs_set_prop(inode, "btrfs.compression", NULL, 0, 0);
+		if (ret && ret != -ENODATA)
+			goto out_drop;
 	} else if (flags & FS_COMPR_FL) {
+		const char *comp;
+
 		ip->flags |= BTRFS_INODE_COMPRESS;
 		ip->flags &= ~BTRFS_INODE_NOCOMPRESS;
+
+		if (root->fs_info->compress_type == BTRFS_COMPRESS_LZO)
+			comp = "lzo";
+		else
+			comp = "zlib";
+		ret = btrfs_set_prop(inode, "btrfs.compression",
+				     comp, strlen(comp), 0);
+		if (ret)
+			goto out_drop;
+
 	} else {
 		ip->flags &= ~(BTRFS_INODE_COMPRESS | BTRFS_INODE_NOCOMPRESS);
 	}
@@ -392,6 +410,7 @@
 	struct btrfs_root *new_root;
 	struct btrfs_block_rsv block_rsv;
 	struct timespec cur_time = CURRENT_TIME;
+	struct inode *inode;
 	int ret;
 	int err;
 	u64 objectid;
@@ -417,7 +436,9 @@
 	trans = btrfs_start_transaction(root, 0);
 	if (IS_ERR(trans)) {
 		ret = PTR_ERR(trans);
-		goto out;
+		btrfs_subvolume_release_metadata(root, &block_rsv,
+						 qgroup_reserved);
+		return ret;
 	}
 	trans->block_rsv = &block_rsv;
 	trans->bytes_reserved = block_rsv.size;
@@ -500,7 +521,7 @@
 
 	btrfs_record_root_in_trans(trans, new_root);
 
-	ret = btrfs_create_subvol_root(trans, new_root, new_dirid);
+	ret = btrfs_create_subvol_root(trans, new_root, root, new_dirid);
 	if (ret) {
 		/* We potentially lose an unused inode item here */
 		btrfs_abort_transaction(trans, root, ret);
@@ -542,6 +563,8 @@
 fail:
 	trans->block_rsv = NULL;
 	trans->bytes_reserved = 0;
+	btrfs_subvolume_release_metadata(root, &block_rsv, qgroup_reserved);
+
 	if (async_transid) {
 		*async_transid = trans->transid;
 		err = btrfs_commit_transaction_async(trans, root, 1);
@@ -553,10 +576,12 @@
 	if (err && !ret)
 		ret = err;
 
-	if (!ret)
-		d_instantiate(dentry, btrfs_lookup_dentry(dir, dentry));
-out:
-	btrfs_subvolume_release_metadata(root, &block_rsv, qgroup_reserved);
+	if (!ret) {
+		inode = btrfs_lookup_dentry(dir, dentry);
+		if (IS_ERR(inode))
+			return PTR_ERR(inode);
+		d_instantiate(dentry, inode);
+	}
 	return ret;
 }
 
@@ -642,7 +667,7 @@
 		ret = PTR_ERR(inode);
 		goto fail;
 	}
-	BUG_ON(!inode);
+
 	d_instantiate(dentry, inode);
 	ret = 0;
 fail:
@@ -1011,7 +1036,7 @@
 static int cluster_pages_for_defrag(struct inode *inode,
 				    struct page **pages,
 				    unsigned long start_index,
-				    int num_pages)
+				    unsigned long num_pages)
 {
 	unsigned long file_end;
 	u64 isize = i_size_read(inode);
@@ -1169,8 +1194,8 @@
 	int defrag_count = 0;
 	int compress_type = BTRFS_COMPRESS_ZLIB;
 	int extent_thresh = range->extent_thresh;
-	int max_cluster = (256 * 1024) >> PAGE_CACHE_SHIFT;
-	int cluster = max_cluster;
+	unsigned long max_cluster = (256 * 1024) >> PAGE_CACHE_SHIFT;
+	unsigned long cluster = max_cluster;
 	u64 new_align = ~((u64)128 * 1024 - 1);
 	struct page **pages = NULL;
 
@@ -1254,7 +1279,7 @@
 			break;
 
 		if (btrfs_defrag_cancelled(root->fs_info)) {
-			printk(KERN_DEBUG "btrfs: defrag_file cancelled\n");
+			printk(KERN_DEBUG "BTRFS: defrag_file cancelled\n");
 			ret = -EAGAIN;
 			break;
 		}
@@ -1416,20 +1441,20 @@
 			ret = -EINVAL;
 			goto out_free;
 		}
-		printk(KERN_INFO "btrfs: resizing devid %llu\n", devid);
+		btrfs_info(root->fs_info, "resizing devid %llu", devid);
 	}
 
 	device = btrfs_find_device(root->fs_info, devid, NULL, NULL);
 	if (!device) {
-		printk(KERN_INFO "btrfs: resizer unable to find device %llu\n",
+		btrfs_info(root->fs_info, "resizer unable to find device %llu",
 		       devid);
 		ret = -ENODEV;
 		goto out_free;
 	}
 
 	if (!device->writeable) {
-		printk(KERN_INFO "btrfs: resizer unable to apply on "
-		       "readonly device %llu\n",
+		btrfs_info(root->fs_info,
+			   "resizer unable to apply on readonly device %llu",
 		       devid);
 		ret = -EPERM;
 		goto out_free;
@@ -1466,6 +1491,10 @@
 		}
 		new_size = old_size - new_size;
 	} else if (mod > 0) {
+		if (new_size > ULLONG_MAX - old_size) {
+			ret = -EINVAL;
+			goto out_free;
+		}
 		new_size = old_size + new_size;
 	}
 
@@ -1481,7 +1510,7 @@
 	do_div(new_size, root->sectorsize);
 	new_size *= root->sectorsize;
 
-	printk_in_rcu(KERN_INFO "btrfs: new size for %s is %llu\n",
+	printk_in_rcu(KERN_INFO "BTRFS: new size for %s is %llu\n",
 		      rcu_str_deref(device->name), new_size);
 
 	if (new_size > old_size) {
@@ -1542,9 +1571,15 @@
 
 		src_inode = file_inode(src.file);
 		if (src_inode->i_sb != file_inode(file)->i_sb) {
-			printk(KERN_INFO "btrfs: Snapshot src from "
-			       "another FS\n");
+			btrfs_info(BTRFS_I(src_inode)->root->fs_info,
+				   "Snapshot src from another FS");
 			ret = -EINVAL;
+		} else if (!inode_owner_or_capable(src_inode)) {
+			/*
+			 * Subvolume creation is not restricted, but snapshots
+			 * are limited to the caller's own subvolumes only
+			 */
+			ret = -EPERM;
 		} else {
 			ret = btrfs_mksubvol(&file->f_path, name, namelen,
 					     BTRFS_I(src_inode)->root,
@@ -1662,6 +1697,9 @@
 	u64 flags;
 	int ret = 0;
 
+	if (!inode_owner_or_capable(inode))
+		return -EPERM;
+
 	ret = mnt_want_write_file(file);
 	if (ret)
 		goto out;
@@ -1686,11 +1724,6 @@
 		goto out_drop_write;
 	}
 
-	if (!inode_owner_or_capable(inode)) {
-		ret = -EACCES;
-		goto out_drop_write;
-	}
-
 	down_write(&root->fs_info->subvol_sem);
 
 	/* nothing to do */
@@ -1698,12 +1731,28 @@
 		goto out_drop_sem;
 
 	root_flags = btrfs_root_flags(&root->root_item);
-	if (flags & BTRFS_SUBVOL_RDONLY)
+	if (flags & BTRFS_SUBVOL_RDONLY) {
 		btrfs_set_root_flags(&root->root_item,
 				     root_flags | BTRFS_ROOT_SUBVOL_RDONLY);
-	else
-		btrfs_set_root_flags(&root->root_item,
+	} else {
+		/*
+		 * Block RO -> RW transition if this subvolume is involved in
+		 * send
+		 */
+		spin_lock(&root->root_item_lock);
+		if (root->send_in_progress == 0) {
+			btrfs_set_root_flags(&root->root_item,
 				     root_flags & ~BTRFS_ROOT_SUBVOL_RDONLY);
+			spin_unlock(&root->root_item_lock);
+		} else {
+			spin_unlock(&root->root_item_lock);
+			btrfs_warn(root->fs_info,
+			"Attempt to set subvolume %llu read-write during send",
+					root->root_key.objectid);
+			ret = -EPERM;
+			goto out_drop_sem;
+		}
+	}
 
 	trans = btrfs_start_transaction(root, 1);
 	if (IS_ERR(trans)) {
@@ -1910,7 +1959,7 @@
 		key.offset = (u64)-1;
 		root = btrfs_read_fs_root_no_name(info, &key);
 		if (IS_ERR(root)) {
-			printk(KERN_ERR "could not find root %llu\n",
+			printk(KERN_ERR "BTRFS: could not find root %llu\n",
 			       sk->tree_id);
 			btrfs_free_path(path);
 			return -ENOENT;
@@ -2000,7 +2049,7 @@
 	key.offset = (u64)-1;
 	root = btrfs_read_fs_root_no_name(info, &key);
 	if (IS_ERR(root)) {
-		printk(KERN_ERR "could not find root %llu\n", tree_id);
+		printk(KERN_ERR "BTRFS: could not find root %llu\n", tree_id);
 		ret = -ENOENT;
 		goto out;
 	}
@@ -2838,12 +2887,14 @@
 		 * note the key will change type as we walk through the
 		 * tree.
 		 */
+		path->leave_spinning = 1;
 		ret = btrfs_search_slot(NULL, BTRFS_I(src)->root, &key, path,
 				0, 0);
 		if (ret < 0)
 			goto out;
 
 		nritems = btrfs_header_nritems(path->nodes[0]);
+process_slot:
 		if (path->slots[0] >= nritems) {
 			ret = btrfs_next_leaf(BTRFS_I(src)->root, path);
 			if (ret < 0)
@@ -2870,11 +2921,6 @@
 			u8 comp;
 			u64 endoff;
 
-			size = btrfs_item_size_nr(leaf, slot);
-			read_extent_buffer(leaf, buf,
-					   btrfs_item_ptr_offset(leaf, slot),
-					   size);
-
 			extent = btrfs_item_ptr(leaf, slot,
 						struct btrfs_file_extent_item);
 			comp = btrfs_file_extent_compression(leaf, extent);
@@ -2893,11 +2939,20 @@
 				datal = btrfs_file_extent_ram_bytes(leaf,
 								    extent);
 			}
-			btrfs_release_path(path);
 
 			if (key.offset + datal <= off ||
-			    key.offset >= off + len - 1)
-				goto next;
+			    key.offset >= off + len - 1) {
+				path->slots[0]++;
+				goto process_slot;
+			}
+
+			size = btrfs_item_size_nr(leaf, slot);
+			read_extent_buffer(leaf, buf,
+					   btrfs_item_ptr_offset(leaf, slot),
+					   size);
+
+			btrfs_release_path(path);
+			path->leave_spinning = 0;
 
 			memcpy(&new_key, &key, sizeof(new_key));
 			new_key.objectid = btrfs_ino(inode);
@@ -3068,7 +3123,6 @@
 			}
 			ret = btrfs_end_transaction(trans, root);
 		}
-next:
 		btrfs_release_path(path);
 		key.offset++;
 	}
@@ -3196,9 +3250,17 @@
 
 	unlock_extent(&BTRFS_I(src)->io_tree, off, off + len - 1);
 out_unlock:
-	mutex_unlock(&src->i_mutex);
-	if (!same_inode)
-		mutex_unlock(&inode->i_mutex);
+	if (!same_inode) {
+		if (inode < src) {
+			mutex_unlock(&src->i_mutex);
+			mutex_unlock(&inode->i_mutex);
+		} else {
+			mutex_unlock(&inode->i_mutex);
+			mutex_unlock(&src->i_mutex);
+		}
+	} else {
+		mutex_unlock(&src->i_mutex);
+	}
 out_fput:
 	fdput(src_file);
 out_drop_write:
@@ -3321,8 +3383,8 @@
 	if (IS_ERR_OR_NULL(di)) {
 		btrfs_free_path(path);
 		btrfs_end_transaction(trans, root);
-		printk(KERN_ERR "Umm, you don't have the default dir item, "
-		       "this isn't going to work\n");
+		btrfs_err(new_root->fs_info, "Umm, you don't have the default dir "
+			   "item, this isn't going to work");
 		ret = -ENOENT;
 		goto out;
 	}
@@ -4303,6 +4365,9 @@
 	int ret = 0;
 	int received_uuid_changed;
 
+	if (!inode_owner_or_capable(inode))
+		return -EPERM;
+
 	ret = mnt_want_write_file(file);
 	if (ret < 0)
 		return ret;
@@ -4319,11 +4384,6 @@
 		goto out;
 	}
 
-	if (!inode_owner_or_capable(inode)) {
-		ret = -EACCES;
-		goto out;
-	}
-
 	sa = memdup_user(arg, sizeof(*sa));
 	if (IS_ERR(sa)) {
 		ret = PTR_ERR(sa);
@@ -4409,8 +4469,8 @@
 	len = strnlen(label, BTRFS_LABEL_SIZE);
 
 	if (len == BTRFS_LABEL_SIZE) {
-		pr_warn("btrfs: label is too long, return the first %zu bytes\n",
-			--len);
+		btrfs_warn(root->fs_info,
+			"label is too long, returning the first %zu bytes", --len);
 	}
 
 	ret = copy_to_user(arg, label, len);
@@ -4433,7 +4493,7 @@
 		return -EFAULT;
 
 	if (strnlen(label, BTRFS_LABEL_SIZE) == BTRFS_LABEL_SIZE) {
-		pr_err("btrfs: unable to set label with more than %d bytes\n",
+		btrfs_err(root->fs_info, "unable to set label with more than %d bytes",
 		       BTRFS_LABEL_SIZE - 1);
 		return -EINVAL;
 	}
@@ -4451,13 +4511,173 @@
 	spin_lock(&root->fs_info->super_lock);
 	strcpy(super_block->label, label);
 	spin_unlock(&root->fs_info->super_lock);
-	ret = btrfs_end_transaction(trans, root);
+	ret = btrfs_commit_transaction(trans, root);
 
 out_unlock:
 	mnt_drop_write_file(file);
 	return ret;
 }
 
+#define INIT_FEATURE_FLAGS(suffix) \
+	{ .compat_flags = BTRFS_FEATURE_COMPAT_##suffix, \
+	  .compat_ro_flags = BTRFS_FEATURE_COMPAT_RO_##suffix, \
+	  .incompat_flags = BTRFS_FEATURE_INCOMPAT_##suffix }
+
+static int btrfs_ioctl_get_supported_features(struct file *file,
+					      void __user *arg)
+{
+	static struct btrfs_ioctl_feature_flags features[3] = {
+		INIT_FEATURE_FLAGS(SUPP),
+		INIT_FEATURE_FLAGS(SAFE_SET),
+		INIT_FEATURE_FLAGS(SAFE_CLEAR)
+	};
+
+	if (copy_to_user(arg, &features, sizeof(features)))
+		return -EFAULT;
+
+	return 0;
+}
+
+static int btrfs_ioctl_get_features(struct file *file, void __user *arg)
+{
+	struct btrfs_root *root = BTRFS_I(file_inode(file))->root;
+	struct btrfs_super_block *super_block = root->fs_info->super_copy;
+	struct btrfs_ioctl_feature_flags features;
+
+	features.compat_flags = btrfs_super_compat_flags(super_block);
+	features.compat_ro_flags = btrfs_super_compat_ro_flags(super_block);
+	features.incompat_flags = btrfs_super_incompat_flags(super_block);
+
+	if (copy_to_user(arg, &features, sizeof(features)))
+		return -EFAULT;
+
+	return 0;
+}
+
+static int check_feature_bits(struct btrfs_root *root,
+			      enum btrfs_feature_set set,
+			      u64 change_mask, u64 flags, u64 supported_flags,
+			      u64 safe_set, u64 safe_clear)
+{
+	const char *type = btrfs_feature_set_names[set];
+	char *names;
+	u64 disallowed, unsupported;
+	u64 set_mask = flags & change_mask;
+	u64 clear_mask = ~flags & change_mask;
+
+	unsupported = set_mask & ~supported_flags;
+	if (unsupported) {
+		names = btrfs_printable_features(set, unsupported);
+		if (names) {
+			btrfs_warn(root->fs_info,
+			   "this kernel does not support the %s feature bit%s",
+			   names, strchr(names, ',') ? "s" : "");
+			kfree(names);
+		} else
+			btrfs_warn(root->fs_info,
+			   "this kernel does not support %s bits 0x%llx",
+			   type, unsupported);
+		return -EOPNOTSUPP;
+	}
+
+	disallowed = set_mask & ~safe_set;
+	if (disallowed) {
+		names = btrfs_printable_features(set, disallowed);
+		if (names) {
+			btrfs_warn(root->fs_info,
+			   "can't set the %s feature bit%s while mounted",
+			   names, strchr(names, ',') ? "s" : "");
+			kfree(names);
+		} else
+			btrfs_warn(root->fs_info,
+			   "can't set %s bits 0x%llx while mounted",
+			   type, disallowed);
+		return -EPERM;
+	}
+
+	disallowed = clear_mask & ~safe_clear;
+	if (disallowed) {
+		names = btrfs_printable_features(set, disallowed);
+		if (names) {
+			btrfs_warn(root->fs_info,
+			   "can't clear the %s feature bit%s while mounted",
+			   names, strchr(names, ',') ? "s" : "");
+			kfree(names);
+		} else
+			btrfs_warn(root->fs_info,
+			   "can't clear %s bits 0x%llx while mounted",
+			   type, disallowed);
+		return -EPERM;
+	}
+
+	return 0;
+}
+
+#define check_feature(root, change_mask, flags, mask_base)	\
+check_feature_bits(root, FEAT_##mask_base, change_mask, flags,	\
+		   BTRFS_FEATURE_ ## mask_base ## _SUPP,	\
+		   BTRFS_FEATURE_ ## mask_base ## _SAFE_SET,	\
+		   BTRFS_FEATURE_ ## mask_base ## _SAFE_CLEAR)
+
+static int btrfs_ioctl_set_features(struct file *file, void __user *arg)
+{
+	struct btrfs_root *root = BTRFS_I(file_inode(file))->root;
+	struct btrfs_super_block *super_block = root->fs_info->super_copy;
+	struct btrfs_ioctl_feature_flags flags[2];
+	struct btrfs_trans_handle *trans;
+	u64 newflags;
+	int ret;
+
+	if (!capable(CAP_SYS_ADMIN))
+		return -EPERM;
+
+	if (copy_from_user(flags, arg, sizeof(flags)))
+		return -EFAULT;
+
+	/* Nothing to do */
+	if (!flags[0].compat_flags && !flags[0].compat_ro_flags &&
+	    !flags[0].incompat_flags)
+		return 0;
+
+	ret = check_feature(root, flags[0].compat_flags,
+			    flags[1].compat_flags, COMPAT);
+	if (ret)
+		return ret;
+
+	ret = check_feature(root, flags[0].compat_ro_flags,
+			    flags[1].compat_ro_flags, COMPAT_RO);
+	if (ret)
+		return ret;
+
+	ret = check_feature(root, flags[0].incompat_flags,
+			    flags[1].incompat_flags, INCOMPAT);
+	if (ret)
+		return ret;
+
+	trans = btrfs_start_transaction(root, 0);
+	if (IS_ERR(trans))
+		return PTR_ERR(trans);
+
+	spin_lock(&root->fs_info->super_lock);
+	newflags = btrfs_super_compat_flags(super_block);
+	newflags |= flags[0].compat_flags & flags[1].compat_flags;
+	newflags &= ~(flags[0].compat_flags & ~flags[1].compat_flags);
+	btrfs_set_super_compat_flags(super_block, newflags);
+
+	newflags = btrfs_super_compat_ro_flags(super_block);
+	newflags |= flags[0].compat_ro_flags & flags[1].compat_ro_flags;
+	newflags &= ~(flags[0].compat_ro_flags & ~flags[1].compat_ro_flags);
+	btrfs_set_super_compat_ro_flags(super_block, newflags);
+
+	newflags = btrfs_super_incompat_flags(super_block);
+	newflags |= flags[0].incompat_flags & flags[1].incompat_flags;
+	newflags &= ~(flags[0].incompat_flags & ~flags[1].incompat_flags);
+	btrfs_set_super_incompat_flags(super_block, newflags);
+	spin_unlock(&root->fs_info->super_lock);
+
+	return btrfs_commit_transaction(trans, root);
+}
+
 long btrfs_ioctl(struct file *file, unsigned int
 		cmd, unsigned long arg)
 {
@@ -4576,6 +4796,12 @@
 		return btrfs_ioctl_set_fslabel(file, argp);
 	case BTRFS_IOC_FILE_EXTENT_SAME:
 		return btrfs_ioctl_file_extent_same(file, argp);
+	case BTRFS_IOC_GET_SUPPORTED_FEATURES:
+		return btrfs_ioctl_get_supported_features(file, argp);
+	case BTRFS_IOC_GET_FEATURES:
+		return btrfs_ioctl_get_features(file, argp);
+	case BTRFS_IOC_SET_FEATURES:
+		return btrfs_ioctl_set_features(file, argp);
 	}
 
 	return -ENOTTY;
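
btrfs_ioctl_set_features() above treats flags[0] as the mask of feature bits to change and flags[1] as their desired values, applying the same two-step update to the compat, compat_ro and incompat words under super_lock. A minimal sketch of that mask/value arithmetic on a bare u64 (the helper name is illustrative, not part of the patch):

static inline u64 apply_feature_mask(u64 cur, u64 change_mask, u64 values)
{
	/* turn on the selected bits whose desired value is 1 */
	cur |= change_mask & values;
	/* turn off the selected bits whose desired value is 0 */
	cur &= ~(change_mask & ~values);
	return cur;
}
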
diff --git a/fs/btrfs/lzo.c b/fs/btrfs/lzo.c
index b6a6f07..b47f669 100644
--- a/fs/btrfs/lzo.c
+++ b/fs/btrfs/lzo.c
@@ -141,7 +141,7 @@
 		ret = lzo1x_1_compress(data_in, in_len, workspace->cbuf,
 				       &out_len, workspace->mem);
 		if (ret != LZO_E_OK) {
-			printk(KERN_DEBUG "btrfs deflate in loop returned %d\n",
+			printk(KERN_DEBUG "BTRFS: deflate in loop returned %d\n",
 			       ret);
 			ret = -1;
 			goto out;
@@ -357,7 +357,7 @@
 		if (need_unmap)
 			kunmap(pages_in[page_in_index - 1]);
 		if (ret != LZO_E_OK) {
-			printk(KERN_WARNING "btrfs decompress failed\n");
+			printk(KERN_WARNING "BTRFS: decompress failed\n");
 			ret = -1;
 			break;
 		}
@@ -401,7 +401,7 @@
 	out_len = PAGE_CACHE_SIZE;
 	ret = lzo1x_decompress_safe(data_in, in_len, workspace->buf, &out_len);
 	if (ret != LZO_E_OK) {
-		printk(KERN_WARNING "btrfs decompress failed!\n");
+		printk(KERN_WARNING "BTRFS: decompress failed!\n");
 		ret = -1;
 		goto out;
 	}
diff --git a/fs/btrfs/ordered-data.c b/fs/btrfs/ordered-data.c
index 69582d5..b16450b 100644
--- a/fs/btrfs/ordered-data.c
+++ b/fs/btrfs/ordered-data.c
@@ -336,13 +336,14 @@
 		      entry->len);
 	*file_offset = dec_end;
 	if (dec_start > dec_end) {
-		printk(KERN_CRIT "bad ordering dec_start %llu end %llu\n",
-		       dec_start, dec_end);
+		btrfs_crit(BTRFS_I(inode)->root->fs_info,
+			"bad ordering dec_start %llu end %llu", dec_start, dec_end);
 	}
 	to_dec = dec_end - dec_start;
 	if (to_dec > entry->bytes_left) {
-		printk(KERN_CRIT "bad ordered accounting left %llu size %llu\n",
-		       entry->bytes_left, to_dec);
+		btrfs_crit(BTRFS_I(inode)->root->fs_info,
+			"bad ordered accounting left %llu size %llu",
+			entry->bytes_left, to_dec);
 	}
 	entry->bytes_left -= to_dec;
 	if (!uptodate)
@@ -401,7 +402,8 @@
 	}
 
 	if (io_size > entry->bytes_left) {
-		printk(KERN_CRIT "bad ordered accounting left %llu size %llu\n",
+		btrfs_crit(BTRFS_I(inode)->root->fs_info,
+			   "bad ordered accounting left %llu size %llu",
 		       entry->bytes_left, io_size);
 	}
 	entry->bytes_left -= io_size;
@@ -520,7 +522,8 @@
 	spin_lock_irq(&tree->lock);
 	node = &entry->rb_node;
 	rb_erase(node, &tree->tree);
-	tree->last = NULL;
+	if (tree->last == node)
+		tree->last = NULL;
 	set_bit(BTRFS_ORDERED_COMPLETE, &entry->flags);
 	spin_unlock_irq(&tree->lock);
 
diff --git a/fs/btrfs/orphan.c b/fs/btrfs/orphan.c
index 24cad16..65793ed 100644
--- a/fs/btrfs/orphan.c
+++ b/fs/btrfs/orphan.c
@@ -69,23 +69,3 @@
 	btrfs_free_path(path);
 	return ret;
 }
-
-int btrfs_find_orphan_item(struct btrfs_root *root, u64 offset)
-{
-	struct btrfs_path *path;
-	struct btrfs_key key;
-	int ret;
-
-	key.objectid = BTRFS_ORPHAN_OBJECTID;
-	key.type = BTRFS_ORPHAN_ITEM_KEY;
-	key.offset = offset;
-
-	path = btrfs_alloc_path();
-	if (!path)
-		return -ENOMEM;
-
-	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
-
-	btrfs_free_path(path);
-	return ret;
-}
diff --git a/fs/btrfs/print-tree.c b/fs/btrfs/print-tree.c
index 417053b..6efd70d 100644
--- a/fs/btrfs/print-tree.c
+++ b/fs/btrfs/print-tree.c
@@ -154,7 +154,7 @@
 			    u32 item_size)
 {
 	if (!IS_ALIGNED(item_size, sizeof(u64))) {
-		pr_warn("btrfs: uuid item with illegal size %lu!\n",
+		pr_warn("BTRFS: uuid item with illegal size %lu!\n",
 			(unsigned long)item_size);
 		return;
 	}
@@ -249,7 +249,7 @@
 			    BTRFS_FILE_EXTENT_INLINE) {
 				printk(KERN_INFO "\t\tinline extent data "
 				       "size %u\n",
-				       btrfs_file_extent_inline_len(l, fi));
+				       btrfs_file_extent_inline_len(l, i, fi));
 				break;
 			}
 			printk(KERN_INFO "\t\textent data disk bytenr %llu "
diff --git a/fs/btrfs/props.c b/fs/btrfs/props.c
new file mode 100644
index 0000000..129b1dd
--- /dev/null
+++ b/fs/btrfs/props.c
@@ -0,0 +1,427 @@
+/*
+ * Copyright (C) 2014 Filipe David Borba Manana <fdmanana@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public
+ * License v2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public
+ * License along with this program; if not, write to the
+ * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
+ * Boston, MA 02111-1307, USA.
+ */
+
+#include <linux/hashtable.h>
+#include "props.h"
+#include "btrfs_inode.h"
+#include "hash.h"
+#include "transaction.h"
+#include "xattr.h"
+
+#define BTRFS_PROP_HANDLERS_HT_BITS 8
+static DEFINE_HASHTABLE(prop_handlers_ht, BTRFS_PROP_HANDLERS_HT_BITS);
+
+struct prop_handler {
+	struct hlist_node node;
+	const char *xattr_name;
+	int (*validate)(const char *value, size_t len);
+	int (*apply)(struct inode *inode, const char *value, size_t len);
+	const char *(*extract)(struct inode *inode);
+	int inheritable;
+};
+
+static int prop_compression_validate(const char *value, size_t len);
+static int prop_compression_apply(struct inode *inode,
+				  const char *value,
+				  size_t len);
+static const char *prop_compression_extract(struct inode *inode);
+
+static struct prop_handler prop_handlers[] = {
+	{
+		.xattr_name = XATTR_BTRFS_PREFIX "compression",
+		.validate = prop_compression_validate,
+		.apply = prop_compression_apply,
+		.extract = prop_compression_extract,
+		.inheritable = 1
+	},
+	{
+		.xattr_name = NULL
+	}
+};
+
+void __init btrfs_props_init(void)
+{
+	struct prop_handler *p;
+
+	hash_init(prop_handlers_ht);
+
+	for (p = &prop_handlers[0]; p->xattr_name; p++) {
+		u64 h = btrfs_name_hash(p->xattr_name, strlen(p->xattr_name));
+
+		hash_add(prop_handlers_ht, &p->node, h);
+	}
+}
+
+static const struct hlist_head *find_prop_handlers_by_hash(const u64 hash)
+{
+	struct hlist_head *h;
+
+	h = &prop_handlers_ht[hash_min(hash, BTRFS_PROP_HANDLERS_HT_BITS)];
+	if (hlist_empty(h))
+		return NULL;
+
+	return h;
+}
+
+static const struct prop_handler *
+find_prop_handler(const char *name,
+		  const struct hlist_head *handlers)
+{
+	struct prop_handler *h;
+
+	if (!handlers) {
+		u64 hash = btrfs_name_hash(name, strlen(name));
+
+		handlers = find_prop_handlers_by_hash(hash);
+		if (!handlers)
+			return NULL;
+	}
+
+	hlist_for_each_entry(h, handlers, node)
+		if (!strcmp(h->xattr_name, name))
+			return h;
+
+	return NULL;
+}
+
+static int __btrfs_set_prop(struct btrfs_trans_handle *trans,
+			    struct inode *inode,
+			    const char *name,
+			    const char *value,
+			    size_t value_len,
+			    int flags)
+{
+	const struct prop_handler *handler;
+	int ret;
+
+	if (strlen(name) <= XATTR_BTRFS_PREFIX_LEN)
+		return -EINVAL;
+
+	handler = find_prop_handler(name, NULL);
+	if (!handler)
+		return -EINVAL;
+
+	if (value_len == 0) {
+		ret = __btrfs_setxattr(trans, inode, handler->xattr_name,
+				       NULL, 0, flags);
+		if (ret)
+			return ret;
+
+		ret = handler->apply(inode, NULL, 0);
+		ASSERT(ret == 0);
+
+		return ret;
+	}
+
+	ret = handler->validate(value, value_len);
+	if (ret)
+		return ret;
+	ret = __btrfs_setxattr(trans, inode, handler->xattr_name,
+			       value, value_len, flags);
+	if (ret)
+		return ret;
+	ret = handler->apply(inode, value, value_len);
+	if (ret) {
+		__btrfs_setxattr(trans, inode, handler->xattr_name,
+				 NULL, 0, flags);
+		return ret;
+	}
+
+	set_bit(BTRFS_INODE_HAS_PROPS, &BTRFS_I(inode)->runtime_flags);
+
+	return 0;
+}
+
+int btrfs_set_prop(struct inode *inode,
+		   const char *name,
+		   const char *value,
+		   size_t value_len,
+		   int flags)
+{
+	return __btrfs_set_prop(NULL, inode, name, value, value_len, flags);
+}
+
+static int iterate_object_props(struct btrfs_root *root,
+				struct btrfs_path *path,
+				u64 objectid,
+				void (*iterator)(void *,
+						 const struct prop_handler *,
+						 const char *,
+						 size_t),
+				void *ctx)
+{
+	int ret;
+	char *name_buf = NULL;
+	char *value_buf = NULL;
+	int name_buf_len = 0;
+	int value_buf_len = 0;
+
+	while (1) {
+		struct btrfs_key key;
+		struct btrfs_dir_item *di;
+		struct extent_buffer *leaf;
+		u32 total_len, cur, this_len;
+		int slot;
+		const struct hlist_head *handlers;
+
+		slot = path->slots[0];
+		leaf = path->nodes[0];
+
+		if (slot >= btrfs_header_nritems(leaf)) {
+			ret = btrfs_next_leaf(root, path);
+			if (ret < 0)
+				goto out;
+			else if (ret > 0)
+				break;
+			continue;
+		}
+
+		btrfs_item_key_to_cpu(leaf, &key, slot);
+		if (key.objectid != objectid)
+			break;
+		if (key.type != BTRFS_XATTR_ITEM_KEY)
+			break;
+
+		handlers = find_prop_handlers_by_hash(key.offset);
+		if (!handlers)
+			goto next_slot;
+
+		di = btrfs_item_ptr(leaf, slot, struct btrfs_dir_item);
+		cur = 0;
+		total_len = btrfs_item_size_nr(leaf, slot);
+
+		while (cur < total_len) {
+			u32 name_len = btrfs_dir_name_len(leaf, di);
+			u32 data_len = btrfs_dir_data_len(leaf, di);
+			unsigned long name_ptr, data_ptr;
+			const struct prop_handler *handler;
+
+			this_len = sizeof(*di) + name_len + data_len;
+			name_ptr = (unsigned long)(di + 1);
+			data_ptr = name_ptr + name_len;
+
+			if (name_len <= XATTR_BTRFS_PREFIX_LEN ||
+			    memcmp_extent_buffer(leaf, XATTR_BTRFS_PREFIX,
+						 name_ptr,
+						 XATTR_BTRFS_PREFIX_LEN))
+				goto next_dir_item;
+
+			if (name_len >= name_buf_len) {
+				kfree(name_buf);
+				name_buf_len = name_len + 1;
+				name_buf = kmalloc(name_buf_len, GFP_NOFS);
+				if (!name_buf) {
+					ret = -ENOMEM;
+					goto out;
+				}
+			}
+			read_extent_buffer(leaf, name_buf, name_ptr, name_len);
+			name_buf[name_len] = '\0';
+
+			handler = find_prop_handler(name_buf, handlers);
+			if (!handler)
+				goto next_dir_item;
+
+			if (data_len > value_buf_len) {
+				kfree(value_buf);
+				value_buf_len = data_len;
+				value_buf = kmalloc(data_len, GFP_NOFS);
+				if (!value_buf) {
+					ret = -ENOMEM;
+					goto out;
+				}
+			}
+			read_extent_buffer(leaf, value_buf, data_ptr, data_len);
+
+			iterator(ctx, handler, value_buf, data_len);
+next_dir_item:
+			cur += this_len;
+			di = (struct btrfs_dir_item *)((char *) di + this_len);
+		}
+
+next_slot:
+		path->slots[0]++;
+	}
+
+	ret = 0;
+out:
+	btrfs_release_path(path);
+	kfree(name_buf);
+	kfree(value_buf);
+
+	return ret;
+}
+
+static void inode_prop_iterator(void *ctx,
+				const struct prop_handler *handler,
+				const char *value,
+				size_t len)
+{
+	struct inode *inode = ctx;
+	struct btrfs_root *root = BTRFS_I(inode)->root;
+	int ret;
+
+	ret = handler->apply(inode, value, len);
+	if (unlikely(ret))
+		btrfs_warn(root->fs_info,
+			   "error applying prop %s to ino %llu (root %llu): %d",
+			   handler->xattr_name, btrfs_ino(inode),
+			   root->root_key.objectid, ret);
+	else
+		set_bit(BTRFS_INODE_HAS_PROPS, &BTRFS_I(inode)->runtime_flags);
+}
+
+int btrfs_load_inode_props(struct inode *inode, struct btrfs_path *path)
+{
+	struct btrfs_root *root = BTRFS_I(inode)->root;
+	u64 ino = btrfs_ino(inode);
+	int ret;
+
+	ret = iterate_object_props(root, path, ino, inode_prop_iterator, inode);
+
+	return ret;
+}
+
+static int inherit_props(struct btrfs_trans_handle *trans,
+			 struct inode *inode,
+			 struct inode *parent)
+{
+	const struct prop_handler *h;
+	struct btrfs_root *root = BTRFS_I(inode)->root;
+	int ret;
+
+	if (!test_bit(BTRFS_INODE_HAS_PROPS,
+		      &BTRFS_I(parent)->runtime_flags))
+		return 0;
+
+	for (h = &prop_handlers[0]; h->xattr_name; h++) {
+		const char *value;
+		u64 num_bytes;
+
+		if (!h->inheritable)
+			continue;
+
+		value = h->extract(parent);
+		if (!value)
+			continue;
+
+		num_bytes = btrfs_calc_trans_metadata_size(root, 1);
+		ret = btrfs_block_rsv_add(root, trans->block_rsv,
+					  num_bytes, BTRFS_RESERVE_NO_FLUSH);
+		if (ret)
+			goto out;
+		ret = __btrfs_set_prop(trans, inode, h->xattr_name,
+				       value, strlen(value), 0);
+		btrfs_block_rsv_release(root, trans->block_rsv, num_bytes);
+		if (ret)
+			goto out;
+	}
+	ret = 0;
+out:
+	return ret;
+}
+
+int btrfs_inode_inherit_props(struct btrfs_trans_handle *trans,
+			      struct inode *inode,
+			      struct inode *dir)
+{
+	if (!dir)
+		return 0;
+
+	return inherit_props(trans, inode, dir);
+}
+
+int btrfs_subvol_inherit_props(struct btrfs_trans_handle *trans,
+			       struct btrfs_root *root,
+			       struct btrfs_root *parent_root)
+{
+	struct btrfs_key key;
+	struct inode *parent_inode, *child_inode;
+	int ret;
+
+	key.objectid = BTRFS_FIRST_FREE_OBJECTID;
+	key.type = BTRFS_INODE_ITEM_KEY;
+	key.offset = 0;
+
+	parent_inode = btrfs_iget(parent_root->fs_info->sb, &key,
+				  parent_root, NULL);
+	if (IS_ERR(parent_inode))
+		return PTR_ERR(parent_inode);
+
+	child_inode = btrfs_iget(root->fs_info->sb, &key, root, NULL);
+	if (IS_ERR(child_inode)) {
+		iput(parent_inode);
+		return PTR_ERR(child_inode);
+	}
+
+	ret = inherit_props(trans, child_inode, parent_inode);
+	iput(child_inode);
+	iput(parent_inode);
+
+	return ret;
+}
+
+static int prop_compression_validate(const char *value, size_t len)
+{
+	if (!strncmp("lzo", value, len))
+		return 0;
+	else if (!strncmp("zlib", value, len))
+		return 0;
+
+	return -EINVAL;
+}
+
+static int prop_compression_apply(struct inode *inode,
+				  const char *value,
+				  size_t len)
+{
+	int type;
+
+	if (len == 0) {
+		BTRFS_I(inode)->flags |= BTRFS_INODE_NOCOMPRESS;
+		BTRFS_I(inode)->flags &= ~BTRFS_INODE_COMPRESS;
+		BTRFS_I(inode)->force_compress = BTRFS_COMPRESS_NONE;
+
+		return 0;
+	}
+
+	if (!strncmp("lzo", value, len))
+		type = BTRFS_COMPRESS_LZO;
+	else if (!strncmp("zlib", value, len))
+		type = BTRFS_COMPRESS_ZLIB;
+	else
+		return -EINVAL;
+
+	BTRFS_I(inode)->flags &= ~BTRFS_INODE_NOCOMPRESS;
+	BTRFS_I(inode)->flags |= BTRFS_INODE_COMPRESS;
+	BTRFS_I(inode)->force_compress = type;
+
+	return 0;
+}
+
+static const char *prop_compression_extract(struct inode *inode)
+{
+	switch (BTRFS_I(inode)->force_compress) {
+	case BTRFS_COMPRESS_ZLIB:
+		return "zlib";
+	case BTRFS_COMPRESS_LZO:
+		return "lzo";
+	}
+
+	return NULL;
+}
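
Each handler above is keyed by a btrfs-prefixed xattr name, so properties are normally driven from user space through the regular xattr calls, with __btrfs_set_prop() doing the validate/apply work in the kernel. A minimal user-space sketch under that assumption, using the "btrfs.compression" name that the compression handler registers (error handling trimmed):

#include <stdio.h>
#include <string.h>
#include <sys/xattr.h>

/* Illustrative only: request lzo or zlib compression on one file by
 * writing the property's backing xattr. */
static int set_compression_prop(const char *path, const char *algo)
{
	/* prop_compression_validate() only accepts "lzo" and "zlib" */
	if (setxattr(path, "btrfs.compression", algo, strlen(algo), 0) < 0) {
		perror("setxattr");
		return -1;
	}
	return 0;
}
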
diff --git a/fs/btrfs/props.h b/fs/btrfs/props.h
new file mode 100644
index 0000000..100f188
--- /dev/null
+++ b/fs/btrfs/props.h
@@ -0,0 +1,42 @@
+/*
+ * Copyright (C) 2014 Filipe David Borba Manana <fdmanana@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public
+ * License v2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public
+ * License along with this program; if not, write to the
+ * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
+ * Boston, MA 02111-1307, USA.
+ */
+
+#ifndef __BTRFS_PROPS_H
+#define __BTRFS_PROPS_H
+
+#include "ctree.h"
+
+void __init btrfs_props_init(void);
+
+int btrfs_set_prop(struct inode *inode,
+		   const char *name,
+		   const char *value,
+		   size_t value_len,
+		   int flags);
+
+int btrfs_load_inode_props(struct inode *inode, struct btrfs_path *path);
+
+int btrfs_inode_inherit_props(struct btrfs_trans_handle *trans,
+			      struct inode *inode,
+			      struct inode *dir);
+
+int btrfs_subvol_inherit_props(struct btrfs_trans_handle *trans,
+			       struct btrfs_root *root,
+			       struct btrfs_root *parent_root);
+
+#endif
diff --git a/fs/btrfs/qgroup.c b/fs/btrfs/qgroup.c
index 4e6ef49..472302a 100644
--- a/fs/btrfs/qgroup.c
+++ b/fs/btrfs/qgroup.c
@@ -301,16 +301,16 @@
 
 			if (btrfs_qgroup_status_version(l, ptr) !=
 			    BTRFS_QGROUP_STATUS_VERSION) {
-				printk(KERN_ERR
-				 "btrfs: old qgroup version, quota disabled\n");
+				btrfs_err(fs_info,
+				 "old qgroup version, quota disabled");
 				goto out;
 			}
 			if (btrfs_qgroup_status_generation(l, ptr) !=
 			    fs_info->generation) {
 				flags |= BTRFS_QGROUP_STATUS_FLAG_INCONSISTENT;
-				printk(KERN_ERR
-					"btrfs: qgroup generation mismatch, "
-					"marked as inconsistent\n");
+				btrfs_err(fs_info,
+					"qgroup generation mismatch, "
+					"marked as inconsistent");
 			}
 			fs_info->qgroup_flags = btrfs_qgroup_status_flags(l,
 									  ptr);
@@ -325,7 +325,7 @@
 		qgroup = find_qgroup_rb(fs_info, found_key.offset);
 		if ((qgroup && found_key.type == BTRFS_QGROUP_INFO_KEY) ||
 		    (!qgroup && found_key.type == BTRFS_QGROUP_LIMIT_KEY)) {
-			printk(KERN_ERR "btrfs: inconsitent qgroup config\n");
+			btrfs_err(fs_info, "inconsistent qgroup config");
 			flags |= BTRFS_QGROUP_STATUS_FLAG_INCONSISTENT;
 		}
 		if (!qgroup) {
@@ -396,8 +396,8 @@
 		ret = add_relation_rb(fs_info, found_key.objectid,
 				      found_key.offset);
 		if (ret == -ENOENT) {
-			printk(KERN_WARNING
-				"btrfs: orphan qgroup relation 0x%llx->0x%llx\n",
+			btrfs_warn(fs_info,
+				"orphan qgroup relation 0x%llx->0x%llx",
 				found_key.objectid, found_key.offset);
 			ret = 0;	/* ignore the error */
 		}
@@ -644,8 +644,7 @@
 
 	l = path->nodes[0];
 	slot = path->slots[0];
-	qgroup_limit = btrfs_item_ptr(l, path->slots[0],
-				      struct btrfs_qgroup_limit_item);
+	qgroup_limit = btrfs_item_ptr(l, slot, struct btrfs_qgroup_limit_item);
 	btrfs_set_qgroup_limit_flags(l, qgroup_limit, flags);
 	btrfs_set_qgroup_limit_max_rfer(l, qgroup_limit, max_rfer);
 	btrfs_set_qgroup_limit_max_excl(l, qgroup_limit, max_excl);
@@ -687,8 +686,7 @@
 
 	l = path->nodes[0];
 	slot = path->slots[0];
-	qgroup_info = btrfs_item_ptr(l, path->slots[0],
-				 struct btrfs_qgroup_info_item);
+	qgroup_info = btrfs_item_ptr(l, slot, struct btrfs_qgroup_info_item);
 	btrfs_set_qgroup_info_generation(l, qgroup_info, trans->transid);
 	btrfs_set_qgroup_info_rfer(l, qgroup_info, qgroup->rfer);
 	btrfs_set_qgroup_info_rfer_cmpr(l, qgroup_info, qgroup->rfer_cmpr);
@@ -1161,7 +1159,7 @@
 				       limit->rsv_excl);
 	if (ret) {
 		fs_info->qgroup_flags |= BTRFS_QGROUP_STATUS_FLAG_INCONSISTENT;
-		printk(KERN_INFO "unable to update quota limit for %llu\n",
+		btrfs_info(fs_info, "unable to update quota limit for %llu",
 		       qgroupid);
 	}
 
@@ -1349,7 +1347,6 @@
 			     struct btrfs_delayed_ref_node *node,
 			     struct btrfs_delayed_extent_op *extent_op)
 {
-	struct btrfs_key ins;
 	struct btrfs_root *quota_root;
 	u64 ref_root;
 	struct btrfs_qgroup *qgroup;
@@ -1363,10 +1360,6 @@
 
 	BUG_ON(!fs_info->quota_root);
 
-	ins.objectid = node->bytenr;
-	ins.offset = node->num_bytes;
-	ins.type = BTRFS_EXTENT_ITEM_KEY;
-
 	if (node->type == BTRFS_TREE_BLOCK_REF_KEY ||
 	    node->type == BTRFS_SHARED_BLOCK_REF_KEY) {
 		struct btrfs_delayed_tree_ref *ref;
@@ -1840,7 +1833,9 @@
 {
 	if (list_empty(&trans->qgroup_ref_list) && !trans->delayed_ref_elem.seq)
 		return;
-	pr_err("btrfs: qgroups not uptodate in trans handle %p: list is%s empty, seq is %#x.%x\n",
+	btrfs_err(trans->root->fs_info,
+		"qgroups not uptodate in trans handle %p: list is%s empty, "
+		"seq is %#x.%x",
 		trans, list_empty(&trans->qgroup_ref_list) ? "" : " not",
 		(u32)(trans->delayed_ref_elem.seq >> 32),
 		(u32)trans->delayed_ref_elem.seq);
@@ -1902,9 +1897,17 @@
 	mutex_unlock(&fs_info->qgroup_rescan_lock);
 
 	for (; slot < btrfs_header_nritems(scratch_leaf); ++slot) {
+		u64 num_bytes;
+
 		btrfs_item_key_to_cpu(scratch_leaf, &found, slot);
-		if (found.type != BTRFS_EXTENT_ITEM_KEY)
+		if (found.type != BTRFS_EXTENT_ITEM_KEY &&
+		    found.type != BTRFS_METADATA_ITEM_KEY)
 			continue;
+		if (found.type == BTRFS_METADATA_ITEM_KEY)
+			num_bytes = fs_info->extent_root->leafsize;
+		else
+			num_bytes = found.offset;
+
 		ret = btrfs_find_all_roots(trans, fs_info, found.objectid,
 					   tree_mod_seq_elem.seq, &roots);
 		if (ret < 0)
@@ -1949,12 +1952,12 @@
 			struct btrfs_qgroup_list *glist;
 
 			qg = (struct btrfs_qgroup *)(uintptr_t) unode->aux;
-			qg->rfer += found.offset;
-			qg->rfer_cmpr += found.offset;
+			qg->rfer += num_bytes;
+			qg->rfer_cmpr += num_bytes;
 			WARN_ON(qg->tag >= seq);
 			if (qg->refcnt - seq == roots->nnodes) {
-				qg->excl += found.offset;
-				qg->excl_cmpr += found.offset;
+				qg->excl += num_bytes;
+				qg->excl_cmpr += num_bytes;
 			}
 			qgroup_dirty(fs_info, qg);
 
@@ -2037,10 +2040,10 @@
 	mutex_unlock(&fs_info->qgroup_rescan_lock);
 
 	if (err >= 0) {
-		pr_info("btrfs: qgroup scan completed%s\n",
+		btrfs_info(fs_info, "qgroup scan completed%s",
 			err == 2 ? " (inconsistency flag cleared)" : "");
 	} else {
-		pr_err("btrfs: qgroup scan failed with %d\n", err);
+		btrfs_err(fs_info, "qgroup scan failed with %d", err);
 	}
 
 	complete_all(&fs_info->qgroup_rescan_completion);
@@ -2096,7 +2099,7 @@
 
 	if (ret) {
 err:
-		pr_info("btrfs: qgroup_rescan_init failed with %d\n", ret);
+		btrfs_info(fs_info, "qgroup_rescan_init failed with %d", ret);
 		return ret;
 	}
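
The rescan loop above now accepts BTRFS_METADATA_ITEM_KEY entries; for those, key.offset holds the tree level rather than a byte count, so the accounted size has to come from the node size instead. A small sketch of that sizing rule (the helper name is illustrative, not part of the patch):

static u64 rescan_item_bytes(struct btrfs_fs_info *fs_info,
			     const struct btrfs_key *found)
{
	/* metadata items encode the level in offset; use the node size */
	if (found->type == BTRFS_METADATA_ITEM_KEY)
		return fs_info->extent_root->leafsize;
	/* regular extent items store the extent length in offset */
	return found->offset;
}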
 
diff --git a/fs/btrfs/raid56.c b/fs/btrfs/raid56.c
index 24ac218..9af0b25 100644
--- a/fs/btrfs/raid56.c
+++ b/fs/btrfs/raid56.c
@@ -1032,8 +1032,8 @@
 
 	/* see if we can add this page onto our existing bio */
 	if (last) {
-		last_end = (u64)last->bi_sector << 9;
-		last_end += last->bi_size;
+		last_end = (u64)last->bi_iter.bi_sector << 9;
+		last_end += last->bi_iter.bi_size;
 
 		/*
 		 * we can't merge these if they are from different
@@ -1053,9 +1053,9 @@
 	if (!bio)
 		return -ENOMEM;
 
-	bio->bi_size = 0;
+	bio->bi_iter.bi_size = 0;
 	bio->bi_bdev = stripe->dev->bdev;
-	bio->bi_sector = disk_start >> 9;
+	bio->bi_iter.bi_sector = disk_start >> 9;
 	set_bit(BIO_UPTODATE, &bio->bi_flags);
 
 	bio_add_page(bio, page, PAGE_CACHE_SIZE, 0);
@@ -1111,7 +1111,7 @@
 
 	spin_lock_irq(&rbio->bio_list_lock);
 	bio_list_for_each(bio, &rbio->bio_list) {
-		start = (u64)bio->bi_sector << 9;
+		start = (u64)bio->bi_iter.bi_sector << 9;
 		stripe_offset = start - rbio->raid_map[0];
 		page_index = stripe_offset >> PAGE_CACHE_SHIFT;
 
@@ -1272,7 +1272,7 @@
 static int find_bio_stripe(struct btrfs_raid_bio *rbio,
 			   struct bio *bio)
 {
-	u64 physical = bio->bi_sector;
+	u64 physical = bio->bi_iter.bi_sector;
 	u64 stripe_start;
 	int i;
 	struct btrfs_bio_stripe *stripe;
@@ -1298,7 +1298,7 @@
 static int find_logical_bio_stripe(struct btrfs_raid_bio *rbio,
 				   struct bio *bio)
 {
-	u64 logical = bio->bi_sector;
+	u64 logical = bio->bi_iter.bi_sector;
 	u64 stripe_start;
 	int i;
 
@@ -1602,8 +1602,8 @@
 						 plug_list);
 	struct btrfs_raid_bio *rb = container_of(b, struct btrfs_raid_bio,
 						 plug_list);
-	u64 a_sector = ra->bio_list.head->bi_sector;
-	u64 b_sector = rb->bio_list.head->bi_sector;
+	u64 a_sector = ra->bio_list.head->bi_iter.bi_sector;
+	u64 b_sector = rb->bio_list.head->bi_iter.bi_sector;
 
 	if (a_sector < b_sector)
 		return -1;
@@ -1691,7 +1691,7 @@
 	if (IS_ERR(rbio))
 		return PTR_ERR(rbio);
 	bio_list_add(&rbio->bio_list, bio);
-	rbio->bio_list_bytes = bio->bi_size;
+	rbio->bio_list_bytes = bio->bi_iter.bi_size;
 
 	/*
 	 * don't plug on full rbios, just get them out the door
@@ -2044,7 +2044,7 @@
 
 	rbio->read_rebuild = 1;
 	bio_list_add(&rbio->bio_list, bio);
-	rbio->bio_list_bytes = bio->bi_size;
+	rbio->bio_list_bytes = bio->bi_iter.bi_size;
 
 	rbio->faila = find_logical_bio_stripe(rbio, bio);
 	if (rbio->faila == -1) {
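
The bi_sector and bi_size accesses rewritten in this file (and in scrub.c further down) follow the 3.14 immutable-biovec conversion, where a bio's current sector and remaining size live in the embedded bvec_iter. A minimal sketch of the new field layout as these hunks use it (the function name is illustrative):

static void rbio_init_bio(struct bio *bio, struct block_device *bdev,
			  u64 disk_start)
{
	bio->bi_bdev = bdev;
	bio->bi_iter.bi_sector = disk_start >> 9;	/* was bio->bi_sector */
	bio->bi_iter.bi_size = 0;			/* was bio->bi_size */
}
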
diff --git a/fs/btrfs/reada.c b/fs/btrfs/reada.c
index 1031b69..31c797c 100644
--- a/fs/btrfs/reada.c
+++ b/fs/btrfs/reada.c
@@ -189,8 +189,8 @@
 			 */
 #ifdef DEBUG
 			if (rec->generation != generation) {
-				printk(KERN_DEBUG "generation mismatch for "
-						"(%llu,%d,%llu) %llu != %llu\n",
+				btrfs_debug(root->fs_info,
+					   "generation mismatch for (%llu,%d,%llu) %llu != %llu",
 				       key.objectid, key.type, key.offset,
 				       rec->generation, generation);
 			}
@@ -365,8 +365,9 @@
 		goto error;
 
 	if (bbio->num_stripes > BTRFS_MAX_MIRRORS) {
-		printk(KERN_ERR "btrfs readahead: more than %d copies not "
-				"supported", BTRFS_MAX_MIRRORS);
+		btrfs_err(root->fs_info,
+			   "readahead: more than %d copies not supported",
+			   BTRFS_MAX_MIRRORS);
 		goto error;
 	}
 
diff --git a/fs/btrfs/relocation.c b/fs/btrfs/relocation.c
index 429c73c..07b3b36 100644
--- a/fs/btrfs/relocation.c
+++ b/fs/btrfs/relocation.c
@@ -94,6 +94,7 @@
 
 #define LOWER	0
 #define UPPER	1
+#define RELOCATION_RESERVED_NODES	256
 
 struct backref_cache {
 	/* red black tree of all backref nodes in the cache */
@@ -176,6 +177,8 @@
 	u64 merging_rsv_size;
 	/* size of relocated tree nodes */
 	u64 nodes_relocated;
+	/* reserved size for block group relocation*/
+	u64 reserved_bytes;
 
 	u64 search_start;
 	u64 extents_found;
@@ -184,7 +187,6 @@
 	unsigned int create_reloc_tree:1;
 	unsigned int merge_reloc_tree:1;
 	unsigned int found_file_extent:1;
-	unsigned int commit_transaction:1;
 };
 
 /* stages of data relocation */
@@ -2309,9 +2311,6 @@
 		reloc_root = list_entry(list->next, struct btrfs_root,
 					root_list);
 		__del_reloc_root(reloc_root);
-		free_extent_buffer(reloc_root->node);
-		free_extent_buffer(reloc_root->commit_root);
-		kfree(reloc_root);
 	}
 }
 
@@ -2353,10 +2352,9 @@
 
 			ret = merge_reloc_root(rc, root);
 			if (ret) {
-				__del_reloc_root(reloc_root);
-				free_extent_buffer(reloc_root->node);
-				free_extent_buffer(reloc_root->commit_root);
-				kfree(reloc_root);
+				if (list_empty(&reloc_root->root_list))
+					list_add_tail(&reloc_root->root_list,
+						      &reloc_roots);
 				goto out;
 			}
 		} else {
@@ -2452,7 +2450,7 @@
 struct btrfs_root *select_reloc_root(struct btrfs_trans_handle *trans,
 				     struct reloc_control *rc,
 				     struct backref_node *node,
-				     struct backref_edge *edges[], int *nr)
+				     struct backref_edge *edges[])
 {
 	struct backref_node *next;
 	struct btrfs_root *root;
@@ -2494,7 +2492,6 @@
 	if (!root)
 		return NULL;
 
-	*nr = index;
 	next = node;
 	/* setup backref node path for btrfs_reloc_cow_block */
 	while (1) {
@@ -2590,28 +2587,36 @@
 	struct btrfs_root *root = rc->extent_root;
 	u64 num_bytes;
 	int ret;
+	u64 tmp;
 
 	num_bytes = calcu_metadata_size(rc, node, 1) * 2;
 
 	trans->block_rsv = rc->block_rsv;
-	ret = btrfs_block_rsv_add(root, rc->block_rsv, num_bytes,
-				  BTRFS_RESERVE_FLUSH_ALL);
+	rc->reserved_bytes += num_bytes;
+	ret = btrfs_block_rsv_refill(root, rc->block_rsv, num_bytes,
+				BTRFS_RESERVE_FLUSH_ALL);
 	if (ret) {
-		if (ret == -EAGAIN)
-			rc->commit_transaction = 1;
+		if (ret == -EAGAIN) {
+			tmp = rc->extent_root->nodesize *
+				RELOCATION_RESERVED_NODES;
+			while (tmp <= rc->reserved_bytes)
+				tmp <<= 1;
+			/*
+			 * Only one thread can access block_rsv at this point,
+			 * so we don't need to hold a lock to protect it.
+			 * We expand the reservation size here to allow enough
+			 * space for relocation, and we will return earlier in
+			 * the ENOSPC case.
+			 */
+			rc->block_rsv->size = tmp + rc->extent_root->nodesize *
+					      RELOCATION_RESERVED_NODES;
+		}
 		return ret;
 	}
 
 	return 0;
 }
 
-static void release_metadata_space(struct reloc_control *rc,
-				   struct backref_node *node)
-{
-	u64 num_bytes = calcu_metadata_size(rc, node, 0) * 2;
-	btrfs_block_rsv_release(rc->extent_root, rc->block_rsv, num_bytes);
-}
-
 /*
  * relocate a block tree, and then update pointers in upper level
  * blocks that reference the block to point to the new location.
@@ -2633,7 +2638,6 @@
 	u32 blocksize;
 	u64 bytenr;
 	u64 generation;
-	int nr;
 	int slot;
 	int ret;
 	int err = 0;
@@ -2646,7 +2650,7 @@
 		cond_resched();
 
 		upper = edge->node[UPPER];
-		root = select_reloc_root(trans, rc, upper, edges, &nr);
+		root = select_reloc_root(trans, rc, upper, edges);
 		BUG_ON(!root);
 
 		if (upper->eb && !upper->locked) {
@@ -2898,7 +2902,6 @@
 				struct btrfs_path *path)
 {
 	struct btrfs_root *root;
-	int release = 0;
 	int ret = 0;
 
 	if (!node)
@@ -2915,7 +2918,6 @@
 		ret = reserve_metadata_space(trans, rc, node);
 		if (ret)
 			goto out;
-		release = 1;
 	}
 
 	if (root) {
@@ -2940,11 +2942,8 @@
 		ret = do_relocation(trans, rc, node, key, path, 1);
 	}
 out:
-	if (ret || node->level == 0 || node->cowonly) {
-		if (release)
-			release_metadata_space(rc, node);
+	if (ret || node->level == 0 || node->cowonly)
 		remove_backref_node(&rc->backref_cache, node);
-	}
 	return ret;
 }
 
@@ -3867,29 +3866,20 @@
 int prepare_to_relocate(struct reloc_control *rc)
 {
 	struct btrfs_trans_handle *trans;
-	int ret;
 
 	rc->block_rsv = btrfs_alloc_block_rsv(rc->extent_root,
 					      BTRFS_BLOCK_RSV_TEMP);
 	if (!rc->block_rsv)
 		return -ENOMEM;
 
-	/*
-	 * reserve some space for creating reloc trees.
-	 * btrfs_init_reloc_root will use them when there
-	 * is no reservation in transaction handle.
-	 */
-	ret = btrfs_block_rsv_add(rc->extent_root, rc->block_rsv,
-				  rc->extent_root->nodesize * 256,
-				  BTRFS_RESERVE_FLUSH_ALL);
-	if (ret)
-		return ret;
-
 	memset(&rc->cluster, 0, sizeof(rc->cluster));
 	rc->search_start = rc->block_group->key.objectid;
 	rc->extents_found = 0;
 	rc->nodes_relocated = 0;
 	rc->merging_rsv_size = 0;
+	rc->reserved_bytes = 0;
+	rc->block_rsv->size = rc->extent_root->nodesize *
+			      RELOCATION_RESERVED_NODES;
 
 	rc->create_reloc_tree = 1;
 	set_reloc_control(rc);
@@ -3933,6 +3923,14 @@
 	}
 
 	while (1) {
+		rc->reserved_bytes = 0;
+		ret = btrfs_block_rsv_refill(rc->extent_root,
+					rc->block_rsv, rc->block_rsv->size,
+					BTRFS_RESERVE_FLUSH_ALL);
+		if (ret) {
+			err = ret;
+			break;
+		}
 		progress++;
 		trans = btrfs_start_transaction(rc->extent_root, 0);
 		if (IS_ERR(trans)) {
@@ -4011,6 +4009,12 @@
 		if (!RB_EMPTY_ROOT(&blocks)) {
 			ret = relocate_tree_blocks(trans, rc, &blocks);
 			if (ret < 0) {
+				/*
+				 * If we fail to relocate tree blocks, force the backref
+				 * cache to be updated when the transaction is committed.
+				 */
+				rc->backref_cache.last_trans = trans->transid - 1;
+
 				if (ret != -EAGAIN) {
 					err = ret;
 					break;
@@ -4020,14 +4024,8 @@
 			}
 		}
 
-		if (rc->commit_transaction) {
-			rc->commit_transaction = 0;
-			ret = btrfs_commit_transaction(trans, rc->extent_root);
-			BUG_ON(ret);
-		} else {
-			btrfs_end_transaction_throttle(trans, rc->extent_root);
-			btrfs_btree_balance_dirty(rc->extent_root);
-		}
+		btrfs_end_transaction_throttle(trans, rc->extent_root);
+		btrfs_btree_balance_dirty(rc->extent_root);
 		trans = NULL;
 
 		if (rc->stage == MOVE_DATA_EXTENTS &&
@@ -4247,7 +4245,7 @@
 		goto out;
 	}
 
-	printk(KERN_INFO "btrfs: relocating block group %llu flags %llu\n",
+	btrfs_info(extent_root->fs_info, "relocating block group %llu flags %llu",
 	       rc->block_group->key.objectid, rc->block_group->flags);
 
 	ret = btrfs_start_delalloc_roots(fs_info, 0);
@@ -4269,7 +4267,7 @@
 		if (rc->extents_found == 0)
 			break;
 
-		printk(KERN_INFO "btrfs: found %llu extents\n",
+		btrfs_info(extent_root->fs_info, "found %llu extents",
 			rc->extents_found);
 
 		if (rc->stage == MOVE_DATA_EXTENTS && rc->found_file_extent) {
@@ -4285,11 +4283,6 @@
 		}
 	}
 
-	filemap_write_and_wait_range(fs_info->btree_inode->i_mapping,
-				     rc->block_group->key.objectid,
-				     rc->block_group->key.objectid +
-				     rc->block_group->key.offset - 1);
-
 	WARN_ON(rc->block_group->pinned > 0);
 	WARN_ON(rc->block_group->reserved > 0);
 	WARN_ON(btrfs_block_group_used(&rc->block_group->item) > 0);
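
The relocation rework above drops the old one-shot 256-node reservation in favor of refilling rc->block_rsv once per pass and growing rc->block_rsv->size when reserve_metadata_space() hits -EAGAIN. A sketch of that growth rule, pulled out here only for clarity (it is open-coded in the hunk, not a separate helper):

static u64 grow_reloc_rsv_size(u64 nodesize, u64 reserved_bytes)
{
	u64 tmp = nodesize * RELOCATION_RESERVED_NODES;

	/* double the base until it covers what this pass already used */
	while (tmp <= reserved_bytes)
		tmp <<= 1;
	/* keep headroom for another batch of reserved nodes */
	return tmp + nodesize * RELOCATION_RESERVED_NODES;
}
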
diff --git a/fs/btrfs/root-tree.c b/fs/btrfs/root-tree.c
index ec71ea4..1389b69 100644
--- a/fs/btrfs/root-tree.c
+++ b/fs/btrfs/root-tree.c
@@ -44,7 +44,7 @@
 	if (!need_reset && btrfs_root_generation(item)
 		!= btrfs_root_generation_v2(item)) {
 		if (btrfs_root_generation_v2(item) != 0) {
-			printk(KERN_WARNING "btrfs: mismatching "
+			printk(KERN_WARNING "BTRFS: mismatching "
 					"generation and generation_v2 "
 					"found in root item. This root "
 					"was probably mounted with an "
@@ -154,7 +154,7 @@
 
 	if (ret != 0) {
 		btrfs_print_leaf(root, path->nodes[0]);
-		printk(KERN_CRIT "unable to update root key %llu %u %llu\n",
+		btrfs_crit(root->fs_info, "unable to update root key %llu %u %llu",
 		       key->objectid, key->type, key->offset);
 		BUG_ON(1);
 	}
@@ -400,21 +400,6 @@
 	return err;
 }
 
-int btrfs_find_root_ref(struct btrfs_root *tree_root,
-		   struct btrfs_path *path,
-		   u64 root_id, u64 ref_id)
-{
-	struct btrfs_key key;
-	int ret;
-
-	key.objectid = root_id;
-	key.type = BTRFS_ROOT_REF_KEY;
-	key.offset = ref_id;
-
-	ret = btrfs_search_slot(NULL, tree_root, &key, path, 0, 0);
-	return ret;
-}
-
 /*
  * add a btrfs_root_ref item.  type is either BTRFS_ROOT_REF_KEY
  * or BTRFS_ROOT_BACKREF_KEY.
diff --git a/fs/btrfs/scrub.c b/fs/btrfs/scrub.c
index 1fd3f33..efba5d1 100644
--- a/fs/btrfs/scrub.c
+++ b/fs/btrfs/scrub.c
@@ -256,6 +256,8 @@
 static int copy_nocow_pages(struct scrub_ctx *sctx, u64 logical, u64 len,
 			    int mirror_num, u64 physical_for_dev_replace);
 static void copy_nocow_pages_worker(struct btrfs_work *work);
+static void __scrub_blocked_if_needed(struct btrfs_fs_info *fs_info);
+static void scrub_blocked_if_needed(struct btrfs_fs_info *fs_info);
 
 
 static void scrub_pending_bio_inc(struct scrub_ctx *sctx)
@@ -269,6 +271,29 @@
 	wake_up(&sctx->list_wait);
 }
 
+static void __scrub_blocked_if_needed(struct btrfs_fs_info *fs_info)
+{
+	while (atomic_read(&fs_info->scrub_pause_req)) {
+		mutex_unlock(&fs_info->scrub_lock);
+		wait_event(fs_info->scrub_pause_wait,
+		   atomic_read(&fs_info->scrub_pause_req) == 0);
+		mutex_lock(&fs_info->scrub_lock);
+	}
+}
+
+static void scrub_blocked_if_needed(struct btrfs_fs_info *fs_info)
+{
+	atomic_inc(&fs_info->scrubs_paused);
+	wake_up(&fs_info->scrub_pause_wait);
+
+	mutex_lock(&fs_info->scrub_lock);
+	__scrub_blocked_if_needed(fs_info);
+	atomic_dec(&fs_info->scrubs_paused);
+	mutex_unlock(&fs_info->scrub_lock);
+
+	wake_up(&fs_info->scrub_pause_wait);
+}
+
 /*
  * used for workers that require transaction commits (i.e., for the
  * NOCOW case)
@@ -480,7 +505,7 @@
 	 * hold all of the paths here
 	 */
 	for (i = 0; i < ipath->fspath->elem_cnt; ++i)
-		printk_in_rcu(KERN_WARNING "btrfs: %s at logical %llu on dev "
+		printk_in_rcu(KERN_WARNING "BTRFS: %s at logical %llu on dev "
 			"%s, sector %llu, root %llu, inode %llu, offset %llu, "
 			"length %llu, links %u (path: %s)\n", swarn->errstr,
 			swarn->logical, rcu_str_deref(swarn->dev->name),
@@ -492,7 +517,7 @@
 	return 0;
 
 err:
-	printk_in_rcu(KERN_WARNING "btrfs: %s at logical %llu on dev "
+	printk_in_rcu(KERN_WARNING "BTRFS: %s at logical %llu on dev "
 		"%s, sector %llu, root %llu, inode %llu, offset %llu: path "
 		"resolving failed with ret=%d\n", swarn->errstr,
 		swarn->logical, rcu_str_deref(swarn->dev->name),
@@ -555,7 +580,7 @@
 			ret = tree_backref_for_extent(&ptr, eb, ei, item_size,
 							&ref_root, &ref_level);
 			printk_in_rcu(KERN_WARNING
-				"btrfs: %s at logical %llu on dev %s, "
+				"BTRFS: %s at logical %llu on dev %s, "
 				"sector %llu: metadata %s (level %d) in tree "
 				"%llu\n", errstr, swarn.logical,
 				rcu_str_deref(dev->name),
@@ -704,13 +729,11 @@
 	struct scrub_fixup_nodatasum *fixup;
 	struct scrub_ctx *sctx;
 	struct btrfs_trans_handle *trans = NULL;
-	struct btrfs_fs_info *fs_info;
 	struct btrfs_path *path;
 	int uncorrectable = 0;
 
 	fixup = container_of(work, struct scrub_fixup_nodatasum, work);
 	sctx = fixup->sctx;
-	fs_info = fixup->root->fs_info;
 
 	path = btrfs_alloc_path();
 	if (!path) {
@@ -759,8 +782,8 @@
 		btrfs_dev_replace_stats_inc(
 			&sctx->dev_root->fs_info->dev_replace.
 			num_uncorrectable_read_errors);
-		printk_ratelimited_in_rcu(KERN_ERR
-			"btrfs: unable to fixup (nodatasum) error at logical %llu on dev %s\n",
+		printk_ratelimited_in_rcu(KERN_ERR "BTRFS: "
+		    "unable to fixup (nodatasum) error at logical %llu on dev %s\n",
 			fixup->logical, rcu_str_deref(fixup->dev->name));
 	}
 
@@ -1161,7 +1184,7 @@
 			sctx->stat.corrected_errors++;
 			spin_unlock(&sctx->stat_lock);
 			printk_ratelimited_in_rcu(KERN_ERR
-				"btrfs: fixed up error at logical %llu on dev %s\n",
+				"BTRFS: fixed up error at logical %llu on dev %s\n",
 				logical, rcu_str_deref(dev->name));
 		}
 	} else {
@@ -1170,7 +1193,7 @@
 		sctx->stat.uncorrectable_errors++;
 		spin_unlock(&sctx->stat_lock);
 		printk_ratelimited_in_rcu(KERN_ERR
-			"btrfs: unable to fixup (regular) error at logical %llu on dev %s\n",
+			"BTRFS: unable to fixup (regular) error at logical %llu on dev %s\n",
 			logical, rcu_str_deref(dev->name));
 	}
 
@@ -1308,7 +1331,7 @@
 			continue;
 		}
 		bio->bi_bdev = page->dev->bdev;
-		bio->bi_sector = page->physical >> 9;
+		bio->bi_iter.bi_sector = page->physical >> 9;
 
 		bio_add_page(bio, page->page, PAGE_SIZE, 0);
 		if (btrfsic_submit_bio_wait(READ, bio))
@@ -1418,8 +1441,9 @@
 		int ret;
 
 		if (!page_bad->dev->bdev) {
-			printk_ratelimited(KERN_WARNING
-				"btrfs: scrub_repair_page_from_good_copy(bdev == NULL) is unexpected!\n");
+			printk_ratelimited(KERN_WARNING "BTRFS: "
+				"scrub_repair_page_from_good_copy(bdev == NULL) "
+				"is unexpected!\n");
 			return -EIO;
 		}
 
@@ -1427,7 +1451,7 @@
 		if (!bio)
 			return -EIO;
 		bio->bi_bdev = page_bad->dev->bdev;
-		bio->bi_sector = page_bad->physical >> 9;
+		bio->bi_iter.bi_sector = page_bad->physical >> 9;
 
 		ret = bio_add_page(bio, page_good->page, PAGE_SIZE, 0);
 		if (PAGE_SIZE != ret) {
@@ -1520,7 +1544,7 @@
 		bio->bi_private = sbio;
 		bio->bi_end_io = scrub_wr_bio_end_io;
 		bio->bi_bdev = sbio->dev->bdev;
-		bio->bi_sector = sbio->physical >> 9;
+		bio->bi_iter.bi_sector = sbio->physical >> 9;
 		sbio->err = 0;
 	} else if (sbio->physical + sbio->page_count * PAGE_SIZE !=
 		   spage->physical_for_dev_replace ||
@@ -1877,7 +1901,7 @@
 		 * This case is handled correctly (but _very_ slowly).
 		 */
 		printk_ratelimited(KERN_WARNING
-			"btrfs: scrub_submit(bio bdev == NULL) is unexpected!\n");
+			"BTRFS: scrub_submit(bio bdev == NULL) is unexpected!\n");
 		bio_endio(sbio->bio, -EIO);
 	} else {
 		btrfsic_submit_bio(READ, sbio->bio);
@@ -1926,7 +1950,7 @@
 		bio->bi_private = sbio;
 		bio->bi_end_io = scrub_bio_end_io;
 		bio->bi_bdev = sbio->dev->bdev;
-		bio->bi_sector = sbio->physical >> 9;
+		bio->bi_iter.bi_sector = sbio->physical >> 9;
 		sbio->err = 0;
 	} else if (sbio->physical + sbio->page_count * PAGE_SIZE !=
 		   spage->physical ||
@@ -2286,8 +2310,7 @@
 
 	wait_event(sctx->list_wait,
 		   atomic_read(&sctx->bios_in_flight) == 0);
-	atomic_inc(&fs_info->scrubs_paused);
-	wake_up(&fs_info->scrub_pause_wait);
+	scrub_blocked_if_needed(fs_info);
 
 	/* FIXME it might be better to start readahead at commit root */
 	key_start.objectid = logical;
@@ -2311,16 +2334,6 @@
 	if (!IS_ERR(reada2))
 		btrfs_reada_wait(reada2);
 
-	mutex_lock(&fs_info->scrub_lock);
-	while (atomic_read(&fs_info->scrub_pause_req)) {
-		mutex_unlock(&fs_info->scrub_lock);
-		wait_event(fs_info->scrub_pause_wait,
-		   atomic_read(&fs_info->scrub_pause_req) == 0);
-		mutex_lock(&fs_info->scrub_lock);
-	}
-	atomic_dec(&fs_info->scrubs_paused);
-	mutex_unlock(&fs_info->scrub_lock);
-	wake_up(&fs_info->scrub_pause_wait);
 
 	/*
 	 * collect all data csums for the stripe to avoid seeking during
@@ -2357,22 +2370,14 @@
 			wait_event(sctx->list_wait,
 				   atomic_read(&sctx->bios_in_flight) == 0);
 			atomic_set(&sctx->wr_ctx.flush_all_writes, 0);
-			atomic_inc(&fs_info->scrubs_paused);
-			wake_up(&fs_info->scrub_pause_wait);
-			mutex_lock(&fs_info->scrub_lock);
-			while (atomic_read(&fs_info->scrub_pause_req)) {
-				mutex_unlock(&fs_info->scrub_lock);
-				wait_event(fs_info->scrub_pause_wait,
-				   atomic_read(&fs_info->scrub_pause_req) == 0);
-				mutex_lock(&fs_info->scrub_lock);
-			}
-			atomic_dec(&fs_info->scrubs_paused);
-			mutex_unlock(&fs_info->scrub_lock);
-			wake_up(&fs_info->scrub_pause_wait);
+			scrub_blocked_if_needed(fs_info);
 		}
 
+		if (btrfs_fs_incompat(fs_info, SKINNY_METADATA))
+			key.type = BTRFS_METADATA_ITEM_KEY;
+		else
+			key.type = BTRFS_EXTENT_ITEM_KEY;
 		key.objectid = logical;
-		key.type = BTRFS_EXTENT_ITEM_KEY;
 		key.offset = (u64)-1;
 
 		ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
@@ -2380,8 +2385,7 @@
 			goto out;
 
 		if (ret > 0) {
-			ret = btrfs_previous_item(root, path, 0,
-						  BTRFS_EXTENT_ITEM_KEY);
+			ret = btrfs_previous_extent_item(root, path, 0);
 			if (ret < 0)
 				goto out;
 			if (ret > 0) {
@@ -2439,9 +2443,9 @@
 
 			if (key.objectid < logical &&
 			    (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK)) {
-				printk(KERN_ERR
-				       "btrfs scrub: tree block %llu spanning "
-				       "stripes, ignored. logical=%llu\n",
+				btrfs_err(fs_info,
+					   "scrub: tree block %llu spanning "
+					   "stripes, ignored. logical=%llu",
 				       key.objectid, logical);
 				goto next;
 			}
@@ -2683,21 +2687,9 @@
 		wait_event(sctx->list_wait,
 			   atomic_read(&sctx->bios_in_flight) == 0);
 		atomic_set(&sctx->wr_ctx.flush_all_writes, 0);
-		atomic_inc(&fs_info->scrubs_paused);
-		wake_up(&fs_info->scrub_pause_wait);
 		wait_event(sctx->list_wait,
 			   atomic_read(&sctx->workers_pending) == 0);
-
-		mutex_lock(&fs_info->scrub_lock);
-		while (atomic_read(&fs_info->scrub_pause_req)) {
-			mutex_unlock(&fs_info->scrub_lock);
-			wait_event(fs_info->scrub_pause_wait,
-			   atomic_read(&fs_info->scrub_pause_req) == 0);
-			mutex_lock(&fs_info->scrub_lock);
-		}
-		atomic_dec(&fs_info->scrubs_paused);
-		mutex_unlock(&fs_info->scrub_lock);
-		wake_up(&fs_info->scrub_pause_wait);
+		scrub_blocked_if_needed(fs_info);
 
 		btrfs_put_block_group(cache);
 		if (ret)
@@ -2823,8 +2815,8 @@
 	 * check some assumptions
 	 */
 	if (fs_info->chunk_root->nodesize != fs_info->chunk_root->leafsize) {
-		printk(KERN_ERR
-		       "btrfs_scrub: size assumption nodesize == leafsize (%d == %d) fails\n",
+		btrfs_err(fs_info,
+			   "scrub: size assumption nodesize == leafsize (%d == %d) fails",
 		       fs_info->chunk_root->nodesize,
 		       fs_info->chunk_root->leafsize);
 		return -EINVAL;
@@ -2836,16 +2828,17 @@
 		 * the way scrub is implemented. Do not handle this
 		 * situation at all because it won't ever happen.
 		 */
-		printk(KERN_ERR
-		       "btrfs_scrub: size assumption nodesize <= BTRFS_STRIPE_LEN (%d <= %d) fails\n",
+		btrfs_err(fs_info,
+			   "scrub: size assumption nodesize <= BTRFS_STRIPE_LEN (%d <= %d) fails",
 		       fs_info->chunk_root->nodesize, BTRFS_STRIPE_LEN);
 		return -EINVAL;
 	}
 
 	if (fs_info->chunk_root->sectorsize != PAGE_SIZE) {
 		/* not supported for data w/o checksums */
-		printk(KERN_ERR
-		       "btrfs_scrub: size assumption sectorsize != PAGE_SIZE (%d != %lu) fails\n",
+		btrfs_err(fs_info,
+			   "scrub: size assumption sectorsize != PAGE_SIZE "
+			   "(%d != %lu) fails",
 		       fs_info->chunk_root->sectorsize, PAGE_SIZE);
 		return -EINVAL;
 	}
@@ -2858,7 +2851,8 @@
 		 * would exhaust the array bounds of pagev member in
 		 * struct scrub_block
 		 */
-		pr_err("btrfs_scrub: size assumption nodesize and sectorsize <= SCRUB_MAX_PAGES_PER_BLOCK (%d <= %d && %d <= %d) fails\n",
+		btrfs_err(fs_info, "scrub: size assumption nodesize and sectorsize "
+			   "<= SCRUB_MAX_PAGES_PER_BLOCK (%d <= %d && %d <= %d) fails",
 		       fs_info->chunk_root->nodesize,
 		       SCRUB_MAX_PAGES_PER_BLOCK,
 		       fs_info->chunk_root->sectorsize,
@@ -2908,7 +2902,13 @@
 	}
 	sctx->readonly = readonly;
 	dev->scrub_device = sctx;
+	mutex_unlock(&fs_info->fs_devices->device_list_mutex);
 
+	/*
+	 * By checking @scrub_pause_req here, we can avoid a race
+	 * between transaction commit and scrubbing.
+	 */
+	__scrub_blocked_if_needed(fs_info);
 	atomic_inc(&fs_info->scrubs_running);
 	mutex_unlock(&fs_info->scrub_lock);
 
@@ -2917,9 +2917,10 @@
 		 * by holding device list mutex, we can
 		 * kick off writing super in log tree sync.
 		 */
+		mutex_lock(&fs_info->fs_devices->device_list_mutex);
 		ret = scrub_supers(sctx, dev);
+		mutex_unlock(&fs_info->fs_devices->device_list_mutex);
 	}
-	mutex_unlock(&fs_info->fs_devices->device_list_mutex);
 
 	if (!ret)
 		ret = scrub_enumerate_chunks(sctx, dev, start, end,
@@ -3167,7 +3168,8 @@
 	ret = iterate_inodes_from_logical(logical, fs_info, path,
 					  record_inode_for_nocow, nocow_ctx);
 	if (ret != 0 && ret != -ENOENT) {
-		pr_warn("iterate_inodes_from_logical() failed: log %llu, phys %llu, len %llu, mir %u, ret %d\n",
+		btrfs_warn(fs_info, "iterate_inodes_from_logical() failed: log %llu, "
+			"phys %llu, len %llu, mir %u, ret %d",
 			logical, physical_for_dev_replace, len, mirror_num,
 			ret);
 		not_written = 1;
@@ -3289,7 +3291,7 @@
 again:
 		page = find_or_create_page(inode->i_mapping, index, GFP_NOFS);
 		if (!page) {
-			pr_err("find_or_create_page() failed\n");
+			btrfs_err(fs_info, "find_or_create_page() failed");
 			ret = -ENOMEM;
 			goto out;
 		}
@@ -3361,7 +3363,7 @@
 		return -EIO;
 	if (!dev->bdev) {
 		printk_ratelimited(KERN_WARNING
-			"btrfs: scrub write_page_nocow(bdev == NULL) is unexpected!\n");
+			"BTRFS: scrub write_page_nocow(bdev == NULL) is unexpected!\n");
 		return -EIO;
 	}
 	bio = btrfs_io_bio_alloc(GFP_NOFS, 1);
@@ -3371,8 +3373,8 @@
 		spin_unlock(&sctx->stat_lock);
 		return -ENOMEM;
 	}
-	bio->bi_size = 0;
-	bio->bi_sector = physical_for_dev_replace >> 9;
+	bio->bi_iter.bi_size = 0;
+	bio->bi_iter.bi_sector = physical_for_dev_replace >> 9;
 	bio->bi_bdev = dev->bdev;
 	ret = bio_add_page(bio, page, PAGE_CACHE_SIZE, 0);
 	if (ret != PAGE_CACHE_SIZE) {
diff --git a/fs/btrfs/send.c b/fs/btrfs/send.c
index 945d1db..9dde971 100644
--- a/fs/btrfs/send.c
+++ b/fs/btrfs/send.c
@@ -24,12 +24,12 @@
 #include <linux/xattr.h>
 #include <linux/posix_acl_xattr.h>
 #include <linux/radix-tree.h>
-#include <linux/crc32c.h>
 #include <linux/vmalloc.h>
 #include <linux/string.h>
 
 #include "send.h"
 #include "backref.h"
+#include "hash.h"
 #include "locking.h"
 #include "disk-io.h"
 #include "btrfs_inode.h"
@@ -88,8 +88,6 @@
 	u64 cmd_send_size[BTRFS_SEND_C_MAX + 1];
 	u64 flags;	/* 'flags' member of btrfs_ioctl_send_args is u64 */
 
-	struct vfsmount *mnt;
-
 	struct btrfs_root *send_root;
 	struct btrfs_root *parent_root;
 	struct clone_root *clone_roots;
@@ -111,6 +109,7 @@
 	int cur_inode_deleted;
 	u64 cur_inode_size;
 	u64 cur_inode_mode;
+	u64 cur_inode_last_extent;
 
 	u64 send_progress;
 
@@ -122,6 +121,74 @@
 	int name_cache_size;
 
 	char *read_buf;
+
+	/*
+	 * We process inodes in increasing order, so if before an
+	 * incremental send we reverse the parent/child relationship of
+	 * directories such that a directory with a lower inode number was
+	 * the parent of a directory with a higher inode number, and the one
+	 * becoming the new parent got renamed too, we can't rename/move the
+	 * directory with lower inode number when we finish processing it - we
+	 * must process the directory with higher inode number first, then
+	 * rename/move it and then rename/move the directory with lower inode
+	 * number. Example follows.
+	 *
+	 * Tree state when the first send was performed:
+	 *
+	 * .
+	 * |-- a                   (ino 257)
+	 *     |-- b               (ino 258)
+	 *         |
+	 *         |
+	 *         |-- c           (ino 259)
+	 *         |   |-- d       (ino 260)
+	 *         |
+	 *         |-- c2          (ino 261)
+	 *
+	 * Tree state when the second (incremental) send is performed:
+	 *
+	 * .
+	 * |-- a                   (ino 257)
+	 *     |-- b               (ino 258)
+	 *         |-- c2          (ino 261)
+	 *             |-- d2      (ino 260)
+	 *                 |-- cc  (ino 259)
+	 *
+	 * The sequence of steps that lead to the second state was:
+	 *
+	 * mv /a/b/c/d /a/b/c2/d2
+	 * mv /a/b/c /a/b/c2/d2/cc
+	 *
+	 * "c" has lower inode number, but we can't move it (2nd mv operation)
+	 * before we move "d", which has higher inode number.
+	 *
+	 * So we just memorize which move/rename operations must be performed
+	 * later when their respective parent is processed and moved/renamed.
+	 */
+
+	/* Indexed by parent directory inode number. */
+	struct rb_root pending_dir_moves;
+
+	/*
+	 * Reverse index, indexed by the inode number of a directory that
+	 * is waiting for the move/rename of its immediate parent before its
+	 * own move/rename can be performed.
+	 */
+	struct rb_root waiting_dir_moves;
+};
+
+struct pending_dir_move {
+	struct rb_node node;
+	struct list_head list;
+	u64 parent_ino;
+	u64 ino;
+	u64 gen;
+	struct list_head update_refs;
+};
+
+struct waiting_dir_move {
+	struct rb_node node;
+	u64 ino;
 };
 
 struct name_cache_entry {
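
waiting_dir_moves above is a reverse index keyed by inode number, and is_waiting_for_move() (forward-declared in the next hunk) is expected to answer a membership query against it. A minimal sketch of such a lookup using the usual rbtree walk; the body is illustrative, not necessarily the patch's implementation:

static int lookup_waiting_dir_move(struct send_ctx *sctx, u64 ino)
{
	struct rb_node *n = sctx->waiting_dir_moves.rb_node;

	while (n) {
		struct waiting_dir_move *entry;

		entry = rb_entry(n, struct waiting_dir_move, node);
		if (ino < entry->ino)
			n = n->rb_left;
		else if (ino > entry->ino)
			n = n->rb_right;
		else
			return 1;	/* ino still waits on its parent's move */
	}
	return 0;
}
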
@@ -145,6 +212,15 @@
 	char name[];
 };
 
+static int is_waiting_for_move(struct send_ctx *sctx, u64 ino);
+
+static int need_send_hole(struct send_ctx *sctx)
+{
+	return (sctx->parent_root && !sctx->cur_inode_new &&
+		!sctx->cur_inode_new_gen && !sctx->cur_inode_deleted &&
+		S_ISREG(sctx->cur_inode_mode));
+}
+
 static void fs_path_reset(struct fs_path *p)
 {
 	if (p->reversed) {
@@ -336,16 +412,6 @@
 	return ret;
 }
 
-#if 0
-static void fs_path_remove(struct fs_path *p)
-{
-	BUG_ON(p->reversed);
-	while (p->start != p->end && *p->end != '/')
-		p->end--;
-	*p->end = 0;
-}
-#endif
-
 static int fs_path_copy(struct fs_path *p, struct fs_path *from)
 {
 	int ret;
@@ -436,30 +502,15 @@
 	return 0;
 }
 
-#if 0
-static int tlv_put_u8(struct send_ctx *sctx, u16 attr, u8 value)
-{
-	return tlv_put(sctx, attr, &value, sizeof(value));
-}
+#define TLV_PUT_DEFINE_INT(bits) \
+	static int tlv_put_u##bits(struct send_ctx *sctx,	 	\
+			u##bits attr, u##bits value)			\
+	{								\
+		__le##bits __tmp = cpu_to_le##bits(value);		\
+		return tlv_put(sctx, attr, &__tmp, sizeof(__tmp));	\
+	}
 
-static int tlv_put_u16(struct send_ctx *sctx, u16 attr, u16 value)
-{
-	__le16 tmp = cpu_to_le16(value);
-	return tlv_put(sctx, attr, &tmp, sizeof(tmp));
-}
-
-static int tlv_put_u32(struct send_ctx *sctx, u16 attr, u32 value)
-{
-	__le32 tmp = cpu_to_le32(value);
-	return tlv_put(sctx, attr, &tmp, sizeof(tmp));
-}
-#endif
-
-static int tlv_put_u64(struct send_ctx *sctx, u16 attr, u64 value)
-{
-	__le64 tmp = cpu_to_le64(value);
-	return tlv_put(sctx, attr, &tmp, sizeof(tmp));
-}
+TLV_PUT_DEFINE_INT(64)
 
 static int tlv_put_string(struct send_ctx *sctx, u16 attr,
 			  const char *str, int len)
@@ -475,17 +526,6 @@
 	return tlv_put(sctx, attr, uuid, BTRFS_UUID_SIZE);
 }
 
-#if 0
-static int tlv_put_timespec(struct send_ctx *sctx, u16 attr,
-			    struct timespec *ts)
-{
-	struct btrfs_timespec bts;
-	bts.sec = cpu_to_le64(ts->tv_sec);
-	bts.nsec = cpu_to_le32(ts->tv_nsec);
-	return tlv_put(sctx, attr, &bts, sizeof(bts));
-}
-#endif
-
 static int tlv_put_btrfs_timespec(struct send_ctx *sctx, u16 attr,
 				  struct extent_buffer *eb,
 				  struct btrfs_timespec *ts)
@@ -533,12 +573,6 @@
 		if (ret < 0) \
 			goto tlv_put_failure; \
 	} while (0)
-#define TLV_PUT_TIMESPEC(sctx, attrtype, ts) \
-	do { \
-		ret = tlv_put_timespec(sctx, attrtype, ts); \
-		if (ret < 0) \
-			goto tlv_put_failure; \
-	} while (0)
 #define TLV_PUT_BTRFS_TIMESPEC(sctx, attrtype, eb, ts) \
 	do { \
 		ret = tlv_put_btrfs_timespec(sctx, attrtype, eb, ts); \
@@ -586,7 +620,7 @@
 	hdr->len = cpu_to_le32(sctx->send_size - sizeof(*hdr));
 	hdr->crc = 0;
 
-	crc = crc32c(0, (unsigned char *)sctx->send_buf, sctx->send_size);
+	crc = btrfs_crc32c(0, (unsigned char *)sctx->send_buf, sctx->send_size);
 	hdr->crc = cpu_to_le32(crc);
 
 	ret = write_buf(sctx->send_filp, sctx->send_buf, sctx->send_size,
@@ -1270,7 +1304,7 @@
 	if (!backref_ctx->found_itself) {
 		/* found a bug in backref code? */
 		ret = -EIO;
-		printk(KERN_ERR "btrfs: ERROR did not find backref in "
+		btrfs_err(sctx->send_root->fs_info, "did not find backref in "
 				"send_root. inode=%llu, offset=%llu, "
 				"disk_byte=%llu found extent=%llu\n",
 				ino, data_offset, disk_byte, found_key.objectid);
@@ -1298,6 +1332,16 @@
 	}
 
 	if (cur_clone_root) {
+		if (compressed != BTRFS_COMPRESS_NONE) {
+			/*
+			 * Offsets given by iterate_extent_inodes() are relative
+			 * to the start of the extent, we need to add logical
+			 * offset from the file extent item.
+			 * (See why at backref.c:check_extent_in_eb())
+			 */
+			cur_clone_root->offset += btrfs_file_extent_offset(eb,
+									   fi);
+		}
 		*found = cur_clone_root;
 		ret = 0;
 	} else {
@@ -1343,7 +1387,7 @@
 	BUG_ON(compression);
 
 	off = btrfs_file_extent_inline_start(ei);
-	len = btrfs_file_extent_inline_len(path->nodes[0], ei);
+	len = btrfs_file_extent_inline_len(path->nodes[0], path->slots[0], ei);
 
 	ret = fs_path_add_from_extent_buffer(dest, path->nodes[0], off, len);
 
@@ -1372,7 +1416,7 @@
 		return -ENOMEM;
 
 	while (1) {
-		len = snprintf(tmp, sizeof(tmp) - 1, "o%llu-%llu-%llu",
+		len = snprintf(tmp, sizeof(tmp), "o%llu-%llu-%llu",
 				ino, gen, idx);
 		if (len >= sizeof(tmp)) {
 			/* should really not happen */
@@ -1933,6 +1977,7 @@
  */
 static int __get_cur_name_and_parent(struct send_ctx *sctx,
 				     u64 ino, u64 gen,
+				     int skip_name_cache,
 				     u64 *parent_ino,
 				     u64 *parent_gen,
 				     struct fs_path *dest)
@@ -1942,6 +1987,8 @@
 	struct btrfs_path *path = NULL;
 	struct name_cache_entry *nce = NULL;
 
+	if (skip_name_cache)
+		goto get_ref;
 	/*
 	 * First check if we already did a call to this function with the same
 	 * ino/gen. If yes, check if the cache entry is still up-to-date. If yes
@@ -1986,11 +2033,12 @@
 		goto out_cache;
 	}
 
+get_ref:
 	/*
 	 * Depending on whether the inode was already processed or not, use
 	 * send_root or parent_root for ref lookup.
 	 */
-	if (ino < sctx->send_progress)
+	if (ino < sctx->send_progress && !skip_name_cache)
 		ret = get_first_ref(sctx->send_root, ino,
 				    parent_ino, parent_gen, dest);
 	else
@@ -2014,6 +2062,8 @@
 			goto out;
 		ret = 1;
 	}
+	if (skip_name_cache)
+		goto out;
 
 out_cache:
 	/*
@@ -2081,6 +2131,9 @@
 	u64 parent_inode = 0;
 	u64 parent_gen = 0;
 	int stop = 0;
+	u64 start_ino = ino;
+	u64 start_gen = gen;
+	int skip_name_cache = 0;
 
 	name = fs_path_alloc();
 	if (!name) {
@@ -2088,19 +2141,32 @@
 		goto out;
 	}
 
+	if (is_waiting_for_move(sctx, ino))
+		skip_name_cache = 1;
+
+again:
 	dest->reversed = 1;
 	fs_path_reset(dest);
 
 	while (!stop && ino != BTRFS_FIRST_FREE_OBJECTID) {
 		fs_path_reset(name);
 
-		ret = __get_cur_name_and_parent(sctx, ino, gen,
+		ret = __get_cur_name_and_parent(sctx, ino, gen, skip_name_cache,
 				&parent_inode, &parent_gen, name);
 		if (ret < 0)
 			goto out;
 		if (ret)
 			stop = 1;
 
+		if (!skip_name_cache &&
+		    is_waiting_for_move(sctx, parent_inode)) {
+			ino = start_ino;
+			gen = start_gen;
+			stop = 0;
+			skip_name_cache = 1;
+			goto again;
+		}
+
 		ret = fs_path_add_path(dest, name);
 		if (ret < 0)
 			goto out;
@@ -2131,7 +2197,7 @@
 	char *name = NULL;
 	int namelen;
 
-	path = alloc_path_for_send();
+	path = btrfs_alloc_path();
 	if (!path)
 		return -ENOMEM;
 
@@ -2180,12 +2246,12 @@
 	TLV_PUT_UUID(sctx, BTRFS_SEND_A_UUID,
 			sctx->send_root->root_item.uuid);
 	TLV_PUT_U64(sctx, BTRFS_SEND_A_CTRANSID,
-			sctx->send_root->root_item.ctransid);
+		    le64_to_cpu(sctx->send_root->root_item.ctransid));
 	if (parent_root) {
 		TLV_PUT_UUID(sctx, BTRFS_SEND_A_CLONE_UUID,
 				sctx->parent_root->root_item.uuid);
 		TLV_PUT_U64(sctx, BTRFS_SEND_A_CLONE_CTRANSID,
-				sctx->parent_root->root_item.ctransid);
+			    le64_to_cpu(sctx->parent_root->root_item.ctransid));
 	}
 
 	ret = send_cmd(sctx);
@@ -2672,10 +2738,347 @@
 	return ret;
 }
 
+static int is_waiting_for_move(struct send_ctx *sctx, u64 ino)
+{
+	struct rb_node *n = sctx->waiting_dir_moves.rb_node;
+	struct waiting_dir_move *entry;
+
+	while (n) {
+		entry = rb_entry(n, struct waiting_dir_move, node);
+		if (ino < entry->ino)
+			n = n->rb_left;
+		else if (ino > entry->ino)
+			n = n->rb_right;
+		else
+			return 1;
+	}
+	return 0;
+}
+
+static int add_waiting_dir_move(struct send_ctx *sctx, u64 ino)
+{
+	struct rb_node **p = &sctx->waiting_dir_moves.rb_node;
+	struct rb_node *parent = NULL;
+	struct waiting_dir_move *entry, *dm;
+
+	dm = kmalloc(sizeof(*dm), GFP_NOFS);
+	if (!dm)
+		return -ENOMEM;
+	dm->ino = ino;
+
+	while (*p) {
+		parent = *p;
+		entry = rb_entry(parent, struct waiting_dir_move, node);
+		if (ino < entry->ino) {
+			p = &(*p)->rb_left;
+		} else if (ino > entry->ino) {
+			p = &(*p)->rb_right;
+		} else {
+			kfree(dm);
+			return -EEXIST;
+		}
+	}
+
+	rb_link_node(&dm->node, parent, p);
+	rb_insert_color(&dm->node, &sctx->waiting_dir_moves);
+	return 0;
+}
+
+static int del_waiting_dir_move(struct send_ctx *sctx, u64 ino)
+{
+	struct rb_node *n = sctx->waiting_dir_moves.rb_node;
+	struct waiting_dir_move *entry;
+
+	while (n) {
+		entry = rb_entry(n, struct waiting_dir_move, node);
+		if (ino < entry->ino) {
+			n = n->rb_left;
+		} else if (ino > entry->ino) {
+			n = n->rb_right;
+		} else {
+			rb_erase(&entry->node, &sctx->waiting_dir_moves);
+			kfree(entry);
+			return 0;
+		}
+	}
+	return -ENOENT;
+}
+
+static int add_pending_dir_move(struct send_ctx *sctx, u64 parent_ino)
+{
+	struct rb_node **p = &sctx->pending_dir_moves.rb_node;
+	struct rb_node *parent = NULL;
+	struct pending_dir_move *entry, *pm;
+	struct recorded_ref *cur;
+	int exists = 0;
+	int ret;
+
+	pm = kmalloc(sizeof(*pm), GFP_NOFS);
+	if (!pm)
+		return -ENOMEM;
+	pm->parent_ino = parent_ino;
+	pm->ino = sctx->cur_ino;
+	pm->gen = sctx->cur_inode_gen;
+	INIT_LIST_HEAD(&pm->list);
+	INIT_LIST_HEAD(&pm->update_refs);
+	RB_CLEAR_NODE(&pm->node);
+
+	while (*p) {
+		parent = *p;
+		entry = rb_entry(parent, struct pending_dir_move, node);
+		if (parent_ino < entry->parent_ino) {
+			p = &(*p)->rb_left;
+		} else if (parent_ino > entry->parent_ino) {
+			p = &(*p)->rb_right;
+		} else {
+			exists = 1;
+			break;
+		}
+	}
+
+	list_for_each_entry(cur, &sctx->deleted_refs, list) {
+		ret = dup_ref(cur, &pm->update_refs);
+		if (ret < 0)
+			goto out;
+	}
+	list_for_each_entry(cur, &sctx->new_refs, list) {
+		ret = dup_ref(cur, &pm->update_refs);
+		if (ret < 0)
+			goto out;
+	}
+
+	ret = add_waiting_dir_move(sctx, pm->ino);
+	if (ret)
+		goto out;
+
+	if (exists) {
+		list_add_tail(&pm->list, &entry->list);
+	} else {
+		rb_link_node(&pm->node, parent, p);
+		rb_insert_color(&pm->node, &sctx->pending_dir_moves);
+	}
+	ret = 0;
+out:
+	if (ret) {
+		__free_recorded_refs(&pm->update_refs);
+		kfree(pm);
+	}
+	return ret;
+}
+
+static struct pending_dir_move *get_pending_dir_moves(struct send_ctx *sctx,
+						      u64 parent_ino)
+{
+	struct rb_node *n = sctx->pending_dir_moves.rb_node;
+	struct pending_dir_move *entry;
+
+	while (n) {
+		entry = rb_entry(n, struct pending_dir_move, node);
+		if (parent_ino < entry->parent_ino)
+			n = n->rb_left;
+		else if (parent_ino > entry->parent_ino)
+			n = n->rb_right;
+		else
+			return entry;
+	}
+	return NULL;
+}
+
+static int apply_dir_move(struct send_ctx *sctx, struct pending_dir_move *pm)
+{
+	struct fs_path *from_path = NULL;
+	struct fs_path *to_path = NULL;
+	u64 orig_progress = sctx->send_progress;
+	struct recorded_ref *cur;
+	int ret;
+
+	from_path = fs_path_alloc();
+	if (!from_path)
+		return -ENOMEM;
+
+	sctx->send_progress = pm->ino;
+	ret = get_cur_path(sctx, pm->ino, pm->gen, from_path);
+	if (ret < 0)
+		goto out;
+
+	to_path = fs_path_alloc();
+	if (!to_path) {
+		ret = -ENOMEM;
+		goto out;
+	}
+
+	sctx->send_progress = sctx->cur_ino + 1;
+	ret = del_waiting_dir_move(sctx, pm->ino);
+	ASSERT(ret == 0);
+
+	ret = get_cur_path(sctx, pm->ino, pm->gen, to_path);
+	if (ret < 0)
+		goto out;
+
+	ret = send_rename(sctx, from_path, to_path);
+	if (ret < 0)
+		goto out;
+
+	ret = send_utimes(sctx, pm->ino, pm->gen);
+	if (ret < 0)
+		goto out;
+
+	/*
+	 * After rename/move, need to update the utimes of both new parent(s)
+	 * and old parent(s).
+	 */
+	list_for_each_entry(cur, &pm->update_refs, list) {
+		ret = send_utimes(sctx, cur->dir, cur->dir_gen);
+		if (ret < 0)
+			goto out;
+	}
+
+out:
+	fs_path_free(from_path);
+	fs_path_free(to_path);
+	sctx->send_progress = orig_progress;
+
+	return ret;
+}
+
+static void free_pending_move(struct send_ctx *sctx, struct pending_dir_move *m)
+{
+	if (!list_empty(&m->list))
+		list_del(&m->list);
+	if (!RB_EMPTY_NODE(&m->node))
+		rb_erase(&m->node, &sctx->pending_dir_moves);
+	__free_recorded_refs(&m->update_refs);
+	kfree(m);
+}
+
+static void tail_append_pending_moves(struct pending_dir_move *moves,
+				      struct list_head *stack)
+{
+	if (list_empty(&moves->list)) {
+		list_add_tail(&moves->list, stack);
+	} else {
+		LIST_HEAD(list);
+		list_splice_init(&moves->list, &list);
+		list_add_tail(&moves->list, stack);
+		list_splice_tail(&list, stack);
+	}
+}
+
+static int apply_children_dir_moves(struct send_ctx *sctx)
+{
+	struct pending_dir_move *pm;
+	struct list_head stack;
+	u64 parent_ino = sctx->cur_ino;
+	int ret = 0;
+
+	pm = get_pending_dir_moves(sctx, parent_ino);
+	if (!pm)
+		return 0;
+
+	INIT_LIST_HEAD(&stack);
+	tail_append_pending_moves(pm, &stack);
+
+	while (!list_empty(&stack)) {
+		pm = list_first_entry(&stack, struct pending_dir_move, list);
+		parent_ino = pm->ino;
+		ret = apply_dir_move(sctx, pm);
+		free_pending_move(sctx, pm);
+		if (ret)
+			goto out;
+		pm = get_pending_dir_moves(sctx, parent_ino);
+		if (pm)
+			tail_append_pending_moves(pm, &stack);
+	}
+	return 0;
+
+out:
+	while (!list_empty(&stack)) {
+		pm = list_first_entry(&stack, struct pending_dir_move, list);
+		free_pending_move(sctx, pm);
+	}
+	return ret;
+}
+
+static int wait_for_parent_move(struct send_ctx *sctx,
+				struct recorded_ref *parent_ref)
+{
+	int ret;
+	u64 ino = parent_ref->dir;
+	u64 parent_ino_before, parent_ino_after;
+	u64 new_gen, old_gen;
+	struct fs_path *path_before = NULL;
+	struct fs_path *path_after = NULL;
+	int len1, len2;
+
+	if (parent_ref->dir <= sctx->cur_ino)
+		return 0;
+
+	if (is_waiting_for_move(sctx, ino))
+		return 1;
+
+	ret = get_inode_info(sctx->parent_root, ino, NULL, &old_gen,
+			     NULL, NULL, NULL, NULL);
+	if (ret == -ENOENT)
+		return 0;
+	else if (ret < 0)
+		return ret;
+
+	ret = get_inode_info(sctx->send_root, ino, NULL, &new_gen,
+			     NULL, NULL, NULL, NULL);
+	if (ret < 0)
+		return ret;
+
+	if (new_gen != old_gen)
+		return 0;
+
+	path_before = fs_path_alloc();
+	if (!path_before)
+		return -ENOMEM;
+
+	ret = get_first_ref(sctx->parent_root, ino, &parent_ino_before,
+			    NULL, path_before);
+	if (ret == -ENOENT) {
+		ret = 0;
+		goto out;
+	} else if (ret < 0) {
+		goto out;
+	}
+
+	path_after = fs_path_alloc();
+	if (!path_after) {
+		ret = -ENOMEM;
+		goto out;
+	}
+
+	ret = get_first_ref(sctx->send_root, ino, &parent_ino_after,
+			    NULL, path_after);
+	if (ret == -ENOENT) {
+		ret = 0;
+		goto out;
+	} else if (ret < 0) {
+		goto out;
+	}
+
+	len1 = fs_path_len(path_before);
+	len2 = fs_path_len(path_after);
+	if ((parent_ino_before != parent_ino_after) && (len1 != len2 ||
+	     memcmp(path_before->start, path_after->start, len1))) {
+		ret = 1;
+		goto out;
+	}
+	ret = 0;
+
+out:
+	fs_path_free(path_before);
+	fs_path_free(path_after);
+
+	return ret;
+}
+
 /*
  * This does all the move/link/unlink/rmdir magic.
  */
-static int process_recorded_refs(struct send_ctx *sctx)
+static int process_recorded_refs(struct send_ctx *sctx, int *pending_move)
 {
 	int ret = 0;
 	struct recorded_ref *cur;
@@ -2824,11 +3227,17 @@
 				 * dirs, we always have one new and one deleted
 				 * ref. The deleted ref is ignored later.
 				 */
-				ret = send_rename(sctx, valid_path,
-						cur->full_path);
-				if (ret < 0)
-					goto out;
-				ret = fs_path_copy(valid_path, cur->full_path);
+				if (wait_for_parent_move(sctx, cur)) {
+					ret = add_pending_dir_move(sctx,
+								   cur->dir);
+					*pending_move = 1;
+				} else {
+					ret = send_rename(sctx, valid_path,
+							  cur->full_path);
+					if (!ret)
+						ret = fs_path_copy(valid_path,
+							       cur->full_path);
+				}
 				if (ret < 0)
 					goto out;
 			} else {
@@ -3197,6 +3606,7 @@
 	struct extent_buffer *eb;
 	int slot;
 	iterate_inode_ref_t cb;
+	int pending_move = 0;
 
 	path = alloc_path_for_send();
 	if (!path)
@@ -3240,7 +3650,9 @@
 	}
 	btrfs_release_path(path);
 
-	ret = process_recorded_refs(sctx);
+	ret = process_recorded_refs(sctx, &pending_move);
+	/* Only applicable to an incremental send. */
+	ASSERT(pending_move == 0);
 
 out:
 	btrfs_free_path(path);
@@ -3706,7 +4118,7 @@
 	TLV_PUT_UUID(sctx, BTRFS_SEND_A_CLONE_UUID,
 			clone_root->root->root_item.uuid);
 	TLV_PUT_U64(sctx, BTRFS_SEND_A_CLONE_CTRANSID,
-			clone_root->root->root_item.ctransid);
+		    le64_to_cpu(clone_root->root->root_item.ctransid));
 	TLV_PUT_PATH(sctx, BTRFS_SEND_A_CLONE_PATH, p);
 	TLV_PUT_U64(sctx, BTRFS_SEND_A_CLONE_OFFSET,
 			clone_root->offset);
@@ -3752,6 +4164,39 @@
 	return ret;
 }
 
+static int send_hole(struct send_ctx *sctx, u64 end)
+{
+	struct fs_path *p = NULL;
+	u64 offset = sctx->cur_inode_last_extent;
+	u64 len;
+	int ret = 0;
+
+	p = fs_path_alloc();
+	if (!p)
+		return -ENOMEM;
+	memset(sctx->read_buf, 0, BTRFS_SEND_READ_SIZE);
+	while (offset < end) {
+		len = min_t(u64, end - offset, BTRFS_SEND_READ_SIZE);
+
+		ret = begin_cmd(sctx, BTRFS_SEND_C_WRITE);
+		if (ret < 0)
+			break;
+		ret = get_cur_path(sctx, sctx->cur_ino, sctx->cur_inode_gen, p);
+		if (ret < 0)
+			break;
+		TLV_PUT_PATH(sctx, BTRFS_SEND_A_PATH, p);
+		TLV_PUT_U64(sctx, BTRFS_SEND_A_FILE_OFFSET, offset);
+		TLV_PUT(sctx, BTRFS_SEND_A_DATA, sctx->read_buf, len);
+		ret = send_cmd(sctx);
+		if (ret < 0)
+			break;
+		offset += len;
+	}
+tlv_put_failure:
+	fs_path_free(p);
+	return ret;
+}
+
 static int send_write_or_clone(struct send_ctx *sctx,
 			       struct btrfs_path *path,
 			       struct btrfs_key *key,
@@ -3764,12 +4209,14 @@
 	u64 len;
 	u32 l;
 	u8 type;
+	u64 bs = sctx->send_root->fs_info->sb->s_blocksize;
 
 	ei = btrfs_item_ptr(path->nodes[0], path->slots[0],
 			struct btrfs_file_extent_item);
 	type = btrfs_file_extent_type(path->nodes[0], ei);
 	if (type == BTRFS_FILE_EXTENT_INLINE) {
-		len = btrfs_file_extent_inline_len(path->nodes[0], ei);
+		len = btrfs_file_extent_inline_len(path->nodes[0],
+						   path->slots[0], ei);
 		/*
 		 * it is possible the inline item won't cover the whole page,
 		 * but there may be items after this page.  Make
@@ -3787,7 +4234,7 @@
 		goto out;
 	}
 
-	if (clone_root) {
+	if (clone_root && IS_ALIGNED(offset + len, bs)) {
 		ret = send_clone(sctx, offset, len, clone_root);
 	} else if (sctx->flags & BTRFS_SEND_FLAG_NO_FILE_DATA) {
 		ret = send_update_extent(sctx, offset, len);
@@ -3979,6 +4426,101 @@
 	return ret;
 }
 
+static int get_last_extent(struct send_ctx *sctx, u64 offset)
+{
+	struct btrfs_path *path;
+	struct btrfs_root *root = sctx->send_root;
+	struct btrfs_file_extent_item *fi;
+	struct btrfs_key key;
+	u64 extent_end;
+	u8 type;
+	int ret;
+
+	path = alloc_path_for_send();
+	if (!path)
+		return -ENOMEM;
+
+	sctx->cur_inode_last_extent = 0;
+
+	key.objectid = sctx->cur_ino;
+	key.type = BTRFS_EXTENT_DATA_KEY;
+	key.offset = offset;
+	ret = btrfs_search_slot_for_read(root, &key, path, 0, 1);
+	if (ret < 0)
+		goto out;
+	ret = 0;
+	btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0]);
+	if (key.objectid != sctx->cur_ino || key.type != BTRFS_EXTENT_DATA_KEY)
+		goto out;
+
+	fi = btrfs_item_ptr(path->nodes[0], path->slots[0],
+			    struct btrfs_file_extent_item);
+	type = btrfs_file_extent_type(path->nodes[0], fi);
+	if (type == BTRFS_FILE_EXTENT_INLINE) {
+		u64 size = btrfs_file_extent_inline_len(path->nodes[0],
+							path->slots[0], fi);
+		extent_end = ALIGN(key.offset + size,
+				   sctx->send_root->sectorsize);
+	} else {
+		extent_end = key.offset +
+			btrfs_file_extent_num_bytes(path->nodes[0], fi);
+	}
+	sctx->cur_inode_last_extent = extent_end;
+out:
+	btrfs_free_path(path);
+	return ret;
+}
+
+static int maybe_send_hole(struct send_ctx *sctx, struct btrfs_path *path,
+			   struct btrfs_key *key)
+{
+	struct btrfs_file_extent_item *fi;
+	u64 extent_end;
+	u8 type;
+	int ret = 0;
+
+	if (sctx->cur_ino != key->objectid || !need_send_hole(sctx))
+		return 0;
+
+	if (sctx->cur_inode_last_extent == (u64)-1) {
+		ret = get_last_extent(sctx, key->offset - 1);
+		if (ret)
+			return ret;
+	}
+
+	fi = btrfs_item_ptr(path->nodes[0], path->slots[0],
+			    struct btrfs_file_extent_item);
+	type = btrfs_file_extent_type(path->nodes[0], fi);
+	if (type == BTRFS_FILE_EXTENT_INLINE) {
+		u64 size = btrfs_file_extent_inline_len(path->nodes[0],
+							path->slots[0], fi);
+		extent_end = ALIGN(key->offset + size,
+				   sctx->send_root->sectorsize);
+	} else {
+		extent_end = key->offset +
+			btrfs_file_extent_num_bytes(path->nodes[0], fi);
+	}
+
+	if (path->slots[0] == 0 &&
+	    sctx->cur_inode_last_extent < key->offset) {
+		/*
+		 * We might have skipped entire leaves that contained only
+		 * file extent items for our current inode. These leaves have
+		 * a generation number smaller (older) than the one in the
+		 * current leaf and the leaf our last extent came from, and
+		 * are located between these 2 leaves.
+		 */
+		ret = get_last_extent(sctx, key->offset - 1);
+		if (ret)
+			return ret;
+	}
+
+	if (sctx->cur_inode_last_extent < key->offset)
+		ret = send_hole(sctx, key->offset);
+	sctx->cur_inode_last_extent = extent_end;
+	return ret;
+}
+
 static int process_extent(struct send_ctx *sctx,
 			  struct btrfs_path *path,
 			  struct btrfs_key *key)
@@ -3995,7 +4537,7 @@
 			goto out;
 		if (ret) {
 			ret = 0;
-			goto out;
+			goto out_hole;
 		}
 	} else {
 		struct btrfs_file_extent_item *ei;
@@ -4031,7 +4573,10 @@
 		goto out;
 
 	ret = send_write_or_clone(sctx, path, key, found_clone);
-
+	if (ret)
+		goto out;
+out_hole:
+	ret = maybe_send_hole(sctx, path, key);
 out:
 	return ret;
 }
@@ -4054,17 +4599,25 @@
 	key.objectid = sctx->cmp_key->objectid;
 	key.type = BTRFS_EXTENT_DATA_KEY;
 	key.offset = 0;
-	while (1) {
-		ret = btrfs_search_slot_for_read(root, &key, path, 1, 0);
-		if (ret < 0)
-			goto out;
-		if (ret) {
-			ret = 0;
-			goto out;
-		}
+	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
+	if (ret < 0)
+		goto out;
 
+	while (1) {
 		eb = path->nodes[0];
 		slot = path->slots[0];
+
+		if (slot >= btrfs_header_nritems(eb)) {
+			ret = btrfs_next_leaf(root, path);
+			if (ret < 0) {
+				goto out;
+			} else if (ret > 0) {
+				ret = 0;
+				break;
+			}
+			continue;
+		}
+
 		btrfs_item_key_to_cpu(eb, &found_key, slot);
 
 		if (found_key.objectid != key.objectid ||
@@ -4077,8 +4630,7 @@
 		if (ret < 0)
 			goto out;
 
-		btrfs_release_path(path);
-		key.offset = found_key.offset + 1;
+		path->slots[0]++;
 	}
 
 out:
@@ -4086,7 +4638,9 @@
 	return ret;
 }
 
-static int process_recorded_refs_if_needed(struct send_ctx *sctx, int at_end)
+static int process_recorded_refs_if_needed(struct send_ctx *sctx, int at_end,
+					   int *pending_move,
+					   int *refs_processed)
 {
 	int ret = 0;
 
@@ -4098,17 +4652,11 @@
 	if (list_empty(&sctx->new_refs) && list_empty(&sctx->deleted_refs))
 		goto out;
 
-	ret = process_recorded_refs(sctx);
+	ret = process_recorded_refs(sctx, pending_move);
 	if (ret < 0)
 		goto out;
 
-	/*
-	 * We have processed the refs and thus need to advance send_progress.
-	 * Now, calls to get_cur_xxx will take the updated refs of the current
-	 * inode into account.
-	 */
-	sctx->send_progress = sctx->cur_ino + 1;
-
+	*refs_processed = 1;
 out:
 	return ret;
 }
@@ -4124,11 +4672,29 @@
 	u64 right_gid;
 	int need_chmod = 0;
 	int need_chown = 0;
+	int pending_move = 0;
+	int refs_processed = 0;
 
-	ret = process_recorded_refs_if_needed(sctx, at_end);
+	ret = process_recorded_refs_if_needed(sctx, at_end, &pending_move,
+					      &refs_processed);
 	if (ret < 0)
 		goto out;
 
+	/*
+	 * We have processed the refs and thus need to advance send_progress.
+	 * Now, calls to get_cur_xxx will take the updated refs of the current
+	 * inode into account.
+	 *
+	 * On the other hand, if our current inode is a directory and couldn't
+	 * be moved/renamed because its parent was renamed/moved too and it has
+	 * a higher inode number, we can only move/rename our current inode
+	 * after we moved/renamed its parent. Therefore in this case operate on
+	 * the old path (pre move/rename) of our current inode, and the
+	 * move/rename will be performed later.
+	 */
+	if (refs_processed && !pending_move)
+		sctx->send_progress = sctx->cur_ino + 1;
+
 	if (sctx->cur_ino == 0 || sctx->cur_inode_deleted)
 		goto out;
 	if (!at_end && sctx->cmp_key->objectid == sctx->cur_ino)
@@ -4157,6 +4723,19 @@
 	}
 
 	if (S_ISREG(sctx->cur_inode_mode)) {
+		if (need_send_hole(sctx)) {
+			if (sctx->cur_inode_last_extent == (u64)-1) {
+				ret = get_last_extent(sctx, (u64)-1);
+				if (ret)
+					goto out;
+			}
+			if (sctx->cur_inode_last_extent <
+			    sctx->cur_inode_size) {
+				ret = send_hole(sctx, sctx->cur_inode_size);
+				if (ret)
+					goto out;
+			}
+		}
 		ret = send_truncate(sctx, sctx->cur_ino, sctx->cur_inode_gen,
 				sctx->cur_inode_size);
 		if (ret < 0)
@@ -4177,9 +4756,21 @@
 	}
 
 	/*
-	 * Need to send that every time, no matter if it actually changed
-	 * between the two trees as we have done changes to the inode before.
+	 * If other directory inodes depended on our current directory
+	 * inode's move/rename, now do their move/rename operations.
 	 */
+	if (!is_waiting_for_move(sctx, sctx->cur_ino)) {
+		ret = apply_children_dir_moves(sctx);
+		if (ret)
+			goto out;
+	}
+
+	/*
+	 * Need to send that every time, no matter if it actually
+	 * changed between the two trees as we have done changes to
+	 * the inode before.
+	 */
+	sctx->send_progress = sctx->cur_ino + 1;
 	ret = send_utimes(sctx, sctx->cur_ino, sctx->cur_inode_gen);
 	if (ret < 0)
 		goto out;
@@ -4200,6 +4791,7 @@
 
 	sctx->cur_ino = key->objectid;
 	sctx->cur_inode_new_gen = 0;
+	sctx->cur_inode_last_extent = (u64)-1;
 
 	/*
 	 * Set send_progress to current inode. This will tell all get_cur_xxx
@@ -4480,14 +5072,18 @@
 	struct send_ctx *sctx = ctx;
 
 	if (result == BTRFS_COMPARE_TREE_SAME) {
-		if (key->type != BTRFS_INODE_REF_KEY &&
-		    key->type != BTRFS_INODE_EXTREF_KEY)
+		if (key->type == BTRFS_INODE_REF_KEY ||
+		    key->type == BTRFS_INODE_EXTREF_KEY) {
+			ret = compare_refs(sctx, left_path, key);
+			if (!ret)
+				return 0;
+			if (ret < 0)
+				return ret;
+		} else if (key->type == BTRFS_EXTENT_DATA_KEY) {
+			return maybe_send_hole(sctx, left_path, key);
+		} else {
 			return 0;
-		ret = compare_refs(sctx, left_path, key);
-		if (!ret)
-			return 0;
-		if (ret < 0)
-			return ret;
+		}
 		result = BTRFS_COMPARE_TREE_CHANGED;
 		ret = 0;
 	}
@@ -4522,7 +5118,6 @@
 static int full_send_tree(struct send_ctx *sctx)
 {
 	int ret;
-	struct btrfs_trans_handle *trans = NULL;
 	struct btrfs_root *send_root = sctx->send_root;
 	struct btrfs_key key;
 	struct btrfs_key found_key;
@@ -4544,19 +5139,6 @@
 	key.type = BTRFS_INODE_ITEM_KEY;
 	key.offset = 0;
 
-join_trans:
-	/*
-	 * We need to make sure the transaction does not get committed
-	 * while we do anything on commit roots. Join a transaction to prevent
-	 * this.
-	 */
-	trans = btrfs_join_transaction(send_root);
-	if (IS_ERR(trans)) {
-		ret = PTR_ERR(trans);
-		trans = NULL;
-		goto out;
-	}
-
 	/*
 	 * Make sure the tree has not changed after re-joining. We detect this
 	 * by comparing start_ctransid and ctransid. They should always match.
@@ -4566,7 +5148,7 @@
 	spin_unlock(&send_root->root_item_lock);
 
 	if (ctransid != start_ctransid) {
-		WARN(1, KERN_WARNING "btrfs: the root that you're trying to "
+		WARN(1, KERN_WARNING "BTRFS: the root that you're trying to "
 				     "send was modified in between. This is "
 				     "probably a bug.\n");
 		ret = -EIO;
@@ -4580,19 +5162,6 @@
 		goto out_finish;
 
 	while (1) {
-		/*
-		 * When someone want to commit while we iterate, end the
-		 * joined transaction and rejoin.
-		 */
-		if (btrfs_should_end_transaction(trans, send_root)) {
-			ret = btrfs_end_transaction(trans, send_root);
-			trans = NULL;
-			if (ret < 0)
-				goto out;
-			btrfs_release_path(path);
-			goto join_trans;
-		}
-
 		eb = path->nodes[0];
 		slot = path->slots[0];
 		btrfs_item_key_to_cpu(eb, &found_key, slot);
@@ -4620,12 +5189,6 @@
 
 out:
 	btrfs_free_path(path);
-	if (trans) {
-		if (!ret)
-			ret = btrfs_end_transaction(trans, send_root);
-		else
-			btrfs_end_transaction(trans, send_root);
-	}
 	return ret;
 }
 
@@ -4662,6 +5225,21 @@
 	return ret;
 }
 
+static void btrfs_root_dec_send_in_progress(struct btrfs_root* root)
+{
+	spin_lock(&root->root_item_lock);
+	root->send_in_progress--;
+	/*
+	 * Not much left to do, we don't know why it's unbalanced and
+	 * can't blindly reset it to 0.
+	 */
+	if (root->send_in_progress < 0)
+		btrfs_err(root->fs_info,
+			"send_in_progres unbalanced %d root %llu\n",
+			root->send_in_progress, root->root_key.objectid);
+	spin_unlock(&root->root_item_lock);
+}
+
 long btrfs_ioctl_send(struct file *mnt_file, void __user *arg_)
 {
 	int ret = 0;
@@ -4673,6 +5251,9 @@
 	struct send_ctx *sctx = NULL;
 	u32 i;
 	u64 *clone_sources_tmp = NULL;
+	int clone_sources_to_rollback = 0;
+	int sort_clone_roots = 0;
+	int index;
 
 	if (!capable(CAP_SYS_ADMIN))
 		return -EPERM;
@@ -4681,38 +5262,26 @@
 	fs_info = send_root->fs_info;
 
 	/*
+	 * The subvolume must remain read-only during send, protect against
+	 * making it RW.
+	 */
+	spin_lock(&send_root->root_item_lock);
+	send_root->send_in_progress++;
+	spin_unlock(&send_root->root_item_lock);
+
+	/*
 	 * This is done when we lookup the root, it should already be complete
 	 * by the time we get here.
 	 */
 	WARN_ON(send_root->orphan_cleanup_state != ORPHAN_CLEANUP_DONE);
 
 	/*
-	 * If we just created this root we need to make sure that the orphan
-	 * cleanup has been done and committed since we search the commit root,
-	 * so check its commit root transid with our otransid and if they match
-	 * commit the transaction to make sure everything is updated.
+	 * Userspace tools do the checks and warn the user if it's
+	 * not RO.
 	 */
-	down_read(&send_root->fs_info->extent_commit_sem);
-	if (btrfs_header_generation(send_root->commit_root) ==
-	    btrfs_root_otransid(&send_root->root_item)) {
-		struct btrfs_trans_handle *trans;
-
-		up_read(&send_root->fs_info->extent_commit_sem);
-
-		trans = btrfs_attach_transaction_barrier(send_root);
-		if (IS_ERR(trans)) {
-			if (PTR_ERR(trans) != -ENOENT) {
-				ret = PTR_ERR(trans);
-				goto out;
-			}
-			/* ENOENT means theres no transaction */
-		} else {
-			ret = btrfs_commit_transaction(trans, send_root);
-			if (ret)
-				goto out;
-		}
-	} else {
-		up_read(&send_root->fs_info->extent_commit_sem);
+	if (!btrfs_root_readonly(send_root)) {
+		ret = -EPERM;
+		goto out;
 	}
 
 	arg = memdup_user(arg_, sizeof(*arg));
@@ -4753,8 +5322,6 @@
 		goto out;
 	}
 
-	sctx->mnt = mnt_file->f_path.mnt;
-
 	sctx->send_root = send_root;
 	sctx->clone_roots_cnt = arg->clone_sources_count;
 
@@ -4771,6 +5338,9 @@
 		goto out;
 	}
 
+	sctx->pending_dir_moves = RB_ROOT;
+	sctx->waiting_dir_moves = RB_ROOT;
+
 	sctx->clone_roots = vzalloc(sizeof(struct clone_root) *
 			(arg->clone_sources_count + 1));
 	if (!sctx->clone_roots) {
@@ -4798,11 +5368,27 @@
 			key.objectid = clone_sources_tmp[i];
 			key.type = BTRFS_ROOT_ITEM_KEY;
 			key.offset = (u64)-1;
+
+			index = srcu_read_lock(&fs_info->subvol_srcu);
+
 			clone_root = btrfs_read_fs_root_no_name(fs_info, &key);
 			if (IS_ERR(clone_root)) {
+				srcu_read_unlock(&fs_info->subvol_srcu, index);
 				ret = PTR_ERR(clone_root);
 				goto out;
 			}
+			clone_sources_to_rollback = i + 1;
+			spin_lock(&clone_root->root_item_lock);
+			clone_root->send_in_progress++;
+			if (!btrfs_root_readonly(clone_root)) {
+				spin_unlock(&clone_root->root_item_lock);
+				srcu_read_unlock(&fs_info->subvol_srcu, index);
+				ret = -EPERM;
+				goto out;
+			}
+			spin_unlock(&clone_root->root_item_lock);
+			srcu_read_unlock(&fs_info->subvol_srcu, index);
+
 			sctx->clone_roots[i].root = clone_root;
 		}
 		vfree(clone_sources_tmp);
@@ -4813,11 +5399,27 @@
 		key.objectid = arg->parent_root;
 		key.type = BTRFS_ROOT_ITEM_KEY;
 		key.offset = (u64)-1;
+
+		index = srcu_read_lock(&fs_info->subvol_srcu);
+
 		sctx->parent_root = btrfs_read_fs_root_no_name(fs_info, &key);
 		if (IS_ERR(sctx->parent_root)) {
+			srcu_read_unlock(&fs_info->subvol_srcu, index);
 			ret = PTR_ERR(sctx->parent_root);
 			goto out;
 		}
+
+		spin_lock(&sctx->parent_root->root_item_lock);
+		sctx->parent_root->send_in_progress++;
+		if (!btrfs_root_readonly(sctx->parent_root)) {
+			spin_unlock(&sctx->parent_root->root_item_lock);
+			srcu_read_unlock(&fs_info->subvol_srcu, index);
+			ret = -EPERM;
+			goto out;
+		}
+		spin_unlock(&sctx->parent_root->root_item_lock);
+
+		srcu_read_unlock(&fs_info->subvol_srcu, index);
 	}
 
 	/*
@@ -4831,6 +5433,7 @@
 	sort(sctx->clone_roots, sctx->clone_roots_cnt,
 			sizeof(*sctx->clone_roots), __clone_root_cmp_sort,
 			NULL);
+	sort_clone_roots = 1;
 
 	ret = send_subvol(sctx);
 	if (ret < 0)
@@ -4846,6 +5449,48 @@
 	}
 
 out:
+	WARN_ON(sctx && !ret && !RB_EMPTY_ROOT(&sctx->pending_dir_moves));
+	while (sctx && !RB_EMPTY_ROOT(&sctx->pending_dir_moves)) {
+		struct rb_node *n;
+		struct pending_dir_move *pm;
+
+		n = rb_first(&sctx->pending_dir_moves);
+		pm = rb_entry(n, struct pending_dir_move, node);
+		while (!list_empty(&pm->list)) {
+			struct pending_dir_move *pm2;
+
+			pm2 = list_first_entry(&pm->list,
+					       struct pending_dir_move, list);
+			free_pending_move(sctx, pm2);
+		}
+		free_pending_move(sctx, pm);
+	}
+
+	WARN_ON(sctx && !ret && !RB_EMPTY_ROOT(&sctx->waiting_dir_moves));
+	while (sctx && !RB_EMPTY_ROOT(&sctx->waiting_dir_moves)) {
+		struct rb_node *n;
+		struct waiting_dir_move *dm;
+
+		n = rb_first(&sctx->waiting_dir_moves);
+		dm = rb_entry(n, struct waiting_dir_move, node);
+		rb_erase(&dm->node, &sctx->waiting_dir_moves);
+		kfree(dm);
+	}
+
+	if (sort_clone_roots) {
+		for (i = 0; i < sctx->clone_roots_cnt; i++)
+			btrfs_root_dec_send_in_progress(
+					sctx->clone_roots[i].root);
+	} else {
+		for (i = 0; sctx && i < clone_sources_to_rollback; i++)
+			btrfs_root_dec_send_in_progress(
+					sctx->clone_roots[i].root);
+
+		btrfs_root_dec_send_in_progress(send_root);
+	}
+	if (sctx && !IS_ERR_OR_NULL(sctx->parent_root))
+		btrfs_root_dec_send_in_progress(sctx->parent_root);
+
 	kfree(arg);
 	vfree(clone_sources_tmp);
 
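The pending_dir_moves / waiting_dir_moves machinery added to send.c above defers a directory rename whenever its destination parent has a higher inode number and itself changed location between the two snapshots, then replays the parked rename once that parent has been processed. A minimal, self-contained sketch of that ordering rule follows (illustrative only, not part of the patch; struct dir_move, the parent_was_moved flag and the printf output are hypothetical stand-ins for the rb-tree based helpers and send stream commands above):

#include <stdio.h>

/* One deferred rename, keyed by the directory's own inode number. */
struct dir_move {
	unsigned long ino;        /* directory being renamed                 */
	unsigned long parent_ino; /* its new parent in the send root         */
	int parent_was_moved;     /* parent changed location between snaps   */
	const char *from, *to;
	int deferred, done;
};

/* The example from the send_ctx comment: c = 259, d = 260, c2 = 261. */
static struct dir_move moves[] = {
	{ 259, 260, 1, "/a/b/c",   "/a/b/c2/d2/cc" }, /* must wait for 260 */
	{ 260, 261, 0, "/a/b/c/d", "/a/b/c2/d2"    }, /* c2 did not move   */
};
static const size_t nr_moves = sizeof(moves) / sizeof(moves[0]);

int main(void)
{
	size_t i, j;

	/* Inodes are visited in increasing order, as send_progress does. */
	for (i = 0; i < nr_moves; i++) {
		struct dir_move *m = &moves[i];

		if (m->parent_ino > m->ino && m->parent_was_moved) {
			/* Roughly the wait_for_parent_move() test: park it. */
			m->deferred = 1;
			printf("defer  mv %s %s (parent %lu pending)\n",
			       m->from, m->to, m->parent_ino);
			continue;
		}
		printf("rename mv %s %s\n", m->from, m->to);
		m->done = 1;

		/* Like apply_children_dir_moves(): replay moves waiting on us. */
		for (j = 0; j < nr_moves; j++) {
			if (moves[j].deferred && !moves[j].done &&
			    moves[j].parent_ino == m->ino) {
				printf("replay mv %s %s\n",
				       moves[j].from, moves[j].to);
				moves[j].done = 1;
			}
		}
	}
	return 0;
}

Running the sketch prints the deferral of "c" (ino 259), the rename of "d" (ino 260), and only then the replayed rename of "c", which is exactly the ordering the comment in struct send_ctx requires.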
diff --git a/fs/btrfs/super.c b/fs/btrfs/super.c
index d71a11d..d04db81 100644
--- a/fs/btrfs/super.c
+++ b/fs/btrfs/super.c
@@ -48,6 +48,8 @@
 #include "transaction.h"
 #include "btrfs_inode.h"
 #include "print-tree.h"
+#include "hash.h"
+#include "props.h"
 #include "xattr.h"
 #include "volumes.h"
 #include "export.h"
@@ -152,11 +154,12 @@
 		vaf.fmt = fmt;
 		vaf.va = &args;
 
-		printk(KERN_CRIT "BTRFS error (device %s) in %s:%d: errno=%d %s (%pV)\n",
+		printk(KERN_CRIT
+			"BTRFS: error (device %s) in %s:%d: errno=%d %s (%pV)\n",
 			sb->s_id, function, line, errno, errstr, &vaf);
 		va_end(args);
 	} else {
-		printk(KERN_CRIT "BTRFS error (device %s) in %s:%d: errno=%d %s\n",
+		printk(KERN_CRIT "BTRFS: error (device %s) in %s:%d: errno=%d %s\n",
 			sb->s_id, function, line, errno, errstr);
 	}
 
@@ -250,7 +253,7 @@
 	 */
 	if (!test_and_set_bit(BTRFS_FS_STATE_TRANS_ABORTED,
 				&root->fs_info->fs_state)) {
-		WARN(1, KERN_DEBUG "btrfs: Transaction aborted (error %d)\n",
+		WARN(1, KERN_DEBUG "BTRFS: Transaction aborted (error %d)\n",
 				errno);
 	}
 	trans->aborted = errno;
@@ -294,8 +297,8 @@
 		panic(KERN_CRIT "BTRFS panic (device %s) in %s:%d: %pV (errno=%d %s)\n",
 			s_id, function, line, &vaf, errno, errstr);
 
-	printk(KERN_CRIT "BTRFS panic (device %s) in %s:%d: %pV (errno=%d %s)\n",
-	       s_id, function, line, &vaf, errno, errstr);
+	btrfs_crit(fs_info, "panic in %s:%d: %pV (errno=%d %s)",
+		   function, line, &vaf, errno, errstr);
 	va_end(args);
 	/* Caller calls BUG() */
 }
@@ -322,7 +325,9 @@
 	Opt_no_space_cache, Opt_recovery, Opt_skip_balance,
 	Opt_check_integrity, Opt_check_integrity_including_extent_data,
 	Opt_check_integrity_print_mask, Opt_fatal_errors, Opt_rescan_uuid_tree,
-	Opt_commit_interval,
+	Opt_commit_interval, Opt_barrier, Opt_nodefrag, Opt_nodiscard,
+	Opt_noenospc_debug, Opt_noflushoncommit, Opt_acl, Opt_datacow,
+	Opt_datasum, Opt_treelog, Opt_noinode_cache,
 	Opt_err,
 };
 
@@ -332,8 +337,11 @@
 	{Opt_subvolid, "subvolid=%s"},
 	{Opt_device, "device=%s"},
 	{Opt_nodatasum, "nodatasum"},
+	{Opt_datasum, "datasum"},
 	{Opt_nodatacow, "nodatacow"},
+	{Opt_datacow, "datacow"},
 	{Opt_nobarrier, "nobarrier"},
+	{Opt_barrier, "barrier"},
 	{Opt_max_inline, "max_inline=%s"},
 	{Opt_alloc_start, "alloc_start=%s"},
 	{Opt_thread_pool, "thread_pool=%d"},
@@ -344,18 +352,25 @@
 	{Opt_ssd, "ssd"},
 	{Opt_ssd_spread, "ssd_spread"},
 	{Opt_nossd, "nossd"},
+	{Opt_acl, "acl"},
 	{Opt_noacl, "noacl"},
 	{Opt_notreelog, "notreelog"},
+	{Opt_treelog, "treelog"},
 	{Opt_flushoncommit, "flushoncommit"},
+	{Opt_noflushoncommit, "noflushoncommit"},
 	{Opt_ratio, "metadata_ratio=%d"},
 	{Opt_discard, "discard"},
+	{Opt_nodiscard, "nodiscard"},
 	{Opt_space_cache, "space_cache"},
 	{Opt_clear_cache, "clear_cache"},
 	{Opt_user_subvol_rm_allowed, "user_subvol_rm_allowed"},
 	{Opt_enospc_debug, "enospc_debug"},
+	{Opt_noenospc_debug, "noenospc_debug"},
 	{Opt_subvolrootid, "subvolrootid=%d"},
 	{Opt_defrag, "autodefrag"},
+	{Opt_nodefrag, "noautodefrag"},
 	{Opt_inode_cache, "inode_cache"},
+	{Opt_noinode_cache, "noinode_cache"},
 	{Opt_no_space_cache, "nospace_cache"},
 	{Opt_recovery, "recovery"},
 	{Opt_skip_balance, "skip_balance"},
@@ -368,6 +383,20 @@
 	{Opt_err, NULL},
 };
 
+#define btrfs_set_and_info(root, opt, fmt, args...)			\
+{									\
+	if (!btrfs_test_opt(root, opt))					\
+		btrfs_info(root->fs_info, fmt, ##args);			\
+	btrfs_set_opt(root->fs_info->mount_opt, opt);			\
+}
+
+#define btrfs_clear_and_info(root, opt, fmt, args...)			\
+{									\
+	if (btrfs_test_opt(root, opt))					\
+		btrfs_info(root->fs_info, fmt, ##args);			\
+	btrfs_clear_opt(root->fs_info->mount_opt, opt);			\
+}
+
 /*
  * Regular mount options parser.  Everything that is needed only when
  * reading in a new superblock is parsed here.
@@ -383,6 +412,7 @@
 	int ret = 0;
 	char *compress_type;
 	bool compress_force = false;
+	bool compress = false;
 
 	cache_gen = btrfs_super_cache_generation(root->fs_info->super_copy);
 	if (cache_gen)
@@ -409,7 +439,7 @@
 		token = match_token(p, tokens, args);
 		switch (token) {
 		case Opt_degraded:
-			printk(KERN_INFO "btrfs: allowing degraded mounts\n");
+			btrfs_info(root->fs_info, "allowing degraded mounts");
 			btrfs_set_opt(info->mount_opt, DEGRADED);
 			break;
 		case Opt_subvol:
@@ -422,27 +452,45 @@
 			 */
 			break;
 		case Opt_nodatasum:
-			printk(KERN_INFO "btrfs: setting nodatasum\n");
-			btrfs_set_opt(info->mount_opt, NODATASUM);
+			btrfs_set_and_info(root, NODATASUM,
+					   "setting nodatasum");
+			break;
+		case Opt_datasum:
+			if (btrfs_test_opt(root, NODATASUM)) {
+				if (btrfs_test_opt(root, NODATACOW))
+					btrfs_info(root->fs_info, "setting datasum, datacow enabled");
+				else
+					btrfs_info(root->fs_info, "setting datasum");
+			}
+			btrfs_clear_opt(info->mount_opt, NODATACOW);
+			btrfs_clear_opt(info->mount_opt, NODATASUM);
 			break;
 		case Opt_nodatacow:
-			if (!btrfs_test_opt(root, COMPRESS) ||
-				!btrfs_test_opt(root, FORCE_COMPRESS)) {
-					printk(KERN_INFO "btrfs: setting nodatacow, compression disabled\n");
-			} else {
-				printk(KERN_INFO "btrfs: setting nodatacow\n");
+			if (!btrfs_test_opt(root, NODATACOW)) {
+				if (!btrfs_test_opt(root, COMPRESS) ||
+				    !btrfs_test_opt(root, FORCE_COMPRESS)) {
+					btrfs_info(root->fs_info,
+						   "setting nodatacow, compression disabled");
+				} else {
+					btrfs_info(root->fs_info, "setting nodatacow");
+				}
 			}
 			btrfs_clear_opt(info->mount_opt, COMPRESS);
 			btrfs_clear_opt(info->mount_opt, FORCE_COMPRESS);
 			btrfs_set_opt(info->mount_opt, NODATACOW);
 			btrfs_set_opt(info->mount_opt, NODATASUM);
 			break;
+		case Opt_datacow:
+			btrfs_clear_and_info(root, NODATACOW,
+					     "setting datacow");
+			break;
 		case Opt_compress_force:
 		case Opt_compress_force_type:
 			compress_force = true;
 			/* Fallthrough */
 		case Opt_compress:
 		case Opt_compress_type:
+			compress = true;
 			if (token == Opt_compress ||
 			    token == Opt_compress_force ||
 			    strcmp(args[0].from, "zlib") == 0) {
@@ -469,34 +517,36 @@
 			}
 
 			if (compress_force) {
-				btrfs_set_opt(info->mount_opt, FORCE_COMPRESS);
-				pr_info("btrfs: force %s compression\n",
-					compress_type);
-			} else if (btrfs_test_opt(root, COMPRESS)) {
-				pr_info("btrfs: use %s compression\n",
-					compress_type);
+				btrfs_set_and_info(root, FORCE_COMPRESS,
+						   "force %s compression",
+						   compress_type);
+			} else if (compress) {
+				if (!btrfs_test_opt(root, COMPRESS))
+					btrfs_info(root->fs_info,
+						   "btrfs: use %s compression\n",
+						   compress_type);
 			}
 			break;
 		case Opt_ssd:
-			printk(KERN_INFO "btrfs: use ssd allocation scheme\n");
-			btrfs_set_opt(info->mount_opt, SSD);
+			btrfs_set_and_info(root, SSD,
+					   "use ssd allocation scheme");
 			break;
 		case Opt_ssd_spread:
-			printk(KERN_INFO "btrfs: use spread ssd "
-			       "allocation scheme\n");
-			btrfs_set_opt(info->mount_opt, SSD);
-			btrfs_set_opt(info->mount_opt, SSD_SPREAD);
+			btrfs_set_and_info(root, SSD_SPREAD,
+					   "use spread ssd allocation scheme");
 			break;
 		case Opt_nossd:
-			printk(KERN_INFO "btrfs: not using ssd allocation "
-			       "scheme\n");
-			btrfs_set_opt(info->mount_opt, NOSSD);
+			btrfs_clear_and_info(root, NOSSD,
+					     "not using ssd allocation scheme");
 			btrfs_clear_opt(info->mount_opt, SSD);
-			btrfs_clear_opt(info->mount_opt, SSD_SPREAD);
+			break;
+		case Opt_barrier:
+			btrfs_clear_and_info(root, NOBARRIER,
+					     "turning on barriers");
 			break;
 		case Opt_nobarrier:
-			printk(KERN_INFO "btrfs: turning off barriers\n");
-			btrfs_set_opt(info->mount_opt, NOBARRIER);
+			btrfs_set_and_info(root, NOBARRIER,
+					   "turning off barriers");
 			break;
 		case Opt_thread_pool:
 			ret = match_int(&args[0], &intarg);
@@ -516,11 +566,11 @@
 				kfree(num);
 
 				if (info->max_inline) {
-					info->max_inline = max_t(u64,
+					info->max_inline = min_t(u64,
 						info->max_inline,
 						root->sectorsize);
 				}
-				printk(KERN_INFO "btrfs: max_inline at %llu\n",
+				btrfs_info(root->fs_info, "max_inline at %llu",
 					info->max_inline);
 			} else {
 				ret = -ENOMEM;
@@ -534,24 +584,34 @@
 				info->alloc_start = memparse(num, NULL);
 				mutex_unlock(&info->chunk_mutex);
 				kfree(num);
-				printk(KERN_INFO
-					"btrfs: allocations start at %llu\n",
+				btrfs_info(root->fs_info, "allocations start at %llu",
 					info->alloc_start);
 			} else {
 				ret = -ENOMEM;
 				goto out;
 			}
 			break;
+		case Opt_acl:
+			root->fs_info->sb->s_flags |= MS_POSIXACL;
+			break;
 		case Opt_noacl:
 			root->fs_info->sb->s_flags &= ~MS_POSIXACL;
 			break;
 		case Opt_notreelog:
-			printk(KERN_INFO "btrfs: disabling tree log\n");
-			btrfs_set_opt(info->mount_opt, NOTREELOG);
+			btrfs_set_and_info(root, NOTREELOG,
+					   "disabling tree log");
+			break;
+		case Opt_treelog:
+			btrfs_clear_and_info(root, NOTREELOG,
+					     "enabling tree log");
 			break;
 		case Opt_flushoncommit:
-			printk(KERN_INFO "btrfs: turning on flush-on-commit\n");
-			btrfs_set_opt(info->mount_opt, FLUSHONCOMMIT);
+			btrfs_set_and_info(root, FLUSHONCOMMIT,
+					   "turning on flush-on-commit");
+			break;
+		case Opt_noflushoncommit:
+			btrfs_clear_and_info(root, FLUSHONCOMMIT,
+					     "turning off flush-on-commit");
 			break;
 		case Opt_ratio:
 			ret = match_int(&args[0], &intarg);
@@ -559,7 +619,7 @@
 				goto out;
 			} else if (intarg >= 0) {
 				info->metadata_ratio = intarg;
-				printk(KERN_INFO "btrfs: metadata ratio %d\n",
+				btrfs_info(root->fs_info, "metadata ratio %d",
 				       info->metadata_ratio);
 			} else {
 				ret = -EINVAL;
@@ -567,25 +627,35 @@
 			}
 			break;
 		case Opt_discard:
-			btrfs_set_opt(info->mount_opt, DISCARD);
+			btrfs_set_and_info(root, DISCARD,
+					   "turning on discard");
+			break;
+		case Opt_nodiscard:
+			btrfs_clear_and_info(root, DISCARD,
+					     "turning off discard");
 			break;
 		case Opt_space_cache:
-			btrfs_set_opt(info->mount_opt, SPACE_CACHE);
+			btrfs_set_and_info(root, SPACE_CACHE,
+					   "enabling disk space caching");
 			break;
 		case Opt_rescan_uuid_tree:
 			btrfs_set_opt(info->mount_opt, RESCAN_UUID_TREE);
 			break;
 		case Opt_no_space_cache:
-			printk(KERN_INFO "btrfs: disabling disk space caching\n");
-			btrfs_clear_opt(info->mount_opt, SPACE_CACHE);
+			btrfs_clear_and_info(root, SPACE_CACHE,
+					     "disabling disk space caching");
 			break;
 		case Opt_inode_cache:
-			printk(KERN_INFO "btrfs: enabling inode map caching\n");
-			btrfs_set_opt(info->mount_opt, INODE_MAP_CACHE);
+			btrfs_set_and_info(root, CHANGE_INODE_CACHE,
+					   "enabling inode map caching");
+			break;
+		case Opt_noinode_cache:
+			btrfs_clear_and_info(root, CHANGE_INODE_CACHE,
+					     "disabling inode map caching");
 			break;
 		case Opt_clear_cache:
-			printk(KERN_INFO "btrfs: force clearing of disk cache\n");
-			btrfs_set_opt(info->mount_opt, CLEAR_CACHE);
+			btrfs_set_and_info(root, CLEAR_CACHE,
+					   "force clearing of disk cache");
 			break;
 		case Opt_user_subvol_rm_allowed:
 			btrfs_set_opt(info->mount_opt, USER_SUBVOL_RM_ALLOWED);
@@ -593,12 +663,19 @@
 		case Opt_enospc_debug:
 			btrfs_set_opt(info->mount_opt, ENOSPC_DEBUG);
 			break;
+		case Opt_noenospc_debug:
+			btrfs_clear_opt(info->mount_opt, ENOSPC_DEBUG);
+			break;
 		case Opt_defrag:
-			printk(KERN_INFO "btrfs: enabling auto defrag\n");
-			btrfs_set_opt(info->mount_opt, AUTO_DEFRAG);
+			btrfs_set_and_info(root, AUTO_DEFRAG,
+					   "enabling auto defrag");
+			break;
+		case Opt_nodefrag:
+			btrfs_clear_and_info(root, AUTO_DEFRAG,
+					     "disabling auto defrag");
 			break;
 		case Opt_recovery:
-			printk(KERN_INFO "btrfs: enabling auto recovery\n");
+			btrfs_info(root->fs_info, "enabling auto recovery");
 			btrfs_set_opt(info->mount_opt, RECOVERY);
 			break;
 		case Opt_skip_balance:
@@ -606,14 +683,14 @@
 			break;
 #ifdef CONFIG_BTRFS_FS_CHECK_INTEGRITY
 		case Opt_check_integrity_including_extent_data:
-			printk(KERN_INFO "btrfs: enabling check integrity"
-			       " including extent data\n");
+			btrfs_info(root->fs_info,
+				   "enabling check integrity including extent data");
 			btrfs_set_opt(info->mount_opt,
 				      CHECK_INTEGRITY_INCLUDING_EXTENT_DATA);
 			btrfs_set_opt(info->mount_opt, CHECK_INTEGRITY);
 			break;
 		case Opt_check_integrity:
-			printk(KERN_INFO "btrfs: enabling check integrity\n");
+			btrfs_info(root->fs_info, "enabling check integrity");
 			btrfs_set_opt(info->mount_opt, CHECK_INTEGRITY);
 			break;
 		case Opt_check_integrity_print_mask:
@@ -622,8 +699,7 @@
 				goto out;
 			} else if (intarg >= 0) {
 				info->check_integrity_print_mask = intarg;
-				printk(KERN_INFO "btrfs:"
-				       " check_integrity_print_mask 0x%x\n",
+				btrfs_info(root->fs_info, "check_integrity_print_mask 0x%x",
 				       info->check_integrity_print_mask);
 			} else {
 				ret = -EINVAL;
@@ -634,8 +710,8 @@
 		case Opt_check_integrity_including_extent_data:
 		case Opt_check_integrity:
 		case Opt_check_integrity_print_mask:
-			printk(KERN_ERR "btrfs: support for check_integrity*"
-			       " not compiled in!\n");
+			btrfs_err(root->fs_info,
+				"support for check_integrity* not compiled in!");
 			ret = -EINVAL;
 			goto out;
 #endif
@@ -655,28 +731,24 @@
 			intarg = 0;
 			ret = match_int(&args[0], &intarg);
 			if (ret < 0) {
-				printk(KERN_ERR
-					"btrfs: invalid commit interval\n");
+				btrfs_err(root->fs_info, "invalid commit interval");
 				ret = -EINVAL;
 				goto out;
 			}
 			if (intarg > 0) {
 				if (intarg > 300) {
-					printk(KERN_WARNING
-					    "btrfs: excessive commit interval %d\n",
+					btrfs_warn(root->fs_info, "excessive commit interval %d",
 							intarg);
 				}
 				info->commit_interval = intarg;
 			} else {
-				printk(KERN_INFO
-				    "btrfs: using default commit interval %ds\n",
+				btrfs_info(root->fs_info, "using default commit interval %ds",
 				    BTRFS_DEFAULT_COMMIT_INTERVAL);
 				info->commit_interval = BTRFS_DEFAULT_COMMIT_INTERVAL;
 			}
 			break;
 		case Opt_err:
-			printk(KERN_INFO "btrfs: unrecognized mount option "
-			       "'%s'\n", p);
+			btrfs_info(root->fs_info, "unrecognized mount option '%s'", p);
 			ret = -EINVAL;
 			goto out;
 		default:
@@ -685,7 +757,7 @@
 	}
 out:
 	if (!ret && btrfs_test_opt(root, SPACE_CACHE))
-		printk(KERN_INFO "btrfs: disk space caching is enabled\n");
+		btrfs_info(root->fs_info, "disk space caching is enabled");
 	kfree(orig);
 	return ret;
 }
@@ -748,7 +820,8 @@
 			break;
 		case Opt_subvolrootid:
 			printk(KERN_WARNING
-				"btrfs: 'subvolrootid' mount option is deprecated and has no effect\n");
+				"BTRFS: 'subvolrootid' mount option is deprecated and has "
+				"no effect\n");
 			break;
 		case Opt_device:
 			device_name = match_strdup(&args[0]);
@@ -782,6 +855,7 @@
 	struct btrfs_path *path;
 	struct btrfs_key location;
 	struct inode *inode;
+	struct dentry *dentry;
 	u64 dir_id;
 	int new = 0;
 
@@ -852,7 +926,13 @@
 		return dget(sb->s_root);
 	}
 
-	return d_obtain_alias(inode);
+	dentry = d_obtain_alias(inode);
+	if (!IS_ERR(dentry)) {
+		spin_lock(&dentry->d_lock);
+		dentry->d_flags &= ~DCACHE_DISCONNECTED;
+		spin_unlock(&dentry->d_lock);
+	}
+	return dentry;
 }
 
 static int btrfs_fill_super(struct super_block *sb,
@@ -877,7 +957,7 @@
 	sb->s_flags |= MS_I_VERSION;
 	err = open_ctree(sb, fs_devices, (char *)data);
 	if (err) {
-		printk("btrfs: open_ctree failed\n");
+		printk(KERN_ERR "BTRFS: open_ctree failed\n");
 		return err;
 	}
 
@@ -1115,7 +1195,7 @@
 		dput(root);
 		root = ERR_PTR(-EINVAL);
 		deactivate_locked_super(s);
-		printk(KERN_ERR "btrfs: '%s' is not a valid subvolume\n",
+		printk(KERN_ERR "BTRFS: '%s' is not a valid subvolume\n",
 				subvol_name);
 	}
 
@@ -1240,7 +1320,7 @@
 
 	fs_info->thread_pool_size = new_pool_size;
 
-	printk(KERN_INFO "btrfs: resize thread pool %d -> %d\n",
+	btrfs_info(fs_info, "resize thread pool %d -> %d",
 	       old_pool_size, new_pool_size);
 
 	btrfs_set_max_workers(&fs_info->generic_worker, new_pool_size);
@@ -1346,7 +1426,7 @@
 	} else {
 		if (test_bit(BTRFS_FS_STATE_ERROR, &root->fs_info->fs_state)) {
 			btrfs_err(fs_info,
-				"Remounting read-write after error is not allowed\n");
+				"Remounting read-write after error is not allowed");
 			ret = -EINVAL;
 			goto restore;
 		}
@@ -1358,8 +1438,8 @@
 		if (fs_info->fs_devices->missing_devices >
 		     fs_info->num_tolerated_disk_barrier_failures &&
 		    !(*flags & MS_RDONLY)) {
-			printk(KERN_WARNING
-			       "Btrfs: too many missing devices, writeable remount is not allowed\n");
+			btrfs_warn(fs_info,
+				"too many missing devices, writeable remount is not allowed");
 			ret = -EACCES;
 			goto restore;
 		}
@@ -1384,16 +1464,15 @@
 
 		ret = btrfs_resume_dev_replace_async(fs_info);
 		if (ret) {
-			pr_warn("btrfs: failed to resume dev_replace\n");
+			btrfs_warn(fs_info, "failed to resume dev_replace");
 			goto restore;
 		}
 
 		if (!fs_info->uuid_root) {
-			pr_info("btrfs: creating UUID tree\n");
+			btrfs_info(fs_info, "creating UUID tree");
 			ret = btrfs_create_uuid_tree(fs_info);
 			if (ret) {
-				pr_warn("btrfs: failed to create the uuid tree"
-					"%d\n", ret);
+				btrfs_warn(fs_info, "failed to create the UUID tree %d", ret);
 				goto restore;
 			}
 		}
@@ -1773,7 +1852,7 @@
 static void btrfs_interface_exit(void)
 {
 	if (misc_deregister(&btrfs_misc) < 0)
-		printk(KERN_INFO "btrfs: misc_deregister failed for control device\n");
+		printk(KERN_INFO "BTRFS: misc_deregister failed for control device\n");
 }
 
 static void btrfs_print_info(void)
@@ -1818,10 +1897,16 @@
 {
 	int err;
 
-	err = btrfs_init_sysfs();
+	err = btrfs_hash_init();
 	if (err)
 		return err;
 
+	btrfs_props_init();
+
+	err = btrfs_init_sysfs();
+	if (err)
+		goto free_hash;
+
 	btrfs_init_compress();
 
 	err = btrfs_init_cachep();
@@ -1895,6 +1980,8 @@
 free_compress:
 	btrfs_exit_compress();
 	btrfs_exit_sysfs();
+free_hash:
+	btrfs_hash_exit();
 	return err;
 }
 
@@ -1913,9 +2000,10 @@
 	btrfs_exit_sysfs();
 	btrfs_cleanup_fs_uuids();
 	btrfs_exit_compress();
+	btrfs_hash_exit();
 }
 
-module_init(init_btrfs_fs)
+late_initcall(init_btrfs_fs);
 module_exit(exit_btrfs_fs)
 
 MODULE_LICENSE("GPL");
diff --git a/fs/btrfs/sysfs.c b/fs/btrfs/sysfs.c
index 5b326cd..865f4cf 100644
--- a/fs/btrfs/sysfs.c
+++ b/fs/btrfs/sysfs.c
@@ -22,24 +22,647 @@
 #include <linux/completion.h>
 #include <linux/buffer_head.h>
 #include <linux/kobject.h>
+#include <linux/bug.h>
+#include <linux/genhd.h>
 
 #include "ctree.h"
 #include "disk-io.h"
 #include "transaction.h"
+#include "sysfs.h"
+#include "volumes.h"
+
+static inline struct btrfs_fs_info *to_fs_info(struct kobject *kobj);
+
+static u64 get_features(struct btrfs_fs_info *fs_info,
+			enum btrfs_feature_set set)
+{
+	struct btrfs_super_block *disk_super = fs_info->super_copy;
+	if (set == FEAT_COMPAT)
+		return btrfs_super_compat_flags(disk_super);
+	else if (set == FEAT_COMPAT_RO)
+		return btrfs_super_compat_ro_flags(disk_super);
+	else
+		return btrfs_super_incompat_flags(disk_super);
+}
+
+static void set_features(struct btrfs_fs_info *fs_info,
+			 enum btrfs_feature_set set, u64 features)
+{
+	struct btrfs_super_block *disk_super = fs_info->super_copy;
+	if (set == FEAT_COMPAT)
+		btrfs_set_super_compat_flags(disk_super, features);
+	else if (set == FEAT_COMPAT_RO)
+		btrfs_set_super_compat_ro_flags(disk_super, features);
+	else
+		btrfs_set_super_incompat_flags(disk_super, features);
+}
+
+static int can_modify_feature(struct btrfs_feature_attr *fa)
+{
+	int val = 0;
+	u64 set, clear;
+	switch (fa->feature_set) {
+	case FEAT_COMPAT:
+		set = BTRFS_FEATURE_COMPAT_SAFE_SET;
+		clear = BTRFS_FEATURE_COMPAT_SAFE_CLEAR;
+		break;
+	case FEAT_COMPAT_RO:
+		set = BTRFS_FEATURE_COMPAT_RO_SAFE_SET;
+		clear = BTRFS_FEATURE_COMPAT_RO_SAFE_CLEAR;
+		break;
+	case FEAT_INCOMPAT:
+		set = BTRFS_FEATURE_INCOMPAT_SAFE_SET;
+		clear = BTRFS_FEATURE_INCOMPAT_SAFE_CLEAR;
+		break;
+	default:
+		printk(KERN_WARNING "btrfs: sysfs: unknown feature set %d\n",
+				fa->feature_set);
+		return 0;
+	}
+
+	if (set & fa->feature_bit)
+		val |= 1;
+	if (clear & fa->feature_bit)
+		val |= 2;
+
+	return val;
+}
+
+static ssize_t btrfs_feature_attr_show(struct kobject *kobj,
+				       struct kobj_attribute *a, char *buf)
+{
+	int val = 0;
+	struct btrfs_fs_info *fs_info = to_fs_info(kobj);
+	struct btrfs_feature_attr *fa = to_btrfs_feature_attr(a);
+	if (fs_info) {
+		u64 features = get_features(fs_info, fa->feature_set);
+		if (features & fa->feature_bit)
+			val = 1;
+	} else
+		val = can_modify_feature(fa);
+
+	return snprintf(buf, PAGE_SIZE, "%d\n", val);
+}
+
+static ssize_t btrfs_feature_attr_store(struct kobject *kobj,
+					struct kobj_attribute *a,
+					const char *buf, size_t count)
+{
+	struct btrfs_fs_info *fs_info;
+	struct btrfs_feature_attr *fa = to_btrfs_feature_attr(a);
+	struct btrfs_trans_handle *trans;
+	u64 features, set, clear;
+	unsigned long val;
+	int ret;
+
+	fs_info = to_fs_info(kobj);
+	if (!fs_info)
+		return -EPERM;
+
+	ret = kstrtoul(skip_spaces(buf), 0, &val);
+	if (ret)
+		return ret;
+
+	if (fa->feature_set == FEAT_COMPAT) {
+		set = BTRFS_FEATURE_COMPAT_SAFE_SET;
+		clear = BTRFS_FEATURE_COMPAT_SAFE_CLEAR;
+	} else if (fa->feature_set == FEAT_COMPAT_RO) {
+		set = BTRFS_FEATURE_COMPAT_RO_SAFE_SET;
+		clear = BTRFS_FEATURE_COMPAT_RO_SAFE_CLEAR;
+	} else {
+		set = BTRFS_FEATURE_INCOMPAT_SAFE_SET;
+		clear = BTRFS_FEATURE_INCOMPAT_SAFE_CLEAR;
+	}
+
+	features = get_features(fs_info, fa->feature_set);
+
+	/* Nothing to do */
+	if ((val && (features & fa->feature_bit)) ||
+	    (!val && !(features & fa->feature_bit)))
+		return count;
+
+	if ((val && !(set & fa->feature_bit)) ||
+	    (!val && !(clear & fa->feature_bit))) {
+		btrfs_info(fs_info,
+			"%sabling feature %s on mounted fs is not supported.",
+			val ? "En" : "Dis", fa->kobj_attr.attr.name);
+		return -EPERM;
+	}
+
+	btrfs_info(fs_info, "%s %s feature flag",
+		   val ? "Setting" : "Clearing", fa->kobj_attr.attr.name);
+
+	trans = btrfs_start_transaction(fs_info->fs_root, 0);
+	if (IS_ERR(trans))
+		return PTR_ERR(trans);
+
+	spin_lock(&fs_info->super_lock);
+	features = get_features(fs_info, fa->feature_set);
+	if (val)
+		features |= fa->feature_bit;
+	else
+		features &= ~fa->feature_bit;
+	set_features(fs_info, fa->feature_set, features);
+	spin_unlock(&fs_info->super_lock);
+
+	ret = btrfs_commit_transaction(trans, fs_info->fs_root);
+	if (ret)
+		return ret;
+
+	return count;
+}
+
+static umode_t btrfs_feature_visible(struct kobject *kobj,
+				     struct attribute *attr, int unused)
+{
+	struct btrfs_fs_info *fs_info = to_fs_info(kobj);
+	umode_t mode = attr->mode;
+
+	if (fs_info) {
+		struct btrfs_feature_attr *fa;
+		u64 features;
+
+		fa = attr_to_btrfs_feature_attr(attr);
+		features = get_features(fs_info, fa->feature_set);
+
+		if (can_modify_feature(fa))
+			mode |= S_IWUSR;
+		else if (!(features & fa->feature_bit))
+			mode = 0;
+	}
+
+	return mode;
+}
+
+BTRFS_FEAT_ATTR_INCOMPAT(mixed_backref, MIXED_BACKREF);
+BTRFS_FEAT_ATTR_INCOMPAT(default_subvol, DEFAULT_SUBVOL);
+BTRFS_FEAT_ATTR_INCOMPAT(mixed_groups, MIXED_GROUPS);
+BTRFS_FEAT_ATTR_INCOMPAT(compress_lzo, COMPRESS_LZO);
+BTRFS_FEAT_ATTR_INCOMPAT(big_metadata, BIG_METADATA);
+BTRFS_FEAT_ATTR_INCOMPAT(extended_iref, EXTENDED_IREF);
+BTRFS_FEAT_ATTR_INCOMPAT(raid56, RAID56);
+BTRFS_FEAT_ATTR_INCOMPAT(skinny_metadata, SKINNY_METADATA);
+BTRFS_FEAT_ATTR_INCOMPAT(no_holes, NO_HOLES);
+
+static struct attribute *btrfs_supported_feature_attrs[] = {
+	BTRFS_FEAT_ATTR_PTR(mixed_backref),
+	BTRFS_FEAT_ATTR_PTR(default_subvol),
+	BTRFS_FEAT_ATTR_PTR(mixed_groups),
+	BTRFS_FEAT_ATTR_PTR(compress_lzo),
+	BTRFS_FEAT_ATTR_PTR(big_metadata),
+	BTRFS_FEAT_ATTR_PTR(extended_iref),
+	BTRFS_FEAT_ATTR_PTR(raid56),
+	BTRFS_FEAT_ATTR_PTR(skinny_metadata),
+	BTRFS_FEAT_ATTR_PTR(no_holes),
+	NULL
+};
+
+static const struct attribute_group btrfs_feature_attr_group = {
+	.name = "features",
+	.is_visible = btrfs_feature_visible,
+	.attrs = btrfs_supported_feature_attrs,
+};
+
+static ssize_t btrfs_show_u64(u64 *value_ptr, spinlock_t *lock, char *buf)
+{
+	u64 val;
+	if (lock)
+		spin_lock(lock);
+	val = *value_ptr;
+	if (lock)
+		spin_unlock(lock);
+	return snprintf(buf, PAGE_SIZE, "%llu\n", val);
+}
+
+static ssize_t global_rsv_size_show(struct kobject *kobj,
+				    struct kobj_attribute *ka, char *buf)
+{
+	struct btrfs_fs_info *fs_info = to_fs_info(kobj->parent);
+	struct btrfs_block_rsv *block_rsv = &fs_info->global_block_rsv;
+	return btrfs_show_u64(&block_rsv->size, &block_rsv->lock, buf);
+}
+BTRFS_ATTR(global_rsv_size, 0444, global_rsv_size_show);
+
+static ssize_t global_rsv_reserved_show(struct kobject *kobj,
+					struct kobj_attribute *a, char *buf)
+{
+	struct btrfs_fs_info *fs_info = to_fs_info(kobj->parent);
+	struct btrfs_block_rsv *block_rsv = &fs_info->global_block_rsv;
+	return btrfs_show_u64(&block_rsv->reserved, &block_rsv->lock, buf);
+}
+BTRFS_ATTR(global_rsv_reserved, 0444, global_rsv_reserved_show);
+
+#define to_space_info(_kobj) container_of(_kobj, struct btrfs_space_info, kobj)
+
+static ssize_t raid_bytes_show(struct kobject *kobj,
+			       struct kobj_attribute *attr, char *buf);
+BTRFS_RAID_ATTR(total_bytes, raid_bytes_show);
+BTRFS_RAID_ATTR(used_bytes, raid_bytes_show);
+
+static ssize_t raid_bytes_show(struct kobject *kobj,
+			       struct kobj_attribute *attr, char *buf)
+
+{
+	struct btrfs_space_info *sinfo = to_space_info(kobj->parent);
+	struct btrfs_block_group_cache *block_group;
+	int index = kobj - sinfo->block_group_kobjs;
+	u64 val = 0;
+
+	down_read(&sinfo->groups_sem);
+	list_for_each_entry(block_group, &sinfo->block_groups[index], list) {
+		if (&attr->attr == BTRFS_RAID_ATTR_PTR(total_bytes))
+			val += block_group->key.offset;
+		else
+			val += btrfs_block_group_used(&block_group->item);
+	}
+	up_read(&sinfo->groups_sem);
+	return snprintf(buf, PAGE_SIZE, "%llu\n", val);
+}
+
+static struct attribute *raid_attributes[] = {
+	BTRFS_RAID_ATTR_PTR(total_bytes),
+	BTRFS_RAID_ATTR_PTR(used_bytes),
+	NULL
+};
+
+static void release_raid_kobj(struct kobject *kobj)
+{
+	kobject_put(kobj->parent);
+}
+
+struct kobj_type btrfs_raid_ktype = {
+	.sysfs_ops = &kobj_sysfs_ops,
+	.release = release_raid_kobj,
+	.default_attrs = raid_attributes,
+};
+
+#define SPACE_INFO_ATTR(field)						\
+static ssize_t btrfs_space_info_show_##field(struct kobject *kobj,	\
+					     struct kobj_attribute *a,	\
+					     char *buf)			\
+{									\
+	struct btrfs_space_info *sinfo = to_space_info(kobj);		\
+	return btrfs_show_u64(&sinfo->field, &sinfo->lock, buf);	\
+}									\
+BTRFS_ATTR(field, 0444, btrfs_space_info_show_##field)
+
+static ssize_t btrfs_space_info_show_total_bytes_pinned(struct kobject *kobj,
+						       struct kobj_attribute *a,
+						       char *buf)
+{
+	struct btrfs_space_info *sinfo = to_space_info(kobj);
+	s64 val = percpu_counter_sum(&sinfo->total_bytes_pinned);
+	return snprintf(buf, PAGE_SIZE, "%lld\n", val);
+}
+
+SPACE_INFO_ATTR(flags);
+SPACE_INFO_ATTR(total_bytes);
+SPACE_INFO_ATTR(bytes_used);
+SPACE_INFO_ATTR(bytes_pinned);
+SPACE_INFO_ATTR(bytes_reserved);
+SPACE_INFO_ATTR(bytes_may_use);
+SPACE_INFO_ATTR(disk_used);
+SPACE_INFO_ATTR(disk_total);
+BTRFS_ATTR(total_bytes_pinned, 0444, btrfs_space_info_show_total_bytes_pinned);
+
+static struct attribute *space_info_attrs[] = {
+	BTRFS_ATTR_PTR(flags),
+	BTRFS_ATTR_PTR(total_bytes),
+	BTRFS_ATTR_PTR(bytes_used),
+	BTRFS_ATTR_PTR(bytes_pinned),
+	BTRFS_ATTR_PTR(bytes_reserved),
+	BTRFS_ATTR_PTR(bytes_may_use),
+	BTRFS_ATTR_PTR(disk_used),
+	BTRFS_ATTR_PTR(disk_total),
+	BTRFS_ATTR_PTR(total_bytes_pinned),
+	NULL,
+};
+
+static void space_info_release(struct kobject *kobj)
+{
+	struct btrfs_space_info *sinfo = to_space_info(kobj);
+	percpu_counter_destroy(&sinfo->total_bytes_pinned);
+	kfree(sinfo);
+}
+
+struct kobj_type space_info_ktype = {
+	.sysfs_ops = &kobj_sysfs_ops,
+	.release = space_info_release,
+	.default_attrs = space_info_attrs,
+};
+
+static const struct attribute *allocation_attrs[] = {
+	BTRFS_ATTR_PTR(global_rsv_reserved),
+	BTRFS_ATTR_PTR(global_rsv_size),
+	NULL,
+};
+
+static ssize_t btrfs_label_show(struct kobject *kobj,
+				struct kobj_attribute *a, char *buf)
+{
+	struct btrfs_fs_info *fs_info = to_fs_info(kobj);
+	return snprintf(buf, PAGE_SIZE, "%s\n", fs_info->super_copy->label);
+}
+
+static ssize_t btrfs_label_store(struct kobject *kobj,
+				 struct kobj_attribute *a,
+				 const char *buf, size_t len)
+{
+	struct btrfs_fs_info *fs_info = to_fs_info(kobj);
+	struct btrfs_trans_handle *trans;
+	struct btrfs_root *root = fs_info->fs_root;
+	int ret;
+
+	if (len >= BTRFS_LABEL_SIZE) {
+		pr_err("BTRFS: unable to set label with more than %d bytes\n",
+		       BTRFS_LABEL_SIZE - 1);
+		return -EINVAL;
+	}
+
+	trans = btrfs_start_transaction(root, 0);
+	if (IS_ERR(trans))
+		return PTR_ERR(trans);
+
+	spin_lock(&root->fs_info->super_lock);
+	strcpy(fs_info->super_copy->label, buf);
+	spin_unlock(&root->fs_info->super_lock);
+	ret = btrfs_commit_transaction(trans, root);
+
+	if (!ret)
+		return len;
+
+	return ret;
+}
+BTRFS_ATTR_RW(label, 0644, btrfs_label_show, btrfs_label_store);
+
+static struct attribute *btrfs_attrs[] = {
+	BTRFS_ATTR_PTR(label),
+	NULL,
+};
+
+static void btrfs_release_super_kobj(struct kobject *kobj)
+{
+	struct btrfs_fs_info *fs_info = to_fs_info(kobj);
+	complete(&fs_info->kobj_unregister);
+}
+
+static struct kobj_type btrfs_ktype = {
+	.sysfs_ops	= &kobj_sysfs_ops,
+	.release	= btrfs_release_super_kobj,
+	.default_attrs	= btrfs_attrs,
+};
+
+static inline struct btrfs_fs_info *to_fs_info(struct kobject *kobj)
+{
+	if (kobj->ktype != &btrfs_ktype)
+		return NULL;
+	return container_of(kobj, struct btrfs_fs_info, super_kobj);
+}
+
+#define NUM_FEATURE_BITS 64
+static char btrfs_unknown_feature_names[3][NUM_FEATURE_BITS][13];
+static struct btrfs_feature_attr btrfs_feature_attrs[3][NUM_FEATURE_BITS];
+
+static u64 supported_feature_masks[3] = {
+	[FEAT_COMPAT]    = BTRFS_FEATURE_COMPAT_SUPP,
+	[FEAT_COMPAT_RO] = BTRFS_FEATURE_COMPAT_RO_SUPP,
+	[FEAT_INCOMPAT]  = BTRFS_FEATURE_INCOMPAT_SUPP,
+};
+
+static int addrm_unknown_feature_attrs(struct btrfs_fs_info *fs_info, bool add)
+{
+	int set;
+
+	for (set = 0; set < FEAT_MAX; set++) {
+		int i;
+		struct attribute *attrs[2];
+		struct attribute_group agroup = {
+			.name = "features",
+			.attrs = attrs,
+		};
+		u64 features = get_features(fs_info, set);
+		features &= ~supported_feature_masks[set];
+
+		if (!features)
+			continue;
+
+		attrs[1] = NULL;
+		for (i = 0; i < NUM_FEATURE_BITS; i++) {
+			struct btrfs_feature_attr *fa;
+
+			if (!(features & (1ULL << i)))
+				continue;
+
+			fa = &btrfs_feature_attrs[set][i];
+			attrs[0] = &fa->kobj_attr.attr;
+			if (add) {
+				int ret;
+				ret = sysfs_merge_group(&fs_info->super_kobj,
+							&agroup);
+				if (ret)
+					return ret;
+			} else
+				sysfs_unmerge_group(&fs_info->super_kobj,
+						    &agroup);
+		}
+
+	}
+	return 0;
+}
+
+static void __btrfs_sysfs_remove_one(struct btrfs_fs_info *fs_info)
+{
+	kobject_del(&fs_info->super_kobj);
+	kobject_put(&fs_info->super_kobj);
+	wait_for_completion(&fs_info->kobj_unregister);
+}
+
+void btrfs_sysfs_remove_one(struct btrfs_fs_info *fs_info)
+{
+	if (fs_info->space_info_kobj) {
+		sysfs_remove_files(fs_info->space_info_kobj, allocation_attrs);
+		kobject_del(fs_info->space_info_kobj);
+		kobject_put(fs_info->space_info_kobj);
+	}
+	kobject_del(fs_info->device_dir_kobj);
+	kobject_put(fs_info->device_dir_kobj);
+	addrm_unknown_feature_attrs(fs_info, false);
+	sysfs_remove_group(&fs_info->super_kobj, &btrfs_feature_attr_group);
+	__btrfs_sysfs_remove_one(fs_info);
+}
+
+const char * const btrfs_feature_set_names[3] = {
+	[FEAT_COMPAT]	 = "compat",
+	[FEAT_COMPAT_RO] = "compat_ro",
+	[FEAT_INCOMPAT]	 = "incompat",
+};
+
+char *btrfs_printable_features(enum btrfs_feature_set set, u64 flags)
+{
+	size_t bufsize = 4096; /* safe max, 64 names * 64 bytes */
+	int len = 0;
+	int i;
+	char *str;
+
+	str = kmalloc(bufsize, GFP_KERNEL);
+	if (!str)
+		return str;
+
+	for (i = 0; i < ARRAY_SIZE(btrfs_feature_attrs[set]); i++) {
+		const char *name;
+
+		if (!(flags & (1ULL << i)))
+			continue;
+
+		name = btrfs_feature_attrs[set][i].kobj_attr.attr.name;
+		len += snprintf(str + len, bufsize - len, "%s%s",
+				len ? "," : "", name);
+	}
+
+	return str;
+}
+
+static void init_feature_attrs(void)
+{
+	struct btrfs_feature_attr *fa;
+	int set, i;
+
+	BUILD_BUG_ON(ARRAY_SIZE(btrfs_unknown_feature_names) !=
+		     ARRAY_SIZE(btrfs_feature_attrs));
+	BUILD_BUG_ON(ARRAY_SIZE(btrfs_unknown_feature_names[0]) !=
+		     ARRAY_SIZE(btrfs_feature_attrs[0]));
+
+	memset(btrfs_feature_attrs, 0, sizeof(btrfs_feature_attrs));
+	memset(btrfs_unknown_feature_names, 0,
+	       sizeof(btrfs_unknown_feature_names));
+
+	for (i = 0; btrfs_supported_feature_attrs[i]; i++) {
+		struct btrfs_feature_attr *sfa;
+		struct attribute *a = btrfs_supported_feature_attrs[i];
+		int bit;
+		sfa = attr_to_btrfs_feature_attr(a);
+		bit = ilog2(sfa->feature_bit);
+		fa = &btrfs_feature_attrs[sfa->feature_set][bit];
+
+		fa->kobj_attr.attr.name = sfa->kobj_attr.attr.name;
+	}
+
+	for (set = 0; set < FEAT_MAX; set++) {
+		for (i = 0; i < ARRAY_SIZE(btrfs_feature_attrs[set]); i++) {
+			char *name = btrfs_unknown_feature_names[set][i];
+			fa = &btrfs_feature_attrs[set][i];
+
+			if (fa->kobj_attr.attr.name)
+				continue;
+
+			snprintf(name, 13, "%s:%u",
+				 btrfs_feature_set_names[set], i);
+
+			fa->kobj_attr.attr.name = name;
+			fa->kobj_attr.attr.mode = S_IRUGO;
+			fa->feature_set = set;
+			fa->feature_bit = 1ULL << i;
+		}
+	}
+}
+
+static int add_device_membership(struct btrfs_fs_info *fs_info)
+{
+	int error = 0;
+	struct btrfs_fs_devices *fs_devices = fs_info->fs_devices;
+	struct btrfs_device *dev;
+
+	fs_info->device_dir_kobj = kobject_create_and_add("devices",
+						&fs_info->super_kobj);
+	if (!fs_info->device_dir_kobj)
+		return -ENOMEM;
+
+	list_for_each_entry(dev, &fs_devices->devices, dev_list) {
+		struct hd_struct *disk;
+		struct kobject *disk_kobj;
+
+		if (!dev->bdev)
+			continue;
+
+		disk = dev->bdev->bd_part;
+		disk_kobj = &part_to_dev(disk)->kobj;
+
+		error = sysfs_create_link(fs_info->device_dir_kobj,
+					  disk_kobj, disk_kobj->name);
+		if (error)
+			break;
+	}
+
+	return error;
+}
 
 /* /sys/fs/btrfs/ entry */
 static struct kset *btrfs_kset;
 
+int btrfs_sysfs_add_one(struct btrfs_fs_info *fs_info)
+{
+	int error;
+
+	init_completion(&fs_info->kobj_unregister);
+	fs_info->super_kobj.kset = btrfs_kset;
+	error = kobject_init_and_add(&fs_info->super_kobj, &btrfs_ktype, NULL,
+				     "%pU", fs_info->fsid);
+	if (error)
+		return error;
+
+	error = sysfs_create_group(&fs_info->super_kobj,
+				   &btrfs_feature_attr_group);
+	if (error) {
+		__btrfs_sysfs_remove_one(fs_info);
+		return error;
+	}
+
+	error = addrm_unknown_feature_attrs(fs_info, true);
+	if (error)
+		goto failure;
+
+	error = add_device_membership(fs_info);
+	if (error)
+		goto failure;
+
+	fs_info->space_info_kobj = kobject_create_and_add("allocation",
+						  &fs_info->super_kobj);
+	if (!fs_info->space_info_kobj) {
+		error = -ENOMEM;
+		goto failure;
+	}
+
+	error = sysfs_create_files(fs_info->space_info_kobj, allocation_attrs);
+	if (error)
+		goto failure;
+
+	return 0;
+failure:
+	btrfs_sysfs_remove_one(fs_info);
+	return error;
+}
+
 int btrfs_init_sysfs(void)
 {
+	int ret;
 	btrfs_kset = kset_create_and_add("btrfs", NULL, fs_kobj);
 	if (!btrfs_kset)
 		return -ENOMEM;
+
+	init_feature_attrs();
+
+	ret = sysfs_create_group(&btrfs_kset->kobj, &btrfs_feature_attr_group);
+	if (ret) {
+		kset_unregister(btrfs_kset);
+		return ret;
+	}
+
 	return 0;
 }
 
 void btrfs_exit_sysfs(void)
 {
+	sysfs_remove_group(&btrfs_kset->kobj, &btrfs_feature_attr_group);
 	kset_unregister(btrfs_kset);
 }
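For illustration, the feature attributes wired up above surface under /sys/fs/btrfs/<fsid>/features/ (the "btrfs" kset lives under fs_kobj, the per-filesystem kobject is named by its fsid, and the attribute group is "features"). A minimal user-space sketch of flipping one of the writable bits through btrfs_feature_attr_store() could look like the following; the fsid and the "no_holes" attribute name are placeholders, not values taken from this patch:

	#include <fcntl.h>
	#include <stdio.h>
	#include <unistd.h>

	/*
	 * Sketch only: toggle a btrfs feature bit via sysfs.  Only bits listed
	 * in the *_SAFE_SET/*_SAFE_CLEAR masks may be changed on a mounted fs;
	 * anything else is rejected with -EPERM by btrfs_feature_attr_store().
	 */
	int main(void)
	{
		const char *path =
			"/sys/fs/btrfs/12345678-1234-1234-1234-123456789abc/features/no_holes";
		int fd = open(path, O_WRONLY);

		if (fd < 0) {
			perror("open");
			return 1;
		}
		if (write(fd, "1", 1) != 1)	/* writing "0" clears the bit instead */
			perror("write");
		close(fd);
		return 0;
	}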
 
diff --git a/fs/btrfs/sysfs.h b/fs/btrfs/sysfs.h
new file mode 100644
index 0000000..f3cea37
--- /dev/null
+++ b/fs/btrfs/sysfs.h
@@ -0,0 +1,64 @@
+#ifndef _BTRFS_SYSFS_H_
+#define _BTRFS_SYSFS_H_
+
+enum btrfs_feature_set {
+	FEAT_COMPAT,
+	FEAT_COMPAT_RO,
+	FEAT_INCOMPAT,
+	FEAT_MAX
+};
+
+#define __INIT_KOBJ_ATTR(_name, _mode, _show, _store)			\
+{									\
+	.attr	= { .name = __stringify(_name), .mode = _mode },	\
+	.show	= _show,						\
+	.store	= _store,						\
+}
+
+#define BTRFS_ATTR_RW(_name, _mode, _show, _store)			\
+static struct kobj_attribute btrfs_attr_##_name =			\
+			__INIT_KOBJ_ATTR(_name, _mode, _show, _store)
+#define BTRFS_ATTR(_name, _mode, _show)					\
+	BTRFS_ATTR_RW(_name, _mode, _show, NULL)
+#define BTRFS_ATTR_PTR(_name)    (&btrfs_attr_##_name.attr)
+
+#define BTRFS_RAID_ATTR(_name, _show)					\
+static struct kobj_attribute btrfs_raid_attr_##_name =			\
+			__INIT_KOBJ_ATTR(_name, 0444, _show, NULL)
+#define BTRFS_RAID_ATTR_PTR(_name)    (&btrfs_raid_attr_##_name.attr)
+
+
+struct btrfs_feature_attr {
+	struct kobj_attribute kobj_attr;
+	enum btrfs_feature_set feature_set;
+	u64 feature_bit;
+};
+
+#define BTRFS_FEAT_ATTR(_name, _feature_set, _prefix, _feature_bit)	     \
+static struct btrfs_feature_attr btrfs_attr_##_name = {			     \
+	.kobj_attr = __INIT_KOBJ_ATTR(_name, S_IRUGO,			     \
+				      btrfs_feature_attr_show,		     \
+				      btrfs_feature_attr_store),	     \
+	.feature_set	= _feature_set,					     \
+	.feature_bit	= _prefix ##_## _feature_bit,			     \
+}
+#define BTRFS_FEAT_ATTR_PTR(_name)    (&btrfs_attr_##_name.kobj_attr.attr)
+
+#define BTRFS_FEAT_ATTR_COMPAT(name, feature) \
+	BTRFS_FEAT_ATTR(name, FEAT_COMPAT, BTRFS_FEATURE_COMPAT, feature)
+#define BTRFS_FEAT_ATTR_COMPAT_RO(name, feature) \
+	BTRFS_FEAT_ATTR(name, FEAT_COMPAT_RO, BTRFS_FEATURE_COMPAT, feature)
+#define BTRFS_FEAT_ATTR_INCOMPAT(name, feature) \
+	BTRFS_FEAT_ATTR(name, FEAT_INCOMPAT, BTRFS_FEATURE_INCOMPAT, feature)
+
+/* convert from attribute */
+#define to_btrfs_feature_attr(a) \
+			container_of(a, struct btrfs_feature_attr, kobj_attr)
+#define attr_to_btrfs_attr(a) container_of(a, struct kobj_attribute, attr)
+#define attr_to_btrfs_feature_attr(a) \
+			to_btrfs_feature_attr(attr_to_btrfs_attr(a))
+char *btrfs_printable_features(enum btrfs_feature_set set, u64 flags);
+extern const char * const btrfs_feature_set_names[3];
+extern struct kobj_type space_info_ktype;
+extern struct kobj_type btrfs_raid_ktype;
+#endif /* _BTRFS_SYSFS_H_ */
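As a sanity check on the macros above, BTRFS_FEAT_ATTR_INCOMPAT(no_holes, NO_HOLES) expands to roughly the following initializer; this is just the expansion written out by hand, not additional code from the patch:

	static struct btrfs_feature_attr btrfs_attr_no_holes = {
		.kobj_attr = {
			.attr	= { .name = "no_holes", .mode = S_IRUGO },
			.show	= btrfs_feature_attr_show,
			.store	= btrfs_feature_attr_store,
		},
		.feature_set	= FEAT_INCOMPAT,
		.feature_bit	= BTRFS_FEATURE_INCOMPAT_NO_HOLES,
	};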
diff --git a/fs/btrfs/tests/btrfs-tests.h b/fs/btrfs/tests/btrfs-tests.h
index b353bc8..312560a 100644
--- a/fs/btrfs/tests/btrfs-tests.h
+++ b/fs/btrfs/tests/btrfs-tests.h
@@ -21,7 +21,7 @@
 
 #ifdef CONFIG_BTRFS_FS_RUN_SANITY_TESTS
 
-#define test_msg(fmt, ...) pr_info("btrfs: selftest: " fmt, ##__VA_ARGS__)
+#define test_msg(fmt, ...) pr_info("BTRFS: selftest: " fmt, ##__VA_ARGS__)
 
 int btrfs_test_free_space_cache(void);
 int btrfs_test_extent_buffer_operations(void);
diff --git a/fs/btrfs/transaction.c b/fs/btrfs/transaction.c
index c6a872a..34cd831 100644
--- a/fs/btrfs/transaction.c
+++ b/fs/btrfs/transaction.c
@@ -62,7 +62,7 @@
 	WARN_ON(atomic_read(&transaction->use_count) == 0);
 	if (atomic_dec_and_test(&transaction->use_count)) {
 		BUG_ON(!list_empty(&transaction->list));
-		WARN_ON(transaction->delayed_refs.root.rb_node);
+		WARN_ON(!RB_EMPTY_ROOT(&transaction->delayed_refs.href_root));
 		while (!list_empty(&transaction->pending_chunks)) {
 			struct extent_map *em;
 
@@ -183,8 +183,8 @@
 	atomic_set(&cur_trans->use_count, 2);
 	cur_trans->start_time = get_seconds();
 
-	cur_trans->delayed_refs.root = RB_ROOT;
-	cur_trans->delayed_refs.num_entries = 0;
+	cur_trans->delayed_refs.href_root = RB_ROOT;
+	atomic_set(&cur_trans->delayed_refs.num_entries, 0);
 	cur_trans->delayed_refs.num_heads_ready = 0;
 	cur_trans->delayed_refs.num_heads = 0;
 	cur_trans->delayed_refs.flushing = 0;
@@ -196,17 +196,14 @@
 	 */
 	smp_mb();
 	if (!list_empty(&fs_info->tree_mod_seq_list))
-		WARN(1, KERN_ERR "btrfs: tree_mod_seq_list not empty when "
+		WARN(1, KERN_ERR "BTRFS: tree_mod_seq_list not empty when "
 			"creating a fresh transaction\n");
 	if (!RB_EMPTY_ROOT(&fs_info->tree_mod_log))
-		WARN(1, KERN_ERR "btrfs: tree_mod_log rb tree not empty when "
+		WARN(1, KERN_ERR "BTRFS: tree_mod_log rb tree not empty when "
 			"creating a fresh transaction\n");
 	atomic64_set(&fs_info->tree_mod_seq, 0);
 
 	spin_lock_init(&cur_trans->delayed_refs.lock);
-	atomic_set(&cur_trans->delayed_refs.procs_running_refs, 0);
-	atomic_set(&cur_trans->delayed_refs.ref_seq, 0);
-	init_waitqueue_head(&cur_trans->delayed_refs.wait);
 
 	INIT_LIST_HEAD(&cur_trans->pending_snapshots);
 	INIT_LIST_HEAD(&cur_trans->ordered_operations);
@@ -472,6 +469,7 @@
 	h->type = type;
 	h->allocating_chunk = false;
 	h->reloc_reserved = false;
+	h->sync = false;
 	INIT_LIST_HEAD(&h->qgroup_ref_list);
 	INIT_LIST_HEAD(&h->new_bgs);
 
@@ -647,7 +645,7 @@
 				  struct btrfs_root *root)
 {
 	if (root->fs_info->global_block_rsv.space_info->full &&
-	    btrfs_should_throttle_delayed_refs(trans, root))
+	    btrfs_check_space_for_delayed_refs(trans, root))
 		return 1;
 
 	return !!btrfs_block_rsv_check(root, &root->fs_info->global_block_rsv, 5);
@@ -711,8 +709,8 @@
 		btrfs_create_pending_block_groups(trans, root);
 
 	trans->delayed_ref_updates = 0;
-	if (btrfs_should_throttle_delayed_refs(trans, root)) {
-		cur = max_t(unsigned long, cur, 1);
+	if (!trans->sync && btrfs_should_throttle_delayed_refs(trans, root)) {
+		cur = max_t(unsigned long, cur, 32);
 		trans->delayed_ref_updates = 0;
 		btrfs_run_delayed_refs(trans, root, cur);
 	}
@@ -788,12 +786,6 @@
 	return __btrfs_end_transaction(trans, root, 1);
 }
 
-int btrfs_end_transaction_dmeta(struct btrfs_trans_handle *trans,
-				struct btrfs_root *root)
-{
-	return __btrfs_end_transaction(trans, root, 1);
-}
-
 /*
  * when btree blocks are allocated, they have some corresponding bits set for
  * them in one of two extent_io trees.  This is used to make sure all of
@@ -1105,7 +1097,7 @@
 			break;
 
 		if (btrfs_defrag_cancelled(root->fs_info)) {
-			printk(KERN_DEBUG "btrfs: defrag_root cancelled\n");
+			pr_debug("BTRFS: defrag_root cancelled\n");
 			ret = -EAGAIN;
 			break;
 		}
@@ -1746,6 +1738,8 @@
 		goto cleanup_transaction;
 
 	btrfs_wait_delalloc_flush(root->fs_info);
+
+	btrfs_scrub_pause(root);
 	/*
 	 * Ok now we need to make sure to block out any other joins while we
 	 * commit the transaction.  We could have started a join before setting
@@ -1810,7 +1804,6 @@
 
 	WARN_ON(cur_trans != trans->transaction);
 
-	btrfs_scrub_pause(root);
 	/* btrfs_commit_tree_roots is responsible for getting the
 	 * various roots consistent with each other.  Every pointer
 	 * in the tree of tree roots has to point to the most up to date
@@ -1833,6 +1826,15 @@
 		goto cleanup_transaction;
 	}
 
+	/*
+	 * Since the transaction is done, we should set the inode map cache flag
+	 * before any other transaction comes in.
+	 */
+	if (btrfs_test_opt(root, CHANGE_INODE_CACHE))
+		btrfs_set_opt(root->fs_info->mount_opt, INODE_MAP_CACHE);
+	else
+		btrfs_clear_opt(root->fs_info->mount_opt, INODE_MAP_CACHE);
+
 	/* commit_fs_roots gets rid of all the tree log roots, it is now
 	 * safe to free the root of tree log roots
 	 */
@@ -1975,10 +1977,23 @@
 	}
 	root = list_first_entry(&fs_info->dead_roots,
 			struct btrfs_root, root_list);
+	/*
+	 * Make sure the root is not involved in send;
+	 * if the first root is busy with send, return
+	 * directly rather than continuing with the rest.
+	 */
+	spin_lock(&root->root_item_lock);
+	if (root->send_in_progress) {
+		spin_unlock(&fs_info->trans_lock);
+		spin_unlock(&root->root_item_lock);
+		return 0;
+	}
+	spin_unlock(&root->root_item_lock);
+
 	list_del_init(&root->root_list);
 	spin_unlock(&fs_info->trans_lock);
 
-	pr_debug("btrfs: cleaner removing %llu\n", root->objectid);
+	pr_debug("BTRFS: cleaner removing %llu\n", root->objectid);
 
 	btrfs_kill_all_delayed_nodes(root);
 
diff --git a/fs/btrfs/transaction.h b/fs/btrfs/transaction.h
index 7657d11..6ac037e 100644
--- a/fs/btrfs/transaction.h
+++ b/fs/btrfs/transaction.h
@@ -93,6 +93,7 @@
 	short adding_csums;
 	bool allocating_chunk;
 	bool reloc_reserved;
+	bool sync;
 	unsigned int type;
 	/*
 	 * this root is only needed to validate that the root passed to
@@ -154,8 +155,6 @@
 				   int wait_for_unblock);
 int btrfs_end_transaction_throttle(struct btrfs_trans_handle *trans,
 				   struct btrfs_root *root);
-int btrfs_end_transaction_dmeta(struct btrfs_trans_handle *trans,
-				struct btrfs_root *root);
 int btrfs_should_end_transaction(struct btrfs_trans_handle *trans,
 				 struct btrfs_root *root);
 void btrfs_throttle(struct btrfs_root *root);
diff --git a/fs/btrfs/tree-log.c b/fs/btrfs/tree-log.c
index 9f7fc51..39d83da 100644
--- a/fs/btrfs/tree-log.c
+++ b/fs/btrfs/tree-log.c
@@ -570,7 +570,7 @@
 		if (btrfs_file_extent_disk_bytenr(eb, item) == 0)
 			nbytes = 0;
 	} else if (found_type == BTRFS_FILE_EXTENT_INLINE) {
-		size = btrfs_file_extent_inline_len(eb, item);
+		size = btrfs_file_extent_inline_len(eb, slot, item);
 		nbytes = btrfs_file_extent_ram_bytes(eb, item);
 		extent_end = ALIGN(start + size, root->sectorsize);
 	} else {
@@ -1238,7 +1238,8 @@
 			      struct btrfs_root *root, u64 offset)
 {
 	int ret;
-	ret = btrfs_find_orphan_item(root, offset);
+	ret = btrfs_find_item(root, NULL, BTRFS_ORPHAN_OBJECTID,
+			offset, BTRFS_ORPHAN_ITEM_KEY, NULL);
 	if (ret > 0)
 		ret = btrfs_insert_orphan_item(trans, root, offset);
 	return ret;
@@ -3194,7 +3195,7 @@
 static noinline int copy_items(struct btrfs_trans_handle *trans,
 			       struct inode *inode,
 			       struct btrfs_path *dst_path,
-			       struct extent_buffer *src,
+			       struct btrfs_path *src_path, u64 *last_extent,
 			       int start_slot, int nr, int inode_only)
 {
 	unsigned long src_offset;
@@ -3202,6 +3203,8 @@
 	struct btrfs_root *log = BTRFS_I(inode)->root->log_root;
 	struct btrfs_file_extent_item *extent;
 	struct btrfs_inode_item *inode_item;
+	struct extent_buffer *src = src_path->nodes[0];
+	struct btrfs_key first_key, last_key, key;
 	int ret;
 	struct btrfs_key *ins_keys;
 	u32 *ins_sizes;
@@ -3209,6 +3212,9 @@
 	int i;
 	struct list_head ordered_sums;
 	int skip_csum = BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM;
+	bool has_extents = false;
+	bool need_find_last_extent = (*last_extent == 0);
+	bool done = false;
 
 	INIT_LIST_HEAD(&ordered_sums);
 
@@ -3217,6 +3223,8 @@
 	if (!ins_data)
 		return -ENOMEM;
 
+	first_key.objectid = (u64)-1;
+
 	ins_sizes = (u32 *)ins_data;
 	ins_keys = (struct btrfs_key *)(ins_data + nr * sizeof(u32));
 
@@ -3237,6 +3245,9 @@
 
 		src_offset = btrfs_item_ptr_offset(src, start_slot + i);
 
+		if ((i == (nr - 1)))
+			last_key = ins_keys[i];
+
 		if (ins_keys[i].type == BTRFS_INODE_ITEM_KEY) {
 			inode_item = btrfs_item_ptr(dst_path->nodes[0],
 						    dst_path->slots[0],
@@ -3248,6 +3259,21 @@
 					   src_offset, ins_sizes[i]);
 		}
 
+		/*
+		 * We set need_find_last_extent here in case we know we were
+		 * processing other items and then walk into the first extent in
+		 * the inode.  If we don't hit an extent then nothing changes,
+		 * we'll do the last search the next time around.
+		 */
+		if (ins_keys[i].type == BTRFS_EXTENT_DATA_KEY) {
+			has_extents = true;
+			if (need_find_last_extent &&
+			    first_key.objectid == (u64)-1)
+				first_key = ins_keys[i];
+		} else {
+			need_find_last_extent = false;
+		}
+
 		/* take a reference on file data extents so that truncates
 		 * or deletes of this inode don't have to relog the inode
 		 * again
@@ -3312,6 +3338,128 @@
 		list_del(&sums->list);
 		kfree(sums);
 	}
+
+	if (!has_extents)
+		return ret;
+
+	/*
+	 * Because we use btrfs_search_forward we could skip leaves that were
+	 * not modified and then assume *last_extent is valid when it really
+	 * isn't.  So back up to the previous leaf and read the end of the last
+	 * extent before we go and fill in holes.
+	 */
+	if (need_find_last_extent) {
+		u64 len;
+
+		ret = btrfs_prev_leaf(BTRFS_I(inode)->root, src_path);
+		if (ret < 0)
+			return ret;
+		if (ret)
+			goto fill_holes;
+		if (src_path->slots[0])
+			src_path->slots[0]--;
+		src = src_path->nodes[0];
+		btrfs_item_key_to_cpu(src, &key, src_path->slots[0]);
+		if (key.objectid != btrfs_ino(inode) ||
+		    key.type != BTRFS_EXTENT_DATA_KEY)
+			goto fill_holes;
+		extent = btrfs_item_ptr(src, src_path->slots[0],
+					struct btrfs_file_extent_item);
+		if (btrfs_file_extent_type(src, extent) ==
+		    BTRFS_FILE_EXTENT_INLINE) {
+			len = btrfs_file_extent_inline_len(src,
+							   src_path->slots[0],
+							   extent);
+			*last_extent = ALIGN(key.offset + len,
+					     log->sectorsize);
+		} else {
+			len = btrfs_file_extent_num_bytes(src, extent);
+			*last_extent = key.offset + len;
+		}
+	}
+fill_holes:
+	/* So we did prev_leaf, now we need to move to the next leaf, but a few
+	 * things could have happened
+	 *
+	 * 1) A merge could have happened, so we could currently be on a leaf
+	 * that holds what we were copying in the first place.
+	 * 2) A split could have happened, and now not all of the items we want
+	 * are on the same leaf.
+	 *
+	 * So we need to adjust how we search for holes, we need to drop the
+	 * path and re-search for the first extent key we found, and then walk
+	 * forward until we hit the last one we copied.
+	 */
+	if (need_find_last_extent) {
+		/* btrfs_prev_leaf could return 1 without releasing the path */
+		btrfs_release_path(src_path);
+		ret = btrfs_search_slot(NULL, BTRFS_I(inode)->root, &first_key,
+					src_path, 0, 0);
+		if (ret < 0)
+			return ret;
+		ASSERT(ret == 0);
+		src = src_path->nodes[0];
+		i = src_path->slots[0];
+	} else {
+		i = start_slot;
+	}
+
+	/*
+	 * Now go through and fill in any holes we may have, so that holes are
+	 * punched in the log for those areas in case they previously had
+	 * extents.
+	 */
+	while (!done) {
+		u64 offset, len;
+		u64 extent_end;
+
+		if (i >= btrfs_header_nritems(src_path->nodes[0])) {
+			ret = btrfs_next_leaf(BTRFS_I(inode)->root, src_path);
+			if (ret < 0)
+				return ret;
+			ASSERT(ret == 0);
+			src = src_path->nodes[0];
+			i = 0;
+		}
+
+		btrfs_item_key_to_cpu(src, &key, i);
+		if (!btrfs_comp_cpu_keys(&key, &last_key))
+			done = true;
+		if (key.objectid != btrfs_ino(inode) ||
+		    key.type != BTRFS_EXTENT_DATA_KEY) {
+			i++;
+			continue;
+		}
+		extent = btrfs_item_ptr(src, i, struct btrfs_file_extent_item);
+		if (btrfs_file_extent_type(src, extent) ==
+		    BTRFS_FILE_EXTENT_INLINE) {
+			len = btrfs_file_extent_inline_len(src, i, extent);
+			extent_end = ALIGN(key.offset + len, log->sectorsize);
+		} else {
+			len = btrfs_file_extent_num_bytes(src, extent);
+			extent_end = key.offset + len;
+		}
+		i++;
+
+		if (*last_extent == key.offset) {
+			*last_extent = extent_end;
+			continue;
+		}
+		offset = *last_extent;
+		len = key.offset - *last_extent;
+		ret = btrfs_insert_file_extent(trans, log, btrfs_ino(inode),
+					       offset, 0, 0, len, 0, len, 0,
+					       0, 0);
+		if (ret)
+			break;
+		*last_extent = offset + len;
+	}
+	/*
+	 * Need to let the callers know we dropped the path so they should
+	 * re-search.
+	 */
+	if (!ret && need_find_last_extent)
+		ret = 1;
 	return ret;
 }
 
@@ -3349,21 +3497,27 @@
 	int ret;
 	int index = log->log_transid % 2;
 	bool skip_csum = BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM;
-
-	ret = __btrfs_drop_extents(trans, log, inode, path, em->start,
-				   em->start + em->len, NULL, 0);
-	if (ret)
-		return ret;
+	int extent_inserted = 0;
 
 	INIT_LIST_HEAD(&ordered_sums);
 	btrfs_init_map_token(&token);
-	key.objectid = btrfs_ino(inode);
-	key.type = BTRFS_EXTENT_DATA_KEY;
-	key.offset = em->start;
 
-	ret = btrfs_insert_empty_item(trans, log, path, &key, sizeof(*fi));
+	ret = __btrfs_drop_extents(trans, log, inode, path, em->start,
+				   em->start + em->len, NULL, 0, 1,
+				   sizeof(*fi), &extent_inserted);
 	if (ret)
 		return ret;
+
+	if (!extent_inserted) {
+		key.objectid = btrfs_ino(inode);
+		key.type = BTRFS_EXTENT_DATA_KEY;
+		key.offset = em->start;
+
+		ret = btrfs_insert_empty_item(trans, log, path, &key,
+					      sizeof(*fi));
+		if (ret)
+			return ret;
+	}
 	leaf = path->nodes[0];
 	fi = btrfs_item_ptr(leaf, path->slots[0],
 			    struct btrfs_file_extent_item);
@@ -3485,7 +3639,11 @@
 		 * start over after this.
 		 */
 
-		wait_event(ordered->wait, ordered->csum_bytes_left == 0);
+		if (ordered->csum_bytes_left) {
+			btrfs_start_ordered_extent(inode, ordered, 0);
+			wait_event(ordered->wait,
+				   ordered->csum_bytes_left == 0);
+		}
 
 		list_for_each_entry(sum, &ordered->list, list) {
 			ret = btrfs_csum_file_blocks(trans, log, sum);
@@ -3630,6 +3788,7 @@
 	struct btrfs_key max_key;
 	struct btrfs_root *log = root->log_root;
 	struct extent_buffer *src = NULL;
+	u64 last_extent = 0;
 	int err = 0;
 	int ret;
 	int nritems;
@@ -3745,11 +3904,15 @@
 			goto next_slot;
 		}
 
-		ret = copy_items(trans, inode, dst_path, src, ins_start_slot,
-				 ins_nr, inode_only);
-		if (ret) {
+		ret = copy_items(trans, inode, dst_path, path, &last_extent,
+				 ins_start_slot, ins_nr, inode_only);
+		if (ret < 0) {
 			err = ret;
 			goto out_unlock;
+		} else if (ret) {
+			ins_nr = 0;
+			btrfs_release_path(path);
+			continue;
 		}
 		ins_nr = 1;
 		ins_start_slot = path->slots[0];
@@ -3763,13 +3926,14 @@
 			goto again;
 		}
 		if (ins_nr) {
-			ret = copy_items(trans, inode, dst_path, src,
-					 ins_start_slot,
+			ret = copy_items(trans, inode, dst_path, path,
+					 &last_extent, ins_start_slot,
 					 ins_nr, inode_only);
-			if (ret) {
+			if (ret < 0) {
 				err = ret;
 				goto out_unlock;
 			}
+			ret = 0;
 			ins_nr = 0;
 		}
 		btrfs_release_path(path);
@@ -3784,12 +3948,13 @@
 		}
 	}
 	if (ins_nr) {
-		ret = copy_items(trans, inode, dst_path, src, ins_start_slot,
-				 ins_nr, inode_only);
-		if (ret) {
+		ret = copy_items(trans, inode, dst_path, path, &last_extent,
+				 ins_start_slot, ins_nr, inode_only);
+		if (ret < 0) {
 			err = ret;
 			goto out_unlock;
 		}
+		ret = 0;
 		ins_nr = 0;
 	}
 
diff --git a/fs/btrfs/ulist.c b/fs/btrfs/ulist.c
index b0a523b2..840a38b 100644
--- a/fs/btrfs/ulist.c
+++ b/fs/btrfs/ulist.c
@@ -5,8 +5,8 @@
  */
 
 #include <linux/slab.h>
-#include <linux/export.h>
 #include "ulist.h"
+#include "ctree.h"
 
 /*
  * ulist is a generic data structure to hold a collection of unique u64
@@ -14,10 +14,6 @@
  * enumerating it.
  * It is possible to store an auxiliary value along with the key.
  *
- * The implementation is preliminary and can probably be sped up
- * significantly. A first step would be to store the values in an rbtree
- * as soon as ULIST_SIZE is exceeded.
- *
  * A sample usage for ulists is the enumeration of directed graphs without
  * visiting a node twice. The pseudo-code could look like this:
  *
@@ -50,12 +46,10 @@
  */
 void ulist_init(struct ulist *ulist)
 {
-	ulist->nnodes = 0;
-	ulist->nodes = ulist->int_nodes;
-	ulist->nodes_alloced = ULIST_SIZE;
+	INIT_LIST_HEAD(&ulist->nodes);
 	ulist->root = RB_ROOT;
+	ulist->nnodes = 0;
 }
-EXPORT_SYMBOL(ulist_init);
 
 /**
  * ulist_fini - free up additionally allocated memory for the ulist
@@ -64,18 +58,17 @@
  * This is useful in cases where the base 'struct ulist' has been statically
  * allocated.
  */
-void ulist_fini(struct ulist *ulist)
+static void ulist_fini(struct ulist *ulist)
 {
-	/*
-	 * The first ULIST_SIZE elements are stored inline in struct ulist.
-	 * Only if more elements are alocated they need to be freed.
-	 */
-	if (ulist->nodes_alloced > ULIST_SIZE)
-		kfree(ulist->nodes);
-	ulist->nodes_alloced = 0;	/* in case ulist_fini is called twice */
+	struct ulist_node *node;
+	struct ulist_node *next;
+
+	list_for_each_entry_safe(node, next, &ulist->nodes, list) {
+		kfree(node);
+	}
 	ulist->root = RB_ROOT;
+	INIT_LIST_HEAD(&ulist->nodes);
 }
-EXPORT_SYMBOL(ulist_fini);
 
 /**
  * ulist_reinit - prepare a ulist for reuse
@@ -89,7 +82,6 @@
 	ulist_fini(ulist);
 	ulist_init(ulist);
 }
-EXPORT_SYMBOL(ulist_reinit);
 
 /**
  * ulist_alloc - dynamically allocate a ulist
@@ -108,7 +100,6 @@
 
 	return ulist;
 }
-EXPORT_SYMBOL(ulist_alloc);
 
 /**
  * ulist_free - free dynamically allocated ulist
@@ -123,7 +114,6 @@
 	ulist_fini(ulist);
 	kfree(ulist);
 }
-EXPORT_SYMBOL(ulist_free);
 
 static struct ulist_node *ulist_rbtree_search(struct ulist *ulist, u64 val)
 {
@@ -192,63 +182,32 @@
 int ulist_add_merge(struct ulist *ulist, u64 val, u64 aux,
 		    u64 *old_aux, gfp_t gfp_mask)
 {
-	int ret = 0;
-	struct ulist_node *node = NULL;
+	int ret;
+	struct ulist_node *node;
+
 	node = ulist_rbtree_search(ulist, val);
 	if (node) {
 		if (old_aux)
 			*old_aux = node->aux;
 		return 0;
 	}
+	node = kmalloc(sizeof(*node), gfp_mask);
+	if (!node)
+		return -ENOMEM;
 
-	if (ulist->nnodes >= ulist->nodes_alloced) {
-		u64 new_alloced = ulist->nodes_alloced + 128;
-		struct ulist_node *new_nodes;
-		void *old = NULL;
-		int i;
+	node->val = val;
+	node->aux = aux;
+#ifdef CONFIG_BTRFS_DEBUG
+	node->seqnum = ulist->nnodes;
+#endif
 
-		for (i = 0; i < ulist->nnodes; i++)
-			rb_erase(&ulist->nodes[i].rb_node, &ulist->root);
-
-		/*
-		 * if nodes_alloced == ULIST_SIZE no memory has been allocated
-		 * yet, so pass NULL to krealloc
-		 */
-		if (ulist->nodes_alloced > ULIST_SIZE)
-			old = ulist->nodes;
-
-		new_nodes = krealloc(old, sizeof(*new_nodes) * new_alloced,
-				     gfp_mask);
-		if (!new_nodes)
-			return -ENOMEM;
-
-		if (!old)
-			memcpy(new_nodes, ulist->int_nodes,
-			       sizeof(ulist->int_nodes));
-
-		ulist->nodes = new_nodes;
-		ulist->nodes_alloced = new_alloced;
-
-		/*
-		 * krealloc actually uses memcpy, which does not copy rb_node
-		 * pointers, so we have to do it ourselves.  Otherwise we may
-		 * be bitten by crashes.
-		 */
-		for (i = 0; i < ulist->nnodes; i++) {
-			ret = ulist_rbtree_insert(ulist, &ulist->nodes[i]);
-			if (ret < 0)
-				return ret;
-		}
-	}
-	ulist->nodes[ulist->nnodes].val = val;
-	ulist->nodes[ulist->nnodes].aux = aux;
-	ret = ulist_rbtree_insert(ulist, &ulist->nodes[ulist->nnodes]);
-	BUG_ON(ret);
-	++ulist->nnodes;
+	ret = ulist_rbtree_insert(ulist, node);
+	ASSERT(!ret);
+	list_add_tail(&node->list, &ulist->nodes);
+	ulist->nnodes++;
 
 	return 1;
 }
-EXPORT_SYMBOL(ulist_add);
 
 /**
  * ulist_next - iterate ulist
@@ -268,11 +227,25 @@
  */
 struct ulist_node *ulist_next(struct ulist *ulist, struct ulist_iterator *uiter)
 {
-	if (ulist->nnodes == 0)
-		return NULL;
-	if (uiter->i < 0 || uiter->i >= ulist->nnodes)
-		return NULL;
+	struct ulist_node *node;
 
-	return &ulist->nodes[uiter->i++];
+	if (list_empty(&ulist->nodes))
+		return NULL;
+	if (uiter->cur_list && uiter->cur_list->next == &ulist->nodes)
+		return NULL;
+	if (uiter->cur_list) {
+		uiter->cur_list = uiter->cur_list->next;
+	} else {
+		uiter->cur_list = ulist->nodes.next;
+#ifdef CONFIG_BTRFS_DEBUG
+		uiter->i = 0;
+#endif
+	}
+	node = list_entry(uiter->cur_list, struct ulist_node, list);
+#ifdef CONFIG_BTRFS_DEBUG
+	ASSERT(node->seqnum == uiter->i);
+	ASSERT(uiter->i >= 0 && uiter->i < ulist->nnodes);
+	uiter->i++;
+#endif
+	return node;
 }
-EXPORT_SYMBOL(ulist_next);
diff --git a/fs/btrfs/ulist.h b/fs/btrfs/ulist.h
index fb36731..7f78cbf 100644
--- a/fs/btrfs/ulist.h
+++ b/fs/btrfs/ulist.h
@@ -17,18 +17,12 @@
  * enumerating it.
  * It is possible to store an auxiliary value along with the key.
  *
- * The implementation is preliminary and can probably be sped up
- * significantly. A first step would be to store the values in an rbtree
- * as soon as ULIST_SIZE is exceeded.
  */
-
-/*
- * number of elements statically allocated inside struct ulist
- */
-#define ULIST_SIZE 16
-
 struct ulist_iterator {
+#ifdef CONFIG_BTRFS_DEBUG
 	int i;
+#endif
+	struct list_head *cur_list;  /* hint to start search */
 };
 
 /*
@@ -37,6 +31,12 @@
 struct ulist_node {
 	u64 val;		/* value to store */
 	u64 aux;		/* auxiliary value saved along with the val */
+
+#ifdef CONFIG_BTRFS_DEBUG
+	int seqnum;		/* sequence number in which this node was added */
+#endif
+
+	struct list_head list;  /* used to link node */
 	struct rb_node rb_node;	/* used to speed up search */
 };
 
@@ -46,28 +46,11 @@
 	 */
 	unsigned long nnodes;
 
-	/*
-	 * number of nodes we already have room for
-	 */
-	unsigned long nodes_alloced;
-
-	/*
-	 * pointer to the array storing the elements. The first ULIST_SIZE
-	 * elements are stored inline. In this case the it points to int_nodes.
-	 * After exceeding ULIST_SIZE, dynamic memory is allocated.
-	 */
-	struct ulist_node *nodes;
-
+	struct list_head nodes;
 	struct rb_root root;
-
-	/*
-	 * inline storage space for the first ULIST_SIZE entries
-	 */
-	struct ulist_node int_nodes[ULIST_SIZE];
 };
 
 void ulist_init(struct ulist *ulist);
-void ulist_fini(struct ulist *ulist);
 void ulist_reinit(struct ulist *ulist);
 struct ulist *ulist_alloc(gfp_t gfp_mask);
 void ulist_free(struct ulist *ulist);
@@ -77,6 +60,6 @@
 struct ulist_node *ulist_next(struct ulist *ulist,
 			      struct ulist_iterator *uiter);
 
-#define ULIST_ITER_INIT(uiter) ((uiter)->i = 0)
+#define ULIST_ITER_INIT(uiter) ((uiter)->cur_list = NULL)
 
 #endif
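With the inline array gone, a ulist is now just an rbtree for lookups plus a list for iteration, but the calling convention is unchanged. A small in-kernel sketch of the allocate/add/iterate/free cycle, using only the interfaces declared above (plus ulist_add(), kept from the existing header), might look like:

	#include <linux/slab.h>
	#include <linux/printk.h>
	#include "ulist.h"

	/* Sketch only; the values added here are arbitrary. */
	static void ulist_usage_example(void)
	{
		struct ulist *ul;
		struct ulist_iterator uiter;
		struct ulist_node *node;

		ul = ulist_alloc(GFP_KERNEL);
		if (!ul)
			return;

		ulist_add(ul, 4096, 0, GFP_KERNEL);
		ulist_add(ul, 8192, 0, GFP_KERNEL);
		ulist_add(ul, 4096, 0, GFP_KERNEL);	/* duplicate val, ignored */

		ULIST_ITER_INIT(&uiter);
		while ((node = ulist_next(ul, &uiter)))
			pr_debug("val %llu aux %llu\n", node->val, node->aux);

		ulist_free(ul);		/* frees every node and the ulist itself */
	}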
diff --git a/fs/btrfs/uuid-tree.c b/fs/btrfs/uuid-tree.c
index fbda900..f6a4c03 100644
--- a/fs/btrfs/uuid-tree.c
+++ b/fs/btrfs/uuid-tree.c
@@ -69,7 +69,7 @@
 	ret = -ENOENT;
 
 	if (!IS_ALIGNED(item_size, sizeof(u64))) {
-		pr_warn("btrfs: uuid item with illegal size %lu!\n",
+		btrfs_warn(uuid_root->fs_info, "uuid item with illegal size %lu!",
 			(unsigned long)item_size);
 		goto out;
 	}
@@ -137,7 +137,8 @@
 		offset = btrfs_item_ptr_offset(eb, slot);
 		offset += btrfs_item_size_nr(eb, slot) - sizeof(subid_le);
 	} else if (ret < 0) {
-		pr_warn("btrfs: insert uuid item failed %d (0x%016llx, 0x%016llx) type %u!\n",
+		btrfs_warn(uuid_root->fs_info, "insert uuid item failed %d "
+			"(0x%016llx, 0x%016llx) type %u!",
 			ret, (unsigned long long)key.objectid,
 			(unsigned long long)key.offset, type);
 		goto out;
@@ -183,7 +184,7 @@
 
 	ret = btrfs_search_slot(trans, uuid_root, &key, path, -1, 1);
 	if (ret < 0) {
-		pr_warn("btrfs: error %d while searching for uuid item!\n",
+		btrfs_warn(uuid_root->fs_info, "error %d while searching for uuid item!",
 			ret);
 		goto out;
 	}
@@ -197,7 +198,7 @@
 	offset = btrfs_item_ptr_offset(eb, slot);
 	item_size = btrfs_item_size_nr(eb, slot);
 	if (!IS_ALIGNED(item_size, sizeof(u64))) {
-		pr_warn("btrfs: uuid item with illegal size %lu!\n",
+		btrfs_warn(uuid_root->fs_info, "uuid item with illegal size %lu!",
 			(unsigned long)item_size);
 		ret = -ENOENT;
 		goto out;
@@ -299,7 +300,7 @@
 		offset = btrfs_item_ptr_offset(leaf, slot);
 		item_size = btrfs_item_size_nr(leaf, slot);
 		if (!IS_ALIGNED(item_size, sizeof(u64))) {
-			pr_warn("btrfs: uuid item with illegal size %lu!\n",
+			btrfs_warn(fs_info, "uuid item with illegal size %lu!",
 				(unsigned long)item_size);
 			goto skip;
 		}
@@ -349,6 +350,6 @@
 out:
 	btrfs_free_path(path);
 	if (ret)
-		pr_warn("btrfs: btrfs_uuid_tree_iterate failed %d\n", ret);
+		btrfs_warn(fs_info, "btrfs_uuid_tree_iterate failed %d", ret);
 	return 0;
 }
diff --git a/fs/btrfs/volumes.c b/fs/btrfs/volumes.c
index 92303f4..bab0b84 100644
--- a/fs/btrfs/volumes.c
+++ b/fs/btrfs/volumes.c
@@ -125,7 +125,7 @@
 
 	ret = kobject_uevent(&disk_to_dev(bdev->bd_disk)->kobj, action);
 	if (ret)
-		pr_warn("Sending event '%d' to kobject: '%s' (%p): failed\n",
+		pr_warn("BTRFS: Sending event '%d' to kobject: '%s' (%p): failed\n",
 			action,
 			kobject_name(&disk_to_dev(bdev->bd_disk)->kobj),
 			&disk_to_dev(bdev->bd_disk)->kobj);
@@ -200,7 +200,7 @@
 
 	if (IS_ERR(*bdev)) {
 		ret = PTR_ERR(*bdev);
-		printk(KERN_INFO "btrfs: open %s failed\n", device_path);
+		printk(KERN_INFO "BTRFS: open %s failed\n", device_path);
 		goto error;
 	}
 
@@ -912,9 +912,9 @@
 	if (disk_super->label[0]) {
 		if (disk_super->label[BTRFS_LABEL_SIZE - 1])
 			disk_super->label[BTRFS_LABEL_SIZE - 1] = '\0';
-		printk(KERN_INFO "btrfs: device label %s ", disk_super->label);
+		printk(KERN_INFO "BTRFS: device label %s ", disk_super->label);
 	} else {
-		printk(KERN_INFO "btrfs: device fsid %pU ", disk_super->fsid);
+		printk(KERN_INFO "BTRFS: device fsid %pU ", disk_super->fsid);
 	}
 
 	printk(KERN_CONT "devid %llu transid %llu %s\n", devid, transid, path);
@@ -1813,7 +1813,7 @@
 		}
 
 		if (!*device) {
-			pr_err("btrfs: no missing device found\n");
+			btrfs_err(root->fs_info, "no missing device found");
 			return -ENOENT;
 		}
 
@@ -3052,7 +3052,7 @@
 error:
 	btrfs_free_path(path);
 	if (enospc_errors) {
-		printk(KERN_INFO "btrfs: %d enospc errors during balance\n",
+		btrfs_info(fs_info, "%d enospc errors during balance",
 		       enospc_errors);
 		if (!ret)
 			ret = -ENOSPC;
@@ -3138,8 +3138,8 @@
 		if (!(bctl->flags & BTRFS_BALANCE_DATA) ||
 		    !(bctl->flags & BTRFS_BALANCE_METADATA) ||
 		    memcmp(&bctl->data, &bctl->meta, sizeof(bctl->data))) {
-			printk(KERN_ERR "btrfs: with mixed groups data and "
-			       "metadata balance options must be the same\n");
+			btrfs_err(fs_info, "with mixed groups data and "
+				   "metadata balance options must be the same");
 			ret = -EINVAL;
 			goto out;
 		}
@@ -3165,8 +3165,8 @@
 	if ((bctl->data.flags & BTRFS_BALANCE_ARGS_CONVERT) &&
 	    (!alloc_profile_is_valid(bctl->data.target, 1) ||
 	     (bctl->data.target & ~allowed))) {
-		printk(KERN_ERR "btrfs: unable to start balance with target "
-		       "data profile %llu\n",
+		btrfs_err(fs_info, "unable to start balance with target "
+			   "data profile %llu",
 		       bctl->data.target);
 		ret = -EINVAL;
 		goto out;
@@ -3174,8 +3174,8 @@
 	if ((bctl->meta.flags & BTRFS_BALANCE_ARGS_CONVERT) &&
 	    (!alloc_profile_is_valid(bctl->meta.target, 1) ||
 	     (bctl->meta.target & ~allowed))) {
-		printk(KERN_ERR "btrfs: unable to start balance with target "
-		       "metadata profile %llu\n",
+		btrfs_err(fs_info,
+			   "unable to start balance with target metadata profile %llu",
 		       bctl->meta.target);
 		ret = -EINVAL;
 		goto out;
@@ -3183,8 +3183,8 @@
 	if ((bctl->sys.flags & BTRFS_BALANCE_ARGS_CONVERT) &&
 	    (!alloc_profile_is_valid(bctl->sys.target, 1) ||
 	     (bctl->sys.target & ~allowed))) {
-		printk(KERN_ERR "btrfs: unable to start balance with target "
-		       "system profile %llu\n",
+		btrfs_err(fs_info,
+			   "unable to start balance with target system profile %llu",
 		       bctl->sys.target);
 		ret = -EINVAL;
 		goto out;
@@ -3193,7 +3193,7 @@
 	/* allow dup'ed data chunks only in mixed mode */
 	if (!mixed && (bctl->data.flags & BTRFS_BALANCE_ARGS_CONVERT) &&
 	    (bctl->data.target & BTRFS_BLOCK_GROUP_DUP)) {
-		printk(KERN_ERR "btrfs: dup for data is not allowed\n");
+		btrfs_err(fs_info, "dup for data is not allowed");
 		ret = -EINVAL;
 		goto out;
 	}
@@ -3213,11 +3213,10 @@
 		     (fs_info->avail_metadata_alloc_bits & allowed) &&
 		     !(bctl->meta.target & allowed))) {
 			if (bctl->flags & BTRFS_BALANCE_FORCE) {
-				printk(KERN_INFO "btrfs: force reducing metadata "
-				       "integrity\n");
+				btrfs_info(fs_info, "force reducing metadata integrity");
 			} else {
-				printk(KERN_ERR "btrfs: balance will reduce metadata "
-				       "integrity, use force if you want this\n");
+				btrfs_err(fs_info, "balance will reduce metadata "
+					   "integrity, use force if you want this");
 				ret = -EINVAL;
 				goto out;
 			}
@@ -3303,7 +3302,7 @@
 	mutex_lock(&fs_info->balance_mutex);
 
 	if (fs_info->balance_ctl) {
-		printk(KERN_INFO "btrfs: continuing balance\n");
+		btrfs_info(fs_info, "continuing balance");
 		ret = btrfs_balance(fs_info->balance_ctl, NULL);
 	}
 
@@ -3325,7 +3324,7 @@
 	spin_unlock(&fs_info->balance_lock);
 
 	if (btrfs_test_opt(fs_info->tree_root, SKIP_BALANCE)) {
-		printk(KERN_INFO "btrfs: force skipping balance\n");
+		btrfs_info(fs_info, "force skipping balance");
 		return 0;
 	}
 
@@ -3543,7 +3542,7 @@
 						  BTRFS_UUID_KEY_SUBVOL,
 						  key.objectid);
 			if (ret < 0) {
-				pr_warn("btrfs: uuid_tree_add failed %d\n",
+				btrfs_warn(fs_info, "uuid_tree_add failed %d",
 					ret);
 				break;
 			}
@@ -3555,7 +3554,7 @@
 						 BTRFS_UUID_KEY_RECEIVED_SUBVOL,
 						  key.objectid);
 			if (ret < 0) {
-				pr_warn("btrfs: uuid_tree_add failed %d\n",
+				btrfs_warn(fs_info, "uuid_tree_add failed %d",
 					ret);
 				break;
 			}
@@ -3590,7 +3589,7 @@
 	if (trans && !IS_ERR(trans))
 		btrfs_end_transaction(trans, fs_info->uuid_root);
 	if (ret)
-		pr_warn("btrfs: btrfs_uuid_scan_kthread failed %d\n", ret);
+		btrfs_warn(fs_info, "btrfs_uuid_scan_kthread failed %d", ret);
 	else
 		fs_info->update_uuid_tree_gen = 1;
 	up(&fs_info->uuid_tree_rescan_sem);
@@ -3654,7 +3653,7 @@
 	 */
 	ret = btrfs_uuid_tree_iterate(fs_info, btrfs_check_uuid_tree_entry);
 	if (ret < 0) {
-		pr_warn("btrfs: iterating uuid_tree failed %d\n", ret);
+		btrfs_warn(fs_info, "iterating uuid_tree failed %d", ret);
 		up(&fs_info->uuid_tree_rescan_sem);
 		return ret;
 	}
@@ -3695,7 +3694,7 @@
 	task = kthread_run(btrfs_uuid_scan_kthread, fs_info, "btrfs-uuid");
 	if (IS_ERR(task)) {
 		/* fs_info->update_uuid_tree_gen remains 0 in all error case */
-		pr_warn("btrfs: failed to start uuid_scan task\n");
+		btrfs_warn(fs_info, "failed to start uuid_scan task");
 		up(&fs_info->uuid_tree_rescan_sem);
 		return PTR_ERR(task);
 	}
@@ -3711,7 +3710,7 @@
 	task = kthread_run(btrfs_uuid_rescan_kthread, fs_info, "btrfs-uuid");
 	if (IS_ERR(task)) {
 		/* fs_info->update_uuid_tree_gen remains 0 in all error case */
-		pr_warn("btrfs: failed to start uuid_rescan task\n");
+		btrfs_warn(fs_info, "failed to start uuid_rescan task");
 		up(&fs_info->uuid_tree_rescan_sem);
 		return PTR_ERR(task);
 	}
@@ -4033,7 +4032,7 @@
 		max_stripe_size = 32 * 1024 * 1024;
 		max_chunk_size = 2 * max_stripe_size;
 	} else {
-		printk(KERN_ERR "btrfs: invalid chunk type 0x%llx requested\n",
+		btrfs_err(info, "invalid chunk type 0x%llx requested",
 		       type);
 		BUG_ON(1);
 	}
@@ -4065,7 +4064,7 @@
 
 		if (!device->writeable) {
 			WARN(1, KERN_ERR
-			       "btrfs: read-only device in alloc_list\n");
+			       "BTRFS: read-only device in alloc_list\n");
 			continue;
 		}
 
@@ -5193,13 +5192,13 @@
 	read_unlock(&em_tree->lock);
 
 	if (!em) {
-		printk(KERN_ERR "btrfs: couldn't find em for chunk %Lu\n",
+		printk(KERN_ERR "BTRFS: couldn't find em for chunk %Lu\n",
 		       chunk_start);
 		return -EIO;
 	}
 
 	if (em->start != chunk_start) {
-		printk(KERN_ERR "btrfs: bad chunk start, em=%Lu, wanted=%Lu\n",
+		printk(KERN_ERR "BTRFS: bad chunk start, em=%Lu, wanted=%Lu\n",
 		       em->start, chunk_start);
 		free_extent_map(em);
 		return -EIO;
@@ -5298,6 +5297,13 @@
 			bio_put(bio);
 			bio = bbio->orig_bio;
 		}
+
+		/*
+		 * We have the original bio now, so increment bi_remaining to
+		 * account for it in the endio
+		 */
+		atomic_inc(&bio->bi_remaining);
+
 		bio->bi_private = bbio->private;
 		bio->bi_end_io = bbio->end_io;
 		btrfs_io_bio(bio)->mirror_num = bbio->mirror_num;
@@ -5411,7 +5417,7 @@
 	if (!q->merge_bvec_fn)
 		return 1;
 
-	bvm.bi_size = bio->bi_size - prev->bv_len;
+	bvm.bi_size = bio->bi_iter.bi_size - prev->bv_len;
 	if (q->merge_bvec_fn(q, &bvm, prev) < prev->bv_len)
 		return 0;
 	return 1;
@@ -5426,7 +5432,7 @@
 	bio->bi_private = bbio;
 	btrfs_io_bio(bio)->stripe_index = dev_nr;
 	bio->bi_end_io = btrfs_end_bio;
-	bio->bi_sector = physical >> 9;
+	bio->bi_iter.bi_sector = physical >> 9;
 #ifdef DEBUG
 	{
 		struct rcu_string *name;
@@ -5464,7 +5470,7 @@
 	while (bvec <= (first_bio->bi_io_vec + first_bio->bi_vcnt - 1)) {
 		if (bio_add_page(bio, bvec->bv_page, bvec->bv_len,
 				 bvec->bv_offset) < bvec->bv_len) {
-			u64 len = bio->bi_size;
+			u64 len = bio->bi_iter.bi_size;
 
 			atomic_inc(&bbio->stripes_pending);
 			submit_stripe_bio(root, bbio, bio, physical, dev_nr,
@@ -5486,7 +5492,7 @@
 		bio->bi_private = bbio->private;
 		bio->bi_end_io = bbio->end_io;
 		btrfs_io_bio(bio)->mirror_num = bbio->mirror_num;
-		bio->bi_sector = logical >> 9;
+		bio->bi_iter.bi_sector = logical >> 9;
 		kfree(bbio);
 		bio_endio(bio, -EIO);
 	}
@@ -5497,7 +5503,7 @@
 {
 	struct btrfs_device *dev;
 	struct bio *first_bio = bio;
-	u64 logical = (u64)bio->bi_sector << 9;
+	u64 logical = (u64)bio->bi_iter.bi_sector << 9;
 	u64 length = 0;
 	u64 map_length;
 	u64 *raid_map = NULL;
@@ -5506,7 +5512,7 @@
 	int total_devs = 1;
 	struct btrfs_bio *bbio = NULL;
 
-	length = bio->bi_size;
+	length = bio->bi_iter.bi_size;
 	map_length = length;
 
 	ret = __btrfs_map_block(root->fs_info, rw, logical, &map_length, &bbio,
@@ -6123,7 +6129,8 @@
 	BUG_ON(!path);
 	ret = btrfs_search_slot(trans, dev_root, &key, path, -1, 1);
 	if (ret < 0) {
-		printk_in_rcu(KERN_WARNING "btrfs: error %d while searching for dev_stats item for device %s!\n",
+		printk_in_rcu(KERN_WARNING "BTRFS: "
+			"error %d while searching for dev_stats item for device %s!\n",
 			      ret, rcu_str_deref(device->name));
 		goto out;
 	}
@@ -6133,7 +6140,8 @@
 		/* need to delete old one and insert a new one */
 		ret = btrfs_del_item(trans, dev_root, path);
 		if (ret != 0) {
-			printk_in_rcu(KERN_WARNING "btrfs: delete too small dev_stats item for device %s failed %d!\n",
+			printk_in_rcu(KERN_WARNING "BTRFS: "
+				"delete too small dev_stats item for device %s failed %d!\n",
 				      rcu_str_deref(device->name), ret);
 			goto out;
 		}
@@ -6146,7 +6154,8 @@
 		ret = btrfs_insert_empty_item(trans, dev_root, path,
 					      &key, sizeof(*ptr));
 		if (ret < 0) {
-			printk_in_rcu(KERN_WARNING "btrfs: insert dev_stats item for device %s failed %d!\n",
+			printk_in_rcu(KERN_WARNING "BTRFS: "
+					  "insert dev_stats item for device %s failed %d!\n",
 				      rcu_str_deref(device->name), ret);
 			goto out;
 		}
@@ -6199,16 +6208,14 @@
 {
 	if (!dev->dev_stats_valid)
 		return;
-	printk_ratelimited_in_rcu(KERN_ERR
-			   "btrfs: bdev %s errs: wr %u, rd %u, flush %u, corrupt %u, gen %u\n",
+	printk_ratelimited_in_rcu(KERN_ERR "BTRFS: "
+			   "bdev %s errs: wr %u, rd %u, flush %u, corrupt %u, gen %u\n",
 			   rcu_str_deref(dev->name),
 			   btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_WRITE_ERRS),
 			   btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_READ_ERRS),
 			   btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_FLUSH_ERRS),
-			   btrfs_dev_stat_read(dev,
-					       BTRFS_DEV_STAT_CORRUPTION_ERRS),
-			   btrfs_dev_stat_read(dev,
-					       BTRFS_DEV_STAT_GENERATION_ERRS));
+			   btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_CORRUPTION_ERRS),
+			   btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_GENERATION_ERRS));
 }
 
 static void btrfs_dev_stat_print_on_load(struct btrfs_device *dev)
@@ -6221,7 +6228,8 @@
 	if (i == BTRFS_DEV_STAT_VALUES_MAX)
 		return; /* all values == 0, suppress message */
 
-	printk_in_rcu(KERN_INFO "btrfs: bdev %s errs: wr %u, rd %u, flush %u, corrupt %u, gen %u\n",
+	printk_in_rcu(KERN_INFO "BTRFS: "
+		   "bdev %s errs: wr %u, rd %u, flush %u, corrupt %u, gen %u\n",
 	       rcu_str_deref(dev->name),
 	       btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_WRITE_ERRS),
 	       btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_READ_ERRS),
@@ -6242,12 +6250,10 @@
 	mutex_unlock(&fs_devices->device_list_mutex);
 
 	if (!dev) {
-		printk(KERN_WARNING
-		       "btrfs: get dev_stats failed, device not found\n");
+		btrfs_warn(root->fs_info, "get dev_stats failed, device not found");
 		return -ENODEV;
 	} else if (!dev->dev_stats_valid) {
-		printk(KERN_WARNING
-		       "btrfs: get dev_stats failed, not yet valid\n");
+		btrfs_warn(root->fs_info, "get dev_stats failed, not yet valid");
 		return -ENODEV;
 	} else if (stats->flags & BTRFS_DEV_STATS_RESET) {
 		for (i = 0; i < BTRFS_DEV_STAT_VALUES_MAX; i++) {
diff --git a/fs/btrfs/xattr.c b/fs/btrfs/xattr.c
index 3d1c301..ad8328d 100644
--- a/fs/btrfs/xattr.c
+++ b/fs/btrfs/xattr.c
@@ -28,6 +28,7 @@
 #include "transaction.h"
 #include "xattr.h"
 #include "disk-io.h"
+#include "props.h"
 
 
 ssize_t __btrfs_getxattr(struct inode *inode, const char *name,
@@ -332,7 +333,8 @@
 			XATTR_SECURITY_PREFIX_LEN) ||
 	       !strncmp(name, XATTR_SYSTEM_PREFIX, XATTR_SYSTEM_PREFIX_LEN) ||
 	       !strncmp(name, XATTR_TRUSTED_PREFIX, XATTR_TRUSTED_PREFIX_LEN) ||
-	       !strncmp(name, XATTR_USER_PREFIX, XATTR_USER_PREFIX_LEN);
+	       !strncmp(name, XATTR_USER_PREFIX, XATTR_USER_PREFIX_LEN) ||
+	       !strncmp(name, XATTR_BTRFS_PREFIX, XATTR_BTRFS_PREFIX_LEN);
 }
 
 ssize_t btrfs_getxattr(struct dentry *dentry, const char *name,
@@ -374,6 +376,10 @@
 	if (!btrfs_is_valid_xattr(name))
 		return -EOPNOTSUPP;
 
+	if (!strncmp(name, XATTR_BTRFS_PREFIX, XATTR_BTRFS_PREFIX_LEN))
+		return btrfs_set_prop(dentry->d_inode, name,
+				      value, size, flags);
+
 	if (size == 0)
 		value = "";  /* empty EA, do not remove */
 
@@ -403,6 +409,10 @@
 	if (!btrfs_is_valid_xattr(name))
 		return -EOPNOTSUPP;
 
+	if (!strncmp(name, XATTR_BTRFS_PREFIX, XATTR_BTRFS_PREFIX_LEN))
+		return btrfs_set_prop(dentry->d_inode, name,
+				      NULL, 0, XATTR_REPLACE);
+
 	return __btrfs_setxattr(NULL, dentry->d_inode, name, NULL, 0,
 				XATTR_REPLACE);
 }
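The new XATTR_BTRFS_PREFIX branch above means any xattr whose name starts with "btrfs." bypasses __btrfs_setxattr() and is handled by btrfs_set_prop() instead. Here is a user-space sketch of exercising that path; the "btrfs.compression" property name is an assumption taken from the companion props change and is not defined in this hunk:

	#include <stdio.h>
	#include <string.h>
	#include <sys/xattr.h>

	/* Sketch only: set a hypothetical "btrfs." property on a file. */
	int main(int argc, char **argv)
	{
		const char *value = "lzo";

		if (argc < 2) {
			fprintf(stderr, "usage: %s <file>\n", argv[0]);
			return 1;
		}
		if (setxattr(argv[1], "btrfs.compression", value, strlen(value), 0)) {
			perror("setxattr");
			return 1;
		}
		return 0;
	}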
diff --git a/fs/btrfs/zlib.c b/fs/btrfs/zlib.c
index 9acb846..8e57191 100644
--- a/fs/btrfs/zlib.c
+++ b/fs/btrfs/zlib.c
@@ -97,7 +97,7 @@
 	*total_in = 0;
 
 	if (Z_OK != zlib_deflateInit(&workspace->def_strm, 3)) {
-		printk(KERN_WARNING "btrfs: deflateInit failed\n");
+		printk(KERN_WARNING "BTRFS: deflateInit failed\n");
 		ret = -1;
 		goto out;
 	}
@@ -125,7 +125,7 @@
 	while (workspace->def_strm.total_in < len) {
 		ret = zlib_deflate(&workspace->def_strm, Z_SYNC_FLUSH);
 		if (ret != Z_OK) {
-			printk(KERN_DEBUG "btrfs: deflate in loop returned %d\n",
+			printk(KERN_DEBUG "BTRFS: deflate in loop returned %d\n",
 			       ret);
 			zlib_deflateEnd(&workspace->def_strm);
 			ret = -1;
@@ -252,7 +252,7 @@
 	}
 
 	if (Z_OK != zlib_inflateInit2(&workspace->inf_strm, wbits)) {
-		printk(KERN_WARNING "btrfs: inflateInit failed\n");
+		printk(KERN_WARNING "BTRFS: inflateInit failed\n");
 		return -1;
 	}
 	while (workspace->inf_strm.total_in < srclen) {
@@ -336,7 +336,7 @@
 	}
 
 	if (Z_OK != zlib_inflateInit2(&workspace->inf_strm, wbits)) {
-		printk(KERN_WARNING "btrfs: inflateInit failed\n");
+		printk(KERN_WARNING "BTRFS: inflateInit failed\n");
 		return -1;
 	}
 
diff --git a/fs/buffer.c b/fs/buffer.c
index 6024877..27265a8 100644
--- a/fs/buffer.c
+++ b/fs/buffer.c
@@ -654,14 +654,16 @@
 static void __set_page_dirty(struct page *page,
 		struct address_space *mapping, int warn)
 {
-	spin_lock_irq(&mapping->tree_lock);
+	unsigned long flags;
+
+	spin_lock_irqsave(&mapping->tree_lock, flags);
 	if (page->mapping) {	/* Race with truncate? */
 		WARN_ON_ONCE(warn && !PageUptodate(page));
 		account_page_dirtied(page, mapping);
 		radix_tree_tag_set(&mapping->page_tree,
 				page_index(page), PAGECACHE_TAG_DIRTY);
 	}
-	spin_unlock_irq(&mapping->tree_lock);
+	spin_unlock_irqrestore(&mapping->tree_lock, flags);
 	__mark_inode_dirty(mapping->host, I_DIRTY_PAGES);
 }
 
@@ -1312,7 +1314,7 @@
 		}
 		while (out < BH_LRU_SIZE)
 			bhs[out++] = NULL;
-		memcpy(__this_cpu_ptr(&bh_lrus.bhs), bhs, sizeof(bhs));
+		memcpy(this_cpu_ptr(&bh_lrus.bhs), bhs, sizeof(bhs));
 	}
 	bh_lru_unlock();
 
@@ -2982,11 +2984,11 @@
 	 * let it through, and the IO layer will turn it into
 	 * an EIO.
 	 */
-	if (unlikely(bio->bi_sector >= maxsector))
+	if (unlikely(bio->bi_iter.bi_sector >= maxsector))
 		return;
 
-	maxsector -= bio->bi_sector;
-	bytes = bio->bi_size;
+	maxsector -= bio->bi_iter.bi_sector;
+	bytes = bio->bi_iter.bi_size;
 	if (likely((bytes >> 9) <= maxsector))
 		return;
 
@@ -2994,7 +2996,7 @@
 	bytes = maxsector << 9;
 
 	/* Truncate the bio.. */
-	bio->bi_size = bytes;
+	bio->bi_iter.bi_size = bytes;
 	bio->bi_io_vec[0].bv_len = bytes;
 
 	/* ..and clear the end of the buffer for reads */
@@ -3029,14 +3031,14 @@
 	 */
 	bio = bio_alloc(GFP_NOIO, 1);
 
-	bio->bi_sector = bh->b_blocknr * (bh->b_size >> 9);
+	bio->bi_iter.bi_sector = bh->b_blocknr * (bh->b_size >> 9);
 	bio->bi_bdev = bh->b_bdev;
 	bio->bi_io_vec[0].bv_page = bh->b_page;
 	bio->bi_io_vec[0].bv_len = bh->b_size;
 	bio->bi_io_vec[0].bv_offset = bh_offset(bh);
 
 	bio->bi_vcnt = 1;
-	bio->bi_size = bh->b_size;
+	bio->bi_iter.bi_size = bh->b_size;
 
 	bio->bi_end_io = end_bio_bh_io_sync;
 	bio->bi_private = bh;
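
The __set_page_dirty() hunk above only switches to the irqsave/irqrestore locking variants so the helper is correct whatever the caller's interrupt state; the remaining fs/buffer.c hunks track the immutable-biovec conversion, under which a bio's starting sector and remaining byte count moved from struct bio into its embedded bi_iter. A hedged sketch of the new field access (the helper name is made up and it assumes a fully initialized bio):

	/* last sector the request would touch, after the bi_iter conversion */
	static sector_t bio_end_sector_sketch(struct bio *bio)
	{
		return bio->bi_iter.bi_sector + (bio->bi_iter.bi_size >> 9);
	}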
diff --git a/fs/ceph/acl.c b/fs/ceph/acl.c
index f691128..21887d6 100644
--- a/fs/ceph/acl.c
+++ b/fs/ceph/acl.c
@@ -54,11 +54,6 @@
 	return acl;
 }
 
-void ceph_forget_all_cached_acls(struct inode *inode)
-{
-	forget_all_cached_acls(inode);
-}
-
 struct posix_acl *ceph_get_acl(struct inode *inode, int type)
 {
 	int size;
@@ -66,13 +61,6 @@
 	char *value = NULL;
 	struct posix_acl *acl;
 
-	if (!IS_POSIXACL(inode))
-		return NULL;
-
-	acl = ceph_get_cached_acl(inode, type);
-	if (acl != ACL_NOT_CACHED)
-		return acl;
-
 	switch (type) {
 	case ACL_TYPE_ACCESS:
 		name = POSIX_ACL_XATTR_ACCESS;
@@ -107,14 +95,14 @@
 	return acl;
 }
 
-static int ceph_set_acl(struct dentry *dentry, struct inode *inode,
-				struct posix_acl *acl, int type)
+int ceph_set_acl(struct inode *inode, struct posix_acl *acl, int type)
 {
 	int ret = 0, size = 0;
 	const char *name = NULL;
 	char *value = NULL;
 	struct iattr newattrs;
 	umode_t new_mode = inode->i_mode, old_mode = inode->i_mode;
+	struct dentry *dentry;
 
 	if (acl) {
 		ret = posix_acl_valid(acl);
@@ -158,30 +146,29 @@
 			goto out_free;
 	}
 
+	dentry = d_find_alias(inode);
 	if (new_mode != old_mode) {
 		newattrs.ia_mode = new_mode;
 		newattrs.ia_valid = ATTR_MODE;
 		ret = ceph_setattr(dentry, &newattrs);
 		if (ret)
-			goto out_free;
+			goto out_dput;
 	}
 
-	if (value)
-		ret = __ceph_setxattr(dentry, name, value, size, 0);
-	else
-		ret = __ceph_removexattr(dentry, name);
-
+	ret = __ceph_setxattr(dentry, name, value, size, 0);
 	if (ret) {
 		if (new_mode != old_mode) {
 			newattrs.ia_mode = old_mode;
 			newattrs.ia_valid = ATTR_MODE;
 			ceph_setattr(dentry, &newattrs);
 		}
-		goto out_free;
+		goto out_dput;
 	}
 
 	ceph_set_cached_acl(inode, type, acl);
 
+out_dput:
+	dput(dentry);
 out_free:
 	kfree(value);
 out:
@@ -190,42 +177,24 @@
 
 int ceph_init_acl(struct dentry *dentry, struct inode *inode, struct inode *dir)
 {
-	struct posix_acl *acl = NULL;
-	int ret = 0;
+	struct posix_acl *default_acl, *acl;
+	int error;
 
-	if (!S_ISLNK(inode->i_mode)) {
-		if (IS_POSIXACL(dir)) {
-			acl = ceph_get_acl(dir, ACL_TYPE_DEFAULT);
-			if (IS_ERR(acl)) {
-				ret = PTR_ERR(acl);
-				goto out;
-			}
-		}
+	error = posix_acl_create(dir, &inode->i_mode, &default_acl, &acl);
+	if (error)
+		return error;
 
-		if (!acl)
-			inode->i_mode &= ~current_umask();
-	}
-
-	if (IS_POSIXACL(dir) && acl) {
-		if (S_ISDIR(inode->i_mode)) {
-			ret = ceph_set_acl(dentry, inode, acl,
-						ACL_TYPE_DEFAULT);
-			if (ret)
-				goto out_release;
-		}
-		ret = __posix_acl_create(&acl, GFP_NOFS, &inode->i_mode);
-		if (ret < 0)
-			goto out;
-		else if (ret > 0)
-			ret = ceph_set_acl(dentry, inode, acl, ACL_TYPE_ACCESS);
-		else
-			cache_no_acl(inode);
-	} else {
+	if (!default_acl && !acl)
 		cache_no_acl(inode);
-	}
 
-out_release:
-	posix_acl_release(acl);
-out:
-	return ret;
+	if (default_acl) {
+		error = ceph_set_acl(inode, default_acl, ACL_TYPE_DEFAULT);
+		posix_acl_release(default_acl);
+	}
+	if (acl) {
+		if (!error)
+			error = ceph_set_acl(inode, acl, ACL_TYPE_ACCESS);
+		posix_acl_release(acl);
+	}
+	return error;
 }
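
The rewritten ceph_init_acl() adopts the generic posix_acl_create() helper: broadly, it hands back the inherited default and access ACLs (adjusting i_mode, including the umask when the parent has no default ACL), so the filesystem only stores what it is given and drops the references. A compact sketch of that contract, with my_set_acl() standing in for the filesystem's own setter:

	static int init_acl_sketch(struct inode *inode, struct inode *dir)
	{
		struct posix_acl *default_acl, *acl;
		int error;

		/* computes inherited ACLs; may adjust inode->i_mode */
		error = posix_acl_create(dir, &inode->i_mode, &default_acl, &acl);
		if (error)
			return error;

		if (default_acl) {
			error = my_set_acl(inode, default_acl, ACL_TYPE_DEFAULT);
			posix_acl_release(default_acl);
		}
		if (acl) {
			if (!error)
				error = my_set_acl(inode, acl, ACL_TYPE_ACCESS);
			posix_acl_release(acl);
		}
		return error;
	}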
diff --git a/fs/ceph/dir.c b/fs/ceph/dir.c
index 619616d..45eda6d 100644
--- a/fs/ceph/dir.c
+++ b/fs/ceph/dir.c
@@ -100,6 +100,14 @@
 	return p & 0xffffffff;
 }
 
+static int fpos_cmp(loff_t l, loff_t r)
+{
+	int v = ceph_frag_compare(fpos_frag(l), fpos_frag(r));
+	if (v)
+		return v;
+	return (int)(fpos_off(l) - fpos_off(r));
+}
+
 /*
  * When possible, we try to satisfy a readdir by peeking at the
  * dcache.  We make this work by carefully ordering dentries on
@@ -156,7 +164,7 @@
 		if (!d_unhashed(dentry) && dentry->d_inode &&
 		    ceph_snap(dentry->d_inode) != CEPH_SNAPDIR &&
 		    ceph_ino(dentry->d_inode) != CEPH_INO_CEPH &&
-		    ctx->pos <= di->offset)
+		    fpos_cmp(ctx->pos, di->offset) <= 0)
 			break;
 		dout(" skipping %p %.*s at %llu (%llu)%s%s\n", dentry,
 		     dentry->d_name.len, dentry->d_name.name, di->offset,
@@ -695,9 +703,8 @@
 	ceph_mdsc_put_request(req);
 
 	if (!err)
-		err = ceph_init_acl(dentry, dentry->d_inode, dir);
-
-	if (err)
+		ceph_init_acl(dentry, dentry->d_inode, dir);
+	else
 		d_drop(dentry);
 	return err;
 }
@@ -735,7 +742,9 @@
 	if (!err && !req->r_reply_info.head->is_dentry)
 		err = ceph_handle_notrace_create(dir, dentry);
 	ceph_mdsc_put_request(req);
-	if (err)
+	if (!err)
+		ceph_init_acl(dentry, dentry->d_inode, dir);
+	else
 		d_drop(dentry);
 	return err;
 }
@@ -776,7 +785,9 @@
 		err = ceph_handle_notrace_create(dir, dentry);
 	ceph_mdsc_put_request(req);
 out:
-	if (err < 0)
+	if (!err)
+		ceph_init_acl(dentry, dentry->d_inode, dir);
+	else
 		d_drop(dentry);
 	return err;
 }
@@ -1303,6 +1314,7 @@
 	.listxattr = ceph_listxattr,
 	.removexattr = ceph_removexattr,
 	.get_acl = ceph_get_acl,
+	.set_acl = ceph_set_acl,
 	.mknod = ceph_mknod,
 	.symlink = ceph_symlink,
 	.mkdir = ceph_mkdir,
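
fpos_cmp() exists because a ceph readdir position is a packed 64-bit value: the directory fragment sits in the upper 32 bits and the offset within that fragment in the lower 32 (fpos_off() above masks the low half), and the two halves are meant to be compared separately, the fragment part via ceph_frag_compare(). A tiny userspace check of that packing, with illustrative helper names:

	#include <assert.h>
	#include <stdint.h>

	static unsigned frag_of(int64_t p) { return (unsigned)(p >> 32); }
	static unsigned off_of(int64_t p)  { return (unsigned)(p & 0xffffffff); }

	int main(void)
	{
		int64_t pos = ((int64_t)0x2a << 32) | 0x10;	/* frag 0x2a, offset 0x10 */

		assert(frag_of(pos) == 0x2a);
		assert(off_of(pos) == 0x10);
		return 0;
	}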
diff --git a/fs/ceph/file.c b/fs/ceph/file.c
index dfd2ce3..09c7afe 100644
--- a/fs/ceph/file.c
+++ b/fs/ceph/file.c
@@ -286,6 +286,7 @@
 	} else {
 		dout("atomic_open finish_open on dn %p\n", dn);
 		if (req->r_op == CEPH_MDS_OP_CREATE && req->r_reply_info.has_create_ino) {
+			ceph_init_acl(dentry, dentry->d_inode, dir);
 			*opened |= FILE_CREATED;
 		}
 		err = finish_open(file, dentry, ceph_open, opened);
diff --git a/fs/ceph/inode.c b/fs/ceph/inode.c
index 8b8b506..32d519d 100644
--- a/fs/ceph/inode.c
+++ b/fs/ceph/inode.c
@@ -97,6 +97,7 @@
 	.listxattr = ceph_listxattr,
 	.removexattr = ceph_removexattr,
 	.get_acl = ceph_get_acl,
+	.set_acl = ceph_set_acl,
 };
 
 
@@ -1616,6 +1617,7 @@
 	.listxattr = ceph_listxattr,
 	.removexattr = ceph_removexattr,
 	.get_acl = ceph_get_acl,
+	.set_acl = ceph_set_acl,
 };
 
 /*
diff --git a/fs/ceph/super.c b/fs/ceph/super.c
index 2df963f..10a4ccb 100644
--- a/fs/ceph/super.c
+++ b/fs/ceph/super.c
@@ -144,7 +144,11 @@
 	Opt_ino32,
 	Opt_noino32,
 	Opt_fscache,
-	Opt_nofscache
+	Opt_nofscache,
+#ifdef CONFIG_CEPH_FS_POSIX_ACL
+	Opt_acl,
+#endif
+	Opt_noacl
 };
 
 static match_table_t fsopt_tokens = {
@@ -172,6 +176,10 @@
 	{Opt_noino32, "noino32"},
 	{Opt_fscache, "fsc"},
 	{Opt_nofscache, "nofsc"},
+#ifdef CONFIG_CEPH_FS_POSIX_ACL
+	{Opt_acl, "acl"},
+#endif
+	{Opt_noacl, "noacl"},
 	{-1, NULL}
 };
 
@@ -271,6 +279,14 @@
 	case Opt_nofscache:
 		fsopt->flags &= ~CEPH_MOUNT_OPT_FSCACHE;
 		break;
+#ifdef CONFIG_CEPH_FS_POSIX_ACL
+	case Opt_acl:
+		fsopt->sb_flags |= MS_POSIXACL;
+		break;
+#endif
+	case Opt_noacl:
+		fsopt->sb_flags &= ~MS_POSIXACL;
+		break;
 	default:
 		BUG_ON(token);
 	}
@@ -438,6 +454,13 @@
 	else
 		seq_puts(m, ",nofsc");
 
+#ifdef CONFIG_CEPH_FS_POSIX_ACL
+	if (fsopt->sb_flags & MS_POSIXACL)
+		seq_puts(m, ",acl");
+	else
+		seq_puts(m, ",noacl");
+#endif
+
 	if (fsopt->wsize)
 		seq_printf(m, ",wsize=%d", fsopt->wsize);
 	if (fsopt->rsize != CEPH_RSIZE_DEFAULT)
@@ -819,9 +842,6 @@
 
 	s->s_flags = fsc->mount_options->sb_flags;
 	s->s_maxbytes = 1ULL << 40;  /* temp value until we get mdsmap */
-#ifdef CONFIG_CEPH_FS_POSIX_ACL
-	s->s_flags |= MS_POSIXACL;
-#endif
 
 	s->s_xattr = ceph_xattr_handlers;
 	s->s_fs_info = fsc;
@@ -911,6 +931,10 @@
 	struct ceph_options *opt = NULL;
 
 	dout("ceph_mount\n");
+
+#ifdef CONFIG_CEPH_FS_POSIX_ACL
+	flags |= MS_POSIXACL;
+#endif
 	err = parse_mount_options(&fsopt, &opt, flags, data, dev_name, &path);
 	if (err < 0) {
 		res = ERR_PTR(err);
diff --git a/fs/ceph/super.h b/fs/ceph/super.h
index 3459339..d8801a9 100644
--- a/fs/ceph/super.h
+++ b/fs/ceph/super.h
@@ -13,6 +13,7 @@
 #include <linux/wait.h>
 #include <linux/writeback.h>
 #include <linux/slab.h>
+#include <linux/posix_acl.h>
 
 #include <linux/ceph/libceph.h>
 
@@ -741,12 +742,18 @@
 #ifdef CONFIG_CEPH_FS_POSIX_ACL
 
 struct posix_acl *ceph_get_acl(struct inode *, int);
+int ceph_set_acl(struct inode *inode, struct posix_acl *acl, int type);
 int ceph_init_acl(struct dentry *, struct inode *, struct inode *);
-void ceph_forget_all_cached_acls(struct inode *inode);
+
+static inline void ceph_forget_all_cached_acls(struct inode *inode)
+{
+	forget_all_cached_acls(inode);
+}
 
 #else
 
 #define ceph_get_acl NULL
+#define ceph_set_acl NULL
 
 static inline int ceph_init_acl(struct dentry *dentry, struct inode *inode,
 				struct inode *dir)
diff --git a/fs/ceph/xattr.c b/fs/ceph/xattr.c
index 898b656..a55ec37 100644
--- a/fs/ceph/xattr.c
+++ b/fs/ceph/xattr.c
@@ -12,6 +12,9 @@
 #define XATTR_CEPH_PREFIX "ceph."
 #define XATTR_CEPH_PREFIX_LEN (sizeof (XATTR_CEPH_PREFIX) - 1)
 
+static int __remove_xattr(struct ceph_inode_info *ci,
+			  struct ceph_inode_xattr *xattr);
+
 /*
  * List of handlers for synthetic system.* attributes. Other
  * attributes are handled directly.
@@ -319,8 +322,7 @@
 static int __set_xattr(struct ceph_inode_info *ci,
 			   const char *name, int name_len,
 			   const char *val, int val_len,
-			   int dirty,
-			   int should_free_name, int should_free_val,
+			   int flags, int update_xattr,
 			   struct ceph_inode_xattr **newxattr)
 {
 	struct rb_node **p;
@@ -349,12 +351,31 @@
 		xattr = NULL;
 	}
 
+	if (update_xattr) {
+		int err = 0;
+		if (xattr && (flags & XATTR_CREATE))
+			err = -EEXIST;
+		else if (!xattr && (flags & XATTR_REPLACE))
+			err = -ENODATA;
+		if (err) {
+			kfree(name);
+			kfree(val);
+			return err;
+		}
+		if (update_xattr < 0) {
+			if (xattr)
+				__remove_xattr(ci, xattr);
+			kfree(name);
+			return 0;
+		}
+	}
+
 	if (!xattr) {
 		new = 1;
 		xattr = *newxattr;
 		xattr->name = name;
 		xattr->name_len = name_len;
-		xattr->should_free_name = should_free_name;
+		xattr->should_free_name = update_xattr;
 
 		ci->i_xattrs.count++;
 		dout("__set_xattr count=%d\n", ci->i_xattrs.count);
@@ -364,7 +385,7 @@
 		if (xattr->should_free_val)
 			kfree((void *)xattr->val);
 
-		if (should_free_name) {
+		if (update_xattr) {
 			kfree((void *)name);
 			name = xattr->name;
 		}
@@ -379,8 +400,8 @@
 		xattr->val = "";
 
 	xattr->val_len = val_len;
-	xattr->dirty = dirty;
-	xattr->should_free_val = (val && should_free_val);
+	xattr->dirty = update_xattr;
+	xattr->should_free_val = (val && update_xattr);
 
 	if (new) {
 		rb_link_node(&xattr->node, parent, p);
@@ -442,7 +463,7 @@
 			  struct ceph_inode_xattr *xattr)
 {
 	if (!xattr)
-		return -EOPNOTSUPP;
+		return -ENODATA;
 
 	rb_erase(&xattr->node, &ci->i_xattrs.index);
 
@@ -588,7 +609,7 @@
 			p += len;
 
 			err = __set_xattr(ci, name, namelen, val, len,
-					  0, 0, 0, &xattrs[numattr]);
+					  0, 0, &xattrs[numattr]);
 
 			if (err < 0)
 				goto bad;
@@ -850,6 +871,9 @@
 
 	dout("setxattr value=%.*s\n", (int)size, value);
 
+	if (!value)
+		flags |= CEPH_XATTR_REMOVE;
+
 	/* do request */
 	req = ceph_mdsc_create_request(mdsc, CEPH_MDS_OP_SETXATTR,
 				       USE_AUTH_MDS);
@@ -892,7 +916,7 @@
 	struct ceph_inode_info *ci = ceph_inode(inode);
 	int issued;
 	int err;
-	int dirty;
+	int dirty = 0;
 	int name_len = strlen(name);
 	int val_len = size;
 	char *newname = NULL;
@@ -953,12 +977,14 @@
 		goto retry;
 	}
 
-	err = __set_xattr(ci, newname, name_len, newval,
-			  val_len, 1, 1, 1, &xattr);
+	err = __set_xattr(ci, newname, name_len, newval, val_len,
+			  flags, value ? 1 : -1, &xattr);
 
-	dirty = __ceph_mark_dirty_caps(ci, CEPH_CAP_XATTR_EXCL);
-	ci->i_xattrs.dirty = true;
-	inode->i_ctime = CURRENT_TIME;
+	if (!err) {
+		dirty = __ceph_mark_dirty_caps(ci, CEPH_CAP_XATTR_EXCL);
+		ci->i_xattrs.dirty = true;
+		inode->i_ctime = CURRENT_TIME;
+	}
 
 	spin_unlock(&ci->i_ceph_lock);
 	if (dirty)
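
The __set_xattr() rework above makes the in-memory xattr tree honour the standard flag semantics: XATTR_CREATE must fail with -EEXIST when the name already exists, XATTR_REPLACE with -ENODATA when it does not, and a negative update_xattr drops the entry. Seen from userspace these are the usual setxattr(2) rules; a small check (path and attribute name are made up):

	#include <errno.h>
	#include <stdio.h>
	#include <sys/xattr.h>

	int main(void)
	{
		const char *path = "/mnt/ceph/file";	/* hypothetical mount */

		if (setxattr(path, "user.example", "v", 1, XATTR_CREATE) == -1 &&
		    errno == EEXIST)
			printf("attribute already present\n");

		if (setxattr(path, "user.example", "v", 1, XATTR_REPLACE) == -1 &&
		    errno == ENODATA)
			printf("attribute not present\n");

		return 0;
	}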
diff --git a/fs/cifs/cifsacl.c b/fs/cifs/cifsacl.c
index 51f5e0e..7ff866d 100644
--- a/fs/cifs/cifsacl.c
+++ b/fs/cifs/cifsacl.c
@@ -865,8 +865,8 @@
 	return rc;
 }
 
-static struct cifs_ntsd *get_cifs_acl_by_fid(struct cifs_sb_info *cifs_sb,
-		__u16 fid, u32 *pacllen)
+struct cifs_ntsd *get_cifs_acl_by_fid(struct cifs_sb_info *cifs_sb,
+		const struct cifs_fid *cifsfid, u32 *pacllen)
 {
 	struct cifs_ntsd *pntsd = NULL;
 	unsigned int xid;
@@ -877,7 +877,8 @@
 		return ERR_CAST(tlink);
 
 	xid = get_xid();
-	rc = CIFSSMBGetCIFSACL(xid, tlink_tcon(tlink), fid, &pntsd, pacllen);
+	rc = CIFSSMBGetCIFSACL(xid, tlink_tcon(tlink), cifsfid->netfid, &pntsd,
+				pacllen);
 	free_xid(xid);
 
 	cifs_put_tlink(tlink);
@@ -895,9 +896,10 @@
 	int oplock = 0;
 	unsigned int xid;
 	int rc, create_options = 0;
-	__u16 fid;
 	struct cifs_tcon *tcon;
 	struct tcon_link *tlink = cifs_sb_tlink(cifs_sb);
+	struct cifs_fid fid;
+	struct cifs_open_parms oparms;
 
 	if (IS_ERR(tlink))
 		return ERR_CAST(tlink);
@@ -908,12 +910,19 @@
 	if (backup_cred(cifs_sb))
 		create_options |= CREATE_OPEN_BACKUP_INTENT;
 
-	rc = CIFSSMBOpen(xid, tcon, path, FILE_OPEN, READ_CONTROL,
-			create_options, &fid, &oplock, NULL, cifs_sb->local_nls,
-			cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MAP_SPECIAL_CHR);
+	oparms.tcon = tcon;
+	oparms.cifs_sb = cifs_sb;
+	oparms.desired_access = READ_CONTROL;
+	oparms.create_options = create_options;
+	oparms.disposition = FILE_OPEN;
+	oparms.path = path;
+	oparms.fid = &fid;
+	oparms.reconnect = false;
+
+	rc = CIFS_open(xid, &oparms, &oplock, NULL);
 	if (!rc) {
-		rc = CIFSSMBGetCIFSACL(xid, tcon, fid, &pntsd, pacllen);
-		CIFSSMBClose(xid, tcon, fid);
+		rc = CIFSSMBGetCIFSACL(xid, tcon, fid.netfid, &pntsd, pacllen);
+		CIFSSMBClose(xid, tcon, fid.netfid);
 	}
 
 	cifs_put_tlink(tlink);
@@ -938,7 +947,7 @@
 	if (!open_file)
 		return get_cifs_acl_by_path(cifs_sb, path, pacllen);
 
-	pntsd = get_cifs_acl_by_fid(cifs_sb, open_file->fid.netfid, pacllen);
+	pntsd = get_cifs_acl_by_fid(cifs_sb, &open_file->fid, pacllen);
 	cifsFileInfo_put(open_file);
 	return pntsd;
 }
@@ -950,10 +959,11 @@
 	int oplock = 0;
 	unsigned int xid;
 	int rc, access_flags, create_options = 0;
-	__u16 fid;
 	struct cifs_tcon *tcon;
 	struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);
 	struct tcon_link *tlink = cifs_sb_tlink(cifs_sb);
+	struct cifs_fid fid;
+	struct cifs_open_parms oparms;
 
 	if (IS_ERR(tlink))
 		return PTR_ERR(tlink);
@@ -969,18 +979,25 @@
 	else
 		access_flags = WRITE_DAC;
 
-	rc = CIFSSMBOpen(xid, tcon, path, FILE_OPEN, access_flags,
-			create_options, &fid, &oplock, NULL, cifs_sb->local_nls,
-			cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MAP_SPECIAL_CHR);
+	oparms.tcon = tcon;
+	oparms.cifs_sb = cifs_sb;
+	oparms.desired_access = access_flags;
+	oparms.create_options = create_options;
+	oparms.disposition = FILE_OPEN;
+	oparms.path = path;
+	oparms.fid = &fid;
+	oparms.reconnect = false;
+
+	rc = CIFS_open(xid, &oparms, &oplock, NULL);
 	if (rc) {
 		cifs_dbg(VFS, "Unable to open file to set ACL\n");
 		goto out;
 	}
 
-	rc = CIFSSMBSetCIFSACL(xid, tcon, fid, pnntsd, acllen, aclflag);
+	rc = CIFSSMBSetCIFSACL(xid, tcon, fid.netfid, pnntsd, acllen, aclflag);
 	cifs_dbg(NOISY, "SetCIFSACL rc = %d\n", rc);
 
-	CIFSSMBClose(xid, tcon, fid);
+	CIFSSMBClose(xid, tcon, fid.netfid);
 out:
 	free_xid(xid);
 	cifs_put_tlink(tlink);
@@ -990,19 +1007,31 @@
 /* Translate the CIFS ACL (similar to NTFS ACL) for a file into mode bits */
 int
 cifs_acl_to_fattr(struct cifs_sb_info *cifs_sb, struct cifs_fattr *fattr,
-		  struct inode *inode, const char *path, const __u16 *pfid)
+		  struct inode *inode, const char *path,
+		  const struct cifs_fid *pfid)
 {
 	struct cifs_ntsd *pntsd = NULL;
 	u32 acllen = 0;
 	int rc = 0;
+	struct tcon_link *tlink = cifs_sb_tlink(cifs_sb);
+	struct cifs_tcon *tcon;
 
 	cifs_dbg(NOISY, "converting ACL to mode for %s\n", path);
 
-	if (pfid)
-		pntsd = get_cifs_acl_by_fid(cifs_sb, *pfid, &acllen);
-	else
-		pntsd = get_cifs_acl(cifs_sb, inode, path, &acllen);
+	if (IS_ERR(tlink))
+		return PTR_ERR(tlink);
+	tcon = tlink_tcon(tlink);
 
+	if (pfid && (tcon->ses->server->ops->get_acl_by_fid))
+		pntsd = tcon->ses->server->ops->get_acl_by_fid(cifs_sb, pfid,
+							  &acllen);
+	else if (tcon->ses->server->ops->get_acl)
+		pntsd = tcon->ses->server->ops->get_acl(cifs_sb, inode, path,
+							&acllen);
+	else {
+		cifs_put_tlink(tlink);
+		return -EOPNOTSUPP;
+	}
 	/* if we can retrieve the ACL, now parse Access Control Entries, ACEs */
 	if (IS_ERR(pntsd)) {
 		rc = PTR_ERR(pntsd);
@@ -1014,6 +1043,8 @@
 			cifs_dbg(VFS, "parse sec desc failed rc = %d\n", rc);
 	}
 
+	cifs_put_tlink(tlink);
+
 	return rc;
 }
 
@@ -1027,15 +1058,30 @@
 	__u32 secdesclen = 0;
 	struct cifs_ntsd *pntsd = NULL; /* acl obtained from server */
 	struct cifs_ntsd *pnntsd = NULL; /* modified acl to be sent to server */
+	struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);
+	struct tcon_link *tlink = cifs_sb_tlink(cifs_sb);
+	struct cifs_tcon *tcon;
+
+	if (IS_ERR(tlink))
+		return PTR_ERR(tlink);
+	tcon = tlink_tcon(tlink);
 
 	cifs_dbg(NOISY, "set ACL from mode for %s\n", path);
 
 	/* Get the security descriptor */
-	pntsd = get_cifs_acl(CIFS_SB(inode->i_sb), inode, path, &secdesclen);
+
+	if (tcon->ses->server->ops->get_acl == NULL) {
+		cifs_put_tlink(tlink);
+		return -EOPNOTSUPP;
+	}
+
+	pntsd = tcon->ses->server->ops->get_acl(cifs_sb, inode, path,
+						&secdesclen);
 	if (IS_ERR(pntsd)) {
 		rc = PTR_ERR(pntsd);
 		cifs_dbg(VFS, "%s: error %d getting sec desc\n", __func__, rc);
-		goto out;
+		cifs_put_tlink(tlink);
+		return rc;
 	}
 
 	/*
@@ -1048,6 +1094,7 @@
 	pnntsd = kmalloc(secdesclen, GFP_KERNEL);
 	if (!pnntsd) {
 		kfree(pntsd);
+		cifs_put_tlink(tlink);
 		return -ENOMEM;
 	}
 
@@ -1056,14 +1103,18 @@
 
 	cifs_dbg(NOISY, "build_sec_desc rc: %d\n", rc);
 
+	if (tcon->ses->server->ops->set_acl == NULL)
+		rc = -EOPNOTSUPP;
+
 	if (!rc) {
 		/* Set the security descriptor */
-		rc = set_cifs_acl(pnntsd, secdesclen, inode, path, aclflag);
+		rc = tcon->ses->server->ops->set_acl(pnntsd, secdesclen, inode,
+						     path, aclflag);
 		cifs_dbg(NOISY, "set_cifs_acl rc: %d\n", rc);
 	}
+	cifs_put_tlink(tlink);
 
 	kfree(pnntsd);
 	kfree(pntsd);
-out:
 	return rc;
 }
diff --git a/fs/cifs/cifsglob.h b/fs/cifs/cifsglob.h
index f918a99..cf32f03 100644
--- a/fs/cifs/cifsglob.h
+++ b/fs/cifs/cifsglob.h
@@ -323,7 +323,8 @@
 	/* async read from the server */
 	int (*async_readv)(struct cifs_readdata *);
 	/* async write to the server */
-	int (*async_writev)(struct cifs_writedata *);
+	int (*async_writev)(struct cifs_writedata *,
+			    void (*release)(struct kref *));
 	/* sync read from the server */
 	int (*sync_read)(const unsigned int, struct cifsFileInfo *,
 			 struct cifs_io_parms *, unsigned int *, char **,
@@ -370,8 +371,12 @@
 	void (*new_lease_key)(struct cifs_fid *);
 	int (*generate_signingkey)(struct cifs_ses *);
 	int (*calc_signature)(struct smb_rqst *, struct TCP_Server_Info *);
-	int (*query_mf_symlink)(const unsigned char *, char *, unsigned int *,
-				struct cifs_sb_info *, unsigned int);
+	int (*query_mf_symlink)(unsigned int, struct cifs_tcon *,
+				struct cifs_sb_info *, const unsigned char *,
+				char *, unsigned int *);
+	int (*create_mf_symlink)(unsigned int, struct cifs_tcon *,
+				 struct cifs_sb_info *, const unsigned char *,
+				 char *, unsigned int *);
 	/* if we can do cache read operations */
 	bool (*is_read_op)(__u32);
 	/* set oplock level for the inode */
@@ -385,6 +390,18 @@
 			struct cifsFileInfo *target_file, u64 src_off, u64 len,
 			u64 dest_off);
 	int (*validate_negotiate)(const unsigned int, struct cifs_tcon *);
+	ssize_t (*query_all_EAs)(const unsigned int, struct cifs_tcon *,
+			const unsigned char *, const unsigned char *, char *,
+			size_t, const struct nls_table *, int);
+	int (*set_EA)(const unsigned int, struct cifs_tcon *, const char *,
+			const char *, const void *, const __u16,
+			const struct nls_table *, int);
+	struct cifs_ntsd * (*get_acl)(struct cifs_sb_info *, struct inode *,
+			const char *, u32 *);
+	struct cifs_ntsd * (*get_acl_by_fid)(struct cifs_sb_info *,
+			const struct cifs_fid *, u32 *);
+	int (*set_acl)(struct cifs_ntsd *, __u32, struct inode *, const char *,
+			int);
 };
 
 struct smb_version_values {
@@ -1054,7 +1071,7 @@
 	unsigned int			pagesz;
 	unsigned int			tailsz;
 	unsigned int			nr_pages;
-	struct page			*pages[1];
+	struct page			*pages[];
 };
 
 /*
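
Turning pages[1] into pages[] makes struct cifs_writedata end in a C99 flexible array member, so sizeof() no longer counts a phantom first element and the allocation in cifs_writedata_alloc() (further down, in cifssmb.c) can drop both its "nr_pages - 1" adjustment and the nr_pages == 0 special case. A standalone illustration of the pattern, not CIFS code:

	#include <stdlib.h>

	struct wd_like {
		unsigned int nr_pages;
		void *pages[];		/* flexible array member: adds nothing to sizeof */
	};

	static struct wd_like *wd_alloc(unsigned int nr)
	{
		struct wd_like *w = calloc(1, sizeof(*w) + nr * sizeof(w->pages[0]));

		if (w)
			w->nr_pages = nr;
		return w;
	}

	int main(void)
	{
		struct wd_like *w = wd_alloc(4);

		free(w);
		return 0;
	}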
diff --git a/fs/cifs/cifsproto.h b/fs/cifs/cifsproto.h
index 2c29db6..acc4ee8 100644
--- a/fs/cifs/cifsproto.h
+++ b/fs/cifs/cifsproto.h
@@ -151,7 +151,7 @@
 
 extern int cifs_get_inode_info(struct inode **inode, const char *full_path,
 			       FILE_ALL_INFO *data, struct super_block *sb,
-			       int xid, const __u16 *fid);
+			       int xid, const struct cifs_fid *fid);
 extern int cifs_get_inode_info_unix(struct inode **pinode,
 			const unsigned char *search_path,
 			struct super_block *sb, unsigned int xid);
@@ -162,11 +162,13 @@
 				      const unsigned int xid);
 extern int cifs_acl_to_fattr(struct cifs_sb_info *cifs_sb,
 			      struct cifs_fattr *fattr, struct inode *inode,
-			      const char *path, const __u16 *pfid);
+			      const char *path, const struct cifs_fid *pfid);
 extern int id_mode_to_cifs_acl(struct inode *inode, const char *path, __u64,
 					kuid_t, kgid_t);
 extern struct cifs_ntsd *get_cifs_acl(struct cifs_sb_info *, struct inode *,
 					const char *, u32 *);
+extern struct cifs_ntsd *get_cifs_acl_by_fid(struct cifs_sb_info *,
+						const struct cifs_fid *, u32 *);
 extern int set_cifs_acl(struct cifs_ntsd *, __u32, struct inode *,
 				const char *, int);
 
@@ -362,11 +364,8 @@
 			       const struct nls_table *nls_codepage);
 extern int CIFSSMB_set_compression(const unsigned int xid,
 				   struct cifs_tcon *tcon, __u16 fid);
-extern int CIFSSMBOpen(const unsigned int xid, struct cifs_tcon *tcon,
-			const char *fileName, const int disposition,
-			const int access_flags, const int omode,
-			__u16 *netfid, int *pOplock, FILE_ALL_INFO *,
-			const struct nls_table *nls_codepage, int remap);
+extern int CIFS_open(const unsigned int xid, struct cifs_open_parms *oparms,
+		     int *oplock, FILE_ALL_INFO *buf);
 extern int SMBLegacyOpen(const unsigned int xid, struct cifs_tcon *tcon,
 			const char *fileName, const int disposition,
 			const int access_flags, const int omode,
@@ -476,8 +475,8 @@
 extern int CIFSGetExtAttr(const unsigned int xid, struct cifs_tcon *tcon,
 			const int netfid, __u64 *pExtAttrBits, __u64 *pMask);
 extern void cifs_autodisable_serverino(struct cifs_sb_info *cifs_sb);
-extern bool CIFSCouldBeMFSymlink(const struct cifs_fattr *fattr);
-extern int CIFSCheckMFSymlink(unsigned int xid, struct cifs_tcon *tcon,
+extern bool couldbe_mf_symlink(const struct cifs_fattr *fattr);
+extern int check_mf_symlink(unsigned int xid, struct cifs_tcon *tcon,
 			      struct cifs_sb_info *cifs_sb,
 			      struct cifs_fattr *fattr,
 			      const unsigned char *path);
@@ -491,12 +490,18 @@
 int cifs_async_readv(struct cifs_readdata *rdata);
 int cifs_readv_receive(struct TCP_Server_Info *server, struct mid_q_entry *mid);
 
-int cifs_async_writev(struct cifs_writedata *wdata);
+int cifs_async_writev(struct cifs_writedata *wdata,
+		      void (*release)(struct kref *kref));
 void cifs_writev_complete(struct work_struct *work);
 struct cifs_writedata *cifs_writedata_alloc(unsigned int nr_pages,
 						work_func_t complete);
 void cifs_writedata_release(struct kref *refcount);
-int open_query_close_cifs_symlink(const unsigned char *path, char *pbuf,
-			unsigned int *pbytes_read, struct cifs_sb_info *cifs_sb,
-			unsigned int xid);
+int cifs_query_mf_symlink(unsigned int xid, struct cifs_tcon *tcon,
+			  struct cifs_sb_info *cifs_sb,
+			  const unsigned char *path, char *pbuf,
+			  unsigned int *pbytes_read);
+int cifs_create_mf_symlink(unsigned int xid, struct cifs_tcon *tcon,
+			   struct cifs_sb_info *cifs_sb,
+			   const unsigned char *path, char *pbuf,
+			   unsigned int *pbytes_written);
 #endif			/* _CIFSPROTO_H */
diff --git a/fs/cifs/cifssmb.c b/fs/cifs/cifssmb.c
index d707edb..f3264bd 100644
--- a/fs/cifs/cifssmb.c
+++ b/fs/cifs/cifssmb.c
@@ -1273,104 +1273,124 @@
 }
 
 int
-CIFSSMBOpen(const unsigned int xid, struct cifs_tcon *tcon,
-	    const char *fileName, const int openDisposition,
-	    const int access_flags, const int create_options, __u16 *netfid,
-	    int *pOplock, FILE_ALL_INFO *pfile_info,
-	    const struct nls_table *nls_codepage, int remap)
+CIFS_open(const unsigned int xid, struct cifs_open_parms *oparms, int *oplock,
+	  FILE_ALL_INFO *buf)
 {
 	int rc = -EACCES;
-	OPEN_REQ *pSMB = NULL;
-	OPEN_RSP *pSMBr = NULL;
+	OPEN_REQ *req = NULL;
+	OPEN_RSP *rsp = NULL;
 	int bytes_returned;
 	int name_len;
 	__u16 count;
+	struct cifs_sb_info *cifs_sb = oparms->cifs_sb;
+	struct cifs_tcon *tcon = oparms->tcon;
+	int remap = cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MAP_SPECIAL_CHR;
+	const struct nls_table *nls = cifs_sb->local_nls;
+	int create_options = oparms->create_options;
+	int desired_access = oparms->desired_access;
+	int disposition = oparms->disposition;
+	const char *path = oparms->path;
 
 openRetry:
-	rc = smb_init(SMB_COM_NT_CREATE_ANDX, 24, tcon, (void **) &pSMB,
-		      (void **) &pSMBr);
+	rc = smb_init(SMB_COM_NT_CREATE_ANDX, 24, tcon, (void **)&req,
+		      (void **)&rsp);
 	if (rc)
 		return rc;
 
-	pSMB->AndXCommand = 0xFF;	/* none */
+	/* no commands go after this */
+	req->AndXCommand = 0xFF;
 
-	if (pSMB->hdr.Flags2 & SMBFLG2_UNICODE) {
-		count = 1;	/* account for one byte pad to word boundary */
-		name_len =
-		    cifsConvertToUTF16((__le16 *) (pSMB->fileName + 1),
-				       fileName, PATH_MAX, nls_codepage, remap);
-		name_len++;	/* trailing null */
+	if (req->hdr.Flags2 & SMBFLG2_UNICODE) {
+		/* account for one byte pad to word boundary */
+		count = 1;
+		name_len = cifsConvertToUTF16((__le16 *)(req->fileName + 1),
+					      path, PATH_MAX, nls, remap);
+		/* trailing null */
+		name_len++;
 		name_len *= 2;
-		pSMB->NameLength = cpu_to_le16(name_len);
-	} else {		/* BB improve check for buffer overruns BB */
-		count = 0;	/* no pad */
-		name_len = strnlen(fileName, PATH_MAX);
-		name_len++;	/* trailing null */
-		pSMB->NameLength = cpu_to_le16(name_len);
-		strncpy(pSMB->fileName, fileName, name_len);
+		req->NameLength = cpu_to_le16(name_len);
+	} else {
+		/* BB improve check for buffer overruns BB */
+		/* no pad */
+		count = 0;
+		name_len = strnlen(path, PATH_MAX);
+		/* trailing null */
+		name_len++;
+		req->NameLength = cpu_to_le16(name_len);
+		strncpy(req->fileName, path, name_len);
 	}
-	if (*pOplock & REQ_OPLOCK)
-		pSMB->OpenFlags = cpu_to_le32(REQ_OPLOCK);
-	else if (*pOplock & REQ_BATCHOPLOCK)
-		pSMB->OpenFlags = cpu_to_le32(REQ_BATCHOPLOCK);
-	pSMB->DesiredAccess = cpu_to_le32(access_flags);
-	pSMB->AllocationSize = 0;
-	/* set file as system file if special file such
-	   as fifo and server expecting SFU style and
-	   no Unix extensions */
-	if (create_options & CREATE_OPTION_SPECIAL)
-		pSMB->FileAttributes = cpu_to_le32(ATTR_SYSTEM);
-	else
-		pSMB->FileAttributes = cpu_to_le32(ATTR_NORMAL);
 
-	/* XP does not handle ATTR_POSIX_SEMANTICS */
-	/* but it helps speed up case sensitive checks for other
-	servers such as Samba */
+	if (*oplock & REQ_OPLOCK)
+		req->OpenFlags = cpu_to_le32(REQ_OPLOCK);
+	else if (*oplock & REQ_BATCHOPLOCK)
+		req->OpenFlags = cpu_to_le32(REQ_BATCHOPLOCK);
+
+	req->DesiredAccess = cpu_to_le32(desired_access);
+	req->AllocationSize = 0;
+
+	/*
+	 * Set file as system file if special file such as fifo and server
+	 * expecting SFU style and no Unix extensions.
+	 */
+	if (create_options & CREATE_OPTION_SPECIAL)
+		req->FileAttributes = cpu_to_le32(ATTR_SYSTEM);
+	else
+		req->FileAttributes = cpu_to_le32(ATTR_NORMAL);
+
+	/*
+	 * XP does not handle ATTR_POSIX_SEMANTICS but it helps speed up case
+	 * sensitive checks for other servers such as Samba.
+	 */
 	if (tcon->ses->capabilities & CAP_UNIX)
-		pSMB->FileAttributes |= cpu_to_le32(ATTR_POSIX_SEMANTICS);
+		req->FileAttributes |= cpu_to_le32(ATTR_POSIX_SEMANTICS);
 
 	if (create_options & CREATE_OPTION_READONLY)
-		pSMB->FileAttributes |= cpu_to_le32(ATTR_READONLY);
+		req->FileAttributes |= cpu_to_le32(ATTR_READONLY);
 
-	pSMB->ShareAccess = cpu_to_le32(FILE_SHARE_ALL);
-	pSMB->CreateDisposition = cpu_to_le32(openDisposition);
-	pSMB->CreateOptions = cpu_to_le32(create_options & CREATE_OPTIONS_MASK);
+	req->ShareAccess = cpu_to_le32(FILE_SHARE_ALL);
+	req->CreateDisposition = cpu_to_le32(disposition);
+	req->CreateOptions = cpu_to_le32(create_options & CREATE_OPTIONS_MASK);
+
 	/* BB Experiment with various impersonation levels and verify */
-	pSMB->ImpersonationLevel = cpu_to_le32(SECURITY_IMPERSONATION);
-	pSMB->SecurityFlags =
-	    SECURITY_CONTEXT_TRACKING | SECURITY_EFFECTIVE_ONLY;
+	req->ImpersonationLevel = cpu_to_le32(SECURITY_IMPERSONATION);
+	req->SecurityFlags = SECURITY_CONTEXT_TRACKING|SECURITY_EFFECTIVE_ONLY;
 
 	count += name_len;
-	inc_rfc1001_len(pSMB, count);
+	inc_rfc1001_len(req, count);
 
-	pSMB->ByteCount = cpu_to_le16(count);
-	/* long_op set to 1 to allow for oplock break timeouts */
-	rc = SendReceive(xid, tcon->ses, (struct smb_hdr *) pSMB,
-			(struct smb_hdr *)pSMBr, &bytes_returned, 0);
+	req->ByteCount = cpu_to_le16(count);
+	rc = SendReceive(xid, tcon->ses, (struct smb_hdr *)req,
+			 (struct smb_hdr *)rsp, &bytes_returned, 0);
 	cifs_stats_inc(&tcon->stats.cifs_stats.num_opens);
 	if (rc) {
 		cifs_dbg(FYI, "Error in Open = %d\n", rc);
-	} else {
-		*pOplock = pSMBr->OplockLevel; /* 1 byte no need to le_to_cpu */
-		*netfid = pSMBr->Fid;	/* cifs fid stays in le */
-		/* Let caller know file was created so we can set the mode. */
-		/* Do we care about the CreateAction in any other cases? */
-		if (cpu_to_le32(FILE_CREATE) == pSMBr->CreateAction)
-			*pOplock |= CIFS_CREATE_ACTION;
-		if (pfile_info) {
-			memcpy((char *)pfile_info, (char *)&pSMBr->CreationTime,
-				36 /* CreationTime to Attributes */);
-			/* the file_info buf is endian converted by caller */
-			pfile_info->AllocationSize = pSMBr->AllocationSize;
-			pfile_info->EndOfFile = pSMBr->EndOfFile;
-			pfile_info->NumberOfLinks = cpu_to_le32(1);
-			pfile_info->DeletePending = 0;
-		}
+		cifs_buf_release(req);
+		if (rc == -EAGAIN)
+			goto openRetry;
+		return rc;
 	}
 
-	cifs_buf_release(pSMB);
-	if (rc == -EAGAIN)
-		goto openRetry;
+	/* 1 byte no need to le_to_cpu */
+	*oplock = rsp->OplockLevel;
+	/* cifs fid stays in le */
+	oparms->fid->netfid = rsp->Fid;
+
+	/* Let caller know file was created so we can set the mode. */
+	/* Do we care about the CreateAction in any other cases? */
+	if (cpu_to_le32(FILE_CREATE) == rsp->CreateAction)
+		*oplock |= CIFS_CREATE_ACTION;
+
+	if (buf) {
+		/* copy from CreationTime to Attributes */
+		memcpy((char *)buf, (char *)&rsp->CreationTime, 36);
+		/* the file_info buf is endian converted by caller */
+		buf->AllocationSize = rsp->AllocationSize;
+		buf->EndOfFile = rsp->EndOfFile;
+		buf->NumberOfLinks = cpu_to_le32(1);
+		buf->DeletePending = 0;
+	}
+
+	cifs_buf_release(req);
 	return rc;
 }
 
@@ -1890,7 +1910,7 @@
 
 	do {
 		server = tlink_tcon(wdata->cfile->tlink)->ses->server;
-		rc = server->ops->async_writev(wdata);
+		rc = server->ops->async_writev(wdata, cifs_writedata_release);
 	} while (rc == -EAGAIN);
 
 	for (i = 0; i < wdata->nr_pages; i++) {
@@ -1942,15 +1962,9 @@
 {
 	struct cifs_writedata *wdata;
 
-	/* this would overflow */
-	if (nr_pages == 0) {
-		cifs_dbg(VFS, "%s: called with nr_pages == 0!\n", __func__);
-		return NULL;
-	}
-
 	/* writedata + number of page pointers */
 	wdata = kzalloc(sizeof(*wdata) +
-			sizeof(struct page *) * (nr_pages - 1), GFP_NOFS);
+			sizeof(struct page *) * nr_pages, GFP_NOFS);
 	if (wdata != NULL) {
 		kref_init(&wdata->refcount);
 		INIT_LIST_HEAD(&wdata->list);
@@ -2011,7 +2025,8 @@
 
 /* cifs_async_writev - send an async write, and set up mid to handle result */
 int
-cifs_async_writev(struct cifs_writedata *wdata)
+cifs_async_writev(struct cifs_writedata *wdata,
+		  void (*release)(struct kref *kref))
 {
 	int rc = -EACCES;
 	WRITE_REQ *smb = NULL;
@@ -2085,7 +2100,7 @@
 	if (rc == 0)
 		cifs_stats_inc(&tcon->stats.cifs_stats.num_writes);
 	else
-		kref_put(&wdata->refcount, cifs_writedata_release);
+		kref_put(&wdata->refcount, release);
 
 async_writev_out:
 	cifs_small_buf_release(smb);
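
Threading a release callback through ->async_writev() lets each submitter pick its own teardown when the last reference to the write request is dropped: cached writeback keeps using cifs_writedata_release(), while the uncached path (in fs/cifs/file.c below) supplies a variant that also puts the pages it pinned. A hedged kernel-style sketch of the underlying kref idiom, with made-up names:

	#include <linux/kref.h>
	#include <linux/slab.h>

	struct wd_sketch {
		struct kref refcount;
		/* ... pages, work item, ... */
	};

	static void wd_sketch_release(struct kref *ref)
	{
		kfree(container_of(ref, struct wd_sketch, refcount));
	}

	/* on failure the sender drops its reference with whichever destructor
	 * it was handed, mirroring the new async_writev(wdata, release) shape */
	static void wd_sketch_fail(struct wd_sketch *w,
				   void (*release)(struct kref *))
	{
		kref_put(&w->refcount, release);
	}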
diff --git a/fs/cifs/dir.c b/fs/cifs/dir.c
index a514e0a..3db0c5f 100644
--- a/fs/cifs/dir.c
+++ b/fs/cifs/dir.c
@@ -378,7 +378,7 @@
 					      xid);
 	else {
 		rc = cifs_get_inode_info(&newinode, full_path, buf, inode->i_sb,
-					 xid, &fid->netfid);
+					 xid, fid);
 		if (newinode) {
 			if (server->ops->set_lease_key)
 				server->ops->set_lease_key(newinode, fid);
@@ -565,12 +565,13 @@
 	int create_options = CREATE_NOT_DIR | CREATE_OPTION_SPECIAL;
 	struct cifs_sb_info *cifs_sb;
 	struct tcon_link *tlink;
-	struct cifs_tcon *pTcon;
+	struct cifs_tcon *tcon;
 	struct cifs_io_parms io_parms;
 	char *full_path = NULL;
 	struct inode *newinode = NULL;
 	int oplock = 0;
-	u16 fileHandle;
+	struct cifs_fid fid;
+	struct cifs_open_parms oparms;
 	FILE_ALL_INFO *buf = NULL;
 	unsigned int bytes_written;
 	struct win_dev *pdev;
@@ -583,7 +584,7 @@
 	if (IS_ERR(tlink))
 		return PTR_ERR(tlink);
 
-	pTcon = tlink_tcon(tlink);
+	tcon = tlink_tcon(tlink);
 
 	xid = get_xid();
 
@@ -593,7 +594,7 @@
 		goto mknod_out;
 	}
 
-	if (pTcon->unix_ext) {
+	if (tcon->unix_ext) {
 		struct cifs_unix_set_info_args args = {
 			.mode	= mode & ~current_umask(),
 			.ctime	= NO_CHANGE_64,
@@ -608,7 +609,7 @@
 			args.uid = INVALID_UID; /* no change */
 			args.gid = INVALID_GID; /* no change */
 		}
-		rc = CIFSSMBUnixSetPathInfo(xid, pTcon, full_path, &args,
+		rc = CIFSSMBUnixSetPathInfo(xid, tcon, full_path, &args,
 					    cifs_sb->local_nls,
 					    cifs_sb->mnt_cifs_flags &
 						CIFS_MOUNT_MAP_SPECIAL_CHR);
@@ -640,42 +641,44 @@
 	if (backup_cred(cifs_sb))
 		create_options |= CREATE_OPEN_BACKUP_INTENT;
 
-	rc = CIFSSMBOpen(xid, pTcon, full_path, FILE_CREATE,
-			 GENERIC_WRITE, create_options,
-			 &fileHandle, &oplock, buf, cifs_sb->local_nls,
-			 cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MAP_SPECIAL_CHR);
+	oparms.tcon = tcon;
+	oparms.cifs_sb = cifs_sb;
+	oparms.desired_access = GENERIC_WRITE;
+	oparms.create_options = create_options;
+	oparms.disposition = FILE_CREATE;
+	oparms.path = full_path;
+	oparms.fid = &fid;
+	oparms.reconnect = false;
+
+	rc = CIFS_open(xid, &oparms, &oplock, buf);
 	if (rc)
 		goto mknod_out;
 
-	/* BB Do not bother to decode buf since no local inode yet to put
-	 * timestamps in, but we can reuse it safely */
+	/*
+	 * BB Do not bother to decode buf since no local inode yet to put
+	 * timestamps in, but we can reuse it safely.
+	 */
 
 	pdev = (struct win_dev *)buf;
-	io_parms.netfid = fileHandle;
+	io_parms.netfid = fid.netfid;
 	io_parms.pid = current->tgid;
-	io_parms.tcon = pTcon;
+	io_parms.tcon = tcon;
 	io_parms.offset = 0;
 	io_parms.length = sizeof(struct win_dev);
 	if (S_ISCHR(mode)) {
 		memcpy(pdev->type, "IntxCHR", 8);
-		pdev->major =
-		      cpu_to_le64(MAJOR(device_number));
-		pdev->minor =
-		      cpu_to_le64(MINOR(device_number));
-		rc = CIFSSMBWrite(xid, &io_parms,
-			&bytes_written, (char *)pdev,
-			NULL, 0);
+		pdev->major = cpu_to_le64(MAJOR(device_number));
+		pdev->minor = cpu_to_le64(MINOR(device_number));
+		rc = CIFSSMBWrite(xid, &io_parms, &bytes_written, (char *)pdev,
+				  NULL, 0);
 	} else if (S_ISBLK(mode)) {
 		memcpy(pdev->type, "IntxBLK", 8);
-		pdev->major =
-		      cpu_to_le64(MAJOR(device_number));
-		pdev->minor =
-		      cpu_to_le64(MINOR(device_number));
-		rc = CIFSSMBWrite(xid, &io_parms,
-			&bytes_written, (char *)pdev,
-			NULL, 0);
+		pdev->major = cpu_to_le64(MAJOR(device_number));
+		pdev->minor = cpu_to_le64(MINOR(device_number));
+		rc = CIFSSMBWrite(xid, &io_parms, &bytes_written, (char *)pdev,
+				  NULL, 0);
 	} /* else if (S_ISFIFO) */
-	CIFSSMBClose(xid, pTcon, fileHandle);
+	CIFSSMBClose(xid, tcon, fid.netfid);
 	d_drop(direntry);
 
 	/* FIXME: add code here to set EAs */
diff --git a/fs/cifs/file.c b/fs/cifs/file.c
index 5a5a872..53c1507 100644
--- a/fs/cifs/file.c
+++ b/fs/cifs/file.c
@@ -244,7 +244,7 @@
 					      xid);
 	else
 		rc = cifs_get_inode_info(&inode, full_path, buf, inode->i_sb,
-					 xid, &fid->netfid);
+					 xid, fid);
 
 out:
 	kfree(buf);
@@ -678,7 +678,7 @@
 
 	/*
 	 * Can not refresh inode by passing in file_info buf to be returned by
-	 * CIFSSMBOpen and then calling get_inode_info with returned buf since
+	 * ops->open and then calling get_inode_info with returned buf since
 	 * file might have write behind data that needs to be flushed and server
 	 * version of file size can be stale. If we knew for sure that inode was
 	 * not dirty locally we could do this.
@@ -2043,7 +2043,8 @@
 			}
 			wdata->pid = wdata->cfile->pid;
 			server = tlink_tcon(wdata->cfile->tlink)->ses->server;
-			rc = server->ops->async_writev(wdata);
+			rc = server->ops->async_writev(wdata,
+							cifs_writedata_release);
 		} while (wbc->sync_mode == WB_SYNC_ALL && rc == -EAGAIN);
 
 		for (i = 0; i < nr_pages; ++i)
@@ -2331,9 +2332,20 @@
 }
 
 static void
-cifs_uncached_writev_complete(struct work_struct *work)
+cifs_uncached_writedata_release(struct kref *refcount)
 {
 	int i;
+	struct cifs_writedata *wdata = container_of(refcount,
+					struct cifs_writedata, refcount);
+
+	for (i = 0; i < wdata->nr_pages; i++)
+		put_page(wdata->pages[i]);
+	cifs_writedata_release(refcount);
+}
+
+static void
+cifs_uncached_writev_complete(struct work_struct *work)
+{
 	struct cifs_writedata *wdata = container_of(work,
 					struct cifs_writedata, work);
 	struct inode *inode = wdata->cfile->dentry->d_inode;
@@ -2347,12 +2359,7 @@
 
 	complete(&wdata->done);
 
-	if (wdata->result != -EAGAIN) {
-		for (i = 0; i < wdata->nr_pages; i++)
-			put_page(wdata->pages[i]);
-	}
-
-	kref_put(&wdata->refcount, cifs_writedata_release);
+	kref_put(&wdata->refcount, cifs_uncached_writedata_release);
 }
 
 /* attempt to send write to server, retry on any -EAGAIN errors */
@@ -2370,7 +2377,8 @@
 			if (rc != 0)
 				continue;
 		}
-		rc = server->ops->async_writev(wdata);
+		rc = server->ops->async_writev(wdata,
+					       cifs_uncached_writedata_release);
 	} while (rc == -EAGAIN);
 
 	return rc;
@@ -2381,7 +2389,7 @@
 		 unsigned long nr_segs, loff_t *poffset)
 {
 	unsigned long nr_pages, i;
-	size_t copied, len, cur_len;
+	size_t bytes, copied, len, cur_len;
 	ssize_t total_written = 0;
 	loff_t offset;
 	struct iov_iter it;
@@ -2436,14 +2444,45 @@
 
 		save_len = cur_len;
 		for (i = 0; i < nr_pages; i++) {
-			copied = min_t(const size_t, cur_len, PAGE_SIZE);
+			bytes = min_t(const size_t, cur_len, PAGE_SIZE);
 			copied = iov_iter_copy_from_user(wdata->pages[i], &it,
-							 0, copied);
+							 0, bytes);
 			cur_len -= copied;
 			iov_iter_advance(&it, copied);
+			/*
+			 * If we didn't copy as much as we expected, then that
+			 * may mean we trod into an unmapped area. Stop copying
+			 * at that point. On the next pass through the big
+			 * loop, we'll likely end up getting a zero-length
+			 * write and bailing out of it.
+			 */
+			if (copied < bytes)
+				break;
 		}
 		cur_len = save_len - cur_len;
 
+		/*
+		 * If we have no data to send, then that probably means that
+		 * the copy above failed altogether. That's most likely because
+		 * the address in the iovec was bogus. Set the rc to -EFAULT,
+		 * free anything we allocated and bail out.
+		 */
+		if (!cur_len) {
+			for (i = 0; i < nr_pages; i++)
+				put_page(wdata->pages[i]);
+			kfree(wdata);
+			rc = -EFAULT;
+			break;
+		}
+
+		/*
+		 * i + 1 now represents the number of pages we actually used in
+		 * the copy phase above. Bring nr_pages down to that, and free
+		 * any pages that we didn't use.
+		 */
+		for ( ; nr_pages > i + 1; nr_pages--)
+			put_page(wdata->pages[nr_pages - 1]);
+
 		wdata->sync_mode = WB_SYNC_ALL;
 		wdata->nr_pages = nr_pages;
 		wdata->offset = (__u64)offset;
@@ -2454,7 +2493,8 @@
 		wdata->tailsz = cur_len - ((nr_pages - 1) * PAGE_SIZE);
 		rc = cifs_uncached_retry_writev(wdata);
 		if (rc) {
-			kref_put(&wdata->refcount, cifs_writedata_release);
+			kref_put(&wdata->refcount,
+				 cifs_uncached_writedata_release);
 			break;
 		}
 
@@ -2496,7 +2536,7 @@
 			}
 		}
 		list_del_init(&wdata->list);
-		kref_put(&wdata->refcount, cifs_writedata_release);
+		kref_put(&wdata->refcount, cifs_uncached_writedata_release);
 	}
 
 	if (total_written > 0)
@@ -2559,8 +2599,8 @@
 	if (rc > 0) {
 		ssize_t err;
 
-		err = generic_write_sync(file, pos, rc);
-		if (err < 0 && rc > 0)
+		err = generic_write_sync(file, iocb->ki_pos - rc, rc);
+		if (err < 0)
 			rc = err;
 	}
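
Two separate fixes land in this file: the uncached write path now copes with a short copy from userspace (trimming the page array to the i + 1 pages actually filled, or bailing out with -EFAULT when nothing was copied), and the O_SYNC path passes generic_write_sync() the start of the just-written range computed from the already-advanced iocb->ki_pos. A tiny worked check of the trimming arithmetic, outside any kernel context:

	#include <assert.h>

	int main(void)
	{
		unsigned long nr_pages = 8, i = 2;	/* the copy stopped on page index 2 */
		unsigned long freed = 0;

		for (; nr_pages > i + 1; nr_pages--)
			freed++;			/* stands in for put_page() */

		assert(nr_pages == i + 1);
		assert(freed == 5);
		return 0;
	}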
 
diff --git a/fs/cifs/inode.c b/fs/cifs/inode.c
index 49719b8..aadc2b6 100644
--- a/fs/cifs/inode.c
+++ b/fs/cifs/inode.c
@@ -383,10 +383,10 @@
 
 	/* check for Minshall+French symlinks */
 	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MF_SYMLINKS) {
-		int tmprc = CIFSCheckMFSymlink(xid, tcon, cifs_sb, &fattr,
-					       full_path);
+		int tmprc = check_mf_symlink(xid, tcon, cifs_sb, &fattr,
+					     full_path);
 		if (tmprc)
-			cifs_dbg(FYI, "CIFSCheckMFSymlink: %d\n", tmprc);
+			cifs_dbg(FYI, "check_mf_symlink: %d\n", tmprc);
 	}
 
 	if (*pinode == NULL) {
@@ -404,18 +404,20 @@
 }
 
 static int
-cifs_sfu_type(struct cifs_fattr *fattr, const unsigned char *path,
+cifs_sfu_type(struct cifs_fattr *fattr, const char *path,
 	      struct cifs_sb_info *cifs_sb, unsigned int xid)
 {
 	int rc;
 	int oplock = 0;
-	__u16 netfid;
 	struct tcon_link *tlink;
 	struct cifs_tcon *tcon;
+	struct cifs_fid fid;
+	struct cifs_open_parms oparms;
 	struct cifs_io_parms io_parms;
 	char buf[24];
 	unsigned int bytes_read;
 	char *pbuf;
+	int buf_type = CIFS_NO_BUFFER;
 
 	pbuf = buf;
 
@@ -436,62 +438,69 @@
 		return PTR_ERR(tlink);
 	tcon = tlink_tcon(tlink);
 
-	rc = CIFSSMBOpen(xid, tcon, path, FILE_OPEN, GENERIC_READ,
-			 CREATE_NOT_DIR, &netfid, &oplock, NULL,
-			 cifs_sb->local_nls,
-			 cifs_sb->mnt_cifs_flags &
-				CIFS_MOUNT_MAP_SPECIAL_CHR);
-	if (rc == 0) {
-		int buf_type = CIFS_NO_BUFFER;
-			/* Read header */
-		io_parms.netfid = netfid;
-		io_parms.pid = current->tgid;
-		io_parms.tcon = tcon;
-		io_parms.offset = 0;
-		io_parms.length = 24;
-		rc = CIFSSMBRead(xid, &io_parms, &bytes_read, &pbuf,
-				 &buf_type);
-		if ((rc == 0) && (bytes_read >= 8)) {
-			if (memcmp("IntxBLK", pbuf, 8) == 0) {
-				cifs_dbg(FYI, "Block device\n");
-				fattr->cf_mode |= S_IFBLK;
-				fattr->cf_dtype = DT_BLK;
-				if (bytes_read == 24) {
-					/* we have enough to decode dev num */
-					__u64 mjr; /* major */
-					__u64 mnr; /* minor */
-					mjr = le64_to_cpu(*(__le64 *)(pbuf+8));
-					mnr = le64_to_cpu(*(__le64 *)(pbuf+16));
-					fattr->cf_rdev = MKDEV(mjr, mnr);
-				}
-			} else if (memcmp("IntxCHR", pbuf, 8) == 0) {
-				cifs_dbg(FYI, "Char device\n");
-				fattr->cf_mode |= S_IFCHR;
-				fattr->cf_dtype = DT_CHR;
-				if (bytes_read == 24) {
-					/* we have enough to decode dev num */
-					__u64 mjr; /* major */
-					__u64 mnr; /* minor */
-					mjr = le64_to_cpu(*(__le64 *)(pbuf+8));
-					mnr = le64_to_cpu(*(__le64 *)(pbuf+16));
-					fattr->cf_rdev = MKDEV(mjr, mnr);
-				}
-			} else if (memcmp("IntxLNK", pbuf, 7) == 0) {
-				cifs_dbg(FYI, "Symlink\n");
-				fattr->cf_mode |= S_IFLNK;
-				fattr->cf_dtype = DT_LNK;
-			} else {
-				fattr->cf_mode |= S_IFREG; /* file? */
-				fattr->cf_dtype = DT_REG;
-				rc = -EOPNOTSUPP;
-			}
-		} else {
-			fattr->cf_mode |= S_IFREG; /* then it is a file */
-			fattr->cf_dtype = DT_REG;
-			rc = -EOPNOTSUPP; /* or some unknown SFU type */
-		}
-		CIFSSMBClose(xid, tcon, netfid);
+	oparms.tcon = tcon;
+	oparms.cifs_sb = cifs_sb;
+	oparms.desired_access = GENERIC_READ;
+	oparms.create_options = CREATE_NOT_DIR;
+	oparms.disposition = FILE_OPEN;
+	oparms.path = path;
+	oparms.fid = &fid;
+	oparms.reconnect = false;
+
+	rc = CIFS_open(xid, &oparms, &oplock, NULL);
+	if (rc) {
+		cifs_put_tlink(tlink);
+		return rc;
 	}
+
+	/* Read header */
+	io_parms.netfid = fid.netfid;
+	io_parms.pid = current->tgid;
+	io_parms.tcon = tcon;
+	io_parms.offset = 0;
+	io_parms.length = 24;
+
+	rc = CIFSSMBRead(xid, &io_parms, &bytes_read, &pbuf, &buf_type);
+	if ((rc == 0) && (bytes_read >= 8)) {
+		if (memcmp("IntxBLK", pbuf, 8) == 0) {
+			cifs_dbg(FYI, "Block device\n");
+			fattr->cf_mode |= S_IFBLK;
+			fattr->cf_dtype = DT_BLK;
+			if (bytes_read == 24) {
+				/* we have enough to decode dev num */
+				__u64 mjr; /* major */
+				__u64 mnr; /* minor */
+				mjr = le64_to_cpu(*(__le64 *)(pbuf+8));
+				mnr = le64_to_cpu(*(__le64 *)(pbuf+16));
+				fattr->cf_rdev = MKDEV(mjr, mnr);
+			}
+		} else if (memcmp("IntxCHR", pbuf, 8) == 0) {
+			cifs_dbg(FYI, "Char device\n");
+			fattr->cf_mode |= S_IFCHR;
+			fattr->cf_dtype = DT_CHR;
+			if (bytes_read == 24) {
+				/* we have enough to decode dev num */
+				__u64 mjr; /* major */
+				__u64 mnr; /* minor */
+				mjr = le64_to_cpu(*(__le64 *)(pbuf+8));
+				mnr = le64_to_cpu(*(__le64 *)(pbuf+16));
+				fattr->cf_rdev = MKDEV(mjr, mnr);
+			}
+		} else if (memcmp("IntxLNK", pbuf, 7) == 0) {
+			cifs_dbg(FYI, "Symlink\n");
+			fattr->cf_mode |= S_IFLNK;
+			fattr->cf_dtype = DT_LNK;
+		} else {
+			fattr->cf_mode |= S_IFREG; /* file? */
+			fattr->cf_dtype = DT_REG;
+			rc = -EOPNOTSUPP;
+		}
+	} else {
+		fattr->cf_mode |= S_IFREG; /* then it is a file */
+		fattr->cf_dtype = DT_REG;
+		rc = -EOPNOTSUPP; /* or some unknown SFU type */
+	}
+	CIFSSMBClose(xid, tcon, fid.netfid);
 	cifs_put_tlink(tlink);
 	return rc;
 }
@@ -518,10 +527,15 @@
 		return PTR_ERR(tlink);
 	tcon = tlink_tcon(tlink);
 
-	rc = CIFSSMBQAllEAs(xid, tcon, path, "SETFILEBITS",
-			    ea_value, 4 /* size of buf */, cifs_sb->local_nls,
-			    cifs_sb->mnt_cifs_flags &
-				CIFS_MOUNT_MAP_SPECIAL_CHR);
+	if (tcon->ses->server->ops->query_all_EAs == NULL) {
+		cifs_put_tlink(tlink);
+		return -EOPNOTSUPP;
+	}
+
+	rc = tcon->ses->server->ops->query_all_EAs(xid, tcon, path,
+			"SETFILEBITS", ea_value, 4 /* size of buf */,
+			cifs_sb->local_nls,
+			cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MAP_SPECIAL_CHR);
 	cifs_put_tlink(tlink);
 	if (rc < 0)
 		return (int)rc;
@@ -663,7 +677,7 @@
 int
 cifs_get_inode_info(struct inode **inode, const char *full_path,
 		    FILE_ALL_INFO *data, struct super_block *sb, int xid,
-		    const __u16 *fid)
+		    const struct cifs_fid *fid)
 {
 	bool validinum = false;
 	__u16 srchflgs;
@@ -800,10 +814,10 @@
 
 	/* check for Minshall+French symlinks */
 	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MF_SYMLINKS) {
-		tmprc = CIFSCheckMFSymlink(xid, tcon, cifs_sb, &fattr,
-					   full_path);
+		tmprc = check_mf_symlink(xid, tcon, cifs_sb, &fattr,
+					 full_path);
 		if (tmprc)
-			cifs_dbg(FYI, "CIFSCheckMFSymlink: %d\n", tmprc);
+			cifs_dbg(FYI, "check_mf_symlink: %d\n", tmprc);
 	}
 
 	if (!*inode) {
@@ -1032,7 +1046,8 @@
 {
 	int oplock = 0;
 	int rc;
-	__u16 netfid;
+	struct cifs_fid fid;
+	struct cifs_open_parms oparms;
 	struct inode *inode = dentry->d_inode;
 	struct cifsInodeInfo *cifsInode = CIFS_I(inode);
 	struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);
@@ -1055,10 +1070,16 @@
 		goto out;
 	}
 
-	rc = CIFSSMBOpen(xid, tcon, full_path, FILE_OPEN,
-			 DELETE|FILE_WRITE_ATTRIBUTES, CREATE_NOT_DIR,
-			 &netfid, &oplock, NULL, cifs_sb->local_nls,
-			 cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MAP_SPECIAL_CHR);
+	oparms.tcon = tcon;
+	oparms.cifs_sb = cifs_sb;
+	oparms.desired_access = DELETE | FILE_WRITE_ATTRIBUTES;
+	oparms.create_options = CREATE_NOT_DIR;
+	oparms.disposition = FILE_OPEN;
+	oparms.path = full_path;
+	oparms.fid = &fid;
+	oparms.reconnect = false;
+
+	rc = CIFS_open(xid, &oparms, &oplock, NULL);
 	if (rc != 0)
 		goto out;
 
@@ -1079,7 +1100,7 @@
 			goto out_close;
 		}
 		info_buf->Attributes = cpu_to_le32(dosattr);
-		rc = CIFSSMBSetFileInfo(xid, tcon, info_buf, netfid,
+		rc = CIFSSMBSetFileInfo(xid, tcon, info_buf, fid.netfid,
 					current->tgid);
 		/* although we would like to mark the file hidden
  		   if that fails we will still try to rename it */
@@ -1090,7 +1111,8 @@
 	}
 
 	/* rename the file */
-	rc = CIFSSMBRenameOpenFile(xid, tcon, netfid, NULL, cifs_sb->local_nls,
+	rc = CIFSSMBRenameOpenFile(xid, tcon, fid.netfid, NULL,
+				   cifs_sb->local_nls,
 				   cifs_sb->mnt_cifs_flags &
 					    CIFS_MOUNT_MAP_SPECIAL_CHR);
 	if (rc != 0) {
@@ -1100,7 +1122,7 @@
 
 	/* try to set DELETE_ON_CLOSE */
 	if (!cifsInode->delete_pending) {
-		rc = CIFSSMBSetFileDisposition(xid, tcon, true, netfid,
+		rc = CIFSSMBSetFileDisposition(xid, tcon, true, fid.netfid,
 					       current->tgid);
 		/*
 		 * some samba versions return -ENOENT when we try to set the
@@ -1120,7 +1142,7 @@
 	}
 
 out_close:
-	CIFSSMBClose(xid, tcon, netfid);
+	CIFSSMBClose(xid, tcon, fid.netfid);
 out:
 	kfree(info_buf);
 	cifs_put_tlink(tlink);
@@ -1132,13 +1154,13 @@
 	 * them anyway.
 	 */
 undo_rename:
-	CIFSSMBRenameOpenFile(xid, tcon, netfid, dentry->d_name.name,
+	CIFSSMBRenameOpenFile(xid, tcon, fid.netfid, dentry->d_name.name,
 				cifs_sb->local_nls, cifs_sb->mnt_cifs_flags &
 					    CIFS_MOUNT_MAP_SPECIAL_CHR);
 undo_setattr:
 	if (dosattr != origattr) {
 		info_buf->Attributes = cpu_to_le32(origattr);
-		if (!CIFSSMBSetFileInfo(xid, tcon, info_buf, netfid,
+		if (!CIFSSMBSetFileInfo(xid, tcon, info_buf, fid.netfid,
 					current->tgid))
 			cifsInode->cifsAttrs = origattr;
 	}
@@ -1549,7 +1571,8 @@
 	struct tcon_link *tlink;
 	struct cifs_tcon *tcon;
 	struct TCP_Server_Info *server;
-	__u16 srcfid;
+	struct cifs_fid fid;
+	struct cifs_open_parms oparms;
 	int oplock, rc;
 
 	tlink = cifs_sb_tlink(cifs_sb);
@@ -1576,17 +1599,23 @@
 	if (to_dentry->d_parent != from_dentry->d_parent)
 		goto do_rename_exit;
 
+	oparms.tcon = tcon;
+	oparms.cifs_sb = cifs_sb;
 	/* open the file to be renamed -- we need DELETE perms */
-	rc = CIFSSMBOpen(xid, tcon, from_path, FILE_OPEN, DELETE,
-			 CREATE_NOT_DIR, &srcfid, &oplock, NULL,
-			 cifs_sb->local_nls, cifs_sb->mnt_cifs_flags &
-				CIFS_MOUNT_MAP_SPECIAL_CHR);
+	oparms.desired_access = DELETE;
+	oparms.create_options = CREATE_NOT_DIR;
+	oparms.disposition = FILE_OPEN;
+	oparms.path = from_path;
+	oparms.fid = &fid;
+	oparms.reconnect = false;
+
+	rc = CIFS_open(xid, &oparms, &oplock, NULL);
 	if (rc == 0) {
-		rc = CIFSSMBRenameOpenFile(xid, tcon, srcfid,
+		rc = CIFSSMBRenameOpenFile(xid, tcon, fid.netfid,
 				(const char *) to_dentry->d_name.name,
 				cifs_sb->local_nls, cifs_sb->mnt_cifs_flags &
 					CIFS_MOUNT_MAP_SPECIAL_CHR);
-		CIFSSMBClose(xid, tcon, srcfid);
+		CIFSSMBClose(xid, tcon, fid.netfid);
 	}
 do_rename_exit:
 	cifs_put_tlink(tlink);
diff --git a/fs/cifs/link.c b/fs/cifs/link.c
index 92aee08..264ece7 100644
--- a/fs/cifs/link.c
+++ b/fs/cifs/link.c
@@ -29,6 +29,10 @@
 #include "cifs_debug.h"
 #include "cifs_fs_sb.h"
 
+/*
+ * M-F Symlink Functions - Begin
+ */
+
 #define CIFS_MF_SYMLINK_LEN_OFFSET (4+1)
 #define CIFS_MF_SYMLINK_MD5_OFFSET (CIFS_MF_SYMLINK_LEN_OFFSET+(4+1))
 #define CIFS_MF_SYMLINK_LINK_OFFSET (CIFS_MF_SYMLINK_MD5_OFFSET+(32+1))
@@ -91,10 +95,8 @@
 }
 
 static int
-CIFSParseMFSymlink(const u8 *buf,
-		   unsigned int buf_len,
-		   unsigned int *_link_len,
-		   char **_link_str)
+parse_mf_symlink(const u8 *buf, unsigned int buf_len, unsigned int *_link_len,
+		 char **_link_str)
 {
 	int rc;
 	unsigned int link_len;
@@ -137,7 +139,7 @@
 }
 
 static int
-CIFSFormatMFSymlink(u8 *buf, unsigned int buf_len, const char *link_str)
+format_mf_symlink(u8 *buf, unsigned int buf_len, const char *link_str)
 {
 	int rc;
 	unsigned int link_len;
@@ -180,120 +182,10 @@
 	return 0;
 }
 
-static int
-CIFSCreateMFSymLink(const unsigned int xid, struct cifs_tcon *tcon,
-		    const char *fromName, const char *toName,
-		    struct cifs_sb_info *cifs_sb)
-{
-	int rc;
-	int oplock = 0;
-	int remap;
-	int create_options = CREATE_NOT_DIR;
-	__u16 netfid = 0;
-	u8 *buf;
-	unsigned int bytes_written = 0;
-	struct cifs_io_parms io_parms;
-	struct nls_table *nls_codepage;
-
-	nls_codepage = cifs_sb->local_nls;
-	remap = cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MAP_SPECIAL_CHR;
-
-	buf = kmalloc(CIFS_MF_SYMLINK_FILE_SIZE, GFP_KERNEL);
-	if (!buf)
-		return -ENOMEM;
-
-	rc = CIFSFormatMFSymlink(buf, CIFS_MF_SYMLINK_FILE_SIZE, toName);
-	if (rc != 0) {
-		kfree(buf);
-		return rc;
-	}
-
-	if (backup_cred(cifs_sb))
-		create_options |= CREATE_OPEN_BACKUP_INTENT;
-
-	rc = CIFSSMBOpen(xid, tcon, fromName, FILE_CREATE, GENERIC_WRITE,
-			 create_options, &netfid, &oplock, NULL,
-			 nls_codepage, remap);
-	if (rc != 0) {
-		kfree(buf);
-		return rc;
-	}
-
-	io_parms.netfid = netfid;
-	io_parms.pid = current->tgid;
-	io_parms.tcon = tcon;
-	io_parms.offset = 0;
-	io_parms.length = CIFS_MF_SYMLINK_FILE_SIZE;
-
-	rc = CIFSSMBWrite(xid, &io_parms, &bytes_written, buf, NULL, 0);
-	CIFSSMBClose(xid, tcon, netfid);
-	kfree(buf);
-	if (rc != 0)
-		return rc;
-
-	if (bytes_written != CIFS_MF_SYMLINK_FILE_SIZE)
-		return -EIO;
-
-	return 0;
-}
-
-static int
-CIFSQueryMFSymLink(const unsigned int xid, struct cifs_tcon *tcon,
-		   const unsigned char *searchName, char **symlinkinfo,
-		   const struct nls_table *nls_codepage, int remap)
-{
-	int rc;
-	int oplock = 0;
-	__u16 netfid = 0;
-	u8 *buf;
-	char *pbuf;
-	unsigned int bytes_read = 0;
-	int buf_type = CIFS_NO_BUFFER;
-	unsigned int link_len = 0;
-	struct cifs_io_parms io_parms;
-	FILE_ALL_INFO file_info;
-
-	rc = CIFSSMBOpen(xid, tcon, searchName, FILE_OPEN, GENERIC_READ,
-			 CREATE_NOT_DIR, &netfid, &oplock, &file_info,
-			 nls_codepage, remap);
-	if (rc != 0)
-		return rc;
-
-	if (file_info.EndOfFile != cpu_to_le64(CIFS_MF_SYMLINK_FILE_SIZE)) {
-		CIFSSMBClose(xid, tcon, netfid);
-		/* it's not a symlink */
-		return -EINVAL;
-	}
-
-	buf = kmalloc(CIFS_MF_SYMLINK_FILE_SIZE, GFP_KERNEL);
-	if (!buf)
-		return -ENOMEM;
-	pbuf = buf;
-	io_parms.netfid = netfid;
-	io_parms.pid = current->tgid;
-	io_parms.tcon = tcon;
-	io_parms.offset = 0;
-	io_parms.length = CIFS_MF_SYMLINK_FILE_SIZE;
-
-	rc = CIFSSMBRead(xid, &io_parms, &bytes_read, &pbuf, &buf_type);
-	CIFSSMBClose(xid, tcon, netfid);
-	if (rc != 0) {
-		kfree(buf);
-		return rc;
-	}
-
-	rc = CIFSParseMFSymlink(buf, bytes_read, &link_len, symlinkinfo);
-	kfree(buf);
-	if (rc != 0)
-		return rc;
-
-	return 0;
-}
-
 bool
-CIFSCouldBeMFSymlink(const struct cifs_fattr *fattr)
+couldbe_mf_symlink(const struct cifs_fattr *fattr)
 {
-	if (!(fattr->cf_mode & S_IFREG))
+	if (!S_ISREG(fattr->cf_mode))
 		/* it's not a symlink */
 		return false;
 
@@ -304,66 +196,80 @@
 	return true;
 }
 
-int
-open_query_close_cifs_symlink(const unsigned char *path, char *pbuf,
-			unsigned int *pbytes_read, struct cifs_sb_info *cifs_sb,
-			unsigned int xid)
+static int
+create_mf_symlink(const unsigned int xid, struct cifs_tcon *tcon,
+		  struct cifs_sb_info *cifs_sb, const char *fromName,
+		  const char *toName)
 {
 	int rc;
-	int oplock = 0;
-	__u16 netfid = 0;
-	struct tcon_link *tlink;
-	struct cifs_tcon *ptcon;
-	struct cifs_io_parms io_parms;
-	int buf_type = CIFS_NO_BUFFER;
-	FILE_ALL_INFO file_info;
+	u8 *buf;
+	unsigned int bytes_written = 0;
 
-	tlink = cifs_sb_tlink(cifs_sb);
-	if (IS_ERR(tlink))
-		return PTR_ERR(tlink);
-	ptcon = tlink_tcon(tlink);
+	buf = kmalloc(CIFS_MF_SYMLINK_FILE_SIZE, GFP_KERNEL);
+	if (!buf)
+		return -ENOMEM;
 
-	rc = CIFSSMBOpen(xid, ptcon, path, FILE_OPEN, GENERIC_READ,
-			 CREATE_NOT_DIR, &netfid, &oplock, &file_info,
-			 cifs_sb->local_nls,
-			 cifs_sb->mnt_cifs_flags &
-				CIFS_MOUNT_MAP_SPECIAL_CHR);
-	if (rc != 0) {
-		cifs_put_tlink(tlink);
-		return rc;
-	}
+	rc = format_mf_symlink(buf, CIFS_MF_SYMLINK_FILE_SIZE, toName);
+	if (rc)
+		goto out;
 
-	if (file_info.EndOfFile != cpu_to_le64(CIFS_MF_SYMLINK_FILE_SIZE)) {
-		CIFSSMBClose(xid, ptcon, netfid);
-		cifs_put_tlink(tlink);
-		/* it's not a symlink */
-		return rc;
-	}
+	rc = tcon->ses->server->ops->create_mf_symlink(xid, tcon, cifs_sb,
+					fromName, buf, &bytes_written);
+	if (rc)
+		goto out;
 
-	io_parms.netfid = netfid;
-	io_parms.pid = current->tgid;
-	io_parms.tcon = ptcon;
-	io_parms.offset = 0;
-	io_parms.length = CIFS_MF_SYMLINK_FILE_SIZE;
-
-	rc = CIFSSMBRead(xid, &io_parms, pbytes_read, &pbuf, &buf_type);
-	CIFSSMBClose(xid, ptcon, netfid);
-	cifs_put_tlink(tlink);
+	if (bytes_written != CIFS_MF_SYMLINK_FILE_SIZE)
+		rc = -EIO;
+out:
+	kfree(buf);
 	return rc;
 }
 
-
-int
-CIFSCheckMFSymlink(unsigned int xid, struct cifs_tcon *tcon,
-		   struct cifs_sb_info *cifs_sb, struct cifs_fattr *fattr,
-		   const unsigned char *path)
+static int
+query_mf_symlink(const unsigned int xid, struct cifs_tcon *tcon,
+		 struct cifs_sb_info *cifs_sb, const unsigned char *path,
+		 char **symlinkinfo)
 {
 	int rc;
 	u8 *buf = NULL;
 	unsigned int link_len = 0;
 	unsigned int bytes_read = 0;
 
-	if (!CIFSCouldBeMFSymlink(fattr))
+	buf = kmalloc(CIFS_MF_SYMLINK_FILE_SIZE, GFP_KERNEL);
+	if (!buf)
+		return -ENOMEM;
+
+	if (tcon->ses->server->ops->query_mf_symlink)
+		rc = tcon->ses->server->ops->query_mf_symlink(xid, tcon,
+					      cifs_sb, path, buf, &bytes_read);
+	else
+		rc = -ENOSYS;
+
+	if (rc)
+		goto out;
+
+	if (bytes_read == 0) { /* not a symlink */
+		rc = -EINVAL;
+		goto out;
+	}
+
+	rc = parse_mf_symlink(buf, bytes_read, &link_len, symlinkinfo);
+out:
+	kfree(buf);
+	return rc;
+}
+
+int
+check_mf_symlink(unsigned int xid, struct cifs_tcon *tcon,
+		 struct cifs_sb_info *cifs_sb, struct cifs_fattr *fattr,
+		 const unsigned char *path)
+{
+	int rc;
+	u8 *buf = NULL;
+	unsigned int link_len = 0;
+	unsigned int bytes_read = 0;
+
+	if (!couldbe_mf_symlink(fattr))
 		/* it's not a symlink */
 		return 0;
 
@@ -372,8 +278,8 @@
 		return -ENOMEM;
 
 	if (tcon->ses->server->ops->query_mf_symlink)
-		rc = tcon->ses->server->ops->query_mf_symlink(path, buf,
-						&bytes_read, cifs_sb, xid);
+		rc = tcon->ses->server->ops->query_mf_symlink(xid, tcon,
+					      cifs_sb, path, buf, &bytes_read);
 	else
 		rc = -ENOSYS;
 
@@ -383,7 +289,7 @@
 	if (bytes_read == 0) /* not a symlink */
 		goto out;
 
-	rc = CIFSParseMFSymlink(buf, bytes_read, &link_len, NULL);
+	rc = parse_mf_symlink(buf, bytes_read, &link_len, NULL);
 	if (rc == -EINVAL) {
 		/* it's not a symlink */
 		rc = 0;
@@ -403,6 +309,95 @@
 	return rc;
 }
 
+/*
+ * SMB 1.0 Protocol specific functions
+ */
+
+int
+cifs_query_mf_symlink(unsigned int xid, struct cifs_tcon *tcon,
+		      struct cifs_sb_info *cifs_sb, const unsigned char *path,
+		      char *pbuf, unsigned int *pbytes_read)
+{
+	int rc;
+	int oplock = 0;
+	struct cifs_fid fid;
+	struct cifs_open_parms oparms;
+	struct cifs_io_parms io_parms;
+	int buf_type = CIFS_NO_BUFFER;
+	FILE_ALL_INFO file_info;
+
+	oparms.tcon = tcon;
+	oparms.cifs_sb = cifs_sb;
+	oparms.desired_access = GENERIC_READ;
+	oparms.create_options = CREATE_NOT_DIR;
+	oparms.disposition = FILE_OPEN;
+	oparms.path = path;
+	oparms.fid = &fid;
+	oparms.reconnect = false;
+
+	rc = CIFS_open(xid, &oparms, &oplock, &file_info);
+	if (rc)
+		return rc;
+
+	if (file_info.EndOfFile != cpu_to_le64(CIFS_MF_SYMLINK_FILE_SIZE))
+		/* it's not a symlink */
+		goto out;
+
+	io_parms.netfid = fid.netfid;
+	io_parms.pid = current->tgid;
+	io_parms.tcon = tcon;
+	io_parms.offset = 0;
+	io_parms.length = CIFS_MF_SYMLINK_FILE_SIZE;
+
+	rc = CIFSSMBRead(xid, &io_parms, pbytes_read, &pbuf, &buf_type);
+out:
+	CIFSSMBClose(xid, tcon, fid.netfid);
+	return rc;
+}
+
+int
+cifs_create_mf_symlink(unsigned int xid, struct cifs_tcon *tcon,
+		       struct cifs_sb_info *cifs_sb, const unsigned char *path,
+		       char *pbuf, unsigned int *pbytes_written)
+{
+	int rc;
+	int oplock = 0;
+	struct cifs_fid fid;
+	struct cifs_open_parms oparms;
+	struct cifs_io_parms io_parms;
+	int create_options = CREATE_NOT_DIR;
+
+	if (backup_cred(cifs_sb))
+		create_options |= CREATE_OPEN_BACKUP_INTENT;
+
+	oparms.tcon = tcon;
+	oparms.cifs_sb = cifs_sb;
+	oparms.desired_access = GENERIC_WRITE;
+	oparms.create_options = create_options;
+	oparms.disposition = FILE_OPEN;
+	oparms.path = path;
+	oparms.fid = &fid;
+	oparms.reconnect = false;
+
+	rc = CIFS_open(xid, &oparms, &oplock, NULL);
+	if (rc)
+		return rc;
+
+	io_parms.netfid = fid.netfid;
+	io_parms.pid = current->tgid;
+	io_parms.tcon = tcon;
+	io_parms.offset = 0;
+	io_parms.length = CIFS_MF_SYMLINK_FILE_SIZE;
+
+	rc = CIFSSMBWrite(xid, &io_parms, pbytes_written, pbuf, NULL, 0);
+	CIFSSMBClose(xid, tcon, fid.netfid);
+	return rc;
+}
+
+/*
+ * M-F Symlink Functions - End
+ */
+
 int
 cifs_hardlink(struct dentry *old_file, struct inode *inode,
 	      struct dentry *direntry)
@@ -438,8 +433,10 @@
 						CIFS_MOUNT_MAP_SPECIAL_CHR);
 	else {
 		server = tcon->ses->server;
-		if (!server->ops->create_hardlink)
-			return -ENOSYS;
+		if (!server->ops->create_hardlink) {
+			rc = -ENOSYS;
+			goto cifs_hl_exit;
+		}
 		rc = server->ops->create_hardlink(xid, tcon, from_name, to_name,
 						  cifs_sb);
 		if ((rc == -EIO) || (rc == -EINVAL))
@@ -530,15 +527,10 @@
 	 * and fallback to UNIX Extensions Symlinks.
 	 */
 	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MF_SYMLINKS)
-		rc = CIFSQueryMFSymLink(xid, tcon, full_path, &target_path,
-					cifs_sb->local_nls,
-					cifs_sb->mnt_cifs_flags &
-						CIFS_MOUNT_MAP_SPECIAL_CHR);
+		rc = query_mf_symlink(xid, tcon, cifs_sb, full_path,
+				      &target_path);
 
-	if ((rc != 0) && cap_unix(tcon->ses))
-		rc = CIFSSMBUnixQuerySymLink(xid, tcon, full_path, &target_path,
-					     cifs_sb->local_nls);
-	else if (rc != 0 && server->ops->query_symlink)
+	if (rc != 0 && server->ops->query_symlink)
 		rc = server->ops->query_symlink(xid, tcon, full_path,
 						&target_path, cifs_sb);
 
@@ -587,8 +579,7 @@
 
 	/* BB what if DFS and this volume is on different share? BB */
 	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MF_SYMLINKS)
-		rc = CIFSCreateMFSymLink(xid, pTcon, full_path, symname,
-					cifs_sb);
+		rc = create_mf_symlink(xid, pTcon, cifs_sb, full_path, symname);
 	else if (pTcon->unix_ext)
 		rc = CIFSUnixCreateSymLink(xid, pTcon, full_path, symname,
 					   cifs_sb->local_nls);
diff --git a/fs/cifs/readdir.c b/fs/cifs/readdir.c
index 5940eca..b15862e 100644
--- a/fs/cifs/readdir.c
+++ b/fs/cifs/readdir.c
@@ -749,7 +749,7 @@
 	}
 
 	if ((cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MF_SYMLINKS) &&
-	    CIFSCouldBeMFSymlink(&fattr))
+	    couldbe_mf_symlink(&fattr))
 		/*
 		 * trying to get the type and mode can be slow,
 		 * so just call those regular files for now, and mark
diff --git a/fs/cifs/smb1ops.c b/fs/cifs/smb1ops.c
index 5f5ba0d..526fb89 100644
--- a/fs/cifs/smb1ops.c
+++ b/fs/cifs/smb1ops.c
@@ -560,17 +560,24 @@
 	if (!rc && (le32_to_cpu(data->Attributes) & ATTR_REPARSE)) {
 		int tmprc;
 		int oplock = 0;
-		__u16 netfid;
+		struct cifs_fid fid;
+		struct cifs_open_parms oparms;
+
+		oparms.tcon = tcon;
+		oparms.cifs_sb = cifs_sb;
+		oparms.desired_access = FILE_READ_ATTRIBUTES;
+		oparms.create_options = 0;
+		oparms.disposition = FILE_OPEN;
+		oparms.path = full_path;
+		oparms.fid = &fid;
+		oparms.reconnect = false;
 
 		/* Need to check if this is a symbolic link or not */
-		tmprc = CIFSSMBOpen(xid, tcon, full_path, FILE_OPEN,
-				    FILE_READ_ATTRIBUTES, 0, &netfid, &oplock,
-				    NULL, cifs_sb->local_nls,
-			cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MAP_SPECIAL_CHR);
+		tmprc = CIFS_open(xid, &oparms, &oplock, NULL);
 		if (tmprc == -EOPNOTSUPP)
 			*symlink = true;
 		else
-			CIFSSMBClose(xid, tcon, netfid);
+			CIFSSMBClose(xid, tcon, fid.netfid);
 	}
 
 	return rc;
@@ -705,12 +712,7 @@
 				     oparms->cifs_sb->local_nls,
 				     oparms->cifs_sb->mnt_cifs_flags
 						& CIFS_MOUNT_MAP_SPECIAL_CHR);
-	return CIFSSMBOpen(xid, oparms->tcon, oparms->path,
-			   oparms->disposition, oparms->desired_access,
-			   oparms->create_options, &oparms->fid->netfid, oplock,
-			   buf, oparms->cifs_sb->local_nls,
-			   oparms->cifs_sb->mnt_cifs_flags &
-						CIFS_MOUNT_MAP_SPECIAL_CHR);
+	return CIFS_open(xid, oparms, oplock, buf);
 }
 
 static void
@@ -761,8 +763,9 @@
 {
 	int oplock = 0;
 	int rc;
-	__u16 netfid;
 	__u32 netpid;
+	struct cifs_fid fid;
+	struct cifs_open_parms oparms;
 	struct cifsFileInfo *open_file;
 	struct cifsInodeInfo *cinode = CIFS_I(inode);
 	struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);
@@ -772,7 +775,7 @@
 	/* if the file is already open for write, just use that fileid */
 	open_file = find_writable_file(cinode, true);
 	if (open_file) {
-		netfid = open_file->fid.netfid;
+		fid.netfid = open_file->fid.netfid;
 		netpid = open_file->pid;
 		tcon = tlink_tcon(open_file->tlink);
 		goto set_via_filehandle;
@@ -796,12 +799,17 @@
 		goto out;
 	}
 
-	cifs_dbg(FYI, "calling SetFileInfo since SetPathInfo for times not supported by this server\n");
-	rc = CIFSSMBOpen(xid, tcon, full_path, FILE_OPEN,
-			 SYNCHRONIZE | FILE_WRITE_ATTRIBUTES, CREATE_NOT_DIR,
-			 &netfid, &oplock, NULL, cifs_sb->local_nls,
-			 cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MAP_SPECIAL_CHR);
+	oparms.tcon = tcon;
+	oparms.cifs_sb = cifs_sb;
+	oparms.desired_access = SYNCHRONIZE | FILE_WRITE_ATTRIBUTES;
+	oparms.create_options = CREATE_NOT_DIR;
+	oparms.disposition = FILE_OPEN;
+	oparms.path = full_path;
+	oparms.fid = &fid;
+	oparms.reconnect = false;
 
+	cifs_dbg(FYI, "calling SetFileInfo since SetPathInfo for times not supported by this server\n");
+	rc = CIFS_open(xid, &oparms, &oplock, NULL);
 	if (rc != 0) {
 		if (rc == -EIO)
 			rc = -EINVAL;
@@ -811,12 +819,12 @@
 	netpid = current->tgid;
 
 set_via_filehandle:
-	rc = CIFSSMBSetFileInfo(xid, tcon, buf, netfid, netpid);
+	rc = CIFSSMBSetFileInfo(xid, tcon, buf, fid.netfid, netpid);
 	if (!rc)
 		cinode->cifsAttrs = le32_to_cpu(buf->Attributes);
 
 	if (open_file == NULL)
-		CIFSSMBClose(xid, tcon, netfid);
+		CIFSSMBClose(xid, tcon, fid.netfid);
 	else
 		cifsFileInfo_put(open_file);
 out:
@@ -908,33 +916,80 @@
 }
 
 static int
+cifs_unix_dfs_readlink(const unsigned int xid, struct cifs_tcon *tcon,
+		       const unsigned char *searchName, char **symlinkinfo,
+		       const struct nls_table *nls_codepage)
+{
+#ifdef CONFIG_CIFS_DFS_UPCALL
+	int rc;
+	unsigned int num_referrals = 0;
+	struct dfs_info3_param *referrals = NULL;
+
+	rc = get_dfs_path(xid, tcon->ses, searchName, nls_codepage,
+			  &num_referrals, &referrals, 0);
+
+	if (!rc && num_referrals > 0) {
+		*symlinkinfo = kstrndup(referrals->node_name,
+					strlen(referrals->node_name),
+					GFP_KERNEL);
+		if (!*symlinkinfo)
+			rc = -ENOMEM;
+		free_dfs_info_array(referrals, num_referrals);
+	}
+	return rc;
+#else /* No DFS support */
+	return -EREMOTE;
+#endif
+}
+
+static int
 cifs_query_symlink(const unsigned int xid, struct cifs_tcon *tcon,
 		   const char *full_path, char **target_path,
 		   struct cifs_sb_info *cifs_sb)
 {
 	int rc;
 	int oplock = 0;
-	__u16 netfid;
+	struct cifs_fid fid;
+	struct cifs_open_parms oparms;
 
 	cifs_dbg(FYI, "%s: path: %s\n", __func__, full_path);
 
-	rc = CIFSSMBOpen(xid, tcon, full_path, FILE_OPEN,
-			 FILE_READ_ATTRIBUTES, OPEN_REPARSE_POINT, &netfid,
-			 &oplock, NULL, cifs_sb->local_nls,
-			 cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MAP_SPECIAL_CHR);
-	if (rc)
-		return rc;
+	/* Check for unix extensions */
+	if (cap_unix(tcon->ses)) {
+		rc = CIFSSMBUnixQuerySymLink(xid, tcon, full_path, target_path,
+					     cifs_sb->local_nls);
+		if (rc == -EREMOTE)
+			rc = cifs_unix_dfs_readlink(xid, tcon, full_path,
+						    target_path,
+						    cifs_sb->local_nls);
 
-	rc = CIFSSMBQuerySymLink(xid, tcon, netfid, target_path,
-				 cifs_sb->local_nls);
-	if (rc) {
-		CIFSSMBClose(xid, tcon, netfid);
-		return rc;
+		goto out;
 	}
 
+	oparms.tcon = tcon;
+	oparms.cifs_sb = cifs_sb;
+	oparms.desired_access = FILE_READ_ATTRIBUTES;
+	oparms.create_options = OPEN_REPARSE_POINT;
+	oparms.disposition = FILE_OPEN;
+	oparms.path = full_path;
+	oparms.fid = &fid;
+	oparms.reconnect = false;
+
+	rc = CIFS_open(xid, &oparms, &oplock, NULL);
+	if (rc)
+		goto out;
+
+	rc = CIFSSMBQuerySymLink(xid, tcon, fid.netfid, target_path,
+				 cifs_sb->local_nls);
+	if (rc)
+		goto out_close;
+
 	convert_delimiter(*target_path, '/');
-	CIFSSMBClose(xid, tcon, netfid);
-	cifs_dbg(FYI, "%s: target path: %s\n", __func__, *target_path);
+out_close:
+	CIFSSMBClose(xid, tcon, fid.netfid);
+out:
+	if (!rc)
+		cifs_dbg(FYI, "%s: target path: %s\n", __func__, *target_path);
 	return rc;
 }
 
@@ -1009,8 +1064,18 @@
 	.mand_lock = cifs_mand_lock,
 	.mand_unlock_range = cifs_unlock_range,
 	.push_mand_locks = cifs_push_mandatory_locks,
-	.query_mf_symlink = open_query_close_cifs_symlink,
+	.query_mf_symlink = cifs_query_mf_symlink,
+	.create_mf_symlink = cifs_create_mf_symlink,
 	.is_read_op = cifs_is_read_op,
+#ifdef CONFIG_CIFS_XATTR
+	.query_all_EAs = CIFSSMBQAllEAs,
+	.set_EA = CIFSSMBSetEA,
+#endif /* CIFS_XATTR */
+#ifdef CONFIG_CIFS_ACL
+	.get_acl = get_cifs_acl,
+	.get_acl_by_fid = get_cifs_acl_by_fid,
+	.set_acl = set_cifs_acl,
+#endif /* CIFS_ACL */
 };
 
 struct smb_version_values smb1_values = {
diff --git a/fs/cifs/smb2glob.h b/fs/cifs/smb2glob.h
index c383508..bc0bb9c 100644
--- a/fs/cifs/smb2glob.h
+++ b/fs/cifs/smb2glob.h
@@ -57,4 +57,7 @@
 #define SMB2_CMACAES_SIZE (16)
 #define SMB3_SIGNKEY_SIZE (16)
 
+/* Maximum buffer size value we can send with 1 credit */
+#define SMB2_MAX_BUFFER_SIZE 65536
+
 #endif	/* _SMB2_GLOB_H */
diff --git a/fs/cifs/smb2ops.c b/fs/cifs/smb2ops.c
index 757da3e..192f51a 100644
--- a/fs/cifs/smb2ops.c
+++ b/fs/cifs/smb2ops.c
@@ -182,11 +182,8 @@
 	/* start with specified wsize, or default */
 	wsize = volume_info->wsize ? volume_info->wsize : CIFS_DEFAULT_IOSIZE;
 	wsize = min_t(unsigned int, wsize, server->max_write);
-	/*
-	 * limit write size to 2 ** 16, because we don't support multicredit
-	 * requests now.
-	 */
-	wsize = min_t(unsigned int, wsize, 2 << 15);
+	/* set it to the maximum buffer size value we can send with 1 credit */
+	wsize = min_t(unsigned int, wsize, SMB2_MAX_BUFFER_SIZE);
 
 	return wsize;
 }
@@ -200,11 +197,8 @@
 	/* start with specified rsize, or default */
 	rsize = volume_info->rsize ? volume_info->rsize : CIFS_DEFAULT_IOSIZE;
 	rsize = min_t(unsigned int, rsize, server->max_read);
-	/*
-	 * limit write size to 2 ** 16, because we don't support multicredit
-	 * requests now.
-	 */
-	rsize = min_t(unsigned int, rsize, 2 << 15);
+	/* set it to the maximum buffer size value we can send with 1 credit */
+	rsize = min_t(unsigned int, rsize, SMB2_MAX_BUFFER_SIZE);
 
 	return rsize;
 }
diff --git a/fs/cifs/smb2pdu.c b/fs/cifs/smb2pdu.c
index 2013234..8603447 100644
--- a/fs/cifs/smb2pdu.c
+++ b/fs/cifs/smb2pdu.c
@@ -413,7 +413,9 @@
 
 	/* SMB2 only has an extended negflavor */
 	server->negflavor = CIFS_NEGFLAVOR_EXTENDED;
-	server->maxBuf = le32_to_cpu(rsp->MaxTransactSize);
+	/* set it to the maximum buffer size value we can send with 1 credit */
+	server->maxBuf = min_t(unsigned int, le32_to_cpu(rsp->MaxTransactSize),
+			       SMB2_MAX_BUFFER_SIZE);
 	server->max_read = le32_to_cpu(rsp->MaxReadSize);
 	server->max_write = le32_to_cpu(rsp->MaxWriteSize);
 	/* BB Do we need to validate the SecurityMode? */
@@ -1890,7 +1892,8 @@
 
 /* smb2_async_writev - send an async write, and set up mid to handle result */
 int
-smb2_async_writev(struct cifs_writedata *wdata)
+smb2_async_writev(struct cifs_writedata *wdata,
+		  void (*release)(struct kref *kref))
 {
 	int rc = -EACCES;
 	struct smb2_write_req *req = NULL;
@@ -1938,7 +1941,7 @@
 				smb2_writev_callback, wdata, 0);
 
 	if (rc) {
-		kref_put(&wdata->refcount, cifs_writedata_release);
+		kref_put(&wdata->refcount, release);
 		cifs_stats_fail_inc(tcon, SMB2_WRITE_HE);
 	}
 
diff --git a/fs/cifs/smb2proto.h b/fs/cifs/smb2proto.h
index 93adc64..0ce48db 100644
--- a/fs/cifs/smb2proto.h
+++ b/fs/cifs/smb2proto.h
@@ -123,7 +123,8 @@
 extern int smb2_async_readv(struct cifs_readdata *rdata);
 extern int SMB2_read(const unsigned int xid, struct cifs_io_parms *io_parms,
 		     unsigned int *nbytes, char **buf, int *buf_type);
-extern int smb2_async_writev(struct cifs_writedata *wdata);
+extern int smb2_async_writev(struct cifs_writedata *wdata,
+			     void (*release)(struct kref *kref));
 extern int SMB2_write(const unsigned int xid, struct cifs_io_parms *io_parms,
 		      unsigned int *nbytes, struct kvec *iov, int n_vec);
 extern int SMB2_echo(struct TCP_Server_Info *server);
diff --git a/fs/cifs/xattr.c b/fs/cifs/xattr.c
index 09afda4..5ac836a 100644
--- a/fs/cifs/xattr.c
+++ b/fs/cifs/xattr.c
@@ -82,9 +82,11 @@
 			goto remove_ea_exit;
 
 		ea_name += XATTR_USER_PREFIX_LEN; /* skip past user. prefix */
-		rc = CIFSSMBSetEA(xid, pTcon, full_path, ea_name, NULL,
-			(__u16)0, cifs_sb->local_nls,
-			cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MAP_SPECIAL_CHR);
+		if (pTcon->ses->server->ops->set_EA)
+			rc = pTcon->ses->server->ops->set_EA(xid, pTcon,
+				full_path, ea_name, NULL, (__u16)0,
+				cifs_sb->local_nls, cifs_sb->mnt_cifs_flags &
+					CIFS_MOUNT_MAP_SPECIAL_CHR);
 	}
 remove_ea_exit:
 	kfree(full_path);
@@ -149,18 +151,22 @@
 			cifs_dbg(FYI, "attempt to set cifs inode metadata\n");
 
 		ea_name += XATTR_USER_PREFIX_LEN; /* skip past user. prefix */
-		rc = CIFSSMBSetEA(xid, pTcon, full_path, ea_name, ea_value,
-			(__u16)value_size, cifs_sb->local_nls,
-			cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MAP_SPECIAL_CHR);
+		if (pTcon->ses->server->ops->set_EA)
+			rc = pTcon->ses->server->ops->set_EA(xid, pTcon,
+				full_path, ea_name, ea_value, (__u16)value_size,
+				cifs_sb->local_nls, cifs_sb->mnt_cifs_flags &
+					CIFS_MOUNT_MAP_SPECIAL_CHR);
 	} else if (strncmp(ea_name, XATTR_OS2_PREFIX, XATTR_OS2_PREFIX_LEN)
 		   == 0) {
 		if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NO_XATTR)
 			goto set_ea_exit;
 
 		ea_name += XATTR_OS2_PREFIX_LEN; /* skip past os2. prefix */
-		rc = CIFSSMBSetEA(xid, pTcon, full_path, ea_name, ea_value,
-			(__u16)value_size, cifs_sb->local_nls,
-			cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MAP_SPECIAL_CHR);
+		if (pTcon->ses->server->ops->set_EA)
+			rc = pTcon->ses->server->ops->set_EA(xid, pTcon,
+				full_path, ea_name, ea_value, (__u16)value_size,
+				cifs_sb->local_nls, cifs_sb->mnt_cifs_flags &
+					CIFS_MOUNT_MAP_SPECIAL_CHR);
 	} else if (strncmp(ea_name, CIFS_XATTR_CIFS_ACL,
 			strlen(CIFS_XATTR_CIFS_ACL)) == 0) {
 #ifdef CONFIG_CIFS_ACL
@@ -170,8 +176,12 @@
 			rc = -ENOMEM;
 		} else {
 			memcpy(pacl, ea_value, value_size);
-			rc = set_cifs_acl(pacl, value_size,
-				direntry->d_inode, full_path, CIFS_ACL_DACL);
+			if (pTcon->ses->server->ops->set_acl)
+				rc = pTcon->ses->server->ops->set_acl(pacl,
+						value_size, direntry->d_inode,
+						full_path, CIFS_ACL_DACL);
+			else
+				rc = -EOPNOTSUPP;
 			if (rc == 0) /* force revalidate of the inode */
 				CIFS_I(direntry->d_inode)->time = 0;
 			kfree(pacl);
@@ -272,17 +282,21 @@
 			/* revalidate/getattr then populate from inode */
 		} /* BB add else when above is implemented */
 		ea_name += XATTR_USER_PREFIX_LEN; /* skip past user. prefix */
-		rc = CIFSSMBQAllEAs(xid, pTcon, full_path, ea_name, ea_value,
-			buf_size, cifs_sb->local_nls,
-			cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MAP_SPECIAL_CHR);
+		if (pTcon->ses->server->ops->query_all_EAs)
+			rc = pTcon->ses->server->ops->query_all_EAs(xid, pTcon,
+				full_path, ea_name, ea_value, buf_size,
+				cifs_sb->local_nls, cifs_sb->mnt_cifs_flags &
+					CIFS_MOUNT_MAP_SPECIAL_CHR);
 	} else if (strncmp(ea_name, XATTR_OS2_PREFIX, XATTR_OS2_PREFIX_LEN) == 0) {
 		if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NO_XATTR)
 			goto get_ea_exit;
 
 		ea_name += XATTR_OS2_PREFIX_LEN; /* skip past os2. prefix */
-		rc = CIFSSMBQAllEAs(xid, pTcon, full_path, ea_name, ea_value,
-			buf_size, cifs_sb->local_nls,
-			cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MAP_SPECIAL_CHR);
+		if (pTcon->ses->server->ops->query_all_EAs)
+			rc = pTcon->ses->server->ops->query_all_EAs(xid, pTcon,
+				full_path, ea_name, ea_value, buf_size,
+				cifs_sb->local_nls, cifs_sb->mnt_cifs_flags &
+					CIFS_MOUNT_MAP_SPECIAL_CHR);
 	} else if (strncmp(ea_name, POSIX_ACL_XATTR_ACCESS,
 			  strlen(POSIX_ACL_XATTR_ACCESS)) == 0) {
 #ifdef CONFIG_CIFS_POSIX
@@ -313,8 +327,11 @@
 			u32 acllen;
 			struct cifs_ntsd *pacl;
 
-			pacl = get_cifs_acl(cifs_sb, direntry->d_inode,
-						full_path, &acllen);
+			if (pTcon->ses->server->ops->get_acl == NULL)
+				goto get_ea_exit; /* rc already EOPNOTSUPP */
+
+			pacl = pTcon->ses->server->ops->get_acl(cifs_sb,
+					direntry->d_inode, full_path, &acllen);
 			if (IS_ERR(pacl)) {
 				rc = PTR_ERR(pacl);
 				cifs_dbg(VFS, "%s: error %zd getting sec desc\n",
@@ -400,11 +417,12 @@
 	/* if proc/fs/cifs/streamstoxattr is set then
 		search server for EAs or streams to
 		returns as xattrs */
-	rc = CIFSSMBQAllEAs(xid, pTcon, full_path, NULL, data,
-				buf_size, cifs_sb->local_nls,
-				cifs_sb->mnt_cifs_flags &
-					CIFS_MOUNT_MAP_SPECIAL_CHR);
 
+	if (pTcon->ses->server->ops->query_all_EAs)
+		rc = pTcon->ses->server->ops->query_all_EAs(xid, pTcon,
+				full_path, NULL, data, buf_size,
+				cifs_sb->local_nls, cifs_sb->mnt_cifs_flags &
+					CIFS_MOUNT_MAP_SPECIAL_CHR);
 list_ea_exit:
 	kfree(full_path);
 	free_xid(xid);
diff --git a/fs/dcookies.c b/fs/dcookies.c
index ab5954b..ac44a69 100644
--- a/fs/dcookies.c
+++ b/fs/dcookies.c
@@ -204,7 +204,7 @@
 }
 
 #ifdef CONFIG_COMPAT
-COMPAT_SYSCALL_DEFINE4(lookup_dcookie, u32, w0, u32, w1, char __user *, buf, size_t, len)
+COMPAT_SYSCALL_DEFINE4(lookup_dcookie, u32, w0, u32, w1, char __user *, buf, compat_size_t, len)
 {
 #ifdef __BIG_ENDIAN
 	return sys_lookup_dcookie(((u64)w0 << 32) | w1, buf, len);
diff --git a/fs/direct-io.c b/fs/direct-io.c
index 0e04142..160a548 100644
--- a/fs/direct-io.c
+++ b/fs/direct-io.c
@@ -375,7 +375,7 @@
 	bio = bio_alloc(GFP_KERNEL, nr_vecs);
 
 	bio->bi_bdev = bdev;
-	bio->bi_sector = first_sector;
+	bio->bi_iter.bi_sector = first_sector;
 	if (dio->is_async)
 		bio->bi_end_io = dio_bio_end_aio;
 	else
@@ -719,7 +719,7 @@
 	if (sdio->bio) {
 		loff_t cur_offset = sdio->cur_page_fs_offset;
 		loff_t bio_next_offset = sdio->logical_offset_in_bio +
-			sdio->bio->bi_size;
+			sdio->bio->bi_iter.bi_size;
 
 		/*
 		 * See whether this new request is contiguous with the old.
diff --git a/fs/exec.c b/fs/exec.c
index e1529b4..3d78fcc 100644
--- a/fs/exec.c
+++ b/fs/exec.c
@@ -748,11 +748,10 @@
 
 #endif /* CONFIG_MMU */
 
-struct file *open_exec(const char *name)
+static struct file *do_open_exec(struct filename *name)
 {
 	struct file *file;
 	int err;
-	struct filename tmp = { .name = name };
 	static const struct open_flags open_exec_flags = {
 		.open_flag = O_LARGEFILE | O_RDONLY | __FMODE_EXEC,
 		.acc_mode = MAY_EXEC | MAY_OPEN,
@@ -760,7 +759,7 @@
 		.lookup_flags = LOOKUP_FOLLOW,
 	};
 
-	file = do_filp_open(AT_FDCWD, &tmp, &open_exec_flags);
+	file = do_filp_open(AT_FDCWD, name, &open_exec_flags);
 	if (IS_ERR(file))
 		goto out;
 
@@ -784,6 +783,12 @@
 	fput(file);
 	return ERR_PTR(err);
 }
+
+struct file *open_exec(const char *name)
+{
+	struct filename tmp = { .name = name };
+	return do_open_exec(&tmp);
+}
 EXPORT_SYMBOL(open_exec);
 
 int kernel_read(struct file *file, loff_t offset,
@@ -1162,7 +1167,7 @@
 	return -ENOMEM;
 }
 
-void free_bprm(struct linux_binprm *bprm)
+static void free_bprm(struct linux_binprm *bprm)
 {
 	free_arg_pages(bprm);
 	if (bprm->cred) {
@@ -1432,7 +1437,7 @@
 /*
  * sys_execve() executes a new program.
  */
-static int do_execve_common(const char *filename,
+static int do_execve_common(struct filename *filename,
 				struct user_arg_ptr argv,
 				struct user_arg_ptr envp)
 {
@@ -1441,6 +1446,9 @@
 	struct files_struct *displaced;
 	int retval;
 
+	if (IS_ERR(filename))
+		return PTR_ERR(filename);
+
 	/*
 	 * We move the actual failure in case of RLIMIT_NPROC excess from
 	 * set*uid() to execve() because too many poorly written programs
@@ -1473,7 +1481,7 @@
 	check_unsafe_exec(bprm);
 	current->in_execve = 1;
 
-	file = open_exec(filename);
+	file = do_open_exec(filename);
 	retval = PTR_ERR(file);
 	if (IS_ERR(file))
 		goto out_unmark;
@@ -1481,8 +1489,7 @@
 	sched_exec();
 
 	bprm->file = file;
-	bprm->filename = filename;
-	bprm->interp = filename;
+	bprm->filename = bprm->interp = filename->name;
 
 	retval = bprm_mm_init(bprm);
 	if (retval)
@@ -1523,6 +1530,7 @@
 	acct_update_integrals(current);
 	task_numa_free(current);
 	free_bprm(bprm);
+	putname(filename);
 	if (displaced)
 		put_files_struct(displaced);
 	return retval;
@@ -1544,10 +1552,11 @@
 	if (displaced)
 		reset_files_struct(displaced);
 out_ret:
+	putname(filename);
 	return retval;
 }
 
-int do_execve(const char *filename,
+int do_execve(struct filename *filename,
 	const char __user *const __user *__argv,
 	const char __user *const __user *__envp)
 {
@@ -1557,7 +1566,7 @@
 }
 
 #ifdef CONFIG_COMPAT
-static int compat_do_execve(const char *filename,
+static int compat_do_execve(struct filename *filename,
 	const compat_uptr_t __user *__argv,
 	const compat_uptr_t __user *__envp)
 {
@@ -1607,25 +1616,13 @@
 		const char __user *const __user *, argv,
 		const char __user *const __user *, envp)
 {
-	struct filename *path = getname(filename);
-	int error = PTR_ERR(path);
-	if (!IS_ERR(path)) {
-		error = do_execve(path->name, argv, envp);
-		putname(path);
-	}
-	return error;
+	return do_execve(getname(filename), argv, envp);
 }
 #ifdef CONFIG_COMPAT
 asmlinkage long compat_sys_execve(const char __user * filename,
 	const compat_uptr_t __user * argv,
 	const compat_uptr_t __user * envp)
 {
-	struct filename *path = getname(filename);
-	int error = PTR_ERR(path);
-	if (!IS_ERR(path)) {
-		error = compat_do_execve(path->name, argv, envp);
-		putname(path);
-	}
-	return error;
+	return compat_do_execve(getname(filename), argv, envp);
 }
 #endif
diff --git a/fs/ext4/ext4.h b/fs/ext4/ext4.h
index ece5556..d3a534f 100644
--- a/fs/ext4/ext4.h
+++ b/fs/ext4/ext4.h
@@ -771,6 +771,8 @@
 	if (EXT4_FITS_IN_INODE(raw_inode, einode, xtime))		       \
 		(einode)->xtime.tv_sec = 				       \
 			(signed)le32_to_cpu((raw_inode)->xtime);	       \
+	else								       \
+		(einode)->xtime.tv_sec = 0;				       \
 	if (EXT4_FITS_IN_INODE(raw_inode, einode, xtime ## _extra))	       \
 		ext4_decode_extra_time(&(einode)->xtime,		       \
 				       raw_inode->xtime ## _extra);	       \
diff --git a/fs/ext4/extents.c b/fs/ext4/extents.c
index 10cff47..74bc2d5 100644
--- a/fs/ext4/extents.c
+++ b/fs/ext4/extents.c
@@ -3906,6 +3906,7 @@
 		} else
 			err = ret;
 		map->m_flags |= EXT4_MAP_MAPPED;
+		map->m_pblk = newblock;
 		if (allocated > map->m_len)
 			allocated = map->m_len;
 		map->m_len = allocated;
diff --git a/fs/ext4/file.c b/fs/ext4/file.c
index 43e64f6..1a50739 100644
--- a/fs/ext4/file.c
+++ b/fs/ext4/file.c
@@ -152,7 +152,7 @@
 	if (ret > 0) {
 		ssize_t err;
 
-		err = generic_write_sync(file, pos, ret);
+		err = generic_write_sync(file, iocb->ki_pos - ret, ret);
 		if (err < 0 && ret > 0)
 			ret = err;
 	}
diff --git a/fs/ext4/ioctl.c b/fs/ext4/ioctl.c
index 6bea806..a2a837f 100644
--- a/fs/ext4/ioctl.c
+++ b/fs/ext4/ioctl.c
@@ -140,7 +140,7 @@
 	handle = ext4_journal_start(inode_bl, EXT4_HT_MOVE_EXTENTS, 2);
 	if (IS_ERR(handle)) {
 		err = -EINVAL;
-		goto swap_boot_out;
+		goto journal_err_out;
 	}
 
 	/* Protect extent tree against block allocations via delalloc */
@@ -198,6 +198,7 @@
 
 	ext4_double_up_write_data_sem(inode, inode_bl);
 
+journal_err_out:
 	ext4_inode_resume_unlocked_dio(inode);
 	ext4_inode_resume_unlocked_dio(inode_bl);
 
diff --git a/fs/ext4/page-io.c b/fs/ext4/page-io.c
index d488f80..ab95508 100644
--- a/fs/ext4/page-io.c
+++ b/fs/ext4/page-io.c
@@ -65,9 +65,9 @@
 {
 	int i;
 	int error = !test_bit(BIO_UPTODATE, &bio->bi_flags);
+	struct bio_vec *bvec;
 
-	for (i = 0; i < bio->bi_vcnt; i++) {
-		struct bio_vec *bvec = &bio->bi_io_vec[i];
+	bio_for_each_segment_all(bvec, bio, i) {
 		struct page *page = bvec->bv_page;
 		struct buffer_head *bh, *head;
 		unsigned bio_start = bvec->bv_offset;
@@ -298,7 +298,7 @@
 static void ext4_end_bio(struct bio *bio, int error)
 {
 	ext4_io_end_t *io_end = bio->bi_private;
-	sector_t bi_sector = bio->bi_sector;
+	sector_t bi_sector = bio->bi_iter.bi_sector;
 
 	BUG_ON(!io_end);
 	bio->bi_end_io = NULL;
@@ -366,7 +366,7 @@
 	bio = bio_alloc(GFP_NOIO, min(nvecs, BIO_MAX_PAGES));
 	if (!bio)
 		return -ENOMEM;
-	bio->bi_sector = bh->b_blocknr * (bh->b_size >> 9);
+	bio->bi_iter.bi_sector = bh->b_blocknr * (bh->b_size >> 9);
 	bio->bi_bdev = bh->b_bdev;
 	bio->bi_end_io = ext4_end_bio;
 	bio->bi_private = ext4_get_io_end(io->io_end);
diff --git a/fs/ext4/resize.c b/fs/ext4/resize.c
index c5adbb3..f3b84cd 100644
--- a/fs/ext4/resize.c
+++ b/fs/ext4/resize.c
@@ -243,6 +243,7 @@
 	ext4_group_t group;
 	ext4_group_t last_group;
 	unsigned overhead;
+	__u16 uninit_mask = (flexbg_size > 1) ? ~EXT4_BG_BLOCK_UNINIT : ~0;
 
 	BUG_ON(flex_gd->count == 0 || group_data == NULL);
 
@@ -266,7 +267,7 @@
 	src_group++;
 	for (; src_group <= last_group; src_group++) {
 		overhead = ext4_group_overhead_blocks(sb, src_group);
-		if (overhead != 0)
+		if (overhead == 0)
 			last_blk += group_data[src_group - group].blocks_count;
 		else
 			break;
@@ -280,8 +281,7 @@
 		group = ext4_get_group_number(sb, start_blk - 1);
 		group -= group_data[0].group;
 		group_data[group].free_blocks_count--;
-		if (flexbg_size > 1)
-			flex_gd->bg_flags[group] &= ~EXT4_BG_BLOCK_UNINIT;
+		flex_gd->bg_flags[group] &= uninit_mask;
 	}
 
 	/* Allocate inode bitmaps */
@@ -292,22 +292,30 @@
 		group = ext4_get_group_number(sb, start_blk - 1);
 		group -= group_data[0].group;
 		group_data[group].free_blocks_count--;
-		if (flexbg_size > 1)
-			flex_gd->bg_flags[group] &= ~EXT4_BG_BLOCK_UNINIT;
+		flex_gd->bg_flags[group] &= uninit_mask;
 	}
 
 	/* Allocate inode tables */
 	for (; it_index < flex_gd->count; it_index++) {
-		if (start_blk + EXT4_SB(sb)->s_itb_per_group > last_blk)
+		unsigned int itb = EXT4_SB(sb)->s_itb_per_group;
+		ext4_fsblk_t next_group_start;
+
+		if (start_blk + itb > last_blk)
 			goto next_group;
 		group_data[it_index].inode_table = start_blk;
-		group = ext4_get_group_number(sb, start_blk - 1);
+		group = ext4_get_group_number(sb, start_blk);
+		next_group_start = ext4_group_first_block_no(sb, group + 1);
 		group -= group_data[0].group;
-		group_data[group].free_blocks_count -=
-					EXT4_SB(sb)->s_itb_per_group;
-		if (flexbg_size > 1)
-			flex_gd->bg_flags[group] &= ~EXT4_BG_BLOCK_UNINIT;
 
+		if (start_blk + itb > next_group_start) {
+			flex_gd->bg_flags[group + 1] &= uninit_mask;
+			overhead = start_blk + itb - next_group_start;
+			group_data[group + 1].free_blocks_count -= overhead;
+			itb -= overhead;
+		}
+
+		group_data[group].free_blocks_count -= itb;
+		flex_gd->bg_flags[group] &= uninit_mask;
 		start_blk += EXT4_SB(sb)->s_itb_per_group;
 	}
 
@@ -401,7 +409,7 @@
 		start = ext4_group_first_block_no(sb, group);
 		group -= flex_gd->groups[0].group;
 
-		count2 = sb->s_blocksize * 8 - (block - start);
+		count2 = EXT4_BLOCKS_PER_GROUP(sb) - (block - start);
 		if (count2 > count)
 			count2 = count;
 
@@ -620,7 +628,7 @@
 			if (err)
 				goto out;
 			count = group_table_count[j];
-			start = group_data[i].block_bitmap;
+			start = (&group_data[i].block_bitmap)[j];
 			block = start;
 		}
 
diff --git a/fs/ext4/super.c b/fs/ext4/super.c
index 1f7784d..710fed2 100644
--- a/fs/ext4/super.c
+++ b/fs/ext4/super.c
@@ -3695,16 +3695,22 @@
 	for (i = 0; i < 4; i++)
 		sbi->s_hash_seed[i] = le32_to_cpu(es->s_hash_seed[i]);
 	sbi->s_def_hash_version = es->s_def_hash_version;
-	i = le32_to_cpu(es->s_flags);
-	if (i & EXT2_FLAGS_UNSIGNED_HASH)
-		sbi->s_hash_unsigned = 3;
-	else if ((i & EXT2_FLAGS_SIGNED_HASH) == 0) {
+	if (EXT4_HAS_COMPAT_FEATURE(sb, EXT4_FEATURE_COMPAT_DIR_INDEX)) {
+		i = le32_to_cpu(es->s_flags);
+		if (i & EXT2_FLAGS_UNSIGNED_HASH)
+			sbi->s_hash_unsigned = 3;
+		else if ((i & EXT2_FLAGS_SIGNED_HASH) == 0) {
 #ifdef __CHAR_UNSIGNED__
-		es->s_flags |= cpu_to_le32(EXT2_FLAGS_UNSIGNED_HASH);
-		sbi->s_hash_unsigned = 3;
+			if (!(sb->s_flags & MS_RDONLY))
+				es->s_flags |=
+					cpu_to_le32(EXT2_FLAGS_UNSIGNED_HASH);
+			sbi->s_hash_unsigned = 3;
 #else
-		es->s_flags |= cpu_to_le32(EXT2_FLAGS_SIGNED_HASH);
+			if (!(sb->s_flags & MS_RDONLY))
+				es->s_flags |=
+					cpu_to_le32(EXT2_FLAGS_SIGNED_HASH);
 #endif
+		}
 	}
 
 	/* Handle clustersize */
diff --git a/fs/f2fs/data.c b/fs/f2fs/data.c
index 0ae5587..2261ccd 100644
--- a/fs/f2fs/data.c
+++ b/fs/f2fs/data.c
@@ -26,40 +26,33 @@
 
 static void f2fs_read_end_io(struct bio *bio, int err)
 {
-	const int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
-	struct bio_vec *bvec = bio->bi_io_vec + bio->bi_vcnt - 1;
+	struct bio_vec *bvec;
+	int i;
 
-	do {
+	bio_for_each_segment_all(bvec, bio, i) {
 		struct page *page = bvec->bv_page;
 
-		if (--bvec >= bio->bi_io_vec)
-			prefetchw(&bvec->bv_page->flags);
-
-		if (unlikely(!uptodate)) {
+		if (!err) {
+			SetPageUptodate(page);
+		} else {
 			ClearPageUptodate(page);
 			SetPageError(page);
-		} else {
-			SetPageUptodate(page);
 		}
 		unlock_page(page);
-	} while (bvec >= bio->bi_io_vec);
-
+	}
 	bio_put(bio);
 }
 
 static void f2fs_write_end_io(struct bio *bio, int err)
 {
-	const int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
-	struct bio_vec *bvec = bio->bi_io_vec + bio->bi_vcnt - 1;
-	struct f2fs_sb_info *sbi = F2FS_SB(bvec->bv_page->mapping->host->i_sb);
+	struct f2fs_sb_info *sbi = F2FS_SB(bio->bi_io_vec->bv_page->mapping->host->i_sb);
+	struct bio_vec *bvec;
+	int i;
 
-	do {
+	bio_for_each_segment_all(bvec, bio, i) {
 		struct page *page = bvec->bv_page;
 
-		if (--bvec >= bio->bi_io_vec)
-			prefetchw(&bvec->bv_page->flags);
-
-		if (unlikely(!uptodate)) {
+		if (unlikely(err)) {
 			SetPageError(page);
 			set_bit(AS_EIO, &page->mapping->flags);
 			set_ckpt_flags(sbi->ckpt, CP_ERROR_FLAG);
@@ -67,7 +60,7 @@
 		}
 		end_page_writeback(page);
 		dec_page_count(sbi, F2FS_WRITEBACK);
-	} while (bvec >= bio->bi_io_vec);
+	}
 
 	if (bio->bi_private)
 		complete(bio->bi_private);
@@ -91,7 +84,7 @@
 	bio = bio_alloc(GFP_NOIO, npages);
 
 	bio->bi_bdev = sbi->sb->s_bdev;
-	bio->bi_sector = SECTOR_FROM_BLOCK(sbi, blk_addr);
+	bio->bi_iter.bi_sector = SECTOR_FROM_BLOCK(sbi, blk_addr);
 	bio->bi_end_io = is_read ? f2fs_read_end_io : f2fs_write_end_io;
 
 	return bio;
diff --git a/fs/file.c b/fs/file.c
index 771578b..db25c2b 100644
--- a/fs/file.c
+++ b/fs/file.c
@@ -34,7 +34,7 @@
 	 * vmalloc() if the allocation size will be considered "large" by the VM.
 	 */
 	if (size <= (PAGE_SIZE << PAGE_ALLOC_COSTLY_ORDER)) {
-		void *data = kmalloc(size, GFP_KERNEL|__GFP_NOWARN);
+		void *data = kmalloc(size, GFP_KERNEL|__GFP_NOWARN|__GFP_NORETRY);
 		if (data != NULL)
 			return data;
 	}
diff --git a/fs/fscache/object-list.c b/fs/fscache/object-list.c
index e1959ef..b5ebc2d 100644
--- a/fs/fscache/object-list.c
+++ b/fs/fscache/object-list.c
@@ -50,6 +50,8 @@
 	struct fscache_object *xobj;
 	struct rb_node **p = &fscache_object_list.rb_node, *parent = NULL;
 
+	ASSERT(RB_EMPTY_NODE(&obj->objlist_link));
+
 	write_lock(&fscache_object_list_lock);
 
 	while (*p) {
@@ -75,6 +77,9 @@
  */
 void fscache_objlist_remove(struct fscache_object *obj)
 {
+	if (RB_EMPTY_NODE(&obj->objlist_link))
+		return;
+
 	write_lock(&fscache_object_list_lock);
 
 	BUG_ON(RB_EMPTY_ROOT(&fscache_object_list));
diff --git a/fs/fscache/object.c b/fs/fscache/object.c
index 53d35c5..d3b4539 100644
--- a/fs/fscache/object.c
+++ b/fs/fscache/object.c
@@ -314,6 +314,9 @@
 	object->cache = cache;
 	object->cookie = cookie;
 	object->parent = NULL;
+#ifdef CONFIG_FSCACHE_OBJECT_LIST
+	RB_CLEAR_NODE(&object->objlist_link);
+#endif
 
 	object->oob_event_mask = 0;
 	for (t = object->oob_table; t->events; t++)
diff --git a/fs/gfs2/lops.c b/fs/gfs2/lops.c
index 58f0640..7669379 100644
--- a/fs/gfs2/lops.c
+++ b/fs/gfs2/lops.c
@@ -273,7 +273,7 @@
 		nrvecs = max(nrvecs/2, 1U);
 	}
 
-	bio->bi_sector = blkno * (sb->s_blocksize >> 9);
+	bio->bi_iter.bi_sector = blkno * (sb->s_blocksize >> 9);
 	bio->bi_bdev = sb->s_bdev;
 	bio->bi_end_io = gfs2_end_log_write;
 	bio->bi_private = sdp;
diff --git a/fs/gfs2/ops_fstype.c b/fs/gfs2/ops_fstype.c
index 1e712b5..c6872d0 100644
--- a/fs/gfs2/ops_fstype.c
+++ b/fs/gfs2/ops_fstype.c
@@ -238,7 +238,7 @@
 	lock_page(page);
 
 	bio = bio_alloc(GFP_NOFS, 1);
-	bio->bi_sector = sector * (sb->s_blocksize >> 9);
+	bio->bi_iter.bi_sector = sector * (sb->s_blocksize >> 9);
 	bio->bi_bdev = sb->s_bdev;
 	bio_add_page(bio, page, PAGE_SIZE, 0);
 
diff --git a/fs/hfsplus/dir.c b/fs/hfsplus/dir.c
index 9ee6298..bdec665 100644
--- a/fs/hfsplus/dir.c
+++ b/fs/hfsplus/dir.c
@@ -529,7 +529,7 @@
 	.setxattr		= generic_setxattr,
 	.getxattr		= generic_getxattr,
 	.listxattr		= hfsplus_listxattr,
-	.removexattr		= hfsplus_removexattr,
+	.removexattr		= generic_removexattr,
 #ifdef CONFIG_HFSPLUS_FS_POSIX_ACL
 	.get_acl		= hfsplus_get_posix_acl,
 	.set_acl		= hfsplus_set_posix_acl,
diff --git a/fs/hfsplus/inode.c b/fs/hfsplus/inode.c
index 4551cbd..fa929f3 100644
--- a/fs/hfsplus/inode.c
+++ b/fs/hfsplus/inode.c
@@ -331,7 +331,7 @@
 	.setxattr	= generic_setxattr,
 	.getxattr	= generic_getxattr,
 	.listxattr	= hfsplus_listxattr,
-	.removexattr	= hfsplus_removexattr,
+	.removexattr	= generic_removexattr,
 #ifdef CONFIG_HFSPLUS_FS_POSIX_ACL
 	.get_acl	= hfsplus_get_posix_acl,
 	.set_acl	= hfsplus_set_posix_acl,
diff --git a/fs/hfsplus/wrapper.c b/fs/hfsplus/wrapper.c
index e9a97a0..3f99964 100644
--- a/fs/hfsplus/wrapper.c
+++ b/fs/hfsplus/wrapper.c
@@ -63,7 +63,7 @@
 	sector &= ~((io_size >> HFSPLUS_SECTOR_SHIFT) - 1);
 
 	bio = bio_alloc(GFP_NOIO, 1);
-	bio->bi_sector = sector;
+	bio->bi_iter.bi_sector = sector;
 	bio->bi_bdev = sb->s_bdev;
 
 	if (!(rw & WRITE) && data)
diff --git a/fs/hfsplus/xattr.c b/fs/hfsplus/xattr.c
index 0b4a5c9..4e27edc 100644
--- a/fs/hfsplus/xattr.c
+++ b/fs/hfsplus/xattr.c
@@ -11,6 +11,8 @@
 #include "xattr.h"
 #include "acl.h"
 
+static int hfsplus_removexattr(struct inode *inode, const char *name);
+
 const struct xattr_handler *hfsplus_xattr_handlers[] = {
 	&hfsplus_xattr_osx_handler,
 	&hfsplus_xattr_user_handler,
@@ -274,14 +276,8 @@
 				HFSPLUS_IS_RSRC(inode))
 		return -EOPNOTSUPP;
 
-	if (strncmp(name, XATTR_MAC_OSX_PREFIX,
-				XATTR_MAC_OSX_PREFIX_LEN) == 0)
-		name += XATTR_MAC_OSX_PREFIX_LEN;
-
-	if (value == NULL) {
-		value = "";
-		size = 0;
-	}
+	if (value == NULL)
+		return hfsplus_removexattr(inode, name);
 
 	err = hfs_find_init(HFSPLUS_SB(inode->i_sb)->cat_tree, &cat_fd);
 	if (err) {
@@ -399,16 +395,11 @@
 	return err;
 }
 
-static inline int is_osx_xattr(const char *xattr_name)
-{
-	return !is_known_namespace(xattr_name);
-}
-
 static int name_len(const char *xattr_name, int xattr_name_len)
 {
 	int len = xattr_name_len + 1;
 
-	if (is_osx_xattr(xattr_name))
+	if (!is_known_namespace(xattr_name))
 		len += XATTR_MAC_OSX_PREFIX_LEN;
 
 	return len;
@@ -419,7 +410,7 @@
 	int len = name_len;
 	int offset = 0;
 
-	if (is_osx_xattr(xattr_name)) {
+	if (!is_known_namespace(xattr_name)) {
 		strncpy(buffer, XATTR_MAC_OSX_PREFIX, XATTR_MAC_OSX_PREFIX_LEN);
 		offset += XATTR_MAC_OSX_PREFIX_LEN;
 		len += XATTR_MAC_OSX_PREFIX_LEN;
@@ -497,18 +488,6 @@
 				HFSPLUS_IS_RSRC(inode))
 		return -EOPNOTSUPP;
 
-	if (strncmp(name, XATTR_MAC_OSX_PREFIX,
-				XATTR_MAC_OSX_PREFIX_LEN) == 0) {
-		/* skip "osx." prefix */
-		name += XATTR_MAC_OSX_PREFIX_LEN;
-		/*
-		 * Don't allow retrieving properly prefixed attributes
-		 * by prepending them with "osx."
-		 */
-		if (is_known_namespace(name))
-			return -EOPNOTSUPP;
-	}
-
 	if (!strcmp_xattr_finder_info(name))
 		return hfsplus_getxattr_finder_info(inode, value, size);
 
@@ -743,28 +722,18 @@
 	return res;
 }
 
-int hfsplus_removexattr(struct dentry *dentry, const char *name)
+static int hfsplus_removexattr(struct inode *inode, const char *name)
 {
 	int err = 0;
-	struct inode *inode = dentry->d_inode;
 	struct hfs_find_data cat_fd;
 	u16 flags;
 	u16 cat_entry_type;
 	int is_xattr_acl_deleted = 0;
 	int is_all_xattrs_deleted = 0;
 
-	if ((!S_ISREG(inode->i_mode) &&
-			!S_ISDIR(inode->i_mode)) ||
-				HFSPLUS_IS_RSRC(inode))
-		return -EOPNOTSUPP;
-
 	if (!HFSPLUS_SB(inode->i_sb)->attr_tree)
 		return -EOPNOTSUPP;
 
-	if (strncmp(name, XATTR_MAC_OSX_PREFIX,
-				XATTR_MAC_OSX_PREFIX_LEN) == 0)
-		name += XATTR_MAC_OSX_PREFIX_LEN;
-
 	if (!strcmp_xattr_finder_info(name))
 		return -EOPNOTSUPP;
 
@@ -838,8 +807,12 @@
 	if (len > HFSPLUS_ATTR_MAX_STRLEN)
 		return -EOPNOTSUPP;
 
-	strcpy(xattr_name, XATTR_MAC_OSX_PREFIX);
-	strcpy(xattr_name + XATTR_MAC_OSX_PREFIX_LEN, name);
+	/*
+	 * Don't allow retrieving properly prefixed attributes
+	 * by prepending them with "osx."
+	 */
+	if (is_known_namespace(name))
+		return -EOPNOTSUPP;
 
 	return hfsplus_getxattr(dentry, xattr_name, buffer, size);
 }
@@ -857,12 +830,13 @@
 	if (len > HFSPLUS_ATTR_MAX_STRLEN)
 		return -EOPNOTSUPP;
 
+	/*
+	 * Don't allow setting properly prefixed attributes
+	 * by prepending them with "osx."
+	 */
 	if (is_known_namespace(name))
 		return -EOPNOTSUPP;
 
-	strcpy(xattr_name, XATTR_MAC_OSX_PREFIX);
-	strcpy(xattr_name + XATTR_MAC_OSX_PREFIX_LEN, name);
-
 	return hfsplus_setxattr(dentry, xattr_name, buffer, size, flags);
 }
 
diff --git a/fs/hfsplus/xattr.h b/fs/hfsplus/xattr.h
index 9e21449..288530c 100644
--- a/fs/hfsplus/xattr.h
+++ b/fs/hfsplus/xattr.h
@@ -40,8 +40,6 @@
 
 ssize_t hfsplus_listxattr(struct dentry *dentry, char *buffer, size_t size);
 
-int hfsplus_removexattr(struct dentry *dentry, const char *name);
-
 int hfsplus_init_security(struct inode *inode, struct inode *dir,
 				const struct qstr *qstr);
 
diff --git a/fs/hpfs/alloc.c b/fs/hpfs/alloc.c
index cdb84a8..58b5106 100644
--- a/fs/hpfs/alloc.c
+++ b/fs/hpfs/alloc.c
@@ -8,6 +8,58 @@
 
 #include "hpfs_fn.h"
 
+static void hpfs_claim_alloc(struct super_block *s, secno sec)
+{
+	struct hpfs_sb_info *sbi = hpfs_sb(s);
+	if (sbi->sb_n_free != (unsigned)-1) {
+		if (unlikely(!sbi->sb_n_free)) {
+			hpfs_error(s, "free count underflow, allocating sector %08x", sec);
+			sbi->sb_n_free = -1;
+			return;
+		}
+		sbi->sb_n_free--;
+	}
+}
+
+static void hpfs_claim_free(struct super_block *s, secno sec)
+{
+	struct hpfs_sb_info *sbi = hpfs_sb(s);
+	if (sbi->sb_n_free != (unsigned)-1) {
+		if (unlikely(sbi->sb_n_free >= sbi->sb_fs_size)) {
+			hpfs_error(s, "free count overflow, freeing sector %08x", sec);
+			sbi->sb_n_free = -1;
+			return;
+		}
+		sbi->sb_n_free++;
+	}
+}
+
+static void hpfs_claim_dirband_alloc(struct super_block *s, secno sec)
+{
+	struct hpfs_sb_info *sbi = hpfs_sb(s);
+	if (sbi->sb_n_free_dnodes != (unsigned)-1) {
+		if (unlikely(!sbi->sb_n_free_dnodes)) {
+			hpfs_error(s, "dirband free count underflow, allocating sector %08x", sec);
+			sbi->sb_n_free_dnodes = -1;
+			return;
+		}
+		sbi->sb_n_free_dnodes--;
+	}
+}
+
+static void hpfs_claim_dirband_free(struct super_block *s, secno sec)
+{
+	struct hpfs_sb_info *sbi = hpfs_sb(s);
+	if (sbi->sb_n_free_dnodes != (unsigned)-1) {
+		if (unlikely(sbi->sb_n_free_dnodes >= sbi->sb_dirband_size / 4)) {
+			hpfs_error(s, "dirband free count overflow, freeing sector %08x", sec);
+			sbi->sb_n_free_dnodes = -1;
+			return;
+		}
+		sbi->sb_n_free_dnodes++;
+	}
+}
+
 /*
  * Check if a sector is allocated in bitmap
  * This is really slow. Turned on only if chk==2
@@ -203,9 +255,15 @@
 	}
 	sec = 0;
 	ret:
+	if (sec) {
+		i = 0;
+		do
+			hpfs_claim_alloc(s, sec + i);
+		while (unlikely(++i < n));
+	}
 	if (sec && f_p) {
 		for (i = 0; i < forward; i++) {
-			if (!hpfs_alloc_if_possible(s, sec + i + 1)) {
+			if (!hpfs_alloc_if_possible(s, sec + n + i)) {
 				hpfs_error(s, "Prealloc doesn't work! Wanted %d, allocated at %08x, can't allocate %d", forward, sec, i);
 				sec = 0;
 				break;
@@ -228,6 +286,7 @@
 	nr >>= 2;
 	sec = alloc_in_bmp(s, (~0x3fff) | nr, 1, 0);
 	if (!sec) return 0;
+	hpfs_claim_dirband_alloc(s, sec);
 	return ((sec & 0x3fff) << 2) + sbi->sb_dirband_start;
 }
 
@@ -242,6 +301,7 @@
 		bmp[(sec & 0x3fff) >> 5] &= cpu_to_le32(~(1 << (sec & 0x1f)));
 		hpfs_mark_4buffers_dirty(&qbh);
 		hpfs_brelse4(&qbh);
+		hpfs_claim_alloc(s, sec);
 		return 1;
 	}
 	hpfs_brelse4(&qbh);
@@ -275,6 +335,7 @@
 		return;
 	}
 	bmp[(sec & 0x3fff) >> 5] |= cpu_to_le32(1 << (sec & 0x1f));
+	hpfs_claim_free(s, sec);
 	if (!--n) {
 		hpfs_mark_4buffers_dirty(&qbh);
 		hpfs_brelse4(&qbh);
@@ -359,6 +420,7 @@
 		bmp[ssec >> 5] |= cpu_to_le32(1 << (ssec & 0x1f));
 		hpfs_mark_4buffers_dirty(&qbh);
 		hpfs_brelse4(&qbh);
+		hpfs_claim_dirband_free(s, dno);
 	}
 }
 
@@ -366,7 +428,7 @@
 			 dnode_secno *dno, struct quad_buffer_head *qbh)
 {
 	struct dnode *d;
-	if (hpfs_count_one_bitmap(s, hpfs_sb(s)->sb_dmap) > FREE_DNODES_ADD) {
+	if (hpfs_get_free_dnodes(s) > FREE_DNODES_ADD) {
 		if (!(*dno = alloc_in_dirband(s, near)))
 			if (!(*dno = hpfs_alloc_sector(s, near, 4, 0))) return NULL;
 	} else {
diff --git a/fs/hpfs/buffer.c b/fs/hpfs/buffer.c
index 4d0a1af..139ef16 100644
--- a/fs/hpfs/buffer.c
+++ b/fs/hpfs/buffer.c
@@ -86,7 +86,6 @@
 void *hpfs_map_4sectors(struct super_block *s, unsigned secno, struct quad_buffer_head *qbh,
 		   int ahead)
 {
-	struct buffer_head *bh;
 	char *data;
 
 	hpfs_lock_assert(s);
@@ -100,34 +99,32 @@
 
 	hpfs_prefetch_sectors(s, secno, 4 + ahead);
 
+	if (!(qbh->bh[0] = sb_bread(s, secno + 0))) goto bail0;
+	if (!(qbh->bh[1] = sb_bread(s, secno + 1))) goto bail1;
+	if (!(qbh->bh[2] = sb_bread(s, secno + 2))) goto bail2;
+	if (!(qbh->bh[3] = sb_bread(s, secno + 3))) goto bail3;
+
+	if (likely(qbh->bh[1]->b_data == qbh->bh[0]->b_data + 1 * 512) &&
+	    likely(qbh->bh[2]->b_data == qbh->bh[0]->b_data + 2 * 512) &&
+	    likely(qbh->bh[3]->b_data == qbh->bh[0]->b_data + 3 * 512)) {
+		return qbh->data = qbh->bh[0]->b_data;
+	}
+
 	qbh->data = data = kmalloc(2048, GFP_NOFS);
 	if (!data) {
 		printk("HPFS: hpfs_map_4sectors: out of memory\n");
-		goto bail;
+		goto bail4;
 	}
 
-	qbh->bh[0] = bh = sb_bread(s, secno);
-	if (!bh)
-		goto bail0;
-	memcpy(data, bh->b_data, 512);
-
-	qbh->bh[1] = bh = sb_bread(s, secno + 1);
-	if (!bh)
-		goto bail1;
-	memcpy(data + 512, bh->b_data, 512);
-
-	qbh->bh[2] = bh = sb_bread(s, secno + 2);
-	if (!bh)
-		goto bail2;
-	memcpy(data + 2 * 512, bh->b_data, 512);
-
-	qbh->bh[3] = bh = sb_bread(s, secno + 3);
-	if (!bh)
-		goto bail3;
-	memcpy(data + 3 * 512, bh->b_data, 512);
+	memcpy(data + 0 * 512, qbh->bh[0]->b_data, 512);
+	memcpy(data + 1 * 512, qbh->bh[1]->b_data, 512);
+	memcpy(data + 2 * 512, qbh->bh[2]->b_data, 512);
+	memcpy(data + 3 * 512, qbh->bh[3]->b_data, 512);
 
 	return data;
 
+ bail4:
+	brelse(qbh->bh[3]);
  bail3:
 	brelse(qbh->bh[2]);
  bail2:
@@ -135,9 +132,6 @@
  bail1:
 	brelse(qbh->bh[0]);
  bail0:
-	kfree(data);
-	printk("HPFS: hpfs_map_4sectors: read error\n");
- bail:
 	return NULL;
 }
 
@@ -155,44 +149,54 @@
 		return NULL;
 	}
 
-	/*return hpfs_map_4sectors(s, secno, qbh, 0);*/
+	if (!hpfs_get_sector(s, secno + 0, &qbh->bh[0])) goto bail0;
+	if (!hpfs_get_sector(s, secno + 1, &qbh->bh[1])) goto bail1;
+	if (!hpfs_get_sector(s, secno + 2, &qbh->bh[2])) goto bail2;
+	if (!hpfs_get_sector(s, secno + 3, &qbh->bh[3])) goto bail3;
+
+	if (likely(qbh->bh[1]->b_data == qbh->bh[0]->b_data + 1 * 512) &&
+	    likely(qbh->bh[2]->b_data == qbh->bh[0]->b_data + 2 * 512) &&
+	    likely(qbh->bh[3]->b_data == qbh->bh[0]->b_data + 3 * 512)) {
+		return qbh->data = qbh->bh[0]->b_data;
+	}
+
 	if (!(qbh->data = kmalloc(2048, GFP_NOFS))) {
 		printk("HPFS: hpfs_get_4sectors: out of memory\n");
-		return NULL;
+		goto bail4;
 	}
-	if (!(hpfs_get_sector(s, secno, &qbh->bh[0]))) goto bail0;
-	if (!(hpfs_get_sector(s, secno + 1, &qbh->bh[1]))) goto bail1;
-	if (!(hpfs_get_sector(s, secno + 2, &qbh->bh[2]))) goto bail2;
-	if (!(hpfs_get_sector(s, secno + 3, &qbh->bh[3]))) goto bail3;
-	memcpy(qbh->data, qbh->bh[0]->b_data, 512);
-	memcpy(qbh->data + 512, qbh->bh[1]->b_data, 512);
-	memcpy(qbh->data + 2*512, qbh->bh[2]->b_data, 512);
-	memcpy(qbh->data + 3*512, qbh->bh[3]->b_data, 512);
 	return qbh->data;
 
-	bail3:	brelse(qbh->bh[2]);
-	bail2:	brelse(qbh->bh[1]);
-	bail1:	brelse(qbh->bh[0]);
-	bail0:
+bail4:
+	brelse(qbh->bh[3]);
+bail3:
+	brelse(qbh->bh[2]);
+bail2:
+	brelse(qbh->bh[1]);
+bail1:
+	brelse(qbh->bh[0]);
+bail0:
 	return NULL;
 }
 	
 
 void hpfs_brelse4(struct quad_buffer_head *qbh)
 {
-	brelse(qbh->bh[3]);
-	brelse(qbh->bh[2]);
-	brelse(qbh->bh[1]);
+	if (unlikely(qbh->data != qbh->bh[0]->b_data))
+		kfree(qbh->data);
 	brelse(qbh->bh[0]);
-	kfree(qbh->data);
+	brelse(qbh->bh[1]);
+	brelse(qbh->bh[2]);
+	brelse(qbh->bh[3]);
 }	
 
 void hpfs_mark_4buffers_dirty(struct quad_buffer_head *qbh)
 {
-	memcpy(qbh->bh[0]->b_data, qbh->data, 512);
-	memcpy(qbh->bh[1]->b_data, qbh->data + 512, 512);
-	memcpy(qbh->bh[2]->b_data, qbh->data + 2 * 512, 512);
-	memcpy(qbh->bh[3]->b_data, qbh->data + 3 * 512, 512);
+	if (unlikely(qbh->data != qbh->bh[0]->b_data)) {
+		memcpy(qbh->bh[0]->b_data, qbh->data + 0 * 512, 512);
+		memcpy(qbh->bh[1]->b_data, qbh->data + 1 * 512, 512);
+		memcpy(qbh->bh[2]->b_data, qbh->data + 2 * 512, 512);
+		memcpy(qbh->bh[3]->b_data, qbh->data + 3 * 512, 512);
+	}
 	mark_buffer_dirty(qbh->bh[0]);
 	mark_buffer_dirty(qbh->bh[1]);
 	mark_buffer_dirty(qbh->bh[2]);
diff --git a/fs/hpfs/hpfs_fn.h b/fs/hpfs/hpfs_fn.h
index 6797bf8..3ba49c0 100644
--- a/fs/hpfs/hpfs_fn.h
+++ b/fs/hpfs/hpfs_fn.h
@@ -312,7 +312,7 @@
 __printf(2, 3)
 void hpfs_error(struct super_block *, const char *, ...);
 int hpfs_stop_cycles(struct super_block *, int, int *, int *, char *);
-unsigned hpfs_count_one_bitmap(struct super_block *, secno);
+unsigned hpfs_get_free_dnodes(struct super_block *);
 
 /*
  * local time (HPFS) to GMT (Unix)
diff --git a/fs/hpfs/super.c b/fs/hpfs/super.c
index b8d01ef..4534ff6 100644
--- a/fs/hpfs/super.c
+++ b/fs/hpfs/super.c
@@ -121,7 +121,7 @@
 	call_rcu(&hpfs_sb(s)->rcu, lazy_free_sbi);
 }
 
-unsigned hpfs_count_one_bitmap(struct super_block *s, secno secno)
+static unsigned hpfs_count_one_bitmap(struct super_block *s, secno secno)
 {
 	struct quad_buffer_head qbh;
 	unsigned long *bits;
@@ -129,7 +129,7 @@
 
 	bits = hpfs_map_4sectors(s, secno, &qbh, 0);
 	if (!bits)
-		return 0;
+		return (unsigned)-1;
 	count = bitmap_weight(bits, 2048 * BITS_PER_BYTE);
 	hpfs_brelse4(&qbh);
 	return count;
@@ -144,30 +144,45 @@
 		hpfs_prefetch_bitmap(s, n);
 	}
 	for (n = 0; n < n_bands; n++) {
+		unsigned c;
 		hpfs_prefetch_bitmap(s, n + COUNT_RD_AHEAD);
-		count += hpfs_count_one_bitmap(s, le32_to_cpu(hpfs_sb(s)->sb_bmp_dir[n]));
+		c = hpfs_count_one_bitmap(s, le32_to_cpu(hpfs_sb(s)->sb_bmp_dir[n]));
+		if (c != (unsigned)-1)
+			count += c;
 	}
 	return count;
 }
 
+unsigned hpfs_get_free_dnodes(struct super_block *s)
+{
+	struct hpfs_sb_info *sbi = hpfs_sb(s);
+	if (sbi->sb_n_free_dnodes == (unsigned)-1) {
+		unsigned c = hpfs_count_one_bitmap(s, sbi->sb_dmap);
+		if (c == (unsigned)-1)
+			return 0;
+		sbi->sb_n_free_dnodes = c;
+	}
+	return sbi->sb_n_free_dnodes;
+}
+
 static int hpfs_statfs(struct dentry *dentry, struct kstatfs *buf)
 {
 	struct super_block *s = dentry->d_sb;
 	struct hpfs_sb_info *sbi = hpfs_sb(s);
 	u64 id = huge_encode_dev(s->s_bdev->bd_dev);
+
 	hpfs_lock(s);
 
-	/*if (sbi->sb_n_free == -1) {*/
+	if (sbi->sb_n_free == (unsigned)-1)
 		sbi->sb_n_free = count_bitmaps(s);
-		sbi->sb_n_free_dnodes = hpfs_count_one_bitmap(s, sbi->sb_dmap);
-	/*}*/
+
 	buf->f_type = s->s_magic;
 	buf->f_bsize = 512;
 	buf->f_blocks = sbi->sb_fs_size;
 	buf->f_bfree = sbi->sb_n_free;
 	buf->f_bavail = sbi->sb_n_free;
 	buf->f_files = sbi->sb_dirband_size / 4;
-	buf->f_ffree = sbi->sb_n_free_dnodes;
+	buf->f_ffree = hpfs_get_free_dnodes(s);
 	buf->f_fsid.val[0] = (u32)id;
 	buf->f_fsid.val[1] = (u32)(id >> 32);
 	buf->f_namelen = 254;
diff --git a/fs/jbd2/transaction.c b/fs/jbd2/transaction.c
index 8360674..60bb365 100644
--- a/fs/jbd2/transaction.c
+++ b/fs/jbd2/transaction.c
@@ -514,11 +514,13 @@
 	 * similarly constrained call sites
 	 */
 	ret = start_this_handle(journal, handle, GFP_NOFS);
-	if (ret < 0)
+	if (ret < 0) {
 		jbd2_journal_free_reserved(handle);
+		return ret;
+	}
 	handle->h_type = type;
 	handle->h_line_no = line_no;
-	return ret;
+	return 0;
 }
 EXPORT_SYMBOL(jbd2_journal_start_reserved);
 
diff --git a/fs/jfs/acl.c b/fs/jfs/acl.c
index e973b85..5a8ea16 100644
--- a/fs/jfs/acl.c
+++ b/fs/jfs/acl.c
@@ -86,6 +86,8 @@
 		rc = posix_acl_equiv_mode(acl, &inode->i_mode);
 		if (rc < 0)
 			return rc;
+		inode->i_ctime = CURRENT_TIME;
+		mark_inode_dirty(inode);
 		if (rc == 0)
 			acl = NULL;
 		break;
diff --git a/fs/jfs/jfs_logmgr.c b/fs/jfs/jfs_logmgr.c
index 360d27c..8d811e0 100644
--- a/fs/jfs/jfs_logmgr.c
+++ b/fs/jfs/jfs_logmgr.c
@@ -1998,20 +1998,20 @@
 
 	bio = bio_alloc(GFP_NOFS, 1);
 
-	bio->bi_sector = bp->l_blkno << (log->l2bsize - 9);
+	bio->bi_iter.bi_sector = bp->l_blkno << (log->l2bsize - 9);
 	bio->bi_bdev = log->bdev;
 	bio->bi_io_vec[0].bv_page = bp->l_page;
 	bio->bi_io_vec[0].bv_len = LOGPSIZE;
 	bio->bi_io_vec[0].bv_offset = bp->l_offset;
 
 	bio->bi_vcnt = 1;
-	bio->bi_size = LOGPSIZE;
+	bio->bi_iter.bi_size = LOGPSIZE;
 
 	bio->bi_end_io = lbmIODone;
 	bio->bi_private = bp;
 	/*check if journaling to disk has been disabled*/
 	if (log->no_integrity) {
-		bio->bi_size = 0;
+		bio->bi_iter.bi_size = 0;
 		lbmIODone(bio, 0);
 	} else {
 		submit_bio(READ_SYNC, bio);
@@ -2144,21 +2144,21 @@
 	jfs_info("lbmStartIO\n");
 
 	bio = bio_alloc(GFP_NOFS, 1);
-	bio->bi_sector = bp->l_blkno << (log->l2bsize - 9);
+	bio->bi_iter.bi_sector = bp->l_blkno << (log->l2bsize - 9);
 	bio->bi_bdev = log->bdev;
 	bio->bi_io_vec[0].bv_page = bp->l_page;
 	bio->bi_io_vec[0].bv_len = LOGPSIZE;
 	bio->bi_io_vec[0].bv_offset = bp->l_offset;
 
 	bio->bi_vcnt = 1;
-	bio->bi_size = LOGPSIZE;
+	bio->bi_iter.bi_size = LOGPSIZE;
 
 	bio->bi_end_io = lbmIODone;
 	bio->bi_private = bp;
 
 	/* check if journaling to disk has been disabled */
 	if (log->no_integrity) {
-		bio->bi_size = 0;
+		bio->bi_iter.bi_size = 0;
 		lbmIODone(bio, 0);
 	} else {
 		submit_bio(WRITE_SYNC, bio);
diff --git a/fs/jfs/jfs_metapage.c b/fs/jfs/jfs_metapage.c
index d165cde..49ba7ff 100644
--- a/fs/jfs/jfs_metapage.c
+++ b/fs/jfs/jfs_metapage.c
@@ -416,7 +416,7 @@
 			 * count from hitting zero before we're through
 			 */
 			inc_io(page);
-			if (!bio->bi_size)
+			if (!bio->bi_iter.bi_size)
 				goto dump_bio;
 			submit_bio(WRITE, bio);
 			nr_underway++;
@@ -438,7 +438,7 @@
 
 		bio = bio_alloc(GFP_NOFS, 1);
 		bio->bi_bdev = inode->i_sb->s_bdev;
-		bio->bi_sector = pblock << (inode->i_blkbits - 9);
+		bio->bi_iter.bi_sector = pblock << (inode->i_blkbits - 9);
 		bio->bi_end_io = metapage_write_end_io;
 		bio->bi_private = page;
 
@@ -452,7 +452,7 @@
 	if (bio) {
 		if (bio_add_page(bio, page, bio_bytes, bio_offset) < bio_bytes)
 				goto add_failed;
-		if (!bio->bi_size)
+		if (!bio->bi_iter.bi_size)
 			goto dump_bio;
 
 		submit_bio(WRITE, bio);
@@ -517,7 +517,8 @@
 
 			bio = bio_alloc(GFP_NOFS, 1);
 			bio->bi_bdev = inode->i_sb->s_bdev;
-			bio->bi_sector = pblock << (inode->i_blkbits - 9);
+			bio->bi_iter.bi_sector =
+				pblock << (inode->i_blkbits - 9);
 			bio->bi_end_io = metapage_read_end_io;
 			bio->bi_private = page;
 			len = xlen << inode->i_blkbits;
diff --git a/fs/jfs/xattr.c b/fs/jfs/xattr.c
index 5324e4e..46325d5 100644
--- a/fs/jfs/xattr.c
+++ b/fs/jfs/xattr.c
@@ -791,6 +791,19 @@
 			/* Completely new ea list */
 			xattr_size = sizeof (struct jfs_ea_list);
 
+		/*
+		 * The size of EA value is limited by on-disk format up to
+		 *  __le16, there would be an overflow if the size is equal
+		 * to XATTR_SIZE_MAX (65536).  In order to avoid this issue,
+		 * we can pre-check the value size against USHRT_MAX, and
+		 * return -E2BIG in this case, which is consistent with the
+		 * VFS setxattr interface.
+		 */
+		if (value_len >= USHRT_MAX) {
+			rc = -E2BIG;
+			goto release;
+		}
+
 		ea = (struct jfs_ea *) ((char *) ealist + xattr_size);
 		ea->flag = 0;
 		ea->namelen = namelen;
@@ -805,7 +818,7 @@
 	/* DEBUG - If we did this right, these number match */
 	if (xattr_size != new_size) {
 		printk(KERN_ERR
-		       "jfs_xsetattr: xattr_size = %d, new_size = %d\n",
+		       "__jfs_setxattr: xattr_size = %d, new_size = %d\n",
 		       xattr_size, new_size);
 
 		rc = -EINVAL;
@@ -841,9 +854,6 @@
 	int rc;
 	tid_t tid;
 
-	if ((rc = can_set_xattr(inode, name, value, value_len)))
-		return rc;
-
 	/*
 	 * If this is a request for a synthetic attribute in the system.*
 	 * namespace use the generic infrastructure to resolve a handler
@@ -852,6 +862,9 @@
 	if (!strncmp(name, XATTR_SYSTEM_PREFIX, XATTR_SYSTEM_PREFIX_LEN))
 		return generic_setxattr(dentry, name, value, value_len, flags);
 
+	if ((rc = can_set_xattr(inode, name, value, value_len)))
+		return rc;
+
 	if (value == NULL) {	/* empty EA, do not remove */
 		value = "";
 		value_len = 0;
@@ -1021,9 +1034,6 @@
 	int rc;
 	tid_t tid;
 
-	if ((rc = can_set_xattr(inode, name, NULL, 0)))
-		return rc;
-
 	/*
 	 * If this is a request for a synthetic attribute in the system.*
 	 * namespace use the generic infrastructure to resolve a handler
@@ -1032,6 +1042,9 @@
 	if (!strncmp(name, XATTR_SYSTEM_PREFIX, XATTR_SYSTEM_PREFIX_LEN))
 		return generic_removexattr(dentry, name);
 
+	if ((rc = can_set_xattr(inode, name, NULL, 0)))
+		return rc;
+
 	tid = txBegin(inode->i_sb, 0);
 	mutex_lock(&ji->commit_mutex);
 	rc = __jfs_setxattr(tid, dentry->d_inode, name, NULL, 0, XATTR_REPLACE);
@@ -1048,7 +1061,7 @@
  * attributes are handled directly.
  */
 const struct xattr_handler *jfs_xattr_handlers[] = {
-#ifdef JFS_POSIX_ACL
+#ifdef CONFIG_JFS_POSIX_ACL
 	&posix_acl_access_xattr_handler,
 	&posix_acl_default_xattr_handler,
 #endif
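
The overflow guard added above exists because the on-disk EA list stores the
value length in a 16-bit field.  A minimal stand-alone illustration (not jfs
code) of why a value of exactly XATTR_SIZE_MAX cannot be represented:

	#include <stdio.h>
	#include <stdint.h>

	int main(void)
	{
		size_t value_len = 65536;		/* XATTR_SIZE_MAX */
		uint16_t on_disk = (uint16_t)value_len;	/* width of a __le16 field */

		/* prints "65536 -> 0": the length silently wraps to zero */
		printf("%zu -> %u\n", value_len, on_disk);
		return 0;
	}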
diff --git a/fs/kernfs/dir.c b/fs/kernfs/dir.c
index 5104cf5..bd6e18b 100644
--- a/fs/kernfs/dir.c
+++ b/fs/kernfs/dir.c
@@ -187,19 +187,23 @@
 
 	kn->u.completion = (void *)&wait;
 
-	rwsem_acquire(&kn->dep_map, 0, 0, _RET_IP_);
+	if (kn->flags & KERNFS_LOCKDEP)
+		rwsem_acquire(&kn->dep_map, 0, 0, _RET_IP_);
 	/* atomic_add_return() is a mb(), put_active() will always see
 	 * the updated kn->u.completion.
 	 */
 	v = atomic_add_return(KN_DEACTIVATED_BIAS, &kn->active);
 
 	if (v != KN_DEACTIVATED_BIAS) {
-		lock_contended(&kn->dep_map, _RET_IP_);
+		if (kn->flags & KERNFS_LOCKDEP)
+			lock_contended(&kn->dep_map, _RET_IP_);
 		wait_for_completion(&wait);
 	}
 
-	lock_acquired(&kn->dep_map, _RET_IP_);
-	rwsem_release(&kn->dep_map, 1, _RET_IP_);
+	if (kn->flags & KERNFS_LOCKDEP) {
+		lock_acquired(&kn->dep_map, _RET_IP_);
+		rwsem_release(&kn->dep_map, 1, _RET_IP_);
+	}
 }
 
 /**
diff --git a/fs/lockd/svclock.c b/fs/lockd/svclock.c
index e066a39..ab798a88 100644
--- a/fs/lockd/svclock.c
+++ b/fs/lockd/svclock.c
@@ -779,6 +779,7 @@
 	struct nlm_file		*file = block->b_file;
 	struct nlm_lock		*lock = &block->b_call->a_args.lock;
 	int			error;
+	loff_t			fl_start, fl_end;
 
 	dprintk("lockd: grant blocked lock %p\n", block);
 
@@ -796,9 +797,16 @@
 	}
 
 	/* Try the lock operation again */
+	/* vfs_lock_file() can mangle fl_start and fl_end, but we need
+	 * them unchanged for the GRANT_MSG
+	 */
 	lock->fl.fl_flags |= FL_SLEEP;
+	fl_start = lock->fl.fl_start;
+	fl_end = lock->fl.fl_end;
 	error = vfs_lock_file(file->f_file, F_SETLK, &lock->fl, NULL);
 	lock->fl.fl_flags &= ~FL_SLEEP;
+	lock->fl.fl_start = fl_start;
+	lock->fl.fl_end = fl_end;
 
 	switch (error) {
 	case 0:
diff --git a/fs/logfs/dev_bdev.c b/fs/logfs/dev_bdev.c
index 0f95f0d..76279e1 100644
--- a/fs/logfs/dev_bdev.c
+++ b/fs/logfs/dev_bdev.c
@@ -26,9 +26,9 @@
 	bio_vec.bv_len = PAGE_SIZE;
 	bio_vec.bv_offset = 0;
 	bio.bi_vcnt = 1;
-	bio.bi_size = PAGE_SIZE;
 	bio.bi_bdev = bdev;
-	bio.bi_sector = page->index * (PAGE_SIZE >> 9);
+	bio.bi_iter.bi_sector = page->index * (PAGE_SIZE >> 9);
+	bio.bi_iter.bi_size = PAGE_SIZE;
 
 	return submit_bio_wait(rw, &bio);
 }
@@ -56,22 +56,18 @@
 static void writeseg_end_io(struct bio *bio, int err)
 {
 	const int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
-	struct bio_vec *bvec = bio->bi_io_vec + bio->bi_vcnt - 1;
+	struct bio_vec *bvec;
+	int i;
 	struct super_block *sb = bio->bi_private;
 	struct logfs_super *super = logfs_super(sb);
-	struct page *page;
 
 	BUG_ON(!uptodate); /* FIXME: Retry io or write elsewhere */
 	BUG_ON(err);
-	BUG_ON(bio->bi_vcnt == 0);
-	do {
-		page = bvec->bv_page;
-		if (--bvec >= bio->bi_io_vec)
-			prefetchw(&bvec->bv_page->flags);
 
-		end_page_writeback(page);
-		page_cache_release(page);
-	} while (bvec >= bio->bi_io_vec);
+	bio_for_each_segment_all(bvec, bio, i) {
+		end_page_writeback(bvec->bv_page);
+		page_cache_release(bvec->bv_page);
+	}
 	bio_put(bio);
 	if (atomic_dec_and_test(&super->s_pending_writes))
 		wake_up(&wq);
@@ -96,9 +92,9 @@
 		if (i >= max_pages) {
 			/* Block layer cannot split bios :( */
 			bio->bi_vcnt = i;
-			bio->bi_size = i * PAGE_SIZE;
+			bio->bi_iter.bi_size = i * PAGE_SIZE;
 			bio->bi_bdev = super->s_bdev;
-			bio->bi_sector = ofs >> 9;
+			bio->bi_iter.bi_sector = ofs >> 9;
 			bio->bi_private = sb;
 			bio->bi_end_io = writeseg_end_io;
 			atomic_inc(&super->s_pending_writes);
@@ -123,9 +119,9 @@
 		unlock_page(page);
 	}
 	bio->bi_vcnt = nr_pages;
-	bio->bi_size = nr_pages * PAGE_SIZE;
+	bio->bi_iter.bi_size = nr_pages * PAGE_SIZE;
 	bio->bi_bdev = super->s_bdev;
-	bio->bi_sector = ofs >> 9;
+	bio->bi_iter.bi_sector = ofs >> 9;
 	bio->bi_private = sb;
 	bio->bi_end_io = writeseg_end_io;
 	atomic_inc(&super->s_pending_writes);
@@ -188,9 +184,9 @@
 		if (i >= max_pages) {
 			/* Block layer cannot split bios :( */
 			bio->bi_vcnt = i;
-			bio->bi_size = i * PAGE_SIZE;
+			bio->bi_iter.bi_size = i * PAGE_SIZE;
 			bio->bi_bdev = super->s_bdev;
-			bio->bi_sector = ofs >> 9;
+			bio->bi_iter.bi_sector = ofs >> 9;
 			bio->bi_private = sb;
 			bio->bi_end_io = erase_end_io;
 			atomic_inc(&super->s_pending_writes);
@@ -209,9 +205,9 @@
 		bio->bi_io_vec[i].bv_offset = 0;
 	}
 	bio->bi_vcnt = nr_pages;
-	bio->bi_size = nr_pages * PAGE_SIZE;
+	bio->bi_iter.bi_size = nr_pages * PAGE_SIZE;
 	bio->bi_bdev = super->s_bdev;
-	bio->bi_sector = ofs >> 9;
+	bio->bi_iter.bi_sector = ofs >> 9;
 	bio->bi_private = sb;
 	bio->bi_end_io = erase_end_io;
 	atomic_inc(&super->s_pending_writes);
diff --git a/fs/mpage.c b/fs/mpage.c
index 0face1c..4979ffa 100644
--- a/fs/mpage.c
+++ b/fs/mpage.c
@@ -43,16 +43,14 @@
  */
 static void mpage_end_io(struct bio *bio, int err)
 {
-	const int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
-	struct bio_vec *bvec = bio->bi_io_vec + bio->bi_vcnt - 1;
+	struct bio_vec *bv;
+	int i;
 
-	do {
-		struct page *page = bvec->bv_page;
+	bio_for_each_segment_all(bv, bio, i) {
+		struct page *page = bv->bv_page;
 
-		if (--bvec >= bio->bi_io_vec)
-			prefetchw(&bvec->bv_page->flags);
 		if (bio_data_dir(bio) == READ) {
-			if (uptodate) {
+			if (!err) {
 				SetPageUptodate(page);
 			} else {
 				ClearPageUptodate(page);
@@ -60,14 +58,15 @@
 			}
 			unlock_page(page);
 		} else { /* bio_data_dir(bio) == WRITE */
-			if (!uptodate) {
+			if (err) {
 				SetPageError(page);
 				if (page->mapping)
 					set_bit(AS_EIO, &page->mapping->flags);
 			}
 			end_page_writeback(page);
 		}
-	} while (bvec >= bio->bi_io_vec);
+	}
+
 	bio_put(bio);
 }
 
@@ -94,7 +93,7 @@
 
 	if (bio) {
 		bio->bi_bdev = bdev;
-		bio->bi_sector = first_sector;
+		bio->bi_iter.bi_sector = first_sector;
 	}
 	return bio;
 }
diff --git a/fs/namei.c b/fs/namei.c
index bcb838e..385f781 100644
--- a/fs/namei.c
+++ b/fs/namei.c
@@ -196,6 +196,7 @@
 		goto error;
 
 	result->uptr = filename;
+	result->aname = NULL;
 	audit_getname(result);
 	return result;
 
@@ -209,7 +210,35 @@
 {
 	return getname_flags(filename, 0, NULL);
 }
-EXPORT_SYMBOL(getname);
+
+/*
+ * The "getname_kernel()" interface doesn't do pathnames longer
+ * than EMBEDDED_NAME_MAX. Deal with it - you're a kernel user.
+ */
+struct filename *
+getname_kernel(const char * filename)
+{
+	struct filename *result;
+	char *kname;
+	int len;
+
+	len = strlen(filename);
+	if (len >= EMBEDDED_NAME_MAX)
+		return ERR_PTR(-ENAMETOOLONG);
+
+	result = __getname();
+	if (unlikely(!result))
+		return ERR_PTR(-ENOMEM);
+
+	kname = (char *)result + sizeof(*result);
+	result->name = kname;
+	result->uptr = NULL;
+	result->aname = NULL;
+	result->separate = false;
+
+	strlcpy(kname, filename, EMBEDDED_NAME_MAX);
+	return result;
+}
 
 #ifdef CONFIG_AUDITSYSCALL
 void putname(struct filename *name)
@@ -3927,10 +3956,13 @@
 	done_path_create(&new_path, new_dentry);
 	if (delegated_inode) {
 		error = break_deleg_wait(&delegated_inode);
-		if (!error)
+		if (!error) {
+			path_put(&old_path);
 			goto retry;
+		}
 	}
 	if (retry_estale(error, how)) {
+		path_put(&old_path);
 		how |= LOOKUP_REVAL;
 		goto retry;
 	}
diff --git a/fs/nfs/blocklayout/blocklayout.c b/fs/nfs/blocklayout/blocklayout.c
index e242bbf..56ff823 100644
--- a/fs/nfs/blocklayout/blocklayout.c
+++ b/fs/nfs/blocklayout/blocklayout.c
@@ -134,8 +134,8 @@
 	if (bio) {
 		get_parallel(bio->bi_private);
 		dprintk("%s submitting %s bio %u@%llu\n", __func__,
-			rw == READ ? "read" : "write",
-			bio->bi_size, (unsigned long long)bio->bi_sector);
+			rw == READ ? "read" : "write", bio->bi_iter.bi_size,
+			(unsigned long long)bio->bi_iter.bi_sector);
 		submit_bio(rw, bio);
 	}
 	return NULL;
@@ -156,7 +156,8 @@
 	}
 
 	if (bio) {
-		bio->bi_sector = isect - be->be_f_offset + be->be_v_offset;
+		bio->bi_iter.bi_sector = isect - be->be_f_offset +
+			be->be_v_offset;
 		bio->bi_bdev = be->be_mdev;
 		bio->bi_end_io = end_io;
 		bio->bi_private = par;
@@ -201,18 +202,14 @@
 static void bl_end_io_read(struct bio *bio, int err)
 {
 	struct parallel_io *par = bio->bi_private;
-	const int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
-	struct bio_vec *bvec = bio->bi_io_vec + bio->bi_vcnt - 1;
+	struct bio_vec *bvec;
+	int i;
 
-	do {
-		struct page *page = bvec->bv_page;
+	if (!err)
+		bio_for_each_segment_all(bvec, bio, i)
+			SetPageUptodate(bvec->bv_page);
 
-		if (--bvec >= bio->bi_io_vec)
-			prefetchw(&bvec->bv_page->flags);
-		if (uptodate)
-			SetPageUptodate(page);
-	} while (bvec >= bio->bi_io_vec);
-	if (!uptodate) {
+	if (err) {
 		struct nfs_read_data *rdata = par->data;
 		struct nfs_pgio_header *header = rdata->header;
 
@@ -383,20 +380,16 @@
 static void bl_end_io_write_zero(struct bio *bio, int err)
 {
 	struct parallel_io *par = bio->bi_private;
-	const int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
-	struct bio_vec *bvec = bio->bi_io_vec + bio->bi_vcnt - 1;
+	struct bio_vec *bvec;
+	int i;
 
-	do {
-		struct page *page = bvec->bv_page;
-
-		if (--bvec >= bio->bi_io_vec)
-			prefetchw(&bvec->bv_page->flags);
+	bio_for_each_segment_all(bvec, bio, i) {
 		/* This is the zeroing page we added */
-		end_page_writeback(page);
-		page_cache_release(page);
-	} while (bvec >= bio->bi_io_vec);
+		end_page_writeback(bvec->bv_page);
+		page_cache_release(bvec->bv_page);
+	}
 
-	if (unlikely(!uptodate)) {
+	if (unlikely(err)) {
 		struct nfs_write_data *data = par->data;
 		struct nfs_pgio_header *header = data->header;
 
@@ -519,7 +512,7 @@
 	isect = (page->index << PAGE_CACHE_SECTOR_SHIFT) +
 		(offset / SECTOR_SIZE);
 
-	bio->bi_sector = isect - be->be_f_offset + be->be_v_offset;
+	bio->bi_iter.bi_sector = isect - be->be_f_offset + be->be_v_offset;
 	bio->bi_bdev = be->be_mdev;
 	bio->bi_end_io = bl_read_single_end_io;
 
diff --git a/fs/nfs/dir.c b/fs/nfs/dir.c
index b266f73..4a48fe4 100644
--- a/fs/nfs/dir.c
+++ b/fs/nfs/dir.c
@@ -274,6 +274,15 @@
 	return -EBADCOOKIE;
 }
 
+static bool
+nfs_readdir_inode_mapping_valid(struct nfs_inode *nfsi)
+{
+	if (nfsi->cache_validity & (NFS_INO_INVALID_ATTR|NFS_INO_INVALID_DATA))
+		return false;
+	smp_rmb();
+	return !test_bit(NFS_INO_INVALIDATING, &nfsi->flags);
+}
+
 static
 int nfs_readdir_search_for_cookie(struct nfs_cache_array *array, nfs_readdir_descriptor_t *desc)
 {
@@ -287,8 +296,8 @@
 			struct nfs_open_dir_context *ctx = desc->file->private_data;
 
 			new_pos = desc->current_index + i;
-			if (ctx->attr_gencount != nfsi->attr_gencount
-			    || (nfsi->cache_validity & (NFS_INO_INVALID_ATTR|NFS_INO_INVALID_DATA))) {
+			if (ctx->attr_gencount != nfsi->attr_gencount ||
+			    !nfs_readdir_inode_mapping_valid(nfsi)) {
 				ctx->duped = 0;
 				ctx->attr_gencount = nfsi->attr_gencount;
 			} else if (new_pos < desc->ctx->pos) {
@@ -1837,6 +1846,11 @@
 							GFP_KERNEL)) {
 		SetPageUptodate(page);
 		unlock_page(page);
+		/*
+		 * add_to_page_cache_lru() grabs an extra page refcount.
+		 * Drop it here to avoid leaking this page later.
+		 */
+		page_cache_release(page);
 	} else
 		__free_page(page);
 
diff --git a/fs/nfs/inode.c b/fs/nfs/inode.c
index ea00b34..360114a 100644
--- a/fs/nfs/inode.c
+++ b/fs/nfs/inode.c
@@ -164,17 +164,16 @@
 	if (S_ISREG(mode) || S_ISDIR(mode) || S_ISLNK(mode)) {
 		nfs_fscache_invalidate(inode);
 		nfsi->cache_validity |= NFS_INO_INVALID_ATTR
-					| NFS_INO_INVALID_LABEL
 					| NFS_INO_INVALID_DATA
 					| NFS_INO_INVALID_ACCESS
 					| NFS_INO_INVALID_ACL
 					| NFS_INO_REVAL_PAGECACHE;
 	} else
 		nfsi->cache_validity |= NFS_INO_INVALID_ATTR
-					| NFS_INO_INVALID_LABEL
 					| NFS_INO_INVALID_ACCESS
 					| NFS_INO_INVALID_ACL
 					| NFS_INO_REVAL_PAGECACHE;
+	nfs_zap_label_cache_locked(nfsi);
 }
 
 void nfs_zap_caches(struct inode *inode)
@@ -266,6 +265,13 @@
 }
 
 #ifdef CONFIG_NFS_V4_SECURITY_LABEL
+static void nfs_clear_label_invalid(struct inode *inode)
+{
+	spin_lock(&inode->i_lock);
+	NFS_I(inode)->cache_validity &= ~NFS_INO_INVALID_LABEL;
+	spin_unlock(&inode->i_lock);
+}
+
 void nfs_setsecurity(struct inode *inode, struct nfs_fattr *fattr,
 					struct nfs4_label *label)
 {
@@ -283,6 +289,7 @@
 					__func__,
 					(char *)label->label,
 					label->len, error);
+		nfs_clear_label_invalid(inode);
 	}
 }
 
@@ -977,11 +984,11 @@
 		if (ret < 0)
 			return ret;
 	}
-	spin_lock(&inode->i_lock);
-	nfsi->cache_validity &= ~NFS_INO_INVALID_DATA;
-	if (S_ISDIR(inode->i_mode))
+	if (S_ISDIR(inode->i_mode)) {
+		spin_lock(&inode->i_lock);
 		memset(nfsi->cookieverf, 0, sizeof(nfsi->cookieverf));
-	spin_unlock(&inode->i_lock);
+		spin_unlock(&inode->i_lock);
+	}
 	nfs_inc_stats(inode, NFSIOS_DATAINVALIDATE);
 	nfs_fscache_wait_on_invalidate(inode);
 
@@ -1008,6 +1015,7 @@
 int nfs_revalidate_mapping(struct inode *inode, struct address_space *mapping)
 {
 	struct nfs_inode *nfsi = NFS_I(inode);
+	unsigned long *bitlock = &nfsi->flags;
 	int ret = 0;
 
 	/* swapfiles are not supposed to be shared. */
@@ -1019,12 +1027,46 @@
 		if (ret < 0)
 			goto out;
 	}
-	if (nfsi->cache_validity & NFS_INO_INVALID_DATA) {
-		trace_nfs_invalidate_mapping_enter(inode);
-		ret = nfs_invalidate_mapping(inode, mapping);
-		trace_nfs_invalidate_mapping_exit(inode, ret);
+
+	/*
+	 * We must clear NFS_INO_INVALID_DATA first to ensure that
+	 * invalidations that come in while we're shooting down the mappings
+	 * are respected. But, that leaves a race window where one revalidator
+	 * can clear the flag, and then another checks it before the mapping
+	 * gets invalidated. Fix that by serializing access to this part of
+	 * the function.
+	 *
+	 * At the same time, we need to allow other tasks to see whether we
+	 * might be in the middle of invalidating the pages, so we only set
+	 * the bit lock here if it looks like we're going to be doing that.
+	 */
+	for (;;) {
+		ret = wait_on_bit(bitlock, NFS_INO_INVALIDATING,
+				  nfs_wait_bit_killable, TASK_KILLABLE);
+		if (ret)
+			goto out;
+		spin_lock(&inode->i_lock);
+		if (test_bit(NFS_INO_INVALIDATING, bitlock)) {
+			spin_unlock(&inode->i_lock);
+			continue;
+		}
+		if (nfsi->cache_validity & NFS_INO_INVALID_DATA)
+			break;
+		spin_unlock(&inode->i_lock);
+		goto out;
 	}
 
+	set_bit(NFS_INO_INVALIDATING, bitlock);
+	smp_wmb();
+	nfsi->cache_validity &= ~NFS_INO_INVALID_DATA;
+	spin_unlock(&inode->i_lock);
+	trace_nfs_invalidate_mapping_enter(inode);
+	ret = nfs_invalidate_mapping(inode, mapping);
+	trace_nfs_invalidate_mapping_exit(inode, ret);
+
+	clear_bit_unlock(NFS_INO_INVALIDATING, bitlock);
+	smp_mb__after_clear_bit();
+	wake_up_bit(bitlock, NFS_INO_INVALIDATING);
 out:
 	return ret;
 }
@@ -1613,7 +1655,7 @@
 		inode->i_blocks = fattr->du.nfs2.blocks;
 
 	/* Update attrtimeo value if we're out of the unstable period */
-	if (invalid & (NFS_INO_INVALID_ATTR|NFS_INO_INVALID_LABEL)) {
+	if (invalid & NFS_INO_INVALID_ATTR) {
 		nfs_inc_stats(inode, NFSIOS_ATTRINVALIDATE);
 		nfsi->attrtimeo = NFS_MINATTRTIMEO(inode);
 		nfsi->attrtimeo_timestamp = now;
@@ -1626,7 +1668,6 @@
 		}
 	}
 	invalid &= ~NFS_INO_INVALID_ATTR;
-	invalid &= ~NFS_INO_INVALID_LABEL;
 	/* Don't invalidate the data if we were to blame */
 	if (!(S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode)
 				|| S_ISLNK(inode->i_mode)))
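
The NFS_INO_INVALIDATING change above follows the usual bit-lock idiom: one
task claims a flag bit, performs the slow work outside the spinlock, then
releases the bit and wakes any waiters.  A rough sketch of the pattern with
placeholder names (MY_BIT, my_wait_killable and do_slow_work are assumptions,
not the NFS symbols):

	for (;;) {
		ret = wait_on_bit(&flags, MY_BIT, my_wait_killable,
				  TASK_KILLABLE);
		if (ret)
			return ret;		/* interrupted by a signal */
		spin_lock(&lock);
		if (!test_bit(MY_BIT, &flags))
			break;			/* bit free; spinlock still held */
		spin_unlock(&lock);		/* lost the race, wait again */
	}
	set_bit(MY_BIT, &flags);
	spin_unlock(&lock);

	do_slow_work();				/* e.g. shoot down the mapping */

	clear_bit_unlock(MY_BIT, &flags);	/* release, with barrier */
	smp_mb__after_clear_bit();
	wake_up_bit(&flags, MY_BIT);		/* wake tasks in wait_on_bit() */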
diff --git a/fs/nfs/internal.h b/fs/nfs/internal.h
index 8b5cc04..b46cf5a 100644
--- a/fs/nfs/internal.h
+++ b/fs/nfs/internal.h
@@ -176,7 +176,8 @@
 extern struct nfs_server *nfs4_create_referral_server(struct nfs_clone_mount *,
 						      struct nfs_fh *);
 extern int nfs4_update_server(struct nfs_server *server, const char *hostname,
-					struct sockaddr *sap, size_t salen);
+					struct sockaddr *sap, size_t salen,
+					struct net *net);
 extern void nfs_free_server(struct nfs_server *server);
 extern struct nfs_server *nfs_clone_server(struct nfs_server *,
 					   struct nfs_fh *,
@@ -279,9 +280,18 @@
 	}
 	return;
 }
+
+static inline void nfs_zap_label_cache_locked(struct nfs_inode *nfsi)
+{
+	if (nfs_server_capable(&nfsi->vfs_inode, NFS_CAP_SECURITY_LABEL))
+		nfsi->cache_validity |= NFS_INO_INVALID_LABEL;
+}
 #else
 static inline struct nfs4_label *nfs4_label_alloc(struct nfs_server *server, gfp_t flags) { return NULL; }
 static inline void nfs4_label_free(void *label) {}
+static inline void nfs_zap_label_cache_locked(struct nfs_inode *nfsi)
+{
+}
 #endif /* CONFIG_NFS_V4_SECURITY_LABEL */
 
 /* proc.c */
diff --git a/fs/nfs/nfs3acl.c b/fs/nfs/nfs3acl.c
index 9a5ca03..871d6ed 100644
--- a/fs/nfs/nfs3acl.c
+++ b/fs/nfs/nfs3acl.c
@@ -80,7 +80,7 @@
 	}
 
 	if (res.acl_access != NULL) {
-		if (posix_acl_equiv_mode(res.acl_access, NULL) ||
+		if ((posix_acl_equiv_mode(res.acl_access, NULL) == 0) ||
 		    res.acl_access->a_count == 0) {
 			posix_acl_release(res.acl_access);
 			res.acl_access = NULL;
@@ -113,7 +113,7 @@
 	return ERR_PTR(status);
 }
 
-int nfs3_proc_setacls(struct inode *inode, struct posix_acl *acl,
+static int __nfs3_proc_setacls(struct inode *inode, struct posix_acl *acl,
 		struct posix_acl *dfacl)
 {
 	struct nfs_server *server = NFS_SERVER(inode);
@@ -198,6 +198,15 @@
 	return status;
 }
 
+int nfs3_proc_setacls(struct inode *inode, struct posix_acl *acl,
+		struct posix_acl *dfacl)
+{
+	int ret;
+	ret = __nfs3_proc_setacls(inode, acl, dfacl);
+	return (ret == -EOPNOTSUPP) ? 0 : ret;
+
+}
+
 int nfs3_set_acl(struct inode *inode, struct posix_acl *acl, int type)
 {
 	struct posix_acl *alloc = NULL, *dfacl = NULL;
@@ -225,7 +234,7 @@
 		if (IS_ERR(alloc))
 			goto fail;
 	}
-	status = nfs3_proc_setacls(inode, acl, dfacl);
+	status = __nfs3_proc_setacls(inode, acl, dfacl);
 	posix_acl_release(alloc);
 	return status;
 
@@ -233,25 +242,6 @@
 	return PTR_ERR(alloc);
 }
 
-int nfs3_proc_set_default_acl(struct inode *dir, struct inode *inode,
-		umode_t mode)
-{
-	struct posix_acl *default_acl, *acl;
-	int error;
-
-	error = posix_acl_create(dir, &mode, &default_acl, &acl);
-	if (error)
-		return (error == -EOPNOTSUPP) ? 0 : error;
-
-	error = nfs3_proc_setacls(inode, acl, default_acl);
-
-	if (acl)
-		posix_acl_release(acl);
-	if (default_acl)
-		posix_acl_release(default_acl);
-	return error;
-}
-
 const struct xattr_handler *nfs3_xattr_handlers[] = {
 	&posix_acl_access_xattr_handler,
 	&posix_acl_default_xattr_handler,
diff --git a/fs/nfs/nfs3proc.c b/fs/nfs/nfs3proc.c
index d2255d7..a462ef0 100644
--- a/fs/nfs/nfs3proc.c
+++ b/fs/nfs/nfs3proc.c
@@ -18,6 +18,7 @@
 #include <linux/lockd/bind.h>
 #include <linux/nfs_mount.h>
 #include <linux/freezer.h>
+#include <linux/xattr.h>
 
 #include "iostat.h"
 #include "internal.h"
@@ -924,11 +925,11 @@
 	.permission	= nfs_permission,
 	.getattr	= nfs_getattr,
 	.setattr	= nfs_setattr,
+#ifdef CONFIG_NFS_V3_ACL
 	.listxattr	= generic_listxattr,
 	.getxattr	= generic_getxattr,
 	.setxattr	= generic_setxattr,
 	.removexattr	= generic_removexattr,
-#ifdef CONFIG_NFS_V3_ACL
 	.get_acl	= nfs3_get_acl,
 	.set_acl	= nfs3_set_acl,
 #endif
@@ -938,11 +939,11 @@
 	.permission	= nfs_permission,
 	.getattr	= nfs_getattr,
 	.setattr	= nfs_setattr,
+#ifdef CONFIG_NFS_V3_ACL
 	.listxattr	= generic_listxattr,
 	.getxattr	= generic_getxattr,
 	.setxattr	= generic_setxattr,
 	.removexattr	= generic_removexattr,
-#ifdef CONFIG_NFS_V3_ACL
 	.get_acl	= nfs3_get_acl,
 	.set_acl	= nfs3_set_acl,
 #endif
diff --git a/fs/nfs/nfs4_fs.h b/fs/nfs/nfs4_fs.h
index 5609edc..a5b27c2 100644
--- a/fs/nfs/nfs4_fs.h
+++ b/fs/nfs/nfs4_fs.h
@@ -270,6 +270,7 @@
 extern int nfs41_setup_sequence(struct nfs4_session *session,
 		struct nfs4_sequence_args *args, struct nfs4_sequence_res *res,
 		struct rpc_task *task);
+extern int nfs41_sequence_done(struct rpc_task *, struct nfs4_sequence_res *);
 extern int nfs4_proc_create_session(struct nfs_client *, struct rpc_cred *);
 extern int nfs4_proc_destroy_session(struct nfs4_session *, struct rpc_cred *);
 extern int nfs4_proc_get_lease_time(struct nfs_client *clp,
diff --git a/fs/nfs/nfs4client.c b/fs/nfs/nfs4client.c
index 73d4ecd..0e46d3d 100644
--- a/fs/nfs/nfs4client.c
+++ b/fs/nfs/nfs4client.c
@@ -170,7 +170,7 @@
 void nfs40_shutdown_client(struct nfs_client *clp)
 {
 	if (clp->cl_slot_tbl) {
-		nfs4_release_slot_table(clp->cl_slot_tbl);
+		nfs4_shutdown_slot_table(clp->cl_slot_tbl);
 		kfree(clp->cl_slot_tbl);
 	}
 }
@@ -372,10 +372,7 @@
 	__set_bit(NFS_CS_DISCRTRY, &clp->cl_flags);
 	__set_bit(NFS_CS_NO_RETRANS_TIMEOUT, &clp->cl_flags);
 
-	error = -EINVAL;
-	if (gssd_running(clp->cl_net))
-		error = nfs_create_rpc_client(clp, timeparms,
-					      RPC_AUTH_GSS_KRB5I);
+	error = nfs_create_rpc_client(clp, timeparms, RPC_AUTH_GSS_KRB5I);
 	if (error == -EINVAL)
 		error = nfs_create_rpc_client(clp, timeparms, RPC_AUTH_UNIX);
 	if (error < 0)
@@ -1138,6 +1135,7 @@
  * @hostname: new end-point's hostname
  * @sap: new end-point's socket address
  * @salen: size of "sap"
+ * @net: net namespace
  *
  * The nfs_server must be quiescent before this function is invoked.
  * Either its session is drained (NFSv4.1+), or its transport is
@@ -1146,13 +1144,13 @@
  * Returns zero on success, or a negative errno value.
  */
 int nfs4_update_server(struct nfs_server *server, const char *hostname,
-		       struct sockaddr *sap, size_t salen)
+		       struct sockaddr *sap, size_t salen, struct net *net)
 {
 	struct nfs_client *clp = server->nfs_client;
 	struct rpc_clnt *clnt = server->client;
 	struct xprt_create xargs = {
 		.ident		= clp->cl_proto,
-		.net		= &init_net,
+		.net		= net,
 		.dstaddr	= sap,
 		.addrlen	= salen,
 		.servername	= hostname,
@@ -1192,7 +1190,7 @@
 	error = nfs4_set_client(server, hostname, sap, salen, buf,
 				clp->cl_rpcclient->cl_auth->au_flavor,
 				clp->cl_proto, clnt->cl_timeout,
-				clp->cl_minorversion, clp->cl_net);
+				clp->cl_minorversion, net);
 	nfs_put_client(clp);
 	if (error != 0) {
 		nfs_server_insert_lists(server);
diff --git a/fs/nfs/nfs4filelayout.c b/fs/nfs/nfs4filelayout.c
index 03fd8be..12c8132 100644
--- a/fs/nfs/nfs4filelayout.c
+++ b/fs/nfs/nfs4filelayout.c
@@ -335,8 +335,10 @@
 	dprintk("--> %s task->tk_status %d\n", __func__, task->tk_status);
 
 	if (test_bit(NFS_IOHDR_REDO, &rdata->header->flags) &&
-	    task->tk_status == 0)
+	    task->tk_status == 0) {
+		nfs41_sequence_done(task, &rdata->res.seq_res);
 		return;
+	}
 
 	/* Note this may cause RPC to be resent */
 	rdata->header->mds_ops->rpc_call_done(task, data);
@@ -442,8 +444,10 @@
 	struct nfs_write_data *wdata = data;
 
 	if (test_bit(NFS_IOHDR_REDO, &wdata->header->flags) &&
-	    task->tk_status == 0)
+	    task->tk_status == 0) {
+		nfs41_sequence_done(task, &wdata->res.seq_res);
 		return;
+	}
 
 	/* Note this may cause RPC to be resent */
 	wdata->header->mds_ops->rpc_call_done(task, data);
diff --git a/fs/nfs/nfs4namespace.c b/fs/nfs/nfs4namespace.c
index 4e7f05d..3d5dbf8 100644
--- a/fs/nfs/nfs4namespace.c
+++ b/fs/nfs/nfs4namespace.c
@@ -121,9 +121,8 @@
 }
 
 static size_t nfs_parse_server_name(char *string, size_t len,
-		struct sockaddr *sa, size_t salen, struct nfs_server *server)
+		struct sockaddr *sa, size_t salen, struct net *net)
 {
-	struct net *net = rpc_net_ns(server->client);
 	ssize_t ret;
 
 	ret = rpc_pton(net, string, len, sa, salen);
@@ -223,6 +222,7 @@
 				     const struct nfs4_fs_location *location)
 {
 	const size_t addr_bufsize = sizeof(struct sockaddr_storage);
+	struct net *net = rpc_net_ns(NFS_SB(mountdata->sb)->client);
 	struct vfsmount *mnt = ERR_PTR(-ENOENT);
 	char *mnt_path;
 	unsigned int maxbuflen;
@@ -248,8 +248,7 @@
 			continue;
 
 		mountdata->addrlen = nfs_parse_server_name(buf->data, buf->len,
-				mountdata->addr, addr_bufsize,
-				NFS_SB(mountdata->sb));
+				mountdata->addr, addr_bufsize, net);
 		if (mountdata->addrlen == 0)
 			continue;
 
@@ -419,6 +418,7 @@
 		const struct nfs4_fs_location *location)
 {
 	const size_t addr_bufsize = sizeof(struct sockaddr_storage);
+	struct net *net = rpc_net_ns(server->client);
 	struct sockaddr *sap;
 	unsigned int s;
 	size_t salen;
@@ -440,7 +440,7 @@
 			continue;
 
 		salen = nfs_parse_server_name(buf->data, buf->len,
-						sap, addr_bufsize, server);
+						sap, addr_bufsize, net);
 		if (salen == 0)
 			continue;
 		rpc_set_port(sap, NFS_PORT);
@@ -450,7 +450,7 @@
 		if (hostname == NULL)
 			break;
 
-		error = nfs4_update_server(server, hostname, sap, salen);
+		error = nfs4_update_server(server, hostname, sap, salen, net);
 		kfree(hostname);
 		if (error == 0)
 			break;
diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c
index a196532..2da6a69 100644
--- a/fs/nfs/nfs4proc.c
+++ b/fs/nfs/nfs4proc.c
@@ -539,7 +539,7 @@
 	struct nfs4_slot *slot = res->sr_slot;
 	struct nfs4_slot_table *tbl;
 
-	if (!RPC_WAS_SENT(task))
+	if (slot == NULL)
 		goto out;
 
 	tbl = slot->table;
@@ -559,15 +559,10 @@
 {
 	struct nfs4_session *session;
 	struct nfs4_slot_table *tbl;
+	struct nfs4_slot *slot = res->sr_slot;
 	bool send_new_highest_used_slotid = false;
 
-	if (!res->sr_slot) {
-		/* just wake up the next guy waiting since
-		 * we may have not consumed a slot after all */
-		dprintk("%s: No slot\n", __func__);
-		return;
-	}
-	tbl = res->sr_slot->table;
+	tbl = slot->table;
 	session = tbl->session;
 
 	spin_lock(&tbl->slot_tbl_lock);
@@ -577,11 +572,11 @@
 	if (tbl->highest_used_slotid > tbl->target_highest_slotid)
 		send_new_highest_used_slotid = true;
 
-	if (nfs41_wake_and_assign_slot(tbl, res->sr_slot)) {
+	if (nfs41_wake_and_assign_slot(tbl, slot)) {
 		send_new_highest_used_slotid = false;
 		goto out_unlock;
 	}
-	nfs4_free_slot(tbl, res->sr_slot);
+	nfs4_free_slot(tbl, slot);
 
 	if (tbl->highest_used_slotid != NFS4_NO_SLOT)
 		send_new_highest_used_slotid = false;
@@ -592,19 +587,20 @@
 		nfs41_server_notify_highest_slotid_update(session->clp);
 }
 
-static int nfs41_sequence_done(struct rpc_task *task, struct nfs4_sequence_res *res)
+int nfs41_sequence_done(struct rpc_task *task, struct nfs4_sequence_res *res)
 {
 	struct nfs4_session *session;
-	struct nfs4_slot *slot;
+	struct nfs4_slot *slot = res->sr_slot;
 	struct nfs_client *clp;
 	bool interrupted = false;
 	int ret = 1;
 
+	if (slot == NULL)
+		goto out_noaction;
 	/* don't increment the sequence number if the task wasn't sent */
 	if (!RPC_WAS_SENT(task))
 		goto out;
 
-	slot = res->sr_slot;
 	session = slot->table->session;
 
 	if (slot->interrupted) {
@@ -679,6 +675,7 @@
 	/* The session may be reset by one of the error handlers. */
 	dprintk("%s: Error %d free the slot \n", __func__, res->sr_status);
 	nfs41_sequence_free_slot(res);
+out_noaction:
 	return ret;
 retry_nowait:
 	if (rpc_restart_call_prepare(task)) {
@@ -692,6 +689,7 @@
 	rpc_delay(task, NFS4_POLL_RETRY_MAX);
 	return 0;
 }
+EXPORT_SYMBOL_GPL(nfs41_sequence_done);
 
 static int nfs4_sequence_done(struct rpc_task *task,
 			       struct nfs4_sequence_res *res)
@@ -1622,15 +1620,15 @@
 {
 	struct nfs4_opendata *data = calldata;
 
-	nfs40_setup_sequence(data->o_arg.server, &data->o_arg.seq_args,
-				&data->o_res.seq_res, task);
+	nfs40_setup_sequence(data->o_arg.server, &data->c_arg.seq_args,
+				&data->c_res.seq_res, task);
 }
 
 static void nfs4_open_confirm_done(struct rpc_task *task, void *calldata)
 {
 	struct nfs4_opendata *data = calldata;
 
-	nfs40_sequence_done(task, &data->o_res.seq_res);
+	nfs40_sequence_done(task, &data->c_res.seq_res);
 
 	data->rpc_status = task->tk_status;
 	if (data->rpc_status == 0) {
@@ -1688,7 +1686,7 @@
 	};
 	int status;
 
-	nfs4_init_sequence(&data->o_arg.seq_args, &data->o_res.seq_res, 1);
+	nfs4_init_sequence(&data->c_arg.seq_args, &data->c_res.seq_res, 1);
 	kref_get(&data->kref);
 	data->rpc_done = 0;
 	data->rpc_status = 0;
@@ -2744,7 +2742,8 @@
 				NFS_CAP_OWNER_GROUP|NFS_CAP_ATIME|
 				NFS_CAP_CTIME|NFS_CAP_MTIME|
 				NFS_CAP_SECURITY_LABEL);
-		if (res.attr_bitmask[0] & FATTR4_WORD0_ACL)
+		if (res.attr_bitmask[0] & FATTR4_WORD0_ACL &&
+				res.acl_bitmask & ACL4_SUPPORT_ALLOW_ACL)
 			server->caps |= NFS_CAP_ACLS;
 		if (res.has_links != 0)
 			server->caps |= NFS_CAP_HARDLINKS;
@@ -4321,9 +4320,7 @@
 
 static inline int nfs4_server_supports_acls(struct nfs_server *server)
 {
-	return (server->caps & NFS_CAP_ACLS)
-		&& (server->acl_bitmask & ACL4_SUPPORT_ALLOW_ACL)
-		&& (server->acl_bitmask & ACL4_SUPPORT_DENY_ACL);
+	return server->caps & NFS_CAP_ACLS;
 }
 
 /* Assuming that XATTR_SIZE_MAX is a multiple of PAGE_SIZE, and that
diff --git a/fs/nfs/nfs4session.c b/fs/nfs/nfs4session.c
index cf883c7..e799dc3 100644
--- a/fs/nfs/nfs4session.c
+++ b/fs/nfs/nfs4session.c
@@ -231,14 +231,23 @@
 	return ret;
 }
 
+/*
+ * nfs4_release_slot_table - release all slot table entries
+ */
+static void nfs4_release_slot_table(struct nfs4_slot_table *tbl)
+{
+	nfs4_shrink_slot_table(tbl, 0);
+}
+
 /**
- * nfs4_release_slot_table - release resources attached to a slot table
+ * nfs4_shutdown_slot_table - release resources attached to a slot table
  * @tbl: slot table to shut down
  *
  */
-void nfs4_release_slot_table(struct nfs4_slot_table *tbl)
+void nfs4_shutdown_slot_table(struct nfs4_slot_table *tbl)
 {
-	nfs4_shrink_slot_table(tbl, 0);
+	nfs4_release_slot_table(tbl);
+	rpc_destroy_wait_queue(&tbl->slot_tbl_waitq);
 }
 
 /**
@@ -422,7 +431,7 @@
 	spin_unlock(&tbl->slot_tbl_lock);
 }
 
-static void nfs4_destroy_session_slot_tables(struct nfs4_session *session)
+static void nfs4_release_session_slot_tables(struct nfs4_session *session)
 {
 	nfs4_release_slot_table(&session->fc_slot_table);
 	nfs4_release_slot_table(&session->bc_slot_table);
@@ -450,7 +459,7 @@
 	if (status && tbl->slots == NULL)
 		/* Fore and back channel share a connection so get
 		 * both slot tables or neither */
-		nfs4_destroy_session_slot_tables(ses);
+		nfs4_release_session_slot_tables(ses);
 	return status;
 }
 
@@ -470,6 +479,12 @@
 	return session;
 }
 
+static void nfs4_destroy_session_slot_tables(struct nfs4_session *session)
+{
+	nfs4_shutdown_slot_table(&session->fc_slot_table);
+	nfs4_shutdown_slot_table(&session->bc_slot_table);
+}
+
 void nfs4_destroy_session(struct nfs4_session *session)
 {
 	struct rpc_xprt *xprt;
diff --git a/fs/nfs/nfs4session.h b/fs/nfs/nfs4session.h
index 2323061..b34ada9 100644
--- a/fs/nfs/nfs4session.h
+++ b/fs/nfs/nfs4session.h
@@ -74,7 +74,7 @@
 
 extern int nfs4_setup_slot_table(struct nfs4_slot_table *tbl,
 		unsigned int max_reqs, const char *queue);
-extern void nfs4_release_slot_table(struct nfs4_slot_table *tbl);
+extern void nfs4_shutdown_slot_table(struct nfs4_slot_table *tbl);
 extern struct nfs4_slot *nfs4_alloc_slot(struct nfs4_slot_table *tbl);
 extern void nfs4_free_slot(struct nfs4_slot_table *tbl, struct nfs4_slot *slot);
 extern void nfs4_slot_tbl_drain_complete(struct nfs4_slot_table *tbl);
diff --git a/fs/nfs/nfs4state.c b/fs/nfs/nfs4state.c
index e5be725..e1a4721 100644
--- a/fs/nfs/nfs4state.c
+++ b/fs/nfs/nfs4state.c
@@ -1015,8 +1015,11 @@
 	if (ret == -EIO)
 		/* A lost lock - don't even consider delegations */
 		goto out;
-	if (nfs4_copy_delegation_stateid(dst, state->inode, fmode))
+	/* returns true if delegation stateid found and copied */
+	if (nfs4_copy_delegation_stateid(dst, state->inode, fmode)) {
+		ret = 0;
 		goto out;
+	}
 	if (ret != -ENOENT)
 		/* nfs4_copy_delegation_stateid() didn't over-write
 		 * dst, so it still has the lock stateid which we now
diff --git a/fs/nfs/nfs4xdr.c b/fs/nfs/nfs4xdr.c
index 8c21d69..72f3bf1 100644
--- a/fs/nfs/nfs4xdr.c
+++ b/fs/nfs/nfs4xdr.c
@@ -3449,7 +3449,7 @@
 {
 	__be32 *p;
 
-	*res = ACL4_SUPPORT_ALLOW_ACL|ACL4_SUPPORT_DENY_ACL;
+	*res = 0;
 	if (unlikely(bitmap[0] & (FATTR4_WORD0_ACLSUPPORT - 1U)))
 		return -EIO;
 	if (likely(bitmap[0] & FATTR4_WORD0_ACLSUPPORT)) {
diff --git a/fs/nfs/nfstrace.h b/fs/nfs/nfstrace.h
index 89fe741..59f838c 100644
--- a/fs/nfs/nfstrace.h
+++ b/fs/nfs/nfstrace.h
@@ -36,6 +36,7 @@
 	__print_flags(v, "|", \
 			{ 1 << NFS_INO_ADVISE_RDPLUS, "ADVISE_RDPLUS" }, \
 			{ 1 << NFS_INO_STALE, "STALE" }, \
+			{ 1 << NFS_INO_INVALIDATING, "INVALIDATING" }, \
 			{ 1 << NFS_INO_FLUSHING, "FLUSHING" }, \
 			{ 1 << NFS_INO_FSCACHE, "FSCACHE" }, \
 			{ 1 << NFS_INO_COMMIT, "COMMIT" }, \
diff --git a/fs/nfs/write.c b/fs/nfs/write.c
index a44a872..9a3b6a4 100644
--- a/fs/nfs/write.c
+++ b/fs/nfs/write.c
@@ -909,9 +909,14 @@
  */
 static bool nfs_write_pageuptodate(struct page *page, struct inode *inode)
 {
+	struct nfs_inode *nfsi = NFS_I(inode);
+
 	if (nfs_have_delegated_attributes(inode))
 		goto out;
-	if (NFS_I(inode)->cache_validity & (NFS_INO_INVALID_DATA|NFS_INO_REVAL_PAGECACHE))
+	if (nfsi->cache_validity & (NFS_INO_INVALID_DATA|NFS_INO_REVAL_PAGECACHE))
+		return false;
+	smp_rmb();
+	if (test_bit(NFS_INO_INVALIDATING, &nfsi->flags))
 		return false;
 out:
 	return PageUptodate(page) != 0;
diff --git a/fs/nfsd/acl.h b/fs/nfsd/acl.h
index 8b68218..a812fd1 100644
--- a/fs/nfsd/acl.h
+++ b/fs/nfsd/acl.h
@@ -45,7 +45,7 @@
 
 struct nfs4_acl *nfs4_acl_new(int);
 int nfs4_acl_get_whotype(char *, u32);
-int nfs4_acl_write_who(int who, char *p);
+__be32 nfs4_acl_write_who(int who, __be32 **p, int *len);
 
 int nfsd4_get_nfs4_acl(struct svc_rqst *rqstp, struct dentry *dentry,
 		struct nfs4_acl **acl);
diff --git a/fs/nfsd/cache.h b/fs/nfsd/cache.h
index d5c5b3e..b582f9a 100644
--- a/fs/nfsd/cache.h
+++ b/fs/nfsd/cache.h
@@ -84,12 +84,4 @@
 void	nfsd_cache_update(struct svc_rqst *, int, __be32 *);
 int	nfsd_reply_cache_stats_open(struct inode *, struct file *);
 
-#ifdef CONFIG_NFSD_V4
-void	nfsd4_set_statp(struct svc_rqst *rqstp, __be32 *statp);
-#else  /* CONFIG_NFSD_V4 */
-static inline void nfsd4_set_statp(struct svc_rqst *rqstp, __be32 *statp)
-{
-}
-#endif /* CONFIG_NFSD_V4 */
-
 #endif /* NFSCACHE_H */
diff --git a/fs/nfsd/idmap.h b/fs/nfsd/idmap.h
index bf95f6b..66e58db 100644
--- a/fs/nfsd/idmap.h
+++ b/fs/nfsd/idmap.h
@@ -56,7 +56,7 @@
 
 __be32 nfsd_map_name_to_uid(struct svc_rqst *, const char *, size_t, kuid_t *);
 __be32 nfsd_map_name_to_gid(struct svc_rqst *, const char *, size_t, kgid_t *);
-int nfsd_map_uid_to_name(struct svc_rqst *, kuid_t, char *);
-int nfsd_map_gid_to_name(struct svc_rqst *, kgid_t, char *);
+__be32 nfsd4_encode_user(struct svc_rqst *, kuid_t, __be32 **, int *);
+__be32 nfsd4_encode_group(struct svc_rqst *, kgid_t, __be32 **, int *);
 
 #endif /* LINUX_NFSD_IDMAP_H */
diff --git a/fs/nfsd/netns.h b/fs/nfsd/netns.h
index 849a7c3..d32b3aa 100644
--- a/fs/nfsd/netns.h
+++ b/fs/nfsd/netns.h
@@ -95,6 +95,7 @@
 	time_t nfsd4_grace;
 
 	bool nfsd_net_up;
+	bool lockd_up;
 
 	/*
 	 * Time of server startup
diff --git a/fs/nfsd/nfs3xdr.c b/fs/nfsd/nfs3xdr.c
index 14d9ecb..de6e39e 100644
--- a/fs/nfsd/nfs3xdr.c
+++ b/fs/nfsd/nfs3xdr.c
@@ -168,7 +168,7 @@
 	      struct kstat *stat)
 {
 	*p++ = htonl(nfs3_ftypes[(stat->mode & S_IFMT) >> 12]);
-	*p++ = htonl((u32) stat->mode);
+	*p++ = htonl((u32) (stat->mode & S_IALLUGO));
 	*p++ = htonl((u32) stat->nlink);
 	*p++ = htonl((u32) from_kuid(&init_user_ns, stat->uid));
 	*p++ = htonl((u32) from_kgid(&init_user_ns, stat->gid));
@@ -842,21 +842,21 @@
 
 static __be32 *encode_entryplus_baggage(struct nfsd3_readdirres *cd, __be32 *p, const char *name, int namlen)
 {
-	struct svc_fh	fh;
+	struct svc_fh	*fh = &cd->scratch;
 	__be32 err;
 
-	fh_init(&fh, NFS3_FHSIZE);
-	err = compose_entry_fh(cd, &fh, name, namlen);
+	fh_init(fh, NFS3_FHSIZE);
+	err = compose_entry_fh(cd, fh, name, namlen);
 	if (err) {
 		*p++ = 0;
 		*p++ = 0;
 		goto out;
 	}
-	p = encode_post_op_attr(cd->rqstp, p, &fh);
+	p = encode_post_op_attr(cd->rqstp, p, fh);
 	*p++ = xdr_one;			/* yes, a file handle follows */
-	p = encode_fh(p, &fh);
+	p = encode_fh(p, fh);
 out:
-	fh_put(&fh);
+	fh_put(fh);
 	return p;
 }
 
diff --git a/fs/nfsd/nfs4acl.c b/fs/nfsd/nfs4acl.c
index 649ad7c..d190e33 100644
--- a/fs/nfsd/nfs4acl.c
+++ b/fs/nfsd/nfs4acl.c
@@ -38,6 +38,7 @@
 #include <linux/nfs_fs.h>
 #include <linux/export.h>
 #include "nfsfh.h"
+#include "nfsd.h"
 #include "acl.h"
 #include "vfs.h"
 
@@ -150,17 +151,15 @@
 		pacl = posix_acl_from_mode(inode->i_mode, GFP_KERNEL);
 		if (IS_ERR(pacl))
 			return PTR_ERR(pacl);
-		/* allocate for worst case: one (deny, allow) pair each: */
-		size += 2 * pacl->a_count;
 	}
+	/* allocate for worst case: one (deny, allow) pair each: */
+	size += 2 * pacl->a_count;
 
 	if (S_ISDIR(inode->i_mode)) {
 		flags = NFS4_ACL_DIR;
 		dpacl = get_acl(inode, ACL_TYPE_DEFAULT);
 		if (dpacl)
 			size += 2 * dpacl->a_count;
-	} else {
-		dpacl = NULL;
 	}
 
 	*acl = nfs4_acl_new(size);
@@ -169,8 +168,7 @@
 		goto out;
 	}
 
-	if (pacl)
-		_posix_to_nfsv4_one(pacl, *acl, flags & ~NFS4_ACL_TYPE_DEFAULT);
+	_posix_to_nfsv4_one(pacl, *acl, flags & ~NFS4_ACL_TYPE_DEFAULT);
 
 	if (dpacl)
 		_posix_to_nfsv4_one(dpacl, *acl, flags | NFS4_ACL_TYPE_DEFAULT);
@@ -916,17 +914,22 @@
 	return NFS4_ACL_WHO_NAMED;
 }
 
-int
-nfs4_acl_write_who(int who, char *p)
+__be32 nfs4_acl_write_who(int who, __be32 **p, int *len)
 {
 	int i;
+	int bytes;
 
 	for (i = 0; i < ARRAY_SIZE(s2t_map); i++) {
-		if (s2t_map[i].type == who) {
-			memcpy(p, s2t_map[i].string, s2t_map[i].stringlen);
-			return s2t_map[i].stringlen;
-		}
+		if (s2t_map[i].type != who)
+			continue;
+		bytes = 4 + (XDR_QUADLEN(s2t_map[i].stringlen) << 2);
+		if (bytes > *len)
+			return nfserr_resource;
+		*p = xdr_encode_opaque(*p, s2t_map[i].string,
+					s2t_map[i].stringlen);
+		*len -= bytes;
+		return 0;
 	}
-	BUG();
+	WARN_ON_ONCE(1);
 	return -1;
 }
diff --git a/fs/nfsd/nfs4idmap.c b/fs/nfsd/nfs4idmap.c
index 4832fd8..c0dfde6 100644
--- a/fs/nfsd/nfs4idmap.c
+++ b/fs/nfsd/nfs4idmap.c
@@ -551,27 +551,46 @@
 	return 0;
 }
 
-static int
-idmap_id_to_name(struct svc_rqst *rqstp, int type, u32 id, char *name)
+static __be32 encode_ascii_id(u32 id, __be32 **p, int *buflen)
+{
+	char buf[11];
+	int len;
+	int bytes;
+
+	len = sprintf(buf, "%u", id);
+	bytes = 4 + (XDR_QUADLEN(len) << 2);
+	if (bytes > *buflen)
+		return nfserr_resource;
+	*p = xdr_encode_opaque(*p, buf, len);
+	*buflen -= bytes;
+	return 0;
+}
+
+static __be32 idmap_id_to_name(struct svc_rqst *rqstp, int type, u32 id, __be32 **p, int *buflen)
 {
 	struct ent *item, key = {
 		.id = id,
 		.type = type,
 	};
 	int ret;
+	int bytes;
 	struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id);
 
 	strlcpy(key.authname, rqst_authname(rqstp), sizeof(key.authname));
 	ret = idmap_lookup(rqstp, idtoname_lookup, &key, nn->idtoname_cache, &item);
 	if (ret == -ENOENT)
-		return sprintf(name, "%u", id);
+		return encode_ascii_id(id, p, buflen);
 	if (ret)
-		return ret;
+		return nfserrno(ret);
 	ret = strlen(item->name);
-	BUG_ON(ret > IDMAP_NAMESZ);
-	memcpy(name, item->name, ret);
+	WARN_ON_ONCE(ret > IDMAP_NAMESZ);
+	bytes = 4 + (XDR_QUADLEN(ret) << 2);
+	if (bytes > *buflen)
+		return nfserr_resource;
+	*p = xdr_encode_opaque(*p, item->name, ret);
+	*buflen -= bytes;
 	cache_put(&item->h, nn->idtoname_cache);
-	return ret;
+	return 0;
 }
 
 static bool
@@ -603,12 +622,11 @@
 	return idmap_name_to_id(rqstp, type, name, namelen, id);
 }
 
-static int
-do_id_to_name(struct svc_rqst *rqstp, int type, u32 id, char *name)
+static __be32 encode_name_from_id(struct svc_rqst *rqstp, int type, u32 id, __be32 **p, int *buflen)
 {
 	if (nfs4_disable_idmapping && rqstp->rq_cred.cr_flavor < RPC_AUTH_GSS)
-		return sprintf(name, "%u", id);
-	return idmap_id_to_name(rqstp, type, id, name);
+		return encode_ascii_id(id, p, buflen);
+	return idmap_id_to_name(rqstp, type, id, p, buflen);
 }
 
 __be32
@@ -637,16 +655,14 @@
 	return status;
 }
 
-int
-nfsd_map_uid_to_name(struct svc_rqst *rqstp, kuid_t uid, char *name)
+__be32 nfsd4_encode_user(struct svc_rqst *rqstp, kuid_t uid,  __be32 **p, int *buflen)
 {
 	u32 id = from_kuid(&init_user_ns, uid);
-	return do_id_to_name(rqstp, IDMAP_TYPE_USER, id, name);
+	return encode_name_from_id(rqstp, IDMAP_TYPE_USER, id, p, buflen);
 }
 
-int
-nfsd_map_gid_to_name(struct svc_rqst *rqstp, kgid_t gid, char *name)
+__be32 nfsd4_encode_group(struct svc_rqst *rqstp, kgid_t gid, __be32 **p, int *buflen)
 {
 	u32 id = from_kgid(&init_user_ns, gid);
-	return do_id_to_name(rqstp, IDMAP_TYPE_GROUP, id, name);
+	return encode_name_from_id(rqstp, IDMAP_TYPE_GROUP, id, p, buflen);
 }
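
Both encode_ascii_id() and the named case above size the reply with
4 + (XDR_QUADLEN(len) << 2): an XDR opaque is a 4-byte length word followed
by the payload padded up to a 4-byte boundary.  A small worked sketch
(xdr_opaque_bytes is a made-up helper, not a kernel function):

	/* XDR_QUADLEN(len) is (len + 3) >> 2, i.e. the payload in 4-byte words */
	static inline int xdr_opaque_bytes(int len)
	{
		return 4 + (((len + 3) >> 2) << 2);
	}

	/* e.g. the 5-character id string "65534" needs 4 + 8 = 12 bytes,
	 * so encoding fails with nfserr_resource when *buflen < 12.  */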
diff --git a/fs/nfsd/nfs4proc.c b/fs/nfsd/nfs4proc.c
index 825b8a9..82189b2 100644
--- a/fs/nfsd/nfs4proc.c
+++ b/fs/nfsd/nfs4proc.c
@@ -231,17 +231,16 @@
 }
 
 static __be32
-do_open_lookup(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate, struct nfsd4_open *open)
+do_open_lookup(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate, struct nfsd4_open *open, struct svc_fh **resfh)
 {
 	struct svc_fh *current_fh = &cstate->current_fh;
-	struct svc_fh *resfh;
 	int accmode;
 	__be32 status;
 
-	resfh = kmalloc(sizeof(struct svc_fh), GFP_KERNEL);
-	if (!resfh)
+	*resfh = kmalloc(sizeof(struct svc_fh), GFP_KERNEL);
+	if (!*resfh)
 		return nfserr_jukebox;
-	fh_init(resfh, NFS4_FHSIZE);
+	fh_init(*resfh, NFS4_FHSIZE);
 	open->op_truncate = 0;
 
 	if (open->op_create) {
@@ -266,12 +265,12 @@
 		 */
 		status = do_nfsd_create(rqstp, current_fh, open->op_fname.data,
 					open->op_fname.len, &open->op_iattr,
-					resfh, open->op_createmode,
+					*resfh, open->op_createmode,
 					(u32 *)open->op_verf.data,
 					&open->op_truncate, &open->op_created);
 
 		if (!status && open->op_label.len)
-			nfsd4_security_inode_setsecctx(resfh, &open->op_label, open->op_bmval);
+			nfsd4_security_inode_setsecctx(*resfh, &open->op_label, open->op_bmval);
 
 		/*
 		 * Following rfc 3530 14.2.16, use the returned bitmask
@@ -281,31 +280,32 @@
 		if (open->op_createmode == NFS4_CREATE_EXCLUSIVE && status == 0)
 			open->op_bmval[1] = (FATTR4_WORD1_TIME_ACCESS |
 							FATTR4_WORD1_TIME_MODIFY);
-	} else {
+	} else
+		/*
+		 * Note this may exit with the parent still locked.
+		 * We will hold the lock until nfsd4_open's final
+		 * lookup, to prevent renames or unlinks until we've had
+		 * a chance to acquire a delegation if appropriate.
+		 */
 		status = nfsd_lookup(rqstp, current_fh,
-				     open->op_fname.data, open->op_fname.len, resfh);
-		fh_unlock(current_fh);
-	}
+				     open->op_fname.data, open->op_fname.len, *resfh);
 	if (status)
 		goto out;
-	status = nfsd_check_obj_isreg(resfh);
+	status = nfsd_check_obj_isreg(*resfh);
 	if (status)
 		goto out;
 
 	if (is_create_with_attrs(open) && open->op_acl != NULL)
-		do_set_nfs4_acl(rqstp, resfh, open->op_acl, open->op_bmval);
+		do_set_nfs4_acl(rqstp, *resfh, open->op_acl, open->op_bmval);
 
-	nfsd4_set_open_owner_reply_cache(cstate, open, resfh);
+	nfsd4_set_open_owner_reply_cache(cstate, open, *resfh);
 	accmode = NFSD_MAY_NOP;
 	if (open->op_created ||
 			open->op_claim_type == NFS4_OPEN_CLAIM_DELEGATE_CUR)
 		accmode |= NFSD_MAY_OWNER_OVERRIDE;
-	status = do_open_permission(rqstp, resfh, open, accmode);
+	status = do_open_permission(rqstp, *resfh, open, accmode);
 	set_change_info(&open->op_cinfo, current_fh);
-	fh_dup2(current_fh, resfh);
 out:
-	fh_put(resfh);
-	kfree(resfh);
 	return status;
 }
 
@@ -358,6 +358,7 @@
 	   struct nfsd4_open *open)
 {
 	__be32 status;
+	struct svc_fh *resfh = NULL;
 	struct nfsd4_compoundres *resp;
 	struct net *net = SVC_NET(rqstp);
 	struct nfsd_net *nn = net_generic(net, nfsd_net_id);
@@ -424,7 +425,7 @@
 	switch (open->op_claim_type) {
 		case NFS4_OPEN_CLAIM_DELEGATE_CUR:
 		case NFS4_OPEN_CLAIM_NULL:
-			status = do_open_lookup(rqstp, cstate, open);
+			status = do_open_lookup(rqstp, cstate, open, &resfh);
 			if (status)
 				goto out;
 			break;
@@ -440,6 +441,7 @@
 			status = do_open_fhandle(rqstp, cstate, open);
 			if (status)
 				goto out;
+			resfh = &cstate->current_fh;
 			break;
 		case NFS4_OPEN_CLAIM_DELEG_PREV_FH:
              	case NFS4_OPEN_CLAIM_DELEGATE_PREV:
@@ -459,9 +461,14 @@
 	 * successful, it (1) truncates the file if open->op_truncate was
 	 * set, (2) sets open->op_stateid, (3) sets open->op_delegation.
 	 */
-	status = nfsd4_process_open2(rqstp, &cstate->current_fh, open);
+	status = nfsd4_process_open2(rqstp, resfh, open);
 	WARN_ON(status && open->op_created);
 out:
+	if (resfh && resfh != &cstate->current_fh) {
+		fh_dup2(&cstate->current_fh, resfh);
+		fh_put(resfh);
+		kfree(resfh);
+	}
 	nfsd4_cleanup_open_state(open, status);
 	if (open->op_openowner && !nfsd4_has_session(cstate))
 		cstate->replay_owner = &open->op_openowner->oo_owner;
@@ -1070,8 +1077,10 @@
 				    cstate->current_fh.fh_dentry, &p,
 				    count, verify->ve_bmval,
 				    rqstp, 0);
-
-	/* this means that nfsd4_encode_fattr() ran out of space */
+	/*
+	 * If nfsd4_encode_fattr() ran out of space, assume that's because
+	 * the attributes are longer (hence different) than those given:
+	 */
 	if (status == nfserr_resource)
 		status = nfserr_not_same;
 	if (status)
@@ -1525,7 +1534,8 @@
 static inline u32 nfsd4_exchange_id_rsize(struct svc_rqst *rqstp, struct nfsd4_op *op)
 {
 	return (op_encode_hdr_size + 2 + 1 + /* eir_clientid, eir_sequenceid */\
-		1 + 1 + 2 + /* eir_flags, spr_how, spo_must_enforce & _allow */\
+		1 + 1 + /* eir_flags, spr_how */\
+		4 + /* spo_must_enforce & _allow with bitmap */\
 		2 + /*eir_server_owner.so_minor_id */\
 		/* eir_server_owner.so_major_id<> */\
 		XDR_QUADLEN(NFS4_OPAQUE_LIMIT) + 1 +\
@@ -1882,6 +1892,7 @@
 		.vs_proc	= nfsd_procedures4,
 		.vs_dispatch	= nfsd_dispatch,
 		.vs_xdrsize	= NFS4_SVC_XDRSIZE,
+		.vs_rpcb_optnl	= 1,
 };
 
 /*
diff --git a/fs/nfsd/nfs4state.c b/fs/nfsd/nfs4state.c
index 105d6fa..d5d070f 100644
--- a/fs/nfsd/nfs4state.c
+++ b/fs/nfsd/nfs4state.c
@@ -832,10 +832,11 @@
 	spin_unlock(&nfsd_drc_lock);
 }
 
-static struct nfsd4_session *alloc_session(struct nfsd4_channel_attrs *attrs)
+static struct nfsd4_session *alloc_session(struct nfsd4_channel_attrs *fattrs,
+					   struct nfsd4_channel_attrs *battrs)
 {
-	int numslots = attrs->maxreqs;
-	int slotsize = slot_bytes(attrs);
+	int numslots = fattrs->maxreqs;
+	int slotsize = slot_bytes(fattrs);
 	struct nfsd4_session *new;
 	int mem, i;
 
@@ -852,6 +853,10 @@
 		if (!new->se_slots[i])
 			goto out_free;
 	}
+
+	memcpy(&new->se_fchannel, fattrs, sizeof(struct nfsd4_channel_attrs));
+	memcpy(&new->se_bchannel, battrs, sizeof(struct nfsd4_channel_attrs));
+
 	return new;
 out_free:
 	while (i--)
@@ -997,8 +1002,7 @@
 	list_add(&new->se_perclnt, &clp->cl_sessions);
 	spin_unlock(&clp->cl_lock);
 	spin_unlock(&nn->client_lock);
-	memcpy(&new->se_fchannel, &cses->fore_channel,
-			sizeof(struct nfsd4_channel_attrs));
+
 	if (cses->flags & SESSION4_BACK_CHAN) {
 		struct sockaddr *sa = svc_addr(rqstp);
 		/*
@@ -1851,6 +1855,11 @@
 	return nfs_ok;
 }
 
+#define NFSD_CB_MAX_REQ_SZ	((NFS4_enc_cb_recall_sz + \
+				 RPC_MAX_HEADER_WITH_AUTH) * sizeof(__be32))
+#define NFSD_CB_MAX_RESP_SZ	((NFS4_dec_cb_recall_sz + \
+				 RPC_MAX_REPHEADER_WITH_AUTH) * sizeof(__be32))
+
 static __be32 check_backchannel_attrs(struct nfsd4_channel_attrs *ca)
 {
 	ca->headerpadsz = 0;
@@ -1861,9 +1870,9 @@
 	 * less than 1k.  Tighten up this estimate in the unlikely event
 	 * it turns out to be a problem for some client:
 	 */
-	if (ca->maxreq_sz < NFS4_enc_cb_recall_sz + RPC_MAX_HEADER_WITH_AUTH)
+	if (ca->maxreq_sz < NFSD_CB_MAX_REQ_SZ)
 		return nfserr_toosmall;
-	if (ca->maxresp_sz < NFS4_dec_cb_recall_sz + RPC_MAX_REPHEADER_WITH_AUTH)
+	if (ca->maxresp_sz < NFSD_CB_MAX_RESP_SZ)
 		return nfserr_toosmall;
 	ca->maxresp_cached = 0;
 	if (ca->maxops < 2)
@@ -1913,9 +1922,9 @@
 		return status;
 	status = check_backchannel_attrs(&cr_ses->back_channel);
 	if (status)
-		return status;
+		goto out_release_drc_mem;
 	status = nfserr_jukebox;
-	new = alloc_session(&cr_ses->fore_channel);
+	new = alloc_session(&cr_ses->fore_channel, &cr_ses->back_channel);
 	if (!new)
 		goto out_release_drc_mem;
 	conn = alloc_conn_from_crses(rqstp, cr_ses);
@@ -3034,18 +3043,18 @@
 	if (!fl)
 		return -ENOMEM;
 	fl->fl_file = find_readable_file(fp);
-	list_add(&dp->dl_perclnt, &dp->dl_stid.sc_client->cl_delegations);
 	status = vfs_setlease(fl->fl_file, fl->fl_type, &fl);
-	if (status) {
-		list_del_init(&dp->dl_perclnt);
-		locks_free_lock(fl);
-		return status;
-	}
+	if (status)
+		goto out_free;
+	list_add(&dp->dl_perclnt, &dp->dl_stid.sc_client->cl_delegations);
 	fp->fi_lease = fl;
 	fp->fi_deleg_file = get_file(fl->fl_file);
 	atomic_set(&fp->fi_delegees, 1);
 	list_add(&dp->dl_perfile, &fp->fi_delegations);
 	return 0;
+out_free:
+	locks_free_lock(fl);
+	return status;
 }
 
 static int nfs4_set_delegation(struct nfs4_delegation *dp, struct nfs4_file *fp)
@@ -3125,6 +3134,7 @@
 				goto out_no_deleg;
 			break;
 		case NFS4_OPEN_CLAIM_NULL:
+		case NFS4_OPEN_CLAIM_FH:
 			/*
 			 * Let's not give out any delegations till everyone's
 			 * had the chance to reclaim theirs....
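
The new NFSD_CB_MAX_REQ_SZ / NFSD_CB_MAX_RESP_SZ macros above address a unit
mismatch: the NFS4_enc_cb_recall_sz, NFS4_dec_cb_recall_sz and
RPC_MAX_HEADER_WITH_AUTH / RPC_MAX_REPHEADER_WITH_AUTH constants count 4-byte
XDR words, while the client-supplied ca->maxreq_sz and ca->maxresp_sz are byte
counts, so the estimate is scaled before the comparison (a reading of the
change, assuming the constants are XDR-word counts):

	bytes = (NFS4_enc_cb_recall_sz + RPC_MAX_HEADER_WITH_AUTH) * sizeof(__be32)

and CREATE_SESSION is refused with nfserr_toosmall when the client's
advertised maximum falls below that value.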
diff --git a/fs/nfsd/nfs4xdr.c b/fs/nfsd/nfs4xdr.c
index ee7237f..63f2395 100644
--- a/fs/nfsd/nfs4xdr.c
+++ b/fs/nfsd/nfs4xdr.c
@@ -103,11 +103,6 @@
 	(x) = (u64)ntohl(*p++) << 32;		\
 	(x) |= ntohl(*p++);			\
 } while (0)
-#define READTIME(x)       do {			\
-	p++;					\
-	(x) = ntohl(*p++);			\
-	p++;					\
-} while (0)
 #define READMEM(x,nbytes) do {			\
 	x = (char *)p;				\
 	p += XDR_QUADLEN(nbytes);		\
@@ -190,6 +185,15 @@
 	return (clid->cl_boot == 0) && (clid->cl_id == 0);
 }
 
+/**
+ * defer_free - mark an allocation for deferred freeing
+ * @argp: NFSv4 compound argument structure to be freed with
+ * @release: release callback to free @p, typically kfree()
+ * @p: pointer to be freed
+ *
+ * Marks @p to be freed when processing the compound operation
+ * described in @argp finishes.
+ */
 static int
 defer_free(struct nfsd4_compoundargs *argp,
 		void (*release)(const void *), void *p)
@@ -206,6 +210,16 @@
 	return 0;
 }
 
+/**
+ * savemem - duplicate a chunk of memory for later processing
+ * @argp: NFSv4 compound argument structure to be freed with
+ * @p: pointer to be duplicated
+ * @nbytes: length to be duplicated
+ *
+ * Returns a pointer to a copy of @nbytes bytes of memory at @p
+ * that are preserved until processing of the NFSv4 compound
+ * operation described by @argp finishes.
+ */
 static char *savemem(struct nfsd4_compoundargs *argp, __be32 *p, int nbytes)
 {
 	if (p == argp->tmp) {
@@ -257,7 +271,6 @@
 	int expected_len, len = 0;
 	u32 dummy32;
 	char *buf;
-	int host_err;
 
 	DECODE_HEAD;
 	iattr->ia_valid = 0;
@@ -284,10 +297,9 @@
 			return nfserr_resource;
 
 		*acl = nfs4_acl_new(nace);
-		if (*acl == NULL) {
-			host_err = -ENOMEM;
-			goto out_nfserr;
-		}
+		if (*acl == NULL)
+			return nfserr_jukebox;
+
 		defer_free(argp, kfree, *acl);
 
 		(*acl)->naces = nace;
@@ -425,10 +437,6 @@
 		goto xdr_error;
 
 	DECODE_TAIL;
-
-out_nfserr:
-	status = nfserrno(host_err);
-	goto out;
 }
 
 static __be32
@@ -1957,56 +1965,16 @@
 	};
 }
 
-static __be32
-nfsd4_encode_name(struct svc_rqst *rqstp, int whotype, kuid_t uid, kgid_t gid,
-			__be32 **p, int *buflen)
-{
-	int status;
-
-	if (*buflen < (XDR_QUADLEN(IDMAP_NAMESZ) << 2) + 4)
-		return nfserr_resource;
-	if (whotype != NFS4_ACL_WHO_NAMED)
-		status = nfs4_acl_write_who(whotype, (u8 *)(*p + 1));
-	else if (gid_valid(gid))
-		status = nfsd_map_gid_to_name(rqstp, gid, (u8 *)(*p + 1));
-	else
-		status = nfsd_map_uid_to_name(rqstp, uid, (u8 *)(*p + 1));
-	if (status < 0)
-		return nfserrno(status);
-	*p = xdr_encode_opaque(*p, NULL, status);
-	*buflen -= (XDR_QUADLEN(status) << 2) + 4;
-	BUG_ON(*buflen < 0);
-	return 0;
-}
-
-static inline __be32
-nfsd4_encode_user(struct svc_rqst *rqstp, kuid_t user, __be32 **p, int *buflen)
-{
-	return nfsd4_encode_name(rqstp, NFS4_ACL_WHO_NAMED, user, INVALID_GID,
-				 p, buflen);
-}
-
-static inline __be32
-nfsd4_encode_group(struct svc_rqst *rqstp, kgid_t group, __be32 **p, int *buflen)
-{
-	return nfsd4_encode_name(rqstp, NFS4_ACL_WHO_NAMED, INVALID_UID, group,
-				 p, buflen);
-}
-
 static inline __be32
 nfsd4_encode_aclname(struct svc_rqst *rqstp, struct nfs4_ace *ace,
 		__be32 **p, int *buflen)
 {
-	kuid_t uid = INVALID_UID;
-	kgid_t gid = INVALID_GID;
-
-	if (ace->whotype == NFS4_ACL_WHO_NAMED) {
-		if (ace->flag & NFS4_ACE_IDENTIFIER_GROUP)
-			gid = ace->who_gid;
-		else
-			uid = ace->who_uid;
-	}
-	return nfsd4_encode_name(rqstp, ace->whotype, uid, gid, p, buflen);
+	if (ace->whotype != NFS4_ACL_WHO_NAMED)
+		return nfs4_acl_write_who(ace->whotype, p, buflen);
+	else if (ace->flag & NFS4_ACE_IDENTIFIER_GROUP)
+		return nfsd4_encode_group(rqstp, ace->who_gid, p, buflen);
+	else
+		return nfsd4_encode_user(rqstp, ace->who_uid, p, buflen);
 }
 
 #define WORD0_ABSENT_FS_ATTRS (FATTR4_WORD0_FS_LOCATIONS | FATTR4_WORD0_FSID | \
@@ -2090,7 +2058,7 @@
 	u32 bmval1 = bmval[1];
 	u32 bmval2 = bmval[2];
 	struct kstat stat;
-	struct svc_fh tempfh;
+	struct svc_fh *tempfh = NULL;
 	struct kstatfs statfs;
 	int buflen = count << 2;
 	__be32 *attrlenp;
@@ -2137,11 +2105,15 @@
 			goto out_nfserr;
 	}
 	if ((bmval0 & (FATTR4_WORD0_FILEHANDLE | FATTR4_WORD0_FSID)) && !fhp) {
-		fh_init(&tempfh, NFS4_FHSIZE);
-		status = fh_compose(&tempfh, exp, dentry, NULL);
+		tempfh = kmalloc(sizeof(struct svc_fh), GFP_KERNEL);
+		status = nfserr_jukebox;
+		if (!tempfh)
+			goto out;
+		fh_init(tempfh, NFS4_FHSIZE);
+		status = fh_compose(tempfh, exp, dentry, NULL);
 		if (status)
 			goto out;
-		fhp = &tempfh;
+		fhp = tempfh;
 	}
 	if (bmval0 & (FATTR4_WORD0_ACL | FATTR4_WORD0_ACLSUPPORT
 			| FATTR4_WORD0_SUPPORTED_ATTRS)) {
@@ -2222,8 +2194,10 @@
 		if ((buflen -= 4) < 0)
 			goto out_resource;
 		dummy = nfs4_file_type(stat.mode);
-		if (dummy == NF4BAD)
-			goto out_serverfault;
+		if (dummy == NF4BAD) {
+			status = nfserr_serverfault;
+			goto out;
+		}
 		WRITE32(dummy);
 	}
 	if (bmval0 & FATTR4_WORD0_FH_EXPIRE_TYPE) {
@@ -2317,8 +2291,6 @@
 			WRITE32(ace->flag);
 			WRITE32(ace->access_mask & NFS4_ACE_MASK_ALL);
 			status = nfsd4_encode_aclname(rqstp, ace, &p, &buflen);
-			if (status == nfserr_resource)
-				goto out_resource;
 			if (status)
 				goto out;
 		}
@@ -2379,8 +2351,6 @@
 	}
 	if (bmval0 & FATTR4_WORD0_FS_LOCATIONS) {
 		status = nfsd4_encode_fs_locations(rqstp, exp, &p, &buflen);
-		if (status == nfserr_resource)
-			goto out_resource;
 		if (status)
 			goto out;
 	}
@@ -2431,15 +2401,11 @@
 	}
 	if (bmval1 & FATTR4_WORD1_OWNER) {
 		status = nfsd4_encode_user(rqstp, stat.uid, &p, &buflen);
-		if (status == nfserr_resource)
-			goto out_resource;
 		if (status)
 			goto out;
 	}
 	if (bmval1 & FATTR4_WORD1_OWNER_GROUP) {
 		status = nfsd4_encode_group(rqstp, stat.gid, &p, &buflen);
-		if (status == nfserr_resource)
-			goto out_resource;
 		if (status)
 			goto out;
 	}
@@ -2533,8 +2499,8 @@
 		security_release_secctx(context, contextlen);
 #endif /* CONFIG_NFSD_V4_SECURITY_LABEL */
 	kfree(acl);
-	if (fhp == &tempfh)
-		fh_put(&tempfh);
+	if (tempfh)
+		fh_put(tempfh);
 	return status;
 out_nfserr:
 	status = nfserrno(err);
@@ -2542,9 +2508,6 @@
 out_resource:
 	status = nfserr_resource;
 	goto out;
-out_serverfault:
-	status = nfserr_serverfault;
-	goto out;
 }
 
 static inline int attributes_need_mount(u32 *bmval)
@@ -2621,17 +2584,14 @@
 static __be32 *
 nfsd4_encode_rdattr_error(__be32 *p, int buflen, __be32 nfserr)
 {
-	__be32 *attrlenp;
-
 	if (buflen < 6)
 		return NULL;
 	*p++ = htonl(2);
 	*p++ = htonl(FATTR4_WORD0_RDATTR_ERROR); /* bmval0 */
 	*p++ = htonl(0);			 /* bmval1 */
 
-	attrlenp = p++;
+	*p++ = htonl(4);     /* attribute length */
 	*p++ = nfserr;       /* no htonl */
-	*attrlenp = htonl((char *)p - (char *)attrlenp - 4);
 	return p;
 }
 
@@ -3244,7 +3204,7 @@
 
 		if (rpcauth_get_gssinfo(pf, &info) == 0) {
 			supported++;
-			RESERVE_SPACE(4 + 4 + info.oid.len + 4 + 4);
+			RESERVE_SPACE(4 + 4 + XDR_LEN(info.oid.len) + 4 + 4);
 			WRITE32(RPC_AUTH_GSS);
 			WRITE32(info.oid.len);
 			WRITEMEM(info.oid.data, info.oid.len);
@@ -3379,35 +3339,43 @@
 		8 /* eir_clientid */ +
 		4 /* eir_sequenceid */ +
 		4 /* eir_flags */ +
-		4 /* spr_how */ +
-		8 /* spo_must_enforce, spo_must_allow */ +
-		8 /* so_minor_id */ +
-		4 /* so_major_id.len */ +
-		(XDR_QUADLEN(major_id_sz) * 4) +
-		4 /* eir_server_scope.len */ +
-		(XDR_QUADLEN(server_scope_sz) * 4) +
-		4 /* eir_server_impl_id.count (0) */);
+		4 /* spr_how */);
 
 	WRITEMEM(&exid->clientid, 8);
 	WRITE32(exid->seqid);
 	WRITE32(exid->flags);
 
 	WRITE32(exid->spa_how);
+	ADJUST_ARGS();
+
 	switch (exid->spa_how) {
 	case SP4_NONE:
 		break;
 	case SP4_MACH_CRED:
+		/* spo_must_enforce, spo_must_allow */
+		RESERVE_SPACE(16);
+
 		/* spo_must_enforce bitmap: */
 		WRITE32(2);
 		WRITE32(nfs4_minimal_spo_must_enforce[0]);
 		WRITE32(nfs4_minimal_spo_must_enforce[1]);
 		/* empty spo_must_allow bitmap: */
 		WRITE32(0);
+
+		ADJUST_ARGS();
 		break;
 	default:
 		WARN_ON_ONCE(1);
 	}
 
+	RESERVE_SPACE(
+		8 /* so_minor_id */ +
+		4 /* so_major_id.len */ +
+		(XDR_QUADLEN(major_id_sz) * 4) +
+		4 /* eir_server_scope.len */ +
+		(XDR_QUADLEN(server_scope_sz) * 4) +
+		4 /* eir_server_impl_id.count (0) */);
+
 	/* The server_owner struct */
 	WRITE64(minor_id);      /* Minor id */
 	/* major id */
@@ -3474,28 +3442,6 @@
 }
 
 static __be32
-nfsd4_encode_destroy_session(struct nfsd4_compoundres *resp, __be32 nfserr,
-			     struct nfsd4_destroy_session *destroy_session)
-{
-	return nfserr;
-}
-
-static __be32
-nfsd4_encode_free_stateid(struct nfsd4_compoundres *resp, __be32 nfserr,
-			  struct nfsd4_free_stateid *free_stateid)
-{
-	__be32 *p;
-
-	if (nfserr)
-		return nfserr;
-
-	RESERVE_SPACE(4);
-	*p++ = nfserr;
-	ADJUST_ARGS();
-	return nfserr;
-}
-
-static __be32
 nfsd4_encode_sequence(struct nfsd4_compoundres *resp, __be32 nfserr,
 		      struct nfsd4_sequence *seq)
 {
@@ -3593,8 +3539,8 @@
 	[OP_BIND_CONN_TO_SESSION] = (nfsd4_enc)nfsd4_encode_bind_conn_to_session,
 	[OP_EXCHANGE_ID]	= (nfsd4_enc)nfsd4_encode_exchange_id,
 	[OP_CREATE_SESSION]	= (nfsd4_enc)nfsd4_encode_create_session,
-	[OP_DESTROY_SESSION]	= (nfsd4_enc)nfsd4_encode_destroy_session,
-	[OP_FREE_STATEID]	= (nfsd4_enc)nfsd4_encode_free_stateid,
+	[OP_DESTROY_SESSION]	= (nfsd4_enc)nfsd4_encode_noop,
+	[OP_FREE_STATEID]	= (nfsd4_enc)nfsd4_encode_noop,
 	[OP_GET_DIR_DELEGATION]	= (nfsd4_enc)nfsd4_encode_noop,
 	[OP_GETDEVICEINFO]	= (nfsd4_enc)nfsd4_encode_noop,
 	[OP_GETDEVICELIST]	= (nfsd4_enc)nfsd4_encode_noop,
diff --git a/fs/nfsd/nfscache.c b/fs/nfsd/nfscache.c
index b6af150..f8f060f 100644
--- a/fs/nfsd/nfscache.c
+++ b/fs/nfsd/nfscache.c
@@ -132,13 +132,6 @@
 }
 
 static void
-nfsd_reply_cache_unhash(struct svc_cacherep *rp)
-{
-	hlist_del_init(&rp->c_hash);
-	list_del_init(&rp->c_lru);
-}
-
-static void
 nfsd_reply_cache_free_locked(struct svc_cacherep *rp)
 {
 	if (rp->c_type == RC_REPLBUFF && rp->c_replvec.iov_base) {
@@ -416,22 +409,8 @@
 
 	/*
 	 * Since the common case is a cache miss followed by an insert,
-	 * preallocate an entry. First, try to reuse the first entry on the LRU
-	 * if it works, then go ahead and prune the LRU list.
+	 * preallocate an entry.
 	 */
-	spin_lock(&cache_lock);
-	if (!list_empty(&lru_head)) {
-		rp = list_first_entry(&lru_head, struct svc_cacherep, c_lru);
-		if (nfsd_cache_entry_expired(rp) ||
-		    num_drc_entries >= max_drc_entries) {
-			nfsd_reply_cache_unhash(rp);
-			prune_cache_entries();
-			goto search_cache;
-		}
-	}
-
-	/* No expired ones available, allocate a new one. */
-	spin_unlock(&cache_lock);
 	rp = nfsd_reply_cache_alloc();
 	spin_lock(&cache_lock);
 	if (likely(rp)) {
@@ -439,7 +418,9 @@
 		drc_mem_usage += sizeof(*rp);
 	}
 
-search_cache:
+	/* go ahead and prune the cache */
+	prune_cache_entries();
+
 	found = nfsd_cache_search(rqstp, csum);
 	if (found) {
 		if (likely(rp))
@@ -453,15 +434,6 @@
 		goto out;
 	}
 
-	/*
-	 * We're keeping the one we just allocated. Are we now over the
-	 * limit? Prune one off the tip of the LRU in trade for the one we
-	 * just allocated if so.
-	 */
-	if (num_drc_entries >= max_drc_entries)
-		nfsd_reply_cache_free_locked(list_first_entry(&lru_head,
-						struct svc_cacherep, c_lru));
-
 	nfsdstats.rcmisses++;
 	rqstp->rq_cacherep = rp;
 	rp->c_state = RC_INPROG;
diff --git a/fs/nfsd/nfssvc.c b/fs/nfsd/nfssvc.c
index 760c85a..9a4a5f9 100644
--- a/fs/nfsd/nfssvc.c
+++ b/fs/nfsd/nfssvc.c
@@ -241,6 +241,15 @@
 	nfsd_racache_shutdown();
 }
 
+static bool nfsd_needs_lockd(void)
+{
+#if defined(CONFIG_NFSD_V3)
+	return (nfsd_versions[2] != NULL) || (nfsd_versions[3] != NULL);
+#else
+	return (nfsd_versions[2] != NULL);
+#endif
+}
+
 static int nfsd_startup_net(int nrservs, struct net *net)
 {
 	struct nfsd_net *nn = net_generic(net, nfsd_net_id);
@@ -255,9 +264,14 @@
 	ret = nfsd_init_socks(net);
 	if (ret)
 		goto out_socks;
-	ret = lockd_up(net);
-	if (ret)
-		goto out_socks;
+
+	if (nfsd_needs_lockd() && !nn->lockd_up) {
+		ret = lockd_up(net);
+		if (ret)
+			goto out_socks;
+		nn->lockd_up = 1;
+	}
+
 	ret = nfs4_state_start_net(net);
 	if (ret)
 		goto out_lockd;
@@ -266,7 +280,10 @@
 	return 0;
 
 out_lockd:
-	lockd_down(net);
+	if (nn->lockd_up) {
+		lockd_down(net);
+		nn->lockd_up = 0;
+	}
 out_socks:
 	nfsd_shutdown_generic();
 	return ret;
@@ -277,7 +294,10 @@
 	struct nfsd_net *nn = net_generic(net, nfsd_net_id);
 
 	nfs4_state_shutdown_net(net);
-	lockd_down(net);
+	if (nn->lockd_up) {
+		lockd_down(net);
+		nn->lockd_up = 0;
+	}
 	nn->nfsd_net_up = false;
 	nfsd_shutdown_generic();
 }
diff --git a/fs/nfsd/nfsxdr.c b/fs/nfsd/nfsxdr.c
index 9c769a4..b17d932 100644
--- a/fs/nfsd/nfsxdr.c
+++ b/fs/nfsd/nfsxdr.c
@@ -152,7 +152,7 @@
 	type = (stat->mode & S_IFMT);
 
 	*p++ = htonl(nfs_ftypes[type >> 12]);
-	*p++ = htonl((u32) stat->mode);
+	*p++ = htonl((u32) (stat->mode & S_IALLUGO));
 	*p++ = htonl((u32) stat->nlink);
 	*p++ = htonl((u32) from_kuid(&init_user_ns, stat->uid));
 	*p++ = htonl((u32) from_kgid(&init_user_ns, stat->gid));
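
The nfsxdr.c change masks the mode word with S_IALLUGO (suid/sgid/sticky plus the rwx bits) so only permission bits go into the fattr; the file type is already carried by the preceding ftype word. A small userspace illustration of that split, with the mask spelled out since S_IALLUGO is a kernel-internal macro:

#include <stdio.h>
#include <sys/stat.h>

#define MODE_BITS 07777u	/* suid/sgid/sticky + rwx, what the kernel calls S_IALLUGO */

int main(void)
{
	unsigned int mode = S_IFREG | 04755;	/* regular file, setuid, rwxr-xr-x */

	printf("ftype bits: %o\n", mode & S_IFMT);	/* conveyed by the separate ftype word */
	printf("mode bits:  %o\n", mode & MODE_BITS);	/* what now goes into the mode word */
	return 0;
}
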
diff --git a/fs/nfsd/vfs.c b/fs/nfsd/vfs.c
index 1426eb6..017d3cb 100644
--- a/fs/nfsd/vfs.c
+++ b/fs/nfsd/vfs.c
@@ -207,7 +207,12 @@
 				goto out_nfserr;
 		}
 	} else {
-		fh_lock(fhp);
+		/*
+		 * In the nfsd4_open() case, this may be held across
+		 * subsequent open and delegation acquisition which may
+		 * need to take the child's i_mutex:
+		 */
+		fh_lock_nested(fhp, I_MUTEX_PARENT);
 		dentry = lookup_one_len(name, dparent, len);
 		host_err = PTR_ERR(dentry);
 		if (IS_ERR(dentry))
@@ -273,13 +278,6 @@
 	return err;
 }
 
-static int nfsd_break_lease(struct inode *inode)
-{
-	if (!S_ISREG(inode->i_mode))
-		return 0;
-	return break_lease(inode, O_WRONLY | O_NONBLOCK);
-}
-
 /*
  * Commit metadata changes to stable storage.
  */
@@ -348,8 +346,7 @@
 
 	/* Revoke setuid/setgid on chown */
 	if (!S_ISDIR(inode->i_mode) &&
-	    (((iap->ia_valid & ATTR_UID) && !uid_eq(iap->ia_uid, inode->i_uid)) ||
-	     ((iap->ia_valid & ATTR_GID) && !gid_eq(iap->ia_gid, inode->i_gid)))) {
+	    ((iap->ia_valid & ATTR_UID) || (iap->ia_valid & ATTR_GID))) {
 		iap->ia_valid |= ATTR_KILL_PRIV;
 		if (iap->ia_valid & ATTR_MODE) {
 			/* we're setting mode too, just clear the s*id bits */
@@ -449,16 +446,10 @@
 		goto out_put_write_access;
 	}
 
-	host_err = nfsd_break_lease(inode);
-	if (host_err)
-		goto out_put_write_access_nfserror;
-
 	fh_lock(fhp);
 	host_err = notify_change(dentry, iap, NULL);
 	fh_unlock(fhp);
 
-out_put_write_access_nfserror:
-	err = nfserrno(host_err);
 out_put_write_access:
 	if (size_change)
 		put_write_access(inode);
@@ -1609,11 +1600,6 @@
 	err = nfserr_noent;
 	if (!dold->d_inode)
 		goto out_dput;
-	host_err = nfsd_break_lease(dold->d_inode);
-	if (host_err) {
-		err = nfserrno(host_err);
-		goto out_dput;
-	}
 	host_err = vfs_link(dold, dirp, dnew, NULL);
 	if (!host_err) {
 		err = nfserrno(commit_metadata(ffhp));
@@ -1707,14 +1693,6 @@
 	if (ffhp->fh_export->ex_path.dentry != tfhp->fh_export->ex_path.dentry)
 		goto out_dput_new;
 
-	host_err = nfsd_break_lease(odentry->d_inode);
-	if (host_err)
-		goto out_dput_new;
-	if (ndentry->d_inode) {
-		host_err = nfsd_break_lease(ndentry->d_inode);
-		if (host_err)
-			goto out_dput_new;
-	}
 	host_err = vfs_rename(fdir, odentry, tdir, ndentry, NULL);
 	if (!host_err) {
 		host_err = commit_metadata(tfhp);
@@ -1784,16 +1762,12 @@
 	if (!type)
 		type = rdentry->d_inode->i_mode & S_IFMT;
 
-	host_err = nfsd_break_lease(rdentry->d_inode);
-	if (host_err)
-		goto out_put;
 	if (type != S_IFDIR)
 		host_err = vfs_unlink(dirp, rdentry, NULL);
 	else
 		host_err = vfs_rmdir(dirp, rdentry);
 	if (!host_err)
 		host_err = commit_metadata(fhp);
-out_put:
 	dput(rdentry);
 
 out_nfserr:
diff --git a/fs/nfsd/vfs.h b/fs/nfsd/vfs.h
index 1bc1d44..fbe90bd 100644
--- a/fs/nfsd/vfs.h
+++ b/fs/nfsd/vfs.h
@@ -86,8 +86,6 @@
 __be32		nfsd_rename(struct svc_rqst *,
 				struct svc_fh *, char *, int,
 				struct svc_fh *, char *, int);
-__be32		nfsd_remove(struct svc_rqst *,
-				struct svc_fh *, char *, int);
 __be32		nfsd_unlink(struct svc_rqst *, struct svc_fh *, int type,
 				char *name, int len);
 __be32		nfsd_readdir(struct svc_rqst *, struct svc_fh *,
diff --git a/fs/nfsd/xdr3.h b/fs/nfsd/xdr3.h
index b6d5542..335e04a 100644
--- a/fs/nfsd/xdr3.h
+++ b/fs/nfsd/xdr3.h
@@ -174,6 +174,9 @@
 struct nfsd3_readdirres {
 	__be32			status;
 	struct svc_fh		fh;
+	/* Just to save kmalloc on every readdirplus entry (svc_fh is a
+	 * little large for the stack): */
+	struct svc_fh		scratch;
 	int			count;
 	__be32			verf[2];
 
diff --git a/fs/nfsd/xdr4.h b/fs/nfsd/xdr4.h
index b3ed644..d278a0d 100644
--- a/fs/nfsd/xdr4.h
+++ b/fs/nfsd/xdr4.h
@@ -228,7 +228,7 @@
 	u32		op_create;     	    /* request */
 	u32		op_createmode;      /* request */
 	u32		op_bmval[3];        /* request */
-	struct iattr	iattr;              /* UNCHECKED4, GUARDED4, EXCLUSIVE4_1 */
+	struct iattr	op_iattr;           /* UNCHECKED4, GUARDED4, EXCLUSIVE4_1 */
 	nfs4_verifier	op_verf __attribute__((aligned(32)));
 					    /* EXCLUSIVE4 */
 	clientid_t	op_clientid;        /* request */
@@ -250,7 +250,6 @@
 	struct nfs4_acl *op_acl;
 	struct xdr_netobj op_label;
 };
-#define op_iattr	iattr
 
 struct nfsd4_open_confirm {
 	stateid_t	oc_req_stateid		/* request */;
@@ -374,7 +373,6 @@
 
 struct nfsd4_free_stateid {
 	stateid_t	fr_stateid;         /* request */
-	__be32		fr_status;          /* response */
 };
 
 /* also used for NVERIFY */
diff --git a/fs/nilfs2/segbuf.c b/fs/nilfs2/segbuf.c
index 2d8be51..dc3a9efd 100644
--- a/fs/nilfs2/segbuf.c
+++ b/fs/nilfs2/segbuf.c
@@ -416,7 +416,8 @@
 	}
 	if (likely(bio)) {
 		bio->bi_bdev = nilfs->ns_bdev;
-		bio->bi_sector = start << (nilfs->ns_blocksize_bits - 9);
+		bio->bi_iter.bi_sector =
+			start << (nilfs->ns_blocksize_bits - 9);
 	}
 	return bio;
 }
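
This hunk (and the ocfs2 heartbeat hunk further down) moves the value into bio->bi_iter.bi_sector but keeps the same arithmetic: a filesystem block number becomes a 512-byte sector index by shifting left by (blocksize_bits - 9). A stand-alone sketch of the conversion (blk_to_sector is just an illustrative helper):

#include <stdint.h>
#include <stdio.h>

/* Convert a filesystem block number to a 512-byte sector number. */
static uint64_t blk_to_sector(uint64_t blk, unsigned int blocksize_bits)
{
	return blk << (blocksize_bits - 9);
}

int main(void)
{
	/* 4 KiB blocks (blocksize_bits = 12): block 10 starts at sector 80 */
	printf("%llu\n", (unsigned long long)blk_to_sector(10, 12));
	return 0;
}
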
diff --git a/fs/notify/fanotify/fanotify.c b/fs/notify/fanotify/fanotify.c
index 5877262..0e792f5 100644
--- a/fs/notify/fanotify/fanotify.c
+++ b/fs/notify/fanotify/fanotify.c
@@ -16,12 +16,6 @@
 {
 	struct fanotify_event_info *old, *new;
 
-#ifdef CONFIG_FANOTIFY_ACCESS_PERMISSIONS
-	/* dont merge two permission events */
-	if ((old_fsn->mask & FAN_ALL_PERM_EVENTS) &&
-	    (new_fsn->mask & FAN_ALL_PERM_EVENTS))
-		return false;
-#endif
 	pr_debug("%s: old=%p new=%p\n", __func__, old_fsn, new_fsn);
 	old = FANOTIFY_E(old_fsn);
 	new = FANOTIFY_E(new_fsn);
@@ -34,14 +28,23 @@
 }
 
 /* and the list better be locked by something too! */
-static struct fsnotify_event *fanotify_merge(struct list_head *list,
-					     struct fsnotify_event *event)
+static int fanotify_merge(struct list_head *list, struct fsnotify_event *event)
 {
 	struct fsnotify_event *test_event;
 	bool do_merge = false;
 
 	pr_debug("%s: list=%p event=%p\n", __func__, list, event);
 
+#ifdef CONFIG_FANOTIFY_ACCESS_PERMISSIONS
+	/*
+	 * Don't merge a permission event with any other event so that we know
+	 * the event structure we have created in fanotify_handle_event() is the
+	 * one we should check for permission response.
+	 */
+	if (event->mask & FAN_ALL_PERM_EVENTS)
+		return 0;
+#endif
+
 	list_for_each_entry_reverse(test_event, list, list) {
 		if (should_merge(test_event, event)) {
 			do_merge = true;
@@ -50,10 +53,10 @@
 	}
 
 	if (!do_merge)
-		return NULL;
+		return 0;
 
 	test_event->mask |= event->mask;
-	return test_event;
+	return 1;
 }
 
 #ifdef CONFIG_FANOTIFY_ACCESS_PERMISSIONS
@@ -149,7 +152,6 @@
 	int ret = 0;
 	struct fanotify_event_info *event;
 	struct fsnotify_event *fsn_event;
-	struct fsnotify_event *notify_fsn_event;
 
 	BUILD_BUG_ON(FAN_ACCESS != FS_ACCESS);
 	BUILD_BUG_ON(FAN_MODIFY != FS_MODIFY);
@@ -188,21 +190,19 @@
 	event->response = 0;
 #endif
 
-	notify_fsn_event = fsnotify_add_notify_event(group, fsn_event,
-						     fanotify_merge);
-	if (notify_fsn_event) {
+	ret = fsnotify_add_notify_event(group, fsn_event, fanotify_merge);
+	if (ret) {
+		BUG_ON(mask & FAN_ALL_PERM_EVENTS);
 		/* Our event wasn't used in the end. Free it. */
 		fsnotify_destroy_event(group, fsn_event);
-		if (IS_ERR(notify_fsn_event))
-			return PTR_ERR(notify_fsn_event);
-		/* We need to ask about a different events after a merge... */
-		event = FANOTIFY_E(notify_fsn_event);
-		fsn_event = notify_fsn_event;
+		ret = 0;
 	}
 
 #ifdef CONFIG_FANOTIFY_ACCESS_PERMISSIONS
-	if (fsn_event->mask & FAN_ALL_PERM_EVENTS)
+	if (mask & FAN_ALL_PERM_EVENTS) {
 		ret = fanotify_get_response_from_access(group, event);
+		fsnotify_destroy_event(group, fsn_event);
+	}
 #endif
 	return ret;
 }
diff --git a/fs/notify/fanotify/fanotify.h b/fs/notify/fanotify/fanotify.h
index 0e90174..32a2f03 100644
--- a/fs/notify/fanotify/fanotify.h
+++ b/fs/notify/fanotify/fanotify.h
@@ -4,6 +4,13 @@
 
 extern struct kmem_cache *fanotify_event_cachep;
 
+/*
+ * Lifetime of the structure differs for normal and permission events. In both
+ * cases the structure is allocated in fanotify_handle_event(). For normal
+ * events the structure is freed immediately after reporting it to userspace.
+ * For permission events we free it only after we receive response from
+ * userspace.
+ */
 struct fanotify_event_info {
 	struct fsnotify_event fse;
 	/*
diff --git a/fs/notify/fanotify/fanotify_user.c b/fs/notify/fanotify/fanotify_user.c
index 1fd66ab..b6175fa 100644
--- a/fs/notify/fanotify/fanotify_user.c
+++ b/fs/notify/fanotify/fanotify_user.c
@@ -319,7 +319,12 @@
 			if (IS_ERR(kevent))
 				break;
 			ret = copy_event_to_user(group, kevent, buf);
-			fsnotify_destroy_event(group, kevent);
+			/*
+			 * Permission events get destroyed after we
+			 * receive the response

+			 */
+			if (!(kevent->mask & FAN_ALL_PERM_EVENTS))
+				fsnotify_destroy_event(group, kevent);
 			if (ret < 0)
 				break;
 			buf += ret;
diff --git a/fs/notify/inotify/inotify_fsnotify.c b/fs/notify/inotify/inotify_fsnotify.c
index aad1a35..d5ee563 100644
--- a/fs/notify/inotify/inotify_fsnotify.c
+++ b/fs/notify/inotify/inotify_fsnotify.c
@@ -53,15 +53,13 @@
 	return false;
 }
 
-static struct fsnotify_event *inotify_merge(struct list_head *list,
-					    struct fsnotify_event *event)
+static int inotify_merge(struct list_head *list,
+			  struct fsnotify_event *event)
 {
 	struct fsnotify_event *last_event;
 
 	last_event = list_entry(list->prev, struct fsnotify_event, list);
-	if (!event_compare(last_event, event))
-		return NULL;
-	return last_event;
+	return event_compare(last_event, event);
 }
 
 int inotify_handle_event(struct fsnotify_group *group,
@@ -73,9 +71,8 @@
 {
 	struct inotify_inode_mark *i_mark;
 	struct inotify_event_info *event;
-	struct fsnotify_event *added_event;
 	struct fsnotify_event *fsn_event;
-	int ret = 0;
+	int ret;
 	int len = 0;
 	int alloc_len = sizeof(struct inotify_event_info);
 
@@ -110,18 +107,16 @@
 	if (len)
 		strcpy(event->name, file_name);
 
-	added_event = fsnotify_add_notify_event(group, fsn_event, inotify_merge);
-	if (added_event) {
+	ret = fsnotify_add_notify_event(group, fsn_event, inotify_merge);
+	if (ret) {
 		/* Our event wasn't used in the end. Free it. */
 		fsnotify_destroy_event(group, fsn_event);
-		if (IS_ERR(added_event))
-			ret = PTR_ERR(added_event);
 	}
 
 	if (inode_mark->mask & IN_ONESHOT)
 		fsnotify_destroy_mark(inode_mark, group);
 
-	return ret;
+	return 0;
 }
 
 static void inotify_freeing_mark(struct fsnotify_mark *fsn_mark, struct fsnotify_group *group)
diff --git a/fs/notify/notification.c b/fs/notify/notification.c
index 952237b..18b3c44 100644
--- a/fs/notify/notification.c
+++ b/fs/notify/notification.c
@@ -79,15 +79,15 @@
 
 /*
  * Add an event to the group notification queue.  The group can later pull this
- * event off the queue to deal with.  If the event is successfully added to the
- * group's notification queue, a reference is taken on event.
+ * event off the queue to deal with.  The function returns 0 if the event was
+ * added to the queue, 1 if the event was merged with some other queued event.
  */
-struct fsnotify_event *fsnotify_add_notify_event(struct fsnotify_group *group,
-						 struct fsnotify_event *event,
-						 struct fsnotify_event *(*merge)(struct list_head *,
-										 struct fsnotify_event *))
+int fsnotify_add_notify_event(struct fsnotify_group *group,
+			      struct fsnotify_event *event,
+			      int (*merge)(struct list_head *,
+					   struct fsnotify_event *))
 {
-	struct fsnotify_event *return_event = NULL;
+	int ret = 0;
 	struct list_head *list = &group->notification_list;
 
 	pr_debug("%s: group=%p event=%p\n", __func__, group, event);
@@ -98,14 +98,14 @@
 		/* Queue overflow event only if it isn't already queued */
 		if (list_empty(&group->overflow_event.list))
 			event = &group->overflow_event;
-		return_event = event;
+		ret = 1;
 	}
 
 	if (!list_empty(list) && merge) {
-		return_event = merge(list, event);
-		if (return_event) {
+		ret = merge(list, event);
+		if (ret) {
 			mutex_unlock(&group->notification_mutex);
-			return return_event;
+			return ret;
 		}
 	}
 
@@ -115,7 +115,7 @@
 
 	wake_up(&group->notification_waitq);
 	kill_fasync(&group->fsn_fa, SIGIO, POLL_IN);
-	return return_event;
+	return ret;
 }
 
 /*
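
The notification.c comment above spells out the new contract for the merge callback: fsnotify_add_notify_event() now returns 0 when the event was queued and 1 when it was merged into an already-queued event, in which case the caller frees its own copy. A simplified stand-alone sketch of that pattern (queue_event and merge_same_object are illustrative names, not the kernel API):

#include <stdio.h>
#include <stdlib.h>

struct event {
	int id;			/* which object the event describes */
	int mask;		/* what happened to it */
	struct event *next;
};

/* Same contract as the reworked callback: return 0 to queue ev, 1 if merged. */
static int merge_same_object(struct event *tail, struct event *ev)
{
	if (tail->id != ev->id)
		return 0;
	tail->mask |= ev->mask;	/* fold the new bits into the queued event */
	return 1;
}

/* Returns 0 if ev was appended, 1 if it was merged and the caller must free it. */
static int queue_event(struct event **head, struct event *ev,
		       int (*merge)(struct event *, struct event *))
{
	struct event **pp = head;

	while (*pp && (*pp)->next)
		pp = &(*pp)->next;

	if (*pp && merge(*pp, ev))
		return 1;

	ev->next = NULL;
	if (*pp)
		(*pp)->next = ev;
	else
		*pp = ev;
	return 0;
}

int main(void)
{
	struct event *head = NULL;
	struct event *a = calloc(1, sizeof(*a));
	struct event *b = calloc(1, sizeof(*b));

	a->id = 1; a->mask = 0x1;
	b->id = 1; b->mask = 0x2;

	queue_event(&head, a, merge_same_object);
	if (queue_event(&head, b, merge_same_object))
		free(b);			/* "our event wasn't used in the end" */

	printf("queued mask: %#x\n", head->mask);	/* 0x3 */
	free(a);
	return 0;
}
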
diff --git a/fs/ntfs/file.c b/fs/ntfs/file.c
index ea4ba9d..db9bd8a 100644
--- a/fs/ntfs/file.c
+++ b/fs/ntfs/file.c
@@ -2134,7 +2134,7 @@
 	ret = ntfs_file_aio_write_nolock(iocb, iov, nr_segs, &iocb->ki_pos);
 	mutex_unlock(&inode->i_mutex);
 	if (ret > 0) {
-		int err = generic_write_sync(file, pos, ret);
+		int err = generic_write_sync(file, iocb->ki_pos - ret, ret);
 		if (err < 0)
 			ret = err;
 	}
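
The ntfs fix works because iocb->ki_pos has already been advanced past the written data by the time generic_write_sync() runs, so the range to sync starts at ki_pos - ret rather than at the stale pos variable. The arithmetic in isolation:

#include <stdio.h>

int main(void)
{
	long long pos_before = 4096;		/* offset the write started at */
	long long ret = 512;			/* bytes actually written */
	long long ki_pos = pos_before + ret;	/* file position after the write */

	/* range that generic_write_sync() should flush: [start, start + ret) */
	long long start = ki_pos - ret;

	printf("sync [%lld, %lld)\n", start, start + ret);
	return 0;
}
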
diff --git a/fs/ocfs2/alloc.c b/fs/ocfs2/alloc.c
index 8750ae1..e2edff3 100644
--- a/fs/ocfs2/alloc.c
+++ b/fs/ocfs2/alloc.c
@@ -4742,6 +4742,7 @@
 				enum ocfs2_alloc_restarted *reason_ret)
 {
 	int status = 0, err = 0;
+	int need_free = 0;
 	int free_extents;
 	enum ocfs2_alloc_restarted reason = RESTART_NONE;
 	u32 bit_off, num_bits;
@@ -4796,7 +4797,8 @@
 					      OCFS2_JOURNAL_ACCESS_WRITE);
 	if (status < 0) {
 		mlog_errno(status);
-		goto leave;
+		need_free = 1;
+		goto bail;
 	}
 
 	block = ocfs2_clusters_to_blocks(osb->sb, bit_off);
@@ -4807,7 +4809,8 @@
 				     num_bits, flags, meta_ac);
 	if (status < 0) {
 		mlog_errno(status);
-		goto leave;
+		need_free = 1;
+		goto bail;
 	}
 
 	ocfs2_journal_dirty(handle, et->et_root_bh);
@@ -4821,6 +4824,19 @@
 		reason = RESTART_TRANS;
 	}
 
+bail:
+	if (need_free) {
+		if (data_ac->ac_which == OCFS2_AC_USE_LOCAL)
+			ocfs2_free_local_alloc_bits(osb, handle, data_ac,
+					bit_off, num_bits);
+		else
+			ocfs2_free_clusters(handle,
+					data_ac->ac_inode,
+					data_ac->ac_bh,
+					ocfs2_clusters_to_blocks(osb->sb, bit_off),
+					num_bits);
+	}
+
 leave:
 	if (reason_ret)
 		*reason_ret = reason;
@@ -6805,6 +6821,8 @@
 					 struct buffer_head *di_bh)
 {
 	int ret, i, has_data, num_pages = 0;
+	int need_free = 0;
+	u32 bit_off, num;
 	handle_t *handle;
 	u64 uninitialized_var(block);
 	struct ocfs2_inode_info *oi = OCFS2_I(inode);
@@ -6850,7 +6868,6 @@
 	}
 
 	if (has_data) {
-		u32 bit_off, num;
 		unsigned int page_end;
 		u64 phys;
 
@@ -6886,6 +6903,7 @@
 		ret = ocfs2_grab_eof_pages(inode, 0, end, pages, &num_pages);
 		if (ret) {
 			mlog_errno(ret);
+			need_free = 1;
 			goto out_commit;
 		}
 
@@ -6896,6 +6914,7 @@
 		ret = ocfs2_read_inline_data(inode, pages[0], di_bh);
 		if (ret) {
 			mlog_errno(ret);
+			need_free = 1;
 			goto out_commit;
 		}
 
@@ -6927,6 +6946,7 @@
 		ret = ocfs2_insert_extent(handle, &et, 0, block, 1, 0, NULL);
 		if (ret) {
 			mlog_errno(ret);
+			need_free = 1;
 			goto out_commit;
 		}
 
@@ -6938,6 +6958,18 @@
 		dquot_free_space_nodirty(inode,
 					  ocfs2_clusters_to_bytes(osb->sb, 1));
 
+	if (need_free) {
+		if (data_ac->ac_which == OCFS2_AC_USE_LOCAL)
+			ocfs2_free_local_alloc_bits(osb, handle, data_ac,
+					bit_off, num);
+		else
+			ocfs2_free_clusters(handle,
+					data_ac->ac_inode,
+					data_ac->ac_bh,
+					ocfs2_clusters_to_blocks(osb->sb, bit_off),
+					num);
+	}
+
 	ocfs2_commit_trans(osb, handle);
 
 out_unlock:
@@ -7126,7 +7158,7 @@
 	if (end > i_size_read(inode))
 		end = i_size_read(inode);
 
-	BUG_ON(start >= end);
+	BUG_ON(start > end);
 
 	if (!(OCFS2_I(inode)->ip_dyn_features & OCFS2_INLINE_DATA_FL) ||
 	    !(le16_to_cpu(di->i_dyn_features) & OCFS2_INLINE_DATA_FL) ||
diff --git a/fs/ocfs2/cluster/heartbeat.c b/fs/ocfs2/cluster/heartbeat.c
index 73920ff..bf482df 100644
--- a/fs/ocfs2/cluster/heartbeat.c
+++ b/fs/ocfs2/cluster/heartbeat.c
@@ -413,7 +413,7 @@
 	}
 
 	/* Must put everything in 512 byte sectors for the bio... */
-	bio->bi_sector = (reg->hr_start_block + cs) << (bits - 9);
+	bio->bi_iter.bi_sector = (reg->hr_start_block + cs) << (bits - 9);
 	bio->bi_bdev = reg->hr_bdev;
 	bio->bi_private = wc;
 	bio->bi_end_io = o2hb_bio_end_io;
diff --git a/fs/ocfs2/file.c b/fs/ocfs2/file.c
index d77d71e..8450262bc 100644
--- a/fs/ocfs2/file.c
+++ b/fs/ocfs2/file.c
@@ -185,6 +185,9 @@
 			      file->f_path.dentry->d_name.name,
 			      (unsigned long long)datasync);
 
+	if (ocfs2_is_hard_readonly(osb) || ocfs2_is_soft_readonly(osb))
+		return -EROFS;
+
 	err = filemap_write_and_wait_range(inode->i_mapping, start, end);
 	if (err)
 		return err;
@@ -474,11 +477,6 @@
 		goto bail;
 	}
 
-	/* lets handle the simple truncate cases before doing any more
-	 * cluster locking. */
-	if (new_i_size == le64_to_cpu(fe->i_size))
-		goto bail;
-
 	down_write(&OCFS2_I(inode)->ip_alloc_sem);
 
 	ocfs2_resv_discard(&osb->osb_la_resmap,
@@ -718,7 +716,8 @@
  * While a write will already be ordering the data, a truncate will not.
  * Thus, we need to explicitly order the zeroed pages.
  */
-static handle_t *ocfs2_zero_start_ordered_transaction(struct inode *inode)
+static handle_t *ocfs2_zero_start_ordered_transaction(struct inode *inode,
+						struct buffer_head *di_bh)
 {
 	struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
 	handle_t *handle = NULL;
@@ -735,7 +734,14 @@
 	}
 
 	ret = ocfs2_jbd2_file_inode(handle, inode);
-	if (ret < 0)
+	if (ret < 0) {
+		mlog_errno(ret);
+		goto out;
+	}
+
+	ret = ocfs2_journal_access_di(handle, INODE_CACHE(inode), di_bh,
+				      OCFS2_JOURNAL_ACCESS_WRITE);
+	if (ret)
 		mlog_errno(ret);
 
 out:
@@ -751,7 +757,7 @@
  * to be too fragile to do exactly what we need without us having to
  * worry about recursive locking in ->write_begin() and ->write_end(). */
 static int ocfs2_write_zero_page(struct inode *inode, u64 abs_from,
-				 u64 abs_to)
+				 u64 abs_to, struct buffer_head *di_bh)
 {
 	struct address_space *mapping = inode->i_mapping;
 	struct page *page;
@@ -759,6 +765,7 @@
 	handle_t *handle = NULL;
 	int ret = 0;
 	unsigned zero_from, zero_to, block_start, block_end;
+	struct ocfs2_dinode *di = (struct ocfs2_dinode *)di_bh->b_data;
 
 	BUG_ON(abs_from >= abs_to);
 	BUG_ON(abs_to > (((u64)index + 1) << PAGE_CACHE_SHIFT));
@@ -801,7 +808,8 @@
 		}
 
 		if (!handle) {
-			handle = ocfs2_zero_start_ordered_transaction(inode);
+			handle = ocfs2_zero_start_ordered_transaction(inode,
+								      di_bh);
 			if (IS_ERR(handle)) {
 				ret = PTR_ERR(handle);
 				handle = NULL;
@@ -818,8 +826,22 @@
 			ret = 0;
 	}
 
-	if (handle)
+	if (handle) {
+		/*
+		 * fs-writeback can release dirty pages without taking the page
+		 * lock when their offsets are beyond the inode size; the release
+		 * happens in block_write_full_page_endio().
+		 */
+		i_size_write(inode, abs_to);
+		inode->i_blocks = ocfs2_inode_sector_count(inode);
+		di->i_size = cpu_to_le64((u64)i_size_read(inode));
+		inode->i_mtime = inode->i_ctime = CURRENT_TIME;
+		di->i_mtime = di->i_ctime = cpu_to_le64(inode->i_mtime.tv_sec);
+		di->i_ctime_nsec = cpu_to_le32(inode->i_mtime.tv_nsec);
+		di->i_mtime_nsec = di->i_ctime_nsec;
+		ocfs2_journal_dirty(handle, di_bh);
 		ocfs2_commit_trans(OCFS2_SB(inode->i_sb), handle);
+	}
 
 out_unlock:
 	unlock_page(page);
@@ -915,7 +937,7 @@
  * has made sure that the entire range needs zeroing.
  */
 static int ocfs2_zero_extend_range(struct inode *inode, u64 range_start,
-				   u64 range_end)
+				   u64 range_end, struct buffer_head *di_bh)
 {
 	int rc = 0;
 	u64 next_pos;
@@ -931,7 +953,7 @@
 		next_pos = (zero_pos & PAGE_CACHE_MASK) + PAGE_CACHE_SIZE;
 		if (next_pos > range_end)
 			next_pos = range_end;
-		rc = ocfs2_write_zero_page(inode, zero_pos, next_pos);
+		rc = ocfs2_write_zero_page(inode, zero_pos, next_pos, di_bh);
 		if (rc < 0) {
 			mlog_errno(rc);
 			break;
@@ -977,7 +999,7 @@
 			range_end = zero_to_size;
 
 		ret = ocfs2_zero_extend_range(inode, range_start,
-					      range_end);
+					      range_end, di_bh);
 		if (ret) {
 			mlog_errno(ret);
 			break;
@@ -1145,14 +1167,14 @@
 		goto bail_unlock_rw;
 	}
 
-	if (size_change && attr->ia_size != i_size_read(inode)) {
+	if (size_change) {
 		status = inode_newsize_ok(inode, attr->ia_size);
 		if (status)
 			goto bail_unlock;
 
 		inode_dio_wait(inode);
 
-		if (i_size_read(inode) > attr->ia_size) {
+		if (i_size_read(inode) >= attr->ia_size) {
 			if (ocfs2_should_order_data(inode)) {
 				status = ocfs2_begin_ordered_truncate(inode,
 								      attr->ia_size);
diff --git a/fs/ocfs2/localalloc.c b/fs/ocfs2/localalloc.c
index cd5496b..0440134 100644
--- a/fs/ocfs2/localalloc.c
+++ b/fs/ocfs2/localalloc.c
@@ -781,6 +781,48 @@
 	return status;
 }
 
+int ocfs2_free_local_alloc_bits(struct ocfs2_super *osb,
+				handle_t *handle,
+				struct ocfs2_alloc_context *ac,
+				u32 bit_off,
+				u32 num_bits)
+{
+	int status, start;
+	u32 clear_bits;
+	struct inode *local_alloc_inode;
+	void *bitmap;
+	struct ocfs2_dinode *alloc;
+	struct ocfs2_local_alloc *la;
+
+	BUG_ON(ac->ac_which != OCFS2_AC_USE_LOCAL);
+
+	local_alloc_inode = ac->ac_inode;
+	alloc = (struct ocfs2_dinode *) osb->local_alloc_bh->b_data;
+	la = OCFS2_LOCAL_ALLOC(alloc);
+
+	bitmap = la->la_bitmap;
+	start = bit_off - le32_to_cpu(la->la_bm_off);
+	clear_bits = num_bits;
+
+	status = ocfs2_journal_access_di(handle,
+			INODE_CACHE(local_alloc_inode),
+			osb->local_alloc_bh,
+			OCFS2_JOURNAL_ACCESS_WRITE);
+	if (status < 0) {
+		mlog_errno(status);
+		goto bail;
+	}
+
+	while (clear_bits--)
+		ocfs2_clear_bit(start++, bitmap);
+
+	le32_add_cpu(&alloc->id1.bitmap1.i_used, -num_bits);
+	ocfs2_journal_dirty(handle, osb->local_alloc_bh);
+
+bail:
+	return status;
+}
+
 static u32 ocfs2_local_alloc_count_bits(struct ocfs2_dinode *alloc)
 {
 	u32 count;
diff --git a/fs/ocfs2/localalloc.h b/fs/ocfs2/localalloc.h
index 1be9b58..44a7d1f 100644
--- a/fs/ocfs2/localalloc.h
+++ b/fs/ocfs2/localalloc.h
@@ -55,6 +55,12 @@
 				 u32 *bit_off,
 				 u32 *num_bits);
 
+int ocfs2_free_local_alloc_bits(struct ocfs2_super *osb,
+				handle_t *handle,
+				struct ocfs2_alloc_context *ac,
+				u32 bit_off,
+				u32 num_bits);
+
 void ocfs2_local_alloc_seen_free_bits(struct ocfs2_super *osb,
 				      unsigned int num_clusters);
 void ocfs2_la_enable_worker(struct work_struct *work);
diff --git a/fs/ocfs2/namei.c b/fs/ocfs2/namei.c
index f4d609b..3683643 100644
--- a/fs/ocfs2/namei.c
+++ b/fs/ocfs2/namei.c
@@ -664,6 +664,7 @@
 	struct ocfs2_super *osb = OCFS2_SB(dir->i_sb);
 	struct ocfs2_dir_lookup_result lookup = { NULL, };
 	sigset_t oldset;
+	u64 old_de_ino;
 
 	trace_ocfs2_link((unsigned long long)OCFS2_I(inode)->ip_blkno,
 			 old_dentry->d_name.len, old_dentry->d_name.name,
@@ -686,6 +687,22 @@
 		goto out;
 	}
 
+	err = ocfs2_lookup_ino_from_name(dir, old_dentry->d_name.name,
+			old_dentry->d_name.len, &old_de_ino);
+	if (err) {
+		err = -ENOENT;
+		goto out;
+	}
+
+	/*
+	 * Check whether another node removed the source inode while we
+	 * were in the vfs.
+	 */
+	if (old_de_ino != OCFS2_I(inode)->ip_blkno) {
+		err = -ENOENT;
+		goto out;
+	}
+
 	err = ocfs2_check_dir_for_entry(dir, dentry->d_name.name,
 					dentry->d_name.len);
 	if (err)
diff --git a/fs/posix_acl.c b/fs/posix_acl.c
index 38bae5a..11c54fd 100644
--- a/fs/posix_acl.c
+++ b/fs/posix_acl.c
@@ -521,8 +521,11 @@
 		return -EOPNOTSUPP;
 
 	acl = get_acl(inode, ACL_TYPE_ACCESS);
-	if (IS_ERR_OR_NULL(acl))
+	if (IS_ERR_OR_NULL(acl)) {
+		if (acl == ERR_PTR(-EOPNOTSUPP))
+			return 0;
 		return PTR_ERR(acl);
+	}
 
 	ret = __posix_acl_chmod(&acl, GFP_KERNEL, mode);
 	if (ret)
@@ -544,14 +547,15 @@
 		goto no_acl;
 
 	p = get_acl(dir, ACL_TYPE_DEFAULT);
-	if (IS_ERR(p))
+	if (IS_ERR(p)) {
+		if (p == ERR_PTR(-EOPNOTSUPP))
+			goto apply_umask;
 		return PTR_ERR(p);
-
-	if (!p) {
-		*mode &= ~current_umask();
-		goto no_acl;
 	}
 
+	if (!p)
+		goto apply_umask;
+
 	*acl = posix_acl_clone(p, GFP_NOFS);
 	if (!*acl)
 		return -ENOMEM;
@@ -575,6 +579,8 @@
 	}
 	return 0;
 
+apply_umask:
+	*mode &= ~current_umask();
 no_acl:
 	*default_acl = NULL;
 	*acl = NULL;
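
The posix_acl.c hunk treats -EOPNOTSUPP from get_acl() like the no-default-ACL case: fall back to clearing the umask bits from the requested mode. The fallback itself is ordinary umask arithmetic:

#include <stdio.h>

int main(void)
{
	unsigned int requested = 0666;	/* mode passed to open()/mknod() */
	unsigned int umask_bits = 022;	/* current process umask */

	/* with no default ACL to consult, the umask bits are removed from the mode */
	printf("resulting mode: %o\n", requested & ~umask_bits);	/* 0644 */
	return 0;
}
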
diff --git a/fs/proc/vmcore.c b/fs/proc/vmcore.c
index 2ca7ba0..88d4585 100644
--- a/fs/proc/vmcore.c
+++ b/fs/proc/vmcore.c
@@ -468,17 +468,24 @@
 			return rc;
 		}
 		nhdr_ptr = notes_section;
-		while (real_sz < max_sz) {
-			if (nhdr_ptr->n_namesz == 0)
-				break;
+		while (nhdr_ptr->n_namesz != 0) {
 			sz = sizeof(Elf64_Nhdr) +
 				((nhdr_ptr->n_namesz + 3) & ~3) +
 				((nhdr_ptr->n_descsz + 3) & ~3);
+			if ((real_sz + sz) > max_sz) {
+				pr_warn("Warning: Exceeded p_memsz, dropping PT_NOTE entry n_namesz=0x%x, n_descsz=0x%x\n",
+					nhdr_ptr->n_namesz, nhdr_ptr->n_descsz);
+				break;
+			}
 			real_sz += sz;
 			nhdr_ptr = (Elf64_Nhdr*)((char*)nhdr_ptr + sz);
 		}
 		kfree(notes_section);
 		phdr_ptr->p_memsz = real_sz;
+		if (real_sz == 0) {
+			pr_warn("Warning: Zero PT_NOTE entries found\n");
+			return -EINVAL;
+		}
 	}
 
 	return 0;
@@ -648,17 +655,24 @@
 			return rc;
 		}
 		nhdr_ptr = notes_section;
-		while (real_sz < max_sz) {
-			if (nhdr_ptr->n_namesz == 0)
-				break;
+		while (nhdr_ptr->n_namesz != 0) {
 			sz = sizeof(Elf32_Nhdr) +
 				((nhdr_ptr->n_namesz + 3) & ~3) +
 				((nhdr_ptr->n_descsz + 3) & ~3);
+			if ((real_sz + sz) > max_sz) {
+				pr_warn("Warning: Exceeded p_memsz, dropping PT_NOTE entry n_namesz=0x%x, n_descsz=0x%x\n",
+					nhdr_ptr->n_namesz, nhdr_ptr->n_descsz);
+				break;
+			}
 			real_sz += sz;
 			nhdr_ptr = (Elf32_Nhdr*)((char*)nhdr_ptr + sz);
 		}
 		kfree(notes_section);
 		phdr_ptr->p_memsz = real_sz;
+		if (real_sz == 0) {
+			pr_warn("Warning: Zero PT_NOTE entries found\n");
+			return -EINVAL;
+		}
 	}
 
 	return 0;
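
Both vmcore hunks size a PT_NOTE record the same way before comparing it against p_memsz: the fixed note header plus the name and the descriptor, each padded to 4 bytes. A userspace sketch of that computation (note_record_size is an illustrative helper):

#include <elf.h>
#include <stdio.h>

/* Bytes occupied by one note record: fixed header plus 4-byte padded name/desc. */
static size_t note_record_size(const Elf64_Nhdr *n)
{
	return sizeof(*n) +
	       ((n->n_namesz + 3) & ~3u) +
	       ((n->n_descsz + 3) & ~3u);
}

int main(void)
{
	Elf64_Nhdr n = { .n_namesz = 5, .n_descsz = 336, .n_type = 1 };

	/* 12-byte header + 8 (padded "CORE\0") + 336 = 356 bytes */
	printf("%zu\n", note_record_size(&n));
	return 0;
}
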
diff --git a/fs/read_write.c b/fs/read_write.c
index 1193ffd..edc5746 100644
--- a/fs/read_write.c
+++ b/fs/read_write.c
@@ -964,9 +964,9 @@
 	return ret;
 }
 
-COMPAT_SYSCALL_DEFINE3(readv, unsigned long, fd,
+COMPAT_SYSCALL_DEFINE3(readv, compat_ulong_t, fd,
 		const struct compat_iovec __user *,vec,
-		unsigned long, vlen)
+		compat_ulong_t, vlen)
 {
 	struct fd f = fdget(fd);
 	ssize_t ret;
@@ -1001,9 +1001,9 @@
 	return ret;
 }
 
-COMPAT_SYSCALL_DEFINE5(preadv, unsigned long, fd,
+COMPAT_SYSCALL_DEFINE5(preadv, compat_ulong_t, fd,
 		const struct compat_iovec __user *,vec,
-		unsigned long, vlen, u32, pos_low, u32, pos_high)
+		compat_ulong_t, vlen, u32, pos_low, u32, pos_high)
 {
 	loff_t pos = ((loff_t)pos_high << 32) | pos_low;
 	return compat_sys_preadv64(fd, vec, vlen, pos);
@@ -1031,9 +1031,9 @@
 	return ret;
 }
 
-COMPAT_SYSCALL_DEFINE3(writev, unsigned long, fd,
+COMPAT_SYSCALL_DEFINE3(writev, compat_ulong_t, fd,
 		const struct compat_iovec __user *, vec,
-		unsigned long, vlen)
+		compat_ulong_t, vlen)
 {
 	struct fd f = fdget(fd);
 	ssize_t ret;
@@ -1068,9 +1068,9 @@
 	return ret;
 }
 
-COMPAT_SYSCALL_DEFINE5(pwritev, unsigned long, fd,
+COMPAT_SYSCALL_DEFINE5(pwritev, compat_ulong_t, fd,
 		const struct compat_iovec __user *,vec,
-		unsigned long, vlen, u32, pos_low, u32, pos_high)
+		compat_ulong_t, vlen, u32, pos_low, u32, pos_high)
 {
 	loff_t pos = ((loff_t)pos_high << 32) | pos_low;
 	return compat_sys_pwritev64(fd, vec, vlen, pos);
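
The compat_ulong_t changes in read_write.c presumably follow the usual compat-syscall rule: a 32-bit task only sets the low 32 bits of an argument register, so fd and vlen should be taken at 32-bit width rather than as a full unsigned long. A toy illustration of the difference:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* a 64-bit argument register whose upper half holds stale bits */
	uint64_t reg = 0xdeadbeef00000003ull;

	printf("as unsigned long: %llu\n", (unsigned long long)reg);
	printf("as compat arg:    %u\n", (uint32_t)reg);	/* the fd the 32-bit task meant: 3 */
	return 0;
}
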
diff --git a/fs/reiserfs/do_balan.c b/fs/reiserfs/do_balan.c
index 2b7882b..9a3c68c 100644
--- a/fs/reiserfs/do_balan.c
+++ b/fs/reiserfs/do_balan.c
@@ -324,23 +324,17 @@
 			switch (flag) {
 			case M_INSERT:	/* insert item into L[0] */
 
-				if (item_pos == tb->lnum[0] - 1
-				    && tb->lbytes != -1) {
+				if (item_pos == tb->lnum[0] - 1 && tb->lbytes != -1) {
 					/* part of new item falls into L[0] */
 					int new_item_len;
 					int version;
 
-					ret_val =
-					    leaf_shift_left(tb, tb->lnum[0] - 1,
-							    -1);
+					ret_val = leaf_shift_left(tb, tb->lnum[0] - 1, -1);
 
 					/* Calculate item length to insert to S[0] */
-					new_item_len =
-					    ih_item_len(ih) - tb->lbytes;
+					new_item_len = ih_item_len(ih) - tb->lbytes;
 					/* Calculate and check item length to insert to L[0] */
-					put_ih_item_len(ih,
-							ih_item_len(ih) -
-							new_item_len);
+					put_ih_item_len(ih, ih_item_len(ih) - new_item_len);
 
 					RFALSE(ih_item_len(ih) <= 0,
 					       "PAP-12080: there is nothing to insert into L[0]: ih_item_len=%d",
@@ -349,30 +343,18 @@
 					/* Insert new item into L[0] */
 					buffer_info_init_left(tb, &bi);
 					leaf_insert_into_buf(&bi,
-							     n + item_pos -
-							     ret_val, ih, body,
-							     zeros_num >
-							     ih_item_len(ih) ?
-							     ih_item_len(ih) :
-							     zeros_num);
+							n + item_pos - ret_val, ih, body,
+							zeros_num > ih_item_len(ih) ? ih_item_len(ih) : zeros_num);
 
 					version = ih_version(ih);
 
 					/* Calculate key component, item length and body to insert into S[0] */
-					set_le_ih_k_offset(ih,
-							   le_ih_k_offset(ih) +
-							   (tb->
-							    lbytes <<
-							    (is_indirect_le_ih
-							     (ih) ? tb->tb_sb->
-							     s_blocksize_bits -
-							     UNFM_P_SHIFT :
-							     0)));
+					set_le_ih_k_offset(ih, le_ih_k_offset(ih) +
+							(tb->lbytes << (is_indirect_le_ih(ih) ? tb->tb_sb->s_blocksize_bits - UNFM_P_SHIFT : 0)));
 
 					put_ih_item_len(ih, new_item_len);
 					if (tb->lbytes > zeros_num) {
-						body +=
-						    (tb->lbytes - zeros_num);
+						body += (tb->lbytes - zeros_num);
 						zeros_num = 0;
 					} else
 						zeros_num -= tb->lbytes;
@@ -383,15 +365,10 @@
 				} else {
 					/* new item in whole falls into L[0] */
 					/* Shift lnum[0]-1 items to L[0] */
-					ret_val =
-					    leaf_shift_left(tb, tb->lnum[0] - 1,
-							    tb->lbytes);
+					ret_val = leaf_shift_left(tb, tb->lnum[0] - 1, tb->lbytes);
 					/* Insert new item into L[0] */
 					buffer_info_init_left(tb, &bi);
-					leaf_insert_into_buf(&bi,
-							     n + item_pos -
-							     ret_val, ih, body,
-							     zeros_num);
+					leaf_insert_into_buf(&bi, n + item_pos - ret_val, ih, body, zeros_num);
 					tb->insert_size[0] = 0;
 					zeros_num = 0;
 				}
@@ -399,264 +376,117 @@
 
 			case M_PASTE:	/* append item in L[0] */
 
-				if (item_pos == tb->lnum[0] - 1
-				    && tb->lbytes != -1) {
+				if (item_pos == tb->lnum[0] - 1 && tb->lbytes != -1) {
 					/* we must shift the part of the appended item */
-					if (is_direntry_le_ih
-					    (B_N_PITEM_HEAD(tbS0, item_pos))) {
+					if (is_direntry_le_ih(B_N_PITEM_HEAD(tbS0, item_pos))) {
 
 						RFALSE(zeros_num,
 						       "PAP-12090: invalid parameter in case of a directory");
 						/* directory item */
 						if (tb->lbytes > pos_in_item) {
 							/* new directory entry falls into L[0] */
-							struct item_head
-							    *pasted;
-							int l_pos_in_item =
-							    pos_in_item;
+							struct item_head *pasted;
+							int l_pos_in_item = pos_in_item;
 
 							/* Shift lnum[0] - 1 items in whole. Shift lbytes - 1 entries from given directory item */
-							ret_val =
-							    leaf_shift_left(tb,
-									    tb->
-									    lnum
-									    [0],
-									    tb->
-									    lbytes
-									    -
-									    1);
-							if (ret_val
-							    && !item_pos) {
-								pasted =
-								    B_N_PITEM_HEAD
-								    (tb->L[0],
-								     B_NR_ITEMS
-								     (tb->
-								      L[0]) -
-								     1);
-								l_pos_in_item +=
-								    I_ENTRY_COUNT
-								    (pasted) -
-								    (tb->
-								     lbytes -
-								     1);
+							ret_val = leaf_shift_left(tb, tb->lnum[0], tb->lbytes-1);
+							if (ret_val && !item_pos) {
+								pasted = B_N_PITEM_HEAD(tb->L[0], B_NR_ITEMS(tb->L[0]) - 1);
+								l_pos_in_item += I_ENTRY_COUNT(pasted) - (tb->lbytes -1);
 							}
 
 							/* Append given directory entry to directory item */
 							buffer_info_init_left(tb, &bi);
-							leaf_paste_in_buffer
-							    (&bi,
-							     n + item_pos -
-							     ret_val,
-							     l_pos_in_item,
-							     tb->insert_size[0],
-							     body, zeros_num);
+							leaf_paste_in_buffer(&bi, n + item_pos - ret_val, l_pos_in_item, tb->insert_size[0], body, zeros_num);
 
 							/* previous string prepared space for pasting new entry, following string pastes this entry */
 
 							/* when we have merge directory item, pos_in_item has been changed too */
 
 							/* paste new directory entry. 1 is entry number */
-							leaf_paste_entries(&bi,
-									   n +
-									   item_pos
-									   -
-									   ret_val,
-									   l_pos_in_item,
-									   1,
-									   (struct
-									    reiserfs_de_head
-									    *)
-									   body,
-									   body
-									   +
-									   DEH_SIZE,
-									   tb->
-									   insert_size
-									   [0]
-							    );
+							leaf_paste_entries(&bi, n + item_pos - ret_val, l_pos_in_item,
+									   1, (struct reiserfs_de_head *) body,
+									   body + DEH_SIZE, tb->insert_size[0]);
 							tb->insert_size[0] = 0;
 						} else {
 							/* new directory item doesn't fall into L[0] */
 							/* Shift lnum[0]-1 items in whole. Shift lbytes directory entries from directory item number lnum[0] */
-							leaf_shift_left(tb,
-									tb->
-									lnum[0],
-									tb->
-									lbytes);
+							leaf_shift_left(tb, tb->lnum[0], tb->lbytes);
 						}
 						/* Calculate new position to append in item body */
 						pos_in_item -= tb->lbytes;
 					} else {
 						/* regular object */
-						RFALSE(tb->lbytes <= 0,
-						       "PAP-12095: there is nothing to shift to L[0]. lbytes=%d",
-						       tb->lbytes);
-						RFALSE(pos_in_item !=
-						       ih_item_len
-						       (B_N_PITEM_HEAD
-							(tbS0, item_pos)),
+						RFALSE(tb->lbytes <= 0, "PAP-12095: there is nothing to shift to L[0]. lbytes=%d", tb->lbytes);
+						RFALSE(pos_in_item != ih_item_len(B_N_PITEM_HEAD(tbS0, item_pos)),
 						       "PAP-12100: incorrect position to paste: item_len=%d, pos_in_item=%d",
-						       ih_item_len
-						       (B_N_PITEM_HEAD
-							(tbS0, item_pos)),
-						       pos_in_item);
+						       ih_item_len(B_N_PITEM_HEAD(tbS0, item_pos)), pos_in_item);
 
 						if (tb->lbytes >= pos_in_item) {
 							/* appended item will be in L[0] in whole */
 							int l_n;
 
 							/* this bytes number must be appended to the last item of L[h] */
-							l_n =
-							    tb->lbytes -
-							    pos_in_item;
+							l_n = tb->lbytes - pos_in_item;
 
 							/* Calculate new insert_size[0] */
-							tb->insert_size[0] -=
-							    l_n;
+							tb->insert_size[0] -= l_n;
 
-							RFALSE(tb->
-							       insert_size[0] <=
-							       0,
+							RFALSE(tb->insert_size[0] <= 0,
 							       "PAP-12105: there is nothing to paste into L[0]. insert_size=%d",
-							       tb->
-							       insert_size[0]);
-							ret_val =
-							    leaf_shift_left(tb,
-									    tb->
-									    lnum
-									    [0],
-									    ih_item_len
-									    (B_N_PITEM_HEAD
-									     (tbS0,
-									      item_pos)));
+							       tb->insert_size[0]);
+							ret_val = leaf_shift_left(tb, tb->lnum[0], ih_item_len
+									    (B_N_PITEM_HEAD(tbS0, item_pos)));
 							/* Append to body of item in L[0] */
 							buffer_info_init_left(tb, &bi);
 							leaf_paste_in_buffer
-							    (&bi,
-							     n + item_pos -
-							     ret_val,
-							     ih_item_len
-							     (B_N_PITEM_HEAD
-							      (tb->L[0],
-							       n + item_pos -
-							       ret_val)), l_n,
-							     body,
-							     zeros_num >
-							     l_n ? l_n :
-							     zeros_num);
+							    (&bi, n + item_pos - ret_val, ih_item_len
+							     (B_N_PITEM_HEAD(tb->L[0], n + item_pos - ret_val)),
+							     l_n, body,
+							     zeros_num > l_n ? l_n : zeros_num);
 							/* 0-th item in S0 can be only of DIRECT type when l_n != 0 */
 							{
 								int version;
-								int temp_l =
-								    l_n;
+								int temp_l = l_n;
 
-								RFALSE
-								    (ih_item_len
-								     (B_N_PITEM_HEAD
-								      (tbS0,
-								       0)),
+								RFALSE(ih_item_len(B_N_PITEM_HEAD(tbS0, 0)),
 								     "PAP-12106: item length must be 0");
-								RFALSE
-								    (comp_short_le_keys
-								     (B_N_PKEY
-								      (tbS0, 0),
-								      B_N_PKEY
-								      (tb->L[0],
-								       n +
-								       item_pos
-								       -
-								       ret_val)),
+								RFALSE(comp_short_le_keys(B_N_PKEY(tbS0, 0), B_N_PKEY
+								      (tb->L[0], n + item_pos - ret_val)),
 								     "PAP-12107: items must be of the same file");
 								if (is_indirect_le_ih(B_N_PITEM_HEAD(tb->L[0], n + item_pos - ret_val))) {
-									temp_l =
-									    l_n
-									    <<
-									    (tb->
-									     tb_sb->
-									     s_blocksize_bits
-									     -
-									     UNFM_P_SHIFT);
+									temp_l = l_n << (tb->tb_sb-> s_blocksize_bits - UNFM_P_SHIFT);
 								}
 								/* update key of first item in S0 */
-								version =
-								    ih_version
-								    (B_N_PITEM_HEAD
-								     (tbS0, 0));
-								set_le_key_k_offset
-								    (version,
-								     B_N_PKEY
-								     (tbS0, 0),
-								     le_key_k_offset
-								     (version,
-								      B_N_PKEY
-								      (tbS0,
-								       0)) +
-								     temp_l);
+								version = ih_version(B_N_PITEM_HEAD(tbS0, 0));
+								set_le_key_k_offset(version, B_N_PKEY(tbS0, 0),
+								     le_key_k_offset(version,B_N_PKEY(tbS0, 0)) + temp_l);
 								/* update left delimiting key */
-								set_le_key_k_offset
-								    (version,
-								     B_N_PDELIM_KEY
-								     (tb->
-								      CFL[0],
-								      tb->
-								      lkey[0]),
-								     le_key_k_offset
-								     (version,
-								      B_N_PDELIM_KEY
-								      (tb->
-								       CFL[0],
-								       tb->
-								       lkey[0]))
-								     + temp_l);
+								set_le_key_k_offset(version, B_N_PDELIM_KEY(tb->CFL[0], tb->lkey[0]),
+								     le_key_k_offset(version, B_N_PDELIM_KEY(tb->CFL[0], tb->lkey[0])) + temp_l);
 							}
 
 							/* Calculate new body, position in item and insert_size[0] */
 							if (l_n > zeros_num) {
-								body +=
-								    (l_n -
-								     zeros_num);
+								body += (l_n - zeros_num);
 								zeros_num = 0;
 							} else
-								zeros_num -=
-								    l_n;
+								zeros_num -= l_n;
 							pos_in_item = 0;
 
-							RFALSE
-							    (comp_short_le_keys
-							     (B_N_PKEY(tbS0, 0),
-							      B_N_PKEY(tb->L[0],
-								       B_NR_ITEMS
-								       (tb->
-									L[0]) -
-								       1))
-							     ||
-							     !op_is_left_mergeable
-							     (B_N_PKEY(tbS0, 0),
-							      tbS0->b_size)
-							     ||
-							     !op_is_left_mergeable
-							     (B_N_PDELIM_KEY
-							      (tb->CFL[0],
-							       tb->lkey[0]),
-							      tbS0->b_size),
+							RFALSE(comp_short_le_keys(B_N_PKEY(tbS0, 0), B_N_PKEY(tb->L[0], B_NR_ITEMS(tb->L[0]) - 1))
+							     || !op_is_left_mergeable(B_N_PKEY(tbS0, 0), tbS0->b_size)
+							     || !op_is_left_mergeable(B_N_PDELIM_KEY(tb->CFL[0], tb->lkey[0]), tbS0->b_size),
 							     "PAP-12120: item must be merge-able with left neighboring item");
 						} else {	/* only part of the appended item will be in L[0] */
 
 							/* Calculate position in item for append in S[0] */
-							pos_in_item -=
-							    tb->lbytes;
+							pos_in_item -= tb->lbytes;
 
-							RFALSE(pos_in_item <= 0,
-							       "PAP-12125: no place for paste. pos_in_item=%d",
-							       pos_in_item);
+							RFALSE(pos_in_item <= 0, "PAP-12125: no place for paste. pos_in_item=%d", pos_in_item);
 
 							/* Shift lnum[0] - 1 items in whole. Shift lbytes - 1 byte from item number lnum[0] */
-							leaf_shift_left(tb,
-									tb->
-									lnum[0],
-									tb->
-									lbytes);
+							leaf_shift_left(tb, tb->lnum[0], tb->lbytes);
 						}
 					}
 				} else {	/* appended item will be in L[0] in whole */
@@ -665,52 +495,30 @@
 
 					if (!item_pos && op_is_left_mergeable(B_N_PKEY(tbS0, 0), tbS0->b_size)) {	/* if we paste into first item of S[0] and it is left mergable */
 						/* then increment pos_in_item by the size of the last item in L[0] */
-						pasted =
-						    B_N_PITEM_HEAD(tb->L[0],
-								   n - 1);
+						pasted = B_N_PITEM_HEAD(tb->L[0], n - 1);
 						if (is_direntry_le_ih(pasted))
-							pos_in_item +=
-							    ih_entry_count
-							    (pasted);
+							pos_in_item += ih_entry_count(pasted);
 						else
-							pos_in_item +=
-							    ih_item_len(pasted);
+							pos_in_item += ih_item_len(pasted);
 					}
 
 					/* Shift lnum[0] - 1 items in whole. Shift lbytes - 1 byte from item number lnum[0] */
-					ret_val =
-					    leaf_shift_left(tb, tb->lnum[0],
-							    tb->lbytes);
+					ret_val = leaf_shift_left(tb, tb->lnum[0], tb->lbytes);
 					/* Append to body of item in L[0] */
 					buffer_info_init_left(tb, &bi);
-					leaf_paste_in_buffer(&bi,
-							     n + item_pos -
-							     ret_val,
+					leaf_paste_in_buffer(&bi, n + item_pos - ret_val,
 							     pos_in_item,
 							     tb->insert_size[0],
 							     body, zeros_num);
 
 					/* if appended item is directory, paste entry */
-					pasted =
-					    B_N_PITEM_HEAD(tb->L[0],
-							   n + item_pos -
-							   ret_val);
+					pasted = B_N_PITEM_HEAD(tb->L[0], n + item_pos - ret_val);
 					if (is_direntry_le_ih(pasted))
-						leaf_paste_entries(&bi,
-								   n +
-								   item_pos -
-								   ret_val,
-								   pos_in_item,
-								   1,
-								   (struct
-								    reiserfs_de_head
-								    *)body,
-								   body +
-								   DEH_SIZE,
-								   tb->
-								   insert_size
-								   [0]
-						    );
+						leaf_paste_entries(&bi, n + item_pos - ret_val,
+								   pos_in_item, 1,
+								   (struct reiserfs_de_head *) body,
+								   body + DEH_SIZE,
+								   tb->insert_size[0]);
 					/* if appended item is indirect item, put unformatted node into un list */
 					if (is_indirect_le_ih(pasted))
 						set_ih_free_space(pasted, 0);
@@ -722,13 +530,7 @@
 				reiserfs_panic(tb->tb_sb, "PAP-12130",
 					       "lnum > 0: unexpected mode: "
 					       " %s(%d)",
-					       (flag ==
-						M_DELETE) ? "DELETE" : ((flag ==
-									 M_CUT)
-									? "CUT"
-									:
-									"UNKNOWN"),
-					       flag);
+					       (flag == M_DELETE) ? "DELETE" : ((flag == M_CUT) ? "CUT" : "UNKNOWN"), flag);
 			}
 		} else {
 			/* new item doesn't fall into L[0] */
@@ -748,14 +550,12 @@
 		case M_INSERT:	/* insert item */
 			if (n - tb->rnum[0] < item_pos) {	/* new item or its part falls to R[0] */
 				if (item_pos == n - tb->rnum[0] + 1 && tb->rbytes != -1) {	/* part of new item falls into R[0] */
-					loff_t old_key_comp, old_len,
-					    r_zeros_number;
+					loff_t old_key_comp, old_len, r_zeros_number;
 					const char *r_body;
 					int version;
 					loff_t offset;
 
-					leaf_shift_right(tb, tb->rnum[0] - 1,
-							 -1);
+					leaf_shift_right(tb, tb->rnum[0] - 1, -1);
 
 					version = ih_version(ih);
 					/* Remember key component and item length */
@@ -763,29 +563,17 @@
 					old_len = ih_item_len(ih);
 
 					/* Calculate key component and item length to insert into R[0] */
-					offset =
-					    le_ih_k_offset(ih) +
-					    ((old_len -
-					      tb->
-					      rbytes) << (is_indirect_le_ih(ih)
-							  ? tb->tb_sb->
-							  s_blocksize_bits -
-							  UNFM_P_SHIFT : 0));
+					offset = le_ih_k_offset(ih) + ((old_len - tb->rbytes) << (is_indirect_le_ih(ih) ? tb->tb_sb->s_blocksize_bits - UNFM_P_SHIFT : 0));
 					set_le_ih_k_offset(ih, offset);
 					put_ih_item_len(ih, tb->rbytes);
 					/* Insert part of the item into R[0] */
 					buffer_info_init_right(tb, &bi);
 					if ((old_len - tb->rbytes) > zeros_num) {
 						r_zeros_number = 0;
-						r_body =
-						    body + (old_len -
-							    tb->rbytes) -
-						    zeros_num;
+						r_body = body + (old_len - tb->rbytes) - zeros_num;
 					} else {
 						r_body = body;
-						r_zeros_number =
-						    zeros_num - (old_len -
-								 tb->rbytes);
+						r_zeros_number = zeros_num - (old_len - tb->rbytes);
 						zeros_num -= r_zeros_number;
 					}
 
@@ -798,25 +586,18 @@
 
 					/* Calculate key component and item length to insert into S[0] */
 					set_le_ih_k_offset(ih, old_key_comp);
-					put_ih_item_len(ih,
-							old_len - tb->rbytes);
+					put_ih_item_len(ih, old_len - tb->rbytes);
 
 					tb->insert_size[0] -= tb->rbytes;
 
 				} else {	/* whole new item falls into R[0] */
 
 					/* Shift rnum[0]-1 items to R[0] */
-					ret_val =
-					    leaf_shift_right(tb,
-							     tb->rnum[0] - 1,
-							     tb->rbytes);
+					ret_val = leaf_shift_right(tb, tb->rnum[0] - 1, tb->rbytes);
 					/* Insert new item into R[0] */
 					buffer_info_init_right(tb, &bi);
-					leaf_insert_into_buf(&bi,
-							     item_pos - n +
-							     tb->rnum[0] - 1,
-							     ih, body,
-							     zeros_num);
+					leaf_insert_into_buf(&bi, item_pos - n + tb->rnum[0] - 1,
+							     ih, body, zeros_num);
 
 					if (item_pos - n + tb->rnum[0] - 1 == 0) {
 						replace_key(tb, tb->CFR[0],
@@ -841,200 +622,97 @@
 
 						RFALSE(zeros_num,
 						       "PAP-12145: invalid parameter in case of a directory");
-						entry_count =
-						    I_ENTRY_COUNT(B_N_PITEM_HEAD
-								  (tbS0,
-								   item_pos));
+						entry_count = I_ENTRY_COUNT(B_N_PITEM_HEAD
+								  (tbS0, item_pos));
 						if (entry_count - tb->rbytes <
 						    pos_in_item)
 							/* new directory entry falls into R[0] */
 						{
 							int paste_entry_position;
 
-							RFALSE(tb->rbytes - 1 >=
-							       entry_count
-							       || !tb->
-							       insert_size[0],
+							RFALSE(tb->rbytes - 1 >= entry_count || !tb-> insert_size[0],
 							       "PAP-12150: no enough of entries to shift to R[0]: rbytes=%d, entry_count=%d",
-							       tb->rbytes,
-							       entry_count);
+							       tb->rbytes, entry_count);
 							/* Shift rnum[0]-1 items in whole. Shift rbytes-1 directory entries from directory item number rnum[0] */
-							leaf_shift_right(tb,
-									 tb->
-									 rnum
-									 [0],
-									 tb->
-									 rbytes
-									 - 1);
+							leaf_shift_right(tb, tb->rnum[0], tb->rbytes - 1);
 							/* Paste given directory entry to directory item */
-							paste_entry_position =
-							    pos_in_item -
-							    entry_count +
-							    tb->rbytes - 1;
+							paste_entry_position = pos_in_item - entry_count + tb->rbytes - 1;
 							buffer_info_init_right(tb, &bi);
-							leaf_paste_in_buffer
-							    (&bi, 0,
-							     paste_entry_position,
-							     tb->insert_size[0],
-							     body, zeros_num);
+							leaf_paste_in_buffer(&bi, 0, paste_entry_position, tb->insert_size[0], body, zeros_num);
 							/* paste entry */
-							leaf_paste_entries(&bi,
-									   0,
-									   paste_entry_position,
-									   1,
-									   (struct
-									    reiserfs_de_head
-									    *)
-									   body,
-									   body
-									   +
-									   DEH_SIZE,
-									   tb->
-									   insert_size
-									   [0]
-							    );
+							leaf_paste_entries(&bi, 0, paste_entry_position, 1,
+									   (struct reiserfs_de_head *) body,
+									   body + DEH_SIZE, tb->insert_size[0]);
 
-							if (paste_entry_position
-							    == 0) {
+							if (paste_entry_position == 0) {
 								/* change delimiting keys */
-								replace_key(tb,
-									    tb->
-									    CFR
-									    [0],
-									    tb->
-									    rkey
-									    [0],
-									    tb->
-									    R
-									    [0],
-									    0);
+								replace_key(tb, tb->CFR[0], tb->rkey[0], tb->R[0],0);
 							}
 
 							tb->insert_size[0] = 0;
 							pos_in_item++;
 						} else {	/* new directory entry doesn't fall into R[0] */
 
-							leaf_shift_right(tb,
-									 tb->
-									 rnum
-									 [0],
-									 tb->
-									 rbytes);
+							leaf_shift_right(tb, tb->rnum[0], tb->rbytes);
 						}
 					} else {	/* regular object */
 
-						int n_shift, n_rem,
-						    r_zeros_number;
+						int n_shift, n_rem, r_zeros_number;
 						const char *r_body;
 
 						/* Calculate number of bytes which must be shifted from appended item */
-						if ((n_shift =
-						     tb->rbytes -
-						     tb->insert_size[0]) < 0)
+						if ((n_shift = tb->rbytes - tb->insert_size[0]) < 0)
 							n_shift = 0;
 
-						RFALSE(pos_in_item !=
-						       ih_item_len
-						       (B_N_PITEM_HEAD
-							(tbS0, item_pos)),
+						RFALSE(pos_in_item != ih_item_len
+						       (B_N_PITEM_HEAD(tbS0, item_pos)),
 						       "PAP-12155: invalid position to paste. ih_item_len=%d, pos_in_item=%d",
-						       pos_in_item,
-						       ih_item_len
-						       (B_N_PITEM_HEAD
-							(tbS0, item_pos)));
+						       pos_in_item, ih_item_len
+						       (B_N_PITEM_HEAD(tbS0, item_pos)));
 
-						leaf_shift_right(tb,
-								 tb->rnum[0],
-								 n_shift);
+						leaf_shift_right(tb, tb->rnum[0], n_shift);
 						/* Calculate number of bytes which must remain in body after appending to R[0] */
-						if ((n_rem =
-						     tb->insert_size[0] -
-						     tb->rbytes) < 0)
+						if ((n_rem = tb->insert_size[0] - tb->rbytes) < 0)
 							n_rem = 0;
 
 						{
 							int version;
-							unsigned long temp_rem =
-							    n_rem;
+							unsigned long temp_rem = n_rem;
 
-							version =
-							    ih_version
-							    (B_N_PITEM_HEAD
-							     (tb->R[0], 0));
-							if (is_indirect_le_key
-							    (version,
-							     B_N_PKEY(tb->R[0],
-								      0))) {
-								temp_rem =
-								    n_rem <<
-								    (tb->tb_sb->
-								     s_blocksize_bits
-								     -
-								     UNFM_P_SHIFT);
+							version = ih_version(B_N_PITEM_HEAD(tb->R[0], 0));
+							if (is_indirect_le_key(version, B_N_PKEY(tb->R[0], 0))) {
+								temp_rem = n_rem << (tb->tb_sb->s_blocksize_bits - UNFM_P_SHIFT);
 							}
-							set_le_key_k_offset
-							    (version,
-							     B_N_PKEY(tb->R[0],
-								      0),
-							     le_key_k_offset
-							     (version,
-							      B_N_PKEY(tb->R[0],
-								       0)) +
-							     temp_rem);
-							set_le_key_k_offset
-							    (version,
-							     B_N_PDELIM_KEY(tb->
-									    CFR
-									    [0],
-									    tb->
-									    rkey
-									    [0]),
-							     le_key_k_offset
-							     (version,
-							      B_N_PDELIM_KEY
-							      (tb->CFR[0],
-							       tb->rkey[0])) +
-							     temp_rem);
+							set_le_key_k_offset(version, B_N_PKEY(tb->R[0], 0),
+							     le_key_k_offset(version, B_N_PKEY(tb->R[0], 0)) + temp_rem);
+							set_le_key_k_offset(version, B_N_PDELIM_KEY(tb->CFR[0], tb->rkey[0]),
+							     le_key_k_offset(version, B_N_PDELIM_KEY(tb->CFR[0], tb->rkey[0])) + temp_rem);
 						}
 /*		  k_offset (B_N_PKEY(tb->R[0],0)) += n_rem;
 		  k_offset (B_N_PDELIM_KEY(tb->CFR[0],tb->rkey[0])) += n_rem;*/
-						do_balance_mark_internal_dirty
-						    (tb, tb->CFR[0], 0);
+						do_balance_mark_internal_dirty(tb, tb->CFR[0], 0);
 
 						/* Append part of body into R[0] */
 						buffer_info_init_right(tb, &bi);
 						if (n_rem > zeros_num) {
 							r_zeros_number = 0;
-							r_body =
-							    body + n_rem -
-							    zeros_num;
+							r_body = body + n_rem - zeros_num;
 						} else {
 							r_body = body;
-							r_zeros_number =
-							    zeros_num - n_rem;
-							zeros_num -=
-							    r_zeros_number;
+							r_zeros_number = zeros_num - n_rem;
+							zeros_num -= r_zeros_number;
 						}
 
-						leaf_paste_in_buffer(&bi, 0,
-								     n_shift,
-								     tb->
-								     insert_size
-								     [0] -
-								     n_rem,
-								     r_body,
-								     r_zeros_number);
+						leaf_paste_in_buffer(&bi, 0, n_shift,
+								     tb->insert_size[0] - n_rem,
+								     r_body, r_zeros_number);
 
-						if (is_indirect_le_ih
-						    (B_N_PITEM_HEAD
-						     (tb->R[0], 0))) {
+						if (is_indirect_le_ih(B_N_PITEM_HEAD(tb->R[0], 0))) {
 #if 0
 							RFALSE(n_rem,
 							       "PAP-12160: paste more than one unformatted node pointer");
 #endif
-							set_ih_free_space
-							    (B_N_PITEM_HEAD
-							     (tb->R[0], 0), 0);
+							set_ih_free_space(B_N_PITEM_HEAD(tb->R[0], 0), 0);
 						}
 						tb->insert_size[0] = n_rem;
 						if (!n_rem)
@@ -1044,58 +722,28 @@
 
 					struct item_head *pasted;
 
-					ret_val =
-					    leaf_shift_right(tb, tb->rnum[0],
-							     tb->rbytes);
+					ret_val = leaf_shift_right(tb, tb->rnum[0], tb->rbytes);
 					/* append item in R[0] */
 					if (pos_in_item >= 0) {
 						buffer_info_init_right(tb, &bi);
-						leaf_paste_in_buffer(&bi,
-								     item_pos -
-								     n +
-								     tb->
-								     rnum[0],
-								     pos_in_item,
-								     tb->
-								     insert_size
-								     [0], body,
-								     zeros_num);
+						leaf_paste_in_buffer(&bi, item_pos - n + tb->rnum[0], pos_in_item,
+								     tb->insert_size[0], body, zeros_num);
 					}
 
 					/* paste new entry, if item is directory item */
-					pasted =
-					    B_N_PITEM_HEAD(tb->R[0],
-							   item_pos - n +
-							   tb->rnum[0]);
-					if (is_direntry_le_ih(pasted)
-					    && pos_in_item >= 0) {
-						leaf_paste_entries(&bi,
-								   item_pos -
-								   n +
-								   tb->rnum[0],
-								   pos_in_item,
-								   1,
-								   (struct
-								    reiserfs_de_head
-								    *)body,
-								   body +
-								   DEH_SIZE,
-								   tb->
-								   insert_size
-								   [0]
-						    );
+					pasted = B_N_PITEM_HEAD(tb->R[0], item_pos - n + tb->rnum[0]);
+					if (is_direntry_le_ih(pasted) && pos_in_item >= 0) {
+						leaf_paste_entries(&bi, item_pos - n + tb->rnum[0],
+								   pos_in_item, 1,
+								   (struct reiserfs_de_head *) body,
+								   body + DEH_SIZE, tb->insert_size[0]);
 						if (!pos_in_item) {
 
-							RFALSE(item_pos - n +
-							       tb->rnum[0],
+							RFALSE(item_pos - n + tb->rnum[0],
 							       "PAP-12165: directory item must be first item of node when pasting is in 0th position");
 
 							/* update delimiting keys */
-							replace_key(tb,
-								    tb->CFR[0],
-								    tb->rkey[0],
-								    tb->R[0],
-								    0);
+							replace_key(tb, tb->CFR[0], tb->rkey[0], tb->R[0], 0);
 						}
 					}
 
@@ -1111,22 +759,16 @@
 		default:	/* cases d and t */
 			reiserfs_panic(tb->tb_sb, "PAP-12175",
 				       "rnum > 0: unexpected mode: %s(%d)",
-				       (flag ==
-					M_DELETE) ? "DELETE" : ((flag ==
-								 M_CUT) ? "CUT"
-								: "UNKNOWN"),
-				       flag);
+				       (flag == M_DELETE) ? "DELETE" : ((flag == M_CUT) ? "CUT" : "UNKNOWN"), flag);
 		}
 
 	}
 
 	/* tb->rnum[0] > 0 */
 	RFALSE(tb->blknum[0] > 3,
-	       "PAP-12180: blknum can not be %d. It must be <= 3",
-	       tb->blknum[0]);
+	       "PAP-12180: blknum can not be %d. It must be <= 3", tb->blknum[0]);
 	RFALSE(tb->blknum[0] < 0,
-	       "PAP-12185: blknum can not be %d. It must be >= 0",
-	       tb->blknum[0]);
+	       "PAP-12185: blknum can not be %d. It must be >= 0", tb->blknum[0]);
 
 	/* if while adding to a node we discover that it is possible to split
 	   it in two, and merge the left part into the left neighbor and the
@@ -1177,8 +819,7 @@
 
 			if (n - snum[i] < item_pos) {	/* new item or it's part falls to first new node S_new[i] */
 				if (item_pos == n - snum[i] + 1 && sbytes[i] != -1) {	/* part of new item falls into S_new[i] */
-					int old_key_comp, old_len,
-					    r_zeros_number;
+					int old_key_comp, old_len, r_zeros_number;
 					const char *r_body;
 					int version;
 
@@ -1192,15 +833,8 @@
 					old_len = ih_item_len(ih);
 
 					/* Calculate key component and item length to insert into S_new[i] */
-					set_le_ih_k_offset(ih,
-							   le_ih_k_offset(ih) +
-							   ((old_len -
-							     sbytes[i]) <<
-							    (is_indirect_le_ih
-							     (ih) ? tb->tb_sb->
-							     s_blocksize_bits -
-							     UNFM_P_SHIFT :
-							     0)));
+					set_le_ih_k_offset(ih, le_ih_k_offset(ih) +
+							   ((old_len - sbytes[i]) << (is_indirect_le_ih(ih) ? tb->tb_sb->s_blocksize_bits - UNFM_P_SHIFT : 0)));
 
 					put_ih_item_len(ih, sbytes[i]);
 
@@ -1209,39 +843,29 @@
 
 					if ((old_len - sbytes[i]) > zeros_num) {
 						r_zeros_number = 0;
-						r_body =
-						    body + (old_len -
-							    sbytes[i]) -
-						    zeros_num;
+						r_body = body + (old_len - sbytes[i]) - zeros_num;
 					} else {
 						r_body = body;
-						r_zeros_number =
-						    zeros_num - (old_len -
-								 sbytes[i]);
+						r_zeros_number = zeros_num - (old_len - sbytes[i]);
 						zeros_num -= r_zeros_number;
 					}
 
-					leaf_insert_into_buf(&bi, 0, ih, r_body,
-							     r_zeros_number);
+					leaf_insert_into_buf(&bi, 0, ih, r_body, r_zeros_number);
 
 					/* Calculate key component and item length to insert into S[i] */
 					set_le_ih_k_offset(ih, old_key_comp);
-					put_ih_item_len(ih,
-							old_len - sbytes[i]);
+					put_ih_item_len(ih, old_len - sbytes[i]);
 					tb->insert_size[0] -= sbytes[i];
 				} else {	/* whole new item falls into S_new[i] */
 
 					/* Shift snum[0] - 1 items to S_new[i] (sbytes[i] of split item) */
 					leaf_move_items(LEAF_FROM_S_TO_SNEW, tb,
-							snum[i] - 1, sbytes[i],
-							S_new[i]);
+							snum[i] - 1, sbytes[i], S_new[i]);
 
 					/* Insert new item into S_new[i] */
 					buffer_info_init_bh(tb, &bi, S_new[i]);
-					leaf_insert_into_buf(&bi,
-							     item_pos - n +
-							     snum[i] - 1, ih,
-							     body, zeros_num);
+					leaf_insert_into_buf(&bi, item_pos - n + snum[i] - 1,
+							     ih, body, zeros_num);
 
 					zeros_num = tb->insert_size[0] = 0;
 				}
@@ -1268,150 +892,73 @@
 
 						int entry_count;
 
-						entry_count =
-						    ih_entry_count(aux_ih);
+						entry_count = ih_entry_count(aux_ih);
 
-						if (entry_count - sbytes[i] <
-						    pos_in_item
-						    && pos_in_item <=
-						    entry_count) {
+						if (entry_count - sbytes[i] < pos_in_item && pos_in_item <= entry_count) {
 							/* new directory entry falls into S_new[i] */
 
-							RFALSE(!tb->
-							       insert_size[0],
-							       "PAP-12215: insert_size is already 0");
-							RFALSE(sbytes[i] - 1 >=
-							       entry_count,
+							RFALSE(!tb->insert_size[0], "PAP-12215: insert_size is already 0");
+							RFALSE(sbytes[i] - 1 >= entry_count,
 							       "PAP-12220: there are no so much entries (%d), only %d",
-							       sbytes[i] - 1,
-							       entry_count);
+							       sbytes[i] - 1, entry_count);
 
 							/* Shift snum[i]-1 items in whole. Shift sbytes[i] directory entries from directory item number snum[i] */
-							leaf_move_items
-							    (LEAF_FROM_S_TO_SNEW,
-							     tb, snum[i],
-							     sbytes[i] - 1,
-							     S_new[i]);
+							leaf_move_items(LEAF_FROM_S_TO_SNEW, tb, snum[i], sbytes[i] - 1, S_new[i]);
 							/* Paste given directory entry to directory item */
 							buffer_info_init_bh(tb, &bi, S_new[i]);
-							leaf_paste_in_buffer
-							    (&bi, 0,
-							     pos_in_item -
-							     entry_count +
-							     sbytes[i] - 1,
-							     tb->insert_size[0],
-							     body, zeros_num);
+							leaf_paste_in_buffer(&bi, 0, pos_in_item - entry_count + sbytes[i] - 1,
+							     tb->insert_size[0], body, zeros_num);
 							/* paste new directory entry */
-							leaf_paste_entries(&bi,
-									   0,
-									   pos_in_item
-									   -
-									   entry_count
-									   +
-									   sbytes
-									   [i] -
-									   1, 1,
-									   (struct
-									    reiserfs_de_head
-									    *)
-									   body,
-									   body
-									   +
-									   DEH_SIZE,
-									   tb->
-									   insert_size
-									   [0]
-							    );
+							leaf_paste_entries(&bi, 0, pos_in_item - entry_count + sbytes[i] - 1, 1,
+									   (struct reiserfs_de_head *) body,
+									   body + DEH_SIZE, tb->insert_size[0]);
 							tb->insert_size[0] = 0;
 							pos_in_item++;
 						} else {	/* new directory entry doesn't fall into S_new[i] */
-							leaf_move_items
-							    (LEAF_FROM_S_TO_SNEW,
-							     tb, snum[i],
-							     sbytes[i],
-							     S_new[i]);
+							leaf_move_items(LEAF_FROM_S_TO_SNEW, tb, snum[i], sbytes[i], S_new[i]);
 						}
 					} else {	/* regular object */
 
-						int n_shift, n_rem,
-						    r_zeros_number;
+						int n_shift, n_rem, r_zeros_number;
 						const char *r_body;
 
-						RFALSE(pos_in_item !=
-						       ih_item_len
-						       (B_N_PITEM_HEAD
-							(tbS0, item_pos))
-						       || tb->insert_size[0] <=
-						       0,
+						RFALSE(pos_in_item != ih_item_len(B_N_PITEM_HEAD(tbS0, item_pos)) || tb->insert_size[0] <= 0,
 						       "PAP-12225: item too short or insert_size <= 0");
 
 						/* Calculate number of bytes which must be shifted from appended item */
-						n_shift =
-						    sbytes[i] -
-						    tb->insert_size[0];
+						n_shift = sbytes[i] - tb->insert_size[0];
 						if (n_shift < 0)
 							n_shift = 0;
-						leaf_move_items
-						    (LEAF_FROM_S_TO_SNEW, tb,
-						     snum[i], n_shift,
-						     S_new[i]);
+						leaf_move_items(LEAF_FROM_S_TO_SNEW, tb, snum[i], n_shift, S_new[i]);
 
 						/* Calculate number of bytes which must remain in body after append to S_new[i] */
-						n_rem =
-						    tb->insert_size[0] -
-						    sbytes[i];
+						n_rem = tb->insert_size[0] - sbytes[i];
 						if (n_rem < 0)
 							n_rem = 0;
 						/* Append part of body into S_new[0] */
 						buffer_info_init_bh(tb, &bi, S_new[i]);
 						if (n_rem > zeros_num) {
 							r_zeros_number = 0;
-							r_body =
-							    body + n_rem -
-							    zeros_num;
+							r_body = body + n_rem - zeros_num;
 						} else {
 							r_body = body;
-							r_zeros_number =
-							    zeros_num - n_rem;
-							zeros_num -=
-							    r_zeros_number;
+							r_zeros_number = zeros_num - n_rem;
+							zeros_num -= r_zeros_number;
 						}
 
-						leaf_paste_in_buffer(&bi, 0,
-								     n_shift,
-								     tb->
-								     insert_size
-								     [0] -
-								     n_rem,
-								     r_body,
-								     r_zeros_number);
+						leaf_paste_in_buffer(&bi, 0, n_shift,
+								     tb->insert_size[0] - n_rem,
+								     r_body, r_zeros_number);
 						{
 							struct item_head *tmp;
 
-							tmp =
-							    B_N_PITEM_HEAD(S_new
-									   [i],
-									   0);
+							tmp = B_N_PITEM_HEAD(S_new[i], 0);
 							if (is_indirect_le_ih
 							    (tmp)) {
-								set_ih_free_space
-								    (tmp, 0);
-								set_le_ih_k_offset
-								    (tmp,
-								     le_ih_k_offset
-								     (tmp) +
-								     (n_rem <<
-								      (tb->
-								       tb_sb->
-								       s_blocksize_bits
-								       -
-								       UNFM_P_SHIFT)));
+								set_ih_free_space(tmp, 0);
+								set_le_ih_k_offset(tmp, le_ih_k_offset(tmp) + (n_rem << (tb->tb_sb->s_blocksize_bits - UNFM_P_SHIFT)));
 							} else {
-								set_le_ih_k_offset
-								    (tmp,
-								     le_ih_k_offset
-								     (tmp) +
-								     n_rem);
+								set_le_ih_k_offset(tmp, le_ih_k_offset(tmp) + n_rem);
 							}
 						}
 
@@ -1426,8 +973,7 @@
 					struct item_head *pasted;
 
 #ifdef CONFIG_REISERFS_CHECK
-					struct item_head *ih_check =
-					    B_N_PITEM_HEAD(tbS0, item_pos);
+					struct item_head *ih_check = B_N_PITEM_HEAD(tbS0, item_pos);
 
 					if (!is_direntry_le_ih(ih_check)
 					    && (pos_in_item != ih_item_len(ih_check)
@@ -1439,8 +985,7 @@
 							     "to ih_item_len");
 #endif				/* CONFIG_REISERFS_CHECK */
 
-					leaf_mi =
-					    leaf_move_items(LEAF_FROM_S_TO_SNEW,
+					leaf_mi = leaf_move_items(LEAF_FROM_S_TO_SNEW,
 							    tb, snum[i],
 							    sbytes[i],
 							    S_new[i]);
@@ -1452,30 +997,19 @@
 					/* paste into item */
 					buffer_info_init_bh(tb, &bi, S_new[i]);
 					leaf_paste_in_buffer(&bi,
-							     item_pos - n +
-							     snum[i],
+							     item_pos - n + snum[i],
 							     pos_in_item,
 							     tb->insert_size[0],
 							     body, zeros_num);
 
-					pasted =
-					    B_N_PITEM_HEAD(S_new[i],
-							   item_pos - n +
-							   snum[i]);
+					pasted = B_N_PITEM_HEAD(S_new[i], item_pos - n + snum[i]);
 					if (is_direntry_le_ih(pasted)) {
 						leaf_paste_entries(&bi,
-								   item_pos -
-								   n + snum[i],
-								   pos_in_item,
-								   1,
-								   (struct
-								    reiserfs_de_head
-								    *)body,
-								   body +
-								   DEH_SIZE,
-								   tb->
-								   insert_size
-								   [0]
+								   item_pos - n + snum[i],
+								   pos_in_item, 1,
+								   (struct reiserfs_de_head *)body,
+								   body + DEH_SIZE,
+								   tb->insert_size[0]
 						    );
 					}
 
@@ -1495,11 +1029,7 @@
 		default:	/* cases d and t */
 			reiserfs_panic(tb->tb_sb, "PAP-12245",
 				       "blknum > 2: unexpected mode: %s(%d)",
-				       (flag ==
-					M_DELETE) ? "DELETE" : ((flag ==
-								 M_CUT) ? "CUT"
-								: "UNKNOWN"),
-				       flag);
+				       (flag == M_DELETE) ? "DELETE" : ((flag == M_CUT) ? "CUT" : "UNKNOWN"), flag);
 		}
 
 		memcpy(insert_key + i, B_N_PKEY(S_new[i], 0), KEY_SIZE);
@@ -1524,9 +1054,7 @@
 			/* If we insert the first key change the delimiting key */
 			if (item_pos == 0) {
 				if (tb->CFL[0])	/* can be 0 in reiserfsck */
-					replace_key(tb, tb->CFL[0], tb->lkey[0],
-						    tbS0, 0);
-
+					replace_key(tb, tb->CFL[0], tb->lkey[0], tbS0, 0);
 			}
 			break;
 
@@ -1536,53 +1064,27 @@
 				pasted = B_N_PITEM_HEAD(tbS0, item_pos);
 				/* when directory, may be new entry already pasted */
 				if (is_direntry_le_ih(pasted)) {
-					if (pos_in_item >= 0 &&
-					    pos_in_item <=
-					    ih_entry_count(pasted)) {
+					if (pos_in_item >= 0 && pos_in_item <= ih_entry_count(pasted)) {
 
 						RFALSE(!tb->insert_size[0],
 						       "PAP-12260: insert_size is 0 already");
 
 						/* prepare space */
 						buffer_info_init_tbS0(tb, &bi);
-						leaf_paste_in_buffer(&bi,
-								     item_pos,
-								     pos_in_item,
-								     tb->
-								     insert_size
-								     [0], body,
+						leaf_paste_in_buffer(&bi, item_pos, pos_in_item,
+								     tb->insert_size[0], body,
 								     zeros_num);
 
 						/* paste entry */
-						leaf_paste_entries(&bi,
-								   item_pos,
-								   pos_in_item,
-								   1,
-								   (struct
-								    reiserfs_de_head
-								    *)body,
-								   body +
-								   DEH_SIZE,
-								   tb->
-								   insert_size
-								   [0]
-						    );
+						leaf_paste_entries(&bi, item_pos, pos_in_item, 1,
+								   (struct reiserfs_de_head *)body,
+								   body + DEH_SIZE,
+								   tb->insert_size[0]);
 						if (!item_pos && !pos_in_item) {
-							RFALSE(!tb->CFL[0]
-							       || !tb->L[0],
+							RFALSE(!tb->CFL[0] || !tb->L[0],
 							       "PAP-12270: CFL[0]/L[0] must be specified");
-							if (tb->CFL[0]) {
-								replace_key(tb,
-									    tb->
-									    CFL
-									    [0],
-									    tb->
-									    lkey
-									    [0],
-									    tbS0,
-									    0);
-
-							}
+							if (tb->CFL[0])
+								replace_key(tb, tb->CFL[0], tb->lkey[0], tbS0, 0);
 						}
 						tb->insert_size[0] = 0;
 					}
@@ -1593,13 +1095,8 @@
 						       "PAP-12275: insert size must not be %d",
 						       tb->insert_size[0]);
 						buffer_info_init_tbS0(tb, &bi);
-						leaf_paste_in_buffer(&bi,
-								     item_pos,
-								     pos_in_item,
-								     tb->
-								     insert_size
-								     [0], body,
-								     zeros_num);
+						leaf_paste_in_buffer(&bi, item_pos, pos_in_item,
+								     tb->insert_size[0], body, zeros_num);
 
 						if (is_indirect_le_ih(pasted)) {
 #if 0
@@ -1611,8 +1108,7 @@
 							       tb->
 							       insert_size[0]);
 #endif
-							set_ih_free_space
-							    (pasted, 0);
+							set_ih_free_space(pasted, 0);
 						}
 						tb->insert_size[0] = 0;
 					}
@@ -1620,8 +1116,7 @@
 					else {
 						if (tb->insert_size[0]) {
 							print_cur_tb("12285");
-							reiserfs_panic(tb->
-								       tb_sb,
+							reiserfs_panic(tb->tb_sb,
 							    "PAP-12285",
 							    "insert_size "
 							    "must be 0 "
diff --git a/fs/super.c b/fs/super.c
index cecd780..80d5cf2 100644
--- a/fs/super.c
+++ b/fs/super.c
@@ -703,7 +703,6 @@
 	if (flags & MS_RDONLY)
 		acct_auto_close(sb);
 	shrink_dcache_sb(sb);
-	sync_filesystem(sb);
 
 	remount_ro = (flags & MS_RDONLY) && !(sb->s_flags & MS_RDONLY);
 
@@ -720,6 +719,8 @@
 		}
 	}
 
+	sync_filesystem(sb);
+
 	if (sb->s_op->remount_fs) {
 		retval = sb->s_op->remount_fs(sb, &flags, data);
 		if (retval) {
diff --git a/fs/sync.c b/fs/sync.c
index f155374..e8ba024 100644
--- a/fs/sync.c
+++ b/fs/sync.c
@@ -222,23 +222,6 @@
 	return do_fsync(fd, 1);
 }
 
-/**
- * generic_write_sync - perform syncing after a write if file / inode is sync
- * @file:	file to which the write happened
- * @pos:	offset where the write started
- * @count:	length of the write
- *
- * This is just a simple wrapper about our general syncing function.
- */
-int generic_write_sync(struct file *file, loff_t pos, loff_t count)
-{
-	if (!(file->f_flags & O_DSYNC) && !IS_SYNC(file->f_mapping->host))
-		return 0;
-	return vfs_fsync_range(file, pos, pos + count - 1,
-			       (file->f_flags & __O_SYNC) ? 0 : 1);
-}
-EXPORT_SYMBOL(generic_write_sync);
-
 /*
  * sys_sync_file_range() permits finely controlled syncing over a segment of
  * a file in the range offset .. (offset+nbytes-1) inclusive.  If nbytes is
diff --git a/fs/xfs/xfs_aops.c b/fs/xfs/xfs_aops.c
index a267394..db2cfb0 100644
--- a/fs/xfs/xfs_aops.c
+++ b/fs/xfs/xfs_aops.c
@@ -407,7 +407,7 @@
 	struct bio		*bio = bio_alloc(GFP_NOIO, nvecs);
 
 	ASSERT(bio->bi_private == NULL);
-	bio->bi_sector = bh->b_blocknr * (bh->b_size >> 9);
+	bio->bi_iter.bi_sector = bh->b_blocknr * (bh->b_size >> 9);
 	bio->bi_bdev = bh->b_bdev;
 	return bio;
 }
diff --git a/fs/xfs/xfs_buf.c b/fs/xfs/xfs_buf.c
index 5175711..9c061ef 100644
--- a/fs/xfs/xfs_buf.c
+++ b/fs/xfs/xfs_buf.c
@@ -1240,7 +1240,7 @@
 
 	bio = bio_alloc(GFP_NOIO, nr_pages);
 	bio->bi_bdev = bp->b_target->bt_bdev;
-	bio->bi_sector = sector;
+	bio->bi_iter.bi_sector = sector;
 	bio->bi_end_io = xfs_buf_bio_end_io;
 	bio->bi_private = bp;
 
@@ -1262,7 +1262,7 @@
 		total_nr_pages--;
 	}
 
-	if (likely(bio->bi_size)) {
+	if (likely(bio->bi_iter.bi_size)) {
 		if (xfs_buf_is_vmapped(bp)) {
 			flush_kernel_vmap_range(bp->b_addr,
 						xfs_buf_vmap_len(bp));
diff --git a/fs/xfs/xfs_file.c b/fs/xfs/xfs_file.c
index 2e7989e..64b48ea 100644
--- a/fs/xfs/xfs_file.c
+++ b/fs/xfs/xfs_file.c
@@ -799,7 +799,7 @@
 		XFS_STATS_ADD(xs_write_bytes, ret);
 
 		/* Handle various SYNC-type writes */
-		err = generic_write_sync(file, pos, ret);
+		err = generic_write_sync(file, iocb->ki_pos - ret, ret);
 		if (err < 0)
 			ret = err;
 	}
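By the time the sync runs here, iocb->ki_pos has already been advanced past the data that was just written, so the last ret bytes start at iocb->ki_pos - ret rather than at the pre-write pos. A hedged sketch of that call pattern; sample_sync_after_write() and its locals are invented for illustration:

static ssize_t sample_sync_after_write(struct kiocb *iocb, struct file *file,
				       ssize_t ret)
{
	int err;

	if (ret <= 0)
		return ret;

	/* ki_pos points just past the new data, so back up by ret bytes. */
	err = generic_write_sync(file, iocb->ki_pos - ret, ret);
	if (err < 0)
		ret = err;

	return ret;
}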
diff --git a/include/acpi/acpixf.h b/include/acpi/acpixf.h
index d2f16f1..fea6773 100644
--- a/include/acpi/acpixf.h
+++ b/include/acpi/acpixf.h
@@ -77,7 +77,7 @@
 extern u8 acpi_gbl_disable_auto_repair;
 extern u8 acpi_gbl_disable_ssdt_table_load;
 extern u8 acpi_gbl_do_not_use_xsdt;
-extern bool acpi_gbl_enable_aml_debug_object;
+extern u8 acpi_gbl_enable_aml_debug_object;
 extern u8 acpi_gbl_enable_interpreter_slack;
 extern u32 acpi_gbl_trace_flags;
 extern acpi_name acpi_gbl_trace_method_name;
diff --git a/include/asm-generic/pgtable.h b/include/asm-generic/pgtable.h
index 8e4f41d..34c7bdc 100644
--- a/include/asm-generic/pgtable.h
+++ b/include/asm-generic/pgtable.h
@@ -701,6 +701,18 @@
 }
 #endif
 
+#ifndef ptep_set_numa
+static inline void ptep_set_numa(struct mm_struct *mm, unsigned long addr,
+				 pte_t *ptep)
+{
+	pte_t ptent = *ptep;
+
+	ptent = pte_mknuma(ptent);
+	set_pte_at(mm, addr, ptep, ptent);
+	return;
+}
+#endif
+
 #ifndef pmd_mknuma
 static inline pmd_t pmd_mknuma(pmd_t pmd)
 {
@@ -708,6 +720,18 @@
 	return pmd_clear_flags(pmd, _PAGE_PRESENT);
 }
 #endif
+
+#ifndef pmdp_set_numa
+static inline void pmdp_set_numa(struct mm_struct *mm, unsigned long addr,
+				 pmd_t *pmdp)
+{
+	pmd_t pmd = *pmdp;
+
+	pmd = pmd_mknuma(pmd);
+	set_pmd_at(mm, addr, pmdp, pmd);
+	return;
+}
+#endif
 #else
 extern int pte_numa(pte_t pte);
 extern int pmd_numa(pmd_t pmd);
@@ -715,6 +739,8 @@
 extern pmd_t pmd_mknonnuma(pmd_t pmd);
 extern pte_t pte_mknuma(pte_t pte);
 extern pmd_t pmd_mknuma(pmd_t pmd);
+extern void ptep_set_numa(struct mm_struct *mm, unsigned long addr, pte_t *ptep);
+extern void pmdp_set_numa(struct mm_struct *mm, unsigned long addr, pmd_t *pmdp);
 #endif /* CONFIG_ARCH_USES_NUMA_PROT_NONE */
 #else
 static inline int pmd_numa(pmd_t pmd)
@@ -742,10 +768,23 @@
 	return pte;
 }
 
+static inline void ptep_set_numa(struct mm_struct *mm, unsigned long addr,
+				 pte_t *ptep)
+{
+	return;
+}
+
+
 static inline pmd_t pmd_mknuma(pmd_t pmd)
 {
 	return pmd;
 }
+
+static inline void pmdp_set_numa(struct mm_struct *mm, unsigned long addr,
+				 pmd_t *pmdp)
+{
+	return;
+}
 #endif /* CONFIG_NUMA_BALANCING */
 
 #endif /* CONFIG_MMU */
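ptep_set_numa() and pmdp_set_numa() fold the usual read, pte_mknuma()/pmd_mknuma(), set_*_at() sequence into a single helper. A rough sketch of how a protection-change pass might use the PTE variant, assuming CONFIG_NUMA_BALANCING and that the caller already holds the page table lock; the sample_* name is illustrative:

static void sample_make_pte_numa(struct mm_struct *mm, unsigned long addr,
				 pte_t *ptep)
{
	pte_t pte = *ptep;

	/* Only present, not-yet-marked entries need the NUMA hinting bit. */
	if (pte_present(pte) && !pte_numa(pte))
		ptep_set_numa(mm, addr, ptep);
}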
diff --git a/include/drm/drmP.h b/include/drm/drmP.h
index 1d4a920..04a7f31 100644
--- a/include/drm/drmP.h
+++ b/include/drm/drmP.h
@@ -56,6 +56,7 @@
 #include <linux/mutex.h>
 #include <linux/io.h>
 #include <linux/slab.h>
+#include <linux/ratelimit.h>
 #if defined(__alpha__) || defined(__powerpc__)
 #include <asm/pgtable.h>	/* For pte_wrprotect */
 #endif
@@ -136,7 +137,6 @@
 
 /* driver capabilities and requirements mask */
 #define DRIVER_USE_AGP     0x1
-#define DRIVER_REQUIRE_AGP 0x2
 #define DRIVER_PCI_DMA     0x8
 #define DRIVER_SG          0x10
 #define DRIVER_HAVE_DMA    0x20
@@ -180,9 +180,28 @@
 #define DRM_ERROR(fmt, ...)				\
 	drm_err(__func__, fmt, ##__VA_ARGS__)
 
+/**
+ * Rate limited error output.  Like DRM_ERROR() but won't flood the log.
+ *
+ * \param fmt printf() like format string.
+ * \param arg arguments
+ */
+#define DRM_ERROR_RATELIMITED(fmt, ...)				\
+({									\
+	static DEFINE_RATELIMIT_STATE(_rs,				\
+				      DEFAULT_RATELIMIT_INTERVAL,	\
+				      DEFAULT_RATELIMIT_BURST);		\
+									\
+	if (__ratelimit(&_rs))						\
+		drm_err(__func__, fmt, ##__VA_ARGS__);			\
+})
+
 #define DRM_INFO(fmt, ...)				\
 	printk(KERN_INFO "[" DRM_NAME "] " fmt, ##__VA_ARGS__)
 
+#define DRM_INFO_ONCE(fmt, ...)				\
+	printk_once(KERN_INFO "[" DRM_NAME "] " fmt, ##__VA_ARGS__)
+
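DRM_ERROR_RATELIMITED() and DRM_INFO_ONCE() are drop-in variants of the existing macros for messages that can fire on every interrupt or every frame. A hedged usage sketch; the surrounding function and the message text are invented:

static void sample_handle_gpu_fault(struct drm_device *dev, u32 status)
{
	DRM_INFO_ONCE("page faults are expected while resetting the GPU\n");

	/* The rate limit keeps a fault storm from flooding dmesg. */
	DRM_ERROR_RATELIMITED("unhandled page fault, status 0x%08x\n", status);
}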
 /**
  * Debug output.
  *
@@ -422,7 +441,6 @@
 	struct pid *pid;
 	kuid_t uid;
 	drm_magic_t magic;
-	unsigned long ioctl_count;
 	struct list_head lhead;
 	struct drm_minor *minor;
 	unsigned long lock_count;
@@ -511,7 +529,7 @@
  */
 struct drm_agp_mem {
 	unsigned long handle;		/**< handle */
-	DRM_AGP_MEM *memory;
+	struct agp_memory *memory;
 	unsigned long bound;		/**< address */
 	int pages;
 	struct list_head head;
@@ -523,7 +541,7 @@
  * \sa drm_agp_init() and drm_device::agp.
  */
 struct drm_agp_head {
-	DRM_AGP_KERN agp_info;		/**< AGP device information */
+	struct agp_kern_info agp_info;		/**< AGP device information */
 	struct list_head memory;
 	unsigned long mode;		/**< AGP mode */
 	struct agp_bridge_data *bridge;
@@ -607,13 +625,6 @@
 };
 
 /**
- * GEM specific mm private for tracking GEM objects
- */
-struct drm_gem_mm {
-	struct drm_vma_offset_manager vma_manager;
-};
-
-/**
  * This structure defines the drm_mm memory object, which will be used by the
  * DRM for its buffer objects.
  */
@@ -750,10 +761,6 @@
 	int (*set_unique)(struct drm_device *dev, struct drm_master *master,
 			  struct drm_unique *unique);
 	int (*irq_by_busid)(struct drm_device *dev, struct drm_irq_busid *p);
-	/* hooks that are for PCI */
-	int (*agp_init)(struct drm_device *dev);
-	void (*agp_destroy)(struct drm_device *dev);
-
 };
 
 /**
@@ -841,6 +848,7 @@
 	 *
 	 * \param dev  DRM device.
 	 * \param crtc Id of the crtc to query.
+	 * \param flags Flags from the caller (DRM_CALLED_FROM_VBLIRQ or 0).
 	 * \param *vpos Target location for current vertical scanout position.
 	 * \param *hpos Target location for current horizontal scanout position.
 	 * \param *stime Target location for timestamp taken immediately before
@@ -863,6 +871,7 @@
 	 *
 	 */
 	int (*get_scanout_position) (struct drm_device *dev, int crtc,
+				     unsigned int flags,
 				     int *vpos, int *hpos, ktime_t *stime,
 				     ktime_t *etime);
 
@@ -903,7 +912,7 @@
 
 	/* these have to be filled in */
 
-	irqreturn_t(*irq_handler) (DRM_IRQ_ARGS);
+	irqreturn_t(*irq_handler) (int irq, void *arg);
 	void (*irq_preinstall) (struct drm_device *dev);
 	int (*irq_postinstall) (struct drm_device *dev);
 	void (*irq_uninstall) (struct drm_device *dev);
@@ -995,8 +1004,8 @@
 	} kdriver;
 	struct drm_bus *bus;
 
-	/* List of devices hanging off this driver */
-	struct list_head device_list;
+	/* List of devices hanging off this driver with stealth attach. */
+	struct list_head legacy_dev_list;
 };
 
 #define DRM_MINOR_UNASSIGNED 0
@@ -1085,7 +1094,7 @@
  * may contain multiple heads.
  */
 struct drm_device {
-	struct list_head driver_item;	/**< list of devices per driver */
+	struct list_head legacy_dev_list;	/**< list of devices per driver for stealth attach cleanup */
 	char *devname;			/**< For /proc/interrupts */
 	int if_version;			/**< Highest interface version set */
 
@@ -1098,8 +1107,6 @@
 	/** \name Usage Counters */
 	/*@{ */
 	int open_count;			/**< Outstanding files open */
-	atomic_t ioctl_count;		/**< Outstanding IOCTLs pending */
-	atomic_t vma_count;		/**< Outstanding vma areas open */
 	int buf_use;			/**< Buffers in use -- cannot alloc */
 	atomic_t buf_alloc;		/**< Buffer allocation in progress */
 	/*@} */
@@ -1176,7 +1183,6 @@
 	struct drm_sg_mem *sg;	/**< Scatter gather memory */
 	unsigned int num_crtcs;                  /**< Number of CRTCs on this device */
 	void *dev_private;		/**< device private data */
-	void *mm_private;
 	struct address_space *dev_mapping;
 	struct drm_sigdata sigdata;	   /**< For block_all_signals */
 	sigset_t sigmask;
@@ -1194,6 +1200,7 @@
 	/*@{ */
 	struct mutex object_name_lock;
 	struct idr object_name_idr;
+	struct drm_vma_offset_manager *vma_offset_manager;
 	/*@} */
 	int switch_power_state;
 
@@ -1268,6 +1275,7 @@
 				/* Memory management support (drm_memory.h) */
 #include <drm/drm_memory.h>
 
+
 				/* Misc. IOCTL support (drm_ioctl.h) */
 extern int drm_irq_by_busid(struct drm_device *dev, void *data,
 			    struct drm_file *file_priv);
@@ -1398,8 +1406,10 @@
 						 int crtc, int *max_error,
 						 struct timeval *vblank_time,
 						 unsigned flags,
-						 struct drm_crtc *refcrtc);
-extern void drm_calc_timestamping_constants(struct drm_crtc *crtc);
+						 const struct drm_crtc *refcrtc,
+						 const struct drm_display_mode *mode);
+extern void drm_calc_timestamping_constants(struct drm_crtc *crtc,
+					    const struct drm_display_mode *mode);
 
 extern bool
 drm_mode_parse_command_line_for_connector(const char *mode_option,
@@ -1461,6 +1471,30 @@
 extern int drm_debugfs_remove_files(const struct drm_info_list *files,
 				    int count, struct drm_minor *minor);
 extern int drm_debugfs_cleanup(struct drm_minor *minor);
+#else
+static inline int drm_debugfs_init(struct drm_minor *minor, int minor_id,
+				   struct dentry *root)
+{
+	return 0;
+}
+
+static inline int drm_debugfs_create_files(const struct drm_info_list *files,
+					   int count, struct dentry *root,
+					   struct drm_minor *minor)
+{
+	return 0;
+}
+
+static inline int drm_debugfs_remove_files(const struct drm_info_list *files,
+					   int count, struct drm_minor *minor)
+{
+	return 0;
+}
+
+static inline int drm_debugfs_cleanup(struct drm_minor *minor)
+{
+	return 0;
+}
 #endif
 
 				/* Info file support */
@@ -1645,6 +1679,7 @@
 
 	return pci_find_capability(dev->pdev, PCI_CAP_ID_AGP);
 }
+void drm_pci_agp_destroy(struct drm_device *dev);
 
 extern int drm_pci_init(struct drm_driver *driver, struct pci_driver *pdriver);
 extern void drm_pci_exit(struct drm_driver *driver, struct pci_driver *pdriver);
@@ -1660,7 +1695,6 @@
 
 /* platform section */
 extern int drm_platform_init(struct drm_driver *driver, struct platform_device *platform_device);
-extern void drm_platform_exit(struct drm_driver *driver, struct platform_device *platform_device);
 
 /* returns true if currently okay to sleep */
 static __inline__ bool drm_can_sleep(void)
diff --git a/include/drm/drm_agpsupport.h b/include/drm/drm_agpsupport.h
index a184eee..86a0218 100644
--- a/include/drm/drm_agpsupport.h
+++ b/include/drm/drm_agpsupport.h
@@ -10,17 +10,16 @@
 
 #if __OS_HAS_AGP
 
-void drm_free_agp(DRM_AGP_MEM * handle, int pages);
-int drm_bind_agp(DRM_AGP_MEM * handle, unsigned int start);
-int drm_unbind_agp(DRM_AGP_MEM * handle);
-DRM_AGP_MEM *drm_agp_bind_pages(struct drm_device *dev,
+void drm_free_agp(struct agp_memory * handle, int pages);
+int drm_bind_agp(struct agp_memory * handle, unsigned int start);
+int drm_unbind_agp(struct agp_memory * handle);
+struct agp_memory *drm_agp_bind_pages(struct drm_device *dev,
 				struct page **pages,
 				unsigned long num_pages,
 				uint32_t gtt_offset,
 				uint32_t type);
 
 struct drm_agp_head *drm_agp_init(struct drm_device *dev);
-void drm_agp_destroy(struct drm_agp_head *agp);
 void drm_agp_clear(struct drm_device *dev);
 int drm_agp_acquire(struct drm_device *dev);
 int drm_agp_acquire_ioctl(struct drm_device *dev, void *data,
@@ -46,29 +45,23 @@
 int drm_agp_bind(struct drm_device *dev, struct drm_agp_binding *request);
 int drm_agp_bind_ioctl(struct drm_device *dev, void *data,
 		       struct drm_file *file_priv);
-
-static inline int drm_core_has_AGP(struct drm_device *dev)
-{
-	return drm_core_check_feature(dev, DRIVER_USE_AGP);
-}
-
 #else /* __OS_HAS_AGP */
 
-static inline void drm_free_agp(DRM_AGP_MEM * handle, int pages)
+static inline void drm_free_agp(struct agp_memory * handle, int pages)
 {
 }
 
-static inline int drm_bind_agp(DRM_AGP_MEM * handle, unsigned int start)
+static inline int drm_bind_agp(struct agp_memory * handle, unsigned int start)
 {
 	return -ENODEV;
 }
 
-static inline int drm_unbind_agp(DRM_AGP_MEM * handle)
+static inline int drm_unbind_agp(struct agp_memory * handle)
 {
 	return -ENODEV;
 }
 
-static inline DRM_AGP_MEM *drm_agp_bind_pages(struct drm_device *dev,
+static inline struct agp_memory *drm_agp_bind_pages(struct drm_device *dev,
 					      struct page **pages,
 					      unsigned long num_pages,
 					      uint32_t gtt_offset,
@@ -82,10 +75,6 @@
 	return NULL;
 }
 
-static inline void drm_agp_destroy(struct drm_agp_head *agp)
-{
-}
-
 static inline void drm_agp_clear(struct drm_device *dev)
 {
 }
@@ -183,12 +172,6 @@
 {
 	return -ENODEV;
 }
-
-static inline int drm_core_has_AGP(struct drm_device *dev)
-{
-	return 0;
-}
-
 #endif /* __OS_HAS_AGP */
 
 #endif /* _DRM_AGPSUPPORT_H_ */
diff --git a/include/drm/drm_crtc.h b/include/drm/drm_crtc.h
index f32c5cd..8f3dee0 100644
--- a/include/drm/drm_crtc.h
+++ b/include/drm/drm_crtc.h
@@ -30,6 +30,7 @@
 #include <linux/types.h>
 #include <linux/idr.h>
 #include <linux/fb.h>
+#include <linux/hdmi.h>
 #include <drm/drm_mode.h>
 
 #include <drm/drm_fourcc.h>
@@ -181,6 +182,7 @@
 
 	int vrefresh;		/* in Hz */
 	int hsync;		/* in kHz */
+	enum hdmi_picture_aspect picture_aspect_ratio;
 };
 
 static inline bool drm_mode_is_stereo(const struct drm_display_mode *mode)
@@ -447,7 +449,7 @@
 	uint16_t *gamma_store;
 
 	/* Constants needed for precise vblank and swap timestamping. */
-	s64 framedur_ns, linedur_ns, pixeldur_ns;
+	int framedur_ns, linedur_ns, pixeldur_ns;
 
 	/* if you are using the helper */
 	void *helper_private;
@@ -905,6 +907,9 @@
 
 	/* whether async page flip is supported or not */
 	bool async_page_flip;
+
+	/* cursor size */
+	uint32_t cursor_width, cursor_height;
 };
 
 #define obj_to_crtc(x) container_of(x, struct drm_crtc, base)
@@ -929,6 +934,19 @@
 			 struct drm_crtc *crtc,
 			 const struct drm_crtc_funcs *funcs);
 extern void drm_crtc_cleanup(struct drm_crtc *crtc);
+extern unsigned int drm_crtc_index(struct drm_crtc *crtc);
+
+/**
+ * drm_crtc_mask - find the mask of a registered CRTC
+ * @crtc: CRTC to find mask for
+ *
+ * Given a registered CRTC, return the mask bit of that CRTC for an
+ * encoder's possible_crtcs field.
+ */
+static inline uint32_t drm_crtc_mask(struct drm_crtc *crtc)
+{
+	return 1 << drm_crtc_index(crtc);
+}
 
 extern void drm_connector_ida_init(void);
 extern void drm_connector_ida_destroy(void);
@@ -950,6 +968,19 @@
 			    const struct drm_encoder_funcs *funcs,
 			    int encoder_type);
 
+/**
+ * drm_encoder_crtc_ok - can a given crtc drive a given encoder?
+ * @encoder: encoder to test
+ * @crtc: crtc to test
+ *
+ * Return false if @encoder can't be driven by @crtc, true otherwise.
+ */
+static inline bool drm_encoder_crtc_ok(struct drm_encoder *encoder,
+				       struct drm_crtc *crtc)
+{
+	return !!(encoder->possible_crtcs & drm_crtc_mask(crtc));
+}
+
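drm_crtc_mask() and drm_encoder_crtc_ok() replace open-coded 1 << crtc_index arithmetic when filling in possible_crtcs. A short hedged sketch; sample_bind_encoder() is illustrative only:

static int sample_bind_encoder(struct drm_encoder *encoder, struct drm_crtc *crtc)
{
	/* Allow exactly one CRTC for this encoder. */
	encoder->possible_crtcs = drm_crtc_mask(crtc);

	if (!drm_encoder_crtc_ok(encoder, crtc))
		return -EINVAL;

	return 0;
}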
 extern int drm_plane_init(struct drm_device *dev,
 			  struct drm_plane *plane,
 			  unsigned long possible_crtcs,
diff --git a/include/drm/drm_crtc_helper.h b/include/drm/drm_crtc_helper.h
index ef6ad3a..b1388b5 100644
--- a/include/drm/drm_crtc_helper.h
+++ b/include/drm/drm_crtc_helper.h
@@ -120,8 +120,8 @@
  */
 struct drm_connector_helper_funcs {
 	int (*get_modes)(struct drm_connector *connector);
-	int (*mode_valid)(struct drm_connector *connector,
-			  struct drm_display_mode *mode);
+	enum drm_mode_status (*mode_valid)(struct drm_connector *connector,
+					   struct drm_display_mode *mode);
 	struct drm_encoder *(*best_encoder)(struct drm_connector *connector);
 };
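With the prototype change above, connector ->mode_valid hooks return a specific enum drm_mode_status reason rather than a bare int. A minimal hedged example; the 165 MHz pixel clock limit is only illustrative:

static enum drm_mode_status
sample_connector_mode_valid(struct drm_connector *connector,
			    struct drm_display_mode *mode)
{
	if (mode->clock > 165000)
		return MODE_CLOCK_HIGH;	/* reject with a precise reason */

	return MODE_OK;
}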
 
diff --git a/include/drm/drm_dp_helper.h b/include/drm/drm_dp_helper.h
index a92c375..1d09050 100644
--- a/include/drm/drm_dp_helper.h
+++ b/include/drm/drm_dp_helper.h
@@ -41,22 +41,22 @@
  * 1.2 formally includes both eDP and DPI definitions.
  */
 
-#define AUX_NATIVE_WRITE	0x8
-#define AUX_NATIVE_READ		0x9
-#define AUX_I2C_WRITE		0x0
-#define AUX_I2C_READ		0x1
-#define AUX_I2C_STATUS		0x2
-#define AUX_I2C_MOT		0x4
+#define DP_AUX_I2C_WRITE		0x0
+#define DP_AUX_I2C_READ			0x1
+#define DP_AUX_I2C_STATUS		0x2
+#define DP_AUX_I2C_MOT			0x4
+#define DP_AUX_NATIVE_WRITE		0x8
+#define DP_AUX_NATIVE_READ		0x9
 
-#define AUX_NATIVE_REPLY_ACK	(0x0 << 4)
-#define AUX_NATIVE_REPLY_NACK	(0x1 << 4)
-#define AUX_NATIVE_REPLY_DEFER	(0x2 << 4)
-#define AUX_NATIVE_REPLY_MASK	(0x3 << 4)
+#define DP_AUX_NATIVE_REPLY_ACK		(0x0 << 0)
+#define DP_AUX_NATIVE_REPLY_NACK	(0x1 << 0)
+#define DP_AUX_NATIVE_REPLY_DEFER	(0x2 << 0)
+#define DP_AUX_NATIVE_REPLY_MASK	(0x3 << 0)
 
-#define AUX_I2C_REPLY_ACK	(0x0 << 6)
-#define AUX_I2C_REPLY_NACK	(0x1 << 6)
-#define AUX_I2C_REPLY_DEFER	(0x2 << 6)
-#define AUX_I2C_REPLY_MASK	(0x3 << 6)
+#define DP_AUX_I2C_REPLY_ACK		(0x0 << 2)
+#define DP_AUX_I2C_REPLY_NACK		(0x1 << 2)
+#define DP_AUX_I2C_REPLY_DEFER		(0x2 << 2)
+#define DP_AUX_I2C_REPLY_MASK		(0x3 << 2)
 
 /* AUX CH addresses */
 /* DPCD */
@@ -266,9 +266,10 @@
 
 #define DP_TEST_REQUEST			    0x218
 # define DP_TEST_LINK_TRAINING		    (1 << 0)
-# define DP_TEST_LINK_PATTERN		    (1 << 1)
+# define DP_TEST_LINK_VIDEO_PATTERN	    (1 << 1)
 # define DP_TEST_LINK_EDID_READ		    (1 << 2)
 # define DP_TEST_LINK_PHY_TEST_PATTERN	    (1 << 3) /* DPCD >= 1.1 */
+# define DP_TEST_LINK_FAUX_PATTERN	    (1 << 4) /* DPCD >= 1.2 */
 
 #define DP_TEST_LINK_RATE		    0x219
 # define DP_LINK_RATE_162		    (0x6)
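Beyond the DP_AUX_ prefix, the reply fields also move down: native replies now occupy bits 0-1 and i2c replies bits 2-3, so these masks apply to a reply value already shifted out of the raw AUX byte's high nibble. A hedged decoding sketch; the retry policy is illustrative:

static int sample_check_aux_reply(u8 reply)
{
	switch (reply & DP_AUX_NATIVE_REPLY_MASK) {
	case DP_AUX_NATIVE_REPLY_ACK:
		return 0;
	case DP_AUX_NATIVE_REPLY_DEFER:
		usleep_range(400, 500);	/* sink is busy, let the caller retry */
		return -EBUSY;
	default:
		return -EIO;		/* NACK or a reserved value */
	}
}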
diff --git a/include/drm/drm_mipi_dsi.h b/include/drm/drm_mipi_dsi.h
new file mode 100644
index 0000000..d32628a
--- /dev/null
+++ b/include/drm/drm_mipi_dsi.h
@@ -0,0 +1,158 @@
+/*
+ * MIPI DSI Bus
+ *
+ * Copyright (C) 2012-2013, Samsung Electronics, Co., Ltd.
+ * Andrzej Hajda <a.hajda@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef __DRM_MIPI_DSI_H__
+#define __DRM_MIPI_DSI_H__
+
+#include <linux/device.h>
+
+struct mipi_dsi_host;
+struct mipi_dsi_device;
+
+/**
+ * struct mipi_dsi_msg - read/write DSI buffer
+ * @channel: virtual channel id
+ * @type: payload data type
+ * @tx_len: length of @tx_buf
+ * @tx_buf: data to be written
+ * @rx_len: length of @rx_buf
+ * @rx_buf: data to be read, or NULL
+ */
+struct mipi_dsi_msg {
+	u8 channel;
+	u8 type;
+
+	size_t tx_len;
+	const void *tx_buf;
+
+	size_t rx_len;
+	void *rx_buf;
+};
+
+/**
+ * struct mipi_dsi_host_ops - DSI bus operations
+ * @attach: attach DSI device to DSI host
+ * @detach: detach DSI device from DSI host
+ * @transfer: send and/or receive DSI packet, return number of received bytes,
+ * 	      or error
+ */
+struct mipi_dsi_host_ops {
+	int (*attach)(struct mipi_dsi_host *host,
+		      struct mipi_dsi_device *dsi);
+	int (*detach)(struct mipi_dsi_host *host,
+		      struct mipi_dsi_device *dsi);
+	ssize_t (*transfer)(struct mipi_dsi_host *host,
+			    struct mipi_dsi_msg *msg);
+};
+
+/**
+ * struct mipi_dsi_host - DSI host device
+ * @dev: driver model device node for this DSI host
+ * @ops: DSI host operations
+ */
+struct mipi_dsi_host {
+	struct device *dev;
+	const struct mipi_dsi_host_ops *ops;
+};
+
+int mipi_dsi_host_register(struct mipi_dsi_host *host);
+void mipi_dsi_host_unregister(struct mipi_dsi_host *host);
+
+/* DSI mode flags */
+
+/* video mode */
+#define MIPI_DSI_MODE_VIDEO		BIT(0)
+/* video burst mode */
+#define MIPI_DSI_MODE_VIDEO_BURST	BIT(1)
+/* video pulse mode */
+#define MIPI_DSI_MODE_VIDEO_SYNC_PULSE	BIT(2)
+/* enable auto vertical count mode */
+#define MIPI_DSI_MODE_VIDEO_AUTO_VERT	BIT(3)
+/* enable hsync-end packets in vsync-pulse and v-porch area */
+#define MIPI_DSI_MODE_VIDEO_HSE		BIT(4)
+/* disable hfront-porch area */
+#define MIPI_DSI_MODE_VIDEO_HFP		BIT(5)
+/* disable hback-porch area */
+#define MIPI_DSI_MODE_VIDEO_HBP		BIT(6)
+/* disable hsync-active area */
+#define MIPI_DSI_MODE_VIDEO_HSA		BIT(7)
+/* flush display FIFO on vsync pulse */
+#define MIPI_DSI_MODE_VSYNC_FLUSH	BIT(8)
+/* disable EoT packets in HS mode */
+#define MIPI_DSI_MODE_EOT_PACKET	BIT(9)
+
+enum mipi_dsi_pixel_format {
+	MIPI_DSI_FMT_RGB888,
+	MIPI_DSI_FMT_RGB666,
+	MIPI_DSI_FMT_RGB666_PACKED,
+	MIPI_DSI_FMT_RGB565,
+};
+
+/**
+ * struct mipi_dsi_device - DSI peripheral device
+ * @host: DSI host for this peripheral
+ * @dev: driver model device node for this peripheral
+ * @channel: virtual channel assigned to the peripheral
+ * @format: pixel format for video mode
+ * @lanes: number of active data lanes
+ * @mode_flags: DSI operation mode related flags
+ */
+struct mipi_dsi_device {
+	struct mipi_dsi_host *host;
+	struct device dev;
+
+	unsigned int channel;
+	unsigned int lanes;
+	enum mipi_dsi_pixel_format format;
+	unsigned long mode_flags;
+};
+
+#define to_mipi_dsi_device(d) container_of(d, struct mipi_dsi_device, dev)
+
+int mipi_dsi_attach(struct mipi_dsi_device *dsi);
+int mipi_dsi_detach(struct mipi_dsi_device *dsi);
+int mipi_dsi_dcs_write(struct mipi_dsi_device *dsi, unsigned int channel,
+		       const void *data, size_t len);
+ssize_t mipi_dsi_dcs_read(struct mipi_dsi_device *dsi, unsigned int channel,
+			  u8 cmd, void *data, size_t len);
+
+/**
+ * struct mipi_dsi_driver - DSI driver
+ * @driver: device driver model driver
+ * @probe: callback for device binding
+ * @remove: callback for device unbinding
+ */
+struct mipi_dsi_driver {
+	struct device_driver driver;
+	int (*probe)(struct mipi_dsi_device *dsi);
+	int (*remove)(struct mipi_dsi_device *dsi);
+};
+
+#define to_mipi_dsi_driver(d) container_of(d, struct mipi_dsi_driver, driver)
+
+static inline void *mipi_dsi_get_drvdata(const struct mipi_dsi_device *dsi)
+{
+	return dev_get_drvdata(&dsi->dev);
+}
+
+static inline void mipi_dsi_set_drvdata(struct mipi_dsi_device *dsi, void *data)
+{
+	dev_set_drvdata(&dsi->dev, data);
+}
+
+int mipi_dsi_driver_register(struct mipi_dsi_driver *driver);
+void mipi_dsi_driver_unregister(struct mipi_dsi_driver *driver);
+
+#define module_mipi_dsi_driver(__mipi_dsi_driver) \
+	module_driver(__mipi_dsi_driver, mipi_dsi_driver_register, \
+			mipi_dsi_driver_unregister)
+
+#endif /* __DRM_MIPI_DSI_H__ */
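To make the new bus concrete, a hedged sketch of a peripheral driver sitting on top of it; every sample_* identifier is invented and error handling is trimmed:

static int sample_dsi_probe(struct mipi_dsi_device *dsi)
{
	dsi->lanes = 4;
	dsi->format = MIPI_DSI_FMT_RGB888;
	dsi->mode_flags = MIPI_DSI_MODE_VIDEO | MIPI_DSI_MODE_VIDEO_BURST;

	return mipi_dsi_attach(dsi);	/* routed to the host's ->attach() */
}

static int sample_dsi_remove(struct mipi_dsi_device *dsi)
{
	return mipi_dsi_detach(dsi);
}

static struct mipi_dsi_driver sample_dsi_driver = {
	.probe	= sample_dsi_probe,
	.remove	= sample_dsi_remove,
	.driver	= {
		.name = "sample-dsi-panel",
	},
};
module_mipi_dsi_driver(sample_dsi_driver);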
diff --git a/include/drm/drm_os_linux.h b/include/drm/drm_os_linux.h
index 815fafc..86ab99b 100644
--- a/include/drm/drm_os_linux.h
+++ b/include/drm/drm_os_linux.h
@@ -21,7 +21,6 @@
 
 /** Current process ID */
 #define DRM_CURRENTPID			task_pid_nr(current)
-#define DRM_SUSER(p)			capable(CAP_SYS_ADMIN)
 #define DRM_UDELAY(d)			udelay(d)
 /** Read a byte from a MMIO region */
 #define DRM_READ8(map, offset)		readb(((void __iomem *)(map)->handle) + (offset))
@@ -35,45 +34,12 @@
 #define DRM_WRITE16(map, offset, val)   writew(val, ((void __iomem *)(map)->handle) + (offset))
 /** Write a dword into a MMIO region */
 #define DRM_WRITE32(map, offset, val)	writel(val, ((void __iomem *)(map)->handle) + (offset))
-/** Read memory barrier */
 
 /** Read a qword from a MMIO region - be careful using these unless you really understand them */
 #define DRM_READ64(map, offset)		readq(((void __iomem *)(map)->handle) + (offset))
 /** Write a qword into a MMIO region */
 #define DRM_WRITE64(map, offset, val)	writeq(val, ((void __iomem *)(map)->handle) + (offset))
 
-#define DRM_READMEMORYBARRIER()		rmb()
-/** Write memory barrier */
-#define DRM_WRITEMEMORYBARRIER()	wmb()
-/** Read/write memory barrier */
-#define DRM_MEMORYBARRIER()		mb()
-
-/** IRQ handler arguments and return type and values */
-#define DRM_IRQ_ARGS		int irq, void *arg
-
-/** AGP types */
-#if __OS_HAS_AGP
-#define DRM_AGP_MEM		struct agp_memory
-#define DRM_AGP_KERN		struct agp_kern_info
-#else
-/* define some dummy types for non AGP supporting kernels */
-struct no_agp_kern {
-	unsigned long aper_base;
-	unsigned long aper_size;
-};
-#define DRM_AGP_MEM             int
-#define DRM_AGP_KERN            struct no_agp_kern
-#endif
-
-/** Other copying of data to kernel space */
-#define DRM_COPY_FROM_USER(arg1, arg2, arg3)		\
-	copy_from_user(arg1, arg2, arg3)
-/** Other copying of data from kernel space */
-#define DRM_COPY_TO_USER(arg1, arg2, arg3)		\
-	copy_to_user(arg1, arg2, arg3)
-
-#define DRM_HZ HZ
-
 #define DRM_WAIT_ON( ret, queue, timeout, condition )		\
 do {								\
 	DECLARE_WAITQUEUE(entry, current);			\
@@ -97,6 +63,3 @@
 	__set_current_state(TASK_RUNNING);			\
 	remove_wait_queue(&(queue), &entry);			\
 } while (0)
-
-#define DRM_WAKEUP( queue ) wake_up( queue )
-#define DRM_INIT_WAITQUEUE( queue ) init_waitqueue_head( queue )
diff --git a/include/drm/drm_panel.h b/include/drm/drm_panel.h
new file mode 100644
index 0000000..c2ab77a
--- /dev/null
+++ b/include/drm/drm_panel.h
@@ -0,0 +1,82 @@
+/*
+ * Copyright (C) 2013, NVIDIA Corporation.  All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sub license,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+#ifndef __DRM_PANEL_H__
+#define __DRM_PANEL_H__
+
+#include <linux/list.h>
+
+struct drm_connector;
+struct drm_device;
+struct drm_panel;
+
+struct drm_panel_funcs {
+	int (*disable)(struct drm_panel *panel);
+	int (*enable)(struct drm_panel *panel);
+	int (*get_modes)(struct drm_panel *panel);
+};
+
+struct drm_panel {
+	struct drm_device *drm;
+	struct drm_connector *connector;
+	struct device *dev;
+
+	const struct drm_panel_funcs *funcs;
+
+	struct list_head list;
+};
+
+static inline int drm_panel_disable(struct drm_panel *panel)
+{
+	if (panel && panel->funcs && panel->funcs->disable)
+		return panel->funcs->disable(panel);
+
+	return panel ? -ENOSYS : -EINVAL;
+}
+
+static inline int drm_panel_enable(struct drm_panel *panel)
+{
+	if (panel && panel->funcs && panel->funcs->enable)
+		return panel->funcs->enable(panel);
+
+	return panel ? -ENOSYS : -EINVAL;
+}
+
+void drm_panel_init(struct drm_panel *panel);
+
+int drm_panel_add(struct drm_panel *panel);
+void drm_panel_remove(struct drm_panel *panel);
+
+int drm_panel_attach(struct drm_panel *panel, struct drm_connector *connector);
+int drm_panel_detach(struct drm_panel *panel);
+
+#ifdef CONFIG_OF
+struct drm_panel *of_drm_find_panel(struct device_node *np);
+#else
+static inline struct drm_panel *of_drm_find_panel(struct device_node *np)
+{
+	return NULL;
+}
+#endif
+
+#endif
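A hedged sketch of a minimal panel driver against this API; the sample_* names and the platform_device probe glue are assumptions, not part of the header:

static int sample_panel_get_modes(struct drm_panel *panel)
{
	/* A real driver would add its fixed mode to panel->connector here. */
	return 1;
}

static const struct drm_panel_funcs sample_panel_funcs = {
	.get_modes = sample_panel_get_modes,
};

static struct drm_panel sample_panel;

static int sample_panel_probe(struct platform_device *pdev)
{
	drm_panel_init(&sample_panel);
	sample_panel.dev = &pdev->dev;
	sample_panel.funcs = &sample_panel_funcs;

	/* DRM drivers can then look it up with of_drm_find_panel(). */
	return drm_panel_add(&sample_panel);
}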
diff --git a/include/drm/ttm/ttm_bo_driver.h b/include/drm/ttm/ttm_bo_driver.h
index 8639c85..32d34eb 100644
--- a/include/drm/ttm/ttm_bo_driver.h
+++ b/include/drm/ttm/ttm_bo_driver.h
@@ -681,6 +681,15 @@
 extern int ttm_tt_swapout(struct ttm_tt *ttm,
 			  struct file *persistent_swap_storage);
 
+/**
+ * ttm_tt_unpopulate - free pages from a ttm
+ *
+ * @ttm: Pointer to the ttm_tt structure
+ *
+ * Calls the driver method to free all pages from a ttm
+ */
+extern void ttm_tt_unpopulate(struct ttm_tt *ttm);
+
 /*
  * ttm_bo.c
  */
diff --git a/include/drm/ttm/ttm_object.h b/include/drm/ttm/ttm_object.h
index 58b0298..0097cc0 100644
--- a/include/drm/ttm/ttm_object.h
+++ b/include/drm/ttm/ttm_object.h
@@ -190,14 +190,26 @@
  * @key: Hash key
  *
  * Looks up a struct ttm_base_object with the key @key.
- * Also verifies that the object is visible to the application, by
- * comparing the @tfile argument and checking the object shareable flag.
  */
 
 extern struct ttm_base_object *ttm_base_object_lookup(struct ttm_object_file
 						      *tfile, uint32_t key);
 
 /**
+ * ttm_base_object_lookup_for_ref
+ *
+ * @tdev: Pointer to a struct ttm_object_device.
+ * @key: Hash key
+ *
+ * Looks up a struct ttm_base_object with the key @key.
+ * This function should only be used when the struct tfile associated with the
+ * caller doesn't yet have a reference to the base object.
+ */
+
+extern struct ttm_base_object *
+ttm_base_object_lookup_for_ref(struct ttm_object_device *tdev, uint32_t key);
+
+/**
  * ttm_base_object_unref
  *
  * @p_base: Pointer to a pointer referencing a struct ttm_base_object.
@@ -218,6 +230,8 @@
  * @existed: Upon completion, indicates that an identical reference object
  * already existed, and the refcount was upped on that object instead.
  *
+ * Checks that the base object is shareable and adds a ref object to it.
+ *
  * Adding a ref object to a base object is basically like referencing the
  * base object, but a user-space application holds the reference. When the
  * file corresponding to @tfile is closed, all its reference objects are
diff --git a/include/drm/ttm/ttm_page_alloc.h b/include/drm/ttm/ttm_page_alloc.h
index d1f61bf..49a8284 100644
--- a/include/drm/ttm/ttm_page_alloc.h
+++ b/include/drm/ttm/ttm_page_alloc.h
@@ -29,6 +29,8 @@
 #include <drm/ttm/ttm_bo_driver.h>
 #include <drm/ttm/ttm_memory.h>
 
+struct device;
+
 /**
  * Initialize pool allocator.
  */
diff --git a/include/dt-bindings/clock/mpc512x-clock.h b/include/dt-bindings/clock/mpc512x-clock.h
new file mode 100644
index 0000000..4f94919
--- /dev/null
+++ b/include/dt-bindings/clock/mpc512x-clock.h
@@ -0,0 +1,76 @@
+/*
+ * This header provides constants for MPC512x clock specs in DT bindings.
+ */
+
+#ifndef _DT_BINDINGS_CLOCK_MPC512x_CLOCK_H
+#define _DT_BINDINGS_CLOCK_MPC512x_CLOCK_H
+
+#define MPC512x_CLK_DUMMY		0
+#define MPC512x_CLK_REF			1
+#define MPC512x_CLK_SYS			2
+#define MPC512x_CLK_DIU			3
+#define MPC512x_CLK_VIU			4
+#define MPC512x_CLK_CSB			5
+#define MPC512x_CLK_E300		6
+#define MPC512x_CLK_IPS			7
+#define MPC512x_CLK_FEC			8
+#define MPC512x_CLK_SATA		9
+#define MPC512x_CLK_PATA		10
+#define MPC512x_CLK_NFC			11
+#define MPC512x_CLK_LPC			12
+#define MPC512x_CLK_MBX_BUS		13
+#define MPC512x_CLK_MBX			14
+#define MPC512x_CLK_MBX_3D		15
+#define MPC512x_CLK_AXE			16
+#define MPC512x_CLK_USB1		17
+#define MPC512x_CLK_USB2		18
+#define MPC512x_CLK_I2C			19
+#define MPC512x_CLK_MSCAN0_MCLK		20
+#define MPC512x_CLK_MSCAN1_MCLK		21
+#define MPC512x_CLK_MSCAN2_MCLK		22
+#define MPC512x_CLK_MSCAN3_MCLK		23
+#define MPC512x_CLK_BDLC		24
+#define MPC512x_CLK_SDHC		25
+#define MPC512x_CLK_PCI			26
+#define MPC512x_CLK_PSC_MCLK_IN		27
+#define MPC512x_CLK_SPDIF_TX		28
+#define MPC512x_CLK_SPDIF_RX		29
+#define MPC512x_CLK_SPDIF_MCLK		30
+#define MPC512x_CLK_SPDIF		31
+#define MPC512x_CLK_AC97		32
+#define MPC512x_CLK_PSC0_MCLK		33
+#define MPC512x_CLK_PSC1_MCLK		34
+#define MPC512x_CLK_PSC2_MCLK		35
+#define MPC512x_CLK_PSC3_MCLK		36
+#define MPC512x_CLK_PSC4_MCLK		37
+#define MPC512x_CLK_PSC5_MCLK		38
+#define MPC512x_CLK_PSC6_MCLK		39
+#define MPC512x_CLK_PSC7_MCLK		40
+#define MPC512x_CLK_PSC8_MCLK		41
+#define MPC512x_CLK_PSC9_MCLK		42
+#define MPC512x_CLK_PSC10_MCLK		43
+#define MPC512x_CLK_PSC11_MCLK		44
+#define MPC512x_CLK_PSC_FIFO		45
+#define MPC512x_CLK_PSC0		46
+#define MPC512x_CLK_PSC1		47
+#define MPC512x_CLK_PSC2		48
+#define MPC512x_CLK_PSC3		49
+#define MPC512x_CLK_PSC4		50
+#define MPC512x_CLK_PSC5		51
+#define MPC512x_CLK_PSC6		52
+#define MPC512x_CLK_PSC7		53
+#define MPC512x_CLK_PSC8		54
+#define MPC512x_CLK_PSC9		55
+#define MPC512x_CLK_PSC10		56
+#define MPC512x_CLK_PSC11		57
+#define MPC512x_CLK_SDHC2		58
+#define MPC512x_CLK_FEC2		59
+#define MPC512x_CLK_OUT0_CLK		60
+#define MPC512x_CLK_OUT1_CLK		61
+#define MPC512x_CLK_OUT2_CLK		62
+#define MPC512x_CLK_OUT3_CLK		63
+#define MPC512x_CLK_CAN_CLK_IN		64
+
+#define MPC512x_CLK_LAST_PUBLIC		64
+
+#endif
diff --git a/include/linux/bcma/bcma_driver_chipcommon.h b/include/linux/bcma/bcma_driver_chipcommon.h
index c49e1a1..63d105c 100644
--- a/include/linux/bcma/bcma_driver_chipcommon.h
+++ b/include/linux/bcma/bcma_driver_chipcommon.h
@@ -640,6 +640,7 @@
 	spinlock_t gpio_lock;
 #ifdef CONFIG_BCMA_DRIVER_GPIO
 	struct gpio_chip gpio;
+	struct irq_domain *irq_domain;
 #endif
 };
 
diff --git a/include/linux/binfmts.h b/include/linux/binfmts.h
index fd8bf32..b4a745d 100644
--- a/include/linux/binfmts.h
+++ b/include/linux/binfmts.h
@@ -115,7 +115,6 @@
 extern int prepare_bprm_creds(struct linux_binprm *bprm);
 extern void install_exec_creds(struct linux_binprm *bprm);
 extern void set_binfmt(struct linux_binfmt *new);
-extern void free_bprm(struct linux_binprm *);
 extern ssize_t read_code(struct file *, unsigned long, loff_t, size_t);
 
 #endif /* _LINUX_BINFMTS_H */
diff --git a/include/linux/bio.h b/include/linux/bio.h
index 060ff69..5a4d39b 100644
--- a/include/linux/bio.h
+++ b/include/linux/bio.h
@@ -61,25 +61,87 @@
  * various member access, note that bio_data should of course not be used
  * on highmem page vectors
  */
-#define bio_iovec_idx(bio, idx)	(&((bio)->bi_io_vec[(idx)]))
-#define bio_iovec(bio)		bio_iovec_idx((bio), (bio)->bi_idx)
-#define bio_page(bio)		bio_iovec((bio))->bv_page
-#define bio_offset(bio)		bio_iovec((bio))->bv_offset
-#define bio_segments(bio)	((bio)->bi_vcnt - (bio)->bi_idx)
-#define bio_sectors(bio)	((bio)->bi_size >> 9)
-#define bio_end_sector(bio)	((bio)->bi_sector + bio_sectors((bio)))
+#define __bvec_iter_bvec(bvec, iter)	(&(bvec)[(iter).bi_idx])
+
+#define bvec_iter_page(bvec, iter)				\
+	(__bvec_iter_bvec((bvec), (iter))->bv_page)
+
+#define bvec_iter_len(bvec, iter)				\
+	min((iter).bi_size,					\
+	    __bvec_iter_bvec((bvec), (iter))->bv_len - (iter).bi_bvec_done)
+
+#define bvec_iter_offset(bvec, iter)				\
+	(__bvec_iter_bvec((bvec), (iter))->bv_offset + (iter).bi_bvec_done)
+
+#define bvec_iter_bvec(bvec, iter)				\
+((struct bio_vec) {						\
+	.bv_page	= bvec_iter_page((bvec), (iter)),	\
+	.bv_len		= bvec_iter_len((bvec), (iter)),	\
+	.bv_offset	= bvec_iter_offset((bvec), (iter)),	\
+})
+
+#define bio_iter_iovec(bio, iter)				\
+	bvec_iter_bvec((bio)->bi_io_vec, (iter))
+
+#define bio_iter_page(bio, iter)				\
+	bvec_iter_page((bio)->bi_io_vec, (iter))
+#define bio_iter_len(bio, iter)					\
+	bvec_iter_len((bio)->bi_io_vec, (iter))
+#define bio_iter_offset(bio, iter)				\
+	bvec_iter_offset((bio)->bi_io_vec, (iter))
+
+#define bio_page(bio)		bio_iter_page((bio), (bio)->bi_iter)
+#define bio_offset(bio)		bio_iter_offset((bio), (bio)->bi_iter)
+#define bio_iovec(bio)		bio_iter_iovec((bio), (bio)->bi_iter)
+
+#define bio_multiple_segments(bio)				\
+	((bio)->bi_iter.bi_size != bio_iovec(bio).bv_len)
+#define bio_sectors(bio)	((bio)->bi_iter.bi_size >> 9)
+#define bio_end_sector(bio)	((bio)->bi_iter.bi_sector + bio_sectors((bio)))
+
+/*
+ * Check whether this bio carries any data or not. A NULL bio is allowed.
+ */
+static inline bool bio_has_data(struct bio *bio)
+{
+	if (bio &&
+	    bio->bi_iter.bi_size &&
+	    !(bio->bi_rw & REQ_DISCARD))
+		return true;
+
+	return false;
+}
+
+static inline bool bio_is_rw(struct bio *bio)
+{
+	if (!bio_has_data(bio))
+		return false;
+
+	if (bio->bi_rw & BIO_NO_ADVANCE_ITER_MASK)
+		return false;
+
+	return true;
+}
+
+static inline bool bio_mergeable(struct bio *bio)
+{
+	if (bio->bi_rw & REQ_NOMERGE_FLAGS)
+		return false;
+
+	return true;
+}
 
 static inline unsigned int bio_cur_bytes(struct bio *bio)
 {
-	if (bio->bi_vcnt)
-		return bio_iovec(bio)->bv_len;
+	if (bio_has_data(bio))
+		return bio_iovec(bio).bv_len;
 	else /* dataless requests such as discard */
-		return bio->bi_size;
+		return bio->bi_iter.bi_size;
 }
 
 static inline void *bio_data(struct bio *bio)
 {
-	if (bio->bi_vcnt)
+	if (bio_has_data(bio))
 		return page_address(bio_page(bio)) + bio_offset(bio);
 
 	return NULL;
@@ -97,19 +159,16 @@
  * permanent PIO fall back, user is probably better off disabling highmem
  * I/O completely on that queue (see ide-dma for example)
  */
-#define __bio_kmap_atomic(bio, idx)				\
-	(kmap_atomic(bio_iovec_idx((bio), (idx))->bv_page) +	\
-		bio_iovec_idx((bio), (idx))->bv_offset)
+#define __bio_kmap_atomic(bio, iter)				\
+	(kmap_atomic(bio_iter_iovec((bio), (iter)).bv_page) +	\
+		bio_iter_iovec((bio), (iter)).bv_offset)
 
-#define __bio_kunmap_atomic(addr) kunmap_atomic(addr)
+#define __bio_kunmap_atomic(addr)	kunmap_atomic(addr)
 
 /*
  * merge helpers etc
  */
 
-#define __BVEC_END(bio)		bio_iovec_idx((bio), (bio)->bi_vcnt - 1)
-#define __BVEC_START(bio)	bio_iovec_idx((bio), (bio)->bi_idx)
-
 /* Default implementation of BIOVEC_PHYS_MERGEABLE */
 #define __BIOVEC_PHYS_MERGEABLE(vec1, vec2)	\
 	((bvec_to_phys((vec1)) + (vec1)->bv_len) == bvec_to_phys((vec2)))
@@ -126,33 +185,87 @@
 	(((addr1) | (mask)) == (((addr2) - 1) | (mask)))
 #define BIOVEC_SEG_BOUNDARY(q, b1, b2) \
 	__BIO_SEG_BOUNDARY(bvec_to_phys((b1)), bvec_to_phys((b2)) + (b2)->bv_len, queue_segment_boundary((q)))
-#define BIO_SEG_BOUNDARY(q, b1, b2) \
-	BIOVEC_SEG_BOUNDARY((q), __BVEC_END((b1)), __BVEC_START((b2)))
 
 #define bio_io_error(bio) bio_endio((bio), -EIO)
 
 /*
- * drivers should not use the __ version unless they _really_ know what
- * they're doing
- */
-#define __bio_for_each_segment(bvl, bio, i, start_idx)			\
-	for (bvl = bio_iovec_idx((bio), (start_idx)), i = (start_idx);	\
-	     i < (bio)->bi_vcnt;					\
-	     bvl++, i++)
-
-/*
  * drivers should _never_ use the all version - the bio may have been split
  * before it got to the driver and the driver won't own all of it
  */
 #define bio_for_each_segment_all(bvl, bio, i)				\
-	for (i = 0;							\
-	     bvl = bio_iovec_idx((bio), (i)), i < (bio)->bi_vcnt;	\
-	     i++)
+	for (i = 0, bvl = (bio)->bi_io_vec; i < (bio)->bi_vcnt; i++, bvl++)
 
-#define bio_for_each_segment(bvl, bio, i)				\
-	for (i = (bio)->bi_idx;						\
-	     bvl = bio_iovec_idx((bio), (i)), i < (bio)->bi_vcnt;	\
-	     i++)
+static inline void bvec_iter_advance(struct bio_vec *bv, struct bvec_iter *iter,
+				     unsigned bytes)
+{
+	WARN_ONCE(bytes > iter->bi_size,
+		  "Attempted to advance past end of bvec iter\n");
+
+	while (bytes) {
+		unsigned len = min(bytes, bvec_iter_len(bv, *iter));
+
+		bytes -= len;
+		iter->bi_size -= len;
+		iter->bi_bvec_done += len;
+
+		if (iter->bi_bvec_done == __bvec_iter_bvec(bv, *iter)->bv_len) {
+			iter->bi_bvec_done = 0;
+			iter->bi_idx++;
+		}
+	}
+}
+
+#define for_each_bvec(bvl, bio_vec, iter, start)			\
+	for ((iter) = start;						\
+	     (bvl) = bvec_iter_bvec((bio_vec), (iter)),			\
+		(iter).bi_size;						\
+	     bvec_iter_advance((bio_vec), &(iter), (bvl).bv_len))
+
+
+static inline void bio_advance_iter(struct bio *bio, struct bvec_iter *iter,
+				    unsigned bytes)
+{
+	iter->bi_sector += bytes >> 9;
+
+	if (bio->bi_rw & BIO_NO_ADVANCE_ITER_MASK)
+		iter->bi_size -= bytes;
+	else
+		bvec_iter_advance(bio->bi_io_vec, iter, bytes);
+}
+
+#define __bio_for_each_segment(bvl, bio, iter, start)			\
+	for (iter = (start);						\
+	     (iter).bi_size &&						\
+		((bvl = bio_iter_iovec((bio), (iter))), 1);		\
+	     bio_advance_iter((bio), &(iter), (bvl).bv_len))
+
+#define bio_for_each_segment(bvl, bio, iter)				\
+	__bio_for_each_segment(bvl, bio, iter, (bio)->bi_iter)
+
+#define bio_iter_last(bvec, iter) ((iter).bi_size == (bvec).bv_len)
+
+static inline unsigned bio_segments(struct bio *bio)
+{
+	unsigned segs = 0;
+	struct bio_vec bv;
+	struct bvec_iter iter;
+
+	/*
+	 * We special case discard/write same, because they interpret bi_size
+	 * differently:
+	 */
+
+	if (bio->bi_rw & REQ_DISCARD)
+		return 1;
+
+	if (bio->bi_rw & REQ_WRITE_SAME)
+		return 1;
+
+	bio_for_each_segment(bv, bio, iter)
+		segs++;
+
+	return segs;
+}
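With the index-based helpers gone, a caller now declares a struct bvec_iter and receives each segment as a struct bio_vec by value. A minimal sketch of the new calling convention, assuming a fully set-up bio (the function name is illustrative, not part of the patch):

static unsigned int count_bio_bytes(struct bio *bio)
{
	struct bio_vec bv;	/* segment is handed back by value now */
	struct bvec_iter iter;	/* replaces the old integer bi_idx */
	unsigned int bytes = 0;

	bio_for_each_segment(bv, bio, iter)
		bytes += bv.bv_len;

	/* for an ordinary data bio this equals bio->bi_iter.bi_size */
	return bytes;
}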
 
 /*
  * get a reference to a bio, so it won't disappear. the intended use is
@@ -177,16 +290,15 @@
 struct bio_integrity_payload {
 	struct bio		*bip_bio;	/* parent bio */
 
-	sector_t		bip_sector;	/* virtual start sector */
+	struct bvec_iter	bip_iter;
 
+	/* kill - should just use bip_vec */
 	void			*bip_buf;	/* generated integrity data */
-	bio_end_io_t		*bip_end_io;	/* saved I/O completion fn */
 
-	unsigned int		bip_size;
+	bio_end_io_t		*bip_end_io;	/* saved I/O completion fn */
 
 	unsigned short		bip_slab;	/* slab the bip came from */
 	unsigned short		bip_vcnt;	/* # of integrity bio_vecs */
-	unsigned short		bip_idx;	/* current bip_vec index */
 	unsigned		bip_owns_buf:1;	/* should free bip_buf */
 
 	struct work_struct	bip_work;	/* I/O completion */
@@ -196,29 +308,28 @@
 };
 #endif /* CONFIG_BLK_DEV_INTEGRITY */
 
-/*
- * A bio_pair is used when we need to split a bio.
- * This can only happen for a bio that refers to just one
- * page of data, and in the unusual situation when the
- * page crosses a chunk/device boundary
- *
- * The address of the master bio is stored in bio1.bi_private
- * The address of the pool the pair was allocated from is stored
- *   in bio2.bi_private
- */
-struct bio_pair {
-	struct bio			bio1, bio2;
-	struct bio_vec			bv1, bv2;
-#if defined(CONFIG_BLK_DEV_INTEGRITY)
-	struct bio_integrity_payload	bip1, bip2;
-	struct bio_vec			iv1, iv2;
-#endif
-	atomic_t			cnt;
-	int				error;
-};
-extern struct bio_pair *bio_split(struct bio *bi, int first_sectors);
-extern void bio_pair_release(struct bio_pair *dbio);
 extern void bio_trim(struct bio *bio, int offset, int size);
+extern struct bio *bio_split(struct bio *bio, int sectors,
+			     gfp_t gfp, struct bio_set *bs);
+
+/**
+ * bio_next_split - get next @sectors from a bio, splitting if necessary
+ * @bio:	bio to split
+ * @sectors:	number of sectors to split from the front of @bio
+ * @gfp:	gfp mask
+ * @bs:		bio set to allocate from
+ *
+ * Returns a bio representing the next @sectors of @bio - if the bio is smaller
+ * than @sectors, returns the original bio unchanged.
+ */
+static inline struct bio *bio_next_split(struct bio *bio, int sectors,
+					 gfp_t gfp, struct bio_set *bs)
+{
+	if (sectors >= bio_sectors(bio))
+		return bio;
+
+	return bio_split(bio, sectors, gfp, bs);
+}
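A rough sketch of the splitting loop bio_next_split() is meant for, modelled on the md/raid-style callers; max_sectors and the bio_set bs are assumptions of the example, not values defined by this patch:

	struct bio *split;

	do {
		split = bio_next_split(bio, max_sectors, GFP_NOIO, bs);

		if (split != bio)
			bio_chain(split, bio);	/* parent completes only after all pieces */

		generic_make_request(split);
	} while (split != bio);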
 
 extern struct bio_set *bioset_create(unsigned int, unsigned int);
 extern void bioset_free(struct bio_set *);
@@ -227,10 +338,12 @@
 extern struct bio *bio_alloc_bioset(gfp_t, int, struct bio_set *);
 extern void bio_put(struct bio *);
 
-extern void __bio_clone(struct bio *, struct bio *);
+extern void __bio_clone_fast(struct bio *, struct bio *);
+extern struct bio *bio_clone_fast(struct bio *, gfp_t, struct bio_set *);
 extern struct bio *bio_clone_bioset(struct bio *, gfp_t, struct bio_set *bs);
 
 extern struct bio_set *fs_bio_set;
+unsigned int bio_integrity_tag_size(struct bio *bio);
 
 static inline struct bio *bio_alloc(gfp_t gfp_mask, unsigned int nr_iovecs)
 {
@@ -254,6 +367,7 @@
 }
 
 extern void bio_endio(struct bio *, int);
+extern void bio_endio_nodec(struct bio *, int);
 struct request_queue;
 extern int bio_phys_segments(struct request_queue *, struct bio *);
 
@@ -262,12 +376,12 @@
 
 extern void bio_init(struct bio *);
 extern void bio_reset(struct bio *);
+void bio_chain(struct bio *, struct bio *);
 
 extern int bio_add_page(struct bio *, struct page *, unsigned int,unsigned int);
 extern int bio_add_pc_page(struct request_queue *, struct bio *, struct page *,
 			   unsigned int, unsigned int);
 extern int bio_get_nr_vecs(struct block_device *);
-extern sector_t bio_sector_offset(struct bio *, unsigned short, unsigned int);
 extern struct bio *bio_map_user(struct request_queue *, struct block_device *,
 				unsigned long, unsigned int, int, gfp_t);
 struct sg_iovec;
@@ -357,48 +471,18 @@
 }
 #endif
 
-static inline char *__bio_kmap_irq(struct bio *bio, unsigned short idx,
+static inline char *__bio_kmap_irq(struct bio *bio, struct bvec_iter iter,
 				   unsigned long *flags)
 {
-	return bvec_kmap_irq(bio_iovec_idx(bio, idx), flags);
+	return bvec_kmap_irq(&bio_iter_iovec(bio, iter), flags);
 }
 #define __bio_kunmap_irq(buf, flags)	bvec_kunmap_irq(buf, flags)
 
 #define bio_kmap_irq(bio, flags) \
-	__bio_kmap_irq((bio), (bio)->bi_idx, (flags))
+	__bio_kmap_irq((bio), (bio)->bi_iter, (flags))
 #define bio_kunmap_irq(buf,flags)	__bio_kunmap_irq(buf, flags)
 
 /*
- * Check whether this bio carries any data or not. A NULL bio is allowed.
- */
-static inline bool bio_has_data(struct bio *bio)
-{
-	if (bio && bio->bi_vcnt)
-		return true;
-
-	return false;
-}
-
-static inline bool bio_is_rw(struct bio *bio)
-{
-	if (!bio_has_data(bio))
-		return false;
-
-	if (bio->bi_rw & REQ_WRITE_SAME)
-		return false;
-
-	return true;
-}
-
-static inline bool bio_mergeable(struct bio *bio)
-{
-	if (bio->bi_rw & REQ_NOMERGE_FLAGS)
-		return false;
-
-	return true;
-}
-
-/*
  * BIO list management for use by remapping drivers (e.g. DM or MD) and loop.
  *
  * A bio_list anchors a singly-linked list of bios chained through the bi_next
@@ -559,16 +643,12 @@
 
 #if defined(CONFIG_BLK_DEV_INTEGRITY)
 
+
+
 #define bip_vec_idx(bip, idx)	(&(bip->bip_vec[(idx)]))
-#define bip_vec(bip)		bip_vec_idx(bip, 0)
 
-#define __bip_for_each_vec(bvl, bip, i, start_idx)			\
-	for (bvl = bip_vec_idx((bip), (start_idx)), i = (start_idx);	\
-	     i < (bip)->bip_vcnt;					\
-	     bvl++, i++)
-
-#define bip_for_each_vec(bvl, bip, i)					\
-	__bip_for_each_vec(bvl, bip, i, (bip)->bip_idx)
+#define bip_for_each_vec(bvl, bip, iter)				\
+	for_each_bvec(bvl, (bip)->bip_vec, iter, (bip)->bip_iter)
 
 #define bio_for_each_integrity_vec(_bvl, _bio, _iter)			\
 	for_each_bio(_bio)						\
@@ -586,7 +666,6 @@
 extern void bio_integrity_endio(struct bio *, int);
 extern void bio_integrity_advance(struct bio *, unsigned int);
 extern void bio_integrity_trim(struct bio *, unsigned int, unsigned int);
-extern void bio_integrity_split(struct bio *, struct bio_pair *, int);
 extern int bio_integrity_clone(struct bio *, struct bio *, gfp_t);
 extern int bioset_integrity_create(struct bio_set *, int);
 extern void bioset_integrity_free(struct bio_set *);
@@ -630,12 +709,6 @@
 	return 0;
 }
 
-static inline void bio_integrity_split(struct bio *bio, struct bio_pair *bp,
-				       int sectors)
-{
-	return;
-}
-
 static inline void bio_integrity_advance(struct bio *bio,
 					 unsigned int bytes_done)
 {
diff --git a/include/linux/blk-mq.h b/include/linux/blk-mq.h
index ab0e9b2..18ba8a6 100644
--- a/include/linux/blk-mq.h
+++ b/include/linux/blk-mq.h
@@ -83,6 +83,8 @@
 	 */
 	rq_timed_out_fn		*timeout;
 
+	softirq_done_fn		*complete;
+
 	/*
 	 * Override for hctx allocations (should probably go)
 	 */
@@ -113,18 +115,18 @@
 };
 
 struct request_queue *blk_mq_init_queue(struct blk_mq_reg *, void *);
-void blk_mq_free_queue(struct request_queue *);
 int blk_mq_register_disk(struct gendisk *);
 void blk_mq_unregister_disk(struct gendisk *);
 void blk_mq_init_commands(struct request_queue *, void (*init)(void *data, struct blk_mq_hw_ctx *, struct request *, unsigned int), void *data);
 
 void blk_mq_flush_plug_list(struct blk_plug *plug, bool from_schedule);
 
-void blk_mq_insert_request(struct request_queue *, struct request *, bool);
+void blk_mq_insert_request(struct request_queue *, struct request *,
+		bool, bool);
 void blk_mq_run_queues(struct request_queue *q, bool async);
 void blk_mq_free_request(struct request *rq);
 bool blk_mq_can_queue(struct blk_mq_hw_ctx *);
-struct request *blk_mq_alloc_request(struct request_queue *q, int rw, gfp_t gfp, bool reserved);
+struct request *blk_mq_alloc_request(struct request_queue *q, int rw, gfp_t gfp);
 struct request *blk_mq_alloc_reserved_request(struct request_queue *q, int rw, gfp_t gfp);
 struct request *blk_mq_rq_from_tag(struct request_queue *q, unsigned int tag);
 
@@ -134,6 +136,8 @@
 
 void blk_mq_end_io(struct request *rq, int error);
 
+void blk_mq_complete_request(struct request *rq);
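One plausible use of the new hook pair, assuming a hypothetical driver that sets .complete = foo_softirq_done in its blk_mq_ops: the interrupt path only schedules completion, and the ->complete callback does the actual end-of-I/O work.

static void foo_softirq_done(struct request *rq)
{
	blk_mq_end_io(rq, 0);		/* finish the request outside hard-IRQ context */
}

static void foo_irq_handler(struct request *rq)
{
	blk_mq_complete_request(rq);	/* defers to the .complete hook above */
}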
+
 void blk_mq_stop_hw_queue(struct blk_mq_hw_ctx *hctx);
 void blk_mq_start_hw_queue(struct blk_mq_hw_ctx *hctx);
 void blk_mq_stop_hw_queues(struct request_queue *q);
@@ -159,16 +163,16 @@
 }
 
 #define queue_for_each_hw_ctx(q, hctx, i)				\
-	for ((i) = 0, hctx = (q)->queue_hw_ctx[0];			\
-	     (i) < (q)->nr_hw_queues; (i)++, hctx = (q)->queue_hw_ctx[i])
+	for ((i) = 0; (i) < (q)->nr_hw_queues &&			\
+	     ({ hctx = (q)->queue_hw_ctx[i]; 1; }); (i)++)
 
 #define queue_for_each_ctx(q, ctx, i)					\
-	for ((i) = 0, ctx = per_cpu_ptr((q)->queue_ctx, 0);		\
-	     (i) < (q)->nr_queues; (i)++, ctx = per_cpu_ptr(q->queue_ctx, (i)))
+	for ((i) = 0; (i) < (q)->nr_queues &&				\
+	     ({ ctx = per_cpu_ptr((q)->queue_ctx, (i)); 1; }); (i)++)
 
 #define hctx_for_each_ctx(hctx, ctx, i)					\
-	for ((i) = 0, ctx = (hctx)->ctxs[0];				\
-	     (i) < (hctx)->nr_ctx; (i)++, ctx = (hctx)->ctxs[(i)])
+	for ((i) = 0; (i) < (hctx)->nr_ctx &&				\
+	     ({ ctx = (hctx)->ctxs[(i)]; 1; }); (i)++)
 
 #define blk_ctx_sum(q, sum)						\
 ({									\
diff --git a/include/linux/blk_types.h b/include/linux/blk_types.h
index 238ef0e..bbc3a6c 100644
--- a/include/linux/blk_types.h
+++ b/include/linux/blk_types.h
@@ -28,13 +28,22 @@
 	unsigned int	bv_offset;
 };
 
+struct bvec_iter {
+	sector_t		bi_sector;	/* device address in 512 byte
+						   sectors */
+	unsigned int		bi_size;	/* residual I/O count */
+
+	unsigned int		bi_idx;		/* current index into bvl_vec */
+
+	unsigned int            bi_bvec_done;	/* number of bytes completed in
+						   current bvec */
+};
+
 /*
  * main unit of I/O for the block layer and lower layers (ie drivers and
  * stacking drivers)
  */
 struct bio {
-	sector_t		bi_sector;	/* device address in 512 byte
-						   sectors */
 	struct bio		*bi_next;	/* request queue link */
 	struct block_device	*bi_bdev;
 	unsigned long		bi_flags;	/* status, command, etc */
@@ -42,16 +51,13 @@
 						 * top bits priority
 						 */
 
-	unsigned short		bi_vcnt;	/* how many bio_vec's */
-	unsigned short		bi_idx;		/* current index into bvl_vec */
+	struct bvec_iter	bi_iter;
 
 	/* Number of segments in this BIO after
 	 * physical address coalescing is performed.
 	 */
 	unsigned int		bi_phys_segments;
 
-	unsigned int		bi_size;	/* residual I/O count */
-
 	/*
 	 * To keep track of the max segment size, we account for the
 	 * sizes of the first and last mergeable segments in this bio.
@@ -59,6 +65,8 @@
 	unsigned int		bi_seg_front_size;
 	unsigned int		bi_seg_back_size;
 
+	atomic_t		bi_remaining;
+
 	bio_end_io_t		*bi_end_io;
 
 	void			*bi_private;
@@ -74,11 +82,13 @@
 	struct bio_integrity_payload *bi_integrity;  /* data integrity */
 #endif
 
+	unsigned short		bi_vcnt;	/* how many bio_vec's */
+
 	/*
 	 * Everything starting with bi_max_vecs will be preserved by bio_reset()
 	 */
 
-	unsigned int		bi_max_vecs;	/* max bvl_vecs we can hold */
+	unsigned short		bi_max_vecs;	/* max bvl_vecs we can hold */
 
 	atomic_t		bi_cnt;		/* pin count */
 
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index 1b135d4..4afa4f8 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -95,13 +95,10 @@
  * as well!
  */
 struct request {
-	union {
-		struct list_head queuelist;
-		struct llist_node ll_list;
-	};
+	struct list_head queuelist;
 	union {
 		struct call_single_data csd;
-		struct work_struct mq_flush_data;
+		struct work_struct mq_flush_work;
 	};
 
 	struct request_queue *q;
@@ -291,6 +288,7 @@
 	unsigned char		discard_misaligned;
 	unsigned char		cluster;
 	unsigned char		discard_zeroes_data;
+	unsigned char		raid_partial_stripes_expensive;
 };
 
 struct request_queue {
@@ -450,13 +448,8 @@
 	unsigned long		flush_pending_since;
 	struct list_head	flush_queue[2];
 	struct list_head	flush_data_in_flight;
-	union {
-		struct request	flush_rq;
-		struct {
-			spinlock_t mq_flush_lock;
-			struct work_struct mq_flush_work;
-		};
-	};
+	struct request		*flush_rq;
+	spinlock_t		mq_flush_lock;
 
 	struct mutex		sysfs_lock;
 
@@ -735,7 +728,7 @@
 };
 
 struct req_iterator {
-	int i;
+	struct bvec_iter iter;
 	struct bio *bio;
 };
 
@@ -748,10 +741,11 @@
 
 #define rq_for_each_segment(bvl, _rq, _iter)			\
 	__rq_for_each_bio(_iter.bio, _rq)			\
-		bio_for_each_segment(bvl, _iter.bio, _iter.i)
+		bio_for_each_segment(bvl, _iter.bio, _iter.iter)
 
-#define rq_iter_last(rq, _iter)					\
-		(_iter.bio->bi_next == NULL && _iter.i == _iter.bio->bi_vcnt-1)
+#define rq_iter_last(bvec, _iter)				\
+		(_iter.bio->bi_next == NULL &&			\
+		 bio_iter_last(bvec, _iter.iter))
 
 #ifndef ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE
 # error	"You should define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE for your platform"
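A hedged sketch of a driver walking a request with the reworked iterator, showing both rq_for_each_segment() and the new rq_iter_last() form (the function name is made up):

static unsigned int foo_count_rq_bytes(struct request *rq)
{
	struct req_iterator iter;
	struct bio_vec bvec;
	unsigned int bytes = 0;

	rq_for_each_segment(bvec, rq, iter) {
		bytes += bvec.bv_len;
		if (rq_iter_last(bvec, iter))
			;	/* this was the final segment of the request */
	}

	return bytes;
}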
diff --git a/include/linux/bootmem.h b/include/linux/bootmem.h
index b388223..db51fe4 100644
--- a/include/linux/bootmem.h
+++ b/include/linux/bootmem.h
@@ -264,7 +264,7 @@
 {
 	if (!align)
 		align = SMP_CACHE_BYTES;
-	return __alloc_bootmem_low(size, align, BOOTMEM_LOW_LIMIT);
+	return __alloc_bootmem_low(size, align, 0);
 }
 
 static inline void * __init memblock_virt_alloc_low_nopanic(
@@ -272,7 +272,7 @@
 {
 	if (!align)
 		align = SMP_CACHE_BYTES;
-	return __alloc_bootmem_low_nopanic(size, align, BOOTMEM_LOW_LIMIT);
+	return __alloc_bootmem_low_nopanic(size, align, 0);
 }
 
 static inline void * __init memblock_virt_alloc_from_nopanic(
diff --git a/include/linux/can/skb.h b/include/linux/can/skb.h
index 2f0543f..f9bbbb4 100644
--- a/include/linux/can/skb.h
+++ b/include/linux/can/skb.h
@@ -11,7 +11,9 @@
 #define CAN_SKB_H
 
 #include <linux/types.h>
+#include <linux/skbuff.h>
 #include <linux/can.h>
+#include <net/sock.h>
 
 /*
  * The struct can_skb_priv is used to transport additional information along
@@ -42,4 +44,40 @@
 	skb_reserve(skb, sizeof(struct can_skb_priv));
 }
 
+static inline void can_skb_destructor(struct sk_buff *skb)
+{
+	sock_put(skb->sk);
+}
+
+static inline void can_skb_set_owner(struct sk_buff *skb, struct sock *sk)
+{
+	if (sk) {
+		sock_hold(sk);
+		skb->destructor = can_skb_destructor;
+		skb->sk = sk;
+	}
+}
+
+/*
+ * returns an unshared skb owned by the original sock to be echo'ed back
+ */
+static inline struct sk_buff *can_create_echo_skb(struct sk_buff *skb)
+{
+	if (skb_shared(skb)) {
+		struct sk_buff *nskb = skb_clone(skb, GFP_ATOMIC);
+
+		if (likely(nskb)) {
+			can_skb_set_owner(nskb, skb->sk);
+			consume_skb(skb);
+			return nskb;
+		} else {
+			kfree_skb(skb);
+			return NULL;
+		}
+	}
+
+	/* we can assume to have an unshared skb with proper owner */
+	return skb;
+}
+
 #endif /* CAN_SKB_H */
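As a sketch only, a CAN driver's transmit path would take ownership of the echo skb before touching the hardware; the driver name is an assumption and the hardware handling is elided:

static netdev_tx_t foo_can_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	skb = can_create_echo_skb(skb);
	if (!skb)
		return NETDEV_TX_OK;	/* clone failed, frame already freed */

	/* ... write the frame to hardware, echo it back once it has been sent ... */
	return NETDEV_TX_OK;
}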
diff --git a/include/linux/ceph/ceph_fs.h b/include/linux/ceph/ceph_fs.h
index 2623cff..25bfb0e 100644
--- a/include/linux/ceph/ceph_fs.h
+++ b/include/linux/ceph/ceph_fs.h
@@ -373,8 +373,9 @@
 /*
  * Ceph setxattr request flags.
  */
-#define CEPH_XATTR_CREATE  1
-#define CEPH_XATTR_REPLACE 2
+#define CEPH_XATTR_CREATE  (1 << 0)
+#define CEPH_XATTR_REPLACE (1 << 1)
+#define CEPH_XATTR_REMOVE  (1 << 31)
 
 union ceph_mds_request_args {
 	struct {
diff --git a/include/linux/ceph/messenger.h b/include/linux/ceph/messenger.h
index 20ee8b6..d21f2db 100644
--- a/include/linux/ceph/messenger.h
+++ b/include/linux/ceph/messenger.h
@@ -1,6 +1,7 @@
 #ifndef __FS_CEPH_MESSENGER_H
 #define __FS_CEPH_MESSENGER_H
 
+#include <linux/blk_types.h>
 #include <linux/kref.h>
 #include <linux/mutex.h>
 #include <linux/net.h>
@@ -119,8 +120,7 @@
 #ifdef CONFIG_BLOCK
 		struct {				/* bio */
 			struct bio	*bio;		/* bio from list */
-			unsigned int	vector_index;	/* vector from bio */
-			unsigned int	vector_offset;	/* bytes from vector */
+			struct bvec_iter bvec_iter;
 		};
 #endif /* CONFIG_BLOCK */
 		struct {				/* pages */
diff --git a/include/linux/cgroup.h b/include/linux/cgroup.h
index 5c09759..9450f02 100644
--- a/include/linux/cgroup.h
+++ b/include/linux/cgroup.h
@@ -166,6 +166,8 @@
 	 *
 	 * The ID of the root cgroup is always 0, and a new cgroup
 	 * will be assigned with a smallest available ID.
+	 *
+	 * Allocating/removing an ID must be protected by cgroup_mutex.
 	 */
 	int id;
 
diff --git a/include/linux/clk-provider.h b/include/linux/clk-provider.h
index 448b229..939533d 100644
--- a/include/linux/clk-provider.h
+++ b/include/linux/clk-provider.h
@@ -544,6 +544,20 @@
  * for improved portability across platforms
  */
 
+#if IS_ENABLED(CONFIG_PPC)
+
+static inline u32 clk_readl(u32 __iomem *reg)
+{
+	return ioread32be(reg);
+}
+
+static inline void clk_writel(u32 val, u32 __iomem *reg)
+{
+	iowrite32be(val, reg);
+}
+
+#else	/* platform dependent I/O accessors */
+
 static inline u32 clk_readl(u32 __iomem *reg)
 {
 	return readl(reg);
@@ -554,5 +568,7 @@
 	writel(val, reg);
 }
 
+#endif	/* platform dependent I/O accessors */
+
 #endif /* CONFIG_COMMON_CLK */
 #endif /* CLK_PROVIDER_H */
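A small, hypothetical divider update written against clk_readl()/clk_writel(), so the same clock driver transparently picks up the big-endian accessors on PPC; the 8-bit divider field is invented for illustration:

static void foo_clk_set_div(u32 __iomem *reg, u32 div)
{
	u32 val = clk_readl(reg);

	val &= ~0xffU;			/* assume a divider field at bits 7:0 */
	val |= div & 0xffU;
	clk_writel(val, reg);
}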
diff --git a/include/linux/cmdline-parser.h b/include/linux/cmdline-parser.h
index a0f9280..2e6dce6 100644
--- a/include/linux/cmdline-parser.h
+++ b/include/linux/cmdline-parser.h
@@ -37,9 +37,9 @@
 struct cmdline_parts *cmdline_parts_find(struct cmdline_parts *parts,
 					 const char *bdev);
 
-void cmdline_parts_set(struct cmdline_parts *parts, sector_t disk_size,
-		       int slot,
-		       int (*add_part)(int, struct cmdline_subpart *, void *),
-		       void *param);
+int cmdline_parts_set(struct cmdline_parts *parts, sector_t disk_size,
+		      int slot,
+		      int (*add_part)(int, struct cmdline_subpart *, void *),
+		      void *param);
 
 #endif /* CMDLINEPARSEH */
diff --git a/include/linux/compat.h b/include/linux/compat.h
index eb8a49d..3f448c6 100644
--- a/include/linux/compat.h
+++ b/include/linux/compat.h
@@ -327,17 +327,17 @@
 			      u32 arg2, u32 arg3, u32 arg4, u32 arg5);
 asmlinkage long compat_sys_ustat(unsigned dev, struct compat_ustat __user *u32);
 
-asmlinkage ssize_t compat_sys_readv(unsigned long fd,
-		const struct compat_iovec __user *vec, unsigned long vlen);
-asmlinkage ssize_t compat_sys_writev(unsigned long fd,
-		const struct compat_iovec __user *vec, unsigned long vlen);
-asmlinkage ssize_t compat_sys_preadv(unsigned long fd,
+asmlinkage ssize_t compat_sys_readv(compat_ulong_t fd,
+		const struct compat_iovec __user *vec, compat_ulong_t vlen);
+asmlinkage ssize_t compat_sys_writev(compat_ulong_t fd,
+		const struct compat_iovec __user *vec, compat_ulong_t vlen);
+asmlinkage ssize_t compat_sys_preadv(compat_ulong_t fd,
 		const struct compat_iovec __user *vec,
-		unsigned long vlen, u32 pos_low, u32 pos_high);
-asmlinkage ssize_t compat_sys_pwritev(unsigned long fd,
+		compat_ulong_t vlen, u32 pos_low, u32 pos_high);
+asmlinkage ssize_t compat_sys_pwritev(compat_ulong_t fd,
 		const struct compat_iovec __user *vec,
-		unsigned long vlen, u32 pos_low, u32 pos_high);
-asmlinkage long comat_sys_lseek(unsigned int, compat_off_t, unsigned int);
+		compat_ulong_t vlen, u32 pos_low, u32 pos_high);
+asmlinkage long compat_sys_lseek(unsigned int, compat_off_t, unsigned int);
 
 asmlinkage long compat_sys_execve(const char __user *filename, const compat_uptr_t __user *argv,
 		     const compat_uptr_t __user *envp);
@@ -422,7 +422,7 @@
 asmlinkage long compat_sys_ptrace(compat_long_t request, compat_long_t pid,
 				  compat_long_t addr, compat_long_t data);
 
-asmlinkage long compat_sys_lookup_dcookie(u32, u32, char __user *, size_t);
+asmlinkage long compat_sys_lookup_dcookie(u32, u32, char __user *, compat_size_t);
 /*
  * epoll (fs/eventpoll.c) compat bits follow ...
  */
diff --git a/include/linux/compiler-gcc4.h b/include/linux/compiler-gcc4.h
index ded4299..2507fd2 100644
--- a/include/linux/compiler-gcc4.h
+++ b/include/linux/compiler-gcc4.h
@@ -75,11 +75,7 @@
  *
  * (asm goto is automatically volatile - the naming reflects this.)
  */
-#if GCC_VERSION <= 40801
-# define asm_volatile_goto(x...)	do { asm goto(x); asm (""); } while (0)
-#else
-# define asm_volatile_goto(x...)	do { asm goto(x); } while (0)
-#endif
+#define asm_volatile_goto(x...)	do { asm goto(x); asm (""); } while (0)
 
 #ifdef CONFIG_ARCH_USE_BUILTIN_BSWAP
 #if GCC_VERSION >= 40400
diff --git a/include/linux/dm-io.h b/include/linux/dm-io.h
index f4b0aa3..a68cbe5 100644
--- a/include/linux/dm-io.h
+++ b/include/linux/dm-io.h
@@ -29,7 +29,7 @@
 
 enum dm_io_mem_type {
 	DM_IO_PAGE_LIST,/* Page list */
-	DM_IO_BVEC,	/* Bio vector */
+	DM_IO_BIO,	/* Bio vector */
 	DM_IO_VMA,	/* Virtual memory area */
 	DM_IO_KMEM,	/* Kernel memory */
 };
@@ -41,7 +41,7 @@
 
 	union {
 		struct page_list *pl;
-		struct bio_vec *bvec;
+		struct bio *bio;
 		void *vma;
 		void *addr;
 	} ptr;
diff --git a/include/linux/dma-buf.h b/include/linux/dma-buf.h
index dfac5ed..f886985 100644
--- a/include/linux/dma-buf.h
+++ b/include/linux/dma-buf.h
@@ -171,7 +171,7 @@
 			       size_t size, int flags, const char *);
 
 #define dma_buf_export(priv, ops, size, flags)	\
-	dma_buf_export_named(priv, ops, size, flags, __FILE__)
+	dma_buf_export_named(priv, ops, size, flags, KBUILD_MODNAME)
 
 int dma_buf_fd(struct dma_buf *dmabuf, int flags);
 struct dma_buf *dma_buf_get(int fd);
diff --git a/include/linux/dma_remapping.h b/include/linux/dma_remapping.h
index 57c9a8a..7ac17f5 100644
--- a/include/linux/dma_remapping.h
+++ b/include/linux/dma_remapping.h
@@ -27,7 +27,6 @@
 
 
 #ifdef CONFIG_INTEL_IOMMU
-extern void free_dmar_iommu(struct intel_iommu *iommu);
 extern int iommu_calculate_agaw(struct intel_iommu *iommu);
 extern int iommu_calculate_max_sagaw(struct intel_iommu *iommu);
 extern int dmar_disabled;
@@ -41,9 +40,6 @@
 {
 	return 0;
 }
-static inline void free_dmar_iommu(struct intel_iommu *iommu)
-{
-}
 #define dmar_disabled	(1)
 #define intel_iommu_enabled (0)
 #endif
diff --git a/include/linux/dmaengine.h b/include/linux/dmaengine.h
index 6fd9390..c5c92d5 100644
--- a/include/linux/dmaengine.h
+++ b/include/linux/dmaengine.h
@@ -257,7 +257,7 @@
  * @dev: class device for sysfs
  * @device_node: used to add this to the device chan list
  * @local: per-cpu pointer to a struct dma_chan_percpu
- * @client-count: how many clients are using this channel
+ * @client_count: how many clients are using this channel
  * @table_count: number of appearances in the mem-to-mem allocation table
  * @private: private data for certain client-channel associations
  */
@@ -279,10 +279,10 @@
 
 /**
  * struct dma_chan_dev - relate sysfs device node to backing channel device
- * @chan - driver channel device
- * @device - sysfs device
- * @dev_id - parent dma_device dev_id
- * @idr_ref - reference count to gate release of dma_device dev_id
+ * @chan: driver channel device
+ * @device: sysfs device
+ * @dev_id: parent dma_device dev_id
+ * @idr_ref: reference count to gate release of dma_device dev_id
  */
 struct dma_chan_dev {
 	struct dma_chan *chan;
@@ -306,9 +306,8 @@
 /**
  * struct dma_slave_config - dma slave channel runtime config
  * @direction: whether the data shall go in or out on this slave
- * channel, right now. DMA_TO_DEVICE and DMA_FROM_DEVICE are
- * legal values, DMA_BIDIRECTIONAL is not acceptable since we
- * need to differentiate source and target addresses.
+ * channel, right now. DMA_MEM_TO_DEV and DMA_DEV_TO_MEM are
+ * legal values.
  * @src_addr: this is the physical address where DMA slave data
  * should be read (RX), if the source is memory this argument is
  * ignored.
diff --git a/include/linux/dmar.h b/include/linux/dmar.h
index b029d1a..eccb0c0 100644
--- a/include/linux/dmar.h
+++ b/include/linux/dmar.h
@@ -33,6 +33,7 @@
 #define DMAR_X2APIC_OPT_OUT	0x2
 
 struct intel_iommu;
+
 #ifdef CONFIG_DMAR_TABLE
 extern struct acpi_table_header *dmar_tbl;
 struct dmar_drhd_unit {
@@ -52,6 +53,10 @@
 #define for_each_drhd_unit(drhd) \
 	list_for_each_entry(drhd, &dmar_drhd_units, list)
 
+#define for_each_active_drhd_unit(drhd)					\
+	list_for_each_entry(drhd, &dmar_drhd_units, list)		\
+		if (drhd->ignored) {} else
+
 #define for_each_active_iommu(i, drhd)					\
 	list_for_each_entry(drhd, &dmar_drhd_units, list)		\
 		if (i=drhd->iommu, drhd->ignored) {} else
@@ -62,13 +67,13 @@
 
 extern int dmar_table_init(void);
 extern int dmar_dev_scope_init(void);
+extern int dmar_parse_dev_scope(void *start, void *end, int *cnt,
+				struct pci_dev ***devices, u16 segment);
+extern void dmar_free_dev_scope(struct pci_dev ***devices, int *cnt);
 
 /* Intel IOMMU detection */
 extern int detect_intel_iommu(void);
 extern int enable_drhd_fault_handling(void);
-
-extern int parse_ioapics_under_ir(void);
-extern int alloc_iommu(struct dmar_drhd_unit *);
 #else
 static inline int detect_intel_iommu(void)
 {
@@ -157,8 +162,6 @@
 int dmar_parse_rmrr_atsr_dev(void);
 extern int dmar_parse_one_rmrr(struct acpi_dmar_header *header);
 extern int dmar_parse_one_atsr(struct acpi_dmar_header *header);
-extern int dmar_parse_dev_scope(void *start, void *end, int *cnt,
-				struct pci_dev ***devices, u16 segment);
 extern int intel_iommu_init(void);
 #else /* !CONFIG_INTEL_IOMMU: */
 static inline int intel_iommu_init(void) { return -ENODEV; }
diff --git a/include/linux/fb.h b/include/linux/fb.h
index 70c4836..fe6ac95 100644
--- a/include/linux/fb.h
+++ b/include/linux/fb.h
@@ -613,8 +613,8 @@
 extern int register_framebuffer(struct fb_info *fb_info);
 extern int unregister_framebuffer(struct fb_info *fb_info);
 extern int unlink_framebuffer(struct fb_info *fb_info);
-extern void remove_conflicting_framebuffers(struct apertures_struct *a,
-				const char *name, bool primary);
+extern int remove_conflicting_framebuffers(struct apertures_struct *a,
+					   const char *name, bool primary);
 extern int fb_prepare_logo(struct fb_info *fb_info, int rotate);
 extern int fb_show_logo(struct fb_info *fb_info, int rotate);
 extern char* fb_get_buffer_offset(struct fb_info *info, struct fb_pixmap *buf, u32 size);
diff --git a/include/linux/fs.h b/include/linux/fs.h
index 09f553c..6082956 100644
--- a/include/linux/fs.h
+++ b/include/linux/fs.h
@@ -2079,6 +2079,7 @@
 extern int filp_close(struct file *, fl_owner_t id);
 
 extern struct filename *getname(const char __user *);
+extern struct filename *getname_kernel(const char *);
 
 enum {
 	FILE_CREATED = 1,
@@ -2273,7 +2274,13 @@
 extern int vfs_fsync_range(struct file *file, loff_t start, loff_t end,
 			   int datasync);
 extern int vfs_fsync(struct file *file, int datasync);
-extern int generic_write_sync(struct file *file, loff_t pos, loff_t count);
+static inline int generic_write_sync(struct file *file, loff_t pos, loff_t count)
+{
+	if (!(file->f_flags & O_DSYNC) && !IS_SYNC(file->f_mapping->host))
+		return 0;
+	return vfs_fsync_range(file, pos, pos + count - 1,
+			       (file->f_flags & __O_SYNC) ? 0 : 1);
+}
 extern void emergency_sync(void);
 extern void emergency_remount(void);
 #ifdef CONFIG_BLOCK
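With the helper now inline, a write path can call it unconditionally after copying data; a rough sketch, where the wrapper name is hypothetical and file, pos and written come from the caller:

static ssize_t foo_finish_write(struct file *file, loff_t pos, ssize_t written)
{
	if (written > 0) {
		int err = generic_write_sync(file, pos, written);

		if (err < 0)
			written = err;	/* surface the sync failure to the caller */
	}

	return written;
}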
diff --git a/include/linux/fsnotify_backend.h b/include/linux/fsnotify_backend.h
index 7d8d5e6..3d286ff 100644
--- a/include/linux/fsnotify_backend.h
+++ b/include/linux/fsnotify_backend.h
@@ -322,10 +322,10 @@
 extern void fsnotify_destroy_event(struct fsnotify_group *group,
 				   struct fsnotify_event *event);
 /* attach the event to the group notification queue */
-extern struct fsnotify_event *fsnotify_add_notify_event(struct fsnotify_group *group,
-							struct fsnotify_event *event,
-							struct fsnotify_event *(*merge)(struct list_head *,
-											struct fsnotify_event *));
+extern int fsnotify_add_notify_event(struct fsnotify_group *group,
+				     struct fsnotify_event *event,
+				     int (*merge)(struct list_head *,
+						  struct fsnotify_event *));
 /* true if the group notification queue is empty */
 extern bool fsnotify_notify_queue_is_empty(struct fsnotify_group *group);
 /* return, but do not dequeue the first event on the notification queue */
diff --git a/include/linux/gpio/consumer.h b/include/linux/gpio/consumer.h
index 4d34dbb..7a8144f 100644
--- a/include/linux/gpio/consumer.h
+++ b/include/linux/gpio/consumer.h
@@ -4,8 +4,6 @@
 #include <linux/err.h>
 #include <linux/kernel.h>
 
-#ifdef CONFIG_GPIOLIB
-
 struct device;
 struct gpio_chip;
 
@@ -18,6 +16,8 @@
  */
 struct gpio_desc;
 
+#ifdef CONFIG_GPIOLIB
+
 /* Acquire and dispose GPIOs */
 struct gpio_desc *__must_check gpiod_get(struct device *dev,
 					 const char *con_id);
diff --git a/include/linux/host1x.h b/include/linux/host1x.h
index f5b9b87..3af8472 100644
--- a/include/linux/host1x.h
+++ b/include/linux/host1x.h
@@ -281,4 +281,10 @@
 int host1x_client_register(struct host1x_client *client);
 int host1x_client_unregister(struct host1x_client *client);
 
+struct tegra_mipi_device;
+
+struct tegra_mipi_device *tegra_mipi_request(struct device *device);
+void tegra_mipi_free(struct tegra_mipi_device *device);
+int tegra_mipi_calibrate(struct tegra_mipi_device *device);
+
 #endif
diff --git a/include/linux/hyperv.h b/include/linux/hyperv.h
index 15da677..344883d 100644
--- a/include/linux/hyperv.h
+++ b/include/linux/hyperv.h
@@ -875,7 +875,7 @@
 struct vmbus_channel_initiate_contact {
 	struct vmbus_channel_message_header header;
 	u32 vmbus_version_requested;
-	u32 padding2;
+	u32 target_vcpu; /* The VCPU the host should respond to */
 	u64 interrupt_page;
 	u64 monitor_page1;
 	u64 monitor_page2;
diff --git a/include/linux/i2c-smbus.h b/include/linux/i2c-smbus.h
index 017fb40..8f1b086 100644
--- a/include/linux/i2c-smbus.h
+++ b/include/linux/i2c-smbus.h
@@ -1,7 +1,7 @@
 /*
  * i2c-smbus.h - SMBus extensions to the I2C protocol
  *
- * Copyright (C) 2010 Jean Delvare <khali@linux-fr.org>
+ * Copyright (C) 2010 Jean Delvare <jdelvare@suse.de>
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License as published by
diff --git a/include/linux/i2c.h b/include/linux/i2c.h
index d9c8dbd3..deddeb8 100644
--- a/include/linux/i2c.h
+++ b/include/linux/i2c.h
@@ -342,11 +342,25 @@
 }
 #endif /* I2C_BOARDINFO */
 
-/*
+/**
+ * struct i2c_algorithm - represent I2C transfer method
+ * @master_xfer: Issue a set of i2c transactions to the given I2C adapter
+ *   defined by the msgs array, with num messages available to transfer via
+ *   the adapter specified by adap.
+ * @smbus_xfer: Issue smbus transactions to the given I2C adapter. If this
+ *   is not present, then the bus layer will try and convert the SMBus calls
+ *   into I2C transfers instead.
+ * @functionality: Return the flags that this algorithm/adapter pair supports
+ *   from the I2C_FUNC_* flags.
+ *
  * The following structs are for those who like to implement new bus drivers:
  * i2c_algorithm is the interface to a class of hardware solutions which can
  * be addressed using the same bus algorithms - i.e. bit-banging or the PCF8584
  * to name two of the most common.
+ *
+ * The return codes from the @master_xfer field should indicate the type of
+ *   error code that occurred during the transfer, as documented in the kernel
+ * Documentation file Documentation/i2c/fault-codes.
  */
 struct i2c_algorithm {
 	/* If an adapter algorithm can't do I2C-level access, set master_xfer
diff --git a/include/linux/intel-iommu.h b/include/linux/intel-iommu.h
index d380c5e..2c4bed5 100644
--- a/include/linux/intel-iommu.h
+++ b/include/linux/intel-iommu.h
@@ -288,6 +288,7 @@
 
 struct ir_table {
 	struct irte *base;
+	unsigned long *bitmap;
 };
 #endif
 
@@ -347,8 +348,6 @@
 extern struct dmar_drhd_unit * dmar_find_matched_drhd_unit(struct pci_dev *dev);
 extern int dmar_find_matched_atsr_unit(struct pci_dev *dev);
 
-extern int alloc_iommu(struct dmar_drhd_unit *drhd);
-extern void free_iommu(struct intel_iommu *iommu);
 extern int dmar_enable_qi(struct intel_iommu *iommu);
 extern void dmar_disable_qi(struct intel_iommu *iommu);
 extern int dmar_reenable_qi(struct intel_iommu *iommu);
diff --git a/include/linux/interrupt.h b/include/linux/interrupt.h
index 0053add..a2678d3 100644
--- a/include/linux/interrupt.h
+++ b/include/linux/interrupt.h
@@ -158,6 +158,11 @@
 					 devname, dev_id);
 }
 
+extern int __must_check
+devm_request_any_context_irq(struct device *dev, unsigned int irq,
+		 irq_handler_t handler, unsigned long irqflags,
+		 const char *devname, void *dev_id);
+
 extern void devm_free_irq(struct device *dev, unsigned int irq, void *dev_id);
 
 /*
diff --git a/include/linux/iommu.h b/include/linux/iommu.h
index a444c79..b96a5b2 100644
--- a/include/linux/iommu.h
+++ b/include/linux/iommu.h
@@ -24,9 +24,10 @@
 #include <linux/types.h>
 #include <trace/events/iommu.h>
 
-#define IOMMU_READ	(1)
-#define IOMMU_WRITE	(2)
-#define IOMMU_CACHE	(4) /* DMA cache coherency */
+#define IOMMU_READ	(1 << 0)
+#define IOMMU_WRITE	(1 << 1)
+#define IOMMU_CACHE	(1 << 2) /* DMA cache coherency */
+#define IOMMU_EXEC	(1 << 3)
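Since the flags are now explicit bit positions, they are meant to be OR-ed into the prot argument of iommu_map(); a fragment for illustration, assuming domain and page already exist and using a made-up IOVA:

	int ret = iommu_map(domain, 0x100000, page_to_phys(page), PAGE_SIZE,
			    IOMMU_READ | IOMMU_WRITE);
	if (ret)
		pr_err("iommu mapping failed: %d\n", ret);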
 
 struct iommu_ops;
 struct iommu_group;
@@ -247,6 +248,11 @@
 	return NULL;
 }
 
+static inline struct iommu_group *iommu_group_get_by_id(int id)
+{
+	return NULL;
+}
+
 static inline void iommu_domain_free(struct iommu_domain *domain)
 {
 }
@@ -291,8 +297,8 @@
 	return 0;
 }
 
-static inline int domain_has_cap(struct iommu_domain *domain,
-				 unsigned long cap)
+static inline int iommu_domain_has_cap(struct iommu_domain *domain,
+				       unsigned long cap)
 {
 	return 0;
 }
diff --git a/include/linux/jiffies.h b/include/linux/jiffies.h
index d235e88..1f44466 100644
--- a/include/linux/jiffies.h
+++ b/include/linux/jiffies.h
@@ -294,6 +294,12 @@
  */
 extern unsigned int jiffies_to_msecs(const unsigned long j);
 extern unsigned int jiffies_to_usecs(const unsigned long j);
+
+static inline u64 jiffies_to_nsecs(const unsigned long j)
+{
+	return (u64)jiffies_to_usecs(j) * NSEC_PER_USEC;
+}
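A one-line illustration of the new helper, assuming start holds a jiffies timestamp captured earlier (the wrapper name is hypothetical):

static u64 foo_elapsed_ns(unsigned long start)
{
	return jiffies_to_nsecs(jiffies - start);
}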
+
 extern unsigned long msecs_to_jiffies(const unsigned int m);
 extern unsigned long usecs_to_jiffies(const unsigned int u);
 extern unsigned long timespec_to_jiffies(const struct timespec *value);
diff --git a/include/linux/kgdb.h b/include/linux/kgdb.h
index dfb4f2f..6b06d37 100644
--- a/include/linux/kgdb.h
+++ b/include/linux/kgdb.h
@@ -310,7 +310,8 @@
 kgdb_handle_exception(int ex_vector, int signo, int err_code,
 		      struct pt_regs *regs);
 extern int kgdb_nmicallback(int cpu, void *regs);
-extern int kgdb_nmicallin(int cpu, int trapnr, void *regs, atomic_t *snd_rdy);
+extern int kgdb_nmicallin(int cpu, int trapnr, void *regs, int err_code,
+			  atomic_t *snd_rdy);
 extern void gdbstub_exit(int status);
 
 extern int			kgdb_single_step;
diff --git a/include/linux/linkage.h b/include/linux/linkage.h
index d3e8ad2..a6a42dd 100644
--- a/include/linux/linkage.h
+++ b/include/linux/linkage.h
@@ -6,6 +6,11 @@
 #include <linux/export.h>
 #include <asm/linkage.h>
 
+/* Some toolchains use other characters (e.g. '`') to mark a new line in a macro */
+#ifndef ASM_NL
+#define ASM_NL		 ;
+#endif
+
 #ifdef __cplusplus
 #define CPP_ASMLINKAGE extern "C"
 #else
@@ -75,21 +80,21 @@
 
 #ifndef ENTRY
 #define ENTRY(name) \
-  .globl name; \
-  ALIGN; \
-  name:
+	.globl name ASM_NL \
+	ALIGN ASM_NL \
+	name:
 #endif
 #endif /* LINKER_SCRIPT */
 
 #ifndef WEAK
 #define WEAK(name)	   \
-	.weak name;	   \
+	.weak name ASM_NL   \
 	name:
 #endif
 
 #ifndef END
 #define END(name) \
-  .size name, .-name
+	.size name, .-name
 #endif
 
 /* If symbol 'name' is treated as a subroutine (gets called, and returns)
@@ -98,8 +103,8 @@
  */
 #ifndef ENDPROC
 #define ENDPROC(name) \
-  .type name, @function; \
-  END(name)
+	.type name, @function ASM_NL \
+	END(name)
 #endif
 
 #endif
diff --git a/include/linux/mfd/max8997-private.h b/include/linux/mfd/max8997-private.h
index ad1ae7f..78c76cd 100644
--- a/include/linux/mfd/max8997-private.h
+++ b/include/linux/mfd/max8997-private.h
@@ -387,7 +387,7 @@
 	struct i2c_client *muic; /* slave addr 0x4a */
 	struct mutex iolock;
 
-	int type;
+	unsigned long type;
 	struct platform_device *battery; /* battery control (not fuel gauge) */
 
 	int irq;
diff --git a/include/linux/mfd/max8998-private.h b/include/linux/mfd/max8998-private.h
index 4ecb24b..d68ada5 100644
--- a/include/linux/mfd/max8998-private.h
+++ b/include/linux/mfd/max8998-private.h
@@ -163,7 +163,7 @@
 	int ono;
 	u8 irq_masks_cur[MAX8998_NUM_IRQ_REGS];
 	u8 irq_masks_cache[MAX8998_NUM_IRQ_REGS];
-	int type;
+	unsigned long type;
 	bool wakeup;
 };
 
diff --git a/include/linux/mfd/tps65217.h b/include/linux/mfd/tps65217.h
index a5a7f01..54b5458 100644
--- a/include/linux/mfd/tps65217.h
+++ b/include/linux/mfd/tps65217.h
@@ -252,7 +252,7 @@
 struct tps65217 {
 	struct device *dev;
 	struct tps65217_board *pdata;
-	unsigned int id;
+	unsigned long id;
 	struct regulator_desc desc[TPS65217_NUM_REGULATOR];
 	struct regulator_dev *rdev[TPS65217_NUM_REGULATOR];
 	struct regmap *regmap;
@@ -263,7 +263,7 @@
 	return dev_get_drvdata(dev);
 }
 
-static inline int tps65217_chip_id(struct tps65217 *tps65217)
+static inline unsigned long tps65217_chip_id(struct tps65217 *tps65217)
 {
 	return tps65217->id;
 }
diff --git a/include/linux/mlx5/driver.h b/include/linux/mlx5/driver.h
index 554548c..130bc8d 100644
--- a/include/linux/mlx5/driver.h
+++ b/include/linux/mlx5/driver.h
@@ -38,8 +38,10 @@
 #include <linux/pci.h>
 #include <linux/spinlock_types.h>
 #include <linux/semaphore.h>
+#include <linux/slab.h>
 #include <linux/vmalloc.h>
 #include <linux/radix-tree.h>
+
 #include <linux/mlx5/device.h>
 #include <linux/mlx5/doorbell.h>
 
@@ -227,6 +229,7 @@
 	 * protect uuar allocation data structs
 	 */
 	struct mutex		lock;
+	u32			ver;
 };
 
 struct mlx5_bf {
diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
index 440a02e..e8eeebd 100644
--- a/include/linux/netdevice.h
+++ b/include/linux/netdevice.h
@@ -752,6 +752,9 @@
 	unsigned char id_len;
 };
 
+typedef u16 (*select_queue_fallback_t)(struct net_device *dev,
+				       struct sk_buff *skb);
+
 /*
  * This structure defines the management hooks for network devices.
  * The following hooks can be defined; unless noted otherwise, they are
@@ -783,7 +786,7 @@
  *	Required can not be NULL.
  *
  * u16 (*ndo_select_queue)(struct net_device *dev, struct sk_buff *skb,
- *                         void *accel_priv);
+ *                         void *accel_priv, select_queue_fallback_t fallback);
 *	Called to decide which queue to use when device supports multiple
  *	transmit queues.
  *
@@ -1005,7 +1008,8 @@
 						   struct net_device *dev);
 	u16			(*ndo_select_queue)(struct net_device *dev,
 						    struct sk_buff *skb,
-						    void *accel_priv);
+						    void *accel_priv,
+						    select_queue_fallback_t fallback);
 	void			(*ndo_change_rx_flags)(struct net_device *dev,
 						       int flags);
 	void			(*ndo_set_rx_mode)(struct net_device *dev);
@@ -1551,7 +1555,6 @@
 struct netdev_queue *netdev_pick_tx(struct net_device *dev,
 				    struct sk_buff *skb,
 				    void *accel_priv);
-u16 __netdev_pick_tx(struct net_device *dev, struct sk_buff *skb);
 
 /*
  * Net namespace inlines
@@ -2276,6 +2279,26 @@
 }
 
 /**
+ * 	netdev_cap_txqueue - check if selected tx queue exceeds device queues
+ * 	@dev: network device
+ * 	@queue_index: given tx queue index
+ *
+ * 	Returns 0 if given tx queue index >= number of device tx queues,
+ * 	otherwise returns the originally passed tx queue index.
+ */
+static inline u16 netdev_cap_txqueue(struct net_device *dev, u16 queue_index)
+{
+	if (unlikely(queue_index >= dev->real_num_tx_queues)) {
+		net_warn_ratelimited("%s selects TX queue %d, but real number of TX queues is %d\n",
+				     dev->name, queue_index,
+				     dev->real_num_tx_queues);
+		return 0;
+	}
+
+	return queue_index;
+}
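An illustrative .ndo_select_queue implementation using both the new fallback argument and the capping helper; the driver name and the priority-based mapping are assumptions of the example:

static u16 foo_select_queue(struct net_device *dev, struct sk_buff *skb,
			    void *accel_priv, select_queue_fallback_t fallback)
{
	if (skb->priority < dev->real_num_tx_queues)
		return netdev_cap_txqueue(dev, (u16)skb->priority);

	return fallback(dev, skb);	/* let the core pick a queue */
}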
+
+/**
  *	netif_running - test if up
  *	@dev: network device
  *
@@ -3068,7 +3091,12 @@
 void netif_stacked_transfer_operstate(const struct net_device *rootdev,
 					struct net_device *dev);
 
-netdev_features_t netif_skb_features(struct sk_buff *skb);
+netdev_features_t netif_skb_dev_features(struct sk_buff *skb,
+					 const struct net_device *dev);
+static inline netdev_features_t netif_skb_features(struct sk_buff *skb)
+{
+	return netif_skb_dev_features(skb, skb->dev);
+}
 
 static inline bool net_gso_ok(netdev_features_t features, int gso_type)
 {
diff --git a/include/linux/nfs_fs.h b/include/linux/nfs_fs.h
index 2b00625..0ae5807 100644
--- a/include/linux/nfs_fs.h
+++ b/include/linux/nfs_fs.h
@@ -211,6 +211,7 @@
 #define NFS_INO_ADVISE_RDPLUS	(0)		/* advise readdirplus */
 #define NFS_INO_STALE		(1)		/* possible stale inode */
 #define NFS_INO_ACL_LRU_SET	(2)		/* Inode is on the LRU list */
+#define NFS_INO_INVALIDATING	(3)		/* inode is being invalidated */
 #define NFS_INO_FLUSHING	(4)		/* inode is flushing out data */
 #define NFS_INO_FSCACHE		(5)		/* inode can be cached by FS-Cache */
 #define NFS_INO_FSCACHE_LOCK	(6)		/* FS-Cache cookie management lock */
diff --git a/include/linux/nfs_xdr.h b/include/linux/nfs_xdr.h
index 3ccfcec..b2fb167 100644
--- a/include/linux/nfs_xdr.h
+++ b/include/linux/nfs_xdr.h
@@ -379,12 +379,14 @@
  * Arguments to the open_confirm call.
  */
 struct nfs_open_confirmargs {
+	struct nfs4_sequence_args	seq_args;
 	const struct nfs_fh *	fh;
 	nfs4_stateid *		stateid;
 	struct nfs_seqid *	seqid;
 };
 
 struct nfs_open_confirmres {
+	struct nfs4_sequence_res	seq_res;
 	nfs4_stateid            stateid;
 	struct nfs_seqid *	seqid;
 };
diff --git a/include/linux/nvme.h b/include/linux/nvme.h
index 26ebcf4..69ae03f 100644
--- a/include/linux/nvme.h
+++ b/include/linux/nvme.h
@@ -80,13 +80,14 @@
 	struct dma_pool *prp_small_pool;
 	int instance;
 	int queue_count;
-	int db_stride;
+	u32 db_stride;
 	u32 ctrl_config;
 	struct msix_entry *entry;
 	struct nvme_bar __iomem *bar;
 	struct list_head namespaces;
 	struct kref kref;
 	struct miscdevice miscdev;
+	struct work_struct reset_work;
 	char name[12];
 	char serial[20];
 	char model[40];
@@ -94,6 +95,8 @@
 	u32 max_hw_sectors;
 	u32 stripe_size;
 	u16 oncs;
+	u16 abort_limit;
+	u8 initialized;
 };
 
 /*
@@ -165,6 +168,7 @@
 struct sg_io_hdr;
 
 int nvme_sg_io(struct nvme_ns *ns, struct sg_io_hdr __user *u_hdr);
+int nvme_sg_io32(struct nvme_ns *ns, unsigned long arg);
 int nvme_sg_get_version_num(int __user *ip);
 
 #endif /* _LINUX_NVME_H */
diff --git a/include/linux/of.h b/include/linux/of.h
index 70c64ba..435cb99 100644
--- a/include/linux/of.h
+++ b/include/linux/of.h
@@ -169,35 +169,15 @@
 
 extern struct device_node *of_find_node_by_name(struct device_node *from,
 	const char *name);
-#define for_each_node_by_name(dn, name) \
-	for (dn = of_find_node_by_name(NULL, name); dn; \
-	     dn = of_find_node_by_name(dn, name))
 extern struct device_node *of_find_node_by_type(struct device_node *from,
 	const char *type);
-#define for_each_node_by_type(dn, type) \
-	for (dn = of_find_node_by_type(NULL, type); dn; \
-	     dn = of_find_node_by_type(dn, type))
 extern struct device_node *of_find_compatible_node(struct device_node *from,
 	const char *type, const char *compat);
-#define for_each_compatible_node(dn, type, compatible) \
-	for (dn = of_find_compatible_node(NULL, type, compatible); dn; \
-	     dn = of_find_compatible_node(dn, type, compatible))
 extern struct device_node *of_find_matching_node_and_match(
 	struct device_node *from,
 	const struct of_device_id *matches,
 	const struct of_device_id **match);
-static inline struct device_node *of_find_matching_node(
-	struct device_node *from,
-	const struct of_device_id *matches)
-{
-	return of_find_matching_node_and_match(from, matches, NULL);
-}
-#define for_each_matching_node(dn, matches) \
-	for (dn = of_find_matching_node(NULL, matches); dn; \
-	     dn = of_find_matching_node(dn, matches))
-#define for_each_matching_node_and_match(dn, matches, match) \
-	for (dn = of_find_matching_node_and_match(NULL, matches, match); \
-	     dn; dn = of_find_matching_node_and_match(dn, matches, match))
+
 extern struct device_node *of_find_node_by_path(const char *path);
 extern struct device_node *of_find_node_by_phandle(phandle handle);
 extern struct device_node *of_get_parent(const struct device_node *node);
@@ -209,43 +189,11 @@
 
 extern struct device_node *of_get_child_by_name(const struct device_node *node,
 					const char *name);
-#define for_each_child_of_node(parent, child) \
-	for (child = of_get_next_child(parent, NULL); child != NULL; \
-	     child = of_get_next_child(parent, child))
-
-#define for_each_available_child_of_node(parent, child) \
-	for (child = of_get_next_available_child(parent, NULL); child != NULL; \
-	     child = of_get_next_available_child(parent, child))
-
-static inline int of_get_child_count(const struct device_node *np)
-{
-	struct device_node *child;
-	int num = 0;
-
-	for_each_child_of_node(np, child)
-		num++;
-
-	return num;
-}
-
-static inline int of_get_available_child_count(const struct device_node *np)
-{
-	struct device_node *child;
-	int num = 0;
-
-	for_each_available_child_of_node(np, child)
-		num++;
-
-	return num;
-}
 
 /* cache lookup */
 extern struct device_node *of_find_next_cache_node(const struct device_node *);
 extern struct device_node *of_find_node_with_property(
 	struct device_node *from, const char *prop_name);
-#define for_each_node_with_property(dn, prop_name) \
-	for (dn = of_find_node_with_property(NULL, prop_name); dn; \
-	     dn = of_find_node_with_property(dn, prop_name))
 
 extern struct property *of_find_property(const struct device_node *np,
 					 const char *name,
@@ -367,27 +315,48 @@
 	return NULL;
 }
 
+static inline struct device_node *of_find_node_by_type(struct device_node *from,
+	const char *type)
+{
+	return NULL;
+}
+
+static inline struct device_node *of_find_matching_node_and_match(
+	struct device_node *from,
+	const struct of_device_id *matches,
+	const struct of_device_id **match)
+{
+	return NULL;
+}
+
 static inline struct device_node *of_get_parent(const struct device_node *node)
 {
 	return NULL;
 }
 
+static inline struct device_node *of_get_next_child(
+	const struct device_node *node, struct device_node *prev)
+{
+	return NULL;
+}
+
+static inline struct device_node *of_get_next_available_child(
+	const struct device_node *node, struct device_node *prev)
+{
+	return NULL;
+}
+
+static inline struct device_node *of_find_node_with_property(
+	struct device_node *from, const char *prop_name)
+{
+	return NULL;
+}
+
 static inline bool of_have_populated_dt(void)
 {
 	return false;
 }
 
-/* Kill an unused variable warning on a device_node pointer */
-static inline void __of_use_dn(const struct device_node *np)
-{
-}
-
-#define for_each_child_of_node(parent, child) \
-	while (__of_use_dn(parent), __of_use_dn(child), 0)
-
-#define for_each_available_child_of_node(parent, child) \
-	while (0)
-
 static inline struct device_node *of_get_child_by_name(
 					const struct device_node *node,
 					const char *name)
@@ -395,16 +364,6 @@
 	return NULL;
 }
 
-static inline int of_get_child_count(const struct device_node *np)
-{
-	return 0;
-}
-
-static inline int of_get_available_child_count(const struct device_node *np)
-{
-	return 0;
-}
-
 static inline int of_device_is_compatible(const struct device_node *device,
 					  const char *name)
 {
@@ -569,6 +528,13 @@
 static inline int of_node_to_nid(struct device_node *device) { return 0; }
 #endif
 
+static inline struct device_node *of_find_matching_node(
+	struct device_node *from,
+	const struct of_device_id *matches)
+{
+	return of_find_matching_node_and_match(from, matches, NULL);
+}
+
 /**
  * of_property_read_bool - Find a property
  * @np:		device node from which the property value is to be read.
@@ -618,6 +584,55 @@
 		s;						\
 		s = of_prop_next_string(prop, s))
 
+#define for_each_node_by_name(dn, name) \
+	for (dn = of_find_node_by_name(NULL, name); dn; \
+	     dn = of_find_node_by_name(dn, name))
+#define for_each_node_by_type(dn, type) \
+	for (dn = of_find_node_by_type(NULL, type); dn; \
+	     dn = of_find_node_by_type(dn, type))
+#define for_each_compatible_node(dn, type, compatible) \
+	for (dn = of_find_compatible_node(NULL, type, compatible); dn; \
+	     dn = of_find_compatible_node(dn, type, compatible))
+#define for_each_matching_node(dn, matches) \
+	for (dn = of_find_matching_node(NULL, matches); dn; \
+	     dn = of_find_matching_node(dn, matches))
+#define for_each_matching_node_and_match(dn, matches, match) \
+	for (dn = of_find_matching_node_and_match(NULL, matches, match); \
+	     dn; dn = of_find_matching_node_and_match(dn, matches, match))
+
+#define for_each_child_of_node(parent, child) \
+	for (child = of_get_next_child(parent, NULL); child != NULL; \
+	     child = of_get_next_child(parent, child))
+#define for_each_available_child_of_node(parent, child) \
+	for (child = of_get_next_available_child(parent, NULL); child != NULL; \
+	     child = of_get_next_available_child(parent, child))
+
+#define for_each_node_with_property(dn, prop_name) \
+	for (dn = of_find_node_with_property(NULL, prop_name); dn; \
+	     dn = of_find_node_with_property(dn, prop_name))
+
+static inline int of_get_child_count(const struct device_node *np)
+{
+	struct device_node *child;
+	int num = 0;
+
+	for_each_child_of_node(np, child)
+		num++;
+
+	return num;
+}
+
+static inline int of_get_available_child_count(const struct device_node *np)
+{
+	struct device_node *child;
+	int num = 0;
+
+	for_each_available_child_of_node(np, child)
+		num++;
+
+	return num;
+}
+
 #if defined(CONFIG_PROC_FS) && defined(CONFIG_PROC_DEVICETREE)
 extern void proc_device_tree_add_node(struct device_node *, struct proc_dir_entry *);
 extern void proc_device_tree_add_prop(struct proc_dir_entry *pde, struct property *prop);
diff --git a/include/linux/of_device.h b/include/linux/of_device.h
index 8d7dd67..ef37021 100644
--- a/include/linux/of_device.h
+++ b/include/linux/of_device.h
@@ -78,11 +78,13 @@
 
 static inline void of_device_node_put(struct device *dev) { }
 
-static inline const struct of_device_id *of_match_device(
+static inline const struct of_device_id *__of_match_device(
 		const struct of_device_id *matches, const struct device *dev)
 {
 	return NULL;
 }
+#define of_match_device(matches, dev)	\
+	__of_match_device(of_match_ptr(matches), (dev))
 
 static inline struct device_node *of_cpu_device_node_get(int cpu)
 {
diff --git a/include/linux/page-flags.h b/include/linux/page-flags.h
index e464b4e..d1fe1a7 100644
--- a/include/linux/page-flags.h
+++ b/include/linux/page-flags.h
@@ -228,9 +228,9 @@
 TESTPAGEFLAG(Writeback, writeback) TESTSCFLAG(Writeback, writeback)
 PAGEFLAG(MappedToDisk, mappedtodisk)
 
-/* PG_readahead is only used for file reads; PG_reclaim is only for writes */
+/* PG_readahead is only used for reads; PG_reclaim is only for writes */
 PAGEFLAG(Reclaim, reclaim) TESTCLEARFLAG(Reclaim, reclaim)
-PAGEFLAG(Readahead, reclaim)		/* Reminder to do async read-ahead */
+PAGEFLAG(Readahead, reclaim) TESTCLEARFLAG(Readahead, reclaim)
 
 #ifdef CONFIG_HIGHMEM
 /*
diff --git a/include/linux/percpu_ida.h b/include/linux/percpu_ida.h
index 1900bd0..f5cfdd6 100644
--- a/include/linux/percpu_ida.h
+++ b/include/linux/percpu_ida.h
@@ -4,6 +4,7 @@
 #include <linux/types.h>
 #include <linux/bitops.h>
 #include <linux/init.h>
+#include <linux/sched.h>
 #include <linux/spinlock_types.h>
 #include <linux/wait.h>
 #include <linux/cpumask.h>
@@ -61,7 +62,7 @@
 /* Max size of percpu freelist, */
 #define IDA_DEFAULT_PCPU_SIZE	((IDA_DEFAULT_PCPU_BATCH_MOVE * 3) / 2)
 
-int percpu_ida_alloc(struct percpu_ida *pool, gfp_t gfp);
+int percpu_ida_alloc(struct percpu_ida *pool, int state);
 void percpu_ida_free(struct percpu_ida *pool, unsigned tag);
 
 void percpu_ida_destroy(struct percpu_ida *pool);
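With the gfp mask replaced by a task state, a blocking caller now passes TASK_UNINTERRUPTIBLE (or TASK_RUNNING to fail fast); a minimal sketch with a hypothetical wrapper:

static int foo_get_and_put_tag(struct percpu_ida *pool)
{
	int tag = percpu_ida_alloc(pool, TASK_UNINTERRUPTIBLE);

	if (tag < 0)
		return tag;	/* only possible for non-blocking task states */

	percpu_ida_free(pool, tag);
	return 0;
}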
diff --git a/include/linux/phy/phy.h b/include/linux/phy/phy.h
index e273e5a..3f83459 100644
--- a/include/linux/phy/phy.h
+++ b/include/linux/phy/phy.h
@@ -146,7 +146,9 @@
 	phy->attrs.bus_width = bus_width;
 }
 struct phy *phy_get(struct device *dev, const char *string);
+struct phy *phy_optional_get(struct device *dev, const char *string);
 struct phy *devm_phy_get(struct device *dev, const char *string);
+struct phy *devm_phy_optional_get(struct device *dev, const char *string);
 void phy_put(struct phy *phy);
 void devm_phy_put(struct device *dev, struct phy *phy);
 struct phy *of_phy_simple_xlate(struct device *dev,
@@ -232,11 +234,23 @@
 	return ERR_PTR(-ENOSYS);
 }
 
+static inline struct phy *phy_optional_get(struct device *dev,
+					   const char *string)
+{
+	return ERR_PTR(-ENOSYS);
+}
+
 static inline struct phy *devm_phy_get(struct device *dev, const char *string)
 {
 	return ERR_PTR(-ENOSYS);
 }
 
+static inline struct phy *devm_phy_optional_get(struct device *dev,
+						const char *string)
+{
+	return ERR_PTR(-ENOSYS);
+}
+
 static inline void phy_put(struct phy *phy)
 {
 }
diff --git a/include/linux/platform_data/dma-imx-sdma.h b/include/linux/platform_data/dma-imx-sdma.h
index 3a39428..eabac4e 100644
--- a/include/linux/platform_data/dma-imx-sdma.h
+++ b/include/linux/platform_data/dma-imx-sdma.h
@@ -43,6 +43,11 @@
 	s32 dptc_dvfs_addr;
 	s32 utra_addr;
 	s32 ram_code_start_addr;
+	/* End of v1 array */
+	s32 mcu_2_ssish_addr;
+	s32 ssish_2_mcu_addr;
+	s32 hdmi_dma_addr;
+	/* End of v2 array */
 };
 
 /**
diff --git a/include/linux/platform_data/dma-imx.h b/include/linux/platform_data/dma-imx.h
index beac6b8..bcbc6c3 100644
--- a/include/linux/platform_data/dma-imx.h
+++ b/include/linux/platform_data/dma-imx.h
@@ -39,6 +39,7 @@
 	IMX_DMATYPE_IPU_MEMORY,	/* IPU Memory */
 	IMX_DMATYPE_ASRC,	/* ASRC */
 	IMX_DMATYPE_ESAI,	/* ESAI */
+	IMX_DMATYPE_SSI_DUAL,	/* SSI Dual FIFO */
 };
 
 enum imx_dma_prio {
diff --git a/include/linux/platform_data/dma-mmp_tdma.h b/include/linux/platform_data/dma-mmp_tdma.h
index 239e0fc..66574ea 100644
--- a/include/linux/platform_data/dma-mmp_tdma.h
+++ b/include/linux/platform_data/dma-mmp_tdma.h
@@ -1,6 +1,4 @@
 /*
- *  linux/arch/arm/mach-mmp/include/mach/sram.h
- *
  *  SRAM Memory Management
  *
  *  Copyright (c) 2011 Marvell Semiconductors Inc.
@@ -11,8 +9,8 @@
  *
  */
 
-#ifndef __ASM_ARCH_SRAM_H
-#define __ASM_ARCH_SRAM_H
+#ifndef __DMA_MMP_TDMA_H
+#define __DMA_MMP_TDMA_H
 
 #include <linux/genalloc.h>
 
@@ -32,4 +30,4 @@
 
 extern struct gen_pool *sram_get_gpool(char *pool_name);
 
-#endif /* __ASM_ARCH_SRAM_H */
+#endif /* __DMA_MMP_TDMA_H */
diff --git a/include/linux/platform_data/dma-mv_xor.h b/include/linux/platform_data/dma-mv_xor.h
index 8ec18f6..92ffd32 100644
--- a/include/linux/platform_data/dma-mv_xor.h
+++ b/include/linux/platform_data/dma-mv_xor.h
@@ -1,11 +1,9 @@
 /*
- * arch/arm/plat-orion/include/plat/mv_xor.h
- *
  * Marvell XOR platform device data definition file.
  */
 
-#ifndef __PLAT_MV_XOR_H
-#define __PLAT_MV_XOR_H
+#ifndef __DMA_MV_XOR_H
+#define __DMA_MV_XOR_H
 
 #include <linux/dmaengine.h>
 #include <linux/mbus.h>
diff --git a/include/linux/platform_data/vsp1.h b/include/linux/platform_data/vsp1.h
index a73a456..63170e26 100644
--- a/include/linux/platform_data/vsp1.h
+++ b/include/linux/platform_data/vsp1.h
@@ -14,6 +14,8 @@
 #define __PLATFORM_VSP1_H__
 
 #define VSP1_HAS_LIF		(1 << 0)
+#define VSP1_HAS_LUT		(1 << 1)
+#define VSP1_HAS_SRU		(1 << 2)
 
 struct vsp1_platform_data {
 	unsigned int features;
diff --git a/include/linux/rwsem.h b/include/linux/rwsem.h
index 0616ffe..03f3b05 100644
--- a/include/linux/rwsem.h
+++ b/include/linux/rwsem.h
@@ -75,6 +75,17 @@
 } while (0)
 
 /*
+ * This is the same regardless of which rwsem implementation is being used.
+ * It is just a heuristic, meant to be called by somebody already holding the
+ * rwsem, to see if somebody of an incompatible type wants access to the
+ * lock.
+ */
+static inline int rwsem_is_contended(struct rw_semaphore *sem)
+{
+	return !list_empty(&sem->wait_list);
+}
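A sketch of the intended pattern, dropping a long-held read lock when another waiter shows up; the work helpers are placeholders, not part of this change:

static void foo_long_scan(struct rw_semaphore *sem)
{
	down_read(sem);

	while (foo_more_work()) {		/* placeholder loop condition */
		foo_do_some_work();		/* placeholder work item */

		if (rwsem_is_contended(sem)) {
			up_read(sem);
			cond_resched();		/* give the waiter a chance at the lock */
			down_read(sem);
		}
	}

	up_read(sem);
}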
+
+/*
  * lock for reading
  */
 extern void down_read(struct rw_semaphore *sem);
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 68a0e84..a781dec 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -128,6 +128,7 @@
 struct fs_struct;
 struct perf_event_context;
 struct blk_plug;
+struct filename;
 
 /*
  * List of flags we want to share for kernel threads,
@@ -2311,7 +2312,7 @@
 extern int allow_signal(int);
 extern int disallow_signal(int);
 
-extern int do_execve(const char *,
+extern int do_execve(struct filename *,
 		     const char __user * const __user *,
 		     const char __user * const __user *);
 extern long do_fork(unsigned long, unsigned long, unsigned long, int __user *, int __user *);
diff --git a/include/linux/sched/sysctl.h b/include/linux/sched/sysctl.h
index b13cf43..8045a55 100644
--- a/include/linux/sched/sysctl.h
+++ b/include/linux/sched/sysctl.h
@@ -5,7 +5,7 @@
 extern int	     sysctl_hung_task_check_count;
 extern unsigned int  sysctl_hung_task_panic;
 extern unsigned long sysctl_hung_task_timeout_secs;
-extern unsigned long sysctl_hung_task_warnings;
+extern int sysctl_hung_task_warnings;
 extern int proc_dohung_task_timeout_secs(struct ctl_table *table, int write,
 					 void __user *buffer,
 					 size_t *lenp, loff_t *ppos);
diff --git a/include/linux/serial_bcm63xx.h b/include/linux/serial_bcm63xx.h
new file mode 100644
index 0000000..570e964
--- /dev/null
+++ b/include/linux/serial_bcm63xx.h
@@ -0,0 +1,119 @@
+#ifndef _LINUX_SERIAL_BCM63XX_H
+#define _LINUX_SERIAL_BCM63XX_H
+
+/* UART Control Register */
+#define UART_CTL_REG			0x0
+#define UART_CTL_RXTMOUTCNT_SHIFT	0
+#define UART_CTL_RXTMOUTCNT_MASK	(0x1f << UART_CTL_RXTMOUTCNT_SHIFT)
+#define UART_CTL_RSTTXDN_SHIFT		5
+#define UART_CTL_RSTTXDN_MASK		(1 << UART_CTL_RSTTXDN_SHIFT)
+#define UART_CTL_RSTRXFIFO_SHIFT		6
+#define UART_CTL_RSTRXFIFO_MASK		(1 << UART_CTL_RSTRXFIFO_SHIFT)
+#define UART_CTL_RSTTXFIFO_SHIFT		7
+#define UART_CTL_RSTTXFIFO_MASK		(1 << UART_CTL_RSTTXFIFO_SHIFT)
+#define UART_CTL_STOPBITS_SHIFT		8
+#define UART_CTL_STOPBITS_MASK		(0xf << UART_CTL_STOPBITS_SHIFT)
+#define UART_CTL_STOPBITS_1		(0x7 << UART_CTL_STOPBITS_SHIFT)
+#define UART_CTL_STOPBITS_2		(0xf << UART_CTL_STOPBITS_SHIFT)
+#define UART_CTL_BITSPERSYM_SHIFT	12
+#define UART_CTL_BITSPERSYM_MASK	(0x3 << UART_CTL_BITSPERSYM_SHIFT)
+#define UART_CTL_XMITBRK_SHIFT		14
+#define UART_CTL_XMITBRK_MASK		(1 << UART_CTL_XMITBRK_SHIFT)
+#define UART_CTL_RSVD_SHIFT		15
+#define UART_CTL_RSVD_MASK		(1 << UART_CTL_RSVD_SHIFT)
+#define UART_CTL_RXPAREVEN_SHIFT		16
+#define UART_CTL_RXPAREVEN_MASK		(1 << UART_CTL_RXPAREVEN_SHIFT)
+#define UART_CTL_RXPAREN_SHIFT		17
+#define UART_CTL_RXPAREN_MASK		(1 << UART_CTL_RXPAREN_SHIFT)
+#define UART_CTL_TXPAREVEN_SHIFT		18
+#define UART_CTL_TXPAREVEN_MASK		(1 << UART_CTL_TXPAREVEN_SHIFT)
+#define UART_CTL_TXPAREN_SHIFT		18
+#define UART_CTL_TXPAREN_MASK		(1 << UART_CTL_TXPAREN_SHIFT)
+#define UART_CTL_LOOPBACK_SHIFT		20
+#define UART_CTL_LOOPBACK_MASK		(1 << UART_CTL_LOOPBACK_SHIFT)
+#define UART_CTL_RXEN_SHIFT		21
+#define UART_CTL_RXEN_MASK		(1 << UART_CTL_RXEN_SHIFT)
+#define UART_CTL_TXEN_SHIFT		22
+#define UART_CTL_TXEN_MASK		(1 << UART_CTL_TXEN_SHIFT)
+#define UART_CTL_BRGEN_SHIFT		23
+#define UART_CTL_BRGEN_MASK		(1 << UART_CTL_BRGEN_SHIFT)
+
+/* UART Baudword register */
+#define UART_BAUD_REG			0x4
+
+/* UART Misc Control register */
+#define UART_MCTL_REG			0x8
+#define UART_MCTL_DTR_SHIFT		0
+#define UART_MCTL_DTR_MASK		(1 << UART_MCTL_DTR_SHIFT)
+#define UART_MCTL_RTS_SHIFT		1
+#define UART_MCTL_RTS_MASK		(1 << UART_MCTL_RTS_SHIFT)
+#define UART_MCTL_RXFIFOTHRESH_SHIFT	8
+#define UART_MCTL_RXFIFOTHRESH_MASK	(0xf << UART_MCTL_RXFIFOTHRESH_SHIFT)
+#define UART_MCTL_TXFIFOTHRESH_SHIFT	12
+#define UART_MCTL_TXFIFOTHRESH_MASK	(0xf << UART_MCTL_TXFIFOTHRESH_SHIFT)
+#define UART_MCTL_RXFIFOFILL_SHIFT	16
+#define UART_MCTL_RXFIFOFILL_MASK	(0x1f << UART_MCTL_RXFIFOFILL_SHIFT)
+#define UART_MCTL_TXFIFOFILL_SHIFT	24
+#define UART_MCTL_TXFIFOFILL_MASK	(0x1f << UART_MCTL_TXFIFOFILL_SHIFT)
+
+/* UART External Input Configuration register */
+#define UART_EXTINP_REG			0xc
+#define UART_EXTINP_RI_SHIFT		0
+#define UART_EXTINP_RI_MASK		(1 << UART_EXTINP_RI_SHIFT)
+#define UART_EXTINP_CTS_SHIFT		1
+#define UART_EXTINP_CTS_MASK		(1 << UART_EXTINP_CTS_SHIFT)
+#define UART_EXTINP_DCD_SHIFT		2
+#define UART_EXTINP_DCD_MASK		(1 << UART_EXTINP_DCD_SHIFT)
+#define UART_EXTINP_DSR_SHIFT		3
+#define UART_EXTINP_DSR_MASK		(1 << UART_EXTINP_DSR_SHIFT)
+#define UART_EXTINP_IRSTAT(x)		(1 << (x + 4))
+#define UART_EXTINP_IRMASK(x)		(1 << (x + 8))
+#define UART_EXTINP_IR_RI		0
+#define UART_EXTINP_IR_CTS		1
+#define UART_EXTINP_IR_DCD		2
+#define UART_EXTINP_IR_DSR		3
+#define UART_EXTINP_RI_NOSENSE_SHIFT	16
+#define UART_EXTINP_RI_NOSENSE_MASK	(1 << UART_EXTINP_RI_NOSENSE_SHIFT)
+#define UART_EXTINP_CTS_NOSENSE_SHIFT	17
+#define UART_EXTINP_CTS_NOSENSE_MASK	(1 << UART_EXTINP_CTS_NOSENSE_SHIFT)
+#define UART_EXTINP_DCD_NOSENSE_SHIFT	18
+#define UART_EXTINP_DCD_NOSENSE_MASK	(1 << UART_EXTINP_DCD_NOSENSE_SHIFT)
+#define UART_EXTINP_DSR_NOSENSE_SHIFT	19
+#define UART_EXTINP_DSR_NOSENSE_MASK	(1 << UART_EXTINP_DSR_NOSENSE_SHIFT)
+
+/* UART Interrupt register */
+#define UART_IR_REG			0x10
+#define UART_IR_MASK(x)			(1 << (x + 16))
+#define UART_IR_STAT(x)			(1 << (x))
+#define UART_IR_EXTIP			0
+#define UART_IR_TXUNDER			1
+#define UART_IR_TXOVER			2
+#define UART_IR_TXTRESH			3
+#define UART_IR_TXRDLATCH		4
+#define UART_IR_TXEMPTY			5
+#define UART_IR_RXUNDER			6
+#define UART_IR_RXOVER			7
+#define UART_IR_RXTIMEOUT		8
+#define UART_IR_RXFULL			9
+#define UART_IR_RXTHRESH		10
+#define UART_IR_RXNOTEMPTY		11
+#define UART_IR_RXFRAMEERR		12
+#define UART_IR_RXPARERR		13
+#define UART_IR_RXBRK			14
+#define UART_IR_TXDONE			15
+
+/* UART Fifo register */
+#define UART_FIFO_REG			0x14
+#define UART_FIFO_VALID_SHIFT		0
+#define UART_FIFO_VALID_MASK		0xff
+#define UART_FIFO_FRAMEERR_SHIFT	8
+#define UART_FIFO_FRAMEERR_MASK		(1 << UART_FIFO_FRAMEERR_SHIFT)
+#define UART_FIFO_PARERR_SHIFT		9
+#define UART_FIFO_PARERR_MASK		(1 << UART_FIFO_PARERR_SHIFT)
+#define UART_FIFO_BRKDET_SHIFT		10
+#define UART_FIFO_BRKDET_MASK		(1 << UART_FIFO_BRKDET_SHIFT)
+#define UART_FIFO_ANYERR_MASK		(UART_FIFO_FRAMEERR_MASK |	\
+					UART_FIFO_PARERR_MASK |		\
+					UART_FIFO_BRKDET_MASK)
+
+#endif /* _LINUX_SERIAL_BCM63XX_H */
diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
index 1f689e6..3ebbbe7 100644
--- a/include/linux/skbuff.h
+++ b/include/linux/skbuff.h
@@ -2456,6 +2456,7 @@
 void skb_split(struct sk_buff *skb, struct sk_buff *skb1, const u32 len);
 int skb_shift(struct sk_buff *tgt, struct sk_buff *skb, int shiftlen);
 void skb_scrub_packet(struct sk_buff *skb, bool xnet);
+unsigned int skb_gso_transport_seglen(const struct sk_buff *skb);
 struct sk_buff *skb_segment(struct sk_buff *skb, netdev_features_t features);
 
 struct skb_checksum_ops {
@@ -2915,5 +2916,22 @@
 {
 	return !skb->head_frag || skb_cloned(skb);
 }
+
+/**
+ * skb_gso_network_seglen - Return length of individual segments of a gso packet
+ *
+ * @skb: GSO skb
+ *
+ * skb_gso_network_seglen is used to determine the real size of the
+ * individual segments, including Layer3 (IP, IPv6) and L4 headers (TCP/UDP).
+ *
+ * The MAC/L2 header is not accounted for.
+ */
+static inline unsigned int skb_gso_network_seglen(const struct sk_buff *skb)
+{
+	unsigned int hdr_len = skb_transport_header(skb) -
+			       skb_network_header(skb);
+	return hdr_len + skb_gso_transport_seglen(skb);
+}
 #endif	/* __KERNEL__ */
 #endif	/* _LINUX_SKBUFF_H */
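skb_gso_network_seglen() reports the per-segment L3+L4 size of a GSO skb, which is the quantity a forwarding path wants to compare against the egress MTU. A small sketch (fits_mtu() is a hypothetical helper):

/*
 * Sketch: does this (possibly GSO) skb fit the given MTU once
 * resegmented?  For non-GSO skbs the packet length is used as is.
 */
static bool fits_mtu(const struct sk_buff *skb, unsigned int mtu)
{
	if (skb_is_gso(skb))
		return skb_gso_network_seglen(skb) <= mtu;

	return skb->len <= mtu;
}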
diff --git a/include/linux/slab.h b/include/linux/slab.h
index a060142..9260abd 100644
--- a/include/linux/slab.h
+++ b/include/linux/slab.h
@@ -205,8 +205,8 @@
 
 #ifdef CONFIG_SLUB
 /*
- * SLUB allocates up to order 2 pages directly and otherwise
- * passes the request to the page allocator.
+ * SLUB directly allocates requests fitting into an order-1 page
+ * (PAGE_SIZE*2).  Larger requests are passed to the page allocator.
  */
 #define KMALLOC_SHIFT_HIGH	(PAGE_SHIFT + 1)
 #define KMALLOC_SHIFT_MAX	(MAX_ORDER + PAGE_SHIFT)
@@ -217,12 +217,12 @@
 
 #ifdef CONFIG_SLOB
 /*
- * SLOB passes all page size and larger requests to the page allocator.
+ * SLOB passes all requests larger than one page to the page allocator.
  * No kmalloc array is necessary since objects of different sizes can
  * be allocated from the same page.
  */
-#define KMALLOC_SHIFT_MAX	30
 #define KMALLOC_SHIFT_HIGH	PAGE_SHIFT
+#define KMALLOC_SHIFT_MAX	30
 #ifndef KMALLOC_SHIFT_LOW
 #define KMALLOC_SHIFT_LOW	3
 #endif
diff --git a/include/linux/smp.h b/include/linux/smp.h
index 5da22ee..6ae004e 100644
--- a/include/linux/smp.h
+++ b/include/linux/smp.h
@@ -11,12 +11,16 @@
 #include <linux/list.h>
 #include <linux/cpumask.h>
 #include <linux/init.h>
+#include <linux/llist.h>
 
 extern void cpu_idle(void);
 
 typedef void (*smp_call_func_t)(void *info);
 struct call_single_data {
-	struct list_head list;
+	union {
+		struct list_head list;
+		struct llist_node llist;
+	};
 	smp_call_func_t func;
 	void *info;
 	u16 flags;
@@ -184,6 +188,9 @@
  */
 extern void arch_disable_smp_support(void);
 
+extern void arch_enable_nonboot_cpus_begin(void);
+extern void arch_enable_nonboot_cpus_end(void);
+
 void smp_setup_processor_id(void);
 
 #endif /* __LINUX_SMP_H */
diff --git a/include/linux/spi/spi.h b/include/linux/spi/spi.h
index a1d4ca2..4203c66 100644
--- a/include/linux/spi/spi.h
+++ b/include/linux/spi/spi.h
@@ -273,7 +273,7 @@
  *	message while queuing transfers that arrive in the meantime. When the
  *	driver is finished with this message, it must call
  *	spi_finalize_current_message() so the subsystem can issue the next
- *	transfer
+ *	message
  * @unprepare_transfer_hardware: there are currently no more messages on the
  *	queue so the subsystem notifies the driver that it may relax the
  *	hardware by issuing this call
@@ -287,7 +287,10 @@
  *                  - return 1 if the transfer is still in progress. When
  *                    the driver is finished with this transfer it must
  *                    call spi_finalize_current_transfer() so the subsystem
- *                    can issue the next transfer
+ *                    can issue the next transfer. Note: transfer_one and
+ *                    transfer_one_message are mutually exclusive; when both
+ *                    are set, the generic subsystem does not call your
+ *                    transfer_one callback.
  * @unprepare_message: undo any work done by prepare_message().
  * @cs_gpios: Array of GPIOs to use as chip select lines; one per CS
  *	number. Any individual value may be -ENOENT for CS lines that
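Since transfer_one and transfer_one_message are mutually exclusive, a controller driver implements one or the other; the per-transfer hook is usually the simpler choice. A hedged sketch with hypothetical mydrv_* names:

/*
 * Sketch: a controller using the transfer_one path.  Returning 1
 * instead would mean the transfer is still in flight and
 * spi_finalize_current_transfer() gets called later (e.g. from IRQ).
 */
static int mydrv_transfer_one(struct spi_master *master,
			      struct spi_device *spi,
			      struct spi_transfer *xfer)
{
	mydrv_do_pio(spi, xfer->tx_buf, xfer->rx_buf, xfer->len);
	return 0;			/* finished synchronously */
}

static int mydrv_probe(struct platform_device *pdev)
{
	struct spi_master *master = spi_alloc_master(&pdev->dev, 0);

	if (!master)
		return -ENOMEM;

	master->transfer_one = mydrv_transfer_one;
	/* deliberately not setting transfer_one_message as well */
	return devm_spi_register_master(&pdev->dev, master);
}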
diff --git a/include/linux/ssb/ssb.h b/include/linux/ssb/ssb.h
index c64999f..07ef9b8 100644
--- a/include/linux/ssb/ssb.h
+++ b/include/linux/ssb/ssb.h
@@ -486,6 +486,7 @@
 #endif /* EMBEDDED */
 #ifdef CONFIG_SSB_DRIVER_GPIO
 	struct gpio_chip gpio;
+	struct irq_domain *irq_domain;
 #endif /* DRIVER_GPIO */
 
 	/* Internal-only stuff follows. Do not touch. */
diff --git a/include/linux/sunrpc/svc.h b/include/linux/sunrpc/svc.h
index 6eecfc2..04e7632 100644
--- a/include/linux/sunrpc/svc.h
+++ b/include/linux/sunrpc/svc.h
@@ -368,7 +368,7 @@
 	struct svc_program *	pg_next;	/* other programs (same xprt) */
 	u32			pg_prog;	/* program number */
 	unsigned int		pg_lovers;	/* lowest version */
-	unsigned int		pg_hivers;	/* lowest version */
+	unsigned int		pg_hivers;	/* highest version */
 	unsigned int		pg_nvers;	/* number of versions */
 	struct svc_version **	pg_vers;	/* version array */
 	char *			pg_name;	/* service name */
@@ -386,8 +386,10 @@
 	struct svc_procedure *	vs_proc;	/* per-procedure info */
 	u32			vs_xdrsize;	/* xdrsize needed for this version */
 
-	unsigned int		vs_hidden : 1;	/* Don't register with portmapper.
+	unsigned int		vs_hidden : 1,	/* Don't register with portmapper.
 						 * Only used for nfsacl so far. */
+				vs_rpcb_optnl:1;/* Don't care about the registration result.
+						 * Only used for nfsv4. */
 
 	/* Override dispatch function (e.g. when caching replies).
 	 * A return value of 0 means drop the request. 
diff --git a/include/linux/tick.h b/include/linux/tick.h
index 0175d86..b84773c 100644
--- a/include/linux/tick.h
+++ b/include/linux/tick.h
@@ -104,7 +104,7 @@
 extern void tick_clock_notify(void);
 extern int tick_check_oneshot_change(int allow_nohz);
 extern struct tick_sched *tick_get_tick_sched(int cpu);
-extern void tick_check_idle(void);
+extern void tick_irq_enter(void);
 extern int tick_oneshot_mode_active(void);
 #  ifndef arch_needs_cpu
 #   define arch_needs_cpu(cpu) (0)
@@ -112,7 +112,7 @@
 # else
 static inline void tick_clock_notify(void) { }
 static inline int tick_check_oneshot_change(int allow_nohz) { return 0; }
-static inline void tick_check_idle(void) { }
+static inline void tick_irq_enter(void) { }
 static inline int tick_oneshot_mode_active(void) { return 0; }
 # endif
 
@@ -121,7 +121,7 @@
 static inline void tick_cancel_sched_timer(int cpu) { }
 static inline void tick_clock_notify(void) { }
 static inline int tick_check_oneshot_change(int allow_nohz) { return 0; }
-static inline void tick_check_idle(void) { }
+static inline void tick_irq_enter(void) { }
 static inline int tick_oneshot_mode_active(void) { return 0; }
 #endif /* !CONFIG_GENERIC_CLOCKEVENTS */
 
diff --git a/include/linux/usb.h b/include/linux/usb.h
index c716da1..7f6eb85 100644
--- a/include/linux/usb.h
+++ b/include/linux/usb.h
@@ -1265,8 +1265,6 @@
  * @sg: scatter gather buffer list, the buffer size of each element in
  * 	the list (except the last) must be divisible by the endpoint's
  * 	max packet size if no_sg_constraint isn't set in 'struct usb_bus'
- * 	(FIXME: scatter-gather under xHCI is broken for periodic transfers.
- * 	Do not use urb->sg for interrupt endpoints for now, only bulk.)
  * @num_mapped_sgs: (internal) number of mapped sg entries
  * @num_sgs: number of entries in the sg list
  * @transfer_buffer_length: How big is transfer_buffer.  The transfer may
diff --git a/include/linux/vm_event_item.h b/include/linux/vm_event_item.h
index c557c6d..3a712e2 100644
--- a/include/linux/vm_event_item.h
+++ b/include/linux/vm_event_item.h
@@ -71,12 +71,14 @@
 		THP_ZERO_PAGE_ALLOC,
 		THP_ZERO_PAGE_ALLOC_FAILED,
 #endif
+#ifdef CONFIG_DEBUG_TLBFLUSH
 #ifdef CONFIG_SMP
 		NR_TLB_REMOTE_FLUSH,	/* cpu tried to flush others' tlbs */
 		NR_TLB_REMOTE_FLUSH_RECEIVED,/* cpu received ipi for flush */
-#endif
+#endif /* CONFIG_SMP */
 		NR_TLB_LOCAL_FLUSH_ALL,
 		NR_TLB_LOCAL_FLUSH_ONE,
+#endif /* CONFIG_DEBUG_TLBFLUSH */
 		NR_VM_EVENT_ITEMS
 };
 
diff --git a/include/linux/vmstat.h b/include/linux/vmstat.h
index e4b9480..67ce70c 100644
--- a/include/linux/vmstat.h
+++ b/include/linux/vmstat.h
@@ -83,6 +83,14 @@
 #define count_vm_numa_events(x, y) do { (void)(y); } while (0)
 #endif /* CONFIG_NUMA_BALANCING */
 
+#ifdef CONFIG_DEBUG_TLBFLUSH
+#define count_vm_tlb_event(x)	   count_vm_event(x)
+#define count_vm_tlb_events(x, y)  count_vm_events(x, y)
+#else
+#define count_vm_tlb_event(x)     do {} while (0)
+#define count_vm_tlb_events(x, y) do { (void)(y); } while (0)
+#endif
+
 #define __count_zone_vm_events(item, zone, delta) \
 		__count_vm_events(item##_NORMAL - ZONE_NORMAL + \
 		zone_idx(zone), delta)
@@ -142,8 +150,6 @@
 	return x;
 }
 
-extern unsigned long global_reclaimable_pages(void);
-
 #ifdef CONFIG_NUMA
 /*
  * Determine the per node value of a stat item. This function
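The count_vm_tlb_event() wrappers let TLB flush paths count their work unconditionally while compiling to nothing unless CONFIG_DEBUG_TLBFLUSH is enabled. A sketch (my_flush_one() is hypothetical; __flush_tlb_one() is the x86 primitive):

/* Sketch: account a local single-page flush, then perform it. */
static void my_flush_one(unsigned long addr)
{
	count_vm_tlb_event(NR_TLB_LOCAL_FLUSH_ONE);
	__flush_tlb_one(addr);
}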
diff --git a/include/linux/workqueue.h b/include/linux/workqueue.h
index 594521b..704f4f6 100644
--- a/include/linux/workqueue.h
+++ b/include/linux/workqueue.h
@@ -419,10 +419,7 @@
 	static struct lock_class_key __key;				\
 	const char *__lock_name;					\
 									\
-	if (__builtin_constant_p(fmt))					\
-		__lock_name = (fmt);					\
-	else								\
-		__lock_name = #fmt;					\
+	__lock_name = #fmt#args;					\
 									\
 	__alloc_workqueue_key((fmt), (flags), (max_active),		\
 			      &__key, __lock_name, ##args);		\
diff --git a/drivers/staging/zsmalloc/zsmalloc.h b/include/linux/zsmalloc.h
similarity index 96%
rename from drivers/staging/zsmalloc/zsmalloc.h
rename to include/linux/zsmalloc.h
index c2eb174..e44d634 100644
--- a/drivers/staging/zsmalloc/zsmalloc.h
+++ b/include/linux/zsmalloc.h
@@ -2,6 +2,7 @@
  * zsmalloc memory allocator
  *
  * Copyright (C) 2011  Nitin Gupta
+ * Copyright (C) 2012, 2013 Minchan Kim
  *
  * This code is released using a dual license strategy: BSD/GPL
  * You can choose the license that better fits your requirements.
diff --git a/include/media/adv7604.h b/include/media/adv7604.h
index dc004bc..d262a3a 100644
--- a/include/media/adv7604.h
+++ b/include/media/adv7604.h
@@ -78,11 +78,14 @@
 	ADV7604_OP_FORMAT_SEL_SDR_ITU656_24_MODE2 = 0x8a,
 };
 
+enum adv7604_drive_strength {
+	ADV7604_DR_STR_MEDIUM_LOW = 1,
+	ADV7604_DR_STR_MEDIUM_HIGH = 2,
+	ADV7604_DR_STR_HIGH = 3,
+};
+
 /* Platform dependent definition */
 struct adv7604_platform_data {
-	/* connector - HDMI or DVI? */
-	unsigned connector_hdmi:1;
-
 	/* DIS_PWRDNB: 1 if the PWRDNB pin is unused and unconnected */
 	unsigned disable_pwrdnb:1;
 
@@ -110,6 +113,15 @@
 	unsigned replicate_av_codes:1;
 	unsigned invert_cbcr:1;
 
+	/* IO register 0x06 */
+	unsigned inv_vs_pol:1;
+	unsigned inv_hs_pol:1;
+
+	/* IO register 0x14 */
+	enum adv7604_drive_strength dr_str_data;
+	enum adv7604_drive_strength dr_str_clk;
+	enum adv7604_drive_strength dr_str_sync;
+
 	/* IO register 0x30 */
 	unsigned output_bus_lsb_to_msb:1;
 
@@ -131,16 +143,20 @@
 	u8 i2c_vdp;
 };
 
-/*
- * Mode of operation.
- * This is used as the input argument of the s_routing video op.
- */
-enum adv7604_mode {
-	ADV7604_MODE_COMP,
-	ADV7604_MODE_GR,
-	ADV7604_MODE_HDMI,
+enum adv7604_input_port {
+	ADV7604_INPUT_HDMI_PORT_A,
+	ADV7604_INPUT_HDMI_PORT_B,
+	ADV7604_INPUT_HDMI_PORT_C,
+	ADV7604_INPUT_HDMI_PORT_D,
+	ADV7604_INPUT_VGA_RGB,
+	ADV7604_INPUT_VGA_COMP,
 };
 
+#define ADV7604_EDID_PORT_A 0
+#define ADV7604_EDID_PORT_B 1
+#define ADV7604_EDID_PORT_C 2
+#define ADV7604_EDID_PORT_D 3
+
 #define V4L2_CID_ADV_RX_ANALOG_SAMPLING_PHASE	(V4L2_CID_DV_CLASS_BASE + 0x1000)
 #define V4L2_CID_ADV_RX_FREE_RUN_COLOR_MANUAL	(V4L2_CID_DV_CLASS_BASE + 0x1001)
 #define V4L2_CID_ADV_RX_FREE_RUN_COLOR		(V4L2_CID_DV_CLASS_BASE + 0x1002)
diff --git a/include/media/adv7842.h b/include/media/adv7842.h
index c02201d..3932209 100644
--- a/include/media/adv7842.h
+++ b/include/media/adv7842.h
@@ -108,6 +108,13 @@
 	ADV7842_SELECT_SDP_YC,
 };
 
+enum adv7842_drive_strength {
+	ADV7842_DR_STR_LOW = 0,
+	ADV7842_DR_STR_MEDIUM_LOW = 1,
+	ADV7842_DR_STR_MEDIUM_HIGH = 2,
+	ADV7842_DR_STR_HIGH = 3,
+};
+
 struct adv7842_sdp_csc_coeff {
 	bool manual;
 	uint16_t scaling;
@@ -131,13 +138,18 @@
 	uint16_t hs_width;
 	uint16_t de_beg;
 	uint16_t de_end;
+	uint8_t vs_beg_o;
+	uint8_t vs_beg_e;
+	uint8_t vs_end_o;
+	uint8_t vs_end_e;
+	uint8_t de_v_beg_o;
+	uint8_t de_v_beg_e;
+	uint8_t de_v_end_o;
+	uint8_t de_v_end_e;
 };
 
 /* Platform dependent definition */
 struct adv7842_platform_data {
-	/* connector - HDMI or DVI? */
-	unsigned connector_hdmi:1;
-
 	/* chip reset during probe */
 	unsigned chip_reset:1;
 
@@ -156,12 +168,12 @@
 	/* Default mode */
 	enum adv7842_mode mode;
 
+	/* Default input */
+	unsigned input;
+
 	/* Video standard */
 	enum adv7842_vid_std_select vid_std_select;
 
-	/* Input Color Space */
-	enum adv7842_inp_color_space inp_color_space;
-
 	/* Select output format */
 	enum adv7842_op_format_sel op_format_sel;
 
@@ -181,22 +193,37 @@
 	unsigned output_bus_lsb_to_msb:1;
 
 	/* IO register 0x14 */
-	struct {
-		unsigned data:2;
-		unsigned clock:2;
-		unsigned sync:2;
-	} drive_strength;
+	enum adv7842_drive_strength dr_str_data;
+	enum adv7842_drive_strength dr_str_clk;
+	enum adv7842_drive_strength dr_str_sync;
+
+	/*
+	 * IO register 0x19: Adjustment to the LLC DLL phase in
+	 * increments of 1/32 of a clock period.
+	 */
+	unsigned llc_dll_phase:5;
 
 	/* External RAM for 3-D comb or frame synchronizer */
 	unsigned sd_ram_size; /* ram size in MB */
 	unsigned sd_ram_ddr:1; /* ddr or sdr sdram */
 
-	/* Free run */
-	unsigned hdmi_free_run_mode;
+	/* HDMI free run, CP-reg 0xBA */
+	unsigned hdmi_free_run_enable:1;
+	/* 0 = Mode 0: run when there is no TMDS clock
+	   1 = Mode 1: run when there is no TMDS clock or the
+	       video resolution does not match programmed one. */
+	unsigned hdmi_free_run_mode:1;
+
+	/* SDP free run, CP-reg 0xDD */
+	unsigned sdp_free_run_auto:1;
+	unsigned sdp_free_run_man_col_en:1;
+	unsigned sdp_free_run_cbar_en:1;
+	unsigned sdp_free_run_force:1;
 
 	struct adv7842_sdp_csc_coeff sdp_csc_coeff;
 
-	struct adv7842_sdp_io_sync_adjustment sdp_io_sync;
+	struct adv7842_sdp_io_sync_adjustment sdp_io_sync_625;
+	struct adv7842_sdp_io_sync_adjustment sdp_io_sync_525;
 
 	/* i2c addresses */
 	u8 i2c_sdp_io;
@@ -223,4 +250,8 @@
  * deinterlacer. */
 #define ADV7842_CMD_RAM_TEST _IO('V', BASE_VIDIOC_PRIVATE)
 
+#define ADV7842_EDID_PORT_A   0
+#define ADV7842_EDID_PORT_B   1
+#define ADV7842_EDID_PORT_VGA 2
+
 #endif
diff --git a/include/media/atmel-isi.h b/include/media/atmel-isi.h
index 6568230..2b02347 100644
--- a/include/media/atmel-isi.h
+++ b/include/media/atmel-isi.h
@@ -56,6 +56,7 @@
 #define		ISI_CFG1_FRATE_DIV_6		(5 << 8)
 #define		ISI_CFG1_FRATE_DIV_7		(6 << 8)
 #define		ISI_CFG1_FRATE_DIV_8		(7 << 8)
+#define		ISI_CFG1_FRATE_DIV_MASK		(7 << 8)
 #define ISI_CFG1_DISCR				(1 << 11)
 #define ISI_CFG1_FULL_MODE			(1 << 12)
 
@@ -66,6 +67,7 @@
 #define		ISI_CFG2_YCC_SWAP_MODE_1	(1 << 28)
 #define		ISI_CFG2_YCC_SWAP_MODE_2	(2 << 28)
 #define		ISI_CFG2_YCC_SWAP_MODE_3	(3 << 28)
+#define		ISI_CFG2_YCC_SWAP_MODE_MASK	(3 << 28)
 #define ISI_CFG2_IM_VSIZE_OFFSET		0
 #define ISI_CFG2_IM_HSIZE_OFFSET		16
 #define ISI_CFG2_IM_VSIZE_MASK		(0x7FF << ISI_CFG2_IM_VSIZE_OFFSET)
diff --git a/include/media/media-entity.h b/include/media/media-entity.h
index 10df551..e004591 100644
--- a/include/media/media-entity.h
+++ b/include/media/media-entity.h
@@ -24,6 +24,7 @@
 #define _MEDIA_ENTITY_H
 
 #include <linux/bitops.h>
+#include <linux/kernel.h>
 #include <linux/list.h>
 #include <linux/media.h>
 
diff --git a/include/media/omap4iss.h b/include/media/omap4iss.h
new file mode 100644
index 0000000..0d7620d
--- /dev/null
+++ b/include/media/omap4iss.h
@@ -0,0 +1,65 @@
+#ifndef ARCH_ARM_PLAT_OMAP4_ISS_H
+#define ARCH_ARM_PLAT_OMAP4_ISS_H
+
+#include <linux/i2c.h>
+
+struct iss_device;
+
+enum iss_interface_type {
+	ISS_INTERFACE_CSI2A_PHY1,
+	ISS_INTERFACE_CSI2B_PHY2,
+};
+
+/**
+ * struct iss_csiphy_lane: CSI2 lane position and polarity
+ * @pos: position of the lane
+ * @pol: polarity of the lane
+ */
+struct iss_csiphy_lane {
+	u8 pos;
+	u8 pol;
+};
+
+#define ISS_CSIPHY1_NUM_DATA_LANES	4
+#define ISS_CSIPHY2_NUM_DATA_LANES	1
+
+/**
+ * struct iss_csiphy_lanes_cfg - CSI2 lane configuration
+ * @data: Configuration of one or two data lanes
+ * @clk: Clock lane configuration
+ */
+struct iss_csiphy_lanes_cfg {
+	struct iss_csiphy_lane data[ISS_CSIPHY1_NUM_DATA_LANES];
+	struct iss_csiphy_lane clk;
+};
+
+/**
+ * struct iss_csi2_platform_data - CSI2 interface platform data
+ * @crc: Enable the cyclic redundancy check
+ * @vpclk_div: Video port output clock control
+ */
+struct iss_csi2_platform_data {
+	unsigned crc:1;
+	unsigned vpclk_div:2;
+	struct iss_csiphy_lanes_cfg lanecfg;
+};
+
+struct iss_subdev_i2c_board_info {
+	struct i2c_board_info *board_info;
+	int i2c_adapter_id;
+};
+
+struct iss_v4l2_subdevs_group {
+	struct iss_subdev_i2c_board_info *subdevs;
+	enum iss_interface_type interface;
+	union {
+		struct iss_csi2_platform_data csi2;
+	} bus; /* gcc < 4.6.0 chokes on anonymous union initializers */
+};
+
+struct iss_platform_data {
+	struct iss_v4l2_subdevs_group *subdevs;
+	void (*set_constraints)(struct iss_device *iss, bool enable);
+};
+
+#endif
diff --git a/include/media/rc-map.h b/include/media/rc-map.h
index 6628f5d..a20ed97 100644
--- a/include/media/rc-map.h
+++ b/include/media/rc-map.h
@@ -193,6 +193,7 @@
 #define RC_MAP_VIDEOMATE_TV_PVR          "rc-videomate-tv-pvr"
 #define RC_MAP_WINFAST                   "rc-winfast"
 #define RC_MAP_WINFAST_USBII_DELUXE      "rc-winfast-usbii-deluxe"
+#define RC_MAP_SU3000                    "rc-su3000"
 
 /*
  * Please, do not just append newer Remote Controller names at the end.
diff --git a/include/media/saa6588.h b/include/media/saa6588.h
index 2c3c442..b5ec1aa 100644
--- a/include/media/saa6588.h
+++ b/include/media/saa6588.h
@@ -27,6 +27,7 @@
 
 struct saa6588_command {
 	unsigned int  block_count;
+	bool          nonblocking;
 	int           result;
 	unsigned char __user *buffer;
 	struct file   *instance;
@@ -34,7 +35,6 @@
 };
 
 /* These ioctls are internal to the kernel */
-#define SAA6588_CMD_OPEN	_IOW('R', 1, int)
 #define SAA6588_CMD_CLOSE	_IOW('R', 2, int)
 #define SAA6588_CMD_READ	_IOR('R', 3, int)
 #define SAA6588_CMD_POLL	_IOR('R', 4, int)
diff --git a/include/media/saa6752hs.h b/include/media/saa6752hs.h
deleted file mode 100644
index 3b8686e..0000000
--- a/include/media/saa6752hs.h
+++ /dev/null
@@ -1,26 +0,0 @@
-/*
-    saa6752hs.h - definition for saa6752hs MPEG encoder
-
-    Copyright (C) 2003 Andrew de Quincey <adq@lidskialf.net>
-
-    This program is free software; you can redistribute it and/or modify
-    it under the terms of the GNU General Public License as published by
-    the Free Software Foundation; either version 2 of the License, or
-    (at your option) any later version.
-
-    This program is distributed in the hope that it will be useful,
-    but WITHOUT ANY WARRANTY; without even the implied warranty of
-    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
-    GNU General Public License for more details.
-
-    You should have received a copy of the GNU General Public License
-    along with this program; if not, write to the Free Software
-    Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
-*/
-
-
-/*
- * Local variables:
- * c-basic-offset: 8
- * End:
- */
diff --git a/include/media/si4713.h b/include/media/si4713.h
index ed7353e..f98a0a7 100644
--- a/include/media/si4713.h
+++ b/include/media/si4713.h
@@ -23,6 +23,8 @@
  * Platform dependent definition
  */
 struct si4713_platform_data {
+	const char * const *supply_names;
+	unsigned supplies;
 	int gpio_reset; /* < 0 if not used */
 };
 
diff --git a/include/media/v4l2-fh.h b/include/media/v4l2-fh.h
index 528cdaf..8035167 100644
--- a/include/media/v4l2-fh.h
+++ b/include/media/v4l2-fh.h
@@ -45,6 +45,10 @@
 	struct list_head	available; /* Dequeueable event */
 	unsigned int		navailable;
 	u32			sequence;
+
+#if IS_ENABLED(CONFIG_V4L2_MEM2MEM_DEV)
+	struct v4l2_m2m_ctx	*m2m_ctx;
+#endif
 };
 
 /*
diff --git a/include/media/v4l2-mem2mem.h b/include/media/v4l2-mem2mem.h
index 44542a2..12ea5a6 100644
--- a/include/media/v4l2-mem2mem.h
+++ b/include/media/v4l2-mem2mem.h
@@ -64,6 +64,9 @@
 };
 
 struct v4l2_m2m_ctx {
+	/* optional cap/out vb2 queues lock */
+	struct mutex			*q_lock;
+
 /* private: internal use only */
 	struct v4l2_m2m_dev		*m2m_dev;
 
@@ -229,5 +232,26 @@
 	return v4l2_m2m_buf_remove(&m2m_ctx->cap_q_ctx);
 }
 
+/* v4l2 ioctl helpers */
+
+int v4l2_m2m_ioctl_reqbufs(struct file *file, void *priv,
+				struct v4l2_requestbuffers *rb);
+int v4l2_m2m_ioctl_create_bufs(struct file *file, void *fh,
+				struct v4l2_create_buffers *create);
+int v4l2_m2m_ioctl_querybuf(struct file *file, void *fh,
+				struct v4l2_buffer *buf);
+int v4l2_m2m_ioctl_expbuf(struct file *file, void *fh,
+				struct v4l2_exportbuffer *eb);
+int v4l2_m2m_ioctl_qbuf(struct file *file, void *fh,
+				struct v4l2_buffer *buf);
+int v4l2_m2m_ioctl_dqbuf(struct file *file, void *fh,
+				struct v4l2_buffer *buf);
+int v4l2_m2m_ioctl_streamon(struct file *file, void *fh,
+				enum v4l2_buf_type type);
+int v4l2_m2m_ioctl_streamoff(struct file *file, void *fh,
+				enum v4l2_buf_type type);
+int v4l2_m2m_fop_mmap(struct file *file, struct vm_area_struct *vma);
+unsigned int v4l2_m2m_fop_poll(struct file *file, poll_table *wait);
+
 #endif /* _MEDIA_V4L2_MEM2MEM_H */
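Together with the new v4l2_fh::m2m_ctx pointer above, the v4l2_m2m_ioctl_* and v4l2_m2m_fop_* helpers let a mem2mem driver drop its hand-rolled buffer ioctls. A hedged sketch of the wiring, with hypothetical mydrv_* names:

/* Sketch: ops tables built from the new helpers. */
static const struct v4l2_ioctl_ops mydrv_ioctl_ops = {
	.vidioc_reqbufs		= v4l2_m2m_ioctl_reqbufs,
	.vidioc_querybuf	= v4l2_m2m_ioctl_querybuf,
	.vidioc_qbuf		= v4l2_m2m_ioctl_qbuf,
	.vidioc_dqbuf		= v4l2_m2m_ioctl_dqbuf,
	.vidioc_streamon	= v4l2_m2m_ioctl_streamon,
	.vidioc_streamoff	= v4l2_m2m_ioctl_streamoff,
	/* format handling stays driver specific */
};

static const struct v4l2_file_operations mydrv_fops = {
	.owner		= THIS_MODULE,
	.poll		= v4l2_m2m_fop_poll,
	.mmap		= v4l2_m2m_fop_mmap,
	.unlocked_ioctl	= video_ioctl2,
};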
 
diff --git a/include/media/v4l2-of.h b/include/media/v4l2-of.h
index 3a8a841..541cea4 100644
--- a/include/media/v4l2-of.h
+++ b/include/media/v4l2-of.h
@@ -53,7 +53,6 @@
  * @port: identifier (value of reg property) of a port this endpoint belongs to
  * @id: identifier (value of reg property) of this endpoint
  * @local_node: pointer to device_node of this endpoint
- * @remote: phandle to remote endpoint node
  * @bus_type: bus type
  * @bus: bus configuration data structure
  * @head: list head for this structure
@@ -62,7 +61,6 @@
 	unsigned int port;
 	unsigned int id;
 	const struct device_node *local_node;
-	const __be32 *remote;
 	enum v4l2_mbus_type bus_type;
 	union {
 		struct v4l2_of_bus_parallel parallel;
@@ -72,8 +70,8 @@
 };
 
 #ifdef CONFIG_OF
-void v4l2_of_parse_endpoint(const struct device_node *node,
-				struct v4l2_of_endpoint *link);
+int v4l2_of_parse_endpoint(const struct device_node *node,
+			   struct v4l2_of_endpoint *endpoint);
 struct device_node *v4l2_of_get_next_endpoint(const struct device_node *parent,
 					struct device_node *previous);
 struct device_node *v4l2_of_get_remote_port_parent(
diff --git a/include/media/videobuf2-core.h b/include/media/videobuf2-core.h
index 941055e..bef53ce 100644
--- a/include/media/videobuf2-core.h
+++ b/include/media/videobuf2-core.h
@@ -142,6 +142,7 @@
 /**
  * enum vb2_buffer_state - current video buffer state
  * @VB2_BUF_STATE_DEQUEUED:	buffer under userspace control
+ * @VB2_BUF_STATE_PREPARING:	buffer is being prepared in videobuf
  * @VB2_BUF_STATE_PREPARED:	buffer prepared in videobuf and by the driver
  * @VB2_BUF_STATE_QUEUED:	buffer queued in videobuf, but not in driver
  * @VB2_BUF_STATE_ACTIVE:	buffer queued in driver and possibly used
@@ -154,6 +155,7 @@
  */
 enum vb2_buffer_state {
 	VB2_BUF_STATE_DEQUEUED,
+	VB2_BUF_STATE_PREPARING,
 	VB2_BUF_STATE_PREPARED,
 	VB2_BUF_STATE_QUEUED,
 	VB2_BUF_STATE_ACTIVE,
@@ -250,10 +252,13 @@
  *			receive buffers with @buf_queue callback before
  *			@start_streaming is called; the driver gets the number
  *			of already queued buffers in count parameter; driver
- *			can return an error if hardware fails or not enough
- *			buffers has been queued, in such case all buffers that
- *			have been already given by the @buf_queue callback are
- *			invalidated.
+ *			can return an error if the hardware fails, in which
+ *			case all buffers that have already been given by the
+ *			@buf_queue callback are invalidated.
+ *			If there were not enough queued buffers to start
+ *			streaming, then this callback returns -ENOBUFS, and the
+ *			vb2 core will retry calling @start_streaming when a new
+ *			buffer is queued.
  * @stop_streaming:	called when 'streaming' state must be disabled; driver
  *			should stop any DMA transactions or wait until they
  *			finish and give back all buffers it got from buf_queue()
@@ -321,6 +326,9 @@
  * @done_wq:	waitqueue for processes waiting for buffers ready to be dequeued
  * @alloc_ctx:	memory type/allocator-specific contexts for each plane
  * @streaming:	current streaming state
+ * @retry_start_streaming: start_streaming() was called, but there were not enough
+ *		buffers queued. If set, then retry calling start_streaming when
+ *		queuing a new buffer.
  * @fileio:	file io emulator internal data, used only if emulator is active
  */
 struct vb2_queue {
@@ -353,6 +361,7 @@
 	unsigned int			plane_sizes[VIDEO_MAX_PLANES];
 
 	unsigned int			streaming:1;
+	unsigned int			retry_start_streaming:1;
 
 	struct vb2_fileio_data		*fileio;
 };
@@ -491,6 +500,7 @@
 
 int vb2_fop_mmap(struct file *file, struct vm_area_struct *vma);
 int vb2_fop_release(struct file *file);
+int _vb2_fop_release(struct file *file, struct mutex *lock);
 ssize_t vb2_fop_write(struct file *file, const char __user *buf,
 		size_t count, loff_t *ppos);
 ssize_t vb2_fop_read(struct file *file, char __user *buf,
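A driver that needs a minimum number of queued buffers can now return -ENOBUFS from start_streaming(); the core sets retry_start_streaming and calls it again once another buffer is queued, while any other error still invalidates the buffers already handed over. A hedged sketch with hypothetical mydrv_* names:

/* Sketch: honouring the -ENOBUFS retry contract. */
static int mydrv_start_streaming(struct vb2_queue *q, unsigned int count)
{
	struct mydrv_dev *dev = vb2_get_drv_priv(q);

	if (count < MYDRV_MIN_BUFFERS)
		return -ENOBUFS;	/* vb2 retries on the next queued buffer */

	return mydrv_hw_start(dev);
}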
diff --git a/include/net/datalink.h b/include/net/datalink.h
index deb7ca7..93cb18f 100644
--- a/include/net/datalink.h
+++ b/include/net/datalink.h
@@ -15,4 +15,6 @@
 	struct list_head node;
 };
 
+struct datalink_proto *make_EII_client(void);
+void destroy_EII_client(struct datalink_proto *dl);
 #endif
diff --git a/include/net/dn.h b/include/net/dn.h
index ccc1558..913b73d 100644
--- a/include/net/dn.h
+++ b/include/net/dn.h
@@ -200,6 +200,8 @@
 }
 
 unsigned int dn_mss_from_pmtu(struct net_device *dev, int mtu);
+void dn_register_sysctl(void);
+void dn_unregister_sysctl(void);
 
 #define DN_MENUVER_ACC 0x01
 #define DN_MENUVER_USR 0x02
diff --git a/include/net/dn_route.h b/include/net/dn_route.h
index b409ad6..55df993 100644
--- a/include/net/dn_route.h
+++ b/include/net/dn_route.h
@@ -20,6 +20,8 @@
 			 struct sock *sk, int flags);
 int dn_cache_dump(struct sk_buff *skb, struct netlink_callback *cb);
 void dn_rt_cache_flush(int delay);
+int dn_route_rcv(struct sk_buff *skb, struct net_device *dev,
+		 struct packet_type *pt, struct net_device *orig_dev);
 
 /* Masks for flags field */
 #define DN_RT_F_PID 0x07 /* Mask for packet type                      */
diff --git a/include/net/ethoc.h b/include/net/ethoc.h
index 96f3789..2a2d6bb 100644
--- a/include/net/ethoc.h
+++ b/include/net/ethoc.h
@@ -16,6 +16,7 @@
 struct ethoc_platform_data {
 	u8 hwaddr[IFHWADDRLEN];
 	s8 phy_id;
+	u32 eth_clkfreq;
 };
 
 #endif /* !LINUX_NET_ETHOC_H */
diff --git a/include/net/ipx.h b/include/net/ipx.h
index 9e9e354..0143180 100644
--- a/include/net/ipx.h
+++ b/include/net/ipx.h
@@ -140,6 +140,17 @@
 }
 
 void ipxitf_down(struct ipx_interface *intrfc);
+struct ipx_interface *ipxitf_find_using_net(__be32 net);
+int ipxitf_send(struct ipx_interface *intrfc, struct sk_buff *skb, char *node);
+__be16 ipx_cksum(struct ipxhdr *packet, int length);
+int ipxrtr_add_route(__be32 network, struct ipx_interface *intrfc,
+		     unsigned char *node);
+void ipxrtr_del_routes(struct ipx_interface *intrfc);
+int ipxrtr_route_packet(struct sock *sk, struct sockaddr_ipx *usipx,
+			struct iovec *iov, size_t len, int noblock);
+int ipxrtr_route_skb(struct sk_buff *skb);
+struct ipx_route *ipxrtr_lookup(__be32 net);
+int ipxrtr_ioctl(unsigned int cmd, void __user *arg);
 
 static __inline__ void ipxitf_put(struct ipx_interface *intrfc)
 {
diff --git a/include/net/net_namespace.h b/include/net/net_namespace.h
index da68c9a..991dcd9 100644
--- a/include/net/net_namespace.h
+++ b/include/net/net_namespace.h
@@ -162,6 +162,14 @@
 struct net *get_net_ns_by_pid(pid_t pid);
 struct net *get_net_ns_by_fd(int pid);
 
+#ifdef CONFIG_SYSCTL
+void ipx_register_sysctl(void);
+void ipx_unregister_sysctl(void);
+#else
+#define ipx_register_sysctl()
+#define ipx_unregister_sysctl()
+#endif
+
 #ifdef CONFIG_NET_NS
 void __put_net(struct net *net);
 
diff --git a/include/net/netfilter/nf_conntrack.h b/include/net/netfilter/nf_conntrack.h
index 01ea6ee..b2ac624 100644
--- a/include/net/netfilter/nf_conntrack.h
+++ b/include/net/netfilter/nf_conntrack.h
@@ -284,6 +284,8 @@
 extern unsigned int nf_conntrack_hash_rnd;
 void init_nf_conntrack_hash_rnd(void);
 
+void nf_conntrack_tmpl_insert(struct net *net, struct nf_conn *tmpl);
+
 #define NF_CT_STAT_INC(net, count)	  __this_cpu_inc((net)->ct.stat->count)
 #define NF_CT_STAT_INC_ATOMIC(net, count) this_cpu_inc((net)->ct.stat->count)
 
diff --git a/include/net/netfilter/nf_tables.h b/include/net/netfilter/nf_tables.h
index 57c8ff7..e7e14ff 100644
--- a/include/net/netfilter/nf_tables.h
+++ b/include/net/netfilter/nf_tables.h
@@ -252,6 +252,7 @@
  *	@owner: module reference
  *	@policy: netlink attribute policy
  *	@maxattr: highest netlink attribute number
+ *	@family: address family for AF-specific types
  */
 struct nft_expr_type {
 	const struct nft_expr_ops	*(*select_ops)(const struct nft_ctx *,
@@ -262,6 +263,7 @@
 	struct module			*owner;
 	const struct nla_policy		*policy;
 	unsigned int			maxattr;
+	u8				family;
 };
 
 /**
@@ -320,7 +322,6 @@
  *	struct nft_rule - nf_tables rule
  *
  *	@list: used internally
- *	@rcu_head: used internally for rcu
  *	@handle: rule handle
  *	@genmask: generation mask
  *	@dlen: length of expression data
@@ -328,7 +329,6 @@
  */
 struct nft_rule {
 	struct list_head		list;
-	struct rcu_head			rcu_head;
 	u64				handle:46,
 					genmask:2,
 					dlen:16;
@@ -389,7 +389,6 @@
  *
  *	@rules: list of rules in the chain
  *	@list: used internally
- *	@rcu_head: used internally
  *	@net: net namespace that this chain belongs to
  *	@table: table that this chain belongs to
  *	@handle: chain handle
@@ -401,7 +400,6 @@
 struct nft_chain {
 	struct list_head		rules;
 	struct list_head		list;
-	struct rcu_head			rcu_head;
 	struct net			*net;
 	struct nft_table		*table;
 	u64				handle;
@@ -529,6 +527,9 @@
 #define MODULE_ALIAS_NFT_CHAIN(family, name) \
 	MODULE_ALIAS("nft-chain-" __stringify(family) "-" name)
 
+#define MODULE_ALIAS_NFT_AF_EXPR(family, name) \
+	MODULE_ALIAS("nft-expr-" __stringify(family) "-" name)
+
 #define MODULE_ALIAS_NFT_EXPR(name) \
 	MODULE_ALIAS("nft-expr-" name)
 
diff --git a/include/net/netfilter/nft_reject.h b/include/net/netfilter/nft_reject.h
new file mode 100644
index 0000000..36b0da2
--- /dev/null
+++ b/include/net/netfilter/nft_reject.h
@@ -0,0 +1,25 @@
+#ifndef _NFT_REJECT_H_
+#define _NFT_REJECT_H_
+
+struct nft_reject {
+	enum nft_reject_types	type:8;
+	u8			icmp_code;
+};
+
+extern const struct nla_policy nft_reject_policy[];
+
+int nft_reject_init(const struct nft_ctx *ctx,
+		    const struct nft_expr *expr,
+		    const struct nlattr * const tb[]);
+
+int nft_reject_dump(struct sk_buff *skb, const struct nft_expr *expr);
+
+void nft_reject_ipv4_eval(const struct nft_expr *expr,
+			  struct nft_data data[NFT_REG_MAX + 1],
+			  const struct nft_pktinfo *pkt);
+
+void nft_reject_ipv6_eval(const struct nft_expr *expr,
+			  struct nft_data data[NFT_REG_MAX + 1],
+			  const struct nft_pktinfo *pkt);
+
+#endif
diff --git a/include/net/sctp/structs.h b/include/net/sctp/structs.h
index d992ca3..6ee76c8 100644
--- a/include/net/sctp/structs.h
+++ b/include/net/sctp/structs.h
@@ -1653,17 +1653,6 @@
 	/* This is the last advertised value of rwnd over a SACK chunk. */
 	__u32 a_rwnd;
 
-	/* Number of bytes by which the rwnd has slopped.  The rwnd is allowed
-	 * to slop over a maximum of the association's frag_point.
-	 */
-	__u32 rwnd_over;
-
-	/* Keeps treack of rwnd pressure.  This happens when we have
-	 * a window, but not recevie buffer (i.e small packets).  This one
-	 * is releases slowly (1 PMTU at a time ).
-	 */
-	__u32 rwnd_press;
-
 	/* This is the sndbuf size in use for the association.
 	 * This corresponds to the sndbuf size for the association,
 	 * as specified in the sk->sndbuf.
@@ -1892,8 +1881,7 @@
 __u32 sctp_association_get_next_tsn(struct sctp_association *);
 
 void sctp_assoc_sync_pmtu(struct sock *, struct sctp_association *);
-void sctp_assoc_rwnd_increase(struct sctp_association *, unsigned int);
-void sctp_assoc_rwnd_decrease(struct sctp_association *, unsigned int);
+void sctp_assoc_rwnd_update(struct sctp_association *, bool);
 void sctp_assoc_set_primary(struct sctp_association *,
 			    struct sctp_transport *);
 void sctp_assoc_del_nonprimary_peers(struct sctp_association *,
diff --git a/include/rdma/ib_verbs.h b/include/rdma/ib_verbs.h
index 8d4a1c0..6793f32 100644
--- a/include/rdma/ib_verbs.h
+++ b/include/rdma/ib_verbs.h
@@ -226,7 +226,8 @@
 	IB_PORT_CAP_MASK_NOTICE_SUP		= 1 << 22,
 	IB_PORT_BOOT_MGMT_SUP			= 1 << 23,
 	IB_PORT_LINK_LATENCY_SUP		= 1 << 24,
-	IB_PORT_CLIENT_REG_SUP			= 1 << 25
+	IB_PORT_CLIENT_REG_SUP			= 1 << 25,
+	IB_PORT_IP_BASED_GIDS			= 1 << 26
 };
 
 enum ib_port_width {
diff --git a/include/scsi/scsi.h b/include/scsi/scsi.h
index 66d42ed..0a4edfe 100644
--- a/include/scsi/scsi.h
+++ b/include/scsi/scsi.h
@@ -155,6 +155,7 @@
 /* values for service action in */
 #define	SAI_READ_CAPACITY_16  0x10
 #define SAI_GET_LBA_STATUS    0x12
+#define SAI_REPORT_REFERRALS  0x13
 /* values for VARIABLE_LENGTH_CMD service action codes
  * see spc4r17 Section D.3.5, table D.7 and D.8 */
 #define VLC_SA_RECEIVE_CREDENTIAL 0x1800
diff --git a/include/target/iscsi/iscsi_transport.h b/include/target/iscsi/iscsi_transport.h
index a12589c..ae5a171 100644
--- a/include/target/iscsi/iscsi_transport.h
+++ b/include/target/iscsi/iscsi_transport.h
@@ -94,7 +94,7 @@
 /*
  * From iscsi_target_util.c
  */
-extern struct iscsi_cmd *iscsit_allocate_cmd(struct iscsi_conn *, gfp_t);
+extern struct iscsi_cmd *iscsit_allocate_cmd(struct iscsi_conn *, int);
 extern int iscsit_sequence_cmd(struct iscsi_conn *, struct iscsi_cmd *,
 			       unsigned char *, __be32);
 extern void iscsit_release_cmd(struct iscsi_cmd *);
diff --git a/include/target/target_core_backend.h b/include/target/target_core_backend.h
index 39e0114..7020e33 100644
--- a/include/target/target_core_backend.h
+++ b/include/target/target_core_backend.h
@@ -41,6 +41,9 @@
 	unsigned int (*get_io_opt)(struct se_device *);
 	unsigned char *(*get_sense_buffer)(struct se_cmd *);
 	bool (*get_write_cache)(struct se_device *);
+	int (*init_prot)(struct se_device *);
+	int (*format_prot)(struct se_device *);
+	void (*free_prot)(struct se_device *);
 };
 
 struct sbc_ops {
@@ -70,6 +73,10 @@
 	sense_reason_t (*do_unmap_fn)(struct se_cmd *cmd, void *priv,
 				      sector_t lba, sector_t nolb),
 	void *priv);
+sense_reason_t	sbc_dif_verify_write(struct se_cmd *, sector_t, unsigned int,
+				     unsigned int, struct scatterlist *, int);
+sense_reason_t	sbc_dif_verify_read(struct se_cmd *, sector_t, unsigned int,
+				    unsigned int, struct scatterlist *, int);
 
 void	transport_set_vpd_proto_id(struct t10_vpd *, unsigned char *);
 int	transport_set_vpd_assoc(struct t10_vpd *, unsigned char *);
diff --git a/include/target/target_core_base.h b/include/target/target_core_base.h
index 321301c..1772fad 100644
--- a/include/target/target_core_base.h
+++ b/include/target/target_core_base.h
@@ -37,6 +37,9 @@
 /* Used by transport_send_check_condition_and_sense() */
 #define SPC_SENSE_KEY_OFFSET			2
 #define SPC_ADD_SENSE_LEN_OFFSET		7
+#define SPC_DESC_TYPE_OFFSET			8
+#define SPC_ADDITIONAL_DESC_LEN_OFFSET		9
+#define SPC_VALIDITY_OFFSET			10
 #define SPC_ASC_KEY_OFFSET			12
 #define SPC_ASCQ_KEY_OFFSET			13
 #define TRANSPORT_IQN_LEN			224
@@ -112,7 +115,7 @@
 /* Queue Algorithm Modifier default for restricted reordering in control mode page */
 #define DA_EMULATE_REST_REORD			0
 
-#define SE_INQUIRY_BUF				512
+#define SE_INQUIRY_BUF				1024
 #define SE_MODE_PAGE_BUF			512
 #define SE_SENSE_BUF				96
 
@@ -205,6 +208,9 @@
 	TCM_OUT_OF_RESOURCES			= R(0x12),
 	TCM_PARAMETER_LIST_LENGTH_ERROR		= R(0x13),
 	TCM_MISCOMPARE_VERIFY			= R(0x14),
+	TCM_LOGICAL_BLOCK_GUARD_CHECK_FAILED	= R(0x15),
+	TCM_LOGICAL_BLOCK_APP_TAG_CHECK_FAILED	= R(0x16),
+	TCM_LOGICAL_BLOCK_REF_TAG_CHECK_FAILED	= R(0x17),
 #undef R
 };
 
@@ -247,10 +253,28 @@
 
 struct se_cmd;
 
+struct t10_alua_lba_map_member {
+	struct list_head lba_map_mem_list;
+	int lba_map_mem_alua_state;
+	int lba_map_mem_alua_pg_id;
+};
+
+struct t10_alua_lba_map {
+	u64 lba_map_first_lba;
+	u64 lba_map_last_lba;
+	struct list_head lba_map_list;
+	struct list_head lba_map_mem_list;
+};
+
 struct t10_alua {
 	/* ALUA Target Port Group ID */
 	u16	alua_tg_pt_gps_counter;
 	u32	alua_tg_pt_gps_count;
+	/* Referrals support */
+	spinlock_t lba_map_lock;
+	u32     lba_map_segment_size;
+	u32     lba_map_segment_multiplier;
+	struct list_head lba_map_list;
 	spinlock_t tg_pt_gps_lock;
 	struct se_device *t10_dev;
 	/* Used for default ALUA Target Port Group */
@@ -284,6 +308,8 @@
 	u16	tg_pt_gp_id;
 	int	tg_pt_gp_valid_id;
 	int	tg_pt_gp_alua_supported_states;
+	int	tg_pt_gp_alua_pending_state;
+	int	tg_pt_gp_alua_previous_state;
 	int	tg_pt_gp_alua_access_status;
 	int	tg_pt_gp_alua_access_type;
 	int	tg_pt_gp_nonop_delay_msecs;
@@ -291,9 +317,6 @@
 	int	tg_pt_gp_implicit_trans_secs;
 	int	tg_pt_gp_pref;
 	int	tg_pt_gp_write_metadata;
-	/* Used by struct t10_alua_tg_pt_gp->tg_pt_gp_md_buf_len */
-#define ALUA_MD_BUF_LEN				1024
-	u32	tg_pt_gp_md_buf_len;
 	u32	tg_pt_gp_members;
 	atomic_t tg_pt_gp_alua_access_state;
 	atomic_t tg_pt_gp_ref_cnt;
@@ -303,6 +326,10 @@
 	struct config_group tg_pt_gp_group;
 	struct list_head tg_pt_gp_list;
 	struct list_head tg_pt_gp_mem_list;
+	struct se_port *tg_pt_gp_alua_port;
+	struct se_node_acl *tg_pt_gp_alua_nacl;
+	struct delayed_work tg_pt_gp_transition_work;
+	struct completion *tg_pt_gp_transition_complete;
 };
 
 struct t10_alua_tg_pt_gp_member {
@@ -414,6 +441,34 @@
 	struct list_head	tmr_list;
 };
 
+enum target_prot_op {
+	TARGET_PROT_NORMAL = 0,
+	TARGET_PROT_DIN_INSERT,
+	TARGET_PROT_DOUT_INSERT,
+	TARGET_PROT_DIN_STRIP,
+	TARGET_PROT_DOUT_STRIP,
+	TARGET_PROT_DIN_PASS,
+	TARGET_PROT_DOUT_PASS,
+};
+
+enum target_prot_ho {
+	PROT_SEPERATED,
+	PROT_INTERLEAVED,
+};
+
+enum target_prot_type {
+	TARGET_DIF_TYPE0_PROT,
+	TARGET_DIF_TYPE1_PROT,
+	TARGET_DIF_TYPE2_PROT,
+	TARGET_DIF_TYPE3_PROT,
+};
+
+struct se_dif_v1_tuple {
+	__be16			guard_tag;
+	__be16			app_tag;
+	__be32			ref_tag;
+};
+
 struct se_cmd {
 	/* SAM response code being sent to initiator */
 	u8			scsi_status;
@@ -470,7 +525,6 @@
 #define CMD_T_COMPLETE		(1 << 2)
 #define CMD_T_SENT		(1 << 4)
 #define CMD_T_STOP		(1 << 5)
-#define CMD_T_FAILED		(1 << 6)
 #define CMD_T_DEV_ACTIVE	(1 << 7)
 #define CMD_T_REQUEST_STOP	(1 << 8)
 #define CMD_T_BUSY		(1 << 9)
@@ -497,14 +551,24 @@
 	void			*priv;
 
 	/* Used for lun->lun_ref counting */
-	bool			lun_ref_active;
+	int			lun_ref_active;
+
+	/* DIF related members */
+	enum target_prot_op	prot_op;
+	enum target_prot_type	prot_type;
+	u32			prot_length;
+	u32			reftag_seed;
+	struct scatterlist	*t_prot_sg;
+	unsigned int		t_prot_nents;
+	enum target_prot_ho	prot_handover;
+	sense_reason_t		pi_err;
+	sector_t		bad_sector;
 };
 
 struct se_ua {
 	u8			ua_asc;
 	u8			ua_ascq;
 	struct se_node_acl	*ua_nacl;
-	struct list_head	ua_dev_list;
 	struct list_head	ua_nacl_list;
 };
 
@@ -605,6 +669,9 @@
 	int		emulate_tpws;
 	int		emulate_caw;
 	int		emulate_3pc;
+	int		pi_prot_format;
+	enum target_prot_type pi_prot_type;
+	enum target_prot_type hw_pi_prot_type;
 	int		enforce_pr_isids;
 	int		is_nonrot;
 	int		emulate_rest_reord;
@@ -736,6 +803,8 @@
 	/* Linked list for struct se_hba struct se_device list */
 	struct list_head	dev_list;
 	struct se_lun		xcopy_lun;
+	/* Protection Information */
+	int			prot_length;
 };
 
 struct se_hba {
diff --git a/include/target/target_core_fabric.h b/include/target/target_core_fabric.h
index 4cf4fda..0218d68 100644
--- a/include/target/target_core_fabric.h
+++ b/include/target/target_core_fabric.h
@@ -105,7 +105,8 @@
 sense_reason_t target_setup_cmd_from_cdb(struct se_cmd *, unsigned char *);
 int	target_submit_cmd_map_sgls(struct se_cmd *, struct se_session *,
 		unsigned char *, unsigned char *, u32, u32, int, int, int,
-		struct scatterlist *, u32, struct scatterlist *, u32);
+		struct scatterlist *, u32, struct scatterlist *, u32,
+		struct scatterlist *, u32);
 int	target_submit_cmd(struct se_cmd *, struct se_session *, unsigned char *,
 		unsigned char *, u32, u32, int, int, int);
 int	target_submit_tmr(struct se_cmd *se_cmd, struct se_session *se_sess,
diff --git a/include/trace/events/bcache.h b/include/trace/events/bcache.h
index e2b9576..7110897 100644
--- a/include/trace/events/bcache.h
+++ b/include/trace/events/bcache.h
@@ -24,10 +24,10 @@
 		__entry->dev		= bio->bi_bdev->bd_dev;
 		__entry->orig_major	= d->disk->major;
 		__entry->orig_minor	= d->disk->first_minor;
-		__entry->sector		= bio->bi_sector;
-		__entry->orig_sector	= bio->bi_sector - 16;
-		__entry->nr_sector	= bio->bi_size >> 9;
-		blk_fill_rwbs(__entry->rwbs, bio->bi_rw, bio->bi_size);
+		__entry->sector		= bio->bi_iter.bi_sector;
+		__entry->orig_sector	= bio->bi_iter.bi_sector - 16;
+		__entry->nr_sector	= bio->bi_iter.bi_size >> 9;
+		blk_fill_rwbs(__entry->rwbs, bio->bi_rw, bio->bi_iter.bi_size);
 	),
 
 	TP_printk("%d,%d %s %llu + %u (from %d,%d @ %llu)",
@@ -99,9 +99,9 @@
 
 	TP_fast_assign(
 		__entry->dev		= bio->bi_bdev->bd_dev;
-		__entry->sector		= bio->bi_sector;
-		__entry->nr_sector	= bio->bi_size >> 9;
-		blk_fill_rwbs(__entry->rwbs, bio->bi_rw, bio->bi_size);
+		__entry->sector		= bio->bi_iter.bi_sector;
+		__entry->nr_sector	= bio->bi_iter.bi_size >> 9;
+		blk_fill_rwbs(__entry->rwbs, bio->bi_rw, bio->bi_iter.bi_size);
 	),
 
 	TP_printk("%d,%d  %s %llu + %u",
@@ -134,9 +134,9 @@
 
 	TP_fast_assign(
 		__entry->dev		= bio->bi_bdev->bd_dev;
-		__entry->sector		= bio->bi_sector;
-		__entry->nr_sector	= bio->bi_size >> 9;
-		blk_fill_rwbs(__entry->rwbs, bio->bi_rw, bio->bi_size);
+		__entry->sector		= bio->bi_iter.bi_sector;
+		__entry->nr_sector	= bio->bi_iter.bi_size >> 9;
+		blk_fill_rwbs(__entry->rwbs, bio->bi_rw, bio->bi_iter.bi_size);
 		__entry->cache_hit = hit;
 		__entry->bypass = bypass;
 	),
@@ -162,9 +162,9 @@
 
 	TP_fast_assign(
 		__entry->dev		= bio->bi_bdev->bd_dev;
-		__entry->sector		= bio->bi_sector;
-		__entry->nr_sector	= bio->bi_size >> 9;
-		blk_fill_rwbs(__entry->rwbs, bio->bi_rw, bio->bi_size);
+		__entry->sector		= bio->bi_iter.bi_sector;
+		__entry->nr_sector	= bio->bi_iter.bi_size >> 9;
+		blk_fill_rwbs(__entry->rwbs, bio->bi_rw, bio->bi_iter.bi_size);
 		__entry->writeback = writeback;
 		__entry->bypass = bypass;
 	),
@@ -247,7 +247,7 @@
 	TP_fast_assign(
 		__entry->bucket	= PTR_BUCKET_NR(b->c, &b->key, 0);
 		__entry->block	= b->written;
-		__entry->keys	= b->sets[b->nsets].data->keys;
+		__entry->keys	= b->keys.set[b->keys.nsets].data->keys;
 	),
 
 	TP_printk("bucket %zu", __entry->bucket)
@@ -411,7 +411,7 @@
 	),
 
 	TP_fast_assign(
-		__entry->free		= fifo_used(&ca->free);
+		__entry->free		= fifo_used(&ca->free[RESERVE_NONE]);
 		__entry->free_inc	= fifo_used(&ca->free_inc);
 		__entry->free_inc_size	= ca->free_inc.size;
 		__entry->unused		= fifo_used(&ca->unused);
@@ -422,8 +422,8 @@
 );
 
 TRACE_EVENT(bcache_alloc_fail,
-	TP_PROTO(struct cache *ca),
-	TP_ARGS(ca),
+	TP_PROTO(struct cache *ca, unsigned reserve),
+	TP_ARGS(ca, reserve),
 
 	TP_STRUCT__entry(
 		__field(unsigned,	free			)
@@ -433,7 +433,7 @@
 	),
 
 	TP_fast_assign(
-		__entry->free		= fifo_used(&ca->free);
+		__entry->free		= fifo_used(&ca->free[reserve]);
 		__entry->free_inc	= fifo_used(&ca->free_inc);
 		__entry->unused		= fifo_used(&ca->unused);
 		__entry->blocked	= atomic_read(&ca->set->prio_blocked);
diff --git a/include/trace/events/block.h b/include/trace/events/block.h
index 4c2301d..e76ae19 100644
--- a/include/trace/events/block.h
+++ b/include/trace/events/block.h
@@ -243,9 +243,9 @@
 	TP_fast_assign(
 		__entry->dev		= bio->bi_bdev ?
 					  bio->bi_bdev->bd_dev : 0;
-		__entry->sector		= bio->bi_sector;
+		__entry->sector		= bio->bi_iter.bi_sector;
 		__entry->nr_sector	= bio_sectors(bio);
-		blk_fill_rwbs(__entry->rwbs, bio->bi_rw, bio->bi_size);
+		blk_fill_rwbs(__entry->rwbs, bio->bi_rw, bio->bi_iter.bi_size);
 		memcpy(__entry->comm, current->comm, TASK_COMM_LEN);
 	),
 
@@ -280,10 +280,10 @@
 
 	TP_fast_assign(
 		__entry->dev		= bio->bi_bdev->bd_dev;
-		__entry->sector		= bio->bi_sector;
+		__entry->sector		= bio->bi_iter.bi_sector;
 		__entry->nr_sector	= bio_sectors(bio);
 		__entry->error		= error;
-		blk_fill_rwbs(__entry->rwbs, bio->bi_rw, bio->bi_size);
+		blk_fill_rwbs(__entry->rwbs, bio->bi_rw, bio->bi_iter.bi_size);
 	),
 
 	TP_printk("%d,%d %s %llu + %u [%d]",
@@ -308,9 +308,9 @@
 
 	TP_fast_assign(
 		__entry->dev		= bio->bi_bdev->bd_dev;
-		__entry->sector		= bio->bi_sector;
+		__entry->sector		= bio->bi_iter.bi_sector;
 		__entry->nr_sector	= bio_sectors(bio);
-		blk_fill_rwbs(__entry->rwbs, bio->bi_rw, bio->bi_size);
+		blk_fill_rwbs(__entry->rwbs, bio->bi_rw, bio->bi_iter.bi_size);
 		memcpy(__entry->comm, current->comm, TASK_COMM_LEN);
 	),
 
@@ -375,9 +375,9 @@
 
 	TP_fast_assign(
 		__entry->dev		= bio->bi_bdev->bd_dev;
-		__entry->sector		= bio->bi_sector;
+		__entry->sector		= bio->bi_iter.bi_sector;
 		__entry->nr_sector	= bio_sectors(bio);
-		blk_fill_rwbs(__entry->rwbs, bio->bi_rw, bio->bi_size);
+		blk_fill_rwbs(__entry->rwbs, bio->bi_rw, bio->bi_iter.bi_size);
 		memcpy(__entry->comm, current->comm, TASK_COMM_LEN);
 	),
 
@@ -403,7 +403,7 @@
 
 	TP_fast_assign(
 		__entry->dev		= bio ? bio->bi_bdev->bd_dev : 0;
-		__entry->sector		= bio ? bio->bi_sector : 0;
+		__entry->sector		= bio ? bio->bi_iter.bi_sector : 0;
 		__entry->nr_sector	= bio ? bio_sectors(bio) : 0;
 		blk_fill_rwbs(__entry->rwbs,
 			      bio ? bio->bi_rw : 0, __entry->nr_sector);
@@ -538,9 +538,9 @@
 
 	TP_fast_assign(
 		__entry->dev		= bio->bi_bdev->bd_dev;
-		__entry->sector		= bio->bi_sector;
+		__entry->sector		= bio->bi_iter.bi_sector;
 		__entry->new_sector	= new_sector;
-		blk_fill_rwbs(__entry->rwbs, bio->bi_rw, bio->bi_size);
+		blk_fill_rwbs(__entry->rwbs, bio->bi_rw, bio->bi_iter.bi_size);
 		memcpy(__entry->comm, current->comm, TASK_COMM_LEN);
 	),
 
@@ -579,11 +579,11 @@
 
 	TP_fast_assign(
 		__entry->dev		= bio->bi_bdev->bd_dev;
-		__entry->sector		= bio->bi_sector;
+		__entry->sector		= bio->bi_iter.bi_sector;
 		__entry->nr_sector	= bio_sectors(bio);
 		__entry->old_dev	= dev;
 		__entry->old_sector	= from;
-		blk_fill_rwbs(__entry->rwbs, bio->bi_rw, bio->bi_size);
+		blk_fill_rwbs(__entry->rwbs, bio->bi_rw, bio->bi_iter.bi_size);
 	),
 
 	TP_printk("%d,%d %s %llu + %u <- (%d,%d) %llu",
diff --git a/include/trace/events/btrfs.h b/include/trace/events/btrfs.h
index 4832d75..3176cdc 100644
--- a/include/trace/events/btrfs.h
+++ b/include/trace/events/btrfs.h
@@ -208,17 +208,18 @@
 		  __entry->refs, __entry->compress_type)
 );
 
-#define show_ordered_flags(flags)					\
-	__print_symbolic(flags,						\
-		{ BTRFS_ORDERED_IO_DONE, 	"IO_DONE" 	},	\
-		{ BTRFS_ORDERED_COMPLETE, 	"COMPLETE" 	},	\
-		{ BTRFS_ORDERED_NOCOW, 		"NOCOW" 	},	\
-		{ BTRFS_ORDERED_COMPRESSED, 	"COMPRESSED" 	},	\
-		{ BTRFS_ORDERED_PREALLOC, 	"PREALLOC" 	},	\
-		{ BTRFS_ORDERED_DIRECT, 	"DIRECT" 	},	\
-		{ BTRFS_ORDERED_IOERR, 		"IOERR" 	},	\
-		{ BTRFS_ORDERED_UPDATED_ISIZE, 	"UPDATED_ISIZE"	},	\
-		{ BTRFS_ORDERED_LOGGED_CSUM, 	"LOGGED_CSUM"	})
+#define show_ordered_flags(flags)					   \
+	__print_flags(flags, "|",					   \
+		{ (1 << BTRFS_ORDERED_IO_DONE), 	"IO_DONE" 	}, \
+		{ (1 << BTRFS_ORDERED_COMPLETE), 	"COMPLETE" 	}, \
+		{ (1 << BTRFS_ORDERED_NOCOW), 		"NOCOW" 	}, \
+		{ (1 << BTRFS_ORDERED_COMPRESSED), 	"COMPRESSED" 	}, \
+		{ (1 << BTRFS_ORDERED_PREALLOC), 	"PREALLOC" 	}, \
+		{ (1 << BTRFS_ORDERED_DIRECT),	 	"DIRECT" 	}, \
+		{ (1 << BTRFS_ORDERED_IOERR), 		"IOERR" 	}, \
+		{ (1 << BTRFS_ORDERED_UPDATED_ISIZE), 	"UPDATED_ISIZE"	}, \
+		{ (1 << BTRFS_ORDERED_LOGGED_CSUM), 	"LOGGED_CSUM"	}, \
+		{ (1 << BTRFS_ORDERED_TRUNCATED), 	"TRUNCATED"	})
 
 
 DECLARE_EVENT_CLASS(btrfs__ordered_extent,
diff --git a/include/trace/events/f2fs.h b/include/trace/events/f2fs.h
index 3b9f28d..67f38fa 100644
--- a/include/trace/events/f2fs.h
+++ b/include/trace/events/f2fs.h
@@ -629,8 +629,8 @@
 		__entry->dev		= sb->s_dev;
 		__entry->rw		= rw;
 		__entry->type		= type;
-		__entry->sector		= bio->bi_sector;
-		__entry->size		= bio->bi_size;
+		__entry->sector		= bio->bi_iter.bi_sector;
+		__entry->size		= bio->bi_iter.bi_size;
 	),
 
 	TP_printk("dev = (%d,%d), %s%s, %s, sector = %lld, size = %u",
diff --git a/include/trace/events/power.h b/include/trace/events/power.h
index 9e9475c..e5bf9a7 100644
--- a/include/trace/events/power.h
+++ b/include/trace/events/power.h
@@ -42,7 +42,6 @@
 		u32 state,
 		u64 mperf,
 		u64 aperf,
-		u32 energy,
 		u32 freq
 		),
 
@@ -51,7 +50,6 @@
 		state,
 		mperf,
 		aperf,
-		energy,
 		freq
 		),
 
@@ -61,7 +59,6 @@
 		__field(u32, state)
 		__field(u64, mperf)
 		__field(u64, aperf)
-		__field(u32, energy)
 		__field(u32, freq)
 
 	),
@@ -72,17 +69,15 @@
 		__entry->state = state;
 		__entry->mperf = mperf;
 		__entry->aperf = aperf;
-		__entry->energy = energy;
 		__entry->freq = freq;
 		),
 
-	TP_printk("core_busy=%lu scaled=%lu state=%lu mperf=%llu aperf=%llu energy=%lu freq=%lu ",
+	TP_printk("core_busy=%lu scaled=%lu state=%lu mperf=%llu aperf=%llu freq=%lu ",
 		(unsigned long)__entry->core_busy,
 		(unsigned long)__entry->scaled_busy,
 		(unsigned long)__entry->state,
 		(unsigned long long)__entry->mperf,
 		(unsigned long long)__entry->aperf,
-		(unsigned long)__entry->energy,
 		(unsigned long)__entry->freq
 		)
 
diff --git a/include/trace/events/v4l2.h b/include/trace/events/v4l2.h
new file mode 100644
index 0000000..ef94eca
--- /dev/null
+++ b/include/trace/events/v4l2.h
@@ -0,0 +1,157 @@
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM v4l2
+
+#if !defined(_TRACE_V4L2_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _TRACE_V4L2_H
+
+#include <linux/tracepoint.h>
+
+#define show_type(type)							       \
+	__print_symbolic(type,						       \
+		{ V4L2_BUF_TYPE_VIDEO_CAPTURE,	      "VIDEO_CAPTURE" },       \
+		{ V4L2_BUF_TYPE_VIDEO_OUTPUT,	      "VIDEO_OUTPUT" },	       \
+		{ V4L2_BUF_TYPE_VIDEO_OVERLAY,	      "VIDEO_OVERLAY" },       \
+		{ V4L2_BUF_TYPE_VBI_CAPTURE,	      "VBI_CAPTURE" },	       \
+		{ V4L2_BUF_TYPE_VBI_OUTPUT,	      "VBI_OUTPUT" },	       \
+		{ V4L2_BUF_TYPE_SLICED_VBI_CAPTURE,   "SLICED_VBI_CAPTURE" },  \
+		{ V4L2_BUF_TYPE_SLICED_VBI_OUTPUT,    "SLICED_VBI_OUTPUT" },   \
+		{ V4L2_BUF_TYPE_VIDEO_OUTPUT_OVERLAY, "VIDEO_OUTPUT_OVERLAY" },\
+		{ V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE, "VIDEO_CAPTURE_MPLANE" },\
+		{ V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE,  "VIDEO_OUTPUT_MPLANE" }, \
+		{ V4L2_BUF_TYPE_PRIVATE,	      "PRIVATE" })
+
+#define show_field(field)						\
+	__print_symbolic(field,						\
+		{ V4L2_FIELD_ANY,		"ANY" },		\
+		{ V4L2_FIELD_NONE,		"NONE" },		\
+		{ V4L2_FIELD_TOP,		"TOP" },		\
+		{ V4L2_FIELD_BOTTOM,		"BOTTOM" },		\
+		{ V4L2_FIELD_INTERLACED,	"INTERLACED" },		\
+		{ V4L2_FIELD_SEQ_TB,		"SEQ_TB" },		\
+		{ V4L2_FIELD_SEQ_BT,		"SEQ_BT" },		\
+		{ V4L2_FIELD_ALTERNATE,		"ALTERNATE" },		\
+		{ V4L2_FIELD_INTERLACED_TB,	"INTERLACED_TB" },      \
+		{ V4L2_FIELD_INTERLACED_BT,	"INTERLACED_BT" })
+
+#define show_timecode_type(type)					\
+	__print_symbolic(type,						\
+		{ V4L2_TC_TYPE_24FPS,		"24FPS" },		\
+		{ V4L2_TC_TYPE_25FPS,		"25FPS" },		\
+		{ V4L2_TC_TYPE_30FPS,		"30FPS" },		\
+		{ V4L2_TC_TYPE_50FPS,		"50FPS" },		\
+		{ V4L2_TC_TYPE_60FPS,		"60FPS" })
+
+#define show_flags(flags)						      \
+	__print_flags(flags, "|",					      \
+		{ V4L2_BUF_FLAG_MAPPED,		     "MAPPED" },	      \
+		{ V4L2_BUF_FLAG_QUEUED,		     "QUEUED" },	      \
+		{ V4L2_BUF_FLAG_DONE,		     "DONE" },		      \
+		{ V4L2_BUF_FLAG_KEYFRAME,	     "KEYFRAME" },	      \
+		{ V4L2_BUF_FLAG_PFRAME,		     "PFRAME" },	      \
+		{ V4L2_BUF_FLAG_BFRAME,		     "BFRAME" },	      \
+		{ V4L2_BUF_FLAG_ERROR,		     "ERROR" },		      \
+		{ V4L2_BUF_FLAG_TIMECODE,	     "TIMECODE" },	      \
+		{ V4L2_BUF_FLAG_PREPARED,	     "PREPARED" },	      \
+		{ V4L2_BUF_FLAG_NO_CACHE_INVALIDATE, "NO_CACHE_INVALIDATE" }, \
+		{ V4L2_BUF_FLAG_NO_CACHE_CLEAN,	     "NO_CACHE_CLEAN" },      \
+		{ V4L2_BUF_FLAG_TIMESTAMP_MASK,	     "TIMESTAMP_MASK" },      \
+		{ V4L2_BUF_FLAG_TIMESTAMP_UNKNOWN,   "TIMESTAMP_UNKNOWN" },   \
+		{ V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC, "TIMESTAMP_MONOTONIC" }, \
+		{ V4L2_BUF_FLAG_TIMESTAMP_COPY,	     "TIMESTAMP_COPY" })
+
+#define show_timecode_flags(flags)					  \
+	__print_flags(flags, "|",					  \
+		{ V4L2_TC_FLAG_DROPFRAME,       "DROPFRAME" },		  \
+		{ V4L2_TC_FLAG_COLORFRAME,      "COLORFRAME" },		  \
+		{ V4L2_TC_USERBITS_USERDEFINED,	"USERBITS_USERDEFINED" }, \
+		{ V4L2_TC_USERBITS_8BITCHARS,	"USERBITS_8BITCHARS" })
+
+#define V4L2_TRACE_EVENT(event_name)					\
+	TRACE_EVENT(event_name,						\
+		TP_PROTO(int minor, struct v4l2_buffer *buf),		\
+									\
+		TP_ARGS(minor, buf),					\
+									\
+		TP_STRUCT__entry(					\
+			__field(int, minor)				\
+			__field(u32, index)				\
+			__field(u32, type)				\
+			__field(u32, bytesused)				\
+			__field(u32, flags)				\
+			__field(u32, field)				\
+			__field(s64, timestamp)				\
+			__field(u32, timecode_type)			\
+			__field(u32, timecode_flags)			\
+			__field(u8, timecode_frames)			\
+			__field(u8, timecode_seconds)			\
+			__field(u8, timecode_minutes)			\
+			__field(u8, timecode_hours)			\
+			__field(u8, timecode_userbits0)			\
+			__field(u8, timecode_userbits1)			\
+			__field(u8, timecode_userbits2)			\
+			__field(u8, timecode_userbits3)			\
+			__field(u32, sequence)				\
+		),							\
+									\
+		TP_fast_assign(						\
+			__entry->minor = minor;				\
+			__entry->index = buf->index;			\
+			__entry->type = buf->type;			\
+			__entry->bytesused = buf->bytesused;		\
+			__entry->flags = buf->flags;			\
+			__entry->field = buf->field;			\
+			__entry->timestamp =				\
+				timeval_to_ns(&buf->timestamp);		\
+			__entry->timecode_type = buf->timecode.type;	\
+			__entry->timecode_flags = buf->timecode.flags;	\
+			__entry->timecode_frames =			\
+				buf->timecode.frames;			\
+			__entry->timecode_seconds =			\
+				buf->timecode.seconds;			\
+			__entry->timecode_minutes =			\
+				buf->timecode.minutes;			\
+			__entry->timecode_hours = buf->timecode.hours;	\
+			__entry->timecode_userbits0 =			\
+				buf->timecode.userbits[0];		\
+			__entry->timecode_userbits1 =			\
+				buf->timecode.userbits[1];		\
+			__entry->timecode_userbits2 =			\
+				buf->timecode.userbits[2];		\
+			__entry->timecode_userbits3 =			\
+				buf->timecode.userbits[3];		\
+			__entry->sequence = buf->sequence;		\
+		),							\
+									\
+		TP_printk("minor = %d, index = %u, type = %s, "		\
+			  "bytesused = %u, flags = %s, "		\
+			  "field = %s, timestamp = %llu, timecode = { "	\
+			  "type = %s, flags = %s, frames = %u, "	\
+			  "seconds = %u, minutes = %u, hours = %u, "	\
+			  "userbits = { %u %u %u %u } }, "		\
+			  "sequence = %u", __entry->minor,		\
+			  __entry->index, show_type(__entry->type),	\
+			  __entry->bytesused,				\
+			  show_flags(__entry->flags),			\
+			  show_field(__entry->field),			\
+			  __entry->timestamp,				\
+			  show_timecode_type(__entry->timecode_type),	\
+			  show_timecode_flags(__entry->timecode_flags),	\
+			  __entry->timecode_frames,			\
+			  __entry->timecode_seconds,			\
+			  __entry->timecode_minutes,			\
+			  __entry->timecode_hours,			\
+			  __entry->timecode_userbits0,			\
+			  __entry->timecode_userbits1,			\
+			  __entry->timecode_userbits2,			\
+			  __entry->timecode_userbits3,			\
+			  __entry->sequence				\
+		)							\
+	)
+
+V4L2_TRACE_EVENT(v4l2_dqbuf);
+V4L2_TRACE_EVENT(v4l2_qbuf);
+
+#endif /* if !defined(_TRACE_V4L2_H) || defined(TRACE_HEADER_MULTI_READ) */
+
+/* This part must be outside protection */
+#include <trace/define_trace.h>
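
Usage sketch (illustrative, not part of the patch): the TRACE_EVENT class above generates trace_v4l2_qbuf() and trace_v4l2_dqbuf() hooks that the V4L2 core can emit when a buffer is queued or dequeued. A hypothetical call site, assuming a struct video_device and a filled struct v4l2_buffer (one compilation unit must also define CREATE_TRACE_POINTS before including the header):

	#include <linux/videodev2.h>
	#include <media/v4l2-dev.h>
	#include <trace/events/v4l2.h>

	static void example_mark_qbuf(struct video_device *vdev,
				      struct v4l2_buffer *buf)
	{
		/* Emit the tracepoint defined above with the device minor
		 * number and the buffer being queued. */
		trace_v4l2_qbuf(vdev->minor, buf);
	}
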
diff --git a/include/uapi/asm-generic/ipcbuf.h b/include/uapi/asm-generic/ipcbuf.h
index 76982b2..3dbcc1e 100644
--- a/include/uapi/asm-generic/ipcbuf.h
+++ b/include/uapi/asm-generic/ipcbuf.h
@@ -27,8 +27,8 @@
 	unsigned char		__pad1[4 - sizeof(__kernel_mode_t)];
 	unsigned short		seq;
 	unsigned short		__pad2;
-	unsigned long		__unused1;
-	unsigned long		__unused2;
+	__kernel_ulong_t	__unused1;
+	__kernel_ulong_t	__unused2;
 };
 
 #endif /* __ASM_GENERIC_IPCBUF_H */
diff --git a/include/uapi/asm-generic/msgbuf.h b/include/uapi/asm-generic/msgbuf.h
index aec850d..f55ecc4 100644
--- a/include/uapi/asm-generic/msgbuf.h
+++ b/include/uapi/asm-generic/msgbuf.h
@@ -35,13 +35,13 @@
 #if __BITS_PER_LONG != 64
 	unsigned long	__unused3;
 #endif
-	unsigned long  msg_cbytes;	/* current number of bytes on queue */
-	unsigned long  msg_qnum;	/* number of messages in queue */
-	unsigned long  msg_qbytes;	/* max number of bytes on queue */
+	__kernel_ulong_t msg_cbytes;	/* current number of bytes on queue */
+	__kernel_ulong_t msg_qnum;	/* number of messages in queue */
+	__kernel_ulong_t msg_qbytes;	/* max number of bytes on queue */
 	__kernel_pid_t msg_lspid;	/* pid of last msgsnd */
 	__kernel_pid_t msg_lrpid;	/* last receive pid */
-	unsigned long  __unused4;
-	unsigned long  __unused5;
+	__kernel_ulong_t __unused4;
+	__kernel_ulong_t __unused5;
 };
 
 #endif /* __ASM_GENERIC_MSGBUF_H */
diff --git a/include/uapi/asm-generic/shmbuf.h b/include/uapi/asm-generic/shmbuf.h
index 5768fa6..7e9fb2f 100644
--- a/include/uapi/asm-generic/shmbuf.h
+++ b/include/uapi/asm-generic/shmbuf.h
@@ -39,21 +39,21 @@
 #endif
 	__kernel_pid_t		shm_cpid;	/* pid of creator */
 	__kernel_pid_t		shm_lpid;	/* pid of last operator */
-	unsigned long		shm_nattch;	/* no. of current attaches */
-	unsigned long		__unused4;
-	unsigned long		__unused5;
+	__kernel_ulong_t	shm_nattch;	/* no. of current attaches */
+	__kernel_ulong_t	__unused4;
+	__kernel_ulong_t	__unused5;
 };
 
 struct shminfo64 {
-	unsigned long	shmmax;
-	unsigned long	shmmin;
-	unsigned long	shmmni;
-	unsigned long	shmseg;
-	unsigned long	shmall;
-	unsigned long	__unused1;
-	unsigned long	__unused2;
-	unsigned long	__unused3;
-	unsigned long	__unused4;
+	__kernel_ulong_t	shmmax;
+	__kernel_ulong_t	shmmin;
+	__kernel_ulong_t	shmmni;
+	__kernel_ulong_t	shmseg;
+	__kernel_ulong_t	shmall;
+	__kernel_ulong_t	__unused1;
+	__kernel_ulong_t	__unused2;
+	__kernel_ulong_t	__unused3;
+	__kernel_ulong_t	__unused4;
 };
 
 #endif /* __ASM_GENERIC_SHMBUF_H */
diff --git a/include/uapi/drm/drm.h b/include/uapi/drm/drm.h
index 9b24d65..b06c8ed 100644
--- a/include/uapi/drm/drm.h
+++ b/include/uapi/drm/drm.h
@@ -181,7 +181,6 @@
 	_DRM_AGP = 3,		  /**< AGP/GART */
 	_DRM_SCATTER_GATHER = 4,  /**< Scatter/gather memory for PCI DMA */
 	_DRM_CONSISTENT = 5,	  /**< Consistent memory for PCI DMA */
-	_DRM_GEM = 6,		  /**< GEM object (obsolete) */
 };
 
 /**
@@ -620,6 +619,8 @@
 #define  DRM_PRIME_CAP_EXPORT		0x2
 #define DRM_CAP_TIMESTAMP_MONOTONIC	0x6
 #define DRM_CAP_ASYNC_PAGE_FLIP		0x7
+#define DRM_CAP_CURSOR_WIDTH		0x8
+#define DRM_CAP_CURSOR_HEIGHT		0x9
 
 /** DRM_IOCTL_GET_CAP ioctl argument type */
 struct drm_get_cap {
diff --git a/include/uapi/drm/i915_drm.h b/include/uapi/drm/i915_drm.h
index 3a4e97b..126bfaa 100644
--- a/include/uapi/drm/i915_drm.h
+++ b/include/uapi/drm/i915_drm.h
@@ -222,6 +222,7 @@
 #define DRM_I915_GEM_SET_CACHING	0x2f
 #define DRM_I915_GEM_GET_CACHING	0x30
 #define DRM_I915_REG_READ		0x31
+#define DRM_I915_GET_RESET_STATS	0x32
 
 #define DRM_IOCTL_I915_INIT		DRM_IOW( DRM_COMMAND_BASE + DRM_I915_INIT, drm_i915_init_t)
 #define DRM_IOCTL_I915_FLUSH		DRM_IO ( DRM_COMMAND_BASE + DRM_I915_FLUSH)
@@ -271,6 +272,7 @@
 #define DRM_IOCTL_I915_GEM_CONTEXT_CREATE	DRM_IOWR (DRM_COMMAND_BASE + DRM_I915_GEM_CONTEXT_CREATE, struct drm_i915_gem_context_create)
 #define DRM_IOCTL_I915_GEM_CONTEXT_DESTROY	DRM_IOW (DRM_COMMAND_BASE + DRM_I915_GEM_CONTEXT_DESTROY, struct drm_i915_gem_context_destroy)
 #define DRM_IOCTL_I915_REG_READ			DRM_IOWR (DRM_COMMAND_BASE + DRM_I915_REG_READ, struct drm_i915_reg_read)
+#define DRM_IOCTL_I915_GET_RESET_STATS		DRM_IOWR (DRM_COMMAND_BASE + DRM_I915_GET_RESET_STATS, struct drm_i915_reset_stats)
 
 /* Allow drivers to submit batchbuffers directly to hardware, relying
  * on the security mechanisms provided by hardware.
@@ -719,7 +721,7 @@
  */
 #define I915_EXEC_IS_PINNED		(1<<10)
 
-/** Provide a hint to the kernel that the command stream and auxilliary
+/** Provide a hint to the kernel that the command stream and auxiliary
  * state buffers already holds the correct presumed addresses and so the
  * relocation process may be skipped if no buffers need to be moved in
  * preparation for the execbuffer.
@@ -1030,4 +1032,21 @@
 	__u64 offset;
 	__u64 val; /* Return value */
 };
+
+struct drm_i915_reset_stats {
+	__u32 ctx_id;
+	__u32 flags;
+
+	/* All resets since boot/module reload, for all contexts */
+	__u32 reset_count;
+
+	/* Number of batches lost when active in GPU, for this context */
+	__u32 batch_active;
+
+	/* Number of batches lost pending for execution, for this context */
+	__u32 batch_pending;
+
+	__u32 pad;
+};
+
 #endif /* _UAPI_I915_DRM_H_ */
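
Usage sketch (illustrative, not part of the patch): userspace can query the new per-context reset statistics through libdrm's drmIoctl() wrapper; the file descriptor, context id and error handling below are assumptions.

	#include <string.h>
	#include <xf86drm.h>
	#include "i915_drm.h"

	/* Read the global reset count; ctx_id 0 addresses the default
	 * context.  Returns the count, or -1 on error. */
	static int query_reset_count(int drm_fd, unsigned int ctx_id)
	{
		struct drm_i915_reset_stats stats;

		memset(&stats, 0, sizeof(stats));
		stats.ctx_id = ctx_id;
		if (drmIoctl(drm_fd, DRM_IOCTL_I915_GET_RESET_STATS, &stats))
			return -1;
		return stats.reset_count;
	}
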
diff --git a/include/uapi/drm/radeon_drm.h b/include/uapi/drm/radeon_drm.h
index fe421e8..d9ea3a7 100644
--- a/include/uapi/drm/radeon_drm.h
+++ b/include/uapi/drm/radeon_drm.h
@@ -985,6 +985,8 @@
 #define RADEON_INFO_CIK_MACROTILE_MODE_ARRAY	0x18
 /* query the number of render backends */
 #define RADEON_INFO_SI_BACKEND_ENABLED_MASK	0x19
+/* max engine clock - needed for OpenCL */
+#define RADEON_INFO_MAX_SCLK		0x1a
 
 
 struct drm_radeon_info {
diff --git a/include/uapi/drm/vmwgfx_drm.h b/include/uapi/drm/vmwgfx_drm.h
index f854ca4..87792a5 100644
--- a/include/uapi/drm/vmwgfx_drm.h
+++ b/include/uapi/drm/vmwgfx_drm.h
@@ -28,6 +28,10 @@
 #ifndef __VMWGFX_DRM_H__
 #define __VMWGFX_DRM_H__
 
+#ifndef __KERNEL__
+#include <drm.h>
+#endif
+
 #define DRM_VMW_MAX_SURFACE_FACES 6
 #define DRM_VMW_MAX_MIP_LEVELS 24
 
@@ -55,6 +59,11 @@
 #define DRM_VMW_PRESENT              18
 #define DRM_VMW_PRESENT_READBACK     19
 #define DRM_VMW_UPDATE_LAYOUT        20
+#define DRM_VMW_CREATE_SHADER        21
+#define DRM_VMW_UNREF_SHADER         22
+#define DRM_VMW_GB_SURFACE_CREATE    23
+#define DRM_VMW_GB_SURFACE_REF       24
+#define DRM_VMW_SYNCCPU              25
 
 /*************************************************************************/
 /**
@@ -76,6 +85,9 @@
 #define DRM_VMW_PARAM_MAX_FB_SIZE      5
 #define DRM_VMW_PARAM_FIFO_HW_VERSION  6
 #define DRM_VMW_PARAM_MAX_SURF_MEMORY  7
+#define DRM_VMW_PARAM_3D_CAPS_SIZE     8
+#define DRM_VMW_PARAM_MAX_MOB_MEMORY   9
+#define DRM_VMW_PARAM_MAX_MOB_SIZE     10
 
 /**
  * struct drm_vmw_getparam_arg
@@ -788,4 +800,253 @@
 	uint64_t rects;
 };
 
+
+/*************************************************************************/
+/**
+ * DRM_VMW_CREATE_SHADER - Create shader
+ *
+ * Creates a shader and optionally binds it to a dma buffer containing
+ * the shader byte-code.
+ */
+
+/**
+ * enum drm_vmw_shader_type - Shader types
+ */
+enum drm_vmw_shader_type {
+	drm_vmw_shader_type_vs = 0,
+	drm_vmw_shader_type_ps,
+	drm_vmw_shader_type_gs
+};
+
+
+/**
+ * struct drm_vmw_shader_create_arg
+ *
+ * @shader_type: Shader type of the shader to create.
+ * @size: Size of the byte-code in bytes.
+ * @buffer_handle: Buffer handle identifying the buffer containing the
+ * shader byte-code.
+ * @shader_handle: On successful completion contains a handle that
+ * can be used to subsequently identify the shader.
+ * @offset: Offset in bytes into the buffer given by @buffer_handle,
+ * where the shader byte-code starts.
+ *
+ * Input / Output argument to the DRM_VMW_CREATE_SHADER Ioctl.
+ */
+struct drm_vmw_shader_create_arg {
+	enum drm_vmw_shader_type shader_type;
+	uint32_t size;
+	uint32_t buffer_handle;
+	uint32_t shader_handle;
+	uint64_t offset;
+};
+
+/*************************************************************************/
+/**
+ * DRM_VMW_UNREF_SHADER - Unreferences a shader
+ *
+ * Destroys a user-space reference to a shader, optionally destroying
+ * it.
+ */
+
+/**
+ * struct drm_vmw_shader_arg
+ *
+ * @handle: Handle identifying the shader to destroy.
+ *
+ * Input argument to the DRM_VMW_UNREF_SHADER ioctl.
+ */
+struct drm_vmw_shader_arg {
+	uint32_t handle;
+	uint32_t pad64;
+};
+
+/*************************************************************************/
+/**
+ * DRM_VMW_GB_SURFACE_CREATE - Create a host guest-backed surface.
+ *
+ * Allocates a surface handle and queues a create surface command
+ * for the host on the first use of the surface. The surface ID can
+ * be used as the surface ID in commands referencing the surface.
+ */
+
+/**
+ * enum drm_vmw_surface_flags
+ *
+ * @drm_vmw_surface_flag_shareable:     Whether the surface is shareable
+ * @drm_vmw_surface_flag_scanout:       Whether the surface is a scanout
+ *                                      surface.
+ * @drm_vmw_surface_flag_create_buffer: Create a backup buffer if none is
+ *                                      given.
+ */
+enum drm_vmw_surface_flags {
+	drm_vmw_surface_flag_shareable = (1 << 0),
+	drm_vmw_surface_flag_scanout = (1 << 1),
+	drm_vmw_surface_flag_create_buffer = (1 << 2)
+};
+
+/**
+ * struct drm_vmw_gb_surface_create_req
+ *
+ * @svga3d_flags:     SVGA3d surface flags for the device.
+ * @format:           SVGA3d format.
+ * @mip_level:        Number of mip levels for all faces.
+ * @drm_surface_flags Flags as described above.
+ * @multisample_count Future use. Set to 0.
+ * @autogen_filter    Future use. Set to 0.
+ * @buffer_handle     Buffer handle of backup buffer. SVGA3D_INVALID_ID
+ *                    if none.
+ * @base_size         Size of the base mip level for all faces.
+ *
+ * Input argument to the  DRM_VMW_GB_SURFACE_CREATE Ioctl.
+ * Part of output argument for the DRM_VMW_GB_SURFACE_REF Ioctl.
+ */
+struct drm_vmw_gb_surface_create_req {
+	uint32_t svga3d_flags;
+	uint32_t format;
+	uint32_t mip_levels;
+	enum drm_vmw_surface_flags drm_surface_flags;
+	uint32_t multisample_count;
+	uint32_t autogen_filter;
+	uint32_t buffer_handle;
+	uint32_t pad64;
+	struct drm_vmw_size base_size;
+};
+
+/**
+ * struct drm_vmw_gb_surface_create_rep
+ *
+ * @handle:            Surface handle.
+ * @backup_size:       Size of backup buffers for this surface.
+ * @buffer_handle:     Handle of backup buffer. SVGA3D_INVALID_ID if none.
+ * @buffer_size:       Actual size of the buffer identified by
+ *                     @buffer_handle
+ * @buffer_map_handle: Offset into device address space for the buffer
+ *                     identified by @buffer_handle.
+ *
+ * Part of output argument for the DRM_VMW_GB_SURFACE_REF ioctl.
+ * Output argument for the DRM_VMW_GB_SURFACE_CREATE ioctl.
+ */
+struct drm_vmw_gb_surface_create_rep {
+	uint32_t handle;
+	uint32_t backup_size;
+	uint32_t buffer_handle;
+	uint32_t buffer_size;
+	uint64_t buffer_map_handle;
+};
+
+/**
+ * union drm_vmw_gb_surface_create_arg
+ *
+ * @req: Input argument as described above.
+ * @rep: Output argument as described above.
+ *
+ * Argument to the DRM_VMW_GB_SURFACE_CREATE ioctl.
+ */
+union drm_vmw_gb_surface_create_arg {
+	struct drm_vmw_gb_surface_create_rep rep;
+	struct drm_vmw_gb_surface_create_req req;
+};
+
+/*************************************************************************/
+/**
+ * DRM_VMW_GB_SURFACE_REF - Reference a host surface.
+ *
+ * Puts a reference on a host surface with a given handle, as previously
+ * returned by the DRM_VMW_GB_SURFACE_CREATE ioctl.
+ * A reference will make sure the surface isn't destroyed while we hold
+ * it and will allow the calling client to use the surface handle in
+ * the command stream.
+ *
+ * On successful return, the Ioctl returns the surface information given
+ * to and returned from the DRM_VMW_GB_SURFACE_CREATE ioctl.
+ */
+
+/**
+ * struct drm_vmw_gb_surface_reference_arg
+ *
+ * @creq: The data used as input when the surface was created, as described
+ *        above at "struct drm_vmw_gb_surface_create_req"
+ * @crep: Additional data output when the surface was created, as described
+ *        above at "struct drm_vmw_gb_surface_create_rep"
+ *
+ * Output Argument to the DRM_VMW_GB_SURFACE_REF ioctl.
+ */
+struct drm_vmw_gb_surface_ref_rep {
+	struct drm_vmw_gb_surface_create_req creq;
+	struct drm_vmw_gb_surface_create_rep crep;
+};
+
+/**
+ * union drm_vmw_gb_surface_reference_arg
+ *
+ * @req: Input data as described above at "struct drm_vmw_surface_arg"
+ * @rep: Output data as described above at "struct drm_vmw_gb_surface_ref_rep"
+ *
+ * Argument to the DRM_VMW_GB_SURFACE_REF Ioctl.
+ */
+union drm_vmw_gb_surface_reference_arg {
+	struct drm_vmw_gb_surface_ref_rep rep;
+	struct drm_vmw_surface_arg req;
+};
+
+
+/*************************************************************************/
+/**
+ * DRM_VMW_SYNCCPU - Sync a DMA buffer / MOB for CPU access.
+ *
+ * Idles any previously submitted GPU operations on the buffer and
+ * by default blocks command submissions that reference the buffer.
+ * If the file descriptor used to grab a blocking CPU sync is closed, the
+ * cpu sync is released.
+ * The flags argument indicates how the grab / release operation should be
+ * performed:
+ */
+
+/**
+ * enum drm_vmw_synccpu_flags - Synccpu flags:
+ *
+ * @drm_vmw_synccpu_read: Sync for read. If sync is done for read only, it's a
+ * hint to the kernel to allow command submissions that reference the buffer
+ * for read-only.
+ * @drm_vmw_synccpu_write: Sync for write. Block all command submissions
+ * referencing this buffer.
+ * @drm_vmw_synccpu_dontblock: Don't wait for GPU idle, but rather return
+ * -EBUSY should the buffer be busy.
+ * @drm_vmw_synccpu_allow_cs: Allow command submission that touches the buffer
+ * while the buffer is synced for CPU. This is similar to the GEM bo idle
+ * behavior.
+ */
+enum drm_vmw_synccpu_flags {
+	drm_vmw_synccpu_read = (1 << 0),
+	drm_vmw_synccpu_write = (1 << 1),
+	drm_vmw_synccpu_dontblock = (1 << 2),
+	drm_vmw_synccpu_allow_cs = (1 << 3)
+};
+
+/**
+ * enum drm_vmw_synccpu_op - Synccpu operations:
+ *
+ * @drm_vmw_synccpu_grab:    Grab the buffer for CPU operations
+ * @drm_vmw_synccpu_release: Release a previous grab.
+ */
+enum drm_vmw_synccpu_op {
+	drm_vmw_synccpu_grab,
+	drm_vmw_synccpu_release
+};
+
+/**
+ * struct drm_vmw_synccpu_arg
+ *
+ * @op:			     The synccpu operation as described above.
+ * @handle:		     Handle identifying the buffer object.
+ * @flags:		     Flags as described above.
+ */
+struct drm_vmw_synccpu_arg {
+	enum drm_vmw_synccpu_op op;
+	enum drm_vmw_synccpu_flags flags;
+	uint32_t handle;
+	uint32_t pad64;
+};
+
 #endif
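
Usage sketch (illustrative, not part of the patch): a client would bracket direct CPU access to a buffer object with the new synccpu ioctl. The libdrm drmCommandWrite() wrapper and the buffer handle are assumptions; the driver composes the final ioctl number from the DRM_VMW_SYNCCPU offset defined above.

	#include <stdint.h>
	#include <string.h>
	#include <xf86drm.h>
	#include "vmwgfx_drm.h"

	/* Grab a buffer for CPU writes; command submissions touching it are
	 * blocked until the matching release below. */
	static int synccpu_write_grab(int fd, uint32_t handle)
	{
		struct drm_vmw_synccpu_arg arg;

		memset(&arg, 0, sizeof(arg));
		arg.handle = handle;
		arg.flags = drm_vmw_synccpu_write;
		arg.op = drm_vmw_synccpu_grab;
		return drmCommandWrite(fd, DRM_VMW_SYNCCPU, &arg, sizeof(arg));
	}

	/* Release a previously grabbed CPU sync on the same buffer. */
	static int synccpu_write_release(int fd, uint32_t handle)
	{
		struct drm_vmw_synccpu_arg arg;

		memset(&arg, 0, sizeof(arg));
		arg.handle = handle;
		arg.flags = drm_vmw_synccpu_write;
		arg.op = drm_vmw_synccpu_release;
		return drmCommandWrite(fd, DRM_VMW_SYNCCPU, &arg, sizeof(arg));
	}
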
diff --git a/include/uapi/linux/bcache.h b/include/uapi/linux/bcache.h
index 164a7e2..22b6ad3 100644
--- a/include/uapi/linux/bcache.h
+++ b/include/uapi/linux/bcache.h
@@ -39,6 +39,7 @@
 }
 
 #define KEY_SIZE_BITS		16
+#define KEY_MAX_U64S		8
 
 KEY_FIELD(KEY_PTRS,	high, 60, 3)
 KEY_FIELD(HEADER_SIZE,	high, 58, 2)
@@ -118,7 +119,7 @@
 	return (struct bkey *) (d + bkey_u64s(k));
 }
 
-static inline struct bkey *bkey_last(const struct bkey *k, unsigned nr_keys)
+static inline struct bkey *bkey_idx(const struct bkey *k, unsigned nr_keys)
 {
 	__u64 *d = (void *) k;
 	return (struct bkey *) (d + nr_keys);
diff --git a/include/uapi/linux/btrfs.h b/include/uapi/linux/btrfs.h
index 45e6189..b4d6909 100644
--- a/include/uapi/linux/btrfs.h
+++ b/include/uapi/linux/btrfs.h
@@ -184,6 +184,12 @@
 	__u64 reserved[124];			/* pad to 1k */
 };
 
+struct btrfs_ioctl_feature_flags {
+	__u64 compat_flags;
+	__u64 compat_ro_flags;
+	__u64 incompat_flags;
+};
+
 /* balance control ioctl modes */
 #define BTRFS_BALANCE_CTL_PAUSE		1
 #define BTRFS_BALANCE_CTL_CANCEL	2
@@ -606,5 +612,11 @@
 				    struct btrfs_ioctl_dev_replace_args)
 #define BTRFS_IOC_FILE_EXTENT_SAME _IOWR(BTRFS_IOCTL_MAGIC, 54, \
 					 struct btrfs_ioctl_same_args)
+#define BTRFS_IOC_GET_FEATURES _IOR(BTRFS_IOCTL_MAGIC, 57, \
+				   struct btrfs_ioctl_feature_flags)
+#define BTRFS_IOC_SET_FEATURES _IOW(BTRFS_IOCTL_MAGIC, 57, \
+				   struct btrfs_ioctl_feature_flags[2])
+#define BTRFS_IOC_GET_SUPPORTED_FEATURES _IOR(BTRFS_IOCTL_MAGIC, 57, \
+				   struct btrfs_ioctl_feature_flags[3])
 
 #endif /* _UAPI_LINUX_BTRFS_H */
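
Usage sketch (illustrative, not part of the patch): the new feature ioctls are issued on any file descriptor within the filesystem, for example the mount point; the path and error handling below are assumptions.

	#include <fcntl.h>
	#include <stdio.h>
	#include <sys/ioctl.h>
	#include <linux/btrfs.h>

	/* Print the incompat feature bits currently enabled on a mounted
	 * btrfs filesystem (hypothetical mount point). */
	int main(void)
	{
		struct btrfs_ioctl_feature_flags flags;
		int fd = open("/mnt/btrfs", O_RDONLY);

		if (fd < 0 || ioctl(fd, BTRFS_IOC_GET_FEATURES, &flags) < 0) {
			perror("BTRFS_IOC_GET_FEATURES");
			return 1;
		}
		printf("incompat flags: 0x%llx\n",
		       (unsigned long long)flags.incompat_flags);
		return 0;
	}
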
diff --git a/include/uapi/linux/fd.h b/include/uapi/linux/fd.h
index f1f3dd5..84c517c 100644
--- a/include/uapi/linux/fd.h
+++ b/include/uapi/linux/fd.h
@@ -185,7 +185,8 @@
 				 * to clear media change status */
 	FD_UNUSED_BIT,
 	FD_DISK_CHANGED_BIT,	/* disk has been changed since last i/o */
-	FD_DISK_WRITABLE_BIT	/* disk is writable */
+	FD_DISK_WRITABLE_BIT,	/* disk is writable */
+	FD_OPEN_SHOULD_FAIL_BIT
 };
 
 #define FDSETDRVPRM _IOW(2, 0x90, struct floppy_drive_params)
diff --git a/include/uapi/linux/in6.h b/include/uapi/linux/in6.h
index 633b93c..e9a1d2d 100644
--- a/include/uapi/linux/in6.h
+++ b/include/uapi/linux/in6.h
@@ -128,22 +128,13 @@
  *	IPV6 extension headers
  */
 #if __UAPI_DEF_IPPROTO_V6
-enum {
-  IPPROTO_HOPOPTS = 0,		/* IPv6 hop-by-hop options      */
-#define IPPROTO_HOPOPTS		IPPROTO_HOPOPTS
-  IPPROTO_ROUTING = 43,		/* IPv6 routing header          */
-#define IPPROTO_ROUTING		IPPROTO_ROUTING
-  IPPROTO_FRAGMENT = 44,	/* IPv6 fragmentation header    */
-#define IPPROTO_FRAGMENT	IPPROTO_FRAGMENT
-  IPPROTO_ICMPV6 = 58,		/* ICMPv6                       */
-#define IPPROTO_ICMPV6		IPPROTO_ICMPV6
-  IPPROTO_NONE = 59,		/* IPv6 no next header          */
-#define IPPROTO_NONE		IPPROTO_NONE
-  IPPROTO_DSTOPTS = 60,		/* IPv6 destination options     */
-#define IPPROTO_DSTOPTS		IPPROTO_DSTOPTS
-  IPPROTO_MH = 135,		/* IPv6 mobility header         */
-#define IPPROTO_MH		IPPROTO_MH
-};
+#define IPPROTO_HOPOPTS		0	/* IPv6 hop-by-hop options	*/
+#define IPPROTO_ROUTING		43	/* IPv6 routing header		*/
+#define IPPROTO_FRAGMENT	44	/* IPv6 fragmentation header	*/
+#define IPPROTO_ICMPV6		58	/* ICMPv6			*/
+#define IPPROTO_NONE		59	/* IPv6 no next header		*/
+#define IPPROTO_DSTOPTS		60	/* IPv6 destination options	*/
+#define IPPROTO_MH		135	/* IPv6 mobility header		*/
 #endif /* __UAPI_DEF_IPPROTO_V6 */
 
 /*
diff --git a/include/uapi/linux/media.h b/include/uapi/linux/media.h
index ed49574..d847c76 100644
--- a/include/uapi/linux/media.h
+++ b/include/uapi/linux/media.h
@@ -98,6 +98,7 @@
 
 #define MEDIA_PAD_FL_SINK		(1 << 0)
 #define MEDIA_PAD_FL_SOURCE		(1 << 1)
+#define MEDIA_PAD_FL_MUST_CONNECT	(1 << 2)
 
 struct media_pad_desc {
 	__u32 entity;		/* entity ID */
diff --git a/include/uapi/linux/mic_ioctl.h b/include/uapi/linux/mic_ioctl.h
index 7fabba5..feb0b4c 100644
--- a/include/uapi/linux/mic_ioctl.h
+++ b/include/uapi/linux/mic_ioctl.h
@@ -39,7 +39,7 @@
 #else
 	struct iovec *iov;
 #endif
-	int iovcnt;
+	__u32 iovcnt;
 	__u8 vr_idx;
 	__u8 update_used;
 	__u32 out_len;
diff --git a/include/uapi/linux/mqueue.h b/include/uapi/linux/mqueue.h
index 8b5a796..d0a2b8e 100644
--- a/include/uapi/linux/mqueue.h
+++ b/include/uapi/linux/mqueue.h
@@ -23,11 +23,11 @@
 #define MQ_BYTES_MAX	819200
 
 struct mq_attr {
-	long	mq_flags;	/* message queue flags			*/
-	long	mq_maxmsg;	/* maximum number of messages		*/
-	long	mq_msgsize;	/* maximum message size			*/
-	long	mq_curmsgs;	/* number of messages currently queued	*/
-	long	__reserved[4];	/* ignored for input, zeroed for output */
+	__kernel_long_t	mq_flags;	/* message queue flags			*/
+	__kernel_long_t	mq_maxmsg;	/* maximum number of messages		*/
+	__kernel_long_t	mq_msgsize;	/* maximum message size			*/
+	__kernel_long_t	mq_curmsgs;	/* number of messages currently queued	*/
+	__kernel_long_t	__reserved[4];	/* ignored for input, zeroed for output */
 };
 
 /*
diff --git a/include/uapi/linux/msg.h b/include/uapi/linux/msg.h
index 22d95c6..a703755 100644
--- a/include/uapi/linux/msg.h
+++ b/include/uapi/linux/msg.h
@@ -34,8 +34,8 @@
 
 /* message buffer for msgsnd and msgrcv calls */
 struct msgbuf {
-	long mtype;         /* type of message */
-	char mtext[1];      /* message text */
+	__kernel_long_t mtype;          /* type of message */
+	char mtext[1];                  /* message text */
 };
 
 /* buffer for msgctl calls IPC_INFO, MSG_INFO */
diff --git a/include/uapi/linux/nvme.h b/include/uapi/linux/nvme.h
index 989c04e..e5ab622 100644
--- a/include/uapi/linux/nvme.h
+++ b/include/uapi/linux/nvme.h
@@ -350,6 +350,16 @@
 	__u32			rsvd11[5];
 };
 
+struct nvme_abort_cmd {
+	__u8			opcode;
+	__u8			flags;
+	__u16			command_id;
+	__u32			rsvd1[9];
+	__le16			sqid;
+	__u16			cid;
+	__u32			rsvd11[5];
+};
+
 struct nvme_download_firmware {
 	__u8			opcode;
 	__u8			flags;
@@ -384,6 +394,7 @@
 		struct nvme_download_firmware dlfw;
 		struct nvme_format_cmd format;
 		struct nvme_dsm_cmd dsm;
+		struct nvme_abort_cmd abort;
 	};
 };
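
Driver-side sketch (illustrative, not part of the patch): an Abort admin command built from the new structure pairs the command id and submission queue id of the request being cancelled, using the admin Abort opcode (nvme_admin_abort_cmd, 0x08) from this header; the helper name is hypothetical.

	#include <linux/nvme.h>
	#include <linux/string.h>
	#include <asm/byteorder.h>

	/* Fill an Abort admin command targeting command 'cid' submitted on
	 * submission queue 'sqid'; the caller submits 'cmd' on the admin
	 * queue and sets cmd->abort.command_id for the abort itself. */
	static void example_build_abort(struct nvme_command *cmd, __u16 cid,
					__u16 sqid)
	{
		memset(cmd, 0, sizeof(*cmd));
		cmd->abort.opcode = nvme_admin_abort_cmd;
		cmd->abort.cid = cid;
		cmd->abort.sqid = cpu_to_le16(sqid);
	}
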
 
diff --git a/include/uapi/linux/resource.h b/include/uapi/linux/resource.h
index e0ed284..36fb3b5 100644
--- a/include/uapi/linux/resource.h
+++ b/include/uapi/linux/resource.h
@@ -23,25 +23,25 @@
 struct	rusage {
 	struct timeval ru_utime;	/* user time used */
 	struct timeval ru_stime;	/* system time used */
-	long	ru_maxrss;		/* maximum resident set size */
-	long	ru_ixrss;		/* integral shared memory size */
-	long	ru_idrss;		/* integral unshared data size */
-	long	ru_isrss;		/* integral unshared stack size */
-	long	ru_minflt;		/* page reclaims */
-	long	ru_majflt;		/* page faults */
-	long	ru_nswap;		/* swaps */
-	long	ru_inblock;		/* block input operations */
-	long	ru_oublock;		/* block output operations */
-	long	ru_msgsnd;		/* messages sent */
-	long	ru_msgrcv;		/* messages received */
-	long	ru_nsignals;		/* signals received */
-	long	ru_nvcsw;		/* voluntary context switches */
-	long	ru_nivcsw;		/* involuntary " */
+	__kernel_long_t	ru_maxrss;	/* maximum resident set size */
+	__kernel_long_t	ru_ixrss;	/* integral shared memory size */
+	__kernel_long_t	ru_idrss;	/* integral unshared data size */
+	__kernel_long_t	ru_isrss;	/* integral unshared stack size */
+	__kernel_long_t	ru_minflt;	/* page reclaims */
+	__kernel_long_t	ru_majflt;	/* page faults */
+	__kernel_long_t	ru_nswap;	/* swaps */
+	__kernel_long_t	ru_inblock;	/* block input operations */
+	__kernel_long_t	ru_oublock;	/* block output operations */
+	__kernel_long_t	ru_msgsnd;	/* messages sent */
+	__kernel_long_t	ru_msgrcv;	/* messages received */
+	__kernel_long_t	ru_nsignals;	/* signals received */
+	__kernel_long_t	ru_nvcsw;	/* voluntary context switches */
+	__kernel_long_t	ru_nivcsw;	/* involuntary " */
 };
 
 struct rlimit {
-	unsigned long	rlim_cur;
-	unsigned long	rlim_max;
+	__kernel_ulong_t	rlim_cur;
+	__kernel_ulong_t	rlim_max;
 };
 
 #define RLIM64_INFINITY		(~0ULL)
diff --git a/include/uapi/linux/shm.h b/include/uapi/linux/shm.h
index ec36fa1..78b6941 100644
--- a/include/uapi/linux/shm.h
+++ b/include/uapi/linux/shm.h
@@ -68,11 +68,11 @@
 
 struct shm_info {
 	int used_ids;
-	unsigned long shm_tot;	/* total allocated shm */
-	unsigned long shm_rss;	/* total resident shm */
-	unsigned long shm_swp;	/* total swapped shm */
-	unsigned long swap_attempts;
-	unsigned long swap_successes;
+	__kernel_ulong_t shm_tot;	/* total allocated shm */
+	__kernel_ulong_t shm_rss;	/* total resident shm */
+	__kernel_ulong_t shm_swp;	/* total swapped shm */
+	__kernel_ulong_t swap_attempts;
+	__kernel_ulong_t swap_successes;
 };
 
 
diff --git a/include/uapi/linux/timex.h b/include/uapi/linux/timex.h
index a7ea81f..92685d8 100644
--- a/include/uapi/linux/timex.h
+++ b/include/uapi/linux/timex.h
@@ -63,27 +63,27 @@
  */
 struct timex {
 	unsigned int modes;	/* mode selector */
-	long offset;		/* time offset (usec) */
-	long freq;		/* frequency offset (scaled ppm) */
-	long maxerror;		/* maximum error (usec) */
-	long esterror;		/* estimated error (usec) */
+	__kernel_long_t offset;	/* time offset (usec) */
+	__kernel_long_t freq;	/* frequency offset (scaled ppm) */
+	__kernel_long_t maxerror;/* maximum error (usec) */
+	__kernel_long_t esterror;/* estimated error (usec) */
 	int status;		/* clock command/status */
-	long constant;		/* pll time constant */
-	long precision;		/* clock precision (usec) (read only) */
-	long tolerance;		/* clock frequency tolerance (ppm)
-				 * (read only)
-				 */
+	__kernel_long_t constant;/* pll time constant */
+	__kernel_long_t precision;/* clock precision (usec) (read only) */
+	__kernel_long_t tolerance;/* clock frequency tolerance (ppm)
+				   * (read only)
+				   */
 	struct timeval time;	/* (read only, except for ADJ_SETOFFSET) */
-	long tick;		/* (modified) usecs between clock ticks */
+	__kernel_long_t tick;	/* (modified) usecs between clock ticks */
 
-	long ppsfreq;           /* pps frequency (scaled ppm) (ro) */
-	long jitter;            /* pps jitter (us) (ro) */
+	__kernel_long_t ppsfreq;/* pps frequency (scaled ppm) (ro) */
+	__kernel_long_t jitter; /* pps jitter (us) (ro) */
 	int shift;              /* interval duration (s) (shift) (ro) */
-	long stabil;            /* pps stability (scaled ppm) (ro) */
-	long jitcnt;            /* jitter limit exceeded (ro) */
-	long calcnt;            /* calibration intervals (ro) */
-	long errcnt;            /* calibration errors (ro) */
-	long stbcnt;            /* stability limit exceeded (ro) */
+	__kernel_long_t stabil;            /* pps stability (scaled ppm) (ro) */
+	__kernel_long_t jitcnt; /* jitter limit exceeded (ro) */
+	__kernel_long_t calcnt; /* calibration intervals (ro) */
+	__kernel_long_t errcnt; /* calibration errors (ro) */
+	__kernel_long_t stbcnt; /* stability limit exceeded (ro) */
 
 	int tai;		/* TAI offset (ro) */
 
diff --git a/include/uapi/linux/v4l2-controls.h b/include/uapi/linux/v4l2-controls.h
index 1666aab..2cbe605 100644
--- a/include/uapi/linux/v4l2-controls.h
+++ b/include/uapi/linux/v4l2-controls.h
@@ -164,6 +164,10 @@
  * this driver */
 #define V4L2_CID_USER_TI_VPE_BASE		(V4L2_CID_USER_BASE + 0x1050)
 
+/* The base for the saa7134 driver controls.
+ * We reserve 16 controls for this driver. */
+#define V4L2_CID_USER_SAA7134_BASE		(V4L2_CID_USER_BASE + 0x1060)
+
 /* MPEG-class control IDs */
 /* The MPEG controls are applicable to all codec controls
  * and the 'MPEG' part of the define is historical */
@@ -554,6 +558,11 @@
 	V4L2_CID_MPEG_VIDEO_VPX_GOLDEN_FRAME_USE_PREV		= 0,
 	V4L2_CID_MPEG_VIDEO_VPX_GOLDEN_FRAME_USE_REF_PERIOD	= 1,
 };
+#define V4L2_CID_MPEG_VIDEO_VPX_MIN_QP			(V4L2_CID_MPEG_BASE+507)
+#define V4L2_CID_MPEG_VIDEO_VPX_MAX_QP			(V4L2_CID_MPEG_BASE+508)
+#define V4L2_CID_MPEG_VIDEO_VPX_I_FRAME_QP		(V4L2_CID_MPEG_BASE+509)
+#define V4L2_CID_MPEG_VIDEO_VPX_P_FRAME_QP		(V4L2_CID_MPEG_BASE+510)
+#define V4L2_CID_MPEG_VIDEO_VPX_PROFILE			(V4L2_CID_MPEG_BASE+511)
 
 /*  MPEG-class control IDs specific to the CX2341x driver as defined by V4L2 */
 #define V4L2_CID_MPEG_CX2341X_BASE 				(V4L2_CTRL_CLASS_MPEG | 0x1000)
diff --git a/include/uapi/linux/v4l2-mediabus.h b/include/uapi/linux/v4l2-mediabus.h
index a960125..b5c3aab 100644
--- a/include/uapi/linux/v4l2-mediabus.h
+++ b/include/uapi/linux/v4l2-mediabus.h
@@ -110,6 +110,9 @@
 
 	/* S5C73M3 sensor specific interleaved UYVY and JPEG */
 	V4L2_MBUS_FMT_S5C_UYVY_JPEG_1X8 = 0x5001,
+
+	/* HSV - next is 0x6002 */
+	V4L2_MBUS_FMT_AHSV8888_1X32 = 0x6001,
 };
 
 /**
diff --git a/include/uapi/linux/videodev2.h b/include/uapi/linux/videodev2.h
index 437f1b0..6ae7bbe 100644
--- a/include/uapi/linux/videodev2.h
+++ b/include/uapi/linux/videodev2.h
@@ -207,8 +207,8 @@
 struct v4l2_rect {
 	__s32   left;
 	__s32   top;
-	__s32   width;
-	__s32   height;
+	__u32   width;
+	__u32   height;
 };
 
 struct v4l2_fract {
diff --git a/include/uapi/linux/vsp1.h b/include/uapi/linux/vsp1.h
new file mode 100644
index 0000000..e18858f
--- /dev/null
+++ b/include/uapi/linux/vsp1.h
@@ -0,0 +1,34 @@
+/*
+ * vsp1.h
+ *
+ * Renesas R-Car VSP1 - User-space API
+ *
+ * Copyright (C) 2013 Renesas Corporation
+ *
+ * Contacts: Laurent Pinchart <laurent.pinchart@ideasonboard.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef __VSP1_USER_H__
+#define __VSP1_USER_H__
+
+#include <linux/types.h>
+#include <linux/videodev2.h>
+
+/*
+ * Private IOCTLs
+ *
+ * VIDIOC_VSP1_LUT_CONFIG - Configure the lookup table
+ */
+
+#define VIDIOC_VSP1_LUT_CONFIG \
+	_IOWR('V', BASE_VIDIOC_PRIVATE + 1, struct vsp1_lut_config)
+
+struct vsp1_lut_config {
+	__u32 lut[256];
+};
+
+#endif	/* __VSP1_USER_H__ */
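
Usage sketch (illustrative, not part of the patch): userspace would issue the private ioctl on the VSP1 LUT entity's subdevice node. The device path and the way the colour components are packed into each 32-bit entry are assumptions, not taken from the patch.

	#include <fcntl.h>
	#include <sys/ioctl.h>
	#include <linux/vsp1.h>

	/* Program a simple identity lookup table: for each of the 256 input
	 * levels, pack identical R/G/B output values (assumed 0x00RRGGBB
	 * layout) and hand the table to the driver. */
	static int vsp1_set_identity_lut(const char *subdev_path)
	{
		struct vsp1_lut_config config;
		unsigned int i;
		int fd = open(subdev_path, O_RDWR);

		if (fd < 0)
			return -1;
		for (i = 0; i < 256; i++)
			config.lut[i] = (i << 16) | (i << 8) | i;
		return ioctl(fd, VIDIOC_VSP1_LUT_CONFIG, &config);
	}
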
diff --git a/include/uapi/linux/xattr.h b/include/uapi/linux/xattr.h
index e4629b9..40bbc04 100644
--- a/include/uapi/linux/xattr.h
+++ b/include/uapi/linux/xattr.h
@@ -20,6 +20,9 @@
 #define XATTR_MAC_OSX_PREFIX "osx."
 #define XATTR_MAC_OSX_PREFIX_LEN (sizeof(XATTR_MAC_OSX_PREFIX) - 1)
 
+#define XATTR_BTRFS_PREFIX "btrfs."
+#define XATTR_BTRFS_PREFIX_LEN (sizeof(XATTR_BTRFS_PREFIX) - 1)
+
 #define XATTR_SECURITY_PREFIX	"security."
 #define XATTR_SECURITY_PREFIX_LEN (sizeof(XATTR_SECURITY_PREFIX) - 1)
 
diff --git a/include/uapi/xen/Kbuild b/include/uapi/xen/Kbuild
index 61257cb..5c45962 100644
--- a/include/uapi/xen/Kbuild
+++ b/include/uapi/xen/Kbuild
@@ -1,3 +1,5 @@
 # UAPI Header export list
 header-y += evtchn.h
+header-y += gntalloc.h
+header-y += gntdev.h
 header-y += privcmd.h
diff --git a/include/xen/gntalloc.h b/include/uapi/xen/gntalloc.h
similarity index 100%
rename from include/xen/gntalloc.h
rename to include/uapi/xen/gntalloc.h
diff --git a/include/xen/gntdev.h b/include/uapi/xen/gntdev.h
similarity index 100%
rename from include/xen/gntdev.h
rename to include/uapi/xen/gntdev.h
diff --git a/include/xen/grant_table.h b/include/xen/grant_table.h
index 5acb1e4..a5af2a2 100644
--- a/include/xen/grant_table.h
+++ b/include/xen/grant_table.h
@@ -185,7 +185,7 @@
 };
 extern struct grant_frames xen_auto_xlat_grant_frames;
 unsigned int gnttab_max_grant_frames(void);
-int gnttab_setup_auto_xlat_frames(unsigned long addr);
+int gnttab_setup_auto_xlat_frames(phys_addr_t addr);
 void gnttab_free_auto_xlat_frames(void);
 
 #define gnttab_map_vaddr(map) ((void *)(map.host_virt_addr))
diff --git a/include/xen/interface/io/blkif.h b/include/xen/interface/io/blkif.h
index ae665ac..32ec05a 100644
--- a/include/xen/interface/io/blkif.h
+++ b/include/xen/interface/io/blkif.h
@@ -113,13 +113,13 @@
  * it's less than the number provided by the backend. The indirect_grefs field
  * in blkif_request_indirect should be filled by the frontend with the
  * grant references of the pages that are holding the indirect segments.
- * This pages are filled with an array of blkif_request_segment_aligned
- * that hold the information about the segments. The number of indirect
- * pages to use is determined by the maximum number of segments
- * a indirect request contains. Every indirect page can contain a maximum
- * of 512 segments (PAGE_SIZE/sizeof(blkif_request_segment_aligned)),
- * so to calculate the number of indirect pages to use we have to do
- * ceil(indirect_segments/512).
+ * These pages are filled with an array of blkif_request_segment that hold the
+ * information about the segments. The number of indirect pages to use is
+ * determined by the number of segments an indirect request contains. Every
+ * indirect page can contain a maximum of
+ * (PAGE_SIZE / sizeof(struct blkif_request_segment)) segments, so to
+ * calculate the number of indirect pages to use we have to do
+ * ceil(indirect_segments / (PAGE_SIZE / sizeof(struct blkif_request_segment))).
  *
  * If a backend does not recognize BLKIF_OP_INDIRECT, it should *not*
  * create the "feature-max-indirect-segments" node!
@@ -135,13 +135,12 @@
 
 #define BLKIF_MAX_INDIRECT_PAGES_PER_REQUEST 8
 
-struct blkif_request_segment_aligned {
-	grant_ref_t gref;        /* reference to I/O buffer frame        */
-	/* @first_sect: first sector in frame to transfer (inclusive).   */
-	/* @last_sect: last sector in frame to transfer (inclusive).     */
-	uint8_t     first_sect, last_sect;
-	uint16_t    _pad; /* padding to make it 8 bytes, so it's cache-aligned */
-} __attribute__((__packed__));
+struct blkif_request_segment {
+		grant_ref_t gref;        /* reference to I/O buffer frame        */
+		/* @first_sect: first sector in frame to transfer (inclusive).   */
+		/* @last_sect: last sector in frame to transfer (inclusive).     */
+		uint8_t     first_sect, last_sect;
+};
 
 struct blkif_request_rw {
 	uint8_t        nr_segments;  /* number of segments                   */
@@ -151,12 +150,7 @@
 #endif
 	uint64_t       id;           /* private guest value, echoed in resp  */
 	blkif_sector_t sector_number;/* start sector idx on disk (r/w only)  */
-	struct blkif_request_segment {
-		grant_ref_t gref;        /* reference to I/O buffer frame        */
-		/* @first_sect: first sector in frame to transfer (inclusive).   */
-		/* @last_sect: last sector in frame to transfer (inclusive).     */
-		uint8_t     first_sect, last_sect;
-	} seg[BLKIF_MAX_SEGMENTS_PER_REQUEST];
+	struct blkif_request_segment seg[BLKIF_MAX_SEGMENTS_PER_REQUEST];
 } __attribute__((__packed__));
 
 struct blkif_request_discard {
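
Worked example (illustrative, not part of the patch): with 4 KiB pages the unpacked struct blkif_request_segment above occupies 8 bytes, so each indirect page carries 4096 / 8 = 512 segments; a request with, say, 600 indirect segments therefore needs ceil(600 / 512) = 2 indirect pages. A frontend could encode the comment's formula as (macro names here are illustrative):

	/* Segments that fit in one indirect page with the layout introduced
	 * above, and the number of indirect pages a request with 'segs'
	 * indirect segments needs. */
	#define SEGS_PER_INDIRECT_FRAME \
		(PAGE_SIZE / sizeof(struct blkif_request_segment))
	#define INDIRECT_PAGES(segs) \
		DIV_ROUND_UP(segs, SEGS_PER_INDIRECT_FRAME)
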
diff --git a/include/xen/interface/xencomm.h b/include/xen/interface/xencomm.h
deleted file mode 100644
index ac45e07..0000000
--- a/include/xen/interface/xencomm.h
+++ /dev/null
@@ -1,41 +0,0 @@
-/*
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to
- * deal in the Software without restriction, including without limitation the
- * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
- * sell copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
- * DEALINGS IN THE SOFTWARE.
- *
- * Copyright (C) IBM Corp. 2006
- */
-
-#ifndef _XEN_XENCOMM_H_
-#define _XEN_XENCOMM_H_
-
-/* A xencomm descriptor is a scatter/gather list containing physical
- * addresses corresponding to a virtually contiguous memory area. The
- * hypervisor translates these physical addresses to machine addresses to copy
- * to and from the virtually contiguous area.
- */
-
-#define XENCOMM_MAGIC 0x58434F4D /* 'XCOM' */
-#define XENCOMM_INVALID (~0UL)
-
-struct xencomm_desc {
-    uint32_t magic;
-    uint32_t nr_addrs; /* the number of entries in address[] */
-    uint64_t address[0];
-};
-
-#endif /* _XEN_XENCOMM_H_ */
diff --git a/include/xen/xencomm.h b/include/xen/xencomm.h
deleted file mode 100644
index e43b039..0000000
--- a/include/xen/xencomm.h
+++ /dev/null
@@ -1,77 +0,0 @@
-/*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307 USA
- *
- * Copyright (C) IBM Corp. 2006
- *
- * Authors: Hollis Blanchard <hollisb@us.ibm.com>
- *          Jerone Young <jyoung5@us.ibm.com>
- */
-
-#ifndef _LINUX_XENCOMM_H_
-#define _LINUX_XENCOMM_H_
-
-#include <xen/interface/xencomm.h>
-
-#define XENCOMM_MINI_ADDRS 3
-struct xencomm_mini {
-	struct xencomm_desc _desc;
-	uint64_t address[XENCOMM_MINI_ADDRS];
-};
-
-/* To avoid additionnal virt to phys conversion, an opaque structure is
-   presented.  */
-struct xencomm_handle;
-
-extern void xencomm_free(struct xencomm_handle *desc);
-extern struct xencomm_handle *xencomm_map(void *ptr, unsigned long bytes);
-extern struct xencomm_handle *__xencomm_map_no_alloc(void *ptr,
-			unsigned long bytes,  struct xencomm_mini *xc_area);
-
-#if 0
-#define XENCOMM_MINI_ALIGNED(xc_desc, n)				\
-	struct xencomm_mini xc_desc ## _base[(n)]			\
-	__attribute__((__aligned__(sizeof(struct xencomm_mini))));	\
-	struct xencomm_mini *xc_desc = &xc_desc ## _base[0];
-#else
-/*
- * gcc bug workaround:
- * http://gcc.gnu.org/bugzilla/show_bug.cgi?id=16660
- * gcc doesn't handle properly stack variable with
- * __attribute__((__align__(sizeof(struct xencomm_mini))))
- */
-#define XENCOMM_MINI_ALIGNED(xc_desc, n)				\
-	unsigned char xc_desc ## _base[((n) + 1 ) *			\
-				       sizeof(struct xencomm_mini)];	\
-	struct xencomm_mini *xc_desc = (struct xencomm_mini *)		\
-		((unsigned long)xc_desc ## _base +			\
-		 (sizeof(struct xencomm_mini) -				\
-		  ((unsigned long)xc_desc ## _base) %			\
-		  sizeof(struct xencomm_mini)));
-#endif
-#define xencomm_map_no_alloc(ptr, bytes)			\
-	({ XENCOMM_MINI_ALIGNED(xc_desc, 1);			\
-		__xencomm_map_no_alloc(ptr, bytes, xc_desc); })
-
-/* provided by architecture code: */
-extern unsigned long xencomm_vtop(unsigned long vaddr);
-
-static inline void *xencomm_pa(void *ptr)
-{
-	return (void *)xencomm_vtop((unsigned long)ptr);
-}
-
-#define xen_guest_handle(hnd)  ((hnd).p)
-
-#endif /* _LINUX_XENCOMM_H_ */
diff --git a/init/Kconfig b/init/Kconfig
index 34a0a3b..009a797 100644
--- a/init/Kconfig
+++ b/init/Kconfig
@@ -284,7 +284,7 @@
 
 config AUDITSYSCALL
 	bool "Enable system-call auditing support"
-	depends on AUDIT && (X86 || PARISC || PPC || S390 || IA64 || UML || SPARC64 || SUPERH || (ARM && AEABI && !OABI_COMPAT))
+	depends on AUDIT && (X86 || PARISC || PPC || S390 || IA64 || UML || SPARC64 || SUPERH || (ARM && AEABI && !OABI_COMPAT) || ALPHA)
 	default y if SECURITY_SELINUX
 	help
 	  Enable low-overhead system-call auditing infrastructure that
diff --git a/init/main.c b/init/main.c
index 2fd9cef..eb03090 100644
--- a/init/main.c
+++ b/init/main.c
@@ -812,7 +812,7 @@
 static int run_init_process(const char *init_filename)
 {
 	argv_init[0] = init_filename;
-	return do_execve(init_filename,
+	return do_execve(getname_kernel(init_filename),
 		(const char __user *const __user *)argv_init,
 		(const char __user *const __user *)envp_init);
 }
diff --git a/kernel/auditsc.c b/kernel/auditsc.c
index 10176cd..7aef2f4 100644
--- a/kernel/auditsc.c
+++ b/kernel/auditsc.c
@@ -1719,7 +1719,7 @@
 	struct audit_context *context = current->audit_context;
 
 	BUG_ON(!context);
-	if (!context->in_syscall) {
+	if (!name->aname || !context->in_syscall) {
 #if AUDIT_DEBUG == 2
 		printk(KERN_ERR "%s:%d(:%d): final_putname(%p)\n",
 		       __FILE__, __LINE__, context->serial, name);
diff --git a/kernel/cgroup.c b/kernel/cgroup.c
index e2f46ba..105f273 100644
--- a/kernel/cgroup.c
+++ b/kernel/cgroup.c
@@ -886,7 +886,9 @@
 		 * per-subsystem and moved to css->id so that lookups are
 		 * successful until the target css is released.
 		 */
+		mutex_lock(&cgroup_mutex);
 		idr_remove(&cgrp->root->cgroup_idr, cgrp->id);
+		mutex_unlock(&cgroup_mutex);
 		cgrp->id = -1;
 
 		call_rcu(&cgrp->rcu_head, cgroup_free_rcu);
@@ -1566,10 +1568,10 @@
 		mutex_lock(&cgroup_mutex);
 		mutex_lock(&cgroup_root_mutex);
 
-		root_cgrp->id = idr_alloc(&root->cgroup_idr, root_cgrp,
-					   0, 1, GFP_KERNEL);
-		if (root_cgrp->id < 0)
+		ret = idr_alloc(&root->cgroup_idr, root_cgrp, 0, 1, GFP_KERNEL);
+		if (ret < 0)
 			goto unlock_drop;
+		root_cgrp->id = ret;
 
 		/* Check for name clashes with existing mounts */
 		ret = -EBUSY;
@@ -2763,10 +2765,7 @@
 	 */
 	update_before = cgroup_serial_nr_next;
 
-	mutex_unlock(&cgroup_mutex);
-
 	/* add/rm files for all cgroups created before */
-	rcu_read_lock();
 	css_for_each_descendant_pre(css, cgroup_css(root, ss)) {
 		struct cgroup *cgrp = css->cgroup;
 
@@ -2775,23 +2774,19 @@
 
 		inode = cgrp->dentry->d_inode;
 		dget(cgrp->dentry);
-		rcu_read_unlock();
-
 		dput(prev);
 		prev = cgrp->dentry;
 
+		mutex_unlock(&cgroup_mutex);
 		mutex_lock(&inode->i_mutex);
 		mutex_lock(&cgroup_mutex);
 		if (cgrp->serial_nr < update_before && !cgroup_is_dead(cgrp))
 			ret = cgroup_addrm_files(cgrp, cfts, is_add);
-		mutex_unlock(&cgroup_mutex);
 		mutex_unlock(&inode->i_mutex);
-
-		rcu_read_lock();
 		if (ret)
 			break;
 	}
-	rcu_read_unlock();
+	mutex_unlock(&cgroup_mutex);
 	dput(prev);
 	deactivate_super(sb);
 	return ret;
@@ -2910,9 +2905,14 @@
 		 * We should check if the process is exiting, otherwise
 		 * it will race with cgroup_exit() in that the list
 		 * entry won't be deleted though the process has exited.
+		 * Do it while holding siglock so that we don't end up
+		 * racing against cgroup_exit().
 		 */
+		spin_lock_irq(&p->sighand->siglock);
 		if (!(p->flags & PF_EXITING) && list_empty(&p->cg_list))
 			list_add(&p->cg_list, &task_css_set(p)->tasks);
+		spin_unlock_irq(&p->sighand->siglock);
+
 		task_unlock(p);
 	} while_each_thread(g, p);
 	read_unlock(&tasklist_lock);
@@ -4158,7 +4158,7 @@
 	struct cgroup *cgrp;
 	struct cgroup_name *name;
 	struct cgroupfs_root *root = parent->root;
-	int ssid, err = 0;
+	int ssid, err;
 	struct cgroup_subsys *ss;
 	struct super_block *sb = root->sb;
 
@@ -4168,19 +4168,13 @@
 		return -ENOMEM;
 
 	name = cgroup_alloc_name(dentry);
-	if (!name)
+	if (!name) {
+		err = -ENOMEM;
 		goto err_free_cgrp;
+	}
 	rcu_assign_pointer(cgrp->name, name);
 
 	/*
-	 * Temporarily set the pointer to NULL, so idr_find() won't return
-	 * a half-baked cgroup.
-	 */
-	cgrp->id = idr_alloc(&root->cgroup_idr, NULL, 1, 0, GFP_KERNEL);
-	if (cgrp->id < 0)
-		goto err_free_name;
-
-	/*
 	 * Only live parents can have children.  Note that the liveliness
 	 * check isn't strictly necessary because cgroup_mkdir() and
 	 * cgroup_rmdir() are fully synchronized by i_mutex; however, do it
@@ -4189,7 +4183,17 @@
 	 */
 	if (!cgroup_lock_live_group(parent)) {
 		err = -ENODEV;
-		goto err_free_id;
+		goto err_free_name;
+	}
+
+	/*
+	 * Temporarily set the pointer to NULL, so idr_find() won't return
+	 * a half-baked cgroup.
+	 */
+	cgrp->id = idr_alloc(&root->cgroup_idr, NULL, 1, 0, GFP_KERNEL);
+	if (cgrp->id < 0) {
+		err = -ENOMEM;
+		goto err_unlock;
 	}
 
 	/* Grab a reference on the superblock so the hierarchy doesn't
@@ -4221,7 +4225,7 @@
 	 */
 	err = cgroup_create_file(dentry, S_IFDIR | mode, sb);
 	if (err < 0)
-		goto err_unlock;
+		goto err_free_id;
 	lockdep_assert_held(&dentry->d_inode->i_mutex);
 
 	cgrp->serial_nr = cgroup_serial_nr_next++;
@@ -4257,12 +4261,12 @@
 
 	return 0;
 
-err_unlock:
-	mutex_unlock(&cgroup_mutex);
-	/* Release the reference count that we took on the superblock */
-	deactivate_super(sb);
 err_free_id:
 	idr_remove(&root->cgroup_idr, cgrp->id);
+	/* Release the reference count that we took on the superblock */
+	deactivate_super(sb);
+err_unlock:
+	mutex_unlock(&cgroup_mutex);
 err_free_name:
 	kfree(rcu_dereference_raw(cgrp->name));
 err_free_cgrp:
diff --git a/kernel/debug/debug_core.c b/kernel/debug/debug_core.c
index 7d2f35e..334b398 100644
--- a/kernel/debug/debug_core.c
+++ b/kernel/debug/debug_core.c
@@ -736,7 +736,8 @@
 	return 1;
 }
 
-int kgdb_nmicallin(int cpu, int trapnr, void *regs, atomic_t *send_ready)
+int kgdb_nmicallin(int cpu, int trapnr, void *regs, int err_code,
+							atomic_t *send_ready)
 {
 #ifdef CONFIG_SMP
 	if (!kgdb_io_ready(0) || !send_ready)
@@ -750,7 +751,7 @@
 		ks->cpu			= cpu;
 		ks->ex_vector		= trapnr;
 		ks->signo		= SIGTRAP;
-		ks->err_code		= KGDB_KDB_REASON_SYSTEM_NMI;
+		ks->err_code		= err_code;
 		ks->linux_regs		= regs;
 		ks->send_ready		= send_ready;
 		kgdb_cpu_enter(ks, regs, DCPU_WANT_MASTER);
diff --git a/kernel/debug/debug_core.h b/kernel/debug/debug_core.h
index 572aa4f..127d9bc 100644
--- a/kernel/debug/debug_core.h
+++ b/kernel/debug/debug_core.h
@@ -75,13 +75,11 @@
 extern int kdb_parse(const char *cmdstr);
 extern int kdb_common_init_state(struct kgdb_state *ks);
 extern int kdb_common_deinit_state(void);
-#define KGDB_KDB_REASON_SYSTEM_NMI KDB_REASON_SYSTEM_NMI
 #else /* ! CONFIG_KGDB_KDB */
 static inline int kdb_stub(struct kgdb_state *ks)
 {
 	return DBG_PASS_EVENT;
 }
-#define KGDB_KDB_REASON_SYSTEM_NMI 0
 #endif /* CONFIG_KGDB_KDB */
 
 #endif /* _DEBUG_CORE_H_ */
diff --git a/kernel/hung_task.c b/kernel/hung_task.c
index 9328b80..0b9c169 100644
--- a/kernel/hung_task.c
+++ b/kernel/hung_task.c
@@ -37,7 +37,7 @@
  */
 unsigned long __read_mostly sysctl_hung_task_timeout_secs = CONFIG_DEFAULT_HUNG_TASK_TIMEOUT;
 
-unsigned long __read_mostly sysctl_hung_task_warnings = 10;
+int __read_mostly sysctl_hung_task_warnings = 10;
 
 static int __read_mostly did_panic;
 
@@ -98,7 +98,9 @@
 
 	if (!sysctl_hung_task_warnings)
 		return;
-	sysctl_hung_task_warnings--;
+
+	if (sysctl_hung_task_warnings > 0)
+		sysctl_hung_task_warnings--;
 
 	/*
 	 * Ok, the task did not get scheduled for more than 2 minutes,
diff --git a/kernel/irq/Kconfig b/kernel/irq/Kconfig
index 4a1fef0..07cbdfe 100644
--- a/kernel/irq/Kconfig
+++ b/kernel/irq/Kconfig
@@ -40,6 +40,7 @@
 # Generic configurable interrupt chip implementation
 config GENERIC_IRQ_CHIP
        bool
+       select IRQ_DOMAIN
 
 # Generic irq_domain hw <--> linux irq number translation
 config IRQ_DOMAIN
diff --git a/kernel/irq/devres.c b/kernel/irq/devres.c
index bd8e788..1ef0606 100644
--- a/kernel/irq/devres.c
+++ b/kernel/irq/devres.c
@@ -73,6 +73,51 @@
 EXPORT_SYMBOL(devm_request_threaded_irq);
 
 /**
+ *	devm_request_any_context_irq - allocate an interrupt line for a managed device
+ *	@dev: device to request interrupt for
+ *	@irq: Interrupt line to allocate
+ *	@handler: Function to be called when the IRQ occurs
+ *	@irqflags: Interrupt type flags
+ *	@devname: An ascii name for the claiming device
+ *	@dev_id: A cookie passed back to the handler function
+ *
+ *	Except for the extra @dev argument, this function takes the
+ *	same arguments and performs the same function as
+ *	request_any_context_irq().  IRQs requested with this function will be
+ *	automatically freed on driver detach.
+ *
+ *	If an IRQ allocated with this function needs to be freed
+ *	separately, devm_free_irq() must be used.
+ */
+int devm_request_any_context_irq(struct device *dev, unsigned int irq,
+			      irq_handler_t handler, unsigned long irqflags,
+			      const char *devname, void *dev_id)
+{
+	struct irq_devres *dr;
+	int rc;
+
+	dr = devres_alloc(devm_irq_release, sizeof(struct irq_devres),
+			  GFP_KERNEL);
+	if (!dr)
+		return -ENOMEM;
+
+	rc = request_any_context_irq(irq, handler, irqflags, devname, dev_id);
+	if (rc < 0) {
+		devres_free(dr);
+		return rc;
+	}
+
+	dr->irq = irq;
+	dr->dev_id = dev_id;
+	devres_add(dev, dr);
+
+	return 0;
+}
+EXPORT_SYMBOL(devm_request_any_context_irq);
+
+/**
  *	devm_free_irq - free an interrupt
  *	@dev: device to free interrupt for
  *	@irq: Interrupt line to free
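
Usage sketch (illustrative, not part of the patch): a driver probe can now request an interrupt line that may live behind a threaded (e.g. GPIO expander) controller without writing a matching free path; the handler, device name and trigger flags below are assumptions.

	#include <linux/interrupt.h>
	#include <linux/platform_device.h>

	static irqreturn_t foo_irq_handler(int irq, void *dev_id)
	{
		/* hypothetical device interrupt handler */
		return IRQ_HANDLED;
	}

	static int foo_probe(struct platform_device *pdev)
	{
		int irq = platform_get_irq(pdev, 0);
		int ret;

		if (irq < 0)
			return irq;

		/* The line is released automatically when the driver detaches;
		 * only negative return values indicate failure. */
		ret = devm_request_any_context_irq(&pdev->dev, irq,
						   foo_irq_handler,
						   IRQF_TRIGGER_RISING,
						   "foo", pdev);
		if (ret < 0)
			return ret;
		return 0;
	}
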
diff --git a/kernel/irq/irqdesc.c b/kernel/irq/irqdesc.c
index 192a302..8ab8e93 100644
--- a/kernel/irq/irqdesc.c
+++ b/kernel/irq/irqdesc.c
@@ -274,6 +274,7 @@
 {
 	return (irq < NR_IRQS) ? irq_desc + irq : NULL;
 }
+EXPORT_SYMBOL(irq_to_desc);
 
 static void free_desc(unsigned int irq)
 {
diff --git a/kernel/kmod.c b/kernel/kmod.c
index b086006..6b375af 100644
--- a/kernel/kmod.c
+++ b/kernel/kmod.c
@@ -239,7 +239,7 @@
 
 	commit_creds(new);
 
-	retval = do_execve(sub_info->path,
+	retval = do_execve(getname_kernel(sub_info->path),
 			   (const char __user *const __user *)sub_info->argv,
 			   (const char __user *const __user *)sub_info->envp);
 	if (!retval)
diff --git a/kernel/power/block_io.c b/kernel/power/block_io.c
index d09dd10..9a58bc2 100644
--- a/kernel/power/block_io.c
+++ b/kernel/power/block_io.c
@@ -32,7 +32,7 @@
 	struct bio *bio;
 
 	bio = bio_alloc(__GFP_WAIT | __GFP_HIGH, 1);
-	bio->bi_sector = sector;
+	bio->bi_iter.bi_sector = sector;
 	bio->bi_bdev = bdev;
 	bio->bi_end_io = end_swap_bio_read;
 
diff --git a/kernel/power/console.c b/kernel/power/console.c
index eacb8bd..aba9c54 100644
--- a/kernel/power/console.c
+++ b/kernel/power/console.c
@@ -9,6 +9,7 @@
 #include <linux/kbd_kern.h>
 #include <linux/vt.h>
 #include <linux/module.h>
+#include <linux/slab.h>
 #include "power.h"
 
 #define SUSPEND_CONSOLE	(MAX_NR_CONSOLES-1)
diff --git a/kernel/printk/printk.c b/kernel/printk/printk.c
index b1d255f..4dae9cb 100644
--- a/kernel/printk/printk.c
+++ b/kernel/printk/printk.c
@@ -1076,7 +1076,6 @@
 		next_seq = log_next_seq;
 
 		len = 0;
-		prev = 0;
 		while (len >= 0 && seq < next_seq) {
 			struct printk_log *msg = log_from_idx(idx);
 			int textlen;
@@ -2788,7 +2787,6 @@
 	next_idx = idx;
 
 	l = 0;
-	prev = 0;
 	while (seq < dumper->next_seq) {
 		struct printk_log *msg = log_from_idx(idx);
 
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index 7fea865..b46131e 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -2476,7 +2476,7 @@
 	if (time_before_eq(next, now))
 		return 0;
 
-	return jiffies_to_usecs(next - now) * NSEC_PER_USEC;
+	return jiffies_to_nsecs(next - now);
 }
 #endif
 
@@ -4347,7 +4347,9 @@
 		goto out_unlock;
 
 	rq = task_rq_lock(p, &flags);
-	time_slice = p->sched_class->get_rr_interval(rq, p);
+	time_slice = 0;
+	if (p->sched_class->get_rr_interval)
+		time_slice = p->sched_class->get_rr_interval(rq, p);
 	task_rq_unlock(rq, p, &flags);
 
 	rcu_read_unlock();
diff --git a/kernel/sched/deadline.c b/kernel/sched/deadline.c
index 0de2482..0dd5e09 100644
--- a/kernel/sched/deadline.c
+++ b/kernel/sched/deadline.c
@@ -351,7 +351,8 @@
  * disrupting the schedulability of the system. Otherwise, we should
  * refill the runtime and set the deadline a period in the future,
  * because keeping the current (absolute) deadline of the task would
- * result in breaking guarantees promised to other tasks.
+ * result in breaking guarantees promised to other tasks (refer to
+ * Documentation/scheduler/sched-deadline.txt for more information).
  *
  * This function returns true if:
  *
diff --git a/kernel/smp.c b/kernel/smp.c
index bd9f940..ffee35b 100644
--- a/kernel/smp.c
+++ b/kernel/smp.c
@@ -23,17 +23,11 @@
 struct call_function_data {
 	struct call_single_data	__percpu *csd;
 	cpumask_var_t		cpumask;
-	cpumask_var_t		cpumask_ipi;
 };
 
 static DEFINE_PER_CPU_SHARED_ALIGNED(struct call_function_data, cfd_data);
 
-struct call_single_queue {
-	struct list_head	list;
-	raw_spinlock_t		lock;
-};
-
-static DEFINE_PER_CPU_SHARED_ALIGNED(struct call_single_queue, call_single_queue);
+static DEFINE_PER_CPU_SHARED_ALIGNED(struct llist_head, call_single_queue);
 
 static int
 hotplug_cfd(struct notifier_block *nfb, unsigned long action, void *hcpu)
@@ -47,14 +41,8 @@
 		if (!zalloc_cpumask_var_node(&cfd->cpumask, GFP_KERNEL,
 				cpu_to_node(cpu)))
 			return notifier_from_errno(-ENOMEM);
-		if (!zalloc_cpumask_var_node(&cfd->cpumask_ipi, GFP_KERNEL,
-				cpu_to_node(cpu))) {
-			free_cpumask_var(cfd->cpumask);
-			return notifier_from_errno(-ENOMEM);
-		}
 		cfd->csd = alloc_percpu(struct call_single_data);
 		if (!cfd->csd) {
-			free_cpumask_var(cfd->cpumask_ipi);
 			free_cpumask_var(cfd->cpumask);
 			return notifier_from_errno(-ENOMEM);
 		}
@@ -67,7 +55,6 @@
 	case CPU_DEAD:
 	case CPU_DEAD_FROZEN:
 		free_cpumask_var(cfd->cpumask);
-		free_cpumask_var(cfd->cpumask_ipi);
 		free_percpu(cfd->csd);
 		break;
 #endif
@@ -85,12 +72,8 @@
 	void *cpu = (void *)(long)smp_processor_id();
 	int i;
 
-	for_each_possible_cpu(i) {
-		struct call_single_queue *q = &per_cpu(call_single_queue, i);
-
-		raw_spin_lock_init(&q->lock);
-		INIT_LIST_HEAD(&q->list);
-	}
+	for_each_possible_cpu(i)
+		init_llist_head(&per_cpu(call_single_queue, i));
 
 	hotplug_cfd(&hotplug_cfd_notifier, CPU_UP_PREPARE, cpu);
 	register_cpu_notifier(&hotplug_cfd_notifier);
@@ -141,18 +124,9 @@
  */
 static void generic_exec_single(int cpu, struct call_single_data *csd, int wait)
 {
-	struct call_single_queue *dst = &per_cpu(call_single_queue, cpu);
-	unsigned long flags;
-	int ipi;
-
 	if (wait)
 		csd->flags |= CSD_FLAG_WAIT;
 
-	raw_spin_lock_irqsave(&dst->lock, flags);
-	ipi = list_empty(&dst->list);
-	list_add_tail(&csd->list, &dst->list);
-	raw_spin_unlock_irqrestore(&dst->lock, flags);
-
 	/*
 	 * The list addition should be visible before sending the IPI
 	 * handler locks the list to pull the entry off it because of
@@ -164,7 +138,7 @@
 	 * locking and barrier primitives. Generic code isn't really
 	 * equipped to do the right thing...
 	 */
-	if (ipi)
+	if (llist_add(&csd->llist, &per_cpu(call_single_queue, cpu)))
 		arch_send_call_function_single_ipi(cpu);
 
 	if (wait)
@@ -177,27 +151,26 @@
  */
 void generic_smp_call_function_single_interrupt(void)
 {
-	struct call_single_queue *q = &__get_cpu_var(call_single_queue);
-	LIST_HEAD(list);
+	struct llist_node *entry, *next;
 
 	/*
 	 * Shouldn't receive this interrupt on a cpu that is not yet online.
 	 */
 	WARN_ON_ONCE(!cpu_online(smp_processor_id()));
 
-	raw_spin_lock(&q->lock);
-	list_replace_init(&q->list, &list);
-	raw_spin_unlock(&q->lock);
+	entry = llist_del_all(&__get_cpu_var(call_single_queue));
+	entry = llist_reverse_order(entry);
 
-	while (!list_empty(&list)) {
+	while (entry) {
 		struct call_single_data *csd;
 
-		csd = list_entry(list.next, struct call_single_data, list);
-		list_del(&csd->list);
+		next = entry->next;
 
+		csd = llist_entry(entry, struct call_single_data, llist);
 		csd->func(csd->info);
-
 		csd_unlock(csd);
+
+		entry = next;
 	}
 }
 
@@ -402,30 +375,17 @@
 	if (unlikely(!cpumask_weight(cfd->cpumask)))
 		return;
 
-	/*
-	 * After we put an entry into the list, cfd->cpumask may be cleared
-	 * again when another CPU sends another IPI for a SMP function call, so
-	 * cfd->cpumask will be zero.
-	 */
-	cpumask_copy(cfd->cpumask_ipi, cfd->cpumask);
-
 	for_each_cpu(cpu, cfd->cpumask) {
 		struct call_single_data *csd = per_cpu_ptr(cfd->csd, cpu);
-		struct call_single_queue *dst =
-					&per_cpu(call_single_queue, cpu);
-		unsigned long flags;
 
 		csd_lock(csd);
 		csd->func = func;
 		csd->info = info;
-
-		raw_spin_lock_irqsave(&dst->lock, flags);
-		list_add_tail(&csd->list, &dst->list);
-		raw_spin_unlock_irqrestore(&dst->lock, flags);
+		llist_add(&csd->llist, &per_cpu(call_single_queue, cpu));
 	}
 
 	/* Send a message to all CPUs in the map */
-	arch_send_call_function_ipi_mask(cfd->cpumask_ipi);
+	arch_send_call_function_ipi_mask(cfd->cpumask);
 
 	if (wait) {
 		for_each_cpu(cpu, cfd->cpumask) {
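The kernel/smp.c rework above drops the per-cpu spinlock-protected call_single_queue in favour of a lock-free llist: the producer learns from llist_add() whether the list was empty (and therefore whether an IPI must be sent), and the consumer detaches everything with llist_del_all() and restores FIFO order with llist_reverse_order(). Below is a minimal user-space sketch of that pattern written with C11 atomics; the names lpush/ldel_all/lreverse are illustrative stand-ins, not the kernel API.

    #include <stdatomic.h>
    #include <stdbool.h>
    #include <stddef.h>

    struct lnode { struct lnode *next; };
    struct lhead { _Atomic(struct lnode *) first; };

    /* Push one node; returns true if the list was empty before the push,
     * mirroring what the caller uses to decide whether to send an IPI. */
    static bool lpush(struct lnode *n, struct lhead *h)
    {
    	struct lnode *old = atomic_load(&h->first);

    	do {
    		n->next = old;
    	} while (!atomic_compare_exchange_weak(&h->first, &old, n));

    	return old == NULL;
    }

    /* Consumer side: detach the whole chain in one atomic exchange. */
    static struct lnode *ldel_all(struct lhead *h)
    {
    	return atomic_exchange(&h->first, NULL);
    }

    /* The chain comes out LIFO; reverse it to process in submission order. */
    static struct lnode *lreverse(struct lnode *n)
    {
    	struct lnode *rev = NULL;

    	while (n) {
    		struct lnode *next = n->next;

    		n->next = rev;
    		rev = n;
    		n = next;
    	}
    	return rev;
    }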
diff --git a/kernel/softirq.c b/kernel/softirq.c
index 8509670..490fcbb 100644
--- a/kernel/softirq.c
+++ b/kernel/softirq.c
@@ -328,7 +328,7 @@
 		 * here, as softirq will be serviced on return from interrupt.
 		 */
 		local_bh_disable();
-		tick_check_idle();
+		tick_irq_enter();
 		_local_bh_enable();
 	}
 
diff --git a/kernel/sysctl.c b/kernel/sysctl.c
index 096db74..49e13e1 100644
--- a/kernel/sysctl.c
+++ b/kernel/sysctl.c
@@ -121,6 +121,8 @@
 static int sixty = 60;
 #endif
 
+static int __maybe_unused neg_one = -1;
+
 static int zero;
 static int __maybe_unused one = 1;
 static int __maybe_unused two = 2;
@@ -997,9 +999,10 @@
 	{
 		.procname	= "hung_task_warnings",
 		.data		= &sysctl_hung_task_warnings,
-		.maxlen		= sizeof(unsigned long),
+		.maxlen		= sizeof(int),
 		.mode		= 0644,
-		.proc_handler	= proc_doulongvec_minmax,
+		.proc_handler	= proc_dointvec_minmax,
+		.extra1		= &neg_one,
 	},
 #endif
 #ifdef CONFIG_COMPAT
diff --git a/kernel/time/jiffies.c b/kernel/time/jiffies.c
index 7a925ba..a6a5bf5 100644
--- a/kernel/time/jiffies.c
+++ b/kernel/time/jiffies.c
@@ -51,7 +51,13 @@
  * HZ shrinks, so values greater than 8 overflow 32bits when
  * HZ=100.
  */
+#if HZ < 34
+#define JIFFIES_SHIFT	6
+#elif HZ < 67
+#define JIFFIES_SHIFT	7
+#else
 #define JIFFIES_SHIFT	8
+#endif
 
 static cycle_t jiffies_read(struct clocksource *cs)
 {
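The HZ-dependent JIFFIES_SHIFT introduced above exists because the jiffies clocksource multiplier, roughly the nanoseconds per jiffy shifted left by JIFFIES_SHIFT, has to fit in 32 bits; low HZ values such as 24 or 48 overflow with the old fixed shift of 8. A standalone sketch of that overflow check, using NSEC_PER_SEC / HZ as an approximation of the kernel's NSEC_PER_JIFFY (the kernel's exact cut-offs of 34 and 67 include some margin):

    #include <stdint.h>
    #include <stdio.h>

    #define NSEC_PER_SEC	1000000000ULL

    /* Does (ns-per-jiffy << shift) still fit in a 32-bit multiplier? */
    static int shift_fits(unsigned int hz, unsigned int shift)
    {
    	uint64_t mult = (NSEC_PER_SEC / hz) << shift;

    	return mult <= UINT32_MAX;
    }

    int main(void)
    {
    	unsigned int hz[] = { 24, 48, 100, 250, 1000 };
    	unsigned int i;

    	for (i = 0; i < sizeof(hz) / sizeof(hz[0]); i++)
    		printf("HZ=%-4u shift 8: %-8s shift 7: %-8s shift 6: %s\n",
    		       hz[i],
    		       shift_fits(hz[i], 8) ? "ok" : "overflow",
    		       shift_fits(hz[i], 7) ? "ok" : "overflow",
    		       shift_fits(hz[i], 6) ? "ok" : "overflow");
    	return 0;
    }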
diff --git a/kernel/time/tick-broadcast.c b/kernel/time/tick-broadcast.c
index 43780ab..98977a5 100644
--- a/kernel/time/tick-broadcast.c
+++ b/kernel/time/tick-broadcast.c
@@ -756,6 +756,7 @@
 static void tick_broadcast_clear_oneshot(int cpu)
 {
 	cpumask_clear_cpu(cpu, tick_broadcast_oneshot_mask);
+	cpumask_clear_cpu(cpu, tick_broadcast_pending_mask);
 }
 
 static void tick_broadcast_init_next_event(struct cpumask *mask,
diff --git a/kernel/time/tick-sched.c b/kernel/time/tick-sched.c
index 08cb0c3..9f8af69 100644
--- a/kernel/time/tick-sched.c
+++ b/kernel/time/tick-sched.c
@@ -533,12 +533,13 @@
 	struct clock_event_device *dev = __get_cpu_var(tick_cpu_device).evtdev;
 	u64 time_delta;
 
+	time_delta = timekeeping_max_deferment();
+
 	/* Read jiffies and the time when jiffies were updated last */
 	do {
 		seq = read_seqbegin(&jiffies_lock);
 		last_update = last_jiffies_update;
 		last_jiffies = jiffies;
-		time_delta = timekeeping_max_deferment();
 	} while (read_seqretry(&jiffies_lock, seq));
 
 	if (rcu_needs_cpu(cpu, &rcu_delta_jiffies) ||
@@ -678,18 +679,18 @@
 static void tick_nohz_full_stop_tick(struct tick_sched *ts)
 {
 #ifdef CONFIG_NO_HZ_FULL
-       int cpu = smp_processor_id();
+	int cpu = smp_processor_id();
 
-       if (!tick_nohz_full_cpu(cpu) || is_idle_task(current))
-               return;
+	if (!tick_nohz_full_cpu(cpu) || is_idle_task(current))
+		return;
 
-       if (!ts->tick_stopped && ts->nohz_mode == NOHZ_MODE_INACTIVE)
-	       return;
+	if (!ts->tick_stopped && ts->nohz_mode == NOHZ_MODE_INACTIVE)
+		return;
 
-       if (!can_stop_full_tick())
-               return;
+	if (!can_stop_full_tick())
+		return;
 
-       tick_nohz_stop_sched_tick(ts, ktime_get(), cpu);
+	tick_nohz_stop_sched_tick(ts, ktime_get(), cpu);
 #endif
 }
 
@@ -1023,7 +1024,7 @@
 #endif
 }
 
-static inline void tick_check_nohz_this_cpu(void)
+static inline void tick_nohz_irq_enter(void)
 {
 	struct tick_sched *ts = &__get_cpu_var(tick_cpu_sched);
 	ktime_t now;
@@ -1042,17 +1043,17 @@
 #else
 
 static inline void tick_nohz_switch_to_nohz(void) { }
-static inline void tick_check_nohz_this_cpu(void) { }
+static inline void tick_nohz_irq_enter(void) { }
 
 #endif /* CONFIG_NO_HZ_COMMON */
 
 /*
  * Called from irq_enter to notify about the possible interruption of idle()
  */
-void tick_check_idle(void)
+void tick_irq_enter(void)
 {
 	tick_check_oneshot_broadcast_this_cpu();
-	tick_check_nohz_this_cpu();
+	tick_nohz_irq_enter();
 }
 
 /*
diff --git a/kernel/trace/blktrace.c b/kernel/trace/blktrace.c
index f785aef..b418cb0 100644
--- a/kernel/trace/blktrace.c
+++ b/kernel/trace/blktrace.c
@@ -781,8 +781,8 @@
 	if (!error && !bio_flagged(bio, BIO_UPTODATE))
 		error = EIO;
 
-	__blk_add_trace(bt, bio->bi_sector, bio->bi_size, bio->bi_rw, what,
-			error, 0, NULL);
+	__blk_add_trace(bt, bio->bi_iter.bi_sector, bio->bi_iter.bi_size,
+			bio->bi_rw, what, error, 0, NULL);
 }
 
 static void blk_add_trace_bio_bounce(void *ignore,
@@ -885,8 +885,9 @@
 	if (bt) {
 		__be64 rpdu = cpu_to_be64(pdu);
 
-		__blk_add_trace(bt, bio->bi_sector, bio->bi_size, bio->bi_rw,
-				BLK_TA_SPLIT, !bio_flagged(bio, BIO_UPTODATE),
+		__blk_add_trace(bt, bio->bi_iter.bi_sector,
+				bio->bi_iter.bi_size, bio->bi_rw, BLK_TA_SPLIT,
+				!bio_flagged(bio, BIO_UPTODATE),
 				sizeof(rpdu), &rpdu);
 	}
 }
@@ -918,9 +919,9 @@
 	r.device_to   = cpu_to_be32(bio->bi_bdev->bd_dev);
 	r.sector_from = cpu_to_be64(from);
 
-	__blk_add_trace(bt, bio->bi_sector, bio->bi_size, bio->bi_rw,
-			BLK_TA_REMAP, !bio_flagged(bio, BIO_UPTODATE),
-			sizeof(r), &r);
+	__blk_add_trace(bt, bio->bi_iter.bi_sector, bio->bi_iter.bi_size,
+			bio->bi_rw, BLK_TA_REMAP,
+			!bio_flagged(bio, BIO_UPTODATE), sizeof(r), &r);
 }
 
 /**
diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c
index 294b8a2..fc4da2d 100644
--- a/kernel/trace/ring_buffer.c
+++ b/kernel/trace/ring_buffer.c
@@ -2397,6 +2397,13 @@
 	write &= RB_WRITE_MASK;
 	tail = write - length;
 
+	/*
+	 * If this is the first commit on the page, then it has the same
+	 * timestamp as the page itself.
+	 */
+	if (!tail)
+		delta = 0;
+
 	/* See if we shot pass the end of this buffer page */
 	if (unlikely(write > BUF_PAGE_SIZE))
 		return rb_move_tail(cpu_buffer, length, tail,
diff --git a/kernel/user_namespace.c b/kernel/user_namespace.c
index 240fb62..dd06439 100644
--- a/kernel/user_namespace.c
+++ b/kernel/user_namespace.c
@@ -225,7 +225,7 @@
  *
  *	When there is no mapping defined for the user-namespace uid
  *	pair INVALID_UID is returned.  Callers are expected to test
- *	for and handle handle INVALID_UID being returned.  INVALID_UID
+ *	for and handle INVALID_UID being returned.  INVALID_UID
  *	may be tested for using uid_valid().
  */
 kuid_t make_kuid(struct user_namespace *ns, uid_t uid)
diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index 82ef9f3..193e977 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -1851,6 +1851,12 @@
 	if (worker->flags & WORKER_IDLE)
 		pool->nr_idle--;
 
+	/*
+	 * Once WORKER_DIE is set, the kworker may destroy itself at any
+	 * point.  Pin to ensure the task stays until we're done with it.
+	 */
+	get_task_struct(worker->task);
+
 	list_del_init(&worker->entry);
 	worker->flags |= WORKER_DIE;
 
@@ -1859,6 +1865,7 @@
 	spin_unlock_irq(&pool->lock);
 
 	kthread_stop(worker->task);
+	put_task_struct(worker->task);
 	kfree(worker);
 
 	spin_lock_irq(&pool->lock);
diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
index dbf94a7..a48abea 100644
--- a/lib/Kconfig.debug
+++ b/lib/Kconfig.debug
@@ -119,7 +119,7 @@
 
 config DEBUG_INFO
 	bool "Compile the kernel with debug info"
-	depends on DEBUG_KERNEL
+	depends on DEBUG_KERNEL && !COMPILE_TEST
 	help
           If you say Y here the resulting kernel image will include
 	  debugging info resulting in a larger kernel image.
diff --git a/lib/Makefile b/lib/Makefile
index 126b34f..48140e3 100644
--- a/lib/Makefile
+++ b/lib/Makefile
@@ -45,6 +45,7 @@
 obj-$(CONFIG_CHECK_SIGNATURE) += check_signature.o
 obj-$(CONFIG_DEBUG_LOCKING_API_SELFTESTS) += locking-selftest.o
 
+GCOV_PROFILE_hweight.o := n
 CFLAGS_hweight.o = $(subst $(quote),,$(CONFIG_ARCH_HWEIGHT_CFLAGS))
 obj-$(CONFIG_GENERIC_HWEIGHT) += hweight.o
 
diff --git a/lib/dma-debug.c b/lib/dma-debug.c
index c380838..2defd13 100644
--- a/lib/dma-debug.c
+++ b/lib/dma-debug.c
@@ -463,7 +463,7 @@
 	int i;
 
 	if (overlap > ACTIVE_PFN_MAX_OVERLAP || overlap < 0)
-		return 0;
+		return overlap;
 
 	for (i = RADIX_TREE_MAX_TAGS - 1; i >= 0; i--)
 		if (overlap & 1 << i)
@@ -486,7 +486,7 @@
 	 * debug_dma_assert_idle() as the pfn may be marked idle
 	 * prematurely.
 	 */
-	WARN_ONCE(overlap == 0,
+	WARN_ONCE(overlap > ACTIVE_PFN_MAX_OVERLAP,
 		  "DMA-API: exceeded %d overlapping mappings of pfn %lx\n",
 		  ACTIVE_PFN_MAX_OVERLAP, pfn);
 }
@@ -517,7 +517,11 @@
 	unsigned long flags;
 
 	spin_lock_irqsave(&radix_lock, flags);
-	if (active_pfn_dec_overlap(entry->pfn) == 0)
+	/* Since we are counting overlaps, the final put of the
+	 * entry->pfn will occur when the overlap count is 0;
+	 * active_pfn_dec_overlap() returns -1 in that case.
+	 */
+	if (active_pfn_dec_overlap(entry->pfn) < 0)
 		radix_tree_delete(&dma_active_pfn, entry->pfn);
 	spin_unlock_irqrestore(&radix_lock, flags);
 }
diff --git a/lib/fonts/Kconfig b/lib/fonts/Kconfig
index 34fd931..4dc1b99 100644
--- a/lib/fonts/Kconfig
+++ b/lib/fonts/Kconfig
@@ -9,7 +9,7 @@
 
 config FONTS
 	bool "Select compiled-in fonts"
-	depends on FRAMEBUFFER_CONSOLE || STI_CONSOLE
+	depends on FRAMEBUFFER_CONSOLE
 	help
 	  Say Y here if you would like to use fonts other than the default
 	  your frame buffer console usually use.
@@ -22,7 +22,7 @@
 
 config FONT_8x8
 	bool "VGA 8x8 font" if FONTS
-	depends on FRAMEBUFFER_CONSOLE || STI_CONSOLE
+	depends on FRAMEBUFFER_CONSOLE
 	default y if !SPARC && !FONTS
 	help
 	  This is the "high resolution" font for the VGA frame buffer (the one
@@ -45,7 +45,7 @@
 
 config FONT_6x11
 	bool "Mac console 6x11 font (not supported by all drivers)" if FONTS
-	depends on FRAMEBUFFER_CONSOLE || STI_CONSOLE
+	depends on FRAMEBUFFER_CONSOLE
 	default y if !SPARC && !FONTS && MAC
 	help
 	  Small console font with Macintosh-style high-half glyphs.  Some Mac
diff --git a/lib/genalloc.c b/lib/genalloc.c
index dda3116..bdb9a45 100644
--- a/lib/genalloc.c
+++ b/lib/genalloc.c
@@ -316,7 +316,7 @@
  * gen_pool_dma_alloc - allocate special memory from the pool for DMA usage
  * @pool: pool to allocate from
  * @size: number of bytes to allocate from the pool
- * @dma: dma-view physical address
+ * @dma: dma-view physical address return value.  Use NULL if unneeded.
  *
  * Allocate the requested number of bytes from the specified pool.
  * Uses the pool allocation function (with first-fit algorithm by default).
@@ -334,7 +334,8 @@
 	if (!vaddr)
 		return NULL;
 
-	*dma = gen_pool_virt_to_phys(pool, vaddr);
+	if (dma)
+		*dma = gen_pool_virt_to_phys(pool, vaddr);
 
 	return (void *)vaddr;
 }
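With the gen_pool_dma_alloc() change above, the @dma output argument becomes optional. An illustrative caller-side fragment (kernel context assumed; pool and size are hypothetical locals):

    	/* Only the virtual address is needed, so no DMA handle is requested. */
    	void *vaddr = gen_pool_dma_alloc(pool, size, NULL);
    	if (!vaddr)
    		return -ENOMEM;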
diff --git a/lib/kobject.c b/lib/kobject.c
index b0b2666..cb14aea 100644
--- a/lib/kobject.c
+++ b/lib/kobject.c
@@ -779,6 +779,7 @@
 	.show	= kobj_attr_show,
 	.store	= kobj_attr_store,
 };
+EXPORT_SYMBOL_GPL(kobj_sysfs_ops);
 
 /**
  * kset_register - initialize and add a kset.
diff --git a/lib/percpu_ida.c b/lib/percpu_ida.c
index 9d054bf..93d145e 100644
--- a/lib/percpu_ida.c
+++ b/lib/percpu_ida.c
@@ -54,9 +54,7 @@
 /*
  * Try to steal tags from a remote cpu's percpu freelist.
  *
- * We first check how many percpu freelists have tags - we don't steal tags
- * unless enough percpu freelists have tags on them that it's possible more than
- * half the total tags could be stuck on remote percpu freelists.
+ * We first check how many percpu freelists have tags.
  *
  * Then we iterate through the cpus until we find some tags - we don't attempt
  * to find the "best" cpu to steal from, to keep cacheline bouncing to a
@@ -69,8 +67,7 @@
 	struct percpu_ida_cpu *remote;
 
 	for (cpus_have_tags = cpumask_weight(&pool->cpus_have_tags);
-	     cpus_have_tags * pool->percpu_max_size > pool->nr_tags / 2;
-	     cpus_have_tags--) {
+	     cpus_have_tags; cpus_have_tags--) {
 		cpu = cpumask_next(cpu, &pool->cpus_have_tags);
 
 		if (cpu >= nr_cpu_ids) {
@@ -132,22 +129,22 @@
 /**
  * percpu_ida_alloc - allocate a tag
  * @pool: pool to allocate from
- * @gfp: gfp flags
+ * @state: task state for prepare_to_wait
  *
  * Returns a tag - an integer in the range [0..nr_tags) (passed to
  * tag_pool_init()), or otherwise -ENOSPC on allocation failure.
  *
  * Safe to be called from interrupt context (assuming it isn't passed
- * __GFP_WAIT, of course).
+ * TASK_UNINTERRUPTIBLE | TASK_INTERRUPTIBLE, of course).
  *
  * @gfp indicates whether or not to wait until a free id is available (it's not
  * used for internal memory allocations); thus if passed __GFP_WAIT we may sleep
  * however long it takes until another thread frees an id (same semantics as a
  * mempool).
  *
- * Will not fail if passed __GFP_WAIT.
+ * Will not fail if passed TASK_UNINTERRUPTIBLE | TASK_INTERRUPTIBLE.
  */
-int percpu_ida_alloc(struct percpu_ida *pool, gfp_t gfp)
+int percpu_ida_alloc(struct percpu_ida *pool, int state)
 {
 	DEFINE_WAIT(wait);
 	struct percpu_ida_cpu *tags;
@@ -174,7 +171,8 @@
 		 *
 		 * global lock held and irqs disabled, don't need percpu lock
 		 */
-		prepare_to_wait(&pool->wait, &wait, TASK_UNINTERRUPTIBLE);
+		if (state != TASK_RUNNING)
+			prepare_to_wait(&pool->wait, &wait, state);
 
 		if (!tags->nr_free)
 			alloc_global_tags(pool, tags);
@@ -191,16 +189,22 @@
 		spin_unlock(&pool->lock);
 		local_irq_restore(flags);
 
-		if (tag >= 0 || !(gfp & __GFP_WAIT))
+		if (tag >= 0 || state == TASK_RUNNING)
 			break;
 
+		if (signal_pending_state(state, current)) {
+			tag = -ERESTARTSYS;
+			break;
+		}
+
 		schedule();
 
 		local_irq_save(flags);
 		tags = this_cpu_ptr(pool->tag_cpu);
 	}
+	if (state != TASK_RUNNING)
+		finish_wait(&pool->wait, &wait);
 
-	finish_wait(&pool->wait, &wait);
 	return tag;
 }
 EXPORT_SYMBOL_GPL(percpu_ida_alloc);
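After this conversion percpu_ida_alloc() takes a task state instead of gfp flags, so the caller chooses whether the allocation may sleep and whether a signal may abort the wait. An illustrative caller-side fragment based on the new signature (kernel context assumed):

    	int tag;

    	/* TASK_RUNNING: never sleeps, returns -ENOSPC when no tag is free. */
    	tag = percpu_ida_alloc(pool, TASK_RUNNING);

    	/* TASK_INTERRUPTIBLE: sleeps until another thread frees a tag, but a
    	 * pending signal aborts the wait with -ERESTARTSYS. */
    	tag = percpu_ida_alloc(pool, TASK_INTERRUPTIBLE);
    	if (tag < 0)
    		return tag;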
diff --git a/mm/Kconfig b/mm/Kconfig
index 723bbe0..2d9f150 100644
--- a/mm/Kconfig
+++ b/mm/Kconfig
@@ -552,3 +552,28 @@
 	  it can be cleared by hands.
 
 	  See Documentation/vm/soft-dirty.txt for more details.
+
+config ZSMALLOC
+	bool "Memory allocator for compressed pages"
+	depends on MMU
+	default n
+	help
+	  zsmalloc is a slab-based memory allocator designed to store
+	  compressed RAM pages.  zsmalloc uses virtual memory mapping
+	  in order to reduce fragmentation.  However, this results in a
+	  non-standard allocator interface where a handle, not a pointer, is
+	  returned by an alloc().  This handle must be mapped in order to
+	  access the allocated space.
+
+config PGTABLE_MAPPING
+	bool "Use page table mapping to access object in zsmalloc"
+	depends on ZSMALLOC
+	help
+	  By default, zsmalloc uses a copy-based object mapping method to
+	  access allocations that span two pages. However, if a particular
+	  architecture (ex, ARM) performs VM mapping faster than copying,
+	  then you should select this. This causes zsmalloc to use page table
+	  mapping rather than copying for object mapping.
+
+	  You can check speed with zsmalloc benchmark[1].
+	  [1] https://github.com/spartacus06/zsmalloc
diff --git a/mm/Makefile b/mm/Makefile
index 305d10a..310c90a 100644
--- a/mm/Makefile
+++ b/mm/Makefile
@@ -60,3 +60,4 @@
 obj-$(CONFIG_CLEANCACHE) += cleancache.o
 obj-$(CONFIG_MEMORY_ISOLATION) += page_isolation.o
 obj-$(CONFIG_ZBUD)	+= zbud.o
+obj-$(CONFIG_ZSMALLOC)	+= zsmalloc.o
diff --git a/mm/bounce.c b/mm/bounce.c
index 5a7d58f..523918b 100644
--- a/mm/bounce.c
+++ b/mm/bounce.c
@@ -98,27 +98,24 @@
 static void copy_to_high_bio_irq(struct bio *to, struct bio *from)
 {
 	unsigned char *vfrom;
-	struct bio_vec *tovec, *fromvec;
-	int i;
+	struct bio_vec tovec, *fromvec = from->bi_io_vec;
+	struct bvec_iter iter;
 
-	bio_for_each_segment(tovec, to, i) {
-		fromvec = from->bi_io_vec + i;
+	bio_for_each_segment(tovec, to, iter) {
+		if (tovec.bv_page != fromvec->bv_page) {
+			/*
+			 * fromvec->bv_offset and fromvec->bv_len might have
+			 * been modified by the block layer, so use the original
+			 * copy, bounce_copy_vec already uses tovec->bv_len
+			 */
+			vfrom = page_address(fromvec->bv_page) +
+				tovec.bv_offset;
 
-		/*
-		 * not bounced
-		 */
-		if (tovec->bv_page == fromvec->bv_page)
-			continue;
+			bounce_copy_vec(&tovec, vfrom);
+			flush_dcache_page(tovec.bv_page);
+		}
 
-		/*
-		 * fromvec->bv_offset and fromvec->bv_len might have been
-		 * modified by the block layer, so use the original copy,
-		 * bounce_copy_vec already uses tovec->bv_len
-		 */
-		vfrom = page_address(fromvec->bv_page) + tovec->bv_offset;
-
-		bounce_copy_vec(tovec, vfrom);
-		flush_dcache_page(tovec->bv_page);
+		fromvec++;
 	}
 }
 
@@ -201,13 +198,14 @@
 {
 	struct bio *bio;
 	int rw = bio_data_dir(*bio_orig);
-	struct bio_vec *to, *from;
+	struct bio_vec *to, from;
+	struct bvec_iter iter;
 	unsigned i;
 
 	if (force)
 		goto bounce;
-	bio_for_each_segment(from, *bio_orig, i)
-		if (page_to_pfn(from->bv_page) > queue_bounce_pfn(q))
+	bio_for_each_segment(from, *bio_orig, iter)
+		if (page_to_pfn(from.bv_page) > queue_bounce_pfn(q))
 			goto bounce;
 
 	return;
diff --git a/mm/filemap.c b/mm/filemap.c
index d56d3c1..7a13f6a 100644
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -2553,8 +2553,8 @@
 	if (ret > 0) {
 		ssize_t err;
 
-		err = generic_write_sync(file, pos, ret);
-		if (err < 0 && ret > 0)
+		err = generic_write_sync(file, iocb->ki_pos - ret, ret);
+		if (err < 0)
 			ret = err;
 	}
 	return ret;
diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index 82166bf..da23eb9 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -1545,6 +1545,7 @@
 				entry = pmd_mknonnuma(entry);
 			entry = pmd_modify(entry, newprot);
 			ret = HPAGE_PMD_NR;
+			set_pmd_at(mm, addr, pmd, entry);
 			BUG_ON(pmd_write(entry));
 		} else {
 			struct page *page = pmd_page(*pmd);
@@ -1557,16 +1558,10 @@
 			 */
 			if (!is_huge_zero_page(page) &&
 			    !pmd_numa(*pmd)) {
-				entry = *pmd;
-				entry = pmd_mknuma(entry);
+				pmdp_set_numa(mm, addr, pmd);
 				ret = HPAGE_PMD_NR;
 			}
 		}
-
-		/* Set PMD if cleared earlier */
-		if (ret == HPAGE_PMD_NR)
-			set_pmd_at(mm, addr, pmd, entry);
-
 		spin_unlock(ptl);
 	}
 
diff --git a/mm/internal.h b/mm/internal.h
index 612c14f..29e1e76 100644
--- a/mm/internal.h
+++ b/mm/internal.h
@@ -83,7 +83,6 @@
  */
 extern int isolate_lru_page(struct page *page);
 extern void putback_lru_page(struct page *page);
-extern unsigned long zone_reclaimable_pages(struct zone *zone);
 extern bool zone_reclaimable(struct zone *zone);
 
 /*
diff --git a/mm/memblock.c b/mm/memblock.c
index 87d21a6..39a31e7 100644
--- a/mm/memblock.c
+++ b/mm/memblock.c
@@ -1077,6 +1077,9 @@
 	if (!align)
 		align = SMP_CACHE_BYTES;
 
+	if (max_addr > memblock.current_limit)
+		max_addr = memblock.current_limit;
+
 again:
 	alloc = memblock_find_in_range_node(size, align, min_addr, max_addr,
 					    nid);
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index 19d5d42..53385cd 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -3400,7 +3400,7 @@
 static struct kmem_cache *memcg_create_kmem_cache(struct mem_cgroup *memcg,
 						  struct kmem_cache *s)
 {
-	struct kmem_cache *new;
+	struct kmem_cache *new = NULL;
 	static char *tmp_name = NULL;
 	static DEFINE_MUTEX(mutex);	/* protects tmp_name */
 
@@ -3416,7 +3416,7 @@
 	if (!tmp_name) {
 		tmp_name = kmalloc(PATH_MAX, GFP_KERNEL);
 		if (!tmp_name)
-			return NULL;
+			goto out;
 	}
 
 	rcu_read_lock();
@@ -3426,12 +3426,11 @@
 
 	new = kmem_cache_create_memcg(memcg, tmp_name, s->object_size, s->align,
 				      (s->flags & ~SLAB_PANIC), s->ctor, s);
-
 	if (new)
 		new->allocflags |= __GFP_KMEMCG;
 	else
 		new = s;
-
+out:
 	mutex_unlock(&mutex);
 	return new;
 }
diff --git a/mm/memory-failure.c b/mm/memory-failure.c
index 4f08a2d..2f2f34a 100644
--- a/mm/memory-failure.c
+++ b/mm/memory-failure.c
@@ -945,8 +945,10 @@
 			 * to it. Similarly, page lock is shifted.
 			 */
 			if (hpage != p) {
-				put_page(hpage);
-				get_page(p);
+				if (!(flags & MF_COUNT_INCREASED)) {
+					put_page(hpage);
+					get_page(p);
+				}
 				lock_page(p);
 				unlock_page(hpage);
 				*hpagep = p;
diff --git a/mm/mempolicy.c b/mm/mempolicy.c
index 36cb46c..ae3c8f3 100644
--- a/mm/mempolicy.c
+++ b/mm/mempolicy.c
@@ -2654,7 +2654,7 @@
 }
 
 #ifdef CONFIG_NUMA_BALANCING
-static bool __initdata numabalancing_override;
+static int __initdata numabalancing_override;
 
 static void __init check_numabalancing_enable(void)
 {
@@ -2663,9 +2663,15 @@
 	if (IS_ENABLED(CONFIG_NUMA_BALANCING_DEFAULT_ENABLED))
 		numabalancing_default = true;
 
+	/* Parsed by setup_numabalancing. override == 1 enables, -1 disables */
+	if (numabalancing_override)
+		set_numabalancing_state(numabalancing_override == 1);
+
 	if (nr_node_ids > 1 && !numabalancing_override) {
-		printk(KERN_INFO "Enabling automatic NUMA balancing. "
-			"Configure with numa_balancing= or the kernel.numa_balancing sysctl");
+		pr_info("%s automatic NUMA balancing. "
+			"Configure with numa_balancing= or the "
+			"kernel.numa_balancing sysctl",
+			numabalancing_default ? "Enabling" : "Disabling");
 		set_numabalancing_state(numabalancing_default);
 	}
 }
@@ -2675,18 +2681,17 @@
 	int ret = 0;
 	if (!str)
 		goto out;
-	numabalancing_override = true;
 
 	if (!strcmp(str, "enable")) {
-		set_numabalancing_state(true);
+		numabalancing_override = 1;
 		ret = 1;
 	} else if (!strcmp(str, "disable")) {
-		set_numabalancing_state(false);
+		numabalancing_override = -1;
 		ret = 1;
 	}
 out:
 	if (!ret)
-		printk(KERN_WARNING "Unable to parse numa_balancing=\n");
+		pr_warn("Unable to parse numa_balancing=\n");
 
 	return ret;
 }
@@ -2925,7 +2930,7 @@
 	unsigned short mode = MPOL_DEFAULT;
 	unsigned short flags = 0;
 
-	if (pol && pol != &default_policy) {
+	if (pol && pol != &default_policy && !(pol->flags & MPOL_F_MORON)) {
 		mode = pol->mode;
 		flags = pol->flags;
 	}
diff --git a/mm/mprotect.c b/mm/mprotect.c
index 7332c17..769a67a 100644
--- a/mm/mprotect.c
+++ b/mm/mprotect.c
@@ -58,36 +58,27 @@
 				if (pte_numa(ptent))
 					ptent = pte_mknonnuma(ptent);
 				ptent = pte_modify(ptent, newprot);
+				/*
+				 * Avoid taking write faults for pages we
+				 * know to be dirty.
+				 */
+				if (dirty_accountable && pte_dirty(ptent))
+					ptent = pte_mkwrite(ptent);
+				ptep_modify_prot_commit(mm, addr, pte, ptent);
 				updated = true;
 			} else {
 				struct page *page;
 
-				ptent = *pte;
 				page = vm_normal_page(vma, addr, oldpte);
 				if (page && !PageKsm(page)) {
 					if (!pte_numa(oldpte)) {
-						ptent = pte_mknuma(ptent);
-						set_pte_at(mm, addr, pte, ptent);
+						ptep_set_numa(mm, addr, pte);
 						updated = true;
 					}
 				}
 			}
-
-			/*
-			 * Avoid taking write faults for pages we know to be
-			 * dirty.
-			 */
-			if (dirty_accountable && pte_dirty(ptent)) {
-				ptent = pte_mkwrite(ptent);
-				updated = true;
-			}
-
 			if (updated)
 				pages++;
-
-			/* Only !prot_numa always clears the pte */
-			if (!prot_numa)
-				ptep_modify_prot_commit(mm, addr, pte, ptent);
 		} else if (IS_ENABLED(CONFIG_MIGRATION) && !pte_file(oldpte)) {
 			swp_entry_t entry = pte_to_swp_entry(oldpte);
 
diff --git a/mm/oom_kill.c b/mm/oom_kill.c
index 37b1b19..3291e82 100644
--- a/mm/oom_kill.c
+++ b/mm/oom_kill.c
@@ -178,7 +178,7 @@
 	 * implementation used by LSMs.
 	 */
 	if (has_capability_noaudit(p, CAP_SYS_ADMIN))
-		adj -= 30;
+		points -= (points * 3) / 100;
 
 	/* Normalize to oom_score_adj units */
 	adj *= totalpages / 1000;
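The CAP_SYS_ADMIN bonus above is now taken from the task's own badness points rather than from the oom_score_adj value: a privileged task charged 1000 points gets a discount of 1000 * 3 / 100 = 30 points, i.e. roughly 3% of its own usage, whereas the old adj -= 30 was later scaled by totalpages / 1000 and therefore amounted to roughly 3% of total memory regardless of how little the task used.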
diff --git a/mm/page-writeback.c b/mm/page-writeback.c
index 6380758..7106cb1 100644
--- a/mm/page-writeback.c
+++ b/mm/page-writeback.c
@@ -191,6 +191,26 @@
  * global dirtyable memory first.
  */
 
+/**
+ * zone_dirtyable_memory - number of dirtyable pages in a zone
+ * @zone: the zone
+ *
+ * Returns the zone's number of pages potentially available for dirty
+ * page cache.  This is the base value for the per-zone dirty limits.
+ */
+static unsigned long zone_dirtyable_memory(struct zone *zone)
+{
+	unsigned long nr_pages;
+
+	nr_pages = zone_page_state(zone, NR_FREE_PAGES);
+	nr_pages -= min(nr_pages, zone->dirty_balance_reserve);
+
+	nr_pages += zone_page_state(zone, NR_INACTIVE_FILE);
+	nr_pages += zone_page_state(zone, NR_ACTIVE_FILE);
+
+	return nr_pages;
+}
+
 static unsigned long highmem_dirtyable_memory(unsigned long total)
 {
 #ifdef CONFIG_HIGHMEM
@@ -198,11 +218,9 @@
 	unsigned long x = 0;
 
 	for_each_node_state(node, N_HIGH_MEMORY) {
-		struct zone *z =
-			&NODE_DATA(node)->node_zones[ZONE_HIGHMEM];
+		struct zone *z = &NODE_DATA(node)->node_zones[ZONE_HIGHMEM];
 
-		x += zone_page_state(z, NR_FREE_PAGES) +
-		     zone_reclaimable_pages(z) - z->dirty_balance_reserve;
+		x += zone_dirtyable_memory(z);
 	}
 	/*
 	 * Unreclaimable memory (kernel memory or anonymous memory
@@ -238,9 +256,12 @@
 {
 	unsigned long x;
 
-	x = global_page_state(NR_FREE_PAGES) + global_reclaimable_pages();
+	x = global_page_state(NR_FREE_PAGES);
 	x -= min(x, dirty_balance_reserve);
 
+	x += global_page_state(NR_INACTIVE_FILE);
+	x += global_page_state(NR_ACTIVE_FILE);
+
 	if (!vm_highmem_is_dirtyable)
 		x -= highmem_dirtyable_memory(x);
 
@@ -289,32 +310,6 @@
 }
 
 /**
- * zone_dirtyable_memory - number of dirtyable pages in a zone
- * @zone: the zone
- *
- * Returns the zone's number of pages potentially available for dirty
- * page cache.  This is the base value for the per-zone dirty limits.
- */
-static unsigned long zone_dirtyable_memory(struct zone *zone)
-{
-	/*
-	 * The effective global number of dirtyable pages may exclude
-	 * highmem as a big-picture measure to keep the ratio between
-	 * dirty memory and lowmem reasonable.
-	 *
-	 * But this function is purely about the individual zone and a
-	 * highmem zone can hold its share of dirty pages, so we don't
-	 * care about vm_highmem_is_dirtyable here.
-	 */
-	unsigned long nr_pages = zone_page_state(zone, NR_FREE_PAGES) +
-		zone_reclaimable_pages(zone);
-
-	/* don't allow this to underflow */
-	nr_pages -= min(nr_pages, zone->dirty_balance_reserve);
-	return nr_pages;
-}
-
-/**
  * zone_dirty_limit - maximum number of dirty pages allowed in a zone
  * @zone: the zone
  *
@@ -2178,11 +2173,12 @@
 	if (!TestSetPageDirty(page)) {
 		struct address_space *mapping = page_mapping(page);
 		struct address_space *mapping2;
+		unsigned long flags;
 
 		if (!mapping)
 			return 1;
 
-		spin_lock_irq(&mapping->tree_lock);
+		spin_lock_irqsave(&mapping->tree_lock, flags);
 		mapping2 = page_mapping(page);
 		if (mapping2) { /* Race with truncate? */
 			BUG_ON(mapping2 != mapping);
@@ -2191,7 +2187,7 @@
 			radix_tree_tag_set(&mapping->page_tree,
 				page_index(page), PAGECACHE_TAG_DIRTY);
 		}
-		spin_unlock_irq(&mapping->tree_lock);
+		spin_unlock_irqrestore(&mapping->tree_lock, flags);
 		if (mapping->host) {
 			/* !PageAnon && !swapper_space */
 			__mark_inode_dirty(mapping->host, I_DIRTY_PAGES);
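The relocated zone_dirtyable_memory() above counts only free pages plus file-backed LRU pages as dirtyable, instead of free plus all reclaimable pages. A worked example with made-up numbers: a zone with 1000 free pages, a dirty_balance_reserve of 200, 300 inactive-file and 500 active-file pages yields (1000 - 200) + 300 + 500 = 1600 dirtyable pages; anonymous pages no longer inflate the base for the per-zone dirty limits.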
diff --git a/mm/page_io.c b/mm/page_io.c
index 7247be6..7c59ef6 100644
--- a/mm/page_io.c
+++ b/mm/page_io.c
@@ -31,13 +31,13 @@
 
 	bio = bio_alloc(gfp_flags, 1);
 	if (bio) {
-		bio->bi_sector = map_swap_page(page, &bio->bi_bdev);
-		bio->bi_sector <<= PAGE_SHIFT - 9;
+		bio->bi_iter.bi_sector = map_swap_page(page, &bio->bi_bdev);
+		bio->bi_iter.bi_sector <<= PAGE_SHIFT - 9;
 		bio->bi_io_vec[0].bv_page = page;
 		bio->bi_io_vec[0].bv_len = PAGE_SIZE;
 		bio->bi_io_vec[0].bv_offset = 0;
 		bio->bi_vcnt = 1;
-		bio->bi_size = PAGE_SIZE;
+		bio->bi_iter.bi_size = PAGE_SIZE;
 		bio->bi_end_io = end_io;
 	}
 	return bio;
@@ -62,7 +62,7 @@
 		printk(KERN_ALERT "Write-error on swap-device (%u:%u:%Lu)\n",
 				imajor(bio->bi_bdev->bd_inode),
 				iminor(bio->bi_bdev->bd_inode),
-				(unsigned long long)bio->bi_sector);
+				(unsigned long long)bio->bi_iter.bi_sector);
 		ClearPageReclaim(page);
 	}
 	end_page_writeback(page);
@@ -80,7 +80,7 @@
 		printk(KERN_ALERT "Read-error on swap-device (%u:%u:%Lu)\n",
 				imajor(bio->bi_bdev->bd_inode),
 				iminor(bio->bi_bdev->bd_inode),
-				(unsigned long long)bio->bi_sector);
+				(unsigned long long)bio->bi_iter.bi_sector);
 		goto out;
 	}
 
diff --git a/mm/readahead.c b/mm/readahead.c
index 7cdbb44..0de2360 100644
--- a/mm/readahead.c
+++ b/mm/readahead.c
@@ -211,8 +211,6 @@
 int force_page_cache_readahead(struct address_space *mapping, struct file *filp,
 		pgoff_t offset, unsigned long nr_to_read)
 {
-	int ret = 0;
-
 	if (unlikely(!mapping->a_ops->readpage && !mapping->a_ops->readpages))
 		return -EINVAL;
 
@@ -226,15 +224,13 @@
 			this_chunk = nr_to_read;
 		err = __do_page_cache_readahead(mapping, filp,
 						offset, this_chunk, 0);
-		if (err < 0) {
-			ret = err;
-			break;
-		}
-		ret += err;
+		if (err < 0)
+			return err;
+
 		offset += this_chunk;
 		nr_to_read -= this_chunk;
 	}
-	return ret;
+	return 0;
 }
 
 /*
@@ -576,8 +572,7 @@
 	if (!mapping || !mapping->a_ops)
 		return -EINVAL;
 
-	force_page_cache_readahead(mapping, filp, index, nr);
-	return 0;
+	return force_page_cache_readahead(mapping, filp, index, nr);
 }
 
 SYSCALL_DEFINE3(readahead, int, fd, loff_t, offset, size_t, count)
diff --git a/mm/slab.c b/mm/slab.c
index eb043bf..b264214 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -1946,7 +1946,7 @@
 /**
  * slab_destroy - destroy and release all objects in a slab
  * @cachep: cache pointer being destroyed
- * @slabp: slab pointer being destroyed
+ * @page: page pointer being destroyed
  *
  * Destroy all the objs in a slab, and release the mem back to the system.
  * Before calling the slab must have been unlinked from the cache.  The
diff --git a/mm/slab_common.c b/mm/slab_common.c
index 8e40321..1ec3c61 100644
--- a/mm/slab_common.c
+++ b/mm/slab_common.c
@@ -233,14 +233,17 @@
 	mutex_unlock(&slab_mutex);
 	put_online_cpus();
 
-	/*
-	 * There is no point in flooding logs with warnings or especially
-	 * crashing the system if we fail to create a cache for a memcg. In
-	 * this case we will be accounting the memcg allocation to the root
-	 * cgroup until we succeed to create its own cache, but it isn't that
-	 * critical.
-	 */
-	if (err && !memcg) {
+	if (err) {
+		/*
+		 * There is no point in flooding logs with warnings or
+		 * especially crashing the system if we fail to create a cache
+		 * for a memcg. In this case we will be accounting the memcg
+		 * allocation to the root cgroup until we succeed to create its
+		 * own cache, but it isn't that critical.
+		 */
+		if (!memcg)
+			return NULL;
+
 		if (flags & SLAB_PANIC)
 			panic("kmem_cache_create: Failed to create slab '%s'. Error %d\n",
 				name, err);
diff --git a/mm/slub.c b/mm/slub.c
index 34bb8c6..25f14ad 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -355,6 +355,21 @@
 	__bit_spin_unlock(PG_locked, &page->flags);
 }
 
+static inline void set_page_slub_counters(struct page *page, unsigned long counters_new)
+{
+	struct page tmp;
+	tmp.counters = counters_new;
+	/*
+	 * page->counters can cover frozen/inuse/objects as well
+	 * as page->_count.  If we assign to ->counters directly
+	 * we run the risk of losing updates to page->_count, so
+	 * be careful and only assign to the fields we need.
+	 */
+	page->frozen  = tmp.frozen;
+	page->inuse   = tmp.inuse;
+	page->objects = tmp.objects;
+}
+
 /* Interrupts must be disabled (for the fallback code to work right) */
 static inline bool __cmpxchg_double_slab(struct kmem_cache *s, struct page *page,
 		void *freelist_old, unsigned long counters_old,
@@ -376,7 +391,7 @@
 		if (page->freelist == freelist_old &&
 					page->counters == counters_old) {
 			page->freelist = freelist_new;
-			page->counters = counters_new;
+			set_page_slub_counters(page, counters_new);
 			slab_unlock(page);
 			return 1;
 		}
@@ -415,7 +430,7 @@
 		if (page->freelist == freelist_old &&
 					page->counters == counters_old) {
 			page->freelist = freelist_new;
-			page->counters = counters_new;
+			set_page_slub_counters(page, counters_new);
 			slab_unlock(page);
 			local_irq_restore(flags);
 			return 1;
@@ -985,8 +1000,6 @@
 
 /*
  * Tracking of fully allocated slabs for debugging purposes.
- *
- * list_lock must be held.
  */
 static void add_full(struct kmem_cache *s,
 	struct kmem_cache_node *n, struct page *page)
@@ -994,17 +1007,16 @@
 	if (!(s->flags & SLAB_STORE_USER))
 		return;
 
+	lockdep_assert_held(&n->list_lock);
 	list_add(&page->lru, &n->full);
 }
 
-/*
- * list_lock must be held.
- */
-static void remove_full(struct kmem_cache *s, struct page *page)
+static void remove_full(struct kmem_cache *s, struct kmem_cache_node *n, struct page *page)
 {
 	if (!(s->flags & SLAB_STORE_USER))
 		return;
 
+	lockdep_assert_held(&n->list_lock);
 	list_del(&page->lru);
 }
 
@@ -1250,7 +1262,8 @@
 			void *object, u8 val) { return 1; }
 static inline void add_full(struct kmem_cache *s, struct kmem_cache_node *n,
 					struct page *page) {}
-static inline void remove_full(struct kmem_cache *s, struct page *page) {}
+static inline void remove_full(struct kmem_cache *s, struct kmem_cache_node *n,
+					struct page *page) {}
 static inline unsigned long kmem_cache_flags(unsigned long object_size,
 	unsigned long flags, const char *name,
 	void (*ctor)(void *))
@@ -1504,11 +1517,9 @@
 
 /*
  * Management of partially allocated slabs.
- *
- * list_lock must be held.
  */
-static inline void add_partial(struct kmem_cache_node *n,
-				struct page *page, int tail)
+static inline void
+__add_partial(struct kmem_cache_node *n, struct page *page, int tail)
 {
 	n->nr_partial++;
 	if (tail == DEACTIVATE_TO_TAIL)
@@ -1517,23 +1528,32 @@
 		list_add(&page->lru, &n->partial);
 }
 
-/*
- * list_lock must be held.
- */
-static inline void remove_partial(struct kmem_cache_node *n,
-					struct page *page)
+static inline void add_partial(struct kmem_cache_node *n,
+				struct page *page, int tail)
+{
+	lockdep_assert_held(&n->list_lock);
+	__add_partial(n, page, tail);
+}
+
+static inline void
+__remove_partial(struct kmem_cache_node *n, struct page *page)
 {
 	list_del(&page->lru);
 	n->nr_partial--;
 }
 
+static inline void remove_partial(struct kmem_cache_node *n,
+					struct page *page)
+{
+	lockdep_assert_held(&n->list_lock);
+	__remove_partial(n, page);
+}
+
 /*
  * Remove slab from the partial list, freeze it and
  * return the pointer to the freelist.
  *
  * Returns a list of objects or NULL if it fails.
- *
- * Must hold list_lock since we modify the partial list.
  */
 static inline void *acquire_slab(struct kmem_cache *s,
 		struct kmem_cache_node *n, struct page *page,
@@ -1543,6 +1563,8 @@
 	unsigned long counters;
 	struct page new;
 
+	lockdep_assert_held(&n->list_lock);
+
 	/*
 	 * Zap the freelist and set the frozen bit.
 	 * The old freelist is the list of objects for the
@@ -1559,7 +1581,7 @@
 		new.freelist = freelist;
 	}
 
-	VM_BUG_ON_PAGE(new.frozen, &new);
+	VM_BUG_ON(new.frozen);
 	new.frozen = 1;
 
 	if (!__cmpxchg_double_slab(s, page,
@@ -1812,7 +1834,7 @@
 			set_freepointer(s, freelist, prior);
 			new.counters = counters;
 			new.inuse--;
-			VM_BUG_ON_PAGE(!new.frozen, &new);
+			VM_BUG_ON(!new.frozen);
 
 		} while (!__cmpxchg_double_slab(s, page,
 			prior, counters,
@@ -1840,7 +1862,7 @@
 
 	old.freelist = page->freelist;
 	old.counters = page->counters;
-	VM_BUG_ON_PAGE(!old.frozen, &old);
+	VM_BUG_ON(!old.frozen);
 
 	/* Determine target state of the slab */
 	new.counters = old.counters;
@@ -1887,7 +1909,7 @@
 
 		else if (l == M_FULL)
 
-			remove_full(s, page);
+			remove_full(s, n, page);
 
 		if (m == M_PARTIAL) {
 
@@ -1952,7 +1974,7 @@
 
 			old.freelist = page->freelist;
 			old.counters = page->counters;
-			VM_BUG_ON_PAGE(!old.frozen, &old);
+			VM_BUG_ON(!old.frozen);
 
 			new.counters = old.counters;
 			new.freelist = old.freelist;
@@ -2225,7 +2247,7 @@
 		counters = page->counters;
 
 		new.counters = counters;
-		VM_BUG_ON_PAGE(!new.frozen, &new);
+		VM_BUG_ON(!new.frozen);
 
 		new.inuse = page->objects;
 		new.frozen = freelist != NULL;
@@ -2319,7 +2341,7 @@
 	 * page is pointing to the page from which the objects are obtained.
 	 * That page must be frozen for per cpu allocations to work.
 	 */
-	VM_BUG_ON_PAGE(!c->page->frozen, c->page);
+	VM_BUG_ON(!c->page->frozen);
 	c->freelist = get_freepointer(s, freelist);
 	c->tid = next_tid(c->tid);
 	local_irq_restore(flags);
@@ -2541,7 +2563,7 @@
 		new.inuse--;
 		if ((!new.inuse || !prior) && !was_frozen) {
 
-			if (kmem_cache_has_cpu_partial(s) && !prior)
+			if (kmem_cache_has_cpu_partial(s) && !prior) {
 
 				/*
 				 * Slab was on no list before and will be
@@ -2551,7 +2573,7 @@
 				 */
 				new.frozen = 1;
 
-			else { /* Needs to be taken off a list */
+			} else { /* Needs to be taken off a list */
 
 	                        n = get_node(s, page_to_nid(page));
 				/*
@@ -2600,7 +2622,7 @@
 	 */
 	if (!kmem_cache_has_cpu_partial(s) && unlikely(!prior)) {
 		if (kmem_cache_debug(s))
-			remove_full(s, page);
+			remove_full(s, n, page);
 		add_partial(n, page, DEACTIVATE_TO_TAIL);
 		stat(s, FREE_ADD_PARTIAL);
 	}
@@ -2614,9 +2636,10 @@
 		 */
 		remove_partial(n, page);
 		stat(s, FREE_REMOVE_PARTIAL);
-	} else
+	} else {
 		/* Slab must be on the full list */
-		remove_full(s, page);
+		remove_full(s, n, page);
+	}
 
 	spin_unlock_irqrestore(&n->list_lock, flags);
 	stat(s, FREE_SLAB);
@@ -2890,7 +2913,11 @@
 	init_kmem_cache_node(n);
 	inc_slabs_node(kmem_cache_node, node, page->objects);
 
-	add_partial(n, page, DEACTIVATE_TO_HEAD);
+	/*
+	 * No locks need to be taken here as it has just been
+	 * initialized and there is no concurrent access.
+	 */
+	__add_partial(n, page, DEACTIVATE_TO_HEAD);
 }
 
 static void free_kmem_cache_nodes(struct kmem_cache *s)
@@ -3176,7 +3203,7 @@
 
 	list_for_each_entry_safe(page, h, &n->partial, lru) {
 		if (!page->inuse) {
-			remove_partial(n, page);
+			__remove_partial(n, page);
 			discard_slab(s, page);
 		} else {
 			list_slab_objects(s, page,
@@ -4299,7 +4326,13 @@
 
 			page = ACCESS_ONCE(c->partial);
 			if (page) {
-				x = page->pobjects;
+				node = page_to_nid(page);
+				if (flags & SO_TOTAL)
+					WARN_ON_ONCE(1);
+				else if (flags & SO_OBJECTS)
+					WARN_ON_ONCE(1);
+				else
+					x = page->pages;
 				total += x;
 				nodes[node] += x;
 			}
@@ -5163,7 +5196,7 @@
 	}
 
 	s->kobj.kset = slab_kset;
-	err = kobject_init_and_add(&s->kobj, &slab_ktype, NULL, name);
+	err = kobject_init_and_add(&s->kobj, &slab_ktype, NULL, "%s", name);
 	if (err) {
 		kobject_put(&s->kobj);
 		return err;
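Among the mm/slub.c changes above, set_page_slub_counters() exists because page->counters shares a union with page->_count, so assigning the whole counters word could clobber a reference count that is updated concurrently. A toy illustration of the field-by-field copy; the struct below is deliberately simplified and is not the real struct page layout:

    #include <stdio.h>

    struct toy_page {
    	union {
    		unsigned long counters;		/* word view of the fields below */
    		struct {
    			unsigned inuse:16;
    			unsigned objects:15;
    			unsigned frozen:1;
    			int _count;		/* shares storage with counters */
    		};
    	};
    };

    /* Copy only the allocator-owned fields; never write _count via the union. */
    static void set_counters_safely(struct toy_page *page, unsigned long new_counters)
    {
    	struct toy_page tmp = { .counters = new_counters };

    	page->frozen  = tmp.frozen;
    	page->inuse   = tmp.inuse;
    	page->objects = tmp.objects;
    }

    int main(void)
    {
    	struct toy_page p = { 0 };

    	p._count = 42;
    	set_counters_safely(&p, ~0UL);
    	printf("_count still %d\n", p._count);	/* prints 42: left untouched */
    	return 0;
    }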
diff --git a/mm/swap_state.c b/mm/swap_state.c
index 98e85e9..e76ace3 100644
--- a/mm/swap_state.c
+++ b/mm/swap_state.c
@@ -63,6 +63,8 @@
 	return ret;
 }
 
+static atomic_t swapin_readahead_hits = ATOMIC_INIT(4);
+
 void show_swap_cache_info(void)
 {
 	printk("%lu pages in swap cache\n", total_swapcache_pages());
@@ -286,8 +288,11 @@
 
 	page = find_get_page(swap_address_space(entry), entry.val);
 
-	if (page)
+	if (page) {
 		INC_CACHE_INFO(find_success);
+		if (TestClearPageReadahead(page))
+			atomic_inc(&swapin_readahead_hits);
+	}
 
 	INC_CACHE_INFO(find_total);
 	return page;
@@ -389,6 +394,50 @@
 	return found_page;
 }
 
+static unsigned long swapin_nr_pages(unsigned long offset)
+{
+	static unsigned long prev_offset;
+	unsigned int pages, max_pages, last_ra;
+	static atomic_t last_readahead_pages;
+
+	max_pages = 1 << ACCESS_ONCE(page_cluster);
+	if (max_pages <= 1)
+		return 1;
+
+	/*
+	 * This heuristic has been found to work well on both sequential and
+	 * random loads, swapping to hard disk or to SSD: please don't ask
+	 * what the "+ 2" means, it just happens to work well, that's all.
+	 */
+	pages = atomic_xchg(&swapin_readahead_hits, 0) + 2;
+	if (pages == 2) {
+		/*
+		 * We can have no readahead hits to judge by: but must not get
+		 * stuck here forever, so check for an adjacent offset instead
+		 * (and don't even bother to check whether swap type is same).
+		 */
+		if (offset != prev_offset + 1 && offset != prev_offset - 1)
+			pages = 1;
+		prev_offset = offset;
+	} else {
+		unsigned int roundup = 4;
+		while (roundup < pages)
+			roundup <<= 1;
+		pages = roundup;
+	}
+
+	if (pages > max_pages)
+		pages = max_pages;
+
+	/* Don't shrink readahead too fast */
+	last_ra = atomic_read(&last_readahead_pages) / 2;
+	if (pages < last_ra)
+		pages = last_ra;
+	atomic_set(&last_readahead_pages, pages);
+
+	return pages;
+}
+
 /**
  * swapin_readahead - swap in pages in hope we need them soon
  * @entry: swap entry of this memory
@@ -412,11 +461,16 @@
 			struct vm_area_struct *vma, unsigned long addr)
 {
 	struct page *page;
-	unsigned long offset = swp_offset(entry);
+	unsigned long entry_offset = swp_offset(entry);
+	unsigned long offset = entry_offset;
 	unsigned long start_offset, end_offset;
-	unsigned long mask = (1UL << page_cluster) - 1;
+	unsigned long mask;
 	struct blk_plug plug;
 
+	mask = swapin_nr_pages(offset) - 1;
+	if (!mask)
+		goto skip;
+
 	/* Read a page_cluster sized and aligned cluster around offset. */
 	start_offset = offset & ~mask;
 	end_offset = offset | mask;
@@ -430,10 +484,13 @@
 						gfp_mask, vma, addr);
 		if (!page)
 			continue;
+		if (offset != entry_offset)
+			SetPageReadahead(page);
 		page_cache_release(page);
 	}
 	blk_finish_plug(&plug);
 
 	lru_add_drain();	/* Push any new pages onto the LRU now */
+skip:
 	return read_swap_cache_async(entry, gfp_mask, vma, addr);
 }
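The new swapin_nr_pages() above sizes the swap readahead window from recent readahead hits: the hit count is rounded up to a power of two (at least 4), clamped to the page_cluster limit, and never allowed to shrink below half of the previous window. A user-space sketch of that sizing rule, leaving out the adjacent-offset special case used when there were no hits; names are illustrative:

    #include <stdio.h>

    static unsigned int next_window(unsigned int hits, unsigned int max_pages,
    				unsigned int last_window)
    {
    	unsigned int pages = hits + 2;
    	unsigned int roundup = 4;

    	if (max_pages <= 1)
    		return 1;

    	if (hits) {
    		/* round up to a power of two, minimum 4 */
    		while (roundup < pages)
    			roundup <<= 1;
    		pages = roundup;
    	}

    	if (pages > max_pages)
    		pages = max_pages;

    	/* don't shrink the window faster than halving it */
    	if (pages < last_window / 2)
    		pages = last_window / 2;

    	return pages;
    }

    int main(void)
    {
    	/* 3 recent hits, page_cluster = 3 (max 8 pages), previous window 8 */
    	printf("%u\n", next_window(3, 8, 8));	/* prints 8 */
    	return 0;
    }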
diff --git a/mm/swapfile.c b/mm/swapfile.c
index c6c13b05..4a7f7e6 100644
--- a/mm/swapfile.c
+++ b/mm/swapfile.c
@@ -1923,7 +1923,6 @@
 	p->swap_map = NULL;
 	cluster_info = p->cluster_info;
 	p->cluster_info = NULL;
-	p->flags = 0;
 	frontswap_map = frontswap_map_get(p);
 	spin_unlock(&p->lock);
 	spin_unlock(&swap_lock);
@@ -1949,6 +1948,16 @@
 		mutex_unlock(&inode->i_mutex);
 	}
 	filp_close(swap_file, NULL);
+
+	/*
+	 * Clear the SWP_USED flag after all resources are freed so that swapon
+	 * can reuse this swap_info in alloc_swap_info() safely.  It is ok to
+	 * not hold p->lock after we cleared its SWP_WRITEOK.
+	 */
+	spin_lock(&swap_lock);
+	p->flags = 0;
+	spin_unlock(&swap_lock);
+
 	err = 0;
 	atomic_inc(&proc_poll_event);
 	wake_up_interruptible(&proc_poll_wait);
diff --git a/mm/vmpressure.c b/mm/vmpressure.c
index 196970a..d4042e7 100644
--- a/mm/vmpressure.c
+++ b/mm/vmpressure.c
@@ -19,6 +19,7 @@
 #include <linux/mm.h>
 #include <linux/vmstat.h>
 #include <linux/eventfd.h>
+#include <linux/slab.h>
 #include <linux/swap.h>
 #include <linux/printk.h>
 #include <linux/vmpressure.h>
diff --git a/mm/vmscan.c b/mm/vmscan.c
index 90c4075..a9c74b4 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -147,7 +147,7 @@
 }
 #endif
 
-unsigned long zone_reclaimable_pages(struct zone *zone)
+static unsigned long zone_reclaimable_pages(struct zone *zone)
 {
 	int nr;
 
@@ -3315,27 +3315,6 @@
 	wake_up_interruptible(&pgdat->kswapd_wait);
 }
 
-/*
- * The reclaimable count would be mostly accurate.
- * The less reclaimable pages may be
- * - mlocked pages, which will be moved to unevictable list when encountered
- * - mapped pages, which may require several travels to be reclaimed
- * - dirty pages, which is not "instantly" reclaimable
- */
-unsigned long global_reclaimable_pages(void)
-{
-	int nr;
-
-	nr = global_page_state(NR_ACTIVE_FILE) +
-	     global_page_state(NR_INACTIVE_FILE);
-
-	if (get_nr_swap_pages() > 0)
-		nr += global_page_state(NR_ACTIVE_ANON) +
-		      global_page_state(NR_INACTIVE_ANON);
-
-	return nr;
-}
-
 #ifdef CONFIG_HIBERNATION
 /*
  * Try to free `nr_to_reclaim' of memory, system-wide, and return the number of
diff --git a/mm/vmstat.c b/mm/vmstat.c
index 7249614..def5dd2 100644
--- a/mm/vmstat.c
+++ b/mm/vmstat.c
@@ -851,12 +851,14 @@
 	"thp_zero_page_alloc",
 	"thp_zero_page_alloc_failed",
 #endif
+#ifdef CONFIG_DEBUG_TLBFLUSH
 #ifdef CONFIG_SMP
 	"nr_tlb_remote_flush",
 	"nr_tlb_remote_flush_received",
-#endif
+#endif /* CONFIG_SMP */
 	"nr_tlb_local_flush_all",
 	"nr_tlb_local_flush_one",
+#endif /* CONFIG_DEBUG_TLBFLUSH */
 
 #endif /* CONFIG_VM_EVENTS_COUNTERS */
 };
diff --git a/drivers/staging/zsmalloc/zsmalloc-main.c b/mm/zsmalloc.c
similarity index 98%
rename from drivers/staging/zsmalloc/zsmalloc-main.c
rename to mm/zsmalloc.c
index 7660c87..c03ca5e 100644
--- a/drivers/staging/zsmalloc/zsmalloc-main.c
+++ b/mm/zsmalloc.c
@@ -2,6 +2,7 @@
  * zsmalloc memory allocator
  *
  * Copyright (C) 2011  Nitin Gupta
+ * Copyright (C) 2012, 2013 Minchan Kim
  *
  * This code is released using a dual license strategy: BSD/GPL
  * You can choose the license that better fits your requirements.
@@ -90,8 +91,7 @@
 #include <linux/hardirq.h>
 #include <linux/spinlock.h>
 #include <linux/types.h>
-
-#include "zsmalloc.h"
+#include <linux/zsmalloc.h>
 
 /*
 * This must be a power of 2 and greater than or equal to sizeof(link_free).
diff --git a/net/9p/client.c b/net/9p/client.c
index a5e4d2d..9186550 100644
--- a/net/9p/client.c
+++ b/net/9p/client.c
@@ -204,7 +204,7 @@
 	return ret;
 }
 
-struct p9_fcall *p9_fcall_alloc(int alloc_msize)
+static struct p9_fcall *p9_fcall_alloc(int alloc_msize)
 {
 	struct p9_fcall *fc;
 	fc = kmalloc(sizeof(struct p9_fcall) + alloc_msize, GFP_NOFS);
diff --git a/net/9p/trans_virtio.c b/net/9p/trans_virtio.c
index cd1e1ed..ac2666c 100644
--- a/net/9p/trans_virtio.c
+++ b/net/9p/trans_virtio.c
@@ -340,7 +340,10 @@
 		int count = nr_pages;
 		while (nr_pages) {
 			s = rest_of_page(data);
-			pages[index++] = kmap_to_page(data);
+			if (is_vmalloc_addr(data))
+				pages[index++] = vmalloc_to_page(data);
+			else
+				pages[index++] = kmap_to_page(data);
 			data += s;
 			nr_pages--;
 		}
diff --git a/net/batman-adv/bat_iv_ogm.c b/net/batman-adv/bat_iv_ogm.c
index 512159b..8323bce 100644
--- a/net/batman-adv/bat_iv_ogm.c
+++ b/net/batman-adv/bat_iv_ogm.c
@@ -241,19 +241,19 @@
 	size = bat_priv->num_ifaces * sizeof(uint8_t);
 	orig_node->bat_iv.bcast_own_sum = kzalloc(size, GFP_ATOMIC);
 	if (!orig_node->bat_iv.bcast_own_sum)
-		goto free_bcast_own;
+		goto free_orig_node;
 
 	hash_added = batadv_hash_add(bat_priv->orig_hash, batadv_compare_orig,
 				     batadv_choose_orig, orig_node,
 				     &orig_node->hash_entry);
 	if (hash_added != 0)
-		goto free_bcast_own;
+		goto free_orig_node;
 
 	return orig_node;
 
-free_bcast_own:
-	kfree(orig_node->bat_iv.bcast_own);
 free_orig_node:
+	/* free twice, as batadv_orig_node_new sets refcount to 2 */
+	batadv_orig_node_free_ref(orig_node);
 	batadv_orig_node_free_ref(orig_node);
 
 	return NULL;
@@ -266,7 +266,7 @@
 			struct batadv_orig_node *orig_neigh)
 {
 	struct batadv_priv *bat_priv = netdev_priv(hard_iface->soft_iface);
-	struct batadv_neigh_node *neigh_node;
+	struct batadv_neigh_node *neigh_node, *tmp_neigh_node;
 
 	neigh_node = batadv_neigh_node_new(hard_iface, neigh_addr, orig_node);
 	if (!neigh_node)
@@ -281,14 +281,24 @@
 	neigh_node->orig_node = orig_neigh;
 	neigh_node->if_incoming = hard_iface;
 
-	batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
-		   "Creating new neighbor %pM for orig_node %pM on interface %s\n",
-		   neigh_addr, orig_node->orig, hard_iface->net_dev->name);
-
 	spin_lock_bh(&orig_node->neigh_list_lock);
-	hlist_add_head_rcu(&neigh_node->list, &orig_node->neigh_list);
+	tmp_neigh_node = batadv_neigh_node_get(orig_node, hard_iface,
+					       neigh_addr);
+	if (!tmp_neigh_node) {
+		hlist_add_head_rcu(&neigh_node->list, &orig_node->neigh_list);
+	} else {
+		kfree(neigh_node);
+		batadv_hardif_free_ref(hard_iface);
+		neigh_node = tmp_neigh_node;
+	}
 	spin_unlock_bh(&orig_node->neigh_list_lock);
 
+	if (!tmp_neigh_node)
+		batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
+			   "Creating new neighbor %pM for orig_node %pM on interface %s\n",
+			   neigh_addr, orig_node->orig,
+			   hard_iface->net_dev->name);
+
 out:
 	return neigh_node;
 }
diff --git a/net/batman-adv/hard-interface.c b/net/batman-adv/hard-interface.c
index 3d417d3..b851cc5 100644
--- a/net/batman-adv/hard-interface.c
+++ b/net/batman-adv/hard-interface.c
@@ -241,7 +241,7 @@
 {
 	struct batadv_priv *bat_priv = netdev_priv(soft_iface);
 	const struct batadv_hard_iface *hard_iface;
-	int min_mtu = ETH_DATA_LEN;
+	int min_mtu = INT_MAX;
 
 	rcu_read_lock();
 	list_for_each_entry_rcu(hard_iface, &batadv_hardif_list, list) {
@@ -256,8 +256,6 @@
 	}
 	rcu_read_unlock();
 
-	atomic_set(&bat_priv->packet_size_max, min_mtu);
-
 	if (atomic_read(&bat_priv->fragmentation) == 0)
 		goto out;
 
@@ -268,13 +266,21 @@
 	min_mtu = min_t(int, min_mtu, BATADV_FRAG_MAX_FRAG_SIZE);
 	min_mtu -= sizeof(struct batadv_frag_packet);
 	min_mtu *= BATADV_FRAG_MAX_FRAGMENTS;
-	atomic_set(&bat_priv->packet_size_max, min_mtu);
-
-	/* with fragmentation enabled we can fragment external packets easily */
-	min_mtu = min_t(int, min_mtu, ETH_DATA_LEN);
 
 out:
-	return min_mtu - batadv_max_header_len();
+	/* report to the other components the maximum amount of bytes that
+	 * batman-adv can send over the wire (without considering the payload
+	 * overhead). For example, this value is used by TT to compute the
+	 * maximum local table size.
+	 */
+	atomic_set(&bat_priv->packet_size_max, min_mtu);
+
+	/* the real soft-interface MTU is computed by removing the payload
+	 * overhead from the maximum amount of bytes that was just computed.
+	 *
+	 * However batman-adv does not support MTUs bigger than ETH_DATA_LEN
+	 */
+	return min_t(int, min_mtu - batadv_max_header_len(), ETH_DATA_LEN);
 }
 
 /* adjusts the MTU if a new interface with a smaller MTU appeared. */
diff --git a/net/batman-adv/originator.c b/net/batman-adv/originator.c
index 6df12a2..8539416 100644
--- a/net/batman-adv/originator.c
+++ b/net/batman-adv/originator.c
@@ -458,6 +458,42 @@
 }
 
 /**
+ * batadv_neigh_node_get - retrieve a neighbour from the list
+ * @orig_node: originator which the neighbour belongs to
+ * @hard_iface: the interface where this neighbour is connected to
+ * @addr: the address of the neighbour
+ *
+ * Looks for and possibly returns a neighbour belonging to this originator list
+ * which is connected through the provided hard interface.
+ * Returns NULL if the neighbour is not found.
+ */
+struct batadv_neigh_node *
+batadv_neigh_node_get(const struct batadv_orig_node *orig_node,
+		      const struct batadv_hard_iface *hard_iface,
+		      const uint8_t *addr)
+{
+	struct batadv_neigh_node *tmp_neigh_node, *res = NULL;
+
+	rcu_read_lock();
+	hlist_for_each_entry_rcu(tmp_neigh_node, &orig_node->neigh_list, list) {
+		if (!batadv_compare_eth(tmp_neigh_node->addr, addr))
+			continue;
+
+		if (tmp_neigh_node->if_incoming != hard_iface)
+			continue;
+
+		if (!atomic_inc_not_zero(&tmp_neigh_node->refcount))
+			continue;
+
+		res = tmp_neigh_node;
+		break;
+	}
+	rcu_read_unlock();
+
+	return res;
+}
+
+/**
  * batadv_orig_ifinfo_free_rcu - free the orig_ifinfo object
  * @rcu: rcu pointer of the orig_ifinfo object
  */
diff --git a/net/batman-adv/originator.h b/net/batman-adv/originator.h
index 37be290..db3a9ed 100644
--- a/net/batman-adv/originator.h
+++ b/net/batman-adv/originator.h
@@ -29,6 +29,10 @@
 struct batadv_orig_node *batadv_orig_node_new(struct batadv_priv *bat_priv,
 					      const uint8_t *addr);
 struct batadv_neigh_node *
+batadv_neigh_node_get(const struct batadv_orig_node *orig_node,
+		      const struct batadv_hard_iface *hard_iface,
+		      const uint8_t *addr);
+struct batadv_neigh_node *
 batadv_neigh_node_new(struct batadv_hard_iface *hard_iface,
 		      const uint8_t *neigh_addr,
 		      struct batadv_orig_node *orig_node);
diff --git a/net/batman-adv/routing.c b/net/batman-adv/routing.c
index 1ed9f7c..a953d5b 100644
--- a/net/batman-adv/routing.c
+++ b/net/batman-adv/routing.c
@@ -688,7 +688,7 @@
 	int is_old_ttvn;
 
 	/* check if there is enough data before accessing it */
-	if (pskb_may_pull(skb, hdr_len + ETH_HLEN) < 0)
+	if (!pskb_may_pull(skb, hdr_len + ETH_HLEN))
 		return 0;
 
 	/* create a copy of the skb (in case of for re-routing) to modify it. */
@@ -918,6 +918,8 @@
 
 	if (ret != NET_RX_SUCCESS)
 		ret = batadv_route_unicast_packet(skb, recv_if);
+	else
+		consume_skb(skb);
 
 	return ret;
 }
diff --git a/net/batman-adv/send.c b/net/batman-adv/send.c
index 579f5f0..843febd 100644
--- a/net/batman-adv/send.c
+++ b/net/batman-adv/send.c
@@ -254,9 +254,9 @@
 				   struct batadv_orig_node *orig_node,
 				   unsigned short vid)
 {
-	struct ethhdr *ethhdr = (struct ethhdr *)skb->data;
+	struct ethhdr *ethhdr;
 	struct batadv_unicast_packet *unicast_packet;
-	int ret = NET_XMIT_DROP;
+	int ret = NET_XMIT_DROP, hdr_size;
 
 	if (!orig_node)
 		goto out;
@@ -265,12 +265,16 @@
 	case BATADV_UNICAST:
 		if (!batadv_send_skb_prepare_unicast(skb, orig_node))
 			goto out;
+
+		hdr_size = sizeof(*unicast_packet);
 		break;
 	case BATADV_UNICAST_4ADDR:
 		if (!batadv_send_skb_prepare_unicast_4addr(bat_priv, skb,
 							   orig_node,
 							   packet_subtype))
 			goto out;
+
+		hdr_size = sizeof(struct batadv_unicast_4addr_packet);
 		break;
 	default:
 		/* this function supports UNICAST and UNICAST_4ADDR only. It
@@ -279,6 +283,7 @@
 		goto out;
 	}
 
+	ethhdr = (struct ethhdr *)(skb->data + hdr_size);
 	unicast_packet = (struct batadv_unicast_packet *)skb->data;
 
 	/* inform the destination node that we are still missing a correct route
diff --git a/net/batman-adv/translation-table.c b/net/batman-adv/translation-table.c
index b6071f6..959dde7 100644
--- a/net/batman-adv/translation-table.c
+++ b/net/batman-adv/translation-table.c
@@ -1975,6 +1975,7 @@
 	struct hlist_head *head;
 	uint32_t i, crc_tmp, crc = 0;
 	uint8_t flags;
+	__be16 tmp_vid;
 
 	for (i = 0; i < hash->size; i++) {
 		head = &hash->table[i];
@@ -2011,8 +2012,11 @@
 							     orig_node))
 				continue;
 
-			crc_tmp = crc32c(0, &tt_common->vid,
-					 sizeof(tt_common->vid));
+			/* use network order to read the VID: this ensures that
+			 * every node reads the bytes in the same order.
+			 */
+			tmp_vid = htons(tt_common->vid);
+			crc_tmp = crc32c(0, &tmp_vid, sizeof(tmp_vid));
 
 			/* compute the CRC on flags that have to be kept in sync
 			 * among nodes
@@ -2046,6 +2050,7 @@
 	struct hlist_head *head;
 	uint32_t i, crc_tmp, crc = 0;
 	uint8_t flags;
+	__be16 tmp_vid;
 
 	for (i = 0; i < hash->size; i++) {
 		head = &hash->table[i];
@@ -2064,8 +2069,11 @@
 			if (tt_common->flags & BATADV_TT_CLIENT_NEW)
 				continue;
 
-			crc_tmp = crc32c(0, &tt_common->vid,
-					 sizeof(tt_common->vid));
+			/* use network order to read the VID: this ensures that
+			 * every node reads the bytes in the same order.
+			 */
+			tmp_vid = htons(tt_common->vid);
+			crc_tmp = crc32c(0, &tmp_vid, sizeof(tmp_vid));
 
 			/* compute the CRC on flags that have to be kept in sync
 			 * among nodes
@@ -2262,6 +2270,7 @@
 {
 	struct batadv_tvlv_tt_vlan_data *tt_vlan_tmp;
 	struct batadv_orig_node_vlan *vlan;
+	uint32_t crc;
 	int i;
 
 	/* check if each received CRC matches the locally stored one */
@@ -2281,7 +2290,10 @@
 		if (!vlan)
 			return false;
 
-		if (vlan->tt.crc != ntohl(tt_vlan_tmp->crc))
+		crc = vlan->tt.crc;
+		batadv_orig_node_vlan_free_ref(vlan);
+
+		if (crc != ntohl(tt_vlan_tmp->crc))
 			return false;
 	}
 
@@ -3218,7 +3230,6 @@
 
 		spin_lock_bh(&orig_node->tt_lock);
 
-		tt_change = (struct batadv_tvlv_tt_change *)tt_buff;
 		batadv_tt_update_changes(bat_priv, orig_node, tt_num_changes,
 					 ttvn, tt_change);
 
diff --git a/net/bluetooth/hidp/core.c b/net/bluetooth/hidp/core.c
index 292e619..d9fb934 100644
--- a/net/bluetooth/hidp/core.c
+++ b/net/bluetooth/hidp/core.c
@@ -430,6 +430,16 @@
 		del_timer(&session->timer);
 }
 
+static void hidp_process_report(struct hidp_session *session,
+				int type, const u8 *data, int len, int intr)
+{
+	if (len > HID_MAX_BUFFER_SIZE)
+		len = HID_MAX_BUFFER_SIZE;
+
+	memcpy(session->input_buf, data, len);
+	hid_input_report(session->hid, type, session->input_buf, len, intr);
+}
+
 static void hidp_process_handshake(struct hidp_session *session,
 					unsigned char param)
 {
@@ -502,7 +512,8 @@
 			hidp_input_report(session, skb);
 
 		if (session->hid)
-			hid_input_report(session->hid, HID_INPUT_REPORT, skb->data, skb->len, 0);
+			hidp_process_report(session, HID_INPUT_REPORT,
+					    skb->data, skb->len, 0);
 		break;
 
 	case HIDP_DATA_RTYPE_OTHER:
@@ -584,7 +595,8 @@
 			hidp_input_report(session, skb);
 
 		if (session->hid) {
-			hid_input_report(session->hid, HID_INPUT_REPORT, skb->data, skb->len, 1);
+			hidp_process_report(session, HID_INPUT_REPORT,
+					    skb->data, skb->len, 1);
 			BT_DBG("report len %d", skb->len);
 		}
 	} else {
diff --git a/net/bluetooth/hidp/hidp.h b/net/bluetooth/hidp/hidp.h
index ab52414..8798492 100644
--- a/net/bluetooth/hidp/hidp.h
+++ b/net/bluetooth/hidp/hidp.h
@@ -24,6 +24,7 @@
 #define __HIDP_H
 
 #include <linux/types.h>
+#include <linux/hid.h>
 #include <linux/kref.h>
 #include <net/bluetooth/bluetooth.h>
 #include <net/bluetooth/l2cap.h>
@@ -179,6 +180,9 @@
 
 	/* Used in hidp_output_raw_report() */
 	int output_report_success; /* boolean */
+
+	/* temporary input buffer */
+	u8 input_buf[HID_MAX_BUFFER_SIZE];
 };
 
 /* HIDP init defines */
diff --git a/net/bridge/br_device.c b/net/bridge/br_device.c
index e4401a5..63f0455 100644
--- a/net/bridge/br_device.c
+++ b/net/bridge/br_device.c
@@ -187,8 +187,7 @@
 
 	spin_lock_bh(&br->lock);
 	if (!ether_addr_equal(dev->dev_addr, addr->sa_data)) {
-		memcpy(dev->dev_addr, addr->sa_data, ETH_ALEN);
-		br_fdb_change_mac_address(br, addr->sa_data);
+		/* Mac address will be changed in br_stp_change_bridge_id(). */
 		br_stp_change_bridge_id(br, addr->sa_data);
 	}
 	spin_unlock_bh(&br->lock);
@@ -226,37 +225,11 @@
 		br_netpoll_disable(p);
 }
 
-static int br_netpoll_setup(struct net_device *dev, struct netpoll_info *ni,
-			    gfp_t gfp)
-{
-	struct net_bridge *br = netdev_priv(dev);
-	struct net_bridge_port *p;
-	int err = 0;
-
-	list_for_each_entry(p, &br->port_list, list) {
-		if (!p->dev)
-			continue;
-		err = br_netpoll_enable(p, gfp);
-		if (err)
-			goto fail;
-	}
-
-out:
-	return err;
-
-fail:
-	br_netpoll_cleanup(dev);
-	goto out;
-}
-
-int br_netpoll_enable(struct net_bridge_port *p, gfp_t gfp)
+static int __br_netpoll_enable(struct net_bridge_port *p, gfp_t gfp)
 {
 	struct netpoll *np;
 	int err;
 
-	if (!p->br->dev->npinfo)
-		return 0;
-
 	np = kzalloc(sizeof(*p->np), gfp);
 	if (!np)
 		return -ENOMEM;
@@ -271,6 +244,37 @@
 	return err;
 }
 
+int br_netpoll_enable(struct net_bridge_port *p, gfp_t gfp)
+{
+	if (!p->br->dev->npinfo)
+		return 0;
+
+	return __br_netpoll_enable(p, gfp);
+}
+
+static int br_netpoll_setup(struct net_device *dev, struct netpoll_info *ni,
+			    gfp_t gfp)
+{
+	struct net_bridge *br = netdev_priv(dev);
+	struct net_bridge_port *p;
+	int err = 0;
+
+	list_for_each_entry(p, &br->port_list, list) {
+		if (!p->dev)
+			continue;
+		err = __br_netpoll_enable(p, gfp);
+		if (err)
+			goto fail;
+	}
+
+out:
+	return err;
+
+fail:
+	br_netpoll_cleanup(dev);
+	goto out;
+}
+
 void br_netpoll_disable(struct net_bridge_port *p)
 {
 	struct netpoll *np = p->np;
diff --git a/net/bridge/br_fdb.c b/net/bridge/br_fdb.c
index c5f5a4a..9203d5a 100644
--- a/net/bridge/br_fdb.c
+++ b/net/bridge/br_fdb.c
@@ -27,6 +27,9 @@
 #include "br_private.h"
 
 static struct kmem_cache *br_fdb_cache __read_mostly;
+static struct net_bridge_fdb_entry *fdb_find(struct hlist_head *head,
+					     const unsigned char *addr,
+					     __u16 vid);
 static int fdb_insert(struct net_bridge *br, struct net_bridge_port *source,
 		      const unsigned char *addr, u16 vid);
 static void fdb_notify(struct net_bridge *br,
@@ -89,11 +92,57 @@
 	call_rcu(&f->rcu, fdb_rcu_free);
 }
 
+/* Delete a local entry if no other port had the same address. */
+static void fdb_delete_local(struct net_bridge *br,
+			     const struct net_bridge_port *p,
+			     struct net_bridge_fdb_entry *f)
+{
+	const unsigned char *addr = f->addr.addr;
+	u16 vid = f->vlan_id;
+	struct net_bridge_port *op;
+
+	/* Maybe another port has same hw addr? */
+	list_for_each_entry(op, &br->port_list, list) {
+		if (op != p && ether_addr_equal(op->dev->dev_addr, addr) &&
+		    (!vid || nbp_vlan_find(op, vid))) {
+			f->dst = op;
+			f->added_by_user = 0;
+			return;
+		}
+	}
+
+	/* Maybe bridge device has same hw addr? */
+	if (p && ether_addr_equal(br->dev->dev_addr, addr) &&
+	    (!vid || br_vlan_find(br, vid))) {
+		f->dst = NULL;
+		f->added_by_user = 0;
+		return;
+	}
+
+	fdb_delete(br, f);
+}
+
+void br_fdb_find_delete_local(struct net_bridge *br,
+			      const struct net_bridge_port *p,
+			      const unsigned char *addr, u16 vid)
+{
+	struct hlist_head *head = &br->hash[br_mac_hash(addr, vid)];
+	struct net_bridge_fdb_entry *f;
+
+	spin_lock_bh(&br->hash_lock);
+	f = fdb_find(head, addr, vid);
+	if (f && f->is_local && !f->added_by_user && f->dst == p)
+		fdb_delete_local(br, p, f);
+	spin_unlock_bh(&br->hash_lock);
+}
+
 void br_fdb_changeaddr(struct net_bridge_port *p, const unsigned char *newaddr)
 {
 	struct net_bridge *br = p->br;
-	bool no_vlan = (nbp_get_vlan_info(p) == NULL) ? true : false;
+	struct net_port_vlans *pv = nbp_get_vlan_info(p);
+	bool no_vlan = !pv;
 	int i;
+	u16 vid;
 
 	spin_lock_bh(&br->hash_lock);
 
@@ -104,38 +153,34 @@
 			struct net_bridge_fdb_entry *f;
 
 			f = hlist_entry(h, struct net_bridge_fdb_entry, hlist);
-			if (f->dst == p && f->is_local) {
-				/* maybe another port has same hw addr? */
-				struct net_bridge_port *op;
-				u16 vid = f->vlan_id;
-				list_for_each_entry(op, &br->port_list, list) {
-					if (op != p &&
-					    ether_addr_equal(op->dev->dev_addr,
-							     f->addr.addr) &&
-					    nbp_vlan_find(op, vid)) {
-						f->dst = op;
-						goto insert;
-					}
-				}
-
+			if (f->dst == p && f->is_local && !f->added_by_user) {
 				/* delete old one */
-				fdb_delete(br, f);
-insert:
-				/* insert new address,  may fail if invalid
-				 * address or dup.
-				 */
-				fdb_insert(br, p, newaddr, vid);
+				fdb_delete_local(br, p, f);
 
 				/* if this port has no vlan information
 				 * configured, we can safely be done at
 				 * this point.
 				 */
 				if (no_vlan)
-					goto done;
+					goto insert;
 			}
 		}
 	}
 
+insert:
+	/* insert new address,  may fail if invalid address or dup. */
+	fdb_insert(br, p, newaddr, 0);
+
+	if (no_vlan)
+		goto done;
+
+	/* Now add entries for every VLAN configured on the port.
+	 * This function runs under RTNL so the bitmap will not change
+	 * from under us.
+	 */
+	for_each_set_bit(vid, pv->vlan_bitmap, VLAN_N_VID)
+		fdb_insert(br, p, newaddr, vid);
+
 done:
 	spin_unlock_bh(&br->hash_lock);
 }
@@ -146,10 +191,12 @@
 	struct net_port_vlans *pv;
 	u16 vid = 0;
 
+	spin_lock_bh(&br->hash_lock);
+
 	/* If old entry was unassociated with any port, then delete it. */
 	f = __br_fdb_get(br, br->dev->dev_addr, 0);
 	if (f && f->is_local && !f->dst)
-		fdb_delete(br, f);
+		fdb_delete_local(br, NULL, f);
 
 	fdb_insert(br, NULL, newaddr, 0);
 
@@ -159,14 +206,16 @@
 	 */
 	pv = br_get_vlan_info(br);
 	if (!pv)
-		return;
+		goto out;
 
 	for_each_set_bit_from(vid, pv->vlan_bitmap, VLAN_N_VID) {
 		f = __br_fdb_get(br, br->dev->dev_addr, vid);
 		if (f && f->is_local && !f->dst)
-			fdb_delete(br, f);
+			fdb_delete_local(br, NULL, f);
 		fdb_insert(br, NULL, newaddr, vid);
 	}
+out:
+	spin_unlock_bh(&br->hash_lock);
 }
 
 void br_fdb_cleanup(unsigned long _data)
@@ -235,25 +284,11 @@
 
 			if (f->is_static && !do_all)
 				continue;
-			/*
-			 * if multiple ports all have the same device address
-			 * then when one port is deleted, assign
-			 * the local entry to other port
-			 */
-			if (f->is_local) {
-				struct net_bridge_port *op;
-				list_for_each_entry(op, &br->port_list, list) {
-					if (op != p &&
-					    ether_addr_equal(op->dev->dev_addr,
-							     f->addr.addr)) {
-						f->dst = op;
-						goto skip_delete;
-					}
-				}
-			}
 
-			fdb_delete(br, f);
-		skip_delete: ;
+			if (f->is_local)
+				fdb_delete_local(br, p, f);
+			else
+				fdb_delete(br, f);
 		}
 	}
 	spin_unlock_bh(&br->hash_lock);
@@ -397,6 +432,7 @@
 		fdb->vlan_id = vid;
 		fdb->is_local = 0;
 		fdb->is_static = 0;
+		fdb->added_by_user = 0;
 		fdb->updated = fdb->used = jiffies;
 		hlist_add_head_rcu(&fdb->hlist, head);
 	}
@@ -447,7 +483,7 @@
 }
 
 void br_fdb_update(struct net_bridge *br, struct net_bridge_port *source,
-		   const unsigned char *addr, u16 vid)
+		   const unsigned char *addr, u16 vid, bool added_by_user)
 {
 	struct hlist_head *head = &br->hash[br_mac_hash(addr, vid)];
 	struct net_bridge_fdb_entry *fdb;
@@ -473,13 +509,18 @@
 			/* fastpath: update of existing entry */
 			fdb->dst = source;
 			fdb->updated = jiffies;
+			if (unlikely(added_by_user))
+				fdb->added_by_user = 1;
 		}
 	} else {
 		spin_lock(&br->hash_lock);
 		if (likely(!fdb_find(head, addr, vid))) {
 			fdb = fdb_create(head, source, addr, vid);
-			if (fdb)
+			if (fdb) {
+				if (unlikely(added_by_user))
+					fdb->added_by_user = 1;
 				fdb_notify(br, fdb, RTM_NEWNEIGH);
+			}
 		}
 		/* else  we lose race and someone else inserts
 		 * it first, don't bother updating
@@ -647,6 +688,7 @@
 
 		modified = true;
 	}
+	fdb->added_by_user = 1;
 
 	fdb->used = jiffies;
 	if (modified) {
@@ -664,7 +706,7 @@
 
 	if (ndm->ndm_flags & NTF_USE) {
 		rcu_read_lock();
-		br_fdb_update(p->br, p, addr, vid);
+		br_fdb_update(p->br, p, addr, vid, true);
 		rcu_read_unlock();
 	} else {
 		spin_lock_bh(&p->br->hash_lock);
@@ -749,8 +791,7 @@
 	return err;
 }
 
-int fdb_delete_by_addr(struct net_bridge *br, const u8 *addr,
-		       u16 vlan)
+static int fdb_delete_by_addr(struct net_bridge *br, const u8 *addr, u16 vlan)
 {
 	struct hlist_head *head = &br->hash[br_mac_hash(addr, vlan)];
 	struct net_bridge_fdb_entry *fdb;
diff --git a/net/bridge/br_if.c b/net/bridge/br_if.c
index cffe1d6..54d207d 100644
--- a/net/bridge/br_if.c
+++ b/net/bridge/br_if.c
@@ -389,6 +389,9 @@
 	if (br->dev->needed_headroom < dev->needed_headroom)
 		br->dev->needed_headroom = dev->needed_headroom;
 
+	if (br_fdb_insert(br, p, dev->dev_addr, 0))
+		netdev_err(dev, "failed insert local address bridge forwarding table\n");
+
 	spin_lock_bh(&br->lock);
 	changed_addr = br_stp_recalculate_bridge_id(br);
 
@@ -404,9 +407,6 @@
 
 	dev_set_mtu(br->dev, br_min_mtu(br));
 
-	if (br_fdb_insert(br, p, dev->dev_addr, 0))
-		netdev_err(dev, "failed insert local address bridge forwarding table\n");
-
 	kobject_uevent(&p->kobj, KOBJ_ADD);
 
 	return 0;
diff --git a/net/bridge/br_input.c b/net/bridge/br_input.c
index bf8dc7d..28d5446 100644
--- a/net/bridge/br_input.c
+++ b/net/bridge/br_input.c
@@ -77,7 +77,7 @@
 	/* insert into forwarding database after filtering to avoid spoofing */
 	br = p->br;
 	if (p->flags & BR_LEARNING)
-		br_fdb_update(br, p, eth_hdr(skb)->h_source, vid);
+		br_fdb_update(br, p, eth_hdr(skb)->h_source, vid, false);
 
 	if (!is_broadcast_ether_addr(dest) && is_multicast_ether_addr(dest) &&
 	    br_multicast_rcv(br, p, skb, vid))
@@ -148,7 +148,7 @@
 
 	br_vlan_get_tag(skb, &vid);
 	if (p->flags & BR_LEARNING)
-		br_fdb_update(p->br, p, eth_hdr(skb)->h_source, vid);
+		br_fdb_update(p->br, p, eth_hdr(skb)->h_source, vid, false);
 	return 0;	 /* process further */
 }
 
diff --git a/net/bridge/br_private.h b/net/bridge/br_private.h
index fcd1233..3ba11bc 100644
--- a/net/bridge/br_private.h
+++ b/net/bridge/br_private.h
@@ -104,6 +104,7 @@
 	mac_addr			addr;
 	unsigned char			is_local;
 	unsigned char			is_static;
+	unsigned char			added_by_user;
 	__u16				vlan_id;
 };
 
@@ -370,6 +371,9 @@
 int br_fdb_init(void);
 void br_fdb_fini(void);
 void br_fdb_flush(struct net_bridge *br);
+void br_fdb_find_delete_local(struct net_bridge *br,
+			      const struct net_bridge_port *p,
+			      const unsigned char *addr, u16 vid);
 void br_fdb_changeaddr(struct net_bridge_port *p, const unsigned char *newaddr);
 void br_fdb_change_mac_address(struct net_bridge *br, const u8 *newaddr);
 void br_fdb_cleanup(unsigned long arg);
@@ -383,8 +387,7 @@
 int br_fdb_insert(struct net_bridge *br, struct net_bridge_port *source,
 		  const unsigned char *addr, u16 vid);
 void br_fdb_update(struct net_bridge *br, struct net_bridge_port *source,
-		   const unsigned char *addr, u16 vid);
-int fdb_delete_by_addr(struct net_bridge *br, const u8 *addr, u16 vid);
+		   const unsigned char *addr, u16 vid, bool added_by_user);
 
 int br_fdb_delete(struct ndmsg *ndm, struct nlattr *tb[],
 		  struct net_device *dev, const unsigned char *addr);
@@ -584,6 +587,7 @@
 int br_vlan_add(struct net_bridge *br, u16 vid, u16 flags);
 int br_vlan_delete(struct net_bridge *br, u16 vid);
 void br_vlan_flush(struct net_bridge *br);
+bool br_vlan_find(struct net_bridge *br, u16 vid);
 int br_vlan_filter_toggle(struct net_bridge *br, unsigned long val);
 int nbp_vlan_add(struct net_bridge_port *port, u16 vid, u16 flags);
 int nbp_vlan_delete(struct net_bridge_port *port, u16 vid);
@@ -665,6 +669,11 @@
 {
 }
 
+static inline bool br_vlan_find(struct net_bridge *br, u16 vid)
+{
+	return false;
+}
+
 static inline int nbp_vlan_add(struct net_bridge_port *port, u16 vid, u16 flags)
 {
 	return -EOPNOTSUPP;
diff --git a/net/bridge/br_stp_if.c b/net/bridge/br_stp_if.c
index 656a6f3..189ba1e 100644
--- a/net/bridge/br_stp_if.c
+++ b/net/bridge/br_stp_if.c
@@ -194,6 +194,8 @@
 
 	wasroot = br_is_root_bridge(br);
 
+	br_fdb_change_mac_address(br, addr);
+
 	memcpy(oldaddr, br->bridge_id.addr, ETH_ALEN);
 	memcpy(br->bridge_id.addr, addr, ETH_ALEN);
 	memcpy(br->dev->dev_addr, addr, ETH_ALEN);
diff --git a/net/bridge/br_vlan.c b/net/bridge/br_vlan.c
index 4ca4d0a..8249ca76 100644
--- a/net/bridge/br_vlan.c
+++ b/net/bridge/br_vlan.c
@@ -275,9 +275,7 @@
 	if (!pv)
 		return -EINVAL;
 
-	spin_lock_bh(&br->hash_lock);
-	fdb_delete_by_addr(br, br->dev->dev_addr, vid);
-	spin_unlock_bh(&br->hash_lock);
+	br_fdb_find_delete_local(br, NULL, br->dev->dev_addr, vid);
 
 	__vlan_del(pv, vid);
 	return 0;
@@ -295,6 +293,25 @@
 	__vlan_flush(pv);
 }
 
+bool br_vlan_find(struct net_bridge *br, u16 vid)
+{
+	struct net_port_vlans *pv;
+	bool found = false;
+
+	rcu_read_lock();
+	pv = rcu_dereference(br->vlan_info);
+
+	if (!pv)
+		goto out;
+
+	if (test_bit(vid, pv->vlan_bitmap))
+		found = true;
+
+out:
+	rcu_read_unlock();
+	return found;
+}
+
 int br_vlan_filter_toggle(struct net_bridge *br, unsigned long val)
 {
 	if (!rtnl_trylock())
@@ -359,9 +376,7 @@
 	if (!pv)
 		return -EINVAL;
 
-	spin_lock_bh(&port->br->hash_lock);
-	fdb_delete_by_addr(port->br, port->dev->dev_addr, vid);
-	spin_unlock_bh(&port->br->hash_lock);
+	br_fdb_find_delete_local(port->br, port, port->dev->dev_addr, vid);
 
 	return __vlan_del(pv, vid);
 }
diff --git a/net/caif/caif_dev.c b/net/caif/caif_dev.c
index 4dca159..edbca46 100644
--- a/net/caif/caif_dev.c
+++ b/net/caif/caif_dev.c
@@ -22,6 +22,7 @@
 #include <net/pkt_sched.h>
 #include <net/caif/caif_device.h>
 #include <net/caif/caif_layer.h>
+#include <net/caif/caif_dev.h>
 #include <net/caif/cfpkt.h>
 #include <net/caif/cfcnfg.h>
 #include <net/caif/cfserl.h>
diff --git a/net/caif/cfsrvl.c b/net/caif/cfsrvl.c
index 353f793..a6e1154 100644
--- a/net/caif/cfsrvl.c
+++ b/net/caif/cfsrvl.c
@@ -15,6 +15,7 @@
 #include <net/caif/caif_layer.h>
 #include <net/caif/cfsrvl.h>
 #include <net/caif/cfpkt.h>
+#include <net/caif/caif_dev.h>
 
 #define SRVL_CTRL_PKT_SIZE 1
 #define SRVL_FLOW_OFF 0x81
diff --git a/net/can/af_can.c b/net/can/af_can.c
index d249874..a27f8aa 100644
--- a/net/can/af_can.c
+++ b/net/can/af_can.c
@@ -57,6 +57,7 @@
 #include <linux/skbuff.h>
 #include <linux/can.h>
 #include <linux/can/core.h>
+#include <linux/can/skb.h>
 #include <linux/ratelimit.h>
 #include <net/net_namespace.h>
 #include <net/sock.h>
@@ -290,7 +291,7 @@
 				return -ENOMEM;
 			}
 
-			newskb->sk = skb->sk;
+			can_skb_set_owner(newskb, skb->sk);
 			newskb->ip_summed = CHECKSUM_UNNECESSARY;
 			newskb->pkt_type = PACKET_BROADCAST;
 		}
diff --git a/net/can/bcm.c b/net/can/bcm.c
index 3fc737b..dcb75c0 100644
--- a/net/can/bcm.c
+++ b/net/can/bcm.c
@@ -268,7 +268,7 @@
 
 	/* send with loopback */
 	skb->dev = dev;
-	skb->sk = op->sk;
+	can_skb_set_owner(skb, op->sk);
 	can_send(skb, 1);
 
 	/* update statistics */
@@ -1223,7 +1223,7 @@
 
 	can_skb_prv(skb)->ifindex = dev->ifindex;
 	skb->dev = dev;
-	skb->sk  = sk;
+	can_skb_set_owner(skb, sk);
 	err = can_send(skb, 1); /* send with loopback */
 	dev_put(dev);
 
diff --git a/net/can/raw.c b/net/can/raw.c
index 07d72d8..8be757c 100644
--- a/net/can/raw.c
+++ b/net/can/raw.c
@@ -715,6 +715,7 @@
 
 	skb->dev = dev;
 	skb->sk  = sk;
+	skb->priority = sk->sk_priority;
 
 	err = can_send(skb, ro->loopback);
 
diff --git a/net/ceph/messenger.c b/net/ceph/messenger.c
index 2ed1304..30efc5c 100644
--- a/net/ceph/messenger.c
+++ b/net/ceph/messenger.c
@@ -778,13 +778,12 @@
 
 	bio = data->bio;
 	BUG_ON(!bio);
-	BUG_ON(!bio->bi_vcnt);
 
 	cursor->resid = min(length, data->bio_length);
 	cursor->bio = bio;
-	cursor->vector_index = 0;
-	cursor->vector_offset = 0;
-	cursor->last_piece = length <= bio->bi_io_vec[0].bv_len;
+	cursor->bvec_iter = bio->bi_iter;
+	cursor->last_piece =
+		cursor->resid <= bio_iter_len(bio, cursor->bvec_iter);
 }
 
 static struct page *ceph_msg_data_bio_next(struct ceph_msg_data_cursor *cursor,
@@ -793,71 +792,67 @@
 {
 	struct ceph_msg_data *data = cursor->data;
 	struct bio *bio;
-	struct bio_vec *bio_vec;
-	unsigned int index;
+	struct bio_vec bio_vec;
 
 	BUG_ON(data->type != CEPH_MSG_DATA_BIO);
 
 	bio = cursor->bio;
 	BUG_ON(!bio);
 
-	index = cursor->vector_index;
-	BUG_ON(index >= (unsigned int) bio->bi_vcnt);
+	bio_vec = bio_iter_iovec(bio, cursor->bvec_iter);
 
-	bio_vec = &bio->bi_io_vec[index];
-	BUG_ON(cursor->vector_offset >= bio_vec->bv_len);
-	*page_offset = (size_t) (bio_vec->bv_offset + cursor->vector_offset);
+	*page_offset = (size_t) bio_vec.bv_offset;
 	BUG_ON(*page_offset >= PAGE_SIZE);
 	if (cursor->last_piece) /* pagelist offset is always 0 */
 		*length = cursor->resid;
 	else
-		*length = (size_t) (bio_vec->bv_len - cursor->vector_offset);
+		*length = (size_t) bio_vec.bv_len;
 	BUG_ON(*length > cursor->resid);
 	BUG_ON(*page_offset + *length > PAGE_SIZE);
 
-	return bio_vec->bv_page;
+	return bio_vec.bv_page;
 }
 
 static bool ceph_msg_data_bio_advance(struct ceph_msg_data_cursor *cursor,
 					size_t bytes)
 {
 	struct bio *bio;
-	struct bio_vec *bio_vec;
-	unsigned int index;
+	struct bio_vec bio_vec;
 
 	BUG_ON(cursor->data->type != CEPH_MSG_DATA_BIO);
 
 	bio = cursor->bio;
 	BUG_ON(!bio);
 
-	index = cursor->vector_index;
-	BUG_ON(index >= (unsigned int) bio->bi_vcnt);
-	bio_vec = &bio->bi_io_vec[index];
+	bio_vec = bio_iter_iovec(bio, cursor->bvec_iter);
 
 	/* Advance the cursor offset */
 
 	BUG_ON(cursor->resid < bytes);
 	cursor->resid -= bytes;
-	cursor->vector_offset += bytes;
-	if (cursor->vector_offset < bio_vec->bv_len)
+
+	bio_advance_iter(bio, &cursor->bvec_iter, bytes);
+
+	if (bytes < bio_vec.bv_len)
 		return false;	/* more bytes to process in this segment */
-	BUG_ON(cursor->vector_offset != bio_vec->bv_len);
 
 	/* Move on to the next segment, and possibly the next bio */
 
-	if (++index == (unsigned int) bio->bi_vcnt) {
+	if (!cursor->bvec_iter.bi_size) {
 		bio = bio->bi_next;
-		index = 0;
+		cursor->bio = bio;
+		if (bio)
+			cursor->bvec_iter = bio->bi_iter;
+		else
+			memset(&cursor->bvec_iter, 0,
+			       sizeof(cursor->bvec_iter));
 	}
-	cursor->bio = bio;
-	cursor->vector_index = index;
-	cursor->vector_offset = 0;
 
 	if (!cursor->last_piece) {
 		BUG_ON(!cursor->resid);
 		BUG_ON(!bio);
 		/* A short read is OK, so use <= rather than == */
-		if (cursor->resid <= bio->bi_io_vec[index].bv_len)
+		if (cursor->resid <= bio_iter_len(bio, cursor->bvec_iter))
 			cursor->last_piece = true;
 	}
 
diff --git a/net/ceph/osd_client.c b/net/ceph/osd_client.c
index 010ff3b..0676f2b 100644
--- a/net/ceph/osd_client.c
+++ b/net/ceph/osd_client.c
@@ -1427,6 +1427,40 @@
 }
 
 /*
+ * Caller should hold map_sem for read and request_mutex.
+ */
+static int __ceph_osdc_start_request(struct ceph_osd_client *osdc,
+				     struct ceph_osd_request *req,
+				     bool nofail)
+{
+	int rc;
+
+	__register_request(osdc, req);
+	req->r_sent = 0;
+	req->r_got_reply = 0;
+	rc = __map_request(osdc, req, 0);
+	if (rc < 0) {
+		if (nofail) {
+			dout("osdc_start_request failed map, "
+				" will retry %lld\n", req->r_tid);
+			rc = 0;
+		} else {
+			__unregister_request(osdc, req);
+		}
+		return rc;
+	}
+
+	if (req->r_osd == NULL) {
+		dout("send_request %p no up osds in pg\n", req);
+		ceph_monc_request_next_osdmap(&osdc->client->monc);
+	} else {
+		__send_queued(osdc);
+	}
+
+	return 0;
+}
+
+/*
  * Timeout callback, called every N seconds when 1 or more osd
  * requests has been active for more than N seconds.  When this
  * happens, we ping all OSDs with requests who have timed out to
@@ -1653,6 +1687,7 @@
 	osdmap_epoch = ceph_decode_32(&p);
 
 	/* lookup */
+	down_read(&osdc->map_sem);
 	mutex_lock(&osdc->request_mutex);
 	req = __lookup_request(osdc, tid);
 	if (req == NULL) {
@@ -1709,7 +1744,6 @@
 		dout("redirect pool %lld\n", redir.oloc.pool);
 
 		__unregister_request(osdc, req);
-		mutex_unlock(&osdc->request_mutex);
 
 		req->r_target_oloc = redir.oloc; /* struct */
 
@@ -1721,10 +1755,10 @@
 		 * successfully.  In the future we might want to follow
 		 * original request's nofail setting here.
 		 */
-		err = ceph_osdc_start_request(osdc, req, true);
+		err = __ceph_osdc_start_request(osdc, req, true);
 		BUG_ON(err);
 
-		goto done;
+		goto out_unlock;
 	}
 
 	already_completed = req->r_got_reply;
@@ -1742,8 +1776,7 @@
 		req->r_got_reply = 1;
 	} else if ((flags & CEPH_OSD_FLAG_ONDISK) == 0) {
 		dout("handle_reply tid %llu dup ack\n", tid);
-		mutex_unlock(&osdc->request_mutex);
-		goto done;
+		goto out_unlock;
 	}
 
 	dout("handle_reply tid %llu flags %d\n", tid, flags);
@@ -1758,6 +1791,7 @@
 		__unregister_request(osdc, req);
 
 	mutex_unlock(&osdc->request_mutex);
+	up_read(&osdc->map_sem);
 
 	if (!already_completed) {
 		if (req->r_unsafe_callback &&
@@ -1775,10 +1809,14 @@
 		complete_request(req);
 	}
 
-done:
+out:
 	dout("req=%p req->r_linger=%d\n", req, req->r_linger);
 	ceph_osdc_put_request(req);
 	return;
+out_unlock:
+	mutex_unlock(&osdc->request_mutex);
+	up_read(&osdc->map_sem);
+	goto out;
 
 bad_put:
 	req->r_result = -EIO;
@@ -1791,6 +1829,7 @@
 	ceph_osdc_put_request(req);
 bad_mutex:
 	mutex_unlock(&osdc->request_mutex);
+	up_read(&osdc->map_sem);
 bad:
 	pr_err("corrupt osd_op_reply got %d %d\n",
 	       (int)msg->front.iov_len, le32_to_cpu(msg->hdr.front_len));
@@ -2351,34 +2390,16 @@
 			    struct ceph_osd_request *req,
 			    bool nofail)
 {
-	int rc = 0;
+	int rc;
 
 	down_read(&osdc->map_sem);
 	mutex_lock(&osdc->request_mutex);
-	__register_request(osdc, req);
-	req->r_sent = 0;
-	req->r_got_reply = 0;
-	rc = __map_request(osdc, req, 0);
-	if (rc < 0) {
-		if (nofail) {
-			dout("osdc_start_request failed map, "
-				" will retry %lld\n", req->r_tid);
-			rc = 0;
-		} else {
-			__unregister_request(osdc, req);
-		}
-		goto out_unlock;
-	}
-	if (req->r_osd == NULL) {
-		dout("send_request %p no up osds in pg\n", req);
-		ceph_monc_request_next_osdmap(&osdc->client->monc);
-	} else {
-		__send_queued(osdc);
-	}
-	rc = 0;
-out_unlock:
+
+	rc = __ceph_osdc_start_request(osdc, req, nofail);
+
 	mutex_unlock(&osdc->request_mutex);
 	up_read(&osdc->map_sem);
+
 	return rc;
 }
 EXPORT_SYMBOL(ceph_osdc_start_request);
@@ -2504,9 +2525,12 @@
 	err = -ENOMEM;
 	osdc->notify_wq = create_singlethread_workqueue("ceph-watch-notify");
 	if (!osdc->notify_wq)
-		goto out_msgpool;
+		goto out_msgpool_reply;
+
 	return 0;
 
+out_msgpool_reply:
+	ceph_msgpool_destroy(&osdc->msgpool_op_reply);
 out_msgpool:
 	ceph_msgpool_destroy(&osdc->msgpool_op);
 out_mempool:
diff --git a/net/compat.c b/net/compat.c
index dd32e34..f50161f 100644
--- a/net/compat.c
+++ b/net/compat.c
@@ -780,21 +780,16 @@
 	if (flags & MSG_CMSG_COMPAT)
 		return -EINVAL;
 
-	if (COMPAT_USE_64BIT_TIME)
-		return __sys_recvmmsg(fd, (struct mmsghdr __user *)mmsg, vlen,
-				      flags | MSG_CMSG_COMPAT,
-				      (struct timespec *) timeout);
-
 	if (timeout == NULL)
 		return __sys_recvmmsg(fd, (struct mmsghdr __user *)mmsg, vlen,
 				      flags | MSG_CMSG_COMPAT, NULL);
 
-	if (get_compat_timespec(&ktspec, timeout))
+	if (compat_get_timespec(&ktspec, timeout))
 		return -EFAULT;
 
 	datagrams = __sys_recvmmsg(fd, (struct mmsghdr __user *)mmsg, vlen,
 				   flags | MSG_CMSG_COMPAT, &ktspec);
-	if (datagrams > 0 && put_compat_timespec(&ktspec, timeout))
+	if (datagrams > 0 && compat_put_timespec(&ktspec, timeout))
 		datagrams = -EFAULT;
 
 	return datagrams;
diff --git a/net/core/dev.c b/net/core/dev.c
index 3721db7..b1b0c8d 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -2420,7 +2420,7 @@
  * 2. No high memory really exists on this machine.
  */
 
-static int illegal_highdma(struct net_device *dev, struct sk_buff *skb)
+static int illegal_highdma(const struct net_device *dev, struct sk_buff *skb)
 {
 #ifdef CONFIG_HIGHMEM
 	int i;
@@ -2495,34 +2495,36 @@
 }
 
 static netdev_features_t harmonize_features(struct sk_buff *skb,
-	netdev_features_t features)
+					    const struct net_device *dev,
+					    netdev_features_t features)
 {
 	if (skb->ip_summed != CHECKSUM_NONE &&
 	    !can_checksum_protocol(features, skb_network_protocol(skb))) {
 		features &= ~NETIF_F_ALL_CSUM;
-	} else if (illegal_highdma(skb->dev, skb)) {
+	} else if (illegal_highdma(dev, skb)) {
 		features &= ~NETIF_F_SG;
 	}
 
 	return features;
 }
 
-netdev_features_t netif_skb_features(struct sk_buff *skb)
+netdev_features_t netif_skb_dev_features(struct sk_buff *skb,
+					 const struct net_device *dev)
 {
 	__be16 protocol = skb->protocol;
-	netdev_features_t features = skb->dev->features;
+	netdev_features_t features = dev->features;
 
-	if (skb_shinfo(skb)->gso_segs > skb->dev->gso_max_segs)
+	if (skb_shinfo(skb)->gso_segs > dev->gso_max_segs)
 		features &= ~NETIF_F_GSO_MASK;
 
 	if (protocol == htons(ETH_P_8021Q) || protocol == htons(ETH_P_8021AD)) {
 		struct vlan_ethhdr *veh = (struct vlan_ethhdr *)skb->data;
 		protocol = veh->h_vlan_encapsulated_proto;
 	} else if (!vlan_tx_tag_present(skb)) {
-		return harmonize_features(skb, features);
+		return harmonize_features(skb, dev, features);
 	}
 
-	features &= (skb->dev->vlan_features | NETIF_F_HW_VLAN_CTAG_TX |
+	features &= (dev->vlan_features | NETIF_F_HW_VLAN_CTAG_TX |
 					       NETIF_F_HW_VLAN_STAG_TX);
 
 	if (protocol == htons(ETH_P_8021Q) || protocol == htons(ETH_P_8021AD))
@@ -2530,9 +2532,9 @@
 				NETIF_F_GEN_CSUM | NETIF_F_HW_VLAN_CTAG_TX |
 				NETIF_F_HW_VLAN_STAG_TX;
 
-	return harmonize_features(skb, features);
+	return harmonize_features(skb, dev, features);
 }
-EXPORT_SYMBOL(netif_skb_features);
+EXPORT_SYMBOL(netif_skb_dev_features);
 
 int dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev,
 			struct netdev_queue *txq)
@@ -2803,7 +2805,7 @@
  *      the BH enable code must have IRQs enabled so that it will not deadlock.
  *          --BLG
  */
-int __dev_queue_xmit(struct sk_buff *skb, void *accel_priv)
+static int __dev_queue_xmit(struct sk_buff *skb, void *accel_priv)
 {
 	struct net_device *dev = skb->dev;
 	struct netdev_queue *txq;
@@ -4637,7 +4639,7 @@
 }
 EXPORT_SYMBOL(netdev_master_upper_dev_get_rcu);
 
-int netdev_adjacent_sysfs_add(struct net_device *dev,
+static int netdev_adjacent_sysfs_add(struct net_device *dev,
 			      struct net_device *adj_dev,
 			      struct list_head *dev_list)
 {
@@ -4647,7 +4649,7 @@
 	return sysfs_create_link(&(dev->dev.kobj), &(adj_dev->dev.kobj),
 				 linkname);
 }
-void netdev_adjacent_sysfs_del(struct net_device *dev,
+static void netdev_adjacent_sysfs_del(struct net_device *dev,
 			       char *name,
 			       struct list_head *dev_list)
 {
diff --git a/net/core/fib_rules.c b/net/core/fib_rules.c
index f409e0b..185c341 100644
--- a/net/core/fib_rules.c
+++ b/net/core/fib_rules.c
@@ -745,6 +745,13 @@
 			attach_rules(&ops->rules_list, dev);
 		break;
 
+	case NETDEV_CHANGENAME:
+		list_for_each_entry(ops, &net->rules_ops, list) {
+			detach_rules(&ops->rules_list, dev);
+			attach_rules(&ops->rules_list, dev);
+		}
+		break;
+
 	case NETDEV_UNREGISTER:
 		list_for_each_entry(ops, &net->rules_ops, list)
 			detach_rules(&ops->rules_list, dev);
diff --git a/net/core/flow_dissector.c b/net/core/flow_dissector.c
index 87577d4..e29e810 100644
--- a/net/core/flow_dissector.c
+++ b/net/core/flow_dissector.c
@@ -323,17 +323,6 @@
 	return poff;
 }
 
-static inline u16 dev_cap_txqueue(struct net_device *dev, u16 queue_index)
-{
-	if (unlikely(queue_index >= dev->real_num_tx_queues)) {
-		net_warn_ratelimited("%s selects TX queue %d, but real number of TX queues is %d\n",
-				     dev->name, queue_index,
-				     dev->real_num_tx_queues);
-		return 0;
-	}
-	return queue_index;
-}
-
 static inline int get_xps_queue(struct net_device *dev, struct sk_buff *skb)
 {
 #ifdef CONFIG_XPS
@@ -372,7 +361,7 @@
 #endif
 }
 
-u16 __netdev_pick_tx(struct net_device *dev, struct sk_buff *skb)
+static u16 __netdev_pick_tx(struct net_device *dev, struct sk_buff *skb)
 {
 	struct sock *sk = skb->sk;
 	int queue_index = sk_tx_queue_get(sk);
@@ -392,7 +381,6 @@
 
 	return queue_index;
 }
-EXPORT_SYMBOL(__netdev_pick_tx);
 
 struct netdev_queue *netdev_pick_tx(struct net_device *dev,
 				    struct sk_buff *skb,
@@ -403,13 +391,13 @@
 	if (dev->real_num_tx_queues != 1) {
 		const struct net_device_ops *ops = dev->netdev_ops;
 		if (ops->ndo_select_queue)
-			queue_index = ops->ndo_select_queue(dev, skb,
-							    accel_priv);
+			queue_index = ops->ndo_select_queue(dev, skb, accel_priv,
+							    __netdev_pick_tx);
 		else
 			queue_index = __netdev_pick_tx(dev, skb);
 
 		if (!accel_priv)
-			queue_index = dev_cap_txqueue(dev, queue_index);
+			queue_index = netdev_cap_txqueue(dev, queue_index);
 	}
 
 	skb_set_queue_mapping(skb, queue_index);
diff --git a/net/core/netpoll.c b/net/core/netpoll.c
index c03f3de..a664f78 100644
--- a/net/core/netpoll.c
+++ b/net/core/netpoll.c
@@ -948,6 +948,7 @@
 {
 	char *cur=opt, *delim;
 	int ipv6;
+	bool ipversion_set = false;
 
 	if (*cur != '@') {
 		if ((delim = strchr(cur, '@')) == NULL)
@@ -960,6 +961,7 @@
 	cur++;
 
 	if (*cur != '/') {
+		ipversion_set = true;
 		if ((delim = strchr(cur, '/')) == NULL)
 			goto parse_failed;
 		*delim = 0;
@@ -1002,7 +1004,7 @@
 	ipv6 = netpoll_parse_ip_addr(cur, &np->remote_ip);
 	if (ipv6 < 0)
 		goto parse_failed;
-	else if (np->ipv6 != (bool)ipv6)
+	else if (ipversion_set && np->ipv6 != (bool)ipv6)
 		goto parse_failed;
 	else
 		np->ipv6 = (bool)ipv6;
diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c
index 393b1bc..1a0dac2 100644
--- a/net/core/rtnetlink.c
+++ b/net/core/rtnetlink.c
@@ -374,7 +374,7 @@
 	if (!master_dev)
 		return 0;
 	ops = master_dev->rtnl_link_ops;
-	if (!ops->get_slave_size)
+	if (!ops || !ops->get_slave_size)
 		return 0;
 	/* IFLA_INFO_SLAVE_DATA + nested data */
 	return nla_total_size(sizeof(struct nlattr)) +
@@ -1963,16 +1963,21 @@
 
 		dev->ifindex = ifm->ifi_index;
 
-		if (ops->newlink)
+		if (ops->newlink) {
 			err = ops->newlink(net, dev, tb, data);
-		else
+			/* Drivers should call free_netdev() in ->destructor
+			 * and unregister it on failure so that device could be
+			 * finally freed in rtnl_unlock.
+			 */
+			if (err < 0)
+				goto out;
+		} else {
 			err = register_netdevice(dev);
-
-		if (err < 0) {
-			free_netdev(dev);
-			goto out;
+			if (err < 0) {
+				free_netdev(dev);
+				goto out;
+			}
 		}
-
 		err = rtnl_configure_link(dev, ifm);
 		if (err < 0)
 			unregister_netdevice(dev);
diff --git a/net/core/skbuff.c b/net/core/skbuff.c
index 8f519db..5976ef0 100644
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -47,6 +47,8 @@
 #include <linux/in.h>
 #include <linux/inet.h>
 #include <linux/slab.h>
+#include <linux/tcp.h>
+#include <linux/udp.h>
 #include <linux/netdevice.h>
 #ifdef CONFIG_NET_CLS_ACT
 #include <net/pkt_sched.h>
@@ -2119,7 +2121,7 @@
 /**
  *	skb_zerocopy - Zero copy skb to skb
  *	@to: destination buffer
- *	@source: source buffer
+ *	@from: source buffer
  *	@len: number of bytes to copy from source buffer
  *	@hlen: size of linear headroom in destination buffer
  *
@@ -3916,3 +3918,26 @@
 	nf_reset_trace(skb);
 }
 EXPORT_SYMBOL_GPL(skb_scrub_packet);
+
+/**
+ * skb_gso_transport_seglen - Return length of individual segments of a gso packet
+ *
+ * @skb: GSO skb
+ *
+ * skb_gso_transport_seglen is used to determine the real size of the
+ * individual segments, including Layer4 headers (TCP/UDP).
+ *
+ * The MAC/L2 or network (IP, IPv6) headers are not accounted for.
+ */
+unsigned int skb_gso_transport_seglen(const struct sk_buff *skb)
+{
+	const struct skb_shared_info *shinfo = skb_shinfo(skb);
+	unsigned int hdr_len;
+
+	if (likely(shinfo->gso_type & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6)))
+		hdr_len = tcp_hdrlen(skb);
+	else
+		hdr_len = sizeof(struct udphdr);
+	return hdr_len + shinfo->gso_size;
+}
+EXPORT_SYMBOL_GPL(skb_gso_transport_seglen);
diff --git a/net/core/sock.c b/net/core/sock.c
index 0c127dc..5b6a943 100644
--- a/net/core/sock.c
+++ b/net/core/sock.c
@@ -1775,7 +1775,9 @@
 			while (order) {
 				if (npages >= 1 << order) {
 					page = alloc_pages(sk->sk_allocation |
-							   __GFP_COMP | __GFP_NOWARN,
+							   __GFP_COMP |
+							   __GFP_NOWARN |
+							   __GFP_NORETRY,
 							   order);
 					if (page)
 						goto fill_page;
@@ -1845,7 +1847,7 @@
 		gfp_t gfp = prio;
 
 		if (order)
-			gfp |= __GFP_COMP | __GFP_NOWARN;
+			gfp |= __GFP_COMP | __GFP_NOWARN | __GFP_NORETRY;
 		pfrag->page = alloc_pages(gfp, order);
 		if (likely(pfrag->page)) {
 			pfrag->offset = 0;
diff --git a/net/dccp/ccids/lib/tfrc.c b/net/dccp/ccids/lib/tfrc.c
index c073b81..62b5828 100644
--- a/net/dccp/ccids/lib/tfrc.c
+++ b/net/dccp/ccids/lib/tfrc.c
@@ -8,7 +8,7 @@
 #include "tfrc.h"
 
 #ifdef CONFIG_IP_DCCP_TFRC_DEBUG
-static bool tfrc_debug;
+bool tfrc_debug;
 module_param(tfrc_debug, bool, 0644);
 MODULE_PARM_DESC(tfrc_debug, "Enable TFRC debug messages");
 #endif
diff --git a/net/dccp/ccids/lib/tfrc.h b/net/dccp/ccids/lib/tfrc.h
index a3d8f7c..40ee7d6 100644
--- a/net/dccp/ccids/lib/tfrc.h
+++ b/net/dccp/ccids/lib/tfrc.h
@@ -21,6 +21,7 @@
 #include "packet_history.h"
 
 #ifdef CONFIG_IP_DCCP_TFRC_DEBUG
+extern bool tfrc_debug;
 #define tfrc_pr_debug(format, a...)	DCCP_PR_DEBUG(tfrc_debug, format, ##a)
 #else
 #define tfrc_pr_debug(format, a...)
diff --git a/net/decnet/af_decnet.c b/net/decnet/af_decnet.c
index 2954dcb..4c04848 100644
--- a/net/decnet/af_decnet.c
+++ b/net/decnet/af_decnet.c
@@ -2104,8 +2104,6 @@
 	.notifier_call = dn_device_event,
 };
 
-extern int dn_route_rcv(struct sk_buff *, struct net_device *, struct packet_type *, struct net_device *);
-
 static struct packet_type dn_dix_packet_type __read_mostly = {
 	.type =		cpu_to_be16(ETH_P_DNA_RT),
 	.func =		dn_route_rcv,
@@ -2353,9 +2351,6 @@
 	.sendpage =	sock_no_sendpage,
 };
 
-void dn_register_sysctl(void);
-void dn_unregister_sysctl(void);
-
 MODULE_DESCRIPTION("The Linux DECnet Network Protocol");
 MODULE_AUTHOR("Linux DECnet Project Team");
 MODULE_LICENSE("GPL");
diff --git a/net/ieee802154/6lowpan.c b/net/ieee802154/6lowpan.c
index 48b25c0..8edfea5 100644
--- a/net/ieee802154/6lowpan.c
+++ b/net/ieee802154/6lowpan.c
@@ -106,7 +106,6 @@
 			   unsigned short type, const void *_daddr,
 			   const void *_saddr, unsigned int len)
 {
-	struct ipv6hdr *hdr;
 	const u8 *saddr = _saddr;
 	const u8 *daddr = _daddr;
 	struct ieee802154_addr sa, da;
@@ -117,8 +116,6 @@
 	if (type != ETH_P_IPV6)
 		return 0;
 
-	hdr = ipv6_hdr(skb);
-
 	if (!saddr)
 		saddr = dev->dev_addr;
 
@@ -533,7 +530,27 @@
 	.create	= lowpan_header_create,
 };
 
+static struct lock_class_key lowpan_tx_busylock;
+static struct lock_class_key lowpan_netdev_xmit_lock_key;
+
+static void lowpan_set_lockdep_class_one(struct net_device *dev,
+					 struct netdev_queue *txq,
+					 void *_unused)
+{
+	lockdep_set_class(&txq->_xmit_lock,
+			  &lowpan_netdev_xmit_lock_key);
+}
+
+
+static int lowpan_dev_init(struct net_device *dev)
+{
+	netdev_for_each_tx_queue(dev, lowpan_set_lockdep_class_one, NULL);
+	dev->qdisc_tx_busylock = &lowpan_tx_busylock;
+	return 0;
+}
+
 static const struct net_device_ops lowpan_netdev_ops = {
+	.ndo_init		= lowpan_dev_init,
 	.ndo_start_xmit		= lowpan_xmit,
 	.ndo_set_mac_address	= lowpan_set_address,
 };
diff --git a/net/ieee802154/6lowpan_iphc.c b/net/ieee802154/6lowpan_iphc.c
index 083f905..860aa2d 100644
--- a/net/ieee802154/6lowpan_iphc.c
+++ b/net/ieee802154/6lowpan_iphc.c
@@ -678,7 +678,7 @@
 			hc06_ptr += 3;
 		} else {
 			/* compress nothing */
-			memcpy(hc06_ptr, &hdr, 4);
+			memcpy(hc06_ptr, hdr, 4);
 			/* replace the top byte with new ECN | DSCP format */
 			*hc06_ptr = tmp;
 			hc06_ptr += 4;
diff --git a/net/ipv4/devinet.c b/net/ipv4/devinet.c
index ac2dff3..bdbf68b 100644
--- a/net/ipv4/devinet.c
+++ b/net/ipv4/devinet.c
@@ -1443,7 +1443,8 @@
 	       + nla_total_size(4) /* IFA_LOCAL */
 	       + nla_total_size(4) /* IFA_BROADCAST */
 	       + nla_total_size(IFNAMSIZ) /* IFA_LABEL */
-	       + nla_total_size(4);  /* IFA_FLAGS */
+	       + nla_total_size(4)  /* IFA_FLAGS */
+	       + nla_total_size(sizeof(struct ifa_cacheinfo)); /* IFA_CACHEINFO */
 }
 
 static inline u32 cstamp_delta(unsigned long cstamp)
diff --git a/net/ipv4/ip_forward.c b/net/ipv4/ip_forward.c
index e9f1217..f3869c1 100644
--- a/net/ipv4/ip_forward.c
+++ b/net/ipv4/ip_forward.c
@@ -39,6 +39,71 @@
 #include <net/route.h>
 #include <net/xfrm.h>
 
+static bool ip_may_fragment(const struct sk_buff *skb)
+{
+	return unlikely((ip_hdr(skb)->frag_off & htons(IP_DF)) == 0) ||
+	       !skb->local_df;
+}
+
+static bool ip_exceeds_mtu(const struct sk_buff *skb, unsigned int mtu)
+{
+	if (skb->len <= mtu || skb->local_df)
+		return false;
+
+	if (skb_is_gso(skb) && skb_gso_network_seglen(skb) <= mtu)
+		return false;
+
+	return true;
+}
+
+static bool ip_gso_exceeds_dst_mtu(const struct sk_buff *skb)
+{
+	unsigned int mtu;
+
+	if (skb->local_df || !skb_is_gso(skb))
+		return false;
+
+	mtu = ip_dst_mtu_maybe_forward(skb_dst(skb), true);
+
+	/* if seglen > mtu, do software segmentation for IP fragmentation on
+	 * output.  DF bit cannot be set since ip_forward would have sent
+	 * icmp error.
+	 */
+	return skb_gso_network_seglen(skb) > mtu;
+}
+
+/* called if GSO skb needs to be fragmented on forward */
+static int ip_forward_finish_gso(struct sk_buff *skb)
+{
+	struct dst_entry *dst = skb_dst(skb);
+	netdev_features_t features;
+	struct sk_buff *segs;
+	int ret = 0;
+
+	features = netif_skb_dev_features(skb, dst->dev);
+	segs = skb_gso_segment(skb, features & ~NETIF_F_GSO_MASK);
+	if (IS_ERR(segs)) {
+		kfree_skb(skb);
+		return -ENOMEM;
+	}
+
+	consume_skb(skb);
+
+	do {
+		struct sk_buff *nskb = segs->next;
+		int err;
+
+		segs->next = NULL;
+		err = dst_output(segs);
+
+		if (err && ret == 0)
+			ret = err;
+		segs = nskb;
+	} while (segs);
+
+	return ret;
+}
+
 static int ip_forward_finish(struct sk_buff *skb)
 {
 	struct ip_options *opt	= &(IPCB(skb)->opt);
@@ -49,6 +114,9 @@
 	if (unlikely(opt->optlen))
 		ip_forward_options(skb);
 
+	if (ip_gso_exceeds_dst_mtu(skb))
+		return ip_forward_finish_gso(skb);
+
 	return dst_output(skb);
 }
 
@@ -91,8 +159,7 @@
 
 	IPCB(skb)->flags |= IPSKB_FORWARDED;
 	mtu = ip_dst_mtu_maybe_forward(&rt->dst, true);
-	if (unlikely(skb->len > mtu && !skb_is_gso(skb) &&
-		     (ip_hdr(skb)->frag_off & htons(IP_DF))) && !skb->local_df) {
+	if (!ip_may_fragment(skb) && ip_exceeds_mtu(skb, mtu)) {
 		IP_INC_STATS(dev_net(rt->dst.dev), IPSTATS_MIB_FRAGFAILS);
 		icmp_send(skb, ICMP_DEST_UNREACH, ICMP_FRAG_NEEDED,
 			  htonl(mtu));
diff --git a/net/ipv4/ip_gre.c b/net/ipv4/ip_gre.c
index e7a92fd..ec4f762 100644
--- a/net/ipv4/ip_gre.c
+++ b/net/ipv4/ip_gre.c
@@ -178,7 +178,7 @@
 	else
 		itn = net_generic(net, ipgre_net_id);
 
-	iph = (const struct iphdr *)skb->data;
+	iph = (const struct iphdr *)(icmp_hdr(skb) + 1);
 	t = ip_tunnel_lookup(itn, skb->dev->ifindex, tpi->flags,
 			     iph->daddr, iph->saddr, tpi->key);
 
diff --git a/net/ipv4/ip_input.c b/net/ipv4/ip_input.c
index 054a3e9..3d4da2c 100644
--- a/net/ipv4/ip_input.c
+++ b/net/ipv4/ip_input.c
@@ -314,7 +314,7 @@
 	const struct iphdr *iph = ip_hdr(skb);
 	struct rtable *rt;
 
-	if (sysctl_ip_early_demux && !skb_dst(skb)) {
+	if (sysctl_ip_early_demux && !skb_dst(skb) && skb->sk == NULL) {
 		const struct net_protocol *ipprot;
 		int protocol = iph->protocol;
 
diff --git a/net/ipv4/ip_tunnel.c b/net/ipv4/ip_tunnel.c
index c0e3cb7..50228be 100644
--- a/net/ipv4/ip_tunnel.c
+++ b/net/ipv4/ip_tunnel.c
@@ -40,6 +40,7 @@
 #include <linux/if_ether.h>
 #include <linux/if_vlan.h>
 #include <linux/rculist.h>
+#include <linux/err.h>
 
 #include <net/sock.h>
 #include <net/ip.h>
@@ -100,28 +101,22 @@
 		__tunnel_dst_set(per_cpu_ptr(t->dst_cache, i), NULL);
 }
 
-static struct dst_entry *tunnel_dst_get(struct ip_tunnel *t)
+static struct rtable *tunnel_rtable_get(struct ip_tunnel *t, u32 cookie)
 {
 	struct dst_entry *dst;
 
 	rcu_read_lock();
 	dst = rcu_dereference(this_cpu_ptr(t->dst_cache)->dst);
-	if (dst)
+	if (dst) {
+		if (dst->obsolete && dst->ops->check(dst, cookie) == NULL) {
+			rcu_read_unlock();
+			tunnel_dst_reset(t);
+			return NULL;
+		}
 		dst_hold(dst);
-	rcu_read_unlock();
-	return dst;
-}
-
-static struct dst_entry *tunnel_dst_check(struct ip_tunnel *t, u32 cookie)
-{
-	struct dst_entry *dst = tunnel_dst_get(t);
-
-	if (dst && dst->obsolete && dst->ops->check(dst, cookie) == NULL) {
-		tunnel_dst_reset(t);
-		return NULL;
 	}
-
-	return dst;
+	rcu_read_unlock();
+	return (struct rtable *)dst;
 }
 
 /* Often modified stats are per cpu, other are shared (netdev->stats) */
@@ -583,7 +578,7 @@
 	struct flowi4 fl4;
 	u8     tos, ttl;
 	__be16 df;
-	struct rtable *rt = NULL;	/* Route to the other host */
+	struct rtable *rt;		/* Route to the other host */
 	unsigned int max_headroom;	/* The extra header space needed */
 	__be32 dst;
 	int err;
@@ -656,8 +651,7 @@
 	init_tunnel_flow(&fl4, protocol, dst, tnl_params->saddr,
 			 tunnel->parms.o_key, RT_TOS(tos), tunnel->parms.link);
 
-	if (connected)
-		rt = (struct rtable *)tunnel_dst_check(tunnel, 0);
+	rt = connected ? tunnel_rtable_get(tunnel, 0) : NULL;
 
 	if (!rt) {
 		rt = ip_route_output_key(tunnel->net, &fl4);
@@ -930,7 +924,7 @@
 	}
 	rtnl_unlock();
 
-	return PTR_RET(itn->fb_tunnel_dev);
+	return PTR_ERR_OR_ZERO(itn->fb_tunnel_dev);
 }
 EXPORT_SYMBOL_GPL(ip_tunnel_init_net);
 
diff --git a/net/ipv4/ipconfig.c b/net/ipv4/ipconfig.c
index efa1138..b3e86ea 100644
--- a/net/ipv4/ipconfig.c
+++ b/net/ipv4/ipconfig.c
@@ -273,7 +273,7 @@
 
 		msleep(1);
 
-		if time_before(jiffies, next_msg)
+		if (time_before(jiffies, next_msg))
 			continue;
 
 		elapsed = jiffies_to_msecs(jiffies - start);
diff --git a/net/ipv4/netfilter/Kconfig b/net/ipv4/netfilter/Kconfig
index 81c6910..a26ce03 100644
--- a/net/ipv4/netfilter/Kconfig
+++ b/net/ipv4/netfilter/Kconfig
@@ -61,6 +61,11 @@
 	  packet transformations such as the source, destination address and
 	  source and destination ports.
 
+config NFT_REJECT_IPV4
+	depends on NF_TABLES_IPV4
+	default NFT_REJECT
+	tristate
+
 config NF_TABLES_ARP
 	depends on NF_TABLES
 	tristate "ARP nf_tables support"
diff --git a/net/ipv4/netfilter/Makefile b/net/ipv4/netfilter/Makefile
index c16be9d..90b8240 100644
--- a/net/ipv4/netfilter/Makefile
+++ b/net/ipv4/netfilter/Makefile
@@ -30,6 +30,7 @@
 obj-$(CONFIG_NF_TABLES_IPV4) += nf_tables_ipv4.o
 obj-$(CONFIG_NFT_CHAIN_ROUTE_IPV4) += nft_chain_route_ipv4.o
 obj-$(CONFIG_NFT_CHAIN_NAT_IPV4) += nft_chain_nat_ipv4.o
+obj-$(CONFIG_NFT_REJECT_IPV4) += nft_reject_ipv4.o
 obj-$(CONFIG_NF_TABLES_ARP) += nf_tables_arp.o
 
 # generic IP tables 
diff --git a/net/ipv4/netfilter/nf_nat_h323.c b/net/ipv4/netfilter/nf_nat_h323.c
index 9eea059d..574f7eb 100644
--- a/net/ipv4/netfilter/nf_nat_h323.c
+++ b/net/ipv4/netfilter/nf_nat_h323.c
@@ -229,7 +229,10 @@
 			ret = nf_ct_expect_related(rtcp_exp);
 			if (ret == 0)
 				break;
-			else if (ret != -EBUSY) {
+			else if (ret == -EBUSY) {
+				nf_ct_unexpect_related(rtp_exp);
+				continue;
+			} else if (ret < 0) {
 				nf_ct_unexpect_related(rtp_exp);
 				nated_port = 0;
 				break;
diff --git a/net/ipv4/netfilter/nft_reject_ipv4.c b/net/ipv4/netfilter/nft_reject_ipv4.c
new file mode 100644
index 0000000..e79718a
--- /dev/null
+++ b/net/ipv4/netfilter/nft_reject_ipv4.c
@@ -0,0 +1,75 @@
+/*
+ * Copyright (c) 2008-2009 Patrick McHardy <kaber@trash.net>
+ * Copyright (c) 2013 Eric Leblond <eric@regit.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * Development of this code funded by Astaro AG (http://www.astaro.com/)
+ */
+
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/netlink.h>
+#include <linux/netfilter.h>
+#include <linux/netfilter/nf_tables.h>
+#include <net/netfilter/nf_tables.h>
+#include <net/icmp.h>
+#include <net/netfilter/ipv4/nf_reject.h>
+#include <net/netfilter/nft_reject.h>
+
+void nft_reject_ipv4_eval(const struct nft_expr *expr,
+			  struct nft_data data[NFT_REG_MAX + 1],
+			  const struct nft_pktinfo *pkt)
+{
+	struct nft_reject *priv = nft_expr_priv(expr);
+
+	switch (priv->type) {
+	case NFT_REJECT_ICMP_UNREACH:
+		nf_send_unreach(pkt->skb, priv->icmp_code);
+		break;
+	case NFT_REJECT_TCP_RST:
+		nf_send_reset(pkt->skb, pkt->ops->hooknum);
+		break;
+	}
+
+	data[NFT_REG_VERDICT].verdict = NF_DROP;
+}
+EXPORT_SYMBOL_GPL(nft_reject_ipv4_eval);
+
+static struct nft_expr_type nft_reject_ipv4_type;
+static const struct nft_expr_ops nft_reject_ipv4_ops = {
+	.type		= &nft_reject_ipv4_type,
+	.size		= NFT_EXPR_SIZE(sizeof(struct nft_reject)),
+	.eval		= nft_reject_ipv4_eval,
+	.init		= nft_reject_init,
+	.dump		= nft_reject_dump,
+};
+
+static struct nft_expr_type nft_reject_ipv4_type __read_mostly = {
+	.family		= NFPROTO_IPV4,
+	.name		= "reject",
+	.ops		= &nft_reject_ipv4_ops,
+	.policy		= nft_reject_policy,
+	.maxattr	= NFTA_REJECT_MAX,
+	.owner		= THIS_MODULE,
+};
+
+static int __init nft_reject_ipv4_module_init(void)
+{
+	return nft_register_expr(&nft_reject_ipv4_type);
+}
+
+static void __exit nft_reject_ipv4_module_exit(void)
+{
+	nft_unregister_expr(&nft_reject_ipv4_type);
+}
+
+module_init(nft_reject_ipv4_module_init);
+module_exit(nft_reject_ipv4_module_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Patrick McHardy <kaber@trash.net>");
+MODULE_ALIAS_NFT_AF_EXPR(AF_INET, "reject");
diff --git a/net/ipv4/route.c b/net/ipv4/route.c
index 25071b4..4c011ec 100644
--- a/net/ipv4/route.c
+++ b/net/ipv4/route.c
@@ -1597,6 +1597,7 @@
 	rth->rt_gateway	= 0;
 	rth->rt_uses_gateway = 0;
 	INIT_LIST_HEAD(&rth->rt_uncached);
+	RT_CACHE_STAT_INC(in_slow_tot);
 
 	rth->dst.input = ip_forward;
 	rth->dst.output = ip_output;
@@ -1695,10 +1696,11 @@
 	fl4.daddr = daddr;
 	fl4.saddr = saddr;
 	err = fib_lookup(net, &fl4, &res);
-	if (err != 0)
+	if (err != 0) {
+		if (!IN_DEV_FORWARD(in_dev))
+			err = -EHOSTUNREACH;
 		goto no_route;
-
-	RT_CACHE_STAT_INC(in_slow_tot);
+	}
 
 	if (res.type == RTN_BROADCAST)
 		goto brd_input;
@@ -1712,8 +1714,10 @@
 		goto local_input;
 	}
 
-	if (!IN_DEV_FORWARD(in_dev))
+	if (!IN_DEV_FORWARD(in_dev)) {
+		err = -EHOSTUNREACH;
 		goto no_route;
+	}
 	if (res.type != RTN_UNICAST)
 		goto martian_destination;
 
@@ -1768,6 +1772,7 @@
 	rth->rt_gateway	= 0;
 	rth->rt_uses_gateway = 0;
 	INIT_LIST_HEAD(&rth->rt_uncached);
+	RT_CACHE_STAT_INC(in_slow_tot);
 	if (res.type == RTN_UNREACHABLE) {
 		rth->dst.input= ip_error;
 		rth->dst.error= -err;
diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
index 4475b3b..9f3a2db 100644
--- a/net/ipv4/tcp.c
+++ b/net/ipv4/tcp.c
@@ -2229,7 +2229,7 @@
 	/*	This is a (useful) BSD violating of the RFC. There is a
 	 *	problem with TCP as specified in that the other end could
 	 *	keep a socket open forever with no application left this end.
-	 *	We use a 3 minute timeout (about the same as BSD) then kill
+	 *	We use a 1 minute timeout (about the same as BSD) then kill
 	 *	our end. If they send after that then tough - BUT: long enough
 	 *	that we won't make the old 4*rto = almost no time - whoops
 	 *	reset mistake.
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index 65cf90e..227cba7 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -671,6 +671,7 @@
 {
 	struct tcp_sock *tp = tcp_sk(sk);
 	long m = mrtt; /* RTT */
+	u32 srtt = tp->srtt;
 
 	/*	The following amusing code comes from Jacobson's
 	 *	article in SIGCOMM '88.  Note that rtt and mdev
@@ -688,11 +689,9 @@
 	 * does not matter how to _calculate_ it. Seems, it was trap
 	 * that VJ failed to avoid. 8)
 	 */
-	if (m == 0)
-		m = 1;
-	if (tp->srtt != 0) {
-		m -= (tp->srtt >> 3);	/* m is now error in rtt est */
-		tp->srtt += m;		/* rtt = 7/8 rtt + 1/8 new */
+	if (srtt != 0) {
+		m -= (srtt >> 3);	/* m is now error in rtt est */
+		srtt += m;		/* rtt = 7/8 rtt + 1/8 new */
 		if (m < 0) {
 			m = -m;		/* m is now abs(error) */
 			m -= (tp->mdev >> 2);   /* similar update on mdev */
@@ -723,11 +722,12 @@
 		}
 	} else {
 		/* no previous measure. */
-		tp->srtt = m << 3;	/* take the measured time to be rtt */
+		srtt = m << 3;		/* take the measured time to be rtt */
 		tp->mdev = m << 1;	/* make sure rto = 3*rtt */
 		tp->mdev_max = tp->rttvar = max(tp->mdev, tcp_rto_min(sk));
 		tp->rtt_seq = tp->snd_nxt;
 	}
+	tp->srtt = max(1U, srtt);
 }
 
 /* Set the sk_pacing_rate to allow proper sizing of TSO packets.
@@ -746,8 +746,10 @@
 
 	rate *= max(tp->snd_cwnd, tp->packets_out);
 
-	/* Correction for small srtt : minimum srtt being 8 (1 jiffy << 3),
-	 * be conservative and assume srtt = 1 (125 us instead of 1.25 ms)
+	/* Correction for small srtt and scheduling constraints.
+	 * For small rtt, consider noise is too high, and use
+	 * the minimal value (srtt = 1 -> 125 us for HZ=1000)
+	 *
 	 * We probably need usec resolution in the future.
 	 * Note: This also takes care of possible srtt=0 case,
 	 * when tcp_rtt_estimator() was not yet called.
diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
index 03d26b8..3be1672 100644
--- a/net/ipv4/tcp_output.c
+++ b/net/ipv4/tcp_output.c
@@ -698,7 +698,8 @@
 	if ((1 << sk->sk_state) &
 	    (TCPF_ESTABLISHED | TCPF_FIN_WAIT1 | TCPF_CLOSING |
 	     TCPF_CLOSE_WAIT  | TCPF_LAST_ACK))
-		tcp_write_xmit(sk, tcp_current_mss(sk), 0, 0, GFP_ATOMIC);
+		tcp_write_xmit(sk, tcp_current_mss(sk), tcp_sk(sk)->nonagle,
+			       0, GFP_ATOMIC);
 }
 /*
  * One tasklet per cpu tries to send more skbs.
@@ -1904,7 +1905,15 @@
 
 		if (atomic_read(&sk->sk_wmem_alloc) > limit) {
 			set_bit(TSQ_THROTTLED, &tp->tsq_flags);
-			break;
+			/* It is possible TX completion already happened
+			 * before we set TSQ_THROTTLED, so we must
+			 * test again the condition.
+			 * We abuse smp_mb__after_clear_bit() because
+			 * there is no smp_mb__after_set_bit() yet
+			 */
+			smp_mb__after_clear_bit();
+			if (atomic_read(&sk->sk_wmem_alloc) > limit)
+				break;
 		}
 
 		limit = mss_now;
@@ -1977,7 +1986,7 @@
 	/* Schedule a loss probe in 2*RTT for SACK capable connections
 	 * in Open state, that are either limited by cwnd or application.
 	 */
-	if (sysctl_tcp_early_retrans < 3 || !rtt || !tp->packets_out ||
+	if (sysctl_tcp_early_retrans < 3 || !tp->srtt || !tp->packets_out ||
 	    !tcp_is_sack(tp) || inet_csk(sk)->icsk_ca_state != TCP_CA_Open)
 		return false;
 
diff --git a/net/ipv4/udp_offload.c b/net/ipv4/udp_offload.c
index 25f5cee..88b4023 100644
--- a/net/ipv4/udp_offload.c
+++ b/net/ipv4/udp_offload.c
@@ -17,6 +17,8 @@
 static DEFINE_SPINLOCK(udp_offload_lock);
 static struct udp_offload_priv __rcu *udp_offload_base __read_mostly;
 
+#define udp_deref_protected(X) rcu_dereference_protected(X, lockdep_is_held(&udp_offload_lock))
+
 struct udp_offload_priv {
 	struct udp_offload	*offload;
 	struct rcu_head		rcu;
@@ -100,8 +102,7 @@
 
 int udp_add_offload(struct udp_offload *uo)
 {
-	struct udp_offload_priv __rcu **head = &udp_offload_base;
-	struct udp_offload_priv *new_offload = kzalloc(sizeof(*new_offload), GFP_KERNEL);
+	struct udp_offload_priv *new_offload = kzalloc(sizeof(*new_offload), GFP_ATOMIC);
 
 	if (!new_offload)
 		return -ENOMEM;
@@ -109,8 +110,8 @@
 	new_offload->offload = uo;
 
 	spin_lock(&udp_offload_lock);
-	rcu_assign_pointer(new_offload->next, rcu_dereference(*head));
-	rcu_assign_pointer(*head, new_offload);
+	new_offload->next = udp_offload_base;
+	rcu_assign_pointer(udp_offload_base, new_offload);
 	spin_unlock(&udp_offload_lock);
 
 	return 0;
@@ -130,12 +131,12 @@
 
 	spin_lock(&udp_offload_lock);
 
-	uo_priv = rcu_dereference(*head);
+	uo_priv = udp_deref_protected(*head);
 	for (; uo_priv != NULL;
-		uo_priv = rcu_dereference(*head)) {
-
+	     uo_priv = udp_deref_protected(*head)) {
 		if (uo_priv->offload == uo) {
-			rcu_assign_pointer(*head, rcu_dereference(uo_priv->next));
+			rcu_assign_pointer(*head,
+					   udp_deref_protected(uo_priv->next));
 			goto unlock;
 		}
 		head = &uo_priv->next;
diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c
index ad23569..fdbfeca3 100644
--- a/net/ipv6/addrconf.c
+++ b/net/ipv6/addrconf.c
@@ -2783,6 +2783,8 @@
 	ipv6_addr_set(&addr,  htonl(0xFE800000), 0, 0, 0);
 	if (!ipv6_generate_eui64(addr.s6_addr + 8, dev))
 		addrconf_add_linklocal(idev, &addr);
+	else
+		addrconf_prefix_route(&addr, 64, dev, 0, 0);
 }
 #endif
 
diff --git a/net/ipv6/icmp.c b/net/ipv6/icmp.c
index f81f596..f2610e1 100644
--- a/net/ipv6/icmp.c
+++ b/net/ipv6/icmp.c
@@ -414,7 +414,7 @@
 	addr_type = ipv6_addr_type(&hdr->daddr);
 
 	if (ipv6_chk_addr(net, &hdr->daddr, skb->dev, 0) ||
-	    ipv6_anycast_destination(skb))
+	    ipv6_chk_acast_addr_src(net, skb->dev, &hdr->daddr))
 		saddr = &hdr->daddr;
 
 	/*
diff --git a/net/ipv6/ip6_input.c b/net/ipv6/ip6_input.c
index 302d6fb..51d54dc 100644
--- a/net/ipv6/ip6_input.c
+++ b/net/ipv6/ip6_input.c
@@ -49,7 +49,7 @@
 
 int ip6_rcv_finish(struct sk_buff *skb)
 {
-	if (sysctl_ip_early_demux && !skb_dst(skb)) {
+	if (sysctl_ip_early_demux && !skb_dst(skb) && skb->sk == NULL) {
 		const struct inet6_protocol *ipprot;
 
 		ipprot = rcu_dereference(inet6_protos[ipv6_hdr(skb)->nexthdr]);
diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c
index ef02b26..070a2fa 100644
--- a/net/ipv6/ip6_output.c
+++ b/net/ipv6/ip6_output.c
@@ -342,6 +342,20 @@
 	return mtu;
 }
 
+static bool ip6_pkt_too_big(const struct sk_buff *skb, unsigned int mtu)
+{
+	if (skb->len <= mtu || skb->local_df)
+		return false;
+
+	if (IP6CB(skb)->frag_max_size && IP6CB(skb)->frag_max_size > mtu)
+		return true;
+
+	if (skb_is_gso(skb) && skb_gso_network_seglen(skb) <= mtu)
+		return false;
+
+	return true;
+}
+
 int ip6_forward(struct sk_buff *skb)
 {
 	struct dst_entry *dst = skb_dst(skb);
@@ -466,8 +480,7 @@
 	if (mtu < IPV6_MIN_MTU)
 		mtu = IPV6_MIN_MTU;
 
-	if ((!skb->local_df && skb->len > mtu && !skb_is_gso(skb)) ||
-	    (IP6CB(skb)->frag_max_size && IP6CB(skb)->frag_max_size > mtu)) {
+	if (ip6_pkt_too_big(skb, mtu)) {
 		/* Again, force OUTPUT device used as source address */
 		skb->dev = dst->dev;
 		icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu);
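
The new ip6_pkt_too_big() helper concentrates the forwarding-time size check: a packet over the MTU is still forwarded if fragmentation is permitted (local_df) or if it is a GSO packet whose individual segments fit. A hedged userspace rendering of that decision follows, with plain integers standing in for the skb fields.

#include <stdbool.h>
#include <stdio.h>

struct pkt {                        /* illustrative stand-in for the skb fields used above */
	unsigned int len;           /* total packet length */
	bool local_df;              /* local fragmentation allowed */
	unsigned int frag_max_size; /* largest fragment seen on reassembly, 0 if none */
	bool gso;                   /* packet carries GSO segments */
	unsigned int gso_seglen;    /* network-layer length of each segment */
};

static bool pkt_too_big(const struct pkt *p, unsigned int mtu)
{
	if (p->len <= mtu || p->local_df)
		return false;
	if (p->frag_max_size && p->frag_max_size > mtu)
		return true;
	if (p->gso && p->gso_seglen <= mtu)
		return false;           /* segments will fit once segmented */
	return true;
}

int main(void)
{
	struct pkt gso = { .len = 64000, .gso = true, .gso_seglen = 1440 };
	struct pkt big = { .len = 3000 };

	printf("gso too big: %d\n", pkt_too_big(&gso, 1500));   /* 0: forwarded */
	printf("big too big: %d\n", pkt_too_big(&big, 1500));   /* 1: ICMPV6_PKT_TOOBIG */
	return 0;
}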
diff --git a/net/ipv6/netfilter/Kconfig b/net/ipv6/netfilter/Kconfig
index 35750df..4bff1f2 100644
--- a/net/ipv6/netfilter/Kconfig
+++ b/net/ipv6/netfilter/Kconfig
@@ -50,6 +50,11 @@
 	  packet transformations such as the source, destination address and
 	  source and destination ports.
 
+config NFT_REJECT_IPV6
+	depends on NF_TABLES_IPV6
+	default NFT_REJECT
+	tristate
+
 config IP6_NF_IPTABLES
 	tristate "IP6 tables support (required for filtering)"
 	depends on INET && IPV6
diff --git a/net/ipv6/netfilter/Makefile b/net/ipv6/netfilter/Makefile
index d1b4928..70d3dd6 100644
--- a/net/ipv6/netfilter/Makefile
+++ b/net/ipv6/netfilter/Makefile
@@ -27,6 +27,7 @@
 obj-$(CONFIG_NF_TABLES_IPV6) += nf_tables_ipv6.o
 obj-$(CONFIG_NFT_CHAIN_ROUTE_IPV6) += nft_chain_route_ipv6.o
 obj-$(CONFIG_NFT_CHAIN_NAT_IPV6) += nft_chain_nat_ipv6.o
+obj-$(CONFIG_NFT_REJECT_IPV6) += nft_reject_ipv6.o
 
 # matches
 obj-$(CONFIG_IP6_NF_MATCH_AH) += ip6t_ah.o
diff --git a/net/ipv6/netfilter/nft_reject_ipv6.c b/net/ipv6/netfilter/nft_reject_ipv6.c
new file mode 100644
index 0000000..0bc19fa
--- /dev/null
+++ b/net/ipv6/netfilter/nft_reject_ipv6.c
@@ -0,0 +1,76 @@
+/*
+ * Copyright (c) 2008-2009 Patrick McHardy <kaber@trash.net>
+ * Copyright (c) 2013 Eric Leblond <eric@regit.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * Development of this code funded by Astaro AG (http://www.astaro.com/)
+ */
+
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/netlink.h>
+#include <linux/netfilter.h>
+#include <linux/netfilter/nf_tables.h>
+#include <net/netfilter/nf_tables.h>
+#include <net/netfilter/nft_reject.h>
+#include <net/netfilter/ipv6/nf_reject.h>
+
+void nft_reject_ipv6_eval(const struct nft_expr *expr,
+			  struct nft_data data[NFT_REG_MAX + 1],
+			  const struct nft_pktinfo *pkt)
+{
+	struct nft_reject *priv = nft_expr_priv(expr);
+	struct net *net = dev_net((pkt->in != NULL) ? pkt->in : pkt->out);
+
+	switch (priv->type) {
+	case NFT_REJECT_ICMP_UNREACH:
+		nf_send_unreach6(net, pkt->skb, priv->icmp_code,
+				 pkt->ops->hooknum);
+		break;
+	case NFT_REJECT_TCP_RST:
+		nf_send_reset6(net, pkt->skb, pkt->ops->hooknum);
+		break;
+	}
+
+	data[NFT_REG_VERDICT].verdict = NF_DROP;
+}
+EXPORT_SYMBOL_GPL(nft_reject_ipv6_eval);
+
+static struct nft_expr_type nft_reject_ipv6_type;
+static const struct nft_expr_ops nft_reject_ipv6_ops = {
+	.type		= &nft_reject_ipv6_type,
+	.size		= NFT_EXPR_SIZE(sizeof(struct nft_reject)),
+	.eval		= nft_reject_ipv6_eval,
+	.init		= nft_reject_init,
+	.dump		= nft_reject_dump,
+};
+
+static struct nft_expr_type nft_reject_ipv6_type __read_mostly = {
+	.family		= NFPROTO_IPV6,
+	.name		= "reject",
+	.ops		= &nft_reject_ipv6_ops,
+	.policy		= nft_reject_policy,
+	.maxattr	= NFTA_REJECT_MAX,
+	.owner		= THIS_MODULE,
+};
+
+static int __init nft_reject_ipv6_module_init(void)
+{
+	return nft_register_expr(&nft_reject_ipv6_type);
+}
+
+static void __exit nft_reject_ipv6_module_exit(void)
+{
+	nft_unregister_expr(&nft_reject_ipv6_type);
+}
+
+module_init(nft_reject_ipv6_module_init);
+module_exit(nft_reject_ipv6_module_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Patrick McHardy <kaber@trash.net>");
+MODULE_ALIAS_NFT_AF_EXPR(AF_INET6, "reject");
diff --git a/net/ipx/af_ipx.c b/net/ipx/af_ipx.c
index 994e28b..00b2a6d 100644
--- a/net/ipx/af_ipx.c
+++ b/net/ipx/af_ipx.c
@@ -52,18 +52,12 @@
 #include <net/p8022.h>
 #include <net/psnap.h>
 #include <net/sock.h>
+#include <net/datalink.h>
 #include <net/tcp_states.h>
+#include <net/net_namespace.h>
 
 #include <asm/uaccess.h>
 
-#ifdef CONFIG_SYSCTL
-extern void ipx_register_sysctl(void);
-extern void ipx_unregister_sysctl(void);
-#else
-#define ipx_register_sysctl()
-#define ipx_unregister_sysctl()
-#endif
-
 /* Configuration Variables */
 static unsigned char ipxcfg_max_hops = 16;
 static char ipxcfg_auto_select_primary;
@@ -84,15 +78,6 @@
 struct ipx_interface *ipx_primary_net;
 struct ipx_interface *ipx_internal_net;
 
-extern int ipxrtr_add_route(__be32 network, struct ipx_interface *intrfc,
-			    unsigned char *node);
-extern void ipxrtr_del_routes(struct ipx_interface *intrfc);
-extern int ipxrtr_route_packet(struct sock *sk, struct sockaddr_ipx *usipx,
-			       struct iovec *iov, size_t len, int noblock);
-extern int ipxrtr_route_skb(struct sk_buff *skb);
-extern struct ipx_route *ipxrtr_lookup(__be32 net);
-extern int ipxrtr_ioctl(unsigned int cmd, void __user *arg);
-
 struct ipx_interface *ipx_interfaces_head(void)
 {
 	struct ipx_interface *rc = NULL;
@@ -1986,9 +1971,6 @@
 	.notifier_call	= ipxitf_device_event,
 };
 
-extern struct datalink_proto *make_EII_client(void);
-extern void destroy_EII_client(struct datalink_proto *);
-
 static const unsigned char ipx_8022_type = 0xE0;
 static const unsigned char ipx_snap_id[5] = { 0x0, 0x0, 0x0, 0x81, 0x37 };
 static const char ipx_EII_err_msg[] __initconst =
diff --git a/net/ipx/ipx_route.c b/net/ipx/ipx_route.c
index 30f4519..c1f0318 100644
--- a/net/ipx/ipx_route.c
+++ b/net/ipx/ipx_route.c
@@ -20,15 +20,11 @@
 
 extern struct ipx_interface *ipx_internal_net;
 
-extern __be16 ipx_cksum(struct ipxhdr *packet, int length);
 extern struct ipx_interface *ipxitf_find_using_net(__be32 net);
 extern int ipxitf_demux_socket(struct ipx_interface *intrfc,
 			       struct sk_buff *skb, int copy);
 extern int ipxitf_demux_socket(struct ipx_interface *intrfc,
 			       struct sk_buff *skb, int copy);
-extern int ipxitf_send(struct ipx_interface *intrfc, struct sk_buff *skb,
-		       char *node);
-extern struct ipx_interface *ipxitf_find_using_net(__be32 net);
 
 struct ipx_route *ipxrtr_lookup(__be32 net)
 {
diff --git a/net/llc/llc_output.c b/net/llc/llc_output.c
index 2dae8a5..94425e4 100644
--- a/net/llc/llc_output.c
+++ b/net/llc/llc_output.c
@@ -43,7 +43,7 @@
 			rc = 0;
 		break;
 	default:
-		WARN(1, "device type not supported: %d\n", skb->dev->type);
+		break;
 	}
 	return rc;
 }
diff --git a/net/mac80211/cfg.c b/net/mac80211/cfg.c
index f9ae9b8..453e974 100644
--- a/net/mac80211/cfg.c
+++ b/net/mac80211/cfg.c
@@ -1021,8 +1021,10 @@
 					IEEE80211_P2P_OPPPS_ENABLE_BIT;
 
 	err = ieee80211_assign_beacon(sdata, &params->beacon);
-	if (err < 0)
+	if (err < 0) {
+		ieee80211_vif_release_channel(sdata);
 		return err;
+	}
 	changed |= err;
 
 	err = drv_start_ap(sdata->local, sdata);
@@ -1032,6 +1034,7 @@
 		if (old)
 			kfree_rcu(old, rcu_head);
 		RCU_INIT_POINTER(sdata->u.ap.beacon, NULL);
+		ieee80211_vif_release_channel(sdata);
 		return err;
 	}
 
@@ -1090,8 +1093,6 @@
 	kfree(sdata->u.ap.next_beacon);
 	sdata->u.ap.next_beacon = NULL;
 
-	cancel_work_sync(&sdata->u.ap.request_smps_work);
-
 	/* turn off carrier for this interface and dependent VLANs */
 	list_for_each_entry(vlan, &sdata->u.ap.vlans, u.vlan.list)
 		netif_carrier_off(vlan->dev);
@@ -1103,6 +1104,7 @@
 	kfree_rcu(old_beacon, rcu_head);
 	if (old_probe_resp)
 		kfree_rcu(old_probe_resp, rcu_head);
+	sdata->u.ap.driver_smps_mode = IEEE80211_SMPS_OFF;
 
 	__sta_info_flush(sdata, true);
 	ieee80211_free_keys(sdata, true);
@@ -2638,6 +2640,24 @@
 	INIT_DELAYED_WORK(&roc->work, ieee80211_sw_roc_work);
 	INIT_LIST_HEAD(&roc->dependents);
 
+	/*
+	 * cookie is either the roc cookie (for normal roc)
+	 * or the SKB (for mgmt TX)
+	 */
+	if (!txskb) {
+		/* local->mtx protects this */
+		local->roc_cookie_counter++;
+		roc->cookie = local->roc_cookie_counter;
+		/* wow, you wrapped 64 bits ... more likely a bug */
+		if (WARN_ON(roc->cookie == 0)) {
+			roc->cookie = 1;
+			local->roc_cookie_counter++;
+		}
+		*cookie = roc->cookie;
+	} else {
+		*cookie = (unsigned long)txskb;
+	}
+
 	/* if there's one pending or we're scanning, queue this one */
 	if (!list_empty(&local->roc_list) ||
 	    local->scanning || local->radar_detect_enabled)
@@ -2772,24 +2792,6 @@
 	if (!queued)
 		list_add_tail(&roc->list, &local->roc_list);
 
-	/*
-	 * cookie is either the roc cookie (for normal roc)
-	 * or the SKB (for mgmt TX)
-	 */
-	if (!txskb) {
-		/* local->mtx protects this */
-		local->roc_cookie_counter++;
-		roc->cookie = local->roc_cookie_counter;
-		/* wow, you wrapped 64 bits ... more likely a bug */
-		if (WARN_ON(roc->cookie == 0)) {
-			roc->cookie = 1;
-			local->roc_cookie_counter++;
-		}
-		*cookie = roc->cookie;
-	} else {
-		*cookie = (unsigned long)txskb;
-	}
-
 	return 0;
 }
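
Moving the cookie assignment ahead of the queueing logic means the caller always receives a valid cookie whether the remain-on-channel request runs immediately or is deferred; the WARN_ON guards the practically impossible wrap of the 64-bit counter, since 0 is reserved to mean "no cookie". Here is a small stand-alone sketch of that non-zero monotonic cookie idiom (plain C, no locking shown; the kernel relies on local->mtx):

#include <stdint.h>
#include <stdio.h>

static uint64_t cookie_counter;

/* Hand out a non-zero, monotonically increasing cookie.
 * Zero is reserved as "no cookie", so skip it on wrap-around. */
static uint64_t next_cookie(void)
{
	uint64_t c = ++cookie_counter;

	if (c == 0)                     /* wrapped 64 bits: almost certainly a bug */
		c = ++cookie_counter;
	return c;
}

int main(void)
{
	printf("%llu %llu\n",
	       (unsigned long long)next_cookie(),
	       (unsigned long long)next_cookie());
	return 0;
}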
 
diff --git a/net/mac80211/ht.c b/net/mac80211/ht.c
index fab7b91..70dd013d 100644
--- a/net/mac80211/ht.c
+++ b/net/mac80211/ht.c
@@ -466,7 +466,9 @@
 			     u.ap.request_smps_work);
 
 	sdata_lock(sdata);
-	__ieee80211_request_smps_ap(sdata, sdata->u.ap.driver_smps_mode);
+	if (sdata_dereference(sdata->u.ap.beacon, sdata))
+		__ieee80211_request_smps_ap(sdata,
+					    sdata->u.ap.driver_smps_mode);
 	sdata_unlock(sdata);
 }
 
diff --git a/net/mac80211/ibss.c b/net/mac80211/ibss.c
index 771080e..2796a19 100644
--- a/net/mac80211/ibss.c
+++ b/net/mac80211/ibss.c
@@ -695,12 +695,9 @@
 	struct cfg80211_bss *cbss;
 	struct beacon_data *presp;
 	struct sta_info *sta;
-	int active_ibss;
 	u16 capability;
 
-	active_ibss = ieee80211_sta_active_ibss(sdata);
-
-	if (!active_ibss && !is_zero_ether_addr(ifibss->bssid)) {
+	if (!is_zero_ether_addr(ifibss->bssid)) {
 		capability = WLAN_CAPABILITY_IBSS;
 
 		if (ifibss->privacy)
diff --git a/net/mac80211/iface.c b/net/mac80211/iface.c
index 3dfd20a..ce1c443 100644
--- a/net/mac80211/iface.c
+++ b/net/mac80211/iface.c
@@ -418,20 +418,24 @@
 		return ret;
 	}
 
+	mutex_lock(&local->iflist_mtx);
+	rcu_assign_pointer(local->monitor_sdata, sdata);
+	mutex_unlock(&local->iflist_mtx);
+
 	mutex_lock(&local->mtx);
 	ret = ieee80211_vif_use_channel(sdata, &local->monitor_chandef,
 					IEEE80211_CHANCTX_EXCLUSIVE);
 	mutex_unlock(&local->mtx);
 	if (ret) {
+		mutex_lock(&local->iflist_mtx);
+		rcu_assign_pointer(local->monitor_sdata, NULL);
+		mutex_unlock(&local->iflist_mtx);
+		synchronize_net();
 		drv_remove_interface(local, sdata);
 		kfree(sdata);
 		return ret;
 	}
 
-	mutex_lock(&local->iflist_mtx);
-	rcu_assign_pointer(local->monitor_sdata, sdata);
-	mutex_unlock(&local->iflist_mtx);
-
 	return 0;
 }
 
@@ -770,12 +774,19 @@
 
 	ieee80211_roc_purge(local, sdata);
 
-	if (sdata->vif.type == NL80211_IFTYPE_STATION)
+	switch (sdata->vif.type) {
+	case NL80211_IFTYPE_STATION:
 		ieee80211_mgd_stop(sdata);
-
-	if (sdata->vif.type == NL80211_IFTYPE_ADHOC)
+		break;
+	case NL80211_IFTYPE_ADHOC:
 		ieee80211_ibss_stop(sdata);
-
+		break;
+	case NL80211_IFTYPE_AP:
+		cancel_work_sync(&sdata->u.ap.request_smps_work);
+		break;
+	default:
+		break;
+	}
 
 	/*
 	 * Remove all stations associated with this interface.
@@ -1046,7 +1057,8 @@
 
 static u16 ieee80211_netdev_select_queue(struct net_device *dev,
 					 struct sk_buff *skb,
-					 void *accel_priv)
+					 void *accel_priv,
+					 select_queue_fallback_t fallback)
 {
 	return ieee80211_select_queue(IEEE80211_DEV_TO_SUB_IF(dev), skb);
 }
@@ -1064,7 +1076,8 @@
 
 static u16 ieee80211_monitor_select_queue(struct net_device *dev,
 					  struct sk_buff *skb,
-					  void *accel_priv)
+					  void *accel_priv,
+					  select_queue_fallback_t fallback)
 {
 	struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
 	struct ieee80211_local *local = sdata->local;
diff --git a/net/mac80211/tx.c b/net/mac80211/tx.c
index 27c990b..97a02d3 100644
--- a/net/mac80211/tx.c
+++ b/net/mac80211/tx.c
@@ -878,7 +878,7 @@
 	}
 
 	/* adjust first fragment's length */
-	skb->len = hdrlen + per_fragm;
+	skb_trim(skb, hdrlen + per_fragm);
 	return 0;
 }
 
diff --git a/net/netfilter/Kconfig b/net/netfilter/Kconfig
index c374675..e9410d1 100644
--- a/net/netfilter/Kconfig
+++ b/net/netfilter/Kconfig
@@ -513,7 +513,6 @@
 
 config NFT_REJECT
 	depends on NF_TABLES
-	depends on NF_TABLES_IPV6 || !NF_TABLES_IPV6
 	default m if NETFILTER_ADVANCED=n
 	tristate "Netfilter nf_tables reject support"
 	help
@@ -521,6 +520,11 @@
 	  explicitly deny and notify via TCP reset/ICMP informational errors
 	  unallowed traffic.
 
+config NFT_REJECT_INET
+	depends on NF_TABLES_INET
+	default NFT_REJECT
+	tristate
+
 config NFT_COMPAT
 	depends on NF_TABLES
 	depends on NETFILTER_XTABLES
diff --git a/net/netfilter/Makefile b/net/netfilter/Makefile
index ee9c4de..bffdad7 100644
--- a/net/netfilter/Makefile
+++ b/net/netfilter/Makefile
@@ -79,6 +79,7 @@
 obj-$(CONFIG_NFT_NAT)		+= nft_nat.o
 obj-$(CONFIG_NFT_QUEUE)		+= nft_queue.o
 obj-$(CONFIG_NFT_REJECT) 	+= nft_reject.o
+obj-$(CONFIG_NFT_REJECT_INET)	+= nft_reject_inet.o
 obj-$(CONFIG_NFT_RBTREE)	+= nft_rbtree.o
 obj-$(CONFIG_NFT_HASH)		+= nft_hash.o
 obj-$(CONFIG_NFT_COUNTER)	+= nft_counter.o
diff --git a/net/netfilter/ipvs/ip_vs_conn.c b/net/netfilter/ipvs/ip_vs_conn.c
index 59a1a85..a8eb0a8 100644
--- a/net/netfilter/ipvs/ip_vs_conn.c
+++ b/net/netfilter/ipvs/ip_vs_conn.c
@@ -871,11 +871,11 @@
 	cp->protocol	   = p->protocol;
 	ip_vs_addr_set(p->af, &cp->caddr, p->caddr);
 	cp->cport	   = p->cport;
-	ip_vs_addr_set(p->af, &cp->vaddr, p->vaddr);
-	cp->vport	   = p->vport;
-	/* proto should only be IPPROTO_IP if d_addr is a fwmark */
+	/* proto should only be IPPROTO_IP if p->vaddr is a fwmark */
 	ip_vs_addr_set(p->protocol == IPPROTO_IP ? AF_UNSPEC : p->af,
-		       &cp->daddr, daddr);
+		       &cp->vaddr, p->vaddr);
+	cp->vport	   = p->vport;
+	ip_vs_addr_set(p->af, &cp->daddr, daddr);
 	cp->dport          = dport;
 	cp->flags	   = flags;
 	cp->fwmark         = fwmark;
diff --git a/net/netfilter/nf_conntrack_core.c b/net/netfilter/nf_conntrack_core.c
index 8824ed0..356bef5 100644
--- a/net/netfilter/nf_conntrack_core.c
+++ b/net/netfilter/nf_conntrack_core.c
@@ -312,6 +312,21 @@
 	nf_ct_delete((struct nf_conn *)ul_conntrack, 0, 0);
 }
 
+static inline bool
+nf_ct_key_equal(struct nf_conntrack_tuple_hash *h,
+			const struct nf_conntrack_tuple *tuple,
+			u16 zone)
+{
+	struct nf_conn *ct = nf_ct_tuplehash_to_ctrack(h);
+
+	/* A conntrack can be recreated with the equal tuple,
+	 * so we need to check that the conntrack is confirmed
+	 */
+	return nf_ct_tuple_equal(tuple, &h->tuple) &&
+		nf_ct_zone(ct) == zone &&
+		nf_ct_is_confirmed(ct);
+}
+
 /*
  * Warning :
  * - Caller must take a reference on returned object
@@ -333,8 +348,7 @@
 	local_bh_disable();
 begin:
 	hlist_nulls_for_each_entry_rcu(h, n, &net->ct.hash[bucket], hnnode) {
-		if (nf_ct_tuple_equal(tuple, &h->tuple) &&
-		    nf_ct_zone(nf_ct_tuplehash_to_ctrack(h)) == zone) {
+		if (nf_ct_key_equal(h, tuple, zone)) {
 			NF_CT_STAT_INC(net, found);
 			local_bh_enable();
 			return h;
@@ -372,8 +386,7 @@
 			     !atomic_inc_not_zero(&ct->ct_general.use)))
 			h = NULL;
 		else {
-			if (unlikely(!nf_ct_tuple_equal(tuple, &h->tuple) ||
-				     nf_ct_zone(ct) != zone)) {
+			if (unlikely(!nf_ct_key_equal(h, tuple, zone))) {
 				nf_ct_put(ct);
 				goto begin;
 			}
@@ -435,7 +448,9 @@
 			goto out;
 
 	add_timer(&ct->timeout);
-	nf_conntrack_get(&ct->ct_general);
+	smp_wmb();
+	/* The caller holds a reference to this object */
+	atomic_set(&ct->ct_general.use, 2);
 	__nf_conntrack_hash_insert(ct, hash, repl_hash);
 	NF_CT_STAT_INC(net, insert);
 	spin_unlock_bh(&nf_conntrack_lock);
@@ -449,6 +464,21 @@
 }
 EXPORT_SYMBOL_GPL(nf_conntrack_hash_check_insert);
 
+/* deletion from this larval template list happens via nf_ct_put() */
+void nf_conntrack_tmpl_insert(struct net *net, struct nf_conn *tmpl)
+{
+	__set_bit(IPS_TEMPLATE_BIT, &tmpl->status);
+	__set_bit(IPS_CONFIRMED_BIT, &tmpl->status);
+	nf_conntrack_get(&tmpl->ct_general);
+
+	spin_lock_bh(&nf_conntrack_lock);
+	/* Overload tuple linked list to put us in template list. */
+	hlist_nulls_add_head_rcu(&tmpl->tuplehash[IP_CT_DIR_ORIGINAL].hnnode,
+				 &net->ct.tmpl);
+	spin_unlock_bh(&nf_conntrack_lock);
+}
+EXPORT_SYMBOL_GPL(nf_conntrack_tmpl_insert);
+
 /* Confirm a connection given skb; places it in hash table */
 int
 __nf_conntrack_confirm(struct sk_buff *skb)
@@ -720,11 +750,10 @@
 		nf_ct_zone->id = zone;
 	}
 #endif
-	/*
-	 * changes to lookup keys must be done before setting refcnt to 1
+	/* Because we use RCU lookups, we set ct_general.use to zero before
+	 * this is inserted in any list.
 	 */
-	smp_wmb();
-	atomic_set(&ct->ct_general.use, 1);
+	atomic_set(&ct->ct_general.use, 0);
 	return ct;
 
 #ifdef CONFIG_NF_CONNTRACK_ZONES
@@ -748,6 +777,11 @@
 {
 	struct net *net = nf_ct_net(ct);
 
+	/* A freed object has refcnt == 0, that's
+	 * the golden rule for SLAB_DESTROY_BY_RCU
+	 */
+	NF_CT_ASSERT(atomic_read(&ct->ct_general.use) == 0);
+
 	nf_ct_ext_destroy(ct);
 	nf_ct_ext_free(ct);
 	kmem_cache_free(net->ct.nf_conntrack_cachep, ct);
@@ -843,6 +877,9 @@
 		NF_CT_STAT_INC(net, new);
 	}
 
+	/* Now it is inserted into the unconfirmed list, bump refcount */
+	nf_conntrack_get(&ct->ct_general);
+
 	/* Overload tuple linked list to put us in unconfirmed list. */
 	hlist_nulls_add_head_rcu(&ct->tuplehash[IP_CT_DIR_ORIGINAL].hnnode,
 		       &net->ct.unconfirmed);
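
The conntrack hunks fold the tuple, zone and confirmed checks into nf_ct_key_equal() and start new entries at refcount 0, so that with SLAB_DESTROY_BY_RCU an entry only counts as live once atomic_inc_not_zero() succeeds and the key still matches afterwards. A compressed userspace sketch of that lookup discipline using C11 atomics is shown below; the structure and helper names are illustrative, not kernel APIs.

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

struct entry {
	int key;
	int zone;
	bool confirmed;
	atomic_int use;         /* 0 means the object is free and may be recycled */
};

static bool key_equal(const struct entry *e, int key, int zone)
{
	/* an entry can be recycled with the same key, so also require it
	 * to be confirmed and in the right zone */
	return e->key == key && e->zone == zone && e->confirmed;
}

/* Take a reference on a matching entry, or return NULL. */
static struct entry *get_entry(struct entry *e, int key, int zone)
{
	if (!key_equal(e, key, zone))
		return NULL;

	/* only a non-zero refcount may be bumped: 0 means "being freed" */
	int old = atomic_load(&e->use);
	do {
		if (old == 0)
			return NULL;
	} while (!atomic_compare_exchange_weak(&e->use, &old, old + 1));

	/* re-check the key: the slot may have been recycled meanwhile */
	if (!key_equal(e, key, zone)) {
		atomic_fetch_sub(&e->use, 1);
		return NULL;
	}
	return e;
}

int main(void)
{
	struct entry e = { .key = 42, .zone = 0, .confirmed = true };

	atomic_init(&e.use, 1);
	printf("got entry: %s\n", get_entry(&e, 42, 0) ? "yes" : "no");
	return 0;
}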
diff --git a/net/netfilter/nf_synproxy_core.c b/net/netfilter/nf_synproxy_core.c
index 9858e3e..52e20c9 100644
--- a/net/netfilter/nf_synproxy_core.c
+++ b/net/netfilter/nf_synproxy_core.c
@@ -363,9 +363,8 @@
 		goto err2;
 	if (!nfct_synproxy_ext_add(ct))
 		goto err2;
-	__set_bit(IPS_TEMPLATE_BIT, &ct->status);
-	__set_bit(IPS_CONFIRMED_BIT, &ct->status);
 
+	nf_conntrack_tmpl_insert(net, ct);
 	snet->tmpl = ct;
 
 	snet->stats = alloc_percpu(struct synproxy_stats);
@@ -390,7 +389,7 @@
 {
 	struct synproxy_net *snet = synproxy_pernet(net);
 
-	nf_conntrack_free(snet->tmpl);
+	nf_ct_put(snet->tmpl);
 	synproxy_proc_exit(net);
 	free_percpu(snet->stats);
 }
diff --git a/net/netfilter/nf_tables_api.c b/net/netfilter/nf_tables_api.c
index 117bbaad..adce01e 100644
--- a/net/netfilter/nf_tables_api.c
+++ b/net/netfilter/nf_tables_api.c
@@ -1008,10 +1008,8 @@
 	return 0;
 }
 
-static void nf_tables_rcu_chain_destroy(struct rcu_head *head)
+static void nf_tables_chain_destroy(struct nft_chain *chain)
 {
-	struct nft_chain *chain = container_of(head, struct nft_chain, rcu_head);
-
 	BUG_ON(chain->use > 0);
 
 	if (chain->flags & NFT_BASE_CHAIN) {
@@ -1045,7 +1043,7 @@
 	if (IS_ERR(chain))
 		return PTR_ERR(chain);
 
-	if (!list_empty(&chain->rules))
+	if (!list_empty(&chain->rules) || chain->use > 0)
 		return -EBUSY;
 
 	list_del(&chain->list);
@@ -1059,7 +1057,9 @@
 			       family);
 
 	/* Make sure all rule references are gone before this is released */
-	call_rcu(&chain->rcu_head, nf_tables_rcu_chain_destroy);
+	synchronize_rcu();
+
+	nf_tables_chain_destroy(chain);
 	return 0;
 }
 
@@ -1114,35 +1114,45 @@
 }
 EXPORT_SYMBOL_GPL(nft_unregister_expr);
 
-static const struct nft_expr_type *__nft_expr_type_get(struct nlattr *nla)
+static const struct nft_expr_type *__nft_expr_type_get(u8 family,
+						       struct nlattr *nla)
 {
 	const struct nft_expr_type *type;
 
 	list_for_each_entry(type, &nf_tables_expressions, list) {
-		if (!nla_strcmp(nla, type->name))
+		if (!nla_strcmp(nla, type->name) &&
+		    (!type->family || type->family == family))
 			return type;
 	}
 	return NULL;
 }
 
-static const struct nft_expr_type *nft_expr_type_get(struct nlattr *nla)
+static const struct nft_expr_type *nft_expr_type_get(u8 family,
+						     struct nlattr *nla)
 {
 	const struct nft_expr_type *type;
 
 	if (nla == NULL)
 		return ERR_PTR(-EINVAL);
 
-	type = __nft_expr_type_get(nla);
+	type = __nft_expr_type_get(family, nla);
 	if (type != NULL && try_module_get(type->owner))
 		return type;
 
 #ifdef CONFIG_MODULES
 	if (type == NULL) {
 		nfnl_unlock(NFNL_SUBSYS_NFTABLES);
+		request_module("nft-expr-%u-%.*s", family,
+			       nla_len(nla), (char *)nla_data(nla));
+		nfnl_lock(NFNL_SUBSYS_NFTABLES);
+		if (__nft_expr_type_get(family, nla))
+			return ERR_PTR(-EAGAIN);
+
+		nfnl_unlock(NFNL_SUBSYS_NFTABLES);
 		request_module("nft-expr-%.*s",
 			       nla_len(nla), (char *)nla_data(nla));
 		nfnl_lock(NFNL_SUBSYS_NFTABLES);
-		if (__nft_expr_type_get(nla))
+		if (__nft_expr_type_get(family, nla))
 			return ERR_PTR(-EAGAIN);
 	}
 #endif
@@ -1193,7 +1203,7 @@
 	if (err < 0)
 		return err;
 
-	type = nft_expr_type_get(tb[NFTA_EXPR_NAME]);
+	type = nft_expr_type_get(ctx->afi->family, tb[NFTA_EXPR_NAME]);
 	if (IS_ERR(type))
 		return PTR_ERR(type);
 
@@ -1521,9 +1531,8 @@
 	return err;
 }
 
-static void nf_tables_rcu_rule_destroy(struct rcu_head *head)
+static void nf_tables_rule_destroy(struct nft_rule *rule)
 {
-	struct nft_rule *rule = container_of(head, struct nft_rule, rcu_head);
 	struct nft_expr *expr;
 
 	/*
@@ -1538,11 +1547,6 @@
 	kfree(rule);
 }
 
-static void nf_tables_rule_destroy(struct nft_rule *rule)
-{
-	call_rcu(&rule->rcu_head, nf_tables_rcu_rule_destroy);
-}
-
 #define NFT_RULE_MAXEXPRS	128
 
 static struct nft_expr_info *info;
@@ -1809,9 +1813,6 @@
 	synchronize_rcu();
 
 	list_for_each_entry_safe(rupd, tmp, &net->nft.commit_list, list) {
-		/* Delete this rule from the dirty list */
-		list_del(&rupd->list);
-
 		/* This rule was inactive in the past and just became active.
 		 * Clear the next bit of the genmask since its meaning has
 		 * changed, now it is the future.
@@ -1822,6 +1823,7 @@
 					      rupd->chain, rupd->rule,
 					      NFT_MSG_NEWRULE, 0,
 					      rupd->family);
+			list_del(&rupd->list);
 			kfree(rupd);
 			continue;
 		}
@@ -1831,7 +1833,15 @@
 		nf_tables_rule_notify(skb, rupd->nlh, rupd->table, rupd->chain,
 				      rupd->rule, NFT_MSG_DELRULE, 0,
 				      rupd->family);
+	}
+
+	/* Make sure we don't see any packet traversing old rules */
+	synchronize_rcu();
+
+	/* Now we can safely release unused old rules */
+	list_for_each_entry_safe(rupd, tmp, &net->nft.commit_list, list) {
 		nf_tables_rule_destroy(rupd->rule);
+		list_del(&rupd->list);
 		kfree(rupd);
 	}
 
@@ -1844,20 +1854,26 @@
 	struct nft_rule_trans *rupd, *tmp;
 
 	list_for_each_entry_safe(rupd, tmp, &net->nft.commit_list, list) {
-		/* Delete all rules from the dirty list */
-		list_del(&rupd->list);
-
 		if (!nft_rule_is_active_next(net, rupd->rule)) {
 			nft_rule_clear(net, rupd->rule);
+			list_del(&rupd->list);
 			kfree(rupd);
 			continue;
 		}
 
 		/* This rule is inactive, get rid of it */
 		list_del_rcu(&rupd->rule->list);
+	}
+
+	/* Make sure we don't see any packet accessing aborted rules */
+	synchronize_rcu();
+
+	list_for_each_entry_safe(rupd, tmp, &net->nft.commit_list, list) {
 		nf_tables_rule_destroy(rupd->rule);
+		list_del(&rupd->list);
 		kfree(rupd);
 	}
+
 	return 0;
 }
 
@@ -1943,6 +1959,9 @@
 	}
 
 	if (nla[NFTA_SET_TABLE] != NULL) {
+		if (afi == NULL)
+			return -EAFNOSUPPORT;
+
 		table = nf_tables_table_lookup(afi, nla[NFTA_SET_TABLE]);
 		if (IS_ERR(table))
 			return PTR_ERR(table);
@@ -1989,13 +2008,13 @@
 
 			if (!sscanf(i->name, name, &tmp))
 				continue;
-			if (tmp < 0 || tmp > BITS_PER_LONG * PAGE_SIZE)
+			if (tmp < 0 || tmp >= BITS_PER_BYTE * PAGE_SIZE)
 				continue;
 
 			set_bit(tmp, inuse);
 		}
 
-		n = find_first_zero_bit(inuse, BITS_PER_LONG * PAGE_SIZE);
+		n = find_first_zero_bit(inuse, BITS_PER_BYTE * PAGE_SIZE);
 		free_page((unsigned long)inuse);
 	}
 
@@ -2428,6 +2447,8 @@
 	struct nft_ctx ctx;
 	int err;
 
+	if (nfmsg->nfgen_family == NFPROTO_UNSPEC)
+		return -EAFNOSUPPORT;
 	if (nla[NFTA_SET_TABLE] == NULL)
 		return -EINVAL;
 
@@ -2435,9 +2456,6 @@
 	if (err < 0)
 		return err;
 
-	if (nfmsg->nfgen_family == NFPROTO_UNSPEC)
-		return -EAFNOSUPPORT;
-
 	set = nf_tables_set_lookup(ctx.table, nla[NFTA_SET_NAME]);
 	if (IS_ERR(set))
 		return PTR_ERR(set);
@@ -2723,6 +2741,9 @@
 		if (nla[NFTA_SET_ELEM_DATA] == NULL &&
 		    !(elem.flags & NFT_SET_ELEM_INTERVAL_END))
 			return -EINVAL;
+		if (nla[NFTA_SET_ELEM_DATA] != NULL &&
+		    elem.flags & NFT_SET_ELEM_INTERVAL_END)
+			return -EINVAL;
 	} else {
 		if (nla[NFTA_SET_ELEM_DATA] != NULL)
 			return -EINVAL;
@@ -2977,6 +2998,9 @@
 					const struct nft_set_iter *iter,
 					const struct nft_set_elem *elem)
 {
+	if (elem->flags & NFT_SET_ELEM_INTERVAL_END)
+		return 0;
+
 	switch (elem->data.verdict) {
 	case NFT_JUMP:
 	case NFT_GOTO:
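
One of the fixes above corrects the size of the page used as an ID bitmap when auto-naming anonymous sets: a page of PAGE_SIZE bytes provides PAGE_SIZE * BITS_PER_BYTE usable bits, not PAGE_SIZE * BITS_PER_LONG. A small userspace illustration of the arithmetic and the first-free-bit scan, assuming a 4096-byte page:

#include <limits.h>
#include <stdio.h>
#include <string.h>

#define PAGE_SIZE 4096u          /* assumed page size for the example */

/* Find the first clear bit in a byte-addressed bitmap of 'nbits' bits. */
static unsigned int find_first_zero(const unsigned char *map, unsigned int nbits)
{
	for (unsigned int i = 0; i < nbits; i++)
		if (!(map[i / CHAR_BIT] & (1u << (i % CHAR_BIT))))
			return i;
	return nbits;
}

int main(void)
{
	unsigned char page[PAGE_SIZE];
	unsigned int nbits = PAGE_SIZE * CHAR_BIT;      /* 32768 bits, the corrected bound */

	memset(page, 0, sizeof(page));
	page[0] = 0x07;                                 /* ids 0..2 already in use */

	printf("bitmap capacity: %u ids\n", nbits);
	printf("first free id:   %u\n", find_first_zero(page, nbits));
	return 0;
}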
diff --git a/net/netfilter/nf_tables_core.c b/net/netfilter/nf_tables_core.c
index 0d879fc..90998a6 100644
--- a/net/netfilter/nf_tables_core.c
+++ b/net/netfilter/nf_tables_core.c
@@ -103,9 +103,9 @@
 	},
 };
 
-static inline void nft_trace_packet(const struct nft_pktinfo *pkt,
-				    const struct nft_chain *chain,
-				    int rulenum, enum nft_trace type)
+static void nft_trace_packet(const struct nft_pktinfo *pkt,
+			     const struct nft_chain *chain,
+			     int rulenum, enum nft_trace type)
 {
 	struct net *net = dev_net(pkt->in ? pkt->in : pkt->out);
 
diff --git a/net/netfilter/nft_ct.c b/net/netfilter/nft_ct.c
index 917052e..46e2754 100644
--- a/net/netfilter/nft_ct.c
+++ b/net/netfilter/nft_ct.c
@@ -226,6 +226,7 @@
 		if (tb[NFTA_CT_DIRECTION] != NULL)
 			return -EINVAL;
 		break;
+	case NFT_CT_L3PROTOCOL:
 	case NFT_CT_PROTOCOL:
 	case NFT_CT_SRC:
 	case NFT_CT_DST:
@@ -311,8 +312,19 @@
 		goto nla_put_failure;
 	if (nla_put_be32(skb, NFTA_CT_KEY, htonl(priv->key)))
 		goto nla_put_failure;
-	if (nla_put_u8(skb, NFTA_CT_DIRECTION, priv->dir))
-		goto nla_put_failure;
+
+	switch (priv->key) {
+	case NFT_CT_PROTOCOL:
+	case NFT_CT_SRC:
+	case NFT_CT_DST:
+	case NFT_CT_PROTO_SRC:
+	case NFT_CT_PROTO_DST:
+		if (nla_put_u8(skb, NFTA_CT_DIRECTION, priv->dir))
+			goto nla_put_failure;
+	default:
+		break;
+	}
+
 	return 0;
 
 nla_put_failure:
diff --git a/net/netfilter/nft_log.c b/net/netfilter/nft_log.c
index 5af7901..26c5154 100644
--- a/net/netfilter/nft_log.c
+++ b/net/netfilter/nft_log.c
@@ -23,7 +23,6 @@
 struct nft_log {
 	struct nf_loginfo	loginfo;
 	char			*prefix;
-	int			family;
 };
 
 static void nft_log_eval(const struct nft_expr *expr,
@@ -33,7 +32,7 @@
 	const struct nft_log *priv = nft_expr_priv(expr);
 	struct net *net = dev_net(pkt->in ? pkt->in : pkt->out);
 
-	nf_log_packet(net, priv->family, pkt->ops->hooknum, pkt->skb, pkt->in,
+	nf_log_packet(net, pkt->ops->pf, pkt->ops->hooknum, pkt->skb, pkt->in,
 		      pkt->out, &priv->loginfo, "%s", priv->prefix);
 }
 
@@ -52,8 +51,6 @@
 	struct nf_loginfo *li = &priv->loginfo;
 	const struct nlattr *nla;
 
-	priv->family = ctx->afi->family;
-
 	nla = tb[NFTA_LOG_PREFIX];
 	if (nla != NULL) {
 		priv->prefix = kmalloc(nla_len(nla) + 1, GFP_KERNEL);
diff --git a/net/netfilter/nft_lookup.c b/net/netfilter/nft_lookup.c
index 8a6116b..bb4ef4c 100644
--- a/net/netfilter/nft_lookup.c
+++ b/net/netfilter/nft_lookup.c
@@ -16,6 +16,7 @@
 #include <linux/netfilter.h>
 #include <linux/netfilter/nf_tables.h>
 #include <net/netfilter/nf_tables.h>
+#include <net/netfilter/nf_tables_core.h>
 
 struct nft_lookup {
 	struct nft_set			*set;
diff --git a/net/netfilter/nft_queue.c b/net/netfilter/nft_queue.c
index cbea473..e8ae2f6 100644
--- a/net/netfilter/nft_queue.c
+++ b/net/netfilter/nft_queue.c
@@ -25,7 +25,6 @@
 	u16	queuenum;
 	u16	queues_total;
 	u16	flags;
-	u8	family;
 };
 
 static void nft_queue_eval(const struct nft_expr *expr,
@@ -43,7 +42,7 @@
 			queue = priv->queuenum + cpu % priv->queues_total;
 		} else {
 			queue = nfqueue_hash(pkt->skb, queue,
-					     priv->queues_total, priv->family,
+					     priv->queues_total, pkt->ops->pf,
 					     jhash_initval);
 		}
 	}
@@ -71,7 +70,6 @@
 		return -EINVAL;
 
 	init_hashrandom(&jhash_initval);
-	priv->family = ctx->afi->family;
 	priv->queuenum = ntohs(nla_get_be16(tb[NFTA_QUEUE_NUM]));
 
 	if (tb[NFTA_QUEUE_TOTAL] != NULL)
diff --git a/net/netfilter/nft_rbtree.c b/net/netfilter/nft_rbtree.c
index ca0c1b2..e21d69d 100644
--- a/net/netfilter/nft_rbtree.c
+++ b/net/netfilter/nft_rbtree.c
@@ -69,8 +69,10 @@
 				    struct nft_rbtree_elem *rbe)
 {
 	nft_data_uninit(&rbe->key, NFT_DATA_VALUE);
-	if (set->flags & NFT_SET_MAP)
+	if (set->flags & NFT_SET_MAP &&
+	    !(rbe->flags & NFT_SET_ELEM_INTERVAL_END))
 		nft_data_uninit(rbe->data, set->dtype);
+
 	kfree(rbe);
 }
 
@@ -108,7 +110,8 @@
 	int err;
 
 	size = sizeof(*rbe);
-	if (set->flags & NFT_SET_MAP)
+	if (set->flags & NFT_SET_MAP &&
+	    !(elem->flags & NFT_SET_ELEM_INTERVAL_END))
 		size += sizeof(rbe->data[0]);
 
 	rbe = kzalloc(size, GFP_KERNEL);
@@ -117,7 +120,8 @@
 
 	rbe->flags = elem->flags;
 	nft_data_copy(&rbe->key, &elem->key);
-	if (set->flags & NFT_SET_MAP)
+	if (set->flags & NFT_SET_MAP &&
+	    !(rbe->flags & NFT_SET_ELEM_INTERVAL_END))
 		nft_data_copy(rbe->data, &elem->data);
 
 	err = __nft_rbtree_insert(set, rbe);
@@ -153,7 +157,8 @@
 			parent = parent->rb_right;
 		else {
 			elem->cookie = rbe;
-			if (set->flags & NFT_SET_MAP)
+			if (set->flags & NFT_SET_MAP &&
+			    !(rbe->flags & NFT_SET_ELEM_INTERVAL_END))
 				nft_data_copy(&elem->data, rbe->data);
 			elem->flags = rbe->flags;
 			return 0;
@@ -177,7 +182,8 @@
 
 		rbe = rb_entry(node, struct nft_rbtree_elem, node);
 		nft_data_copy(&elem.key, &rbe->key);
-		if (set->flags & NFT_SET_MAP)
+		if (set->flags & NFT_SET_MAP &&
+		    !(rbe->flags & NFT_SET_ELEM_INTERVAL_END))
 			nft_data_copy(&elem.data, rbe->data);
 		elem.flags = rbe->flags;
 
diff --git a/net/netfilter/nft_reject.c b/net/netfilter/nft_reject.c
index 5e204711..f3448c2 100644
--- a/net/netfilter/nft_reject.c
+++ b/net/netfilter/nft_reject.c
@@ -16,65 +16,23 @@
 #include <linux/netfilter.h>
 #include <linux/netfilter/nf_tables.h>
 #include <net/netfilter/nf_tables.h>
-#include <net/icmp.h>
-#include <net/netfilter/ipv4/nf_reject.h>
+#include <net/netfilter/nft_reject.h>
 
-#if IS_ENABLED(CONFIG_NF_TABLES_IPV6)
-#include <net/netfilter/ipv6/nf_reject.h>
-#endif
-
-struct nft_reject {
-	enum nft_reject_types	type:8;
-	u8			icmp_code;
-	u8			family;
-};
-
-static void nft_reject_eval(const struct nft_expr *expr,
-			      struct nft_data data[NFT_REG_MAX + 1],
-			      const struct nft_pktinfo *pkt)
-{
-	struct nft_reject *priv = nft_expr_priv(expr);
-#if IS_ENABLED(CONFIG_NF_TABLES_IPV6)
-	struct net *net = dev_net((pkt->in != NULL) ? pkt->in : pkt->out);
-#endif
-	switch (priv->type) {
-	case NFT_REJECT_ICMP_UNREACH:
-		if (priv->family == NFPROTO_IPV4)
-			nf_send_unreach(pkt->skb, priv->icmp_code);
-#if IS_ENABLED(CONFIG_NF_TABLES_IPV6)
-		else if (priv->family == NFPROTO_IPV6)
-			nf_send_unreach6(net, pkt->skb, priv->icmp_code,
-				      pkt->ops->hooknum);
-#endif
-		break;
-	case NFT_REJECT_TCP_RST:
-		if (priv->family == NFPROTO_IPV4)
-			nf_send_reset(pkt->skb, pkt->ops->hooknum);
-#if IS_ENABLED(CONFIG_NF_TABLES_IPV6)
-		else if (priv->family == NFPROTO_IPV6)
-			nf_send_reset6(net, pkt->skb, pkt->ops->hooknum);
-#endif
-		break;
-	}
-
-	data[NFT_REG_VERDICT].verdict = NF_DROP;
-}
-
-static const struct nla_policy nft_reject_policy[NFTA_REJECT_MAX + 1] = {
+const struct nla_policy nft_reject_policy[NFTA_REJECT_MAX + 1] = {
 	[NFTA_REJECT_TYPE]		= { .type = NLA_U32 },
 	[NFTA_REJECT_ICMP_CODE]		= { .type = NLA_U8 },
 };
+EXPORT_SYMBOL_GPL(nft_reject_policy);
 
-static int nft_reject_init(const struct nft_ctx *ctx,
-			   const struct nft_expr *expr,
-			   const struct nlattr * const tb[])
+int nft_reject_init(const struct nft_ctx *ctx,
+		    const struct nft_expr *expr,
+		    const struct nlattr * const tb[])
 {
 	struct nft_reject *priv = nft_expr_priv(expr);
 
 	if (tb[NFTA_REJECT_TYPE] == NULL)
 		return -EINVAL;
 
-	priv->family = ctx->afi->family;
 	priv->type = ntohl(nla_get_be32(tb[NFTA_REJECT_TYPE]));
 	switch (priv->type) {
 	case NFT_REJECT_ICMP_UNREACH:
@@ -89,8 +47,9 @@
 
 	return 0;
 }
+EXPORT_SYMBOL_GPL(nft_reject_init);
 
-static int nft_reject_dump(struct sk_buff *skb, const struct nft_expr *expr)
+int nft_reject_dump(struct sk_buff *skb, const struct nft_expr *expr)
 {
 	const struct nft_reject *priv = nft_expr_priv(expr);
 
@@ -109,37 +68,7 @@
 nla_put_failure:
 	return -1;
 }
-
-static struct nft_expr_type nft_reject_type;
-static const struct nft_expr_ops nft_reject_ops = {
-	.type		= &nft_reject_type,
-	.size		= NFT_EXPR_SIZE(sizeof(struct nft_reject)),
-	.eval		= nft_reject_eval,
-	.init		= nft_reject_init,
-	.dump		= nft_reject_dump,
-};
-
-static struct nft_expr_type nft_reject_type __read_mostly = {
-	.name		= "reject",
-	.ops		= &nft_reject_ops,
-	.policy		= nft_reject_policy,
-	.maxattr	= NFTA_REJECT_MAX,
-	.owner		= THIS_MODULE,
-};
-
-static int __init nft_reject_module_init(void)
-{
-	return nft_register_expr(&nft_reject_type);
-}
-
-static void __exit nft_reject_module_exit(void)
-{
-	nft_unregister_expr(&nft_reject_type);
-}
-
-module_init(nft_reject_module_init);
-module_exit(nft_reject_module_exit);
+EXPORT_SYMBOL_GPL(nft_reject_dump);
 
 MODULE_LICENSE("GPL");
 MODULE_AUTHOR("Patrick McHardy <kaber@trash.net>");
-MODULE_ALIAS_NFT_EXPR("reject");
diff --git a/net/netfilter/nft_reject_inet.c b/net/netfilter/nft_reject_inet.c
new file mode 100644
index 0000000..8a310f2
--- /dev/null
+++ b/net/netfilter/nft_reject_inet.c
@@ -0,0 +1,63 @@
+/*
+ * Copyright (c) 2014 Patrick McHardy <kaber@trash.net>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/netlink.h>
+#include <linux/netfilter.h>
+#include <linux/netfilter/nf_tables.h>
+#include <net/netfilter/nf_tables.h>
+#include <net/netfilter/nft_reject.h>
+
+static void nft_reject_inet_eval(const struct nft_expr *expr,
+				 struct nft_data data[NFT_REG_MAX + 1],
+				 const struct nft_pktinfo *pkt)
+{
+	switch (pkt->ops->pf) {
+	case NFPROTO_IPV4:
+		nft_reject_ipv4_eval(expr, data, pkt);
+	case NFPROTO_IPV6:
+		nft_reject_ipv6_eval(expr, data, pkt);
+	}
+}
+
+static struct nft_expr_type nft_reject_inet_type;
+static const struct nft_expr_ops nft_reject_inet_ops = {
+	.type		= &nft_reject_inet_type,
+	.size		= NFT_EXPR_SIZE(sizeof(struct nft_reject)),
+	.eval		= nft_reject_inet_eval,
+	.init		= nft_reject_init,
+	.dump		= nft_reject_dump,
+};
+
+static struct nft_expr_type nft_reject_inet_type __read_mostly = {
+	.family		= NFPROTO_INET,
+	.name		= "reject",
+	.ops		= &nft_reject_inet_ops,
+	.policy		= nft_reject_policy,
+	.maxattr	= NFTA_REJECT_MAX,
+	.owner		= THIS_MODULE,
+};
+
+static int __init nft_reject_inet_module_init(void)
+{
+	return nft_register_expr(&nft_reject_inet_type);
+}
+
+static void __exit nft_reject_inet_module_exit(void)
+{
+	nft_unregister_expr(&nft_reject_inet_type);
+}
+
+module_init(nft_reject_inet_module_init);
+module_exit(nft_reject_inet_module_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Patrick McHardy <kaber@trash.net>");
+MODULE_ALIAS_NFT_AF_EXPR(1, "reject");
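
nft_reject_inet_eval() dispatches on pkt->ops->pf so that a single inet expression serves both IPv4 and IPv6 chains. Since a C case label without a terminating break or return falls through into the next label, a complete dispatch of this shape ends each case explicitly, as in the hedged userspace illustration below (the handler names are made up for the example):

#include <stdio.h>

enum family { FAM_IPV4, FAM_IPV6 };

static void reject_ipv4(void) { puts("send ICMP unreachable / TCP RST (v4)"); }
static void reject_ipv6(void) { puts("send ICMPv6 unreachable / TCP RST (v6)"); }

/* Dispatch to the per-family handler; each case ends in break so that
 * an IPv4 packet is not also run through the IPv6 path. */
static void reject_inet(enum family pf)
{
	switch (pf) {
	case FAM_IPV4:
		reject_ipv4();
		break;
	case FAM_IPV6:
		reject_ipv6();
		break;
	}
}

int main(void)
{
	reject_inet(FAM_IPV4);
	reject_inet(FAM_IPV6);
	return 0;
}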
diff --git a/net/netfilter/xt_CT.c b/net/netfilter/xt_CT.c
index 5929be6..75747ae 100644
--- a/net/netfilter/xt_CT.c
+++ b/net/netfilter/xt_CT.c
@@ -228,12 +228,7 @@
 			goto err3;
 	}
 
-	__set_bit(IPS_TEMPLATE_BIT, &ct->status);
-	__set_bit(IPS_CONFIRMED_BIT, &ct->status);
-
-	/* Overload tuple linked list to put us in template list. */
-	hlist_nulls_add_head_rcu(&ct->tuplehash[IP_CT_DIR_ORIGINAL].hnnode,
-				 &par->net->ct.tmpl);
+	nf_conntrack_tmpl_insert(par->net, ct);
 out:
 	info->ct = ct;
 	return 0;
diff --git a/net/openvswitch/datapath.c b/net/openvswitch/datapath.c
index df46928..e9a48ba 100644
--- a/net/openvswitch/datapath.c
+++ b/net/openvswitch/datapath.c
@@ -55,6 +55,7 @@
 
 #include "datapath.h"
 #include "flow.h"
+#include "flow_table.h"
 #include "flow_netlink.h"
 #include "vport-internal_dev.h"
 #include "vport-netdev.h"
@@ -160,7 +161,6 @@
 {
 	struct datapath *dp = container_of(rcu, struct datapath, rcu);
 
-	ovs_flow_tbl_destroy(&dp->table);
 	free_percpu(dp->stats_percpu);
 	release_net(ovs_dp_get_net(dp));
 	kfree(dp->ports);
@@ -466,6 +466,14 @@
 
 	skb_zerocopy(user_skb, skb, skb->len, hlen);
 
+	/* Pad OVS_PACKET_ATTR_PACKET if linear copy was performed */
+	if (!(dp->user_features & OVS_DP_F_UNALIGNED)) {
+		size_t plen = NLA_ALIGN(user_skb->len) - user_skb->len;
+
+		if (plen > 0)
+			memset(skb_put(user_skb, plen), 0, plen);
+	}
+
 	((struct nlmsghdr *) user_skb->data)->nlmsg_len = user_skb->len;
 
 	err = genlmsg_unicast(ovs_dp_get_net(dp), user_skb, upcall_info->portid);
@@ -852,11 +860,8 @@
 			goto err_unlock_ovs;
 
 		/* The unmasked key has to be the same for flow updates. */
-		error = -EINVAL;
-		if (!ovs_flow_cmp_unmasked_key(flow, &match)) {
-			OVS_NLERR("Flow modification message rejected, unmasked key does not match.\n");
+		if (!ovs_flow_cmp_unmasked_key(flow, &match))
 			goto err_unlock_ovs;
-		}
 
 		/* Update actions. */
 		old_acts = ovsl_dereference(flow->sf_acts);
@@ -1079,6 +1084,7 @@
 	msgsize += nla_total_size(IFNAMSIZ);
 	msgsize += nla_total_size(sizeof(struct ovs_dp_stats));
 	msgsize += nla_total_size(sizeof(struct ovs_dp_megaflow_stats));
+	msgsize += nla_total_size(sizeof(u32)); /* OVS_DP_ATTR_USER_FEATURES */
 
 	return msgsize;
 }
@@ -1279,7 +1285,7 @@
 err_destroy_percpu:
 	free_percpu(dp->stats_percpu);
 err_destroy_table:
-	ovs_flow_tbl_destroy(&dp->table);
+	ovs_flow_tbl_destroy(&dp->table, false);
 err_free_dp:
 	release_net(ovs_dp_get_net(dp));
 	kfree(dp);
@@ -1306,10 +1312,13 @@
 	list_del_rcu(&dp->list_node);
 
 	/* OVSP_LOCAL is datapath internal port. We need to make sure that
-	 * all port in datapath are destroyed first before freeing datapath.
+	 * all ports in datapath are destroyed first before freeing datapath.
 	 */
 	ovs_dp_detach_port(ovs_vport_ovsl(dp, OVSP_LOCAL));
 
+	/* RCU destroy the flow table */
+	ovs_flow_tbl_destroy(&dp->table, true);
+
 	call_rcu(&dp->rcu, destroy_dp_rcu);
 }
 
diff --git a/net/openvswitch/flow_table.c b/net/openvswitch/flow_table.c
index c58a0fe..3c268b3 100644
--- a/net/openvswitch/flow_table.c
+++ b/net/openvswitch/flow_table.c
@@ -153,29 +153,29 @@
 	flow_free(flow);
 }
 
-static void flow_mask_del_ref(struct sw_flow_mask *mask, bool deferred)
-{
-	if (!mask)
-		return;
-
-	BUG_ON(!mask->ref_count);
-	mask->ref_count--;
-
-	if (!mask->ref_count) {
-		list_del_rcu(&mask->list);
-		if (deferred)
-			kfree_rcu(mask, rcu);
-		else
-			kfree(mask);
-	}
-}
-
 void ovs_flow_free(struct sw_flow *flow, bool deferred)
 {
 	if (!flow)
 		return;
 
-	flow_mask_del_ref(flow->mask, deferred);
+	if (flow->mask) {
+		struct sw_flow_mask *mask = flow->mask;
+
+		/* ovs-lock is required to protect mask-refcount and
+		 * mask list.
+		 */
+		ASSERT_OVSL();
+		BUG_ON(!mask->ref_count);
+		mask->ref_count--;
+
+		if (!mask->ref_count) {
+			list_del_rcu(&mask->list);
+			if (deferred)
+				kfree_rcu(mask, rcu);
+			else
+				kfree(mask);
+		}
+	}
 
 	if (deferred)
 		call_rcu(&flow->rcu, rcu_free_flow_callback);
@@ -188,26 +188,9 @@
 	flex_array_free(buckets);
 }
 
+
 static void __table_instance_destroy(struct table_instance *ti)
 {
-	int i;
-
-	if (ti->keep_flows)
-		goto skip_flows;
-
-	for (i = 0; i < ti->n_buckets; i++) {
-		struct sw_flow *flow;
-		struct hlist_head *head = flex_array_get(ti->buckets, i);
-		struct hlist_node *n;
-		int ver = ti->node_ver;
-
-		hlist_for_each_entry_safe(flow, n, head, hash_node[ver]) {
-			hlist_del(&flow->hash_node[ver]);
-			ovs_flow_free(flow, false);
-		}
-	}
-
-skip_flows:
 	free_buckets(ti->buckets);
 	kfree(ti);
 }
@@ -258,20 +241,38 @@
 
 static void table_instance_destroy(struct table_instance *ti, bool deferred)
 {
+	int i;
+
 	if (!ti)
 		return;
 
+	if (ti->keep_flows)
+		goto skip_flows;
+
+	for (i = 0; i < ti->n_buckets; i++) {
+		struct sw_flow *flow;
+		struct hlist_head *head = flex_array_get(ti->buckets, i);
+		struct hlist_node *n;
+		int ver = ti->node_ver;
+
+		hlist_for_each_entry_safe(flow, n, head, hash_node[ver]) {
+			hlist_del_rcu(&flow->hash_node[ver]);
+			ovs_flow_free(flow, deferred);
+		}
+	}
+
+skip_flows:
 	if (deferred)
 		call_rcu(&ti->rcu, flow_tbl_destroy_rcu_cb);
 	else
 		__table_instance_destroy(ti);
 }
 
-void ovs_flow_tbl_destroy(struct flow_table *table)
+void ovs_flow_tbl_destroy(struct flow_table *table, bool deferred)
 {
 	struct table_instance *ti = ovsl_dereference(table->ti);
 
-	table_instance_destroy(ti, false);
+	table_instance_destroy(ti, deferred);
 }
 
 struct sw_flow *ovs_flow_tbl_dump_next(struct table_instance *ti,
@@ -504,16 +505,11 @@
 
 	mask = kmalloc(sizeof(*mask), GFP_KERNEL);
 	if (mask)
-		mask->ref_count = 0;
+		mask->ref_count = 1;
 
 	return mask;
 }
 
-static void mask_add_ref(struct sw_flow_mask *mask)
-{
-	mask->ref_count++;
-}
-
 static bool mask_equal(const struct sw_flow_mask *a,
 		       const struct sw_flow_mask *b)
 {
@@ -554,9 +550,11 @@
 		mask->key = new->key;
 		mask->range = new->range;
 		list_add_rcu(&mask->list, &tbl->mask_list);
+	} else {
+		BUG_ON(!mask->ref_count);
+		mask->ref_count++;
 	}
 
-	mask_add_ref(mask);
 	flow->mask = mask;
 	return 0;
 }
diff --git a/net/openvswitch/flow_table.h b/net/openvswitch/flow_table.h
index 1996e34..baaeb10 100644
--- a/net/openvswitch/flow_table.h
+++ b/net/openvswitch/flow_table.h
@@ -60,7 +60,7 @@
 
 int ovs_flow_tbl_init(struct flow_table *);
 int ovs_flow_tbl_count(struct flow_table *table);
-void ovs_flow_tbl_destroy(struct flow_table *table);
+void ovs_flow_tbl_destroy(struct flow_table *table, bool deferred);
 int ovs_flow_tbl_flush(struct flow_table *flow_table);
 
 int ovs_flow_tbl_insert(struct flow_table *table, struct sw_flow *flow,
diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c
index 6a2bb37..48a6a93 100644
--- a/net/packet/af_packet.c
+++ b/net/packet/af_packet.c
@@ -308,11 +308,27 @@
 	return po->xmit == packet_direct_xmit;
 }
 
-static u16 packet_pick_tx_queue(struct net_device *dev)
+static u16 __packet_pick_tx_queue(struct net_device *dev, struct sk_buff *skb)
 {
 	return (u16) raw_smp_processor_id() % dev->real_num_tx_queues;
 }
 
+static void packet_pick_tx_queue(struct net_device *dev, struct sk_buff *skb)
+{
+	const struct net_device_ops *ops = dev->netdev_ops;
+	u16 queue_index;
+
+	if (ops->ndo_select_queue) {
+		queue_index = ops->ndo_select_queue(dev, skb, NULL,
+						    __packet_pick_tx_queue);
+		queue_index = netdev_cap_txqueue(dev, queue_index);
+	} else {
+		queue_index = __packet_pick_tx_queue(dev, skb);
+	}
+
+	skb_set_queue_mapping(skb, queue_index);
+}
+
 /* register_prot_hook must be invoked with the po->bind_lock held,
  * or from a context in which asynchronous accesses to the packet
  * socket is not possible (packet_create()).
@@ -2285,7 +2301,8 @@
 			}
 		}
 
-		skb_set_queue_mapping(skb, packet_pick_tx_queue(dev));
+		packet_pick_tx_queue(dev, skb);
+
 		skb->destructor = tpacket_destruct_skb;
 		__packet_set_status(po, ph, TP_STATUS_SENDING);
 		packet_inc_pending(&po->tx_ring);
@@ -2499,7 +2516,8 @@
 	skb->dev = dev;
 	skb->priority = sk->sk_priority;
 	skb->mark = sk->sk_mark;
-	skb_set_queue_mapping(skb, packet_pick_tx_queue(dev));
+
+	packet_pick_tx_queue(dev, skb);
 
 	if (po->has_vnet_hdr) {
 		if (vnet_hdr.flags & VIRTIO_NET_HDR_F_NEEDS_CSUM) {
@@ -3786,7 +3804,7 @@
 		 */
 			if (!tx_ring)
 				init_prb_bdqc(po, rb, pg_vec, req_u, tx_ring);
-				break;
+			break;
 		default:
 			break;
 		}
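
packet_pick_tx_queue() now honours a driver's ndo_select_queue() hook when present and otherwise falls back to spreading senders across queues by CPU id, capping the result to a valid queue index. A userspace sketch of that selection policy follows; the struct and function names are stand-ins, not the kernel API.

#include <stdio.h>

struct device {
	unsigned int real_num_tx_queues;
	/* optional driver hook; NULL if the driver has no preference */
	unsigned int (*select_queue)(const struct device *dev, unsigned int hint);
};

/* Default policy: spread senders across queues by CPU id. */
static unsigned int default_pick(const struct device *dev, unsigned int cpu)
{
	return cpu % dev->real_num_tx_queues;
}

static unsigned int pick_tx_queue(const struct device *dev, unsigned int cpu)
{
	unsigned int q;

	if (dev->select_queue)
		q = dev->select_queue(dev, cpu);
	else
		q = default_pick(dev, cpu);

	/* cap to a valid queue, in the spirit of netdev_cap_txqueue() */
	if (q >= dev->real_num_tx_queues)
		q = 0;
	return q;
}

int main(void)
{
	struct device dev = { .real_num_tx_queues = 4, .select_queue = NULL };

	for (unsigned int cpu = 0; cpu < 6; cpu++)
		printf("cpu %u -> queue %u\n", cpu, pick_tx_queue(&dev, cpu));
	return 0;
}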
diff --git a/net/rxrpc/ar-connection.c b/net/rxrpc/ar-connection.c
index 4106ca9..7bf5b5b 100644
--- a/net/rxrpc/ar-connection.c
+++ b/net/rxrpc/ar-connection.c
@@ -381,6 +381,8 @@
 
 		rxrpc_assign_connection_id(conn);
 		rx->conn = conn;
+	} else {
+		spin_lock(&trans->client_lock);
 	}
 
 	/* we've got a connection with a free channel and we can now attach the
diff --git a/net/rxrpc/ar-recvmsg.c b/net/rxrpc/ar-recvmsg.c
index 898492a..34b5490 100644
--- a/net/rxrpc/ar-recvmsg.c
+++ b/net/rxrpc/ar-recvmsg.c
@@ -180,7 +180,8 @@
 		if (copy > len - copied)
 			copy = len - copied;
 
-		if (skb->ip_summed == CHECKSUM_UNNECESSARY) {
+		if (skb->ip_summed == CHECKSUM_UNNECESSARY ||
+		    skb->ip_summed == CHECKSUM_PARTIAL) {
 			ret = skb_copy_datagram_iovec(skb, offset,
 						      msg->msg_iov, copy);
 		} else {
@@ -353,6 +354,10 @@
 	if (continue_call)
 		rxrpc_put_call(continue_call);
 	rxrpc_kill_skb(skb);
+	if (!(flags & MSG_PEEK)) {
+		if (skb_dequeue(&rx->sk.sk_receive_queue) != skb)
+			BUG();
+	}
 	skb_kill_datagram(&rx->sk, skb, flags);
 	rxrpc_put_call(call);
 	return -EAGAIN;
diff --git a/net/sched/sch_pie.c b/net/sched/sch_pie.c
index a255d02..fefeeb7 100644
--- a/net/sched/sch_pie.c
+++ b/net/sched/sch_pie.c
@@ -15,6 +15,11 @@
  *
  * ECN support is added by Naeem Khademi <naeemk@ifi.uio.no>
  * University of Oslo, Norway.
+ *
+ * References:
+ * IETF draft submission: http://tools.ietf.org/html/draft-pan-aqm-pie-00
+ * IEEE  Conference on High Performance Switching and Routing 2013 :
+ * "PIE: A * Lightweight Control Scheme to Address the Bufferbloat Problem"
  */
 
 #include <linux/module.h>
@@ -36,7 +41,7 @@
 	psched_time_t target;	/* user specified target delay in pschedtime */
 	u32 tupdate;		/* timer frequency (in jiffies) */
 	u32 limit;		/* number of packets that can be enqueued */
-	u32 alpha;		/* alpha and beta are between -4 and 4 */
+	u32 alpha;		/* alpha and beta are between 0 and 32 */
 	u32 beta;		/* and are used for shift relative to 1 */
 	bool ecn;		/* true if ecn is enabled */
 	bool bytemode;		/* to scale drop early prob based on pkt size */
@@ -326,10 +331,16 @@
 	if (qdelay == 0 && qlen != 0)
 		update_prob = false;
 
-	/* Add ranges for alpha and beta, more aggressive for high dropping
-	 * mode and gentle steps for light dropping mode
-	 * In light dropping mode, take gentle steps; in medium dropping mode,
-	 * take medium steps; in high dropping mode, take big steps.
+	/* In the algorithm, alpha and beta are between 0 and 2 with typical
+	 * value for alpha as 0.125. In this implementation, we use values 0-32
+	 * passed from user space to represent this. Also, alpha and beta have
+	 * unit of HZ and need to be scaled before they can be used to update
+	 * probability. alpha/beta are updated locally below by 1) scaling them
+	 * appropriately 2) scaling down by 16 to come to 0-2 range.
+	 * Please see paper for details.
+	 *
+	 * We scale alpha and beta differently depending on whether we are in
+	 * light, medium or high dropping mode.
 	 */
 	if (q->vars.prob < MAX_PROB / 100) {
 		alpha =
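
The rewritten comment pins down the parameter encoding: the 0-32 integers supplied from user space represent the algorithm's 0-2 range, that is, the configured value divided by 16, so the draft's typical alpha of 0.125 corresponds to a setting of 2. The tiny conversion below is only there to show the arithmetic the comment describes, not the scheduler's internal fixed-point code.

#include <stdio.h>

/* Convert the 0-32 integer supplied from user space into the 0-2
 * value used by the PIE algorithm (see the comment above). */
static double pie_param(unsigned int user_val)
{
	return user_val / 16.0;
}

int main(void)
{
	printf("alpha=2  -> %.3f\n", pie_param(2));    /* 0.125, the typical alpha */
	printf("beta=20  -> %.3f\n", pie_param(20));   /* 1.250 */
	return 0;
}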
diff --git a/net/sched/sch_tbf.c b/net/sched/sch_tbf.c
index fbba5b0..1cb413f 100644
--- a/net/sched/sch_tbf.c
+++ b/net/sched/sch_tbf.c
@@ -21,7 +21,6 @@
 #include <net/netlink.h>
 #include <net/sch_generic.h>
 #include <net/pkt_sched.h>
-#include <net/tcp.h>
 
 
 /*	Simple Token Bucket Filter.
@@ -148,16 +147,10 @@
  * Return length of individual segments of a gso packet,
  * including all headers (MAC, IP, TCP/UDP)
  */
-static unsigned int skb_gso_seglen(const struct sk_buff *skb)
+static unsigned int skb_gso_mac_seglen(const struct sk_buff *skb)
 {
 	unsigned int hdr_len = skb_transport_header(skb) - skb_mac_header(skb);
-	const struct skb_shared_info *shinfo = skb_shinfo(skb);
-
-	if (likely(shinfo->gso_type & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6)))
-		hdr_len += tcp_hdrlen(skb);
-	else
-		hdr_len += sizeof(struct udphdr);
-	return hdr_len + shinfo->gso_size;
+	return hdr_len + skb_gso_transport_seglen(skb);
 }
 
 /* GSO packet is too big, segment it so that tbf can transmit
@@ -202,7 +195,7 @@
 	int ret;
 
 	if (qdisc_pkt_len(skb) > q->max_size) {
-		if (skb_is_gso(skb) && skb_gso_seglen(skb) <= q->max_size)
+		if (skb_is_gso(skb) && skb_gso_mac_seglen(skb) <= q->max_size)
 			return tbf_segment(skb, sch);
 		return qdisc_reshape_fail(skb, sch);
 	}
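
skb_gso_mac_seglen() reports the on-wire length of one segment of a GSO packet, the MAC and network headers plus skb_gso_transport_seglen(), so TBF can segment an oversized GSO packet rather than drop it whenever each resulting segment fits within max_size. A hedged userspace sketch of that check, with example header sizes (14-byte Ethernet, 20-byte IPv4, 20-byte TCP):

#include <stdbool.h>
#include <stdio.h>

/* Per-segment length of a GSO packet at the MAC layer:
 * link-layer plus network headers plus one transport segment. */
static unsigned int gso_mac_seglen(unsigned int mac_hdr_len,
				   unsigned int transport_seglen)
{
	return mac_hdr_len + transport_seglen;
}

static bool tbf_accepts(unsigned int pkt_len, bool gso,
			unsigned int seglen, unsigned int max_size)
{
	if (pkt_len <= max_size)
		return true;
	/* oversized GSO packets are segmented instead of dropped,
	 * provided each segment fits */
	return gso && seglen <= max_size;
}

int main(void)
{
	unsigned int seglen = gso_mac_seglen(14 + 20, 20 + 1460); /* Ether+IP, TCP seg */

	printf("per-segment length: %u\n", seglen);                     /* 1514 */
	printf("64k GSO accepted:   %d\n", tbf_accepts(65536, true, seglen, 1514));
	return 0;
}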
diff --git a/net/sctp/associola.c b/net/sctp/associola.c
index 5ae6092..f558433 100644
--- a/net/sctp/associola.c
+++ b/net/sctp/associola.c
@@ -1367,44 +1367,35 @@
 	return false;
 }
 
-/* Increase asoc's rwnd by len and send any window update SACK if needed. */
-void sctp_assoc_rwnd_increase(struct sctp_association *asoc, unsigned int len)
+/* Update asoc's rwnd for the approximated state in the buffer,
+ * and check whether SACK needs to be sent.
+ */
+void sctp_assoc_rwnd_update(struct sctp_association *asoc, bool update_peer)
 {
+	int rx_count;
 	struct sctp_chunk *sack;
 	struct timer_list *timer;
 
-	if (asoc->rwnd_over) {
-		if (asoc->rwnd_over >= len) {
-			asoc->rwnd_over -= len;
-		} else {
-			asoc->rwnd += (len - asoc->rwnd_over);
-			asoc->rwnd_over = 0;
-		}
-	} else {
-		asoc->rwnd += len;
-	}
+	if (asoc->ep->rcvbuf_policy)
+		rx_count = atomic_read(&asoc->rmem_alloc);
+	else
+		rx_count = atomic_read(&asoc->base.sk->sk_rmem_alloc);
 
-	/* If we had window pressure, start recovering it
-	 * once our rwnd had reached the accumulated pressure
-	 * threshold.  The idea is to recover slowly, but up
-	 * to the initial advertised window.
-	 */
-	if (asoc->rwnd_press && asoc->rwnd >= asoc->rwnd_press) {
-		int change = min(asoc->pathmtu, asoc->rwnd_press);
-		asoc->rwnd += change;
-		asoc->rwnd_press -= change;
-	}
+	if ((asoc->base.sk->sk_rcvbuf - rx_count) > 0)
+		asoc->rwnd = (asoc->base.sk->sk_rcvbuf - rx_count) >> 1;
+	else
+		asoc->rwnd = 0;
 
-	pr_debug("%s: asoc:%p rwnd increased by %d to (%u, %u) - %u\n",
-		 __func__, asoc, len, asoc->rwnd, asoc->rwnd_over,
-		 asoc->a_rwnd);
+	pr_debug("%s: asoc:%p rwnd=%u, rx_count=%d, sk_rcvbuf=%d\n",
+		 __func__, asoc, asoc->rwnd, rx_count,
+		 asoc->base.sk->sk_rcvbuf);
 
 	/* Send a window update SACK if the rwnd has increased by at least the
 	 * minimum of the association's PMTU and half of the receive buffer.
 	 * The algorithm used is similar to the one described in
 	 * Section 4.2.3.3 of RFC 1122.
 	 */
-	if (sctp_peer_needs_update(asoc)) {
+	if (update_peer && sctp_peer_needs_update(asoc)) {
 		asoc->a_rwnd = asoc->rwnd;
 
 		pr_debug("%s: sending window update SACK- asoc:%p rwnd:%u "
@@ -1426,45 +1417,6 @@
 	}
 }
 
-/* Decrease asoc's rwnd by len. */
-void sctp_assoc_rwnd_decrease(struct sctp_association *asoc, unsigned int len)
-{
-	int rx_count;
-	int over = 0;
-
-	if (unlikely(!asoc->rwnd || asoc->rwnd_over))
-		pr_debug("%s: association:%p has asoc->rwnd:%u, "
-			 "asoc->rwnd_over:%u!\n", __func__, asoc,
-			 asoc->rwnd, asoc->rwnd_over);
-
-	if (asoc->ep->rcvbuf_policy)
-		rx_count = atomic_read(&asoc->rmem_alloc);
-	else
-		rx_count = atomic_read(&asoc->base.sk->sk_rmem_alloc);
-
-	/* If we've reached or overflowed our receive buffer, announce
-	 * a 0 rwnd if rwnd would still be positive.  Store the
-	 * the potential pressure overflow so that the window can be restored
-	 * back to original value.
-	 */
-	if (rx_count >= asoc->base.sk->sk_rcvbuf)
-		over = 1;
-
-	if (asoc->rwnd >= len) {
-		asoc->rwnd -= len;
-		if (over) {
-			asoc->rwnd_press += asoc->rwnd;
-			asoc->rwnd = 0;
-		}
-	} else {
-		asoc->rwnd_over = len - asoc->rwnd;
-		asoc->rwnd = 0;
-	}
-
-	pr_debug("%s: asoc:%p rwnd decreased by %d to (%u, %u, %u)\n",
-		 __func__, asoc, len, asoc->rwnd, asoc->rwnd_over,
-		 asoc->rwnd_press);
-}
 
 /* Build the bind address list for the association based on info from the
  * local endpoint and the remote peer.
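
sctp_assoc_rwnd_update() now derives the advertised window directly from the socket buffer: half of the space left after subtracting what is already queued, or zero if the buffer is overcommitted. A small userspace illustration of that computation:

#include <stdio.h>

/* Approximate the advertised receive window from the socket buffer:
 * half of whatever space is currently unused, clamped at zero. */
static unsigned int rwnd_update(int sk_rcvbuf, int rx_count)
{
	int space = sk_rcvbuf - rx_count;

	return space > 0 ? (unsigned int)space >> 1 : 0;
}

int main(void)
{
	printf("empty buffer:  %u\n", rwnd_update(212992, 0));        /* 106496 */
	printf("half full:     %u\n", rwnd_update(212992, 106496));   /* 53248  */
	printf("overcommitted: %u\n", rwnd_update(212992, 300000));   /* 0      */
	return 0;
}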
diff --git a/net/sctp/ipv6.c b/net/sctp/ipv6.c
index 0f6259a..2b1738e 100644
--- a/net/sctp/ipv6.c
+++ b/net/sctp/ipv6.c
@@ -662,6 +662,8 @@
 	 */
 	sctp_v6_to_sk_daddr(&asoc->peer.primary_addr, newsk);
 
+	newsk->sk_v6_rcv_saddr = sk->sk_v6_rcv_saddr;
+
 	sk_refcnt_debug_inc(newsk);
 
 	if (newsk->sk_prot->init(newsk)) {
diff --git a/net/sctp/sm_statefuns.c b/net/sctp/sm_statefuns.c
index 483dcd7..591b44d 100644
--- a/net/sctp/sm_statefuns.c
+++ b/net/sctp/sm_statefuns.c
@@ -6176,7 +6176,7 @@
 	 * PMTU.  In cases, such as loopback, this might be a rather
 	 * large spill over.
 	 */
-	if ((!chunk->data_accepted) && (!asoc->rwnd || asoc->rwnd_over ||
+	if ((!chunk->data_accepted) && (!asoc->rwnd ||
 	    (datalen > asoc->rwnd + asoc->frag_point))) {
 
 		/* If this is the next TSN, consider reneging to make
diff --git a/net/sctp/socket.c b/net/sctp/socket.c
index 9e91d6e..981aaf8 100644
--- a/net/sctp/socket.c
+++ b/net/sctp/socket.c
@@ -64,6 +64,7 @@
 #include <linux/crypto.h>
 #include <linux/slab.h>
 #include <linux/file.h>
+#include <linux/compat.h>
 
 #include <net/ip.h>
 #include <net/icmp.h>
@@ -1368,11 +1369,19 @@
 /*
  * New (hopefully final) interface for the API.
  * We use the sctp_getaddrs_old structure so that use-space library
- * can avoid any unnecessary allocations.   The only defferent part
+ * can avoid any unnecessary allocations. The only different part
  * is that we store the actual length of the address buffer into the
- * addrs_num structure member.  That way we can re-use the existing
+ * addrs_num structure member. That way we can re-use the existing
  * code.
  */
+#ifdef CONFIG_COMPAT
+struct compat_sctp_getaddrs_old {
+	sctp_assoc_t	assoc_id;
+	s32		addr_num;
+	compat_uptr_t	addrs;		/* struct sockaddr * */
+};
+#endif
+
 static int sctp_getsockopt_connectx3(struct sock *sk, int len,
 				     char __user *optval,
 				     int __user *optlen)
@@ -1381,16 +1390,30 @@
 	sctp_assoc_t assoc_id = 0;
 	int err = 0;
 
-	if (len < sizeof(param))
-		return -EINVAL;
+#ifdef CONFIG_COMPAT
+	if (is_compat_task()) {
+		struct compat_sctp_getaddrs_old param32;
 
-	if (copy_from_user(&param, optval, sizeof(param)))
-		return -EFAULT;
+		if (len < sizeof(param32))
+			return -EINVAL;
+		if (copy_from_user(&param32, optval, sizeof(param32)))
+			return -EFAULT;
 
-	err = __sctp_setsockopt_connectx(sk,
-			(struct sockaddr __user *)param.addrs,
-			param.addr_num, &assoc_id);
+		param.assoc_id = param32.assoc_id;
+		param.addr_num = param32.addr_num;
+		param.addrs = compat_ptr(param32.addrs);
+	} else
+#endif
+	{
+		if (len < sizeof(param))
+			return -EINVAL;
+		if (copy_from_user(&param, optval, sizeof(param)))
+			return -EFAULT;
+	}
 
+	err = __sctp_setsockopt_connectx(sk, (struct sockaddr __user *)
+					 param.addrs, param.addr_num,
+					 &assoc_id);
 	if (err == 0 || err == -EINPROGRESS) {
 		if (copy_to_user(optval, &assoc_id, sizeof(assoc_id)))
 			return -EFAULT;
@@ -2092,12 +2115,6 @@
 		sctp_skb_pull(skb, copied);
 		skb_queue_head(&sk->sk_receive_queue, skb);
 
-		/* When only partial message is copied to the user, increase
-		 * rwnd by that amount. If all the data in the skb is read,
-		 * rwnd is updated when the event is freed.
-		 */
-		if (!sctp_ulpevent_is_notification(event))
-			sctp_assoc_rwnd_increase(event->asoc, copied);
 		goto out;
 	} else if ((event->msg_flags & MSG_NOTIFICATION) ||
 		   (event->msg_flags & MSG_EOR))
diff --git a/net/sctp/sysctl.c b/net/sctp/sysctl.c
index 7135e61..35c8923 100644
--- a/net/sctp/sysctl.c
+++ b/net/sctp/sysctl.c
@@ -151,6 +151,7 @@
 	},
 	{
 		.procname	= "cookie_hmac_alg",
+		.data		= &init_net.sctp.sctp_hmac_alg,
 		.maxlen		= 8,
 		.mode		= 0644,
 		.proc_handler	= proc_sctp_do_hmac_alg,
@@ -401,15 +402,18 @@
 
 int sctp_sysctl_net_register(struct net *net)
 {
-	struct ctl_table *table;
-	int i;
+	struct ctl_table *table = sctp_net_table;
 
-	table = kmemdup(sctp_net_table, sizeof(sctp_net_table), GFP_KERNEL);
-	if (!table)
-		return -ENOMEM;
+	if (!net_eq(net, &init_net)) {
+		int i;
 
-	for (i = 0; table[i].data; i++)
-		table[i].data += (char *)(&net->sctp) - (char *)&init_net.sctp;
+		table = kmemdup(sctp_net_table, sizeof(sctp_net_table), GFP_KERNEL);
+		if (!table)
+			return -ENOMEM;
+
+		for (i = 0; table[i].data; i++)
+			table[i].data += (char *)(&net->sctp) - (char *)&init_net.sctp;
+	}
 
 	net->sctp.sysctl_header = register_net_sysctl(net, "net/sctp", table);
 	return 0;
diff --git a/net/sctp/ulpevent.c b/net/sctp/ulpevent.c
index 85c6465..8d198ae 100644
--- a/net/sctp/ulpevent.c
+++ b/net/sctp/ulpevent.c
@@ -989,7 +989,7 @@
 	skb = sctp_event2skb(event);
 	/* Set the owner and charge rwnd for bytes received.  */
 	sctp_ulpevent_set_owner(event, asoc);
-	sctp_assoc_rwnd_decrease(asoc, skb_headlen(skb));
+	sctp_assoc_rwnd_update(asoc, false);
 
 	if (!skb->data_len)
 		return;
@@ -1011,6 +1011,7 @@
 {
 	struct sk_buff *skb, *frag;
 	unsigned int	len;
+	struct sctp_association *asoc;
 
 	/* Current stack structures assume that the rcv buffer is
 	 * per socket.   For UDP style sockets this is not true as
@@ -1035,8 +1036,11 @@
 	}
 
 done:
-	sctp_assoc_rwnd_increase(event->asoc, len);
+	asoc = event->asoc;
+	sctp_association_hold(asoc);
 	sctp_ulpevent_release_owner(event);
+	sctp_assoc_rwnd_update(asoc, true);
+	sctp_association_put(asoc);
 }
 
 static void sctp_ulpevent_release_frag_data(struct sctp_ulpevent *event)
diff --git a/net/sunrpc/auth_gss/auth_gss.c b/net/sunrpc/auth_gss/auth_gss.c
index 0a2aee0..36e431e 100644
--- a/net/sunrpc/auth_gss/auth_gss.c
+++ b/net/sunrpc/auth_gss/auth_gss.c
@@ -108,6 +108,7 @@
 static DEFINE_SPINLOCK(pipe_version_lock);
 static struct rpc_wait_queue pipe_version_rpc_waitqueue;
 static DECLARE_WAIT_QUEUE_HEAD(pipe_version_waitqueue);
+static void gss_put_auth(struct gss_auth *gss_auth);
 
 static void gss_free_ctx(struct gss_cl_ctx *);
 static const struct rpc_pipe_ops gss_upcall_ops_v0;
@@ -320,6 +321,7 @@
 	if (gss_msg->ctx != NULL)
 		gss_put_ctx(gss_msg->ctx);
 	rpc_destroy_wait_queue(&gss_msg->rpc_waitqueue);
+	gss_put_auth(gss_msg->auth);
 	kfree(gss_msg);
 }
 
@@ -498,9 +500,12 @@
 	default:
 		err = gss_encode_v1_msg(gss_msg, service_name, gss_auth->target_name);
 		if (err)
-			goto err_free_msg;
+			goto err_put_pipe_version;
 	};
+	kref_get(&gss_auth->kref);
 	return gss_msg;
+err_put_pipe_version:
+	put_pipe_version(gss_auth->net);
 err_free_msg:
 	kfree(gss_msg);
 err:
@@ -532,13 +537,7 @@
 
 static void warn_gssd(void)
 {
-	static unsigned long ratelimit;
-	unsigned long now = jiffies;
-
-	if (time_after(now, ratelimit)) {
-		pr_warn("RPC: AUTH_GSS upcall failed. Please check user daemon is running.\n");
-		ratelimit = now + 15*HZ;
-	}
+	dprintk("AUTH_GSS upcall failed. Please check user daemon is running.\n");
 }
 
 static inline int
@@ -997,6 +996,8 @@
 	gss_auth->service = gss_pseudoflavor_to_service(gss_auth->mech, flavor);
 	if (gss_auth->service == 0)
 		goto err_put_mech;
+	if (!gssd_running(gss_auth->net))
+		goto err_put_mech;
 	auth = &gss_auth->rpc_auth;
 	auth->au_cslack = GSS_CRED_SLACK >> 2;
 	auth->au_rslack = GSS_VERF_SLACK >> 2;
@@ -1068,6 +1069,12 @@
 }
 
 static void
+gss_put_auth(struct gss_auth *gss_auth)
+{
+	kref_put(&gss_auth->kref, gss_free_callback);
+}
+
+static void
 gss_destroy(struct rpc_auth *auth)
 {
 	struct gss_auth *gss_auth = container_of(auth,
@@ -1088,7 +1095,7 @@
 	gss_auth->gss_pipe[1] = NULL;
 	rpcauth_destroy_credcache(auth);
 
-	kref_put(&gss_auth->kref, gss_free_callback);
+	gss_put_auth(gss_auth);
 }
 
 /*
@@ -1259,7 +1266,7 @@
 	call_rcu(&cred->cr_rcu, gss_free_cred_callback);
 	if (ctx)
 		gss_put_ctx(ctx);
-	kref_put(&gss_auth->kref, gss_free_callback);
+	gss_put_auth(gss_auth);
 }
 
 static void
diff --git a/net/sunrpc/auth_gss/gss_krb5_keys.c b/net/sunrpc/auth_gss/gss_krb5_keys.c
index 76e42e6..24589bd 100644
--- a/net/sunrpc/auth_gss/gss_krb5_keys.c
+++ b/net/sunrpc/auth_gss/gss_krb5_keys.c
@@ -59,6 +59,7 @@
 #include <linux/crypto.h>
 #include <linux/sunrpc/gss_krb5.h>
 #include <linux/sunrpc/xdr.h>
+#include <linux/lcm.h>
 
 #ifdef RPC_DEBUG
 # define RPCDBG_FACILITY        RPCDBG_AUTH
@@ -72,7 +73,7 @@
 static void krb5_nfold(u32 inbits, const u8 *in,
 		       u32 outbits, u8 *out)
 {
-	int a, b, c, lcm;
+	unsigned long ulcm;
 	int byte, i, msbit;
 
 	/* the code below is more readable if I make these bytes
@@ -82,17 +83,7 @@
 	outbits >>= 3;
 
 	/* first compute lcm(n,k) */
-
-	a = outbits;
-	b = inbits;
-
-	while (b != 0) {
-		c = b;
-		b = a%b;
-		a = c;
-	}
-
-	lcm = outbits*inbits/a;
+	ulcm = lcm(inbits, outbits);
 
 	/* now do the real work */
 
@@ -101,7 +92,7 @@
 
 	/* this will end up cycling through k lcm(k,n)/k times, which
 	   is correct */
-	for (i = lcm-1; i >= 0; i--) {
+	for (i = ulcm-1; i >= 0; i--) {
 		/* compute the msbit in k which gets added into this byte */
 		msbit = (
 			/* first, start with the msbit in the first,
diff --git a/net/sunrpc/auth_gss/gss_rpc_upcall.c b/net/sunrpc/auth_gss/gss_rpc_upcall.c
index 458f85e..abbb7dc 100644
--- a/net/sunrpc/auth_gss/gss_rpc_upcall.c
+++ b/net/sunrpc/auth_gss/gss_rpc_upcall.c
@@ -137,7 +137,6 @@
 {
 	mutex_init(&sn->gssp_lock);
 	sn->gssp_clnt = NULL;
-	init_waitqueue_head(&sn->gssp_wq);
 }
 
 int set_gssp_clnt(struct net *net)
@@ -154,7 +153,6 @@
 		sn->gssp_clnt = clnt;
 	}
 	mutex_unlock(&sn->gssp_lock);
-	wake_up(&sn->gssp_wq);
 	return ret;
 }
 
diff --git a/net/sunrpc/auth_gss/svcauth_gss.c b/net/sunrpc/auth_gss/svcauth_gss.c
index 008cdad..0f73f45 100644
--- a/net/sunrpc/auth_gss/svcauth_gss.c
+++ b/net/sunrpc/auth_gss/svcauth_gss.c
@@ -1263,66 +1263,35 @@
 	return ret;
 }
 
-DEFINE_SPINLOCK(use_gssp_lock);
+/*
+ * Try to set the sn->use_gss_proxy variable to a new value. We only allow
+ * it to be changed if it's currently undefined (-1). If it's any other value
+ * then return -EBUSY unless the type wouldn't have changed anyway.
+ */
+static int set_gss_proxy(struct net *net, int type)
+{
+	struct sunrpc_net *sn = net_generic(net, sunrpc_net_id);
+	int ret;
+
+	WARN_ON_ONCE(type != 0 && type != 1);
+	ret = cmpxchg(&sn->use_gss_proxy, -1, type);
+	if (ret != -1 && ret != type)
+		return -EBUSY;
+	return 0;
+}
 
 static bool use_gss_proxy(struct net *net)
 {
 	struct sunrpc_net *sn = net_generic(net, sunrpc_net_id);
 
-	if (sn->use_gss_proxy != -1)
-		return sn->use_gss_proxy;
-	spin_lock(&use_gssp_lock);
-	/*
-	 * If you wanted gss-proxy, you should have said so before
-	 * starting to accept requests:
-	 */
-	sn->use_gss_proxy = 0;
-	spin_unlock(&use_gssp_lock);
-	return 0;
+	/* If use_gss_proxy is still undefined, then try to disable it */
+	if (sn->use_gss_proxy == -1)
+		set_gss_proxy(net, 0);
+	return sn->use_gss_proxy;
 }
 
 #ifdef CONFIG_PROC_FS
 
-static int set_gss_proxy(struct net *net, int type)
-{
-	struct sunrpc_net *sn = net_generic(net, sunrpc_net_id);
-	int ret = 0;
-
-	WARN_ON_ONCE(type != 0 && type != 1);
-	spin_lock(&use_gssp_lock);
-	if (sn->use_gss_proxy == -1 || sn->use_gss_proxy == type)
-		sn->use_gss_proxy = type;
-	else
-		ret = -EBUSY;
-	spin_unlock(&use_gssp_lock);
-	wake_up(&sn->gssp_wq);
-	return ret;
-}
-
-static inline bool gssp_ready(struct sunrpc_net *sn)
-{
-	switch (sn->use_gss_proxy) {
-		case -1:
-			return false;
-		case 0:
-			return true;
-		case 1:
-			return sn->gssp_clnt;
-	}
-	WARN_ON_ONCE(1);
-	return false;
-}
-
-static int wait_for_gss_proxy(struct net *net, struct file *file)
-{
-	struct sunrpc_net *sn = net_generic(net, sunrpc_net_id);
-
-	if (file->f_flags & O_NONBLOCK && !gssp_ready(sn))
-		return -EAGAIN;
-	return wait_event_interruptible(sn->gssp_wq, gssp_ready(sn));
-}
-
-
 static ssize_t write_gssp(struct file *file, const char __user *buf,
 			 size_t count, loff_t *ppos)
 {
@@ -1342,10 +1311,10 @@
 		return res;
 	if (i != 1)
 		return -EINVAL;
-	res = set_gss_proxy(net, 1);
+	res = set_gssp_clnt(net);
 	if (res)
 		return res;
-	res = set_gssp_clnt(net);
+	res = set_gss_proxy(net, 1);
 	if (res)
 		return res;
 	return count;
@@ -1355,16 +1324,12 @@
 			 size_t count, loff_t *ppos)
 {
 	struct net *net = PDE_DATA(file_inode(file));
+	struct sunrpc_net *sn = net_generic(net, sunrpc_net_id);
 	unsigned long p = *ppos;
 	char tbuf[10];
 	size_t len;
-	int ret;
 
-	ret = wait_for_gss_proxy(net, file);
-	if (ret)
-		return ret;
-
-	snprintf(tbuf, sizeof(tbuf), "%d\n", use_gss_proxy(net));
+	snprintf(tbuf, sizeof(tbuf), "%d\n", sn->use_gss_proxy);
 	len = strlen(tbuf);
 	if (p >= len)
 		return 0;
@@ -1626,8 +1591,7 @@
 	BUG_ON(integ_len % 4);
 	*p++ = htonl(integ_len);
 	*p++ = htonl(gc->gc_seq);
-	if (xdr_buf_subsegment(resbuf, &integ_buf, integ_offset,
-				integ_len))
+	if (xdr_buf_subsegment(resbuf, &integ_buf, integ_offset, integ_len))
 		BUG();
 	if (resbuf->tail[0].iov_base == NULL) {
 		if (resbuf->head[0].iov_len + RPC_MAX_AUTH_SIZE > PAGE_SIZE)
@@ -1635,10 +1599,8 @@
 		resbuf->tail[0].iov_base = resbuf->head[0].iov_base
 						+ resbuf->head[0].iov_len;
 		resbuf->tail[0].iov_len = 0;
-		resv = &resbuf->tail[0];
-	} else {
-		resv = &resbuf->tail[0];
 	}
+	resv = &resbuf->tail[0];
 	mic.data = (u8 *)resv->iov_base + resv->iov_len + 4;
 	if (gss_get_mic(gsd->rsci->mechctx, &integ_buf, &mic))
 		goto out_err;
diff --git a/net/sunrpc/backchannel_rqst.c b/net/sunrpc/backchannel_rqst.c
index 890a299..e860d4f 100644
--- a/net/sunrpc/backchannel_rqst.c
+++ b/net/sunrpc/backchannel_rqst.c
@@ -64,7 +64,6 @@
 	free_page((unsigned long)xbufp->head[0].iov_base);
 	xbufp = &req->rq_snd_buf;
 	free_page((unsigned long)xbufp->head[0].iov_base);
-	list_del(&req->rq_bc_pa_list);
 	kfree(req);
 }
 
@@ -168,8 +167,10 @@
 	/*
 	 * Memory allocation failed, free the temporary list
 	 */
-	list_for_each_entry_safe(req, tmp, &tmp_list, rq_bc_pa_list)
+	list_for_each_entry_safe(req, tmp, &tmp_list, rq_bc_pa_list) {
+		list_del(&req->rq_bc_pa_list);
 		xprt_free_allocation(req);
+	}
 
 	dprintk("RPC:       setup backchannel transport failed\n");
 	return -ENOMEM;
@@ -198,6 +199,7 @@
 	xprt_dec_alloc_count(xprt, max_reqs);
 	list_for_each_entry_safe(req, tmp, &xprt->bc_pa_list, rq_bc_pa_list) {
 		dprintk("RPC:        req=%p\n", req);
+		list_del(&req->rq_bc_pa_list);
 		xprt_free_allocation(req);
 		if (--max_reqs == 0)
 			break;
diff --git a/net/sunrpc/cache.c b/net/sunrpc/cache.c
index e521d20..ae333c1 100644
--- a/net/sunrpc/cache.c
+++ b/net/sunrpc/cache.c
@@ -1111,9 +1111,7 @@
 		*bp++ = 'x';
 		len -= 2;
 		while (blen && len >= 2) {
-			unsigned char c = *buf++;
-			*bp++ = '0' + ((c&0xf0)>>4) + (c>=0xa0)*('a'-'9'-1);
-			*bp++ = '0' + (c&0x0f) + ((c&0x0f)>=0x0a)*('a'-'9'-1);
+			bp = hex_byte_pack(bp, *buf++);
 			len -= 2;
 			blen--;
 		}
diff --git a/net/sunrpc/netns.h b/net/sunrpc/netns.h
index 94e506f..df58268 100644
--- a/net/sunrpc/netns.h
+++ b/net/sunrpc/netns.h
@@ -27,7 +27,6 @@
 	unsigned int rpcb_is_af_local : 1;
 
 	struct mutex gssp_lock;
-	wait_queue_head_t gssp_wq;
 	struct rpc_clnt *gssp_clnt;
 	int use_gss_proxy;
 	int pipe_version;
diff --git a/net/sunrpc/svc.c b/net/sunrpc/svc.c
index e7fbe36..5de6801 100644
--- a/net/sunrpc/svc.c
+++ b/net/sunrpc/svc.c
@@ -916,9 +916,6 @@
 #endif
 	}
 
-	if (error < 0)
-		printk(KERN_WARNING "svc: failed to register %sv%u RPC "
-			"service (errno %d).\n", progname, version, -error);
 	return error;
 }
 
@@ -937,6 +934,7 @@
 		 const unsigned short port)
 {
 	struct svc_program	*progp;
+	struct svc_version	*vers;
 	unsigned int		i;
 	int			error = 0;
 
@@ -946,7 +944,8 @@
 
 	for (progp = serv->sv_program; progp; progp = progp->pg_next) {
 		for (i = 0; i < progp->pg_nvers; i++) {
-			if (progp->pg_vers[i] == NULL)
+			vers = progp->pg_vers[i];
+			if (vers == NULL)
 				continue;
 
 			dprintk("svc: svc_register(%sv%d, %s, %u, %u)%s\n",
@@ -955,16 +954,26 @@
 					proto == IPPROTO_UDP?  "udp" : "tcp",
 					port,
 					family,
-					progp->pg_vers[i]->vs_hidden?
-						" (but not telling portmap)" : "");
+					vers->vs_hidden ?
+					" (but not telling portmap)" : "");
 
-			if (progp->pg_vers[i]->vs_hidden)
+			if (vers->vs_hidden)
 				continue;
 
 			error = __svc_register(net, progp->pg_name, progp->pg_prog,
 						i, family, proto, port);
-			if (error < 0)
+
+			if (vers->vs_rpcb_optnl) {
+				error = 0;
+				continue;
+			}
+
+			if (error < 0) {
+				printk(KERN_WARNING "svc: failed to register "
+					"%sv%u RPC service (errno %d).\n",
+					progp->pg_name, i, -error);
 				break;
+			}
 		}
 	}
 
diff --git a/net/sunrpc/svc_xprt.c b/net/sunrpc/svc_xprt.c
index 80a6640..06c6ff0 100644
--- a/net/sunrpc/svc_xprt.c
+++ b/net/sunrpc/svc_xprt.c
@@ -571,7 +571,7 @@
 	}
 }
 
-int svc_alloc_arg(struct svc_rqst *rqstp)
+static int svc_alloc_arg(struct svc_rqst *rqstp)
 {
 	struct svc_serv *serv = rqstp->rq_server;
 	struct xdr_buf *arg;
@@ -612,7 +612,7 @@
 	return 0;
 }
 
-struct svc_xprt *svc_get_next_xprt(struct svc_rqst *rqstp, long timeout)
+static struct svc_xprt *svc_get_next_xprt(struct svc_rqst *rqstp, long timeout)
 {
 	struct svc_xprt *xprt;
 	struct svc_pool		*pool = rqstp->rq_pool;
@@ -691,7 +691,7 @@
 	return xprt;
 }
 
-void svc_add_new_temp_xprt(struct svc_serv *serv, struct svc_xprt *newxpt)
+static void svc_add_new_temp_xprt(struct svc_serv *serv, struct svc_xprt *newxpt)
 {
 	spin_lock_bh(&serv->sv_lock);
 	set_bit(XPT_TEMP, &newxpt->xpt_flags);
diff --git a/net/sunrpc/xprtsock.c b/net/sunrpc/xprtsock.c
index 2a7ca8f..0addefc 100644
--- a/net/sunrpc/xprtsock.c
+++ b/net/sunrpc/xprtsock.c
@@ -510,6 +510,7 @@
 	struct rpc_rqst *req = task->tk_rqstp;
 	struct rpc_xprt *xprt = req->rq_xprt;
 	struct sock_xprt *transport = container_of(xprt, struct sock_xprt, xprt);
+	struct sock *sk = transport->inet;
 	int ret = -EAGAIN;
 
 	dprintk("RPC: %5u xmit incomplete (%u left of %u)\n",
@@ -527,7 +528,7 @@
 			 * window size
 			 */
 			set_bit(SOCK_NOSPACE, &transport->sock->flags);
-			transport->inet->sk_write_pending++;
+			sk->sk_write_pending++;
 			/* ...and wait for more buffer space */
 			xprt_wait_for_buffer_space(task, xs_nospace_callback);
 		}
@@ -537,6 +538,9 @@
 	}
 
 	spin_unlock_bh(&xprt->transport_lock);
+
+	/* Race breaker in case memory is freed before above code is called */
+	sk->sk_write_space(sk);
 	return ret;
 }
 
@@ -2964,10 +2968,9 @@
 
 	/*
 	 * Once we've associated a backchannel xprt with a connection,
-	 * we want to keep it around as long as long as the connection
-	 * lasts, in case we need to start using it for a backchannel
-	 * again; this reference won't be dropped until bc_xprt is
-	 * destroyed.
+	 * we want to keep it around as long as the connection lasts,
+	 * in case we need to start using it for a backchannel again;
+	 * this reference won't be dropped until bc_xprt is destroyed.
 	 */
 	xprt_get(xprt);
 	args->bc_xprt->xpt_bc_xprt = xprt;
diff --git a/net/tipc/core.h b/net/tipc/core.h
index 1ff477b..5569d96 100644
--- a/net/tipc/core.h
+++ b/net/tipc/core.h
@@ -192,6 +192,7 @@
 
 struct tipc_skb_cb {
 	void *handle;
+	bool deferred;
 };
 
 #define TIPC_SKB_CB(__skb) ((struct tipc_skb_cb *)&((__skb)->cb[0]))
diff --git a/net/tipc/link.c b/net/tipc/link.c
index d4b5de4..da6018b 100644
--- a/net/tipc/link.c
+++ b/net/tipc/link.c
@@ -1391,6 +1391,12 @@
 	u32 hdr_size;
 	u32 min_hdr_size;
 
+	/* If this packet comes from the defer queue, the skb has already
+	 * been validated
+	 */
+	if (unlikely(TIPC_SKB_CB(buf)->deferred))
+		return 1;
+
 	if (unlikely(buf->len < MIN_H_SIZE))
 		return 0;
 
@@ -1703,6 +1709,7 @@
 				&l_ptr->newest_deferred_in, buf)) {
 		l_ptr->deferred_inqueue_sz++;
 		l_ptr->stats.deferred_recv++;
+		TIPC_SKB_CB(buf)->deferred = true;
 		if ((l_ptr->deferred_inqueue_sz % 16) == 1)
 			tipc_link_send_proto_msg(l_ptr, STATE_MSG, 0, 0, 0, 0, 0);
 	} else
diff --git a/net/wireless/core.c b/net/wireless/core.c
index d89dee2..010892b 100644
--- a/net/wireless/core.c
+++ b/net/wireless/core.c
@@ -203,8 +203,11 @@
 
 	rdev->opencount--;
 
-	WARN_ON(rdev->scan_req && rdev->scan_req->wdev == wdev &&
-		!rdev->scan_req->notified);
+	if (rdev->scan_req && rdev->scan_req->wdev == wdev) {
+		if (WARN_ON(!rdev->scan_req->notified))
+			rdev->scan_req->aborted = true;
+		___cfg80211_scan_done(rdev, false);
+	}
 }
 
 static int cfg80211_rfkill_set_block(void *data, bool blocked)
@@ -440,9 +443,6 @@
 	int i;
 	u16 ifmodes = wiphy->interface_modes;
 
-	/* support for 5/10 MHz is broken due to nl80211 API mess - disable */
-	wiphy->flags &= ~WIPHY_FLAG_SUPPORTS_5_10_MHZ;
-
 	/*
 	 * There are major locking problems in nl80211/mac80211 for CSA,
 	 * disable for all drivers until this has been reworked.
@@ -859,8 +859,11 @@
 		break;
 	case NETDEV_DOWN:
 		cfg80211_update_iface_num(rdev, wdev->iftype, -1);
-		WARN_ON(rdev->scan_req && rdev->scan_req->wdev == wdev &&
-			!rdev->scan_req->notified);
+		if (rdev->scan_req && rdev->scan_req->wdev == wdev) {
+			if (WARN_ON(!rdev->scan_req->notified))
+				rdev->scan_req->aborted = true;
+			___cfg80211_scan_done(rdev, false);
+		}
 
 		if (WARN_ON(rdev->sched_scan_req &&
 			    rdev->sched_scan_req->dev == wdev->netdev)) {
diff --git a/net/wireless/core.h b/net/wireless/core.h
index 37ec16d..f1d193b 100644
--- a/net/wireless/core.h
+++ b/net/wireless/core.h
@@ -62,6 +62,7 @@
 	struct rb_root bss_tree;
 	u32 bss_generation;
 	struct cfg80211_scan_request *scan_req; /* protected by RTNL */
+	struct sk_buff *scan_msg;
 	struct cfg80211_sched_scan_request *sched_scan_req;
 	unsigned long suspend_at;
 	struct work_struct scan_done_wk;
@@ -361,7 +362,8 @@
 				   struct key_params *params, int key_idx,
 				   bool pairwise, const u8 *mac_addr);
 void __cfg80211_scan_done(struct work_struct *wk);
-void ___cfg80211_scan_done(struct cfg80211_registered_device *rdev);
+void ___cfg80211_scan_done(struct cfg80211_registered_device *rdev,
+			   bool send_message);
 void __cfg80211_sched_scan_results(struct work_struct *wk);
 int __cfg80211_stop_sched_scan(struct cfg80211_registered_device *rdev,
 			       bool driver_initiated);
diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c
index 7a74259..4fe2e6e 100644
--- a/net/wireless/nl80211.c
+++ b/net/wireless/nl80211.c
@@ -1719,9 +1719,10 @@
 				 * We can then retry with the larger buffer.
 				 */
 				if ((ret == -ENOBUFS || ret == -EMSGSIZE) &&
-				    !skb->len &&
+				    !skb->len && !state->split &&
 				    cb->min_dump_alloc < 4096) {
 					cb->min_dump_alloc = 4096;
+					state->split_start = 0;
 					rtnl_unlock();
 					return 1;
 				}
@@ -5244,7 +5245,7 @@
 	if (!rdev->ops->scan)
 		return -EOPNOTSUPP;
 
-	if (rdev->scan_req) {
+	if (rdev->scan_req || rdev->scan_msg) {
 		err = -EBUSY;
 		goto unlock;
 	}
@@ -10011,40 +10012,31 @@
 				NL80211_MCGRP_SCAN, GFP_KERNEL);
 }
 
-void nl80211_send_scan_done(struct cfg80211_registered_device *rdev,
-			    struct wireless_dev *wdev)
+struct sk_buff *nl80211_build_scan_msg(struct cfg80211_registered_device *rdev,
+				       struct wireless_dev *wdev, bool aborted)
 {
 	struct sk_buff *msg;
 
 	msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
 	if (!msg)
-		return;
+		return NULL;
 
 	if (nl80211_send_scan_msg(msg, rdev, wdev, 0, 0, 0,
-				  NL80211_CMD_NEW_SCAN_RESULTS) < 0) {
+				  aborted ? NL80211_CMD_SCAN_ABORTED :
+					    NL80211_CMD_NEW_SCAN_RESULTS) < 0) {
 		nlmsg_free(msg);
-		return;
+		return NULL;
 	}
 
-	genlmsg_multicast_netns(&nl80211_fam, wiphy_net(&rdev->wiphy), msg, 0,
-				NL80211_MCGRP_SCAN, GFP_KERNEL);
+	return msg;
 }
 
-void nl80211_send_scan_aborted(struct cfg80211_registered_device *rdev,
-			       struct wireless_dev *wdev)
+void nl80211_send_scan_result(struct cfg80211_registered_device *rdev,
+			      struct sk_buff *msg)
 {
-	struct sk_buff *msg;
-
-	msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
 	if (!msg)
 		return;
 
-	if (nl80211_send_scan_msg(msg, rdev, wdev, 0, 0, 0,
-				  NL80211_CMD_SCAN_ABORTED) < 0) {
-		nlmsg_free(msg);
-		return;
-	}
-
 	genlmsg_multicast_netns(&nl80211_fam, wiphy_net(&rdev->wiphy), msg, 0,
 				NL80211_MCGRP_SCAN, GFP_KERNEL);
 }
diff --git a/net/wireless/nl80211.h b/net/wireless/nl80211.h
index b1b2313..7579974 100644
--- a/net/wireless/nl80211.h
+++ b/net/wireless/nl80211.h
@@ -8,10 +8,10 @@
 void nl80211_notify_dev_rename(struct cfg80211_registered_device *rdev);
 void nl80211_send_scan_start(struct cfg80211_registered_device *rdev,
 			     struct wireless_dev *wdev);
-void nl80211_send_scan_done(struct cfg80211_registered_device *rdev,
-			    struct wireless_dev *wdev);
-void nl80211_send_scan_aborted(struct cfg80211_registered_device *rdev,
-			       struct wireless_dev *wdev);
+struct sk_buff *nl80211_build_scan_msg(struct cfg80211_registered_device *rdev,
+				       struct wireless_dev *wdev, bool aborted);
+void nl80211_send_scan_result(struct cfg80211_registered_device *rdev,
+			      struct sk_buff *msg);
 void nl80211_send_sched_scan(struct cfg80211_registered_device *rdev,
 			     struct net_device *netdev, u32 cmd);
 void nl80211_send_sched_scan_results(struct cfg80211_registered_device *rdev,
diff --git a/net/wireless/scan.c b/net/wireless/scan.c
index b528e31..d1ed4ae 100644
--- a/net/wireless/scan.c
+++ b/net/wireless/scan.c
@@ -161,18 +161,25 @@
 		dev->bss_generation++;
 }
 
-void ___cfg80211_scan_done(struct cfg80211_registered_device *rdev)
+void ___cfg80211_scan_done(struct cfg80211_registered_device *rdev,
+			   bool send_message)
 {
 	struct cfg80211_scan_request *request;
 	struct wireless_dev *wdev;
+	struct sk_buff *msg;
 #ifdef CONFIG_CFG80211_WEXT
 	union iwreq_data wrqu;
 #endif
 
 	ASSERT_RTNL();
 
-	request = rdev->scan_req;
+	if (rdev->scan_msg) {
+		nl80211_send_scan_result(rdev, rdev->scan_msg);
+		rdev->scan_msg = NULL;
+		return;
+	}
 
+	request = rdev->scan_req;
 	if (!request)
 		return;
 
@@ -186,18 +193,16 @@
 	if (wdev->netdev)
 		cfg80211_sme_scan_done(wdev->netdev);
 
-	if (request->aborted) {
-		nl80211_send_scan_aborted(rdev, wdev);
-	} else {
-		if (request->flags & NL80211_SCAN_FLAG_FLUSH) {
-			/* flush entries from previous scans */
-			spin_lock_bh(&rdev->bss_lock);
-			__cfg80211_bss_expire(rdev, request->scan_start);
-			spin_unlock_bh(&rdev->bss_lock);
-		}
-		nl80211_send_scan_done(rdev, wdev);
+	if (!request->aborted &&
+	    request->flags & NL80211_SCAN_FLAG_FLUSH) {
+		/* flush entries from previous scans */
+		spin_lock_bh(&rdev->bss_lock);
+		__cfg80211_bss_expire(rdev, request->scan_start);
+		spin_unlock_bh(&rdev->bss_lock);
 	}
 
+	msg = nl80211_build_scan_msg(rdev, wdev, request->aborted);
+
 #ifdef CONFIG_CFG80211_WEXT
 	if (wdev->netdev && !request->aborted) {
 		memset(&wrqu, 0, sizeof(wrqu));
@@ -211,6 +216,11 @@
 
 	rdev->scan_req = NULL;
 	kfree(request);
+
+	if (!send_message)
+		rdev->scan_msg = msg;
+	else
+		nl80211_send_scan_result(rdev, msg);
 }
 
 void __cfg80211_scan_done(struct work_struct *wk)
@@ -221,7 +231,7 @@
 			    scan_done_wk);
 
 	rtnl_lock();
-	___cfg80211_scan_done(rdev);
+	___cfg80211_scan_done(rdev, true);
 	rtnl_unlock();
 }
 
@@ -1079,7 +1089,7 @@
 	if (IS_ERR(rdev))
 		return PTR_ERR(rdev);
 
-	if (rdev->scan_req) {
+	if (rdev->scan_req || rdev->scan_msg) {
 		err = -EBUSY;
 		goto out;
 	}
@@ -1481,7 +1491,7 @@
 	if (IS_ERR(rdev))
 		return PTR_ERR(rdev);
 
-	if (rdev->scan_req)
+	if (rdev->scan_req || rdev->scan_msg)
 		return -EAGAIN;
 
 	res = ieee80211_scan_results(rdev, info, extra, data->length);
diff --git a/net/wireless/sme.c b/net/wireless/sme.c
index a635091..f04d4c3 100644
--- a/net/wireless/sme.c
+++ b/net/wireless/sme.c
@@ -67,7 +67,7 @@
 	ASSERT_RDEV_LOCK(rdev);
 	ASSERT_WDEV_LOCK(wdev);
 
-	if (rdev->scan_req)
+	if (rdev->scan_req || rdev->scan_msg)
 		return -EBUSY;
 
 	if (wdev->conn->params.channel)
diff --git a/scripts/checkpatch.pl b/scripts/checkpatch.pl
index 0ea2a1e..464dcef 100755
--- a/scripts/checkpatch.pl
+++ b/scripts/checkpatch.pl
@@ -471,7 +471,7 @@
 
 	$camelcase_seeded = 1;
 
-	if (-d ".git") {
+	if (-e ".git") {
 		my $git_last_include_commit = `git log --no-merges --pretty=format:"%h%n" -1 -- include`;
 		chomp $git_last_include_commit;
 		$camelcase_cache = ".checkpatch-camelcase.git.$git_last_include_commit";
@@ -499,7 +499,7 @@
 		return;
 	}
 
-	if (-d ".git") {
+	if (-e ".git") {
 		$files = `git ls-files "include/*.h"`;
 		@include_files = split('\n', $files);
 	}
diff --git a/scripts/coccinelle/api/pm_runtime.cocci b/scripts/coccinelle/api/pm_runtime.cocci
new file mode 100644
index 0000000..f01789e
--- /dev/null
+++ b/scripts/coccinelle/api/pm_runtime.cocci
@@ -0,0 +1,109 @@
+/// Make sure pm_runtime_* calls do not use unnecessary IS_ERR_VALUE
+//
+// Keywords: pm_runtime
+// Confidence: Medium
+// Copyright (C) 2013 Texas Instruments Incorporated - GPLv2.
+// URL: http://coccinelle.lip6.fr/
+// Options: --include-headers
+
+virtual patch
+virtual context
+virtual org
+virtual report
+
+//----------------------------------------------------------
+//  Detection
+//----------------------------------------------------------
+
+@runtime_bad_err_handle exists@
+expression ret;
+@@
+(
+ret = \(pm_runtime_idle\|
+	pm_runtime_suspend\|
+	pm_runtime_autosuspend\|
+	pm_runtime_resume\|
+	pm_request_idle\|
+	pm_request_resume\|
+	pm_request_autosuspend\|
+	pm_runtime_get\|
+	pm_runtime_get_sync\|
+	pm_runtime_put\|
+	pm_runtime_put_autosuspend\|
+	pm_runtime_put_sync\|
+	pm_runtime_put_sync_suspend\|
+	pm_runtime_put_sync_autosuspend\|
+	pm_runtime_set_active\|
+	pm_schedule_suspend\|
+	pm_runtime_barrier\|
+	pm_generic_runtime_suspend\|
+	pm_generic_runtime_resume\)(...);
+...
+IS_ERR_VALUE(ret)
+...
+)
+
+//----------------------------------------------------------
+//  For context mode
+//----------------------------------------------------------
+
+@depends on runtime_bad_err_handle && context@
+identifier pm_runtime_api;
+expression ret;
+@@
+(
+ret = pm_runtime_api(...);
+...
+* IS_ERR_VALUE(ret)
+...
+)
+
+//----------------------------------------------------------
+//  For patch mode
+//----------------------------------------------------------
+
+@depends on runtime_bad_err_handle && patch@
+identifier pm_runtime_api;
+expression ret;
+@@
+(
+ret = pm_runtime_api(...);
+...
+- IS_ERR_VALUE(ret)
++ ret < 0
+...
+)
+
+//----------------------------------------------------------
+//  For org and report mode
+//----------------------------------------------------------
+
+@r depends on runtime_bad_err_handle exists@
+position p1, p2;
+identifier pm_runtime_api;
+expression ret;
+@@
+(
+ret = pm_runtime_api@p1(...);
+...
+IS_ERR_VALUE@p2(ret)
+...
+)
+
+@script:python depends on org@
+p1 << r.p1;
+p2 << r.p2;
+pm_runtime_api << r.pm_runtime_api;
+@@
+
+cocci.print_main(pm_runtime_api,p1)
+cocci.print_secs("IS_ERR_VALUE",p2)
+
+@script:python depends on report@
+p1 << r.p1;
+p2 << r.p2;
+pm_runtime_api << r.pm_runtime_api;
+@@
+
+msg = "%s returns < 0 as error. Unecessary IS_ERR_VALUE at line %s" % (pm_runtime_api, p2[0].line)
+coccilib.report.print_report(p1[0],msg)
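
For reference only (not part of the patch above): a minimal, hypothetical C snippet of the pattern this semantic patch targets. The function name, device pointer, and includes are illustrative assumptions; only the pm_runtime_get_sync() call and the IS_ERR_VALUE() test correspond to constructs matched by the rule, and the comment shows how patch mode rewrites the check.

#include <linux/err.h>
#include <linux/pm_runtime.h>

/* Hypothetical caller: pm_runtime_get_sync() returns a plain int that is
 * negative on error, so wrapping it in IS_ERR_VALUE() is unnecessary.
 */
static int foo_enable(struct device *dev)
{
	int ret;

	ret = pm_runtime_get_sync(dev);
	if (IS_ERR_VALUE(ret))	/* flagged by the rule above */
		return ret;
	/* patch mode rewrites the test to:  if (ret < 0)  */
	return 0;
}
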
diff --git a/scripts/get_maintainer.pl b/scripts/get_maintainer.pl
index 9c3986f..4198788 100755
--- a/scripts/get_maintainer.pl
+++ b/scripts/get_maintainer.pl
@@ -95,7 +95,7 @@
 
 my %VCS_cmds_git = (
     "execute_cmd" => \&git_execute_cmd,
-    "available" => '(which("git") ne "") && (-d ".git")',
+    "available" => '(which("git") ne "") && (-e ".git")',
     "find_signers_cmd" =>
 	"git log --no-color --follow --since=\$email_git_since " .
 	    '--numstat --no-merges ' .
diff --git a/scripts/mod/file2alias.c b/scripts/mod/file2alias.c
index 2370863..25e5cb0 100644
--- a/scripts/mod/file2alias.c
+++ b/scripts/mod/file2alias.c
@@ -210,8 +210,8 @@
 				range_lo < 0x9 ? "[%X-9" : "[%X",
 				range_lo);
 			sprintf(alias + strlen(alias),
-				range_hi > 0xA ? "a-%X]" : "%X]",
-				range_lo);
+				range_hi > 0xA ? "A-%X]" : "%X]",
+				range_hi);
 		}
 	}
 	if (bcdDevice_initial_digits < (sizeof(bcdDevice_lo) * 2 - 1))
diff --git a/scripts/package/builddeb b/scripts/package/builddeb
index 90e521f..f46e4dd 100644
--- a/scripts/package/builddeb
+++ b/scripts/package/builddeb
@@ -41,9 +41,9 @@
 	parisc*)
 		debarch=hppa ;;
 	mips*)
-		debarch=mips$(grep -q CPU_LITTLE_ENDIAN=y $KCONFIG_CONFIG && echo el) ;;
+		debarch=mips$(grep -q CPU_LITTLE_ENDIAN=y $KCONFIG_CONFIG && echo el || true) ;;
 	arm*)
-		debarch=arm$(grep -q CONFIG_AEABI=y $KCONFIG_CONFIG && echo el) ;;
+		debarch=arm$(grep -q CONFIG_AEABI=y $KCONFIG_CONFIG && echo el || true) ;;
 	*)
 		echo "" >&2
 		echo "** ** **  WARNING  ** ** **" >&2
@@ -62,7 +62,7 @@
 	fi
 
 	# Create the package
-	dpkg-gencontrol -isp $forcearch -p$pname -P"$pdir"
+	dpkg-gencontrol -isp $forcearch -Vkernel:debarch="${debarch:-$(dpkg --print-architecture)}" -p$pname -P"$pdir"
 	dpkg --build "$pdir" ..
 }
 
@@ -172,8 +172,15 @@
 
 # Install the maintainer scripts
 # Note: hook scripts under /etc/kernel are also executed by official Debian
-# kernel packages, as well as kernel packages built using make-kpkg
+# kernel packages, as well as kernel packages built using make-kpkg.
+# make-kpkg sets $INITRD to indicate whether an initramfs is wanted, and
+# so do we; recent versions of dracut and initramfs-tools will obey this.
 debhookdir=${KDEB_HOOKDIR:-/etc/kernel}
+if grep -q '^CONFIG_BLK_DEV_INITRD=y' $KCONFIG_CONFIG; then
+	want_initrd=Yes
+else
+	want_initrd=No
+fi
 for script in postinst postrm preinst prerm ; do
 	mkdir -p "$tmpdir$debhookdir/$script.d"
 	cat <<EOF > "$tmpdir/DEBIAN/$script"
@@ -184,6 +191,9 @@
 # Pass maintainer script parameters to hook scripts
 export DEB_MAINT_PARAMS="\$*"
 
+# Tell initramfs builder whether it's wanted
+export INITRD=$want_initrd
+
 test -d $debhookdir/$script.d && run-parts --arg="$version" --arg="/$installed_image_path" $debhookdir/$script.d
 exit 0
 EOF
@@ -288,15 +298,14 @@
 (cd $objtree; cp $KCONFIG_CONFIG $destdir/.config) # copy .config manually to be where it's expected to be
 ln -sf "/usr/src/linux-headers-$version" "$kernel_headers_dir/lib/modules/$version/build"
 rm -f "$objtree/debian/hdrsrcfiles" "$objtree/debian/hdrobjfiles"
-arch=$(dpkg --print-architecture)
 
 cat <<EOF >> debian/control
 
 Package: $kernel_headers_packagename
 Provides: linux-headers, linux-headers-2.6
-Architecture: $arch
-Description: Linux kernel headers for $KERNELRELEASE on $arch
- This package provides kernel header files for $KERNELRELEASE on $arch
+Architecture: any
+Description: Linux kernel headers for $KERNELRELEASE on \${kernel:debarch}
+ This package provides kernel header files for $KERNELRELEASE on \${kernel:debarch}
  .
  This is useful for people who need to build external modules
 EOF
diff --git a/scripts/setlocalversion b/scripts/setlocalversion
index d105a44..63d91e2 100755
--- a/scripts/setlocalversion
+++ b/scripts/setlocalversion
@@ -43,7 +43,8 @@
 	fi
 
 	# Check for git and a git repo.
-	if test -d .git && head=`git rev-parse --verify --short HEAD 2>/dev/null`; then
+	if test -z "$(git rev-parse --show-cdup 2>/dev/null)" &&
+	   head=`git rev-parse --verify --short HEAD 2>/dev/null`; then
 
 		# If we are at a tagged commit (like "v2.6.30-rc6"), we ignore
 		# it, because this version is defined in the top level Makefile.
diff --git a/security/Kconfig b/security/Kconfig
index e9c6ac7..beb86b5 100644
--- a/security/Kconfig
+++ b/security/Kconfig
@@ -103,7 +103,7 @@
 config LSM_MMAP_MIN_ADDR
 	int "Low address space for LSM to protect from user allocation"
 	depends on SECURITY && SECURITY_SELINUX
-	default 32768 if ARM
+	default 32768 if ARM || (ARM64 && COMPAT)
 	default 65536
 	help
 	  This is the portion of low virtual memory which should be protected
diff --git a/security/selinux/nlmsgtab.c b/security/selinux/nlmsgtab.c
index 332ac8a..2df7b90 100644
--- a/security/selinux/nlmsgtab.c
+++ b/security/selinux/nlmsgtab.c
@@ -17,6 +17,7 @@
 #include <linux/inet_diag.h>
 #include <linux/xfrm.h>
 #include <linux/audit.h>
+#include <linux/sock_diag.h>
 
 #include "flask.h"
 #include "av_permissions.h"
@@ -78,6 +79,7 @@
 {
 	{ TCPDIAG_GETSOCK,	NETLINK_TCPDIAG_SOCKET__NLMSG_READ },
 	{ DCCPDIAG_GETSOCK,	NETLINK_TCPDIAG_SOCKET__NLMSG_READ },
+	{ SOCK_DIAG_BY_FAMILY,	NETLINK_TCPDIAG_SOCKET__NLMSG_READ },
 };
 
 static struct nlmsg_perm nlmsg_xfrm_perms[] =
diff --git a/security/selinux/ss/services.c b/security/selinux/ss/services.c
index c93c211..5d0144e 100644
--- a/security/selinux/ss/services.c
+++ b/security/selinux/ss/services.c
@@ -1232,6 +1232,10 @@
 	struct context context;
 	int rc = 0;
 
+	/* An empty security context is never valid. */
+	if (!scontext_len)
+		return -EINVAL;
+
 	if (!ss_initialized) {
 		int i;
 
diff --git a/sound/core/init.c b/sound/core/init.c
index 1351f22..0d42fcd 100644
--- a/sound/core/init.c
+++ b/sound/core/init.c
@@ -131,6 +131,31 @@
 #define init_info_for_card(card)
 #endif
 
+static int check_empty_slot(struct module *module, int slot)
+{
+	return !slots[slot] || !*slots[slot];
+}
+
+/* return an empty slot number (>= 0) found in the given bitmask @mask.
+ * @mask == -1 == 0xffffffff means: take any free slot up to 32
+ * when no slot is available, return the original @mask as is.
+ */
+static int get_slot_from_bitmask(int mask, int (*check)(struct module *, int),
+				 struct module *module)
+{
+	int slot;
+
+	for (slot = 0; slot < SNDRV_CARDS; slot++) {
+		if (slot < 32 && !(mask & (1U << slot)))
+			continue;
+		if (!test_bit(slot, snd_cards_lock)) {
+			if (check(module, slot))
+				return slot; /* found */
+		}
+	}
+	return mask; /* unchanged */
+}
+
 /**
  *  snd_card_create - create and initialize a soundcard structure
  *  @idx: card index (address) [0 ... (SNDRV_CARDS-1)]
@@ -152,7 +177,7 @@
 		    struct snd_card **card_ret)
 {
 	struct snd_card *card;
-	int err, idx2;
+	int err;
 
 	if (snd_BUG_ON(!card_ret))
 		return -EINVAL;
@@ -167,32 +192,10 @@
 		strlcpy(card->id, xid, sizeof(card->id));
 	err = 0;
 	mutex_lock(&snd_card_mutex);
-	if (idx < 0) {
-		for (idx2 = 0; idx2 < SNDRV_CARDS; idx2++) {
-			/* idx == -1 == 0xffff means: take any free slot */
-			if (idx2 < sizeof(int) && !(idx & (1U << idx2)))
-				continue;
-			if (!test_bit(idx2, snd_cards_lock)) {
-				if (module_slot_match(module, idx2)) {
-					idx = idx2;
-					break;
-				}
-			}
-		}
-	}
-	if (idx < 0) {
-		for (idx2 = 0; idx2 < SNDRV_CARDS; idx2++) {
-			/* idx == -1 == 0xffff means: take any free slot */
-			if (idx2 < sizeof(int) && !(idx & (1U << idx2)))
-				continue;
-			if (!test_bit(idx2, snd_cards_lock)) {
-				if (!slots[idx2] || !*slots[idx2]) {
-					idx = idx2;
-					break;
-				}
-			}
-		}
-	}
+	if (idx < 0) /* first check the matching module-name slot */
+		idx = get_slot_from_bitmask(idx, module_slot_match, module);
+	if (idx < 0) /* if not matched, assign an empty slot */
+		idx = get_slot_from_bitmask(idx, check_empty_slot, module);
 	if (idx < 0)
 		err = -ENODEV;
 	else if (idx < snd_ecards_limit) {
diff --git a/sound/pci/cs46xx/cs46xx_lib.c b/sound/pci/cs46xx/cs46xx_lib.c
index f18e587..062398e 100644
--- a/sound/pci/cs46xx/cs46xx_lib.c
+++ b/sound/pci/cs46xx/cs46xx_lib.c
@@ -369,6 +369,7 @@
 			kfree(module->segments[i].data);
 		kfree(module->segments);
 	}
+	kfree(module);
 }
 
 /* firmware binary format:
diff --git a/sound/pci/hda/hda_codec.c b/sound/pci/hda/hda_codec.c
index ec4536c..dafcf82 100644
--- a/sound/pci/hda/hda_codec.c
+++ b/sound/pci/hda/hda_codec.c
@@ -932,7 +932,7 @@
 }
 EXPORT_SYMBOL_GPL(snd_hda_bus_new);
 
-#ifdef CONFIG_SND_HDA_GENERIC
+#if IS_ENABLED(CONFIG_SND_HDA_GENERIC)
 #define is_generic_config(codec) \
 	(codec->modelname && !strcmp(codec->modelname, "generic"))
 #else
@@ -1339,23 +1339,15 @@
 /*
  * Dynamic symbol binding for the codec parsers
  */
-#ifdef MODULE
-#define load_parser_sym(sym)		((int (*)(struct hda_codec *))symbol_request(sym))
-#define unload_parser_addr(addr)	symbol_put_addr(addr)
-#else
-#define load_parser_sym(sym)		(sym)
-#define unload_parser_addr(addr)	do {} while (0)
-#endif
 
 #define load_parser(codec, sym) \
-	((codec)->parser = load_parser_sym(sym))
+	((codec)->parser = (int (*)(struct hda_codec *))symbol_request(sym))
 
 static void unload_parser(struct hda_codec *codec)
 {
-	if (codec->parser) {
-		unload_parser_addr(codec->parser);
-		codec->parser = NULL;
-	}
+	if (codec->parser)
+		symbol_put_addr(codec->parser);
+	codec->parser = NULL;
 }
 
 /*
@@ -1570,7 +1562,7 @@
 EXPORT_SYMBOL_GPL(snd_hda_codec_update_widgets);
 
 
-#ifdef CONFIG_SND_HDA_CODEC_HDMI
+#if IS_ENABLED(CONFIG_SND_HDA_CODEC_HDMI)
 /* if all audio out widgets are digital, let's assume the codec as a HDMI/DP */
 static bool is_likely_hdmi_codec(struct hda_codec *codec)
 {
@@ -1620,12 +1612,20 @@
 		patch = codec->preset->patch;
 	if (!patch) {
 		unload_parser(codec); /* to be sure */
-		if (is_likely_hdmi_codec(codec))
+		if (is_likely_hdmi_codec(codec)) {
+#if IS_MODULE(CONFIG_SND_HDA_CODEC_HDMI)
 			patch = load_parser(codec, snd_hda_parse_hdmi_codec);
-#ifdef CONFIG_SND_HDA_GENERIC
-		if (!patch)
-			patch = load_parser(codec, snd_hda_parse_generic_codec);
+#elif IS_BUILTIN(CONFIG_SND_HDA_CODEC_HDMI)
+			patch = snd_hda_parse_hdmi_codec;
 #endif
+		}
+		if (!patch) {
+#if IS_MODULE(CONFIG_SND_HDA_GENERIC)
+			patch = load_parser(codec, snd_hda_parse_generic_codec);
+#elif IS_BUILTIN(CONFIG_SND_HDA_GENERIC)
+			patch = snd_hda_parse_generic_codec;
+#endif
+		}
 		if (!patch) {
 			printk(KERN_ERR "hda-codec: No codec parser is available\n");
 			return -ENODEV;
diff --git a/sound/pci/hda/hda_codec.h b/sound/pci/hda/hda_codec.h
index 2b5d19e..ab2a444 100644
--- a/sound/pci/hda/hda_codec.h
+++ b/sound/pci/hda/hda_codec.h
@@ -361,6 +361,7 @@
 	unsigned int epss:1;		/* supporting EPSS? */
 	unsigned int cached_write:1;	/* write only to caches */
 	unsigned int dp_mst:1; /* support DP1.2 Multi-stream transport */
+	unsigned int dump_coef:1; /* dump processing coefs in codec proc file */
 #ifdef CONFIG_PM
 	unsigned int power_on :1;	/* current (global) power-state */
 	unsigned int d3_stop_clk:1;	/* support D3 operation without BCLK */
diff --git a/sound/pci/hda/hda_generic.c b/sound/pci/hda/hda_generic.c
index 8321a97..d9a09bd 100644
--- a/sound/pci/hda/hda_generic.c
+++ b/sound/pci/hda/hda_generic.c
@@ -3269,7 +3269,7 @@
 	mutex_unlock(&codec->control_mutex);
 	snd_hda_codec_flush_cache(codec); /* flush the updates */
 	if (err >= 0 && spec->cap_sync_hook)
-		spec->cap_sync_hook(codec, ucontrol);
+		spec->cap_sync_hook(codec, kcontrol, ucontrol);
 	return err;
 }
 
@@ -3390,7 +3390,7 @@
 		return ret;
 
 	if (spec->cap_sync_hook)
-		spec->cap_sync_hook(codec, ucontrol);
+		spec->cap_sync_hook(codec, kcontrol, ucontrol);
 
 	return ret;
 }
@@ -3795,7 +3795,7 @@
 		return 0;
 	snd_hda_activate_path(codec, path, true, false);
 	if (spec->cap_sync_hook)
-		spec->cap_sync_hook(codec, NULL);
+		spec->cap_sync_hook(codec, NULL, NULL);
 	path_power_down_sync(codec, old_path);
 	return 1;
 }
@@ -5270,7 +5270,7 @@
 	}
 
 	if (spec->cap_sync_hook)
-		spec->cap_sync_hook(codec, NULL);
+		spec->cap_sync_hook(codec, NULL, NULL);
 }
 
 /* set right pin controls for digital I/O */
diff --git a/sound/pci/hda/hda_generic.h b/sound/pci/hda/hda_generic.h
index 07f7672..c908afb 100644
--- a/sound/pci/hda/hda_generic.h
+++ b/sound/pci/hda/hda_generic.h
@@ -274,6 +274,7 @@
 	void (*init_hook)(struct hda_codec *codec);
 	void (*automute_hook)(struct hda_codec *codec);
 	void (*cap_sync_hook)(struct hda_codec *codec,
+			      struct snd_kcontrol *kcontrol,
 			      struct snd_ctl_elem_value *ucontrol);
 
 	/* PCM hooks */
diff --git a/sound/pci/hda/hda_intel.c b/sound/pci/hda/hda_intel.c
index fa2879a..e354ab1 100644
--- a/sound/pci/hda/hda_intel.c
+++ b/sound/pci/hda/hda_intel.c
@@ -198,7 +198,7 @@
 #endif
 
 #if defined(CONFIG_PM) && defined(CONFIG_VGA_SWITCHEROO)
-#ifdef CONFIG_SND_HDA_CODEC_HDMI
+#if IS_ENABLED(CONFIG_SND_HDA_CODEC_HDMI)
 #define SUPPORT_VGA_SWITCHEROO
 #endif
 #endif
diff --git a/sound/pci/hda/hda_proc.c b/sound/pci/hda/hda_proc.c
index a8cb22e..ce5a6da 100644
--- a/sound/pci/hda/hda_proc.c
+++ b/sound/pci/hda/hda_proc.c
@@ -24,9 +24,14 @@
 #include <linux/init.h>
 #include <linux/slab.h>
 #include <sound/core.h>
+#include <linux/module.h>
 #include "hda_codec.h"
 #include "hda_local.h"
 
+static int dump_coef = -1;
+module_param(dump_coef, int, 0644);
+MODULE_PARM_DESC(dump_coef, "Dump processing coefficients in codec proc file (-1=auto, 0=disable, 1=enable)");
+
 static char *bits_names(unsigned int bits, char *names[], int size)
 {
 	int i, n;
@@ -488,14 +493,39 @@
 		    (unsol & AC_UNSOL_ENABLED) ? 1 : 0);
 }
 
+static inline bool can_dump_coef(struct hda_codec *codec)
+{
+	switch (dump_coef) {
+	case 0: return false;
+	case 1: return true;
+	default: return codec->dump_coef;
+	}
+}
+
 static void print_proc_caps(struct snd_info_buffer *buffer,
 			    struct hda_codec *codec, hda_nid_t nid)
 {
+	unsigned int i, ncoeff, oldindex;
 	unsigned int proc_caps = snd_hda_param_read(codec, nid,
 						    AC_PAR_PROC_CAP);
+	ncoeff = (proc_caps & AC_PCAP_NUM_COEF) >> AC_PCAP_NUM_COEF_SHIFT;
 	snd_iprintf(buffer, "  Processing caps: benign=%d, ncoeff=%d\n",
-		    proc_caps & AC_PCAP_BENIGN,
-		    (proc_caps & AC_PCAP_NUM_COEF) >> AC_PCAP_NUM_COEF_SHIFT);
+		    proc_caps & AC_PCAP_BENIGN, ncoeff);
+
+	if (!can_dump_coef(codec))
+		return;
+
+	/* Note: This is racy - another process could run in parallel and change
+	   the coef index too. */
+	oldindex = snd_hda_codec_read(codec, nid, 0, AC_VERB_GET_COEF_INDEX, 0);
+	for (i = 0; i < ncoeff; i++) {
+		unsigned int val;
+		snd_hda_codec_write(codec, nid, 0, AC_VERB_SET_COEF_INDEX, i);
+		val = snd_hda_codec_read(codec, nid, 0, AC_VERB_GET_PROC_COEF,
+					 0);
+		snd_iprintf(buffer, "    Coeff 0x%02x: 0x%04x\n", i, val);
+	}
+	snd_hda_codec_write(codec, nid, 0, AC_VERB_SET_COEF_INDEX, oldindex);
 }
 
 static void print_conn_list(struct snd_info_buffer *buffer,
diff --git a/sound/pci/hda/patch_analog.c b/sound/pci/hda/patch_analog.c
index 7a426ed..df3652a 100644
--- a/sound/pci/hda/patch_analog.c
+++ b/sound/pci/hda/patch_analog.c
@@ -244,6 +244,19 @@
 	}
 }
 
+/* Toshiba Satellite L40 implements EAPD in a standard way unlike others */
+static void ad1986a_fixup_eapd(struct hda_codec *codec,
+			       const struct hda_fixup *fix, int action)
+{
+	struct ad198x_spec *spec = codec->spec;
+
+	if (action == HDA_FIXUP_ACT_PRE_PROBE) {
+		codec->inv_eapd = 0;
+		spec->gen.keep_eapd_on = 1;
+		spec->eapd_nid = 0x1b;
+	}
+}
+
 enum {
 	AD1986A_FIXUP_INV_JACK_DETECT,
 	AD1986A_FIXUP_ULTRA,
@@ -251,6 +264,7 @@
 	AD1986A_FIXUP_3STACK,
 	AD1986A_FIXUP_LAPTOP,
 	AD1986A_FIXUP_LAPTOP_IMIC,
+	AD1986A_FIXUP_EAPD,
 };
 
 static const struct hda_fixup ad1986a_fixups[] = {
@@ -311,6 +325,10 @@
 		.chained_before = 1,
 		.chain_id = AD1986A_FIXUP_LAPTOP,
 	},
+	[AD1986A_FIXUP_EAPD] = {
+		.type = HDA_FIXUP_FUNC,
+		.v.func = ad1986a_fixup_eapd,
+	},
 };
 
 static const struct snd_pci_quirk ad1986a_fixup_tbl[] = {
@@ -318,6 +336,7 @@
 	SND_PCI_QUIRK_MASK(0x1043, 0xff00, 0x8100, "ASUS P5", AD1986A_FIXUP_3STACK),
 	SND_PCI_QUIRK_MASK(0x1043, 0xff00, 0x8200, "ASUS M2", AD1986A_FIXUP_3STACK),
 	SND_PCI_QUIRK(0x10de, 0xcb84, "ASUS A8N-VM", AD1986A_FIXUP_3STACK),
+	SND_PCI_QUIRK(0x1179, 0xff40, "Toshiba Satellite L40", AD1986A_FIXUP_EAPD),
 	SND_PCI_QUIRK(0x144d, 0xc01e, "FSC V2060", AD1986A_FIXUP_LAPTOP),
 	SND_PCI_QUIRK_MASK(0x144d, 0xff00, 0xc000, "Samsung", AD1986A_FIXUP_SAMSUNG),
 	SND_PCI_QUIRK(0x144d, 0xc027, "Samsung Q1", AD1986A_FIXUP_ULTRA),
@@ -472,6 +491,8 @@
 static int patch_ad1983(struct hda_codec *codec)
 {
 	struct ad198x_spec *spec;
+	static hda_nid_t conn_0c[] = { 0x08 };
+	static hda_nid_t conn_0d[] = { 0x09 };
 	int err;
 
 	err = alloc_ad_spec(codec);
@@ -479,8 +500,14 @@
 		return err;
 	spec = codec->spec;
 
+	spec->gen.mixer_nid = 0x0e;
 	spec->gen.beep_nid = 0x10;
 	set_beep_amp(spec, 0x10, 0, HDA_OUTPUT);
+
+	/* limit the loopback routes not to confuse the parser */
+	snd_hda_override_conn_list(codec, 0x0c, ARRAY_SIZE(conn_0c), conn_0c);
+	snd_hda_override_conn_list(codec, 0x0d, ARRAY_SIZE(conn_0d), conn_0d);
+
 	err = ad198x_parse_auto_config(codec, false);
 	if (err < 0)
 		goto error;
diff --git a/sound/pci/hda/patch_conexant.c b/sound/pci/hda/patch_conexant.c
index 4e0ec146..bcf91be 100644
--- a/sound/pci/hda/patch_conexant.c
+++ b/sound/pci/hda/patch_conexant.c
@@ -3291,7 +3291,8 @@
 }
 
 static void cxt_update_headset_mode_hook(struct hda_codec *codec,
-			     struct snd_ctl_elem_value *ucontrol)
+					 struct snd_kcontrol *kcontrol,
+					 struct snd_ctl_elem_value *ucontrol)
 {
 	cxt_update_headset_mode(codec);
 }
diff --git a/sound/pci/hda/patch_hdmi.c b/sound/pci/hda/patch_hdmi.c
index 64f0a5e..5ef9503 100644
--- a/sound/pci/hda/patch_hdmi.c
+++ b/sound/pci/hda/patch_hdmi.c
@@ -132,6 +132,9 @@
 
 	struct hdmi_eld temp_eld;
 	struct hdmi_ops ops;
+
+	bool dyn_pin_out;
+
 	/*
 	 * Non-generic VIA/NVIDIA specific
 	 */
@@ -500,15 +503,25 @@
 
 static void hdmi_init_pin(struct hda_codec *codec, hda_nid_t pin_nid)
 {
+	struct hdmi_spec *spec = codec->spec;
+	int pin_out;
+
 	/* Unmute */
 	if (get_wcaps(codec, pin_nid) & AC_WCAP_OUT_AMP)
 		snd_hda_codec_write(codec, pin_nid, 0,
 				AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_UNMUTE);
-	/* Enable pin out: some machines with GM965 gets broken output when
-	 * the pin is disabled or changed while using with HDMI
-	 */
+
+	if (spec->dyn_pin_out)
+		/* Disable pin out until stream is active */
+		pin_out = 0;
+	else
+		/* Enable pin out: some machines with GM965 get broken output
+		 * when the pin is disabled or changed while in use with HDMI
+		 */
+		pin_out = PIN_OUT;
+
 	snd_hda_codec_write(codec, pin_nid, 0,
-			    AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_OUT);
+			    AC_VERB_SET_PIN_WIDGET_CONTROL, pin_out);
 }
 
 static int hdmi_get_channel_count(struct hda_codec *codec, hda_nid_t cvt_nid)
@@ -1735,6 +1748,7 @@
 	struct hdmi_spec_per_pin *per_pin = get_pin(spec, pin_idx);
 	hda_nid_t pin_nid = per_pin->pin_nid;
 	bool non_pcm;
+	int pinctl;
 
 	non_pcm = check_non_pcm_per_cvt(codec, cvt_nid);
 	mutex_lock(&per_pin->lock);
@@ -1744,6 +1758,14 @@
 	hdmi_setup_audio_infoframe(codec, per_pin, non_pcm);
 	mutex_unlock(&per_pin->lock);
 
+	if (spec->dyn_pin_out) {
+		pinctl = snd_hda_codec_read(codec, pin_nid, 0,
+					    AC_VERB_GET_PIN_WIDGET_CONTROL, 0);
+		snd_hda_codec_write(codec, pin_nid, 0,
+				    AC_VERB_SET_PIN_WIDGET_CONTROL,
+				    pinctl | PIN_OUT);
+	}
+
 	return spec->ops.setup_stream(codec, cvt_nid, pin_nid, stream_tag, format);
 }
 
@@ -1763,6 +1785,7 @@
 	int cvt_idx, pin_idx;
 	struct hdmi_spec_per_cvt *per_cvt;
 	struct hdmi_spec_per_pin *per_pin;
+	int pinctl;
 
 	if (hinfo->nid) {
 		cvt_idx = cvt_nid_to_cvt_index(spec, hinfo->nid);
@@ -1779,6 +1802,14 @@
 			return -EINVAL;
 		per_pin = get_pin(spec, pin_idx);
 
+		if (spec->dyn_pin_out) {
+			pinctl = snd_hda_codec_read(codec, per_pin->pin_nid, 0,
+					AC_VERB_GET_PIN_WIDGET_CONTROL, 0);
+			snd_hda_codec_write(codec, per_pin->pin_nid, 0,
+					    AC_VERB_SET_PIN_WIDGET_CONTROL,
+					    pinctl & ~PIN_OUT);
+		}
+
 		snd_hda_spdif_ctls_unassign(codec, pin_idx);
 
 		mutex_lock(&per_pin->lock);
@@ -2840,6 +2871,7 @@
 		return err;
 
 	spec = codec->spec;
+	spec->dyn_pin_out = true;
 
 	spec->ops.chmap_cea_alloc_validate_get_type =
 		nvhdmi_chmap_cea_alloc_validate_get_type;
diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
index f9b22fb..a9a83b8 100644
--- a/sound/pci/hda/patch_realtek.c
+++ b/sound/pci/hda/patch_realtek.c
@@ -708,7 +708,8 @@
 }
 
 static void alc_inv_dmic_hook(struct hda_codec *codec,
-			     struct snd_ctl_elem_value *ucontrol)
+			      struct snd_kcontrol *kcontrol,
+			      struct snd_ctl_elem_value *ucontrol)
 {
 	alc_inv_dmic_sync(codec, false);
 }
@@ -1819,7 +1820,9 @@
 	ALC889_FIXUP_DAC_ROUTE,
 	ALC889_FIXUP_MBP_VREF,
 	ALC889_FIXUP_IMAC91_VREF,
+	ALC889_FIXUP_MBA11_VREF,
 	ALC889_FIXUP_MBA21_VREF,
+	ALC889_FIXUP_MP11_VREF,
 	ALC882_FIXUP_INV_DMIC,
 	ALC882_FIXUP_NO_PRIMARY_HP,
 	ALC887_FIXUP_ASUS_BASS,
@@ -1949,6 +1952,16 @@
 		alc889_fixup_mac_pins(codec, nids, ARRAY_SIZE(nids));
 }
 
+/* Set VREF on speaker pins on mba11 */
+static void alc889_fixup_mba11_vref(struct hda_codec *codec,
+				    const struct hda_fixup *fix, int action)
+{
+	static hda_nid_t nids[1] = { 0x18 };
+
+	if (action == HDA_FIXUP_ACT_INIT)
+		alc889_fixup_mac_pins(codec, nids, ARRAY_SIZE(nids));
+}
+
 /* Set VREF on speaker pins on mba21 */
 static void alc889_fixup_mba21_vref(struct hda_codec *codec,
 				    const struct hda_fixup *fix, int action)
@@ -2167,12 +2180,24 @@
 		.chained = true,
 		.chain_id = ALC882_FIXUP_GPIO1,
 	},
+	[ALC889_FIXUP_MBA11_VREF] = {
+		.type = HDA_FIXUP_FUNC,
+		.v.func = alc889_fixup_mba11_vref,
+		.chained = true,
+		.chain_id = ALC889_FIXUP_MBP_VREF,
+	},
 	[ALC889_FIXUP_MBA21_VREF] = {
 		.type = HDA_FIXUP_FUNC,
 		.v.func = alc889_fixup_mba21_vref,
 		.chained = true,
 		.chain_id = ALC889_FIXUP_MBP_VREF,
 	},
+	[ALC889_FIXUP_MP11_VREF] = {
+		.type = HDA_FIXUP_FUNC,
+		.v.func = alc889_fixup_mba11_vref,
+		.chained = true,
+		.chain_id = ALC885_FIXUP_MACPRO_GPIO,
+	},
 	[ALC882_FIXUP_INV_DMIC] = {
 		.type = HDA_FIXUP_FUNC,
 		.v.func = alc_fixup_inv_dmic_0x12,
@@ -2236,13 +2261,13 @@
 	SND_PCI_QUIRK(0x106b, 0x00a0, "MacBookPro 3,1", ALC889_FIXUP_MBP_VREF),
 	SND_PCI_QUIRK(0x106b, 0x00a1, "Macbook", ALC889_FIXUP_MBP_VREF),
 	SND_PCI_QUIRK(0x106b, 0x00a4, "MacbookPro 4,1", ALC889_FIXUP_MBP_VREF),
-	SND_PCI_QUIRK(0x106b, 0x0c00, "Mac Pro", ALC885_FIXUP_MACPRO_GPIO),
+	SND_PCI_QUIRK(0x106b, 0x0c00, "Mac Pro", ALC889_FIXUP_MP11_VREF),
 	SND_PCI_QUIRK(0x106b, 0x1000, "iMac 24", ALC885_FIXUP_MACPRO_GPIO),
 	SND_PCI_QUIRK(0x106b, 0x2800, "AppleTV", ALC885_FIXUP_MACPRO_GPIO),
 	SND_PCI_QUIRK(0x106b, 0x2c00, "MacbookPro rev3", ALC889_FIXUP_MBP_VREF),
 	SND_PCI_QUIRK(0x106b, 0x3000, "iMac", ALC889_FIXUP_MBP_VREF),
 	SND_PCI_QUIRK(0x106b, 0x3200, "iMac 7,1 Aluminum", ALC882_FIXUP_EAPD),
-	SND_PCI_QUIRK(0x106b, 0x3400, "MacBookAir 1,1", ALC889_FIXUP_MBP_VREF),
+	SND_PCI_QUIRK(0x106b, 0x3400, "MacBookAir 1,1", ALC889_FIXUP_MBA11_VREF),
 	SND_PCI_QUIRK(0x106b, 0x3500, "MacBookAir 2,1", ALC889_FIXUP_MBA21_VREF),
 	SND_PCI_QUIRK(0x106b, 0x3600, "Macbook 3,1", ALC889_FIXUP_MBP_VREF),
 	SND_PCI_QUIRK(0x106b, 0x3800, "MacbookPro 4,1", ALC889_FIXUP_MBP_VREF),
@@ -3194,7 +3219,8 @@
 
 /* turn on/off mic-mute LED per capture hook */
 static void alc269_fixup_hp_gpio_mic_mute_hook(struct hda_codec *codec,
-			       struct snd_ctl_elem_value *ucontrol)
+					       struct snd_kcontrol *kcontrol,
+					       struct snd_ctl_elem_value *ucontrol)
 {
 	struct alc_spec *spec = codec->spec;
 	unsigned int oldval = spec->gpio_led;
@@ -3504,7 +3530,8 @@
 }
 
 static void alc_update_headset_mode_hook(struct hda_codec *codec,
-			     struct snd_ctl_elem_value *ucontrol)
+					 struct snd_kcontrol *kcontrol,
+					 struct snd_ctl_elem_value *ucontrol)
 {
 	alc_update_headset_mode(codec);
 }
@@ -3833,6 +3860,7 @@
 	ALC269_FIXUP_ACER_AC700,
 	ALC269_FIXUP_LIMIT_INT_MIC_BOOST,
 	ALC269VB_FIXUP_ASUS_ZENBOOK,
+	ALC269VB_FIXUP_ASUS_ZENBOOK_UX31A,
 	ALC269_FIXUP_LIMIT_INT_MIC_BOOST_MUTE_LED,
 	ALC269VB_FIXUP_ORDISSIMO_EVE2,
 	ALC283_FIXUP_CHROME_BOOK,
@@ -4126,6 +4154,17 @@
 		.chained = true,
 		.chain_id = ALC269VB_FIXUP_DMIC,
 	},
+	[ALC269VB_FIXUP_ASUS_ZENBOOK_UX31A] = {
+		.type = HDA_FIXUP_VERBS,
+		.v.verbs = (const struct hda_verb[]) {
+			/* class-D output amp +5dB */
+			{ 0x20, AC_VERB_SET_COEF_INDEX, 0x12 },
+			{ 0x20, AC_VERB_SET_PROC_COEF, 0x2800 },
+			{}
+		},
+		.chained = true,
+		.chain_id = ALC269VB_FIXUP_ASUS_ZENBOOK,
+	},
 	[ALC269_FIXUP_LIMIT_INT_MIC_BOOST_MUTE_LED] = {
 		.type = HDA_FIXUP_FUNC,
 		.v.func = alc269_fixup_limit_int_mic_boost,
@@ -4265,6 +4304,7 @@
 	SND_PCI_QUIRK(0x1028, 0x063e, "Dell", ALC269_FIXUP_DELL1_MIC_NO_PRESENCE),
 	SND_PCI_QUIRK(0x1028, 0x063f, "Dell", ALC255_FIXUP_DELL1_MIC_NO_PRESENCE),
 	SND_PCI_QUIRK(0x1028, 0x0640, "Dell", ALC255_FIXUP_DELL1_MIC_NO_PRESENCE),
+	SND_PCI_QUIRK(0x1028, 0x064d, "Dell", ALC255_FIXUP_DELL1_MIC_NO_PRESENCE),
 	SND_PCI_QUIRK(0x1028, 0x0651, "Dell", ALC255_FIXUP_DELL1_MIC_NO_PRESENCE),
 	SND_PCI_QUIRK(0x1028, 0x0652, "Dell", ALC255_FIXUP_DELL1_MIC_NO_PRESENCE),
 	SND_PCI_QUIRK(0x1028, 0x0653, "Dell", ALC255_FIXUP_DELL1_MIC_NO_PRESENCE),
@@ -4282,7 +4322,7 @@
 	SND_PCI_QUIRK(0x1043, 0x106d, "Asus K53BE", ALC269_FIXUP_LIMIT_INT_MIC_BOOST),
 	SND_PCI_QUIRK(0x1043, 0x115d, "Asus 1015E", ALC269_FIXUP_LIMIT_INT_MIC_BOOST),
 	SND_PCI_QUIRK(0x1043, 0x1427, "Asus Zenbook UX31E", ALC269VB_FIXUP_ASUS_ZENBOOK),
-	SND_PCI_QUIRK(0x1043, 0x1517, "Asus Zenbook UX31A", ALC269VB_FIXUP_ASUS_ZENBOOK),
+	SND_PCI_QUIRK(0x1043, 0x1517, "Asus Zenbook UX31A", ALC269VB_FIXUP_ASUS_ZENBOOK_UX31A),
 	SND_PCI_QUIRK(0x1043, 0x16e3, "ASUS UX50", ALC269_FIXUP_STEREO_DMIC),
 	SND_PCI_QUIRK(0x1043, 0x1a13, "Asus G73Jw", ALC269_FIXUP_ASUS_G73JW),
 	SND_PCI_QUIRK(0x1043, 0x1b13, "Asus U41SV", ALC269_FIXUP_INV_DMIC),
@@ -4292,6 +4332,7 @@
 	SND_PCI_QUIRK(0x1043, 0x8398, "ASUS P1005", ALC269_FIXUP_STEREO_DMIC),
 	SND_PCI_QUIRK(0x1043, 0x83ce, "ASUS P1005", ALC269_FIXUP_STEREO_DMIC),
 	SND_PCI_QUIRK(0x1043, 0x8516, "ASUS X101CH", ALC269_FIXUP_ASUS_X101),
+	SND_PCI_QUIRK(0x104d, 0x90b5, "Sony VAIO Pro 11", ALC286_FIXUP_SONY_MIC_NO_PRESENCE),
 	SND_PCI_QUIRK(0x104d, 0x90b6, "Sony VAIO Pro 13", ALC286_FIXUP_SONY_MIC_NO_PRESENCE),
 	SND_PCI_QUIRK(0x104d, 0x9073, "Sony VAIO", ALC275_FIXUP_SONY_VAIO_GPIO2),
 	SND_PCI_QUIRK(0x104d, 0x907b, "Sony VAIO", ALC275_FIXUP_SONY_HWEQ),
@@ -5066,6 +5107,7 @@
 	SND_PCI_QUIRK(0x1025, 0x038b, "Acer Aspire 8943G", ALC662_FIXUP_ASPIRE),
 	SND_PCI_QUIRK(0x1028, 0x05d8, "Dell", ALC668_FIXUP_DELL_MIC_NO_PRESENCE),
 	SND_PCI_QUIRK(0x1028, 0x05db, "Dell", ALC668_FIXUP_DELL_MIC_NO_PRESENCE),
+	SND_PCI_QUIRK(0x1028, 0x060a, "Dell XPS 13", ALC668_FIXUP_DELL_MIC_NO_PRESENCE),
 	SND_PCI_QUIRK(0x1028, 0x0623, "Dell", ALC668_FIXUP_AUTO_MUTE),
 	SND_PCI_QUIRK(0x1028, 0x0624, "Dell", ALC668_FIXUP_AUTO_MUTE),
 	SND_PCI_QUIRK(0x1028, 0x0625, "Dell", ALC668_FIXUP_DELL_MIC_NO_PRESENCE),
diff --git a/sound/pci/hda/patch_sigmatel.c b/sound/pci/hda/patch_sigmatel.c
index 6998cf2..7311bad 100644
--- a/sound/pci/hda/patch_sigmatel.c
+++ b/sound/pci/hda/patch_sigmatel.c
@@ -194,7 +194,7 @@
 	int default_polarity;
 
 	unsigned int mic_mute_led_gpio; /* capture mute LED GPIO */
-	bool mic_mute_led_on; /* current mic mute state */
+	unsigned int mic_enabled; /* current mic mute state (bitmask) */
 
 	/* stream */
 	unsigned int stream_delay;
@@ -324,19 +324,26 @@
 
 /* hook for controlling mic-mute LED GPIO */
 static void stac_capture_led_hook(struct hda_codec *codec,
-			       struct snd_ctl_elem_value *ucontrol)
+				  struct snd_kcontrol *kcontrol,
+				  struct snd_ctl_elem_value *ucontrol)
 {
 	struct sigmatel_spec *spec = codec->spec;
-	bool mute;
+	unsigned int mask;
+	bool cur_mute, prev_mute;
 
-	if (!ucontrol)
+	if (!kcontrol || !ucontrol)
 		return;
 
-	mute = !(ucontrol->value.integer.value[0] ||
-		 ucontrol->value.integer.value[1]);
-	if (spec->mic_mute_led_on != mute) {
-		spec->mic_mute_led_on = mute;
-		if (mute)
+	mask = 1U << snd_ctl_get_ioffidx(kcontrol, &ucontrol->id);
+	prev_mute = !spec->mic_enabled;
+	if (ucontrol->value.integer.value[0] ||
+	    ucontrol->value.integer.value[1])
+		spec->mic_enabled |= mask;
+	else
+		spec->mic_enabled &= ~mask;
+	cur_mute = !spec->mic_enabled;
+	if (cur_mute != prev_mute) {
+		if (cur_mute)
 			spec->gpio_data |= spec->mic_mute_led_gpio;
 		else
 			spec->gpio_data &= ~spec->mic_mute_led_gpio;
@@ -4462,7 +4469,7 @@
 	if (spec->mic_mute_led_gpio) {
 		spec->gpio_mask |= spec->mic_mute_led_gpio;
 		spec->gpio_dir |= spec->mic_mute_led_gpio;
-		spec->mic_mute_led_on = true;
+		spec->mic_enabled = 0;
 		spec->gpio_data |= spec->mic_mute_led_gpio;
 
 		spec->gen.cap_sync_hook = stac_capture_led_hook;
diff --git a/sound/pci/hda/thinkpad_helper.c b/sound/pci/hda/thinkpad_helper.c
index 5799fbc..8fe3b8c 100644
--- a/sound/pci/hda/thinkpad_helper.c
+++ b/sound/pci/hda/thinkpad_helper.c
@@ -39,6 +39,7 @@
 }
 
 static void update_tpacpi_micmute_led(struct hda_codec *codec,
+				      struct snd_kcontrol *kcontrol,
 				      struct snd_ctl_elem_value *ucontrol)
 {
 	if (!ucontrol || !led_set_func)
diff --git a/sound/pci/oxygen/Makefile b/sound/pci/oxygen/Makefile
index 0f87265..8f4c409 100644
--- a/sound/pci/oxygen/Makefile
+++ b/sound/pci/oxygen/Makefile
@@ -1,5 +1,5 @@
 snd-oxygen-lib-objs := oxygen_io.o oxygen_lib.o oxygen_mixer.o oxygen_pcm.o
-snd-oxygen-objs := oxygen.o xonar_dg.o
+snd-oxygen-objs := oxygen.o xonar_dg_mixer.o xonar_dg.o
 snd-virtuoso-objs := virtuoso.o xonar_lib.o \
 	xonar_pcm179x.o xonar_cs43xx.o xonar_wm87x6.o xonar_hdmi.o
 
diff --git a/sound/pci/oxygen/cs4245.h b/sound/pci/oxygen/cs4245.h
index 5e0197e..9909865 100644
--- a/sound/pci/oxygen/cs4245.h
+++ b/sound/pci/oxygen/cs4245.h
@@ -102,6 +102,9 @@
 #define CS4245_ADC_OVFL		0x02
 #define CS4245_ADC_UNDRFL	0x01
 
+#define CS4245_SPI_ADDRESS_S	(0x9e << 16)
+#define CS4245_SPI_WRITE_S	(0 << 16)
 
-#define CS4245_SPI_ADDRESS	(0x9e << 16)
-#define CS4245_SPI_WRITE	(0 << 16)
+#define CS4245_SPI_ADDRESS	0x9e
+#define CS4245_SPI_WRITE	0
+#define CS4245_SPI_READ		1
diff --git a/sound/pci/oxygen/oxygen.h b/sound/pci/oxygen/oxygen.h
index 09a24b2..c10ab07 100644
--- a/sound/pci/oxygen/oxygen.h
+++ b/sound/pci/oxygen/oxygen.h
@@ -198,7 +198,7 @@
 void oxygen_write_ac97_masked(struct oxygen *chip, unsigned int codec,
 			      unsigned int index, u16 data, u16 mask);
 
-void oxygen_write_spi(struct oxygen *chip, u8 control, unsigned int data);
+int oxygen_write_spi(struct oxygen *chip, u8 control, unsigned int data);
 void oxygen_write_i2c(struct oxygen *chip, u8 device, u8 map, u8 data);
 
 void oxygen_reset_uart(struct oxygen *chip);
diff --git a/sound/pci/oxygen/oxygen_io.c b/sound/pci/oxygen/oxygen_io.c
index 521eae4..3274907 100644
--- a/sound/pci/oxygen/oxygen_io.c
+++ b/sound/pci/oxygen/oxygen_io.c
@@ -194,23 +194,36 @@
 }
 EXPORT_SYMBOL(oxygen_write_ac97_masked);
 
-void oxygen_write_spi(struct oxygen *chip, u8 control, unsigned int data)
+static int oxygen_wait_spi(struct oxygen *chip)
 {
 	unsigned int count;
 
-	/* should not need more than 30.72 us (24 * 1.28 us) */
-	count = 10;
-	while ((oxygen_read8(chip, OXYGEN_SPI_CONTROL) & OXYGEN_SPI_BUSY)
-	       && count > 0) {
+	/*
+	 * Higher timeout to be sure: 200 us;
+	 * actual transaction should not need more than 40 us.
+	 */
+	for (count = 50; count > 0; count--) {
 		udelay(4);
-		--count;
+		if ((oxygen_read8(chip, OXYGEN_SPI_CONTROL) &
+						OXYGEN_SPI_BUSY) == 0)
+			return 0;
 	}
+	snd_printk(KERN_ERR "oxygen: SPI wait timeout\n");
+	return -EIO;
+}
 
+int oxygen_write_spi(struct oxygen *chip, u8 control, unsigned int data)
+{
+	/*
+	 * We need to wait AFTER initiating the SPI transaction,
+	 * otherwise read operations will not work.
+	 */
 	oxygen_write8(chip, OXYGEN_SPI_DATA1, data);
 	oxygen_write8(chip, OXYGEN_SPI_DATA2, data >> 8);
 	if (control & OXYGEN_SPI_DATA_LENGTH_3)
 		oxygen_write8(chip, OXYGEN_SPI_DATA3, data >> 16);
 	oxygen_write8(chip, OXYGEN_SPI_CONTROL, control);
+	return oxygen_wait_spi(chip);
 }
 EXPORT_SYMBOL(oxygen_write_spi);
 
diff --git a/sound/pci/oxygen/oxygen_mixer.c b/sound/pci/oxygen/oxygen_mixer.c
index c0dbb52..5988e04 100644
--- a/sound/pci/oxygen/oxygen_mixer.c
+++ b/sound/pci/oxygen/oxygen_mixer.c
@@ -190,6 +190,7 @@
 	if (chip->model.update_center_lfe_mix)
 		chip->model.update_center_lfe_mix(chip, chip->dac_routing > 2);
 }
+EXPORT_SYMBOL(oxygen_update_dac_routing);
 
 static int upmix_put(struct snd_kcontrol *ctl, struct snd_ctl_elem_value *value)
 {
diff --git a/sound/pci/oxygen/oxygen_regs.h b/sound/pci/oxygen/oxygen_regs.h
index 63dc7a0..8c191ba 100644
--- a/sound/pci/oxygen/oxygen_regs.h
+++ b/sound/pci/oxygen/oxygen_regs.h
@@ -318,6 +318,7 @@
 #define  OXYGEN_PLAY_MUTE23		0x0002
 #define  OXYGEN_PLAY_MUTE45		0x0004
 #define  OXYGEN_PLAY_MUTE67		0x0008
+#define  OXYGEN_PLAY_MUTE_MASK		0x000f
 #define  OXYGEN_PLAY_MULTICH_MASK	0x0010
 #define  OXYGEN_PLAY_MULTICH_I2S_DAC	0x0000
 #define  OXYGEN_PLAY_MULTICH_AC97	0x0010
diff --git a/sound/pci/oxygen/xonar_dg.c b/sound/pci/oxygen/xonar_dg.c
index 77acd79..ed6f199 100644
--- a/sound/pci/oxygen/xonar_dg.c
+++ b/sound/pci/oxygen/xonar_dg.c
@@ -2,7 +2,7 @@
  * card driver for the Xonar DG/DGX
  *
  * Copyright (c) Clemens Ladisch <clemens@ladisch.de>
- *
+ * Copyright (c) Roman Volkov <v1ron@mail.ru>
  *
  *  This driver is free software; you can redistribute it and/or modify
  *  it under the terms of the GNU General Public License, version 2.
@@ -20,27 +20,35 @@
  * Xonar DG/DGX
  * ------------
  *
+ * CS4245 and CS4361 both will mute all outputs if any clock ratio
+ * is invalid.
+ *
  * CMI8788:
  *
  *   SPI 0 -> CS4245
  *
+ *   Playback:
  *   I²S 1 -> CS4245
  *   I²S 2 -> CS4361 (center/LFE)
  *   I²S 3 -> CS4361 (surround)
  *   I²S 4 -> CS4361 (front)
+ *   Capture:
+ *   I²S ADC 1 <- CS4245
  *
  *   GPIO 3 <- ?
  *   GPIO 4 <- headphone detect
- *   GPIO 5 -> route input jack to line-in (0) or mic-in (1)
- *   GPIO 6 -> route input jack to line-in (0) or mic-in (1)
- *   GPIO 7 -> enable rear headphone amp
+ *   GPIO 5 -> enable ADC analog circuit for the left channel
+ *   GPIO 6 -> enable ADC analog circuit for the right channel
+ *   GPIO 7 -> switch green rear output jack between CS4245 and the first
+ *             channel of CS4361 (mechanical relay)
  *   GPIO 8 -> enable output to speakers
  *
  * CS4245:
  *
+ *   input 0 <- mic
  *   input 1 <- aux
  *   input 2 <- front mic
- *   input 4 <- line/mic
+ *   input 4 <- line
  *   DAC out -> headphones
  *   aux out -> front panel headphones
  */
@@ -56,553 +64,214 @@
 #include "xonar_dg.h"
 #include "cs4245.h"
 
-#define GPIO_MAGIC		0x0008
-#define GPIO_HP_DETECT		0x0010
-#define GPIO_INPUT_ROUTE	0x0060
-#define GPIO_HP_REAR		0x0080
-#define GPIO_OUTPUT_ENABLE	0x0100
-
-struct dg {
-	unsigned int output_sel;
-	s8 input_vol[4][2];
-	unsigned int input_sel;
-	u8 hp_vol_att;
-	u8 cs4245_regs[0x11];
-};
-
-static void cs4245_write(struct oxygen *chip, unsigned int reg, u8 value)
+int cs4245_write_spi(struct oxygen *chip, u8 reg)
 {
 	struct dg *data = chip->model_data;
+	unsigned int packet;
 
-	oxygen_write_spi(chip, OXYGEN_SPI_TRIGGER |
-			 OXYGEN_SPI_DATA_LENGTH_3 |
-			 OXYGEN_SPI_CLOCK_1280 |
-			 (0 << OXYGEN_SPI_CODEC_SHIFT) |
-			 OXYGEN_SPI_CEN_LATCH_CLOCK_HI,
-			 CS4245_SPI_ADDRESS |
-			 CS4245_SPI_WRITE |
-			 (reg << 8) | value);
-	data->cs4245_regs[reg] = value;
+	packet = reg << 8;
+	packet |= (CS4245_SPI_ADDRESS | CS4245_SPI_WRITE) << 16;
+	packet |= data->cs4245_shadow[reg];
+
+	return oxygen_write_spi(chip, OXYGEN_SPI_TRIGGER |
+				OXYGEN_SPI_DATA_LENGTH_3 |
+				OXYGEN_SPI_CLOCK_1280 |
+				(0 << OXYGEN_SPI_CODEC_SHIFT) |
+				OXYGEN_SPI_CEN_LATCH_CLOCK_HI,
+				packet);
 }
 
-static void cs4245_write_cached(struct oxygen *chip, unsigned int reg, u8 value)
+int cs4245_read_spi(struct oxygen *chip, u8 addr)
 {
 	struct dg *data = chip->model_data;
+	int ret;
 
-	if (value != data->cs4245_regs[reg])
-		cs4245_write(chip, reg, value);
+	ret = oxygen_write_spi(chip, OXYGEN_SPI_TRIGGER |
+		OXYGEN_SPI_DATA_LENGTH_2 |
+		OXYGEN_SPI_CEN_LATCH_CLOCK_HI |
+		OXYGEN_SPI_CLOCK_1280 | (0 << OXYGEN_SPI_CODEC_SHIFT),
+		((CS4245_SPI_ADDRESS | CS4245_SPI_WRITE) << 8) | addr);
+	if (ret < 0)
+		return ret;
+
+	ret = oxygen_write_spi(chip, OXYGEN_SPI_TRIGGER |
+		OXYGEN_SPI_DATA_LENGTH_2 |
+		OXYGEN_SPI_CEN_LATCH_CLOCK_HI |
+		OXYGEN_SPI_CLOCK_1280 | (0 << OXYGEN_SPI_CODEC_SHIFT),
+		(CS4245_SPI_ADDRESS | CS4245_SPI_READ) << 8);
+	if (ret < 0)
+		return ret;
+
+	data->cs4245_shadow[addr] = oxygen_read8(chip, OXYGEN_SPI_DATA1);
+
+	return 0;
 }
 
-static void cs4245_registers_init(struct oxygen *chip)
+int cs4245_shadow_control(struct oxygen *chip, enum cs4245_shadow_operation op)
 {
 	struct dg *data = chip->model_data;
+	unsigned char addr;
+	int ret;
 
-	cs4245_write(chip, CS4245_POWER_CTRL, CS4245_PDN);
-	cs4245_write(chip, CS4245_DAC_CTRL_1,
-		     data->cs4245_regs[CS4245_DAC_CTRL_1]);
-	cs4245_write(chip, CS4245_ADC_CTRL,
-		     data->cs4245_regs[CS4245_ADC_CTRL]);
-	cs4245_write(chip, CS4245_SIGNAL_SEL,
-		     data->cs4245_regs[CS4245_SIGNAL_SEL]);
-	cs4245_write(chip, CS4245_PGA_B_CTRL,
-		     data->cs4245_regs[CS4245_PGA_B_CTRL]);
-	cs4245_write(chip, CS4245_PGA_A_CTRL,
-		     data->cs4245_regs[CS4245_PGA_A_CTRL]);
-	cs4245_write(chip, CS4245_ANALOG_IN,
-		     data->cs4245_regs[CS4245_ANALOG_IN]);
-	cs4245_write(chip, CS4245_DAC_A_CTRL,
-		     data->cs4245_regs[CS4245_DAC_A_CTRL]);
-	cs4245_write(chip, CS4245_DAC_B_CTRL,
-		     data->cs4245_regs[CS4245_DAC_B_CTRL]);
-	cs4245_write(chip, CS4245_DAC_CTRL_2,
-		     CS4245_DAC_SOFT | CS4245_DAC_ZERO | CS4245_INVERT_DAC);
-	cs4245_write(chip, CS4245_INT_MASK, 0);
-	cs4245_write(chip, CS4245_POWER_CTRL, 0);
+	for (addr = 1; addr < ARRAY_SIZE(data->cs4245_shadow); addr++) {
+		ret = (op == CS4245_SAVE_TO_SHADOW ?
+			cs4245_read_spi(chip, addr) :
+			cs4245_write_spi(chip, addr));
+		if (ret < 0)
+			return ret;
+	}
+	return 0;
 }
 
 static void cs4245_init(struct oxygen *chip)
 {
 	struct dg *data = chip->model_data;
 
-	data->cs4245_regs[CS4245_DAC_CTRL_1] =
+	/* save the initial state: codec version, registers */
+	cs4245_shadow_control(chip, CS4245_SAVE_TO_SHADOW);
+
+	/*
+	 * Power up the CODEC internals, enable soft ramp & zero cross, work in
+	 * async. mode, enable aux output from DAC. Invert DAC output as in the
+	 * Windows driver.
+	 */
+	data->cs4245_shadow[CS4245_POWER_CTRL] = 0;
+	data->cs4245_shadow[CS4245_SIGNAL_SEL] =
+		CS4245_A_OUT_SEL_DAC | CS4245_ASYNCH;
+	data->cs4245_shadow[CS4245_DAC_CTRL_1] =
 		CS4245_DAC_FM_SINGLE | CS4245_DAC_DIF_LJUST;
-	data->cs4245_regs[CS4245_ADC_CTRL] =
+	data->cs4245_shadow[CS4245_DAC_CTRL_2] =
+		CS4245_DAC_SOFT | CS4245_DAC_ZERO | CS4245_INVERT_DAC;
+	data->cs4245_shadow[CS4245_ADC_CTRL] =
 		CS4245_ADC_FM_SINGLE | CS4245_ADC_DIF_LJUST;
-	data->cs4245_regs[CS4245_SIGNAL_SEL] =
-		CS4245_A_OUT_SEL_HIZ | CS4245_ASYNCH;
-	data->cs4245_regs[CS4245_PGA_B_CTRL] = 0;
-	data->cs4245_regs[CS4245_PGA_A_CTRL] = 0;
-	data->cs4245_regs[CS4245_ANALOG_IN] =
-		CS4245_PGA_SOFT | CS4245_PGA_ZERO | CS4245_SEL_INPUT_4;
-	data->cs4245_regs[CS4245_DAC_A_CTRL] = 0;
-	data->cs4245_regs[CS4245_DAC_B_CTRL] = 0;
-	cs4245_registers_init(chip);
+	data->cs4245_shadow[CS4245_ANALOG_IN] =
+		CS4245_PGA_SOFT | CS4245_PGA_ZERO;
+	data->cs4245_shadow[CS4245_PGA_B_CTRL] = 0;
+	data->cs4245_shadow[CS4245_PGA_A_CTRL] = 0;
+	data->cs4245_shadow[CS4245_DAC_A_CTRL] = 8;
+	data->cs4245_shadow[CS4245_DAC_B_CTRL] = 8;
+
+	cs4245_shadow_control(chip, CS4245_LOAD_FROM_SHADOW);
 	snd_component_add(chip->card, "CS4245");
 }
 
-static void dg_output_enable(struct oxygen *chip)
-{
-	msleep(2500);
-	oxygen_set_bits16(chip, OXYGEN_GPIO_DATA, GPIO_OUTPUT_ENABLE);
-}
-
-static void dg_init(struct oxygen *chip)
+void dg_init(struct oxygen *chip)
 {
 	struct dg *data = chip->model_data;
 
-	data->output_sel = 0;
-	data->input_sel = 3;
-	data->hp_vol_att = 2 * 16;
+	data->output_sel = PLAYBACK_DST_HP_FP;
+	data->input_sel = CAPTURE_SRC_MIC;
 
 	cs4245_init(chip);
-
-	oxygen_clear_bits16(chip, OXYGEN_GPIO_CONTROL,
-			    GPIO_MAGIC | GPIO_HP_DETECT);
-	oxygen_set_bits16(chip, OXYGEN_GPIO_CONTROL,
-			  GPIO_INPUT_ROUTE | GPIO_HP_REAR | GPIO_OUTPUT_ENABLE);
-	oxygen_clear_bits16(chip, OXYGEN_GPIO_DATA,
-			    GPIO_INPUT_ROUTE | GPIO_HP_REAR);
-	dg_output_enable(chip);
+	oxygen_write16(chip, OXYGEN_GPIO_CONTROL,
+		       GPIO_OUTPUT_ENABLE | GPIO_HP_REAR | GPIO_INPUT_ROUTE);
+	/* anti-pop delay, wait some time before enabling the output */
+	msleep(2500);
+	oxygen_write16(chip, OXYGEN_GPIO_DATA,
+		       GPIO_OUTPUT_ENABLE | GPIO_INPUT_ROUTE);
 }
 
-static void dg_cleanup(struct oxygen *chip)
+void dg_cleanup(struct oxygen *chip)
 {
 	oxygen_clear_bits16(chip, OXYGEN_GPIO_DATA, GPIO_OUTPUT_ENABLE);
 }
 
-static void dg_suspend(struct oxygen *chip)
+void dg_suspend(struct oxygen *chip)
 {
 	dg_cleanup(chip);
 }
 
-static void dg_resume(struct oxygen *chip)
+void dg_resume(struct oxygen *chip)
 {
-	cs4245_registers_init(chip);
-	dg_output_enable(chip);
+	cs4245_shadow_control(chip, CS4245_LOAD_FROM_SHADOW);
+	msleep(2500);
+	oxygen_set_bits16(chip, OXYGEN_GPIO_DATA, GPIO_OUTPUT_ENABLE);
 }
 
-static void set_cs4245_dac_params(struct oxygen *chip,
+void set_cs4245_dac_params(struct oxygen *chip,
 				  struct snd_pcm_hw_params *params)
 {
 	struct dg *data = chip->model_data;
-	u8 value;
+	unsigned char dac_ctrl;
+	unsigned char mclk_freq;
 
-	value = data->cs4245_regs[CS4245_DAC_CTRL_1] & ~CS4245_DAC_FM_MASK;
-	if (params_rate(params) <= 50000)
-		value |= CS4245_DAC_FM_SINGLE;
-	else if (params_rate(params) <= 100000)
-		value |= CS4245_DAC_FM_DOUBLE;
-	else
-		value |= CS4245_DAC_FM_QUAD;
-	cs4245_write_cached(chip, CS4245_DAC_CTRL_1, value);
+	dac_ctrl = data->cs4245_shadow[CS4245_DAC_CTRL_1] & ~CS4245_DAC_FM_MASK;
+	mclk_freq = data->cs4245_shadow[CS4245_MCLK_FREQ] & ~CS4245_MCLK1_MASK;
+	if (params_rate(params) <= 50000) {
+		dac_ctrl |= CS4245_DAC_FM_SINGLE;
+		mclk_freq |= CS4245_MCLK_1 << CS4245_MCLK1_SHIFT;
+	} else if (params_rate(params) <= 100000) {
+		dac_ctrl |= CS4245_DAC_FM_DOUBLE;
+		mclk_freq |= CS4245_MCLK_1 << CS4245_MCLK1_SHIFT;
+	} else {
+		dac_ctrl |= CS4245_DAC_FM_QUAD;
+		mclk_freq |= CS4245_MCLK_2 << CS4245_MCLK1_SHIFT;
+	}
+	data->cs4245_shadow[CS4245_DAC_CTRL_1] = dac_ctrl;
+	data->cs4245_shadow[CS4245_MCLK_FREQ] = mclk_freq;
+	cs4245_write_spi(chip, CS4245_DAC_CTRL_1);
+	cs4245_write_spi(chip, CS4245_MCLK_FREQ);
 }
 
-static void set_cs4245_adc_params(struct oxygen *chip,
+void set_cs4245_adc_params(struct oxygen *chip,
 				  struct snd_pcm_hw_params *params)
 {
 	struct dg *data = chip->model_data;
-	u8 value;
+	unsigned char adc_ctrl;
+	unsigned char mclk_freq;
 
-	value = data->cs4245_regs[CS4245_ADC_CTRL] & ~CS4245_ADC_FM_MASK;
-	if (params_rate(params) <= 50000)
-		value |= CS4245_ADC_FM_SINGLE;
-	else if (params_rate(params) <= 100000)
-		value |= CS4245_ADC_FM_DOUBLE;
-	else
-		value |= CS4245_ADC_FM_QUAD;
-	cs4245_write_cached(chip, CS4245_ADC_CTRL, value);
+	adc_ctrl = data->cs4245_shadow[CS4245_ADC_CTRL] & ~CS4245_ADC_FM_MASK;
+	mclk_freq = data->cs4245_shadow[CS4245_MCLK_FREQ] & ~CS4245_MCLK2_MASK;
+	if (params_rate(params) <= 50000) {
+		adc_ctrl |= CS4245_ADC_FM_SINGLE;
+		mclk_freq |= CS4245_MCLK_1 << CS4245_MCLK2_SHIFT;
+	} else if (params_rate(params) <= 100000) {
+		adc_ctrl |= CS4245_ADC_FM_DOUBLE;
+		mclk_freq |= CS4245_MCLK_1 << CS4245_MCLK2_SHIFT;
+	} else {
+		adc_ctrl |= CS4245_ADC_FM_QUAD;
+		mclk_freq |= CS4245_MCLK_2 << CS4245_MCLK2_SHIFT;
+	}
+	data->cs4245_shadow[CS4245_ADC_CTRL] = adc_ctrl;
+	data->cs4245_shadow[CS4245_MCLK_FREQ] = mclk_freq;
+	cs4245_write_spi(chip, CS4245_ADC_CTRL);
+	cs4245_write_spi(chip, CS4245_MCLK_FREQ);
 }
 
-static inline unsigned int shift_bits(unsigned int value,
-				      unsigned int shift_from,
-				      unsigned int shift_to,
-				      unsigned int mask)
-{
-	if (shift_from < shift_to)
-		return (value << (shift_to - shift_from)) & mask;
-	else
-		return (value >> (shift_from - shift_to)) & mask;
-}
-
-static unsigned int adjust_dg_dac_routing(struct oxygen *chip,
+unsigned int adjust_dg_dac_routing(struct oxygen *chip,
 					  unsigned int play_routing)
 {
-	return (play_routing & OXYGEN_PLAY_DAC0_SOURCE_MASK) |
-	       shift_bits(play_routing,
-			  OXYGEN_PLAY_DAC2_SOURCE_SHIFT,
-			  OXYGEN_PLAY_DAC1_SOURCE_SHIFT,
-			  OXYGEN_PLAY_DAC1_SOURCE_MASK) |
-	       shift_bits(play_routing,
-			  OXYGEN_PLAY_DAC1_SOURCE_SHIFT,
-			  OXYGEN_PLAY_DAC2_SOURCE_SHIFT,
-			  OXYGEN_PLAY_DAC2_SOURCE_MASK) |
-	       shift_bits(play_routing,
-			  OXYGEN_PLAY_DAC0_SOURCE_SHIFT,
-			  OXYGEN_PLAY_DAC3_SOURCE_SHIFT,
-			  OXYGEN_PLAY_DAC3_SOURCE_MASK);
-}
-
-static int output_switch_info(struct snd_kcontrol *ctl,
-			      struct snd_ctl_elem_info *info)
-{
-	static const char *const names[3] = {
-		"Speakers", "Headphones", "FP Headphones"
-	};
-
-	return snd_ctl_enum_info(info, 1, 3, names);
-}
-
-static int output_switch_get(struct snd_kcontrol *ctl,
-			     struct snd_ctl_elem_value *value)
-{
-	struct oxygen *chip = ctl->private_data;
 	struct dg *data = chip->model_data;
+	unsigned int routing = 0;
 
-	mutex_lock(&chip->mutex);
-	value->value.enumerated.item[0] = data->output_sel;
-	mutex_unlock(&chip->mutex);
-	return 0;
-}
-
-static int output_switch_put(struct snd_kcontrol *ctl,
-			     struct snd_ctl_elem_value *value)
-{
-	struct oxygen *chip = ctl->private_data;
-	struct dg *data = chip->model_data;
-	u8 reg;
-	int changed;
-
-	if (value->value.enumerated.item[0] > 2)
-		return -EINVAL;
-
-	mutex_lock(&chip->mutex);
-	changed = value->value.enumerated.item[0] != data->output_sel;
-	if (changed) {
-		data->output_sel = value->value.enumerated.item[0];
-
-		reg = data->cs4245_regs[CS4245_SIGNAL_SEL] &
-						~CS4245_A_OUT_SEL_MASK;
-		reg |= data->output_sel == 2 ?
-				CS4245_A_OUT_SEL_DAC : CS4245_A_OUT_SEL_HIZ;
-		cs4245_write_cached(chip, CS4245_SIGNAL_SEL, reg);
-
-		cs4245_write_cached(chip, CS4245_DAC_A_CTRL,
-				    data->output_sel ? data->hp_vol_att : 0);
-		cs4245_write_cached(chip, CS4245_DAC_B_CTRL,
-				    data->output_sel ? data->hp_vol_att : 0);
-
-		oxygen_write16_masked(chip, OXYGEN_GPIO_DATA,
-				      data->output_sel == 1 ? GPIO_HP_REAR : 0,
-				      GPIO_HP_REAR);
+	switch (data->output_sel) {
+	case PLAYBACK_DST_HP:
+	case PLAYBACK_DST_HP_FP:
+		oxygen_write8_masked(chip, OXYGEN_PLAY_ROUTING,
+			OXYGEN_PLAY_MUTE23 | OXYGEN_PLAY_MUTE45 |
+			OXYGEN_PLAY_MUTE67, OXYGEN_PLAY_MUTE_MASK);
+		break;
+	case PLAYBACK_DST_MULTICH:
+		routing = (0 << OXYGEN_PLAY_DAC0_SOURCE_SHIFT) |
+			  (2 << OXYGEN_PLAY_DAC1_SOURCE_SHIFT) |
+			  (1 << OXYGEN_PLAY_DAC2_SOURCE_SHIFT) |
+			  (0 << OXYGEN_PLAY_DAC3_SOURCE_SHIFT);
+		oxygen_write8_masked(chip, OXYGEN_PLAY_ROUTING,
+			OXYGEN_PLAY_MUTE01, OXYGEN_PLAY_MUTE_MASK);
+		break;
 	}
-	mutex_unlock(&chip->mutex);
-	return changed;
+	return routing;
 }
 
-static int hp_volume_offset_info(struct snd_kcontrol *ctl,
-				 struct snd_ctl_elem_info *info)
-{
-	static const char *const names[3] = {
-		"< 64 ohms", "64-150 ohms", "150-300 ohms"
-	};
-
-	return snd_ctl_enum_info(info, 1, 3, names);
-}
-
-static int hp_volume_offset_get(struct snd_kcontrol *ctl,
-				struct snd_ctl_elem_value *value)
-{
-	struct oxygen *chip = ctl->private_data;
-	struct dg *data = chip->model_data;
-
-	mutex_lock(&chip->mutex);
-	if (data->hp_vol_att > 2 * 7)
-		value->value.enumerated.item[0] = 0;
-	else if (data->hp_vol_att > 0)
-		value->value.enumerated.item[0] = 1;
-	else
-		value->value.enumerated.item[0] = 2;
-	mutex_unlock(&chip->mutex);
-	return 0;
-}
-
-static int hp_volume_offset_put(struct snd_kcontrol *ctl,
-				struct snd_ctl_elem_value *value)
-{
-	static const s8 atts[3] = { 2 * 16, 2 * 7, 0 };
-	struct oxygen *chip = ctl->private_data;
-	struct dg *data = chip->model_data;
-	s8 att;
-	int changed;
-
-	if (value->value.enumerated.item[0] > 2)
-		return -EINVAL;
-	att = atts[value->value.enumerated.item[0]];
-	mutex_lock(&chip->mutex);
-	changed = att != data->hp_vol_att;
-	if (changed) {
-		data->hp_vol_att = att;
-		if (data->output_sel) {
-			cs4245_write_cached(chip, CS4245_DAC_A_CTRL, att);
-			cs4245_write_cached(chip, CS4245_DAC_B_CTRL, att);
-		}
-	}
-	mutex_unlock(&chip->mutex);
-	return changed;
-}
-
-static int input_vol_info(struct snd_kcontrol *ctl,
-			  struct snd_ctl_elem_info *info)
-{
-	info->type = SNDRV_CTL_ELEM_TYPE_INTEGER;
-	info->count = 2;
-	info->value.integer.min = 2 * -12;
-	info->value.integer.max = 2 * 12;
-	return 0;
-}
-
-static int input_vol_get(struct snd_kcontrol *ctl,
-			 struct snd_ctl_elem_value *value)
-{
-	struct oxygen *chip = ctl->private_data;
-	struct dg *data = chip->model_data;
-	unsigned int idx = ctl->private_value;
-
-	mutex_lock(&chip->mutex);
-	value->value.integer.value[0] = data->input_vol[idx][0];
-	value->value.integer.value[1] = data->input_vol[idx][1];
-	mutex_unlock(&chip->mutex);
-	return 0;
-}
-
-static int input_vol_put(struct snd_kcontrol *ctl,
-			 struct snd_ctl_elem_value *value)
-{
-	struct oxygen *chip = ctl->private_data;
-	struct dg *data = chip->model_data;
-	unsigned int idx = ctl->private_value;
-	int changed = 0;
-
-	if (value->value.integer.value[0] < 2 * -12 ||
-	    value->value.integer.value[0] > 2 * 12 ||
-	    value->value.integer.value[1] < 2 * -12 ||
-	    value->value.integer.value[1] > 2 * 12)
-		return -EINVAL;
-	mutex_lock(&chip->mutex);
-	changed = data->input_vol[idx][0] != value->value.integer.value[0] ||
-		  data->input_vol[idx][1] != value->value.integer.value[1];
-	if (changed) {
-		data->input_vol[idx][0] = value->value.integer.value[0];
-		data->input_vol[idx][1] = value->value.integer.value[1];
-		if (idx == data->input_sel) {
-			cs4245_write_cached(chip, CS4245_PGA_A_CTRL,
-					    data->input_vol[idx][0]);
-			cs4245_write_cached(chip, CS4245_PGA_B_CTRL,
-					    data->input_vol[idx][1]);
-		}
-	}
-	mutex_unlock(&chip->mutex);
-	return changed;
-}
-
-static DECLARE_TLV_DB_SCALE(cs4245_pga_db_scale, -1200, 50, 0);
-
-static int input_sel_info(struct snd_kcontrol *ctl,
-			  struct snd_ctl_elem_info *info)
-{
-	static const char *const names[4] = {
-		"Mic", "Aux", "Front Mic", "Line"
-	};
-
-	return snd_ctl_enum_info(info, 1, 4, names);
-}
-
-static int input_sel_get(struct snd_kcontrol *ctl,
-			 struct snd_ctl_elem_value *value)
-{
-	struct oxygen *chip = ctl->private_data;
-	struct dg *data = chip->model_data;
-
-	mutex_lock(&chip->mutex);
-	value->value.enumerated.item[0] = data->input_sel;
-	mutex_unlock(&chip->mutex);
-	return 0;
-}
-
-static int input_sel_put(struct snd_kcontrol *ctl,
-			 struct snd_ctl_elem_value *value)
-{
-	static const u8 sel_values[4] = {
-		CS4245_SEL_MIC,
-		CS4245_SEL_INPUT_1,
-		CS4245_SEL_INPUT_2,
-		CS4245_SEL_INPUT_4
-	};
-	struct oxygen *chip = ctl->private_data;
-	struct dg *data = chip->model_data;
-	int changed;
-
-	if (value->value.enumerated.item[0] > 3)
-		return -EINVAL;
-
-	mutex_lock(&chip->mutex);
-	changed = value->value.enumerated.item[0] != data->input_sel;
-	if (changed) {
-		data->input_sel = value->value.enumerated.item[0];
-
-		cs4245_write(chip, CS4245_ANALOG_IN,
-			     (data->cs4245_regs[CS4245_ANALOG_IN] &
-							~CS4245_SEL_MASK) |
-			     sel_values[data->input_sel]);
-
-		cs4245_write_cached(chip, CS4245_PGA_A_CTRL,
-				    data->input_vol[data->input_sel][0]);
-		cs4245_write_cached(chip, CS4245_PGA_B_CTRL,
-				    data->input_vol[data->input_sel][1]);
-
-		oxygen_write16_masked(chip, OXYGEN_GPIO_DATA,
-				      data->input_sel ? 0 : GPIO_INPUT_ROUTE,
-				      GPIO_INPUT_ROUTE);
-	}
-	mutex_unlock(&chip->mutex);
-	return changed;
-}
-
-static int hpf_info(struct snd_kcontrol *ctl, struct snd_ctl_elem_info *info)
-{
-	static const char *const names[2] = { "Active", "Frozen" };
-
-	return snd_ctl_enum_info(info, 1, 2, names);
-}
-
-static int hpf_get(struct snd_kcontrol *ctl, struct snd_ctl_elem_value *value)
-{
-	struct oxygen *chip = ctl->private_data;
-	struct dg *data = chip->model_data;
-
-	value->value.enumerated.item[0] =
-		!!(data->cs4245_regs[CS4245_ADC_CTRL] & CS4245_HPF_FREEZE);
-	return 0;
-}
-
-static int hpf_put(struct snd_kcontrol *ctl, struct snd_ctl_elem_value *value)
-{
-	struct oxygen *chip = ctl->private_data;
-	struct dg *data = chip->model_data;
-	u8 reg;
-	int changed;
-
-	mutex_lock(&chip->mutex);
-	reg = data->cs4245_regs[CS4245_ADC_CTRL] & ~CS4245_HPF_FREEZE;
-	if (value->value.enumerated.item[0])
-		reg |= CS4245_HPF_FREEZE;
-	changed = reg != data->cs4245_regs[CS4245_ADC_CTRL];
-	if (changed)
-		cs4245_write(chip, CS4245_ADC_CTRL, reg);
-	mutex_unlock(&chip->mutex);
-	return changed;
-}
-
-#define INPUT_VOLUME(xname, index) { \
-	.iface = SNDRV_CTL_ELEM_IFACE_MIXER, \
-	.name = xname, \
-	.info = input_vol_info, \
-	.get = input_vol_get, \
-	.put = input_vol_put, \
-	.tlv = { .p = cs4245_pga_db_scale }, \
-	.private_value = index, \
-}
-static const struct snd_kcontrol_new dg_controls[] = {
-	{
-		.iface = SNDRV_CTL_ELEM_IFACE_MIXER,
-		.name = "Analog Output Playback Enum",
-		.info = output_switch_info,
-		.get = output_switch_get,
-		.put = output_switch_put,
-	},
-	{
-		.iface = SNDRV_CTL_ELEM_IFACE_MIXER,
-		.name = "Headphones Impedance Playback Enum",
-		.info = hp_volume_offset_info,
-		.get = hp_volume_offset_get,
-		.put = hp_volume_offset_put,
-	},
-	INPUT_VOLUME("Mic Capture Volume", 0),
-	INPUT_VOLUME("Aux Capture Volume", 1),
-	INPUT_VOLUME("Front Mic Capture Volume", 2),
-	INPUT_VOLUME("Line Capture Volume", 3),
-	{
-		.iface = SNDRV_CTL_ELEM_IFACE_MIXER,
-		.name = "Capture Source",
-		.info = input_sel_info,
-		.get = input_sel_get,
-		.put = input_sel_put,
-	},
-	{
-		.iface = SNDRV_CTL_ELEM_IFACE_MIXER,
-		.name = "ADC High-pass Filter Capture Enum",
-		.info = hpf_info,
-		.get = hpf_get,
-		.put = hpf_put,
-	},
-};
-
-static int dg_control_filter(struct snd_kcontrol_new *template)
-{
-	if (!strncmp(template->name, "Master Playback ", 16))
-		return 1;
-	return 0;
-}
-
-static int dg_mixer_init(struct oxygen *chip)
-{
-	unsigned int i;
-	int err;
-
-	for (i = 0; i < ARRAY_SIZE(dg_controls); ++i) {
-		err = snd_ctl_add(chip->card,
-				  snd_ctl_new1(&dg_controls[i], chip));
-		if (err < 0)
-			return err;
-	}
-	return 0;
-}
-
-static void dump_cs4245_registers(struct oxygen *chip,
+void dump_cs4245_registers(struct oxygen *chip,
 				  struct snd_info_buffer *buffer)
 {
 	struct dg *data = chip->model_data;
-	unsigned int i;
+	unsigned int addr;
 
 	snd_iprintf(buffer, "\nCS4245:");
-	for (i = 1; i <= 0x10; ++i)
-		snd_iprintf(buffer, " %02x", data->cs4245_regs[i]);
+	cs4245_read_spi(chip, CS4245_INT_STATUS);
+	for (addr = 1; addr < ARRAY_SIZE(data->cs4245_shadow); addr++)
+		snd_iprintf(buffer, " %02x", data->cs4245_shadow[addr]);
 	snd_iprintf(buffer, "\n");
 }
-
-struct oxygen_model model_xonar_dg = {
-	.longname = "C-Media Oxygen HD Audio",
-	.chip = "CMI8786",
-	.init = dg_init,
-	.control_filter = dg_control_filter,
-	.mixer_init = dg_mixer_init,
-	.cleanup = dg_cleanup,
-	.suspend = dg_suspend,
-	.resume = dg_resume,
-	.set_dac_params = set_cs4245_dac_params,
-	.set_adc_params = set_cs4245_adc_params,
-	.adjust_dac_routing = adjust_dg_dac_routing,
-	.dump_registers = dump_cs4245_registers,
-	.model_data_size = sizeof(struct dg),
-	.device_config = PLAYBACK_0_TO_I2S |
-			 PLAYBACK_1_TO_SPDIF |
-			 CAPTURE_0_FROM_I2S_2 |
-			 CAPTURE_1_FROM_SPDIF,
-	.dac_channels_pcm = 6,
-	.dac_channels_mixer = 0,
-	.function_flags = OXYGEN_FUNCTION_SPI,
-	.dac_mclks = OXYGEN_MCLKS(256, 128, 128),
-	.adc_mclks = OXYGEN_MCLKS(256, 128, 128),
-	.dac_i2s_format = OXYGEN_I2S_FORMAT_LJUST,
-	.adc_i2s_format = OXYGEN_I2S_FORMAT_LJUST,
-};
diff --git a/sound/pci/oxygen/xonar_dg.h b/sound/pci/oxygen/xonar_dg.h
index 5688d78..d461df3 100644
--- a/sound/pci/oxygen/xonar_dg.h
+++ b/sound/pci/oxygen/xonar_dg.h
@@ -3,6 +3,54 @@
 
 #include "oxygen.h"
 
+#define GPIO_MAGIC		0x0008
+#define GPIO_HP_DETECT		0x0010
+#define GPIO_INPUT_ROUTE	0x0060
+#define GPIO_HP_REAR		0x0080
+#define GPIO_OUTPUT_ENABLE	0x0100
+
+#define CAPTURE_SRC_MIC		0
+#define CAPTURE_SRC_FP_MIC	1
+#define CAPTURE_SRC_LINE	2
+#define CAPTURE_SRC_AUX		3
+
+#define PLAYBACK_DST_HP		0
+#define PLAYBACK_DST_HP_FP	1
+#define PLAYBACK_DST_MULTICH	2
+
+enum cs4245_shadow_operation {
+	CS4245_SAVE_TO_SHADOW,
+	CS4245_LOAD_FROM_SHADOW
+};
+
+struct dg {
+	/* shadow copy of the CS4245 register space */
+	unsigned char cs4245_shadow[17];
+	/* output select: headphone/speakers */
+	unsigned char output_sel;
+	/* volumes for all capture sources */
+	char input_vol[4][2];
+	/* input select: mic/fp mic/line/aux */
+	unsigned char input_sel;
+};
+
+/* Xonar DG control routines */
+int cs4245_write_spi(struct oxygen *chip, u8 reg);
+int cs4245_read_spi(struct oxygen *chip, u8 reg);
+int cs4245_shadow_control(struct oxygen *chip, enum cs4245_shadow_operation op);
+void dg_init(struct oxygen *chip);
+void set_cs4245_dac_params(struct oxygen *chip,
+				  struct snd_pcm_hw_params *params);
+void set_cs4245_adc_params(struct oxygen *chip,
+				  struct snd_pcm_hw_params *params);
+unsigned int adjust_dg_dac_routing(struct oxygen *chip,
+					  unsigned int play_routing);
+void dump_cs4245_registers(struct oxygen *chip,
+				struct snd_info_buffer *buffer);
+void dg_suspend(struct oxygen *chip);
+void dg_resume(struct oxygen *chip);
+void dg_cleanup(struct oxygen *chip);
+
 extern struct oxygen_model model_xonar_dg;
 
 #endif
diff --git a/sound/pci/oxygen/xonar_dg_mixer.c b/sound/pci/oxygen/xonar_dg_mixer.c
new file mode 100644
index 0000000..b885dac
--- /dev/null
+++ b/sound/pci/oxygen/xonar_dg_mixer.c
@@ -0,0 +1,477 @@
+/*
+ * Mixer controls for the Xonar DG/DGX
+ *
+ * Copyright (c) Clemens Ladisch <clemens@ladisch.de>
+ * Copyright (c) Roman Volkov <v1ron@mail.ru>
+ *
+ *  This driver is free software; you can redistribute it and/or modify
+ *  it under the terms of the GNU General Public License, version 2.
+ *
+ *  This driver is distributed in the hope that it will be useful,
+ *  but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *  GNU General Public License for more details.
+ *
+ *  You should have received a copy of the GNU General Public License
+ *  along with this driver; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <linux/pci.h>
+#include <linux/delay.h>
+#include <sound/control.h>
+#include <sound/core.h>
+#include <sound/info.h>
+#include <sound/pcm.h>
+#include <sound/tlv.h>
+#include "oxygen.h"
+#include "xonar_dg.h"
+#include "cs4245.h"
+
+/* analog output select */
+
+static int output_select_apply(struct oxygen *chip)
+{
+	struct dg *data = chip->model_data;
+
+	data->cs4245_shadow[CS4245_SIGNAL_SEL] &= ~CS4245_A_OUT_SEL_MASK;
+	if (data->output_sel == PLAYBACK_DST_HP) {
+		/* mute FP (aux output) amplifier, switch rear jack to CS4245 */
+		oxygen_set_bits8(chip, OXYGEN_GPIO_DATA, GPIO_HP_REAR);
+	} else if (data->output_sel == PLAYBACK_DST_HP_FP) {
+		/*
+		 * Unmute FP amplifier, switch rear jack to CS4361;
+		 * I2S channels 2,3,4 should be inactive.
+		 */
+		oxygen_clear_bits8(chip, OXYGEN_GPIO_DATA, GPIO_HP_REAR);
+		data->cs4245_shadow[CS4245_SIGNAL_SEL] |= CS4245_A_OUT_SEL_DAC;
+	} else {
+		/*
+		 * 2.0, 4.0, 5.1: switch to CS4361, mute FP amp.,
+		 * and change playback routing.
+		 */
+		oxygen_clear_bits8(chip, OXYGEN_GPIO_DATA, GPIO_HP_REAR);
+	}
+	return cs4245_write_spi(chip, CS4245_SIGNAL_SEL);
+}
+
+static int output_select_info(struct snd_kcontrol *ctl,
+			      struct snd_ctl_elem_info *info)
+{
+	static const char *const names[3] = {
+		"Stereo Headphones",
+		"Stereo Headphones FP",
+		"Multichannel",
+	};
+
+	return snd_ctl_enum_info(info, 1, 3, names);
+}
+
+static int output_select_get(struct snd_kcontrol *ctl,
+			     struct snd_ctl_elem_value *value)
+{
+	struct oxygen *chip = ctl->private_data;
+	struct dg *data = chip->model_data;
+
+	mutex_lock(&chip->mutex);
+	value->value.enumerated.item[0] = data->output_sel;
+	mutex_unlock(&chip->mutex);
+	return 0;
+}
+
+static int output_select_put(struct snd_kcontrol *ctl,
+			     struct snd_ctl_elem_value *value)
+{
+	struct oxygen *chip = ctl->private_data;
+	struct dg *data = chip->model_data;
+	unsigned int new = value->value.enumerated.item[0];
+	int changed = 0;
+	int ret;
+
+	mutex_lock(&chip->mutex);
+	if (data->output_sel != new) {
+		data->output_sel = new;
+		ret = output_select_apply(chip);
+		changed = ret >= 0 ? 1 : ret;
+		oxygen_update_dac_routing(chip);
+	}
+	mutex_unlock(&chip->mutex);
+
+	return changed;
+}
+
+/* CS4245 Headphone Channels A&B Volume Control */
+
+static int hp_stereo_volume_info(struct snd_kcontrol *ctl,
+				struct snd_ctl_elem_info *info)
+{
+	info->type = SNDRV_CTL_ELEM_TYPE_INTEGER;
+	info->count = 2;
+	info->value.integer.min = 0;
+	info->value.integer.max = 255;
+	return 0;
+}
+
+static int hp_stereo_volume_get(struct snd_kcontrol *ctl,
+				struct snd_ctl_elem_value *val)
+{
+	struct oxygen *chip = ctl->private_data;
+	struct dg *data = chip->model_data;
+	unsigned int tmp;
+
+	mutex_lock(&chip->mutex);
+	tmp = (~data->cs4245_shadow[CS4245_DAC_A_CTRL]) & 255;
+	val->value.integer.value[0] = tmp;
+	tmp = (~data->cs4245_shadow[CS4245_DAC_B_CTRL]) & 255;
+	val->value.integer.value[1] = tmp;
+	mutex_unlock(&chip->mutex);
+	return 0;
+}
+
+static int hp_stereo_volume_put(struct snd_kcontrol *ctl,
+				struct snd_ctl_elem_value *val)
+{
+	struct oxygen *chip = ctl->private_data;
+	struct dg *data = chip->model_data;
+	int ret;
+	int changed = 0;
+	long new1 = val->value.integer.value[0];
+	long new2 = val->value.integer.value[1];
+
+	if ((new1 > 255) || (new1 < 0) || (new2 > 255) || (new2 < 0))
+		return -EINVAL;
+
+	mutex_lock(&chip->mutex);
+	if ((data->cs4245_shadow[CS4245_DAC_A_CTRL] != ~new1) ||
+	    (data->cs4245_shadow[CS4245_DAC_B_CTRL] != ~new2)) {
+		data->cs4245_shadow[CS4245_DAC_A_CTRL] = ~new1;
+		data->cs4245_shadow[CS4245_DAC_B_CTRL] = ~new2;
+		ret = cs4245_write_spi(chip, CS4245_DAC_A_CTRL);
+		if (ret >= 0)
+			ret = cs4245_write_spi(chip, CS4245_DAC_B_CTRL);
+		changed = ret >= 0 ? 1 : ret;
+	}
+	mutex_unlock(&chip->mutex);
+
+	return changed;
+}
+
+/* Headphone Mute */
+
+static int hp_mute_get(struct snd_kcontrol *ctl,
+			struct snd_ctl_elem_value *val)
+{
+	struct oxygen *chip = ctl->private_data;
+	struct dg *data = chip->model_data;
+
+	mutex_lock(&chip->mutex);
+	val->value.integer.value[0] =
+		!(data->cs4245_shadow[CS4245_DAC_CTRL_1] & CS4245_MUTE_DAC);
+	mutex_unlock(&chip->mutex);
+	return 0;
+}
+
+static int hp_mute_put(struct snd_kcontrol *ctl,
+			struct snd_ctl_elem_value *val)
+{
+	struct oxygen *chip = ctl->private_data;
+	struct dg *data = chip->model_data;
+	int ret;
+	int changed;
+
+	if (val->value.integer.value[0] > 1)
+		return -EINVAL;
+	mutex_lock(&chip->mutex);
+	data->cs4245_shadow[CS4245_DAC_CTRL_1] &= ~CS4245_MUTE_DAC;
+	data->cs4245_shadow[CS4245_DAC_CTRL_1] |=
+		(~val->value.integer.value[0] << 2) & CS4245_MUTE_DAC;
+	ret = cs4245_write_spi(chip, CS4245_DAC_CTRL_1);
+	changed = ret >= 0 ? 1 : ret;
+	mutex_unlock(&chip->mutex);
+	return changed;
+}
+
+/* capture volume for all sources */
+
+static int input_volume_apply(struct oxygen *chip, char left, char right)
+{
+	struct dg *data = chip->model_data;
+	int ret;
+
+	data->cs4245_shadow[CS4245_PGA_A_CTRL] = left;
+	data->cs4245_shadow[CS4245_PGA_B_CTRL] = right;
+	ret = cs4245_write_spi(chip, CS4245_PGA_A_CTRL);
+	if (ret < 0)
+		return ret;
+	return cs4245_write_spi(chip, CS4245_PGA_B_CTRL);
+}
+
+static int input_vol_info(struct snd_kcontrol *ctl,
+			  struct snd_ctl_elem_info *info)
+{
+	info->type = SNDRV_CTL_ELEM_TYPE_INTEGER;
+	info->count = 2;
+	info->value.integer.min = 2 * -12;
+	info->value.integer.max = 2 * 12;
+	return 0;
+}
+
+static int input_vol_get(struct snd_kcontrol *ctl,
+			 struct snd_ctl_elem_value *value)
+{
+	struct oxygen *chip = ctl->private_data;
+	struct dg *data = chip->model_data;
+	unsigned int idx = ctl->private_value;
+
+	mutex_lock(&chip->mutex);
+	value->value.integer.value[0] = data->input_vol[idx][0];
+	value->value.integer.value[1] = data->input_vol[idx][1];
+	mutex_unlock(&chip->mutex);
+	return 0;
+}
+
+static int input_vol_put(struct snd_kcontrol *ctl,
+			 struct snd_ctl_elem_value *value)
+{
+	struct oxygen *chip = ctl->private_data;
+	struct dg *data = chip->model_data;
+	unsigned int idx = ctl->private_value;
+	int changed = 0;
+	int ret = 0;
+
+	if (value->value.integer.value[0] < 2 * -12 ||
+	    value->value.integer.value[0] > 2 * 12 ||
+	    value->value.integer.value[1] < 2 * -12 ||
+	    value->value.integer.value[1] > 2 * 12)
+		return -EINVAL;
+	mutex_lock(&chip->mutex);
+	changed = data->input_vol[idx][0] != value->value.integer.value[0] ||
+		  data->input_vol[idx][1] != value->value.integer.value[1];
+	if (changed) {
+		data->input_vol[idx][0] = value->value.integer.value[0];
+		data->input_vol[idx][1] = value->value.integer.value[1];
+		if (idx == data->input_sel) {
+			ret = input_volume_apply(chip,
+				data->input_vol[idx][0],
+				data->input_vol[idx][1]);
+		}
+		changed = ret >= 0 ? 1 : ret;
+	}
+	mutex_unlock(&chip->mutex);
+	return changed;
+}
+
+/* Capture Source */
+
+static int input_source_apply(struct oxygen *chip)
+{
+	struct dg *data = chip->model_data;
+
+	data->cs4245_shadow[CS4245_ANALOG_IN] &= ~CS4245_SEL_MASK;
+	if (data->input_sel == CAPTURE_SRC_FP_MIC)
+		data->cs4245_shadow[CS4245_ANALOG_IN] |= CS4245_SEL_INPUT_2;
+	else if (data->input_sel == CAPTURE_SRC_LINE)
+		data->cs4245_shadow[CS4245_ANALOG_IN] |= CS4245_SEL_INPUT_4;
+	else if (data->input_sel != CAPTURE_SRC_MIC)
+		data->cs4245_shadow[CS4245_ANALOG_IN] |= CS4245_SEL_INPUT_1;
+	return cs4245_write_spi(chip, CS4245_ANALOG_IN);
+}
+
+static int input_sel_info(struct snd_kcontrol *ctl,
+			  struct snd_ctl_elem_info *info)
+{
+	static const char *const names[4] = {
+		"Mic", "Front Mic", "Line", "Aux"
+	};
+
+	return snd_ctl_enum_info(info, 1, 4, names);
+}
+
+static int input_sel_get(struct snd_kcontrol *ctl,
+			 struct snd_ctl_elem_value *value)
+{
+	struct oxygen *chip = ctl->private_data;
+	struct dg *data = chip->model_data;
+
+	mutex_lock(&chip->mutex);
+	value->value.enumerated.item[0] = data->input_sel;
+	mutex_unlock(&chip->mutex);
+	return 0;
+}
+
+static int input_sel_put(struct snd_kcontrol *ctl,
+			 struct snd_ctl_elem_value *value)
+{
+	struct oxygen *chip = ctl->private_data;
+	struct dg *data = chip->model_data;
+	int changed;
+	int ret;
+
+	if (value->value.enumerated.item[0] > 3)
+		return -EINVAL;
+
+	mutex_lock(&chip->mutex);
+	changed = value->value.enumerated.item[0] != data->input_sel;
+	if (changed) {
+		data->input_sel = value->value.enumerated.item[0];
+
+		ret = input_source_apply(chip);
+		if (ret >= 0)
+			ret = input_volume_apply(chip,
+				data->input_vol[data->input_sel][0],
+				data->input_vol[data->input_sel][1]);
+		changed = ret >= 0 ? 1 : ret;
+	}
+	mutex_unlock(&chip->mutex);
+	return changed;
+}
+
+/* ADC high-pass filter */
+
+static int hpf_info(struct snd_kcontrol *ctl, struct snd_ctl_elem_info *info)
+{
+	static const char *const names[2] = { "Active", "Frozen" };
+
+	return snd_ctl_enum_info(info, 1, 2, names);
+}
+
+static int hpf_get(struct snd_kcontrol *ctl, struct snd_ctl_elem_value *value)
+{
+	struct oxygen *chip = ctl->private_data;
+	struct dg *data = chip->model_data;
+
+	value->value.enumerated.item[0] =
+		!!(data->cs4245_shadow[CS4245_ADC_CTRL] & CS4245_HPF_FREEZE);
+	return 0;
+}
+
+static int hpf_put(struct snd_kcontrol *ctl, struct snd_ctl_elem_value *value)
+{
+	struct oxygen *chip = ctl->private_data;
+	struct dg *data = chip->model_data;
+	u8 reg;
+	int changed;
+
+	mutex_lock(&chip->mutex);
+	reg = data->cs4245_shadow[CS4245_ADC_CTRL] & ~CS4245_HPF_FREEZE;
+	if (value->value.enumerated.item[0])
+		reg |= CS4245_HPF_FREEZE;
+	changed = reg != data->cs4245_shadow[CS4245_ADC_CTRL];
+	if (changed) {
+		data->cs4245_shadow[CS4245_ADC_CTRL] = reg;
+		cs4245_write_spi(chip, CS4245_ADC_CTRL);
+	}
+	mutex_unlock(&chip->mutex);
+	return changed;
+}
+
+#define INPUT_VOLUME(xname, index) { \
+	.iface = SNDRV_CTL_ELEM_IFACE_MIXER, \
+	.name = xname, \
+	.access = SNDRV_CTL_ELEM_ACCESS_READWRITE | \
+		  SNDRV_CTL_ELEM_ACCESS_TLV_READ, \
+	.info = input_vol_info, \
+	.get = input_vol_get, \
+	.put = input_vol_put, \
+	.tlv = { .p = pga_db_scale }, \
+	.private_value = index, \
+}
+static const DECLARE_TLV_DB_MINMAX(hp_db_scale, -12550, 0);
+static const DECLARE_TLV_DB_MINMAX(pga_db_scale, -1200, 1200);
+static const struct snd_kcontrol_new dg_controls[] = {
+	{
+		.iface = SNDRV_CTL_ELEM_IFACE_MIXER,
+		.name = "Analog Output Playback Enum",
+		.info = output_select_info,
+		.get = output_select_get,
+		.put = output_select_put,
+	},
+	{
+		.iface = SNDRV_CTL_ELEM_IFACE_MIXER,
+		.name = "Headphone Playback Volume",
+		.access = SNDRV_CTL_ELEM_ACCESS_READWRITE |
+			  SNDRV_CTL_ELEM_ACCESS_TLV_READ,
+		.info = hp_stereo_volume_info,
+		.get = hp_stereo_volume_get,
+		.put = hp_stereo_volume_put,
+		.tlv = { .p = hp_db_scale, },
+	},
+	{
+		.iface = SNDRV_CTL_ELEM_IFACE_MIXER,
+		.name = "Headphone Playback Switch",
+		.access = SNDRV_CTL_ELEM_ACCESS_READWRITE,
+		.info = snd_ctl_boolean_mono_info,
+		.get = hp_mute_get,
+		.put = hp_mute_put,
+	},
+	INPUT_VOLUME("Mic Capture Volume", CAPTURE_SRC_MIC),
+	INPUT_VOLUME("Front Mic Capture Volume", CAPTURE_SRC_FP_MIC),
+	INPUT_VOLUME("Line Capture Volume", CAPTURE_SRC_LINE),
+	INPUT_VOLUME("Aux Capture Volume", CAPTURE_SRC_AUX),
+	{
+		.iface = SNDRV_CTL_ELEM_IFACE_MIXER,
+		.name = "Capture Source",
+		.info = input_sel_info,
+		.get = input_sel_get,
+		.put = input_sel_put,
+	},
+	{
+		.iface = SNDRV_CTL_ELEM_IFACE_MIXER,
+		.name = "ADC High-pass Filter Capture Enum",
+		.info = hpf_info,
+		.get = hpf_get,
+		.put = hpf_put,
+	},
+};
+
+static int dg_control_filter(struct snd_kcontrol_new *template)
+{
+	if (!strncmp(template->name, "Master Playback ", 16))
+		return 1;
+	return 0;
+}
+
+static int dg_mixer_init(struct oxygen *chip)
+{
+	unsigned int i;
+	int err;
+
+	output_select_apply(chip);
+	input_source_apply(chip);
+	oxygen_update_dac_routing(chip);
+
+	for (i = 0; i < ARRAY_SIZE(dg_controls); ++i) {
+		err = snd_ctl_add(chip->card,
+				  snd_ctl_new1(&dg_controls[i], chip));
+		if (err < 0)
+			return err;
+	}
+
+	return 0;
+}
+
+struct oxygen_model model_xonar_dg = {
+	.longname = "C-Media Oxygen HD Audio",
+	.chip = "CMI8786",
+	.init = dg_init,
+	.control_filter = dg_control_filter,
+	.mixer_init = dg_mixer_init,
+	.cleanup = dg_cleanup,
+	.suspend = dg_suspend,
+	.resume = dg_resume,
+	.set_dac_params = set_cs4245_dac_params,
+	.set_adc_params = set_cs4245_adc_params,
+	.adjust_dac_routing = adjust_dg_dac_routing,
+	.dump_registers = dump_cs4245_registers,
+	.model_data_size = sizeof(struct dg),
+	.device_config = PLAYBACK_0_TO_I2S |
+			 PLAYBACK_1_TO_SPDIF |
+			 CAPTURE_0_FROM_I2S_1 |
+			 CAPTURE_1_FROM_SPDIF,
+	.dac_channels_pcm = 6,
+	.dac_channels_mixer = 0,
+	.function_flags = OXYGEN_FUNCTION_SPI,
+	.dac_mclks = OXYGEN_MCLKS(256, 128, 128),
+	.adc_mclks = OXYGEN_MCLKS(256, 128, 128),
+	.dac_i2s_format = OXYGEN_I2S_FORMAT_LJUST,
+	.adc_i2s_format = OXYGEN_I2S_FORMAT_LJUST,
+};
diff --git a/sound/soc/codecs/tlv320aic32x4.c b/sound/soc/codecs/tlv320aic32x4.c
index 385dec1..688151b 100644
--- a/sound/soc/codecs/tlv320aic32x4.c
+++ b/sound/soc/codecs/tlv320aic32x4.c
@@ -450,6 +450,17 @@
 	}
 	snd_soc_write(codec, AIC32X4_IFACE1, data);
 
+	if (params_channels(params) == 1) {
+		data = AIC32X4_RDAC2LCHN | AIC32X4_LDAC2LCHN;
+	} else {
+		if (aic32x4->swapdacs)
+			data = AIC32X4_RDAC2LCHN | AIC32X4_LDAC2RCHN;
+		else
+			data = AIC32X4_LDAC2LCHN | AIC32X4_RDAC2RCHN;
+	}
+	snd_soc_update_bits(codec, AIC32X4_DACSETUP, AIC32X4_DAC_CHAN_MASK,
+			data);
+
 	return 0;
 }
 
@@ -606,20 +617,15 @@
 	}
 	snd_soc_write(codec, AIC32X4_CMMODE, tmp_reg);
 
-	/* Do DACs need to be swapped? */
-	if (aic32x4->swapdacs) {
-		snd_soc_write(codec, AIC32X4_DACSETUP, AIC32X4_LDAC2RCHN | AIC32X4_RDAC2LCHN);
-	} else {
-		snd_soc_write(codec, AIC32X4_DACSETUP, AIC32X4_LDAC2LCHN | AIC32X4_RDAC2RCHN);
-	}
-
 	/* Mic PGA routing */
-	if (aic32x4->micpga_routing & AIC32X4_MICPGA_ROUTE_LMIC_IN2R_10K) {
+	if (aic32x4->micpga_routing & AIC32X4_MICPGA_ROUTE_LMIC_IN2R_10K)
 		snd_soc_write(codec, AIC32X4_LMICPGANIN, AIC32X4_LMICPGANIN_IN2R_10K);
-	}
-	if (aic32x4->micpga_routing & AIC32X4_MICPGA_ROUTE_RMIC_IN1L_10K) {
+	else
+		snd_soc_write(codec, AIC32X4_LMICPGANIN, AIC32X4_LMICPGANIN_CM1L_10K);
+	if (aic32x4->micpga_routing & AIC32X4_MICPGA_ROUTE_RMIC_IN1L_10K)
 		snd_soc_write(codec, AIC32X4_RMICPGANIN, AIC32X4_RMICPGANIN_IN1L_10K);
-	}
+	else
+		snd_soc_write(codec, AIC32X4_RMICPGANIN, AIC32X4_RMICPGANIN_CM1R_10K);
 
 	aic32x4_set_bias_level(codec, SND_SOC_BIAS_STANDBY);
 
diff --git a/sound/soc/codecs/tlv320aic32x4.h b/sound/soc/codecs/tlv320aic32x4.h
index 3577422..995f033 100644
--- a/sound/soc/codecs/tlv320aic32x4.h
+++ b/sound/soc/codecs/tlv320aic32x4.h
@@ -120,7 +120,9 @@
 #define AIC32X4_MICBIAS_2075V		0x60
 
 #define AIC32X4_LMICPGANIN_IN2R_10K	0x10
+#define AIC32X4_LMICPGANIN_CM1L_10K	0x40
 #define AIC32X4_RMICPGANIN_IN1L_10K	0x10
+#define AIC32X4_RMICPGANIN_CM1R_10K	0x40
 
 #define AIC32X4_LMICPGAVOL_NOGAIN	0x80
 #define AIC32X4_RMICPGAVOL_NOGAIN	0x80
@@ -138,6 +140,7 @@
 #define AIC32X4_LDAC2RCHN		(0x02 << 4)
 #define AIC32X4_LDAC2LCHN		(0x01 << 4)
 #define AIC32X4_RDAC2RCHN		(0x01 << 2)
+#define AIC32X4_DAC_CHAN_MASK		0x3c
 
 #define AIC32X4_SSTEP2WCLK		0x01
 #define AIC32X4_MUTEON			0x0C
diff --git a/sound/soc/codecs/wm5100.c b/sound/soc/codecs/wm5100.c
index 4cf91de..4e3e31a 100644
--- a/sound/soc/codecs/wm5100.c
+++ b/sound/soc/codecs/wm5100.c
@@ -14,6 +14,7 @@
 #include <linux/moduleparam.h>
 #include <linux/init.h>
 #include <linux/delay.h>
+#include <linux/export.h>
 #include <linux/pm.h>
 #include <linux/gcd.h>
 #include <linux/gpio.h>
@@ -2141,6 +2142,7 @@
 
 	return 0;
 }
+EXPORT_SYMBOL_GPL(wm5100_detect);
 
 static irqreturn_t wm5100_irq(int irq, void *data)
 {
diff --git a/sound/soc/codecs/wm5110.c b/sound/soc/codecs/wm5110.c
index d862f76..2c3c962 100644
--- a/sound/soc/codecs/wm5110.c
+++ b/sound/soc/codecs/wm5110.c
@@ -81,6 +81,54 @@
 	{ 0x3133, 0x1201 },
 	{ 0x3183, 0x1501 },
 	{ 0x31D3, 0x1401 },
+	{ 0x0049, 0x01ea },
+	{ 0x004a, 0x01f2 },
+	{ 0x0057, 0x01e7 },
+	{ 0x0058, 0x01fb },
+	{ 0x33ce, 0xc4f5 },
+	{ 0x33cf, 0x1361 },
+	{ 0x33d0, 0x0402 },
+	{ 0x33d1, 0x4700 },
+	{ 0x33d2, 0x026d },
+	{ 0x33d3, 0xff00 },
+	{ 0x33d4, 0x026d },
+	{ 0x33d5, 0x0101 },
+	{ 0x33d6, 0xc4f5 },
+	{ 0x33d7, 0x0361 },
+	{ 0x33d8, 0x0402 },
+	{ 0x33d9, 0x6701 },
+	{ 0x33da, 0xc4f5 },
+	{ 0x33db, 0x136f },
+	{ 0x33dc, 0xc4f5 },
+	{ 0x33dd, 0x134f },
+	{ 0x33de, 0xc4f5 },
+	{ 0x33df, 0x131f },
+	{ 0x33e0, 0x026d },
+	{ 0x33e1, 0x4f01 },
+	{ 0x33e2, 0x026d },
+	{ 0x33e3, 0xf100 },
+	{ 0x33e4, 0x026d },
+	{ 0x33e5, 0x0001 },
+	{ 0x33e6, 0xc4f5 },
+	{ 0x33e7, 0x0361 },
+	{ 0x33e8, 0x0402 },
+	{ 0x33e9, 0x6601 },
+	{ 0x33ea, 0xc4f5 },
+	{ 0x33eb, 0x136f },
+	{ 0x33ec, 0xc4f5 },
+	{ 0x33ed, 0x134f },
+	{ 0x33ee, 0xc4f5 },
+	{ 0x33ef, 0x131f },
+	{ 0x33f0, 0x026d },
+	{ 0x33f1, 0x4e01 },
+	{ 0x33f2, 0x026d },
+	{ 0x33f3, 0xf000 },
+	{ 0x33f6, 0xc4f5 },
+	{ 0x33f7, 0x1361 },
+	{ 0x33f8, 0x0402 },
+	{ 0x33f9, 0x4600 },
+	{ 0x33fa, 0x026d },
+	{ 0x33fb, 0xfe00 },
 };
 
 static int wm5110_sysclk_ev(struct snd_soc_dapm_widget *w,
diff --git a/sound/soc/fsl/fsl_ssi.c b/sound/soc/fsl/fsl_ssi.c
index f9090b1..5428a1f 100644
--- a/sound/soc/fsl/fsl_ssi.c
+++ b/sound/soc/fsl/fsl_ssi.c
@@ -164,6 +164,7 @@
 	bool baudclk_locked;
 	bool irq_stats;
 	bool offline_config;
+	bool use_dual_fifo;
 	u8 i2s_mode;
 	spinlock_t baudclk_lock;
 	struct clk *baudclk;
@@ -721,6 +722,12 @@
 				CCSR_SSI_SxCCR_DC(2));
 	}
 
+	if (ssi_private->use_dual_fifo) {
+		write_ssi_mask(&ssi->srcr, 0, CCSR_SSI_SRCR_RFEN1);
+		write_ssi_mask(&ssi->stcr, 0, CCSR_SSI_STCR_TFEN1);
+		write_ssi_mask(&ssi->scr, 0, CCSR_SSI_SCR_TCH_EN);
+	}
+
 	return 0;
 }
 
@@ -752,6 +759,15 @@
 		spin_unlock_irqrestore(&ssi_private->baudclk_lock, flags);
 	}
 
+	/* When using dual fifo mode, it is safer to ensure an even period
+	 * size. With an odd period size, since DMA always starts its
+	 * task from fifo0, fifo1 would be neglected at the end of each
+	 * period, yet the SSI would still access fifo1 and read invalid data.
+	 */
+	if (ssi_private->use_dual_fifo)
+		snd_pcm_hw_constraint_step(substream->runtime, 0,
+				SNDRV_PCM_HW_PARAM_PERIOD_SIZE, 2);
+
 	return 0;
 }
 
@@ -1262,18 +1278,13 @@
 		return -EINVAL;
 	hw_type = (enum fsl_ssi_type) of_id->data;
 
-	/* We only support the SSI in "I2S Slave" mode */
 	sprop = of_get_property(np, "fsl,mode", NULL);
 	if (!sprop) {
 		dev_err(&pdev->dev, "fsl,mode property is necessary\n");
 		return -EINVAL;
 	}
-	if (!strcmp(sprop, "ac97-slave")) {
+	if (!strcmp(sprop, "ac97-slave"))
 		ac97 = true;
-	} else if (strcmp(sprop, "i2s-slave")) {
-		dev_notice(&pdev->dev, "mode %s is unsupported\n", sprop);
-		return -ENODEV;
-	}
 
 	/* The DAI name is the last part of the full name of the node. */
 	p = strrchr(np->full_name, '/') + 1;
@@ -1370,7 +1381,7 @@
 
 	if (hw_type == FSL_SSI_MX21 || hw_type == FSL_SSI_MX51 ||
 			hw_type == FSL_SSI_MX35) {
-		u32 dma_events[2];
+		u32 dma_events[2], dmas[4];
 		ssi_private->ssi_on_imx = true;
 
 		ssi_private->clk = devm_clk_get(&pdev->dev, NULL);
@@ -1391,7 +1402,7 @@
 		 */
 		ssi_private->baudclk = devm_clk_get(&pdev->dev, "baud");
 		if (IS_ERR(ssi_private->baudclk))
-			dev_warn(&pdev->dev, "could not get baud clock: %ld\n",
+			dev_dbg(&pdev->dev, "could not get baud clock: %ld\n",
 				 PTR_ERR(ssi_private->baudclk));
 		else
 			clk_prepare_enable(ssi_private->baudclk);
@@ -1426,6 +1437,16 @@
 				goto error_clk;
 			}
 		}
+		/* Should this be merged with the above? */
+		if (!of_property_read_u32_array(pdev->dev.of_node, "dmas", dmas, 4)
+				&& dmas[2] == IMX_DMATYPE_SSI_DUAL) {
+			ssi_private->use_dual_fifo = true;
+			/* When using dual fifo mode, we need to keep watermark
+			 * as even numbers due to dma script limitation.
+			 */
+			ssi_private->dma_params_tx.maxburst &= ~0x1;
+			ssi_private->dma_params_rx.maxburst &= ~0x1;
+		}
 
 		shared = of_device_is_compatible(of_get_parent(np),
 			    "fsl,spba-bus");
diff --git a/sound/soc/omap/Kconfig b/sound/soc/omap/Kconfig
index 4a07f71..22ad9c5 100644
--- a/sound/soc/omap/Kconfig
+++ b/sound/soc/omap/Kconfig
@@ -30,6 +30,7 @@
 	select SND_OMAP_SOC_MCBSP
 	select SND_SOC_TLV320AIC3X
 	select SND_SOC_TPA6130A2
+	depends on GPIOLIB
 	help
 	  Say Y if you want to add support for SoC audio on Nokia RX-51
 	  hardware. This is also known as Nokia N900 product.
diff --git a/sound/soc/samsung/Kconfig b/sound/soc/samsung/Kconfig
index 27930fc..454f41c 100644
--- a/sound/soc/samsung/Kconfig
+++ b/sound/soc/samsung/Kconfig
@@ -19,7 +19,7 @@
 
 config SND_S3C24XX_I2S
 	tristate
-	select S3C2410_DMA
+	select S3C24XX_DMA
 
 config SND_S3C_I2SV2_SOC
 	tristate
@@ -210,7 +210,7 @@
 
 config SND_SOC_BELLS
 	tristate "Audio support for Wolfson Bells"
-	depends on SND_SOC_SAMSUNG && MACH_WLF_CRAGG_6410
+	depends on SND_SOC_SAMSUNG && MACH_WLF_CRAGG_6410 && MFD_ARIZONA
 	select SND_SAMSUNG_I2S
 	select SND_SOC_WM5102
 	select SND_SOC_WM5110
diff --git a/sound/soc/samsung/h1940_uda1380.c b/sound/soc/samsung/h1940_uda1380.c
index fa91376..fbced58 100644
--- a/sound/soc/samsung/h1940_uda1380.c
+++ b/sound/soc/samsung/h1940_uda1380.c
@@ -23,6 +23,7 @@
 #include "regs-iis.h"
 #include <asm/mach-types.h>
 
+#include <mach/gpio-samsung.h>
 #include "s3c24xx-i2s.h"
 
 static unsigned int rates[] = {
diff --git a/sound/soc/samsung/i2s.c b/sound/soc/samsung/i2s.c
index 92f64363..0a9b44c 100644
--- a/sound/soc/samsung/i2s.c
+++ b/sound/soc/samsung/i2s.c
@@ -22,8 +22,6 @@
 #include <sound/soc.h>
 #include <sound/pcm_params.h>
 
-#include <mach/dma.h>
-
 #include <linux/platform_data/asoc-s3c.h>
 
 #include "dma.h"
@@ -1268,7 +1266,8 @@
 
 	return 0;
 err:
-	release_mem_region(regs_base, resource_size(res));
+	if (res)
+		release_mem_region(regs_base, resource_size(res));
 
 	return ret;
 }
diff --git a/sound/soc/samsung/neo1973_wm8753.c b/sound/soc/samsung/neo1973_wm8753.c
index 807db41..98a04c1 100644
--- a/sound/soc/samsung/neo1973_wm8753.c
+++ b/sound/soc/samsung/neo1973_wm8753.c
@@ -20,6 +20,7 @@
 
 #include <sound/soc.h>
 
+#include <mach/gpio-samsung.h>
 #include <asm/mach-types.h>
 #include "regs-iis.h"
 
diff --git a/sound/soc/samsung/rx1950_uda1380.c b/sound/soc/samsung/rx1950_uda1380.c
index 704460a..06ebdc0 100644
--- a/sound/soc/samsung/rx1950_uda1380.c
+++ b/sound/soc/samsung/rx1950_uda1380.c
@@ -24,6 +24,7 @@
 #include <sound/soc.h>
 #include <sound/jack.h>
 
+#include <mach/gpio-samsung.h>
 #include "regs-iis.h"
 #include <asm/mach-types.h>
 
diff --git a/sound/soc/samsung/s3c-i2s-v2.c b/sound/soc/samsung/s3c-i2s-v2.c
index fefc561..79e7efb 100644
--- a/sound/soc/samsung/s3c-i2s-v2.c
+++ b/sound/soc/samsung/s3c-i2s-v2.c
@@ -729,7 +729,7 @@
 			   struct snd_soc_component_driver *cmp_drv,
 			   struct snd_soc_dai_driver *dai_drv)
 {
-	struct snd_soc_dai_ops *ops = drv->ops;
+	struct snd_soc_dai_ops *ops = dai_drv->ops;
 
 	ops->trigger = s3c2412_i2s_trigger;
 	if (!ops->hw_params)
@@ -742,8 +742,8 @@
 	if (!ops->delay)
 		ops->delay = s3c2412_i2s_delay;
 
-	drv->suspend = s3c2412_i2s_suspend;
-	drv->resume = s3c2412_i2s_resume;
+	dai_drv->suspend = s3c2412_i2s_suspend;
+	dai_drv->resume = s3c2412_i2s_resume;
 
 	return snd_soc_register_component(dev, cmp_drv, dai_drv, 1);
 }
diff --git a/sound/soc/samsung/s3c2412-i2s.c b/sound/soc/samsung/s3c2412-i2s.c
index ea885cb..d079445 100644
--- a/sound/soc/samsung/s3c2412-i2s.c
+++ b/sound/soc/samsung/s3c2412-i2s.c
@@ -26,6 +26,8 @@
 #include <sound/pcm_params.h>
 
 #include <mach/dma.h>
+#include <mach/gpio-samsung.h>
+#include <plat/gpio-cfg.h>
 
 #include "dma.h"
 #include "regs-i2s-v2.h"
diff --git a/sound/soc/samsung/s3c24xx-i2s.c b/sound/soc/samsung/s3c24xx-i2s.c
index 9c8ebd8..f31e916 100644
--- a/sound/soc/samsung/s3c24xx-i2s.c
+++ b/sound/soc/samsung/s3c24xx-i2s.c
@@ -24,6 +24,8 @@
 #include <sound/pcm_params.h>
 
 #include <mach/dma.h>
+#include <mach/gpio-samsung.h>
+#include <plat/gpio-cfg.h>
 #include "regs-iis.h"
 
 #include "dma.h"
diff --git a/sound/soc/samsung/smartq_wm8987.c b/sound/soc/samsung/smartq_wm8987.c
index 58ae323..c3b2ada 100644
--- a/sound/soc/samsung/smartq_wm8987.c
+++ b/sound/soc/samsung/smartq_wm8987.c
@@ -19,6 +19,7 @@
 #include <sound/soc.h>
 #include <sound/jack.h>
 
+#include <mach/gpio-samsung.h>
 #include <asm/mach-types.h>
 
 #include "i2s.h"
diff --git a/sound/soc/samsung/smdk_wm8994.c b/sound/soc/samsung/smdk_wm8994.c
index b072bd1..d38ae98 100644
--- a/sound/soc/samsung/smdk_wm8994.c
+++ b/sound/soc/samsung/smdk_wm8994.c
@@ -152,13 +152,11 @@
 	.num_links = ARRAY_SIZE(smdk_dai),
 };
 
-#ifdef CONFIG_OF
 static const struct of_device_id samsung_wm8994_of_match[] = {
 	{ .compatible = "samsung,smdk-wm8994", .data = &smdk_board_data },
 	{},
 };
 MODULE_DEVICE_TABLE(of, samsung_wm8994_of_match);
-#endif /* CONFIG_OF */
 
 static int smdk_audio_probe(struct platform_device *pdev)
 {
@@ -188,7 +186,7 @@
 		smdk_dai[0].platform_of_node = smdk_dai[0].cpu_of_node;
 	}
 
-	id = of_match_device(samsung_wm8994_of_match, &pdev->dev);
+	id = of_match_device(of_match_ptr(samsung_wm8994_of_match), &pdev->dev);
 	if (id)
 		*board = *((struct smdk_wm8994_data *)id->data);
 
diff --git a/sound/usb/Kconfig b/sound/usb/Kconfig
index de9408b..e05a86b 100644
--- a/sound/usb/Kconfig
+++ b/sound/usb/Kconfig
@@ -14,6 +14,7 @@
 	select SND_HWDEP
 	select SND_RAWMIDI
 	select SND_PCM
+	select BITREVERSE
 	help
 	  Say Y here to include support for USB audio and USB MIDI
 	  devices.
diff --git a/tools/perf/builtin-buildid-cache.c b/tools/perf/builtin-buildid-cache.c
index cfede86..b22dbb1 100644
--- a/tools/perf/builtin-buildid-cache.c
+++ b/tools/perf/builtin-buildid-cache.c
@@ -63,11 +63,35 @@
 	return 0;
 }
 
+static bool same_kallsyms_reloc(const char *from_dir, char *to_dir)
+{
+	char from[PATH_MAX];
+	char to[PATH_MAX];
+	const char *name;
+	u64 addr1 = 0, addr2 = 0;
+	int i;
+
+	scnprintf(from, sizeof(from), "%s/kallsyms", from_dir);
+	scnprintf(to, sizeof(to), "%s/kallsyms", to_dir);
+
+	for (i = 0; (name = ref_reloc_sym_names[i]) != NULL; i++) {
+		addr1 = kallsyms__get_function_start(from, name);
+		if (addr1)
+			break;
+	}
+
+	if (name)
+		addr2 = kallsyms__get_function_start(to, name);
+
+	return addr1 == addr2;
+}
+
 static int build_id_cache__kcore_existing(const char *from_dir, char *to_dir,
 					  size_t to_dir_sz)
 {
 	char from[PATH_MAX];
 	char to[PATH_MAX];
+	char to_subdir[PATH_MAX];
 	struct dirent *dent;
 	int ret = -1;
 	DIR *d;
@@ -86,10 +110,11 @@
 			continue;
 		scnprintf(to, sizeof(to), "%s/%s/modules", to_dir,
 			  dent->d_name);
-		if (!compare_proc_modules(from, to)) {
-			scnprintf(to, sizeof(to), "%s/%s", to_dir,
-				  dent->d_name);
-			strlcpy(to_dir, to, to_dir_sz);
+		scnprintf(to_subdir, sizeof(to_subdir), "%s/%s",
+			  to_dir, dent->d_name);
+		if (!compare_proc_modules(from, to) &&
+		    same_kallsyms_reloc(from_dir, to_subdir)) {
+			strlcpy(to_dir, to_subdir, to_dir_sz);
 			ret = 0;
 			break;
 		}
diff --git a/tools/perf/builtin-record.c b/tools/perf/builtin-record.c
index 3c394bf..af47531 100644
--- a/tools/perf/builtin-record.c
+++ b/tools/perf/builtin-record.c
@@ -287,10 +287,7 @@
 	 * have no _text sometimes.
 	 */
 	err = perf_event__synthesize_kernel_mmap(tool, process_synthesized_event,
-						 machine, "_text");
-	if (err < 0)
-		err = perf_event__synthesize_kernel_mmap(tool, process_synthesized_event,
-							 machine, "_stext");
+						 machine);
 	if (err < 0)
 		pr_err("Couldn't record guest kernel [%d]'s reference"
 		       " relocation symbol.\n", machine->pid);
@@ -457,10 +454,7 @@
 	}
 
 	err = perf_event__synthesize_kernel_mmap(tool, process_synthesized_event,
-						 machine, "_text");
-	if (err < 0)
-		err = perf_event__synthesize_kernel_mmap(tool, process_synthesized_event,
-							 machine, "_stext");
+						 machine);
 	if (err < 0)
 		pr_err("Couldn't record kernel reference relocation symbol\n"
 		       "Symbol resolution may be skewed if relocation was used (e.g. kexec).\n"
diff --git a/tools/perf/design.txt b/tools/perf/design.txt
index 67e5d0c..63a0e6f 100644
--- a/tools/perf/design.txt
+++ b/tools/perf/design.txt
@@ -454,7 +454,6 @@
 will need at least this:
 	- asm/perf_event.h - a basic stub will suffice at first
 	- support for atomic64 types (and associated helper functions)
-	- set_perf_event_pending() implemented
 
 If your architecture does have hardware capabilities, you can override the
 weak stub hw_perf_event_init() to register hardware counters.
diff --git a/tools/perf/perf.h b/tools/perf/perf.h
index 7daa806..e84fa26 100644
--- a/tools/perf/perf.h
+++ b/tools/perf/perf.h
@@ -100,8 +100,8 @@
 
 #ifdef __aarch64__
 #define mb()		asm volatile("dmb ish" ::: "memory")
-#define wmb()		asm volatile("dmb ishld" ::: "memory")
-#define rmb()		asm volatile("dmb ishst" ::: "memory")
+#define wmb()		asm volatile("dmb ishst" ::: "memory")
+#define rmb()		asm volatile("dmb ishld" ::: "memory")
 #define cpu_relax()	asm volatile("yield" ::: "memory")
 #endif
 
diff --git a/tools/perf/tests/vmlinux-kallsyms.c b/tools/perf/tests/vmlinux-kallsyms.c
index 2bd13ed..3d90880 100644
--- a/tools/perf/tests/vmlinux-kallsyms.c
+++ b/tools/perf/tests/vmlinux-kallsyms.c
@@ -26,7 +26,6 @@
 	struct map *kallsyms_map, *vmlinux_map;
 	struct machine kallsyms, vmlinux;
 	enum map_type type = MAP__FUNCTION;
-	struct ref_reloc_sym ref_reloc_sym = { .name = "_stext", };
 	u64 mem_start, mem_end;
 
 	/*
@@ -70,14 +69,6 @@
 	 */
 	kallsyms_map = machine__kernel_map(&kallsyms, type);
 
-	sym = map__find_symbol_by_name(kallsyms_map, ref_reloc_sym.name, NULL);
-	if (sym == NULL) {
-		pr_debug("dso__find_symbol_by_name ");
-		goto out;
-	}
-
-	ref_reloc_sym.addr = UM(sym->start);
-
 	/*
 	 * Step 5:
 	 *
@@ -89,7 +80,6 @@
 	}
 
 	vmlinux_map = machine__kernel_map(&vmlinux, type);
-	map__kmap(vmlinux_map)->ref_reloc_sym = &ref_reloc_sym;
 
 	/*
 	 * Step 6:
diff --git a/tools/perf/util/event.c b/tools/perf/util/event.c
index 1fc1c2f..b0f3ca8 100644
--- a/tools/perf/util/event.c
+++ b/tools/perf/util/event.c
@@ -470,23 +470,32 @@
 	return 1;
 }
 
+u64 kallsyms__get_function_start(const char *kallsyms_filename,
+				 const char *symbol_name)
+{
+	struct process_symbol_args args = { .name = symbol_name, };
+
+	if (kallsyms__parse(kallsyms_filename, &args, find_symbol_cb) <= 0)
+		return 0;
+
+	return args.start;
+}
+
 int perf_event__synthesize_kernel_mmap(struct perf_tool *tool,
 				       perf_event__handler_t process,
-				       struct machine *machine,
-				       const char *symbol_name)
+				       struct machine *machine)
 {
 	size_t size;
-	const char *filename, *mmap_name;
-	char path[PATH_MAX];
+	const char *mmap_name;
 	char name_buff[PATH_MAX];
 	struct map *map;
+	struct kmap *kmap;
 	int err;
 	/*
 	 * We should get this from /sys/kernel/sections/.text, but till that is
 	 * available use this, and after it is use this as a fallback for older
 	 * kernels.
 	 */
-	struct process_symbol_args args = { .name = symbol_name, };
 	union perf_event *event = zalloc((sizeof(event->mmap) +
 					  machine->id_hdr_size));
 	if (event == NULL) {
@@ -502,30 +511,19 @@
 		 * see kernel/perf_event.c __perf_event_mmap
 		 */
 		event->header.misc = PERF_RECORD_MISC_KERNEL;
-		filename = "/proc/kallsyms";
 	} else {
 		event->header.misc = PERF_RECORD_MISC_GUEST_KERNEL;
-		if (machine__is_default_guest(machine))
-			filename = (char *) symbol_conf.default_guest_kallsyms;
-		else {
-			sprintf(path, "%s/proc/kallsyms", machine->root_dir);
-			filename = path;
-		}
-	}
-
-	if (kallsyms__parse(filename, &args, find_symbol_cb) <= 0) {
-		free(event);
-		return -ENOENT;
 	}
 
 	map = machine->vmlinux_maps[MAP__FUNCTION];
+	kmap = map__kmap(map);
 	size = snprintf(event->mmap.filename, sizeof(event->mmap.filename),
-			"%s%s", mmap_name, symbol_name) + 1;
+			"%s%s", mmap_name, kmap->ref_reloc_sym->name) + 1;
 	size = PERF_ALIGN(size, sizeof(u64));
 	event->mmap.header.type = PERF_RECORD_MMAP;
 	event->mmap.header.size = (sizeof(event->mmap) -
 			(sizeof(event->mmap.filename) - size) + machine->id_hdr_size);
-	event->mmap.pgoff = args.start;
+	event->mmap.pgoff = kmap->ref_reloc_sym->addr;
 	event->mmap.start = map->start;
 	event->mmap.len   = map->end - event->mmap.start;
 	event->mmap.pid   = machine->pid;
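
The new kallsyms__get_function_start() returns the start address of the named symbol from a kallsyms-format file, or 0 when the symbol (or the file) cannot be found; that 0-means-missing convention is what the callers added later in this series rely on. The stand-alone program below only approximates the helper for illustration; the real one is built on perf's kallsyms__parse():

#include <stdio.h>
#include <string.h>

/* kallsyms lines look like "ffffffff81000000 T _text" */
static unsigned long long get_function_start(const char *filename, const char *symbol)
{
	char line[256], name[128], type;
	unsigned long long addr = 0, a;
	FILE *fp = fopen(filename, "r");

	if (!fp)
		return 0;
	while (fgets(line, sizeof(line), fp)) {
		if (sscanf(line, "%llx %c %127s", &a, &type, name) == 3 &&
		    !strcmp(name, symbol)) {
			addr = a;
			break;
		}
	}
	fclose(fp);
	return addr;
}

int main(void)
{
	printf("_text is at %#llx\n",
	       get_function_start("/proc/kallsyms", "_text"));
	return 0;
}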
diff --git a/tools/perf/util/event.h b/tools/perf/util/event.h
index faf6e21..851fa06 100644
--- a/tools/perf/util/event.h
+++ b/tools/perf/util/event.h
@@ -214,8 +214,7 @@
 				   struct machine *machine, bool mmap_data);
 int perf_event__synthesize_kernel_mmap(struct perf_tool *tool,
 				       perf_event__handler_t process,
-				       struct machine *machine,
-				       const char *symbol_name);
+				       struct machine *machine);
 
 int perf_event__synthesize_modules(struct perf_tool *tool,
 				   perf_event__handler_t process,
@@ -279,4 +278,7 @@
 size_t perf_event__fprintf_task(union perf_event *event, FILE *fp);
 size_t perf_event__fprintf(union perf_event *event, FILE *fp);
 
+u64 kallsyms__get_function_start(const char *kallsyms_filename,
+				 const char *symbol_name);
+
 #endif /* __PERF_RECORD_H */
diff --git a/tools/perf/util/include/asm/hash.h b/tools/perf/util/include/asm/hash.h
new file mode 100644
index 0000000..d82b170b
--- /dev/null
+++ b/tools/perf/util/include/asm/hash.h
@@ -0,0 +1,6 @@
+#ifndef __ASM_GENERIC_HASH_H
+#define __ASM_GENERIC_HASH_H
+
+/* Stub */
+
+#endif /* __ASM_GENERIC_HASH_H */
diff --git a/tools/perf/util/machine.c b/tools/perf/util/machine.c
index ded7459..c872991 100644
--- a/tools/perf/util/machine.c
+++ b/tools/perf/util/machine.c
@@ -496,19 +496,22 @@
 	return 1;
 }
 
+static void machine__get_kallsyms_filename(struct machine *machine, char *buf,
+					   size_t bufsz)
+{
+	if (machine__is_default_guest(machine))
+		scnprintf(buf, bufsz, "%s", symbol_conf.default_guest_kallsyms);
+	else
+		scnprintf(buf, bufsz, "%s/proc/kallsyms", machine->root_dir);
+}
+
 /* Figure out the start address of kernel map from /proc/kallsyms */
 static u64 machine__get_kernel_start_addr(struct machine *machine)
 {
-	const char *filename;
-	char path[PATH_MAX];
+	char filename[PATH_MAX];
 	struct process_args args;
 
-	if (machine__is_default_guest(machine))
-		filename = (char *)symbol_conf.default_guest_kallsyms;
-	else {
-		sprintf(path, "%s/proc/kallsyms", machine->root_dir);
-		filename = path;
-	}
+	machine__get_kallsyms_filename(machine, filename, PATH_MAX);
 
 	if (symbol__restricted_filename(filename, "/proc/kallsyms"))
 		return 0;
@@ -829,9 +832,25 @@
 	return 0;
 }
 
+const char *ref_reloc_sym_names[] = {"_text", "_stext", NULL};
+
 int machine__create_kernel_maps(struct machine *machine)
 {
 	struct dso *kernel = machine__get_kernel(machine);
+	char filename[PATH_MAX];
+	const char *name;
+	u64 addr = 0;
+	int i;
+
+	machine__get_kallsyms_filename(machine, filename, PATH_MAX);
+
+	for (i = 0; (name = ref_reloc_sym_names[i]) != NULL; i++) {
+		addr = kallsyms__get_function_start(filename, name);
+		if (addr)
+			break;
+	}
+	if (!addr)
+		return -1;
 
 	if (kernel == NULL ||
 	    __machine__create_kernel_maps(machine, kernel) < 0)
@@ -850,6 +869,13 @@
 	 * Now that we have all the maps created, just set the ->end of them:
 	 */
 	map_groups__fixup_end(&machine->kmaps);
+
+	if (maps__set_kallsyms_ref_reloc_sym(machine->vmlinux_maps, name,
+					     addr)) {
+		machine__destroy_kernel_maps(machine);
+		return -1;
+	}
+
 	return 0;
 }
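
machine__create_kernel_maps() now chooses the kernel reference symbol itself, walking ref_reloc_sym_names[] until kallsyms knows one of the entries (some kernels export only "_stext", which is why the callers in builtin-record.c above could drop their own _text/_stext retry). A self-contained sketch of the same fallback idiom, with lookup() standing in for kallsyms__get_function_start():

#include <stdio.h>
#include <string.h>

static const char *ref_reloc_sym_names[] = { "_text", "_stext", NULL };

static unsigned long long lookup(const char *name)
{
	/* pretend this kernel only exports "_stext" */
	return strcmp(name, "_stext") ? 0ULL : 0xffffffff81000000ULL;
}

int main(void)
{
	unsigned long long addr = 0;
	const char *name;
	int i;

	for (i = 0; (name = ref_reloc_sym_names[i]) != NULL; i++) {
		addr = lookup(name);
		if (addr)
			break;
	}
	if (!addr)
		return 1;	/* same failure path the patch takes */

	printf("using %s at %#llx\n", name, addr);
	return 0;
}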
 
diff --git a/tools/perf/util/machine.h b/tools/perf/util/machine.h
index 4771330..f77e91e 100644
--- a/tools/perf/util/machine.h
+++ b/tools/perf/util/machine.h
@@ -18,6 +18,8 @@
 #define	HOST_KERNEL_ID			(-1)
 #define	DEFAULT_GUEST_KERNEL_ID		(0)
 
+extern const char *ref_reloc_sym_names[];
+
 struct machine {
 	struct rb_node	  rb_node;
 	pid_t		  pid;
diff --git a/tools/perf/util/map.c b/tools/perf/util/map.c
index 3b97513..39cd2d0 100644
--- a/tools/perf/util/map.c
+++ b/tools/perf/util/map.c
@@ -39,6 +39,7 @@
 	map->start    = start;
 	map->end      = end;
 	map->pgoff    = pgoff;
+	map->reloc    = 0;
 	map->dso      = dso;
 	map->map_ip   = map__map_ip;
 	map->unmap_ip = map__unmap_ip;
@@ -288,7 +289,7 @@
 	if (map->dso->rel)
 		return rip - map->pgoff;
 
-	return map->unmap_ip(map, rip);
+	return map->unmap_ip(map, rip) - map->reloc;
 }
 
 /**
@@ -311,7 +312,7 @@
 	if (map->dso->rel)
 		return map->unmap_ip(map, ip + map->pgoff);
 
-	return ip;
+	return ip + map->reloc;
 }
 
 void map_groups__init(struct map_groups *mg)
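
The new map->reloc field holds the kernel's boot-time relocation: the reference symbol's address from kallsyms minus its address inside the ELF image (it is filled in by the symbol-elf.c hunk further down). map__rip_2objdump() subtracts it to get back to the addresses vmlinux/objdump know, and map__objdump_2mem() adds it back. A worked example with made-up addresses, ignoring the map_ip()/unmap_ip() translation:

#include <stdio.h>

int main(void)
{
	unsigned long long elf_text     = 0xffffffff81000000ULL;   /* _text inside vmlinux     */
	unsigned long long runtime_text = 0xffffffff81200000ULL;   /* _text from kallsyms      */
	unsigned long long reloc        = runtime_text - elf_text; /* what symbol-elf.c stores */
	unsigned long long sample_ip    = 0xffffffff81234567ULL;   /* address seen at runtime  */

	/* map__rip_2objdump(): runtime address -> address objdump/vmlinux knows */
	printf("objdump address: %#llx\n", sample_ip - reloc);
	/* map__objdump_2mem(): the reverse direction */
	printf("runtime address: %#llx\n", (sample_ip - reloc) + reloc);
	return 0;
}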
diff --git a/tools/perf/util/map.h b/tools/perf/util/map.h
index 18068c6..257e513 100644
--- a/tools/perf/util/map.h
+++ b/tools/perf/util/map.h
@@ -36,6 +36,7 @@
 	bool			erange_warned;
 	u32			priv;
 	u64			pgoff;
+	u64			reloc;
 	u32			maj, min; /* only valid for MMAP2 record */
 	u64			ino;      /* only valid for MMAP2 record */
 	u64			ino_generation;/* only valid for MMAP2 record */
diff --git a/tools/perf/util/symbol-elf.c b/tools/perf/util/symbol-elf.c
index 7594567..3e9f336 100644
--- a/tools/perf/util/symbol-elf.c
+++ b/tools/perf/util/symbol-elf.c
@@ -751,6 +751,8 @@
 			if (strcmp(elf_name, kmap->ref_reloc_sym->name))
 				continue;
 			kmap->ref_reloc_sym->unrelocated_addr = sym.st_value;
+			map->reloc = kmap->ref_reloc_sym->addr -
+				     kmap->ref_reloc_sym->unrelocated_addr;
 			break;
 		}
 	}
@@ -922,6 +924,7 @@
 				  (u64)shdr.sh_offset);
 			sym.st_value -= shdr.sh_addr - shdr.sh_offset;
 		}
+new_symbol:
 		/*
 		 * We need to figure out if the object was created from C++ sources
 		 * DWARF DW_compile_unit has this, but we don't always have access
@@ -933,7 +936,6 @@
 			if (demangled != NULL)
 				elf_name = demangled;
 		}
-new_symbol:
 		f = symbol__new(sym.st_value, sym.st_size,
 				GELF_ST_BIND(sym.st_info), elf_name);
 		free(demangled);
diff --git a/tools/perf/util/symbol.c b/tools/perf/util/symbol.c
index 39ce9ad..a9d758a 100644
--- a/tools/perf/util/symbol.c
+++ b/tools/perf/util/symbol.c
@@ -627,7 +627,7 @@
  * kernel range is broken in several maps, named [kernel].N, as we don't have
  * the original ELF section names vmlinux have.
  */
-static int dso__split_kallsyms(struct dso *dso, struct map *map,
+static int dso__split_kallsyms(struct dso *dso, struct map *map, u64 delta,
 			       symbol_filter_t filter)
 {
 	struct map_groups *kmaps = map__kmap(map)->kmaps;
@@ -692,6 +692,12 @@
 			char dso_name[PATH_MAX];
 			struct dso *ndso;
 
+			if (delta) {
+				/* Kernel was relocated at boot time */
+				pos->start -= delta;
+				pos->end -= delta;
+			}
+
 			if (count == 0) {
 				curr_map = map;
 				goto filter_symbol;
@@ -721,6 +727,10 @@
 			curr_map->map_ip = curr_map->unmap_ip = identity__map_ip;
 			map_groups__insert(kmaps, curr_map);
 			++kernel_range;
+		} else if (delta) {
+			/* Kernel was relocated at boot time */
+			pos->start -= delta;
+			pos->end -= delta;
 		}
 filter_symbol:
 		if (filter && filter(curr_map, pos)) {
@@ -976,6 +986,23 @@
 	return 0;
 }
 
+static int validate_kcore_addresses(const char *kallsyms_filename,
+				    struct map *map)
+{
+	struct kmap *kmap = map__kmap(map);
+
+	if (kmap->ref_reloc_sym && kmap->ref_reloc_sym->name) {
+		u64 start;
+
+		start = kallsyms__get_function_start(kallsyms_filename,
+						     kmap->ref_reloc_sym->name);
+		if (start != kmap->ref_reloc_sym->addr)
+			return -EINVAL;
+	}
+
+	return validate_kcore_modules(kallsyms_filename, map);
+}
+
 struct kcore_mapfn_data {
 	struct dso *dso;
 	enum map_type type;
@@ -1019,8 +1046,8 @@
 					     kallsyms_filename))
 		return -EINVAL;
 
-	/* All modules must be present at their original addresses */
-	if (validate_kcore_modules(kallsyms_filename, map))
+	/* Modules and kernel must be present at their original addresses */
+	if (validate_kcore_addresses(kallsyms_filename, map))
 		return -EINVAL;
 
 	md.dso = dso;
@@ -1113,15 +1140,41 @@
 	return -EINVAL;
 }
 
+/*
+ * If the kernel is relocated at boot time, kallsyms won't match.  Compute the
+ * delta based on the relocation reference symbol.
+ */
+static int kallsyms__delta(struct map *map, const char *filename, u64 *delta)
+{
+	struct kmap *kmap = map__kmap(map);
+	u64 addr;
+
+	if (!kmap->ref_reloc_sym || !kmap->ref_reloc_sym->name)
+		return 0;
+
+	addr = kallsyms__get_function_start(filename,
+					    kmap->ref_reloc_sym->name);
+	if (!addr)
+		return -1;
+
+	*delta = addr - kmap->ref_reloc_sym->addr;
+	return 0;
+}
+
 int dso__load_kallsyms(struct dso *dso, const char *filename,
 		       struct map *map, symbol_filter_t filter)
 {
+	u64 delta = 0;
+
 	if (symbol__restricted_filename(filename, "/proc/kallsyms"))
 		return -1;
 
 	if (dso__load_all_kallsyms(dso, filename, map) < 0)
 		return -1;
 
+	if (kallsyms__delta(map, filename, &delta))
+		return -1;
+
 	symbols__fixup_duplicate(&dso->symbols[map->type]);
 	symbols__fixup_end(&dso->symbols[map->type]);
 
@@ -1133,7 +1186,7 @@
 	if (!dso__load_kcore(dso, map, filename))
 		return dso__split_kallsyms_for_kcore(dso, map, filter);
 	else
-		return dso__split_kallsyms(dso, map, filter);
+		return dso__split_kallsyms(dso, map, delta, filter);
 }
 
 static int dso__load_perf_map(struct dso *dso, struct map *map,
@@ -1424,7 +1477,7 @@
 			continue;
 		scnprintf(kallsyms_filename, sizeof(kallsyms_filename),
 			  "%s/%s/kallsyms", dir, dent->d_name);
-		if (!validate_kcore_modules(kallsyms_filename, map)) {
+		if (!validate_kcore_addresses(kallsyms_filename, map)) {
 			strlcpy(dir, kallsyms_filename, dir_sz);
 			ret = 0;
 			break;
@@ -1479,7 +1532,7 @@
 		if (fd != -1) {
 			close(fd);
 			/* If module maps match go with /proc/kallsyms */
-			if (!validate_kcore_modules("/proc/kallsyms", map))
+			if (!validate_kcore_addresses("/proc/kallsyms", map))
 				goto proc_kallsyms;
 		}
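
The kallsyms__delta() arithmetic above is easiest to follow with concrete numbers: the kallsyms file being loaded may come from a kernel placed at a different address than the one perf recorded, so every symbol read from it is rebased by the difference between the reference symbol's two addresses. A sketch with made-up values, only to illustrate the rebasing done in dso__split_kallsyms():

#include <stdio.h>

int main(void)
{
	/* "_text" as recorded in the session (kmap->ref_reloc_sym->addr) */
	unsigned long long recorded = 0xffffffff81000000ULL;
	/* "_text" as listed in the kallsyms file now being loaded */
	unsigned long long in_file  = 0xffffffff81800000ULL;
	unsigned long long delta    = in_file - recorded;	/* kallsyms__delta() */
	/* one symbol from that file, rebased the way dso__split_kallsyms() does */
	unsigned long long sym      = 0xffffffff81801234ULL;

	printf("delta = %#llx, rebased symbol start = %#llx\n", delta, sym - delta);
	return 0;
}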
 
diff --git a/tools/power/x86/turbostat/.gitignore b/tools/power/x86/turbostat/.gitignore
new file mode 100644
index 0000000..7521370
--- /dev/null
+++ b/tools/power/x86/turbostat/.gitignore
@@ -0,0 +1 @@
+turbostat
diff --git a/tools/power/x86/turbostat/Makefile b/tools/power/x86/turbostat/Makefile
index f09641d..d1b3a36 100644
--- a/tools/power/x86/turbostat/Makefile
+++ b/tools/power/x86/turbostat/Makefile
@@ -5,7 +5,7 @@
 
 turbostat : turbostat.c
 CFLAGS +=	-Wall
-CFLAGS +=	-I../../../../arch/x86/include/uapi/
+CFLAGS +=	-DMSRHEADER='"../../../../arch/x86/include/uapi/asm/msr-index.h"'
 
 %: %.c
 	@mkdir -p $(BUILD_OUTPUT)
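
The Makefile now hands the MSR header's path to the compiler as a macro whose value is a quoted string, and turbostat.c switches from <asm/msr.h> to "#include MSRHEADER" in the hunk just below, so only that one kernel header has to be named instead of putting the whole uapi directory on the include path. A minimal demonstration of the same preprocessor mechanism, using stdio.h as a stand-in for msr-index.h:

/* demo.c -- build with:  cc -DMSRHEADER='"stdio.h"' demo.c */
#include MSRHEADER	/* expands to an ordinary #include "stdio.h" */

int main(void)
{
	puts("included a header named on the compiler command line");
	return 0;
}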
diff --git a/tools/power/x86/turbostat/turbostat.c b/tools/power/x86/turbostat/turbostat.c
index 9d77f13..77eb130 100644
--- a/tools/power/x86/turbostat/turbostat.c
+++ b/tools/power/x86/turbostat/turbostat.c
@@ -20,8 +20,10 @@
  */
 
 #define _GNU_SOURCE
-#include <asm/msr.h>
+#include MSRHEADER
+#include <stdarg.h>
 #include <stdio.h>
+#include <err.h>
 #include <unistd.h>
 #include <sys/types.h>
 #include <sys/wait.h>
@@ -35,13 +37,16 @@
 #include <string.h>
 #include <ctype.h>
 #include <sched.h>
+#include <cpuid.h>
 
 char *proc_stat = "/proc/stat";
 unsigned int interval_sec = 5;	/* set with -i interval_sec */
 unsigned int verbose;		/* set with -v */
 unsigned int rapl_verbose;	/* set with -R */
+unsigned int rapl_joules;	/* set with -J */
 unsigned int thermal_verbose;	/* set with -T */
-unsigned int summary_only;	/* set with -s */
+unsigned int summary_only;	/* set with -S */
+unsigned int dump_only;		/* set with -s */
 unsigned int skip_c0;
 unsigned int skip_c1;
 unsigned int do_nhm_cstates;
@@ -77,14 +82,32 @@
 double rapl_power_units, rapl_energy_units, rapl_time_units;
 double rapl_joule_counter_range;
 
-#define RAPL_PKG	(1 << 0)
-#define RAPL_CORES	(1 << 1)
-#define RAPL_GFX	(1 << 2)
-#define RAPL_DRAM	(1 << 3)
-#define RAPL_PKG_PERF_STATUS	(1 << 4)
-#define RAPL_DRAM_PERF_STATUS	(1 << 5)
-#define RAPL_PKG_POWER_INFO	(1 << 6)
-#define RAPL_CORE_POLICY	(1 << 7)
+#define RAPL_PKG		(1 << 0)
+					/* 0x610 MSR_PKG_POWER_LIMIT */
+					/* 0x611 MSR_PKG_ENERGY_STATUS */
+#define RAPL_PKG_PERF_STATUS	(1 << 1)
+					/* 0x613 MSR_PKG_PERF_STATUS */
+#define RAPL_PKG_POWER_INFO	(1 << 2)
+					/* 0x614 MSR_PKG_POWER_INFO */
+
+#define RAPL_DRAM		(1 << 3)
+					/* 0x618 MSR_DRAM_POWER_LIMIT */
+					/* 0x619 MSR_DRAM_ENERGY_STATUS */
+					/* 0x61c MSR_DRAM_POWER_INFO */
+#define RAPL_DRAM_PERF_STATUS	(1 << 4)
+					/* 0x61b MSR_DRAM_PERF_STATUS */
+
+#define RAPL_CORES		(1 << 5)
+					/* 0x638 MSR_PP0_POWER_LIMIT */
+					/* 0x639 MSR_PP0_ENERGY_STATUS */
+#define RAPL_CORE_POLICY	(1 << 6)
+					/* 0x63a MSR_PP0_POLICY */
+
+
+#define RAPL_GFX		(1 << 7)
+					/* 0x640 MSR_PP1_POWER_LIMIT */
+					/* 0x641 MSR_PP1_ENERGY_STATUS */
+					/* 0x642 MSR_PP1_POLICY */
 #define	TJMAX_DEFAULT	100
 
 #define MAX(a, b) ((a) > (b) ? (a) : (b))
@@ -234,7 +257,7 @@
 	close(fd);
 
 	if (retval != sizeof *msr) {
-		fprintf(stderr, "%s offset 0x%zx read failed\n", pathname, offset);
+		fprintf(stderr, "%s offset 0x%llx read failed\n", pathname, (unsigned long long)offset);
 		return -1;
 	}
 
@@ -296,70 +319,92 @@
 		outp += sprintf(outp, "  %%pc10");
 	}
 
-	if (do_rapl & RAPL_PKG)
-		outp += sprintf(outp, "  Pkg_W");
-	if (do_rapl & RAPL_CORES)
-		outp += sprintf(outp, "  Cor_W");
-	if (do_rapl & RAPL_GFX)
-		outp += sprintf(outp, " GFX_W");
-	if (do_rapl & RAPL_DRAM)
-		outp += sprintf(outp, " RAM_W");
-	if (do_rapl & RAPL_PKG_PERF_STATUS)
-		outp += sprintf(outp, " PKG_%%");
-	if (do_rapl & RAPL_DRAM_PERF_STATUS)
-		outp += sprintf(outp, " RAM_%%");
+	if (do_rapl && !rapl_joules) {
+		if (do_rapl & RAPL_PKG)
+			outp += sprintf(outp, "  Pkg_W");
+		if (do_rapl & RAPL_CORES)
+			outp += sprintf(outp, "  Cor_W");
+		if (do_rapl & RAPL_GFX)
+			outp += sprintf(outp, " GFX_W");
+		if (do_rapl & RAPL_DRAM)
+			outp += sprintf(outp, " RAM_W");
+		if (do_rapl & RAPL_PKG_PERF_STATUS)
+			outp += sprintf(outp, " PKG_%%");
+		if (do_rapl & RAPL_DRAM_PERF_STATUS)
+			outp += sprintf(outp, " RAM_%%");
+	} else {
+		if (do_rapl & RAPL_PKG)
+			outp += sprintf(outp, "  Pkg_J");
+		if (do_rapl & RAPL_CORES)
+			outp += sprintf(outp, "  Cor_J");
+		if (do_rapl & RAPL_GFX)
+			outp += sprintf(outp, " GFX_J");
+		if (do_rapl & RAPL_DRAM)
+			outp += sprintf(outp, " RAM_J");
+		if (do_rapl & RAPL_PKG_PERF_STATUS)
+			outp += sprintf(outp, " PKG_%%");
+		if (do_rapl & RAPL_DRAM_PERF_STATUS)
+			outp += sprintf(outp, " RAM_%%");
+		outp += sprintf(outp, " time");
 
+	}
 	outp += sprintf(outp, "\n");
 }
 
 int dump_counters(struct thread_data *t, struct core_data *c,
 	struct pkg_data *p)
 {
-	fprintf(stderr, "t %p, c %p, p %p\n", t, c, p);
+	outp += sprintf(outp, "t %p, c %p, p %p\n", t, c, p);
 
 	if (t) {
-		fprintf(stderr, "CPU: %d flags 0x%x\n", t->cpu_id, t->flags);
-		fprintf(stderr, "TSC: %016llX\n", t->tsc);
-		fprintf(stderr, "aperf: %016llX\n", t->aperf);
-		fprintf(stderr, "mperf: %016llX\n", t->mperf);
-		fprintf(stderr, "c1: %016llX\n", t->c1);
-		fprintf(stderr, "msr0x%x: %08llX\n",
+		outp += sprintf(outp, "CPU: %d flags 0x%x\n",
+			t->cpu_id, t->flags);
+		outp += sprintf(outp, "TSC: %016llX\n", t->tsc);
+		outp += sprintf(outp, "aperf: %016llX\n", t->aperf);
+		outp += sprintf(outp, "mperf: %016llX\n", t->mperf);
+		outp += sprintf(outp, "c1: %016llX\n", t->c1);
+		outp += sprintf(outp, "msr0x%x: %08llX\n",
 			extra_delta_offset32, t->extra_delta32);
-		fprintf(stderr, "msr0x%x: %016llX\n",
+		outp += sprintf(outp, "msr0x%x: %016llX\n",
 			extra_delta_offset64, t->extra_delta64);
-		fprintf(stderr, "msr0x%x: %08llX\n",
+		outp += sprintf(outp, "msr0x%x: %08llX\n",
 			extra_msr_offset32, t->extra_msr32);
-		fprintf(stderr, "msr0x%x: %016llX\n",
+		outp += sprintf(outp, "msr0x%x: %016llX\n",
 			extra_msr_offset64, t->extra_msr64);
 		if (do_smi)
-			fprintf(stderr, "SMI: %08X\n", t->smi_count);
+			outp += sprintf(outp, "SMI: %08X\n", t->smi_count);
 	}
 
 	if (c) {
-		fprintf(stderr, "core: %d\n", c->core_id);
-		fprintf(stderr, "c3: %016llX\n", c->c3);
-		fprintf(stderr, "c6: %016llX\n", c->c6);
-		fprintf(stderr, "c7: %016llX\n", c->c7);
-		fprintf(stderr, "DTS: %dC\n", c->core_temp_c);
+		outp += sprintf(outp, "core: %d\n", c->core_id);
+		outp += sprintf(outp, "c3: %016llX\n", c->c3);
+		outp += sprintf(outp, "c6: %016llX\n", c->c6);
+		outp += sprintf(outp, "c7: %016llX\n", c->c7);
+		outp += sprintf(outp, "DTS: %dC\n", c->core_temp_c);
 	}
 
 	if (p) {
-		fprintf(stderr, "package: %d\n", p->package_id);
-		fprintf(stderr, "pc2: %016llX\n", p->pc2);
-		fprintf(stderr, "pc3: %016llX\n", p->pc3);
-		fprintf(stderr, "pc6: %016llX\n", p->pc6);
-		fprintf(stderr, "pc7: %016llX\n", p->pc7);
-		fprintf(stderr, "pc8: %016llX\n", p->pc8);
-		fprintf(stderr, "pc9: %016llX\n", p->pc9);
-		fprintf(stderr, "pc10: %016llX\n", p->pc10);
-		fprintf(stderr, "Joules PKG: %0X\n", p->energy_pkg);
-		fprintf(stderr, "Joules COR: %0X\n", p->energy_cores);
-		fprintf(stderr, "Joules GFX: %0X\n", p->energy_gfx);
-		fprintf(stderr, "Joules RAM: %0X\n", p->energy_dram);
-		fprintf(stderr, "Throttle PKG: %0X\n", p->rapl_pkg_perf_status);
-		fprintf(stderr, "Throttle RAM: %0X\n", p->rapl_dram_perf_status);
-		fprintf(stderr, "PTM: %dC\n", p->pkg_temp_c);
+		outp += sprintf(outp, "package: %d\n", p->package_id);
+		outp += sprintf(outp, "pc2: %016llX\n", p->pc2);
+		outp += sprintf(outp, "pc3: %016llX\n", p->pc3);
+		outp += sprintf(outp, "pc6: %016llX\n", p->pc6);
+		outp += sprintf(outp, "pc7: %016llX\n", p->pc7);
+		outp += sprintf(outp, "pc8: %016llX\n", p->pc8);
+		outp += sprintf(outp, "pc9: %016llX\n", p->pc9);
+		outp += sprintf(outp, "pc10: %016llX\n", p->pc10);
+		outp += sprintf(outp, "Joules PKG: %0X\n", p->energy_pkg);
+		outp += sprintf(outp, "Joules COR: %0X\n", p->energy_cores);
+		outp += sprintf(outp, "Joules GFX: %0X\n", p->energy_gfx);
+		outp += sprintf(outp, "Joules RAM: %0X\n", p->energy_dram);
+		outp += sprintf(outp, "Throttle PKG: %0X\n",
+			p->rapl_pkg_perf_status);
+		outp += sprintf(outp, "Throttle RAM: %0X\n",
+			p->rapl_dram_perf_status);
+		outp += sprintf(outp, "PTM: %dC\n", p->pkg_temp_c);
 	}
+
+	outp += sprintf(outp, "\n");
+
 	return 0;
 }
 
@@ -527,19 +572,39 @@
 		fmt6 = " %4.0f**";
 	}
 
-	if (do_rapl & RAPL_PKG)
-		outp += sprintf(outp, fmt6, p->energy_pkg * rapl_energy_units / interval_float);
-	if (do_rapl & RAPL_CORES)
-		outp += sprintf(outp, fmt6, p->energy_cores * rapl_energy_units / interval_float);
-	if (do_rapl & RAPL_GFX)
-		outp += sprintf(outp, fmt5, p->energy_gfx * rapl_energy_units / interval_float); 
-	if (do_rapl & RAPL_DRAM)
-		outp += sprintf(outp, fmt5, p->energy_dram * rapl_energy_units / interval_float);
-	if (do_rapl & RAPL_PKG_PERF_STATUS )
-		outp += sprintf(outp, fmt5, 100.0 * p->rapl_pkg_perf_status * rapl_time_units / interval_float);
-	if (do_rapl & RAPL_DRAM_PERF_STATUS )
-		outp += sprintf(outp, fmt5, 100.0 * p->rapl_dram_perf_status * rapl_time_units / interval_float);
+	if (do_rapl && !rapl_joules) {
+		if (do_rapl & RAPL_PKG)
+			outp += sprintf(outp, fmt6, p->energy_pkg * rapl_energy_units / interval_float);
+		if (do_rapl & RAPL_CORES)
+			outp += sprintf(outp, fmt6, p->energy_cores * rapl_energy_units / interval_float);
+		if (do_rapl & RAPL_GFX)
+			outp += sprintf(outp, fmt5, p->energy_gfx * rapl_energy_units / interval_float);
+		if (do_rapl & RAPL_DRAM)
+			outp += sprintf(outp, fmt5, p->energy_dram * rapl_energy_units / interval_float);
+		if (do_rapl & RAPL_PKG_PERF_STATUS)
+			outp += sprintf(outp, fmt5, 100.0 * p->rapl_pkg_perf_status * rapl_time_units / interval_float);
+		if (do_rapl & RAPL_DRAM_PERF_STATUS)
+			outp += sprintf(outp, fmt5, 100.0 * p->rapl_dram_perf_status * rapl_time_units / interval_float);
+	} else {
+		if (do_rapl & RAPL_PKG)
+			outp += sprintf(outp, fmt6,
+					p->energy_pkg * rapl_energy_units);
+		if (do_rapl & RAPL_CORES)
+			outp += sprintf(outp, fmt6,
+					p->energy_cores * rapl_energy_units);
+		if (do_rapl & RAPL_GFX)
+			outp += sprintf(outp, fmt5,
+					p->energy_gfx * rapl_energy_units);
+		if (do_rapl & RAPL_DRAM)
+			outp += sprintf(outp, fmt5,
+					p->energy_dram * rapl_energy_units);
+		if (do_rapl & RAPL_PKG_PERF_STATUS)
+			outp += sprintf(outp, fmt5, 100.0 * p->rapl_pkg_perf_status * rapl_time_units / interval_float);
+		if (do_rapl & RAPL_DRAM_PERF_STATUS)
+			outp += sprintf(outp, fmt5, 100.0 * p->rapl_dram_perf_status * rapl_time_units / interval_float);
+	outp += sprintf(outp, fmt5, interval_float);
 
+	}
 done:
 	outp += sprintf(outp, "\n");
 
@@ -622,12 +687,10 @@
 	old->tsc = new->tsc - old->tsc;
 
 	/* check for TSC < 1 Mcycles over interval */
-	if (old->tsc < (1000 * 1000)) {
-		fprintf(stderr, "Insanely slow TSC rate, TSC stops in idle?\n");
-		fprintf(stderr, "You can disable all c-states by booting with \"idle=poll\"\n");
-		fprintf(stderr, "or just the deep ones with \"processor.max_cstate=1\"\n");
-		exit(-3);
-	}
+	if (old->tsc < (1000 * 1000))
+		errx(-3, "Insanely slow TSC rate, TSC stops in idle?\n"
+		     "You can disable all c-states by booting with \"idle=poll\"\n"
+		     "or just the deep ones with \"processor.max_cstate=1\"");
 
 	old->c1 = new->c1 - old->c1;
 
@@ -1173,24 +1236,43 @@
 }
 
 /*
+ * Open a file, and exit on failure
+ */
+FILE *fopen_or_die(const char *path, const char *mode)
+{
+	FILE *filep = fopen(path, mode);
+	if (!filep)
+		err(1, "%s: open failed", path);
+	return filep;
+}
+
+/*
+ * Parse a file containing a single int.
+ */
+int parse_int_file(const char *fmt, ...)
+{
+	va_list args;
+	char path[PATH_MAX];
+	FILE *filep;
+	int value;
+
+	va_start(args, fmt);
+	vsnprintf(path, sizeof(path), fmt, args);
+	va_end(args);
+	filep = fopen_or_die(path, "r");
+	if (fscanf(filep, "%d", &value) != 1)
+		err(1, "%s: failed to parse number from file", path);
+	fclose(filep);
+	return value;
+}
+
+/*
  * cpu_is_first_sibling_in_core(cpu)
  * return 1 if given CPU is 1st HT sibling in the core
  */
 int cpu_is_first_sibling_in_core(int cpu)
 {
-	char path[64];
-	FILE *filep;
-	int first_cpu;
-
-	sprintf(path, "/sys/devices/system/cpu/cpu%d/topology/thread_siblings_list", cpu);
-	filep = fopen(path, "r");
-	if (filep == NULL) {
-		perror(path);
-		exit(1);
-	}
-	fscanf(filep, "%d", &first_cpu);
-	fclose(filep);
-	return (cpu == first_cpu);
+	return cpu == parse_int_file("/sys/devices/system/cpu/cpu%d/topology/thread_siblings_list", cpu);
 }
 
 /*
@@ -1199,53 +1281,17 @@
  */
 int cpu_is_first_core_in_package(int cpu)
 {
-	char path[64];
-	FILE *filep;
-	int first_cpu;
-
-	sprintf(path, "/sys/devices/system/cpu/cpu%d/topology/core_siblings_list", cpu);
-	filep = fopen(path, "r");
-	if (filep == NULL) {
-		perror(path);
-		exit(1);
-	}
-	fscanf(filep, "%d", &first_cpu);
-	fclose(filep);
-	return (cpu == first_cpu);
+	return cpu == parse_int_file("/sys/devices/system/cpu/cpu%d/topology/core_siblings_list", cpu);
 }
 
 int get_physical_package_id(int cpu)
 {
-	char path[80];
-	FILE *filep;
-	int pkg;
-
-	sprintf(path, "/sys/devices/system/cpu/cpu%d/topology/physical_package_id", cpu);
-	filep = fopen(path, "r");
-	if (filep == NULL) {
-		perror(path);
-		exit(1);
-	}
-	fscanf(filep, "%d", &pkg);
-	fclose(filep);
-	return pkg;
+	return parse_int_file("/sys/devices/system/cpu/cpu%d/topology/physical_package_id", cpu);
 }
 
 int get_core_id(int cpu)
 {
-	char path[80];
-	FILE *filep;
-	int core;
-
-	sprintf(path, "/sys/devices/system/cpu/cpu%d/topology/core_id", cpu);
-	filep = fopen(path, "r");
-	if (filep == NULL) {
-		perror(path);
-		exit(1);
-	}
-	fscanf(filep, "%d", &core);
-	fclose(filep);
-	return core;
+	return parse_int_file("/sys/devices/system/cpu/cpu%d/topology/core_id", cpu);
 }
 
 int get_num_ht_siblings(int cpu)
@@ -1257,11 +1303,7 @@
 	char character;
 
 	sprintf(path, "/sys/devices/system/cpu/cpu%d/topology/thread_siblings_list", cpu);
-	filep = fopen(path, "r");
-	if (filep == NULL) {
-		perror(path);
-		exit(1);
-	}
+	filep = fopen_or_die(path, "r");
 	/*
 	 * file format:
 	 * if a pair of number with a character between: 2 siblings (eg. 1-2, or 1,4)
@@ -1331,17 +1373,11 @@
 	int cpu_num;
 	int retval;
 
-	fp = fopen(proc_stat, "r");
-	if (fp == NULL) {
-		perror(proc_stat);
-		exit(1);
-	}
+	fp = fopen_or_die(proc_stat, "r");
 
 	retval = fscanf(fp, "cpu %*d %*d %*d %*d %*d %*d %*d %*d %*d %*d\n");
-	if (retval != 0) {
-		perror("/proc/stat format");
-		exit(1);
-	}
+	if (retval != 0)
+		err(1, "%s: failed to parse format", proc_stat);
 
 	while (1) {
 		retval = fscanf(fp, "cpu%u %*d %*d %*d %*d %*d %*d %*d %*d %*d %*d\n", &cpu_num);
@@ -1445,19 +1481,15 @@
 {
 	struct stat sb;
 
-	if (stat("/dev/cpu/0/msr", &sb)) {
-		fprintf(stderr, "no /dev/cpu/0/msr\n");
-		fprintf(stderr, "Try \"# modprobe msr\"\n");
-		exit(-5);
-	}
+	if (stat("/dev/cpu/0/msr", &sb))
+		err(-5, "no /dev/cpu/0/msr\n"
+		    "Try \"# modprobe msr\"");
 }
 
 void check_super_user()
 {
-	if (getuid() != 0) {
-		fprintf(stderr, "must be root\n");
-		exit(-6);
-	}
+	if (getuid() != 0)
+		errx(-6, "must be root");
 }
 
 int has_nehalem_turbo_ratio_limit(unsigned int family, unsigned int model)
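
The patch converts turbostat's perror()+exit() pairs to err()/errx() from <err.h>: both prepend the program name, print the formatted message and exit with the given status, and err() additionally appends strerror(errno) while errx() does not, which is why message-only failures such as "must be root" use errx(). A minimal sketch (not from the patch) of the difference:

#include <err.h>
#include <stdio.h>

int main(void)
{
	FILE *fp = fopen("/no/such/file", "r");

	if (!fp)
		err(1, "/no/such/file: open failed");	/* appends ": No such file or directory" */
	if (fgetc(fp) == EOF)
		errx(1, "unexpected empty file");	/* message only, no errno text */
	return 0;
}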
@@ -1479,7 +1511,7 @@
 	case 0x3A:	/* IVB */
 	case 0x3E:	/* IVB Xeon */
 	case 0x3C:	/* HSW */
-	case 0x3F:	/* HSW */
+	case 0x3F:	/* HSX */
 	case 0x45:	/* HSW */
 	case 0x46:	/* HSW */
 	case 0x37:	/* BYT */
@@ -1595,11 +1627,13 @@
 	case 0x2A:
 	case 0x3A:
 	case 0x3C:	/* HSW */
-	case 0x3F:	/* HSW */
 	case 0x45:	/* HSW */
 	case 0x46:	/* HSW */
 		do_rapl = RAPL_PKG | RAPL_CORES | RAPL_CORE_POLICY | RAPL_GFX | RAPL_PKG_POWER_INFO;
 		break;
+	case 0x3F:	/* HSX */
+		do_rapl = RAPL_PKG | RAPL_DRAM | RAPL_DRAM_PERF_STATUS | RAPL_PKG_PERF_STATUS | RAPL_PKG_POWER_INFO;
+		break;
 	case 0x2D:
 	case 0x3E:
 		do_rapl = RAPL_PKG | RAPL_CORES | RAPL_CORE_POLICY | RAPL_DRAM | RAPL_PKG_PERF_STATUS | RAPL_DRAM_PERF_STATUS | RAPL_PKG_POWER_INFO;
@@ -1978,7 +2012,7 @@
 
 	eax = ebx = ecx = edx = 0;
 
-	asm("cpuid" : "=a" (max_level), "=b" (ebx), "=c" (ecx), "=d" (edx) : "a" (0));
+	__get_cpuid(0, &max_level, &ebx, &ecx, &edx);
 
 	if (ebx == 0x756e6547 && edx == 0x49656e69 && ecx == 0x6c65746e)
 		genuine_intel = 1;
@@ -1987,7 +2021,7 @@
 		fprintf(stderr, "CPUID(0): %.4s%.4s%.4s ",
 			(char *)&ebx, (char *)&edx, (char *)&ecx);
 
-	asm("cpuid" : "=a" (fms), "=c" (ecx), "=d" (edx) : "a" (1) : "ebx");
+	__get_cpuid(1, &fms, &ebx, &ecx, &edx);
 	family = (fms >> 8) & 0xf;
 	model = (fms >> 4) & 0xf;
 	stepping = fms & 0xf;
@@ -1998,10 +2032,8 @@
 		fprintf(stderr, "%d CPUID levels; family:model:stepping 0x%x:%x:%x (%d:%d:%d)\n",
 			max_level, family, model, stepping, family, model, stepping);
 
-	if (!(edx & (1 << 5))) {
-		fprintf(stderr, "CPUID: no MSR\n");
-		exit(1);
-	}
+	if (!(edx & (1 << 5)))
+		errx(1, "CPUID: no MSR");
 
 	/*
 	 * check max extended function levels of CPUID.
@@ -2009,31 +2041,27 @@
 	 * This check is valid for both Intel and AMD.
 	 */
 	ebx = ecx = edx = 0;
-	asm("cpuid" : "=a" (max_level), "=b" (ebx), "=c" (ecx), "=d" (edx) : "a" (0x80000000));
+	__get_cpuid(0x80000000, &max_level, &ebx, &ecx, &edx);
 
-	if (max_level < 0x80000007) {
-		fprintf(stderr, "CPUID: no invariant TSC (max_level 0x%x)\n", max_level);
-		exit(1);
-	}
+	if (max_level < 0x80000007)
+		errx(1, "CPUID: no invariant TSC (max_level 0x%x)", max_level);
 
 	/*
 	 * Non-Stop TSC is advertised by CPUID.EAX=0x80000007: EDX.bit8
 	 * this check is valid for both Intel and AMD
 	 */
-	asm("cpuid" : "=a" (eax), "=b" (ebx), "=c" (ecx), "=d" (edx) : "a" (0x80000007));
+	__get_cpuid(0x80000007, &eax, &ebx, &ecx, &edx);
 	has_invariant_tsc = edx & (1 << 8);
 
-	if (!has_invariant_tsc) {
-		fprintf(stderr, "No invariant TSC\n");
-		exit(1);
-	}
+	if (!has_invariant_tsc)
+		errx(1, "No invariant TSC");
 
 	/*
 	 * APERF/MPERF is advertised by CPUID.EAX=0x6: ECX.bit0
 	 * this check is valid for both Intel and AMD
 	 */
 
-	asm("cpuid" : "=a" (eax), "=b" (ebx), "=c" (ecx), "=d" (edx) : "a" (0x6));
+	__get_cpuid(0x6, &eax, &ebx, &ecx, &edx);
 	has_aperf = ecx & (1 << 0);
 	do_dts = eax & (1 << 0);
 	do_ptm = eax & (1 << 6);
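
The hand-written cpuid asm is replaced with __get_cpuid() from the compiler's <cpuid.h>; it fills all four registers for the requested leaf and returns 0 when that leaf is not available. A minimal sketch (not from the patch):

#include <cpuid.h>
#include <stdio.h>

int main(void)
{
	unsigned int eax = 0, ebx = 0, ecx = 0, edx = 0;

	if (!__get_cpuid(0, &eax, &ebx, &ecx, &edx))
		return 1;	/* CPUID leaf 0 not supported */

	/* the vendor string lives in ebx, edx, ecx, in that order */
	printf("max basic leaf %u, vendor %.4s%.4s%.4s\n",
	       eax, (char *)&ebx, (char *)&edx, (char *)&ecx);
	return 0;
}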
@@ -2047,7 +2075,7 @@
 			has_epb ? ", EPB": "");
 
 	if (!has_aperf)
-		exit(-1);
+		errx(-1, "No APERF");
 
 	do_nehalem_platform_info = genuine_intel && has_invariant_tsc;
 	do_nhm_cstates = genuine_intel;	/* all Intel w/ non-stop TSC have NHM counters */
@@ -2067,9 +2095,8 @@
 
 void usage()
 {
-	fprintf(stderr, "%s: [-v][-R][-T][-p|-P|-S][-c MSR# | -s]][-C MSR#][-m MSR#][-M MSR#][-i interval_sec | command ...]\n",
-		progname);
-	exit(1);
+	errx(1, "%s: [-v][-R][-T][-p|-P|-S][-c MSR#][-C MSR#][-m MSR#][-M MSR#][-i interval_sec | command ...]\n",
+	     progname);
 }
 
 
@@ -2112,19 +2139,15 @@
 		fprintf(stderr, "num_cpus %d max_cpu_num %d\n", topo.num_cpus, topo.max_cpu_num);
 
 	cpus = calloc(1, (topo.max_cpu_num  + 1) * sizeof(struct cpu_topology));
-	if (cpus == NULL) {
-		perror("calloc cpus");
-		exit(1);
-	}
+	if (cpus == NULL)
+		err(1, "calloc cpus");
 
 	/*
 	 * Allocate and initialize cpu_present_set
 	 */
 	cpu_present_set = CPU_ALLOC((topo.max_cpu_num + 1));
-	if (cpu_present_set == NULL) {
-		perror("CPU_ALLOC");
-		exit(3);
-	}
+	if (cpu_present_set == NULL)
+		err(3, "CPU_ALLOC");
 	cpu_present_setsize = CPU_ALLOC_SIZE((topo.max_cpu_num + 1));
 	CPU_ZERO_S(cpu_present_setsize, cpu_present_set);
 	for_all_proc_cpus(mark_cpu_present);
@@ -2133,10 +2156,8 @@
 	 * Allocate and initialize cpu_affinity_set
 	 */
 	cpu_affinity_set = CPU_ALLOC((topo.max_cpu_num + 1));
-	if (cpu_affinity_set == NULL) {
-		perror("CPU_ALLOC");
-		exit(3);
-	}
+	if (cpu_affinity_set == NULL)
+		err(3, "CPU_ALLOC");
 	cpu_affinity_setsize = CPU_ALLOC_SIZE((topo.max_cpu_num + 1));
 	CPU_ZERO_S(cpu_affinity_setsize, cpu_affinity_set);
 
@@ -2220,8 +2241,7 @@
 
 	return;
 error:
-	perror("calloc counters");
-	exit(1);
+	err(1, "calloc counters");
 }
 /*
  * init_counter()
@@ -2276,12 +2296,10 @@
 
 void allocate_output_buffer()
 {
-	output_buffer = calloc(1, (1 + topo.num_cpus) * 256);
+	output_buffer = calloc(1, (1 + topo.num_cpus) * 1024);
 	outp = output_buffer;
-	if (outp == NULL) {
-		perror("calloc");
-		exit(-1);
-	}
+	if (outp == NULL)
+		err(-1, "calloc output buffer");
 }
 
 void setup_all_buffers(void)
@@ -2292,6 +2310,7 @@
 	allocate_output_buffer();
 	for_all_proc_cpus(initialize_counters);
 }
+
 void turbostat_init()
 {
 	check_cpuid();
@@ -2335,17 +2354,13 @@
 	} else {
 
 		/* parent */
-		if (child_pid == -1) {
-			perror("fork");
-			exit(1);
-		}
+		if (child_pid == -1)
+			err(1, "fork");
 
 		signal(SIGINT, SIG_IGN);
 		signal(SIGQUIT, SIG_IGN);
-		if (waitpid(child_pid, &status, 0) == -1) {
-			perror("wait");
-			exit(status);
-		}
+		if (waitpid(child_pid, &status, 0) == -1)
+			err(status, "waitpid");
 	}
 	/*
 	 * n.b. fork_it() does not check for errors from for_all_cpus()
@@ -2364,13 +2379,30 @@
 	return status;
 }
 
+int get_and_dump_counters(void)
+{
+	int status;
+
+	status = for_all_cpus(get_counters, ODD_COUNTERS);
+	if (status)
+		return status;
+
+	status = for_all_cpus(dump_counters, ODD_COUNTERS);
+	if (status)
+		return status;
+
+	flush_stdout();
+
+	return status;
+}
+
 void cmdline(int argc, char **argv)
 {
 	int opt;
 
 	progname = argv[0];
 
-	while ((opt = getopt(argc, argv, "+pPSvi:sc:sC:m:M:RT:")) != -1) {
+	while ((opt = getopt(argc, argv, "+pPsSvi:c:C:m:M:RJT:")) != -1) {
 		switch (opt) {
 		case 'p':
 			show_core_only++;
@@ -2378,6 +2410,9 @@
 		case 'P':
 			show_pkg_only++;
 			break;
+		case 's':
+			dump_only++;
+			break;
 		case 'S':
 			summary_only++;
 			break;
@@ -2405,6 +2440,10 @@
 		case 'T':
 			tcc_activation_temp_override = atoi(optarg);
 			break;
+		case 'J':
+			rapl_joules++;
+			break;
+
 		default:
 			usage();
 		}
@@ -2416,11 +2455,15 @@
 	cmdline(argc, argv);
 
 	if (verbose)
-		fprintf(stderr, "turbostat v3.5 April 26, 2013"
+		fprintf(stderr, "turbostat v3.6 Dec 2, 2013"
 			" - Len Brown <lenb@kernel.org>\n");
 
 	turbostat_init();
 
+	/* dump counters and exit */
+	if (dump_only)
+		return get_and_dump_counters();
+
 	/*
 	 * if any params left, it must be a command to fork
 	 */
diff --git a/virt/kvm/arm/vgic.c b/virt/kvm/arm/vgic.c
index be456ce..8ca405c 100644
--- a/virt/kvm/arm/vgic.c
+++ b/virt/kvm/arm/vgic.c
@@ -24,6 +24,7 @@
 #include <linux/of.h>
 #include <linux/of_address.h>
 #include <linux/of_irq.h>
+#include <linux/uaccess.h>
 
 #include <linux/irqchip/arm-gic.h>
 
diff --git a/virt/kvm/coalesced_mmio.c b/virt/kvm/coalesced_mmio.c
index 88b2fe3..00d8642 100644
--- a/virt/kvm/coalesced_mmio.c
+++ b/virt/kvm/coalesced_mmio.c
@@ -154,17 +154,13 @@
 	list_add_tail(&dev->list, &kvm->coalesced_zones);
 	mutex_unlock(&kvm->slots_lock);
 
-	return ret;
+	return 0;
 
 out_free_dev:
 	mutex_unlock(&kvm->slots_lock);
-
 	kfree(dev);
 
-	if (dev == NULL)
-		return -ENXIO;
-
-	return 0;
+	return ret;
 }
 
 int kvm_vm_ioctl_unregister_coalesced_mmio(struct kvm *kvm,